diff --git a/.clang-format b/.clang-format index 9b3aa8b7213b2..ecb44bfabd9aa 100644 --- a/.clang-format +++ b/.clang-format @@ -1 +1,2 @@ BasedOnStyle: LLVM +LineEnding: LF diff --git a/.github/renovate.json b/.github/renovate.json new file mode 100644 index 0000000000000..6ce98c4e7b105 --- /dev/null +++ b/.github/renovate.json @@ -0,0 +1,12 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "includePaths": [".github/**"], + "schedule": "* 0 * * 1", + "minimumReleaseAge": "3 days", + "assignees": ["boomanaiden154"], + "ignorePaths": [".github/workflows/containers/**"], + "groupName": "[Github] Update GHA Dependencies" +} diff --git a/.github/workflows/build-ci-container-windows.yml b/.github/workflows/build-ci-container-windows.yml index 167e7cf06b3b2..14c349b1b2fe5 100644 --- a/.github/workflows/build-ci-container-windows.yml +++ b/.github/workflows/build-ci-container-windows.yml @@ -44,7 +44,7 @@ jobs: run: | docker save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} - name: Upload container image - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: container path: ${{ steps.vars.outputs.container-filename }} diff --git a/.github/workflows/build-ci-container.yml b/.github/workflows/build-ci-container.yml index 67f35fd30701f..01f1b8dc4f990 100644 --- a/.github/workflows/build-ci-container.yml +++ b/.github/workflows/build-ci-container.yml @@ -64,7 +64,7 @@ jobs: podman save ${{ steps.vars.outputs.container-name-agent-tag }} > ${{ steps.vars.outputs.container-agent-filename }} - name: Upload container image - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: container-${{ matrix.arch }} path: "*.tar" diff --git a/.github/workflows/build-metrics-container.yml b/.github/workflows/build-metrics-container.yml index cadcaa9a42e8f..69b571575f40c 100644 --- a/.github/workflows/build-metrics-container.yml +++ b/.github/workflows/build-metrics-container.yml @@ -49,7 +49,7 @@ jobs: run: | podman save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} - name: Upload Container Image - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: container path: ${{ steps.vars.outputs.container-filename }} diff --git a/.github/workflows/check-ci.yml b/.github/workflows/check-ci.yml index 7e8c15696e344..f18a69c192ee9 100644 --- a/.github/workflows/check-ci.yml +++ b/.github/workflows/check-ci.yml @@ -26,7 +26,7 @@ jobs: with: sparse-checkout: .ci - name: Setup Python - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: 3.13 cache: 'pip' diff --git a/.github/workflows/ci-post-commit-analyzer.yml b/.github/workflows/ci-post-commit-analyzer.yml index 7d37b900d7909..49cf4100dd71c 100644 --- a/.github/workflows/ci-post-commit-analyzer.yml +++ b/.github/workflows/ci-post-commit-analyzer.yml @@ -44,7 +44,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # 
v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: # A full build of llvm, clang, lld, and lldb takes about 250MB # of ccache space. There's not much reason to have more than this, @@ -87,7 +87,7 @@ jobs: scan-build --generate-index-only build/analyzer-results - name: Upload Results - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: analyzer-results diff --git a/.github/workflows/commit-access-review.yml b/.github/workflows/commit-access-review.yml index a7be81b0e2da5..734dc212fa648 100644 --- a/.github/workflows/commit-access-review.yml +++ b/.github/workflows/commit-access-review.yml @@ -28,7 +28,7 @@ jobs: python3 .github/workflows/commit-access-review.py $GITHUB_TOKEN - name: Upload Triage List - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: triagers path: triagers.log diff --git a/.github/workflows/containers/github-action-ci/Dockerfile b/.github/workflows/containers/github-action-ci/Dockerfile index 892fc9005de85..dc0c9cabc7f01 100644 --- a/.github/workflows/containers/github-action-ci/Dockerfile +++ b/.github/workflows/containers/github-action-ci/Dockerfile @@ -1,7 +1,7 @@ -FROM docker.io/library/ubuntu:24.04 as base +FROM docker.io/library/ubuntu:24.04 AS base ENV LLVM_SYSROOT=/opt/llvm -FROM base as stage1-toolchain +FROM base AS stage1-toolchain ENV LLVM_VERSION=21.1.1 RUN apt-get update && \ @@ -37,7 +37,7 @@ RUN cmake -B ./build -G Ninja ./llvm \ RUN ninja -C ./build stage2-clang-bolt stage2-install-distribution && ninja -C ./build install-distribution -FROM base as ci-container +FROM base AS ci-container COPY --from=stage1-toolchain $LLVM_SYSROOT $LLVM_SYSROOT @@ -62,6 +62,7 @@ RUN apt-get update && \ # Having a symlink from python to python3 enables code sharing between # the Linux and Windows pipelines. 
python3-pip \ + python3-venv \ file \ tzdata \ python-is-python3 && \ @@ -97,7 +98,7 @@ RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers USER gha WORKDIR /home/gha -FROM ci-container as ci-container-agent +FROM ci-container AS ci-container-agent ENV GITHUB_RUNNER_VERSION=2.328.0 diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 8cdd39c164cca..b5f3413fe3b6b 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -95,9 +95,9 @@ jobs: workflow: - '.github/workflows/docs.yml' - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: '3.11' + python-version: '3.13' cache: 'pip' cache-dependency-path: 'llvm/docs/requirements-hashed.txt' - name: Install python dependencies @@ -209,7 +209,7 @@ jobs: mkdir built-docs/flang cp -r flang-build/docs/* built-docs/flang/ - name: Upload docs - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: docs-output path: built-docs/ diff --git a/.github/workflows/email-check.yaml b/.github/workflows/email-check.yaml index 9390fba4d4e3b..981c6fa62cb19 100644 --- a/.github/workflows/email-check.yaml +++ b/.github/workflows/email-check.yaml @@ -39,7 +39,7 @@ jobs: [{"body" : "$COMMENT"}] EOF - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: workflow-args diff --git a/.github/workflows/gha-codeql.yml b/.github/workflows/gha-codeql.yml index efb8143877c4e..63388ebc706bd 100644 --- a/.github/workflows/gha-codeql.yml +++ b/.github/workflows/gha-codeql.yml @@ -29,9 +29,9 @@ jobs: sparse-checkout: | .github/ - name: Initialize CodeQL - uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/init@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 with: languages: actions queries: security-extended - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/analyze@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 diff --git a/.github/workflows/hlsl-test-all.yaml b/.github/workflows/hlsl-test-all.yaml index 72cbbe2b7dded..dcb852312d41a 100644 --- a/.github/workflows/hlsl-test-all.yaml +++ b/.github/workflows/hlsl-test-all.yaml @@ -80,7 +80,7 @@ jobs: ninja check-hlsl-unit ninja ${{ inputs.TestTarget }} - name: Publish Test Results - uses: EnricoMi/publish-unit-test-result-action/macos@170bf24d20d201b842d7a52403b73ed297e6645b # v2 + uses: EnricoMi/publish-unit-test-result-action/macos@3a74b2957438d0b6e2e61d67b05318aa25c9e6c6 # v2.20.0 if: always() && runner.os == 'macOS' with: comment_mode: off diff --git a/.github/workflows/issue-write.yml b/.github/workflows/issue-write.yml index db9389b6afe53..26cd60c070251 100644 --- a/.github/workflows/issue-write.yml +++ b/.github/workflows/issue-write.yml @@ -40,7 +40,7 @@ jobs: - name: 'Comment on PR' if: steps.download-artifact.outputs.artifact-id != '' - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/.github/workflows/libc-fullbuild-tests.yml 
b/.github/workflows/libc-fullbuild-tests.yml index 8967cd0949c11..3a048aeb9405b 100644 --- a/.github/workflows/libc-fullbuild-tests.yml +++ b/.github/workflows/libc-fullbuild-tests.yml @@ -61,7 +61,7 @@ jobs: # Do not use direct GHAC access even though it is supported by sccache. GHAC rejects # frequent small object writes. - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 1G key: libc_fullbuild_${{ matrix.c_compiler }} diff --git a/.github/workflows/libc-overlay-tests.yml b/.github/workflows/libc-overlay-tests.yml index 7154946ac5c3d..df9a20dce8eae 100644 --- a/.github/workflows/libc-overlay-tests.yml +++ b/.github/workflows/libc-overlay-tests.yml @@ -51,7 +51,7 @@ jobs: # Do not use direct GHAC access even though it is supported by sccache. GHAC rejects # frequent small object writes. - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 1G key: libc_overlay_build_${{ matrix.os }}_${{ matrix.compiler.c_compiler }} diff --git a/.github/workflows/libclang-abi-tests.yml b/.github/workflows/libclang-abi-tests.yml index d53a2f306afa2..5ccf976848197 100644 --- a/.github/workflows/libclang-abi-tests.yml +++ b/.github/workflows/libclang-abi-tests.yml @@ -131,7 +131,7 @@ jobs: sed -i 's/LLVM_[0-9]\+/LLVM_NOVERSION/' $lib-${{ matrix.ref }}.abi done - name: Upload ABI file - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: ${{ matrix.name }} path: '*${{ matrix.ref }}.abi' @@ -165,7 +165,7 @@ jobs: done - name: Upload ABI Comparison if: always() - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: compat-report-${{ github.sha }} path: compat_reports/ diff --git a/.github/workflows/libclang-python-tests.yml b/.github/workflows/libclang-python-tests.yml index e168928325561..8fb8cec3b4f00 100644 --- a/.github/workflows/libclang-python-tests.yml +++ b/.github/workflows/libclang-python-tests.yml @@ -34,11 +34,11 @@ jobs: steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup Python - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: ${{ matrix.python-version }} - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 2G key: spirv-ubuntu-24.04 diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml index 2e6ff7f91b6fc..1c07a0adc6e99 100644 --- a/.github/workflows/libcxx-build-and-test.yaml +++ b/.github/workflows/libcxx-build-and-test.yaml @@ -60,7 +60,7 @@ jobs: env: CC: ${{ matrix.cc }} CXX: ${{ matrix.cxx }} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: ${{ matrix.config }}-${{ matrix.cxx }}-results @@ -105,7 +105,7 @@ jobs: env: CC: ${{ matrix.cc }} 
CXX: ${{ matrix.cxx }} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() # Upload artifacts even if the build or test suite fails with: name: ${{ matrix.config }}-${{ matrix.cxx }}-results @@ -169,7 +169,7 @@ jobs: env: CC: clang-22 CXX: clang++-22 - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: ${{ matrix.config }}-results @@ -215,7 +215,7 @@ jobs: - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 with: # https://github.com/actions/runner-images/blob/main/images/macos/macos-15-Readme.md - xcode-version: '16.3' + xcode-version: '26.0' - uses: seanmiddleditch/gha-setup-ninja@3b1f8f94a2f8254bd26914c4ab9474d4f0015f67 # v6 - name: Build and test run: | @@ -223,7 +223,7 @@ jobs: source .venv/bin/activate python -m pip install psutil bash libcxx/utils/ci/run-buildbot ${{ matrix.config }} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() # Upload artifacts even if the build or test suite fails with: name: macos-${{ matrix.config }}-results diff --git a/.github/workflows/libcxx-build-containers.yml b/.github/workflows/libcxx-build-containers.yml index cbaa8e0f65129..312cb47fc3d93 100644 --- a/.github/workflows/libcxx-build-containers.yml +++ b/.github/workflows/libcxx-build-containers.yml @@ -55,7 +55,7 @@ jobs: TAG: ${{ github.sha }} - name: Log in to GitHub Container Registry - uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0 with: registry: ghcr.io username: ${{ github.actor }} diff --git a/.github/workflows/libcxx-check-generated-files.yml b/.github/workflows/libcxx-check-generated-files.yml index f338bd6952779..d34b6a79556d1 100644 --- a/.github/workflows/libcxx-check-generated-files.yml +++ b/.github/workflows/libcxx-check-generated-files.yml @@ -15,7 +15,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install dependencies - uses: aminya/setup-cpp@17c11551771948abc5752bbf3183482567c7caf0 # v1.1.1 + uses: aminya/setup-cpp@a276e6e3d1db9160db5edc458e99a30d3b109949 # v1.7.1 with: clangformat: 17.0.1 ninja: true diff --git a/.github/workflows/libcxx-run-benchmarks.yml b/.github/workflows/libcxx-run-benchmarks.yml index 17a97df029ba5..0379a0a1f857d 100644 --- a/.github/workflows/libcxx-run-benchmarks.yml +++ b/.github/workflows/libcxx-run-benchmarks.yml @@ -35,7 +35,7 @@ jobs: steps: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: '3.10' + python-version: '3.13' - name: Extract information from the PR id: vars diff --git a/.github/workflows/llvm-bugs.yml b/.github/workflows/llvm-bugs.yml index 5470662c97628..7d42abfadde7b 100644 --- a/.github/workflows/llvm-bugs.yml +++ b/.github/workflows/llvm-bugs.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-24.04 if: github.repository == 'llvm/llvm-project' steps: - - uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 with: node-version: 18 check-latest: true diff --git a/.github/workflows/llvm-tests.yml 
b/.github/workflows/llvm-tests.yml index ea80e229512d5..c4701c7283da0 100644 --- a/.github/workflows/llvm-tests.yml +++ b/.github/workflows/llvm-tests.yml @@ -128,14 +128,14 @@ jobs: # Remove symbol versioning from dumps, so we can compare across major versions. sed -i 's/LLVM_${{ matrix.llvm_version_major }}/LLVM_NOVERSION/' ${{ matrix.ref }}.abi - name: Upload ABI file - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: ${{ matrix.name }} path: ${{ matrix.ref }}.abi - name: Upload symbol list file if: matrix.name == 'build-baseline' - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: symbol-list path: llvm.symbols @@ -179,7 +179,7 @@ jobs: abi-compliance-checker $EXTRA_ARGS -l libLLVM.so -old build-baseline/*.abi -new build-latest/*.abi || test "${{ needs.abi-dump-setup.outputs.ABI_HEADERS }}" = "llvm-c" - name: Upload ABI Comparison if: always() - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: compat-report-${{ github.sha }} path: compat_reports/ diff --git a/.github/workflows/mlir-spirv-tests.yml b/.github/workflows/mlir-spirv-tests.yml index 78952ccad2642..5bb16c739cdde 100644 --- a/.github/workflows/mlir-spirv-tests.yml +++ b/.github/workflows/mlir-spirv-tests.yml @@ -30,7 +30,7 @@ jobs: steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 2G key: spirv-mlir-ubuntu-24.04 diff --git a/.github/workflows/pr-code-format.yml b/.github/workflows/pr-code-format.yml index 61c8680cd72a1..1e0dc7045c1cc 100644 --- a/.github/workflows/pr-code-format.yml +++ b/.github/workflows/pr-code-format.yml @@ -43,14 +43,14 @@ jobs: # of a release cycle (x.1.0) or the last version of a release cycle, or # if there have been relevant clang-format backports. 
- name: Install clang-format - uses: aminya/setup-cpp@17c11551771948abc5752bbf3183482567c7caf0 # v1.1.1 + uses: aminya/setup-cpp@a276e6e3d1db9160db5edc458e99a30d3b109949 # v1.7.1 with: clangformat: 21.1.0 - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: '3.11' + python-version: '3.13' cache: 'pip' cache-dependency-path: 'llvm/utils/git/requirements_formatting.txt' @@ -72,7 +72,7 @@ jobs: --end-rev HEAD \ --changed-files "$CHANGED_FILES" - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: workflow-args diff --git a/.github/workflows/pr-code-lint.yml b/.github/workflows/pr-code-lint.yml index 0350e57b8c553..776ec4af9d2dc 100644 --- a/.github/workflows/pr-code-lint.yml +++ b/.github/workflows/pr-code-lint.yml @@ -27,7 +27,7 @@ jobs: cancel-in-progress: true steps: - name: Fetch LLVM sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 with: fetch-depth: 2 @@ -47,15 +47,18 @@ jobs: echo "Changed files:" echo "$CHANGED_FILES" + # The clang tidy version should always be upgraded to the first version + # of a release cycle (x.1.0) or the last version of a release cycle, or + # if there have been relevant clang-format backports. - name: Install clang-tidy - uses: aminya/setup-cpp@17c11551771948abc5752bbf3183482567c7caf0 # v1.1.1 + uses: aminya/setup-cpp@a276e6e3d1db9160db5edc458e99a30d3b109949 # v1.7.1 with: - clang-tidy: 20.1.8 + clang-tidy: 21.1.0 - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: '3.12' + python-version: '3.13' - name: Install Python dependencies run: python3 -m pip install -r llvm/utils/git/requirements_linting.txt @@ -104,7 +107,7 @@ jobs: --changed-files "$CHANGED_FILES" - name: Upload results - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: workflow-args diff --git a/.github/workflows/pr-request-release-note.yml b/.github/workflows/pr-request-release-note.yml index f0197d71d6aa9..8162a8984ee5f 100644 --- a/.github/workflows/pr-request-release-note.yml +++ b/.github/workflows/pr-request-release-note.yml @@ -41,7 +41,7 @@ jobs: request-release-note \ --pr-number ${{ github.event.pull_request.number}} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: workflow-args diff --git a/.github/workflows/premerge.yaml b/.github/workflows/premerge.yaml index 63ab4a8356971..a9c107e4a5f08 100644 --- a/.github/workflows/premerge.yaml +++ b/.github/workflows/premerge.yaml @@ -76,7 +76,7 @@ jobs: # https://github.com/actions/upload-artifact/issues/569 continue-on-error: true if: '!cancelled()' - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: Premerge Artifacts (Linux) path: artifacts/ @@ -130,7 +130,7 @@ jobs: # 
https://github.com/actions/upload-artifact/issues/569 continue-on-error: true if: '!cancelled()' - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: Premerge Artifacts (Windows) path: artifacts/ @@ -151,7 +151,7 @@ jobs: with: fetch-depth: 2 - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: "2000M" - name: Install Ninja diff --git a/.github/workflows/release-asset-audit.yml b/.github/workflows/release-asset-audit.yml index 6546540a1b547..8b24948b568eb 100644 --- a/.github/workflows/release-asset-audit.yml +++ b/.github/workflows/release-asset-audit.yml @@ -38,7 +38,7 @@ jobs: if: >- github.event_name != 'pull_request' && failure() - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea #v7.0.1 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 with: github-token: ${{ secrets.ISSUE_SUBSCRIBER_TOKEN }} script: | diff --git a/.github/workflows/release-binaries-save-stage/action.yml b/.github/workflows/release-binaries-save-stage/action.yml index f08088c7bc56f..84ccf98c23a82 100644 --- a/.github/workflows/release-binaries-save-stage/action.yml +++ b/.github/workflows/release-binaries-save-stage/action.yml @@ -30,14 +30,14 @@ runs: tar -C ${{ inputs.build-prefix }} -c build/ | zstd -T0 -c > build.tar.zst - name: Upload Stage 1 Source - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: ${{ runner.os }}-${{ runner.arch }}-${{ github.job }}-source path: llvm-project.tar.zst retention-days: 2 - name: Upload Stage 1 Build Dir - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: ${{ runner.os}}-${{ runner.arch }}-${{ github.job }}-build path: build.tar.zst diff --git a/.github/workflows/release-binaries-setup-stage/action.yml b/.github/workflows/release-binaries-setup-stage/action.yml index 8f45e22886b6e..475a25fa6b772 100644 --- a/.github/workflows/release-binaries-setup-stage/action.yml +++ b/.github/workflows/release-binaries-setup-stage/action.yml @@ -22,7 +22,7 @@ runs: using: "composite" steps: - name: Install Ninja - uses: llvm/actions/install-ninja@22e9f909d35b50bd1181709564bfe816eaeaae81 # main + uses: llvm/actions/install-ninja@a1ea791b03c8e61f53a0e66f2f73db283aa0f01e # main - name: Setup Windows if: startsWith(runner.os, 'Windows') diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 8f422a0147748..cba48e4d0c70a 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -68,9 +68,9 @@ jobs: steps: # It's good practice to use setup-python, but this is also required on macos-14 # due to https://github.com/actions/runner-images/issues/10385 - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: '3.12' + python-version: '3.13' - name: Checkout LLVM uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -250,7 +250,7 @@ jobs: release_dir=`find ${{ steps.setup-stage.outputs.build-prefix }}/build -iname 
'stage2-bins'` mv $release_dir/${{ needs.prepare.outputs.release-binary-filename }} . - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: ${{ runner.os }}-${{ runner.arch }}-release-binary # Due to path differences on Windows when running in bash vs running on node, @@ -301,7 +301,7 @@ jobs: - name: Attest Build Provenance id: provenance - uses: actions/attest-build-provenance@897ed5eab6ed058a474202017ada7f40bfa52940 # v1.0.0 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: ${{ needs.prepare.outputs.release-binary-filename }} @@ -310,7 +310,7 @@ jobs: mv ${{ steps.provenance.outputs.bundle-path }} ${{ needs.prepare.outputs.release-binary-filename }}.jsonl - name: Upload Build Provenance - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 #v4.3.3 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: ${{ needs.prepare.outputs.release-binary-filename }}-attestation path: ${{ needs.prepare.outputs.release-binary-filename }}.jsonl diff --git a/.github/workflows/release-documentation.yml b/.github/workflows/release-documentation.yml index 712ff1831170e..d3d375d3a6df9 100644 --- a/.github/workflows/release-documentation.yml +++ b/.github/workflows/release-documentation.yml @@ -37,7 +37,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: cache: 'pip' cache-dependency-path: './llvm/docs/requirements.txt' @@ -59,7 +59,7 @@ jobs: ./llvm/utils/release/build-docs.sh -release "${{ inputs.release-version }}" -no-doxygen - name: Create Release Notes Artifact - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: release-notes path: docs-build/html-export/ diff --git a/.github/workflows/release-doxygen.yml b/.github/workflows/release-doxygen.yml index 17c677413f744..79e509e5e6a8b 100644 --- a/.github/workflows/release-doxygen.yml +++ b/.github/workflows/release-doxygen.yml @@ -43,7 +43,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: cache: 'pip' cache-dependency-path: './llvm/docs/requirements.txt' diff --git a/.github/workflows/release-lit.yml b/.github/workflows/release-lit.yml index 60ec64462bc31..8b1ce04e12c4f 100644 --- a/.github/workflows/release-lit.yml +++ b/.github/workflows/release-lit.yml @@ -45,7 +45,7 @@ jobs: ./llvm/utils/release/./github-upload-release.py --token "$GITHUB_TOKEN" --user ${{ github.actor }} --user-token "$USER_TOKEN" check-permissions - name: Setup Cpp - uses: aminya/setup-cpp@17c11551771948abc5752bbf3183482567c7caf0 # v1.1.1 + uses: aminya/setup-cpp@a276e6e3d1db9160db5edc458e99a30d3b109949 # v1.7.1 with: compiler: llvm-16.0.6 cmake: true @@ -66,14 +66,14 @@ jobs: python3 setup.py sdist bdist_wheel - name: Upload lit to test.pypi.org - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4 + uses: 
pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 with: password: ${{ secrets.LLVM_LIT_TEST_PYPI_API_TOKEN }} repository-url: https://test.pypi.org/legacy/ packages-dir: llvm/utils/lit/dist/ - name: Upload lit to pypi.org - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4 + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 with: password: ${{ secrets.LLVM_LIT_PYPI_API_TOKEN }} packages-dir: llvm/utils/lit/dist/ diff --git a/.github/workflows/release-sources.yml b/.github/workflows/release-sources.yml index 14cc4c4e9b94f..2278b96dbe242 100644 --- a/.github/workflows/release-sources.yml +++ b/.github/workflows/release-sources.yml @@ -92,14 +92,14 @@ jobs: - name: Attest Build Provenance if: github.event_name != 'pull_request' id: provenance - uses: actions/attest-build-provenance@897ed5eab6ed058a474202017ada7f40bfa52940 # v1.0.0 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: "*.xz" - if: github.event_name != 'pull_request' run: | mv ${{ steps.provenance.outputs.bundle-path }} . - name: Create Tarball Artifacts - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 #v4.3.3 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: path: | *.xz diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 40db5504294ef..c07df338cf989 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -36,7 +36,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 with: results_file: results.sarif results_format: sarif @@ -49,7 +49,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: SARIF file path: results.sarif @@ -57,6 +57,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@80f993039571a6de66594ecaa432875a6942e8e0 # v2.20.6 + uses: github/codeql-action/upload-sarif@b8d3b6e8af63cde30bdc382c0bc28114f4346c88 # v2.28.1 with: sarif_file: results.sarif diff --git a/.github/workflows/spirv-tests.yml b/.github/workflows/spirv-tests.yml index 8708fb06d9eb8..69374ae563306 100644 --- a/.github/workflows/spirv-tests.yml +++ b/.github/workflows/spirv-tests.yml @@ -26,7 +26,7 @@ jobs: steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 2G key: spirv-ubuntu-24.04 diff --git a/.github/workflows/unprivileged-download-artifact/action.yml b/.github/workflows/unprivileged-download-artifact/action.yml index 9d8fb59a67c0e..5b50d7ce3d3fb 100644 --- a/.github/workflows/unprivileged-download-artifact/action.yml +++ b/.github/workflows/unprivileged-download-artifact/action.yml @@ -27,7 +27,7 @@ outputs: runs: using: "composite" steps: - - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea #v7.0.1 + - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 id: artifact-url with: script: | diff --git a/bolt/include/bolt/Core/BinaryFunction.h b/bolt/include/bolt/Core/BinaryFunction.h index 51b139a15e1a0..7e0e3bff83259 100644 --- a/bolt/include/bolt/Core/BinaryFunction.h +++ b/bolt/include/bolt/Core/BinaryFunction.h @@ -192,9 +192,6 @@ class BinaryFunction { mutable MCSymbol *FunctionConstantIslandLabel{nullptr}; mutable MCSymbol *FunctionColdConstantIslandLabel{nullptr}; - - // Returns constant island alignment - uint16_t getAlignment() const { return sizeof(uint64_t); } }; static constexpr uint64_t COUNT_NO_PROFILE = @@ -2114,9 +2111,7 @@ class BinaryFunction { return *std::prev(CodeIter) <= *DataIter; } - uint16_t getConstantIslandAlignment() const { - return Islands ? Islands->getAlignment() : 1; - } + uint16_t getConstantIslandAlignment() const; /// If there is a constant island in the range [StartOffset, EndOffset), /// return its address. @@ -2168,6 +2163,11 @@ class BinaryFunction { return Islands && !Islands->DataOffsets.empty(); } + /// Return true if the whole function is a constant island. + bool isDataObject() const { + return Islands && Islands->CodeOffsets.size() == 0; + } + bool isStartOfConstantIsland(uint64_t Offset) const { return hasConstantIsland() && Islands->DataOffsets.count(Offset); } diff --git a/bolt/include/bolt/Core/FunctionLayout.h b/bolt/include/bolt/Core/FunctionLayout.h index ee4dd689b8dd6..240d5138a093a 100644 --- a/bolt/include/bolt/Core/FunctionLayout.h +++ b/bolt/include/bolt/Core/FunctionLayout.h @@ -232,8 +232,24 @@ class FunctionLayout { return Blocks[Index]; } + /// Return the basic block after the given basic block iterator in the layout + /// or nullptr if the last basic block iterator is given. + const BinaryBasicBlock *getBasicBlockAfter(block_const_iterator BlockIt, + bool IgnoreSplits = true) const; + + /// Returns the basic block after the given basic block in the layout or + /// nullptr if the last basic block is given. + /// + /// Note: prefer the version that takes the iterator as this function uses + /// linear basic block lookup. 
+  const BinaryBasicBlock *getBasicBlockAfter(const BinaryBasicBlock *BB,
+                                             bool IgnoreSplits = true) const;
+
   /// Returns the basic block after the given basic block in the layout or
   /// nullptr if the last basic block is given.
+  ///
+  /// Note: prefer the version that takes the iterator as this function uses
+  /// linear basic block lookup.
   BinaryBasicBlock *getBasicBlockAfter(const BinaryBasicBlock *const BB,
                                        const bool IgnoreSplits = true) {
     return const_cast<BinaryBasicBlock *>(
         const_cast<const FunctionLayout &>(*this).getBasicBlockAfter(
             BB, IgnoreSplits));
   }
 
-  /// Returns the basic block after the given basic block in the layout or
-  /// nullptr if the last basic block is given.
-  const BinaryBasicBlock *getBasicBlockAfter(const BinaryBasicBlock *BB,
-                                             bool IgnoreSplits = true) const;
-
   /// True if the layout contains at least two non-empty fragments.
   bool isSplit() const;
diff --git a/bolt/include/bolt/Core/MCInstUtils.h b/bolt/include/bolt/Core/MCInstUtils.h
new file mode 100644
index 0000000000000..291e31e0e0fdf
--- /dev/null
+++ b/bolt/include/bolt/Core/MCInstUtils.h
@@ -0,0 +1,347 @@
+//===- bolt/Core/MCInstUtils.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef BOLT_CORE_MCINSTUTILS_H
+#define BOLT_CORE_MCINSTUTILS_H
+
+#include "bolt/Core/BinaryBasicBlock.h"
+#include "bolt/Core/MCPlus.h"
+#include <map>
+#include <variant>
+
+namespace llvm {
+class MCCodeEmitter;
+}
+
+namespace llvm {
+namespace bolt {
+
+class BinaryFunction;
+
+/// MCInstReference represents a reference to a constant MCInst as stored either
+/// in a BinaryFunction (i.e. before a CFG is created), or in a BinaryBasicBlock
+/// (after a CFG is created).
+///
+/// The reference may be invalidated when the function containing the referenced
+/// instruction is modified.
+class MCInstReference {
+public:
+  using nocfg_const_iterator = std::map<uint32_t, MCInst>::const_iterator;
+
+  /// Constructs an empty reference.
+  MCInstReference() : Reference(RefInBB(nullptr, /*Index=*/0)) {}
+
+  /// Constructs a reference to the instruction inside the basic block.
+  MCInstReference(const BinaryBasicBlock &BB, const MCInst &Inst)
+      : Reference(RefInBB(&BB, getInstIndexInBB(BB, Inst))) {}
+  /// Constructs a reference to the instruction inside the basic block.
+  MCInstReference(const BinaryBasicBlock &BB, unsigned Index)
+      : Reference(RefInBB(&BB, Index)) {}
+
+  /// Constructs a reference to the instruction inside the function without
+  /// CFG information.
+  MCInstReference(const BinaryFunction &BF, nocfg_const_iterator It)
+      : Reference(RefInBF(&BF, It)) {}
+
+  /// Locates an instruction inside a function and returns a reference.
+  static MCInstReference get(const MCInst &Inst, const BinaryFunction &BF);
+
+  bool operator==(const MCInstReference &Other) const {
+    return Reference == Other.Reference;
+  }
+
+  const MCInst &getMCInst() const {
+    assert(!empty() && "Empty reference");
+    if (auto *Ref = tryGetRefInBB()) {
+      [[maybe_unused]] unsigned NumInstructions = Ref->BB->size();
+      assert(Ref->Index < NumInstructions && "Invalid reference");
+      return Ref->BB->getInstructionAtIndex(Ref->Index);
+    }
+    return getRefInBF().It->second;
+  }
+
+  operator const MCInst &() const { return getMCInst(); }
+
+  bool empty() const {
+    if (auto *Ref = tryGetRefInBB())
+      return Ref->BB == nullptr;
+    return getRefInBF().BF == nullptr;
+  }
+
+  bool hasCFG() const { return !empty() && tryGetRefInBB() != nullptr; }
+
+  const BinaryFunction *getFunction() const {
+    assert(!empty() && "Empty reference");
+    if (auto *Ref = tryGetRefInBB())
+      return Ref->BB->getFunction();
+    return getRefInBF().BF;
+  }
+
+  const BinaryBasicBlock *getBasicBlock() const {
+    assert(!empty() && "Empty reference");
+    if (auto *Ref = tryGetRefInBB())
+      return Ref->BB;
+    return nullptr;
+  }
+
+  /// Computes the original address of the instruction (or offset from base
+  /// for PIC), assuming the containing function was not modified.
+  ///
+  /// This function is intended for the use cases like debug printing, as it
+  /// is only as precise as BinaryContext::computeCodeSize() is and requires
+  /// iterating over the prefix of the basic block (when CFG is available).
+  ///
+  /// MCCodeEmitter is not thread safe and the default instance from
+  /// BinaryContext is used by default, thus pass an instance explicitly if
+  /// this function may be called from multithreaded code.
+  uint64_t computeAddress(const MCCodeEmitter *Emitter = nullptr) const;
+
+  raw_ostream &print(raw_ostream &OS) const;
+
+private:
+  static unsigned getInstIndexInBB(const BinaryBasicBlock &BB,
+                                   const MCInst &Inst) {
+    // Usage of pointer arithmetic assumes the instructions are stored in a
+    // vector, see BasicBlockStorageIsVector in MCInstUtils.cpp.
+    const MCInst *FirstInstInBB = &*BB.begin();
+    return &Inst - FirstInstInBB;
+  }
+
+  // Two cases are possible:
+  // * functions with CFG reconstructed - a function stores a collection of
+  //   basic blocks, each basic block stores a contiguous vector of MCInst
+  // * functions without CFG - there are no basic blocks created,
+  //   the instructions are directly stored in std::map in BinaryFunction
+  //
+  // In both cases, the direct parent of MCInst is stored together with an
+  // index or iterator pointing to the instruction.
+
+  // Helper struct: CFG is available, the direct parent is a basic block.
+  struct RefInBB {
+    RefInBB(const BinaryBasicBlock *BB, unsigned Index)
+        : BB(BB), Index(Index) {}
+    RefInBB(const RefInBB &Other) = default;
+    RefInBB &operator=(const RefInBB &Other) = default;
+
+    const BinaryBasicBlock *BB;
+    unsigned Index;
+
+    bool operator==(const RefInBB &Other) const {
+      return BB == Other.BB && Index == Other.Index;
+    }
+  };
+
+  // Helper struct: CFG is *not* available, the direct parent is a function,
+  // iterator's type is std::map<uint32_t, MCInst>::iterator (the mapped value
+  // is an instruction's offset).
+  struct RefInBF {
+    RefInBF(const BinaryFunction *BF, nocfg_const_iterator It)
+        : BF(BF), It(It) {}
+    RefInBF(const RefInBF &Other) = default;
+    RefInBF &operator=(const RefInBF &Other) = default;
+
+    const BinaryFunction *BF;
+    nocfg_const_iterator It;
+
+    bool operator==(const RefInBF &Other) const {
+      return BF == Other.BF && It->first == Other.It->first;
+    }
+  };
+
+  std::variant<RefInBB, RefInBF> Reference;
+
+  // Utility methods to be used like this:
+  //
+  //   if (auto *Ref = tryGetRefInBB())
+  //     return Ref->doSomething(...);
+  //   return getRefInBF().doSomethingElse(...);
+  const RefInBB *tryGetRefInBB() const {
+    assert(std::get_if<RefInBB>(&Reference) ||
+           std::get_if<RefInBF>(&Reference));
+    return std::get_if<RefInBB>(&Reference);
+  }
+  const RefInBF &getRefInBF() const {
+    assert(std::get_if<RefInBF>(&Reference));
+    return *std::get_if<RefInBF>(&Reference);
+  }
+};
+
+static inline raw_ostream &operator<<(raw_ostream &OS,
+                                      const MCInstReference &Ref) {
+  return Ref.print(OS);
+}
+
+/// Instruction-matching helpers operating on a single instruction at a time.
+///
+/// The idea is to make low-level instruction matching as readable as possible.
+/// The classes contained in this namespace are intended to be used as a
+/// domain-specific language to match MCInst with the particular opcode and
+/// operands.
+///
+/// The goals of this DSL include
+/// * matching a single instruction against the template consisting of the
+///   particular target-specific opcode and a pattern of operands
+/// * matching operands against the known values (such as 42, AArch64::X1 or
+///   "the value of --brk-operand=N command line argument")
+/// * capturing operands of an instruction ("whatever is the destination
+///   register of AArch64::ADDXri instruction, store it to Xd variable to be
+///   queried later")
+/// * expressing repeated operands of a single matched instruction (such as
+///   "ADDXri Xd, Xd, 42, 0" for an arbitrary register Xd) as well as across
+///   multiple calls to matchInst(), which is naturally achieved by sequentially
+///   capturing the operands and matching operands against the known values
+/// * matching multi-instruction code patterns by sequentially calling
+///   matchInst() while passing around already matched operands
+///
+/// The non-goals (compared to MCPlusBuilder::MCInstMatcher) include
+/// * matching an arbitrary tree of instructions in a single matchInst() call
+/// * encapsulation of target-specific knowledge ("match an increment of Xm
+///   by 42")
+///
+/// Unlike MCPlusBuilder::MCInstMatcher, this DSL focuses on the use cases when
+/// the precise control over the instruction order is important. For example,
+/// let's consider a target-specific function that has to match two particular
+/// instructions against this pattern (for two different registers Xm and Xn)
+///
+///   ADDXrs Xm, Xn, Xm, #0
+///   BR Xm
+///
+/// and return the register holding the branch target. Assuming the instructions
+/// are available as MaybeAdd and MaybeBr, the following code can be used:
+///
+///   // Bring the short names into the local scope:
+///   using namespace LowLevelInstMatcherDSL;
+///   // Declare the registers to capture:
+///   Reg Xn, Xm;
+///   // Capture the 0th and 1st operands, match the 2nd operand against the
+///   // just captured Xm register, match the 3rd operand against literal 0:
+///   if (!matchInst(MaybeAdd, AArch64::ADDXrs, Xm, Xn, Xm, Imm(0)))
+///     return AArch64::NoRegister;
+///   // Match the 0th operand against Xm:
+///   if (!matchInst(MaybeBr, AArch64::BR, Xm))
+///     return AArch64::NoRegister;
+///   // Manually check that Xm and Xn did not match the same register:
+///   if (Xm.get() == Xn.get())
+///     return AArch64::NoRegister;
+///   // Return the matched register:
+///   return Xm.get();
+///
+namespace LowLevelInstMatcherDSL {
+
+// The base class to match an operand of type T.
+//
+// The subclasses of OpMatcher are intended to be allocated on the stack and
+// to only be used by passing them to matchInst() and by calling their get()
+// function, thus the peculiar `mutable` specifiers: to make the calling code
+// compact and readable, the templated matchInst() function has to accept both
+// long-lived Imm/Reg wrappers declared as local variables (intended to capture
+// the first operand's value and match the subsequent operands, whether inside
+// a single instruction or across multiple instructions), as well as temporary
+// wrappers around literal values to match, f.e. Imm(42) or Reg(AArch64::XZR).
+template <typename T> class OpMatcher {
+  mutable std::optional<T> Value;
+  mutable std::optional<T> SavedValue;
+
+  // Remember/restore the last Value - to be called by matchInst.
+  void remember() const { SavedValue = Value; }
+  void restore() const { Value = SavedValue; }
+
+  template <typename... OpMatchers>
+  friend bool matchInst(const MCInst &, unsigned, const OpMatchers &...);
+
+protected:
+  OpMatcher(std::optional<T> ValueToMatch) : Value(ValueToMatch) {}
+
+  bool matchValue(T OpValue) const {
+    // Check that OpValue does not contradict the existing Value.
+    bool MatchResult = !Value || *Value == OpValue;
+    // If MatchResult is false, all matchers will be reset before returning from
+    // matchInst, including this one, thus no need to assign conditionally.
+    Value = OpValue;
+
+    return MatchResult;
+  }
+
+public:
+  /// Returns the captured value.
+  T get() const {
+    assert(Value.has_value());
+    return *Value;
+  }
+};
+
+class Reg : public OpMatcher<MCPhysReg> {
+  bool matches(const MCOperand &Op) const {
+    if (!Op.isReg())
+      return false;
+
+    return matchValue(Op.getReg());
+  }
+
+  template <typename... OpMatchers>
+  friend bool matchInst(const MCInst &, unsigned, const OpMatchers &...);
+
+public:
+  Reg(std::optional<MCPhysReg> RegToMatch = std::nullopt)
+      : OpMatcher<MCPhysReg>(RegToMatch) {}
+};
+
+class Imm : public OpMatcher<int64_t> {
+  bool matches(const MCOperand &Op) const {
+    if (!Op.isImm())
+      return false;
+
+    return matchValue(Op.getImm());
+  }
+
+  template <typename... OpMatchers>
+  friend bool matchInst(const MCInst &, unsigned, const OpMatchers &...);
+
+public:
+  Imm(std::optional<int64_t> ImmToMatch = std::nullopt)
+      : OpMatcher<int64_t>(ImmToMatch) {}
+};
+
+/// Tries to match Inst and updates Ops on success.
+///
+/// If Inst has the specified Opcode and its operand list prefix matches Ops,
+/// this function returns true and updates Ops, otherwise false is returned and
+/// values of Ops are kept as before matchInst was called.
+///
+/// Please note that while Ops are technically passed by a const reference to
+/// make invocations like `matchInst(MI, Opcode, Imm(42))` possible, all their
+/// fields are marked mutable.
+template <typename... OpMatchers>
+bool matchInst(const MCInst &Inst, unsigned Opcode, const OpMatchers &...Ops) {
+  if (Inst.getOpcode() != Opcode)
+    return false;
+  assert(sizeof...(Ops) <= MCPlus::getNumPrimeOperands(Inst) &&
+         "Too many operands are matched for the Opcode");
+
+  // Ask each matcher to remember its current value in case of rollback.
+  (Ops.remember(), ...);
+
+  // Check if all matchers match the corresponding operands.
+  auto It = Inst.begin();
+  auto AllMatched = (Ops.matches(*(It++)) && ... && true);
+
+  // If match failed, restore the original captured values.
+  if (!AllMatched) {
+    (Ops.restore(), ...);
+    return false;
+  }
+
+  return true;
+}
+
+} // namespace LowLevelInstMatcherDSL
+
+} // namespace bolt
+} // namespace llvm
+
+#endif
diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h
index 90129d475d870..5b711b0e27bab 100644
--- a/bolt/include/bolt/Core/MCPlusBuilder.h
+++ b/bolt/include/bolt/Core/MCPlusBuilder.h
@@ -51,6 +51,7 @@ class raw_ostream;
 namespace bolt {
 
 class BinaryBasicBlock;
+class BinaryContext;
 class BinaryFunction;
 
 /// Different types of indirect branches encountered during disassembly.
@@ -530,10 +531,15 @@ class MCPlusBuilder {
     return 0;
   }
 
+  /// Create a helper function to increment counter for Instrumentation
+  virtual void createInstrCounterIncrFunc(BinaryContext &BC) {
+    llvm_unreachable("not implemented");
+  }
+
   /// Create increment contents of target by 1 for Instrumentation
-  virtual InstructionListType
-  createInstrIncMemory(const MCSymbol *Target, MCContext *Ctx, bool IsLeaf,
-                       unsigned CodePointerSize) const {
+  virtual InstructionListType createInstrIncMemory(const MCSymbol *Target,
+                                                   MCContext *Ctx, bool IsLeaf,
+                                                   unsigned CodePointerSize) {
     llvm_unreachable("not implemented");
     return InstructionListType();
   }
diff --git a/bolt/include/bolt/Passes/PAuthGadgetScanner.h b/bolt/include/bolt/Passes/PAuthGadgetScanner.h
index 721fd664a3253..cb865a725d72a 100644
--- a/bolt/include/bolt/Passes/PAuthGadgetScanner.h
+++ b/bolt/include/bolt/Passes/PAuthGadgetScanner.h
@@ -11,187 +11,13 @@
 #include "bolt/Core/BinaryContext.h"
 #include "bolt/Core/BinaryFunction.h"
+#include "bolt/Core/MCInstUtils.h"
 #include "bolt/Passes/BinaryPasses.h"
 #include "llvm/Support/raw_ostream.h"
 #include
 
 namespace llvm {
 namespace bolt {
-
-/// @brief MCInstReference represents a reference to an MCInst as stored either
-/// in a BinaryFunction (i.e. before a CFG is created), or in a BinaryBasicBlock
-/// (after a CFG is created). It aims to store the necessary information to be
-/// able to find the specific MCInst in either the BinaryFunction or
-/// BinaryBasicBlock data structures later, so that e.g. the InputAddress of
-/// the corresponding instruction can be computed.
- -struct MCInstInBBReference { - BinaryBasicBlock *BB; - int64_t BBIndex; - MCInstInBBReference(BinaryBasicBlock *BB, int64_t BBIndex) - : BB(BB), BBIndex(BBIndex) {} - MCInstInBBReference() : BB(nullptr), BBIndex(0) {} - static MCInstInBBReference get(const MCInst *Inst, BinaryFunction &BF) { - for (BinaryBasicBlock &BB : BF) - for (size_t I = 0; I < BB.size(); ++I) - if (Inst == &BB.getInstructionAtIndex(I)) - return MCInstInBBReference(&BB, I); - return {}; - } - bool operator==(const MCInstInBBReference &RHS) const { - return BB == RHS.BB && BBIndex == RHS.BBIndex; - } - bool operator<(const MCInstInBBReference &RHS) const { - return std::tie(BB, BBIndex) < std::tie(RHS.BB, RHS.BBIndex); - } - operator MCInst &() const { - assert(BB != nullptr); - return BB->getInstructionAtIndex(BBIndex); - } - uint64_t getAddress() const { - // 4 bytes per instruction on AArch64. - // FIXME: the assumption of 4 byte per instruction needs to be fixed before - // this method gets used on any non-AArch64 binaries (but should be fine for - // pac-ret analysis, as that is an AArch64-specific feature). - return BB->getFunction()->getAddress() + BB->getOffset() + BBIndex * 4; - } -}; - -raw_ostream &operator<<(raw_ostream &OS, const MCInstInBBReference &); - -struct MCInstInBFReference { - BinaryFunction *BF; - uint64_t Offset; - MCInstInBFReference(BinaryFunction *BF, uint64_t Offset) - : BF(BF), Offset(Offset) {} - - static MCInstInBFReference get(const MCInst *Inst, BinaryFunction &BF) { - for (auto &I : BF.instrs()) - if (Inst == &I.second) - return MCInstInBFReference(&BF, I.first); - return {}; - } - - MCInstInBFReference() : BF(nullptr), Offset(0) {} - bool operator==(const MCInstInBFReference &RHS) const { - return BF == RHS.BF && Offset == RHS.Offset; - } - bool operator<(const MCInstInBFReference &RHS) const { - return std::tie(BF, Offset) < std::tie(RHS.BF, RHS.Offset); - } - operator MCInst &() const { - assert(BF != nullptr); - return *BF->getInstructionAtOffset(Offset); - } - - uint64_t getOffset() const { return Offset; } - - uint64_t getAddress() const { return BF->getAddress() + getOffset(); } -}; - -raw_ostream &operator<<(raw_ostream &OS, const MCInstInBFReference &); - -struct MCInstReference { - enum Kind { FunctionParent, BasicBlockParent }; - Kind ParentKind; - union U { - MCInstInBBReference BBRef; - MCInstInBFReference BFRef; - U(MCInstInBBReference BBRef) : BBRef(BBRef) {} - U(MCInstInBFReference BFRef) : BFRef(BFRef) {} - } U; - MCInstReference(MCInstInBBReference BBRef) - : ParentKind(BasicBlockParent), U(BBRef) {} - MCInstReference(MCInstInBFReference BFRef) - : ParentKind(FunctionParent), U(BFRef) {} - MCInstReference(BinaryBasicBlock *BB, int64_t BBIndex) - : MCInstReference(MCInstInBBReference(BB, BBIndex)) {} - MCInstReference(BinaryFunction *BF, uint32_t Offset) - : MCInstReference(MCInstInBFReference(BF, Offset)) {} - - static MCInstReference get(const MCInst *Inst, BinaryFunction &BF) { - if (BF.hasCFG()) - return MCInstInBBReference::get(Inst, BF); - return MCInstInBFReference::get(Inst, BF); - } - - bool operator<(const MCInstReference &RHS) const { - if (ParentKind != RHS.ParentKind) - return ParentKind < RHS.ParentKind; - switch (ParentKind) { - case BasicBlockParent: - return U.BBRef < RHS.U.BBRef; - case FunctionParent: - return U.BFRef < RHS.U.BFRef; - } - llvm_unreachable(""); - } - - bool operator==(const MCInstReference &RHS) const { - if (ParentKind != RHS.ParentKind) - return false; - switch (ParentKind) { - case BasicBlockParent: - return U.BBRef == 
RHS.U.BBRef; - case FunctionParent: - return U.BFRef == RHS.U.BFRef; - } - llvm_unreachable(""); - } - - operator MCInst &() const { - switch (ParentKind) { - case BasicBlockParent: - return U.BBRef; - case FunctionParent: - return U.BFRef; - } - llvm_unreachable(""); - } - - operator bool() const { - switch (ParentKind) { - case BasicBlockParent: - return U.BBRef.BB != nullptr; - case FunctionParent: - return U.BFRef.BF != nullptr; - } - llvm_unreachable(""); - } - - uint64_t getAddress() const { - switch (ParentKind) { - case BasicBlockParent: - return U.BBRef.getAddress(); - case FunctionParent: - return U.BFRef.getAddress(); - } - llvm_unreachable(""); - } - - BinaryFunction *getFunction() const { - switch (ParentKind) { - case FunctionParent: - return U.BFRef.BF; - case BasicBlockParent: - return U.BBRef.BB->getFunction(); - } - llvm_unreachable(""); - } - - BinaryBasicBlock *getBasicBlock() const { - switch (ParentKind) { - case FunctionParent: - return nullptr; - case BasicBlockParent: - return U.BBRef.BB; - } - llvm_unreachable(""); - } -}; - -raw_ostream &operator<<(raw_ostream &OS, const MCInstReference &); - namespace PAuthGadgetScanner { // The report classes are designed to be used in an immutable manner. diff --git a/bolt/include/bolt/Rewrite/RewriteInstance.h b/bolt/include/bolt/Rewrite/RewriteInstance.h index 19dcce8205ebc..0fe2e32b61933 100644 --- a/bolt/include/bolt/Rewrite/RewriteInstance.h +++ b/bolt/include/bolt/Rewrite/RewriteInstance.h @@ -249,12 +249,11 @@ class RewriteInstance { /// Analyze relocation \p Rel. /// Return true if the relocation was successfully processed, false otherwise. /// The \p SymbolName, \p SymbolAddress, \p Addend and \p ExtractedValue - /// parameters will be set on success. The \p Skip argument indicates - /// that the relocation was analyzed, but it must not be processed. + /// parameters will be set on success. bool analyzeRelocation(const object::RelocationRef &Rel, uint32_t &RType, std::string &SymbolName, bool &IsSectionRelocation, uint64_t &SymbolAddress, int64_t &Addend, - uint64_t &ExtractedValue, bool &Skip) const; + uint64_t &ExtractedValue) const; /// Rewrite non-allocatable sections with modifications. void rewriteNoteSections(); diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp index 98440cde7cebd..b7ded6b931a15 100644 --- a/bolt/lib/Core/BinaryContext.cpp +++ b/bolt/lib/Core/BinaryContext.cpp @@ -1662,7 +1662,7 @@ void BinaryContext::preprocessDWODebugInfo() { "files.\n"; } // Prevent failures when DWOName is already an absolute path. - sys::fs::make_absolute(DWOCompDir, AbsolutePath); + sys::path::make_absolute(DWOCompDir, AbsolutePath); DWARFUnit *DWOCU = DwarfUnit->getNonSkeletonUnitDIE(false, AbsolutePath).getDwarfUnit(); if (!DWOCU->isDWOUnit()) { diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp index 578a87dc6c09d..07bc71ee538d6 100644 --- a/bolt/lib/Core/BinaryFunction.cpp +++ b/bolt/lib/Core/BinaryFunction.cpp @@ -284,6 +284,33 @@ BinaryFunction::getBasicBlockContainingOffset(uint64_t Offset) { return (Offset < BB->getOffset() + BB->getOriginalSize()) ? BB : nullptr; } +uint16_t BinaryFunction::getConstantIslandAlignment() const { + if (Islands == nullptr) + return 1; + + // For constant island inside a function, the default 8-byte alignment is + // probably good enough. 
+ const uint16_t DefaultAlignment = sizeof(uint64_t); + if (!isDataObject()) + return DefaultAlignment; + + // If the constant island itself is a binary function, get its alignment + // based on its size, original address, and its owning section's alignment. + const uint64_t MaxAlignment = + std::min(uint64_t(1) << llvm::countr_zero(getAddress()), + OriginSection->getAlignment()); + const uint64_t MinAlignment = + std::max((uint64_t)DefaultAlignment, + uint64_t(1) << (63 - llvm::countl_zero(getSize()))); + uint64_t Alignment = std::min(MinAlignment, MaxAlignment); + if (Alignment >> 16) { + BC.errs() << "BOLT-ERROR: the constant island's alignment is too big: 0x" + << Twine::utohexstr(Alignment) << "\n"; + exit(1); + } + return (uint16_t)Alignment; +} + void BinaryFunction::markUnreachableBlocks() { std::stack Stack; @@ -3598,7 +3625,9 @@ void BinaryFunction::fixBranches() { auto &MIB = BC.MIB; MCContext *Ctx = BC.Ctx.get(); - for (BinaryBasicBlock *BB : BasicBlocks) { + for (auto BBI = Layout.block_begin(), BBE = Layout.block_end(); BBI != BBE; + ++BBI) { + BinaryBasicBlock *BB = *BBI; const MCSymbol *TBB = nullptr; const MCSymbol *FBB = nullptr; MCInst *CondBranch = nullptr; @@ -3612,7 +3641,7 @@ void BinaryFunction::fixBranches() { // Basic block that follows the current one in the final layout. const BinaryBasicBlock *const NextBB = - Layout.getBasicBlockAfter(BB, /*IgnoreSplits=*/false); + Layout.getBasicBlockAfter(BBI, /*IgnoreSplits*/ false); if (BB->succ_size() == 1) { // __builtin_unreachable() could create a conditional branch that diff --git a/bolt/lib/Core/CMakeLists.txt b/bolt/lib/Core/CMakeLists.txt index fc72dc023c590..58cfcab370f16 100644 --- a/bolt/lib/Core/CMakeLists.txt +++ b/bolt/lib/Core/CMakeLists.txt @@ -32,6 +32,7 @@ add_llvm_library(LLVMBOLTCore GDBIndex.cpp HashUtilities.cpp JumpTable.cpp + MCInstUtils.cpp MCPlusBuilder.cpp ParallelUtilities.cpp Relocation.cpp diff --git a/bolt/lib/Core/FunctionLayout.cpp b/bolt/lib/Core/FunctionLayout.cpp index 4498fc44da954..98ed6e1320b3e 100644 --- a/bolt/lib/Core/FunctionLayout.cpp +++ b/bolt/lib/Core/FunctionLayout.cpp @@ -224,23 +224,29 @@ void FunctionLayout::clear() { } const BinaryBasicBlock * -FunctionLayout::getBasicBlockAfter(const BinaryBasicBlock *BB, +FunctionLayout::getBasicBlockAfter(block_const_iterator BBIter, bool IgnoreSplits) const { - const block_const_iterator BBPos = find(blocks(), BB); - if (BBPos == block_end()) - return nullptr; - - const block_const_iterator BlockAfter = std::next(BBPos); + const block_const_iterator BlockAfter = std::next(BBIter); if (BlockAfter == block_end()) return nullptr; if (!IgnoreSplits) - if (BlockAfter == getFragment(BB->getFragmentNum()).end()) + if (BlockAfter == getFragment((*BBIter)->getFragmentNum()).end()) return nullptr; return *BlockAfter; } +const BinaryBasicBlock * +FunctionLayout::getBasicBlockAfter(const BinaryBasicBlock *BB, + bool IgnoreSplits) const { + const block_const_iterator BBPos = find(blocks(), BB); + if (BBPos == block_end()) + return nullptr; + + return getBasicBlockAfter(BBPos, IgnoreSplits); +} + bool FunctionLayout::isSplit() const { const unsigned NonEmptyFragCount = llvm::count_if( fragments(), [](const FunctionFragment &FF) { return !FF.empty(); }); diff --git a/bolt/lib/Core/GDBIndex.cpp b/bolt/lib/Core/GDBIndex.cpp index c7fb4889646b4..4c34f5ee7fca7 100644 --- a/bolt/lib/Core/GDBIndex.cpp +++ b/bolt/lib/Core/GDBIndex.cpp @@ -77,7 +77,8 @@ void GDBIndex::updateGdbIndexSection( exit(1); } DenseSet OriginalOffsets; - for (unsigned Index = 0, 
Units = BC.DwCtx->getNumCompileUnits(); + for (unsigned Index = 0, PresentUnitsIndex = 0, + Units = BC.DwCtx->getNumCompileUnits(); Index < Units; ++Index) { const DWARFUnit *CU = BC.DwCtx->getUnitAtIndex(Index); if (SkipTypeUnits && CU->isTypeUnit()) @@ -90,7 +91,7 @@ void GDBIndex::updateGdbIndexSection( } OriginalOffsets.insert(Offset); - OffsetToIndexMap[Offset] = Index; + OffsetToIndexMap[Offset] = PresentUnitsIndex++; } // Ignore old address table. @@ -99,10 +100,19 @@ void GDBIndex::updateGdbIndexSection( Data += SymbolTableOffset - CUTypesOffset; // Calculate the size of the new address table. + const auto IsValidAddressRange = [](const DebugAddressRange &Range) { + return Range.HighPC > Range.LowPC; + }; + uint32_t NewAddressTableSize = 0; for (const auto &CURangesPair : ARangesSectionWriter.getCUAddressRanges()) { const SmallVector &Ranges = CURangesPair.second; - NewAddressTableSize += Ranges.size() * 20; + NewAddressTableSize += + llvm::count_if(Ranges, + [&IsValidAddressRange](const DebugAddressRange &Range) { + return IsValidAddressRange(Range); + }) * + 20; } // Difference between old and new table (and section) sizes. @@ -125,16 +135,52 @@ void GDBIndex::updateGdbIndexSection( using MapEntry = std::pair; std::vector CUVector(CUMap.begin(), CUMap.end()); + // Remove the CUs we won't emit anyway. + CUVector.erase(std::remove_if(CUVector.begin(), CUVector.end(), + [&OriginalOffsets](const MapEntry &It) { + // Skipping TU for DWARF5 when they are not + // included in CU list. + return OriginalOffsets.count(It.first) == 0; + }), + CUVector.end()); // Need to sort since we write out all of TUs in .debug_info before CUs. std::sort(CUVector.begin(), CUVector.end(), [](const MapEntry &E1, const MapEntry &E2) -> bool { return E1.second.Offset < E2.second.Offset; }); + // Create the original CU index -> updated CU index mapping, + // as the sort above could've changed the order and we have to update + // indices correspondingly in address map and constant pool. + std::unordered_map OriginalCUIndexToUpdatedCUIndexMap; + OriginalCUIndexToUpdatedCUIndexMap.reserve(CUVector.size()); + for (uint32_t I = 0; I < CUVector.size(); ++I) { + OriginalCUIndexToUpdatedCUIndexMap[OffsetToIndexMap.at(CUVector[I].first)] = + I; + } + const auto RemapCUIndex = [&OriginalCUIndexToUpdatedCUIndexMap, + CUVectorSize = CUVector.size(), + TUVectorSize = getGDBIndexTUEntryVector().size()]( + uint32_t OriginalIndex) { + if (OriginalIndex >= CUVectorSize) { + if (OriginalIndex >= CUVectorSize + TUVectorSize) { + errs() << "BOLT-ERROR: .gdb_index unknown CU index\n"; + exit(1); + } + // The index is into TU CU List, which we don't reorder, so return as is. + return OriginalIndex; + } + + const auto It = OriginalCUIndexToUpdatedCUIndexMap.find(OriginalIndex); + if (It == OriginalCUIndexToUpdatedCUIndexMap.end()) { + errs() << "BOLT-ERROR: .gdb_index unknown CU index\n"; + exit(1); + } + + return It->second; + }; + // Writing out CU List for (auto &CUInfo : CUVector) { - // Skipping TU for DWARF5 when they are not included in CU list. - if (!OriginalOffsets.count(CUInfo.first)) - continue; write64le(Buffer, CUInfo.second.Offset); // Length encoded in CU doesn't contain first 4 bytes that encode length. write64le(Buffer + 8, CUInfo.second.Length + 4); @@ -160,13 +206,19 @@ void GDBIndex::updateGdbIndexSection( // Generate new address table. 
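The loop that follows emits one 20-byte .gdb_index address-table record per surviving range: a 64-bit LowPC, a 64-bit HighPC, and a 32-bit CU index remapped through RemapCUIndex, with degenerate ranges such as [0, 0) dropped. A self-contained sketch of that record layout, using a hand-rolled little-endian writer and a plain map standing in for the remap lambda above (names are illustrative):

#include <cstdint>
#include <unordered_map>
#include <vector>

struct AddressRange { uint64_t LowPC, HighPC; };

void emitAddressTable(std::vector<uint8_t> &Out,
                      const std::vector<AddressRange> &Ranges,
                      uint32_t OriginalCUIndex,
                      const std::unordered_map<uint32_t, uint32_t> &Remap) {
  auto WriteLE = [&Out](uint64_t Value, unsigned Bytes) {
    for (unsigned I = 0; I < Bytes; ++I)
      Out.push_back(static_cast<uint8_t>(Value >> (8 * I)));
  };
  const uint32_t UpdatedIndex = Remap.at(OriginalCUIndex);
  for (const AddressRange &R : Ranges) {
    if (R.HighPC <= R.LowPC)
      continue; // skip empty ranges that are known to break gdb
    WriteLE(R.LowPC, 8);      // 8 bytes
    WriteLE(R.HighPC, 8);     // 8 bytes
    WriteLE(UpdatedIndex, 4); // 4 bytes, 20 bytes per record in total
  }
}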
for (const std::pair &CURangesPair : ARangesSectionWriter.getCUAddressRanges()) { - const uint32_t CUIndex = OffsetToIndexMap[CURangesPair.first]; + const uint32_t OriginalCUIndex = OffsetToIndexMap[CURangesPair.first]; + const uint32_t UpdatedCUIndex = RemapCUIndex(OriginalCUIndex); const DebugAddressRangesVector &Ranges = CURangesPair.second; for (const DebugAddressRange &Range : Ranges) { - write64le(Buffer, Range.LowPC); - write64le(Buffer + 8, Range.HighPC); - write32le(Buffer + 16, CUIndex); - Buffer += 20; + // Don't emit ranges that break gdb, + // https://sourceware.org/bugzilla/show_bug.cgi?id=33247. + // We've seen [0, 0) ranges here, for instance. + if (IsValidAddressRange(Range)) { + write64le(Buffer, Range.LowPC); + write64le(Buffer + 8, Range.HighPC); + write32le(Buffer + 16, UpdatedCUIndex); + Buffer += 20; + } } } @@ -178,6 +230,56 @@ void GDBIndex::updateGdbIndexSection( // Copy over the rest of the original data. memcpy(Buffer, Data, TrailingSize); + // Fixup CU-indices in constant pool. + const char *const OriginalConstantPoolData = + GdbIndexContents.data() + ConstantPoolOffset; + uint8_t *const UpdatedConstantPoolData = + NewGdbIndexContents + ConstantPoolOffset + Delta; + + const char *OriginalSymbolTableData = + GdbIndexContents.data() + SymbolTableOffset; + std::set CUVectorOffsets; + // Parse the symbol map and extract constant pool CU offsets from it. + while (OriginalSymbolTableData < OriginalConstantPoolData) { + const uint32_t NameOffset = read32le(OriginalSymbolTableData); + const uint32_t CUVectorOffset = read32le(OriginalSymbolTableData + 4); + OriginalSymbolTableData += 8; + + // Iff both are zero, then the slot is considered empty in the hash-map. + if (NameOffset || CUVectorOffset) { + CUVectorOffsets.insert(CUVectorOffset); + } + } + + // Update the CU-indicies in the constant pool + for (const auto CUVectorOffset : CUVectorOffsets) { + const char *CurrentOriginalConstantPoolData = + OriginalConstantPoolData + CUVectorOffset; + uint8_t *CurrentUpdatedConstantPoolData = + UpdatedConstantPoolData + CUVectorOffset; + + const uint32_t Num = read32le(CurrentOriginalConstantPoolData); + CurrentOriginalConstantPoolData += 4; + CurrentUpdatedConstantPoolData += 4; + + for (uint32_t J = 0; J < Num; ++J) { + const uint32_t OriginalCUIndexAndAttributes = + read32le(CurrentOriginalConstantPoolData); + CurrentOriginalConstantPoolData += 4; + + // We only care for the index, which is the lowest 24 bits, other bits are + // left as is. + const uint32_t OriginalCUIndex = + OriginalCUIndexAndAttributes & ((1 << 24) - 1); + const uint32_t Attributes = OriginalCUIndexAndAttributes >> 24; + const uint32_t UpdatedCUIndexAndAttributes = + RemapCUIndex(OriginalCUIndex) | (Attributes << 24); + + write32le(CurrentUpdatedConstantPoolData, UpdatedCUIndexAndAttributes); + CurrentUpdatedConstantPoolData += 4; + } + } + // Register the new section. BC.registerOrUpdateNoteSection(".gdb_index", NewGdbIndexContents, NewGdbIndexSize); diff --git a/bolt/lib/Core/MCInstUtils.cpp b/bolt/lib/Core/MCInstUtils.cpp new file mode 100644 index 0000000000000..f505bf73c64eb --- /dev/null +++ b/bolt/lib/Core/MCInstUtils.cpp @@ -0,0 +1,86 @@ +//===- bolt/Core/MCInstUtils.cpp ------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "bolt/Core/MCInstUtils.h" +#include "bolt/Core/BinaryBasicBlock.h" +#include "bolt/Core/BinaryFunction.h" + +#include + +using namespace llvm; +using namespace llvm::bolt; + +// It is assumed in a few places that BinaryBasicBlock stores its instructions +// in a contiguous vector. +using BasicBlockStorageIsVector = + std::is_same::const_iterator>; +static_assert(BasicBlockStorageIsVector::value); + +MCInstReference MCInstReference::get(const MCInst &Inst, + const BinaryFunction &BF) { + if (BF.hasCFG()) { + for (BinaryBasicBlock &BB : BF) { + for (MCInst &MI : BB) + if (&MI == &Inst) + return MCInstReference(BB, Inst); + } + llvm_unreachable("Inst is not contained in BF"); + } + + for (auto I = BF.instrs().begin(), E = BF.instrs().end(); I != E; ++I) { + if (&I->second == &Inst) + return MCInstReference(BF, I); + } + llvm_unreachable("Inst is not contained in BF"); +} + +uint64_t MCInstReference::computeAddress(const MCCodeEmitter *Emitter) const { + assert(!empty() && "Taking instruction address by empty reference"); + + const BinaryContext &BC = getFunction()->getBinaryContext(); + if (auto *Ref = tryGetRefInBB()) { + const uint64_t AddressOfBB = + getFunction()->getAddress() + Ref->BB->getOffset(); + const MCInst *FirstInstInBB = &*Ref->BB->begin(); + const MCInst *ThisInst = &getMCInst(); + + // Usage of plain 'const MCInst *' as iterators assumes the instructions + // are stored in a vector, see BasicBlockStorageIsVector. + const uint64_t OffsetInBB = + BC.computeCodeSize(FirstInstInBB, ThisInst, Emitter); + + return AddressOfBB + OffsetInBB; + } + + auto &Ref = getRefInBF(); + const uint64_t OffsetInBF = Ref.It->first; + + return getFunction()->getAddress() + OffsetInBF; +} + +raw_ostream &MCInstReference::print(raw_ostream &OS) const { + if (const RefInBB *Ref = tryGetRefInBB()) { + OS << "MCInstBBRef<"; + if (Ref->BB == nullptr) + OS << "BB:(null)"; + else + OS << "BB:" << Ref->BB->getName() << ":" << Ref->Index; + OS << ">"; + return OS; + } + + const RefInBF &Ref = getRefInBF(); + OS << "MCInstBFRef<"; + if (Ref.BF == nullptr) + OS << "BF:(null)"; + else + OS << "BF:" << Ref.BF->getPrintName() << ":" << Ref.It->first; + OS << ">"; + return OS; +} diff --git a/bolt/lib/Core/Relocation.cpp b/bolt/lib/Core/Relocation.cpp index f882627222242..4b827b647b06c 100644 --- a/bolt/lib/Core/Relocation.cpp +++ b/bolt/lib/Core/Relocation.cpp @@ -81,7 +81,6 @@ static bool isSupportedAArch64(uint32_t Type) { case ELF::R_AARCH64_LD64_GOT_LO12_NC: case ELF::R_AARCH64_TLSDESC_LD64_LO12: case ELF::R_AARCH64_TLSDESC_ADD_LO12: - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: case ELF::R_AARCH64_PREL16: case ELF::R_AARCH64_PREL32: @@ -193,7 +192,6 @@ static size_t getSizeForTypeAArch64(uint32_t Type) { case ELF::R_AARCH64_LD64_GOT_LO12_NC: case ELF::R_AARCH64_TLSDESC_LD64_LO12: case ELF::R_AARCH64_TLSDESC_ADD_LO12: - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: case ELF::R_AARCH64_PREL32: case ELF::R_AARCH64_MOVW_UABS_G0: @@ -248,7 +246,14 @@ static bool skipRelocationTypeX86(uint32_t Type) { } static bool skipRelocationTypeAArch64(uint32_t Type) { - return Type == ELF::R_AARCH64_NONE || Type == ELF::R_AARCH64_LD_PREL_LO19; + switch (Type) { + default: + return false; + case ELF::R_AARCH64_NONE: + case ELF::R_AARCH64_LD_PREL_LO19: + case ELF::R_AARCH64_TLSDESC_CALL: + return 
true; + } } static bool skipRelocationTypeRISCV(uint32_t Type) { @@ -362,7 +367,6 @@ static uint64_t extractValueAArch64(uint32_t Type, uint64_t Contents, return static_cast(PC) + SignExtend64<32>(Contents & 0xffffffff); case ELF::R_AARCH64_PREL64: return static_cast(PC) + Contents; - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_JUMP26: case ELF::R_AARCH64_CALL26: // Immediate goes in bits 25:0 of B and BL. @@ -552,7 +556,6 @@ static bool isGOTAArch64(uint32_t Type) { case ELF::R_AARCH64_TLSDESC_ADR_PAGE21: case ELF::R_AARCH64_TLSDESC_LD64_LO12: case ELF::R_AARCH64_TLSDESC_ADD_LO12: - case ELF::R_AARCH64_TLSDESC_CALL: return true; } } @@ -591,7 +594,6 @@ static bool isTLSAArch64(uint32_t Type) { case ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: case ELF::R_AARCH64_TLSDESC_LD64_LO12: case ELF::R_AARCH64_TLSDESC_ADD_LO12: - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: return true; } @@ -667,7 +669,6 @@ static bool isPCRelativeAArch64(uint32_t Type) { case ELF::R_AARCH64_MOVW_UABS_G2_NC: case ELF::R_AARCH64_MOVW_UABS_G3: return false; - case ELF::R_AARCH64_TLSDESC_CALL: case ELF::R_AARCH64_CALL26: case ELF::R_AARCH64_JUMP26: case ELF::R_AARCH64_TSTBR14: diff --git a/bolt/lib/Passes/Instrumentation.cpp b/bolt/lib/Passes/Instrumentation.cpp index c2f876f0dff9e..150461b020f06 100644 --- a/bolt/lib/Passes/Instrumentation.cpp +++ b/bolt/lib/Passes/Instrumentation.cpp @@ -753,6 +753,8 @@ void Instrumentation::createAuxiliaryFunctions(BinaryContext &BC) { createSimpleFunction("__bolt_fini_trampoline", BC.MIB->createReturnInstructionList(BC.Ctx.get())); } + if (BC.isAArch64()) + BC.MIB->createInstrCounterIncrFunc(BC); } } diff --git a/bolt/lib/Passes/PAuthGadgetScanner.cpp b/bolt/lib/Passes/PAuthGadgetScanner.cpp index 65c84ebc8c4f4..01b350b2f11fe 100644 --- a/bolt/lib/Passes/PAuthGadgetScanner.cpp +++ b/bolt/lib/Passes/PAuthGadgetScanner.cpp @@ -14,6 +14,7 @@ #include "bolt/Passes/PAuthGadgetScanner.h" #include "bolt/Core/ParallelUtilities.h" #include "bolt/Passes/DataflowAnalysis.h" +#include "bolt/Utils/CommandLineOpts.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallSet.h" #include "llvm/MC/MCInst.h" @@ -24,41 +25,13 @@ namespace llvm { namespace bolt { - -raw_ostream &operator<<(raw_ostream &OS, const MCInstInBBReference &Ref) { - OS << "MCInstBBRef<"; - if (Ref.BB == nullptr) - OS << "BB:(null)"; - else - OS << "BB:" << Ref.BB->getName() << ":" << Ref.BBIndex; - OS << ">"; - return OS; -} - -raw_ostream &operator<<(raw_ostream &OS, const MCInstInBFReference &Ref) { - OS << "MCInstBFRef<"; - if (Ref.BF == nullptr) - OS << "BF:(null)"; - else - OS << "BF:" << Ref.BF->getPrintName() << ":" << Ref.getOffset(); - OS << ">"; - return OS; -} - -raw_ostream &operator<<(raw_ostream &OS, const MCInstReference &Ref) { - switch (Ref.ParentKind) { - case MCInstReference::BasicBlockParent: - OS << Ref.U.BBRef; - return OS; - case MCInstReference::FunctionParent: - OS << Ref.U.BFRef; - return OS; - } - llvm_unreachable(""); -} - namespace PAuthGadgetScanner { +static cl::opt AuthTrapsOnFailure( + "auth-traps-on-failure", + cl::desc("Assume authentication instructions always trap on failure"), + cl::cat(opts::BinaryAnalysisCategory)); + [[maybe_unused]] static void traceInst(const BinaryContext &BC, StringRef Label, const MCInst &MI) { dbgs() << " " << Label << ": "; @@ -91,10 +64,10 @@ template static void iterateOverInstrs(BinaryFunction &BF, T Fn) { if (BF.hasCFG()) { for (BinaryBasicBlock &BB : BF) for (int64_t I = 0, E = BB.size(); I < E; ++I) - 
Fn(MCInstInBBReference(&BB, I)); + Fn(MCInstReference(BB, I)); } else { - for (auto I : BF.instrs()) - Fn(MCInstInBFReference(&BF, I.first)); + for (auto I = BF.instrs().begin(), E = BF.instrs().end(); I != E; ++I) + Fn(MCInstReference(BF, I)); } } @@ -115,8 +88,8 @@ class TrackedRegisters { TrackedRegisters(ArrayRef RegsToTrack) : Registers(RegsToTrack), RegToIndexMapping(getMappingSize(RegsToTrack), NoIndex) { - for (unsigned I = 0; I < RegsToTrack.size(); ++I) - RegToIndexMapping[RegsToTrack[I]] = I; + for (auto [MappedIndex, Reg] : llvm::enumerate(RegsToTrack)) + RegToIndexMapping[Reg] = MappedIndex; } ArrayRef getRegisters() const { return Registers; } @@ -230,9 +203,9 @@ struct SrcState { SafeToDerefRegs &= StateIn.SafeToDerefRegs; TrustedRegs &= StateIn.TrustedRegs; - for (unsigned I = 0; I < LastInstWritingReg.size(); ++I) - for (const MCInst *J : StateIn.LastInstWritingReg[I]) - LastInstWritingReg[I].insert(J); + for (auto [ThisSet, OtherSet] : + llvm::zip_equal(LastInstWritingReg, StateIn.LastInstWritingReg)) + ThisSet.insert_range(OtherSet); return *this; } @@ -251,11 +224,9 @@ struct SrcState { static void printInstsShort(raw_ostream &OS, ArrayRef Insts) { OS << "Insts: "; - for (unsigned I = 0; I < Insts.size(); ++I) { - auto &Set = Insts[I]; + for (auto [I, PtrSet] : llvm::enumerate(Insts)) { OS << "[" << I << "]("; - for (const MCInst *MCInstP : Set) - OS << MCInstP << " "; + interleave(PtrSet, OS, " "); OS << ")"; } } @@ -397,6 +368,34 @@ class SrcSafetyAnalysis { return Clobbered; } + std::optional getRegMadeTrustedByChecking(const MCInst &Inst, + SrcState Cur) const { + // This function cannot return multiple registers. This is never the case + // on AArch64. + std::optional RegCheckedByInst = + BC.MIB->getAuthCheckedReg(Inst, /*MayOverwrite=*/false); + if (RegCheckedByInst && Cur.SafeToDerefRegs[*RegCheckedByInst]) + return *RegCheckedByInst; + + auto It = CheckerSequenceInfo.find(&Inst); + if (It == CheckerSequenceInfo.end()) + return std::nullopt; + + MCPhysReg RegCheckedBySequence = It->second.first; + const MCInst *FirstCheckerInst = It->second.second; + + // FirstCheckerInst should belong to the same basic block (see the + // assertion in DataflowSrcSafetyAnalysis::run()), meaning it was + // deterministically processed a few steps before this instruction. + const SrcState &StateBeforeChecker = getStateBefore(*FirstCheckerInst); + + // The sequence checks the register, but it should be authenticated before. + if (!StateBeforeChecker.SafeToDerefRegs[RegCheckedBySequence]) + return std::nullopt; + + return RegCheckedBySequence; + } + // Returns all registers that can be treated as if they are written by an // authentication instruction. SmallVector getRegsMadeSafeToDeref(const MCInst &Point, @@ -415,22 +414,43 @@ class SrcSafetyAnalysis { // ... an address can be updated in a safe manner, producing the result // which is as trusted as the input address. 
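The transfer rule described by this comment is plain dataflow: analyzeAddressArithmeticsForPtrAuth reports a (destination, source) register pair for a safe address update, and the destination inherits exactly the safety the source had before the instruction. A toy sketch of that step, with std::bitset standing in for the analysis' per-register BitVectors (register numbering and state shape are illustrative):

#include <bitset>
#include <optional>
#include <utility>

constexpr std::size_t NumRegs = 64; // illustrative register-file size

struct State {
  std::bitset<NumRegs> SafeToDeref;
  std::bitset<NumRegs> Trusted;
};

void propagateAddressArithmetic(
    State &Next, const State &Cur,
    std::optional<std::pair<unsigned, unsigned>> DstAndSrc) {
  if (!DstAndSrc)
    return; // not a safe address update, nothing to propagate
  auto [Dst, Src] = *DstAndSrc;
  if (Cur.SafeToDeref[Src])
    Next.SafeToDeref[Dst] = true; // safe-to-dereference is inherited
  if (Cur.Trusted[Src])
    Next.Trusted[Dst] = true;     // and so is full trust
}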
if (auto DstAndSrc = BC.MIB->analyzeAddressArithmeticsForPtrAuth(Point)) { - if (Cur.SafeToDerefRegs[DstAndSrc->second]) - Regs.push_back(DstAndSrc->first); + auto [DstReg, SrcReg] = *DstAndSrc; + if (Cur.SafeToDerefRegs[SrcReg]) + Regs.push_back(DstReg); } + // Make sure explicit checker sequence keeps register safe-to-dereference + // when the register would be clobbered according to the regular rules: + // + // ; LR is safe to dereference here + // mov x16, x30 ; start of the sequence, LR is s-t-d right before + // xpaclri ; clobbers LR, LR is not safe anymore + // cmp x30, x16 + // b.eq 1f ; end of the sequence: LR is marked as trusted + // brk 0x1234 + // 1: + // ; at this point LR would be marked as trusted, + // ; but not safe-to-dereference + // + // or even just + // + // ; X1 is safe to dereference here + // ldr x0, [x1, #8]! + // ; X1 is trusted here, but it was clobbered due to address write-back + if (auto CheckedReg = getRegMadeTrustedByChecking(Point, Cur)) + Regs.push_back(*CheckedReg); + return Regs; } // Returns all registers made trusted by this instruction. SmallVector getRegsMadeTrusted(const MCInst &Point, const SrcState &Cur) const { + assert(!AuthTrapsOnFailure && "Use getRegsMadeSafeToDeref instead"); SmallVector Regs; // An authenticated pointer can be checked, or - std::optional CheckedReg = - BC.MIB->getAuthCheckedReg(Point, /*MayOverwrite=*/false); - if (CheckedReg && Cur.SafeToDerefRegs[*CheckedReg]) + if (auto CheckedReg = getRegMadeTrustedByChecking(Point, Cur)) Regs.push_back(*CheckedReg); // ... a pointer can be authenticated by an instruction that always checks @@ -441,19 +461,6 @@ class SrcSafetyAnalysis { if (AutReg && IsChecked) Regs.push_back(*AutReg); - if (CheckerSequenceInfo.contains(&Point)) { - MCPhysReg CheckedReg; - const MCInst *FirstCheckerInst; - std::tie(CheckedReg, FirstCheckerInst) = CheckerSequenceInfo.at(&Point); - - // FirstCheckerInst should belong to the same basic block (see the - // assertion in DataflowSrcSafetyAnalysis::run()), meaning it was - // deterministically processed a few steps before this instruction. - const SrcState &StateBeforeChecker = getStateBefore(*FirstCheckerInst); - if (StateBeforeChecker.SafeToDerefRegs[CheckedReg]) - Regs.push_back(CheckedReg); - } - // ... a safe address can be materialized, or if (auto NewAddrReg = BC.MIB->getMaterializedAddressRegForPtrAuth(Point)) Regs.push_back(*NewAddrReg); @@ -461,8 +468,9 @@ class SrcSafetyAnalysis { // ... an address can be updated in a safe manner, producing the result // which is as trusted as the input address. if (auto DstAndSrc = BC.MIB->analyzeAddressArithmeticsForPtrAuth(Point)) { - if (Cur.TrustedRegs[DstAndSrc->second]) - Regs.push_back(DstAndSrc->first); + auto [DstReg, SrcReg] = *DstAndSrc; + if (Cur.TrustedRegs[SrcReg]) + Regs.push_back(DstReg); } return Regs; @@ -496,28 +504,11 @@ class SrcSafetyAnalysis { BitVector Clobbered = getClobberedRegs(Point); SmallVector NewSafeToDerefRegs = getRegsMadeSafeToDeref(Point, Cur); - SmallVector NewTrustedRegs = getRegsMadeTrusted(Point, Cur); - - // Ideally, being trusted is a strictly stronger property than being - // safe-to-dereference. To simplify the computation of Next state, enforce - // this for NewSafeToDerefRegs and NewTrustedRegs. 
Additionally, this - // fixes the properly for "cumulative" register states in tricky cases - // like the following: - // - // ; LR is safe to dereference here - // mov x16, x30 ; start of the sequence, LR is s-t-d right before - // xpaclri ; clobbers LR, LR is not safe anymore - // cmp x30, x16 - // b.eq 1f ; end of the sequence: LR is marked as trusted - // brk 0x1234 - // 1: - // ; at this point LR would be marked as trusted, - // ; but not safe-to-dereference - // - for (auto TrustedReg : NewTrustedRegs) { - if (!is_contained(NewSafeToDerefRegs, TrustedReg)) - NewSafeToDerefRegs.push_back(TrustedReg); - } + // If authentication instructions trap on failure, safe-to-dereference + // registers are always trusted. + SmallVector NewTrustedRegs = + AuthTrapsOnFailure ? NewSafeToDerefRegs + : getRegsMadeTrusted(Point, Cur); // Then, compute the state after this instruction is executed. SrcState Next = Cur; @@ -554,6 +545,11 @@ class SrcSafetyAnalysis { dbgs() << ")\n"; }); + // Being trusted is a strictly stronger property than being + // safe-to-dereference. + assert(!Next.TrustedRegs.test(Next.SafeToDerefRegs) && + "SafeToDerefRegs should contain all TrustedRegs"); + return Next; } @@ -564,11 +560,8 @@ class SrcSafetyAnalysis { const SrcState &S = getStateBefore(Inst); std::vector Result; - for (const MCInst *Inst : lastWritingInsts(S, ClobberedReg)) { - MCInstReference Ref = MCInstReference::get(Inst, BF); - assert(Ref && "Expected Inst to be found"); - Result.push_back(Ref); - } + for (const MCInst *Inst : lastWritingInsts(S, ClobberedReg)) + Result.push_back(MCInstReference::get(*Inst, BF)); return Result; } }; @@ -872,9 +865,9 @@ struct DstState { return (*this = StateIn); CannotEscapeUnchecked &= StateIn.CannotEscapeUnchecked; - for (unsigned I = 0; I < FirstInstLeakingReg.size(); ++I) - for (const MCInst *J : StateIn.FirstInstLeakingReg[I]) - FirstInstLeakingReg[I].insert(J); + for (auto [ThisSet, OtherSet] : + llvm::zip_equal(FirstInstLeakingReg, StateIn.FirstInstLeakingReg)) + ThisSet.insert_range(OtherSet); return *this; } @@ -1040,8 +1033,7 @@ class DstSafetyAnalysis { // ... an address can be updated in a safe manner, or if (auto DstAndSrc = BC.MIB->analyzeAddressArithmeticsForPtrAuth(Inst)) { - MCPhysReg DstReg, SrcReg; - std::tie(DstReg, SrcReg) = *DstAndSrc; + auto [DstReg, SrcReg] = *DstAndSrc; // Note that *all* registers containing the derived values must be safe, // both source and destination ones. No temporaries are supported at now. if (Cur.CannotEscapeUnchecked[SrcReg] && @@ -1081,7 +1073,7 @@ class DstSafetyAnalysis { // If this instruction terminates the program immediately, no // authentication oracles are possible past this point. 
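Two related pieces appear in the hunks just above: with the new auth-traps-on-failure option the trusted set no longer needs a separate computation, because a pointer that is safe to dereference already faults on first use if its authentication failed, and a new assertion pins down the invariant that trusted registers are a subset of the safe-to-dereference ones. A small sketch of both, again with std::bitset as an illustrative stand-in for BitVector:

#include <bitset>
#include <cassert>

constexpr std::size_t NRegs = 64; // illustrative

std::bitset<NRegs>
computeNewTrusted(bool AuthTrapsOnFailure,
                  const std::bitset<NRegs> &NewSafeToDeref,
                  const std::bitset<NRegs> &NewTrustedExplicit) {
  // When authentication traps on failure, safe-to-dereference implies trusted.
  return AuthTrapsOnFailure ? NewSafeToDeref : NewTrustedExplicit;
}

void checkInvariant(const std::bitset<NRegs> &Trusted,
                    const std::bitset<NRegs> &SafeToDeref) {
  // Mirrors the new assertion: Trusted must not contain a register that is
  // not also SafeToDeref (being trusted is the strictly stronger property).
  assert((Trusted & ~SafeToDeref).none());
}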
if (BC.MIB->isTrap(Point)) { - LLVM_DEBUG({ traceInst(BC, "Trap instruction found", Point); }); + LLVM_DEBUG(traceInst(BC, "Trap instruction found", Point)); DstState Next(NumRegs, RegsToTrackInstsFor.getNumTrackedRegisters()); Next.CannotEscapeUnchecked.set(); return Next; @@ -1136,11 +1128,8 @@ class DstSafetyAnalysis { const DstState &S = getStateAfter(Inst); std::vector Result; - for (const MCInst *Inst : firstLeakingInsts(S, LeakedReg)) { - MCInstReference Ref = MCInstReference::get(Inst, BF); - assert(Ref && "Expected Inst to be found"); - Result.push_back(Ref); - } + for (const MCInst *Inst : firstLeakingInsts(S, LeakedReg)) + Result.push_back(MCInstReference::get(*Inst, BF)); return Result; } }; @@ -1169,6 +1158,11 @@ class DataflowDstSafetyAnalysis } void run() override { + // As long as DstSafetyAnalysis is only computed to detect authentication + // oracles, it is a waste of time to compute it when authentication + // instructions are known to always trap on failure. + assert(!AuthTrapsOnFailure && + "DstSafetyAnalysis is useless with faulting auth"); for (BinaryBasicBlock &BB : Func) { if (auto CheckerInfo = BC.MIB->getAuthCheckedReg(BB)) { LLVM_DEBUG({ @@ -1254,7 +1248,7 @@ class CFGUnawareDstSafetyAnalysis : public DstSafetyAnalysis, // starting to analyze Inst. if (BC.MIB->isCall(Inst) || BC.MIB->isBranch(Inst) || BC.MIB->isReturn(Inst)) { - LLVM_DEBUG({ traceInst(BC, "Control flow instruction", Inst); }); + LLVM_DEBUG(traceInst(BC, "Control flow instruction", Inst)); S = createUnsafeState(); } @@ -1345,8 +1339,7 @@ static bool shouldAnalyzeTailCallInst(const BinaryContext &BC, // (such as isBranch at the time of writing this comment), some don't (such // as isCall). For that reason, call MCInstrDesc's methods explicitly when // it is important. - const MCInstrDesc &Desc = - BC.MII->get(static_cast(Inst).getOpcode()); + const MCInstrDesc &Desc = BC.MII->get(Inst.getMCInst().getOpcode()); // Tail call should be a branch (but not necessarily an indirect one). if (!Desc.isBranch()) return false; @@ -1400,7 +1393,7 @@ shouldReportUnsafeTailCall(const BinaryContext &BC, const BinaryFunction &BF, // such libc, ignore tail calls performed by ELF entry function. if (BC.StartFunctionAddress && *BC.StartFunctionAddress == Inst.getFunction()->getAddress()) { - LLVM_DEBUG({ dbgs() << " Skipping tail call in ELF entry function.\n"; }); + LLVM_DEBUG(dbgs() << " Skipping tail call in ELF entry function.\n"); return std::nullopt; } @@ -1474,7 +1467,7 @@ shouldReportAuthOracle(const BinaryContext &BC, const MCInstReference &Inst, }); if (S.empty()) { - LLVM_DEBUG({ dbgs() << " DstState is empty!\n"; }); + LLVM_DEBUG(dbgs() << " DstState is empty!\n"); return make_generic_report( Inst, "Warning: no state computed for an authentication instruction " "(possibly unreachable)"); @@ -1501,7 +1494,7 @@ collectRegsToTrack(ArrayRef> Reports) { void FunctionAnalysisContext::findUnsafeUses( SmallVector> &Reports) { auto Analysis = SrcSafetyAnalysis::create(BF, AllocatorId, {}); - LLVM_DEBUG({ dbgs() << "Running src register safety analysis...\n"; }); + LLVM_DEBUG(dbgs() << "Running src register safety analysis...\n"); Analysis->run(); LLVM_DEBUG({ dbgs() << "After src register safety analysis:\n"; @@ -1541,7 +1534,7 @@ void FunctionAnalysisContext::findUnsafeUses( // This is printed as "[message] in function [name], basic block ..., // at address ..." when the issue is reported to the user. 
Reports.push_back(make_generic_report( - MCInstReference::get(FirstInst, BF), + MCInstReference(BB, *FirstInst), "Warning: possibly imprecise CFG, the analysis quality may be " "degraded in this function. According to BOLT, unreachable code is " "found" /* in function [name]... */)); @@ -1558,8 +1551,7 @@ void FunctionAnalysisContext::findUnsafeUses( const SrcState &S = Analysis->getStateBefore(Inst); if (S.empty()) { - LLVM_DEBUG( - { traceInst(BC, "Instruction has no state, skipping", Inst); }); + LLVM_DEBUG(traceInst(BC, "Instruction has no state, skipping", Inst)); assert(UnreachableBBReported && "Should be reported at least once"); (void)UnreachableBBReported; return; @@ -1586,8 +1578,7 @@ void FunctionAnalysisContext::augmentUnsafeUseReports( SmallVector RegsToTrack = collectRegsToTrack(Reports); // Re-compute the analysis with register tracking. auto Analysis = SrcSafetyAnalysis::create(BF, AllocatorId, RegsToTrack); - LLVM_DEBUG( - { dbgs() << "\nRunning detailed src register safety analysis...\n"; }); + LLVM_DEBUG(dbgs() << "\nRunning detailed src register safety analysis...\n"); Analysis->run(); LLVM_DEBUG({ dbgs() << "After detailed src register safety analysis:\n"; @@ -1597,7 +1588,7 @@ void FunctionAnalysisContext::augmentUnsafeUseReports( // Augment gadget reports. for (auto &Report : Reports) { MCInstReference Location = Report.Issue->Location; - LLVM_DEBUG({ traceInst(BC, "Attaching clobbering info to", Location); }); + LLVM_DEBUG(traceInst(BC, "Attaching clobbering info to", Location)); assert(Report.RequestedDetails && "Should be removed by handleSimpleReports"); auto DetailedInfo = @@ -1611,9 +1602,11 @@ void FunctionAnalysisContext::findUnsafeDefs( SmallVector> &Reports) { if (PacRetGadgetsOnly) return; + if (AuthTrapsOnFailure) + return; auto Analysis = DstSafetyAnalysis::create(BF, AllocatorId, {}); - LLVM_DEBUG({ dbgs() << "Running dst register safety analysis...\n"; }); + LLVM_DEBUG(dbgs() << "Running dst register safety analysis...\n"); Analysis->run(); LLVM_DEBUG({ dbgs() << "After dst register safety analysis:\n"; @@ -1636,8 +1629,7 @@ void FunctionAnalysisContext::augmentUnsafeDefReports( SmallVector RegsToTrack = collectRegsToTrack(Reports); // Re-compute the analysis with register tracking. auto Analysis = DstSafetyAnalysis::create(BF, AllocatorId, RegsToTrack); - LLVM_DEBUG( - { dbgs() << "\nRunning detailed dst register safety analysis...\n"; }); + LLVM_DEBUG(dbgs() << "\nRunning detailed dst register safety analysis...\n"); Analysis->run(); LLVM_DEBUG({ dbgs() << "After detailed dst register safety analysis:\n"; @@ -1647,7 +1639,7 @@ void FunctionAnalysisContext::augmentUnsafeDefReports( // Augment gadget reports. 
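findUnsafeDefs and augmentUnsafeDefReports (like their src-safety counterparts earlier) follow a two-phase pattern: a cheap analysis produces the reports, the registers those reports mention are collected, and only then is the analysis re-run with per-register tracking to attach details. A compact sketch of that control flow under made-up types (AnalysisLike and ReportLike are not BOLT classes):

#include <string>
#include <vector>

struct ReportLike {
  unsigned Reg = 0;              // register the report is about
  bool RequestedDetails = false;
  std::string Details;           // filled in by the second phase
};

struct AnalysisLike {
  bool TrackRegisters = false;
  void run() { /* dataflow over the function */ }
  std::string detailsFor(unsigned Reg) const {
    return "last writes to reg " + std::to_string(Reg);
  }
};

void findAndAugment(std::vector<ReportLike> &Reports) {
  AnalysisLike Cheap; // phase 1: no per-register tracking
  Cheap.run();
  // ... Reports are produced from Cheap's results here ...

  std::vector<unsigned> RegsToTrack;
  for (const ReportLike &R : Reports)
    if (R.RequestedDetails)
      RegsToTrack.push_back(R.Reg);
  if (RegsToTrack.empty())
    return; // nothing requested details, no second pass needed

  AnalysisLike Detailed; // phase 2: re-run, tracking only RegsToTrack
  Detailed.TrackRegisters = true;
  Detailed.run();
  for (ReportLike &R : Reports)
    if (R.RequestedDetails)
      R.Details = Detailed.detailsFor(R.Reg);
}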
for (auto &Report : Reports) { MCInstReference Location = Report.Issue->Location; - LLVM_DEBUG({ traceInst(BC, "Attaching leakage info to", Location); }); + LLVM_DEBUG(traceInst(BC, "Attaching leakage info to", Location)); assert(Report.RequestedDetails && "Should be removed by handleSimpleReports"); auto DetailedInfo = std::make_shared( @@ -1705,48 +1697,44 @@ void Analysis::runOnFunction(BinaryFunction &BF, } } -static void printBB(const BinaryContext &BC, const BinaryBasicBlock *BB, +static void printBB(const BinaryContext &BC, const BinaryBasicBlock &BB, size_t StartIndex = 0, size_t EndIndex = -1) { if (EndIndex == (size_t)-1) - EndIndex = BB->size() - 1; - const BinaryFunction *BF = BB->getFunction(); + EndIndex = BB.size() - 1; + const BinaryFunction *BF = BB.getFunction(); for (unsigned I = StartIndex; I <= EndIndex; ++I) { - // FIXME: this assumes all instructions are 4 bytes in size. This is true - // for AArch64, but it might be good to extract this function so it can be - // used elsewhere and for other targets too. - uint64_t Address = BB->getOffset() + BF->getAddress() + 4 * I; - const MCInst &Inst = BB->getInstructionAtIndex(I); + MCInstReference Inst(BB, I); if (BC.MIB->isCFI(Inst)) continue; - BC.printInstruction(outs(), Inst, Address, BF); + BC.printInstruction(outs(), Inst, Inst.computeAddress(), BF); } } static void reportFoundGadgetInSingleBBSingleRelatedInst( raw_ostream &OS, const BinaryContext &BC, const MCInstReference RelatedInst, const MCInstReference Location) { - BinaryBasicBlock *BB = Location.getBasicBlock(); - assert(RelatedInst.ParentKind == MCInstReference::BasicBlockParent); - assert(Location.ParentKind == MCInstReference::BasicBlockParent); - MCInstInBBReference RelatedInstBB = RelatedInst.U.BBRef; - if (BB == RelatedInstBB.BB) { + const BinaryBasicBlock *BB = Location.getBasicBlock(); + assert(RelatedInst.hasCFG()); + assert(Location.hasCFG()); + if (BB == RelatedInst.getBasicBlock()) { OS << " This happens in the following basic block:\n"; - printBB(BC, BB); + printBB(BC, *BB); } } void Diagnostic::printBasicInfo(raw_ostream &OS, const BinaryContext &BC, StringRef IssueKind) const { - BinaryFunction *BF = Location.getFunction(); - BinaryBasicBlock *BB = Location.getBasicBlock(); + const BinaryBasicBlock *BB = Location.getBasicBlock(); + const BinaryFunction *BF = Location.getFunction(); + const uint64_t Address = Location.computeAddress(); OS << "\nGS-PAUTH: " << IssueKind; OS << " in function " << BF->getPrintName(); if (BB) OS << ", basic block " << BB->getName(); - OS << ", at address " << llvm::format("%x", Location.getAddress()) << "\n"; + OS << ", at address " << llvm::format("%x", Address) << "\n"; OS << " The instruction is "; - BC.printInstruction(OS, Location, Location.getAddress(), BF); + BC.printInstruction(OS, Location, Address, BF); } void GadgetDiagnostic::generateReport(raw_ostream &OS, @@ -1760,21 +1748,23 @@ static void printRelatedInstrs(raw_ostream &OS, const MCInstReference Location, const BinaryContext &BC = BF.getBinaryContext(); // Sort by address to ensure output is deterministic. 
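The replacement that follows stops calling the address computation inside the sort comparator: computeAddress() can involve re-encoding the instructions of the containing basic block, so each address is computed once, paired with its reference, and the pairs are sorted on the cached key. A generic sketch of that decorate-then-sort pattern:

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

template <typename T, typename KeyFn>
std::vector<std::pair<uint64_t, T>>
sortByCachedKey(const std::vector<T> &Items, KeyFn Key) {
  std::vector<std::pair<uint64_t, T>> Decorated;
  Decorated.reserve(Items.size());
  for (const T &Item : Items)
    Decorated.emplace_back(Key(Item), Item); // compute the expensive key once
  std::sort(Decorated.begin(), Decorated.end(),
            [](const auto &A, const auto &B) { return A.first < B.first; });
  return Decorated;
}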
- SmallVector RI(RelatedInstrs); - llvm::sort(RI, [](const MCInstReference &A, const MCInstReference &B) { - return A.getAddress() < B.getAddress(); - }); + SmallVector> RI; + for (auto &InstRef : RelatedInstrs) + RI.push_back(std::make_pair(InstRef.computeAddress(), InstRef)); + llvm::sort(RI, [](auto A, auto B) { return A.first < B.first; }); + for (unsigned I = 0; I < RI.size(); ++I) { - MCInstReference InstRef = RI[I]; + auto [Address, InstRef] = RI[I]; OS << " " << (I + 1) << ". "; - BC.printInstruction(OS, InstRef, InstRef.getAddress(), &BF); + BC.printInstruction(OS, InstRef, Address, &BF); }; + if (RelatedInstrs.size() == 1) { const MCInstReference RelatedInst = RelatedInstrs[0]; // Printing the details for the MCInstReference::FunctionParent case // is not implemented not to overcomplicate the code, as most functions // are expected to have CFG information. - if (RelatedInst.ParentKind == MCInstReference::BasicBlockParent) + if (RelatedInst.hasCFG()) reportFoundGadgetInSingleBBSingleRelatedInst(OS, BC, RelatedInst, Location); } diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp index 6752489ad562a..7366d2aca35ea 100644 --- a/bolt/lib/Rewrite/DWARFRewriter.cpp +++ b/bolt/lib/Rewrite/DWARFRewriter.cpp @@ -504,9 +504,7 @@ static void emitDWOBuilder(const std::string &DWOName, } emitUnit(DWODIEBuilder, *Streamer, SplitCU); } else { - for (std::unique_ptr &CU : - SplitCU.getContext().dwo_compile_units()) - emitUnit(DWODIEBuilder, *Streamer, *CU); + emitUnit(DWODIEBuilder, *Streamer, SplitCU); // emit debug_types sections for dwarf4 for (DWARFUnit *CU : DWODIEBuilder.getDWARF4TUVector()) @@ -1855,7 +1853,7 @@ void DWARFRewriter::writeDWOFiles( else if (!sys::fs::exists(CompDir)) CompDir = "."; // Prevent failures when DWOName is already an absolute path. 
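This second call site (the first was in BinaryContext::preprocessDWODebugInfo earlier in the patch) makes the same swap of helpers while keeping the intent: if the DWO name is already an absolute path it stays untouched, otherwise it is resolved against the compilation directory. A minimal usage sketch of sys::path::make_absolute with made-up paths:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Path.h"

llvm::SmallString<128> resolveDWOPath(llvm::StringRef CompDir,
                                      llvm::StringRef DWOName) {
  llvm::SmallString<128> AbsolutePath(DWOName);
  // Left unchanged when DWOName is already absolute (e.g. "/tmp/foo.dwo");
  // otherwise becomes CompDir joined with DWOName.
  llvm::sys::path::make_absolute(CompDir, AbsolutePath);
  return AbsolutePath;
}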
- sys::fs::make_absolute(CompDir, AbsolutePath); + sys::path::make_absolute(CompDir, AbsolutePath); std::error_code EC; std::unique_ptr TempOut = diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp index a6e4dbc9c192f..c13a9f016e8ae 100644 --- a/bolt/lib/Rewrite/RewriteInstance.cpp +++ b/bolt/lib/Rewrite/RewriteInstance.cpp @@ -1312,7 +1312,9 @@ void RewriteInstance::discoverFileObjects() { // Annotate functions with code/data markers in AArch64 for (auto &[Address, Type] : MarkerSymbols) { - auto *BF = BC->getBinaryFunctionContainingAddress(Address, true, true); + auto *BF = BC->getBinaryFunctionContainingAddress(Address, + /*CheckPastEnd*/ false, + /*UseMaxSize*/ true); if (!BF) { // Stray marker @@ -2272,8 +2274,7 @@ uint32_t getRelocationSymbol(const ELFObjectFileBase *Obj, bool RewriteInstance::analyzeRelocation( const RelocationRef &Rel, uint32_t &RType, std::string &SymbolName, bool &IsSectionRelocation, uint64_t &SymbolAddress, int64_t &Addend, - uint64_t &ExtractedValue, bool &Skip) const { - Skip = false; + uint64_t &ExtractedValue) const { if (!Relocation::isSupported(RType)) return false; @@ -2705,9 +2706,8 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection, int64_t Addend; uint64_t ExtractedValue; bool IsSectionRelocation; - bool Skip; if (!analyzeRelocation(Rel, RType, SymbolName, IsSectionRelocation, - SymbolAddress, Addend, ExtractedValue, Skip)) { + SymbolAddress, Addend, ExtractedValue)) { LLVM_DEBUG({ dbgs() << "BOLT-WARNING: failed to analyze relocation @ offset = " << formatv("{0:x}; type name = {1}\n", Rel.getOffset(), TypeName); @@ -2716,14 +2716,6 @@ void RewriteInstance::handleRelocation(const SectionRef &RelocatedSection, return; } - if (Skip) { - LLVM_DEBUG({ - dbgs() << "BOLT-DEBUG: skipping relocation @ offset = " - << formatv("{0:x}; type name = {1}\n", Rel.getOffset(), TypeName); - }); - return; - } - if (!IsFromCode && !IsWritable && (IsX86 || IsAArch64) && Relocation::isPCRelative(RType)) { BinaryData *BD = BC->getBinaryDataContainingAddress(Rel.getOffset()); diff --git a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp index f972646aa12ea..f271867cb2004 100644 --- a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp +++ b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp @@ -19,6 +19,7 @@ #include "Utils/AArch64BaseInfo.h" #include "bolt/Core/BinaryBasicBlock.h" #include "bolt/Core/BinaryFunction.h" +#include "bolt/Core/MCInstUtils.h" #include "bolt/Core/MCPlusBuilder.h" #include "llvm/BinaryFormat/ELF.h" #include "llvm/MC/MCContext.h" @@ -26,6 +27,7 @@ #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCRegister.h" #include "llvm/MC/MCRegisterInfo.h" +#include "llvm/Support/CommandLine.h" #include "llvm/Support/DataExtractor.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -35,6 +37,15 @@ using namespace llvm; using namespace bolt; +namespace opts { +extern cl::OptionCategory BoltInstrCategory; +static cl::opt NoLSEAtomics( + "no-lse-atomics", + cl::desc("generate instrumentation code sequence without using LSE atomic " + "instruction"), + cl::init(false), cl::Optional, cl::cat(BoltInstrCategory)); +} // namespace opts + namespace { static void getSystemFlag(MCInst &Inst, MCPhysReg RegName) { @@ -106,7 +117,7 @@ static void storeReg(MCInst &Inst, MCPhysReg From, MCPhysReg To) { } static void atomicAdd(MCInst &Inst, MCPhysReg RegTo, MCPhysReg RegCnt) { - // NOTE: Supports only ARM with LSE extension + 
assert(!opts::NoLSEAtomics && "Supports only ARM with LSE extension"); Inst.setOpcode(AArch64::LDADDX); Inst.clear(); Inst.addOperand(MCOperand::createReg(AArch64::XZR)); @@ -135,6 +146,8 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { public: using MCPlusBuilder::MCPlusBuilder; + BinaryFunction *InstrCounterIncrFunc{nullptr}; + std::unique_ptr createTargetSymbolizer(BinaryFunction &Function, bool CreateNewSymbols) const override { @@ -389,81 +402,59 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { // Iterate over the instructions of BB in reverse order, matching opcodes // and operands. - MCPhysReg TestedReg = 0; - MCPhysReg ScratchReg = 0; + auto It = BB.end(); - auto StepAndGetOpcode = [&It, &BB]() -> int { - if (It == BB.begin()) - return -1; - --It; - return It->getOpcode(); + auto StepBack = [&]() { + while (It != BB.begin()) { + --It; + // Skip any CFI instructions, but no other pseudos are expected here. + if (!isCFI(*It)) + return true; + } + return false; }; - - switch (StepAndGetOpcode()) { - default: - // Not matched the branch instruction. + // Step to the last non-CFI instruction. + if (!StepBack()) return std::nullopt; - case AArch64::Bcc: - // Bcc EQ, .Lon_success - if (It->getOperand(0).getImm() != AArch64CC::EQ) - return std::nullopt; - // Not checking .Lon_success (see above). - // SUBSXrs XZR, TestedReg, ScratchReg, 0 (used by "CMP reg, reg" alias) - if (StepAndGetOpcode() != AArch64::SUBSXrs || - It->getOperand(0).getReg() != AArch64::XZR || - It->getOperand(3).getImm() != 0) + using namespace llvm::bolt::LowLevelInstMatcherDSL; + Reg TestedReg; + Reg ScratchReg; + + if (matchInst(*It, AArch64::Bcc, Imm(AArch64CC::EQ) /*, .Lon_success*/)) { + if (!StepBack() || !matchInst(*It, AArch64::SUBSXrs, Reg(AArch64::XZR), + TestedReg, ScratchReg, Imm(0))) return std::nullopt; - TestedReg = It->getOperand(1).getReg(); - ScratchReg = It->getOperand(2).getReg(); // Either XPAC(I|D) ScratchReg, ScratchReg // or XPACLRI - switch (StepAndGetOpcode()) { - default: + if (!StepBack()) return std::nullopt; - case AArch64::XPACLRI: + if (matchInst(*It, AArch64::XPACLRI)) { // No operands to check, but using XPACLRI forces TestedReg to be X30. - if (TestedReg != AArch64::LR) - return std::nullopt; - break; - case AArch64::XPACI: - case AArch64::XPACD: - if (It->getOperand(0).getReg() != ScratchReg || - It->getOperand(1).getReg() != ScratchReg) + if (TestedReg.get() != AArch64::LR) return std::nullopt; - break; + } else if (!matchInst(*It, AArch64::XPACI, ScratchReg, ScratchReg) && + !matchInst(*It, AArch64::XPACD, ScratchReg, ScratchReg)) { + return std::nullopt; } - // ORRXrs ScratchReg, XZR, TestedReg, 0 (used by "MOV reg, reg" alias) - if (StepAndGetOpcode() != AArch64::ORRXrs) - return std::nullopt; - if (It->getOperand(0).getReg() != ScratchReg || - It->getOperand(1).getReg() != AArch64::XZR || - It->getOperand(2).getReg() != TestedReg || - It->getOperand(3).getImm() != 0) + if (!StepBack() || !matchInst(*It, AArch64::ORRXrs, ScratchReg, + Reg(AArch64::XZR), TestedReg, Imm(0))) return std::nullopt; - return std::make_pair(TestedReg, &*It); - - case AArch64::TBZX: - // TBZX ScratchReg, 62, .Lon_success - ScratchReg = It->getOperand(0).getReg(); - if (It->getOperand(1).getImm() != 62) - return std::nullopt; - // Not checking .Lon_success (see above). 
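The new LowLevelInstMatcherDSL used in this hunk replaces the hand-written opcode and operand checks. Judging from its usage here, a Reg placeholder binds to whatever register it sees first and then constrains later matchInst calls, while Imm(...) and Reg(AArch64::XZR) demand exact operands. Below is a self-contained toy reimplementation of that binding idea over simplified types; it is not the actual header added by this patch and omits the fixed-register pattern.

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

struct Operand { bool IsReg; uint64_t Value; };
struct ToyInst { unsigned Opcode; std::vector<Operand> Operands; };

struct RegPlaceholder {
  std::optional<uint64_t> Bound;
  bool match(const Operand &Op) {
    if (!Op.IsReg)
      return false;
    if (!Bound) {
      Bound = Op.Value;        // first use: remember the register
      return true;
    }
    return *Bound == Op.Value; // later uses: must be the same register
  }
  uint64_t get() const { assert(Bound); return *Bound; }
};

struct ImmPattern {
  uint64_t Expected;
  bool match(const Operand &Op) const {
    return !Op.IsReg && Op.Value == Expected;
  }
};

// Succeeds only if the opcode matches and each leading operand satisfies its
// pattern, in order.
template <typename... Ps>
bool matchInst(const ToyInst &I, unsigned Opcode, Ps &&...Patterns) {
  if (I.Opcode != Opcode || I.Operands.size() < sizeof...(Patterns))
    return false;
  unsigned Idx = 0;
  return (Patterns.match(I.Operands[Idx++]) && ...);
}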
+ return std::make_pair(TestedReg.get(), &*It); + } - // EORXrs ScratchReg, TestedReg, TestedReg, 1 - if (StepAndGetOpcode() != AArch64::EORXrs) - return std::nullopt; - TestedReg = It->getOperand(1).getReg(); - if (It->getOperand(0).getReg() != ScratchReg || - It->getOperand(2).getReg() != TestedReg || - It->getOperand(3).getImm() != 1) + if (matchInst(*It, AArch64::TBZX, ScratchReg, Imm(62) /*, .Lon_success*/)) { + if (!StepBack() || !matchInst(*It, AArch64::EORXrs, ScratchReg, TestedReg, + TestedReg, Imm(1))) return std::nullopt; - return std::make_pair(TestedReg, &*It); + return std::make_pair(TestedReg.get(), &*It); } + + return std::nullopt; } std::optional getAuthCheckedReg(const MCInst &Inst, @@ -2513,22 +2504,129 @@ class AArch64MCPlusBuilder : public MCPlusBuilder { return Insts; } - InstructionListType - createInstrIncMemory(const MCSymbol *Target, MCContext *Ctx, bool IsLeaf, - unsigned CodePointerSize) const override { + // Instrumentation code sequence using LSE atomic instruction has a total of + // 6 instructions: + // + // stp x0, x1, [sp, #-0x10]! + // adrp x0, page_address(counter) + // add x0, x0, page_offset(counter) + // mov x1, #0x1 + // stadd x1, [x0] + // ldp x0, x1, [sp], #0x10 + // + // Instrumentation code sequence without using LSE atomic instruction has + // 8 instructions at instrumentation place, with 6 instructions in the helper: + // + // stp x0, x30, [sp, #-0x10]! + // stp x1, x2, [sp, #-0x10]! + // adrp x0, page_address(counter) + // add x0, x0, page_offset(counter) + // adrp x1, page_address(helper) + // add x1, x1, page_offset(helper) + // blr x1 + // ldp x0, x30, [sp], #0x10 + // + // : + // ldaxr x1, [x0] + // add x1, x1, #0x1 + // stlxr w2, x1, [x0] + // cbnz w2, + // ldp x1, x2, [sp], #0x10 + // ret + + void createInstrCounterIncrFunc(BinaryContext &BC) override { + assert(InstrCounterIncrFunc == nullptr && + "helper function of counter increment for instrumentation " + "has already been created"); + + if (!opts::NoLSEAtomics) + return; + + MCContext *Ctx = BC.Ctx.get(); + InstrCounterIncrFunc = BC.createInjectedBinaryFunction( + "__bolt_instr_counter_incr", /*IsSimple*/ false); + std::vector> BBs; + + BBs.emplace_back(InstrCounterIncrFunc->createBasicBlock()); + InstructionListType Instrs(4); + Instrs[0].setOpcode(AArch64::LDAXRX); + Instrs[0].clear(); + Instrs[0].addOperand(MCOperand::createReg(AArch64::X1)); + Instrs[0].addOperand(MCOperand::createReg(AArch64::X0)); + Instrs[1].setOpcode(AArch64::ADDXri); + Instrs[1].clear(); + Instrs[1].addOperand(MCOperand::createReg(AArch64::X1)); + Instrs[1].addOperand(MCOperand::createReg(AArch64::X1)); + Instrs[1].addOperand(MCOperand::createImm(1)); + Instrs[1].addOperand(MCOperand::createImm(0)); + Instrs[2].setOpcode(AArch64::STLXRX); + Instrs[2].clear(); + Instrs[2].addOperand(MCOperand::createReg(AArch64::W2)); + Instrs[2].addOperand(MCOperand::createReg(AArch64::X1)); + Instrs[2].addOperand(MCOperand::createReg(AArch64::X0)); + Instrs[3].setOpcode(AArch64::CBNZW); + Instrs[3].clear(); + Instrs[3].addOperand(MCOperand::createReg(AArch64::W2)); + Instrs[3].addOperand(MCOperand::createExpr( + MCSymbolRefExpr::create(BBs.back()->getLabel(), *Ctx))); + BBs.back()->addInstructions(Instrs.begin(), Instrs.end()); + BBs.back()->setCFIState(0); + + BBs.emplace_back(InstrCounterIncrFunc->createBasicBlock()); + InstructionListType InstrsEpilog(2); + createPopRegisters(InstrsEpilog[0], AArch64::X1, AArch64::X2); + createReturn(InstrsEpilog[1]); + BBs.back()->addInstructions(InstrsEpilog.begin(), 
InstrsEpilog.end()); + BBs.back()->setCFIState(0); + + BBs[0]->addSuccessor(BBs[0].get()); + BBs[0]->addSuccessor(BBs[1].get()); + + InstrCounterIncrFunc->insertBasicBlocks(nullptr, std::move(BBs), + /*UpdateLayout*/ true, + /*UpdateCFIState*/ false); + InstrCounterIncrFunc->updateState(BinaryFunction::State::CFG_Finalized); + + LLVM_DEBUG({ + dbgs() << "BOLT-DEBUG: instrumentation counter increment helper:\n"; + InstrCounterIncrFunc->dump(); + }); + } + + InstructionListType createInstrIncMemory(const MCSymbol *Target, + MCContext *Ctx, bool IsLeaf, + unsigned CodePointerSize) override { unsigned int I = 0; - InstructionListType Instrs(6); + InstructionListType Instrs(opts::NoLSEAtomics ? 8 : 6); + + if (opts::NoLSEAtomics) { + createPushRegisters(Instrs[I++], AArch64::X0, AArch64::LR); + createPushRegisters(Instrs[I++], AArch64::X1, AArch64::X2); + } else { + createPushRegisters(Instrs[I++], AArch64::X0, AArch64::X1); + } - createPushRegisters(Instrs[I++], AArch64::X0, AArch64::X1); InstructionListType Addr = materializeAddress(Target, Ctx, AArch64::X0); assert(Addr.size() == 2 && "Invalid Addr size"); std::copy(Addr.begin(), Addr.end(), Instrs.begin() + I); I += Addr.size(); - InstructionListType Insts = createIncMemory(AArch64::X0, AArch64::X1); - assert(Insts.size() == 2 && "Invalid Insts size"); - std::copy(Insts.begin(), Insts.end(), Instrs.begin() + I); - I += Insts.size(); - createPopRegisters(Instrs[I++], AArch64::X0, AArch64::X1); + + if (opts::NoLSEAtomics) { + const MCSymbol *Helper = InstrCounterIncrFunc->getSymbol(); + InstructionListType HelperAddr = + materializeAddress(Helper, Ctx, AArch64::X1); + assert(HelperAddr.size() == 2 && "Invalid HelperAddr size"); + std::copy(HelperAddr.begin(), HelperAddr.end(), Instrs.begin() + I); + I += HelperAddr.size(); + createIndirectCallInst(Instrs[I++], /*IsTailCall*/ false, AArch64::X1); + } else { + InstructionListType Insts = createIncMemory(AArch64::X0, AArch64::X1); + assert(Insts.size() == 2 && "Invalid Insts size"); + std::copy(Insts.begin(), Insts.end(), Instrs.begin() + I); + I += Insts.size(); + } + createPopRegisters(Instrs[I++], AArch64::X0, + opts::NoLSEAtomics ? AArch64::LR : AArch64::X1); return Instrs; } diff --git a/bolt/lib/Target/AArch64/CMakeLists.txt b/bolt/lib/Target/AArch64/CMakeLists.txt index cb38117de659e..53554e75de15c 100644 --- a/bolt/lib/Target/AArch64/CMakeLists.txt +++ b/bolt/lib/Target/AArch64/CMakeLists.txt @@ -28,7 +28,7 @@ add_llvm_library(LLVMBOLTTargetAArch64 AArch64CommonTableGen ) -target_link_libraries(LLVMBOLTTargetAArch64 PRIVATE LLVMBOLTCore) +target_link_libraries(LLVMBOLTTargetAArch64 PRIVATE LLVMBOLTCore LLVMBOLTUtils) include_directories( ${LLVM_MAIN_SRC_DIR}/lib/Target/AArch64 diff --git a/bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp b/bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp index 10b4913b6ab7f..7c4a8781fd57d 100644 --- a/bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp +++ b/bolt/lib/Target/RISCV/RISCVMCPlusBuilder.cpp @@ -626,9 +626,9 @@ class RISCVMCPlusBuilder : public MCPlusBuilder { return Insts; } - InstructionListType - createInstrIncMemory(const MCSymbol *Target, MCContext *Ctx, bool IsLeaf, - unsigned CodePointerSize) const override { + InstructionListType createInstrIncMemory(const MCSymbol *Target, + MCContext *Ctx, bool IsLeaf, + unsigned CodePointerSize) override { // We need 2 scratch registers: one for the target address (x10), and one // for the increment value (x11). 
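At the source level, both instrumentation sequences documented above do the same thing, an atomic 64-bit counter increment: the LSE form corresponds to a single atomic add (stadd), and the no-lse-atomics path calls a shared helper containing the classic load-exclusive / store-exclusive retry loop. A rough C++ analogue of the two strategies (this is not the emitted code, and memory-ordering details differ from the actual ldaxr/stlxr pair):

#include <atomic>
#include <cstdint>

// LSE path: a single hardware atomic add.
void bumpCounterAtomicAdd(std::atomic<uint64_t> &Counter) {
  Counter.fetch_add(1, std::memory_order_relaxed);
}

// No-LSE path: retry loop, analogous to ldaxr / add / stlxr / cbnz.
void bumpCounterRetryLoop(std::atomic<uint64_t> &Counter) {
  uint64_t Old = Counter.load(std::memory_order_relaxed);
  while (!Counter.compare_exchange_weak(Old, Old + 1,
                                        std::memory_order_relaxed)) {
    // Old has been refreshed with the current value; try again.
  }
}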
// addi sp, sp, -16 diff --git a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp index 1842509dcc5e0..9026a9df7b5c2 100644 --- a/bolt/lib/Target/X86/X86MCPlusBuilder.cpp +++ b/bolt/lib/Target/X86/X86MCPlusBuilder.cpp @@ -3053,9 +3053,9 @@ class X86MCPlusBuilder : public MCPlusBuilder { Inst.clear(); } - InstructionListType - createInstrIncMemory(const MCSymbol *Target, MCContext *Ctx, bool IsLeaf, - unsigned CodePointerSize) const override { + InstructionListType createInstrIncMemory(const MCSymbol *Target, + MCContext *Ctx, bool IsLeaf, + unsigned CodePointerSize) override { InstructionListType Instrs(IsLeaf ? 13 : 11); unsigned int I = 0; diff --git a/bolt/test/AArch64/constant-island-alignment.s b/bolt/test/AArch64/constant-island-alignment.s index 3ce0df9d4f290..957c4705f5eec 100644 --- a/bolt/test/AArch64/constant-island-alignment.s +++ b/bolt/test/AArch64/constant-island-alignment.s @@ -1,14 +1,36 @@ // This test checks that the constant island is aligned after BOLT tool. -// In case the nop before .Lci will be removed the pointer to exit function -// won't be alinged and the test will fail. + +# RUN: split-file %s %t + +// For the first test case, in case the nop before .Lci will be removed +// the pointer to exit function won't be alinged and the test will fail. # RUN: llvm-mc -filetype=obj -triple aarch64-unknown-unknown \ -# RUN: %s -o %t.o -# RUN: %clang %cflags -fPIC -pie %t.o -o %t.exe -Wl,-q \ +# RUN: %t/xword_align.s -o %t_xa.o +# RUN: %clang %cflags -fPIC -pie %t_xa.o -o %t_xa.exe -Wl,-q \ # RUN: -nostartfiles -nodefaultlibs -Wl,-z,notext -# RUN: llvm-bolt %t.exe -o %t.bolt --use-old-text=0 --lite=0 --trap-old-code -# RUN: llvm-objdump -d --disassemble-symbols='$d' %t.bolt | FileCheck %s +# RUN: llvm-bolt %t_xa.exe -o %t_xa.bolt --use-old-text=0 --lite=0 \ +# RUN: --trap-old-code +# RUN: llvm-objdump -d --disassemble-symbols='$d' %t_xa.bolt | FileCheck %s + +// For the second and third test cases, we want to set an alignment based +// on various heuristics. 
+ +# RUN: %clang %cflags -pie %t/page_align.s -o %t_pa.exe -Wl,-q \ +# RUN: -Wl,--init=_foo -Wl,--fini=_foo +# RUN: llvm-bolt %t_pa.exe -o %t_pa.bolt +# RUN: llvm-objdump -t %t_pa.exe | grep _const_island +# RUN: llvm-objdump -t %t_pa.bolt | grep _const_island | FileCheck %s \ +# RUN: --check-prefix=PAGE + +# RUN: %clang %cflags -pie %t/64B_align.s -o %t_64B.exe -Wl,-q \ +# RUN: -Wl,--init=_foo -Wl,--fini=_foo +# RUN: llvm-bolt %t_64B.exe -o %t_64B.bolt +# RUN: llvm-objdump -t %t_64B.exe | grep _const_island +# RUN: llvm-objdump -t %t_64B.bolt | grep _const_island | FileCheck %s \ +# RUN: --check-prefix=64BYTE +;--- xword_align.s .text .align 4 .global @@ -36,3 +58,51 @@ _start: .Lci: .xword exitOk .xword 0 + +;--- page_align.s + .text + .global _foo + .type _foo, %function +_foo: + ret + + .text + .global _const_island + .align 12 +# PAGE: {{[0-9a-f]*}}000 g +_const_island: + .rept 0x25100 + .byte 0xbb + .endr + + .global _start + .type _start, %function +_start: + ret + + # Dummy relocation to force relocation mode + .reloc 0, R_AARCH64_NONE + +;--- 64B_align.s + .text + .global _foo + .type _foo, %function +_foo: + ret + + .text + .global _const_island + .align 6 +# 64BYTE: {{[0-9a-f]*}}{{0|4|8|c}}0 g +_const_island: + .rept 0x2048 + .byte 0xbb + .endr + + .global _start + .type _start, %function +_start: + ret + + # Dummy relocation to force relocation mode + .reloc 0, R_AARCH64_NONE diff --git a/bolt/test/AArch64/dwarf4-dwp-aarch64.s b/bolt/test/AArch64/dwarf4-dwp-aarch64.s new file mode 100755 index 0000000000000..37507e100a62d --- /dev/null +++ b/bolt/test/AArch64/dwarf4-dwp-aarch64.s @@ -0,0 +1,407 @@ +## This test checks updating debuginfo via dwarf4 dwp file +# RUN: rm -rf %t && mkdir -p %t && cd %t +# RUN: split-file %s %t +# RUN: llvm-mc -filetype=obj -triple aarch64-unknown-unknown --split-dwarf-file=main.exe-main.dwo %t/main.s -o %t/main.o +# RUN: llvm-mc -filetype=obj -triple aarch64-unknown-unknown --split-dwarf-file=main.exe-callee.dwo %t/callee.s -o %t/callee.o +# RUN: %clangxx %cxxflags -gdwarf-4 -gsplit-dwarf=split -Wl,-e,main %t/main.o %t/callee.o -o main.exe +# RUN: llvm-dwp -e %t/main.exe -o %t/main.exe.dwp +# RUN: llvm-bolt %t/main.exe -o %t/main.exe.bolt -update-debug-sections 2>&1 | FileCheck %s + +# CHECK-NOT: Assertion + +#--- main.s + .file "main.cpp" + .globl main // -- Begin function main + .type main,@function +main: // @main +.Lfunc_begin0: + .file 1 "." "main.cpp" + .loc 1 2 0 // main.cpp:2:0 + .loc 1 2 21 prologue_end // main.cpp:2:21 + .loc 1 2 14 epilogue_begin is_stmt 0 // main.cpp:2:14 + ret +.Lfunc_end0: + .size main, .Lfunc_end0-main + .section .debug_abbrev,"",@progbits + .byte 1 // Abbreviation Code + .byte 17 // DW_TAG_compile_unit + .byte 0 // DW_CHILDREN_no + .byte 16 // DW_AT_stmt_list + .byte 23 // DW_FORM_sec_offset + .byte 27 // DW_AT_comp_dir + .byte 14 // DW_FORM_strp + .ascii "\264B" // DW_AT_GNU_pubnames + .byte 25 // DW_FORM_flag_present + .ascii "\260B" // DW_AT_GNU_dwo_name + .byte 14 // DW_FORM_strp + .ascii "\261B" // DW_AT_GNU_dwo_id + .byte 7 // DW_FORM_data8 + .byte 17 // DW_AT_low_pc + .byte 1 // DW_FORM_addr + .byte 18 // DW_AT_high_pc + .byte 6 // DW_FORM_data4 + .ascii "\263B" // DW_AT_GNU_addr_base + .byte 23 // DW_FORM_sec_offset + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 0 // EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .word .Ldebug_info_end0-.Ldebug_info_start0 // Length of Unit +.Ldebug_info_start0: + .hword 4 // DWARF version number + .word .debug_abbrev // Offset Into Abbrev. 
Section + .byte 8 // Address Size (in bytes) + .byte 1 // Abbrev [1] 0xb:0x25 DW_TAG_compile_unit + .word .Lline_table_start0 // DW_AT_stmt_list + .word .Lskel_string0 // DW_AT_comp_dir + // DW_AT_GNU_pubnames + .word .Lskel_string1 // DW_AT_GNU_dwo_name + .xword 1465063543908291764 // DW_AT_GNU_dwo_id + .xword .Lfunc_begin0 // DW_AT_low_pc + .word .Lfunc_end0-.Lfunc_begin0 // DW_AT_high_pc + .word .Laddr_table_base0 // DW_AT_GNU_addr_base +.Ldebug_info_end0: + .section .debug_str,"MS",@progbits,1 +.Lskel_string0: + .asciz "." // string offset=0 +.Lskel_string1: + .asciz "main.exe-main.dwo" // string offset=2 + .section .debug_str.dwo,"eMS",@progbits,1 +.Linfo_string0: + .asciz "main" // string offset=0 +.Linfo_string1: + .asciz "int" // string offset=5 +.Linfo_string2: + .byte 0 // string offset=9 +.Linfo_string3: + .asciz "main.cpp" // string offset=10 +.Linfo_string4: + .asciz "main.exe-main.dwo" // string offset=19 + .section .debug_str_offsets.dwo,"e",@progbits + .word 0 + .word 5 + .word 9 + .word 10 + .word 19 + .section .debug_info.dwo,"e",@progbits + .word .Ldebug_info_dwo_end0-.Ldebug_info_dwo_start0 // Length of Unit +.Ldebug_info_dwo_start0: + .hword 4 // DWARF version number + .word 0 // Offset Into Abbrev. Section + .byte 8 // Address Size (in bytes) + .byte 1 // Abbrev [1] 0xb:0x22 DW_TAG_compile_unit + .byte 2 // DW_AT_producer + .hword 33 // DW_AT_language + .byte 3 // DW_AT_name + .byte 4 // DW_AT_GNU_dwo_name + .xword 1465063543908291764 // DW_AT_GNU_dwo_id + .byte 2 // Abbrev [2] 0x19:0xf DW_TAG_subprogram + .byte 0 // DW_AT_low_pc + .word .Lfunc_end0-.Lfunc_begin0 // DW_AT_high_pc + .byte 1 // DW_AT_frame_base + .byte 109 + .byte 0 // DW_AT_name + .byte 1 // DW_AT_decl_file + .byte 2 // DW_AT_decl_line + .word 40 // DW_AT_type + // DW_AT_external + .byte 3 // Abbrev [3] 0x28:0x4 DW_TAG_base_type + .byte 1 // DW_AT_name + .byte 5 // DW_AT_encoding + .byte 4 // DW_AT_byte_size + .byte 0 // End Of Children Mark +.Ldebug_info_dwo_end0: + .section .debug_abbrev.dwo,"e",@progbits + .byte 1 // Abbreviation Code + .byte 17 // DW_TAG_compile_unit + .byte 1 // DW_CHILDREN_yes + .byte 37 // DW_AT_producer + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 19 // DW_AT_language + .byte 5 // DW_FORM_data2 + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .ascii "\260B" // DW_AT_GNU_dwo_name + .ascii "\202>" // DW_FORM_GNU_str_index + .ascii "\261B" // DW_AT_GNU_dwo_id + .byte 7 // DW_FORM_data8 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 2 // Abbreviation Code + .byte 46 // DW_TAG_subprogram + .byte 0 // DW_CHILDREN_no + .byte 17 // DW_AT_low_pc + .ascii "\201>" // DW_FORM_GNU_addr_index + .byte 18 // DW_AT_high_pc + .byte 6 // DW_FORM_data4 + .byte 64 // DW_AT_frame_base + .byte 24 // DW_FORM_exprloc + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 58 // DW_AT_decl_file + .byte 11 // DW_FORM_data1 + .byte 59 // DW_AT_decl_line + .byte 11 // DW_FORM_data1 + .byte 73 // DW_AT_type + .byte 19 // DW_FORM_ref4 + .byte 63 // DW_AT_external + .byte 25 // DW_FORM_flag_present + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 3 // Abbreviation Code + .byte 36 // DW_TAG_base_type + .byte 0 // DW_CHILDREN_no + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 62 // DW_AT_encoding + .byte 11 // DW_FORM_data1 + .byte 11 // DW_AT_byte_size + .byte 11 // DW_FORM_data1 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 0 // EOM(3) + .section .debug_addr,"",@progbits +.Laddr_table_base0: + .xword .Lfunc_begin0 + .section 
.debug_gnu_pubnames,"",@progbits + .word .LpubNames_end0-.LpubNames_start0 // Length of Public Names Info +.LpubNames_start0: + .hword 2 // DWARF Version + .word .Lcu_begin0 // Offset of Compilation Unit Info + .word 48 // Compilation Unit Length + .word 25 // DIE offset + .byte 48 // Attributes: FUNCTION, EXTERNAL + .asciz "main" // External Name + .word 0 // End Mark +.LpubNames_end0: + .section .debug_gnu_pubtypes,"",@progbits + .word .LpubTypes_end0-.LpubTypes_start0 // Length of Public Types Info +.LpubTypes_start0: + .hword 2 // DWARF Version + .word .Lcu_begin0 // Offset of Compilation Unit Info + .word 48 // Compilation Unit Length + .word 40 // DIE offset + .byte 144 // Attributes: TYPE, STATIC + .asciz "int" // External Name + .word 0 // End Mark +.LpubTypes_end0: + .section ".note.GNU-stack","",@progbits + .addrsig + .addrsig_sym _Z6calleei + .section .debug_line,"",@progbits +.Lline_table_start0: +#--- callee.s + .file "callee.cpp" + .globl _Z6calleei // -- Begin function _Z6calleei + .type _Z6calleei,@function +_Z6calleei: // @_Z6calleei +.Lfunc_begin0: + .file 1 "." "callee.cpp" + .loc 1 1 0 // callee.cpp:1:0 + .loc 1 1 28 prologue_end // callee.cpp:1:28 + .loc 1 1 21 epilogue_begin is_stmt 0 // callee.cpp:1:21 + ret +.Lfunc_end0: + .size _Z6calleei, .Lfunc_end0-_Z6calleei + .section .debug_abbrev,"",@progbits + .byte 1 // Abbreviation Code + .byte 17 // DW_TAG_compile_unit + .byte 0 // DW_CHILDREN_no + .byte 16 // DW_AT_stmt_list + .byte 23 // DW_FORM_sec_offset + .byte 27 // DW_AT_comp_dir + .byte 14 // DW_FORM_strp + .ascii "\264B" // DW_AT_GNU_pubnames + .byte 25 // DW_FORM_flag_present + .ascii "\260B" // DW_AT_GNU_dwo_name + .byte 14 // DW_FORM_strp + .ascii "\261B" // DW_AT_GNU_dwo_id + .byte 7 // DW_FORM_data8 + .byte 17 // DW_AT_low_pc + .byte 1 // DW_FORM_addr + .byte 18 // DW_AT_high_pc + .byte 6 // DW_FORM_data4 + .ascii "\263B" // DW_AT_GNU_addr_base + .byte 23 // DW_FORM_sec_offset + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 0 // EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .word .Ldebug_info_end0-.Ldebug_info_start0 // Length of Unit +.Ldebug_info_start0: + .hword 4 // DWARF version number + .word .debug_abbrev // Offset Into Abbrev. Section + .byte 8 // Address Size (in bytes) + .byte 1 // Abbrev [1] 0xb:0x25 DW_TAG_compile_unit + .word .Lline_table_start0 // DW_AT_stmt_list + .word .Lskel_string0 // DW_AT_comp_dir + // DW_AT_GNU_pubnames + .word .Lskel_string1 // DW_AT_GNU_dwo_name + .xword 7650227797527095061 // DW_AT_GNU_dwo_id + .xword .Lfunc_begin0 // DW_AT_low_pc + .word .Lfunc_end0-.Lfunc_begin0 // DW_AT_high_pc + .word .Laddr_table_base0 // DW_AT_GNU_addr_base +.Ldebug_info_end0: + .section .debug_str,"MS",@progbits,1 +.Lskel_string0: + .asciz "." 
// string offset=0 +.Lskel_string1: + .asciz "main.exe-callee.dwo" // string offset=2 + .section .debug_str.dwo,"eMS",@progbits,1 +.Linfo_string0: + .asciz "_Z6calleei" // string offset=0 +.Linfo_string1: + .asciz "callee" // string offset=11 +.Linfo_string2: + .asciz "int" // string offset=18 +.Linfo_string3: + .asciz "x" // string offset=22 +.Linfo_string4: + .byte 0 // string offset=24 +.Linfo_string5: + .asciz "callee.cpp" // string offset=25 +.Linfo_string6: + .asciz "main.exe-callee.dwo" // string offset=36 + .section .debug_str_offsets.dwo,"e",@progbits + .word 0 + .word 11 + .word 18 + .word 22 + .word 24 + .word 25 + .word 36 + .section .debug_info.dwo,"e",@progbits + .word .Ldebug_info_dwo_end0-.Ldebug_info_dwo_start0 // Length of Unit +.Ldebug_info_dwo_start0: + .hword 4 // DWARF version number + .word 0 // Offset Into Abbrev. Section + .byte 8 // Address Size (in bytes) + .byte 1 // Abbrev [1] 0xb:0x2f DW_TAG_compile_unit + .byte 4 // DW_AT_producer + .hword 33 // DW_AT_language + .byte 5 // DW_AT_name + .byte 6 // DW_AT_GNU_dwo_name + .xword 7650227797527095061 // DW_AT_GNU_dwo_id + .byte 2 // Abbrev [2] 0x19:0x1c DW_TAG_subprogram + .byte 0 // DW_AT_low_pc + .word .Lfunc_end0-.Lfunc_begin0 // DW_AT_high_pc + .byte 1 // DW_AT_frame_base + .byte 111 + .byte 0 // DW_AT_linkage_name + .byte 1 // DW_AT_name + .byte 1 // DW_AT_decl_file + .byte 1 // DW_AT_decl_line + .word 53 // DW_AT_type + // DW_AT_external + .byte 3 // Abbrev [3] 0x29:0xb DW_TAG_formal_parameter + .byte 2 // DW_AT_location + .byte 145 + .byte 12 + .byte 3 // DW_AT_name + .byte 1 // DW_AT_decl_file + .byte 1 // DW_AT_decl_line + .word 53 // DW_AT_type + .byte 0 // End Of Children Mark + .byte 4 // Abbrev [4] 0x35:0x4 DW_TAG_base_type + .byte 2 // DW_AT_name + .byte 5 // DW_AT_encoding + .byte 4 // DW_AT_byte_size + .byte 0 // End Of Children Mark +.Ldebug_info_dwo_end0: + .section .debug_abbrev.dwo,"e",@progbits + .byte 1 // Abbreviation Code + .byte 17 // DW_TAG_compile_unit + .byte 1 // DW_CHILDREN_yes + .byte 37 // DW_AT_producer + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 19 // DW_AT_language + .byte 5 // DW_FORM_data2 + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .ascii "\260B" // DW_AT_GNU_dwo_name + .ascii "\202>" // DW_FORM_GNU_str_index + .ascii "\261B" // DW_AT_GNU_dwo_id + .byte 7 // DW_FORM_data8 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 2 // Abbreviation Code + .byte 46 // DW_TAG_subprogram + .byte 1 // DW_CHILDREN_yes + .byte 17 // DW_AT_low_pc + .ascii "\201>" // DW_FORM_GNU_addr_index + .byte 18 // DW_AT_high_pc + .byte 6 // DW_FORM_data4 + .byte 64 // DW_AT_frame_base + .byte 24 // DW_FORM_exprloc + .byte 110 // DW_AT_linkage_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 58 // DW_AT_decl_file + .byte 11 // DW_FORM_data1 + .byte 59 // DW_AT_decl_line + .byte 11 // DW_FORM_data1 + .byte 73 // DW_AT_type + .byte 19 // DW_FORM_ref4 + .byte 63 // DW_AT_external + .byte 25 // DW_FORM_flag_present + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 3 // Abbreviation Code + .byte 5 // DW_TAG_formal_parameter + .byte 0 // DW_CHILDREN_no + .byte 2 // DW_AT_location + .byte 24 // DW_FORM_exprloc + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 58 // DW_AT_decl_file + .byte 11 // DW_FORM_data1 + .byte 59 // DW_AT_decl_line + .byte 11 // DW_FORM_data1 + .byte 73 // DW_AT_type + .byte 19 // DW_FORM_ref4 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 4 // Abbreviation Code + .byte 
36 // DW_TAG_base_type + .byte 0 // DW_CHILDREN_no + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 62 // DW_AT_encoding + .byte 11 // DW_FORM_data1 + .byte 11 // DW_AT_byte_size + .byte 11 // DW_FORM_data1 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 0 // EOM(3) + .section .debug_addr,"",@progbits +.Laddr_table_base0: + .xword .Lfunc_begin0 + .section .debug_gnu_pubnames,"",@progbits + .word .LpubNames_end0-.LpubNames_start0 // Length of Public Names Info +.LpubNames_start0: + .hword 2 // DWARF Version + .word .Lcu_begin0 // Offset of Compilation Unit Info + .word 48 // Compilation Unit Length + .word 25 // DIE offset + .byte 48 // Attributes: FUNCTION, EXTERNAL + .asciz "callee" // External Name + .word 0 // End Mark +.LpubNames_end0: + .section .debug_gnu_pubtypes,"",@progbits + .word .LpubTypes_end0-.LpubTypes_start0 // Length of Public Types Info +.LpubTypes_start0: + .hword 2 // DWARF Version + .word .Lcu_begin0 // Offset of Compilation Unit Info + .word 48 // Compilation Unit Length + .word 53 // DIE offset + .byte 144 // Attributes: TYPE, STATIC + .asciz "int" // External Name + .word 0 // End Mark +.LpubTypes_end0: + .section ".note.GNU-stack","",@progbits + .addrsig + .section .debug_line,"",@progbits +.Lline_table_start0: diff --git a/bolt/test/AArch64/instrumentation_sequence.s b/bolt/test/AArch64/instrumentation_sequence.s new file mode 100644 index 0000000000000..371851fe9a8e3 --- /dev/null +++ b/bolt/test/AArch64/instrumentation_sequence.s @@ -0,0 +1,50 @@ +# This test is to validate instrumentation code sequence generated with +# and without `--no-lse-atomics`. + +# REQUIRES: system-linux,bolt-runtime,target=aarch64{{.*}} + +# RUN: %clang %cflags -pie %s -o %t.so -Wl,-q -Wl,--init=_foo -Wl,--fini=_foo + + .text + .global _foo + .type _foo, %function +_foo: + ret + + .global _start + .type _start, %function +_start: + ret + + # Dummy relocation to force relocation mode + .reloc 0, R_AARCH64_NONE + +# RUN: llvm-bolt %t.so -o %t.instr.so --instrument +# RUN: llvm-objdump -d %t.instr.so | FileCheck %s --check-prefix=INLINE +# INLINE: {{.*}} <_foo>: +# INLINE-NEXT: {{.*}} stp x0, x1, [sp, #-0x10]! +# INLINE-NEXT: {{.*}} adrp x0, 0x{{[0-9a-f]*}} {{.*}} +# INLINE-NEXT: {{.*}} add x0, x0, #0x{{[0-9a-f]*}} +# INLINE-NEXT: {{.*}} mov x1, #0x1 +# INLINE-NEXT: {{.*}} stadd x1, [x0] +# INLINE-NEXT: {{.*}} ldp x0, x1, [sp], #0x10 + +# RUN: llvm-bolt %t.so -o %t.instr.no_lse.so --instrument \ +# RUN: --no-lse-atomics +# RUN: llvm-objdump -d %t.instr.no_lse.so | FileCheck %s --check-prefix=NOLSE +# NOLSE: {{.*}} <_foo>: +# NOLSE-NEXT: {{.*}} stp x0, x30, [sp, #-0x10]! +# NOLSE-NEXT: {{.*}} stp x1, x2, [sp, #-0x10]! 
+# NOLSE-NEXT: {{.*}} adrp x0, 0x{{[0-9a-f]*}} {{.*}} +# NOLSE-NEXT: {{.*}} add x0, x0, #0x{{[0-9a-f]*}} +# NOLSE-NEXT: {{.*}} adrp x1, 0x[[PAGEBASE:[0-9a-f]*]]000 {{.*}} +# NOLSE-NEXT: {{.*}} add x1, x1, #0x[[PAGEOFF:[0-9a-f]*]] +# NOLSE-NEXT: {{.*}} blr x1 +# NOLSE-NEXT: {{.*}} ldp x0, x30, [sp], #0x10 +# NOLSE: {{[0]*}}[[PAGEBASE]][[PAGEOFF]] <__bolt_instr_counter_incr>: +# NOLSE-NEXT: {{.*}} ldaxr x1, [x0] +# NOLSE-NEXT: {{.*}} add x1, x1, #0x1 +# NOLSE-NEXT: {{.*}} stlxr w2, x1, [x0] +# NOLSE-NEXT: {{.*}} cbnz w2, 0x{{[0-9[a-f]*}} <__bolt_instr_counter_incr> +# NOLSE-NEXT: {{.*}} ldp x1, x2, [sp], #0x10 +# NOLSE-NEXT: {{.*}} ret diff --git a/bolt/test/AArch64/tls-desc-call.s b/bolt/test/AArch64/tls-desc-call.s new file mode 100644 index 0000000000000..05753803c3d36 --- /dev/null +++ b/bolt/test/AArch64/tls-desc-call.s @@ -0,0 +1,35 @@ +# RUN: %clang %cflags %s -o %t.so -fPIC -shared -Wl,-q +# RUN: llvm-bolt %t.so -o %t.bolt --debug-only=bolt 2>&1 | FileCheck %s + +# REQUIRES: asserts + +## Verify that R_AARCH64_TLSDESC_CALL relocations are ignored + +# CHECK-NOT: Relocation {{.*}} R_AARCH64_TLSDESC_CALL + + .text + .globl get_tls_var + .p2align 2 + .type get_tls_var,@function +get_tls_var: + .cfi_startproc + str x30, [sp, #-16]! + adrp x0, :tlsdesc:tls_var + ldr x1, [x0, :tlsdesc_lo12:tls_var] + add x0, x0, :tlsdesc_lo12:tls_var + .tlsdesccall tls_var + blr x1 + mrs x8, TPIDR_EL0 + ldr w0, [x8, x0] + ldr x30, [sp], #16 + ret + .size get_tls_var, .-get_tls_var + .cfi_endproc + + .type tls_var,@object + .section .tdata,"awT",@progbits + .globl tls_var + .p2align 2, 0x0 +tls_var: + .word 42 + .size tls_var, 4 diff --git a/bolt/test/AArch64/unmarked-data.test b/bolt/test/AArch64/unmarked-data.test index 7a62994bb5c38..af6de11f3df60 100644 --- a/bolt/test/AArch64/unmarked-data.test +++ b/bolt/test/AArch64/unmarked-data.test @@ -2,7 +2,7 @@ // RUN: yaml2obj %S/Inputs/unmarked-data.yaml -o %t.exe // RUN: llvm-bolt %t.exe -o %t.bolt --lite=0 --use-old-text=0 2>&1 | FileCheck %s -// CHECK-NOT: BOLT-WARNING +// CHECK-NOT: BOLT-WARNING: unable to disassemble instruction at offset // RUN: llvm-objdump -j .text -d --disassemble-symbols=first,second %t.bolt | FileCheck %s -check-prefix=CHECK-SYMBOL // CHECK-SYMBOL: : // CHECK-SYMBOL: : diff --git a/bolt/test/X86/dwarf4-dwp-x86.s b/bolt/test/X86/dwarf4-dwp-x86.s new file mode 100755 index 0000000000000..6dde1678f3840 --- /dev/null +++ b/bolt/test/X86/dwarf4-dwp-x86.s @@ -0,0 +1,405 @@ +## This test checks updating debuginfo via dwarf4 dwp file +# RUN: rm -rf %t && mkdir -p %t && cd %t +# RUN: split-file %s %t +# RUN: %clangxx %cxxflags -g -gdwarf-4 -gsplit-dwarf %t/main.s %t/callee.s -o main.exe +# RUN: llvm-dwp -e %t/main.exe -o %t/main.exe.dwp +# RUN: llvm-bolt %t/main.exe -o %t/main.exe.bolt -update-debug-sections 2>&1 | FileCheck %s + +# CHECK-NOT: Assertion + +#--- main.s + .file "main.cpp" + .globl main # -- Begin function main + .type main,@function +main: # @main +.Lfunc_begin0: + .file 1 "." 
"main.cpp" + .loc 1 2 0 # main.cpp:2:0 + .loc 1 2 21 prologue_end # main.cpp:2:21 + .loc 1 2 14 epilogue_begin is_stmt 0 # main.cpp:2:14 + retq +.Lfunc_end0: + .size main, .Lfunc_end0-main + .section .debug_abbrev,"",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 0 # DW_CHILDREN_no + .byte 16 # DW_AT_stmt_list + .byte 23 # DW_FORM_sec_offset + .byte 27 # DW_AT_comp_dir + .byte 14 # DW_FORM_strp + .ascii "\264B" # DW_AT_GNU_pubnames + .byte 25 # DW_FORM_flag_present + .ascii "\260B" # DW_AT_GNU_dwo_name + .byte 14 # DW_FORM_strp + .ascii "\261B" # DW_AT_GNU_dwo_id + .byte 7 # DW_FORM_data8 + .byte 17 # DW_AT_low_pc + .byte 1 # DW_FORM_addr + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .ascii "\263B" # DW_AT_GNU_addr_base + .byte 23 # DW_FORM_sec_offset + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit +.Ldebug_info_start0: + .short 4 # DWARF version number + .long .debug_abbrev # Offset Into Abbrev. Section + .byte 8 # Address Size (in bytes) + .byte 1 # Abbrev [1] 0xb:0x25 DW_TAG_compile_unit + .long .Lline_table_start0 # DW_AT_stmt_list + .long .Lskel_string0 # DW_AT_comp_dir + # DW_AT_GNU_pubnames + .long .Lskel_string1 # DW_AT_GNU_dwo_name + .quad 1465063543908291764 # DW_AT_GNU_dwo_id + .quad .Lfunc_begin0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .long .Laddr_table_base0 # DW_AT_GNU_addr_base +.Ldebug_info_end0: + .section .debug_str,"MS",@progbits,1 +.Lskel_string0: + .asciz "." # string offset=0 +.Lskel_string1: + .asciz "main.exe-main.dwo" # string offset=2 + .section .debug_str.dwo,"eMS",@progbits,1 +.Linfo_string0: + .asciz "main" # string offset=0 +.Linfo_string1: + .asciz "int" # string offset=5 +.Linfo_string2: + .byte 0 # string offset=9 +.Linfo_string3: + .asciz "main.cpp" # string offset=10 +.Linfo_string4: + .asciz "main.exe-main.dwo" # string offset=19 + .section .debug_str_offsets.dwo,"e",@progbits + .long 0 + .long 5 + .long 9 + .long 10 + .long 19 + .section .debug_info.dwo,"e",@progbits + .long .Ldebug_info_dwo_end0-.Ldebug_info_dwo_start0 # Length of Unit +.Ldebug_info_dwo_start0: + .short 4 # DWARF version number + .long 0 # Offset Into Abbrev. 
Section + .byte 8 # Address Size (in bytes) + .byte 1 # Abbrev [1] 0xb:0x22 DW_TAG_compile_unit + .byte 2 # DW_AT_producer + .short 33 # DW_AT_language + .byte 3 # DW_AT_name + .byte 4 # DW_AT_GNU_dwo_name + .quad 1465063543908291764 # DW_AT_GNU_dwo_id + .byte 2 # Abbrev [2] 0x19:0xf DW_TAG_subprogram + .byte 0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .byte 1 # DW_AT_frame_base + .byte 86 + .byte 0 # DW_AT_name + .byte 1 # DW_AT_decl_file + .byte 2 # DW_AT_decl_line + .long 40 # DW_AT_type + # DW_AT_external + .byte 3 # Abbrev [3] 0x28:0x4 DW_TAG_base_type + .byte 1 # DW_AT_name + .byte 5 # DW_AT_encoding + .byte 4 # DW_AT_byte_size + .byte 0 # End Of Children Mark +.Ldebug_info_dwo_end0: + .section .debug_abbrev.dwo,"e",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 1 # DW_CHILDREN_yes + .byte 37 # DW_AT_producer + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 19 # DW_AT_language + .byte 5 # DW_FORM_data2 + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .ascii "\260B" # DW_AT_GNU_dwo_name + .ascii "\202>" # DW_FORM_GNU_str_index + .ascii "\261B" # DW_AT_GNU_dwo_id + .byte 7 # DW_FORM_data8 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 2 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 0 # DW_CHILDREN_no + .byte 17 # DW_AT_low_pc + .ascii "\201>" # DW_FORM_GNU_addr_index + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 64 # DW_AT_frame_base + .byte 24 # DW_FORM_exprloc + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 3 # Abbreviation Code + .byte 36 # DW_TAG_base_type + .byte 0 # DW_CHILDREN_no + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 62 # DW_AT_encoding + .byte 11 # DW_FORM_data1 + .byte 11 # DW_AT_byte_size + .byte 11 # DW_FORM_data1 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_addr,"",@progbits +.Laddr_table_base0: + .quad .Lfunc_begin0 + .section .debug_gnu_pubnames,"",@progbits + .long .LpubNames_end0-.LpubNames_start0 # Length of Public Names Info +.LpubNames_start0: + .short 2 # DWARF Version + .long .Lcu_begin0 # Offset of Compilation Unit Info + .long 48 # Compilation Unit Length + .long 25 # DIE offset + .byte 48 # Attributes: FUNCTION, EXTERNAL + .asciz "main" # External Name + .long 0 # End Mark +.LpubNames_end0: + .section .debug_gnu_pubtypes,"",@progbits + .long .LpubTypes_end0-.LpubTypes_start0 # Length of Public Types Info +.LpubTypes_start0: + .short 2 # DWARF Version + .long .Lcu_begin0 # Offset of Compilation Unit Info + .long 48 # Compilation Unit Length + .long 40 # DIE offset + .byte 144 # Attributes: TYPE, STATIC + .asciz "int" # External Name + .long 0 # End Mark +.LpubTypes_end0: + .section ".note.GNU-stack","",@progbits + .addrsig + .addrsig_sym _Z6calleei + .section .debug_line,"",@progbits +.Lline_table_start0: +#--- callee.s + .file "callee.cpp" + .globl _Z6calleei # -- Begin function _Z6calleei + .type _Z6calleei,@function +_Z6calleei: # @_Z6calleei +.Lfunc_begin0: + .file 1 "." 
"callee.cpp" + .loc 1 1 0 # callee.cpp:1:0 + .loc 1 1 28 prologue_end # callee.cpp:1:28 + .loc 1 1 21 epilogue_begin is_stmt 0 # callee.cpp:1:21 + retq +.Lfunc_end0: + .size _Z6calleei, .Lfunc_end0-_Z6calleei + .section .debug_abbrev,"",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 0 # DW_CHILDREN_no + .byte 16 # DW_AT_stmt_list + .byte 23 # DW_FORM_sec_offset + .byte 27 # DW_AT_comp_dir + .byte 14 # DW_FORM_strp + .ascii "\264B" # DW_AT_GNU_pubnames + .byte 25 # DW_FORM_flag_present + .ascii "\260B" # DW_AT_GNU_dwo_name + .byte 14 # DW_FORM_strp + .ascii "\261B" # DW_AT_GNU_dwo_id + .byte 7 # DW_FORM_data8 + .byte 17 # DW_AT_low_pc + .byte 1 # DW_FORM_addr + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .ascii "\263B" # DW_AT_GNU_addr_base + .byte 23 # DW_FORM_sec_offset + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit +.Ldebug_info_start0: + .short 4 # DWARF version number + .long .debug_abbrev # Offset Into Abbrev. Section + .byte 8 # Address Size (in bytes) + .byte 1 # Abbrev [1] 0xb:0x25 DW_TAG_compile_unit + .long .Lline_table_start0 # DW_AT_stmt_list + .long .Lskel_string0 # DW_AT_comp_dir + # DW_AT_GNU_pubnames + .long .Lskel_string1 # DW_AT_GNU_dwo_name + .quad -8413212350243343807 # DW_AT_GNU_dwo_id + .quad .Lfunc_begin0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .long .Laddr_table_base0 # DW_AT_GNU_addr_base +.Ldebug_info_end0: + .section .debug_str,"MS",@progbits,1 +.Lskel_string0: + .asciz "." # string offset=0 +.Lskel_string1: + .asciz "main.exe-callee.dwo" # string offset=2 + .section .debug_str.dwo,"eMS",@progbits,1 +.Linfo_string0: + .asciz "_Z6calleei" # string offset=0 +.Linfo_string1: + .asciz "callee" # string offset=11 +.Linfo_string2: + .asciz "int" # string offset=18 +.Linfo_string3: + .asciz "x" # string offset=22 +.Linfo_string4: + .byte 0 # string offset=24 +.Linfo_string5: + .asciz "callee.cpp" # string offset=25 +.Linfo_string6: + .asciz "main.exe-callee.dwo" # string offset=36 + .section .debug_str_offsets.dwo,"e",@progbits + .long 0 + .long 11 + .long 18 + .long 22 + .long 24 + .long 25 + .long 36 + .section .debug_info.dwo,"e",@progbits + .long .Ldebug_info_dwo_end0-.Ldebug_info_dwo_start0 # Length of Unit +.Ldebug_info_dwo_start0: + .short 4 # DWARF version number + .long 0 # Offset Into Abbrev. 
Section + .byte 8 # Address Size (in bytes) + .byte 1 # Abbrev [1] 0xb:0x2f DW_TAG_compile_unit + .byte 4 # DW_AT_producer + .short 33 # DW_AT_language + .byte 5 # DW_AT_name + .byte 6 # DW_AT_GNU_dwo_name + .quad -8413212350243343807 # DW_AT_GNU_dwo_id + .byte 2 # Abbrev [2] 0x19:0x1c DW_TAG_subprogram + .byte 0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .byte 1 # DW_AT_frame_base + .byte 86 + .byte 0 # DW_AT_linkage_name + .byte 1 # DW_AT_name + .byte 1 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .long 53 # DW_AT_type + # DW_AT_external + .byte 3 # Abbrev [3] 0x29:0xb DW_TAG_formal_parameter + .byte 2 # DW_AT_location + .byte 145 + .byte 124 + .byte 3 # DW_AT_name + .byte 1 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .long 53 # DW_AT_type + .byte 0 # End Of Children Mark + .byte 4 # Abbrev [4] 0x35:0x4 DW_TAG_base_type + .byte 2 # DW_AT_name + .byte 5 # DW_AT_encoding + .byte 4 # DW_AT_byte_size + .byte 0 # End Of Children Mark +.Ldebug_info_dwo_end0: + .section .debug_abbrev.dwo,"e",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 1 # DW_CHILDREN_yes + .byte 37 # DW_AT_producer + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 19 # DW_AT_language + .byte 5 # DW_FORM_data2 + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .ascii "\260B" # DW_AT_GNU_dwo_name + .ascii "\202>" # DW_FORM_GNU_str_index + .ascii "\261B" # DW_AT_GNU_dwo_id + .byte 7 # DW_FORM_data8 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 2 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 1 # DW_CHILDREN_yes + .byte 17 # DW_AT_low_pc + .ascii "\201>" # DW_FORM_GNU_addr_index + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 64 # DW_AT_frame_base + .byte 24 # DW_FORM_exprloc + .byte 110 # DW_AT_linkage_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 3 # Abbreviation Code + .byte 5 # DW_TAG_formal_parameter + .byte 0 # DW_CHILDREN_no + .byte 2 # DW_AT_location + .byte 24 # DW_FORM_exprloc + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 4 # Abbreviation Code + .byte 36 # DW_TAG_base_type + .byte 0 # DW_CHILDREN_no + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 62 # DW_AT_encoding + .byte 11 # DW_FORM_data1 + .byte 11 # DW_AT_byte_size + .byte 11 # DW_FORM_data1 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_addr,"",@progbits +.Laddr_table_base0: + .quad .Lfunc_begin0 + .section .debug_gnu_pubnames,"",@progbits + .long .LpubNames_end0-.LpubNames_start0 # Length of Public Names Info +.LpubNames_start0: + .short 2 # DWARF Version + .long .Lcu_begin0 # Offset of Compilation Unit Info + .long 48 # Compilation Unit Length + .long 25 # DIE offset + .byte 48 # Attributes: FUNCTION, EXTERNAL + .asciz "callee" # External Name + .long 0 # End Mark +.LpubNames_end0: + .section .debug_gnu_pubtypes,"",@progbits + .long .LpubTypes_end0-.LpubTypes_start0 # Length of Public Types Info +.LpubTypes_start0: + .short 2 # DWARF Version + .long 
.Lcu_begin0 # Offset of Compilation Unit Info + .long 48 # Compilation Unit Length + .long 53 # DIE offset + .byte 144 # Attributes: TYPE, STATIC + .asciz "int" # External Name + .long 0 # End Mark +.LpubTypes_end0: + .section ".note.GNU-stack","",@progbits + .addrsig + .section .debug_line,"",@progbits +.Lline_table_start0: diff --git a/bolt/test/X86/dwarf5-dwarf4-gdb-index-types-gdb-generated-gdb11.test b/bolt/test/X86/dwarf5-dwarf4-gdb-index-types-gdb-generated-gdb11.test index 465062560d4fc..9b20325bd1fab 100644 --- a/bolt/test/X86/dwarf5-dwarf4-gdb-index-types-gdb-generated-gdb11.test +++ b/bolt/test/X86/dwarf5-dwarf4-gdb-index-types-gdb-generated-gdb11.test @@ -18,9 +18,9 @@ # POSTCHECK-NEXT: 1: offset = 0x00000000, type_offset = 0x0000001e, type_signature = 0x00f6cca4e3a15118 # POSTCHECK: Address area offset = 0x68, has 2 entries # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR:]], -# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 1 +# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 0 # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR1:]], -# POSTCHECK-SAME: 0x[[#ADDR1 + 0xd]]) (Size: 0xd), CU id = 2 +# POSTCHECK-SAME: 0x[[#ADDR1 + 0xd]]) (Size: 0xd), CU id = 1 # POSTCHECK: Symbol table offset = 0x90, size = 1024, filled slots # POSTCHECK-NEXT: 2: Name offset = 0x20, CU vector offset = 0x0 # POSTCHECK-NEXT: String name: S, CU vector index: 0 diff --git a/bolt/test/X86/dwarf5-dwarf4-gdb-index-types-lld-generated.test b/bolt/test/X86/dwarf5-dwarf4-gdb-index-types-lld-generated.test index 7589bfac57f58..e70bc89c42e22 100644 --- a/bolt/test/X86/dwarf5-dwarf4-gdb-index-types-lld-generated.test +++ b/bolt/test/X86/dwarf5-dwarf4-gdb-index-types-lld-generated.test @@ -15,9 +15,9 @@ # POSTCHECK: Types CU list offset = 0x38, has 0 entries # POSTCHECK: Address area offset = 0x38, has 2 entries # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR:]], -# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 1 +# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 0 # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR1:]], -# POSTCHECK-SAME: 0x[[#ADDR1 + 0xd]]) (Size: 0xd), CU id = 2 +# POSTCHECK-SAME: 0x[[#ADDR1 + 0xd]]) (Size: 0xd), CU id = 1 # POSTCHECK: Symbol table offset = 0x60, size = 1024, filled slots # POSTCHECK-NEXT: 2: Name offset = 0x38, CU vector offset = 0x0 # POSTCHECK-NEXT: String name: S, CU vector index: 0 diff --git a/bolt/test/X86/dwarf5-dwoid-no-dwoname.s b/bolt/test/X86/dwarf5-dwoid-no-dwoname.s index 415d0b8f987e6..bc35973dc6f3f 100644 --- a/bolt/test/X86/dwarf5-dwoid-no-dwoname.s +++ b/bolt/test/X86/dwarf5-dwoid-no-dwoname.s @@ -1,7 +1,7 @@ ## Check that DWARF CU with a valid DWOId but missing a dwo_name is correctly detected. 
# RUN: rm -rf %t && mkdir -p %t && cd %t # RUN: llvm-mc -dwarf-version=5 -filetype=obj -triple x86_64-unknown-linux %s -split-dwarf-file=main.dwo -o main.o -# RUN: %clang -O3 -g -gdwarf-5 -gsplit-dwarf -Wl,-q %t/main.o -o main.exe +# RUN: %clang %cflags -O3 -g -gdwarf-5 -gsplit-dwarf -Wl,-q %t/main.o -o main.exe # RUN: llvm-bolt %t/main.exe -o %t/main.exe.bolt -update-debug-sections 2>&1 | FileCheck %s --check-prefix=PRECHECK # PRECHECK: BOLT-ERROR: broken DWARF found in CU at offset 0x3e (DWOId=0x0, missing DW_AT_dwo_name / DW_AT_GNU_dwo_name) diff --git a/bolt/test/X86/dwarf5-gdb-index-types-gdb-generated-gdb11.test b/bolt/test/X86/dwarf5-gdb-index-types-gdb-generated-gdb11.test index 139b24afa1b0d..2426f240ad11c 100644 --- a/bolt/test/X86/dwarf5-gdb-index-types-gdb-generated-gdb11.test +++ b/bolt/test/X86/dwarf5-gdb-index-types-gdb-generated-gdb11.test @@ -18,9 +18,9 @@ # POSTCHECK-NEXT: 1: offset = 0x00000040, type_offset = 0x00000023, type_signature = 0x00f6cca4e3a15118 # POSTCHECK: Address area offset = 0x68, has 2 entries # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR:]], -# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 1 +# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 0 # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR1:]], -# POSTCHECK-SAME: 0x[[#ADDR1 + 0xd]]) (Size: 0xd), CU id = 3 +# POSTCHECK-SAME: 0x[[#ADDR1 + 0xd]]) (Size: 0xd), CU id = 1 # POSTCHECK: Symbol table offset = 0x90, size = 1024, filled slots # POSTCHECK-NEXT: 2: Name offset = 0x28, CU vector offset = 0x0 # POSTCHECK-NEXT: String name: S, CU vector index: 0 diff --git a/bolt/test/X86/dwarf5-gdb-index-types-gdb-generated-gdb9.test b/bolt/test/X86/dwarf5-gdb-index-types-gdb-generated-gdb9.test index 26ee101e9d1d1..b67c5b28e7ce9 100644 --- a/bolt/test/X86/dwarf5-gdb-index-types-gdb-generated-gdb9.test +++ b/bolt/test/X86/dwarf5-gdb-index-types-gdb-generated-gdb9.test @@ -20,7 +20,7 @@ # POSTCHECK-NEXT: 1: offset = 0x00000040, type_offset = 0x00000023, type_signature = 0x00f6cca4e3a15118 # POSTCHECK: Address area offset = 0x88, has 2 entries # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR:]], -# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 1 +# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 2 # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR1:]], # POSTCHECK-SAME: 0x[[#ADDR1 + 0xd]]) (Size: 0xd), CU id = 3 # POSTCHECK: Symbol table offset = 0xb0, size = 1024, filled slots @@ -37,7 +37,7 @@ # POSTCHECK-NEXT: 754: Name offset = 0x43, CU vector offset = 0x0 # POSTCHECK-NEXT: String name: int, CU vector index: 0 # POSTCHECK: Constant pool offset = 0x20b0, has 5 CU vectors -# POSTCHECK-NEXT: 0(0x0): 0x90000001 +# POSTCHECK-NEXT: 0(0x0): 0x90000002 # POSTCHECK-NEXT: 1(0x8): 0x90000003 -# POSTCHECK-NEXT: 2(0x10): 0x30000001 +# POSTCHECK-NEXT: 2(0x10): 0x30000002 # POSTCHECK-NEXT: 3(0x18): 0x30000003 diff --git a/bolt/test/X86/dwarf5-gdb-index-types-lld-generated.test b/bolt/test/X86/dwarf5-gdb-index-types-lld-generated.test index 731c560133399..740f199d14042 100644 --- a/bolt/test/X86/dwarf5-gdb-index-types-lld-generated.test +++ b/bolt/test/X86/dwarf5-gdb-index-types-lld-generated.test @@ -15,9 +15,9 @@ # POSTCHECK: Types CU list offset = 0x38, has 0 entries # POSTCHECK: Address area offset = 0x38, has 2 entries # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR:]], -# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 1 +# POSTCHECK-SAME: 0x[[#ADDR + 0xf]]) (Size: 0xf), CU id = 0 # POSTCHECK-NEXT: Low/High address = [0x[[#%.4x,ADDR1:]], -# POSTCHECK-SAME: 
0x[[#ADDR1 + 0xd]]) (Size: 0xd), CU id = 3 +# POSTCHECK-SAME: 0x[[#ADDR1 + 0xd]]) (Size: 0xd), CU id = 1 # POSTCHECK: Symbol table offset = 0x60, size = 1024, filled slots # POSTCHECK-NEXT: 2: Name offset = 0x38, CU vector offset = 0x0 # POSTCHECK-NEXT: String name: S, CU vector index: 0 diff --git a/bolt/test/binary-analysis/AArch64/cmdline-args.test b/bolt/test/binary-analysis/AArch64/cmdline-args.test index 3e70b2c0d3bb9..9660ad3bf80f7 100644 --- a/bolt/test/binary-analysis/AArch64/cmdline-args.test +++ b/bolt/test/binary-analysis/AArch64/cmdline-args.test @@ -33,6 +33,7 @@ HELP-NEXT: OPTIONS: HELP-EMPTY: HELP-NEXT: BinaryAnalysis options: HELP-EMPTY: +HELP-NEXT: --auth-traps-on-failure - Assume authentication instructions always trap on failure HELP-NEXT: --scanners= - which gadget scanners to run HELP-NEXT: =pacret - pac-ret: return address protection (subset of "pauth") HELP-NEXT: =pauth - All Pointer Authentication scanners diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-authentication-oracles.s b/bolt/test/binary-analysis/AArch64/gs-pauth-authentication-oracles.s index f44ba21b9d484..9f580b66f47c7 100644 --- a/bolt/test/binary-analysis/AArch64/gs-pauth-authentication-oracles.s +++ b/bolt/test/binary-analysis/AArch64/gs-pauth-authentication-oracles.s @@ -1,6 +1,7 @@ // RUN: %clang %cflags -march=armv8.3-a %s -o %t.exe -// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s -// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s +// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth --auth-traps-on-failure %t.exe 2>&1 | FileCheck -check-prefix=FPAC %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s // The detection of compiler-generated explicit pointer checks is tested in // gs-pauth-address-checks.s, for that reason only test here "dummy-load" and @@ -8,6 +9,7 @@ // detected per-instruction and per-BB. 
// PACRET-NOT: authentication oracle found in function +// FPAC-NOT: authentication oracle found in function .text diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-calls.s b/bolt/test/binary-analysis/AArch64/gs-pauth-calls.s index fb0bc7cff2377..5e88e105a33f0 100644 --- a/bolt/test/binary-analysis/AArch64/gs-pauth-calls.s +++ b/bolt/test/binary-analysis/AArch64/gs-pauth-calls.s @@ -1,6 +1,7 @@ // RUN: %clang %cflags -march=armv8.3-a %s -o %t.exe -// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s -// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s +// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth --auth-traps-on-failure %t.exe 2>&1 | FileCheck %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s // PACRET-NOT: non-protected call found in function diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-debug-output.s b/bolt/test/binary-analysis/AArch64/gs-pauth-debug-output.s index b1cec7f92ad05..a3ad7effe4b0d 100644 --- a/bolt/test/binary-analysis/AArch64/gs-pauth-debug-output.s +++ b/bolt/test/binary-analysis/AArch64/gs-pauth-debug-output.s @@ -1,10 +1,14 @@ // REQUIRES: asserts // // RUN: %clang %cflags -march=armv8.3-a %s -o %t.exe -// RUN: llvm-bolt-binary-analysis --scanners=pacret -no-threads \ -// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck %s -// RUN: llvm-bolt-binary-analysis --scanners=pauth -no-threads \ -// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,PAUTH %s +// RUN: llvm-bolt-binary-analysis --scanners=pacret --no-threads \ +// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,NOFPAC %s +// RUN: llvm-bolt-binary-analysis --scanners=pacret --no-threads --auth-traps-on-failure \ +// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,FPAC %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth --no-threads \ +// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,NOFPAC,AUTH-ORACLES,PAUTH %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth --no-threads --auth-traps-on-failure \ +// RUN: -debug-only bolt-pauth-scanner %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,FPAC,PAUTH %s // Check the debug output generated by PAuth gadget scanner to make sure the // that output is kept meaningful and to provide an overview of what happens @@ -61,30 +65,54 @@ simple: // CHECK-NEXT: State 1: src-state // CHECK-NEXT: State 2: src-state) // CHECK-NEXT: merged state: src-state -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state) -// CHECK-NEXT: .. result: (src-state) -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state) -// CHECK-NEXT: .. result: (src-state) -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state) -// CHECK-NEXT: .. result: (src-state) -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state) -// CHECK-NEXT: .. result: (src-state) -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state) -// CHECK-NEXT: .. result: (src-state) -// CHECK-NEXT: DataflowSrcSafetyAnalysis::Confluence( -// CHECK-NEXT: State 1: src-state -// CHECK-NEXT: State 2: src-state) -// CHECK-NEXT: merged state: src-state -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state) -// CHECK-NEXT: .. 
result: (src-state) -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state) -// CHECK-NEXT: .. result: (src-state) -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state) -// CHECK-NEXT: .. result: (src-state) -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state) -// CHECK-NEXT: .. result: (src-state) -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state) -// CHECK-NEXT: .. result: (src-state) +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// NOFPAC-NEXT: DataflowSrcSafetyAnalysis::Confluence( +// NOFPAC-NEXT: State 1: src-state +// NOFPAC-NEXT: State 2: src-state) +// NOFPAC-NEXT: merged state: src-state +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// NOFPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state) +// NOFPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state) +// FPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state) +// FPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state) +// FPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state) +// FPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state) +// FPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: DataflowSrcSafetyAnalysis::Confluence( +// FPAC-NEXT: State 1: src-state +// FPAC-NEXT: State 2: src-state) +// FPAC-NEXT: merged state: src-state +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( autiza x0, src-state) +// FPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( blr x0, src-state) +// FPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ldp x29, x30, [sp], #0x10, src-state) +// FPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( hint #29, src-state) +// FPAC-NEXT: .. result: (src-state) +// FPAC-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state) +// FPAC-NEXT: .. result: (src-state) // CHECK-NEXT: After src register safety analysis: // CHECK-NEXT: Binary Function "simple" { // CHECK-NEXT: Number : 1 @@ -149,9 +177,9 @@ clobber: // CHECK-EMPTY: // CHECK-NEXT: Running detailed src register safety analysis... // CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( mov w30, #0x0, src-state) -// CHECK-NEXT: .. result: (src-state) -// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state) -// CHECK-NEXT: .. result: (src-state) +// CHECK-NEXT: .. 
result: (src-state) +// CHECK-NEXT: SrcSafetyAnalysis::ComputeNext( ret x30, src-state) +// CHECK-NEXT: .. result: (src-state) // CHECK-NEXT: After detailed src register safety analysis: // CHECK-NEXT: Binary Function "clobber" { // ... @@ -161,7 +189,7 @@ clobber: // Iterating over the reports and attaching clobbering info: // CHECK-EMPTY: -// CHECK-NEXT: Attaching clobbering info to: 00000000: ret # DataflowSrcSafetyAnalysis: src-state +// CHECK-NEXT: Attaching clobbering info to: 00000000: ret # DataflowSrcSafetyAnalysis: src-state .globl nocfg .type nocfg,@function @@ -255,53 +283,56 @@ auth_oracle: // ... // CHECK: End of Function "auth_oracle" // ... -// PAUTH: Running dst register safety analysis... -// PAUTH-NEXT: DstSafetyAnalysis::ComputeNext( ret x30, dst-state) -// PAUTH-NEXT: .. result: (dst-state) -// PAUTH-NEXT: DstSafetyAnalysis::ComputeNext( autia x0, x1, dst-state) -// PAUTH-NEXT: .. result: (dst-state) -// PAUTH-NEXT: After dst register safety analysis: -// PAUTH-NEXT: Binary Function "auth_oracle" { -// PAUTH-NEXT: Number : 4 -// PAUTH-NEXT: State : CFG constructed +// FPAC-NOT: Running dst register safety analysis +// FPAC-NOT: DstSafetyAnalysis::ComputeNext +// FPAC-NOT: {{.*dst-state.*}} +// AUTH-ORACLES: Running dst register safety analysis... +// AUTH-ORACLES-NEXT: DstSafetyAnalysis::ComputeNext( ret x30, dst-state) +// AUTH-ORACLES-NEXT: .. result: (dst-state) +// AUTH-ORACLES-NEXT: DstSafetyAnalysis::ComputeNext( autia x0, x1, dst-state) +// AUTH-ORACLES-NEXT: .. result: (dst-state) +// AUTH-ORACLES-NEXT: After dst register safety analysis: +// AUTH-ORACLES-NEXT: Binary Function "auth_oracle" { +// AUTH-ORACLES-NEXT: Number : 4 +// AUTH-ORACLES-NEXT: State : CFG constructed // ... -// PAUTH: BB Layout : [[BB0]] -// PAUTH-NEXT: } -// PAUTH-NEXT: [[BB0]] (2 instructions, align : 1) -// PAUTH-NEXT: Entry Point -// PAUTH-NEXT: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state -// PAUTH-NEXT: 00000004: ret # DataflowDstSafetyAnalysis: dst-state -// PAUTH-EMPTY: -// PAUTH-NEXT: DWARF CFI Instructions: -// PAUTH-NEXT: -// PAUTH-NEXT: End of Function "auth_oracle" -// PAUTH-EMPTY: -// PAUTH-NEXT: Found auth inst: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state -// PAUTH-NEXT: Authenticated reg: X0 -// PAUTH-NEXT: safe output registers: LR W30 W30_HI{{[ \t]*$}} -// PAUTH-EMPTY: -// PAUTH-NEXT: Running detailed dst register safety analysis... -// PAUTH-NEXT: DstSafetyAnalysis::ComputeNext( ret x30, dst-state) -// PAUTH-NEXT: .. result: (dst-state) -// PAUTH-NEXT: DstSafetyAnalysis::ComputeNext( autia x0, x1, dst-state) -// PAUTH-NEXT: .. 
result: (dst-state) -// PAUTH-NEXT: After detailed dst register safety analysis: -// PAUTH-NEXT: Binary Function "auth_oracle" { -// PAUTH-NEXT: Number : 4 -// PAUTH-NEXT: State : CFG constructed +// AUTH-ORACLES: BB Layout : [[BB0]] +// AUTH-ORACLES-NEXT: } +// AUTH-ORACLES-NEXT: [[BB0]] (2 instructions, align : 1) +// AUTH-ORACLES-NEXT: Entry Point +// AUTH-ORACLES-NEXT: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state +// AUTH-ORACLES-NEXT: 00000004: ret # DataflowDstSafetyAnalysis: dst-state +// AUTH-ORACLES-EMPTY: +// AUTH-ORACLES-NEXT: DWARF CFI Instructions: +// AUTH-ORACLES-NEXT: +// AUTH-ORACLES-NEXT: End of Function "auth_oracle" +// AUTH-ORACLES-EMPTY: +// AUTH-ORACLES-NEXT: Found auth inst: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state +// AUTH-ORACLES-NEXT: Authenticated reg: X0 +// AUTH-ORACLES-NEXT: safe output registers: LR W30 W30_HI{{[ \t]*$}} +// AUTH-ORACLES-EMPTY: +// AUTH-ORACLES-NEXT: Running detailed dst register safety analysis... +// AUTH-ORACLES-NEXT: DstSafetyAnalysis::ComputeNext( ret x30, dst-state) +// AUTH-ORACLES-NEXT: .. result: (dst-state) +// AUTH-ORACLES-NEXT: DstSafetyAnalysis::ComputeNext( autia x0, x1, dst-state) +// AUTH-ORACLES-NEXT: .. result: (dst-state) +// AUTH-ORACLES-NEXT: After detailed dst register safety analysis: +// AUTH-ORACLES-NEXT: Binary Function "auth_oracle" { +// AUTH-ORACLES-NEXT: Number : 4 +// AUTH-ORACLES-NEXT: State : CFG constructed // ... -// PAUTH: BB Layout : [[BB0]] -// PAUTH-NEXT: } -// PAUTH-NEXT: [[BB0]] (2 instructions, align : 1) -// PAUTH-NEXT: Entry Point -// PAUTH-NEXT: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state -// PAUTH-NEXT: 00000004: ret # DataflowDstSafetyAnalysis: dst-state -// PAUTH-EMPTY: -// PAUTH-NEXT: DWARF CFI Instructions: -// PAUTH-NEXT: -// PAUTH-NEXT: End of Function "auth_oracle" -// PAUTH-EMPTY: -// PAUTH-NEXT: Attaching leakage info to: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state +// AUTH-ORACLES: BB Layout : [[BB0]] +// AUTH-ORACLES-NEXT: } +// AUTH-ORACLES-NEXT: [[BB0]] (2 instructions, align : 1) +// AUTH-ORACLES-NEXT: Entry Point +// AUTH-ORACLES-NEXT: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state +// AUTH-ORACLES-NEXT: 00000004: ret # DataflowDstSafetyAnalysis: dst-state +// AUTH-ORACLES-EMPTY: +// AUTH-ORACLES-NEXT: DWARF CFI Instructions: +// AUTH-ORACLES-NEXT: +// AUTH-ORACLES-NEXT: End of Function "auth_oracle" +// AUTH-ORACLES-EMPTY: +// AUTH-ORACLES-NEXT: Attaching leakage info to: 00000000: autia x0, x1 # DataflowDstSafetyAnalysis: dst-state // Gadget scanner should not crash on CFI instructions, including when debug-printing them. 
// Note that the particular debug output is not checked, but BOLT should be diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-signing-oracles.s b/bolt/test/binary-analysis/AArch64/gs-pauth-signing-oracles.s index 4d4bb7b0fb251..7d908f234d852 100644 --- a/bolt/test/binary-analysis/AArch64/gs-pauth-signing-oracles.s +++ b/bolt/test/binary-analysis/AArch64/gs-pauth-signing-oracles.s @@ -1,6 +1,7 @@ // RUN: %clang %cflags -march=armv8.3-a+pauth-lr -Wl,--no-relax %s -o %t.exe -// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s -// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s +// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,NOFPAC %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth --auth-traps-on-failure %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,FPAC %s // The detection of compiler-generated explicit pointer checks is tested in // gs-pauth-address-checks.s, for that reason only test here "dummy-load" and @@ -66,9 +67,10 @@ good_sign_auted_checked_brk: .globl bad_sign_authed_unchecked .type bad_sign_authed_unchecked,@function bad_sign_authed_unchecked: -// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// FPAC-NOT: bad_sign_authed_unchecked +// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: autda x0, x2 pacda x0, x1 ret @@ -266,9 +268,10 @@ bad_call_between_checked_and_used: .globl bad_transition_check_then_auth .type bad_transition_check_then_auth,@function bad_transition_check_then_auth: -// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_transition_check_then_auth, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// FPAC-NOT: bad_transition_check_then_auth +// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_transition_check_then_auth, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: ldr x2, [x0] autda x0, x2 pacda x0, x1 @@ -278,9 +281,10 @@ bad_transition_check_then_auth: .globl bad_transition_auth_then_auth .type bad_transition_auth_then_auth,@function bad_transition_auth_then_auth: -// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_transition_auth_then_auth, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// FPAC-NOT: bad_transition_auth_then_auth +// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_transition_auth_then_auth, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 +// NOFPAC-NEXT: The 0 instructions that write to the affected 
registers after any authentication are: autda x0, x2 autda x0, x2 pacda x0, x1 @@ -363,9 +367,10 @@ good_sign_auted_checked_brk_multi_bb: .globl bad_sign_authed_unchecked_multi_bb .type bad_sign_authed_unchecked_multi_bb,@function bad_sign_authed_unchecked_multi_bb: -// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked_multi_bb, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// FPAC-NOT: bad_sign_authed_unchecked_multi_bb +// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked_multi_bb, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: autda x0, x2 cbz x3, 1f ldr x2, [x0] @@ -534,9 +539,10 @@ good_sign_auted_checked_ldr_nocfg: .globl bad_sign_authed_unchecked_nocfg .type bad_sign_authed_unchecked_nocfg,@function bad_sign_authed_unchecked_nocfg: -// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked_nocfg, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// FPAC-NOT: bad_sign_authed_unchecked_nocfg +// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_sign_authed_unchecked_nocfg, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: adr x3, 1f br x3 1: @@ -640,9 +646,10 @@ bad_clobber_between_checked_and_used_nocfg: .globl bad_transition_check_then_auth_nocfg .type bad_transition_check_then_auth_nocfg,@function bad_transition_check_then_auth_nocfg: -// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_transition_check_then_auth_nocfg, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// FPAC-NOT: bad_transition_check_then_auth_nocfg +// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_transition_check_then_auth_nocfg, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: adr x3, 1f br x3 1: @@ -655,9 +662,10 @@ bad_transition_check_then_auth_nocfg: .globl bad_transition_auth_then_auth_nocfg .type bad_transition_auth_then_auth_nocfg,@function bad_transition_auth_then_auth_nocfg: -// CHECK-LABEL: GS-PAUTH: signing oracle found in function bad_transition_auth_then_auth_nocfg, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// FPAC-NOT: bad_transition_auth_then_auth_nocfg +// NOFPAC-LABEL: GS-PAUTH: signing oracle found in function bad_transition_auth_then_auth_nocfg, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: pacda x0, x1 +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: adr x3, 1f br x3 1: diff --git a/bolt/test/binary-analysis/AArch64/gs-pauth-tail-calls.s b/bolt/test/binary-analysis/AArch64/gs-pauth-tail-calls.s index 2d3c2f1a632ca..59b7d929275a9 100644 --- 
a/bolt/test/binary-analysis/AArch64/gs-pauth-tail-calls.s +++ b/bolt/test/binary-analysis/AArch64/gs-pauth-tail-calls.s @@ -1,6 +1,7 @@ // RUN: %clang %cflags -Wl,--entry=_custom_start -march=armv8.3-a %s -o %t.exe -// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s -// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck %s +// RUN: llvm-bolt-binary-analysis --scanners=pacret %t.exe 2>&1 | FileCheck -check-prefix=PACRET %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth --auth-traps-on-failure %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,FPAC %s +// RUN: llvm-bolt-binary-analysis --scanners=pauth %t.exe 2>&1 | FileCheck -check-prefixes=CHECK,NOFPAC %s // PACRET-NOT: untrusted link register found before tail call @@ -89,19 +90,20 @@ bad_indirect_tailcall_not_auted: .globl bad_direct_tailcall_untrusted .type bad_direct_tailcall_untrusted,@function bad_direct_tailcall_untrusted: -// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: -// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: The 1 instructions that leak the affected registers are: -// CHECK-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL -// CHECK-NEXT: This happens in the following basic block: -// CHECK-NEXT: {{[0-9a-f]+}}: paciasp -// CHECK-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]! -// CHECK-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10 -// CHECK-NEXT: {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: {{[0-9a-f]+}}: b callee # TAILCALL +// FPAC-NOT: bad_direct_tailcall_untrusted +// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: The 1 instructions that leak the affected registers are: +// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL +// NOFPAC-NEXT: This happens in the following basic block: +// NOFPAC-NEXT: {{[0-9a-f]+}}: paciasp +// NOFPAC-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]! +// NOFPAC-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10 +// NOFPAC-NEXT: {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: {{[0-9a-f]+}}: b callee # TAILCALL paciasp stp x29, x30, [sp, #-0x10]! ldp x29, x30, [sp], #0x10 @@ -114,19 +116,20 @@ bad_direct_tailcall_untrusted: bad_plt_tailcall_untrusted: // FIXME: Calls via PLT are disassembled incorrectly. Nevertheless, they are // still detected as tail calls. 
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_plt_tailcall_untrusted, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: -// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_plt_tailcall_untrusted, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: The 1 instructions that leak the affected registers are: -// CHECK-NEXT: 1. {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL -// CHECK-NEXT: This happens in the following basic block: -// CHECK-NEXT: {{[0-9a-f]+}}: paciasp -// CHECK-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]! -// CHECK-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10 -// CHECK-NEXT: {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL +// FPAC-NOT: bad_plt_tailcall_untrusted +// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_plt_tailcall_untrusted, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_plt_tailcall_untrusted, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: The 1 instructions that leak the affected registers are: +// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL +// NOFPAC-NEXT: This happens in the following basic block: +// NOFPAC-NEXT: {{[0-9a-f]+}}: paciasp +// NOFPAC-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]! +// NOFPAC-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10 +// NOFPAC-NEXT: {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted # TAILCALL paciasp stp x29, x30, [sp, #-0x10]! ldp x29, x30, [sp], #0x10 @@ -137,20 +140,21 @@ bad_plt_tailcall_untrusted: .globl bad_indirect_tailcall_untrusted .type bad_indirect_tailcall_untrusted,@function bad_indirect_tailcall_untrusted: -// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: br x0 # TAILCALL -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: -// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: The 1 instructions that leak the affected registers are: -// CHECK-NEXT: 1. {{[0-9a-f]+}}: br x0 # TAILCALL -// CHECK-NEXT: This happens in the following basic block: -// CHECK-NEXT: {{[0-9a-f]+}}: paciasp -// CHECK-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]! 
-// CHECK-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10 -// CHECK-NEXT: {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: {{[0-9a-f]+}}: autia x0, x1 -// CHECK-NEXT: {{[0-9a-f]+}}: br x0 # TAILCALL +// FPAC-NOT: bad_indirect_tailcall_untrusted +// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: br x0 # TAILCALL +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: The 1 instructions that leak the affected registers are: +// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: br x0 # TAILCALL +// NOFPAC-NEXT: This happens in the following basic block: +// NOFPAC-NEXT: {{[0-9a-f]+}}: paciasp +// NOFPAC-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]! +// NOFPAC-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10 +// NOFPAC-NEXT: {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: {{[0-9a-f]+}}: autia x0, x1 +// NOFPAC-NEXT: {{[0-9a-f]+}}: br x0 # TAILCALL paciasp stp x29, x30, [sp, #-0x10]! ldp x29, x30, [sp], #0x10 @@ -251,13 +255,14 @@ bad_indirect_tailcall_not_auted_multi_bb: .globl bad_direct_tailcall_untrusted_multi_bb .type bad_direct_tailcall_untrusted_multi_bb,@function bad_direct_tailcall_untrusted_multi_bb: -// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: -// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: The 1 instructions that leak the affected registers are: -// CHECK-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL +// FPAC-NOT: bad_direct_tailcall_untrusted_multi_bb +// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: The 1 instructions that leak the affected registers are: +// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL paciasp stp x29, x30, [sp, #-0x10]! 
ldp x29, x30, [sp], #0x10 @@ -271,12 +276,13 @@ bad_direct_tailcall_untrusted_multi_bb: .globl bad_indirect_tailcall_untrusted_multi_bb .type bad_indirect_tailcall_untrusted_multi_bb,@function bad_indirect_tailcall_untrusted_multi_bb: -// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: br x0 # UNKNOWN CONTROL FLOW -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: -// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: The 0 instructions that leak the affected registers are: +// FPAC-NOT: bad_indirect_tailcall_untrusted_multi_bb +// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: br x0 # UNKNOWN CONTROL FLOW +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_multi_bb, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: The 0 instructions that leak the affected registers are: paciasp stp x29, x30, [sp, #-0x10]! ldp x29, x30, [sp], #0x10 @@ -397,13 +403,14 @@ bad_indirect_tailcall_not_auted_nocfg: .globl bad_direct_tailcall_untrusted_nocfg .type bad_direct_tailcall_untrusted_nocfg,@function bad_direct_tailcall_untrusted_nocfg: -// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted_nocfg, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: -// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted_nocfg, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: The 1 instructions that leak the affected registers are: -// CHECK-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL +// FPAC-NOT: bad_direct_tailcall_untrusted_nocfg +// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_direct_tailcall_untrusted_nocfg, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b callee # TAILCALL +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_direct_tailcall_untrusted_nocfg, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: The 1 instructions that leak the affected registers are: +// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b callee # TAILCALL paciasp stp x29, x30, [sp, #-0x10]! adr x3, 1f @@ -419,13 +426,14 @@ bad_direct_tailcall_untrusted_nocfg: bad_plt_tailcall_untrusted_nocfg: // FIXME: Calls via PLT are disassembled incorrectly. Nevertheless, they are // still detected as tail calls. 
-// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_plt_tailcall_untrusted_nocfg, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted_nocfg # TAILCALL -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: -// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_plt_tailcall_untrusted_nocfg, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: The 1 instructions that leak the affected registers are: -// CHECK-NEXT: 1. {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted_nocfg # TAILCALL +// FPAC-NOT: bad_plt_tailcall_untrusted_nocfg +// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_plt_tailcall_untrusted_nocfg, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted_nocfg # TAILCALL +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_plt_tailcall_untrusted_nocfg, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: The 1 instructions that leak the affected registers are: +// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: b bad_indirect_tailcall_untrusted_nocfg # TAILCALL paciasp stp x29, x30, [sp, #-0x10]! adr x3, 1f @@ -441,11 +449,12 @@ bad_plt_tailcall_untrusted_nocfg: bad_indirect_tailcall_untrusted_nocfg: // Known false negative: ignoring UNKNOWN CONTROL FLOW without CFG. // Authentication oracle is found by a generic checker, though. -// CHECK-NOT: untrusted link register{{.*}}bad_indirect_tailcall_untrusted_nocfg -// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_nocfg, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: The 0 instructions that leak the affected registers are: -// CHECK-NOT: untrusted link register{{.*}}bad_indirect_tailcall_untrusted_nocfg +// FPAC-NOT: bad_indirect_tailcall_untrusted_nocfg +// NOFPAC-NOT: untrusted link register{{.*}}bad_indirect_tailcall_untrusted_nocfg +// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_nocfg, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: The 0 instructions that leak the affected registers are: +// NOFPAC-NOT: untrusted link register{{.*}}bad_indirect_tailcall_untrusted_nocfg paciasp stp x29, x30, [sp, #-0x10]! adr x3, 1f @@ -515,19 +524,20 @@ good_indirect_tailcall_no_clobber_v83: .globl bad_indirect_tailcall_untrusted_v83 .type bad_indirect_tailcall_untrusted_v83,@function bad_indirect_tailcall_untrusted_v83: -// CHECK-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted_v83, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: braa x0, x1 # TAILCALL -// CHECK-NEXT: The 0 instructions that write to the affected registers after any authentication are: -// CHECK-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_v83, basic block {{[^,]+}}, at address -// CHECK-NEXT: The instruction is {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: The 1 instructions that leak the affected registers are: -// CHECK-NEXT: 1. 
{{[0-9a-f]+}}: braa x0, x1 # TAILCALL -// CHECK-NEXT: This happens in the following basic block: -// CHECK-NEXT: {{[0-9a-f]+}}: paciasp -// CHECK-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]! -// CHECK-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10 -// CHECK-NEXT: {{[0-9a-f]+}}: autiasp -// CHECK-NEXT: {{[0-9a-f]+}}: braa x0, x1 # TAILCALL +// FPAC-NOT: bad_indirect_tailcall_untrusted_v83 +// NOFPAC-LABEL: GS-PAUTH: untrusted link register found before tail call in function bad_indirect_tailcall_untrusted_v83, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: braa x0, x1 # TAILCALL +// NOFPAC-NEXT: The 0 instructions that write to the affected registers after any authentication are: +// NOFPAC-LABEL: GS-PAUTH: authentication oracle found in function bad_indirect_tailcall_untrusted_v83, basic block {{[^,]+}}, at address +// NOFPAC-NEXT: The instruction is {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: The 1 instructions that leak the affected registers are: +// NOFPAC-NEXT: 1. {{[0-9a-f]+}}: braa x0, x1 # TAILCALL +// NOFPAC-NEXT: This happens in the following basic block: +// NOFPAC-NEXT: {{[0-9a-f]+}}: paciasp +// NOFPAC-NEXT: {{[0-9a-f]+}}: stp x29, x30, [sp, #-0x10]! +// NOFPAC-NEXT: {{[0-9a-f]+}}: ldp x29, x30, [sp], #0x10 +// NOFPAC-NEXT: {{[0-9a-f]+}}: autiasp +// NOFPAC-NEXT: {{[0-9a-f]+}}: braa x0, x1 # TAILCALL paciasp stp x29, x30, [sp, #-0x10]! ldp x29, x30, [sp], #0x10 diff --git a/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp b/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp index b895075e4f31c..0ac8f712e112f 100644 --- a/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp +++ b/clang-tools-extra/clang-apply-replacements/lib/Tooling/ApplyReplacements.cpp @@ -142,7 +142,7 @@ groupReplacements(const TUReplacements &TUs, const TUDiagnostics &TUDs, // build directories, make them absolute immediately. 
SmallString<128> Path = R.getFilePath(); if (BuildDir) - llvm::sys::fs::make_absolute(*BuildDir, Path); + llvm::sys::path::make_absolute(*BuildDir, Path); else SM.getFileManager().makeAbsolutePath(Path); diff --git a/clang-tools-extra/clang-doc/HTMLMustacheGenerator.cpp b/clang-tools-extra/clang-doc/HTMLMustacheGenerator.cpp index 1ab40aacbfe09..b37dc272ea156 100644 --- a/clang-tools-extra/clang-doc/HTMLMustacheGenerator.cpp +++ b/clang-tools-extra/clang-doc/HTMLMustacheGenerator.cpp @@ -274,11 +274,12 @@ Error MustacheHTMLGenerator::generateDocForInfo(Info *I, raw_ostream &OS, } Error MustacheHTMLGenerator::createResources(ClangDocContext &CDCtx) { + std::string ResourcePath(CDCtx.OutDirectory + "/html"); for (const auto &FilePath : CDCtx.UserStylesheets) - if (Error Err = copyFile(FilePath, CDCtx.OutDirectory)) + if (Error Err = copyFile(FilePath, ResourcePath)) return Err; for (const auto &FilePath : CDCtx.JsScripts) - if (Error Err = copyFile(FilePath, CDCtx.OutDirectory)) + if (Error Err = copyFile(FilePath, ResourcePath)) return Err; return Error::success(); } diff --git a/clang-tools-extra/clang-doc/Serialize.cpp b/clang-tools-extra/clang-doc/Serialize.cpp index dd7cd0b2ae736..186f634dd892a 100644 --- a/clang-tools-extra/clang-doc/Serialize.cpp +++ b/clang-tools-extra/clang-doc/Serialize.cpp @@ -780,12 +780,10 @@ static void populateSymbolInfo(SymbolInfo &I, const T *D, const FullComment *C, MangledStream << D->getNameAsString(); // A 250 length limit was chosen since 255 is a common limit across // different filesystems, with a 5 character buffer for file extensions. - if (MangledName.size() > 250) - // File creation fails if the mangled name is too long, so default to the - // USR. We should look for a better check since filesystems differ in - // maximum filename length - I.MangledName = llvm::toStringRef(llvm::toHex(I.USR)); - else + if (MangledName.size() > 250) { + auto SymbolID = llvm::toStringRef(llvm::toHex(I.USR)).str(); + I.MangledName = MangledName.substr(0, 250 - SymbolID.size()) + SymbolID; + } else I.MangledName = MangledName; delete Mangler; } diff --git a/clang-tools-extra/clang-include-fixer/IncludeFixer.cpp b/clang-tools-extra/clang-include-fixer/IncludeFixer.cpp index d2ae13c022b23..e825547ba0134 100644 --- a/clang-tools-extra/clang-include-fixer/IncludeFixer.cpp +++ b/clang-tools-extra/clang-include-fixer/IncludeFixer.cpp @@ -96,7 +96,7 @@ bool IncludeFixerActionFactory::runInvocation( // diagnostics here. Compiler.createDiagnostics(new clang::IgnoringDiagConsumer, /*ShouldOwnClient=*/true); - Compiler.createSourceManager(*Files); + Compiler.createSourceManager(); // We abort on fatal errors so don't let a large number of errors become // fatal. A missing #include can cause thousands of errors. diff --git a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py index 471dbf8c110b4..49a1b14932644 100755 --- a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py +++ b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py @@ -26,7 +26,7 @@ import json import multiprocessing import os -import Queue +from queue import Queue import shutil import subprocess import sys @@ -105,7 +105,7 @@ def main(): try: # Spin up a bunch of tidy-launching threads. 
- queue = Queue.Queue(max_task) + queue = Queue(max_task) for _ in range(max_task): t = threading.Thread( target=run_find_all_symbols, args=(args, tmpdir, build_path, queue) diff --git a/clang-tools-extra/clang-move/Move.cpp b/clang-tools-extra/clang-move/Move.cpp index 17f597170f9f6..519d359991cdb 100644 --- a/clang-tools-extra/clang-move/Move.cpp +++ b/clang-tools-extra/clang-move/Move.cpp @@ -75,7 +75,7 @@ std::string MakeAbsolutePath(StringRef CurrentDir, StringRef Path) { return ""; llvm::SmallString<128> InitialDirectory(CurrentDir); llvm::SmallString<128> AbsolutePath(Path); - llvm::sys::fs::make_absolute(InitialDirectory, AbsolutePath); + llvm::sys::path::make_absolute(InitialDirectory, AbsolutePath); return CleanPath(std::move(AbsolutePath)); } diff --git a/clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp b/clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp index affa276a0c550..5770bf767bc3c 100644 --- a/clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp +++ b/clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp @@ -164,6 +164,22 @@ getNewFieldsOrder(const RecordDecl *Definition, return NewFieldsOrder; } +static bool isOrderValid(const RecordDecl *RD, ArrayRef FieldOrder) { + if (FieldOrder.empty()) + return false; + + // If there is a flexible array member in the struct, it must remain the last + // field. + if (RD->hasFlexibleArrayMember() && + FieldOrder.back() != FieldOrder.size() - 1) { + llvm::errs() + << "Flexible array member must remain the last field in the struct\n"; + return false; + } + + return true; +} + struct ReorderedStruct { public: ReorderedStruct(const RecordDecl *Decl, ArrayRef NewFieldsOrder) @@ -662,7 +678,7 @@ class ReorderingConsumer : public ASTConsumer { return; SmallVector NewFieldsOrder = getNewFieldsOrder(RD, DesiredFieldsOrder); - if (NewFieldsOrder.empty()) + if (!isOrderValid(RD, NewFieldsOrder)) return; ReorderedStruct RS{RD, NewFieldsOrder}; @@ -699,7 +715,7 @@ class ReorderingConsumer : public ASTConsumer { std::unique_ptr ReorderFieldsAction::newASTConsumer() { return std::make_unique(RecordName, DesiredFieldsOrder, - Replacements); + Replacements); } } // namespace reorder_fields diff --git a/clang-tools-extra/clang-tidy/android/ComparisonInTempFailureRetryCheck.cpp b/clang-tools-extra/clang-tidy/android/ComparisonInTempFailureRetryCheck.cpp index 78e58bccaeba1..36ac9a44695c9 100644 --- a/clang-tools-extra/clang-tidy/android/ComparisonInTempFailureRetryCheck.cpp +++ b/clang-tools-extra/clang-tidy/android/ComparisonInTempFailureRetryCheck.cpp @@ -19,7 +19,7 @@ ComparisonInTempFailureRetryCheck::ComparisonInTempFailureRetryCheck( StringRef Name, ClangTidyContext *Context) : ClangTidyCheck(Name, Context), RawRetryList(Options.get("RetryMacros", "TEMP_FAILURE_RETRY")) { - StringRef(RawRetryList).split(RetryMacros, ",", -1, false); + RawRetryList.split(RetryMacros, ",", -1, false); } void ComparisonInTempFailureRetryCheck::storeOptions( diff --git a/clang-tools-extra/clang-tidy/bugprone/AssertSideEffectCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/AssertSideEffectCheck.cpp index 227641d73885e..170050247014a 100644 --- a/clang-tools-extra/clang-tidy/bugprone/AssertSideEffectCheck.cpp +++ b/clang-tools-extra/clang-tidy/bugprone/AssertSideEffectCheck.cpp @@ -92,7 +92,7 @@ AssertSideEffectCheck::AssertSideEffectCheck(StringRef Name, RawAssertList(Options.get("AssertMacros", "assert,NSAssert,NSCAssert")), IgnoredFunctions(utils::options::parseListPair( "__builtin_expect;", 
Options.get("IgnoredFunctions", ""))) { - StringRef(RawAssertList).split(AssertMacros, ",", -1, false); + RawAssertList.split(AssertMacros, ",", -1, false); } // The options are explained in AssertSideEffectCheck.h. diff --git a/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp b/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp index 8baa8f6b35d4c..e6115f67656bc 100644 --- a/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp +++ b/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp @@ -19,6 +19,7 @@ #include "CapturingThisInMemberVariableCheck.h" #include "CastingThroughVoidCheck.h" #include "ChainedComparisonCheck.h" +#include "CommandProcessorCheck.h" #include "ComparePointerToMemberVirtualFunctionCheck.h" #include "CopyConstructorInitCheck.h" #include "CrtpConstructorAccessibilityCheck.h" @@ -130,6 +131,8 @@ class BugproneModule : public ClangTidyModule { "bugprone-casting-through-void"); CheckFactories.registerCheck( "bugprone-chained-comparison"); + CheckFactories.registerCheck( + "bugprone-command-processor"); CheckFactories.registerCheck( "bugprone-compare-pointer-to-member-virtual-function"); CheckFactories.registerCheck( diff --git a/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt b/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt index b0dbe84a16cd4..c8943e5b22ef8 100644 --- a/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt @@ -15,6 +15,7 @@ add_clang_library(clangTidyBugproneModule STATIC CapturingThisInMemberVariableCheck.cpp CastingThroughVoidCheck.cpp ChainedComparisonCheck.cpp + CommandProcessorCheck.cpp ComparePointerToMemberVirtualFunctionCheck.cpp CopyConstructorInitCheck.cpp CrtpConstructorAccessibilityCheck.cpp diff --git a/clang-tools-extra/clang-tidy/cert/CommandProcessorCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/CommandProcessorCheck.cpp similarity index 95% rename from clang-tools-extra/clang-tidy/cert/CommandProcessorCheck.cpp rename to clang-tools-extra/clang-tidy/bugprone/CommandProcessorCheck.cpp index d87396f5189b1..a09c1a931cdb5 100644 --- a/clang-tools-extra/clang-tidy/cert/CommandProcessorCheck.cpp +++ b/clang-tools-extra/clang-tidy/bugprone/CommandProcessorCheck.cpp @@ -11,7 +11,7 @@ using namespace clang::ast_matchers; -namespace clang::tidy::cert { +namespace clang::tidy::bugprone { void CommandProcessorCheck::registerMatchers(MatchFinder *Finder) { Finder->addMatcher( @@ -35,4 +35,4 @@ void CommandProcessorCheck::check(const MatchFinder::MatchResult &Result) { diag(E->getExprLoc(), "calling %0 uses a command processor") << Fn; } -} // namespace clang::tidy::cert +} // namespace clang::tidy::bugprone diff --git a/clang-tools-extra/clang-tidy/cert/CommandProcessorCheck.h b/clang-tools-extra/clang-tidy/bugprone/CommandProcessorCheck.h similarity index 72% rename from clang-tools-extra/clang-tidy/cert/CommandProcessorCheck.h rename to clang-tools-extra/clang-tidy/bugprone/CommandProcessorCheck.h index c2f8b39faaab1..bd4683410ae6f 100644 --- a/clang-tools-extra/clang-tidy/cert/CommandProcessorCheck.h +++ b/clang-tools-extra/clang-tidy/bugprone/CommandProcessorCheck.h @@ -6,12 +6,12 @@ // //===----------------------------------------------------------------------===// -#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CERT_COMMAND_PROCESSOR_CHECK_H -#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CERT_COMMAND_PROCESSOR_CHECK_H +#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_COMMANDPROCESSORCHECK_H +#define 
LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_COMMANDPROCESSORCHECK_H #include "../ClangTidyCheck.h" -namespace clang::tidy::cert { +namespace clang::tidy::bugprone { /// Execution of a command processor can lead to security vulnerabilities, /// and is generally not required. Instead, prefer to launch executables @@ -19,7 +19,7 @@ namespace clang::tidy::cert { /// actually launched. /// /// For the user-facing documentation see: -/// https://clang.llvm.org/extra/clang-tidy/checks/cert/env33-c.html +/// https://clang.llvm.org/extra/clang-tidy/checks/bugprone/command-processor.html class CommandProcessorCheck : public ClangTidyCheck { public: CommandProcessorCheck(StringRef Name, ClangTidyContext *Context) @@ -28,6 +28,6 @@ class CommandProcessorCheck : public ClangTidyCheck { void check(const ast_matchers::MatchFinder::MatchResult &Result) override; }; -} // namespace clang::tidy::cert +} // namespace clang::tidy::bugprone -#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_CERT_COMMAND_PROCESSOR_CHECK_H +#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_COMMANDPROCESSORCHECK_H diff --git a/clang-tools-extra/clang-tidy/bugprone/ExceptionEscapeCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/ExceptionEscapeCheck.cpp index 3d839b5111cc8..837a86ff8655e 100644 --- a/clang-tools-extra/clang-tidy/bugprone/ExceptionEscapeCheck.cpp +++ b/clang-tools-extra/clang-tidy/bugprone/ExceptionEscapeCheck.cpp @@ -39,12 +39,12 @@ ExceptionEscapeCheck::ExceptionEscapeCheck(StringRef Name, RawIgnoredExceptions(Options.get("IgnoredExceptions", "")) { llvm::SmallVector FunctionsThatShouldNotThrowVec, IgnoredExceptionsVec; - StringRef(RawFunctionsThatShouldNotThrow) - .split(FunctionsThatShouldNotThrowVec, ",", -1, false); + RawFunctionsThatShouldNotThrow.split(FunctionsThatShouldNotThrowVec, ",", -1, + false); FunctionsThatShouldNotThrow.insert_range(FunctionsThatShouldNotThrowVec); llvm::StringSet<> IgnoredExceptions; - StringRef(RawIgnoredExceptions).split(IgnoredExceptionsVec, ",", -1, false); + RawIgnoredExceptions.split(IgnoredExceptionsVec, ",", -1, false); IgnoredExceptions.insert_range(IgnoredExceptionsVec); Tracer.ignoreExceptions(std::move(IgnoredExceptions)); Tracer.ignoreBadAlloc(true); diff --git a/clang-tools-extra/clang-tidy/bugprone/ExceptionEscapeCheck.h b/clang-tools-extra/clang-tidy/bugprone/ExceptionEscapeCheck.h index ae6e2024e415d..bd1e7bae57f5d 100644 --- a/clang-tools-extra/clang-tidy/bugprone/ExceptionEscapeCheck.h +++ b/clang-tools-extra/clang-tidy/bugprone/ExceptionEscapeCheck.h @@ -33,8 +33,8 @@ class ExceptionEscapeCheck : public ClangTidyCheck { void check(const ast_matchers::MatchFinder::MatchResult &Result) override; private: - std::string RawFunctionsThatShouldNotThrow; - std::string RawIgnoredExceptions; + StringRef RawFunctionsThatShouldNotThrow; + StringRef RawIgnoredExceptions; llvm::StringSet<> FunctionsThatShouldNotThrow; utils::ExceptionAnalyzer Tracer; diff --git a/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp index 1e657888b0fc0..4fc1b3b99ece4 100644 --- a/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp +++ b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp @@ -7,6 +7,8 @@ //===----------------------------------------------------------------------===// #include "InvalidEnumDefaultInitializationCheck.h" +#include "../utils/Matchers.h" +#include "../utils/OptionsUtils.h" #include "clang/AST/ASTContext.h" 
#include "clang/AST/TypeVisitor.h" #include "clang/ASTMatchers/ASTMatchFinder.h" @@ -88,12 +90,24 @@ class FindEnumMember : public TypeVisitor { InvalidEnumDefaultInitializationCheck::InvalidEnumDefaultInitializationCheck( StringRef Name, ClangTidyContext *Context) - : ClangTidyCheck(Name, Context) {} + : ClangTidyCheck(Name, Context), + IgnoredEnums( + utils::options::parseStringList(Options.get("IgnoredEnums", ""))) { + IgnoredEnums.emplace_back("::std::errc"); +} + +void InvalidEnumDefaultInitializationCheck::storeOptions( + ClangTidyOptions::OptionMap &Opts) { + Options.store(Opts, "IgnoredEnums", + utils::options::serializeStringList(IgnoredEnums)); +} void InvalidEnumDefaultInitializationCheck::registerMatchers( MatchFinder *Finder) { - auto EnumWithoutZeroValue = enumType( - hasDeclaration(enumDecl(isCompleteAndHasNoZeroValue()).bind("enum"))); + auto EnumWithoutZeroValue = enumType(hasDeclaration( + enumDecl(isCompleteAndHasNoZeroValue(), + unless(matchers::matchesAnyListedName(IgnoredEnums))) + .bind("enum"))); auto EnumOrArrayOfEnum = qualType(hasUnqualifiedDesugaredType( anyOf(EnumWithoutZeroValue, arrayType(hasElementType(qualType( diff --git a/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.h b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.h index 4f1a4a2a21af3..5e2662f642cd7 100644 --- a/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.h +++ b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.h @@ -24,6 +24,10 @@ class InvalidEnumDefaultInitializationCheck : public ClangTidyCheck { ClangTidyContext *Context); void registerMatchers(ast_matchers::MatchFinder *Finder) override; void check(const ast_matchers::MatchFinder::MatchResult &Result) override; + void storeOptions(ClangTidyOptions::OptionMap &Opts) override; + +private: + std::vector IgnoredEnums; }; } // namespace clang::tidy::bugprone diff --git a/clang-tools-extra/clang-tidy/bugprone/ThrowKeywordMissingCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/ThrowKeywordMissingCheck.cpp index 89eafb15f2652..9781f0a5ac9de 100644 --- a/clang-tools-extra/clang-tidy/bugprone/ThrowKeywordMissingCheck.cpp +++ b/clang-tools-extra/clang-tidy/bugprone/ThrowKeywordMissingCheck.cpp @@ -17,8 +17,11 @@ namespace clang::tidy::bugprone { void ThrowKeywordMissingCheck::registerMatchers(MatchFinder *Finder) { Finder->addMatcher( cxxConstructExpr( - hasType(cxxRecordDecl( - isSameOrDerivedFrom(matchesName("[Ee]xception|EXCEPTION")))), + hasType(cxxRecordDecl(anyOf( + matchesName("[Ee]xception|EXCEPTION"), + hasAnyBase(hasType(hasCanonicalType(recordType(hasDeclaration( + cxxRecordDecl(matchesName("[Ee]xception|EXCEPTION")) + .bind("base"))))))))), unless(anyOf( hasAncestor( stmt(anyOf(cxxThrowExpr(), callExpr(), returnStmt()))), @@ -37,6 +40,11 @@ void ThrowKeywordMissingCheck::check(const MatchFinder::MatchResult &Result) { diag(TemporaryExpr->getBeginLoc(), "suspicious exception object created but " "not thrown; did you mean 'throw %0'?") << TemporaryExpr->getType().getBaseTypeIdentifier()->getName(); + + if (const auto *BaseDecl = Result.Nodes.getNodeAs("base")) + diag(BaseDecl->getLocation(), + "object type inherits from base class declared here", + DiagnosticIDs::Note); } } // namespace clang::tidy::bugprone diff --git a/clang-tools-extra/clang-tidy/cert/CERTTidyModule.cpp b/clang-tools-extra/clang-tidy/cert/CERTTidyModule.cpp index 9ba62219afee9..c1ca2cec7a1eb 100644 --- a/clang-tools-extra/clang-tidy/cert/CERTTidyModule.cpp +++ 
b/clang-tools-extra/clang-tidy/cert/CERTTidyModule.cpp @@ -10,6 +10,7 @@ #include "../ClangTidyModule.h" #include "../ClangTidyModuleRegistry.h" #include "../bugprone/BadSignalToKillThreadCheck.h" +#include "../bugprone/CommandProcessorCheck.h" #include "../bugprone/PointerArithmeticOnPolymorphicObjectCheck.h" #include "../bugprone/ReservedIdentifierCheck.h" #include "../bugprone/SignalHandlerCheck.h" @@ -33,7 +34,6 @@ #include "../performance/MoveConstructorInitCheck.h" #include "../readability/EnumInitialValueCheck.h" #include "../readability/UppercaseLiteralSuffixCheck.h" -#include "CommandProcessorCheck.h" #include "DefaultOperatorNewAlignmentCheck.h" #include "DontModifyStdNamespaceCheck.h" #include "FloatLoopCounter.h" @@ -296,7 +296,8 @@ class CERTModule : public ClangTidyModule { CheckFactories.registerCheck( "cert-dcl37-c"); // ENV - CheckFactories.registerCheck("cert-env33-c"); + CheckFactories.registerCheck( + "cert-env33-c"); // ERR CheckFactories.registerCheck( "cert-err33-c"); diff --git a/clang-tools-extra/clang-tidy/cert/CMakeLists.txt b/clang-tools-extra/clang-tidy/cert/CMakeLists.txt index 4933763f03fb5..453d1d30921e9 100644 --- a/clang-tools-extra/clang-tidy/cert/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/cert/CMakeLists.txt @@ -5,7 +5,6 @@ set(LLVM_LINK_COMPONENTS add_clang_library(clangTidyCERTModule STATIC CERTTidyModule.cpp - CommandProcessorCheck.cpp DefaultOperatorNewAlignmentCheck.cpp DontModifyStdNamespaceCheck.cpp FloatLoopCounter.cpp diff --git a/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.cpp b/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.cpp index aa95fadb0290b..b8bca7286ce69 100644 --- a/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.cpp +++ b/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.cpp @@ -20,7 +20,7 @@ ProperlySeededRandomGeneratorCheck::ProperlySeededRandomGeneratorCheck( : ClangTidyCheck(Name, Context), RawDisallowedSeedTypes( Options.get("DisallowedSeedTypes", "time_t,std::time_t")) { - StringRef(RawDisallowedSeedTypes).split(DisallowedSeedTypes, ','); + RawDisallowedSeedTypes.split(DisallowedSeedTypes, ','); } void ProperlySeededRandomGeneratorCheck::storeOptions( diff --git a/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.h b/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.h index ea30127e25e08..7da01cc857187 100644 --- a/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.h +++ b/clang-tools-extra/clang-tidy/cert/ProperlySeededRandomGeneratorCheck.h @@ -33,7 +33,7 @@ class ProperlySeededRandomGeneratorCheck : public ClangTidyCheck { void checkSeed(const ast_matchers::MatchFinder::MatchResult &Result, const T *Func); - std::string RawDisallowedSeedTypes; + StringRef RawDisallowedSeedTypes; SmallVector DisallowedSeedTypes; }; diff --git a/clang-tools-extra/clang-tidy/modernize/UseDefaultMemberInitCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseDefaultMemberInitCheck.cpp index d920af7fc477b..0d2c3a79b9ece 100644 --- a/clang-tools-extra/clang-tidy/modernize/UseDefaultMemberInitCheck.cpp +++ b/clang-tools-extra/clang-tidy/modernize/UseDefaultMemberInitCheck.cpp @@ -8,17 +8,57 @@ #include "UseDefaultMemberInitCheck.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/Expr.h" #include "clang/ASTMatchers/ASTMatchFinder.h" +#include "clang/ASTMatchers/ASTMatchers.h" #include "clang/Lex/Lexer.h" +#include "llvm/ADT/TypeSwitch.h" using namespace clang::ast_matchers; namespace 
clang::tidy::modernize { +static bool isExprAllowedInMemberInit(const Expr *E) { + if (!E) + return false; + return llvm::TypeSwitch(E) + .Case( + [](const auto *) { return true; }) + .Case([](const auto *) { return true; }) + .Case([](const ParenExpr *PE) { + return isExprAllowedInMemberInit(PE->getSubExpr()); + }) + .Case([](const UnaryOperator *UO) { + return isExprAllowedInMemberInit(UO->getSubExpr()); + }) + .Case([](const BinaryOperator *BO) { + return isExprAllowedInMemberInit(BO->getLHS()) && + isExprAllowedInMemberInit(BO->getRHS()); + }) + .Case([](const CastExpr *CE) { + return isExprAllowedInMemberInit(CE->getSubExpr()); + }) + .Case([](const DeclRefExpr *DRE) { + if (const ValueDecl *D = DRE->getDecl()) { + if (isa(D)) + return true; + if (const auto *VD = dyn_cast(D)) + return VD->isConstexpr() || VD->getStorageClass() == SC_Static; + } + return false; + }) + .Default(false); +} + namespace { + AST_MATCHER_P(InitListExpr, initCountIs, unsigned, N) { return Node.getNumInits() == N; } + +AST_MATCHER(Expr, allowedInitExpr) { return isExprAllowedInMemberInit(&Node); } + } // namespace static StringRef getValueOfValueInit(const QualType InitType) { @@ -206,30 +246,10 @@ void UseDefaultMemberInitCheck::storeOptions( } void UseDefaultMemberInitCheck::registerMatchers(MatchFinder *Finder) { - auto NumericLiteral = anyOf(integerLiteral(), floatLiteral()); - auto UnaryNumericLiteral = unaryOperator(hasAnyOperatorName("+", "-"), - hasUnaryOperand(NumericLiteral)); - - auto ConstExprRef = varDecl(anyOf(isConstexpr(), isStaticStorageClass())); - auto ImmutableRef = - declRefExpr(to(decl(anyOf(enumConstantDecl(), ConstExprRef)))); - - auto BinaryNumericExpr = binaryOperator( - hasOperands(anyOf(NumericLiteral, ImmutableRef, binaryOperator()), - anyOf(NumericLiteral, ImmutableRef, binaryOperator()))); - - auto InitBase = - anyOf(stringLiteral(), characterLiteral(), NumericLiteral, - UnaryNumericLiteral, cxxBoolLiteral(), cxxNullPtrLiteralExpr(), - implicitValueInitExpr(), ImmutableRef, BinaryNumericExpr); - - auto ExplicitCastExpr = castExpr(hasSourceExpression(InitBase)); - auto InitMatcher = anyOf(InitBase, ExplicitCastExpr); - - auto Init = - anyOf(initListExpr(anyOf(allOf(initCountIs(1), hasInit(0, InitMatcher)), - initCountIs(0), hasType(arrayType()))), - InitBase, ExplicitCastExpr); + auto Init = anyOf( + initListExpr(anyOf(allOf(initCountIs(1), hasInit(0, allowedInitExpr())), + initCountIs(0), hasType(arrayType()))), + allowedInitExpr()); Finder->addMatcher( cxxConstructorDecl(forEachConstructorInitializer( diff --git a/clang-tools-extra/clang-tidy/modernize/UseNullptrCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseNullptrCheck.cpp index 4dc4baecddd50..b921819ad13e6 100644 --- a/clang-tools-extra/clang-tidy/modernize/UseNullptrCheck.cpp +++ b/clang-tools-extra/clang-tidy/modernize/UseNullptrCheck.cpp @@ -53,7 +53,7 @@ StatementMatcher makeCastSequenceMatcher(llvm::ArrayRef NameList) { unless(hasImplicitDestinationType( qualType(matchers::matchesAnyListedTypeName(NameList))))); - auto IsOrHasDescendant = [](auto InnerMatcher) { + auto IsOrHasDescendant = [](const auto &InnerMatcher) { return anyOf(InnerMatcher, hasDescendant(InnerMatcher)); }; @@ -494,7 +494,7 @@ UseNullptrCheck::UseNullptrCheck(StringRef Name, ClangTidyContext *Context) NullMacrosStr(Options.get("NullMacros", "NULL")), IgnoredTypes(utils::options::parseStringList(Options.get( "IgnoredTypes", "_CmpUnspecifiedParam;^std::__cmp_cat::__unspec"))) { - StringRef(NullMacrosStr).split(NullMacros, ","); + 
NullMacrosStr.split(NullMacros, ","); } void UseNullptrCheck::storeOptions(ClangTidyOptions::OptionMap &Opts) { diff --git a/clang-tools-extra/clang-tidy/openmp/ExceptionEscapeCheck.cpp b/clang-tools-extra/clang-tidy/openmp/ExceptionEscapeCheck.cpp index f9becee92e148..3801fc0f420e5 100644 --- a/clang-tools-extra/clang-tidy/openmp/ExceptionEscapeCheck.cpp +++ b/clang-tools-extra/clang-tidy/openmp/ExceptionEscapeCheck.cpp @@ -23,7 +23,7 @@ ExceptionEscapeCheck::ExceptionEscapeCheck(StringRef Name, llvm::SmallVector IgnoredExceptionsVec; llvm::StringSet<> IgnoredExceptions; - StringRef(RawIgnoredExceptions).split(IgnoredExceptionsVec, ",", -1, false); + RawIgnoredExceptions.split(IgnoredExceptionsVec, ",", -1, false); llvm::transform(IgnoredExceptionsVec, IgnoredExceptionsVec.begin(), [](StringRef S) { return S.trim(); }); IgnoredExceptions.insert_range(IgnoredExceptionsVec); diff --git a/clang-tools-extra/clang-tidy/openmp/ExceptionEscapeCheck.h b/clang-tools-extra/clang-tidy/openmp/ExceptionEscapeCheck.h index 39da124a4b37c..757a9337bb9ed 100644 --- a/clang-tools-extra/clang-tidy/openmp/ExceptionEscapeCheck.h +++ b/clang-tools-extra/clang-tidy/openmp/ExceptionEscapeCheck.h @@ -30,7 +30,7 @@ class ExceptionEscapeCheck : public ClangTidyCheck { void check(const ast_matchers::MatchFinder::MatchResult &Result) override; private: - std::string RawIgnoredExceptions; + StringRef RawIgnoredExceptions; utils::ExceptionAnalyzer Tracer; }; diff --git a/clang-tools-extra/clang-tidy/readability/CMakeLists.txt b/clang-tools-extra/clang-tidy/readability/CMakeLists.txt index 4b4c49d3b17d1..0d0641c4b22bf 100644 --- a/clang-tools-extra/clang-tidy/readability/CMakeLists.txt +++ b/clang-tools-extra/clang-tidy/readability/CMakeLists.txt @@ -44,6 +44,7 @@ add_clang_library(clangTidyReadabilityModule STATIC RedundantDeclarationCheck.cpp RedundantFunctionPtrDereferenceCheck.cpp RedundantMemberInitCheck.cpp + RedundantParenthesesCheck.cpp RedundantPreprocessorCheck.cpp RedundantSmartptrGetCheck.cpp RedundantStringCStrCheck.cpp diff --git a/clang-tools-extra/clang-tidy/readability/ReadabilityTidyModule.cpp b/clang-tools-extra/clang-tidy/readability/ReadabilityTidyModule.cpp index d01882dfc9daa..fcfac05b000e4 100644 --- a/clang-tools-extra/clang-tidy/readability/ReadabilityTidyModule.cpp +++ b/clang-tools-extra/clang-tidy/readability/ReadabilityTidyModule.cpp @@ -47,6 +47,7 @@ #include "RedundantFunctionPtrDereferenceCheck.h" #include "RedundantInlineSpecifierCheck.h" #include "RedundantMemberInitCheck.h" +#include "RedundantParenthesesCheck.h" #include "RedundantPreprocessorCheck.h" #include "RedundantSmartptrGetCheck.h" #include "RedundantStringCStrCheck.h" @@ -138,6 +139,8 @@ class ReadabilityModule : public ClangTidyModule { "readability-redundant-function-ptr-dereference"); CheckFactories.registerCheck( "readability-redundant-member-init"); + CheckFactories.registerCheck( + "readability-redundant-parentheses"); CheckFactories.registerCheck( "readability-redundant-preprocessor"); CheckFactories.registerCheck( diff --git a/clang-tools-extra/clang-tidy/readability/RedundantParenthesesCheck.cpp b/clang-tools-extra/clang-tidy/readability/RedundantParenthesesCheck.cpp new file mode 100644 index 0000000000000..0ab59fff39d88 --- /dev/null +++ b/clang-tools-extra/clang-tidy/readability/RedundantParenthesesCheck.cpp @@ -0,0 +1,55 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RedundantParenthesesCheck.h"
+#include "clang/AST/Expr.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersMacros.h"
+#include
+
+using namespace clang::ast_matchers;
+
+namespace clang::tidy::readability {
+
+namespace {
+
+AST_MATCHER_P(ParenExpr, subExpr, ast_matchers::internal::Matcher<Expr>,
+              InnerMatcher) {
+  return InnerMatcher.matches(*Node.getSubExpr(), Finder, Builder);
+}
+
+AST_MATCHER(ParenExpr, isInMacro) {
+  const Expr *E = Node.getSubExpr();
+  return Node.getLParen().isMacroID() || Node.getRParen().isMacroID() ||
+         E->getBeginLoc().isMacroID() || E->getEndLoc().isMacroID();
+}
+
+} // namespace
+
+void RedundantParenthesesCheck::registerMatchers(MatchFinder *Finder) {
+  const auto ConstantExpr =
+      expr(anyOf(integerLiteral(), floatLiteral(), characterLiteral(),
+                 cxxBoolLiteral(), stringLiteral(), cxxNullPtrLiteralExpr()));
+  Finder->addMatcher(
+      parenExpr(subExpr(anyOf(parenExpr(), ConstantExpr, declRefExpr())),
+                unless(anyOf(isInMacro(),
+                             // sizeof(...) is commonly used.
+                             hasParent(unaryExprOrTypeTraitExpr()))))
+          .bind("dup"),
+      this);
+}
+
+void RedundantParenthesesCheck::check(const MatchFinder::MatchResult &Result) {
+  const auto *PE = Result.Nodes.getNodeAs<ParenExpr>("dup");
+  diag(PE->getBeginLoc(), "redundant parentheses around expression")
+      << FixItHint::CreateRemoval(PE->getLParen())
+      << FixItHint::CreateRemoval(PE->getRParen());
+}
+
+} // namespace clang::tidy::readability
diff --git a/clang-tools-extra/clang-tidy/readability/RedundantParenthesesCheck.h b/clang-tools-extra/clang-tidy/readability/RedundantParenthesesCheck.h
new file mode 100644
index 0000000000000..9a0409b83fff3
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/readability/RedundantParenthesesCheck.h
@@ -0,0 +1,34 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_READABILITY_REDUNDANTPARENTHESESCHECK_H
+#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_READABILITY_REDUNDANTPARENTHESESCHECK_H
+
+#include "../ClangTidyCheck.h"
+#include "clang/Basic/LangOptions.h"
+
+namespace clang::tidy::readability {
+
+/// Detect redundant parentheses.
+/// +/// For the user-facing documentation see: +/// https://clang.llvm.org/extra/clang-tidy/checks/readability/redundant-parentheses.html +class RedundantParenthesesCheck : public ClangTidyCheck { +public: + RedundantParenthesesCheck(StringRef Name, ClangTidyContext *Context) + : ClangTidyCheck(Name, Context) {} + void registerMatchers(ast_matchers::MatchFinder *Finder) override; + void check(const ast_matchers::MatchFinder::MatchResult &Result) override; + bool isLanguageVersionSupported(const LangOptions &LangOpts) const override { + return LangOpts.CPlusPlus | LangOpts.C99; + } +}; + +} // namespace clang::tidy::readability + +#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_READABILITY_REDUNDANTPARENTHESESCHECK_H diff --git a/clang-tools-extra/clang-tidy/utils/ExceptionAnalyzer.cpp b/clang-tools-extra/clang-tidy/utils/ExceptionAnalyzer.cpp index bdde7249d2796..fd4320eb8144b 100644 --- a/clang-tools-extra/clang-tidy/utils/ExceptionAnalyzer.cpp +++ b/clang-tools-extra/clang-tidy/utils/ExceptionAnalyzer.cpp @@ -595,6 +595,11 @@ ExceptionAnalyzer::throwsException(const Stmt *St, Results.merge(DestructorExcs); } } + } else if (const auto *Lambda = dyn_cast(St)) { + for (const Stmt *Init : Lambda->capture_inits()) { + ExceptionInfo Excs = throwsException(Init, Caught, CallStack); + Results.merge(Excs); + } } else { for (const Stmt *Child : St->children()) { ExceptionInfo Excs = throwsException(Child, Caught, CallStack); diff --git a/clang-tools-extra/clangd/CompileCommands.cpp b/clang-tools-extra/clangd/CompileCommands.cpp index 80391fe8cce25..c9da98e96ccfb 100644 --- a/clang-tools-extra/clangd/CompileCommands.cpp +++ b/clang-tools-extra/clangd/CompileCommands.cpp @@ -270,7 +270,8 @@ void CommandMangler::operator()(tooling::CompileCommand &Command, if (auto *DashDash = ArgList.getLastArgNoClaim(driver::options::OPT__DASH_DASH)) { auto DashDashIndex = DashDash->getIndex() + 1; // +1 accounts for Cmd[0] - for (unsigned I = DashDashIndex; I < Cmd.size(); ++I) + // Another +1 so we don't treat the `--` itself as an input. + for (unsigned I = DashDashIndex + 1; I < Cmd.size(); ++I) SawInput(Cmd[I]); Cmd.resize(DashDashIndex); } diff --git a/clang-tools-extra/clangd/ConfigCompile.cpp b/clang-tools-extra/clangd/ConfigCompile.cpp index 962a48bcb7671..18e31809aa7c7 100644 --- a/clang-tools-extra/clangd/ConfigCompile.cpp +++ b/clang-tools-extra/clangd/ConfigCompile.cpp @@ -131,7 +131,7 @@ struct FragmentCompiler { return std::nullopt; } llvm::SmallString<256> AbsPath = llvm::StringRef(*Path); - llvm::sys::fs::make_absolute(FragmentDirectory, AbsPath); + llvm::sys::path::make_absolute(FragmentDirectory, AbsPath); llvm::sys::path::native(AbsPath, Style); return AbsPath.str().str(); } diff --git a/clang-tools-extra/clangd/SystemIncludeExtractor.cpp b/clang-tools-extra/clangd/SystemIncludeExtractor.cpp index 106de1b84c5c6..4a5cd3bb78b2f 100644 --- a/clang-tools-extra/clangd/SystemIncludeExtractor.cpp +++ b/clang-tools-extra/clangd/SystemIncludeExtractor.cpp @@ -106,7 +106,7 @@ struct DriverArgs { // relative or absolute). 
if (llvm::any_of(Driver, [](char C) { return llvm::sys::path::is_separator(C); })) { - llvm::sys::fs::make_absolute(Cmd.Directory, Driver); + llvm::sys::path::make_absolute(Cmd.Directory, Driver); } this->Driver = Driver.str().str(); for (size_t I = 0, E = Cmd.CommandLine.size(); I < E; ++I) { diff --git a/clang-tools-extra/clangd/index/SymbolCollector.cpp b/clang-tools-extra/clangd/index/SymbolCollector.cpp index 6bdb1080fb294..39c479b5f4d5b 100644 --- a/clang-tools-extra/clangd/index/SymbolCollector.cpp +++ b/clang-tools-extra/clangd/index/SymbolCollector.cpp @@ -325,7 +325,7 @@ class SymbolCollector::HeaderFileURICache { if (R.second) { llvm::SmallString<256> AbsPath = Path; if (!llvm::sys::path::is_absolute(AbsPath) && !FallbackDir.empty()) - llvm::sys::fs::make_absolute(FallbackDir, AbsPath); + llvm::sys::path::make_absolute(FallbackDir, AbsPath); assert(llvm::sys::path::is_absolute(AbsPath) && "If the VFS can't make paths absolute, a FallbackDir must be " "provided"); diff --git a/clang-tools-extra/clangd/tool/ClangdMain.cpp b/clang-tools-extra/clangd/tool/ClangdMain.cpp index 4de2f213565e4..4a990f8f716ca 100644 --- a/clang-tools-extra/clangd/tool/ClangdMain.cpp +++ b/clang-tools-extra/clangd/tool/ClangdMain.cpp @@ -578,7 +578,7 @@ class TestScheme : public URIScheme { Body = Body.ltrim('/'); llvm::SmallString<16> Path(Body); path::native(Path); - fs::make_absolute(TestScheme::TestDir, Path); + path::make_absolute(TestScheme::TestDir, Path); return std::string(Path); } diff --git a/clang-tools-extra/clangd/unittests/CompileCommandsTests.cpp b/clang-tools-extra/clangd/unittests/CompileCommandsTests.cpp index 2ce2975bd962b..660540afd2320 100644 --- a/clang-tools-extra/clangd/unittests/CompileCommandsTests.cpp +++ b/clang-tools-extra/clangd/unittests/CompileCommandsTests.cpp @@ -526,6 +526,25 @@ TEST(CommandMangler, RespectsOriginalSysroot) { Not(HasSubstr(testPath("fake/sysroot")))); } } + +TEST(CommandMangler, StdLatestFlag) { + const auto Mangler = CommandMangler::forTests(); + tooling::CompileCommand Cmd; + Cmd.CommandLine = {"clang-cl", "/std:c++latest", "--", "/Users/foo.cc"}; + Mangler(Cmd, "/Users/foo.cc"); + // Check that the /std:c++latest flag is not dropped + EXPECT_THAT(llvm::join(Cmd.CommandLine, " "), HasSubstr("/std:c++latest")); +} + +TEST(CommandMangler, StdLatestFlag_Inference) { + const auto Mangler = CommandMangler::forTests(); + tooling::CompileCommand Cmd; + Cmd.CommandLine = {"clang-cl", "/std:c++latest", "--", "/Users/foo.cc"}; + Mangler(Cmd, "/Users/foo.hpp"); + // Check that the /std:c++latest flag is not dropped during inference + EXPECT_THAT(llvm::join(Cmd.CommandLine, " "), HasSubstr("/std:c++latest")); +} + } // namespace } // namespace clangd } // namespace clang diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst index bc916396a14ca..62e1987377989 100644 --- a/clang-tools-extra/docs/ReleaseNotes.rst +++ b/clang-tools-extra/docs/ReleaseNotes.rst @@ -203,6 +203,11 @@ New checks Finds virtual function overrides with different visibility than the function in the base class. +- New :doc:`readability-redundant-parentheses + ` check. + + Detect redundant parentheses. + New check aliases ^^^^^^^^^^^^^^^^^ @@ -211,6 +216,11 @@ New check aliases ` keeping initial check as an alias to the new one. +- Renamed :doc:`cert-env33-c ` to + :doc:`bugprone-command-processor + ` + keeping initial check as an alias to the new one. 
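For illustration only (this example is not part of the patch): based on the check documentation added later in this patch, the renamed ``bugprone-command-processor`` check, and its ``cert-env33-c`` alias, reports calls that launch a command processor but leaves the capability probe ``system(NULL)`` alone. A minimal C sketch of code it would and would not flag:

.. code-block:: c

  #include <stdio.h>  /* popen, pclose */
  #include <stdlib.h> /* system */

  void spawn_tasks(void) {
    /* flagged: "calling 'system' uses a command processor" */
    system("ls -l");

    /* flagged: popen also hands its command string to a command processor */
    FILE *p = popen("date", "r");
    if (p)
      pclose(p);

    /* not flagged: a null argument only asks whether a command processor
       exists; no command is executed */
    if (system(NULL))
      puts("a command processor is available");
  }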
+ - Renamed :doc:`cert-err34-c ` to :doc:`bugprone-unchecked-string-to-number-conversion ` @@ -234,10 +244,19 @@ Changes in existing checks correcting a spelling mistake on its option ``NamePrefixSuffixSilenceDissimilarityTreshold``. +- Improved :doc:`bugprone-exception-escape + ` check's handling of lambdas: + exceptions from captures are now diagnosed, exceptions in the bodies of + lambdas that aren't actually invoked are not. + - Improved :doc:`bugprone-infinite-loop ` check by adding detection for variables introduced by structured bindings. +- Improved :doc:`bugprone-invalid-enum-default-initialization + ` with new + `IgnoredEnums` option to ignore specified enums during analysis. + - Improved :doc:`bugprone-narrowing-conversions ` check by fixing false positive from analysis of a conditional expression in C. @@ -260,6 +279,11 @@ Changes in existing checks namespace are treated as the tag or the data part of a user-defined tagged union respectively. +- Improved :doc:`bugprone-throw-keyword-missing + ` check by only considering + the canonical types of base classes as written and adding a note on the base + class that triggered the warning. + - Improved :doc:`bugprone-unchecked-optional-access ` check by supporting ``NullableValue::makeValue`` and ``NullableValue::makeValueInplace`` to @@ -301,10 +325,19 @@ Changes in existing checks uses of non-standard ``enable_if`` with a signature different from ``std::enable_if`` (such as ``boost::enable_if``). +- Improved :doc:`modernize-use-default-member-init + ` check to + enhance the robustness of the member initializer detection. + - Improved :doc:`modernize-use-designated-initializers ` check to suggest using designated initializers for aliased aggregate types. +- Improved :doc:`modernize-use-nullptr + ` check by fixing a crash + on Windows when the check was enabled with a 32-bit :program:`clang-tidy` + binary. + - Improved :doc:`modernize-use-std-format ` check to correctly match when the format string is converted to a different type by an implicit diff --git a/clang-tools-extra/docs/clang-tidy/Contributing.rst b/clang-tools-extra/docs/clang-tidy/Contributing.rst index ad7f22381a3ca..4ede4ea36c13d 100644 --- a/clang-tools-extra/docs/clang-tidy/Contributing.rst +++ b/clang-tools-extra/docs/clang-tidy/Contributing.rst @@ -436,7 +436,7 @@ in the release notes, as the first sentence in the doxygen comments in the heade for your check class and as the first sentence of the check documentation. Avoid the phrase "this check" in your check summary and check documentation. -If your check relates to a published coding guideline (C++ Core Guidelines, MISRA, etc.) +If your check relates to a published coding guideline (C++ Core Guidelines, SEI CERT, etc.) or style guide, provide links to the relevant guideline or style guide sections in your check documentation. diff --git a/clang-tools-extra/docs/clang-tidy/checks/bugprone/command-processor.rst b/clang-tools-extra/docs/clang-tidy/checks/bugprone/command-processor.rst new file mode 100644 index 0000000000000..cbffe7dddae04 --- /dev/null +++ b/clang-tools-extra/docs/clang-tidy/checks/bugprone/command-processor.rst @@ -0,0 +1,16 @@ +.. title:: clang-tidy - bugprone-command-processor + +bugprone-command-processor +========================== + +Flags calls to ``system()``, ``popen()``, and ``_popen()``, which +execute a command processor. 
It does not flag calls to ``system()`` with a null +pointer argument, as such a call checks for the presence of a command processor +but does not actually attempt to execute a command. + +References +---------- + +This check corresponds to the CERT C Coding Standard rule +`ENV33-C. Do not call system() +`_. diff --git a/clang-tools-extra/docs/clang-tidy/checks/bugprone/invalid-enum-default-initialization.rst b/clang-tools-extra/docs/clang-tidy/checks/bugprone/invalid-enum-default-initialization.rst index a3bd2b6d85c37..45cb878383a7d 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/bugprone/invalid-enum-default-initialization.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/bugprone/invalid-enum-default-initialization.rst @@ -19,6 +19,9 @@ The check emits a warning only if an ``enum`` variable is default-initialized value of 0. The type can be a scoped or non-scoped ``enum``. Unions are not handled by the check (if it contains a member of enumeration type). +Note that the ``enum`` ``std::errc`` is always ignored because it is expected to +be default initialized, despite not defining an enumerator with the value 0. + .. code-block:: c++ enum class Enum1: int { @@ -70,3 +73,12 @@ enum type) are set to 0. enum Enum1 Array3[2][2] = {{Enum1_A, Enum1_A}}; // warn: elements of second array are initialized to 0 struct Struct1 S1 = {1}; // warn: element 'b' is initialized to 0 + + +Options +------- + +.. option:: IgnoredEnums + + Semicolon-separated list of regexes specifying enums for which this check won't be + enforced. Default is `::std::errc`. diff --git a/clang-tools-extra/docs/clang-tidy/checks/cert/env33-c.rst b/clang-tools-extra/docs/clang-tidy/checks/cert/env33-c.rst index 9271c9ecccc00..751bccfaee8f2 100644 --- a/clang-tools-extra/docs/clang-tidy/checks/cert/env33-c.rst +++ b/clang-tools-extra/docs/clang-tidy/checks/cert/env33-c.rst @@ -3,10 +3,9 @@ cert-env33-c ============ -This check flags calls to ``system()``, ``popen()``, and ``_popen()``, which -execute a command processor. It does not flag calls to ``system()`` with a null -pointer argument, as such a call checks for the presence of a command processor -but does not actually attempt to execute a command. +The `cert-env33-c` check is an alias, please see +`bugprone-command-processor <../bugprone/command-processor.html>`_ +for more information. This check corresponds to the CERT C Coding Standard rule `ENV33-C. 
Do not call system()
diff --git a/clang-tools-extra/docs/clang-tidy/checks/list.rst b/clang-tools-extra/docs/clang-tidy/checks/list.rst
index 472d509101cdb..f94696d4ef9c7 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/list.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/list.rst
@@ -87,6 +87,7 @@ Clang-Tidy Checks
    :doc:`bugprone-capturing-this-in-member-variable `,
    :doc:`bugprone-casting-through-void `,
    :doc:`bugprone-chained-comparison `,
+   :doc:`bugprone-command-processor `,
    :doc:`bugprone-compare-pointer-to-member-virtual-function `,
    :doc:`bugprone-copy-constructor-init `, "Yes"
    :doc:`bugprone-crtp-constructor-accessibility `, "Yes"
@@ -173,7 +174,6 @@ Clang-Tidy Checks
    :doc:`bugprone-use-after-move `,
    :doc:`bugprone-virtual-near-miss `, "Yes"
    :doc:`cert-dcl58-cpp `,
-   :doc:`cert-env33-c `,
    :doc:`cert-err33-c `,
    :doc:`cert-err60-cpp `,
    :doc:`cert-flp30-c `,
@@ -404,6 +404,7 @@ Clang-Tidy Checks
    :doc:`readability-redundant-function-ptr-dereference `, "Yes"
    :doc:`readability-redundant-inline-specifier `, "Yes"
    :doc:`readability-redundant-member-init `, "Yes"
+   :doc:`readability-redundant-parentheses `, "Yes"
    :doc:`readability-redundant-preprocessor `,
    :doc:`readability-redundant-smartptr-get `, "Yes"
    :doc:`readability-redundant-string-cstr `, "Yes"
@@ -440,6 +441,7 @@ Check aliases
    :doc:`cert-dcl54-cpp `, :doc:`misc-new-delete-overloads `,
    :doc:`cert-dcl59-cpp `, :doc:`google-build-namespaces `,
    :doc:`cert-err09-cpp `, :doc:`misc-throw-by-value-catch-by-reference `,
+   :doc:`cert-env33-c `, :doc:`bugprone-command-processor `,
    :doc:`cert-err34-c `, :doc:`bugprone-unchecked-string-to-number-conversion `,
    :doc:`cert-err52-cpp `, :doc:`modernize-avoid-setjmp-longjmp `,
    :doc:`cert-err58-cpp `, :doc:`bugprone-throwing-static-initialization `,
diff --git a/clang-tools-extra/docs/clang-tidy/checks/readability/redundant-parentheses.rst b/clang-tools-extra/docs/clang-tidy/checks/readability/redundant-parentheses.rst
new file mode 100644
index 0000000000000..23d975e646490
--- /dev/null
+++ b/clang-tools-extra/docs/clang-tidy/checks/readability/redundant-parentheses.rst
@@ -0,0 +1,29 @@
+.. title:: clang-tidy - readability-redundant-parentheses
+
+readability-redundant-parentheses
+=================================
+
+Detect redundant parentheses.
+
+When code is modified, parentheses that are no longer needed are often left
+behind, which makes the code unnecessarily long. When the expression is
+complex, finding the matching parentheses becomes particularly difficult.
+
+Example
+-------
+
+.. code-block:: c++
+
+  (1);
+  ((a + 2)) * 3;
+  (a);
+  ("aaa");
+
+Currently this check does not take operator precedence into account: it does
+not warn about parentheses that are redundant only because the enclosed
+expression binds more tightly than its surroundings, even though removing
+them would not change the semantics.
+
+.. code-block:: c++
+
+  int a = (1 * 2) + 3; // no warning
diff --git a/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp b/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp
index 372ab5fa2706e..fefbfc3a9614d 100644
--- a/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp
+++ b/clang-tools-extra/include-cleaner/tool/IncludeCleaner.cpp
@@ -344,7 +344,7 @@ mapInputsToAbsPaths(clang::tooling::CompilationDatabase &CDB,
     }
     for (const auto &Cmd : Cmds) {
       llvm::SmallString<256> CDBPath(Cmd.Filename);
-      llvm::sys::fs::make_absolute(Cmd.Directory, CDBPath);
+      llvm::sys::path::make_absolute(Cmd.Directory, CDBPath);
       CDBToAbsPaths[std::string(CDBPath)] = std::string(AbsPath);
     }
   }
diff --git a/clang-tools-extra/include-cleaner/unittests/RecordTest.cpp b/clang-tools-extra/include-cleaner/unittests/RecordTest.cpp
index 3fb49796039f2..cbf7bae23b365 100644
--- a/clang-tools-extra/include-cleaner/unittests/RecordTest.cpp
+++ b/clang-tools-extra/include-cleaner/unittests/RecordTest.cpp
@@ -649,11 +649,12 @@ TEST_F(PragmaIncludeTest, ExportInUnnamedBuffer) {
   Clang->createVirtualFileSystem(VFS);
   Clang->createDiagnostics();
-  auto *FM = Clang->createFileManager();
+  Clang->createFileManager();
+  FileManager &FM = Clang->getFileManager();
   ASSERT_TRUE(Clang->ExecuteAction(*Inputs.MakeAction()));
   EXPECT_THAT(
-      PI.getExporters(llvm::cantFail(FM->getFileRef("foo.h")), *FM),
-      testing::ElementsAre(llvm::cantFail(FM->getFileRef("exporter.h"))));
+      PI.getExporters(llvm::cantFail(FM.getFileRef("foo.h")), FM),
+      testing::ElementsAre(llvm::cantFail(FM.getFileRef("exporter.h"))));
 }
 
 TEST_F(PragmaIncludeTest, OutlivesFMAndSM) {
diff --git a/clang-tools-extra/test/clang-doc/long-name.cpp b/clang-tools-extra/test/clang-doc/long-name.cpp
index b33337588da19..e29c468ecc4da 100644
--- a/clang-tools-extra/test/clang-doc/long-name.cpp
+++ b/clang-tools-extra/test/clang-doc/long-name.cpp
@@ -1,3 +1,5 @@
+// FIXME: This test seems to break on Windows, so disable it for now.
+// UNSUPPORTED: system-windows // RUN: rm -rf %t && mkdir -p %t // RUN: clang-doc --output=%t --format=mustache --executor=standalone %s // RUN: ls %t/json | FileCheck %s -check-prefix=CHECK-JSON @@ -9,6 +11,6 @@ struct ThisStructHasANameThatResultsInAMangledNameThatIsExactly250CharactersLong struct ThisStructHasANameThatResultsInAMangledNameThatIsExactly251CharactersLongThatIsSupposedToTestTheFilenameLengthLimitsWithinClangDocInOrdertoSeeifclangdocwillcrashornotdependingonthelengthofthestructIfTheLengthIsTooLongThenClangDocWillCrashAnd123 {}; // CHECK-JSON: ThisStructHasANameThatResultsInAMangledNameThatIsExactly250CharactersLongThatIsSupposedToTestTheFilenameLengthLimitsWithinClangDocInOrdertoSeeifclangdocwillcrashornotdependingonthelengthofthestructIfTheLengthIsTooLongThenClangDocWillCrashAnd12.json -// CHECK-JSON: {{[0-9A-F]*}}.json +// CHECK-JSON: _ZTV244ThisStructHasANameThatResultsInAMangledNameThatIsExactly251CharactersLongThatIsSupposedToTestTheFilenameLengthLimitsWithinClangDocInOrdertoSeeifclangdocwillcrashornotdependingonthelengthofthestructIfTheL29DE8558215A13A506661C0E01E50AA3E5C9C7FA.json // CHECK-HTML: ThisStructHasANameThatResultsInAMangledNameThatIsExactly250CharactersLongThatIsSupposedToTestTheFilenameLengthLimitsWithinClangDocInOrdertoSeeifclangdocwillcrashornotdependingonthelengthofthestructIfTheLengthIsTooLongThenClangDocWillCrashAnd12.html -// CHECK-HTML: {{[0-9A-F]*}}.html +// CHECK-HTML: _ZTV244ThisStructHasANameThatResultsInAMangledNameThatIsExactly251CharactersLongThatIsSupposedToTestTheFilenameLengthLimitsWithinClangDocInOrdertoSeeifclangdocwillcrashornotdependingonthelengthofthestructIfTheL29DE8558215A13A506661C0E01E50AA3E5C9C7FA.html diff --git a/clang-tools-extra/test/clang-reorder-fields/FlexibleArrayMember.c b/clang-tools-extra/test/clang-reorder-fields/FlexibleArrayMember.c new file mode 100644 index 0000000000000..ef64350fd08e6 --- /dev/null +++ b/clang-tools-extra/test/clang-reorder-fields/FlexibleArrayMember.c @@ -0,0 +1,10 @@ +// RUN: clang-reorder-fields -record-name Foo -fields-order z,y,x %s -- 2>&1 | FileCheck --check-prefix=CHECK-BAD %s +// RUN: clang-reorder-fields -record-name Foo -fields-order y,x,z %s -- | FileCheck --check-prefix=CHECK-GOOD %s + +// CHECK-BAD: {{^Flexible array member must remain the last field in the struct}} + +struct Foo { + int x; // CHECK-GOOD: {{^ int y;}} + int y; // CHECK-GOOD-NEXT: {{^ int x;}} + int z[]; // CHECK-GOOD-NEXT: {{^ int z\[\];}} +}; diff --git a/clang-tools-extra/test/clang-tidy/checkers/cert/env33-c.c b/clang-tools-extra/test/clang-tidy/checkers/bugprone/command-processor.c similarity index 83% rename from clang-tools-extra/test/clang-tidy/checkers/cert/env33-c.c rename to clang-tools-extra/test/clang-tidy/checkers/bugprone/command-processor.c index 5846b496242c5..e592b57c9fb29 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/cert/env33-c.c +++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/command-processor.c @@ -1,4 +1,4 @@ -// RUN: %check_clang_tidy %s cert-env33-c %t +// RUN: %check_clang_tidy %s bugprone-command-processor %t typedef struct FILE {} FILE; @@ -11,7 +11,7 @@ void f(void) { system(0); system("test"); - // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: calling 'system' uses a command processor [cert-env33-c] + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: calling 'system' uses a command processor [bugprone-command-processor] popen("test", "test"); // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: calling 'popen' uses a command processor diff --git 
a/clang-tools-extra/test/clang-tidy/checkers/bugprone/exception-escape.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/exception-escape.cpp index b10bd1d482867..a52bbe2246d1e 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/exception-escape.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/exception-escape.cpp @@ -894,3 +894,65 @@ void pointer_exception_can_not_escape_with_void_handler() noexcept { } catch (void *) { } } + +void throw_in_uninvoked_lambda() noexcept { + [] { throw 42; }; +} + +void throw_in_lambda() noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_in_lambda' which should not throw exceptions + [] { throw 42; }(); + // CHECK-MESSAGES: :[[@LINE-1]]:8: note: frame #0: unhandled exception of type 'int' may be thrown in function 'operator()' here + // CHECK-MESSAGES: :[[@LINE-2]]:19: note: frame #1: function 'throw_in_lambda' calls function 'operator()' here +} + +struct copy_constructor_throws { + copy_constructor_throws(const copy_constructor_throws&) { throw 42; } +}; + +void throw_in_lambda_default_by_value_capture(const copy_constructor_throws& a) noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_in_lambda_default_by_value_capture' which should not throw exceptions + [=] { a; }; + // CHECK-MESSAGES: :[[@LINE-6]]:61: note: frame #0: unhandled exception of type 'int' may be thrown in function 'copy_constructor_throws' here + // CHECK-MESSAGES: :[[@LINE-2]]:4: note: frame #1: function 'throw_in_lambda_default_by_value_capture' calls function 'copy_constructor_throws' here +} + +void throw_in_lambda_explicit_by_value_capture(const copy_constructor_throws& a) noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_in_lambda_explicit_by_value_capture' which should not throw exceptions + [a] {}; + // CHECK-MESSAGES: :[[@LINE-13]]:61: note: frame #0: unhandled exception of type 'int' may be thrown in function 'copy_constructor_throws' here + // CHECK-MESSAGES: :[[@LINE-2]]:4: note: frame #1: function 'throw_in_lambda_explicit_by_value_capture' calls function 'copy_constructor_throws' here +} + +void no_throw_in_lambda_by_reference_capture(const copy_constructor_throws& a) noexcept { + [&] { a; }; + [&a] {}; +} + +void throw_in_lambda_init_capture() noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_in_lambda_init_capture' which should not throw exceptions + [a = [] { throw 42; return 0; }()] {}; + // CHECK-MESSAGES: :[[@LINE-1]]:13: note: frame #0: unhandled exception of type 'int' may be thrown in function 'operator()' here + // CHECK-MESSAGES: :[[@LINE-2]]:34: note: frame #1: function 'throw_in_lambda_init_capture' calls function 'operator()' here +} + +void throw_from_nested_lambda() noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_from_nested_lambda' which should not throw exceptions + [] { [] { throw 42; }(); }(); + // CHECK-MESSAGES: :[[@LINE-1]]:13: note: frame #0: unhandled exception of type 'int' may be thrown in function 'operator()' here + // CHECK-MESSAGES: :[[@LINE-2]]:24: note: frame #1: function 'operator()' calls function 'operator()' here + // CHECK-MESSAGES: :[[@LINE-3]]:29: note: frame #2: function 'throw_from_nested_lambda' calls function 'operator()' here +} + +const auto throw_in_noexcept_lambda = [] () noexcept { throw 42; }; +// CHECK-MESSAGES: :[[@LINE-1]]:39: 
warning: an exception may be thrown in function 'operator()' which should not throw exceptions +// CHECK-MESSAGES: :[[@LINE-2]]:56: note: frame #0: unhandled exception of type 'int' may be thrown in function 'operator()' here + +void thrower() { + throw 42; +} + +const auto indirect_throw_in_noexcept_lambda = [] () noexcept { thrower(); }; +// CHECK-MESSAGES: :[[@LINE-1]]:48: warning: an exception may be thrown in function 'operator()' which should not throw exceptions +// CHECK-MESSAGES: :[[@LINE-5]]:3: note: frame #0: unhandled exception of type 'int' may be thrown in function 'thrower' here +// CHECK-MESSAGES: :[[@LINE-3]]:65: note: frame #1: function 'operator()' calls function 'thrower' here diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.cpp index eb3d5632eaef7..85ff481aae301 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.cpp @@ -1,4 +1,5 @@ -// RUN: %check_clang_tidy -std=c++17 %s bugprone-invalid-enum-default-initialization %t +// RUN: %check_clang_tidy -check-suffixes=,DEFAULT -std=c++17-or-later %s bugprone-invalid-enum-default-initialization %t +// RUN: %check_clang_tidy -std=c++17-or-later %s bugprone-invalid-enum-default-initialization %t -- -config="{CheckOptions: {bugprone-invalid-enum-default-initialization.IgnoredEnums: '::MyEnum'}}" enum class Enum0: int { A = 0, @@ -24,10 +25,10 @@ Enum0 E0_6{Enum0::B}; Enum1 E1_1{}; // CHECK-NOTES: :[[@LINE-1]]:11: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator -// CHECK-NOTES: :8:12: note: enum is defined here +// CHECK-NOTES: :9:12: note: enum is defined here Enum1 E1_2 = Enum1(); // CHECK-NOTES: :[[@LINE-1]]:14: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator -// CHECK-NOTES: :8:12: note: enum is defined here +// CHECK-NOTES: :9:12: note: enum is defined here Enum1 E1_3; Enum1 E1_4{0}; Enum1 E1_5{Enum1::A}; @@ -35,44 +36,44 @@ Enum1 E1_6{Enum1::B}; Enum2 E2_1{}; // CHECK-NOTES: :[[@LINE-1]]:11: warning: enum value of type 'Enum2' initialized with invalid value of 0, enum doesn't have a zero-value enumerator -// CHECK-NOTES: :13:6: note: enum is defined here +// CHECK-NOTES: :14:6: note: enum is defined here Enum2 E2_2 = Enum2(); // CHECK-NOTES: :[[@LINE-1]]:14: warning: enum value of type 'Enum2' initialized with invalid value of 0, enum doesn't have a zero-value enumerator -// CHECK-NOTES: :13:6: note: enum is defined here +// CHECK-NOTES: :14:6: note: enum is defined here void f1() { static Enum1 S; // FIMXE: warn for this? 
Enum1 A; Enum1 B = Enum1(); // CHECK-NOTES: :[[@LINE-1]]:13: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here int C = int(); } void f2() { Enum1 A{}; // CHECK-NOTES: :[[@LINE-1]]:10: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here Enum1 B = Enum1(); // CHECK-NOTES: :[[@LINE-1]]:13: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here Enum1 C[5] = {{}}; // CHECK-NOTES: :[[@LINE-1]]:16: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here // CHECK-NOTES: :[[@LINE-3]]:17: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here Enum1 D[5] = {}; // FIMXE: warn for this? // CHECK-NOTES: :[[@LINE-1]]:16: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here } struct S1 { Enum1 E_1{}; // CHECK-NOTES: :[[@LINE-1]]:12: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here Enum1 E_2 = Enum1(); // CHECK-NOTES: :[[@LINE-1]]:15: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here Enum1 E_3; Enum1 E_4; Enum1 E_5; @@ -80,10 +81,10 @@ struct S1 { S1() : E_3{}, // CHECK-NOTES: :[[@LINE-1]]:8: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here E_4(), // CHECK-NOTES: :[[@LINE-1]]:8: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here E_5{Enum1::B} {} }; @@ -110,22 +111,22 @@ struct S5 { S2 VarS2{}; // CHECK-NOTES: :[[@LINE-1]]:9: warning: enum value of type 'Enum1' initialized with invalid value of 0 -// CHECK-NOTES: :8:12: note: enum is defined here +// CHECK-NOTES: :9:12: note: enum is defined here // CHECK-NOTES: :[[@LINE-3]]:9: warning: enum value of type 'Enum2' initialized with invalid value of 0 -// CHECK-NOTES: :13:6: note: enum is defined here +// CHECK-NOTES: :14:6: note: enum is defined here S3 VarS3{}; // CHECK-NOTES: :[[@LINE-1]]:10: warning: enum value of type 'Enum1' initialized with invalid value of 0 -// CHECK-NOTES: :8:12: note: enum is defined here +// CHECK-NOTES: :9:12: note: enum is defined here // CHECK-NOTES: :[[@LINE-3]]:10: warning: enum value of 
type 'Enum2' initialized with invalid value of 0 -// CHECK-NOTES: :13:6: note: enum is defined here +// CHECK-NOTES: :14:6: note: enum is defined here S4 VarS4{}; // CHECK-NOTES: :[[@LINE-1]]:10: warning: enum value of type 'Enum1' initialized with invalid value of 0 -// CHECK-NOTES: :8:12: note: enum is defined here +// CHECK-NOTES: :9:12: note: enum is defined here // CHECK-NOTES: :[[@LINE-3]]:10: warning: enum value of type 'Enum2' initialized with invalid value of 0 -// CHECK-NOTES: :13:6: note: enum is defined here +// CHECK-NOTES: :14:6: note: enum is defined here S5 VarS5{}; // CHECK-NOTES: :[[@LINE-1]]:10: warning: enum value of type 'Enum1' initialized with invalid value of 0 -// CHECK-NOTES: :8:12: note: enum is defined here +// CHECK-NOTES: :9:12: note: enum is defined here enum class EnumFwd; @@ -139,7 +140,25 @@ template struct Templ { T Mem1{}; // CHECK-NOTES: :[[@LINE-1]]:9: warning: enum value of type 'Enum1' initialized with invalid value of 0 - // CHECK-NOTES: :8:12: note: enum is defined here + // CHECK-NOTES: :9:12: note: enum is defined here }; Templ TemplVar; + +enum MyEnum { + A = 1, + B +}; + +MyEnum MyEnumVar{}; +// CHECK-NOTES-DEFAULT: :[[@LINE-1]]:17: warning: enum value of type 'MyEnum' initialized with invalid value of 0, enum doesn't have a zero-value enumerator +// CHECK-NOTES-DEFAULT: :148:6: note: enum is defined here + +namespace std { + enum errc { + A = 1, + B + }; +} + +std::errc err{}; diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/throw-keyword-missing.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/throw-keyword-missing.cpp index bafd3d19b5a31..0ae51780ccc00 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/throw-keyword-missing.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/throw-keyword-missing.cpp @@ -20,6 +20,7 @@ typedef basic_string string; typedef basic_string wstring; // std::exception and std::runtime_error declaration. +// CHECK-MESSAGES-DAG: [[#EXCEPTION_LINE:@LINE + 1]]:8 struct exception { exception(); exception(const exception &other); @@ -32,8 +33,9 @@ struct runtime_error : public exception { } // namespace std -// The usage of this class should never emit a warning. +// The usage of these classes should never emit a warning. struct RegularClass {}; +struct RegularDerived : public RegularClass {}; // Class name contains the substring "exception", in certain cases using this class should emit a warning. struct RegularException { @@ -41,18 +43,21 @@ struct RegularException { // Constructors with a single argument are treated differently (cxxFunctionalCastExpr). RegularException(int) {} + + typedef RegularClass RegularAlias; }; // -------------- void stdExceptionNotTrownTest(int i) { if (i < 0) - // CHECK-MESSAGES: :[[@LINE+1]]:5: warning: suspicious exception object created but not thrown; did you mean 'throw {{.*}}'? [bugprone-throw-keyword-missing] + // CHECK-MESSAGES-DAG: :[[@LINE+1]]:5: warning: suspicious exception object created but not thrown; did you mean 'throw {{.*}}'? 
[bugprone-throw-keyword-missing] std::exception(); if (i > 0) - // CHECK-MESSAGES: :[[@LINE+1]]:5: warning: suspicious exception + // CHECK-MESSAGES-DAG: :[[@LINE+1]]:5: warning: suspicious exception std::runtime_error("Unexpected argument"); + // CHECK-MESSAGES: note: object type inherits from base class declared here } void stdExceptionThrownTest(int i) { @@ -68,6 +73,10 @@ void regularClassNotThrownTest(int i) { RegularClass(); } +void regularClassWithAliasNotThrownTest(int i) { + RegularDerived(); +} + void regularClassThrownTest(int i) { if (i < 0) throw RegularClass(); @@ -174,6 +183,7 @@ class RegularError : public ERROR_BASE {}; void typedefTest() { // CHECK-MESSAGES: :[[@LINE+1]]:3: warning: suspicious exception RegularError(); + // CHECK-MESSAGES: :[[#EXCEPTION_LINE]]:8: note: object type inherits from base class declared here } struct ExceptionRAII { diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/type-traits.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/type-traits.cpp index 97ba1fce2a1ec..e5de9e33bccd9 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/modernize/type-traits.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/type-traits.cpp @@ -14,11 +14,25 @@ namespace std { static constexpr bool value = true; }; + template + static constexpr bool is_same_v = is_same::value; // NOLINT + template struct enable_if { using type = T; }; + template + using enable_if_t = typename enable_if::type; // NOLINT + + template + struct remove_reference { + using type = T; + }; + + template + using remove_reference_t = typename remove_reference::type; // NOLINT + template struct common_type { using type = int; @@ -126,3 +140,13 @@ namespace my_std = std; using Alias = my_std::add_const::type; // CHECK-MESSAGES: :[[@LINE-1]]:15: warning: use c++14 style type templates // CHECK-FIXES: using Alias = my_std::add_const_t; + +template +struct ImplicitlyInstantiatedConstructor { + template >> + ImplicitlyInstantiatedConstructor(U) {} +}; + +const ImplicitlyInstantiatedConstructor ImplicitInstantiation(std::remove_reference::type(123)); +// CHECK-MESSAGES: :[[@LINE-1]]:68: warning: use c++14 style type templates +// CHECK-FIXES: const ImplicitlyInstantiatedConstructor ImplicitInstantiation(std::remove_reference_t(123)); diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-default-member-init.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-default-member-init.cpp index 015216c4a9d59..52b15dec37cd5 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/modernize/use-default-member-init.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/use-default-member-init.cpp @@ -596,3 +596,26 @@ class DefaultMemberInitWithArithmetic { }; } //namespace PR122480 + +namespace GH156295 { + +class NotFix { + NotFix(int v) : x(0 + 0 + (0 * 0 * (((((((v)))) - 20))) + 10)) {} + int x; +}; + +class ShouldFix { + ShouldFix(int v) : x(0 + 0 + (0 * 0 * (((((((1)))) - 20))) + 10)) {} + int x; + // CHECK-MESSAGES: :[[@LINE-1]]:7: warning: use default member initializer for 'x' [modernize-use-default-member-init] + // CHECK-FIXES: int x{0 + 0 + (0 * 0 * (((((((1)))) - 20))) + 10)}; +}; + +} // namespace GH156295 + +namespace GH160394 { +struct A { + A(int i) : f((i & 0x1f) == 1) {} + bool f; +}; +} // namespace GH160394 diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/redundant-parentheses.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/redundant-parentheses.cpp new file mode 100644 index 
0000000000000..926cb118c77cf --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/checkers/readability/redundant-parentheses.cpp @@ -0,0 +1,64 @@ +// RUN: %check_clang_tidy %s readability-redundant-parentheses %t + +void parenExpr() { + 1 + 1; + (1 + 1); + ((1 + 1)); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: (1 + 1); + (((1 + 1))); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-MESSAGES: :[[@LINE-2]]:4: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: (1 + 1); + ((((1 + 1)))); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-MESSAGES: :[[@LINE-2]]:4: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-MESSAGES: :[[@LINE-3]]:5: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: (1 + 1); +} + +#define EXP (1 + 1) +#define PAREN(e) (e) +void parenExprWithMacro() { + EXP; // 1 + (EXP); // 2 + ((EXP)); // 3 + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: (EXP); // 3 + PAREN((1)); +} + +void constant() { + (1); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: 1; + (1.0); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: 1.0; + (true); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: true; + (','); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: ','; + ("v4"); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: "v4"; + (nullptr); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: nullptr; +} + +void declRefExpr(int a) { + (a); + // CHECK-MESSAGES: :[[@LINE-1]]:3: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: a; +} + +void exceptions() { + sizeof(1); + alignof(2); + alignof((3)); + // CHECK-MESSAGES: :[[@LINE-1]]:11: warning: redundant parentheses around expression [readability-redundant-parentheses] + // CHECK-FIXES: alignof(3); +} diff --git a/clang-tools-extra/unittests/clang-doc/HTMLMustacheGeneratorTest.cpp b/clang-tools-extra/unittests/clang-doc/HTMLMustacheGeneratorTest.cpp index 602058f5d9eb8..c7ac387ecf7c3 100644 --- a/clang-tools-extra/unittests/clang-doc/HTMLMustacheGeneratorTest.cpp +++ b/clang-tools-extra/unittests/clang-doc/HTMLMustacheGeneratorTest.cpp @@ -12,9 +12,7 @@ #include "config.h" #include "support/Utils.h" #include "clang/Basic/Version.h" -#include "llvm/Support/Path.h" #include "llvm/Testing/Support/Error.h" -#include "llvm/Testing/Support/SupportHelpers.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -48,41 +46,10 @@ getClangDocContext(std::vector UserStylesheets = {}, return CDCtx; } -static void verifyFileContents(const Twine &Path, StringRef Contents) { - auto Buffer = 
MemoryBuffer::getFile(Path); - ASSERT_TRUE((bool)Buffer); - StringRef Data = Buffer.get()->getBuffer(); - ASSERT_EQ(Data, Contents); -} - TEST(HTMLMustacheGeneratorTest, createResources) { auto G = getHTMLMustacheGenerator(); ASSERT_THAT(G, NotNull()) << "Could not find HTMLMustacheGenerator"; ClangDocContext CDCtx = getClangDocContext(); EXPECT_THAT_ERROR(G->createResources(CDCtx), Failed()) << "Empty UserStylesheets or JsScripts should fail!"; - - unittest::TempDir RootTestDirectory("createResourcesTest", /*Unique=*/true); - CDCtx.OutDirectory = RootTestDirectory.path(); - - unittest::TempFile CSS("clang-doc-mustache", "css", "CSS"); - unittest::TempFile JS("mustache", "js", "JavaScript"); - - CDCtx.UserStylesheets[0] = CSS.path(); - CDCtx.JsScripts[0] = JS.path(); - - EXPECT_THAT_ERROR(G->createResources(CDCtx), Succeeded()) - << "Failed to create resources with valid UserStylesheets and JsScripts"; - { - SmallString<256> PathBuf; - llvm::sys::path::append(PathBuf, RootTestDirectory.path(), - "clang-doc-mustache.css"); - verifyFileContents(PathBuf, "CSS"); - } - - { - SmallString<256> PathBuf; - llvm::sys::path::append(PathBuf, RootTestDirectory.path(), "mustache.js"); - verifyFileContents(PathBuf, "JavaScript"); - } } diff --git a/clang/.clang-format b/clang/.clang-format index 9b3aa8b7213b2..ecb44bfabd9aa 100644 --- a/clang/.clang-format +++ b/clang/.clang-format @@ -1 +1,2 @@ BasedOnStyle: LLVM +LineEnding: LF diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index 1bb73599970c1..e4cb1a359620d 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -80,6 +80,12 @@ if(CLANG_BUILT_STANDALONE) include(GetErrcMessages) include(LLVMDistributionSupport) + if(CMAKE_CROSSCOMPILING) + set(LLVM_USE_HOST_TOOLS ON) + include(CrossCompile) + llvm_create_cross_target(Clang NATIVE "" Release) + endif() + set(PACKAGE_VERSION "${LLVM_PACKAGE_VERSION}") set(BUG_REPORT_URL "${LLVM_PACKAGE_BUGREPORT}" CACHE STRING "Default URL where bug reports are to be submitted.") @@ -748,11 +754,22 @@ if (CLANG_ENABLE_BOOTSTRAP) if(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED) add_dependencies(clang-bootstrap-deps llvm-profdata) set(PGO_OPT -DLLVM_PROFDATA=${LLVM_RUNTIME_OUTPUT_INTDIR}/llvm-profdata) + string(TOUPPER "${BOOTSTRAP_LLVM_BUILD_INSTRUMENTED}" BOOTSTRAP_LLVM_BUILD_INSTRUMENTED) + if (BOOTSTRAP_LLVM_BUILD_INSTRUMENTED STREQUAL "CSSPGO") + add_dependencies(clang-bootstrap-deps llvm-profgen) + list(APPEND PGO_OPT -DLLVM_PROFGEN=${LLVM_RUNTIME_OUTPUT_INTDIR}/llvm-profgen) + endif() endif() if(LLVM_BUILD_INSTRUMENTED) - add_dependencies(clang-bootstrap-deps generate-profdata) - set(PGO_OPT -DLLVM_PROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.profdata) + string(TOUPPER "${LLVM_BUILD_INSTRUMENTED}" LLVM_BUILD_INSTRUMENTED) + if (LLVM_BUILD_INSTRUMENTED STREQUAL "CSSPGO") + add_dependencies(clang-bootstrap-deps generate-sprofdata) + set(PGO_OPT -DLLVM_SPROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.sprofdata) + else() + add_dependencies(clang-bootstrap-deps generate-profdata) + set(PGO_OPT -DLLVM_PROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.profdata) + endif() # Use the current tools for LTO instead of the instrumented ones list(APPEND _BOOTSTRAP_DEFAULT_PASSTHROUGH CMAKE_CXX_COMPILER diff --git a/clang/bindings/python/clang/cindex.py b/clang/bindings/python/clang/cindex.py index 13a91d83ede1c..80140d2787608 100644 --- a/clang/bindings/python/clang/cindex.py +++ b/clang/bindings/python/clang/cindex.py @@ -1435,58 +1435,61 @@ def 
is_unexposed(self): OMP_SCOPE_DIRECTIVE = 306 # OpenMP reverse directive. - OMPReverseDirective = 307 + OMP_REVERSE_DIRECTIVE = 307 # OpenMP interchange directive. - OMPInterchangeDirective = 308 + OMP_INTERCHANGE_DIRECTIVE = 308 # OpenMP assume directive. - OMPAssumeDirective = 309 + OMP_ASSUME_DIRECTIVE = 309 # OpenMP stripe directive. OMP_STRIPE_DIRECTIVE = 310 + # OpenMP fuse directive. + OMP_FUSE_DIRECTIVE = 311 + # OpenACC Compute Construct. OPEN_ACC_COMPUTE_DIRECTIVE = 320 # OpenACC Loop Construct. - OpenACCLoopConstruct = 321 + OPEN_ACC_LOOP_CONSTRUCT = 321 # OpenACC Combined Constructs. - OpenACCCombinedConstruct = 322 + OPEN_ACC_COMBINED_CONSTRUCT = 322 # OpenACC data Construct. - OpenACCDataConstruct = 323 + OPEN_ACC_DATA_CONSTRUCT = 323 # OpenACC enter data Construct. - OpenACCEnterDataConstruct = 324 + OPEN_ACC_ENTER_DATA_CONSTRUCT = 324 # OpenACC exit data Construct. - OpenACCExitDataConstruct = 325 + OPEN_ACC_EXIT_DATA_CONSTRUCT = 325 # OpenACC host_data Construct. - OpenACCHostDataConstruct = 326 + OPEN_ACC_HOST_DATA_CONSTRUCT = 326 # OpenACC wait Construct. - OpenACCWaitConstruct = 327 + OPEN_ACC_WAIT_CONSTRUCT = 327 # OpenACC init Construct. - OpenACCInitConstruct = 328 + OPEN_ACC_INIT_CONSTRUCT = 328 # OpenACC shutdown Construct. - OpenACCShutdownConstruct = 329 + OPEN_ACC_SHUTDOWN_CONSTRUCT = 329 # OpenACC set Construct. - OpenACCSetConstruct = 330 + OPEN_ACC_SET_CONSTRUCT = 330 # OpenACC update Construct. - OpenACCUpdateConstruct = 331 + OPEN_ACC_UPDATE_CONSTRUCT = 331 # OpenACC atomic Construct. - OpenACCAtomicConstruct = 332 + OPEN_ACC_ATOMIC_CONSTRUCT = 332 # OpenACC cache Construct. - OpenACCCacheConstruct = 333 + OPEN_ACC_CACHE_CONSTRUCT = 333 ### # Other Kinds diff --git a/clang/cmake/caches/BOLT-CSSPGO.cmake b/clang/cmake/caches/BOLT-CSSPGO.cmake new file mode 100644 index 0000000000000..b1c204ad57ac5 --- /dev/null +++ b/clang/cmake/caches/BOLT-CSSPGO.cmake @@ -0,0 +1,3 @@ +set(BOLT_PGO_CMAKE_CACHE "CSSPGO" CACHE STRING "") +set(BOOTSTRAP_CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING "") +include(${CMAKE_CURRENT_LIST_DIR}/BOLT-PGO.cmake) diff --git a/clang/cmake/caches/BOLT-PGO.cmake b/clang/cmake/caches/BOLT-PGO.cmake index 1a04ca9a74e5e..cc9410fd0e95c 100644 --- a/clang/cmake/caches/BOLT-PGO.cmake +++ b/clang/cmake/caches/BOLT-PGO.cmake @@ -1,3 +1,4 @@ +set(BOLT_PGO_CMAKE_CACHE "PGO" CACHE STRING "") set(LLVM_ENABLE_PROJECTS "bolt;clang;lld" CACHE STRING "") set(CLANG_BOOTSTRAP_TARGETS @@ -14,4 +15,4 @@ set(BOOTSTRAP_CLANG_BOOTSTRAP_TARGETS set(PGO_BUILD_CONFIGURATION ${CMAKE_CURRENT_LIST_DIR}/BOLT.cmake CACHE STRING "") -include(${CMAKE_CURRENT_LIST_DIR}/PGO.cmake) +include(${CMAKE_CURRENT_LIST_DIR}/${BOLT_PGO_CMAKE_CACHE}.cmake) diff --git a/clang/cmake/caches/CSSPGO.cmake b/clang/cmake/caches/CSSPGO.cmake new file mode 100644 index 0000000000000..59e08a64f8aad --- /dev/null +++ b/clang/cmake/caches/CSSPGO.cmake @@ -0,0 +1,2 @@ +set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED "CSSPGO" CACHE STRING "") +include(${CMAKE_CURRENT_LIST_DIR}/PGO.cmake) diff --git a/clang/cmake/caches/PGO.cmake b/clang/cmake/caches/PGO.cmake index 15bc755d110d1..d6471160037c1 100644 --- a/clang/cmake/caches/PGO.cmake +++ b/clang/cmake/caches/PGO.cmake @@ -5,7 +5,7 @@ set(LLVM_ENABLE_PROJECTS "clang;lld" CACHE STRING "") set(LLVM_ENABLE_RUNTIMES "compiler-rt;libcxx;libcxxabi;libunwind" CACHE STRING "") set(LLVM_TARGETS_TO_BUILD Native CACHE STRING "") -set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED ON CACHE BOOL "") +set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED IR CACHE BOOL "") set(CLANG_BOOTSTRAP_TARGETS 
generate-profdata stage2 diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst index 25f4e3b3fbd26..6bb99c757cd19 100644 --- a/clang/docs/LanguageExtensions.rst +++ b/clang/docs/LanguageExtensions.rst @@ -950,7 +950,8 @@ Each builtin accesses memory according to a provided boolean mask. These are provided as ``__builtin_masked_load`` and ``__builtin_masked_store``. The first argument is always boolean mask vector. The ``__builtin_masked_load`` builtin takes an optional third vector argument that will be used for the result of the -masked-off lanes. These builtins assume the memory is always aligned. +masked-off lanes. These builtins assume the memory is unaligned, use +``__builtin_assume_aligned`` if alignment is desired. The ``__builtin_masked_expand_load`` and ``__builtin_masked_compress_store`` builtins have the same interface but store the result in consecutive indices. @@ -969,17 +970,17 @@ Example: using v8b = bool [[clang::ext_vector_type(8)]]; using v8i = int [[clang::ext_vector_type(8)]]; - v8i load(v8b mask, v8i *ptr) { return __builtin_masked_load(mask, ptr); } + v8i load(v8b mask, int *ptr) { return __builtin_masked_load(mask, ptr); } - v8i load_expand(v8b mask, v8i *ptr) { + v8i load_expand(v8b mask, int *ptr) { return __builtin_masked_expand_load(mask, ptr); } - void store(v8b mask, v8i val, v8i *ptr) { + void store(v8b mask, v8i val, int *ptr) { __builtin_masked_store(mask, val, ptr); } - void store_compress(v8b mask, v8i val, v8i *ptr) { + void store_compress(v8b mask, v8i val, int *ptr) { __builtin_masked_compress_store(mask, val, ptr); } @@ -2064,9 +2065,9 @@ The following type trait primitives are supported by Clang. Those traits marked Returns true if a reference ``T`` can be copy-initialized from a temporary of type a non-cv-qualified ``U``. * ``__underlying_type`` (C++, GNU, Microsoft) -* ``__builtin_lt_synthesises_from_spaceship``, ``__builtin_gt_synthesises_from_spaceship``, - ``__builtin_le_synthesises_from_spaceship``, ``__builtin_ge_synthesises_from_spaceship`` (Clang): - These builtins can be used to determine whether the corresponding operator is synthesised from a spaceship operator. +* ``__builtin_lt_synthesizes_from_spaceship``, ``__builtin_gt_synthesizes_from_spaceship``, + ``__builtin_le_synthesizes_from_spaceship``, ``__builtin_ge_synthesizes_from_spaceship`` (Clang): + These builtins can be used to determine whether the corresponding operator is synthesized from a spaceship operator. In addition, the following expression traits are supported: diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst index b09bd9ce6ae9a..cf89e31aa93ef 100644 --- a/clang/docs/OpenMPSupport.rst +++ b/clang/docs/OpenMPSupport.rst @@ -360,6 +360,7 @@ information or if you want to help with the implementation. + +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ |Feature | C/C++ Status | Fortran Status | Reviews | +=============================================================+===========================+===========================+==========================================================================+ @@ -407,8 +408,16 @@ implementation. +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ + +.. 
_OpenMP 5.2 Deprecations: + +OpenMP 5.2 Deprecations +======================= + + + +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ -|OpenMP 5.2 Deprecations | C/C++ Status | Fortran Status | Reviews | +| | C/C++ Status | Fortran Status | Reviews | +=============================================================+===========================+===========================+==========================================================================+ | Linear clause syntax | :none:`unclaimed` | :none:`unclaimed` | | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ @@ -473,6 +482,8 @@ implementation. +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | loop transformation apply clause | :none:`unclaimed` | :none:`unclaimed` | | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ +| loop fuse transformation | :good:`done` | :none:`unclaimed` | | ++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | workdistribute construct | | :none:`in progress` | @skc7, @mjklemm | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | task_iteration | :none:`unclaimed` | :none:`unclaimed` | | @@ -569,9 +580,12 @@ implementation. 
| need_device_addr modifier for adjust_args clause | :part:`partial` | :none:`unclaimed` | Parsing/Sema: https://github.com/llvm/llvm-project/pull/143442 | | | | | https://github.com/llvm/llvm-project/pull/149586 | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ -| Prescriptive num_threads | :part:`In Progress` | :none:`unclaimed` | ro-i | +| Prescriptive num_threads | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/160659 | +| | | | https://github.com/llvm/llvm-project/pull/146403 | +| | | | https://github.com/llvm/llvm-project/pull/146404 | +| | | | https://github.com/llvm/llvm-project/pull/146405 | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ -| Message and severity clauses | :part:`In Progress` | :none:`unclaimed` | ro-i | +| Message and severity clauses | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/146093 | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | Local clause on declare target | :part:`In Progress` | :none:`unclaimed` | | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 70c82b090107a..145a83af514ed 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -142,14 +142,18 @@ What's New in Clang |release|? C++ Language Changes -------------------- -- A new family of builtins ``__builtin_*_synthesises_from_spaceship`` has been added. These can be queried to know - whether the ``<`` (``lt``), ``>`` (``gt``), ``<=`` (``le``), or ``>=`` (``ge``) operators are synthesised from a +- A new family of builtins ``__builtin_*_synthesizes_from_spaceship`` has been added. These can be queried to know + whether the ``<`` (``lt``), ``>`` (``gt``), ``<=`` (``le``), or ``>=`` (``ge``) operators are synthesized from a ``<=>``. This makes it possible to optimize certain facilities by using the ``<=>`` operation directly instead of doing multiple comparisons. C++2c Feature Support ^^^^^^^^^^^^^^^^^^^^^ +- Started the implementation of `P2686R5 `_ Constexpr structured bindings. + At this timem, references to constexpr and decomposition of *tuple-like* types are not supported + (only arrays and aggregates are). + C++23 Feature Support ^^^^^^^^^^^^^^^^^^^^^ @@ -268,6 +272,9 @@ Attribute Changes in Clang attribute, allowing the attribute to only be attached to the declaration. Prior, this would be treated as an error where the definition and declaration would have differing types. +- New format attributes ``gnu_printf``, ``gnu_scanf``, ``gnu_strftime`` and ``gnu_strfmon`` are added + as aliases for ``printf``, ``scanf``, ``strftime`` and ``strfmon``. 
(#GH16219) + Improvements to Clang's diagnostics ----------------------------------- - Added a separate diagnostic group ``-Wfunction-effect-redeclarations``, for the more pedantic @@ -292,7 +299,8 @@ Improvements to Clang's diagnostics "format specifies type 'unsigned int' but the argument has type 'int', which differs in signedness [-Wformat-signedness]" "signedness of format specifier 'u' is incompatible with 'c' [-Wformat-signedness]" and the API-visible diagnostic id will be appropriate. - +- Clang now produces better diagnostics for template template parameter matching + involving 'auto' template parameters. - Fixed false positives in ``-Waddress-of-packed-member`` diagnostics when potential misaligned members get processed before they can get discarded. (#GH144729) @@ -353,6 +361,7 @@ Bug Fixes in This Version first parameter. (#GH113323). - Fixed a crash with incompatible pointer to integer conversions in designated initializers involving string literals. (#GH154046) +- Fix crash on CTAD for alias template. (#GH131342) - Clang now emits a frontend error when a function marked with the `flatten` attribute calls another function that requires target features not enabled in the caller. This prevents a fatal error in the backend. @@ -422,6 +431,11 @@ Bug Fixes to C++ Support ``__builtin_addressof``, and related issues with builtin arguments. (#GH154034) - Fix an assertion failure when taking the address on a non-type template parameter argument of object type. (#GH151531) +- Suppress ``-Wdouble-promotion`` when explicitly asked for with C++ list initialization (#GH33409). +- Fix the result of `__builtin_is_implicit_lifetime` for types with a user-provided constructor. (#GH160610) +- Correctly deduce return types in ``decltype`` expressions. (#GH160497) (#GH56652) (#GH116319) (#GH161196) +- Fixed a crash in the pre-C++23 warning for attributes before a lambda declarator (#GH161070). +- Fix a crash when attempting to deduce a deduction guide from a non deducible template template parameter. (#130604) Bug Fixes to AST Handling ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -430,6 +444,7 @@ Bug Fixes to AST Handling legal representation. This is fixed because ElaboratedTypes don't exist anymore. (#GH43179) (#GH68670) (#GH92757) - Fix unrecognized html tag causing undesirable comment lexing (#GH152944) - Fix comment lexing of special command names (#GH152943) +- Use `extern` as a hint to continue parsing when recovering from a malformed declaration. Miscellaneous Bug Fixes ^^^^^^^^^^^^^^^^^^^^^^^ @@ -558,6 +573,7 @@ Crash and bug fixes - Fixed a crash in the static analyzer that when the expression in an ``[[assume(expr)]]`` attribute was enclosed in parentheses. (#GH151529) - Fixed a crash when parsing ``#embed`` parameters with unmatched closing brackets. (#GH152829) +- Fixed a crash when compiling ``__real__`` or ``__imag__`` unary operator on scalar value with type promotion. (#GH160583) Improvements ^^^^^^^^^^^^ @@ -569,10 +585,13 @@ Moved checkers Sanitizers ---------- +- Improved documentation for legacy ``no_sanitize`` attributes. Python Binding Changes ---------------------- -- Exposed `clang_getCursorLanguage` via `Cursor.language`. +- Exposed ``clang_getCursorLanguage`` via ``Cursor.language``. +- Add all missing ``CursorKind``s, ``TypeKind``s and + ``ExceptionSpecificationKind``s from ``Index.h`` OpenMP Support -------------- @@ -586,6 +605,7 @@ OpenMP Support - Added support for ``defaultmap`` directive implicit-behavior ``storage``. 
- Added support for ``defaultmap`` directive implicit-behavior ``private``. - Added parsing and semantic analysis support for ``groupprivate`` directive. +- Added support for 'omp fuse' directive. Improvements ^^^^^^^^^^^^ diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h index be038d9165fc6..f13d9c9307b40 100644 --- a/clang/include/clang-c/Index.h +++ b/clang/include/clang-c/Index.h @@ -2162,6 +2162,10 @@ enum CXCursorKind { */ CXCursor_OMPStripeDirective = 310, + /** OpenMP fuse directive + */ + CXCursor_OMPFuseDirective = 311, + /** OpenACC Compute Construct. */ CXCursor_OpenACCComputeConstruct = 320, diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h index a2c55c71e09ae..12351e98e5a2b 100644 --- a/clang/include/clang/AST/ASTContext.h +++ b/clang/include/clang/AST/ASTContext.h @@ -25,10 +25,12 @@ #include "clang/AST/RawCommentList.h" #include "clang/AST/SYCLKernelInfo.h" #include "clang/AST/TemplateName.h" +#include "clang/AST/TypeOrdering.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" @@ -51,6 +53,36 @@ class FixedPointSemantics; struct fltSemantics; template class SmallPtrSet; +struct ScalableVecTyKey { + clang::QualType EltTy; + unsigned NumElts; + unsigned NumFields; + + bool operator==(const ScalableVecTyKey &RHS) const { + return EltTy == RHS.EltTy && NumElts == RHS.NumElts && + NumFields == RHS.NumFields; + } +}; + +// Provide a DenseMapInfo specialization so that ScalableVecTyKey can be used +// as a key in DenseMap. +template <> struct DenseMapInfo { + static inline ScalableVecTyKey getEmptyKey() { + return {DenseMapInfo::getEmptyKey(), ~0U, ~0U}; + } + static inline ScalableVecTyKey getTombstoneKey() { + return {DenseMapInfo::getTombstoneKey(), ~0U, ~0U}; + } + static unsigned getHashValue(const ScalableVecTyKey &Val) { + return hash_combine(DenseMapInfo::getHashValue(Val.EltTy), + Val.NumElts, Val.NumFields); + } + static bool isEqual(const ScalableVecTyKey &LHS, + const ScalableVecTyKey &RHS) { + return LHS == RHS; + } +}; + } // namespace llvm namespace clang { @@ -505,6 +537,9 @@ class ASTContext : public RefCountedBase { SmallVector> ObjCSubClasses; + // A mapping from Scalable Vector Type keys to their corresponding QualType. + mutable llvm::DenseMap ScalableVecTyMap; + ASTContext &this_() { return *this; } public: diff --git a/clang/include/clang/AST/CharUnits.h b/clang/include/clang/AST/CharUnits.h index c06354451dfbe..e570bfae69524 100644 --- a/clang/include/clang/AST/CharUnits.h +++ b/clang/include/clang/AST/CharUnits.h @@ -141,7 +141,7 @@ namespace clang { /// Among other things, this promises that /// self.alignTo(N) will just return self. bool isMultipleOf(CharUnits N) const { - return (*this % N) == 0; + return (*this % N) == CharUnits::Zero(); } // Arithmetic operators. 
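// Illustrative sketch (not part of the patch): with this CharUnits change,
// operator% on two CharUnits yields a CharUnits rather than a raw quantity,
// so remainder checks compare against CharUnits::Zero(); isMultipleOf above
// performs the same comparison. Function name below is made up for
// illustration only.
#include "clang/AST/CharUnits.h"
static bool isNaturallyAligned(clang::CharUnits Offset, clang::CharUnits Align) {
  // Equivalent to Offset.isMultipleOf(Align) after this patch.
  return (Offset % Align) == clang::CharUnits::Zero();
}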
@@ -165,8 +165,8 @@ namespace clang { CharUnits operator% (QuantityType N) const { return CharUnits(Quantity % N); } - QuantityType operator% (const CharUnits &Other) const { - return Quantity % Other.Quantity; + CharUnits operator%(const CharUnits &Other) const { + return CharUnits(Quantity % Other.Quantity); } CharUnits operator+ (const CharUnits &Other) const { return CharUnits(Quantity + Other.Quantity); diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h index d85d04d2a4d53..406d79ebd6641 100644 --- a/clang/include/clang/AST/Decl.h +++ b/clang/include/clang/AST/Decl.h @@ -80,6 +80,7 @@ class TypeAliasTemplateDecl; class UnresolvedSetImpl; class VarTemplateDecl; enum class ImplicitParamKind; +struct UsualDeleteParams; // Holds a constraint expression along with a pack expansion index, if // expanded. @@ -2646,6 +2647,8 @@ class FunctionDecl : public DeclaratorDecl, bool isTypeAwareOperatorNewOrDelete() const; void setIsTypeAwareOperatorNewOrDelete(bool IsTypeAwareOperator = true); + UsualDeleteParams getUsualDeleteParams() const; + /// Compute the language linkage. LanguageLinkage getLanguageLinkage() const; diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h index 9fedb230ce397..d78c7b6363b5d 100644 --- a/clang/include/clang/AST/ExprCXX.h +++ b/clang/include/clang/AST/ExprCXX.h @@ -2342,6 +2342,14 @@ struct ImplicitDeallocationParameters { SizedDeallocationMode PassSize; }; +/// The parameters to pass to a usual operator delete. +struct UsualDeleteParams { + TypeAwareAllocationMode TypeAwareDelete = TypeAwareAllocationMode::No; + bool DestroyingDelete = false; + bool Size = false; + AlignedAllocationMode Alignment = AlignedAllocationMode::No; +}; + /// Represents a new-expression for memory allocation and constructor /// calls, e.g: "new CXXNewExpr(foo)". class CXXNewExpr final @@ -4714,7 +4722,7 @@ class SubstNonTypeTemplateParmExpr : public Expr { // sugared: it doesn't need to be resugared later. bool getFinal() const { return Final; } - NamedDecl *getParameter() const; + NonTypeTemplateParmDecl *getParameter() const; bool isReferenceParameter() const { return AssociatedDeclAndRef.getInt(); } diff --git a/clang/include/clang/AST/HLSLResource.h b/clang/include/clang/AST/HLSLResource.h new file mode 100644 index 0000000000000..9cdd81b2d8dab --- /dev/null +++ b/clang/include/clang/AST/HLSLResource.h @@ -0,0 +1,78 @@ +//===- HLSLResource.h - Routines for HLSL resources and bindings ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides shared routines to help analyze HLSL resources and +// theirs bindings during Sema and CodeGen. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_AST_HLSLRESOURCE_H +#define LLVM_CLANG_AST_HLSLRESOURCE_H + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/Attrs.inc" +#include "clang/AST/DeclBase.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/Support/Compiler.h" +#include "llvm/Support/raw_ostream.h" + +namespace clang { + +class HLSLResourceBindingAttr; +class HLSLRVkBindingAttr; + +namespace hlsl { + +struct ResourceBindingAttrs { + HLSLResourceBindingAttr *RegBinding; + HLSLVkBindingAttr *VkBinding; + + ResourceBindingAttrs(const Decl *D) { + RegBinding = D->getAttr(); + bool IsSpirv = D->getASTContext().getTargetInfo().getTriple().isSPIRV(); + VkBinding = IsSpirv ? D->getAttr() : nullptr; + } + + bool hasBinding() const { return RegBinding || VkBinding; } + bool isExplicit() const { + return (RegBinding && RegBinding->hasRegisterSlot()) || VkBinding; + } + + unsigned getSlot() const { + assert(isExplicit() && "no explicit binding"); + if (VkBinding) + return VkBinding->getBinding(); + if (RegBinding && RegBinding->hasRegisterSlot()) + return RegBinding->getSlotNumber(); + llvm_unreachable("no explicit binding"); + } + + unsigned getSpace() const { + if (VkBinding) + return VkBinding->getSet(); + if (RegBinding) + return RegBinding->getSpaceNumber(); + return 0; + } + + bool hasImplicitOrderID() const { + return RegBinding && RegBinding->hasImplicitBindingOrderID(); + } + + unsigned getImplicitOrderID() const { + assert(hasImplicitOrderID()); + return RegBinding->getImplicitBindingOrderID(); + } +}; + +} // namespace hlsl + +} // namespace clang + +#endif // LLVM_CLANG_AST_HLSLRESOURCE_H diff --git a/clang/include/clang/AST/OpenACCClause.h b/clang/include/clang/AST/OpenACCClause.h index 081244fe0efb6..58ba8d91f1277 100644 --- a/clang/include/clang/AST/OpenACCClause.h +++ b/clang/include/clang/AST/OpenACCClause.h @@ -840,16 +840,13 @@ class OpenACCClauseWithVarList : public OpenACCClauseWithExprs { // alloca at the level of the base, and the init at the element level. struct OpenACCPrivateRecipe { VarDecl *AllocaDecl; - Expr *InitExpr; - OpenACCPrivateRecipe(VarDecl *A, Expr *I) : AllocaDecl(A), InitExpr(I) { - assert(!AllocaDecl || AllocaDecl->getInit() == nullptr); - } + OpenACCPrivateRecipe(VarDecl *A) : AllocaDecl(A) {} bool isSet() const { return AllocaDecl; } static OpenACCPrivateRecipe Empty() { - return OpenACCPrivateRecipe(nullptr, nullptr); + return OpenACCPrivateRecipe(/*AllocaDecl=*/nullptr); } }; @@ -901,18 +898,17 @@ class OpenACCPrivateClause final // InitFromTemporary is the 'temp' declaration we put in to be 'copied from'. struct OpenACCFirstPrivateRecipe { VarDecl *AllocaDecl; - Expr *InitExpr; VarDecl *InitFromTemporary; - OpenACCFirstPrivateRecipe(VarDecl *A, Expr *I, VarDecl *T) - : AllocaDecl(A), InitExpr(I), InitFromTemporary(T) { - assert(!AllocaDecl || AllocaDecl->getInit() == nullptr); + OpenACCFirstPrivateRecipe(VarDecl *A, VarDecl *T) + : AllocaDecl(A), InitFromTemporary(T) { assert(!InitFromTemporary || InitFromTemporary->getInit() == nullptr); } bool isSet() const { return AllocaDecl; } static OpenACCFirstPrivateRecipe Empty() { - return OpenACCFirstPrivateRecipe(nullptr, nullptr, nullptr); + return OpenACCFirstPrivateRecipe(/*AllocaDecl=*/nullptr, + /*InitFromTemporary=*/nullptr); } }; @@ -1284,16 +1280,13 @@ class OpenACCCreateClause final // 'main' declaration used for initializaiton, which is fixed. 
struct OpenACCReductionRecipe { VarDecl *AllocaDecl; - Expr *InitExpr; // TODO: OpenACC: this should eventually have the operations here too. - OpenACCReductionRecipe(VarDecl *A, Expr *I) : AllocaDecl(A), InitExpr(I) { - assert(!AllocaDecl || AllocaDecl->getInit() == nullptr); - } + OpenACCReductionRecipe(VarDecl *A) : AllocaDecl(A) {} bool isSet() const { return AllocaDecl; } static OpenACCReductionRecipe Empty() { - return OpenACCReductionRecipe(nullptr, nullptr); + return OpenACCReductionRecipe(/*AllocaDecl=*/nullptr); } }; diff --git a/clang/include/clang/AST/OpenMPClause.h b/clang/include/clang/AST/OpenMPClause.h index b2a6d4b9182b0..68d220a77b18c 100644 --- a/clang/include/clang/AST/OpenMPClause.h +++ b/clang/include/clang/AST/OpenMPClause.h @@ -1149,6 +1149,80 @@ class OMPFullClause final : public OMPNoChildClause { static OMPFullClause *CreateEmpty(const ASTContext &C); }; +/// This class represents the 'looprange' clause in the +/// '#pragma omp fuse' directive +/// +/// \code {c} +/// #pragma omp fuse looprange(1,2) +/// { +/// for(int i = 0; i < 64; ++i) +/// for(int j = 0; j < 256; j+=2) +/// for(int k = 127; k >= 0; --k) +/// \endcode +class OMPLoopRangeClause final : public OMPClause { + friend class OMPClauseReader; + /// Location of '(' + SourceLocation LParenLoc; + + /// Location of first and count expressions + SourceLocation FirstLoc, CountLoc; + + /// Number of looprange arguments (always 2: first, count) + enum { FirstExpr, CountExpr, NumArgs }; + Stmt *Args[NumArgs] = {nullptr, nullptr}; + + /// Set looprange 'first' expression + void setFirst(Expr *E) { Args[FirstExpr] = E; } + + /// Set looprange 'count' expression + void setCount(Expr *E) { Args[CountExpr] = E; } + + /// Build an empty clause for deserialization. + explicit OMPLoopRangeClause() + : OMPClause(llvm::omp::OMPC_looprange, {}, {}) {} + +public: + /// Build a 'looprange' clause AST node. + static OMPLoopRangeClause * + Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, + SourceLocation FirstLoc, SourceLocation CountLoc, + SourceLocation EndLoc, Expr *First, Expr *Count); + + /// Build an empty 'looprange' clause node. + static OMPLoopRangeClause *CreateEmpty(const ASTContext &C); + + // Location getters/setters + SourceLocation getLParenLoc() const { return LParenLoc; } + SourceLocation getFirstLoc() const { return FirstLoc; } + SourceLocation getCountLoc() const { return CountLoc; } + + void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } + void setFirstLoc(SourceLocation Loc) { FirstLoc = Loc; } + void setCountLoc(SourceLocation Loc) { CountLoc = Loc; } + + /// Get looprange 'first' expression + Expr *getFirst() const { return cast_or_null(Args[FirstExpr]); } + + /// Get looprange 'count' expression + Expr *getCount() const { return cast_or_null(Args[CountExpr]); } + + child_range children() { return child_range(Args, Args + NumArgs); } + const_child_range children() const { + return const_child_range(Args, Args + NumArgs); + } + + child_range used_children() { + return child_range(child_iterator(), child_iterator()); + } + const_child_range used_children() const { + return const_child_range(const_child_iterator(), const_child_iterator()); + } + + static bool classof(const OMPClause *T) { + return T->getClauseKind() == llvm::omp::OMPC_looprange; + } +}; + /// Representation of the 'partial' clause of the '#pragma omp unroll' /// directive. 
///
@@ -5816,6 +5890,12 @@ class OMPClauseMappableExprCommon {
  ValueDecl *getAssociatedDeclaration() const {
    return AssociatedDeclaration;
  }
+
+  bool operator==(const MappableComponent &Other) const {
+    return AssociatedExpressionNonContiguousPr ==
+               Other.AssociatedExpressionNonContiguousPr &&
+           AssociatedDeclaration == Other.AssociatedDeclaration;
+  }
};
// List of components of an expression. This first one is the whole
@@ -5829,6 +5909,95 @@ class OMPClauseMappableExprCommon {
  using MappableExprComponentLists = SmallVector;
  using MappableExprComponentListsRef = ArrayRef;
+  // Hash function to allow usage as DenseMap keys.
+  friend llvm::hash_code hash_value(const MappableComponent &MC) {
+    return llvm::hash_combine(MC.getAssociatedExpression(),
+                              MC.getAssociatedDeclaration(),
+                              MC.isNonContiguous());
+  }
+
+public:
+  /// Get the type of an element of a ComponentList Expr \p Exp.
+  ///
+  /// For something like the following:
+  /// ```c
+  /// int *p, **pp;
+  /// ```
+  /// The types for the following Exprs would be:
+  ///   Expr     | Type
+  ///   ---------|-----------
+  ///   p        | int *
+  ///   *p       | int
+  ///   p[0]     | int
+  ///   p[0:1]   | int
+  ///   pp       | int **
+  ///   pp[0]    | int *
+  ///   pp[0:1]  | int *
+  /// Note: this assumes that if \p Exp is an array-section, it is contiguous.
+  static QualType getComponentExprElementType(const Expr *Exp);
+
+  /// Find the attach pointer expression from a list of mappable expression
+  /// components.
+  ///
+  /// This function traverses the component list to find the first
+  /// expression that has a pointer type, which represents the attach
+  /// base pointer expr for the current component-list.
+  ///
+  /// For example, given the following:
+  ///
+  /// ```c
+  /// struct S {
+  ///   int a;
+  ///   int b[10];
+  ///   int c[10][10];
+  ///   int *p;
+  ///   int **pp;
+  /// }
+  /// S s, *ps, **pps, *(pas[10]), ***ppps;
+  /// int i;
+  /// ```
+  ///
+  /// The base-pointers for the following map operands would be:
+  ///   map list-item   | attach base-pointer   | attach base-pointer
+  ///                   | for directives except | target_update (if
+  ///                   | target_update         | different)
+  ///   ----------------|-----------------------|---------------------
+  ///   s               | N/A                   |
+  ///   s.a             | N/A                   |
+  ///   s.p             | N/A                   |
+  ///   ps              | N/A                   |
+  ///   ps->p           | ps                    |
+  ///   ps[1]           | ps                    |
+  ///   *(ps + 1)       | ps                    |
+  ///   (ps + 1)[1]     | ps                    |
+  ///   ps[1:10]        | ps                    |
+  ///   ps->b[10]       | ps                    |
+  ///   ps->p[10]       | ps->p                 |
+  ///   ps->c[1][2]     | ps                    |
+  ///   ps->c[1:2][2]   | (error diagnostic)    | N/A, TODO: ps
+  ///   ps->c[1:1][2]   | ps                    | N/A, TODO: ps
+  ///   pps[1][2]       | pps[1]                |
+  ///   pps[1:1][2]     | pps[1:1]              | N/A, TODO: pps[1:1]
+  ///   pps[1:i][2]     | pps[1:i]              | N/A, TODO: pps[1:i]
+  ///   pps[1:2][2]     | (error diagnostic)    | N/A
+  ///   pps[1]->p       | pps[1]                |
+  ///   pps[1]->p[10]   | pps[1]                |
+  ///   pas[1]          | N/A                   |
+  ///   pas[1][2]       | pas[1]                |
+  ///   ppps[1][2]      | ppps[1]               |
+  ///   ppps[1][2][3]   | ppps[1][2]            |
+  ///   ppps[1][2:1][3] | ppps[1][2:1]          | N/A, TODO: ppps[1][2:1]
+  ///   ppps[1][2:2][3] | (error diagnostic)    | N/A
+  /// Returns a pair of the attach pointer expression and its depth in the
+  /// component list.
+  /// TODO: This may need to be updated to handle ref_ptr/ptee cases for byref
+  /// map operands.
+  /// TODO: Handle cases for target-update, where the list-item is a
+  /// non-contiguous array-section that still has a base-pointer.
+  static std::pair>
+  findAttachPtrExpr(MappableExprComponentListRef Components,
+                    OpenMPDirectiveKind CurDirKind);
+
protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index 1d1b7f183f75a..7a2881f6124f3 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -2194,6 +2194,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLocsHelper(
     is the only callback that's made for this instantiation. \
     We use getTemplateArgsAsWritten() to distinguish. */ \
    if (const auto *ArgsWritten = D->getTemplateArgsAsWritten()) { \
+      assert(D->getTemplateSpecializationKind() != TSK_ImplicitInstantiation); \
      /* The args that remains unspecialized. */ \
      TRY_TO(TraverseTemplateArgumentLocsHelper( \
          ArgsWritten->getTemplateArgs(), ArgsWritten->NumTemplateArgs)); \
@@ -3176,6 +3177,9 @@ DEF_TRAVERSE_STMT(OMPUnrollDirective,
DEF_TRAVERSE_STMT(OMPReverseDirective,
                  { TRY_TO(TraverseOMPExecutableDirective(S)); })
+DEF_TRAVERSE_STMT(OMPFuseDirective,
+                  { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
DEF_TRAVERSE_STMT(OMPInterchangeDirective,
                  { TRY_TO(TraverseOMPExecutableDirective(S)); })
@@ -3493,6 +3497,14 @@ bool RecursiveASTVisitor<Derived>::VisitOMPFullClause(OMPFullClause *C) {
  return true;
}
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPLoopRangeClause(
+    OMPLoopRangeClause *C) {
+  TRY_TO(TraverseStmt(C->getFirst()));
+  TRY_TO(TraverseStmt(C->getCount()));
+  return true;
+}
+
template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPPartialClause(OMPPartialClause *C) {
  TRY_TO(TraverseStmt(C->getFactor()));
diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h
index d9f87f1e49b40..bc6aeaa8d143c 100644
--- a/clang/include/clang/AST/StmtOpenMP.h
+++ b/clang/include/clang/AST/StmtOpenMP.h
@@ -21,6 +21,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/Support/Casting.h"
namespace clang {
@@ -677,6 +678,10 @@ class OMPParallelDirective : public OMPExecutableDirective {
  }
};
+// Forward declaration of a generic loop transformation. Used in the declaration
+// of OMPLoopBasedDirective.
+class OMPLoopTransformationDirective;
+
/// The base class for all loop-based directives, including loop transformation
/// directives.
class OMPLoopBasedDirective : public OMPExecutableDirective {
@@ -889,24 +894,23 @@ class OMPLoopBasedDirective : public OMPExecutableDirective {
  /// Calls the specified callback function for all the loops in \p CurStmt,
  /// from the outermost to the innermost.
-  static bool doForAllLoops(
-      Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
-      llvm::function_ref<bool(unsigned, Stmt *)> Callback,
-      llvm::function_ref<void(OMPCanonicalLoopNestTransformationDirective *)>
-          OnTransformationCallback);
+  static bool
+  doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
+                unsigned NumLoops,
+                llvm::function_ref<bool(unsigned, Stmt *)> Callback,
+                llvm::function_ref<void(OMPLoopTransformationDirective *)>
+                    OnTransformationCallback);
  static bool
  doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, const Stmt *)> Callback,
-                llvm::function_ref<
-                    void(const OMPCanonicalLoopNestTransformationDirective *)>
+                llvm::function_ref<void(const OMPLoopTransformationDirective *)>
                    OnTransformationCallback) {
    auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
      return Callback(Cnt, CurStmt);
    };
    auto &&NewTransformCb =
-        [OnTransformationCallback](
-            OMPCanonicalLoopNestTransformationDirective *A) {
+        [OnTransformationCallback](OMPLoopTransformationDirective *A) {
          OnTransformationCallback(A);
        };
    return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
@@ -919,7 +923,7 @@ class OMPLoopBasedDirective : public OMPExecutableDirective {
  doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, Stmt *)> Callback) {
-    auto &&TransformCb = [](OMPCanonicalLoopNestTransformationDirective *) {};
+    auto &&TransformCb = [](OMPLoopTransformationDirective *) {};
    return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback,
                         TransformCb);
  }
@@ -957,9 +961,11 @@ class OMPLoopBasedDirective : public OMPExecutableDirective {
};
/// Common class of data shared between
-/// OMPCanonicalLoopNestTransformationDirective and transformations over
-/// canonical loop sequences.
+/// OMPCanonicalLoopNestTransformationDirective and
+/// OMPCanonicalLoopSequenceTransformationDirective
class OMPLoopTransformationDirective {
+  friend class ASTStmtReader;
+
  /// Number of (top-level) generated loops.
  /// This value is 1 for most transformations as they only map one loop nest
  /// into another.
@@ -969,15 +975,39 @@ class OMPLoopTransformationDirective {
  /// generate more than one loop nest, so the value would be >= 1.
  unsigned NumGeneratedTopLevelLoops = 1;
+  /// We need this because we cannot easily make OMPLoopTransformationDirective
+  /// a proper Stmt.
+  Stmt *S = nullptr;
+
protected:
  void setNumGeneratedTopLevelLoops(unsigned N) {
    NumGeneratedTopLevelLoops = N;
  }
+  explicit OMPLoopTransformationDirective(Stmt *S) : S(S) {}
+
public:
  unsigned getNumGeneratedTopLevelLoops() const {
    return NumGeneratedTopLevelLoops;
  }
+
+  /// Returns the specific directive related to this loop transformation.
+  Stmt *getDirective() const { return S; }
+
+  /// Get the de-sugared statements after the loop transformation.
+  ///
+  /// Might be nullptr if either the directive generates no loops and is handled
+  /// directly in CodeGen, or resolving a template-dependence context is
+  /// required.
+  Stmt *getTransformedStmt() const;
+
+  /// Return preinits statement.
+  Stmt *getPreInits() const;
+
+  static bool classof(const Stmt *T) {
+    return isa<OMPCanonicalLoopNestTransformationDirective,
+               OMPCanonicalLoopSequenceTransformationDirective>(T);
+  }
};
/// The base class for all transformation directives of canonical loop nests.
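Because loop-nest and loop-sequence transformations now share OMPLoopTransformationDirective, callers can handle both uniformly. The following is a minimal sketch, not part of this patch; it assumes the CastInfo specialization added at the end of StmtOpenMP.h and a non-dependent context.

// Follow a chain of loop transformation directives down to the statements
// they generate. getTransformedStmt() may be null in dependent contexts or
// when the transformation is handled directly in CodeGen.
static Stmt *skipLoopTransformations(Stmt *S) {
  while (auto *TD = dyn_cast_or_null<OMPLoopTransformationDirective>(S))
    S = TD->getTransformedStmt();
  return S;
}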
@@ -990,7 +1020,8 @@ class OMPCanonicalLoopNestTransformationDirective
  explicit OMPCanonicalLoopNestTransformationDirective(
      StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc,
      SourceLocation EndLoc, unsigned NumAssociatedLoops)
-      : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, NumAssociatedLoops) {}
+      : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, NumAssociatedLoops),
+        OMPLoopTransformationDirective(this) {}
public:
  /// Return the number of associated (consumed) loops.
@@ -5928,6 +5959,112 @@ class OMPInterchangeDirective final
  }
};
+/// The base class for all transformation directives of canonical loop
+/// sequences (currently only 'fuse')
+class OMPCanonicalLoopSequenceTransformationDirective
+    : public OMPExecutableDirective,
+      public OMPLoopTransformationDirective {
+  friend class ASTStmtReader;
+
+protected:
+  explicit OMPCanonicalLoopSequenceTransformationDirective(
+      StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc,
+      SourceLocation EndLoc)
+      : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
+        OMPLoopTransformationDirective(this) {}
+
+public:
+  /// Get the de-sugared statements after the loop transformation.
+  ///
+  /// Might be nullptr if either the directive generates no loops and is handled
+  /// directly in CodeGen, or resolving a template-dependence context is
+  /// required.
+  Stmt *getTransformedStmt() const;
+
+  /// Return preinits statement.
+  Stmt *getPreInits() const;
+
+  static bool classof(const Stmt *T) {
+    Stmt::StmtClass C = T->getStmtClass();
+    return C == OMPFuseDirectiveClass;
+  }
+};
+
+/// Represents the '#pragma omp fuse' loop transformation directive
+///
+/// \code{c}
+/// #pragma omp fuse
+/// {
+///   for(int i = 0; i < m1; ++i) {...}
+///   for(int j = 0; j < m2; ++j) {...}
+///   ...
+/// }
+/// \endcode
+class OMPFuseDirective final
+    : public OMPCanonicalLoopSequenceTransformationDirective {
+  friend class ASTStmtReader;
+  friend class OMPExecutableDirective;
+
+  // Offsets of child members.
+  enum {
+    PreInitsOffset = 0,
+    TransformedStmtOffset,
+  };
+
+  explicit OMPFuseDirective(SourceLocation StartLoc, SourceLocation EndLoc)
+      : OMPCanonicalLoopSequenceTransformationDirective(
+            OMPFuseDirectiveClass, llvm::omp::OMPD_fuse, StartLoc, EndLoc) {}
+
+  void setPreInits(Stmt *PreInits) {
+    Data->getChildren()[PreInitsOffset] = PreInits;
+  }
+
+  void setTransformedStmt(Stmt *S) {
+    Data->getChildren()[TransformedStmtOffset] = S;
+  }
+
+public:
+  /// Create a new AST node representation for '#pragma omp fuse'
+  ///
+  /// \param C Context of the AST
+  /// \param StartLoc Location of the introducer (e.g. the 'omp' token)
+  /// \param EndLoc Location of the directive's end (e.g. the tok::eod)
+  /// \param Clauses The directive's clauses
+  /// \param NumLoops Total number of loops in the canonical loop sequence.
+  /// \param NumGeneratedTopLevelLoops Number of top-level generated loops.
+  //                                   Typically 1 but looprange clause can
+  //                                   change this.
+  /// \param AssociatedStmt The outermost associated loop
+  /// \param TransformedStmt The loop nest after fusion, or nullptr in
+  ///        dependent contexts
+  /// \param PreInits Helper preinits statements for the loop nest
+  static OMPFuseDirective *
+  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+         ArrayRef<OMPClause *> Clauses, unsigned NumGeneratedTopLevelLoops,
+         Stmt *AssociatedStmt, Stmt *TransformedStmt, Stmt *PreInits);
+
+  /// Build an empty '#pragma omp fuse' AST node for deserialization
+  ///
+  /// \param C Context of the AST
+  /// \param NumClauses Number of clauses to allocate
+  /// \param NumLoops Number of top level loops to allocate
+  static OMPFuseDirective *CreateEmpty(const ASTContext &C,
+                                       unsigned NumClauses);
+
+  /// Gets the associated loops after the transformation. This is the de-sugared
+  /// replacement or nullptr in dependent contexts.
+  Stmt *getTransformedStmt() const {
+    return Data->getChildren()[TransformedStmtOffset];
+  }
+
+  /// Return preinits statement.
+  Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }
+
+  static bool classof(const Stmt *T) {
+    return T->getStmtClass() == OMPFuseDirectiveClass;
+  }
+};
+
/// This represents '#pragma omp scan' directive.
///
/// \code
@@ -6596,4 +6733,37 @@ class OMPAssumeDirective final : public OMPExecutableDirective {
} // end namespace clang
+namespace llvm {
+// Allow a Stmt* to be cast correctly to an OMPLoopTransformationDirective*.
+// The default routines would just use a C-style cast which won't work well
+// for the multiple inheritance here. We have to use a static cast from the
+// corresponding subclass.
+template <>
+struct CastInfo<clang::OMPLoopTransformationDirective, clang::Stmt *>
+    : public NullableValueCastFailed<clang::OMPLoopTransformationDirective *>,
+      public DefaultDoCastIfPossible<
+          clang::OMPLoopTransformationDirective *, clang::Stmt *,
+          CastInfo<clang::OMPLoopTransformationDirective, clang::Stmt *>> {
+  static bool isPossible(const clang::Stmt *T) {
+    return clang::OMPLoopTransformationDirective::classof(T);
+  }
+
+  static clang::OMPLoopTransformationDirective *doCast(clang::Stmt *T) {
+    if (auto *D =
+            dyn_cast<clang::OMPCanonicalLoopNestTransformationDirective>(T))
+      return static_cast<clang::OMPLoopTransformationDirective *>(D);
+    if (auto *D =
+            dyn_cast<clang::OMPCanonicalLoopSequenceTransformationDirective>(T))
+      return static_cast<clang::OMPLoopTransformationDirective *>(D);
+    llvm_unreachable("unexpected type");
+  }
+};
+template <>
+struct CastInfo<clang::OMPLoopTransformationDirective, const clang::Stmt *>
+    : public ConstStrippingForwardingCast<
+          clang::OMPLoopTransformationDirective, const clang::Stmt *,
+          CastInfo<clang::OMPLoopTransformationDirective, clang::Stmt *>> {};
+
+} // namespace llvm
+
#endif
diff --git a/clang/include/clang/AST/TypeBase.h b/clang/include/clang/AST/TypeBase.h
index b02d9c7499fe5..6786b2f6cbc78 100644
--- a/clang/include/clang/AST/TypeBase.h
+++ b/clang/include/clang/AST/TypeBase.h
@@ -3495,7 +3495,9 @@ class AdjustedType : public Type, public llvm::FoldingSetNode {
  AdjustedType(TypeClass TC, QualType OriginalTy, QualType AdjustedTy,
               QualType CanonicalPtr)
-      : Type(TC, CanonicalPtr, OriginalTy->getDependence()),
+      : Type(TC, CanonicalPtr,
+             AdjustedTy->getDependence() |
+                 (OriginalTy->getDependence() & ~TypeDependence::Dependent)),
        OriginalTy(OriginalTy), AdjustedTy(AdjustedTy) {}
public:
@@ -6700,15 +6702,21 @@ class HLSLAttributedResourceType : public Type, public llvm::FoldingSetNode {
    LLVM_PREFERRED_TYPE(bool)
    uint8_t RawBuffer : 1;
+    LLVM_PREFERRED_TYPE(bool)
+    uint8_t IsCounter : 1;
+
    Attributes(llvm::dxil::ResourceClass ResourceClass, bool IsROV = false,
-               bool RawBuffer = false)
-        : ResourceClass(ResourceClass), IsROV(IsROV), RawBuffer(RawBuffer) {}
+               bool RawBuffer = false, bool IsCounter = false)
+        : ResourceClass(ResourceClass), IsROV(IsROV), RawBuffer(RawBuffer),
+          IsCounter(IsCounter) {}
-    Attributes() : Attributes(llvm::dxil::ResourceClass::UAV,
                              false, false) {}
+    Attributes()
+        : Attributes(llvm::dxil::ResourceClass::UAV, false, false, false) {}
    friend bool operator==(const Attributes &LHS, const Attributes &RHS) {
-      return std::tie(LHS.ResourceClass, LHS.IsROV, LHS.RawBuffer) ==
-             std::tie(RHS.ResourceClass, RHS.IsROV, RHS.RawBuffer);
+      return std::tie(LHS.ResourceClass, LHS.IsROV, LHS.RawBuffer,
+                      LHS.IsCounter) == std::tie(RHS.ResourceClass, RHS.IsROV,
+                                                 RHS.RawBuffer, RHS.IsCounter);
    }
    friend bool operator!=(const Attributes &LHS, const Attributes &RHS) {
      return !(LHS == RHS);
@@ -6749,6 +6757,7 @@ class HLSLAttributedResourceType : public Type, public llvm::FoldingSetNode {
    ID.AddInteger(static_cast<uint32_t>(Attrs.ResourceClass));
    ID.AddBoolean(Attrs.IsROV);
    ID.AddBoolean(Attrs.RawBuffer);
+    ID.AddBoolean(Attrs.IsCounter);
  }
  static bool classof(const Type *T) {
diff --git a/clang/include/clang/AST/TypeProperties.td b/clang/include/clang/AST/TypeProperties.td
index b3932a67db69d..9dc85fb88e267 100644
--- a/clang/include/clang/AST/TypeProperties.td
+++ b/clang/include/clang/AST/TypeProperties.td
@@ -662,6 +662,9 @@ let Class = HLSLAttributedResourceType in {
  def : Property<"rawBuffer", Bool> {
    let Read = [{ node->getAttrs().RawBuffer }];
  }
+  def : Property<"isCounter", Bool> {
+    let Read = [{ node->getAttrs().IsCounter }];
+  }
  def : Property<"wrappedTy", QualType> {
    let Read = [{ node->getWrappedType() }];
  }
@@ -669,7 +672,7 @@ let Class = HLSLAttributedResourceType in {
    let Read = [{ node->getContainedType() }];
  }
  def : Creator<[{
-    HLSLAttributedResourceType::Attributes attrs(static_cast<llvm::dxil::ResourceClass>(resClass), isROV, rawBuffer);
+    HLSLAttributedResourceType::Attributes attrs(static_cast<llvm::dxil::ResourceClass>(resClass), isROV, rawBuffer, isCounter);
    return ctx.getHLSLAttributedResourceType(wrappedTy, containedTy, attrs);
  }]>;
}
diff --git a/clang/include/clang/Analysis/Analyses/LifetimeAnnotations.h b/clang/include/clang/Analysis/Analyses/LifetimeAnnotations.h
new file mode 100644
index 0000000000000..229d16c20b0f8
--- /dev/null
+++ b/clang/include/clang/Analysis/Analyses/LifetimeAnnotations.h
@@ -0,0 +1,44 @@
+//===- LifetimeAnnotations.h - -*--------------- C++--------------------*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Helper functions to inspect and infer lifetime annotations.
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_ANALYSIS_ANALYSES_LIFETIMEANNOTATIONS_H
+#define LLVM_CLANG_ANALYSIS_ANALYSES_LIFETIMEANNOTATIONS_H
+
+#include "clang/AST/DeclCXX.h"
+
+namespace clang {
+namespace lifetimes {
+
+/// Returns the most recent declaration of the method to ensure all
+/// lifetime-bound attributes from redeclarations are considered.
+const FunctionDecl *getDeclWithMergedLifetimeBoundAttrs(const FunctionDecl *FD);
+
+/// Returns the most recent declaration of the method to ensure all
+/// lifetime-bound attributes from redeclarations are considered.
+const CXXMethodDecl *
+getDeclWithMergedLifetimeBoundAttrs(const CXXMethodDecl *CMD);
+
+// Return true if this is a "normal" assignment operator.
+// We assume that a normal assignment operator always returns *this, that is,
+// an lvalue reference that is the same type as the implicit object parameter
+// (or the LHS for a non-member operator==).
+bool isNormalAssignmentOperator(const FunctionDecl *FD); + +/// Returns true if this is an assignment operator where the parameter +/// has the lifetimebound attribute. +bool isAssignmentOperatorLifetimeBound(const CXXMethodDecl *CMD); + +/// Returns true if the implicit object parameter (this) should be considered +/// lifetimebound, either due to an explicit lifetimebound attribute on the +/// method or because it's a normal assignment operator. +bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD); +} // namespace lifetimes +} // namespace clang + +#endif // LLVM_CLANG_ANALYSIS_ANALYSES_LIFETIMEANNOTATIONS_H diff --git a/clang/include/clang/Analysis/Analyses/LifetimeSafety.h b/clang/include/clang/Analysis/Analyses/LifetimeSafety.h index 7e1bfc903083e..512cb76cd6349 100644 --- a/clang/include/clang/Analysis/Analyses/LifetimeSafety.h +++ b/clang/include/clang/Analysis/Analyses/LifetimeSafety.h @@ -75,13 +75,14 @@ template struct ID { } }; -template -inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, ID ID) { - return OS << ID.Value; -} - using LoanID = ID; using OriginID = ID; +inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, LoanID ID) { + return OS << ID.Value; +} +inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, OriginID ID) { + return OS << ID.Value; +} // Using LLVM's immutable collections is efficient for dataflow analysis // as it avoids deep copies during state transitions. diff --git a/clang/include/clang/Analysis/CFG.h b/clang/include/clang/Analysis/CFG.h index 1b1ff5e558ec5..6dd7d138e4357 100644 --- a/clang/include/clang/Analysis/CFG.h +++ b/clang/include/clang/Analysis/CFG.h @@ -1251,6 +1251,7 @@ class CFG { bool MarkElidedCXXConstructors = false; bool AddVirtualBaseBranches = false; bool OmitImplicitValueInitializers = false; + bool AssumeReachableDefaultInSwitchStatements = false; BuildOptions() = default; diff --git a/clang/include/clang/Analysis/FlowSensitive/CachedConstAccessorsLattice.h b/clang/include/clang/Analysis/FlowSensitive/CachedConstAccessorsLattice.h index 78b03d325efd9..6496771ad037e 100644 --- a/clang/include/clang/Analysis/FlowSensitive/CachedConstAccessorsLattice.h +++ b/clang/include/clang/Analysis/FlowSensitive/CachedConstAccessorsLattice.h @@ -90,7 +90,7 @@ template class CachedConstAccessorsLattice : public Base { return Base::operator==(Other); } - LatticeJoinEffect join(const CachedConstAccessorsLattice &Other); + LatticeEffect join(const CachedConstAccessorsLattice &Other); private: // Maps a record storage location and const method to the value to return @@ -121,13 +121,14 @@ joinConstMethodMap( llvm::SmallDenseMap> &Map2, LatticeEffect &Effect) { + // Intersect the two maps, and note if change was made. 
llvm::SmallDenseMap> Result; for (auto &[Loc, DeclToT] : Map1) { auto It = Map2.find(Loc); if (It == Map2.end()) { - Effect = LatticeJoinEffect::Changed; + Effect = LatticeEffect::Changed; continue; } const auto &OtherDeclToT = It->second; @@ -135,7 +136,7 @@ joinConstMethodMap( for (auto [Func, Var] : DeclToT) { T *OtherVar = OtherDeclToT.lookup(Func); if (OtherVar == nullptr || OtherVar != Var) { - Effect = LatticeJoinEffect::Changed; + Effect = LatticeEffect::Changed; continue; } JoinedDeclToT.insert({Func, Var}); diff --git a/clang/include/clang/Analysis/PathDiagnostic.h b/clang/include/clang/Analysis/PathDiagnostic.h index 5907df022e449..197920d4cd100 100644 --- a/clang/include/clang/Analysis/PathDiagnostic.h +++ b/clang/include/clang/Analysis/PathDiagnostic.h @@ -885,6 +885,10 @@ class PathDiagnostic : public llvm::FoldingSetNode { return UniqueingDecl; } + /// Get a hash that identifies the issue. + SmallString<32> getIssueHash(const SourceManager &SrcMgr, + const LangOptions &LangOpts) const; + void flattenLocations() { Loc.flatten(); for (const auto &I : pathImpl) diff --git a/clang/include/clang/Basic/AMDGPUTypes.def b/clang/include/clang/Basic/AMDGPUTypes.def index d3dff446f9edf..089a72b5c102e 100644 --- a/clang/include/clang/Basic/AMDGPUTypes.def +++ b/clang/include/clang/Basic/AMDGPUTypes.def @@ -21,6 +21,7 @@ #endif AMDGPU_OPAQUE_PTR_TYPE("__amdgpu_buffer_rsrc_t", AMDGPUBufferRsrc, AMDGPUBufferRsrcTy, 128, 128, 8) +AMDGPU_OPAQUE_PTR_TYPE("__amdgpu_texture_t", AMDGPUTexture, AMDGPUTextureTy, 256, 256, 0) AMDGPU_NAMED_BARRIER_TYPE("__amdgpu_named_workgroup_barrier_t", AMDGPUNamedWorkgroupBarrier, AMDGPUNamedWorkgroupBarrierTy, 128, 32, 0) diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td index 2623f9ff6972f..3c697ed8dd882 100644 --- a/clang/include/clang/Basic/Attr.td +++ b/clang/include/clang/Basic/Attr.td @@ -1470,7 +1470,7 @@ def Constructor : InheritableAttr { let TemplateDependent = 1; let Documentation = [CtorDtorDocs]; let AdditionalMembers = [{ - static constexpr unsigned DefaultPriority = 65535; + static constexpr unsigned DefaultPriority = 65535; }]; } @@ -1815,7 +1815,7 @@ def Destructor : InheritableAttr { let TemplateDependent = 1; let Documentation = [CtorDtorDocs]; let AdditionalMembers = [{ - static constexpr unsigned int DefaultPriority = 65535; + static constexpr unsigned int DefaultPriority = 65535; }]; } @@ -3921,16 +3921,31 @@ def NoSanitize : InheritableAttr { }]; } -// Attributes to disable a specific sanitizer. No new sanitizers should be added +// Attribute to disable AddressSanitizer. No new spellings should be added // to this list; the no_sanitize attribute should be extended instead. -def NoSanitizeSpecific : InheritableAttr { +def NoSanitizeAddress : InheritableAttr { let Spellings = [GCC<"no_address_safety_analysis">, - GCC<"no_sanitize_address">, - GCC<"no_sanitize_thread">, - Clang<"no_sanitize_memory">]; + GCC<"no_sanitize_address">]; let Subjects = SubjectList<[Function, GlobalVar], ErrorDiag>; - let Documentation = [NoSanitizeAddressDocs, NoSanitizeThreadDocs, - NoSanitizeMemoryDocs]; + let Documentation = [NoSanitizeAddressDocs]; + let ASTNode = 0; +} + +// Attribute to disable ThreadSanitizer. No new spellings should be added +// to this list; the no_sanitize attribute should be extended instead. 
+def NoSanitizeThread : InheritableAttr { + let Spellings = [GCC<"no_sanitize_thread">]; + let Subjects = SubjectList<[Function], ErrorDiag>; + let Documentation = [NoSanitizeThreadDocs]; + let ASTNode = 0; +} + +// Attribute to disable MemorySanitizer. No new spellings should be added +// to this list; the no_sanitize attribute should be extended instead. +def NoSanitizeMemory : InheritableAttr { + let Spellings = [Clang<"no_sanitize_memory">]; + let Subjects = SubjectList<[Function], ErrorDiag>; + let Documentation = [NoSanitizeMemoryDocs]; let ASTNode = 0; } @@ -5059,6 +5074,12 @@ def HLSLRawBuffer : TypeAttr { let Documentation = [InternalOnly]; } +def HLSLIsCounter : TypeAttr { + let Spellings = [CXX11<"hlsl", "is_counter">]; + let LangOpts = [HLSL]; + let Documentation = [InternalOnly]; +} + def HLSLGroupSharedAddressSpace : TypeAttr { let Spellings = [CustomKeyword<"groupshared">]; let Subjects = SubjectList<[Var]>; diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td index ee212a9b50f36..20a52b49a8f10 100644 --- a/clang/include/clang/Basic/AttrDocs.td +++ b/clang/include/clang/Basic/AttrDocs.td @@ -13,16 +13,16 @@ // version control. // // To run clang-tblgen to generate the .rst file: -// clang-tblgen -gen-attr-docs -I /llvm/tools/clang/include -// /llvm/tools/clang/include/clang/Basic/Attr.td -o -// /llvm/tools/clang/docs/AttributeReference.rst +// clang-tblgen -gen-attr-docs -I /clang/include +// /clang/include/clang/Basic/Attr.td -o +// /clang/docs/AttributeReference.rst // // To run sphinx to generate the .html files (note that sphinx-build must be // available on the PATH): // Windows (from within the clang\docs directory): // make.bat html -// Non-Windows (from within the clang\docs directory): -// sphinx-build -b html _build/html +// Non-Windows (from within the clang/docs directory): +// sphinx-build -b html . _build/html def GlobalDocumentation { code Intro =[{.. @@ -3629,6 +3629,7 @@ instrumentations should not be applied. The attribute takes a list of string literals with the following accepted values: + * all values accepted by ``-fno-sanitize=``; * ``coverage``, to disable SanitizerCoverage instrumentation. diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def index 48437c9397570..b856ad145824d 100644 --- a/clang/include/clang/Basic/Builtins.def +++ b/clang/include/clang/Basic/Builtins.def @@ -34,6 +34,7 @@ // Q -> target builtin type, followed by a character to distinguish the builtin type // Qa -> AArch64 svcount_t builtin type. // Qb -> AMDGPU __amdgpu_buffer_rsrc_t builtin type. +// Qt -> AMDGPU __amdgpu_texture_t builtin type. // E -> ext_vector, followed by the number of elements and the base type. // X -> _Complex, followed by the base type. // Y -> ptrdiff_t @@ -66,7 +67,8 @@ // The third value provided to the macro specifies information about attributes // of the function. These must be kept in sync with the predicates in the -// Builtin::Context class. Currently we have: +// Builtin::Context class. Note: In the descriptions below, {num} is a +// placeholder for an integer. Currently we have: // n -> nothrow // r -> noreturn // U -> pure @@ -82,23 +84,23 @@ // h -> this function requires a specific header or an explicit declaration. // i -> this is a runtime library implemented function without the // '__builtin_' prefix. It will be implemented in compiler-rt or libgcc. -// p:N: -> this is a printf-like function whose Nth argument is the format -// string. 
-// P:N: -> similar to the p:N: attribute, but the function is like vprintf -// in that it accepts its arguments as a va_list rather than -// through an ellipsis -// s:N: -> this is a scanf-like function whose Nth argument is the format -// string. -// S:N: -> similar to the s:N: attribute, but the function is like vscanf -// in that it accepts its arguments as a va_list rather than -// through an ellipsis +// p:{num}: -> this is a printf-like function whose {num}th argument is the +// format string. +// P:{num}: -> similar to the p:{num}: attribute, but the function is like +// vprintf in that it accepts its arguments as a va_list rather than +// through an ellipsis +// s:{num}: -> this is a scanf-like function whose {num}th argument is the +// format string. +// S:{num}: -> similar to the s:{num}: attribute, but the function is like +// vscanf in that it accepts its arguments as a va_list rather than +// through an ellipsis // e -> const, but only when -fno-math-errno and FP exceptions are ignored // g -> const when FP exceptions are ignored // j -> returns_twice (like setjmp) // u -> arguments are not evaluated for their side-effects -// V:N: -> requires vectors of at least N bits to be legal -// C -> callback behavior: argument N is called with argument -// M_0, ..., M_k as payload +// V:{num}: -> requires vectors of at least {num} bits to be legal +// C<{num},M_0,...,M_k> -> callback behavior: argument {num} is called with +// argument M_0, ..., M_k as payload // z -> this is a function in (possibly-versioned) namespace std // E -> this function can be constant evaluated by Clang frontend // G -> this is a C++20 consteval function diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td index 35d2c3e19fdf9..468121f7d20ab 100644 --- a/clang/include/clang/Basic/Builtins.td +++ b/clang/include/clang/Basic/Builtins.td @@ -4945,6 +4945,12 @@ def HLSLResourceHandleFromImplicitBinding : LangBuiltin<"HLSL_LANG"> { let Prototype = "void(...)"; } +def HLSLResourceNonUniformIndex : LangBuiltin<"HLSL_LANG"> { + let Spellings = ["__builtin_hlsl_resource_nonuniformindex"]; + let Attributes = [NoThrow]; + let Prototype = "uint32_t(uint32_t)"; +} + def HLSLAll : LangBuiltin<"HLSL_LANG"> { let Spellings = ["__builtin_hlsl_all"]; let Attributes = [NoThrow, Const]; @@ -5095,6 +5101,12 @@ def HLSLIsinf : LangBuiltin<"HLSL_LANG"> { let Prototype = "void(...)"; } +def HLSLIsnan : LangBuiltin<"HLSL_LANG"> { + let Spellings = ["__builtin_hlsl_elementwise_isnan"]; + let Attributes = [NoThrow, Const]; + let Prototype = "void(...)"; +} + def HLSLLerp : LangBuiltin<"HLSL_LANG"> { let Spellings = ["__builtin_hlsl_lerp"]; let Attributes = [NoThrow, Const, CustomTypeChecking]; diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def index 32b5aa5ac1377..3e45c04687a64 100644 --- a/clang/include/clang/Basic/BuiltinsAMDGPU.def +++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def @@ -163,7 +163,7 @@ BUILTIN(__builtin_amdgcn_qsad_pk_u16_u8, "WUiWUiUiWUi", "nc") BUILTIN(__builtin_amdgcn_mqsad_pk_u16_u8, "WUiWUiUiWUi", "nc") BUILTIN(__builtin_amdgcn_mqsad_u32_u8, "V4UiWUiUiV4Ui", "nc") -BUILTIN(__builtin_amdgcn_make_buffer_rsrc, "Qbv*sii", "nc") +BUILTIN(__builtin_amdgcn_make_buffer_rsrc, "Qbv*sWii", "nc") BUILTIN(__builtin_amdgcn_raw_buffer_store_b8, "vUcQbiiIi", "n") BUILTIN(__builtin_amdgcn_raw_buffer_store_b16, "vUsQbiiIi", "n") BUILTIN(__builtin_amdgcn_raw_buffer_store_b32, "vUiQbiiIi", "n") diff --git 
a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def index db71efc238386..cf8bdd2a429df 100644 --- a/clang/include/clang/Basic/BuiltinsPPC.def +++ b/clang/include/clang/Basic/BuiltinsPPC.def @@ -1105,6 +1105,13 @@ UNALIASED_CUSTOM_BUILTIN(mma_disassemble_dmr, "vv*W1024*", false, UNALIASED_CUSTOM_BUILTIN(mma_build_dmr, "vW1024*VVVVVVVV", false, "mma,isa-future-instructions") +UNALIASED_CUSTOM_BUILTIN(mma_dmsha2hash, "vW1024*W1024*Ii", true, + "mma,isa-future-instructions") +UNALIASED_CUSTOM_BUILTIN(mma_dmsha3hash, "vW2048*Ii", true, + "mma,isa-future-instructions") +UNALIASED_CUSTOM_BUILTIN(mma_dmxxshapad, "vW1024*VIiIiIi", true, + "mma,isa-future-instructions") + // MMA builtins with positive/negative multiply/accumulate. UNALIASED_CUSTOM_MMA_BUILTIN(mma_xvf16ger2, "vW512*VV", "mma,paired-vector-memops") diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td index b80f733066b65..e98bee28c15be 100644 --- a/clang/include/clang/Basic/BuiltinsX86.td +++ b/clang/include/clang/Basic/BuiltinsX86.td @@ -93,9 +93,6 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in { } let Features = "sse2" in { - def packsswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">; - def packssdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">; - def packuswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">; def vec_ext_v2di : X86Builtin<"long long int(_Vector<2, long long int>, _Constant int)">; def vec_ext_v4si : X86Builtin<"int(_Vector<4, int>, _Constant int)">; def vec_ext_v4sf : X86Builtin<"float(_Vector<4, float>, _Constant int)">; @@ -108,6 +105,9 @@ let Attributes = [Const, NoThrow, RequiredVectorWidth<128>] in { def pavgw128 : X86Builtin<"_Vector<8, unsigned short>(_Vector<8, unsigned short>, _Vector<8, unsigned short>)">; def pmulhw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">; def pmulhuw128 : X86Builtin<"_Vector<8, unsigned short>(_Vector<8, unsigned short>, _Vector<8, unsigned short>)">; + def packsswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">; + def packssdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">; + def packuswb128 : X86Builtin<"_Vector<16, char>(_Vector<8, short>, _Vector<8, short>)">; } let Features = "sse3" in { @@ -312,7 +312,6 @@ let Features = "ssse3", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] let Features = "sse4.1", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { def insertps128 : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Constant char)">; - def packusdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">; def roundps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Constant int)">; def roundss : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Constant int)">; def roundsd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, double>, _Constant int)">; @@ -338,6 +337,7 @@ let Features = "sse4.1", Attributes = [NoThrow, Const, Constexpr, RequiredVector def pblendvb128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>, _Vector<16, char>)">; def pmuldq128 : X86Builtin<"_Vector<2, long long int>(_Vector<4, int>, _Vector<4, int>)">; + def packusdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">; } let Features = "sse4.2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { @@ -571,10 +571,6 @@ 
let Features = "avx", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { def mpsadbw256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Constant char)">; - def packsswb256 : X86Builtin<"_Vector<32, char>(_Vector<16, short>, _Vector<16, short>)">; - def packssdw256 : X86Builtin<"_Vector<16, short>(_Vector<8, int>, _Vector<8, int>)">; - def packuswb256 : X86Builtin<"_Vector<32, char>(_Vector<16, short>, _Vector<16, short>)">; - def packusdw256 : X86Builtin<"_Vector<16, short>(_Vector<8, int>, _Vector<8, int>)">; def palignr256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Constant int)">; def phaddw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">; def phaddd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">; @@ -647,6 +643,10 @@ let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWi def psrlv4di : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>)">; def insert128i256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>, _Constant int)">; + def packusdw256 : X86Builtin<"_Vector<16, short>(_Vector<8, int>, _Vector<8, int>)">; + def packsswb256 : X86Builtin<"_Vector<32, char>(_Vector<16, short>, _Vector<16, short>)">; + def packssdw256 : X86Builtin<"_Vector<16, short>(_Vector<8, int>, _Vector<8, int>)">; + def packuswb256 : X86Builtin<"_Vector<32, char>(_Vector<16, short>, _Vector<16, short>)">; } let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in { @@ -1109,51 +1109,51 @@ let Features = "avx512vnni", Attributes = [NoThrow, Const, RequiredVectorWidth<5 } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { - def vpdpbssd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>)">; + def vpdpbssd128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<16, char>, _Vector<16, char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def vpdpbssd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Vector<8, int>)">; + def vpdpbssd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<32, char>, _Vector<32, char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { - def vpdpbssds128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>)">; + def vpdpbssds128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<16, char>, _Vector<16, char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def vpdpbssds256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Vector<8, int>)">; + def vpdpbssds256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<32, char>, _Vector<32, char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { - def vpdpbsud128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>)">; + def vpdpbsud128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<16, char>, _Vector<16, unsigned char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def vpdpbsud256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, 
_Vector<8, int>)">; + def vpdpbsud256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<32, char>, _Vector<32, unsigned char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { - def vpdpbsuds128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>)">; + def vpdpbsuds128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<16, char>, _Vector<16, unsigned char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def vpdpbsuds256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Vector<8, int>)">; + def vpdpbsuds256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<32, char>, _Vector<32, unsigned char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { - def vpdpbuud128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>)">; + def vpdpbuud128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<16, unsigned char>, _Vector<16, unsigned char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def vpdpbuud256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Vector<8, int>)">; + def vpdpbuud256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<32, unsigned char>, _Vector<32, unsigned char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { - def vpdpbuuds128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>, _Vector<4, int>)">; + def vpdpbuuds128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<16, unsigned char>, _Vector<16, unsigned char>)">; } let Features = "avxvnniint8|avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in { - def vpdpbuuds256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>, _Vector<8, int>)">; + def vpdpbuuds256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<32, unsigned char>, _Vector<32, unsigned char>)">; } let Features = "movrs", Attributes = [NoThrow, Const] in { @@ -1308,11 +1308,14 @@ let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512> let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in { def ucmpw512_mask : X86Builtin<"unsigned int(_Vector<32, short>, _Vector<32, short>, _Constant int, unsigned int)">; - def packssdw512 : X86Builtin<"_Vector<32, short>(_Vector<16, int>, _Vector<16, int>)">; + def pshufb512 : X86Builtin<"_Vector<64, char>(_Vector<64, char>, _Vector<64, char>)">; +} + +let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in { def packsswb512 : X86Builtin<"_Vector<64, char>(_Vector<32, short>, _Vector<32, short>)">; - def packusdw512 : X86Builtin<"_Vector<32, short>(_Vector<16, int>, _Vector<16, int>)">; + def packssdw512 : X86Builtin<"_Vector<32, short>(_Vector<16, int>, _Vector<16, int>)">; def packuswb512 : X86Builtin<"_Vector<64, char>(_Vector<32, short>, _Vector<32, short>)">; - def pshufb512 : X86Builtin<"_Vector<64, char>(_Vector<64, char>, _Vector<64, char>)">; + def packusdw512 : X86Builtin<"_Vector<32, short>(_Vector<16, int>, _Vector<16, int>)">; } let Features = "avx512cd,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in { @@ -4279,12 +4282,12 @@ let Features = "avx10.2", Attributes = [NoThrow, Const, RequiredVectorWidth<256> let Features = "avx10.2", Attributes = [NoThrow, Const, 
RequiredVectorWidth<512>] in { def vdpphps512 : X86Builtin<"_Vector<16, float>(_Vector<16, float>, _Vector<32, _Float16>, _Vector<32, _Float16>)">; - def vpdpbssd512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>)">; - def vpdpbssds512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>)">; - def vpdpbsud512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>)">; - def vpdpbsuds512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>)">; - def vpdpbuud512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>)">; - def vpdpbuuds512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<16, int>, _Vector<16, int>)">; + def vpdpbssd512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<64, char>, _Vector<64, char>)">; + def vpdpbssds512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<64, char>, _Vector<64, char>)">; + def vpdpbsud512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<64, char>, _Vector<64, unsigned char>)">; + def vpdpbsuds512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<64, char>, _Vector<64, unsigned char>)">; + def vpdpbuud512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<64, unsigned char>, _Vector<64, unsigned char>)">; + def vpdpbuuds512 : X86Builtin<"_Vector<16, int>(_Vector<16, int>, _Vector<64, unsigned char>, _Vector<64, unsigned char>)">; } let Features = "avx10.2", Attributes = [NoThrow, RequiredVectorWidth<512>] in { diff --git a/clang/include/clang/Basic/Diagnostic.h b/clang/include/clang/Basic/Diagnostic.h index af26a04d94889..e540040ddc524 100644 --- a/clang/include/clang/Basic/Diagnostic.h +++ b/clang/include/clang/Basic/Diagnostic.h @@ -25,6 +25,7 @@ #include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Compiler.h" #include @@ -1366,6 +1367,22 @@ inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB, return DB; } +inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB, + const llvm::APSInt &Int) { + DB.AddString(toString(Int, /*Radix=*/10, Int.isSigned(), + /*formatAsCLiteral=*/false, + /*UpperCase=*/true, /*InsertSeparators=*/true)); + return DB; +} + +inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB, + const llvm::APInt &Int) { + DB.AddString(toString(Int, /*Radix=*/10, /*Signed=*/false, + /*formatAsCLiteral=*/false, + /*UpperCase=*/true, /*InsertSeparators=*/true)); + return DB; +} + inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB, int I) { DB.AddTaggedVal(I, DiagnosticsEngine::ak_sint); diff --git a/clang/include/clang/Basic/DiagnosticCommonKinds.td b/clang/include/clang/Basic/DiagnosticCommonKinds.td index 0bd8a423c393e..6e50e225a8cc1 100644 --- a/clang/include/clang/Basic/DiagnosticCommonKinds.td +++ b/clang/include/clang/Basic/DiagnosticCommonKinds.td @@ -433,6 +433,12 @@ def err_omp_more_one_clause : Error< "directive '#pragma omp %0' cannot contain more than one '%1' clause%select{| with '%3' name modifier| with 'source' dependence}2">; def err_omp_required_clause : Error< "directive '#pragma omp %0' requires the '%1' clause">; +def warn_omp_gpu_unsupported_clause: Warning< + "clause '%0' is currently not supported on a GPU; clause ignored">, + InGroup; +def warn_omp_gpu_unsupported_modifier_for_clause: 
Warning< + "modifier '%0' is currently not supported on a GPU for the '%1' clause; modifier ignored">, + InGroup; // Static Analyzer Core def err_unknown_analyzer_checker_or_package : Error< diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td index 4d9e123eb4ef1..c724136a7fdaf 100644 --- a/clang/include/clang/Basic/DiagnosticParseKinds.td +++ b/clang/include/clang/Basic/DiagnosticParseKinds.td @@ -1141,7 +1141,7 @@ def warn_cxx23_compat_binding_pack : Warning< def err_capture_default_first : Error< "capture default must be first">; def ext_decl_attrs_on_lambda : ExtWarn< - "%select{an attribute specifier sequence|%0}1 in this position " + "%select{an attribute specifier sequence|%1}0 in this position " "is a C++23 extension">, InGroup; def ext_lambda_missing_parens : ExtWarn< "lambda without a parameter clause is a C++23 extension">, diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index dd82c3b092eb5..b157cbb0b8069 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -35,9 +35,8 @@ defm decomp_decl : CXX17Compat<"decomposition declarations are">; defm inline_variable : CXX17Compat<"inline variables are">; // C++20 compatibility with C++17 and earlier. -defm decomp_decl_spec : CXX20Compat< - "decomposition declaration declared " - "%plural{1:'%1'|:with '%1' specifiers}0 is">; +defm decomp_decl_spec + : CXX20Compat<"decomposition declaration declared '%0' is">; defm constexpr_local_var_no_init : CXX20Compat< "uninitialized variable in a constexpr %select{function|constructor}0 is">; defm constexpr_function_try_block : CXX20Compat< @@ -593,9 +592,8 @@ def warn_modifying_shadowing_decl : // C++ decomposition declarations def err_decomp_decl_context : Error< "decomposition declaration not permitted in this context">; -def err_decomp_decl_spec : Error< - "decomposition declaration cannot be declared " - "%plural{1:'%1'|:with '%1' specifiers}0">; +def err_decomp_decl_spec + : Error<"decomposition declaration cannot be declared '%0'">; def err_decomp_decl_type : Error< "decomposition declaration cannot be declared with type %0; " "declared type must be 'auto' or reference to 'auto'">; @@ -1777,7 +1775,8 @@ def note_unsatisfied_trait "%Empty{empty}|" "%StandardLayout{standard-layout}|" "%Aggregate{aggregate}|" - "%Final{final}" + "%Final{final}|" + "%Abstract{abstract}" "}1">; def note_unsatisfied_trait_reason @@ -1827,7 +1826,12 @@ def note_unsatisfied_trait_reason "%PrivateProtectedDirectDataMember{has a %select{private|protected}1 direct data member}|" "%PrivateProtectedDirectBase{has a %select{private|protected}1 direct base}|" "%NotClassOrUnion{is not a class or union type}|" - "%NotMarkedFinal{is not marked 'final'}" + "%NotMarkedFinal{is not marked 'final'}|" + "%PointerType{is a pointer type}|" + "%ArrayType{is an array type}|" + "%UnionType{is a union type}|" + "%NotStructOrClass{is not a struct or class type}|" + "%OverridesAllPureVirtual{overrides all pure virtual functions from base class %1}" "}0">; def warn_consteval_if_always_true : Warning< @@ -5766,8 +5770,10 @@ def err_template_recursion_depth_exceeded : Error< def err_constraint_depends_on_self : Error<"satisfaction of constraint %0 depends on itself">, NoSFINAE; -def note_template_recursion_depth : Note< - "use -ftemplate-depth=N to increase recursive template instantiation depth">; +def note_template_recursion_depth + : Note<"use 
-ftemplate-depth=N to increase recursive template " + "instantiation depth">, + NoSFINAE; def err_template_instantiate_within_definition : Error< "%select{implicit|explicit}0 instantiation of template %1 within its" @@ -10456,6 +10462,9 @@ def warn_format_conversion_argument_type_mismatch : Warning< "format specifies type %0 but the argument has " "%select{type|underlying type}2 %1">, InGroup; +def err_format_conversion_argument_type_mismatch : Error< + "format specifies type %0 but the argument has " + "%select{type|underlying type}2 %1">; def warn_format_conversion_argument_type_mismatch_pedantic : Extension< warn_format_conversion_argument_type_mismatch.Summary>, InGroup; @@ -10505,6 +10514,8 @@ def warn_printf_asterisk_missing_arg : Warning< def warn_printf_asterisk_wrong_type : Warning< "field %select{width|precision}0 should have type %1, but argument has type %2">, InGroup; +def err_printf_asterisk_wrong_type : Error< + "field %select{width|precision}0 should have type %1, but argument has type %2">; def warn_printf_nonsensical_optional_amount: Warning< "%select{field width|precision}0 used with '%1' conversion specifier, resulting in undefined behavior">, InGroup; @@ -11752,6 +11763,18 @@ def note_omp_implicit_dsa : Note< "implicitly determined as %0">; def err_omp_loop_var_dsa : Error< "loop iteration variable in the associated loop of 'omp %1' directive may not be %0, predetermined as %2">; +def err_omp_not_a_loop_sequence + : Error<"statement after '#pragma omp %0' must be a loop sequence " + "containing canonical loops or loop-generating constructs">; +def err_omp_empty_loop_sequence + : Error<"loop sequence after '#pragma omp %0' must contain at least 1 " + "canonical loop or loop-generating construct">; +def err_omp_invalid_looprange + : Error<"looprange clause selects loops from %1 to %2 but this exceeds the " + "number of loops (%3) in the loop sequence">; +def warn_omp_redundant_fusion : Warning<"looprange clause selects a single " + "loop, resulting in redundant fusion">, + InGroup; def err_omp_not_for : Error< "%select{statement after '#pragma omp %1' must be a for loop|" "expected %2 for loops after '#pragma omp %1'%select{|, but found only %4}3}0">; diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h index a8943df5b39aa..41595ec2a060d 100644 --- a/clang/include/clang/Basic/LangOptions.h +++ b/clang/include/clang/Basic/LangOptions.h @@ -549,8 +549,7 @@ class LangOptions : public LangOptionsBase { bool CheckNew = false; /// The HLSL root signature version for dxil. - llvm::dxbc::RootSignatureVersion HLSLRootSigVer = - llvm::dxbc::RootSignatureVersion::V1_1; + llvm::dxbc::RootSignatureVersion HLSLRootSigVer; /// The HLSL root signature that will be used to overide the root signature /// used for the shader entry point. diff --git a/clang/include/clang/Basic/OpenMPKinds.h b/clang/include/clang/Basic/OpenMPKinds.h index 115af7b19d6e4..ed89a31e2684b 100644 --- a/clang/include/clang/Basic/OpenMPKinds.h +++ b/clang/include/clang/Basic/OpenMPKinds.h @@ -312,6 +312,14 @@ bool isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind); /// otherwise - false. bool isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind); +/// Checks if the specified directive is a map-entering target directive. +/// \param DKind Specified directive. +/// \return true - the directive is a map-entering target directive like +/// 'omp target', 'omp target data', 'omp target enter data', +/// 'omp target parallel', etc. 
(excludes 'omp target exit data', 'omp target +/// update') otherwise - false. +bool isOpenMPTargetMapEnteringDirective(OpenMPDirectiveKind DKind); + /// Checks if the specified composite/combined directive constitutes a teams /// directive in the outermost nest. For example /// 'omp teams distribute' or 'omp teams distribute parallel for'. @@ -383,6 +391,13 @@ bool isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind); bool isOpenMPCanonicalLoopNestTransformationDirective( OpenMPDirectiveKind DKind); +/// Checks if the specified directive is a loop transformation directive that +/// applies to a canonical loop sequence. +/// \param DKind Specified directive. +/// \return True iff the directive is a loop transformation. +bool isOpenMPCanonicalLoopSequenceTransformationDirective( + OpenMPDirectiveKind DKind); + /// Checks if the specified directive is a loop transformation directive. /// \param DKind Specified directive. /// \return True iff the directive is a loop transformation. diff --git a/clang/include/clang/Basic/PPCTypes.def b/clang/include/clang/Basic/PPCTypes.def index fc4155ca98b2d..9c0fa9198d5b1 100644 --- a/clang/include/clang/Basic/PPCTypes.def +++ b/clang/include/clang/Basic/PPCTypes.def @@ -30,6 +30,7 @@ #endif +PPC_VECTOR_MMA_TYPE(__dmr2048, DMR2048, 2048) PPC_VECTOR_MMA_TYPE(__dmr1024, DMR1024, 1024) PPC_VECTOR_MMA_TYPE(__vector_quad, VectorQuad, 512) PPC_VECTOR_VSX_TYPE(__vector_pair, VectorPair, 256) diff --git a/clang/include/clang/Basic/Sarif.h b/clang/include/clang/Basic/Sarif.h index e6c46224b316d..a88d1ee2965a9 100644 --- a/clang/include/clang/Basic/Sarif.h +++ b/clang/include/clang/Basic/Sarif.h @@ -322,6 +322,8 @@ class SarifResult { uint32_t RuleIdx; std::string RuleId; std::string DiagnosticMessage; + std::string HostedViewerURI; + llvm::SmallDenseMap PartialFingerprints; llvm::SmallVector Locations; llvm::SmallVector ThreadFlows; std::optional LevelOverride; @@ -347,6 +349,11 @@ class SarifResult { return *this; } + SarifResult setHostedViewerURI(llvm::StringRef URI) { + HostedViewerURI = URI.str(); + return *this; + } + SarifResult setLocations(llvm::ArrayRef DiagLocs) { #ifndef NDEBUG for (const auto &Loc : DiagLocs) { @@ -366,6 +373,12 @@ class SarifResult { LevelOverride = TheLevel; return *this; } + + SarifResult addPartialFingerprint(llvm::StringRef key, + llvm::StringRef value) { + PartialFingerprints[key] = value; + return *this; + } }; /// This class handles creating a valid SARIF document given various input @@ -475,6 +488,8 @@ class SarifDocumentWriter { /// reported diagnostics, resulting in an expensive call. llvm::json::Object createDocument(); + static std::string fileNameToURI(llvm::StringRef Filename); + private: /// Source Manager to use for the current SARIF document. 
const SourceManager &SourceMgr; diff --git a/clang/include/clang/Basic/StmtNodes.td b/clang/include/clang/Basic/StmtNodes.td index dd1a24405fae7..bf3686bb372d5 100644 --- a/clang/include/clang/Basic/StmtNodes.td +++ b/clang/include/clang/Basic/StmtNodes.td @@ -238,6 +238,10 @@ def OMPUnrollDirective : StmtNode; def OMPReverseDirective : StmtNode; def OMPInterchangeDirective : StmtNode; +def OMPCanonicalLoopSequenceTransformationDirective + : StmtNode; +def OMPFuseDirective + : StmtNode; def OMPForDirective : StmtNode; def OMPForSimdDirective : StmtNode; def OMPSectionsDirective : StmtNode; diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h index e5c5ada3b0858..ceb16174e13e7 100644 --- a/clang/include/clang/Basic/TargetInfo.h +++ b/clang/include/clang/Basic/TargetInfo.h @@ -1259,6 +1259,10 @@ class TargetInfo : public TransferrableTargetInfo, ArrayRef OutputConstraints, unsigned &Index) const; + std::string + simplifyConstraint(StringRef Constraint, + SmallVectorImpl *OutCons = nullptr) const; + // Constraint parm will be left pointing at the last character of // the constraint. In practice, it won't be changed unless the // constraint is longer than one character. diff --git a/clang/include/clang/Basic/TokenKinds.def b/clang/include/clang/Basic/TokenKinds.def index 9d1a23d1af218..564d6010181cc 100644 --- a/clang/include/clang/Basic/TokenKinds.def +++ b/clang/include/clang/Basic/TokenKinds.def @@ -552,10 +552,10 @@ TYPE_TRAIT_1(__can_pass_in_regs, CanPassInRegs, KEYCXX) TYPE_TRAIT_2(__reference_binds_to_temporary, ReferenceBindsToTemporary, KEYCXX) TYPE_TRAIT_2(__reference_constructs_from_temporary, ReferenceConstructsFromTemporary, KEYCXX) TYPE_TRAIT_2(__reference_converts_from_temporary, ReferenceConvertsFromTemporary, KEYCXX) -TYPE_TRAIT_2(__builtin_lt_synthesises_from_spaceship, LtSynthesisesFromSpaceship, KEYCXX) -TYPE_TRAIT_2(__builtin_le_synthesises_from_spaceship, LeSynthesisesFromSpaceship, KEYCXX) -TYPE_TRAIT_2(__builtin_gt_synthesises_from_spaceship, GtSynthesisesFromSpaceship, KEYCXX) -TYPE_TRAIT_2(__builtin_ge_synthesises_from_spaceship, GeSynthesisesFromSpaceship, KEYCXX) +TYPE_TRAIT_2(__builtin_lt_synthesizes_from_spaceship, LtSynthesizesFromSpaceship, KEYCXX) +TYPE_TRAIT_2(__builtin_le_synthesizes_from_spaceship, LeSynthesizesFromSpaceship, KEYCXX) +TYPE_TRAIT_2(__builtin_gt_synthesizes_from_spaceship, GtSynthesizesFromSpaceship, KEYCXX) +TYPE_TRAIT_2(__builtin_ge_synthesizes_from_spaceship, GeSynthesizesFromSpaceship, KEYCXX) // IsDeducible is only used internally by clang for CTAD implementation and // is not exposed to users. 
TYPE_TRAIT_2(/*EmptySpellingName*/, IsDeducible, KEYCXX) diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h index a3f167e3cde2c..8a5bf0376ec98 100644 --- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h +++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h @@ -148,9 +148,10 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { } mlir::Value createComplexReal(mlir::Location loc, mlir::Value operand) { - auto operandTy = mlir::cast(operand.getType()); - return cir::ComplexRealOp::create(*this, loc, operandTy.getElementType(), - operand); + auto resultType = operand.getType(); + if (auto complexResultType = mlir::dyn_cast(resultType)) + resultType = complexResultType.getElementType(); + return cir::ComplexRealOp::create(*this, loc, resultType, operand); } mlir::Value createComplexImag(mlir::Location loc, mlir::Value operand) { @@ -243,6 +244,13 @@ class CIRBaseBuilderTy : public mlir::OpBuilder { return cir::AllocaOp::create(*this, loc, addrType, type, name, alignment); } + mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, + mlir::Type type, llvm::StringRef name, + clang::CharUnits alignment) { + mlir::IntegerAttr alignmentAttr = getAlignmentAttr(alignment); + return createAlloca(loc, addrType, type, name, alignmentAttr); + } + /// Get constant address of a global variable as an MLIR attribute. /// This wrapper infers the attribute type through the global op. cir::GlobalViewAttr getGlobalViewAttr(cir::GlobalOp globalOp, diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index bb394440bf8d8..0a78492aa9a86 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -209,9 +209,10 @@ def CIR_CastOp : CIR_Op<"cast", [ Example: ```mlir - %4 = cir.cast(int_to_bool, %3 : i32), !cir.bool + %4 = cir.cast int_to_bool %3 : i32 -> !cir.bool ... - %x = cir.cast(array_to_ptrdecay, %0 : !cir.ptr>), !cir.ptr + %x = cir.cast array_to_ptrdecay %0 + : !cir.ptr> -> !cir.ptr ``` }]; @@ -219,8 +220,7 @@ def CIR_CastOp : CIR_Op<"cast", [ let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ - `(` $kind `,` $src `:` type($src) `)` - `,` type($result) attr-dict + $kind $src `:` type($src) `->` type($result) attr-dict }]; // The input and output types should match the cast kind. @@ -683,8 +683,8 @@ def CIR_ConditionOp : CIR_Op<"condition", [ //===----------------------------------------------------------------------===// defvar CIR_YieldableScopes = [ - "ArrayCtor", "ArrayDtor", "CaseOp", "DoWhileOp", "ForOp", "IfOp", "ScopeOp", - "SwitchOp", "TernaryOp", "WhileOp" + "ArrayCtor", "ArrayDtor", "CaseOp", "DoWhileOp", "ForOp", "GlobalOp", "IfOp", + "ScopeOp", "SwitchOp", "TernaryOp", "WhileOp" ]; def CIR_YieldOp : CIR_Op<"yield", [ @@ -1176,7 +1176,7 @@ def CIR_GotoOp : CIR_Op<"goto", [Terminator]> { ```mlir cir.scope { // REGION #1 %2 = cir.load %0 : !cir.ptr, !s32i - %3 = cir.cast(int_to_bool, %2 : !s32i), !cir.bool + %3 = cir.cast int_to_bool %2 : !s32i -> !cir.bool cir.if %3 { cir.goto "label" } @@ -1776,7 +1776,9 @@ def CIR_GlobalLinkageKind : CIR_I32EnumAttr< // is upstreamed. 
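For reference, a minimal usage sketch of the `createAlloca` overload added above, which takes a `clang::CharUnits` alignment. This snippet is not part of the diff; `builder`, `loc`, `ptrTy` and `intTy` are assumed caller-side values (e.g. inside CIRGen).

```c++
// Sketch only: exercise the CharUnits-based createAlloca overload.
// The overload converts the alignment to an IntegerAttr via
// getAlignmentAttr() and forwards to the existing createAlloca.
clang::CharUnits align = clang::CharUnits::fromQuantity(4);
mlir::Value slot =
    builder.createAlloca(loc, ptrTy, intTy, /*name=*/"tmp", align);
```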
def CIR_GlobalOp : CIR_Op<"global", [ - DeclareOpInterfaceMethods + DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, + NoRegionArguments ]> { let summary = "Declare or define a global variable"; let description = [{ @@ -1807,6 +1809,9 @@ def CIR_GlobalOp : CIR_Op<"global", [ UnitAttr:$dso_local, OptionalAttr:$alignment); + let regions = (region MaxSizedRegion<1>:$ctorRegion, + MaxSizedRegion<1>:$dtorRegion); + let assemblyFormat = [{ ($sym_visibility^)? (`` $global_visibility^)? @@ -1815,24 +1820,34 @@ def CIR_GlobalOp : CIR_Op<"global", [ (`comdat` $comdat^)? (`dso_local` $dso_local^)? $sym_name - custom($sym_type, $initial_value) + custom($sym_type, $initial_value, + $ctorRegion, $dtorRegion) attr-dict }]; let extraClassDeclaration = [{ - bool isDeclaration() { return !getInitialValue(); } + bool isDeclaration() { + return !getInitialValue() && getCtorRegion().empty() && getDtorRegion().empty(); + } bool hasInitializer() { return !isDeclaration(); } }]; let skipDefaultBuilders = 1; - let builders = [OpBuilder<(ins - "llvm::StringRef":$sym_name, - "mlir::Type":$sym_type, - CArg<"bool", "false">:$isConstant, - // CIR defaults to external linkage. - CArg<"cir::GlobalLinkageKind", - "cir::GlobalLinkageKind::ExternalLinkage">:$linkage)>]; + let builders = [ + OpBuilder<(ins + "llvm::StringRef":$sym_name, + "mlir::Type":$sym_type, + CArg<"bool", "false">:$isConstant, + // CIR defaults to external linkage. + CArg<"cir::GlobalLinkageKind", + "cir::GlobalLinkageKind::ExternalLinkage">:$linkage, + CArg<"llvm::function_ref", + "nullptr">:$ctorBuilder, + CArg<"llvm::function_ref", + "nullptr">:$dtorBuilder) + > + ]; let hasVerifier = 1; @@ -3245,18 +3260,20 @@ def CIR_ComplexCreateOp : CIR_Op<"complex.create", [Pure, SameTypeOperands]> { def CIR_ComplexRealOp : CIR_Op<"complex.real", [Pure]> { let summary = "Extract the real part of a complex value"; let description = [{ - `cir.complex.real` operation takes an operand of `!cir.complex` type and - yields the real part of it. + `cir.complex.real` operation takes an operand of `!cir.complex`, `!cir.int` + or `!cir.float`. If the operand is `!cir.complex`, the real part of it will + be returned, otherwise the value returned unmodified. 
Example: ```mlir - %1 = cir.complex.real %0 : !cir.complex -> !cir.float + %real = cir.complex.real %complex : !cir.complex -> !cir.float + %real = cir.complex.real %scalar : !cir.float -> !cir.float ``` }]; let results = (outs CIR_AnyIntOrFloatType:$result); - let arguments = (ins CIR_ComplexType:$operand); + let arguments = (ins CIR_AnyComplexOrIntOrFloatType:$operand); let assemblyFormat = [{ $operand `:` qualified(type($operand)) `->` qualified(type($result)) @@ -3979,9 +3996,9 @@ def CIR_VAStartOp : CIR_Op<"va_start"> { ```mlir // %args : !cir.ptr> - %p = cir.cast(array_to_ptrdecay, %args - : !cir.ptr>), - !cir.ptr + %p = cir.cast array_to_ptrdecay %args + : !cir.ptr>) + -> !cir.ptr %count = cir.load %0 : !cir.ptr, !s32i cir.va_start %p %count : !cir.ptr, !s32i ``` @@ -4018,9 +4035,9 @@ def CIR_VAEndOp : CIR_Op<"va_end"> { Example: ```mlir // %args : !cir.ptr> - %p = cir.cast(array_to_ptrdecay, %args - : !cir.ptr>), - !cir.ptr + %p = cir.cast array_to_ptrdecay %args + : !cir.ptr> + -> !cir.ptr cir.va_end %p : !cir.ptr ``` }]; @@ -4053,9 +4070,9 @@ def CIR_VAArgOp : CIR_Op<"va_arg"> { Example: ```mlir // %args : !cir.ptr> - %p = cir.cast(array_to_ptrdecay, %args - : !cir.ptr>), - !cir.ptr + %p = cir.cast array_to_ptrdecay %args + : !cir.ptr> + -> !cir.ptr cir.va.start %p : !cir.ptr // Fetch an `int` from the vararg list. diff --git a/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td b/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td index 82f6e1d33043e..da03a291a7690 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td +++ b/clang/include/clang/CIR/Dialect/IR/CIRTypeConstraints.td @@ -165,6 +165,12 @@ def CIR_AnyIntOrFloatType : AnyTypeOf<[CIR_AnyFloatType, CIR_AnyIntType], def CIR_AnyComplexType : CIR_TypeBase<"::cir::ComplexType", "complex type">; +def CIR_AnyComplexOrIntOrFloatType : AnyTypeOf<[ + CIR_AnyComplexType, CIR_AnyFloatType, CIR_AnyIntType +], "complex, integer or floating point type"> { + let cppFunctionName = "isComplexOrIntegerOrFloatingPointType"; +} + //===----------------------------------------------------------------------===// // Array Type predicates //===----------------------------------------------------------------------===// diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 9d2cf03b24c0c..3dfcafc0399a5 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -37,6 +37,8 @@ struct MissingFeatures { static bool opGlobalDLLImportExport() { return false; } static bool opGlobalPartition() { return false; } static bool opGlobalUsedOrCompilerUsed() { return false; } + static bool setDSOLocal() { return false; } + static bool setComdat() { return false; } static bool supportIFuncAttr() { return false; } static bool supportVisibility() { return false; } @@ -131,7 +133,6 @@ struct MissingFeatures { // RecordType static bool skippedLayout() { return false; } static bool astRecordDeclAttr() { return false; } - static bool recordZeroInit() { return false; } static bool recordZeroInitPadding() { return false; } static bool zeroSizeRecordMembers() { return false; } @@ -190,6 +191,7 @@ struct MissingFeatures { static bool builtinCheckKind() { return false; } static bool cgCapturedStmtInfo() { return false; } static bool cgFPOptionsRAII() { return false; } + static bool checkBitfieldClipping() { return false; } static bool cirgenABIInfo() { return false; } static bool cleanupAfterErrorDiags() { return false; } static bool 
cleanupsToDeactivate() { return false; } @@ -208,16 +210,19 @@ struct MissingFeatures { static bool dataLayoutTypeAllocSize() { return false; } static bool dataLayoutTypeStoreSize() { return false; } static bool deferredCXXGlobalInit() { return false; } + static bool deleteArray() { return false; } static bool devirtualizeMemberFunction() { return false; } static bool ehCleanupFlags() { return false; } static bool ehCleanupScope() { return false; } static bool ehCleanupScopeRequiresEHCleanup() { return false; } static bool ehCleanupBranchFixups() { return false; } static bool ehstackBranches() { return false; } + static bool emitBranchThroughCleanup() { return false; } static bool emitCheckedInBoundsGEP() { return false; } static bool emitCondLikelihoodViaExpectIntrinsic() { return false; } static bool emitLifetimeMarkers() { return false; } static bool emitLValueAlignmentAssumption() { return false; } + static bool emitNullCheckForDeleteCalls() { return false; } static bool emitNullabilityCheck() { return false; } static bool emitTypeCheck() { return false; } static bool emitTypeMetadataCodeForVCall() { return false; } @@ -243,7 +248,6 @@ struct MissingFeatures { static bool metaDataNode() { return false; } static bool moduleNameHash() { return false; } static bool msabi() { return false; } - static bool needsGlobalCtorDtor() { return false; } static bool nrvo() { return false; } static bool objCBlocks() { return false; } static bool objCGC() { return false; } diff --git a/clang/include/clang/CodeGen/BackendUtil.h b/clang/include/clang/CodeGen/BackendUtil.h index 92e0d13bf25b6..8b0d975a876e6 100644 --- a/clang/include/clang/CodeGen/BackendUtil.h +++ b/clang/include/clang/CodeGen/BackendUtil.h @@ -49,7 +49,7 @@ void EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts, llvm::MemoryBufferRef Buf); void EmbedObject(llvm::Module *M, const CodeGenOptions &CGOpts, - DiagnosticsEngine &Diags); + llvm::vfs::FileSystem &VFS, DiagnosticsEngine &Diags); } // namespace clang #endif diff --git a/clang/include/clang/CodeGen/ModuleBuilder.h b/clang/include/clang/CodeGen/ModuleBuilder.h index 59b9840d02e08..f1b8229edd362 100644 --- a/clang/include/clang/CodeGen/ModuleBuilder.h +++ b/clang/include/clang/CodeGen/ModuleBuilder.h @@ -52,6 +52,12 @@ namespace CodeGen { class CodeGenerator : public ASTConsumer { virtual void anchor(); +protected: + /// True if we've finished generating IR. This prevents us from generating + /// additional LLVM IR after emitting output in HandleTranslationUnit. This + /// can happen when Clang plugins trigger additional AST deserialization. + bool IRGenFinished = false; + public: /// Return an opaque reference to the CodeGenModule object, which can /// be used in various secondary APIs. 
It is valid as long as the diff --git a/clang/include/clang/Driver/CommonArgs.h b/clang/include/clang/Driver/CommonArgs.h index 1464ce4e1b31b..23426c0a3e02e 100644 --- a/clang/include/clang/Driver/CommonArgs.h +++ b/clang/include/clang/Driver/CommonArgs.h @@ -105,6 +105,16 @@ unsigned DwarfVersionNum(StringRef ArgValue); const llvm::opt::Arg *getDwarfNArg(const llvm::opt::ArgList &Args); unsigned getDwarfVersion(const ToolChain &TC, const llvm::opt::ArgList &Args); +enum class DwarfFissionKind { None, Split, Single }; + +DwarfFissionKind getDebugFissionKind(const Driver &D, + const llvm::opt::ArgList &Args, + llvm::opt::Arg *&Arg); + +bool checkDebugInfoOption(const llvm::opt::Arg *A, + const llvm::opt::ArgList &Args, const Driver &D, + const ToolChain &TC); + void AddAssemblerKPIC(const ToolChain &ToolChain, const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs); @@ -294,6 +304,11 @@ std::string complexRangeKindToStr(LangOptions::ComplexRangeKind Range); // Render a frontend option corresponding to ComplexRangeKind. std::string renderComplexRangeOption(LangOptions::ComplexRangeKind Range); +// Set the complex range and output a warning as needed. +void setComplexRange(const Driver &D, StringRef NewOpt, + LangOptions::ComplexRangeKind NewRange, StringRef &LastOpt, + LangOptions::ComplexRangeKind &Range); + } // end namespace tools } // end namespace driver } // end namespace clang diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 16e1c396fedbe..2ef609831637e 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -2750,6 +2750,9 @@ def fno_unsafe_math_optimizations : Flag<["-"], "fno-unsafe-math-optimizations"> Group; def fassociative_math : Flag<["-"], "fassociative-math">, Visibility<[ClangOption, FlangOption]>, Group; def fno_associative_math : Flag<["-"], "fno-associative-math">, Visibility<[ClangOption, FlangOption]>, Group; +def fno_fast_real_mod : Flag<["-"], "fno-fast-real-mod">, + Group, Visibility<[FlangOption, FC1Option]>, + HelpText<"Disable optimization of MOD for REAL types in presence of -ffast-math">; defm reciprocal_math : BoolFOption<"reciprocal-math", LangOpts<"AllowRecip">, DefaultFalse, PosFlag; -def fopenmp_assume_threads_oversubscription : Flag<["-"], "fopenmp-assume-threads-oversubscription">; -def fno_openmp_assume_teams_oversubscription : Flag<["-"], "fno-openmp-assume-teams-oversubscription">; -def fno_openmp_assume_threads_oversubscription : Flag<["-"], "fno-openmp-assume-threads-oversubscription">; +def fopenmp_assume_teams_oversubscription : Flag<["-"], "fopenmp-assume-teams-oversubscription">, + HelpText<"Allow the optimizer to discretely increase the number of " + "teams. May cause ignore environment variables that set " + "the number of teams to be ignored. The combination of " + "-fopenmp-assume-teams-oversubscription " + "and -fopenmp-assume-threads-oversubscription " + "may allow the conversion of loops into sequential code by " + "ensuring that each team/thread executes at most one iteration.">; +def fopenmp_assume_threads_oversubscription : Flag<["-"], "fopenmp-assume-threads-oversubscription">, + HelpText<"Allow the optimizer to discretely increase the number of " + "threads. May cause ignore environment variables that set " + "the number of threads to be ignored. 
The combination of " + "-fopenmp-assume-teams-oversubscription " + "and -fopenmp-assume-threads-oversubscription " + "may allow the conversion of loops into sequential code by " + "ensuring that each team/thread executes at most one iteration.">; +def fno_openmp_assume_teams_oversubscription : Flag<["-"], "fno-openmp-assume-teams-oversubscription">, + HelpText<"Do not assume teams oversubscription.">; +def fno_openmp_assume_threads_oversubscription : Flag<["-"], "fno-openmp-assume-threads-oversubscription">, + HelpText<"Do not assume threads oversubscription.">; def fopenmp_assume_no_thread_state : Flag<["-"], "fopenmp-assume-no-thread-state">, HelpText<"Assert no thread in a parallel region modifies an ICV">, MarshallingInfoFlag>; @@ -4587,7 +4606,7 @@ defm ptrauth_block_descriptor_pointers : OptInCC1FFlag<"ptrauth-block-descriptor def fenable_matrix : Flag<["-"], "fenable-matrix">, Group, Visibility<[ClangOption, CC1Option]>, HelpText<"Enable matrix data type and related builtin functions">, - MarshallingInfoFlag>; + MarshallingInfoFlag, hlsl.KeyPath>; defm raw_string_literals : BoolFOption<"raw-string-literals", LangOpts<"RawStringLiterals">, Default, @@ -4754,13 +4773,13 @@ defm column_info : BoolOption<"g", "column-info", PosFlag, BothFlags<[], [ClangOption, CLOption, DXCOption]>>, Group; def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group, - Visibility<[ClangOption, CLOption, DXCOption]>; + Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>; def gsplit_dwarf_EQ : Joined<["-"], "gsplit-dwarf=">, Group, - Visibility<[ClangOption, CLOption, DXCOption]>, + Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>, HelpText<"Set DWARF fission mode">, Values<"split,single">; def gno_split_dwarf : Flag<["-"], "gno-split-dwarf">, Group, - Visibility<[ClangOption, CLOption, DXCOption]>; + Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>; def gtemplate_alias : Flag<["-"], "gtemplate-alias">, Group, Visibility<[ClangOption, CC1Option]>; def gno_template_alias : Flag<["-"], "gno-template-alias">, Group, Visibility<[ClangOption]>; def gsimple_template_names : Flag<["-"], "gsimple-template-names">, Group; @@ -8405,7 +8424,7 @@ def main_file_name : Separate<["-"], "main-file-name">, MarshallingInfoString>; def split_dwarf_output : Separate<["-"], "split-dwarf-output">, HelpText<"File name to use for split dwarf debug info output">, - Visibility<[CC1Option, CC1AsOption]>, + Visibility<[CC1Option, CC1AsOption, FC1Option]>, MarshallingInfoString>; let Visibility = [CC1Option, FC1Option] in { @@ -8437,6 +8456,10 @@ def dependent_lib : Joined<["--"], "dependent-lib=">, HelpText<"Add dependent library">, MarshallingInfoStringVector>; +def split_dwarf_file : Separate<["-"], "split-dwarf-file">, + HelpText<"Name of the split dwarf debug info file to encode in the object file">, + MarshallingInfoString>; + } // let Visibility = [CC1Option, FC1Option] let Visibility = [CC1Option] in { @@ -8447,9 +8470,6 @@ def fblocks_runtime_optional : Flag<["-"], "fblocks-runtime-optional">, def fexternc_nounwind : Flag<["-"], "fexternc-nounwind">, HelpText<"Assume all functions with C linkage do not unwind">, MarshallingInfoFlag>; -def split_dwarf_file : Separate<["-"], "split-dwarf-file">, - HelpText<"Name of the split dwarf debug info file to encode in the object file">, - MarshallingInfoString>; def fno_wchar : Flag<["-"], "fno-wchar">, HelpText<"Disable C++ builtin type wchar_t">, MarshallingInfoNegativeFlag, cplusplus.KeyPath>, @@ -9456,7 +9476,7 @@ def target_profile : 
DXCJoinedOrSeparate<"T">, MetaVarName<"">, "lib_6_3, lib_6_4, lib_6_5, lib_6_6, lib_6_7, lib_6_x," "ms_6_5, ms_6_6, ms_6_7," "as_6_5, as_6_6, as_6_7," - "rootsig_1_0, rootsig_1_1">; + "rootsig_1_0, rootsig_1_1, rootsig_1_2">; def emit_pristine_llvm : DXCFlag<"emit-pristine-llvm">, HelpText<"Emit pristine LLVM IR from the frontend by not running any LLVM passes at all." "Same as -S + -emit-llvm + -disable-llvm-passes.">; @@ -9469,9 +9489,9 @@ def fdx_rootsignature_version : Group, Visibility<[ClangOption, CC1Option]>, HelpText<"Root Signature Version">, - Values<"rootsig_1_0,rootsig_1_1">, + Values<"rootsig_1_0,rootsig_1_1,rootsig_1_2">, NormalizedValuesScope<"llvm::dxbc::RootSignatureVersion">, - NormalizedValues<["V1_0", "V1_1"]>, + NormalizedValues<["V1_0", "V1_1", "V1_2"]>, MarshallingInfoEnum, "V1_1">; def dxc_rootsig_ver : Separate<["/", "-"], "force-rootsig-ver">, diff --git a/clang/include/clang/Frontend/CompilerInstance.h b/clang/include/clang/Frontend/CompilerInstance.h index a6b6993b708d0..44fff69c217c5 100644 --- a/clang/include/clang/Frontend/CompilerInstance.h +++ b/clang/include/clang/Frontend/CompilerInstance.h @@ -712,12 +712,10 @@ class CompilerInstance : public ModuleLoader { const CodeGenOptions *CodeGenOpts = nullptr); /// Create the file manager and replace any existing one with it. - /// - /// \return The new file manager on success, or null on failure. - FileManager *createFileManager(); + void createFileManager(); /// Create the source manager and replace any existing one with it. - void createSourceManager(FileManager &FileMgr); + void createSourceManager(); /// Create the preprocessor, using the invocation, file, and source managers, /// and replace any existing one with it. diff --git a/clang/include/clang/Frontend/Utils.h b/clang/include/clang/Frontend/Utils.h index f86c2f5074de0..49fd920d1ec43 100644 --- a/clang/include/clang/Frontend/Utils.h +++ b/clang/include/clang/Frontend/Utils.h @@ -143,8 +143,9 @@ class ModuleDependencyCollector : public DependencyCollector { std::error_code copyToRoot(StringRef Src, StringRef Dst = {}); public: - ModuleDependencyCollector(std::string DestDir) - : DestDir(std::move(DestDir)) {} + ModuleDependencyCollector(std::string DestDir, + IntrusiveRefCntPtr VFS) + : DestDir(std::move(DestDir)), Canonicalizer(std::move(VFS)) {} ~ModuleDependencyCollector() override { writeFileMap(); } StringRef getDest() { return DestDir; } diff --git a/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def b/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def index a5cfeb34b2b51..1d7f7adbe076f 100644 --- a/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def +++ b/clang/include/clang/Lex/HLSLRootSignatureTokenKinds.def @@ -65,6 +65,9 @@ #ifndef STATIC_BORDER_COLOR_ENUM #define STATIC_BORDER_COLOR_ENUM(NAME, LIT) ENUM(NAME, LIT) #endif +#ifndef STATIC_SAMPLER_FLAG_ENUM +#define STATIC_SAMPLER_FLAG_ENUM(NAME, LIT) ENUM(NAME, LIT) +#endif // General Tokens: TOK(invalid, "invalid identifier") @@ -228,6 +231,10 @@ STATIC_BORDER_COLOR_ENUM(OpaqueWhite, "STATIC_BORDER_COLOR_OPAQUE_WHITE") STATIC_BORDER_COLOR_ENUM(OpaqueBlackUint, "STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT") STATIC_BORDER_COLOR_ENUM(OpaqueWhiteUint, "STATIC_BORDER_COLOR_OPAQUE_WHITE_UINT") +// Root Descriptor Flag Enums: +STATIC_SAMPLER_FLAG_ENUM(UintBorderColor, "UINT_BORDER_COLOR") +STATIC_SAMPLER_FLAG_ENUM(NonNormalizedCoordinates, "NON_NORMALIZED_COORDINATES") + #undef STATIC_BORDER_COLOR_ENUM #undef COMPARISON_FUNC_ENUM #undef TEXTURE_ADDRESS_MODE_ENUM @@ -237,6 +244,7 @@ 
STATIC_BORDER_COLOR_ENUM(OpaqueWhiteUint, "STATIC_BORDER_COLOR_OPAQUE_WHITE_UINT #undef DESCRIPTOR_RANGE_FLAG_ENUM_OFF #undef DESCRIPTOR_RANGE_FLAG_ENUM_ON #undef ROOT_DESCRIPTOR_FLAG_ENUM +#undef STATIC_SAMPLER_FLAG_ENUM #undef ROOT_FLAG_ENUM #undef DESCRIPTOR_RANGE_OFFSET_ENUM #undef UNBOUNDED_ENUM diff --git a/clang/include/clang/Parse/ParseHLSLRootSignature.h b/clang/include/clang/Parse/ParseHLSLRootSignature.h index b06846fd83c09..8f91d7cd7b031 100644 --- a/clang/include/clang/Parse/ParseHLSLRootSignature.h +++ b/clang/include/clang/Parse/ParseHLSLRootSignature.h @@ -130,6 +130,7 @@ class RootSignatureParser { std::optional MaxLOD; std::optional Space; std::optional Visibility; + std::optional Flags; }; std::optional parseStaticSamplerParams(); @@ -153,6 +154,8 @@ class RootSignatureParser { parseRootDescriptorFlags(RootSignatureToken::Kind Context); std::optional parseDescriptorRangeFlags(RootSignatureToken::Kind Context); + std::optional + parseStaticSamplerFlags(RootSignatureToken::Kind Context); /// Use NumericLiteralParser to convert CurToken.NumSpelling into a unsigned /// 32-bit integer diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h index 30edd303e1824..e301cf1080977 100644 --- a/clang/include/clang/Parse/Parser.h +++ b/clang/include/clang/Parse/Parser.h @@ -6767,6 +6767,9 @@ class Parser : public CodeCompletionHandler { OpenMPClauseKind Kind, bool ParseOnly); + /// Parses the 'looprange' clause of a '#pragma omp fuse' directive. + OMPClause *ParseOpenMPLoopRangeClause(); + /// Parses the 'sizes' clause of a '#pragma omp tile' directive. OMPClause *ParseOpenMPSizesClause(); diff --git a/clang/include/clang/Sema/HLSLExternalSemaSource.h b/clang/include/clang/Sema/HLSLExternalSemaSource.h index d93fb8c8eef6b..049fc7b8fe3f2 100644 --- a/clang/include/clang/Sema/HLSLExternalSemaSource.h +++ b/clang/include/clang/Sema/HLSLExternalSemaSource.h @@ -44,6 +44,7 @@ class HLSLExternalSemaSource : public ExternalSemaSource { private: void defineTrivialHLSLTypes(); void defineHLSLVectorAlias(); + void defineHLSLMatrixAlias(); void defineHLSLTypesWithForwardDeclarations(); void onCompletion(CXXRecordDecl *Record, CompletionFunction Fn); }; diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index d017d1f829015..f53aafdeb4f36 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -503,7 +503,6 @@ enum class FormatStringType { FreeBSDKPrintf, OSTrace, OSLog, - Syslog, Unknown }; @@ -11715,6 +11714,23 @@ class Sema final : public SemaBase { const TemplateArgumentListInfo *TemplateArgs, bool IsAddressOfOperand); + UnsignedOrNone getPackIndex(TemplateArgument Pack) const { + return Pack.pack_size() - 1 - *ArgPackSubstIndex; + } + + TemplateArgument + getPackSubstitutedTemplateArgument(TemplateArgument Arg) const { + Arg = Arg.pack_elements()[*ArgPackSubstIndex]; + if (Arg.isPackExpansion()) + Arg = Arg.getPackExpansionPattern(); + return Arg; + } + + ExprResult BuildSubstNonTypeTemplateParmExpr( + Decl *AssociatedDecl, const NonTypeTemplateParmDecl *NTTP, + SourceLocation loc, TemplateArgument Replacement, + UnsignedOrNone PackIndex, bool Final); + /// Form a template name from a name that is syntactically required to name a /// template, either due to use of the 'template' keyword or because a name in /// this syntactic context is assumed to name a template (C++ @@ -13319,8 +13335,6 @@ class Sema final : public SemaBase { Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; - bool 
CheckInstantiationDepth(SourceLocation PointOfInstantiation, - SourceRange InstantiationRange); InstantiatingTemplate(Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, @@ -13513,7 +13527,7 @@ class Sema final : public SemaBase { ~ArgPackSubstIndexRAII() { Self.ArgPackSubstIndex = OldSubstIndex; } }; - void pushCodeSynthesisContext(CodeSynthesisContext Ctx); + bool pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); void PrintContextStack(InstantiationContextDiagFuncRef DiagFunc) { diff --git a/clang/include/clang/Sema/SemaOpenMP.h b/clang/include/clang/Sema/SemaOpenMP.h index c0fd7a6d63611..daf58b18a03cb 100644 --- a/clang/include/clang/Sema/SemaOpenMP.h +++ b/clang/include/clang/Sema/SemaOpenMP.h @@ -463,6 +463,13 @@ class SemaOpenMP : public SemaBase { Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); + + /// Called on well-formed '#pragma omp fuse' after parsing of its + /// clauses and the associated statement. + StmtResult ActOnOpenMPFuseDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult @@ -921,6 +928,12 @@ class SemaOpenMP : public SemaBase { SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); + + /// Called on well-form 'looprange' clause after parsing its arguments. + OMPClause * + ActOnOpenMPLoopRangeClause(Expr *First, Expr *Count, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation FirstLoc, + SourceLocation CountLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, @@ -1485,7 +1498,81 @@ class SemaOpenMP : public SemaBase { bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl &LoopHelpers, - Stmt *&Body, SmallVectorImpl> &OriginalInits); + Stmt *&Body, SmallVectorImpl> &OriginalInits); + + /// Holds the result of the analysis of a (possibly canonical) loop. + struct LoopAnalysis { + /// The analyzed loop or loop transformation. + Stmt *AStmt = nullptr; + /// Loop analyses results. + OMPLoopBasedDirective::HelperExprs HelperExprs; + /// The for-statement of the loop. TheForStmt equals AStmt only when the + /// latter is a canonical loop (i.e. not a loop transformation). + Stmt *TheForStmt = nullptr; + /// Initialization statements before transformations. + SmallVector OriginalInits; + /// Initialization statements required after transformation of this loop. + SmallVector TransformsPreInits; + + explicit LoopAnalysis(Stmt *S) : AStmt(S) {} + + bool isRegularLoop() const { return isRegularLoop(AStmt); } + bool isLoopTransformation() const { return isLoopTransformation(AStmt); } + + // Convenience functions used when building LoopSequenceAnalysis. + static bool isRegularLoop(Stmt *S) { + return isa(S); + } + static bool isLoopTransformation(Stmt *S) { + return isa(S); + } + }; + + /// Holds the result of the analysis of a (possibly canonical) loop sequence. + struct LoopSequenceAnalysis { + /// Number of top level canonical loops. + unsigned LoopSeqSize = 0; + /// For each loop results of the analysis. + SmallVector Loops; + /// Additional code required before entering the transformed loop sequence. + SmallVector LoopSequencePreInits; + + // Convenience function used when building the LoopSequenceAnalysis. 
+ static bool isLoopSequenceDerivation(Stmt *S) { + return LoopAnalysis::isRegularLoop(S) || + LoopAnalysis::isLoopTransformation(S); + } + }; + + /// The main recursive process of `checkTransformableLoopSequence` that + /// performs grammatical parsing of a canonical loop sequence. It extracts + /// key information, such as the number of top-level loops, loop statements, + /// helper expressions, and other relevant loop-related data, all in a single + /// execution to avoid redundant traversals. This analysis flattens inner + /// Loop Sequences + /// + /// \param LoopSeqStmt The AST of the original statement. + /// \param SeqAnalysis [out] Result of the analysis of \p LoopSeqStmt + /// \param Context + /// \param Kind The loop transformation directive kind. + /// \return Whether the original statement is both syntactically and + /// semantically correct according to OpenMP 6.0 canonical loop + /// sequence definition. + bool analyzeLoopSequence(Stmt *LoopSeqStmt, LoopSequenceAnalysis &SeqAnalysis, + ASTContext &Context, OpenMPDirectiveKind Kind); + + /// Validates and checks whether a loop sequence can be transformed according + /// to the given directive, providing necessary setup and initialization + /// (Driver function) before recursion using `analyzeLoopSequence`. + /// + /// \param Kind The loop transformation directive kind. + /// \param AStmt The AST of the original statement + /// \param SeqAnalysis [out] Result of the analysis of \p LoopSeqStmt + /// \param Context + /// \return Whether there was an absence of errors or not + bool checkTransformableLoopSequence(OpenMPDirectiveKind Kind, Stmt *AStmt, + LoopSequenceAnalysis &SeqAnalysis, + ASTContext &Context); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h index 441047d64f48c..5d09d5536e5ab 100644 --- a/clang/include/clang/Serialization/ASTBitCodes.h +++ b/clang/include/clang/Serialization/ASTBitCodes.h @@ -1160,7 +1160,7 @@ enum PredefinedTypeIDs { /// /// Type IDs for non-predefined types will start at /// NUM_PREDEF_TYPE_IDs. -const unsigned NUM_PREDEF_TYPE_IDS = 513; +const unsigned NUM_PREDEF_TYPE_IDS = 514; // Ensure we do not overrun the predefined types we reserved // in the enum PredefinedTypeIDs above. @@ -1951,6 +1951,7 @@ enum StmtCode { STMT_OMP_UNROLL_DIRECTIVE, STMT_OMP_REVERSE_DIRECTIVE, STMT_OMP_INTERCHANGE_DIRECTIVE, + STMT_OMP_FUSE_DIRECTIVE, STMT_OMP_FOR_DIRECTIVE, STMT_OMP_FOR_SIMD_DIRECTIVE, STMT_OMP_SECTIONS_DIRECTIVE, diff --git a/clang/include/clang/Serialization/ModuleCache.h b/clang/include/clang/Serialization/ModuleCache.h index 3117d954a09cc..ec052c5c18e0a 100644 --- a/clang/include/clang/Serialization/ModuleCache.h +++ b/clang/include/clang/Serialization/ModuleCache.h @@ -45,11 +45,15 @@ class ModuleCache : public RefCountedBase { /// were validated. virtual void updateModuleTimestamp(StringRef ModuleFilename) = 0; + /// Prune module files that haven't been accessed in a long time. + virtual void maybePrune(StringRef Path, time_t PruneInterval, + time_t PruneAfter) = 0; + /// Returns this process's view of the module cache. virtual InMemoryModuleCache &getInMemoryModuleCache() = 0; virtual const InMemoryModuleCache &getInMemoryModuleCache() const = 0; - // TODO: Virtualize writing/reading PCM files, pruning, etc. + // TODO: Virtualize writing/reading PCM files, etc. 
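For reference, the kind of source the 'fuse' support added above targets (via ParseOpenMPLoopRangeClause, ActOnOpenMPFuseDirective and ActOnOpenMPLoopRangeClause). This snippet is not part of the diff; it is a sketch of a canonical loop sequence per OpenMP 6.0, where `looprange(1, 2)` selects the two loops of the sequence to be fused, and the exact surface syntax is governed by the specification rather than by this patch.

```c++
// Sketch only: a canonical loop sequence under '#pragma omp fuse'.
void saxpy_pair(int n, float a, const float *x, float *y, float *z) {
#pragma omp fuse looprange(1, 2)
  {
    for (int i = 0; i < n; ++i)
      y[i] += a * x[i];
    for (int i = 0; i < n; ++i)
      z[i] += a * x[i];
  }
}
```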
virtual ~ModuleCache() = default; }; @@ -59,6 +63,9 @@ class ModuleCache : public RefCountedBase { /// \c CompilerInstance instances participating in building modules for single /// translation unit in order to share the same \c InMemoryModuleCache. IntrusiveRefCntPtr createCrossProcessModuleCache(); + +/// Shared implementation of `ModuleCache::maybePrune()`. +void maybePruneImpl(StringRef Path, time_t PruneInterval, time_t PruneAfter); } // namespace clang #endif diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h index 5dcf03f7a4648..c233ca1af0256 100644 --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h @@ -1414,7 +1414,7 @@ class CallEventManager { } public: - CallEventManager(llvm::BumpPtrAllocator &alloc) : Alloc(alloc) {} + CallEventManager(llvm::BumpPtrAllocator &alloc); /// Gets an outside caller given a callee context. CallEventRef<> getCaller(const StackFrameContext *CalleeCtx, diff --git a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h index c3601a4e73e1f..f222ded8a966a 100644 --- a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h +++ b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h @@ -220,7 +220,6 @@ class FullDependencyConsumer : public DependencyConsumer { std::vector VisibleModules; std::vector Commands; std::string ContextHash; - std::vector OutputPaths; const llvm::DenseSet &AlreadySeen; }; diff --git a/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h b/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h index 3234b0976a8e7..ed2aa55c99279 100644 --- a/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h +++ b/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h @@ -37,11 +37,11 @@ namespace internal { template struct HasHandle { private: template - static auto check(ClassT *) -> typename std::is_same< - decltype(std::declval().visit( - std::declval(), - *std::declval *>())), - void>::type; + static auto check(ClassT *) + -> std::is_same().visit( + std::declval(), + *std::declval *>())), + void>; template static std::false_type check(...); diff --git a/clang/lib/AST/APValue.cpp b/clang/lib/AST/APValue.cpp index 7173c2a0e1a2a..2e1c8eb3726cf 100644 --- a/clang/lib/AST/APValue.cpp +++ b/clang/lib/AST/APValue.cpp @@ -784,7 +784,7 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy, if (!O.isZero()) { if (IsReference) Out << "*("; - if (S.isZero() || O % S) { + if (S.isZero() || !O.isMultipleOf(S)) { Out << "(char*)"; S = CharUnits::One(); } diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index 97c59b2ceec2f..056bfe36b2a0a 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -3501,6 +3501,7 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx, case BuiltinType::VectorQuad: case BuiltinType::VectorPair: case BuiltinType::DMR1024: + case BuiltinType::DMR2048: OS << "?"; return; @@ -4567,6 +4568,10 @@ QualType ASTContext::getWebAssemblyExternrefType() const { /// type. 
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, unsigned NumFields) const { + auto K = llvm::ScalableVecTyKey{EltTy, NumElts, NumFields}; + if (auto It = ScalableVecTyMap.find(K); It != ScalableVecTyMap.end()) + return It->second; + if (Target->hasAArch64ACLETypes()) { uint64_t EltTySize = getTypeSize(EltTy); @@ -4575,29 +4580,29 @@ QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, if (EltTy->hasIntegerRepresentation() && !EltTy->isBooleanType() && \ EltTy->hasSignedIntegerRepresentation() == IsSigned && \ EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \ - return SingletonId; \ + return ScalableVecTyMap[K] = SingletonId; \ } #define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls, \ ElBits, NF) \ if (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \ - return SingletonId; \ + return ScalableVecTyMap[K] = SingletonId; \ } #define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls, \ ElBits, NF) \ if (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \ - return SingletonId; \ + return ScalableVecTyMap[K] = SingletonId; \ } #define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls, \ ElBits, NF) \ if (EltTy->isMFloat8Type() && EltTySize == ElBits && \ NumElts == (NumEls * NF) && NumFields == 1) { \ - return SingletonId; \ + return ScalableVecTyMap[K] = SingletonId; \ } #define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \ if (EltTy->isBooleanType() && NumElts == (NumEls * NF) && NumFields == 1) \ - return SingletonId; + return ScalableVecTyMap[K] = SingletonId; #include "clang/Basic/AArch64ACLETypes.def" } else if (Target->hasRISCVVTypes()) { uint64_t EltTySize = getTypeSize(EltTy); @@ -4611,10 +4616,10 @@ QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ IsBF && !IsFP)) && \ EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \ - return SingletonId; + return ScalableVecTyMap[K] = SingletonId; #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ if (EltTy->isBooleanType() && NumElts == NumEls) \ - return SingletonId; + return ScalableVecTyMap[K] = SingletonId; #include "clang/Basic/RISCVVTypes.def" } return QualType(); @@ -5869,8 +5874,14 @@ ASTContext::getSubstBuiltinTemplatePack(const TemplateArgument &ArgPack) { QualType Canon; TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); - if (!CanonArgPack.structurallyEquals(ArgPack)) + if (!CanonArgPack.structurallyEquals(ArgPack)) { Canon = getSubstBuiltinTemplatePack(CanonArgPack); + // Refresh InsertPos, in case the recursive call above caused rehashing, + // which would invalidate the bucket pointer. 
+ [[maybe_unused]] const auto *Nothing = + SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!Nothing); + } auto *PackType = new (*this, alignof(SubstBuiltinTemplatePackType)) SubstBuiltinTemplatePackType(Canon, ArgPack); @@ -12580,6 +12591,10 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, Type = Context.AMDGPUBufferRsrcTy; break; } + case 't': { + Type = Context.AMDGPUTextureTy; + break; + } default: llvm_unreachable("Unexpected target builtin type"); } diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index 7518cfd2cf94d..c71fd22fe9d7e 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -540,7 +540,8 @@ bool Compiler::VisitCastExpr(const CastExpr *CE) { if (const auto *IL = dyn_cast(SubExpr)) { if (ToT != PT_IntAP && ToT != PT_IntAPS && FromT != PT_IntAP && FromT != PT_IntAPS && !CE->getType()->isEnumeralType()) - return this->emitConst(IL->getValue(), CE); + return this->emitConst(APSInt(IL->getValue(), !isSignedType(*FromT)), + CE); if (!this->emitConst(IL->getValue(), SubExpr)) return false; } else { @@ -2383,13 +2384,8 @@ bool Compiler::VisitMemberExpr(const MemberExpr *E) { return this->visitDeclRef(Member, E); } - if (Initializing) { - if (!this->delegate(Base)) - return false; - } else { - if (!this->visit(Base)) - return false; - } + if (!this->visit(Base)) + return false; // Base above gives us a pointer on the stack. const auto *FD = cast(Member); @@ -2939,8 +2935,9 @@ bool Compiler::VisitMaterializeTemporaryExpr( // For everyhing else, use local variables. if (SubExprT) { bool IsConst = SubExpr->getType().isConstQualified(); - unsigned LocalIndex = - allocateLocalPrimitive(E, *SubExprT, IsConst, E->getExtendingDecl()); + bool IsVolatile = SubExpr->getType().isVolatileQualified(); + unsigned LocalIndex = allocateLocalPrimitive( + E, *SubExprT, IsConst, IsVolatile, E->getExtendingDecl()); if (!this->visit(SubExpr)) return false; if (!this->emitSetLocal(*SubExprT, LocalIndex, E)) @@ -4457,6 +4454,9 @@ bool Compiler::visitAssignment(const Expr *LHS, const Expr *RHS, if (!this->visit(LHS)) return false; + if (LHS->getType().isVolatileQualified()) + return this->emitInvalidStore(LHS->getType().getTypePtr(), E); + // We don't support assignments in C. if (!Ctx.getLangOpts().CPlusPlus && !this->emitInvalid(E)) return false; @@ -4542,7 +4542,14 @@ bool Compiler::emitConst(T Value, const Expr *E) { template bool Compiler::emitConst(const APSInt &Value, PrimType Ty, const Expr *E) { - return this->emitConst(static_cast(Value), Ty, E); + if (Ty == PT_IntAPS) + return this->emitConstIntAPS(Value, E); + if (Ty == PT_IntAP) + return this->emitConstIntAP(Value, E); + + if (Value.isSigned()) + return this->emitConst(Value.getSExtValue(), Ty, E); + return this->emitConst(Value.getZExtValue(), Ty, E); } template @@ -4565,13 +4572,14 @@ bool Compiler::emitConst(const APSInt &Value, const Expr *E) { template unsigned Compiler::allocateLocalPrimitive( - DeclTy &&Src, PrimType Ty, bool IsConst, const ValueDecl *ExtendingDecl, - ScopeKind SC, bool IsConstexprUnknown) { + DeclTy &&Src, PrimType Ty, bool IsConst, bool IsVolatile, + const ValueDecl *ExtendingDecl, ScopeKind SC, bool IsConstexprUnknown) { // FIXME: There are cases where Src.is() is wrong, e.g. // (int){12} in C. Consider using Expr::isTemporaryObject() instead // or isa(). 
Descriptor *D = P.createDescriptor(Src, Ty, nullptr, Descriptor::InlineDescMD, - IsConst, isa(Src)); + IsConst, isa(Src), + /*IsMutable=*/false, IsVolatile); D->IsConstexprUnknown = IsConstexprUnknown; Scope::Local Local = this->createLocal(D); if (auto *VD = dyn_cast_if_present(Src.dyn_cast())) @@ -4879,7 +4887,8 @@ Compiler::visitVarDecl(const VarDecl *VD, const Expr *Init, if (VarT) { unsigned Offset = this->allocateLocalPrimitive( - VD, *VarT, VD->getType().isConstQualified(), nullptr, ScopeKind::Block, + VD, *VarT, VD->getType().isConstQualified(), + VD->getType().isVolatileQualified(), nullptr, ScopeKind::Block, IsConstexprUnknown); if (Init) { // If this is a toplevel declaration, create a scope for the diff --git a/clang/lib/AST/ByteCode/Compiler.h b/clang/lib/AST/ByteCode/Compiler.h index 09599b3547888..5c46f75af4da3 100644 --- a/clang/lib/AST/ByteCode/Compiler.h +++ b/clang/lib/AST/ByteCode/Compiler.h @@ -327,6 +327,7 @@ class Compiler : public ConstStmtVisitor, bool>, /// Creates a local primitive value. unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsConst, + bool IsVolatile = false, const ValueDecl *ExtendingDecl = nullptr, ScopeKind SC = ScopeKind::Block, bool IsConstexprUnknown = false); diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp index cfda6e8ded760..683e916391337 100644 --- a/clang/lib/AST/ByteCode/Context.cpp +++ b/clang/lib/AST/ByteCode/Context.cpp @@ -18,6 +18,7 @@ #include "clang/AST/ASTLambda.h" #include "clang/AST/Expr.h" #include "clang/Basic/TargetInfo.h" +#include "llvm/Support/SystemZ/zOSSupport.h" using namespace clang; using namespace clang::interp; @@ -236,6 +237,52 @@ bool Context::evaluateCharRange(State &Parent, const Expr *SizeExpr, return evaluateStringRepr(Parent, SizeExpr, PtrExpr, Result); } +bool Context::evaluateString(State &Parent, const Expr *E, + std::string &Result) { + assert(Stk.empty()); + Compiler C(*this, *P, Parent, Stk); + + auto PtrRes = C.interpretAsPointer(E, [&](const Pointer &Ptr) { + const Descriptor *FieldDesc = Ptr.getFieldDesc(); + if (!FieldDesc->isPrimitiveArray()) + return false; + + if (!Ptr.isConst()) + return false; + + unsigned N = Ptr.getNumElems(); + + if (Ptr.elemSize() == 1 /* bytes */) { + const char *Chars = reinterpret_cast(Ptr.getRawAddress()); + unsigned Length = strnlen(Chars, N); + // Wasn't null terminated. + if (N == Length) + return false; + Result.assign(Chars, Length); + return true; + } + + PrimType ElemT = FieldDesc->getPrimType(); + for (unsigned I = Ptr.getIndex(); I != N; ++I) { + INT_TYPE_SWITCH(ElemT, { + auto Elem = Ptr.elem(I); + if (Elem.isZero()) + return true; + Result.push_back(static_cast(Elem)); + }); + } + // We didn't find a 0 byte. + return false; + }); + + if (PtrRes.isInvalid()) { + C.cleanup(); + Stk.clear(); + return false; + } + return true; +} + bool Context::evaluateStrlen(State &Parent, const Expr *E, uint64_t &Result) { assert(Stk.empty()); Compiler C(*this, *P, Parent, Stk); @@ -245,6 +292,9 @@ bool Context::evaluateStrlen(State &Parent, const Expr *E, uint64_t &Result) { if (!FieldDesc->isPrimitiveArray()) return false; + if (Ptr.isDummy() || Ptr.isUnknownSizeArray()) + return false; + unsigned N = Ptr.getNumElems(); if (Ptr.elemSize() == 1) { Result = strnlen(reinterpret_cast(Ptr.getRawAddress()), N); @@ -517,9 +567,15 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) { // Assign descriptors to all parameters. // Composite objects are lowered to pointers. 
for (const ParmVarDecl *PD : FuncDecl->parameters()) { + bool IsConst = PD->getType().isConstQualified(); + bool IsVolatile = PD->getType().isVolatileQualified(); + OptPrimType T = classify(PD->getType()); PrimType PT = T.value_or(PT_Ptr); - Descriptor *Desc = P->createDescriptor(PD, PT); + Descriptor *Desc = P->createDescriptor(PD, PT, nullptr, std::nullopt, + IsConst, /*IsTemporary=*/false, + /*IsMutable=*/false, IsVolatile); + ParamDescriptors.insert({ParamOffset, {PT, Desc}}); ParamOffsets.push_back(ParamOffset); ParamOffset += align(primSize(PT)); @@ -545,9 +601,14 @@ const Function *Context::getOrCreateObjCBlock(const BlockExpr *E) { // Assign descriptors to all parameters. // Composite objects are lowered to pointers. for (const ParmVarDecl *PD : BD->parameters()) { + bool IsConst = PD->getType().isConstQualified(); + bool IsVolatile = PD->getType().isVolatileQualified(); + OptPrimType T = classify(PD->getType()); PrimType PT = T.value_or(PT_Ptr); - Descriptor *Desc = P->createDescriptor(PD, PT); + Descriptor *Desc = P->createDescriptor(PD, PT, nullptr, std::nullopt, + IsConst, /*IsTemporary=*/false, + /*IsMutable=*/false, IsVolatile); ParamDescriptors.insert({ParamOffset, {PT, Desc}}); ParamOffsets.push_back(ParamOffset); ParamOffset += align(primSize(PT)); diff --git a/clang/lib/AST/ByteCode/Context.h b/clang/lib/AST/ByteCode/Context.h index 280a31725555f..f5fa977cbcad8 100644 --- a/clang/lib/AST/ByteCode/Context.h +++ b/clang/lib/AST/ByteCode/Context.h @@ -67,6 +67,10 @@ class Context final { bool evaluateCharRange(State &Parent, const Expr *SizeExpr, const Expr *PtrExpr, std::string &Result); + /// Evaluate \param E and if it can be evaluated to a null-terminated string, + /// copy the result into \param Result. + bool evaluateString(State &Parent, const Expr *E, std::string &Result); + /// Evalute \param E and if it can be evaluated to a string literal, /// run strlen() on it. 
bool evaluateStrlen(State &Parent, const Expr *E, uint64_t &Result); diff --git a/clang/lib/AST/ByteCode/EvalEmitter.h b/clang/lib/AST/ByteCode/EvalEmitter.h index e81ea67adf97a..a9f87db5d7f8d 100644 --- a/clang/lib/AST/ByteCode/EvalEmitter.h +++ b/clang/lib/AST/ByteCode/EvalEmitter.h @@ -16,6 +16,7 @@ #include "EvaluationResult.h" #include "InterpState.h" #include "PrimType.h" +#include "Record.h" #include "Source.h" namespace clang { diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp index 0f322f6ed42ac..21af3d6ac7f90 100644 --- a/clang/lib/AST/ByteCode/Interp.cpp +++ b/clang/lib/AST/ByteCode/Interp.cpp @@ -889,6 +889,8 @@ bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { return false; if (!CheckConst(S, OpPC, Ptr)) return false; + if (!CheckVolatile(S, OpPC, Ptr, AK_Assign)) + return false; if (!S.inConstantContext() && isConstexprUnknown(Ptr)) return false; return true; @@ -1027,8 +1029,8 @@ static bool CheckCallDepth(InterpState &S, CodePtr OpPC) { return true; } -bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This) { - if (!This.isZero()) +bool CheckThis(InterpState &S, CodePtr OpPC) { + if (S.Current->hasThisPointer()) return true; const Expr *E = S.Current->getExpr(OpPC); @@ -1198,8 +1200,8 @@ static bool runRecordDestructor(InterpState &S, CodePtr OpPC, const Record *R = Desc->ElemRecord; assert(R); - if (Pointer::pointToSameBlock(BasePtr, S.Current->getThis()) && - S.Current->getFunction()->isDestructor()) { + if (S.Current->hasThisPointer() && S.Current->getFunction()->isDestructor() && + Pointer::pointToSameBlock(BasePtr, S.Current->getThis())) { const SourceInfo &Loc = S.Current->getSource(OpPC); S.FFDiag(Loc, diag::note_constexpr_double_destroy); return false; diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h index b3b4b998439cc..bb0c4580b14a9 100644 --- a/clang/lib/AST/ByteCode/Interp.h +++ b/clang/lib/AST/ByteCode/Interp.h @@ -104,7 +104,7 @@ bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr); bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr); /// Checks the 'this' pointer. -bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This); +bool CheckThis(InterpState &S, CodePtr OpPC); /// Checks if dynamic memory allocation is available in the current /// language mode. 
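For reference, a possible call site for the `evaluateString` entry point declared above. This snippet is not part of the diff; `Ctx` (an `interp::Context`), `Parent` (the evaluation `State`) and `E` (a `const Expr *`) are assumed to exist in the caller.

```c++
// Sketch only: ask the bytecode interpreter for a null-terminated string.
std::string Contents;
if (Ctx.evaluateString(Parent, E, Contents)) {
  // 'Contents' now holds the characters of the string E evaluated to.
}
```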
@@ -1440,9 +1440,9 @@ template ::T> bool GetThisField(InterpState &S, CodePtr OpPC, uint32_t I) { if (S.checkingPotentialConstantExpression()) return false; - const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) + if (!CheckThis(S, OpPC)) return false; + const Pointer &This = S.Current->getThis(); const Pointer &Field = This.atField(I); if (!CheckLoad(S, OpPC, Field)) return false; @@ -1454,10 +1454,10 @@ template ::T> bool SetThisField(InterpState &S, CodePtr OpPC, uint32_t I) { if (S.checkingPotentialConstantExpression()) return false; + if (!CheckThis(S, OpPC)) + return false; const T &Value = S.Stk.pop(); const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) - return false; const Pointer &Field = This.atField(I); if (!CheckStore(S, OpPC, Field)) return false; @@ -1560,9 +1560,9 @@ template ::T> bool InitThisField(InterpState &S, CodePtr OpPC, uint32_t I) { if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0) return false; - const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) + if (!CheckThis(S, OpPC)) return false; + const Pointer &This = S.Current->getThis(); const Pointer &Field = This.atField(I); assert(Field.canBeInitialized()); Field.deref() = S.Stk.pop(); @@ -1574,9 +1574,9 @@ template ::T> bool InitThisFieldActivate(InterpState &S, CodePtr OpPC, uint32_t I) { if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0) return false; - const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) + if (!CheckThis(S, OpPC)) return false; + const Pointer &This = S.Current->getThis(); const Pointer &Field = This.atField(I); assert(Field.canBeInitialized()); Field.deref() = S.Stk.pop(); @@ -1593,9 +1593,9 @@ bool InitThisBitField(InterpState &S, CodePtr OpPC, const Record::Field *F, assert(F->isBitField()); if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0) return false; - const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) + if (!CheckThis(S, OpPC)) return false; + const Pointer &This = S.Current->getThis(); const Pointer &Field = This.atField(FieldOffset); assert(Field.canBeInitialized()); const auto &Value = S.Stk.pop(); @@ -1610,9 +1610,9 @@ bool InitThisBitFieldActivate(InterpState &S, CodePtr OpPC, assert(F->isBitField()); if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0) return false; - const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) + if (!CheckThis(S, OpPC)) return false; + const Pointer &This = S.Current->getThis(); const Pointer &Field = This.atField(FieldOffset); assert(Field.canBeInitialized()); const auto &Value = S.Stk.pop(); @@ -1730,9 +1730,8 @@ inline bool GetPtrLocal(InterpState &S, CodePtr OpPC, uint32_t I) { } inline bool GetPtrParam(InterpState &S, CodePtr OpPC, uint32_t I) { - if (S.checkingPotentialConstantExpression()) { + if (S.Current->isBottomFrame()) return false; - } S.Stk.push(S.Current->getParamPointer(I)); return true; } @@ -1750,9 +1749,9 @@ bool GetPtrFieldPop(InterpState &S, CodePtr OpPC, uint32_t Off); inline bool GetPtrThisField(InterpState &S, CodePtr OpPC, uint32_t Off) { if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0) return false; - const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) + if (!CheckThis(S, OpPC)) return false; + const Pointer &This = S.Current->getThis(); S.Stk.push(This.atField(Off)); return true; } @@ -1844,9 +1843,9 @@ inline bool 
GetMemberPtrBasePop(InterpState &S, CodePtr OpPC, int32_t Off) { inline bool GetPtrThisBase(InterpState &S, CodePtr OpPC, uint32_t Off) { if (S.checkingPotentialConstantExpression()) return false; - const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) + if (!CheckThis(S, OpPC)) return false; + const Pointer &This = S.Current->getThis(); S.Stk.push(This.atField(Off)); return true; } @@ -1925,10 +1924,10 @@ inline bool GetPtrThisVirtBase(InterpState &S, CodePtr OpPC, assert(D); if (S.checkingPotentialConstantExpression()) return false; - const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) + if (!CheckThis(S, OpPC)) return false; - return VirtBaseHelper(S, OpPC, D, S.Current->getThis()); + const Pointer &This = S.Current->getThis(); + return VirtBaseHelper(S, OpPC, D, This); } //===----------------------------------------------------------------------===// @@ -1991,6 +1990,8 @@ static inline bool Activate(InterpState &S, CodePtr OpPC) { static inline bool ActivateThisField(InterpState &S, CodePtr OpPC, uint32_t I) { if (S.checkingPotentialConstantExpression()) return false; + if (!S.Current->hasThisPointer()) + return false; const Pointer &Ptr = S.Current->getThis(); assert(Ptr.atField(I).canBeInitialized()); @@ -2124,10 +2125,10 @@ bool InitElem(InterpState &S, CodePtr OpPC, uint32_t Idx) { const T &Value = S.Stk.pop(); const Pointer &Ptr = S.Stk.peek(); - if (Ptr.isUnknownSizeArray()) + const Descriptor *Desc = Ptr.getFieldDesc(); + if (Desc->isUnknownSizeArray()) return false; - const Descriptor *Desc = Ptr.getFieldDesc(); // In the unlikely event that we're initializing the first item of // a non-array, skip the atIndex(). if (Idx == 0 && !Desc->isArray()) { @@ -2158,10 +2159,10 @@ bool InitElemPop(InterpState &S, CodePtr OpPC, uint32_t Idx) { const T &Value = S.Stk.pop(); const Pointer &Ptr = S.Stk.pop(); - if (Ptr.isUnknownSizeArray()) + const Descriptor *Desc = Ptr.getFieldDesc(); + if (Desc->isUnknownSizeArray()) return false; - const Descriptor *Desc = Ptr.getFieldDesc(); // In the unlikely event that we're initializing the first item of // a non-array, skip the atIndex(). if (Idx == 0 && !Desc->isArray()) { @@ -2813,13 +2814,11 @@ inline bool IsNonNull(InterpState &S, CodePtr OpPC) { inline bool This(InterpState &S, CodePtr OpPC) { // Cannot read 'this' in this mode. - if (S.checkingPotentialConstantExpression()) { + if (S.checkingPotentialConstantExpression()) return false; - } - - const Pointer &This = S.Current->getThis(); - if (!CheckThis(S, OpPC, This)) + if (!CheckThis(S, OpPC)) return false; + const Pointer &This = S.Current->getThis(); // Ensure the This pointer has been cast to the correct base. 
if (!This.isDummy()) { @@ -3344,6 +3343,18 @@ inline bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind, return false; } +inline bool InvalidStore(InterpState &S, CodePtr OpPC, const Type *T) { + if (S.getLangOpts().CPlusPlus) { + QualType VolatileType = QualType(T, 0).withVolatile(); + S.FFDiag(S.Current->getSource(OpPC), + diag::note_constexpr_access_volatile_type) + << AK_Assign << VolatileType; + } else { + S.FFDiag(S.Current->getSource(OpPC)); + } + return false; +} + inline bool InvalidDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR, bool InitializerFailed) { assert(DR); @@ -3534,6 +3545,9 @@ inline bool AllocCN(InterpState &S, CodePtr OpPC, const Descriptor *ElementDesc, if (!CheckDynamicMemoryAllocation(S, OpPC)) return false; + if (!ElementDesc) + return false; + SizeT NumElements = S.Stk.pop(); if (!CheckArraySize(S, OpPC, &NumElements, ElementDesc->getSize(), IsNoThrow)) { diff --git a/clang/lib/AST/ByteCode/InterpBlock.h b/clang/lib/AST/ByteCode/InterpBlock.h index ea9f44c38842e..9b3dadca6cc14 100644 --- a/clang/lib/AST/ByteCode/InterpBlock.h +++ b/clang/lib/AST/ByteCode/InterpBlock.h @@ -115,9 +115,10 @@ class Block final { return reinterpret_cast(this) + sizeof(Block); } - template T deref() const { + template const T &deref() const { return *reinterpret_cast(data()); } + template T &deref() { return *reinterpret_cast(data()); } /// Invokes the constructor. void invokeCtor() { diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp index 4b259dab000b1..a2e97fcafdfef 100644 --- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp +++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp @@ -563,9 +563,9 @@ static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, case Builtin::BI__builtin_islessequal: return LHS <= RHS; case Builtin::BI__builtin_islessgreater: { - ComparisonCategoryResult cmp = LHS.compare(RHS); - return cmp == ComparisonCategoryResult::Less || - cmp == ComparisonCategoryResult::Greater; + ComparisonCategoryResult Cmp = LHS.compare(RHS); + return Cmp == ComparisonCategoryResult::Less || + Cmp == ComparisonCategoryResult::Greater; } case Builtin::BI__builtin_isunordered: return LHS.compare(RHS) == ComparisonCategoryResult::Unordered; @@ -583,8 +583,7 @@ static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType()); - APSInt FPClassArg = popToAPSInt(S.Stk, FPClassArgT); + APSInt FPClassArg = popToAPSInt(S, Call->getArg(1)); const Floating &F = S.Stk.pop(); int32_t Result = static_cast( @@ -655,8 +654,7 @@ static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Val = popToAPSInt(S.Stk, ArgT); + APSInt Val = popToAPSInt(S, Call->getArg(0)); if (Val == APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false)) return false; @@ -666,16 +664,6 @@ static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, return true; } -static bool interp__builtin_knot(InterpState &S, CodePtr OpPC, - const InterpFrame *Frame, - const CallExpr *Call) { - APSInt Val = - popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)->getType())); - APInt Result = ~Val; - pushInteger(S, APSInt(std::move(Result), true), 
Call->getType()); - return true; -} - static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { @@ -684,8 +672,7 @@ static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const Pointer &Arg = S.Stk.pop(); Val = convertBoolVectorToInt(Arg); } else { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - Val = popToAPSInt(S.Stk, ArgT); + Val = popToAPSInt(S, Call->getArg(0)); } pushInteger(S, Val.popcount(), Call->getType()); return true; @@ -694,8 +681,7 @@ static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, static bool interp__builtin_parity(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Val = popToAPSInt(S.Stk, ArgT); + APSInt Val = popToAPSInt(S, Call->getArg(0)); pushInteger(S, Val.popcount() % 2, Call->getType()); return true; } @@ -703,8 +689,7 @@ static bool interp__builtin_parity(InterpState &S, CodePtr OpPC, static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Val = popToAPSInt(S.Stk, ArgT); + APSInt Val = popToAPSInt(S, Call->getArg(0)); pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType()); return true; } @@ -712,8 +697,7 @@ static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC, static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Val = popToAPSInt(S.Stk, ArgT); + APSInt Val = popToAPSInt(S, Call->getArg(0)); pushInteger(S, Val.reverseBits(), Call->getType()); return true; } @@ -756,11 +740,8 @@ static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Right) { - PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType()); - PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType()); - - APSInt Amount = popToAPSInt(S.Stk, AmountT); - APSInt Value = popToAPSInt(S.Stk, ValueT); + APSInt Amount = popToAPSInt(S, Call->getArg(1)); + APSInt Value = popToAPSInt(S, Call->getArg(0)); APSInt Result; if (Right) @@ -777,8 +758,7 @@ static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Value = popToAPSInt(S.Stk, ArgT); + APSInt Value = popToAPSInt(S, Call->getArg(0)); uint64_t N = Value.countr_zero(); pushInteger(S, N == Value.getBitWidth() ? 
0 : N + 1, Call->getType()); @@ -806,8 +786,7 @@ static bool interp__builtin_move(InterpState &S, CodePtr OpPC, static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Arg = popToAPSInt(S.Stk, ArgT); + APSInt Arg = popToAPSInt(S, Call->getArg(0)); int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber( Arg.getZExtValue()); @@ -981,17 +960,15 @@ static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, unsigned BuiltinOp) { std::optional Fallback; - if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) { - PrimType FallbackT = *S.getContext().classify(Call->getArg(1)); - Fallback = popToAPSInt(S.Stk, FallbackT); - } + if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) + Fallback = popToAPSInt(S, Call->getArg(1)); + APSInt Val; if (Call->getArg(0)->getType()->isExtVectorBoolType()) { const Pointer &Arg = S.Stk.pop(); Val = convertBoolVectorToInt(Arg); } else { - PrimType ValT = *S.getContext().classify(Call->getArg(0)); - Val = popToAPSInt(S.Stk, ValT); + Val = popToAPSInt(S, Call->getArg(0)); } // When the argument is 0, the result of GCC builtins is undefined, whereas @@ -1018,17 +995,15 @@ static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID) { std::optional Fallback; - if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) { - PrimType FallbackT = *S.getContext().classify(Call->getArg(1)); - Fallback = popToAPSInt(S.Stk, FallbackT); - } + if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) + Fallback = popToAPSInt(S, Call->getArg(1)); + APSInt Val; if (Call->getArg(0)->getType()->isExtVectorBoolType()) { const Pointer &Arg = S.Stk.pop(); Val = convertBoolVectorToInt(Arg); } else { - PrimType ValT = *S.getContext().classify(Call->getArg(0)); - Val = popToAPSInt(S.Stk, ValT); + Val = popToAPSInt(S, Call->getArg(0)); } if (Val == 0) { @@ -1046,13 +1021,10 @@ static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ReturnT = *S.getContext().classify(Call->getType()); - PrimType ValT = *S.getContext().classify(Call->getArg(0)); - const APSInt &Val = popToAPSInt(S.Stk, ValT); + const APSInt &Val = popToAPSInt(S, Call->getArg(0)); assert(Val.getActiveBits() <= 64); - INT_TYPE_SWITCH(ReturnT, - { S.Stk.push(T::from(Val.byteSwap().getZExtValue())); }); + pushInteger(S, Val.byteSwap(), Call->getType()); return true; } @@ -1067,9 +1039,8 @@ static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, return true; }; - PrimType ValT = *S.getContext().classify(Call->getArg(0)); const Pointer &Ptr = S.Stk.pop(); - const APSInt &SizeVal = popToAPSInt(S.Stk, ValT); + const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0)); // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power // of two less than or equal to the maximum inline atomic width, we know it @@ -1135,21 +1106,17 @@ static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ValT = *S.getContext().classify(Call->getArg(0)); - const APSInt &SizeVal = popToAPSInt(S.Stk, ValT); - - auto returnBool = [&S](bool Value) -> bool { - S.Stk.push(Value); - return true; - }; + const APSInt &SizeVal = 
popToAPSInt(S, Call->getArg(0)); CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue()); if (Size.isPowerOfTwo()) { // Check against inlining width. unsigned InlineWidthBits = S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth(); - if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) - return returnBool(true); + if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) { + S.Stk.push(true); + return true; + } } return false; // returnBool(false); @@ -1179,8 +1146,7 @@ static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp) { - PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1)); - const APSInt &Alignment = popToAPSInt(S.Stk, AlignmentT); + const APSInt &Alignment = popToAPSInt(S, Call->getArg(1)); if (Alignment < 0 || !Alignment.isPowerOf2()) { S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment; @@ -1194,8 +1160,7 @@ static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, return false; } - // The first parameter is either an integer or a pointer (but not a function - // pointer). + // The first parameter is either an integer or a pointer. PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0)); if (isIntegralType(FirstArgT)) { @@ -1214,12 +1179,12 @@ static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, } return true; } - assert(FirstArgT == PT_Ptr); const Pointer &Ptr = S.Stk.pop(); + if (!Ptr.isBlockPointer()) + return false; - unsigned PtrOffset = Ptr.getByteOffset(); - PtrOffset = Ptr.getIndex(); + unsigned PtrOffset = Ptr.getIndex(); CharUnits BaseAlignment = S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl()); CharUnits PtrAlign = @@ -1329,123 +1294,6 @@ static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, return true; } -static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC, - const InterpFrame *Frame, - const CallExpr *Call) { - if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() || - !Call->getArg(1)->getType()->isIntegerType()) - return false; - - PrimType ValT = *S.Ctx.classify(Call->getArg(0)); - PrimType IndexT = *S.Ctx.classify(Call->getArg(1)); - APSInt Index = popToAPSInt(S.Stk, IndexT); - APSInt Val = popToAPSInt(S.Stk, ValT); - - unsigned BitWidth = Val.getBitWidth(); - uint64_t Shift = Index.extractBitsAsZExtValue(8, 0); - uint64_t Length = Index.extractBitsAsZExtValue(8, 8); - Length = Length > BitWidth ? BitWidth : Length; - - // Handle out of bounds cases. 
- if (Length == 0 || Shift >= BitWidth) { - pushInteger(S, 0, Call->getType()); - return true; - } - - uint64_t Result = Val.getZExtValue() >> Shift; - Result &= llvm::maskTrailingOnes(Length); - pushInteger(S, Result, Call->getType()); - return true; -} - -static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC, - const InterpFrame *Frame, - const CallExpr *Call) { - QualType CallType = Call->getType(); - if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() || - !Call->getArg(1)->getType()->isIntegerType() || - !CallType->isIntegerType()) - return false; - - APSInt Idx = popToAPSInt(S, Call->getArg(1)); - APSInt Val = popToAPSInt(S, Call->getArg(0)); - - unsigned BitWidth = Val.getBitWidth(); - uint64_t Index = Idx.extractBitsAsZExtValue(8, 0); - - if (Index < BitWidth) - Val.clearHighBits(BitWidth - Index); - - pushInteger(S, Val, CallType); - return true; -} - -static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC, - const InterpFrame *Frame, - const CallExpr *Call) { - QualType CallType = Call->getType(); - if (!CallType->isIntegerType() || - !Call->getArg(0)->getType()->isIntegerType()) - return false; - - APSInt Val = popToAPSInt(S, Call->getArg(0)); - pushInteger(S, Val.countLeadingZeros(), CallType); - return true; -} - -static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC, - const InterpFrame *Frame, - const CallExpr *Call) { - QualType CallType = Call->getType(); - if (!CallType->isIntegerType() || - !Call->getArg(0)->getType()->isIntegerType()) - return false; - - APSInt Val = popToAPSInt(S, Call->getArg(0)); - pushInteger(S, Val.countTrailingZeros(), CallType); - return true; -} - -static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC, - const InterpFrame *Frame, - const CallExpr *Call) { - if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() || - !Call->getArg(1)->getType()->isIntegerType()) - return false; - - APSInt Mask = popToAPSInt(S, Call->getArg(1)); - APSInt Val = popToAPSInt(S, Call->getArg(0)); - - unsigned BitWidth = Val.getBitWidth(); - APInt Result = APInt::getZero(BitWidth); - for (unsigned I = 0, P = 0; I != BitWidth; ++I) { - if (Mask[I]) - Result.setBitVal(I, Val[P++]); - } - pushInteger(S, std::move(Result), Call->getType()); - return true; -} - -static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC, - const InterpFrame *Frame, - const CallExpr *Call) { - if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() || - !Call->getArg(1)->getType()->isIntegerType()) - return false; - - APSInt Mask = popToAPSInt(S, Call->getArg(1)); - APSInt Val = popToAPSInt(S, Call->getArg(0)); - - unsigned BitWidth = Val.getBitWidth(); - APInt Result = APInt::getZero(BitWidth); - for (unsigned I = 0, P = 0; I != BitWidth; ++I) { - if (Mask[I]) - Result.setBitVal(P++, Val[I]); - } - pushInteger(S, std::move(Result), Call->getType()); - return true; -} - /// (CarryIn, LHS, RHS, Result) static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, @@ -2551,6 +2399,24 @@ static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC, return true; } +static bool interp__builtin_elementwise_int_unaryop( + InterpState &S, CodePtr OpPC, const CallExpr *Call, + llvm::function_ref Fn) { + assert(Call->getNumArgs() == 1); + assert(Call->getType()->isIntegerType()); + + // Single integer case. 
+ if (!Call->getArg(0)->getType()->isVectorType()) { + APSInt Src = popToAPSInt(S, Call->getArg(0)); + APInt Result = Fn(Src); + pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType()); + return true; + } + + // TODO: Add vector integer handling. + return false; +} + static bool interp__builtin_elementwise_int_binop( InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) { @@ -2614,6 +2480,52 @@ static bool interp__builtin_elementwise_int_binop( return true; } +static bool +interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E, + llvm::function_ref<APInt(const APSInt &)> PackFn) { + const auto *VT0 = E->getArg(0)->getType()->castAs<VectorType>(); + [[maybe_unused]] const auto *VT1 = + E->getArg(1)->getType()->castAs<VectorType>(); + assert(VT0 && VT1 && "pack builtin VT0 and VT1 must be VectorType"); + assert(VT0->getElementType() == VT1->getElementType() && + VT0->getNumElements() == VT1->getNumElements() && + "pack builtin VT0 and VT1 ElementType must be the same"); + + const Pointer &RHS = S.Stk.pop<Pointer>(); + const Pointer &LHS = S.Stk.pop<Pointer>(); + const Pointer &Dst = S.Stk.peek<Pointer>(); + + const ASTContext &ASTCtx = S.getASTContext(); + const unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType()); + const unsigned LHSVecLen = VT0->getNumElements(); + const unsigned SrcPerLane = 128 / SrcBits; + const unsigned Lanes = LHSVecLen * SrcBits / 128; + + PrimType SrcT = *S.getContext().classify(VT0->getElementType()); + PrimType DstT = *S.getContext().classify(getElemType(Dst)); + const bool IsUnsigned = getElemType(Dst)->isUnsignedIntegerType(); + + for (unsigned Lane = 0; Lane != Lanes; ++Lane) { + const unsigned BaseSrc = Lane * SrcPerLane; + const unsigned BaseDst = Lane * (2 * SrcPerLane); + + for (unsigned I = 0; I != SrcPerLane; ++I) { + INT_TYPE_SWITCH_NO_BOOL(SrcT, { + APSInt A = LHS.elem<T>(BaseSrc + I).toAPSInt(); + APSInt B = RHS.elem<T>(BaseSrc + I).toAPSInt(); + + assignInteger(S, Dst.atIndex(BaseDst + I), DstT, + APSInt(PackFn(A), IsUnsigned)); + assignInteger(S, Dst.atIndex(BaseDst + SrcPerLane + I), DstT, + APSInt(PackFn(B), IsUnsigned)); + }); + } + } + + Dst.initializeAllElements(); + return true; +} + static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID) { @@ -3274,29 +3186,83 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case clang::X86::BI__builtin_ia32_bextr_u64: case clang::X86::BI__builtin_ia32_bextri_u32: case clang::X86::BI__builtin_ia32_bextri_u64: - return interp__builtin_ia32_bextr(S, OpPC, Frame, Call); + return interp__builtin_elementwise_int_binop( + S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) { + unsigned BitWidth = Val.getBitWidth(); + uint64_t Shift = Idx.extractBitsAsZExtValue(8, 0); + uint64_t Length = Idx.extractBitsAsZExtValue(8, 8); + if (Length > BitWidth) { + Length = BitWidth; + } + + // Handle out of bounds cases.
+ if (Length == 0 || Shift >= BitWidth) + return APInt(BitWidth, 0); + + uint64_t Result = Val.getZExtValue() >> Shift; + Result &= llvm::maskTrailingOnes(Length); + return APInt(BitWidth, Result); + }); case clang::X86::BI__builtin_ia32_bzhi_si: case clang::X86::BI__builtin_ia32_bzhi_di: - return interp__builtin_ia32_bzhi(S, OpPC, Frame, Call); + return interp__builtin_elementwise_int_binop( + S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) { + unsigned BitWidth = Val.getBitWidth(); + uint64_t Index = Idx.extractBitsAsZExtValue(8, 0); + APSInt Result = Val; + + if (Index < BitWidth) + Result.clearHighBits(BitWidth - Index); + + return Result; + }); case clang::X86::BI__builtin_ia32_lzcnt_u16: case clang::X86::BI__builtin_ia32_lzcnt_u32: case clang::X86::BI__builtin_ia32_lzcnt_u64: - return interp__builtin_ia32_lzcnt(S, OpPC, Frame, Call); + return interp__builtin_elementwise_int_unaryop( + S, OpPC, Call, [](const APSInt &Src) { + return APInt(Src.getBitWidth(), Src.countLeadingZeros()); + }); case clang::X86::BI__builtin_ia32_tzcnt_u16: case clang::X86::BI__builtin_ia32_tzcnt_u32: case clang::X86::BI__builtin_ia32_tzcnt_u64: - return interp__builtin_ia32_tzcnt(S, OpPC, Frame, Call); + return interp__builtin_elementwise_int_unaryop( + S, OpPC, Call, [](const APSInt &Src) { + return APInt(Src.getBitWidth(), Src.countTrailingZeros()); + }); case clang::X86::BI__builtin_ia32_pdep_si: case clang::X86::BI__builtin_ia32_pdep_di: - return interp__builtin_ia32_pdep(S, OpPC, Frame, Call); + return interp__builtin_elementwise_int_binop( + S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) { + unsigned BitWidth = Val.getBitWidth(); + APInt Result = APInt::getZero(BitWidth); + + for (unsigned I = 0, P = 0; I != BitWidth; ++I) { + if (Mask[I]) + Result.setBitVal(I, Val[P++]); + } + + return Result; + }); case clang::X86::BI__builtin_ia32_pext_si: case clang::X86::BI__builtin_ia32_pext_di: - return interp__builtin_ia32_pext(S, OpPC, Frame, Call); + return interp__builtin_elementwise_int_binop( + S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) { + unsigned BitWidth = Val.getBitWidth(); + APInt Result = APInt::getZero(BitWidth); + + for (unsigned I = 0, P = 0; I != BitWidth; ++I) { + if (Mask[I]) + Result.setBitVal(P++, Val[I]); + } + + return Result; + }); case clang::X86::BI__builtin_ia32_addcarryx_u32: case clang::X86::BI__builtin_ia32_addcarryx_u64: @@ -3487,6 +3453,29 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, } return LHS.lshr(RHS.getZExtValue()); }); + case clang::X86::BI__builtin_ia32_packsswb128: + case clang::X86::BI__builtin_ia32_packsswb256: + case clang::X86::BI__builtin_ia32_packsswb512: + case clang::X86::BI__builtin_ia32_packssdw128: + case clang::X86::BI__builtin_ia32_packssdw256: + case clang::X86::BI__builtin_ia32_packssdw512: + return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) { + return APInt(Src).truncSSat(Src.getBitWidth() / 2); + }); + case clang::X86::BI__builtin_ia32_packusdw128: + case clang::X86::BI__builtin_ia32_packusdw256: + case clang::X86::BI__builtin_ia32_packusdw512: + case clang::X86::BI__builtin_ia32_packuswb128: + case clang::X86::BI__builtin_ia32_packuswb256: + case clang::X86::BI__builtin_ia32_packuswb512: + return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) { + unsigned DstBits = Src.getBitWidth() / 2; + if (Src.isNegative()) + return APInt::getZero(DstBits); + if (Src.isIntN(DstBits)) + return APInt(Src).trunc(DstBits); + return APInt::getAllOnes(DstBits); + }); case 
clang::X86::BI__builtin_ia32_vprotbi: case clang::X86::BI__builtin_ia32_vprotdi: @@ -3661,7 +3650,8 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case X86::BI__builtin_ia32_knothi: case X86::BI__builtin_ia32_knotsi: case X86::BI__builtin_ia32_knotdi: - return interp__builtin_knot(S, OpPC, Frame, Call); + return interp__builtin_elementwise_int_unaryop( + S, OpPC, Call, [](const APSInt &Src) { return ~Src; }); case X86::BI__builtin_ia32_kaddqi: case X86::BI__builtin_ia32_kaddhi: diff --git a/clang/lib/AST/ByteCode/InterpFrame.cpp b/clang/lib/AST/ByteCode/InterpFrame.cpp index c411a371282ef..039acb5d72b2c 100644 --- a/clang/lib/AST/ByteCode/InterpFrame.cpp +++ b/clang/lib/AST/ByteCode/InterpFrame.cpp @@ -58,15 +58,12 @@ InterpFrame::InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC, // If the function has a This pointer, that one is next. // Then follow the actual arguments (but those are handled // in getParamPointer()). - if (Func->hasRVO()) - RVOPtr = stackRef<Pointer>(0); - - if (Func->hasThisPointer()) { - if (Func->hasRVO()) - This = stackRef<Pointer>(sizeof(Pointer)); - else - This = stackRef<Pointer>(0); + if (Func->hasRVO()) { + // RVO pointer offset is always 0. } + + if (Func->hasThisPointer()) + ThisPointerOffset = Func->hasRVO() ? sizeof(Pointer) : 0; } InterpFrame::~InterpFrame() { @@ -167,7 +164,7 @@ void InterpFrame::describe(llvm::raw_ostream &OS) const { /*Indentation=*/0); OS << "."; } else if (const auto *M = dyn_cast<CXXMethodDecl>(F)) { - print(OS, This, S.getASTContext(), + print(OS, getThis(), S.getASTContext(), S.getASTContext().getLValueReferenceType( S.getASTContext().getCanonicalTagType(M->getParent()))); OS << "."; @@ -234,6 +231,8 @@ Pointer InterpFrame::getParamPointer(unsigned Off) { if (auto Pt = Params.find(Off); Pt != Params.end()) return Pointer(reinterpret_cast<Block *>(Pt->second.get())); + assert(!isBottomFrame()); + // Allocate memory to store the parameter and the block metadata. const auto &Desc = Func->getParamDescriptor(Off); size_t BlockSize = sizeof(Block) + Desc.second->getAllocSize(); diff --git a/clang/lib/AST/ByteCode/InterpFrame.h b/clang/lib/AST/ByteCode/InterpFrame.h index 129851155bd86..fa9de2e1e7c6d 100644 --- a/clang/lib/AST/ByteCode/InterpFrame.h +++ b/clang/lib/AST/ByteCode/InterpFrame.h @@ -14,7 +14,8 @@ #define LLVM_CLANG_AST_INTERP_INTERPFRAME_H #include "Frame.h" -#include "Program.h" +#include "InterpBlock.h" +#include "Pointer.h" namespace clang { namespace interp { @@ -93,7 +94,7 @@ class InterpFrame final : public Frame { auto Pt = Params.find(Offset); if (Pt == Params.end()) return stackRef<T>(Offset); - return Pointer(reinterpret_cast<Block *>(Pt->second.get())).deref<T>(); + return reinterpret_cast<Block *>(Pt->second.get())->deref<T>(); } /// Mutates a local copy of a parameter. @@ -104,11 +105,19 @@ class InterpFrame final : public Frame { /// Returns a pointer to an argument - lazily creates a block. Pointer getParamPointer(unsigned Offset); + bool hasThisPointer() const { return Func && Func->hasThisPointer(); } /// Returns the 'this' pointer. - const Pointer &getThis() const { return This; } + const Pointer &getThis() const { + assert(hasThisPointer()); + return stackRef<Pointer>(ThisPointerOffset); + } /// Returns the RVO pointer, if the Function has one. - const Pointer &getRVOPtr() const { return RVOPtr; } + const Pointer &getRVOPtr() const { + assert(Func); + assert(Func->hasRVO()); + return stackRef<Pointer>(0); + } /// Checks if the frame is a root frame - return should quit the interpreter.
bool isRoot() const { return !Func; } @@ -143,7 +152,7 @@ class InterpFrame final : public Frame { /// Returns an offset to a local. template T &localRef(unsigned Offset) const { - return getLocalPointer(Offset).deref(); + return localBlock(Offset)->deref(); } /// Returns a pointer to a local's block. @@ -163,10 +172,8 @@ class InterpFrame final : public Frame { unsigned Depth; /// Reference to the function being executed. const Function *Func; - /// Current object pointer for methods. - Pointer This; - /// Pointer the non-primitive return value gets constructed in. - Pointer RVOPtr; + /// Offset of the instance pointer. Use with stackRef<>(). + unsigned ThisPointerOffset; /// Return address. CodePtr RetPC; /// The size of all the arguments. diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td index 7af2df5318106..532c4448e6f40 100644 --- a/clang/lib/AST/ByteCode/Opcodes.td +++ b/clang/lib/AST/ByteCode/Opcodes.td @@ -797,6 +797,7 @@ def SideEffect : Opcode {} def InvalidCast : Opcode { let Args = [ArgCastKind, ArgBool]; } +def InvalidStore : Opcode { let Args = [ArgTypePtr]; } def CheckPseudoDtor : Opcode {} def InvalidDeclRef : Opcode { diff --git a/clang/lib/AST/ByteCode/Pointer.cpp b/clang/lib/AST/ByteCode/Pointer.cpp index 81d4ce14f9310..663134c8696de 100644 --- a/clang/lib/AST/ByteCode/Pointer.cpp +++ b/clang/lib/AST/ByteCode/Pointer.cpp @@ -110,19 +110,21 @@ Pointer &Pointer::operator=(const Pointer &P) { StorageKind = P.StorageKind; Offset = P.Offset; - if (P.isBlockPointer()) { + switch (StorageKind) { + case Storage::Int: + Int = P.Int; + break; + case Storage::Block: BS = P.BS; if (BS.Pointee) BS.Pointee->addPointer(this); - } else if (P.isIntegralPointer()) { - Int = P.Int; - } else if (P.isFunctionPointer()) { + break; + case Storage::Fn: Fn = P.Fn; - } else if (P.isTypeidPointer()) { + break; + case Storage::Typeid: Typeid = P.Typeid; - } else { - assert(false && "Unhandled storage kind"); } return *this; } @@ -147,19 +149,21 @@ Pointer &Pointer::operator=(Pointer &&P) { StorageKind = P.StorageKind; Offset = P.Offset; - if (P.isBlockPointer()) { + switch (StorageKind) { + case Storage::Int: + Int = P.Int; + break; + case Storage::Block: BS = P.BS; if (BS.Pointee) BS.Pointee->addPointer(this); - } else if (P.isIntegralPointer()) { - Int = P.Int; - } else if (P.isFunctionPointer()) { + break; + case Storage::Fn: Fn = P.Fn; - } else if (P.isTypeidPointer()) { + break; + case Storage::Typeid: Typeid = P.Typeid; - } else { - assert(false && "Unhandled storage kind"); } return *this; } @@ -358,13 +362,17 @@ void Pointer::print(llvm::raw_ostream &OS) const { } size_t Pointer::computeOffsetForComparison() const { - if (isIntegralPointer()) - return asIntPointer().Value + Offset; - if (isTypeidPointer()) + switch (StorageKind) { + case Storage::Int: + return Int.Value + Offset; + case Storage::Block: + // See below. 
+ break; + case Storage::Fn: + return Fn.getIntegerRepresentation() + Offset; + case Storage::Typeid: return reinterpret_cast(asTypeidPointer().TypePtr) + Offset; - - if (!isBlockPointer()) - return Offset; + } size_t Result = 0; Pointer P = *this; diff --git a/clang/lib/AST/ByteCode/Pointer.h b/clang/lib/AST/ByteCode/Pointer.h index bbf20801ce923..cd738ce8b2a3e 100644 --- a/clang/lib/AST/ByteCode/Pointer.h +++ b/clang/lib/AST/ByteCode/Pointer.h @@ -56,7 +56,7 @@ struct TypeidPointer { const Type *TypeInfoType; }; -enum class Storage { Block, Int, Fn, Typeid }; +enum class Storage { Int, Block, Fn, Typeid }; /// A pointer to a memory block, live or dead. /// @@ -252,14 +252,17 @@ class Pointer { /// Checks if the pointer is null. bool isZero() const { - if (isBlockPointer()) + switch (StorageKind) { + case Storage::Int: + return Int.Value == 0 && Offset == 0; + case Storage::Block: return BS.Pointee == nullptr; - if (isFunctionPointer()) + case Storage::Fn: return Fn.isZero(); - if (isTypeidPointer()) + case Storage::Typeid: return false; - assert(isIntegralPointer()); - return Int.Value == 0 && Offset == 0; + } + llvm_unreachable("Unknown clang::interp::Storage enum"); } /// Checks if the pointer is live. bool isLive() const { diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp index cd8e495e82c80..c7341552be365 100644 --- a/clang/lib/AST/Decl.cpp +++ b/clang/lib/AST/Decl.cpp @@ -3552,6 +3552,53 @@ void FunctionDecl::setIsTypeAwareOperatorNewOrDelete(bool IsTypeAware) { getASTContext().setIsTypeAwareOperatorNewOrDelete(this, IsTypeAware); } +UsualDeleteParams FunctionDecl::getUsualDeleteParams() const { + UsualDeleteParams Params; + + // This function should only be called for operator delete declarations. + assert(getDeclName().isAnyOperatorDelete()); + if (!getDeclName().isAnyOperatorDelete()) + return Params; + + const FunctionProtoType *FPT = getType()->castAs(); + auto AI = FPT->param_type_begin(), AE = FPT->param_type_end(); + + if (isTypeAwareOperatorNewOrDelete()) { + Params.TypeAwareDelete = TypeAwareAllocationMode::Yes; + assert(AI != AE); + ++AI; + } + + // The first argument after the type-identity parameter (if any) is + // always a void* (or C* for a destroying operator delete for class + // type C). + ++AI; + + // The next parameter may be a std::destroying_delete_t. + if (isDestroyingOperatorDelete()) { + assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); + Params.DestroyingDelete = true; + assert(AI != AE); + ++AI; + } + + // Figure out what other parameters we should be implicitly passing. 
+ if (AI != AE && (*AI)->isIntegerType()) { + Params.Size = true; + ++AI; + } else + assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); + + if (AI != AE && (*AI)->isAlignValT()) { + Params.Alignment = AlignedAllocationMode::Yes; + ++AI; + } else + assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); + + assert(AI == AE && "unexpected usual deallocation function parameter"); + return Params; +} + LanguageLinkage FunctionDecl::getLanguageLinkage() const { return getDeclLanguageLinkage(*this); } diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp index 97ae4a07f32aa..95de6a82a5270 100644 --- a/clang/lib/AST/ExprCXX.cpp +++ b/clang/lib/AST/ExprCXX.cpp @@ -1725,8 +1725,8 @@ SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context, return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs); } -NamedDecl *SubstNonTypeTemplateParmExpr::getParameter() const { - return cast<NonTypeTemplateParmDecl>( +NonTypeTemplateParmDecl *SubstNonTypeTemplateParmExpr::getParameter() const { + return cast<NonTypeTemplateParmDecl>( getReplacedTemplateParameterList(getAssociatedDecl())->asArray()[Index]); } diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index d10e2afeb2341..b706b14945b6d 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -11575,6 +11575,46 @@ static bool handleVectorElementCast(EvalInfo &Info, const FPOptions FPO, return false; } +static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result, + llvm::function_ref<APInt(const APSInt &)> PackFn) { + APValue LHS, RHS; + if (!EvaluateAsRValue(Info, E->getArg(0), LHS) || + !EvaluateAsRValue(Info, E->getArg(1), RHS)) + return false; + + unsigned LHSVecLen = LHS.getVectorLength(); + unsigned RHSVecLen = RHS.getVectorLength(); + + assert(LHSVecLen != 0 && LHSVecLen == RHSVecLen && + "pack builtin LHSVecLen must equal RHSVecLen"); + + const VectorType *VT0 = E->getArg(0)->getType()->castAs<VectorType>(); + const unsigned SrcBits = Info.Ctx.getIntWidth(VT0->getElementType()); + + const VectorType *DstVT = E->getType()->castAs<VectorType>(); + QualType DstElemTy = DstVT->getElementType(); + const bool DstIsUnsigned = DstElemTy->isUnsignedIntegerType(); + + const unsigned SrcPerLane = 128 / SrcBits; + const unsigned Lanes = LHSVecLen * SrcBits / 128; + + SmallVector<APValue> Out; + Out.reserve(LHSVecLen + RHSVecLen); + + for (unsigned Lane = 0; Lane != Lanes; ++Lane) { + unsigned base = Lane * SrcPerLane; + for (unsigned I = 0; I != SrcPerLane; ++I) + Out.emplace_back(APValue( + APSInt(PackFn(LHS.getVectorElt(base + I).getInt()), DstIsUnsigned))); + for (unsigned I = 0; I != SrcPerLane; ++I) + Out.emplace_back(APValue( + APSInt(PackFn(RHS.getVectorElt(base + I).getInt()), DstIsUnsigned))); + } + + Result = APValue(Out.data(), Out.size()); + return true; +} + bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { if (!IsConstantEvaluatedBuiltinCall(E)) return ExprEvaluatorBaseTy::VisitCallExpr(E); @@ -11768,7 +11808,29 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { } return LHS.lshr(RHS.getZExtValue()); }); - + case X86::BI__builtin_ia32_packsswb128: + case X86::BI__builtin_ia32_packsswb256: + case X86::BI__builtin_ia32_packsswb512: + case X86::BI__builtin_ia32_packssdw128: + case X86::BI__builtin_ia32_packssdw256: + case X86::BI__builtin_ia32_packssdw512: + return evalPackBuiltin(E, Info, Result, [](const APSInt &Src) { + return APSInt(Src).truncSSat(Src.getBitWidth() / 2); + }); + case X86::BI__builtin_ia32_packusdw128: + case X86::BI__builtin_ia32_packusdw256: + case X86::BI__builtin_ia32_packusdw512: + case
X86::BI__builtin_ia32_packuswb128: + case X86::BI__builtin_ia32_packuswb256: + case X86::BI__builtin_ia32_packuswb512: + return evalPackBuiltin(E, Info, Result, [](const APSInt &Src) { + unsigned DstBits = Src.getBitWidth() / 2; + if (Src.isNegative()) + return APInt::getZero(DstBits); + if (Src.isIntN(DstBits)) + return APInt((Src).trunc(DstBits)); + return APInt::getAllOnes(DstBits); + }); case clang::X86::BI__builtin_ia32_pmuldq128: case clang::X86::BI__builtin_ia32_pmuldq256: case clang::X86::BI__builtin_ia32_pmuldq512: @@ -18843,9 +18905,15 @@ std::optional Expr::tryEvaluateString(ASTContext &Ctx) const { uint64_t Result; std::string StringResult; + if (Info.EnableNewConstInterp) { + if (!Info.Ctx.getInterpContext().evaluateString(Info, this, StringResult)) + return std::nullopt; + return StringResult; + } + if (EvaluateBuiltinStrLen(this, Result, Info, &StringResult)) return StringResult; - return {}; + return std::nullopt; } template diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp index 2173aed5b45af..844db79f18a4a 100644 --- a/clang/lib/AST/ItaniumMangle.cpp +++ b/clang/lib/AST/ItaniumMangle.cpp @@ -4624,6 +4624,8 @@ void CXXNameMangler::mangleType(const HLSLAttributedResourceType *T) { Str += "_ROV"; if (Attrs.RawBuffer) Str += "_Raw"; + if (Attrs.IsCounter) + Str += "_Counter"; if (T->hasContainedType()) Str += "_CT"; mangleVendorQualifier(Str); diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp index 11a43e8c7a030..2ce4419940e52 100644 --- a/clang/lib/AST/OpenMPClause.cpp +++ b/clang/lib/AST/OpenMPClause.cpp @@ -15,6 +15,7 @@ #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclOpenMP.h" +#include "clang/AST/ExprOpenMP.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/TargetInfo.h" @@ -1023,6 +1024,26 @@ OMPPartialClause *OMPPartialClause::CreateEmpty(const ASTContext &C) { return new (C) OMPPartialClause(); } +OMPLoopRangeClause * +OMPLoopRangeClause::Create(const ASTContext &C, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation FirstLoc, + SourceLocation CountLoc, SourceLocation EndLoc, + Expr *First, Expr *Count) { + OMPLoopRangeClause *Clause = CreateEmpty(C); + Clause->setLocStart(StartLoc); + Clause->setLParenLoc(LParenLoc); + Clause->setFirstLoc(FirstLoc); + Clause->setCountLoc(CountLoc); + Clause->setLocEnd(EndLoc); + Clause->setFirst(First); + Clause->setCount(Count); + return Clause; +} + +OMPLoopRangeClause *OMPLoopRangeClause::CreateEmpty(const ASTContext &C) { + return new (C) OMPLoopRangeClause(); +} + OMPAllocateClause *OMPAllocateClause::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, Expr *Alignment, SourceLocation ColonLoc, @@ -1159,6 +1180,77 @@ unsigned OMPClauseMappableExprCommon::getUniqueDeclarationsTotalNumber( return UniqueDecls.size(); } +QualType +OMPClauseMappableExprCommon::getComponentExprElementType(const Expr *Exp) { + assert(!isa(Exp) && + "Cannot get element-type from array-shaping expr."); + + // Unless we are handling array-section expressions, including + // array-subscripts, derefs, we can rely on getType. + if (!isa(Exp)) + return Exp->getType().getNonReferenceType().getCanonicalType(); + + // For array-sections, we need to find the type of one element of + // the section. 
+ const auto *OASE = cast(Exp); + + QualType BaseType = ArraySectionExpr::getBaseOriginalType(OASE->getBase()); + + QualType ElemTy; + if (const auto *ATy = BaseType->getAsArrayTypeUnsafe()) + ElemTy = ATy->getElementType(); + else + ElemTy = BaseType->getPointeeType(); + + ElemTy = ElemTy.getNonReferenceType().getCanonicalType(); + return ElemTy; +} + +std::pair> +OMPClauseMappableExprCommon::findAttachPtrExpr( + MappableExprComponentListRef Components, OpenMPDirectiveKind CurDirKind) { + + // If we only have a single component, we have a map like "map(p)", which + // cannot have a base-pointer. + if (Components.size() < 2) + return {nullptr, std::nullopt}; + + // Only check for non-contiguous sections on target_update, since we can + // assume array-sections are contiguous on maps on other constructs, even if + // we are not sure of it at compile-time, like for a[1:x][2]. + if (Components.back().isNonContiguous() && CurDirKind == OMPD_target_update) + return {nullptr, std::nullopt}; + + // To find the attach base-pointer, we start with the second component, + // stripping away one component at a time, until we reach a pointer Expr + // (that is not a binary operator). The first such pointer should be the + // attach base-pointer for the component list. + for (auto [I, Component] : llvm::enumerate(Components)) { + // Skip past the first component. + if (I == 0) + continue; + + const Expr *CurExpr = Component.getAssociatedExpression(); + if (!CurExpr) + break; + + // If CurExpr is something like `p + 10`, we need to ignore it, since + // we are looking for `p`. + if (isa(CurExpr)) + continue; + + // Keep going until we reach an Expr of pointer type. + QualType CurType = getComponentExprElementType(CurExpr); + if (!CurType->isPointerType()) + continue; + + // We have found a pointer Expr. This must be the attach pointer. 
+ return {CurExpr, Components.size() - I}; + } + + return {nullptr, std::nullopt}; +} + OMPMapClause *OMPMapClause::Create( const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef Vars, ArrayRef Declarations, @@ -1892,6 +1984,21 @@ void OMPClausePrinter::VisitOMPPartialClause(OMPPartialClause *Node) { } } +void OMPClausePrinter::VisitOMPLoopRangeClause(OMPLoopRangeClause *Node) { + OS << "looprange"; + + Expr *First = Node->getFirst(); + Expr *Count = Node->getCount(); + + if (First && Count) { + OS << "("; + First->printPretty(OS, nullptr, Policy, 0); + OS << ","; + Count->printPretty(OS, nullptr, Policy, 0); + OS << ")"; + } +} + void OMPClausePrinter::VisitOMPAllocatorClause(OMPAllocatorClause *Node) { OS << "allocator("; Node->getAllocator()->printPretty(OS, nullptr, Policy, 0); diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp index 43f4e070748bb..00b938bdf308d 100644 --- a/clang/lib/AST/RecordLayoutBuilder.cpp +++ b/clang/lib/AST/RecordLayoutBuilder.cpp @@ -2087,9 +2087,8 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, if (InsertExtraPadding) { CharUnits ASanAlignment = CharUnits::fromQuantity(8); CharUnits ExtraSizeForAsan = ASanAlignment; - if (FieldSize % ASanAlignment) - ExtraSizeForAsan += - ASanAlignment - CharUnits::fromQuantity(FieldSize % ASanAlignment); + if (!FieldSize.isMultipleOf(ASanAlignment)) + ExtraSizeForAsan += ASanAlignment - (FieldSize % ASanAlignment); EffectiveFieldSize = FieldSize = FieldSize + ExtraSizeForAsan; } @@ -2119,10 +2118,10 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, if (RD->hasAttr() || !MaxFieldAlignment.isZero()) if (FieldAlign < OriginalFieldAlign) if (D->getType()->isRecordType()) { - // If the offset is a multiple of the alignment of + // If the offset is not a multiple of the alignment of // the type, raise the warning. 
// TODO: Takes no account the alignment of the outer struct - if (FieldOffset % OriginalFieldAlign != 0) + if (!FieldOffset.isMultipleOf(OriginalFieldAlign)) Diag(D->getLocation(), diag::warn_unaligned_access) << Context.getCanonicalTagType(RD) << D->getName() << D->getType(); diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp index 1f6586f95a9f8..a5b0cd3786a28 100644 --- a/clang/lib/AST/StmtOpenMP.cpp +++ b/clang/lib/AST/StmtOpenMP.cpp @@ -125,13 +125,12 @@ OMPLoopBasedDirective::tryToFindNextInnerLoop(Stmt *CurStmt, bool OMPLoopBasedDirective::doForAllLoops( Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref Callback, - llvm::function_ref + llvm::function_ref OnTransformationCallback) { CurStmt = CurStmt->IgnoreContainers(); for (unsigned Cnt = 0; Cnt < NumLoops; ++Cnt) { while (true) { - auto *Dir = - dyn_cast(CurStmt); + auto *Dir = dyn_cast(CurStmt); if (!Dir) break; @@ -371,6 +370,22 @@ OMPForDirective *OMPForDirective::Create( return Dir; } +Stmt *OMPLoopTransformationDirective::getTransformedStmt() const { + if (auto *D = dyn_cast(S)) + return D->getTransformedStmt(); + if (auto *D = dyn_cast(S)) + return D->getTransformedStmt(); + llvm_unreachable("unexpected object type"); +} + +Stmt *OMPLoopTransformationDirective::getPreInits() const { + if (auto *D = dyn_cast(S)) + return D->getPreInits(); + if (auto *D = dyn_cast(S)) + return D->getPreInits(); + llvm_unreachable("unexpected object type"); +} + Stmt *OMPCanonicalLoopNestTransformationDirective::getTransformedStmt() const { switch (getStmtClass()) { #define STMT(CLASS, PARENT) @@ -380,7 +395,7 @@ Stmt *OMPCanonicalLoopNestTransformationDirective::getTransformedStmt() const { return static_cast(this)->getTransformedStmt(); #include "clang/AST/StmtNodes.inc" default: - llvm_unreachable("Not a loop transformation"); + llvm_unreachable("Not a loop transformation for canonical loop nests"); } } @@ -393,7 +408,34 @@ Stmt *OMPCanonicalLoopNestTransformationDirective::getPreInits() const { return static_cast(this)->getPreInits(); #include "clang/AST/StmtNodes.inc" default: - llvm_unreachable("Not a loop transformation"); + llvm_unreachable("Not a loop transformation for canonical loop nests"); + } +} + +Stmt * +OMPCanonicalLoopSequenceTransformationDirective::getTransformedStmt() const { + switch (getStmtClass()) { +#define STMT(CLASS, PARENT) +#define ABSTRACT_STMT(CLASS) +#define OMPCANONICALLOOPSEQUENCETRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + case Stmt::CLASS##Class: \ + return static_cast(this)->getTransformedStmt(); +#include "clang/AST/StmtNodes.inc" + default: + llvm_unreachable("Not a loop transformation for canonical loop sequences"); + } +} + +Stmt *OMPCanonicalLoopSequenceTransformationDirective::getPreInits() const { + switch (getStmtClass()) { +#define STMT(CLASS, PARENT) +#define ABSTRACT_STMT(CLASS) +#define OMPCANONICALLOOPSEQUENCETRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + case Stmt::CLASS##Class: \ + return static_cast(this)->getPreInits(); +#include "clang/AST/StmtNodes.inc" + default: + llvm_unreachable("Not a loop transformation for canonical loop sequences"); } } @@ -510,6 +552,27 @@ OMPInterchangeDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses, SourceLocation(), SourceLocation(), NumLoops); } +OMPFuseDirective *OMPFuseDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + ArrayRef Clauses, unsigned NumGeneratedTopLevelLoops, + Stmt *AssociatedStmt, Stmt *TransformedStmt, Stmt *PreInits) { + + 
OMPFuseDirective *Dir = createDirective( + C, Clauses, AssociatedStmt, TransformedStmtOffset + 1, StartLoc, EndLoc); + Dir->setTransformedStmt(TransformedStmt); + Dir->setPreInits(PreInits); + Dir->setNumGeneratedTopLevelLoops(NumGeneratedTopLevelLoops); + return Dir; +} + +OMPFuseDirective *OMPFuseDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses) { + OMPFuseDirective *Dir = createEmptyDirective( + C, NumClauses, /*HasAssociatedStmt=*/true, TransformedStmtOffset + 1, + SourceLocation(), SourceLocation()); + return Dir; +} + OMPForSimdDirective * OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp index 2c9c3581a2962..586c3000f105c 100644 --- a/clang/lib/AST/StmtPrinter.cpp +++ b/clang/lib/AST/StmtPrinter.cpp @@ -795,6 +795,11 @@ void StmtPrinter::VisitOMPInterchangeDirective(OMPInterchangeDirective *Node) { PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPFuseDirective(OMPFuseDirective *Node) { + Indent() << "#pragma omp fuse"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPForDirective(OMPForDirective *Node) { Indent() << "#pragma omp for"; PrintOMPExecutableDirective(Node); diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp index 37c4d43ec0b2f..f3b5478222488 100644 --- a/clang/lib/AST/StmtProfile.cpp +++ b/clang/lib/AST/StmtProfile.cpp @@ -510,6 +510,13 @@ void OMPClauseProfiler::VisitOMPPartialClause(const OMPPartialClause *C) { Profiler->VisitExpr(Factor); } +void OMPClauseProfiler::VisitOMPLoopRangeClause(const OMPLoopRangeClause *C) { + if (const Expr *First = C->getFirst()) + Profiler->VisitExpr(First); + if (const Expr *Count = C->getCount()) + Profiler->VisitExpr(Count); +} + void OMPClauseProfiler::VisitOMPAllocatorClause(const OMPAllocatorClause *C) { if (C->getAllocator()) Profiler->VisitStmt(C->getAllocator()); @@ -1025,6 +1032,15 @@ void StmtProfiler::VisitOMPInterchangeDirective( VisitOMPCanonicalLoopNestTransformationDirective(S); } +void StmtProfiler::VisitOMPCanonicalLoopSequenceTransformationDirective( + const OMPCanonicalLoopSequenceTransformationDirective *S) { + VisitOMPExecutableDirective(S); +} + +void StmtProfiler::VisitOMPFuseDirective(const OMPFuseDirective *S) { + VisitOMPCanonicalLoopSequenceTransformationDirective(S); +} + void StmtProfiler::VisitOMPForDirective(const OMPForDirective *S) { VisitOMPLoopDirective(S); } @@ -1353,7 +1369,8 @@ void StmtProfiler::VisitExpr(const Expr *S) { } void StmtProfiler::VisitConstantExpr(const ConstantExpr *S) { - VisitExpr(S); + // Profile exactly as the sub-expression. 
+ Visit(S->getSubExpr()); } void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) { @@ -2638,8 +2655,6 @@ void OpenACCClauseProfiler::VisitPrivateClause( for (auto &Recipe : Clause.getInitRecipes()) { Profiler.VisitDecl(Recipe.AllocaDecl); - if (Recipe.InitExpr) - Profiler.VisitExpr(Recipe.InitExpr); } } @@ -2649,8 +2664,6 @@ void OpenACCClauseProfiler::VisitFirstPrivateClause( for (auto &Recipe : Clause.getInitRecipes()) { Profiler.VisitDecl(Recipe.AllocaDecl); - if (Recipe.InitExpr) - Profiler.VisitExpr(Recipe.InitExpr); Profiler.VisitDecl(Recipe.InitFromTemporary); } } @@ -2756,12 +2769,10 @@ void OpenACCClauseProfiler::VisitReductionClause( for (auto &Recipe : Clause.getRecipes()) { Profiler.VisitDecl(Recipe.AllocaDecl); - if (Recipe.InitExpr) - Profiler.VisitExpr(Recipe.InitExpr); // TODO: OpenACC: Make sure we remember to update this when we figure out // what we're adding for the operation recipe, in the meantime, a static // assert will make sure we don't add something. - static_assert(sizeof(OpenACCReductionRecipe) == 2 * sizeof(int *)); + static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *)); } } diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp index 8f7fe3bea4e8f..cf5e9147ad78b 100644 --- a/clang/lib/AST/TextNodeDumper.cpp +++ b/clang/lib/AST/TextNodeDumper.cpp @@ -3095,6 +3095,9 @@ void TextNodeDumper::VisitHLSLRootSignatureDecl( case llvm::dxbc::RootSignatureVersion::V1_1: OS << "1.1"; break; + case llvm::dxbc::RootSignatureVersion::V1_2: + OS << "1.2"; + break; } OS << ", "; llvm::hlsl::rootsig::dumpRootElements(OS, D->getRootElements()); diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp index cd59678d67f2f..66a1b684ec68b 100644 --- a/clang/lib/AST/TypePrinter.cpp +++ b/clang/lib/AST/TypePrinter.cpp @@ -846,16 +846,45 @@ void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) { } } -void TypePrinter::printConstantMatrixBefore(const ConstantMatrixType *T, - raw_ostream &OS) { - printBefore(T->getElementType(), OS); - OS << " __attribute__((matrix_type("; +static void printDims(const ConstantMatrixType *T, raw_ostream &OS) { OS << T->getNumRows() << ", " << T->getNumColumns(); +} + +static void printHLSLMatrixBefore(TypePrinter &TP, const ConstantMatrixType *T, + raw_ostream &OS) { + OS << "matrix<"; + TP.printBefore(T->getElementType(), OS); +} + +static void printHLSLMatrixAfter(const ConstantMatrixType *T, raw_ostream &OS) { + OS << ", "; + printDims(T, OS); + OS << ">"; +} + +static void printClangMatrixBefore(TypePrinter &TP, const ConstantMatrixType *T, + raw_ostream &OS) { + TP.printBefore(T->getElementType(), OS); + OS << " __attribute__((matrix_type("; + printDims(T, OS); OS << ")))"; } +void TypePrinter::printConstantMatrixBefore(const ConstantMatrixType *T, + raw_ostream &OS) { + if (Policy.UseHLSLTypes) { + printHLSLMatrixBefore(*this, T, OS); + return; + } + printClangMatrixBefore(*this, T, OS); +} + void TypePrinter::printConstantMatrixAfter(const ConstantMatrixType *T, raw_ostream &OS) { + if (Policy.UseHLSLTypes) { + printHLSLMatrixAfter(T, OS); + return; + } printAfter(T->getElementType(), OS); } @@ -2033,6 +2062,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, case attr::HLSLROV: case attr::HLSLRawBuffer: case attr::HLSLContainedType: + case attr::HLSLIsCounter: llvm_unreachable("HLSL resource type attributes handled separately"); case attr::OpenCLPrivateAddressSpace: @@ -2181,6 +2211,8 @@ void TypePrinter::printHLSLAttributedResourceAfter( OS << " 
[[hlsl::is_rov]]"; if (Attrs.RawBuffer) OS << " [[hlsl::raw_buffer]]"; + if (Attrs.IsCounter) + OS << " [[hlsl::is_counter]]"; QualType ContainedTy = T->getContainedType(); if (!ContainedTy.isNull()) { diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp index 60a2d113c08e2..cdde849b0e026 100644 --- a/clang/lib/Analysis/CFG.cpp +++ b/clang/lib/Analysis/CFG.cpp @@ -4516,10 +4516,13 @@ CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) { // // Note: We add a successor to a switch that is considered covered yet has no // case statements if the enumeration has no enumerators. + // We also consider this successor reachable if + // BuildOpts.SwitchReqDefaultCoveredEnum is true. bool SwitchAlwaysHasSuccessor = false; SwitchAlwaysHasSuccessor |= switchExclusivelyCovered; - SwitchAlwaysHasSuccessor |= Terminator->isAllEnumCasesCovered() && - Terminator->getSwitchCaseList(); + SwitchAlwaysHasSuccessor |= + !BuildOpts.AssumeReachableDefaultInSwitchStatements && + Terminator->isAllEnumCasesCovered() && Terminator->getSwitchCaseList(); addSuccessor(SwitchTerminatedBlock, DefaultCaseBlock, !SwitchAlwaysHasSuccessor); diff --git a/clang/lib/Analysis/CMakeLists.txt b/clang/lib/Analysis/CMakeLists.txt index 0523d92480cb3..5a26f3eeea418 100644 --- a/clang/lib/Analysis/CMakeLists.txt +++ b/clang/lib/Analysis/CMakeLists.txt @@ -21,6 +21,7 @@ add_clang_library(clangAnalysis FixitUtil.cpp IntervalPartition.cpp IssueHash.cpp + LifetimeAnnotations.cpp LifetimeSafety.cpp LiveVariables.cpp MacroExpansionContext.cpp diff --git a/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/clang/lib/Analysis/FlowSensitive/Transfer.cpp index 60371d9498c25..06f12784aa82d 100644 --- a/clang/lib/Analysis/FlowSensitive/Transfer.cpp +++ b/clang/lib/Analysis/FlowSensitive/Transfer.cpp @@ -657,7 +657,12 @@ class TransferVisitor : public ConstStmtVisitor { if (LocSrc == nullptr || LocDst == nullptr) return; - copyRecord(*LocSrc, *LocDst, Env); + // If the destination object here is of a derived class, `Arg0` may be a + // cast of that object to a base class, and the source object may be of a + // sibling derived class. To handle these cases, ensure we are copying + // only the fields for `Arg0`'s type, not the type of the underlying + // `RecordStorageLocation`. + copyRecord(*LocSrc, *LocDst, Env, Arg0->getType()); // The assignment operator can have an arbitrary return type. We model the // return value only if the return type is the same as or a base class of diff --git a/clang/lib/Analysis/LifetimeAnnotations.cpp b/clang/lib/Analysis/LifetimeAnnotations.cpp new file mode 100644 index 0000000000000..e79122475625e --- /dev/null +++ b/clang/lib/Analysis/LifetimeAnnotations.cpp @@ -0,0 +1,75 @@ +//===- LifetimeAnnotations.cpp - -*--------------- C++------------------*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "clang/Analysis/Analyses/LifetimeAnnotations.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/Type.h" +#include "clang/AST/TypeLoc.h" + +namespace clang { +namespace lifetimes { + +const FunctionDecl * +getDeclWithMergedLifetimeBoundAttrs(const FunctionDecl *FD) { + return FD != nullptr ? 
FD->getMostRecentDecl() : nullptr; +} + +const CXXMethodDecl * +getDeclWithMergedLifetimeBoundAttrs(const CXXMethodDecl *CMD) { + const FunctionDecl *FD = CMD; + return cast_if_present( + getDeclWithMergedLifetimeBoundAttrs(FD)); +} + +bool isNormalAssignmentOperator(const FunctionDecl *FD) { + OverloadedOperatorKind OO = FD->getDeclName().getCXXOverloadedOperator(); + bool IsAssignment = OO == OO_Equal || isCompoundAssignmentOperator(OO); + if (!IsAssignment) + return false; + QualType RetT = FD->getReturnType(); + if (!RetT->isLValueReferenceType()) + return false; + ASTContext &Ctx = FD->getASTContext(); + QualType LHST; + auto *MD = dyn_cast(FD); + if (MD && MD->isCXXInstanceMember()) + LHST = Ctx.getLValueReferenceType(MD->getFunctionObjectParameterType()); + else + LHST = FD->getParamDecl(0)->getType(); + return Ctx.hasSameType(RetT, LHST); +} + +bool isAssignmentOperatorLifetimeBound(const CXXMethodDecl *CMD) { + CMD = getDeclWithMergedLifetimeBoundAttrs(CMD); + return CMD && isNormalAssignmentOperator(CMD) && CMD->param_size() == 1 && + CMD->getParamDecl(0)->hasAttr(); +} + +bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD) { + FD = getDeclWithMergedLifetimeBoundAttrs(FD); + const TypeSourceInfo *TSI = FD->getTypeSourceInfo(); + if (!TSI) + return false; + // Don't declare this variable in the second operand of the for-statement; + // GCC miscompiles that by ending its lifetime before evaluating the + // third operand. See gcc.gnu.org/PR86769. + AttributedTypeLoc ATL; + for (TypeLoc TL = TSI->getTypeLoc(); + (ATL = TL.getAsAdjusted()); + TL = ATL.getModifiedLoc()) { + if (ATL.getAttrAs()) + return true; + } + + return isNormalAssignmentOperator(FD); +} + +} // namespace lifetimes +} // namespace clang diff --git a/clang/lib/Analysis/LifetimeSafety.cpp b/clang/lib/Analysis/LifetimeSafety.cpp index d016c6f12e82e..c18b8fb890a05 100644 --- a/clang/lib/Analysis/LifetimeSafety.cpp +++ b/clang/lib/Analysis/LifetimeSafety.cpp @@ -10,6 +10,7 @@ #include "clang/AST/Expr.h" #include "clang/AST/StmtVisitor.h" #include "clang/AST/Type.h" +#include "clang/Analysis/Analyses/LifetimeAnnotations.h" #include "clang/Analysis/Analyses/PostOrderCFGView.h" #include "clang/Analysis/AnalysisDeclContext.h" #include "clang/Analysis/CFG.h" @@ -213,10 +214,13 @@ class Fact { /// out of scope). Expire, /// An origin is propagated from a source to a destination (e.g., p = q). - AssignOrigin, + /// This can also optionally kill the destination origin before flowing into + /// it. Otherwise, the source's loan set is merged into the destination's + /// loan set. + OriginFlow, /// An origin escapes the function by flowing into the return value. ReturnOfOrigin, - /// An origin is used (eg. dereferencing a pointer). + /// An origin is used (eg. appears as l-value expression like DeclRefExpr). Use, /// A marker for a specific point in the code, for testing. TestPoint, @@ -285,25 +289,33 @@ class ExpireFact : public Fact { } }; -class AssignOriginFact : public Fact { +class OriginFlowFact : public Fact { OriginID OIDDest; OriginID OIDSrc; + // True if the destination origin should be killed (i.e., its current loans + // cleared) before the source origin's loans are flowed into it. 
+ bool KillDest; public: static bool classof(const Fact *F) { - return F->getKind() == Kind::AssignOrigin; + return F->getKind() == Kind::OriginFlow; } - AssignOriginFact(OriginID OIDDest, OriginID OIDSrc) - : Fact(Kind::AssignOrigin), OIDDest(OIDDest), OIDSrc(OIDSrc) {} + OriginFlowFact(OriginID OIDDest, OriginID OIDSrc, bool KillDest) + : Fact(Kind::OriginFlow), OIDDest(OIDDest), OIDSrc(OIDSrc), + KillDest(KillDest) {} + OriginID getDestOriginID() const { return OIDDest; } OriginID getSrcOriginID() const { return OIDSrc; } + bool getKillDest() const { return KillDest; } + void dump(llvm::raw_ostream &OS, const LoanManager &, const OriginManager &OM) const override { - OS << "AssignOrigin (Dest: "; + OS << "OriginFlow (Dest: "; OM.dump(getDestOriginID(), OS); OS << ", Src: "; OM.dump(getSrcOriginID(), OS); + OS << (getKillDest() ? "" : ", Merge"); OS << ")\n"; } }; @@ -454,7 +466,7 @@ class FactGenerator : public ConstStmtVisitor { if (const auto *VD = dyn_cast(D)) if (hasOrigin(VD)) if (const Expr *InitExpr = VD->getInit()) - addAssignOriginFact(*VD, *InitExpr); + killAndFlowOrigin(*VD, *InitExpr); } void VisitDeclRefExpr(const DeclRefExpr *DRE) { @@ -492,9 +504,23 @@ class FactGenerator : public ConstStmtVisitor { isa(MCE->getCalleeDecl())) { // The argument is the implicit object itself. handleFunctionCall(MCE, MCE->getMethodDecl(), - {MCE->getImplicitObjectArgument()}); + {MCE->getImplicitObjectArgument()}, + /*IsGslConstruction=*/true); + } + if (const CXXMethodDecl *Method = MCE->getMethodDecl()) { + // Construct the argument list, with the implicit 'this' object as the + // first argument. + llvm::SmallVector Args; + Args.push_back(MCE->getImplicitObjectArgument()); + Args.append(MCE->getArgs(), MCE->getArgs() + MCE->getNumArgs()); + + handleFunctionCall(MCE, Method, Args, /*IsGslConstruction=*/false); } - // FIXME: A more general VisitCallExpr could also be used here. + } + + void VisitCallExpr(const CallExpr *CE) { + handleFunctionCall(CE, CE->getDirectCallee(), + {CE->getArgs(), CE->getNumArgs()}); } void VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *N) { @@ -508,7 +534,7 @@ class FactGenerator : public ConstStmtVisitor { return; // An ImplicitCastExpr node itself gets an origin, which flows from the // origin of its sub-expression (after stripping its own parens/casts). - addAssignOriginFact(*ICE, *ICE->getSubExpr()); + killAndFlowOrigin(*ICE, *ICE->getSubExpr()); } void VisitUnaryOperator(const UnaryOperator *UO) { @@ -522,7 +548,7 @@ class FactGenerator : public ConstStmtVisitor { // its sub-expression (x). This fact will cause the dataflow analysis // to propagate any loans held by the sub-expression's origin to the // origin of this UnaryOperator expression. - addAssignOriginFact(*UO, *SubExpr); + killAndFlowOrigin(*UO, *SubExpr); } } @@ -542,8 +568,15 @@ class FactGenerator : public ConstStmtVisitor { } void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *OCE) { - if (OCE->isAssignmentOp() && OCE->getNumArgs() == 2) + // Assignment operators have special "kill-then-propagate" semantics + // and are handled separately. 
+ if (OCE->isAssignmentOp() && OCE->getNumArgs() == 2) { handleAssignment(OCE->getArg(0), OCE->getArg(1)); + return; + } + handleFunctionCall(OCE, OCE->getDirectCallee(), + {OCE->getArgs(), OCE->getNumArgs()}, + /*IsGslConstruction=*/false); } void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *FCE) { @@ -552,7 +585,7 @@ class FactGenerator : public ConstStmtVisitor { if (handleTestPoint(FCE)) return; if (isGslPointerType(FCE->getType())) - addAssignOriginFact(*FCE, *FCE->getSubExpr()); + killAndFlowOrigin(*FCE, *FCE->getSubExpr()); } void VisitInitListExpr(const InitListExpr *ILE) { @@ -561,7 +594,7 @@ class FactGenerator : public ConstStmtVisitor { // For list initialization with a single element, like `View{...}`, the // origin of the list itself is the origin of its single element. if (ILE->getNumInits() == 1) - addAssignOriginFact(*ILE, *ILE->getInit(0)); + killAndFlowOrigin(*ILE, *ILE->getInit(0)); } void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *MTE) { @@ -569,7 +602,7 @@ class FactGenerator : public ConstStmtVisitor { return; // A temporary object's origin is the same as the origin of the // expression that initializes it. - addAssignOriginFact(*MTE, *MTE->getSubExpr()); + killAndFlowOrigin(*MTE, *MTE->getSubExpr()); } void handleDestructor(const CFGAutomaticObjDtor &DtorOpt) { @@ -624,34 +657,51 @@ class FactGenerator : public ConstStmtVisitor { if (CCE->getNumArgs() != 1) return; if (hasOrigin(CCE->getArg(0))) - addAssignOriginFact(*CCE, *CCE->getArg(0)); + killAndFlowOrigin(*CCE, *CCE->getArg(0)); else // This could be a new borrow. handleFunctionCall(CCE, CCE->getConstructor(), - {CCE->getArgs(), CCE->getNumArgs()}); + {CCE->getArgs(), CCE->getNumArgs()}, + /*IsGslConstruction=*/true); } /// Checks if a call-like expression creates a borrow by passing a value to a /// reference parameter, creating an IssueFact if it does. + /// \param IsGslConstruction True if this is a GSL construction where all + /// argument origins should flow to the returned origin. void handleFunctionCall(const Expr *Call, const FunctionDecl *FD, - ArrayRef Args) { - if (!FD) + ArrayRef Args, + bool IsGslConstruction = false) { + // Ignore functions returning values with no origin. + if (!FD || !hasOrigin(Call)) return; - // TODO: Handle more than one arguments. - for (unsigned I = 0; I <= 0 /*Args.size()*/; ++I) { - const Expr *ArgExpr = Args[I]; - - // Propagate origins for CXX this. - if (FD->isCXXClassMember() && I == 0) { - addAssignOriginFact(*Call, *ArgExpr); - continue; + auto IsArgLifetimeBound = [FD](unsigned I) -> bool { + const ParmVarDecl *PVD = nullptr; + if (const auto *Method = dyn_cast(FD); + Method && Method->isInstance()) { + if (I == 0) + // For the 'this' argument, the attribute is on the method itself. + return implicitObjectParamIsLifetimeBound(Method); + if ((I - 1) < Method->getNumParams()) + // For explicit arguments, find the corresponding parameter + // declaration. + PVD = Method->getParamDecl(I - 1); + } else if (I < FD->getNumParams()) + // For free functions or static methods. + PVD = FD->getParamDecl(I); + return PVD ? PVD->hasAttr() : false; + }; + if (Args.empty()) + return; + bool killedSrc = false; + for (unsigned I = 0; I < Args.size(); ++I) + if (IsGslConstruction || IsArgLifetimeBound(I)) { + if (!killedSrc) { + killedSrc = true; + killAndFlowOrigin(*Call, *Args[I]); + } else + flowOrigin(*Call, *Args[I]); } - // The parameter is a pointer, reference, or gsl::Pointer. - // This is a borrow. 
We propagate the origin from the argument expression - // at the call site to the parameter declaration in the callee. - if (hasOrigin(ArgExpr)) - addAssignOriginFact(*Call, *ArgExpr); - } } /// Creates a loan for the storage path of a given declaration reference. @@ -668,11 +718,19 @@ class FactGenerator : public ConstStmtVisitor { } template - void addAssignOriginFact(const Destination &D, const Source &S) { + void flowOrigin(const Destination &D, const Source &S) { + OriginID DestOID = FactMgr.getOriginMgr().getOrCreate(D); + OriginID SrcOID = FactMgr.getOriginMgr().get(S); + CurrentBlockFacts.push_back(FactMgr.createFact( + DestOID, SrcOID, /*KillDest=*/false)); + } + + template + void killAndFlowOrigin(const Destination &D, const Source &S) { OriginID DestOID = FactMgr.getOriginMgr().getOrCreate(D); OriginID SrcOID = FactMgr.getOriginMgr().get(S); CurrentBlockFacts.push_back( - FactMgr.createFact(DestOID, SrcOID)); + FactMgr.createFact(DestOID, SrcOID, /*KillDest=*/true)); } /// Checks if the expression is a `void("__lifetime_test_point_...")` cast. @@ -703,12 +761,11 @@ class FactGenerator : public ConstStmtVisitor { if (const auto *DRE_LHS = dyn_cast(LHSExpr->IgnoreParenImpCasts())) { markUseAsWrite(DRE_LHS); - if (const auto *VD_LHS = dyn_cast(DRE_LHS->getDecl())) - // We are interested in assignments like `ptr1 = ptr2` or `ptr = &var`. - // LHS must be a pointer/reference type that can be an origin. RHS must - // also represent an origin (either another pointer/ref or an - // address-of). - addAssignOriginFact(*VD_LHS, *RHSExpr); + if (const auto *VD_LHS = dyn_cast(DRE_LHS->getDecl())) { + // Kill the old loans of the destination origin and flow the new loans + // from the source origin. + killAndFlowOrigin(*VD_LHS, *RHSExpr); + } } } @@ -882,8 +939,8 @@ class DataflowAnalysis { return D->transfer(In, *F->getAs()); case Fact::Kind::Expire: return D->transfer(In, *F->getAs()); - case Fact::Kind::AssignOrigin: - return D->transfer(In, *F->getAs()); + case Fact::Kind::OriginFlow: + return D->transfer(In, *F->getAs()); case Fact::Kind::ReturnOfOrigin: return D->transfer(In, *F->getAs()); case Fact::Kind::Use: @@ -897,7 +954,7 @@ class DataflowAnalysis { public: Lattice transfer(Lattice In, const IssueFact &) { return In; } Lattice transfer(Lattice In, const ExpireFact &) { return In; } - Lattice transfer(Lattice In, const AssignOriginFact &) { return In; } + Lattice transfer(Lattice In, const OriginFlowFact &) { return In; } Lattice transfer(Lattice In, const ReturnOfOriginFact &) { return In; } Lattice transfer(Lattice In, const UseFact &) { return In; } Lattice transfer(Lattice In, const TestPointFact &) { return In; } @@ -910,13 +967,10 @@ template static llvm::ImmutableSet join(llvm::ImmutableSet A, llvm::ImmutableSet B, typename llvm::ImmutableSet::Factory &F) { - if (A == B) - return A; if (A.getHeight() < B.getHeight()) std::swap(A, B); for (const T &E : B) - if (!A.contains(E)) - A = F.add(A, E); + A = F.add(A, E); return A; } @@ -950,11 +1004,10 @@ join(llvm::ImmutableMap A, llvm::ImmutableMap B, for (const auto &Entry : B) { const K &Key = Entry.first; const V &ValB = Entry.second; - const V *ValA = A.lookup(Key); - if (!ValA) - A = F.add(A, Key, ValB); - else if (*ValA != ValB) + if (const V *ValA = A.lookup(Key)) A = F.add(A, Key, JoinValues(*ValA, ValB)); + else + A = F.add(A, Key, ValB); } return A; } @@ -970,9 +1023,11 @@ using ExpiredLoanMap = llvm::ImmutableMap; /// An object to hold the factories for immutable collections, ensuring /// that all created states share 
the same underlying memory management. struct LifetimeFactory { - OriginLoanMap::Factory OriginMapFactory; - LoanSet::Factory LoanSetFactory; - ExpiredLoanMap::Factory ExpiredLoanMapFactory; + llvm::BumpPtrAllocator Allocator; + OriginLoanMap::Factory OriginMapFactory{Allocator, /*canonicalize=*/false}; + LoanSet::Factory LoanSetFactory{Allocator, /*canonicalize=*/false}; + ExpiredLoanMap::Factory ExpiredLoanMapFactory{Allocator, + /*canonicalize=*/false}; }; /// Represents the dataflow lattice for loan propagation. @@ -1049,14 +1104,20 @@ class LoanPropagationAnalysis LoanSetFactory.add(LoanSetFactory.getEmptySet(), LID))); } - /// The destination origin's loan set is replaced by the source's. - /// This implicitly "resets" the old loans of the destination. - Lattice transfer(Lattice In, const AssignOriginFact &F) { + /// A flow from source to destination. If `KillDest` is true, this replaces + /// the destination's loans with the source's. Otherwise, the source's loans + /// are merged into the destination's. + Lattice transfer(Lattice In, const OriginFlowFact &F) { OriginID DestOID = F.getDestOriginID(); OriginID SrcOID = F.getSrcOriginID(); + + LoanSet DestLoans = + F.getKillDest() ? LoanSetFactory.getEmptySet() : getLoans(In, DestOID); LoanSet SrcLoans = getLoans(In, SrcOID); + LoanSet MergedLoans = utils::join(DestLoans, SrcLoans, LoanSetFactory); + return LoanPropagationLattice( - OriginLoanMapFactory.add(In.Origins, DestOID, SrcLoans)); + OriginLoanMapFactory.add(In.Origins, DestOID, MergedLoans)); } LoanSet getLoans(OriginID OID, ProgramPoint P) { diff --git a/clang/lib/Analysis/PathDiagnostic.cpp b/clang/lib/Analysis/PathDiagnostic.cpp index ef24efd3c4bd0..e42731b93bfb2 100644 --- a/clang/lib/Analysis/PathDiagnostic.cpp +++ b/clang/lib/Analysis/PathDiagnostic.cpp @@ -24,6 +24,7 @@ #include "clang/AST/Type.h" #include "clang/Analysis/AnalysisDeclContext.h" #include "clang/Analysis/CFG.h" +#include "clang/Analysis/IssueHash.h" #include "clang/Analysis/ProgramPoint.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" @@ -1075,6 +1076,19 @@ unsigned PathDiagnostic::full_size() { return size; } +SmallString<32> +PathDiagnostic::getIssueHash(const SourceManager &SrcMgr, + const LangOptions &LangOpts) const { + PathDiagnosticLocation UPDLoc = getUniqueingLoc(); + FullSourceLoc FullLoc( + SrcMgr.getExpansionLoc(UPDLoc.isValid() ? UPDLoc.asLocation() + : getLocation().asLocation()), + SrcMgr); + + return clang::getIssueHash(FullLoc, getCheckerName(), getBugType(), + getDeclWithIssue(), LangOpts); +} + //===----------------------------------------------------------------------===// // FoldingSet profiling methods. //===----------------------------------------------------------------------===// diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp index d19f86a2223d8..a56fdb1abd625 100644 --- a/clang/lib/Analysis/ThreadSafety.cpp +++ b/clang/lib/Analysis/ThreadSafety.cpp @@ -419,22 +419,28 @@ class LocalVariableMap { // The expression for this variable, OR const Expr *Exp = nullptr; - // Reference to another VarDefinition - unsigned Ref = 0; + // Direct reference to another VarDefinition + unsigned DirectRef = 0; + + // Reference to underlying canonical non-reference VarDefinition. + unsigned CanonicalRef = 0; // The map with which Exp should be interpreted. 
Context Ctx; bool isReference() const { return !Exp; } + void invalidateRef() { DirectRef = CanonicalRef = 0; } + private: // Create ordinary variable definition VarDefinition(const NamedDecl *D, const Expr *E, Context C) : Dec(D), Exp(E), Ctx(C) {} // Create reference to previous definition - VarDefinition(const NamedDecl *D, unsigned R, Context C) - : Dec(D), Ref(R), Ctx(C) {} + VarDefinition(const NamedDecl *D, unsigned DirectRef, unsigned CanonicalRef, + Context C) + : Dec(D), DirectRef(DirectRef), CanonicalRef(CanonicalRef), Ctx(C) {} }; private: @@ -445,7 +451,7 @@ class LocalVariableMap { public: LocalVariableMap() { // index 0 is a placeholder for undefined variables (aka phi-nodes). - VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext())); + VarDefinitions.push_back(VarDefinition(nullptr, 0, 0, getEmptyContext())); } /// Look up a definition, within the given context. @@ -471,7 +477,7 @@ class LocalVariableMap { Ctx = VarDefinitions[i].Ctx; return VarDefinitions[i].Exp; } - i = VarDefinitions[i].Ref; + i = VarDefinitions[i].DirectRef; } return nullptr; } @@ -508,7 +514,7 @@ class LocalVariableMap { void dump() { for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) { const Expr *Exp = VarDefinitions[i].Exp; - unsigned Ref = VarDefinitions[i].Ref; + unsigned Ref = VarDefinitions[i].DirectRef; dumpVarDefinitionName(i); llvm::errs() << " = "; @@ -539,9 +545,9 @@ class LocalVariableMap { friend class VarMapBuilder; // Resolve any definition ID down to its non-reference base ID. - unsigned getCanonicalDefinitionID(unsigned ID) { + unsigned getCanonicalDefinitionID(unsigned ID) const { while (ID > 0 && VarDefinitions[ID].isReference()) - ID = VarDefinitions[ID].Ref; + ID = VarDefinitions[ID].CanonicalRef; return ID; } @@ -564,10 +570,11 @@ class LocalVariableMap { } // Add a new reference to an existing definition. - Context addReference(const NamedDecl *D, unsigned i, Context Ctx) { + Context addReference(const NamedDecl *D, unsigned Ref, Context Ctx) { unsigned newID = VarDefinitions.size(); Context NewCtx = ContextFactory.add(Ctx, D, newID); - VarDefinitions.push_back(VarDefinition(D, i, Ctx)); + VarDefinitions.push_back( + VarDefinition(D, Ref, getCanonicalDefinitionID(Ref), Ctx)); return NewCtx; } @@ -769,15 +776,14 @@ void LocalVariableMap::intersectBackEdge(Context C1, Context C2) { const unsigned *I2 = C2.lookup(P.first); if (!I2) { // Variable does not exist at the end of the loop, invalidate. - VDef->Ref = 0; + VDef->invalidateRef(); continue; } // Compare the canonical IDs. This correctly handles chains of references // and determines if the variable is truly loop-invariant. 
- if (getCanonicalDefinitionID(VDef->Ref) != getCanonicalDefinitionID(*I2)) { - VDef->Ref = 0; // Mark this variable as undefined - } + if (VDef->CanonicalRef != getCanonicalDefinitionID(*I2)) + VDef->invalidateRef(); // Mark this variable as undefined } } diff --git a/clang/lib/Basic/OpenMPKinds.cpp b/clang/lib/Basic/OpenMPKinds.cpp index 508685883364c..64b2bff063340 100644 --- a/clang/lib/Basic/OpenMPKinds.cpp +++ b/clang/lib/Basic/OpenMPKinds.cpp @@ -282,6 +282,7 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str, case OMPC_affinity: case OMPC_when: case OMPC_append_args: + case OMPC_looprange: break; default: break; @@ -627,6 +628,7 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind, case OMPC_affinity: case OMPC_when: case OMPC_append_args: + case OMPC_looprange: break; default: break; @@ -677,6 +679,11 @@ bool clang::isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind) { DKind == OMPD_target_exit_data || DKind == OMPD_target_update; } +bool clang::isOpenMPTargetMapEnteringDirective(OpenMPDirectiveKind DKind) { + return DKind == OMPD_target_data || DKind == OMPD_target_enter_data || + isOpenMPTargetExecutionDirective(DKind); +} + bool clang::isOpenMPNestingTeamsDirective(OpenMPDirectiveKind DKind) { if (DKind == OMPD_teams) return true; @@ -750,9 +757,14 @@ bool clang::isOpenMPCanonicalLoopNestTransformationDirective( DKind == OMPD_interchange || DKind == OMPD_stripe; } +bool clang::isOpenMPCanonicalLoopSequenceTransformationDirective( + OpenMPDirectiveKind DKind) { + return DKind == OMPD_fuse; +} + bool clang::isOpenMPLoopTransformationDirective(OpenMPDirectiveKind DKind) { - // FIXME: There will be more cases when we implement 'fuse'. - return isOpenMPCanonicalLoopNestTransformationDirective(DKind); + return isOpenMPCanonicalLoopNestTransformationDirective(DKind) || + isOpenMPCanonicalLoopSequenceTransformationDirective(DKind); } bool clang::isOpenMPCombinedParallelADirective(OpenMPDirectiveKind DKind) { diff --git a/clang/lib/Basic/Sarif.cpp b/clang/lib/Basic/Sarif.cpp index 69862b73febd7..b3fb9a21249e9 100644 --- a/clang/lib/Basic/Sarif.cpp +++ b/clang/lib/Basic/Sarif.cpp @@ -67,7 +67,7 @@ static std::string percentEncodeURICharacter(char C) { /// \param Filename The filename to be represented as URI. /// /// \return RFC3986 URI representing the input file name. -static std::string fileNameToURI(StringRef Filename) { +std::string SarifDocumentWriter::fileNameToURI(StringRef Filename) { SmallString<32> Ret = StringRef("file://"); // Get the root name to see if it has a URI authority. 
@@ -391,6 +391,11 @@ void SarifDocumentWriter::appendResult(const SarifResult &Result) { json::Object Ret{{"message", createMessage(Result.DiagnosticMessage)}, {"ruleIndex", static_cast(RuleIdx)}, {"ruleId", Rule.Id}}; + + if (!Result.HostedViewerURI.empty()) { + Ret["hostedViewerUri"] = Result.HostedViewerURI; + } + if (!Result.Locations.empty()) { json::Array Locs; for (auto &Range : Result.Locations) { @@ -398,6 +403,15 @@ void SarifDocumentWriter::appendResult(const SarifResult &Result) { } Ret["locations"] = std::move(Locs); } + + if (!Result.PartialFingerprints.empty()) { + json::Object fingerprints = {}; + for (auto &pair : Result.PartialFingerprints) { + fingerprints[pair.first] = pair.second; + } + Ret["partialFingerprints"] = std::move(fingerprints); + } + if (!Result.ThreadFlows.empty()) Ret["codeFlows"] = json::Array{createCodeFlow(Result.ThreadFlows)}; diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp index 72ee09d209e02..f4d7c1288cc04 100644 --- a/clang/lib/Basic/TargetInfo.cpp +++ b/clang/lib/Basic/TargetInfo.cpp @@ -18,6 +18,7 @@ #include "clang/Basic/LangOptions.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/TargetParser/TargetParser.h" #include @@ -1042,3 +1043,51 @@ void TargetInfo::copyAuxTarget(const TargetInfo *Aux) { auto *Src = static_cast(Aux); *Target = *Src; } + +std::string +TargetInfo::simplifyConstraint(StringRef Constraint, + SmallVectorImpl *OutCons) const { + std::string Result; + + for (const char *I = Constraint.begin(), *E = Constraint.end(); I < E; I++) { + switch (*I) { + default: + Result += convertConstraint(I); + break; + // Ignore these + case '*': + case '?': + case '!': + case '=': // Will see this and the following in mult-alt constraints. + case '+': + break; + case '#': // Ignore the rest of the constraint alternative. 
+ while (I + 1 != E && I[1] != ',') + I++; + break; + case '&': + case '%': + Result += *I; + while (I + 1 != E && I[1] == *I) + I++; + break; + case ',': + Result += "|"; + break; + case 'g': + Result += "imr"; + break; + case '[': { + assert(OutCons && + "Must pass output names to constraints with a symbolic name"); + unsigned Index; + bool ResolveResult = resolveSymbolicName(I, *OutCons, Index); + assert(ResolveResult && "Could not resolve symbolic name"); + (void)ResolveResult; + Result += llvm::utostr(Index); + break; + } + } + } + return Result; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 6a1746a7ad0ac..58345b45c97bc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -89,6 +89,11 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return cir::ConstRecordAttr::get(sTy, arrayAttr); } + cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) { + cir::ConstRecordAttr anonRecord = getAnonConstRecord(fieldsAttr); + return cir::TypeInfoAttr::get(anonRecord.getType(), fieldsAttr); + } + std::string getUniqueAnonRecordName() { return getUniqueRecordName("anon"); } std::string getUniqueRecordName(const std::string &baseName) { diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index da507d6f28335..d5b35c25c83ba 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -15,10 +15,89 @@ #include "clang/AST/GlobalDecl.h" #include "clang/CIR/MissingFeatures.h" +#include "llvm/Support/SaveAndRestore.h" using namespace clang; using namespace clang::CIRGen; +static void emitDeclInit(CIRGenFunction &cgf, const VarDecl *varDecl, + cir::GlobalOp globalOp) { + assert((varDecl->hasGlobalStorage() || + (varDecl->hasLocalStorage() && + cgf.getContext().getLangOpts().OpenCLCPlusPlus)) && + "VarDecl must have global or local (in the case of OpenCL) storage!"); + assert(!varDecl->getType()->isReferenceType() && + "Should not call emitDeclInit on a reference!"); + + CIRGenBuilderTy &builder = cgf.getBuilder(); + + // Set up the ctor region. + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::Block *block = builder.createBlock(&globalOp.getCtorRegion()); + CIRGenFunction::LexicalScope lexScope{cgf, globalOp.getLoc(), + builder.getInsertionBlock()}; + lexScope.setAsGlobalInit(); + builder.setInsertionPointToStart(block); + + Address declAddr(cgf.cgm.getAddrOfGlobalVar(varDecl), + cgf.cgm.getASTContext().getDeclAlign(varDecl)); + + QualType type = varDecl->getType(); + LValue lv = cgf.makeAddrLValue(declAddr, type); + + const Expr *init = varDecl->getInit(); + switch (CIRGenFunction::getEvaluationKind(type)) { + case cir::TEK_Scalar: + assert(!cir::MissingFeatures::objCGC()); + cgf.emitScalarInit(init, cgf.getLoc(varDecl->getLocation()), lv, false); + break; + case cir::TEK_Complex: + cgf.cgm.errorNYI(varDecl->getSourceRange(), "complex global initializer"); + break; + case cir::TEK_Aggregate: + assert(!cir::MissingFeatures::aggValueSlotGC()); + cgf.emitAggExpr(init, + AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, + AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap)); + break; + } + + // Finish the ctor region. 
+ builder.setInsertionPointToEnd(block); + cir::YieldOp::create(builder, globalOp.getLoc()); +} + +static void emitDeclDestroy(CIRGenFunction &cgf, const VarDecl *vd, + cir::GlobalOp addr) { + // Honor __attribute__((no_destroy)) and bail instead of attempting + // to emit a reference to a possibly nonexistent destructor, which + // in turn can cause a crash. This will result in a global constructor + // that isn't balanced out by a destructor call as intended by the + // attribute. This also checks for -fno-c++-static-destructors and + // bails even if the attribute is not present. + QualType::DestructionKind dtorKind = vd->needsDestruction(cgf.getContext()); + + // FIXME: __attribute__((cleanup)) ? + + switch (dtorKind) { + case QualType::DK_none: + return; + + case QualType::DK_cxx_destructor: + break; + + case QualType::DK_objc_strong_lifetime: + case QualType::DK_objc_weak_lifetime: + case QualType::DK_nontrivial_c_struct: + // We don't care about releasing objects during process teardown. + assert(!vd->getTLSKind() && "should have rejected this"); + return; + } + + cgf.cgm.errorNYI(vd->getSourceRange(), "global with destructor"); +} + cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl gd) { const CIRGenFunctionInfo &fnInfo = getTypes().arrangeCXXStructorDeclaration(gd); @@ -38,3 +117,63 @@ cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl gd) { assert(!cir::MissingFeatures::opFuncAttributesForDefinition()); return fn; } + +// Global variables requiring non-trivial initialization are handled +// differently in CIR than in classic codegen. Classic codegen emits +// a global init function (__cxx_global_var_init) and inserts +// initialization for each global there. In CIR, we attach a ctor +// region to the global variable and insert the initialization code +// into the ctor region. This will be moved into the +// __cxx_global_var_init function during the LoweringPrepare pass. +void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl, + cir::GlobalOp addr, + bool performInit) { + QualType ty = varDecl->getType(); + + // TODO: handle address space + // The address space of a static local variable (addr) may be different + // from the address space of the "this" argument of the constructor. In that + // case, we need an addrspacecast before calling the constructor. + // + // struct StructWithCtor { + // __device__ StructWithCtor() {...} + // }; + // __device__ void foo() { + // __shared__ StructWithCtor s; + // ... + // } + // + // For example, in the above CUDA code, the static local variable s has a + // "shared" address space qualifier, but the constructor of StructWithCtor + // expects "this" in the "generic" address space. + assert(!cir::MissingFeatures::addressSpace()); + + // Create a CIRGenFunction to emit the initializer. While this isn't a true + // function, the handling works the same way. + CIRGenFunction cgf{*this, builder, true}; + llvm::SaveAndRestore savedCGF(curCGF, &cgf); + curCGF->curFn = addr; + + CIRGenFunction::SourceLocRAIIObject fnLoc{cgf, + getLoc(varDecl->getLocation())}; + + assert(!cir::MissingFeatures::astVarDeclInterface()); + + if (!ty->isReferenceType()) { + assert(!cir::MissingFeatures::openMP()); + + bool needsDtor = varDecl->needsDestruction(getASTContext()) == + QualType::DK_cxx_destructor; + // PerformInit, constant store invariant / destroy handled below. 
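// Editorial sketch (not part of the patch): the kinds of globals that take
// this path. Anything needing dynamic initialization gets a ctor region on
// its cir.global, later outlined into __cxx_global_var_init by
// LoweringPrepare. Names below are illustrative only.
struct Widget { Widget(); };
Widget w;              // non-trivial constructor => ctor region
int seed();
int cached = seed();   // non-constant scalar initializer => ctor region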
+ if (performInit) + emitDeclInit(cgf, varDecl, addr); + + if (varDecl->getType().isConstantStorage(getASTContext(), true, !needsDtor)) + errorNYI(varDecl->getSourceRange(), "global with constant storage"); + else + emitDeclDestroy(cgf, varDecl, addr); + return; + } + + errorNYI(varDecl->getSourceRange(), "global with reference type"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index ae922599809b8..1dee77425c30d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -114,6 +114,9 @@ class CIRGenCXXABI { virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0; + virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, + QualType ty) = 0; + /// Get the type of the implicit "this" parameter used by a method. May return /// zero if no specific type is applicable, e.g. if the ABI expects the "this" /// parameter to point to some artificial offset in a complete object due to diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h index 81cbb854f3b7d..52d541f2b09b5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.h +++ b/clang/lib/CIR/CodeGen/CIRGenCall.h @@ -256,6 +256,7 @@ class ReturnValueSlot { ReturnValueSlot() = default; ReturnValueSlot(Address addr) : addr(addr) {} + bool isNull() const { return !addr.isValid(); } Address getValue() const { return addr; } }; diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 18e62f0213dd6..9d12a13dd79c0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -778,6 +778,86 @@ void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &args) { s->getStmtClassName()); } +void CIRGenFunction::emitForwardingCallToLambda( + const CXXMethodDecl *callOperator, CallArgList &callArgs) { + // Get the address of the call operator. + const CIRGenFunctionInfo &calleeFnInfo = + cgm.getTypes().arrangeCXXMethodDeclaration(callOperator); + cir::FuncOp calleePtr = cgm.getAddrOfFunction( + GlobalDecl(callOperator), cgm.getTypes().getFunctionType(calleeFnInfo)); + + // Prepare the return slot. + const FunctionProtoType *fpt = + callOperator->getType()->castAs(); + QualType resultType = fpt->getReturnType(); + ReturnValueSlot returnSlot; + + // We don't need to separately arrange the call arguments because + // the call can't be variadic anyway --- it's impossible to forward + // variadic arguments. + + // Now emit our call. + CIRGenCallee callee = + CIRGenCallee::forDirect(calleePtr, GlobalDecl(callOperator)); + RValue rv = emitCall(calleeFnInfo, callee, returnSlot, callArgs); + + // If necessary, copy the returned value into the slot. 
+ if (!resultType->isVoidType() && returnSlot.isNull()) { + if (getLangOpts().ObjCAutoRefCount && resultType->isObjCRetainableType()) + cgm.errorNYI(callOperator->getSourceRange(), + "emitForwardingCallToLambda: ObjCAutoRefCount"); + emitReturnOfRValue(*currSrcLoc, rv, resultType); + } else { + cgm.errorNYI(callOperator->getSourceRange(), + "emitForwardingCallToLambda: return slot is not null"); + } +} + +void CIRGenFunction::emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md) { + const CXXRecordDecl *lambda = md->getParent(); + + // Start building arguments for forwarding call + CallArgList callArgs; + + QualType lambdaType = getContext().getCanonicalTagType(lambda); + QualType thisType = getContext().getPointerType(lambdaType); + Address thisPtr = + createMemTemp(lambdaType, getLoc(md->getSourceRange()), "unused.capture"); + callArgs.add(RValue::get(thisPtr.getPointer()), thisType); + + // Add the rest of the parameters. + for (auto *param : md->parameters()) + emitDelegateCallArg(callArgs, param, param->getBeginLoc()); + + const CXXMethodDecl *callOp = lambda->getLambdaCallOperator(); + // For a generic lambda, find the corresponding call operator specialization + // to which the call to the static-invoker shall be forwarded. + if (lambda->isGenericLambda()) { + assert(md->isFunctionTemplateSpecialization()); + const TemplateArgumentList *tal = md->getTemplateSpecializationArgs(); + FunctionTemplateDecl *callOpTemplate = + callOp->getDescribedFunctionTemplate(); + void *InsertPos = nullptr; + FunctionDecl *correspondingCallOpSpecialization = + callOpTemplate->findSpecialization(tal->asArray(), InsertPos); + assert(correspondingCallOpSpecialization); + callOp = cast(correspondingCallOpSpecialization); + } + emitForwardingCallToLambda(callOp, callArgs); +} + +void CIRGenFunction::emitLambdaStaticInvokeBody(const CXXMethodDecl *md) { + if (md->isVariadic()) { + // Codgen for LLVM doesn't emit code for this as well, it says: + // FIXME: Making this work correctly is nasty because it requires either + // cloning the body of the call operator or making the call operator + // forward. + cgm.errorNYI(md->getSourceRange(), "emitLambdaStaticInvokeBody: variadic"); + } + + emitLambdaDelegatingInvokeBody(md); +} + void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr, QualType type) { const auto *record = type->castAsCXXRecordDecl(); @@ -871,28 +951,37 @@ Address CIRGenFunction::getAddressOfBaseClass( bool nullCheckValue, SourceLocation loc) { assert(!path.empty() && "Base path should not be empty!"); + CastExpr::path_const_iterator start = path.begin(); + const CXXRecordDecl *vBase = nullptr; + if ((*path.begin())->isVirtual()) { - // The implementation here is actually complete, but let's flag this - // as an error until the rest of the virtual base class support is in place. - cgm.errorNYI(loc, "getAddrOfBaseClass: virtual base"); - return Address::invalid(); + vBase = (*start)->getType()->castAsCXXRecordDecl(); + ++start; } // Compute the static offset of the ultimate destination within its // allocating subobject (the virtual base, if there is one, or else // the "complete" object that we see). - CharUnits nonVirtualOffset = - cgm.computeNonVirtualBaseClassOffset(derived, path); + CharUnits nonVirtualOffset = cgm.computeNonVirtualBaseClassOffset( + vBase ? vBase : derived, {start, path.end()}); + + // If there's a virtual step, we can sometimes "devirtualize" it. + // For now, that's limited to when the derived type is final. 
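// Editorial sketch (not part of the patch): the case the final-class
// devirtualization targets. When the derived class is final, the complete
// object layout is known, so the virtual base's offset is a compile-time
// constant and no vtable lookup is needed for the conversion.
struct B { int b; };
struct Mid : virtual B {};
struct D final : Mid {};
B *asBase(D *d) { return d; }  // derived-to-base step across a virtual base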
+ // TODO: "devirtualize" this for accesses to known-complete objects. + if (vBase && derived->hasAttr()) { + const ASTRecordLayout &layout = getContext().getASTRecordLayout(derived); + CharUnits vBaseOffset = layout.getVBaseClassOffset(vBase); + nonVirtualOffset += vBaseOffset; + vBase = nullptr; // we no longer have a virtual step + } // Get the base pointer type. mlir::Type baseValueTy = convertType((path.end()[-1])->getType()); assert(!cir::MissingFeatures::addressSpace()); - // The if statement here is redundant now, but it will be needed when we add - // support for virtual base classes. // If there is no virtual base, use cir.base_class_addr. It takes care of // the adjustment and the null pointer check. - if (nonVirtualOffset.isZero()) { + if (nonVirtualOffset.isZero() && !vBase) { assert(!cir::MissingFeatures::sanitizers()); return builder.createBaseClassAddr(getLoc(loc), value, baseValueTy, 0, /*assumeNotNull=*/true); @@ -900,10 +989,17 @@ Address CIRGenFunction::getAddressOfBaseClass( assert(!cir::MissingFeatures::sanitizers()); - // Apply the offset - value = builder.createBaseClassAddr(getLoc(loc), value, baseValueTy, - nonVirtualOffset.getQuantity(), - /*assumeNotNull=*/true); + // Compute the virtual offset. + mlir::Value virtualOffset = nullptr; + if (vBase) { + virtualOffset = cgm.getCXXABI().getVirtualBaseClassOffset( + getLoc(loc), *this, value, derived, vBase); + } + + // Apply both offsets. + value = applyNonVirtualAndVirtualOffset( + getLoc(loc), *this, value, nonVirtualOffset, virtualOffset, derived, + vBase, baseValueTy, not nullCheckValue); // Cast to the destination type. value = value.withElementType(builder, baseValueTy); diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 10b8255623763..563a753ab4efd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -35,8 +35,8 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d, getContext().getLangOpts().ElideConstructors && d.isNRVOVariable(); CIRGenFunction::AutoVarEmission emission(d); - emission.IsEscapingByRef = d.isEscapingByref(); - if (emission.IsEscapingByRef) + emission.isEscapingByRef = d.isEscapingByref(); + if (emission.isEscapingByRef) cgm.errorNYI(d.getSourceRange(), "emitAutoVarDecl: decl escaping by reference"); @@ -78,7 +78,7 @@ CIRGenFunction::emitAutoVarAlloca(const VarDecl &d, alignment); } - emission.Addr = address; + emission.addr = address; setAddrOfLocalVar(&d, address); return emission; @@ -101,13 +101,13 @@ bool CIRGenFunction::isTrivialInitializer(const Expr *init) { void CIRGenFunction::emitAutoVarInit( const CIRGenFunction::AutoVarEmission &emission) { - assert(emission.Variable && "emission was not valid!"); + assert(emission.variable && "emission was not valid!"); // If this was emitted as a global constant, we're done. if (emission.wasEmittedAsGlobal()) return; - const VarDecl &d = *emission.Variable; + const VarDecl &d = *emission.variable; QualType type = d.getType(); @@ -124,7 +124,7 @@ void CIRGenFunction::emitAutoVarInit( return; } - const Address addr = emission.Addr; + const Address addr = emission.addr; // Check whether this is a byref variable that's potentially // captured and moved by its own initializer. 
If so, we'll need to @@ -153,7 +153,7 @@ void CIRGenFunction::emitAutoVarInit( } mlir::Attribute constant; - if (emission.IsConstantAggregate || + if (emission.isConstantAggregate || d.mightBeUsableInConstantExpressions(getContext())) { // FIXME: Differently from LLVM we try not to emit / lower too much // here for CIR since we are interested in seeing the ctor in some @@ -196,7 +196,7 @@ void CIRGenFunction::emitAutoVarInit( // FIXME(cir): migrate most of this file to use mlir::TypedAttr directly. auto typedConstant = mlir::dyn_cast(constant); assert(typedConstant && "expected typed attribute"); - if (!emission.IsConstantAggregate) { + if (!emission.isConstantAggregate) { // For simple scalar/complex initialization, store the value directly. LValue lv = makeAddrLValue(addr, type); assert(init && "expected initializer"); @@ -209,7 +209,7 @@ void CIRGenFunction::emitAutoVarInit( void CIRGenFunction::emitAutoVarCleanups( const CIRGenFunction::AutoVarEmission &emission) { - const VarDecl &d = *emission.Variable; + const VarDecl &d = *emission.variable; // Check the type for a cleanup. if (QualType::DestructionKind dtorKind = d.needsDestruction(getContext())) @@ -821,7 +821,7 @@ void CIRGenFunction::emitAutoVarTypeCleanup( // original stack object, not the possibly forwarded object. Address addr = emission.getObjectAddress(*this); - const VarDecl *var = emission.Variable; + const VarDecl *var = emission.variable; QualType type = var->getType(); CleanupKind cleanupKind = NormalAndEHCleanup; @@ -834,7 +834,7 @@ void CIRGenFunction::emitAutoVarTypeCleanup( case QualType::DK_cxx_destructor: // If there's an NRVO flag on the emission, we need a different // cleanup. - if (emission.NRVOFlag) { + if (emission.nrvoFlag) { cgm.errorNYI(var->getSourceRange(), "emitAutoVarTypeCleanup: NRVO"); return; } diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp new file mode 100644 index 0000000000000..d1efed80aaf0e --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp @@ -0,0 +1,28 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with code generation of C++ declarations +// +//===----------------------------------------------------------------------===// + +#include "CIRGenModule.h" +#include "clang/AST/Attr.h" +#include "clang/Basic/LangOptions.h" + +using namespace clang; +using namespace clang::CIRGen; + +void CIRGenModule::emitCXXGlobalVarDeclInitFunc(const VarDecl *vd, + cir::GlobalOp addr, + bool performInit) { + assert(!cir::MissingFeatures::cudaSupport()); + + assert(!cir::MissingFeatures::deferredCXXGlobalInit()); + + emitCXXGlobalVarDeclInit(vd, addr, performInit); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 62769e952e45d..fa68ad931ba74 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -913,8 +913,7 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) { assert(e->isPrefix() && "Prefix operator in unexpected state!"); if (e->getType()->isAnyComplexType()) { - cgm.errorNYI(e->getSourceRange(), "UnaryOp complex inc/dec"); - lv = LValue(); + emitComplexPrePostIncDec(e, lv, kind, /*isPre=*/true); } else { emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true); } @@ -1916,8 +1915,7 @@ RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type, LValue lvalue = makeAddrLValue(addr, type, AlignmentSource::Decl); switch (getEvaluationKind(type)) { case cir::TEK_Complex: - cgm.errorNYI(loc, "convertTempToRValue: complex type"); - return RValue::get(nullptr); + return RValue::getComplex(emitLoadOfComplex(lvalue, loc)); case cir::TEK_Aggregate: cgm.errorNYI(loc, "convertTempToRValue: aggregate type"); return RValue::get(nullptr); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp index 5615960ea5247..1e987f3bedc7e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp @@ -131,12 +131,9 @@ class AggExprEmitter : public StmtVisitor { std::string("AggExprEmitter::VisitStmt: ") + s->getStmtClassName()); } - void VisitParenExpr(ParenExpr *pe) { - cgf.cgm.errorNYI(pe->getSourceRange(), "AggExprEmitter: VisitParenExpr"); - } + void VisitParenExpr(ParenExpr *pe) { Visit(pe->getSubExpr()); } void VisitGenericSelectionExpr(GenericSelectionExpr *ge) { - cgf.cgm.errorNYI(ge->getSourceRange(), - "AggExprEmitter: VisitGenericSelectionExpr"); + Visit(ge->getResultExpr()); } void VisitCoawaitExpr(CoawaitExpr *e) { cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr"); @@ -213,9 +210,7 @@ class AggExprEmitter : public StmtVisitor { cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitAbstractConditionalOperator"); } - void VisitChooseExpr(const ChooseExpr *e) { - cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitChooseExpr"); - } + void VisitChooseExpr(const ChooseExpr *e) { Visit(e->getChosenSubExpr()); } void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) { cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXParenListInitExpr"); @@ -500,7 +495,7 @@ void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) { switch (cgf.getEvaluationKind(type)) { case cir::TEK_Complex: - cgf.cgm.errorNYI("emitInitializationToLValue TEK_Complex"); + cgf.emitComplexExprIntoLValue(e, lv, /*isInit*/ true); break; case cir::TEK_Aggregate: cgf.emitAggExpr(e, 
AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp index 3db34ccb1748d..7989ad2e30f17 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp @@ -238,8 +238,8 @@ static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init, cgf.makeAddrLValue(newPtr, allocType), false); return; case cir::TEK_Complex: - cgf.cgm.errorNYI(init->getSourceRange(), - "storeAnyExprIntoOneUnit: complex"); + cgf.emitComplexExprIntoLValue(init, cgf.makeAddrLValue(newPtr, allocType), + /*isInit*/ true); return; case cir::TEK_Aggregate: { assert(!cir::MissingFeatures::aggValueSlotGC()); @@ -332,6 +332,117 @@ static RValue emitNewDeleteCall(CIRGenFunction &cgf, return rv; } +namespace { +/// Calls the given 'operator delete' on a single object. +struct CallObjectDelete final : EHScopeStack::Cleanup { + mlir::Value ptr; + const FunctionDecl *operatorDelete; + QualType elementType; + + CallObjectDelete(mlir::Value ptr, const FunctionDecl *operatorDelete, + QualType elementType) + : ptr(ptr), operatorDelete(operatorDelete), elementType(elementType) {} + + void emit(CIRGenFunction &cgf) override { + cgf.emitDeleteCall(operatorDelete, ptr, elementType); + } + + // This is a placeholder until EHCleanupScope is implemented. + size_t getSize() const override { + assert(!cir::MissingFeatures::ehCleanupScope()); + return sizeof(CallObjectDelete); + } +}; +} // namespace + +/// Emit the code for deleting a single object. +static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de, + Address ptr, QualType elementType) { + // C++11 [expr.delete]p3: + // If the static type of the object to be deleted is different from its + // dynamic type, the static type shall be a base class of the dynamic type + // of the object to be deleted and the static type shall have a virtual + // destructor or the behavior is undefined. + assert(!cir::MissingFeatures::emitTypeCheck()); + + const FunctionDecl *operatorDelete = de->getOperatorDelete(); + assert(!operatorDelete->isDestroyingOperatorDelete()); + + // Find the destructor for the type, if applicable. If the + // destructor is virtual, we'll just emit the vcall and return. + const CXXDestructorDecl *dtor = nullptr; + if (const auto *rd = elementType->getAsCXXRecordDecl()) { + if (rd->hasDefinition() && !rd->hasTrivialDestructor()) { + dtor = rd->getDestructor(); + + if (dtor->isVirtual()) { + cgf.cgm.errorNYI(de->getSourceRange(), + "emitObjectDelete: virtual destructor"); + } + } + } + + // Make sure that we call delete even if the dtor throws. + // This doesn't have to a conditional cleanup because we're going + // to pop it off in a second. + cgf.ehStack.pushCleanup( + NormalAndEHCleanup, ptr.getPointer(), operatorDelete, elementType); + + if (dtor) { + cgf.emitCXXDestructorCall(dtor, Dtor_Complete, + /*ForVirtualBase=*/false, + /*Delegating=*/false, ptr, elementType); + } else if (elementType.getObjCLifetime()) { + assert(!cir::MissingFeatures::objCLifetime()); + cgf.cgm.errorNYI(de->getSourceRange(), "emitObjectDelete: ObjCLifetime"); + } + + // In traditional LLVM codegen null checks are emitted to save a delete call. + // In CIR we optimize for size by default, the null check should be added into + // this function callers. 
+ assert(!cir::MissingFeatures::emitNullCheckForDeleteCalls()); + + cgf.popCleanupBlock(); +} + +void CIRGenFunction::emitCXXDeleteExpr(const CXXDeleteExpr *e) { + const Expr *arg = e->getArgument(); + Address ptr = emitPointerWithAlignment(arg); + + // Null check the pointer. + // + // We could avoid this null check if we can determine that the object + // destruction is trivial and doesn't require an array cookie; we can + // unconditionally perform the operator delete call in that case. For now, we + // assume that deleted pointers are null rarely enough that it's better to + // keep the branch. This might be worth revisiting for a -O0 code size win. + // + // CIR note: emit the code size friendly by default for now, such as mentioned + // in `emitObjectDelete`. + assert(!cir::MissingFeatures::emitNullCheckForDeleteCalls()); + QualType deleteTy = e->getDestroyedType(); + + // A destroying operator delete overrides the entire operation of the + // delete expression. + if (e->getOperatorDelete()->isDestroyingOperatorDelete()) { + cgm.errorNYI(e->getSourceRange(), + "emitCXXDeleteExpr: destroying operator delete"); + return; + } + + // We might be deleting a pointer to array. + deleteTy = getContext().getBaseElementType(deleteTy); + ptr = ptr.withElementType(builder, convertTypeForMem(deleteTy)); + + if (e->isArrayForm()) { + assert(!cir::MissingFeatures::deleteArray()); + cgm.errorNYI(e->getSourceRange(), "emitCXXDeleteExpr: array delete"); + return; + } else { + emitObjectDelete(*this, e, ptr, deleteTy); + } +} + mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) { // The element type being allocated. QualType allocType = getContext().getBaseElementType(e->getAllocatedType()); @@ -443,3 +554,53 @@ mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) { allocSizeWithoutCookie); return result.getPointer(); } + +void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD, + mlir::Value ptr, QualType deleteTy) { + assert(!cir::MissingFeatures::deleteArray()); + + const auto *deleteFTy = deleteFD->getType()->castAs(); + CallArgList deleteArgs; + + UsualDeleteParams params = deleteFD->getUsualDeleteParams(); + auto paramTypeIt = deleteFTy->param_type_begin(); + + // Pass std::type_identity tag if present + if (isTypeAwareAllocation(params.TypeAwareDelete)) + cgm.errorNYI(deleteFD->getSourceRange(), + "emitDeleteCall: type aware delete"); + + // Pass the pointer itself. + QualType argTy = *paramTypeIt++; + mlir::Value deletePtr = + builder.createBitcast(ptr.getLoc(), ptr, convertType(argTy)); + deleteArgs.add(RValue::get(deletePtr), argTy); + + // Pass the std::destroying_delete tag if present. + if (params.DestroyingDelete) + cgm.errorNYI(deleteFD->getSourceRange(), + "emitDeleteCall: destroying delete"); + + // Pass the size if the delete function has a size_t parameter. + if (params.Size) { + QualType sizeType = *paramTypeIt++; + CharUnits deleteTypeSize = getContext().getTypeSizeInChars(deleteTy); + assert(mlir::isa(convertType(sizeType)) && + "expected cir::IntType"); + cir::ConstantOp size = builder.getConstInt( + *currSrcLoc, convertType(sizeType), deleteTypeSize.getQuantity()); + + deleteArgs.add(RValue::get(size), sizeType); + } + + // Pass the alignment if the delete function has an align_val_t parameter. 
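// Editorial sketch (not part of the patch): usual deallocation signatures
// that drive the extra arguments emitDeleteCall passes. Types are
// illustrative only.
#include <cstddef>
#include <new>
struct T {
  ~T();
  // sized delete: sizeof(T) is passed as the size_t argument
  static void operator delete(void *p, std::size_t n);
};
struct alignas(32) U {
  // aligned (and sized) delete: would also take std::align_val_t,
  // which this patch still reports as NYI
  static void operator delete(void *p, std::size_t n, std::align_val_t a);
};
void destroy(T *t, U *u) { delete t; delete u; }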
+ if (isAlignedAllocation(params.Alignment)) + cgm.errorNYI(deleteFD->getSourceRange(), + "emitDeleteCall: aligned allocation"); + + assert(paramTypeIt == deleteFTy->param_type_end() && + "unknown parameter to usual delete function"); + + // Emit the call to delete. + emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index b7ae55e72bdfc..fcde4875393cd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -193,8 +193,7 @@ class ComplexExprEmitter : public StmtVisitor { mlir::Value VisitUnaryNot(const UnaryOperator *e); // LNot,Real,Imag never return complex. mlir::Value VisitUnaryExtension(const UnaryOperator *e) { - cgf.cgm.errorNYI(e->getExprLoc(), "ComplexExprEmitter VisitUnaryExtension"); - return {}; + return Visit(e->getSubExpr()); } mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) { cgf.cgm.errorNYI(dae->getExprLoc(), @@ -317,8 +316,7 @@ class ComplexExprEmitter : public StmtVisitor { mlir::Value VisitVAArgExpr(VAArgExpr *e); mlir::Value VisitAtomicExpr(AtomicExpr *e) { - cgf.cgm.errorNYI(e->getExprLoc(), "ComplexExprEmitter VisitAtomicExpr"); - return {}; + return cgf.emitAtomicExpr(e).getComplexValue(); } mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) { diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp index f660544d13cfa..e20a4fc3c63aa 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConstant.cpp @@ -775,7 +775,9 @@ class ConstExprEmitter } mlir::Attribute VisitCXXConstructExpr(CXXConstructExpr *e, QualType ty) { - cgm.errorNYI(e->getBeginLoc(), "ConstExprEmitter::VisitCXXConstructExpr"); + if (!e->getConstructor()->isTrivial()) + return nullptr; + cgm.errorNYI(e->getBeginLoc(), "trivial constructor const handling"); return {}; } @@ -1464,25 +1466,24 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &value, case APValue::ComplexInt: case APValue::ComplexFloat: { mlir::Type desiredType = cgm.convertType(destType); - cir::ComplexType complexType = - mlir::dyn_cast(desiredType); + auto complexType = mlir::dyn_cast(desiredType); mlir::Type complexElemTy = complexType.getElementType(); if (isa(complexElemTy)) { - llvm::APSInt real = value.getComplexIntReal(); - llvm::APSInt imag = value.getComplexIntImag(); - return builder.getAttr( - complexType, cir::IntAttr::get(complexElemTy, real), - cir::IntAttr::get(complexElemTy, imag)); + const llvm::APSInt &real = value.getComplexIntReal(); + const llvm::APSInt &imag = value.getComplexIntImag(); + return cir::ConstComplexAttr::get(builder.getContext(), complexType, + cir::IntAttr::get(complexElemTy, real), + cir::IntAttr::get(complexElemTy, imag)); } assert(isa(complexElemTy) && "expected floating-point type"); - llvm::APFloat real = value.getComplexFloatReal(); - llvm::APFloat imag = value.getComplexFloatImag(); - return builder.getAttr( - complexType, cir::FPAttr::get(complexElemTy, real), - cir::FPAttr::get(complexElemTy, imag)); + const llvm::APFloat &real = value.getComplexFloatReal(); + const llvm::APFloat &imag = value.getComplexFloatImag(); + return cir::ConstComplexAttr::get(builder.getContext(), complexType, + cir::FPAttr::get(complexElemTy, real), + cir::FPAttr::get(complexElemTy, imag)); } case APValue::FixedPoint: case APValue::AddrLabelDiff: diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 276adcfc5c6be..500007f6f241b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -676,6 +676,10 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitRealImag(const UnaryOperator *e, QualType promotionType = QualType()); + mlir::Value VisitUnaryExtension(const UnaryOperator *e) { + return Visit(e->getSubExpr()); + } + mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) { CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die); return Visit(die->getExpr()); @@ -687,6 +691,10 @@ class ScalarExprEmitter : public StmtVisitor { mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) { return cgf.emitCXXNewExpr(e); } + mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) { + cgf.emitCXXDeleteExpr(e); + return {}; + } mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) { cgf.emitCXXThrowExpr(e); @@ -1274,13 +1282,8 @@ mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e, } else if (const auto *uo = dyn_cast(e)) { switch (uo->getOpcode()) { case UO_Imag: - cgf.cgm.errorNYI(e->getSourceRange(), - "ScalarExprEmitter::emitPromoted unary imag"); - return {}; case UO_Real: - cgf.cgm.errorNYI(e->getSourceRange(), - "ScalarExprEmitter::emitPromoted unary real"); - return {}; + return VisitRealImag(uo, promotionType); case UO_Minus: return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType); case UO_Plus: @@ -2087,9 +2090,13 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) { if (e->getType()->isVectorType() && e->getType()->castAs()->getVectorKind() == VectorKind::Generic) { - assert(!cir::MissingFeatures::vectorType()); - cgf.cgm.errorNYI(e->getSourceRange(), "vector logical not"); - return {}; + mlir::Value oper = Visit(e->getSubExpr()); + mlir::Location loc = cgf.getLoc(e->getExprLoc()); + auto operVecTy = mlir::cast(oper.getType()); + auto exprVecTy = mlir::cast(cgf.convertType(e->getType())); + mlir::Value zeroVec = builder.getNullValue(operVecTy, loc); + return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq, + oper, zeroVec); } // Compare operand to zero. @@ -2125,33 +2132,43 @@ mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e, "Invalid UnaryOp kind for ComplexType Real or Imag"); Expr *op = e->getSubExpr(); + mlir::Location loc = cgf.getLoc(e->getExprLoc()); if (op->getType()->isAnyComplexType()) { // If it's an l-value, load through the appropriate subobject l-value. // Note that we have to ask `e` because `op` might be an l-value that - // this won't work for, e.g. an Obj-C property. - if (e->isGLValue()) { - mlir::Location loc = cgf.getLoc(e->getExprLoc()); - mlir::Value complex = cgf.emitComplexExpr(op); - if (!promotionTy.isNull()) { - complex = cgf.emitPromotedValue(complex, promotionTy); - } - - return e->getOpcode() == clang::UO_Real - ? builder.createComplexReal(loc, complex) - : builder.createComplexImag(loc, complex); + // this won't work for, e.g. an Obj-C property + mlir::Value complex = cgf.emitComplexExpr(op); + if (e->isGLValue() && !promotionTy.isNull()) { + promotionTy = promotionTy->isAnyComplexType() + ? promotionTy + : cgf.getContext().getComplexType(promotionTy); + complex = cgf.emitPromotedValue(complex, promotionTy); } - // Otherwise, calculate and project. - cgf.cgm.errorNYI(e->getSourceRange(), - "VisitRealImag calculate and project"); - return {}; + return e->getOpcode() == clang::UO_Real + ? 
builder.createComplexReal(loc, complex) + : builder.createComplexImag(loc, complex); + } + + if (e->getOpcode() == UO_Real) { + mlir::Value operand = promotionTy.isNull() + ? Visit(op) + : cgf.emitPromotedScalarExpr(op, promotionTy); + return builder.createComplexReal(loc, operand); } - // __real or __imag on a scalar returns zero. Emit the subexpr to ensure side + // __imag on a scalar returns zero. Emit the subexpr to ensure side // effects are evaluated, but not the actual value. - cgf.cgm.errorNYI(e->getSourceRange(), - "VisitRealImag __real or __imag on a scalar"); - return {}; + if (op->isGLValue()) + cgf.emitLValue(op); + else if (!promotionTy.isNull()) + cgf.emitPromotedScalarExpr(op, promotionTy); + else + cgf.emitScalarExpr(op); + + mlir::Type valueTy = + cgf.convertType(promotionTy.isNull() ? e->getType() : promotionTy); + return builder.getNullValue(valueTy, loc); } /// Return the size or alignment of the type of argument of the sizeof @@ -2355,4 +2372,4 @@ mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e, bool isPre) { return ScalarExprEmitter(*this, builder) .emitScalarPrePostIncDec(e, lv, kind, isPre); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index f43a0e60c9f5b..b26b4f2500579 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -342,10 +342,12 @@ void CIRGenFunction::LexicalScope::cleanup() { cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) { CIRGenBuilderTy &builder = cgf.getBuilder(); - if (!cgf.curFn.getFunctionType().hasVoidReturn()) { + auto fn = dyn_cast(cgf.curFn); + assert(fn && "emitReturn from non-function"); + if (!fn.getFunctionType().hasVoidReturn()) { // Load the value from `__retval` and return it via the `cir.return` op. auto value = builder.create( - loc, cgf.curFn.getFunctionType().getReturnType(), *cgf.fnRetAlloca); + loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca); return builder.create(loc, llvm::ArrayRef(value.getResult())); } @@ -459,7 +461,9 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType, const auto *md = cast(d); if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) { // We're in a lambda. - curFn.setLambda(true); + auto fn = dyn_cast(curFn); + assert(fn && "lambda in non-function region"); + fn.setLambda(true); // Figure out the captures. md->getParent()->getCaptureFields(lambdaCaptureFields, @@ -577,7 +581,10 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn, getCIRGenModule().errorNYI(bodyRange, "CUDA kernel"); } else if (isa(funcDecl) && cast(funcDecl)->isLambdaStaticInvoker()) { - getCIRGenModule().errorNYI(bodyRange, "Lambda static invoker"); + // The lambda static invoker function is special, because it forwards or + // clones the body of the function call operator (but is actually + // static). 
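// Editorial sketch (not part of the patch): the construct that reaches this
// path. Converting a capture-less lambda to a function pointer goes through
// the lambda's static invoker, which simply forwards to operator().
int (*fp)(int) = [](int x) { return x + 1; };  // emits the static invoker
auto generic = [](auto v) { return v; };
long (*gp)(long) = generic;  // generic lambda: forwards to the `long` specialization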
+ emitLambdaStaticInvokeBody(cast(funcDecl)); } else if (funcDecl->isDefaulted() && isa(funcDecl) && (cast(funcDecl)->isCopyAssignmentOperator() || cast(funcDecl)->isMoveAssignmentOperator())) { @@ -829,6 +836,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) { return emitCallExprLValue(cast(e)); case Expr::ParenExprClass: return emitLValue(cast(e)->getSubExpr()); + case Expr::GenericSelectionExprClass: + return emitLValue(cast(e)->getResultExpr()); case Expr::DeclRefExprClass: return emitDeclRefLValue(cast(e)); case Expr::CStyleCastExprClass: @@ -838,6 +847,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) { return emitCastLValue(cast(e)); case Expr::MaterializeTemporaryExprClass: return emitMaterializeTemporaryExpr(cast(e)); + case Expr::ChooseExprClass: + return emitLValue(cast(e)->getChosenSubExpr()); } } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a0c571a544322..cb7cf983006e9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -98,8 +98,10 @@ class CIRGenFunction : public CIRGenTypeCache { /// This is the inner-most code context, which includes blocks. const clang::Decl *curCodeDecl = nullptr; - /// The function for which code is currently being generated. - cir::FuncOp curFn; + /// The current function or global initializer that is generated code for. + /// This is usually a cir::FuncOp, but it can also be a cir::GlobalOp for + /// global initializers. + mlir::Operation *curFn = nullptr; using DeclMapTy = llvm::DenseMap; /// This keeps track of the CIR allocas or globals for local C @@ -116,7 +118,11 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenModule &getCIRGenModule() { return cgm; } const CIRGenModule &getCIRGenModule() const { return cgm; } - mlir::Block *getCurFunctionEntryBlock() { return &curFn.getRegion().front(); } + mlir::Block *getCurFunctionEntryBlock() { + // We currently assume this isn't called for a global initializer. + auto fn = mlir::cast(curFn); + return &fn.getRegion().front(); + } /// Sanitizers enabled for this function. clang::SanitizerSet sanOpts; @@ -473,55 +479,55 @@ class CIRGenFunction : public CIRGenTypeCache { ConstantEmission tryEmitAsConstant(const MemberExpr *me); struct AutoVarEmission { - const clang::VarDecl *Variable; + const clang::VarDecl *variable; /// The address of the alloca for languages with explicit address space /// (e.g. OpenCL) or alloca casted to generic pointer for address space /// agnostic languages (e.g. C++). Invalid if the variable was emitted /// as a global constant. - Address Addr; + Address addr; /// True if the variable is of aggregate type and has a constant /// initializer. - bool IsConstantAggregate = false; + bool isConstantAggregate = false; /// True if the variable is a __block variable that is captured by an /// escaping block. - bool IsEscapingByRef = false; + bool isEscapingByRef = false; /// True if the variable was emitted as an offload recipe, and thus doesn't /// have the same sort of alloca initialization. 
- bool EmittedAsOffload = false; + bool emittedAsOffload = false; - mlir::Value NRVOFlag{}; + mlir::Value nrvoFlag{}; struct Invalid {}; - AutoVarEmission(Invalid) : Variable(nullptr), Addr(Address::invalid()) {} + AutoVarEmission(Invalid) : variable(nullptr), addr(Address::invalid()) {} AutoVarEmission(const clang::VarDecl &variable) - : Variable(&variable), Addr(Address::invalid()) {} + : variable(&variable), addr(Address::invalid()) {} static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); } - bool wasEmittedAsGlobal() const { return !Addr.isValid(); } + bool wasEmittedAsGlobal() const { return !addr.isValid(); } - bool wasEmittedAsOffloadClause() const { return EmittedAsOffload; } + bool wasEmittedAsOffloadClause() const { return emittedAsOffload; } /// Returns the raw, allocated address, which is not necessarily /// the address of the object itself. It is casted to default /// address space for address space agnostic languages. - Address getAllocatedAddress() const { return Addr; } + Address getAllocatedAddress() const { return addr; } // Changes the stored address for the emission. This function should only // be used in extreme cases, and isn't required to model normal AST // initialization/variables. - void setAllocatedAddress(Address A) { Addr = A; } + void setAllocatedAddress(Address a) { addr = a; } /// Returns the address of the object within this declaration. /// Note that this does not chase the forwarding pointer for /// __block decls. Address getObjectAddress(CIRGenFunction &cgf) const { - if (!IsEscapingByRef) - return Addr; + if (!isEscapingByRef) + return addr; assert(!cir::MissingFeatures::opAllocaEscapeByReference()); return Address::invalid(); @@ -1197,6 +1203,8 @@ class CIRGenFunction : public CIRGenTypeCache { bool delegating, Address thisAddr, CallArgList &args, clang::SourceLocation loc); + void emitCXXDeleteExpr(const CXXDeleteExpr *e); + void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy); @@ -1244,6 +1252,9 @@ class CIRGenFunction : public CIRGenTypeCache { void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *ctor, const FunctionArgList &args); + void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, + QualType deleteTy); + mlir::LogicalResult emitDoStmt(const clang::DoStmt &s); /// Emit an expression as an initializer for an object (variable, field, etc.) @@ -1274,6 +1285,8 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType); + void emitReturnOfRValue(mlir::Location loc, RValue rv, QualType ty); + /// Emit the computation of the specified expression of scalar type. mlir::Value emitScalarExpr(const clang::Expr *e); @@ -1293,6 +1306,9 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult emitForStmt(const clang::ForStmt &s); + void emitForwardingCallToLambda(const CXXMethodDecl *lambdaCallOperator, + CallArgList &callArgs); + /// Emit the computation of the specified expression of complex type, /// returning the result. 
mlir::Value emitComplexExpr(const Expr *e); @@ -1355,6 +1371,9 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::LogicalResult emitLabel(const clang::LabelDecl &d); mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &s); + void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *md); + void emitLambdaStaticInvokeBody(const CXXMethodDecl *md); + mlir::LogicalResult emitIfStmt(const clang::IfStmt &s); /// Emit code to compute the specified expression, @@ -1708,6 +1727,10 @@ class CIRGenFunction : public CIRGenTypeCache { ~ActiveOpenACCLoopRAII() { cgf.activeLoopOp = oldLoopOp; } }; + // Keep track of the last place we inserted a 'recipe' so that we can insert + // the next one in lexical order. + mlir::OpBuilder::InsertPoint lastRecipeLocation; + public: // Helper type used to store the list of important information for a 'data' // clause variable, or a 'cache' variable reference. @@ -1715,9 +1738,17 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Location beginLoc; mlir::Value varValue; std::string name; + // The type of the original variable reference: that is, after 'bounds' have + // removed pointers/array types/etc. So in the case of int arr[5], and a + // private(arr[1]), 'origType' is 'int', but 'baseType' is 'int[5]'. + QualType origType; QualType baseType; llvm::SmallVector bounds; + // The list of types that we found when going through the bounds, which we + // can use to properly set the alloca section. + llvm::SmallVector boundTypes; }; + + // Gets the collection of info required to lower an OpenACC clause or cache + // construct variable reference. OpenACCDataOperandInfo getOpenACCDataOperandInfo(const Expr *e); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 0bf6cf556787c..debea8af66b50 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -103,6 +103,9 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { const CXXRecordDecl *rd) override; void emitVirtualInheritanceTables(const CXXRecordDecl *rd) override; + mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, + QualType ty) override; + bool doStructorsInitializeVPtrs(const CXXRecordDecl *vtableClass) override { return true; } @@ -111,6 +114,34 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &cgf, Address thisAddr, const CXXRecordDecl *classDecl, const CXXRecordDecl *baseClassDecl) override; + + /**************************** RTTI Uniqueness ******************************/ +protected: + /// Returns true if the ABI requires RTTI type_info objects to be unique + /// across a program. + virtual bool shouldRTTIBeUnique() const { return true; } + +public: + /// What sort of unique-RTTI behavior should we use? + enum RTTIUniquenessKind { + /// We are guaranteeing, or need to guarantee, that the RTTI string + /// is unique. + RUK_Unique, + + /// We are not guaranteeing uniqueness for the RTTI string, so we + /// can demote to hidden visibility but must use string comparisons. + RUK_NonUniqueHidden, + + /// We are not guaranteeing uniqueness for the RTTI string, so we + /// have to use string comparisons, but we also have to emit it with + /// non-hidden visibility. + RUK_NonUniqueVisible + }; + + /// Return the required visibility status for the given type and linkage in + /// the current ABI.
+ RTTIUniquenessKind + classifyRTTIUniqueness(QualType canTy, cir::GlobalLinkageKind linkage) const; }; } // namespace @@ -424,6 +455,1038 @@ void CIRGenItaniumCXXABI::emitVirtualInheritanceTables( vtables.emitVTTDefinition(vtt, cgm.getVTableLinkage(rd), rd); } +namespace { +class CIRGenItaniumRTTIBuilder { + CIRGenModule &cgm; // Per-module state. + const CIRGenItaniumCXXABI &cxxABI; // Per-module state. + + /// The fields of the RTTI descriptor currently being built. + SmallVector fields; + + // Returns the mangled type name of the given type. + cir::GlobalOp getAddrOfTypeName(mlir::Location loc, QualType ty, + cir::GlobalLinkageKind linkage); + + /// Return the external RTTI descriptor of the given type. + mlir::Attribute getAddrOfExternalRTTIDescriptor(mlir::Location loc, + QualType ty); + + /// Build the vtable pointer for the given type. + void buildVTablePointer(mlir::Location loc, const Type *ty); + + /// Build an abi::__si_class_type_info, used for single inheritance, according + /// to the Itanium C++ ABI, 2.9.5p6b. + void buildSIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *rd); + + /// Build an abi::__vmi_class_type_info, used for + /// classes with bases that do not satisfy the abi::__si_class_type_info + /// constraints, according to the Itanium C++ ABI, 2.9.5p5c. + void buildVMIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *rd); + +public: + CIRGenItaniumRTTIBuilder(const CIRGenItaniumCXXABI &abi, CIRGenModule &cgm) + : cgm(cgm), cxxABI(abi) {} + + /// Build the RTTI type info struct for the given type, or + /// link to an existing RTTI descriptor if one already exists. + mlir::Attribute buildTypeInfo(mlir::Location loc, QualType ty); + + /// Build the RTTI type info struct for the given type. + mlir::Attribute buildTypeInfo(mlir::Location loc, QualType ty, + cir::GlobalLinkageKind linkage, + mlir::SymbolTable::Visibility visibility); +}; +} // namespace + +// TODO(cir): Will be removed after sharing them with the classical codegen +namespace { + +// Pointer type info flags. +enum { + /// PTI_Const - Type has const qualifier. + PTI_Const = 0x1, + + /// PTI_Volatile - Type has volatile qualifier. + PTI_Volatile = 0x2, + + /// PTI_Restrict - Type has restrict qualifier. + PTI_Restrict = 0x4, + + /// PTI_Incomplete - Type is incomplete. + PTI_Incomplete = 0x8, + + /// PTI_ContainingClassIncomplete - Containing class is incomplete. + /// (in pointer to member). + PTI_ContainingClassIncomplete = 0x10, + + /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS). + // PTI_TransactionSafe = 0x20, + + /// PTI_Noexcept - Pointee is noexcept function (C++1z). + PTI_Noexcept = 0x40, +}; + +// VMI type info flags. +enum { + /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance. + VMI_NonDiamondRepeat = 0x1, + + /// VMI_DiamondShaped - Class is diamond shaped. + VMI_DiamondShaped = 0x2 +}; + +// Base class type info flags. +enum { + /// BCTI_Virtual - Base class is virtual. + BCTI_Virtual = 0x1, + + /// BCTI_Public - Base class is public. + BCTI_Public = 0x2 +}; + +/// Given a builtin type, returns whether the type +/// info for that type is defined in the standard library. +/// TODO(cir): this can be unified with LLVM codegen +static bool typeInfoIsInStandardLibrary(const BuiltinType *ty) { + // Itanium C++ ABI 2.9.2: + // Basic type information (e.g. for "int", "bool", etc.) will be kept in + // the run-time support library.
Specifically, the run-time support + // library should contain type_info objects for the types X, X* and + // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char, + // unsigned char, signed char, short, unsigned short, int, unsigned int, + // long, unsigned long, long long, unsigned long long, float, double, + // long double, char16_t, char32_t, and the IEEE 754r decimal and + // half-precision floating point types. + // + // GCC also emits RTTI for __int128. + // FIXME: We do not emit RTTI information for decimal types here. + + // Types added here must also be added to emitFundamentalRTTIDescriptors. + switch (ty->getKind()) { + case BuiltinType::WasmExternRef: + case BuiltinType::HLSLResource: + llvm_unreachable("NYI"); + case BuiltinType::Void: + case BuiltinType::NullPtr: + case BuiltinType::Bool: + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + case BuiltinType::Char_U: + case BuiltinType::Char_S: + case BuiltinType::UChar: + case BuiltinType::SChar: + case BuiltinType::Short: + case BuiltinType::UShort: + case BuiltinType::Int: + case BuiltinType::UInt: + case BuiltinType::Long: + case BuiltinType::ULong: + case BuiltinType::LongLong: + case BuiltinType::ULongLong: + case BuiltinType::Half: + case BuiltinType::Float: + case BuiltinType::Double: + case BuiltinType::LongDouble: + case BuiltinType::Float16: + case BuiltinType::Float128: + case BuiltinType::Ibm128: + case BuiltinType::Char8: + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::Int128: + case BuiltinType::UInt128: + return true; + +#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: +#include "clang/Basic/OpenCLImageTypes.def" +#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id: +#include "clang/Basic/OpenCLExtensionTypes.def" + case BuiltinType::OCLSampler: + case BuiltinType::OCLEvent: + case BuiltinType::OCLClkEvent: + case BuiltinType::OCLQueue: + case BuiltinType::OCLReserveID: +#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AArch64ACLETypes.def" +#define PPC_VECTOR_TYPE(Name, Id, Size) case BuiltinType::Id: +#include "clang/Basic/PPCTypes.def" +#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/RISCVVTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" + case BuiltinType::ShortAccum: + case BuiltinType::Accum: + case BuiltinType::LongAccum: + case BuiltinType::UShortAccum: + case BuiltinType::UAccum: + case BuiltinType::ULongAccum: + case BuiltinType::ShortFract: + case BuiltinType::Fract: + case BuiltinType::LongFract: + case BuiltinType::UShortFract: + case BuiltinType::UFract: + case BuiltinType::ULongFract: + case BuiltinType::SatShortAccum: + case BuiltinType::SatAccum: + case BuiltinType::SatLongAccum: + case BuiltinType::SatUShortAccum: + case BuiltinType::SatUAccum: + case BuiltinType::SatULongAccum: + case BuiltinType::SatShortFract: + case BuiltinType::SatFract: + case BuiltinType::SatLongFract: + case BuiltinType::SatUShortFract: + case BuiltinType::SatUFract: + case BuiltinType::SatULongFract: + case BuiltinType::BFloat16: + return false; + + case BuiltinType::Dependent: +#define BUILTIN_TYPE(Id, SingletonId) +#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id: +#include "clang/AST/BuiltinTypes.def" + llvm_unreachable("asking for RRTI for a placeholder type!"); + + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: 
+ llvm_unreachable("FIXME: Objective-C types are unsupported!"); + } + + llvm_unreachable("Invalid BuiltinType Kind!"); +} + +static bool typeInfoIsInStandardLibrary(const PointerType *pointerTy) { + QualType pointeeTy = pointerTy->getPointeeType(); + const auto *builtinTy = dyn_cast(pointeeTy); + if (!builtinTy) + return false; + + // Check the qualifiers. + Qualifiers quals = pointeeTy.getQualifiers(); + quals.removeConst(); + + if (!quals.empty()) + return false; + + return typeInfoIsInStandardLibrary(builtinTy); +} + +/// IsStandardLibraryRTTIDescriptor - Returns whether the type +/// information for the given type exists in the standard library. +static bool isStandardLibraryRttiDescriptor(QualType ty) { + // Type info for builtin types is defined in the standard library. + if (const auto *builtinTy = dyn_cast(ty)) + return typeInfoIsInStandardLibrary(builtinTy); + + // Type info for some pointer types to builtin types is defined in the + // standard library. + if (const auto *pointerTy = dyn_cast(ty)) + return typeInfoIsInStandardLibrary(pointerTy); + + return false; +} + +/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for +/// the given type exists somewhere else, and that we should not emit the type +/// information in this translation unit. Assumes that it is not a +/// standard-library type. +static bool shouldUseExternalRttiDescriptor(CIRGenModule &cgm, QualType ty) { + ASTContext &context = cgm.getASTContext(); + + // If RTTI is disabled, assume it might be disabled in the + // translation unit that defines any potential key function, too. + if (!context.getLangOpts().RTTI) + return false; + + if (const auto *recordTy = dyn_cast(ty)) { + const CXXRecordDecl *rd = + cast(recordTy->getOriginalDecl())->getDefinitionOrSelf(); + if (!rd->hasDefinition()) + return false; + + if (!rd->isDynamicClass()) + return false; + + // FIXME: this may need to be reconsidered if the key function + // changes. + // N.B. We must always emit the RTTI data ourselves if there exists a key + // function. + bool isDLLImport = rd->hasAttr(); + + // Don't import the RTTI but emit it locally. + if (cgm.getTriple().isOSCygMing()) + return false; + + if (cgm.getVTables().isVTableExternal(rd)) { + if (cgm.getTarget().hasPS4DLLImportExport()) + return true; + + return !isDLLImport || cgm.getTriple().isWindowsItaniumEnvironment(); + } + + if (isDLLImport) + return true; + } + + return false; +} + +/// Contains virtual and non-virtual bases seen when traversing a class +/// hierarchy. +struct SeenBases { + llvm::SmallPtrSet nonVirtualBases; + llvm::SmallPtrSet virtualBases; +}; + +/// Compute the value of the flags member in abi::__vmi_class_type_info. +/// +static unsigned computeVmiClassTypeInfoFlags(const CXXBaseSpecifier *base, + SeenBases &bases) { + + unsigned flags = 0; + auto *baseDecl = base->getType()->castAsCXXRecordDecl(); + + if (base->isVirtual()) { + // Mark the virtual base as seen. + if (!bases.virtualBases.insert(baseDecl).second) { + // If this virtual base has been seen before, then the class is diamond + // shaped. + flags |= VMI_DiamondShaped; + } else { + if (bases.nonVirtualBases.count(baseDecl)) + flags |= VMI_NonDiamondRepeat; + } + } else { + // Mark the non-virtual base as seen. + if (!bases.nonVirtualBases.insert(baseDecl).second) { + // If this non-virtual base has been seen before, then the class has non- + // diamond shaped repeated inheritance. 
+ flags |= VMI_NonDiamondRepeat; + } else { + if (bases.virtualBases.count(baseDecl)) + flags |= VMI_NonDiamondRepeat; + } + } + + // Walk all bases. + for (const auto &bs : baseDecl->bases()) + flags |= computeVmiClassTypeInfoFlags(&bs, bases); + + return flags; +} + +static unsigned computeVmiClassTypeInfoFlags(const CXXRecordDecl *rd) { + unsigned flags = 0; + SeenBases bases; + + // Walk all bases. + for (const auto &bs : rd->bases()) + flags |= computeVmiClassTypeInfoFlags(&bs, bases); + + return flags; +} + +// Return whether the given record decl has a "single, +// public, non-virtual base at offset zero (i.e. the derived class is dynamic +// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b. +// TODO(cir): this can be unified with LLVM codegen +static bool canUseSingleInheritance(const CXXRecordDecl *rd) { + // Check the number of bases. + if (rd->getNumBases() != 1) + return false; + + // Get the base. + CXXRecordDecl::base_class_const_iterator base = rd->bases_begin(); + + // Check that the base is not virtual. + if (base->isVirtual()) + return false; + + // Check that the base is public. + if (base->getAccessSpecifier() != AS_public) + return false; + + // Check that the class is dynamic iff the base is. + auto *baseDecl = base->getType()->castAsCXXRecordDecl(); + return baseDecl->isEmpty() || + baseDecl->isDynamicClass() == rd->isDynamicClass(); +} + +/// IsIncompleteClassType - Returns whether the given record type is incomplete. +static bool isIncompleteClassType(const RecordType *recordTy) { + return !recordTy->getOriginalDecl() + ->getDefinitionOrSelf() + ->isCompleteDefinition(); +} + +/// Returns whether the given type contains an +/// incomplete class type. This is true if +/// +/// * The given type is an incomplete class type. +/// * The given type is a pointer type whose pointee type contains an +/// incomplete class type. +/// * The given type is a member pointer type whose class is an incomplete +/// class type. +/// * The given type is a member pointer type whose pointee type contains an +/// incomplete class type. +/// is an indirect or direct pointer to an incomplete class type. +static bool containsIncompleteClassType(QualType ty) { + if (const auto *recordTy = dyn_cast(ty)) { + if (isIncompleteClassType(recordTy)) + return true; + } + + if (const auto *pointerTy = dyn_cast(ty)) + return containsIncompleteClassType(pointerTy->getPointeeType()); + + if (const auto *memberPointerTy = dyn_cast(ty)) { + // Check if the class type is incomplete. + if (!memberPointerTy->getMostRecentCXXRecordDecl()->hasDefinition()) + return true; + + return containsIncompleteClassType(memberPointerTy->getPointeeType()); + } + + return false; +} + +const char *vTableClassNameForType(const CIRGenModule &cgm, const Type *ty) { + // abi::__class_type_info. + static const char *const classTypeInfo = + "_ZTVN10__cxxabiv117__class_type_infoE"; + // abi::__si_class_type_info. + static const char *const siClassTypeInfo = + "_ZTVN10__cxxabiv120__si_class_type_infoE"; + // abi::__vmi_class_type_info.
+ static const char *const vmiClassTypeInfo = + "_ZTVN10__cxxabiv121__vmi_class_type_infoE"; + + switch (ty->getTypeClass()) { +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + case Type::LValueReference: + case Type::RValueReference: + llvm_unreachable("References shouldn't get here"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Undeduced type shouldn't get here"); + + case Type::Pipe: + llvm_unreachable("Pipe types shouldn't get here"); + + case Type::ArrayParameter: + llvm_unreachable("Array Parameter types should not get here."); + + case Type::Builtin: + case Type::BitInt: + // GCC treats vector and complex types as fundamental types. + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Complex: + case Type::Atomic: + // FIXME: GCC treats block pointers as fundamental types?! + case Type::BlockPointer: + cgm.errorNYI("VTableClassNameForType: __fundamental_type_info"); + break; + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + cgm.errorNYI("VTableClassNameForType: __array_type_info"); + break; + + case Type::FunctionNoProto: + case Type::FunctionProto: + cgm.errorNYI("VTableClassNameForType: __function_type_info"); + break; + + case Type::Enum: + cgm.errorNYI("VTableClassNameForType: Enum"); + break; + + case Type::Record: { + const CXXRecordDecl *rd = + cast(cast(ty)->getOriginalDecl()) + ->getDefinitionOrSelf(); + + if (!rd->hasDefinition() || !rd->getNumBases()) { + return classTypeInfo; + } + + if (canUseSingleInheritance(rd)) { + return siClassTypeInfo; + } + + return vmiClassTypeInfo; + } + + case Type::ObjCObject: + cgm.errorNYI("VTableClassNameForType: ObjCObject"); + break; + + case Type::ObjCInterface: + cgm.errorNYI("VTableClassNameForType: ObjCInterface"); + break; + + case Type::ObjCObjectPointer: + case Type::Pointer: + cgm.errorNYI("VTableClassNameForType: __pointer_type_info"); + break; + + case Type::MemberPointer: + cgm.errorNYI("VTableClassNameForType: __pointer_to_member_type_info"); + break; + + case Type::HLSLAttributedResource: + case Type::HLSLInlineSpirv: + llvm_unreachable("HLSL doesn't support virtual functions"); + } + + return nullptr; +} +} // namespace + +/// Return the linkage that the type info and type info name constants +/// should have for the given type. +static cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &cgm, + QualType ty) { + // In addition, it and all of the intermediate abi::__pointer_type_info + // structs in the chain down to the abi::__class_type_info for the + // incomplete class type must be prevented from resolving to the + // corresponding type_info structs for the complete class type, possibly + // by making them local static objects. Finally, a dummy class RTTI is + // generated for the incomplete type that will not resolve to the final + // complete class RTTI (because the latter need not exist), possibly by + // making it a local static object. 
+ if (containsIncompleteClassType(ty)) + return cir::GlobalLinkageKind::InternalLinkage; + + switch (ty->getLinkage()) { + case Linkage::Invalid: + llvm_unreachable("Linkage hasn't been computed!"); + + case Linkage::None: + case Linkage::Internal: + case Linkage::UniqueExternal: + return cir::GlobalLinkageKind::InternalLinkage; + + case Linkage::VisibleNone: + case Linkage::Module: + case Linkage::External: + // RTTI is not enabled, which means that this type info struct is going + // to be used for exception handling. Give it linkonce_odr linkage. + if (!cgm.getLangOpts().RTTI) + return cir::GlobalLinkageKind::LinkOnceODRLinkage; + + if (const RecordType *record = dyn_cast(ty)) { + const CXXRecordDecl *rd = + cast(record->getOriginalDecl())->getDefinitionOrSelf(); + if (rd->hasAttr()) + return cir::GlobalLinkageKind::WeakODRLinkage; + + if (cgm.getTriple().isWindowsItaniumEnvironment()) + if (rd->hasAttr() && + shouldUseExternalRttiDescriptor(cgm, ty)) + return cir::GlobalLinkageKind::ExternalLinkage; + + // MinGW always uses LinkOnceODRLinkage for type info. + if (rd->isDynamicClass() && !cgm.getASTContext() + .getTargetInfo() + .getTriple() + .isWindowsGNUEnvironment()) + return cgm.getVTableLinkage(rd); + } + + return cir::GlobalLinkageKind::LinkOnceODRLinkage; + } + + llvm_unreachable("Invalid linkage!"); +} + +cir::GlobalOp +CIRGenItaniumRTTIBuilder::getAddrOfTypeName(mlir::Location loc, QualType ty, + cir::GlobalLinkageKind linkage) { + CIRGenBuilderTy &builder = cgm.getBuilder(); + SmallString<256> name; + llvm::raw_svector_ostream out(name); + cgm.getCXXABI().getMangleContext().mangleCXXRTTIName(ty, out); + + // We know that the mangled name of the type starts at index 4 of the + // mangled name of the typename, so we can just index into it in order to + // get the mangled name of the type. + mlir::Attribute init = builder.getString( + name.substr(4), cgm.convertType(cgm.getASTContext().CharTy), + std::nullopt); + + CharUnits align = + cgm.getASTContext().getTypeAlignInChars(cgm.getASTContext().CharTy); + + // builder.getString can return a #cir.zero if the string given to it only + // contains null bytes. However, type names cannot be full of null bytes. + // So cast Init to a ConstArrayAttr should be safe. + auto initStr = cast(init); + + cir::GlobalOp gv = cgm.createOrReplaceCXXRuntimeVariable( + loc, name, initStr.getType(), linkage, align); + CIRGenModule::setInitializer(gv, init); + return gv; +} + +mlir::Attribute +CIRGenItaniumRTTIBuilder::getAddrOfExternalRTTIDescriptor(mlir::Location loc, + QualType ty) { + // Mangle the RTTI name. + SmallString<256> name; + llvm::raw_svector_ostream out(name); + cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out); + CIRGenBuilderTy &builder = cgm.getBuilder(); + + // Look for an existing global. + cir::GlobalOp gv = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name)); + + if (!gv) { + // Create a new global variable. + // From LLVM codegen => Note for the future: If we would ever like to do + // deferred emission of RTTI, check if emitting vtables opportunistically + // need any adjustment. + gv = CIRGenModule::createGlobalOp(cgm, loc, name, builder.getUInt8PtrTy(), + /*isConstant=*/true); + const CXXRecordDecl *rd = ty->getAsCXXRecordDecl(); + cgm.setGVProperties(gv, rd); + + // Import the typeinfo symbol when all non-inline virtual methods are + // imported. 
+ if (cgm.getTarget().hasPS4DLLImportExport()) { + cgm.errorNYI("getAddrOfExternalRTTIDescriptor: hasPS4DLLImportExport"); + } + } + + return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), gv); +} + +void CIRGenItaniumRTTIBuilder::buildVTablePointer(mlir::Location loc, + const Type *ty) { + CIRGenBuilderTy &builder = cgm.getBuilder(); + const char *vTableName = vTableClassNameForType(cgm, ty); + + // Check if the alias exists. If it doesn't, then get or create the global. + if (cgm.getItaniumVTableContext().isRelativeLayout()) { + cgm.errorNYI("buildVTablePointer: isRelativeLayout"); + return; + } + + mlir::Type vtableGlobalTy = builder.getPointerTo(builder.getUInt8PtrTy()); + llvm::Align align = cgm.getDataLayout().getABITypeAlign(vtableGlobalTy); + cir::GlobalOp vTable = cgm.createOrReplaceCXXRuntimeVariable( + loc, vTableName, vtableGlobalTy, cir::GlobalLinkageKind::ExternalLinkage, + CharUnits::fromQuantity(align)); + + // The vtable address point is 2. + mlir::Attribute field{}; + if (cgm.getItaniumVTableContext().isRelativeLayout()) { + cgm.errorNYI("buildVTablePointer: isRelativeLayout"); + } else { + SmallVector offsets{ + cgm.getBuilder().getI32IntegerAttr(2)}; + auto indices = mlir::ArrayAttr::get(builder.getContext(), offsets); + field = cgm.getBuilder().getGlobalViewAttr(cgm.getBuilder().getUInt8PtrTy(), + vTable, indices); + } + + assert(field && "expected attribute"); + fields.push_back(field); +} + +/// Build an abi::__si_class_type_info, used for single inheritance, according +/// to the Itanium C++ ABI, 2.95p6b. +void CIRGenItaniumRTTIBuilder::buildSIClassTypeInfo(mlir::Location loc, + const CXXRecordDecl *rd) { + // Itanium C++ ABI 2.9.5p6b: + // It adds to abi::__class_type_info a single member pointing to the + // type_info structure for the base type, + mlir::Attribute baseTypeInfo = + CIRGenItaniumRTTIBuilder(cxxABI, cgm) + .buildTypeInfo(loc, rd->bases_begin()->getType()); + fields.push_back(baseTypeInfo); +} + +/// Build an abi::__vmi_class_type_info, used for +/// classes with bases that do not satisfy the abi::__si_class_type_info +/// constraints, according to the Itanium C++ ABI, 2.9.5p5c. +void CIRGenItaniumRTTIBuilder::buildVMIClassTypeInfo(mlir::Location loc, + const CXXRecordDecl *rd) { + mlir::Type unsignedIntLTy = + cgm.convertType(cgm.getASTContext().UnsignedIntTy); + + // Itanium C++ ABI 2.9.5p6c: + // __flags is a word with flags describing details about the class + // structure, which may be referenced by using the __flags_masks + // enumeration. These flags refer to both direct and indirect bases. + unsigned flags = computeVmiClassTypeInfoFlags(rd); + fields.push_back(cir::IntAttr::get(unsignedIntLTy, flags)); + + // Itanium C++ ABI 2.9.5p6c: + // __base_count is a word with the number of direct proper base class + // descriptions that follow. + fields.push_back(cir::IntAttr::get(unsignedIntLTy, rd->getNumBases())); + + if (!rd->getNumBases()) + return; + + // Now add the base class descriptions. + + // Itanium C++ ABI 2.9.5p6c: + // __base_info[] is an array of base class descriptions -- one for every + // direct proper base. Each description is of the type: + // + // struct abi::__base_class_type_info { + // public: + // const __class_type_info *__base_type; + // long __offset_flags; + // + // enum __offset_flags_masks { + // __virtual_mask = 0x1, + // __public_mask = 0x2, + // __offset_shift = 8 + // }; + // }; + + // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long + // long' instead of 'long' for __offset_flags. 
libstdc++abi uses long long on + // LLP64 platforms. + // FIXME: Consider updating libc++abi to match, and extend this logic to all + // LLP64 platforms. + QualType offsetFlagsTy = cgm.getASTContext().LongTy; + const TargetInfo &ti = cgm.getASTContext().getTargetInfo(); + if (ti.getTriple().isOSCygMing() && + ti.getPointerWidth(LangAS::Default) > ti.getLongWidth()) + offsetFlagsTy = cgm.getASTContext().LongLongTy; + mlir::Type offsetFlagsLTy = cgm.convertType(offsetFlagsTy); + + for (const CXXBaseSpecifier &base : rd->bases()) { + // The __base_type member points to the RTTI for the base type. + fields.push_back(CIRGenItaniumRTTIBuilder(cxxABI, cgm) + .buildTypeInfo(loc, base.getType())); + + CXXRecordDecl *baseDecl = base.getType()->castAsCXXRecordDecl(); + int64_t offsetFlags = 0; + + // All but the lower 8 bits of __offset_flags are a signed offset. + // For a non-virtual base, this is the offset in the object of the base + // subobject. For a virtual base, this is the offset in the virtual table of + // the virtual base offset for the virtual base referenced (negative). + CharUnits offset; + if (base.isVirtual()) + offset = cgm.getItaniumVTableContext().getVirtualBaseOffsetOffset( + rd, baseDecl); + else { + const ASTRecordLayout &layout = + cgm.getASTContext().getASTRecordLayout(rd); + offset = layout.getBaseClassOffset(baseDecl); + } + offsetFlags = uint64_t(offset.getQuantity()) << 8; + + // The low-order byte of __offset_flags contains flags, as given by the + // masks from the enumeration __offset_flags_masks. + if (base.isVirtual()) + offsetFlags |= BCTI_Virtual; + if (base.getAccessSpecifier() == AS_public) + offsetFlags |= BCTI_Public; + + fields.push_back(cir::IntAttr::get(offsetFlagsLTy, offsetFlags)); + } +} + +mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo(mlir::Location loc, + QualType ty) { + // We want to operate on the canonical type. + ty = ty.getCanonicalType(); + + // Check if we've already emitted an RTTI descriptor for this type. + SmallString<256> name; + llvm::raw_svector_ostream out(name); + cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out); + + auto oldGV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name)); + + if (oldGV && !oldGV.isDeclaration()) { + assert(!oldGV.hasAvailableExternallyLinkage() && + "available_externally typeinfos not yet implemented"); + return cgm.getBuilder().getGlobalViewAttr(cgm.getBuilder().getUInt8PtrTy(), + oldGV); + } + + // Check if there is already an external RTTI descriptor for this type. + if (isStandardLibraryRttiDescriptor(ty) || + shouldUseExternalRttiDescriptor(cgm, ty)) + return getAddrOfExternalRTTIDescriptor(loc, ty); + + // Emit the standard library with external linkage. + cir::GlobalLinkageKind linkage = getTypeInfoLinkage(cgm, ty); + + // Give the type_info object and name the formal visibility of the + // type itself. + assert(!cir::MissingFeatures::hiddenVisibility()); + assert(!cir::MissingFeatures::protectedVisibility()); + + mlir::SymbolTable::Visibility symVisibility; + if (cir::isLocalLinkage(linkage)) + // If the linkage is local, only default visibility makes sense. 
+ symVisibility = mlir::SymbolTable::Visibility::Public; + else if (cxxABI.classifyRTTIUniqueness(ty, linkage) == + CIRGenItaniumCXXABI::RUK_NonUniqueHidden) { + cgm.errorNYI( + "buildTypeInfo: classifyRTTIUniqueness == RUK_NonUniqueHidden"); + symVisibility = CIRGenModule::getMLIRVisibility(ty->getVisibility()); + } else + symVisibility = CIRGenModule::getMLIRVisibility(ty->getVisibility()); + + return buildTypeInfo(loc, ty, linkage, symVisibility); +} + +mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo( + mlir::Location loc, QualType ty, cir::GlobalLinkageKind linkage, + mlir::SymbolTable::Visibility visibility) { + CIRGenBuilderTy &builder = cgm.getBuilder(); + + assert(!cir::MissingFeatures::setDLLStorageClass()); + + // Add the vtable pointer. + buildVTablePointer(loc, cast(ty)); + + // And the name. + cir::GlobalOp typeName = getAddrOfTypeName(loc, ty, linkage); + mlir::Attribute typeNameField; + + // If we're supposed to demote the visibility, be sure to set a flag + // to use a string comparison for type_info comparisons. + CIRGenItaniumCXXABI::RTTIUniquenessKind rttiUniqueness = + cxxABI.classifyRTTIUniqueness(ty, linkage); + if (rttiUniqueness != CIRGenItaniumCXXABI::RUK_Unique) { + // The flag is the sign bit, which on ARM64 is defined to be clear + // for global pointers. This is very ARM64-specific. + cgm.errorNYI( + "buildTypeInfo: rttiUniqueness != CIRGenItaniumCXXABI::RUK_Unique"); + } else { + typeNameField = + builder.getGlobalViewAttr(builder.getUInt8PtrTy(), typeName); + } + + fields.push_back(typeNameField); + + switch (ty->getTypeClass()) { +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("Non-canonical and dependent types shouldn't get here"); + + // GCC treats vector types as fundamental types. + case Type::Builtin: + case Type::Vector: + case Type::ExtVector: + case Type::ConstantMatrix: + case Type::Complex: + case Type::BlockPointer: + // Itanium C++ ABI 2.9.5p4: + // abi::__fundamental_type_info adds no data members to std::type_info. + break; + + case Type::LValueReference: + case Type::RValueReference: + llvm_unreachable("References shouldn't get here"); + + case Type::Auto: + case Type::DeducedTemplateSpecialization: + llvm_unreachable("Undeduced type shouldn't get here"); + + case Type::Pipe: + break; + + case Type::BitInt: + break; + + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + case Type::ArrayParameter: + // Itanium C++ ABI 2.9.5p5: + // abi::__array_type_info adds no data members to std::type_info. + break; + + case Type::FunctionNoProto: + case Type::FunctionProto: + // Itanium C++ ABI 2.9.5p5: + // abi::__function_type_info adds no data members to std::type_info. + break; + + case Type::Enum: + // Itanium C++ ABI 2.9.5p5: + // abi::__enum_type_info adds no data members to std::type_info. + break; + + case Type::Record: { + const auto *rd = + cast(cast(ty)->getOriginalDecl()) + ->getDefinitionOrSelf(); + if (!rd->hasDefinition() || !rd->getNumBases()) { + // We don't need to emit any fields. 
+ break; + } + + if (canUseSingleInheritance(rd)) { + buildSIClassTypeInfo(loc, rd); + } else { + buildVMIClassTypeInfo(loc, rd); + } + + break; + } + + case Type::ObjCObject: + case Type::ObjCInterface: + cgm.errorNYI("buildTypeInfo: ObjCObject & ObjCInterface"); + break; + + case Type::ObjCObjectPointer: + cgm.errorNYI("buildTypeInfo: ObjCObjectPointer"); + break; + + case Type::Pointer: + cgm.errorNYI("buildTypeInfo: Pointer"); + break; + + case Type::MemberPointer: + cgm.errorNYI("buildTypeInfo: MemberPointer"); + break; + + case Type::Atomic: + // No fields, at least for the moment. + break; + + case Type::HLSLAttributedResource: + case Type::HLSLInlineSpirv: + llvm_unreachable("HLSL doesn't support RTTI"); + } + + assert(!cir::MissingFeatures::opGlobalDLLImportExport()); + cir::TypeInfoAttr init = builder.getTypeInfo(builder.getArrayAttr(fields)); + + SmallString<256> name; + llvm::raw_svector_ostream out(name); + cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out); + + // Create new global and search for an existing global. + auto oldGV = dyn_cast_or_null( + mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name)); + + cir::GlobalOp gv = + CIRGenModule::createGlobalOp(cgm, loc, name, init.getType(), + /*isConstant=*/true); + + // Export the typeinfo in the same circumstances as the vtable is + // exported. + if (cgm.getTarget().hasPS4DLLImportExport()) { + cgm.errorNYI("buildTypeInfo: target hasPS4DLLImportExport"); + return {}; + } + + // If there's already an old global variable, replace it with the new one. + if (oldGV) { + // Replace occurrences of the old variable if needed. + gv.setName(oldGV.getName()); + if (!oldGV->use_empty()) { + cgm.errorNYI("buildTypeInfo: old GV !use_empty"); + return {}; + } + oldGV->erase(); + } + + if (cgm.supportsCOMDAT() && cir::isWeakForLinker(gv.getLinkage())) { + assert(!cir::MissingFeatures::setComdat()); + cgm.errorNYI("buildTypeInfo: supportsCOMDAT & isWeakForLinker"); + return {}; + } + + CharUnits align = cgm.getASTContext().toCharUnitsFromBits( + cgm.getTarget().getPointerAlign(LangAS::Default)); + gv.setAlignmentAttr(cgm.getSize(align)); + + // The Itanium ABI specifies that type_info objects must be globally + // unique, with one exception: if the type is an incomplete class + // type or a (possibly indirect) pointer to one. That exception + // affects the general case of comparing type_info objects produced + // by the typeid operator, which is why the comparison operators on + // std::type_info generally use the type_info name pointers instead + // of the object addresses. However, the language's built-in uses + // of RTTI generally require class types to be complete, even when + // manipulating pointers to those class types. This allows the + // implementation of dynamic_cast to rely on address equality tests, + // which is much faster. + + // All of this is to say that it's important that both the type_info + // object and the type_info name be uniqued when weakly emitted. 
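// Illustrative sketch (editor's addition, not part of this patch): the
// uniqueness property described in the comment above is what lets a C++
// runtime choose between pointer equality and string comparison when
// implementing std::type_info equality. Assuming a libc++abi-style runtime,
// the hypothetical helper below shows both strategies; the RUK_NonUnique*
// kinds force the string-comparison path.

#include <cstring>
#include <typeinfo>

static bool typeInfoEquals(const std::type_info &lhs,
                           const std::type_info &rhs, bool rttiIsUnique) {
  if (rttiIsUnique)
    return &lhs == &rhs; // unique emission: address equality is sufficient
  // Non-unique emission: fall back to comparing the mangled name strings.
  return std::strcmp(lhs.name(), rhs.name()) == 0;
}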
+ + mlir::SymbolTable::setSymbolVisibility(typeName, visibility); + assert(!cir::MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::opGlobalPartition()); + assert(!cir::MissingFeatures::setDSOLocal()); + + mlir::SymbolTable::setSymbolVisibility(gv, visibility); + assert(!cir::MissingFeatures::setDLLStorageClass()); + assert(!cir::MissingFeatures::opGlobalPartition()); + assert(!cir::MissingFeatures::setDSOLocal()); + + CIRGenModule::setInitializer(gv, init); + return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), gv); +} + +mlir::Attribute CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(mlir::Location loc, + QualType ty) { + return CIRGenItaniumRTTIBuilder(*this, cgm).buildTypeInfo(loc, ty); +} + +/// What sort of uniqueness rules should we use for the RTTI for the +/// given type? +CIRGenItaniumCXXABI::RTTIUniquenessKind +CIRGenItaniumCXXABI::classifyRTTIUniqueness( + QualType canTy, cir::GlobalLinkageKind linkage) const { + if (shouldRTTIBeUnique()) + return RUK_Unique; + + // It's only necessary for linkonce_odr or weak_odr linkage. + if (linkage != cir::GlobalLinkageKind::LinkOnceODRLinkage && + linkage != cir::GlobalLinkageKind::WeakODRLinkage) + return RUK_Unique; + + // It's only necessary with default visibility. + if (canTy->getVisibility() != DefaultVisibility) + return RUK_Unique; + + // If we're not required to publish this symbol, hide it. + if (linkage == cir::GlobalLinkageKind::LinkOnceODRLinkage) + return RUK_NonUniqueHidden; + + // If we're required to publish this symbol, as we might be under an + // explicit instantiation, leave it with default visibility but + // enable string-comparisons. + assert(linkage == cir::GlobalLinkageKind::WeakODRLinkage); + return RUK_NonUniqueVisible; +} + void CIRGenItaniumCXXABI::emitDestructorCall( CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) { diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index eef23a0ebda7f..2bd2729f0b0fb 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -119,6 +119,19 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext, cir::OptInfoAttr::get(&mlirContext, cgo.OptimizationLevel, cgo.OptimizeSize)); + // Set the module name to be the name of the main file. TranslationUnitDecl + // often contains invalid source locations and isn't a reliable source for the + // module location. + FileID mainFileId = astContext.getSourceManager().getMainFileID(); + const FileEntry &mainFile = + *astContext.getSourceManager().getFileEntryForID(mainFileId); + StringRef path = mainFile.tryGetRealPathName(); + if (!path.empty()) { + theModule.setSymName(path); + theModule->setLoc(mlir::FileLineColLoc::get(&mlirContext, path, + /*line=*/0, + /*column=*/0)); + } } CIRGenModule::~CIRGenModule() = default; @@ -717,7 +730,6 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd, // since this is the job for its original source. bool isDefinitionAvailableExternally = astContext.GetGVALinkageForVariable(vd) == GVA_AvailableExternally; - assert(!cir::MissingFeatures::needsGlobalCtorDtor()); // It is useless to emit the definition for an available_externally variable // which can't be marked as const. 
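// Editor's note (illustrative, not part of this patch): the hunks below drop
// the "global constructor" / "delayed destructor" errorNYI paths in favor of
// needsGlobalCtor / needsGlobalDtor and a call to emitCXXGlobalVarDeclInitFunc.
// Hypothetical C++ inputs that should now take that path (assumed examples,
// not taken from this patch's tests):
//
//   int init();
//   int g = init();      // non-constant initializer -> needsGlobalCtor
//
//   struct S { ~S(); };
//   S s;                 // non-trivial destructor   -> needsGlobalDtor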
@@ -730,6 +742,10 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd, return; mlir::Attribute init; + bool needsGlobalCtor = false; + bool needsGlobalDtor = + !isDefinitionAvailableExternally && + vd->needsDestruction(astContext) == QualType::DK_cxx_destructor; const VarDecl *initDecl; const Expr *initExpr = vd->getAnyInitializer(initDecl); @@ -764,8 +780,8 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd, if (initDecl->hasFlexibleArrayInit(astContext)) errorNYI(vd->getSourceRange(), "flexible array initializer"); init = builder.getZeroInitAttr(convertType(qt)); - if (astContext.GetGVALinkageForVariable(vd) != GVA_AvailableExternally) - errorNYI(vd->getSourceRange(), "global constructor"); + if (!isDefinitionAvailableExternally) + needsGlobalCtor = true; } else { errorNYI(vd->getSourceRange(), "static initializer"); } @@ -774,8 +790,7 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd, // We don't need an initializer, so remove the entry for the delayed // initializer position (just in case this entry was delayed) if we // also don't need to register a destructor. - if (vd->needsDestruction(astContext) == QualType::DK_cxx_destructor) - errorNYI(vd->getSourceRange(), "delayed destructor"); + assert(!cir::MissingFeatures::deferredCXXGlobalInit()); } } @@ -814,6 +829,9 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd, if (emitter) emitter->finalize(gv); + assert(!cir::MissingFeatures::opGlobalConstant()); + assert(!cir::MissingFeatures::opGlobalSection()); + // Set CIR's linkage type as appropriate. cir::GlobalLinkageKind linkage = getCIRLinkageVarDefinition(vd, /*IsConstant=*/false); @@ -831,6 +849,10 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd, assert(!cir::MissingFeatures::opGlobalThreadLocal()); maybeSetTrivialComdat(*vd, gv); + + // Emit the initializer function if necessary. + if (needsGlobalCtor || needsGlobalDtor) + emitCXXGlobalVarDeclInitFunc(vd, gv, needsGlobalCtor); } void CIRGenModule::emitGlobalDefinition(clang::GlobalDecl gd, @@ -2171,8 +2193,13 @@ mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc, if (!shouldEmitRTTI(forEh)) return builder.getConstNullPtrAttr(builder.getUInt8PtrTy()); - errorNYI(loc, "getAddrOfRTTIDescriptor"); - return mlir::Attribute(); + if (forEh && ty->isObjCObjectPointerType() && + langOpts.ObjCRuntime.isGNUFamily()) { + errorNYI(loc, "getAddrOfRTTIDescriptor: Objc PtrType & Objc RT GUN"); + return {}; + } + + return getCXXABI().getAddrOfRTTIDescriptor(loc, ty); } // TODO(cir): this can be shared with LLVM codegen. diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 073e8d96b773b..2c4c6dd14e2ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -256,6 +256,24 @@ class CIRGenModule : public CIRGenTypeCache { mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType ty, bool forEH = false); + static mlir::SymbolTable::Visibility getMLIRVisibility(Visibility v) { + switch (v) { + case DefaultVisibility: + return mlir::SymbolTable::Visibility::Public; + case HiddenVisibility: + return mlir::SymbolTable::Visibility::Private; + case ProtectedVisibility: + // The distinction between ProtectedVisibility and DefaultVisibility is + // that symbols with ProtectedVisibility, while visible to the dynamic + // linker like DefaultVisibility, are guaranteed to always dynamically + // resolve to a symbol in the current shared object. 
There is currently no + // equivalent MLIR visibility, so we fall back on the fact that the symbol + // is visible. + return mlir::SymbolTable::Visibility::Public; + } + llvm_unreachable("unknown visibility!"); + } + /// Return a constant array for the given string. mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *e); @@ -408,6 +426,13 @@ class CIRGenModule : public CIRGenTypeCache { void emitGlobalVarDefinition(const clang::VarDecl *vd, bool isTentative = false); + /// Emit the function that initializes the specified global + void emitCXXGlobalVarDeclInit(const VarDecl *varDecl, cir::GlobalOp addr, + bool performInit); + + void emitCXXGlobalVarDeclInitFunc(const VarDecl *vd, cir::GlobalOp addr, + bool performInit); + void emitGlobalOpenACCDecl(const clang::OpenACCConstructDecl *cd); // C++ related functions. diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp index 907cb5fa11401..a9af753381db3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACC.cpp @@ -62,20 +62,39 @@ mlir::Value CIRGenFunction::createOpenACCConstantInt(mlir::Location loc, auto constOp = builder.create( loc, builder.getIntegerAttr(ty, value)); - return constOp.getResult(); + return constOp; } CIRGenFunction::OpenACCDataOperandInfo CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) { const Expr *curVarExpr = e->IgnoreParenImpCasts(); + QualType origType = + curVarExpr->getType().getNonReferenceType().getUnqualifiedType(); + // Array sections are special, and we have to treat them that way. + if (const auto *section = + dyn_cast(curVarExpr->IgnoreParenImpCasts())) + origType = ArraySectionExpr::getBaseOriginalType(section); mlir::Location exprLoc = cgm.getLoc(curVarExpr->getBeginLoc()); llvm::SmallVector bounds; + llvm::SmallVector boundTypes; std::string exprString; llvm::raw_string_ostream os(exprString); e->printPretty(os, nullptr, getContext().getPrintingPolicy()); + auto addBoundType = [&](const Expr *e) { + if (const auto *section = dyn_cast(curVarExpr)) { + QualType baseTy = ArraySectionExpr::getBaseOriginalType( + section->getBase()->IgnoreParenImpCasts()); + boundTypes.push_back(QualType(baseTy->getPointeeOrArrayElementType(), 0)); + } else { + boundTypes.push_back(curVarExpr->getType()); + } + }; + + addBoundType(curVarExpr); + while (isa(curVarExpr)) { mlir::Location boundLoc = cgm.getLoc(curVarExpr->getBeginLoc()); mlir::Value lowerBound; @@ -115,19 +134,28 @@ CIRGenFunction::getOpenACCDataOperandInfo(const Expr *e) { bounds.push_back(createBound(*this, this->builder, boundLoc, lowerBound, upperBound, extent)); + addBoundType(curVarExpr); } if (const auto *memExpr = dyn_cast(curVarExpr)) - return {exprLoc, emitMemberExpr(memExpr).getPointer(), exprString, + return {exprLoc, + emitMemberExpr(memExpr).getPointer(), + exprString, + origType, curVarExpr->getType().getNonReferenceType().getUnqualifiedType(), - std::move(bounds)}; + std::move(bounds), + std::move(boundTypes)}; // Sema has made sure that only 4 types of things can get here, array // subscript, array section, member expr, or DRE to a var decl (or the // former 3 wrapping a var-decl), so we should be able to assume this is // right. 
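// Editor's note (illustrative, not part of this patch): reusing the example
// from the OpenACCDataOperandInfo comment in CIRGenFunction.h, a clause like
// private(arr[1]) on `int arr[5]` would produce operand info with
// origType == 'int' (the type once the bound is applied), baseType ==
// 'int[5]' (the referenced variable's type), one entry in 'bounds', and
// boundTypes holding the type at each peeled level plus the base, assumed
// here to be {int, int[5]}.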
const auto *dre = cast(curVarExpr); - return {exprLoc, emitDeclRefLValue(dre).getPointer(), exprString, + return {exprLoc, + emitDeclRefLValue(dre).getPointer(), + exprString, + origType, curVarExpr->getType().getNonReferenceType().getUnqualifiedType(), - std::move(bounds)}; + std::move(bounds), + std::move(boundTypes)}; } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp index 9959cf6c15792..3d86f71b077e2 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp @@ -53,6 +53,7 @@ class OpenACCClauseCIREmitter final template friend class OpenACCClauseCIREmitter; OpTy &operation; + mlir::OpBuilder::InsertPoint &recipeInsertLocation; CIRGen::CIRGenFunction &cgf; CIRGen::CIRGenBuilderTy &builder; @@ -109,7 +110,7 @@ class OpenACCClauseCIREmitter final auto constOp = builder.create( loc, builder.getIntegerAttr(ty, value)); - return constOp.getResult(); + return constOp; } mlir::Value createConstantInt(SourceLocation loc, unsigned width, @@ -148,7 +149,7 @@ class OpenACCClauseCIREmitter final mlir::OpBuilder::InsertionGuard guardCase(builder); builder.setInsertionPoint(operation.loopOp); OpenACCClauseCIREmitter loopEmitter{ - operation.loopOp, cgf, builder, dirKind, dirLoc}; + operation.loopOp, recipeInsertLocation, cgf, builder, dirKind, dirLoc}; loopEmitter.lastDeviceTypeValues = lastDeviceTypeValues; loopEmitter.Visit(&c); } @@ -159,7 +160,12 @@ class OpenACCClauseCIREmitter final mlir::OpBuilder::InsertionGuard guardCase(builder); builder.setInsertionPoint(operation.computeOp); OpenACCClauseCIREmitter computeEmitter{ - operation.computeOp, cgf, builder, dirKind, dirLoc}; + operation.computeOp, + recipeInsertLocation, + cgf, + builder, + dirKind, + dirLoc}; computeEmitter.lastDeviceTypeValues = lastDeviceTypeValues; @@ -224,13 +230,13 @@ class OpenACCClauseCIREmitter final std::is_same_v) { // Detach/Delete ops don't have the variable reference here, so they // take 1 fewer argument to their build function. - afterOp = builder.create( - opInfo.beginLoc, beforeOp.getResult(), structured, implicit, - opInfo.name, opInfo.bounds); + afterOp = + builder.create(opInfo.beginLoc, beforeOp, structured, + implicit, opInfo.name, opInfo.bounds); } else { afterOp = builder.create( - opInfo.beginLoc, beforeOp.getResult(), opInfo.varValue, structured, - implicit, opInfo.name, opInfo.bounds); + opInfo.beginLoc, beforeOp, opInfo.varValue, structured, implicit, + opInfo.name, opInfo.bounds); } } @@ -358,11 +364,13 @@ class OpenACCClauseCIREmitter final } public: - OpenACCClauseCIREmitter(OpTy &operation, CIRGen::CIRGenFunction &cgf, + OpenACCClauseCIREmitter(OpTy &operation, + mlir::OpBuilder::InsertPoint &recipeInsertLocation, + CIRGen::CIRGenFunction &cgf, CIRGen::CIRGenBuilderTy &builder, OpenACCDirectiveKind dirKind, SourceLocation dirLoc) - : operation(operation), cgf(cgf), builder(builder), dirKind(dirKind), - dirLoc(dirLoc) {} + : operation(operation), recipeInsertLocation(recipeInsertLocation), + cgf(cgf), builder(builder), dirKind(dirKind), dirLoc(dirLoc) {} void VisitClause(const OpenACCClause &clause) { clauseNotImplemented(clause); @@ -988,20 +996,16 @@ class OpenACCClauseCIREmitter final { mlir::OpBuilder::InsertionGuard guardCase(builder); - // TODO: OpenACC: At the moment this is a bit of a hacky way of doing - // this, and won't work when we get to bounds/etc. Do this for now to - // limit the scope of this refactor. 
- VarDecl *allocaDecl = varRecipe.AllocaDecl; - allocaDecl->setInit(varRecipe.InitExpr); - allocaDecl->setInitStyle(VarDecl::CallInit); auto recipe = OpenACCRecipeBuilder(cgf, builder) - .getOrCreateRecipe(cgf.getContext(), varExpr, allocaDecl, - /*temporary=*/nullptr, - OpenACCReductionOperator::Invalid, - Decl::castToDeclContext(cgf.curFuncDecl), - opInfo.baseType, privateOp.getResult()); + .getOrCreateRecipe( + cgf.getContext(), recipeInsertLocation, varExpr, + varRecipe.AllocaDecl, + /*temporary=*/nullptr, OpenACCReductionOperator::Invalid, + Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType, + opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType, + privateOp); // TODO: OpenACC: The dialect is going to change in the near future to // have these be on a different operation, so when that changes, we // probably need to change these here. @@ -1032,22 +1036,17 @@ class OpenACCClauseCIREmitter final { mlir::OpBuilder::InsertionGuard guardCase(builder); - // TODO: OpenACC: At the moment this is a bit of a hacky way of doing - // this, and won't work when we get to bounds/etc. Do this for now to - // limit the scope of this refactor. - VarDecl *allocaDecl = varRecipe.AllocaDecl; - allocaDecl->setInit(varRecipe.InitExpr); - allocaDecl->setInitStyle(VarDecl::CallInit); auto recipe = OpenACCRecipeBuilder(cgf, builder) - .getOrCreateRecipe(cgf.getContext(), varExpr, allocaDecl, - varRecipe.InitFromTemporary, - OpenACCReductionOperator::Invalid, - Decl::castToDeclContext(cgf.curFuncDecl), - opInfo.baseType, - firstPrivateOp.getResult()); + .getOrCreateRecipe( + cgf.getContext(), recipeInsertLocation, varExpr, + varRecipe.AllocaDecl, varRecipe.InitFromTemporary, + OpenACCReductionOperator::Invalid, + Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType, + opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType, + firstPrivateOp); // TODO: OpenACC: The dialect is going to change in the near future to // have these be on a different operation, so when that changes, we @@ -1080,20 +1079,16 @@ class OpenACCClauseCIREmitter final { mlir::OpBuilder::InsertionGuard guardCase(builder); - // TODO: OpenACC: At the moment this is a bit of a hacky way of doing - // this, and won't work when we get to bounds/etc. Do this for now to - // limit the scope of this refactor. 
- VarDecl *allocaDecl = varRecipe.AllocaDecl; - allocaDecl->setInit(varRecipe.InitExpr); - allocaDecl->setInitStyle(VarDecl::CallInit); auto recipe = OpenACCRecipeBuilder(cgf, builder) - .getOrCreateRecipe(cgf.getContext(), varExpr, allocaDecl, - /*temporary=*/nullptr, - clause.getReductionOp(), - Decl::castToDeclContext(cgf.curFuncDecl), - opInfo.baseType, reductionOp.getResult()); + .getOrCreateRecipe( + cgf.getContext(), recipeInsertLocation, varExpr, + varRecipe.AllocaDecl, + /*temporary=*/nullptr, clause.getReductionOp(), + Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType, + opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType, + reductionOp); operation.addReduction(builder.getContext(), reductionOp, recipe); } @@ -1109,10 +1104,13 @@ }; template -auto makeClauseEmitter(OpTy &op, CIRGen::CIRGenFunction &cgf, +auto makeClauseEmitter(OpTy &op, + mlir::OpBuilder::InsertPoint &recipeInsertLocation, + CIRGen::CIRGenFunction &cgf, CIRGen::CIRGenBuilderTy &builder, OpenACCDirectiveKind dirKind, SourceLocation dirLoc) { - return OpenACCClauseCIREmitter(op, cgf, builder, dirKind, dirLoc); + return OpenACCClauseCIREmitter(op, recipeInsertLocation, cgf, builder, + dirKind, dirLoc); } } // namespace @@ -1125,7 +1123,8 @@ void CIRGenFunction::emitOpenACCClauses( // Sets insertion point before the 'op', since every new expression needs to // be before the operation. builder.setInsertionPoint(op); - makeClauseEmitter(op, *this, builder, dirKind, dirLoc).emitClauses(clauses); + makeClauseEmitter(op, lastRecipeLocation, *this, builder, dirKind, dirLoc) + .emitClauses(clauses); } #define EXPL_SPEC(N) \ @@ -1157,7 +1156,8 @@ void CIRGenFunction::emitOpenACCClauses( // We cannot set the insertion point here and do so in the emitter, but make // sure we reset it with the 'guard' anyway. mlir::OpBuilder::InsertionGuard guardCase(builder); - makeClauseEmitter(inf, *this, builder, dirKind, dirLoc).emitClauses(clauses); + makeClauseEmitter(inf, lastRecipeLocation, *this, builder, dirKind, dirLoc) + .emitClauses(clauses); } #define EXPL_SPEC(N) \ diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp new file mode 100644 index 0000000000000..ea6ea2c63acc3 --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp @@ -0,0 +1,536 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Helpers to emit OpenACC clause recipes as CIR code. +// +//===----------------------------------------------------------------------===// + +#include + +#include "CIRGenOpenACCRecipe.h" + +namespace clang::CIRGen { +mlir::Block *OpenACCRecipeBuilderBase::createRecipeBlock(mlir::Region &region, + mlir::Type opTy, + mlir::Location loc, + size_t numBounds, + bool isInit) { + llvm::SmallVector types; + types.reserve(numBounds + 2); + types.push_back(opTy); + // The init section is the only one that doesn't have TWO copies of the + // operation-type. Copy has a to/from, and destroy has a + // 'reference'/'privatized' copy version.
+ if (!isInit) + types.push_back(opTy); + + auto boundsTy = mlir::acc::DataBoundsType::get(&cgf.getMLIRContext()); + for (size_t i = 0; i < numBounds; ++i) + types.push_back(boundsTy); + + llvm::SmallVector locs{types.size(), loc}; + return builder.createBlock(®ion, region.end(), types, locs); +} +void OpenACCRecipeBuilderBase::makeAllocaCopy(mlir::Location loc, + mlir::Type copyType, + mlir::Value numEltsToCopy, + mlir::Value offsetPerSubarray, + mlir::Value destAlloca, + mlir::Value srcAlloca) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + + mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy); + auto itrPtrTy = cir::PointerType::get(itrTy); + mlir::IntegerAttr itrAlign = + cgf.cgm.getSize(cgf.getContext().getTypeAlignInChars( + cgf.getContext().UnsignedLongLongTy)); + + auto loopBuilder = [&]() { + auto itr = + cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr", itrAlign); + cir::ConstantOp constZero = builder.getConstInt(loc, itrTy, 0); + builder.CIRBaseBuilderTy::createStore(loc, constZero, itr); + builder.createFor( + loc, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // itr < numEltsToCopy + // Enforce a trip count of 1 if there wasn't any element count, this + // way we can just use this loop with a constant bounds instead of a + // separate code path. + if (!numEltsToCopy) + numEltsToCopy = builder.getConstInt(loc, itrTy, 1); + + auto loadCur = cir::LoadOp::create(builder, loc, {itr}); + auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadCur, + numEltsToCopy); + builder.createCondition(cmp); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // destAlloca[itr] = srcAlloca[offsetPerSubArray * itr]; + auto loadCur = cir::LoadOp::create(builder, loc, {itr}); + auto srcOffset = builder.createMul(loc, offsetPerSubarray, loadCur); + + auto ptrToOffsetIntoSrc = cir::PtrStrideOp::create( + builder, loc, copyType, srcAlloca, srcOffset); + + auto offsetIntoDecayDest = cir::PtrStrideOp::create( + builder, loc, builder.getPointerTo(copyType), destAlloca, + loadCur); + + builder.CIRBaseBuilderTy::createStore(loc, ptrToOffsetIntoSrc, + offsetIntoDecayDest); + builder.createYield(loc); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + // Simple increment of the iterator. + auto load = cir::LoadOp::create(builder, loc, {itr}); + auto inc = cir::UnaryOp::create(builder, loc, load.getType(), + cir::UnaryOpKind::Inc, load); + builder.CIRBaseBuilderTy::createStore(loc, inc, itr); + builder.createYield(loc); + }); + }; + + cir::ScopeOp::create(builder, loc, + [&](mlir::OpBuilder &b, mlir::Location loc) { + loopBuilder(); + builder.createYield(loc); + }); +} + +mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca( + mlir::Block *block, SourceRange exprRange, mlir::Location loc, + std::string_view allocaName, size_t numBounds, + llvm::ArrayRef boundTypes) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + + // Get the range of bounds arguments, which are all but the 1st arg. + llvm::ArrayRef boundsRange = + block->getArguments().drop_front(1); + + // boundTypes contains the before and after of each bounds, so it ends up + // having 1 extra. Assert this is the case to ensure we don't call this in the + // wrong 'block'. 
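As a rough C model of the loop that makeAllocaCopy above emits (a sketch only: the names mirror the function's parameters, the element type T is illustrative, and the trip count collapses to 1 when there is no element count):

    // destAlloca holds T* slots, one per outer element; srcAlloca is the flat
    // T storage backing every sub-array.
    for (unsigned long long itr = 0; itr < numEltsToCopy; ++itr)
      destAlloca[itr] = &srcAlloca[offsetPerSubarray * itr];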
+ assert(boundsRange.size() + 1 == boundTypes.size()); + + mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy); + auto idxType = mlir::IndexType::get(&cgf.getMLIRContext()); + + auto getUpperBound = [&](mlir::Value bound) { + auto upperBoundVal = + mlir::acc::GetUpperboundOp::create(builder, loc, idxType, bound); + return mlir::UnrealizedConversionCastOp::create(builder, loc, itrTy, + upperBoundVal.getResult()) + .getResult(0); + }; + + auto isArrayTy = [&](QualType ty) { + if (ty->isArrayType() && !ty->isConstantArrayType()) + cgf.cgm.errorNYI(exprRange, "OpenACC recipe init for VLAs"); + return ty->isConstantArrayType(); + }; + + mlir::Type topLevelTy = cgf.convertType(boundTypes.back()); + cir::PointerType topLevelTyPtr = builder.getPointerTo(topLevelTy); + // Do an alloca for the 'top' level type without bounds. + mlir::Value initialAlloca = builder.createAlloca( + loc, topLevelTyPtr, topLevelTy, allocaName, + cgf.getContext().getTypeAlignInChars(boundTypes.back())); + + bool lastBoundWasArray = isArrayTy(boundTypes.back()); + + // Make sure we track a moving version of this so we can get our + // 'copying' back to correct. + mlir::Value lastAlloca = initialAlloca; + + // Since we're iterating the types in reverse, this sets up for each index + // corresponding to the boundsRange to be the 'after application of the + // bounds. + llvm::ArrayRef boundResults = boundTypes.drop_back(1); + + // Collect the 'do we have any allocas needed after this type' list. + llvm::SmallVector allocasLeftArr; + llvm::ArrayRef resultTypes = boundTypes.drop_front(); + std::transform_inclusive_scan( + resultTypes.begin(), resultTypes.end(), + std::back_inserter(allocasLeftArr), std::plus{}, + [](QualType ty) { return !ty->isConstantArrayType(); }, false); + + // Keep track of the number of 'elements' that we're allocating. Individual + // allocas should multiply this by the size of its current allocation. + mlir::Value cumulativeElts; + for (auto [bound, resultType, allocasLeft] : llvm::reverse( + llvm::zip_equal(boundsRange, boundResults, allocasLeftArr))) { + + // if there is no further 'alloca' operation we need to do, we can skip + // creating the UB/multiplications/etc. + if (!allocasLeft) + break; + + // First: figure out the number of elements in the current 'bound' list. + mlir::Value eltsPerSubArray = getUpperBound(bound); + mlir::Value eltsToAlloca; + + // IF we are in a sub-bounds, the total number of elements to alloca is + // the product of that one and the current 'bounds' size. That is, + // arr[5][5], we would need 25 elements, not just 5. Else it is just the + // current number of elements. + if (cumulativeElts) + eltsToAlloca = builder.createMul(loc, eltsPerSubArray, cumulativeElts); + else + eltsToAlloca = eltsPerSubArray; + + if (!lastBoundWasArray) { + // If we have to do an allocation, figure out the size of the + // allocation. alloca takes the number of bytes, not elements. 
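To make the element accounting in makeBoundsAlloca concrete, this is roughly what it builds for a hypothetical `int **p` privatized with bounds [0:5][0:4] (a sketch; the variable names are invented, and the pointer wiring between levels is the makeAllocaCopy loop sketched above):

    int **top;          // bound-less "top level" alloca, a single slot
    int  *mid[5];       // 1st bound: eltsToAlloca = 5
    int   flat[5 * 4];  // 2nd bound: eltsToAlloca = 5 * 4; sized as
                        // 20 * sizeof(int) bytes, since alloca takes bytes
    // wiring: top = &mid[0]; then mid[i] = &flat[4 * i] for i in [0, 5).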
+ TypeInfoChars eltInfo = cgf.getContext().getTypeInfoInChars(resultType); + cir::ConstantOp eltSize = builder.getConstInt( + loc, itrTy, eltInfo.Width.alignTo(eltInfo.Align).getQuantity()); + mlir::Value curSize = builder.createMul(loc, eltsToAlloca, eltSize); + + mlir::Type eltTy = cgf.convertType(resultType); + cir::PointerType ptrTy = builder.getPointerTo(eltTy); + mlir::Value curAlloca = builder.createAlloca( + loc, ptrTy, eltTy, "openacc.init.bounds", + cgf.getContext().getTypeAlignInChars(resultType), curSize); + + makeAllocaCopy(loc, ptrTy, cumulativeElts, eltsPerSubArray, lastAlloca, + curAlloca); + lastAlloca = curAlloca; + } else { + // In the case of an array, we just need to decay the pointer, so just do + // a zero-offset stride on the last alloca to decay it down an array + // level. + cir::ConstantOp constZero = builder.getConstInt(loc, itrTy, 0); + lastAlloca = builder.getArrayElement(loc, loc, lastAlloca, + cgf.convertType(resultType), + constZero, /*shouldDecay=*/true); + } + + cumulativeElts = eltsToAlloca; + lastBoundWasArray = isArrayTy(resultType); + } + return initialAlloca; +} + +mlir::Value +OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue, + mlir::Value bound, + mlir::Location loc, bool inverse) { + mlir::Operation *bodyInsertLoc; + + mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy); + auto itrPtrTy = cir::PointerType::get(itrTy); + mlir::IntegerAttr itrAlign = + cgf.cgm.getSize(cgf.getContext().getTypeAlignInChars( + cgf.getContext().UnsignedLongLongTy)); + auto idxType = mlir::IndexType::get(&cgf.getMLIRContext()); + + auto doSubscriptOp = [&](mlir::Value subVal, + cir::LoadOp idxLoad) -> mlir::Value { + auto eltTy = cast(subVal.getType()).getPointee(); + + if (auto arrayTy = dyn_cast(eltTy)) + return builder.getArrayElement(loc, loc, subVal, arrayTy.getElementType(), + idxLoad, + /*shouldDecay=*/true); + + assert(isa(eltTy)); + + auto eltLoad = cir::LoadOp::create(builder, loc, {subVal}); + + return cir::PtrStrideOp::create(builder, loc, eltLoad.getType(), eltLoad, + idxLoad); + + }; + + auto forStmtBuilder = [&]() { + // get the lower and upper bound for iterating over. + auto lowerBoundVal = + mlir::acc::GetLowerboundOp::create(builder, loc, idxType, bound); + auto lbConversion = mlir::UnrealizedConversionCastOp::create( + builder, loc, itrTy, lowerBoundVal.getResult()); + auto upperBoundVal = + mlir::acc::GetUpperboundOp::create(builder, loc, idxType, bound); + auto ubConversion = mlir::UnrealizedConversionCastOp::create( + builder, loc, itrTy, upperBoundVal.getResult()); + + // Create a memory location for the iterator. + auto itr = + cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "iter", itrAlign); + // Store to the iterator: either lower bound, or if inverse loop, upper + // bound. + if (inverse) { + cir::ConstantOp constOne = builder.getConstInt(loc, itrTy, 1); + + auto sub = cir::BinOp::create(builder, loc, itrTy, cir::BinOpKind::Sub, + ubConversion.getResult(0), constOne); + + // Upperbound is exclusive, so subtract 1. + builder.CIRBaseBuilderTy::createStore(loc, sub, itr); + } else { + // Lowerbound is inclusive, so we can include it. + builder.CIRBaseBuilderTy::createStore(loc, lbConversion.getResult(0), + itr); + } + // Save the 'end' iterator based on whether we are inverted or not. This + // end iterator never changes, so we can just get it and convert it, so no + // need to store/load/etc. + auto endItr = inverse ? 
lbConversion : ubConversion; + + builder.createFor( + loc, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto loadCur = cir::LoadOp::create(builder, loc, {itr}); + // Use 'not equal' since we are just doing an increment/decrement. + auto cmp = builder.createCompare( + loc, inverse ? cir::CmpOpKind::ge : cir::CmpOpKind::lt, loadCur, + endItr.getResult(0)); + builder.createCondition(cmp); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto load = cir::LoadOp::create(builder, loc, {itr}); + + if (subscriptedValue) + subscriptedValue = doSubscriptOp(subscriptedValue, load); + bodyInsertLoc = builder.createYield(loc); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto load = cir::LoadOp::create(builder, loc, {itr}); + auto unary = cir::UnaryOp::create( + builder, loc, load.getType(), + inverse ? cir::UnaryOpKind::Dec : cir::UnaryOpKind::Inc, load); + builder.CIRBaseBuilderTy::createStore(loc, unary, itr); + builder.createYield(loc); + }); + }; + + cir::ScopeOp::create(builder, loc, + [&](mlir::OpBuilder &b, mlir::Location loc) { + forStmtBuilder(); + builder.createYield(loc); + }); + + // Leave the insertion point to be inside the body, so we can loop over + // these things. + builder.setInsertionPoint(bodyInsertLoc); + return subscriptedValue; +} + +mlir::acc::ReductionOperator +OpenACCRecipeBuilderBase::convertReductionOp(OpenACCReductionOperator op) { + switch (op) { + case OpenACCReductionOperator::Addition: + return mlir::acc::ReductionOperator::AccAdd; + case OpenACCReductionOperator::Multiplication: + return mlir::acc::ReductionOperator::AccMul; + case OpenACCReductionOperator::Max: + return mlir::acc::ReductionOperator::AccMax; + case OpenACCReductionOperator::Min: + return mlir::acc::ReductionOperator::AccMin; + case OpenACCReductionOperator::BitwiseAnd: + return mlir::acc::ReductionOperator::AccIand; + case OpenACCReductionOperator::BitwiseOr: + return mlir::acc::ReductionOperator::AccIor; + case OpenACCReductionOperator::BitwiseXOr: + return mlir::acc::ReductionOperator::AccXor; + case OpenACCReductionOperator::And: + return mlir::acc::ReductionOperator::AccLand; + case OpenACCReductionOperator::Or: + return mlir::acc::ReductionOperator::AccLor; + case OpenACCReductionOperator::Invalid: + llvm_unreachable("invalid reduction operator"); + } + + llvm_unreachable("invalid reduction operator"); +} + +// This function generates the 'destroy' section for a recipe. Note +// that this function is not 'insertion point' clean, in that it alters the +// insertion point to be inside of the 'destroy' section of the recipe, but +// doesn't restore it aftewards. +void OpenACCRecipeBuilderBase::createRecipeDestroySection( + mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, + CharUnits alignment, QualType origType, size_t numBounds, QualType baseType, + mlir::Region &destroyRegion) { + mlir::Block *block = createRecipeBlock(destroyRegion, mainOp.getType(), loc, + numBounds, /*isInit=*/false); + builder.setInsertionPointToEnd(&destroyRegion.back()); + CIRGenFunction::LexicalScope ls(cgf, loc, block); + + mlir::Type elementTy = + mlir::cast(mainOp.getType()).getPointee(); + auto emitDestroy = [&](mlir::Value var, mlir::Type ty) { + Address addr{var, ty, alignment}; + cgf.emitDestroy(addr, origType, + cgf.getDestroyer(QualType::DK_cxx_destructor)); + }; + + if (numBounds) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + // Get the range of bounds arguments, which are all but the 1st 2. 
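Concretely, when bounds are present the destroy section walks each dimension with createBoundsLoop in inverse order and destroys the elements of the privatized copy (the 2nd block argument). A minimal C-style model, where ub()/lb() stand in for acc.get_upperbound/acc.get_lowerbound and T is an illustrative class type:

    // upper bound is exclusive, so iterate from ub - 1 down to lb
    for (unsigned long long i = ub(bound); i-- > lb(bound); )
      priv[i].~T();   // cgf.emitDestroy on each subscripted element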
1st is + // a 'reference', 2nd is the 'private' variant we need to destroy from. + llvm::MutableArrayRef boundsRange = + block->getArguments().drop_front(2); + + mlir::Value subscriptedValue = block->getArgument(1); + for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange)) + subscriptedValue = createBoundsLoop(subscriptedValue, boundArg, loc, + /*inverse=*/true); + + emitDestroy(subscriptedValue, cgf.cgm.convertType(origType)); + } else { + // If we don't have any bounds, we can just destroy the variable directly. + // The destroy region has a signature of "original item, privatized item". + // So the 2nd item is the one that needs destroying, the former is just + // for reference and we don't really have a need for it at the moment. + emitDestroy(block->getArgument(1), elementTy); + } + + mlir::acc::YieldOp::create(builder, locEnd); +} +void OpenACCRecipeBuilderBase::makeBoundsInit( + mlir::Value alloca, mlir::Location loc, mlir::Block *block, + const VarDecl *allocaDecl, QualType origType, bool isInitSection) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + builder.setInsertionPointToEnd(block); + CIRGenFunction::LexicalScope ls(cgf, loc, block); + + CIRGenFunction::AutoVarEmission tempDeclEmission{*allocaDecl}; + tempDeclEmission.emittedAsOffload = true; + + // The init section is the only one of the handful that only has a single + // argument for the 'type', so we have to drop 1 for init, and future calls + // to this will need to drop 2. + llvm::MutableArrayRef boundsRange = + block->getArguments().drop_front(isInitSection ? 1 : 2); + + mlir::Value subscriptedValue = alloca; + for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange)) + subscriptedValue = createBoundsLoop(subscriptedValue, boundArg, loc, + /*inverse=*/false); + + tempDeclEmission.setAllocatedAddress( + Address{subscriptedValue, cgf.convertType(origType), + cgf.getContext().getDeclAlign(allocaDecl)}); + cgf.emitAutoVarInit(tempDeclEmission); +} + +// TODO: OpenACC: When we get this implemented for the reduction/firstprivate, +// this might end up re-merging with createRecipeInitCopy. For now, keep it +// separate until we're sure what everything looks like to keep this as clean +// as possible. +void OpenACCRecipeBuilderBase::createPrivateInitRecipe( + mlir::Location loc, mlir::Location locEnd, SourceRange exprRange, + mlir::Value mainOp, mlir::acc::PrivateRecipeOp recipe, size_t numBounds, + llvm::ArrayRef boundTypes, const VarDecl *allocaDecl, + QualType origType) { + assert(allocaDecl && "Required recipe variable not set?"); + CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, allocaDecl}; + + mlir::Block *block = + createRecipeBlock(recipe.getInitRegion(), mainOp.getType(), loc, + numBounds, /*isInit=*/true); + builder.setInsertionPointToEnd(&recipe.getInitRegion().back()); + CIRGenFunction::LexicalScope ls(cgf, loc, block); + + const Type *allocaPointeeType = + allocaDecl->getType()->getPointeeOrArrayElementType(); + // We are OK with no init for builtins, arrays of builtins, or pointers, + // else we should NYI so we know to go look for these. + if (cgf.getContext().getLangOpts().CPlusPlus && !allocaDecl->getInit() && + !allocaDecl->getType()->isPointerType() && + !allocaPointeeType->isBuiltinType() && + !allocaPointeeType->isPointerType()) { + // If we don't have any initialization recipe, we failed during Sema to + // initialize this correctly. If we disable the + // Sema::TentativeAnalysisScopes in SemaOpenACC::CreateInitRecipe, it'll + // emit an error to tell us. 
However, emitting those errors during + // production is a violation of the standard, so we cannot do them. + cgf.cgm.errorNYI(exprRange, "private default-init recipe"); + } + + if (!numBounds) { + // This is an 'easy' case, we just have to use the builtin init stuff to + // initialize this variable correctly. + CIRGenFunction::AutoVarEmission tempDeclEmission = + cgf.emitAutoVarAlloca(*allocaDecl, builder.saveInsertionPoint()); + cgf.emitAutoVarInit(tempDeclEmission); + } else { + mlir::Value alloca = makeBoundsAlloca( + block, exprRange, loc, "openacc.private.init", numBounds, boundTypes); + + // If the initializer is trivial, there is nothing to do here, so save + // ourselves some effort. + if (allocaDecl->getInit() && + (!cgf.isTrivialInitializer(allocaDecl->getInit()) || + cgf.getContext().getLangOpts().getTrivialAutoVarInit() != + LangOptions::TrivialAutoVarInitKind::Uninitialized)) + makeBoundsInit(alloca, loc, block, allocaDecl, origType, + /*isInitSection=*/true); + } + + mlir::acc::YieldOp::create(builder, locEnd); +} + +void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy( + mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, + CIRGenFunction::AutoVarEmission tempDeclEmission, + mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe, + const VarDecl *temporary) { + mlir::Block *block = + createRecipeBlock(recipe.getCopyRegion(), mainOp.getType(), loc, + /*numBounds=*/0, /*isInit=*/false); + builder.setInsertionPointToEnd(&recipe.getCopyRegion().back()); + CIRGenFunction::LexicalScope ls(cgf, loc, block); + + mlir::BlockArgument fromArg = block->getArgument(0); + mlir::BlockArgument toArg = block->getArgument(1); + + mlir::Type elementTy = + mlir::cast(mainOp.getType()).getPointee(); + + // Set the address of the emission to be the argument, so that we initialize + // that instead of the variable in the other block. + tempDeclEmission.setAllocatedAddress( + Address{toArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)}); + tempDeclEmission.emittedAsOffload = true; + + CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, temporary}; + cgf.setAddrOfLocalVar( + temporary, + Address{fromArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)}); + + cgf.emitAutoVarInit(tempDeclEmission); + mlir::acc::YieldOp::create(builder, locEnd); +} +// This function generates the 'combiner' section for a reduction recipe. Note +// that this function is not 'insertion point' clean, in that it alters the +// insertion point to be inside of the 'combiner' section of the recipe, but +// doesn't restore it aftewards. 
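For comparison, the 'copy' region that createFirstprivateRecipeCopy above fills in behaves roughly like a placement copy-construction from the original variable into the privatized one (a sketch; the argument roles come from the copy-region block signature, and T is illustrative):

    #include <new>
    // copy region signature: (original /*arg 0*/, privatized /*arg 1*/)
    template <typename T> void copyRegion(T *from, T *to) {
      new (to) T(*from); // emitAutoVarInit evaluates the init expr that reads 'from'
    }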
+void OpenACCRecipeBuilderBase::createReductionRecipeCombiner( + mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, + mlir::acc::ReductionRecipeOp recipe) { + mlir::Block *block = builder.createBlock( + &recipe.getCombinerRegion(), recipe.getCombinerRegion().end(), + {mainOp.getType(), mainOp.getType()}, {loc, loc}); + builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back()); + CIRGenFunction::LexicalScope ls(cgf, loc, block); + + mlir::BlockArgument lhsArg = block->getArgument(0); + + mlir::acc::YieldOp::create(builder, locEnd, lhsArg); +} + +} // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h index 102fd890e5579..a05b0bdaf6774 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenCXXABI.h" #include "CIRGenFunction.h" #include "clang/AST/ASTContext.h" @@ -22,38 +23,70 @@ #include "mlir/Dialect/OpenACC/OpenACC.h" namespace clang::CIRGen { -template class OpenACCRecipeBuilder { +class OpenACCRecipeBuilderBase { + // makes the copy of the addresses of an alloca to the previous allocation. + void makeAllocaCopy(mlir::Location loc, mlir::Type copyType, + mlir::Value numEltsToCopy, mlir::Value offsetPerSubarray, + mlir::Value destAlloca, mlir::Value srcAlloca); + // This function generates the required alloca, similar to + // 'emitAutoVarAlloca', except for the OpenACC array/pointer types. + mlir::Value makeBoundsAlloca(mlir::Block *block, SourceRange exprRange, + mlir::Location loc, std::string_view allocaName, + size_t numBounds, + llvm::ArrayRef boundTypes); + + void makeBoundsInit(mlir::Value alloca, mlir::Location loc, + mlir::Block *block, const VarDecl *allocaDecl, + QualType origType, bool isInitSection); + +protected: CIRGen::CIRGenFunction &cgf; CIRGen::CIRGenBuilderTy &builder; - mlir::acc::ReductionOperator convertReductionOp(OpenACCReductionOperator op) { - switch (op) { - case OpenACCReductionOperator::Addition: - return mlir::acc::ReductionOperator::AccAdd; - case OpenACCReductionOperator::Multiplication: - return mlir::acc::ReductionOperator::AccMul; - case OpenACCReductionOperator::Max: - return mlir::acc::ReductionOperator::AccMax; - case OpenACCReductionOperator::Min: - return mlir::acc::ReductionOperator::AccMin; - case OpenACCReductionOperator::BitwiseAnd: - return mlir::acc::ReductionOperator::AccIand; - case OpenACCReductionOperator::BitwiseOr: - return mlir::acc::ReductionOperator::AccIor; - case OpenACCReductionOperator::BitwiseXOr: - return mlir::acc::ReductionOperator::AccXor; - case OpenACCReductionOperator::And: - return mlir::acc::ReductionOperator::AccLand; - case OpenACCReductionOperator::Or: - return mlir::acc::ReductionOperator::AccLor; - case OpenACCReductionOperator::Invalid: - llvm_unreachable("invalid reduction operator"); - } + mlir::Block *createRecipeBlock(mlir::Region ®ion, mlir::Type opTy, + mlir::Location loc, size_t numBounds, + bool isInit); + // Creates a loop through an 'acc.bounds', leaving the 'insertion' point to be + // the inside of the loop body. Traverses LB->UB UNLESS `inverse` is set. + // Returns the 'subscriptedValue' changed with the new bounds subscript. 
+ mlir::Value createBoundsLoop(mlir::Value subscriptedValue, mlir::Value bound, + mlir::Location loc, bool inverse); + mlir::acc::ReductionOperator convertReductionOp(OpenACCReductionOperator op); + void createFirstprivateRecipeCopy( + mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, + CIRGenFunction::AutoVarEmission tempDeclEmission, + mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe, + const VarDecl *temporary); - llvm_unreachable("invalid reduction operator"); - } + // This function generates the 'combiner' section for a reduction recipe. Note + // that this function is not 'insertion point' clean, in that it alters the + // insertion point to be inside of the 'combiner' section of the recipe, but + // doesn't restore it aftewards. + void createReductionRecipeCombiner(mlir::Location loc, mlir::Location locEnd, + mlir::Value mainOp, + mlir::acc::ReductionRecipeOp recipe); + void createPrivateInitRecipe(mlir::Location loc, mlir::Location locEnd, + SourceRange exprRange, mlir::Value mainOp, + mlir::acc::PrivateRecipeOp recipe, + size_t numBounds, + llvm::ArrayRef boundTypes, + const VarDecl *allocaDecl, QualType origType); + + void createRecipeDestroySection(mlir::Location loc, mlir::Location locEnd, + mlir::Value mainOp, CharUnits alignment, + QualType origType, size_t numBounds, + QualType baseType, + mlir::Region &destroyRegion); + + OpenACCRecipeBuilderBase(CIRGen::CIRGenFunction &cgf, + CIRGen::CIRGenBuilderTy &builder) + : cgf(cgf), builder(builder) {} +}; +template +class OpenACCRecipeBuilder : OpenACCRecipeBuilderBase { std::string getRecipeName(SourceRange loc, QualType baseType, + unsigned numBounds, OpenACCReductionOperator reductionOp) { std::string recipeName; { @@ -106,44 +139,17 @@ template class OpenACCRecipeBuilder { static_assert(!sizeof(RecipeTy), "Unknown Recipe op kind"); } + // The naming convention from Flang with bounds doesn't map to C++ types + // very well, so we're just going to choose our own here. + if (numBounds) + stream << "_Bcnt" << numBounds << '_'; + MangleContext &mc = cgf.cgm.getCXXABI().getMangleContext(); mc.mangleCanonicalTypeName(baseType, stream); } return recipeName; } - void createFirstprivateRecipeCopy( - mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, - CIRGenFunction::AutoVarEmission tempDeclEmission, - mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe, - const VarDecl *temporary) { - mlir::Block *block = builder.createBlock( - &recipe.getCopyRegion(), recipe.getCopyRegion().end(), - {mainOp.getType(), mainOp.getType()}, {loc, loc}); - builder.setInsertionPointToEnd(&recipe.getCopyRegion().back()); - CIRGenFunction::LexicalScope ls(cgf, loc, block); - - mlir::BlockArgument fromArg = block->getArgument(0); - mlir::BlockArgument toArg = block->getArgument(1); - - mlir::Type elementTy = - mlir::cast(mainOp.getType()).getPointee(); - - // Set the address of the emission to be the argument, so that we initialize - // that instead of the variable in the other block. - tempDeclEmission.setAllocatedAddress( - Address{toArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)}); - tempDeclEmission.EmittedAsOffload = true; - - CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, temporary}; - cgf.setAddrOfLocalVar( - temporary, - Address{fromArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)}); - - cgf.emitAutoVarInit(tempDeclEmission); - mlir::acc::YieldOp::create(builder, locEnd); - } - // Create the 'init' section of the recipe, including the 'copy' section for // 'firstprivate'. 
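With the getRecipeName change above, recipes specialized on a bounds count get their own module-level symbol. Assuming the Itanium mangler, a private clause, and an `int[5][5]` variable with two bounds, the generated name would look roughly like the following (the "privatization_" prefix comes from the kind-specific code not shown in this hunk):

    privatization__Bcnt2__ZTSA5_A5_i   // <prefix>_ + _Bcnt2_ + mangled type name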
Note that this function is not 'insertion point' clean, in // that it alters the insertion point to be inside of the 'destroy' section of @@ -152,6 +158,9 @@ template class OpenACCRecipeBuilder { SourceRange exprRange, mlir::Value mainOp, RecipeTy recipe, const VarDecl *varRecipe, const VarDecl *temporary) { + // TODO: OpenACC: when we get the 'pointer' variants for + // firstprivate/reduction, this probably should be removed/split into + // functions for the BuilderBase. assert(varRecipe && "Required recipe variable not set?"); CIRGenFunction::AutoVarEmission tempDeclEmission{ @@ -160,9 +169,9 @@ template class OpenACCRecipeBuilder { // Do the 'init' section of the recipe IR, which does an alloca, then the // initialization (except for firstprivate). - mlir::Block *block = builder.createBlock(&recipe.getInitRegion(), - recipe.getInitRegion().end(), - {mainOp.getType()}, {loc}); + mlir::Block *block = + createRecipeBlock(recipe.getInitRegion(), mainOp.getType(), loc, + /*numBounds=*/0, /*isInit=*/true); builder.setInsertionPointToEnd(&recipe.getInitRegion().back()); CIRGenFunction::LexicalScope ls(cgf, loc, block); @@ -170,28 +179,9 @@ template class OpenACCRecipeBuilder { cgf.emitAutoVarAlloca(*varRecipe, builder.saveInsertionPoint()); // 'firstprivate' doesn't do its initialization in the 'init' section, - // instead does it in the 'copy' section. SO only do init here. - // 'reduction' appears to use it too (rather than a 'copy' section), so - // we probably have to do it here too, but we can do that when we get to - // reduction implementation. - if constexpr (std::is_same_v) { - // We are OK with no init for builtins, arrays of builtins, or pointers, - // else we should NYI so we know to go look for these. - if (cgf.getContext().getLangOpts().CPlusPlus && - !varRecipe->getType() - ->getPointeeOrArrayElementType() - ->isBuiltinType() && - !varRecipe->getType()->isPointerType() && !varRecipe->getInit()) { - // If we don't have any initialization recipe, we failed during Sema to - // initialize this correctly. If we disable the - // Sema::TentativeAnalysisScopes in SemaOpenACC::CreateInitRecipe, it'll - // emit an error to tell us. However, emitting those errors during - // production is a violation of the standard, so we cannot do them. - cgf.cgm.errorNYI(exprRange, "private default-init recipe"); - } - cgf.emitAutoVarInit(tempDeclEmission); - } else if constexpr (std::is_same_v) { + // instead it does it in the 'copy' section. SO, only do 'init' here for + // reduction. + if constexpr (std::is_same_v) { // Unlike Private, the recipe here is always required as it has to do // init, not just 'default' init. if (!varRecipe->getInit()) @@ -217,79 +207,39 @@ template class OpenACCRecipeBuilder { } } - // This function generates the 'combiner' section for a reduction recipe. Note - // that this function is not 'insertion point' clean, in that it alters the - // insertion point to be inside of the 'combiner' section of the recipe, but - // doesn't restore it aftewards. 
- void createReductionRecipeCombiner(mlir::Location loc, mlir::Location locEnd, - mlir::Value mainOp, - mlir::acc::ReductionRecipeOp recipe) { - mlir::Block *block = builder.createBlock( - &recipe.getCombinerRegion(), recipe.getCombinerRegion().end(), - {mainOp.getType(), mainOp.getType()}, {loc, loc}); - builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back()); - CIRGenFunction::LexicalScope ls(cgf, loc, block); - - mlir::BlockArgument lhsArg = block->getArgument(0); - - mlir::acc::YieldOp::create(builder, locEnd, lhsArg); - } - - // This function generates the 'destroy' section for a recipe. Note - // that this function is not 'insertion point' clean, in that it alters the - // insertion point to be inside of the 'destroy' section of the recipe, but - // doesn't restore it aftewards. - void createRecipeDestroySection(mlir::Location loc, mlir::Location locEnd, - mlir::Value mainOp, CharUnits alignment, - QualType baseType, - mlir::Region &destroyRegion) { - mlir::Block *block = - builder.createBlock(&destroyRegion, destroyRegion.end(), - {mainOp.getType(), mainOp.getType()}, {loc, loc}); - builder.setInsertionPointToEnd(&destroyRegion.back()); - CIRGenFunction::LexicalScope ls(cgf, loc, block); - - mlir::Type elementTy = - mlir::cast(mainOp.getType()).getPointee(); - // The destroy region has a signature of "original item, privatized item". - // So the 2nd item is the one that needs destroying, the former is just for - // reference and we don't really have a need for it at the moment. - Address addr{block->getArgument(1), elementTy, alignment}; - cgf.emitDestroy(addr, baseType, - cgf.getDestroyer(QualType::DK_cxx_destructor)); - - mlir::acc::YieldOp::create(builder, locEnd); - } - public: OpenACCRecipeBuilder(CIRGen::CIRGenFunction &cgf, CIRGen::CIRGenBuilderTy &builder) - : cgf(cgf), builder(builder) {} - RecipeTy getOrCreateRecipe(ASTContext &astCtx, const Expr *varRef, - const VarDecl *varRecipe, const VarDecl *temporary, - OpenACCReductionOperator reductionOp, - DeclContext *dc, QualType baseType, - mlir::Value mainOp) { - - if (baseType->isPointerType() || - (baseType->isArrayType() && !baseType->isConstantArrayType())) { - // It is clear that the use of pointers/VLAs in a recipe are not properly - // generated/don't do what they are supposed to do. In the case where we - // have 'bounds', we can actually figure out what we want to - // initialize/copy/destroy/compare/etc, but we haven't figured out how - // that looks yet, both between the IR and generation code. For now, we - // will do an NYI error no it. - cgf.cgm.errorNYI( - varRef->getSourceRange(), - "OpenACC recipe generation for pointer/non-constant arrays"); + : OpenACCRecipeBuilderBase(cgf, builder) {} + RecipeTy getOrCreateRecipe( + ASTContext &astCtx, mlir::OpBuilder::InsertPoint &insertLocation, + const Expr *varRef, const VarDecl *varRecipe, const VarDecl *temporary, + OpenACCReductionOperator reductionOp, DeclContext *dc, QualType origType, + size_t numBounds, llvm::ArrayRef boundTypes, QualType baseType, + mlir::Value mainOp) { + assert(!varRecipe->getType()->isSpecificBuiltinType( + BuiltinType::ArraySection) && + "array section shouldn't make it to recipe creation"); + + // TODO: OpenACC: This is a bit of a hackery to get this to not change for + // the non-private recipes. This will be removed soon, when we get this + // 'right' for firstprivate and reduction. 
+ if constexpr (!std::is_same_v) { + if (numBounds) { + cgf.cgm.errorNYI(varRef->getSourceRange(), + "firstprivate/reduction-init with bounds"); + } + boundTypes = {}; + numBounds = 0; + origType = baseType; } mlir::ModuleOp mod = builder.getBlock() ->getParent() ->template getParentOfType(); - std::string recipeName = - getRecipeName(varRef->getSourceRange(), baseType, reductionOp); + std::string recipeName = getRecipeName(varRef->getSourceRange(), baseType, + numBounds, reductionOp); if (auto recipe = mod.lookupSymbol(recipeName)) return recipe; @@ -297,6 +247,8 @@ template class OpenACCRecipeBuilder { mlir::Location locEnd = cgf.cgm.getLoc(varRef->getEndLoc()); mlir::OpBuilder modBuilder(mod.getBodyRegion()); + if (insertLocation.isSet()) + modBuilder.restoreInsertionPoint(insertLocation); RecipeTy recipe; if constexpr (std::is_same_v) { @@ -305,18 +257,25 @@ template class OpenACCRecipeBuilder { } else { recipe = RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType()); } + insertLocation = modBuilder.saveInsertionPoint(); - createRecipeInitCopy(loc, locEnd, varRef->getSourceRange(), mainOp, recipe, - varRecipe, temporary); + if constexpr (std::is_same_v) { + createPrivateInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp, + recipe, numBounds, boundTypes, varRecipe, + origType); + } else { + createRecipeInitCopy(loc, locEnd, varRef->getSourceRange(), mainOp, + recipe, varRecipe, temporary); + } if constexpr (std::is_same_v) { createReductionRecipeCombiner(loc, locEnd, mainOp, recipe); } - if (varRecipe && varRecipe->needsDestruction(cgf.getContext())) - createRecipeDestroySection(loc, locEnd, mainOp, - cgf.getContext().getDeclAlign(varRecipe), - baseType, recipe.getDestroyRegion()); + if (origType.isDestructedType()) + createRecipeDestroySection( + loc, locEnd, mainOp, cgf.getContext().getDeclAlign(varRecipe), + origType, numBounds, baseType, recipe.getDestroyRegion()); return recipe; } }; diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index 914ef16c2a5ee..bf0ddc5875059 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -57,7 +57,7 @@ namespace clang::CIRGen { /// cir.func @store_field() { /// %0 = cir.alloca !rec_S, !cir.ptr, ["s"] {alignment = 4 : i64} /// %1 = cir.const #cir.int<2> : !s32i -/// %2 = cir.cast(integral, %1 : !s32i), !u32i +/// %2 = cir.cast integral %1 : !s32i -> !u32i /// %3 = cir.get_member %0[3] {name = "more_bits"} : !cir.ptr -> /// !cir.ptr /// %4 = cir.set_bitfield(#bfi_more_bits, %3 : diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp index a7628816089d0..87f23409e8e4b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp @@ -296,9 +296,8 @@ void CIRRecordLowering::lower(bool nonVirtualBaseType) { } llvm::stable_sort(members); - // TODO: implement clipTailPadding once bitfields are implemented - assert(!cir::MissingFeatures::bitfields()); - assert(!cir::MissingFeatures::recordZeroInit()); + // TODO: Verify bitfield clipping + assert(!cir::MissingFeatures::checkBitfieldClipping()); members.push_back(makeStorageInfo(size, getUIntNType(8))); determinePacked(nonVirtualBaseType); @@ -319,9 +318,11 @@ void CIRRecordLowering::fillOutputFields() { fieldIdxMap[member.fieldDecl->getCanonicalDecl()] = fieldTypes.size() - 1; // A field without storage must be a bitfield. 
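Stepping back to getOrCreateRecipe above: recipes are cached by symbol name at module scope, and the threaded insertLocation keeps successive recipes grouped after one another instead of landing wherever the module builder last was. The pattern is roughly this (a condensed sketch of the code above, not new behavior):

    if (auto existing = mod.lookupSymbol<RecipeTy>(recipeName))
      return existing;                                   // reuse a cached recipe
    mlir::OpBuilder modBuilder(mod.getBodyRegion());
    if (insertLocation.isSet())
      modBuilder.restoreInsertionPoint(insertLocation);  // continue after the last one
    RecipeTy recipe = RecipeTy::create(modBuilder, loc, recipeName, mainOp.getType());
    insertLocation = modBuilder.saveInsertionPoint();    // remember for the next clause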
- assert(!cir::MissingFeatures::bitfields()); - if (!member.data) + if (!member.data) { + assert(member.fieldDecl && + "member.data is a nullptr so member.fieldDecl should not be"); setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back()); + } } else if (member.kind == MemberInfo::InfoKind::Base) { nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1; } else if (member.kind == MemberInfo::InfoKind::VBase) { @@ -615,7 +616,7 @@ void CIRRecordLowering::determinePacked(bool nvBaseType) { continue; // If any member falls at an offset that it not a multiple of its alignment, // then the entire record must be packed. - if (member.offset % getAlignment(member.data)) + if (!member.offset.isMultipleOf(getAlignment(member.data))) packed = true; if (member.offset < nvSize) nvAlignment = std::max(nvAlignment, getAlignment(member.data)); @@ -623,12 +624,12 @@ void CIRRecordLowering::determinePacked(bool nvBaseType) { } // If the size of the record (the capstone's offset) is not a multiple of the // record's alignment, it must be packed. - if (members.back().offset % alignment) + if (!members.back().offset.isMultipleOf(alignment)) packed = true; // If the non-virtual sub-object is not a multiple of the non-virtual // sub-object's alignment, it must be packed. We cannot have a packed // non-virtual sub-object and an unpacked complete object or vise versa. - if (nvSize % nvAlignment) + if (!nvSize.isMultipleOf(nvAlignment)) packed = true; // Update the alignment of the sentinel. if (!packed) @@ -697,13 +698,9 @@ CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) { ty ? *ty : cir::RecordType{}, baseTy ? baseTy : cir::RecordType{}, (bool)lowering.zeroInitializable, (bool)lowering.zeroInitializableAsBase); - assert(!cir::MissingFeatures::recordZeroInit()); - rl->nonVirtualBases.swap(lowering.nonVirtualBases); rl->completeObjectVirtualBases.swap(lowering.virtualBases); - assert(!cir::MissingFeatures::bitfields()); - // Add all the field numbers. rl->fieldIdxMap.swap(lowering.fieldIdxMap); @@ -824,7 +821,7 @@ void CIRRecordLowering::lowerUnion() { appendPaddingBytes(layoutSize - getSize(storageType)); // Set packed if we need it. - if (layoutSize % getAlignment(storageType)) + if (!layoutSize.isMultipleOf(getAlignment(storageType))) packed = true; } diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index f116efc202061..644c383693e37 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -216,6 +216,7 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s, case Stmt::OMPSimdDirectiveClass: case Stmt::OMPTileDirectiveClass: case Stmt::OMPUnrollDirectiveClass: + case Stmt::OMPFuseDirectiveClass: case Stmt::OMPForDirectiveClass: case Stmt::OMPForSimdDirectiveClass: case Stmt::OMPSectionsDirectiveClass: @@ -488,8 +489,11 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) { auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc); // This should emit a branch through the cleanup block if one exists. builder.create(loc, retBlock); + assert(!cir::MissingFeatures::emitBranchThroughCleanup()); if (ehStack.stable_begin() != currentCleanupStackDepth) cgm.errorNYI(s.getSourceRange(), "return with cleanup stack"); + + // Insert the new block to continue codegen after branch to ret block. 
builder.createBlock(builder.getBlock()->getParent()); return mlir::success(); @@ -1041,3 +1045,21 @@ mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const clang::SwitchStmt &s) { return res; } + +void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv, + QualType ty) { + if (rv.isScalar()) { + builder.createStore(loc, rv.getValue(), returnValue); + } else if (rv.isAggregate()) { + LValue dest = makeAddrLValue(returnValue, ty); + LValue src = makeAddrLValue(rv.getAggregateAddress(), ty); + emitAggregateCopy(dest, src, ty, getOverlapForReturnValue()); + } else { + cgm.errorNYI(loc, "emitReturnOfRValue: complex return type"); + } + mlir::Block *retBlock = curLexScope->getOrCreateRetBlock(*this, loc); + assert(!cir::MissingFeatures::emitBranchThroughCleanup()); + builder.create(loc, retBlock); + if (ehStack.stable_begin() != currentCleanupStackDepth) + cgm.errorNYI(loc, "return with cleanup stack"); +} diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index af8f5ae2cc0a5..94d856b41b3ce 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -47,6 +47,49 @@ cir::RecordType CIRGenVTables::getVTableType(const VTableLayout &layout) { return cgm.getBuilder().getAnonRecordTy(tys, /*incomplete=*/false); } +/// At this point in the translation unit, does it appear that can we +/// rely on the vtable being defined elsewhere in the program? +/// +/// The response is really only definitive when called at the end of +/// the translation unit. +/// +/// The only semantic restriction here is that the object file should +/// not contain a vtable definition when that vtable is defined +/// strongly elsewhere. Otherwise, we'd just like to avoid emitting +/// vtables when unnecessary. +/// TODO(cir): this should be merged into common AST helper for codegen. +bool CIRGenVTables::isVTableExternal(const CXXRecordDecl *rd) { + assert(rd->isDynamicClass() && "Non-dynamic classes have no VTable."); + + // We always synthesize vtables if they are needed in the MS ABI. MSVC doesn't + // emit them even if there is an explicit template instantiation. + if (cgm.getTarget().getCXXABI().isMicrosoft()) + return false; + + // If we have an explicit instantiation declaration (and not a + // definition), the vtable is defined elsewhere. + TemplateSpecializationKind tsk = rd->getTemplateSpecializationKind(); + if (tsk == TSK_ExplicitInstantiationDeclaration) + return true; + + // Otherwise, if the class is an instantiated template, the + // vtable must be defined here. + if (tsk == TSK_ImplicitInstantiation || + tsk == TSK_ExplicitInstantiationDefinition) + return false; + + // Otherwise, if the class doesn't have a key function (possibly + // anymore), the vtable must be defined here. + const CXXMethodDecl *keyFunction = + cgm.getASTContext().getCurrentKeyFunction(rd); + if (!keyFunction) + return false; + + // Otherwise, if we don't have a definition of the key function, the + // vtable must be defined somewhere else. + return !keyFunction->hasBody(); +} + /// This is a callback from Sema to tell us that a particular vtable is /// required to be emitted in this translation unit. /// diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index e19242c651034..9c425ab43b3d9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -100,6 +100,8 @@ class CIRGenVTables { /// is enabled) and the VTT (if the class has virtual bases). 
void generateClassData(const CXXRecordDecl *rd); + bool isVTableExternal(const clang::CXXRecordDecl *rd); + /// Returns the type of a vtable with the given layout. Normally a struct of /// arrays of pointers, with one struct element for each vtable in the vtable /// group. diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index c7b76e8372efc..3ebf460f7d34c 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -18,6 +18,7 @@ add_clang_library(clangCIR CIRGenCXXABI.cpp CIRGenBuiltin.cpp CIRGenDecl.cpp + CIRGenDeclCXX.cpp CIRGenDeclOpenACC.cpp CIRGenException.cpp CIRGenExpr.cpp @@ -31,6 +32,7 @@ add_clang_library(clangCIR CIRGenModule.cpp CIRGenOpenACC.cpp CIRGenOpenACCClause.cpp + CIRGenOpenACCRecipe.cpp CIRGenRecordLayoutBuilder.cpp CIRGenStmt.cpp CIRGenStmtOpenACC.cpp diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp index 58ef500446aa7..6b5cc808e9a29 100644 --- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp +++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp @@ -1355,9 +1355,11 @@ mlir::LogicalResult cir::GlobalOp::verify() { return success(); } -void cir::GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, - llvm::StringRef sym_name, mlir::Type sym_type, - bool isConstant, cir::GlobalLinkageKind linkage) { +void cir::GlobalOp::build( + OpBuilder &odsBuilder, OperationState &odsState, llvm::StringRef sym_name, + mlir::Type sym_type, bool isConstant, cir::GlobalLinkageKind linkage, + function_ref ctorBuilder, + function_ref dtorBuilder) { odsState.addAttribute(getSymNameAttrName(odsState.name), odsBuilder.getStringAttr(sym_name)); odsState.addAttribute(getSymTypeAttrName(odsState.name), @@ -1370,26 +1372,88 @@ void cir::GlobalOp::build(OpBuilder &odsBuilder, OperationState &odsState, cir::GlobalLinkageKindAttr::get(odsBuilder.getContext(), linkage); odsState.addAttribute(getLinkageAttrName(odsState.name), linkageAttr); + Region *ctorRegion = odsState.addRegion(); + if (ctorBuilder) { + odsBuilder.createBlock(ctorRegion); + ctorBuilder(odsBuilder, odsState.location); + } + + Region *dtorRegion = odsState.addRegion(); + if (dtorBuilder) { + odsBuilder.createBlock(dtorRegion); + dtorBuilder(odsBuilder, odsState.location); + } + odsState.addAttribute(getGlobalVisibilityAttrName(odsState.name), cir::VisibilityAttr::get(odsBuilder.getContext())); } +/// Given the region at `index`, or the parent operation if `index` is None, +/// return the successor regions. These are the regions that may be selected +/// during the flow of control. `operands` is a set of optional attributes that +/// correspond to a constant value for each operand, or null if that operand is +/// not a constant. +void cir::GlobalOp::getSuccessorRegions( + mlir::RegionBranchPoint point, SmallVectorImpl ®ions) { + // The `ctor` and `dtor` regions always branch back to the parent operation. + if (!point.isParent()) { + regions.push_back(RegionSuccessor()); + return; + } + + // Don't consider the ctor region if it is empty. + Region *ctorRegion = &this->getCtorRegion(); + if (ctorRegion->empty()) + ctorRegion = nullptr; + + // Don't consider the dtor region if it is empty. + Region *dtorRegion = &this->getCtorRegion(); + if (dtorRegion->empty()) + dtorRegion = nullptr; + + // If the condition isn't constant, both regions may be executed. 
+ if (ctorRegion) + regions.push_back(RegionSuccessor(ctorRegion)); + if (dtorRegion) + regions.push_back(RegionSuccessor(dtorRegion)); +} + static void printGlobalOpTypeAndInitialValue(OpAsmPrinter &p, cir::GlobalOp op, - TypeAttr type, - Attribute initAttr) { + TypeAttr type, Attribute initAttr, + mlir::Region &ctorRegion, + mlir::Region &dtorRegion) { + auto printType = [&]() { p << ": " << type; }; if (!op.isDeclaration()) { p << "= "; - // This also prints the type... - if (initAttr) - printConstant(p, initAttr); + if (!ctorRegion.empty()) { + p << "ctor "; + printType(); + p << " "; + p.printRegion(ctorRegion, + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/false); + } else { + // This also prints the type... + if (initAttr) + printConstant(p, initAttr); + } + + if (!dtorRegion.empty()) { + p << " dtor "; + p.printRegion(dtorRegion, + /*printEntryBlockArgs=*/false, + /*printBlockTerminators=*/false); + } } else { - p << ": " << type; + printType(); } } -static ParseResult -parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, - Attribute &initialValueAttr) { +static ParseResult parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, + TypeAttr &typeAttr, + Attribute &initialValueAttr, + mlir::Region &ctorRegion, + mlir::Region &dtorRegion) { mlir::Type opTy; if (parser.parseOptionalEqual().failed()) { // Absence of equal means a declaration, so we need to parse the type. @@ -1397,16 +1461,38 @@ parseGlobalOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, if (parser.parseColonType(opTy)) return failure(); } else { - // Parse constant with initializer, examples: - // cir.global @y = #cir.fp<1.250000e+00> : !cir.double - // cir.global @rgb = #cir.const_array<[...] : !cir.array> - if (parseConstantValue(parser, initialValueAttr).failed()) - return failure(); + // Parse contructor, example: + // cir.global @rgb = ctor : type { ... } + if (!parser.parseOptionalKeyword("ctor")) { + if (parser.parseColonType(opTy)) + return failure(); + auto parseLoc = parser.getCurrentLocation(); + if (parser.parseRegion(ctorRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + if (ensureRegionTerm(parser, ctorRegion, parseLoc).failed()) + return failure(); + } else { + // Parse constant with initializer, examples: + // cir.global @y = 3.400000e+00 : f32 + // cir.global @rgb = #cir.const_array<[...] : !cir.array> + if (parseConstantValue(parser, initialValueAttr).failed()) + return failure(); + + assert(mlir::isa(initialValueAttr) && + "Non-typed attrs shouldn't appear here."); + auto typedAttr = mlir::cast(initialValueAttr); + opTy = typedAttr.getType(); + } - assert(mlir::isa(initialValueAttr) && - "Non-typed attrs shouldn't appear here."); - auto typedAttr = mlir::cast(initialValueAttr); - opTy = typedAttr.getType(); + // Parse destructor, example: + // dtor { ... 
} + if (!parser.parseOptionalKeyword("dtor")) { + auto parseLoc = parser.getCurrentLocation(); + if (parser.parseRegion(dtorRegion, /*arguments=*/{}, /*argTypes=*/{})) + return failure(); + if (ensureRegionTerm(parser, dtorRegion, parseLoc).failed()) + return failure(); + } } typeAttr = TypeAttr::get(opTy); @@ -2302,14 +2388,23 @@ OpFoldResult cir::ComplexCreateOp::fold(FoldAdaptor adaptor) { //===----------------------------------------------------------------------===// LogicalResult cir::ComplexRealOp::verify() { - if (getType() != getOperand().getType().getElementType()) { + mlir::Type operandTy = getOperand().getType(); + if (auto complexOperandTy = mlir::dyn_cast(operandTy)) { + operandTy = complexOperandTy.getElementType(); + } + + if (getType() != operandTy) { emitOpError() << ": result type does not match operand type"; return failure(); } + return success(); } OpFoldResult cir::ComplexRealOp::fold(FoldAdaptor adaptor) { + if (!mlir::isa(getOperand().getType())) + return nullptr; + if (auto complexCreateOp = getOperand().getDefiningOp()) return complexCreateOp.getOperand(0); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 1596ccb6a2617..4bc7783175120 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -222,8 +222,9 @@ class CIRAttrToValue { return llvm::TypeSwitch(attr) .Case([&](auto attrT) { return visitCirAttr(attrT); }) + cir::ConstPtrAttr, cir::GlobalViewAttr, cir::TypeInfoAttr, + cir::VTableAttr, cir::ZeroAttr>( + [&](auto attrT) { return visitCirAttr(attrT); }) .Default([&](auto attrT) { return mlir::Value(); }); } @@ -1694,7 +1695,7 @@ CIRToLLVMGlobalOpLowering::matchAndRewriteRegionInitializedGlobal( // TODO: Generalize this handling when more types are needed here. assert((isa(init))); + cir::TypeInfoAttr, cir::VTableAttr, cir::ZeroAttr>(init))); // TODO(cir): once LLVM's dialect has proper equivalent attributes this // should be updated. For now, we use a custom op to initialize globals @@ -1710,6 +1711,11 @@ CIRToLLVMGlobalOpLowering::matchAndRewriteRegionInitializedGlobal( mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( cir::GlobalOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { + // If this global requires non-trivial initialization or destruction, + // that needs to be moved to runtime handlers during LoweringPrepare. + if (!op.getCtorRegion().empty() || !op.getDtorRegion().empty()) + return op.emitError() << "GlobalOp ctor and dtor regions should be removed " + "in LoweringPrepare"; std::optional init = op.getInitialValue(); @@ -1749,7 +1755,8 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite( } else if (mlir::isa(init.value())) { + cir::TypeInfoAttr, cir::VTableAttr, cir::ZeroAttr>( + init.value())) { // TODO(cir): once LLVM's dialect has proper equivalent attributes this // should be updated. For now, we use a custom op to initialize globals // to the appropriate value. @@ -1941,8 +1948,14 @@ mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite( // Pointer unary operations: + only. (++ and -- of pointers are implemented // with cir.ptr_stride, not cir.unary.) 
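Putting the GlobalOp printer and parser changes above together, a global with dynamic initialization now round-trips with optional ctor/dtor regions, in a form roughly like the examples in the parse comments (the symbol and record type are illustrative):

    cir.global @g = ctor : !rec_S {
      // ... construct @g ...
    } dtor {
      // ... destroy @g ...
    }

As the lowering check above notes, these regions are expected to be rewritten into runtime init/fini handlers during LoweringPrepare; CIRToLLVMGlobalOpLowering now rejects globals that still carry them.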
if (mlir::isa(elementType)) { - return op.emitError() - << "Unary operation on pointer types is not yet implemented"; + switch (op.getKind()) { + case cir::UnaryOpKind::Plus: + rewriter.replaceOp(op, adaptor.getInput()); + return mlir::success(); + default: + op.emitError() << "Unknown pointer unary operation during CIR lowering"; + return mlir::failure(); + } } return op.emitError() << "Unary operation has unsupported type: " @@ -2412,7 +2425,7 @@ static void prepareTypeConverter(mlir::LLVMTypeConverter &converter, // For instance, this CIR code: // // cir.func @foo(%arg0: !s32i) -> !s32i { -// %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool // cir.if %4 { // %5 = cir.const #cir.int<1> : !s32i // cir.return %5 : !s32i @@ -2986,8 +2999,13 @@ mlir::LogicalResult CIRToLLVMComplexRealOpLowering::matchAndRewrite( cir::ComplexRealOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { mlir::Type resultLLVMTy = getTypeConverter()->convertType(op.getType()); - rewriter.replaceOpWithNewOp( - op, resultLLVMTy, adaptor.getOperand(), llvm::ArrayRef{0}); + mlir::Value operand = adaptor.getOperand(); + if (mlir::isa(op.getOperand().getType())) { + operand = mlir::LLVM::ExtractValueOp::create( + rewriter, op.getLoc(), resultLLVMTy, operand, + llvm::ArrayRef{0}); + } + rewriter.replaceOp(op, operand); return mlir::success(); } diff --git a/clang/lib/CodeGen/BackendConsumer.h b/clang/lib/CodeGen/BackendConsumer.h index ad3adfca36785..b7bbb81074836 100644 --- a/clang/lib/CodeGen/BackendConsumer.h +++ b/clang/lib/CodeGen/BackendConsumer.h @@ -40,11 +40,6 @@ class BackendConsumer : public ASTConsumer { llvm::Timer LLVMIRGeneration; unsigned LLVMIRGenerationRefCount = 0; - /// True if we've finished generating IR. This prevents us from generating - /// additional LLVM IR after emitting output in HandleTranslationUnit. This - /// can happen when Clang plugins trigger additional AST deserialization. - bool IRGenFinished = false; - bool TimerIsEnabled = false; BackendAction Action; diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index 106363fa83e2b..64f1917739e12 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -437,7 +437,8 @@ static bool initTargetOptions(const CompilerInstance &CI, if (Options.BBSections == llvm::BasicBlockSection::List) { ErrorOr> MBOrErr = - MemoryBuffer::getFile(CodeGenOpts.BBSections.substr(5)); + CI.getVirtualFileSystem().getBufferForFile( + CodeGenOpts.BBSections.substr(5)); if (!MBOrErr) { Diags.Report(diag::err_fe_unable_to_load_basic_block_sections_file) << MBOrErr.getError().message(); @@ -785,7 +786,8 @@ static void addSanitizers(const Triple &TargetTriple, HWASanPass(SanitizerKind::KernelHWAddress, true); if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) { - MPM.addPass(DataFlowSanitizerPass(LangOpts.NoSanitizeFiles)); + MPM.addPass(DataFlowSanitizerPass(LangOpts.NoSanitizeFiles, + PB.getVirtualFileSystemPtr())); } }; if (ClSanitizeOnOptimizerEarlyEP) { @@ -837,9 +839,9 @@ void EmitAssemblyHelper::RunOptimizationPipeline( if (CodeGenOpts.hasProfileIRInstr()) // -fprofile-generate. 
PGOOpt = PGOOptions(getProfileGenName(CodeGenOpts), "", "", - CodeGenOpts.MemoryProfileUsePath, nullptr, - PGOOptions::IRInstr, PGOOptions::NoCSAction, - ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling, + CodeGenOpts.MemoryProfileUsePath, PGOOptions::IRInstr, + PGOOptions::NoCSAction, ClPGOColdFuncAttr, + CodeGenOpts.DebugInfoForProfiling, /*PseudoProbeForProfiling=*/false, CodeGenOpts.AtomicProfileUpdate); else if (CodeGenOpts.hasProfileIRUse()) { @@ -848,32 +850,30 @@ void EmitAssemblyHelper::RunOptimizationPipeline( : PGOOptions::NoCSAction; PGOOpt = PGOOptions(CodeGenOpts.ProfileInstrumentUsePath, "", CodeGenOpts.ProfileRemappingFile, - CodeGenOpts.MemoryProfileUsePath, VFS, - PGOOptions::IRUse, CSAction, ClPGOColdFuncAttr, + CodeGenOpts.MemoryProfileUsePath, PGOOptions::IRUse, + CSAction, ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling); } else if (!CodeGenOpts.SampleProfileFile.empty()) // -fprofile-sample-use PGOOpt = PGOOptions( CodeGenOpts.SampleProfileFile, "", CodeGenOpts.ProfileRemappingFile, - CodeGenOpts.MemoryProfileUsePath, VFS, PGOOptions::SampleUse, + CodeGenOpts.MemoryProfileUsePath, PGOOptions::SampleUse, PGOOptions::NoCSAction, ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling, CodeGenOpts.PseudoProbeForProfiling); else if (!CodeGenOpts.MemoryProfileUsePath.empty()) // -fmemory-profile-use (without any of the above options) - PGOOpt = PGOOptions("", "", "", CodeGenOpts.MemoryProfileUsePath, VFS, + PGOOpt = PGOOptions("", "", "", CodeGenOpts.MemoryProfileUsePath, PGOOptions::NoAction, PGOOptions::NoCSAction, ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling); else if (CodeGenOpts.PseudoProbeForProfiling) // -fpseudo-probe-for-profiling - PGOOpt = - PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr, - PGOOptions::NoAction, PGOOptions::NoCSAction, - ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling, true); + PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", PGOOptions::NoAction, + PGOOptions::NoCSAction, ClPGOColdFuncAttr, + CodeGenOpts.DebugInfoForProfiling, true); else if (CodeGenOpts.DebugInfoForProfiling) // -fdebug-info-for-profiling - PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr, - PGOOptions::NoAction, PGOOptions::NoCSAction, - ClPGOColdFuncAttr, true); + PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", PGOOptions::NoAction, + PGOOptions::NoCSAction, ClPGOColdFuncAttr, true); // Check to see if we want to generate a CS profile. if (CodeGenOpts.hasProfileCSIRInstr()) { @@ -889,7 +889,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline( PGOOpt->CSAction = PGOOptions::CSIRInstr; } else PGOOpt = PGOOptions("", getProfileGenName(CodeGenOpts), "", - /*MemoryProfile=*/"", nullptr, PGOOptions::NoAction, + /*MemoryProfile=*/"", PGOOptions::NoAction, PGOOptions::CSIRInstr, ClPGOColdFuncAttr, CodeGenOpts.DebugInfoForProfiling); } @@ -926,7 +926,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline( (CodeGenOpts.DebugPassManager || DebugPassStructure), CodeGenOpts.VerifyEach, PrintPassOpts); SI.registerCallbacks(PIC, &MAM); - PassBuilder PB(TM.get(), PTO, PGOOpt, &PIC); + PassBuilder PB(TM.get(), PTO, PGOOpt, &PIC, CI.getVirtualFileSystemPtr()); // Handle the assignment tracking feature options. 
switch (CodeGenOpts.getAssignmentTrackingMode()) { @@ -1090,8 +1090,9 @@ void EmitAssemblyHelper::RunOptimizationPipeline( if (std::optional Options = getGCOVOptions(CodeGenOpts, LangOpts)) PB.registerPipelineStartEPCallback( - [Options](ModulePassManager &MPM, OptimizationLevel Level) { - MPM.addPass(GCOVProfilerPass(*Options)); + [this, Options](ModulePassManager &MPM, OptimizationLevel Level) { + MPM.addPass( + GCOVProfilerPass(*Options, CI.getVirtualFileSystemPtr())); }); if (std::optional Options = getInstrProfOptions(CodeGenOpts, LangOpts)) @@ -1476,13 +1477,13 @@ void clang::EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts, } void clang::EmbedObject(llvm::Module *M, const CodeGenOptions &CGOpts, - DiagnosticsEngine &Diags) { + llvm::vfs::FileSystem &VFS, DiagnosticsEngine &Diags) { if (CGOpts.OffloadObjects.empty()) return; for (StringRef OffloadObject : CGOpts.OffloadObjects) { llvm::ErrorOr> ObjectOrErr = - llvm::MemoryBuffer::getFileOrSTDIN(OffloadObject); + VFS.getBufferForFile(OffloadObject); if (ObjectOrErr.getError()) { auto DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "could not open '%0' for embedding"); diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp index 9106c4cd8e139..4a3446abcc78f 100644 --- a/clang/lib/CodeGen/CGAtomic.cpp +++ b/clang/lib/CodeGen/CGAtomic.cpp @@ -734,7 +734,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr, CGF.Builder.getInt8(1), Order, Scope, E); RMWI->setVolatile(E->isVolatile()); - llvm::Value *Result = CGF.Builder.CreateIsNotNull(RMWI, "tobool"); + llvm::Value *Result = CGF.EmitToMemory( + CGF.Builder.CreateIsNotNull(RMWI, "tobool"), E->getType()); auto *I = CGF.Builder.CreateStore(Result, Dest); CGF.addInstToCurrentSourceAtom(I, Result); return; @@ -879,7 +880,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) { CharUnits MaxInlineWidth = getContext().toCharUnitsFromBits(MaxInlineWidthInBits); DiagnosticsEngine &Diags = CGM.getDiags(); - bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0; + bool Misaligned = !Ptr.getAlignment().isMultipleOf(TInfo.Width); bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits; if (Misaligned) { Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned) diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index f7c3dea257d50..9ee810c9d5775 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -4277,18 +4277,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::Value *Ptr = EmitScalarExpr(E->getArg(1)); llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType()); - CharUnits Align = CGM.getNaturalTypeAlignment(E->getType(), nullptr); - llvm::Value *AlignVal = - llvm::ConstantInt::get(Int32Ty, Align.getQuantity()); - llvm::Value *PassThru = llvm::PoisonValue::get(RetTy); if (E->getNumArgs() > 2) PassThru = EmitScalarExpr(E->getArg(2)); + CharUnits Align = CGM.getNaturalTypeAlignment( + E->getType()->getAs()->getElementType(), nullptr); + llvm::Value *AlignVal = + llvm::ConstantInt::get(Int32Ty, Align.getQuantity()); + llvm::Value *Result; if (BuiltinID == Builtin::BI__builtin_masked_load) { Function *F = - CGM.getIntrinsic(Intrinsic::masked_load, {RetTy, UnqualPtrTy}); + CGM.getIntrinsic(Intrinsic::masked_load, {RetTy, Ptr->getType()}); Result = Builder.CreateCall(F, {Ptr, AlignVal, Mask, PassThru}, "masked_load"); } else { @@ -4333,15 +4334,16 @@ 
RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, QualType ValTy = E->getArg(1)->getType(); llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy); - llvm::Type *PtrTy = Ptr->getType(); - CharUnits Align = CGM.getNaturalTypeAlignment(ValTy, nullptr); + CharUnits Align = CGM.getNaturalTypeAlignment( + E->getArg(1)->getType()->getAs()->getElementType(), + nullptr); llvm::Value *AlignVal = llvm::ConstantInt::get(Int32Ty, Align.getQuantity()); if (BuiltinID == Builtin::BI__builtin_masked_store) { - llvm::Function *F = - CGM.getIntrinsic(llvm::Intrinsic::masked_store, {ValLLTy, PtrTy}); + llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::masked_store, + {ValLLTy, Ptr->getType()}); Builder.CreateCall(F, {Val, Ptr, AlignVal, Mask}); } else { llvm::Function *F = diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index 0b2fce4244fb6..a931ce476b8ae 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -2438,7 +2438,10 @@ void CodeGenModule::ConstructAttributeList(StringRef Name, // Some ABIs may result in additional accesses to arguments that may // otherwise not be present. + std::optional MemAttrForPtrArgs; + bool AddedPotentialArgAccess = false; auto AddPotentialArgAccess = [&]() { + AddedPotentialArgAccess = true; llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory); if (A.isValid()) FuncAttrs.addMemoryAttr(A.getMemoryEffects() | @@ -2499,11 +2502,13 @@ void CodeGenModule::ConstructAttributeList(StringRef Name, // gcc specifies that 'const' functions have greater restrictions than // 'pure' functions, so they also cannot have infinite loops. FuncAttrs.addAttribute(llvm::Attribute::WillReturn); + MemAttrForPtrArgs = llvm::Attribute::ReadNone; } else if (TargetDecl->hasAttr()) { FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly()); FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); // gcc specifies that 'pure' functions cannot have infinite loops. 
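Aside (editorial sketch, not part of the patch): the ConstructAttributeList changes in this and the following hunk record which memory attribute (ReadNone for 'const', ReadOnly for 'pure') should later be copied onto pointer parameters whenever ABI lowering may introduce hidden argument accesses (AddPotentialArgAccess). The declarations below only illustrate the source shape this targets; whether the pointer parameters actually receive readnone/readonly in IR depends on how the target ABI lowers the by-value arguments.

// Illustrative declarations only.
struct Big { int Payload[32]; };

// GCC-style 'const': the function reads no memory, so any pointer introduced
// by passing Big indirectly can at most be marked readnone.
__attribute__((const)) int HashByValue(Big B);

// GCC-style 'pure': memory may be read but not written, so an ABI-introduced
// indirect argument can at most be marked readonly.
__attribute__((pure)) int SumByValue(Big B);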
FuncAttrs.addAttribute(llvm::Attribute::WillReturn); + MemAttrForPtrArgs = llvm::Attribute::ReadOnly; } else if (TargetDecl->hasAttr()) { FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly()); FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); @@ -3011,6 +3016,27 @@ void CodeGenModule::ConstructAttributeList(StringRef Name, } assert(ArgNo == FI.arg_size()); + ArgNo = 0; + if (AddedPotentialArgAccess && MemAttrForPtrArgs) { + llvm::FunctionType *FunctionType = FunctionType = + getTypes().GetFunctionType(FI); + for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), + E = FI.arg_end(); + I != E; ++I, ++ArgNo) { + if (I->info.isDirect() || I->info.isExpand() || + I->info.isCoerceAndExpand()) { + unsigned FirstIRArg, NumIRArgs; + std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); + for (unsigned i = FirstIRArg; i < FirstIRArg + NumIRArgs; ++i) { + if (FunctionType->getParamType(i)->isPointerTy()) { + ArgAttrs[i] = + ArgAttrs[i].addAttribute(getLLVMContext(), *MemAttrForPtrArgs); + } + } + } + } + } + AttrList = llvm::AttributeList::get( getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); diff --git a/clang/lib/CodeGen/CGCoroutine.cpp b/clang/lib/CodeGen/CGCoroutine.cpp index 827385f9c1a1f..b76450152203d 100644 --- a/clang/lib/CodeGen/CGCoroutine.cpp +++ b/clang/lib/CodeGen/CGCoroutine.cpp @@ -575,17 +575,19 @@ struct CallCoroEnd final : public EHScopeStack::Cleanup { llvm::Function *CoroEndFn = CGM.getIntrinsic(llvm::Intrinsic::coro_end); // See if we have a funclet bundle to associate coro.end with. (WinEH) auto Bundles = getBundlesForCoroEnd(CGF); - auto *CoroEnd = - CGF.Builder.CreateCall(CoroEndFn, - {NullPtr, CGF.Builder.getTrue(), - llvm::ConstantTokenNone::get(CoroEndFn->getContext())}, - Bundles); + CGF.Builder.CreateCall( + CoroEndFn, + {NullPtr, CGF.Builder.getTrue(), + llvm::ConstantTokenNone::get(CoroEndFn->getContext())}, + Bundles); if (Bundles.empty()) { // Otherwise, (landingpad model), create a conditional branch that leads // either to a cleanup block or a block with EH resume instruction. auto *ResumeBB = CGF.getEHResumeBlock(/*isCleanup=*/true); auto *CleanupContBB = CGF.createBasicBlock("cleanup.cont"); - CGF.Builder.CreateCondBr(CoroEnd, ResumeBB, CleanupContBB); + auto *CoroIsInRampFn = CGM.getIntrinsic(llvm::Intrinsic::coro_is_in_ramp); + auto *CoroIsInRamp = CGF.Builder.CreateCall(CoroIsInRampFn); + CGF.Builder.CreateCondBr(CoroIsInRamp, CleanupContBB, ResumeBB); CGF.EmitBlock(CleanupContBB); } } diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp index 12c7d48e20d67..fee6bc0cbb64b 100644 --- a/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/clang/lib/CodeGen/CGDebugInfo.cpp @@ -26,6 +26,7 @@ #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" +#include "clang/AST/LambdaCapture.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/VTableBuilder.h" @@ -1903,46 +1904,61 @@ CGDebugInfo::createInlinedSubprogram(StringRef FuncName, return SP; } +llvm::StringRef +CGDebugInfo::GetLambdaCaptureName(const LambdaCapture &Capture) { + if (Capture.capturesThis()) + return CGM.getCodeGenOpts().EmitCodeView ? 
"__this" : "this"; + + assert(Capture.capturesVariable()); + + const ValueDecl *CaptureDecl = Capture.getCapturedVar(); + assert(CaptureDecl && "Expected valid decl for captured variable."); + + return CaptureDecl->getName(); +} + void CGDebugInfo::CollectRecordLambdaFields( const CXXRecordDecl *CXXDecl, SmallVectorImpl &elements, llvm::DIType *RecordTy) { // For C++11 Lambdas a Field will be the same as a Capture, but the Capture // has the name and the location of the variable so we should iterate over // both concurrently. - const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(CXXDecl); RecordDecl::field_iterator Field = CXXDecl->field_begin(); unsigned fieldno = 0; for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(), E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) { - const LambdaCapture &C = *I; - if (C.capturesVariable()) { - SourceLocation Loc = C.getLocation(); - assert(!Field->isBitField() && "lambdas don't have bitfield members!"); - ValueDecl *V = C.getCapturedVar(); - StringRef VName = V->getName(); - llvm::DIFile *VUnit = getOrCreateFile(Loc); - auto Align = getDeclAlignIfRequired(V, CGM.getContext()); - llvm::DIType *FieldType = createFieldType( - VName, Field->getType(), Loc, Field->getAccess(), - layout.getFieldOffset(fieldno), Align, VUnit, RecordTy, CXXDecl); - elements.push_back(FieldType); - } else if (C.capturesThis()) { + const LambdaCapture &Capture = *I; + const uint64_t FieldOffset = + CGM.getContext().getASTRecordLayout(CXXDecl).getFieldOffset(fieldno); + + assert(!Field->isBitField() && "lambdas don't have bitfield members!"); + + SourceLocation Loc; + uint32_t Align = 0; + + if (Capture.capturesThis()) { // TODO: Need to handle 'this' in some way by probably renaming the // this of the lambda class and having a field member of 'this' or // by using AT_object_pointer for the function and having that be // used as 'this' for semantic references. - FieldDecl *f = *Field; - llvm::DIFile *VUnit = getOrCreateFile(f->getLocation()); - QualType type = f->getType(); - StringRef ThisName = - CGM.getCodeGenOpts().EmitCodeView ? "__this" : "this"; - llvm::DIType *fieldType = createFieldType( - ThisName, type, f->getLocation(), f->getAccess(), - layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl); - - elements.push_back(fieldType); + Loc = Field->getLocation(); + } else if (Capture.capturesVariable()) { + Loc = Capture.getLocation(); + + const ValueDecl *CaptureDecl = Capture.getCapturedVar(); + assert(CaptureDecl && "Expected valid decl for captured variable."); + + Align = getDeclAlignIfRequired(CaptureDecl, CGM.getContext()); + } else { + continue; } + + llvm::DIFile *VUnit = getOrCreateFile(Loc); + + elements.push_back(createFieldType( + GetLambdaCaptureName(Capture), Field->getType(), Loc, + Field->getAccess(), FieldOffset, Align, VUnit, RecordTy, CXXDecl)); } } diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h index f86077369a42a..78c3eb9c5792e 100644 --- a/clang/lib/CodeGen/CGDebugInfo.h +++ b/clang/lib/CodeGen/CGDebugInfo.h @@ -397,6 +397,7 @@ class CGDebugInfo { void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile *F, SmallVectorImpl &E, llvm::DICompositeType *RecordTy); + llvm::StringRef GetLambdaCaptureName(const LambdaCapture &Capture); /// If the C++ class has vtable info then insert appropriate debug /// info entry in EltTys vector. 
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp index a092b718412be..c52526c89f171 100644 --- a/clang/lib/CodeGen/CGExprCXX.cpp +++ b/clang/lib/CodeGen/CGExprCXX.cpp @@ -1376,58 +1376,6 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, llvm_unreachable("predeclared global operator new/delete is missing"); } -namespace { -/// The parameters to pass to a usual operator delete. -struct UsualDeleteParams { - TypeAwareAllocationMode TypeAwareDelete = TypeAwareAllocationMode::No; - bool DestroyingDelete = false; - bool Size = false; - AlignedAllocationMode Alignment = AlignedAllocationMode::No; -}; -} - -static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) { - UsualDeleteParams Params; - - const FunctionProtoType *FPT = FD->getType()->castAs(); - auto AI = FPT->param_type_begin(), AE = FPT->param_type_end(); - - if (FD->isTypeAwareOperatorNewOrDelete()) { - Params.TypeAwareDelete = TypeAwareAllocationMode::Yes; - assert(AI != AE); - ++AI; - } - - // The first argument after the type-identity parameter (if any) is - // always a void* (or C* for a destroying operator delete for class - // type C). - ++AI; - - // The next parameter may be a std::destroying_delete_t. - if (FD->isDestroyingOperatorDelete()) { - assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); - Params.DestroyingDelete = true; - assert(AI != AE); - ++AI; - } - - // Figure out what other parameters we should be implicitly passing. - if (AI != AE && (*AI)->isIntegerType()) { - Params.Size = true; - ++AI; - } else - assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); - - if (AI != AE && (*AI)->isAlignValT()) { - Params.Alignment = AlignedAllocationMode::Yes; - ++AI; - } else - assert(!isTypeAwareAllocation(Params.TypeAwareDelete)); - - assert(AI == AE && "unexpected usual deallocation function parameter"); - return Params; -} - namespace { /// A cleanup to call the given 'operator delete' function upon abnormal /// exit from a new expression. Templated on a traits type that deals with @@ -1505,7 +1453,7 @@ namespace { } else { // For a non-placement new-expression, 'operator delete' can take a // size and/or an alignment if it has the right parameters. - Params = getUsualDeleteParams(OperatorDelete); + Params = OperatorDelete->getUsualDeleteParams(); } assert(!Params.DestroyingDelete && @@ -1838,7 +1786,7 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD, const auto *DeleteFTy = DeleteFD->getType()->castAs(); CallArgList DeleteArgs; - auto Params = getUsualDeleteParams(DeleteFD); + auto Params = DeleteFD->getUsualDeleteParams(); auto ParamTypeIt = DeleteFTy->param_type_begin(); std::optional TagAlloca; diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp index b44dd9ecc717e..6407afc3d9447 100644 --- a/clang/lib/CodeGen/CGExprConstant.cpp +++ b/clang/lib/CodeGen/CGExprConstant.cpp @@ -433,7 +433,7 @@ llvm::Constant *ConstantAggregateBuilder::buildFrom( // All remaining elements must be the same type. 
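Aside (editorial example, not part of the patch): the local getUsualDeleteParams helper above is removed in favour of FunctionDecl::getUsualDeleteParams (per the new call sites); what it classifies are the optional parameters of a usual deallocation function: a type-identity tag for type-aware delete, std::destroying_delete_t, a std::size_t size, and a std::align_val_t alignment. The C++20 declarations below illustrate the overload shapes being classified; they are an example, not taken from the patch.

#include <cstddef>
#include <new>

struct Node {
  // Usual deallocation functions, in increasing order of the extra parameters
  // the classifier records: plain, sized, sized + aligned, and destroying.
  static void operator delete(void *P) noexcept;
  static void operator delete(void *P, std::size_t Sz) noexcept;
  static void operator delete(void *P, std::size_t Sz,
                              std::align_val_t Al) noexcept;
  static void operator delete(Node *P, std::destroying_delete_t) noexcept;
};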
if (Elems[I]->getType() != CommonType || - Offset(I) % ElemSize != 0) { + !Offset(I).isMultipleOf(ElemSize)) { CanEmitArray = false; break; } diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index 4fa25c5d66669..f319b176513f8 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -3672,17 +3672,19 @@ Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E, // If it's an l-value, load through the appropriate subobject l-value. // Note that we have to ask E because Op might be an l-value that // this won't work for, e.g. an Obj-C property. - if (E->isGLValue()) { + if (E->isGLValue()) { if (!PromotionType.isNull()) { CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr( Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true); - if (result.first) - result.first = CGF.EmitPromotedValue(result, PromotionType).first; - return result.first; - } else { - return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) - .getScalarVal(); + PromotionType = PromotionType->isAnyComplexType() + ? PromotionType + : CGF.getContext().getComplexType(PromotionType); + return result.first ? CGF.EmitPromotedValue(result, PromotionType).first + : result.first; } + + return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) + .getScalarVal(); } // Otherwise, calculate and project. return CGF.EmitComplexExpr(Op, false, true).first; @@ -3715,13 +3717,16 @@ Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E, if (!PromotionType.isNull()) { CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr( Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign); - if (result.second) - result.second = CGF.EmitPromotedValue(result, PromotionType).second; - return result.second; - } else { - return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) - .getScalarVal(); + PromotionType = PromotionType->isAnyComplexType() + ? PromotionType + : CGF.getContext().getComplexType(PromotionType); + return result.second + ? CGF.EmitPromotedValue(result, PromotionType).second + : result.second; } + + return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) + .getScalarVal(); } // Otherwise, calculate and project. 
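Aside (editorial example, not part of the patch): the VisitReal/VisitImag fix wraps a scalar promotion type back into the matching _Complex type before calling EmitPromotedValue on the loaded real/imaginary pair. The snippet below is a source pattern that exercises the glvalue path on targets where _Float16 arithmetic is promoted to float; whether promotion happens at all is target-dependent, so this is an illustration rather than a guaranteed code path.

// __real__/__imag__ applied to a glvalue of complex _Float16; on targets that
// promote _Float16 arithmetic, the promotion type handed to the complex
// emitter must itself be a complex type.
_Float16 _Complex C;

_Float16 RealPlusImag() {
  return __real__ C + __imag__ C;
}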
return CGF.EmitComplexExpr(Op, true, false).second; diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp index 7b5b924b1fe82..6c0fc8d7f07be 100644 --- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp +++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp @@ -352,6 +352,13 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID, SmallVector Args{OrderID, SpaceOp, RangeOp, IndexOp, Name}; return Builder.CreateIntrinsic(HandleTy, IntrinsicID, Args); } + case Builtin::BI__builtin_hlsl_resource_nonuniformindex: { + Value *IndexOp = EmitScalarExpr(E->getArg(0)); + llvm::Type *RetTy = ConvertType(E->getType()); + return Builder.CreateIntrinsic( + RetTy, CGM.getHLSLRuntime().getNonUniformResourceIndexIntrinsic(), + ArrayRef{IndexOp}); + } case Builtin::BI__builtin_hlsl_all: { Value *Op0 = EmitScalarExpr(E->getArg(0)); return Builder.CreateIntrinsic( @@ -540,6 +547,21 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID, retType, CGM.getHLSLRuntime().getIsInfIntrinsic(), ArrayRef{Op0}, nullptr, "hlsl.isinf"); } + case Builtin::BI__builtin_hlsl_elementwise_isnan: { + Value *Op0 = EmitScalarExpr(E->getArg(0)); + llvm::Type *Xty = Op0->getType(); + llvm::Type *retType = llvm::Type::getInt1Ty(this->getLLVMContext()); + if (Xty->isVectorTy()) { + auto *XVecTy = E->getArg(0)->getType()->castAs(); + retType = llvm::VectorType::get( + retType, ElementCount::getFixed(XVecTy->getNumElements())); + } + if (!E->getArg(0)->getType()->hasFloatingRepresentation()) + llvm_unreachable("isnan operand must have a float representation"); + return Builder.CreateIntrinsic( + retType, CGM.getHLSLRuntime().getIsNaNIntrinsic(), + ArrayRef{Op0}, nullptr, "hlsl.isnan"); + } case Builtin::BI__builtin_hlsl_mad: { Value *M = EmitScalarExpr(E->getArg(0)); Value *A = EmitScalarExpr(E->getArg(1)); diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp index cf018c8c7de2a..ede1780592bf5 100644 --- a/clang/lib/CodeGen/CGHLSLRuntime.cpp +++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp @@ -21,6 +21,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/Attrs.inc" #include "clang/AST/Decl.h" +#include "clang/AST/HLSLResource.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/Type.h" #include "clang/Basic/TargetOptions.h" @@ -131,35 +132,24 @@ static CXXMethodDecl *lookupMethod(CXXRecordDecl *Record, StringRef Name, static CXXMethodDecl *lookupResourceInitMethodAndSetupArgs( CodeGenModule &CGM, CXXRecordDecl *ResourceDecl, llvm::Value *Range, - llvm::Value *Index, StringRef Name, HLSLResourceBindingAttr *RBA, - HLSLVkBindingAttr *VkBinding, CallArgList &Args) { - assert((VkBinding || RBA) && "at least one a binding attribute expected"); + llvm::Value *Index, StringRef Name, ResourceBindingAttrs &Binding, + CallArgList &Args) { + assert(Binding.hasBinding() && "at least one binding attribute expected"); ASTContext &AST = CGM.getContext(); - std::optional RegisterSlot; - uint32_t SpaceNo = 0; - if (VkBinding) { - RegisterSlot = VkBinding->getBinding(); - SpaceNo = VkBinding->getSet(); - } else { - if (RBA->hasRegisterSlot()) - RegisterSlot = RBA->getSlotNumber(); - SpaceNo = RBA->getSpaceNumber(); - } - CXXMethodDecl *CreateMethod = nullptr; Value *NameStr = buildNameForResource(Name, CGM); - Value *Space = llvm::ConstantInt::get(CGM.IntTy, SpaceNo); + Value *Space = llvm::ConstantInt::get(CGM.IntTy, Binding.getSpace()); - if (RegisterSlot.has_value()) { + if (Binding.isExplicit()) { // explicit binding - auto *RegSlot = llvm::ConstantInt::get(CGM.IntTy, 
RegisterSlot.value()); + auto *RegSlot = llvm::ConstantInt::get(CGM.IntTy, Binding.getSlot()); Args.add(RValue::get(RegSlot), AST.UnsignedIntTy); CreateMethod = lookupMethod(ResourceDecl, "__createFromBinding", SC_Static); } else { // implicit binding auto *OrderID = - llvm::ConstantInt::get(CGM.IntTy, RBA->getImplicitBindingOrderID()); + llvm::ConstantInt::get(CGM.IntTy, Binding.getImplicitOrderID()); Args.add(RValue::get(OrderID), AST.UnsignedIntTy); CreateMethod = lookupMethod(ResourceDecl, "__createFromImplicitBinding", SC_Static); @@ -194,8 +184,8 @@ static std::optional initializeLocalResourceArray( CodeGenFunction &CGF, CXXRecordDecl *ResourceDecl, const ConstantArrayType *ArrayTy, AggValueSlot &ValueSlot, llvm::Value *Range, llvm::Value *StartIndex, StringRef ResourceName, - HLSLResourceBindingAttr *RBA, HLSLVkBindingAttr *VkBinding, - ArrayRef PrevGEPIndices, SourceLocation ArraySubsExprLoc) { + ResourceBindingAttrs &Binding, ArrayRef PrevGEPIndices, + SourceLocation ArraySubsExprLoc) { ASTContext &AST = CGF.getContext(); llvm::IntegerType *IntTy = CGF.CGM.IntTy; @@ -220,7 +210,7 @@ static std::optional initializeLocalResourceArray( } std::optional MaybeIndex = initializeLocalResourceArray( CGF, ResourceDecl, SubArrayTy, ValueSlot, Range, Index, ResourceName, - RBA, VkBinding, GEPIndices, ArraySubsExprLoc); + Binding, GEPIndices, ArraySubsExprLoc); if (!MaybeIndex) return std::nullopt; Index = *MaybeIndex; @@ -244,8 +234,7 @@ static std::optional initializeLocalResourceArray( CallArgList Args; CXXMethodDecl *CreateMethod = lookupResourceInitMethodAndSetupArgs( - CGF.CGM, ResourceDecl, Range, Index, ResourceName, RBA, VkBinding, - Args); + CGF.CGM, ResourceDecl, Range, Index, ResourceName, Binding, Args); if (!CreateMethod) // This can happen if someone creates an array of structs that looks like @@ -439,14 +428,7 @@ void CGHLSLRuntime::addBuffer(const HLSLBufferDecl *BufDecl) { emitBufferGlobalsAndMetadata(BufDecl, BufGV); // Initialize cbuffer from binding (implicit or explicit) - if (HLSLVkBindingAttr *VkBinding = BufDecl->getAttr()) { - initializeBufferFromBinding(BufDecl, BufGV, VkBinding); - } else { - HLSLResourceBindingAttr *RBA = BufDecl->getAttr(); - assert(RBA && - "cbuffer/tbuffer should always have resource binding attribute"); - initializeBufferFromBinding(BufDecl, BufGV, RBA); - } + initializeBufferFromBinding(BufDecl, BufGV); } void CGHLSLRuntime::addRootSignature( @@ -810,44 +792,29 @@ static void initializeBuffer(CodeGenModule &CGM, llvm::GlobalVariable *GV, } void CGHLSLRuntime::initializeBufferFromBinding(const HLSLBufferDecl *BufDecl, - llvm::GlobalVariable *GV, - HLSLVkBindingAttr *VkBinding) { - assert(VkBinding && "expect a nonnull binding attribute"); - auto *Index = llvm::ConstantInt::get(CGM.IntTy, 0); - auto *RangeSize = llvm::ConstantInt::get(CGM.IntTy, 1); - auto *Set = llvm::ConstantInt::get(CGM.IntTy, VkBinding->getSet()); - auto *Binding = llvm::ConstantInt::get(CGM.IntTy, VkBinding->getBinding()); - Value *Name = buildNameForResource(BufDecl->getName(), CGM); - llvm::Intrinsic::ID IntrinsicID = - CGM.getHLSLRuntime().getCreateHandleFromBindingIntrinsic(); - - SmallVector Args{Set, Binding, RangeSize, Index, Name}; - initializeBuffer(CGM, GV, IntrinsicID, Args); -} + llvm::GlobalVariable *GV) { + ResourceBindingAttrs Binding(BufDecl); + assert(Binding.hasBinding() && + "cbuffer/tbuffer should always have resource binding attribute"); -void CGHLSLRuntime::initializeBufferFromBinding(const HLSLBufferDecl *BufDecl, - llvm::GlobalVariable *GV, - 
HLSLResourceBindingAttr *RBA) { - assert(RBA && "expect a nonnull binding attribute"); auto *Index = llvm::ConstantInt::get(CGM.IntTy, 0); auto *RangeSize = llvm::ConstantInt::get(CGM.IntTy, 1); - auto *Space = llvm::ConstantInt::get(CGM.IntTy, RBA->getSpaceNumber()); + auto *Space = llvm::ConstantInt::get(CGM.IntTy, Binding.getSpace()); Value *Name = buildNameForResource(BufDecl->getName(), CGM); - llvm::Intrinsic::ID IntrinsicID = - RBA->hasRegisterSlot() - ? CGM.getHLSLRuntime().getCreateHandleFromBindingIntrinsic() - : CGM.getHLSLRuntime().getCreateHandleFromImplicitBindingIntrinsic(); - // buffer with explicit binding - if (RBA->hasRegisterSlot()) { - auto *RegSlot = llvm::ConstantInt::get(CGM.IntTy, RBA->getSlotNumber()); + if (Binding.isExplicit()) { + llvm::Intrinsic::ID IntrinsicID = + CGM.getHLSLRuntime().getCreateHandleFromBindingIntrinsic(); + auto *RegSlot = llvm::ConstantInt::get(CGM.IntTy, Binding.getSlot()); SmallVector Args{Space, RegSlot, RangeSize, Index, Name}; initializeBuffer(CGM, GV, IntrinsicID, Args); } else { // buffer with implicit binding + llvm::Intrinsic::ID IntrinsicID = + CGM.getHLSLRuntime().getCreateHandleFromImplicitBindingIntrinsic(); auto *OrderID = - llvm::ConstantInt::get(CGM.IntTy, RBA->getImplicitBindingOrderID()); + llvm::ConstantInt::get(CGM.IntTy, Binding.getImplicitOrderID()); SmallVector Args{OrderID, Space, RangeSize, Index, Name}; initializeBuffer(CGM, GV, IntrinsicID, Args); } @@ -960,9 +927,9 @@ std::optional CGHLSLRuntime::emitResourceArraySubscriptExpr( // Find binding info for the resource array. For implicit binding // an HLSLResourceBindingAttr should have been added by SemaHLSL. - HLSLVkBindingAttr *VkBinding = ArrayDecl->getAttr(); - HLSLResourceBindingAttr *RBA = ArrayDecl->getAttr(); - assert((VkBinding || RBA) && "resource array must have a binding attribute"); + ResourceBindingAttrs Binding(ArrayDecl); + assert((Binding.hasBinding()) && + "resource array must have a binding attribute"); // Find the individual resource type. 
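Aside (editorial sketch, not part of the patch): the separate HLSLVkBindingAttr / HLSLResourceBindingAttr code paths are folded behind the ResourceBindingAttrs view (clang/AST/HLSLResource.h per the new include), and call sites only ask hasBinding / isExplicit / getSpace / getSlot / getImplicitOrderID. The stand-in class below is hypothetical and only mirrors that query surface as inferred from the call sites above; it is not the real ResourceBindingAttrs.

#include <cstdint>
#include <optional>

// Hypothetical stand-in: normalises "explicit register slot" vs "implicit
// binding order" behind one explicit-or-implicit query interface.
class BindingView {
  std::optional<uint32_t> Slot;  // engaged only for explicit bindings
  uint32_t Space = 0;
  uint32_t ImplicitOrderID = 0;  // meaningful only when Slot is empty
public:
  BindingView(std::optional<uint32_t> S, uint32_t Sp, uint32_t Order)
      : Slot(S), Space(Sp), ImplicitOrderID(Order) {}
  bool isExplicit() const { return Slot.has_value(); }
  uint32_t getSpace() const { return Space; }
  uint32_t getSlot() const { return *Slot; }
  uint32_t getImplicitOrderID() const { return ImplicitOrderID; }
};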
QualType ResultTy = ArraySubsExpr->getType(); @@ -992,7 +959,7 @@ std::optional CGHLSLRuntime::emitResourceArraySubscriptExpr( CallArgList Args; CXXMethodDecl *CreateMethod = lookupResourceInitMethodAndSetupArgs( CGF.CGM, ResourceTy->getAsCXXRecordDecl(), Range, Index, - ArrayDecl->getName(), RBA, VkBinding, Args); + ArrayDecl->getName(), Binding, Args); if (!CreateMethod) // This can happen if someone creates an array of structs that looks like @@ -1009,8 +976,8 @@ std::optional CGHLSLRuntime::emitResourceArraySubscriptExpr( cast(ResultTy.getTypePtr()); std::optional EndIndex = initializeLocalResourceArray( CGF, ResourceTy->getAsCXXRecordDecl(), ArrayTy, ValueSlot, Range, Index, - ArrayDecl->getName(), RBA, VkBinding, - {llvm::ConstantInt::get(CGM.IntTy, 0)}, ArraySubsExpr->getExprLoc()); + ArrayDecl->getName(), Binding, {llvm::ConstantInt::get(CGM.IntTy, 0)}, + ArraySubsExpr->getExprLoc()); if (!EndIndex) return std::nullopt; } diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h index 370f3d5c5d30d..7c6c2850fd4d4 100644 --- a/clang/lib/CodeGen/CGHLSLRuntime.h +++ b/clang/lib/CodeGen/CGHLSLRuntime.h @@ -95,6 +95,7 @@ class CGHLSLRuntime { GENERATE_HLSL_INTRINSIC_FUNCTION(FlattenedThreadIdInGroup, flattened_thread_id_in_group) GENERATE_HLSL_INTRINSIC_FUNCTION(IsInf, isinf) + GENERATE_HLSL_INTRINSIC_FUNCTION(IsNaN, isnan) GENERATE_HLSL_INTRINSIC_FUNCTION(Lerp, lerp) GENERATE_HLSL_INTRINSIC_FUNCTION(Normalize, normalize) GENERATE_HLSL_INTRINSIC_FUNCTION(Rsqrt, rsqrt) @@ -129,6 +130,8 @@ class CGHLSLRuntime { resource_handlefrombinding) GENERATE_HLSL_INTRINSIC_FUNCTION(CreateHandleFromImplicitBinding, resource_handlefromimplicitbinding) + GENERATE_HLSL_INTRINSIC_FUNCTION(NonUniformResourceIndex, + resource_nonuniformindex) GENERATE_HLSL_INTRINSIC_FUNCTION(BufferUpdateCounter, resource_updatecounter) GENERATE_HLSL_INTRINSIC_FUNCTION(GroupMemoryBarrierWithGroupSync, group_memory_barrier_with_group_sync) @@ -197,11 +200,7 @@ class CGHLSLRuntime { void emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl, llvm::GlobalVariable *BufGV); void initializeBufferFromBinding(const HLSLBufferDecl *BufDecl, - llvm::GlobalVariable *GV, - HLSLVkBindingAttr *VkBinding); - void initializeBufferFromBinding(const HLSLBufferDecl *BufDecl, - llvm::GlobalVariable *GV, - HLSLResourceBindingAttr *RBA); + llvm::GlobalVariable *GV); llvm::Triple::ArchType getArch(); llvm::DenseMap LayoutTypes; diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index 60f30a1334d6d..dbcce9b86ad52 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -5367,7 +5367,7 @@ IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC, // Ignore scan requests that don't start at an even multiple of the // word size. We can't encode them. - if ((beginOfScan % WordSize) != 0) + if (!beginOfScan.isMultipleOf(WordSize)) continue; // Ignore scan requests that start before the instance start. 
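Aside (editorial sketch, not part of the patch): several hunks in this patch replace '%'-based alignment tests with isMultipleOf calls on CharUnits (CGAtomic, CGExprConstant, CGObjCMac, CGRecordLayoutBuilder). The stand-in type below only illustrates that the two spellings are assumed to be equivalent; it is not the real CharUnits.

#include <cassert>
#include <cstdint>

struct Units {
  int64_t Quantity;
  int64_t operator%(Units N) const { return Quantity % N.Quantity; }
  bool isMultipleOf(Units N) const { return Quantity % N.Quantity == 0; }
};

int main() {
  Units PtrAlign{4}, AtomicWidth{8};
  // Old and new spelling of the same misalignment test.
  bool MisalignedOld = (PtrAlign % AtomicWidth) != 0;
  bool MisalignedNew = !PtrAlign.isMultipleOf(AtomicWidth);
  assert(MisalignedOld == MisalignedNew);
  return 0;
}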
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index a503aaf613e30..8cda583313ca4 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -1542,15 +1542,14 @@ static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc( SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(BeginLoc); - llvm::sys::fs::UniqueID ID; - if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) { + if (!CGM.getFileSystem()->exists(PLoc.getFilename())) PLoc = SM.getPresumedLoc(BeginLoc, /*UseLineDirectives=*/false); - } return std::pair(PLoc.getFilename(), PLoc.getLine()); }; - return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, ParentName); + return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, + *CGM.getFileSystem(), ParentName); } ConstantAddress CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) { @@ -2703,7 +2702,8 @@ llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF, } llvm::Value *CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF, - const Expr *Message) { + const Expr *Message, + SourceLocation Loc) { if (!Message) return llvm::ConstantPointerNull::get(CGF.VoidPtrTy); return CGF.EmitScalarExpr(Message); @@ -2713,11 +2713,13 @@ llvm::Value * CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF, const OMPMessageClause *MessageClause) { return emitMessageClause( - CGF, MessageClause ? MessageClause->getMessageString() : nullptr); + CGF, MessageClause ? MessageClause->getMessageString() : nullptr, + MessageClause->getBeginLoc()); } llvm::Value * -CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity) { +CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity, + SourceLocation Loc) { // OpenMP 6.0, 10.4: "If no severity clause is specified then the effect is // as if sev-level is fatal." return llvm::ConstantInt::get(CGM.Int32Ty, @@ -2727,13 +2729,15 @@ CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity) { llvm::Value * CGOpenMPRuntime::emitSeverityClause(const OMPSeverityClause *SeverityClause) { return emitSeverityClause(SeverityClause ? SeverityClause->getSeverityKind() - : OMPC_SEVERITY_unknown); + : OMPC_SEVERITY_unknown, + SeverityClause->getBeginLoc()); } void CGOpenMPRuntime::emitNumThreadsClause( CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity, - const Expr *Message) { + SourceLocation SeverityLoc, const Expr *Message, + SourceLocation MessageLoc) { if (!CGF.HaveInsertPoint()) return; llvm::SmallVector Args( @@ -2745,8 +2749,8 @@ void CGOpenMPRuntime::emitNumThreadsClause( RuntimeFunction FnID = OMPRTL___kmpc_push_num_threads; if (Modifier == OMPC_NUMTHREADS_strict) { FnID = OMPRTL___kmpc_push_num_threads_strict; - Args.push_back(emitSeverityClause(Severity)); - Args.push_back(emitMessageClause(CGF, Message)); + Args.push_back(emitSeverityClause(Severity, SeverityLoc)); + Args.push_back(emitMessageClause(CGF, Message, MessageLoc)); } CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args); @@ -6799,6 +6803,240 @@ LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE(); // code for that information. class MappableExprsHandler { public: + /// Custom comparator for attach-pointer expressions that compares them by + /// complexity (i.e. 
their component-depth) first, then by the order in which + /// they were computed by collectAttachPtrExprInfo(), if they are semantically + /// different. + struct AttachPtrExprComparator { + const MappableExprsHandler *Handler = nullptr; + // Cache of previous equality comparison results. + mutable llvm::DenseMap, bool> + CachedEqualityComparisons; + + AttachPtrExprComparator(const MappableExprsHandler *H) : Handler(H) {} + + // Return true iff LHS is "less than" RHS. + bool operator()(const Expr *LHS, const Expr *RHS) const { + if (LHS == RHS) + return false; + + // First, compare by complexity (depth) + const auto ItLHS = Handler->AttachPtrComponentDepthMap.find(LHS); + const auto ItRHS = Handler->AttachPtrComponentDepthMap.find(RHS); + + std::optional DepthLHS = + (ItLHS != Handler->AttachPtrComponentDepthMap.end()) ? ItLHS->second + : std::nullopt; + std::optional DepthRHS = + (ItRHS != Handler->AttachPtrComponentDepthMap.end()) ? ItRHS->second + : std::nullopt; + + // std::nullopt (no attach pointer) has lowest complexity + if (!DepthLHS.has_value() && !DepthRHS.has_value()) { + // Both have same complexity, now check semantic equality + if (areEqual(LHS, RHS)) + return false; + // Different semantically, compare by computation order + return wasComputedBefore(LHS, RHS); + } + if (!DepthLHS.has_value()) + return true; // LHS has lower complexity + if (!DepthRHS.has_value()) + return false; // RHS has lower complexity + + // Both have values, compare by depth (lower depth = lower complexity) + if (DepthLHS.value() != DepthRHS.value()) + return DepthLHS.value() < DepthRHS.value(); + + // Same complexity, now check semantic equality + if (areEqual(LHS, RHS)) + return false; + // Different semantically, compare by computation order + return wasComputedBefore(LHS, RHS); + } + + public: + /// Return true if \p LHS and \p RHS are semantically equal. Uses pre-cached + /// results, if available, otherwise does a recursive semantic comparison. + bool areEqual(const Expr *LHS, const Expr *RHS) const { + // Check cache first for faster lookup + const auto CachedResultIt = CachedEqualityComparisons.find({LHS, RHS}); + if (CachedResultIt != CachedEqualityComparisons.end()) + return CachedResultIt->second; + + bool ComparisonResult = areSemanticallyEqual(LHS, RHS); + + // Cache the result for future lookups (both orders since semantic + // equality is commutative) + CachedEqualityComparisons[{LHS, RHS}] = ComparisonResult; + CachedEqualityComparisons[{RHS, LHS}] = ComparisonResult; + return ComparisonResult; + } + + /// Compare the two attach-ptr expressions by their computation order. + /// Returns true iff LHS was computed before RHS by + /// collectAttachPtrExprInfo(). + bool wasComputedBefore(const Expr *LHS, const Expr *RHS) const { + const size_t &OrderLHS = Handler->AttachPtrComputationOrderMap.at(LHS); + const size_t &OrderRHS = Handler->AttachPtrComputationOrderMap.at(RHS); + + return OrderLHS < OrderRHS; + } + + private: + /// Helper function to compare attach-pointer expressions semantically. + /// This function handles various expression types that can be part of an + /// attach-pointer. + /// TODO: Not urgent, but we should ideally return true when comparing + /// `p[10]`, `*(p + 10)`, `*(p + 5 + 5)`, `p[10:1]` etc. 
+ bool areSemanticallyEqual(const Expr *LHS, const Expr *RHS) const { + if (LHS == RHS) + return true; + + // If only one is null, they aren't equal + if (!LHS || !RHS) + return false; + + ASTContext &Ctx = Handler->CGF.getContext(); + // Strip away parentheses and no-op casts to get to the core expression + LHS = LHS->IgnoreParenNoopCasts(Ctx); + RHS = RHS->IgnoreParenNoopCasts(Ctx); + + // Direct pointer comparison of the underlying expressions + if (LHS == RHS) + return true; + + // Check if the expression classes match + if (LHS->getStmtClass() != RHS->getStmtClass()) + return false; + + // Handle DeclRefExpr (variable references) + if (const auto *LD = dyn_cast(LHS)) { + const auto *RD = dyn_cast(RHS); + if (!RD) + return false; + return LD->getDecl()->getCanonicalDecl() == + RD->getDecl()->getCanonicalDecl(); + } + + // Handle ArraySubscriptExpr (array indexing like a[i]) + if (const auto *LA = dyn_cast(LHS)) { + const auto *RA = dyn_cast(RHS); + if (!RA) + return false; + return areSemanticallyEqual(LA->getBase(), RA->getBase()) && + areSemanticallyEqual(LA->getIdx(), RA->getIdx()); + } + + // Handle MemberExpr (member access like s.m or p->m) + if (const auto *LM = dyn_cast(LHS)) { + const auto *RM = dyn_cast(RHS); + if (!RM) + return false; + if (LM->getMemberDecl()->getCanonicalDecl() != + RM->getMemberDecl()->getCanonicalDecl()) + return false; + return areSemanticallyEqual(LM->getBase(), RM->getBase()); + } + + // Handle UnaryOperator (unary operations like *p, &x, etc.) + if (const auto *LU = dyn_cast(LHS)) { + const auto *RU = dyn_cast(RHS); + if (!RU) + return false; + if (LU->getOpcode() != RU->getOpcode()) + return false; + return areSemanticallyEqual(LU->getSubExpr(), RU->getSubExpr()); + } + + // Handle BinaryOperator (binary operations like p + offset) + if (const auto *LB = dyn_cast(LHS)) { + const auto *RB = dyn_cast(RHS); + if (!RB) + return false; + if (LB->getOpcode() != RB->getOpcode()) + return false; + return areSemanticallyEqual(LB->getLHS(), RB->getLHS()) && + areSemanticallyEqual(LB->getRHS(), RB->getRHS()); + } + + // Handle ArraySectionExpr (array sections like a[0:1]) + // Attach pointers should not contain array-sections, but currently we + // don't emit an error. 
+ if (const auto *LAS = dyn_cast(LHS)) { + const auto *RAS = dyn_cast(RHS); + if (!RAS) + return false; + return areSemanticallyEqual(LAS->getBase(), RAS->getBase()) && + areSemanticallyEqual(LAS->getLowerBound(), + RAS->getLowerBound()) && + areSemanticallyEqual(LAS->getLength(), RAS->getLength()); + } + + // Handle CastExpr (explicit casts) + if (const auto *LC = dyn_cast(LHS)) { + const auto *RC = dyn_cast(RHS); + if (!RC) + return false; + if (LC->getCastKind() != RC->getCastKind()) + return false; + return areSemanticallyEqual(LC->getSubExpr(), RC->getSubExpr()); + } + + // Handle CXXThisExpr (this pointer) + if (isa(LHS) && isa(RHS)) + return true; + + // Handle IntegerLiteral (integer constants) + if (const auto *LI = dyn_cast(LHS)) { + const auto *RI = dyn_cast(RHS); + if (!RI) + return false; + return LI->getValue() == RI->getValue(); + } + + // Handle CharacterLiteral (character constants) + if (const auto *LC = dyn_cast(LHS)) { + const auto *RC = dyn_cast(RHS); + if (!RC) + return false; + return LC->getValue() == RC->getValue(); + } + + // Handle FloatingLiteral (floating point constants) + if (const auto *LF = dyn_cast(LHS)) { + const auto *RF = dyn_cast(RHS); + if (!RF) + return false; + // Use bitwise comparison for floating point literals + return LF->getValue().bitwiseIsEqual(RF->getValue()); + } + + // Handle StringLiteral (string constants) + if (const auto *LS = dyn_cast(LHS)) { + const auto *RS = dyn_cast(RHS); + if (!RS) + return false; + return LS->getString() == RS->getString(); + } + + // Handle CXXNullPtrLiteralExpr (nullptr) + if (isa(LHS) && isa(RHS)) + return true; + + // Handle CXXBoolLiteralExpr (true/false) + if (const auto *LB = dyn_cast(LHS)) { + const auto *RB = dyn_cast(RHS); + if (!RB) + return false; + return LB->getValue() == RB->getValue(); + } + + // Fallback for other forms - use the existing comparison method + return Expr::isSameComparisonOperand(LHS, RHS); + } + }; + /// Get the offset of the OMP_MAP_MEMBER_OF field. static unsigned getFlagMemberOffset() { unsigned Offset = 0; @@ -6876,6 +7114,45 @@ class MappableExprsHandler { bool HasCompleteRecord = false; }; + /// A struct to store the attach pointer and pointee information, to be used + /// when emitting an attach entry. + struct AttachInfoTy { + Address AttachPtrAddr = Address::invalid(); + Address AttachPteeAddr = Address::invalid(); + const ValueDecl *AttachPtrDecl = nullptr; + const Expr *AttachMapExpr = nullptr; + + bool isValid() const { + return AttachPtrAddr.isValid() && AttachPteeAddr.isValid(); + } + }; + + /// Check if there's any component list where the attach pointer expression + /// matches the given captured variable. + bool hasAttachEntryForCapturedVar(const ValueDecl *VD) const { + for (const auto &AttachEntry : AttachPtrExprMap) { + if (AttachEntry.second) { + // Check if the attach pointer expression is a DeclRefExpr that + // references the captured variable + if (const auto *DRE = dyn_cast(AttachEntry.second)) + if (DRE->getDecl() == VD) + return true; + } + } + return false; + } + + /// Get the previously-cached attach pointer for a component list, if-any. + const Expr *getAttachPtrExpr( + OMPClauseMappableExprCommon::MappableExprComponentListRef Components) + const { + const auto It = AttachPtrExprMap.find(Components); + if (It != AttachPtrExprMap.end()) + return It->second; + + return nullptr; + } + private: /// Kind that defines how a device pointer has to be returned. 
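Aside (editorial sketch, not part of the patch): AttachPtrExprComparator orders attach-pointer expressions by an optional component depth (no attach pointer, i.e. absent depth, sorts first), falls back to memoised semantic equality, and finally tie-breaks on the order in which collectAttachPtrExprInfo first saw each expression. Below is a self-contained sketch of that ordering policy over plain strings; the two maps stand in for AttachPtrComponentDepthMap and AttachPtrComputationOrderMap, and the equality memoisation is omitted for brevity.

#include <cassert>
#include <cstddef>
#include <map>
#include <optional>
#include <string>

// Orders keys by optional depth (no depth first), then by first-seen order;
// equal keys compare "not less" in both directions, as a strict weak ordering
// requires.
struct DepthThenOrderLess {
  const std::map<std::string, std::optional<unsigned>> *Depth;
  const std::map<std::string, std::size_t> *FirstSeen;

  bool operator()(const std::string &L, const std::string &R) const {
    if (L == R)
      return false;
    std::optional<unsigned> DL = Depth->at(L), DR = Depth->at(R);
    if (DL.has_value() != DR.has_value())
      return !DL.has_value();          // "no attach pointer" sorts first
    if (DL && DR && *DL != *DR)
      return *DL < *DR;                // shallower expression sorts first
    return FirstSeen->at(L) < FirstSeen->at(R);
  }
};

int main() {
  std::map<std::string, std::optional<unsigned>> Depth{
      {"", std::nullopt}, {"p", 1}, {"s.p", 2}};
  std::map<std::string, std::size_t> FirstSeen{{"", 0}, {"p", 1}, {"s.p", 2}};
  DepthThenOrderLess Less{&Depth, &FirstSeen};
  assert(Less("", "p") && Less("p", "s.p") && !Less("s.p", "p"));
  return 0;
}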
struct MapInfo { @@ -6948,6 +7225,27 @@ class MappableExprsHandler { /// Map between lambda declarations and their map type. llvm::DenseMap LambdasMap; + /// Map from component lists to their attach pointer expressions. + llvm::DenseMap + AttachPtrExprMap; + + /// Map from attach pointer expressions to their component depth. + /// nullptr key has std::nullopt depth. This can be used to order attach-ptr + /// expressions with increasing/decreasing depth. + /// The component-depth of `nullptr` (i.e. no attach-ptr) is `std::nullopt`. + /// TODO: Not urgent, but we should ideally use the number of pointer + /// dereferences in an expr as an indicator of its complexity, instead of the + /// component-depth. That would be needed for us to treat `p[1]`, `*(p + 10)`, + /// `*(p + 5 + 5)` together. + llvm::DenseMap> + AttachPtrComponentDepthMap = {{nullptr, std::nullopt}}; + + /// Map from attach pointer expressions to the order they were computed in, in + /// collectAttachPtrExprInfo(). + llvm::DenseMap AttachPtrComputationOrderMap = { + {nullptr, 0}}; + llvm::Value *getExprTypeSize(const Expr *E) const { QualType ExprTy = E->getType().getCanonicalType(); @@ -8167,6 +8465,103 @@ class MappableExprsHandler { } } + /// Returns the address corresponding to \p PointerExpr. + static Address getAttachPtrAddr(const Expr *PointerExpr, + CodeGenFunction &CGF) { + assert(PointerExpr && "Cannot get addr from null attach-ptr expr"); + Address AttachPtrAddr = Address::invalid(); + + if (auto *DRE = dyn_cast(PointerExpr)) { + // If the pointer is a variable, we can use its address directly. + AttachPtrAddr = CGF.EmitLValue(DRE).getAddress(); + } else if (auto *OASE = dyn_cast(PointerExpr)) { + AttachPtrAddr = + CGF.EmitArraySectionExpr(OASE, /*IsLowerBound=*/true).getAddress(); + } else if (auto *ASE = dyn_cast(PointerExpr)) { + AttachPtrAddr = CGF.EmitLValue(ASE).getAddress(); + } else if (auto *ME = dyn_cast(PointerExpr)) { + AttachPtrAddr = CGF.EmitMemberExpr(ME).getAddress(); + } else if (auto *UO = dyn_cast(PointerExpr)) { + assert(UO->getOpcode() == UO_Deref && + "Unexpected unary-operator on attach-ptr-expr"); + AttachPtrAddr = CGF.EmitLValue(UO).getAddress(); + } + assert(AttachPtrAddr.isValid() && + "Failed to get address for attach pointer expression"); + return AttachPtrAddr; + } + + /// Get the address of the attach pointer, and a load from it, to get the + /// pointee base address. + /// \return A pair containing AttachPtrAddr and AttachPteeBaseAddr. The pair + /// contains invalid addresses if \p AttachPtrExpr is null. + static std::pair + getAttachPtrAddrAndPteeBaseAddr(const Expr *AttachPtrExpr, + CodeGenFunction &CGF) { + + if (!AttachPtrExpr) + return {Address::invalid(), Address::invalid()}; + + Address AttachPtrAddr = getAttachPtrAddr(AttachPtrExpr, CGF); + assert(AttachPtrAddr.isValid() && "Invalid attach pointer addr"); + + QualType AttachPtrType = + OMPClauseMappableExprCommon::getComponentExprElementType(AttachPtrExpr) + .getCanonicalType(); + + Address AttachPteeBaseAddr = CGF.EmitLoadOfPointer( + AttachPtrAddr, AttachPtrType->castAs()); + assert(AttachPteeBaseAddr.isValid() && "Invalid attach pointee base addr"); + + return {AttachPtrAddr, AttachPteeBaseAddr}; + } + + /// Returns whether an attach entry should be emitted for a map on + /// \p MapBaseDecl on the directive \p CurDir. 
+ static bool + shouldEmitAttachEntry(const Expr *PointerExpr, const ValueDecl *MapBaseDecl, + CodeGenFunction &CGF, + llvm::PointerUnion + CurDir) { + if (!PointerExpr) + return false; + + // Pointer attachment is needed at map-entering time or for declare + // mappers. + return isa(CurDir) || + isOpenMPTargetMapEnteringDirective( + cast(CurDir) + ->getDirectiveKind()); + } + + /// Computes the attach-ptr expr for \p Components, and updates various maps + /// with the information. + /// It internally calls OMPClauseMappableExprCommon::findAttachPtrExpr() + /// with the OpenMPDirectiveKind extracted from \p CurDir. + /// It updates AttachPtrComputationOrderMap, AttachPtrComponentDepthMap, and + /// AttachPtrExprMap. + void collectAttachPtrExprInfo( + OMPClauseMappableExprCommon::MappableExprComponentListRef Components, + llvm::PointerUnion + CurDir) { + + OpenMPDirectiveKind CurDirectiveID = + isa(CurDir) + ? OMPD_declare_mapper + : cast(CurDir)->getDirectiveKind(); + + const auto &[AttachPtrExpr, Depth] = + OMPClauseMappableExprCommon::findAttachPtrExpr(Components, + CurDirectiveID); + + AttachPtrComputationOrderMap.try_emplace( + AttachPtrExpr, AttachPtrComputationOrderMap.size()); + AttachPtrComponentDepthMap.try_emplace(AttachPtrExpr, Depth); + AttachPtrExprMap.try_emplace(Components, AttachPtrExpr); + } + /// Generate all the base pointers, section pointers, sizes, map types, and /// mappers for the extracted mappable expressions (all included in \a /// CombinedInfo). Also, for each item that relates with a device pointer, a @@ -12263,7 +12658,8 @@ llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF, void CGOpenMPSIMDRuntime::emitNumThreadsClause( CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity, - const Expr *Message) { + SourceLocation SeverityLoc, const Expr *Message, + SourceLocation MessageLoc) { llvm_unreachable("Not supported in SIMD-only mode"); } diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h index eb04eceee236c..ba76ba6b5f523 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.h +++ b/clang/lib/CodeGen/CGOpenMPRuntime.h @@ -1049,11 +1049,13 @@ class CGOpenMPRuntime { Address UB, Address ST); virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF, - const Expr *Message); + const Expr *Message, + SourceLocation Loc); virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF, const OMPMessageClause *MessageClause); - virtual llvm::Value *emitSeverityClause(OpenMPSeverityClauseKind Severity); + virtual llvm::Value *emitSeverityClause(OpenMPSeverityClauseKind Severity, + SourceLocation Loc); virtual llvm::Value * emitSeverityClause(const OMPSeverityClause *SeverityClause); @@ -1069,7 +1071,9 @@ class CGOpenMPRuntime { CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal, - const Expr *Message = nullptr); + SourceLocation SeverityLoc = SourceLocation(), + const Expr *Message = nullptr, + SourceLocation MessageLoc = SourceLocation()); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. 
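Aside (editorial example, not part of the patch): an attach entry pairs the address of a mapped pointer (the attach pointer) with the base address of the data it points to (the attach pointee); getAttachPtrAddrAndPteeBaseAddr computes both by emitting an lvalue for the pointer expression and then loading through it, and shouldEmitAttachEntry restricts this to map-entering directives and declare mappers. A hedged source-level example of a map that needs such an entry; the names are illustrative.

// Mapping S->Buf[0:N] copies N floats and attaches them to the pointer member
// S->Buf, so the device copy of S->Buf points at the device copy of the data.
struct Stream { float *Buf; int N; };

void upload(Stream *S, int N) {
#pragma omp target enter data map(to: S->Buf[0:N])
}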
@@ -1956,7 +1960,9 @@ class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime { CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal, - const Expr *Message = nullptr) override; + SourceLocation SeverityLoc = SourceLocation(), + const Expr *Message = nullptr, + SourceLocation MessageLoc = SourceLocation()) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp index 44a091e1b3c75..4272d8b1a1f51 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp @@ -899,10 +899,34 @@ void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF, // Nothing to do. } +llvm::Value *CGOpenMPRuntimeGPU::emitMessageClause(CodeGenFunction &CGF, + const Expr *Message, + SourceLocation Loc) { + CGM.getDiags().Report(Loc, diag::warn_omp_gpu_unsupported_clause) + << getOpenMPClauseName(OMPC_message); + return nullptr; +} + +llvm::Value * +CGOpenMPRuntimeGPU::emitSeverityClause(OpenMPSeverityClauseKind Severity, + SourceLocation Loc) { + CGM.getDiags().Report(Loc, diag::warn_omp_gpu_unsupported_clause) + << getOpenMPClauseName(OMPC_severity); + return nullptr; +} + void CGOpenMPRuntimeGPU::emitNumThreadsClause( CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity, - const Expr *Message) { + SourceLocation SeverityLoc, const Expr *Message, + SourceLocation MessageLoc) { + if (Modifier == OMPC_NUMTHREADS_strict) { + CGM.getDiags().Report(Loc, + diag::warn_omp_gpu_unsupported_modifier_for_clause) + << "strict" << getOpenMPClauseName(OMPC_num_threads); + return; + } + // Nothing to do. } diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h index 665221b7d7890..810d6aa082156 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h +++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h @@ -162,6 +162,14 @@ class CGOpenMPRuntimeGPU : public CGOpenMPRuntime { llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; + // Currently unsupported on the device. + llvm::Value *emitMessageClause(CodeGenFunction &CGF, const Expr *Message, + SourceLocation Loc) override; + + // Currently unsupported on the device. + virtual llvm::Value *emitSeverityClause(OpenMPSeverityClauseKind Severity, + SourceLocation Loc) override; + /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. @@ -169,7 +177,9 @@ class CGOpenMPRuntimeGPU : public CGOpenMPRuntime { CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal, - const Expr *Message = nullptr) override; + SourceLocation SeverityLoc = SourceLocation(), + const Expr *Message = nullptr, + SourceLocation MessageLoc = SourceLocation()) override; /// This function ought to emit, in the general case, a call to // the openmp runtime kmpc_push_num_teams. 
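Aside (editorial example, not part of the patch): on GPU targets the message and severity clauses and the strict num_threads modifier are now diagnosed via warn_omp_gpu_unsupported_clause / warn_omp_gpu_unsupported_modifier_for_clause instead of being silently dropped. A hedged example of source that would trigger these warnings when offloaded; the OpenMP 6.0 spelling of the strict modifier and of the clauses is an assumption here.

void kernel(int N) {
  // Expected to warn when compiled for a GPU target: 'strict', 'severity' and
  // 'message' are parsed but not supported by the device runtime codegen yet.
#pragma omp target
#pragma omp parallel num_threads(strict: 128) severity(warning) message("thread count not honoured")
  {
    (void)N;
  }
}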
In NVPTX backend it is not needed diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp index 5f6136c917ac2..e9205c68c2812 100644 --- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp +++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp @@ -369,11 +369,11 @@ void CGRecordLowering::lowerUnion(bool isNonVirtualBaseType) { appendPaddingBytes(LayoutSize - getSize(StorageType)); // Set packed if we need it. const auto StorageAlignment = getAlignment(StorageType); - assert((Layout.getSize() % StorageAlignment == 0 || - Layout.getDataSize() % StorageAlignment) && + assert((Layout.getSize().isMultipleOf(StorageAlignment) || + !Layout.getDataSize().isMultipleOf(StorageAlignment)) && "Union's standard layout and no_unique_address layout must agree on " "packedness"); - if (Layout.getDataSize() % StorageAlignment) + if (!Layout.getDataSize().isMultipleOf(StorageAlignment)) Packed = true; } @@ -977,7 +977,7 @@ void CGRecordLowering::determinePacked(bool NVBaseType) { continue; // If any member falls at an offset that it not a multiple of its alignment, // then the entire record must be packed. - if (Member.Offset % getAlignment(Member.Data)) + if (!Member.Offset.isMultipleOf(getAlignment(Member.Data))) Packed = true; if (Member.Offset < NVSize) NVAlignment = std::max(NVAlignment, getAlignment(Member.Data)); @@ -985,12 +985,12 @@ void CGRecordLowering::determinePacked(bool NVBaseType) { } // If the size of the record (the capstone's offset) is not a multiple of the // record's alignment, it must be packed. - if (Members.back().Offset % Alignment) + if (!Members.back().Offset.isMultipleOf(Alignment)) Packed = true; // If the non-virtual sub-object is not a multiple of the non-virtual // sub-object's alignment, it must be packed. We cannot have a packed // non-virtual sub-object and an unpacked complete object or vise versa. - if (NVSize % NVAlignment) + if (!NVSize.isMultipleOf(NVAlignment)) Packed = true; // Update the alignment of the sentinel. if (!Packed) diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index aeff73d525c10..92636f27fd4e5 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -234,6 +234,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef Attrs) { case Stmt::OMPInterchangeDirectiveClass: EmitOMPInterchangeDirective(cast(*S)); break; + case Stmt::OMPFuseDirectiveClass: + EmitOMPFuseDirective(cast(*S)); + break; case Stmt::OMPForDirectiveClass: EmitOMPForDirective(cast(*S)); break; @@ -1291,7 +1294,9 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S, ArrayRef ForAttrs) { JumpDest LoopExit = getJumpDestInCurrentScope("for.end"); - LexicalScope ForScope(*this, S.getSourceRange()); + std::optional ForScope; + if (getLangOpts().C99 || getLangOpts().CPlusPlus) + ForScope.emplace(*this, S.getSourceRange()); // Evaluate the first part before the loop. if (S.getInit()) @@ -1350,7 +1355,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S, llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); // If there are any cleanups between here and the loop-exit scope, // create a block to stage a loop exit along. - if (ForScope.requiresCleanups()) + if (ForScope && ForScope->requiresCleanups()) ExitBlock = createBasicBlock("for.cond.cleanup"); // As long as the condition is true, iterate the loop. 
@@ -1419,7 +1424,8 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S, EmitStopPoint(&S); EmitBranch(CondBlock); - ForScope.ForceCleanup(); + if (ForScope) + ForScope->ForceCleanup(); LoopStack.pop(); @@ -2468,56 +2474,6 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { CaseRangeBlock = SavedCRBlock; } -static std::string -SimplifyConstraint(const char *Constraint, const TargetInfo &Target, - SmallVectorImpl *OutCons=nullptr) { - std::string Result; - - while (*Constraint) { - switch (*Constraint) { - default: - Result += Target.convertConstraint(Constraint); - break; - // Ignore these - case '*': - case '?': - case '!': - case '=': // Will see this and the following in mult-alt constraints. - case '+': - break; - case '#': // Ignore the rest of the constraint alternative. - while (Constraint[1] && Constraint[1] != ',') - Constraint++; - break; - case '&': - case '%': - Result += *Constraint; - while (Constraint[1] && Constraint[1] == *Constraint) - Constraint++; - break; - case ',': - Result += "|"; - break; - case 'g': - Result += "imr"; - break; - case '[': { - assert(OutCons && - "Must pass output names to constraints with a symbolic name"); - unsigned Index; - bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index); - assert(result && "Could not resolve symbolic name"); (void)result; - Result += llvm::utostr(Index); - break; - } - } - - Constraint++; - } - - return Result; -} - /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared /// as using a particular register add that as a constraint that will be used /// in this asm stmt. @@ -2896,8 +2852,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { // Simplify the output constraint. std::string OutputConstraint(S.getOutputConstraint(i)); - OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, - getTarget(), &OutputConstraintInfos); + OutputConstraint = getTarget().simplifyConstraint( + StringRef(OutputConstraint).substr(1), &OutputConstraintInfos); const Expr *OutExpr = S.getOutputExpr(i); OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); @@ -3059,8 +3015,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { // Simplify the input constraint. std::string InputConstraint(S.getInputConstraint(i)); - InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), - &OutputConstraintInfos); + InputConstraint = + getTarget().simplifyConstraint(InputConstraint, &OutputConstraintInfos); InputConstraint = AddVariableConstraints( InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp index d72cd8fbfd608..efc06a276267a 100644 --- a/clang/lib/CodeGen/CGStmtOpenMP.cpp +++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp @@ -201,6 +201,24 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope { } else { llvm_unreachable("Unknown loop-based directive kind."); } + doEmitPreinits(PreInits); + PreCondVars.restore(CGF); + } + + void + emitPreInitStmt(CodeGenFunction &CGF, + const OMPCanonicalLoopSequenceTransformationDirective &S) { + const Stmt *PreInits; + if (const auto *Fuse = dyn_cast(&S)) { + PreInits = Fuse->getPreInits(); + } else { + llvm_unreachable( + "Unknown canonical loop sequence transform directive kind."); + } + doEmitPreinits(PreInits); + } + + void doEmitPreinits(const Stmt *PreInits) { if (PreInits) { // CompoundStmts and DeclStmts are used as lists of PreInit statements and // declarations. 
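Aside (editorial sketch, not part of the patch): the static SimplifyConstraint helper removed above now lives on TargetInfo (getTarget().simplifyConstraint per the new call sites); it canonicalises GCC inline-asm constraint strings by dropping modifier characters, turning ',' into '|' and 'g' into "imr", and resolving symbolic [name] operands to indices. The sketch below reimplements only the character-level rewrites visible in the deleted code; target-specific constraint conversion and symbolic-name resolution are intentionally left out.

#include <cassert>
#include <string>

// Minimal subset of the constraint canonicalisation: strips modifiers, maps
// ',' to '|' and 'g' to "imr"; other characters pass through unchanged.
static std::string simplifyConstraintSketch(const char *Constraint) {
  std::string Result;
  for (; *Constraint; ++Constraint) {
    switch (*Constraint) {
    case '*': case '?': case '!': case '=': case '+':
      break;                      // modifiers ignored in the backend string
    case '#':                     // skip the rest of this alternative
      while (Constraint[1] && Constraint[1] != ',')
        ++Constraint;
      break;
    case ',':
      Result += '|';
      break;
    case 'g':
      Result += "imr";
      break;
    default:
      Result += *Constraint;
      break;
    }
  }
  return Result;
}

int main() {
  assert(simplifyConstraintSketch("=g,m") == "imr|m");
  return 0;
}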
Since declarations must be visible in the the following @@ -222,7 +240,6 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope { CGF.EmitStmt(S); } } - PreCondVars.restore(CGF); } public: @@ -230,6 +247,11 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope { : CodeGenFunction::RunCleanupsScope(CGF) { emitPreInitStmt(CGF, S); } + OMPLoopScope(CodeGenFunction &CGF, + const OMPCanonicalLoopSequenceTransformationDirective &S) + : CodeGenFunction::RunCleanupsScope(CGF) { + emitPreInitStmt(CGF, S); + } }; class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope { @@ -1622,22 +1644,30 @@ static void emitCommonOMPParallelDirective( // if sev-level is fatal." OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal; clang::Expr *Message = nullptr; + SourceLocation SeverityLoc = SourceLocation(); + SourceLocation MessageLoc = SourceLocation(); + llvm::Function *OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( CGF, S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); + if (const auto *NumThreadsClause = S.getSingleClause()) { CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), /*IgnoreResultAssign=*/true); Modifier = NumThreadsClause->getModifier(); - if (const auto *MessageClause = S.getSingleClause()) + if (const auto *MessageClause = S.getSingleClause()) { Message = MessageClause->getMessageString(); - if (const auto *SeverityClause = S.getSingleClause()) + MessageLoc = MessageClause->getBeginLoc(); + } + if (const auto *SeverityClause = S.getSingleClause()) { Severity = SeverityClause->getSeverityKind(); + SeverityLoc = SeverityClause->getBeginLoc(); + } CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( CGF, NumThreads, NumThreadsClause->getBeginLoc(), Modifier, Severity, - Message); + SeverityLoc, Message, MessageLoc); } if (const auto *ProcBindClause = S.getSingleClause()) { CodeGenFunction::RunCleanupsScope ProcBindScope(CGF); @@ -1921,6 +1951,15 @@ class OMPTransformDirectiveScopeRAII { CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP); CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI); } + if (const auto *Dir = + dyn_cast(S)) { + // For simplicity we reuse the loop scope similarly to what we do with + // OMPCanonicalLoopNestTransformationDirective do by being a subclass + // of OMPLoopBasedDirective. 
+ Scope = new OMPLoopScope(CGF, *Dir); + CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP); + CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI); + } } ~OMPTransformDirectiveScopeRAII() { if (!Scope) @@ -1948,8 +1987,7 @@ static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop, return; } if (SimplifiedS == NextLoop) { - if (auto *Dir = - dyn_cast(SimplifiedS)) + if (auto *Dir = dyn_cast(SimplifiedS)) SimplifiedS = Dir->getTransformedStmt(); if (const auto *CanonLoop = dyn_cast(SimplifiedS)) SimplifiedS = CanonLoop->getLoopStmt(); @@ -2944,6 +2982,12 @@ void CodeGenFunction::EmitOMPInterchangeDirective( EmitStmt(S.getTransformedStmt()); } +void CodeGenFunction::EmitOMPFuseDirective(const OMPFuseDirective &S) { + // Emit the de-sugared statement + OMPTransformDirectiveScopeRAII FuseScope(*this, &S); + EmitStmt(S.getTransformedStmt()); +} + void CodeGenFunction::EmitOMPUnrollDirective(const OMPUnrollDirective &S) { bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder; diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp index 8e3234998df2a..60d6b7fa009e7 100644 --- a/clang/lib/CodeGen/CodeGenAction.cpp +++ b/clang/lib/CodeGen/CodeGenAction.cpp @@ -190,9 +190,7 @@ void BackendConsumer::HandleInlineFunctionDefinition(FunctionDecl *D) { } void BackendConsumer::HandleInterestingDecl(DeclGroupRef D) { - // Ignore interesting decls from the AST reader after IRGen is finished. - if (!IRGenFinished) - HandleTopLevelDecl(D); + HandleTopLevelDecl(D); } // Links each entry in LinkModules into our module. Returns true on error. @@ -243,8 +241,6 @@ void BackendConsumer::HandleTranslationUnit(ASTContext &C) { if (TimerIsEnabled && !--LLVMIRGenerationRefCount) LLVMIRGeneration.yieldTo(CI.getFrontendTimer()); - - IRGenFinished = true; } // Silently ignore if we weren't initialized for some reason. @@ -1141,7 +1137,8 @@ void CodeGenAction::ExecuteAction() { TheModule->setTargetTriple(Triple(TargetOpts.Triple)); } - EmbedObject(TheModule.get(), CodeGenOpts, Diagnostics); + EmbedObject(TheModule.get(), CodeGenOpts, CI.getVirtualFileSystem(), + Diagnostics); EmbedBitcode(TheModule.get(), CodeGenOpts, *MainFile); LLVMContext &Ctx = TheModule->getContext(); diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 727487b46054f..f0565c1de04c4 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -3861,6 +3861,7 @@ class CodeGenFunction : public CodeGenTypeCache { void EmitOMPUnrollDirective(const OMPUnrollDirective &S); void EmitOMPReverseDirective(const OMPReverseDirective &S); void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S); + void EmitOMPFuseDirective(const OMPFuseDirective &S); void EmitOMPForDirective(const OMPForDirective &S); void EmitOMPForSimdDirective(const OMPForSimdDirective &S); void EmitOMPScopeDirective(const OMPScopeDirective &S); diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index 0eac7c351b164..f6f7f22a09004 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -1556,7 +1556,7 @@ void CodeGenModule::Release() { EmitBackendOptionsMetadata(getCodeGenOpts()); // If there is device offloading code embed it in the host now. 
- EmbedObject(&getModule(), CodeGenOpts, getDiags()); + EmbedObject(&getModule(), CodeGenOpts, *getFileSystem(), getDiags()); // Set visibility from DLL storage class // We do this at the end of LLVM IR generation; after any operation @@ -8172,12 +8172,17 @@ void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS, // Get the UniqueID for the file containing the decl. llvm::sys::fs::UniqueID ID; - if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) { + auto Status = FS->status(PLoc.getFilename()); + if (!Status) { PLoc = SM.getPresumedLoc(D->getLocation(), /*UseLineDirectives=*/false); assert(PLoc.isValid() && "Source location is expected to be valid."); - if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) - SM.getDiagnostics().Report(diag::err_cannot_open_file) - << PLoc.getFilename() << EC.message(); + Status = FS->status(PLoc.getFilename()); + } + if (!Status) { + SM.getDiagnostics().Report(diag::err_cannot_open_file) + << PLoc.getFilename() << Status.getError().message(); + } else { + ID = Status->getUniqueID(); } OS << llvm::format("%x", ID.getFile()) << llvm::format("%x", ID.getDevice()) << "_" << llvm::utohexstr(Result.low(), /*LowerCase=*/true, /*Width=*/8); diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp index 98b30e084b18b..8f095649f87ce 100644 --- a/clang/lib/CodeGen/CodeGenPGO.cpp +++ b/clang/lib/CodeGen/CodeGenPGO.cpp @@ -972,7 +972,7 @@ void PGOHash::combine(HashType Type) { if (Count && Count % NumTypesPerWord == 0) { using namespace llvm::support; uint64_t Swapped = - endian::byte_swap(Working); + endian::byte_swap(Working, llvm::endianness::little); MD5.update(llvm::ArrayRef((uint8_t *)&Swapped, sizeof(Swapped))); Working = 0; } @@ -999,7 +999,7 @@ uint64_t PGOHash::finalize() { } else { using namespace llvm::support; uint64_t Swapped = - endian::byte_swap(Working); + endian::byte_swap(Working, llvm::endianness::little); MD5.update(llvm::ArrayRef((uint8_t *)&Swapped, sizeof(Swapped))); } } diff --git a/clang/lib/CodeGen/ModuleBuilder.cpp b/clang/lib/CodeGen/ModuleBuilder.cpp index 8c1fee8c974f1..96f3f6221e20f 100644 --- a/clang/lib/CodeGen/ModuleBuilder.cpp +++ b/clang/lib/CodeGen/ModuleBuilder.cpp @@ -138,6 +138,8 @@ namespace { assert(!M && "Replacing existing Module?"); M.reset(new llvm::Module(ExpandModuleName(ModuleName, CodeGenOpts), C)); + IRGenFinished = false; + std::unique_ptr OldBuilder = std::move(Builder); Initialize(*Ctx); @@ -179,6 +181,10 @@ namespace { } bool HandleTopLevelDecl(DeclGroupRef DG) override { + // Ignore interesting decls from the AST reader after IRGen is finished. + if (IRGenFinished) + return true; // We can't CodeGen more but pass to other consumers. + // FIXME: Why not return false and abort parsing? 
if (Diags.hasUnrecoverableErrorOccurred()) return true; @@ -292,8 +298,9 @@ namespace { if (Builder) Builder->clear(); M.reset(); - return; } + + IRGenFinished = true; } void AssignInheritanceModel(CXXRecordDecl *RD) override { diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp index 07cf08c54985a..6596ec06199dc 100644 --- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp +++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp @@ -192,9 +192,17 @@ static Value *emitFPIntBuiltin(CodeGenFunction &CGF, return CGF.Builder.CreateCall(F, {Src0, Src1}); } +static inline StringRef mapScopeToSPIRV(StringRef AMDGCNScope) { + if (AMDGCNScope == "agent") + return "device"; + if (AMDGCNScope == "wavefront") + return "subgroup"; + return AMDGCNScope; +} + // For processing memory ordering and memory scope arguments of various // amdgcn builtins. -// \p Order takes a C++11 comptabile memory-ordering specifier and converts +// \p Order takes a C++11 compatible memory-ordering specifier and converts // it into LLVM's memory ordering specifier using atomic C ABI, and writes // to \p AO. \p Scope takes a const char * and converts it into AMDGCN // specific SyncScopeID and writes it to \p SSID. @@ -227,6 +235,8 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope, // Some of the atomic builtins take the scope as a string name. StringRef scp; if (llvm::getConstantStringInfo(Scope, scp)) { + if (getTarget().getTriple().isSPIRV()) + scp = mapScopeToSPIRV(scp); SSID = getLLVMContext().getOrInsertSyncScopeID(scp); return; } @@ -238,13 +248,19 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope, SSID = llvm::SyncScope::System; break; case 1: // __MEMORY_SCOPE_DEVICE - SSID = getLLVMContext().getOrInsertSyncScopeID("agent"); + if (getTarget().getTriple().isSPIRV()) + SSID = getLLVMContext().getOrInsertSyncScopeID("device"); + else + SSID = getLLVMContext().getOrInsertSyncScopeID("agent"); break; case 2: // __MEMORY_SCOPE_WRKGRP SSID = getLLVMContext().getOrInsertSyncScopeID("workgroup"); break; case 3: // __MEMORY_SCOPE_WVFRNT - SSID = getLLVMContext().getOrInsertSyncScopeID("wavefront"); + if (getTarget().getTriple().isSPIRV()) + SSID = getLLVMContext().getOrInsertSyncScopeID("subgroup"); + else + SSID = getLLVMContext().getOrInsertSyncScopeID("wavefront"); break; case 4: // __MEMORY_SCOPE_SINGLE SSID = llvm::SyncScope::SingleThread; @@ -1510,7 +1526,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, // // The global/flat cases need to use agent scope to consistently produce // the native instruction instead of a cmpxchg expansion. - SSID = getLLVMContext().getOrInsertSyncScopeID("agent"); + if (getTarget().getTriple().isSPIRV()) + SSID = getLLVMContext().getOrInsertSyncScopeID("device"); + else + SSID = getLLVMContext().getOrInsertSyncScopeID("agent"); AO = AtomicOrdering::Monotonic; // The v2bf16 builtin uses i16 instead of a natural bfloat type. 
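The AMDGPU builtin hunks above remap AMDGCN sync-scope names to their SPIR-V equivalents when the target triple is SPIR-V: agent becomes device and wavefront becomes subgroup, while the remaining scopes pass through unchanged. A self-contained sketch of that mapping, standing in for the static helper added in TargetBuiltins/AMDGPU.cpp (illustrative only):

#include <string_view>

constexpr std::string_view mapScopeToSPIRV(std::string_view AMDGCNScope) {
  if (AMDGCNScope == "agent")
    return "device";     // __MEMORY_SCOPE_DEVICE
  if (AMDGCNScope == "wavefront")
    return "subgroup";   // __MEMORY_SCOPE_WVFRNT
  return AMDGCNScope;    // "workgroup", "singlethread", "" (system) unchanged
}

static_assert(mapScopeToSPIRV("agent") == "device");
static_assert(mapScopeToSPIRV("workgroup") == "workgroup");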
diff --git a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp index ba65cf1ce9b90..e71dc9ea523a2 100644 --- a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp +++ b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp @@ -1153,7 +1153,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, } if (BuiltinID == PPC::BI__builtin_mma_dmmr || BuiltinID == PPC::BI__builtin_mma_dmxor || - BuiltinID == PPC::BI__builtin_mma_disassemble_dmr) { + BuiltinID == PPC::BI__builtin_mma_disassemble_dmr || + BuiltinID == PPC::BI__builtin_mma_dmsha2hash) { Address Addr = EmitPointerWithAlignment(E->getArg(1)); Ops[1] = Builder.CreateLoad(Addr); } diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp index 2e3fc53c58edc..4aa63143a66cd 100644 --- a/clang/lib/CodeGen/Targets/SPIR.cpp +++ b/clang/lib/CodeGen/Targets/SPIR.cpp @@ -486,6 +486,12 @@ llvm::Type *CommonSPIRTargetCodeGenInfo::getHLSLType( return getSPIRVImageTypeFromHLSLResource(ResAttrs, ContainedTy, CGM); } + if (ResAttrs.IsCounter) { + llvm::Type *ElemType = llvm::Type::getInt32Ty(Ctx); + uint32_t StorageClass = /* StorageBuffer storage class */ 12; + return llvm::TargetExtType::get(Ctx, "spirv.VulkanBuffer", {ElemType}, + {StorageClass, true}); + } llvm::Type *ElemType = CGM.getTypes().ConvertTypeForMem(ContainedTy); llvm::ArrayType *RuntimeArrayType = llvm::ArrayType::get(ElemType, 0); uint32_t StorageClass = /* StorageBuffer storage class */ 12; diff --git a/clang/lib/CodeGen/Targets/Sparc.cpp b/clang/lib/CodeGen/Targets/Sparc.cpp index 5f3c15d106eb6..38dbebdec2429 100644 --- a/clang/lib/CodeGen/Targets/Sparc.cpp +++ b/clang/lib/CodeGen/Targets/Sparc.cpp @@ -8,6 +8,7 @@ #include "ABIInfoImpl.h" #include "TargetInfo.h" +#include using namespace clang; using namespace clang::CodeGen; @@ -109,7 +110,8 @@ class SparcV9ABIInfo : public ABIInfo { SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} private: - ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; + ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit, + unsigned &RegOffset) const; void computeInfo(CGFunctionInfo &FI) const override; RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, AggValueSlot Slot) const override; @@ -222,128 +224,114 @@ class SparcV9ABIInfo : public ABIInfo { }; } // end anonymous namespace -ABIArgInfo -SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { +ABIArgInfo SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit, + unsigned &RegOffset) const { if (Ty->isVoidType()) return ABIArgInfo::getIgnore(); - uint64_t Size = getContext().getTypeSize(Ty); + auto &Context = getContext(); + auto &VMContext = getVMContext(); + + uint64_t Size = Context.getTypeSize(Ty); + unsigned Alignment = Context.getTypeAlign(Ty); + bool NeedPadding = (Alignment > 64) && (RegOffset % 2 != 0); // Anything too big to fit in registers is passed with an explicit indirect // pointer / sret pointer. - if (Size > SizeLimit) + if (Size > SizeLimit) { + RegOffset += 1; return getNaturalAlignIndirect( Ty, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(), /*ByVal=*/false); + } // Treat an enum type as its underlying type. if (const auto *ED = Ty->getAsEnumDecl()) Ty = ED->getIntegerType(); // Integer types smaller than a register are extended. 
- if (Size < 64 && Ty->isIntegerType()) + if (Size < 64 && Ty->isIntegerType()) { + RegOffset += 1; return ABIArgInfo::getExtend(Ty); + } if (const auto *EIT = Ty->getAs()) - if (EIT->getNumBits() < 64) + if (EIT->getNumBits() < 64) { + RegOffset += 1; return ABIArgInfo::getExtend(Ty); + } // Other non-aggregates go in registers. - if (!isAggregateTypeForABI(Ty)) + if (!isAggregateTypeForABI(Ty)) { + RegOffset += Size / 64; return ABIArgInfo::getDirect(); + } // If a C++ object has either a non-trivial copy constructor or a non-trivial // destructor, it is passed with an explicit indirect pointer / sret pointer. - if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) + if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { + RegOffset += 1; return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(), RAA == CGCXXABI::RAA_DirectInMemory); + } // This is a small aggregate type that should be passed in registers. // Build a coercion type from the LLVM struct type. llvm::StructType *StrTy = dyn_cast(CGT.ConvertType(Ty)); - if (!StrTy) + if (!StrTy) { + RegOffset += Size / 64; return ABIArgInfo::getDirect(); + } - CoerceBuilder CB(getVMContext(), getDataLayout()); + CoerceBuilder CB(VMContext, getDataLayout()); CB.addStruct(0, StrTy); // All structs, even empty ones, should take up a register argument slot, // so pin the minimum struct size to one bit. CB.pad(llvm::alignTo( std::max(CB.DL.getTypeSizeInBits(StrTy).getKnownMinValue(), uint64_t(1)), 64)); + RegOffset += CB.Size / 64; + + // If we're dealing with overaligned structs we may need to add a padding in + // the front, to preserve the correct register-memory mapping. + // + // See SCD 2.4.1, pages 3P-11 and 3P-12. + llvm::Type *Padding = + NeedPadding ? llvm::Type::getInt64Ty(VMContext) : nullptr; + RegOffset += NeedPadding ? 1 : 0; // Try to use the original type for coercion. llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? 
StrTy : CB.getType(); - if (CB.InReg) - return ABIArgInfo::getDirectInReg(CoerceTy); - else - return ABIArgInfo::getDirect(CoerceTy); + ABIArgInfo AAI = ABIArgInfo::getDirect(CoerceTy, 0, Padding); + AAI.setInReg(CB.InReg); + return AAI; } RValue SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, AggValueSlot Slot) const { - ABIArgInfo AI = classifyType(Ty, 16 * 8); - llvm::Type *ArgTy = CGT.ConvertType(Ty); - if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) - AI.setCoerceToType(ArgTy); - CharUnits SlotSize = CharUnits::fromQuantity(8); + auto TInfo = getContext().getTypeInfoInChars(Ty); - CGBuilderTy &Builder = CGF.Builder; - Address Addr = Address(Builder.CreateLoad(VAListAddr, "ap.cur"), - getVAListElementType(CGF), SlotSize); - llvm::Type *ArgPtrTy = CGF.UnqualPtrTy; - - auto TypeInfo = getContext().getTypeInfoInChars(Ty); - - Address ArgAddr = Address::invalid(); - CharUnits Stride; - switch (AI.getKind()) { - case ABIArgInfo::Expand: - case ABIArgInfo::CoerceAndExpand: - case ABIArgInfo::InAlloca: - case ABIArgInfo::TargetSpecific: - llvm_unreachable("Unsupported ABI kind for va_arg"); - - case ABIArgInfo::Extend: { - Stride = SlotSize; - CharUnits Offset = SlotSize - TypeInfo.Width; - ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); - break; - } - - case ABIArgInfo::Direct: { - auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); - Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); - ArgAddr = Addr; - break; - } - - case ABIArgInfo::Indirect: - case ABIArgInfo::IndirectAliased: - Stride = SlotSize; - ArgAddr = Addr.withElementType(ArgPtrTy); - ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), ArgTy, - TypeInfo.Align); - break; + // Zero-sized types have a width of one byte for parameter passing purposes. + TInfo.Width = std::max(TInfo.Width, CharUnits::fromQuantity(1)); - case ABIArgInfo::Ignore: - return Slot.asRValue(); - } - - // Update VAList. - Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next"); - Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr); - - return CGF.EmitLoadOfAnyValue( - CGF.MakeAddrLValue(ArgAddr.withElementType(ArgTy), Ty), Slot); + // Arguments bigger than 2*SlotSize bytes are passed indirectly. + return emitVoidPtrVAArg(CGF, VAListAddr, Ty, + /*IsIndirect=*/TInfo.Width > 2 * SlotSize, TInfo, + SlotSize, + /*AllowHigherAlign=*/true, Slot); } void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { - FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); + unsigned RetOffset = 0; + ABIArgInfo RetType = classifyType(FI.getReturnType(), 32 * 8, RetOffset); + FI.getReturnInfo() = RetType; + + // Indirect returns will have its pointer passed as an argument. + unsigned ArgOffset = RetType.isIndirect() ? RetOffset : 0; for (auto &I : FI.arguments()) - I.info = classifyType(I.type, 16 * 8); + I.info = classifyType(I.type, 16 * 8, ArgOffset); } namespace { diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp index c03ba9487a6dc..fb789489664df 100644 --- a/clang/lib/CodeGen/Targets/X86.cpp +++ b/clang/lib/CodeGen/Targets/X86.cpp @@ -1343,9 +1343,10 @@ class X86_64ABIInfo : public ABIInfo { } bool returnCXXRecordGreaterThan128InMem() const { - // Clang <= 20.0 did not do this. + // Clang <= 20.0 did not do this, and PlayStation does not do this. 
if (getContext().getLangOpts().getClangABICompat() <= - LangOptions::ClangABI::Ver20) + LangOptions::ClangABI::Ver20 || + getTarget().getTriple().isPS()) return false; return true; diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index f110dbab3e5a5..85a1335785542 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -6613,6 +6613,9 @@ std::string Driver::GetStdModuleManifestPath(const Compilation &C, const ToolChain &TC) const { std::string error = ""; + if (C.getArgs().hasArg(options::OPT_nostdlib)) + return error; + switch (TC.GetCXXStdlibType(C.getArgs())) { case ToolChain::CST_Libcxx: { auto evaluate = [&](const char *library) -> std::optional { diff --git a/clang/lib/Driver/ToolChains/AMDGPU.cpp b/clang/lib/Driver/ToolChains/AMDGPU.cpp index ae546e9767039..654a382e87e40 100644 --- a/clang/lib/Driver/ToolChains/AMDGPU.cpp +++ b/clang/lib/Driver/ToolChains/AMDGPU.cpp @@ -882,6 +882,16 @@ void AMDGPUToolChain::addClangWarningOptions(ArgStringList &CC1Args) const { CC1Args.push_back("-Werror=atomic-alignment"); } +void AMDGPUToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, + ArgStringList &CC1Args) const { + if (DriverArgs.hasArg(options::OPT_nostdinc) || + DriverArgs.hasArg(options::OPT_nostdlibinc)) + return; + + if (std::optional Path = getStdlibIncludePath()) + addSystemInclude(DriverArgs, CC1Args, *Path); +} + StringRef AMDGPUToolChain::getGPUArch(const llvm::opt::ArgList &DriverArgs) const { return getProcessorFromTargetID( diff --git a/clang/lib/Driver/ToolChains/AMDGPU.h b/clang/lib/Driver/ToolChains/AMDGPU.h index e5d41e2401db6..e90a5736911e4 100644 --- a/clang/lib/Driver/ToolChains/AMDGPU.h +++ b/clang/lib/Driver/ToolChains/AMDGPU.h @@ -79,6 +79,9 @@ class LLVM_LIBRARY_VISIBILITY AMDGPUToolChain : public Generic_ELF { void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args, Action::OffloadKind DeviceOffloadKind) const override; + void + AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; /// Return whether denormals should be flushed, and treated as 0 by default /// for the subtarget. diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index f67454ee517bd..412a176006bc0 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -695,16 +695,6 @@ RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs, } } -static bool checkDebugInfoOption(const Arg *A, const ArgList &Args, - const Driver &D, const ToolChain &TC) { - assert(A && "Expected non-nullptr argument."); - if (TC.supportsDebugInfoOption(A)) - return true; - D.Diag(diag::warn_drv_unsupported_debug_info_opt_for_target) - << A->getAsString(Args) << TC.getTripleString(); - return false; -} - static void RenderDebugInfoCompressionArgs(const ArgList &Args, ArgStringList &CmdArgs, const Driver &D, @@ -1109,26 +1099,15 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA, if (!Args.hasArg(options::OPT_nostdinc) && Args.hasFlag(options::OPT_offload_inc, options::OPT_no_offload_inc, true) && - !Args.hasArg(options::OPT_nobuiltininc)) { - // Without an offloading language we will include these headers directly. - // Offloading languages will instead only use the declarations stored in - // the resource directory at clang/lib/Headers/llvm_libc_wrappers. 
- if (getToolChain().getTriple().isGPU() && - C.getActiveOffloadKinds() == Action::OFK_None) { - SmallString<128> P(llvm::sys::path::parent_path(D.Dir)); - llvm::sys::path::append(P, "include"); - llvm::sys::path::append(P, getToolChain().getTripleString()); - CmdArgs.push_back("-internal-isystem"); - CmdArgs.push_back(Args.MakeArgString(P)); - } else if (C.getActiveOffloadKinds() == Action::OFK_OpenMP) { - // TODO: CUDA / HIP include their own headers for some common functions - // implemented here. We'll need to clean those up so they do not conflict. - SmallString<128> P(D.ResourceDir); - llvm::sys::path::append(P, "include"); - llvm::sys::path::append(P, "llvm_libc_wrappers"); - CmdArgs.push_back("-internal-isystem"); - CmdArgs.push_back(Args.MakeArgString(P)); - } + !Args.hasArg(options::OPT_nobuiltininc) && + (C.getActiveOffloadKinds() == Action::OFK_OpenMP)) { + // TODO: CUDA / HIP include their own headers for some common functions + // implemented here. We'll need to clean those up so they do not conflict. + SmallString<128> P(D.ResourceDir); + llvm::sys::path::append(P, "include"); + llvm::sys::path::append(P, "llvm_libc_wrappers"); + CmdArgs.push_back("-internal-isystem"); + CmdArgs.push_back(Args.MakeArgString(P)); } // Add system include arguments for all targets but IAMCU. @@ -2744,42 +2723,6 @@ static void CollectArgsForIntegratedAssembler(Compilation &C, } } -static void EmitComplexRangeDiag(const Driver &D, StringRef LastOpt, - LangOptions::ComplexRangeKind Range, - StringRef NewOpt, - LangOptions::ComplexRangeKind NewRange) { - // Do not emit a warning if NewOpt overrides LastOpt in the following cases. - // - // | LastOpt | NewOpt | - // |-----------------------|-----------------------| - // | -fcx-limited-range | -fno-cx-limited-range | - // | -fno-cx-limited-range | -fcx-limited-range | - // | -fcx-fortran-rules | -fno-cx-fortran-rules | - // | -fno-cx-fortran-rules | -fcx-fortran-rules | - // | -ffast-math | -fno-fast-math | - // | -ffp-model= | -ffast-math | - // | -ffp-model= | -fno-fast-math | - // | -ffp-model= | -ffp-model= | - // | -fcomplex-arithmetic= | -fcomplex-arithmetic= | - if (LastOpt == NewOpt || NewOpt.empty() || LastOpt.empty() || - (LastOpt == "-fcx-limited-range" && NewOpt == "-fno-cx-limited-range") || - (LastOpt == "-fno-cx-limited-range" && NewOpt == "-fcx-limited-range") || - (LastOpt == "-fcx-fortran-rules" && NewOpt == "-fno-cx-fortran-rules") || - (LastOpt == "-fno-cx-fortran-rules" && NewOpt == "-fcx-fortran-rules") || - (LastOpt == "-ffast-math" && NewOpt == "-fno-fast-math") || - (LastOpt.starts_with("-ffp-model=") && NewOpt == "-ffast-math") || - (LastOpt.starts_with("-ffp-model=") && NewOpt == "-fno-fast-math") || - (LastOpt.starts_with("-ffp-model=") && - NewOpt.starts_with("-ffp-model=")) || - (LastOpt.starts_with("-fcomplex-arithmetic=") && - NewOpt.starts_with("-fcomplex-arithmetic="))) - return; - - D.Diag(clang::diag::warn_drv_overriding_complex_range) - << LastOpt << NewOpt << complexRangeKindToStr(Range) - << complexRangeKindToStr(NewRange); -} - static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D, bool OFastEnabled, const ArgList &Args, ArgStringList &CmdArgs, @@ -2836,27 +2779,19 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D, std::string ComplexRangeStr; StringRef LastComplexRangeOption; - auto setComplexRange = [&](StringRef NewOption, - LangOptions::ComplexRangeKind NewRange) { - // Warn if user overrides the previously set complex number - // multiplication/division 
option. - if (Range != LangOptions::ComplexRangeKind::CX_None && Range != NewRange) - EmitComplexRangeDiag(D, LastComplexRangeOption, Range, NewOption, - NewRange); - LastComplexRangeOption = NewOption; - Range = NewRange; - }; - // Lambda to set fast-math options. This is also used by -ffp-model=fast auto applyFastMath = [&](bool Aggressive, StringRef CallerOption) { if (Aggressive) { HonorINFs = false; HonorNaNs = false; - setComplexRange(CallerOption, LangOptions::ComplexRangeKind::CX_Basic); + setComplexRange(D, CallerOption, LangOptions::ComplexRangeKind::CX_Basic, + LastComplexRangeOption, Range); } else { HonorINFs = true; HonorNaNs = true; - setComplexRange(CallerOption, LangOptions::ComplexRangeKind::CX_Promoted); + setComplexRange(D, CallerOption, + LangOptions::ComplexRangeKind::CX_Promoted, + LastComplexRangeOption, Range); } MathErrno = false; AssociativeMath = true; @@ -2908,18 +2843,24 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D, default: continue; case options::OPT_fcx_limited_range: - setComplexRange(A->getSpelling(), - LangOptions::ComplexRangeKind::CX_Basic); + setComplexRange(D, A->getSpelling(), + LangOptions::ComplexRangeKind::CX_Basic, + LastComplexRangeOption, Range); break; case options::OPT_fno_cx_limited_range: - setComplexRange(A->getSpelling(), LangOptions::ComplexRangeKind::CX_Full); + setComplexRange(D, A->getSpelling(), + LangOptions::ComplexRangeKind::CX_Full, + LastComplexRangeOption, Range); break; case options::OPT_fcx_fortran_rules: - setComplexRange(A->getSpelling(), - LangOptions::ComplexRangeKind::CX_Improved); + setComplexRange(D, A->getSpelling(), + LangOptions::ComplexRangeKind::CX_Improved, + LastComplexRangeOption, Range); break; case options::OPT_fno_cx_fortran_rules: - setComplexRange(A->getSpelling(), LangOptions::ComplexRangeKind::CX_Full); + setComplexRange(D, A->getSpelling(), + LangOptions::ComplexRangeKind::CX_Full, + LastComplexRangeOption, Range); break; case options::OPT_fcomplex_arithmetic_EQ: { LangOptions::ComplexRangeKind RangeVal; @@ -2937,7 +2878,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D, << A->getSpelling() << Val; break; } - setComplexRange(Args.MakeArgString(A->getSpelling() + Val), RangeVal); + setComplexRange(D, Args.MakeArgString(A->getSpelling() + Val), RangeVal, + LastComplexRangeOption, Range); break; } case options::OPT_ffp_model_EQ: { @@ -2977,8 +2919,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D, FPModel = Val; FPContract = "on"; LastFpContractOverrideOption = "-ffp-model=precise"; - setComplexRange(Args.MakeArgString(A->getSpelling() + Val), - LangOptions::ComplexRangeKind::CX_Full); + setComplexRange(D, Args.MakeArgString(A->getSpelling() + Val), + LangOptions::ComplexRangeKind::CX_Full, + LastComplexRangeOption, Range); } else if (Val == "strict") { StrictFPModel = true; FPExceptionBehavior = "strict"; @@ -2987,8 +2930,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D, LastFpContractOverrideOption = "-ffp-model=strict"; TrappingMath = true; RoundingFPMath = true; - setComplexRange(Args.MakeArgString(A->getSpelling() + Val), - LangOptions::ComplexRangeKind::CX_Full); + setComplexRange(D, Args.MakeArgString(A->getSpelling() + Val), + LangOptions::ComplexRangeKind::CX_Full, + LastComplexRangeOption, Range); } else D.Diag(diag::err_drv_unsupported_option_argument) << A->getSpelling() << Val; @@ -3195,8 +3139,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const 
Driver &D, SignedZeros = true; restoreFPContractState(); if (Range != LangOptions::ComplexRangeKind::CX_Full) - setComplexRange(A->getSpelling(), - LangOptions::ComplexRangeKind::CX_None); + setComplexRange(D, A->getSpelling(), + LangOptions::ComplexRangeKind::CX_None, + LastComplexRangeOption, Range); else Range = LangOptions::ComplexRangeKind::CX_None; LastComplexRangeOption = ""; @@ -4336,27 +4281,6 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args, Args.addLastArg(CmdArgs, options::OPT_warning_suppression_mappings_EQ); } -DwarfFissionKind tools::getDebugFissionKind(const Driver &D, - const ArgList &Args, Arg *&Arg) { - Arg = Args.getLastArg(options::OPT_gsplit_dwarf, options::OPT_gsplit_dwarf_EQ, - options::OPT_gno_split_dwarf); - if (!Arg || Arg->getOption().matches(options::OPT_gno_split_dwarf)) - return DwarfFissionKind::None; - - if (Arg->getOption().matches(options::OPT_gsplit_dwarf)) - return DwarfFissionKind::Split; - - StringRef Value = Arg->getValue(); - if (Value == "split") - return DwarfFissionKind::Split; - if (Value == "single") - return DwarfFissionKind::Single; - - D.Diag(diag::err_drv_unsupported_option_argument) - << Arg->getSpelling() << Arg->getValue(); - return DwarfFissionKind::None; -} - static void renderDwarfFormat(const Driver &D, const llvm::Triple &T, const ArgList &Args, ArgStringList &CmdArgs, unsigned DwarfVersion) { diff --git a/clang/lib/Driver/ToolChains/Clang.h b/clang/lib/Driver/ToolChains/Clang.h index 18f6c5ed06a59..c22789591e00a 100644 --- a/clang/lib/Driver/ToolChains/Clang.h +++ b/clang/lib/Driver/ToolChains/Clang.h @@ -187,12 +187,6 @@ class LLVM_LIBRARY_VISIBILITY LinkerWrapper final : public Tool { const char *LinkingOutput) const override; }; -enum class DwarfFissionKind { None, Split, Single }; - -DwarfFissionKind getDebugFissionKind(const Driver &D, - const llvm::opt::ArgList &Args, - llvm::opt::Arg *&Arg); - // Calculate the output path of the module file when compiling a module unit // with the `-fmodule-output` option or `-fmodule-output=` option specified. 
// The behavior is: diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 08cd98fd04df0..49ee53f0ba3bf 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -2270,6 +2270,37 @@ unsigned tools::getDwarfVersion(const ToolChain &TC, return DwarfVersion; } +DwarfFissionKind tools::getDebugFissionKind(const Driver &D, + const ArgList &Args, Arg *&Arg) { + Arg = Args.getLastArg(options::OPT_gsplit_dwarf, options::OPT_gsplit_dwarf_EQ, + options::OPT_gno_split_dwarf); + if (!Arg || Arg->getOption().matches(options::OPT_gno_split_dwarf)) + return DwarfFissionKind::None; + + if (Arg->getOption().matches(options::OPT_gsplit_dwarf)) + return DwarfFissionKind::Split; + + StringRef Value = Arg->getValue(); + if (Value == "split") + return DwarfFissionKind::Split; + if (Value == "single") + return DwarfFissionKind::Single; + + D.Diag(diag::err_drv_unsupported_option_argument) + << Arg->getSpelling() << Arg->getValue(); + return DwarfFissionKind::None; +} + +bool tools::checkDebugInfoOption(const Arg *A, const ArgList &Args, + const Driver &D, const ToolChain &TC) { + assert(A && "Expected non-nullptr argument."); + if (TC.supportsDebugInfoOption(A)) + return true; + D.Diag(diag::warn_drv_unsupported_debug_info_opt_for_target) + << A->getAsString(Args) << TC.getTripleString(); + return false; +} + void tools::AddAssemblerKPIC(const ToolChain &ToolChain, const ArgList &Args, ArgStringList &CmdArgs) { llvm::Reloc::Model RelocationModel; @@ -3315,20 +3346,16 @@ bool tools::shouldEnableVectorizerAtOLevel(const ArgList &Args, bool isSlpVec) { void tools::handleVectorizeLoopsArgs(const ArgList &Args, ArgStringList &CmdArgs) { bool EnableVec = shouldEnableVectorizerAtOLevel(Args, false); - OptSpecifier vectorizeAliasOption = - EnableVec ? options::OPT_O_Group : options::OPT_fvectorize; - if (Args.hasFlag(options::OPT_fvectorize, vectorizeAliasOption, - options::OPT_fno_vectorize, EnableVec)) + if (Args.hasFlag(options::OPT_fvectorize, options::OPT_fno_vectorize, + EnableVec)) CmdArgs.push_back("-vectorize-loops"); } void tools::handleVectorizeSLPArgs(const ArgList &Args, ArgStringList &CmdArgs) { bool EnableSLPVec = shouldEnableVectorizerAtOLevel(Args, true); - OptSpecifier SLPVectAliasOption = - EnableSLPVec ? options::OPT_O_Group : options::OPT_fslp_vectorize; - if (Args.hasFlag(options::OPT_fslp_vectorize, SLPVectAliasOption, - options::OPT_fno_slp_vectorize, EnableSLPVec)) + if (Args.hasFlag(options::OPT_fslp_vectorize, options::OPT_fno_slp_vectorize, + EnableSLPVec)) CmdArgs.push_back("-vectorize-slp"); } @@ -3530,3 +3557,51 @@ tools::renderComplexRangeOption(LangOptionsBase::ComplexRangeKind Range) { return "-complex-range=" + ComplexRangeStr; return ComplexRangeStr; } + +static void emitComplexRangeDiag(const Driver &D, StringRef LastOpt, + LangOptions::ComplexRangeKind Range, + StringRef NewOpt, + LangOptions::ComplexRangeKind NewRange) { + // Do not emit a warning if NewOpt overrides LastOpt in the following cases. 
+ // + // | LastOpt | NewOpt | + // |-----------------------|-----------------------| + // | -fcx-limited-range | -fno-cx-limited-range | + // | -fno-cx-limited-range | -fcx-limited-range | + // | -fcx-fortran-rules | -fno-cx-fortran-rules | + // | -fno-cx-fortran-rules | -fcx-fortran-rules | + // | -ffast-math | -fno-fast-math | + // | -ffp-model= | -ffast-math | + // | -ffp-model= | -fno-fast-math | + // | -ffp-model= | -ffp-model= | + // | -fcomplex-arithmetic= | -fcomplex-arithmetic= | + if (LastOpt == NewOpt || NewOpt.empty() || LastOpt.empty() || + (LastOpt == "-fcx-limited-range" && NewOpt == "-fno-cx-limited-range") || + (LastOpt == "-fno-cx-limited-range" && NewOpt == "-fcx-limited-range") || + (LastOpt == "-fcx-fortran-rules" && NewOpt == "-fno-cx-fortran-rules") || + (LastOpt == "-fno-cx-fortran-rules" && NewOpt == "-fcx-fortran-rules") || + (LastOpt == "-ffast-math" && NewOpt == "-fno-fast-math") || + (LastOpt.starts_with("-ffp-model=") && NewOpt == "-ffast-math") || + (LastOpt.starts_with("-ffp-model=") && NewOpt == "-fno-fast-math") || + (LastOpt.starts_with("-ffp-model=") && + NewOpt.starts_with("-ffp-model=")) || + (LastOpt.starts_with("-fcomplex-arithmetic=") && + NewOpt.starts_with("-fcomplex-arithmetic="))) + return; + + D.Diag(clang::diag::warn_drv_overriding_complex_range) + << LastOpt << NewOpt << complexRangeKindToStr(Range) + << complexRangeKindToStr(NewRange); +} + +void tools::setComplexRange(const Driver &D, StringRef NewOpt, + LangOptions::ComplexRangeKind NewRange, + StringRef &LastOpt, + LangOptions::ComplexRangeKind &Range) { + // Warn if user overrides the previously set complex number + // multiplication/division option. + if (Range != LangOptions::ComplexRangeKind::CX_None && Range != NewRange) + emitComplexRangeDiag(D, LastOpt, Range, NewOpt, NewRange); + LastOpt = NewOpt; + Range = NewRange; +} diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp index 327cb5183f837..07201cc4676ac 100644 --- a/clang/lib/Driver/ToolChains/Cuda.cpp +++ b/clang/lib/Driver/ToolChains/Cuda.cpp @@ -778,6 +778,16 @@ void NVPTXToolChain::addClangTargetOptions( const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args, Action::OffloadKind DeviceOffloadingKind) const {} +void NVPTXToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, + ArgStringList &CC1Args) const { + if (DriverArgs.hasArg(options::OPT_nostdinc) || + DriverArgs.hasArg(options::OPT_nostdlibinc)) + return; + + if (std::optional Path = getStdlibIncludePath()) + addSystemInclude(DriverArgs, CC1Args, *Path); +} + bool NVPTXToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const { const Option &O = A->getOption(); return (O.matches(options::OPT_gN_Group) && diff --git a/clang/lib/Driver/ToolChains/Cuda.h b/clang/lib/Driver/ToolChains/Cuda.h index 8aeba53dd0030..6193328908828 100644 --- a/clang/lib/Driver/ToolChains/Cuda.h +++ b/clang/lib/Driver/ToolChains/Cuda.h @@ -92,6 +92,9 @@ class LLVM_LIBRARY_VISIBILITY NVPTXToolChain : public ToolChain { addClangTargetOptions(const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args, Action::OffloadKind DeviceOffloadKind) const override; + void + AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; // Never try to use the integrated assembler with CUDA; always fork out to // ptxas. 
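The CommonArgs.cpp hunks above move the complex-range bookkeeping (setComplexRange and the emitComplexRangeDiag override table) out of Clang.cpp so the Flang driver can reuse it. A rough, self-contained sketch of the warning rule those helpers encode, with illustrative names and only one of the suppressed pairs from the comment table spelled out:

#include <string_view>

enum class RangeKind { None, Full, Improved, Promoted, Basic };

// Warn only when the effective range really changes and the old/new option
// pair is not one of the "expected override" combinations from the table.
constexpr bool shouldWarnOnOverride(std::string_view LastOpt, RangeKind Last,
                                    std::string_view NewOpt, RangeKind New) {
  if (Last == RangeKind::None || Last == New)
    return false;
  if (LastOpt.empty() || NewOpt.empty() || LastOpt == NewOpt)
    return false;
  if (LastOpt == "-ffast-math" && NewOpt == "-fno-fast-math")
    return false;  // expected override, stays silent
  return true;
}

static_assert(shouldWarnOnOverride("-fcx-limited-range", RangeKind::Basic,
                                   "-fcomplex-arithmetic=full", RangeKind::Full));
static_assert(!shouldWarnOnOverride("-ffast-math", RangeKind::Basic,
                                    "-fno-fast-math", RangeKind::Full));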
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp index 6fc372eb75eb7..a56fa41c49d34 100644 --- a/clang/lib/Driver/ToolChains/Flang.cpp +++ b/clang/lib/Driver/ToolChains/Flang.cpp @@ -120,7 +120,11 @@ static bool shouldLoopVersion(const ArgList &Args) { return false; } -void Flang::addOtherOptions(const ArgList &Args, ArgStringList &CmdArgs) const { +void Flang::addDebugOptions(const llvm::opt::ArgList &Args, const JobAction &JA, + const InputInfo &Output, const InputInfo &Input, + llvm::opt::ArgStringList &CmdArgs) const { + const auto &TC = getToolChain(); + const Driver &D = TC.getDriver(); Args.addAllArgs(CmdArgs, {options::OPT_module_dir, options::OPT_fdebug_module_writer, options::OPT_fintrinsic_modules_path, options::OPT_pedantic, @@ -131,20 +135,60 @@ void Flang::addOtherOptions(const ArgList &Args, ArgStringList &CmdArgs) const { options::OPT_finstrument_functions}); llvm::codegenoptions::DebugInfoKind DebugInfoKind; + bool hasDwarfNArg = getDwarfNArg(Args) != nullptr; if (Args.hasArg(options::OPT_gN_Group)) { Arg *gNArg = Args.getLastArg(options::OPT_gN_Group); DebugInfoKind = debugLevelToInfoKind(*gNArg); - } else if (Args.hasArg(options::OPT_g_Group)) { + } else if (Args.hasArg(options::OPT_g_Flag) || hasDwarfNArg) { DebugInfoKind = llvm::codegenoptions::FullDebugInfo; } else { DebugInfoKind = llvm::codegenoptions::NoDebugInfo; } addDebugInfoKind(CmdArgs, DebugInfoKind); - if (getDwarfNArg(Args)) { + if (hasDwarfNArg) { const unsigned DwarfVersion = getDwarfVersion(getToolChain(), Args); CmdArgs.push_back( Args.MakeArgString("-dwarf-version=" + Twine(DwarfVersion))); } + if (Args.hasArg(options::OPT_gsplit_dwarf) || + Args.hasArg(options::OPT_gsplit_dwarf_EQ)) { + // FIXME: -gsplit-dwarf on AIX is currently unimplemented. 
+ if (TC.getTriple().isOSAIX()) { + D.Diag(diag::err_drv_unsupported_opt_for_target) + << Args.getLastArg(options::OPT_gsplit_dwarf)->getSpelling() + << TC.getTriple().str(); + return; + } + if (DebugInfoKind == llvm::codegenoptions::NoDebugInfo) + return; + + Arg *SplitDWARFArg; + DwarfFissionKind DwarfFission = getDebugFissionKind(D, Args, SplitDWARFArg); + + if (DwarfFission == DwarfFissionKind::None || + !checkDebugInfoOption(SplitDWARFArg, Args, D, TC)) + return; + + if (!TC.getTriple().isOSBinFormatELF() && + !TC.getTriple().isOSBinFormatWasm() && + !TC.getTriple().isOSBinFormatCOFF()) { + D.Diag(diag::warn_drv_unsupported_debug_info_opt_for_target) + << SplitDWARFArg->getSpelling() << TC.getTriple().str(); + return; + } + + if (!isa(JA) && !isa(JA) && + isa(JA)) + return; + + const char *SplitDWARFOut = SplitDebugName(JA, Args, Input, Output); + CmdArgs.push_back("-split-dwarf-file"); + CmdArgs.push_back(SplitDWARFOut); + if (DwarfFission == DwarfFissionKind::Split) { + CmdArgs.push_back("-split-dwarf-output"); + CmdArgs.push_back(SplitDWARFOut); + } + } } void Flang::addCodegenOptions(const ArgList &Args, @@ -649,6 +693,7 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args, bool AssociativeMath = false; bool ReciprocalMath = false; + StringRef LastComplexRangeOption; LangOptions::ComplexRangeKind Range = LangOptions::ComplexRangeKind::CX_None; if (const Arg *A = Args.getLastArg(options::OPT_ffp_contract)) { @@ -676,17 +721,22 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args, continue; case options::OPT_fcomplex_arithmetic_EQ: { + LangOptions::ComplexRangeKind NewRange; StringRef Val = A->getValue(); if (Val == "full") - Range = LangOptions::ComplexRangeKind::CX_Full; + NewRange = LangOptions::ComplexRangeKind::CX_Full; else if (Val == "improved") - Range = LangOptions::ComplexRangeKind::CX_Improved; + NewRange = LangOptions::ComplexRangeKind::CX_Improved; else if (Val == "basic") - Range = LangOptions::ComplexRangeKind::CX_Basic; + NewRange = LangOptions::ComplexRangeKind::CX_Basic; else { D.Diag(diag::err_drv_unsupported_option_argument) << A->getSpelling() << Val; + break; } + + setComplexRange(D, Args.MakeArgString(A->getSpelling() + Val), NewRange, + LastComplexRangeOption, Range); break; } case options::OPT_fhonor_infinities: @@ -735,6 +785,9 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args, ApproxFunc = true; SignedZeros = false; FPContract = "fast"; + setComplexRange(D, A->getSpelling(), + LangOptions::ComplexRangeKind::CX_Basic, + LastComplexRangeOption, Range); break; case options::OPT_fno_fast_math: HonorINFs = true; @@ -748,6 +801,9 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args, // --ffp-contract=off -fno-fast-math --> -ffp-contract=off if (FPContract == "fast") FPContract = ""; + setComplexRange(D, A->getSpelling(), + LangOptions::ComplexRangeKind::CX_None, + LastComplexRangeOption, Range); break; } @@ -766,6 +822,9 @@ static void addFloatingPointOptions(const Driver &D, const ArgList &Args, complexRangeKindToStr(Range))); } + if (Args.hasArg(options::OPT_fno_fast_real_mod)) + CmdArgs.push_back("-fno-fast-real-mod"); + if (!HonorINFs && !HonorNaNs && AssociativeMath && ReciprocalMath && ApproxFunc && !SignedZeros && (FPContract == "fast" || FPContract.empty())) { @@ -936,8 +995,8 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA, if (willEmitRemarks(Args)) renderRemarksOptions(Args, CmdArgs, Input); - // Add other compile options - 
addOtherOptions(Args, CmdArgs); + // Add debug compile options + addDebugOptions(Args, JA, Output, Input, CmdArgs); // Disable all warnings // TODO: Handle interactions between -w, -pedantic, -Wall, -WOption diff --git a/clang/lib/Driver/ToolChains/Flang.h b/clang/lib/Driver/ToolChains/Flang.h index 98167e1b75e15..c0837b80c032e 100644 --- a/clang/lib/Driver/ToolChains/Flang.h +++ b/clang/lib/Driver/ToolChains/Flang.h @@ -125,12 +125,16 @@ class LLVM_LIBRARY_VISIBILITY Flang : public Tool { void addCodegenOptions(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const; - /// Extract other compilation options from the driver arguments and add them + /// Extract debug compilation options from the driver arguments and add them /// to the command arguments. /// /// \param [in] Args The list of input driver arguments + /// \param [in] JA The job action + /// \param [in] Output The output information on the current file output + /// \param [in] Input The input information on the current file input /// \param [out] CmdArgs The list of output command arguments - void addOtherOptions(const llvm::opt::ArgList &Args, + void addDebugOptions(const llvm::opt::ArgList &Args, const JobAction &JA, + const InputInfo &Output, const InputInfo &Input, llvm::opt::ArgStringList &CmdArgs) const; public: diff --git a/clang/lib/Driver/ToolChains/HLSL.cpp b/clang/lib/Driver/ToolChains/HLSL.cpp index f4858e4c960de..2869549e6b3f0 100644 --- a/clang/lib/Driver/ToolChains/HLSL.cpp +++ b/clang/lib/Driver/ToolChains/HLSL.cpp @@ -64,7 +64,7 @@ bool isLegalShaderModel(Triple &T) { } break; case Triple::EnvironmentType::RootSignature: VersionTuple MinVer(1, 0); - VersionTuple MaxVer(1, 1); + VersionTuple MaxVer(1, 2); return MinVer <= Version && Version <= MaxVer; } return false; diff --git a/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp b/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp index 1087eb3001856..6966d4097d64a 100644 --- a/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp +++ b/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp @@ -444,8 +444,7 @@ bool ExtractAPIAction::PrepareToExecuteAction(CompilerInstance &CI) { return true; if (!CI.hasFileManager()) - if (!CI.createFileManager()) - return false; + CI.createFileManager(); auto Kind = Inputs[0].getKind(); diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp index 9413c13a4137e..cd4c1aabac971 100644 --- a/clang/lib/Format/ContinuationIndenter.cpp +++ b/clang/lib/Format/ContinuationIndenter.cpp @@ -368,7 +368,7 @@ bool ContinuationIndenter::canBreak(const LineState &State) { // If binary operators are moved to the next line (including commas for some // styles of constructor initializers), that's always ok. - if (!Current.isOneOf(TT_BinaryOperator, tok::comma) && + if (Current.isNoneOf(TT_BinaryOperator, tok::comma) && // Allow breaking opening brace of lambdas (when passed as function // arguments) to a new line when BeforeLambdaBody brace wrapping is // enabled. 
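The clang-format hunks in this area replace the negated form !Tok.isOneOf(...) with the new Tok.isNoneOf(...) helper, which is added in the FormatToken.h hunk further down. A tiny, self-contained illustration of the variadic pattern (not the FormatToken member itself):

// Fold expressions require C++17.
template <typename T, typename... Ts>
constexpr bool isOneOf(T Value, Ts... Candidates) {
  return ((Value == Candidates) || ...);
}

template <typename T, typename... Ts>
constexpr bool isNoneOf(T Value, Ts... Candidates) {
  return !isOneOf(Value, Candidates...);
}

static_assert(isNoneOf(3, 1, 2, 4));
static_assert(!isNoneOf(2, 1, 2, 4));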
@@ -445,7 +445,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { (!Style.BreakBeforeTernaryOperators && Previous.is(TT_ConditionalExpr))) && CurrentState.BreakBeforeParameter && !Current.isTrailingComment() && - !Current.isOneOf(tok::r_paren, tok::r_brace)) { + Current.isNoneOf(tok::r_paren, tok::r_brace)) { return true; } if (CurrentState.IsChainedConditional && @@ -523,9 +523,9 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { if (Style.AlwaysBreakBeforeMultilineStrings && (NewLineColumn == State.FirstIndent + Style.ContinuationIndentWidth || Previous.is(tok::comma) || Current.NestingLevel < 2) && - !Previous.isOneOf(tok::kw_return, tok::lessless, tok::at, + Previous.isNoneOf(tok::kw_return, tok::lessless, tok::at, Keywords.kw_dollar) && - !Previous.isOneOf(TT_InlineASMColon, TT_ConditionalExpr) && + Previous.isNoneOf(TT_InlineASMColon, TT_ConditionalExpr) && nextIsMultilineString(State)) { return true; } @@ -648,7 +648,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { // into the ColumnLimit, they are checked here in the ContinuationIndenter. if (Style.ColumnLimit != 0 && Previous.is(BK_Block) && Previous.is(tok::l_brace) && - !Current.isOneOf(tok::r_brace, tok::comment)) { + Current.isNoneOf(tok::r_brace, tok::comment)) { return true; } @@ -752,7 +752,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, return false; const auto *Next = Comma->getNextNonComment(); - return Next && !Next->isOneOf(TT_LambdaLSquare, tok::l_brace, tok::caret); + return Next && Next->isNoneOf(TT_LambdaLSquare, tok::l_brace, tok::caret); }; if (DisallowLineBreaks()) @@ -835,7 +835,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, return Tok.is(tok::l_brace) && Tok.isNot(BK_Block) && Style.Cpp11BracedListStyle; }; - if (!Tok.isOneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) && + if (Tok.isNoneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) && !IsStartOfBracedList()) { return false; } @@ -843,7 +843,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, return true; if (Tok.Previous->isIf()) return Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak; - return !Tok.Previous->isOneOf(TT_CastRParen, tok::kw_for, tok::kw_while, + return Tok.Previous->isNoneOf(TT_CastRParen, tok::kw_for, tok::kw_while, tok::kw_switch) && !(Style.isJavaScript() && Tok.Previous->is(Keywords.kw_await)); }; @@ -882,8 +882,8 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, Tok.isOneOf(tok::ellipsis, Keywords.kw_await))) { return true; } - const auto *Previous = Tok.Previous; - if (!Previous || (!Previous->isOneOf(TT_FunctionDeclarationLParen, + if (const auto *Previous = Tok.Previous; + !Previous || (Previous->isNoneOf(TT_FunctionDeclarationLParen, TT_LambdaDefinitionLParen) && !IsFunctionCallParen(*Previous))) { return true; @@ -920,9 +920,9 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, // align the commas with the opening paren. 
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign && !CurrentState.IsCSharpGenericTypeConstraint && Previous.opensScope() && - Previous.isNot(TT_ObjCMethodExpr) && Previous.isNot(TT_RequiresClause) && - Previous.isNot(TT_TableGenDAGArgOpener) && - Previous.isNot(TT_TableGenDAGArgOpenerToBreak) && + Previous.isNoneOf(TT_ObjCMethodExpr, TT_RequiresClause, + TT_TableGenDAGArgOpener, + TT_TableGenDAGArgOpenerToBreak) && !(Current.MacroParent && Previous.MacroParent) && (Current.isNot(TT_LineComment) || Previous.isOneOf(BK_BracedInit, TT_VerilogMultiLineListLParen)) && @@ -962,7 +962,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, if (Current.isNot(tok::comment) && P && (P->isOneOf(TT_BinaryOperator, tok::comma) || (P->is(TT_ConditionalExpr) && P->is(tok::colon))) && - !P->isOneOf(TT_OverloadedOperator, TT_CtorInitializerComma) && + P->isNoneOf(TT_OverloadedOperator, TT_CtorInitializerComma) && P->getPrecedence() != prec::Assignment && P->getPrecedence() != prec::Relational && P->getPrecedence() != prec::Spaceship) { @@ -992,7 +992,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, // parameter, i.e. let nested calls have a continuation indent. CurrentState.LastSpace = State.Column; CurrentState.NestedBlockIndent = State.Column; - } else if (!Current.isOneOf(tok::comment, tok::caret) && + } else if (Current.isNoneOf(tok::comment, tok::caret) && ((Previous.is(tok::comma) && Previous.isNot(TT_OverloadedOperator)) || (Previous.is(tok::colon) && Previous.is(TT_ObjCMethodExpr)))) { @@ -1099,7 +1099,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State, if (Current.isNot(TT_LambdaArrow) && (!Style.isJavaScript() || Current.NestingLevel != 0 || !PreviousNonComment || PreviousNonComment->isNot(tok::equal) || - !Current.isOneOf(Keywords.kw_async, Keywords.kw_function))) { + Current.isNoneOf(Keywords.kw_async, Keywords.kw_function))) { CurrentState.NestedBlockIndent = State.Column; } @@ -1239,11 +1239,11 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State, } if (PreviousNonComment && - !PreviousNonComment->isOneOf(tok::comma, tok::colon, tok::semi) && + PreviousNonComment->isNoneOf(tok::comma, tok::colon, tok::semi) && ((PreviousNonComment->isNot(TT_TemplateCloser) && !PreviousNonComment->ClosesRequiresClause) || Current.NestingLevel != 0) && - !PreviousNonComment->isOneOf( + PreviousNonComment->isNoneOf( TT_BinaryOperator, TT_FunctionAnnotationRParen, TT_JavaAnnotation, TT_LeadingJavaAnnotation) && Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope() && @@ -1281,8 +1281,8 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State, bool AllowAllConstructorInitializersOnNextLine = Style.PackConstructorInitializers == FormatStyle::PCIS_NextLine || Style.PackConstructorInitializers == FormatStyle::PCIS_NextLineOnly; - if (!(Previous.isOneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) || - PreviousIsBreakingCtorInitializerColon) || + if ((Previous.isNoneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) && + !PreviousIsBreakingCtorInitializerColon) || (!Style.AllowAllParametersOfDeclarationOnNextLine && State.Line->MustBeDeclaration) || (!Style.AllowAllArgumentsOnNextLine && @@ -1576,7 +1576,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) { if (Previous.is(tok::r_paren) && Previous.isNot(TT_TableGenDAGArgOperatorToBreak) && !Current.isBinaryOperator() && - !Current.isOneOf(tok::colon, tok::comment)) { + Current.isNoneOf(tok::colon, 
tok::comment)) { return ContinuationIndent; } if (Current.is(TT_ProtoExtensionLSquare)) @@ -1591,7 +1591,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) { NextNonComment->SpacesRequiredBefore; } if (CurrentState.Indent == State.FirstIndent && PreviousNonComment && - !PreviousNonComment->isOneOf(tok::r_brace, TT_CtorInitializerComma)) { + PreviousNonComment->isNoneOf(tok::r_brace, TT_CtorInitializerComma)) { // Ensure that we fall back to the continuation indent width instead of // just flushing continuations left. return CurrentState.Indent + Style.ContinuationIndentWidth; @@ -1734,7 +1734,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State, } if (Previous && (Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) || (Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) && - !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr, + Previous->isNoneOf(TT_DictLiteral, TT_ObjCMethodExpr, TT_CtorInitializerColon)))) { CurrentState.NestedBlockInlined = !Newline && hasNestedBlockInlined(Previous, Current, Style); @@ -1758,7 +1758,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State, State.StartOfStringLiteral = State.Column + 1; } else if (Current.isStringLiteral() && State.StartOfStringLiteral == 0) { State.StartOfStringLiteral = State.Column; - } else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) && + } else if (Current.isNoneOf(tok::comment, tok::identifier, tok::hash) && !Current.isStringLiteral()) { State.StartOfStringLiteral = 0; } @@ -2057,7 +2057,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State, // array literals as these follow different indentation rules. bool NoLineBreak = Current.Children.empty() && - !Current.isOneOf(TT_DictLiteral, TT_ArrayInitializerLSquare) && + Current.isNoneOf(TT_DictLiteral, TT_ArrayInitializerLSquare) && (CurrentState.NoLineBreak || CurrentState.NoLineBreakInOperand || (Current.is(TT_TemplateOpener) && CurrentState.ContainsUnwrappedBuilder)); diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp index b38f2810c0a74..2bf62448a7df3 100644 --- a/clang/lib/Format/Format.cpp +++ b/clang/lib/Format/Format.cpp @@ -2185,47 +2185,68 @@ std::error_code parseConfiguration(llvm::MemoryBufferRef Config, if (Input.error()) return Input.error(); - for (unsigned i = 0; i < Styles.size(); ++i) { - // Ensures that only the first configuration can skip the Language option. - if (Styles[i].Language == FormatStyle::LK_None && i != 0) + assert(!Styles.empty()); + const auto StyleCount = Styles.size(); + + // Start from the second style as (only) the first one may be the default. + for (unsigned I = 1; I < StyleCount; ++I) { + const auto Lang = Styles[I].Language; + if (Lang == FormatStyle::LK_None) return make_error_code(ParseError::Error); // Ensure that each language is configured at most once. - for (unsigned j = 0; j < i; ++j) { - if (Styles[i].Language == Styles[j].Language) { + for (unsigned J = 0; J < I; ++J) { + if (Lang == Styles[J].Language) { LLVM_DEBUG(llvm::dbgs() << "Duplicate languages in the config file on positions " - << j << " and " << i << "\n"); + << J << " and " << I << '\n'); return make_error_code(ParseError::Error); } } } - // Look for a suitable configuration starting from the end, so we can - // find the configuration for the specific language first, and the default - // configuration (which can only be at slot 0) after it. 
- FormatStyle::FormatStyleSet StyleSet; - bool LanguageFound = false; - for (const FormatStyle &Style : llvm::reverse(Styles)) { - const auto Lang = Style.Language; - if (Lang != FormatStyle::LK_None) - StyleSet.Add(Style); - if (Lang == Language || - // For backward compatibility. - (Lang == FormatStyle::LK_Cpp && Language == FormatStyle::LK_C)) { - LanguageFound = true; - } else if (IsDotHFile && Language == FormatStyle::LK_Cpp && - (Lang == FormatStyle::LK_C || Lang == FormatStyle::LK_ObjC)) { - Language = Lang; - LanguageFound = true; + + int LanguagePos = -1; // Position of the style for Language. + int CppPos = -1; // Position of the style for C++. + int CPos = -1; // Position of the style for C. + + // Search Styles for Language and store the positions of C++ and C styles in + // case Language is not found. + for (unsigned I = 0; I < StyleCount; ++I) { + const auto Lang = Styles[I].Language; + if (Lang == Language) { + LanguagePos = I; + break; } - } - if (!LanguageFound) { - if (Styles.empty() || Styles[0].Language != FormatStyle::LK_None) + if (Lang == FormatStyle::LK_Cpp) + CppPos = I; + else if (Lang == FormatStyle::LK_C) + CPos = I; + } + + // If Language is not found, use the default style if there is one. Otherwise, + // use the C style for C++ .h files and for backward compatibility, the C++ + // style for .c files. + if (LanguagePos < 0) { + if (Styles[0].Language == FormatStyle::LK_None) // Default style. + LanguagePos = 0; + else if (IsDotHFile && Language == FormatStyle::LK_Cpp) + LanguagePos = CPos; + else if (!IsDotHFile && Language == FormatStyle::LK_C) + LanguagePos = CppPos; + if (LanguagePos < 0) return make_error_code(ParseError::Unsuitable); - FormatStyle DefaultStyle = Styles[0]; - DefaultStyle.Language = Language; - StyleSet.Add(std::move(DefaultStyle)); } - *Style = *StyleSet.Get(Language); + + for (const auto &S : llvm::reverse(llvm::drop_begin(Styles))) + Style->StyleSet.Add(S); + + *Style = Styles[LanguagePos]; + + if (LanguagePos == 0) { + if (Style->Language == FormatStyle::LK_None) // Default style. + Style->Language = Language; + Style->StyleSet.Add(*Style); + } + if (Style->InsertTrailingCommas != FormatStyle::TCS_None && Style->BinPackArguments) { // See comment on FormatStyle::TSC_Wrapped. @@ -2256,14 +2277,8 @@ FormatStyle::FormatStyleSet::Get(FormatStyle::LanguageKind Language) const { if (!Styles) return std::nullopt; auto It = Styles->find(Language); - if (It == Styles->end()) { - if (Language != FormatStyle::LK_C) - return std::nullopt; - // For backward compatibility. - It = Styles->find(FormatStyle::LK_Cpp); - if (It == Styles->end()) - return std::nullopt; - } + if (It == Styles->end()) + return std::nullopt; FormatStyle Style = It->second; Style.StyleSet = *this; return Style; @@ -2420,7 +2435,7 @@ class BracesRemover : public TokenAnalyzer { const auto *NextLine = I + 1 == End ? nullptr : I[1]; for (const auto *Token = Line->First; Token && !Token->Finalized; Token = Token->Next) { - if (!Token->Optional || !Token->isOneOf(tok::l_brace, tok::r_brace)) + if (!Token->Optional || Token->isNoneOf(tok::l_brace, tok::r_brace)) continue; auto *Next = Token->Next; assert(Next || Token == Line->Last); diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp index c60ae8f0d2852..c2956a179b8ed 100644 --- a/clang/lib/Format/FormatToken.cpp +++ b/clang/lib/Format/FormatToken.cpp @@ -108,7 +108,7 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State, // Ensure that we start on the opening brace. 
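A rough sketch of the lookup order that the rewritten parseConfiguration implements above (a hypothetical helper with simplified stand-ins for FormatStyle::LanguageKind and the Styles vector, which is asserted non-empty; not the actual clang-format code): exact language match first, then the default style in slot 0, then the C section for a C++ .h file, then the C++ section for a .c file, otherwise no suitable configuration.

    #include <vector>

    enum class Lang { None, C, Cpp, ObjC };

    // Returns the index of the style to use, or -1 for ParseError::Unsuitable.
    int resolveStyleIndex(const std::vector<Lang> &Styles, Lang Language,
                          bool IsDotHFile) {
      int CppPos = -1, CPos = -1;
      for (int I = 0, E = static_cast<int>(Styles.size()); I != E; ++I) {
        if (Styles[I] == Language)
          return I;                  // Exact match wins.
        if (Styles[I] == Lang::Cpp)
          CppPos = I;
        else if (Styles[I] == Lang::C)
          CPos = I;
      }
      if (Styles[0] == Lang::None)   // The default style can only be first.
        return 0;
      if (IsDotHFile && Language == Lang::Cpp)
        return CPos;                 // C++ header served by the C section.
      if (!IsDotHFile && Language == Lang::C)
        return CppPos;               // .c file served by the C++ section.
      return -1;
    }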
const FormatToken *LBrace = State.NextToken->Previous->getPreviousNonComment(); - if (!LBrace || !LBrace->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) || + if (!LBrace || LBrace->isNoneOf(tok::l_brace, TT_ArrayInitializerLSquare) || LBrace->is(BK_Block) || LBrace->is(TT_DictLiteral) || LBrace->Next->is(TT_DesignatedInitializerPeriod)) { return 0; @@ -177,7 +177,7 @@ static unsigned CodePointsBetween(const FormatToken *Begin, void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) { // FIXME: At some point we might want to do this for other lists, too. if (!Token->MatchingParen || - !Token->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)) { + Token->isNoneOf(tok::l_brace, TT_ArrayInitializerLSquare)) { return; } diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h index e04b0e7af10c0..e4ddd610b9722 100644 --- a/clang/lib/Format/FormatToken.h +++ b/clang/lib/Format/FormatToken.h @@ -55,7 +55,7 @@ namespace format { TYPE(ConflictAlternative) \ TYPE(ConflictEnd) \ TYPE(ConflictStart) \ - /* l_brace of if/for/while */ \ + /* l_brace of if/for/while/switch/catch */ \ TYPE(ControlStatementLBrace) \ TYPE(ControlStatementRBrace) \ TYPE(CppCastLParen) \ @@ -645,6 +645,9 @@ struct FormatToken { return is(K1) || isOneOf(K2, Ks...); } template <typename T> bool isNot(T Kind) const { return !is(Kind); } + template <typename... Ts> bool isNoneOf(Ts... Ks) const { + return !isOneOf(Ks...); + } bool isIf(bool AllowConstexprMacro = true) const { return is(tok::kw_if) || endsSequence(tok::kw_constexpr, tok::kw_if) || @@ -748,7 +751,7 @@ struct FormatToken { /// Returns \c true if this is a "." or "->" accessing a member. bool isMemberAccess() const { return isOneOf(tok::arrow, tok::period, tok::arrowstar) && - !isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow, + isNoneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow, TT_LambdaArrow, TT_LeadingJavaAnnotation); } diff --git a/clang/lib/Format/FormatTokenLexer.cpp b/clang/lib/Format/FormatTokenLexer.cpp index 3f4aa52a87d2e..86a5185a92a52 100644 --- a/clang/lib/Format/FormatTokenLexer.cpp +++ b/clang/lib/Format/FormatTokenLexer.cpp @@ -733,7 +733,7 @@ void FormatTokenLexer::tryParseJavaTextBlock() { // its text if successful. void FormatTokenLexer::tryParseJSRegexLiteral() { FormatToken *RegexToken = Tokens.back(); - if (!RegexToken->isOneOf(tok::slash, tok::slashequal)) + if (RegexToken->isNoneOf(tok::slash, tok::slashequal)) return; FormatToken *Prev = nullptr; @@ -1041,7 +1041,7 @@ void FormatTokenLexer::handleTemplateStrings() { void FormatTokenLexer::tryParsePythonComment() { FormatToken *HashToken = Tokens.back(); - if (!HashToken->isOneOf(tok::hash, tok::hashhash)) + if (HashToken->isNoneOf(tok::hash, tok::hashhash)) return; // Turn the remainder of this line into a comment. 
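A minimal, self-contained sketch of the isNoneOf pattern introduced above (toy Token type, not the real FormatToken): the helper is simply the negation of a variadic isOneOf, so every negated isOneOf call site can be rewritten as isNoneOf without changing behavior.

    #include <cassert>

    enum class Kind { Comma, Colon, Semi, LBrace };

    struct Token {
      Kind K;
      bool is(Kind Other) const { return K == Other; }
      template <typename... Ts> bool isOneOf(Kind K1, Ts... Ks) const {
        return is(K1) || (is(Ks) || ...);
      }
      template <typename... Ts> bool isNoneOf(Ts... Ks) const {
        return !isOneOf(Ks...); // Same shape as the helper added above.
      }
    };

    int main() {
      Token T{Kind::Semi};
      assert(T.isNoneOf(Kind::Comma, Kind::Colon) ==
             !T.isOneOf(Kind::Comma, Kind::Colon));
      return 0;
    }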
const char *CommentBegin = diff --git a/clang/lib/Format/MacroExpander.cpp b/clang/lib/Format/MacroExpander.cpp index 85a53c9bb12fe..445e17358844d 100644 --- a/clang/lib/Format/MacroExpander.cpp +++ b/clang/lib/Format/MacroExpander.cpp @@ -86,7 +86,7 @@ class MacroExpander::DefinitionParser { } bool parseExpansion() { - if (!Current->isOneOf(tok::equal, tok::eof)) + if (Current->isNoneOf(tok::equal, tok::eof)) return false; if (Current->is(tok::equal)) nextToken(); diff --git a/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/clang/lib/Format/NamespaceEndCommentsFixer.cpp index 08f8d6840fe00..95ccfac5e6e61 100644 --- a/clang/lib/Format/NamespaceEndCommentsFixer.cpp +++ b/clang/lib/Format/NamespaceEndCommentsFixer.cpp @@ -70,7 +70,7 @@ std::string computeName(const FormatToken *NamespaceTok) { // and closing parenthesis or comma. assert(Tok && Tok->is(tok::l_paren) && "expected an opening parenthesis"); Tok = Tok->getNextNonComment(); - while (Tok && !Tok->isOneOf(tok::r_paren, tok::comma)) { + while (Tok && Tok->isNoneOf(tok::r_paren, tok::comma)) { name += Tok->TokenText; Tok = Tok->getNextNonComment(); } @@ -85,7 +85,7 @@ std::string computeName(const FormatToken *NamespaceTok) { // one token before that up until the '{'. A '(' might be a macro with // arguments. const FormatToken *FirstNSTok = nullptr; - while (Tok && !Tok->isOneOf(tok::l_brace, tok::coloncolon, tok::l_paren)) { + while (Tok && Tok->isNoneOf(tok::l_brace, tok::coloncolon, tok::l_paren)) { if (FirstNSTok) FirstNSName += FirstNSTok->TokenText; FirstNSTok = Tok; diff --git a/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp b/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp index b885942efcb55..b12b370538c96 100644 --- a/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp +++ b/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp @@ -61,7 +61,7 @@ void ObjCPropertyAttributeOrderFixer::sortPropertyAttributes( } // Most attributes look like identifiers, but `class` is a keyword. - if (!Tok->isOneOf(tok::identifier, tok::kw_class)) { + if (Tok->isNoneOf(tok::identifier, tok::kw_class)) { // If we hit any other kind of token, just bail. return; } diff --git a/clang/lib/Format/QualifierAlignmentFixer.cpp b/clang/lib/Format/QualifierAlignmentFixer.cpp index 441a37a4902b7..e3e30ca8e2e89 100644 --- a/clang/lib/Format/QualifierAlignmentFixer.cpp +++ b/clang/lib/Format/QualifierAlignmentFixer.cpp @@ -508,7 +508,7 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft( // Don't change declarations such as // `foo(struct Foo const a);` -> `foo(struct Foo const a);` - if (!Previous || !Previous->isOneOf(tok::kw_struct, tok::kw_class)) { + if (!Previous || Previous->isNoneOf(tok::kw_struct, tok::kw_class)) { insertQualifierBefore(SourceMgr, Fixes, TypeToken, Qualifier); removeToken(SourceMgr, Fixes, Tok); } @@ -571,7 +571,7 @@ void LeftRightQualifierAlignmentFixer::fixQualifierAlignment( for (const auto *Tok = First; Tok && Tok != Last && Tok->Next; Tok = Tok->Next) { - if (Tok->MustBreakBefore) + if (Tok->MustBreakBefore && Tok != First) break; if (Tok->is(tok::comment)) continue; diff --git a/clang/lib/Format/SortJavaScriptImports.cpp b/clang/lib/Format/SortJavaScriptImports.cpp index ace3dffebec40..a403a4fed664c 100644 --- a/clang/lib/Format/SortJavaScriptImports.cpp +++ b/clang/lib/Format/SortJavaScriptImports.cpp @@ -439,7 +439,7 @@ class JavaScriptImportSorter : public TokenAnalyzer { // for grammar EBNF (production ModuleItem). 
bool parseModuleReference(const AdditionalKeywords &Keywords, JsModuleReference &Reference) { - if (!Current || !Current->isOneOf(Keywords.kw_import, tok::kw_export)) + if (!Current || Current->isNoneOf(Keywords.kw_import, tok::kw_export)) return false; Reference.IsExport = Current->is(tok::kw_export); @@ -570,7 +570,7 @@ class JavaScriptImportSorter : public TokenAnalyzer { Symbol.Range.setEnd(Current->Tok.getLocation()); Reference.Symbols.push_back(Symbol); - if (!Current->isOneOf(tok::r_brace, tok::comma)) + if (Current->isNoneOf(tok::r_brace, tok::comma)) return false; } Reference.SymbolsEnd = Current->Tok.getLocation(); diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp index 4bfb803ebedf7..59f81b3617ad9 100644 --- a/clang/lib/Format/TokenAnnotator.cpp +++ b/clang/lib/Format/TokenAnnotator.cpp @@ -203,7 +203,7 @@ class AnnotatingParser { return false; } if (InExpr && SeenTernaryOperator && - (!Next || !Next->isOneOf(tok::l_paren, tok::l_brace))) { + (!Next || Next->isNoneOf(tok::l_paren, tok::l_brace))) { return false; } if (!MaybeAngles) @@ -577,7 +577,7 @@ class AnnotatingParser { if (IsIf && CurrentToken->is(tok::semi)) { for (auto *Tok = OpeningParen.Next; Tok != CurrentToken && - !Tok->isOneOf(tok::equal, tok::l_paren, tok::l_brace); + Tok->isNoneOf(tok::equal, tok::l_paren, tok::l_brace); Tok = Tok->Next) { if (Tok->isPointerOrReference()) Tok->setFinalizedType(TT_PointerOrReference); @@ -704,7 +704,7 @@ class AnnotatingParser { !IsCppStructuredBinding && !InsideInlineASM && !CppArrayTemplates && IsCpp && !IsCpp11AttributeSpecifier && !IsCSharpAttributeSpecifier && Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) && - !CurrentToken->isOneOf(tok::l_brace, tok::r_square) && + CurrentToken->isNoneOf(tok::l_brace, tok::r_square) && (!Parent || Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren, tok::kw_return, tok::kw_throw) || @@ -833,11 +833,6 @@ class AnnotatingParser { if (Parent && Parent->is(TT_PointerOrReference)) Parent->overwriteFixedType(TT_BinaryOperator); } - // An arrow after an ObjC method expression is not a lambda arrow. - if (CurrentToken->is(TT_ObjCMethodExpr) && CurrentToken->Next && - CurrentToken->Next->is(TT_LambdaArrow)) { - CurrentToken->Next->overwriteFixedType(TT_Unknown); - } Left->MatchingParen = CurrentToken; CurrentToken->MatchingParen = Left; // FirstObjCSelectorName is set when a colon is found. 
This does @@ -1339,7 +1334,7 @@ class AnnotatingParser { if (Style.isJavaScript()) { if (Contexts.back().ColonIsForRangeExpr || // colon in for loop (Contexts.size() == 1 && // switch/case labels - !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) || + Line.First->isNoneOf(tok::kw_enum, tok::kw_case)) || Contexts.back().ContextKind == tok::l_paren || // function params Contexts.back().ContextKind == tok::l_square || // array type (!Contexts.back().IsExpression && @@ -1416,7 +1411,7 @@ class AnnotatingParser { } else if (Contexts.back().ColonIsForRangeExpr) { Tok->setType(TT_RangeBasedForLoopColon); for (auto *Token = Prev; - Token && !Token->isOneOf(tok::semi, tok::l_paren); + Token && Token->isNoneOf(tok::semi, tok::l_paren); Token = Token->Previous) { if (Token->isPointerOrReference()) Token->setFinalizedType(TT_PointerOrReference); @@ -1430,7 +1425,7 @@ class AnnotatingParser { Scopes.back() == ST_Class)) { Tok->setType(TT_BitFieldColon); } else if (Contexts.size() == 1 && - !Line.getFirstNonComment()->isOneOf(tok::kw_enum, tok::kw_case, + Line.getFirstNonComment()->isNoneOf(tok::kw_enum, tok::kw_case, tok::kw_default) && !Line.startsWith(tok::kw_typedef, tok::kw_enum)) { if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept) || @@ -1567,10 +1562,10 @@ class AnnotatingParser { if (Line.MustBeDeclaration && Contexts.size() == 1 && !Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) && !Line.startsWith(tok::l_paren) && - !Tok->isOneOf(TT_TypeDeclarationParen, TT_RequiresExpressionLParen)) { + Tok->isNoneOf(TT_TypeDeclarationParen, TT_RequiresExpressionLParen)) { if (!Prev || (!Prev->isAttribute() && - !Prev->isOneOf(TT_RequiresClause, TT_LeadingJavaAnnotation, + Prev->isNoneOf(TT_RequiresClause, TT_LeadingJavaAnnotation, TT_BinaryOperator))) { Line.MightBeFunctionDecl = true; Tok->MightBeFunctionDeclParen = true; @@ -1669,7 +1664,7 @@ class AnnotatingParser { } } while (CurrentToken && - !CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) { + CurrentToken->isNoneOf(tok::l_paren, tok::semi, tok::r_paren)) { if (CurrentToken->isOneOf(tok::star, tok::amp)) CurrentToken->setType(TT_PointerOrReference); auto Next = CurrentToken->getNextNonComment(); @@ -1733,8 +1728,8 @@ class AnnotatingParser { // cond ? id : "B"; // cond ? cond2 ? "A" : "B" : "C"; if (!Contexts.back().IsExpression && Line.MustBeDeclaration && - (!Next || !Next->isOneOf(tok::identifier, tok::string_literal) || - !Next->Next || !Next->Next->isOneOf(tok::colon, tok::question))) { + (!Next || Next->isNoneOf(tok::identifier, tok::string_literal) || + !Next->Next || Next->Next->isNoneOf(tok::colon, tok::question))) { Tok->setType(TT_CSharpNullable); break; } @@ -1801,7 +1796,7 @@ class AnnotatingParser { if (!parseTableGenValue()) return false; } else if (Tok->isOneOf(Keywords.kw_def, Keywords.kw_defm) && - (!Next || !Next->isOneOf(tok::colon, tok::l_brace))) { + (!Next || Next->isNoneOf(tok::colon, tok::l_brace))) { // The case NameValue appears. if (!parseTableGenValue(true)) return false; @@ -2099,7 +2094,7 @@ class AnnotatingParser { // Reset token type in case we have already looked at it and then // recovered from an error (e.g. failure to find the matching >). if (!CurrentToken->isTypeFinalized() && - !CurrentToken->isOneOf( + CurrentToken->isNoneOf( TT_LambdaLSquare, TT_LambdaLBrace, TT_AttributeMacro, TT_IfMacro, TT_ForEachMacro, TT_TypenameMacro, TT_FunctionLBrace, TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_FatArrow, @@ -2235,7 +2230,7 @@ class AnnotatingParser { // type or non-type. 
if (Contexts.back().ContextKind == tok::less) { assert(Current.Previous->Previous); - return !Current.Previous->Previous->isOneOf(tok::kw_typename, + return Current.Previous->Previous->isNoneOf(tok::kw_typename, tok::kw_class); } @@ -2271,7 +2266,7 @@ class AnnotatingParser { if (!Line.startsWith(TT_UnaryOperator)) { for (FormatToken *Previous = Current.Previous; Previous && Previous->Previous && - !Previous->Previous->isOneOf(tok::comma, tok::semi); + Previous->Previous->isNoneOf(tok::comma, tok::semi); Previous = Previous->Previous) { if (Previous->isOneOf(tok::r_square, tok::r_paren, tok::greater)) { Previous = Previous->MatchingParen; @@ -2435,7 +2430,7 @@ class AnnotatingParser { Current.setType(TT_BinaryOperator); } else if (Current.is(tok::arrow) && AutoFound && Line.MightBeFunctionDecl && Current.NestingLevel == 0 && - !Current.Previous->isOneOf(tok::kw_operator, tok::identifier)) { + Current.Previous->isNoneOf(tok::kw_operator, tok::identifier)) { // not auto operator->() -> xxx; Current.setType(TT_TrailingReturnArrow); } else if (Current.is(tok::arrow) && Current.Previous && @@ -2516,7 +2511,7 @@ class AnnotatingParser { Current.setType(TT_CastRParen); if (Current.MatchingParen && Current.Next && !Current.Next->isBinaryOperator() && - !Current.Next->isOneOf( + Current.Next->isNoneOf( tok::semi, tok::colon, tok::l_brace, tok::l_paren, tok::comma, tok::period, tok::arrow, tok::coloncolon, tok::kw_noexcept)) { if (FormatToken *AfterParen = Current.MatchingParen->Next; @@ -2574,7 +2569,7 @@ class AnnotatingParser { } else if (Current.isOneOf(tok::identifier, tok::kw_const, tok::kw_noexcept, tok::kw_requires) && Current.Previous && - !Current.Previous->isOneOf(tok::equal, tok::at, + Current.Previous->isNoneOf(tok::equal, tok::at, TT_CtorInitializerComma, TT_CtorInitializerColon) && Line.MightBeFunctionDecl && Contexts.size() == 1) { @@ -2663,7 +2658,7 @@ class AnnotatingParser { if (PreviousNotConst->is(TT_TemplateCloser)) { return PreviousNotConst && PreviousNotConst->MatchingParen && PreviousNotConst->MatchingParen->Previous && - !PreviousNotConst->MatchingParen->Previous->isOneOf( + PreviousNotConst->MatchingParen->Previous->isNoneOf( tok::period, tok::kw_template); } @@ -2785,7 +2780,7 @@ class AnnotatingParser { // If there is an identifier (or with a few exceptions a keyword) right // before the parentheses, this is unlikely to be a cast. if (LeftOfParens->Tok.getIdentifierInfo() && - !LeftOfParens->isOneOf(Keywords.kw_in, tok::kw_return, tok::kw_case, + LeftOfParens->isNoneOf(Keywords.kw_in, tok::kw_return, tok::kw_case, tok::kw_delete, tok::kw_throw)) { return false; } @@ -2923,7 +2918,7 @@ class AnnotatingParser { const bool NextIsAmpOrStar = AfterRParen->isOneOf(tok::amp, tok::star); if (!(AfterRParen->isUnaryOperator() || NextIsAmpOrStar) || AfterRParen->is(tok::plus) || - !AfterRParen->Next->isOneOf(tok::identifier, tok::numeric_constant)) { + AfterRParen->Next->isNoneOf(tok::identifier, tok::numeric_constant)) { return false; } @@ -2953,7 +2948,7 @@ class AnnotatingParser { // Search for unexpected tokens. 
for (Prev = BeforeRParen; Prev != LParen; Prev = Prev->Previous) - if (!Prev->isOneOf(tok::kw_const, tok::identifier, tok::coloncolon)) + if (Prev->isNoneOf(tok::kw_const, tok::identifier, tok::coloncolon)) return false; return true; @@ -3745,7 +3740,7 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) { const bool InRequiresExpression = Line.Type == LT_RequiresExpression; for (auto &Child : Line.Children) { if (InRequiresExpression && - !Child->First->isOneOf(tok::kw_typename, tok::kw_requires, + Child->First->isNoneOf(tok::kw_typename, tok::kw_requires, TT_CompoundRequirementLBrace)) { Child->Type = LT_SimpleRequirement; } @@ -3862,7 +3857,7 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts, // Find parentheses of parameter list. if (Current.is(tok::kw_operator)) { if (Previous.Tok.getIdentifierInfo() && - !Previous.isOneOf(tok::kw_return, tok::kw_co_return)) { + Previous.isNoneOf(tok::kw_return, tok::kw_co_return)) { return true; } if (Previous.is(tok::r_paren) && Previous.is(TT_TypeDeclarationParen)) { @@ -4026,29 +4021,28 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const { } } - if (IsCpp && - (LineIsFunctionDeclaration || - (FirstNonComment && FirstNonComment->is(TT_CtorDtorDeclName))) && - Line.endsWith(tok::semi, tok::r_brace)) { - auto *Tok = Line.Last->Previous; - while (Tok->isNot(tok::r_brace)) - Tok = Tok->Previous; - if (auto *LBrace = Tok->MatchingParen; LBrace && LBrace->is(TT_Unknown)) { - assert(LBrace->is(tok::l_brace)); - Tok->setBlockKind(BK_Block); - LBrace->setBlockKind(BK_Block); - LBrace->setFinalizedType(TT_FunctionLBrace); + if (IsCpp) { + if ((LineIsFunctionDeclaration || + (FirstNonComment && FirstNonComment->is(TT_CtorDtorDeclName))) && + Line.endsWith(tok::semi, tok::r_brace)) { + auto *Tok = Line.Last->Previous; + while (Tok->isNot(tok::r_brace)) + Tok = Tok->Previous; + if (auto *LBrace = Tok->MatchingParen; LBrace && LBrace->is(TT_Unknown)) { + assert(LBrace->is(tok::l_brace)); + Tok->setBlockKind(BK_Block); + LBrace->setBlockKind(BK_Block); + LBrace->setFinalizedType(TT_FunctionLBrace); + } } - } - if (IsCpp && SeenName && AfterLastAttribute && - mustBreakAfterAttributes(*AfterLastAttribute, Style)) { - AfterLastAttribute->MustBreakBefore = true; - if (LineIsFunctionDeclaration) - Line.ReturnTypeWrapped = true; - } + if (SeenName && AfterLastAttribute && + mustBreakAfterAttributes(*AfterLastAttribute, Style)) { + AfterLastAttribute->MustBreakBefore = true; + if (LineIsFunctionDeclaration) + Line.ReturnTypeWrapped = true; + } - if (IsCpp) { if (!LineIsFunctionDeclaration) { // Annotate */&/&& in `operator` function calls as binary operators. for (const auto *Tok = FirstNonComment; Tok; Tok = Tok->Next) { @@ -4094,6 +4088,11 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const { } } + if (First->is(TT_ElseLBrace)) { + First->CanBreakBefore = true; + First->MustBreakBefore = true; + } + bool InFunctionDecl = Line.MightBeFunctionDecl; bool InParameterList = false; for (auto *Current = First->Next; Current; Current = Current->Next) { @@ -4329,7 +4328,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line, // Slightly prefer formatting local lambda definitions like functions. 
if (Right.is(TT_LambdaLSquare) && Left.is(tok::equal)) return 35; - if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare, + if (Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare, TT_ArrayInitializerLSquare, TT_DesignatedInitializerLSquare, TT_AttributeSquare)) { return 500; @@ -4520,7 +4519,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, const FormatToken &Left, const FormatToken &Right) const { if (Left.is(tok::kw_return) && - !Right.isOneOf(tok::semi, tok::r_paren, tok::hashhash)) { + Right.isNoneOf(tok::semi, tok::r_paren, tok::hashhash)) { return true; } if (Left.is(tok::kw_throw) && Right.is(tok::l_paren) && Right.MatchingParen && @@ -4580,7 +4579,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, } // co_await (x), co_yield (x), co_return (x) if (Left.isOneOf(tok::kw_co_await, tok::kw_co_yield, tok::kw_co_return) && - !Right.isOneOf(tok::semi, tok::r_paren)) { + Right.isNoneOf(tok::semi, tok::r_paren)) { return true; } @@ -4657,7 +4656,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, return getTokenPointerOrReferenceAlignment(Right) != FormatStyle::PAS_Left; } - return !Left.isOneOf(TT_PointerOrReference, tok::l_paren) && + return Left.isNoneOf(TT_PointerOrReference, tok::l_paren) && (getTokenPointerOrReferenceAlignment(Right) != FormatStyle::PAS_Left || (Line.IsMultiVariableDeclStmt && @@ -4730,7 +4729,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, const auto *LParen = Right.Next->MatchingParen; return !LParen || LParen->isNot(TT_FunctionTypeLParen); } - return !BeforeLeft->isOneOf(tok::l_paren, tok::l_square); + return BeforeLeft->isNoneOf(tok::l_paren, tok::l_square); } // Ensure right pointer alignment with ellipsis e.g. int *...P if (Left.is(tok::ellipsis) && BeforeLeft && @@ -4809,10 +4808,10 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, TT_LambdaLSquare))); } if (Right.is(tok::l_square) && - !Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare, + Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare, TT_DesignatedInitializerLSquare, TT_StructuredBindingLSquare, TT_AttributeSquare) && - !Left.isOneOf(tok::numeric_constant, TT_DictLiteral) && + Left.isNoneOf(tok::numeric_constant, TT_DictLiteral) && !(Left.isNot(tok::r_square) && Style.SpaceBeforeSquareBrackets && Right.is(TT_ArraySubscriptLSquare))) { return false; @@ -4895,7 +4894,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName || spaceRequiredBeforeParens(Right); } - if (!BeforeLeft || !BeforeLeft->isOneOf(tok::period, tok::arrow)) { + if (!BeforeLeft || BeforeLeft->isNoneOf(tok::period, tok::arrow)) { if (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch)) { return Style.SpaceBeforeParensOptions.AfterControlStatements || spaceRequiredBeforeParens(Right); @@ -4918,7 +4917,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, if (Left.is(tok::at) && Right.isNot(tok::objc_not_keyword)) return false; if (Right.is(TT_UnaryOperator)) { - return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) && + return Left.isNoneOf(tok::l_paren, tok::l_square, tok::at) && (Left.isNot(tok::colon) || Left.isNot(TT_ObjCMethodExpr)); } // No space between the variable name and the initializer list. 
@@ -5261,7 +5260,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, if (Left.is(tok::ellipsis)) return false; if (Left.is(TT_TemplateCloser) && - !Right.isOneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square, + Right.isNoneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square, Keywords.kw_implements, Keywords.kw_extends)) { // Type assertions ('expr') are not followed by whitespace. Other // locations that should have whitespace following are identified by the @@ -5300,7 +5299,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, // Add space between things in a primitive's state table unless in a // transition like `(0?)`. if ((Left.is(TT_VerilogTableItem) && - !Right.isOneOf(tok::r_paren, tok::semi)) || + Right.isNoneOf(tok::r_paren, tok::semi)) || (Right.is(TT_VerilogTableItem) && Left.isNot(tok::l_paren))) { const FormatToken *Next = Right.getNextNonComment(); return !(Next && Next->is(tok::r_paren)); @@ -5349,8 +5348,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, // previous rule. if ((Right.is(Keywords.kw_apostrophe) || (Right.is(BK_BracedInit) && Right.is(tok::l_brace))) && - !(Left.isOneOf(Keywords.kw_assign, Keywords.kw_unique) || - Keywords.isVerilogWordOperator(Left)) && + Left.isNoneOf(Keywords.kw_assign, Keywords.kw_unique) && + !Keywords.isVerilogWordOperator(Left) && (Left.isOneOf(tok::r_square, tok::r_paren, tok::r_brace, tok::numeric_constant) || Keywords.isWordLike(Left))) { @@ -5550,14 +5549,14 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, return Right.hasWhitespaceBefore(); } if (Right.is(tok::coloncolon) && - !Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren)) { + Left.isNoneOf(tok::l_brace, tok::comment, tok::l_paren)) { // Put a space between < and :: in vector< ::std::string > return (Left.is(TT_TemplateOpener) && ((Style.Standard < FormatStyle::LS_Cpp11) || ShouldAddSpacesInAngles())) || - !(Left.isOneOf(tok::l_paren, tok::r_paren, tok::l_square, - tok::kw___super, TT_TemplateOpener, - TT_TemplateCloser)) || + Left.isNoneOf(tok::l_paren, tok::r_paren, tok::l_square, + tok::kw___super, TT_TemplateOpener, + TT_TemplateCloser) || (Left.is(tok::l_paren) && Style.SpacesInParensOptions.Other); } if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser))) @@ -5568,7 +5567,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, } // Space before TT_StructuredBindingLSquare. if (Right.is(TT_StructuredBindingLSquare)) { - return !Left.isOneOf(tok::amp, tok::ampamp) || + return Left.isNoneOf(tok::amp, tok::ampamp) || getTokenReferenceAlignment(Left) != FormatStyle::PAS_Right; } // Space before & or && following a TT_StructuredBindingLSquare. @@ -5600,7 +5599,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, // Returns 'true' if 'Tok' is a brace we'd want to break before in Allman style. static bool isAllmanBrace(const FormatToken &Tok) { return Tok.is(tok::l_brace) && Tok.is(BK_Block) && - !Tok.isOneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral); + Tok.isNoneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral); } // Returns 'true' if 'Tok' is a function argument. 
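Several of the hunks above do more than a mechanical swap: where the old condition negated an isOneOf that was OR-ed with something else, the rewrite applies De Morgan's law. A tiny compile-time check of the equivalence relied on (plain bools, not the formatter types):

    // !(Tok.isOneOf(X, Y) || Other)  ==>  Tok.isNoneOf(X, Y) && !Other
    constexpr bool sameAnswer(bool OneOf, bool Other) {
      return !(OneOf || Other) == (!OneOf && !Other);
    }
    static_assert(sameAnswer(false, false) && sameAnswer(false, true) &&
                      sameAnswer(true, false) && sameAnswer(true, true),
                  "De Morgan's law justifies the folded rewrites");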
@@ -5618,7 +5617,7 @@ isEmptyLambdaAllowed(const FormatToken &Tok, static bool isAllmanLambdaBrace(const FormatToken &Tok) { return Tok.is(tok::l_brace) && Tok.is(BK_Block) && - !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral); + Tok.isNoneOf(TT_ObjCBlockLBrace, TT_DictLiteral); } bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, @@ -5687,7 +5686,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, tok::kw_const) && // kw_var/kw_let are pseudo-tokens that are tok::identifier, so match // above. - !Line.First->isOneOf(Keywords.kw_var, Keywords.kw_let)) { + Line.First->isNoneOf(Keywords.kw_var, Keywords.kw_let)) { // Object literals on the top level of a file are treated as "enum-style". // Each key/value pair is put on a separate line, instead of bin-packing. return true; @@ -5832,7 +5831,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, } if (Right.is(tok::comment)) { - return !Left.isOneOf(BK_BracedInit, TT_CtorInitializerColon) && + return Left.isNoneOf(BK_BracedInit, TT_CtorInitializerColon) && Right.NewlinesBefore > 0 && Right.HasUnescapedNewline; } if (Left.isTrailingComment()) @@ -5874,7 +5873,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, case FormatStyle::RCPS_WithPreceding: return Right.isNot(tok::semi); case FormatStyle::RCPS_OwnLineWithBrace: - return !Right.isOneOf(tok::semi, tok::l_brace); + return Right.isNoneOf(tok::semi, tok::l_brace); default: break; } @@ -6001,7 +6000,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, // Put multiple Java annotation on a new line. if ((Style.isJava() || Style.isJavaScript()) && Left.is(TT_LeadingJavaAnnotation) && - !Right.isOneOf(TT_LeadingJavaAnnotation, tok::l_paren) && + Right.isNoneOf(TT_LeadingJavaAnnotation, tok::l_paren) && (Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations)) { return true; } @@ -6207,7 +6206,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, return false; // Avoid to break after '(' in the cases that is in bang operators. if (Right.is(tok::l_paren)) { - return !Left.isOneOf(TT_TableGenBangOperator, TT_TableGenCondOperator, + return Left.isNoneOf(TT_TableGenBangOperator, TT_TableGenCondOperator, TT_TemplateCloser); } // Avoid to break between the value and its suffix part. @@ -6295,7 +6294,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, } if (Right.is(tok::colon) && - !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon, + Right.isNoneOf(TT_CtorInitializerColon, TT_InlineASMColon, TT_BitFieldColon)) { return false; } @@ -6379,7 +6378,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, } if (Left.isOneOf(TT_TemplateCloser, TT_UnaryOperator, tok::kw_operator)) return false; - if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) && + if (Left.is(tok::equal) && Right.isNoneOf(tok::kw_default, tok::kw_delete) && Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0) { return false; } @@ -6406,7 +6405,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, // Allow breaking after a trailing annotation, e.g. after a method // declaration. 
if (Left.is(TT_TrailingAnnotation)) { - return !Right.isOneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren, + return Right.isNoneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren, tok::less, tok::coloncolon); } @@ -6449,7 +6448,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, if (Right.is(tok::kw_typename) && Left.isNot(tok::kw_const)) return true; if ((Left.isBinaryOperator() || Left.is(TT_BinaryOperator)) && - !Left.isOneOf(tok::arrowstar, tok::lessless) && + Left.isNoneOf(tok::arrowstar, tok::lessless) && Style.BreakBeforeBinaryOperators != FormatStyle::BOS_All && (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None || Left.getPrecedence() == prec::Assignment)) { @@ -6496,13 +6495,14 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) const { << "):\n"; const FormatToken *Tok = Line.First; while (Tok) { - llvm::errs() << " M=" << Tok->MustBreakBefore + llvm::errs() << " I=" << Tok->IndentLevel << " M=" << Tok->MustBreakBefore << " C=" << Tok->CanBreakBefore << " T=" << getTokenTypeName(Tok->getType()) << " S=" << Tok->SpacesRequiredBefore << " F=" << Tok->Finalized << " B=" << Tok->BlockParameterCount << " BK=" << Tok->getBlockKind() << " P=" << Tok->SplitPenalty - << " Name=" << Tok->Tok.getName() << " L=" << Tok->TotalLength + << " Name=" << Tok->Tok.getName() << " N=" << Tok->NestingLevel + << " L=" << Tok->TotalLength << " PPK=" << Tok->getPackingKind() << " FakeLParens="; for (prec::Level LParen : Tok->FakeLParens) llvm::errs() << LParen << "/"; diff --git a/clang/lib/Format/UnwrappedLineFormatter.cpp b/clang/lib/Format/UnwrappedLineFormatter.cpp index ac9d147defc13..ac9c81d4416c9 100644 --- a/clang/lib/Format/UnwrappedLineFormatter.cpp +++ b/clang/lib/Format/UnwrappedLineFormatter.cpp @@ -506,7 +506,7 @@ class LineJoiner { (NextLine.First->is(tok::r_brace) && !Style.BraceWrapping.SplitEmptyRecord); } else if (TheLine->InPPDirective || - !TheLine->First->isOneOf(tok::kw_class, tok::kw_enum, + TheLine->First->isNoneOf(tok::kw_class, tok::kw_enum, tok::kw_struct)) { // Try to merge a block with left brace unwrapped that wasn't yet // covered. @@ -686,8 +686,8 @@ class LineJoiner { } Limit = limitConsideringMacros(I + 1, E, Limit); AnnotatedLine &Line = **I; - if (Line.First->isNot(tok::kw_do) && Line.First->isNot(tok::kw_else) && - Line.Last->isNot(tok::kw_else) && Line.Last->isNot(tok::r_paren)) { + if (Line.First->isNoneOf(tok::kw_do, tok::kw_else) && + Line.Last->isNoneOf(tok::kw_else, tok::r_paren)) { return 0; } // Only merge `do while` if `do` is the only statement on the line. 
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp index 2c9766c9b7bc0..28797433e06e3 100644 --- a/clang/lib/Format/UnwrappedLineParser.cpp +++ b/clang/lib/Format/UnwrappedLineParser.cpp @@ -405,7 +405,7 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace, case tok::r_brace: if (OpeningBrace) { if (!Style.RemoveBracesLLVM || Line->InPPDirective || - !OpeningBrace->isOneOf(TT_ControlStatementLBrace, TT_ElseLBrace)) { + OpeningBrace->isNoneOf(TT_ControlStatementLBrace, TT_ElseLBrace)) { return false; } if (FormatTok->isNot(tok::r_brace) || StatementCount != 1 || HasLabel || @@ -427,7 +427,7 @@ bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace, unsigned StoredPosition = Tokens->getPosition(); auto *Next = Tokens->getNextNonComment(); FormatTok = Tokens->setPosition(StoredPosition); - if (!Next->isOneOf(tok::colon, tok::arrow)) { + if (Next->isNoneOf(tok::colon, tok::arrow)) { // default not followed by `:` or `->` is not a case label; treat it // like an identifier. parseStructuralElement(); @@ -584,7 +584,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) { ProbablyBracedList = ProbablyBracedList || (NextTok->is(tok::identifier) && - !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace)); + PrevTok->isNoneOf(tok::semi, tok::r_brace, tok::l_brace)); ProbablyBracedList = ProbablyBracedList || (NextTok->is(tok::semi) && @@ -607,7 +607,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) { // A statement can end with only `;` (simple statement), a block // closing brace (compound statement), or `:` (label statement). // If PrevTok is a block opening brace, Tok ends an empty block. - !PrevTok->isOneOf(tok::semi, BK_Block, tok::colon)) { + PrevTok->isNoneOf(tok::semi, BK_Block, tok::colon)) { ProbablyBracedList = true; } } @@ -1157,7 +1157,7 @@ void UnwrappedLineParser::parsePPDefine() { IncludeGuard = IG_Defined; IncludeGuardToken = nullptr; for (auto &Line : Lines) { - if (!Line.Tokens.front().Tok->isOneOf(tok::comment, tok::hash)) { + if (Line.Tokens.front().Tok->isNoneOf(tok::comment, tok::hash)) { IncludeGuard = IG_Rejected; break; } @@ -1233,7 +1233,7 @@ void UnwrappedLineParser::parsePPUnknown() { static bool tokenCanStartNewLine(const FormatToken &Tok) { // Semicolon can be a null-statement, l_square can be a start of a macro or // a C++11 attribute, but this doesn't seem to be common. - return !Tok.isOneOf(tok::semi, tok::l_brace, + return Tok.isNoneOf(tok::semi, tok::l_brace, // Tokens that can only be used as binary operators and a // part of overloaded operator names. tok::period, tok::periodstar, tok::arrow, tok::arrowstar, @@ -1256,7 +1256,7 @@ static bool mustBeJSIdent(const AdditionalKeywords &Keywords, // FIXME: This returns true for C/C++ keywords like 'struct'. 
return FormatTok->is(tok::identifier) && (!FormatTok->Tok.getIdentifierInfo() || - !FormatTok->isOneOf( + FormatTok->isNoneOf( Keywords.kw_in, Keywords.kw_of, Keywords.kw_as, Keywords.kw_async, Keywords.kw_await, Keywords.kw_yield, Keywords.kw_finally, Keywords.kw_function, Keywords.kw_import, Keywords.kw_is, @@ -1322,7 +1322,7 @@ static bool isC78ParameterDecl(const FormatToken *Tok, const FormatToken *Next, return false; if (!isC78Type(*Tok) && - !Tok->isOneOf(tok::kw_register, tok::kw_struct, tok::kw_union)) { + Tok->isNoneOf(tok::kw_register, tok::kw_struct, tok::kw_union)) { return false; } @@ -1345,7 +1345,7 @@ bool UnwrappedLineParser::parseModuleImport() { if (auto Token = Tokens->peekNextToken(/*SkipComment=*/true); !Token->Tok.getIdentifierInfo() && - !Token->isOneOf(tok::colon, tok::less, tok::string_literal)) { + Token->isNoneOf(tok::colon, tok::less, tok::string_literal)) { return false; } @@ -1357,7 +1357,7 @@ bool UnwrappedLineParser::parseModuleImport() { // Handle import as we would an include statement. else if (FormatTok->is(tok::less)) { nextToken(); - while (!FormatTok->isOneOf(tok::semi, tok::greater) && !eof()) { + while (FormatTok->isNoneOf(tok::semi, tok::greater) && !eof()) { // Mark tokens up to the trailing line comments as implicit string // literals. if (FormatTok->isNot(tok::comment) && @@ -2268,7 +2268,7 @@ bool UnwrappedLineParser::tryToParseLambda() { if (!tryToParseLambdaIntroducer()) return false; - bool SeenArrow = false; + FormatToken *Arrow = nullptr; bool InTemplateParameterList = false; while (FormatTok->isNot(tok::l_brace)) { @@ -2343,17 +2343,13 @@ bool UnwrappedLineParser::tryToParseLambda() { case tok::ellipsis: case tok::kw_true: case tok::kw_false: - if (SeenArrow || InTemplateParameterList) { + if (Arrow || InTemplateParameterList) { nextToken(); break; } return true; case tok::arrow: - // This might or might not actually be a lambda arrow (this could be an - // ObjC method invocation followed by a dereferencing arrow). We might - // reset this back to TT_Unknown in TokenAnnotator. - FormatTok->setFinalizedType(TT_LambdaArrow); - SeenArrow = true; + Arrow = FormatTok; nextToken(); break; case tok::kw_requires: { @@ -2375,6 +2371,9 @@ bool UnwrappedLineParser::tryToParseLambda() { FormatTok->setFinalizedType(TT_LambdaLBrace); LSquare.setFinalizedType(TT_LambdaLSquare); + if (Arrow) + Arrow->setFinalizedType(TT_LambdaArrow); + NestedLambdas.push_back(Line->SeenDecltypeAuto); parseChildBlock(); assert(!NestedLambdas.empty()); @@ -2388,11 +2387,6 @@ bool UnwrappedLineParser::tryToParseLambdaIntroducer() { const FormatToken *LeftSquare = FormatTok; nextToken(); if (Previous) { - if (Previous->Tok.getIdentifierInfo() && - !Previous->isOneOf(tok::kw_return, tok::kw_co_await, tok::kw_co_yield, - tok::kw_co_return)) { - return false; - } if (Previous->closesScope()) { // Not a potential C-style cast. if (Previous->isNot(tok::r_paren)) @@ -2400,8 +2394,15 @@ bool UnwrappedLineParser::tryToParseLambdaIntroducer() { const auto *BeforeRParen = Previous->getPreviousNonComment(); // Lambdas can be cast to function types only, e.g. `std::function` // and `int (*)()`. 
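The tryToParseLambda change above stops pre-committing TT_LambdaArrow (and drops the later ObjC-method-expression reset); the candidate arrow is only remembered, and is finalized once the lambda's opening brace is actually reached. A hedged sketch of that record-now, classify-later pattern with toy types (illustrative names, not the clang-format API):

    #include <string>
    #include <vector>

    struct Tok {
      std::string Text;
      std::string Type = "Unknown";
    };

    // Mark a '->' as a lambda arrow only after the body's '{' confirms that we
    // really are in a lambda, so a stray arrow is never typed and reset later.
    bool classifyLambdaArrow(std::vector<Tok> &Toks) {
      Tok *Arrow = nullptr;
      for (Tok &T : Toks) {
        if (T.Text == "->")
          Arrow = &T;                    // Remember, but do not finalize yet.
        if (T.Text == "{") {
          if (Arrow)
            Arrow->Type = "LambdaArrow"; // Confirmed: finalize now.
          return true;
        }
      }
      return false;                      // No body; the arrow stays Unknown.
    }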
- if (!BeforeRParen || !BeforeRParen->isOneOf(tok::greater, tok::r_paren)) + if (!BeforeRParen || BeforeRParen->isNoneOf(tok::greater, tok::r_paren)) return false; + } else if (Previous->is(tok::star)) { + Previous = Previous->getPreviousNonComment(); + } + if (Previous && Previous->Tok.getIdentifierInfo() && + Previous->isNoneOf(tok::kw_return, tok::kw_co_await, tok::kw_co_yield, + tok::kw_co_return)) { + return false; } } if (LeftSquare->isCppStructuredBinding(IsCpp)) @@ -2449,7 +2450,7 @@ void UnwrappedLineParser::tryToParseJSFunction() { if (FormatTok->is(tok::l_brace)) tryToParseBracedList(); else - while (!FormatTok->isOneOf(tok::l_brace, tok::semi) && !eof()) + while (FormatTok->isNoneOf(tok::l_brace, tok::semi) && !eof()) nextToken(); } @@ -3107,11 +3108,11 @@ void UnwrappedLineParser::parseTryCatch() { for (bool SeenCatch = false;;) { if (FormatTok->is(tok::at)) nextToken(); - if (!(FormatTok->isOneOf(tok::kw_catch, Keywords.kw___except, - tok::kw___finally, tok::objc_catch, - tok::objc_finally) || - ((Style.isJava() || Style.isJavaScript()) && - FormatTok->is(Keywords.kw_finally)))) { + if (FormatTok->isNoneOf(tok::kw_catch, Keywords.kw___except, + tok::kw___finally, tok::objc_catch, + tok::objc_finally) && + !((Style.isJava() || Style.isJavaScript()) && + FormatTok->is(Keywords.kw_finally))) { break; } if (FormatTok->is(tok::kw_catch)) @@ -3289,7 +3290,7 @@ void UnwrappedLineParser::parseForOrWhileLoop(bool HasParens) { Keywords.kw_repeat))) && "'for', 'while' or foreach macro expected"); const bool KeepBraces = !Style.RemoveBracesLLVM || - !FormatTok->isOneOf(tok::kw_for, tok::kw_while); + FormatTok->isNoneOf(tok::kw_for, tok::kw_while); nextToken(); // JS' for await ( ... @@ -4338,7 +4339,7 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() { // to the terminating `;`. For everything else, just return and continue // parsing the structural element, i.e. the declaration or expression for // `export default`. - if (!IsImport && !FormatTok->isOneOf(tok::l_brace, tok::star) && + if (!IsImport && FormatTok->isNoneOf(tok::l_brace, tok::star) && !FormatTok->isStringLiteral() && !(FormatTok->is(Keywords.kw_type) && Tokens->peekNextToken()->isOneOf(tok::l_brace, tok::star))) { @@ -4885,7 +4886,7 @@ void UnwrappedLineParser::readToken(int LevelDifference) { const auto *Next = Tokens->peekNextToken(); if ((Style.isVerilog() && !Keywords.isVerilogPPDirective(*Next)) || (Style.isTableGen() && - !Next->isOneOf(tok::kw_else, tok::pp_define, tok::pp_ifdef, + Next->isNoneOf(tok::kw_else, tok::pp_define, tok::pp_ifdef, tok::pp_ifndef, tok::pp_endif))) { break; } diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp index cc3cc0f6906cc..54f366fc02502 100644 --- a/clang/lib/Format/WhitespaceManager.cpp +++ b/clang/lib/Format/WhitespaceManager.cpp @@ -279,20 +279,19 @@ void WhitespaceManager::calculateLineBreakInformation() { } // Align a single sequence of tokens, see AlignTokens below. -// Column - The token for which Matches returns true is moved to this column. +// Column - The tokens indexed in Matches are moved to this column. // RightJustify - Whether it is the token's right end or left end that gets // moved to that column. 
-template <typename F> static void AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End, - unsigned Column, bool RightJustify, F &&Matches, + unsigned Column, bool RightJustify, + ArrayRef<unsigned> Matches, SmallVector<WhitespaceManager::Change, 16> &Changes) { - bool FoundMatchOnLine = false; int Shift = 0; // ScopeStack keeps track of the current scope depth. It contains indices of // the first token on each scope. - // We only run the "Matches" function on tokens from the outer-most scope. + // The "Matches" indices should only have tokens from the outer-most scope. // However, we do need to pay special attention to one class of tokens // that are not in the outer-most scope, and that is function parameters // which are split across multiple lines, as illustrated by this example: @@ -314,6 +313,9 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End, for (unsigned i = Start; i != End; ++i) { auto &CurrentChange = Changes[i]; + if (!Matches.empty() && Matches[0] < i) + Matches.consume_front(); + assert(Matches.empty() || Matches[0] >= i); if (!ScopeStack.empty() && CurrentChange.indentAndNestingLevel() < Changes[ScopeStack.back()].indentAndNestingLevel()) { @@ -338,26 +340,16 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End, Changes[i - 1].Tok->is(tok::string_literal); bool SkipMatchCheck = InsideNestedScope || ContinuedStringLiteral; - if (CurrentChange.NewlinesBefore > 0 && !SkipMatchCheck) { + if (CurrentChange.NewlinesBefore > 0 && !SkipMatchCheck) Shift = 0; - FoundMatchOnLine = false; - } // If this is the first matching token to be aligned, remember by how many // spaces it has to be shifted, so the rest of the changes on the line are // shifted by the same amount - if (!FoundMatchOnLine && !SkipMatchCheck && Matches(CurrentChange)) { - FoundMatchOnLine = true; + if (!Matches.empty() && Matches[0] == i) { Shift = Column - (RightJustify ? CurrentChange.TokenLength : 0) - CurrentChange.StartOfTokenColumn; CurrentChange.Spaces += Shift; - // FIXME: This is a workaround that should be removed when we fix - // http://llvm.org/PR53699. An assertion later below verifies this. - if (CurrentChange.NewlinesBefore == 0) { - CurrentChange.Spaces = - std::max(CurrentChange.Spaces, - static_cast<int>(CurrentChange.Tok->SpacesRequiredBefore)); - } } if (Shift == 0) @@ -470,7 +462,7 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End, if ((Style.PointerAlignment == FormatStyle::PAS_Right || Style.ReferenceAlignment == FormatStyle::RAS_Right) && CurrentChange.Spaces != 0 && - !CurrentChange.Tok->isOneOf(tok::equal, tok::r_paren, + CurrentChange.Tok->isNoneOf(tok::equal, tok::r_paren, TT_TemplateCloser)) { const bool ReferenceNotRightAligned = Style.ReferenceAlignment != FormatStyle::RAS_Right && @@ -532,12 +524,14 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches, bool RightJustify = false) { // We arrange each line in 3 parts. The operator to be aligned (the anchor), // and text to its left and right. In the aligned text the width of each part - // will be the maximum of that over the block that has been aligned. Maximum - // widths of each part so far. When RightJustify is true and ACS.PadOperators - // is false, the part from start of line to the right end of the anchor. - // Otherwise, only the part to the left of the anchor. Including the space - // that exists on its left from the start. Not including the padding added on - // the left to right-justify the anchor. + // will be the maximum of that over the block that has been aligned. 
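The alignment refactor above replaces the per-change Matches predicate with a precomputed, ascending list of matching indices that AlignTokenSequence consumes as it walks the changes. A small sketch of that consumption pattern, using std::span as a stand-in for llvm::ArrayRef (hypothetical names):

    #include <span>
    #include <vector>

    // Shift only the positions recorded as matches; Matches must be sorted
    // ascending, mirroring how MatchedIndices is built by AlignTokens.
    void applyShift(std::vector<int> &Columns, std::span<const unsigned> Matches,
                    int Shift) {
      for (unsigned I = 0; I < Columns.size(); ++I) {
        while (!Matches.empty() && Matches.front() < I)
          Matches = Matches.subspan(1); // Same role as ArrayRef::consume_front.
        if (!Matches.empty() && Matches.front() == I)
          Columns[I] += Shift;          // This position was a recorded match.
      }
    }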
+ + // Maximum widths of each part so far. + // When RightJustify is true and ACS.PadOperators is false, the part from + // start of line to the right end of the anchor. Otherwise, only the part to + // the left of the anchor. Including the space that exists on its left from + // the start. Not including the padding added on the left to right-justify the + // anchor. unsigned WidthLeft = 0; // The operator to be aligned when RightJustify is true and ACS.PadOperators // is false. 0 otherwise. @@ -550,6 +544,9 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches, unsigned StartOfSequence = 0; unsigned EndOfSequence = 0; + // The positions of the tokens to be aligned. + SmallVector<unsigned> MatchedIndices; + // Measure the scope level (i.e. depth of (), [], {}) of the first token, and // abort when we hit any token in a higher scope than the starting one. auto IndentAndNestingLevel = StartAt < Changes.size() @@ -578,7 +575,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches, auto AlignCurrentSequence = [&] { if (StartOfSequence > 0 && StartOfSequence < EndOfSequence) { AlignTokenSequence(Style, StartOfSequence, EndOfSequence, - WidthLeft + WidthAnchor, RightJustify, Matches, + WidthLeft + WidthAnchor, RightJustify, MatchedIndices, Changes); } WidthLeft = 0; @@ -586,6 +583,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches, WidthRight = 0; StartOfSequence = 0; EndOfSequence = 0; + MatchedIndices.clear(); }; unsigned i = StartAt; @@ -637,8 +635,10 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches, // If there is more than one matching token per line, or if the number of // preceding commas, do not match anymore, end the sequence. - if (FoundMatchOnLine || CommasBeforeMatch != CommasBeforeLastMatch) + if (FoundMatchOnLine || CommasBeforeMatch != CommasBeforeLastMatch) { + MatchedIndices.push_back(i); AlignCurrentSequence(); + } CommasBeforeLastMatch = CommasBeforeMatch; FoundMatchOnLine = true; @@ -684,6 +684,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches, WidthAnchor = NewAnchor; WidthRight = NewRight; } + MatchedIndices.push_back(i); } EndOfSequence = i; diff --git a/clang/lib/Frontend/ChainedIncludesSource.cpp b/clang/lib/Frontend/ChainedIncludesSource.cpp index 82249f893a795..049277c2df7a9 100644 --- a/clang/lib/Frontend/ChainedIncludesSource.cpp +++ b/clang/lib/Frontend/ChainedIncludesSource.cpp @@ -129,7 +129,7 @@ clang::createChainedIncludesSource(CompilerInstance &CI, Clang->setTarget(TargetInfo::CreateTargetInfo( Clang->getDiagnostics(), Clang->getInvocation().getTargetOpts())); Clang->createFileManager(); - Clang->createSourceManager(Clang->getFileManager()); + Clang->createSourceManager(); Clang->createPreprocessor(TU_Prefix); Clang->getDiagnosticClient().BeginSourceFile(Clang->getLangOpts(), &Clang->getPreprocessor()); diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp index d6f3aec981336..584436665622d 100644 --- a/clang/lib/Frontend/CompilerInstance.cpp +++ b/clang/lib/Frontend/CompilerInstance.cpp @@ -382,17 +382,18 @@ IntrusiveRefCntPtr<DiagnosticsEngine> CompilerInstance::createDiagnostics( // File Manager -FileManager *CompilerInstance::createFileManager() { +void CompilerInstance::createFileManager() { assert(VFS && "CompilerInstance needs a VFS for creating FileManager"); FileMgr = llvm::makeIntrusiveRefCnt<FileManager>(getFileSystemOpts(), VFS); - return FileMgr.get(); } // Source Manager -void CompilerInstance::createSourceManager(FileManager &FileMgr) { - SourceMgr = 
llvm::makeIntrusiveRefCnt<SourceManager>(getDiagnostics(), FileMgr); +void CompilerInstance::createSourceManager() { + assert(Diagnostics && "DiagnosticsEngine needed for creating SourceManager"); + assert(FileMgr && "FileManager needed for creating SourceManager"); + SourceMgr = llvm::makeIntrusiveRefCnt<SourceManager>(getDiagnostics(), + getFileManager()); } // Initialize the remapping of files to alternative contents, e.g., @@ -503,7 +504,7 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) { // then we're the top level compiler instance and need to create one. if (!ModuleDepCollector && !DepOpts.ModuleDependencyOutputDir.empty()) { ModuleDepCollector = std::make_shared<ModuleDependencyCollector>( - DepOpts.ModuleDependencyOutputDir); + DepOpts.ModuleDependencyOutputDir, getVirtualFileSystemPtr()); } // If there is a module dep collector, register with other dep collectors @@ -1186,7 +1187,7 @@ std::unique_ptr<CompilerInstance> CompilerInstance::cloneForModuleCompileImpl( if (llvm::is_contained(DiagOpts.SystemHeaderWarningsModules, ModuleName)) Instance.getDiagnostics().setSuppressSystemWarnings(false); - Instance.createSourceManager(Instance.getFileManager()); + Instance.createSourceManager(); SourceManager &SourceMgr = Instance.getSourceManager(); if (ThreadSafeConfig) { @@ -1598,90 +1599,6 @@ static void checkConfigMacros(Preprocessor &PP, Module *M, } } -/// Write a new timestamp file with the given path. -static void writeTimestampFile(StringRef TimestampFile) { - std::error_code EC; - llvm::raw_fd_ostream Out(TimestampFile.str(), EC, llvm::sys::fs::OF_None); -} - -/// Prune the module cache of modules that haven't been accessed in -/// a long time. -static void pruneModuleCache(const HeaderSearchOptions &HSOpts) { - llvm::sys::fs::file_status StatBuf; - llvm::SmallString<128> TimestampFile; - TimestampFile = HSOpts.ModuleCachePath; - assert(!TimestampFile.empty()); - llvm::sys::path::append(TimestampFile, "modules.timestamp"); - - // Try to stat() the timestamp file. - if (std::error_code EC = llvm::sys::fs::status(TimestampFile, StatBuf)) { - // If the timestamp file wasn't there, create one now. - if (EC == std::errc::no_such_file_or_directory) { - writeTimestampFile(TimestampFile); - } - return; - } - - // Check whether the time stamp is older than our pruning interval. - // If not, do nothing. - time_t TimeStampModTime = - llvm::sys::toTimeT(StatBuf.getLastModificationTime()); - time_t CurrentTime = time(nullptr); - if (CurrentTime - TimeStampModTime <= time_t(HSOpts.ModuleCachePruneInterval)) - return; - - // Write a new timestamp file so that nobody else attempts to prune. - // There is a benign race condition here, if two Clang instances happen to - // notice at the same time that the timestamp is out-of-date. - writeTimestampFile(TimestampFile); - - // Walk the entire module cache, looking for unused module files and module - // indices. - std::error_code EC; - for (llvm::sys::fs::directory_iterator Dir(HSOpts.ModuleCachePath, EC), - DirEnd; - Dir != DirEnd && !EC; Dir.increment(EC)) { - // If we don't have a directory, there's nothing to look into. - if (!llvm::sys::fs::is_directory(Dir->path())) - continue; - - // Walk all of the files within this directory. - for (llvm::sys::fs::directory_iterator File(Dir->path(), EC), FileEnd; - File != FileEnd && !EC; File.increment(EC)) { - // We only care about module and global module index files. 
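The pruning being deleted above is re-routed through ModCache->maybePrune() just below; the gate the removed code used was a simple interval check against the mtime of the modules.timestamp file. A rough sketch of that check (simplified, hypothetical helper, not the ModuleCache API):

    #include <ctime>

    // True if pruning is enabled (interval > 0) and the last prune, recorded by
    // the timestamp file's modification time, is older than the interval.
    bool shouldPruneModuleCache(std::time_t TimestampMTime,
                                long PruneIntervalSeconds) {
      if (PruneIntervalSeconds <= 0)
        return false;
      return std::time(nullptr) - TimestampMTime > PruneIntervalSeconds;
    }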
- StringRef Extension = llvm::sys::path::extension(File->path()); - if (Extension != ".pcm" && Extension != ".timestamp" && - llvm::sys::path::filename(File->path()) != "modules.idx") - continue; - - // Look at this file. If we can't stat it, there's nothing interesting - // there. - if (llvm::sys::fs::status(File->path(), StatBuf)) - continue; - - // If the file has been used recently enough, leave it there. - time_t FileAccessTime = llvm::sys::toTimeT(StatBuf.getLastAccessedTime()); - if (CurrentTime - FileAccessTime <= - time_t(HSOpts.ModuleCachePruneAfter)) { - continue; - } - - // Remove the file. - llvm::sys::fs::remove(File->path()); - - // Remove the timestamp file. - std::string TimpestampFilename = File->path() + ".timestamp"; - llvm::sys::fs::remove(TimpestampFilename); - } - - // If we removed all of the files in the directory, remove the directory - // itself. - if (llvm::sys::fs::directory_iterator(Dir->path(), EC) == - llvm::sys::fs::directory_iterator() && !EC) - llvm::sys::fs::remove(Dir->path()); - } -} - void CompilerInstance::createASTReader() { if (TheASTReader) return; @@ -1692,11 +1609,10 @@ void CompilerInstance::createASTReader() { // If we're implicitly building modules but not currently recursively // building a module, check whether we need to prune the module cache. if (getSourceManager().getModuleBuildStack().empty() && - !getPreprocessor().getHeaderSearchInfo().getModuleCachePath().empty() && - getHeaderSearchOpts().ModuleCachePruneInterval > 0 && - getHeaderSearchOpts().ModuleCachePruneAfter > 0) { - pruneModuleCache(getHeaderSearchOpts()); - } + !getPreprocessor().getHeaderSearchInfo().getModuleCachePath().empty()) + ModCache->maybePrune(getHeaderSearchOpts().ModuleCachePath, + getHeaderSearchOpts().ModuleCachePruneInterval, + getHeaderSearchOpts().ModuleCachePruneAfter); HeaderSearchOptions &HSOpts = getHeaderSearchOpts(); std::string Sysroot = HSOpts.Sysroot; diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp index 6cc3b65a16cb2..1b63c40a6efd7 100644 --- a/clang/lib/Frontend/FrontendAction.cpp +++ b/clang/lib/Frontend/FrontendAction.cpp @@ -879,7 +879,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, // file, otherwise the CompilerInstance will happily destroy them. CI.setVirtualFileSystem(AST->getFileManager().getVirtualFileSystemPtr()); CI.setFileManager(AST->getFileManagerPtr()); - CI.createSourceManager(CI.getFileManager()); + CI.createSourceManager(); CI.getSourceManager().initializeForReplay(AST->getSourceManager()); // Preload all the module files loaded transitively by the AST unit. Also @@ -971,13 +971,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, // Set up the file system, file and source managers, if needed. 
if (!CI.hasVirtualFileSystem()) CI.createVirtualFileSystem(); - if (!CI.hasFileManager()) { - if (!CI.createFileManager()) { - return false; - } - } + if (!CI.hasFileManager()) + CI.createFileManager(); if (!CI.hasSourceManager()) { - CI.createSourceManager(CI.getFileManager()); + CI.createSourceManager(); if (CI.getDiagnosticOpts().getFormat() == DiagnosticOptions::SARIF) { static_cast(&CI.getDiagnosticClient()) ->setSarifWriter( diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp index 7424958d46612..d7d56b8166350 100644 --- a/clang/lib/Frontend/FrontendActions.cpp +++ b/clang/lib/Frontend/FrontendActions.cpp @@ -971,14 +971,17 @@ void DumpModuleInfoAction::ExecuteAction() { // Emit the macro definitions in the module file so that we can know how // much definitions in the module file quickly. // TODO: Emit the macro definition bodies completely. - if (auto FilteredMacros = llvm::make_filter_range( - R->getPreprocessor().macros(), - [](const auto &Macro) { return Macro.first->isFromAST(); }); - !FilteredMacros.empty()) { - Out << " Macro Definitions:\n"; - for (/* pair*/ const auto &Macro : - FilteredMacros) - Out << " " << Macro.first->getName() << "\n"; + { + std::vector MacroNames; + for (const auto &M : R->getPreprocessor().macros()) { + if (M.first->isFromAST()) + MacroNames.push_back(M.first->getName()); + } + llvm::sort(MacroNames); + if (!MacroNames.empty()) + Out << " Macro Definitions:\n"; + for (StringRef Name : MacroNames) + Out << " " << Name << "\n"; } // Now let's print out any modules we did not see as part of the Primary. diff --git a/clang/lib/Frontend/HeaderIncludeGen.cpp b/clang/lib/Frontend/HeaderIncludeGen.cpp index 8ab335905f9f2..7cd9c8a3a5bd7 100644 --- a/clang/lib/Frontend/HeaderIncludeGen.cpp +++ b/clang/lib/Frontend/HeaderIncludeGen.cpp @@ -112,11 +112,22 @@ class HeaderIncludesJSONCallback : public PPCallbacks { /// an array of separate entries, one for each non-system source file used in /// the compilation showing only the direct includes and imports from that file. 
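The DumpModuleInfoAction hunk above trades the filtered range for a collect-then-sort pass, so the "Macro Definitions:" listing no longer depends on identifier-table iteration order. A minimal standalone sketch of that pattern (not part of the patch; it assumes only LLVM's ADT/Support headers and a caller that has already filtered the names):

```c++
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

// Print a set of macro names in a stable, sorted order.
static void printMacroNames(llvm::ArrayRef<llvm::StringRef> Unordered,
                            llvm::raw_ostream &Out) {
  std::vector<llvm::StringRef> Names(Unordered.begin(), Unordered.end());
  llvm::sort(Names); // lexicographic; makes the dump deterministic
  if (!Names.empty())
    Out << " Macro Definitions:\n";
  for (llvm::StringRef Name : Names)
    Out << "   " << Name << "\n";
}
```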
class HeaderIncludesDirectPerFileCallback : public PPCallbacks { + struct HeaderIncludeInfo { + SourceLocation Location; + FileEntryRef File; + const Module *ImportedModule; + + HeaderIncludeInfo(SourceLocation Location, FileEntryRef File, + const Module *ImportedModule) + : Location(Location), File(File), ImportedModule(ImportedModule) {} + }; + SourceManager &SM; HeaderSearch &HSI; raw_ostream *OutputFile; bool OwnsOutputFile; - using DependencyMap = llvm::DenseMap>; + using DependencyMap = + llvm::DenseMap>; DependencyMap Dependencies; public: @@ -295,8 +306,8 @@ void HeaderIncludesCallback::FileChanged(SourceLocation Loc, } } -void HeaderIncludesCallback::FileSkipped(const FileEntryRef &SkippedFile, const - Token &FilenameTok, +void HeaderIncludesCallback::FileSkipped(const FileEntryRef &SkippedFile, + const Token &FilenameTok, SrcMgr::CharacteristicKind FileType) { if (!DepOpts.ShowSkippedHeaderIncludes) return; @@ -390,18 +401,41 @@ void HeaderIncludesDirectPerFileCallback::EndOfMainFile() { std::string Str; llvm::raw_string_ostream OS(Str); llvm::json::OStream JOS(OS); - JOS.array([&] { - for (auto S = SourceFiles.begin(), SE = SourceFiles.end(); S != SE; ++S) { - JOS.object([&] { - SmallVector &Deps = Dependencies[*S]; - JOS.attribute("source", S->getName().str()); - JOS.attributeArray("includes", [&] { - for (unsigned I = 0, N = Deps.size(); I != N; ++I) - JOS.value(Deps[I].getName().str()); + JOS.object([&] { + JOS.attribute("version", "2.0.0"); + JOS.attributeArray("dependencies", [&] { + for (const auto &S : SourceFiles) { + JOS.object([&] { + SmallVector &Deps = Dependencies[S]; + JOS.attribute("source", S.getName().str()); + JOS.attributeArray("includes", [&] { + for (unsigned I = 0, N = Deps.size(); I != N; ++I) { + if (!Deps[I].ImportedModule) { + JOS.object([&] { + JOS.attribute("location", Deps[I].Location.printToString(SM)); + JOS.attribute("file", Deps[I].File.getName()); + }); + } + } + }); + JOS.attributeArray("imports", [&] { + for (unsigned I = 0, N = Deps.size(); I != N; ++I) { + if (Deps[I].ImportedModule) { + JOS.object([&] { + JOS.attribute("location", Deps[I].Location.printToString(SM)); + JOS.attribute( + "module", + Deps[I].ImportedModule->getTopLevelModuleName()); + JOS.attribute("file", Deps[I].File.getName()); + }); + } + } + }); }); - }); - } + } + }); }); + OS << "\n"; if (OutputFile->get_kind() == raw_ostream::OStreamKind::OK_FDStream) { @@ -427,7 +461,18 @@ void HeaderIncludesDirectPerFileCallback::InclusionDirective( if (!FromFile) return; - Dependencies[*FromFile].push_back(*File); + FileEntryRef HeaderOrModuleMapFile = *File; + if (ModuleImported && SuggestedModule) { + OptionalFileEntryRef ModuleMapFile = + HSI.getModuleMap().getModuleMapFileForUniquing(SuggestedModule); + if (ModuleMapFile) { + HeaderOrModuleMapFile = *ModuleMapFile; + } + } + + HeaderIncludeInfo DependenciesEntry( + Loc, HeaderOrModuleMapFile, (ModuleImported ? 
SuggestedModule : nullptr)); + Dependencies[*FromFile].push_back(DependenciesEntry); } void HeaderIncludesDirectPerFileCallback::moduleImport(SourceLocation ImportLoc, @@ -448,5 +493,6 @@ void HeaderIncludesDirectPerFileCallback::moduleImport(SourceLocation ImportLoc, if (!ModuleMapFile) return; - Dependencies[*FromFile].push_back(*ModuleMapFile); + HeaderIncludeInfo DependenciesEntry(Loc, *ModuleMapFile, Imported); + Dependencies[*FromFile].push_back(DependenciesEntry); } diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp index edf0a091e087c..877ab02850667 100644 --- a/clang/lib/Frontend/InitPreprocessor.cpp +++ b/clang/lib/Frontend/InitPreprocessor.cpp @@ -742,7 +742,10 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts, Builder.defineMacro("__cpp_impl_coroutine", "201902L"); Builder.defineMacro("__cpp_designated_initializers", "201707L"); Builder.defineMacro("__cpp_impl_three_way_comparison", "201907L"); - //Builder.defineMacro("__cpp_modules", "201907L"); + // Intentionally to set __cpp_modules to 1. + // See https://github.com/llvm/llvm-project/issues/71364 for details. + // Builder.defineMacro("__cpp_modules", "201907L"); + Builder.defineMacro("__cpp_modules", "1"); Builder.defineMacro("__cpp_using_enum", "201907L"); } // C++23 features. diff --git a/clang/lib/Frontend/ModuleDependencyCollector.cpp b/clang/lib/Frontend/ModuleDependencyCollector.cpp index 3b363f948a3a8..ff37065885289 100644 --- a/clang/lib/Frontend/ModuleDependencyCollector.cpp +++ b/clang/lib/Frontend/ModuleDependencyCollector.cpp @@ -91,10 +91,10 @@ void ModuleDependencyCollector::attachToPreprocessor(Preprocessor &PP) { std::make_unique(*this)); } -static bool isCaseSensitivePath(StringRef Path) { +static bool isCaseSensitivePath(llvm::vfs::FileSystem &VFS, StringRef Path) { SmallString<256> TmpDest = Path, UpperDest, RealDest; // Remove component traversals, links, etc. - if (llvm::sys::fs::real_path(Path, TmpDest)) + if (VFS.getRealPath(Path, TmpDest)) return true; // Current default value in vfs.yaml Path = TmpDest; @@ -104,7 +104,7 @@ static bool isCaseSensitivePath(StringRef Path) { // already expects when sensitivity isn't setup. for (auto &C : Path) UpperDest.push_back(toUppercase(C)); - if (!llvm::sys::fs::real_path(UpperDest, RealDest) && Path == RealDest) + if (!VFS.getRealPath(UpperDest, RealDest) && Path == RealDest) return false; return true; } @@ -121,7 +121,8 @@ void ModuleDependencyCollector::writeFileMap() { // Explicitly set case sensitivity for the YAML writer. For that, find out // the sensitivity at the path where the headers all collected to. - VFSWriter.setCaseSensitivity(isCaseSensitivePath(VFSDir)); + VFSWriter.setCaseSensitivity( + isCaseSensitivePath(Canonicalizer.getFileSystem(), VFSDir)); // Do not rely on real path names when executing the crash reproducer scripts // since we only want to actually use the files we have on the VFS cache. @@ -153,7 +154,7 @@ std::error_code ModuleDependencyCollector::copyToRoot(StringRef Src, } else { // When collecting entries from input vfsoverlays, copy the external // contents into the cache but still map from the source. 
- if (!fs::exists(Dst)) + if (!Canonicalizer.getFileSystem().exists(Dst)) return std::error_code(); path::append(CacheDst, Dst); Paths.CopyFrom = Dst; diff --git a/clang/lib/Headers/avx10_2_512niintrin.h b/clang/lib/Headers/avx10_2_512niintrin.h index 67679fce82296..fdb57c7c9e27b 100644 --- a/clang/lib/Headers/avx10_2_512niintrin.h +++ b/clang/lib/Headers/avx10_2_512niintrin.h @@ -64,8 +64,8 @@ static __inline__ __m512 __DEFAULT_FN_ATTRS _mm512_maskz_dpph_ps(__mmask16 __U, static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_dpbssd_epi32(__m512i __W, __m512i __A, __m512i __B) { - return (__m512i)__builtin_ia32_vpdpbssd512((__v16si)__W, (__v16si)__A, - (__v16si)__B); + return (__m512i)__builtin_ia32_vpdpbssd512((__v16si)__W, (__v64qi)__A, + (__v64qi)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS @@ -84,8 +84,8 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpbssd_epi32( static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_dpbssds_epi32(__m512i __W, __m512i __A, __m512i __B) { - return (__m512i)__builtin_ia32_vpdpbssds512((__v16si)__W, (__v16si)__A, - (__v16si)__B); + return (__m512i)__builtin_ia32_vpdpbssds512((__v16si)__W, (__v64qi)__A, + (__v64qi)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_dpbssds_epi32( @@ -104,8 +104,8 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpbssds_epi32( static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_dpbsud_epi32(__m512i __W, __m512i __A, __m512i __B) { - return (__m512i)__builtin_ia32_vpdpbsud512((__v16si)__W, (__v16si)__A, - (__v16si)__B); + return (__m512i)__builtin_ia32_vpdpbsud512((__v16si)__W, (__v64qi)__A, + (__v64qu)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS @@ -124,8 +124,8 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpbsud_epi32( static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_dpbsuds_epi32(__m512i __W, __m512i __A, __m512i __B) { - return (__m512i)__builtin_ia32_vpdpbsuds512((__v16si)__W, (__v16si)__A, - (__v16si)__B); + return (__m512i)__builtin_ia32_vpdpbsuds512((__v16si)__W, (__v64qi)__A, + (__v64qu)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_dpbsuds_epi32( @@ -144,8 +144,8 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpbsuds_epi32( static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_dpbuud_epi32(__m512i __W, __m512i __A, __m512i __B) { - return (__m512i)__builtin_ia32_vpdpbuud512((__v16si)__W, (__v16si)__A, - (__v16si)__B); + return (__m512i)__builtin_ia32_vpdpbuud512((__v16si)__W, (__v64qu)__A, + (__v64qu)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS @@ -164,8 +164,8 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpbuud_epi32( static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_dpbuuds_epi32(__m512i __W, __m512i __A, __m512i __B) { - return (__m512i)__builtin_ia32_vpdpbuuds512((__v16si)__W, (__v16si)__A, - (__v16si)__B); + return (__m512i)__builtin_ia32_vpdpbuuds512((__v16si)__W, (__v64qu)__A, + (__v64qu)__B); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_dpbuuds_epi32( diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h index a3a5b02579081..31759c5386d9f 100644 --- a/clang/lib/Headers/avx2intrin.h +++ b/clang/lib/Headers/avx2intrin.h @@ -165,9 +165,8 @@ _mm256_abs_epi32(__m256i __a) { /// A 256-bit vector of [16 x i16] used to generate result[127:64] and /// result[255:192]. /// \returns A 256-bit integer vector containing the result. 
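The avx10_2_512niintrin.h hunks above only change the casts on the second and third builtin operands from `__v16si` to the byte-vector types (`__v64qi`/`__v64qu`) that the byte dot-product builtins actually take; the computation is unchanged. For reference, a scalar sketch of what one 32-bit lane of the non-saturating signed-by-signed form (VPDPBSSD) computes, written from the pseudocode in the surrounding doc comments:

```c++
#include <cstdint>

// One 32-bit lane of a signed x signed byte dot-product accumulate:
//   dst = acc + sum_{i=0..3} (int8)a[i] * (int8)b[i]
static int32_t dpbssd_lane(int32_t Acc, const int8_t A[4], const int8_t B[4]) {
  int32_t Sum = Acc;
  for (int I = 0; I < 4; ++I)
    Sum += int32_t(A[I]) * int32_t(B[I]); // each product fits in 16 bits
  return Sum; // the non-"s" forms accumulate without saturation
}
```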
-static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_packs_epi16(__m256i __a, __m256i __b) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_packs_epi16(__m256i __a, __m256i __b) { return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b); } @@ -197,9 +196,8 @@ _mm256_packs_epi16(__m256i __a, __m256i __b) /// A 256-bit vector of [8 x i32] used to generate result[127:64] and /// result[255:192]. /// \returns A 256-bit vector of [16 x i16] containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_packs_epi32(__m256i __a, __m256i __b) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_packs_epi32(__m256i __a, __m256i __b) { return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b); } @@ -228,9 +226,8 @@ _mm256_packs_epi32(__m256i __a, __m256i __b) /// A 256-bit vector of [16 x i16] used to generate result[127:64] and /// result[255:192]. /// \returns A 256-bit integer vector containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_packus_epi16(__m256i __a, __m256i __b) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_packus_epi16(__m256i __a, __m256i __b) { return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b); } @@ -260,9 +257,8 @@ _mm256_packus_epi16(__m256i __a, __m256i __b) /// A 256-bit vector of [8 x i32] used to generate result[127:64] and /// result[255:192]. /// \returns A 256-bit vector of [16 x i16] containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_packus_epi32(__m256i __V1, __m256i __V2) -{ +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR +_mm256_packus_epi32(__m256i __V1, __m256i __V2) { return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2); } diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h index 8d80e3ec2911a..c36bd814725fa 100644 --- a/clang/lib/Headers/avx512bwintrin.h +++ b/clang/lib/Headers/avx512bwintrin.h @@ -510,9 +510,8 @@ _mm512_maskz_abs_epi16(__mmask32 __U, __m512i __A) { (__v32hi)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_packs_epi32(__m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_packs_epi32(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packssdw512((__v16si)__A, (__v16si)__B); } @@ -532,9 +531,8 @@ _mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_packs_epi16(__m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_packs_epi16(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packsswb512((__v32hi)__A, (__v32hi) __B); } @@ -554,9 +552,8 @@ _mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B) (__v64qi)_mm512_setzero_si512()); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_packus_epi32(__m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_packus_epi32(__m512i __A, __m512i __B) { return (__m512i)__builtin_ia32_packusdw512((__v16si) __A, (__v16si) __B); } @@ -576,9 +573,8 @@ _mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) (__v32hi)__W); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_packus_epi16(__m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_packus_epi16(__m512i __A, __m512i __B) { return 
(__m512i)__builtin_ia32_packuswb512((__v32hi) __A, (__v32hi) __B); } diff --git a/clang/lib/Headers/avx512vlfp16intrin.h b/clang/lib/Headers/avx512vlfp16intrin.h index 16a4ff3034244..c0bcc080dbe93 100644 --- a/clang/lib/Headers/avx512vlfp16intrin.h +++ b/clang/lib/Headers/avx512vlfp16intrin.h @@ -42,7 +42,8 @@ static __inline__ _Float16 __DEFAULT_FN_ATTRS256 _mm256_cvtsh_h(__m256h __a) { return __a[0]; } -static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_set_sh(_Float16 __h) { +static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR +_mm_set_sh(_Float16 __h) { return __extension__(__m128h){__h, 0, 0, 0, 0, 0, 0, 0}; } @@ -57,23 +58,23 @@ _mm256_set1_ph(_Float16 __h) { __h, __h, __h, __h, __h, __h, __h, __h}; } -static __inline __m128h __DEFAULT_FN_ATTRS128 +static __inline __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8) { return (__m128h)(__v8hf){__h8, __h7, __h6, __h5, __h4, __h3, __h2, __h1}; } -static __inline __m256h __DEFAULT_FN_ATTRS256 +static __inline __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_set1_pch(_Float16 _Complex h) { return (__m256h)_mm256_set1_ps(__builtin_bit_cast(float, h)); } -static __inline __m128h __DEFAULT_FN_ATTRS128 +static __inline __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_set1_pch(_Float16 _Complex h) { return (__m128h)_mm_set1_ps(__builtin_bit_cast(float, h)); } -static __inline __m256h __DEFAULT_FN_ATTRS256 +static __inline __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8, _Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12, @@ -83,13 +84,13 @@ _mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, __h4, __h3, __h2, __h1}; } -static __inline__ __m128h __DEFAULT_FN_ATTRS128 +static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_setr_ph(_Float16 e0, _Float16 e1, _Float16 e2, _Float16 e3, _Float16 e4, _Float16 e5, _Float16 e6, _Float16 e7) { return _mm_set_ph(e7, e6, e5, e4, e3, e2, e1, e0); } -static __inline__ __m256h __DEFAULT_FN_ATTRS256 +static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_setr_ph(_Float16 e0, _Float16 e1, _Float16 e2, _Float16 e3, _Float16 e4, _Float16 e5, _Float16 e6, _Float16 e7, _Float16 e8, _Float16 e9, _Float16 e10, _Float16 e11, _Float16 e12, _Float16 e13, diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h index a7f70994be9db..d6ba19a6c78af 100644 --- a/clang/lib/Headers/avxintrin.h +++ b/clang/lib/Headers/avxintrin.h @@ -2311,10 +2311,9 @@ _mm256_cvttps_epi32(__m256 __a) /// \param __a /// A 256-bit vector of [4 x double]. /// \returns A 64 bit double containing the first element of the input vector. -static __inline double __DEFAULT_FN_ATTRS -_mm256_cvtsd_f64(__m256d __a) -{ - return __a[0]; +static __inline double __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_cvtsd_f64(__m256d __a) { + return __a[0]; } /// Returns the first element of the input vector of [8 x i32]. @@ -2327,11 +2326,10 @@ _mm256_cvtsd_f64(__m256d __a) /// \param __a /// A 256-bit vector of [8 x i32]. /// \returns A 32 bit integer containing the first element of the input vector. 
-static __inline int __DEFAULT_FN_ATTRS -_mm256_cvtsi256_si32(__m256i __a) -{ - __v8si __b = (__v8si)__a; - return __b[0]; +static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_cvtsi256_si32(__m256i __a) { + __v8si __b = (__v8si)__a; + return __b[0]; } /// Returns the first element of the input vector of [8 x float]. @@ -2344,10 +2342,9 @@ _mm256_cvtsi256_si32(__m256i __a) /// \param __a /// A 256-bit vector of [8 x float]. /// \returns A 32 bit float containing the first element of the input vector. -static __inline float __DEFAULT_FN_ATTRS -_mm256_cvtss_f32(__m256 __a) -{ - return __a[0]; +static __inline float __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_cvtss_f32(__m256 __a) { + return __a[0]; } /* Vector replicate */ diff --git a/clang/lib/Headers/avxvnniint8intrin.h b/clang/lib/Headers/avxvnniint8intrin.h index c211620c68f07..858b66b138f31 100644 --- a/clang/lib/Headers/avxvnniint8intrin.h +++ b/clang/lib/Headers/avxvnniint8intrin.h @@ -14,6 +14,7 @@ #ifndef __AVXVNNIINT8INTRIN_H #define __AVXVNNIINT8INTRIN_H +// clang-format off /// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with /// corresponding signed 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -44,10 +45,12 @@ /// ENDFOR /// dst[MAX:128] := 0 /// \endcode +// clang-format on #define _mm_dpbssd_epi32(__W, __A, __B) \ - ((__m128i)__builtin_ia32_vpdpbssd128((__v4si)(__W), (__v4si)(__A), \ - (__v4si)(__B))) + ((__m128i)__builtin_ia32_vpdpbssd128((__v4si)(__W), (__v16qi)(__A), \ + (__v16qi)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with /// corresponding signed 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -78,10 +81,12 @@ /// ENDFOR /// dst[MAX:256] := 0 /// \endcode +// clang-format on #define _mm256_dpbssd_epi32(__W, __A, __B) \ - ((__m256i)__builtin_ia32_vpdpbssd256((__v8si)(__W), (__v8si)(__A), \ - (__v8si)(__B))) + ((__m256i)__builtin_ia32_vpdpbssd256((__v8si)(__W), (__v32qi)(__A), \ + (__v32qi)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with /// corresponding signed 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -94,7 +99,7 @@ /// _mm_dpbssds_epi32( __m128i __W, __m128i __A, __m128i __B); /// \endcode /// -/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// This intrinsic corresponds to the \c VPDPBSSDS instruction. /// /// \param __A /// A 128-bit vector of [16 x char]. @@ -113,10 +118,12 @@ /// ENDFOR /// dst[MAX:128] := 0 /// \endcode +// clang-format on #define _mm_dpbssds_epi32(__W, __A, __B) \ - ((__m128i)__builtin_ia32_vpdpbssds128((__v4si)(__W), (__v4si)(__A), \ - (__v4si)(__B))) + ((__m128i)__builtin_ia32_vpdpbssds128((__v4si)(__W), (__v16qi)(__A), \ + (__v16qi)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with /// corresponding signed 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -129,7 +136,7 @@ /// _mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B); /// \endcode /// -/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// This intrinsic corresponds to the \c VPDPBSSDS instruction. /// /// \param __A /// A 256-bit vector of [32 x char]. 
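As a quick usage check for the corrected macro operand types, a hedged example; the function name is illustrative only, and the target-attribute spelling is an assumption about how the AVX-VNNI-INT8 feature is enabled (the command-line equivalent would be something like `-mavxvnniint8`):

```c++
#include <immintrin.h>

// Accumulate signed-byte x unsigned-byte dot products into 32-bit lanes.
__attribute__((target("avxvnniint8")))
__m256i accumulate_su(__m256i Acc, __m256i SignedBytes, __m256i UnsignedBytes) {
  return _mm256_dpbsud_epi32(Acc, SignedBytes, UnsignedBytes);
}
```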
@@ -148,10 +155,12 @@ /// ENDFOR /// dst[MAX:256] := 0 /// \endcode +// clang-format on #define _mm256_dpbssds_epi32(__W, __A, __B) \ - ((__m256i)__builtin_ia32_vpdpbssds256((__v8si)(__W), (__v8si)(__A), \ - (__v8si)(__B))) + ((__m256i)__builtin_ia32_vpdpbssds256((__v8si)(__W), (__v32qi)(__A), \ + (__v32qi)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with /// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -163,7 +172,7 @@ /// _mm_dpbsud_epi32(__m128i __W, __m128i __A, __m128i __B); /// \endcode /// -/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// This intrinsic corresponds to the \c VPDPBSUD instruction. /// /// \param __A /// A 128-bit vector of [16 x char]. @@ -182,10 +191,12 @@ /// ENDFOR /// dst[MAX:128] := 0 /// \endcode +// clang-format on #define _mm_dpbsud_epi32(__W, __A, __B) \ - ((__m128i)__builtin_ia32_vpdpbsud128((__v4si)(__W), (__v4si)(__A), \ - (__v4si)(__B))) + ((__m128i)__builtin_ia32_vpdpbsud128((__v4si)(__W), (__v16qi)(__A), \ + (__v16qu)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with /// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -197,7 +208,7 @@ /// _mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B); /// \endcode /// -/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// This intrinsic corresponds to the \c VPDPBSUD instruction. /// /// \param __A /// A 256-bit vector of [32 x char]. @@ -216,10 +227,12 @@ /// ENDFOR /// dst[MAX:256] := 0 /// \endcode +// clang-format on #define _mm256_dpbsud_epi32(__W, __A, __B) \ - ((__m256i)__builtin_ia32_vpdpbsud256((__v8si)(__W), (__v8si)(__A), \ - (__v8si)(__B))) + ((__m256i)__builtin_ia32_vpdpbsud256((__v8si)(__W), (__v32qi)(__A), \ + (__v32qu)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with /// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -232,7 +245,7 @@ /// _mm_dpbsuds_epi32( __m128i __W, __m128i __A, __m128i __B); /// \endcode /// -/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// This intrinsic corresponds to the \c VPDPBSUDS instruction. /// /// \param __A /// A 128-bit vector of [16 x char]. @@ -251,10 +264,12 @@ /// ENDFOR /// dst[MAX:128] := 0 /// \endcode +// clang-format on #define _mm_dpbsuds_epi32(__W, __A, __B) \ - ((__m128i)__builtin_ia32_vpdpbsuds128((__v4si)(__W), (__v4si)(__A), \ - (__v4si)(__B))) + ((__m128i)__builtin_ia32_vpdpbsuds128((__v4si)(__W), (__v16qi)(__A), \ + (__v16qu)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with /// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -267,7 +282,7 @@ /// _mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B); /// \endcode /// -/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// This intrinsic corresponds to the \c VPDPBSUDS instruction. /// /// \param __A /// A 256-bit vector of [32 x char]. 
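For the saturating forms referenced in the hunks above (the intrinsics with a trailing "s", mapping to VPDPBSSDS/VPDPBSUDS/VPDPBUUDS), the per-lane accumulate clamps to the signed 32-bit range instead of wrapping. A scalar sketch of that clamp, under the same assumptions as the lane example earlier:

```c++
#include <algorithm>
#include <cstdint>

// Saturating accumulate used by the "*s" byte dot-product forms: widen to
// 64 bits, then clamp the result to [INT32_MIN, INT32_MAX].
static int32_t sat_add_i32(int32_t Acc, int32_t Sum) {
  int64_t Wide = int64_t(Acc) + int64_t(Sum);
  Wide = std::clamp<int64_t>(Wide, INT32_MIN, INT32_MAX);
  return int32_t(Wide);
}
```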
@@ -286,10 +301,12 @@ /// ENDFOR /// dst[MAX:256] := 0 /// \endcode +// clang-format on #define _mm256_dpbsuds_epi32(__W, __A, __B) \ - ((__m256i)__builtin_ia32_vpdpbsuds256((__v8si)(__W), (__v8si)(__A), \ - (__v8si)(__B))) + ((__m256i)__builtin_ia32_vpdpbsuds256((__v8si)(__W), (__v32qi)(__A), \ + (__v32qu)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with /// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -301,7 +318,7 @@ /// _mm_dpbuud_epi32(__m128i __W, __m128i __A, __m128i __B); /// \endcode /// -/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// This intrinsic corresponds to the \c VPDPBUUD instruction. /// /// \param __A /// A 128-bit vector of [16 x unsigned char]. @@ -320,10 +337,12 @@ /// ENDFOR /// dst[MAX:128] := 0 /// \endcode +// clang-format on #define _mm_dpbuud_epi32(__W, __A, __B) \ - ((__m128i)__builtin_ia32_vpdpbuud128((__v4si)(__W), (__v4si)(__A), \ - (__v4si)(__B))) + ((__m128i)__builtin_ia32_vpdpbuud128((__v4si)(__W), (__v16qu)(__A), \ + (__v16qu)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with /// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -335,7 +354,7 @@ /// _mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B); /// \endcode /// -/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// This intrinsic corresponds to the \c VPDPBUUD instruction. /// /// \param __A /// A 256-bit vector of [32 x unsigned char]. @@ -354,10 +373,12 @@ /// ENDFOR /// dst[MAX:256] := 0 /// \endcode +// clang-format on #define _mm256_dpbuud_epi32(__W, __A, __B) \ - ((__m256i)__builtin_ia32_vpdpbuud256((__v8si)(__W), (__v8si)(__A), \ - (__v8si)(__B))) + ((__m256i)__builtin_ia32_vpdpbuud256((__v8si)(__W), (__v32qu)(__A), \ + (__v32qu)(__B))) +// clang-format off /// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with /// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding @@ -389,10 +410,12 @@ /// ENDFOR /// dst[MAX:128] := 0 /// \endcode +// clang-format on #define _mm_dpbuuds_epi32(__W, __A, __B) \ - ((__m128i)__builtin_ia32_vpdpbuuds128((__v4si)(__W), (__v4si)(__A), \ - (__v4si)(__B))) + ((__m128i)__builtin_ia32_vpdpbuuds128((__v4si)(__W), (__v16qu)(__A), \ + (__v16qu)(__B))) +// clang-format off /// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate /// signed 16-bit results. Sum these 4 results with the corresponding /// 32-bit integer in \a __W with signed saturation, and store the packed @@ -423,8 +446,9 @@ /// ENDFOR /// dst[MAX:256] := 0 /// \endcode +// clang-format on #define _mm256_dpbuuds_epi32(__W, __A, __B) \ - ((__m256i)__builtin_ia32_vpdpbuuds256((__v8si)(__W), (__v8si)(__A), \ - (__v8si)(__B))) + ((__m256i)__builtin_ia32_vpdpbuuds256((__v8si)(__W), (__v32qu)(__A), \ + (__v32qu)(__B))) #endif // __AVXVNNIINT8INTRIN_H diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h index fca6229a065be..6597e7e7d4030 100644 --- a/clang/lib/Headers/emmintrin.h +++ b/clang/lib/Headers/emmintrin.h @@ -4159,8 +4159,8 @@ void _mm_mfence(void); /// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are /// written to the higher 64 bits of the result. 
/// \returns A 128-bit vector of [16 x i8] containing the converted values. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, - __m128i __b) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_packs_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b); } @@ -4182,8 +4182,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, /// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values /// are written to the higher 64 bits of the result. /// \returns A 128-bit vector of [8 x i16] containing the converted values. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, - __m128i __b) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_packs_epi32(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b); } @@ -4205,8 +4205,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, /// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are /// written to the higher 64 bits of the result. /// \returns A 128-bit vector of [16 x i8] containing the converted values. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a, - __m128i __b) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_packus_epi16(__m128i __a, __m128i __b) { return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b); } diff --git a/clang/lib/Headers/f16cintrin.h b/clang/lib/Headers/f16cintrin.h index 83965334e2c9b..b6ca7088d3864 100644 --- a/clang/lib/Headers/f16cintrin.h +++ b/clang/lib/Headers/f16cintrin.h @@ -15,17 +15,20 @@ #define __F16CINTRIN_H /* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS128 \ - __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(128))) -#define __DEFAULT_FN_ATTRS256 \ - __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(256))) - #if defined(__cplusplus) && (__cplusplus >= 201103L) -#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr -#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("f16c"), \ + __min_vector_width__(128))) constexpr +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("f16c"), \ + __min_vector_width__(256))) constexpr #else -#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 -#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("f16c"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("f16c"), \ + __min_vector_width__(256))) #endif /* NOTE: Intel documents the 128-bit versions of these as being in emmintrin.h, @@ -43,7 +46,7 @@ /// \param __a /// A 16-bit half-precision float value. /// \returns The converted 32-bit float value. -static __inline float __DEFAULT_FN_ATTRS128_CONSTEXPR +static __inline float __DEFAULT_FN_ATTRS128 _cvtsh_ss(unsigned short __a) { return (float)__builtin_bit_cast(__fp16, __a); @@ -112,7 +115,7 @@ _cvtsh_ss(unsigned short __a) /// A 128-bit vector containing 16-bit half-precision float values. The lower /// 64 bits are used in the conversion. 
/// \returns A 128-bit vector of [4 x float] containing converted float values. -static __inline __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR +static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_cvtph_ps(__m128i __a) { typedef __fp16 __v4fp16 __attribute__((__vector_size__(8))); @@ -159,7 +162,7 @@ _mm_cvtph_ps(__m128i __a) /// converted to 32-bit single-precision float values. /// \returns A vector of [8 x float] containing the converted 32-bit /// single-precision float values. -static __inline __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR +static __inline __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtph_ps(__m128i __a) { typedef __fp16 __v8fp16 __attribute__((__vector_size__(16), __aligned__(16))); @@ -169,7 +172,5 @@ _mm256_cvtph_ps(__m128i __a) #undef __DEFAULT_FN_ATTRS128 #undef __DEFAULT_FN_ATTRS256 -#undef __DEFAULT_FN_ATTRS128_CONSTEXPR -#undef __DEFAULT_FN_ATTRS256_CONSTEXPR #endif /* __F16CINTRIN_H */ diff --git a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h index cd1ffc8c23298..d973371312701 100644 --- a/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h +++ b/clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h @@ -1292,6 +1292,39 @@ bool3 isinf(float3); _HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isinf) bool4 isinf(float4); +//===----------------------------------------------------------------------===// +// isnan builtins +//===----------------------------------------------------------------------===// + +/// \fn T isnan(T x) +/// \brief Determines if the specified value \a x is Not a Number. +/// \param x The specified input value. +/// +/// Returns a value of the same size as the input, with a value set +/// to True if the x parameter is NaN or QNaN. Otherwise, False. + +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isnan) +bool isnan(half); +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isnan) +bool2 isnan(half2); +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isnan) +bool3 isnan(half3); +_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2) +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isnan) +bool4 isnan(half4); + +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isnan) +bool isnan(float); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isnan) +bool2 isnan(float2); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isnan) +bool3 isnan(float3); +_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_isnan) +bool4 isnan(float4); + //===----------------------------------------------------------------------===// // lerp builtins //===----------------------------------------------------------------------===// diff --git a/clang/lib/Headers/hlsl/hlsl_basic_types.h b/clang/lib/Headers/hlsl/hlsl_basic_types.h index eff94e0d7f950..fc1e265067714 100644 --- a/clang/lib/Headers/hlsl/hlsl_basic_types.h +++ b/clang/lib/Headers/hlsl/hlsl_basic_types.h @@ -115,6 +115,239 @@ typedef vector float64_t2; typedef vector float64_t3; typedef vector float64_t4; +#ifdef __HLSL_ENABLE_16_BIT +typedef matrix int16_t1x1; +typedef matrix int16_t1x2; +typedef matrix int16_t1x3; +typedef matrix int16_t1x4; +typedef matrix int16_t2x1; +typedef matrix int16_t2x2; +typedef matrix int16_t2x3; +typedef matrix int16_t2x4; +typedef matrix int16_t3x1; +typedef matrix int16_t3x2; +typedef matrix int16_t3x3; +typedef matrix int16_t3x4; +typedef matrix int16_t4x1; +typedef matrix int16_t4x2; +typedef matrix int16_t4x3; +typedef matrix int16_t4x4; +typedef matrix uint16_t1x1; 
+typedef matrix uint16_t1x2; +typedef matrix uint16_t1x3; +typedef matrix uint16_t1x4; +typedef matrix uint16_t2x1; +typedef matrix uint16_t2x2; +typedef matrix uint16_t2x3; +typedef matrix uint16_t2x4; +typedef matrix uint16_t3x1; +typedef matrix uint16_t3x2; +typedef matrix uint16_t3x3; +typedef matrix uint16_t3x4; +typedef matrix uint16_t4x1; +typedef matrix uint16_t4x2; +typedef matrix uint16_t4x3; +typedef matrix uint16_t4x4; +#endif + +typedef matrix int1x1; +typedef matrix int1x2; +typedef matrix int1x3; +typedef matrix int1x4; +typedef matrix int2x1; +typedef matrix int2x2; +typedef matrix int2x3; +typedef matrix int2x4; +typedef matrix int3x1; +typedef matrix int3x2; +typedef matrix int3x3; +typedef matrix int3x4; +typedef matrix int4x1; +typedef matrix int4x2; +typedef matrix int4x3; +typedef matrix int4x4; +typedef matrix uint1x1; +typedef matrix uint1x2; +typedef matrix uint1x3; +typedef matrix uint1x4; +typedef matrix uint2x1; +typedef matrix uint2x2; +typedef matrix uint2x3; +typedef matrix uint2x4; +typedef matrix uint3x1; +typedef matrix uint3x2; +typedef matrix uint3x3; +typedef matrix uint3x4; +typedef matrix uint4x1; +typedef matrix uint4x2; +typedef matrix uint4x3; +typedef matrix uint4x4; +typedef matrix int32_t1x1; +typedef matrix int32_t1x2; +typedef matrix int32_t1x3; +typedef matrix int32_t1x4; +typedef matrix int32_t2x1; +typedef matrix int32_t2x2; +typedef matrix int32_t2x3; +typedef matrix int32_t2x4; +typedef matrix int32_t3x1; +typedef matrix int32_t3x2; +typedef matrix int32_t3x3; +typedef matrix int32_t3x4; +typedef matrix int32_t4x1; +typedef matrix int32_t4x2; +typedef matrix int32_t4x3; +typedef matrix int32_t4x4; +typedef matrix uint32_t1x1; +typedef matrix uint32_t1x2; +typedef matrix uint32_t1x3; +typedef matrix uint32_t1x4; +typedef matrix uint32_t2x1; +typedef matrix uint32_t2x2; +typedef matrix uint32_t2x3; +typedef matrix uint32_t2x4; +typedef matrix uint32_t3x1; +typedef matrix uint32_t3x2; +typedef matrix uint32_t3x3; +typedef matrix uint32_t3x4; +typedef matrix uint32_t4x1; +typedef matrix uint32_t4x2; +typedef matrix uint32_t4x3; +typedef matrix uint32_t4x4; +typedef matrix int64_t1x1; +typedef matrix int64_t1x2; +typedef matrix int64_t1x3; +typedef matrix int64_t1x4; +typedef matrix int64_t2x1; +typedef matrix int64_t2x2; +typedef matrix int64_t2x3; +typedef matrix int64_t2x4; +typedef matrix int64_t3x1; +typedef matrix int64_t3x2; +typedef matrix int64_t3x3; +typedef matrix int64_t3x4; +typedef matrix int64_t4x1; +typedef matrix int64_t4x2; +typedef matrix int64_t4x3; +typedef matrix int64_t4x4; +typedef matrix uint64_t1x1; +typedef matrix uint64_t1x2; +typedef matrix uint64_t1x3; +typedef matrix uint64_t1x4; +typedef matrix uint64_t2x1; +typedef matrix uint64_t2x2; +typedef matrix uint64_t2x3; +typedef matrix uint64_t2x4; +typedef matrix uint64_t3x1; +typedef matrix uint64_t3x2; +typedef matrix uint64_t3x3; +typedef matrix uint64_t3x4; +typedef matrix uint64_t4x1; +typedef matrix uint64_t4x2; +typedef matrix uint64_t4x3; +typedef matrix uint64_t4x4; + +typedef matrix half1x1; +typedef matrix half1x2; +typedef matrix half1x3; +typedef matrix half1x4; +typedef matrix half2x1; +typedef matrix half2x2; +typedef matrix half2x3; +typedef matrix half2x4; +typedef matrix half3x1; +typedef matrix half3x2; +typedef matrix half3x3; +typedef matrix half3x4; +typedef matrix half4x1; +typedef matrix half4x2; +typedef matrix half4x3; +typedef matrix half4x4; +typedef matrix float1x1; +typedef matrix float1x2; +typedef matrix float1x3; +typedef matrix 
float1x4; +typedef matrix float2x1; +typedef matrix float2x2; +typedef matrix float2x3; +typedef matrix float2x4; +typedef matrix float3x1; +typedef matrix float3x2; +typedef matrix float3x3; +typedef matrix float3x4; +typedef matrix float4x1; +typedef matrix float4x2; +typedef matrix float4x3; +typedef matrix float4x4; +typedef matrix double1x1; +typedef matrix double1x2; +typedef matrix double1x3; +typedef matrix double1x4; +typedef matrix double2x1; +typedef matrix double2x2; +typedef matrix double2x3; +typedef matrix double2x4; +typedef matrix double3x1; +typedef matrix double3x2; +typedef matrix double3x3; +typedef matrix double3x4; +typedef matrix double4x1; +typedef matrix double4x2; +typedef matrix double4x3; +typedef matrix double4x4; + +#ifdef __HLSL_ENABLE_16_BIT +typedef matrix float16_t1x1; +typedef matrix float16_t1x2; +typedef matrix float16_t1x3; +typedef matrix float16_t1x4; +typedef matrix float16_t2x1; +typedef matrix float16_t2x2; +typedef matrix float16_t2x3; +typedef matrix float16_t2x4; +typedef matrix float16_t3x1; +typedef matrix float16_t3x2; +typedef matrix float16_t3x3; +typedef matrix float16_t3x4; +typedef matrix float16_t4x1; +typedef matrix float16_t4x2; +typedef matrix float16_t4x3; +typedef matrix float16_t4x4; +#endif + +typedef matrix float32_t1x1; +typedef matrix float32_t1x2; +typedef matrix float32_t1x3; +typedef matrix float32_t1x4; +typedef matrix float32_t2x1; +typedef matrix float32_t2x2; +typedef matrix float32_t2x3; +typedef matrix float32_t2x4; +typedef matrix float32_t3x1; +typedef matrix float32_t3x2; +typedef matrix float32_t3x3; +typedef matrix float32_t3x4; +typedef matrix float32_t4x1; +typedef matrix float32_t4x2; +typedef matrix float32_t4x3; +typedef matrix float32_t4x4; +typedef matrix float64_t1x1; +typedef matrix float64_t1x2; +typedef matrix float64_t1x3; +typedef matrix float64_t1x4; +typedef matrix float64_t2x1; +typedef matrix float64_t2x2; +typedef matrix float64_t2x3; +typedef matrix float64_t2x4; +typedef matrix float64_t3x1; +typedef matrix float64_t3x2; +typedef matrix float64_t3x3; +typedef matrix float64_t3x4; +typedef matrix float64_t4x1; +typedef matrix float64_t4x2; +typedef matrix float64_t4x3; +typedef matrix float64_t4x4; + } // namespace hlsl #endif //_HLSL_HLSL_BASIC_TYPES_H_ diff --git a/clang/lib/Headers/hlsl/hlsl_compat_overloads.h b/clang/lib/Headers/hlsl/hlsl_compat_overloads.h index 72a7bed21f3c9..fe4277ed4a7d2 100644 --- a/clang/lib/Headers/hlsl/hlsl_compat_overloads.h +++ b/clang/lib/Headers/hlsl/hlsl_compat_overloads.h @@ -352,6 +352,15 @@ constexpr bool3 isinf(double3 V) { return isinf((float3)V); } _DXC_DEPRECATED_64BIT_FN(fn) constexpr bool4 isinf(double4 V) { return isinf((float4)V); } +//===----------------------------------------------------------------------===// +// isnan builtins overloads +//===----------------------------------------------------------------------===// + +constexpr bool isnan(double V) { return isnan((float)V); } +constexpr bool2 isnan(double2 V) { return isnan((float2)V); } +constexpr bool3 isnan(double3 V) { return isnan((float3)V); } +constexpr bool4 isnan(double4 V) { return isnan((float4)V); } + //===----------------------------------------------------------------------===// // lerp builtins overloads //===----------------------------------------------------------------------===// diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h index d9d87c827e6a4..5ba5bfb9abde0 100644 --- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h +++ 
b/clang/lib/Headers/hlsl/hlsl_intrinsics.h @@ -422,6 +422,30 @@ constexpr int4 D3DCOLORtoUBYTE4(float4 V) { return __detail::d3d_color_to_ubyte4_impl(V); } +//===----------------------------------------------------------------------===// +// NonUniformResourceIndex builtin +//===----------------------------------------------------------------------===// + +/// \fn uint NonUniformResourceIndex(uint I) +/// \brief A compiler hint to indicate that a resource index varies across +/// threads within a wave (i.e., it is non-uniform). +/// \param I [in] Resource array index +/// +/// The return value is the \Index parameter. +/// +/// When indexing into an array of shader resources (e.g., textures, buffers), +/// some GPU hardware and drivers require the compiler to know whether the index +/// is uniform (same for all threads) or non-uniform (varies per thread). +/// +/// Using NonUniformResourceIndex explicitly marks an index as non-uniform, +/// disabling certain assumptions or optimizations that could lead to incorrect +/// behavior when dynamically accessing resource arrays with non-uniform +/// indices. + +constexpr uint32_t NonUniformResourceIndex(uint32_t Index) { + return __builtin_hlsl_resource_nonuniformindex(Index); +} + //===----------------------------------------------------------------------===// // reflect builtin //===----------------------------------------------------------------------===// diff --git a/clang/lib/Headers/mmintrin.h b/clang/lib/Headers/mmintrin.h index 18e2c2154362a..5f617530b6f78 100644 --- a/clang/lib/Headers/mmintrin.h +++ b/clang/lib/Headers/mmintrin.h @@ -156,11 +156,10 @@ _mm_cvtm64_si64(__m64 __m) /// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [8 x i8] containing the converted /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 -_mm_packs_pi16(__m64 __m1, __m64 __m2) -{ - return __trunc64(__builtin_ia32_packsswb128( - (__v8hi)__builtin_shufflevector(__m1, __m2, 0, 1), (__v8hi){})); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR +_mm_packs_pi16(__m64 __m1, __m64 __m2) { + return __trunc64(__builtin_ia32_packsswb128( + (__v8hi)__builtin_shufflevector(__m1, __m2, 0, 1), (__v8hi){})); } /// Converts, with saturation, 32-bit signed integers from both 64-bit integer @@ -182,11 +181,10 @@ _mm_packs_pi16(__m64 __m1, __m64 __m2) /// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [4 x i16] containing the converted /// values. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 -_mm_packs_pi32(__m64 __m1, __m64 __m2) -{ - return __trunc64(__builtin_ia32_packssdw128( - (__v4si)__builtin_shufflevector(__m1, __m2, 0, 1), (__v4si){})); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR +_mm_packs_pi32(__m64 __m1, __m64 __m2) { + return __trunc64(__builtin_ia32_packssdw128( + (__v4si)__builtin_shufflevector(__m1, __m2, 0, 1), (__v4si){})); } /// Converts, with saturation, 16-bit signed integers from both 64-bit integer @@ -208,11 +206,10 @@ _mm_packs_pi32(__m64 __m1, __m64 __m2) /// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [8 x i8] containing the converted /// values. 
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 -_mm_packs_pu16(__m64 __m1, __m64 __m2) -{ - return __trunc64(__builtin_ia32_packuswb128( - (__v8hi)__builtin_shufflevector(__m1, __m2, 0, 1), (__v8hi){})); +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR +_mm_packs_pu16(__m64 __m1, __m64 __m2) { + return __trunc64(__builtin_ia32_packuswb128( + (__v8hi)__builtin_shufflevector(__m1, __m2, 0, 1), (__v8hi){})); } /// Unpacks the upper 32 bits from two 64-bit integer vectors of [8 x i8] diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h index 6319fdbbeb8f0..5e63a1ae321bc 100644 --- a/clang/lib/Headers/smmintrin.h +++ b/clang/lib/Headers/smmintrin.h @@ -1466,8 +1466,8 @@ _mm_cvtepu32_epi64(__m128i __V) { /// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are /// written to the higher 64 bits of the result. /// \returns A 128-bit vector of [8 x i16] containing the converted values. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1, - __m128i __V2) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_packus_epi32(__m128i __V1, __m128i __V2) { return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2); } @@ -1534,9 +1534,16 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) { so we'll do the same. */ #undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_CONSTEXPR #define __DEFAULT_FN_ATTRS \ __attribute__((__always_inline__, __nodebug__, __target__("sse4.2"))) +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr +#else +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS +#endif + /* These specify the type of data that we're comparing. */ #define _SIDD_UBYTE_OPS 0x00 #define _SIDD_UWORD_OPS 0x01 diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h index 4891e3ce077b5..d876b4735a7d2 100644 --- a/clang/lib/Headers/xmmintrin.h +++ b/clang/lib/Headers/xmmintrin.h @@ -2363,9 +2363,8 @@ _mm_max_pi16(__m64 __a, __m64 __b) { /// \param __b /// A 64-bit integer vector containing one of the source operands. /// \returns A 64-bit integer vector containing the comparison results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 -_mm_max_pu8(__m64 __a, __m64 __b) -{ +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR +_mm_max_pu8(__m64 __a, __m64 __b) { return (__m64)__builtin_elementwise_max((__v8qu)__a, (__v8qu)__b); } @@ -2400,9 +2399,8 @@ _mm_min_pi16(__m64 __a, __m64 __b) { /// \param __b /// A 64-bit integer vector containing one of the source operands. /// \returns A 64-bit integer vector containing the comparison results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 -_mm_min_pu8(__m64 __a, __m64 __b) -{ +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR +_mm_min_pu8(__m64 __a, __m64 __b) { return (__m64)__builtin_elementwise_min((__v8qu)__a, (__v8qu)__b); } diff --git a/clang/lib/Interpreter/IncrementalAction.cpp b/clang/lib/Interpreter/IncrementalAction.cpp index 4d1bc4c59e851..3d489fce54bc6 100644 --- a/clang/lib/Interpreter/IncrementalAction.cpp +++ b/clang/lib/Interpreter/IncrementalAction.cpp @@ -106,7 +106,8 @@ std::unique_ptr IncrementalAction::GenModule() { // around we created an empty module to make CodeGen happy. We should make // sure it always stays empty. 
assert(((!CachedInCodeGenModule || - !CI.getPreprocessorOpts().Includes.empty()) || + !CI.getPreprocessorOpts().Includes.empty() || + !CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) || (CachedInCodeGenModule->empty() && CachedInCodeGenModule->global_empty() && CachedInCodeGenModule->alias_empty() && diff --git a/clang/lib/Interpreter/IncrementalParser.cpp b/clang/lib/Interpreter/IncrementalParser.cpp index 32d1663fbe1a9..bf08911e23533 100644 --- a/clang/lib/Interpreter/IncrementalParser.cpp +++ b/clang/lib/Interpreter/IncrementalParser.cpp @@ -37,6 +37,10 @@ IncrementalParser::IncrementalParser(CompilerInstance &Instance, llvm::ErrorAsOutParameter EAO(&Err); Consumer = &S.getASTConsumer(); P.reset(new Parser(S.getPreprocessor(), S, /*SkipBodies=*/false)); + + if (ExternalASTSource *External = S.getASTContext().getExternalSource()) + External->StartTranslationUnit(Consumer); + P->Initialize(); } diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp index 9cc1c450b7650..b05cb5a0f1dbe 100644 --- a/clang/lib/Interpreter/Interpreter.cpp +++ b/clang/lib/Interpreter/Interpreter.cpp @@ -278,9 +278,10 @@ Interpreter::Interpreter(std::unique_ptr Instance, if (Act->getCodeGen()) { Act->CacheCodeGenModule(); - // The initial PTU is filled by `-include` or by CUDA includes - // automatically. - if (!CI->getPreprocessorOpts().Includes.empty()) { + // The initial PTU is filled by `-include`/`-include-pch` or by CUDA + // includes automatically. + if (!CI->getPreprocessorOpts().Includes.empty() || + !CI->getPreprocessorOpts().ImplicitPCHInclude.empty()) { // We can't really directly pass the CachedInCodeGenModule to the Jit // because it will steal it, causing dangling references as explained in // Interpreter::Execute diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp index f28a74f5d0ae5..238c5e2f2d9a5 100644 --- a/clang/lib/Lex/HeaderSearch.cpp +++ b/clang/lib/Lex/HeaderSearch.cpp @@ -672,9 +672,8 @@ OptionalFileEntryRef DirectoryLookup::DoFrameworkLookup( if (getDirCharacteristic() == SrcMgr::C_User) { SmallString<1024> SystemFrameworkMarker(FrameworkName); SystemFrameworkMarker += ".system_framework"; - if (llvm::sys::fs::exists(SystemFrameworkMarker)) { + if (FileMgr.getOptionalFileRef(SystemFrameworkMarker)) CacheEntry.IsUserSpecifiedSystemFramework = true; - } } } @@ -2078,7 +2077,7 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics( llvm::SmallString<32> FilePath = File; if (!WorkingDir.empty() && !path::is_absolute(FilePath)) - fs::make_absolute(WorkingDir, FilePath); + path::make_absolute(WorkingDir, FilePath); // remove_dots switches to backslashes on windows as a side-effect! // We always want to suggest forward slashes for includes. // (not remove_dots(..., posix) as that misparses windows paths). @@ -2092,7 +2091,7 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics( // `BestPrefixLength` accordingly. 
auto CheckDir = [&](llvm::SmallString<32> Dir) -> bool { if (!WorkingDir.empty() && !path::is_absolute(Dir)) - fs::make_absolute(WorkingDir, Dir); + path::make_absolute(WorkingDir, Dir); path::remove_dots(Dir, /*remove_dot_dot=*/true); for (auto NI = path::begin(File), NE = path::end(File), DI = path::begin(Dir), DE = path::end(Dir); diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp index 22c01c4e371f3..d6cd7eb8c2c3d 100644 --- a/clang/lib/Parse/ParseDecl.cpp +++ b/clang/lib/Parse/ParseDecl.cpp @@ -2083,6 +2083,9 @@ void Parser::SkipMalformedDecl() { return; break; + case tok::kw_extern: + // 'extern' at the start of a line is almost certainly a good + // place to pick back up parsing case tok::kw_namespace: // 'namespace' at the start of a line is almost certainly a good // place to pick back up parsing, except in an Objective-C diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp index 8605ba2cdb49b..a2c69578d5087 100644 --- a/clang/lib/Parse/ParseExprCXX.cpp +++ b/clang/lib/Parse/ParseExprCXX.cpp @@ -1299,7 +1299,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer( Diag(Tok, getLangOpts().CPlusPlus23 ? diag::warn_cxx20_compat_decl_attrs_on_lambda : diag::ext_decl_attrs_on_lambda) - << Tok.getIdentifierInfo() << Tok.isRegularKeywordAttribute(); + << Tok.isRegularKeywordAttribute() << Tok.getIdentifierInfo(); MaybeParseCXX11Attributes(D); } diff --git a/clang/lib/Parse/ParseHLSLRootSignature.cpp b/clang/lib/Parse/ParseHLSLRootSignature.cpp index 3b16efb1f1199..7be6eecc520b1 100644 --- a/clang/lib/Parse/ParseHLSLRootSignature.cpp +++ b/clang/lib/Parse/ParseHLSLRootSignature.cpp @@ -485,6 +485,9 @@ std::optional RootSignatureParser::parseStaticSampler() { if (Params->Visibility.has_value()) Sampler.Visibility = Params->Visibility.value(); + if (Params->Flags.has_value()) + Sampler.Flags = Params->Flags.value(); + return Sampler; } @@ -926,6 +929,20 @@ RootSignatureParser::parseStaticSamplerParams() { if (!Visibility.has_value()) return std::nullopt; Params.Visibility = Visibility; + } else if (tryConsumeExpectedToken(TokenKind::kw_flags)) { + // `flags` `=` STATIC_SAMPLE_FLAGS + if (Params.Flags.has_value()) { + reportDiag(diag::err_hlsl_rootsig_repeat_param) << CurToken.TokKind; + return std::nullopt; + } + + if (consumeExpectedToken(TokenKind::pu_equal)) + return std::nullopt; + + auto Flags = parseStaticSamplerFlags(TokenKind::kw_flags); + if (!Flags.has_value()) + return std::nullopt; + Params.Flags = Flags; } else { consumeNextToken(); // let diagnostic be at the start of invalid token reportDiag(diag::err_hlsl_invalid_token) @@ -1255,6 +1272,50 @@ RootSignatureParser::parseDescriptorRangeFlags(TokenKind Context) { return Flags; } +std::optional +RootSignatureParser::parseStaticSamplerFlags(TokenKind Context) { + assert(CurToken.TokKind == TokenKind::pu_equal && + "Expects to only be invoked starting at given keyword"); + + // Handle the edge-case of '0' to specify no flags set + if (tryConsumeExpectedToken(TokenKind::int_literal)) { + if (!verifyZeroFlag()) { + reportDiag(diag::err_hlsl_rootsig_non_zero_flag); + return std::nullopt; + } + return llvm::dxbc::StaticSamplerFlags::None; + } + + TokenKind Expected[] = { +#define STATIC_SAMPLER_FLAG_ENUM(NAME, LIT) TokenKind::en_##NAME, +#include "clang/Lex/HLSLRootSignatureTokenKinds.def" + }; + + std::optional Flags; + + do { + if (tryConsumeExpectedToken(Expected)) { + switch (CurToken.TokKind) { +#define STATIC_SAMPLER_FLAG_ENUM(NAME, LIT) \ + case TokenKind::en_##NAME: \ + 
Flags = maybeOrFlag( \ + Flags, llvm::dxbc::StaticSamplerFlags::NAME); \ + break; +#include "clang/Lex/HLSLRootSignatureTokenKinds.def" + default: + llvm_unreachable("Switch for consumed enum token was not provided"); + } + } else { + consumeNextToken(); // consume token to point at invalid token + reportDiag(diag::err_hlsl_invalid_token) + << /*value=*/1 << /*value of*/ Context; + return std::nullopt; + } + } while (tryConsumeExpectedToken(TokenKind::pu_or)); + + return Flags; +} + std::optional RootSignatureParser::handleUIntLiteral() { // Parse the numeric value and do semantic checks on its specification clang::NumericLiteralParser Literal( diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp index 02f3f109b2562..04f29c83dd457 100644 --- a/clang/lib/Parse/ParseOpenMP.cpp +++ b/clang/lib/Parse/ParseOpenMP.cpp @@ -2968,6 +2968,39 @@ OMPClause *Parser::ParseOpenMPSizesClause() { OpenLoc, CloseLoc); } +OMPClause *Parser::ParseOpenMPLoopRangeClause() { + SourceLocation ClauseNameLoc = ConsumeToken(); + SourceLocation FirstLoc, CountLoc; + + BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end); + if (T.consumeOpen()) { + Diag(Tok, diag::err_expected) << tok::l_paren; + return nullptr; + } + + FirstLoc = Tok.getLocation(); + ExprResult FirstVal = ParseConstantExpression(); + if (!FirstVal.isUsable()) { + T.skipToEnd(); + return nullptr; + } + + ExpectAndConsume(tok::comma); + + CountLoc = Tok.getLocation(); + ExprResult CountVal = ParseConstantExpression(); + if (!CountVal.isUsable()) { + T.skipToEnd(); + return nullptr; + } + + T.consumeClose(); + + return Actions.OpenMP().ActOnOpenMPLoopRangeClause( + FirstVal.get(), CountVal.get(), ClauseNameLoc, T.getOpenLocation(), + FirstLoc, CountLoc, T.getCloseLocation()); +} + OMPClause *Parser::ParseOpenMPPermutationClause() { SourceLocation ClauseNameLoc, OpenLoc, CloseLoc; SmallVector ArgExprs; @@ -3473,6 +3506,9 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind, } Clause = ParseOpenMPClause(CKind, WrongDirective); break; + case OMPC_looprange: + Clause = ParseOpenMPLoopRangeClause(); + break; default: break; } diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp index 1b66d83df5171..8606227152a84 100644 --- a/clang/lib/Sema/AnalysisBasedWarnings.cpp +++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp @@ -983,10 +983,9 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use, case UninitUse::AfterDecl: case UninitUse::AfterCall: S.Diag(VD->getLocation(), diag::warn_sometimes_uninit_var) - << VD->getDeclName() << IsCapturedByBlock - << (Use.getKind() == UninitUse::AfterDecl ? 4 : 5) - << const_cast(VD->getLexicalDeclContext()) - << VD->getSourceRange(); + << VD->getDeclName() << IsCapturedByBlock + << (Use.getKind() == UninitUse::AfterDecl ? 
4 : 5) + << VD->getLexicalDeclContext() << VD->getSourceRange(); S.Diag(Use.getUser()->getBeginLoc(), diag::note_uninit_var_use) << IsCapturedByBlock << Use.getUser()->getSourceRange(); return; diff --git a/clang/lib/Sema/CheckExprLifetime.cpp b/clang/lib/Sema/CheckExprLifetime.cpp index e02e00231e58e..e8a7ad3bd355a 100644 --- a/clang/lib/Sema/CheckExprLifetime.cpp +++ b/clang/lib/Sema/CheckExprLifetime.cpp @@ -10,6 +10,7 @@ #include "clang/AST/Decl.h" #include "clang/AST/Expr.h" #include "clang/AST/Type.h" +#include "clang/Analysis/Analyses/LifetimeAnnotations.h" #include "clang/Basic/DiagnosticSema.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Sema.h" @@ -503,60 +504,6 @@ shouldTrackFirstArgumentForConstructor(const CXXConstructExpr *Ctor) { return true; } -// Return true if this is an "normal" assignment operator. -// We assume that a normal assignment operator always returns *this, that is, -// an lvalue reference that is the same type as the implicit object parameter -// (or the LHS for a non-member operator$=). -static bool isNormalAssignmentOperator(const FunctionDecl *FD) { - OverloadedOperatorKind OO = FD->getDeclName().getCXXOverloadedOperator(); - if (OO == OO_Equal || isCompoundAssignmentOperator(OO)) { - QualType RetT = FD->getReturnType(); - if (RetT->isLValueReferenceType()) { - ASTContext &Ctx = FD->getASTContext(); - QualType LHST; - auto *MD = dyn_cast(FD); - if (MD && MD->isCXXInstanceMember()) - LHST = Ctx.getLValueReferenceType(MD->getFunctionObjectParameterType()); - else - LHST = FD->getParamDecl(0)->getType(); - if (Ctx.hasSameType(RetT, LHST)) - return true; - } - } - return false; -} - -static const FunctionDecl * -getDeclWithMergedLifetimeBoundAttrs(const FunctionDecl *FD) { - return FD != nullptr ? FD->getMostRecentDecl() : nullptr; -} - -static const CXXMethodDecl * -getDeclWithMergedLifetimeBoundAttrs(const CXXMethodDecl *CMD) { - const FunctionDecl *FD = CMD; - return cast_if_present( - getDeclWithMergedLifetimeBoundAttrs(FD)); -} - -bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD) { - FD = getDeclWithMergedLifetimeBoundAttrs(FD); - const TypeSourceInfo *TSI = FD->getTypeSourceInfo(); - if (!TSI) - return false; - // Don't declare this variable in the second operand of the for-statement; - // GCC miscompiles that by ending its lifetime before evaluating the - // third operand. See gcc.gnu.org/PR86769. - AttributedTypeLoc ATL; - for (TypeLoc TL = TSI->getTypeLoc(); - (ATL = TL.getAsAdjusted()); - TL = ATL.getModifiedLoc()) { - if (ATL.getAttrAs()) - return true; - } - - return isNormalAssignmentOperator(FD); -} - // Visit lifetimebound or gsl-pointer arguments. static void visitFunctionCallArguments(IndirectLocalPath &Path, Expr *Call, LocalVisitor Visit) { @@ -639,7 +586,8 @@ static void visitFunctionCallArguments(IndirectLocalPath &Path, Expr *Call, // lifetimebound. 
if (Sema::CanBeGetReturnObject(Callee)) CheckCoroObjArg = false; - if (implicitObjectParamIsLifetimeBound(Callee) || CheckCoroObjArg) + if (lifetimes::implicitObjectParamIsLifetimeBound(Callee) || + CheckCoroObjArg) VisitLifetimeBoundArg(Callee, ObjectArg); else if (EnableGSLAnalysis) { if (auto *CME = dyn_cast(Callee); @@ -648,7 +596,8 @@ static void visitFunctionCallArguments(IndirectLocalPath &Path, Expr *Call, } } - const FunctionDecl *CanonCallee = getDeclWithMergedLifetimeBoundAttrs(Callee); + const FunctionDecl *CanonCallee = + lifetimes::getDeclWithMergedLifetimeBoundAttrs(Callee); unsigned NP = std::min(Callee->getNumParams(), CanonCallee->getNumParams()); for (unsigned I = 0, N = std::min(NP, Args.size()); I != N; ++I) { Expr *Arg = Args[I]; @@ -1276,19 +1225,14 @@ static AnalysisResult analyzePathForGSLPointer(const IndirectLocalPath &Path, return Report; } -static bool isAssignmentOperatorLifetimeBound(const CXXMethodDecl *CMD) { - CMD = getDeclWithMergedLifetimeBoundAttrs(CMD); - return CMD && isNormalAssignmentOperator(CMD) && CMD->param_size() == 1 && - CMD->getParamDecl(0)->hasAttr(); -} - static bool shouldRunGSLAssignmentAnalysis(const Sema &SemaRef, const AssignedEntity &Entity) { bool EnableGSLAssignmentWarnings = !SemaRef.getDiagnostics().isIgnored( diag::warn_dangling_lifetime_pointer_assignment, SourceLocation()); return (EnableGSLAssignmentWarnings && (isRecordWithAttr(Entity.LHS->getType()) || - isAssignmentOperatorLifetimeBound(Entity.AssignmentOperator))); + lifetimes::isAssignmentOperatorLifetimeBound( + Entity.AssignmentOperator))); } static void @@ -1610,11 +1554,11 @@ checkExprLifetimeImpl(Sema &SemaRef, const InitializedEntity *InitEntity, switch (LK) { case LK_Assignment: { if (shouldRunGSLAssignmentAnalysis(SemaRef, *AEntity)) - Path.push_back( - {isAssignmentOperatorLifetimeBound(AEntity->AssignmentOperator) - ? IndirectLocalPathEntry::LifetimeBoundCall - : IndirectLocalPathEntry::GslPointerAssignment, - Init}); + Path.push_back({lifetimes::isAssignmentOperatorLifetimeBound( + AEntity->AssignmentOperator) + ? IndirectLocalPathEntry::LifetimeBoundCall + : IndirectLocalPathEntry::GslPointerAssignment, + Init}); break; } case LK_LifetimeCapture: { diff --git a/clang/lib/Sema/CheckExprLifetime.h b/clang/lib/Sema/CheckExprLifetime.h index 6351e52a362f1..16595d0ca1b36 100644 --- a/clang/lib/Sema/CheckExprLifetime.h +++ b/clang/lib/Sema/CheckExprLifetime.h @@ -60,8 +60,6 @@ void checkCaptureByLifetime(Sema &SemaRef, const CapturingEntity &Entity, void checkExprLifetimeMustTailArg(Sema &SemaRef, const InitializedEntity &Entity, Expr *Init); -bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD); - } // namespace clang::sema #endif // LLVM_CLANG_SEMA_CHECK_EXPR_LIFETIME_H diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp index 5eafd03d89efe..3c20ccd799b2d 100644 --- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp +++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.cpp @@ -138,7 +138,16 @@ struct BuiltinTypeMethodBuilder { // LastStmt - refers to the last statement in the method body; referencing // LastStmt will remove the statement from the method body since // it will be linked from the new expression being constructed. 
- enum class PlaceHolder { _0, _1, _2, _3, _4, Handle = 128, LastStmt }; + enum class PlaceHolder { + _0, + _1, + _2, + _3, + _4, + Handle = 128, + CounterHandle, + LastStmt + }; Expr *convertPlaceholder(PlaceHolder PH); Expr *convertPlaceholder(LocalVar &Var); @@ -178,10 +187,14 @@ struct BuiltinTypeMethodBuilder { template BuiltinTypeMethodBuilder &setHandleFieldOnResource(ResourceT ResourceRecord, ValueT HandleValue); + template + BuiltinTypeMethodBuilder & + accessCounterHandleFieldOnResource(T ResourceRecord); template BuiltinTypeMethodBuilder &returnValue(T ReturnValue); BuiltinTypeMethodBuilder &returnThis(); BuiltinTypeDeclBuilder &finalize(); Expr *getResourceHandleExpr(); + Expr *getResourceCounterHandleExpr(); private: void createDecl(); @@ -346,6 +359,8 @@ TemplateParameterListBuilder::finalizeTemplateArgs(ConceptDecl *CD) { Expr *BuiltinTypeMethodBuilder::convertPlaceholder(PlaceHolder PH) { if (PH == PlaceHolder::Handle) return getResourceHandleExpr(); + if (PH == PlaceHolder::CounterHandle) + return getResourceCounterHandleExpr(); if (PH == PlaceHolder::LastStmt) { assert(!StmtsList.empty() && "no statements in the list"); @@ -467,6 +482,18 @@ Expr *BuiltinTypeMethodBuilder::getResourceHandleExpr() { OK_Ordinary); } +Expr *BuiltinTypeMethodBuilder::getResourceCounterHandleExpr() { + ensureCompleteDecl(); + + ASTContext &AST = DeclBuilder.SemaRef.getASTContext(); + CXXThisExpr *This = CXXThisExpr::Create( + AST, SourceLocation(), Method->getFunctionObjectParameterType(), true); + FieldDecl *HandleField = DeclBuilder.getResourceCounterHandleField(); + return MemberExpr::CreateImplicit(AST, This, false, HandleField, + HandleField->getType(), VK_LValue, + OK_Ordinary); +} + BuiltinTypeMethodBuilder & BuiltinTypeMethodBuilder::declareLocalVar(LocalVar &Var) { ensureCompleteDecl(); @@ -583,6 +610,22 @@ BuiltinTypeMethodBuilder::setHandleFieldOnResource(ResourceT ResourceRecord, return *this; } +template +BuiltinTypeMethodBuilder & +BuiltinTypeMethodBuilder::accessCounterHandleFieldOnResource(T ResourceRecord) { + ensureCompleteDecl(); + + Expr *ResourceExpr = convertPlaceholder(ResourceRecord); + + ASTContext &AST = DeclBuilder.SemaRef.getASTContext(); + FieldDecl *HandleField = DeclBuilder.getResourceCounterHandleField(); + MemberExpr *HandleExpr = MemberExpr::CreateImplicit( + AST, ResourceExpr, false, HandleField, HandleField->getType(), VK_LValue, + OK_Ordinary); + StmtsList.push_back(HandleExpr); + return *this; +} + template BuiltinTypeMethodBuilder &BuiltinTypeMethodBuilder::returnValue(T ReturnValue) { ensureCompleteDecl(); @@ -722,8 +765,31 @@ BuiltinTypeDeclBuilder::addMemberVariable(StringRef Name, QualType Type, return *this; } +BuiltinTypeDeclBuilder & +BuiltinTypeDeclBuilder::addBufferHandles(ResourceClass RC, bool IsROV, + bool RawBuffer, bool HasCounter, + AccessSpecifier Access) { + addHandleMember(RC, IsROV, RawBuffer, Access); + if (HasCounter) + addCounterHandleMember(RC, IsROV, RawBuffer, Access); + return *this; +} + BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addHandleMember( ResourceClass RC, bool IsROV, bool RawBuffer, AccessSpecifier Access) { + return addResourceMember("__handle", RC, IsROV, RawBuffer, + /*IsCounter=*/false, Access); +} + +BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCounterHandleMember( + ResourceClass RC, bool IsROV, bool RawBuffer, AccessSpecifier Access) { + return addResourceMember("__counter_handle", RC, IsROV, RawBuffer, + /*IsCounter=*/true, Access); +} + +BuiltinTypeDeclBuilder 
&BuiltinTypeDeclBuilder::addResourceMember( + StringRef MemberName, ResourceClass RC, bool IsROV, bool RawBuffer, + bool IsCounter, AccessSpecifier Access) { assert(!Record->isCompleteDefinition() && "record is already complete"); ASTContext &Ctx = SemaRef.getASTContext(); @@ -739,17 +805,19 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addHandleMember( ElementTypeInfo ? HLSLContainedTypeAttr::CreateImplicit(Ctx, ElementTypeInfo) : nullptr}; + if (IsCounter) + Attrs.push_back(HLSLIsCounterAttr::CreateImplicit(Ctx)); + if (CreateHLSLAttributedResourceType(SemaRef, Ctx.HLSLResourceTy, Attrs, AttributedResTy)) - addMemberVariable("__handle", AttributedResTy, {}, Access); + addMemberVariable(MemberName, AttributedResTy, {}, Access); return *this; } // Adds default constructor to the resource class: // Resource::Resource() BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addDefaultHandleConstructor() { - if (Record->isCompleteDefinition()) - return *this; + assert(!Record->isCompleteDefinition() && "record is already complete"); using PH = BuiltinTypeMethodBuilder::PlaceHolder; QualType HandleType = getResourceHandleField()->getType(); @@ -773,8 +841,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addDefaultHandleConstructor() { // return tmp; // } BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCreateFromBinding() { - if (Record->isCompleteDefinition()) - return *this; + assert(!Record->isCompleteDefinition() && "record is already complete"); using PH = BuiltinTypeMethodBuilder::PlaceHolder; ASTContext &AST = SemaRef.getASTContext(); @@ -811,8 +878,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCreateFromBinding() { // return tmp; // } BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCreateFromImplicitBinding() { - if (Record->isCompleteDefinition()) - return *this; + assert(!Record->isCompleteDefinition() && "record is already complete"); using PH = BuiltinTypeMethodBuilder::PlaceHolder; ASTContext &AST = SemaRef.getASTContext(); @@ -838,8 +904,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCreateFromImplicitBinding() { } BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCopyConstructor() { - if (Record->isCompleteDefinition()) - return *this; + assert(!Record->isCompleteDefinition() && "record is already complete"); ASTContext &AST = SemaRef.getASTContext(); QualType RecordType = AST.getCanonicalTagType(Record); @@ -848,17 +913,21 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCopyConstructor() { using PH = BuiltinTypeMethodBuilder::PlaceHolder; - return BuiltinTypeMethodBuilder(*this, /*Name=*/"", AST.VoidTy, - /*IsConst=*/false, /*IsCtor=*/true) - .addParam("other", ConstRecordRefType) + BuiltinTypeMethodBuilder MMB(*this, /*Name=*/"", AST.VoidTy, + /*IsConst=*/false, /*IsCtor=*/true); + MMB.addParam("other", ConstRecordRefType) .accessHandleFieldOnResource(PH::_0) - .assign(PH::Handle, PH::LastStmt) - .finalize(); + .assign(PH::Handle, PH::LastStmt); + + if (getResourceCounterHandleField()) + MMB.accessCounterHandleFieldOnResource(PH::_0).assign(PH::CounterHandle, + PH::LastStmt); + + return MMB.finalize(); } BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCopyAssignmentOperator() { - if (Record->isCompleteDefinition()) - return *this; + assert(!Record->isCompleteDefinition() && "record is already complete"); ASTContext &AST = SemaRef.getASTContext(); QualType RecordType = AST.getCanonicalTagType(Record); @@ -868,12 +937,16 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addCopyAssignmentOperator() { using PH = 
BuiltinTypeMethodBuilder::PlaceHolder; DeclarationName Name = AST.DeclarationNames.getCXXOperatorName(OO_Equal); - return BuiltinTypeMethodBuilder(*this, Name, RecordRefType) - .addParam("other", ConstRecordRefType) + BuiltinTypeMethodBuilder MMB(*this, Name, RecordRefType); + MMB.addParam("other", ConstRecordRefType) .accessHandleFieldOnResource(PH::_0) - .assign(PH::Handle, PH::LastStmt) - .returnThis() - .finalize(); + .assign(PH::Handle, PH::LastStmt); + + if (getResourceCounterHandleField()) + MMB.accessCounterHandleFieldOnResource(PH::_0).assign(PH::CounterHandle, + PH::LastStmt); + + return MMB.returnThis().finalize(); } BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addArraySubscriptOperators() { @@ -889,8 +962,7 @@ BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addArraySubscriptOperators() { } BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::addLoadMethods() { - if (Record->isCompleteDefinition()) - return *this; + assert(!Record->isCompleteDefinition() && "record is already complete"); ASTContext &AST = Record->getASTContext(); IdentifierInfo &II = AST.Idents.get("Load", tok::TokenKind::identifier); @@ -909,6 +981,14 @@ FieldDecl *BuiltinTypeDeclBuilder::getResourceHandleField() const { return I->second; } +FieldDecl *BuiltinTypeDeclBuilder::getResourceCounterHandleField() const { + auto I = Fields.find("__counter_handle"); + if (I == Fields.end() || + !I->second->getType()->isHLSLAttributedResourceType()) + return nullptr; + return I->second; +} + QualType BuiltinTypeDeclBuilder::getFirstTemplateTypeParam() { assert(Template && "record it not a template"); if (const auto *TTD = dyn_cast( @@ -931,12 +1011,6 @@ BuiltinTypeDeclBuilder::getResourceAttrs() const { return cast(HandleType)->getAttrs(); } -// BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::startDefinition() { -// assert(!Record->isCompleteDefinition() && "record is already complete"); -// Record->startDefinition(); -// return *this; -// } - BuiltinTypeDeclBuilder &BuiltinTypeDeclBuilder::completeDefinition() { assert(!Record->isCompleteDefinition() && "record is already complete"); assert(Record->isBeingDefined() && diff --git a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h index 9448af13530cb..a981602a50461 100644 --- a/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h +++ b/clang/lib/Sema/HLSLBuiltinTypeDeclBuilder.h @@ -72,8 +72,9 @@ class BuiltinTypeDeclBuilder { AccessSpecifier Access = AccessSpecifier::AS_private); BuiltinTypeDeclBuilder & - addHandleMember(ResourceClass RC, bool IsROV, bool RawBuffer, - AccessSpecifier Access = AccessSpecifier::AS_private); + addBufferHandles(ResourceClass RC, bool IsROV, bool RawBuffer, + bool HasCounter, + AccessSpecifier Access = AccessSpecifier::AS_private); BuiltinTypeDeclBuilder &addArraySubscriptOperators(); // Builtin types constructors @@ -95,7 +96,18 @@ class BuiltinTypeDeclBuilder { BuiltinTypeDeclBuilder &addConsumeMethod(); private: + BuiltinTypeDeclBuilder &addResourceMember(StringRef MemberName, + ResourceClass RC, bool IsROV, + bool RawBuffer, bool IsCounter, + AccessSpecifier Access); + BuiltinTypeDeclBuilder & + addHandleMember(ResourceClass RC, bool IsROV, bool RawBuffer, + AccessSpecifier Access = AccessSpecifier::AS_private); + BuiltinTypeDeclBuilder & + addCounterHandleMember(ResourceClass RC, bool IsROV, bool RawBuffer, + AccessSpecifier Access = AccessSpecifier::AS_private); FieldDecl *getResourceHandleField() const; + FieldDecl *getResourceCounterHandleField() const; QualType getFirstTemplateTypeParam(); QualType 
getHandleElementType(); Expr *getConstantIntExpr(int value); diff --git a/clang/lib/Sema/HLSLExternalSemaSource.cpp b/clang/lib/Sema/HLSLExternalSemaSource.cpp index 781f0445d0b61..cc43e9474ea79 100644 --- a/clang/lib/Sema/HLSLExternalSemaSource.cpp +++ b/clang/lib/Sema/HLSLExternalSemaSource.cpp @@ -121,16 +121,118 @@ void HLSLExternalSemaSource::defineHLSLVectorAlias() { HLSLNamespace->addDecl(Template); } +void HLSLExternalSemaSource::defineHLSLMatrixAlias() { + ASTContext &AST = SemaPtr->getASTContext(); + llvm::SmallVector TemplateParams; + + auto *TypeParam = TemplateTypeParmDecl::Create( + AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 0, + &AST.Idents.get("element", tok::TokenKind::identifier), false, false); + TypeParam->setDefaultArgument( + AST, SemaPtr->getTrivialTemplateArgumentLoc( + TemplateArgument(AST.FloatTy), QualType(), SourceLocation())); + + TemplateParams.emplace_back(TypeParam); + + // these should be 64 bit to be consistent with other clang matrices. + auto *RowsParam = NonTypeTemplateParmDecl::Create( + AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 1, + &AST.Idents.get("rows_count", tok::TokenKind::identifier), AST.IntTy, + false, AST.getTrivialTypeSourceInfo(AST.IntTy)); + llvm::APInt RVal(AST.getIntWidth(AST.IntTy), 4); + TemplateArgument RDefault(AST, llvm::APSInt(std::move(RVal)), AST.IntTy, + /*IsDefaulted=*/true); + RowsParam->setDefaultArgument( + AST, SemaPtr->getTrivialTemplateArgumentLoc(RDefault, AST.IntTy, + SourceLocation(), RowsParam)); + TemplateParams.emplace_back(RowsParam); + + auto *ColsParam = NonTypeTemplateParmDecl::Create( + AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 2, + &AST.Idents.get("cols_count", tok::TokenKind::identifier), AST.IntTy, + false, AST.getTrivialTypeSourceInfo(AST.IntTy)); + llvm::APInt CVal(AST.getIntWidth(AST.IntTy), 4); + TemplateArgument CDefault(AST, llvm::APSInt(std::move(CVal)), AST.IntTy, + /*IsDefaulted=*/true); + ColsParam->setDefaultArgument( + AST, SemaPtr->getTrivialTemplateArgumentLoc(CDefault, AST.IntTy, + SourceLocation(), ColsParam)); + TemplateParams.emplace_back(ColsParam); + + const unsigned MaxMatDim = 4; + auto *MaxRow = IntegerLiteral::Create( + AST, llvm::APInt(AST.getIntWidth(AST.IntTy), MaxMatDim), AST.IntTy, + SourceLocation()); + auto *MaxCol = IntegerLiteral::Create( + AST, llvm::APInt(AST.getIntWidth(AST.IntTy), MaxMatDim), AST.IntTy, + SourceLocation()); + + auto *RowsRef = DeclRefExpr::Create( + AST, NestedNameSpecifierLoc(), SourceLocation(), RowsParam, + /*RefersToEnclosingVariableOrCapture*/ false, + DeclarationNameInfo(RowsParam->getDeclName(), SourceLocation()), + AST.IntTy, VK_LValue); + auto *ColsRef = DeclRefExpr::Create( + AST, NestedNameSpecifierLoc(), SourceLocation(), ColsParam, + /*RefersToEnclosingVariableOrCapture*/ false, + DeclarationNameInfo(ColsParam->getDeclName(), SourceLocation()), + AST.IntTy, VK_LValue); + + auto *RowsLE = BinaryOperator::Create(AST, RowsRef, MaxRow, BO_LE, AST.BoolTy, + VK_PRValue, OK_Ordinary, + SourceLocation(), FPOptionsOverride()); + auto *ColsLE = BinaryOperator::Create(AST, ColsRef, MaxCol, BO_LE, AST.BoolTy, + VK_PRValue, OK_Ordinary, + SourceLocation(), FPOptionsOverride()); + + auto *RequiresExpr = BinaryOperator::Create( + AST, RowsLE, ColsLE, BO_LAnd, AST.BoolTy, VK_PRValue, OK_Ordinary, + SourceLocation(), FPOptionsOverride()); + + auto *ParamList = TemplateParameterList::Create( + AST, SourceLocation(), SourceLocation(), TemplateParams, SourceLocation(), + RequiresExpr); + + IdentifierInfo &II = 
AST.Idents.get("matrix", tok::TokenKind::identifier); + + QualType AliasType = AST.getDependentSizedMatrixType( + AST.getTemplateTypeParmType(0, 0, false, TypeParam), + DeclRefExpr::Create( + AST, NestedNameSpecifierLoc(), SourceLocation(), RowsParam, false, + DeclarationNameInfo(RowsParam->getDeclName(), SourceLocation()), + AST.IntTy, VK_LValue), + DeclRefExpr::Create( + AST, NestedNameSpecifierLoc(), SourceLocation(), ColsParam, false, + DeclarationNameInfo(ColsParam->getDeclName(), SourceLocation()), + AST.IntTy, VK_LValue), + SourceLocation()); + + auto *Record = TypeAliasDecl::Create(AST, HLSLNamespace, SourceLocation(), + SourceLocation(), &II, + AST.getTrivialTypeSourceInfo(AliasType)); + Record->setImplicit(true); + + auto *Template = + TypeAliasTemplateDecl::Create(AST, HLSLNamespace, SourceLocation(), + Record->getIdentifier(), ParamList, Record); + + Record->setDescribedAliasTemplate(Template); + Template->setImplicit(true); + Template->setLexicalDeclContext(Record->getDeclContext()); + HLSLNamespace->addDecl(Template); +} + void HLSLExternalSemaSource::defineTrivialHLSLTypes() { defineHLSLVectorAlias(); + defineHLSLMatrixAlias(); } /// Set up common members and attributes for buffer types static BuiltinTypeDeclBuilder setupBufferType(CXXRecordDecl *Decl, Sema &S, ResourceClass RC, bool IsROV, - bool RawBuffer) { + bool RawBuffer, bool HasCounter) { return BuiltinTypeDeclBuilder(S, Decl) - .addHandleMember(RC, IsROV, RawBuffer) + .addBufferHandles(RC, IsROV, RawBuffer, HasCounter) .addDefaultHandleConstructor() .addCopyConstructor() .addCopyAssignmentOperator() @@ -275,7 +377,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::SRV, /*IsROV=*/false, - /*RawBuffer=*/false) + /*RawBuffer=*/false, /*HasCounter=*/false) .addArraySubscriptOperators() .addLoadMethods() .completeDefinition(); @@ -287,7 +389,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false, - /*RawBuffer=*/false) + /*RawBuffer=*/false, /*HasCounter=*/false) .addArraySubscriptOperators() .addLoadMethods() .completeDefinition(); @@ -299,7 +401,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { .finalizeForwardDeclaration(); onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/true, - /*RawBuffer=*/false) + /*RawBuffer=*/false, /*HasCounter=*/false) .addArraySubscriptOperators() .addLoadMethods() .completeDefinition(); @@ -310,7 +412,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { .finalizeForwardDeclaration(); onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::SRV, /*IsROV=*/false, - /*RawBuffer=*/true) + /*RawBuffer=*/true, /*HasCounter=*/false) .addArraySubscriptOperators() .addLoadMethods() .completeDefinition(); @@ -321,7 +423,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { .finalizeForwardDeclaration(); onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false, - /*RawBuffer=*/true) + /*RawBuffer=*/true, /*HasCounter=*/true) .addArraySubscriptOperators() .addLoadMethods() .addIncrementCounterMethod() @@ -335,7 +437,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { 
.finalizeForwardDeclaration(); onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false, - /*RawBuffer=*/true) + /*RawBuffer=*/true, /*HasCounter=*/true) .addAppendMethod() .completeDefinition(); }); @@ -346,7 +448,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { .finalizeForwardDeclaration(); onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false, - /*RawBuffer=*/true) + /*RawBuffer=*/true, /*HasCounter=*/true) .addConsumeMethod() .completeDefinition(); }); @@ -357,7 +459,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { .finalizeForwardDeclaration(); onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/true, - /*RawBuffer=*/true) + /*RawBuffer=*/true, /*HasCounter=*/true) .addArraySubscriptOperators() .addLoadMethods() .addIncrementCounterMethod() @@ -369,14 +471,14 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { .finalizeForwardDeclaration(); onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::SRV, /*IsROV=*/false, - /*RawBuffer=*/true) + /*RawBuffer=*/true, /*HasCounter=*/false) .completeDefinition(); }); Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RWByteAddressBuffer") .finalizeForwardDeclaration(); onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/false, - /*RawBuffer=*/true) + /*RawBuffer=*/true, /*HasCounter=*/false) .completeDefinition(); }); Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, @@ -384,7 +486,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() { .finalizeForwardDeclaration(); onCompletion(Decl, [this](CXXRecordDecl *Decl) { setupBufferType(Decl, *SemaPtr, ResourceClass::UAV, /*IsROV=*/true, - /*RawBuffer=*/true) + /*RawBuffer=*/true, /*HasCounter=*/false) .completeDefinition(); }); } diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp index b53e726b1c970..3a0c23187d45d 100644 --- a/clang/lib/Sema/SemaAMDGPU.cpp +++ b/clang/lib/Sema/SemaAMDGPU.cpp @@ -58,9 +58,11 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, [[fallthrough]]; } default: - Diag(ArgExpr->getExprLoc(), diag::err_amdgcn_load_lds_size_invalid_value) + SemaRef.targetDiag(ArgExpr->getExprLoc(), + diag::err_amdgcn_load_lds_size_invalid_value) << ArgExpr->getSourceRange(); - Diag(ArgExpr->getExprLoc(), diag::note_amdgcn_load_lds_size_valid_value) + SemaRef.targetDiag(ArgExpr->getExprLoc(), + diag::note_amdgcn_load_lds_size_valid_value) << HasGFX950Insts << ArgExpr->getSourceRange(); return true; } diff --git a/clang/lib/Sema/SemaAPINotes.cpp b/clang/lib/Sema/SemaAPINotes.cpp index 99a29add8211d..35cdfbf8bf390 100644 --- a/clang/lib/Sema/SemaAPINotes.cpp +++ b/clang/lib/Sema/SemaAPINotes.cpp @@ -10,7 +10,6 @@ // //===----------------------------------------------------------------------===// -#include "CheckExprLifetime.h" #include "TypeLocBuilder.h" #include "clang/APINotes/APINotesReader.h" #include "clang/APINotes/Types.h" @@ -18,6 +17,7 @@ #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/TypeLoc.h" +#include "clang/Analysis/Analyses/LifetimeAnnotations.h" #include "clang/Basic/SourceLocation.h" #include "clang/Lex/Lexer.h" #include "clang/Sema/SemaObjC.h" @@ -654,7 +654,7 @@ static void ProcessAPINotes(Sema &S, CXXMethodDecl 
*Method, const api_notes::CXXMethodInfo &Info, VersionedInfoMetadata Metadata) { if (Info.This && Info.This->isLifetimebound() && - !sema::implicitObjectParamIsLifetimeBound(Method)) { + !lifetimes::implicitObjectParamIsLifetimeBound(Method)) { auto MethodType = Method->getType(); auto *attr = ::new (S.Context) LifetimeBoundAttr(S.Context, getPlaceholderAttrInfo()); diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index b3b67230f7687..7ce3513fe0969 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -2268,7 +2268,8 @@ static bool BuiltinCountZeroBitsGeneric(Sema &S, CallExpr *TheCall) { } static bool CheckMaskedBuiltinArgs(Sema &S, Expr *MaskArg, Expr *PtrArg, - unsigned Pos, bool Vector = true) { + unsigned Pos, bool AllowConst, + bool AllowAS) { QualType MaskTy = MaskArg->getType(); if (!MaskTy->isExtVectorBoolType()) return S.Diag(MaskArg->getBeginLoc(), diag::err_builtin_invalid_arg_type) @@ -2276,11 +2277,38 @@ static bool CheckMaskedBuiltinArgs(Sema &S, Expr *MaskArg, Expr *PtrArg, << MaskTy; QualType PtrTy = PtrArg->getType(); - if (!PtrTy->isPointerType() || - (Vector && !PtrTy->getPointeeType()->isVectorType()) || - (!Vector && PtrTy->getPointeeType()->isVectorType())) + if (!PtrTy->isPointerType() || PtrTy->getPointeeType()->isVectorType()) return S.Diag(PtrArg->getExprLoc(), diag::err_vec_masked_load_store_ptr) - << Pos << (Vector ? "pointer to vector" : "scalar pointer"); + << Pos << "scalar pointer"; + + QualType PointeeTy = PtrTy->getPointeeType(); + if (PointeeTy.isVolatileQualified() || PointeeTy->isAtomicType() || + (!AllowConst && PointeeTy.isConstQualified()) || + (!AllowAS && PointeeTy.hasAddressSpace())) { + QualType Target = + S.Context.getPointerType(PointeeTy.getAtomicUnqualifiedType()); + return S.Diag(PtrArg->getExprLoc(), + diag::err_typecheck_convert_incompatible) + << PtrTy << Target << /*different qualifiers=*/5 + << /*qualifier difference=*/0 << /*parameter mismatch=*/3 << 2 + << PtrTy << Target; + } + return false; +} + +static bool ConvertMaskedBuiltinArgs(Sema &S, CallExpr *TheCall) { + bool TypeDependent = false; + for (unsigned Arg = 0, E = TheCall->getNumArgs(); Arg != E; ++Arg) { + ExprResult Converted = + S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(Arg)); + if (Converted.isInvalid()) + return true; + TheCall->setArg(Arg, Converted.get()); + TypeDependent |= Converted.get()->isTypeDependent(); + } + + if (TypeDependent) + TheCall->setType(S.Context.DependentTy); return false; } @@ -2288,33 +2316,35 @@ static ExprResult BuiltinMaskedLoad(Sema &S, CallExpr *TheCall) { if (S.checkArgCountRange(TheCall, 2, 3)) return ExprError(); + if (ConvertMaskedBuiltinArgs(S, TheCall)) + return ExprError(); + Expr *MaskArg = TheCall->getArg(0); Expr *PtrArg = TheCall->getArg(1); - if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 2)) + if (TheCall->isTypeDependent()) + return TheCall; + + if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 2, /*AllowConst=*/true, + TheCall->getBuiltinCallee() == + Builtin::BI__builtin_masked_load)) return ExprError(); QualType MaskTy = MaskArg->getType(); QualType PtrTy = PtrArg->getType(); QualType PointeeTy = PtrTy->getPointeeType(); const VectorType *MaskVecTy = MaskTy->getAs(); - const VectorType *DataVecTy = PointeeTy->getAs(); + QualType RetTy = S.Context.getExtVectorType(PointeeTy.getUnqualifiedType(), + MaskVecTy->getNumElements()); if (TheCall->getNumArgs() == 3) { Expr *PassThruArg = TheCall->getArg(2); QualType PassThruTy = PassThruArg->getType(); - if 
(!S.Context.hasSameType(PassThruTy, PointeeTy)) + if (!S.Context.hasSameType(PassThruTy, RetTy)) return S.Diag(PtrArg->getExprLoc(), diag::err_vec_masked_load_store_ptr) - << /* third argument */ 3 << PointeeTy; + << /* third argument */ 3 << RetTy; } - if (MaskVecTy->getNumElements() != DataVecTy->getNumElements()) - return ExprError( - S.Diag(TheCall->getBeginLoc(), diag::err_vec_masked_load_store_size) - << S.getASTContext().BuiltinInfo.getQuotedName( - TheCall->getBuiltinCallee()) - << MaskTy << PointeeTy); - - TheCall->setType(PointeeTy); + TheCall->setType(RetTy); return TheCall; } @@ -2322,11 +2352,18 @@ static ExprResult BuiltinMaskedStore(Sema &S, CallExpr *TheCall) { if (S.checkArgCount(TheCall, 3)) return ExprError(); + if (ConvertMaskedBuiltinArgs(S, TheCall)) + return ExprError(); + Expr *MaskArg = TheCall->getArg(0); Expr *ValArg = TheCall->getArg(1); Expr *PtrArg = TheCall->getArg(2); + if (TheCall->isTypeDependent()) + return TheCall; - if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 3)) + if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 3, /*AllowConst=*/false, + TheCall->getBuiltinCallee() == + Builtin::BI__builtin_masked_store)) return ExprError(); QualType MaskTy = MaskArg->getType(); @@ -2339,18 +2376,10 @@ static ExprResult BuiltinMaskedStore(Sema &S, CallExpr *TheCall) { QualType PointeeTy = PtrTy->getPointeeType(); const VectorType *MaskVecTy = MaskTy->getAs(); - const VectorType *ValVecTy = ValTy->getAs(); - const VectorType *PtrVecTy = PointeeTy->getAs(); - - if (MaskVecTy->getNumElements() != ValVecTy->getNumElements() || - MaskVecTy->getNumElements() != PtrVecTy->getNumElements()) - return ExprError( - S.Diag(TheCall->getBeginLoc(), diag::err_vec_masked_load_store_size) - << S.getASTContext().BuiltinInfo.getQuotedName( - TheCall->getBuiltinCallee()) - << MaskTy << PointeeTy); - - if (!S.Context.hasSameType(ValTy, PointeeTy)) + QualType MemoryTy = S.Context.getExtVectorType(PointeeTy.getUnqualifiedType(), + MaskVecTy->getNumElements()); + if (!S.Context.hasSameType(ValTy.getUnqualifiedType(), + MemoryTy.getUnqualifiedType())) return ExprError(S.Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_incompatible_vector) << TheCall->getDirectCallee() << /*isMorethantwoArgs*/ 2 @@ -2365,10 +2394,17 @@ static ExprResult BuiltinMaskedGather(Sema &S, CallExpr *TheCall) { if (S.checkArgCountRange(TheCall, 3, 4)) return ExprError(); + if (ConvertMaskedBuiltinArgs(S, TheCall)) + return ExprError(); + Expr *MaskArg = TheCall->getArg(0); Expr *IdxArg = TheCall->getArg(1); Expr *PtrArg = TheCall->getArg(2); - if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 3, /*Vector=*/false)) + if (TheCall->isTypeDependent()) + return TheCall; + + if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 3, /*AllowConst=*/true, + /*AllowAS=*/true)) return ExprError(); QualType IdxTy = IdxArg->getType(); @@ -2389,8 +2425,8 @@ static ExprResult BuiltinMaskedGather(Sema &S, CallExpr *TheCall) { TheCall->getBuiltinCallee()) << MaskTy << IdxTy); - QualType RetTy = - S.Context.getExtVectorType(PointeeTy, MaskVecTy->getNumElements()); + QualType RetTy = S.Context.getExtVectorType(PointeeTy.getUnqualifiedType(), + MaskVecTy->getNumElements()); if (TheCall->getNumArgs() == 4) { Expr *PassThruArg = TheCall->getArg(3); QualType PassThruTy = PassThruArg->getType(); @@ -2408,12 +2444,18 @@ static ExprResult BuiltinMaskedScatter(Sema &S, CallExpr *TheCall) { if (S.checkArgCount(TheCall, 4)) return ExprError(); + if (ConvertMaskedBuiltinArgs(S, TheCall)) + return ExprError(); + Expr *MaskArg = TheCall->getArg(0); Expr 
*IdxArg = TheCall->getArg(1); Expr *ValArg = TheCall->getArg(2); Expr *PtrArg = TheCall->getArg(3); + if (TheCall->isTypeDependent()) + return TheCall; - if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 3, /*Vector=*/false)) + if (CheckMaskedBuiltinArgs(S, MaskArg, PtrArg, 4, /*AllowConst=*/false, + /*AllowAS=*/true)) return ExprError(); QualType IdxTy = IdxArg->getType(); @@ -2443,9 +2485,9 @@ static ExprResult BuiltinMaskedScatter(Sema &S, CallExpr *TheCall) { TheCall->getBuiltinCallee()) << MaskTy << ValTy); - QualType ArgTy = - S.Context.getExtVectorType(PointeeTy, MaskVecTy->getNumElements()); - if (!S.Context.hasSameType(ValTy, ArgTy)) + QualType ArgTy = S.Context.getExtVectorType(PointeeTy.getUnqualifiedType(), + MaskVecTy->getNumElements()); + if (!S.Context.hasSameType(ValTy.getUnqualifiedType(), ArgTy)) return ExprError(S.Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_incompatible_vector) << TheCall->getDirectCallee() << /*isMoreThanTwoArgs*/ 2 @@ -6882,11 +6924,12 @@ StringRef Sema::GetFormatStringTypeName(FormatStringType FST) { FormatStringType Sema::GetFormatStringType(StringRef Flavor) { return llvm::StringSwitch(Flavor) - .Case("scanf", FormatStringType::Scanf) - .Cases("printf", "printf0", "syslog", FormatStringType::Printf) + .Cases("gnu_scanf", "scanf", FormatStringType::Scanf) + .Cases("gnu_printf", "printf", "printf0", "syslog", + FormatStringType::Printf) .Cases("NSString", "CFString", FormatStringType::NSString) - .Case("strftime", FormatStringType::Strftime) - .Case("strfmon", FormatStringType::Strfmon) + .Cases("gnu_strftime", "strftime", FormatStringType::Strftime) + .Cases("gnu_strfmon", "strfmon", FormatStringType::Strfmon) .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FormatStringType::Kprintf) .Case("freebsd_kprintf", FormatStringType::FreeBSDKPrintf) @@ -7006,7 +7049,6 @@ bool Sema::CheckFormatArguments(ArrayRef Args, case FormatStringType::Kprintf: case FormatStringType::FreeBSDKPrintf: case FormatStringType::Printf: - case FormatStringType::Syslog: Diag(FormatLoc, diag::note_format_security_fixit) << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); break; @@ -7673,6 +7715,14 @@ void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); } +// Error out if struct or complex type argments are passed to os_log. +static bool isInvalidOSLogArgTypeForCodeGen(FormatStringType FSType, + QualType T) { + if (FSType != FormatStringType::OSLog) + return false; + return T->isRecordType() || T->isComplexType(); +} + bool CheckPrintfHandler::HandleAmount( const analyze_format_string::OptionalAmount &Amt, unsigned k, const char *startSpecifier, unsigned specifierLen) { @@ -7705,11 +7755,14 @@ bool CheckPrintfHandler::HandleAmount( assert(AT.isValid()); if (!AT.matchesType(S.Context, T)) { - EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) - << k << AT.getRepresentativeTypeName(S.Context) - << T << Arg->getSourceRange(), + unsigned DiagID = isInvalidOSLogArgTypeForCodeGen(FSType, T) + ? diag::err_printf_asterisk_wrong_type + : diag::warn_printf_asterisk_wrong_type; + EmitFormatDiagnostic(S.PDiag(DiagID) + << k << AT.getRepresentativeTypeName(S.Context) + << T << Arg->getSourceRange(), getLocationOfByte(Amt.getStart()), - /*IsStringLocation*/true, + /*IsStringLocation*/ true, getSpecifierRange(startSpecifier, specifierLen)); // Don't do any more checking. We will just emit // spurious errors. 
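A minimal illustration of what the isInvalidOSLogArgTypeForCodeGen helper above is guarding against; this is a hedged sketch, not part of the patch, and it assumes Clang's __builtin_os_log_format builtin and only approximates the diagnostic wording:

    // Illustrative sketch, not part of the patch: with the NoMatch handling
    // below, record and complex arguments to the os_log format builtins are
    // rejected with an error in Sema instead of only producing a warning.
    struct S { int x; };
    void demo(S s, _Complex double c) {
      char buf[64];
      __builtin_os_log_format(buf, "%d", s); // now an error: argument type mismatch
      __builtin_os_log_format(buf, "%f", c); // now an error: argument type mismatch
    }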
@@ -8764,7 +8817,9 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; break; case ArgType::NoMatch: - Diag = diag::warn_format_conversion_argument_type_mismatch; + Diag = isInvalidOSLogArgTypeForCodeGen(FSType, ExprTy) + ? diag::err_format_conversion_argument_type_mismatch + : diag::warn_format_conversion_argument_type_mismatch; break; } @@ -9123,8 +9178,7 @@ static void CheckFormatString( if (Type == FormatStringType::Printf || Type == FormatStringType::NSString || Type == FormatStringType::Kprintf || Type == FormatStringType::FreeBSDKPrintf || - Type == FormatStringType::OSLog || Type == FormatStringType::OSTrace || - Type == FormatStringType::Syslog) { + Type == FormatStringType::OSLog || Type == FormatStringType::OSTrace) { bool IsObjC = Type == FormatStringType::NSString || Type == FormatStringType::OSTrace; if (ReferenceFormatString == nullptr) { @@ -9160,8 +9214,7 @@ bool Sema::CheckFormatStringsCompatible( if (Type != FormatStringType::Printf && Type != FormatStringType::NSString && Type != FormatStringType::Kprintf && Type != FormatStringType::FreeBSDKPrintf && - Type != FormatStringType::OSLog && Type != FormatStringType::OSTrace && - Type != FormatStringType::Syslog) + Type != FormatStringType::OSLog && Type != FormatStringType::OSTrace) return true; bool IsObjC = @@ -9195,8 +9248,7 @@ bool Sema::ValidateFormatString(FormatStringType Type, if (Type != FormatStringType::Printf && Type != FormatStringType::NSString && Type != FormatStringType::Kprintf && Type != FormatStringType::FreeBSDKPrintf && - Type != FormatStringType::OSLog && Type != FormatStringType::OSTrace && - Type != FormatStringType::Syslog) + Type != FormatStringType::OSLog && Type != FormatStringType::OSTrace) return true; FormatStringLiteral RefLit = Str; @@ -13030,7 +13082,19 @@ static void AnalyzeImplicitConversions( // Skip past explicit casts. if (auto *CE = dyn_cast(E)) { - E = CE->getSubExpr()->IgnoreParenImpCasts(); + E = CE->getSubExpr(); + // In the special case of a C++ function-style cast with braces, + // CXXFunctionalCastExpr has an InitListExpr as direct child with a single + // initializer. This InitListExpr basically belongs to the cast itself, so + // we skip it too. Specifically this is needed to silence -Wdouble-promotion + if (isa(CE)) { + if (auto *InitListE = dyn_cast(E)) { + if (InitListE->getNumInits() == 1) { + E = InitListE->getInit(0); + } + } + } + E = E->IgnoreParenImpCasts(); if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); WorkList.push_back({E, CC, IsListInit}); @@ -14817,13 +14881,11 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, // Diag message shows element size in bits and in "bytes" (platform- // dependent CharUnits) DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, - PDiag(DiagID) - << toString(index, 10, true) << AddrBits - << (unsigned)ASTC.toBits(*ElemCharUnits) - << toString(ElemBytes, 10, false) - << toString(MaxElems, 10, false) - << (unsigned)MaxElems.getLimitedValue(~0U) - << IndexExpr->getSourceRange()); + PDiag(DiagID) << index << AddrBits + << (unsigned)ASTC.toBits(*ElemCharUnits) + << ElemBytes << MaxElems + << MaxElems.getZExtValue() + << IndexExpr->getSourceRange()); const NamedDecl *ND = nullptr; // Try harder to find a NamedDecl to point at in the note. 
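A small sketch of the case the CXXFunctionalCastExpr/InitListExpr handling above is meant to cover; illustrative only, not part of the patch:

    // Illustrative sketch, not part of the patch: a brace-initialized
    // functional cast explicitly requests the widening, so the implicit
    // float-to-double conversion inside the InitListExpr should not be
    // reported by -Wdouble-promotion.
    double widen(float f) {
      return double{f};
    }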
@@ -14906,10 +14968,10 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1; QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType(); - DiagRuntimeBehavior( - BaseExpr->getBeginLoc(), BaseExpr, - PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar() - << CastMsg << CastMsgTy << IndexExpr->getSourceRange()); + DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, + PDiag(DiagID) + << index << ArrayTy->desugar() << CastMsg + << CastMsgTy << IndexExpr->getSourceRange()); } else { unsigned DiagID = diag::warn_array_index_precedes_bounds; if (!ASE) { @@ -14918,8 +14980,7 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, } DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, - PDiag(DiagID) << toString(index, 10, true) - << IndexExpr->getSourceRange()); + PDiag(DiagID) << index << IndexExpr->getSourceRange()); } const NamedDecl *ND = nullptr; @@ -15882,7 +15943,7 @@ void Sema::RefersToMemberWithReducedAlignment( } // Check if the synthesized offset fulfills the alignment. - if (Offset % ExpectedAlignment != 0 || + if (!Offset.isMultipleOf(ExpectedAlignment) || // It may fulfill the offset it but the effective alignment may still be // lower than the expected expression alignment. CompleteObjectAlignment < ExpectedAlignment) { diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp index d238b7916a330..dc6d232d9a525 100644 --- a/clang/lib/Sema/SemaConcept.cpp +++ b/clang/lib/Sema/SemaConcept.cpp @@ -193,7 +193,7 @@ DiagRecursiveConstraintEval(Sema &S, llvm::FoldingSetNodeID &ID, // Sema::InstantiatingTemplate::isAlreadyBeingInstantiated function. if (S.SatisfactionStackContains(Templ, ID)) { S.Diag(E->getExprLoc(), diag::err_constraint_depends_on_self) - << const_cast(E) << E->getSourceRange(); + << E << E->getSourceRange(); return true; } diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index e10511cc7fc4e..0069b08f1991a 100644 --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -6794,7 +6794,9 @@ bool Sema::tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo, if (SizeIsNegative) Diag(Loc, diag::err_typecheck_negative_array_size); else if (Oversized.getBoolValue()) - Diag(Loc, diag::err_array_too_large) << toString(Oversized, 10); + Diag(Loc, diag::err_array_too_large) << toString( + Oversized, 10, Oversized.isSigned(), /*formatAsCLiteral=*/false, + /*UpperCase=*/false, /*InsertSeparators=*/true); else if (FailedFoldDiagID) Diag(Loc, FailedFoldDiagID); return false; @@ -18907,8 +18909,7 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc, // 'bool'. if (BitfieldIsOverwide && !FieldTy->isBooleanType() && FieldName) { Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_width) - << FieldName << toString(Value, 10) - << (unsigned)TypeWidth; + << FieldName << Value << (unsigned)TypeWidth; } } diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp index b876911384f6f..328ccf6694073 100644 --- a/clang/lib/Sema/SemaDeclAttr.cpp +++ b/clang/lib/Sema/SemaDeclAttr.cpp @@ -3629,10 +3629,11 @@ static FormatAttrKind getFormatAttrKind(StringRef Format) { // Check for formats that get handled specially. .Case("NSString", NSStringFormat) .Case("CFString", CFStringFormat) - .Case("strftime", StrftimeFormat) + .Cases("gnu_strftime", "strftime", StrftimeFormat) // Otherwise, check for supported formats. 
- .Cases("scanf", "printf", "printf0", "strfmon", SupportedFormat) + .Cases("gnu_scanf", "scanf", "gnu_printf", "printf", "printf0", + "gnu_strfmon", "strfmon", SupportedFormat) .Cases("cmn_err", "vcmn_err", "zcmn_err", SupportedFormat) .Cases("kprintf", "syslog", SupportedFormat) // OpenBSD. .Case("freebsd_kprintf", SupportedFormat) // FreeBSD. @@ -6360,19 +6361,8 @@ static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) { Sanitizers.size())); } -static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D, - const ParsedAttr &AL) { - StringRef AttrName = AL.getAttrName()->getName(); - normalizeName(AttrName); - StringRef SanitizerName = llvm::StringSwitch(AttrName) - .Case("no_address_safety_analysis", "address") - .Case("no_sanitize_address", "address") - .Case("no_sanitize_thread", "thread") - .Case("no_sanitize_memory", "memory"); - if (isGlobalVar(D) && SanitizerName != "address") - S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type) - << AL << AL.isRegularKeywordAttribute() << ExpectedFunction; - +static AttributeCommonInfo +getNoSanitizeAttrInfo(const ParsedAttr &NoSanitizeSpecificAttr) { // FIXME: Rather than create a NoSanitizeSpecificAttr, this creates a // NoSanitizeAttr object; but we need to calculate the correct spelling list // index rather than incorrectly assume the index for NoSanitizeSpecificAttr @@ -6382,11 +6372,32 @@ static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D, // getSpelling() or prettyPrint() on the resulting semantic attribute object // without failing assertions. unsigned TranslatedSpellingIndex = 0; - if (AL.isStandardAttributeSyntax()) + if (NoSanitizeSpecificAttr.isStandardAttributeSyntax()) TranslatedSpellingIndex = 1; - AttributeCommonInfo Info = AL; + AttributeCommonInfo Info = NoSanitizeSpecificAttr; Info.setAttributeSpellingListIndex(TranslatedSpellingIndex); + return Info; +} + +static void handleNoSanitizeAddressAttr(Sema &S, Decl *D, + const ParsedAttr &AL) { + StringRef SanitizerName = "address"; + AttributeCommonInfo Info = getNoSanitizeAttrInfo(AL); + D->addAttr(::new (S.Context) + NoSanitizeAttr(S.Context, Info, &SanitizerName, 1)); +} + +static void handleNoSanitizeThreadAttr(Sema &S, Decl *D, const ParsedAttr &AL) { + StringRef SanitizerName = "thread"; + AttributeCommonInfo Info = getNoSanitizeAttrInfo(AL); + D->addAttr(::new (S.Context) + NoSanitizeAttr(S.Context, Info, &SanitizerName, 1)); +} + +static void handleNoSanitizeMemoryAttr(Sema &S, Decl *D, const ParsedAttr &AL) { + StringRef SanitizerName = "memory"; + AttributeCommonInfo Info = getNoSanitizeAttrInfo(AL); D->addAttr(::new (S.Context) NoSanitizeAttr(S.Context, Info, &SanitizerName, 1)); } @@ -7512,8 +7523,14 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL, case ParsedAttr::AT_NoSanitize: handleNoSanitizeAttr(S, D, AL); break; - case ParsedAttr::AT_NoSanitizeSpecific: - handleNoSanitizeSpecificAttr(S, D, AL); + case ParsedAttr::AT_NoSanitizeAddress: + handleNoSanitizeAddressAttr(S, D, AL); + break; + case ParsedAttr::AT_NoSanitizeThread: + handleNoSanitizeThreadAttr(S, D, AL); + break; + case ParsedAttr::AT_NoSanitizeMemory: + handleNoSanitizeMemoryAttr(S, D, AL); break; case ParsedAttr::AT_GuardedBy: handleGuardedByAttr(S, D, AL); diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp index ea08f41437e70..16d42d27d3b4e 100644 --- a/clang/lib/Sema/SemaDeclCXX.cpp +++ b/clang/lib/Sema/SemaDeclCXX.cpp @@ -768,59 +768,45 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D, // C++23 
[dcl.pre]/6: // Each decl-specifier in the decl-specifier-seq shall be static, // thread_local, auto (9.2.9.6 [dcl.spec.auto]), or a cv-qualifier. + // C++23 [dcl.pre]/7: + // Each decl-specifier in the decl-specifier-seq shall be constexpr, + // constinit, static, thread_local, auto, or a cv-qualifier auto &DS = D.getDeclSpec(); - { - // Note: While constrained-auto needs to be checked, we do so separately so - // we can emit a better diagnostic. - SmallVector BadSpecifiers; - SmallVector BadSpecifierLocs; - SmallVector CPlusPlus20Specifiers; - SmallVector CPlusPlus20SpecifierLocs; - if (auto SCS = DS.getStorageClassSpec()) { - if (SCS == DeclSpec::SCS_static) { - CPlusPlus20Specifiers.push_back(DeclSpec::getSpecifierName(SCS)); - CPlusPlus20SpecifierLocs.push_back(DS.getStorageClassSpecLoc()); - } else { - BadSpecifiers.push_back(DeclSpec::getSpecifierName(SCS)); - BadSpecifierLocs.push_back(DS.getStorageClassSpecLoc()); - } - } - if (auto TSCS = DS.getThreadStorageClassSpec()) { - CPlusPlus20Specifiers.push_back(DeclSpec::getSpecifierName(TSCS)); - CPlusPlus20SpecifierLocs.push_back(DS.getThreadStorageClassSpecLoc()); - } - if (DS.hasConstexprSpecifier()) { - BadSpecifiers.push_back( - DeclSpec::getSpecifierName(DS.getConstexprSpecifier())); - BadSpecifierLocs.push_back(DS.getConstexprSpecLoc()); - } - if (DS.isInlineSpecified()) { - BadSpecifiers.push_back("inline"); - BadSpecifierLocs.push_back(DS.getInlineSpecLoc()); - } - - if (!BadSpecifiers.empty()) { - auto &&Err = Diag(BadSpecifierLocs.front(), diag::err_decomp_decl_spec); - Err << (int)BadSpecifiers.size() - << llvm::join(BadSpecifiers.begin(), BadSpecifiers.end(), " "); - // Don't add FixItHints to remove the specifiers; we do still respect - // them when building the underlying variable. - for (auto Loc : BadSpecifierLocs) - Err << SourceRange(Loc, Loc); - } else if (!CPlusPlus20Specifiers.empty()) { - auto &&Warn = DiagCompat(CPlusPlus20SpecifierLocs.front(), - diag_compat::decomp_decl_spec); - Warn << (int)CPlusPlus20Specifiers.size() - << llvm::join(CPlusPlus20Specifiers.begin(), - CPlusPlus20Specifiers.end(), " "); - for (auto Loc : CPlusPlus20SpecifierLocs) - Warn << SourceRange(Loc, Loc); - } - // We can't recover from it being declared as a typedef. - if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) - return nullptr; + auto DiagBadSpecifier = [&](StringRef Name, SourceLocation Loc) { + Diag(Loc, diag::err_decomp_decl_spec) << Name; + }; + + auto DiagCpp20Specifier = [&](StringRef Name, SourceLocation Loc) { + DiagCompat(Loc, diag_compat::decomp_decl_spec) << Name; + }; + + if (auto SCS = DS.getStorageClassSpec()) { + if (SCS == DeclSpec::SCS_static) + DiagCpp20Specifier(DeclSpec::getSpecifierName(SCS), + DS.getStorageClassSpecLoc()); + else + DiagBadSpecifier(DeclSpec::getSpecifierName(SCS), + DS.getStorageClassSpecLoc()); + } + if (auto TSCS = DS.getThreadStorageClassSpec()) + DiagCpp20Specifier(DeclSpec::getSpecifierName(TSCS), + DS.getThreadStorageClassSpecLoc()); + + if (DS.isInlineSpecified()) + DiagBadSpecifier("inline", DS.getInlineSpecLoc()); + + if (ConstexprSpecKind ConstexprSpec = DS.getConstexprSpecifier(); + ConstexprSpec != ConstexprSpecKind::Unspecified) { + if (ConstexprSpec == ConstexprSpecKind::Consteval || + !getLangOpts().CPlusPlus26) + DiagBadSpecifier(DeclSpec::getSpecifierName(ConstexprSpec), + DS.getConstexprSpecLoc()); } + // We can't recover from it being declared as a typedef. 
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) + return nullptr; + // C++2a [dcl.struct.bind]p1: // A cv that includes volatile is deprecated if ((DS.getTypeQualifiers() & DeclSpec::TQ_volatile) && @@ -13674,7 +13660,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, if (Cxx20Enumerator) { Diag(NameLoc, diag::warn_cxx17_compat_using_decl_non_member_enumerator) - << SS.getRange(); + << SS.getScopeRep() << SS.getRange(); return false; } diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp index 552c92996dc2e..a0483c3027199 100644 --- a/clang/lib/Sema/SemaExceptionSpec.cpp +++ b/clang/lib/Sema/SemaExceptionSpec.cpp @@ -1493,6 +1493,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) { case Stmt::OMPUnrollDirectiveClass: case Stmt::OMPReverseDirectiveClass: case Stmt::OMPInterchangeDirectiveClass: + case Stmt::OMPFuseDirectiveClass: case Stmt::OMPSingleDirectiveClass: case Stmt::OMPTargetDataDirectiveClass: case Stmt::OMPTargetDirectiveClass: diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index 3b267c1b1693d..4d3c7d611f370 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -16791,12 +16791,11 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *OrigExpr = E; bool IsMS = false; - // CUDA device code does not support varargs. + // CUDA device global function does not support varargs. if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { if (const FunctionDecl *F = dyn_cast(CurContext)) { CUDAFunctionTarget T = CUDA().IdentifyTarget(F); - if (T == CUDAFunctionTarget::Global || T == CUDAFunctionTarget::Device || - T == CUDAFunctionTarget::HostDevice) + if (T == CUDAFunctionTarget::Global) return ExprError(Diag(E->getBeginLoc(), diag::err_va_arg_in_device)); } } @@ -20109,7 +20108,9 @@ static void DoMarkVarDeclReferenced( isPotentiallyConstantEvaluatedContext(SemaRef) && UsableInConstantExpr; bool NeedDefinition = - OdrUse == OdrUseContext::Used || NeededForConstantEvaluation; + OdrUse == OdrUseContext::Used || NeededForConstantEvaluation || + (TSK != clang::TSK_Undeclared && !UsableInConstantExpr && + Var->getType()->isUndeducedType()); assert(!isa(Var) && "Can't instantiate a partial template specialization."); diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp index 293097fd708fb..576eb326e6529 100644 --- a/clang/lib/Sema/SemaExprCXX.cpp +++ b/clang/lib/Sema/SemaExprCXX.cpp @@ -1251,6 +1251,10 @@ Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S, else Record = cast(ContextDecl); + // 'this' never refers to the lambda class itself. 
+ if (Record->isLambda()) + return; + QualType T = S.Context.getCanonicalTagType(Record); T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals); @@ -2395,7 +2399,10 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal, if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) return ExprError( Diag((*ArraySize)->getBeginLoc(), diag::err_array_too_large) - << toString(*Value, 10) << (*ArraySize)->getSourceRange()); + << toString(*Value, 10, Value->isSigned(), + /*formatAsCLiteral=*/false, /*UpperCase=*/false, + /*InsertSeparators=*/true) + << (*ArraySize)->getSourceRange()); } KnownArraySize = Value->getZExtValue(); diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index e01673f64b781..fa30c66b62684 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -19,6 +19,7 @@ #include "clang/AST/DeclarationName.h" #include "clang/AST/DynamicRecursiveASTVisitor.h" #include "clang/AST/Expr.h" +#include "clang/AST/HLSLResource.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/Builtins.h" @@ -52,6 +53,7 @@ #include using namespace clang; +using namespace clang::hlsl; using RegisterType = HLSLResourceBindingAttr::RegisterType; static CXXRecordDecl *createHostLayoutStruct(Sema &S, @@ -1808,6 +1810,13 @@ bool clang::CreateHLSLAttributedResourceType( } ResAttrs.RawBuffer = true; break; + case attr::HLSLIsCounter: + if (ResAttrs.IsCounter) { + S.Diag(A->getLocation(), diag::warn_duplicate_attribute_exact) << A; + return false; + } + ResAttrs.IsCounter = true; + break; case attr::HLSLContainedType: { const HLSLContainedTypeAttr *CTAttr = cast(A); QualType Ty = CTAttr->getType(); @@ -1900,6 +1909,10 @@ bool SemaHLSL::handleResourceTypeAttr(QualType T, const ParsedAttr &AL) { A = HLSLRawBufferAttr::Create(getASTContext(), ACI); break; + case ParsedAttr::AT_HLSLIsCounter: + A = HLSLIsCounterAttr::Create(getASTContext(), ACI); + break; + case ParsedAttr::AT_HLSLContainedType: { if (AL.getNumArgs() != 1 && !AL.hasParsedType()) { Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1; @@ -3090,7 +3103,8 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { return true; break; } - case Builtin::BI__builtin_hlsl_elementwise_isinf: { + case Builtin::BI__builtin_hlsl_elementwise_isinf: + case Builtin::BI__builtin_hlsl_elementwise_isnan: { if (SemaRef.checkArgCount(TheCall, 1)) return true; if (CheckAllArgTypesAreCorrect(&SemaRef, TheCall, @@ -3291,7 +3305,6 @@ static void BuildFlattenedTypeList(QualType BaseTy, while (!WorkList.empty()) { QualType T = WorkList.pop_back_val(); T = T.getCanonicalType().getUnqualifiedType(); - assert(!isa(T) && "Matrix types not yet supported in HLSL"); if (const auto *AT = dyn_cast(T)) { llvm::SmallVector ElementFields; // Generally I've avoided recursion in this algorithm, but arrays of @@ -3323,7 +3336,8 @@ static void BuildFlattenedTypeList(QualType BaseTy, llvm::SmallVector FieldTypes; for (const auto *FD : RD->fields()) - FieldTypes.push_back(FD->getType()); + if (!FD->isUnnamedBitField()) + FieldTypes.push_back(FD->getType()); // Reverse the newly added sub-range. std::reverse(FieldTypes.begin(), FieldTypes.end()); llvm::append_range(WorkList, FieldTypes); @@ -3798,19 +3812,8 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) { uint64_t UIntTySize = AST.getTypeSize(AST.UnsignedIntTy); uint64_t IntTySize = AST.getTypeSize(AST.IntTy); - // Gather resource binding information from attributes. 
- HLSLResourceBindingAttr *RBA = VD->getAttr(); - HLSLVkBindingAttr *VkBinding = VD->getAttr(); - std::optional RegisterSlot; - uint32_t SpaceNo = 0; - if (VkBinding) { - RegisterSlot = VkBinding->getBinding(); - SpaceNo = VkBinding->getSet(); - } else if (RBA) { - if (RBA->hasRegisterSlot()) - RegisterSlot = RBA->getSlotNumber(); - SpaceNo = RBA->getSpaceNumber(); - } + // Gather resource binding attributes. + ResourceBindingAttrs Binding(VD); // Find correct initialization method and create its arguments. QualType ResourceTy = VD->getType(); @@ -3818,21 +3821,21 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) { CXXMethodDecl *CreateMethod = nullptr; llvm::SmallVector Args; - if (RegisterSlot.has_value()) { + if (Binding.isExplicit()) { // The resource has explicit binding. CreateMethod = lookupMethod(SemaRef, ResourceDecl, "__createFromBinding", VD->getLocation()); - IntegerLiteral *RegSlot = IntegerLiteral::Create( - AST, llvm::APInt(UIntTySize, RegisterSlot.value()), AST.UnsignedIntTy, - SourceLocation()); + IntegerLiteral *RegSlot = + IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, Binding.getSlot()), + AST.UnsignedIntTy, SourceLocation()); Args.push_back(RegSlot); } else { // The resource has implicit binding. CreateMethod = lookupMethod(SemaRef, ResourceDecl, "__createFromImplicitBinding", VD->getLocation()); - uint32_t OrderID = (RBA && RBA->hasImplicitBindingOrderID()) - ? RBA->getImplicitBindingOrderID() + uint32_t OrderID = (Binding.hasImplicitOrderID()) + ? Binding.getImplicitOrderID() : getNextImplicitBindingOrderID(); IntegerLiteral *OrderId = IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, OrderID), @@ -3847,7 +3850,7 @@ bool SemaHLSL::initGlobalResourceDecl(VarDecl *VD) { return false; IntegerLiteral *Space = - IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, SpaceNo), + IntegerLiteral::Create(AST, llvm::APInt(UIntTySize, Binding.getSpace()), AST.UnsignedIntTy, SourceLocation()); Args.push_back(Space); @@ -4158,6 +4161,8 @@ class InitListTransformer { while (!RecordDecls.empty()) { CXXRecordDecl *RD = RecordDecls.pop_back_val(); for (auto *FD : RD->fields()) { + if (FD->isUnnamedBitField()) + continue; DeclAccessPair Found = DeclAccessPair::make(FD, FD->getAccess()); DeclarationNameInfo NameInfo(FD->getDeclName(), E->getBeginLoc()); ExprResult Res = S.BuildFieldReferenceExpr( @@ -4207,7 +4212,8 @@ class InitListTransformer { while (!RecordDecls.empty()) { CXXRecordDecl *RD = RecordDecls.pop_back_val(); for (auto *FD : RD->fields()) - Inits.push_back(generateInitListsImpl(FD->getType())); + if (!FD->isUnnamedBitField()) + Inits.push_back(generateInitListsImpl(FD->getType())); } } auto *NewInit = new (Ctx) InitListExpr(Ctx, Inits.front()->getBeginLoc(), @@ -4280,6 +4286,9 @@ bool SemaHLSL::transformInitList(const InitializedEntity &Entity, } size_t ExpectedSize = ILT.DestTypes.size(); size_t ActualSize = ILT.ArgExprs.size(); + if (ExpectedSize == 0 && ActualSize == 0) + return true; + // For incomplete arrays it is completely arbitrary to choose whether we think // the user intended fewer or more elements. 
This implementation assumes that // the user intended more, and errors that there are too few initializers to diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp index fbd8022cd68ba..9aaf7f463403d 100644 --- a/clang/lib/Sema/SemaOpenACC.cpp +++ b/clang/lib/Sema/SemaOpenACC.cpp @@ -2758,16 +2758,18 @@ ExprResult FinishValueInit(Sema &S, InitializedEntity &Entity, } // namespace OpenACCPrivateRecipe SemaOpenACC::CreatePrivateInitRecipe(const Expr *VarExpr) { - VarExpr = StripOffBounds(VarExpr); - + // We don't strip bounds here, so that we are doing our recipe init at the + // 'lowest' possible level. Codegen is going to have to do its own 'looping'. if (!VarExpr || VarExpr->getType()->isDependentType()) return OpenACCPrivateRecipe::Empty(); QualType VarTy = VarExpr->getType().getNonReferenceType().getUnqualifiedType(); - // TODO: OpenACC: for arrays/bounds versions, we're going to have to do a - // different initializer, but for now we can go ahead with this. + // Array sections are special, and we have to treat them that way. + if (const auto *ASE = + dyn_cast(VarExpr->IgnoreParenImpCasts())) + VarTy = ArraySectionExpr::getBaseOriginalType(ASE); VarDecl *AllocaDecl = CreateAllocaDecl( getASTContext(), SemaRef.getCurContext(), VarExpr->getBeginLoc(), @@ -2780,11 +2782,19 @@ OpenACCPrivateRecipe SemaOpenACC::CreatePrivateInitRecipe(const Expr *VarExpr) { InitializationSequence InitSeq(SemaRef.SemaRef, Entity, Kind, {}); ExprResult Init = InitSeq.Perform(SemaRef.SemaRef, Entity, Kind, {}); - return OpenACCPrivateRecipe(AllocaDecl, Init.get()); + // For 'no bounds' version, we can use this as a shortcut, so set the init + // anyway. + if (Init.isUsable()) { + AllocaDecl->setInit(Init.get()); + AllocaDecl->setInitStyle(VarDecl::CallInit); + } + + return OpenACCPrivateRecipe(AllocaDecl); } OpenACCFirstPrivateRecipe SemaOpenACC::CreateFirstPrivateInitRecipe(const Expr *VarExpr) { + // TODO: OpenACC: This shouldn't be necessary, see PrivateInitRecipe VarExpr = StripOffBounds(VarExpr); if (!VarExpr || VarExpr->getType()->isDependentType()) @@ -2818,7 +2828,14 @@ SemaOpenACC::CreateFirstPrivateInitRecipe(const Expr *VarExpr) { if (!ArrTy) { ExprResult Init = FinishValueInit( SemaRef.SemaRef, Entity, VarExpr->getBeginLoc(), VarTy, TemporaryDRE); - return OpenACCFirstPrivateRecipe(AllocaDecl, Init.get(), Temporary); + + // For 'no bounds' version, we can use this as a shortcut, so set the init + // anyway. + if (Init.isUsable()) { + AllocaDecl->setInit(Init.get()); + AllocaDecl->setInitStyle(VarDecl::CallInit); + } + return OpenACCFirstPrivateRecipe(AllocaDecl, Temporary); } // Arrays need to have each individual element initialized as there @@ -2865,10 +2882,19 @@ SemaOpenACC::CreateFirstPrivateInitRecipe(const Expr *VarExpr) { ExprResult Init = FinishValueInit(SemaRef.SemaRef, Entity, VarExpr->getBeginLoc(), VarTy, InitExpr); - return OpenACCFirstPrivateRecipe(AllocaDecl, Init.get(), Temporary); + // For 'no bounds' version, we can use this as a shortcut, so set the init + // anyway. 
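  // Illustrative sketch of the clause shape involved (assumed user code): a
  // private clause naming an array section, for which the recipe's alloca is
  // typed from the base array rather than from the section itself.
  //
  //   float a[100];
  //   void zero(void) {
  //   #pragma acc parallel loop private(a[0:50])
  //     for (int i = 0; i < 50; ++i)
  //       a[i] = 0.0f;
  //   }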
+ if (Init.isUsable()) { + AllocaDecl->setInit(Init.get()); + AllocaDecl->setInitStyle(VarDecl::CallInit); + } + + return OpenACCFirstPrivateRecipe(AllocaDecl, Temporary); } + OpenACCReductionRecipe SemaOpenACC::CreateReductionInitRecipe( OpenACCReductionOperator ReductionOperator, const Expr *VarExpr) { + // TODO: OpenACC: This shouldn't be necessary, see PrivateInitRecipe VarExpr = StripOffBounds(VarExpr); if (!VarExpr || VarExpr->getType()->isDependentType()) @@ -2921,5 +2947,12 @@ OpenACCReductionRecipe SemaOpenACC::CreateReductionInitRecipe( ExprResult Init = FinishValueInit(SemaRef.SemaRef, Entity, VarExpr->getBeginLoc(), VarTy, InitExpr); - return OpenACCReductionRecipe(AllocaDecl, Init.get()); + + // For 'no bounds' version, we can use this as a shortcut, so set the init + // anyway. + if (Init.isUsable()) { + AllocaDecl->setInit(Init.get()); + AllocaDecl->setInitStyle(VarDecl::CallInit); + } + return OpenACCReductionRecipe(AllocaDecl); } diff --git a/clang/lib/Sema/SemaOpenACCAtomic.cpp b/clang/lib/Sema/SemaOpenACCAtomic.cpp index a9319dce6c586..ad21129d30c15 100644 --- a/clang/lib/Sema/SemaOpenACCAtomic.cpp +++ b/clang/lib/Sema/SemaOpenACCAtomic.cpp @@ -454,9 +454,7 @@ class AtomicOperandChecker { // If nothing matches, error out. DiagnoseInvalidAtomic(BinInf->FoundExpr->getExprLoc(), SemaRef.PDiag(diag::note_acc_atomic_mismatch_operand) - << const_cast(AssignInf.LHS) - << const_cast(BinInf->LHS) - << const_cast(BinInf->RHS)); + << AssignInf.LHS << BinInf->LHS << BinInf->RHS); return IDACInfo::Fail(); } @@ -592,8 +590,7 @@ class AtomicOperandChecker { PartialDiagnostic PD = SemaRef.PDiag(diag::note_acc_atomic_mismatch_compound_operand) - << FirstKind << const_cast(FirstX) << SecondKind - << const_cast(SecondX); + << FirstKind << FirstX << SecondKind << SecondX; return DiagnoseInvalidAtomic(SecondX->getExprLoc(), PD); } diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp index 48e06d1dc7579..0fa21e89b1236 100644 --- a/clang/lib/Sema/SemaOpenMP.cpp +++ b/clang/lib/Sema/SemaOpenMP.cpp @@ -2490,7 +2490,8 @@ VarDecl *SemaOpenMP::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo, DSAStackTy::DSAVarData DVarTop = DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode()); if (DVarTop.CKind != OMPC_unknown && isOpenMPPrivate(DVarTop.CKind) && - (!VD || VD->hasLocalStorage() || !DVarTop.AppliedToPointee)) + (!VD || VD->hasLocalStorage() || + !(DVarTop.AppliedToPointee && DVarTop.CKind != OMPC_reduction))) return VD ? VD : cast(DVarTop.PrivateCopy->getDecl()); // Threadprivate variables must not be captured. if (isOpenMPThreadPrivate(DVarTop.CKind)) @@ -4569,6 +4570,7 @@ void SemaOpenMP::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, case OMPD_unroll: case OMPD_reverse: case OMPD_interchange: + case OMPD_fuse: case OMPD_assume: break; default: @@ -6410,6 +6412,10 @@ StmtResult SemaOpenMP::ActOnOpenMPExecutableDirective( Res = ActOnOpenMPInterchangeDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; + case OMPD_fuse: + Res = + ActOnOpenMPFuseDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); + break; case OMPD_for: Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); @@ -9488,7 +9494,9 @@ static bool checkOpenMPIterationSpace( // sharing attributes. 
VarsWithImplicitDSA.erase(LCDecl); - assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars"); + assert((isOpenMPLoopDirective(DKind) || + isOpenMPCanonicalLoopSequenceTransformationDirective(DKind)) && + "DSA for non-loop vars"); // Check test-expr. HasErrors |= ISC.checkAndSetCond(For ? For->getCond() : CXXFor->getCond()); @@ -9916,7 +9924,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr, unsigned NumLoops = std::max(OrderedLoopCount, NestedLoopCount); SmallVector IterSpaces(NumLoops); if (!OMPLoopBasedDirective::doForAllLoops( - AStmt->IgnoreContainers(!isOpenMPLoopTransformationDirective(DKind)), + AStmt->IgnoreContainers( + !isOpenMPCanonicalLoopNestTransformationDirective(DKind)), SupportsNonPerfectlyNested, NumLoops, [DKind, &SemaRef, &DSA, NumLoops, NestedLoopCount, CollapseLoopCountExpr, OrderedLoopCountExpr, &VarsWithImplicitDSA, @@ -9938,8 +9947,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr, } return false; }, - [&SemaRef, - &Captures](OMPCanonicalLoopNestTransformationDirective *Transform) { + [&SemaRef, &Captures](OMPLoopTransformationDirective *Transform) { Stmt *DependentPreInits = Transform->getPreInits(); if (!DependentPreInits) return; @@ -9954,7 +9962,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr, auto *D = cast(C); DeclRefExpr *Ref = buildDeclRefExpr( SemaRef, D, D->getType().getNonReferenceType(), - Transform->getBeginLoc()); + cast(Transform->getDirective()) + ->getBeginLoc()); Captures[Ref] = Ref; } } @@ -14404,10 +14413,34 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeSimdDirective( getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } +/// Updates OriginalInits by checking Transform against loop transformation +/// directives and appending their pre-inits if a match is found. 
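// For context, an illustrative (assumed) use of the new '#pragma omp fuse'
// directive, whose pre-init statements flow through this helper just like
// those of tile, unroll, reverse and interchange:
//
//   #pragma omp fuse
//   {
//     for (int i = 0; i < n; ++i) a[i] = 0;
//     for (int j = 0; j < m; ++j) b[j] = 1;
//   }
//
// The two loop nests are rewritten into one fused loop over max(n, m), with
// each original body guarded by its own trip-count check (see
// ActOnOpenMPFuseDirective further down).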
+static void updatePreInits(OMPLoopTransformationDirective *Transform, + SmallVectorImpl &PreInits) { + Stmt *Dir = Transform->getDirective(); + switch (Dir->getStmtClass()) { +#define STMT(CLASS, PARENT) +#define ABSTRACT_STMT(CLASS) +#define COMMON_OMP_LOOP_TRANSFORMATION(CLASS, PARENT) \ + case Stmt::CLASS##Class: \ + appendFlattenedStmtList(PreInits, \ + static_cast(Dir)->getPreInits()); \ + break; +#define OMPCANONICALLOOPNESTTRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + COMMON_OMP_LOOP_TRANSFORMATION(CLASS, PARENT) +#define OMPCANONICALLOOPSEQUENCETRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + COMMON_OMP_LOOP_TRANSFORMATION(CLASS, PARENT) +#include "clang/AST/StmtNodes.inc" +#undef COMMON_OMP_LOOP_TRANSFORMATION + default: + llvm_unreachable("Not a loop transformation"); + } +} + bool SemaOpenMP::checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl &LoopHelpers, - Stmt *&Body, SmallVectorImpl> &OriginalInits) { + Stmt *&Body, SmallVectorImpl> &OriginalInits) { OriginalInits.emplace_back(); bool Result = OMPLoopBasedDirective::doForAllLoops( AStmt->IgnoreContainers(), /*TryImperfectlyNestedLoops=*/false, NumLoops, @@ -14433,29 +14466,268 @@ bool SemaOpenMP::checkTransformableLoopNest( OriginalInits.emplace_back(); return false; }, - [&OriginalInits](OMPLoopBasedDirective *Transform) { - Stmt *DependentPreInits; - if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else - llvm_unreachable("Unhandled loop transformation"); - - appendFlattenedStmtList(OriginalInits.back(), DependentPreInits); + [&OriginalInits](OMPLoopTransformationDirective *Transform) { + updatePreInits(Transform, OriginalInits.back()); }); assert(OriginalInits.back().empty() && "No preinit after innermost loop"); OriginalInits.pop_back(); return Result; } -/// Add preinit statements that need to be propageted from the selected loop. +/// Counts the total number of OpenMP canonical nested loops, including the +/// outermost loop (the original loop). PRECONDITION of this visitor is that it +/// must be invoked from the original loop to be analyzed. The traversal stops +/// for Decl's and Expr's given that they may contain inner loops that must not +/// be counted. +/// +/// Example AST structure for the code: +/// +/// int main() { +/// #pragma omp fuse +/// { +/// for (int i = 0; i < 100; i++) { <-- Outer loop +/// []() { +/// for(int j = 0; j < 100; j++) {} <-- NOT A LOOP (1) +/// }; +/// for(int j = 0; j < 5; ++j) {} <-- Inner loop +/// } +/// for (int r = 0; i < 100; i++) { <-- Outer loop +/// struct LocalClass { +/// void bar() { +/// for(int j = 0; j < 100; j++) {} <-- NOT A LOOP (2) +/// } +/// }; +/// for(int k = 0; k < 10; ++k) {} <-- Inner loop +/// {x = 5; for(k = 0; k < 10; ++k) x += k; x}; <-- NOT A LOOP (3) +/// } +/// } +/// } +/// (1) because in a different function (here: a lambda) +/// (2) because in a different function (here: class method) +/// (3) because considered to be intervening-code of non-perfectly nested loop +/// Result: Loop 'i' contains 2 loops, Loop 'r' also contains 2 loops. 
+class NestedLoopCounterVisitor final : public DynamicRecursiveASTVisitor { +private: + unsigned NestedLoopCount = 0; + +public: + explicit NestedLoopCounterVisitor() = default; + + unsigned getNestedLoopCount() const { return NestedLoopCount; } + + bool VisitForStmt(ForStmt *FS) override { + ++NestedLoopCount; + return true; + } + + bool VisitCXXForRangeStmt(CXXForRangeStmt *FRS) override { + ++NestedLoopCount; + return true; + } + + bool TraverseStmt(Stmt *S) override { + if (!S) + return true; + + // Skip traversal of all expressions, including special cases like + // LambdaExpr, StmtExpr, BlockExpr, and RequiresExpr. These expressions + // may contain inner statements (and even loops), but they are not part + // of the syntactic body of the surrounding loop structure. + // Therefore must not be counted. + if (isa(S)) + return true; + + // Only recurse into CompoundStmt (block {}) and loop bodies. + if (isa(S)) { + return DynamicRecursiveASTVisitor::TraverseStmt(S); + } + + // Stop traversal of the rest of statements, that break perfect + // loop nesting, such as control flow (IfStmt, SwitchStmt...). + return true; + } + + bool TraverseDecl(Decl *D) override { + // Stop in the case of finding a declaration, it is not important + // in order to find nested loops (Possible CXXRecordDecl, RecordDecl, + // FunctionDecl...). + return true; + } +}; + +bool SemaOpenMP::analyzeLoopSequence(Stmt *LoopSeqStmt, + LoopSequenceAnalysis &SeqAnalysis, + ASTContext &Context, + OpenMPDirectiveKind Kind) { + VarsWithInheritedDSAType TmpDSA; + // Helper Lambda to handle storing initialization and body statements for + // both ForStmt and CXXForRangeStmt. + auto StoreLoopStatements = [](LoopAnalysis &Analysis, Stmt *LoopStmt) { + if (auto *For = dyn_cast(LoopStmt)) { + Analysis.OriginalInits.push_back(For->getInit()); + Analysis.TheForStmt = For; + } else { + auto *CXXFor = cast(LoopStmt); + Analysis.OriginalInits.push_back(CXXFor->getBeginStmt()); + Analysis.TheForStmt = CXXFor; + } + }; + + // Helper lambda functions to encapsulate the processing of different + // derivations of the canonical loop sequence grammar + // Modularized code for handling loop generation and transformations. + auto AnalyzeLoopGeneration = [&](Stmt *Child) { + auto *LoopTransform = cast(Child); + Stmt *TransformedStmt = LoopTransform->getTransformedStmt(); + unsigned NumGeneratedTopLevelLoops = + LoopTransform->getNumGeneratedTopLevelLoops(); + // Handle the case where transformed statement is not available due to + // dependent contexts + if (!TransformedStmt) { + if (NumGeneratedTopLevelLoops > 0) { + SeqAnalysis.LoopSeqSize += NumGeneratedTopLevelLoops; + return true; + } + // Unroll full (0 loops produced) + Diag(Child->getBeginLoc(), diag::err_omp_not_for) + << 0 << getOpenMPDirectiveName(Kind); + return false; + } + // Handle loop transformations with multiple loop nests + // Unroll full + if (!NumGeneratedTopLevelLoops) { + Diag(Child->getBeginLoc(), diag::err_omp_not_for) + << 0 << getOpenMPDirectiveName(Kind); + return false; + } + // Loop transformatons such as split or loopranged fuse + if (NumGeneratedTopLevelLoops > 1) { + // Get the preinits related to this loop sequence generating + // loop transformation (i.e loopranged fuse, split...) 
+ // These preinits differ slightly from regular inits/pre-inits related + // to single loop generating loop transformations (interchange, unroll) + // given that they are not bounded to a particular loop nest + // so they need to be treated independently + updatePreInits(LoopTransform, SeqAnalysis.LoopSequencePreInits); + return analyzeLoopSequence(TransformedStmt, SeqAnalysis, Context, Kind); + } + // Vast majority: (Tile, Unroll, Stripe, Reverse, Interchange, Fuse all) + // Process the transformed loop statement + LoopAnalysis &NewTransformedSingleLoop = + SeqAnalysis.Loops.emplace_back(Child); + unsigned IsCanonical = checkOpenMPLoop( + Kind, nullptr, nullptr, TransformedStmt, SemaRef, *DSAStack, TmpDSA, + NewTransformedSingleLoop.HelperExprs); + + if (!IsCanonical) + return false; + + StoreLoopStatements(NewTransformedSingleLoop, TransformedStmt); + updatePreInits(LoopTransform, NewTransformedSingleLoop.TransformsPreInits); + + SeqAnalysis.LoopSeqSize++; + return true; + }; + + // Modularized code for handling regular canonical loops. + auto AnalyzeRegularLoop = [&](Stmt *Child) { + LoopAnalysis &NewRegularLoop = SeqAnalysis.Loops.emplace_back(Child); + unsigned IsCanonical = + checkOpenMPLoop(Kind, nullptr, nullptr, Child, SemaRef, *DSAStack, + TmpDSA, NewRegularLoop.HelperExprs); + + if (!IsCanonical) + return false; + + StoreLoopStatements(NewRegularLoop, Child); + NestedLoopCounterVisitor NLCV; + NLCV.TraverseStmt(Child); + return true; + }; + + // High level grammar validation. + for (Stmt *Child : LoopSeqStmt->children()) { + if (!Child) + continue; + // Skip over non-loop-sequence statements. + if (!LoopSequenceAnalysis::isLoopSequenceDerivation(Child)) { + Child = Child->IgnoreContainers(); + // Ignore empty compound statement. + if (!Child) + continue; + // In the case of a nested loop sequence ignoring containers would not + // be enough, a recurisve transversal of the loop sequence is required. + if (isa(Child)) { + if (!analyzeLoopSequence(Child, SeqAnalysis, Context, Kind)) + return false; + // Already been treated, skip this children + continue; + } + } + // Regular loop sequence handling. + if (LoopSequenceAnalysis::isLoopSequenceDerivation(Child)) { + if (LoopAnalysis::isLoopTransformation(Child)) { + if (!AnalyzeLoopGeneration(Child)) + return false; + // AnalyzeLoopGeneration updates SeqAnalysis.LoopSeqSize accordingly. + } else { + if (!AnalyzeRegularLoop(Child)) + return false; + SeqAnalysis.LoopSeqSize++; + } + } else { + // Report error for invalid statement inside canonical loop sequence. + Diag(Child->getBeginLoc(), diag::err_omp_not_for) + << 0 << getOpenMPDirectiveName(Kind); + return false; + } + } + return true; +} + +bool SemaOpenMP::checkTransformableLoopSequence( + OpenMPDirectiveKind Kind, Stmt *AStmt, LoopSequenceAnalysis &SeqAnalysis, + ASTContext &Context) { + // Following OpenMP 6.0 API Specification, a Canonical Loop Sequence follows + // the grammar: + // + // canonical-loop-sequence: + // { + // loop-sequence+ + // } + // where loop-sequence can be any of the following: + // 1. canonical-loop-sequence + // 2. loop-nest + // 3. loop-sequence-generating-construct (i.e OMPLoopTransformationDirective) + // + // To recognise and traverse this structure the helper function + // analyzeLoopSequence serves as the recurisve entry point + // and tries to match the input AST to the canonical loop sequence grammar + // structure. 
This function will perform both a semantic and syntactical + // analysis of the given statement according to OpenMP 6.0 definition of + // the aforementioned canonical loop sequence. + + // We expect an outer compound statement. + if (!isa(AStmt)) { + Diag(AStmt->getBeginLoc(), diag::err_omp_not_a_loop_sequence) + << getOpenMPDirectiveName(Kind); + return false; + } + + // Recursive entry point to process the main loop sequence + if (!analyzeLoopSequence(AStmt, SeqAnalysis, Context, Kind)) + return false; + + // Diagnose an empty loop sequence. + if (!SeqAnalysis.LoopSeqSize) { + Diag(AStmt->getBeginLoc(), diag::err_omp_empty_loop_sequence) + << getOpenMPDirectiveName(Kind); + return false; + } + return true; +} + +/// Add preinit statements that need to be propagated from the selected loop. static void addLoopPreInits(ASTContext &Context, OMPLoopBasedDirective::HelperExprs &LoopHelper, Stmt *LoopStmt, ArrayRef OriginalInit, @@ -14540,7 +14812,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef Clauses, // Verify and diagnose loop nest. SmallVector LoopHelpers(NumLoops); Stmt *Body = nullptr; - SmallVector, 4> OriginalInits; + SmallVector, 4> OriginalInits; if (!checkTransformableLoopNest(OMPD_tile, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -14817,7 +15089,7 @@ StmtResult SemaOpenMP::ActOnOpenMPStripeDirective(ArrayRef Clauses, // Verify and diagnose loop nest. SmallVector LoopHelpers(NumLoops); Stmt *Body = nullptr; - SmallVector, 4> OriginalInits; + SmallVector, 4> OriginalInits; if (!checkTransformableLoopNest(OMPD_stripe, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -15078,7 +15350,7 @@ StmtResult SemaOpenMP::ActOnOpenMPUnrollDirective(ArrayRef Clauses, Stmt *Body = nullptr; SmallVector LoopHelpers( NumLoops); - SmallVector, NumLoops + 1> OriginalInits; + SmallVector, NumLoops + 1> OriginalInits; if (!checkTransformableLoopNest(OMPD_unroll, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -15348,7 +15620,7 @@ StmtResult SemaOpenMP::ActOnOpenMPReverseDirective(Stmt *AStmt, Stmt *Body = nullptr; SmallVector LoopHelpers( NumLoops); - SmallVector, NumLoops + 1> OriginalInits; + SmallVector, NumLoops + 1> OriginalInits; if (!checkTransformableLoopNest(OMPD_reverse, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -15540,7 +15812,7 @@ StmtResult SemaOpenMP::ActOnOpenMPInterchangeDirective( // Verify and diagnose loop nest. 
SmallVector LoopHelpers(NumLoops); Stmt *Body = nullptr; - SmallVector, 2> OriginalInits; + SmallVector, 2> OriginalInits; if (!checkTransformableLoopNest(OMPD_interchange, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -15716,6 +15988,484 @@ StmtResult SemaOpenMP::ActOnOpenMPInterchangeDirective( buildPreInits(Context, PreInits)); } +StmtResult SemaOpenMP::ActOnOpenMPFuseDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { + + ASTContext &Context = getASTContext(); + DeclContext *CurrContext = SemaRef.CurContext; + Scope *CurScope = SemaRef.getCurScope(); + CaptureVars CopyTransformer(SemaRef); + + // Ensure the structured block is not empty + if (!AStmt) + return StmtError(); + + // Defer transformation in dependent contexts + // The NumLoopNests argument is set to a placeholder 1 (even though + // using looprange fuse could yield up to 3 top level loop nests) + // because a dependent context could prevent determining its true value + if (CurrContext->isDependentContext()) + return OMPFuseDirective::Create(Context, StartLoc, EndLoc, Clauses, + /* NumLoops */ 1, AStmt, nullptr, nullptr); + + // Validate that the potential loop sequence is transformable for fusion + // Also collect the HelperExprs, Loop Stmts, Inits, and Number of loops + LoopSequenceAnalysis SeqAnalysis; + if (!checkTransformableLoopSequence(OMPD_fuse, AStmt, SeqAnalysis, Context)) + return StmtError(); + + // SeqAnalysis.LoopSeqSize exists mostly to handle dependent contexts, + // otherwise it must be the same as SeqAnalysis.Loops.size(). + assert(SeqAnalysis.LoopSeqSize == SeqAnalysis.Loops.size() && + "Inconsistent size of the loop sequence and the number of loops " + "found in the sequence"); + + // Handle clauses, which can be any of the following: [looprange, apply] + const auto *LRC = + OMPExecutableDirective::getSingleClause(Clauses); + + // The clause arguments are invalidated if any error arises + // such as non-constant or non-positive arguments + if (LRC && (!LRC->getFirst() || !LRC->getCount())) + return StmtError(); + + // Delayed semantic check of LoopRange constraint + // Evaluates the loop range arguments and returns the first and count values + auto EvaluateLoopRangeArguments = [&Context](Expr *First, Expr *Count, + uint64_t &FirstVal, + uint64_t &CountVal) { + llvm::APSInt FirstInt = First->EvaluateKnownConstInt(Context); + llvm::APSInt CountInt = Count->EvaluateKnownConstInt(Context); + FirstVal = FirstInt.getZExtValue(); + CountVal = CountInt.getZExtValue(); + }; + + // OpenMP [6.0, Restrictions] + // first + count - 1 must not evaluate to a value greater than the + // loop sequence length of the associated canonical loop sequence. + auto ValidLoopRange = [](uint64_t FirstVal, uint64_t CountVal, + unsigned NumLoops) -> bool { + return FirstVal + CountVal - 1 <= NumLoops; + }; + uint64_t FirstVal = 1, CountVal = 0, LastVal = SeqAnalysis.LoopSeqSize; + + // Validates the loop range after evaluating the semantic information + // and ensures that the range is valid for the given loop sequence size. + // Expressions are evaluated at compile time to obtain constant values. 
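  // Worked example (assumed user code, for illustration): with a sequence of
  // four loop nests and 'looprange(2, 2)', first + count - 1 = 3 <= 4, so the
  // clause is valid; loops 2 and 3 are fused while loops 1 and 4 are left
  // untouched, giving LoopSeqSize - CountVal + 1 = 3 top-level loops after the
  // transformation.
  //
  //   #pragma omp fuse looprange(2, 2)
  //   {
  //     for (int i = 0; i < n; ++i) a[i] = 0;   // loop 1: untouched
  //     for (int j = 0; j < n; ++j) b[j] = 1;   // loop 2: fused
  //     for (int k = 0; k < n; ++k) c[k] = 2;   // loop 3: fused
  //     for (int l = 0; l < n; ++l) d[l] = 3;   // loop 4: untouched
  //   }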
+ if (LRC) { + EvaluateLoopRangeArguments(LRC->getFirst(), LRC->getCount(), FirstVal, + CountVal); + if (CountVal == 1) + SemaRef.Diag(LRC->getCountLoc(), diag::warn_omp_redundant_fusion) + << getOpenMPDirectiveName(OMPD_fuse); + + if (!ValidLoopRange(FirstVal, CountVal, SeqAnalysis.LoopSeqSize)) { + SemaRef.Diag(LRC->getFirstLoc(), diag::err_omp_invalid_looprange) + << getOpenMPDirectiveName(OMPD_fuse) << FirstVal + << (FirstVal + CountVal - 1) << SeqAnalysis.LoopSeqSize; + return StmtError(); + } + + LastVal = FirstVal + CountVal - 1; + } + + // Complete fusion generates a single canonical loop nest + // However looprange clause may generate several loop nests + unsigned NumGeneratedTopLevelLoops = + LRC ? SeqAnalysis.LoopSeqSize - CountVal + 1 : 1; + + // Emit a warning for redundant loop fusion when the sequence contains only + // one loop. + if (SeqAnalysis.LoopSeqSize == 1) + SemaRef.Diag(AStmt->getBeginLoc(), diag::warn_omp_redundant_fusion) + << getOpenMPDirectiveName(OMPD_fuse); + + // Select the type with the largest bit width among all induction variables + QualType IVType = + SeqAnalysis.Loops[FirstVal - 1].HelperExprs.IterationVarRef->getType(); + for (unsigned I : llvm::seq(FirstVal, LastVal)) { + QualType CurrentIVType = + SeqAnalysis.Loops[I].HelperExprs.IterationVarRef->getType(); + if (Context.getTypeSize(CurrentIVType) > Context.getTypeSize(IVType)) { + IVType = CurrentIVType; + } + } + uint64_t IVBitWidth = Context.getIntWidth(IVType); + + // Create pre-init declarations for all loops lower bounds, upper bounds, + // strides and num-iterations for every top level loop in the fusion + SmallVector LBVarDecls; + SmallVector STVarDecls; + SmallVector NIVarDecls; + SmallVector UBVarDecls; + SmallVector IVVarDecls; + + // Helper lambda to create variables for bounds, strides, and other + // expressions. Generates both the variable declaration and the corresponding + // initialization statement. + auto CreateHelperVarAndStmt = + [&, &SemaRef = SemaRef](Expr *ExprToCopy, const std::string &BaseName, + unsigned I, bool NeedsNewVD = false) { + Expr *TransformedExpr = + AssertSuccess(CopyTransformer.TransformExpr(ExprToCopy)); + if (!TransformedExpr) + return std::pair(nullptr, StmtError()); + + auto Name = (Twine(".omp.") + BaseName + std::to_string(I)).str(); + + VarDecl *VD; + if (NeedsNewVD) { + VD = buildVarDecl(SemaRef, SourceLocation(), IVType, Name); + SemaRef.AddInitializerToDecl(VD, TransformedExpr, false); + } else { + // Create a unique variable name + DeclRefExpr *DRE = cast(TransformedExpr); + VD = cast(DRE->getDecl()); + VD->setDeclName(&SemaRef.PP.getIdentifierTable().get(Name)); + } + // Create the corresponding declaration statement + StmtResult DeclStmt = new (Context) class DeclStmt( + DeclGroupRef(VD), SourceLocation(), SourceLocation()); + return std::make_pair(VD, DeclStmt); + }; + + // PreInits hold a sequence of variable declarations that must be executed + // before the fused loop begins. These include bounds, strides, and other + // helper variables required for the transformation. Other loop transforms + // also contain their own preinits + SmallVector PreInits; + + // Update the general preinits using the preinits generated by loop sequence + // generating loop transformations. These preinits differ slightly from + // single-loop transformation preinits, as they can be detached from a + // specific loop inside multiple generated loop nests. 
This happens + // because certain helper variables, like '.omp.fuse.max', are introduced to + // handle fused iteration spaces and may not be directly tied to a single + // original loop. The preinit structure must ensure that hidden variables + // like '.omp.fuse.max' are still properly handled. + // Transformations that apply this concept: Loopranged Fuse, Split + llvm::append_range(PreInits, SeqAnalysis.LoopSequencePreInits); + + // Process each single loop to generate and collect declarations + // and statements for all helper expressions related to + // particular single loop nests + + // Also In the case of the fused loops, we keep track of their original + // inits by appending them to their preinits statement, and in the case of + // transformations, also append their preinits (which contain the original + // loop initialization statement or other statements) + + // Firstly we need to set TransformIndex to match the begining of the + // looprange section + unsigned int TransformIndex = 0; + for (unsigned I : llvm::seq(FirstVal - 1)) { + if (SeqAnalysis.Loops[I].isLoopTransformation()) + ++TransformIndex; + } + + for (unsigned int I = FirstVal - 1, J = 0; I < LastVal; ++I, ++J) { + if (SeqAnalysis.Loops[I].isRegularLoop()) { + addLoopPreInits(Context, SeqAnalysis.Loops[I].HelperExprs, + SeqAnalysis.Loops[I].TheForStmt, + SeqAnalysis.Loops[I].OriginalInits, PreInits); + } else if (SeqAnalysis.Loops[I].isLoopTransformation()) { + // For transformed loops, insert both pre-inits and original inits. + // Order matters: pre-inits may define variables used in the original + // inits such as upper bounds... + SmallVector &TransformPreInit = + SeqAnalysis.Loops[TransformIndex++].TransformsPreInits; + llvm::append_range(PreInits, TransformPreInit); + + addLoopPreInits(Context, SeqAnalysis.Loops[I].HelperExprs, + SeqAnalysis.Loops[I].TheForStmt, + SeqAnalysis.Loops[I].OriginalInits, PreInits); + } + auto [UBVD, UBDStmt] = + CreateHelperVarAndStmt(SeqAnalysis.Loops[I].HelperExprs.UB, "ub", J); + auto [LBVD, LBDStmt] = + CreateHelperVarAndStmt(SeqAnalysis.Loops[I].HelperExprs.LB, "lb", J); + auto [STVD, STDStmt] = + CreateHelperVarAndStmt(SeqAnalysis.Loops[I].HelperExprs.ST, "st", J); + auto [NIVD, NIDStmt] = CreateHelperVarAndStmt( + SeqAnalysis.Loops[I].HelperExprs.NumIterations, "ni", J, true); + auto [IVVD, IVDStmt] = CreateHelperVarAndStmt( + SeqAnalysis.Loops[I].HelperExprs.IterationVarRef, "iv", J); + + assert(LBVD && STVD && NIVD && IVVD && + "OpenMP Fuse Helper variables creation failed"); + + UBVarDecls.push_back(UBVD); + LBVarDecls.push_back(LBVD); + STVarDecls.push_back(STVD); + NIVarDecls.push_back(NIVD); + IVVarDecls.push_back(IVVD); + + PreInits.push_back(LBDStmt.get()); + PreInits.push_back(STDStmt.get()); + PreInits.push_back(NIDStmt.get()); + PreInits.push_back(IVDStmt.get()); + } + + auto MakeVarDeclRef = [&SemaRef = this->SemaRef](VarDecl *VD) { + return buildDeclRefExpr(SemaRef, VD, VD->getType(), VD->getLocation(), + false); + }; + + // Following up the creation of the final fused loop will be performed + // which has the following shape (considering the selected loops): + // + // for (fuse.index = 0; fuse.index < max(ni0, ni1..., nik); ++fuse.index) { + // if (fuse.index < ni0){ + // iv0 = lb0 + st0 * fuse.index; + // original.index0 = iv0 + // body(0); + // } + // if (fuse.index < ni1){ + // iv1 = lb1 + st1 * fuse.index; + // original.index1 = iv1 + // body(1); + // } + // + // ... 
+ // + // if (fuse.index < nik){ + // ivk = lbk + stk * fuse.index; + // original.indexk = ivk + // body(k); Expr *InitVal = IntegerLiteral::Create(Context, + // llvm::APInt(IVWidth, 0), + // } + + // 1. Create the initialized fuse index + StringRef IndexName = ".omp.fuse.index"; + Expr *InitVal = IntegerLiteral::Create(Context, llvm::APInt(IVBitWidth, 0), + IVType, SourceLocation()); + VarDecl *IndexDecl = + buildVarDecl(SemaRef, {}, IVType, IndexName, nullptr, nullptr); + SemaRef.AddInitializerToDecl(IndexDecl, InitVal, false); + StmtResult InitStmt = new (Context) + DeclStmt(DeclGroupRef(IndexDecl), SourceLocation(), SourceLocation()); + + if (!InitStmt.isUsable()) + return StmtError(); + + auto MakeIVRef = [&SemaRef = this->SemaRef, IndexDecl, IVType, + Loc = InitVal->getExprLoc()]() { + return buildDeclRefExpr(SemaRef, IndexDecl, IVType, Loc, false); + }; + + // 2. Iteratively compute the max number of logical iterations Max(NI_1, NI_2, + // ..., NI_k) + // + // This loop accumulates the maximum value across multiple expressions, + // ensuring each step constructs a unique AST node for correctness. By using + // intermediate temporary variables and conditional operators, we maintain + // distinct nodes and avoid duplicating subtrees, For instance, max(a,b,c): + // omp.temp0 = max(a, b) + // omp.temp1 = max(omp.temp0, c) + // omp.fuse.max = max(omp.temp1, omp.temp0) + + ExprResult MaxExpr; + // I is the range of loops in the sequence that we fuse. + for (unsigned I = FirstVal - 1, J = 0; I < LastVal; ++I, ++J) { + DeclRefExpr *NIRef = MakeVarDeclRef(NIVarDecls[J]); + QualType NITy = NIRef->getType(); + + if (MaxExpr.isUnset()) { + // Initialize MaxExpr with the first NI expression + MaxExpr = NIRef; + } else { + // Create a new acummulator variable t_i = MaxExpr + std::string TempName = (Twine(".omp.temp.") + Twine(J)).str(); + VarDecl *TempDecl = + buildVarDecl(SemaRef, {}, NITy, TempName, nullptr, nullptr); + TempDecl->setInit(MaxExpr.get()); + DeclRefExpr *TempRef = + buildDeclRefExpr(SemaRef, TempDecl, NITy, SourceLocation(), false); + DeclRefExpr *TempRef2 = + buildDeclRefExpr(SemaRef, TempDecl, NITy, SourceLocation(), false); + // Add a DeclStmt to PreInits to ensure the variable is declared. + StmtResult TempStmt = new (Context) + DeclStmt(DeclGroupRef(TempDecl), SourceLocation(), SourceLocation()); + + if (!TempStmt.isUsable()) + return StmtError(); + PreInits.push_back(TempStmt.get()); + + // Build MaxExpr <-(MaxExpr > NIRef ? MaxExpr : NIRef) + ExprResult Comparison = + SemaRef.BuildBinOp(nullptr, SourceLocation(), BO_GT, TempRef, NIRef); + // Handle any errors in Comparison creation + if (!Comparison.isUsable()) + return StmtError(); + + DeclRefExpr *NIRef2 = MakeVarDeclRef(NIVarDecls[J]); + // Update MaxExpr using a conditional expression to hold the max value + MaxExpr = new (Context) ConditionalOperator( + Comparison.get(), SourceLocation(), TempRef2, SourceLocation(), + NIRef2->getExprStmt(), NITy, VK_LValue, OK_Ordinary); + + if (!MaxExpr.isUsable()) + return StmtError(); + } + } + if (!MaxExpr.isUsable()) + return StmtError(); + + // 3. 
Declare the max variable + const std::string MaxName = Twine(".omp.fuse.max").str(); + VarDecl *MaxDecl = + buildVarDecl(SemaRef, {}, IVType, MaxName, nullptr, nullptr); + MaxDecl->setInit(MaxExpr.get()); + DeclRefExpr *MaxRef = buildDeclRefExpr(SemaRef, MaxDecl, IVType, {}, false); + StmtResult MaxStmt = new (Context) + DeclStmt(DeclGroupRef(MaxDecl), SourceLocation(), SourceLocation()); + + if (MaxStmt.isInvalid()) + return StmtError(); + PreInits.push_back(MaxStmt.get()); + + // 4. Create condition Expr: index < n_max + ExprResult CondExpr = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_LT, + MakeIVRef(), MaxRef); + if (!CondExpr.isUsable()) + return StmtError(); + + // 5. Increment Expr: ++index + ExprResult IncrExpr = + SemaRef.BuildUnaryOp(CurScope, SourceLocation(), UO_PreInc, MakeIVRef()); + if (!IncrExpr.isUsable()) + return StmtError(); + + // 6. Build the Fused Loop Body + // The final fused loop iterates over the maximum logical range. Inside the + // loop, each original loop's index is calculated dynamically, and its body + // is executed conditionally. + // + // Each sub-loop's body is guarded by a conditional statement to ensure + // it executes only within its logical iteration range: + // + // if (fuse.index < ni_k){ + // iv_k = lb_k + st_k * fuse.index; + // original.index = iv_k + // body(k); + // } + + CompoundStmt *FusedBody = nullptr; + SmallVector FusedBodyStmts; + for (unsigned I = FirstVal - 1, J = 0; I < LastVal; ++I, ++J) { + // Assingment of the original sub-loop index to compute the logical index + // IV_k = LB_k + omp.fuse.index * ST_k + ExprResult IdxExpr = + SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Mul, + MakeVarDeclRef(STVarDecls[J]), MakeIVRef()); + if (!IdxExpr.isUsable()) + return StmtError(); + IdxExpr = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Add, + MakeVarDeclRef(LBVarDecls[J]), IdxExpr.get()); + + if (!IdxExpr.isUsable()) + return StmtError(); + IdxExpr = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Assign, + MakeVarDeclRef(IVVarDecls[J]), IdxExpr.get()); + if (!IdxExpr.isUsable()) + return StmtError(); + + // Update the original i_k = IV_k + SmallVector BodyStmts; + BodyStmts.push_back(IdxExpr.get()); + llvm::append_range(BodyStmts, SeqAnalysis.Loops[I].HelperExprs.Updates); + + // If the loop is a CXXForRangeStmt then the iterator variable is needed + if (auto *SourceCXXFor = + dyn_cast(SeqAnalysis.Loops[I].TheForStmt)) + BodyStmts.push_back(SourceCXXFor->getLoopVarStmt()); + + Stmt *Body = + (isa(SeqAnalysis.Loops[I].TheForStmt)) + ? cast(SeqAnalysis.Loops[I].TheForStmt)->getBody() + : cast(SeqAnalysis.Loops[I].TheForStmt)->getBody(); + BodyStmts.push_back(Body); + + CompoundStmt *CombinedBody = + CompoundStmt::Create(Context, BodyStmts, FPOptionsOverride(), + SourceLocation(), SourceLocation()); + ExprResult Condition = + SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_LT, MakeIVRef(), + MakeVarDeclRef(NIVarDecls[J])); + + if (!Condition.isUsable()) + return StmtError(); + + IfStmt *IfStatement = IfStmt::Create( + Context, SourceLocation(), IfStatementKind::Ordinary, nullptr, nullptr, + Condition.get(), SourceLocation(), SourceLocation(), CombinedBody, + SourceLocation(), nullptr); + + FusedBodyStmts.push_back(IfStatement); + } + FusedBody = CompoundStmt::Create(Context, FusedBodyStmts, FPOptionsOverride(), + SourceLocation(), SourceLocation()); + + // 7. 
Construct the final fused loop + ForStmt *FusedForStmt = new (Context) + ForStmt(Context, InitStmt.get(), CondExpr.get(), nullptr, IncrExpr.get(), + FusedBody, InitStmt.get()->getBeginLoc(), SourceLocation(), + IncrExpr.get()->getEndLoc()); + + // In the case of looprange, the result of fuse won't simply + // be a single loop (ForStmt), but rather a loop sequence + // (CompoundStmt) of 3 parts: the pre-fusion loops, the fused loop + // and the post-fusion loops, preserving its original order. + // + // Note: If looprange clause produces a single fused loop nest then + // this compound statement wrapper is unnecessary (Therefore this + // treatment is skipped) + + Stmt *FusionStmt = FusedForStmt; + if (LRC && CountVal != SeqAnalysis.LoopSeqSize) { + SmallVector FinalLoops; + + // Reset the transform index + TransformIndex = 0; + + // Collect all non-fused loops before and after the fused region. + // Pre-fusion and post-fusion loops are inserted in order exploiting their + // symmetry, along with their corresponding transformation pre-inits if + // needed. The fused loop is added between the two regions. + for (unsigned I : llvm::seq(SeqAnalysis.LoopSeqSize)) { + if (I >= FirstVal - 1 && I < FirstVal + CountVal - 1) { + // Update the Transformation counter to skip already treated + // loop transformations + if (!SeqAnalysis.Loops[I].isLoopTransformation()) + ++TransformIndex; + continue; + } + + // No need to handle: + // Regular loops: they are kept intact as-is. + // Loop-sequence-generating transformations: already handled earlier. + // Only TransformSingleLoop requires inserting pre-inits here + if (SeqAnalysis.Loops[I].isRegularLoop()) { + const auto &TransformPreInit = + SeqAnalysis.Loops[TransformIndex++].TransformsPreInits; + if (!TransformPreInit.empty()) + llvm::append_range(PreInits, TransformPreInit); + } + + FinalLoops.push_back(SeqAnalysis.Loops[I].TheForStmt); + } + + FinalLoops.insert(FinalLoops.begin() + (FirstVal - 1), FusedForStmt); + FusionStmt = CompoundStmt::Create(Context, FinalLoops, FPOptionsOverride(), + SourceLocation(), SourceLocation()); + } + return OMPFuseDirective::Create(Context, StartLoc, EndLoc, Clauses, + NumGeneratedTopLevelLoops, AStmt, FusionStmt, + buildPreInits(Context, PreInits)); +} + OMPClause *SemaOpenMP::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, @@ -16887,6 +17637,31 @@ OMPClause *SemaOpenMP::ActOnOpenMPPartialClause(Expr *FactorExpr, FactorExpr); } +OMPClause *SemaOpenMP::ActOnOpenMPLoopRangeClause( + Expr *First, Expr *Count, SourceLocation StartLoc, SourceLocation LParenLoc, + SourceLocation FirstLoc, SourceLocation CountLoc, SourceLocation EndLoc) { + + // OpenMP [6.0, Restrictions] + // First and Count must be integer expressions with positive value + ExprResult FirstVal = + VerifyPositiveIntegerConstantInClause(First, OMPC_looprange); + if (FirstVal.isInvalid()) + First = nullptr; + + ExprResult CountVal = + VerifyPositiveIntegerConstantInClause(Count, OMPC_looprange); + if (CountVal.isInvalid()) + Count = nullptr; + + // OpenMP [6.0, Restrictions] + // first + count - 1 must not evaluate to a value greater than the + // loop sequence length of the associated canonical loop sequence. 
+ // This check must be performed afterwards due to the delayed + // parsing and computation of the associated loop sequence + return OMPLoopRangeClause::Create(getASTContext(), StartLoc, LParenLoc, + FirstLoc, CountLoc, EndLoc, First, Count); +} + OMPClause *SemaOpenMP::ActOnOpenMPAlignClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp index f051a246f954f..2bf1511c5cfa0 100644 --- a/clang/lib/Sema/SemaTemplate.cpp +++ b/clang/lib/Sema/SemaTemplate.cpp @@ -775,6 +775,40 @@ Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS, TemplateArgs); } +ExprResult Sema::BuildSubstNonTypeTemplateParmExpr( + Decl *AssociatedDecl, const NonTypeTemplateParmDecl *NTTP, + SourceLocation Loc, TemplateArgument Arg, UnsignedOrNone PackIndex, + bool Final) { + // The template argument itself might be an expression, in which case we just + // return that expression. This happens when substituting into an alias + // template. + Expr *Replacement; + bool refParam = true; + if (Arg.getKind() == TemplateArgument::Expression) { + Replacement = Arg.getAsExpr(); + refParam = Replacement->isLValue(); + if (refParam && Replacement->getType()->isRecordType()) { + QualType ParamType = + NTTP->isExpandedParameterPack() + ? NTTP->getExpansionType(*SemaRef.ArgPackSubstIndex) + : NTTP->getType(); + if (const auto *PET = dyn_cast(ParamType)) + ParamType = PET->getPattern(); + refParam = ParamType->isReferenceType(); + } + } else { + ExprResult result = + SemaRef.BuildExpressionFromNonTypeTemplateArgument(Arg, Loc); + if (result.isInvalid()) + return ExprError(); + Replacement = result.get(); + refParam = Arg.getNonTypeTemplateArgumentType()->isReferenceType(); + } + return new (SemaRef.Context) SubstNonTypeTemplateParmExpr( + Replacement->getType(), Replacement->getValueKind(), Loc, Replacement, + AssociatedDecl, NTTP->getIndex(), PackIndex, refParam, Final); +} + bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, @@ -7068,22 +7102,8 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType, // If the parameter type somehow involves auto, deduce the type now. DeducedType *DeducedT = ParamType->getContainedDeducedType(); - if (getLangOpts().CPlusPlus17 && DeducedT && !DeducedT->isDeduced()) { - // During template argument deduction, we allow 'decltype(auto)' to - // match an arbitrary dependent argument. - // FIXME: The language rules don't say what happens in this case. - // FIXME: We get an opaque dependent type out of decltype(auto) if the - // expression is merely instantiation-dependent; is this enough? - if (DeductionArg->isTypeDependent()) { - auto *AT = dyn_cast(DeducedT); - if (AT && AT->isDecltypeAuto()) { - SugaredConverted = TemplateArgument(Arg, /*IsCanonical=*/false); - CanonicalConverted = TemplateArgument( - Context.getCanonicalTemplateArgument(SugaredConverted)); - return Arg; - } - } - + bool IsDeduced = DeducedT && DeducedT->getDeducedType().isNull(); + if (IsDeduced) { // When checking a deduced template argument, deduce from its type even if // the type is dependent, in order to check the types of non-type template // arguments line up properly in partial ordering. @@ -7112,17 +7132,21 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType, // along with the other associated constraints after // checking the template argument list. 
/*IgnoreConstraints=*/true); - if (Result == TemplateDeductionResult::AlreadyDiagnosed) { - return ExprError(); - } else if (Result != TemplateDeductionResult::Success) { - if (const auto *NTTP = dyn_cast(Param)) { - Diag(Arg->getExprLoc(), - diag::err_non_type_template_parm_type_deduction_failure) - << Param->getDeclName() << NTTP->getType() << Arg->getType() - << Arg->getSourceRange(); + if (Result != TemplateDeductionResult::Success) { + ParamType = TSI->getType(); + if (StrictCheck || !DeductionArg->isTypeDependent()) { + if (Result == TemplateDeductionResult::AlreadyDiagnosed) + return ExprError(); + if (const auto *NTTP = dyn_cast(Param)) + Diag(Arg->getExprLoc(), + diag::err_non_type_template_parm_type_deduction_failure) + << Param->getDeclName() << NTTP->getType() << Arg->getType() + << Arg->getSourceRange(); + NoteTemplateParameterLocation(*Param); + return ExprError(); } - NoteTemplateParameterLocation(*Param); - return ExprError(); + ParamType = SubstAutoTypeDependent(ParamType); + assert(!ParamType.isNull() && "substituting DependentTy can't fail"); } } // CheckNonTypeTemplateParameterType will produce a diagnostic if there's @@ -7144,14 +7168,16 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType, // type-dependent, there's nothing we can check now. if (ParamType->isDependentType() || DeductionArg->isTypeDependent()) { // Force the argument to the type of the parameter to maintain invariants. - ExprResult E = ImpCastExprToType( - DeductionArg, ParamType.getNonLValueExprType(Context), CK_Dependent, - ParamType->isLValueReferenceType() ? VK_LValue - : ParamType->isRValueReferenceType() ? VK_XValue - : VK_PRValue); - if (E.isInvalid()) - return ExprError(); - setDeductionArg(E.get()); + if (!IsDeduced) { + ExprResult E = ImpCastExprToType( + DeductionArg, ParamType.getNonLValueExprType(Context), CK_Dependent, + ParamType->isLValueReferenceType() ? VK_LValue + : ParamType->isRValueReferenceType() ? VK_XValue + : VK_PRValue); + if (E.isInvalid()) + return ExprError(); + setDeductionArg(E.get()); + } SugaredConverted = TemplateArgument(Arg, /*IsCanonical=*/false); CanonicalConverted = TemplateArgument( Context.getCanonicalTemplateArgument(SugaredConverted)); @@ -8555,6 +8581,7 @@ static SourceRange findTemplateParameter(unsigned Depth, TypeLoc TL) { static bool CheckNonTypeTemplatePartialSpecializationArgs( Sema &S, SourceLocation TemplateNameLoc, NonTypeTemplateParmDecl *Param, const TemplateArgument *Args, unsigned NumArgs, bool IsDefaultArgument) { + bool HasError = false; for (unsigned I = 0; I != NumArgs; ++I) { if (Args[I].getKind() == TemplateArgument::Pack) { if (CheckNonTypeTemplatePartialSpecializationArgs( @@ -8569,6 +8596,10 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs( continue; Expr *ArgExpr = Args[I].getAsExpr(); + if (ArgExpr->containsErrors()) { + HasError = true; + continue; + } // We can have a pack expansion of any of the bullets below. 
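    // Illustrative (assumed) example of the arguments this loop walks: the
    // non-type arguments of a partial specialization may be plain expressions,
    // packs, or pack expansions, and with the containsErrors() check added
    // above, an argument that already carries errors is skipped rather than
    // analyzed further.
    //
    //   template <int... Ns> struct A;
    //   template <int... Ns> struct A<0, Ns...> {}; // both '0' and 'Ns...' are checked here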
if (PackExpansionExpr *Expansion = dyn_cast(ArgExpr)) @@ -8638,7 +8669,7 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs( } } - return false; + return HasError; } bool Sema::CheckTemplatePartialSpecializationArgs( diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp index 62e867c44ad14..f6ee7452c2f9a 100644 --- a/clang/lib/Sema/SemaTemplateDeduction.cpp +++ b/clang/lib/Sema/SemaTemplateDeduction.cpp @@ -483,7 +483,7 @@ DeduceNonTypeTemplateArgument(Sema &S, TemplateParameterList *TemplateParams, return TemplateDeductionResult::Inconsistent; } Deduced[NTTP.getIndex()] = Result; - if (!S.getLangOpts().CPlusPlus17) + if (!S.getLangOpts().CPlusPlus17 && !PartialOrdering) return TemplateDeductionResult::Success; if (NTTP.isExpandedParameterPack()) @@ -2652,28 +2652,11 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, getDeducedNTTParameterFromExpr(Info, P.getAsExpr())) { switch (A.getKind()) { case TemplateArgument::Expression: { - const Expr *E = A.getAsExpr(); - // When checking NTTP, if either the parameter or the argument is - // dependent, as there would be otherwise nothing to deduce, we force - // the argument to the parameter type using this dependent implicit - // cast, in order to maintain invariants. Now we can deduce the - // resulting type from the original type, and deduce the original type - // against the parameter we are checking. - if (const auto *ICE = dyn_cast(E); - ICE && ICE->getCastKind() == clang::CK_Dependent) { - E = ICE->getSubExpr(); - if (auto Result = DeduceTemplateArgumentsByTypeMatch( - S, TemplateParams, ICE->getType(), E->getType(), Info, - Deduced, TDF_SkipNonDependent, - PartialOrdering ? PartialOrderingKind::NonCall - : PartialOrderingKind::None, - /*DeducedFromArrayBound=*/false, HasDeducedAnyParam); - Result != TemplateDeductionResult::Success) - return Result; - } + // The type of the value is the type of the expression as written. return DeduceNonTypeTemplateArgument( - S, TemplateParams, NTTP, DeducedTemplateArgument(A), E->getType(), - Info, PartialOrdering, Deduced, HasDeducedAnyParam); + S, TemplateParams, NTTP, DeducedTemplateArgument(A), + A.getAsExpr()->IgnoreImplicitAsWritten()->getType(), Info, + PartialOrdering, Deduced, HasDeducedAnyParam); } case TemplateArgument::Integral: case TemplateArgument::StructuralValue: @@ -5279,18 +5262,6 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, SmallVector Deduced; Deduced.resize(1); - // If deduction failed, don't diagnose if the initializer is dependent; it - // might acquire a matching type in the instantiation. 
- auto DeductionFailed = [&](TemplateDeductionResult TDK) { - if (Init->isTypeDependent()) { - Result = - SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type); - assert(!Result.isNull() && "substituting DependentTy can't fail"); - return TemplateDeductionResult::Success; - } - return TDK; - }; - SmallVector OriginalCallArgs; QualType DeducedType; @@ -5340,9 +5311,9 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, Diag(Info.getLocation(), diag::err_auto_inconsistent_deduction) << Info.FirstArg << Info.SecondArg << DeducedFromInitRange << Init->getSourceRange(); - return DeductionFailed(TemplateDeductionResult::AlreadyDiagnosed); + return TemplateDeductionResult::AlreadyDiagnosed; } - return DeductionFailed(TDK); + return TDK; } if (DeducedFromInitRange.isInvalid() && @@ -5364,12 +5335,12 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, OriginalCallArgs, /*Decomposed=*/false, /*ArgIdx=*/0, /*TDF=*/0, FailedTSC); TDK != TemplateDeductionResult::Success) - return DeductionFailed(TDK); + return TDK; } // Could be null if somehow 'auto' appears in a non-deduced context. if (Deduced[0].getKind() != TemplateArgument::Type) - return DeductionFailed(TemplateDeductionResult::Incomplete); + return TemplateDeductionResult::Incomplete; DeducedType = Deduced[0].getAsType(); if (InitList) { @@ -5383,7 +5354,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, if (!Context.hasSameType(DeducedType, Result)) { Info.FirstArg = Result; Info.SecondArg = DeducedType; - return DeductionFailed(TemplateDeductionResult::Inconsistent); + return TemplateDeductionResult::Inconsistent; } DeducedType = Context.getCommonSugaredType(Result, DeducedType); } @@ -5407,7 +5378,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, CheckOriginalCallArgDeduction(*this, Info, OriginalArg, DeducedA); TDK != TemplateDeductionResult::Success) { Result = QualType(); - return DeductionFailed(TDK); + return TDK; } } @@ -5429,13 +5400,17 @@ TypeSourceInfo *Sema::SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, } QualType Sema::SubstAutoTypeDependent(QualType TypeWithAuto) { - return SubstituteDeducedTypeTransform(*this, DependentAuto{false}) + return SubstituteDeducedTypeTransform( + *this, + DependentAuto{/*IsPack=*/isa(TypeWithAuto)}) .TransformType(TypeWithAuto); } TypeSourceInfo * Sema::SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto) { - return SubstituteDeducedTypeTransform(*this, DependentAuto{false}) + return SubstituteDeducedTypeTransform( + *this, DependentAuto{/*IsPack=*/isa( + TypeWithAuto->getType())}) .TransformType(TypeWithAuto); } diff --git a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp index 3d54d1eb4373a..fe673eac8fcfa 100644 --- a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp +++ b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp @@ -1428,10 +1428,13 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template, DeclareImplicitDeductionGuidesForTypeAlias(*this, AliasTemplate, Loc); return; } - if (CXXRecordDecl *DefRecord = - cast(Template->getTemplatedDecl())->getDefinition()) { + CXXRecordDecl *DefRecord = + dyn_cast_or_null(Template->getTemplatedDecl()); + if (!DefRecord) + return; + if (const CXXRecordDecl *Definition = DefRecord->getDefinition()) { if (TemplateDecl *DescribedTemplate = - DefRecord->getDescribedClassTemplate()) + Definition->getDescribedClassTemplate()) Template = DescribedTemplate; } diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp 
b/clang/lib/Sema/SemaTemplateInstantiate.cpp index a72c95d6d77cf..f1c9c5c868159 100644 --- a/clang/lib/Sema/SemaTemplateInstantiate.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp @@ -616,29 +616,30 @@ Sema::InstantiatingTemplate::InstantiatingTemplate( Invalid = true; return; } - Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange); + + CodeSynthesisContext Inst; + Inst.Kind = Kind; + Inst.PointOfInstantiation = PointOfInstantiation; + Inst.Entity = Entity; + Inst.Template = Template; + Inst.TemplateArgs = TemplateArgs.data(); + Inst.NumTemplateArgs = TemplateArgs.size(); + Inst.DeductionInfo = DeductionInfo; + Inst.InstantiationRange = InstantiationRange; + Inst.InConstraintSubstitution = + Inst.Kind == CodeSynthesisContext::ConstraintSubstitution; + if (!SemaRef.CodeSynthesisContexts.empty()) + Inst.InConstraintSubstitution |= + SemaRef.CodeSynthesisContexts.back().InConstraintSubstitution; + + Invalid = SemaRef.pushCodeSynthesisContext(Inst); if (!Invalid) { - CodeSynthesisContext Inst; - Inst.Kind = Kind; - Inst.PointOfInstantiation = PointOfInstantiation; - Inst.Entity = Entity; - Inst.Template = Template; - Inst.TemplateArgs = TemplateArgs.data(); - Inst.NumTemplateArgs = TemplateArgs.size(); - Inst.DeductionInfo = DeductionInfo; - Inst.InstantiationRange = InstantiationRange; - Inst.InConstraintSubstitution = - Inst.Kind == CodeSynthesisContext::ConstraintSubstitution; - if (!SemaRef.CodeSynthesisContexts.empty()) - Inst.InConstraintSubstitution |= - SemaRef.CodeSynthesisContexts.back().InConstraintSubstitution; - - SemaRef.pushCodeSynthesisContext(Inst); - - AlreadyInstantiating = !Inst.Entity ? false : - !SemaRef.InstantiatingSpecializations - .insert({Inst.Entity->getCanonicalDecl(), Inst.Kind}) - .second; + AlreadyInstantiating = + !Inst.Entity + ? false + : !SemaRef.InstantiatingSpecializations + .insert({Inst.Entity->getCanonicalDecl(), Inst.Kind}) + .second; atTemplateBegin(SemaRef.TemplateInstCallbacks, SemaRef, Inst); } } @@ -834,18 +835,34 @@ Sema::InstantiatingTemplate::InstantiatingTemplate( : InstantiatingTemplate(SemaRef, CodeSynthesisContext::PartialOrderingTTP, ArgLoc, InstantiationRange, PArg) {} -void Sema::pushCodeSynthesisContext(CodeSynthesisContext Ctx) { +bool Sema::pushCodeSynthesisContext(CodeSynthesisContext Ctx) { Ctx.SavedInNonInstantiationSFINAEContext = InNonInstantiationSFINAEContext; InNonInstantiationSFINAEContext = false; - CodeSynthesisContexts.push_back(Ctx); - - if (!Ctx.isInstantiationRecord()) + if (!Ctx.isInstantiationRecord()) { ++NonInstantiationEntries; + } else { + assert(SemaRef.NonInstantiationEntries <= + SemaRef.CodeSynthesisContexts.size()); + if ((SemaRef.CodeSynthesisContexts.size() - + SemaRef.NonInstantiationEntries) > + SemaRef.getLangOpts().InstantiationDepth) { + SemaRef.Diag(Ctx.PointOfInstantiation, + diag::err_template_recursion_depth_exceeded) + << SemaRef.getLangOpts().InstantiationDepth << Ctx.InstantiationRange; + SemaRef.Diag(Ctx.PointOfInstantiation, + diag::note_template_recursion_depth) + << SemaRef.getLangOpts().InstantiationDepth; + return true; + } + } + + CodeSynthesisContexts.push_back(Ctx); // Check to see if we're low on stack space. We can't do anything about this // from here, but we can at least warn the user. 
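  // Illustrative (assumed) example of what the depth check above diagnoses: an
  // unbounded recursive instantiation overflows the configured limit
  // (LangOpts.InstantiationDepth, settable with -ftemplate-depth=N) and is
  // reported via err_template_recursion_depth_exceeded.
  //
  //   template <int N> struct Loop { static constexpr int v = Loop<N + 1>::v; };
  //   constexpr int x = Loop<0>::v; // exceeds the instantiation depth limit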
StackHandler.warnOnStackNearlyExhausted(Ctx.PointOfInstantiation); + return false; } void Sema::popCodeSynthesisContext() { @@ -907,25 +924,6 @@ static std::string convertCallArgsToString(Sema &S, return Result; } -bool Sema::InstantiatingTemplate::CheckInstantiationDepth( - SourceLocation PointOfInstantiation, - SourceRange InstantiationRange) { - assert(SemaRef.NonInstantiationEntries <= - SemaRef.CodeSynthesisContexts.size()); - if ((SemaRef.CodeSynthesisContexts.size() - - SemaRef.NonInstantiationEntries) - <= SemaRef.getLangOpts().InstantiationDepth) - return false; - - SemaRef.Diag(PointOfInstantiation, - diag::err_template_recursion_depth_exceeded) - << SemaRef.getLangOpts().InstantiationDepth - << InstantiationRange; - SemaRef.Diag(PointOfInstantiation, diag::note_template_recursion_depth) - << SemaRef.getLangOpts().InstantiationDepth; - return true; -} - void Sema::PrintInstantiationStack(InstantiationContextDiagFuncRef DiagFunc) { // Determine which template instantiations to skip, if any. unsigned SkipStart = CodeSynthesisContexts.size(), SkipEnd = SkipStart; @@ -1373,16 +1371,6 @@ std::optional Sema::isSFINAEContext() const { return std::nullopt; } -static TemplateArgument -getPackSubstitutedTemplateArgument(Sema &S, TemplateArgument Arg) { - assert(S.ArgPackSubstIndex); - assert(*S.ArgPackSubstIndex < Arg.pack_size()); - Arg = Arg.pack_begin()[*S.ArgPackSubstIndex]; - if (Arg.isPackExpansion()) - Arg = Arg.getPackExpansionPattern(); - return Arg; -} - //===----------------------------------------------------------------------===/ // Template Instantiation for Types //===----------------------------------------------------------------------===/ @@ -1449,13 +1437,6 @@ namespace { return TemplateArgs.getNewDepth(Depth); } - UnsignedOrNone getPackIndex(TemplateArgument Pack) { - UnsignedOrNone Index = getSema().ArgPackSubstIndex; - if (!Index) - return std::nullopt; - return Pack.pack_size() - 1 - *Index; - } - bool TryExpandParameterPacks(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef Unexpanded, @@ -1537,7 +1518,7 @@ namespace { if (TA.getKind() != TemplateArgument::Pack) return TA; if (SemaRef.ArgPackSubstIndex) - return getPackSubstitutedTemplateArgument(SemaRef, TA); + return SemaRef.getPackSubstitutedTemplateArgument(TA); assert(TA.pack_size() == 1 && TA.pack_begin()->isPackExpansion() && "unexpected pack arguments in template rewrite"); TemplateArgument Arg = *TA.pack_begin(); @@ -1643,10 +1624,6 @@ namespace { ExprResult TransformTemplateParmRefExpr(DeclRefExpr *E, NonTypeTemplateParmDecl *D); - ExprResult TransformSubstNonTypeTemplateParmPackExpr( - SubstNonTypeTemplateParmPackExpr *E); - ExprResult TransformSubstNonTypeTemplateParmExpr( - SubstNonTypeTemplateParmExpr *E); /// Rebuild a DeclRefExpr for a VarDecl reference. 
ExprResult RebuildVarDeclRefExpr(ValueDecl *PD, SourceLocation Loc); @@ -1933,12 +1910,6 @@ namespace { SmallVectorImpl &PTypes, SmallVectorImpl &TransParams, Sema::ExtParameterInfoBuilder &PInfos); - - private: - ExprResult - transformNonTypeTemplateParmRef(Decl *AssociatedDecl, const NamedDecl *parm, - SourceLocation loc, TemplateArgument arg, - UnsignedOrNone PackIndex, bool Final); }; } @@ -1975,7 +1946,7 @@ Decl *TemplateInstantiator::TransformDecl(SourceLocation Loc, Decl *D) { if (TTP->isParameterPack()) { assert(Arg.getKind() == TemplateArgument::Pack && "Missing argument pack"); - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } TemplateName Template = Arg.getAsTemplate(); @@ -2079,7 +2050,7 @@ TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D, if (!getSema().ArgPackSubstIndex) return nullptr; - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } QualType T = Arg.getAsType(); @@ -2165,8 +2136,8 @@ TemplateName TemplateInstantiator::TransformTemplateName( Arg, AssociatedDecl, TTP->getIndex(), Final); } - PackIndex = getPackIndex(Arg); - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + PackIndex = SemaRef.getPackIndex(Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } TemplateName Template = Arg.getAsTemplate(); @@ -2183,10 +2154,10 @@ TemplateName TemplateInstantiator::TransformTemplateName( TemplateArgument Pack = SubstPack->getArgumentPack(); TemplateName Template = - getPackSubstitutedTemplateArgument(getSema(), Pack).getAsTemplate(); + SemaRef.getPackSubstitutedTemplateArgument(Pack).getAsTemplate(); return getSema().Context.getSubstTemplateTemplateParm( Template, SubstPack->getAssociatedDecl(), SubstPack->getIndex(), - getPackIndex(Pack), SubstPack->getFinal()); + SemaRef.getPackIndex(Pack), SubstPack->getFinal()); } return inherited::TransformTemplateName( @@ -2252,11 +2223,11 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E, ExprType, TargetType->isReferenceType() ? VK_LValue : VK_PRValue, E->getLocation(), Arg, AssociatedDecl, NTTP->getPosition(), Final); } - PackIndex = getPackIndex(Arg); - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + PackIndex = SemaRef.getPackIndex(Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } - return transformNonTypeTemplateParmRef(AssociatedDecl, NTTP, E->getLocation(), - Arg, PackIndex, Final); + return SemaRef.BuildSubstNonTypeTemplateParmExpr( + AssociatedDecl, NTTP, E->getLocation(), Arg, PackIndex, Final); } const AnnotateAttr * @@ -2344,144 +2315,6 @@ TemplateInstantiator::TransformOpenACCRoutineDeclAttr( "applies to a Function Decl (and a few places for VarDecl)"); } -ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef( - Decl *AssociatedDecl, const NamedDecl *parm, SourceLocation loc, - TemplateArgument arg, UnsignedOrNone PackIndex, bool Final) { - ExprResult result; - - // Determine the substituted parameter type. We can usually infer this from - // the template argument, but not always. 
- auto SubstParamType = [&] { - if (const auto *NTTP = dyn_cast(parm)) { - QualType T; - if (NTTP->isExpandedParameterPack()) - T = NTTP->getExpansionType(*SemaRef.ArgPackSubstIndex); - else - T = NTTP->getType(); - if (parm->isParameterPack() && isa(T)) - T = cast(T)->getPattern(); - return SemaRef.SubstType(T, TemplateArgs, loc, parm->getDeclName()); - } - return SemaRef.SubstType(arg.getAsExpr()->getType(), TemplateArgs, loc, - parm->getDeclName()); - }; - - bool refParam = false; - - // The template argument itself might be an expression, in which case we just - // return that expression. This happens when substituting into an alias - // template. - if (arg.getKind() == TemplateArgument::Expression) { - Expr *argExpr = arg.getAsExpr(); - result = argExpr; - if (argExpr->isLValue()) { - if (argExpr->getType()->isRecordType()) { - // Check whether the parameter was actually a reference. - QualType paramType = SubstParamType(); - if (paramType.isNull()) - return ExprError(); - refParam = paramType->isReferenceType(); - } else { - refParam = true; - } - } - } else if (arg.getKind() == TemplateArgument::Declaration || - arg.getKind() == TemplateArgument::NullPtr) { - if (arg.getKind() == TemplateArgument::Declaration) { - ValueDecl *VD = arg.getAsDecl(); - - // Find the instantiation of the template argument. This is - // required for nested templates. - VD = cast_or_null( - getSema().FindInstantiatedDecl(loc, VD, TemplateArgs)); - if (!VD) - return ExprError(); - } - - QualType paramType = arg.getNonTypeTemplateArgumentType(); - assert(!paramType.isNull() && "type substitution failed for param type"); - assert(!paramType->isDependentType() && "param type still dependent"); - result = SemaRef.BuildExpressionFromDeclTemplateArgument(arg, paramType, loc); - refParam = paramType->isReferenceType(); - } else { - QualType paramType = arg.getNonTypeTemplateArgumentType(); - result = SemaRef.BuildExpressionFromNonTypeTemplateArgument(arg, loc); - refParam = paramType->isReferenceType(); - assert(result.isInvalid() || - SemaRef.Context.hasSameType(result.get()->getType(), - paramType.getNonReferenceType())); - } - - if (result.isInvalid()) - return ExprError(); - - Expr *resultExpr = result.get(); - return new (SemaRef.Context) SubstNonTypeTemplateParmExpr( - resultExpr->getType(), resultExpr->getValueKind(), loc, resultExpr, - AssociatedDecl, - clang::getDepthAndIndex(const_cast(parm)).second, PackIndex, - refParam, Final); -} - -ExprResult -TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr( - SubstNonTypeTemplateParmPackExpr *E) { - if (!getSema().ArgPackSubstIndex) { - // We aren't expanding the parameter pack, so just return ourselves. 
- return E; - } - - TemplateArgument Pack = E->getArgumentPack(); - TemplateArgument Arg = getPackSubstitutedTemplateArgument(getSema(), Pack); - return transformNonTypeTemplateParmRef( - E->getAssociatedDecl(), E->getParameterPack(), - E->getParameterPackLocation(), Arg, getPackIndex(Pack), E->getFinal()); -} - -ExprResult -TemplateInstantiator::TransformSubstNonTypeTemplateParmExpr( - SubstNonTypeTemplateParmExpr *E) { - ExprResult SubstReplacement = E->getReplacement(); - if (!isa(SubstReplacement.get())) - SubstReplacement = TransformExpr(E->getReplacement()); - if (SubstReplacement.isInvalid()) - return true; - QualType SubstType = TransformType(E->getParameterType(getSema().Context)); - if (SubstType.isNull()) - return true; - // The type may have been previously dependent and not now, which means we - // might have to implicit cast the argument to the new type, for example: - // template - // concept C = sizeof(U) == 4; - // void foo() requires C<2, 'a'> { } - // When normalizing foo(), we first form the normalized constraints of C: - // AtomicExpr(sizeof(U) == 4, - // U=SubstNonTypeTemplateParmExpr(Param=U, - // Expr=DeclRef(U), - // Type=decltype(T))) - // Then we substitute T = 2, U = 'a' into the parameter mapping, and need to - // produce: - // AtomicExpr(sizeof(U) == 4, - // U=SubstNonTypeTemplateParmExpr(Param=U, - // Expr=ImpCast( - // decltype(2), - // SubstNTTPE(Param=U, Expr='a', - // Type=char)), - // Type=decltype(2))) - // The call to CheckTemplateArgument here produces the ImpCast. - TemplateArgument SugaredConverted, CanonicalConverted; - if (SemaRef - .CheckTemplateArgument(E->getParameter(), SubstType, - SubstReplacement.get(), SugaredConverted, - CanonicalConverted, - /*StrictCheck=*/false, Sema::CTAK_Specified) - .isInvalid()) - return true; - return transformNonTypeTemplateParmRef( - E->getAssociatedDecl(), E->getParameter(), E->getExprLoc(), - SugaredConverted, E->getPackIndex(), E->getFinal()); -} - ExprResult TemplateInstantiator::RebuildVarDeclRefExpr(ValueDecl *PD, SourceLocation Loc) { DeclarationNameInfo NameInfo(PD->getDeclName(), Loc); @@ -2701,8 +2534,8 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB, } // PackIndex starts from last element. 
- PackIndex = getPackIndex(Arg); - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + PackIndex = SemaRef.getPackIndex(Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } assert(Arg.getKind() == TemplateArgument::Type && @@ -2749,20 +2582,20 @@ QualType TemplateInstantiator::TransformSubstTemplateTypeParmPackType( } TemplateArgument Pack = T->getArgumentPack(); - TemplateArgument Arg = getPackSubstitutedTemplateArgument(getSema(), Pack); + TemplateArgument Arg = SemaRef.getPackSubstitutedTemplateArgument(Pack); return BuildSubstTemplateTypeParmType( TLB, SuppressObjCLifetime, T->getFinal(), NewReplaced, T->getIndex(), - getPackIndex(Pack), Arg, TL.getNameLoc()); + SemaRef.getPackIndex(Pack), Arg, TL.getNameLoc()); } QualType TemplateInstantiator::TransformSubstBuiltinTemplatePackType( TypeLocBuilder &TLB, SubstBuiltinTemplatePackTypeLoc TL) { if (!getSema().ArgPackSubstIndex) return TreeTransform::TransformSubstBuiltinTemplatePackType(TLB, TL); - auto &Sema = getSema(); - TemplateArgument Result = getPackSubstitutedTemplateArgument( - Sema, TL.getTypePtr()->getArgumentPack()); - TLB.pushTrivial(Sema.getASTContext(), Result.getAsType(), TL.getBeginLoc()); + TemplateArgument Result = SemaRef.getPackSubstitutedTemplateArgument( + TL.getTypePtr()->getArgumentPack()); + TLB.pushTrivial(SemaRef.getASTContext(), Result.getAsType(), + TL.getBeginLoc()); return Result.getAsType(); } diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp index adac3dff5b2b4..e2dc70360506e 100644 --- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp @@ -3742,7 +3742,7 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl( ExpandedParams.reserve(D->getNumExpansionTemplateParameters()); for (unsigned I = 0, N = D->getNumExpansionTemplateParameters(); I != N; ++I) { - LocalInstantiationScope Scope(SemaRef); + LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true); TemplateParameterList *Expansion = SubstTemplateParams(D->getExpansionTemplateParameters(I)); if (!Expansion) @@ -3774,7 +3774,7 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl( if (Expand) { for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgPackSubstIndexRAII SubstIndex(SemaRef, I); - LocalInstantiationScope Scope(SemaRef); + LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true); TemplateParameterList *Expansion = SubstTemplateParams(TempParams); if (!Expansion) return nullptr; @@ -3785,21 +3785,18 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl( // expanded parameter pack is the original expansion type, but callers // will end up using the expanded parameter pack types for type-checking. IsExpandedParameterPack = true; - InstParams = TempParams; - } else { - // We cannot fully expand the pack expansion now, so just substitute - // into the pattern. - Sema::ArgPackSubstIndexRAII SubstIndex(SemaRef, std::nullopt); - - LocalInstantiationScope Scope(SemaRef); - InstParams = SubstTemplateParams(TempParams); - if (!InstParams) - return nullptr; } + + Sema::ArgPackSubstIndexRAII SubstIndex(SemaRef, std::nullopt); + + LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true); + InstParams = SubstTemplateParams(TempParams); + if (!InstParams) + return nullptr; } else { // Perform the actual substitution of template parameters within a new, // local instantiation scope. 
- LocalInstantiationScope Scope(SemaRef); + LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true); InstParams = SubstTemplateParams(TempParams); if (!InstParams) return nullptr; diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp index d723fb80f437e..bee613aa5f1c5 100644 --- a/clang/lib/Sema/SemaType.cpp +++ b/clang/lib/Sema/SemaType.cpp @@ -2270,7 +2270,10 @@ QualType Sema::BuildArrayType(QualType T, ArraySizeModifier ASM, : ConstVal.getActiveBits(); if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) { Diag(ArraySize->getBeginLoc(), diag::err_array_too_large) - << toString(ConstVal, 10) << ArraySize->getSourceRange(); + << toString(ConstVal, 10, ConstVal.isSigned(), + /*formatAsCLiteral=*/false, /*UpperCase=*/false, + /*InsertSeparators=*/true) + << ArraySize->getSourceRange(); return QualType(); } diff --git a/clang/lib/Sema/SemaTypeTraits.cpp b/clang/lib/Sema/SemaTypeTraits.cpp index 1ca769ebb50f0..3e34675cbf064 100644 --- a/clang/lib/Sema/SemaTypeTraits.cpp +++ b/clang/lib/Sema/SemaTypeTraits.cpp @@ -1163,13 +1163,16 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT, // - it has at least one trivial eligible constructor and a trivial, // non-deleted destructor. const CXXDestructorDecl *Dtor = RD->getDestructor(); - if (UnqualT->isAggregateType()) - if (Dtor && !Dtor->isUserProvided()) - return true; - if (RD->hasTrivialDestructor() && (!Dtor || !Dtor->isDeleted())) - if (RD->hasTrivialDefaultConstructor() || - RD->hasTrivialCopyConstructor() || RD->hasTrivialMoveConstructor()) - return true; + if (UnqualT->isAggregateType() && (!Dtor || !Dtor->isUserProvided())) + return true; + if (RD->hasTrivialDestructor() && (!Dtor || !Dtor->isDeleted())) { + for (CXXConstructorDecl *Ctr : RD->ctors()) { + if (Ctr->isIneligibleOrNotSelected() || Ctr->isDeleted()) + continue; + if (Ctr->isTrivial()) + return true; + } + } return false; } case UTT_IsIntangibleType: @@ -1827,10 +1830,10 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, return Self.HLSL().IsScalarizedLayoutCompatible(LhsT, RhsT); } - case BTT_LtSynthesisesFromSpaceship: - case BTT_LeSynthesisesFromSpaceship: - case BTT_GtSynthesisesFromSpaceship: - case BTT_GeSynthesisesFromSpaceship: { + case BTT_LtSynthesizesFromSpaceship: + case BTT_LeSynthesizesFromSpaceship: + case BTT_GtSynthesizesFromSpaceship: + case BTT_GeSynthesizesFromSpaceship: { EnterExpressionEvaluationContext UnevaluatedContext( Self, Sema::ExpressionEvaluationContext::Unevaluated); Sema::SFINAETrap SFINAE(Self, /*ForValidityCheck=*/true); @@ -1849,13 +1852,13 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, auto OpKind = [&] { switch (BTT) { - case BTT_LtSynthesisesFromSpaceship: + case BTT_LtSynthesizesFromSpaceship: return BinaryOperatorKind::BO_LT; - case BTT_LeSynthesisesFromSpaceship: + case BTT_LeSynthesizesFromSpaceship: return BinaryOperatorKind::BO_LE; - case BTT_GtSynthesisesFromSpaceship: + case BTT_GtSynthesizesFromSpaceship: return BinaryOperatorKind::BO_GT; - case BTT_GeSynthesisesFromSpaceship: + case BTT_GeSynthesizesFromSpaceship: return BinaryOperatorKind::BO_GE; default: llvm_unreachable("Trying to Synthesize non-comparison operator?"); @@ -2014,6 +2017,7 @@ static std::optional StdNameToTypeTrait(StringRef Name) { .Case("is_aggregate", TypeTrait::UTT_IsAggregate) .Case("is_constructible", TypeTrait::TT_IsConstructible) .Case("is_final", TypeTrait::UTT_IsFinal) + .Case("is_abstract", TypeTrait::UTT_IsAbstract) .Default(std::nullopt); } @@ -2774,6 +2778,75 
@@ static void DiagnoseNonAggregateReason(Sema &SemaRef, SourceLocation Loc, DiagnoseNonAggregateReason(SemaRef, Loc, D); } +static void DiagnoseNonAbstractReason(Sema &SemaRef, SourceLocation Loc, + const CXXRecordDecl *D) { + // If this type has any abstract base classes, their respective virtual + // functions must have been overridden. + for (const CXXBaseSpecifier &B : D->bases()) { + if (B.getType()->castAsCXXRecordDecl()->isAbstract()) { + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::OverridesAllPureVirtual + << B.getType() << B.getSourceRange(); + } + } +} + +static void DiagnoseNonAbstractReason(Sema &SemaRef, SourceLocation Loc, + QualType T) { + SemaRef.Diag(Loc, diag::note_unsatisfied_trait) + << T << diag::TraitName::Abstract; + + if (T->isReferenceType()) { + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::Ref; + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::NotStructOrClass; + return; + } + + if (T->isUnionType()) { + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::UnionType; + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::NotStructOrClass; + return; + } + + if (SemaRef.Context.getAsArrayType(T)) { + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::ArrayType; + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::NotStructOrClass; + return; + } + + if (T->isFunctionType()) { + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::FunctionType; + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::NotStructOrClass; + return; + } + + if (T->isPointerType()) { + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::PointerType; + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::NotStructOrClass; + return; + } + + if (!T->isStructureOrClassType()) { + SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason) + << diag::TraitNotSatisfiedReason::NotStructOrClass; + return; + } + + const CXXRecordDecl *D = T->getAsCXXRecordDecl(); + if (D->hasDefinition()) + DiagnoseNonAbstractReason(SemaRef, Loc, D); +} + void Sema::DiagnoseTypeTraitDetails(const Expr *E) { E = E->IgnoreParenImpCasts(); if (E->containsErrors()) @@ -2818,6 +2891,9 @@ void Sema::DiagnoseTypeTraitDetails(const Expr *E) { DiagnoseIsFinalReason(*this, E->getBeginLoc(), QT); // unsatisfied break; } + case UTT_IsAbstract: + DiagnoseNonAbstractReason(*this, E->getBeginLoc(), Args[0]); + break; default: break; } diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h index 242ffb09af006..6967301483361 100644 --- a/clang/lib/Sema/TreeTransform.h +++ b/clang/lib/Sema/TreeTransform.h @@ -1783,6 +1783,14 @@ class TreeTransform { LParenLoc, EndLoc); } + OMPClause * + RebuildOMPLoopRangeClause(Expr *First, Expr *Count, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation FirstLoc, + SourceLocation CountLoc, SourceLocation EndLoc) { + return getSema().OpenMP().ActOnOpenMPLoopRangeClause( + First, Count, StartLoc, LParenLoc, FirstLoc, CountLoc, EndLoc); + } + /// Build a new OpenMP 'allocator' clause. /// /// By default, performs semantic analysis to build the new OpenMP clause. 
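(Illustrative sketch, not part of the patch: the kind of source construct the new OMPFuseDirective / OMPLoopRangeClause handling above is written for, assuming the OpenMP 6.0 'fuse' directive spelling with a 1-based looprange(first, count) clause; the function and variable names below are made up, and the snippet would need -fopenmp with a compiler that implements the construct.)
// Fuse only loops 2 and 3 of the enclosed canonical loop sequence; the first
// loop is left as-is. looprange(2, 2) selects two loops starting at index 2.
void fuse_tail(int n, float a, float *x, float *y, float *z) {
#pragma omp fuse looprange(2, 2)
  {
    for (int i = 0; i < n; ++i) x[i] *= a;
    for (int i = 0; i < n; ++i) y[i] += x[i];
    for (int i = 0; i < n; ++i) z[i] = x[i] + y[i];
  }
}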
@@ -9607,6 +9615,17 @@ StmtResult TreeTransform::TransformOMPInterchangeDirective( return Res; } +template +StmtResult +TreeTransform::TransformOMPFuseDirective(OMPFuseDirective *D) { + DeclarationNameInfo DirName; + getDerived().getSema().OpenMP().StartOpenMPDSABlock( + D->getDirectiveKind(), DirName, nullptr, D->getBeginLoc()); + StmtResult Res = getDerived().TransformOMPExecutableDirective(D); + getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get()); + return Res; +} + template StmtResult TreeTransform::TransformOMPForDirective(OMPForDirective *D) { @@ -10500,6 +10519,31 @@ TreeTransform::TransformOMPPartialClause(OMPPartialClause *C) { C->getEndLoc()); } +template +OMPClause * +TreeTransform::TransformOMPLoopRangeClause(OMPLoopRangeClause *C) { + ExprResult F = getDerived().TransformExpr(C->getFirst()); + if (F.isInvalid()) + return nullptr; + + ExprResult Cn = getDerived().TransformExpr(C->getCount()); + if (Cn.isInvalid()) + return nullptr; + + Expr *First = F.get(); + Expr *Count = Cn.get(); + + bool Changed = (First != C->getFirst()) || (Count != C->getCount()); + + // If no changes and AlwaysRebuild() is false, return the original clause + if (!Changed && !getDerived().AlwaysRebuild()) + return C; + + return RebuildOMPLoopRangeClause(First, Count, C->getBeginLoc(), + C->getLParenLoc(), C->getFirstLoc(), + C->getCountLoc(), C->getEndLoc()); +} + template OMPClause * TreeTransform::TransformOMPCollapseClause(OMPCollapseClause *C) { @@ -16289,20 +16333,68 @@ TreeTransform::TransformPackIndexingExpr(PackIndexingExpr *E) { IndexExpr.get(), ExpandedExprs, FullySubstituted); } -template -ExprResult -TreeTransform::TransformSubstNonTypeTemplateParmPackExpr( - SubstNonTypeTemplateParmPackExpr *E) { - // Default behavior is to do nothing with this transformation. - return E; +template +ExprResult TreeTransform::TransformSubstNonTypeTemplateParmPackExpr( + SubstNonTypeTemplateParmPackExpr *E) { + if (!getSema().ArgPackSubstIndex) + // We aren't expanding the parameter pack, so just return ourselves. + return E; + + TemplateArgument Pack = E->getArgumentPack(); + TemplateArgument Arg = SemaRef.getPackSubstitutedTemplateArgument(Pack); + return SemaRef.BuildSubstNonTypeTemplateParmExpr( + E->getAssociatedDecl(), E->getParameterPack(), + E->getParameterPackLocation(), Arg, SemaRef.getPackIndex(Pack), + E->getFinal()); } -template -ExprResult -TreeTransform::TransformSubstNonTypeTemplateParmExpr( - SubstNonTypeTemplateParmExpr *E) { - // Default behavior is to do nothing with this transformation. - return E; +template +ExprResult TreeTransform::TransformSubstNonTypeTemplateParmExpr( + SubstNonTypeTemplateParmExpr *E) { + Expr *OrigReplacement = E->getReplacement()->IgnoreImplicitAsWritten(); + ExprResult Replacement = getDerived().TransformExpr(OrigReplacement); + if (Replacement.isInvalid()) + return true; + + Decl *AssociatedDecl = + getDerived().TransformDecl(E->getNameLoc(), E->getAssociatedDecl()); + if (!AssociatedDecl) + return true; + + if (Replacement.get() == OrigReplacement && + AssociatedDecl == E->getAssociatedDecl()) + return E; + + // If the replacement expression did not change, and the parameter type + // did not change, we can skip the semantic action because it would + // produce the same result anyway. 
+ auto *Param = cast( + getReplacedTemplateParameterList(AssociatedDecl) + ->asArray()[E->getIndex()]); + if (QualType ParamType = Param->getType(); + !SemaRef.Context.hasSameType(ParamType, E->getParameter()->getType()) || + Replacement.get() != OrigReplacement) { + + // When transforming the replacement expression previously, all Sema + // specific annotations, such as implicit casts, are discarded. Calling the + // corresponding sema action is necessary to recover those. Otherwise, + // equivalency of the result would be lost. + TemplateArgument SugaredConverted, CanonicalConverted; + Replacement = SemaRef.CheckTemplateArgument( + Param, ParamType, Replacement.get(), SugaredConverted, + CanonicalConverted, + /*StrictCheck=*/false, Sema::CTAK_Specified); + if (Replacement.isInvalid()) + return true; + } else { + // Otherwise, the same expression would have been produced. + Replacement = E->getReplacement(); + } + + return new (SemaRef.Context) SubstNonTypeTemplateParmExpr( + Replacement.get()->getType(), Replacement.get()->getValueKind(), + E->getNameLoc(), Replacement.get(), AssociatedDecl, E->getIndex(), + E->getPackIndex(), E->isReferenceParameter(), E->getFinal()); } template diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp index 9ee8a0fb0f060..6acf79acea111 100644 --- a/clang/lib/Serialization/ASTReader.cpp +++ b/clang/lib/Serialization/ASTReader.cpp @@ -11215,6 +11215,9 @@ OMPClause *OMPClauseReader::readClause() { case llvm::omp::OMPC_partial: C = OMPPartialClause::CreateEmpty(Context); break; + case llvm::omp::OMPC_looprange: + C = OMPLoopRangeClause::CreateEmpty(Context); + break; case llvm::omp::OMPC_allocator: C = new (Context) OMPAllocatorClause(); break; @@ -11618,6 +11621,14 @@ void OMPClauseReader::VisitOMPPartialClause(OMPPartialClause *C) { C->setLParenLoc(Record.readSourceLocation()); } +void OMPClauseReader::VisitOMPLoopRangeClause(OMPLoopRangeClause *C) { + C->setFirst(Record.readSubExpr()); + C->setCount(Record.readSubExpr()); + C->setLParenLoc(Record.readSourceLocation()); + C->setFirstLoc(Record.readSourceLocation()); + C->setCountLoc(Record.readSourceLocation()); +} + void OMPClauseReader::VisitOMPAllocatorClause(OMPAllocatorClause *C) { C->setAllocator(Record.readExpr()); C->setLParenLoc(Record.readSourceLocation()); @@ -12849,10 +12860,9 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() { llvm::SmallVector RecipeList; for (unsigned I = 0; I < VarList.size(); ++I) { - static_assert(sizeof(OpenACCPrivateRecipe) == 2 * sizeof(int *)); + static_assert(sizeof(OpenACCPrivateRecipe) == 1 * sizeof(int *)); VarDecl *Alloca = readDeclAs(); - Expr *InitExpr = readSubExpr(); - RecipeList.push_back({Alloca, InitExpr}); + RecipeList.push_back({Alloca}); } return OpenACCPrivateClause::Create(getContext(), BeginLoc, LParenLoc, @@ -12875,11 +12885,10 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() { llvm::SmallVector VarList = readOpenACCVarList(); llvm::SmallVector RecipeList; for (unsigned I = 0; I < VarList.size(); ++I) { - static_assert(sizeof(OpenACCFirstPrivateRecipe) == 3 * sizeof(int *)); + static_assert(sizeof(OpenACCFirstPrivateRecipe) == 2 * sizeof(int *)); VarDecl *Recipe = readDeclAs(); - Expr *InitExpr = readSubExpr(); VarDecl *RecipeTemp = readDeclAs(); - RecipeList.push_back({Recipe, InitExpr, RecipeTemp}); + RecipeList.push_back({Recipe, RecipeTemp}); } return OpenACCFirstPrivateClause::Create(getContext(), BeginLoc, LParenLoc, @@ -13000,10 +13009,9 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() { 
llvm::SmallVector RecipeList; for (unsigned I = 0; I < VarList.size(); ++I) { - static_assert(sizeof(OpenACCReductionRecipe) == 2 * sizeof(int *)); + static_assert(sizeof(OpenACCReductionRecipe) == sizeof(int *)); VarDecl *Recipe = readDeclAs(); - Expr *InitExpr = readSubExpr(); - RecipeList.push_back({Recipe, InitExpr}); + RecipeList.push_back({Recipe}); } return OpenACCReductionClause::Create(getContext(), BeginLoc, LParenLoc, Op, diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp index 213c2c2148f64..70b898a53fcbd 100644 --- a/clang/lib/Serialization/ASTReaderStmt.cpp +++ b/clang/lib/Serialization/ASTReaderStmt.cpp @@ -2469,10 +2469,21 @@ void ASTStmtReader::VisitOMPReverseDirective(OMPReverseDirective *D) { VisitOMPCanonicalLoopNestTransformationDirective(D); } +void ASTStmtReader::VisitOMPCanonicalLoopSequenceTransformationDirective( + OMPCanonicalLoopSequenceTransformationDirective *D) { + VisitStmt(D); + VisitOMPExecutableDirective(D); + D->setNumGeneratedTopLevelLoops(Record.readUInt32()); +} + void ASTStmtReader::VisitOMPInterchangeDirective(OMPInterchangeDirective *D) { VisitOMPCanonicalLoopNestTransformationDirective(D); } +void ASTStmtReader::VisitOMPFuseDirective(OMPFuseDirective *D) { + VisitOMPCanonicalLoopSequenceTransformationDirective(D); +} + void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) { VisitOMPLoopDirective(D); D->setHasCancel(Record.readBool()); @@ -3615,6 +3626,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) { break; } + case STMT_OMP_FUSE_DIRECTIVE: { + unsigned NumClauses = Record[ASTStmtReader::NumStmtFields]; + S = OMPFuseDirective::CreateEmpty(Context, NumClauses); + break; + } + case STMT_OMP_INTERCHANGE_DIRECTIVE: { unsigned NumLoops = Record[ASTStmtReader::NumStmtFields]; unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1]; diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index 09859da171fcd..09b1e58ef220c 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -7882,6 +7882,14 @@ void OMPClauseWriter::VisitOMPPartialClause(OMPPartialClause *C) { Record.AddSourceLocation(C->getLParenLoc()); } +void OMPClauseWriter::VisitOMPLoopRangeClause(OMPLoopRangeClause *C) { + Record.AddStmt(C->getFirst()); + Record.AddStmt(C->getCount()); + Record.AddSourceLocation(C->getLParenLoc()); + Record.AddSourceLocation(C->getFirstLoc()); + Record.AddSourceLocation(C->getCountLoc()); +} + void OMPClauseWriter::VisitOMPAllocatorClause(OMPAllocatorClause *C) { Record.AddStmt(C->getAllocator()); Record.AddSourceLocation(C->getLParenLoc()); @@ -8771,9 +8779,8 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) { writeOpenACCVarList(PC); for (const OpenACCPrivateRecipe &R : PC->getInitRecipes()) { - static_assert(sizeof(R) == 2 * sizeof(int *)); + static_assert(sizeof(R) == 1 * sizeof(int *)); AddDeclRef(R.AllocaDecl); - AddStmt(const_cast(R.InitExpr)); } return; } @@ -8795,9 +8802,8 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) { writeOpenACCVarList(FPC); for (const OpenACCFirstPrivateRecipe &R : FPC->getInitRecipes()) { - static_assert(sizeof(R) == 3 * sizeof(int *)); + static_assert(sizeof(R) == 2 * sizeof(int *)); AddDeclRef(R.AllocaDecl); - AddStmt(const_cast(R.InitExpr)); AddDeclRef(R.InitFromTemporary); } return; @@ -8919,9 +8925,8 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) { writeOpenACCVarList(RC); for (const OpenACCReductionRecipe &R : 
RC->getRecipes()) { - static_assert(sizeof(OpenACCReductionRecipe) == 2 * sizeof(int *)); + static_assert(sizeof(OpenACCReductionRecipe) == 1 * sizeof(int *)); AddDeclRef(R.AllocaDecl); - AddStmt(const_cast(R.InitExpr)); } return; } diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp index 21c04ddbc2c7a..ebda91e3819c3 100644 --- a/clang/lib/Serialization/ASTWriterStmt.cpp +++ b/clang/lib/Serialization/ASTWriterStmt.cpp @@ -2487,6 +2487,18 @@ void ASTStmtWriter::VisitOMPInterchangeDirective(OMPInterchangeDirective *D) { Code = serialization::STMT_OMP_INTERCHANGE_DIRECTIVE; } +void ASTStmtWriter::VisitOMPCanonicalLoopSequenceTransformationDirective( + OMPCanonicalLoopSequenceTransformationDirective *D) { + VisitStmt(D); + VisitOMPExecutableDirective(D); + Record.writeUInt32(D->getNumGeneratedTopLevelLoops()); +} + +void ASTStmtWriter::VisitOMPFuseDirective(OMPFuseDirective *D) { + VisitOMPCanonicalLoopSequenceTransformationDirective(D); + Code = serialization::STMT_OMP_FUSE_DIRECTIVE; +} + void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) { VisitOMPLoopDirective(D); Record.writeBool(D->hasCancel()); diff --git a/clang/lib/Serialization/ModuleCache.cpp b/clang/lib/Serialization/ModuleCache.cpp index f42bdc16d815d..96687277ebafd 100644 --- a/clang/lib/Serialization/ModuleCache.cpp +++ b/clang/lib/Serialization/ModuleCache.cpp @@ -16,6 +16,87 @@ using namespace clang; +/// Write a new timestamp file with the given path. +static void writeTimestampFile(StringRef TimestampFile) { + std::error_code EC; + llvm::raw_fd_ostream Out(TimestampFile.str(), EC, llvm::sys::fs::OF_None); +} + +void clang::maybePruneImpl(StringRef Path, time_t PruneInterval, + time_t PruneAfter) { + if (PruneInterval <= 0 || PruneAfter <= 0) + return; + + llvm::SmallString<128> TimestampFile(Path); + llvm::sys::path::append(TimestampFile, "modules.timestamp"); + + // Try to stat() the timestamp file. + llvm::sys::fs::file_status StatBuf; + if (std::error_code EC = llvm::sys::fs::status(TimestampFile, StatBuf)) { + // If the timestamp file wasn't there, create one now. + if (EC == std::errc::no_such_file_or_directory) + writeTimestampFile(TimestampFile); + return; + } + + // Check whether the time stamp is older than our pruning interval. + // If not, do nothing. + time_t TimestampModTime = + llvm::sys::toTimeT(StatBuf.getLastModificationTime()); + time_t CurrentTime = time(nullptr); + if (CurrentTime - TimestampModTime <= PruneInterval) + return; + + // Write a new timestamp file so that nobody else attempts to prune. + // There is a benign race condition here, if two Clang instances happen to + // notice at the same time that the timestamp is out-of-date. + writeTimestampFile(TimestampFile); + + // Walk the entire module cache, looking for unused module files and module + // indices. + std::error_code EC; + for (llvm::sys::fs::directory_iterator Dir(Path, EC), DirEnd; + Dir != DirEnd && !EC; Dir.increment(EC)) { + // If we don't have a directory, there's nothing to look into. + if (!llvm::sys::fs::is_directory(Dir->path())) + continue; + + // Walk all the files within this directory. + for (llvm::sys::fs::directory_iterator File(Dir->path(), EC), FileEnd; + File != FileEnd && !EC; File.increment(EC)) { + // We only care about module and global module index files. 
+ StringRef Extension = llvm::sys::path::extension(File->path()); + if (Extension != ".pcm" && Extension != ".timestamp" && + llvm::sys::path::filename(File->path()) != "modules.idx") + continue; + + // Look at this file. If we can't stat it, there's nothing interesting + // there. + if (llvm::sys::fs::status(File->path(), StatBuf)) + continue; + + // If the file has been used recently enough, leave it there. + time_t FileAccessTime = llvm::sys::toTimeT(StatBuf.getLastAccessedTime()); + if (CurrentTime - FileAccessTime <= PruneAfter) + continue; + + // Remove the file. + llvm::sys::fs::remove(File->path()); + + // Remove the timestamp file. + std::string TimestampFilename = File->path() + ".timestamp"; + llvm::sys::fs::remove(TimestampFilename); + } + + // If we removed all the files in the directory, remove the directory + // itself. + if (llvm::sys::fs::directory_iterator(Dir->path(), EC) == + llvm::sys::fs::directory_iterator() && + !EC) + llvm::sys::fs::remove(Dir->path()); + } +} + namespace { class CrossProcessModuleCache : public ModuleCache { InMemoryModuleCache InMemory; @@ -53,6 +134,11 @@ class CrossProcessModuleCache : public ModuleCache { OS.clear_error(); // Avoid triggering a fatal error. } + void maybePrune(StringRef Path, time_t PruneInterval, + time_t PruneAfter) override { + maybePruneImpl(Path, PruneInterval, PruneAfter); + } + InMemoryModuleCache &getInMemoryModuleCache() override { return InMemory; } const InMemoryModuleCache &getInMemoryModuleCache() const override { return InMemory; diff --git a/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp index e64153d53bbd6..309e3d250de06 100644 --- a/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp @@ -129,7 +129,8 @@ class AnalysisOrderChecker llvm::errs() << " {argno: " << Call.getNumArgs() << '}'; llvm::errs() << " [" << Call.getKindAsString() << ']'; llvm::errs() << '\n'; - return true; + // We can't return `true` from this callback without binding the return + // value. Let's just fall through here and return `false`. } return false; } diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp index 36f316df0c3ff..0ae784c000f60 100644 --- a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp @@ -672,6 +672,10 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C, ProgramStateRef stateTrue, stateFalse; + if (!First.Expression->getType()->isAnyPointerType() || + !Second.Expression->getType()->isAnyPointerType()) + return state; + // Assume different address spaces cannot overlap. if (First.Expression->getType()->getPointeeType().getAddressSpace() != Second.Expression->getType()->getPointeeType().getAddressSpace()) diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp index 392c7eeea234a..c71623575ae97 100644 --- a/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp @@ -262,6 +262,15 @@ class CheckerDocumentation /// state. This callback allows a checker to provide domain specific knowledge /// about the particular functions it knows about. /// + /// Note that to evaluate a call, the handler MUST bind the return value if + /// it's a non-void function. Invalidate the arguments if necessary.
+ /// + /// Note that in general, user-provided functions should not be eval-called + /// because the checker can't predict the exact semantics/contract of the + /// callee, and by having the eval::Call callback, we also prevent it from + /// getting inlined, potentially regressing analysis quality. + /// Consider using check::PreCall or check::PostCall to allow inlining. + /// /// \returns true if the call has been successfully evaluated /// and false otherwise. Note, that only one checker can evaluate a call. If /// more than one checker claims that they can evaluate the same call the diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp index a97a37f85e96c..15a0c5a7fd9dc 100644 --- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RawPtrRefMemberChecker.cpp @@ -130,17 +130,16 @@ class RawPtrRefMemberChecker if (BR->getSourceManager().isInSystemHeader(CD->getLocation())) return; - ObjCContainerDecl::PropertyMap map; - CD->collectPropertiesToImplement(map); - for (auto it : map) - visitObjCPropertyDecl(CD, it.second); - - if (auto *ID = dyn_cast(CD)) { - for (auto *Ivar : ID->ivars()) - visitIvarDecl(CD, Ivar); - return; - } if (auto *ID = dyn_cast(CD)) { + ObjCContainerDecl::PropertyMap map; + CD->collectPropertiesToImplement(map); + for (auto it : map) + visitObjCPropertyDecl(CD, it.second); + + if (auto *Interface = ID->getClassInterface()) { + for (auto *Ivar : Interface->ivars()) + visitIvarDecl(CD, Ivar); + } for (auto *PropImpl : ID->property_impls()) visitPropImpl(CD, PropImpl); for (auto *Ivar : ID->ivars()) diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp index e1f9a77f5a5ca..955b8d19a820c 100644 --- a/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/RetainPtrCtorAdoptChecker.cpp @@ -385,6 +385,10 @@ class RetainPtrCtorAdoptChecker if (RTC.isUnretained(RetValue->getType())) return; } + if (retainsRet && *retainsRet) { + CreateOrCopyFnCall.insert(RetValue); + return; + } if (auto *CE = dyn_cast(RetValue)) { auto *Callee = CE->getDirectCallee(); if (!Callee || !isCreateOrCopyFunction(Callee)) diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp index 180056cf68b64..06ba01507fa4f 100644 --- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp +++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp @@ -1254,6 +1254,15 @@ template <> struct DenseMapInfo { }; } // end namespace llvm +// NOTE: This cache is a "global" variable, and it is cleared by +// CallEventManager's constructor so we do not keep old entries when +// loading/unloading ASTs. If we are worried about concurrency, we may need to +// revisit this someday. In terms of memory, this table stays around until clang +// quits, which also may be bad if we need to release memory. +using PrivateMethodCacheTy = + llvm::DenseMap>; +static PrivateMethodCacheTy PrivateMethodCache; + static const ObjCMethodDecl * lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface, Selector LookupSelector, bool InstanceMethod) { @@ -1262,21 +1271,8 @@ lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface, // that repeated queries on the same ObjCIntefaceDecl and Selector // don't incur the same cost. 
On some test cases, we can see the // same query being issued thousands of times. - // - // NOTE: This cache is essentially a "global" variable, but it - // only gets lazily created when we get here. The value of the - // cache probably comes from it being global across ExprEngines, - // where the same queries may get issued. If we are worried about - // concurrency, or possibly loading/unloading ASTs, etc., we may - // need to revisit this someday. In terms of memory, this table - // stays around until clang quits, which also may be bad if we - // need to release memory. - using PrivateMethodCache = - llvm::DenseMap>; - - static PrivateMethodCache PMC; std::optional &Val = - PMC[{Interface, LookupSelector, InstanceMethod}]; + PrivateMethodCache[{Interface, LookupSelector, InstanceMethod}]; // Query lookupPrivateMethod() if the cache does not hit. if (!Val) { @@ -1422,6 +1418,13 @@ void ObjCMethodCall::getInitialStackFrameContents( } } +CallEventManager::CallEventManager(llvm::BumpPtrAllocator &alloc) + : Alloc(alloc) { + // Clear the method cache to avoid hits when multiple AST are loaded/unloaded + // within a single process. This can happen with unit tests, for instance. + PrivateMethodCache.clear(); +} + CallEventRef<> CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State, const LocationContext *LCtx, diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp index 785cdfa15bf04..4e472b7fc38b0 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp @@ -1814,6 +1814,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Stmt::OMPStripeDirectiveClass: case Stmt::OMPTileDirectiveClass: case Stmt::OMPInterchangeDirectiveClass: + case Stmt::OMPFuseDirectiveClass: case Stmt::OMPInteropDirectiveClass: case Stmt::OMPDispatchDirectiveClass: case Stmt::OMPMaskedDirectiveClass: diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp index dee34e3e9d6a5..75d7e265af0f3 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp @@ -909,7 +909,14 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE, ExplodedNodeSet DstPostCall; StmtNodeBuilder CallBldr(DstPreCall, DstPostCall, *currBldrCtx); for (ExplodedNode *I : DstPreCall) { - // FIXME: Provide evalCall for checkers? + // Operator new calls (CXXNewExpr) are intentionally not eval-called, + // because it does not make sense to eval-call user-provided functions. + // 1) If the new operator can be inlined, then don't prevent it from + // inlining by having an eval-call of that operator. + // 2) If it can't be inlined, then the default conservative modeling + // is what we want anyway. + // So the best is to not allow eval-calling CXXNewExprs from checkers. + // Checkers can provide their pre/post-call callbacks if needed. defaultEvalCall(CallBldr, I, *Call); } // If the call is inlined, DstPostCall will be empty and we bail out now. @@ -1110,6 +1117,10 @@ void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE, if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) { StmtNodeBuilder Bldr(DstPreCall, DstPostCall, *currBldrCtx); for (ExplodedNode *I : DstPreCall) { + // Intentionally either inline or conservative eval-call the operator + // delete, but avoid triggering an eval-call event for checkers. 
+ // As detailed at handling CXXNewExprs, in short, because it does not + // really make sense to eval-call user-provided functions. defaultEvalCall(Bldr, I, *Call); } } else { diff --git a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp index 4c9c619f2487a..217b853305ed1 100644 --- a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp +++ b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "HTMLDiagnostics.h" #include "PlistDiagnostics.h" #include "SarifDiagnostics.h" #include "clang/AST/Decl.h" @@ -82,7 +83,7 @@ class HTMLDiagnostics : public PathDiagnosticConsumer { void FlushDiagnosticsImpl(std::vector &Diags, FilesMade *filesMade) override; - StringRef getName() const override { return "HTMLDiagnostics"; } + StringRef getName() const override { return HTML_DIAGNOSTICS_NAME; } bool supportsCrossFileDiagnostics() const override { return SupportsCrossFileDiagnostics; @@ -254,18 +255,6 @@ void HTMLDiagnostics::FlushDiagnosticsImpl( ReportDiag(*Diag, filesMade); } -static llvm::SmallString<32> getIssueHash(const PathDiagnostic &D, - const Preprocessor &PP) { - SourceManager &SMgr = PP.getSourceManager(); - PathDiagnosticLocation UPDLoc = D.getUniqueingLoc(); - FullSourceLoc L(SMgr.getExpansionLoc(UPDLoc.isValid() - ? UPDLoc.asLocation() - : D.getLocation().asLocation()), - SMgr); - return getIssueHash(L, D.getCheckerName(), D.getBugType(), - D.getDeclWithIssue(), PP.getLangOpts()); -} - void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, FilesMade *filesMade) { // Create the HTML directory if it is missing. @@ -310,7 +299,8 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, } } - SmallString<32> IssueHash = getIssueHash(D, PP); + SmallString<32> IssueHash = + D.getIssueHash(PP.getSourceManager(), PP.getLangOpts()); auto [It, IsNew] = EmittedHashes.insert(IssueHash); if (!IsNew) { // We've already emitted a duplicate issue. It'll get overwritten anyway. @@ -369,6 +359,12 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, if (EC != llvm::errc::file_exists) { llvm::errs() << "warning: could not create file in '" << Directory << "': " << EC.message() << '\n'; + } else if (filesMade) { + // Record that we created the file so that it gets referenced in the + // plist and SARIF reports for every translation unit that found the + // issue. + filesMade->addDiagnostic(D, getName(), + llvm::sys::path::filename(ResultPath)); } return; } @@ -679,8 +675,8 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic &D, Rewriter &R, os << "\n\n"; - os << "\n\n"; + os << "\n\n"; os << "\n c++26 that's shared by the driver code + // (clang/lib/Driver/ToolChains/Clang.cpp) and this file. + return LangStandard::lang_cxx26; +} + // A CompileCommand that can be applied to another file. struct TransferableCommand { // Flags that should not apply to all files are stripped from CommandLine. @@ -237,9 +246,16 @@ struct TransferableCommand { // --std flag may only be transferred if the language is the same. // We may consider "translating" these, e.g. c++11 -> c11. if (Std != LangStandard::lang_unspecified && foldType(TargetType) == Type) { - Result.CommandLine.emplace_back(( - llvm::Twine(ClangCLMode ? 
"/std:" : "-std=") + - LangStandard::getLangStandardForKind(Std).getName()).str()); + const char *Spelling = + LangStandard::getLangStandardForKind(Std).getName(); + // In clang-cl mode, the latest standard is spelled 'c++latest' rather + // than e.g. 'c++26', and the driver does not accept the latter, so emit + // the spelling that the driver does accept. + if (ClangCLMode && Std == latestLangStandard()) { + Spelling = "c++latest"; + } + Result.CommandLine.emplace_back( + (llvm::Twine(ClangCLMode ? "/std:" : "-std=") + Spelling).str()); } Result.CommandLine.push_back("--"); Result.CommandLine.push_back(std::string(Filename)); @@ -296,8 +312,14 @@ struct TransferableCommand { // Try to interpret the argument as '-std='. std::optional tryParseStdArg(const llvm::opt::Arg &Arg) { using namespace driver::options; - if (Arg.getOption().matches(ClangCLMode ? OPT__SLASH_std : OPT_std_EQ)) + if (Arg.getOption().matches(ClangCLMode ? OPT__SLASH_std : OPT_std_EQ)) { + // "c++latest" is not a recognized LangStandard, but it's accepted by + // the clang driver in CL mode. + if (ClangCLMode && StringRef(Arg.getValue()) == "c++latest") { + return latestLangStandard(); + } return LangStandard::getLangKind(Arg.getValue()); + } return std::nullopt; } }; diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp index 2d4790b205b1a..ea5a37216e959 100644 --- a/clang/lib/Tooling/Tooling.cpp +++ b/clang/lib/Tooling/Tooling.cpp @@ -458,7 +458,7 @@ bool FrontendActionFactory::runInvocation( if (!Compiler.hasDiagnostics()) return false; - Compiler.createSourceManager(*Files); + Compiler.createSourceManager(); const bool Success = Compiler.ExecuteAction(*ScopedToolAction); diff --git a/clang/test/AST/ByteCode/builtin-bit-cast.cpp b/clang/test/AST/ByteCode/builtin-bit-cast.cpp index a12f305caf877..c1d29b2ca4c00 100644 --- a/clang/test/AST/ByteCode/builtin-bit-cast.cpp +++ b/clang/test/AST/ByteCode/builtin-bit-cast.cpp @@ -556,6 +556,8 @@ namespace VectorCast { } static_assert(test2() == 0); + /// On s390x, S is only 8 bytes. +#if !defined(__s390x__) struct S { unsigned __int128 a : 3; }; @@ -569,6 +571,7 @@ namespace VectorCast { static_assert(s.a == 0); // ref-error {{not an integral constant expression}} \ // ref-note {{initializer of 's' is not a constant expression}} #endif +#endif } #endif diff --git a/clang/test/AST/ByteCode/const-eval.c b/clang/test/AST/ByteCode/const-eval.c index c6b51d16b811e..d6cf600b378a8 100644 --- a/clang/test/AST/ByteCode/const-eval.c +++ b/clang/test/AST/ByteCode/const-eval.c @@ -144,7 +144,7 @@ EVAL_EXPR(52, &pr24622 == (void *)&PR24622); // We evaluate these by providing 2s' complement semantics in constant // expressions, like we do for integers. 
-void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // both-warning {{the pointer incremented by 18446744073709551615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2305843009213693952 elements)}} +void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // both-warning {{the pointer incremented by 18'446'744'073'709'551'615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2'305'843'009'213'693'952 elements)}} void *PR28739b = &PR28739b + (__int128)(unsigned long)-1; // both-warning {{refers past the last possible element}} __int128 PR28739c = (&PR28739c + (__int128)(unsigned long)-1) - &PR28739c; // both-warning {{refers past the last possible element}} diff --git a/clang/test/AST/ByteCode/cxx03.cpp b/clang/test/AST/ByteCode/cxx03.cpp index 10e5232b9f873..58d7f3632082d 100644 --- a/clang/test/AST/ByteCode/cxx03.cpp +++ b/clang/test/AST/ByteCode/cxx03.cpp @@ -40,3 +40,9 @@ struct B2 : B { }; _Static_assert(&(B2().a) == &p, ""); // both-error {{taking the address of a temporary object of type 'int'}} \ // both-error {{not an integral constant expression}} + +typedef __attribute__((ext_vector_type(4))) int vi4b; +struct S { + vi4b w; +}; +const int s = S().w[1]; diff --git a/clang/test/AST/ByteCode/cxx23.cpp b/clang/test/AST/ByteCode/cxx23.cpp index 72c751d627a44..ce0a4777ffa9b 100644 --- a/clang/test/AST/ByteCode/cxx23.cpp +++ b/clang/test/AST/ByteCode/cxx23.cpp @@ -1,8 +1,8 @@ // UNSUPPORTED: target={{.*}}-zos{{.*}} -// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -verify=ref,ref20,all,all20 %s -// RUN: %clang_cc1 -std=c++23 -fsyntax-only -fcxx-exceptions -verify=ref,ref23,all,all23 %s -// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -verify=expected20,all,all20 %s -fexperimental-new-constant-interpreter -// RUN: %clang_cc1 -std=c++23 -fsyntax-only -fcxx-exceptions -verify=expected23,all,all23 %s -fexperimental-new-constant-interpreter +// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -Wno-deprecated-volatile -verify=ref,ref20,all,all20 %s +// RUN: %clang_cc1 -std=c++23 -fsyntax-only -fcxx-exceptions -Wno-deprecated-volatile -verify=ref,ref23,all,all23 %s +// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -Wno-deprecated-volatile -verify=expected20,all,all20 %s -fexperimental-new-constant-interpreter +// RUN: %clang_cc1 -std=c++23 -fsyntax-only -fcxx-exceptions -Wno-deprecated-volatile -verify=expected23,all,all23 %s -fexperimental-new-constant-interpreter #define assert_active(F) if (!__builtin_is_within_lifetime(&F)) (1/0); @@ -393,6 +393,59 @@ namespace UnionMemberCallDiags { static_assert(g()); // all-error {{not an integral constant expression}} \ // all-note {{in call to}} } +#endif + +namespace VolatileWrites { + constexpr void test1() {// all20-error {{never produces a constant expression}} + int k; + volatile int &m = k; + m = 10; // all20-note {{assignment to volatile-qualified type 'volatile int'}} + } + constexpr void test2() { // all20-error {{never produces a constant expression}} + volatile int k = 12; + k = 13; // all20-note {{assignment to volatile-qualified type 'volatile int'}} + } + + constexpr void test3() { // all20-error {{never produces a constant expression}} + volatile int k = 12; // all20-note {{volatile object declared here}} + + *((int *)&k) = 13; // all20-note {{assignment to volatile object 'k' is not allowed in a constant expression}} + } + + 
constexpr void test4() { // all20-error {{never produces a constant expression}} + int k = 12; + + *((volatile int *)&k) = 13; // all20-note {{assignment to volatile-qualified type 'volatile int' is not allowed in a constant expression}} + } + +#if __cplusplus >= 202302L + struct S { + volatile int k; + }; + constexpr int test5() { + S s; + s.k = 12; // all-note {{assignment to volatile-qualified type 'volatile int' is not}} + + return 0; + } + static_assert(test5() == 0); // all-error{{not an integral constant expression}} \ + // all-note {{in call to}} #endif + + constexpr bool test6(volatile int k) { // ref20-error {{never produces a constant expression}} + k = 14; // ref20-note {{assignment to volatile-qualified type 'volatile int' is not}} \ + // all-note {{assignment to volatile-qualified type 'volatile int' is not}} + return true; + } + static_assert(test6(5)); // all-error {{not an integral constant expression}} \ + // all-note {{in call to}} + + constexpr bool test7(volatile int k) { // all-note {{declared here}} + *((int *)&k) = 13; // all-note {{assignment to volatile object 'k' is not allowed in a constant expression}} + return true; + } + static_assert(test7(12)); // all-error {{not an integral constant expression}} \ + // all-note {{in call to}} +} diff --git a/clang/test/AST/ByteCode/invalid.cpp index affb40eada870..00db27419e36b 100644 --- a/clang/test/AST/ByteCode/invalid.cpp +++ b/clang/test/AST/ByteCode/invalid.cpp @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -fexperimental-new-constant-interpreter -verify=expected,both %s -// RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -verify=ref,both %s +// RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -verify=ref,both %s namespace Throw { diff --git a/clang/test/AST/ByteCode/literals.cpp index 5bc3f7f4c815c..5028ebfa3de30 100644 --- a/clang/test/AST/ByteCode/literals.cpp +++ b/clang/test/AST/ByteCode/literals.cpp @@ -28,6 +28,8 @@ static_assert(number != 10, ""); // both-error{{failed}} \ static_assert(__objc_yes, ""); static_assert(!__objc_no, ""); +static_assert((long long)0x00000000FFFF0000 == 4294901760, ""); + constexpr bool b = number; static_assert(b, ""); constexpr int one = true; diff --git a/clang/test/AST/ByteCode/new-delete.cpp index af747d7a15b12..f54854070573c 100644 --- a/clang/test/AST/ByteCode/new-delete.cpp +++ b/clang/test/AST/ByteCode/new-delete.cpp @@ -1091,6 +1091,19 @@ namespace NewNegSizeNothrow { static_assert(test_nothrow_neg_size(), "expected nullptr"); } // namespace NewNegSizeNothrow +#if __SIZEOF_SIZE_T__ == 8 +/// We can't allocate the array here as it is too big. +/// Make sure we're not crashing by assuming a non-null +/// Descriptor.
+namespace HugeAllocation { + void *p; + void foo () + { + p = new char [256][256][256][256][256]; + } +} +#endif + #else /// Make sure we reject this prior to C++20 constexpr int a() { // both-error {{never produces a constant expression}} diff --git a/clang/test/AST/ByteCode/strlen-unknown-size-array.cpp b/clang/test/AST/ByteCode/strlen-unknown-size-array.cpp new file mode 100644 index 0000000000000..ddc857009f57d --- /dev/null +++ b/clang/test/AST/ByteCode/strlen-unknown-size-array.cpp @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -std=c++20 -fexperimental-new-constant-interpreter %s -verify +// RUN: %clang_cc1 -std=c++20 %s -verify=ref + +// expected-no-diagnostics +// ref-no-diagnostics + +/// Test that __builtin_strlen() on external/unknown declarations doesn't crash the bytecode interpreter. +extern const char s[]; +void foo(char *x) +{ + unsigned long len = __builtin_strlen(s); + __builtin_strcpy(x, s); +} diff --git a/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl b/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl index 91441e32e047d..129ab7022f361 100644 --- a/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl +++ b/clang/test/AST/HLSL/RootSignature-Target-AST.hlsl @@ -1,9 +1,15 @@ // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \ +// RUN: -fdx-rootsignature-version=rootsig_1_0 \ +// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_0 + +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \ +// RUN: -fdx-rootsignature-version=rootsig_1_1 \ // RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_1 // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \ -// RUN: -fdx-rootsignature-version=rootsig_1_0 \ -// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_0 +// RUN: -fdx-rootsignature-version=rootsig_1_2 \ +// RUN: -hlsl-entry EntryRootSig -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_2 + // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-rootsignature -ast-dump \ // RUN: -D CmdRS='"UAV(u0)"'\ @@ -12,11 +18,13 @@ // CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[ENTRY_RS_DECL:__hlsl_rootsig_decl_\d*]] // CHECK-V1_0-SAME: version: 1.0, // CHECK-V1_1-SAME: version: 1.1, +// CHECK-V1_2-SAME: version: 1.2, // CHECK-SAME: RootElements{ // CHECK-SAME: RootCBV(b0, // CHECK-SAME: space = 0, visibility = All, // CHECK-V1_0-SAME: flags = DataVolatile // CHECK-V1_1-SAME: flags = DataStaticWhileSetAtExecute +// CHECK-V1_2-SAME: flags = DataStaticWhileSetAtExecute // CHECK-SAME: ) // CHECK-SAME: } #define EntryRootSig "CBV(b0)" diff --git a/clang/test/AST/HLSL/RootSignatures-AST.hlsl b/clang/test/AST/HLSL/RootSignatures-AST.hlsl index 32da1f14853b0..0f0f3a5ca706f 100644 --- a/clang/test/AST/HLSL/RootSignatures-AST.hlsl +++ b/clang/test/AST/HLSL/RootSignatures-AST.hlsl @@ -6,6 +6,9 @@ // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \ // RUN: -fdx-rootsignature-version=rootsig_1_1 \ // RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_1 +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -ast-dump \ +// RUN: -fdx-rootsignature-version=rootsig_1_2 \ +// RUN: -disable-llvm-passes -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-V1_2 // This test ensures that the sample root signature is parsed without error and // the Attr AST Node is created succesfully. 
If an invalid root signature was @@ -31,6 +34,7 @@ // CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[SAMPLE_RS_DECL:__hlsl_rootsig_decl_\d*]] // CHECK-V1_0: version: 1.0, // CHECK-V1_1: version: 1.1, +// CHECK-V1_2: version: 1.2, // CHECK-SAME: RootElements{ // CHECK-SAME: RootFlags(AllowInputAssemblerInputLayout | DenyVertexShaderRootAccess), // CHECK-SAME: RootCBV(b0, @@ -62,6 +66,7 @@ // CHECK-SAME: s0, numDescriptors = 4, space = 1, offset = DescriptorTableOffsetAppend, // CHECK-V1_0-SAME: flags = DescriptorsVolatile // CHECK-V1_1-SAME: flags = None +// CHECK-V1_2-SAME: flags = None // CHECK-SAME: ), // CHECK-SAME: DescriptorTable( // CHECK-SAME: numClauses = 1, visibility = All @@ -73,6 +78,7 @@ // CHECK-SAME: s1, filter = Anisotropic, addressU = Wrap, addressV = Wrap, addressW = Wrap, // CHECK-SAME: mipLODBias = 0.000000e+00, maxAnisotropy = 16, comparisonFunc = LessEqual, // CHECK-SAME: borderColor = OpaqueWhite, minLOD = 0.000000e+00, maxLOD = 3.402823e+38, space = 0, visibility = All +// CHECK-SAME: flags = None // CHECK-SAME: )} // CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[SAMPLE_RS_DECL]] @@ -131,3 +137,24 @@ void same_rs_string_main() {} // CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[DIFF_RS_DECL]] [RootSignature(SampleDifferentRS)] void different_rs_string_main() {} + +#define SampleStaticSamplerRS \ + "StaticSampler(s0, flags = NON_NORMALIZED_COORDINATES)" + +// Ensure that static sampler flags are correctly parsed in different versions + +// CHECK: -HLSLRootSignatureDecl 0x{{.*}} {{.*}} implicit [[DIFF_RS_DECL:__hlsl_rootsig_decl_\d*]] +// CHECK-V1_0: version: 1.0, +// CHECK-V1_1: version: 1.1, +// CHECK-V1_2: version: 1.2, +// CHECK-SAME: RootElements{ +// CHECK-SAME: StaticSampler( +// CHECK-SAME: s0, filter = Anisotropic, addressU = Wrap, addressV = Wrap, addressW = Wrap, +// CHECK-SAME: mipLODBias = 0.000000e+00, maxAnisotropy = 16, comparisonFunc = LessEqual, +// CHECK-SAME: borderColor = OpaqueWhite, minLOD = 0.000000e+00, maxLOD = 3.402823e+38, space = 0, visibility = All +// CHECK-SAME: flags = NonNormalizedCoordinates +// CHECK-SAME: )} + +// CHECK: -RootSignatureAttr 0x{{.*}} {{.*}} [[DIFF_RS_DECL]] +[RootSignature(SampleStaticSamplerRS)] +void static_sampler_v12_main() {} diff --git a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl index a490b22ab437b..6779abb10bec4 100644 --- a/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl +++ b/clang/test/AST/HLSL/StructuredBuffers-AST.hlsl @@ -12,7 +12,7 @@ // // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump \ // RUN: -DRESOURCE=RWStructuredBuffer %s | FileCheck -DRESOURCE=RWStructuredBuffer \ -// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-SUBSCRIPT,CHECK-SUBSCRIPT-UAV,CHECK-COUNTER,CHECK-LOAD %s +// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-SUBSCRIPT,CHECK-SUBSCRIPT-UAV,CHECK-COUNTER,CHECK-LOAD,CHECK-COUNTER-HANDLE %s // // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump -DEMPTY \ // RUN: -DRESOURCE=AppendStructuredBuffer %s | FileCheck -DRESOURCE=AppendStructuredBuffer \ @@ -20,7 +20,7 @@ // // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump \ // RUN: -DRESOURCE=AppendStructuredBuffer %s | FileCheck -DRESOURCE=AppendStructuredBuffer \ -// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-NOSUBSCRIPT,CHECK-APPEND %s +// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-NOSUBSCRIPT,CHECK-APPEND,CHECK-COUNTER-HANDLE %s // // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump -DEMPTY 
\ // RUN: -DRESOURCE=ConsumeStructuredBuffer %s | FileCheck -DRESOURCE=ConsumeStructuredBuffer \ @@ -28,7 +28,7 @@ // // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump \ // RUN: -DRESOURCE=ConsumeStructuredBuffer %s | FileCheck -DRESOURCE=ConsumeStructuredBuffer \ -// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-NOSUBSCRIPT,CHECK-CONSUME %s +// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-NOSUBSCRIPT,CHECK-CONSUME,CHECK-COUNTER-HANDLE %s // // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump -DEMPTY \ // RUN: -DRESOURCE=RasterizerOrderedStructuredBuffer %s | FileCheck -DRESOURCE=RasterizerOrderedStructuredBuffer \ @@ -36,7 +36,7 @@ // // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-library -x hlsl -ast-dump \ // RUN: -DRESOURCE=RasterizerOrderedStructuredBuffer %s | FileCheck -DRESOURCE=RasterizerOrderedStructuredBuffer \ -// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-ROV,CHECK-SUBSCRIPT,CHECK-SUBSCRIPT-UAV,CHECK-LOAD %s +// RUN: -check-prefixes=CHECK,CHECK-UAV,CHECK-ROV,CHECK-SUBSCRIPT,CHECK-SUBSCRIPT-UAV,CHECK-LOAD,CHECK-COUNTER-HANDLE %s // This test tests two different AST generations for each structured buffer. // The "EMPTY" test mode verifies the AST generated by forward declaration @@ -113,6 +113,11 @@ RESOURCE Buffer; // CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]' lvalue implicit this // CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle // CHECK-NEXT: DeclRefExpr {{.*}} 'const hlsl::[[RESOURCE]]' ParmVar {{.*}} 'other' 'const hlsl::[[RESOURCE]] &' +// CHECK-COUNTER-HANDLE-NEXT: BinaryOperator {{.*}} '=' +// CHECK-COUNTER-HANDLE-NEXT: MemberExpr {{.*}} lvalue .__counter_handle +// CHECK-COUNTER-HANDLE-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]' lvalue implicit this +// CHECK-COUNTER-HANDLE-NEXT: MemberExpr {{.*}} lvalue .__counter_handle +// CHECK-COUNTER-HANDLE-NEXT: DeclRefExpr {{.*}} 'const hlsl::[[RESOURCE]]' ParmVar {{.*}} 'other' 'const hlsl::[[RESOURCE]] &' // CHECK-NEXT: AlwaysInlineAttr // operator= @@ -125,6 +130,11 @@ RESOURCE Buffer; // CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]' lvalue implicit this // CHECK-NEXT: MemberExpr {{.*}} lvalue .__handle // CHECK-NEXT: DeclRefExpr {{.*}} 'const hlsl::[[RESOURCE]]' ParmVar {{.*}} 'other' 'const hlsl::[[RESOURCE]] &' +// CHECK-COUNTER-HANDLE: BinaryOperator {{.*}} '=' +// CHECK-COUNTER-HANDLE: MemberExpr {{.*}} lvalue .__counter_handle +// CHECK-COUNTER-HANDLE: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]' lvalue implicit this +// CHECK-COUNTER-HANDLE: MemberExpr {{.*}} lvalue .__counter_handle +// CHECK-COUNTER-HANDLE: DeclRefExpr {{.*}} 'const hlsl::[[RESOURCE]]' ParmVar {{.*}} 'other' 'const hlsl::[[RESOURCE]] &' // CHECK-NEXT: ReturnStmt // CHECK-NEXT: CXXThisExpr {{.*}} 'hlsl::[[RESOURCE]]' lvalue implicit this // CHECK-NEXT: AlwaysInlineAttr @@ -334,3 +344,8 @@ RESOURCE Buffer; // CHECK-ROV-SAME{LITERAL}: [[hlsl::is_rov]] // CHECK-SAME{LITERAL}: [[hlsl::raw_buffer]] // CHECK-SAME{LITERAL}: [[hlsl::contained_type(float)]] +// CHECK-COUNTER-HANDLE: FieldDecl {{.*}} implicit referenced __counter_handle '__hlsl_resource_t +// CHECK-COUNTER-HANDLE-SAME{LITERAL}: [[hlsl::resource_class(UAV)]] +// CHECK-COUNTER-HANDLE-SAME{LITERAL}: [[hlsl::raw_buffer]] +// CHECK-COUNTER-HANDLE-SAME{LITERAL}: [[hlsl::is_counter]] +// CHECK-COUNTER-HANDLE-SAME{LITERAL}: [[hlsl::contained_type(float)]] diff --git a/clang/test/AST/HLSL/matrix-alias.hlsl b/clang/test/AST/HLSL/matrix-alias.hlsl new file mode 100644 index 0000000000000..2758b6f0d202f --- /dev/null +++ 
b/clang/test/AST/HLSL/matrix-alias.hlsl @@ -0,0 +1,49 @@ +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-compute -ast-dump -o - %s | FileCheck %s + +// Test that matrix aliases are set up properly for HLSL + +// CHECK: NamespaceDecl 0x{{[0-9a-fA-F]+}} <> implicit hlsl +// CHECK-NEXT: TypeAliasTemplateDecl 0x{{[0-9a-fA-F]+}} <> implicit vector +// CHECK-NEXT: TemplateTypeParmDecl 0x{{[0-9a-fA-F]+}} <> class depth 0 index 0 element +// CHECK-NEXT: TemplateArgument type 'float' +// CHECK-NEXT: BuiltinType 0x{{[0-9a-fA-F]+}} 'float' +// CHECK-NEXT: NonTypeTemplateParmDecl 0x{{[0-9a-fA-F]+}} <> 'int' depth 0 index 1 element_count +// CHECK-NEXT: TemplateArgument expr +// CHECK-NEXT: IntegerLiteral 0x{{[0-9a-fA-F]+}} <> 'int' 4 +// CHECK-NEXT: TypeAliasDecl 0x{{[0-9a-fA-F]+}} <> implicit vector 'vector' +// CHECK-NEXT: DependentSizedExtVectorType 0x{{[0-9a-fA-F]+}} 'vector' dependent +// CHECK-NEXT: TemplateTypeParmType 0x{{[0-9a-fA-F]+}} 'element' dependent depth 0 index 0 +// CHECK-NEXT: TemplateTypeParm 0x{{[0-9a-fA-F]+}} 'element' +// CHECK-NEXT: DeclRefExpr 0x{{[0-9a-fA-F]+}} <> 'int' lvalue +// CHECK-SAME: NonTypeTemplateParm 0x{{[0-9a-fA-F]+}} 'element_count' 'int' + +// Make sure we got a using directive at the end. +// CHECK: UsingDirectiveDecl 0x{{[0-9a-fA-F]+}} <> Namespace 0x{{[0-9a-fA-F]+}} 'hlsl' + +[numthreads(1,1,1)] +int entry() { + // Verify that the alias is generated inside the hlsl namespace. + hlsl::matrix Mat2x2f; + + // CHECK: DeclStmt 0x{{[0-9a-fA-F]+}} + // CHECK-NEXT: VarDecl 0x{{[0-9a-fA-F]+}} col:29 Mat2x2f 'hlsl::matrix' + + // Verify that you don't need to specify the namespace. + matrix Mat2x2i; + + // CHECK: DeclStmt 0x{{[0-9a-fA-F]+}} + // CHECK-NEXT: VarDecl 0x{{[0-9a-fA-F]+}} col:21 Mat2x2i 'matrix' + + // Build a bigger matrix. + matrix Mat4x4d; + + // CHECK: DeclStmt 0x{{[0-9a-fA-F]+}} + // CHECK-NEXT: VarDecl 0x{{[0-9a-fA-F]+}} col:24 Mat4x4d 'matrix' + + // Verify that the implicit arguments generate the correct type. + matrix<> ImpMat4x4; + + // CHECK: DeclStmt 0x{{[0-9a-fA-F]+}} + // CHECK-NEXT: VarDecl 0x{{[0-9a-fA-F]+}} col:12 ImpMat4x4 'matrix<>':'matrix' + return 1; +} diff --git a/clang/test/AST/ast-dump-ppc-types.c b/clang/test/AST/ast-dump-ppc-types.c index 1c860c268e0ec..6112af5ebf92c 100644 --- a/clang/test/AST/ast-dump-ppc-types.c +++ b/clang/test/AST/ast-dump-ppc-types.c @@ -17,6 +17,8 @@ // are correctly defined. We also added checks on a couple of other targets to // ensure the types are target-dependent. 
+// CHECK: TypedefDecl {{.*}} implicit __dmr2048 '__dmr2048' +// CHECK: `-BuiltinType {{.*}} '__dmr2048' // CHECK: TypedefDecl {{.*}} implicit __dmr1024 '__dmr1024' // CHECK: `-BuiltinType {{.*}} '__dmr1024' // CHECK: TypedefDecl {{.*}} implicit __vector_quad '__vector_quad' diff --git a/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h b/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h index 39dee1746158b..dacb713130818 100644 --- a/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h +++ b/clang/test/Analysis/Checkers/WebKit/objc-mock-types.h @@ -17,6 +17,20 @@ template typename remove_reference::type&& move(T&& t); #endif +namespace std { + +template struct enable_if { +}; + +template struct enable_if { + using type = T; +}; + +template +using enable_if_t = typename enable_if::type; + +} + @class NSString; @class NSArray; @class NSMutableArray; @@ -100,6 +114,7 @@ id CFBridgingRelease(CFTypeRef X) { __attribute__((objc_root_class)) @interface NSObject + (instancetype) alloc; ++ (instancetype) allocWithZone:(NSZone *)zone; + (Class) class; + (Class) superclass; - (instancetype) init; @@ -232,6 +247,14 @@ template struct RemovePointer { typedef T Type; }; +template struct IsPointer { + static constexpr bool value = false; +}; + +template struct IsPointer { + static constexpr bool value = true; +}; + template struct RetainPtr { using ValueType = typename RemovePointer::Type; using PtrType = ValueType*; @@ -285,12 +308,23 @@ template struct RetainPtr { PtrType operator->() const { return t; } T &operator*() const { return *t; } RetainPtr &operator=(PtrType t); - PtrType leakRef() + + template + std::enable_if_t::value, U> leakRef() CF_RETURNS_RETAINED + { + PtrType s = t; + t = nullptr; + return s; + } + + template + std::enable_if_t::value, U> leakRef() NS_RETURNS_RETAINED { PtrType s = t; t = nullptr; return s; } + operator PtrType() const { return t; } operator bool() const { return t; } diff --git a/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm b/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm index 769901778cdf0..45705615f3196 100644 --- a/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm +++ b/clang/test/Analysis/Checkers/WebKit/retain-ptr-ctor-adopt-use.mm @@ -104,6 +104,14 @@ - (void)setValue:value { _number = value; } +- (id)copyWithZone:(NSZone *)zone { + auto copy = adoptNS([(SomeObj *)[SomeObj allocWithZone:zone] init]); + [copy setValue:_number]; + [copy setNext:_next]; + [copy setOther:_other]; + return copy.leakRef(); +} + @end; RetainPtr cf_out_argument() { @@ -151,7 +159,7 @@ CFTypeRef LeakWrapper() { extern Class (*getNSArrayClass)(); NSArray *allocArrayInstance() NS_RETURNS_RETAINED { - return [[getNSArrayClass() alloc] init]; + return adoptNS([[getNSArrayClass() alloc] init]).leakRef(); } extern int (*GetObj)(CF_RETURNS_RETAINED CFTypeRef* objOut); @@ -294,7 +302,7 @@ -(NSString *)leak_string { } -(NSString *)make_string { - return [[NSString alloc] initWithUTF8String:"hello"]; + return adoptNS([[NSString alloc] initWithUTF8String:"hello"]).leakRef(); } -(void)local_leak_string { diff --git a/clang/test/Analysis/Checkers/WebKit/unretained-members-arc.mm b/clang/test/Analysis/Checkers/WebKit/unretained-members-arc.mm index 19c54c4dc07ba..4eef372d26480 100644 --- a/clang/test/Analysis/Checkers/WebKit/unretained-members-arc.mm +++ b/clang/test/Analysis/Checkers/WebKit/unretained-members-arc.mm @@ -84,6 +84,21 @@ @interface AnotherObject : NSObject { @property(nonatomic, unsafe_unretained) NSString *prop_string3; 
// expected-warning@-1{{Property 'prop_string3' in 'AnotherObject' is a raw pointer to retainable type 'NSString'; member variables must be a RetainPtr}} @property(nonatomic, readonly) NSString *prop_string4; +@property(nonatomic, readonly) NSString *prop_safe; +@end + +@implementation AnotherObject +- (NSString *)prop_safe { + return nil; +} +@end + +// No warnings for @interface declaration itself. +@interface InterfaceOnlyObject : NSObject +@property(nonatomic, strong) NSString *prop_string1; +@property(nonatomic, assign) NSString *prop_string2; +@property(nonatomic, unsafe_unretained) NSString *prop_string3; +@property(nonatomic, readonly) NSString *prop_string4; @property(nonatomic, readonly) dispatch_queue_t prop_string5; @end diff --git a/clang/test/Analysis/Checkers/WebKit/unretained-members.mm b/clang/test/Analysis/Checkers/WebKit/unretained-members.mm index 155848f9834af..adf1d8aef9d7d 100644 --- a/clang/test/Analysis/Checkers/WebKit/unretained-members.mm +++ b/clang/test/Analysis/Checkers/WebKit/unretained-members.mm @@ -112,8 +112,59 @@ @interface AnotherObject : NSObject { dispatch_queue_t dispatch; // expected-warning@-1{{Instance variable 'dispatch' in 'AnotherObject' is a retainable type 'dispatch_queue_t'}} } -@property(nonatomic, strong) NSString *prop_string; -// expected-warning@-1{{Property 'prop_string' in 'AnotherObject' is a raw pointer to retainable type 'NSString'}} +@property(nonatomic, readonly, strong) NSString *prop_string; +// expected-warning@-1{{Property 'prop_string' in 'AnotherObject' is a raw pointer to retainable type 'NSString'; member variables must be a RetainPtr}} +@property(nonatomic, readonly) NSString *prop_safe; +@end + +@implementation AnotherObject +- (NSString *)prop_safe { + return nil; +} +@end + +@interface DerivedObject : AnotherObject { + NSNumber *ns_number; + // expected-warning@-1{{Instance variable 'ns_number' in 'DerivedObject' is a raw pointer to retainable type 'NSNumber'}} + CGImageRef cg_image; + // expected-warning@-1{{Instance variable 'cg_image' in 'DerivedObject' is a retainable type 'CGImageRef'}} + dispatch_queue_t os_dispatch; + // expected-warning@-1{{Instance variable 'os_dispatch' in 'DerivedObject' is a retainable type 'dispatch_queue_t'}} +} +@property(nonatomic, strong) NSNumber *prop_number; +// expected-warning@-1{{Property 'prop_number' in 'DerivedObject' is a raw pointer to retainable type 'NSNumber'; member variables must be a RetainPtr}} +@property(nonatomic, readonly) NSString *prop_string; +@end + +@implementation DerivedObject +- (NSString *)prop_string { + return nil; +} +@end + +// No warnings for @interface declaration itself. 
+@interface InterfaceOnlyObject : NSObject +@property(nonatomic, strong) NSString *prop_string1; +@property(nonatomic, assign) NSString *prop_string2; +@property(nonatomic, unsafe_unretained) NSString *prop_string3; +@property(nonatomic, readonly) NSString *prop_string4; +@end + +@interface InterfaceOnlyObject2 : NSObject +@property(nonatomic, strong) NSString *prop_string1; +@property(nonatomic, assign) NSString *prop_string2; +@property(nonatomic, unsafe_unretained) NSString *prop_string3; +// expected-warning@-1{{Property 'prop_string3' in 'DerivedObject2' is a raw pointer to retainable type 'NSString'}} +@property(nonatomic, readonly) NSString *prop_string4; +@end + +@interface DerivedObject2 : InterfaceOnlyObject2 +@property(nonatomic, readonly) NSString *prop_string5; +// expected-warning@-1{{Property 'prop_string5' in 'DerivedObject2' is a raw pointer to retainable type 'NSString'}} +@end + +@implementation DerivedObject2 +@synthesize prop_string3; @end NS_REQUIRES_PROPERTY_DEFINITIONS diff --git a/clang/test/Analysis/LifetimeSafety/benchmark.py b/clang/test/Analysis/LifetimeSafety/benchmark.py index 2373f9984eecd..d2e5f0b2122a3 100644 --- a/clang/test/Analysis/LifetimeSafety/benchmark.py +++ b/clang/test/Analysis/LifetimeSafety/benchmark.py @@ -340,7 +340,7 @@ def run_single_test( "name": "cycle", "title": "Pointer Cycle in Loop", "generator_func": generate_cpp_cycle_test, - "n_values": [25, 50, 75, 100], + "n_values": [50, 75, 100, 200, 300], }, { "name": "merge", diff --git a/clang/test/Analysis/buffer-overlap-decls.c b/clang/test/Analysis/buffer-overlap-decls.c new file mode 100644 index 0000000000000..4830f4e9691d8 --- /dev/null +++ b/clang/test/Analysis/buffer-overlap-decls.c @@ -0,0 +1,23 @@ +// RUN: %clang_analyze_cc1 -verify %s -Wno-incompatible-library-redeclaration \ +// RUN: -analyzer-checker=alpha.unix.cstring.BufferOverlap +// expected-no-diagnostics + +typedef typeof(sizeof(int)) size_t; + +void memcpy(int dst, int src, size_t size); + +void test_memcpy_proxy() { + memcpy(42, 42, 42); // no-crash +} + +void strcpy(int dst, char *src); + +void test_strcpy_proxy() { + strcpy(42, (char *)42); // no-crash +} + +void strxfrm(int dst, char *src, size_t size); + +void test_strxfrm_proxy() { + strxfrm(42, (char *)42, 42); // no-crash +} diff --git a/clang/test/Analysis/buffer-overlap.c b/clang/test/Analysis/buffer-overlap.c index 8414a764541e2..defb17a62ae0b 100644 --- a/clang/test/Analysis/buffer-overlap.c +++ b/clang/test/Analysis/buffer-overlap.c @@ -96,3 +96,10 @@ void test_snprintf6() { char b[4] = {0}; snprintf(a, sizeof(a), "%s", b); // no-warning } + +void* memcpy(void* dest, const void* src, size_t count); + +void test_memcpy_esoteric() { +label: + memcpy((char *)&&label, (const char *)memcpy, 1); +} diff --git a/clang/test/Analysis/cxxctr-evalcall-analysis-order.cpp b/clang/test/Analysis/cxxctr-evalcall-analysis-order.cpp index 0e1ec2f9de566..743c5ad0fa8cd 100644 --- a/clang/test/Analysis/cxxctr-evalcall-analysis-order.cpp +++ b/clang/test/Analysis/cxxctr-evalcall-analysis-order.cpp @@ -18,16 +18,33 @@ void foo() { C C0; C C1(42); C *C2 = new C{2, 3}; + delete C2; } // CHECK: PreCall (C::C) [CXXConstructorCall] // CHECK-NEXT: EvalCall (C::C) {argno: 0} [CXXConstructorCall] // CHECK-NEXT: PostCall (C::C) [CXXConstructorCall] + // CHECK-NEXT: PreCall (C::C) [CXXConstructorCall] // CHECK-NEXT: EvalCall (C::C) {argno: 1} [CXXConstructorCall] // CHECK-NEXT: PostCall (C::C) [CXXConstructorCall] + // CHECK-NEXT: PreCall (operator new) [CXXAllocatorCall] +// COMMENT: 
Operator new calls (CXXNewExpr) are intentionally not eval-called, +// COMMENT: because it does not make sense to eval-call user-provided functions. +// COMMENT: 1) If the new operator can be inlined, then don't prevent it from +// COMMENT: inlining by having an eval-call of that operator. +// COMMENT: 2) If it can't be inlined, then the default conservative modeling +// COMMENT: is what we want anyway. +// COMMENT: So the EvalCall event will not be triggered for operator new calls. +// CHECK-NOT: EvalCall // CHECK-NEXT: PostCall (operator new) [CXXAllocatorCall] + // CHECK-NEXT: PreCall (C::C) [CXXConstructorCall] // CHECK-NEXT: EvalCall (C::C) {argno: 2} [CXXConstructorCall] // CHECK-NEXT: PostCall (C::C) [CXXConstructorCall] + +// CHECK-NEXT: PreCall (operator delete) [CXXDeallocatorCall] +// COMMENT: Same reasoning as for CXXNewExprs above. +// CHECK-NOT: EvalCall +// CHECK-NEXT: PostCall (operator delete) [CXXDeallocatorCall] diff --git a/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-diagnostics-taint-test.c.sarif b/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-diagnostics-taint-test.c.sarif index 0bded6f0925d1..76f25475e3b21 100644 --- a/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-diagnostics-taint-test.c.sarif +++ b/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-diagnostics-taint-test.c.sarif @@ -4,9 +4,10 @@ { "artifacts": [ { - "length": 425, + "length": -1, "location": { "index": 0, + "uri": "file:///[...]/sarif-diagnostics-taint-test.c" }, "mimeType": "text/plain", "roles": [ @@ -31,6 +32,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-diagnostics-taint-test.c" }, "region": { "endColumn": 6, @@ -50,6 +52,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-diagnostics-taint-test.c" }, "region": { "endColumn": 18, @@ -71,6 +74,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-diagnostics-taint-test.c" }, "region": { "endColumn": 18, @@ -84,6 +88,9 @@ "message": { "text": "tainted" }, + "partialFingerprints": { + "clang/issueHash/v1": "5c964815b8d6db3989bacdd308e657d0" + }, "ruleId": "debug.TaintTest", "ruleIndex": 0 } @@ -108,8 +115,10 @@ "name": "debug.TaintTest" } ], + "version": "[clang version]" } } } ], + "version": "[SARIF version]" } diff --git a/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-diagnostic-test.c.sarif b/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-diagnostic-test.c.sarif index e35ab695bb38e..4aa6239f6312d 100644 --- a/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-diagnostic-test.c.sarif +++ b/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-diagnostic-test.c.sarif @@ -4,9 +4,10 @@ { "artifacts": [ { - "length": 1152, + "length": -1, "location": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "mimeType": "text/plain", "roles": [ @@ -31,6 +32,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 6, @@ -50,6 +52,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 18, @@ -65,12 +68,14 @@ ] } ], + "hostedViewerUri": "file:///[...]/report-5c9648.html", "level": "warning", "locations": [ { "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, 
"region": { "endColumn": 18, @@ -84,6 +89,9 @@ "message": { "text": "tainted" }, + "partialFingerprints": { + "clang/issueHash/v1": "5c964815b8d6db3989bacdd308e657d0" + }, "ruleId": "debug.TaintTest", "ruleIndex": 0 }, @@ -102,6 +110,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 6, @@ -121,6 +130,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 12, @@ -140,6 +150,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 9, @@ -155,12 +166,14 @@ ] } ], + "hostedViewerUri": "file:///[...]/report-256f65.html", "level": "warning", "locations": [ { "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 9, @@ -174,6 +187,9 @@ "message": { "text": "Called function pointer is an uninitialized pointer value" }, + "partialFingerprints": { + "clang/issueHash/v1": "256f6502719de88bece09a676d4102c6" + }, "ruleId": "core.CallAndMessage", "ruleIndex": 1 }, @@ -192,6 +208,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 13, @@ -211,6 +228,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 3, @@ -229,6 +247,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 14, @@ -243,12 +262,14 @@ ] } ], + "hostedViewerUri": "file:///[...]/report-91023b.html", "level": "warning", "locations": [ { "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 14, @@ -261,6 +282,9 @@ "message": { "text": "Division by zero" }, + "partialFingerprints": { + "clang/issueHash/v1": "91023b85b7e0ff79f11ab603e63cfa58" + }, "ruleId": "core.DivideZero", "ruleIndex": 2 }, @@ -279,6 +303,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 24, @@ -298,6 +323,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 12, @@ -317,6 +343,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 3, @@ -335,6 +362,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 12, @@ -349,12 +377,14 @@ ] } ], + "hostedViewerUri": "file:///[...]/report-b18daa.html", "level": "warning", "locations": [ { "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 12, @@ -367,6 +397,9 @@ "message": { "text": "Potential leak of memory pointed to by 'mem'" }, + "partialFingerprints": { + "clang/issueHash/v1": "b18daabce2816b9efb6afffaa64ca9f9" + }, "ruleId": "unix.Malloc", "ruleIndex": 3 }, @@ -385,6 +418,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 12, @@ -404,6 +438,7 @@ "physicalLocation": { "artifactLocation": { "index": 0, + 
"uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 20, @@ -418,12 +453,14 @@ ] } ], + "hostedViewerUri": "file:///[...]/report-4e5361.html", "level": "warning", "locations": [ { "physicalLocation": { "artifactLocation": { "index": 0, + "uri": "file:///[...]/sarif-multi-diagnostic-test.c" }, "region": { "endColumn": 20, @@ -436,6 +473,9 @@ "message": { "text": "Division by zero" }, + "partialFingerprints": { + "clang/issueHash/v1": "4e53611783411e0dae06a4084b00281c" + }, "ruleId": "core.DivideZero", "ruleIndex": 2 } @@ -499,8 +539,10 @@ "name": "unix.Malloc" } ], + "version": "[clang version]" } } } ], + "version": "[SARIF version]" } diff --git a/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-file-diagnostics.c.sarif b/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-file-diagnostics.c.sarif new file mode 100644 index 0000000000000..501d27ca22361 --- /dev/null +++ b/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-file-diagnostics.c.sarif @@ -0,0 +1,144 @@ +{ + "$schema": "https://docs.oasis-open.org/sarif/sarif/v2.1.0/cos02/schemas/sarif-schema-2.1.0.json", + "runs": [ + { + "artifacts": [ + { + "length": -1, + "location": { + "index": 0, + "uri": "file:///[...]/sarif-multi-file-diagnostics.c" + }, + "mimeType": "text/plain", + "roles": [ + "resultFile" + ] + } + ], + "columnKind": "unicodeCodePoints", + "results": [ + { + "codeFlows": [ + { + "threadFlows": [ + { + "locations": [ + { + "importance": "important", + "location": { + "message": { + "text": "Assuming 'p' is null" + }, + "physicalLocation": { + "artifactLocation": { + "index": 0, + "uri": "file:///[...]/sarif-multi-file-diagnostics.c" + }, + "region": { + "endColumn": 7, + "startColumn": 7, + "startLine": 8 + } + } + } + }, + { + "importance": "unimportant", + "location": { + "message": { + "text": "Taking false branch" + }, + "physicalLocation": { + "artifactLocation": { + "index": 0, + "uri": "file:///[...]/sarif-multi-file-diagnostics.c" + }, + "region": { + "endColumn": 3, + "startColumn": 3, + "startLine": 8 + } + } + } + }, + { + "importance": "essential", + "location": { + "message": { + "text": "Dereference of null pointer (loaded from variable 'p')" + }, + "physicalLocation": { + "artifactLocation": { + "index": 0, + "uri": "file:///[...]/sarif-multi-file-diagnostics.c" + }, + "region": { + "endColumn": 14, + "endLine": 11, + "startColumn": 12, + "startLine": 11 + } + } + } + } + ] + } + ] + } + ], + "hostedViewerUri": "file:///[...]/report-d03238.html", + "level": "warning", + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "index": 0, + "uri": "file:///[...]/sarif-multi-file-diagnostics.c" + }, + "region": { + "endColumn": 14, + "endLine": 11, + "startColumn": 12, + "startLine": 11 + } + } + } + ], + "message": { + "text": "Dereference of null pointer (loaded from variable 'p')" + }, + "partialFingerprints": { + "clang/issueHash/v1": "d0323824ffaf9fee78b866e18d300fda" + }, + "ruleId": "core.NullDereference", + "ruleIndex": 0 + } + ], + "tool": { + "driver": { + "fullName": "clang static analyzer", + "informationUri": "https://clang.llvm.org/docs/UsersManual.html", + "language": "en-US", + "name": "clang", + "rules": [ + { + "defaultConfiguration": { + "enabled": true, + "level": "warning", + "rank": -1 + }, + "fullDescription": { + "text": "Check for dereferences of null pointers" + }, + "helpUri": "https://clang.llvm.org/docs/analyzer/checkers.html#core-nulldereference", + "id": "core.NullDereference", + "name": 
"core.NullDereference" + } + ], + "version": "[clang version]" + } + } + } + ], + "version": "[SARIF version]" +} diff --git a/clang/test/Analysis/diagnostics/sarif-multi-file-diagnostics.c b/clang/test/Analysis/diagnostics/sarif-multi-file-diagnostics.c new file mode 100644 index 0000000000000..48880b592f261 --- /dev/null +++ b/clang/test/Analysis/diagnostics/sarif-multi-file-diagnostics.c @@ -0,0 +1,12 @@ +// RUN: rm -rf %t && mkdir %t +// RUN: %clang_analyze_cc1 -analyzer-checker=core %s -verify -analyzer-output=sarif-html -o %t%{fs-sep}out1.sarif +// RUN: %clang_analyze_cc1 -analyzer-checker=core %s -verify -analyzer-output=sarif-html -o %t%{fs-sep}out2.sarif +// RUN: cat %t%{fs-sep}out1.sarif | %normalize_sarif | diff -U1 -b %S/Inputs/expected-sarif/sarif-multi-file-diagnostics.c.sarif - +// RUN: cat %t%{fs-sep}out2.sarif | %normalize_sarif | diff -U1 -b %S/Inputs/expected-sarif/sarif-multi-file-diagnostics.c.sarif - + +int test(int *p) { + if (p) + return 0; + else + return *p; // expected-warning {{Dereference of null pointer (loaded from variable 'p')}} +} diff --git a/clang/test/Analysis/initializer.cpp b/clang/test/Analysis/initializer.cpp index 713e121168571..88758f7c3ac1d 100644 --- a/clang/test/Analysis/initializer.cpp +++ b/clang/test/Analysis/initializer.cpp @@ -610,3 +610,51 @@ void top() { consume(parseMatchComponent()); } } // namespace elementwise_copy_small_array_from_post_initializer_of_cctor + +namespace gh147686 { +// The problem reported in https://github.com/llvm/llvm-project/issues/147686 +// is sensitive to the initializer form: using parenthesis to initialize m_ptr +// resulted in crashes when analyzing *m_ptr = '\0'; but using braces is fine. + +struct A { + A() : m_ptr(m_buf) { *m_ptr = '\0'; } // no-crash + A(int overload) : m_ptr{m_buf} { *m_ptr = '\0'; } + A(char src) : m_ptr(m_buf) { *m_ptr = src; } // no-crash + A(char src, int overload) : m_ptr{m_buf} { *m_ptr = src; } + char m_buf[64] = {0}; + char * m_ptr; +}; + +void test1() { + A a; + clang_analyzer_eval(a.m_buf[0] == 0); // expected-warning{{TRUE}} + // FIXME The next eval should result in TRUE. + clang_analyzer_eval(*a.m_ptr == 0); // expected-warning{{UNKNOWN}} +} + +void test2() { + A a(314); + clang_analyzer_eval(a.m_buf[0] == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(*a.m_ptr == 0); // expected-warning{{TRUE}} +} + +void test3() { + A a(0); + clang_analyzer_eval(a.m_buf[0] == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(*a.m_ptr == 0); // expected-warning{{TRUE}} +} + +void test3Bis(char arg) { + A a(arg); + // FIXME This test should behave like test3. + clang_analyzer_eval(a.m_buf[0] == arg); // expected-warning{{FALSE}} // expected-warning{{TRUE}} + clang_analyzer_eval(*a.m_ptr == arg); // expected-warning{{UNKNOWN}} +} + +void test4(char arg) { + A a(arg, 314); + clang_analyzer_eval(a.m_buf[0] == arg); // expected-warning{{TRUE}} + clang_analyzer_eval(*a.m_ptr == arg); // expected-warning{{TRUE}} +} + +} // namespace gh147686 diff --git a/clang/test/Analysis/lit.local.cfg b/clang/test/Analysis/lit.local.cfg index f08ff8d6cce63..03ab418a5a4f7 100644 --- a/clang/test/Analysis/lit.local.cfg +++ b/clang/test/Analysis/lit.local.cfg @@ -17,15 +17,21 @@ config.substitutions.append( ) ) +sed_cmd = "/opt/freeware/bin/sed" if "system-aix" in config.available_features else "sed" + # Filtering command for testing SARIF output against reference output. 
config.substitutions.append( ( "%normalize_sarif", - "grep -Ev '^[[:space:]]*(%s|%s|%s)[[:space:]]*$'" + f"{sed_cmd} -r '%s;%s;%s;%s'" % ( - '"uri": "file:.*%basename_t"', - '"version": ".* version .*"', - '"version": "2.1.0"', + # Replace version strings that are likely to change. + r's/"version": ".* version .*"/"version": "[clang version]"/', + r's/"version": "2.1.0"/"version": "[SARIF version]"/', + # Strip directories from file URIs + r's/"file:(\/+)([^"\/]+\/)*([^"]+)"/"file:\1[...]\/\3"/', + # Set "length" to -1 + r's/"length": [[:digit:]]+/"length": -1/' ), ) ) diff --git a/clang/test/C/C23/n2838.c b/clang/test/C/C23/n2838.c index cd20ea59884b2..c74f8cbe0a96c 100644 --- a/clang/test/C/C23/n2838.c +++ b/clang/test/C/C23/n2838.c @@ -4,9 +4,9 @@ * Types and sizes */ -char buffer4[0xFFFF'FFFF'FFFF'FFFF'1wb]; /* expected-error {{array is too large (295147905179352825841 elements)}} */ -char buffer3[0xFFFF'FFFF'FFFF'FFFFwb]; /* expected-error {{array is too large (18446744073709551615 elements)}} */ -char buffer2[0x7FFF'FFFF'FFFF'FFFFwb]; /* expected-error {{array is too large (9223372036854775807 elements)}} */ +char buffer4[0xFFFF'FFFF'FFFF'FFFF'1wb]; /* expected-error {{array is too large (295'147'905'179'352'825'841 elements)}} */ +char buffer3[0xFFFF'FFFF'FFFF'FFFFwb]; /* expected-error {{array is too large (18'446'744'073'709'551'615 elements)}} */ +char buffer2[0x7FFF'FFFF'FFFF'FFFFwb]; /* expected-error {{array is too large (9'223'372'036'854'775'807 elements)}} */ char buffer1[0x1FFF'FFFF'FFFF'FFFFwb]; /* array is juuuuuust right */ /* The largest object we can create is still smaller than SIZE_MAX. */ diff --git a/clang/test/C/drs/dr2xx.c b/clang/test/C/drs/dr2xx.c index ffdf5aac377d9..7567d485d4324 100644 --- a/clang/test/C/drs/dr2xx.c +++ b/clang/test/C/drs/dr2xx.c @@ -370,7 +370,7 @@ void dr266(void) { */ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wlong-long" - (void)sizeof(int[__SIZE_MAX__ / 2][__SIZE_MAX__ / 2]); /* expected-error-re 2 {{array is too large ({{[0-9]+}} elements)}} */ + (void)sizeof(int[__SIZE_MAX__ / 2][__SIZE_MAX__ / 2]); /* expected-error-re 2 {{array is too large ({{[0-9']+}} elements)}} */ #pragma clang diagnostic pop } diff --git a/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c index 00378f725d76a..92eae6aab6800 100644 --- a/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c +++ b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c @@ -86,7 +86,7 @@ int check_load(st1 *s1) { // CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr // CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "b"} : !cir.ptr -> !cir.ptr // CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b, [[MEMBER]] {is_volatile} : !cir.ptr) -> !u32i -// CIR: [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i +// CIR: [[CAST:%.*]] = cir.cast integral [[BITFI]] : !u32i -> !s32i // CIR: cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr // CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr, !s32i // CIR: cir.return [[RET]] : !s32i @@ -118,7 +118,7 @@ int check_load_exception(st3 *s3) { // CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr // CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr // CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_b1, [[MEMBER]] {is_volatile} : !cir.ptr) -> !u32i -// CIR: [[CAST:%.*]] = cir.cast(integral, [[BITFI]] : !u32i), !s32i +// CIR: [[CAST:%.*]] = cir.cast integral [[BITFI]] : !u32i -> !s32i // CIR: 
cir.store [[CAST]], [[RETVAL:%.*]] : !s32i, !cir.ptr // CIR: [[RET:%.*]] = cir.load [[RETVAL]] : !cir.ptr, !s32i // CIR: cir.return [[RET]] : !s32i @@ -180,7 +180,7 @@ void check_store(st2 *s2) { // CIR: cir.func dso_local @check_store // CIR: [[CONST:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !s16i +// CIR: [[CAST:%.*]] = cir.cast integral [[CONST]] : !s32i -> !s16i // CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr // CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][0] {name = "a"} : !cir.ptr -> !cir.ptr // CIR: [[SETBF:%.*]] = cir.set_bitfield align(8) (#bfi_a, [[MEMBER]] : !cir.ptr, [[CAST]] : !s16i) {is_volatile} -> !s16i @@ -211,7 +211,7 @@ void check_store_exception(st3 *s3) { // CIR: cir.func dso_local @check_store_exception // CIR: [[CONST:%.*]] = cir.const #cir.int<2> : !s32i -// CIR: [[CAST:%.*]] = cir.cast(integral, [[CONST]] : !s32i), !u32i +// CIR: [[CAST:%.*]] = cir.cast integral [[CONST]] : !s32i -> !u32i // CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr // CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr // CIR: [[SETBF:%.*]] = cir.set_bitfield align(4) (#bfi_b1, [[MEMBER]] : !cir.ptr, [[CAST]] : !u32i) {is_volatile} -> !u32i @@ -263,7 +263,7 @@ void check_store_second_member (st4 *s4) { // CIR: cir.func dso_local @check_store_second_member // CIR: [[ONE:%.*]] = cir.const #cir.int<1> : !s32i -// CIR: [[CAST:%.*]] = cir.cast(integral, [[ONE]] : !s32i), !u64i +// CIR: [[CAST:%.*]] = cir.cast integral [[ONE]] : !s32i -> !u64i // CIR: [[LOAD:%.*]] = cir.load align(8) {{.*}} : !cir.ptr>, !cir.ptr // CIR: [[MEMBER:%.*]] = cir.get_member [[LOAD]][2] {name = "b"} : !cir.ptr -> !cir.ptr // CIR: cir.set_bitfield align(8) (#bfi_b2, [[MEMBER]] : !cir.ptr, [[CAST]] : !u64i) {is_volatile} -> !u64i diff --git a/clang/test/CIR/CodeGen/array-ctor.cpp b/clang/test/CIR/CodeGen/array-ctor.cpp index bad4868ed8c34..5583d9d56954e 100644 --- a/clang/test/CIR/CodeGen/array-ctor.cpp +++ b/clang/test/CIR/CodeGen/array-ctor.cpp @@ -27,7 +27,7 @@ void foo() { // CIR: cir.func dso_local @_Z3foov() // CIR: %[[ARRAY:.*]] = cir.alloca !cir.array, !cir.ptr>, ["s", init] // CIR: %[[CONST42:.*]] = cir.const #cir.int<42> : !u64i -// CIR: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARRAY]] : !cir.ptr>), !cir.ptr +// CIR: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARRAY]] : !cir.ptr> -> !cir.ptr // CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[CONST42]] : !u64i), !cir.ptr // CIR: %[[ITER:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] // CIR: cir.store %[[DECAY]], %[[ITER]] : !cir.ptr, !cir.ptr> @@ -111,7 +111,7 @@ void multi_dimensional() { // CIR-BEFORE-LPP: cir.func{{.*}} @_Z17multi_dimensionalv() // CIR-BEFORE-LPP: %[[S:.*]] = cir.alloca !cir.array x 3>, !cir.ptr x 3>>, ["s", init] -// CIR-BEFORE-LPP: %[[FLAT:.*]] = cir.cast(bitcast, %[[S]] : !cir.ptr x 3>>), !cir.ptr> +// CIR-BEFORE-LPP: %[[FLAT:.*]] = cir.cast bitcast %[[S]] : !cir.ptr x 3>> -> !cir.ptr> // CIR-BEFORE-LPP: cir.array.ctor %[[FLAT]] : !cir.ptr> { // CIR-BEFORE-LPP: ^bb0(%[[ARG:.*]]: !cir.ptr): // CIR-BEFORE-LPP: cir.call @_ZN1SC1Ev(%[[ARG]]) : (!cir.ptr) -> () @@ -122,7 +122,7 @@ void multi_dimensional() { // CIR: cir.func{{.*}} @_Z17multi_dimensionalv() // CIR: %[[S:.*]] = cir.alloca !cir.array x 3>, !cir.ptr x 3>>, ["s", init] // CIR: %[[CONST15:.*]] = cir.const #cir.int<15> : !u64i -// CIR: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, {{.*}} : !cir.ptr>), !cir.ptr +// CIR: 
%[[DECAY:.*]] = cir.cast array_to_ptrdecay {{.*}} : !cir.ptr> -> !cir.ptr // CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[CONST15]] : !u64i), !cir.ptr // CIR: %[[ITER:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] // CIR: cir.store %[[DECAY]], %[[ITER]] : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/array-dtor.cpp b/clang/test/CIR/CodeGen/array-dtor.cpp index 36db265a6dfed..e969d50842a03 100644 --- a/clang/test/CIR/CodeGen/array-dtor.cpp +++ b/clang/test/CIR/CodeGen/array-dtor.cpp @@ -26,7 +26,7 @@ void test_cleanup_array() { // CIR: cir.func{{.*}} @_Z18test_cleanup_arrayv() // CIR: %[[S:.*]] = cir.alloca !cir.array, !cir.ptr>, ["s"] // CIR: %[[CONST41:.*]] = cir.const #cir.int<41> : !u64i -// CIR: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[S]] : !cir.ptr>), !cir.ptr +// CIR: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[S]] : !cir.ptr> -> !cir.ptr // CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[CONST41]] : !u64i), !cir.ptr // CIR: %[[ITER:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] // CIR: cir.store %[[END_PTR]], %[[ITER]] : !cir.ptr, !cir.ptr> @@ -109,7 +109,7 @@ void multi_dimensional() { // CIR-BEFORE-LPP: cir.func{{.*}} @_Z17multi_dimensionalv() // CIR-BEFORE-LPP: %[[S:.*]] = cir.alloca !cir.array x 3>, !cir.ptr x 3>>, ["s"] -// CIR-BEFORE-LPP: %[[FLAT:.*]] = cir.cast(bitcast, %[[S]] : !cir.ptr x 3>>), !cir.ptr> +// CIR-BEFORE-LPP: %[[FLAT:.*]] = cir.cast bitcast %[[S]] : !cir.ptr x 3>> -> !cir.ptr> // CIR-BEFORE-LPP: cir.array.dtor %[[FLAT]] : !cir.ptr> { // CIR-BEFORE-LPP: ^bb0(%[[ARG:.*]]: !cir.ptr): // CIR-BEFORE-LPP: cir.call @_ZN1SD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -> () @@ -119,9 +119,9 @@ void multi_dimensional() { // CIR: cir.func{{.*}} @_Z17multi_dimensionalv() // CIR: %[[S:.*]] = cir.alloca !cir.array x 3>, !cir.ptr x 3>>, ["s"] -// CIR: %[[FLAT:.*]] = cir.cast(bitcast, %[[S]] : !cir.ptr x 3>>), !cir.ptr> +// CIR: %[[FLAT:.*]] = cir.cast bitcast %[[S]] : !cir.ptr x 3>> -> !cir.ptr> // CIR: %[[CONST14:.*]] = cir.const #cir.int<14> : !u64i -// CIR: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[FLAT]] : !cir.ptr>), !cir.ptr +// CIR: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[FLAT]] : !cir.ptr> -> !cir.ptr // CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[CONST14]] : !u64i), !cir.ptr // CIR: %[[ITER:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] // CIR: cir.store %[[END_PTR]], %[[ITER]] : !cir.ptr, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/array.cpp b/clang/test/CIR/CodeGen/array.cpp index a643de2d26189..3333634a256dc 100644 --- a/clang/test/CIR/CodeGen/array.cpp +++ b/clang/test/CIR/CodeGen/array.cpp @@ -113,12 +113,12 @@ void func() { // CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr, ["e", init] // CIR: %[[INIT_2:.*]] = cir.alloca !s32i, !cir.ptr, ["e2", init] // CIR: %[[IDX:.*]] = cir.const #cir.int<0> : !s32i -// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr> -> !cir.ptr // CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr, %[[IDX]] : !s32i), !cir.ptr // CIR: %[[TMP:.*]] = cir.load{{.*}} %[[ELE_PTR]] : !cir.ptr, !s32i // CIR" cir.store %[[TMP]], %[[INIT]] : !s32i, !cir.ptr // CIR: %[[IDX:.*]] = cir.const #cir.int<1> : !s32i -// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr> -> !cir.ptr // CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] 
: !cir.ptr, %[[IDX]] : !s32i), !cir.ptr // CIR: %[[TMP:.*]] = cir.load{{.*}} %[[ELE_PTR]] : !cir.ptr, !s32i // CIR" cir.store %[[TMP]], %[[INIT_2]] : !s32i, !cir.ptr @@ -152,7 +152,7 @@ void func2() { // CIR: %[[ARR2:.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] // CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp", init] -// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %[[ARR2]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %[[ARR2]] : !cir.ptr> -> !cir.ptr // CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s32i // CIR: cir.store{{.*}} %[[FIVE]], %[[ARR_0]] : !s32i, !cir.ptr // CIR: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i @@ -209,7 +209,7 @@ void func3() { // CIR: %[[ARR:.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] // CIR: %[[IDX:.*]] = cir.alloca !s32i, !cir.ptr, ["idx", init] // CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr, ["e", init] -// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr> -> !cir.ptr // CIR: %[[V0:.*]] = cir.const #cir.int<5> : !s32i // CIR: cir.store{{.*}} %[[V0]], %[[ARR_PTR]] : !s32i, !cir.ptr // CIR: %[[OFFSET_0:.*]] = cir.const #cir.int<1> : !s64i @@ -219,7 +219,7 @@ void func3() { // CIR: %[[IDX_V:.*]] = cir.const #cir.int<1> : !s32i // CIR: cir.store{{.*}} %[[IDX_V]], %[[IDX]] : !s32i, !cir.ptr // CIR: %[[TMP_IDX:.*]] = cir.load{{.*}} %[[IDX]] : !cir.ptr, !s32i -// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr> -> !cir.ptr // CIR: %[[ELE_PTR:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr, %[[TMP_IDX]] : !s32i), !cir.ptr // CIR: %[[ELE_TMP:.*]] = cir.load{{.*}} %[[ELE_PTR]] : !cir.ptr, !s32i // CIR: cir.store{{.*}} %[[ELE_TMP]], %[[INIT]] : !s32i, !cir.ptr @@ -258,20 +258,20 @@ void func4() { // CIR: %[[ARR:.*]] = cir.alloca !cir.array x 2>, !cir.ptr x 2>>, ["arr", init] // CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr, ["e", init] -// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr x 2>>), !cir.ptr> -// CIR: %[[ARR_0_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_PTR]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr x 2>> -> !cir.ptr> +// CIR: %[[ARR_0_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_PTR]] : !cir.ptr> -> !cir.ptr // CIR: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i // CIR: cir.store{{.*}} %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr // CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i // CIR: %[[ARR_1:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr>, %[[OFFSET]] : !s64i), !cir.ptr> -// CIR: %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_1_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_1]] : !cir.ptr> -> !cir.ptr // CIR: %[[V_1_0:.*]] = cir.const #cir.int<6> : !s32i // CIR: cir.store{{.*}} %[[V_1_0]], %[[ARR_1_PTR]] : !s32i, !cir.ptr // CIR: %[[IDX:.*]] = cir.const #cir.int<0> : !s32i // CIR: %[[IDX_1:.*]] = cir.const #cir.int<1> : !s32i -// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr x 2>>), !cir.ptr> +// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr x 2>> -> !cir.ptr> // CIR: %[[ARR_1:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr>, %[[IDX_1]] : !s32i), !cir.ptr> -// CIR: %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_1_PTR:.*]] = 
cir.cast array_to_ptrdecay %[[ARR_1]] : !cir.ptr> -> !cir.ptr // CIR: %[[ELE_0:.*]] = cir.ptr_stride(%[[ARR_1_PTR]] : !cir.ptr, %[[IDX]] : !s32i), !cir.ptr // CIR: %[[TMP:.*]] = cir.load{{.*}} %[[ELE_0]] : !cir.ptr, !s32i // CIR: cir.store{{.*}} %[[TMP]], %[[INIT]] : !s32i, !cir.ptr @@ -306,8 +306,8 @@ void func5() { // CIR: %[[ARR:.*]] = cir.alloca !cir.array x 2>, !cir.ptr x 2>>, ["arr", init] // CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["arrayinit.temp", init] -// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %0 : !cir.ptr x 2>>), !cir.ptr> -// CIR: %[[ARR_0_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_0]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %0 : !cir.ptr x 2>> -> !cir.ptr> +// CIR: %[[ARR_0_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_0]] : !cir.ptr> -> !cir.ptr // CIR: %[[V_0_0:.*]] = cir.const #cir.int<5> : !s32i // CIR: cir.store{{.*}} %[[V_0_0]], %[[ARR_0_PTR]] : !s32i, !cir.ptr // CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i @@ -364,7 +364,7 @@ void func6() { // CIR: %[[ARR:.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] // CIR: %[[V:.*]] = cir.const #cir.int<4> : !s32i // CIR: cir.store{{.*}} %[[V]], %[[VAR]] : !s32i, !cir.ptr -// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr> -> !cir.ptr // CIR: %[[TMP:.*]] = cir.load{{.*}} %[[VAR]] : !cir.ptr, !s32i // CIR: cir.store{{.*}} %[[TMP]], %[[ARR_PTR]] : !s32i, !cir.ptr // CIR: %[[OFFSET:.*]] = cir.const #cir.int<1> : !s64i @@ -396,7 +396,7 @@ void func7() { // CIR: %[[ARR:.*]] = cir.alloca !cir.array x 1>, !cir.ptr x 1>>, ["arr", init] // CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["arrayinit.temp", init] -// CIR: %[[ARR_0:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr x 1>>), !cir.ptr> +// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr x 1>> -> !cir.ptr> // CIR: cir.store{{.*}} %[[ARR_0]], %[[ARR_PTR]] : !cir.ptr>, !cir.ptr>> // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CIR: %[[ARR_END:.*]] = cir.ptr_stride(%[[ARR_0]] : !cir.ptr>, %[[ONE]] : !s64i), !cir.ptr> @@ -497,7 +497,7 @@ void func9(int arr[10][5]) { // CIR: %[[IDX_1:.*]] = cir.const #cir.int<1> : !s32i // CIR: %[[TMP_1:.*]] = cir.load{{.*}} %[[ARR]] : !cir.ptr>>, !cir.ptr> // CIR: %[[ARR_1:.*]] = cir.ptr_stride(%[[TMP_1]] : !cir.ptr>, %[[IDX_1]] : !s32i), !cir.ptr> -// CIR: %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_1_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR_1]] : !cir.ptr> -> !cir.ptr // CIR: %[[ARR_1_2:.*]] = cir.ptr_stride(%[[ARR_1_PTR]] : !cir.ptr, %[[IDX]] : !s32i), !cir.ptr // CIR: %[[TMP_2:.*]] = cir.load{{.*}} %[[ARR_1_2]] : !cir.ptr, !s32i // CIR: cir.store{{.*}} %[[TMP_2]], %[[INIT]] : !s32i, !cir.ptr @@ -575,3 +575,25 @@ void func12() { // LLVM: %[[ARR:.*]] = alloca [4 x %struct.Point], i64 1, align 16 // OGCG: %[[ARR:.*]] = alloca [4 x %struct.Point], align 16 + +void array_with_complex_elements() { + _Complex float arr[2] = {{1.1f, 2.2f}, {3.3f, 4.4f}}; +} + +// CIR: %[[ARR_ADDR:.*]] = cir.alloca !cir.array x 2>, !cir.ptr x 2>>, ["arr", init] +// CIR: %[[ARR_0:.*]] = cir.cast array_to_ptrdecay %[[ARR_ADDR]] : !cir.ptr x 2>> -> !cir.ptr> +// CIR: %[[CONST_COMPLEX_0:.*]] = cir.const #cir.const_complex<#cir.fp<1.100000e+00> : !cir.float, #cir.fp<2.200000e+00> : !cir.float> : !cir.complex +// CIR: cir.store{{.*}} %[[CONST_COMPLEX_0]], %[[ARR_0]] : !cir.complex, !cir.ptr> +// 
CIR: %[[IDX_1:.*]] = cir.const #cir.int<1> : !s64i +// CIR: %[[ARR_1:.*]] = cir.ptr_stride(%1 : !cir.ptr>, %[[IDX_1]] : !s64i), !cir.ptr> +// CIR: %[[CONST_COMPLEX_1:.*]] = cir.const #cir.const_complex<#cir.fp<3.300000e+00> : !cir.float, #cir.fp<4.400000e+00> : !cir.float> : !cir.complex +// CIR: cir.store{{.*}} %[[CONST_COMPLEX_1]], %[[ARR_1]] : !cir.complex, !cir.ptr> + +// LLVM: %[[ARR_ADDR:.*]] = alloca [2 x { float, float }], i64 1, align 16 +// LLVM: %[[ARR_0:.*]] = getelementptr { float, float }, ptr %[[ARR_ADDR]], i32 0 +// LLVM: store { float, float } { float 0x3FF19999A0000000, float 0x40019999A0000000 }, ptr %[[ARR_0]], align 8 +// LLVM: %[[ARR_1:.*]] = getelementptr { float, float }, ptr %[[ARR_0]], i64 1 +// LLVM: store { float, float } { float 0x400A666660000000, float 0x40119999A0000000 }, ptr %[[ARR_1]], align 8 + +// OGCG: %[[ARR_ADDR:.*]] = alloca [2 x { float, float }], align 16 +// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 16 %[[ARR_ADDR]], ptr align 16 @__const._Z27array_with_complex_elementsv.arr, i64 16, i1 false) diff --git a/clang/test/CIR/CodeGen/assign-operator.cpp b/clang/test/CIR/CodeGen/assign-operator.cpp index 3e509f59368b6..1089d4b6e69f8 100644 --- a/clang/test/CIR/CodeGen/assign-operator.cpp +++ b/clang/test/CIR/CodeGen/assign-operator.cpp @@ -17,7 +17,7 @@ void a() { // CIR: cir.func{{.*}} @_Z1av() // CIR: %[[A_ADDR:.*]] = cir.alloca !rec_x, !cir.ptr, ["a"] // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CIR: %[[ONE_CAST:.*]] = cir.cast(integral, %[[ONE]] : !u32i), !s32i +// CIR: %[[ONE_CAST:.*]] = cir.cast integral %[[ONE]] : !u32i -> !s32i // CIR: %[[RET:.*]] = cir.call @_ZN1xaSEi(%[[A_ADDR]], %[[ONE_CAST]]) : (!cir.ptr, !s32i) -> !s32i // LLVM: define{{.*}} @_Z1av() @@ -75,10 +75,10 @@ void copy_c(C &c1, C &c2) { // CIR: %[[A_MEMBER_2:.*]] = cir.get_member %[[ARG1_LOAD]][0] {name = "a"} // CIR: %[[C_A:.*]] = cir.call @_ZN1AaSERKS_(%[[A_MEMBER]], %[[A_MEMBER_2]]) // CIR: %[[B_MEMBER:.*]] = cir.get_member %[[THIS]][1] {name = "b"} -// CIR: %[[B_VOID_PTR:.*]] = cir.cast(bitcast, %[[B_MEMBER]] : !cir.ptr>), !cir.ptr +// CIR: %[[B_VOID_PTR:.*]] = cir.cast bitcast %[[B_MEMBER]] : !cir.ptr> -> !cir.ptr // CIR: %[[RET_LOAD:.*]] = cir.load %[[ARG1_ADDR]] // CIR: %[[B_MEMBER_2:.*]] = cir.get_member %[[RET_LOAD]][1] {name = "b"} -// CIR: %[[B_VOID_PTR_2:.*]] = cir.cast(bitcast, %[[B_MEMBER_2]] : !cir.ptr>), !cir.ptr +// CIR: %[[B_VOID_PTR_2:.*]] = cir.cast bitcast %[[B_MEMBER_2]] : !cir.ptr> -> !cir.ptr // CIR: %[[SIZE:.*]] = cir.const #cir.int<64> : !u64i // CIR: %[[COUNT:.*]] = cir.call @memcpy(%[[B_VOID_PTR]], %[[B_VOID_PTR_2]], %[[SIZE]]) // CIR: cir.store %[[THIS]], %[[RET_ADDR]] diff --git a/clang/test/CIR/CodeGen/basic.c b/clang/test/CIR/CodeGen/basic.c index 2c3c5b0f22a5c..9268615bc9fb0 100644 --- a/clang/test/CIR/CodeGen/basic.c +++ b/clang/test/CIR/CodeGen/basic.c @@ -296,7 +296,7 @@ size_type max_size(void) { // CIR: %0 = cir.alloca !u64i, !cir.ptr, ["__retval"] {alignment = 8 : i64} // CIR: %1 = cir.const #cir.int<0> : !s32i // CIR: %2 = cir.unary(not, %1) : !s32i, !s32i -// CIR: %3 = cir.cast(integral, %2 : !s32i), !u64i +// CIR: %3 = cir.cast integral %2 : !s32i -> !u64i // CIR: %4 = cir.const #cir.int<8> : !u64i // CIR: %5 = cir.binop(div, %3, %4) : !u64i diff --git a/clang/test/CIR/CodeGen/basic.cpp b/clang/test/CIR/CodeGen/basic.cpp index fe6dd938f0faf..af8de6fff047a 100644 --- a/clang/test/CIR/CodeGen/basic.cpp +++ b/clang/test/CIR/CodeGen/basic.cpp @@ -124,7 +124,7 @@ size_type max_size() { // CHECK: %0 = cir.alloca !u64i, 
!cir.ptr, ["__retval"] {alignment = 8 : i64} // CHECK: %1 = cir.const #cir.int<0> : !s32i // CHECK: %2 = cir.unary(not, %1) : !s32i, !s32i -// CHECK: %3 = cir.cast(integral, %2 : !s32i), !u64i +// CHECK: %3 = cir.cast integral %2 : !s32i -> !u64i // CHECK: %4 = cir.const #cir.int<8> : !u64i // CHECK: %5 = cir.binop(div, %3, %4) : !u64i // CHECK: cir.store{{.*}} %5, %0 : !u64i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/binassign.c b/clang/test/CIR/CodeGen/binassign.c index 541b50a664c0e..65bea4df7d837 100644 --- a/clang/test/CIR/CodeGen/binassign.c +++ b/clang/test/CIR/CodeGen/binassign.c @@ -25,7 +25,7 @@ void binary_assign(void) { // CIR: %[[TRUE:.*]] = cir.const #true // CIR: cir.store{{.*}} %[[TRUE]], %[[B]] : !cir.bool, !cir.ptr // CIR: %[[CHAR_INI_INIT:.*]] = cir.const #cir.int<65> : !s32i -// CIR: %[[CHAR_VAL:.*]] = cir.cast(integral, %[[CHAR_INI_INIT]] : !s32i), !s8i +// CIR: %[[CHAR_VAL:.*]] = cir.cast integral %[[CHAR_INI_INIT]] : !s32i -> !s8i // CIR: cir.store{{.*}} %[[CHAR_VAL]], %[[C]] : !s8i, !cir.ptr // CIR: %[[FLOAT_VAL:.*]] = cir.const #cir.fp<3.140000e+00> : !cir.float // CIR: cir.store{{.*}} %[[FLOAT_VAL]], %[[F]] : !cir.float, !cir.ptr diff --git a/clang/test/CIR/CodeGen/binop.c b/clang/test/CIR/CodeGen/binop.c index 280fd29b067f9..4427e4b605297 100644 --- a/clang/test/CIR/CodeGen/binop.c +++ b/clang/test/CIR/CodeGen/binop.c @@ -5,9 +5,9 @@ void conditionalResultIimplicitCast(int a, int b, float f) { // Should implicit cast back to int. int x = a && b; // CHECK: %[[#INT:]] = cir.ternary - // CHECK: %{{.+}} = cir.cast(bool_to_int, %[[#INT]] : !cir.bool), !s32i + // CHECK: %{{.+}} = cir.cast bool_to_int %[[#INT]] : !cir.bool -> !s32i float y = f && f; // CHECK: %[[#BOOL:]] = cir.ternary - // CHECK: %[[#INT:]] = cir.cast(bool_to_int, %[[#BOOL]] : !cir.bool), !s32i - // CHECK: %{{.+}} = cir.cast(int_to_float, %[[#INT]] : !s32i), !cir.float + // CHECK: %[[#INT:]] = cir.cast bool_to_int %[[#BOOL]] : !cir.bool -> !s32i + // CHECK: %{{.+}} = cir.cast int_to_float %[[#INT]] : !s32i -> !cir.float } diff --git a/clang/test/CIR/CodeGen/binop.cpp b/clang/test/CIR/CodeGen/binop.cpp index 847e81755939f..c1a432dbc2c32 100644 --- a/clang/test/CIR/CodeGen/binop.cpp +++ b/clang/test/CIR/CodeGen/binop.cpp @@ -337,13 +337,13 @@ void zext_shift_example(int a, unsigned char b) { // CIR: %[[A1:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr, !s32i // CIR: %[[B1:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr, !u8i -// CIR: %[[B1_EXT:.*]] = cir.cast(integral, %[[B1]] : !u8i), !s32i +// CIR: %[[B1_EXT:.*]] = cir.cast integral %[[B1]] : !u8i -> !s32i // CIR: %[[ASHR:.*]] = cir.shift(right, %[[A1]] : !s32i, %[[B1_EXT]] : !s32i) -> !s32i // CIR: cir.store{{.*}} %[[ASHR]], %[[X_PTR]] : !s32i, !cir.ptr // CIR: %[[A2:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr, !s32i // CIR: %[[B2:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr, !u8i -// CIR: %[[B2_EXT:.*]] = cir.cast(integral, %[[B2]] : !u8i), !s32i +// CIR: %[[B2_EXT:.*]] = cir.cast integral %[[B2]] : !u8i -> !s32i // CIR: %[[SHL:.*]] = cir.shift(left, %[[A2]] : !s32i, %[[B2_EXT]] : !s32i) -> !s32i // CIR: cir.store{{.*}} %[[SHL]], %[[X_PTR]] : !s32i, !cir.ptr @@ -409,13 +409,13 @@ void sext_shift_example(int a, signed char b) { // CIR: %[[A1:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr, !s32i // CIR: %[[B1:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr, !s8i -// CIR: %[[B1_EXT:.*]] = cir.cast(integral, %[[B1]] : !s8i), !s32i +// CIR: %[[B1_EXT:.*]] = cir.cast integral %[[B1]] : !s8i -> !s32i // CIR: %[[ASHR:.*]] = cir.shift(right, %[[A1]] : !s32i, %[[B1_EXT]] : 
!s32i) -> !s32i // CIR: cir.store{{.*}} %[[ASHR]], %[[X_PTR]] : !s32i, !cir.ptr // CIR: %[[A2:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr, !s32i // CIR: %[[B2:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr, !s8i -// CIR: %[[B2_EXT:.*]] = cir.cast(integral, %[[B2]] : !s8i), !s32i +// CIR: %[[B2_EXT:.*]] = cir.cast integral %[[B2]] : !s8i -> !s32i // CIR: %[[SHL:.*]] = cir.shift(left, %[[A2]] : !s32i, %[[B2_EXT]] : !s32i) -> !s32i // CIR: cir.store{{.*}} %[[SHL]], %[[X_PTR]] : !s32i, !cir.ptr @@ -481,13 +481,13 @@ void long_shift_example(long long a, short b) { // CIR: %[[A1:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr, !s64i // CIR: %[[B1:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr, !s16i -// CIR: %[[B1_EXT:.*]] = cir.cast(integral, %[[B1]] : !s16i), !s32i +// CIR: %[[B1_EXT:.*]] = cir.cast integral %[[B1]] : !s16i -> !s32i // CIR: %[[ASHR:.*]] = cir.shift(right, %[[A1]] : !s64i, %[[B1_EXT]] : !s32i) -> !s64i // CIR: cir.store{{.*}} %[[ASHR]], %[[X_PTR]] : !s64i, !cir.ptr // CIR: %[[A2:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr, !s64i // CIR: %[[B2:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr, !s16i -// CIR: %[[B2_EXT:.*]] = cir.cast(integral, %[[B2]] : !s16i), !s32i +// CIR: %[[B2_EXT:.*]] = cir.cast integral %[[B2]] : !s16i -> !s32i // CIR: %[[SHL:.*]] = cir.shift(left, %[[A2]] : !s64i, %[[B2_EXT]] : !s32i) -> !s64i // CIR: cir.store{{.*}} %[[SHL]], %[[X_PTR]] : !s64i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/builtin_bit.cpp b/clang/test/CIR/CodeGen/builtin_bit.cpp index 8b9a187e799ed..32a53d883a170 100644 --- a/clang/test/CIR/CodeGen/builtin_bit.cpp +++ b/clang/test/CIR/CodeGen/builtin_bit.cpp @@ -34,7 +34,7 @@ int test_builtin_clrsbl(long x) { // CIR-LABEL: _Z19test_builtin_clrsbll // CIR: [[TMP:%.+]] = cir.clrsb %{{.+}} : !s64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !s64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !s64i -> !s32i // LLVM-LABEL: _Z19test_builtin_clrsbll // LLVM: %[[X:.+]] = load i64, ptr %{{.+}}, align 8 @@ -58,7 +58,7 @@ int test_builtin_clrsbll(long long x) { // CIR-LABEL: _Z20test_builtin_clrsbllx // CIR: [[TMP:%.+]] = cir.clrsb %{{.+}} : !s64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !s64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !s64i -> !s32i // LLVM-LABEL: _Z20test_builtin_clrsbllx // LLVM: %[[X:.+]] = load i64, ptr %{{.+}}, align 8 @@ -82,7 +82,7 @@ int test_builtin_ctzs(unsigned short x) { // CIR-LABEL: _Z17test_builtin_ctzst // CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u16i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u16i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u16i -> !s32i // LLVM-LABEL: _Z17test_builtin_ctzst // LLVM: %{{.+}} = call i16 @llvm.cttz.i16(i16 %{{.+}}, i1 true) @@ -96,7 +96,7 @@ int test_builtin_ctz(unsigned x) { // CIR-LABEL: _Z16test_builtin_ctzj // CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u32i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i // LLVM-LABEL: _Z16test_builtin_ctzj // LLVM: %{{.+}} = call i32 @llvm.cttz.i32(i32 %{{.+}}, i1 true) @@ -110,7 +110,7 @@ int test_builtin_ctzl(unsigned long x) { // CIR-LABEL: _Z17test_builtin_ctzlm // CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i // LLVM-LABEL: _Z17test_builtin_ctzlm // LLVM: %{{.+}} = call i64 @llvm.cttz.i64(i64 %{{.+}}, i1 true) @@ -124,7 +124,7 @@ int test_builtin_ctzll(unsigned long long x) { // 
CIR-LABEL: _Z18test_builtin_ctzlly // CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i // LLVM-LABEL: _Z18test_builtin_ctzlly // LLVM: %{{.+}} = call i64 @llvm.cttz.i64(i64 %{{.+}}, i1 true) @@ -138,7 +138,7 @@ int test_builtin_ctzg(unsigned x) { // CIR-LABEL: _Z17test_builtin_ctzgj // CIR: [[TMP:%.+]] = cir.ctz %{{.+}} poison_zero : !u32i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i // LLVM-LABEL: _Z17test_builtin_ctzgj // LLVM: %{{.+}} = call i32 @llvm.cttz.i32(i32 %{{.+}}, i1 true) @@ -152,7 +152,7 @@ int test_builtin_clzs(unsigned short x) { // CIR-LABEL: _Z17test_builtin_clzst // CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u16i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u16i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u16i -> !s32i // LLVM-LABEL: _Z17test_builtin_clzst // LLVM: %{{.+}} = call i16 @llvm.ctlz.i16(i16 %{{.+}}, i1 true) @@ -166,7 +166,7 @@ int test_builtin_clz(unsigned x) { // CIR-LABEL: _Z16test_builtin_clzj // CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u32i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i // LLVM-LABEL: _Z16test_builtin_clzj // LLVM: %{{.+}} = call i32 @llvm.ctlz.i32(i32 %{{.+}}, i1 true) @@ -180,7 +180,7 @@ int test_builtin_clzl(unsigned long x) { // CIR-LABEL: _Z17test_builtin_clzlm // CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i // LLVM-LABEL: _Z17test_builtin_clzlm // LLVM: %{{.+}} = call i64 @llvm.ctlz.i64(i64 %{{.+}}, i1 true) @@ -194,7 +194,7 @@ int test_builtin_clzll(unsigned long long x) { // CIR-LABEL: _Z18test_builtin_clzlly // CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i // LLVM-LABEL: _Z18test_builtin_clzlly // LLVM: %{{.+}} = call i64 @llvm.ctlz.i64(i64 %{{.+}}, i1 true) @@ -208,7 +208,7 @@ int test_builtin_clzg(unsigned x) { // CIR-LABEL: _Z17test_builtin_clzgj // CIR: [[TMP:%.+]] = cir.clz %{{.+}} poison_zero : !u32i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i // LLVM-LABEL: _Z17test_builtin_clzgj // LLVM: %{{.+}} = call i32 @llvm.ctlz.i32(i32 %{{.+}}, i1 true) @@ -294,7 +294,7 @@ int test_builtin_parity(unsigned x) { // CIR-LABEL: _Z19test_builtin_parityj // CIR: [[TMP:%.+]] = cir.parity %{{.+}} : !u32i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i // LLVM-LABEL: _Z19test_builtin_parityj // LLVM: %[[X:.+]] = load i32, ptr %{{.+}}, align 4 @@ -312,7 +312,7 @@ int test_builtin_parityl(unsigned long x) { // CIR-LABEL: _Z20test_builtin_paritylm // CIR: [[TMP:%.+]] = cir.parity %{{.+}} : !u64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i // LLVM-LABEL: _Z20test_builtin_paritylm // LLVM: %[[X:.+]] = load i64, ptr %{{.+}}, align 8 @@ -330,7 +330,7 @@ int test_builtin_parityll(unsigned long long x) { // CIR-LABEL: _Z21test_builtin_paritylly // CIR: [[TMP:%.+]] = cir.parity %{{.+}} : !u64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : 
!u64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i // LLVM-LABEL: _Z21test_builtin_paritylly // LLVM: %[[X:.+]] = load i64, ptr %{{.+}}, align 8 @@ -348,7 +348,7 @@ int test_builtin_popcount(unsigned x) { // CIR-LABEL: _Z21test_builtin_popcountj // CIR: [[TMP:%.+]] = cir.popcount %{{.+}} : !u32i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i // LLVM-LABEL: _Z21test_builtin_popcountj // LLVM: %{{.+}} = call i32 @llvm.ctpop.i32(i32 %{{.+}}) @@ -362,7 +362,7 @@ int test_builtin_popcountl(unsigned long x) { // CIR-LABEL: _Z22test_builtin_popcountlm // CIR: [[TMP:%.+]] = cir.popcount %{{.+}} : !u64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i // LLVM-LABEL: _Z22test_builtin_popcountlm // LLVM: %{{.+}} = call i64 @llvm.ctpop.i64(i64 %{{.+}}) @@ -376,7 +376,7 @@ int test_builtin_popcountll(unsigned long long x) { // CIR-LABEL: _Z23test_builtin_popcountlly // CIR: [[TMP:%.+]] = cir.popcount %{{.+}} : !u64i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u64i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u64i -> !s32i // LLVM-LABEL: _Z23test_builtin_popcountlly // LLVM: %{{.+}} = call i64 @llvm.ctpop.i64(i64 %{{.+}}) @@ -390,7 +390,7 @@ int test_builtin_popcountg(unsigned x) { // CIR-LABEL: _Z22test_builtin_popcountgj // CIR: [[TMP:%.+]] = cir.popcount %{{.+}} : !u32i -// CIR: {{%.+}} = cir.cast(integral, [[TMP]] : !u32i), !s32i +// CIR: {{%.+}} = cir.cast integral [[TMP]] : !u32i -> !s32i // LLVM-LABEL: _Z22test_builtin_popcountgj // LLVM: %{{.+}} = call i32 @llvm.ctpop.i32(i32 %{{.+}}) diff --git a/clang/test/CIR/CodeGen/builtin_call.cpp b/clang/test/CIR/CodeGen/builtin_call.cpp index 853d894a3311b..a30df97250d19 100644 --- a/clang/test/CIR/CodeGen/builtin_call.cpp +++ b/clang/test/CIR/CodeGen/builtin_call.cpp @@ -165,9 +165,9 @@ void expect(int x, int y) { // CIR-LABEL: cir.func{{.*}} @_Z6expectii // CIR: %[[X:.+]] = cir.load align(4) %{{.+}} : !cir.ptr, !s32i -// CIR-NEXT: %[[X_LONG:.+]] = cir.cast(integral, %[[X]] : !s32i), !s64i +// CIR-NEXT: %[[X_LONG:.+]] = cir.cast integral %[[X]] : !s32i -> !s64i // CIR-NEXT: %[[Y:.+]] = cir.load align(4) %{{.+}} : !cir.ptr, !s32i -// CIR-NEXT: %[[Y_LONG:.+]] = cir.cast(integral, %[[Y]] : !s32i), !s64i +// CIR-NEXT: %[[Y_LONG:.+]] = cir.cast integral %[[Y]] : !s32i -> !s64i // CIR-NEXT: %{{.+}} = cir.expect(%[[X_LONG]], %[[Y_LONG]]) : !s64i // CIR: } @@ -185,9 +185,9 @@ void expect_prob(int x, int y) { // CIR-LABEL: cir.func{{.*}} @_Z11expect_probii // CIR: %[[X:.+]] = cir.load align(4) %{{.+}} : !cir.ptr, !s32i -// CIR-NEXT: %[[X_LONG:.+]] = cir.cast(integral, %[[X]] : !s32i), !s64i +// CIR-NEXT: %[[X_LONG:.+]] = cir.cast integral %[[X]] : !s32i -> !s64i // CIR-NEXT: %[[Y:.+]] = cir.load align(4) %{{.+}} : !cir.ptr, !s32i -// CIR-NEXT: %[[Y_LONG:.+]] = cir.cast(integral, %[[Y]] : !s32i), !s64i +// CIR-NEXT: %[[Y_LONG:.+]] = cir.cast integral %[[Y]] : !s32i -> !s64i // CIR-NEXT: %{{.+}} = cir.expect(%[[X_LONG]], %[[Y_LONG]], 2.500000e-01) : !s64i // CIR: } diff --git a/clang/test/CIR/CodeGen/builtin_printf.cpp b/clang/test/CIR/CodeGen/builtin_printf.cpp index 80875c349bfcf..898984a6c12d3 100644 --- a/clang/test/CIR/CodeGen/builtin_printf.cpp +++ b/clang/test/CIR/CodeGen/builtin_printf.cpp @@ -28,11 +28,11 @@ void func(char const * const str, int i) { // CIR: %[[null_ptr:.+]] = cir.const #cir.ptr : !cir.ptr // CIR: %[[printf_result1:.+]] = cir.call 
@printf(%[[null_ptr]]) nothrow : (!cir.ptr) -> !s32i // CIR: %[[str_fmt_global:.+]] = cir.get_global @".str" : !cir.ptr> -// CIR: %[[str_fmt_ptr:.+]] = cir.cast(array_to_ptrdecay, %[[str_fmt_global]] : !cir.ptr>), !cir.ptr +// CIR: %[[str_fmt_ptr:.+]] = cir.cast array_to_ptrdecay %[[str_fmt_global]] : !cir.ptr> -> !cir.ptr // CIR: %[[str_val:.+]] = cir.load{{.*}} %[[str_ptr]] : !cir.ptr>, !cir.ptr // CIR: %[[printf_result2:.+]] = cir.call @printf(%[[str_fmt_ptr]], %[[str_val]]) nothrow : (!cir.ptr, !cir.ptr) -> !s32i // CIR: %[[full_fmt_global:.+]] = cir.get_global @".str.1" : !cir.ptr> -// CIR: %[[full_fmt_ptr:.+]] = cir.cast(array_to_ptrdecay, %[[full_fmt_global]] : !cir.ptr>), !cir.ptr +// CIR: %[[full_fmt_ptr:.+]] = cir.cast array_to_ptrdecay %[[full_fmt_global]] : !cir.ptr> -> !cir.ptr // CIR: %[[str_val2:.+]] = cir.load{{.*}} %[[str_ptr]] : !cir.ptr>, !cir.ptr // CIR: %[[i_val:.+]] = cir.load{{.*}} %[[i_ptr]] : !cir.ptr, !s32i // CIR: %[[printf_result3:.+]] = cir.call @printf(%[[full_fmt_ptr]], %[[str_val2]], %[[i_val]]) nothrow : (!cir.ptr, !cir.ptr, !s32i) -> !s32i diff --git a/clang/test/CIR/CodeGen/cast.cpp b/clang/test/CIR/CodeGen/cast.cpp index caf6de7c7d485..7afa955cf3bcf 100644 --- a/clang/test/CIR/CodeGen/cast.cpp +++ b/clang/test/CIR/CodeGen/cast.cpp @@ -12,7 +12,7 @@ unsigned char cxxstaticcast_0(unsigned int x) { // CIR: %[[RV:[0-9]+]] = cir.alloca !u8i, !cir.ptr, ["__retval"] {alignment = 1 : i64} // CIR: cir.store %arg0, %[[XPTR]] : !u32i, !cir.ptr // CIR: %[[XVAL:[0-9]+]] = cir.load{{.*}} %[[XPTR]] : !cir.ptr, !u32i -// CIR: %[[CASTED:[0-9]+]] = cir.cast(integral, %[[XVAL]] : !u32i), !u8i +// CIR: %[[CASTED:[0-9]+]] = cir.cast integral %[[XVAL]] : !u32i -> !u8i // CIR: cir.store %[[CASTED]], %[[RV]] : !u8i, !cir.ptr // CIR: %[[R:[0-9]+]] = cir.load{{.*}} %1 : !cir.ptr, !u8i // CIR: cir.return %[[R]] : !u8i @@ -30,55 +30,55 @@ int cStyleCasts_0(unsigned x1, int x2, float x3, short x4, double x5) { // LLVM: define{{.*}} i32 @_Z13cStyleCasts_0jifsd char a = (char)x1; // truncate - // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s8i + // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !u32i -> !s8i // LLVM: %{{[0-9]+}} = trunc i32 %{{[0-9]+}} to i8 short b = (short)x2; // truncate with sign - // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !s16i + // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !s32i -> !s16i // LLVM: %{{[0-9]+}} = trunc i32 %{{[0-9]+}} to i16 long long c = (long long)x1; // zero extend - // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s64i + // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !u32i -> !s64i // LLVM: %{{[0-9]+}} = zext i32 %{{[0-9]+}} to i64 long long d = (long long)x2; // sign extend - // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !s64i + // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !s32i -> !s64i // LLVM: %{{[0-9]+}} = sext i32 %{{[0-9]+}} to i64 unsigned ui = (unsigned)x2; // sign drop - // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !s32i), !u32i + // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !s32i -> !u32i int si = (int)x1; // sign add - // CIR: %{{[0-9]+}} = cir.cast(integral, %{{[0-9]+}} : !u32i), !s32i + // CIR: %{{[0-9]+}} = cir.cast integral %{{[0-9]+}} : !u32i -> !s32i bool ib; int bi = (int)ib; // bool to int - // CIR: %{{[0-9]+}} = cir.cast(bool_to_int, %{{[0-9]+}} : !cir.bool), !s32i + // CIR: %{{[0-9]+}} = cir.cast bool_to_int %{{[0-9]+}} : !cir.bool -> !s32i // LLVM: %{{[0-9]+}} = zext i1 %{{[0-9]+}} to i32 bool b2 = x2; // 
int to bool - // CIR: %{{[0-9]+}} = cir.cast(int_to_bool, %{{[0-9]+}} : !s32i), !cir.bool + // CIR: %{{[0-9]+}} = cir.cast int_to_bool %{{[0-9]+}} : !s32i -> !cir.bool // LLVM: %[[INTTOBOOL:[0-9]+]] = icmp ne i32 %{{[0-9]+}}, 0 // LLVM: zext i1 %[[INTTOBOOL]] to i8 void *p; bool b3 = p; // ptr to bool - // CIR: %{{[0-9]+}} = cir.cast(ptr_to_bool, %{{[0-9]+}} : !cir.ptr), !cir.bool + // CIR: %{{[0-9]+}} = cir.cast ptr_to_bool %{{[0-9]+}} : !cir.ptr -> !cir.bool // LLVM: %[[PTRTOBOOL:[0-9]+]] = icmp ne ptr %{{[0-9]+}}, null // LLVM: zext i1 %[[PTRTOBOOL]] to i8 float f; bool b4 = f; // float to bool - // CIR: %{{[0-9]+}} = cir.cast(float_to_bool, %{{[0-9]+}} : !cir.float), !cir.bool + // CIR: %{{[0-9]+}} = cir.cast float_to_bool %{{[0-9]+}} : !cir.float -> !cir.bool // LLVM: %{{[0-9]+}} = fcmp une float %{{[0-9]+}}, 0.000000e+00 // LLVM: %{{[0-9]+}} = zext i1 %{{[0-9]+}} to i8 double d2 = f; // float to double - // CIR: %{{[0-9]+}} = cir.cast(floating, %{{[0-9]+}} : !cir.float), !cir.double + // CIR: %{{[0-9]+}} = cir.cast floating %{{[0-9]+}} : !cir.float -> !cir.double // LLVM: %{{[0-9]+}} = fpext float %{{[0-9]+}} to double f = d2; // double to float - // CIR: %{{[0-9]+}} = cir.cast(floating, %{{[0-9]+}} : !cir.double), !cir.float + // CIR: %{{[0-9]+}} = cir.cast floating %{{[0-9]+}} : !cir.double -> !cir.float // LLVM: %{{[0-9]+}} = fptrunc double %{{[0-9]+}} to float return 0; @@ -93,7 +93,7 @@ bool cptr(void *d) { // CIR: %[[DPTR:[0-9]+]] = cir.alloca !cir.ptr, !cir.ptr>, ["d", init] {alignment = 8 : i64} // CIR: %[[DVAL:[0-9]+]] = cir.load{{.*}} %[[DPTR]] : !cir.ptr>, !cir.ptr -// CIR: %{{[0-9]+}} = cir.cast(ptr_to_bool, %[[DVAL]] : !cir.ptr), !cir.bool +// CIR: %{{[0-9]+}} = cir.cast ptr_to_bool %[[DVAL]] : !cir.ptr -> !cir.bool // LLVM-LABEL: define{{.*}} i1 @_Z4cptrPv(ptr %0) // LLVM: %[[ARG_STORAGE:.*]] = alloca ptr, i64 1 @@ -127,7 +127,7 @@ void bitcast() { } // CIR: %[[D_VEC:.*]] = cir.load{{.*}} {{.*}} : !cir.ptr>, !cir.vector<2 x !cir.double> -// CIR: %[[I_VEC:.*]] = cir.cast(bitcast, %[[D_VEC]] : !cir.vector<2 x !cir.double>), !cir.vector<4 x !s32i> +// CIR: %[[I_VEC:.*]] = cir.cast bitcast %[[D_VEC]] : !cir.vector<2 x !cir.double> -> !cir.vector<4 x !s32i> // LLVM: %[[D_VEC:.*]] = load <2 x double>, ptr {{.*}}, align 16 // LLVM: %[[I_VEC:.*]] = bitcast <2 x double> %[[D_VEC]] to <4 x i32> diff --git a/clang/test/CIR/CodeGen/cmp.cpp b/clang/test/CIR/CodeGen/cmp.cpp index 75c8cda0c3603..7e32d16e88d57 100644 --- a/clang/test/CIR/CodeGen/cmp.cpp +++ b/clang/test/CIR/CodeGen/cmp.cpp @@ -407,9 +407,9 @@ void bool_cmp(bool a, bool b) { // CIR: %[[X_PTR:.*]] = cir.alloca !cir.bool, !cir.ptr, ["x", init] // CIR: %[[A1:.*]] = cir.load{{.*}} %[[A_PTR]] : !cir.ptr, !cir.bool -// CIR: %[[A1_INT:.*]] = cir.cast(bool_to_int, %[[A1]] : !cir.bool), !s32i +// CIR: %[[A1_INT:.*]] = cir.cast bool_to_int %[[A1]] : !cir.bool -> !s32i // CIR: %[[B1:.*]] = cir.load{{.*}} %[[B_PTR]] : !cir.ptr, !cir.bool -// CIR: %[[B1_INT:.*]] = cir.cast(bool_to_int, %[[B1]] : !cir.bool), !s32i +// CIR: %[[B1_INT:.*]] = cir.cast bool_to_int %[[B1]] : !cir.bool -> !s32i // CIR: %{{.*}} = cir.cmp(gt, %[[A1_INT]], %[[B1_INT]]) : !s32i, !cir.bool // CIR: cir.store{{.*}} {{.*}}, %[[X_PTR]] : !cir.bool, !cir.ptr diff --git a/clang/test/CIR/CodeGen/comma.c b/clang/test/CIR/CodeGen/comma.c index a1479b85d3f04..cc26a3f200664 100644 --- a/clang/test/CIR/CodeGen/comma.c +++ b/clang/test/CIR/CodeGen/comma.c @@ -24,7 +24,7 @@ void comma(void) { // CIR: %[[TRUE:.*]] = cir.const #true // CIR: cir.store{{.*}} %[[TRUE]], %[[B]] : 
!cir.bool, !cir.ptr // CIR: %[[CHAR_INI_INIT:.*]] = cir.const #cir.int<65> : !s32i -// CIR: %[[CHAR_VAL:.*]] = cir.cast(integral, %[[CHAR_INI_INIT]] : !s32i), !s8i +// CIR: %[[CHAR_VAL:.*]] = cir.cast integral %[[CHAR_INI_INIT]] : !s32i -> !s8i // CIR: cir.store{{.*}} %[[CHAR_VAL]], %[[C]] : !s8i, !cir.ptr // CIR: %[[FLOAT_VAL:.*]] = cir.const #cir.fp<3.140000e+00> : !cir.float // CIR: cir.store{{.*}} %[[FLOAT_VAL]], %[[F]] : !cir.float, !cir.ptr diff --git a/clang/test/CIR/CodeGen/complex-cast.cpp b/clang/test/CIR/CodeGen/complex-cast.cpp index a8f51cd627f9d..5dc08eb414a5b 100644 --- a/clang/test/CIR/CodeGen/complex-cast.cpp +++ b/clang/test/CIR/CodeGen/complex-cast.cpp @@ -20,7 +20,7 @@ void scalar_to_complex() { ci = sd; } -// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %{{.*}} : !cir.double), !cir.complex +// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast float_to_complex %{{.*}} : !cir.double -> !cir.complex // CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr, !cir.double // CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double @@ -35,7 +35,7 @@ void scalar_to_complex() { // OGCG: store double %[[REAL]], ptr {{.*}}, align 8 // OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr @cd, i32 0, i32 1), align 8 -// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %{{.*}} : !s32i), !cir.complex +// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast int_to_complex %{{.*}} : !s32i -> !cir.complex // CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr, !s32i // CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i @@ -50,11 +50,11 @@ void scalar_to_complex() { // OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4 // OGCG: store i32 0, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci, i32 0, i32 1), align 4 -// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast(int_to_float, %{{.*}} : !s32i), !cir.double -// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %[[INT_TO_FP]] : !cir.double), !cir.complex +// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast int_to_float %{{.*}} : !s32i -> !cir.double +// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast float_to_complex %[[INT_TO_FP]] : !cir.double -> !cir.complex // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr, !s32i -// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(int_to_float, %[[TMP]] : !s32i), !cir.double +// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast int_to_float %[[TMP]] : !s32i -> !cir.double // CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double // CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !cir.double -> !cir.complex @@ -69,11 +69,11 @@ void scalar_to_complex() { // OGCG: store double %[[REAL]], ptr {{.*}}, align 8 // OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8 -// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast(float_to_int, %{{.*}} : !cir.double), !s32i -// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %[[FP_TO_INT]] : !s32i), !cir.complex +// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast float_to_int %{{.*}} : !cir.double -> !s32i +// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast int_to_complex %[[FP_TO_INT]] : !s32i -> !cir.complex // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr, !cir.double -// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.double), !s32i +// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast float_to_int %[[TMP]] : !cir.double -> !s32i // CIR-AFTER-NEXT: 
%[[IMAG:.*]] = cir.const #cir.int<0> : !s32i // CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !s32i -> !cir.complex @@ -95,7 +95,7 @@ void scalar_to_complex_explicit() { ci = (int _Complex)sd; } -// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %{{.*}} : !cir.double), !cir.complex +// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast float_to_complex %{{.*}} : !cir.double -> !cir.complex // CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr, !cir.double // CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double @@ -110,7 +110,7 @@ void scalar_to_complex_explicit() { // OGCG: store double %[[REAL]], ptr {{.*}}, align 8 // OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr @cd, i32 0, i32 1), align 8 -// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %{{.*}} : !s32i), !cir.complex +// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast int_to_complex %{{.*}} : !s32i -> !cir.complex // CIR-AFTER: %[[REAL:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr, !s32i // CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i @@ -125,11 +125,11 @@ void scalar_to_complex_explicit() { // OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4 // OGCG: store i32 0, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci, i32 0, i32 1), align 4 -// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast(int_to_float, %{{.*}} : !s32i), !cir.double -// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast(float_to_complex, %[[INT_TO_FP]] : !cir.double), !cir.complex +// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast int_to_float %{{.*}} : !s32i -> !cir.double +// CIR-BEFORE: %[[FP_TO_COMPLEX:.*]] = cir.cast float_to_complex %[[INT_TO_FP]] : !cir.double -> !cir.complex // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr, !s32i -// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(int_to_float, %[[TMP]] : !s32i), !cir.double +// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast int_to_float %[[TMP]] : !s32i -> !cir.double // CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.double // CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !cir.double -> !cir.complex @@ -144,11 +144,11 @@ void scalar_to_complex_explicit() { // OGCG: store double %[[REAL]], ptr {{.*}}, align 8 // OGCG: store double 0.000000e+00, ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8 -// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast(float_to_int, %{{.*}} : !cir.double), !s32i -// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast(int_to_complex, %[[FP_TO_INT]] : !s32i), !cir.complex +// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast float_to_int %{{.*}} : !cir.double -> !s32i +// CIR-BEFORE: %[[INT_TO_COMPLEX:.*]] = cir.cast int_to_complex %[[FP_TO_INT]] : !s32i -> !cir.complex // CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr, !cir.double -// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.double), !s32i +// CIR-AFTER-NEXT: %[[REAL:.*]] = cir.cast float_to_int %[[TMP]] : !cir.double -> !s32i // CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.const #cir.int<0> : !s32i // CIR-AFTER-NEXT: %{{.*}} = cir.complex.create %[[REAL]], %[[IMAG]] : !s32i -> !cir.complex @@ -170,7 +170,7 @@ void complex_to_scalar() { si = (int)cd; } -// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast(float_complex_to_real, %{{.*}} : !cir.complex), !cir.double +// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast float_complex_to_real %{{.*}} : !cir.complex -> !cir.double // CIR-AFTER: %{{.*}} = cir.complex.real %{{.*}} : 
!cir.complex -> !cir.double @@ -180,7 +180,7 @@ void complex_to_scalar() { // OGCG: %[[REAL:.*]] = load double, ptr {{.*}}, align 8 // OGCG: store double %[[REAL]], ptr {{.*}}, align 8 -// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast(int_complex_to_real, %{{.*}} : !cir.complex), !s32i +// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast int_complex_to_real %{{.*}} : !cir.complex -> !s32i // CIR-AFTER: %{{.*}} = cir.complex.real %{{.*}} : !cir.complex -> !s32i @@ -190,11 +190,11 @@ void complex_to_scalar() { // OGCG: %[[REAL:.*]] = load i32, ptr {{.*}}, align 4 // OGCG: store i32 %[[REAL]], ptr {{.*}}, align 4 -// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast(int_complex_to_real, %{{.*}} : !cir.complex), !s32i -// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast(int_to_float, %[[INT_COMPLEX_TO_REAL]] : !s32i), !cir.double +// CIR-BEFORE: %[[INT_COMPLEX_TO_REAL:.*]] = cir.cast int_complex_to_real %{{.*}} : !cir.complex -> !s32i +// CIR-BEFORE: %[[INT_TO_FP:.*]] = cir.cast int_to_float %[[INT_COMPLEX_TO_REAL]] : !s32i -> !cir.double // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex -> !s32i -// CIR-AFTER-NEXT: %{{.*}} = cir.cast(int_to_float, %[[REAL]] : !s32i), !cir.double +// CIR-AFTER-NEXT: %{{.*}} = cir.cast int_to_float %[[REAL]] : !s32i -> !cir.double // LLVM: %[[REAL:.*]] = extractvalue { i32, i32 } %{{.+}}, 0 // LLVM-NEXT: %[[REAL_TO_DOUBLE:.*]] = sitofp i32 %[[REAL]] to double @@ -204,11 +204,11 @@ void complex_to_scalar() { // OGCG: %[[INT_TO_FP:.*]] = sitofp i32 %[[REAL]] to double // OGCG: store double %[[INT_TO_FP]], ptr {{.*}}, align 8 -// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast(float_complex_to_real, %{{.*}} : !cir.complex), !cir.double -// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast(float_to_int, %[[FP_TO_COMPLEX_REAL]] : !cir.double), !s32i +// CIR-BEFORE: %[[FP_TO_COMPLEX_REAL:.*]] = cir.cast float_complex_to_real %{{.*}} : !cir.complex -> !cir.double +// CIR-BEFORE: %[[FP_TO_INT:.*]] = cir.cast float_to_int %[[FP_TO_COMPLEX_REAL]] : !cir.double -> !s32i // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex -> !cir.double -// CIR-AFTER-NEXT: %{{.*}} = cir.cast(float_to_int, %[[REAL]] : !cir.double), !s32i +// CIR-AFTER-NEXT: %{{.*}} = cir.cast float_to_int %[[REAL]] : !cir.double -> !s32i // LLVM: %[[REAL:.*]] = extractvalue { double, double } %{{.+}}, 0 // LLVM-NEXT: %[[REAL_TO_INT:.*]] = fptosi double %[[REAL]] to i32 @@ -223,12 +223,12 @@ void complex_to_bool() { b = (bool)ci; } -// CIR-BEFORE: %[[FP_COMPLEX_TO_BOOL:.*]] = cir.cast(float_complex_to_bool, %{{.*}} : !cir.complex), !cir.bool +// CIR-BEFORE: %[[FP_COMPLEX_TO_BOOL:.*]] = cir.cast float_complex_to_bool %{{.*}} : !cir.complex -> !cir.bool // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex -> !cir.double // CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex -> !cir.double -// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast(float_to_bool, %[[REAL]] : !cir.double), !cir.bool -// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast(float_to_bool, %[[IMAG]] : !cir.double), !cir.bool +// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast float_to_bool %[[REAL]] : !cir.double -> !cir.bool +// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast float_to_bool %[[IMAG]] : !cir.double -> !cir.bool // CIR-AFTER-NEXT: %[[CONST_TRUE:.*]] = cir.const #true // CIR-AFTER-NEXT: %{{.*}} = cir.select if %[[REAL_TO_BOOL]] then %[[CONST_TRUE]] else %[[IMAG_TO_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool @@ -248,12 +248,12 @@ void complex_to_bool() { // 
OGCG: %[[BOOL_TO_INT:.*]] = zext i1 %[[COMPLEX_TO_BOOL]] to i8 // OGCG: store i8 %[[BOOL_TO_INT]], ptr {{.*}}, align 1 -// CIR-BEFORE: %[[INT_COMPLEX_TO_BOOL:.*]] = cir.cast(int_complex_to_bool, %{{.*}} : !cir.complex), !cir.bool +// CIR-BEFORE: %[[INT_COMPLEX_TO_BOOL:.*]] = cir.cast int_complex_to_bool %{{.*}} : !cir.complex -> !cir.bool // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex -> !s32i // CIR-AFTER-NEXT: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex -> !s32i -// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast(int_to_bool, %[[REAL]] : !s32i), !cir.bool -// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast(int_to_bool, %[[IMAG]] : !s32i), !cir.bool +// CIR-AFTER-NEXT: %[[REAL_TO_BOOL:.*]] = cir.cast int_to_bool %[[REAL]] : !s32i -> !cir.bool +// CIR-AFTER-NEXT: %[[IMAG_TO_BOOL:.*]] = cir.cast int_to_bool %[[IMAG]] : !s32i -> !cir.bool // CIR-AFTER-NEXT: %[[CONST_TRUE:.*]] = cir.const #true // CIR-AFTER-NEXT: %{{.+}} = cir.select if %[[REAL_TO_BOOL]] then %[[CONST_TRUE]] else %[[IMAG_TO_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool @@ -279,12 +279,12 @@ void complex_to_complex_cast() { } // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr>, !cir.complex -// CIR-BEFORE: %[[FP_COMPLEX:.*]] = cir.cast(float_complex, %[[TMP]] : !cir.complex), !cir.complex +// CIR-BEFORE: %[[FP_COMPLEX:.*]] = cir.cast float_complex %[[TMP]] : !cir.complex -> !cir.complex // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex -> !cir.float // CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex -> !cir.float -// CIR-AFTER: %[[REAL_FP_CAST:.*]] = cir.cast(floating, %[[REAL]] : !cir.float), !cir.double -// CIR-AFTER: %[[IMAG_FP_CAST:.*]] = cir.cast(floating, %[[IMAG]] : !cir.float), !cir.double +// CIR-AFTER: %[[REAL_FP_CAST:.*]] = cir.cast floating %[[REAL]] : !cir.float -> !cir.double +// CIR-AFTER: %[[IMAG_FP_CAST:.*]] = cir.cast floating %[[IMAG]] : !cir.float -> !cir.double // CIR-AFTER: %{{.*}} = cir.complex.create %[[REAL_FP_CAST]], %[[IMAG_FP_CAST]] : !cir.double -> !cir.complex // LLVM: %[[REAL:.*]] = extractvalue { float, float } %{{.*}}, 0 @@ -303,12 +303,12 @@ void complex_to_complex_cast() { // OGCG: store double %[[IMAG_FP_CAST]], ptr getelementptr inbounds nuw ({ double, double }, ptr {{.*}}, i32 0, i32 1), align 8 // CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %{{.*}} : !cir.ptr>, !cir.complex -// CIR-BEFORE: %[[INT_COMPLEX:.*]] = cir.cast(int_complex, %[[TMP]] : !cir.complex), !cir.complex +// CIR-BEFORE: %[[INT_COMPLEX:.*]] = cir.cast int_complex %[[TMP]] : !cir.complex -> !cir.complex // CIR-AFTER: %[[REAL:.*]] = cir.complex.real %{{.*}} : !cir.complex -> !s16i // CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %{{.*}} : !cir.complex -> !s16i -// CIR-AFTER: %[[REAL_INT_CAST:.*]] = cir.cast(integral, %[[REAL]] : !s16i), !s32i -// CIR-AFTER: %[[IMAG_INT_CAST:.*]] = cir.cast(integral, %[[IMAG]] : !s16i), !s32i +// CIR-AFTER: %[[REAL_INT_CAST:.*]] = cir.cast integral %[[REAL]] : !s16i -> !s32i +// CIR-AFTER: %[[IMAG_INT_CAST:.*]] = cir.cast integral %[[IMAG]] : !s16i -> !s32i // CIR-AFTER: %{{.*}} = cir.complex.create %[[REAL_INT_CAST]], %[[IMAG_INT_CAST]] : !s32i -> !cir.complex // LLVM: %[[REAL:.*]] = extractvalue { i16, i16 } %{{.*}}, 0 @@ -336,9 +336,9 @@ void lvalue_to_rvalue_bitcast() { double _Complex b = __builtin_bit_cast(double _Complex, a); } -// CIR-BEFORE: %{{.*}} = cir.cast(bitcast, %{{.*}} : !cir.ptr), !cir.ptr> +// CIR-BEFORE: %{{.*}} = cir.cast bitcast %{{.*}} : !cir.ptr -> !cir.ptr> -// CIR-AFTER: %{{.*}} = 
cir.cast(bitcast, %{{.*}} : !cir.ptr), !cir.ptr> +// CIR-AFTER: %{{.*}} = cir.cast bitcast %{{.*}} : !cir.ptr -> !cir.ptr> // LLVM: %[[PTR_ADDR:.*]] = alloca %struct.CX, i64 1, align 8 // LLVM: %[[COMPLEX_ADDR:.*]] = alloca { double, double }, i64 1, align 8 @@ -361,9 +361,9 @@ void lvalue_bitcast() { (double _Complex &)a = {}; } -// CIR-BEFORE: %{{.*}} = cir.cast(bitcast, %{{.*}} : !cir.ptr), !cir.ptr> +// CIR-BEFORE: %{{.*}} = cir.cast bitcast %{{.*}} : !cir.ptr -> !cir.ptr> -// CIR-AFTER: %{{.*}} = cir.cast(bitcast, %{{.*}} : !cir.ptr), !cir.ptr> +// CIR-AFTER: %{{.*}} = cir.cast bitcast %{{.*}} : !cir.ptr -> !cir.ptr> // LLVM: %[[A_ADDR:.*]] = alloca %struct.CX, i64 1, align 8 // LLVM: store { double, double } zeroinitializer, ptr %[[A_ADDR]], align 8 diff --git a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp index 9909985e7819c..a5070f51fad63 100644 --- a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp +++ b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp @@ -154,20 +154,20 @@ void foo3() { // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex // CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.f16 // CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.f16 -// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float -// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float +// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float +// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float // CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex // CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.complex // CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex -> !cir.f16 // CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex -> !cir.f16 -// CIR: %[[B_REAL_F32:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.f16), !cir.float -// CIR: %[[B_IMAG_F32:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.f16), !cir.float +// CIR: %[[B_REAL_F32:.*]] = cir.cast floating %[[B_REAL]] : !cir.f16 -> !cir.float +// CIR: %[[B_IMAG_F32:.*]] = cir.cast floating %[[B_IMAG]] : !cir.f16 -> !cir.float // CIR: %[[B_COMPLEX_F32:.*]] = cir.complex.create %[[B_REAL_F32]], %[[B_IMAG_F32]] : !cir.float -> !cir.complex // CIR: %[[ADD_A_B:.*]] = cir.complex.add %[[B_COMPLEX_F32]], %[[A_COMPLEX_F32]] : !cir.complex // CIR: %[[ADD_REAL:.*]] = cir.complex.real %[[ADD_A_B]] : !cir.complex -> !cir.float // CIR: %[[ADD_IMAG:.*]] = cir.complex.imag %[[ADD_A_B]] : !cir.complex -> !cir.float -// CIR: %[[ADD_REAL_F16:.*]] = cir.cast(floating, %[[ADD_REAL]] : !cir.float), !cir.f16 -// CIR: %[[ADD_IMAG_F16:.*]] = cir.cast(floating, %[[ADD_IMAG]] : !cir.float), !cir.f16 +// CIR: %[[ADD_REAL_F16:.*]] = cir.cast floating %[[ADD_REAL]] : !cir.float -> !cir.f16 +// CIR: %[[ADD_IMAG_F16:.*]] = cir.cast floating %[[ADD_IMAG]] : !cir.float -> !cir.f16 // CIR: %[[RESULT:.*]] = cir.complex.create %[[ADD_REAL_F16]], %[[ADD_IMAG_F16]] : !cir.f16 -> !cir.complex // CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex, !cir.ptr> @@ -712,14 +712,14 @@ void foo13() { // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex // CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.f16 // CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.f16 
-// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float -// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float +// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float +// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float // CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex // CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.complex // CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex -> !cir.f16 // CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex -> !cir.f16 -// CIR: %[[B_REAL_F32:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.f16), !cir.float -// CIR: %[[B_IMAG_F32:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.f16), !cir.float +// CIR: %[[B_REAL_F32:.*]] = cir.cast floating %[[B_REAL]] : !cir.f16 -> !cir.float +// CIR: %[[B_IMAG_F32:.*]] = cir.cast floating %[[B_IMAG]] : !cir.f16 -> !cir.float // CIR: %[[B_COMPLEX_F32:.*]] = cir.complex.create %[[B_REAL_F32]], %[[B_IMAG_F32]] : !cir.float -> !cir.complex // CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float // CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float @@ -729,8 +729,8 @@ void foo13() { // CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.complex // CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex -> !cir.f16 // CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex -> !cir.f16 -// CIR: %[[B_REAL_F32:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.f16), !cir.float -// CIR: %[[B_IMAG_F32:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.f16), !cir.float +// CIR: %[[B_REAL_F32:.*]] = cir.cast floating %[[B_REAL]] : !cir.f16 -> !cir.float +// CIR: %[[B_IMAG_F32:.*]] = cir.cast floating %[[B_IMAG]] : !cir.f16 -> !cir.float // CIR: %[[B_COMPLEX_F32:.*]] = cir.complex.create %[[B_REAL_F32]], %[[B_IMAG_F32]] : !cir.float -> !cir.complex // CIR: %[[B_REAL_F32:.*]] = cir.complex.real %[[B_COMPLEX_F32]] : !cir.complex -> !cir.float // CIR: %[[B_IMAG_F32:.*]] = cir.complex.imag %[[B_COMPLEX_F32]] : !cir.complex -> !cir.float @@ -739,8 +739,8 @@ void foo13() { // CIR: %[[RESULT:.*]] = cir.call @__divsc3(%[[B_REAL_F32]], %[[B_IMAG_F32]], %[[DIV_AB_REAL]], %[[DIV_AB_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex // CIR: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT]] : !cir.complex -> !cir.float // CIR: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT]] : !cir.complex -> !cir.float -// CIR: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16 -// CIR: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16 +// CIR: %[[RESULT_REAL_F16:.*]] = cir.cast floating %[[RESULT_REAL_F32]] : !cir.float -> !cir.f16 +// CIR: %[[RESULT_IMAG_F16:.*]] = cir.cast floating %[[RESULT_IMAG_F32]] : !cir.float -> !cir.f16 // CIR: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex // CIR: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/complex-mul-div.cpp b/clang/test/CIR/CodeGen/complex-mul-div.cpp index d49304660b4d4..b306981434dc6 100644 --- a/clang/test/CIR/CodeGen/complex-mul-div.cpp +++ b/clang/test/CIR/CodeGen/complex-mul-div.cpp @@ -549,10 +549,10 @@ void foo3() { // CIR-AFTER-PROMOTED: 
%[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.float // CIR-AFTER-PROMOTED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex -> !cir.float // CIR-AFTER-PROMOTED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex -> !cir.float -// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.float), !cir.double -// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.float), !cir.double -// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.float), !cir.double -// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.float), !cir.double +// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.double +// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast floating %[[A_IMAG]] : !cir.float -> !cir.double +// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast floating %[[B_REAL]] : !cir.float -> !cir.double +// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast floating %[[B_IMAG]] : !cir.float -> !cir.double // CIR-AFTER-PROMOTED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL_F64]], %[[B_REAL_F64]]) : !cir.double // CIR-AFTER-PROMOTED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG_F64]], %[[B_IMAG_F64]]) : !cir.double // CIR-AFTER-PROMOTED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL_F64]], %[[B_REAL_F64]]) : !cir.double @@ -567,8 +567,8 @@ void foo3() { // CIR-AFTER-PROMOTED: %[[RESULT_F64:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.double -> !cir.complex // CIR-AFTER-PROMOTED: %[[RESULT_REAL_F64:.*]] = cir.complex.real %[[RESULT_F64]] : !cir.complex -> !cir.double // CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F64:.*]] = cir.complex.imag %[[RESULT_F64]] : !cir.complex -> !cir.double -// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast(floating, %[[RESULT_REAL_F64]] : !cir.double), !cir.float -// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast(floating, %[[RESULT_IMAG_F64]] : !cir.double), !cir.float +// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast floating %[[RESULT_REAL_F64]] : !cir.double -> !cir.float +// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast floating %[[RESULT_IMAG_F64]] : !cir.double -> !cir.float // CIR-AFTER-PROMOTED: %[[RESULT_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex // CIR-AFTER-PROMOTED: cir.store{{.*}} %[[RESULT_F32]], %[[C_ADDR]] : !cir.complex, !cir.ptr> @@ -1044,10 +1044,10 @@ void foo6() { // CIR-AFTER-PROMOTED: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : !cir.complex -> !cir.float // CIR-AFTER-PROMOTED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex -> !cir.float // CIR-AFTER-PROMOTED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex -> !cir.float -// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.float), !cir.double -// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.float), !cir.double -// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast(floating, %[[B_REAL]] : !cir.float), !cir.double -// CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast(floating, %[[B_IMAG]] : !cir.float), !cir.double +// CIR-AFTER-PROMOTED: %[[A_REAL_F64:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.double +// CIR-AFTER-PROMOTED: %[[A_IMAG_F64:.*]] = cir.cast floating %[[A_IMAG]] : !cir.float -> !cir.double +// CIR-AFTER-PROMOTED: %[[B_REAL_F64:.*]] = cir.cast floating %[[B_REAL]] : !cir.float -> !cir.double +// 
CIR-AFTER-PROMOTED: %[[B_IMAG_F64:.*]] = cir.cast floating %[[B_IMAG]] : !cir.float -> !cir.double // CIR-AFTER-PROMOTED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL_F64]], %[[B_REAL_F64]]) : !cir.double // CIR-AFTER-PROMOTED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG_F64]], %[[B_IMAG_F64]]) : !cir.double // CIR-AFTER-PROMOTED: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL_F64]], %[[B_REAL_F64]]) : !cir.double @@ -1062,8 +1062,8 @@ void foo6() { // CIR-AFTER-PROMOTED: %[[RESULT_F64:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.double -> !cir.complex // CIR-AFTER-PROMOTED: %[[RESULT_REAL_F64:.*]] = cir.complex.real %[[RESULT_F64]] : !cir.complex -> !cir.double // CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F64:.*]] = cir.complex.imag %[[RESULT_F64]] : !cir.complex -> !cir.double -// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast(floating, %[[RESULT_REAL_F64]] : !cir.double), !cir.float -// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast(floating, %[[RESULT_IMAG_F64]] : !cir.double), !cir.float +// CIR-AFTER-PROMOTED: %[[RESULT_REAL_F32:.*]] = cir.cast floating %[[RESULT_REAL_F64]] : !cir.double -> !cir.float +// CIR-AFTER-PROMOTED: %[[RESULT_IMAG_F32:.*]] = cir.cast floating %[[RESULT_IMAG_F64]] : !cir.double -> !cir.float // CIR-AFTER-PROMOTED: %[[RESULT_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex // CIR-AFTER-PROMOTED: cir.store{{.*}} %[[RESULT_F32]], %[[C_ADDR]] : !cir.complex, !cir.ptr> diff --git a/clang/test/CIR/CodeGen/complex-unary.cpp b/clang/test/CIR/CodeGen/complex-unary.cpp index e945d9b09f613..a8e434b903763 100644 --- a/clang/test/CIR/CodeGen/complex-unary.cpp +++ b/clang/test/CIR/CodeGen/complex-unary.cpp @@ -380,9 +380,9 @@ void foo9() { // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["b", init] // CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex -// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex), !cir.complex +// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast float_complex %[[TMP_A]] : !cir.complex -> !cir.complex // CIR-BEFORE: %[[RESULT:.*]] = cir.unary(plus, %[[A_COMPLEX_F32]]) : !cir.complex, !cir.complex -// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex), !cir.complex +// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast float_complex %[[RESULT]] : !cir.complex -> !cir.complex // CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex, !cir.ptr> // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] @@ -390,8 +390,8 @@ void foo9() { // CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex // CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.f16 // CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.f16 -// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float -// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float +// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float +// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float // CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex // CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex -> 
!cir.float // CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float @@ -400,8 +400,8 @@ void foo9() { // CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex // CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex -> !cir.float // CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex -> !cir.float -// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16 -// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16 +// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast floating %[[RESULT_REAL_F32]] : !cir.float -> !cir.f16 +// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast floating %[[RESULT_IMAG_F32]] : !cir.float -> !cir.f16 // CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex // CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex, !cir.ptr> @@ -445,9 +445,9 @@ void foo10() { // CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] // CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["b", init] // CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex -// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex), !cir.complex +// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast float_complex %[[TMP_A]] : !cir.complex -> !cir.complex // CIR-BEFORE: %[[RESULT:.*]] = cir.unary(minus, %[[A_COMPLEX_F32]]) : !cir.complex, !cir.complex -// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex), !cir.complex +// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast float_complex %[[RESULT]] : !cir.complex -> !cir.complex // CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex, !cir.ptr> // CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] @@ -455,8 +455,8 @@ void foo10() { // CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex // CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.f16 // CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.f16 -// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float -// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float +// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float +// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float // CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex // CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float // CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float @@ -465,8 +465,8 @@ void foo10() { // CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex // CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex -> !cir.float // CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex -> !cir.float -// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : 
!cir.float), !cir.f16 -// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16 +// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast floating %[[RESULT_REAL_F32]] : !cir.float -> !cir.f16 +// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast floating %[[RESULT_IMAG_F32]] : !cir.float -> !cir.f16 // CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex // CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex, !cir.ptr> @@ -505,3 +505,80 @@ void foo10() { // OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 1 // OGCG: store half %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 2 // OGCG: store half %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 2 + +void complex_unary_inc_lvalue() { + float _Complex a; + ++a; +} + + +// CIR-BEFORE: %[[A_ADDR]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(inc, %[[TMP_A]]) : !cir.complex, !cir.complex +// CIR-BEFORE: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex, !cir.ptr> + +// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.float +// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.float +// CIR-AFTER: %[[RESULT_REAL:.*]] = cir.unary(inc, %2) : !cir.float, !cir.float +// CIR-AFTER: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex +// CIR-AFTER: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4 +// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4 +// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0 +// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1 +// LLVM: %[[RESULT_REAL:.*]] = fadd float 1.000000e+00, %[[A_REAL]] +// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0 +// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[A_IMAG]], 1 +// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4 +// OGCG: %[[RESULT_REAL:.*]] = fadd float %[[A_REAL]], 1.000000e+00 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4 +// OGCG: store float %[[A_IMAG]], ptr %[[A_IMAG_PTR]], align 4 + +void complex_unary_dec_lvalue() { + float _Complex a; + --a; +} + +// CIR-BEFORE: %[[A_ADDR]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR-BEFORE: %[[RESULT:.*]] = 
cir.unary(dec, %[[TMP_A]]) : !cir.complex, !cir.complex +// CIR-BEFORE: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex, !cir.ptr> + +// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.float +// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.float +// CIR-AFTER: %[[RESULT_REAL:.*]] = cir.unary(dec, %2) : !cir.float, !cir.float +// CIR-AFTER: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex +// CIR-AFTER: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4 +// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4 +// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0 +// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1 +// LLVM: %[[RESULT_REAL:.*]] = fadd float -1.000000e+00, %[[A_REAL]] +// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0 +// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[A_IMAG]], 1 +// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4 +// OGCG: %[[RESULT_REAL:.*]] = fadd float %[[A_REAL]], -1.000000e+00 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4 +// OGCG: store float %[[A_IMAG]], ptr %[[A_IMAG_PTR]], align 4 diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp index 8335fff414d21..ae69b2486efd0 100644 --- a/clang/test/CIR/CodeGen/complex.cpp +++ b/clang/test/CIR/CodeGen/complex.cpp @@ -612,7 +612,7 @@ void foo24() { // CIR: %[[ARR:.*]] = cir.alloca !cir.array x 2>, !cir.ptr x 2>>, ["arr"] // CIR: %[[RESULT:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["r", init] // CIR: %[[IDX:.*]] = cir.const #cir.int<1> : !s32i -// CIR: %[[ARR_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr x 2>>), !cir.ptr> +// CIR: %[[ARR_PTR:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr x 2>> -> !cir.ptr> // CIR: %[[RESULT_VAL:.*]] = cir.ptr_stride(%[[ARR_PTR]] : !cir.ptr>, %[[IDX]] : !s32i), !cir.ptr> // CIR: %[[TMP:.*]] = cir.load{{.*}} %[[RESULT_VAL]] : !cir.ptr>, !cir.complex // CIR: cir.store{{.*}} %[[TMP]], %[[RESULT]] : !cir.complex, !cir.ptr> @@ -938,11 +938,11 @@ void foo35() { // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex // CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.f16 // CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.f16 -// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float -// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float +// CIR: %[[A_REAL_F32:.*]] = cir.cast 
floating %[[A_REAL]] : !cir.f16 -> !cir.float +// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float // CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex // CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float -// CIR: %[[A_REAL_F16:.*]] = cir.cast(floating, %[[A_REAL_F32]] : !cir.float), !cir.f16 +// CIR: %[[A_REAL_F16:.*]] = cir.cast floating %[[A_REAL_F32]] : !cir.float -> !cir.f16 // CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[REAL_ADDR]] : !cir.f16, !cir.ptr // LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2 @@ -975,11 +975,11 @@ void foo36() { // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex // CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.f16 // CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.f16 -// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float -// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float +// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float +// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float // CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex // CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float -// CIR: %[[A_IMAG_F16:.*]] = cir.cast(floating, %[[A_IMAG_F32]] : !cir.float), !cir.f16 +// CIR: %[[A_IMAG_F16:.*]] = cir.cast floating %[[A_IMAG_F32]] : !cir.float -> !cir.f16 // CIR: cir.store{{.*}} %[[A_IMAG_F16]], %[[IMAG_ADDR]] : !cir.f16, !cir.ptr // LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2 @@ -1001,3 +1001,313 @@ void foo36() { // OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float // OGCG: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half // OGCG: store half %[[A_IMAG_F16]], ptr %[[IMAG_ADDR]], align 2 + +void foo37() { + _Complex float a; + _Complex float b = __extension__ a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR: cir.store{{.*}} %[[TMP_A]], %[[B_ADDR]] : !cir.complex, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4 +// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4 +// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4 +// LLVM: store { float, float } %[[TMP_A]], ptr %[[B_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4 +// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0 +// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1 +// OGCG: store float %[[A_REAL]], ptr %[[B_REAL_PTR]], align 4 +// OGCG: store float %[[A_IMAG]], ptr %[[B_IMAG_PTR]], align 4 + +void real_on_non_glvalue() { + float _Complex a; + 
float b = __real__(+a); +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.float +// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.float +// CIR: %[[A_REAL_PLUS:.*]] = cir.unary(plus, %[[A_REAL]]) : !cir.float, !cir.float +// CIR: %[[A_IMAG_PLUS:.*]] = cir.unary(plus, %[[A_IMAG]]) : !cir.float, !cir.float +// CIR: %[[RESULT:.*]] = cir.complex.create %[[A_REAL_PLUS]], %[[A_IMAG_PLUS]] : !cir.float -> !cir.complex +// CIR: %[[RESULT_REAL:.*]] = cir.complex.real %[[RESULT]] : !cir.complex -> !cir.float +// CIR: cir.store{{.*}} %[[RESULT_REAL]], %[[B_ADDR]] : !cir.float, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4 +// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4 +// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4 +// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0 +// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1 +// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL]], 0 +// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[A_IMAG]], 1 +// LLVM: store float %[[A_REAL]], ptr %[[B_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca float, align 4 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4 +// OGCG: store float %[[A_REAL]], ptr %[[B_ADDR]], align 4 + +void imag_on_non_glvalue() { + float _Complex a; + float b = __imag__(+a); +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.float +// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.float +// CIR: %[[A_REAL_PLUS:.*]] = cir.unary(plus, %[[A_REAL]]) : !cir.float, !cir.float +// CIR: %[[A_IMAG_PLUS:.*]] = cir.unary(plus, %[[A_IMAG]]) : !cir.float, !cir.float +// CIR: %[[RESULT:.*]] = cir.complex.create %[[A_REAL_PLUS]], %[[A_IMAG_PLUS]] : !cir.float -> !cir.complex +// CIR: %[[RESULT_IMAG:.*]] = cir.complex.imag %[[RESULT]] : !cir.complex -> !cir.float +// CIR: cir.store{{.*}} %[[RESULT_IMAG]], %[[B_ADDR]] : !cir.float, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4 +// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4 +// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4 +// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0 +// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1 +// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL]], 0 +// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[A_IMAG]], 1 +// LLVM: store float %[[A_IMAG]], ptr %[[B_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca 
float, align 4 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4 +// OGCG: store float %[[A_IMAG]], ptr %[[B_ADDR]], align 4 + +void atomic_complex_type() { + _Atomic(float _Complex) a; + float _Complex b = __c11_atomic_load(&a, __ATOMIC_RELAXED); +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["b", init] +// CIR: %[[ATOMIC_TMP_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["atomic-temp"] +// CIR: %[[A_PTR:.*]] = cir.cast bitcast %[[A_ADDR]] : !cir.ptr> -> !cir.ptr +// CIR: %[[ATOMIC_TMP_PTR:.*]] = cir.cast bitcast %[[ATOMIC_TMP_ADDR]] : !cir.ptr> -> !cir.ptr +// CIR: %[[TMP_A_ATOMIC:.*]] = cir.load{{.*}} atomic(relaxed) %[[A_PTR]] : !cir.ptr, !u64i +// CIR: cir.store{{.*}} %[[TMP_A_ATOMIC]], %[[ATOMIC_TMP_PTR]] : !u64i, !cir.ptr +// CIR: %[[TMP_ATOMIC_PTR:.*]] = cir.cast bitcast %[[ATOMIC_TMP_PTR]] : !cir.ptr -> !cir.ptr> +// CIR: %[[TMP_ATOMIC:.*]] = cir.load{{.*}} %[[TMP_ATOMIC_PTR]] : !cir.ptr>, !cir.complex +// CIR: cir.store{{.*}} %[[TMP_ATOMIC]], %[[B_ADDR]] : !cir.complex, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 8 +// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4 +// LLVM: %[[ATOMIC_TMP_ADDR:.*]] = alloca { float, float }, i64 1, align 8 +// LLVM: %[[TMP_A_ATOMIC:.*]] = load atomic i64, ptr %[[A_ADDR]] monotonic, align 8 +// LLVM: store i64 %[[TMP_A_ATOMIC]], ptr %[[ATOMIC_TMP_ADDR]], align 8 +// LLVM: %[[TMP_ATOMIC:.*]] = load { float, float }, ptr %[[ATOMIC_TMP_ADDR]], align 8 +// LLVM: store { float, float } %[[TMP_ATOMIC]], ptr %[[B_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 8 +// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4 +// OGCG: %[[ATOMIC_TMP_ADDR:.*]] = alloca { float, float }, align 8 +// OGCG: %[[TMP_A_ATOMIC:.*]] = load atomic i64, ptr %[[A_ADDR]] monotonic, align 8 +// OGCG: store i64 %[[TMP_A_ATOMIC]], ptr %[[ATOMIC_TMP_ADDR]], align 8 +// OGCG: %[[ATOMIC_TMP_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[ATOMIC_TMP_ADDR]], i32 0, i32 0 +// OGCG: %[[ATOMIC_TMP_REAL:.*]] = load float, ptr %[[ATOMIC_TMP_REAL_PTR]], align 8 +// OGCG: %[[ATOMIC_TMP_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[ATOMIC_TMP_ADDR]], i32 0, i32 1 +// OGCG: %[[ATOMIC_TMP_IMAG:.*]] = load float, ptr %[[ATOMIC_TMP_IMAG_PTR]], align 4 +// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0 +// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1 +// OGCG: store float %[[ATOMIC_TMP_REAL]], ptr %[[B_REAL_PTR]], align 4 +// OGCG: store float %[[ATOMIC_TMP_IMAG]], ptr %[[B_IMAG_PTR]], align 4 + +void real_on_scalar_glvalue() { + float a; + float b = __real__ a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr, !cir.float +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.float -> !cir.float +// CIR: cir.store{{.*}} %[[A_REAL]], %[[B_ADDR]] : !cir.float, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4 
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4 +// LLVM: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4 +// LLVM: store float %[[TMP_A]], ptr %[[B_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca float, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca float, align 4 +// OGCG: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4 +// OGCG: store float %[[TMP_A]], ptr %[[B_ADDR]], align 4 + +void imag_on_scalar_glvalue() { + float a; + float b = __imag__ a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr, ["b", init] +// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float +// CIR: cir.store{{.*}} %[[CONST_ZERO]], %[[B_ADDR]] : !cir.float, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4 +// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4 +// LLVM: store float 0.000000e+00, ptr %[[B_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca float, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca float, align 4 +// OGCG: store float 0.000000e+00, ptr %[[B_ADDR]], align 4 + +void real_on_scalar_with_type_promotion() { + _Float16 a; + _Float16 b = __real__ a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr, !cir.f16 +// CIR: %[[TMP_A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A_F32]] : !cir.float -> !cir.float +// CIR: %[[TMP_A_F16:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.f16 +// CIR: cir.store{{.*}} %[[TMP_A_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2 +// LLVM: %[[TMP_A_F32:.*]] = fpext half %[[TMP_A]] to float +// LLVM: %[[TMP_A_F16:.*]] = fptrunc float %[[TMP_A_F32]] to half +// LLVM: store half %[[TMP_A_F16]], ptr %[[B_ADDR]], align 2 + +// OGCG: %[[A_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[B_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2 +// OGCG: %[[TMP_A_F32:.*]] = fpext half %[[TMP_A]] to float +// OGCG: %[[TMP_A_F16:.*]] = fptrunc float %[[TMP_A_F32]] to half +// OGCG: store half %[[TMP_A_F16]], ptr %[[B_ADDR]], align 2 + +void imag_on_scalar_with_type_promotion() { + _Float16 a; + _Float16 b = __imag__ a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["b", init] +// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float +// CIR: %[[CONST_ZERO_F16:.*]] = cir.cast floating %[[CONST_ZERO]] : !cir.float -> !cir.f16 +// CIR: cir.store{{.*}} %[[CONST_ZERO_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: store half 0xH0000, ptr %[[B_ADDR]], align 2 + +// OGCG: %[[A_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[B_ADDR:.*]] = alloca half, align 2 +// OGCG: store half 0xH0000, ptr %[[B_ADDR]], align 2 + +void imag_on_const_scalar() { + float a; + float b = __imag__ 1.0f; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr, ["b", init] +// CIR: %[[CONST_ONE:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float +// CIR: %[[CONST_ZERO:.*]] = cir.const 
#cir.fp<0.000000e+00> : !cir.float +// CIR: cir.store{{.*}} %[[CONST_ZERO]], %[[B_ADDR]] : !cir.float, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4 +// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4 +// LLVM: store float 0.000000e+00, ptr %[[B_ADDR]], align 4 + +// OGCG: %[[A_ADDR:.*]] = alloca float, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca float, align 4 +// OGCG: store float 0.000000e+00, ptr %[[B_ADDR]], align 4 + +void real_on_scalar_from_real_with_type_promotion() { + _Float16 _Complex a; + _Float16 b = __real__(__real__ a); +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.f16 +// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.f16 +// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float +// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float +// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex +// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[A_REAL_F32]] : !cir.float -> !cir.float +// CIR: %[[A_REAL_F16:.*]] = cir.cast floating %[[A_REAL]] : !cir.float -> !cir.f16 +// CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2 +// LLVM: %[[B_ADDR]] = alloca half, i64 1, align 2 +// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2 +// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0 +// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1 +// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float +// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float +// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0 +// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1 +// LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half +// LLVM: store half %[[A_REAL_F16]], ptr %[[B_ADDR]], align 2 + +// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2 +// OGCG: %[[B_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_REAL:.*]] = load half, ptr %[[A_REAL_PTR]], align 2 +// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float +// OGCG: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half +// OGCG: store half %[[A_REAL_F16]], ptr %[[B_ADDR]], align 2 + +void real_on_scalar_from_imag_with_type_promotion() { + _Float16 _Complex a; + _Float16 b = __real__(__imag__ a); +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.f16 +// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.f16 +// CIR: %[[A_REAL_F32:.*]] = cir.cast floating %[[A_REAL]] : !cir.f16 -> !cir.float +// CIR: %[[A_IMAG_F32:.*]] = cir.cast floating %[[A_IMAG]] : !cir.f16 -> !cir.float +// CIR: %[[A_COMPLEX_F32:.*]] = 
cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex +// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float +// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_IMAG_F32]] : !cir.float -> !cir.float +// CIR: %[[A_REAL_F16:.*]] = cir.cast floating %[[A_REAL_F32]] : !cir.float -> !cir.f16 +// CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2 +// LLVM: %[[B_ADDR]] = alloca half, i64 1, align 2 +// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2 +// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0 +// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1 +// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float +// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float +// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0 +// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1 +// LLVM: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half +// LLVM: store half %[[A_IMAG_F16]], ptr %[[B_ADDR]], align 2 + +// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2 +// OGCG: %[[B_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: %[[A_IMAG:.*]] = load half, ptr %[[A_IMAG_PTR]], align 2 +// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float +// OGCG: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half +// OGCG: store half %[[A_IMAG_F16]], ptr %[[B_ADDR]], align 2 diff --git a/clang/test/CIR/CodeGen/cxx-default-init.cpp b/clang/test/CIR/CodeGen/cxx-default-init.cpp index 06d3a27f61cc9..b3d706ffa831f 100644 --- a/clang/test/CIR/CodeGen/cxx-default-init.cpp +++ b/clang/test/CIR/CodeGen/cxx-default-init.cpp @@ -33,7 +33,7 @@ struct ZeroInit { // CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CIR: cir.store{{.*}} %[[ZERO]], %[[P_B]] // CIR: %[[ARR:.*]] = cir.get_member %[[THIS]][2] {name = "arr"} -// CIR: %[[ARR_BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_BEGIN:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr> -> !cir.ptr // CIR: cir.store{{.*}} %[[ARR_BEGIN]], %[[ITER]] // CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !s64i // CIR: %[[END:.*]] = cir.ptr_stride(%[[ARR_BEGIN]] : !cir.ptr, %[[FOUR]] : !s64i) @@ -139,7 +139,7 @@ struct ValueInit { // CIR: %[[THREE:.*]] = cir.const #cir.int<3> : !s32i // CIR: cir.store{{.*}} %[[THREE]], %[[P_B]] // CIR: %[[ARR:.*]] = cir.get_member %[[THIS]][2] {name = "arr"} -// CIR: %[[ARR_BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr>), !cir.ptr +// CIR: %[[ARR_BEGIN:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr> -> !cir.ptr // CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !s32i // CIR: cir.store{{.*}} %[[FOUR]], %[[ARR_BEGIN]] // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i @@ -169,7 +169,7 @@ struct ValueInit { // CIR: cir.store{{.*}} %[[FOUR_FIVEI]], %[[C]] // CIR: %[[BF:.*]] = cir.get_member %[[THIS]][4] {name = "bf"} // CIR: %[[FF:.*]] = cir.const #cir.int<255> : !s32i -// CIR: %[[FF_CAST:.*]] = cir.cast(integral, %[[FF]] : !s32i), !u32i +// CIR: %[[FF_CAST:.*]] = cir.cast integral %[[FF]] : !s32i -> !u32i // CIR: %[[BF_VAL:.*]] = cir.set_bitfield{{.*}} (#bfi_bf, %[[BF]] : !cir.ptr, %[[FF_CAST]] : !u32i) // LLVM: define{{.*}} void @_ZN9ValueInitC2Ev(ptr 
%[[THIS_ARG:.*]]) diff --git a/clang/test/CIR/CodeGen/delegating-ctor.cpp b/clang/test/CIR/CodeGen/delegating-ctor.cpp index 73ee6b719940a..c95ecf44dcb10 100644 --- a/clang/test/CIR/CodeGen/delegating-ctor.cpp +++ b/clang/test/CIR/CodeGen/delegating-ctor.cpp @@ -116,23 +116,23 @@ Derived::Derived(const void *inVoid) { squawk(); } // CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] // CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] // CIR: %[[VPTR_GLOBAL_ADDR:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr> -// CIR: %[[VPTR_PTR:.*]] = cir.cast(bitcast, %[[VPTR_GLOBAL_ADDR]] : !cir.ptr>), !cir.ptr +// CIR: %[[VPTR_PTR:.*]] = cir.cast bitcast %[[VPTR_GLOBAL_ADDR]] : !cir.ptr> -> !cir.ptr // CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_PTR]] : !cir.ptr, !cir.vptr // CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr // CIR: cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr // CIR: %[[VPTR_BASE_ADDR:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr> -// CIR: %[[VPTR_BASE_PTR:.*]] = cir.cast(bitcast, %[[VPTR_BASE_ADDR]] : !cir.ptr>), !cir.ptr +// CIR: %[[VPTR_BASE_PTR:.*]] = cir.cast bitcast %[[VPTR_BASE_ADDR]] : !cir.ptr> -> !cir.ptr // CIR: %[[VPTR_BASE:.*]] = cir.load{{.*}} %[[VPTR_BASE_PTR]] : !cir.ptr, !cir.vptr // CIR: %[[VPTR_DERIVED_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr // CIR: %[[VPTR_DERIVED:.*]] = cir.load{{.*}} %[[VPTR_DERIVED_ADDR]] : !cir.ptr, !cir.vptr -// CIR: %[[VPTR_DERIVED_AS_I8PTR:.*]] = cir.cast(bitcast, %[[VPTR_DERIVED]] : !cir.vptr), !cir.ptr +// CIR: %[[VPTR_DERIVED_AS_I8PTR:.*]] = cir.cast bitcast %[[VPTR_DERIVED]] : !cir.vptr -> !cir.ptr // CIR: %[[BASE_LOC_OFFSET:.*]] = cir.const #cir.int<-32> : !s64i // CIR: %[[BASE_OFFSET_PTR:.*]] = cir.ptr_stride(%[[VPTR_DERIVED_AS_I8PTR]] : !cir.ptr, %[[BASE_LOC_OFFSET]] : !s64i), !cir.ptr -// CIR: %[[BASE_OFFSET_I64PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_PTR]] : !cir.ptr), !cir.ptr +// CIR: %[[BASE_OFFSET_I64PTR:.*]] = cir.cast bitcast %[[BASE_OFFSET_PTR]] : !cir.ptr -> !cir.ptr // CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_I64PTR]] : !cir.ptr, !s64i -// CIR: %[[THIS_AS_I8PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr +// CIR: %[[THIS_AS_I8PTR:.*]] = cir.cast bitcast %[[THIS]] : !cir.ptr -> !cir.ptr // CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_AS_I8PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr -// CIR: %[[BASE_AS_I8PTR:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr +// CIR: %[[BASE_AS_I8PTR:.*]] = cir.cast bitcast %[[BASE_PTR]] : !cir.ptr -> !cir.ptr // CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_AS_I8PTR]] : !cir.ptr -> !cir.ptr // CIR: cir.store{{.*}} %[[VPTR_BASE]], %[[BASE_VPTR_ADDR]] : !cir.vptr, !cir.ptr // CIR: %[[VPTR_BASE_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr diff --git a/clang/test/CIR/CodeGen/delete.cpp b/clang/test/CIR/CodeGen/delete.cpp new file mode 100644 index 0000000000000..69640aa04531f --- /dev/null +++ b/clang/test/CIR/CodeGen/delete.cpp @@ -0,0 +1,88 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -fclangir -mconstructor-aliases -emit-llvm %s -o %t-cir.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -std=c++20 -mconstructor-aliases 
-emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s + +typedef __typeof(sizeof(int)) size_t; + +struct SizedDelete { + void operator delete(void*, size_t); + int member; +}; +void test_sized_delete(SizedDelete *x) { + delete x; +} + +// SizedDelete::operator delete(void*, unsigned long) +// CIR: cir.func private @_ZN11SizedDeletedlEPvm(!cir.ptr, !u64i) +// LLVM: declare void @_ZN11SizedDeletedlEPvm(ptr, i64) + +// CIR: cir.func dso_local @_Z17test_sized_deleteP11SizedDelete +// CIR: %[[X:.*]] = cir.load{{.*}} %{{.*}} +// CIR: %[[X_CAST:.*]] = cir.cast bitcast %[[X]] : !cir.ptr -> !cir.ptr +// CIR: %[[OBJ_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CIR: cir.call @_ZN11SizedDeletedlEPvm(%[[X_CAST]], %[[OBJ_SIZE]]) nothrow : (!cir.ptr, !u64i) -> () + +// LLVM: define dso_local void @_Z17test_sized_deleteP11SizedDelete +// LLVM: %[[X:.*]] = load ptr, ptr %{{.*}} +// LLVM: call void @_ZN11SizedDeletedlEPvm(ptr %[[X]], i64 4) + +// OGCG: define dso_local void @_Z17test_sized_deleteP11SizedDelete +// OGCG: %[[X:.*]] = load ptr, ptr %{{.*}} +// OGCG: %[[ISNULL:.*]] = icmp eq ptr %[[X]], null +// OGCG: br i1 %[[ISNULL]], label %{{.*}}, label %[[DELETE_NOTNULL:.*]] +// OGCG: [[DELETE_NOTNULL]]: +// OGCG: call void @_ZN11SizedDeletedlEPvm(ptr noundef %[[X]], i64 noundef 4) + +// This function is declared below the call in OGCG. +// OGCG: declare void @_ZN11SizedDeletedlEPvm(ptr noundef, i64 noundef) + +struct Contents { + ~Contents() {} +}; +struct Container { + Contents *contents; + ~Container(); +}; +Container::~Container() { delete contents; } + +// Contents::~Contents() +// CIR: cir.func comdat linkonce_odr @_ZN8ContentsD2Ev +// LLVM: define linkonce_odr void @_ZN8ContentsD2Ev + +// operator delete(void*, unsigned long) +// CIR: cir.func private @_ZdlPvm(!cir.ptr, !u64i) +// LLVM: declare void @_ZdlPvm(ptr, i64) + +// Container::~Container() +// CIR: cir.func dso_local @_ZN9ContainerD2Ev +// CIR: %[[THIS:.*]] = cir.load %{{.*}} +// CIR: %[[CONTENTS_PTR_ADDR:.*]] = cir.get_member %[[THIS]][0] {name = "contents"} : !cir.ptr -> !cir.ptr> +// CIR: %[[CONTENTS_PTR:.*]] = cir.load{{.*}} %[[CONTENTS_PTR_ADDR]] +// CIR: cir.call @_ZN8ContentsD2Ev(%[[CONTENTS_PTR]]) nothrow : (!cir.ptr) -> () +// CIR: %[[CONTENTS_CAST:.*]] = cir.cast bitcast %[[CONTENTS_PTR]] : !cir.ptr -> !cir.ptr +// CIR: %[[OBJ_SIZE:.*]] = cir.const #cir.int<1> : !u64i +// CIR: cir.call @_ZdlPvm(%[[CONTENTS_CAST]], %[[OBJ_SIZE]]) nothrow : (!cir.ptr, !u64i) -> () + +// LLVM: define dso_local void @_ZN9ContainerD2Ev +// LLVM: %[[THIS:.*]] = load ptr, ptr %{{.*}} +// LLVM: %[[CONTENTS_PTR_ADDR:.*]] = getelementptr %struct.Container, ptr %[[THIS]], i32 0, i32 0 +// LLVM: %[[CONTENTS_PTR:.*]] = load ptr, ptr %[[CONTENTS_PTR_ADDR]] +// LLVM: call void @_ZN8ContentsD2Ev(ptr %[[CONTENTS_PTR]]) +// LLVM: call void @_ZdlPvm(ptr %[[CONTENTS_PTR]], i64 1) + +// OGCG: define dso_local void @_ZN9ContainerD2Ev +// OGCG: %[[THIS:.*]] = load ptr, ptr %{{.*}} +// OGCG: %[[CONTENTS:.*]] = getelementptr inbounds nuw %struct.Container, ptr %[[THIS]], i32 0, i32 0 +// OGCG: %[[CONTENTS_PTR:.*]] = load ptr, ptr %[[CONTENTS]] +// OGCG: %[[ISNULL:.*]] = icmp eq ptr %[[CONTENTS_PTR]], null +// OGCG: br i1 %[[ISNULL]], label %{{.*}}, label %[[DELETE_NOTNULL:.*]] +// OGCG: [[DELETE_NOTNULL]]: +// OGCG: call void @_ZN8ContentsD2Ev(ptr noundef nonnull align 1 dereferenceable(1) %[[CONTENTS_PTR]]) +// OGCG: call void @_ZdlPvm(ptr noundef %[[CONTENTS_PTR]], i64 noundef 1) + +// These functions are declared/defined below the calls in 
OGCG. +// OGCG: define linkonce_odr void @_ZN8ContentsD2Ev +// OGCG: declare void @_ZdlPvm(ptr noundef, i64 noundef) diff --git a/clang/test/CIR/CodeGen/destructors.cpp b/clang/test/CIR/CodeGen/destructors.cpp index fde0732a4352f..1ede1569a826f 100644 --- a/clang/test/CIR/CodeGen/destructors.cpp +++ b/clang/test/CIR/CodeGen/destructors.cpp @@ -64,7 +64,7 @@ void test_array_destructor() { // CIR: cir.func dso_local @_Z21test_array_destructorv() // CIR: %[[ARR:.*]] = cir.alloca !cir.array, !cir.ptr>, ["arr", init] // CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp", init] -// CIR: %[[BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr>) +// CIR: %[[BEGIN:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr> // CIR: cir.store{{.*}} %[[BEGIN]], %[[ARR_PTR]] // CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s64i // CIR: %[[ARR_END:.*]] = cir.ptr_stride(%[[BEGIN]] : !cir.ptr, %[[FIVE]] : !s64i) @@ -80,7 +80,7 @@ void test_array_destructor() { // CIR: cir.condition(%[[CMP]]) // CIR: } // CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !u64i -// CIR: %[[BEGIN:.*]] = cir.cast(array_to_ptrdecay, %[[ARR]] : !cir.ptr>) +// CIR: %[[BEGIN:.*]] = cir.cast array_to_ptrdecay %[[ARR]] : !cir.ptr> // CIR: %[[END:.*]] = cir.ptr_stride(%[[BEGIN]] : !cir.ptr, %[[FOUR]] : !u64i) // CIR: %[[ARR_PTR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] // CIR: cir.store %[[END]], %[[ARR_PTR]] diff --git a/clang/test/CIR/CodeGen/finegrain-bitfield-access.cpp b/clang/test/CIR/CodeGen/finegrain-bitfield-access.cpp index 930b0a9c70059..d9ccd273ff3ba 100644 --- a/clang/test/CIR/CodeGen/finegrain-bitfield-access.cpp +++ b/clang/test/CIR/CodeGen/finegrain-bitfield-access.cpp @@ -70,7 +70,7 @@ void write8_1() { // CIR-LABEL: @_Z8write8_1v // CIR: [[CONST3:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: [[INT3:%.*]] = cir.cast(integral, [[CONST3]] : !s32i), !u32i +// CIR: [[INT3:%.*]] = cir.cast integral [[CONST3]] : !s32i -> !u32i // CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f3"} : !cir.ptr -> !cir.ptr // CIR: cir.set_bitfield align(1) (#bfi_f3, [[MEMBER]] : !cir.ptr, [[INT3]] : !u32i) -> !u32i @@ -116,7 +116,7 @@ void write8_2() { // CIR-LABEL: @_Z8write8_2v // CIR: [[CONST3:%.*]] = cir.const #cir.int<3> : !s32i -// CIR: [[INT3:%.*]] = cir.cast(integral, [[CONST3]] : !s32i), !u32i +// CIR: [[INT3:%.*]] = cir.cast integral [[CONST3]] : !s32i -> !u32i // CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[2] {name = "f5"} : !cir.ptr -> !cir.ptr // CIR: cir.set_bitfield align(2) (#bfi_f5, %3 : !cir.ptr, {{.*}} : !u32i) -> !u32i @@ -141,7 +141,7 @@ unsigned read16_1() { // CIR-LABEL: @_Z8read16_1v // CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[0] {name = "f1"} : !cir.ptr -> !cir.ptr // CIR: [[BITFI:%.*]] = cir.get_bitfield align(8) (#bfi_f1, [[MEMBER]] : !cir.ptr) -> !u64i -// CIR: [[BFCAST:%.*]] = cir.cast(integral, [[BITFI]] : !u64i), !u32i +// CIR: [[BFCAST:%.*]] = cir.cast integral [[BITFI]] : !u64i -> !u32i // CIR: cir.store [[BFCAST]], {{.*}} : !u32i, !cir.ptr // CIR: [[RET:%.*]] = cir.load {{.*}} : !cir.ptr, !u32i // CIR: cir.return [[RET]] : !u32i @@ -167,7 +167,7 @@ unsigned read16_2() { // CIR-LABEL: @_Z8read16_2v // CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f2"} : !cir.ptr -> !cir.ptr // CIR: [[BITFI:%.*]] = cir.get_bitfield align(2) (#bfi_f2, [[MEMBER]] : !cir.ptr) -> !u64i -// CIR: [[BFCAST:%.*]] = cir.cast(integral, [[BITFI]] : !u64i), !u32i +// CIR: [[BFCAST:%.*]] = cir.cast integral [[BITFI]] : !u64i -> !u32i // CIR: cir.store [[BFCAST]], {{.*}} : !u32i, 
!cir.ptr // CIR: [[RET:%.*]] = cir.load {{.*}} : !cir.ptr, !u32i // CIR: cir.return [[RET]] : !u32i @@ -192,7 +192,7 @@ void write16_1() { // CIR-LABEL: @_Z9write16_1v // CIR: [[CONST5:%.*]] = cir.const #cir.int<5> : !s32i -// CIR: [[INT5:%.*]] = cir.cast(integral, [[CONST5]] : !s32i), !u64i +// CIR: [[INT5:%.*]] = cir.cast integral [[CONST5]] : !s32i -> !u64i // CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[0] {name = "f1"} : !cir.ptr -> !cir.ptr // CIR: cir.set_bitfield align(8) (#bfi_f1, [[MEMBER]] : !cir.ptr, [[INT5]] : !u64i) -> !u64i // CIR: cir.return @@ -212,7 +212,7 @@ void write16_2() { // CIR-LABEL: @_Z9write16_2v // CIR: [[CONST5:%.*]] = cir.const #cir.int<5> : !s32i -// CIR: [[INT5:%.*]] = cir.cast(integral, [[CONST5]] : !s32i), !u64i +// CIR: [[INT5:%.*]] = cir.cast integral [[CONST5]] : !s32i -> !u64i // CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f2"} : !cir.ptr -> !cir.ptr // CIR: cir.set_bitfield align(2) (#bfi_f2, [[MEMBER]] : !cir.ptr, {{.*}} : !u64i) -> !u64i // CIR: cir.return @@ -232,7 +232,7 @@ unsigned read32_1() { // CIR-LABEL: @_Z8read32_1v // CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f3"} : !cir.ptr -> !cir.ptr // CIR: [[BITFI:%.*]] = cir.get_bitfield align(4) (#bfi_f3_1, [[MEMBER]] : !cir.ptr) -> !u64i -// CIR: [[BFCAST:%.*]] = cir.cast(integral, [[BITFI]] : !u64i), !u32i +// CIR: [[BFCAST:%.*]] = cir.cast integral [[BITFI]] : !u64i -> !u32i // CIR: cir.store [[BFCAST]], {{.*}} : !u32i, !cir.ptr // CIR: [[RET:%.*]] = cir.load {{.*}} : !cir.ptr, !u32i // CIR: cir.return [[RET]] : !u32i @@ -257,7 +257,7 @@ void write32_1() { // CIR-LABEL: @_Z9write32_1v // CIR: [[CONST5:%.*]] = cir.const #cir.int<5> : !s32i -// CIR: [[INT5:%.*]] = cir.cast(integral, [[CONST5]] : !s32i), !u64i +// CIR: [[INT5:%.*]] = cir.cast integral [[CONST5]] : !s32i -> !u64i // CIR: [[MEMBER:%.*]] = cir.get_member {{.*}}[1] {name = "f3"} : !cir.ptr -> !cir.ptr // CIR: cir.set_bitfield align(4) (#bfi_f3_1, [[MEMBER]] : !cir.ptr, [[INT5]] : !u64i) -> !u64i // CIR: cir.return diff --git a/clang/test/CIR/CodeGen/global-init.cpp b/clang/test/CIR/CodeGen/global-init.cpp new file mode 100644 index 0000000000000..102affc5563ac --- /dev/null +++ b/clang/test/CIR/CodeGen/global-init.cpp @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR + +// Note: The CIR generated from this test isn't ready for lowering to LLVM yet. +// That will require changes to LoweringPrepare. 
+ +struct NeedsCtor { + NeedsCtor(); +}; + +NeedsCtor needsCtor; + +// CIR: cir.func private @_ZN9NeedsCtorC1Ev(!cir.ptr) +// CIR: cir.global external @needsCtor = ctor : !rec_NeedsCtor { +// CIR: %[[THIS:.*]] = cir.get_global @needsCtor : !cir.ptr +// CIR: cir.call @_ZN9NeedsCtorC1Ev(%[[THIS]]) : (!cir.ptr) -> () +// CIR: } diff --git a/clang/test/CIR/CodeGen/if.cpp b/clang/test/CIR/CodeGen/if.cpp index daaec8a61484d..823539b88834f 100644 --- a/clang/test/CIR/CodeGen/if.cpp +++ b/clang/test/CIR/CodeGen/if.cpp @@ -74,7 +74,7 @@ void if1(int a) { // CIR: cir.func{{.*}} @_Z3if1i(%arg0: !s32i loc({{.*}})) // CIR: cir.scope { // CIR: %3 = cir.load{{.*}} %0 : !cir.ptr, !s32i -// CIR: %4 = cir.cast(int_to_bool, %3 : !s32i), !cir.bool +// CIR: %4 = cir.cast int_to_bool %3 : !s32i -> !cir.bool // CIR-NEXT: cir.if %4 { // CIR-NEXT: %5 = cir.const #cir.int<3> : !s32i // CIR-NEXT: cir.store{{.*}} %5, %1 : !s32i, !cir.ptr @@ -141,7 +141,7 @@ void if2(int a, bool b, bool c) { // CIR: cir.func{{.*}} @_Z3if2ibb(%arg0: !s32i loc({{.*}}), %arg1: !cir.bool loc({{.*}}), %arg2: !cir.bool loc({{.*}})) // CIR: cir.scope { // CIR: %5 = cir.load{{.*}} %0 : !cir.ptr, !s32i -// CIR: %6 = cir.cast(int_to_bool, %5 : !s32i), !cir.bool +// CIR: %6 = cir.cast int_to_bool %5 : !s32i -> !cir.bool // CIR: cir.if %6 { // CIR: %7 = cir.const #cir.int<3> : !s32i // CIR: cir.store{{.*}} %7, %3 : !s32i, !cir.ptr @@ -267,7 +267,7 @@ int if_init() { // CIR: %[[CONST42:.*]] = cir.const #cir.int<42> : !s32i // CIR: cir.store{{.*}} %[[CONST42]], %[[X]] : !s32i, !cir.ptr // CIR: %[[X_VAL:.*]] = cir.load{{.*}} %[[X]] : !cir.ptr, !s32i -// CIR: %[[COND:.*]] = cir.cast(int_to_bool, %[[X_VAL]] : !s32i), !cir.bool +// CIR: %[[COND:.*]] = cir.cast int_to_bool %[[X_VAL]] : !s32i -> !cir.bool // CIR: cir.if %[[COND]] { // CIR: %[[X_IF:.*]] = cir.load{{.*}} %[[X]] : !cir.ptr, !s32i // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i diff --git a/clang/test/CIR/CodeGen/int-to-bool.cpp b/clang/test/CIR/CodeGen/int-to-bool.cpp index ad36af4552c2f..97b799b60d25f 100644 --- a/clang/test/CIR/CodeGen/int-to-bool.cpp +++ b/clang/test/CIR/CodeGen/int-to-bool.cpp @@ -10,7 +10,7 @@ bool f1(unsigned char c) { } // CIR: cir.func{{.*}} @_Z2f1h -// CIR: cir.cast(int_to_bool, %{{.*}} : !u8i), !cir.bool +// CIR: cir.cast int_to_bool %{{.*}} : !u8i -> !cir.bool // Note: The full zext/store/load/trunc sequence is checked here to show what // CIR is being lowered to. 
There's no need to check it for every function since @@ -33,7 +33,7 @@ bool f2(short s) { } // CIR: cir.func{{.*}} @_Z2f2s -// CIR: cir.cast(int_to_bool, %{{.*}} : !s16i), !cir.bool +// CIR: cir.cast int_to_bool %{{.*}} : !s16i -> !cir.bool // LLVM: define{{.*}} i1 @_Z2f2s // LLVM: %[[CMP:.*]] = icmp ne i16 %4, 0 @@ -48,7 +48,7 @@ bool f3(unsigned u) { } // CIR: cir.func{{.*}} @_Z2f3j -// CIR: cir.cast(int_to_bool, %{{.*}} : !u32i), !cir.bool +// CIR: cir.cast int_to_bool %{{.*}} : !u32i -> !cir.bool // LLVM: define{{.*}} i1 @_Z2f3j // LLVM: %[[CMP:.*]] = icmp ne i32 %4, 0 @@ -63,7 +63,7 @@ bool f4(long l) { } // CIR: cir.func{{.*}} @_Z2f4l -// CIR: cir.cast(int_to_bool, %{{.*}} : !s64i), !cir.bool +// CIR: cir.cast int_to_bool %{{.*}} : !s64i -> !cir.bool // LLVM: define{{.*}} i1 @_Z2f4l // LLVM: %[[CMP:.*]] = icmp ne i64 %4, 0 diff --git a/clang/test/CIR/CodeGen/lambda-static-invoker.cpp b/clang/test/CIR/CodeGen/lambda-static-invoker.cpp new file mode 100644 index 0000000000000..15d768ef21b03 --- /dev/null +++ b/clang/test/CIR/CodeGen/lambda-static-invoker.cpp @@ -0,0 +1,199 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll +// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s + +// We declare anonymous record types to represent lambdas. Rather than trying to +// match the declarations, we establish variables for these when they are used. + +int g3() { + auto* fn = +[](int const& i) -> int { return i; }; + auto task = fn(3); + return task; +} + +// The order of these functions is different in OGCG. 
+ +// OGCG: define dso_local noundef i32 @_Z2g3v() +// OGCG: %[[FN_PTR:.*]] = alloca ptr +// OGCG: %[[REF_TMP:.*]] = alloca %[[REC_LAM_G3:.*]] +// OGCG: %[[TASK:.*]] = alloca i32 +// OGCG: %[[REF_TMP1:.*]] = alloca i32 +// OGCG: %[[CALL:.*]] = call {{.*}} ptr @"_ZZ2g3vENK3$_0cvPFiRKiEEv"(ptr {{.*}} %[[REF_TMP]]) +// OGCG: store ptr %[[CALL]], ptr %[[FN_PTR]] +// OGCG: %[[FN:.*]] = load ptr, ptr %[[FN_PTR]] +// OGCG: store i32 3, ptr %[[REF_TMP1]] +// OGCG: %[[CALL2:.*]] = call {{.*}} i32 %[[FN]](ptr {{.*}} %[[REF_TMP1]]) +// OGCG: store i32 %[[CALL2]], ptr %[[TASK]] +// OGCG: %[[RESULT:.*]] = load i32, ptr %[[TASK]] +// OGCG: ret i32 %[[RESULT]] + +// OGCG: define internal noundef ptr @"_ZZ2g3vENK3$_0cvPFiRKiEEv"(ptr {{.*}} %[[THIS_ARG:.*]]) +// OGCG: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG: ret ptr @"_ZZ2g3vEN3$_08__invokeERKi" + +// lambda operator() +// CIR: cir.func lambda internal private dso_local @_ZZ2g3vENK3$_0clERKi(%[[THIS_ARG:.*]]: !cir.ptr {{.*}}, %[[REF_I_ARG:.*]]: !cir.ptr {{.*}}) +// CIR: %[[THIS_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] +// CIR: %[[REF_I_ALLOCA:.*]] = cir.alloca {{.*}} ["i", init, const] +// CIR: %[[RETVAL:.*]] = cir.alloca {{.*}} ["__retval"] +// CIR: cir.store %[[THIS_ARG]], %[[THIS_ALLOCA]] +// CIR: cir.store %[[REF_I_ARG]], %[[REF_I_ALLOCA]] +// CIR: %[[THIS:.*]] = cir.load %[[THIS_ALLOCA]] +// CIR: %[[REF_I:.*]] = cir.load %[[REF_I_ALLOCA]] +// CIR: %[[I:.*]] = cir.load{{.*}} %[[REF_I]] +// CIR: cir.store %[[I]], %[[RETVAL]] +// CIR: %[[RET:.*]] = cir.load %[[RETVAL]] +// CIR: cir.return %[[RET]] + +// LLVM: define internal i32 @"_ZZ2g3vENK3$_0clERKi"(ptr %[[THIS_ARG:.*]], ptr %[[REF_I_ARG:.*]]) { +// LLVM: %[[THIS_ALLOCA:.*]] = alloca ptr +// LLVM: %[[REF_I_ALLOCA:.*]] = alloca ptr +// LLVM: %[[RETVAL:.*]] = alloca i32 +// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]] +// LLVM: store ptr %[[REF_I_ARG]], ptr %[[REF_I_ALLOCA]] +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]] +// LLVM: %[[REF_I:.*]] = load ptr, ptr %[[REF_I_ALLOCA]] +// LLVM: %[[I:.*]] = load i32, ptr %[[REF_I]] +// LLVM: store i32 %[[I]], ptr %[[RETVAL]] +// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]] +// LLVM: ret i32 %[[RET]] + +// In OGCG, the _ZZ2g3vENK3$_0clERKi function is emitted after _ZZ2g3vEN3$_08__invokeERKi, see below. 
+ +// lambda invoker +// CIR: cir.func internal private dso_local @_ZZ2g3vEN3$_08__invokeERKi(%[[REF_I_ARG:.*]]: !cir.ptr {{.*}}) -> !s32i { +// CIR: %[[REF_I_ALLOCA:.*]] = cir.alloca {{.*}} ["i", init, const] +// CIR: %[[RETVAL:.*]] = cir.alloca {{.*}} ["__retval"] +// CIR: %[[LAM_ALLOCA:.*]] = cir.alloca ![[REC_LAM_G3]], !cir.ptr, ["unused.capture"] +// CIR: cir.store %[[REF_I_ARG]], %[[REF_I_ALLOCA]] +// CIR: %[[REF_I:.*]] = cir.load{{.*}} %[[REF_I_ALLOCA]] +// CIR: %[[LAM_RESULT:.*]] = cir.call @_ZZ2g3vENK3$_0clERKi(%2, %3) : (!cir.ptr, !cir.ptr) -> !s32i +// CIR: cir.store{{.*}} %[[LAM_RESULT]], %[[RETVAL]] +// CIR: %[[RET:.*]] = cir.load %[[RETVAL]] +// CIR: cir.return %[[RET]] + +// LLVM: define internal i32 @"_ZZ2g3vEN3$_08__invokeERKi"(ptr %[[REF_I_ARG:.*]]) { +// LLVM: %[[REF_I_ALLOCA:.*]] = alloca ptr +// LLVM: %[[RETVAL:.*]] = alloca i32 +// LLVM: %[[LAM_ALLOCA:.*]] = alloca %[[REC_LAM_G3:.*]], +// LLVM: store ptr %[[REF_I_ARG]], ptr %[[REF_I_ALLOCA]] +// LLVM: %[[REF_I:.*]] = load ptr, ptr %[[REF_I_ALLOCA]] +// LLVM: %[[LAM_RESULT:.*]] = call i32 @"_ZZ2g3vENK3$_0clERKi"(ptr %[[LAM_ALLOCA]], ptr %[[REF_I]]) +// LLVM: store i32 %[[LAM_RESULT]], ptr %[[RETVAL]] +// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]] +// LLVM: ret i32 %[[RET]] + +// In OGCG, the _ZZ2g3vEN3$_08__invokeERKi function is emitted after _ZN1A3barEv, see below. + +// lambda operator int (*)(int const&)() +// CIR: cir.func internal private dso_local @_ZZ2g3vENK3$_0cvPFiRKiEEv(%[[THIS_ARG:.*]]: !cir.ptr {{.*}}) -> !cir.ptr) -> !s32i>> { +// CIR: %[[THIS_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] +// CIR: %[[RETVAL:.*]] = cir.alloca !cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i>>>, ["__retval"] +// CIR: cir.store %[[THIS_ARG]], %[[THIS_ALLOCA]] +// CIR: %[[THIS:.*]] = cir.load %[[THIS_ALLOCA]] +// CIR: %[[INVOKER:.*]] = cir.get_global @_ZZ2g3vEN3$_08__invokeERKi : !cir.ptr) -> !s32i>> +// CIR: cir.store %[[INVOKER]], %[[RETVAL]] +// CIR: %[[RET:.*]] = cir.load %[[RETVAL]] +// CIR: cir.return %[[RET]] + +// LLVM: define internal ptr @"_ZZ2g3vENK3$_0cvPFiRKiEEv"(ptr %[[THIS_ARG:.*]]) { +// LLVM: %[[THIS_ALLOCA:.*]] = alloca ptr +// LLVM: %[[RETVAL:.*]] = alloca ptr +// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ALLOCA]] +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ALLOCA]] +// LLVM: store ptr @"_ZZ2g3vEN3$_08__invokeERKi", ptr %[[RETVAL]] +// LLVM: %[[RET:.*]] = load ptr, ptr %[[RETVAL]] +// LLVM: ret ptr %[[RET]] + +// In OGCG, the _ZZ2g3vENK3$_0cvPFiRKiEEv function is emitted just after the _Z2g3v function, see above. + +// CIR: cir.func{{.*}} @_Z2g3v() -> !s32i { +// CIR: %[[RETVAL:.*]] = cir.alloca !s32i, !cir.ptr, ["__retval"] +// CIR: %[[FN_ADDR:.*]] = cir.alloca !cir.ptr) -> !s32i>>, !cir.ptr) -> !s32i>>>, ["fn", init] +// CIR: %[[TASK:.*]] = cir.alloca !s32i, !cir.ptr, ["task", init] + +// 1. Use `operator int (*)(int const&)()` to retrieve the fnptr to `__invoke()`. +// CIR: %[[SCOPE_RET:.*]] = cir.scope { +// CIR: %[[LAM_ALLOCA:.*]] = cir.alloca ![[REC_LAM_G3]], !cir.ptr, ["ref.tmp0"] +// CIR: %[[OPERATOR_RESULT:.*]] = cir.call @_ZZ2g3vENK3$_0cvPFiRKiEEv(%[[LAM_ALLOCA]]){{.*}} +// CIR: %[[PLUS:.*]] = cir.unary(plus, %[[OPERATOR_RESULT]]) +// CIR: cir.yield %[[PLUS]] +// CIR: } + +// 2. Load ptr to `__invoke()`. 
+// CIR: cir.store{{.*}} %[[SCOPE_RET]], %[[FN_ADDR]] +// CIR: %[[SCOPE_RET2:.*]] = cir.scope { +// CIR: %[[REF_TMP1:.*]] = cir.alloca !s32i, !cir.ptr, ["ref.tmp1", init] +// CIR: %[[FN:.*]] = cir.load{{.*}} %[[FN_ADDR]] +// CIR: %[[THREE:.*]] = cir.const #cir.int<3> : !s32i +// CIR: cir.store{{.*}} %[[THREE]], %[[REF_TMP1]] + +// 3. Call `__invoke()`, which effectively executes `operator()`. +// CIR: %[[RESULT:.*]] = cir.call %[[FN]](%[[REF_TMP1]]) +// CIR: cir.yield %[[RESULT]] +// CIR: } + +// CIR: cir.store{{.*}} %[[SCOPE_RET2]], %[[TASK]] +// CIR: %[[TASK_RET:.*]] = cir.load{{.*}} %[[TASK]] +// CIR: cir.store{{.*}} %[[TASK_RET]], %[[RETVAL]] +// CIR: %[[RET:.*]] = cir.load{{.*}} %[[RETVAL]] +// CIR: cir.return %[[RET]] +// CIR: } + +// LLVM: define dso_local i32 @_Z2g3v() { +// LLVM: %[[LAM_ALLOCA:.*]] = alloca %[[REC_LAM_G3]] +// LLVM: %[[REF_TMP1:.*]] = alloca i32 +// LLVM: %[[RETVAL:.*]] = alloca i32 +// LLVM: %[[FN_PTR:.*]] = alloca ptr +// LLVM: %[[TASK:.*]] = alloca i32 +// LLVM: br label %[[SCOPE_BB0:.*]] + +// LLVM: [[SCOPE_BB0]]: +// LLVM: %[[OPERATOR_RESULT:.*]] = call ptr @"_ZZ2g3vENK3$_0cvPFiRKiEEv"(ptr %[[LAM_ALLOCA]]) +// LLVM: br label %[[SCOPE_BB1:.*]] + +// LLVM: [[SCOPE_BB1]]: +// LLVM: %[[TMP0:.*]] = phi ptr [ %[[OPERATOR_RESULT]], %[[SCOPE_BB0]] ] +// LLVM: store ptr %[[TMP0]], ptr %[[FN_PTR]] +// LLVM: br label %[[SCOPE_BB2:.*]] + +// LLVM: [[SCOPE_BB2]]: +// LLVM: %[[FN:.*]] = load ptr, ptr %[[FN_PTR]] +// LLVM: store i32 3, ptr %[[REF_TMP1]] +// LLVM: %[[RESULT:.*]] = call i32 %[[FN]](ptr %[[REF_TMP1]]) +// LLVM: br label %[[RET_BB:.*]] + +// LLVM: [[RET_BB]]: +// LLVM: %[[TMP1:.*]] = phi i32 [ %[[RESULT]], %[[SCOPE_BB2]] ] +// LLVM: store i32 %[[TMP1]], ptr %[[TASK]] +// LLVM: %[[TMP2:.*]] = load i32, ptr %[[TASK]] +// LLVM: store i32 %[[TMP2]], ptr %[[RETVAL]] +// LLVM: %[[RET:.*]] = load i32, ptr %[[RETVAL]] +// LLVM: ret i32 %[[RET]] + +// The definition for _Z2g3v in OGCG is first among the functions for the g3 test, see above. + +// The functions below are emitted later in OGCG, see above for the corresponding LLVM checks. 
+ +// OGCG: define internal noundef i32 @"_ZZ2g3vEN3$_08__invokeERKi"(ptr {{.*}} %[[I_ARG:.*]]) +// OGCG: %[[I_ADDR:.*]] = alloca ptr +// OGCG: %[[UNUSED_CAPTURE:.*]] = alloca %[[REC_LAM_G3:.*]] +// OGCG: store ptr %[[I_ARG]], ptr %[[I_ADDR]] +// OGCG: %[[I_PTR:.*]] = load ptr, ptr %[[I_ADDR]] +// OGCG: %[[CALL:.*]] = call {{.*}} i32 @"_ZZ2g3vENK3$_0clERKi"(ptr {{.*}} %[[UNUSED_CAPTURE]], ptr {{.*}} %[[I_PTR]]) +// OGCG: ret i32 %[[CALL]] + +// OGCG: define internal noundef i32 @"_ZZ2g3vENK3$_0clERKi"(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[I_ARG:.*]]) +// OGCG: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG: %[[I_ADDR:.*]] = alloca ptr +// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG: store ptr %[[I_ARG]], ptr %[[I_ADDR]] +// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG: %[[I_PTR:.*]] = load ptr, ptr %[[I_ADDR]] +// OGCG: %[[I:.*]] = load i32, ptr %[[I_PTR]] +// OGCG: ret i32 %[[I]] diff --git a/clang/test/CIR/CodeGen/lang-c-cpp.cpp b/clang/test/CIR/CodeGen/lang-c-cpp.cpp index e126932104de2..893178384b472 100644 --- a/clang/test/CIR/CodeGen/lang-c-cpp.cpp +++ b/clang/test/CIR/CodeGen/lang-c-cpp.cpp @@ -3,8 +3,8 @@ // RUN: %clang_cc1 -x c -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.c.cir // RUN: FileCheck --check-prefix=CIR-C --input-file=%t.c.cir %s -// CIR-CPP: module attributes {{{.*}}cir.lang = #cir.lang{{.*}}} -// CIR-C: module attributes {{{.*}}cir.lang = #cir.lang{{.*}}} +// CIR-CPP: module{{.*}} attributes {{{.*}}cir.lang = #cir.lang{{.*}}} +// CIR-C: module{{.*}} attributes {{{.*}}cir.lang = #cir.lang{{.*}}} int main() { return 0; diff --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp index 0eba0bbc97c15..b30589cd1b6ec 100644 --- a/clang/test/CIR/CodeGen/loop.cpp +++ b/clang/test/CIR/CodeGen/loop.cpp @@ -205,10 +205,10 @@ void l4() { // CIR: %[[N_ADDR:.*]] = cir.alloca {{.*}} ["n", init] // CIR: cir.store{{.*}} %[[A_ADDR]], %[[RANGE_ADDR]] // CIR: %[[RANGE_LOAD:.*]] = cir.load{{.*}} %[[RANGE_ADDR]] -// CIR: %[[RANGE_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[RANGE_LOAD]] : {{.*}}) +// CIR: %[[RANGE_CAST:.*]] = cir.cast array_to_ptrdecay %[[RANGE_LOAD]] : {{.*}} // CIR: cir.store{{.*}} %[[RANGE_CAST]], %[[BEGIN_ADDR]] // CIR: %[[BEGIN:.*]] = cir.load{{.*}} %[[RANGE_ADDR]] -// CIR: %[[BEGIN_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[BEGIN]] : {{.*}}) +// CIR: %[[BEGIN_CAST:.*]] = cir.cast array_to_ptrdecay %[[BEGIN]] : {{.*}} // CIR: %[[TEN:.*]] = cir.const #cir.int<10> // CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[BEGIN_CAST]] : {{.*}}, %[[TEN]] : {{.*}}) // CIR: cir.store{{.*}} %[[END_PTR]], %[[END_ADDR]] @@ -312,7 +312,7 @@ void l5() { // CIR: %[[BEGIN_ADDR:.*]] = cir.alloca {{.*}} ["__begin1", init] // CIR: %[[END_ADDR:.*]] = cir.alloca {{.*}} ["__end1", init] // CIR: %[[X_ADDR:.*]] = cir.alloca {{.*}} ["x", init] -// CIR: %[[ARR_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_ADDR]] : {{.*}}) +// CIR: %[[ARR_CAST:.*]] = cir.cast array_to_ptrdecay %[[ARR_ADDR]] : {{.*}} // CIR: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CIR: cir.store{{.*}} %[[ONE]], %[[ARR_CAST]] // CIR: %[[OFFSET1:.*]] = cir.const #cir.int<1> : !s64i @@ -329,10 +329,10 @@ void l5() { // CIR: cir.store{{.*}} %[[FOUR]], %[[STRIDE3]] // CIR: cir.store{{.*}} %[[ARR_ADDR]], %[[RANGE_ADDR]] // CIR: %[[RANGE_LOAD:.*]] = cir.load{{.*}} %[[RANGE_ADDR]] -// CIR: %[[RANGE_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[RANGE_LOAD]] : {{.*}}) +// CIR: %[[RANGE_CAST:.*]] = cir.cast array_to_ptrdecay %[[RANGE_LOAD]] : {{.*}} // CIR: cir.store{{.*}} 
%[[RANGE_CAST]], %[[BEGIN_ADDR]] // CIR: %[[BEGIN:.*]] = cir.load{{.*}} %[[RANGE_ADDR]] -// CIR: %[[BEGIN_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[BEGIN]] : {{.*}}) +// CIR: %[[BEGIN_CAST:.*]] = cir.cast array_to_ptrdecay %[[BEGIN]] : {{.*}} // CIR: %[[FOUR:.*]] = cir.const #cir.int<4> : !s64i // CIR: %[[END_PTR:.*]] = cir.ptr_stride(%[[BEGIN_CAST]] : {{.*}}, %[[FOUR]] : {{.*}}) // CIR: cir.store{{.*}} %[[END_PTR]], %[[END_ADDR]] @@ -445,7 +445,7 @@ void test_do_while_false() { // CIR-NEXT: cir.yield // CIR-NEXT: } while { // CIR-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CIR-NEXT: %[[FALSE:.*]] = cir.cast(int_to_bool, %[[ZERO]] : !s32i), !cir.bool +// CIR-NEXT: %[[FALSE:.*]] = cir.cast int_to_bool %[[ZERO]] : !s32i -> !cir.bool // CIR-NEXT: cir.condition(%[[FALSE]]) // LLVM: define{{.*}} void @_Z19test_do_while_falsev() diff --git a/clang/test/CIR/CodeGen/module-filename.cpp b/clang/test/CIR/CodeGen/module-filename.cpp new file mode 100644 index 0000000000000..05e2e929e3238 --- /dev/null +++ b/clang/test/CIR/CodeGen/module-filename.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s + +// Normally, we try to avoid checking the filename of a test, but that's the +// entire point of this test, so we use a wildcard for the path but check the +// filename. +// CIR: module @"{{.*}}module-filename.cpp" + +int main() { + return 0; +} diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp index 31adb9bf4859b..91dae3f28c572 100644 --- a/clang/test/CIR/CodeGen/new.cpp +++ b/clang/test/CIR/CodeGen/new.cpp @@ -22,15 +22,15 @@ void test_basic_new() { // CHECK: %[[PD_ADDR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["pd", init] // CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8> // CHECK: %[[NEW_S:.*]] = cir.call @_Znwm(%[[EIGHT]]) -// CHECK: %[[NEW_S_PTR:.*]] = cir.cast(bitcast, %[[NEW_S]] +// CHECK: %[[NEW_S_PTR:.*]] = cir.cast bitcast %[[NEW_S]] // CHECK: cir.store{{.*}} %[[NEW_S_PTR]], %[[PS_ADDR]] // CHECK: %[[FOUR:.*]] = cir.const #cir.int<4> // CHECK: %[[NEW_INT:.*]] = cir.call @_Znwm(%[[FOUR]]) -// CHECK: %[[NEW_INT_PTR:.*]] = cir.cast(bitcast, %[[NEW_INT]] +// CHECK: %[[NEW_INT_PTR:.*]] = cir.cast bitcast %[[NEW_INT]] // CHECK: cir.store{{.*}} %[[NEW_INT_PTR]], %[[PN_ADDR]] // CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8> // CHECK: %[[NEW_DOUBLE:.*]] = cir.call @_Znwm(%[[EIGHT]]) -// CHECK: %[[NEW_DOUBLE_PTR:.*]] = cir.cast(bitcast, %[[NEW_DOUBLE]] +// CHECK: %[[NEW_DOUBLE_PTR:.*]] = cir.cast bitcast %[[NEW_DOUBLE]] // CHECK: cir.store{{.*}} %[[NEW_DOUBLE_PTR]], %[[PD_ADDR]] // CHECK: cir.return @@ -68,13 +68,13 @@ void test_new_with_init() { // CHECK: %[[PD_ADDR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["pd", init] // CHECK: %[[FOUR:.*]] = cir.const #cir.int<4> // CHECK: %[[NEW_INT:.*]] = cir.call @_Znwm(%[[FOUR]]) -// CHECK: %[[NEW_INT_PTR:.*]] = cir.cast(bitcast, %[[NEW_INT]] +// CHECK: %[[NEW_INT_PTR:.*]] = cir.cast bitcast %[[NEW_INT]] // CHECK: %[[TWO:.*]] = cir.const #cir.int<2> // CHECK: cir.store{{.*}} %[[TWO]], %[[NEW_INT_PTR]] // CHECK: cir.store{{.*}} %[[NEW_INT_PTR]], %[[PN_ADDR]] // CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8> // CHECK: %[[NEW_DOUBLE:.*]] = cir.call @_Znwm(%[[EIGHT]]) -// CHECK: %[[NEW_DOUBLE_PTR:.*]] = cir.cast(bitcast, %[[NEW_DOUBLE]] +// CHECK: %[[NEW_DOUBLE_PTR:.*]] = cir.cast bitcast %[[NEW_DOUBLE]] // CHECK: %[[THREE:.*]] = cir.const #cir.fp<3.000000e+00> // CHECK: cir.store{{.*}} %[[THREE]], %[[NEW_DOUBLE_PTR]] // 
CHECK: cir.store{{.*}} %[[NEW_DOUBLE_PTR]], %[[PD_ADDR]] @@ -119,12 +119,12 @@ void test_new_with_ctor() { // CHECK: %[[PS2_2_ADDR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["ps2_2", init] // CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8> // CHECK: %[[NEW_S2:.*]] = cir.call @_Znwm(%[[EIGHT]]) -// CHECK: %[[NEW_S2_PTR:.*]] = cir.cast(bitcast, %[[NEW_S2]] +// CHECK: %[[NEW_S2_PTR:.*]] = cir.cast bitcast %[[NEW_S2]] // CHECK: cir.call @_ZN2S2C1Ev(%[[NEW_S2_PTR]]) // CHECK: cir.store{{.*}} %[[NEW_S2_PTR]], %[[PS2_ADDR]] // CHECK: %[[EIGHT:.*]] = cir.const #cir.int<8> // CHECK: %[[NEW_S2_2:.*]] = cir.call @_Znwm(%[[EIGHT]]) -// CHECK: %[[NEW_S2_2_PTR:.*]] = cir.cast(bitcast, %[[NEW_S2_2]] +// CHECK: %[[NEW_S2_2_PTR:.*]] = cir.cast bitcast %[[NEW_S2_2]] // CHECK: %[[ONE:.*]] = cir.const #cir.int<1> // CHECK: %[[TWO:.*]] = cir.const #cir.int<2> // CHECK: cir.call @_ZN2S2C1Eii(%[[NEW_S2_2_PTR]], %[[ONE]], %[[TWO]]) @@ -152,3 +152,31 @@ void test_new_with_ctor() { // OGCG: call{{.*}} void @_ZN2S2C1Eii(ptr {{.*}} %[[NEW_S2_2]], i32 noundef 1, i32 noundef 2) // OGCG: store ptr %[[NEW_S2_2]], ptr %[[PS2_2_ADDR]], align 8 // OGCG: ret void + +void test_new_with_complex_type() { + _Complex float *a = new _Complex float{1.0f, 2.0f}; +} + +// CHECK: cir.func{{.*}} @_Z26test_new_with_complex_typev +// CHECK: %0 = cir.alloca !cir.ptr>, !cir.ptr>>, ["a", init] +// CHECK: %1 = cir.const #cir.int<8> : !u64i +// CHECK: %2 = cir.call @_Znwm(%1) : (!u64i) -> !cir.ptr +// CHECK: %3 = cir.cast bitcast %2 : !cir.ptr -> !cir.ptr> +// CHECK: %4 = cir.const #cir.const_complex<#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float> : !cir.complex +// CHECK: cir.store align(8) %4, %3 : !cir.complex, !cir.ptr> +// CHECK: cir.store align(8) %3, %0 : !cir.ptr>, !cir.ptr>> + +// LLVM: define{{.*}} void @_Z26test_new_with_complex_typev +// LLVM: %[[A_ADDR:.*]] = alloca ptr, i64 1, align 8 +// LLVM: %[[NEW_COMPLEX:.*]] = call ptr @_Znwm(i64 8) +// LLVM: store { float, float } { float 1.000000e+00, float 2.000000e+00 }, ptr %[[NEW_COMPLEX]], align 8 +// LLVM: store ptr %[[NEW_COMPLEX]], ptr %[[A_ADDR]], align 8 + +// OGCG: define{{.*}} void @_Z26test_new_with_complex_typev +// OGCG: %[[A_ADDR:.*]] = alloca ptr, align 8 +// OGCG: %[[NEW_COMPLEX:.*]] = call noalias noundef nonnull ptr @_Znwm(i64 noundef 8) +// OGCG: %[[COMPLEX_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[NEW_COMPLEX]], i32 0, i32 0 +// OGCG: %[[COMPLEX_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[NEW_COMPLEX]], i32 0, i32 1 +// OGCG: store float 1.000000e+00, ptr %[[COMPLEX_REAL_PTR]], align 8 +// OGCG: store float 2.000000e+00, ptr %[[COMPLEX_IMAG_PTR]], align 4 +// OGCG: store ptr %[[NEW_COMPLEX]], ptr %[[A_ADDR]], align 8 diff --git a/clang/test/CIR/CodeGen/no-prototype.c b/clang/test/CIR/CodeGen/no-prototype.c index 4be6a94c12129..728c4b80b95a2 100644 --- a/clang/test/CIR/CodeGen/no-prototype.c +++ b/clang/test/CIR/CodeGen/no-prototype.c @@ -51,7 +51,7 @@ int test3(int x) { // CHECK: cir.func dso_local @test3 return noProto3(x); // CHECK: [[GGO:%.*]] = cir.get_global @noProto3 : !cir.ptr !s32i>> - // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr !s32i>>), !cir.ptr !s32i>> + // CHECK: [[CAST:%.*]] = cir.cast bitcast [[GGO]] : !cir.ptr !s32i>> -> !cir.ptr !s32i>> // CHECK: {{%.*}} = cir.call [[CAST]](%{{[0-9]+}}) : (!cir.ptr !s32i>>, !s32i) -> !s32i } @@ -68,7 +68,7 @@ int noProto4() { return 0; } int test4(int x) { return noProto4(x); // Even if we know the definition, this should compile. 
// CHECK: [[GGO:%.*]] = cir.get_global @noProto4 : !cir.ptr !s32i>> - // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr !s32i>>), !cir.ptr !s32i>> + // CHECK: [[CAST:%.*]] = cir.cast bitcast [[GGO]] : !cir.ptr !s32i>> -> !cir.ptr !s32i>> // CHECK: {{%.*}} = cir.call [[CAST]]({{%.*}}) : (!cir.ptr !s32i>>, !s32i) -> !s32i } @@ -77,7 +77,7 @@ int noProto5(); int test5(int x) { return noProto5(); // CHECK: [[GGO:%.*]] = cir.get_global @noProto5 : !cir.ptr !s32i>> - // CHECK: [[CAST:%.*]] = cir.cast(bitcast, [[GGO]] : !cir.ptr !s32i>>), !cir.ptr !s32i>> + // CHECK: [[CAST:%.*]] = cir.cast bitcast [[GGO]] : !cir.ptr !s32i>> -> !cir.ptr !s32i>> // CHECK: {{%.*}} = cir.call [[CAST]]() : (!cir.ptr !s32i>>) -> !s32i } int noProto5(int x) { return x; } diff --git a/clang/test/CIR/CodeGen/opaque.c b/clang/test/CIR/CodeGen/opaque.c index 96ecdfc4cd978..73f6402e8a484 100644 --- a/clang/test/CIR/CodeGen/opaque.c +++ b/clang/test/CIR/CodeGen/opaque.c @@ -17,8 +17,8 @@ void foo2() { // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex // CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.float // CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.float -// CIR: %[[A_REAL_BOOL:.*]] = cir.cast(float_to_bool, %[[A_REAL]] : !cir.float), !cir.bool -// CIR: %[[A_IMAG_BOOL:.*]] = cir.cast(float_to_bool, %[[A_IMAG]] : !cir.float), !cir.bool +// CIR: %[[A_REAL_BOOL:.*]] = cir.cast float_to_bool %[[A_REAL]] : !cir.float -> !cir.bool +// CIR: %[[A_IMAG_BOOL:.*]] = cir.cast float_to_bool %[[A_IMAG]] : !cir.float -> !cir.bool // CIR: %[[CONST_TRUE:.*]] = cir.const #true // CIR: %[[COND:.*]] = cir.select if %[[A_REAL_BOOL]] then %[[CONST_TRUE]] else %[[A_IMAG_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool // CIR: %[[RESULT:.*]] = cir.ternary(%[[COND]], true { diff --git a/clang/test/CIR/CodeGen/opaque.cpp b/clang/test/CIR/CodeGen/opaque.cpp index a48c013e5c20b..028bfd9ef4cd0 100644 --- a/clang/test/CIR/CodeGen/opaque.cpp +++ b/clang/test/CIR/CodeGen/opaque.cpp @@ -35,8 +35,8 @@ void foo2() { // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex // CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.float // CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.float -// CIR: %[[A_REAL_BOOL:.*]] = cir.cast(float_to_bool, %[[A_REAL]] : !cir.float), !cir.bool -// CIR: %[[A_IMAG_BOOL:.*]] = cir.cast(float_to_bool, %[[A_IMAG]] : !cir.float), !cir.bool +// CIR: %[[A_REAL_BOOL:.*]] = cir.cast float_to_bool %[[A_REAL]] : !cir.float -> !cir.bool +// CIR: %[[A_IMAG_BOOL:.*]] = cir.cast float_to_bool %[[A_IMAG]] : !cir.float -> !cir.bool // CIR: %[[CONST_TRUE:.*]] = cir.const #true // CIR: %[[COND:.*]] = cir.select if %[[A_REAL_BOOL]] then %[[CONST_TRUE]] else %[[A_IMAG_BOOL]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool // CIR: %[[RESULT:.*]] = cir.ternary(%[[COND]], true { @@ -111,7 +111,7 @@ void foo3() { // CIR: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr, ["b"] // CIR: %[[C_ADDR:.*]] = cir.alloca !s32i, !cir.ptr, ["c", init] // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr, !s32i -// CIR: %[[A_BOOL:.*]] = cir.cast(int_to_bool, %[[TMP_A]] : !s32i), !cir.bool +// CIR: %[[A_BOOL:.*]] = cir.cast int_to_bool %[[TMP_A]] : !s32i -> !cir.bool // CIR: %[[RESULT:.*]] = cir.ternary(%[[A_BOOL]], true { // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr, !s32i // CIR: cir.yield %[[TMP_A]] : !s32i diff --git a/clang/test/CIR/CodeGen/opt-info-attr.cpp 
b/clang/test/CIR/CodeGen/opt-info-attr.cpp index 444286b8db8a9..97071d7ac2b2b 100644 --- a/clang/test/CIR/CodeGen/opt-info-attr.cpp +++ b/clang/test/CIR/CodeGen/opt-info-attr.cpp @@ -13,10 +13,10 @@ void f() {} -// CHECK-O0: module attributes +// CHECK-O0: module{{.*}} attributes // CHECK-O0-NOT: cir.opt_info -// CHECK-O1: module attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} -// CHECK-O2: module attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} -// CHECK-O3: module attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} -// CHECK-Os: module attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} -// CHECK-Oz: module attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} +// CHECK-O1: module{{.*}} attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} +// CHECK-O2: module{{.*}} attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} +// CHECK-O3: module{{.*}} attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} +// CHECK-Os: module{{.*}} attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} +// CHECK-Oz: module{{.*}} attributes {{.+}}cir.opt_info = #cir.opt_info{{.+}} diff --git a/clang/test/CIR/CodeGen/pointers.cpp b/clang/test/CIR/CodeGen/pointers.cpp index dcfcc723f4da1..2c3dbb0fd6c58 100644 --- a/clang/test/CIR/CodeGen/pointers.cpp +++ b/clang/test/CIR/CodeGen/pointers.cpp @@ -24,7 +24,7 @@ void foo(int *iptr, char *cptr, unsigned ustride) { // Must convert unsigned stride to a signed one. iptr - ustride; // CHECK: %[[#STRIDE:]] = cir.load{{.*}} %{{.+}} : !cir.ptr, !u32i - // CHECK: %[[#SIGNSTRIDE:]] = cir.cast(integral, %[[#STRIDE]] : !u32i), !s32i + // CHECK: %[[#SIGNSTRIDE:]] = cir.cast integral %[[#STRIDE]] : !u32i -> !s32i // CHECK: %[[#NEGSTRIDE:]] = cir.unary(minus, %[[#SIGNSTRIDE]]) : !s32i, !s32i // CHECK: cir.ptr_stride(%{{.+}} : !cir.ptr, %[[#NEGSTRIDE]] : !s32i), !cir.ptr diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp index ee6c4cab7341f..96db82a89977c 100644 --- a/clang/test/CIR/CodeGen/struct.cpp +++ b/clang/test/CIR/CodeGen/struct.cpp @@ -93,3 +93,93 @@ void f3() { // OGCG: %[[O:.*]] = alloca %struct.Outer, align 4 // OGCG: %[[O_I:.*]] = getelementptr inbounds nuw %struct.Outer, ptr %[[O]], i32 0, i32 0 // OGCG: %[[O_I_N:.*]] = getelementptr inbounds nuw %struct.Inner, ptr %[[O_I]], i32 0, i32 0 + +void paren_expr() { + struct Point { + int x; + int y; + }; + + Point a = (Point{}); + Point b = (a); +} + +// CIR: cir.func{{.*}} @_Z10paren_exprv() +// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_Point, !cir.ptr, ["a", init] +// CIR: %[[B_ADDR:.*]] = cir.alloca !rec_Point, !cir.ptr, ["b", init] +// CIR: %[[X_ELEM_PTR:.*]] = cir.get_member %[[A_ADDR]][0] {name = "x"} : !cir.ptr -> !cir.ptr +// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i +// CIR: cir.store{{.*}} %[[CONST_0]], %[[X_ELEM_PTR]] : !s32i, !cir.ptr +// CIR: %[[Y_ELEM_PTR:.*]] = cir.get_member %[[A_ADDR]][1] {name = "y"} : !cir.ptr -> !cir.ptr +// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i +// CIR: cir.store{{.*}} %[[CONST_0]], %[[Y_ELEM_PTR]] : !s32i, !cir.ptr +// CIR: cir.call @_ZZ10paren_exprvEN5PointC1ERKS_(%[[B_ADDR]], %[[A_ADDR]]) nothrow : (!cir.ptr, !cir.ptr) -> () + +// LLVM: define{{.*}} void @_Z10paren_exprv() +// LLVM: %[[A_ADDR:.*]] = alloca %struct.Point, i64 1, align 4 +// LLVM: %[[B_ADDR:.*]] = alloca %struct.Point, i64 1, align 4 +// LLVM: %[[X_ELEM_PTR:.*]] = getelementptr %struct.Point, ptr %[[A_ADDR]], i32 0, i32 0 +// LLVM: store i32 0, ptr %[[X_ELEM_PTR]], align 4 +// LLVM: %[[Y_ELEM_PTR:.*]] = getelementptr %struct.Point, ptr %[[A_ADDR]], i32 0, i32 1 +// 
LLVM: store i32 0, ptr %[[Y_ELEM_PTR]], align 4 +// LLVM: call void @_ZZ10paren_exprvEN5PointC1ERKS_(ptr %[[B_ADDR]], ptr %[[A_ADDR]]) + +// OGCG: define{{.*}} void @_Z10paren_exprv() +// OGCG: %[[A_ADDR:.*]] = alloca %struct.Point, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca %struct.Point, align 4 +// OGCG: call void @llvm.memset.p0.i64(ptr align 4 %[[A_ADDR]], i8 0, i64 8, i1 false) +// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[B_ADDR]], ptr align 4 %[[A_ADDR]], i64 8, i1 false) + +void choose_expr() { + CompleteS a; + CompleteS b; + CompleteS c = __builtin_choose_expr(true, a, b); +} + +// CIR: cir.func{{.*}} @_Z11choose_exprv() +// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["b"] +// CIR: %[[C_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["c", init] +// TODO(cir): Call to default copy constructor should be replaced by `cir.copy` op +// CIR: cir.call @_ZN9CompleteSC1ERKS_(%[[C_ADDR]], %[[A_ADDR]]) nothrow : (!cir.ptr, !cir.ptr) -> () + +// LLVM: define{{.*}} void @_Z11choose_exprv() +// LLVM: %[[A_ADDR:.*]] = alloca %struct.CompleteS, i64 1, align 4 +// LLVM: %[[B_ADDR:.*]] = alloca %struct.CompleteS, i64 1, align 4 +// LLVM: %[[C_ADDR:.*]] = alloca %struct.CompleteS, i64 1, align 4 +// LLVM: call void @_ZN9CompleteSC1ERKS_(ptr %[[C_ADDR]], ptr %[[A_ADDR]]) + +// OGCG: define{{.*}} void @_Z11choose_exprv() +// OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca %struct.CompleteS, align 4 +// OGCG: %[[C_ADDR:.*]] = alloca %struct.CompleteS, align 4 +// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[C_ADDR]], ptr align 4 %[[A_ADDR]], i64 8, i1 false) + +void generic_selection() { + CompleteS a; + CompleteS b; + int c; + CompleteS d = _Generic(c, int : a, default: b); +} + +// CIR: cir.func{{.*}} @_Z17generic_selectionv() +// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["b"] +// CIR: %[[C_ADDR:.*]] = cir.alloca !s32i, !cir.ptr, ["c"] +// CIR: %[[D_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr, ["d", init] +// TODO(cir): Call to default copy constructor should be replaced by `cir.copy` op +// CIR: cir.call @_ZN9CompleteSC1ERKS_(%[[D_ADDR]], %[[A_ADDR]]) nothrow : (!cir.ptr, !cir.ptr) -> () + +// LLVM: define{{.*}} void @_Z17generic_selectionv() +// LLVM: %1 = alloca %struct.CompleteS, i64 1, align 4 +// LLVM: %2 = alloca %struct.CompleteS, i64 1, align 4 +// LLVM: %3 = alloca i32, i64 1, align 4 +// LLVM: %4 = alloca %struct.CompleteS, i64 1, align 4 +// LLVM: call void @_ZN9CompleteSC1ERKS_(ptr %4, ptr %1) + +// OGCG: define{{.*}} void @_Z17generic_selectionv() +// OGCG: %[[A_ADDR:.*]] = alloca %struct.CompleteS, align 4 +// OGCG: %[[B_ADDR:.*]] = alloca %struct.CompleteS, align 4 +// OGCG: %[[C_ADDR:.*]] = alloca i32, align 4 +// OGCG: %[[D_ADDR:.*]] = alloca %struct.CompleteS, align 4 +// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D_ADDR]], ptr align 4 %[[A_ADDR]], i64 8, i1 false) diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp index 781286a94cc2e..eb38ee3083e5c 100644 --- a/clang/test/CIR/CodeGen/ternary.cpp +++ b/clang/test/CIR/CodeGen/ternary.cpp @@ -69,7 +69,7 @@ int foo(int a, int b) { // CIR: [[ALOAD2:%.+]] = cir.load align(4) [[A]] : !cir.ptr, !s32i // CIR: cir.yield [[ALOAD2]] : !s32i // CIR: }) : (!cir.bool) -> !s32i -// CIR: [[CAST:%.+]] = cir.cast(int_to_bool, [[TERNARY_RES]] : !s32i), !cir.bool +// 
CIR: [[CAST:%.+]] = cir.cast int_to_bool [[TERNARY_RES]] : !s32i -> !cir.bool // CIR: cir.if [[CAST]] { // CIR: [[ONE:%.+]] = cir.const #cir.int<1> : !s32i // CIR: [[MINUS_ONE:%.+]] = cir.unary(minus, [[ONE]]) nsw : !s32i, !s32i diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp index c37524bc8b2c9..ac1ae344c6b48 100644 --- a/clang/test/CIR/CodeGen/unary.cpp +++ b/clang/test/CIR/CodeGen/unary.cpp @@ -410,10 +410,10 @@ void chars(char c) { // CHECK: cir.func{{.*}} @_Z5charsc int c1 = +c; - // CHECK: %[[PROMO:.*]] = cir.cast(integral, %{{.+}} : !s8i), !s32i + // CHECK: %[[PROMO:.*]] = cir.cast integral %{{.+}} : !s8i -> !s32i // CHECK: cir.unary(plus, %[[PROMO]]) : !s32i, !s32i int c2 = -c; - // CHECK: %[[PROMO:.*]] = cir.cast(integral, %{{.+}} : !s8i), !s32i + // CHECK: %[[PROMO:.*]] = cir.cast integral %{{.+}} : !s8i -> !s32i // CHECK: cir.unary(minus, %[[PROMO]]) nsw : !s32i, !s32i // Chars can go through some integer promotion codegen paths even when not promoted. @@ -431,9 +431,9 @@ _Float16 fp16UPlus(_Float16 f) { // CHECK: cir.func{{.*}} @_Z9fp16UPlusDF16_({{.*}}) -> !cir.f16 // CHECK: %[[INPUT:.*]] = cir.load{{.*}} %[[F:.*]] -// CHECK: %[[PROMOTED:.*]] = cir.cast(floating, %[[INPUT]] : !cir.f16), !cir.float +// CHECK: %[[PROMOTED:.*]] = cir.cast floating %[[INPUT]] : !cir.f16 -> !cir.float // CHECK: %[[RESULT:.*]] = cir.unary(plus, %[[PROMOTED]]) -// CHECK: %[[UNPROMOTED:.*]] = cir.cast(floating, %[[RESULT]] : !cir.float), !cir.f16 +// CHECK: %[[UNPROMOTED:.*]] = cir.cast floating %[[RESULT]] : !cir.float -> !cir.f16 // LLVM: define{{.*}} half @_Z9fp16UPlusDF16_({{.*}}) // LLVM: %[[F_LOAD:.*]] = load half, ptr %{{.*}}, align 2 @@ -451,9 +451,9 @@ _Float16 fp16UMinus(_Float16 f) { // CHECK: cir.func{{.*}} @_Z10fp16UMinusDF16_({{.*}}) -> !cir.f16 // CHECK: %[[INPUT:.*]] = cir.load{{.*}} %[[F:.*]] -// CHECK: %[[PROMOTED:.*]] = cir.cast(floating, %[[INPUT]] : !cir.f16), !cir.float +// CHECK: %[[PROMOTED:.*]] = cir.cast floating %[[INPUT]] : !cir.f16 -> !cir.float // CHECK: %[[RESULT:.*]] = cir.unary(minus, %[[PROMOTED]]) -// CHECK: %[[UNPROMOTED:.*]] = cir.cast(floating, %[[RESULT]] : !cir.float), !cir.f16 +// CHECK: %[[UNPROMOTED:.*]] = cir.cast floating %[[RESULT]] : !cir.float -> !cir.f16 // LLVM: define{{.*}} half @_Z10fp16UMinusDF16_({{.*}}) // LLVM: %[[F_LOAD:.*]] = load half, ptr %{{.*}}, align 2 @@ -482,24 +482,24 @@ void test_logical_not() { // CHECK: cir.func{{.*}} @_Z16test_logical_notv() // CHECK: %[[A:.*]] = cir.load{{.*}} %[[A_ADDR:.*]] : !cir.ptr, !s32i -// CHECK: %[[A_BOOL:.*]] = cir.cast(int_to_bool, %[[A]] : !s32i), !cir.bool +// CHECK: %[[A_BOOL:.*]] = cir.cast int_to_bool %[[A]] : !s32i -> !cir.bool // CHECK: %[[A_NOT:.*]] = cir.unary(not, %[[A_BOOL]]) : !cir.bool, !cir.bool -// CHECK: %[[A_CAST:.*]] = cir.cast(bool_to_int, %[[A_NOT]] : !cir.bool), !s32i +// CHECK: %[[A_CAST:.*]] = cir.cast bool_to_int %[[A_NOT]] : !cir.bool -> !s32i // CHECK: cir.store{{.*}} %[[A_CAST]], %[[A_ADDR]] : !s32i, !cir.ptr // CHECK: %[[B:.*]] = cir.load{{.*}} %[[B_ADDR:.*]] : !cir.ptr, !cir.bool // CHECK: %[[B_NOT:.*]] = cir.unary(not, %[[B]]) : !cir.bool, !cir.bool // CHECK: cir.store{{.*}} %[[B_NOT]], %[[B_ADDR]] : !cir.bool, !cir.ptr // CHECK: %[[C:.*]] = cir.load{{.*}} %[[C_ADDR:.*]] : !cir.ptr, !cir.float -// CHECK: %[[C_BOOL:.*]] = cir.cast(float_to_bool, %[[C]] : !cir.float), !cir.bool +// CHECK: %[[C_BOOL:.*]] = cir.cast float_to_bool %[[C]] : !cir.float -> !cir.bool // CHECK: %[[C_NOT:.*]] = cir.unary(not, %[[C_BOOL]]) : !cir.bool, !cir.bool -// 
CHECK: %[[C_CAST:.*]] = cir.cast(bool_to_float, %[[C_NOT]] : !cir.bool), !cir.float +// CHECK: %[[C_CAST:.*]] = cir.cast bool_to_float %[[C_NOT]] : !cir.bool -> !cir.float // CHECK: cir.store{{.*}} %[[C_CAST]], %[[C_ADDR]] : !cir.float, !cir.ptr // CHECK: %[[P:.*]] = cir.load{{.*}} %[[P_ADDR:.*]] : !cir.ptr>, !cir.ptr -// CHECK: %[[P_BOOL:.*]] = cir.cast(ptr_to_bool, %[[P]] : !cir.ptr), !cir.bool +// CHECK: %[[P_BOOL:.*]] = cir.cast ptr_to_bool %[[P]] : !cir.ptr -> !cir.bool // CHECK: %[[P_NOT:.*]] = cir.unary(not, %[[P_BOOL]]) : !cir.bool, !cir.bool // CHECK: cir.store{{.*}} %[[P_NOT]], %[[B_ADDR]] : !cir.bool, !cir.ptr // CHECK: %[[D:.*]] = cir.load{{.*}} %[[D_ADDR:.*]] : !cir.ptr, !cir.double -// CHECK: %[[D_BOOL:.*]] = cir.cast(float_to_bool, %[[D]] : !cir.double), !cir.bool +// CHECK: %[[D_BOOL:.*]] = cir.cast float_to_bool %[[D]] : !cir.double -> !cir.bool // CHECK: %[[D_NOT:.*]] = cir.unary(not, %[[D_BOOL]]) : !cir.bool, !cir.bool // CHECK: cir.store{{.*}} %[[D_NOT]], %[[B_ADDR]] : !cir.bool, !cir.ptr @@ -566,10 +566,10 @@ void f16NestedUPlus() { // CHECK: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["a"] // CHECK: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["b", init] // CHECK: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr, !cir.f16 -// CHECK: %[[A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float +// CHECK: %[[A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float // CHECK: %[[A_PLUS:.*]] = cir.unary(plus, %[[A_F32]]) : !cir.float, !cir.float // CHECK: %[[RESULT_F32:.*]] = cir.unary(plus, %[[A_PLUS]]) : !cir.float, !cir.float -// CHECK: %[[RESULT:.*]] = cir.cast(floating, %[[RESULT_F32]] : !cir.float), !cir.f16 +// CHECK: %[[RESULT:.*]] = cir.cast floating %[[RESULT_F32]] : !cir.float -> !cir.f16 // CHECK: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr // LLVM: define{{.*}} void @_Z14f16NestedUPlusv() @@ -597,10 +597,10 @@ void f16NestedUMinus() { // CHECK: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["a"] // CHECK: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["b", init] // CHECK: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr, !cir.f16 -// CHECK: %[[A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float +// CHECK: %[[A_F32:.*]] = cir.cast floating %[[TMP_A]] : !cir.f16 -> !cir.float // CHECK: %[[A_MINUS:.*]] = cir.unary(minus, %[[A_F32]]) : !cir.float, !cir.float // CHECK: %[[RESULT_F32:.*]] = cir.unary(minus, %[[A_MINUS]]) : !cir.float, !cir.float -// CHECK: %[[RESULT:.*]] = cir.cast(floating, %[[RESULT_F32]] : !cir.float), !cir.f16 +// CHECK: %[[RESULT:.*]] = cir.cast floating %[[RESULT_F32]] : !cir.float -> !cir.f16 // CHECK: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.f16, !cir.ptr // LLVM: define{{.*}} void @_Z15f16NestedUMinusv() diff --git a/clang/test/CIR/CodeGen/union.c b/clang/test/CIR/CodeGen/union.c index 23e862b24517d..bda8e77b89048 100644 --- a/clang/test/CIR/CodeGen/union.c +++ b/clang/test/CIR/CodeGen/union.c @@ -116,7 +116,7 @@ void shouldGenerateUnionAccess(union U2 u) { // CIR-NEXT: %[[U:.*]] = cir.alloca !rec_U2, !cir.ptr, ["u", init] {alignment = 8 : i64} // CIR-NEXT: cir.store{{.*}} %[[ARG]], %[[U]] : !rec_U2, !cir.ptr // CIR-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CIR-NEXT: %[[ZERO_CHAR:.*]] = cir.cast(integral, %[[ZERO]] : !s32i), !s8i +// CIR-NEXT: %[[ZERO_CHAR:.*]] = cir.cast integral %[[ZERO]] : !s32i -> !s8i // CIR-NEXT: %[[B_PTR:.*]] = cir.get_member %[[U]][0] {name = "b"} : !cir.ptr -> !cir.ptr // CIR-NEXT: cir.store{{.*}} %[[ZERO_CHAR]], 
%[[B_PTR]] : !s8i, !cir.ptr // CIR-NEXT: %[[B_PTR2:.*]] = cir.get_member %[[U]][0] {name = "b"} : !cir.ptr -> !cir.ptr @@ -174,10 +174,10 @@ void f3(union U3 u) { // CIR-NEXT: %[[U:.*]] = cir.alloca !rec_U3, !cir.ptr, ["u", init] {alignment = 1 : i64} // CIR-NEXT: cir.store{{.*}} %[[ARG]], %[[U]] : !rec_U3, !cir.ptr // CIR-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CIR-NEXT: %[[ZERO_CHAR:.*]] = cir.cast(integral, %[[ZERO]] : !s32i), !s8i +// CIR-NEXT: %[[ZERO_CHAR:.*]] = cir.cast integral %[[ZERO]] : !s32i -> !s8i // CIR-NEXT: %[[IDX:.*]] = cir.const #cir.int<2> : !s32i // CIR-NEXT: %[[C_PTR:.*]] = cir.get_member %[[U]][0] {name = "c"} : !cir.ptr -> !cir.ptr> -// CIR-NEXT: %[[C_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[C_PTR]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: %[[C_DECAY:.*]] = cir.cast array_to_ptrdecay %[[C_PTR]] : !cir.ptr> -> !cir.ptr // CIR-NEXT: %[[ELEM_PTR:.*]] = cir.ptr_stride(%[[C_DECAY]] : !cir.ptr, %[[IDX]] : !s32i), !cir.ptr // CIR-NEXT: cir.store{{.*}} %[[ZERO_CHAR]], %[[ELEM_PTR]] : !s8i, !cir.ptr // CIR-NEXT: cir.return @@ -206,10 +206,10 @@ void f5(union U4 u) { // CIR-NEXT: %[[U:.*]] = cir.alloca !rec_U4, !cir.ptr, ["u", init] {alignment = 4 : i64} // CIR-NEXT: cir.store{{.*}} %[[ARG]], %[[U]] : !rec_U4, !cir.ptr // CIR-NEXT: %[[CHAR_VAL:.*]] = cir.const #cir.int<65> : !s32i -// CIR-NEXT: %[[CHAR_CAST:.*]] = cir.cast(integral, %[[CHAR_VAL]] : !s32i), !s8i +// CIR-NEXT: %[[CHAR_CAST:.*]] = cir.cast integral %[[CHAR_VAL]] : !s32i -> !s8i // CIR-NEXT: %[[IDX:.*]] = cir.const #cir.int<4> : !s32i // CIR-NEXT: %[[C_PTR:.*]] = cir.get_member %[[U]][0] {name = "c"} : !cir.ptr -> !cir.ptr> -// CIR-NEXT: %[[C_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[C_PTR]] : !cir.ptr>), !cir.ptr +// CIR-NEXT: %[[C_DECAY:.*]] = cir.cast array_to_ptrdecay %[[C_PTR]] : !cir.ptr> -> !cir.ptr // CIR-NEXT: %[[ELEM_PTR:.*]] = cir.ptr_stride(%[[C_DECAY]] : !cir.ptr, %[[IDX]] : !s32i), !cir.ptr // CIR-NEXT: cir.store{{.*}} %[[CHAR_CAST]], %[[ELEM_PTR]] : !s8i, !cir.ptr // CIR-NEXT: cir.return diff --git a/clang/test/CIR/CodeGen/var_arg.c b/clang/test/CIR/CodeGen/var_arg.c index e9c4acb15d009..f5b92c61e11ad 100644 --- a/clang/test/CIR/CodeGen/var_arg.c +++ b/clang/test/CIR/CodeGen/var_arg.c @@ -23,13 +23,13 @@ int varargs(int count, ...) { // CIR: %[[VAAREA:.+]] = cir.alloca !cir.array, !cir.ptr>, ["args"] // CIR: %[[RES_ADDR:.+]] = cir.alloca !s32i, !cir.ptr, ["res", init] // CIR: cir.store %arg0, %[[COUNT_ADDR]] : !s32i, !cir.ptr -// CIR: %[[VA_PTR0:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr>), !cir.ptr +// CIR: %[[VA_PTR0:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr> -> !cir.ptr // CIR: %[[COUNT_VAL:.+]] = cir.load{{.*}} %[[COUNT_ADDR]] : !cir.ptr, !s32i // CIR: cir.va_start %[[VA_PTR0]] %[[COUNT_VAL]] : !cir.ptr, !s32i -// CIR: %[[VA_PTR1:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr>), !cir.ptr +// CIR: %[[VA_PTR1:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr> -> !cir.ptr // CIR: %[[VA_ARG:.+]] = cir.va_arg %[[VA_PTR1]] : (!cir.ptr) -> !s32i // CIR: cir.store{{.*}} %[[VA_ARG]], %[[RES_ADDR]] : !s32i, !cir.ptr -// CIR: %[[VA_PTR2:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr>), !cir.ptr +// CIR: %[[VA_PTR2:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr> -> !cir.ptr // CIR: cir.va_end %[[VA_PTR2]] : !cir.ptr // CIR: %[[RESULT:.+]] = cir.load{{.*}} %[[RES_ADDR]] : !cir.ptr, !s32i // CIR: cir.store %[[RESULT]], %[[RET_ADDR]] : !s32i, !cir.ptr @@ -99,13 +99,13 @@ int stdarg_start(int count, ...) 
{ // CIR: %[[VAAREA:.+]] = cir.alloca !cir.array, !cir.ptr>, ["args"] // CIR: %[[RES_ADDR:.+]] = cir.alloca !s32i, !cir.ptr, ["res", init] // CIR: cir.store %arg0, %[[COUNT_ADDR]] : !s32i, !cir.ptr -// CIR: %[[VA_PTR0:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr>), !cir.ptr +// CIR: %[[VA_PTR0:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr> -> !cir.ptr // CIR: %[[C12345:.+]] = cir.const #cir.int<12345> : !s32i // CIR: cir.va_start %[[VA_PTR0]] %[[C12345]] : !cir.ptr, !s32i -// CIR: %[[VA_PTR1:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr>), !cir.ptr +// CIR: %[[VA_PTR1:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr> -> !cir.ptr // CIR: %[[VA_ARG:.+]] = cir.va_arg %[[VA_PTR1]] : (!cir.ptr) -> !s32i // CIR: cir.store{{.*}} %[[VA_ARG]], %[[RES_ADDR]] : !s32i, !cir.ptr -// CIR: %[[VA_PTR2:.+]] = cir.cast(array_to_ptrdecay, %[[VAAREA]] : !cir.ptr>), !cir.ptr +// CIR: %[[VA_PTR2:.+]] = cir.cast array_to_ptrdecay %[[VAAREA]] : !cir.ptr> -> !cir.ptr // CIR: cir.va_end %[[VA_PTR2]] : !cir.ptr // CIR: %[[RESULT:.+]] = cir.load{{.*}} %[[RES_ADDR]] : !cir.ptr, !s32i // CIR: cir.store %[[RESULT]], %[[RET_ADDR]] : !s32i, !cir.ptr diff --git a/clang/test/CIR/CodeGen/variable-decomposition.cpp b/clang/test/CIR/CodeGen/variable-decomposition.cpp index 40dfe73c411c9..ba59109ab625f 100644 --- a/clang/test/CIR/CodeGen/variable-decomposition.cpp +++ b/clang/test/CIR/CodeGen/variable-decomposition.cpp @@ -27,7 +27,7 @@ float function() { // CIR: cir.store{{.*}} %[[TWO_FP]], %[[MEMBER_B]] // CIR: %[[MEMBER_A:.+]] = cir.get_member %[[STRUCT]][0] {name = "a"} : !cir.ptr -> !cir.ptr // CIR: %[[LOAD_A:.+]] = cir.load align(4) %[[MEMBER_A]] : !cir.ptr, !s32i -// CIR: %[[CAST_A:.+]] = cir.cast(int_to_float, %[[LOAD_A]] : !s32i), !cir.float +// CIR: %[[CAST_A:.+]] = cir.cast int_to_float %[[LOAD_A]] : !s32i -> !cir.float // CIR: %[[MEMBER_B:.+]] = cir.get_member %[[STRUCT]][1] {name = "b"} : !cir.ptr -> !cir.ptr // CIR: %[[LOAD_B:.+]] = cir.load align(4) %[[MEMBER_B]] : !cir.ptr, !cir.float // CIR: %[[ADD:.+]] = cir.binop(add, %[[CAST_A]], %[[LOAD_B]]) : !cir.float diff --git a/clang/test/CIR/CodeGen/vbase.cpp b/clang/test/CIR/CodeGen/vbase.cpp index 91396518a40b0..86469c5d6ae7d 100644 --- a/clang/test/CIR/CodeGen/vbase.cpp +++ b/clang/test/CIR/CodeGen/vbase.cpp @@ -13,19 +13,29 @@ class Base { class Derived : public virtual Base {}; -// This is just here to force the record types to be emitted. void f() { Derived d; + d.f(); +} + +class DerivedFinal final : public virtual Base {}; + +void g() { + DerivedFinal df; + df.f(); } // CIR: !rec_Base = !cir.record // CIR: !rec_Derived = !cir.record +// CIR: !rec_DerivedFinal = !cir.record // LLVM: %class.Derived = type { %class.Base } // LLVM: %class.Base = type { ptr } +// LLVM: %class.DerivedFinal = type { %class.Base } // OGCG: %class.Derived = type { %class.Base } // OGCG: %class.Base = type { ptr } +// OGCG: %class.DerivedFinal = type { %class.Base } // Test the constructor handling for a class with a virtual base. 
struct A { @@ -47,6 +57,76 @@ void ppp() { B b; } // OGCG: @_ZTV1B = linkonce_odr unnamed_addr constant { [3 x ptr] } { [3 x ptr] [ptr inttoptr (i64 12 to ptr), ptr null, ptr @_ZTI1B] }, comdat, align 8 +// CIR: cir.func {{.*}}@_Z1fv() { +// CIR: %[[D:.+]] = cir.alloca !rec_Derived, !cir.ptr, ["d", init] +// CIR: cir.call @_ZN7DerivedC1Ev(%[[D]]) nothrow : (!cir.ptr) -> () +// CIR: %[[VPTR_PTR:.+]] = cir.vtable.get_vptr %[[D]] : !cir.ptr -> !cir.ptr +// CIR: %[[VPTR:.+]] = cir.load {{.*}} %[[VPTR_PTR]] : !cir.ptr, !cir.vptr +// CIR: %[[VPTR_I8:.+]] = cir.cast bitcast %[[VPTR]] : !cir.vptr -> !cir.ptr +// CIR: %[[NEG32:.+]] = cir.const #cir.int<-32> : !s64i +// CIR: %[[ADJ_VPTR_I8:.+]] = cir.ptr_stride(%[[VPTR_I8]] : !cir.ptr, %[[NEG32]] : !s64i), !cir.ptr +// CIR: %[[OFFSET_PTR:.+]] = cir.cast bitcast %[[ADJ_VPTR_I8]] : !cir.ptr -> !cir.ptr +// CIR: %[[OFFSET:.+]] = cir.load {{.*}} %[[OFFSET_PTR]] : !cir.ptr, !s64i +// CIR: %[[D_I8:.+]] = cir.cast bitcast %[[D]] : !cir.ptr -> !cir.ptr +// CIR: %[[ADJ_THIS_I8:.+]] = cir.ptr_stride(%[[D_I8]] : !cir.ptr, %[[OFFSET]] : !s64i), !cir.ptr +// CIR: %[[ADJ_THIS_D:.+]] = cir.cast bitcast %[[ADJ_THIS_I8]] : !cir.ptr -> !cir.ptr +// CIR: %[[BASE_THIS:.+]] = cir.cast bitcast %[[ADJ_THIS_D]] : !cir.ptr -> !cir.ptr +// CIR: %[[BASE_VPTR_PTR:.+]] = cir.vtable.get_vptr %[[BASE_THIS]] : !cir.ptr -> !cir.ptr +// CIR: %[[BASE_VPTR:.+]] = cir.load {{.*}} %[[BASE_VPTR_PTR]] : !cir.ptr, !cir.vptr +// CIR: %[[SLOT_PTR:.+]] = cir.vtable.get_virtual_fn_addr %[[BASE_VPTR]][0] : !cir.vptr -> !cir.ptr)>>> +// CIR: %[[FN:.+]] = cir.load {{.*}} %[[SLOT_PTR]] : !cir.ptr)>>>, !cir.ptr)>> +// CIR: cir.call %[[FN]](%[[BASE_THIS]]) : (!cir.ptr)>>, !cir.ptr) -> () +// CIR: cir.return + +// CIR: cir.func {{.*}}@_Z1gv() { +// CIR: %[[DF:.+]] = cir.alloca !rec_DerivedFinal, !cir.ptr, ["df", init] +// CIR: cir.call @_ZN12DerivedFinalC1Ev(%[[DF]]) nothrow : (!cir.ptr) -> () +// CIR: %[[BASE_THIS_2:.+]] = cir.base_class_addr %[[DF]] : !cir.ptr nonnull [0] -> !cir.ptr +// CIR: %[[BASE_VPTR_PTR_2:.+]] = cir.vtable.get_vptr %[[BASE_THIS_2]] : !cir.ptr -> !cir.ptr +// CIR: %[[BASE_VPTR_2:.+]] = cir.load {{.*}} %[[BASE_VPTR_PTR_2]] : !cir.ptr, !cir.vptr +// CIR: %[[SLOT_PTR_2:.+]] = cir.vtable.get_virtual_fn_addr %[[BASE_VPTR_2]][0] : !cir.vptr -> !cir.ptr)>>> +// CIR: %[[FN_2:.+]] = cir.load {{.*}} %[[SLOT_PTR_2]] : !cir.ptr)>>>, !cir.ptr)>> +// CIR: cir.call %[[FN_2]](%[[BASE_THIS_2]]) : (!cir.ptr)>>, !cir.ptr) -> () +// CIR: cir.return + +// LLVM: define {{.*}}void @_Z1fv() +// LLVM: %[[D:.+]] = alloca {{.*}} +// LLVM: call void @_ZN7DerivedC1Ev(ptr %[[D]]) +// LLVM: %[[VPTR_ADDR:.+]] = load ptr, ptr %[[D]] +// LLVM: %[[NEG32_PTR:.+]] = getelementptr i8, ptr %[[VPTR_ADDR]], i64 -32 +// LLVM: %[[OFF:.+]] = load i64, ptr %[[NEG32_PTR]] +// LLVM: %[[ADJ_THIS:.+]] = getelementptr i8, ptr %[[D]], i64 %[[OFF]] +// LLVM: %[[VFN_TAB:.+]] = load ptr, ptr %[[ADJ_THIS]] +// LLVM: %[[SLOT0:.+]] = getelementptr inbounds ptr, ptr %[[VFN_TAB]], i32 0 +// LLVM: %[[VFN:.+]] = load ptr, ptr %[[SLOT0]] +// LLVM: call void %[[VFN]](ptr %[[ADJ_THIS]]) +// LLVM: ret void + +// LLVM: define {{.*}}void @_Z1gv() +// LLVM: %[[DF:.+]] = alloca {{.*}} +// LLVM: call void @_ZN12DerivedFinalC1Ev(ptr %[[DF]]) +// LLVM: %[[VPTR2:.+]] = load ptr, ptr %[[DF]] +// LLVM: %[[SLOT0_2:.+]] = getelementptr inbounds ptr, ptr %[[VPTR2]], i32 0 +// LLVM: %[[VFN2:.+]] = load ptr, ptr %[[SLOT0_2]] +// LLVM: call void %[[VFN2]](ptr %[[DF]]) +// LLVM: ret void + +// OGCG: define {{.*}}void @_Z1fv() +// OGCG: %[[D:.+]] = alloca 
{{.*}} +// OGCG: call void @_ZN7DerivedC1Ev(ptr {{.*}} %[[D]]) +// OGCG: %[[VTABLE:.+]] = load ptr, ptr %[[D]] +// OGCG: %[[NEG32_PTR:.+]] = getelementptr i8, ptr %[[VTABLE]], i64 -32 +// OGCG: %[[OFF:.+]] = load i64, ptr %[[NEG32_PTR]] +// OGCG: %[[ADJ_THIS:.+]] = getelementptr inbounds i8, ptr %[[D]], i64 %[[OFF]] +// OGCG: call void @_ZN4Base1fEv(ptr {{.*}} %[[ADJ_THIS]]) +// OGCG: ret void + +// OGCG: define {{.*}}void @_Z1gv() +// OGCG: %[[DF:.+]] = alloca {{.*}} +// OGCG: call void @_ZN12DerivedFinalC1Ev(ptr {{.*}} %[[DF]]) +// OGCG: call void @_ZN4Base1fEv(ptr {{.*}} %[[DF]]) +// OGCG: ret void + // Constructor for B // CIR: cir.func comdat linkonce_odr @_ZN1BC1Ev(%arg0: !cir.ptr // CIR: %[[THIS_ADDR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["this", init] diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp index 287d016ff6d1e..2fd493f87c1ee 100644 --- a/clang/test/CIR/CodeGen/vector-ext.cpp +++ b/clang/test/CIR/CodeGen/vector-ext.cpp @@ -1048,7 +1048,7 @@ void foo17() { // CIR: %[[VEC_A:.*]] = cir.alloca !cir.vector<2 x !cir.double>, !cir.ptr>, ["a"] // CIR: %[[TMP:.*]] = cir.load{{.*}} %[[VEC_A]] : !cir.ptr>, !cir.vector<2 x !cir.double> -// CIR: %[[RES:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.vector<2 x !cir.double>), !cir.vector<2 x !u16i> +// CIR: %[[RES:.*]] = cir.cast float_to_int %[[TMP]] : !cir.vector<2 x !cir.double> -> !cir.vector<2 x !u16i> // LLVM: %[[VEC_A:.*]] = alloca <2 x double>, i64 1, align 16 // LLVM: %[[TMP:.*]] = load <2 x double>, ptr %[[VEC_A]], align 16 @@ -1228,11 +1228,11 @@ void foo24() { // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr>, ["b"] // CIR: %[[C_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr>, ["c", init] // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !cir.f16> -// CIR: %[[TMP_A_F16:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float> +// CIR: %[[TMP_A_F16:.*]] = cir.cast floating %[[TMP_A]] : !cir.vector<4 x !cir.f16> -> !cir.vector<4 x !cir.float> // CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.vector<4 x !cir.f16> -// CIR: %[[TMP_B_F16:.*]] = cir.cast(floating, %[[TMP_B]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float> +// CIR: %[[TMP_B_F16:.*]] = cir.cast floating %[[TMP_B]] : !cir.vector<4 x !cir.f16> -> !cir.vector<4 x !cir.float> // CIR: %[[RESULT:.*]] = cir.binop(add, %[[TMP_A_F16]], %[[TMP_B_F16]]) : !cir.vector<4 x !cir.float> -// CIR: %[[RESULT_VF16:.*]] = cir.cast(floating, %[[RESULT]] : !cir.vector<4 x !cir.float>), !cir.vector<4 x !cir.f16> +// CIR: %[[RESULT_VF16:.*]] = cir.cast floating %[[RESULT]] : !cir.vector<4 x !cir.float> -> !cir.vector<4 x !cir.f16> // CIR: cir.store{{.*}} %[[RESULT_VF16]], %[[C_ADDR]] : !cir.vector<4 x !cir.f16>, !cir.ptr> // LLVM: %[[A_ADDR:.*]] = alloca <4 x half>, i64 1, align 8 @@ -1295,4 +1295,50 @@ void foo23() { // OGCG: %[[NE_B_ZERO:.*]] = icmp ne <4 x i32> %[[TMP_B]], zeroinitializer // OGCG: %[[VEC_OR:.*]] = and <4 x i1> %[[NE_A_ZERO]], %[[NE_B_ZERO]] // OGCG: %[[RESULT:.*]] = sext <4 x i1> %[[VEC_OR]] to <4 x i32> -// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16 \ No newline at end of file +// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16 + +void logical_not() { + vi4 a; + vi4 b = !a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["b", init] +// CIR: %[[TMP_A:.*]] = 
cir.load{{.*}}) %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !s32i> +// CIR: %[[CONST_V0:.*]] = cir.const #cir.zero : !cir.vector<4 x !s32i> +// CIR: %[[RESULT:.*]] = cir.vec.cmp(eq, %[[TMP_A]], %[[CONST_V0]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i> +// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[B_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// LLVM: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer +// LLVM: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// LLVM: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +// OGCG: %[[A_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[B_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// OGCG: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer +// OGCG: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// OGCG: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +void unary_extension() { + vi4 a; + vi4 b = __extension__ a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !s32i> +// CIR: cir.store{{.*}} %[[TMP_A]], %[[B_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[B_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// LLVM: store <4 x i32> %[[TMP_A]], ptr %[[B_ADDR]], align 16 + +// OGCG: %[[A_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[B_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// OGCG: store <4 x i32> %[[TMP_A]], ptr %[[B_ADDR]], align 16 diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp index d66c7a3d2aba6..86551d277fa71 100644 --- a/clang/test/CIR/CodeGen/vector.cpp +++ b/clang/test/CIR/CodeGen/vector.cpp @@ -1035,7 +1035,7 @@ void foo17() { // CIR: %[[VEC_A:.*]] = cir.alloca !cir.vector<2 x !cir.double>, !cir.ptr>, ["a"] // CIR: %[[TMP:.*]] = cir.load{{.*}} %[[VEC_A]] : !cir.ptr>, !cir.vector<2 x !cir.double> -// CIR: %[[RES:.*]] = cir.cast(float_to_int, %[[TMP]] : !cir.vector<2 x !cir.double>), !cir.vector<2 x !u16i> +// CIR: %[[RES:.*]] = cir.cast float_to_int %[[TMP]] : !cir.vector<2 x !cir.double> -> !cir.vector<2 x !u16i> // LLVM: %[[VEC_A:.*]] = alloca <2 x double>, i64 1, align 16 // LLVM: %[[TMP:.*]] = load <2 x double>, ptr %[[VEC_A]], align 16 @@ -1270,11 +1270,11 @@ void foo27() { // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr>, ["b"] // CIR: %[[C_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.f16>, !cir.ptr>, ["c", init] // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !cir.f16> -// CIR: %[[TMP_A_F16:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float> +// CIR: %[[TMP_A_F16:.*]] = cir.cast floating %[[TMP_A]] : !cir.vector<4 x !cir.f16> -> !cir.vector<4 x !cir.float> // CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr>, !cir.vector<4 x !cir.f16> -// CIR: %[[TMP_B_F16:.*]] = cir.cast(floating, %[[TMP_B]] : !cir.vector<4 x !cir.f16>), !cir.vector<4 x !cir.float> +// CIR: 
%[[TMP_B_F16:.*]] = cir.cast floating %[[TMP_B]] : !cir.vector<4 x !cir.f16> -> !cir.vector<4 x !cir.float> // CIR: %[[RESULT:.*]] = cir.binop(add, %[[TMP_A_F16]], %[[TMP_B_F16]]) : !cir.vector<4 x !cir.float> -// CIR: %[[RESULT_VF16:.*]] = cir.cast(floating, %[[RESULT]] : !cir.vector<4 x !cir.float>), !cir.vector<4 x !cir.f16> +// CIR: %[[RESULT_VF16:.*]] = cir.cast floating %[[RESULT]] : !cir.vector<4 x !cir.float> -> !cir.vector<4 x !cir.f16> // CIR: cir.store{{.*}} %[[RESULT_VF16]], %[[C_ADDR]] : !cir.vector<4 x !cir.f16>, !cir.ptr> // LLVM: %[[A_ADDR:.*]] = alloca <4 x half>, i64 1, align 8 @@ -1337,4 +1337,76 @@ void foo26() { // OGCG: %[[NE_B_ZERO:.*]] = icmp ne <4 x i32> %[[TMP_B]], zeroinitializer // OGCG: %[[VEC_OR:.*]] = and <4 x i1> %[[NE_A_ZERO]], %[[NE_B_ZERO]] // OGCG: %[[RESULT:.*]] = sext <4 x i1> %[[VEC_OR]] to <4 x i32> -// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16 \ No newline at end of file +// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16 + +void logical_not() { + vi4 a; + vi4 b = !a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}}) %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !s32i> +// CIR: %[[CONST_V0:.*]] = cir.const #cir.zero : !cir.vector<4 x !s32i> +// CIR: %[[RESULT:.*]] = cir.vec.cmp(eq, %[[TMP_A]], %[[CONST_V0]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i> +// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[B_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// LLVM: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer +// LLVM: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// LLVM: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +// OGCG: %[[A_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[B_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// OGCG: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer +// OGCG: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// OGCG: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +void logical_not_float() { + vf4 a; + vi4 b = !a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.float>, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !cir.float> +// CIR: %[[CONST_V0:.*]] = cir.const #cir.zero : !cir.vector<4 x !cir.float> +// CIR: %[[RESULT:.*]] = cir.vec.cmp(eq, %[[TMP_A]], %[[CONST_V0]]) : !cir.vector<4 x !cir.float>, !cir.vector<4 x !s32i> +// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca <4 x float>, i64 1, align 16 +// LLVM: %[[B_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[TMP_A:.*]] = load <4 x float>, ptr %[[A_ADDR]], align 16 +// LLVM: %[[RESULT:.*]] = fcmp oeq <4 x float> %[[TMP_A]], zeroinitializer +// LLVM: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// LLVM: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +// OGCG: %[[A_ADDR:.*]] = alloca <4 x float>, align 16 +// OGCG: %[[B_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[TMP_A:.*]] = load <4 x float>, 
ptr %[[A_ADDR]], align 16 +// OGCG: %[[RESULT:.*]] = fcmp oeq <4 x float> %[[TMP_A]], zeroinitializer +// OGCG: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// OGCG: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +void unary_extension() { + vi4 a; + vi4 b = __extension__ a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !s32i> +// CIR: cir.store{{.*}} %[[TMP_A]], %[[B_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[B_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// LLVM: store <4 x i32> %[[TMP_A]], ptr %[[B_ADDR]], align 16 + +// OGCG: %[[A_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[B_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// OGCG: store <4 x i32> %[[TMP_A]], ptr %[[B_ADDR]], align 16 diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index 9d88acef91eef..f47da41e5b200 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -1,9 +1,16 @@ // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-rtti -fclangir -emit-cir %s -o %t.cir -// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s +// RUN: FileCheck --check-prefixes=CIR-NO-RTTI,CIR-COMMON --input-file=%t.cir %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-rtti -fclangir -emit-llvm %s -o %t-cir.ll -// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s +// RUN: FileCheck --check-prefixes=LLVM-NO-RTTI,LLVM-COMMON --input-file=%t-cir.ll %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fno-rtti -emit-llvm %s -o %t.ll -// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s +// RUN: FileCheck --check-prefixes=OGCG-NO-RTTI,OGCG-COMMON --input-file=%t.ll %s + +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: FileCheck --check-prefixes=CIR-RTTI,CIR-COMMON --input-file=%t.cir %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll +// RUN: FileCheck --check-prefixes=LLVM-RTTI,LLVM-COMMON --input-file=%t-cir.ll %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll +// RUN: FileCheck --check-prefixes=OGCG-RTTI,OGCG-COMMON --input-file=%t.ll %s // Note: This test will be expanded to verify VTT emission and VTT implicit // argument handling. For now, it's just test the record layout. @@ -39,449 +46,511 @@ void f(D *d) {} // Trigger vtable and VTT emission for D. 
void D::y() {} -// CIR: !rec_A2Ebase = !cir.record -// CIR: !rec_B2Ebase = !cir.record -// CIR: !rec_C2Ebase = !cir.record -// CIR: !rec_A = !cir.record}> -// CIR: !rec_B = !cir.record, !rec_A2Ebase, !cir.array}> -// CIR: !rec_C = !cir.record -// CIR: !rec_D = !cir.record +// CIR-COMMON: !rec_A2Ebase = !cir.record +// CIR-COMMON: !rec_B2Ebase = !cir.record +// CIR-COMMON: !rec_C2Ebase = !cir.record +// CIR-COMMON: !rec_A = !cir.record}> +// CIR-COMMON: !rec_B = !cir.record, !rec_A2Ebase, !cir.array}> +// CIR-COMMON: !rec_C = !cir.record +// CIR-COMMON: !rec_D = !cir.record -// CIR: !rec_anon_struct = !cir.record x 5>, !cir.array x 4>, !cir.array x 4>}> -// CIR: !rec_anon_struct1 = !cir.record x 4>, !cir.array x 4>}> +// CIR-RTTI: ![[REC_TYPE_INFO_VTABLE:.*]]= !cir.record, !cir.ptr, !u32i, !u32i, !cir.ptr, !s64i, !cir.ptr, !s64i}> +// CIR-COMMON: ![[REC_D_VTABLE:.*]] = !cir.record x 5>, !cir.array x 4>, !cir.array x 4>}> +// CIR-COMMON: ![[REC_B_OR_C_IN_D_VTABLE:.*]]= !cir.record x 4>, !cir.array x 4>}> // Vtable for D -// CIR: cir.global{{.*}} @_ZTV1D = #cir.vtable<{ -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr<40 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1D1yEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 5>, -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr<24 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr<-16 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 4>, -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.ptr<-40 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 4> -// CIR-SAME: }> : !rec_anon_struct {alignment = 8 : i64} - -// LLVM: @_ZTV1D = global { [5 x ptr], [4 x ptr], [4 x ptr] } { -// LLVM-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv], -// LLVM-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr null, ptr @_ZN1C1xEv], -// LLVM-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] -// LLVM-SAME: }, align 8 - -// OGCG: @_ZTV1D = unnamed_addr constant { [5 x ptr], [4 x ptr], [4 x ptr] } { -// OGCG-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv], -// OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr null, ptr @_ZN1C1xEv], -// OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] -// OGCG-SAME: }, align 8 + +// CIR-COMMON: cir.global{{.*}} @_ZTV1D = #cir.vtable<{ +// CIR-COMMON-SAME: #cir.const_array<[ +// CIR-COMMON-SAME: #cir.ptr<40 : i64> : !cir.ptr, +// CIR-COMMON-SAME: #cir.ptr : !cir.ptr, +// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1D> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1D1yEv> : !cir.ptr +// CIR-COMMON-SAME: ]> : !cir.array x 5>, +// CIR-COMMON-SAME: #cir.const_array<[ +// CIR-COMMON-SAME: #cir.ptr<24 : i64> : !cir.ptr, +// CIR-COMMON-SAME: #cir.ptr<-16 : i64> : !cir.ptr, +// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1D> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr +// CIR-COMMON-SAME: ]> : 
!cir.array x 4>, +// CIR-COMMON-SAME: #cir.const_array<[ +// CIR-COMMON-SAME: #cir.ptr : !cir.ptr, +// CIR-COMMON-SAME: #cir.ptr<-40 : i64> : !cir.ptr, +// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1D> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr +// CIR-COMMON-SAME: ]> : !cir.array x 4> +// CIR-COMMON-SAME: }> : ![[REC_D_VTABLE]] {alignment = 8 : i64} + +// LLVM-COMMON: @_ZTV1D = global { [5 x ptr], [4 x ptr], [4 x ptr] } { +// LLVM-NO-RTTI-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv], +// LLVM-RTTI-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1D, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv], +// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr null, ptr @_ZN1C1xEv], +// LLVM-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr @_ZTI1D, ptr @_ZN1C1xEv], +// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] +// LLVM-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1D, ptr @_ZN1A1vEv] +// LLVM-COMMON-SAME: }, align 8 + +// OGCG-COMMON: @_ZTV1D = unnamed_addr constant { [5 x ptr], [4 x ptr], [4 x ptr] } { +// OGCG-NO-RTTI-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv], +// OGCG-RTTI-SAME: [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1D, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv], +// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr null, ptr @_ZN1C1xEv], +// OGCG-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr @_ZTI1D, ptr @_ZN1C1xEv], +// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] +// OGCG-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1D, ptr @_ZN1A1vEv] +// OGCG-COMMON-SAME: }, align 8 // VTT for D -// CIR: cir.global{{.*}} @_ZTT1D = #cir.const_array<[ -// CIR-SAME: #cir.global_view<@_ZTV1D, [0 : i32, 3 : i32]> : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZTC1D0_1B, [0 : i32, 3 : i32]> : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZTC1D0_1B, [1 : i32, 3 : i32]> : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZTC1D16_1C, [0 : i32, 3 : i32]> : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZTC1D16_1C, [1 : i32, 3 : i32]> : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZTV1D, [2 : i32, 3 : i32]> : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZTV1D, [1 : i32, 3 : i32]> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 7> {alignment = 8 : i64} - -// LLVM: @_ZTT1D = global [7 x ptr] [ -// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), -// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 24), -// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 56), -// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 24), -// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 56), -// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), -// LLVM-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64) -// LLVM-SAME: ], align 8 - -// OGCG: @_ZTT1D = unnamed_addr constant [7 x ptr] [ -// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3), -// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 0, 
i32 3), -// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 1, i32 3), -// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 0, i32 3), -// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 1, i32 3), -// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3), -// OGCG-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3) -// OGCG-SAME: ], align 8 + +// CIR-COMMON: cir.global{{.*}} @_ZTT1D = #cir.const_array<[ +// CIR-COMMON-SAME: #cir.global_view<@_ZTV1D, [0 : i32, 3 : i32]> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZTC1D0_1B, [0 : i32, 3 : i32]> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZTC1D0_1B, [1 : i32, 3 : i32]> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZTC1D16_1C, [0 : i32, 3 : i32]> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZTC1D16_1C, [1 : i32, 3 : i32]> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZTV1D, [2 : i32, 3 : i32]> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZTV1D, [1 : i32, 3 : i32]> : !cir.ptr +// CIR-COMMON-SAME: ]> : !cir.array x 7> {alignment = 8 : i64} + +// LLVM-COMMON: @_ZTT1D = global [7 x ptr] [ +// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), +// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 24), +// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 56), +// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 24), +// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 56), +// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), +// LLVM-COMMON-SAME: ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64) +// LLVM-COMMON-SAME: ], align 8 + +// OGCG-COMMON: @_ZTT1D = unnamed_addr constant [7 x ptr] [ +// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3), +// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 0, i32 3), +// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 1, i32 3), +// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 0, i32 3), +// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 1, i32 3), +// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3), +// OGCG-COMMON-SAME: ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3) +// OGCG-COMMON-SAME: ], align 8 // Construction vtable for B-in-D -// CIR: cir.global{{.*}} @_ZTC1D0_1B = #cir.vtable<{ -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr<40 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 4>, -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.ptr<-40 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: 
#cir.global_view<@_ZN1A1vEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 4> -// CIR-SAME: }> : !rec_anon_struct1 {alignment = 8 : i64} - -// LLVM: @_ZTC1D0_1B = global { [4 x ptr], [4 x ptr] } { -// LLVM-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv], -// LLVM-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] -// LLVM-SAME: }, align 8 - -// OGCG: @_ZTC1D0_1B = unnamed_addr constant { [4 x ptr], [4 x ptr] } { -// OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv], -// OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] -// OGCG-SAME: }, align 8 + +// CIR-COMMON: cir.global{{.*}} @_ZTC1D0_1B = #cir.vtable<{ +// CIR-COMMON-SAME: #cir.const_array<[ +// CIR-COMMON-SAME: #cir.ptr<40 : i64> : !cir.ptr, +// CIR-COMMON-SAME: #cir.ptr : !cir.ptr, +// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1B> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1B1wEv> : !cir.ptr +// CIR-COMMON-SAME: ]> : !cir.array x 4>, +// CIR-COMMON-SAME: #cir.const_array<[ +// CIR-COMMON-SAME: #cir.ptr : !cir.ptr, +// CIR-COMMON-SAME: #cir.ptr<-40 : i64> : !cir.ptr, +// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1B> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr +// CIR-COMMON-SAME: ]> : !cir.array x 4> +// CIR-COMMON-SAME: }> : ![[REC_B_OR_C_IN_D_VTABLE]] + +// LLVM-COMMON: @_ZTC1D0_1B = global { [4 x ptr], [4 x ptr] } { +// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv], +// LLVM-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1B, ptr @_ZN1B1wEv], +// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] +// LLVM-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1B, ptr @_ZN1A1vEv] +// LLVM-COMMON-SAME: }, align 8 + +// OGCG-COMMON: @_ZTC1D0_1B = unnamed_addr constant { [4 x ptr], [4 x ptr] } { +// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr null, ptr @_ZN1B1wEv], +// OGCG-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1B, ptr @_ZN1B1wEv], +// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr null, ptr @_ZN1A1vEv] +// OGCG-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1B, ptr @_ZN1A1vEv] +// OGCG-COMMON-SAME: }, align 8 + +// CIR-RTTI: cir.global{{.*}} @_ZTI1B : !cir.ptr + +// LLVM-RTTI: @_ZTI1B = external global ptr + +// OGCG-RTTI: @_ZTI1B = external constant ptr // Construction vtable for C-in-D -// CIR: cir.global{{.*}} @_ZTC1D16_1C = #cir.vtable<{ -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr<24 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 4>, -// CIR-SAME: #cir.const_array<[ -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.ptr<-24 : i64> : !cir.ptr, -// CIR-SAME: #cir.ptr : !cir.ptr, -// CIR-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr -// CIR-SAME: ]> : !cir.array x 4> -// CIR-SAME: }> : !rec_anon_struct1 {alignment = 8 : i64} - -// LLVM: @_ZTC1D16_1C = global { [4 x ptr], [4 x ptr] } { -// LLVM-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr null, ptr @_ZN1C1xEv], -// LLVM-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr null, ptr @_ZN1A1vEv] -// LLVM-SAME: }, align 8 
- -// OGCG: @_ZTC1D16_1C = unnamed_addr constant { [4 x ptr], [4 x ptr] } { -// OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr null, ptr @_ZN1C1xEv], -// OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr null, ptr @_ZN1A1vEv] -// OGCG-SAME: }, align 8 + +// CIR-COMMON: cir.global{{.*}} @_ZTC1D16_1C = #cir.vtable<{ +// CIR-COMMON-SAME: #cir.const_array<[ +// CIR-COMMON-SAME: #cir.ptr<24 : i64> : !cir.ptr, +// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1C> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1C1xEv> : !cir.ptr +// CIR-COMMON-SAME: ]> : !cir.array x 4>, +// CIR-COMMON-SAME: #cir.const_array<[ +// CIR-COMMON-SAME: #cir.ptr : !cir.ptr, +// CIR-COMMON-SAME: #cir.ptr<-24 : i64> : !cir.ptr, +// CIR-NO-RTTI-SAME: #cir.ptr : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1C> : !cir.ptr, +// CIR-COMMON-SAME: #cir.global_view<@_ZN1A1vEv> : !cir.ptr +// CIR-COMMON-SAME: ]> : !cir.array x 4>}> +// CIR-COMMON-SAME: : ![[REC_B_OR_C_IN_D_VTABLE]] + +// LLVM-COMMON: @_ZTC1D16_1C = global { [4 x ptr], [4 x ptr] } { +// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr null, ptr @_ZN1C1xEv], +// LLVM-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr @_ZTI1C, ptr @_ZN1C1xEv], +// LLVM-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr null, ptr @_ZN1A1vEv] +// LLVM-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr @_ZTI1C, ptr @_ZN1A1vEv] +// LLVM-COMMON-SAME: }, align 8 + +// OGCG-COMMON: @_ZTC1D16_1C = unnamed_addr constant { [4 x ptr], [4 x ptr] } { +// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr null, ptr @_ZN1C1xEv], +// OGCG-RTTI-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr @_ZTI1C, ptr @_ZN1C1xEv], +// OGCG-NO-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr null, ptr @_ZN1A1vEv] +// OGCG-RTTI-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr @_ZTI1C, ptr @_ZN1A1vEv] +// OGCG-COMMON-SAME: }, align 8 + +// RTTI class type info for D + +// CIR-RTTI: cir.globa{{.*}} @_ZTVN10__cxxabiv121__vmi_class_type_infoE : !cir.ptr> + +// CIR-RTTI: cir.global{{.*}} @_ZTS1D = #cir.const_array<"1D" : !cir.array> : !cir.array + +// CIR-RTTI: cir.global{{.*}} @_ZTI1D = #cir.typeinfo<{ +// CIR-RTTI-SAME: #cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr, +// CIR-RTTI-SAME: #cir.global_view<@_ZTS1D> : !cir.ptr, +// CIR-RTTI-SAME: #cir.int<2> : !u32i, #cir.int<2> : !u32i, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1B> : !cir.ptr, +// CIR-RTTI-SAME: #cir.int<2> : !s64i, +// CIR-RTTI-SAME: #cir.global_view<@_ZTI1C> : !cir.ptr, +// CIR-RTTI-SAME: #cir.int<4098> : !s64i}> : !rec_anon_struct + +// CIR-RTTI: cir.global{{.*}} @_ZTV1A : !rec_anon_struct3 + +// LLVM-RTTI: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr +// LLVM-RTTI: @_ZTS1D = global [2 x i8] c"1D", align 1 + +// LLVM-RTTI: @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64 } { +// LLVM-RTTI-SAME: ptr getelementptr (i8, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 16), +// LLVM-RTTI-SAME: ptr @_ZTS1D, i32 2, i32 2, ptr @_ZTI1B, i64 2, ptr @_ZTI1C, i64 4098 } + +// OGCG-RTTI: @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64 } { +// OGCG-RTTI-SAME: ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2), +// OGCG-RTTI-SAME: ptr @_ZTS1D, i32 2, i32 2, ptr @_ZTI1B, i64 2, ptr @_ZTI1C, i64 4098 }, align 8 + +// OGCG-RTTI: 
@_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global [0 x ptr] +// OGCG-RTTI: @_ZTS1D = constant [3 x i8] c"1D\00", align 1 +// OGCG-RTTI: @_ZTV1A = external unnamed_addr constant { [3 x ptr] }, align 8 D::D() {} // In CIR, this gets emitted after the B and C constructors. See below. // Base (C2) constructor for D -// OGCG: define {{.*}} void @_ZN1DC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]]) -// OGCG: %[[THIS_ADDR:.*]] = alloca ptr -// OGCG: %[[VTT_ADDR:.*]] = alloca ptr -// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] -// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] -// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] -// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] -// OGCG: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1 -// OGCG: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} %[[B_VTT]]) -// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 -// OGCG: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 3 -// OGCG: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} %[[C_VTT]]) -// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] -// OGCG: store ptr %[[VPTR]], ptr %[[THIS]] -// OGCG: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 5 -// OGCG: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]] -// OGCG: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]] -// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24 -// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] -// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] -// OGCG: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]] -// OGCG: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 6 -// OGCG: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]] -// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 -// OGCG: store ptr %[[C_VPTR]], ptr %[[C_ADDR]] - +// OGCG-COMMON: define {{.*}} void @_ZN1DC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]]) +// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG-COMMON: %[[VTT_ADDR:.*]] = alloca ptr +// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// OGCG-COMMON: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1 +// OGCG-COMMON: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} %[[B_VTT]]) +// OGCG-COMMON: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 +// OGCG-COMMON: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 3 +// OGCG-COMMON: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} %[[C_VTT]]) +// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// OGCG-COMMON: store ptr %[[VPTR]], ptr %[[THIS]] +// OGCG-COMMON: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 5 +// OGCG-COMMON: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]] +// OGCG-COMMON: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]] +// OGCG-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24 +// OGCG-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// OGCG-COMMON: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// OGCG-COMMON: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]] +// OGCG-COMMON: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 6 +// OGCG-COMMON: 
%[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]] +// OGCG-COMMON: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 +// OGCG-COMMON: store ptr %[[C_VPTR]], ptr %[[C_ADDR]] // Base (C2) constructor for B -// CIR: cir.func {{.*}} @_ZN1BC2Ev -// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr -// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr> -// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] -// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init] -// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] -// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]] -// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] -// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] -// CIR: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr> -// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr -// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] -// CIR: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] -// CIR: cir.store{{.*}} %[[VPTR]], %[[B_VPTR_ADDR]] -// CIR: %[[B_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr> -// CIR: %[[B_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[B_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr -// CIR: %[[B_VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]] -// CIR: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] -// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]] -// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr -// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24> -// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr -// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr), !cir.ptr -// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i -// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr -// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr -// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr -// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]] -// CIR: cir.store{{.*}} %[[B_VPTR]], %[[BASE_VPTR_ADDR]] - -// LLVM: define {{.*}} void @_ZN1BC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) -// LLVM: %[[THIS_ADDR:.*]] = alloca ptr -// LLVM: %[[VTT_ADDR:.*]] = alloca ptr -// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] -// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] -// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] -// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] -// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] -// LLVM: store ptr %[[VPTR]], ptr %[[THIS]] -// LLVM: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1 -// LLVM: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] -// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] -// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 -// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] -// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] -// LLVM: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] - -// OGCG: define {{.*}} void @_ZN1BC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]]) -// OGCG: %[[THIS_ADDR:.*]] = alloca ptr -// OGCG: %[[VTT_ADDR:.*]] = alloca ptr -// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] -// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] -// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] -// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] -// OGCG: 
%[[VPTR:.*]] = load ptr, ptr %[[VTT]] -// OGCG: store ptr %[[VPTR]], ptr %[[THIS]] -// OGCG: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1 -// OGCG: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] -// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] -// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 -// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] -// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] -// OGCG: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] +// CIR-COMMON: cir.func {{.*}} @_ZN1BC2Ev +// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr +// CIR-COMMON-SAME: %[[VTT_ARG:.*]]: !cir.ptr> +// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR-COMMON: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init] +// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR-COMMON: cir.store %[[VTT_ARG]], %[[VTT_ADDR]] +// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR-COMMON: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] +// CIR-COMMON: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr> +// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast bitcast %[[VTT_ADDR_POINT]] : !cir.ptr> -> !cir.ptr +// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] +// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[B_VPTR_ADDR]] +// CIR-COMMON: %[[B_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr> +// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.cast bitcast %[[B_VTT_ADDR_POINT]] : !cir.ptr> -> !cir.ptr +// CIR-COMMON: %[[B_VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]] +// CIR-COMMON: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]] +// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast bitcast %[[VPTR]] : !cir.vptr -> !cir.ptr +// CIR-COMMON: %[[CONST_24:.*]] = cir.const #cir.int<-24> +// CIR-COMMON: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr +// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast bitcast %[[BASE_OFFSET_ADDR]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i +// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast bitcast %[[THIS]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr +// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast bitcast %[[BASE_PTR]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]] +// CIR-COMMON: cir.store{{.*}} %[[B_VPTR]], %[[BASE_VPTR_ADDR]] + +// LLVM-COMMON: define {{.*}} void @_ZN1BC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) +// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM-COMMON: %[[VTT_ADDR:.*]] = alloca ptr +// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// LLVM-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// LLVM-COMMON: store ptr %[[VPTR]], ptr %[[THIS]] +// LLVM-COMMON: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1 +// LLVM-COMMON: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] +// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] +// LLVM-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, 
ptr %[[VPTR]], i64 -24 +// LLVM-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// LLVM-COMMON: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// LLVM-COMMON: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] + +// OGCG-COMMON: define {{.*}} void @_ZN1BC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]]) +// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG-COMMON: %[[VTT_ADDR:.*]] = alloca ptr +// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// OGCG-COMMON: store ptr %[[VPTR]], ptr %[[THIS]] +// OGCG-COMMON: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1 +// OGCG-COMMON: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] +// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] +// OGCG-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 +// OGCG-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// OGCG-COMMON: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// OGCG-COMMON: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] // Base (C2) constructor for C -// CIR: cir.func {{.*}} @_ZN1CC2Ev -// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr -// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr> -// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] -// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init] -// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] -// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]] -// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] -// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] -// CIR: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr> -// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr -// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] -// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] -// CIR: cir.store{{.*}} %[[VPTR]], %[[C_VPTR_ADDR]] -// CIR: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr> -// CIR: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr -// CIR: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] -// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] -// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] -// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr -// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24> -// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr -// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr), !cir.ptr -// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i -// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr -// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr -// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr -// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]] -// CIR: cir.store{{.*}} %[[C_VPTR]], %[[BASE_VPTR_ADDR]] - -// LLVM: define {{.*}} void @_ZN1CC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) -// LLVM: %[[THIS_ADDR:.*]] = alloca ptr -// LLVM: %[[VTT_ADDR:.*]] = alloca ptr -// LLVM: store ptr %[[THIS_ARG]], ptr 
%[[THIS_ADDR]] -// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] -// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] -// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] -// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] -// LLVM: store ptr %[[VPTR]], ptr %[[THIS]] -// LLVM: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1 -// LLVM: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] -// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] -// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 -// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] -// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] -// LLVM: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] - -// OGCG: define {{.*}} void @_ZN1CC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]]) -// OGCG: %[[THIS_ADDR:.*]] = alloca ptr -// OGCG: %[[VTT_ADDR:.*]] = alloca ptr -// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] -// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] -// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] -// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] -// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] -// OGCG: store ptr %[[VPTR]], ptr %[[THIS]] -// OGCG: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1 -// OGCG: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] -// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] -// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 -// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] -// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] -// OGCG: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] +// CIR-COMMON: cir.func {{.*}} @_ZN1CC2Ev +// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr +// CIR-COMMON-SAME: %[[VTT_ARG:.*]]: !cir.ptr> +// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR-COMMON: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init] +// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR-COMMON: cir.store %[[VTT_ARG]], %[[VTT_ADDR]] +// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR-COMMON: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] +// CIR-COMMON: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr> +// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast bitcast %[[VTT_ADDR_POINT]] : !cir.ptr> -> !cir.ptr +// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] +// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[C_VPTR_ADDR]] +// CIR-COMMON: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr> +// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.cast bitcast %[[C_VTT_ADDR_POINT]] : !cir.ptr> -> !cir.ptr +// CIR-COMMON: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] +// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] +// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast bitcast %[[VPTR]] : !cir.vptr -> !cir.ptr +// CIR-COMMON: %[[CONST_24:.*]] = cir.const #cir.int<-24> +// CIR-COMMON: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr +// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast bitcast %[[BASE_OFFSET_ADDR]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i +// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast bitcast %[[THIS]] : !cir.ptr -> !cir.ptr +// 
CIR-COMMON: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr +// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast bitcast %[[BASE_PTR]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]] +// CIR-COMMON: cir.store{{.*}} %[[C_VPTR]], %[[BASE_VPTR_ADDR]] + +// LLVM-COMMON: define {{.*}} void @_ZN1CC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) +// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM-COMMON: %[[VTT_ADDR:.*]] = alloca ptr +// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// LLVM-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// LLVM-COMMON: store ptr %[[VPTR]], ptr %[[THIS]] +// LLVM-COMMON: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1 +// LLVM-COMMON: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] +// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] +// LLVM-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 +// LLVM-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// LLVM-COMMON: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// LLVM-COMMON: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] + +// OGCG-COMMON: define {{.*}} void @_ZN1CC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]]) +// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG-COMMON: %[[VTT_ADDR:.*]] = alloca ptr +// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// OGCG-COMMON: store ptr %[[VPTR]], ptr %[[THIS]] +// OGCG-COMMON: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1 +// OGCG-COMMON: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] +// OGCG-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] +// OGCG-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 +// OGCG-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// OGCG-COMMON: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// OGCG-COMMON: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] // Base (C2) constructor for D -// CIR: cir.func {{.*}} @_ZN1DC2Ev -// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr -// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr> -// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] -// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init] -// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] -// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]] -// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] -// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] -// CIR: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [0] -> !cir.ptr -// CIR: %[[B_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr> -// CIR: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> () -// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr -// CIR: %[[C_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 3 -> !cir.ptr> -// CIR: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> () -// CIR: %[[D_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, 
offset = 0 -> !cir.ptr> -// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT]] : !cir.ptr>), !cir.ptr -// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] : !cir.ptr, !cir.vptr -// CIR: %[[D_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] -// CIR: cir.store{{.*}} %[[VPTR]], %[[D_VPTR_ADDR]] -// CIR: %[[D_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 5 -> !cir.ptr> -// CIR: %[[D_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr -// CIR: %[[D_VPTR:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR]] : !cir.ptr, !cir.vptr -// CIR: %[[D_VPTR_ADDR2:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr -// CIR: %[[VPTR2:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR2]] : !cir.ptr, !cir.vptr -// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR2]] : !cir.vptr), !cir.ptr -// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24> : !s64i -// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr -// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr), !cir.ptr -// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i -// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr), !cir.ptr -// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr -// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr), !cir.ptr -// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]] -// CIR: cir.store{{.*}} %[[D_VPTR]], %[[BASE_VPTR_ADDR]] -// CIR: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 6 -> !cir.ptr> -// CIR: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr>), !cir.ptr -// CIR: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] : !cir.ptr, !cir.vptr -// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr -// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr -> !cir.ptr -// CIR: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr - -// LLVM: define {{.*}} void @_ZN1DC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) { -// LLVM: %[[THIS_ADDR:.*]] = alloca ptr -// LLVM: %[[VTT_ADDR:.*]] = alloca ptr -// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] -// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] -// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] -// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] -// LLVM: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1 -// LLVM: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr %[[B_VTT]]) -// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 -// LLVM: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 3 -// LLVM: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr %[[C_VTT]]) -// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] -// LLVM: store ptr %[[VPTR]], ptr %[[THIS]] -// LLVM: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 5 -// LLVM: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]] -// LLVM: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]] -// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24 -// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] -// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] -// LLVM: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]] -// LLVM: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 6 -// LLVM: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]] -// LLVM: 
%[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 -// LLVM: store ptr %[[C_VPTR]], ptr %[[C_ADDR]] +// CIR-COMMON: cir.func {{.*}} @_ZN1DC2Ev +// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr +// CIR-COMMON-SAME: %[[VTT_ARG:.*]]: !cir.ptr> +// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR-COMMON: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init] +// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR-COMMON: cir.store %[[VTT_ARG]], %[[VTT_ADDR]] +// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR-COMMON: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] +// CIR-COMMON: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [0] -> !cir.ptr +// CIR-COMMON: %[[B_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 1 -> !cir.ptr> +// CIR-COMMON: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> () +// CIR-COMMON: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr +// CIR-COMMON: %[[C_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 3 -> !cir.ptr> +// CIR-COMMON: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> () +// CIR-COMMON: %[[D_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 0 -> !cir.ptr> +// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.cast bitcast %[[D_VTT]] : !cir.ptr> -> !cir.ptr +// CIR-COMMON: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] : !cir.ptr, !cir.vptr +// CIR-COMMON: %[[D_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[D_VPTR_ADDR]] +// CIR-COMMON: %[[D_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 5 -> !cir.ptr> +// CIR-COMMON: %[[D_VPTR_ADDR:.*]] = cir.cast bitcast %[[D_VTT_ADDR_POINT]] : !cir.ptr> -> !cir.ptr +// CIR-COMMON: %[[D_VPTR:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR]] : !cir.ptr, !cir.vptr +// CIR-COMMON: %[[D_VPTR_ADDR2:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: %[[VPTR2:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR2]] : !cir.ptr, !cir.vptr +// CIR-COMMON: %[[VPTR_ADDR2:.*]] = cir.cast bitcast %[[VPTR2]] : !cir.vptr -> !cir.ptr +// CIR-COMMON: %[[CONST_24:.*]] = cir.const #cir.int<-24> : !s64i +// CIR-COMMON: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr, %[[CONST_24]] : !s64i), !cir.ptr +// CIR-COMMON: %[[BASE_OFFSET_PTR:.*]] = cir.cast bitcast %[[BASE_OFFSET_ADDR]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr, !s64i +// CIR-COMMON: %[[THIS_PTR:.*]] = cir.cast bitcast %[[THIS]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr, %[[BASE_OFFSET]] : !s64i), !cir.ptr +// CIR-COMMON: %[[BASE_CAST:.*]] = cir.cast bitcast %[[BASE_PTR]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]] +// CIR-COMMON: cir.store{{.*}} %[[D_VPTR]], %[[BASE_VPTR_ADDR]] +// CIR-COMMON: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr>, offset = 6 -> !cir.ptr> +// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.cast bitcast %[[C_VTT_ADDR_POINT]] : !cir.ptr> -> !cir.ptr +// CIR-COMMON: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] : !cir.ptr, !cir.vptr +// CIR-COMMON: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr +// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr + 
+// LLVM-COMMON: define {{.*}} void @_ZN1DC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) { +// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM-COMMON: %[[VTT_ADDR:.*]] = alloca ptr +// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM-COMMON: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// LLVM-COMMON: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// LLVM-COMMON: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1 +// LLVM-COMMON: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr %[[B_VTT]]) +// LLVM-COMMON: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 +// LLVM-COMMON: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 3 +// LLVM-COMMON: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr %[[C_VTT]]) +// LLVM-COMMON: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// LLVM-COMMON: store ptr %[[VPTR]], ptr %[[THIS]] +// LLVM-COMMON: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 5 +// LLVM-COMMON: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]] +// LLVM-COMMON: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]] +// LLVM-COMMON: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24 +// LLVM-COMMON: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// LLVM-COMMON: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// LLVM-COMMON: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]] +// LLVM-COMMON: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 6 +// LLVM-COMMON: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]] +// LLVM-COMMON: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 +// LLVM-COMMON: store ptr %[[C_VPTR]], ptr %[[C_ADDR]] // The C2 constructor for D gets emitted earlier in OGCG, see above. 
// Base (C2) constructor for A -// CIR: cir.func {{.*}} @_ZN1AC2Ev -// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr -// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] -// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] -// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] -// CIR: %[[VPTR:.*]] = cir.vtable.address_point(@_ZTV1A, address_point = ) : !cir.vptr -// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr -// CIR: cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr - -// LLVM: define {{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]) { -// LLVM: %[[THIS_ADDR:.*]] = alloca ptr, i64 1, align 8 -// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]], align 8 -// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 -// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1A, i64 16), ptr %[[THIS]] +// CIR-COMMON: cir.func {{.*}} @_ZN1AC2Ev +// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr +// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR-COMMON: %[[VPTR:.*]] = cir.vtable.address_point(@_ZTV1A, address_point = ) : !cir.vptr +// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr + +// LLVM-COMMON: define {{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]) { +// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr, i64 1, align 8 +// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]], align 8 +// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// LLVM-COMMON: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1A, i64 16), ptr %[[THIS]] // The C2 constructor for A gets emitted later in OGCG, see below. 
// Complete (C1) constructor for D -// CIR: cir.func {{.*}} @_ZN1DC1Ev -// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr -// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] -// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] -// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] -// CIR: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [40] -> !cir.ptr -// CIR: cir.call @_ZN1AC2Ev(%[[A_ADDR]]) nothrow : (!cir.ptr) -> () -// CIR: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [0] -> !cir.ptr -// CIR: %[[B_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 1 -> !cir.ptr> -// CIR: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> () -// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr -// CIR: %[[C_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 3 -> !cir.ptr> -// CIR: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> () -// CIR: %[[D_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr -// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr -// CIR: cir.store{{.*}} %[[D_VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr -// CIR: %[[A_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr -// CIR: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [40] -> !cir.ptr -// CIR: %[[A_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[A_ADDR]] : !cir.ptr -> !cir.ptr -// CIR: cir.store{{.*}} %[[A_VPTR]], %[[A_VPTR_ADDR]] : !cir.vptr, !cir.ptr -// CIR: %[[C_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr -// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr -// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr -> !cir.ptr -// CIR: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr - -// LLVM: define {{.*}} void @_ZN1DC1Ev(ptr %[[THIS_ARG:.*]]) -// LLVM: %[[THIS_ADDR:.*]] = alloca ptr -// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] -// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] -// LLVM: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40 -// LLVM: call void @_ZN1AC2Ev(ptr %[[A_ADDR]]) -// LLVM: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 8)) -// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 -// LLVM: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 24)) -// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), ptr %[[THIS]] -// LLVM: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40 -// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), ptr %[[A_ADDR]] -// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 -// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64), ptr %[[C_ADDR]] - -// OGCG: define {{.*}} void @_ZN1DC1Ev(ptr {{.*}} %[[THIS_ARG:.*]]) -// OGCG: %[[THIS_ADDR:.*]] = alloca ptr -// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] -// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] -// OGCG: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40 -// OGCG: call void @_ZN1AC2Ev(ptr {{.*}} %[[A_ADDR]]) -// OGCG: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 1)) -// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 -// OGCG: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} getelementptr inbounds ([7 x 
ptr], ptr @_ZTT1D, i64 0, i64 3)) -// OGCG: store ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3), ptr %[[THIS]] -// OGCG: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40 -// OGCG: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3), ptr %[[A_ADDR]] -// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 -// OGCG: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3), ptr %[[C_ADDR]] - -// OGCG: define {{.*}} void @_ZN1AC2Ev(ptr {{.*}} %[[THIS_ARG:.*]]) -// OGCG: %[[THIS_ADDR:.*]] = alloca ptr -// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] -// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] -// OGCG: store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 2), ptr %[[THIS]] +// CIR-COMMON: cir.func {{.*}} @_ZN1DC1Ev +// CIR-COMMON-SAME: %[[THIS_ARG:.*]]: !cir.ptr +// CIR-COMMON: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR-COMMON: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR-COMMON: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR-COMMON: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [40] -> !cir.ptr +// CIR-COMMON: cir.call @_ZN1AC2Ev(%[[A_ADDR]]) nothrow : (!cir.ptr) -> () +// CIR-COMMON: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [0] -> !cir.ptr +// CIR-COMMON: %[[B_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 1 -> !cir.ptr> +// CIR-COMMON: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> () +// CIR-COMMON: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr +// CIR-COMMON: %[[C_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 3 -> !cir.ptr> +// CIR-COMMON: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr, !cir.ptr>) -> () +// CIR-COMMON: %[[D_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr +// CIR-COMMON: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: cir.store{{.*}} %[[D_VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr +// CIR-COMMON: %[[A_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr +// CIR-COMMON: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [40] -> !cir.ptr +// CIR-COMMON: %[[A_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[A_ADDR]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: cir.store{{.*}} %[[A_VPTR]], %[[A_VPTR_ADDR]] : !cir.vptr, !cir.ptr +// CIR-COMMON: %[[C_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = ) : !cir.vptr +// CIR-COMMON: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr nonnull [16] -> !cir.ptr +// CIR-COMMON: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr -> !cir.ptr +// CIR-COMMON: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr + +// LLVM-COMMON: define {{.*}} void @_ZN1DC1Ev(ptr %[[THIS_ARG:.*]]) +// LLVM-COMMON: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// LLVM-COMMON: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40 +// LLVM-COMMON: call void @_ZN1AC2Ev(ptr %[[A_ADDR]]) +// LLVM-COMMON: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 8)) +// LLVM-COMMON: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 
16 +// LLVM-COMMON: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 24)) +// LLVM-COMMON: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), ptr %[[THIS]] +// LLVM-COMMON: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40 +// LLVM-COMMON: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), ptr %[[A_ADDR]] +// LLVM-COMMON: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 +// LLVM-COMMON: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64), ptr %[[C_ADDR]] + +// OGCG-COMMON: define {{.*}} void @_ZN1DC1Ev(ptr {{.*}} %[[THIS_ARG:.*]]) +// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG-COMMON: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40 +// OGCG-COMMON: call void @_ZN1AC2Ev(ptr {{.*}} %[[A_ADDR]]) +// OGCG-COMMON: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 1)) +// OGCG-COMMON: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 +// OGCG-COMMON: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 3)) +// OGCG-COMMON: store ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3), ptr %[[THIS]] +// OGCG-COMMON: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40 +// OGCG-COMMON: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3), ptr %[[A_ADDR]] +// OGCG-COMMON: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 +// OGCG-COMMON: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3), ptr %[[C_ADDR]] + +// OGCG-COMMON: define {{.*}} void @_ZN1AC2Ev(ptr {{.*}} %[[THIS_ARG:.*]]) +// OGCG-COMMON: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG-COMMON: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG-COMMON: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG-COMMON: store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 2), ptr %[[THIS]] diff --git a/clang/test/CIR/CodeGenOpenACC/combined-copy.c b/clang/test/CIR/CodeGenOpenACC/combined-copy.c index b4573e66f24a5..c1dc938912845 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-copy.c +++ b/clang/test/CIR/CodeGenOpenACC/combined-copy.c @@ -1090,7 +1090,7 @@ void copy_member_of_array_element_member() { for(int i = 0; i < 5; ++i); // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> : !s32i // CHECK-NEXT: %[[GETINNER:.*]] = cir.get_member %[[OUTER]][0] {name = "inner"} : !cir.ptr -> !cir.ptr> - // CHECK-NEXT: %[[INNERDECAY:.*]] = cir.cast(array_to_ptrdecay, %[[GETINNER]] : !cir.ptr>), !cir.ptr + // CHECK-NEXT: %[[INNERDECAY:.*]] = cir.cast array_to_ptrdecay %[[GETINNER]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[INNERDECAY]] : !cir.ptr, %[[TWO]] : !s32i), !cir.ptr // CHECK-NEXT: %[[GETB:.*]] = cir.get_member %[[STRIDE]][1] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[COPYIN1:.*]] = acc.copyin varPtr(%[[GETB]] : !cir.ptr) -> !cir.ptr {dataClause = #acc, name = "outer.inner[2].b"} diff --git a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp index 7a71842d5014f..e836a37a9bccd 100644 --- 
a/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-firstprivate-clause.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct NoCopyConstruct {}; @@ -15,153 +15,163 @@ struct HasDtor { ~HasDtor(); }; -// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSA5_7HasDtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } // -// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSf : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// 
CHECK-NEXT: } // -// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS15NoCopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } // -// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS13CopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } // +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS14NonDefaultCtor : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: acc.yield +// CHECK-NEXT: } // +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS7HasDtor : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_HasDtor, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ELEM:.*]] = 
cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] -// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr) -> () -// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -> () // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_14NonDefaultCtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> 
!cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_13CopyConstruct : !cir.ptr> 
init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// 
CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () -// +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // CHECK-NEXT: } // @@ -171,200 +181,190 @@ struct HasDtor { // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { // CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast 
array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_13CopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> 
-// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast 
array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_14NonDefaultCtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] 
: !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : 
!u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS7HasDtor : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_HasDtor, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -> () // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS14NonDefaultCtor : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_7HasDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS13CopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS15NoCopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] 
-// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSf : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// +// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr +// +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] +// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> 
+// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp index 6063f7d7f500b..f636a0f3fc416 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-private-clause.cpp @@ -15,79 +15,43 @@ struct HasDtor { ~HasDtor(); }; -// CHECK: acc.private.recipe @privatization__ZTSA5_7HasDtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] -// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr) -> () -// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } -// -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_14NonDefaultCtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init", init] -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !u64i -// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] -// CHECK-NEXT: cir.store %[[ARRPTR]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ELEM_LOAD]]) : (!cir.ptr) -> () -// CHECK-NEXT: %[[ONE_CONST:.*]] = cir.const #cir.int<1> : !u64i -// CHECK-NEXT: 
%[[ELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[ONE_CONST]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[LAST_ELEM]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// Int +// CHECK: acc.private.recipe @privatization__ZTSi : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_13CopyConstruct : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// Float +// CHECK-NEXT: acc.private.recipe @privatization__ZTSf : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_15NoCopyConstruct : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// NoCopyConstruct +// CHECK-NEXT: acc.private.recipe @privatization__ZTS15NoCopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_f : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CopyConstruct +// CHECK-NEXT: acc.private.recipe @privatization__ZTS13CopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_i : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// NonDefaultCtor +// CHECK-NEXT: acc.private.recipe @privatization__ZTS14NonDefaultCtor : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.private.init", init] +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ALLOCA]]) : (!cir.ptr) -> () // CHECK-NEXT: acc.yield // CHECK-NEXT: } // +// HasDtor // CHECK-NEXT: acc.private.recipe @privatization__ZTS7HasDtor : !cir.ptr init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): // CHECK-NEXT: cir.alloca !rec_HasDtor, !cir.ptr, ["openacc.private.init"] @@ -98,34 +62,98 @@ struct HasDtor { // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTS14NonDefaultCtor : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.private.init", init] -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ALLOCA]]) : (!cir.ptr) -> () +// int[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, 
%[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTS13CopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.private.init"] +// float[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_f : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTS15NoCopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.private.init"] +// NoCopyConstruct[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSf : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.private.init"] +// CopyConstruct[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_13CopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSi : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] +// NonDefaultCtor[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_14NonDefaultCtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ITR_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call 
@_ZN14NonDefaultCtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// +// HasDtor[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_7HasDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ITR_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -244,7 +272,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someIntArr[1]"} - // CHECK-NEXT: acc.loop combined(serial) private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(serial) private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -259,7 +287,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) 
startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someFloatArr[1]"} - // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -274,7 +302,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1]"} - // CHECK-NEXT: acc.loop combined(serial) private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(serial) private(@privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -289,7 +317,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[HASCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "hasCopyArr[1]"} - // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -304,7 +332,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOTDEFCTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "notDefCtorArr[1]"} - // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -319,7 +347,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1]"} - // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -369,12 +397,12 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) 
stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1]"} - // CHECK-NEXT: acc.loop combined(serial) private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(serial) private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -391,7 +419,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someIntArr[1:1]"} - // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -407,7 +435,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someFloatArr[1:1]"} - // CHECK-NEXT: acc.loop combined(serial) private(@privatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(serial) private(@privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -423,7 +451,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1:1]"} - // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -439,7 +467,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 
1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[HASCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "hasCopyArr[1:1]"} - // CHECK-NEXT: acc.loop combined(serial) private(@privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(serial) private(@privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -455,7 +483,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOTDEFCTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "notDefCtorArr[1:1]"} - // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -471,7 +499,7 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1:1]"} - // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield @@ -527,12 +555,12 @@ extern "C" void acc_combined() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1:1]"} - // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) + // CHECK-NEXT: acc.loop combined(parallel) private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) // 
CHECK: acc.yield // CHECK-NEXT: } loc // CHECK-NEXT: acc.yield diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp index 6f657e5e18980..3d295d58d1026 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct DefaultOperators { int i; @@ -8,12 +8,262 @@ struct DefaultOperators { bool b; }; +template +void acc_combined() { + T someVar; + T someVarArr[5]; +#pragma acc parallel loop reduction(+:someVar) + for(int i=0;i < 5; ++i); +// CHECK: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } +#pragma acc parallel loop reduction(*:someVar) + +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] 
{name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} 
%[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVar) + +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member 
%[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// 
CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +#pragma acc parallel loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// 
CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -34,7 +284,6 @@ struct DefaultOperators { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -51,11 +300,12 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr @@ -152,184 +402,100 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const 
#cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = 
cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST_IDX:.*]] = 
cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LEAST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : 
!cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const 
#cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -338,11 +504,12 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr @@ -439,99 +606,100 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// 
CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LEAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store 
{{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member 
%[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -540,11 +708,99 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe 
@reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} 
%[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr @@ -641,12 +897,13 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : 
!cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -667,6 +924,7 @@ struct DefaultOperators { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -683,321 +941,48 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : 
!s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = 
cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] 
{name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> 
!cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -template -void acc_combined() { - T someVar; - T someVarArr[5]; -#pragma acc parallel loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc 
parallel loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc parallel loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. 
+ // CHECK-NEXT: cir.func {{.*}}@_Z12acc_combined } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp index 737426a9ead1a..be33afe07e363 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp @@ -1,11 +1,137 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +template +void acc_combined() { + T someVar; + T someVarArr[5]; +#pragma acc parallel loop reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(*:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: 
^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: 
cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init { +#pragma acc parallel loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -29,11 +155,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -59,28 +186,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const 
#cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -88,28 +217,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(min:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : 
!s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -117,11 +248,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -147,29 +279,29 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, 
%[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -177,29 +309,29 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const 
#cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -207,11 +339,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : 
!cir.float // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -237,12 +370,13 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -266,199 +400,48 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = 
cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// 
CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -template -void acc_combined() { - T someVar; - T someVarArr[5]; -#pragma acc parallel loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc parallel loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[1:1]) - for(int i = 0; i < 5; 
++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@_Z12acc_combined } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp index 375bef5bc7169..f13d96d171123 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsInline { int i; @@ -21,257 +21,296 @@ struct HasOperatorsInline { HasOperatorsInline &operator=(HasOperatorsInline& other); }; -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +template +void acc_combined() { + T someVar; + T someVarArr[5]; +#pragma acc parallel loop reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member 
%[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(*:someVar) +// CHECK: 
acc.reduction.recipe @reduction_mul__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call 
@_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc 
parallel loop reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true 
+// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: 
!cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store 
{{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, 
%[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] 
{alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +#pragma acc parallel loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -292,7 +331,6 @@ struct HasOperatorsInline { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -309,130 +347,9 @@ struct HasOperatorsInline { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: 
cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, 
%[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : 
!cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -450,99 +367,100 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : 
!cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: 
cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} 
%[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -553,7 +471,7 @@ struct HasOperatorsInline { // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -571,11 +489,12 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: 
%[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr @@ -674,7 +593,7 @@ struct HasOperatorsInline { // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -692,99 +611,222 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(min:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// 
CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} 
%[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const 
#cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : 
!cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr 
+// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -795,7 +837,7 @@ struct HasOperatorsInline { // CHECK-NEXT: } destroy { // CHECK-NEXT: 
^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -813,12 +855,13 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -839,6 +882,7 @@ struct HasOperatorsInline { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -855,9 +899,9 @@ struct HasOperatorsInline { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -875,358 +919,298 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr 
-// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe 
@reduction_xor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const 
#cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, 
["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: 
%[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe 
@reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const 
#cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// 
CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(||:someVarArr) +// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: 
%[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, 
%[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - - -template -void acc_combined() { - T someVar; - T someVarArr[5]; -#pragma acc parallel loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc parallel loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[1:1]) - for(int i = 0; i < 5; 
++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@_Z12acc_combined } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp index 269e881580962..952fee9b1ac1a 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp @@ -1,10 +1,140 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { +template +void acc_combined() { + T someVar; + T someVarArr[5]; +#pragma acc parallel loop reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(*:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, 
["openacc.reduction.init", init] +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: 
%[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); + +#pragma acc parallel loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -19,7 +149,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -28,11 +158,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -58,87 +189,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : 
!cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: 
cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -146,11 +220,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -176,29 +251,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: 
cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -206,11 +282,72 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr +// 
CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -236,12 +373,13 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop 
reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -256,7 +394,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -265,201 +403,48 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store 
{{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -template -void acc_combined() { - T someVar; 
- T someVarArr[5]; -#pragma acc parallel loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc parallel loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); + // TODO OpenACC: When pointers/arrays are handled 
correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@_Z12acc_combined } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp index 20ac00330ba3d..15646ed87b284 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsOutline { int i; unsigned u; @@ -20,257 +20,297 @@ bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &); // For min/max HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +template +void acc_combined() { + T someVar; + T someVarArr[5]; +#pragma acc parallel loop reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // 
CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr +#pragma acc parallel loop reduction(*:someVar) +// CHECK: acc.reduction.recipe @reduction_mul__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, 
["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVar) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// CHECK-NEXT: acc.reduction.recipe 
@reduction_max__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// 
CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: 
%[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// 
CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = 
cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = 
cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, 
!cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +#pragma acc parallel loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -291,7 +331,6 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -308,130 +347,9 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: 
cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = 
cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : 
!cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -449,99 +367,100 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr 
-// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] 
{name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} 
%[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -552,7 +471,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -570,11 +489,12 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(max:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast 
array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr @@ -673,7 +593,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -691,99 +611,222 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(min:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// 
CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// 
CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation 
here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: 
%[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const 
#cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -794,7 
+837,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -812,12 +855,13 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -838,6 +882,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -854,9 +899,9 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -874,357 +919,299 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe 
@reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination 
operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: 
%[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// 
CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// 
CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS19HasOperatorsOutline : 
!cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = 
cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// 
CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc parallel loop reduction(||:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : 
!cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : 
(!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -template -void acc_combined() { - T someVar; - T someVarArr[5]; -#pragma acc parallel loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc parallel loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc parallel loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(+:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(*:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(max:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(min:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); 
#pragma acc parallel loop reduction(&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(|:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(^:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(&&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc parallel loop reduction(||:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); + + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@_Z12acc_combined } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/combined.cpp b/clang/test/CIR/CodeGenOpenACC/combined.cpp index b8140335f7c29..98f2ffd2cb12a 100644 --- a/clang/test/CIR/CodeGenOpenACC/combined.cpp +++ b/clang/test/CIR/CodeGenOpenACC/combined.cpp @@ -191,7 +191,7 @@ extern "C" void acc_combined(int N, int cond) { #pragma acc serial loop self(N) for(unsigned I = 0; I < N; ++I); // CHECK-NEXT: %[[N_LOAD:.*]] = cir.load{{.*}} %[[ALLOCA_N]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[N_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[N_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.serial combined(loop) self(%[[CONV_CAST]]) { // CHECK-NEXT: acc.loop combined(serial) { @@ -203,7 +203,7 @@ extern "C" void acc_combined(int N, int cond) { #pragma acc parallel loop if(N) for(unsigned I = 0; I < N; ++I); // CHECK-NEXT: %[[N_LOAD:.*]] = cir.load{{.*}} %[[ALLOCA_N]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[N_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[N_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.parallel combined(loop) if(%[[CONV_CAST]]) { // CHECK-NEXT: acc.loop combined(parallel) { @@ -215,7 +215,7 @@ extern "C" void acc_combined(int N, int cond) { #pragma acc serial loop if(1) for(unsigned I = 0; I < N; ++I); // CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.serial combined(loop) if(%[[CONV_CAST]]) { // CHECK-NEXT: acc.loop combined(serial) { diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause-templates.cpp b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause-templates.cpp index 2ab806429ad30..ed968e21630cc 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause-templates.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause-templates.cpp @@ -13,14 +13,24 @@ struct HasDtor { ~HasDtor(); }; -// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.firstprivate.init"] + +// CHECK: acc.firstprivate.recipe @firstprivatization__ZTS13CopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: 
!cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[LOAD:.*]] = cir.load {{.*}} %[[ARG_FROM]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS14NonDefaultCtor : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: acc.yield // CHECK-NEXT: } // @@ -38,23 +48,14 @@ struct HasDtor { // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS14NonDefaultCtor : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } -// -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS13CopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[LOAD:.*]] = cir.load {{.*}} %[[ARG_FROM]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c index ec4f330dade6a..de6e7b0314fa9 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.c @@ -1,47 +1,83 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct NoCopyConstruct {}; -// CHECK: acc.firstprivate.recipe 
@firstprivatization__ZTSA5_15NoCopyConstruct : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSf : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS15NoCopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.copy %[[ARG_FROM]] to %[[ARG_TO]] : !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.copy %[[FROM_OFFSET:.*]] to %[[DECAY_TO]] : !cir.ptr +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = 
cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr -// +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: 
%[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // CHECK-NEXT: } // @@ -51,123 +87,87 @@ struct NoCopyConstruct {}; // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { // CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr // CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float // CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr // CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float // CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr // CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float // CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr // CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] 
: !cir.ptr, !cir.float // CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr // CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float // CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_15NoCopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.copy %[[FROM_OFFSET:.*]] to %[[TO_DECAY]] : !cir.ptr // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: 
cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } -// -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS15NoCopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.copy %[[ARG_FROM]] to %[[ARG_TO]] : !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } -// -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSf : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// 
CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.copy %[[FROM_OFFSET]] to %[[TO_OFFSET]] : !cir.ptr // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp index 5ec85255a21f6..fca3ca85c9edf 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-firstprivate-clause.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct NoCopyConstruct {}; @@ -15,153 +15,163 @@ struct HasDtor { ~HasDtor(); }; -// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSA5_7HasDtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[DECAY_TO:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[DECAY_TO]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call 
@_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } // -// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSf : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } // -// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS15NoCopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } // -// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS13CopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call 
@_ZN13CopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } // +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS14NonDefaultCtor : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () // CHECK-NEXT: acc.yield +// CHECK-NEXT: } // +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS7HasDtor : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_HasDtor, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } copy { +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: acc.yield // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] -// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr) -> () -// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -> () // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_14NonDefaultCtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: 
%[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay 
%[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_13CopyConstruct : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : 
!cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () -// +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load 
{{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // CHECK-NEXT: } // @@ -171,200 +181,190 @@ struct HasDtor { // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { // CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : 
!cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr // CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_f : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_13CopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: 
%[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) : (!cir.ptr, !cir.ptr) -> () +// // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_i : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_14NonDefaultCtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[TO_DECAY:.*]] = 
cir.cast(array_to_ptrdecay, %[[ARG_TO]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> -// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[FROM_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[FROM_DECAY]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr // CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr // CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = 
cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // // CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_TO]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr // CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> -// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast(array_to_ptrdecay, %[[ARG_FROM]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[FROM_OFFSET]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[TO_OFFSET]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS7HasDtor : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_HasDtor, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -> () // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS14NonDefaultCtor : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.firstprivate.init"] +// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSA5_7HasDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr> {{.*}}, %[[ARG_TO:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TO_DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG_TO]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: 
%[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ZERO]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_DECAY]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS13CopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN13CopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE_2:.*]] = cir.const #cir.int<1> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[ONE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTS15NoCopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN15NoCopyConstructC1ERKS_(%[[ARG_TO]], %[[ARG_FROM]]) nothrow : (!cir.ptr, !cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: %[[TWO:.*]] = cir.const #cir.int<2> +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[TWO]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[TWO_2:.*]] = cir.const #cir.int<2> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[TWO_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSf : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.firstprivate.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } +// CHECK-NEXT: %[[THREE:.*]] = cir.const #cir.int<3> +// CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[THREE]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[THREE_2:.*]] = cir.const #cir.int<3> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[THREE_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () +// +// CHECK-NEXT: %[[FOUR:.*]] = cir.const #cir.int<4> +// 
CHECK-NEXT: %[[TO_OFFSET:.*]] = cir.ptr_stride(%[[TO_DECAY]] : !cir.ptr, %[[FOUR]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[FOUR_2:.*]] = cir.const #cir.int<4> +// CHECK-NEXT: %[[DECAY_FROM:.*]] = cir.cast array_to_ptrdecay %[[ARG_FROM]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[FROM_OFFSET:.*]] = cir.ptr_stride(%[[DECAY_FROM]] : !cir.ptr, %[[FOUR_2]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorC1ERKS_(%[[TO_OFFSET]], %[[FROM_OFFSET]]) nothrow : (!cir.ptr, !cir.ptr) -> () // -// CHECK-NEXT: acc.firstprivate.recipe @firstprivatization__ZTSi : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.firstprivate.init"] // CHECK-NEXT: acc.yield -// CHECK-NEXT: } copy { -// CHECK-NEXT: ^bb0(%[[ARG_FROM:.*]]: !cir.ptr {{.*}}, %[[ARG_TO:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[FROM_LOAD:.*]] = cir.load {{.*}}%[[ARG_FROM]] : !cir.ptr, !s32i -// CHECK-NEXT: cir.store {{.*}} %[[FROM_LOAD]], %[[ARG_TO]] : !s32i, !cir.ptr +// +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] +// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGenOpenACC/compute-private-clause-templates.cpp b/clang/test/CIR/CodeGenOpenACC/compute-private-clause-templates.cpp index 1b4ec4b7abeb1..b0b47ad8129fd 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-private-clause-templates.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-private-clause-templates.cpp @@ -13,9 +13,16 @@ struct HasDtor { ~HasDtor(); }; -// CHECK: acc.private.recipe @privatization__ZTSi : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] +// CHECK: acc.private.recipe @privatization__ZTS13CopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.private.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.private.recipe @privatization__ZTS14NonDefaultCtor : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.private.init", init] +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ALLOCA]]) : (!cir.ptr) -> () // CHECK-NEXT: acc.yield // CHECK-NEXT: } // @@ -29,16 +36,9 @@ struct HasDtor { // CHECK-NEXT: 
acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTS14NonDefaultCtor : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.private.init", init] -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ALLOCA]]) : (!cir.ptr) -> () -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } -// -// CHECK-NEXT: acc.private.recipe @privatization__ZTS13CopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.private.init"] +// CHECK-NEXT: acc.private.recipe @privatization__ZTSi : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } diff --git a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c index a128bd3b78e1f..34b8b6995792b 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.c @@ -2,39 +2,45 @@ struct NoCopyConstruct {}; -// CHECK: acc.private.recipe @privatization__ZTSA5_15NoCopyConstruct : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } -// -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_f : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// int +// CHECK: acc.private.recipe @privatization__ZTSi : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_i : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// float +// CHECK-NEXT: acc.private.recipe @privatization__ZTSf : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // +// NoCopyConstruct // CHECK-NEXT: acc.private.recipe @privatization__ZTS15NoCopyConstruct : !cir.ptr init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): // CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSf : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.private.init"] +// int[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSi : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] +// float[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_f : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: 
!cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// +// NoCopyConstruct[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -94,7 +100,7 @@ void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someIntArr[1]"} - // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.serial private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(someFloatArr[1]) @@ -106,7 +112,7 @@ void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someFloatArr[1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc serial private(noCopyArr[1]) @@ -118,7 +124,7 @@ void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1]"} - // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.serial private(@privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc serial private(someIntArr[1], someFloatArr[1], noCopyArr[1]) @@ -144,9 +150,9 @@ void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1]"} - // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>) + // CHECK-NEXT: acc.serial private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> 
%[[PRIVATE3]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc @@ -160,7 +166,7 @@ void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someIntArr[1:1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc serial private(someFloatArr[1:1]) @@ -173,7 +179,7 @@ void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someFloatArr[1:1]"} - // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.serial private(@privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(noCopyArr[1:1]) @@ -186,7 +192,7 @@ void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1:1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(someIntArr[1:1], someFloatArr[1:1], noCopyArr[1:1]) @@ -215,9 +221,9 @@ void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE3:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1:1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc } diff --git a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp index b4d947b91dbe8..af84684476322 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-private-clause.cpp @@ -15,76 +15,34 @@ struct HasDtor { ~HasDtor(); }; -// CHECK: acc.private.recipe 
@privatization__ZTSA5_7HasDtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] -// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr) -> () -// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } -// -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_14NonDefaultCtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init", init] -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !u64i -// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] -// CHECK-NEXT: cir.store %[[ARRPTR]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ELEM_LOAD]]) : (!cir.ptr) -> () -// CHECK-NEXT: %[[ONE_CONST:.*]] = cir.const #cir.int<1> : !u64i -// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[ONE_CONST]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[LAST_ELEM]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK: acc.private.recipe @privatization__ZTSi : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_13CopyConstruct : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: acc.private.recipe @privatization__ZTSf : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // 
-// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_15NoCopyConstruct : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: acc.private.recipe @privatization__ZTS15NoCopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_f : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: acc.private.recipe @privatization__ZTS13CopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_i : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: acc.private.recipe @privatization__ZTS14NonDefaultCtor : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.private.init", init] +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ALLOCA]]) : (!cir.ptr) -> () // CHECK-NEXT: acc.yield // CHECK-NEXT: } // @@ -98,34 +56,92 @@ struct HasDtor { // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTS14NonDefaultCtor : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.private.init", init] -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ALLOCA]]) : (!cir.ptr) -> () +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTS13CopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.private.init"] +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_f : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTS15NoCopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.private.init"] +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSf : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, 
["openacc.private.init"] +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_13CopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSi : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_14NonDefaultCtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ITR_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// +// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSA5_7HasDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB]], %[[ITR]] : 
!u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ITR_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -222,7 +238,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someIntArr[1]"} - // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.serial private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(someFloatArr[1]) @@ -234,7 +250,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someFloatArr[1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc serial private(noCopyArr[1]) @@ -246,7 +262,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1]"} - // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.serial private(@privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(hasCopyArr[1]) @@ -258,7 +274,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[HASCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "hasCopyArr[1]"} - // CHECK-NEXT: acc.parallel 
private(@privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(notDefCtorArr[1]) @@ -270,7 +286,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOTDEFCTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "notDefCtorArr[1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(dtorArr[1]) @@ -282,7 +298,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc serial private(someIntArr[1], someFloatArr[1], noCopyArr[1], hasCopyArr[1], notDefCtorArr[1], dtorArr[1]) @@ -329,12 +345,12 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1]"} - // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) + // CHECK-NEXT: acc.serial private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc @@ -348,7 +364,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: 
%[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someIntArr[1:1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc serial private(someFloatArr[1:1]) @@ -361,7 +377,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someFloatArr[1:1]"} - // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.serial private(@privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(noCopyArr[1:1]) @@ -374,7 +390,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1:1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc serial private(hasCopyArr[1:1]) @@ -387,7 +403,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[HASCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "hasCopyArr[1:1]"} - // CHECK-NEXT: acc.serial private(@privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.serial private(@privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(notDefCtorArr[1:1]) @@ -400,7 +416,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOTDEFCTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "notDefCtorArr[1:1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(dtorArr[1:1]) @@ -413,7 +429,7 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: 
%[[PRIVATE:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1:1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc #pragma acc parallel private(someIntArr[1:1], someFloatArr[1:1], noCopyArr[1:1], hasCopyArr[1:1], notDefCtorArr[1:1], dtorArr[1:1]) @@ -466,12 +482,12 @@ extern "C" void acc_compute() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1:1]"} - // CHECK-NEXT: acc.parallel private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) + // CHECK-NEXT: acc.parallel private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) // CHECK-NEXT: acc.yield // CHECK-NEXT: } loc } diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c index 05b58586da945..e357f440eb4c3 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -std=c23 -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -std=c23 -triple x86_64-linux-pc %s -o - | FileCheck %s struct DefaultOperators { int i; @@ -8,12 +8,259 @@ struct DefaultOperators { bool b; }; +void acc_compute() { + struct DefaultOperators someVar; + struct DefaultOperators someVarArr[5]; +#pragma acc parallel reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: 
%[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(*:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: 
%[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = 
cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr 
+// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: 
%[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +#pragma acc parallel reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -37,11 +284,12 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr @@ -138,157 +386,101 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } + ; +#pragma acc parallel reduction(max:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: 
!cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators -// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators -// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +// CHECK-NEXT: acc.reduction.recipe 
@reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LEAST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member 
%[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : 
!cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = 
cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -297,11 +489,12 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + ; +#pragma acc parallel reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr @@ -398,99 +591,100 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const 
#cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LEAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = 
cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : 
!cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -499,11 +693,72 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr 
+// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.zero : !rec_DefaultOperators +// CHECK-NEXT: cir.store{{.*}} %[[ZERO]], %[[TEMP_LOAD]] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr @@ -600,12 +855,13 @@ struct DefaultOperators 
{ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(||:someVarArr) +// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -629,281 +885,6 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : 
!cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store 
{{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], 
%[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// 
CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -void acc_compute() { - struct DefaultOperators someVar; - struct DefaultOperators someVarArr[5]; -#pragma acc parallel reduction(+:someVar) - ; -#pragma acc parallel reduction(*:someVar) - ; -#pragma acc parallel reduction(max:someVar) - ; -#pragma acc parallel reduction(min:someVar) - ; -#pragma acc parallel reduction(&:someVar) - ; -#pragma acc parallel reduction(|:someVar) - ; -#pragma acc parallel reduction(^:someVar) - ; -#pragma acc parallel reduction(&&:someVar) - ; -#pragma acc parallel reduction(||:someVar) - ; - -#pragma acc parallel reduction(+:someVarArr) - ; -#pragma acc parallel reduction(*:someVarArr) - ; -#pragma acc parallel reduction(max:someVarArr) - ; -#pragma acc parallel reduction(min:someVarArr) - ; -#pragma acc parallel reduction(&:someVarArr) - ; -#pragma acc parallel reduction(|:someVarArr) - ; -#pragma acc 
parallel reduction(^:someVarArr)
- ;
-#pragma acc parallel reduction(&&:someVarArr)
- ;
-#pragma acc parallel reduction(||:someVarArr)
 ;
 #pragma acc parallel reduction(+:someVarArr[2])
@@ -943,5 +924,9 @@ void acc_compute() {
 ;
 #pragma acc parallel reduction(||:someVarArr[1:1])
 ;
+
+ // TODO OpenACC: When pointers/arrays are handled correctly, we should see all
+ // of the above repeated for arrays/pointers.
+ // CHECK-NEXT: cir.func {{.*}}@acc_compute
 }
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
index d6cac5f46a024..e0098bc625459 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
 struct DefaultOperators {
 int i;
@@ -8,12 +8,262 @@ struct DefaultOperators {
 bool b;
 };
+template <typename T>
+void acc_compute() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc parallel reduction(+:someVar)
+ ;
+// CHECK: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
+// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr
+// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr
+// CHECK-NEXT: }
+#pragma acc parallel reduction(*:someVar)
+
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+//
CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const 
#cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVar) + +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = 
cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = 
cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +#pragma acc parallel reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca 
!cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -34,7 +284,6 @@ struct DefaultOperators { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -51,11 +300,12 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr @@ -152,184 +402,100 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : 
!u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : 
!cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : 
!cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LEAST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // 
CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name 
= "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -338,11 +504,12 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + ; +#pragma acc parallel reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr @@ -439,99 +606,100 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const 
#cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LEAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double 
+// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: 
%[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -540,11 +708,99 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: 
acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, 
!cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr @@ -641,12 +897,13 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // 
CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -667,6 +924,7 @@ struct DefaultOperators { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -683,283 +941,6 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// 
CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: 
%[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const 
#cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// 
CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - - -template -void acc_compute() { - T someVar; - T someVarArr[5]; -#pragma acc parallel reduction(+:someVar) - ; -#pragma acc parallel reduction(*:someVar) - ; -#pragma acc parallel reduction(max:someVar) - ; -#pragma acc parallel reduction(min:someVar) - ; -#pragma acc parallel reduction(&:someVar) - ; -#pragma acc parallel reduction(|:someVar) - ; -#pragma acc parallel reduction(^:someVar) - ; -#pragma acc parallel reduction(&&:someVar) - ; -#pragma acc parallel reduction(||:someVar) - ; - -#pragma acc parallel reduction(+:someVarArr) - ; -#pragma acc parallel reduction(*:someVarArr) - ; -#pragma acc parallel reduction(max:someVarArr) - ; -#pragma acc parallel reduction(min:someVarArr) - ; -#pragma acc parallel reduction(&:someVarArr) - ; -#pragma acc parallel 
reduction(|:someVarArr) - ; -#pragma acc parallel reduction(^:someVarArr) - ; -#pragma acc parallel reduction(&&:someVarArr) - ; -#pragma acc parallel reduction(||:someVarArr) ; #pragma acc parallel reduction(+:someVarArr[2]) @@ -999,6 +980,9 @@ void acc_compute() { ; #pragma acc parallel reduction(||:someVarArr[1:1]) ; + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@_Z11acc_compute } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c index ad21e23d28408..5336fadc9fd0c 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c @@ -1,11 +1,137 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +void acc_compute() { + float someVar; + float someVarArr[5]; +#pragma acc parallel reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(*:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: 
!cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(|:someVar) -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init { +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(||:someVar) +// CHECK-NEXT: 
acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; + +#pragma acc parallel reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -29,11 +155,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -59,28 +186,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : 
!cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -88,28 +217,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(min:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const 
#cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -117,11 +248,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + ; +#pragma acc parallel reduction(&:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -147,29 +279,29 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// 
CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -177,29 +309,29 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = 
cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -207,11 +339,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // 
CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -237,12 +370,13 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(||:someVarArr) +// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -266,159 +400,6 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, 
["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// 
CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -void acc_compute() { - float someVar; - float someVarArr[5]; -#pragma acc parallel reduction(+:someVar) - ; -#pragma acc parallel reduction(*:someVar) - ; -#pragma acc parallel reduction(max:someVar) - ; -#pragma acc parallel reduction(min:someVar) - ; -#pragma acc parallel reduction(&:someVar) - ; -#pragma acc parallel reduction(|:someVar) - ; -#pragma acc parallel reduction(^:someVar) - ; -#pragma acc parallel reduction(&&:someVar) - ; -#pragma acc parallel reduction(||:someVar) - ; - -#pragma acc parallel reduction(+:someVarArr) - ; -#pragma acc parallel reduction(*:someVarArr) - ; -#pragma acc parallel reduction(max:someVarArr) - ; -#pragma acc parallel reduction(min:someVarArr) - ; -#pragma acc parallel reduction(&:someVarArr) - ; -#pragma acc parallel reduction(|:someVarArr) - ; -#pragma acc parallel reduction(^:someVarArr) - ; -#pragma acc parallel reduction(&&:someVarArr) - ; -#pragma acc parallel reduction(||:someVarArr) ; #pragma acc parallel reduction(+:someVarArr[2]) @@ -458,4 +439,7 @@ void acc_compute() { ; #pragma acc parallel reduction(||:someVarArr[1:1]) ; + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@acc_compute } diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp index e823233de16d8..a51388203a3d8 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp @@ -1,11 +1,138 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +template +void acc_compute() { + T someVar; + T someVarArr[5]; +#pragma acc parallel reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(*:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting 
combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// 
CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; + +#pragma acc parallel reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -29,11 +156,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -59,28 +187,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: 
acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -88,28 +218,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(min:someVarArr) +// 
CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -117,11 +249,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + ; +#pragma acc parallel reduction(&:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, 
["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -147,29 +280,29 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// 
CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -177,29 +310,29 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = 
cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -207,11 +340,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -237,12 +371,13 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -266,160 +401,6 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting 
combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: 
^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -template -void acc_compute() { - T someVar; - T someVarArr[5]; -#pragma acc parallel reduction(+:someVar) - ; -#pragma acc parallel reduction(*:someVar) - ; -#pragma acc parallel reduction(max:someVar) - ; -#pragma acc parallel reduction(min:someVar) - ; -#pragma acc parallel reduction(&:someVar) - ; -#pragma acc parallel reduction(|:someVar) - ; -#pragma acc parallel reduction(^:someVar) - ; -#pragma acc parallel reduction(&&:someVar) - ; -#pragma acc parallel reduction(||:someVar) - ; - -#pragma acc parallel reduction(+:someVarArr) - ; -#pragma acc parallel reduction(*:someVarArr) - ; -#pragma acc parallel reduction(max:someVarArr) - ; -#pragma acc parallel reduction(min:someVarArr) - ; -#pragma acc parallel reduction(&:someVarArr) - ; -#pragma acc parallel reduction(|:someVarArr) - ; -#pragma acc parallel reduction(^:someVarArr) - ; -#pragma acc parallel reduction(&&:someVarArr) - ; -#pragma acc parallel reduction(||:someVarArr) ; #pragma acc parallel reduction(+:someVarArr[2]) @@ -459,6 +440,9 @@ void acc_compute() { ; #pragma acc parallel reduction(||:someVarArr[1:1]) ; + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. 
+ // CHECK-NEXT: cir.func {{.*}}@_Z11acc_compute } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp index 316f949546714..1968c0ac740dd 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsInline { int i; @@ -21,258 +21,296 @@ struct HasOperatorsInline { HasOperatorsInline &operator=(HasOperatorsInline& other); }; - -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +template +void acc_compute() { + T someVar; + T someVarArr[5]; +#pragma acc parallel reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// 
CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr + ; +#pragma acc parallel reduction(*:someVar) +// CHECK: acc.reduction.recipe @reduction_mul__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store 
{{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: 
%[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: 
cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// 
CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// 
CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // 
CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr + ; +#pragma acc parallel reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: 
%[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : 
!cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } + ; -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +#pragma acc parallel reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -293,7 +331,6 @@ struct HasOperatorsInline { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -310,130 +347,9 @@ struct HasOperatorsInline { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", 
init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: 
%[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { 
-// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -451,99 +367,100 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, 
!cir.ptr // -// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// 
CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member 
%[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -554,7 +471,7 @@ struct HasOperatorsInline { // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -572,11 +489,12 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - + ; +#pragma acc parallel reduction(max:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr @@ -675,7 +593,7 @@ struct HasOperatorsInline { // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : 
!u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -693,99 +611,222 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(min:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : 
!s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// 
CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : 
!s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : 
!s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const 
#cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -796,7 +837,7 @@ struct HasOperatorsInline { // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -814,12 +855,13 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(|:someVarArr) +// 
CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -840,6 +882,7 @@ struct HasOperatorsInline { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -856,9 +899,9 @@ struct HasOperatorsInline { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -876,318 +919,256 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = 
cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: 
cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + ; +#pragma acc parallel reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = 
cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, 
!cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here 
-// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + ; +#pragma acc parallel reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, 
!cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// 
CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: 
%[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + ; +#pragma acc parallel reduction(||:someVarArr) +// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// 
CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -template -void acc_compute() { - T someVar; - T someVarArr[5]; -#pragma acc parallel reduction(+:someVar) - ; -#pragma acc parallel reduction(*:someVar) - ; -#pragma acc parallel reduction(max:someVar) - ; -#pragma acc parallel reduction(min:someVar) - ; -#pragma acc parallel reduction(&:someVar) - ; -#pragma acc parallel reduction(|:someVar) - ; -#pragma acc parallel reduction(^:someVar) - ; -#pragma acc parallel reduction(&&:someVar) - ; -#pragma acc parallel reduction(||:someVar) - ; - -#pragma acc parallel reduction(+:someVarArr) - ; -#pragma acc parallel reduction(*:someVarArr) - ; -#pragma acc parallel reduction(max:someVarArr) - ; -#pragma acc parallel reduction(min:someVarArr) - ; -#pragma acc parallel reduction(&:someVarArr) - ; -#pragma acc parallel reduction(|:someVarArr) - ; -#pragma acc parallel reduction(^:someVarArr) - ; -#pragma acc parallel reduction(&&:someVarArr) - ; -#pragma acc parallel reduction(||:someVarArr) ; #pragma acc parallel reduction(+:someVarArr[2]) @@ -1227,6 +1208,9 @@ void acc_compute() { ; #pragma acc parallel reduction(||:someVarArr[1:1]) ; + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. 
+ // CHECK-NEXT: cir.func {{.*}}@_Z11acc_compute } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c index f768ceb5816ca..f63e340b29aa7 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c @@ -1,11 +1,138 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +void acc_compute() { + int someVar; + int someVarArr[5]; +#pragma acc parallel reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(*:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel 
reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { +#pragma acc parallel reduction(+:someVarArr) 
+// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -20,7 +147,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -29,11 +156,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -59,87 +187,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, 
%[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = 
cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -147,11 +218,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + ; +#pragma acc parallel reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -177,29 +249,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i 
+// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -207,11 +280,72 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: 
Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -237,12 +371,13 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), 
!cir.ptr @@ -257,7 +392,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -266,161 +401,6 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, 
%[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -void acc_compute() { - int someVar; - int someVarArr[5]; -#pragma acc parallel reduction(+:someVar) - ; -#pragma acc parallel reduction(*:someVar) - ; -#pragma acc parallel reduction(max:someVar) - ; -#pragma acc parallel reduction(min:someVar) - ; -#pragma acc parallel reduction(&:someVar) - ; -#pragma acc parallel reduction(|:someVar) - ; -#pragma acc parallel reduction(^:someVar) - ; -#pragma acc parallel reduction(&&:someVar) - ; -#pragma acc parallel reduction(||:someVar) - ; - -#pragma acc parallel reduction(+:someVarArr) - ; -#pragma acc parallel reduction(*:someVarArr) - ; -#pragma acc parallel reduction(max:someVarArr) - ; -#pragma acc parallel reduction(min:someVarArr) - ; -#pragma acc parallel reduction(&:someVarArr) - ; -#pragma acc parallel reduction(|:someVarArr) - ; -#pragma acc parallel reduction(^:someVarArr) 
- ; -#pragma acc parallel reduction(&&:someVarArr) - ; -#pragma acc parallel reduction(||:someVarArr) ; #pragma acc parallel reduction(+:someVarArr[2]) @@ -460,4 +440,7 @@ void acc_compute() { ; #pragma acc parallel reduction(||:someVarArr[1:1]) ; + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@acc_compute } diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp index 285606816ccc7..48e5ac94627f5 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp @@ -1,11 +1,140 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +template +void acc_compute() { + T someVar; + T someVarArr[5]; +#pragma acc parallel reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(*:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// 
CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store 
{{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; + +#pragma acc parallel reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -20,7 +149,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -29,11 +158,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -59,87 +189,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// 
CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: 
%[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -147,11 +220,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + ; +#pragma acc parallel reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -177,29 +251,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr // 
CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -207,11 +282,72 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] 
= cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -237,12 +373,13 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, 
%[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -257,7 +394,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -266,162 +403,6 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { -// 
CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -template -void acc_compute() { - T someVar; - T someVarArr[5]; -#pragma acc parallel reduction(+:someVar) - ; -#pragma acc parallel reduction(*:someVar) - ; -#pragma acc parallel reduction(max:someVar) - ; -#pragma acc parallel reduction(min:someVar) - ; -#pragma acc parallel reduction(&:someVar) - ; -#pragma acc parallel reduction(|:someVar) - ; -#pragma acc parallel reduction(^:someVar) - ; -#pragma acc parallel reduction(&&:someVar) - ; -#pragma acc 
parallel reduction(||:someVar) - ; - -#pragma acc parallel reduction(+:someVarArr) - ; -#pragma acc parallel reduction(*:someVarArr) - ; -#pragma acc parallel reduction(max:someVarArr) - ; -#pragma acc parallel reduction(min:someVarArr) - ; -#pragma acc parallel reduction(&:someVarArr) - ; -#pragma acc parallel reduction(|:someVarArr) - ; -#pragma acc parallel reduction(^:someVarArr) - ; -#pragma acc parallel reduction(&&:someVarArr) - ; -#pragma acc parallel reduction(||:someVarArr) ; #pragma acc parallel reduction(+:someVarArr[2]) @@ -461,6 +442,9 @@ void acc_compute() { ; #pragma acc parallel reduction(||:someVarArr[1:1]) ; + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@_Z11acc_compute } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp index a058e589b1894..6d204bc9060b0 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsOutline { int i; unsigned u; @@ -20,257 +20,297 @@ bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &); // For min/max HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +template +void acc_compute() { + T someVar; + T someVarArr[5]; +#pragma acc parallel reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: 
cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } + ; -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr> 
reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr +#pragma acc parallel reduction(*:someVar) +// CHECK: acc.reduction.recipe @reduction_mul__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: 
cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { + ; +#pragma acc parallel reduction(max:someVar) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr 
{{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name 
= "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr 
+// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: 
%[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = 
cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr + ; +#pragma acc parallel reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: 
%[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } + ; -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +#pragma acc parallel reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -291,7 +331,6 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -308,130 +347,9 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// 
CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member 
%[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr 
-// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -449,99 +367,100 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = 
cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = 
"i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: 
cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -552,7 +471,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -570,11 +489,12 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - + ; +#pragma acc parallel reduction(max:someVarArr) // CHECK-NEXT: acc.reduction.recipe 
@reduction_max__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr @@ -673,7 +593,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -691,99 +611,222 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(min:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] 
: !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: 
cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// 
CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] 
{name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, 
!cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// 
CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -794,7 +837,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -812,12 +855,13 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -838,6 +882,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -854,9 +899,9 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: 
cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -874,318 +919,256 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// 
CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + ; +#pragma acc parallel reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// 
CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : 
!cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", 
init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + ; +#pragma acc parallel reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe 
@reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const 
#cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// 
CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } + ; +#pragma acc parallel reduction(||:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = 
cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], 
%[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -template -void acc_compute() { - T someVar; - T someVarArr[5]; -#pragma acc parallel reduction(+:someVar) - ; -#pragma acc parallel reduction(*:someVar) - ; -#pragma acc parallel reduction(max:someVar) - ; -#pragma acc parallel reduction(min:someVar) - ; -#pragma acc parallel reduction(&:someVar) - ; -#pragma acc parallel reduction(|:someVar) - ; -#pragma acc parallel reduction(^:someVar) - ; -#pragma acc parallel reduction(&&:someVar) - ; -#pragma acc parallel reduction(||:someVar) - ; - -#pragma acc parallel reduction(+:someVarArr) - ; -#pragma acc parallel reduction(*:someVarArr) - ; -#pragma acc parallel reduction(max:someVarArr) - ; -#pragma acc parallel reduction(min:someVarArr) - ; -#pragma acc parallel reduction(&:someVarArr) - ; -#pragma acc parallel reduction(|:someVarArr) - ; -#pragma acc parallel reduction(^:someVarArr) - ; -#pragma acc parallel reduction(&&:someVarArr) - ; -#pragma acc parallel reduction(||:someVarArr) ; #pragma acc parallel reduction(+:someVarArr[2]) @@ -1225,6 +1208,10 @@ void acc_compute() { ; #pragma acc parallel reduction(||:someVarArr[1:1]) ; + + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@_Z11acc_compute } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c index e3ee97ff30fb2..35a7e7a951f74 100644 --- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c +++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c @@ -1,11 +1,138 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +void acc_compute() { + unsigned int someVar; + unsigned int someVarArr[5]; +#pragma acc parallel reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTSj : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !u32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(*:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSj : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !u32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// 
CHECK-NEXT: } + ; +#pragma acc parallel reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSj : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !u32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSj : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !u32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSj : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !u32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSj : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !u32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSj : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !u32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSj : !cir.ptr 
reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !u32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSj : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !u32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + ; -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_j : !cir.ptr> reduction_operator init { +#pragma acc parallel reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -20,7 +147,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -29,11 +156,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_j : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !u32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -59,87 +187,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield 
%[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_j : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_j : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// 
CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_j : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !u32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -147,11 +218,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + ; +#pragma acc parallel reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i // CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !u32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const 
#cir.int<1> : !s64i @@ -177,29 +249,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_j : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !u32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !u32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -207,11 +280,72 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_j : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// 
CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_j : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + ; +#pragma acc parallel reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_j : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, 
!cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !u32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -237,12 +371,13 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_j : !cir.ptr> reduction_operator init { + ; +#pragma acc parallel reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_j : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -257,7 +392,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -266,161 +401,6 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSj : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !u32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSj : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !u32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSj : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !u32i, !cir.ptr 
-// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSj : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !u32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSj : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !u32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSj : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !u32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSj : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !u32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSj : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !u32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSj : !cir.ptr reduction_operator init { 
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !u32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !u32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -void acc_compute() { - unsigned int someVar; - unsigned int someVarArr[5]; -#pragma acc parallel reduction(+:someVar) - ; -#pragma acc parallel reduction(*:someVar) - ; -#pragma acc parallel reduction(max:someVar) - ; -#pragma acc parallel reduction(min:someVar) - ; -#pragma acc parallel reduction(&:someVar) - ; -#pragma acc parallel reduction(|:someVar) - ; -#pragma acc parallel reduction(^:someVar) - ; -#pragma acc parallel reduction(&&:someVar) - ; -#pragma acc parallel reduction(||:someVar) - ; - -#pragma acc parallel reduction(+:someVarArr) - ; -#pragma acc parallel reduction(*:someVarArr) - ; -#pragma acc parallel reduction(max:someVarArr) - ; -#pragma acc parallel reduction(min:someVarArr) - ; -#pragma acc parallel reduction(&:someVarArr) - ; -#pragma acc parallel reduction(|:someVarArr) - ; -#pragma acc parallel reduction(^:someVarArr) - ; -#pragma acc parallel reduction(&&:someVarArr) - ; -#pragma acc parallel reduction(||:someVarArr) ; #pragma acc parallel reduction(+:someVarArr[2]) @@ -460,4 +440,7 @@ void acc_compute() { ; #pragma acc parallel reduction(||:someVarArr[1:1]) ; + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. 
+ // CHECK-NEXT: cir.func {{.*}}@acc_compute } diff --git a/clang/test/CIR/CodeGenOpenACC/data.c b/clang/test/CIR/CodeGenOpenACC/data.c index 1f6a76ce1ea7c..4e13f17f4bfd7 100644 --- a/clang/test/CIR/CodeGenOpenACC/data.c +++ b/clang/test/CIR/CodeGenOpenACC/data.c @@ -87,7 +87,7 @@ void acc_data(int cond) { #pragma acc data default(none) if(cond) {} // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.data if(%[[CONV_CAST]]) { // CHECK-NEXT: acc.terminator @@ -96,7 +96,7 @@ void acc_data(int cond) { #pragma acc data default(none) if(1) {} // CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.data if(%[[CONV_CAST]]) { // CHECK-NEXT: acc.terminator diff --git a/clang/test/CIR/CodeGenOpenACC/host_data.c b/clang/test/CIR/CodeGenOpenACC/host_data.c index fa06d2a1cbd26..bcfa175f4e525 100644 --- a/clang/test/CIR/CodeGenOpenACC/host_data.c +++ b/clang/test/CIR/CodeGenOpenACC/host_data.c @@ -38,7 +38,7 @@ void acc_host_data(int cond, int var1, int var2, int *arr) { // CHECK-NEXT: %[[USE_DEV1:.*]] = acc.use_device varPtr(%[[V1]] : !cir.ptr) -> !cir.ptr {name = "var1"} // CHECK-NEXT: %[[USE_DEV2:.*]] = acc.use_device varPtr(%[[V2]] : !cir.ptr) -> !cir.ptr {name = "var2"} // CHECK-NEXT: %[[LOAD_COND:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[COND_BOOL:.*]] = cir.cast(int_to_bool, %[[LOAD_COND]] : !s32i), !cir.bool + // CHECK-NEXT: %[[COND_BOOL:.*]] = cir.cast int_to_bool %[[LOAD_COND]] : !s32i -> !cir.bool // CHECK-NEXT: %[[COND_CAST:.*]] = builtin.unrealized_conversion_cast %[[COND_BOOL]] : !cir.bool to i1 // CHECK-NEXT: acc.host_data if(%[[COND_CAST]]) dataOperands(%[[USE_DEV1]], %[[USE_DEV2]] : !cir.ptr, !cir.ptr) { // CHECK-NEXT: acc.terminator @@ -49,7 +49,7 @@ void acc_host_data(int cond, int var1, int var2, int *arr) { // CHECK-NEXT: %[[USE_DEV1:.*]] = acc.use_device varPtr(%[[V1]] : !cir.ptr) -> !cir.ptr {name = "var1"} // CHECK-NEXT: %[[USE_DEV2:.*]] = acc.use_device varPtr(%[[V2]] : !cir.ptr) -> !cir.ptr {name = "var2"} // CHECK-NEXT: %[[LOAD_COND:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[COND_BOOL:.*]] = cir.cast(int_to_bool, %[[LOAD_COND]] : !s32i), !cir.bool + // CHECK-NEXT: %[[COND_BOOL:.*]] = cir.cast int_to_bool %[[LOAD_COND]] : !s32i -> !cir.bool // CHECK-NEXT: %[[COND_CAST:.*]] = builtin.unrealized_conversion_cast %[[COND_BOOL]] : !cir.bool to i1 // CHECK-NEXT: acc.host_data if(%[[COND_CAST]]) dataOperands(%[[USE_DEV1]], %[[USE_DEV2]] : !cir.ptr, !cir.ptr) { // CHECK-NEXT: acc.terminator diff --git a/clang/test/CIR/CodeGenOpenACC/init.c b/clang/test/CIR/CodeGenOpenACC/init.c index 805fb08dbf487..829850f2c82d6 100644 --- a/clang/test/CIR/CodeGenOpenACC/init.c +++ b/clang/test/CIR/CodeGenOpenACC/init.c @@ -18,13 +18,13 @@ void acc_init(int cond) { #pragma acc init if(cond) // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[COND_CAST:.*]] 
= cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.init if(%[[BOOL_CONV]]) #pragma acc init if(1) // CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i - // CHECK-NEXT: %[[ONE_TO_BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[ONE_TO_BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[ONE_TO_BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.init if(%[[BOOL_CONV]]) @@ -40,7 +40,7 @@ void acc_init(int cond) { #pragma acc init if(cond) device_num(cond) device_type(*) // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1 // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i // CHECK-NEXT: %[[COND_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_LOAD]] : !s32i to si32 diff --git a/clang/test/CIR/CodeGenOpenACC/kernels.c b/clang/test/CIR/CodeGenOpenACC/kernels.c index 9b10b7489e814..9f33e54a345b1 100644 --- a/clang/test/CIR/CodeGenOpenACC/kernels.c +++ b/clang/test/CIR/CodeGenOpenACC/kernels.c @@ -29,7 +29,7 @@ void acc_kernels(int cond) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.while { // CHECK-NEXT: %[[INT:.*]] = cir.const #cir.int<1> - // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[INT]] : + // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[INT]] // CHECK-NEXT: cir.condition(%[[CAST]]) // CHECK-NEXT: } do { // CHECK-NEXT: cir.yield @@ -49,7 +49,7 @@ void acc_kernels(int cond) { #pragma acc kernels self(cond) {} // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.kernels self(%[[CONV_CAST]]) { // CHECK-NEXT: acc.terminator @@ -58,7 +58,7 @@ void acc_kernels(int cond) { #pragma acc kernels self(0) {} // CHECK-NEXT: %[[ZERO_LITERAL:.*]] = cir.const #cir.int<0> : !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ZERO_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ZERO_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.kernels self(%[[CONV_CAST]]) { // CHECK-NEXT: acc.terminator @@ -67,7 +67,7 @@ void acc_kernels(int cond) { #pragma acc kernels if(cond) {} // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.kernels if(%[[CONV_CAST]]) { // 
CHECK-NEXT: acc.terminator @@ -76,7 +76,7 @@ void acc_kernels(int cond) { #pragma acc kernels if(1) {} // CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.kernels if(%[[CONV_CAST]]) { // CHECK-NEXT: acc.terminator diff --git a/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp b/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp index e614d91550bd6..6824f77b98e71 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-private-clause.cpp @@ -15,79 +15,43 @@ struct HasDtor { ~HasDtor(); }; -// CHECK: acc.private.recipe @privatization__ZTSA5_7HasDtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] -// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[ELEM_LOAD]]) nothrow : (!cir.ptr) -> () -// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[PREVELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[PREVELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[ARRPTR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } -// -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_14NonDefaultCtor : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init", init] -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !u64i -// CHECK-NEXT: %[[ARRPTR:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELEM:.*]] = cir.ptr_stride(%[[ARRPTR]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] -// CHECK-NEXT: cir.store %[[ARRPTR]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ELEM_LOAD]]) : (!cir.ptr) -> () -// CHECK-NEXT: %[[ONE_CONST:.*]] = cir.const #cir.int<1> : !u64i -// CHECK-NEXT: %[[ELEM:.*]] = cir.ptr_stride(%[[ELEM_LOAD]] : !cir.ptr, %[[ONE_CONST]] : !u64i), !cir.ptr -// CHECK-NEXT: cir.store %[[ELEM]], %[[ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: 
%[[ELEM_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[ELEM_LOAD]], %[[LAST_ELEM]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// int +// CHECK: acc.private.recipe @privatization__ZTSi : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_13CopyConstruct : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// float +// CHECK-NEXT: acc.private.recipe @privatization__ZTSf : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_15NoCopyConstruct : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// NoCopyConstruct +// CHECK-NEXT: acc.private.recipe @privatization__ZTS15NoCopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_f : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CopyConstruct +// CHECK-NEXT: acc.private.recipe @privatization__ZTS13CopyConstruct : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_i : !cir.ptr> init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// NonDefaultCtor +// CHECK-NEXT: acc.private.recipe @privatization__ZTS14NonDefaultCtor : !cir.ptr init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.private.init", init] +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ALLOCA]]) : (!cir.ptr) -> () // CHECK-NEXT: acc.yield // CHECK-NEXT: } // +// HasDtor // CHECK-NEXT: acc.private.recipe @privatization__ZTS7HasDtor : !cir.ptr init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): // CHECK-NEXT: cir.alloca !rec_HasDtor, !cir.ptr, ["openacc.private.init"] @@ -98,34 +62,98 @@ struct HasDtor { // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTS14NonDefaultCtor : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_NonDefaultCtor, !cir.ptr, ["openacc.private.init", init] -// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[ALLOCA]]) : (!cir.ptr) -> () +// int[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe 
@privatization__ZTS13CopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_CopyConstruct, !cir.ptr, ["openacc.private.init"] +// float[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_f : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTS15NoCopyConstruct : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !rec_NoCopyConstruct, !cir.ptr, ["openacc.private.init"] +// NoCopyConstruct[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_15NoCopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSf : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !cir.float, !cir.ptr, ["openacc.private.init"] +// CopyConstruct[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_13CopyConstruct : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] // CHECK-NEXT: acc.yield // CHECK-NEXT: } // -// CHECK-NEXT: acc.private.recipe @privatization__ZTSi : !cir.ptr init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.private.init"] +// NonDefaultCtor[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_14NonDefaultCtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i +// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ITR_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN14NonDefaultCtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = 
cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } + // CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// +// HasDtor[5] with 1 'bound' +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSA5_7HasDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ITR_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN7HasDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } @@ -222,7 +250,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someIntArr[1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(someFloatArr[1]) @@ -234,7 +262,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someFloatArr[1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_f -> 
%[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(noCopyArr[1]) @@ -246,7 +274,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(hasCopyArr[1]) @@ -258,7 +286,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[HASCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "hasCopyArr[1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(notDefCtorArr[1]) @@ -270,7 +298,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOTDEFCTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "notDefCtorArr[1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(dtorArr[1]) @@ -282,7 +310,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(someIntArr[1], someFloatArr[1], noCopyArr[1], hasCopyArr[1], notDefCtorArr[1], dtorArr[1]) @@ -329,12 +357,12 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CONST]] : i64) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, - 
// CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc @@ -348,7 +376,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[INTARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someIntArr[1:1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(someFloatArr[1:1]) @@ -361,7 +389,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[FLOATARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "someFloatArr[1:1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(noCopyArr[1:1]) @@ -374,7 +402,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "noCopyArr[1:1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(hasCopyArr[1:1]) @@ -387,7 +415,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[HASCOPYARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "hasCopyArr[1:1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) + // 
CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(notDefCtorArr[1:1]) @@ -400,7 +428,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[NOTDEFCTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "notDefCtorArr[1:1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(dtorArr[1:1]) @@ -413,7 +441,7 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1:1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc #pragma acc loop private(someIntArr[1:1], someFloatArr[1:1], noCopyArr[1:1], hasCopyArr[1:1], notDefCtorArr[1:1], dtorArr[1:1]) @@ -466,12 +494,12 @@ extern "C" void acc_loop() { // CHECK-NEXT: %[[ONE_CONST2:.*]] = arith.constant 1 // CHECK-NEXT: %[[BOUNDS:.*]] = acc.bounds lowerbound(%[[ONE_CAST]] : si32) extent(%[[ONE_CAST2]] : si32) stride(%[[ONE_CONST2]] : i64) startIdx(%[[ZERO_CONST]] : i64) // CHECK-NEXT: %[[PRIVATE6:.*]] = acc.private varPtr(%[[DTORARR]] : !cir.ptr>) bounds(%[[BOUNDS]]) -> !cir.ptr> {name = "dtorArr[1:1]"} - // CHECK-NEXT: acc.loop private(@privatization__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, - // CHECK-SAME: @privatization__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) + // CHECK-NEXT: acc.loop private(@privatization__Bcnt1__ZTSA5_i -> %[[PRIVATE1]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_f -> %[[PRIVATE2]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_15NoCopyConstruct -> %[[PRIVATE3]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_13CopyConstruct -> %[[PRIVATE4]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_14NonDefaultCtor -> %[[PRIVATE5]] : !cir.ptr>, + // CHECK-SAME: @privatization__Bcnt1__ZTSA5_7HasDtor -> %[[PRIVATE6]] : !cir.ptr>) // CHECK: acc.yield // CHECK-NEXT: } loc } diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp index e4d035954b73e..73b8fe27c6aa1 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple 
x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct DefaultOperators { int i; @@ -8,12 +8,262 @@ struct DefaultOperators { bool b; }; +template +void acc_loop() { + T someVar; + T someVarArr[5]; +#pragma acc loop reduction(+:someVar) + for(int i=0;i < 5; ++i); +// CHECK: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } +#pragma acc loop reduction(*:someVar) + +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] 
{name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store 
{{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVar) + +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// 
CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : 
!cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +#pragma acc loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -34,7 +284,6 @@ struct DefaultOperators { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> 
!cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -51,11 +300,12 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr @@ -152,184 +402,100 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, 
!cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// 
CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LEAST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = 
"u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: 
%[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// 
CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -338,11 +504,12 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr @@ -439,99 +606,100 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const 
#cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[LEAST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LEAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // 
CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} 
%[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -540,11 +708,99 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// 
CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member 
%[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][3] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr @@ -641,12 +897,13 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_16DefaultOperators : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = 
cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -667,6 +924,7 @@ struct DefaultOperators { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -683,321 +941,48 @@ struct DefaultOperators { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr 
-> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store 
{{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: 
cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// 
CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS16DefaultOperators : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_DefaultOperators, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][3] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][4] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -template -void acc_loop() { - T someVar; - T someVarArr[5]; -#pragma acc loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); 
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(+:someVarArr[2])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(*:someVarArr[2])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(max:someVarArr[2])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(min:someVarArr[2])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(&:someVarArr[2])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(|:someVarArr[2])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(^:someVarArr[2])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(&&:someVarArr[2])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(||:someVarArr[2])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(+:someVarArr[1:1])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(*:someVarArr[1:1])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(max:someVarArr[1:1])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(min:someVarArr[1:1])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(&:someVarArr[1:1])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(|:someVarArr[1:1])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(^:someVarArr[1:1])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(&&:someVarArr[1:1])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
 #pragma acc loop reduction(||:someVarArr[1:1])
-  for(int i = 0; i < 5; ++i);
+  for(int i=0;i < 5; ++i);
+  // TODO OpenACC: When pointers/arrays are handled correctly, we should see all
+  // of the above repeated for arrays/pointers.
+ // CHECK-NEXT: cir.func {{.*}}@_Z8acc_loop } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp index 4994d46eccbb4..77c61382c06bf 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp @@ -1,11 +1,138 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +template +void acc_loop() { + T someVar; + T someVarArr[5]; +#pragma acc loop reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(*:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr 
{{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } 
combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init { +#pragma acc loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -29,11 +156,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -59,28 +187,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: 
cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -88,28 +218,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(min:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { 
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -117,11 +249,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -147,29 +280,29 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : 
!cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -177,29 +310,29 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: 
%[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -207,11 +340,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -237,12 +371,13 @@ // TODO OpenACC: Expecting 
combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_f : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_f : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -266,199 +401,48 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, 
!cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSf : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.float, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !cir.float, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// 
CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -template -void acc_loop() { - T someVar; - T someVarArr[5]; -#pragma acc loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(*:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(max:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(min:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(|:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(^:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(||:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(*:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(max:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(min:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(|:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(^:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(||:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. 
+ // CHECK-NEXT: cir.func {{.*}}@_Z8acc_loop } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp index 3490d2ebfeb29..6ca0654b0384d 100644 --- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp +++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s +// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s struct HasOperatorsInline { int i; @@ -21,258 +21,296 @@ struct HasOperatorsInline { HasOperatorsInline &operator=(HasOperatorsInline& other); }; - -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr +template +void acc_loop() { + T someVar; + T someVarArr[5]; +#pragma acc loop reduction(+:someVar) +// CHECK: acc.reduction.recipe @reduction_add__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: 
%[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(*:someVar) +// CHECK: acc.reduction.recipe @reduction_mul__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: 
cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(max:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, 
["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: 
%[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : 
!s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop 
reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = 
"d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: 
!cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], 
%[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +#pragma acc loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -293,7 +331,6 @@ struct HasOperatorsInline { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -310,130 +347,9 @@ struct HasOperatorsInline { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator 
init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], 
%[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true 
-// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -451,99 +367,100 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} 
%[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : 
!cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: 
%[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -554,7 +471,7 @@ struct HasOperatorsInline { // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -572,11 +489,12 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(max:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr @@ -675,7 +593,7 @@ struct HasOperatorsInline { // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), 
!cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -693,99 +611,222 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(min:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i 
-// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member 
%[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call 
@_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} 
: !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> 
!cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -796,7 +837,7 @@ struct HasOperatorsInline { // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -814,12 +855,13 @@ struct HasOperatorsInline { // CHECK-NEXT: } // 
CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -840,6 +882,7 @@ struct HasOperatorsInline { // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -856,9 +899,9 @@ struct HasOperatorsInline { // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -876,357 +919,298 @@ struct HasOperatorsInline { // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member 
%[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store 
{{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : 
!cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = 
cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = 
cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr 
-> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} 
: !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: 
^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS18HasOperatorsInline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVarArr) +// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_18HasOperatorsInline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, 
!cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -template -void acc_loop() { - T someVar; - T someVarArr[5]; -#pragma acc loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop 
reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(*:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(max:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(min:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(|:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(^:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(||:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(*:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(max:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(min:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(|:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(^:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(||:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. 
+ // CHECK-NEXT: cir.func {{.*}}@_Z8acc_loop
 }
 void uses() {
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
index cee3683e9565f..dd3c54fa8f023 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
@@ -1,11 +1,140 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+template
+void acc_loop() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc loop reduction(+:someVar)
+// CHECK: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr reduction_operator init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr
+// CHECK-NEXT: }
+ for(int i=0;i < 5; ++i);
+#pragma acc loop reduction(*:someVar)
+// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr reduction_operator init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr
+// CHECK-NEXT: acc.yield
+//
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr
+// CHECK-NEXT: }
+ for(int i=0;i < 5; ++i);
+#pragma acc loop reduction(max:someVar)
+// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr
+// CHECK-NEXT: }
+ for(int i=0;i < 5; ++i);
+#pragma acc loop reduction(min:someVar)
+// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
+// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i
+// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr
+// CHECK-NEXT: acc.yield
+// CHECK-NEXT: } combiner {
+// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}})
+// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr
+//
CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } + for(int 
i=0;i < 5; ++i); -// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { +#pragma acc loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -20,7 +149,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -29,11 +158,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -59,87 +189,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// 
CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(max:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const 
#cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -147,11 +220,12 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(min:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[LARGEST]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -177,29 +251,30 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[DECAY]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, 
%[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[LEAST]], %[[NEXT_ELT]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store{{.*}} %[[ALL_ONES]], %[[NEXT_ELT]] : !s32i, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { @@ -207,11 +282,72 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], 
%[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_i : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[TEMP_LOAD]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_i : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[DECAY]] : !s32i, !cir.ptr // CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i @@ -237,12 +373,13 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_i : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_i : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: 
%[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -257,7 +394,7 @@ // CHECK-NEXT: } while { // CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr // CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } // CHECK-NEXT: acc.yield // @@ -266,201 +403,48 @@ // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// 
CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store{{.*}} %[[ONE]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSi : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[ALLOCA]] : !s32i, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } - -template -void acc_loop() { - T someVar; - T someVarArr[5]; -#pragma acc loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop 
reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(*:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(max:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(min:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(|:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(^:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(||:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(*:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(max:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(min:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(|:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(^:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(||:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); + // TODO OpenACC: When pointers/arrays are handled correctly, we should see all + // of the above repeated for arrays/pointers. 
+ // CHECK-NEXT: cir.func {{.*}}@_Z8acc_loop
 }
 void uses() {
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
index 8ea4ddee3d1fd..d36f9c608920e 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
+// RUN: not %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s
 struct HasOperatorsOutline {
 int i;
 unsigned u;
@@ -20,257 +20,297 @@ bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &);
 // For min/max
 HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &);
-// CHECK: acc.reduction.recipe @reduction_lor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init {
-// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}})
-// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init]
-// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"]
-// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr
-// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr>
-// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i
-// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr
-// CHECK-NEXT: cir.do {
-// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr
-// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr
+template
+void acc_loop() {
+ T someVar;
+ T someVarArr[5];
+#pragma acc loop reduction(+:someVar)
+// CHECK: acc.reduction.recipe @reduction_add__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init {
+// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}})
+// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init]
+// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr
 // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
 // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr
-// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr
 // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i
 // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr
-// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr
 // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float
 // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr
-// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr
+// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr
 // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double
 // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr
-// CHECK-NEXT: %[[GET_B:.*]] =
cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr +#pragma acc loop reduction(*:someVar) +// CHECK: acc.reduction.recipe @reduction_mul__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> 
!cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(max:someVar) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: 
%[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(min:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member 
%[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : 
!cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: 
acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(^:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: acc.yield // -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVar) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, 
!cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i -// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVar) +// CHECK-NEXT: acc.reduction.recipe 
@reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: 
%[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) // CHECK-NEXT: acc.yield // CHECK-NEXT: } + for(int i=0;i < 5; ++i); -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +#pragma acc loop reduction(+:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -291,7 +331,6 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -308,130 +347,9 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): -// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr -// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} -// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.do { -// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) -// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i -// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr -// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> -// CHECK-NEXT: cir.yield -// CHECK-NEXT: } while { -// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr -// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : 
!cir.ptr, !cir.bool -// CHECK-NEXT: cir.condition(%[[CMP]]) -// CHECK-NEXT: } -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : 
!s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// -// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : 
!cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -449,99 +367,100 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(*:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = 
cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // -// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// 
CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, 
%[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -552,7 +471,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -570,11 +489,12 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(max:someVarArr) // CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[LEAST:.*]] = cir.const 
#cir.int<-2147483648> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr @@ -673,7 +593,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -691,99 +611,222 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(min:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // 
-// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i -// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[LARGEST_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LARGEST_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} 
%[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: acc.yield +// +// CHECK-NEXT: } combiner { +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) +// TODO OpenACC: Expecting combination operation here +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const 
#cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ALL_ONES_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ALL_ONES_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> 
!cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // // CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, 
!cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i // CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr // CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr // CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr // CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr // CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr // // CHECK-NEXT: acc.yield // @@ -794,7 +837,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } destroy { // CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const 
#cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -812,12 +855,13 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(|:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { // CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) // CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] // CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ALLOCA]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> // CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i // CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr @@ -838,6 +882,7 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i // CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr // CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> @@ -854,9 +899,9 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // TODO OpenACC: Expecting combination operation here // CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): // CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i -// CHECK-NEXT: %[[DECAY:.*]] = cir.cast(array_to_ptrdecay, %[[ARG]] : !cir.ptr>), !cir.ptr +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr // CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr // CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} // CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> @@ -874,357 +919,299 @@ HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &); // CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - - -// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// 
CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ONE:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) 
nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_ior__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(^:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_xor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = 
cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_iand__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<-1> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #cir.fp<0xFF{{.*}}> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[ALL_ONES:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[ALL_ONES]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield -// CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: 
^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<2147483647> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.int<4294967295> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #cir.fp<1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LARGEST:.*]] = cir.const #true -// CHECK-NEXT: cir.store {{.*}} %[[LARGEST]], %[[GET_B]] : !cir.bool, !cir.ptr -// CHECK-NEXT: acc.yield -// -// CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) -// TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr -// CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) -// CHECK-NEXT: acc.yield +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #false +// CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_max__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator 
init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<-2147483648> : !s32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.int<0> : !u32i -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-3.4{{.*}}E+38> : !cir.float -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #cir.fp<-1.7{{.*}}E+308> : !cir.double -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr -// CHECK-NEXT: %[[LEAST:.*]] = cir.const #false -// CHECK-NEXT: cir.store {{.*}} %[[LEAST]], %[[GET_B]] : !cir.bool, !cir.ptr // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -// CHECK-NEXT: acc.reduction.recipe @reduction_mul__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] 
{name = "i"} : !cir.ptr -> !cir.ptr + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(&&:someVarArr) +// CHECK-NEXT: acc.reduction.recipe @reduction_land__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[DECAY]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[DECAY]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[DECAY]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[DECAY]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[DECAY]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE_IDX:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ONE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[TWO_IDX:.*]] = cir.const #cir.int<2> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[TWO_IDX]] : !s64i), !cir.ptr +// 
CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// +// CHECK-NEXT: %[[THREE_IDX:.*]] = cir.const #cir.int<3> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[THREE_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #true +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[FOUR_IDX:.*]] = cir.const #cir.int<4> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[FOUR_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[NEXT_ELT]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_I]] : !s32i, !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[NEXT_ELT]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u32i +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_U]] : !u32i, !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[NEXT_ELT]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.fp<1{{.*}}> : !cir.float +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_F]] : !cir.float, !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[NEXT_ELT]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[ONE:.*]] 
= cir.const #cir.fp<1{{.*}}> : !cir.double +// CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_D]] : !cir.double, !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[NEXT_ELT]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ONE:.*]] = cir.const #true // CHECK-NEXT: cir.store {{.*}} %[[ONE]], %[[GET_B]] : !cir.bool, !cir.ptr +// // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } + for(int i=0;i < 5; ++i); +#pragma acc loop reduction(||:someVarArr) -// CHECK-NEXT: acc.reduction.recipe @reduction_add__ZTS19HasOperatorsOutline : !cir.ptr reduction_operator init { -// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr{{.*}}) -// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr, ["openacc.reduction.init", init] -// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: acc.reduction.recipe @reduction_lor__ZTSA5_19HasOperatorsOutline : !cir.ptr> reduction_operator init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>{{.*}}) +// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.reduction.init", init] +// CHECK-NEXT: %[[TEMP_ITR:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["arrayinit.temp"] +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[DECAY]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<5> : !s64i +// CHECK-NEXT: %[[END_ITR:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[TEMP_LOAD]][0] {name = "i"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: 
%[[ZERO:.*]] = cir.const #cir.int<0> : !s32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_I]] : !s32i, !cir.ptr -// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[ALLOCA]][1] {name = "u"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_U:.*]] = cir.get_member %[[TEMP_LOAD]][1] {name = "u"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u32i // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_U]] : !u32i, !cir.ptr -// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[ALLOCA]][2] {name = "f"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_F:.*]] = cir.get_member %[[TEMP_LOAD]][2] {name = "f"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.float // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_F]] : !cir.float, !cir.ptr -// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[ALLOCA]][4] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_D:.*]] = cir.get_member %[[TEMP_LOAD]][4] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.fp<0{{.*}}> : !cir.double // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_D]] : !cir.double, !cir.ptr -// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[ALLOCA]][5] {name = "b"} : !cir.ptr -> !cir.ptr +// CHECK-NEXT: %[[GET_B:.*]] = cir.get_member %[[TEMP_LOAD]][5] {name = "b"} : !cir.ptr -> !cir.ptr // CHECK-NEXT: %[[ZERO:.*]] = cir.const #false // CHECK-NEXT: cir.store {{.*}} %[[ZERO]], %[[GET_B]] : !cir.bool, !cir.ptr +// +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !s64i +// CHECK-NEXT: %[[NEXT_ITEM:.*]] = cir.ptr_stride(%[[TEMP_LOAD]] : !cir.ptr, %[[ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store {{.*}} %[[NEXT_ITEM]], %[[TEMP_ITR]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[TEMP_LOAD:.*]] = cir.load {{.*}} %[[TEMP_ITR]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[TEMP_LOAD]], %[[END_ITR]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // // CHECK-NEXT: } combiner { -// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr {{.*}}, %[[RHSARG:.*]]: !cir.ptr {{.*}}) +// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr> {{.*}}, %[[RHSARG:.*]]: !cir.ptr> {{.*}}) // TODO OpenACC: Expecting combination operation here -// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr +// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr> // CHECK-NEXT: } destroy { -// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr {{.*}}, %[[ARG:.*]]: !cir.ptr {{.*}}): -// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[ARG]]) nothrow : (!cir.ptr) +// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr> {{.*}}, %[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARG]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[CUR:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineD1Ev(%[[CUR]]) nothrow : (!cir.ptr) +// CHECK-NEXT: %[[NEG:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEW_ITEM:.*]] = cir.ptr_stride(%[[CUR]] : !cir.ptr, %[[NEG]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEW_ITEM]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// 
CHECK-NEXT: } while { +// CHECK-NEXT: %[[CUR_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[CUR_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } // CHECK-NEXT: acc.yield // CHECK-NEXT: } - -template -void acc_loop() { - T someVar; - T someVarArr[5]; -#pragma acc loop reduction(+:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(^:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVar) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVar) - for(int i = 0; i < 5; ++i); - -#pragma acc loop reduction(+:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(*:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(max:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(min:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(|:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(^:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(&&:someVarArr) - for(int i = 0; i < 5; ++i); -#pragma acc loop reduction(||:someVarArr) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(*:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(max:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(min:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(|:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(^:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&&:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(||:someVarArr[2]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(+:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(*:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(max:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(min:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(|:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(^:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(&&:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); #pragma acc loop reduction(||:someVarArr[1:1]) - for(int i = 0; i < 5; ++i); + for(int i=0;i < 5; ++i); + + // TODO OpenACC: When pointers/arrays are 
handled correctly, we should see all + // of the above repeated for arrays/pointers. + // CHECK-NEXT: cir.func {{.*}}@_Z8acc_loop } void uses() { diff --git a/clang/test/CIR/CodeGenOpenACC/parallel.c b/clang/test/CIR/CodeGenOpenACC/parallel.c index 5db174fb6549b..7080a8d5e579a 100644 --- a/clang/test/CIR/CodeGenOpenACC/parallel.c +++ b/clang/test/CIR/CodeGenOpenACC/parallel.c @@ -28,7 +28,7 @@ void acc_parallel(int cond) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.while { // CHECK-NEXT: %[[INT:.*]] = cir.const #cir.int<1> - // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[INT]] : + // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[INT]] // CHECK-NEXT: cir.condition(%[[CAST]]) // CHECK-NEXT: } do { // CHECK-NEXT: cir.yield @@ -48,7 +48,7 @@ void acc_parallel(int cond) { #pragma acc parallel self(cond) {} // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.parallel self(%[[CONV_CAST]]) { // CHECK-NEXT: acc.yield @@ -57,7 +57,7 @@ void acc_parallel(int cond) { #pragma acc parallel self(0) {} // CHECK-NEXT: %[[ZERO_LITERAL:.*]] = cir.const #cir.int<0> : !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ZERO_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ZERO_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.parallel self(%[[CONV_CAST]]) { // CHECK-NEXT: acc.yield @@ -66,7 +66,7 @@ void acc_parallel(int cond) { #pragma acc parallel if(cond) {} // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.parallel if(%[[CONV_CAST]]) { // CHECK-NEXT: acc.yield @@ -75,7 +75,7 @@ void acc_parallel(int cond) { #pragma acc parallel if(1) {} // CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.parallel if(%[[CONV_CAST]]) { // CHECK-NEXT: acc.yield diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp new file mode 100644 index 0000000000000..101f18e8d071c --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-CtorDtor.cpp @@ -0,0 +1,658 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +struct CtorDtor { + int i; + CtorDtor(); + CtorDtor(const CtorDtor&); + ~CtorDtor(); +}; + +template +void do_things(unsigned A, unsigned B) { + T OneArr[5]; +#pragma acc parallel private(OneArr[A:B]) +// CHECK: acc.private.recipe 
@privatization__Bcnt1__ZTSA5_8CtorDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] {alignment = 4 : i64} +// +// Init Section. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// 
CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(OneArr[B]) + ; +#pragma acc parallel private(OneArr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_8CtorDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init", init] {alignment = 16 : i64} +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[IDX_LOAD]]) : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ONE_PAST_LAST_ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} destroy { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[DEC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[DEC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; + + T TwoArr[5][5]; +#pragma acc parallel private(TwoArr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_8CtorDtor : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 4 : i64} +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: 
%[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = 
builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT:acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(TwoArr[B][A:B]) + ; +#pragma acc parallel private(TwoArr[A:B][A:B]) + ; +#pragma acc parallel private(TwoArr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_8CtorDtor : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>> {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init", init] {alignment = 16 : i64} +// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[TL_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: 
%[[ARR_SIZE:.*]] = cir.const #cir.int<25> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[IDX_LOAD]]) : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ONE_PAST_LAST_ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr x 5>> {{.*}}): +// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[PRIVATE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<24> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[DEC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[DEC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; + + T ThreeArr[5][5][5]; +#pragma acc parallel private(ThreeArr[B][B][B]) +// CHECK-NEXT:acc.private.recipe @privatization__Bcnt3__ZTSA5_A5_A5_8CtorDtor : !cir.ptr x 5> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5> x 5>, !cir.ptr x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64} +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : 
index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5> x 5>> -> !cir.ptr x 5>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[BOUND1_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[BOUND1_STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = 
cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB3_CAST]], %[[ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR3_LOAD]], %[[LB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr x 5> x 5>> -> !cir.ptr x 5>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : 
(!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[BOUND1_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[BOUND1_STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT:acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(ThreeArr[B][B][A:B]) + ; +#pragma acc parallel private(ThreeArr[B][A:B][A:B]) + ; +#pragma acc parallel private(ThreeArr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(ThreeArr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_A5_8CtorDtor : !cir.ptr x 5> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5> x 5>, !cir.ptr x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64} +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// 
CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5> x 5>> -> !cir.ptr x 5>> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr x 5>> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr>, %[[ITR1_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i +// CHECK-NEXT: %[[ARR_DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[ARR_DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[ARR_DECAY]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[IDX_LOAD]]) : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[LAST_ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// 
CHECK-NEXT:} destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr x 5> x 5>> -> !cir.ptr x 5>> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr x 5>> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[LAST_SUB_ONE:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[LAST_SUB_ONE]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr>, %[[ITR1_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ARR_DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[ARR_DECAY]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: 
%[[DEC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[DEC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ARR_DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT:acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(ThreeArr[B][A:B]) + ; +#pragma acc parallel private(ThreeArr[A:B][A:B]) + ; +#pragma acc parallel private(ThreeArr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_A5_8CtorDtor : !cir.ptr x 5> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5> x 5>> {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5> x 5>, !cir.ptr x 5> x 5>>, ["openacc.private.init", init] {alignment = 16 : i64} +// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[TL_ALLOCA]] : !cir.ptr x 5> x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<125> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[IDX_LOAD]]) : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ONE_PAST_LAST_ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr x 5> x 5>> {{.*}})): +// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[PRIVATE]] : !cir.ptr x 5> x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<124> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[LAST_ELT]], 
%[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[DEC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[DEC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +} + +void use(unsigned A, unsigned B) { + do_things(A, B); +} + diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp new file mode 100644 index 0000000000000..7e2b8b83bdf87 --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-NoOps.cpp @@ -0,0 +1,353 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +struct NoOps { int i = 0; }; + +template +void do_things(unsigned A, unsigned B) { + T OneArr[5]; +#pragma acc parallel private(OneArr[A:B]) +// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSA5_5NoOps : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] {alignment = 4 : i64} +// +// Init Section. 
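// A rough C++ analogue of the bounded init region checked here, for orientation
// only: privatizing OneArr[A:B] allocates a fresh local array and then
// default-constructs just the [lowerbound, upperbound) slice described by the
// acc.data_bounds value. This is a minimal sketch, assuming a
// default-constructible element type; the names Elem, lb, ub and
// init_private_slice are assumptions and are not part of this test or recipe.
#include <new>
struct Elem { Elem() {} };                              // stands in for NoOps / CtorDtor
void init_private_slice(unsigned long lb, unsigned long ub) {
  alignas(Elem) unsigned char raw[5 * sizeof(Elem)];    // the "openacc.private.init" alloca
  Elem *base = reinterpret_cast<Elem *>(raw);           // cir.cast array_to_ptrdecay
  for (unsigned long it = lb; it < ub; ++it)            // cir.for: cond / body / step over "iter"
    ::new (static_cast<void *>(base + it)) Elem();      // per-element constructor call
}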
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(OneArr[B]) + ; +#pragma acc parallel private(OneArr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_5NoOps : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init", init] {alignment = 16 : i64} +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ONE_PAST_LAST_ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; + + T TwoArr[5][5]; +#pragma acc parallel private(TwoArr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_5NoOps : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 
4 : i64} +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(TwoArr[B][A:B]) + ; +#pragma acc parallel private(TwoArr[A:B][A:B]) + ; +#pragma acc parallel private(TwoArr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_5NoOps : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr x 5>> {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init", init] {alignment = 16 : i64} +// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast 
%[[TL_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<25> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ONE_PAST_LAST_ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; + + T ThreeArr[5][5][5]; +#pragma acc parallel private(ThreeArr[B][B][B]) +// CHECK-NEXT:acc.private.recipe @privatization__Bcnt3__ZTSA5_A5_A5_5NoOps : !cir.ptr x 5> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5> x 5>, !cir.ptr x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64} +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5> x 5>> -> !cir.ptr x 5>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { 
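// For the Bcnt2/Bcnt3 recipes the same pattern nests once per bounds dimension:
// each outer loop selects a subarray and only the innermost loop constructs
// elements. A minimal C++ sketch of that shape, assuming the 5x5x5 storage used
// by the test and flattening the per-level ptr_stride into one offset; the
// names Elem3, lb1..ub3 and init_private_3d are assumptions, not part of the test.
#include <new>
struct Elem3 { Elem3() {} };
void init_private_3d(unsigned long lb1, unsigned long ub1,
                     unsigned long lb2, unsigned long ub2,
                     unsigned long lb3, unsigned long ub3) {
  alignas(Elem3) unsigned char raw[5 * 5 * 5 * sizeof(Elem3)]; // private alloca for T[5][5][5]
  Elem3 *base = reinterpret_cast<Elem3 *>(raw);                // outermost array_to_ptrdecay
  for (unsigned long i3 = lb3; i3 < ub3; ++i3)                 // BOUND3: outermost dimension
    for (unsigned long i2 = lb2; i2 < ub2; ++i2)               // BOUND2: middle dimension
      for (unsigned long i1 = lb1; i1 < ub1; ++i1)             // BOUND1: contiguous elements
        ::new (static_cast<void *>(base + (i3 * 5 + i2) * 5 + i1)) Elem3(); // ctor per element
}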
+// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[BOUND1_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[BOUND1_STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(ThreeArr[B][B][A:B]) + ; +#pragma acc parallel private(ThreeArr[B][A:B][A:B]) + ; +#pragma acc parallel private(ThreeArr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(ThreeArr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_A5_5NoOps : !cir.ptr x 5> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5> x 5>, !cir.ptr x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64} +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> 
index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5> x 5>> -> !cir.ptr x 5>> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr x 5>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr x 5>> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr>, %[[ITR1_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i +// CHECK-NEXT: %[[ARR_DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[LAST_ELT:.*]] = cir.ptr_stride(%[[ARR_DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[ARR_DECAY]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[LAST_ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : 
!u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(ThreeArr[B][A:B]) + ; +#pragma acc parallel private(ThreeArr[A:B][A:B]) + ; +#pragma acc parallel private(ThreeArr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_A5_5NoOps : !cir.ptr x 5> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5> x 5>> {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5> x 5>, !cir.ptr x 5> x 5>>, ["openacc.private.init", init] {alignment = 16 : i64} +// CHECK-NEXT: %[[BITCAST:.*]] = cir.cast bitcast %[[TL_ALLOCA]] : !cir.ptr x 5> x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<125> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BITCAST]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ONE_PAST_LAST_ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[ARR_IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[DECAY]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC_STRIDE:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC_STRIDE]], %[[ARR_IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[ARR_IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ONE_PAST_LAST_ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +} + +void use(unsigned A, unsigned B) { + do_things(A, B); +} + diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-int.cpp new file mode 100644 index 0000000000000..e83e548cd138b --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-array-recipes-int.cpp @@ -0,0 +1,80 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +template +void do_things(unsigned A, unsigned B) { + T OneArr[5]; +#pragma acc parallel private(OneArr[A:B]) +// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSA5_i : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] {alignment = 4 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(OneArr[B]) + ; +#pragma acc parallel private(OneArr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_i : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array, !cir.ptr>, ["openacc.private.init"] 
{alignment = 16 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T TwoArr[5][5]; +#pragma acc parallel private(TwoArr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_i : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 4 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(TwoArr[B][A:B]) + ; +#pragma acc parallel private(TwoArr[A:B][A:B]) + ; +#pragma acc parallel private(TwoArr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_i : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr x 5>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 16 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; + + T ThreeArr[5][5][5]; +#pragma acc parallel private(ThreeArr[B][B][B]) +// CHECK-NEXT:acc.private.recipe @privatization__Bcnt3__ZTSA5_A5_A5_i : !cir.ptr x 5> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5> x 5>, !cir.ptr x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(ThreeArr[B][B][A:B]) + ; +#pragma acc parallel private(ThreeArr[B][A:B][A:B]) + ; +#pragma acc parallel private(ThreeArr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(ThreeArr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_A5_A5_i : !cir.ptr x 5> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5> x 5>, !cir.ptr x 5> x 5>>, ["openacc.private.init"] {alignment = 4 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(ThreeArr[B][A:B]) + ; +#pragma acc parallel private(ThreeArr[A:B][A:B]) + ; +#pragma acc parallel private(ThreeArr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_A5_A5_i : !cir.ptr x 5> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5> x 5>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array x 5> x 5>, !cir.ptr x 5> x 5>>, ["openacc.private.init"] {alignment = 16 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +} + +void use(unsigned A, unsigned B) { + do_things(A, B); +} + diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp new file mode 100644 index 0000000000000..3149493095d9a --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-CtorDtor.cpp @@ -0,0 +1,1973 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +struct CtorDtor { + int i; + CtorDtor(); + CtorDtor(const CtorDtor&); + ~CtorDtor(); +}; + +template +void do_things(unsigned A, unsigned B) { + T *OnePtr; +#pragma acc parallel private(OnePtr[A:B]) +// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSP8CtorDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> 
{{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[ELT_STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// 
CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(OnePtr[B]) + ; +#pragma acc parallel private(OnePtr) +// CHECK: acc.private.recipe @privatization__ZTSP8CtorDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T **TwoPtr; +#pragma acc parallel private(TwoPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPP8CtorDtor : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// 
CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, 
!cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : 
!cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(TwoPtr[B][A:B]) + ; +#pragma acc parallel private(TwoPtr[A:B][A:B]) + ; +#pragma acc parallel private(TwoPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPP8CtorDtor : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T ***ThreePtr; +#pragma acc parallel private(ThreePtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPPP8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, 
["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: 
%[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA3:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA3]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// +// Init: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = 
cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB3:.*]] = cir.binop(sub, %[[UB3_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB3]], %[[ITR3]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = 
cir.cmp(ge, %[[ITR3_LOAD]], %[[LB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, 
%[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPP8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { 
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPPP8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + + T *ArrayOfPtr[5]; +#pragma acc parallel private(ArrayOfPtr[B][A:B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_P8CtorDtor : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>, %[[ZERO]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : 
!cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[ELT_STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// 
CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// 
CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ArrayOfPtr[A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtr[B][B]) + ; +#pragma acc parallel private(ArrayOfPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_P8CtorDtor : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 16 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + using TArrayTy = T[5]; + TArrayTy *PtrToArrays; +#pragma acc parallel private(PtrToArrays[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPA5_8CtorDtor : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = 
builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[ELT_STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, 
!cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrToArrays[B][A:B]) + ; +#pragma acc parallel private(PtrToArrays[A:B][A:B]) + ; +#pragma acc parallel private(PtrToArrays) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPA5_8CtorDtor : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T **ArrayOfPtrPtr[5]; +#pragma acc parallel private(ArrayOfPtrPtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSA5_PP8CtorDtor : !cir.ptr> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array> x 5>, !cir.ptr> x 5>>, ["openacc.private.init"] 
{alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> x 5>> -> !cir.ptr>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>>, %[[ZERO]] : !u64i), !cir.ptr>> +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, 
%[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init Section. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> x 5>> -> !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: 
%[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr> x 5>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB3:.*]] = cir.binop(sub, %[[UB3_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB3]], %[[ITR3]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR3_LOAD]], %[[LB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[PRIVATE]] : !cir.ptr> x 5>> -> !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// 
CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][B]) +// CHECK-NEXT: acc.private.recipe 
@privatization__Bcnt2__ZTSA5_PP8CtorDtor : !cir.ptr> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array> x 5>, !cir.ptr> x 5>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> x 5>> -> !cir.ptr>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>>, %[[ZERO]] : !u64i), !cir.ptr>> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_PP8CtorDtor : !cir.ptr> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> x 5>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array> x 5>, !cir.ptr> x 5>>, ["openacc.private.init"] {alignment = 16 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + TArrayTy **PtrPtrToArray; +#pragma acc parallel private(PtrPtrToArray[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPPA5_8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// 
CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: 
%[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = 
cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB3:.*]] = cir.binop(sub, %[[UB3_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB3]], %[[ITR3]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR3_LOAD]], %[[LB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = 
cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrPtrToArray[B][B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[B][A:B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[A:B][A:B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPA5_8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: 
%[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// 
CHECK-NEXT: } +// CHECK-NEXT: } +// +// Initialization. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr>, %[[ITR1_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[DECAY]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[IDX_LOAD]]) : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = 
cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[TLA_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr>> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[TLA_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr>, %[[ITR1_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[LAST_IDX:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[LAST_IDX]] : !u64i), !cir.ptr 
+// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[NEG_ONE:.*]] = cir.const #cir.int<-1> : !s64i +// CHECK-NEXT: %[[NEXT_ELT:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[NEG_ONE]] : !s64i), !cir.ptr +// CHECK-NEXT: cir.store %[[NEXT_ELT]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[DECAY]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrPtrToArray[B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[A:B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPPA5_8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> +// CHECK-NEXT: cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + using PtrTArrayTy = T*[5]; + PtrTArrayTy *PtrArrayPtr; + +#pragma acc parallel private(PtrArrayPtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPA5_P8CtorDtor : !cir.ptr x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr x 5>>, !cir.ptr x 5>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool 
+// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr x 5>>, !cir.ptr x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>, %[[ZERO]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound 
%[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr x 5>>>, !cir.ptr x 5>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield 
+// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr x 5>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB3:.*]] = cir.binop(sub, %[[UB3_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB3]], %[[ITR3]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR3_LOAD]], %[[LB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr x 5>>>, !cir.ptr x 5>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// 
CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrArrayPtr[B][B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[B][A:B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPA5_P8CtorDtor : !cir.ptr x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr x 5>>, !cir.ptr x 5>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, 
["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr x 5>>, !cir.ptr x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrArrayPtr[B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[A:B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPA5_P8CtorDtor : !cir.ptr x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr x 5>>, !cir.ptr x 5>>>, ["openacc.private.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +} + +void use(unsigned A, unsigned B) { + do_things(A, B); +} + diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp new file mode 100644 index 0000000000000..ed8c38080f4b2 --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-NoOps.cpp @@ -0,0 +1,1376 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +struct NoOps { int i = 0; }; + +template +void do_things(unsigned A, unsigned B) { + T *OnePtr; +#pragma acc parallel private(OnePtr[A:B]) +// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSP5NoOps : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: 
%[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[ELT_STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(OnePtr[B]) + ; +#pragma acc parallel private(OnePtr) +// CHECK: acc.private.recipe @privatization__ZTSP5NoOps : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T **TwoPtr; +#pragma acc parallel private(TwoPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPP5NoOps : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : 
index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_NoOps, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// Init Section. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(TwoPtr[B][A:B]) + ; +#pragma acc parallel private(TwoPtr[A:B][A:B]) + ; +#pragma acc parallel private(TwoPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPP5NoOps : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T ***ThreePtr; +#pragma acc parallel private(ThreePtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe 
@privatization__Bcnt3__ZTSPPP5NoOps : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> 
+// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA3:.*]] = cir.alloca !rec_NoOps, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA3]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound 
%[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPP5NoOps : !cir.ptr>>> init { +// 
CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), 
!cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPPP5NoOps : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + + T *ArrayOfPtr[5]; +#pragma acc parallel private(ArrayOfPtr[B][A:B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_P5NoOps : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>, %[[ZERO]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], 
%[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[ELT_STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ArrayOfPtr[A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtr[B][B]) + ; +#pragma acc parallel private(ArrayOfPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_P5NoOps : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 16 : i64} +// 
CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + using TArrayTy = T[5]; + TArrayTy *PtrToArrays; +#pragma acc parallel private(PtrToArrays[B][B]) + ; +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPA5_5NoOps : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// Init Section +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : 
(!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ELT_STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[ELT_STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } +#pragma acc parallel private(PtrToArrays[B][A:B]) + ; +#pragma acc parallel private(PtrToArrays[A:B][A:B]) + ; +#pragma acc parallel private(PtrToArrays) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPA5_5NoOps : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T **ArrayOfPtrPtr[5]; +#pragma acc parallel private(ArrayOfPtrPtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSA5_PP5NoOps : !cir.ptr> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array> x 5>, !cir.ptr> x 5>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> x 5>> -> !cir.ptr>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>>, %[[ZERO]] : !u64i), !cir.ptr>> +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i 
+// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_NoOps, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// Init Section. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> x 5>> -> !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_DECAY]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load 
%[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_PP5NoOps : !cir.ptr> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array> x 5>, !cir.ptr> x 5>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> x 5>> -> !cir.ptr>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>>, %[[ZERO]] : !u64i), !cir.ptr>> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: 
cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_PP5NoOps : !cir.ptr> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> x 5>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array> x 5>, !cir.ptr> x 5>>, ["openacc.private.init"] {alignment = 16 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + TArrayTy **PtrPtrToArray; +#pragma acc parallel private(PtrPtrToArray[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPPA5_5NoOps : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, 
%[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : 
!cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND2_STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_DECAY]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrPtrToArray[B][B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[B][A:B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[A:B][A:B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPA5_5NoOps : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, 
["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// Init Section. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr>>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR2_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr>, %[[ITR1_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<5> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[STRIDE]] : !cir.ptr> -> !cir.ptr +// CHECK-NEXT: %[[ELT:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr, %[[ARR_SIZE]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[IDX:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["__array_idx"] {alignment = 1 : i64} +// CHECK-NEXT: cir.store %[[DECAY]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.do { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[IDX_LOAD]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: %[[ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.ptr_stride(%[[IDX_LOAD]] : !cir.ptr, %[[ONE]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.store %[[INC]], %[[IDX]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } while { +// CHECK-NEXT: %[[IDX_LOAD:.*]] = cir.load %[[IDX]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(ne, %[[IDX_LOAD]], %[[ELT]]) : !cir.ptr, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: 
cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrPtrToArray[B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[A:B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPPA5_5NoOps : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> +// CHECK-NEXT: cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + using PtrTArrayTy = T*[5]; + PtrTArrayTy *PtrArrayPtr; + +#pragma acc parallel private(PtrArrayPtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPA5_P5NoOps : !cir.ptr x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr x 5>>, !cir.ptr x 5>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr x 5>>, !cir.ptr x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = 
cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>, %[[ZERO]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !rec_NoOps, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// Init Section: +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TL_ALLOCA]] : !cir.ptr x 5>>>, !cir.ptr x 5>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr x 5>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: 
%[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_DECAY:.*]] = cir.cast array_to_ptrdecay %[[BOUND3_STRIDE]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_DECAY]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrArrayPtr[B][B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[B][A:B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPA5_P5NoOps : !cir.ptr x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>>> {{.*}}, %[[BOUND1:.*]]: 
!acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr x 5>>, !cir.ptr x 5>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr x 5>>, !cir.ptr x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrArrayPtr[B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[A:B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPA5_P5NoOps : !cir.ptr x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr x 5>>, !cir.ptr x 5>>>, ["openacc.private.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +} + +void use(unsigned A, unsigned B) { + do_things(A, B); +} + diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp new file mode 100644 index 0000000000000..aac75730c4703 --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-array-recipes-int.cpp @@ -0,0 +1,853 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +template +void do_things(unsigned A, unsigned B) { + T *OnePtr; +#pragma acc parallel private(OnePtr[A:B]) +// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSPi : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB1:.*]] = 
acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(OnePtr[B]) + ; +#pragma acc parallel private(OnePtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPi : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%arg0: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T **TwoPtr; +#pragma acc parallel private(TwoPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPi : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], 
%[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !s32i, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(TwoPtr[B][A:B]) + ; +#pragma acc parallel private(TwoPtr[A:B][A:B]) + ; +#pragma acc parallel private(TwoPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPPi : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T ***ThreePtr; +#pragma acc parallel private(ThreePtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPPPi : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = 
cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load 
%[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA3:.*]] = cir.alloca !s32i, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA3]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPPi : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] 
= cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPPPi : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + + T *ArrayOfPtr[5]; +#pragma acc parallel private(ArrayOfPtr[B][A:B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_Pi : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>> {{.*}}, %[[BOUND1:.*]]: 
!acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>, %[[ZERO]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ArrayOfPtr[A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtr[B][B]) + ; +#pragma acc parallel private(ArrayOfPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_Pi : !cir.ptr x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array x 5>, !cir.ptr x 5>>, ["openacc.private.init"] {alignment = 16 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + using TArrayTy = T[5]; + TArrayTy *PtrToArrays; +#pragma acc parallel private(PtrToArrays[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPA5_i : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// 
CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrToArrays[B][A:B]) + ; +#pragma acc parallel private(PtrToArrays[A:B][A:B]) + ; +#pragma acc parallel private(PtrToArrays) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPA5_i : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + T **ArrayOfPtrPtr[5]; +#pragma acc parallel private(ArrayOfPtrPtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSA5_PPi : !cir.ptr> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array> x 5>, !cir.ptr> x 5>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> x 5>> -> !cir.ptr>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>>, %[[ZERO]] : !u64i), !cir.ptr>> +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: 
%[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !s32i, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][A:B][A:B]) + ; +#pragma acc parallel 
private(ArrayOfPtrPtr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSA5_PPi : !cir.ptr> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.array> x 5>, !cir.ptr> x 5>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[TL_ALLOCA]] : !cir.ptr> x 5>> -> !cir.ptr>> +// CHECK-NEXT: %[[TL_DEREF:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>>, %[[ZERO]] : !u64i), !cir.ptr>> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_DEREF]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ArrayOfPtrPtr[B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr[A:B][A:B]) + ; +#pragma acc parallel private(ArrayOfPtrPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSA5_PPi : !cir.ptr> x 5>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> x 5>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.array> x 5>, !cir.ptr> x 5>>, ["openacc.private.init"] {alignment = 16 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + TArrayTy **PtrPtrToArray; +#pragma acc parallel private(PtrPtrToArray[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPPA5_i : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: 
!acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// 
CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(PtrPtrToArray[B][B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[B][A:B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[A:B][A:B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPA5_i : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[UB2_CAST]]) : !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<20> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !cir.array, !cir.ptr>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 
8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrPtrToArray[B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray[A:B][A:B]) + ; +#pragma acc parallel private(PtrPtrToArray) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPPA5_i : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> +// CHECK-NEXT: cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; + + using PtrTArrayTy = T*[5]; + PtrTArrayTy *PtrArrayPtr; + +#pragma acc parallel private(PtrArrayPtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPA5_Pi : !cir.ptr x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr x 5>>, !cir.ptr x 5>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB3_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: %[[DEST:.*]] = 
cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr x 5>>, !cir.ptr x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[UB3_CAST]]) : !u64i +// +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: %[[DECAY:.*]] = cir.cast array_to_ptrdecay %[[ARR_ALLOCA]] : !cir.ptr x 5>> -> !cir.ptr> +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[DECAY]] : !cir.ptr>, %[[ZERO]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS2:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[ELT_SIZE:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS2]], %[[ELT_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA2:.*]] = cir.alloca !s32i, !cir.ptr, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB1_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA2]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[STRIDE]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrArrayPtr[B][B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[B][A:B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[A:B][A:B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPA5_Pi : !cir.ptr x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TL_ALLOCA:.*]] = cir.alloca !cir.ptr x 5>>, !cir.ptr x 5>>>, 
["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ARR_SIZE:.*]] = cir.const #cir.int<40> : !u64i +// CHECK-NEXT: %[[ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ARR_SIZE]]) : !u64i +// CHECK-NEXT: %[[ARR_ALLOCA:.*]] = cir.alloca !cir.array x 5>, !cir.ptr x 5>>, %[[ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPP_BOUND:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPP_BOUND]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UB2_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC:.*]] = cir.ptr_stride(%[[ARR_ALLOCA]] : !cir.ptr x 5>>, %[[SRC_IDX]] : !u64i), !cir.ptr x 5>> +// CHECK-NEXT: %[[DEST:.*]] = cir.ptr_stride(%[[TL_ALLOCA]] : !cir.ptr x 5>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr x 5>>> +// CHECK-NEXT: cir.store %[[SRC]], %[[DEST]] : !cir.ptr x 5>>, !cir.ptr x 5>>> +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(PtrArrayPtr[B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr[A:B][A:B]) + ; +#pragma acc parallel private(PtrArrayPtr) +// CHECK: acc.private.recipe @privatization__ZTSPA5_Pi : !cir.ptr x 5>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr x 5>>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr x 5>>, !cir.ptr x 5>>>, ["openacc.private.init"] +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +} + +void use(unsigned A, unsigned B) { + do_things(A, B); +} + diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp new file mode 100644 index 0000000000000..77b7143e41aec --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-CtorDtor.cpp @@ -0,0 +1,753 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +struct CtorDtor { + int i; + CtorDtor(); + CtorDtor(const CtorDtor&); + ~CtorDtor(); +}; + +template +void do_things(unsigned A, unsigned B) { + + T ***ThreePtr; +#pragma acc parallel private(ThreePtr) +// CHECK: acc.private.recipe @privatization__ZTSPPP8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(ThreePtr[A]) +// CHECK-NEXT: acc.private.recipe 
@privatization__Bcnt1__ZTSPPP8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// 'init' section: +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPP8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUNDS1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store 
%[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][B][B]) + ; +#pragma acc parallel private(ThreePtr[B][B][A:B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPPP8CtorDtor : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUNDS1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[INT_PTR_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, 
%[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_PTR_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// Initialization Section +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUNDS3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUNDS3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr + +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : 
!cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr>>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr>>> {{.*}}, %[[BOUNDS1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUNDS3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUNDS3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB3:.*]] = cir.binop(sub, %[[UB3_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB3]], %[[ITR3]] : !u64i, !cir.ptr + +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR3_LOAD]], %[[LB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : 
!cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// 
CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B][A:B]) + ; + + T **TwoPtr; +#pragma acc parallel private(TwoPtr) +// CHECK: acc.private.recipe @privatization__ZTSPP8CtorDtor : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(TwoPtr[A]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSPP8CtorDtor : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// 'init' section: +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(TwoPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPP8CtorDtor : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUNDS1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca 
!cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_INT]]) : !u64i +// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Initialization Section +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[TLA_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_STRIDE_LOAD:.*]] = cir.load %[[TLA_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_STRIDE_LOAD]] : 
!cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr>> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr>> {{.*}}, %[[BOUNDS1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUNDS2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUNDS2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB2:.*]] = cir.binop(sub, %[[UB2_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB2]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR2_LOAD]], %[[LB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[TLA_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUNDS1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB1:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB1]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR1_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_STRIDE_LOAD:.*]] = cir.load %[[TLA_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr 
+// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(TwoPtr[B][A:B]) + ; +#pragma acc parallel private(TwoPtr[A:B][A:B]) + ; + + T *OnePtr; +#pragma acc parallel private(OnePtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSP8CtorDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(OnePtr[B]) +// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSP8CtorDtor : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// 'init' section: +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_CTORDTOR:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_CTORDTOR]]) : !u64i +// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_CtorDtor, !cir.ptr, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Initialization Section +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr, %[[ITR_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorC1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } destroy { +// CHECK-NEXT: ^bb0(%[[REF:.*]]: !cir.ptr> {{.*}}, %[[PRIVATE:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, 
!cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: %[[CONST_ONE:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ONE_BELOW_UB:.*]] = cir.binop(sub, %[[UB1_CAST]], %[[CONST_ONE]]) : !u64i +// CHECK-NEXT: cir.store %[[ONE_BELOW_UB]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(ge, %[[ITR_LOAD]], %[[LB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[PRIVATE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr, %[[ITR_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN8CtorDtorD1Ev(%[[STRIDE]]) : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[DEC:.*]] = cir.unary(dec, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[DEC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(OnePtr[A:B]) + ; +} + +void use(unsigned A, unsigned B) { + do_things(A, B); +} + diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp new file mode 100644 index 0000000000000..b988fc4f282d0 --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-NoOps.cpp @@ -0,0 +1,582 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +struct NoOps { int i = 0; }; + +template +void do_things(unsigned A, unsigned B) { + + T ***ThreePtr; +#pragma acc parallel private(ThreePtr) +// CHECK: acc.private.recipe @privatization__ZTSPPP5NoOps : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(ThreePtr[A]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSPPP5NoOps : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPP5NoOps : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// +// 
CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPPP5NoOps : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[INT_PTR_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: 
cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_PTR_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init Section. 
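+// Roughly (illustrative pseudo-C; the names are not taken from the generated
+// IR), the nested loops below default-construct every privatized element in
+// the requested bounds:
+//   for (k = LB3; k < UB3; ++k)
+//     for (j = LB2; j < UB2; ++j)
+//       for (i = LB1; i < UB1; ++i)
+//         new (&priv[k][j][i]) NoOps();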
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB3:.*]] = acc.get_lowerbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB3]] : index to !u64i +// CHECK-NEXT: %[[UB3:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB3_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB3]] : index to !u64i +// CHECK-NEXT: %[[ITR3:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB3_CAST]], %[[ITR3]] : !u64i, !cir.ptr + +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR3_LOAD]], %[[UB3_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR3_LOAD:.*]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, !cir.ptr>> +// CHECK-NEXT: %[[BOUND3_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>>, %[[ITR3_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND3_STRIDE_LOAD:.*]] = cir.load %[[BOUND3_STRIDE]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[BOUND2_STRIDE:.*]] = cir.ptr_stride(%[[BOUND3_STRIDE_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[BOUND2_STRIDE_LOAD:.*]] = cir.load %[[BOUND2_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[BOUND2_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : 
!cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR3_LOAD]] = cir.load %[[ITR3]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR3_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR3]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B][A:B]) + ; + + T **TwoPtr; +#pragma acc parallel private(TwoPtr) +// CHECK: acc.private.recipe @privatization__ZTSPP5NoOps : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(TwoPtr[A]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSPP5NoOps : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// 'init' section: +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. 
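+// Roughly (illustrative pseudo-C; names not from the generated IR): the
+// single-trip loop below just does  priv = &ptr_storage[0];  since only the
+// outer dimension is bounded, no NoOps elements are allocated or constructed
+// by this recipe.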
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(TwoPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPP5NoOps : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// 
CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_INT]]) : !u64i +// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Initialization Section. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB2:.*]] = acc.get_lowerbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB2]] : index to !u64i +// CHECK-NEXT: %[[UB2:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB2_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB2]] : index to !u64i +// CHECK-NEXT: %[[ITR2:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB2_CAST]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR2_LOAD]], %[[UB2_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// +// CHECK-NEXT: %[[ITR2_LOAD:.*]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>, !cir.ptr> +// CHECK-NEXT: %[[TLA_STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr>, %[[ITR2_LOAD]] : !u64i), !cir.ptr> +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR1:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR1]] : !u64i, !cir.ptr +// +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR1_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR1_LOAD:.*]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_STRIDE_LOAD:.*]] = cir.load %[[TLA_STRIDE]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_STRIDE_LOAD]] : !cir.ptr, %[[ITR1_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR1_LOAD]] = cir.load %[[ITR1]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR1_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR1]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR2_LOAD]] = cir.load %[[ITR2]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR2_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR2]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(TwoPtr[B][A:B]) + ; +#pragma acc parallel private(TwoPtr[A:B][A:B]) + ; + + T *OnePtr; +#pragma acc parallel private(OnePtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSP5NoOps : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(OnePtr[B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSP5NoOps : 
!cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// 'init' section: +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_NOOPS:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_NOOPS]]) : !u64i +// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !rec_NoOps, !cir.ptr, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// Init Section. 
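+// Roughly (illustrative pseudo-C; names not from the generated IR), the loop
+// below runs the NoOps default constructor over the privatized elements:
+//   for (i = LB1; i < UB1; ++i) new (&priv[i]) NoOps();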
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[LB1:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[LB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB1]] : index to !u64i +// CHECK-NEXT: %[[UB1:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UB1_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB1]] : index to !u64i +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["iter"] {alignment = 8 : i64} +// CHECK-NEXT: cir.store %[[LB1_CAST]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB1_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[COND]]) +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[TLA_LOAD:.*]] = cir.load %[[TOP_LEVEL_ALLOCA]] : !cir.ptr>, !cir.ptr +// CHECK-NEXT: %[[STRIDE:.*]] = cir.ptr_stride(%[[TLA_LOAD]] : !cir.ptr, %[[ITR_LOAD]] : !u64i), !cir.ptr +// CHECK-NEXT: cir.call @_ZN5NoOpsC1Ev(%[[STRIDE]]) nothrow : (!cir.ptr) -> () +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(OnePtr[A:B]) + ; +} + +void use(unsigned A, unsigned B) { + do_things(A, B); +} + diff --git a/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp new file mode 100644 index 0000000000000..c87e1a6e8a89b --- /dev/null +++ b/clang/test/CIR/CodeGenOpenACC/private-clause-pointer-recipes-int.cpp @@ -0,0 +1,428 @@ +// RUN: %clang_cc1 -fopenacc -triple x86_64-linux-gnu -Wno-openacc-self-if-potential-conflict -emit-cir -fclangir -triple x86_64-linux-pc %s -o - | FileCheck %s + +template +void do_things(unsigned A, unsigned B) { + + T ***ThreePtr; +#pragma acc parallel private(ThreePtr) +// CHECK: acc.private.recipe @privatization__ZTSPPPi : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(ThreePtr[A]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSPPPi : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPPi : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} + +// Copy array pointer to the original alloca. 
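+// Roughly (illustrative pseudo-C; names not from the generated IR), the two
+// copy loops below wire the freshly allocated levels together:
+//   priv        = &mid_ptrs[0];               // int*** -> array of int**
+//   mid_ptrs[j] = &inner_ptrs[j * inner_n];   // each int** -> its int* slice
+// i.e. one contiguous allocation per pointer level.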
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } + +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B]) + 
; +#pragma acc parallel private(ThreePtr[B][B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt3__ZTSPPPi : !cir.ptr>>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND3:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>>, !cir.ptr>>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[INT_PTR_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND3]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[SRC_IDX]] : !u64i), !cir.ptr>> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>>, !cir.ptr>>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_PTR_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_PTR_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_PTR_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. 
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_VLA_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_3:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS_2:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[NUM_ELTS]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS_2]], %[[SIZEOF_INT]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[NUM_ELTS]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_3]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_PTR_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(ThreePtr[B][B][A:B]) + ; +#pragma acc parallel private(ThreePtr[B][A:B][A:B]) + ; +#pragma acc parallel private(ThreePtr[A:B][A:B][A:B]) + ; + + T 
**TwoPtr; +#pragma acc parallel private(TwoPtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPPi : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(TwoPtr[A]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt1__ZTSPPi : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// 'init' section: +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_INT_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT_PTR]]) : !u64i +// CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// Copy array pointer to the original alloca. +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(TwoPtr[B][B]) +// CHECK-NEXT: acc.private.recipe @privatization__Bcnt2__ZTSPPi : !cir.ptr>> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}, %[[BOUND2:.*]]: !acc.data_bounds_ty {{.*}}): +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr>, !cir.ptr>>, ["openacc.private.init"] {alignment = 8 : i64} +// +// CHECK-NEXT: %[[INT_PTR_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND2]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_PTR:.*]] = cir.const #cir.int<8> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_PTR]]) : !u64i +// 
CHECK-NEXT: %[[INT_PTR_VLA_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 8 : i64} +// +// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>, %[[SRC_IDX]] : !u64i), !cir.ptr> +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>>, %[[ITR_LOAD]] : !u64i), !cir.ptr>> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr>, !cir.ptr>> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST_2:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[NUM_ELTS:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[UPPER_BOUND_CAST]]) : !u64i +// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[NUM_ELTS]], %[[SIZEOF_INT]]) : !u64i +// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. 
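+// Roughly (illustrative pseudo-C; names not from the generated IR), the loop
+// below points each privatized row pointer at its slice of the contiguous
+// int allocation:
+//   row_ptrs[j] = &elements[j * row_len];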
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_BOUND_CAST]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST_2]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[INT_PTR_VLA_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(TwoPtr[B][A:B]) + ; +#pragma acc parallel private(TwoPtr[A:B][A:B]) + ; + + T *OnePtr; +#pragma acc parallel private(OnePtr) +// CHECK-NEXT: acc.private.recipe @privatization__ZTSPi : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}): +// CHECK-NEXT: cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] {alignment = 8 : i64} +// CHECK-NEXT: acc.yield +// CHECK-NEXT:} + ; +#pragma acc parallel private(OnePtr[B]) +// CHECK: acc.private.recipe @privatization__Bcnt1__ZTSPi : !cir.ptr> init { +// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty {{.*}}): +// 'init' section: +// CHECK-NEXT: %[[TOP_LEVEL_ALLOCA:.*]] = cir.alloca !cir.ptr, !cir.ptr>, ["openacc.private.init"] +// +// CHECK-NEXT: %[[INT_PTR_UPPER_BOUND:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index +// CHECK-NEXT: %[[UPPER_BOUND_CAST:.*]] = builtin.unrealized_conversion_cast %[[INT_PTR_UPPER_BOUND]] : index to !u64i +// CHECK-NEXT: %[[SIZEOF_INT:.*]] = cir.const #cir.int<4> : !u64i +// CHECK-NEXT: %[[CALC_ALLOCA_SIZE:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[SIZEOF_INT]]) : !u64i +// CHECK-NEXT: %[[INT_VLA_ALLOCA:.*]] = cir.alloca !s32i, !cir.ptr, %[[CALC_ALLOCA_SIZE]] : !u64i, ["openacc.init.bounds"] {alignment = 4 : i64} +// +// Copy array pointer to the original alloca. 
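+// Roughly (illustrative pseudo-C; names not from the generated IR): because
+// the outermost level is a single pointer rather than an array of pointers,
+// the loop below runs exactly once and amounts to  priv = &elements[0];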
+// CHECK-NEXT: cir.scope { +// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr, ["itr"] {alignment = 8 : i64} +// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !u64i +// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.for : cond { +// CHECK-NEXT: %[[UPPER_LIMIT:.*]] = cir.const #cir.int<1> : !u64i +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UPPER_LIMIT]]) : !u64i, !cir.bool +// CHECK-NEXT: cir.condition(%[[CMP]]) +// +// CHECK-NEXT: } body { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[SRC_IDX:.*]] = cir.binop(mul, %[[UPPER_BOUND_CAST]], %[[ITR_LOAD]]) : !u64i +// CHECK-NEXT: %[[SRC_STRIDE:.*]] = cir.ptr_stride(%[[INT_VLA_ALLOCA]] : !cir.ptr, %[[SRC_IDX]] : !u64i), !cir.ptr +// CHECK-NEXT: %[[DEST_STRIDE:.*]] = cir.ptr_stride(%[[TOP_LEVEL_ALLOCA]] : !cir.ptr>, %[[ITR_LOAD]] : !u64i), !cir.ptr> +// CHECK-NEXT: cir.store %[[SRC_STRIDE]], %[[DEST_STRIDE]] : !cir.ptr, !cir.ptr> +// CHECK-NEXT: cir.yield +// +// CHECK-NEXT: } step { +// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr, !u64i +// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i +// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr +// CHECK-NEXT: cir.yield +// CHECK-NEXT: } +// CHECK-NEXT: } +// CHECK-NEXT: acc.yield +// CHECK-NEXT: } + ; +#pragma acc parallel private(OnePtr[A:B]) + ; +} + +void use(unsigned A, unsigned B) { + do_things(A, B); +} + diff --git a/clang/test/CIR/CodeGenOpenACC/serial.c b/clang/test/CIR/CodeGenOpenACC/serial.c index 9e3359141838f..aae4a92b13b0e 100644 --- a/clang/test/CIR/CodeGenOpenACC/serial.c +++ b/clang/test/CIR/CodeGenOpenACC/serial.c @@ -29,7 +29,7 @@ void acc_serial(int cond) { // CHECK-NEXT: cir.scope { // CHECK-NEXT: cir.while { // CHECK-NEXT: %[[INT:.*]] = cir.const #cir.int<1> - // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[INT]] : + // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[INT]] // CHECK-NEXT: cir.condition(%[[CAST]]) // CHECK-NEXT: } do { // CHECK-NEXT: cir.yield @@ -49,7 +49,7 @@ void acc_serial(int cond) { #pragma acc serial self(cond) {} // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.serial self(%[[CONV_CAST]]) { // CHECK-NEXT: acc.yield @@ -58,7 +58,7 @@ void acc_serial(int cond) { #pragma acc serial self(0) {} // CHECK-NEXT: %[[ZERO_LITERAL:.*]] = cir.const #cir.int<0> : !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ZERO_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ZERO_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.serial self(%[[CONV_CAST]]) { // CHECK-NEXT: acc.yield @@ -67,7 +67,7 @@ void acc_serial(int cond) { #pragma acc serial if(cond) {} // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = 
builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.serial if(%[[CONV_CAST]]) { // CHECK-NEXT: acc.yield @@ -76,7 +76,7 @@ void acc_serial(int cond) { #pragma acc serial if(1) {} // CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.serial if(%[[CONV_CAST]]) { // CHECK-NEXT: acc.yield diff --git a/clang/test/CIR/CodeGenOpenACC/set.c b/clang/test/CIR/CodeGenOpenACC/set.c index 0b87f42603776..b8030dfd9d883 100644 --- a/clang/test/CIR/CodeGenOpenACC/set.c +++ b/clang/test/CIR/CodeGenOpenACC/set.c @@ -26,7 +26,7 @@ void acc_set(int cond) { // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i // CHECK-NEXT: %[[COND_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_LOAD]] : !s32i to si32 // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.set device_num(%[[COND_CONV]] : si32) if(%[[BOOL_CONV]]) @@ -36,7 +36,7 @@ void acc_set(int cond) { // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i // CHECK-NEXT: %[[COND_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_LOAD]] : !s32i to si32 // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.set default_async(%[[ONE_CONV]] : si32) device_num(%[[COND_CONV]] : si32) if(%[[BOOL_CONV]]) attributes {device_type = #acc.device_type} diff --git a/clang/test/CIR/CodeGenOpenACC/shutdown.c b/clang/test/CIR/CodeGenOpenACC/shutdown.c index b68ef90e07252..8c27fa6c2d544 100644 --- a/clang/test/CIR/CodeGenOpenACC/shutdown.c +++ b/clang/test/CIR/CodeGenOpenACC/shutdown.c @@ -18,13 +18,13 @@ void acc_shutdown(int cond) { #pragma acc shutdown if(cond) // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.shutdown if(%[[BOOL_CONV]]) #pragma acc shutdown if(1) // CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i - // CHECK-NEXT: %[[ONE_TO_BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[ONE_LITERAL]] : !s32i), !cir.bool + // CHECK-NEXT: %[[ONE_TO_BOOL_CAST:.*]] = cir.cast int_to_bool %[[ONE_LITERAL]] : !s32i -> !cir.bool // CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[ONE_TO_BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.shutdown if(%[[BOOL_CONV]]) @@ -40,7 +40,7 @@ void acc_shutdown(int cond) { #pragma acc shutdown if(cond) 
device_num(cond) device_type(*) // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[COND_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[BOOL_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_CAST]] : !cir.bool to i1 // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i // CHECK-NEXT: %[[COND_CONV:.*]] = builtin.unrealized_conversion_cast %[[COND_LOAD]] : !s32i to si32 diff --git a/clang/test/CIR/CodeGenOpenACC/wait.c b/clang/test/CIR/CodeGenOpenACC/wait.c index aeda8b955a6d0..8be8665923c59 100644 --- a/clang/test/CIR/CodeGenOpenACC/wait.c +++ b/clang/test/CIR/CodeGenOpenACC/wait.c @@ -10,7 +10,7 @@ void acc_wait(int cond) { #pragma acc wait if (cond) // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: acc.wait if(%[[CONV_CAST]]) @@ -37,7 +37,7 @@ void acc_wait(int cond) { #pragma acc wait(queues:1) if (cond) // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE_LITERAL]] : !s32i to si32 @@ -54,7 +54,7 @@ void acc_wait(int cond) { #pragma acc wait(devnum:1: 2, 3) if (cond) // CHECK-NEXT: %[[COND_LOAD:.*]] = cir.load{{.*}} %[[COND]] : !cir.ptr, !s32i - // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast(int_to_bool, %[[COND_LOAD]] : !s32i), !cir.bool + // CHECK-NEXT: %[[BOOL_CAST:.*]] = cir.cast int_to_bool %[[COND_LOAD]] : !s32i -> !cir.bool // CHECK-NEXT: %[[CONV_CAST:.*]] = builtin.unrealized_conversion_cast %[[BOOL_CAST]] : !cir.bool to i1 // CHECK-NEXT: %[[ONE_LITERAL:.*]] = cir.const #cir.int<1> : !s32i // CHECK-NEXT: %[[ONE_CAST:.*]] = builtin.unrealized_conversion_cast %[[ONE_LITERAL]] : !s32i to si32 diff --git a/clang/test/CIR/IR/alloca.cir b/clang/test/CIR/IR/alloca.cir index 12f7e6ac6a914..d94da815f37a7 100644 --- a/clang/test/CIR/IR/alloca.cir +++ b/clang/test/CIR/IR/alloca.cir @@ -1,5 +1,5 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !u64i = !cir.int !u8i = !cir.int @@ -12,7 +12,7 @@ module { %2 = cir.load align(8) %0 : !cir.ptr, !u64i // Dynamically sized alloca %3 = cir.alloca !u8i, !cir.ptr, %2 : !u64i, ["bi_alloca"] {alignment = 16 : i64} - %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr + %4 = cir.cast bitcast %3 : !cir.ptr -> !cir.ptr cir.store %4, %1 : !cir.ptr, !cir.ptr> %5 = cir.load %1 : !cir.ptr>, !cir.ptr cir.return %5 : !cir.ptr @@ -24,7 +24,7 @@ module { // CHECK: cir.store %arg0, %0 : !u64i, !cir.ptr // CHECK: %2 = cir.load align(8) %0 : !cir.ptr, !u64i // CHECK: %3 = cir.alloca !u8i, !cir.ptr, %2 : !u64i, ["bi_alloca"] {alignment = 16 : i64} - // CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr + // CHECK: %4 = cir.cast bitcast %3 : 
!cir.ptr -> !cir.ptr // CHECK: cir.store %4, %1 : !cir.ptr, !cir.ptr> // CHECK: %5 = cir.load %1 : !cir.ptr>, !cir.ptr // CHECK: cir.return %5 : !cir.ptr diff --git a/clang/test/CIR/IR/array-ctor.cir b/clang/test/CIR/IR/array-ctor.cir index 2378992bbd9fc..fd2ec7eb93c23 100644 --- a/clang/test/CIR/IR/array-ctor.cir +++ b/clang/test/CIR/IR/array-ctor.cir @@ -1,5 +1,5 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !u8i = !cir.int !rec_S = !cir.record diff --git a/clang/test/CIR/IR/array-dtor.cir b/clang/test/CIR/IR/array-dtor.cir index 6d08d1639f0db..1bb9ff9169a9d 100644 --- a/clang/test/CIR/IR/array-dtor.cir +++ b/clang/test/CIR/IR/array-dtor.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !u8i = !cir.int !rec_S = !cir.record diff --git a/clang/test/CIR/IR/array.cir b/clang/test/CIR/IR/array.cir index bba536062d740..ddc6b92b11ee9 100644 --- a/clang/test/CIR/IR/array.cir +++ b/clang/test/CIR/IR/array.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int diff --git a/clang/test/CIR/IR/atomic.cir b/clang/test/CIR/IR/atomic.cir index 6ca5af2aac175..85207633a5294 100644 --- a/clang/test/CIR/IR/atomic.cir +++ b/clang/test/CIR/IR/atomic.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int !u32i = !cir.int diff --git a/clang/test/CIR/IR/binassign.cir b/clang/test/CIR/IR/binassign.cir index a25729635094e..02471264d779e 100644 --- a/clang/test/CIR/IR/binassign.cir +++ b/clang/test/CIR/IR/binassign.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | cir-opt | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int !s8i = !cir.int @@ -12,7 +12,7 @@ module { %4 = cir.const #true cir.store %4, %0 : !cir.bool, !cir.ptr %5 = cir.const #cir.int<65> : !s32i - %6 = cir.cast(integral, %5 : !s32i), !s8i + %6 = cir.cast integral %5 : !s32i -> !s8i cir.store %6, %1 : !s8i, !cir.ptr %7 = cir.const #cir.fp<3.140000e+00> : !cir.float cir.store %7, %2 : !cir.float, !cir.ptr @@ -34,7 +34,7 @@ module { // CHECK: %4 = cir.const #true // CHECK: cir.store %4, %0 : !cir.bool, !cir.ptr // CHECK: %5 = cir.const #cir.int<65> : !s32i -// CHECK: %6 = cir.cast(integral, %5 : !s32i), !s8i +// CHECK: %6 = cir.cast integral %5 : !s32i -> !s8i // CHECK: cir.store %6, %1 : !s8i, !cir.ptr // CHECK: %7 = cir.const #cir.fp<3.140000e+00> : !cir.float // CHECK: cir.store %7, %2 : !cir.float, !cir.ptr diff --git a/clang/test/CIR/IR/bitfield_info.cir b/clang/test/CIR/IR/bitfield_info.cir index 682e0903fd552..2d743fbfbf595 100644 --- a/clang/test/CIR/IR/bitfield_info.cir +++ b/clang/test/CIR/IR/bitfield_info.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int !u32i = !cir.int diff --git a/clang/test/CIR/IR/call.cir b/clang/test/CIR/IR/call.cir index 9607df7202e0f..59f28be36846f 100644 --- a/clang/test/CIR/IR/call.cir +++ b/clang/test/CIR/IR/call.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int diff --git a/clang/test/CIR/IR/cast.cir b/clang/test/CIR/IR/cast.cir index a335887de7ec7..3f2fca9fc307b 100644 --- a/clang/test/CIR/IR/cast.cir +++ b/clang/test/CIR/IR/cast.cir @@ -1,23 +1,23 @@ -// RUN: cir-opt %s | cir-opt | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int module { cir.func @yolo(%arg0 : 
!s32i) { - %a = cir.cast (int_to_bool, %arg0 : !s32i), !cir.bool + %a = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool %0 = cir.const #cir.int<0> : !s32i cir.return } cir.func @bitcast(%p: !cir.ptr) { - %0 = cir.cast(bitcast, %p : !cir.ptr), !cir.ptr + %0 = cir.cast bitcast %p : !cir.ptr -> !cir.ptr cir.return } } // CHECK: cir.func{{.*}} @yolo(%arg0: !s32i) -// CHECK: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK: %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool // CHECK: %1 = cir.const #cir.int<0> : !s32i // CHECK: cir.func{{.*}} @bitcast -// CHECK: %0 = cir.cast(bitcast, %arg0 : !cir.ptr), !cir.ptr +// CHECK: %0 = cir.cast bitcast %arg0 : !cir.ptr -> !cir.ptr diff --git a/clang/test/CIR/IR/cmp.cir b/clang/test/CIR/IR/cmp.cir index 818527189af01..0d473986df1c2 100644 --- a/clang/test/CIR/IR/cmp.cir +++ b/clang/test/CIR/IR/cmp.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | cir-opt | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int !u32i = !cir.int @@ -274,39 +274,39 @@ module { cir.store %arg0, %0 : !cir.bool, !cir.ptr cir.store %arg1, %1 : !cir.bool, !cir.ptr %3 = cir.load %0 : !cir.ptr, !cir.bool - %4 = cir.cast(bool_to_int, %3 : !cir.bool), !s32i + %4 = cir.cast bool_to_int %3 : !cir.bool -> !s32i %5 = cir.load %1 : !cir.ptr, !cir.bool - %6 = cir.cast(bool_to_int, %5 : !cir.bool), !s32i + %6 = cir.cast bool_to_int %5 : !cir.bool -> !s32i %7 = cir.cmp(gt, %4, %6) : !s32i, !cir.bool cir.store %7, %2 : !cir.bool, !cir.ptr %8 = cir.load %0 : !cir.ptr, !cir.bool - %9 = cir.cast(bool_to_int, %8 : !cir.bool), !s32i + %9 = cir.cast bool_to_int %8 : !cir.bool -> !s32i %10 = cir.load %1 : !cir.ptr, !cir.bool - %11 = cir.cast(bool_to_int, %10 : !cir.bool), !s32i + %11 = cir.cast bool_to_int %10 : !cir.bool -> !s32i %12 = cir.cmp(lt, %9, %11) : !s32i, !cir.bool cir.store %12, %2 : !cir.bool, !cir.ptr %13 = cir.load %0 : !cir.ptr, !cir.bool - %14 = cir.cast(bool_to_int, %13 : !cir.bool), !s32i + %14 = cir.cast bool_to_int %13 : !cir.bool -> !s32i %15 = cir.load %1 : !cir.ptr, !cir.bool - %16 = cir.cast(bool_to_int, %15 : !cir.bool), !s32i + %16 = cir.cast bool_to_int %15 : !cir.bool -> !s32i %17 = cir.cmp(ge, %14, %16) : !s32i, !cir.bool cir.store %17, %2 : !cir.bool, !cir.ptr %18 = cir.load %0 : !cir.ptr, !cir.bool - %19 = cir.cast(bool_to_int, %18 : !cir.bool), !s32i + %19 = cir.cast bool_to_int %18 : !cir.bool -> !s32i %20 = cir.load %1 : !cir.ptr, !cir.bool - %21 = cir.cast(bool_to_int, %20 : !cir.bool), !s32i + %21 = cir.cast bool_to_int %20 : !cir.bool -> !s32i %22 = cir.cmp(le, %19, %21) : !s32i, !cir.bool cir.store %22, %2 : !cir.bool, !cir.ptr %23 = cir.load %0 : !cir.ptr, !cir.bool - %24 = cir.cast(bool_to_int, %23 : !cir.bool), !s32i + %24 = cir.cast bool_to_int %23 : !cir.bool -> !s32i %25 = cir.load %1 : !cir.ptr, !cir.bool - %26 = cir.cast(bool_to_int, %25 : !cir.bool), !s32i + %26 = cir.cast bool_to_int %25 : !cir.bool -> !s32i %27 = cir.cmp(eq, %24, %26) : !s32i, !cir.bool cir.store %27, %2 : !cir.bool, !cir.ptr %28 = cir.load %0 : !cir.ptr, !cir.bool - %29 = cir.cast(bool_to_int, %28 : !cir.bool), !s32i + %29 = cir.cast bool_to_int %28 : !cir.bool -> !s32i %30 = cir.load %1 : !cir.ptr, !cir.bool - %31 = cir.cast(bool_to_int, %30 : !cir.bool), !s32i + %31 = cir.cast bool_to_int %30 : !cir.bool -> !s32i %32 = cir.cmp(ne, %29, %31) : !s32i, !cir.bool cir.store %32, %2 : !cir.bool, !cir.ptr cir.return @@ -319,39 +319,39 @@ module { // CHECK-NEXT: cir.store %arg0, %0 : !cir.bool, !cir.ptr // CHECK-NEXT: cir.store %arg1, %1 : !cir.bool, 
!cir.ptr // CHECK-NEXT: %3 = cir.load %0 : !cir.ptr, !cir.bool - // CHECK-NEXT: %4 = cir.cast(bool_to_int, %3 : !cir.bool), !s32i + // CHECK-NEXT: %4 = cir.cast bool_to_int %3 : !cir.bool -> !s32i // CHECK-NEXT: %5 = cir.load %1 : !cir.ptr, !cir.bool - // CHECK-NEXT: %6 = cir.cast(bool_to_int, %5 : !cir.bool), !s32i + // CHECK-NEXT: %6 = cir.cast bool_to_int %5 : !cir.bool -> !s32i // CHECK-NEXT: %7 = cir.cmp(gt, %4, %6) : !s32i, !cir.bool // CHECK-NEXT: cir.store %7, %2 : !cir.bool, !cir.ptr // CHECK-NEXT: %8 = cir.load %0 : !cir.ptr, !cir.bool - // CHECK-NEXT: %9 = cir.cast(bool_to_int, %8 : !cir.bool), !s32i + // CHECK-NEXT: %9 = cir.cast bool_to_int %8 : !cir.bool -> !s32i // CHECK-NEXT: %10 = cir.load %1 : !cir.ptr, !cir.bool - // CHECK-NEXT: %11 = cir.cast(bool_to_int, %10 : !cir.bool), !s32i + // CHECK-NEXT: %11 = cir.cast bool_to_int %10 : !cir.bool -> !s32i // CHECK-NEXT: %12 = cir.cmp(lt, %9, %11) : !s32i, !cir.bool // CHECK-NEXT: cir.store %12, %2 : !cir.bool, !cir.ptr // CHECK-NEXT: %13 = cir.load %0 : !cir.ptr, !cir.bool - // CHECK-NEXT: %14 = cir.cast(bool_to_int, %13 : !cir.bool), !s32i + // CHECK-NEXT: %14 = cir.cast bool_to_int %13 : !cir.bool -> !s32i // CHECK-NEXT: %15 = cir.load %1 : !cir.ptr, !cir.bool - // CHECK-NEXT: %16 = cir.cast(bool_to_int, %15 : !cir.bool), !s32i + // CHECK-NEXT: %16 = cir.cast bool_to_int %15 : !cir.bool -> !s32i // CHECK-NEXT: %17 = cir.cmp(ge, %14, %16) : !s32i, !cir.bool // CHECK-NEXT: cir.store %17, %2 : !cir.bool, !cir.ptr // CHECK-NEXT: %18 = cir.load %0 : !cir.ptr, !cir.bool - // CHECK-NEXT: %19 = cir.cast(bool_to_int, %18 : !cir.bool), !s32i + // CHECK-NEXT: %19 = cir.cast bool_to_int %18 : !cir.bool -> !s32i // CHECK-NEXT: %20 = cir.load %1 : !cir.ptr, !cir.bool - // CHECK-NEXT: %21 = cir.cast(bool_to_int, %20 : !cir.bool), !s32i + // CHECK-NEXT: %21 = cir.cast bool_to_int %20 : !cir.bool -> !s32i // CHECK-NEXT: %22 = cir.cmp(le, %19, %21) : !s32i, !cir.bool // CHECK-NEXT: cir.store %22, %2 : !cir.bool, !cir.ptr // CHECK-NEXT: %23 = cir.load %0 : !cir.ptr, !cir.bool - // CHECK-NEXT: %24 = cir.cast(bool_to_int, %23 : !cir.bool), !s32i + // CHECK-NEXT: %24 = cir.cast bool_to_int %23 : !cir.bool -> !s32i // CHECK-NEXT: %25 = cir.load %1 : !cir.ptr, !cir.bool - // CHECK-NEXT: %26 = cir.cast(bool_to_int, %25 : !cir.bool), !s32i + // CHECK-NEXT: %26 = cir.cast bool_to_int %25 : !cir.bool -> !s32i // CHECK-NEXT: %27 = cir.cmp(eq, %24, %26) : !s32i, !cir.bool // CHECK-NEXT: cir.store %27, %2 : !cir.bool, !cir.ptr // CHECK-NEXT: %28 = cir.load %0 : !cir.ptr, !cir.bool - // CHECK-NEXT: %29 = cir.cast(bool_to_int, %28 : !cir.bool), !s32i + // CHECK-NEXT: %29 = cir.cast bool_to_int %28 : !cir.bool -> !s32i // CHECK-NEXT: %30 = cir.load %1 : !cir.ptr, !cir.bool - // CHECK-NEXT: %31 = cir.cast(bool_to_int, %30 : !cir.bool), !s32i + // CHECK-NEXT: %31 = cir.cast bool_to_int %30 : !cir.bool -> !s32i // CHECK-NEXT: %32 = cir.cmp(ne, %29, %31) : !s32i, !cir.bool // CHECK-NEXT: cir.store %32, %2 : !cir.bool, !cir.ptr // CHECK-NEXT: cir.return diff --git a/clang/test/CIR/IR/complex.cir b/clang/test/CIR/IR/complex.cir index a73a8654ca274..a7e0c77696d66 100644 --- a/clang/test/CIR/IR/complex.cir +++ b/clang/test/CIR/IR/complex.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int diff --git a/clang/test/CIR/IR/copy.cir b/clang/test/CIR/IR/copy.cir index 2cfb25d82b278..f9db29aa0e01f 100644 --- a/clang/test/CIR/IR/copy.cir +++ b/clang/test/CIR/IR/copy.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt 
%s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int module { diff --git a/clang/test/CIR/IR/func.cir b/clang/test/CIR/IR/func.cir index 0e9a92fcf8201..9532859587629 100644 --- a/clang/test/CIR/IR/func.cir +++ b/clang/test/CIR/IR/func.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int !s64i = !cir.int diff --git a/clang/test/CIR/IR/global-init.cir b/clang/test/CIR/IR/global-init.cir new file mode 100644 index 0000000000000..2fd25df4e050b --- /dev/null +++ b/clang/test/CIR/IR/global-init.cir @@ -0,0 +1,48 @@ +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s + +!u8i = !cir.int + +!rec_NeedsCtor = !cir.record +!rec_NeedsDtor = !cir.record +!rec_NeedsCtorDtor = !cir.record + +module attributes {cir.triple = "x86_64-unknown-linux-gnu"} { + cir.func private @_ZN9NeedsCtorC1Ev(!cir.ptr) + cir.global external @needsCtor = ctor : !rec_NeedsCtor { + %0 = cir.get_global @needsCtor : !cir.ptr + cir.call @_ZN9NeedsCtorC1Ev(%0) : (!cir.ptr) -> () + } + // CHECK: cir.global external @needsCtor = ctor : !rec_NeedsCtor { + // CHECK: %0 = cir.get_global @needsCtor : !cir.ptr + // CHECK: cir.call @_ZN9NeedsCtorC1Ev(%0) : (!cir.ptr) -> () + // CHECK: } + + cir.func private @_ZN9NeedsDtorD1Ev(!cir.ptr) + cir.global external dso_local @needsDtor = #cir.zero : !rec_NeedsDtor dtor { + %0 = cir.get_global @needsDtor : !cir.ptr + cir.call @_ZN9NeedsDtorD1Ev(%0) : (!cir.ptr) -> () + } + // CHECK: cir.global external dso_local @needsDtor = #cir.zero : !rec_NeedsDtor dtor { + // CHECK: %0 = cir.get_global @needsDtor : !cir.ptr + // CHECK: cir.call @_ZN9NeedsDtorD1Ev(%0) : (!cir.ptr) -> () + // CHECK: } + + cir.func private @_ZN13NeedsCtorDtorC1Ev(!cir.ptr) + cir.func private @_ZN13NeedsCtorDtorD1Ev(!cir.ptr) + cir.global external dso_local @needsCtorDtor = ctor : !rec_NeedsCtorDtor { + %0 = cir.get_global @needsCtorDtor : !cir.ptr + cir.call @_ZN13NeedsCtorDtorC1Ev(%0) : (!cir.ptr) -> () + } dtor { + %0 = cir.get_global @needsCtorDtor : !cir.ptr + cir.call @_ZN13NeedsCtorDtorD1Ev(%0) : (!cir.ptr) -> () + } + // CHECK: cir.func private @_ZN13NeedsCtorDtorC1Ev(!cir.ptr) + // CHECK: cir.func private @_ZN13NeedsCtorDtorD1Ev(!cir.ptr) + // CHECK: cir.global external dso_local @needsCtorDtor = ctor : !rec_NeedsCtorDtor { + // CHECK: %0 = cir.get_global @needsCtorDtor : !cir.ptr + // CHECK: cir.call @_ZN13NeedsCtorDtorC1Ev(%0) : (!cir.ptr) -> () + // CHECK: } dtor { + // CHECK: %0 = cir.get_global @needsCtorDtor : !cir.ptr + // CHECK: cir.call @_ZN13NeedsCtorDtorD1Ev(%0) : (!cir.ptr) -> () + // CHECK: } +} diff --git a/clang/test/CIR/IR/global-var-linkage.cir b/clang/test/CIR/IR/global-var-linkage.cir index e1b7de4bb2156..df74e3825e967 100644 --- a/clang/test/CIR/IR/global-var-linkage.cir +++ b/clang/test/CIR/IR/global-var-linkage.cir @@ -1,5 +1,4 @@ -// RUN: cir-opt %s -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int diff --git a/clang/test/CIR/IR/global.cir b/clang/test/CIR/IR/global.cir index 28fad6bbf4471..0464db822448e 100644 --- a/clang/test/CIR/IR/global.cir +++ b/clang/test/CIR/IR/global.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s -o - | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s8i = !cir.int !s16i = !cir.int diff --git a/clang/test/CIR/IR/label.cir b/clang/test/CIR/IR/label.cir index 2211a4e8da331..1049766e7ce69 100644 --- a/clang/test/CIR/IR/label.cir +++ b/clang/test/CIR/IR/label.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | 
FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int diff --git a/clang/test/CIR/IR/module.cir b/clang/test/CIR/IR/module.cir index 7ce2c0ba21cb0..8c782fdb2dbc6 100644 --- a/clang/test/CIR/IR/module.cir +++ b/clang/test/CIR/IR/module.cir @@ -1,5 +1,4 @@ -// RUN: cir-opt %s -split-input-file -o %t.cir -// RUN: FileCheck --input-file=%t.cir %s +// RUN: cir-opt %s -split-input-file --verify-roundtrip | FileCheck %s // Should parse and print C source language attribute. module attributes {cir.lang = #cir.lang} { } diff --git a/clang/test/CIR/IR/stack-save-restore.cir b/clang/test/CIR/IR/stack-save-restore.cir index f98889ac1083a..476f2120a079d 100644 --- a/clang/test/CIR/IR/stack-save-restore.cir +++ b/clang/test/CIR/IR/stack-save-restore.cir @@ -1,6 +1,6 @@ // Test the CIR operations can parse and print correctly (roundtrip) -// RUN: cir-opt %s | cir-opt | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !u8i = !cir.int diff --git a/clang/test/CIR/IR/struct.cir b/clang/test/CIR/IR/struct.cir index 33f2e9860c5cb..2e011fba36b26 100644 --- a/clang/test/CIR/IR/struct.cir +++ b/clang/test/CIR/IR/struct.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !u8i = !cir.int !u16i = !cir.int diff --git a/clang/test/CIR/IR/switch-flat.cir b/clang/test/CIR/IR/switch-flat.cir index 8c11a74484d39..d39c3e7e81215 100644 --- a/clang/test/CIR/IR/switch-flat.cir +++ b/clang/test/CIR/IR/switch-flat.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int cir.func @FlatSwitchWithoutDefault(%arg0: !s32i) { diff --git a/clang/test/CIR/IR/switch.cir b/clang/test/CIR/IR/switch.cir index 0bdc9c1e7e896..87d45bf1f5219 100644 --- a/clang/test/CIR/IR/switch.cir +++ b/clang/test/CIR/IR/switch.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int cir.func @s0() { diff --git a/clang/test/CIR/IR/ternary.cir b/clang/test/CIR/IR/ternary.cir index e419c7f5af40c..78e1de4eea8f1 100644 --- a/clang/test/CIR/IR/ternary.cir +++ b/clang/test/CIR/IR/ternary.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | cir-opt | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !u32i = !cir.int module { diff --git a/clang/test/CIR/IR/throw.cir b/clang/test/CIR/IR/throw.cir index 8b24b481057b1..e7a1bf4f2f283 100644 --- a/clang/test/CIR/IR/throw.cir +++ b/clang/test/CIR/IR/throw.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int diff --git a/clang/test/CIR/IR/unary.cir b/clang/test/CIR/IR/unary.cir index ba3bc20d574f5..d01d4eb3c920a 100644 --- a/clang/test/CIR/IR/unary.cir +++ b/clang/test/CIR/IR/unary.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int !s64i = !cir.int diff --git a/clang/test/CIR/IR/vector.cir b/clang/test/CIR/IR/vector.cir index 6d8e5beffd63f..d274c35099ee5 100644 --- a/clang/test/CIR/IR/vector.cir +++ b/clang/test/CIR/IR/vector.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !s32i = !cir.int diff --git a/clang/test/CIR/IR/vtable-addrpt.cir b/clang/test/CIR/IR/vtable-addrpt.cir index 0b809cc2506e6..7c8fa8d5ebe18 100644 --- a/clang/test/CIR/IR/vtable-addrpt.cir +++ b/clang/test/CIR/IR/vtable-addrpt.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s 
--verify-roundtrip | FileCheck %s // Test the parsing and printing of a constructor that uses a vtable addess_point op. @@ -14,7 +14,7 @@ module { cir.store %arg0, %0 : !cir.ptr, !cir.ptr> %1 = cir.load %0 : !cir.ptr>, !cir.ptr %2 = cir.vtable.address_point(@_ZTV1S, address_point = ) : !cir.vptr - %3 = cir.cast(bitcast, %1 : !cir.ptr), !cir.ptr + %3 = cir.cast bitcast %1 : !cir.ptr -> !cir.ptr cir.store align(8) %2, %3 : !cir.vptr, !cir.ptr cir.return } diff --git a/clang/test/CIR/IR/vtable-attr.cir b/clang/test/CIR/IR/vtable-attr.cir index 3854208ff78cc..70e32969c1985 100644 --- a/clang/test/CIR/IR/vtable-attr.cir +++ b/clang/test/CIR/IR/vtable-attr.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s !rec_Q = !cir.record !rec_S = !cir.record diff --git a/clang/test/CIR/IR/vtt-addrpoint.cir b/clang/test/CIR/IR/vtt-addrpoint.cir index f05bb782c6911..823ddd2e7dc1d 100644 --- a/clang/test/CIR/IR/vtt-addrpoint.cir +++ b/clang/test/CIR/IR/vtt-addrpoint.cir @@ -1,4 +1,4 @@ -// RUN: cir-opt %s | FileCheck %s +// RUN: cir-opt %s --verify-roundtrip | FileCheck %s // Test the parsing and printing of the two forms of vtt.address_point op, as // they will appear in constructors. @@ -26,7 +26,7 @@ module { cir.call @_ZN1BC2Ev(%4, %5) : (!cir.ptr, !cir.ptr>) -> () %6 = cir.vtt.address_point %3 : !cir.ptr>, offset = 0 -> !cir.ptr> - %7 = cir.cast(bitcast, %6 : !cir.ptr>), !cir.ptr + %7 = cir.cast bitcast %6 : !cir.ptr> -> !cir.ptr %8 = cir.load align(8) %7 : !cir.ptr, !cir.vptr %9 = cir.vtable.get_vptr %2 : !cir.ptr -> !cir.ptr cir.store align(8) %8, %9 : !cir.vptr, !cir.ptr diff --git a/clang/test/CIR/Lowering/cast.cir b/clang/test/CIR/Lowering/cast.cir index 6842905dae6a4..ec104edec2405 100644 --- a/clang/test/CIR/Lowering/cast.cir +++ b/clang/test/CIR/Lowering/cast.cir @@ -26,51 +26,51 @@ module { // Integer casts. %9 = cir.load %0 : !cir.ptr, !u32i - %10 = cir.cast(integral, %9 : !u32i), !s8i + %10 = cir.cast integral %9 : !u32i -> !s8i // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i8 cir.store %10, %3 : !s8i, !cir.ptr %11 = cir.load %1 : !cir.ptr, !s32i - %12 = cir.cast(integral, %11 : !s32i), !s16i + %12 = cir.cast integral %11 : !s32i -> !s16i // CHECK: %{{[0-9]+}} = llvm.trunc %{{[0-9]+}} : i32 to i16 cir.store %12, %4 : !s16i, !cir.ptr %13 = cir.load %0 : !cir.ptr, !u32i - %14 = cir.cast(integral, %13 : !u32i), !s64i + %14 = cir.cast integral %13 : !u32i -> !s64i // CHECK: %{{[0-9]+}} = llvm.zext %{{[0-9]+}} : i32 to i64 cir.store %14, %5 : !s64i, !cir.ptr %15 = cir.load %1 : !cir.ptr, !s32i - %16 = cir.cast(integral, %15 : !s32i), !s64i + %16 = cir.cast integral %15 : !s32i -> !s64i // CHECK: %{{[0-9]+}} = llvm.sext %{{[0-9]+}} : i32 to i64 - %30 = cir.cast(integral, %arg1 : !s32i), !u32i + %30 = cir.cast integral %arg1 : !s32i -> !u32i // Should not produce a cast. - %32 = cir.cast(integral, %arg0 : !u32i), !s32i + %32 = cir.cast integral %arg0 : !u32i -> !s32i // Should not produce a cast. %21 = cir.load %20 : !cir.ptr, !s16i - %22 = cir.cast(integral, %21 : !s16i), !u64i + %22 = cir.cast integral %21 : !s16i -> !u64i // CHECK: %[[TMP:[0-9]+]] = llvm.sext %{{[0-9]+}} : i16 to i64 - %33 = cir.cast(int_to_bool, %arg1 : !s32i), !cir.bool + %33 = cir.cast int_to_bool %arg1 : !s32i -> !cir.bool // CHECK: %[[#ZERO:]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[#CMP:]] = llvm.icmp "ne" %arg1, %[[#ZERO]] : i32 // Pointer casts. 
cir.store %16, %6 : !s64i, !cir.ptr - %23 = cir.cast(int_to_ptr, %22 : !u64i), !cir.ptr + %23 = cir.cast int_to_ptr %22 : !u64i -> !cir.ptr // CHECK: %[[TMP2:[0-9]+]] = llvm.inttoptr %[[TMP]] : i64 to !llvm.ptr - %24 = cir.cast(ptr_to_int, %23 : !cir.ptr), !s32i + %24 = cir.cast ptr_to_int %23 : !cir.ptr -> !s32i // CHECK: %{{[0-9]+}} = llvm.ptrtoint %[[TMP2]] : !llvm.ptr to i32 - %29 = cir.cast(ptr_to_bool, %23 : !cir.ptr), !cir.bool + %29 = cir.cast ptr_to_bool %23 : !cir.ptr -> !cir.bool // Floating point casts. - %25 = cir.cast(int_to_float, %arg1 : !s32i), !cir.float + %25 = cir.cast int_to_float %arg1 : !s32i -> !cir.float // CHECK: %{{.+}} = llvm.sitofp %{{.+}} : i32 to f32 - %26 = cir.cast(int_to_float, %arg0 : !u32i), !cir.float + %26 = cir.cast int_to_float %arg0 : !u32i -> !cir.float // CHECK: %{{.+}} = llvm.uitofp %{{.+}} : i32 to f32 - %27 = cir.cast(float_to_int, %arg2 : !cir.float), !s32i + %27 = cir.cast float_to_int %arg2 : !cir.float -> !s32i // CHECK: %{{.+}} = llvm.fptosi %{{.+}} : f32 to i32 - %28 = cir.cast(float_to_int, %arg2 : !cir.float), !u32i + %28 = cir.cast float_to_int %arg2 : !cir.float -> !u32i // CHECK: %{{.+}} = llvm.fptoui %{{.+}} : f32 to i32 %18 = cir.const #cir.int<0> : !s32i // CHECK: %{{.+}} = llvm.fptrunc %{{.+}} : f64 to f32 - %34 = cir.cast(floating, %arg3 : !cir.double), !cir.float + %34 = cir.cast floating %arg3 : !cir.double -> !cir.float cir.store %18, %2 : !s32i, !cir.ptr %19 = cir.load %2 : !cir.ptr, !s32i @@ -84,7 +84,7 @@ module { cir.store %arg0, %0 : !cir.bool, !cir.ptr %2 = cir.load %0 : !cir.ptr, !cir.bool - %3 = cir.cast(bool_to_int, %2 : !cir.bool), !u8i + %3 = cir.cast bool_to_int %2 : !cir.bool -> !u8i // CHECK: %[[LOAD_BOOL:.*]] = llvm.load %{{.*}} : !llvm.ptr -> i8 // CHECK: %[[TRUNC:.*]] = llvm.trunc %[[LOAD_BOOL]] : i8 to i1 // CHECK: %[[EXT:.*]] = llvm.zext %[[TRUNC]] : i1 to i8 diff --git a/clang/test/CIR/Lowering/if.cir b/clang/test/CIR/Lowering/if.cir index 3a077aa9ef057..888fb38e2d77c 100644 --- a/clang/test/CIR/Lowering/if.cir +++ b/clang/test/CIR/Lowering/if.cir @@ -4,7 +4,7 @@ module { cir.func @foo(%arg0: !s32i) -> !s32i { - %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool cir.if %4 { %5 = cir.const #cir.int<1> : !s32i cir.return %5 : !s32i @@ -44,7 +44,7 @@ module { // LLVM-NEXT: } cir.func @onlyIf(%arg0: !s32i) -> !s32i { - %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool cir.if %4 { %5 = cir.const #cir.int<1> : !s32i cir.return %5 : !s32i @@ -66,7 +66,7 @@ module { // Verify empty if clause is properly lowered to empty block cir.func @emptyIfClause(%arg0: !s32i) -> !s32i { // MLIR-LABEL: llvm.func @emptyIfClause - %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool // MLIR: llvm.cond_br {{%.*}}, ^[[T:.*]], ^[[PHI:.*]] cir.if %4 { // MLIR-NEXT: ^[[T]]: @@ -82,7 +82,7 @@ module { // addressed cir.func @emptyIfElseClause(%arg0: !s32i) -> !s32i { // MLIR-LABEL: llvm.func @emptyIfElseClause - %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool // MLIR: llvm.cond_br {{%.*}}, ^[[T:.*]], ^[[F:.*]] cir.if %4 { // MLIR-NEXT: ^[[T]]: diff --git a/clang/test/CIR/Lowering/vtt-addrpoint.cir b/clang/test/CIR/Lowering/vtt-addrpoint.cir index 96dc27d991cd4..e1bfd00245b1b 100644 --- a/clang/test/CIR/Lowering/vtt-addrpoint.cir +++ b/clang/test/CIR/Lowering/vtt-addrpoint.cir @@ -24,7 +24,7 @@ module { %5 = 
cir.vtt.address_point %3 : !cir.ptr>, offset = 1 -> !cir.ptr> cir.call @_ZN1BC2Ev(%4, %5) : (!cir.ptr, !cir.ptr>) -> () %6 = cir.vtt.address_point %3 : !cir.ptr>, offset = 0 -> !cir.ptr> - %7 = cir.cast(bitcast, %6 : !cir.ptr>), !cir.ptr + %7 = cir.cast bitcast %6 : !cir.ptr> -> !cir.ptr %8 = cir.load align(8) %7 : !cir.ptr, !cir.vptr %9 = cir.vtable.get_vptr %2 : !cir.ptr -> !cir.ptr cir.store align(8) %8, %9 : !cir.vptr, !cir.ptr diff --git a/clang/test/CIR/Transforms/canonicalize.cir b/clang/test/CIR/Transforms/canonicalize.cir index 5daff119a626f..5606f9e16a690 100644 --- a/clang/test/CIR/Transforms/canonicalize.cir +++ b/clang/test/CIR/Transforms/canonicalize.cir @@ -50,39 +50,39 @@ module { // CHECK-NEXT: } cir.func @cast1(%arg0: !cir.bool) -> !cir.bool { - %0 = cir.cast(bool_to_int, %arg0 : !cir.bool), !s32i - %1 = cir.cast(int_to_bool, %0 : !s32i), !cir.bool + %0 = cir.cast bool_to_int %arg0 : !cir.bool -> !s32i + %1 = cir.cast int_to_bool %0 : !s32i -> !cir.bool cir.return %1 : !cir.bool } // CHECK: cir.func{{.*}} @cast1(%[[ARG0:.*]]: !cir.bool) -> !cir.bool // CHECK-NEXT: cir.return %[[ARG0]] : !cir.bool cir.func @cast2(%arg0: !s32i) -> !cir.bool { - %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool - %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i - %2 = cir.cast(integral, %1 : !s32i), !s64i - %3 = cir.cast(int_to_bool, %2 : !s64i), !cir.bool + %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool + %1 = cir.cast bool_to_int %0 : !cir.bool -> !s32i + %2 = cir.cast integral %1 : !s32i -> !s64i + %3 = cir.cast int_to_bool %2 : !s64i -> !cir.bool cir.return %3 : !cir.bool } // CHECK: cir.func{{.*}} @cast2(%[[ARG0:.*]]: !s32i) -> !cir.bool - // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[ARG0]] : !s32i), !cir.bool + // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[ARG0]] : !s32i -> !cir.bool // CHECK-NEXT: cir.return %[[CAST]] : !cir.bool cir.func @no_fold_cast(%arg0: !s32i) -> !s64i { - %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool - %1 = cir.cast(bool_to_int, %0 : !cir.bool), !s32i - %2 = cir.cast(integral, %1 : !s32i), !s64i + %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool + %1 = cir.cast bool_to_int %0 : !cir.bool -> !s32i + %2 = cir.cast integral %1 : !s32i -> !s64i cir.return %2 : !s64i } // CHECK: cir.func{{.*}} @no_fold_cast(%[[ARG0:.*]]: !s32i) -> !s64i - // CHECK-NEXT: %[[CAST:.*]] = cir.cast(int_to_bool, %[[ARG0]] : !s32i), !cir.bool - // CHECK-NEXT: %[[CAST2:.*]] = cir.cast(bool_to_int, %[[CAST]] : !cir.bool), !s32i - // CHECK-NEXT: %[[CAST3:.*]] = cir.cast(integral, %[[CAST2]] : !s32i), !s64i + // CHECK-NEXT: %[[CAST:.*]] = cir.cast int_to_bool %[[ARG0]] : !s32i -> !cir.bool + // CHECK-NEXT: %[[CAST2:.*]] = cir.cast bool_to_int %[[CAST]] : !cir.bool -> !s32i + // CHECK-NEXT: %[[CAST3:.*]] = cir.cast integral %[[CAST2]] : !s32i -> !s64i // CHECK-NEXT: cir.return %[[CAST3]] : !s64i cir.func @cast_poison() -> !s64i { %0 = cir.const #cir.poison : !s32i - %1 = cir.cast(integral, %0 : !s32i), !s64i + %1 = cir.cast integral %0 : !s32i -> !s64i cir.return %1 : !s64i } // CHECK: @cast_poison diff --git a/clang/test/CIR/Transforms/if.cir b/clang/test/CIR/Transforms/if.cir index 3f817c793643f..ced288f7ecf29 100644 --- a/clang/test/CIR/Transforms/if.cir +++ b/clang/test/CIR/Transforms/if.cir @@ -4,7 +4,7 @@ module { cir.func @foo(%arg0: !s32i) -> !s32i { - %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool cir.if %4 { %5 = cir.const #cir.int<1> : !s32i cir.return %5 : !s32i @@ -15,7 +15,7 @@ 
module { cir.return %arg0 : !s32i } // CHECK: cir.func{{.*}} @foo(%arg0: !s32i) -> !s32i { -// CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK-NEXT: %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool // CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // pred: ^bb0 // CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i @@ -28,7 +28,7 @@ module { // CHECK-NEXT: } cir.func @onlyIf(%arg0: !s32i) -> !s32i { - %4 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool + %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool cir.if %4 { %5 = cir.const #cir.int<1> : !s32i cir.return %5 : !s32i @@ -36,7 +36,7 @@ module { cir.return %arg0 : !s32i } // CHECK: cir.func{{.*}} @onlyIf(%arg0: !s32i) -> !s32i { -// CHECK-NEXT: %0 = cir.cast(int_to_bool, %arg0 : !s32i), !cir.bool +// CHECK-NEXT: %0 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool // CHECK-NEXT: cir.brcond %0 ^bb1, ^bb2 // CHECK-NEXT: ^bb1: // pred: ^bb0 // CHECK-NEXT: %1 = cir.const #cir.int<1> : !s32i diff --git a/clang/test/CIR/Transforms/switch.cir b/clang/test/CIR/Transforms/switch.cir index a000d6b70fbcc..3addfe37061cd 100644 --- a/clang/test/CIR/Transforms/switch.cir +++ b/clang/test/CIR/Transforms/switch.cir @@ -261,8 +261,8 @@ module { // CHECK-NEXT: %[[RANGE:[0-9]+]] = cir.const #cir.int<99> // CHECK-NEXT: %[[LOWER_BOUND:[0-9]+]] = cir.const #cir.int<1> // CHECK-NEXT: %[[DIFF:[0-9]+]] = cir.binop(sub, %[[X]], %[[LOWER_BOUND]]) -// CHECK-NEXT: %[[U_DIFF:[0-9]+]] = cir.cast(integral, %[[DIFF]] : !s32i), !u32i -// CHECK-NEXT: %[[U_RANGE:[0-9]+]] = cir.cast(integral, %[[RANGE]] : !s32i), !u32i +// CHECK-NEXT: %[[U_DIFF:[0-9]+]] = cir.cast integral %[[DIFF]] : !s32i -> !u32i +// CHECK-NEXT: %[[U_RANGE:[0-9]+]] = cir.cast integral %[[RANGE]] : !s32i -> !u32i // CHECK-NEXT: %[[CMP_RESULT:[0-9]+]] = cir.cmp(le, %[[U_DIFF]], %[[U_RANGE]]) // CHECK-NEXT: cir.brcond %[[CMP_RESULT]] ^[[CASE_RANGE]], ^[[CASE_DEFAULT:bb[0-9]+]] // CHECK-NEXT: ^[[CASE_DEFAULT]]: @@ -304,8 +304,8 @@ module { // CHECK: %[[CONST97:.*]] = cir.const #cir.int<97> : !s32i // CHECK: %[[CONST3:.*]] = cir.const #cir.int<3> : !s32i // CHECK: %[[SUB:.*]] = cir.binop(sub, %[[COND]], %[[CONST3]]) : !s32i -// CHECK: %[[CAST1:.*]] = cir.cast(integral, %[[SUB]] : !s32i), !u32i -// CHECK: %[[CAST2:.*]] = cir.cast(integral, %[[CONST97]] : !s32i), !u32i +// CHECK: %[[CAST1:.*]] = cir.cast integral %[[SUB]] : !s32i -> !u32i +// CHECK: %[[CAST2:.*]] = cir.cast integral %[[CONST97]] : !s32i -> !u32i // CHECK: %[[CMP:.*]] = cir.cmp(le, %[[CAST1]], %[[CAST2]]) : !u32i, !cir.bool // CHECK: cir.brcond %7 ^bb[[#DEFAULT_BB]], ^bb[[#RANGE_BB:]] // CHECK: ^bb[[#RANGE_BB]]: // pred: ^bb[[#RANGE_BR]] diff --git a/clang/test/CodeGen/PowerPC/builtins-ppc-dmf.c b/clang/test/CodeGen/PowerPC/builtins-ppc-dmf.c index f62656757c8c5..d8306a74ad2e9 100644 --- a/clang/test/CodeGen/PowerPC/builtins-ppc-dmf.c +++ b/clang/test/CodeGen/PowerPC/builtins-ppc-dmf.c @@ -208,6 +208,75 @@ void test_dmf_basic2(char *p1, char *res1, char *res2, __builtin_mma_build_dmr((__dmr1024*)res2, vv, vv, vv, vv, vv, vv, vv, vv); __builtin_mma_disassemble_dmr(res1, (__dmr1024*)p1); } + +// CHECK-LABEL: define dso_local void @test_dmsha2hash( +// CHECK-SAME: ptr noundef readonly captures(none) [[VDMRP1:%.*]], ptr noundef readonly captures(none) [[VDMRP2:%.*]], ptr noundef writeonly captures(none) initializes((0, 128)) [[RESP:%.*]]) local_unnamed_addr #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP1]], align 128, !tbaa [[__DMR1024_TBAA6]] +// 
CHECK-NEXT: [[TMP1:%.*]] = load <1024 x i1>, ptr [[VDMRP2]], align 128, !tbaa [[__DMR1024_TBAA6]] +// CHECK-NEXT: [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmsha2hash(<1024 x i1> [[TMP0]], <1024 x i1> [[TMP1]], i32 1) +// CHECK-NEXT: store <1024 x i1> [[TMP2]], ptr [[RESP]], align 128, !tbaa [[__DMR1024_TBAA6]] +// CHECK-NEXT: ret void +// +// AIX-LABEL: define void @test_dmsha2hash( +// AIX-SAME: ptr noundef readonly captures(none) [[VDMRP1:%.*]], ptr noundef readonly captures(none) [[VDMRP2:%.*]], ptr noundef writeonly captures(none) initializes((0, 128)) [[RESP:%.*]]) local_unnamed_addr #[[ATTR0]] { +// AIX-NEXT: [[ENTRY:.*:]] +// AIX-NEXT: [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP1]], align 128, !tbaa [[__DMR1024_TBAA6]] +// AIX-NEXT: [[TMP1:%.*]] = load <1024 x i1>, ptr [[VDMRP2]], align 128, !tbaa [[__DMR1024_TBAA6]] +// AIX-NEXT: [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmsha2hash(<1024 x i1> [[TMP0]], <1024 x i1> [[TMP1]], i32 1) +// AIX-NEXT: store <1024 x i1> [[TMP2]], ptr [[RESP]], align 128, !tbaa [[__DMR1024_TBAA6]] +// AIX-NEXT: ret void +// +void test_dmsha2hash(unsigned char *vdmrp1, unsigned char *vdmrp2, unsigned char *resp) { + __dmr1024 vdmr1 = *((__dmr1024 *)vdmrp1); + __dmr1024 vdmr2 = *((__dmr1024 *)vdmrp2); + __builtin_mma_dmsha2hash(&vdmr1, &vdmr2, 1); + *((__dmr1024 *)resp) = vdmr1; +} + +// CHECK-LABEL: define dso_local void @test_dmsha3hash( +// CHECK-SAME: ptr noundef readonly captures(none) [[VDMRPP:%.*]], ptr noundef writeonly captures(none) initializes((0, 256)) [[RESP:%.*]]) local_unnamed_addr #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = load <2048 x i1>, ptr [[VDMRPP]], align 256, !tbaa [[__DMR2048_TBAA9:![0-9]+]] +// CHECK-NEXT: [[TMP1:%.*]] = tail call <2048 x i1> @llvm.ppc.mma.dmsha3hash(<2048 x i1> [[TMP0]], i32 4) +// CHECK-NEXT: store <2048 x i1> [[TMP1]], ptr [[RESP]], align 256, !tbaa [[__DMR2048_TBAA9]] +// CHECK-NEXT: ret void +// +// AIX-LABEL: define void @test_dmsha3hash( +// AIX-SAME: ptr noundef readonly captures(none) [[VDMRPP:%.*]], ptr noundef writeonly captures(none) initializes((0, 256)) [[RESP:%.*]]) local_unnamed_addr #[[ATTR0]] { +// AIX-NEXT: [[ENTRY:.*:]] +// AIX-NEXT: [[TMP0:%.*]] = load <2048 x i1>, ptr [[VDMRPP]], align 256, !tbaa [[__DMR2048_TBAA9:![0-9]+]] +// AIX-NEXT: [[TMP1:%.*]] = tail call <2048 x i1> @llvm.ppc.mma.dmsha3hash(<2048 x i1> [[TMP0]], i32 4) +// AIX-NEXT: store <2048 x i1> [[TMP1]], ptr [[RESP]], align 256, !tbaa [[__DMR2048_TBAA9]] +// AIX-NEXT: ret void +// +void test_dmsha3hash(unsigned char *vdmrpp, unsigned char *resp) { + __dmr2048 vdmrp = *((__dmr2048 *)vdmrpp); + __builtin_mma_dmsha3hash(&vdmrp, 4); + *((__dmr2048 *)resp) = vdmrp; +} + +// CHECK-LABEL: define dso_local void @test_dmxxshapad( +// CHECK-SAME: ptr noundef readonly captures(none) [[VDMRP:%.*]], <16 x i8> noundef [[VC:%.*]], ptr noundef writeonly captures(none) initializes((0, 128)) [[RESP:%.*]]) local_unnamed_addr #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP]], align 128, !tbaa [[__DMR1024_TBAA6]] +// CHECK-NEXT: [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxxshapad(<1024 x i1> [[TMP0]], <16 x i8> [[VC]], i32 2, i32 1, i32 5) +// CHECK-NEXT: store <1024 x i1> [[TMP1]], ptr [[RESP]], align 128, !tbaa [[__DMR1024_TBAA6]] +// CHECK-NEXT: ret void +// +// AIX-LABEL: define void @test_dmxxshapad( +// AIX-SAME: ptr noundef readonly captures(none) [[VDMRP:%.*]], <16 x i8> noundef [[VC:%.*]], ptr noundef writeonly captures(none) 
initializes((0, 128)) [[RESP:%.*]]) local_unnamed_addr #[[ATTR0]] { +// AIX-NEXT: [[ENTRY:.*:]] +// AIX-NEXT: [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP]], align 128, !tbaa [[__DMR1024_TBAA6]] +// AIX-NEXT: [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxxshapad(<1024 x i1> [[TMP0]], <16 x i8> [[VC]], i32 2, i32 1, i32 5) +// AIX-NEXT: store <1024 x i1> [[TMP1]], ptr [[RESP]], align 128, !tbaa [[__DMR1024_TBAA6]] +// AIX-NEXT: ret void +// +void test_dmxxshapad(unsigned char *vdmrp, vector unsigned char vc, unsigned char *resp) { + __dmr1024 vdmr = *((__dmr1024 *)vdmrp); + __builtin_mma_dmxxshapad(&vdmr, vc, 2, 1, 5); + *((__dmr1024 *)resp) = vdmr; +} //. // CHECK: [[__VECTOR_PAIR_TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0} // CHECK: [[META3]] = !{!"__vector_pair", [[META4:![0-9]+]], i64 0} @@ -216,6 +285,8 @@ void test_dmf_basic2(char *p1, char *res1, char *res2, // CHECK: [[__DMR1024_TBAA6]] = !{[[META7:![0-9]+]], [[META7]], i64 0} // CHECK: [[META7]] = !{!"__dmr1024", [[META4]], i64 0} // CHECK: [[CHAR_TBAA8]] = !{[[META4]], [[META4]], i64 0} +// CHECK: [[__DMR2048_TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0} +// CHECK: [[META10]] = !{!"__dmr2048", [[META4]], i64 0} //. // AIX: [[__VECTOR_PAIR_TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0} // AIX: [[META3]] = !{!"__vector_pair", [[META4:![0-9]+]], i64 0} @@ -224,4 +295,6 @@ void test_dmf_basic2(char *p1, char *res1, char *res2, // AIX: [[__DMR1024_TBAA6]] = !{[[META7:![0-9]+]], [[META7]], i64 0} // AIX: [[META7]] = !{!"__dmr1024", [[META4]], i64 0} // AIX: [[CHAR_TBAA8]] = !{[[META4]], [[META4]], i64 0} +// AIX: [[__DMR2048_TBAA9]] = !{[[META10:![0-9]+]], [[META10]], i64 0} +// AIX: [[META10]] = !{!"__dmr2048", [[META4]], i64 0} //. diff --git a/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c b/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c index 06497555b840f..66b9d797c65d3 100644 --- a/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c +++ b/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c @@ -9,7 +9,9 @@ // RUN: FileCheck --check-prefix=ISA_FUTURE %s //__attribute__((target("no-mma"))) -void test_mma(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc) { +__attribute__((target("no-mma"))) +void test_mma(unsigned char *vdmrpp, unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc) { + __dmr2048 vdmrpair = *((__dmr2048 *)vdmrpp); __dmr1024 vdmr = *((__dmr1024 *)vdmrp); __vector_pair vp = *((__vector_pair *)vpp); __builtin_mma_dmxvi8gerx4(&vdmr, vp, vc); @@ -23,6 +25,9 @@ void test_mma(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc) __builtin_mma_dmxor(&vdmr, (__dmr1024*)vpp); __builtin_mma_build_dmr(&vdmr, vc, vc, vc, vc, vc, vc, vc, vc); __builtin_mma_disassemble_dmr(vdmrp, &vdmr); + __builtin_mma_dmsha2hash(&vdmr, &vdmr, 0); + __builtin_mma_dmsha3hash(&vdmrpair, 0); + __builtin_mma_dmxxshapad(&vdmr, vc, 0, 0, 0); // CHECK: error: '__builtin_mma_dmxvi8gerx4' needs target feature mma,paired-vector-memops // CHECK: error: '__builtin_mma_pmdmxvi8gerx4' needs target feature mma,paired-vector-memops @@ -35,6 +40,9 @@ void test_mma(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc) // ISA_FUTURE: error: '__builtin_mma_dmxor' needs target feature mma,isa-future-instructions // ISA_FUTURE: error: '__builtin_mma_build_dmr' needs target feature mma,isa-future-instructions // ISA_FUTURE: error: '__builtin_mma_disassemble_dmr' needs target feature mma,isa-future-instructions +// CHECK: error: '__builtin_mma_dmsha2hash' needs target feature 
mma,isa-future-instructions +// CHECK: error: '__builtin_mma_dmsha3hash' needs target feature mma,isa-future-instructions +// CHECK: error: '__builtin_mma_dmxxshapad' needs target feature mma,isa-future-instructions // DMF VSX Vector bfloat16 GER 2x builtins. diff --git a/clang/test/CodeGen/PowerPC/ppc-dmf-types.c b/clang/test/CodeGen/PowerPC/ppc-dmf-types.c index 9dff289370eb5..fbbe62133763e 100644 --- a/clang/test/CodeGen/PowerPC/ppc-dmf-types.c +++ b/clang/test/CodeGen/PowerPC/ppc-dmf-types.c @@ -2,6 +2,162 @@ // RUN: %clang_cc1 -triple powerpc64le-linux-unknown -target-cpu future \ // RUN: -emit-llvm -o - %s | FileCheck %s +// CHECK-LABEL: @test_dmrp_copy( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[PTR1_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[PTR2_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: store ptr [[PTR1:%.*]], ptr [[PTR1_ADDR]], align 8 +// CHECK-NEXT: store ptr [[PTR2:%.*]], ptr [[PTR2_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 8 +// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <2048 x i1>, ptr [[TMP0]], i64 2 +// CHECK-NEXT: [[TMP1:%.*]] = load <2048 x i1>, ptr [[ADD_PTR]], align 256 +// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8 +// CHECK-NEXT: [[ADD_PTR1:%.*]] = getelementptr inbounds <2048 x i1>, ptr [[TMP2]], i64 1 +// CHECK-NEXT: store <2048 x i1> [[TMP1]], ptr [[ADD_PTR1]], align 256 +// CHECK-NEXT: ret void +// +void test_dmrp_copy(__dmr2048 *ptr1, __dmr2048 *ptr2) { + *(ptr2 + 1) = *(ptr1 + 2); +} + +// CHECK-LABEL: @test_dmrp_typedef( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[INP_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[OUTP_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[VDMRPIN:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[VDMRPOUT:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: store ptr [[INP:%.*]], ptr [[INP_ADDR]], align 8 +// CHECK-NEXT: store ptr [[OUTP:%.*]], ptr [[OUTP_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[INP_ADDR]], align 8 +// CHECK-NEXT: store ptr [[TMP0]], ptr [[VDMRPIN]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[OUTP_ADDR]], align 8 +// CHECK-NEXT: store ptr [[TMP1]], ptr [[VDMRPOUT]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VDMRPIN]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load <2048 x i1>, ptr [[TMP2]], align 256 +// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VDMRPOUT]], align 8 +// CHECK-NEXT: store <2048 x i1> [[TMP3]], ptr [[TMP4]], align 256 +// CHECK-NEXT: ret void +// +void test_dmrp_typedef(int *inp, int *outp) { + __dmr2048 *vdmrpin = (__dmr2048 *)inp; + __dmr2048 *vdmrpout = (__dmr2048 *)outp; + *vdmrpout = *vdmrpin; +} + +// CHECK-LABEL: @test_dmrp_arg( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VDMRP_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[VDMRPP:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: store ptr [[VDMRP:%.*]], ptr [[VDMRP_ADDR]], align 8 +// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: store ptr [[TMP0]], ptr [[VDMRPP]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VDMRP_ADDR]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load <2048 x i1>, ptr [[TMP1]], align 256 +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VDMRPP]], align 8 +// CHECK-NEXT: store <2048 x i1> [[TMP2]], ptr [[TMP3]], align 256 +// CHECK-NEXT: ret void +// +void test_dmrp_arg(__dmr2048 *vdmrp, int *ptr) { + __dmr2048 *vdmrpp = (__dmr2048 *)ptr; + *vdmrpp = *vdmrp; +} 
+ +// CHECK-LABEL: @test_dmrp_const_arg( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VDMRP_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[VDMRPP:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: store ptr [[VDMRP:%.*]], ptr [[VDMRP_ADDR]], align 8 +// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: store ptr [[TMP0]], ptr [[VDMRPP]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VDMRP_ADDR]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load <2048 x i1>, ptr [[TMP1]], align 256 +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VDMRPP]], align 8 +// CHECK-NEXT: store <2048 x i1> [[TMP2]], ptr [[TMP3]], align 256 +// CHECK-NEXT: ret void +// +void test_dmrp_const_arg(const __dmr2048 *const vdmrp, int *ptr) { + __dmr2048 *vdmrpp = (__dmr2048 *)ptr; + *vdmrpp = *vdmrp; +} + +// CHECK-LABEL: @test_dmrp_array_arg( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[VDMRPA_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[VDMRPP:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: store ptr [[VDMRPA:%.*]], ptr [[VDMRPA_ADDR]], align 8 +// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: store ptr [[TMP0]], ptr [[VDMRPP]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VDMRPA_ADDR]], align 8 +// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <2048 x i1>, ptr [[TMP1]], i64 0 +// CHECK-NEXT: [[TMP2:%.*]] = load <2048 x i1>, ptr [[ARRAYIDX]], align 256 +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VDMRPP]], align 8 +// CHECK-NEXT: store <2048 x i1> [[TMP2]], ptr [[TMP3]], align 256 +// CHECK-NEXT: ret void +// +void test_dmrp_array_arg(__dmr2048 vdmrpa[], int *ptr) { + __dmr2048 *vdmrpp = (__dmr2048 *)ptr; + *vdmrpp = vdmrpa[0]; +} + +// CHECK-LABEL: @test_dmrp_ret_const( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[VDMRPP:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: store ptr [[TMP0]], ptr [[VDMRPP]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VDMRPP]], align 8 +// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <2048 x i1>, ptr [[TMP1]], i64 2 +// CHECK-NEXT: ret ptr [[ADD_PTR]] +// +const __dmr2048 *test_dmrp_ret_const(int *ptr) { + __dmr2048 *vdmrpp = (__dmr2048 *)ptr; + return vdmrpp + 2; +} + +// CHECK-LABEL: @test_dmrp_sizeof_alignof( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[VDMRPP:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[VDMRP:%.*]] = alloca <2048 x i1>, align 256 +// CHECK-NEXT: [[SIZET:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[ALIGNT:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[SIZEV:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[ALIGNV:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: store ptr [[TMP0]], ptr [[VDMRPP]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VDMRPP]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load <2048 x i1>, ptr [[TMP1]], align 256 +// CHECK-NEXT: store <2048 x i1> [[TMP2]], ptr [[VDMRP]], align 256 +// CHECK-NEXT: store i32 256, ptr [[SIZET]], align 4 +// CHECK-NEXT: store i32 256, ptr 
[[ALIGNT]], align 4 +// CHECK-NEXT: store i32 256, ptr [[SIZEV]], align 4 +// CHECK-NEXT: store i32 256, ptr [[ALIGNV]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[SIZET]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ALIGNT]], align 4 +// CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP3]], [[TMP4]] +// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIZEV]], align 4 +// CHECK-NEXT: [[ADD1:%.*]] = add i32 [[ADD]], [[TMP5]] +// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ALIGNV]], align 4 +// CHECK-NEXT: [[ADD2:%.*]] = add i32 [[ADD1]], [[TMP6]] +// CHECK-NEXT: ret i32 [[ADD2]] +// +int test_dmrp_sizeof_alignof(int *ptr) { + __dmr2048 *vdmrpp = (__dmr2048 *)ptr; + __dmr2048 vdmrp = *vdmrpp; + unsigned sizet = sizeof(__dmr2048); + unsigned alignt = __alignof__(__dmr2048); + unsigned sizev = sizeof(vdmrp); + unsigned alignv = __alignof__(vdmrp); + return sizet + alignt + sizev + alignv; +} // CHECK-LABEL: @test_dmr_copy( // CHECK-NEXT: entry: diff --git a/clang/test/CodeGen/X86/avx-builtins.c b/clang/test/CodeGen/X86/avx-builtins.c index 347cd9ee6a667..3018bb9719b89 100644 --- a/clang/test/CodeGen/X86/avx-builtins.c +++ b/clang/test/CodeGen/X86/avx-builtins.c @@ -985,18 +985,21 @@ double test_mm256_cvtsd_f64(__m256d __a) { // CHECK: extractelement <4 x double> %{{.*}}, i32 0 return _mm256_cvtsd_f64(__a); } +TEST_CONSTEXPR(_mm256_cvtsd_f64((__m256d){8.0, 7.0, 6.0, 5.0}) == 8.0); int test_mm256_cvtsi256_si32(__m256i __a) { // CHECK-LABEL: test_mm256_cvtsi256_si32 // CHECK: extractelement <8 x i32> %{{.*}}, i32 0 return _mm256_cvtsi256_si32(__a); } +TEST_CONSTEXPR(_mm256_cvtsi256_si32((__m256i)(__v8si){8, 7, 6, 5, 4, 3, 2, 1}) == 8); float test_mm256_cvtss_f32(__m256 __a) { // CHECK-LABEL: test_mm256_cvtss_f32 // CHECK: extractelement <8 x float> %{{.*}}, i32 0 return _mm256_cvtss_f32(__a); } +TEST_CONSTEXPR(_mm256_cvtss_f32((__m256){8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}) == 8.0f); __m128i test_mm256_cvttpd_epi32(__m256d A) { // CHECK-LABEL: test_mm256_cvttpd_epi32 diff --git a/clang/test/CodeGen/X86/avx-cxx-record.cpp b/clang/test/CodeGen/X86/avx-cxx-record.cpp index 6ce6815a521a1..b20bcdd616a43 100644 --- a/clang/test/CodeGen/X86/avx-cxx-record.cpp +++ b/clang/test/CodeGen/X86/avx-cxx-record.cpp @@ -1,7 +1,9 @@ // RUN: %clang_cc1 %s -triple x86_64-unknown-linux-gnu -emit-llvm -O2 -target-cpu x86-64-v3 -o - | FileCheck %s // RUN: %clang_cc1 %s -triple x86_64-unknown-linux-gnu -emit-llvm -O2 -target-cpu x86-64-v3 -fclang-abi-compat=20 -o - | FileCheck --check-prefix CLANG-20 %s +// RUN: %clang_cc1 %s -triple x86_64-sie-ps4 -emit-llvm -O2 -target-cpu x86-64-v3 -o - | FileCheck --check-prefix CLANG-20 %s using UInt64x2 = unsigned long long __attribute__((__vector_size__(16), may_alias)); +using UInt64x4 = unsigned long long __attribute__((__vector_size__(32), may_alias)); template struct XMM1 { @@ -23,3 +25,24 @@ XMM2 foo() { ((XMM1<1>*)&result)->x = UInt64x2{3, 4}; return result; } + +template +struct YMM1 { + UInt64x4 x; +}; + +struct YMM2 : YMM1<0>, YMM1<1> { +}; + +// CHECK: define{{.*}} @_Z3barv({{.*}} [[ARG:%.*]]){{.*}} +// CLANG-20: define{{.*}} <8 x double> @_Z3barv() +// CHECK: entry: +// CHECK-NEXT: store {{.*}}, ptr [[ARG]]{{.*}} +// CHECK-NEXT: [[TMP1:%.*]] = getelementptr {{.*}}, ptr [[ARG]]{{.*}} +// CHECK-NEXT: store {{.*}}, ptr [[TMP1]]{{.*}} +YMM2 bar() { + YMM2 result; + ((YMM1<0>*)&result)->x = UInt64x4{1, 2, 3, 4}; + ((YMM1<1>*)&result)->x = UInt64x4{5, 6, 7, 8}; + return result; +} diff --git a/clang/test/CodeGen/X86/avx2-builtins.c 
b/clang/test/CodeGen/X86/avx2-builtins.c index 00cb1a4390d79..eff2797e87c75 100644 --- a/clang/test/CodeGen/X86/avx2-builtins.c +++ b/clang/test/CodeGen/X86/avx2-builtins.c @@ -263,6 +263,7 @@ __m256i test_mm_broadcastsi128_si256(__m128i a) { // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32> return _mm_broadcastsi128_si256(a); } +TEST_CONSTEXPR(match_m256i(_mm_broadcastsi128_si256(((__m128i)(__v2di){42, -99})), 42, -99, 42, -99)); __m128 test_mm_broadcastss_ps(__m128 a) { // CHECK-LABEL: test_mm_broadcastss_ps @@ -1038,24 +1039,28 @@ __m256i test_mm256_packs_epi16(__m256i a, __m256i b) { // CHECK: call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}) return _mm256_packs_epi16(a, b); } +TEST_CONSTEXPR(match_v32qi(_mm256_packs_epi16((__m256i)(__v16hi){130, -200, 127, -128, 300, -1000, 42, -42, 500, -500, 1, -1, 128, -129, 256, -256}, (__m256i)(__v16hi){0, 1, -1, 255, -129, 128, 20000, -32768, 32767, -32767, 127, -128, 30000, -30000, 90, -90}), 127, -128, 127, -128, 127, -128, 42, -42, 0, 1, -1, 127, -128, 127, 127, -128, 127, -128, 1, -1, 127, -128, 127, -128, 127, -128, 127, -128, 127, -128, 90, -90)); __m256i test_mm256_packs_epi32(__m256i a, __m256i b) { // CHECK-LABEL: test_mm256_packs_epi32 // CHECK: call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}) return _mm256_packs_epi32(a, b); } +TEST_CONSTEXPR(match_v16hi(_mm256_packs_epi32((__m256i)(__v8si){40000, -50000, 32767, -32768, 70000, -70000, 42, -42}, (__m256i)(__v8si){0, 1, -1, 65536, -1000000, 1000000, 32768, -32769}), 32767, -32768, 32767, -32768, 0, 1, -1, 32767, 32767, -32768, 42, -42, -32768, 32767, 32767, -32768)); __m256i test_mm256_packs_epu16(__m256i a, __m256i b) { // CHECK-LABEL: test_mm256_packs_epu16 // CHECK: call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %{{.*}}, <16 x i16> %{{.*}}) return _mm256_packus_epi16(a, b); } +TEST_CONSTEXPR(match_v32qi(_mm256_packus_epi16((__m256i)(__v16hi){-1, 0, 1, 127, 128, 255, 256, -200, 300, 42, -42, 500, 20000, -32768, 129, -129}, (__m256i)(__v16hi){0, 1, -1, 255, -129, 128, 20000, -32768, 32767, -32767, 127, -128, 30000, -30000, 90, -90}), 0, 0, 1, 127, -128, -1, -1, 0, 0, 1, 0, -1, 0, -128, -1, 0, -1, 42, 0, -1, -1, 0, -127, 0, -1, 0, 127, 0, -1, 0, 90, 0)); __m256i test_mm256_packs_epu32(__m256i a, __m256i b) { // CHECK-LABEL: test_mm256_packs_epu32 // CHECK: call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}) return _mm256_packus_epi32(a, b); } +TEST_CONSTEXPR(match_v16hi(_mm256_packus_epi32((__m256i)(__v8si){40000, -50000, 32767, -32768, 70000, -70000, 42, -42}, (__m256i)(__v8si){0, 1, -1, 65536, -1000000, 1000000, 32768, -32769}), -25536, 0, 32767, 0, 0, 1, 0, -1, -1, 0, 42, 0, 0, -1, -32768, 0)); __m256i test_mm256_permute2x128_si256(__m256i a, __m256i b) { // CHECK-LABEL: test_mm256_permute2x128_si256 diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c index 1875e202b0c0a..3f42ac0268978 100644 --- a/clang/test/CodeGen/X86/avx512bw-builtins.c +++ b/clang/test/CodeGen/X86/avx512bw-builtins.c @@ -1058,6 +1058,7 @@ __m512i test_mm512_packs_epi32(__m512i __A, __m512i __B) { // CHECK: @llvm.x86.avx512.packssdw.512 return _mm512_packs_epi32(__A,__B); } +TEST_CONSTEXPR(match_v32hi(_mm512_packs_epi32((__m512i)(__v16si){40000, -50000, 32767, -32768, 70000, -70000, 42, -42, 0, 1, -1, 30000, 32768, -32769, 65535, -65536}, (__m512i)(__v16si){0, 1, -1, 65536, -1000000, 1000000, 32768, -32769, 123456, -123456, 32767, -32768, 22222, -22222, 
40000, -40000}), 32767, -32768, 32767, -32768, 0, 1, -1, 32767, 32767, -32768, 42, -42, -32768, 32767, 32767, -32768, 0, 1, -1, 30000, 32767, -32768, 32767, -32768, 32767, -32768, 32767, -32768, 22222, -22222, 32767, -32768)); __m512i test_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_maskz_packs_epi32 // CHECK: @llvm.x86.avx512.packssdw.512 @@ -1075,6 +1076,7 @@ __m512i test_mm512_packs_epi16(__m512i __A, __m512i __B) { // CHECK: @llvm.x86.avx512.packsswb.512 return _mm512_packs_epi16(__A,__B); } +TEST_CONSTEXPR(match_v64qi(_mm512_packs_epi16((__m512i)(__v32hi){130, -200, 127, -128, 300, -1000, 42, -42, 32767, -32767, 127, -128, 30000, -30000, 90, -90, 130, -200, 0, -1, 126, -127, 128, -129, 500, -500, 7, -7, 255, -255, 127, -128}, (__m512i)(__v32hi){0, 1, -1, 255, -129, 128, 20000, -32768, 5, -5, 100, -100, 127, -128, 512, -512, 1, 2, -2, 300, -300, 127, -128, 42, 0, 1, -1, 127, -128, 90, -90, -32768}), 127, -128, 127, -128, 127, -128, 42, -42, 0, 1, -1, 127, -128, 127, 127, -128, 127, -128, 127, -128, 127, -128, 90, -90, 5, -5, 100, -100, 127, -128, 127, -128, 127, -128, 0, -1, 126, -127, 127, -128, 1, 2, -2, 127, -128, 127, -128, 42, 127, -128, 7, -7, 127, -128, 127, -128, 0, 1, -1, 127, -128, 90, -90, -128)); __m512i test_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_mask_packs_epi16 // CHECK: @llvm.x86.avx512.packsswb.512 @@ -1092,6 +1094,7 @@ __m512i test_mm512_packus_epi32(__m512i __A, __m512i __B) { // CHECK: @llvm.x86.avx512.packusdw.512 return _mm512_packus_epi32(__A,__B); } +TEST_CONSTEXPR(match_v32hi(_mm512_packus_epi32((__m512i)(__v16si){40000, -50000, 32767, -32768, 70000, -70000, 42, -42, 0, 1, -1, 65535, 32768, -32769, 22222, -22222}, (__m512i)(__v16si){0, 1, -1, 65536, -1000000, 1000000, 32768, -32769, 123456, -123456, 32767, -32768, 40000, -40000, 65535, 0}), -25536, 0, 32767, 0, 0, 1, 0, -1, -1, 0, 42, 0, 0, -1, -32768, 0, 0, 1, 0, -1, -1, 0, 32767, 0, -32768, 0, 22222, 0, -25536, 0, -1, 0)); __m512i test_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_maskz_packus_epi32 // CHECK: @llvm.x86.avx512.packusdw.512 @@ -1109,6 +1112,7 @@ __m512i test_mm512_packus_epi16(__m512i __A, __m512i __B) { // CHECK: @llvm.x86.avx512.packuswb.512 return _mm512_packus_epi16(__A,__B); } +TEST_CONSTEXPR(match_v64qi(_mm512_packus_epi16((__m512i)(__v32hi){-1, 0, 1, 127, 128, 255, 256, -200, 300, 42, -42, 500, 20000, -32768, 129, -129, -1, 0, 1, 127, 128, 255, 256, -200, 300, 42, -42, 500, 20000, -32768, 129, -129}, (__m512i)(__v32hi){0, 1, -1, 255, -129, 128, 20000, -32768, 32767, -32767, 127, -128, 30000, -30000, 90, -90, 0, 1, -1, 255, -129, 128, 20000, -32768, 32767, -32767, 127, -128, 30000, -30000, 90, -90}), 0, 0, 1, 127, -128, -1, -1, 0, 0, 1, 0, -1, 0, -128, -1, 0, -1, 42, 0, -1, -1, 0, -127, 0, -1, 0, 127, 0, -1, 0, 90, 0, 0, 0, 1, 127, -128, -1, -1, 0, 0, 1, 0, -1, 0, -128, -1, 0, -1, 42, 0, -1, -1, 0, -127, 0, -1, 0, 127, 0, -1, 0, 90, 0)); __m512i test_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) { // CHECK-LABEL: test_mm512_mask_packus_epi16 // CHECK: @llvm.x86.avx512.packuswb.512 diff --git a/clang/test/CodeGen/X86/avx512ifma-builtins.c b/clang/test/CodeGen/X86/avx512ifma-builtins.c index 7c7c492c79c99..eebefb0bad4ab 100644 --- a/clang/test/CodeGen/X86/avx512ifma-builtins.c +++ b/clang/test/CodeGen/X86/avx512ifma-builtins.c @@ -3,6 +3,11 @@ // RUN: %clang_cc1 -x c++ 
-flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror | FileCheck %s // RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror | FileCheck %s +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ -flax-vector-conversions=none -ffreestanding %s -triple=i386-apple-darwin -target-feature +avx512ifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s + #include diff --git a/clang/test/CodeGen/X86/avx512ifmavl-builtins.c b/clang/test/CodeGen/X86/avx512ifmavl-builtins.c index c115b60381383..89108fc037520 100644 --- a/clang/test/CodeGen/X86/avx512ifmavl-builtins.c +++ b/clang/test/CodeGen/X86/avx512ifmavl-builtins.c @@ -3,6 +3,12 @@ // RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s // RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=i386-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s +// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c %s -flax-vector-conversions=none -ffreestanding -triple=i386-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ %s -flax-vector-conversions=none -ffreestanding -triple=i386-apple-darwin -target-feature +avx512ifma -target-feature +avx512vl -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s + + #include __m128i test_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) { diff --git a/clang/test/CodeGen/X86/avx512vlfp16-builtins.c b/clang/test/CodeGen/X86/avx512vlfp16-builtins.c index ce120b20a4cca..f1865aae4a9e2 100644 --- a/clang/test/CodeGen/X86/avx512vlfp16-builtins.c +++ b/clang/test/CodeGen/X86/avx512vlfp16-builtins.c @@ -37,6 +37,8 @@ __m128h test_mm_set_sh(_Float16 __h) { return _mm_set_sh(__h); } +TEST_CONSTEXPR(match_m128h(_mm_set_sh(2.0), 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)); + __m128h test_mm_set1_ph(_Float16 h) { // CHECK-LABEL: test_mm_set1_ph // CHECK: insertelement <8 x half> {{.*}}, i32 0 @@ -84,6 +86,8 @@ __m128h test_mm_set1_pch(_Float16 _Complex h) { return _mm_set1_pch(h); } 
+TEST_CONSTEXPR(match_m128h(_mm_set1_pch(1.0), 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0)); + __m256h test_mm256_set1_pch(_Float16 _Complex h) { // CHECK-LABEL: test_mm256_set1_pch // CHECK: insertelement <8 x float> {{.*}}, i32 0 @@ -97,6 +101,8 @@ __m256h test_mm256_set1_pch(_Float16 _Complex h) { return _mm256_set1_pch(h); } +TEST_CONSTEXPR(match_m256h(_mm256_set1_pch(1.0), 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0)); + __m128h test_mm_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8) { // CHECK-LABEL: test_mm_set_ph @@ -110,6 +116,7 @@ __m128h test_mm_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h // CHECK: insertelement <8 x half> {{.*}}, i32 7 return _mm_set_ph(__h1, __h2, __h3, __h4, __h5, __h6, __h7, __h8); } +TEST_CONSTEXPR(match_m128h(_mm_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0), 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0)); __m256h test_mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8, @@ -136,6 +143,8 @@ __m256h test_mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h9, __h10, __h11, __h12, __h13, __h14, __h15, __h16); } +TEST_CONSTEXPR(match_m256h(_mm256_set_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0), 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0)); + __m128h test_mm_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8) { // CHECK-LABEL: test_mm_setr_ph @@ -150,6 +159,8 @@ __m128h test_mm_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __ return _mm_setr_ph(__h1, __h2, __h3, __h4, __h5, __h6, __h7, __h8); } +TEST_CONSTEXPR(match_m128h(_mm_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0)); + __m256h test_mm256_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8, _Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12, @@ -175,6 +186,8 @@ __m256h test_mm256_setr_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h9, __h10, __h11, __h12, __h13, __h14, __h15, __h16); } +TEST_CONSTEXPR(match_m256h(_mm256_setr_ph(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0), 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0)); + __m256h test_mm256_add_ph(__m256h __A, __m256h __B) { // CHECK-LABEL: test_mm256_add_ph // CHECK: %{{.*}} = fadd <16 x half> %{{.*}}, %{{.*}} diff --git a/clang/test/CodeGen/X86/avxifma-builtins.c b/clang/test/CodeGen/X86/avxifma-builtins.c index dd0f220b378b4..aa151591ed143 100644 --- a/clang/test/CodeGen/X86/avxifma-builtins.c +++ b/clang/test/CodeGen/X86/avxifma-builtins.c @@ -3,6 +3,12 @@ // RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s // RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror | FileCheck %s +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror 
-fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +avxifma -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s + + #include __m128i test_mm_madd52hi_epu64(__m128i __X, __m128i __Y, __m128i __Z) { diff --git a/clang/test/CodeGen/X86/avxvnniint8-builtins.c b/clang/test/CodeGen/X86/avxvnniint8-builtins.c index dd4a4483abaab..021e658cc9d2c 100644 --- a/clang/test/CodeGen/X86/avxvnniint8-builtins.c +++ b/clang/test/CodeGen/X86/avxvnniint8-builtins.c @@ -10,73 +10,73 @@ #include // CHECK-LABEL: test_mm_dpbssd_epi32 -// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}) +// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) __m128i test_mm_dpbssd_epi32(__m128i __W, __m128i __A, __m128i __B) { return _mm_dpbssd_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm_dpbssds_epi32 -// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}) +// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) __m128i test_mm_dpbssds_epi32(__m128i __W, __m128i __A, __m128i __B) { return _mm_dpbssds_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm_dpbsud_epi32 -// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}) +// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) __m128i test_mm_dpbsud_epi32(__m128i __W, __m128i __A, __m128i __B) { return _mm_dpbsud_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm_dpbsuds_epi32 -// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}) +// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) __m128i test_mm_dpbsuds_epi32(__m128i __W, __m128i __A, __m128i __B) { return _mm_dpbsuds_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm_dpbuud_epi32 -// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}) +// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) __m128i test_mm_dpbuud_epi32(__m128i __W, __m128i __A, __m128i __B) { return _mm_dpbuud_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm_dpbuuds_epi32 -// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}) +// CHECK: call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) __m128i test_mm_dpbuuds_epi32(__m128i __W, __m128i __A, __m128i __B) { return _mm_dpbuuds_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm256_dpbssd_epi32 -// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}) +// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}) __m256i test_mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B) { return _mm256_dpbssd_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm256_dpbssds_epi32 -// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> 
%{{.*}}) +// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}) __m256i test_mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B) { return _mm256_dpbssds_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm256_dpbsud_epi32 -// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}) +// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}) __m256i test_mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B) { return _mm256_dpbsud_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm256_dpbsuds_epi32 -// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}) +// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}) __m256i test_mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B) { return _mm256_dpbsuds_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm256_dpbuud_epi32 -// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}) +// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}) __m256i test_mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B) { return _mm256_dpbuud_epi32(__W, __A, __B); } // CHECK-LABEL: test_mm256_dpbuuds_epi32 -// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}) +// CHECK: call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}) __m256i test_mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B) { return _mm256_dpbuuds_epi32(__W, __A, __B); } diff --git a/clang/test/CodeGen/X86/bmi-builtins.c b/clang/test/CodeGen/X86/bmi-builtins.c index ded40ca59781e..d0ae0c7939255 100644 --- a/clang/test/CodeGen/X86/bmi-builtins.c +++ b/clang/test/CodeGen/X86/bmi-builtins.c @@ -1,7 +1,16 @@ -// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,TZCNT -// RUN: %clang_cc1 -x c -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -emit-llvm -o - -Wall -Werror -DTEST_TZCNT | FileCheck %s --check-prefix=TZCNT -// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,TZCNT -// RUN: %clang_cc1 -x c++ -std=c++11 -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -emit-llvm -o - -Wall -Werror -DTEST_TZCNT | FileCheck %s --check-prefix=TZCNT +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X64,TZCNT,TZCNT64 +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,TZCNT +// RUN: %clang_cc1 -x c -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -emit-llvm -o - -Wall -Werror -DTEST_TZCNT | FileCheck %s --check-prefixes=TZCNT,TZCNT64 +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,X64,TZCNT,TZCNT64 
+// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefixes=CHECK,TZCNT +// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -emit-llvm -o - -Wall -Werror -DTEST_TZCNT | FileCheck %s --check-prefixes=TZCNT,TZCNT64 + +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64,TZCNT,TZCNT64 +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,TZCNT +// RUN: %clang_cc1 -x c -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -emit-llvm -o - -Wall -Werror -DTEST_TZCNT -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=TZCNT,TZCNT64 +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64,TZCNT,TZCNT64 +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +bmi -emit-llvm -o - -Wall -Werror -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,TZCNT +// RUN: %clang_cc1 -x c++ -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 -ffreestanding %s -triple=x86_64-windows-msvc -emit-llvm -o - -Wall -Werror -DTEST_TZCNT -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=TZCNT,TZCNT64 #include @@ -48,20 +57,20 @@ unsigned int test_tzcnt_u32(unsigned int __X) { #ifdef __x86_64__ unsigned long long test__tzcnt_u64(unsigned long long __X) { -// TZCNT-LABEL: test__tzcnt_u64 -// TZCNT: i64 @llvm.cttz.i64(i64 %{{.*}}, i1 false) +// TZCNT64-LABEL: test__tzcnt_u64 +// TZCNT64: i64 @llvm.cttz.i64(i64 %{{.*}}, i1 false) return __tzcnt_u64(__X); } long long test_mm_tzcnt_64(unsigned long long __X) { -// TZCNT-LABEL: test_mm_tzcnt_64 -// TZCNT: i64 @llvm.cttz.i64(i64 %{{.*}}, i1 false) +// TZCNT64-LABEL: test_mm_tzcnt_64 +// TZCNT64: i64 @llvm.cttz.i64(i64 %{{.*}}, i1 false) return _mm_tzcnt_64(__X); } unsigned long long test_tzcnt_u64(unsigned long long __X) { -// TZCNT-LABEL: test_tzcnt_u64 -// TZCNT: i64 @llvm.cttz.i64(i64 %{{.*}}, i1 false) +// TZCNT64-LABEL: test_tzcnt_u64 +// TZCNT64: i64 @llvm.cttz.i64(i64 %{{.*}}, i1 false) return _tzcnt_u64(__X); } #endif @@ -103,36 +112,36 @@ unsigned int test__blsr_u32(unsigned int __X) { #ifdef __x86_64__ unsigned long long test__andn_u64(unsigned long __X, unsigned long __Y) { -// CHECK-LABEL: test__andn_u64 -// CHECK: xor i64 %{{.*}}, -1 -// CHECK: and i64 %{{.*}}, %{{.*}} +// X64-LABEL: test__andn_u64 +// X64: xor i64 %{{.*}}, -1 +// X64: and i64 %{{.*}}, %{{.*}} return __andn_u64(__X, __Y); } unsigned long long test__bextr_u64(unsigned long __X, unsigned long __Y) { -// CHECK-LABEL: test__bextr_u64 -// CHECK: i64 @llvm.x86.bmi.bextr.64(i64 %{{.*}}, i64 %{{.*}}) +// X64-LABEL: test__bextr_u64 +// X64: i64 @llvm.x86.bmi.bextr.64(i64 %{{.*}}, i64 %{{.*}}) return __bextr_u64(__X, __Y); } unsigned long long test__blsi_u64(unsigned long long __X) { -// CHECK-LABEL: test__blsi_u64 -// CHECK: sub i64 0, %{{.*}} -// CHECK: and i64 %{{.*}}, %{{.*}} +// X64-LABEL: test__blsi_u64 +// X64: sub 
i64 0, %{{.*}} +// X64: and i64 %{{.*}}, %{{.*}} return __blsi_u64(__X); } unsigned long long test__blsmsk_u64(unsigned long long __X) { -// CHECK-LABEL: test__blsmsk_u64 -// CHECK: sub i64 %{{.*}}, 1 -// CHECK: xor i64 %{{.*}}, %{{.*}} +// X64-LABEL: test__blsmsk_u64 +// X64: sub i64 %{{.*}}, 1 +// X64: xor i64 %{{.*}}, %{{.*}} return __blsmsk_u64(__X); } unsigned long long test__blsr_u64(unsigned long long __X) { -// CHECK-LABEL: test__blsr_u64 -// CHECK: sub i64 %{{.*}}, 1 -// CHECK: and i64 %{{.*}}, %{{.*}} +// X64-LABEL: test__blsr_u64 +// X64: sub i64 %{{.*}}, 1 +// X64: and i64 %{{.*}}, %{{.*}} return __blsr_u64(__X); } #endif @@ -186,49 +195,49 @@ unsigned int test_blsr_u32(unsigned int __X) { #ifdef __x86_64__ unsigned long long test_andn_u64(unsigned long __X, unsigned long __Y) { -// CHECK-LABEL: test_andn_u64 -// CHECK: xor i64 %{{.*}}, -1 -// CHECK: and i64 %{{.*}}, %{{.*}} +// X64-LABEL: test_andn_u64 +// X64: xor i64 %{{.*}}, -1 +// X64: and i64 %{{.*}}, %{{.*}} return _andn_u64(__X, __Y); } unsigned long long test_bextr_u64(unsigned long __X, unsigned int __Y, unsigned int __Z) { -// CHECK-LABEL: test_bextr_u64 -// CHECK: and i32 %{{.*}}, 255 -// CHECK: and i32 %{{.*}}, 255 -// CHECK: shl i32 %{{.*}}, 8 -// CHECK: or i32 %{{.*}}, %{{.*}} -// CHECK: zext i32 %{{.*}} to i64 -// CHECK: i64 @llvm.x86.bmi.bextr.64(i64 %{{.*}}, i64 %{{.*}}) +// X64-LABEL: test_bextr_u64 +// X64: and i32 %{{.*}}, 255 +// X64: and i32 %{{.*}}, 255 +// X64: shl i32 %{{.*}}, 8 +// X64: or i32 %{{.*}}, %{{.*}} +// X64: zext i32 %{{.*}} to i64 +// X64: i64 @llvm.x86.bmi.bextr.64(i64 %{{.*}}, i64 %{{.*}}) return _bextr_u64(__X, __Y, __Z); } unsigned long long test_bextr2_u64(unsigned long long __X, unsigned long long __Y) { -// CHECK-LABEL: test_bextr2_u64 -// CHECK: i64 @llvm.x86.bmi.bextr.64(i64 %{{.*}}, i64 %{{.*}}) +// X64-LABEL: test_bextr2_u64 +// X64: i64 @llvm.x86.bmi.bextr.64(i64 %{{.*}}, i64 %{{.*}}) return _bextr2_u64(__X, __Y); } unsigned long long test_blsi_u64(unsigned long long __X) { -// CHECK-LABEL: test_blsi_u64 -// CHECK: sub i64 0, %{{.*}} -// CHECK: and i64 %{{.*}}, %{{.*}} +// X64-LABEL: test_blsi_u64 +// X64: sub i64 0, %{{.*}} +// X64: and i64 %{{.*}}, %{{.*}} return _blsi_u64(__X); } unsigned long long test_blsmsk_u64(unsigned long long __X) { -// CHECK-LABEL: test_blsmsk_u64 -// CHECK: sub i64 %{{.*}}, 1 -// CHECK: xor i64 %{{.*}}, %{{.*}} +// X64-LABEL: test_blsmsk_u64 +// X64: sub i64 %{{.*}}, 1 +// X64: xor i64 %{{.*}}, %{{.*}} return _blsmsk_u64(__X); } unsigned long long test_blsr_u64(unsigned long long __X) { -// CHECK-LABEL: test_blsr_u64 -// CHECK: sub i64 %{{.*}}, 1 -// CHECK: and i64 %{{.*}}, %{{.*}} +// X64-LABEL: test_blsr_u64 +// X64: sub i64 %{{.*}}, 1 +// X64: and i64 %{{.*}}, %{{.*}} return _blsr_u64(__X); } #endif diff --git a/clang/test/CodeGen/X86/bmi2-builtins.c b/clang/test/CodeGen/X86/bmi2-builtins.c index 48424f553768b..1b2cb9048adb2 100644 --- a/clang/test/CodeGen/X86/bmi2-builtins.c +++ b/clang/test/CodeGen/X86/bmi2-builtins.c @@ -3,6 +3,11 @@ // RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +bmi2 -emit-llvm -o - | FileCheck %s // RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +bmi2 -emit-llvm -o - | FileCheck %s --check-prefix=B32 +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +bmi2 -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-apple-darwin -target-feature 
+bmi2 -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefix=B32 +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +bmi2 -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-apple-darwin -target-feature +bmi2 -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefix=B32 + #include diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c index 43d9ec5e6cc8b..26c5f7315457e 100644 --- a/clang/test/CodeGen/X86/mmx-builtins.c +++ b/clang/test/CodeGen/X86/mmx-builtins.c @@ -371,7 +371,6 @@ __m64 test_mm_max_pi16(__m64 a, __m64 b) { // CHECK: call <4 x i16> @llvm.smax.v4i16( return _mm_max_pi16(a, b); } - TEST_CONSTEXPR(match_v4hi(_mm_max_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-1, 2, -3, 4}), 1, 2, 3, 4)); __m64 test_mm_max_pu8(__m64 a, __m64 b) { @@ -379,13 +378,13 @@ __m64 test_mm_max_pu8(__m64 a, __m64 b) { // CHECK: call <8 x i8> @llvm.umax.v8i8( return _mm_max_pu8(a, b); } +TEST_CONSTEXPR(match_v8qi(_mm_max_pu8((__m64)(__v8qs){ 16, 17, 18, -19, -20, 21, -22, -23}, (__m64)(__v8qs){ 1, -2, -3, 4, 5, 0, 7, -8}), 16, -2, -3, -19, -20, 21, -22, -8)); __m64 test_mm_min_pi16(__m64 a, __m64 b) { // CHECK-LABEL: test_mm_min_pi16 // CHECK: call <4 x i16> @llvm.smin.v4i16( return _mm_min_pi16(a, b); } - TEST_CONSTEXPR(match_v4hi(_mm_min_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-1, 2, -3, 4}), -1, -2, -3, -4)); __m64 test_mm_min_pu8(__m64 a, __m64 b) { @@ -393,6 +392,7 @@ __m64 test_mm_min_pu8(__m64 a, __m64 b) { // CHECK: call <8 x i8> @llvm.umin.v8i8( return _mm_min_pu8(a, b); } +TEST_CONSTEXPR(match_v8qi(_mm_min_pu8((__m64)(__v8qs){ 16, 17, 18, -19, -20, 21, -22, -23}, (__m64)(__v8qs){ 1, -2, -3, 4, 5, 0, 7, -8}), 1, 17, 18, 4, 5, 0, 7, -23)); int test_mm_movemask_pi8(__m64 a) { // CHECK-LABEL: test_mm_movemask_pi8 @@ -448,18 +448,21 @@ __m64 test_mm_packs_pi16(__m64 a, __m64 b) { // CHECK: call <16 x i8> @llvm.x86.sse2.packsswb.128( return _mm_packs_pi16(a, b); } +TEST_CONSTEXPR(match_v8qi(_mm_packs_pi16((__m64)(__v4hi){130, -200, 127, -128}, (__m64)(__v4hi){0, 1, -1, 255}), 127, -128, 127, -128, 0, 1, -1, 127)); __m64 test_mm_packs_pi32(__m64 a, __m64 b) { // CHECK-LABEL: test_mm_packs_pi32 // CHECK: call <8 x i16> @llvm.x86.sse2.packssdw.128( return _mm_packs_pi32(a, b); } +TEST_CONSTEXPR(match_v4hi(_mm_packs_pi32((__m64)(__v2si){40000, -50000}, (__m64)(__v2si){0, 70000}), 32767, -32768, 0, 32767)); __m64 test_mm_packs_pu16(__m64 a, __m64 b) { // CHECK-LABEL: test_mm_packs_pu16 // CHECK: call <16 x i8> @llvm.x86.sse2.packuswb.128( return _mm_packs_pu16(a, b); } +TEST_CONSTEXPR(match_v8qi(_mm_packs_pu16((__m64)(__v4hi){-1, 0, 128, 300}, (__m64)(__v4hi){255, -200, 42, -42}), 0, 0, -128, -1, -1, 0, 42, 0)); __m64 test_mm_sad_pu8(__m64 a, __m64 b) { // CHECK-LABEL: test_mm_sad_pu8 diff --git a/clang/test/CodeGen/X86/sse2-builtins.c b/clang/test/CodeGen/X86/sse2-builtins.c index ddcff9728c543..84b90c09444c2 100644 --- a/clang/test/CodeGen/X86/sse2-builtins.c +++ b/clang/test/CodeGen/X86/sse2-builtins.c @@ -1026,18 +1026,21 @@ __m128i test_mm_packs_epi16(__m128i A, __m128i B) { // CHECK: call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) return _mm_packs_epi16(A, B); } +TEST_CONSTEXPR(match_v16qi(_mm_packs_epi16((__m128i)(__v8hi){130, -200, 127, -128, 300, -1000, 42, -42}, (__m128i)(__v8hi){0, 1, -1, 255, -129, 128, 20000, -32768}), 127, 
-128, 127, -128, 127, -128, 42, -42, 0, 1, -1, 127, -128, 127, 127, -128)); __m128i test_mm_packs_epi32(__m128i A, __m128i B) { // CHECK-LABEL: test_mm_packs_epi32 // CHECK: call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) return _mm_packs_epi32(A, B); } +TEST_CONSTEXPR(match_v8hi(_mm_packs_epi32((__m128i)(__v4si){40000, -50000, 32767, -32768}, (__m128i)(__v4si){0, 1, -1, 70000}), 32767, -32768, 32767, -32768, 0, 1, -1, 32767)); __m128i test_mm_packus_epi16(__m128i A, __m128i B) { // CHECK-LABEL: test_mm_packus_epi16 // CHECK: call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) return _mm_packus_epi16(A, B); } +TEST_CONSTEXPR(match_v16qi(_mm_packus_epi16((__m128i)(__v8hi){-1, 0, 1, 127, 300, -1000, 255, -42}, (__m128i)(__v8hi){0, 1, -1, 255, -129, 128, 20000, -32768}), 0, 0, 1, 127, -1, 0, -1, 0, 0, 1, 0, -1, 0, -128, -1, 0)); void test_mm_pause(void) { // CHECK-LABEL: test_mm_pause diff --git a/clang/test/CodeGen/X86/sse41-builtins.c b/clang/test/CodeGen/X86/sse41-builtins.c index c7265b188d572..3c3724643870e 100644 --- a/clang/test/CodeGen/X86/sse41-builtins.c +++ b/clang/test/CodeGen/X86/sse41-builtins.c @@ -399,6 +399,7 @@ __m128i test_mm_packus_epi32(__m128i x, __m128i y) { // CHECK: call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) return _mm_packus_epi32(x, y); } +TEST_CONSTEXPR(match_v8hi(_mm_packus_epi32((__m128i)(__v4si){40000, -50000, 32767, -32768}, (__m128i)(__v4si){0, 1, -1, 70000}), -25536, 0, 32767, 0, 0, 1, 0, -1)); __m128d test_mm_round_pd(__m128d x) { // CHECK-LABEL: test_mm_round_pd diff --git a/clang/test/CodeGen/X86/tbm-builtins.c b/clang/test/CodeGen/X86/tbm-builtins.c index d916627a23f57..89746bf67e909 100644 --- a/clang/test/CodeGen/X86/tbm-builtins.c +++ b/clang/test/CodeGen/X86/tbm-builtins.c @@ -1,5 +1,12 @@ -// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +tbm -emit-llvm -o - | FileCheck %s -// RUN: %clang_cc1 -x c++ -std=c++11 -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +tbm -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +tbm -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,X64 +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-unknown-unknown -target-feature +tbm -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +tbm -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,X64 +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-unknown-unknown -target-feature +tbm -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK + +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +tbm -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64 +// RUN: %clang_cc1 -x c -ffreestanding %s -triple=i386-unknown-unknown -target-feature +tbm -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +tbm -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK,X64 +// RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=i386-unknown-unknown -target-feature +tbm -emit-llvm -o - -fexperimental-new-constant-interpreter | FileCheck %s --check-prefixes=CHECK #include @@ -13,14 +20,14 @@ unsigned int 
test__bextri_u32(unsigned int a) { #ifdef __x86_64__ unsigned long long test__bextri_u64(unsigned long long a) { - // CHECK-LABEL: test__bextri_u64 - // CHECK: call i64 @llvm.x86.tbm.bextri.u64(i64 %{{.*}}, i64 2) + // X64-LABEL: test__bextri_u64 + // X64: call i64 @llvm.x86.tbm.bextri.u64(i64 %{{.*}}, i64 2) return __bextri_u64(a, 2); } unsigned long long test__bextri_u64_bigint(unsigned long long a) { - // CHECK-LABEL: test__bextri_u64_bigint - // CHECK: call i64 @llvm.x86.tbm.bextri.u64(i64 %{{.*}}, i64 549755813887) + // X64-LABEL: test__bextri_u64_bigint + // X64: call i64 @llvm.x86.tbm.bextri.u64(i64 %{{.*}}, i64 549755813887) return __bextri_u64(a, 0x7fffffffffLL); } #endif @@ -34,9 +41,9 @@ unsigned int test__blcfill_u32(unsigned int a) { #ifdef __x86_64__ unsigned long long test__blcfill_u64(unsigned long long a) { - // CHECK-LABEL: test__blcfill_u64 - // CHECK: [[TMP:%.*]] = add i64 %{{.*}}, 1 - // CHECK: %{{.*}} = and i64 %{{.*}}, [[TMP]] + // X64-LABEL: test__blcfill_u64 + // X64: [[TMP:%.*]] = add i64 %{{.*}}, 1 + // X64: %{{.*}} = and i64 %{{.*}}, [[TMP]] return __blcfill_u64(a); } #endif @@ -51,10 +58,10 @@ unsigned int test__blci_u32(unsigned int a) { #ifdef __x86_64__ unsigned long long test__blci_u64(unsigned long long a) { - // CHECK-LABEL: test__blci_u64 - // CHECK: [[TMP1:%.*]] = add i64 %{{.*}}, 1 - // CHECK: [[TMP2:%.*]] = xor i64 [[TMP1]], -1 - // CHECK: %{{.*}} = or i64 %{{.*}}, [[TMP2]] + // X64-LABEL: test__blci_u64 + // X64: [[TMP1:%.*]] = add i64 %{{.*}}, 1 + // X64: [[TMP2:%.*]] = xor i64 [[TMP1]], -1 + // X64: %{{.*}} = or i64 %{{.*}}, [[TMP2]] return __blci_u64(a); } #endif @@ -69,10 +76,10 @@ unsigned int test__blcic_u32(unsigned int a) { #ifdef __x86_64__ unsigned long long test__blcic_u64(unsigned long long a) { - // CHECK-LABEL: test__blcic_u64 - // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 - // CHECK: [[TMP2:%.*]] = add i64 %{{.*}}, 1 - // CHECK-NEXT: {{.*}} = and i64 [[TMP1]], [[TMP2]] + // X64-LABEL: test__blcic_u64 + // X64: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 + // X64: [[TMP2:%.*]] = add i64 %{{.*}}, 1 + // X64-NEXT: {{.*}} = and i64 [[TMP1]], [[TMP2]] return __blcic_u64(a); } #endif @@ -86,9 +93,9 @@ unsigned int test__blcmsk_u32(unsigned int a) { #ifdef __x86_64__ unsigned long long test__blcmsk_u64(unsigned long long a) { - // CHECK-LABEL: test__blcmsk_u64 - // CHECK: [[TMP:%.*]] = add i64 %{{.*}}, 1 - // CHECK-NEXT: {{.*}} = xor i64 %{{.*}}, [[TMP]] + // X64-LABEL: test__blcmsk_u64 + // X64: [[TMP:%.*]] = add i64 %{{.*}}, 1 + // X64-NEXT: {{.*}} = xor i64 %{{.*}}, [[TMP]] return __blcmsk_u64(a); } #endif @@ -102,9 +109,9 @@ unsigned int test__blcs_u32(unsigned int a) { #ifdef __x86_64__ unsigned long long test__blcs_u64(unsigned long long a) { - // CHECK-LABEL: test__blcs_u64 - // CHECK: [[TMP:%.*]] = add i64 %{{.*}}, 1 - // CHECK-NEXT: {{.*}} = or i64 %{{.*}}, [[TMP]] + // X64-LABEL: test__blcs_u64 + // X64: [[TMP:%.*]] = add i64 %{{.*}}, 1 + // X64-NEXT: {{.*}} = or i64 %{{.*}}, [[TMP]] return __blcs_u64(a); } #endif @@ -118,9 +125,9 @@ unsigned int test__blsfill_u32(unsigned int a) { #ifdef __x86_64__ unsigned long long test__blsfill_u64(unsigned long long a) { - // CHECK-LABEL: test__blsfill_u64 - // CHECK: [[TMP:%.*]] = sub i64 %{{.*}}, 1 - // CHECK-NEXT: {{.*}} = or i64 %{{.*}}, [[TMP]] + // X64-LABEL: test__blsfill_u64 + // X64: [[TMP:%.*]] = sub i64 %{{.*}}, 1 + // X64-NEXT: {{.*}} = or i64 %{{.*}}, [[TMP]] return __blsfill_u64(a); } #endif @@ -135,10 +142,10 @@ unsigned int test__blsic_u32(unsigned int a) { #ifdef __x86_64__ unsigned 
long long test__blsic_u64(unsigned long long a) { - // CHECK-LABEL: test__blsic_u64 - // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 - // CHECK: [[TMP2:%.*]] = sub i64 %{{.*}}, 1 - // CHECK-NEXT: {{.*}} = or i64 [[TMP1]], [[TMP2]] + // X64-LABEL: test__blsic_u64 + // X64: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 + // X64: [[TMP2:%.*]] = sub i64 %{{.*}}, 1 + // X64-NEXT: {{.*}} = or i64 [[TMP1]], [[TMP2]] return __blsic_u64(a); } #endif @@ -153,10 +160,10 @@ unsigned int test__t1mskc_u32(unsigned int a) { #ifdef __x86_64__ unsigned long long test__t1mskc_u64(unsigned long long a) { - // CHECK-LABEL: test__t1mskc_u64 - // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 - // CHECK: [[TMP2:%.*]] = add i64 %{{.*}}, 1 - // CHECK-NEXT: {{.*}} = or i64 [[TMP1]], [[TMP2]] + // X64-LABEL: test__t1mskc_u64 + // X64: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 + // X64: [[TMP2:%.*]] = add i64 %{{.*}}, 1 + // X64-NEXT: {{.*}} = or i64 [[TMP1]], [[TMP2]] return __t1mskc_u64(a); } #endif @@ -171,10 +178,10 @@ unsigned int test__tzmsk_u32(unsigned int a) { #ifdef __x86_64__ unsigned long long test__tzmsk_u64(unsigned long long a) { - // CHECK-LABEL: test__tzmsk_u64 - // CHECK: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 - // CHECK: [[TMP2:%.*]] = sub i64 %{{.*}}, 1 - // CHECK-NEXT: {{.*}} = and i64 [[TMP1]], [[TMP2]] + // X64-LABEL: test__tzmsk_u64 + // X64: [[TMP1:%.*]] = xor i64 %{{.*}}, -1 + // X64: [[TMP2:%.*]] = sub i64 %{{.*}}, 1 + // X64-NEXT: {{.*}} = and i64 [[TMP1]], [[TMP2]] return __tzmsk_u64(a); } #endif diff --git a/clang/test/CodeGen/amdgpu-image-rsrc-type-debug-info.c b/clang/test/CodeGen/amdgpu-image-rsrc-type-debug-info.c new file mode 100644 index 0000000000000..ef68c79bef592 --- /dev/null +++ b/clang/test/CodeGen/amdgpu-image-rsrc-type-debug-info.c @@ -0,0 +1,17 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: amdgpu-registered-target +// RUN: %clang_cc1 -triple amdgcn -emit-llvm -o - %s -debug-info-kind=limited | FileCheck %s + +// CHECK-LABEL: define dso_local void @test_locals( +// CHECK-SAME: ) #[[ATTR0:[0-9]+]] !dbg [[DBG6:![0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[IMG:%.*]] = alloca ptr, align 32, addrspace(5) +// CHECK-NEXT: [[IMG_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[IMG]] to ptr +// CHECK-NEXT: #dbg_declare(ptr addrspace(5) [[IMG]], [[META11:![0-9]+]], !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), [[META14:![0-9]+]]) +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[IMG_ASCAST]], align 32, !dbg [[DBG15:![0-9]+]] +// CHECK-NEXT: ret void, !dbg [[DBG16:![0-9]+]] +// +void test_locals(void) { + __amdgpu_texture_t img; + (void)img; +} diff --git a/clang/test/CodeGen/atomic-test-and-set.c b/clang/test/CodeGen/atomic-test-and-set.c index 39d4cef16b21d..6438094567f33 100644 --- a/clang/test/CodeGen/atomic-test-and-set.c +++ b/clang/test/CodeGen/atomic-test-and-set.c @@ -81,7 +81,8 @@ void clear_dynamic(char *ptr, int order) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -99,7 +100,8 @@ void 
test_and_set_relaxed(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -117,7 +119,8 @@ void test_and_set_consume(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -135,7 +138,8 @@ void test_and_set_acquire(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -153,7 +157,8 @@ void test_and_set_release(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -171,7 +176,8 @@ void test_and_set_acq_rel(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -200,27 +206,32 @@ void test_and_set_seq_cst(char *ptr) { // CHECK: [[MONOTONIC]]: // CHECK-NEXT: [[TMP2:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP2]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE:.*]] // CHECK: [[ACQUIRE]]: // CHECK-NEXT: [[TMP3:%.*]] = 
atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1 // CHECK-NEXT: [[TOBOOL1:%.*]] = icmp ne i8 [[TMP3]], 0 -// CHECK-NEXT: store i1 [[TOBOOL1]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT1:%.*]] = zext i1 [[TOBOOL1]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT1]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE]] // CHECK: [[RELEASE]]: // CHECK-NEXT: [[TMP4:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1 // CHECK-NEXT: [[TOBOOL2:%.*]] = icmp ne i8 [[TMP4]], 0 -// CHECK-NEXT: store i1 [[TOBOOL2]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT2:%.*]] = zext i1 [[TOBOOL2]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT2]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE]] // CHECK: [[ACQREL]]: // CHECK-NEXT: [[TMP5:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1 // CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i8 [[TMP5]], 0 -// CHECK-NEXT: store i1 [[TOBOOL3]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT3:%.*]] = zext i1 [[TOBOOL3]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT3]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE]] // CHECK: [[SEQCST]]: // CHECK-NEXT: [[TMP6:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1 // CHECK-NEXT: [[TOBOOL4:%.*]] = icmp ne i8 [[TMP6]], 0 -// CHECK-NEXT: store i1 [[TOBOOL4]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT4:%.*]] = zext i1 [[TOBOOL4]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT4]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE]] // CHECK: [[ATOMIC_CONTINUE]]: // CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 @@ -239,7 +250,8 @@ void test_and_set_dynamic(char *ptr, int order) { // CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x i32], ptr [[X]], i64 0, i64 0 // CHECK-NEXT: [[TMP0:%.*]] = atomicrmw volatile xchg ptr [[ARRAYDECAY]], i8 1 seq_cst, align 4 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP0]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP1]] to i1 // CHECK-NEXT: ret void @@ -301,7 +313,8 @@ void clear_incomplete(struct incomplete *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 4 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -318,7 +331,8 @@ void test_and_set_int(int *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // 
CHECK-NEXT: ret void @@ -335,7 +349,8 @@ void test_and_set_void(void *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void diff --git a/clang/test/CodeGen/attr-cleanup.c b/clang/test/CodeGen/attr-cleanup.c index 755ede86c1382..05723a8ab60ab 100644 --- a/clang/test/CodeGen/attr-cleanup.c +++ b/clang/test/CodeGen/attr-cleanup.c @@ -1,7 +1,27 @@ -// RUN: %clang_cc1 -emit-llvm %s -o %t +// RUN: %clang_cc1 -std=c89 -emit-llvm %s -o - | FileCheck %s --check-prefix=C89 +// RUN: %clang_cc1 -std=c99 -emit-llvm %s -o - | FileCheck %s --check-prefix=C99 void f(void* arg); void g(void) { __attribute__((cleanup(f))) void *g; } +void cleaner(int *p); + +// C89-LABEL: define{{.*}} void @test_nested_for_loop_cleanup() +// C99-LABEL: define{{.*}} void @test_nested_for_loop_cleanup() +void test_nested_for_loop_cleanup(void) { + for (int i = 10; 0;) { + for (__attribute__((cleanup(cleaner))) int j = 20; 0;) + ; + i = 5; // Some operation after inner loop + } +} + +// C89: for.end: +// C89-NEXT: store i32 5, ptr %i, align 4 +// C89-NEXT: call void @cleaner(ptr noundef %j) + +// C99: for.cond.cleanup: +// C99-NEXT: call void @cleaner(ptr noundef %j) +// C99-NEXT: br label %for.end diff --git a/clang/test/CodeGen/builtin-masked.c b/clang/test/CodeGen/builtin-masked.c index adb1ad4b698ac..e2b5e099a4ba9 100644 --- a/clang/test/CodeGen/builtin-masked.c +++ b/clang/test/CodeGen/builtin-masked.c @@ -19,10 +19,10 @@ typedef _Bool v8b __attribute__((ext_vector_type(8))); // CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1 // CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1> // CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P_ADDR]], align 8 -// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP2]], i32 32, <8 x i1> [[TMP1]], <8 x i32> poison) +// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP2]], i32 4, <8 x i1> [[TMP1]], <8 x i32> poison) // CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]] // -v8i test_load(v8b m, v8i *p) { +v8i test_load(v8b m, int *p) { return __builtin_masked_load(m, p); } @@ -45,10 +45,10 @@ v8i test_load(v8b m, v8i *p) { // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1> // CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P_ADDR]], align 8 // CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr [[T_ADDR]], align 32 -// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP3]], i32 32, <8 x i1> [[TMP2]], <8 x i32> [[TMP4]]) +// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP2]], <8 x i32> [[TMP4]]) // CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]] // -v8i test_load_passthru(v8b m, v8i *p, v8i t) { +v8i test_load_passthru(v8b m, int *p, v8i t) { return __builtin_masked_load(m, p, t); } @@ -74,7 +74,7 @@ v8i test_load_passthru(v8b m, v8i *p, v8i t) { // CHECK-NEXT: [[MASKED_EXPAND_LOAD:%.*]] = call <8 x i32> @llvm.masked.expandload.v8i32(ptr [[TMP3]], <8 x i1> [[TMP2]], <8 x i32> [[TMP4]]) // CHECK-NEXT: ret <8 x 
i32> [[MASKED_EXPAND_LOAD]] // -v8i test_load_expand(v8b m, v8i *p, v8i t) { +v8i test_load_expand(v8b m, int *p, v8i t) { return __builtin_masked_expand_load(m, p, t); } @@ -97,10 +97,10 @@ v8i test_load_expand(v8b m, v8i *p, v8i t) { // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1> // CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32 // CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[P_ADDR]], align 8 -// CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP3]], ptr [[TMP4]], i32 32, <8 x i1> [[TMP2]]) +// CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP3]], ptr [[TMP4]], i32 4, <8 x i1> [[TMP2]]) // CHECK-NEXT: ret void // -void test_store(v8b m, v8i v, v8i *p) { +void test_store(v8b m, v8i v, int *p) { __builtin_masked_store(m, v, p); } @@ -126,7 +126,7 @@ void test_store(v8b m, v8i v, v8i *p) { // CHECK-NEXT: call void @llvm.masked.compressstore.v8i32(<8 x i32> [[TMP3]], ptr [[TMP4]], <8 x i1> [[TMP2]]) // CHECK-NEXT: ret void // -void test_compress_store(v8b m, v8i v, v8i *p) { +void test_compress_store(v8b m, v8i v, int *p) { __builtin_masked_compress_store(m, v, p); } @@ -187,3 +187,109 @@ v8i test_gather(v8b mask, v8i idx, int *ptr) { void test_scatter(v8b mask, v8i val, v8i idx, int *ptr) { __builtin_masked_scatter(mask, val, idx, ptr); } + +// CHECK-LABEL: define dso_local <8 x i32> @test_load_as( +// CHECK-SAME: i8 noundef [[MASK_COERCE:%.*]], ptr addrspace(42) noundef [[PTR:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[MASK:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[MASK_ADDR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr addrspace(42), align 8 +// CHECK-NEXT: store i8 [[MASK_COERCE]], ptr [[MASK]], align 1 +// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[MASK]], align 1 +// CHECK-NEXT: [[MASK1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1> +// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i1> [[MASK1]] to i8 +// CHECK-NEXT: store i8 [[TMP0]], ptr [[MASK_ADDR]], align 1 +// CHECK-NEXT: store ptr addrspace(42) [[PTR]], ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[MASK_ADDR]], align 1 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1> +// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(42), ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p42(ptr addrspace(42) [[TMP2]], i32 4, <8 x i1> [[TMP1]], <8 x i32> poison) +// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]] +// +v8i test_load_as(v8b mask, int __attribute__((address_space(42))) * ptr) { + return __builtin_masked_load(mask, ptr); +} + +// CHECK-LABEL: define dso_local void @test_store_as( +// CHECK-SAME: i8 noundef [[M_COERCE:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]], ptr addrspace(42) noundef [[P:%.*]]) #[[ATTR3]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[M:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[V_ADDR:%.*]] = alloca <8 x i32>, align 32 +// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr addrspace(42), align 8 +// CHECK-NEXT: store i8 [[M_COERCE]], ptr [[M]], align 1 +// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[M]], align 1 +// CHECK-NEXT: [[M1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1> +// CHECK-NEXT: [[V:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[M1]] to i8 +// CHECK-NEXT: store i8 [[TMP1]], ptr [[M_ADDR]], align 1 +// CHECK-NEXT: store <8 x i32> [[V]], ptr [[V_ADDR]], align 32 +// 
CHECK-NEXT: store ptr addrspace(42) [[P]], ptr [[P_ADDR]], align 8 +// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1> +// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32 +// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(42), ptr [[P_ADDR]], align 8 +// CHECK-NEXT: call void @llvm.masked.store.v8i32.p42(<8 x i32> [[TMP3]], ptr addrspace(42) [[TMP4]], i32 4, <8 x i1> [[TMP2]]) +// CHECK-NEXT: ret void +// +void test_store_as(v8b m, v8i v, int __attribute__((address_space(42))) *p) { + __builtin_masked_store(m, v, p); +} + +// CHECK-LABEL: define dso_local <8 x i32> @test_gather_as( +// CHECK-SAME: i8 noundef [[MASK_COERCE:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]], ptr addrspace(42) noundef [[PTR:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[MASK:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[MASK_ADDR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[IDX_ADDR:%.*]] = alloca <8 x i32>, align 32 +// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr addrspace(42), align 8 +// CHECK-NEXT: store i8 [[MASK_COERCE]], ptr [[MASK]], align 1 +// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[MASK]], align 1 +// CHECK-NEXT: [[MASK1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1> +// CHECK-NEXT: [[IDX:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32 +// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i1> [[MASK1]] to i8 +// CHECK-NEXT: store i8 [[TMP1]], ptr [[MASK_ADDR]], align 1 +// CHECK-NEXT: store <8 x i32> [[IDX]], ptr [[IDX_ADDR]], align 32 +// CHECK-NEXT: store ptr addrspace(42) [[PTR]], ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[MASK_ADDR]], align 1 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1> +// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32 +// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(42), ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr addrspace(42) [[TMP4]], <8 x i32> [[TMP3]] +// CHECK-NEXT: [[MASKED_GATHER:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p42(<8 x ptr addrspace(42)> [[TMP5]], i32 4, <8 x i1> [[TMP2]], <8 x i32> poison) +// CHECK-NEXT: ret <8 x i32> [[MASKED_GATHER]] +// +v8i test_gather_as(v8b mask, v8i idx, int __attribute__((address_space(42))) *ptr) { + return __builtin_masked_gather(mask, idx, ptr); +} + +// CHECK-LABEL: define dso_local void @test_scatter_as( +// CHECK-SAME: i8 noundef [[MASK_COERCE:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP0:%.*]], ptr noundef byval(<8 x i32>) align 32 [[TMP1:%.*]], ptr addrspace(42) noundef [[PTR:%.*]]) #[[ATTR3]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[MASK:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[MASK_ADDR:%.*]] = alloca i8, align 1 +// CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca <8 x i32>, align 32 +// CHECK-NEXT: [[IDX_ADDR:%.*]] = alloca <8 x i32>, align 32 +// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr addrspace(42), align 8 +// CHECK-NEXT: store i8 [[MASK_COERCE]], ptr [[MASK]], align 1 +// CHECK-NEXT: [[LOAD_BITS:%.*]] = load i8, ptr [[MASK]], align 1 +// CHECK-NEXT: [[MASK1:%.*]] = bitcast i8 [[LOAD_BITS]] to <8 x i1> +// CHECK-NEXT: [[VAL:%.*]] = load <8 x i32>, ptr [[TMP0]], align 32 +// CHECK-NEXT: [[IDX:%.*]] = load <8 x i32>, ptr [[TMP1]], align 32 +// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i1> [[MASK1]] to i8 +// CHECK-NEXT: store i8 [[TMP2]], ptr [[MASK_ADDR]], align 1 +// CHECK-NEXT: store <8 x i32> [[VAL]], ptr [[VAL_ADDR]], align 32 +// CHECK-NEXT: store <8 x 
i32> [[IDX]], ptr [[IDX_ADDR]], align 32 +// CHECK-NEXT: store ptr addrspace(42) [[PTR]], ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[MASK_ADDR]], align 1 +// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1> +// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr [[VAL_ADDR]], align 32 +// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32 +// CHECK-NEXT: [[TMP6:%.*]] = load ptr addrspace(42), ptr [[PTR_ADDR]], align 8 +// CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr addrspace(42) [[TMP6]], <8 x i32> [[TMP4]] +// CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p42(<8 x i32> [[TMP5]], <8 x ptr addrspace(42)> [[TMP7]], i32 4, <8 x i1> [[TMP3]]) +// CHECK-NEXT: ret void +// +void test_scatter_as(v8b mask, v8i val, v8i idx, int __attribute__((address_space(42))) *ptr) { + __builtin_masked_scatter(mask, val, idx, ptr); +} diff --git a/clang/test/CodeGen/complex.c b/clang/test/CodeGen/complex.c index 6233529a18f8b..91fc9dda72f72 100644 --- a/clang/test/CodeGen/complex.c +++ b/clang/test/CodeGen/complex.c @@ -1,5 +1,81 @@ -// RUN: %clang_cc1 -emit-llvm-only %s +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6 +// RUN: %clang_cc1 %s -emit-llvm -triple x86_64-unknown-unknown -o - | FileCheck %s +// CHECK-LABEL: define dso_local i32 @main( +// CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*]]: +// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: [[B:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1 +// CHECK-NEXT: store double 5.000000e+00, ptr [[A_REALP]], align 8 +// CHECK-NEXT: store double 0.000000e+00, ptr [[A_IMAGP]], align 8 +// CHECK-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: store double 4.200000e+01, ptr [[B_REALP]], align 8 +// CHECK-NEXT: store double 0.000000e+00, ptr [[B_IMAGP]], align 8 +// CHECK-NEXT: [[A_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP1]], align 8 +// CHECK-NEXT: [[A_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1 +// CHECK-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP2]], align 8 +// CHECK-NEXT: [[B_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP3]], align 8 +// CHECK-NEXT: [[B_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP4]], align 8 +// CHECK-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]] +// CHECK-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]] +// CHECK-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]] +// CHECK-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]] +// CHECK-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]] +// CHECK-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]] +// CHECK-NEXT: [[ISNAN_CMP:%.*]] = fcmp uno double [[MUL_R]], 
[[MUL_R]] +// CHECK-NEXT: br i1 [[ISNAN_CMP]], label %[[COMPLEX_MUL_IMAG_NAN:.*]], label %[[COMPLEX_MUL_CONT:.*]], !prof [[PROF2:![0-9]+]] +// CHECK: [[COMPLEX_MUL_IMAG_NAN]]: +// CHECK-NEXT: [[ISNAN_CMP5:%.*]] = fcmp uno double [[MUL_I]], [[MUL_I]] +// CHECK-NEXT: br i1 [[ISNAN_CMP5]], label %[[COMPLEX_MUL_LIBCALL:.*]], label %[[COMPLEX_MUL_CONT]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_LIBCALL]]: +// CHECK-NEXT: [[CALL:%.*]] = call { double, double } @__muldc3(double noundef [[A_REAL]], double noundef [[A_IMAG]], double noundef [[B_REAL]], double noundef [[B_IMAG]]) #[[ATTR4:[0-9]+]] +// CHECK-NEXT: [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0 +// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1 +// CHECK-NEXT: br label %[[COMPLEX_MUL_CONT]] +// CHECK: [[COMPLEX_MUL_CONT]]: +// CHECK-NEXT: [[REAL_MUL_PHI:%.*]] = phi double [ [[MUL_R]], %[[ENTRY]] ], [ [[MUL_R]], %[[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], %[[COMPLEX_MUL_LIBCALL]] ] +// CHECK-NEXT: [[IMAG_MUL_PHI:%.*]] = phi double [ [[MUL_I]], %[[ENTRY]] ], [ [[MUL_I]], %[[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], %[[COMPLEX_MUL_LIBCALL]] ] +// CHECK-NEXT: [[B_REALP6:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[B_REAL7:%.*]] = load double, ptr [[B_REALP6]], align 8 +// CHECK-NEXT: [[B_IMAGP8:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: [[B_IMAG9:%.*]] = load double, ptr [[B_IMAGP8]], align 8 +// CHECK-NEXT: [[A_REALP10:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[A_REAL11:%.*]] = load double, ptr [[A_REALP10]], align 8 +// CHECK-NEXT: [[A_IMAGP12:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1 +// CHECK-NEXT: [[A_IMAG13:%.*]] = load double, ptr [[A_IMAGP12]], align 8 +// CHECK-NEXT: [[MUL_AC14:%.*]] = fmul double [[B_REAL7]], [[A_REAL11]] +// CHECK-NEXT: [[MUL_BD15:%.*]] = fmul double [[B_IMAG9]], [[A_IMAG13]] +// CHECK-NEXT: [[MUL_AD16:%.*]] = fmul double [[B_REAL7]], [[A_IMAG13]] +// CHECK-NEXT: [[MUL_BC17:%.*]] = fmul double [[B_IMAG9]], [[A_REAL11]] +// CHECK-NEXT: [[MUL_R18:%.*]] = fsub double [[MUL_AC14]], [[MUL_BD15]] +// CHECK-NEXT: [[MUL_I19:%.*]] = fadd double [[MUL_AD16]], [[MUL_BC17]] +// CHECK-NEXT: [[ISNAN_CMP20:%.*]] = fcmp uno double [[MUL_R18]], [[MUL_R18]] +// CHECK-NEXT: br i1 [[ISNAN_CMP20]], label %[[COMPLEX_MUL_IMAG_NAN21:.*]], label %[[COMPLEX_MUL_CONT25:.*]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_IMAG_NAN21]]: +// CHECK-NEXT: [[ISNAN_CMP22:%.*]] = fcmp uno double [[MUL_I19]], [[MUL_I19]] +// CHECK-NEXT: br i1 [[ISNAN_CMP22]], label %[[COMPLEX_MUL_LIBCALL23:.*]], label %[[COMPLEX_MUL_CONT25]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_LIBCALL23]]: +// CHECK-NEXT: [[CALL24:%.*]] = call { double, double } @__muldc3(double noundef [[B_REAL7]], double noundef [[B_IMAG9]], double noundef [[A_REAL11]], double noundef [[A_IMAG13]]) #[[ATTR4]] +// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { double, double } [[CALL24]], 0 +// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { double, double } [[CALL24]], 1 +// CHECK-NEXT: br label %[[COMPLEX_MUL_CONT25]] +// CHECK: [[COMPLEX_MUL_CONT25]]: +// CHECK-NEXT: [[REAL_MUL_PHI26:%.*]] = phi double [ [[MUL_R18]], %[[COMPLEX_MUL_CONT]] ], [ [[MUL_R18]], %[[COMPLEX_MUL_IMAG_NAN21]] ], [ [[TMP2]], %[[COMPLEX_MUL_LIBCALL23]] ] +// CHECK-NEXT: [[IMAG_MUL_PHI27:%.*]] = phi double [ [[MUL_I19]], %[[COMPLEX_MUL_CONT]] ], [ [[MUL_I19]], %[[COMPLEX_MUL_IMAG_NAN21]] ], [ [[TMP3]], 
%[[COMPLEX_MUL_LIBCALL23]] ] +// CHECK-NEXT: [[CMP_R:%.*]] = fcmp une double [[REAL_MUL_PHI]], [[REAL_MUL_PHI26]] +// CHECK-NEXT: [[CMP_I:%.*]] = fcmp une double [[IMAG_MUL_PHI]], [[IMAG_MUL_PHI27]] +// CHECK-NEXT: [[OR_RI:%.*]] = or i1 [[CMP_R]], [[CMP_I]] +// CHECK-NEXT: [[CONV:%.*]] = zext i1 [[OR_RI]] to i32 +// CHECK-NEXT: ret i32 [[CONV]] +// int main(void) { double _Complex a = 5; @@ -12,6 +88,36 @@ _Complex double bar(int); void test(_Complex double*); void takecomplex(_Complex double); +// CHECK-LABEL: define dso_local void @test2( +// CHECK-SAME: i32 noundef [[C:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[C_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[X:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: [[COERCE:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 +// CHECK-NEXT: [[CALL:%.*]] = call { double, double } @bar(i32 noundef 1) +// CHECK-NEXT: [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0 +// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1 +// CHECK-NEXT: [[X_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[X]], i32 0, i32 0 +// CHECK-NEXT: [[X_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[X]], i32 0, i32 1 +// CHECK-NEXT: store double [[TMP0]], ptr [[X_REALP]], align 8 +// CHECK-NEXT: store double [[TMP1]], ptr [[X_IMAGP]], align 8 +// CHECK-NEXT: call void @test(ptr noundef [[X]]) +// CHECK-NEXT: [[X_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[X]], i32 0, i32 0 +// CHECK-NEXT: [[X_REAL:%.*]] = load double, ptr [[X_REALP1]], align 8 +// CHECK-NEXT: [[X_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[X]], i32 0, i32 1 +// CHECK-NEXT: [[X_IMAG:%.*]] = load double, ptr [[X_IMAGP2]], align 8 +// CHECK-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[COERCE]], i32 0, i32 0 +// CHECK-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[COERCE]], i32 0, i32 1 +// CHECK-NEXT: store double [[X_REAL]], ptr [[COERCE_REALP]], align 8 +// CHECK-NEXT: store double [[X_IMAG]], ptr [[COERCE_IMAGP]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[COERCE]], i32 0, i32 0 +// CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[COERCE]], i32 0, i32 1 +// CHECK-NEXT: [[TMP5:%.*]] = load double, ptr [[TMP4]], align 8 +// CHECK-NEXT: call void @takecomplex(double noundef [[TMP3]], double noundef [[TMP5]]) +// CHECK-NEXT: ret void +// void test2(int c) { _Complex double X; X = bar(1); @@ -23,6 +129,104 @@ _Complex double g1, g2; _Complex float cf; double D; +// CHECK-LABEL: define dso_local void @test3( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*]]: +// CHECK-NEXT: [[GR:%.*]] = alloca double, align 8 +// CHECK-NEXT: [[G1_REAL:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G2_REAL:%.*]] = load double, ptr @g2, align 8 +// CHECK-NEXT: [[G2_IMAG:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g2, i32 0, i32 1), align 8 +// CHECK-NEXT: [[ADD_R:%.*]] = fadd double [[G1_REAL]], [[G2_REAL]] +// CHECK-NEXT: [[ADD_I:%.*]] = fadd double [[G1_IMAG]], [[G2_IMAG]] +// CHECK-NEXT: store double [[ADD_R]], ptr @g1, align 8 +// 
CHECK-NEXT: store double [[ADD_I]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL1:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG2:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G2_REAL3:%.*]] = load double, ptr @g2, align 8 +// CHECK-NEXT: [[G2_IMAG4:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g2, i32 0, i32 1), align 8 +// CHECK-NEXT: [[SUB_R:%.*]] = fsub double [[G1_REAL1]], [[G2_REAL3]] +// CHECK-NEXT: [[SUB_I:%.*]] = fsub double [[G1_IMAG2]], [[G2_IMAG4]] +// CHECK-NEXT: store double [[SUB_R]], ptr @g1, align 8 +// CHECK-NEXT: store double [[SUB_I]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL5:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG6:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G2_REAL7:%.*]] = load double, ptr @g2, align 8 +// CHECK-NEXT: [[G2_IMAG8:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g2, i32 0, i32 1), align 8 +// CHECK-NEXT: [[MUL_AC:%.*]] = fmul double [[G1_REAL5]], [[G2_REAL7]] +// CHECK-NEXT: [[MUL_BD:%.*]] = fmul double [[G1_IMAG6]], [[G2_IMAG8]] +// CHECK-NEXT: [[MUL_AD:%.*]] = fmul double [[G1_REAL5]], [[G2_IMAG8]] +// CHECK-NEXT: [[MUL_BC:%.*]] = fmul double [[G1_IMAG6]], [[G2_REAL7]] +// CHECK-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]] +// CHECK-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]] +// CHECK-NEXT: [[ISNAN_CMP:%.*]] = fcmp uno double [[MUL_R]], [[MUL_R]] +// CHECK-NEXT: br i1 [[ISNAN_CMP]], label %[[COMPLEX_MUL_IMAG_NAN:.*]], label %[[COMPLEX_MUL_CONT:.*]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_IMAG_NAN]]: +// CHECK-NEXT: [[ISNAN_CMP9:%.*]] = fcmp uno double [[MUL_I]], [[MUL_I]] +// CHECK-NEXT: br i1 [[ISNAN_CMP9]], label %[[COMPLEX_MUL_LIBCALL:.*]], label %[[COMPLEX_MUL_CONT]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_LIBCALL]]: +// CHECK-NEXT: [[CALL:%.*]] = call { double, double } @__muldc3(double noundef [[G1_REAL5]], double noundef [[G1_IMAG6]], double noundef [[G2_REAL7]], double noundef [[G2_IMAG8]]) #[[ATTR4]] +// CHECK-NEXT: [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0 +// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1 +// CHECK-NEXT: br label %[[COMPLEX_MUL_CONT]] +// CHECK: [[COMPLEX_MUL_CONT]]: +// CHECK-NEXT: [[REAL_MUL_PHI:%.*]] = phi double [ [[MUL_R]], %[[ENTRY]] ], [ [[MUL_R]], %[[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], %[[COMPLEX_MUL_LIBCALL]] ] +// CHECK-NEXT: [[IMAG_MUL_PHI:%.*]] = phi double [ [[MUL_I]], %[[ENTRY]] ], [ [[MUL_I]], %[[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], %[[COMPLEX_MUL_LIBCALL]] ] +// CHECK-NEXT: store double [[REAL_MUL_PHI]], ptr @g1, align 8 +// CHECK-NEXT: store double [[IMAG_MUL_PHI]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL10:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG11:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[CONJ_I:%.*]] = fneg double [[G1_IMAG11]] +// CHECK-NEXT: [[NEG_R:%.*]] = fneg double [[G1_REAL10]] +// CHECK-NEXT: [[NEG_I:%.*]] = fneg double [[CONJ_I]] +// CHECK-NEXT: store double [[NEG_R]], ptr @g1, align 8 +// CHECK-NEXT: store double [[NEG_I]], ptr getelementptr inbounds nuw ({ double, 
double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: store double [[TMP2]], ptr [[GR]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load double, ptr @D, align 8 +// CHECK-NEXT: [[CF_REAL:%.*]] = load float, ptr @cf, align 4 +// CHECK-NEXT: [[CF_IMAG:%.*]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONV:%.*]] = fpext float [[CF_REAL]] to double +// CHECK-NEXT: [[CONV12:%.*]] = fpext float [[CF_IMAG]] to double +// CHECK-NEXT: [[ADD_R13:%.*]] = fadd double [[CONV]], [[TMP3]] +// CHECK-NEXT: [[CONV14:%.*]] = fptrunc double [[ADD_R13]] to float +// CHECK-NEXT: [[CONV15:%.*]] = fptrunc double [[CONV12]] to float +// CHECK-NEXT: store float [[CONV14]], ptr @cf, align 4 +// CHECK-NEXT: store float [[CONV15]], ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CF_REAL16:%.*]] = load float, ptr @cf, align 4 +// CHECK-NEXT: [[CF_IMAG17:%.*]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONV18:%.*]] = fpext float [[CF_REAL16]] to double +// CHECK-NEXT: [[CONV19:%.*]] = fpext float [[CF_IMAG17]] to double +// CHECK-NEXT: [[TMP4:%.*]] = load double, ptr @D, align 8 +// CHECK-NEXT: [[ADD_R20:%.*]] = fadd double [[TMP4]], [[CONV18]] +// CHECK-NEXT: store double [[ADD_R20]], ptr @D, align 8 +// CHECK-NEXT: [[G1_REAL21:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG22:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[CF_REAL23:%.*]] = load float, ptr @cf, align 4 +// CHECK-NEXT: [[CF_IMAG24:%.*]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONV25:%.*]] = fpext float [[CF_REAL23]] to double +// CHECK-NEXT: [[CONV26:%.*]] = fpext float [[CF_IMAG24]] to double +// CHECK-NEXT: [[CALL27:%.*]] = call { double, double } @__divdc3(double noundef [[CONV25]], double noundef [[CONV26]], double noundef [[G1_REAL21]], double noundef [[G1_IMAG22]]) #[[ATTR4]] +// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { double, double } [[CALL27]], 0 +// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { double, double } [[CALL27]], 1 +// CHECK-NEXT: [[CONV28:%.*]] = fptrunc double [[TMP5]] to float +// CHECK-NEXT: [[CONV29:%.*]] = fptrunc double [[TMP6]] to float +// CHECK-NEXT: store float [[CONV28]], ptr @cf, align 4 +// CHECK-NEXT: store float [[CONV29]], ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[G1_REAL30:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG31:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[TMP7:%.*]] = load double, ptr @D, align 8 +// CHECK-NEXT: [[ADD_R32:%.*]] = fadd double [[G1_REAL30]], [[TMP7]] +// CHECK-NEXT: store double [[ADD_R32]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG31]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[TMP8:%.*]] = load double, ptr @D, align 8 +// CHECK-NEXT: [[G1_REAL33:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG34:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[ADD_R35:%.*]] = fadd double [[TMP8]], [[G1_REAL33]] +// CHECK-NEXT: store double [[ADD_R35]], ptr @g1, align 8 +// CHECK-NEXT: 
store double [[G1_IMAG34]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: ret void +// void test3(void) { g1 = g1 + g2; g1 = g1 - g2; @@ -41,6 +245,101 @@ void test3(void) { __complex__ int ci1, ci2; __complex__ short cs; int i; +// CHECK-LABEL: define dso_local void @test3int( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[CI1_REAL:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI2_REAL:%.*]] = load i32, ptr @ci2, align 4 +// CHECK-NEXT: [[CI2_IMAG:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci2, i32 0, i32 1), align 4 +// CHECK-NEXT: [[ADD_R:%.*]] = add i32 [[CI1_REAL]], [[CI2_REAL]] +// CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[CI1_IMAG]], [[CI2_IMAG]] +// CHECK-NEXT: store i32 [[ADD_R]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[ADD_I]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL1:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG2:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI2_REAL3:%.*]] = load i32, ptr @ci2, align 4 +// CHECK-NEXT: [[CI2_IMAG4:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci2, i32 0, i32 1), align 4 +// CHECK-NEXT: [[SUB_R:%.*]] = sub i32 [[CI1_REAL1]], [[CI2_REAL3]] +// CHECK-NEXT: [[SUB_I:%.*]] = sub i32 [[CI1_IMAG2]], [[CI2_IMAG4]] +// CHECK-NEXT: store i32 [[SUB_R]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[SUB_I]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL5:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG6:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI2_REAL7:%.*]] = load i32, ptr @ci2, align 4 +// CHECK-NEXT: [[CI2_IMAG8:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci2, i32 0, i32 1), align 4 +// CHECK-NEXT: [[MUL_RL:%.*]] = mul i32 [[CI1_REAL5]], [[CI2_REAL7]] +// CHECK-NEXT: [[MUL_RR:%.*]] = mul i32 [[CI1_IMAG6]], [[CI2_IMAG8]] +// CHECK-NEXT: [[MUL_R:%.*]] = sub i32 [[MUL_RL]], [[MUL_RR]] +// CHECK-NEXT: [[MUL_IL:%.*]] = mul i32 [[CI1_IMAG6]], [[CI2_REAL7]] +// CHECK-NEXT: [[MUL_IR:%.*]] = mul i32 [[CI1_REAL5]], [[CI2_IMAG8]] +// CHECK-NEXT: [[MUL_I:%.*]] = add i32 [[MUL_IL]], [[MUL_IR]] +// CHECK-NEXT: store i32 [[MUL_R]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[MUL_I]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL9:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG10:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONJ_I:%.*]] = sub i32 0, [[CI1_IMAG10]] +// CHECK-NEXT: [[NEG_R:%.*]] = sub i32 0, [[CI1_REAL9]] +// CHECK-NEXT: [[NEG_I:%.*]] = sub i32 0, [[CONJ_I]] +// CHECK-NEXT: store i32 [[NEG_R]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[NEG_I]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr @i, align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @i, align 4 +// CHECK-NEXT: [[CS_REAL:%.*]] = load i16, ptr @cs, align 2 +// CHECK-NEXT: [[CS_IMAG:%.*]] = load i16, ptr getelementptr inbounds nuw ({ 
i16, i16 }, ptr @cs, i32 0, i32 1), align 2 +// CHECK-NEXT: [[CONV:%.*]] = sext i16 [[CS_REAL]] to i32 +// CHECK-NEXT: [[CONV11:%.*]] = sext i16 [[CS_IMAG]] to i32 +// CHECK-NEXT: [[ADD_R12:%.*]] = add i32 [[CONV]], [[TMP1]] +// CHECK-NEXT: [[ADD_I13:%.*]] = add i32 [[CONV11]], 0 +// CHECK-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD_R12]] to i16 +// CHECK-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD_I13]] to i16 +// CHECK-NEXT: store i16 [[CONV14]], ptr @cs, align 2 +// CHECK-NEXT: store i16 [[CONV15]], ptr getelementptr inbounds nuw ({ i16, i16 }, ptr @cs, i32 0, i32 1), align 2 +// CHECK-NEXT: [[CF_REAL:%.*]] = load float, ptr @cf, align 4 +// CHECK-NEXT: [[CF_IMAG:%.*]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONV16:%.*]] = fpext float [[CF_REAL]] to double +// CHECK-NEXT: [[CONV17:%.*]] = fpext float [[CF_IMAG]] to double +// CHECK-NEXT: [[TMP2:%.*]] = load double, ptr @D, align 8 +// CHECK-NEXT: [[ADD_R18:%.*]] = fadd double [[TMP2]], [[CONV16]] +// CHECK-NEXT: store double [[ADD_R18]], ptr @D, align 8 +// CHECK-NEXT: [[CI1_REAL19:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG20:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CS_REAL21:%.*]] = load i16, ptr @cs, align 2 +// CHECK-NEXT: [[CS_IMAG22:%.*]] = load i16, ptr getelementptr inbounds nuw ({ i16, i16 }, ptr @cs, i32 0, i32 1), align 2 +// CHECK-NEXT: [[CONV23:%.*]] = sext i16 [[CS_REAL21]] to i32 +// CHECK-NEXT: [[CONV24:%.*]] = sext i16 [[CS_IMAG22]] to i32 +// CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[CONV23]], [[CI1_REAL19]] +// CHECK-NEXT: [[TMP4:%.*]] = mul i32 [[CONV24]], [[CI1_IMAG20]] +// CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[CI1_REAL19]], [[CI1_REAL19]] +// CHECK-NEXT: [[TMP7:%.*]] = mul i32 [[CI1_IMAG20]], [[CI1_IMAG20]] +// CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP6]], [[TMP7]] +// CHECK-NEXT: [[TMP9:%.*]] = mul i32 [[CONV24]], [[CI1_REAL19]] +// CHECK-NEXT: [[TMP10:%.*]] = mul i32 [[CONV23]], [[CI1_IMAG20]] +// CHECK-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], [[TMP10]] +// CHECK-NEXT: [[TMP12:%.*]] = sdiv i32 [[TMP5]], [[TMP8]] +// CHECK-NEXT: [[TMP13:%.*]] = sdiv i32 [[TMP11]], [[TMP8]] +// CHECK-NEXT: [[CONV25:%.*]] = trunc i32 [[TMP12]] to i16 +// CHECK-NEXT: [[CONV26:%.*]] = trunc i32 [[TMP13]] to i16 +// CHECK-NEXT: store i16 [[CONV25]], ptr @cs, align 2 +// CHECK-NEXT: store i16 [[CONV26]], ptr getelementptr inbounds nuw ({ i16, i16 }, ptr @cs, i32 0, i32 1), align 2 +// CHECK-NEXT: [[CI1_REAL27:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG28:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr @i, align 4 +// CHECK-NEXT: [[ADD_R29:%.*]] = add i32 [[CI1_REAL27]], [[TMP14]] +// CHECK-NEXT: [[ADD_I30:%.*]] = add i32 [[CI1_IMAG28]], 0 +// CHECK-NEXT: store i32 [[ADD_R29]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[ADD_I30]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr @i, align 4 +// CHECK-NEXT: [[CI1_REAL31:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG32:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[ADD_R33:%.*]] = add i32 [[TMP15]], [[CI1_REAL31]] +// CHECK-NEXT: [[ADD_I34:%.*]] = add i32 0, [[CI1_IMAG32]] +// CHECK-NEXT: store i32 [[ADD_R33]], ptr 
@ci1, align 4 +// CHECK-NEXT: store i32 [[ADD_I34]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: ret void +// void test3int(void) { ci1 = ci1 + ci2; ci1 = ci1 - ci2; @@ -56,15 +355,37 @@ void test3int(void) { ci1 = i + ci1; } +// CHECK-LABEL: define dso_local void @t1( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: store float 4.000000e+00, ptr @cf, align 4 +// CHECK-NEXT: ret void +// void t1(void) { (__real__ cf) = 4.0; } +// CHECK-LABEL: define dso_local void @t2( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: store float 4.000000e+00, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: ret void +// void t2(void) { (__imag__ cf) = 4.0; } // PR1960 +// CHECK-LABEL: define dso_local void @t3( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[V:%.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: [[V_REALP:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[V]], i32 0, i32 0 +// CHECK-NEXT: [[V_IMAGP:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[V]], i32 0, i32 1 +// CHECK-NEXT: store i64 2, ptr [[V_REALP]], align 8 +// CHECK-NEXT: store i64 0, ptr [[V_IMAGP]], align 8 +// CHECK-NEXT: ret void +// void t3(void) { __complex__ long long v = 2; } @@ -72,10 +393,72 @@ void t3(void) { // PR3131 float _Complex t4(void); +// CHECK-LABEL: define dso_local void @t5( +// CHECK-SAME: ) #[[ATTR2:[0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[X:%.*]] = alloca { float, float }, align 4 +// CHECK-NEXT: [[COERCE:%.*]] = alloca { float, float }, align 4 +// CHECK-NEXT: [[CALL:%.*]] = call <2 x float> @t4() +// CHECK-NEXT: store <2 x float> [[CALL]], ptr [[COERCE]], align 4 +// CHECK-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0 +// CHECK-NEXT: [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4 +// CHECK-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1 +// CHECK-NEXT: [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4 +// CHECK-NEXT: [[X_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[X]], i32 0, i32 0 +// CHECK-NEXT: [[X_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[X]], i32 0, i32 1 +// CHECK-NEXT: store float [[COERCE_REAL]], ptr [[X_REALP]], align 4 +// CHECK-NEXT: store float [[COERCE_IMAG]], ptr [[X_IMAGP]], align 4 +// CHECK-NEXT: ret void +// void t5(void) { float _Complex x = t4(); } +// CHECK-LABEL: define dso_local void @t6( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[G1_REAL:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[INC:%.*]] = fadd double [[G1_REAL]], 1.000000e+00 +// CHECK-NEXT: store double [[INC]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL1:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG2:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[DEC:%.*]] = fadd double [[G1_REAL1]], -1.000000e+00 +// CHECK-NEXT: store double [[DEC]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG2]], ptr getelementptr inbounds nuw ({ double, 
double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL3:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG4:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[INC5:%.*]] = fadd double [[G1_REAL3]], 1.000000e+00 +// CHECK-NEXT: store double [[INC5]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG4]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL6:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG7:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[DEC8:%.*]] = fadd double [[G1_REAL6]], -1.000000e+00 +// CHECK-NEXT: store double [[DEC8]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG7]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[CI1_REAL:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[INC9:%.*]] = add i32 [[CI1_REAL]], 1 +// CHECK-NEXT: store i32 [[INC9]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[CI1_IMAG]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL10:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG11:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[DEC12:%.*]] = add i32 [[CI1_REAL10]], -1 +// CHECK-NEXT: store i32 [[DEC12]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[CI1_IMAG11]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL13:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG14:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[INC15:%.*]] = add i32 [[CI1_REAL13]], 1 +// CHECK-NEXT: store i32 [[INC15]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[CI1_IMAG14]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL16:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG17:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[DEC18:%.*]] = add i32 [[CI1_REAL16]], -1 +// CHECK-NEXT: store i32 [[DEC18]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[CI1_IMAG17]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: ret void +// void t6(void) { g1++; g1--; @@ -87,18 +470,68 @@ void t6(void) { --ci1; } +// CHECK-LABEL: define dso_local double @t7( +// CHECK-SAME: double noundef [[C_COERCE0:%.*]], double noundef [[C_COERCE1:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[C:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[C]], i32 0, i32 0 +// CHECK-NEXT: store double [[C_COERCE0]], ptr [[TMP0]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[C]], i32 0, i32 1 +// CHECK-NEXT: store double [[C_COERCE1]], ptr [[TMP1]], align 8 +// CHECK-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[C]], i32 0, i32 0 +// CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[C_REALP]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = call double @llvm.fabs.f64(double 
[[TMP2]]) +// CHECK-NEXT: ret double [[TMP3]] +// double t7(double _Complex c) { return __builtin_fabs(__real__(c)); } +// CHECK-LABEL: define dso_local void @t8( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[X:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[DOTCOMPOUNDLITERAL:%.*]] = alloca { i32, i32 }, align 4 +// CHECK-NEXT: [[DOTCOMPOUNDLITERAL_REALP:%.*]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0 +// CHECK-NEXT: [[DOTCOMPOUNDLITERAL_IMAGP:%.*]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 1 +// CHECK-NEXT: store i32 1, ptr [[DOTCOMPOUNDLITERAL_REALP]], align 4 +// CHECK-NEXT: store i32 0, ptr [[DOTCOMPOUNDLITERAL_IMAGP]], align 4 +// CHECK-NEXT: store ptr [[DOTCOMPOUNDLITERAL]], ptr [[X]], align 8 +// CHECK-NEXT: ret void +// void t8(void) { __complex__ int *x = &(__complex__ int){1}; } const _Complex double test9const = 0; +// CHECK-LABEL: define dso_local { double, double } @test9func( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1 +// CHECK-NEXT: store double 0.000000e+00, ptr [[RETVAL_REALP]], align 8 +// CHECK-NEXT: store double 0.000000e+00, ptr [[RETVAL_IMAGP]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load { double, double }, ptr [[RETVAL]], align 8 +// CHECK-NEXT: ret { double, double } [[TMP0]] +// _Complex double test9func(void) { return test9const; } // D6217 +// CHECK-LABEL: define dso_local void @t91( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[C:%.*]] = alloca [0 x i8], align 1 +// CHECK-NEXT: br i1 false, label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK: [[COND_TRUE]]: +// CHECK-NEXT: br label %[[COND_END:.*]] +// CHECK: [[COND_FALSE]]: +// CHECK-NEXT: br label %[[COND_END]] +// CHECK: [[COND_END]]: +// CHECK-NEXT: [[COND_R:%.*]] = phi double [ 2.000000e+00, %[[COND_TRUE]] ], [ 2.000000e+00, %[[COND_FALSE]] ] +// CHECK-NEXT: [[COND_I:%.*]] = phi double [ 0.000000e+00, %[[COND_TRUE]] ], [ 0.000000e+00, %[[COND_FALSE]] ] +// CHECK-NEXT: ret void +// void t91(void) { // Check for proper type promotion of conditional expression char c[(int)(sizeof(typeof((0 ? 2.0f : (_Complex double) 2.0f))) - sizeof(_Complex double))]; @@ -106,6 +539,20 @@ void t91(void) { (0 ? 2.0f : (_Complex double) 2.0f); } +// CHECK-LABEL: define dso_local void @t92( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[C:%.*]] = alloca [0 x i8], align 1 +// CHECK-NEXT: br i1 false, label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK: [[COND_TRUE]]: +// CHECK-NEXT: br label %[[COND_END:.*]] +// CHECK: [[COND_FALSE]]: +// CHECK-NEXT: br label %[[COND_END]] +// CHECK: [[COND_END]]: +// CHECK-NEXT: [[COND_R:%.*]] = phi double [ 2.000000e+00, %[[COND_TRUE]] ], [ 2.000000e+00, %[[COND_FALSE]] ] +// CHECK-NEXT: [[COND_I:%.*]] = phi double [ 0.000000e+00, %[[COND_TRUE]] ], [ 0.000000e+00, %[[COND_FALSE]] ] +// CHECK-NEXT: ret void +// void t92(void) { // Check for proper type promotion of conditional expression char c[(int)(sizeof(typeof((0 ? (_Complex double) 2.0f : 2.0f))) - sizeof(_Complex double))]; @@ -113,3 +560,36 @@ void t92(void) { (0 ? 
(_Complex double) 2.0f : 2.0f); } +// CHECK-LABEL: define dso_local void @real_on_scalar_with_type_promotion( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2 +// CHECK-NEXT: [[B:%.*]] = alloca half, align 2 +// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2 +// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_REAL]] to float +// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[EXT]] to half +// CHECK-NEXT: store half [[UNPROMOTION]], ptr [[B]], align 2 +// CHECK-NEXT: ret void +// +void real_on_scalar_with_type_promotion() { + _Float16 _Complex a; + _Float16 b = __real__(__real__ a); +} + +// CHECK-LABEL: define dso_local void @imag_on_scalar_with_type_promotion( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2 +// CHECK-NEXT: [[B:%.*]] = alloca half, align 2 +// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1 +// CHECK-NEXT: [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2 +// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_IMAG]] to float +// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[EXT]] to half +// CHECK-NEXT: store half [[UNPROMOTION]], ptr [[B]], align 2 +// CHECK-NEXT: ret void +// +void imag_on_scalar_with_type_promotion() { + _Float16 _Complex a; + _Float16 b = __real__(__imag__ a); +} diff --git a/clang/test/CodeGen/sparcv9-abi.c b/clang/test/CodeGen/sparcv9-abi.c index 5a3d64fd37889..d3f79fd7989fc 100644 --- a/clang/test/CodeGen/sparcv9-abi.c +++ b/clang/test/CodeGen/sparcv9-abi.c @@ -25,12 +25,35 @@ long double f_ld(long double x) { return x; } struct empty {}; struct emptyarr { struct empty a[10]; }; +// In 16-byte structs, 16-byte aligned members are expanded +// to their corresponding i128/f128 types. +struct align16_int { _Alignas(16) int x; }; +struct align16_mixed { _Alignas(16) int x; double y; }; +struct align16_longdouble { long double x; }; + // CHECK-LABEL: define{{.*}} i64 @f_empty(i64 %x.coerce) struct empty f_empty(struct empty x) { return x; } // CHECK-LABEL: define{{.*}} i64 @f_emptyarr(i64 %x.coerce) struct empty f_emptyarr(struct emptyarr x) { return x.a[0]; } +// CHECK-LABEL: define{{.*}} void @f_aligncaller(i64 %a.coerce0, i64 %a.coerce1) +// CHECK-LABEL: declare{{.*}} void @f_aligncallee(i32 noundef signext, i64, i64, i64) +void f_aligncallee(int pad, struct align16_int a); +void f_aligncaller(struct align16_int a) { + f_aligncallee(0, a); +} + +// CHECK-LABEL: define{{.*}} double @f_mixed_aligned(i64 noundef %a, i64 %0, i64 %b.coerce0, double %b.coerce1) +double f_mixed_aligned(long a, struct align16_mixed b) { + return b.y; +} + +// CHECK-LABEL: define{{.*}} fp128 @f_longdouble(i64 noundef %a, i64 %0, fp128 %b.coerce) +long double f_longdouble(long a, struct align16_longdouble b) { + return b.x; +} + // CHECK-LABEL: define{{.*}} i64 @f_emptyvar(i32 noundef zeroext %count, ...) long f_emptyvar(unsigned count, ...) 
{ long ret; @@ -80,6 +103,11 @@ struct medium { int *c, *d; }; +struct medium_aligned { + _Alignas(16) int *a; + int *b, *c, *d; +}; + // CHECK-LABEL: define{{.*}} %struct.medium @f_medium(ptr dead_on_return noundef %x) struct medium f_medium(struct medium x) { x.a += *x.b; @@ -87,6 +115,13 @@ struct medium f_medium(struct medium x) { return x; } +// CHECK-LABEL: define{{.*}} %struct.medium_aligned @f_medium_aligned(ptr dead_on_return noundef %x) +struct medium_aligned f_medium_aligned(struct medium_aligned x) { + x.a += *x.b; + x.b = 0; + return x; +} + // Large structs are also returned indirectly. struct large { int *a, *b; @@ -101,6 +136,15 @@ struct large f_large(struct large x) { return x; } +// Large returns are converted into a pointer argument. +// Such conversion should preserve the alignment of overaligned arguments. +// define{{.*}} void @f_largereturn_aligned(ptr dead_on_unwind noalias writable sret(%struct.large) align 8 %agg.result, i64 %0, i64 %x.coerce0, i64 %x.coerce1) +struct large f_largereturn_aligned(struct align16_int x) { + struct large ret; + ret.x = x.x; + return ret; +} + // A 64-bit struct fits in a register. struct reg { int a, b; @@ -215,6 +259,18 @@ int f_variable(char *f, ...) { case 'm': s += *va_arg(ap, struct medium).a; break; + +// CHECK: %[[CUR:[^ ]+]] = load ptr, ptr %ap +// CHECK-DAG: %[[TMP:[^ ]+]] = getelementptr inbounds i8, ptr %[[CUR]], i32 15 +// CHECK-DAG: %[[ALIGNED:[^ ]+]] = call ptr @llvm.ptrmask.p0.i64(ptr %[[TMP]], i64 -16) +// CHECK-DAG: %[[NXT:[^ ]+]] = getelementptr inbounds i8, ptr %[[ALIGNED]], i64 16 +// CHECK-DAG: store ptr %[[NXT]], ptr %ap +// CHECK-DAG: call void @llvm.memcpy.p0.p0.i64(ptr align 16 {{.*}}, ptr align 16 %[[ALIGNED]], i64 16, i1 false) +// CHECK: br + case 'a': + s += va_arg(ap, struct align16_int).x; + break; } + va_end(ap); return s; } diff --git a/clang/test/CodeGen/struct-passing.c b/clang/test/CodeGen/struct-passing.c index c8cfeb9c8168a..ba96798dc51ef 100644 --- a/clang/test/CodeGen/struct-passing.c +++ b/clang/test/CodeGen/struct-passing.c @@ -11,17 +11,25 @@ T0 __attribute__((const)) f0(void); T0 __attribute__((pure)) f1(void); T1 __attribute__((const)) f2(void); T1 __attribute__((pure)) f3(void); -void __attribute__((const)) f4(T1 a); -void __attribute__((pure)) f5(T1 a); +int __attribute__((const)) f4(T1 a); +int __attribute__((pure)) f5(T1 a); -void *ps[] = { f0, f1, f2, f3, f4, f5 }; +// NOTE: The int parameters verify that non-ptr parameters are not a problem +T1 __attribute__((const)) f6(void*, int); +T1 __attribute__((pure)) f7(void*, int); + +void *ps[] = { f0, f1, f2, f3, f4, f5, f6, f7 }; // CHECK: declare i32 @f0() [[RN:#[0-9]+]] // CHECK: declare i32 @f1() [[RO:#[0-9]+]] -// CHECK: declare void @f2({{.*}} sret({{.*}}) align 4) -// CHECK: declare void @f3({{.*}} sret({{.*}}) align 4) -// CHECK: declare void @f4({{.*}} byval({{.*}}) align 4) -// CHECK: declare void @f5({{.*}} byval({{.*}}) align 4) +// CHECK: declare void @f2(ptr {{[^,]*}} sret({{[^)]*}}) align 4) [[RNRW:#[0-9]+]] +// CHECK: declare void @f3(ptr {{[^,]*}} sret({{[^)]*}}) align 4) [[RORW:#[0-9]+]] +// CHECK: declare i32 @f4(ptr {{[^,]*}} byval({{[^)]*}}) align 4) [[RNRW:#[0-9]+]] +// CHECK: declare i32 @f5(ptr {{[^,]*}} byval({{[^)]*}}) align 4) [[RORW:#[0-9]+]] +// CHECK: declare void @f6(ptr {{[^,]*}} sret({{[^)]*}}) align 4, ptr {{[^,]*}} readnone, i32 {{[^,]*}}) [[RNRW:#[0-9]+]] +// CHECK: declare void @f7(ptr {{[^,]*}} sret({{[^)]*}}) align 4, ptr {{[^,]*}} readonly, i32 {{[^,]*}}) [[RORW:#[0-9]+]] // CHECK: attributes [[RN]] = {
nounwind willreturn memory(none){{.*}} } // CHECK: attributes [[RO]] = { nounwind willreturn memory(read){{.*}} } +// CHECK: attributes [[RNRW]] = { nounwind willreturn memory(argmem: readwrite){{.*}} } +// CHECK: attributes [[RORW]] = { nounwind willreturn memory(read, argmem: readwrite){{.*}} } diff --git a/clang/test/CodeGenCXX/amdgpu-image-rsrc-typeinfo.cpp b/clang/test/CodeGenCXX/amdgpu-image-rsrc-typeinfo.cpp new file mode 100644 index 0000000000000..0dbd51774321b --- /dev/null +++ b/clang/test/CodeGenCXX/amdgpu-image-rsrc-typeinfo.cpp @@ -0,0 +1,7 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: amdgpu-registered-target +// RUN: %clang_cc1 -triple amdgcn %s -emit-llvm -o - | FileCheck %s +namespace std { class type_info; } +auto &a = typeid(__amdgpu_texture_t); +//// NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +// CHECK: {{.*}} diff --git a/clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp b/clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp index 5920ceda4a811..137a49beee9a6 100644 --- a/clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp +++ b/clang/test/CodeGenCXX/builtin-amdgcn-atomic-inc-dec.cpp @@ -1,7 +1,10 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: amdgpu-registered-target +// REQUIRES: spirv-registered-target // RUN: %clang_cc1 %s -x hip -fcuda-is-device -emit-llvm -O0 -o - \ -// RUN: -triple=amdgcn-amd-amdhsa | FileCheck %s +// RUN: -triple=amdgcn-amd-amdhsa | FileCheck --check-prefix=GCN %s +// RUN: %clang_cc1 %s -x hip -fcuda-is-device -emit-llvm -O0 -o - \ +// RUN: -triple=spirv64-amd-amdhsa | FileCheck --check-prefix=AMDGCNSPIRV %s // CHECK-LABEL: @_Z29test_non_volatile_parameter32Pj( // CHECK-NEXT: entry: @@ -21,6 +24,43 @@ // CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z29test_non_volatile_parameter32Pj( +// GCN-NEXT: entry: +// GCN-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) +// GCN-NEXT: [[RES:%.*]] = alloca i32, align 4, addrspace(5) +// GCN-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr +// GCN-NEXT: [[RES_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES]] to ptr +// GCN-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4:![0-9]+]] +// GCN-NEXT: store i32 [[TMP3]], ptr [[RES_ASCAST]], align 4 +// GCN-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// GCN-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z29test_non_volatile_parameter32Pj( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: 
[[PTR_ADDR:%.*]] = alloca ptr addrspace(4), align 8 +// AMDGCNSPIRV-NEXT: [[RES:%.*]] = alloca i32, align 4 +// AMDGCNSPIRV-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr [[PTR_ADDR]] to ptr addrspace(4) +// AMDGCNSPIRV-NEXT: [[RES_ASCAST:%.*]] = addrspacecast ptr [[RES]] to ptr addrspace(4) +// AMDGCNSPIRV-NEXT: store ptr addrspace(4) [[PTR:%.*]], ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5:![0-9]+]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP3]], ptr addrspace(4) [[RES_ASCAST]], align 4 +// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4 +// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspace(4) [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP7]], ptr addrspace(4) [[RES_ASCAST]], align 4 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_non_volatile_parameter32(__UINT32_TYPE__ *ptr) { __UINT32_TYPE__ res; @@ -47,6 +87,43 @@ __attribute__((device)) void test_non_volatile_parameter32(__UINT32_TYPE__ *ptr) // CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z29test_non_volatile_parameter64Py( +// GCN-NEXT: entry: +// GCN-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) +// GCN-NEXT: [[RES:%.*]] = alloca i64, align 8, addrspace(5) +// GCN-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr +// GCN-NEXT: [[RES_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES]] to ptr +// GCN-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP3]], ptr [[RES_ASCAST]], align 8 +// GCN-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 8 +// GCN-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z29test_non_volatile_parameter64Py( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[PTR_ADDR:%.*]] = alloca ptr addrspace(4), align 8 +// AMDGCNSPIRV-NEXT: [[RES:%.*]] = alloca i64, align 8 +// 
AMDGCNSPIRV-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr [[PTR_ADDR]] to ptr addrspace(4) +// AMDGCNSPIRV-NEXT: [[RES_ASCAST:%.*]] = addrspacecast ptr [[RES]] to ptr addrspace(4) +// AMDGCNSPIRV-NEXT: store ptr addrspace(4) [[PTR:%.*]], ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(4) [[TMP1]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP3]], ptr addrspace(4) [[RES_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load i64, ptr addrspace(4) [[TMP5]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspace(4) [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP7]], ptr addrspace(4) [[RES_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_non_volatile_parameter64(__UINT64_TYPE__ *ptr) { __UINT64_TYPE__ res; @@ -73,6 +150,43 @@ __attribute__((device)) void test_non_volatile_parameter64(__UINT64_TYPE__ *ptr) // CHECK-NEXT: [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z25test_volatile_parameter32PVj( +// GCN-NEXT: entry: +// GCN-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) +// GCN-NEXT: [[RES:%.*]] = alloca i32, align 4, addrspace(5) +// GCN-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr +// GCN-NEXT: [[RES_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES]] to ptr +// GCN-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP2:%.*]] = load volatile i32, ptr [[TMP1]], align 4 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP3]], ptr [[RES_ASCAST]], align 4 +// GCN-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP6:%.*]] = load volatile i32, ptr [[TMP5]], align 4 +// GCN-NEXT: [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z25test_volatile_parameter32PVj( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[PTR_ADDR:%.*]] = alloca ptr addrspace(4), align 8 +// AMDGCNSPIRV-NEXT: [[RES:%.*]] = alloca i32, align 4 +// AMDGCNSPIRV-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr [[PTR_ADDR]] to ptr 
addrspace(4) +// AMDGCNSPIRV-NEXT: [[RES_ASCAST:%.*]] = addrspacecast ptr [[RES]] to ptr addrspace(4) +// AMDGCNSPIRV-NEXT: store ptr addrspace(4) [[PTR:%.*]], ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load volatile i32, ptr addrspace(4) [[TMP1]], align 4 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr addrspace(4) [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP3]], ptr addrspace(4) [[RES_ASCAST]], align 4 +// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load volatile i32, ptr addrspace(4) [[TMP5]], align 4 +// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr addrspace(4) [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP7]], ptr addrspace(4) [[RES_ASCAST]], align 4 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_volatile_parameter32(volatile __UINT32_TYPE__ *ptr) { __UINT32_TYPE__ res; @@ -99,6 +213,43 @@ __attribute__((device)) void test_volatile_parameter32(volatile __UINT32_TYPE__ // CHECK-NEXT: [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z25test_volatile_parameter64PVy( +// GCN-NEXT: entry: +// GCN-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5) +// GCN-NEXT: [[RES:%.*]] = alloca i64, align 8, addrspace(5) +// GCN-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr +// GCN-NEXT: [[RES_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES]] to ptr +// GCN-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP2:%.*]] = load volatile i64, ptr [[TMP1]], align 8 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP3]], ptr [[RES_ASCAST]], align 8 +// GCN-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8 +// GCN-NEXT: [[TMP6:%.*]] = load volatile i64, ptr [[TMP5]], align 8 +// GCN-NEXT: [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z25test_volatile_parameter64PVy( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[PTR_ADDR:%.*]] = alloca ptr addrspace(4), align 8 +// AMDGCNSPIRV-NEXT: [[RES:%.*]] = alloca i64, align 8 +// AMDGCNSPIRV-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr [[PTR_ADDR]] to ptr addrspace(4) +// AMDGCNSPIRV-NEXT: 
[[RES_ASCAST:%.*]] = addrspacecast ptr [[RES]] to ptr addrspace(4) +// AMDGCNSPIRV-NEXT: store ptr addrspace(4) [[PTR:%.*]], ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load volatile i64, ptr addrspace(4) [[TMP1]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr addrspace(4) [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP3]], ptr addrspace(4) [[RES_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[PTR_ADDR_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load volatile i64, ptr addrspace(4) [[TMP5]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr addrspace(4) [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP7]], ptr addrspace(4) [[RES_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_volatile_parameter64(volatile __UINT64_TYPE__ *ptr) { __UINT64_TYPE__ res; @@ -116,6 +267,25 @@ __attribute__((device)) void test_volatile_parameter64(volatile __UINT64_TYPE__ // CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z13test_shared32v( +// GCN-NEXT: entry: +// GCN-NEXT: [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z13test_shared32v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr addrspace(4)), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP1]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = 
load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr addrspace(4)), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP3]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_shared32() { __attribute__((shared)) __UINT32_TYPE__ val; @@ -134,6 +304,25 @@ __attribute__((device)) void test_shared32() { // CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z13test_shared64v( +// GCN-NEXT: entry: +// GCN-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z13test_shared64v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr addrspace(4)), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP1]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr addrspace(4)), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP3]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_shared64() { __attribute__((shared)) __UINT64_TYPE__ val; @@ -153,6 +342,25 @@ __attribute__((device)) __UINT32_TYPE__ global_val32; // CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP2]] syncscope("workgroup") 
seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z13test_global32v( +// GCN-NEXT: entry: +// GCN-NEXT: [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4 +// GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z13test_global32v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val32 to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val32 to ptr addrspace(4)), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP1]], ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val32 to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val32 to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val32 to ptr addrspace(4)), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP3]], ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val32 to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_global32() { global_val32 = __builtin_amdgcn_atomic_inc32(&global_val32, global_val32, __ATOMIC_SEQ_CST, "workgroup"); @@ -170,6 +378,25 @@ __attribute__((device)) __UINT64_TYPE__ global_val64; // CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z13test_global64v( +// GCN-NEXT: entry: +// GCN-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8 +// GCN-NEXT: [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] 
+// GCN-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z13test_global64v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val64 to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val64 to ptr addrspace(4)), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP1]], ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val64 to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val64 to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val64 to ptr addrspace(4)), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP3]], ptr addrspace(4) addrspacecast (ptr addrspace(1) @global_val64 to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_global64() { global_val64 = __builtin_amdgcn_atomic_inc64(&global_val64, global_val64, __ATOMIC_SEQ_CST, "workgroup"); @@ -189,6 +416,29 @@ __attribute__((constant)) __UINT32_TYPE__ cval32; // CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i32 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 4 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z15test_constant32v( +// GCN-NEXT: entry: +// GCN-NEXT: [[LOCAL_VAL:%.*]] = alloca i32, align 4, addrspace(5) +// GCN-NEXT: [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_VAL]] to ptr +// GCN-NEXT: [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), align 4 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP1]], ptr [[LOCAL_VAL_ASCAST]], align 4 +// GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), align 4 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 4 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z15test_constant32v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[LOCAL_VAL:%.*]] = alloca i32, align 4 +// AMDGCNSPIRV-NEXT: [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr [[LOCAL_VAL]] to ptr addrspace(4) +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(1) @cval32 to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(1) @cval32 to ptr addrspace(4)), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP1]], ptr addrspace(4) [[LOCAL_VAL_ASCAST]], align 4 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(1) @cval32 to ptr 
addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(1) @cval32 to ptr addrspace(4)), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP3]], ptr addrspace(4) [[LOCAL_VAL_ASCAST]], align 4 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_constant32() { __UINT32_TYPE__ local_val; @@ -210,6 +460,29 @@ __attribute__((constant)) __UINT64_TYPE__ cval64; // CHECK-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i64 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 8 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z15test_constant64v( +// GCN-NEXT: entry: +// GCN-NEXT: [[LOCAL_VAL:%.*]] = alloca i64, align 8, addrspace(5) +// GCN-NEXT: [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_VAL]] to ptr +// GCN-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), align 8 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP1]], ptr [[LOCAL_VAL_ASCAST]], align 8 +// GCN-NEXT: [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), align 8 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 8 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z15test_constant64v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[LOCAL_VAL:%.*]] = alloca i64, align 8 +// AMDGCNSPIRV-NEXT: [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr [[LOCAL_VAL]] to ptr addrspace(4) +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(1) @cval64 to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(1) @cval64 to ptr addrspace(4)), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP1]], ptr addrspace(4) [[LOCAL_VAL_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(1) @cval64 to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(1) @cval64 to ptr addrspace(4)), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP3]], ptr addrspace(4) [[LOCAL_VAL_ASCAST]], align 8 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_constant64() { __UINT64_TYPE__ local_val; @@ -240,6 +513,49 @@ __attribute__((device)) void test_constant64() { // CHECK-NEXT: [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP10]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i32 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z12test_order32v( +// GCN-NEXT: entry: +// GCN-NEXT: 
[[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP0]] syncscope("workgroup") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") acquire, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP4]] syncscope("workgroup") acquire, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP6:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP6]] syncscope("workgroup") release, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP8:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP8]] syncscope("workgroup") acq_rel, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP9]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP10:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP10]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z12test_order32v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), i32 [[TMP0]] syncscope("workgroup") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP1]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) 
@_ZZ12test_order32vE3val to ptr addrspace(4)), i32 [[TMP2]] syncscope("workgroup") acquire, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP3]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), i32 [[TMP4]] syncscope("workgroup") acquire, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP5]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), i32 [[TMP6]] syncscope("workgroup") release, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP7]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP8:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), i32 [[TMP8]] syncscope("workgroup") acq_rel, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP9]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP10:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), i32 [[TMP10]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP11]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_order32() { __attribute__((shared)) __UINT32_TYPE__ val; @@ -278,6 +594,49 @@ __attribute__((device)) void test_order32() { // CHECK-NEXT: [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP10]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i64 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z12test_order64v( +// GCN-NEXT: entry: +// GCN-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP0]] syncscope("workgroup") monotonic, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP2:%.*]] = load 
i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") acquire, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP4:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP4]] syncscope("workgroup") acquire, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP6:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP6]] syncscope("workgroup") release, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP8:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP8]] syncscope("workgroup") acq_rel, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP9]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP10:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP10]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z12test_order64v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), i64 [[TMP0]] syncscope("workgroup") monotonic, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP1]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), i64 [[TMP2]] syncscope("workgroup") acquire, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP3]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: 
[[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), i64 [[TMP4]] syncscope("workgroup") acquire, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP5]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), i64 [[TMP6]] syncscope("workgroup") release, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP7]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP8:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), i64 [[TMP8]] syncscope("workgroup") acq_rel, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP9]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP10:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), i64 [[TMP10]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP11]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_order64() { __attribute__((shared)) __UINT64_TYPE__ val; @@ -310,6 +669,37 @@ __attribute__((device)) void test_order64() { // CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP6]] syncscope("wavefront") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i32 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z12test_scope32v( +// GCN-NEXT: entry: +// GCN-NEXT: [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP0]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) 
@_ZZ12test_scope32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP4]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP6:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4 +// GCN-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP6]] syncscope("wavefront") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i32 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z12test_scope32v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), i32 [[TMP0]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP1]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP3]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), i32 [[TMP4]] syncscope("device") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP5]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), i32 [[TMP6]] syncscope("subgroup") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i32 [[TMP7]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr addrspace(4)), align 4 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_scope32() { __attribute__((shared)) __UINT32_TYPE__ val; @@ -338,6 +728,37 @@ __attribute__((device)) void test_scope32() { // CHECK-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP6]] syncscope("wavefront") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] // CHECK-NEXT: store i64 [[TMP7]], ptr addrspacecast (ptr addrspace(3) 
@_ZZ12test_scope64vE3val to ptr), align 8 // CHECK-NEXT: ret void +// GCN-LABEL: @_Z12test_scope64v( +// GCN-NEXT: entry: +// GCN-NEXT: [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP0]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP4:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP4]] syncscope("agent") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP6:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8 +// GCN-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP6]] syncscope("wavefront") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META4]] +// GCN-NEXT: store i64 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8 +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: @_Z12test_scope64v( +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), i64 [[TMP0]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP1]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP3]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), i64 [[TMP4]] syncscope("device") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP5]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr 
addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load i64, ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), i64 [[TMP6]] syncscope("subgroup") seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META5]] +// AMDGCNSPIRV-NEXT: store i64 [[TMP7]], ptr addrspace(4) addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr addrspace(4)), align 8 +// AMDGCNSPIRV-NEXT: ret void // __attribute__((device)) void test_scope64() { __attribute__((shared)) __UINT64_TYPE__ val; diff --git a/clang/test/CodeGenCXX/builtin-amdgcn-fence.cpp b/clang/test/CodeGenCXX/builtin-amdgcn-fence.cpp index 1e977dd6420f4..dd1ca459d68b5 100644 --- a/clang/test/CodeGenCXX/builtin-amdgcn-fence.cpp +++ b/clang/test/CodeGenCXX/builtin-amdgcn-fence.cpp @@ -1,7 +1,10 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4 // REQUIRES: amdgpu-registered-target +// REQUIRES: spirv-registered-target // RUN: %clang_cc1 %s -emit-llvm -O0 -o - \ -// RUN: -triple=amdgcn-amd-amdhsa | FileCheck %s +// RUN: -triple=amdgcn-amd-amdhsa | FileCheck --check-prefix=GCN %s +// RUN: %clang_cc1 %s -emit-llvm -O0 -o - \ +// RUN: -triple=spirv64-amd-amdhsa | FileCheck --check-prefix=AMDGCNSPIRV %s // CHECK-LABEL: define dso_local void @_Z25test_memory_fence_successv( // CHECK-SAME: ) #[[ATTR0:[0-9]+]] { @@ -12,6 +15,25 @@ // CHECK-NEXT: fence syncscope("agent") acq_rel // CHECK-NEXT: fence syncscope("workgroup") release // CHECK-NEXT: ret void +// GCN-LABEL: define dso_local void @_Z25test_memory_fence_successv( +// GCN-SAME: ) #[[ATTR0:[0-9]+]] { +// GCN-NEXT: entry: +// GCN-NEXT: fence syncscope("workgroup") seq_cst +// GCN-NEXT: fence syncscope("agent") acquire +// GCN-NEXT: fence seq_cst +// GCN-NEXT: fence syncscope("agent") acq_rel +// GCN-NEXT: fence syncscope("workgroup") release +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: define spir_func void @_Z25test_memory_fence_successv( +// AMDGCNSPIRV-SAME: ) addrspace(4) #[[ATTR0:[0-9]+]] { +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: fence syncscope("workgroup") seq_cst +// AMDGCNSPIRV-NEXT: fence syncscope("device") acquire +// AMDGCNSPIRV-NEXT: fence seq_cst +// AMDGCNSPIRV-NEXT: fence syncscope("device") acq_rel +// AMDGCNSPIRV-NEXT: fence syncscope("workgroup") release +// AMDGCNSPIRV-NEXT: ret void // void test_memory_fence_success() { @@ -35,6 +57,25 @@ void test_memory_fence_success() { // CHECK-NEXT: fence syncscope("agent") acq_rel, !mmra [[META3]] // CHECK-NEXT: fence syncscope("workgroup") release, !mmra [[META3]] // CHECK-NEXT: ret void +// GCN-LABEL: define dso_local void @_Z10test_localv( +// GCN-SAME: ) #[[ATTR0]] { +// GCN-NEXT: entry: +// GCN-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META3:![0-9]+]] +// GCN-NEXT: fence syncscope("agent") acquire, !mmra [[META3]] +// GCN-NEXT: fence seq_cst, !mmra [[META3]] +// GCN-NEXT: fence syncscope("agent") acq_rel, !mmra [[META3]] +// GCN-NEXT: fence syncscope("workgroup") release, !mmra [[META3]] +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: define spir_func void @_Z10test_localv( +// AMDGCNSPIRV-SAME: ) addrspace(4) #[[ATTR0]] { +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META3:![0-9]+]] +// AMDGCNSPIRV-NEXT: fence syncscope("device") acquire, !mmra [[META3]] +// AMDGCNSPIRV-NEXT: fence seq_cst, 
!mmra [[META3]] +// AMDGCNSPIRV-NEXT: fence syncscope("device") acq_rel, !mmra [[META3]] +// AMDGCNSPIRV-NEXT: fence syncscope("workgroup") release, !mmra [[META3]] +// AMDGCNSPIRV-NEXT: ret void // void test_local() { __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "local"); @@ -58,6 +99,25 @@ void test_local() { // CHECK-NEXT: fence syncscope("agent") acq_rel, !mmra [[META4]] // CHECK-NEXT: fence syncscope("workgroup") release, !mmra [[META4]] // CHECK-NEXT: ret void +// GCN-LABEL: define dso_local void @_Z11test_globalv( +// GCN-SAME: ) #[[ATTR0]] { +// GCN-NEXT: entry: +// GCN-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META4:![0-9]+]] +// GCN-NEXT: fence syncscope("agent") acquire, !mmra [[META4]] +// GCN-NEXT: fence seq_cst, !mmra [[META4]] +// GCN-NEXT: fence syncscope("agent") acq_rel, !mmra [[META4]] +// GCN-NEXT: fence syncscope("workgroup") release, !mmra [[META4]] +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: define spir_func void @_Z11test_globalv( +// AMDGCNSPIRV-SAME: ) addrspace(4) #[[ATTR0]] { +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META4:![0-9]+]] +// AMDGCNSPIRV-NEXT: fence syncscope("device") acquire, !mmra [[META4]] +// AMDGCNSPIRV-NEXT: fence seq_cst, !mmra [[META4]] +// AMDGCNSPIRV-NEXT: fence syncscope("device") acq_rel, !mmra [[META4]] +// AMDGCNSPIRV-NEXT: fence syncscope("workgroup") release, !mmra [[META4]] +// AMDGCNSPIRV-NEXT: ret void // void test_global() { __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "global"); @@ -80,6 +140,25 @@ void test_global() { // CHECK-NEXT: fence syncscope("agent") acq_rel, !mmra [[META3]] // CHECK-NEXT: fence syncscope("workgroup") release, !mmra [[META3]] // CHECK-NEXT: ret void +// GCN-LABEL: define dso_local void @_Z10test_imagev( +// GCN-SAME: ) #[[ATTR0]] { +// GCN-NEXT: entry: +// GCN-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META3]] +// GCN-NEXT: fence syncscope("agent") acquire, !mmra [[META3]] +// GCN-NEXT: fence seq_cst, !mmra [[META3]] +// GCN-NEXT: fence syncscope("agent") acq_rel, !mmra [[META3]] +// GCN-NEXT: fence syncscope("workgroup") release, !mmra [[META3]] +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: define spir_func void @_Z10test_imagev( +// AMDGCNSPIRV-SAME: ) addrspace(4) #[[ATTR0]] { +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META3]] +// AMDGCNSPIRV-NEXT: fence syncscope("device") acquire, !mmra [[META3]] +// AMDGCNSPIRV-NEXT: fence seq_cst, !mmra [[META3]] +// AMDGCNSPIRV-NEXT: fence syncscope("device") acq_rel, !mmra [[META3]] +// AMDGCNSPIRV-NEXT: fence syncscope("workgroup") release, !mmra [[META3]] +// AMDGCNSPIRV-NEXT: ret void // void test_image() { __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "local"); @@ -99,13 +178,33 @@ void test_image() { // CHECK-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META5:![0-9]+]] // CHECK-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META5]] // CHECK-NEXT: ret void +// GCN-LABEL: define dso_local void @_Z10test_mixedv( +// GCN-SAME: ) #[[ATTR0]] { +// GCN-NEXT: entry: +// GCN-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META5:![0-9]+]] +// GCN-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META5]] +// GCN-NEXT: ret void +// +// AMDGCNSPIRV-LABEL: define spir_func void @_Z10test_mixedv( +// AMDGCNSPIRV-SAME: ) addrspace(4) #[[ATTR0]] { +// AMDGCNSPIRV-NEXT: entry: +// AMDGCNSPIRV-NEXT: fence syncscope("workgroup") seq_cst, !mmra [[META5:![0-9]+]] +// AMDGCNSPIRV-NEXT: fence 
syncscope("workgroup") seq_cst, !mmra [[META5]] +// AMDGCNSPIRV-NEXT: ret void // void test_mixed() { __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "local", "global"); __builtin_amdgcn_fence( __ATOMIC_SEQ_CST, "workgroup", "local", "local", "global", "local", "local"); } -//. // CHECK: [[META3]] = !{!"amdgpu-synchronize-as", !"local"} // CHECK: [[META4]] = !{!"amdgpu-synchronize-as", !"global"} // CHECK: [[META5]] = !{[[META4]], [[META3]]} //. +// GCN: [[META3]] = !{!"amdgpu-synchronize-as", !"local"} +// GCN: [[META4]] = !{!"amdgpu-synchronize-as", !"global"} +// GCN: [[META5]] = !{[[META4]], [[META3]]} +//. +// AMDGCNSPIRV: [[META3]] = !{!"amdgpu-synchronize-as", !"local"} +// AMDGCNSPIRV: [[META4]] = !{!"amdgpu-synchronize-as", !"global"} +// AMDGCNSPIRV: [[META5]] = !{[[META4]], [[META3]]} +//. diff --git a/clang/test/CodeGenCXX/gh56652.cpp b/clang/test/CodeGenCXX/gh56652.cpp new file mode 100644 index 0000000000000..06a496e320bfc --- /dev/null +++ b/clang/test/CodeGenCXX/gh56652.cpp @@ -0,0 +1,41 @@ +// RUN: %clang_cc1 -std=c++20 -triple x86_64-elf-gnu %s -emit-llvm -o - | FileCheck %s + +namespace GH56652{ + +struct foo {}; + +template struct bar { + using type = T; + + template inline static constexpr auto b = true; +}; + +template +concept C = requires(T a) { T::template b; }; + +template auto fn(T) { + if constexpr (!C) + return foo{}; + else + return T{}; +} + +auto a = decltype(fn(bar{})){}; + +} + +namespace GH116319 { + +template struct a { +template static constexpr auto b = 2; +template static void c() noexcept(noexcept(b)) {} +}; + +void test() { a<>::c(); } + + +} + +// CHECK: %"struct.GH56652::bar" = type { i8 } +// CHECK: $_ZN8GH1163191aILi0EE1cIiEEvv = comdat any +// CHECK: @_ZN7GH566521aE = global %"struct.GH56652::bar" undef diff --git a/clang/test/CodeGenCXX/ppc-mangle-mma-types.cpp b/clang/test/CodeGenCXX/ppc-mangle-mma-types.cpp index 1e213e7f75127..6b792dceba2c6 100644 --- a/clang/test/CodeGenCXX/ppc-mangle-mma-types.cpp +++ b/clang/test/CodeGenCXX/ppc-mangle-mma-types.cpp @@ -7,6 +7,9 @@ // RUN: %clang_cc1 -triple powerpc64le-linux-unknown -target-cpu pwr8 %s \ // RUN: -emit-llvm -o - | FileCheck %s +// CHECK: _Z1fPu9__dmr2048 +void f(__dmr2048 *vdmrp) {} + // CHECK: _Z2f0Pu9__dmr1024 void f0(__dmr1024 *vdmr) {} diff --git a/clang/test/CodeGenCoroutines/coro-builtins.c b/clang/test/CodeGenCoroutines/coro-builtins.c index 79f119b2b60ff..0c2553274f09f 100644 --- a/clang/test/CodeGenCoroutines/coro-builtins.c +++ b/clang/test/CodeGenCoroutines/coro-builtins.c @@ -37,7 +37,7 @@ void f(int n) { // CHECK-NEXT: call ptr @llvm.coro.free(token %[[COROID]], ptr %[[FRAME]]) __builtin_coro_free(__builtin_coro_frame()); - // CHECK-NEXT: call i1 @llvm.coro.end(ptr %[[FRAME]], i1 false, token none) + // CHECK-NEXT: call void @llvm.coro.end(ptr %[[FRAME]], i1 false, token none) __builtin_coro_end(__builtin_coro_frame(), 0); // CHECK-NEXT: call i8 @llvm.coro.suspend(token none, i1 true) diff --git a/clang/test/CodeGenCoroutines/coro-eh-cleanup.cpp b/clang/test/CodeGenCoroutines/coro-eh-cleanup.cpp index 725cf8faa6b4c..6b61ccde5728b 100644 --- a/clang/test/CodeGenCoroutines/coro-eh-cleanup.cpp +++ b/clang/test/CodeGenCoroutines/coro-eh-cleanup.cpp @@ -60,7 +60,7 @@ coro_t f() { // CHECK: [[COROENDBB]]: // CHECK-NEXT: %[[CLPAD:.+]] = cleanuppad within none -// CHECK-NEXT: call i1 @llvm.coro.end(ptr null, i1 true, token none) [ "funclet"(token %[[CLPAD]]) ] +// CHECK-NEXT: call void @llvm.coro.end(ptr null, i1 true, token none) [ "funclet"(token %[[CLPAD]]) ] // CHECK-NEXT: 
cleanupret from %[[CLPAD]] unwind label // CHECK-LPAD: @_Z1fv( @@ -76,8 +76,8 @@ coro_t f() { // CHECK-LPAD: to label %{{.+}} unwind label %[[UNWINDBB:.+]] // CHECK-LPAD: [[UNWINDBB]]: -// CHECK-LPAD: %[[I1RESUME:.+]] = call i1 @llvm.coro.end(ptr null, i1 true, token none) -// CHECK-LPAD: br i1 %[[I1RESUME]], label %[[EHRESUME:.+]], label +// CHECK-LPAD: %[[InRamp:.+]] = call i1 @llvm.coro.is_in_ramp() +// CHECK-LPAD: br i1 %[[InRamp]], label %{{.+}}, label %[[EHRESUME:.+]] // CHECK-LPAD: [[EHRESUME]]: // CHECK-LPAD-NEXT: %[[exn:.+]] = load ptr, ptr %exn.slot, align 8 // CHECK-LPAD-NEXT: %[[sel:.+]] = load i32, ptr %ehselector.slot, align 4 diff --git a/clang/test/CodeGenCoroutines/coro-lambda.cpp b/clang/test/CodeGenCoroutines/coro-lambda.cpp index 26c51070f9e2d..b24a190ab41fb 100644 --- a/clang/test/CodeGenCoroutines/coro-lambda.cpp +++ b/clang/test/CodeGenCoroutines/coro-lambda.cpp @@ -55,4 +55,4 @@ void f() { // CHECK: alloca %"struct.Task::promise_type" // CHECK: call token @llvm.coro.id( // CHECK: call i8 @llvm.coro.suspend( -// CHECK: call i1 @llvm.coro.end( +// CHECK: call void @llvm.coro.end( diff --git a/clang/test/CodeGenCoroutines/coro-params.cpp b/clang/test/CodeGenCoroutines/coro-params.cpp index 719726cca29c5..79e77a21017fa 100644 --- a/clang/test/CodeGenCoroutines/coro-params.cpp +++ b/clang/test/CodeGenCoroutines/coro-params.cpp @@ -117,7 +117,7 @@ void f(int val, MoveOnly moParam, MoveAndCopy mcParam, TrivialABI trivialParam) // CHECK-NEXT: call ptr @llvm.coro.free( // The original trivial_abi parameter is destroyed when returning from the ramp. - // CHECK: call i1 @llvm.coro.end + // CHECK: call void @llvm.coro.end // CHECK: call void @_ZN10TrivialABID1Ev(ptr {{[^,]*}} %[[TrivialAlloca]]) } @@ -242,6 +242,6 @@ void msabi(MSParm p) { co_return; // The local alloca is used for the destructor call at the end of the ramp. 
- // MSABI: call i1 @llvm.coro.end + // MSABI: call void @llvm.coro.end // MSABI: call void @"??1MSParm@@QEAA@XZ"(ptr{{.*}} %[[ParamAlloca]]) } diff --git a/clang/test/CodeGenDirectX/unsupported_intrinsic.hlsl b/clang/test/CodeGenDirectX/unsupported_intrinsic.hlsl index db91cb8fb789e..fc4b449bebf10 100644 --- a/clang/test/CodeGenDirectX/unsupported_intrinsic.hlsl +++ b/clang/test/CodeGenDirectX/unsupported_intrinsic.hlsl @@ -1,5 +1,6 @@ // REQUIRES: directx-registered-target -// RUN: not %clang_dxc -T lib_6_3 %s 2>&1 | FileCheck %s +// RUN: not %clang_cc1 -triple dxilv1.3-unknown-shadermodel6.3-library \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s // CHECK: error: Unsupported intrinsic llvm.vector.reduce.and.v4i32 for DXIL lowering diff --git a/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip b/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip index 2342fcefb5f89..e92105091712c 100644 --- a/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip +++ b/clang/test/CodeGenHIP/builtins-make-buffer-rsrc.hip @@ -24,8 +24,9 @@ // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2 // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4 +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP2]] to i64 // CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP4:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]]) +// CHECK-NEXT: [[TMP4:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i64 [[CONV]], i32 [[TMP3]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP4]] // __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short stride, int num, int flags) { @@ -48,8 +49,9 @@ __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short // CHECK-NEXT: store i32 [[FLAGS]], ptr [[FLAGS_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4 +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP1]] to i64 // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 4, i32 [[TMP1]], i32 [[TMP2]]) +// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 4, i64 [[CONV]], i32 [[TMP2]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP3]] // __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p, int num, int flags) { @@ -73,7 +75,7 @@ __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constan // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2 // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[FLAGS_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 1234, i32 [[TMP2]]) +// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i64 1234, i32 [[TMP2]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP3]] // __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, short stride, int flags) { @@ -97,7 +99,8 @@ __device__ 
__amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(v // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[STRIDE_ADDR_ASCAST]], align 2 // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[NUM_ADDR_ASCAST]], align 4 -// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i32 [[TMP2]], i32 5678) +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP2]] to i64 +// CHECK-NEXT: [[TMP3:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[TMP0]], i16 [[TMP1]], i64 [[CONV]], i32 5678) // CHECK-NEXT: ret ptr addrspace(8) [[TMP3]] // __device__ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, short stride, int num) { diff --git a/clang/test/CodeGenHLSL/BasicFeatures/InitLists.hlsl b/clang/test/CodeGenHLSL/BasicFeatures/InitLists.hlsl index c30c640519cda..7e83e5f168538 100644 --- a/clang/test/CodeGenHLSL/BasicFeatures/InitLists.hlsl +++ b/clang/test/CodeGenHLSL/BasicFeatures/InitLists.hlsl @@ -45,6 +45,27 @@ struct SlicyBits { int W : 8; }; +struct Unnamed { + int A; + int : 8; +}; + +struct Empty { +}; + +struct UnnamedOnly { + int : 8; +}; + +struct EmptyDerived : Empty {}; + +struct UnnamedDerived : UnnamedOnly {}; + +// CHECK-DAG: [[ConstE:@.*]] = private unnamed_addr constant %struct.Empty undef, align 1 +// CHECK-DAG: [[ConstUO:@.*]] = private unnamed_addr constant %struct.UnnamedOnly undef, align 1 +// CHECK-DAG: [[ConstED:@.*]] = private unnamed_addr constant %struct.EmptyDerived undef, align 1 +// CHECK-DAG: [[ConstUD:@.*]] = private unnamed_addr constant %struct.UnnamedDerived undef, align 1 + // Case 1: Extraneous braces get ignored in literal instantiation. // CHECK-LABEL: define hidden void @_Z5case1v( // CHECK-SAME: ptr dead_on_unwind noalias writable sret([[STRUCT_TWOFLOATS:%.*]]) align 1 [[AGG_RESULT:%.*]]) #[[ATTR0:[0-9]+]] { @@ -959,3 +980,78 @@ int case17Helper(int x) { void case17() { int2 X = {case17Helper(0), case17Helper(1)}; } + +// InitList with Struct with unnamed bitfield on LHS +// CHECK-LABEL: case18 +// CHECK: [[U:%.*]] = alloca %struct.Unnamed, align 1 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[U]], ptr align 1 {{.*}}, i32 5, i1 false) +void case18() { + Unnamed U = {1}; +} + +// InitList with Struct with unnamed bitfield on RHS +// CHECK-LABEL: case19 +// CHECK: [[TI:%.*]] = alloca %struct.TwoInts, align 1 +// CHECK-NEXT: [[Z:%.*]] = getelementptr inbounds nuw %struct.TwoInts, ptr [[TI]], i32 0, i32 0 +// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds nuw %struct.Unnamed, ptr %U, i32 0, i32 0 +// CHECK-NEXT: [[L:%.*]] = load i32, ptr [[A]], align 1 +// CHECK-NEXT: store i32 [[L]], ptr [[Z]], align 1 +// CHECK-NEXT: [[W:%.*]] = getelementptr inbounds nuw %struct.TwoInts, ptr [[TI]], i32 0, i32 1 +// CHECK-NEXT: store i32 1, ptr [[W]], align 1 +void case19(Unnamed U) { + TwoInts TI = {U, 1}; +} + +// InitList with Empty Struct on LHS +// CHECK-LABEL: case20 +// CHECK: [[E:%.*]] = alloca %struct.Empty, align 1 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[E]], ptr align 1 [[ConstE]], i32 1, i1 false) +void case20() { + Empty E = {}; +} + +// InitList with Empty Struct on RHS +// CHECK-LABEL: case21 +// CHECK: [[TI:%.*]] = alloca %struct.TwoInts, align 1 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %TI, ptr align 1 {{.*}}, i32 8, i1 false) +void case21(Empty E) { + TwoInts TI = {E, 1, 2}; +} + +// InitList with Struct with only unnamed bitfield on 
LHS +// CHECK-LABEL: case22 +// CHECK: [[UO:%.*]] = alloca %struct.UnnamedOnly, align 1 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[UO]], ptr align 1 [[ConstUO]], i32 1, i1 false) +void case22() { + UnnamedOnly UO = {}; +} + +// InitList with Struct with only unnamed bitfield on RHS +// CHECK-LABEL: case23 +// CHECK: [[TI:%.*]] = alloca %struct.TwoInts, align 1 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[TI]], ptr align 1 {{.*}}, i32 8, i1 false) +void case23(UnnamedOnly UO) { + TwoInts TI = {UO, 1, 2}; +} + +// InitList with Derived empty struct on LHS +// InitList with Derived unnamed bitfield on LHS +// CHECK-LABEL: case24 +// CHECK: [[ED:%.*]] = alloca %struct.EmptyDerived, align 1 +// CHECK-NEXT: [[UD:%.*]] = alloca %struct.UnnamedDerived, align 1 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %ED, ptr align 1 [[ConstED]], i32 1, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %UD, ptr align 1 [[ConstUD]], i32 1, i1 false) +void case24() { + EmptyDerived ED = {}; + UnnamedDerived UD = {}; +} + +// CHECK-LABEL: case25 +// CHECK: [[TI1:%.*]] = alloca %struct.TwoInts, align 1 +// CHECK-NEXT: [[TI2:%.*]] = alloca %struct.TwoInts, align 1 +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %TI1, ptr align 1 {{.*}}, i32 8, i1 false) +// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %TI2, ptr align 1 {{.*}}, i32 8, i1 false) +void case25(EmptyDerived ED, UnnamedDerived UD) { + TwoInts TI1 = {ED, 1, 2}; + TwoInts TI2 = {UD, 1, 2}; +} diff --git a/clang/test/CodeGenHLSL/RootSignature.hlsl b/clang/test/CodeGenHLSL/RootSignature.hlsl index bc40bdd79ce59..eaff3a9e73305 100644 --- a/clang/test/CodeGenHLSL/RootSignature.hlsl +++ b/clang/test/CodeGenHLSL/RootSignature.hlsl @@ -82,8 +82,8 @@ void RootDescriptorsEntry() {} // checking minLOD, maxLOD // CHECK-SAME: float -1.280000e+02, float 1.280000e+02, -// checking register, space and visibility -// CHECK-SAME: i32 42, i32 0, i32 0} +// checking register, space, visibility and flag +// CHECK-SAME: i32 42, i32 0, i32 0, i32 1} #define SampleStaticSampler \ "StaticSampler(s42, " \ @@ -96,6 +96,7 @@ void RootDescriptorsEntry() {} " borderColor = STATIC_BORDER_COLOR_OPAQUE_WHITE, " \ " minLOD = -128.f, maxLOD = 128.f, " \ " space = 0, visibility = SHADER_VISIBILITY_ALL, " \ + " flags = UINT_BORDER_COLOR" \ ")" [shader("compute"), RootSignature(SampleStaticSampler)] [numthreads(1,1,1)] diff --git a/clang/test/CodeGenHLSL/builtins/isnan-overloads.hlsl b/clang/test/CodeGenHLSL/builtins/isnan-overloads.hlsl new file mode 100644 index 0000000000000..a0c3eee5da636 --- /dev/null +++ b/clang/test/CodeGenHLSL/builtins/isnan-overloads.hlsl @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -std=hlsl202x -finclude-default-header -x hlsl -triple \ +// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \ +// RUN: -o - | FileCheck %s + +// CHECK: define hidden noundef i1 @ +// CHECK: %hlsl.isnan = call i1 @llvm.dx.isnan.f32( +// CHECK: ret i1 %hlsl.isnan +bool test_isnan_double(double p0) { return isnan(p0); } +// CHECK: define hidden noundef <2 x i1> @ +// CHECK: %hlsl.isnan = call <2 x i1> @llvm.dx.isnan.v2f32 +// CHECK: ret <2 x i1> %hlsl.isnan +bool2 test_isnan_double2(double2 p0) { return isnan(p0); } +// CHECK: define hidden noundef <3 x i1> @ +// CHECK: %hlsl.isnan = call <3 x i1> @llvm.dx.isnan.v3f32 +// CHECK: ret <3 x i1> %hlsl.isnan +bool3 test_isnan_double3(double3 p0) { return isnan(p0); } +// CHECK: define hidden noundef <4 x i1> @ +// CHECK: %hlsl.isnan 
= call <4 x i1> @llvm.dx.isnan.v4f32 +// CHECK: ret <4 x i1> %hlsl.isnan +bool4 test_isnan_double4(double4 p0) { return isnan(p0); } diff --git a/clang/test/CodeGenHLSL/builtins/isnan.hlsl b/clang/test/CodeGenHLSL/builtins/isnan.hlsl new file mode 100644 index 0000000000000..ce7dbe1aedea4 --- /dev/null +++ b/clang/test/CodeGenHLSL/builtins/isnan.hlsl @@ -0,0 +1,62 @@ +// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \ +// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \ +// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \ +// RUN: --check-prefixes=CHECK,DXCHECK,NATIVE_HALF +// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \ +// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \ +// RUN: -o - | FileCheck %s --check-prefixes=CHECK,DXCHECK,NO_HALF + +// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \ +// RUN: spirv-unknown-vulkan-compute %s -fnative-half-type \ +// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \ +// RUN: --check-prefixes=CHECK,SPVCHECK,NATIVE_HALF +// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \ +// RUN: spirv-unknown-vulkan-compute %s -emit-llvm -disable-llvm-passes \ +// RUN: -o - | FileCheck %s --check-prefixes=CHECK,SPVCHECK,NO_HALF + +// DXCHECK: define hidden [[FN_TYPE:]]noundef i1 @ +// SPVCHECK: define hidden [[FN_TYPE:spir_func ]]noundef i1 @ +// DXCHECK: %hlsl.isnan = call i1 @llvm.[[ICF:dx]].isnan.f32( +// SPVCHECK: %hlsl.isnan = call i1 @llvm.[[ICF:spv]].isnan.f32( +// CHECK: ret i1 %hlsl.isnan +bool test_isnan_float(float p0) { return isnan(p0); } + +// CHECK: define hidden [[FN_TYPE]]noundef i1 @ +// NATIVE_HALF: %hlsl.isnan = call i1 @llvm.[[ICF]].isnan.f16( +// NO_HALF: %hlsl.isnan = call i1 @llvm.[[ICF]].isnan.f32( +// CHECK: ret i1 %hlsl.isnan +bool test_isnan_half(half p0) { return isnan(p0); } + +// CHECK: define hidden [[FN_TYPE]]noundef <2 x i1> @ +// NATIVE_HALF: %hlsl.isnan = call <2 x i1> @llvm.[[ICF]].isnan.v2f16 +// NO_HALF: %hlsl.isnan = call <2 x i1> @llvm.[[ICF]].isnan.v2f32( +// CHECK: ret <2 x i1> %hlsl.isnan +bool2 test_isnan_half2(half2 p0) { return isnan(p0); } + +// NATIVE_HALF: define hidden [[FN_TYPE]]noundef <3 x i1> @ +// NATIVE_HALF: %hlsl.isnan = call <3 x i1> @llvm.[[ICF]].isnan.v3f16 +// NO_HALF: %hlsl.isnan = call <3 x i1> @llvm.[[ICF]].isnan.v3f32( +// CHECK: ret <3 x i1> %hlsl.isnan +bool3 test_isnan_half3(half3 p0) { return isnan(p0); } + +// NATIVE_HALF: define hidden [[FN_TYPE]]noundef <4 x i1> @ +// NATIVE_HALF: %hlsl.isnan = call <4 x i1> @llvm.[[ICF]].isnan.v4f16 +// NO_HALF: %hlsl.isnan = call <4 x i1> @llvm.[[ICF]].isnan.v4f32( +// CHECK: ret <4 x i1> %hlsl.isnan +bool4 test_isnan_half4(half4 p0) { return isnan(p0); } + + +// CHECK: define hidden [[FN_TYPE]]noundef <2 x i1> @ +// CHECK: %hlsl.isnan = call <2 x i1> @llvm.[[ICF]].isnan.v2f32 +// CHECK: ret <2 x i1> %hlsl.isnan +bool2 test_isnan_float2(float2 p0) { return isnan(p0); } + +// CHECK: define hidden [[FN_TYPE]]noundef <3 x i1> @ +// CHECK: %hlsl.isnan = call <3 x i1> @llvm.[[ICF]].isnan.v3f32 +// CHECK: ret <3 x i1> %hlsl.isnan +bool3 test_isnan_float3(float3 p0) { return isnan(p0); } + +// CHECK: define hidden [[FN_TYPE]]noundef <4 x i1> @ +// CHECK: %hlsl.isnan = call <4 x i1> @llvm.[[ICF]].isnan.v4f32 +// CHECK: ret <4 x i1> %hlsl.isnan +bool4 test_isnan_float4(float4 p0) { return isnan(p0); } diff --git a/clang/test/CodeGenHLSL/builtins/transpose-builtin.hlsl b/clang/test/CodeGenHLSL/builtins/transpose-builtin.hlsl new file mode 100644 index 
0000000000000..86aa7cd6985dd --- /dev/null +++ b/clang/test/CodeGenHLSL/builtins/transpose-builtin.hlsl @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple dxil-pc-shadermodel6.3-library %s -fnative-half-type -emit-llvm -disable-llvm-passes -o - | FileCheck %s + +// NOTE: This test is only to confirm we can do codegen with the matrix alias. + +// CHECK-LABEL: define {{.*}}transpose_half_2x2 +void transpose_half_2x2(half2x2 a) { + // CHECK: [[A:%.*]] = load <4 x half>, ptr {{.*}}, align 2 + // CHECK-NEXT: [[TRANS:%.*]] = call {{.*}}<4 x half> @llvm.matrix.transpose.v4f16(<4 x half> [[A]], i32 2, i32 2) + // CHECK-NEXT: store <4 x half> [[TRANS]], ptr %a_t, align 2 + + half2x2 a_t = __builtin_matrix_transpose(a); +} + +// CHECK-LABEL: define {{.*}}transpose_float_3x2 +void transpose_float_3x2(float3x2 a) { + // CHECK: [[A:%.*]] = load <6 x float>, ptr {{.*}}, align 4 + // CHECK-NEXT: [[TRANS:%.*]] = call {{.*}}<6 x float> @llvm.matrix.transpose.v6f32(<6 x float> [[A]], i32 3, i32 2) + // CHECK-NEXT: store <6 x float> [[TRANS]], ptr %a_t, align 4 + + float2x3 a_t = __builtin_matrix_transpose(a); +} + +// CHECK-LABEL: define {{.*}}transpose_int_4x3 +void transpose_int_4x3(int4x3 a) { + // CHECK: [[A:%.*]] = load <12 x i32>, ptr {{.*}}, align 4 + // CHECK-NEXT: [[TRANS:%.*]] = call <12 x i32> @llvm.matrix.transpose.v12i32(<12 x i32> [[A]], i32 4, i32 3) + // CHECK-NEXT: store <12 x i32> [[TRANS]], ptr %a_t, align 4 + + int3x4 a_t = __builtin_matrix_transpose(a); +} diff --git a/clang/test/CodeGenHLSL/enable-16bit-types.hlsl b/clang/test/CodeGenHLSL/enable-16bit-types.hlsl index 9b5c742f9dacd..690404c4fde24 100644 --- a/clang/test/CodeGenHLSL/enable-16bit-types.hlsl +++ b/clang/test/CodeGenHLSL/enable-16bit-types.hlsl @@ -1,5 +1,7 @@ -// RUN: %clang_dxc -enable-16bit-types -T lib_6_3 -HV 202x -Vd -Xclang -emit-llvm %s | FileCheck %s --check-prefix=FLAG -// RUN: %clang_dxc -T lib_6_3 -HV 202x -Vd -Xclang -emit-llvm %s | FileCheck %s --check-prefix=NOFLAG +// RUN: %clang_cc1 -fnative-half-type -std=hlsl202x -triple dxilv1.3-unknown-shadermodel6.3-library \ +// RUN: -finclude-default-header -emit-llvm -o - %s 2>&1 | FileCheck %s --check-prefix=FLAG +// RUN: %clang_cc1 -std=hlsl202x -triple dxilv1.3-unknown-shadermodel6.3-library \ +// RUN: -finclude-default-header -emit-llvm -o - %s 2>&1 | FileCheck %s --check-prefix=NOFLAG // FLAG-DAG: ![[NLP:.*]] = !{i32 1, !"dx.nativelowprec", i32 1} // FLAG-DAG: !llvm.module.flags = !{{{.*}}![[NLP]]{{.*}}} diff --git a/clang/test/CodeGenHLSL/res-may-alias.hlsl b/clang/test/CodeGenHLSL/res-may-alias.hlsl index 8cb30249a649a..f609c9bc07b6f 100644 --- a/clang/test/CodeGenHLSL/res-may-alias.hlsl +++ b/clang/test/CodeGenHLSL/res-may-alias.hlsl @@ -1,5 +1,7 @@ -// RUN: %clang_dxc -res-may-alias -T lib_6_3 -HV 202x -Vd -Xclang -emit-llvm %s | FileCheck %s --check-prefix=FLAG -// RUN: %clang_dxc -T lib_6_3 -HV 202x -Vd -Xclang -emit-llvm %s | FileCheck %s --check-prefix=NOFLAG +// RUN: %clang_cc1 -res-may-alias -std=hlsl202x -triple dxilv1.3-unknown-shadermodel6.3-library \ +// RUN: -finclude-default-header -emit-llvm -o - %s 2>&1 | FileCheck %s --check-prefix=FLAG +// RUN: %clang_cc1 -std=hlsl202x -triple dxilv1.3-unknown-shadermodel6.3-library \ +// RUN: -finclude-default-header -emit-llvm -o - %s 2>&1 | FileCheck %s --check-prefix=NOFLAG // FLAG-DAG: ![[RMA:.*]] = !{i32 1, !"dx.resmayalias", i32 1} // FLAG-DAG: !llvm.module.flags = !{{{.*}}![[RMA]]{{.*}}} diff --git a/clang/test/CodeGenHLSL/resources/NonUniformResourceIndex.hlsl
b/clang/test/CodeGenHLSL/resources/NonUniformResourceIndex.hlsl new file mode 100644 index 0000000000000..ab512ce111d19 --- /dev/null +++ b/clang/test/CodeGenHLSL/resources/NonUniformResourceIndex.hlsl @@ -0,0 +1,38 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.3-compute -emit-llvm -disable-llvm-passes -o - %s \ +// RUN: | llvm-cxxfilt | FileCheck %s --check-prefixes=CHECK,DXIL +// RUN: %clang_cc1 -finclude-default-header -triple spirv-pc-vulkan1.3-compute -emit-llvm -disable-llvm-passes -o - %s \ +// RUN: | llvm-cxxfilt | FileCheck %s --check-prefixes=CHECK,SPV + +RWBuffer<float> A[10]; + +[numthreads(4,1,1)] +void main(uint GI : SV_GroupID) { + // CHECK: %[[GI:.*]] = load i32, ptr %GI.addr + // CHECK: %[[NURI_1:.*]] = call {{.*}} i32 @hlsl::NonUniformResourceIndex(unsigned int)(i32 noundef %[[GI]]) + // CHECK: call void @hlsl::RWBuffer<float>::__createFromImplicitBinding(unsigned int, unsigned int, int, unsigned int, char const*) + // CHECK-SAME: (ptr {{.*}}, i32 noundef 0, i32 noundef 0, i32 noundef 10, i32 noundef %[[NURI_1]], ptr noundef @A.str) + float a = A[NonUniformResourceIndex(GI)][0]; + + // CHECK: %[[GI:.*]] = load i32, ptr %GI.addr + // CHECK: %[[ADD:.*]] = add i32 %[[GI]], 1 + // CHECK: %[[NURI_2:.*]] = call {{.*}} i32 @hlsl::NonUniformResourceIndex(unsigned int)(i32 noundef %[[ADD]]) + // CHECK: %[[MOD:.*]] = urem i32 %[[NURI_2]], 10 + // CHECK: call void @hlsl::RWBuffer<float>::__createFromImplicitBinding(unsigned int, unsigned int, int, unsigned int, char const*) + // CHECK-SAME: (ptr {{.*}}, i32 noundef 0, i32 noundef 0, i32 noundef 10, i32 noundef %[[MOD]], ptr noundef @A.str) + float b = A[NonUniformResourceIndex(GI + 1) % 10][0]; + + // CHECK: %[[GI:.*]] = load i32, ptr %GI.addr + // CHECK: %[[NURI_3:.*]] = call {{.*}} i32 @hlsl::NonUniformResourceIndex(unsigned int)(i32 noundef %[[GI]]) + // CHECK: %[[MUL:.*]] = mul i32 3, %[[NURI_3]] + // CHECK: %[[ADD2:.*]] = add i32 10, %[[MUL]] + // CHECK: call void @hlsl::RWBuffer<float>::__createFromImplicitBinding(unsigned int, unsigned int, int, unsigned int, char const*) + // CHECK-SAME: (ptr {{.*}}, i32 noundef 0, i32 noundef 0, i32 noundef 10, i32 noundef %[[ADD2]], ptr noundef @A.str) + float c = A[10 + 3 * NonUniformResourceIndex(GI)][0]; + A[0][0] = a + b + c; +} + +// CHECK: define {{.*}} i32 @hlsl::NonUniformResourceIndex(unsigned int)(i32 noundef %Index) +// CHECK: %[[INDEX1:.*]] = load i32, ptr %Index.addr, align 4 +// DXIL: %[[INDEX2:.*]] = call i32 @llvm.dx.resource.nonuniformindex(i32 %[[INDEX1]]) +// SPV: %[[INDEX2:.*]] = call i32 @llvm.spv.resource.nonuniformindex(i32 %[[INDEX1]]) +// CHECK: ret i32 %[[INDEX2]] diff --git a/clang/test/CodeGenHLSL/resources/RWStructuredBuffer-elementtype.hlsl b/clang/test/CodeGenHLSL/resources/RWStructuredBuffer-elementtype.hlsl index 472b9a8dae2ae..9f0a5b79ee6e4 100644 --- a/clang/test/CodeGenHLSL/resources/RWStructuredBuffer-elementtype.hlsl +++ b/clang/test/CodeGenHLSL/resources/RWStructuredBuffer-elementtype.hlsl @@ -1,23 +1,36 @@ // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.2-compute -finclude-default-header -fnative-half-type -emit-llvm -o - %s | FileCheck %s -check-prefixes=CHECK // RUN: %clang_cc1 -triple spirv-unknown-vulkan1.3-compute -finclude-default-header -fnative-half-type -emit-llvm -o - %s | FileCheck %s -check-prefixes=SPV -// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", i16, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.0" = type { target("dx.RawBuffer", i16, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.1" = type
{ target("dx.RawBuffer", i32, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.2" = type { target("dx.RawBuffer", i32, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.3" = type { target("dx.RawBuffer", i64, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.4" = type { target("dx.RawBuffer", i64, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.5" = type { target("dx.RawBuffer", half, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.6" = type { target("dx.RawBuffer", float, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.7" = type { target("dx.RawBuffer", double, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.8" = type { target("dx.RawBuffer", <4 x i16>, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.9" = type { target("dx.RawBuffer", <3 x i32>, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.10" = type { target("dx.RawBuffer", <2 x half>, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.11" = type { target("dx.RawBuffer", <3 x float>, 1, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer.12" = type { target("dx.RawBuffer", i32, 1, 0) } -// SPV: %"class.hlsl::RWStructuredBuffer.12" = type { target("spirv.VulkanBuffer", [0 x i32], 12, 1) -// CHECK: %"class.hlsl::RWStructuredBuffer.13" = type { target("dx.RawBuffer", <4 x i32>, 1, 0) } -// SPV: %"class.hlsl::RWStructuredBuffer.13" = type { target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1) +// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", i16, 1, 0), target("dx.RawBuffer", i16, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer" = type { target("spirv.VulkanBuffer", [0 x i16], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.0" = type { target("dx.RawBuffer", i16, 1, 0), target("dx.RawBuffer", i16, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.0" = type { target("spirv.VulkanBuffer", [0 x i16], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.1" = type { target("dx.RawBuffer", i32, 1, 0), target("dx.RawBuffer", i32, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.1" = type { target("spirv.VulkanBuffer", [0 x i32], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.2" = type { target("dx.RawBuffer", i32, 1, 0), target("dx.RawBuffer", i32, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.2" = type { target("spirv.VulkanBuffer", [0 x i32], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.3" = type { target("dx.RawBuffer", i64, 1, 0), target("dx.RawBuffer", i64, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.3" = type { target("spirv.VulkanBuffer", [0 x i64], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.4" = type { target("dx.RawBuffer", i64, 1, 0), target("dx.RawBuffer", i64, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.4" = type { target("spirv.VulkanBuffer", [0 x i64], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.5" = type { target("dx.RawBuffer", half, 1, 0), target("dx.RawBuffer", half, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.5" = type { target("spirv.VulkanBuffer", [0 x half], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.6" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.6" = type { target("spirv.VulkanBuffer", [0 x float], 12, 1), 
target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.7" = type { target("dx.RawBuffer", double, 1, 0), target("dx.RawBuffer", double, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.7" = type { target("spirv.VulkanBuffer", [0 x double], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.8" = type { target("dx.RawBuffer", <4 x i16>, 1, 0), target("dx.RawBuffer", <4 x i16>, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.8" = type { target("spirv.VulkanBuffer", [0 x <4 x i16>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.9" = type { target("dx.RawBuffer", <3 x i32>, 1, 0), target("dx.RawBuffer", <3 x i32>, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.9" = type { target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.10" = type { target("dx.RawBuffer", <2 x half>, 1, 0), target("dx.RawBuffer", <2 x half>, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.10" = type { target("spirv.VulkanBuffer", [0 x <2 x half>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.11" = type { target("dx.RawBuffer", <3 x float>, 1, 0), target("dx.RawBuffer", <3 x float>, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.11" = type { target("spirv.VulkanBuffer", [0 x <3 x float>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.12" = type { target("dx.RawBuffer", i32, 1, 0), target("dx.RawBuffer", i32, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.12" = type { target("spirv.VulkanBuffer", [0 x i32], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } +// CHECK: %"class.hlsl::RWStructuredBuffer.13" = type { target("dx.RawBuffer", <4 x i32>, 1, 0), target("dx.RawBuffer", <4 x i32>, 1, 0) } +// SPV: %"class.hlsl::RWStructuredBuffer.13" = type { target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1), target("spirv.VulkanBuffer", i32, 12, 1) } RWStructuredBuffer BufI16; RWStructuredBuffer BufU16; diff --git a/clang/test/CodeGenHLSL/resources/RasterizerOrderedStructuredBuffer-elementtype.hlsl b/clang/test/CodeGenHLSL/resources/RasterizerOrderedStructuredBuffer-elementtype.hlsl index 6c5a705d5cf2e..c97ad4237000f 100644 --- a/clang/test/CodeGenHLSL/resources/RasterizerOrderedStructuredBuffer-elementtype.hlsl +++ b/clang/test/CodeGenHLSL/resources/RasterizerOrderedStructuredBuffer-elementtype.hlsl @@ -5,19 +5,19 @@ struct MyStruct { int2 b; }; -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer" = type { target("dx.RawBuffer", i16, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.0" = type { target("dx.RawBuffer", i16, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.1" = type { target("dx.RawBuffer", i32, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.2" = type { target("dx.RawBuffer", i32, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.3" = type { target("dx.RawBuffer", i64, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.4" = type { target("dx.RawBuffer", i64, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.5" = type { target("dx.RawBuffer", half, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.6" = type { target("dx.RawBuffer", float, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.7" = type { target("dx.RawBuffer", double, 1, 1) } -// DXIL: 
%"class.hlsl::RasterizerOrderedStructuredBuffer.8" = type { target("dx.RawBuffer", <4 x i16>, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.9" = type { target("dx.RawBuffer", <3 x i32>, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.10" = type { target("dx.RawBuffer", <2 x half>, 1, 1) } -// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.11" = type { target("dx.RawBuffer", <3 x float>, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer" = type { target("dx.RawBuffer", i16, 1, 1), target("dx.RawBuffer", i16, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.0" = type { target("dx.RawBuffer", i16, 1, 1), target("dx.RawBuffer", i16, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.1" = type { target("dx.RawBuffer", i32, 1, 1), target("dx.RawBuffer", i32, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.2" = type { target("dx.RawBuffer", i32, 1, 1), target("dx.RawBuffer", i32, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.3" = type { target("dx.RawBuffer", i64, 1, 1), target("dx.RawBuffer", i64, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.4" = type { target("dx.RawBuffer", i64, 1, 1), target("dx.RawBuffer", i64, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.5" = type { target("dx.RawBuffer", half, 1, 1), target("dx.RawBuffer", half, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.6" = type { target("dx.RawBuffer", float, 1, 1), target("dx.RawBuffer", float, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.7" = type { target("dx.RawBuffer", double, 1, 1), target("dx.RawBuffer", double, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.8" = type { target("dx.RawBuffer", <4 x i16>, 1, 1), target("dx.RawBuffer", <4 x i16>, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.9" = type { target("dx.RawBuffer", <3 x i32>, 1, 1), target("dx.RawBuffer", <3 x i32>, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.10" = type { target("dx.RawBuffer", <2 x half>, 1, 1), target("dx.RawBuffer", <2 x half>, 1, 1) } +// DXIL: %"class.hlsl::RasterizerOrderedStructuredBuffer.11" = type { target("dx.RawBuffer", <3 x float>, 1, 1), target("dx.RawBuffer", <3 x float>, 1, 1) } // DXIL: %struct.MyStruct = type <{ <4 x float>, <2 x i32> }> RasterizerOrderedStructuredBuffer BufI16; diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl index 4f005eab5c71a..89a66b047a3bd 100644 --- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl +++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-constructors.hlsl @@ -21,8 +21,8 @@ export void foo() { } // CHECK-DXIL: %"class.hlsl::StructuredBuffer" = type { target("dx.RawBuffer", float, 0, 0) } -// CHECK-DXIL: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) } -// CHECK-DXIL: %"class.hlsl::AppendStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) } +// CHECK-DXIL: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) } +// CHECK-DXIL: %"class.hlsl::AppendStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) } // CHECK: @Buf1 = internal global %"class.hlsl::StructuredBuffer" poison, align 4 // CHECK: @[[Buf1Str:.*]] = private unnamed_addr constant [5 x i8] 
c"Buf1\00", align 1 diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl index 93aa218f63ecf..43ddd2e768ea0 100644 --- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl +++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-lib.hlsl @@ -10,9 +10,9 @@ AppendStructuredBuffer ASB : register(u2); ConsumeStructuredBuffer CSB : register(u3); // CHECK: %"class.hlsl::StructuredBuffer" = type { target("dx.RawBuffer", float, 0, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) } -// CHECK: %"class.hlsl::AppendStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) } -// CHECK: %"class.hlsl::ConsumeStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) } +// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) } +// CHECK: %"class.hlsl::AppendStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) } +// CHECK: %"class.hlsl::ConsumeStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) } export int TestIncrementCounter() { return RWSB1.IncrementCounter(); diff --git a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl index b513963d72474..9e08a6d0d7ae0 100644 --- a/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl +++ b/clang/test/CodeGenHLSL/resources/StructuredBuffers-methods-ps.hlsl @@ -6,7 +6,7 @@ RWStructuredBuffer RWSB1, RWSB2; RasterizerOrderedStructuredBuffer ROSB1, ROSB2; -// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0) } +// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", float, 1, 0), target("dx.RawBuffer", float, 1, 0) } export void TestIncrementCounter() { // CHECK: define void @_Z20TestIncrementCounterv() diff --git a/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl b/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl index 4ffa7cfc84e17..1d85048db87a8 100644 --- a/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl +++ b/clang/test/CodeGenHLSL/resources/resource-bindings.hlsl @@ -4,7 +4,7 @@ // CHECK: %"class.hlsl::RWBuffer" = type { target("dx.TypedBuffer", <4 x float>, 1, 0, 0) } // CHECK: %"class.hlsl::RWBuffer.0" = type { target("dx.TypedBuffer", float, 1, 0, 0) } // CHECK: %"class.hlsl::StructuredBuffer" = type { target("dx.RawBuffer", i32, 0, 0) } -// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", %struct.S, 1, 0) } +// CHECK: %"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", %struct.S, 1, 0), target("dx.RawBuffer", %struct.S, 1, 0) } // CHECK: %"class.hlsl::RWBuffer.1" = type { target("dx.TypedBuffer", double, 1, 0, 0) } // CHECK: @_ZL4U0S0 = internal global %"class.hlsl::RWBuffer" poison, align 4 diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx11.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx11.cl index 19ab6562e52b9..7cd3f1417844c 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx11.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx11.cl @@ -1,13 +1,13 @@ // REQUIRES: amdgpu-registered-target -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1100 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1101 -emit-llvm -o - %s | 
FileCheck %s -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1102 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1103 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1150 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1151 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1152 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1153 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple spirv64-amd-amdhsa -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1100 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1101 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1102 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1103 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1150 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1151 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1152 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1153 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple spirv64-amd-amdhsa -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,AMDGCNSPIRV %s typedef unsigned int uint; typedef unsigned long ulong; @@ -50,7 +50,8 @@ void test_s_wait_event_export_ready() { } // CHECK-LABEL: @test_global_add_f32 -// CHECK: = atomicrmw fadd ptr addrspace(1) %addr, float %x syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+}}, !amdgpu.ignore.denormal.mode !{{[0-9]+$}} +// GCN: = atomicrmw fadd ptr addrspace(1) %addr, float %x syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+}}, !amdgpu.ignore.denormal.mode !{{[0-9]+$}} +// AMDGCNSPIRV: = atomicrmw fadd ptr addrspace(1) %addr, float %x syncscope("device") monotonic, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+}}, !amdgpu.ignore.denormal.mode !{{[0-9]+$}} #if !defined(__SPIRV__) void test_global_add_f32(float *rtn, global float *addr, float x) { #else diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl index 29093c09c39d0..4b5232c0010aa 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-make-buffer-rsrc.cl @@ -4,7 +4,8 @@ // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i64 [[CONV]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // 
__amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short stride, int num, int flags) { @@ -13,7 +14,8 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0(void *p, short stride, in // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0_stride_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 4, i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 4, i64 [[CONV]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p, int num, int flags) { @@ -22,7 +24,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_stride_constant(void *p, // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0_num_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 1234, i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i64 1234, i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, short stride, int flags) { @@ -31,7 +33,8 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_num_constant(void *p, sho // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p0_flags_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 5678) +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P:%.*]], i16 [[STRIDE:%.*]], i64 [[CONV]], i32 5678) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, short stride, int num) { @@ -40,7 +43,8 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p0_flags_constant(void *p, s // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i64 [[CONV]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1(global void *p, short stride, int num, int flags) { @@ -49,7 +53,8 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1(global void *p, short str // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1_stride_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 4, i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 4, i64 [[CONV]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t 
test_amdgcn_make_buffer_rsrc_p1_stride_constant(global void *p, int num, int flags) { @@ -58,7 +63,7 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_stride_constant(global vo // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1_num_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i32 1234, i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i64 1234, i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_num_constant(global void *p, short stride, int flags) { @@ -67,7 +72,8 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_num_constant(global void // CHECK-LABEL: @test_amdgcn_make_buffer_rsrc_p1_flags_constant( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 5678) +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P:%.*]], i16 [[STRIDE:%.*]], i64 [[CONV]], i32 5678) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_flags_constant(global void *p, short stride, int num) { @@ -76,7 +82,8 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_rsrc_p1_flags_constant(global voi // CHECK-LABEL: @test_amdgcn_make_buffer_p0_nullptr( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr null, i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr null, i16 [[STRIDE:%.*]], i64 [[CONV]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p0_nullptr(short stride, int num, int flags) { @@ -85,7 +92,8 @@ __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p0_nullptr(short stride, int num, // CHECK-LABEL: @test_amdgcn_make_buffer_p1_nullptr( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) null, i16 [[STRIDE:%.*]], i32 [[NUM:%.*]], i32 [[FLAGS:%.*]]) +// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[NUM:%.*]] to i64 +// CHECK-NEXT: [[TMP0:%.*]] = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) null, i16 [[STRIDE:%.*]], i64 [[CONV]], i32 [[FLAGS:%.*]]) // CHECK-NEXT: ret ptr addrspace(8) [[TMP0]] // __amdgpu_buffer_rsrc_t test_amdgcn_make_buffer_p1_nullptr(short stride, int num, int flags) { diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-vi.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-vi.cl index 5f202baa8a592..6bb20bff436fb 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-vi.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-vi.cl @@ -1,9 +1,9 @@ // REQUIRES: amdgpu-registered-target -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu tonga -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx900 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1010 -emit-llvm -o - %s | FileCheck %s -// RUN: 
%clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1012 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -triple spirv64-amd-amdhsa -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu tonga -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx900 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1010 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1012 -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,GCN %s +// RUN: %clang_cc1 -triple spirv64-amd-amdhsa -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,AMDGCNSPIRV %s #pragma OPENCL EXTENSION cl_khr_fp16 : enable @@ -252,9 +252,11 @@ void test_update_dpp_const_int(global int* out, int arg1) // CHECK: atomicrmw fadd ptr addrspace(3) %out, float %src seq_cst, align 4{{$}} // CHECK: atomicrmw fadd ptr addrspace(3) %out, float %src seq_cst, align 4{{$}} -// CHECK: atomicrmw fadd ptr addrspace(3) %out, float %src syncscope("agent") monotonic, align 4{{$}} +// GCN: atomicrmw fadd ptr addrspace(3) %out, float %src syncscope("agent") monotonic, align 4{{$}} +// AMDGCNSPIRV: atomicrmw fadd ptr addrspace(3) %out, float %src syncscope("device") monotonic, align 4{{$}} // CHECK: atomicrmw fadd ptr addrspace(3) %out, float %src syncscope("workgroup") monotonic, align 4{{$}} -// CHECK: atomicrmw fadd ptr addrspace(3) %out, float %src syncscope("wavefront") monotonic, align 4{{$}} +// GCN: atomicrmw fadd ptr addrspace(3) %out, float %src syncscope("wavefront") monotonic, align 4{{$}} +// AMDGCNSPIRV: atomicrmw fadd ptr addrspace(3) %out, float %src syncscope("subgroup") monotonic, align 4{{$}} // CHECK: atomicrmw fadd ptr addrspace(3) %out, float %src syncscope("singlethread") monotonic, align 4{{$}} // CHECK: atomicrmw fadd ptr addrspace(3) %out, float %src monotonic, align 4{{$}} #if !defined(__SPIRV__) @@ -293,9 +295,11 @@ void test_ds_faddf(local float *out, float src) { // CHECK: atomicrmw fmin ptr addrspace(3) %out, float %src seq_cst, align 4{{$}} // CHECK: atomicrmw fmin ptr addrspace(3) %out, float %src seq_cst, align 4{{$}} -// CHECK: atomicrmw fmin ptr addrspace(3) %out, float %src syncscope("agent") monotonic, align 4{{$}} +// GCN: atomicrmw fmin ptr addrspace(3) %out, float %src syncscope("agent") monotonic, align 4{{$}} +// AMDGCNSPIRV: atomicrmw fmin ptr addrspace(3) %out, float %src syncscope("device") monotonic, align 4{{$}} // CHECK: atomicrmw fmin ptr addrspace(3) %out, float %src syncscope("workgroup") monotonic, align 4{{$}} -// CHECK: atomicrmw fmin ptr addrspace(3) %out, float %src syncscope("wavefront") monotonic, align 4{{$}} +// GCN: atomicrmw fmin ptr addrspace(3) %out, float %src syncscope("wavefront") monotonic, align 4{{$}} +// AMDGCNSPIRV: atomicrmw fmin ptr addrspace(3) %out, float %src syncscope("subgroup") monotonic, align 4{{$}} // CHECK: atomicrmw fmin ptr addrspace(3) %out, float %src syncscope("singlethread") monotonic, align 4{{$}} // CHECK: atomicrmw fmin ptr addrspace(3) %out, float %src monotonic, align 4{{$}} @@ -334,9 +338,11 @@ void test_ds_fminf(__attribute__((address_space(3))) float *out, float src) { // CHECK: atomicrmw fmax ptr addrspace(3) %out, float %src seq_cst, align 4{{$}} // CHECK: atomicrmw fmax ptr addrspace(3) %out, float %src seq_cst, align 4{{$}} -// CHECK: atomicrmw fmax ptr addrspace(3) %out, float %src 
syncscope("agent") monotonic, align 4{{$}} +// GCN: atomicrmw fmax ptr addrspace(3) %out, float %src syncscope("agent") monotonic, align 4{{$}} +// AMDGCNSPIRV: atomicrmw fmax ptr addrspace(3) %out, float %src syncscope("device") monotonic, align 4{{$}} // CHECK: atomicrmw fmax ptr addrspace(3) %out, float %src syncscope("workgroup") monotonic, align 4{{$}} -// CHECK: atomicrmw fmax ptr addrspace(3) %out, float %src syncscope("wavefront") monotonic, align 4{{$}} +// GCN: atomicrmw fmax ptr addrspace(3) %out, float %src syncscope("wavefront") monotonic, align 4{{$}} +// AMDGCNSPIRV: atomicrmw fmax ptr addrspace(3) %out, float %src syncscope("subgroup") monotonic, align 4{{$}} // CHECK: atomicrmw fmax ptr addrspace(3) %out, float %src syncscope("singlethread") monotonic, align 4{{$}} // CHECK: atomicrmw fmax ptr addrspace(3) %out, float %src monotonic, align 4{{$}} diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl index 039d03237b530..ab0b0b936abdc 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl @@ -1231,7 +1231,8 @@ void test_atomic_inc_dec(__attribute__((address_space(3))) uint *lptr, __attribu // CHECK: atomicrmw udec_wrap ptr addrspace(3) %lptr, i32 %val syncscope("workgroup") seq_cst, align 4 res = __builtin_amdgcn_atomic_dec32(lptr, val, __ATOMIC_SEQ_CST, "workgroup"); - // CHECK: atomicrmw uinc_wrap ptr addrspace(1) %gptr, i32 %val syncscope("agent") seq_cst, align 4 + // CHECK-AMDGCN: atomicrmw uinc_wrap ptr addrspace(1) %gptr, i32 %val syncscope("agent") seq_cst, align 4 + // CHECK-SPIRV: atomicrmw uinc_wrap ptr addrspace(1) %gptr, i32 %val syncscope("device") seq_cst, align 4 res = __builtin_amdgcn_atomic_inc32(gptr, val, __ATOMIC_SEQ_CST, "agent"); // CHECK: atomicrmw udec_wrap ptr addrspace(1) %gptr, i32 %val seq_cst, align 4 diff --git a/clang/test/DebugInfo/CXX/lambda-capture-packs.cpp b/clang/test/DebugInfo/CXX/lambda-capture-packs.cpp new file mode 100644 index 0000000000000..021b85d4ce3a4 --- /dev/null +++ b/clang/test/DebugInfo/CXX/lambda-capture-packs.cpp @@ -0,0 +1,196 @@ +// RUN: %clang_cc1 -triple %itanium_abi_triple -emit-llvm \ +// RUN: -debug-info-kind=standalone -std=c++26 %s -o - | FileCheck %s + + +// CHECK: ![[PACK1:[0-9]+]] = distinct !DISubprogram(name: "capture_pack" +// CHECK: ![[PACK2:[0-9]+]] = distinct !DISubprogram(name: "capture_pack" +// CHECK: ![[PACK3:[0-9]+]] = distinct !DISubprogram(name: "capture_pack_and_locals" +// CHECK: ![[PACK4:[0-9]+]] = distinct !DISubprogram(name: "capture_pack_and_locals" +// CHECK: ![[PACK5:[0-9]+]] = distinct !DISubprogram(name: "capture_pack_and_this" +// CHECK: ![[PACK6:[0-9]+]] = distinct !DISubprogram(name: "capture_pack_and_this" +// CHECK: ![[PACK7:[0-9]+]] = distinct !DISubprogram(name: "capture_binding_and_param_pack" + +template +auto capture_pack(Args... 
args) { + return [args..., ...params = args] { + return 0; + }(); +} + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK1]] +// CHECK-SAME: elements: ![[PACK1_ELEMS:[0-9]+]] +// CHECK-DAG: ![[PACK1_ELEMS]] = !{![[PACK1_ARGS:[0-9]+]], ![[PACK1_PARAMS:[0-9]+]]} +// CHECK-DAG: ![[PACK1_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK1_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-NOT: DW_TAG_member + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK2]] +// CHECK-SAME: elements: ![[PACK2_ELEMS:[0-9]+]] +// CHECK: ![[PACK2_ELEMS]] = !{![[PACK2_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK2_ARGS]] +// CHECK-SAME: ![[PACK2_PARAMS:[0-9]+]] +// CHECK-SAME: ![[PACK2_PARAMS]]} +// CHECK-DAG: ![[PACK2_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK2_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-NOT: DW_TAG_member + +template +auto capture_pack_and_locals(int x, Args... args) { + int w = 0; + return [=, &args..., &x, ...params = args] { + return w; + }(); +} + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK3]] +// CHECK-SAME: elements: ![[PACK3_ELEMS:[0-9]+]] +// CHECK: ![[PACK3_ELEMS]] = !{![[PACK3_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK3_X:[0-9]+]] +// CHECK-SAME: ![[PACK3_PARAMS:[0-9]+]] +// CHECK-SAME: ![[PACK3_W:[0-9]+]]} +// CHECK-DAG: ![[PACK3_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: !DIDerivedType(tag: DW_TAG_reference_type +// CHECK-DAG: ![[PACK3_X]] = !DIDerivedType(tag: DW_TAG_member, name: "x" +// CHECK-DAG: ![[PACK3_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-DAG: ![[PACK3_W]] = !DIDerivedType(tag: DW_TAG_member, name: "w" +// CHECK-NOT: DW_TAG_member + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK4]] +// CHECK-SAME: elements: ![[PACK4_ELEMS:[0-9]+]] +// CHECK: ![[PACK4_ELEMS]] = !{![[PACK4_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK4_ARGS]] +// CHECK-SAME: ![[PACK4_X:[0-9]+]] +// CHECK-SAME: ![[PACK4_PARAMS:[0-9]+]] +// CHECK-SAME: ![[PACK4_PARAMS]] +// CHECK-SAME: ![[PACK4_W:[0-9]+]]} +// CHECK-DAG: ![[PACK4_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK4_X]] = !DIDerivedType(tag: DW_TAG_member, name: "x" +// CHECK-DAG: ![[PACK4_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-DAG: ![[PACK4_W]] = !DIDerivedType(tag: DW_TAG_member, name: "w" +// CHECK-NOT: DW_TAG_member + +struct Foo { + template + auto capture_pack_and_this(Args... 
args) { + auto val1 = [this, args..., ...params = args] { + return w; + }(); + + auto val2 = [args..., this, ...params = args] { + return w; + }(); + + auto val3 = [args..., ...params = args, this] { + return w; + }(); + + return val1 + val2 + val3; + } + + int w = 10; +} f; + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK5]] +// CHECK-SAME: elements: ![[PACK5a_ELEMS:[0-9]+]] +// CHECK: ![[PACK5a_ELEMS]] = !{![[PACK5a_THIS:[0-9]+]] +// CHECK-SAME: ![[PACK5a_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK5a_PARAMS:[0-9]+]]} +// CHECK-DAG: ![[PACK5a_THIS]] = !DIDerivedType(tag: DW_TAG_member, name: "this" +// CHECK-DAG: ![[PACK5a_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK5a_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-NOT: DW_TAG_member + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK5]] +// CHECK-SAME: elements: ![[PACK5b_ELEMS:[0-9]+]] +// CHECK: ![[PACK5b_ELEMS]] = !{![[PACK5b_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK5b_THIS:[0-9]+]] +// CHECK-SAME: ![[PACK5b_PARAMS:[0-9]+]]} +// CHECK-DAG: ![[PACK5b_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK5b_THIS]] = !DIDerivedType(tag: DW_TAG_member, name: "this" +// CHECK-DAG: ![[PACK5b_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-NOT: DW_TAG_member + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK5]] +// CHECK: elements: ![[PACK5c_ELEMS:[0-9]+]] +// CHECK-NEXT: ![[PACK5c_ELEMS]] = !{![[PACK5c_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK5c_PARAMS:[0-9]+]] +// CHECK-SAME: ![[PACK5c_THIS:[0-9]+]]} +// CHECK-DAG: ![[PACK5c_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK5c_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-DAG: ![[PACK5c_THIS]] = !DIDerivedType(tag: DW_TAG_member, name: "this" +// CHECK-NOT: DW_TAG_member + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK6]] +// CHECK-SAME: elements: ![[PACK6a_ELEMS:[0-9]+]] +// CHECK: ![[PACK6a_ELEMS]] = !{![[PACK6a_THIS:[0-9]+]] +// CHECK-SAME: ![[PACK6a_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK6a_ARGS]] +// CHECK-SAME: ![[PACK6a_PARAMS:[0-9]+]] +// CHECK-SAME: ![[PACK6a_PARAMS]] +// CHECK-DAG: ![[PACK6a_THIS]] = !DIDerivedType(tag: DW_TAG_member, name: "this" +// CHECK-DAG: ![[PACK6a_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK6a_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-NOT: DW_TAG_member + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK6]] +// CHECK-SAME: elements: ![[PACK6b_ELEMS:[0-9]+]] +// CHECK: ![[PACK6b_ELEMS]] = !{![[PACK6b_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK6b_ARGS]] +// CHECK-SAME: ![[PACK6b_THIS:[0-9]+]] +// CHECK-SAME: ![[PACK6b_PARAMS:[0-9]+]] +// CHECK-SAME: ![[PACK6b_PARAMS]]} +// CHECK-DAG: ![[PACK6b_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK6b_THIS]] = !DIDerivedType(tag: DW_TAG_member, name: "this" +// CHECK-DAG: ![[PACK6b_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-NOT: DW_TAG_member + +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK6]] +// CHECK-SAME: elements: ![[PACK6c_ELEMS:[0-9]+]] +// CHECK: ![[PACK6c_ELEMS]] = !{![[PACK6c_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK6c_ARGS]] +// CHECK-SAME: ![[PACK6c_PARAMS:[0-9]+]] +// CHECK-SAME: ![[PACK6c_PARAMS]] +// CHECK-SAME: ![[PACK6c_THIS:[0-9]+]]} +// CHECK-DAG: ![[PACK6c_ARGS]] = 
!DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK6c_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-DAG: ![[PACK6c_THIS]] = !DIDerivedType(tag: DW_TAG_member, name: "this" +// CHECK-NOT: DW_TAG_member + +template +auto capture_binding_and_param_pack(Args... args) { + struct C { int x = 2; int y = 3; }; + + auto [x, ...e] = C(); + + return [&, args..., x, ...params = args, + ...es = e] { + return e...[0] + es...[0]; + }(); +} + +// CHECK: distinct !DICompositeType(tag: DW_TAG_structure_type, name: "C" +// CHECK: distinct !DICompositeType(tag: DW_TAG_class_type, scope: ![[PACK7]] +// CHECK-SAME: elements: ![[PACK7_ELEMS:[0-9]+]] +// CHECK: ![[PACK7_ELEMS]] = !{![[PACK7_ARGS:[0-9]+]] +// CHECK-SAME: ![[PACK7_ARGS]] +// CHECK-SAME: ![[PACK7_X:[0-9]+]] +// CHECK-SAME: ![[PACK7_PARAMS:[0-9]+]] +// CHECK-SAME: ![[PACK7_PARAMS]] +// CHECK-SAME: ![[PACK7_ES:[0-9]+]] +// CHECK-SAME: ![[PACK7_E:[0-9]+]]} +// CHECK-DAG: ![[PACK7_ARGS]] = !DIDerivedType(tag: DW_TAG_member, name: "args" +// CHECK-DAG: ![[PACK7_X]] = !DIDerivedType(tag: DW_TAG_member, name: "x" +// CHECK-DAG: ![[PACK7_PARAMS]] = !DIDerivedType(tag: DW_TAG_member, name: "params" +// CHECK-DAG: ![[PACK7_ES]] = !DIDerivedType(tag: DW_TAG_member, name: "es" +// CHECK-DAG: ![[PACK7_E]] = !DIDerivedType(tag: DW_TAG_member, name: "e" +// CHECK-NOT: DW_TAG_member + +int main() { + return capture_pack(1) + + capture_pack(1, 2) + + capture_pack_and_locals(1, 2) + + capture_pack_and_locals(1, 2, 3) + + f.capture_pack_and_this(1) + + f.capture_pack_and_this(1, 2) + + capture_binding_and_param_pack(1, 2); +} diff --git a/clang/test/DebugInfo/CXX/structured-binding.cpp b/clang/test/DebugInfo/CXX/structured-binding.cpp index 95457f477deeb..51818e7e16f00 100644 --- a/clang/test/DebugInfo/CXX/structured-binding.cpp +++ b/clang/test/DebugInfo/CXX/structured-binding.cpp @@ -9,8 +9,8 @@ // CHECK: #dbg_declare(ptr %z1, ![[VAR_5:[0-9]+]], !DIExpression() // CHECK: #dbg_declare(ptr %z2, ![[VAR_6:[0-9]+]], !DIExpression() // CHECK: #dbg_declare(ptr %k, ![[VAR_7:[0-9]+]], !DIExpression() -// CHECK: #dbg_declare(ptr %v, ![[VAR_8:[0-9]+]], !DIExpression() -// CHECK: #dbg_declare(ptr %w, ![[VAR_9:[0-9]+]], !DIExpression() +// CHECK: #dbg_declare(ptr %v{{[0-9]*}}, ![[VAR_8:[0-9]+]], !DIExpression() +// CHECK: #dbg_declare(ptr %w{{[0-9]*}}, ![[VAR_9:[0-9]+]], !DIExpression() // CHECK: #dbg_declare(ptr %m, ![[VAR_10:[0-9]+]], !DIExpression() // CHECK: #dbg_declare(ptr %n, ![[VAR_11:[0-9]+]], !DIExpression() // CHECK: #dbg_declare(ptr %s, ![[VAR_12:[0-9]+]], !DIExpression() diff --git a/clang/test/DebugInfo/KeyInstructions/atomic.c b/clang/test/DebugInfo/KeyInstructions/atomic.c index c25f4385843b9..37355d0f6edb6 100644 --- a/clang/test/DebugInfo/KeyInstructions/atomic.c +++ b/clang/test/DebugInfo/KeyInstructions/atomic.c @@ -29,79 +29,80 @@ void fun() { int r3 = __atomic_test_and_set(&x, __ATOMIC_RELAXED); // CHECK-NEXT: %6 = atomicrmw xchg ptr @x, i8 1 monotonic, align 4, !dbg [[LINE30:!.*]] -// CHECK-NEXT: %tobool = icmp ne i8 %6, 0, !dbg [[LINE30_G7R2:!.*]] -// CHECK-NEXT: store i1 %tobool, ptr %atomic-temp3, align 1, !dbg [[LINE30_G7R1:!.*]] +// CHECK-NEXT: %tobool = icmp ne i8 %6, 0, !dbg [[LINE30_G7R3:!.*]] +// CHECK-NEXT: %storedv = zext i1 %tobool to i8, !dbg [[LINE30_G7R2:!.*]] +// CHECK-NEXT: store i8 %storedv, ptr %atomic-temp3, align 1, !dbg [[LINE30_G7R1:!.*]] // CHECK-NEXT: %7 = load i8, ptr %atomic-temp3, align 1, !dbg [[LINE30_G6R4:!.*]] // CHECK-NEXT: %loadedv = trunc i8 %7 to i1, !dbg [[LINE30_G6R3:!.*]] // 
CHECK-NEXT: %conv = zext i1 %loadedv to i32, !dbg [[LINE30_G6R2:!.*]] // CHECK-NEXT: store i32 %conv, ptr %r3, align 4, !dbg [[LINE30_G6R1:!.*]] __atomic_clear(&x, __ATOMIC_RELAXED); -// CHECK-NEXT: store atomic i8 0, ptr @x monotonic, align 4, !dbg [[LINE39_G8R1:!.*]] +// CHECK-NEXT: store atomic i8 0, ptr @x monotonic, align 4, !dbg [[LINE40_G8R1:!.*]] int r4 = __c11_atomic_exchange(&x, 2,__ATOMIC_RELAXED); -// CHECK-NEXT: store i32 2, ptr %.atomictmp4, align 4, !dbg [[LINE42_G10R1:!.*]] -// CHECK-NEXT: %8 = load i32, ptr %.atomictmp4, align 4, !dbg [[LINE42:!.*]] -// CHECK-NEXT: %9 = atomicrmw xchg ptr @x, i32 %8 monotonic, align 4, !dbg [[LINE42_G10R2:!.*]] -// CHECK-NEXT: store i32 %9, ptr %atomic-temp5, align 4, !dbg [[LINE42_G10R1:!.*]] -// CHECK-NEXT: %10 = load i32, ptr %atomic-temp5, align 4, !dbg [[LINE42_G9R2:!.*]] -// CHECK-NEXT: store i32 %10, ptr %r4, align 4, !dbg [[LINE42_G9R1:!.*]] +// CHECK-NEXT: store i32 2, ptr %.atomictmp4, align 4, !dbg [[LINE43_G10R1:!.*]] +// CHECK-NEXT: %8 = load i32, ptr %.atomictmp4, align 4, !dbg [[LINE43:!.*]] +// CHECK-NEXT: %9 = atomicrmw xchg ptr @x, i32 %8 monotonic, align 4, !dbg [[LINE43_G10R2:!.*]] +// CHECK-NEXT: store i32 %9, ptr %atomic-temp5, align 4, !dbg [[LINE43_G10R1:!.*]] +// CHECK-NEXT: %10 = load i32, ptr %atomic-temp5, align 4, !dbg [[LINE43_G9R2:!.*]] +// CHECK-NEXT: store i32 %10, ptr %r4, align 4, !dbg [[LINE43_G9R1:!.*]] int r5 = __atomic_compare_exchange(&y, &y, &y, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); -// CHECK-NEXT: %11 = load i32, ptr @y, align 4, !dbg [[LINE50:!.*]] -// CHECK-NEXT: %12 = load i32, ptr @y, align 4, !dbg [[LINE50]] -// CHECK-NEXT: %13 = cmpxchg ptr @y, i32 %11, i32 %12 monotonic monotonic, align 4, !dbg [[LINE50]] -// CHECK-NEXT: %14 = extractvalue { i32, i1 } %13, 0, !dbg [[LINE50_G12R2:!.*]] -// CHECK-NEXT: %15 = extractvalue { i32, i1 } %13, 1, !dbg [[LINE50_G12R3:!.*]] -// CHECK-NEXT: br i1 %15, label %cmpxchg.continue, label %cmpxchg.store_expected, !dbg [[LINE50]] +// CHECK-NEXT: %11 = load i32, ptr @y, align 4, !dbg [[LINE51:!.*]] +// CHECK-NEXT: %12 = load i32, ptr @y, align 4, !dbg [[LINE51]] +// CHECK-NEXT: %13 = cmpxchg ptr @y, i32 %11, i32 %12 monotonic monotonic, align 4, !dbg [[LINE51]] +// CHECK-NEXT: %14 = extractvalue { i32, i1 } %13, 0, !dbg [[LINE51_G12R2:!.*]] +// CHECK-NEXT: %15 = extractvalue { i32, i1 } %13, 1, !dbg [[LINE51_G12R3:!.*]] +// CHECK-NEXT: br i1 %15, label %cmpxchg.continue, label %cmpxchg.store_expected, !dbg [[LINE51]] // CHECK: cmpxchg.store_expected: -// CHECK-NEXT: store i32 %14, ptr @y, align 4, !dbg [[LINE50_G12R1:!.*]] -// CHECK-NEXT: br label %cmpxchg.continue, !dbg [[LINE50]] +// CHECK-NEXT: store i32 %14, ptr @y, align 4, !dbg [[LINE51_G12R1:!.*]] +// CHECK-NEXT: br label %cmpxchg.continue, !dbg [[LINE51]] // CHECK: cmpxchg.continue: -// CHECK-NEXT: %storedv = zext i1 %15 to i8, !dbg [[LINE50_G12R2]] -// CHECK-NEXT: store i8 %storedv, ptr %cmpxchg.bool, align 1, !dbg [[LINE50_G12R1]] -// CHECK-NEXT: %16 = load i8, ptr %cmpxchg.bool, align 1, !dbg [[LINE50_G11R4:!.*]] -// CHECK-NEXT: %loadedv6 = trunc i8 %16 to i1, !dbg [[LINE50_G11R3:!.*]] -// CHECK-NEXT: %conv7 = zext i1 %loadedv6 to i32, !dbg [[LINE50_G11R2:!.*]] -// CHECK-NEXT: store i32 %conv7, ptr %r5, align 4, !dbg [[LINE50_G11R1:!.*]] +// CHECK-NEXT: %storedv6 = zext i1 %15 to i8, !dbg [[LINE51_G12R2]] +// CHECK-NEXT: store i8 %storedv6, ptr %cmpxchg.bool, align 1, !dbg [[LINE51_G12R1]] +// CHECK-NEXT: %16 = load i8, ptr %cmpxchg.bool, align 1, !dbg [[LINE51_G11R4:!.*]] +// CHECK-NEXT: %loadedv7 
= trunc i8 %16 to i1, !dbg [[LINE51_G11R3:!.*]] +// CHECK-NEXT: %conv8 = zext i1 %loadedv7 to i32, !dbg [[LINE51_G11R2:!.*]] +// CHECK-NEXT: store i32 %conv8, ptr %r5, align 4, !dbg [[LINE51_G11R1:!.*]] int r6 = __c11_atomic_compare_exchange_strong(&x, &y, 42, __ATOMIC_RELAXED, __ATOMIC_RELAXED); -// CHECK-NEXT: store i32 42, ptr %.atomictmp8, align 4, !dbg [[LINE68_G14R1:!.*]] -// CHECK-NEXT: %17 = load i32, ptr @y, align 4, !dbg [[LINE68:!.*]] -// CHECK-NEXT: %18 = load i32, ptr %.atomictmp8, align 4, !dbg [[LINE68]] -// CHECK-NEXT: %19 = cmpxchg ptr @x, i32 %17, i32 %18 monotonic monotonic, align 4, !dbg [[LINE68]] -// CHECK-NEXT: %20 = extractvalue { i32, i1 } %19, 0, !dbg [[LINE68_G14R2:!.*]] -// CHECK-NEXT: %21 = extractvalue { i32, i1 } %19, 1, !dbg [[LINE68_G14R3:!.*]] -// CHECK-NEXT: br i1 %21, label %cmpxchg.continue11, label %cmpxchg.store_expected10, !dbg [[LINE68]] -// CHECK: cmpxchg.store_expected10: -// CHECK-NEXT: store i32 %20, ptr @y, align 4, !dbg [[LINE68_G14R1:!.*]] -// CHECK-NEXT: br label %cmpxchg.continue11, !dbg [[LINE68]] -// CHECK: cmpxchg.continue11: -// CHECK-NEXT: %storedv12 = zext i1 %21 to i8, !dbg [[LINE68_G14R2]] -// CHECK-NEXT: store i8 %storedv12, ptr %cmpxchg.bool9, align 1, !dbg [[LINE68_G14R1:!.*]] -// CHECK-NEXT: %22 = load i8, ptr %cmpxchg.bool9, align 1, !dbg [[LINE68_G13R4:!.*]] -// CHECK-NEXT: %loadedv13 = trunc i8 %22 to i1, !dbg [[LINE68_G13R3:!.*]] -// CHECK-NEXT: %conv14 = zext i1 %loadedv13 to i32, !dbg [[LINE68_G13R2:!.*]] -// CHECK-NEXT: store i32 %conv14, ptr %r6, align 4, !dbg [[LINE68_G13R1:!.*]] +// CHECK-NEXT: store i32 42, ptr %.atomictmp9, align 4, !dbg [[LINE69_G14R1:!.*]] +// CHECK-NEXT: %17 = load i32, ptr @y, align 4, !dbg [[LINE69:!.*]] +// CHECK-NEXT: %18 = load i32, ptr %.atomictmp9, align 4, !dbg [[LINE69]] +// CHECK-NEXT: %19 = cmpxchg ptr @x, i32 %17, i32 %18 monotonic monotonic, align 4, !dbg [[LINE69]] +// CHECK-NEXT: %20 = extractvalue { i32, i1 } %19, 0, !dbg [[LINE69_G14R2:!.*]] +// CHECK-NEXT: %21 = extractvalue { i32, i1 } %19, 1, !dbg [[LINE69_G14R3:!.*]] +// CHECK-NEXT: br i1 %21, label %cmpxchg.continue12, label %cmpxchg.store_expected11, !dbg [[LINE69]] +// CHECK: cmpxchg.store_expected11: +// CHECK-NEXT: store i32 %20, ptr @y, align 4, !dbg [[LINE69_G14R1:!.*]] +// CHECK-NEXT: br label %cmpxchg.continue12, !dbg [[LINE69]] +// CHECK: cmpxchg.continue12: +// CHECK-NEXT: %storedv13 = zext i1 %21 to i8, !dbg [[LINE69_G14R2]] +// CHECK-NEXT: store i8 %storedv13, ptr %cmpxchg.bool10, align 1, !dbg [[LINE69_G14R1:!.*]] +// CHECK-NEXT: %22 = load i8, ptr %cmpxchg.bool10, align 1, !dbg [[LINE69_G13R4:!.*]] +// CHECK-NEXT: %loadedv14 = trunc i8 %22 to i1, !dbg [[LINE69_G13R3:!.*]] +// CHECK-NEXT: %conv15 = zext i1 %loadedv14 to i32, !dbg [[LINE69_G13R2:!.*]] +// CHECK-NEXT: store i32 %conv15, ptr %r6, align 4, !dbg [[LINE69_G13R1:!.*]] int r7 = __c11_atomic_compare_exchange_weak(&x, &y, 43, __ATOMIC_RELAXED, __ATOMIC_RELAXED); -// CHECK-NEXT: store i32 43, ptr %.atomictmp15, align 4, !dbg [[LINE87_G16R1:!.*]] -// CHECK-NEXT: %23 = load i32, ptr @y, align 4, !dbg [[LINE87:!.*]] -// CHECK-NEXT: %24 = load i32, ptr %.atomictmp15, align 4, !dbg [[LINE87]] -// CHECK-NEXT: %25 = cmpxchg weak ptr @x, i32 %23, i32 %24 monotonic monotonic, align 4, !dbg [[LINE87]] -// CHECK-NEXT: %26 = extractvalue { i32, i1 } %25, 0, !dbg [[LINE87_G16R2:!.*]] -// CHECK-NEXT: %27 = extractvalue { i32, i1 } %25, 1, !dbg [[LINE87_G16R3:!.*]] -// CHECK-NEXT: br i1 %27, label %cmpxchg.continue18, label %cmpxchg.store_expected17, !dbg [[LINE87]] -// 
CHECK: cmpxchg.store_expected17: -// CHECK-NEXT: store i32 %26, ptr @y, align 4, !dbg [[LINE87_G16R1]] -// CHECK-NEXT: br label %cmpxchg.continue18, !dbg [[LINE87]] -// CHECK: cmpxchg.continue18: -// CHECK-NEXT: %storedv19 = zext i1 %27 to i8, !dbg [[LINE87_G16R2]] -// CHECK-NEXT: store i8 %storedv19, ptr %cmpxchg.bool16, align 1, !dbg [[LINE87_G16R1]] -// CHECK-NEXT: %28 = load i8, ptr %cmpxchg.bool16, align 1, !dbg [[LINE87_G15R4:!.*]] -// CHECK-NEXT: %loadedv20 = trunc i8 %28 to i1, !dbg [[LINE87_G15R3:!.*]] -// CHECK-NEXT: %conv21 = zext i1 %loadedv20 to i32, !dbg [[LINE87_G15R2:!.*]] -// CHECK-NEXT: store i32 %conv21, ptr %r7, align 4, !dbg [[LINE87_G15R1:!.*]] +// CHECK-NEXT: store i32 43, ptr %.atomictmp16, align 4, !dbg [[LINE88_G16R1:!.*]] +// CHECK-NEXT: %23 = load i32, ptr @y, align 4, !dbg [[LINE88:!.*]] +// CHECK-NEXT: %24 = load i32, ptr %.atomictmp16, align 4, !dbg [[LINE88]] +// CHECK-NEXT: %25 = cmpxchg weak ptr @x, i32 %23, i32 %24 monotonic monotonic, align 4, !dbg [[LINE88]] +// CHECK-NEXT: %26 = extractvalue { i32, i1 } %25, 0, !dbg [[LINE88_G16R2:!.*]] +// CHECK-NEXT: %27 = extractvalue { i32, i1 } %25, 1, !dbg [[LINE88_G16R3:!.*]] +// CHECK-NEXT: br i1 %27, label %cmpxchg.continue19, label %cmpxchg.store_expected18, !dbg [[LINE88]] +// CHECK: cmpxchg.store_expected18: +// CHECK-NEXT: store i32 %26, ptr @y, align 4, !dbg [[LINE88_G16R1]] +// CHECK-NEXT: br label %cmpxchg.continue19, !dbg [[LINE88]] +// CHECK: cmpxchg.continue19: +// CHECK-NEXT: %storedv20 = zext i1 %27 to i8, !dbg [[LINE88_G16R2]] +// CHECK-NEXT: store i8 %storedv20, ptr %cmpxchg.bool17, align 1, !dbg [[LINE88_G16R1]] +// CHECK-NEXT: %28 = load i8, ptr %cmpxchg.bool17, align 1, !dbg [[LINE88_G15R4:!.*]] +// CHECK-NEXT: %loadedv21 = trunc i8 %28 to i1, !dbg [[LINE88_G15R3:!.*]] +// CHECK-NEXT: %conv22 = zext i1 %loadedv21 to i32, !dbg [[LINE88_G15R2:!.*]] +// CHECK-NEXT: store i32 %conv22, ptr %r7, align 4, !dbg [[LINE88_G15R1:!.*]] // CHECK: ret{{.*}}, !dbg [[RET:!.*]] } @@ -121,6 +122,7 @@ void fun() { // CHECK: [[LINE25_G5R2]] = !DILocation(line: 25, scope: ![[#]], atomGroup: 5, atomRank: 2) // CHECK: [[LINE30]] = !DILocation(line: 30, scope: ![[#]]) +// CHECK: [[LINE30_G7R3]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 7, atomRank: 3) // CHECK: [[LINE30_G7R2]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 7, atomRank: 2) // CHECK: [[LINE30_G7R1]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 7, atomRank: 1) // CHECK: [[LINE30_G6R4]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 6, atomRank: 4) @@ -128,39 +130,39 @@ void fun() { // CHECK: [[LINE30_G6R2]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 6, atomRank: 2) // CHECK: [[LINE30_G6R1]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 6, atomRank: 1) -// CHECK: [[LINE39_G8R1]] = !DILocation(line: 39, scope: ![[#]], atomGroup: 8, atomRank: 1) - -// CHECK: [[LINE42_G10R1]] = !DILocation(line: 42, scope: ![[#]], atomGroup: 10, atomRank: 1) -// CHECK: [[LINE42]] = !DILocation(line: 42, scope: ![[#]]) -// CHECK: [[LINE42_G10R2]] = !DILocation(line: 42, scope: ![[#]], atomGroup: 10, atomRank: 2) -// CHECK: [[LINE42_G9R2]] = !DILocation(line: 42, scope: ![[#]], atomGroup: 9, atomRank: 2) -// CHECK: [[LINE42_G9R1]] = !DILocation(line: 42, scope: ![[#]], atomGroup: 9, atomRank: 1) - -// CHECK: [[LINE50]] = !DILocation(line: 50, scope: ![[#]]) -// CHECK: [[LINE50_G12R2]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 12, atomRank: 2) -// CHECK: [[LINE50_G12R3]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 12, atomRank: 3) 
-// CHECK: [[LINE50_G12R1]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 12, atomRank: 1) -// CHECK: [[LINE50_G11R4]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 11, atomRank: 4) -// CHECK: [[LINE50_G11R3]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 11, atomRank: 3) -// CHECK: [[LINE50_G11R2]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 11, atomRank: 2) -// CHECK: [[LINE50_G11R1]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 11, atomRank: 1) - -// CHECK: [[LINE68_G14R1]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 14, atomRank: 1) -// CHECK: [[LINE68]] = !DILocation(line: 68, scope: ![[#]]) -// CHECK: [[LINE68_G14R2]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 14, atomRank: 2) -// CHECK: [[LINE68_G14R3]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 14, atomRank: 3) -// CHECK: [[LINE68_G13R4]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 13, atomRank: 4) -// CHECK: [[LINE68_G13R3]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 13, atomRank: 3) -// CHECK: [[LINE68_G13R2]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 13, atomRank: 2) -// CHECK: [[LINE68_G13R1]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 13, atomRank: 1) - -// CHECK: [[LINE87_G16R1]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 16, atomRank: 1) -// CHECK: [[LINE87]] = !DILocation(line: 87, scope: ![[#]]) -// CHECK: [[LINE87_G16R2]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 16, atomRank: 2) -// CHECK: [[LINE87_G16R3]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 16, atomRank: 3) -// CHECK: [[LINE87_G15R4]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 15, atomRank: 4) -// CHECK: [[LINE87_G15R3]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 15, atomRank: 3) -// CHECK: [[LINE87_G15R2]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 15, atomRank: 2) -// CHECK: [[LINE87_G15R1]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 15, atomRank: 1) +// CHECK: [[LINE40_G8R1]] = !DILocation(line: 40, scope: ![[#]], atomGroup: 8, atomRank: 1) + +// CHECK: [[LINE43_G10R1]] = !DILocation(line: 43, scope: ![[#]], atomGroup: 10, atomRank: 1) +// CHECK: [[LINE43]] = !DILocation(line: 43, scope: ![[#]]) +// CHECK: [[LINE43_G10R2]] = !DILocation(line: 43, scope: ![[#]], atomGroup: 10, atomRank: 2) +// CHECK: [[LINE43_G9R2]] = !DILocation(line: 43, scope: ![[#]], atomGroup: 9, atomRank: 2) +// CHECK: [[LINE43_G9R1]] = !DILocation(line: 43, scope: ![[#]], atomGroup: 9, atomRank: 1) + +// CHECK: [[LINE51]] = !DILocation(line: 51, scope: ![[#]]) +// CHECK: [[LINE51_G12R2]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 12, atomRank: 2) +// CHECK: [[LINE51_G12R3]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 12, atomRank: 3) +// CHECK: [[LINE51_G12R1]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 12, atomRank: 1) +// CHECK: [[LINE51_G11R4]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 11, atomRank: 4) +// CHECK: [[LINE51_G11R3]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 11, atomRank: 3) +// CHECK: [[LINE51_G11R2]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 11, atomRank: 2) +// CHECK: [[LINE51_G11R1]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 11, atomRank: 1) + +// CHECK: [[LINE69_G14R1]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 14, atomRank: 1) +// CHECK: [[LINE69]] = !DILocation(line: 69, scope: ![[#]]) +// CHECK: [[LINE69_G14R2]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 14, atomRank: 2) +// CHECK: [[LINE69_G14R3]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 14, atomRank: 3) +// CHECK: 
[[LINE69_G13R4]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 13, atomRank: 4) +// CHECK: [[LINE69_G13R3]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 13, atomRank: 3) +// CHECK: [[LINE69_G13R2]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 13, atomRank: 2) +// CHECK: [[LINE69_G13R1]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 13, atomRank: 1) + +// CHECK: [[LINE88_G16R1]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 16, atomRank: 1) +// CHECK: [[LINE88]] = !DILocation(line: 88, scope: ![[#]]) +// CHECK: [[LINE88_G16R2]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 16, atomRank: 2) +// CHECK: [[LINE88_G16R3]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 16, atomRank: 3) +// CHECK: [[LINE88_G15R4]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 15, atomRank: 4) +// CHECK: [[LINE88_G15R3]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 15, atomRank: 3) +// CHECK: [[LINE88_G15R2]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 15, atomRank: 2) +// CHECK: [[LINE88_G15R1]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 15, atomRank: 1) // CHECK: [[RET]] = !DILocation({{.*}}, atomGroup: 17, atomRank: 1) diff --git a/clang/test/Driver/Inputs/basic_gpu_tree/bin/keep b/clang/test/Driver/Inputs/basic_gpu_tree/bin/keep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/basic_gpu_tree/include/amdgcn-amd-amdhsa/.keep b/clang/test/Driver/Inputs/basic_gpu_tree/include/amdgcn-amd-amdhsa/.keep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/basic_gpu_tree/include/nvptx64-nvidia-cuda/.keep b/clang/test/Driver/Inputs/basic_gpu_tree/include/nvptx64-nvidia-cuda/.keep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/basic_gpu_tree/lib/amdgcn-amd-amdhsa/.keep b/clang/test/Driver/Inputs/basic_gpu_tree/lib/amdgcn-amd-amdhsa/.keep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/basic_gpu_tree/lib/nvptx64-nvidia-cuda/.keep b/clang/test/Driver/Inputs/basic_gpu_tree/lib/nvptx64-nvidia-cuda/.keep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Ofast.c b/clang/test/Driver/Ofast.c index 612478cc89558..e04ce036638f9 100644 --- a/clang/test/Driver/Ofast.c +++ b/clang/test/Driver/Ofast.c @@ -2,7 +2,7 @@ // RUN: %clang -c -O2 -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s // RUN: %clang -c -fno-fast-math -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s // RUN: %clang -c -fno-strict-aliasing -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s -// RUN: %clang -c -fno-vectorize -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s +// RUN: %clang -c -fno-vectorize -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST-NO-VECTORIZE %s // RUN: %clang -c -Ofast -O2 -### -Werror %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST-O2 \ // RUN: %if target={{.*-windows-msvc.*}} %{ --check-prefix=CHECK-OFAST-O2-ALIASING-MSVC %} \ // RUN: %else %{ --check-prefix=CHECK-OFAST-O2-ALIASING %} %s diff --git a/clang/test/Driver/clang_f_opts.c b/clang/test/Driver/clang_f_opts.c index bdeb747aa66a3..94b983f14e3ef 100644 --- a/clang/test/Driver/clang_f_opts.c +++ b/clang/test/Driver/clang_f_opts.c @@ -156,7 +156,7 @@ // RUN: %clang -### -S -O2 %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s // RUN: %clang -### -S -Os %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s // RUN: %clang -### -S -O3 %s 2>&1 | FileCheck 
-check-prefix=CHECK-VECTORIZE %s -// RUN: %clang -### -S -fno-vectorize -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s +// RUN: %clang -### -S -fno-vectorize -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-NO-VECTORIZE %s // RUN: %clang -### -S -O1 -fvectorize %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s // RUN: %clang -### -S -Ofast %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s // RUN: %clang -### -S %s 2>&1 | FileCheck -check-prefix=CHECK-NO-VECTORIZE %s @@ -179,7 +179,7 @@ // RUN: %clang -### -S -Os %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s // RUN: %clang -### -S -Oz %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s // RUN: %clang -### -S -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s -// RUN: %clang -### -S -fno-slp-vectorize -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s +// RUN: %clang -### -S -fno-slp-vectorize -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-NO-SLP-VECTORIZE %s // RUN: %clang -### -S -O1 -fslp-vectorize %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s // RUN: %clang -### -S -Ofast %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s // RUN: %clang -### -S %s 2>&1 | FileCheck -check-prefix=CHECK-NO-SLP-VECTORIZE %s diff --git a/clang/test/Driver/env.c b/clang/test/Driver/env.c index 399b89090eb86..56c037c222e54 100644 --- a/clang/test/Driver/env.c +++ b/clang/test/Driver/env.c @@ -1,6 +1,6 @@ // Some assertions in this test use Linux style (/) file paths. // UNSUPPORTED: system-windows -// RUN: bash -c env | grep LD_LIBRARY_PATH | tr -d '\n' > %t.ld_library_path +// RUN: bash -c env | grep LD_LIBRARY_PATH | sed -ne 's/^.*=//p' | tr -d '\n' > %t.ld_library_path // The PATH variable is heavily used when trying to find a linker. // RUN: env -i LC_ALL=C LD_LIBRARY_PATH="%{readfile:%t.ld_library_path}" CLANG_NO_DEFAULT_CONFIG=1 \ // RUN: %clang %s -### -o %t.o --target=i386-unknown-linux \ diff --git a/clang/test/Driver/gpu-libc-headers.c b/clang/test/Driver/gpu-libc-headers.c index 53c016837dde6..18029193edeba 100644 --- a/clang/test/Driver/gpu-libc-headers.c +++ b/clang/test/Driver/gpu-libc-headers.c @@ -1,18 +1,9 @@ -// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fopenmp=libomp --sysroot=./ \ -// RUN: -fopenmp-targets=amdgcn-amd-amdhsa -Xopenmp-target=amdgcn-amd-amdhsa --offload-arch=gfx908 \ -// RUN: -nogpulib %s 2>&1 | FileCheck %s --check-prefix=CHECK-HEADERS -// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fopenmp=libomp --sysroot=./ \ -// RUN: -fopenmp-targets=nvptx64-nvidia-cuda -Xopenmp-target=nvptx64-nvidia-cuda --offload-arch=sm_70 \ -// RUN: -nogpulib %s 2>&1 | FileCheck %s --check-prefix=CHECK-HEADERS -// CHECK-HEADERS: "-cc1"{{.*}}"-isysroot" "./"{{.*}}"-internal-isystem" "{{.*}}include{{.*}}llvm_libc_wrappers" -// CHECK-HEADERS: "-cc1"{{.*}}"-isysroot" "./"{{.*}}"-internal-isystem" "{{.*}}include{{.*}}llvm_libc_wrappers" - -// RUN: %clang -### --target=amdgcn-amd-amdhsa -mcpu=gfx90a --sysroot=./ \ -// RUN: -nogpulib %s 2>&1 | FileCheck %s --check-prefix=CHECK-HEADERS-AMDGPU -// RUN: %clang -### --target=nvptx64-nvidia-cuda -march=sm_89 --sysroot=./ \ -// RUN: -nogpulib %s 2>&1 | FileCheck %s --check-prefix=CHECK-HEADERS-NVPTX -// CHECK-HEADERS-AMDGPU: "-cc1"{{.*}}"-isysroot" "./"{{.*}}"-internal-isystem" "{{.*}}include{{.*}}amdgcn-amd-amdhsa" -// CHECK-HEADERS-NVPTX: "-cc1"{{.*}}"-isysroot" "./"{{.*}}"-internal-isystem" "{{.*}}include{{.*}}nvptx64-nvidia-cuda" +// RUN: %clang -### --target=amdgcn-amd-amdhsa -mcpu=gfx90a --sysroot=%S/Inputs/basic_gpu_tree \ +// 
RUN: -ccc-install-dir %S/Inputs/basic_gpu_tree/bin -nogpulib %s 2>&1 | FileCheck %s --check-prefix=CHECK-HEADERS-AMDGPU +// RUN: %clang -### --target=nvptx64-nvidia-cuda -march=sm_89 --sysroot=%S/Inputs/basic_gpu_tree \ +// RUN: -ccc-install-dir %S/Inputs/basic_gpu_tree/bin -nogpulib %s 2>&1 | FileCheck %s --check-prefix=CHECK-HEADERS-NVPTX +// CHECK-HEADERS-AMDGPU: "-cc1"{{.*}}"-isysroot"{{.*}}"-internal-isystem" "{{.*}}include{{.*}}amdgcn-amd-amdhsa" +// CHECK-HEADERS-NVPTX: "-cc1"{{.*}}"-isysroot"{{.*}}"-internal-isystem" "{{.*}}include{{.*}}nvptx64-nvidia-cuda" // RUN: %clang -### --target=amdgcn-amd-amdhsa -mcpu=gfx1030 -nogpulib \ // RUN: -nogpuinc %s 2>&1 | FileCheck %s --check-prefix=CHECK-HEADERS-DISABLED diff --git a/clang/test/Driver/linker-wrapper-image.c b/clang/test/Driver/linker-wrapper-image.c index c0de56d58196a..31476173cd370 100644 --- a/clang/test/Driver/linker-wrapper-image.c +++ b/clang/test/Driver/linker-wrapper-image.c @@ -1,6 +1,7 @@ // REQUIRES: x86-registered-target // REQUIRES: nvptx-registered-target // REQUIRES: amdgpu-registered-target +// REQUIRES: spirv-registered-target // RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.elf.o @@ -263,3 +264,36 @@ // HIP: while.end: // HIP-NEXT: ret void // HIP-NEXT: } + +// RUN: clang-offload-packager -o %t.out --image=file=%t.elf.o,kind=sycl,triple=spirv64-unknown-unknown,arch=generic +// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o \ +// RUN: -fembed-offload-object=%t.out +// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-linux-gnu \ +// RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=SYCL +// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-linux-gnu -r \ +// RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=SYCL + +// SYCL: %__sycl.tgt_device_image = type { i16, i8, i8, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr } +// SYCL-NEXT: %__sycl.tgt_bin_desc = type { i16, i16, ptr, ptr, ptr } + +// SYCL: @.sycl_offloading.target.0 = internal unnamed_addr constant [1 x i8] zeroinitializer +// SYCL-NEXT: @.sycl_offloading.opts.compile.0 = internal unnamed_addr constant [1 x i8] zeroinitializer +// SYCL-NEXT: @.sycl_offloading.opts.link.0 = internal unnamed_addr constant [1 x i8] zeroinitializer +// SYCL-NEXT: @.sycl_offloading.0.data = internal unnamed_addr constant [0 x i8] zeroinitializer, section ".llvm.offloading" +// SYCL-NEXT: @.offloading.entry_name = internal unnamed_addr constant [5 x i8] c"stub\00", section ".llvm.rodata.offloading", align 1 +// SYCL-NEXT: @.offloading.entry.stub = weak constant %struct.__tgt_offload_entry { i64 0, i16 1, i16 8, i32 0, ptr null, ptr @.offloading.entry_name, i64 0, i64 0, ptr null }, section "llvm_offload_entries", align 8 +// SYCL-NEXT: @.sycl_offloading.entries_arr = internal constant [1 x %struct.__tgt_offload_entry] [%struct.__tgt_offload_entry { i64 0, i16 1, i16 8, i32 0, ptr null, ptr @.offloading.entry_name, i64 0, i64 0, ptr null }] +// SYCL-NEXT: @.sycl_offloading.device_images = internal unnamed_addr constant [1 x %__sycl.tgt_device_image] [%__sycl.tgt_device_image { i16 3, i8 8, i8 0, ptr @.sycl_offloading.target.0, ptr @.sycl_offloading.opts.compile.0, ptr @.sycl_offloading.opts.link.0, ptr @.sycl_offloading.0.data, ptr @.sycl_offloading.0.data, ptr @.sycl_offloading.entries_arr, ptr getelementptr ([1 x %struct.__tgt_offload_entry], ptr @.sycl_offloading.entries_arr, i64 0, i64 1), 
ptr null, ptr null }] +// SYCL-NEXT: @.sycl_offloading.descriptor = internal constant %__sycl.tgt_bin_desc { i16 1, i16 1, ptr @.sycl_offloading.device_images, ptr null, ptr null } + +// SYCL: define internal void @sycl.descriptor_reg() section ".text.startup" { +// SYCL-NEXT: entry: +// SYCL-NEXT: call void @__sycl_register_lib(ptr @.sycl_offloading.descriptor) +// SYCL-NEXT: ret void +// SYCL-NEXT: } + +// SYCL: define internal void @sycl.descriptor_unreg() section ".text.startup" { +// SYCL-NEXT: entry: +// SYCL-NEXT: call void @__sycl_unregister_lib(ptr @.sycl_offloading.descriptor) +// SYCL-NEXT: ret void +// SYCL-NEXT: } diff --git a/clang/test/Driver/linker-wrapper.c b/clang/test/Driver/linker-wrapper.c index e73fa5ca3dbf9..c060dae7bb154 100644 --- a/clang/test/Driver/linker-wrapper.c +++ b/clang/test/Driver/linker-wrapper.c @@ -54,7 +54,7 @@ __attribute__((visibility("protected"), used)) int x; // RUN: clang-offload-packager -o %t.out \ // RUN: --image=file=%t.spirv.bc,kind=sycl,triple=spirv64-unknown-unknown,arch=generic // RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out -// RUN: not clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \ +// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \ // RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=SPIRV-LINK // SPIRV-LINK: clang{{.*}} -o {{.*}}.img -dumpdir a.out.spirv64..img. --target=spirv64-unknown-unknown {{.*}}.o --sycl-link -Xlinker -triple=spirv64-unknown-unknown -Xlinker -arch= diff --git a/clang/test/Driver/modules-print-library-module-manifest-path.cpp b/clang/test/Driver/modules-print-library-module-manifest-path.cpp index 7606713bfa22a..af0f124477cf8 100644 --- a/clang/test/Driver/modules-print-library-module-manifest-path.cpp +++ b/clang/test/Driver/modules-print-library-module-manifest-path.cpp @@ -18,6 +18,14 @@ // RUN: --target=x86_64-linux-gnu 2>&1 \ // RUN: | FileCheck libcxx.cpp +// check that -nostdlib causes no library-provided module manifest to +// be reported, even when libc++.modules.json is present. 
+// RUN: %clang -print-library-module-manifest-path \ +// RUN: -nostdlib \ +// RUN: -resource-dir=%t/Inputs/usr/lib/x86_64-linux-gnu \ +// RUN: --target=x86_64-linux-gnu 2>&1 \ +// RUN: | FileCheck libcxx-no-module-json.cpp + // for macos there is a different directory structure // where the library and libc++.modules.json file are in lib // directly but headers are in clang/ver directory which diff --git a/clang/test/Driver/riscv-cpus.c b/clang/test/Driver/riscv-cpus.c index 88ec766ff6966..5d5fdd72baedb 100644 --- a/clang/test/Driver/riscv-cpus.c +++ b/clang/test/Driver/riscv-cpus.c @@ -462,7 +462,6 @@ // MCPU-SIFIVE-P450-SAME: "-target-feature" "+ziccif" // MCPU-SIFIVE-P450-SAME: "-target-feature" "+zicclsm" // MCPU-SIFIVE-P450-SAME: "-target-feature" "+ziccrse" -// MCPU-SIFIVE-P450-SAME: "-target-feature" "+zicntr" // MCPU-SIFIVE-P450-SAME: "-target-feature" "+zicsr" // MCPU-SIFIVE-P450-SAME: "-target-feature" "+zifencei" // MCPU-SIFIVE-P450-SAME: "-target-feature" "+zihintntl" @@ -473,6 +472,7 @@ // MCPU-SIFIVE-P450-SAME: "-target-feature" "+zba" // MCPU-SIFIVE-P450-SAME: "-target-feature" "+zbb" // MCPU-SIFIVE-P450-SAME: "-target-feature" "+zbs" +// MCPU-SIFIVE-P450-SAME: "-target-feature" "+zkt" // MCPU-SIFIVE-P450-SAME: "-target-abi" "lp64d" // RUN: %clang -target riscv64 -### -c %s 2>&1 -mcpu=sifive-p470 | FileCheck -check-prefix=MCPU-SIFIVE-P470 %s @@ -491,7 +491,6 @@ // MCPU-SIFIVE-P470-SAME: "-target-feature" "+ziccif" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicclsm" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+ziccrse" -// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicntr" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zicsr" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zifencei" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zihintntl" @@ -503,6 +502,7 @@ // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zba" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zbb" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zbs" +// MCPU-SIFIVE-P470-SAME: "-target-feature" "+zkt" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvbb" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zvbc" // MCPU-SIFIVE-P470-SAME: "-target-feature" "+zve32f" @@ -553,7 +553,6 @@ // MCPU-SIFIVE-P670-SAME: "-target-feature" "+ziccif" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicclsm" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+ziccrse" -// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicntr" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicsr" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zifencei" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zihintntl" @@ -564,6 +563,7 @@ // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zba" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zbb" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zbs" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zkt" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvbb" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvbc" // MCPU-SIFIVE-P670-SAME: "-target-feature" "+zve32f" diff --git a/clang/test/Interpreter/execute-pch.cpp b/clang/test/Interpreter/execute-pch.cpp new file mode 100644 index 0000000000000..8041ee6ac966d --- /dev/null +++ b/clang/test/Interpreter/execute-pch.cpp @@ -0,0 +1,23 @@ +// REQUIRES: host-supports-jit +// UNSUPPORTED: system-aix +// +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: split-file %s %t +// +// RUN: %clang -fmax-type-align=16 -Xclang -fdeprecated-macro -fno-stack-protector -Xclang -fwrapv -Xclang -fblocks -Xclang -fskip-odr-check-in-gmf -fexceptions -fcxx-exceptions -fgnuc-version=0 -target %host-jit-triple -Xclang -fblocks 
-Xclang -fmax-type-align=8 -Xclang -fincremental-extensions -Xclang -emit-pch -x c++-header -o %t/include.pch %t/include.hpp +// +// RUN: cat %t/main.cpp \ +// RUN: | clang-repl -Xcc -fgnuc-version=0 -Xcc -fno-stack-protector -Xcc -fwrapv -Xcc -fblocks -Xcc -fskip-odr-check-in-gmf -Xcc -fmax-type-align=8 -Xcc -include-pch -Xcc %t/include.pch \ +// RUN: | FileCheck %s + +//--- include.hpp + +int f_pch() { return 5; } + +//--- main.cpp + +extern "C" int printf(const char *, ...); +printf("f_pch = %d\n", f_pch()); + +// CHECK: f_pch = 5 diff --git a/clang/test/Lexer/cxx-features.cpp b/clang/test/Lexer/cxx-features.cpp index ced5bcaf0db16..8eb9ea032879c 100644 --- a/clang/test/Lexer/cxx-features.cpp +++ b/clang/test/Lexer/cxx-features.cpp @@ -148,7 +148,7 @@ // init_captures checked below -#if check(modules, 0, 0, 0, 0, 0, 0, 0) +#if check(modules, 0, 0, 0, 0, 1, 1, 1) // FIXME: 201907 in C++20 #error "wrong value for __cpp_modules" #endif diff --git a/clang/test/Misc/pragma-attribute-supported-attributes-list.test b/clang/test/Misc/pragma-attribute-supported-attributes-list.test index 37ff33e5a1523..73d4cb1769ed5 100644 --- a/clang/test/Misc/pragma-attribute-supported-attributes-list.test +++ b/clang/test/Misc/pragma-attribute-supported-attributes-list.test @@ -126,7 +126,9 @@ // CHECK-NEXT: NoProfileFunction (SubjectMatchRule_function) // CHECK-NEXT: NoRandomizeLayout (SubjectMatchRule_record) // CHECK-NEXT: NoSanitize (SubjectMatchRule_function, SubjectMatchRule_objc_method, SubjectMatchRule_variable_is_global) -// CHECK-NEXT: NoSanitizeSpecific (SubjectMatchRule_function, SubjectMatchRule_variable_is_global) +// CHECK-NEXT: NoSanitizeAddress (SubjectMatchRule_function, SubjectMatchRule_variable_is_global) +// CHECK-NEXT: NoSanitizeMemory (SubjectMatchRule_function) +// CHECK-NEXT: NoSanitizeThread (SubjectMatchRule_function) // CHECK-NEXT: NoSpeculativeLoadHardening (SubjectMatchRule_function, SubjectMatchRule_objc_method) // CHECK-NEXT: NoSplitStack (SubjectMatchRule_function) // CHECK-NEXT: NoStackProtector (SubjectMatchRule_function) diff --git a/clang/test/Modules/cxx20-module-file-info-macros.cpp b/clang/test/Modules/cxx20-module-file-info-macros.cpp index 3b67e9b9acd41..431c967fbbccd 100644 --- a/clang/test/Modules/cxx20-module-file-info-macros.cpp +++ b/clang/test/Modules/cxx20-module-file-info-macros.cpp @@ -36,28 +36,28 @@ #define REDEFINE // CHECK: Macro Definitions: -// CHECK-DAG: REDEFINE -// CHECK-DAG: FUNC_Macro -// CHECK-DAG: CONSTANT -// CHECK-DAG: FOO +// CHECK: CONSTANT +// CHECK: FOO +// CHECK: FUNC_Macro +// CHECK: REDEFINE // CHECK-NEXT: === //--- include_foo.h #include "foo.h" #undef REDEFINE // CHECK: Macro Definitions: -// CHECK-DAG: CONSTANT -// CHECK-DAG: FUNC_Macro -// CHECK-DAG: FOO +// CHECK: CONSTANT +// CHECK: FOO +// CHECK: FUNC_Macro // CHECK-NEXT: === //--- import_foo.h import "foo.h"; #undef REDEFINE // CHECK: Macro Definitions: -// CHECK-DAG: CONSTANT -// CHECK-DAG: FUNC_Macro -// CHECK-DAG: FOO +// CHECK: CONSTANT +// CHECK: FOO +// CHECK: FUNC_Macro // CHECK-NEXT: === //--- named_module.cppm diff --git a/clang/test/OpenMP/amdgcn_parallel_num_threads_strict_messages.cpp b/clang/test/OpenMP/amdgcn_parallel_num_threads_strict_messages.cpp new file mode 100644 index 0000000000000..513754b0bbad9 --- /dev/null +++ b/clang/test/OpenMP/amdgcn_parallel_num_threads_strict_messages.cpp @@ -0,0 +1,108 @@ +// RUN: %clang_cc1 -DF1 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o 
%t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF1 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null +// RUN: %clang_cc1 -DF2 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF2 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null +// RUN: %clang_cc1 -DF3 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF3 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null + +#ifndef TARGET +// expected-no-diagnostics +#endif + +#ifdef F3 +template +tx ftemplate(int n) { + tx a = 0; + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: tx(20)) severity(fatal) message("msg") + { + } + + short b = 1; +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: b) severity(warning) message("msg") + { + a += b; + } + + return a; +} +#endif + +#ifdef F2 +static +int fstatic(int n) { + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp target parallel num_threads(strict: n) message("msg") + { + } + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp target parallel num_threads(strict: 32+n) severity(warning) + { + } + + return n+1; +} +#endif + +#ifdef F1 +struct S1 { + double a; + + int r1(int n){ + int b = 1; + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: n-b) severity(warning) message("msg") + { + this->a = (double)b + 1.5; + } + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: 1024) severity(fatal) + { + this->a = 2.5; + } + + return (int)a; + } +}; +#endif + +int bar(int n){ + int a = 0; + +#ifdef F1 + #pragma omp target + { + S1 S; + a += S.r1(n); + } +#endif + +#ifdef F2 + a += fstatic(n); +#endif + +#ifdef F3 + #pragma omp target + a += ftemplate(n); +#endif + + return a; +} diff --git a/clang/test/OpenMP/amdgcn_save_temps.c b/clang/test/OpenMP/amdgcn_save_temps.c new file mode 100644 index 0000000000000..d838bb1166b6b --- /dev/null +++ b/clang/test/OpenMP/amdgcn_save_temps.c @@ -0,0 +1,23 @@ + +// REQUIRES: amdgpu-registered-target + +// RUN: %clang_cc1 -E -fopenmp -x c -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -save-temps=cwd %s -o %t-openmp-amdgcn-amd-amdhsa-gfx90a.i +// RUN: %clang_cc1 -fopenmp -x c -triple 
x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -save-temps=cwd -emit-llvm-bc %s -o %t-x86_64-unknown-unknown.bc +// RUN: %clang_cc1 -fopenmp -x c -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -save-temps=cwd -emit-llvm -fopenmp-is-target-device -x cpp-output %t-openmp-amdgcn-amd-amdhsa-gfx90a.i -fopenmp-host-ir-file-path %t-x86_64-unknown-unknown.bc -o - | FileCheck %s +// expected-no-diagnostics +#ifndef HEADER +#define HEADER + +#define N 1000 + +int test_amdgcn_save_temps() { + int arr[N]; +#pragma omp target + for (int i = 0; i < N; i++) { + arr[i] = 1; + } + return arr[0]; +} +#endif + +// CHECK: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_amdgcn_save_temps diff --git a/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp index 04dd9c0e9c69f..bf979d79fc61b 100644 --- a/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp +++ b/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp @@ -7,14 +7,6 @@ // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3 -// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3 - -// RUN: %clang_cc1 -DOMP60 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" - // RUN: %clang_cc1 -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK5 // RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s 
--check-prefix=CHECK5 @@ -31,14 +23,6 @@ // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11 -// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11 - -// RUN: %clang_cc1 -DOMP60 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" - // RUN: %clang_cc1 -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK13 // RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK13 @@ -67,20 +51,12 @@ template int tmain() { #pragma omp target #pragma omp teams -#ifdef OMP60 -#pragma omp distribute parallel for num_threads(strict: C) severity(warning) message("msg") -#else #pragma omp distribute parallel for num_threads(C) -#endif for (int i = 0; i < 100; i++) foo(); #pragma omp target #pragma omp teams -#ifdef OMP60 -#pragma omp distribute parallel for num_threads(T(23)) severity(fatal) message("msg1") -#else #pragma omp distribute parallel for num_threads(T(23)) -#endif for (int i = 0; i < 100; i++) foo(); return 0; @@ -91,22 +67,14 @@ int main() { char a = s; #pragma omp target #pragma omp teams -#ifdef OMP60 -#pragma omp distribute parallel for num_threads(2) severity(warning) message("msg2") -#else #pragma omp distribute parallel for num_threads(2) -#endif for (int i = 0; i < 100; i++) { foo(); } #pragma omp target #pragma omp teams -#ifdef OMP60 -#pragma omp distribute parallel for num_threads(a) severity(fatal) message("msg3") -#else #pragma omp 
distribute parallel for num_threads(a) -#endif for (int i = 0; i < 100; i++) { foo(); } @@ -125,6264 +93,6156 @@ int main() { #endif -// CHECK1-LABEL: define {{[^@]+}}@main -// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, 
ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: lpad: -// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: cleanup -// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[EH_RESUME:%.*]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// 
CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 -// CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont5: -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont7: -// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: ret i32 [[TMP41]] -// CHECK1: eh.resume: -// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: 
store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_Z8mayThrowv() -// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK1-NEXT: ret i8 [[CONV]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// CHECK1-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], 
align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], 
align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK1-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// 
CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = 
load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 
-// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 
8 -// CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK1-SAME: () #[[ATTR6]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// 
CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// 
CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// 
CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] 
-// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK1: invoke.cont2: -// CHECK1-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr 
@[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr 
[[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@main -// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x 
ptr], align 8 -// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK3: omp_offload.failed: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() 
#[[ATTR3:[0-9]+]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK3: lpad: -// CHECK3-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: cleanup -// CHECK3-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK3-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK3-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK3-NEXT: br label [[EH_RESUME:%.*]] -// CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// 
CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK3-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK3-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 -// CHECK3-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK3: omp_offload.failed3: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK3: omp_offload.cont4: -// CHECK3-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK3-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont5: -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK3-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont7: -// CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK3-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: ret i32 [[TMP41]] -// CHECK3: eh.resume: -// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK3-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_Z8mayThrowv() -// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: 
[[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK3-NEXT: ret i8 [[CONV]] -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// CHECK3-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: 
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label 
[[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK3-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK3-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// 
CHECK3-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = 
load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK3-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 
-// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK3: omp_offload.failed: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 
8 -// CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK3: omp_offload.failed3: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK3: omp_offload.cont4: -// CHECK3-NEXT: ret i32 0 -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK3-SAME: () #[[ATTR6]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK3: omp_offload.failed: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// 
CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK3: omp_offload.failed3: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK3: omp_offload.cont4: -// CHECK3-NEXT: ret i32 0 -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK3-SAME: () #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// 
CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK3-SAME: () #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// 
CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK3-SAME: () #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] 
-// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK3-SAME: () #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK3: invoke.cont2: -// CHECK3-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr 
@[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr 
[[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@main -// CHECK5-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK5-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK5-NEXT: [[EXN_SLOT:%.*]] = 
alloca ptr, align 8 -// CHECK5-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK5-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK5-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr 
@[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK5: omp_offload.failed: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK5: lpad: -// CHECK5-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: cleanup -// CHECK5-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK5-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK5-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK5-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK5-NEXT: br label [[EH_RESUME:%.*]] -// CHECK5: omp_offload.cont: -// CHECK5-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK5-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 -// CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK5: omp_offload.failed3: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK5: omp_offload.cont4: -// CHECK5-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK5-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK5-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK5-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK5: invoke.cont5: -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK5-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK5-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK5: invoke.cont7: -// CHECK5-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK5-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK5-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK5-NEXT: ret i32 [[TMP41]] -// CHECK5: eh.resume: -// CHECK5-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK5-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK5-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK5-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK5-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK5-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] 
comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: call void @_Z8mayThrowv() -// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK5-NEXT: ret i8 [[CONV]] -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// CHECK5-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// 
CHECK5: omp.inner.for.body: -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) 
-// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK5-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK5-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK5-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK5-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK5-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// 
CHECK5-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = 
load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK5-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 
-// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK5: omp_offload.failed: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK5: omp_offload.cont: -// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 
8 -// CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK5: omp_offload.failed3: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK5: omp_offload.cont4: -// CHECK5-NEXT: ret i32 0 -// -// -// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK5-SAME: () #[[ATTR6]] comdat { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK5: omp_offload.failed: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK5: omp_offload.cont: -// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// 
CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK5: omp_offload.failed3: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK5: omp_offload.cont4: -// CHECK5-NEXT: ret i32 0 -// -// -// CHECK5-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK5-SAME: () #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// 
CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK5-SAME: () #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// 
CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK5-SAME: () #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@main +// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK3-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1 +// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: lpad: +// CHECK3-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: cleanup +// CHECK3-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK3-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK3-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK3-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK3-NEXT: br label [[EH_RESUME:%.*]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK3-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], 
i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 8 +// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP33]], align 8 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK3-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK3-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK3-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK3: omp_offload.failed3: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK3: omp_offload.cont4: +// CHECK3-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK3-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK3-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK3: invoke.cont5: +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] +// CHECK3-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK3: invoke.cont7: +// CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] +// CHECK3-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK3-NEXT: ret i32 [[TMP41]] +// CHECK3: eh.resume: +// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr 
[[EHSELECTOR_SLOT]], align 4 +// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK3-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: call void @_Z8mayThrowv() +// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK3-NEXT: ret i8 [[CONV]] +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 +// CHECK3-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// 
CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 +// CHECK3-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK3-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: 
[[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK3-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: 
store ptr null, ptr [[TMP5]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// 
CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK3: omp_offload.failed3: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK3: omp_offload.cont4: +// CHECK3-NEXT: ret i32 0 +// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK3-SAME: () #[[ATTR6]] comdat { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// 
CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK3: omp_offload.failed3: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK3: omp_offload.cont4: +// CHECK3-NEXT: ret i32 0 +// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 +// CHECK3-SAME: () #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str) +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: 
cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 +// CHECK3-SAME: () #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: 
cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 +// CHECK3-SAME: () #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str) +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// 
CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 +// CHECK3-SAME: () #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK3: invoke.cont2: +// CHECK3-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 
[[TMP1]], i32 [[TMP7]]) +// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// 
CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@main +// CHECK11-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK11-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: 
[[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: store i8 [[CALL]], ptr [[A]], align 1 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr 
@[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11: omp_offload.failed: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK11: lpad: +// CHECK11-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: cleanup +// CHECK11-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK11-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK11-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK11-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[EH_RESUME:%.*]] +// CHECK11: omp_offload.cont: +// CHECK11-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK11-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK11-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK11-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP32]], align 8 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP33]], align 8 +// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK11-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK11-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK11: omp_offload.failed3: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK11: omp_offload.cont4: +// CHECK11-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK11-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK11-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK11: invoke.cont5: +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] +// CHECK11-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK11-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK11: invoke.cont7: +// CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] +// CHECK11-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP41]] +// CHECK11: eh.resume: +// CHECK11-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK11-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK11-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK11-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK11-SAME: (ptr 
noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: call void @_Z8mayThrowv() +// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK11-NEXT: ret i8 [[CONV]] +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 +// CHECK11-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// 
CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK11-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK11-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 +// CHECK11-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK11-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK11-NEXT: [[TMP12:%.*]] = load 
i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// 
CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK11-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr null, ptr 
[[TMP4]], align 8 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11: omp_offload.failed: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK11: omp_offload.cont: +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 
+// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK11: omp_offload.failed3: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK11: omp_offload.cont4: +// CHECK11-NEXT: ret i32 0 +// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK11-SAME: () #[[ATTR6]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK11-NEXT: 
[[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11: omp_offload.failed: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK11: omp_offload.cont: +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x 
i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK11: omp_offload.failed3: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK11: omp_offload.cont4: +// CHECK11-NEXT: ret i32 0 +// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 +// CHECK11-SAME: () #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str) +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 
+// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 +// CHECK11-SAME: () #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 
+// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 +// CHECK11-SAME: () #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str) +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 
4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 +// CHECK11-SAME: () #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK11-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK11: invoke.cont2: +// CHECK11-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK11-NEXT: call void 
@__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) +// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr 
[[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@main +// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[S:%.*]] = alloca 
[[STRUCT_S:%.*]], align 8 +// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1 +// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// 
CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: lpad: +// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: cleanup +// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK1-NEXT: br label [[EH_RESUME:%.*]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP32]], 
align 8 +// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP33]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK1: omp_offload.failed3: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK1: omp_offload.cont4: +// CHECK1-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK1: invoke.cont5: +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] +// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK1: invoke.cont7: +// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] +// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK1-NEXT: ret i32 [[TMP41]] +// CHECK1: eh.resume: +// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] // // -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: 
[[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: 
br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK1-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK5-SAME: () #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK5-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: call void @_Z8mayThrowv() +// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK1-NEXT: ret i8 [[CONV]] // // -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: 
[[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK5-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK5: invoke.cont2: -// CHECK5-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68 +// CHECK1-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined) +// CHECK1-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = 
load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr 
[[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: 
+// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK1-NEXT: unreachable // // -// CHECK5-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@main -// CHECK9-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK9-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK9-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x 
ptr], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK9-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: lpad: -// 
CHECK9-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: cleanup -// CHECK9-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK9-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK9-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[EH_RESUME:%.*]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK9-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK9-NEXT: 
[[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK9-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 -// CHECK9-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK9-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK9-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK9: invoke.cont5: -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK9-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK9-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK9: invoke.cont7: -// CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK9-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK9-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP41]] -// CHECK9: eh.resume: -// CHECK9-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK9-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK9-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK9-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK9-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74 +// CHECK1-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: call void @_Z8mayThrowv() -// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK9-NEXT: ret i8 [[CONV]] +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: 
store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// CHECK9-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK1: omp_offload.failed3: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK1: omp_offload.cont4: +// CHECK1-NEXT: ret i32 0 // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: 
[[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK1-SAME: () #[[ATTR6]] comdat { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK1-NEXT: store 
i32 0, ptr [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK1: omp_offload.failed3: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]] +// 
CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK1: omp_offload.cont4: +// CHECK1-NEXT: ret i32 0 // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK9-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK9-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK9-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK9-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: 
[[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK9-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52 +// CHECK1-SAME: () #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr 
[[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], 
align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr 
@__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// 
CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK9-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 
-// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label 
[[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: ret i32 0 +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57 +// CHECK1-SAME: () #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to 
i64 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK9-SAME: () #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 
x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK9-NEXT: br i1 [[TMP29]], label 
[[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: ret i32 0 +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: 
[[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52 +// CHECK1-SAME: () #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr 
[[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57 +// CHECK1-SAME: () #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// 
CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK1: invoke.cont2: +// CHECK1-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) +// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 
[[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 
[[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: 
[[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@main +// CHECK5-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK5-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK5-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK5-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK5-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: store i8 [[CALL]], ptr [[A]], align 1 +// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK5-NEXT: 
[[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5: omp_offload.failed: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK5: lpad: +// CHECK5-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: cleanup +// CHECK5-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK5-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK5-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK5-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK5-NEXT: br label [[EH_RESUME:%.*]] +// CHECK5: omp_offload.cont: +// CHECK5-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK5-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 8 +// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP33]], align 8 +// CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK5: omp_offload.failed3: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK5: omp_offload.cont4: +// CHECK5-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK5-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK5-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK5-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK5: invoke.cont5: +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] +// CHECK5-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK5-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK5: invoke.cont7: +// CHECK5-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], 
[[CALL8]] +// CHECK5-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK5-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK5-NEXT: ret i32 [[TMP41]] +// CHECK5: eh.resume: +// CHECK5-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK5-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK5-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK5-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK5-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// +// +// CHECK5-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK5-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: call void @_Z8mayThrowv() +// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK5-NEXT: ret i8 [[CONV]] // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, 
ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68 +// CHECK5-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined) +// CHECK5-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: 
[[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// 
CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK5-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK5-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK5-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK5-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, 
ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74 +// CHECK5-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]]) +// CHECK5-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr 
[[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK5-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// 
CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK5-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5: omp_offload.failed: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK5: omp_offload.cont: +// 
CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK5: omp_offload.failed3: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK5: omp_offload.cont4: +// CHECK5-NEXT: ret i32 0 // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: 
[[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK9-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK9: invoke.cont2: -// CHECK9-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK5-SAME: () #[[ATTR6]] comdat { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr 
inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5: omp_offload.failed: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK5: omp_offload.cont: +// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr 
[[KERNEL_ARGS2]]) +// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK5: omp_offload.failed3: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK5: omp_offload.cont4: +// CHECK5-NEXT: ret i32 0 // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 
-// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52 +// CHECK5-SAME: () #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined) +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// 
CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@main -// CHECK11-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK11-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], 
ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK11: omp_offload.failed: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK11: lpad: -// CHECK11-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: cleanup -// CHECK11-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK11-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK11-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK11-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[EH_RESUME:%.*]] -// CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK11-NEXT: store i8 [[TMP18]], ptr 
[[A_CASTED]], align 1 -// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK11-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK11-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK11-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK11-NEXT: [[TMP39:%.*]] = icmp ne i32 
[[TMP38]], 0 -// CHECK11-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK11: omp_offload.failed3: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK11: omp_offload.cont4: -// CHECK11-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK11-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK11-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK11: invoke.cont5: -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK11-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK11-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK11: invoke.cont7: -// CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK11-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP41]] -// CHECK11: eh.resume: -// CHECK11-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK11-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK11-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK11-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57 +// CHECK5-SAME: () #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: call void @_Z8mayThrowv() -// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK11-NEXT: ret i8 [[CONV]] +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// 
CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// CHECK11-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52 +// CHECK5-SAME: () #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// 
CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], 
ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK11-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// 
CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = 
landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK11-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK11-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57 +// CHECK5-SAME: () #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK11-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ 
[[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK5-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK5: invoke.cont2: +// CHECK5-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) +// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca 
i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK11-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// 
CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: 
[[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] 
= alloca ptr, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@main +// CHECK9-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK9-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK9-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK9-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: store i8 [[CALL]], ptr [[A]], align 1 +// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 
+// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9: omp_offload.failed: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK9: lpad: +// CHECK9-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: cleanup +// CHECK9-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK9-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK9-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[EH_RESUME:%.*]] +// CHECK9: omp_offload.cont: +// CHECK9-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK9-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK9-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK9-NEXT: store ptr 
@.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP32]], align 8 +// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK9-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK9-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK9-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK9: omp_offload.failed3: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK9: omp_offload.cont4: +// CHECK9-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK9-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK9-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK9: invoke.cont5: +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] +// CHECK9-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK9-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK9: invoke.cont7: +// CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] +// CHECK9-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP41]] +// CHECK9: eh.resume: +// CHECK9-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK9-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK9-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK9-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK9-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// +// +// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr 
[[THIS_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK11-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], 
label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK11: omp_offload.failed: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK11: omp_offload.failed3: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK11: omp_offload.cont4: -// CHECK11-NEXT: ret i32 0 +// CHECK9-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 
8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: call void @_Z8mayThrowv() +// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK9-NEXT: ret i8 [[CONV]] // // -// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK11-SAME: () #[[ATTR6]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// 
CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK11: omp_offload.failed: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK11: omp_offload.failed3: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK11: omp_offload.cont4: -// CHECK11-NEXT: ret i32 0 +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68 +// CHECK9-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK9-NEXT: entry: +// 
CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: 
+// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK9-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK9-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: store i64 
[[TMP0]], ptr [[A2]], align 8 -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74 +// CHECK9-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], 
align 1 +// CHECK9-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr 
[[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable +// +// +// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK9-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9: omp_offload.failed: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK9: omp_offload.cont: +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK9: omp_offload.failed3: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK9: omp_offload.cont4: +// CHECK9-NEXT: ret i32 0 // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK11-SAME: () #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK9-SAME: () #[[ATTR6]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9: omp_offload.failed: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK9: omp_offload.cont: +// 
CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK9: omp_offload.failed3: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK9: omp_offload.cont4: +// CHECK9-NEXT: ret i32 0 // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 
-// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK11-SAME: () #[[ATTR2]] { -// CHECK11-NEXT: 
entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: 
[[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52 +// CHECK9-SAME: () #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr 
[[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], 
align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// 
CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK11-SAME: () #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57 +// CHECK9-SAME: () #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: call void 
@__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// 
CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr 
[[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// 
CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK11-SAME: () #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52 +// CHECK9-SAME: () #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], 
align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK11-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK11: invoke.cont2: -// CHECK11-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// 
CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] 
+// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// 
CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57 +// CHECK9-SAME: () #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label 
[[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK9-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK9: invoke.cont2: +// CHECK9-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) +// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 
[[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable // // // CHECK13-LABEL: define {{[^@]+}}@main @@ -6433,11 +6293,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK13-NEXT: [[TMP13:%.*]] = call i32 
@__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) +// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]]) // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: lpad: // CHECK13-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } @@ -6486,11 +6346,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 // CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK13-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK13-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]]) // CHECK13-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 // CHECK13-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] // CHECK13: omp_offload.failed3: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]] // CHECK13: omp_offload.cont4: // CHECK13-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 @@ -6541,14 +6401,14 @@ int main() { // CHECK13-NEXT: ret i8 [[CONV]] // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68 // CHECK13-SAME: () #[[ATTR2:[0-9]+]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -6594,7 +6454,7 @@ int main() { // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -6609,7 +6469,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -6695,16 +6555,16 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74 // CHECK13-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK13-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -6755,7 +6615,7 @@ int main() { // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -6770,7 +6630,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -6882,11 +6742,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: omp_offload.cont: // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 @@ -6915,11 +6775,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) // CHECK13-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 // CHECK13-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] // CHECK13: omp_offload.failed3: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]] // CHECK13: omp_offload.cont4: // CHECK13-NEXT: ret i32 0 @@ -6958,11 +6818,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 // 
CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: omp_offload.cont: // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 @@ -6991,11 +6851,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) // CHECK13-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 // CHECK13-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] // CHECK13: omp_offload.failed3: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]] // CHECK13: omp_offload.cont4: // CHECK13-NEXT: ret i32 0 @@ -7025,14 +6885,14 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52 // CHECK13-SAME: () #[[ATTR2]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7078,7 +6938,7 @@ int main() { // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -7093,7 +6953,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7172,14 +7032,14 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57 // CHECK13-SAME: () #[[ATTR2]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7225,7 +7085,7 @@ int main() { // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -7240,7 +7100,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7319,14 +7179,14 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52 // CHECK13-SAME: () #[[ATTR2]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7372,7 +7232,7 @@ int main() { // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -7387,7 +7247,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7466,14 +7326,14 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57 // CHECK13-SAME: () #[[ATTR2]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7528,7 +7388,7 @@ int main() { // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -7549,7 +7409,7 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 diff --git a/clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp deleted file mode 100644 index 7c4e995890921..0000000000000 --- a/clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp +++ /dev/null @@ -1,3541 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ -// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1 -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1 - -// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3 -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3 - -// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK9 -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions 
-emit-pch -o %t %s -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9 - -// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11 -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11 - -// expected-no-diagnostics -#ifndef HEADER -#define HEADER - -typedef __INTPTR_TYPE__ intptr_t; - - -void foo(); - -struct S { - intptr_t a, b, c; - S(intptr_t a) : a(a) {} - operator char() { extern void mayThrow(); mayThrow(); return a; } - ~S() {} -}; - -template -int tmain() { - char str[] = "msg1"; -#pragma omp target -#pragma omp teams -#pragma omp distribute parallel for simd num_threads(strict: C) severity(fatal) message("msg") - for (int i = 0; i < 100; i++) - foo(); -#pragma omp target -#pragma omp teams -#pragma omp distribute parallel for simd num_threads(strict: T(23)) severity(warning) message(str) - for (int i = 0; i < 100; i++) - foo(); - return 0; -} - -int main() { - S s(0); - char a = s; - const char *str = "msg1"; -#pragma omp target -#pragma omp teams -#pragma omp distribute parallel for simd num_threads(strict: 2) severity(warning) message("msg") - for (int i = 0; i < 100; i++) { - foo(); - } -#pragma omp target -#pragma omp teams - -#pragma omp distribute parallel for simd num_threads(strict: a) severity(fatal) message(str) - for (int i = 0; i < 100; i++) { - foo(); - } - return a + tmain() + tmain(); -} - -#endif -// CHECK1-LABEL: define {{[^@]+}}@main -// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0) -// CHECK1-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]]) -// CHECK1-NEXT: to label 
[[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
-// CHECK1: invoke.cont:
-// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1
-// CHECK1-NEXT: store ptr @.str, ptr [[STR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
-// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
-// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
-// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
-// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
-// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
-// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
-// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
-// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
-// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
-// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
-// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
-// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
-// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
-// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
-// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
-// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.region_id, ptr [[KERNEL_ARGS]])
-// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
-// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
-// CHECK1: omp_offload.failed:
-// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54() #[[ATTR3:[0-9]+]]
-// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
-// CHECK1: lpad:
-// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
-// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
-// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
-// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
-// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
-// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
-// CHECK1-NEXT: br label [[EH_RESUME:%.*]]
-// CHECK1: omp_offload.cont:
-// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
-// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
-// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
-// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[STR]], align 8
-// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
-// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP22]], align 8
-// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
-// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8
-// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8
-// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP25]], align 8
-// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
-// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8
-// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
-// CHECK1-NEXT: store i32 3, ptr [[TMP29]], align 4
-// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
-// CHECK1-NEXT: store i32 2, ptr [[TMP30]], align 4
-// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
-// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 8
-// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
-// CHECK1-NEXT: store ptr [[TMP28]], ptr [[TMP32]], align 8
-// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
-// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP33]], align 8
-// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
-// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP34]], align 8
-// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
-// CHECK1-NEXT: store ptr null, ptr [[TMP35]], align 8
-// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
-// CHECK1-NEXT: store ptr null, ptr [[TMP36]], align 8
-// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
-// CHECK1-NEXT: store i64 100, ptr [[TMP37]], align 8
-// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
-// CHECK1-NEXT: store i64 0, ptr [[TMP38]], align 8
-// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
-// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP39]], align 4
-// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
-// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP40]], align 4
-// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
-// CHECK1-NEXT: store i32 0, ptr [[TMP41]], align 4
-// CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS2]])
-// CHECK1-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
-// CHECK1-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
-// CHECK1: omp_offload.failed3:
-// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(i64 [[TMP19]], ptr [[TMP20]]) #[[ATTR3]]
-// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
-// CHECK1: omp_offload.cont4:
-// CHECK1-NEXT: [[TMP44:%.*]] = load i8, ptr [[A]], align 1
-// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP44]] to i32
-// CHECK1-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
-// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
-// CHECK1: invoke.cont5:
-// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
-// CHECK1-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
-// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
-// CHECK1: invoke.cont7:
-// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
-// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
-// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
-// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4
-// CHECK1-NEXT: ret i32 [[TMP45]]
-// CHECK1: eh.resume:
-// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
-// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
-// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
-// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
-// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El
-// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
-// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// CHECK1-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv
-// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK1-NEXT: call void @_Z8mayThrowv()
-// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
-// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK1-NEXT: ret i8 [[CONV]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54 -// CHECK1-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 2, i32 1, ptr @.str.1), !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP8:%.*]] = 
zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// 
CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP19]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8:[0-9]+]], !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK1-SAME: 
(ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60 -// CHECK1-SAME: (i64 [[A:%.*]], ptr [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined, ptr [[A_ADDR]], ptr [[STR_ADDR]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]], ptr nonnull align 8 dereferenceable(8) [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META24:![0-9]+]] -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]], !align [[META25:![0-9]+]] -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load 
i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP10:%.*]] = sext i8 [[TMP9]] to i32 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP3]], i32 [[TMP10]], i32 2, ptr [[TMP11]]), !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined, i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK1-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 
-// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP29]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: 
[[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 
-// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42(ptr [[STR]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK1-SAME: () #[[ATTR6]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.4, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42(ptr [[STR]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, 
i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 
[[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP35]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42 -// CHECK1-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr 
[[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, 
i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 -// CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 
[[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP41]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// 
CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: 
[[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store 
i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP47]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42 -// CHECK1-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]] -// 
CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP50]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP50]] -// CHECK1: invoke.cont2: -// CHECK1-NEXT: [[TMP8:%.*]] = sext i8 [[CALL]] to i32 -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP8]], i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP17:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP18:%.*]] = extractvalue { ptr, i32 } [[TMP17]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP18]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// 
CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label 
[[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP53]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@main -// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I7:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0) -// CHECK3-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]]) -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK3-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr 
[[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP2]] -// CHECK3: invoke.cont1: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] -// CHECK3: lpad: -// CHECK3-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: cleanup -// CHECK3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1 -// CHECK3-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6:[0-9]+]] -// CHECK3-NEXT: br label [[EH_RESUME:%.*]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB5]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4 -// CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV6]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] -// CHECK3: omp.inner.for.cond8: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] -// CHECK3-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]] -// CHECK3: omp.inner.for.body10: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] -// CHECK3-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP6]] -// CHECK3: invoke.cont13: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]] -// CHECK3: 
omp.body.continue14: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] -// CHECK3: omp.inner.for.inc15: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK3-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]] -// CHECK3: omp.inner.for.end17: -// CHECK3-NEXT: store i32 100, ptr [[I7]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32 -// CHECK3-NEXT: [[CALL19:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT18:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont18: -// CHECK3-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV]], [[CALL19]] -// CHECK3-NEXT: [[CALL22:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont21: -// CHECK3-NEXT: [[ADD23:%.*]] = add nsw i32 [[ADD20]], [[CALL22]] -// CHECK3-NEXT: store i32 [[ADD23]], ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6]] -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: ret i32 [[TMP14]] -// CHECK3: eh.resume: -// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK3-NEXT: [[LPAD_VAL24:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL24]] -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP16]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_Z8mayThrowv() -// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK3-NEXT: ret i8 [[CONV]] -// -// -// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate -// 
CHECK3-SAME: (ptr [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat { -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR6]] -// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK3-SAME: () #[[ATTR4:[0-9]+]] comdat personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP9]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 -// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] -// CHECK3: omp.inner.for.cond7: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 
-// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] -// CHECK3: omp.inner.for.body9: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 -// CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP12]] -// CHECK3: invoke.cont12: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] -// CHECK3: omp.body.continue13: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] -// CHECK3: omp.inner.for.inc14: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] -// CHECK3: omp.inner.for.end16: -// CHECK3-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK3-NEXT: ret i32 0 -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK3-SAME: () #[[ATTR4]] comdat personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: invoke void 
@_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP15]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 -// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] -// CHECK3: omp.inner.for.cond7: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] -// CHECK3: omp.inner.for.body9: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 -// CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP18]] -// CHECK3: invoke.cont12: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] -// CHECK3: omp.body.continue13: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] -// CHECK3: omp.inner.for.inc14: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP19:![0-9]+]] -// CHECK3: omp.inner.for.end16: -// CHECK3-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK3-NEXT: ret i32 0 -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR6]] -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK3-SAME: (ptr nonnull align 8 
dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@main -// CHECK9-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK9-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK9-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0) -// CHECK9-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]]) -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK9-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54() #[[ATTR3:[0-9]+]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: lpad: -// CHECK9-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: cleanup -// CHECK9-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK9-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK9-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[EH_RESUME:%.*]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK9-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = load ptr, ptr [[STR]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 
-// CHECK9-NEXT: store ptr [[TMP20]], ptr [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP29]], align 4 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 2, ptr [[TMP30]], align 4 -// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP28]], ptr [[TMP32]], align 8 -// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP36]], align 8 -// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP38]], align 8 -// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP39]], align 4 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP40]], align 4 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP41]], align 4 -// CHECK9-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 -// CHECK9-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(i64 [[TMP19]], ptr [[TMP20]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: [[TMP44:%.*]] = load i8, ptr [[A]], align 
1 -// CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP44]] to i32 -// CHECK9-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv() -// CHECK9-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK9: invoke.cont5: -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK9-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv() -// CHECK9-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK9: invoke.cont7: -// CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK9-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP45]] -// CHECK9: eh.resume: -// CHECK9-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK9-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK9-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK9-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK9-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]]) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: call void @_Z8mayThrowv() -// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK9-NEXT: ret i8 [[CONV]] -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54 -// CHECK9-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 2, i32 1, ptr @.str.1), !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr 
[[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP19]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8:[0-9]+]], !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK9-SAME: (ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK9-NEXT: call void @_ZSt9terminatev() #[[ATTR8]] -// CHECK9-NEXT: unreachable -// -// -// 
CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60 -// CHECK9-SAME: (i64 [[A:%.*]], ptr [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined, ptr [[A_ADDR]], ptr [[STR_ADDR]]) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]], ptr nonnull align 8 dereferenceable(8) [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META24:![0-9]+]] -// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]], !align [[META25:![0-9]+]] -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP10:%.*]] = sext i8 [[TMP9]] to i32 -// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP3]], i32 [[TMP10]], i32 2, ptr [[TMP11]]), !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined, i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK9-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr 
[[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP29]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: 
omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK9-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes.2, ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = 
getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK9-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42(ptr [[STR]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: ret i32 0 -// -// -// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK9-SAME: () #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes.4, ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK9-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42(ptr [[STR]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: ret i32 0 -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 
[[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP35]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42 -// CHECK9-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr 
[[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined, ptr [[TMP0]]) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0 -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, 
i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 -// CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 
[[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP41]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// 
CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: 
[[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store 
i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP47]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42 -// CHECK9-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined, ptr [[TMP0]]) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]] -// 
CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23) -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP50]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK9-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP50]] -// CHECK9: invoke.cont2: -// CHECK9-NEXT: [[TMP8:%.*]] = sext i8 [[CALL]] to i32 -// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0 -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP8]], i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP17:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP18:%.*]] = extractvalue { ptr, i32 } [[TMP17]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP18]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: 
[[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label 
[[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP53]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: ret void -// -// -// CHECK11-LABEL: define {{[^@]+}}@main -// CHECK11-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK11-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I7:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0) -// CHECK11-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]]) -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK11-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, 
ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP2]] -// CHECK11: invoke.cont1: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK11-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] -// CHECK11: lpad: -// CHECK11-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: cleanup -// CHECK11-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0 -// CHECK11-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6:[0-9]+]] -// CHECK11-NEXT: br label [[EH_RESUME:%.*]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB5]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4 -// CHECK11-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV6]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] -// CHECK11: omp.inner.for.cond8: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] -// CHECK11-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]] -// CHECK11: omp.inner.for.body10: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP11]], 1 -// CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] -// CHECK11-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP6]] -// CHECK11: invoke.cont13: -// CHECK11-NEXT: br 
label [[OMP_BODY_CONTINUE14:%.*]] -// CHECK11: omp.body.continue14: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] -// CHECK11: omp.inner.for.inc15: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK11-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]] -// CHECK11: omp.inner.for.end17: -// CHECK11-NEXT: store i32 100, ptr [[I7]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i8, ptr [[A]], align 1 -// CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32 -// CHECK11-NEXT: [[CALL19:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv() -// CHECK11-NEXT: to label [[INVOKE_CONT18:%.*]] unwind label [[LPAD]] -// CHECK11: invoke.cont18: -// CHECK11-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV]], [[CALL19]] -// CHECK11-NEXT: [[CALL22:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv() -// CHECK11-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]] -// CHECK11: invoke.cont21: -// CHECK11-NEXT: [[ADD23:%.*]] = add nsw i32 [[ADD20]], [[CALL22]] -// CHECK11-NEXT: store i32 [[ADD23]], ptr [[RETVAL]], align 4 -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6]] -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP14]] -// CHECK11: eh.resume: -// CHECK11-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK11-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK11-NEXT: [[LPAD_VAL24:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK11-NEXT: resume { ptr, i32 } [[LPAD_VAL24]] -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP16]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: unreachable -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]]) -// CHECK11-NEXT: ret void -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: call void @_Z8mayThrowv() -// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK11-NEXT: ret i8 
[[CONV]] -// -// -// CHECK11-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK11-SAME: (ptr [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat { -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR6]] -// CHECK11-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK11-NEXT: unreachable -// -// -// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK11-SAME: () #[[ATTR4:[0-9]+]] comdat personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I6:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP9]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] -// CHECK11: omp.inner.for.cond7: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK11-NEXT: [[TMP7:%.*]] = 
load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] -// CHECK11: omp.inner.for.body9: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 -// CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK11-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP12]] -// CHECK11: invoke.cont12: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] -// CHECK11: omp.body.continue13: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] -// CHECK11: omp.inner.for.inc14: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] -// CHECK11: omp.inner.for.end16: -// CHECK11-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK11-NEXT: ret i32 0 -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: unreachable -// -// -// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK11-SAME: () #[[ATTR4]] comdat personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I6:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: [[MUL:%.*]] = mul 
nsw i32 [[TMP3]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP15]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] -// CHECK11: omp.inner.for.cond7: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] -// CHECK11: omp.inner.for.body9: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 -// CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK11-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP18]] -// CHECK11: invoke.cont12: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] -// CHECK11: omp.body.continue13: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] -// CHECK11: omp.inner.for.inc14: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP19:![0-9]+]] -// CHECK11: omp.inner.for.end16: -// CHECK11-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK11-NEXT: ret i32 0 -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: unreachable -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], 
align 8 -// CHECK11-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR6]] -// CHECK11-NEXT: ret void -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK11-NEXT: ret void -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: ret void -// diff --git a/clang/test/OpenMP/for_reduction_codegen.cpp b/clang/test/OpenMP/for_reduction_codegen.cpp index 83632db238484..cb4bcc99c3ff3 100644 --- a/clang/test/OpenMP/for_reduction_codegen.cpp +++ b/clang/test/OpenMP/for_reduction_codegen.cpp @@ -27,7 +27,6 @@ struct S { ~S() {} }; - template T tmain() { T t; @@ -60,6 +59,15 @@ T tmain() { } extern S **foo(); +int g_arr[10]; + +void reductionArrayElement() { +#pragma omp parallel +#pragma omp for reduction(+:g_arr[1]) + for (int i = 0; i < 10; i++) { + g_arr[1] += i; + } +} int main() { #ifdef LAMBDA @@ -164,6 +172,7 @@ int main() { #pragma omp for reduction(& : var3) for (int i = 0; i < 10; ++i) ; + reductionArrayElement(); return tmain(); #endif } @@ -535,6 +544,26 @@ int main() { //. // CHECK4: @.gomp_critical_user_.reduction.var = common global [8 x i32] zeroinitializer, align 8 //. + +// CHECK1-LABEL: define {{.*}}reductionArrayElement{{.*}}.omp_outlined{{.*}} +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1: [[G_ARR:%.*]] = alloca i32, align 4 +// CHECK1: [[TMP0:%.*]] = sdiv exact i64 sub (i64 ptrtoint (ptr @g_arr to i64){{.*}} +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[G_ARR:%.*]], i64 [[TMP0]] +// CHECK1: omp.inner.for.body: +// CHECK1: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP1]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP11]],{{.+}} +// CHECK1-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4 +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void {{.*}}__kmpc_for_static_fini{{.+}} +// CHECK1: {{.*}}call i32 {{.*}}__kmpc_reduce{{.+}} +// CHECK1: omp.reduction.default: +// CHECK1-NEXT: call void @__kmpc_barrier{{.+}} +// CHECK1-NEXT: ret void +// + // CHECK1-LABEL: define {{[^@]+}}@main // CHECK1-SAME: () #[[ATTR0:[0-9]+]] { // CHECK1-NEXT: entry: @@ -614,6 +643,7 @@ int main() { // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 1, ptr @main.omp_outlined.11, ptr [[TMP7]]) // CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[VAR3]], align 8 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 1, ptr @main.omp_outlined.12, ptr [[TMP8]]) +// CHECK1-NEXT: call void {{.*}}reductionArrayElement{{.*}} // CHECK1-NEXT: [[CALL10:%.*]] = call noundef i32 @_Z5tmainIiLi42EET_v() // CHECK1-NEXT: store i32 [[CALL10]], ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 diff --git a/clang/test/OpenMP/fuse_ast_print.cpp b/clang/test/OpenMP/fuse_ast_print.cpp new file mode 100644 index 0000000000000..283f5883c907d --- /dev/null +++ b/clang/test/OpenMP/fuse_ast_print.cpp @@ -0,0 +1,397 @@ +// Check no warnings/errors +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -fsyntax-only -verify %s +// expected-no-diagnostics + +// Check AST and unparsing +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -ast-dump %s | FileCheck %s --check-prefix=DUMP +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -ast-print %s | FileCheck %s --check-prefix=PRINT + +// Check same results after serialization round-trip +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -emit-pch -o %t %s +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -include-pch %t -ast-dump-all %s | FileCheck %s --check-prefix=DUMP +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -include-pch %t -ast-print %s | FileCheck %s --check-prefix=PRINT + +#ifndef HEADER +#define HEADER + +// placeholder for loop body code +extern "C" void body(...); + +// PRINT-LABEL: void foo1( +// DUMP-LABEL: FunctionDecl {{.*}} foo1 +void foo1() { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + + } + +} + +// PRINT-LABEL: void foo2( +// DUMP-LABEL: FunctionDecl {{.*}} foo2 +void foo2() { + // PRINT: #pragma omp unroll partial(4) + // DUMP: OMPUnrollDirective + // DUMP-NEXT: OMPPartialClause + // DUMP-NEXT: ConstantExpr + // DUMP-NEXT: value: Int 4 + // DUMP-NEXT: IntegerLiteral {{.*}} 4 + #pragma omp unroll partial(4) + // PRINT: #pragma omp fuse + // DUMP-NEXT: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + } + +} + +//PRINT-LABEL: void foo3( +//DUMP-LABEL: FunctionTemplateDecl {{.*}} foo3 +template +void foo3() { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: #pragma omp unroll partial(Factor1) + // DUMP: OMPUnrollDirective + #pragma omp unroll partial(Factor1) + // PRINT: for (int i = 0; i < 12; i += 1) + // DUMP: ForStmt + for (int i = 0; i < 12; i += 1) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: #pragma omp unroll partial(Factor2) + // DUMP: 
OMPUnrollDirective + #pragma omp unroll partial(Factor2) + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + + } +} + +// Also test instantiating the template. +void tfoo3() { + foo3<4,2>(); +} + +//PRINT-LABEL: void foo4( +//DUMP-LABEL: FunctionTemplateDecl {{.*}} foo4 +template +void foo4(int start, int end) { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (T i = start; i < end; i += Step) + // DUMP: ForStmt + for (T i = start; i < end; i += Step) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + + // PRINT: for (T j = end; j > start; j -= Step) + // DUMP: ForStmt + for (T j = end; j > start; j -= Step) { + // PRINT: body(j) + // DUMP: CallExpr + body(j); + } + + } +} + +// Also test instantiating the template. +void tfoo4() { + foo4(0, 64); +} + + + +// PRINT-LABEL: void foo5( +// DUMP-LABEL: FunctionDecl {{.*}} foo5 +void foo5() { + double arr[128], arr2[128]; + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT-NEXT: for (auto &&a : arr) + // DUMP-NEXT: CXXForRangeStmt + for (auto &&a: arr) + // PRINT: body(a) + // DUMP: CallExpr + body(a); + // PRINT: for (double v = 42; auto &&b : arr) + // DUMP: CXXForRangeStmt + for (double v = 42; auto &&b: arr) + // PRINT: body(b, v); + // DUMP: CallExpr + body(b, v); + // PRINT: for (auto &&c : arr2) + // DUMP: CXXForRangeStmt + for (auto &&c: arr2) + // PRINT: body(c) + // DUMP: CallExpr + body(c); + + } + +} + +// PRINT-LABEL: void foo6( +// DUMP-LABEL: FunctionDecl {{.*}} foo6 +void foo6() { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i <= 10; ++i) + // DUMP: ForStmt + for (int i = 0; i <= 10; ++i) + body(i); + // PRINT: for (int j = 0; j < 100; ++j) + // DUMP: ForStmt + for(int j = 0; j < 100; ++j) + body(j); + } + // PRINT: #pragma omp unroll partial(4) + // DUMP: OMPUnrollDirective + #pragma omp unroll partial(4) + // PRINT: for (int k = 0; k < 250; ++k) + // DUMP: ForStmt + for (int k = 0; k < 250; ++k) + body(k); + } +} + +// PRINT-LABEL: void foo7( +// DUMP-LABEL: FunctionDecl {{.*}} foo7 +void foo7() { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + } + } + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + } + } + } + } + +} + +// PRINT-LABEL: void foo8( +// DUMP-LABEL: FunctionDecl {{.*}} foo8 +void foo8() { + // PRINT: #pragma omp fuse looprange(2,2) + // DUMP: OMPFuseDirective + // DUMP: OMPLooprangeClause + #pragma omp fuse looprange(2,2) + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; 
i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + + } + +} + +//PRINT-LABEL: void foo9( +//DUMP-LABEL: FunctionTemplateDecl {{.*}} foo9 +//DUMP-LABEL: NonTypeTemplateParmDecl {{.*}} F +//DUMP-LABEL: NonTypeTemplateParmDecl {{.*}} C +template +void foo9() { + // PRINT: #pragma omp fuse looprange(F,C) + // DUMP: OMPFuseDirective + // DUMP: OMPLooprangeClause + #pragma omp fuse looprange(F,C) + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + + } +} + +// Also test instantiating the template. +void tfoo9() { + foo9<1, 2>(); +} + +// PRINT-LABEL: void foo10( +// DUMP-LABEL: FunctionDecl {{.*}} foo10 +void foo10() { + // PRINT: #pragma omp fuse looprange(2,2) + // DUMP: OMPFuseDirective + // DUMP: OMPLooprangeClause + #pragma omp fuse looprange(2,2) + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int ii = 0; ii < 10; ii += 2) + // DUMP: ForStmt + for (int ii = 0; ii < 10; ii += 2) + // PRINT: body(ii) + // DUMP: CallExpr + body(ii); + // PRINT: #pragma omp fuse looprange(2,2) + // DUMP: OMPFuseDirective + // DUMP: OMPLooprangeClause + #pragma omp fuse looprange(2,2) + { + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + // PRINT: for (int jj = 10; jj > 0; --jj) + // DUMP: ForStmt + for (int jj = 10; jj > 0; --jj) + // PRINT: body(jj) + // DUMP: CallExpr + body(jj); + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + // PRINT: for (int kk = 0; kk <= 10; ++kk) + // DUMP: ForStmt + for (int kk = 0; kk <= 10; ++kk) + // PRINT: body(kk) + // DUMP: CallExpr + body(kk); + } + } + +} + +#endif diff --git a/clang/test/OpenMP/fuse_codegen.cpp b/clang/test/OpenMP/fuse_codegen.cpp new file mode 100644 index 0000000000000..742c280ed0172 --- /dev/null +++ b/clang/test/OpenMP/fuse_codegen.cpp @@ -0,0 +1,2328 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ --version 5 +// expected-no-diagnostics + +// Check code generation +// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -std=c++20 -fclang-abi-compat=latest -fopenmp -fopenmp-version=60 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1 + +// Check same results after serialization round-trip +// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -std=c++20 -fclang-abi-compat=latest -fopenmp -fopenmp-version=60 -emit-pch -o %t %s +// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -std=c++20 -fclang-abi-compat=latest -fopenmp -fopenmp-version=60 -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK2 + +#ifndef HEADER 
+#define HEADER + +// Placeholder for loop body code. +extern "C" void body(...) {} + +extern "C" void foo1(int start1, int end1, int step1, int start2, int end2, int step2) { + int i,j; + #pragma omp fuse + { + for(i = start1; i < end1; i += step1) body(i); + for(j = start2; j < end2; j += step2) body(j); + } + +} + +template <typename T> +void foo2(T start, T end, T step){ + T i,j,k; + #pragma omp fuse + { + for(i = start; i < end; i += step) body(i); + for(j = end; j > start; j -= step) body(j); + for(k = start+step; k < end+step; k += step) body(k); + } +} + +extern "C" void tfoo2() { + foo2(0, 64, 4); +} + +extern "C" void foo3() { + double arr[256]; + #pragma omp fuse + { + #pragma omp fuse + { + for(int i = 0; i < 128; ++i) body(i); + for(int j = 0; j < 256; j+=2) body(j); + } + for(int c = 42; auto &&v: arr) body(c,v); + for(int cc = 37; auto &&vv: arr) body(cc, vv); + } +} + +extern "C" void foo4() { + double arr[256]; + + #pragma omp fuse looprange(2,2) + { + for(int i = 0; i < 128; ++i) body(i); + for(int j = 0; j < 256; j+=2) body(j); + for(int k = 0; k < 64; ++k) body(k); + for(int c = 42; auto &&v: arr) body(c,v); + } +} + +// This exemplifies the use of loop transformations that generate +// more than one top-level canonical loop nest (e.g. split, loopranged fuse...) +extern "C" void foo5() { + double arr[256]; + #pragma omp fuse looprange(2,2) + { + #pragma omp fuse looprange(2,2) + { + for(int i = 0; i < 128; ++i) body(i); + for(int j = 0; j < 256; j+=2) body(j); + for(int k = 0; k < 512; ++k) body(k); + } + for(int c = 42; auto &&v: arr) body(c,v); + for(int cc = 37; auto &&vv: arr) body(cc, vv); + } +} + + +#endif +// CHECK1-LABEL: define dso_local void @body( +// CHECK1-SAME: ...) #[[ATTR0:[0-9]+]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @foo1( +// CHECK1-SAME: i32 noundef [[START1:%.*]], i32 noundef [[END1:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[START2:%.*]], i32 noundef [[END2:%.*]], i32 noundef [[STEP2:%.*]]) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[START1_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[END1_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[STEP1_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[START2_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[END2_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[STEP2_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP8:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +//
CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store i32 [[START1]], ptr [[START1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[END1]], ptr [[END1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[STEP1]], ptr [[STEP1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[START2]], ptr [[START2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[END2]], ptr [[END2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[STEP2]], ptr [[STEP2_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[START1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[START1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[END1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]] +// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1 +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[START2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP9]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[START2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[END2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[STEP2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[SUB10:%.*]] = sub i32 [[TMP13]], [[TMP14]] +// CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], [[TMP15]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], [[TMP16]] +// CHECK1-NEXT: [[SUB14:%.*]] = sub i32 [[DIV13]], 1 +// CHECK1-NEXT: store i32 [[SUB14]], ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK1-NEXT: [[ADD15:%.*]] = add i32 [[TMP17]], 1 +// CHECK1-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_NI1]], align 
4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP19]], [[TMP20]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label %[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP21]], %[[COND_TRUE]] ], [ [[TMP22]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: [[CMP16:%.*]] = icmp ult i32 [[TMP23]], [[TMP24]] +// CHECK1-NEXT: br i1 [[CMP16]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP17:%.*]] = icmp ult i32 [[TMP25]], [[TMP26]] +// CHECK1-NEXT: br i1 [[CMP17]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN]]: +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[ADD18:%.*]] = add i32 [[TMP27]], [[MUL]] +// CHECK1-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[MUL19:%.*]] = mul i32 [[TMP31]], [[TMP32]] +// CHECK1-NEXT: [[ADD20:%.*]] = add i32 [[TMP30]], [[MUL19]] +// CHECK1-NEXT: store i32 [[ADD20]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP33]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP21:%.*]] = icmp ult i32 [[TMP34]], [[TMP35]] +// CHECK1-NEXT: br i1 [[CMP21]], label %[[IF_THEN22:.*]], label %[[IF_END27:.*]] +// CHECK1: [[IF_THEN22]]: +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL23:%.*]] = mul i32 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[ADD24:%.*]] = add i32 [[TMP36]], [[MUL23]] +// CHECK1-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[MUL25:%.*]] = mul i32 [[TMP40]], [[TMP41]] +// CHECK1-NEXT: [[ADD26:%.*]] = add i32 [[TMP39]], [[MUL25]] +// CHECK1-NEXT: store i32 [[ADD26]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP42]]) +// CHECK1-NEXT: br label %[[IF_END27]] +// CHECK1: [[IF_END27]]: +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[INC:%.*]] = add i32 [[TMP43]], 1 +// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @tfoo2( +// CHECK1-SAME: ) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: call void @_Z4foo2IiEvT_S0_S0_(i32 noundef 0, i32 noundef 64, i32 noundef 4) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define linkonce_odr void @_Z4foo2IiEvT_S0_S0_( +// CHECK1-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] comdat { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[STEP_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP8:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 
4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP21:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]] +// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1 +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP9]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK1-NEXT: [[SUB10:%.*]] = sub i32 [[TMP13]], [[TMP14]] +// CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], [[TMP15]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], [[TMP16]] +// CHECK1-NEXT: [[SUB14:%.*]] = 
sub i32 [[DIV13]], 1 +// CHECK1-NEXT: store i32 [[SUB14]], ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK1-NEXT: [[ADD15:%.*]] = add i32 [[TMP17]], 1 +// CHECK1-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK1-NEXT: store i32 [[ADD16]], ptr [[K]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: store i32 [[ADD18]], ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: [[ADD20:%.*]] = add nsw i32 [[TMP22]], [[TMP23]] +// CHECK1-NEXT: store i32 [[ADD20]], ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP24]], ptr [[DOTNEW_STEP21]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK1-NEXT: [[SUB23:%.*]] = sub i32 [[TMP25]], [[TMP26]] +// CHECK1-NEXT: [[SUB24:%.*]] = sub i32 [[SUB23]], 1 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK1-NEXT: [[ADD25:%.*]] = add i32 [[SUB24]], [[TMP27]] +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK1-NEXT: [[DIV26:%.*]] = udiv i32 [[ADD25]], [[TMP28]] +// CHECK1-NEXT: [[SUB27:%.*]] = sub i32 [[DIV26]], 1 +// CHECK1-NEXT: store i32 [[SUB27]], ptr [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB2]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST2]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK1-NEXT: [[ADD28:%.*]] = add i32 [[TMP29]], 1 +// CHECK1-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_NI2]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP30]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP31]], [[TMP32]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label %[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP33]], %[[COND_TRUE]] ], [ [[TMP34]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK1-NEXT: [[CMP29:%.*]] = icmp ugt i32 [[TMP35]], [[TMP36]] +// CHECK1-NEXT: br i1 [[CMP29]], label %[[COND_TRUE30:.*]], label 
%[[COND_FALSE31:.*]] +// CHECK1: [[COND_TRUE30]]: +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK1-NEXT: br label %[[COND_END32:.*]] +// CHECK1: [[COND_FALSE31]]: +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK1-NEXT: br label %[[COND_END32]] +// CHECK1: [[COND_END32]]: +// CHECK1-NEXT: [[COND33:%.*]] = phi i32 [ [[TMP37]], %[[COND_TRUE30]] ], [ [[TMP38]], %[[COND_FALSE31]] ] +// CHECK1-NEXT: store i32 [[COND33]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: [[CMP34:%.*]] = icmp ult i32 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: br i1 [[CMP34]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP35:%.*]] = icmp ult i32 [[TMP41]], [[TMP42]] +// CHECK1-NEXT: br i1 [[CMP35]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN]]: +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP44]], [[TMP45]] +// CHECK1-NEXT: [[ADD36:%.*]] = add i32 [[TMP43]], [[MUL]] +// CHECK1-NEXT: store i32 [[ADD36]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[MUL37:%.*]] = mul i32 [[TMP47]], [[TMP48]] +// CHECK1-NEXT: [[ADD38:%.*]] = add i32 [[TMP46]], [[MUL37]] +// CHECK1-NEXT: store i32 [[ADD38]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP49]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP39:%.*]] = icmp ult i32 [[TMP50]], [[TMP51]] +// CHECK1-NEXT: br i1 [[CMP39]], label %[[IF_THEN40:.*]], label %[[IF_END45:.*]] +// CHECK1: [[IF_THEN40]]: +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL41:%.*]] = mul i32 [[TMP53]], [[TMP54]] +// CHECK1-NEXT: [[ADD42:%.*]] = add i32 [[TMP52]], [[MUL41]] +// CHECK1-NEXT: store i32 [[ADD42]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[MUL43:%.*]] = mul i32 [[TMP56]], [[TMP57]] +// CHECK1-NEXT: [[SUB44:%.*]] = sub i32 [[TMP55]], [[MUL43]] +// CHECK1-NEXT: store i32 [[SUB44]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP58]]) +// CHECK1-NEXT: br label %[[IF_END45]] +// CHECK1: [[IF_END45]]: +// CHECK1-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK1-NEXT: [[CMP46:%.*]] = icmp ult i32 [[TMP59]], [[TMP60]] +// CHECK1-NEXT: br i1 [[CMP46]], label %[[IF_THEN47:.*]], label %[[IF_END52:.*]] +// CHECK1: [[IF_THEN47]]: +// CHECK1-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTOMP_LB2]], align 4 +// CHECK1-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTOMP_ST2]], align 4 +// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL48:%.*]] = mul i32 [[TMP62]], [[TMP63]] +// CHECK1-NEXT: [[ADD49:%.*]] = add i32 [[TMP61]], [[MUL48]] +// CHECK1-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV2]], align 4 +// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK1-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTOMP_IV2]], align 4 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK1-NEXT: [[MUL50:%.*]] = mul i32 [[TMP65]], [[TMP66]] +// CHECK1-NEXT: [[ADD51:%.*]] = add i32 [[TMP64]], [[MUL50]] +// CHECK1-NEXT: store i32 [[ADD51]], ptr [[K]], align 4 +// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[K]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP67]]) +// CHECK1-NEXT: br label %[[IF_END52]] +// CHECK1: [[IF_END52]]: +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP68:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[INC:%.*]] = add i32 [[TMP68]], 1 +// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @foo3( +// CHECK1-SAME: ) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB03:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST04:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI05:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV06:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: 
[[DOTOMP_LB116:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_ST117:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_NI118:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV120:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[CC:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE221:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END222:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN225:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_29:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_30:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_LB2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_ST2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_NI2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_TEMP_140:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_TEMP_2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX46:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX52:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[VV:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: store i32 128, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label %[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB03]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST04]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[ADD]] to i64 +// CHECK1-NEXT: store i64 [[CONV]], ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK1-NEXT: store ptr 
[[ARR]], ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP8]], i64 0, i64 0 +// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP9]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY7]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP10]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY9]], ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP12]] to i64 +// CHECK1-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]] +// CHECK1-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8 +// CHECK1-NEXT: [[SUB12:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1 +// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i64 [[SUB12]], 1 +// CHECK1-NEXT: [[DIV14:%.*]] = sdiv i64 [[ADD13]], 1 +// CHECK1-NEXT: [[SUB15:%.*]] = sub nsw i64 [[DIV14]], 1 +// CHECK1-NEXT: store i64 [[SUB15]], ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB116]], align 8 +// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_ST117]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK1-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP14]], 1 +// CHECK1-NEXT: store i64 [[ADD19]], ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: store i32 37, ptr [[CC]], align 4 +// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE221]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY23:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP15]], i64 0, i64 0 +// CHECK1-NEXT: [[ADD_PTR24:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY23]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR24]], ptr [[__END222]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY26:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP16]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY26]], ptr [[__BEGIN225]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY28:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP17]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY28]], ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__END222]], align 8 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTCAPTURE_EXPR_29]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_29]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK1-NEXT: [[SUB_PTR_LHS_CAST31:%.*]] = ptrtoint ptr [[TMP19]] to i64 +// CHECK1-NEXT: [[SUB_PTR_RHS_CAST32:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: 
[[SUB_PTR_SUB33:%.*]] = sub i64 [[SUB_PTR_LHS_CAST31]], [[SUB_PTR_RHS_CAST32]] +// CHECK1-NEXT: [[SUB_PTR_DIV34:%.*]] = sdiv exact i64 [[SUB_PTR_SUB33]], 8 +// CHECK1-NEXT: [[SUB35:%.*]] = sub nsw i64 [[SUB_PTR_DIV34]], 1 +// CHECK1-NEXT: [[ADD36:%.*]] = add nsw i64 [[SUB35]], 1 +// CHECK1-NEXT: [[DIV37:%.*]] = sdiv i64 [[ADD36]], 1 +// CHECK1-NEXT: [[SUB38:%.*]] = sub nsw i64 [[DIV37]], 1 +// CHECK1-NEXT: store i64 [[SUB38]], ptr [[DOTCAPTURE_EXPR_30]], align 8 +// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB2]], align 8 +// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_ST2]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_30]], align 8 +// CHECK1-NEXT: [[ADD39:%.*]] = add nsw i64 [[TMP21]], 1 +// CHECK1-NEXT: store i64 [[ADD39]], ptr [[DOTOMP_NI2]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: store i64 [[TMP22]], ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[CMP41:%.*]] = icmp sgt i64 [[TMP23]], [[TMP24]] +// CHECK1-NEXT: br i1 [[CMP41]], label %[[COND_TRUE42:.*]], label %[[COND_FALSE43:.*]] +// CHECK1: [[COND_TRUE42]]: +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK1-NEXT: br label %[[COND_END44:.*]] +// CHECK1: [[COND_FALSE43]]: +// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: br label %[[COND_END44]] +// CHECK1: [[COND_END44]]: +// CHECK1-NEXT: [[COND45:%.*]] = phi i64 [ [[TMP25]], %[[COND_TRUE42]] ], [ [[TMP26]], %[[COND_FALSE43]] ] +// CHECK1-NEXT: store i64 [[COND45]], ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK1-NEXT: [[CMP47:%.*]] = icmp sgt i64 [[TMP27]], [[TMP28]] +// CHECK1-NEXT: br i1 [[CMP47]], label %[[COND_TRUE48:.*]], label %[[COND_FALSE49:.*]] +// CHECK1: [[COND_TRUE48]]: +// CHECK1-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK1-NEXT: br label %[[COND_END50:.*]] +// CHECK1: [[COND_FALSE49]]: +// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK1-NEXT: br label %[[COND_END50]] +// CHECK1: [[COND_END50]]: +// CHECK1-NEXT: [[COND51:%.*]] = phi i64 [ [[TMP29]], %[[COND_TRUE48]] ], [ [[TMP30]], %[[COND_FALSE49]] ] +// CHECK1-NEXT: store i64 [[COND51]], ptr [[DOTOMP_FUSE_MAX46]], align 8 +// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_FUSE_MAX46]], align 8 +// CHECK1-NEXT: [[CMP53:%.*]] = icmp slt i64 [[TMP31]], [[TMP32]] +// CHECK1-NEXT: br i1 [[CMP53]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: [[CMP54:%.*]] = icmp slt i64 [[TMP33]], [[TMP34]] +// CHECK1-NEXT: br i1 [[CMP54]], label %[[IF_THEN:.*]], label %[[IF_END74:.*]] +// CHECK1: [[IF_THEN]]: +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB03]], align 4 +// CHECK1-NEXT: [[CONV55:%.*]] = sext i32 [[TMP35]] to i64 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_ST04]], align 4 +// CHECK1-NEXT: 
[[CONV56:%.*]] = sext i32 [[TMP36]] to i64 +// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV56]], [[TMP37]] +// CHECK1-NEXT: [[ADD57:%.*]] = add nsw i64 [[CONV55]], [[MUL]] +// CHECK1-NEXT: [[CONV58:%.*]] = trunc i64 [[ADD57]] to i32 +// CHECK1-NEXT: store i32 [[CONV58]], ptr [[DOTOMP_IV06]], align 4 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV06]], align 4 +// CHECK1-NEXT: [[MUL59:%.*]] = mul nsw i32 [[TMP38]], 1 +// CHECK1-NEXT: [[ADD60:%.*]] = add nsw i32 0, [[MUL59]] +// CHECK1-NEXT: store i32 [[ADD60]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP61:%.*]] = icmp slt i32 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: br i1 [[CMP61]], label %[[IF_THEN62:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN62]]: +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL63:%.*]] = mul nsw i32 [[TMP42]], [[TMP43]] +// CHECK1-NEXT: [[ADD64:%.*]] = add nsw i32 [[TMP41]], [[MUL63]] +// CHECK1-NEXT: store i32 [[ADD64]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[MUL65:%.*]] = mul nsw i32 [[TMP44]], 1 +// CHECK1-NEXT: [[ADD66:%.*]] = add nsw i32 0, [[MUL65]] +// CHECK1-NEXT: store i32 [[ADD66]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP45]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP67:%.*]] = icmp slt i32 [[TMP46]], [[TMP47]] +// CHECK1-NEXT: br i1 [[CMP67]], label %[[IF_THEN68:.*]], label %[[IF_END73:.*]] +// CHECK1: [[IF_THEN68]]: +// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL69:%.*]] = mul nsw i32 [[TMP49]], [[TMP50]] +// CHECK1-NEXT: [[ADD70:%.*]] = add nsw i32 [[TMP48]], [[MUL69]] +// CHECK1-NEXT: store i32 [[ADD70]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[MUL71:%.*]] = mul nsw i32 [[TMP51]], 2 +// CHECK1-NEXT: [[ADD72:%.*]] = add nsw i32 0, [[MUL71]] +// CHECK1-NEXT: store i32 [[ADD72]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP52]]) +// CHECK1-NEXT: br label %[[IF_END73]] +// CHECK1: [[IF_END73]]: +// CHECK1-NEXT: br label %[[IF_END74]] +// CHECK1: [[IF_END74]]: +// CHECK1-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[CMP75:%.*]] = icmp slt i64 [[TMP53]], [[TMP54]] +// CHECK1-NEXT: br i1 [[CMP75]], label %[[IF_THEN76:.*]], label %[[IF_END81:.*]] +// CHECK1: [[IF_THEN76]]: +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_LB116]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_ST117]], align 8 +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[MUL77:%.*]] = mul nsw i64 [[TMP56]], [[TMP57]] +// CHECK1-NEXT: [[ADD78:%.*]] = add nsw i64 [[TMP55]], [[MUL77]] +// CHECK1-NEXT: store i64 [[ADD78]], ptr [[DOTOMP_IV120]], align 8 +// CHECK1-NEXT: [[TMP58:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_IV120]], align 8 +// CHECK1-NEXT: [[MUL79:%.*]] = mul nsw i64 [[TMP59]], 1 +// CHECK1-NEXT: [[ADD_PTR80:%.*]] = getelementptr inbounds double, ptr [[TMP58]], i64 [[MUL79]] +// CHECK1-NEXT: store ptr [[ADD_PTR80]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP60:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: store ptr [[TMP60]], ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP61:%.*]] = load i32, ptr [[C]], align 4 +// CHECK1-NEXT: [[TMP62:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP63:%.*]] = load double, ptr [[TMP62]], align 8 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP61]], double noundef [[TMP63]]) +// CHECK1-NEXT: br label %[[IF_END81]] +// CHECK1: [[IF_END81]]: +// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK1-NEXT: [[CMP82:%.*]] = icmp slt i64 [[TMP64]], [[TMP65]] +// CHECK1-NEXT: br i1 [[CMP82]], label %[[IF_THEN83:.*]], label %[[IF_END88:.*]] +// CHECK1: [[IF_THEN83]]: +// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[DOTOMP_LB2]], align 8 +// CHECK1-NEXT: [[TMP67:%.*]] = load i64, ptr [[DOTOMP_ST2]], align 8 +// CHECK1-NEXT: [[TMP68:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[MUL84:%.*]] = mul nsw i64 [[TMP67]], [[TMP68]] +// CHECK1-NEXT: [[ADD85:%.*]] = add nsw i64 [[TMP66]], [[MUL84]] +// CHECK1-NEXT: store i64 [[ADD85]], ptr [[DOTOMP_IV2]], align 8 +// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK1-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTOMP_IV2]], align 8 +// CHECK1-NEXT: [[MUL86:%.*]] = mul nsw i64 [[TMP70]], 1 +// CHECK1-NEXT: [[ADD_PTR87:%.*]] = getelementptr inbounds double, ptr [[TMP69]], i64 [[MUL86]] +// CHECK1-NEXT: store ptr [[ADD_PTR87]], ptr [[__BEGIN225]], align 8 +// CHECK1-NEXT: [[TMP71:%.*]] = load ptr, ptr [[__BEGIN225]], align 8 +// CHECK1-NEXT: store ptr [[TMP71]], ptr [[VV]], align 8 +// CHECK1-NEXT: [[TMP72:%.*]] = load i32, ptr [[CC]], align 4 +// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[VV]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = load double, ptr [[TMP73]], align 8 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP72]], double noundef [[TMP74]]) +// CHECK1-NEXT: br label %[[IF_END88]] +// CHECK1: [[IF_END88]]: +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP75:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[INC:%.*]] = add nsw i64 [[TMP75]], 1 +// CHECK1-NEXT: store i64 [[INC]], ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @foo4( +// CHECK1-SAME: ) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[K]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: store i32 64, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label %[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP5]], 128 +// CHECK1-NEXT: br i1 [[CMP1]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP6]]) +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK1-NEXT: store i32 [[INC]], ptr [[I]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND2:.*]] +// CHECK1: [[FOR_COND2]]: +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: [[CMP3:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: br i1 [[CMP3]], label %[[FOR_BODY4:.*]], label %[[FOR_END17:.*]] +// CHECK1: [[FOR_BODY4]]: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP10]], [[TMP11]] +// CHECK1-NEXT: br i1 [[CMP5]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN]]: +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP13]], [[TMP14]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 2 +// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] +// CHECK1-NEXT: store i32 [[ADD7]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP16]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP8:%.*]] = icmp slt i32 [[TMP17]], [[TMP18]] +// CHECK1-NEXT: br i1 [[CMP8]], label %[[IF_THEN9:.*]], label %[[IF_END14:.*]] +// CHECK1: [[IF_THEN9]]: +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP19]], [[MUL10]] +// CHECK1-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP22]], 1 +// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] +// CHECK1-NEXT: store i32 [[ADD13]], ptr [[K]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[K]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP23]]) +// CHECK1-NEXT: br label %[[IF_END14]] +// CHECK1: [[IF_END14]]: +// CHECK1-NEXT: br label %[[FOR_INC15:.*]] +// CHECK1: [[FOR_INC15]]: +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[INC16:%.*]] = add nsw i32 [[TMP24]], 1 +// CHECK1-NEXT: store i32 [[INC16]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND2]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK1: [[FOR_END17]]: +// CHECK1-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP25]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY18:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP26]], i64 0, i64 0 +// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY18]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND19:.*]] +// CHECK1: [[FOR_COND19]]: +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK1-NEXT: [[CMP20:%.*]] = icmp ne ptr [[TMP27]], [[TMP28]] +// CHECK1-NEXT: br i1 [[CMP20]], label %[[FOR_BODY21:.*]], label %[[FOR_END23:.*]] +// CHECK1: [[FOR_BODY21]]: +// CHECK1-NEXT: [[TMP29:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: store ptr [[TMP29]], ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[C]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = load double, ptr [[TMP31]], align 8 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP30]], double noundef [[TMP32]]) +// CHECK1-NEXT: br label %[[FOR_INC22:.*]] +// CHECK1: [[FOR_INC22]]: +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds nuw double, ptr [[TMP33]], i32 1 +// CHECK1-NEXT: store ptr [[INCDEC_PTR]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND19]] +// CHECK1: [[FOR_END23]]: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @foo5( +// CHECK1-SAME: ) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB03:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST04:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI05:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV06:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_LB116:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_ST117:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_NI118:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV120:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_TEMP_121:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX22:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX29:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[CC:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE264:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN265:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END267:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[VV:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[K]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: store i32 512, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 
+// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label %[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB03]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST04]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[ADD]] to i64 +// CHECK1-NEXT: store i64 [[CONV]], ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP8]], i64 0, i64 0 +// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP9]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY7]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP10]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY9]], ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP12]] to i64 +// CHECK1-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]] +// CHECK1-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8 +// CHECK1-NEXT: [[SUB12:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1 +// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i64 [[SUB12]], 1 +// CHECK1-NEXT: [[DIV14:%.*]] = sdiv i64 [[ADD13]], 1 +// CHECK1-NEXT: [[SUB15:%.*]] = sub nsw i64 [[DIV14]], 1 +// CHECK1-NEXT: store i64 [[SUB15]], ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK1-NEXT: store i64 0, ptr 
[[DOTOMP_LB116]], align 8 +// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_ST117]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK1-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP14]], 1 +// CHECK1-NEXT: store i64 [[ADD19]], ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[CMP23:%.*]] = icmp sgt i64 [[TMP16]], [[TMP17]] +// CHECK1-NEXT: br i1 [[CMP23]], label %[[COND_TRUE24:.*]], label %[[COND_FALSE25:.*]] +// CHECK1: [[COND_TRUE24]]: +// CHECK1-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK1-NEXT: br label %[[COND_END26:.*]] +// CHECK1: [[COND_FALSE25]]: +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: br label %[[COND_END26]] +// CHECK1: [[COND_END26]]: +// CHECK1-NEXT: [[COND27:%.*]] = phi i64 [ [[TMP18]], %[[COND_TRUE24]] ], [ [[TMP19]], %[[COND_FALSE25]] ] +// CHECK1-NEXT: store i64 [[COND27]], ptr [[DOTOMP_FUSE_MAX22]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP20]], 128 +// CHECK1-NEXT: br i1 [[CMP28]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP21]]) +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP22]], 1 +// CHECK1-NEXT: store i32 [[INC]], ptr [[I]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND30:.*]] +// CHECK1: [[FOR_COND30]]: +// CHECK1-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_FUSE_MAX22]], align 8 +// CHECK1-NEXT: [[CMP31:%.*]] = icmp slt i64 [[TMP23]], [[TMP24]] +// CHECK1-NEXT: br i1 [[CMP31]], label %[[FOR_BODY32:.*]], label %[[FOR_END63:.*]] +// CHECK1: [[FOR_BODY32]]: +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: [[CMP33:%.*]] = icmp slt i64 [[TMP25]], [[TMP26]] +// CHECK1-NEXT: br i1 [[CMP33]], label %[[IF_THEN:.*]], label %[[IF_END53:.*]] +// CHECK1: [[IF_THEN]]: +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB03]], align 4 +// CHECK1-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_ST04]], align 4 +// CHECK1-NEXT: [[CONV35:%.*]] = sext i32 [[TMP28]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV35]], [[TMP29]] +// CHECK1-NEXT: [[ADD36:%.*]] = add nsw i64 [[CONV34]], [[MUL]] +// CHECK1-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 +// CHECK1-NEXT: store i32 [[CONV37]], ptr [[DOTOMP_IV06]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV06]], align 4 +// CHECK1-NEXT: 
[[MUL38:%.*]] = mul nsw i32 [[TMP30]], 1 +// CHECK1-NEXT: [[ADD39:%.*]] = add nsw i32 0, [[MUL38]] +// CHECK1-NEXT: store i32 [[ADD39]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP40:%.*]] = icmp slt i32 [[TMP31]], [[TMP32]] +// CHECK1-NEXT: br i1 [[CMP40]], label %[[IF_THEN41:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN41]]: +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL42:%.*]] = mul nsw i32 [[TMP34]], [[TMP35]] +// CHECK1-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP33]], [[MUL42]] +// CHECK1-NEXT: store i32 [[ADD43]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[MUL44:%.*]] = mul nsw i32 [[TMP36]], 2 +// CHECK1-NEXT: [[ADD45:%.*]] = add nsw i32 0, [[MUL44]] +// CHECK1-NEXT: store i32 [[ADD45]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP37]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP46:%.*]] = icmp slt i32 [[TMP38]], [[TMP39]] +// CHECK1-NEXT: br i1 [[CMP46]], label %[[IF_THEN47:.*]], label %[[IF_END52:.*]] +// CHECK1: [[IF_THEN47]]: +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL48:%.*]] = mul nsw i32 [[TMP41]], [[TMP42]] +// CHECK1-NEXT: [[ADD49:%.*]] = add nsw i32 [[TMP40]], [[MUL48]] +// CHECK1-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[MUL50:%.*]] = mul nsw i32 [[TMP43]], 1 +// CHECK1-NEXT: [[ADD51:%.*]] = add nsw i32 0, [[MUL50]] +// CHECK1-NEXT: store i32 [[ADD51]], ptr [[K]], align 4 +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[K]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP44]]) +// CHECK1-NEXT: br label %[[IF_END52]] +// CHECK1: [[IF_END52]]: +// CHECK1-NEXT: br label %[[IF_END53]] +// CHECK1: [[IF_END53]]: +// CHECK1-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[CMP54:%.*]] = icmp slt i64 [[TMP45]], [[TMP46]] +// CHECK1-NEXT: br i1 [[CMP54]], label %[[IF_THEN55:.*]], label %[[IF_END60:.*]] +// CHECK1: [[IF_THEN55]]: +// CHECK1-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTOMP_LB116]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTOMP_ST117]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[MUL56:%.*]] = mul nsw i64 [[TMP48]], [[TMP49]] +// CHECK1-NEXT: [[ADD57:%.*]] = add nsw i64 [[TMP47]], [[MUL56]] +// CHECK1-NEXT: store i64 [[ADD57]], ptr [[DOTOMP_IV120]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV120]], align 8 +// CHECK1-NEXT: [[MUL58:%.*]] = mul nsw i64 [[TMP51]], 1 +// CHECK1-NEXT: [[ADD_PTR59:%.*]] = getelementptr inbounds double, ptr [[TMP50]], i64 [[MUL58]] +// CHECK1-NEXT: store ptr [[ADD_PTR59]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP52:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: store ptr [[TMP52]], ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[C]], align 4 +// CHECK1-NEXT: [[TMP54:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP55:%.*]] = load double, ptr [[TMP54]], align 8 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP53]], double noundef [[TMP55]]) +// CHECK1-NEXT: br label %[[IF_END60]] +// CHECK1: [[IF_END60]]: +// CHECK1-NEXT: br label %[[FOR_INC61:.*]] +// CHECK1: [[FOR_INC61]]: +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[INC62:%.*]] = add nsw i64 [[TMP56]], 1 +// CHECK1-NEXT: store i64 [[INC62]], ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND30]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK1: [[FOR_END63]]: +// CHECK1-NEXT: store i32 37, ptr [[CC]], align 4 +// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE264]], align 8 +// CHECK1-NEXT: [[TMP57:%.*]] = load ptr, ptr [[__RANGE264]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY66:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP57]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY66]], ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: [[TMP58:%.*]] = load ptr, ptr [[__RANGE264]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY68:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP58]], i64 0, i64 0 +// CHECK1-NEXT: [[ADD_PTR69:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY68]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR69]], ptr [[__END267]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND70:.*]] +// CHECK1: [[FOR_COND70]]: +// CHECK1-NEXT: [[TMP59:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: [[TMP60:%.*]] = load ptr, ptr [[__END267]], align 8 +// CHECK1-NEXT: [[CMP71:%.*]] = icmp ne ptr [[TMP59]], [[TMP60]] +// CHECK1-NEXT: br i1 [[CMP71]], label %[[FOR_BODY72:.*]], label %[[FOR_END74:.*]] +// CHECK1: [[FOR_BODY72]]: +// CHECK1-NEXT: [[TMP61:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: store ptr [[TMP61]], ptr [[VV]], align 8 +// CHECK1-NEXT: [[TMP62:%.*]] = load i32, ptr [[CC]], align 4 +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[VV]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load double, ptr 
[[TMP63]], align 8 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP62]], double noundef [[TMP64]]) +// CHECK1-NEXT: br label %[[FOR_INC73:.*]] +// CHECK1: [[FOR_INC73]]: +// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds nuw double, ptr [[TMP65]], i32 1 +// CHECK1-NEXT: store ptr [[INCDEC_PTR]], ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND70]] +// CHECK1: [[FOR_END74]]: +// CHECK1-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @body( +// CHECK2-SAME: ...) #[[ATTR0:[0-9]+]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @foo1( +// CHECK2-SAME: i32 noundef [[START1:%.*]], i32 noundef [[END1:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[START2:%.*]], i32 noundef [[END2:%.*]], i32 noundef [[STEP2:%.*]]) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[START1_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[END1_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[STEP1_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[START2_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[END2_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[STEP2_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP8:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: store i32 [[START1]], ptr [[START1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[END1]], ptr [[END1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[STEP1]], ptr [[STEP1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[START2]], ptr [[START2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[END2]], ptr [[END2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[STEP2]], ptr [[STEP2_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[START1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[START1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[END1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]] +// CHECK2-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]] +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]] +// CHECK2-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1 +// CHECK2-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[START2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP9]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[START2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[END2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[STEP2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP12]], ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[SUB10:%.*]] = sub i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], [[TMP15]] +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], [[TMP16]] +// CHECK2-NEXT: [[SUB14:%.*]] = sub i32 [[DIV13]], 1 +// CHECK2-NEXT: store i32 [[SUB14]], ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK2-NEXT: [[ADD15:%.*]] = add i32 [[TMP17]], 1 +// CHECK2-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP19]], [[TMP20]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label %[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP21]], %[[COND_TRUE]] ], [ [[TMP22]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr 
[[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: [[CMP16:%.*]] = icmp ult i32 [[TMP23]], [[TMP24]] +// CHECK2-NEXT: br i1 [[CMP16]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP17:%.*]] = icmp ult i32 [[TMP25]], [[TMP26]] +// CHECK2-NEXT: br i1 [[CMP17]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK2: [[IF_THEN]]: +// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP28]], [[TMP29]] +// CHECK2-NEXT: [[ADD18:%.*]] = add i32 [[TMP27]], [[MUL]] +// CHECK2-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[MUL19:%.*]] = mul i32 [[TMP31]], [[TMP32]] +// CHECK2-NEXT: [[ADD20:%.*]] = add i32 [[TMP30]], [[MUL19]] +// CHECK2-NEXT: store i32 [[ADD20]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP33]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP21:%.*]] = icmp ult i32 [[TMP34]], [[TMP35]] +// CHECK2-NEXT: br i1 [[CMP21]], label %[[IF_THEN22:.*]], label %[[IF_END27:.*]] +// CHECK2: [[IF_THEN22]]: +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL23:%.*]] = mul i32 [[TMP37]], [[TMP38]] +// CHECK2-NEXT: [[ADD24:%.*]] = add i32 [[TMP36]], [[MUL23]] +// CHECK2-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[MUL25:%.*]] = mul i32 [[TMP40]], [[TMP41]] +// CHECK2-NEXT: [[ADD26:%.*]] = add i32 [[TMP39]], [[MUL25]] +// CHECK2-NEXT: store i32 [[ADD26]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP42]]) +// CHECK2-NEXT: br label %[[IF_END27]] +// CHECK2: [[IF_END27]]: +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add i32 [[TMP43]], 1 +// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @foo3( +// CHECK2-SAME: ) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB03:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST04:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI05:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV06:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_LB116:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_ST117:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_NI118:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV120:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[CC:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE221:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END222:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN225:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_29:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_30:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_LB2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_ST2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_NI2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_TEMP_140:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_TEMP_2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX46:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX52:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[VV:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// 
CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: store i32 128, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label %[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB03]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST04]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK2-NEXT: [[CONV:%.*]] = sext i32 [[ADD]] to i64 +// CHECK2-NEXT: store i64 [[CONV]], ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP8]], i64 0, i64 0 +// CHECK2-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP9]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY7]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP10]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY9]], ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK2-NEXT: store ptr [[TMP11]], ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP12]] to i64 +// CHECK2-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK2-NEXT: 
[[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]] +// CHECK2-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8 +// CHECK2-NEXT: [[SUB12:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1 +// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i64 [[SUB12]], 1 +// CHECK2-NEXT: [[DIV14:%.*]] = sdiv i64 [[ADD13]], 1 +// CHECK2-NEXT: [[SUB15:%.*]] = sub nsw i64 [[DIV14]], 1 +// CHECK2-NEXT: store i64 [[SUB15]], ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_LB116]], align 8 +// CHECK2-NEXT: store i64 1, ptr [[DOTOMP_ST117]], align 8 +// CHECK2-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK2-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP14]], 1 +// CHECK2-NEXT: store i64 [[ADD19]], ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: store i32 37, ptr [[CC]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE221]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY23:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP15]], i64 0, i64 0 +// CHECK2-NEXT: [[ADD_PTR24:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY23]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR24]], ptr [[__END222]], align 8 +// CHECK2-NEXT: [[TMP16:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY26:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP16]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY26]], ptr [[__BEGIN225]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY28:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP17]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY28]], ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__END222]], align 8 +// CHECK2-NEXT: store ptr [[TMP18]], ptr [[DOTCAPTURE_EXPR_29]], align 8 +// CHECK2-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_29]], align 8 +// CHECK2-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK2-NEXT: [[SUB_PTR_LHS_CAST31:%.*]] = ptrtoint ptr [[TMP19]] to i64 +// CHECK2-NEXT: [[SUB_PTR_RHS_CAST32:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK2-NEXT: [[SUB_PTR_SUB33:%.*]] = sub i64 [[SUB_PTR_LHS_CAST31]], [[SUB_PTR_RHS_CAST32]] +// CHECK2-NEXT: [[SUB_PTR_DIV34:%.*]] = sdiv exact i64 [[SUB_PTR_SUB33]], 8 +// CHECK2-NEXT: [[SUB35:%.*]] = sub nsw i64 [[SUB_PTR_DIV34]], 1 +// CHECK2-NEXT: [[ADD36:%.*]] = add nsw i64 [[SUB35]], 1 +// CHECK2-NEXT: [[DIV37:%.*]] = sdiv i64 [[ADD36]], 1 +// CHECK2-NEXT: [[SUB38:%.*]] = sub nsw i64 [[DIV37]], 1 +// CHECK2-NEXT: store i64 [[SUB38]], ptr [[DOTCAPTURE_EXPR_30]], align 8 +// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_LB2]], align 8 +// CHECK2-NEXT: store i64 1, ptr [[DOTOMP_ST2]], align 8 +// CHECK2-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_30]], align 8 +// CHECK2-NEXT: [[ADD39:%.*]] = add nsw i64 [[TMP21]], 1 +// CHECK2-NEXT: store i64 [[ADD39]], ptr [[DOTOMP_NI2]], align 8 +// CHECK2-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: store i64 [[TMP22]], ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK2-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK2-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[CMP41:%.*]] = icmp sgt i64 [[TMP23]], [[TMP24]] +// CHECK2-NEXT: br i1 [[CMP41]], label %[[COND_TRUE42:.*]], label %[[COND_FALSE43:.*]] +// CHECK2: [[COND_TRUE42]]: +// CHECK2-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_TEMP_140]], align 
8 +// CHECK2-NEXT: br label %[[COND_END44:.*]] +// CHECK2: [[COND_FALSE43]]: +// CHECK2-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: br label %[[COND_END44]] +// CHECK2: [[COND_END44]]: +// CHECK2-NEXT: [[COND45:%.*]] = phi i64 [ [[TMP25]], %[[COND_TRUE42]] ], [ [[TMP26]], %[[COND_FALSE43]] ] +// CHECK2-NEXT: store i64 [[COND45]], ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK2-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK2-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK2-NEXT: [[CMP47:%.*]] = icmp sgt i64 [[TMP27]], [[TMP28]] +// CHECK2-NEXT: br i1 [[CMP47]], label %[[COND_TRUE48:.*]], label %[[COND_FALSE49:.*]] +// CHECK2: [[COND_TRUE48]]: +// CHECK2-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK2-NEXT: br label %[[COND_END50:.*]] +// CHECK2: [[COND_FALSE49]]: +// CHECK2-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK2-NEXT: br label %[[COND_END50]] +// CHECK2: [[COND_END50]]: +// CHECK2-NEXT: [[COND51:%.*]] = phi i64 [ [[TMP29]], %[[COND_TRUE48]] ], [ [[TMP30]], %[[COND_FALSE49]] ] +// CHECK2-NEXT: store i64 [[COND51]], ptr [[DOTOMP_FUSE_MAX46]], align 8 +// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// CHECK2-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_FUSE_MAX46]], align 8 +// CHECK2-NEXT: [[CMP53:%.*]] = icmp slt i64 [[TMP31]], [[TMP32]] +// CHECK2-NEXT: br i1 [[CMP53]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: [[CMP54:%.*]] = icmp slt i64 [[TMP33]], [[TMP34]] +// CHECK2-NEXT: br i1 [[CMP54]], label %[[IF_THEN:.*]], label %[[IF_END74:.*]] +// CHECK2: [[IF_THEN]]: +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB03]], align 4 +// CHECK2-NEXT: [[CONV55:%.*]] = sext i32 [[TMP35]] to i64 +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_ST04]], align 4 +// CHECK2-NEXT: [[CONV56:%.*]] = sext i32 [[TMP36]] to i64 +// CHECK2-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV56]], [[TMP37]] +// CHECK2-NEXT: [[ADD57:%.*]] = add nsw i64 [[CONV55]], [[MUL]] +// CHECK2-NEXT: [[CONV58:%.*]] = trunc i64 [[ADD57]] to i32 +// CHECK2-NEXT: store i32 [[CONV58]], ptr [[DOTOMP_IV06]], align 4 +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV06]], align 4 +// CHECK2-NEXT: [[MUL59:%.*]] = mul nsw i32 [[TMP38]], 1 +// CHECK2-NEXT: [[ADD60:%.*]] = add nsw i32 0, [[MUL59]] +// CHECK2-NEXT: store i32 [[ADD60]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP61:%.*]] = icmp slt i32 [[TMP39]], [[TMP40]] +// CHECK2-NEXT: br i1 [[CMP61]], label %[[IF_THEN62:.*]], label %[[IF_END:.*]] +// CHECK2: [[IF_THEN62]]: +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL63:%.*]] = mul nsw i32 [[TMP42]], [[TMP43]] +// CHECK2-NEXT: [[ADD64:%.*]] = add nsw i32 [[TMP41]], [[MUL63]] +// CHECK2-NEXT: store 
i32 [[ADD64]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[MUL65:%.*]] = mul nsw i32 [[TMP44]], 1 +// CHECK2-NEXT: [[ADD66:%.*]] = add nsw i32 0, [[MUL65]] +// CHECK2-NEXT: store i32 [[ADD66]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP45:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP45]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP67:%.*]] = icmp slt i32 [[TMP46]], [[TMP47]] +// CHECK2-NEXT: br i1 [[CMP67]], label %[[IF_THEN68:.*]], label %[[IF_END73:.*]] +// CHECK2: [[IF_THEN68]]: +// CHECK2-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL69:%.*]] = mul nsw i32 [[TMP49]], [[TMP50]] +// CHECK2-NEXT: [[ADD70:%.*]] = add nsw i32 [[TMP48]], [[MUL69]] +// CHECK2-NEXT: store i32 [[ADD70]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[MUL71:%.*]] = mul nsw i32 [[TMP51]], 2 +// CHECK2-NEXT: [[ADD72:%.*]] = add nsw i32 0, [[MUL71]] +// CHECK2-NEXT: store i32 [[ADD72]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP52:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP52]]) +// CHECK2-NEXT: br label %[[IF_END73]] +// CHECK2: [[IF_END73]]: +// CHECK2-NEXT: br label %[[IF_END74]] +// CHECK2: [[IF_END74]]: +// CHECK2-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[CMP75:%.*]] = icmp slt i64 [[TMP53]], [[TMP54]] +// CHECK2-NEXT: br i1 [[CMP75]], label %[[IF_THEN76:.*]], label %[[IF_END81:.*]] +// CHECK2: [[IF_THEN76]]: +// CHECK2-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_LB116]], align 8 +// CHECK2-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_ST117]], align 8 +// CHECK2-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[MUL77:%.*]] = mul nsw i64 [[TMP56]], [[TMP57]] +// CHECK2-NEXT: [[ADD78:%.*]] = add nsw i64 [[TMP55]], [[MUL77]] +// CHECK2-NEXT: store i64 [[ADD78]], ptr [[DOTOMP_IV120]], align 8 +// CHECK2-NEXT: [[TMP58:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_IV120]], align 8 +// CHECK2-NEXT: [[MUL79:%.*]] = mul nsw i64 [[TMP59]], 1 +// CHECK2-NEXT: [[ADD_PTR80:%.*]] = getelementptr inbounds double, ptr [[TMP58]], i64 [[MUL79]] +// CHECK2-NEXT: store ptr [[ADD_PTR80]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP60:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: store ptr [[TMP60]], ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP61:%.*]] = load i32, ptr [[C]], align 4 +// CHECK2-NEXT: [[TMP62:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP63:%.*]] = load double, ptr [[TMP62]], align 8 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP61]], double noundef [[TMP63]]) +// CHECK2-NEXT: br label %[[IF_END81]] +// CHECK2: [[IF_END81]]: +// CHECK2-NEXT: [[TMP64:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK2-NEXT: [[CMP82:%.*]] = icmp slt i64 [[TMP64]], [[TMP65]] +// CHECK2-NEXT: br i1 [[CMP82]], label %[[IF_THEN83:.*]], label %[[IF_END88:.*]] +// CHECK2: [[IF_THEN83]]: +// CHECK2-NEXT: [[TMP66:%.*]] = load i64, ptr [[DOTOMP_LB2]], align 8 +// CHECK2-NEXT: [[TMP67:%.*]] = load i64, ptr [[DOTOMP_ST2]], align 8 +// CHECK2-NEXT: [[TMP68:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[MUL84:%.*]] = mul nsw i64 [[TMP67]], [[TMP68]] +// CHECK2-NEXT: [[ADD85:%.*]] = add nsw i64 [[TMP66]], [[MUL84]] +// CHECK2-NEXT: store i64 [[ADD85]], ptr [[DOTOMP_IV2]], align 8 +// CHECK2-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK2-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTOMP_IV2]], align 8 +// CHECK2-NEXT: [[MUL86:%.*]] = mul nsw i64 [[TMP70]], 1 +// CHECK2-NEXT: [[ADD_PTR87:%.*]] = getelementptr inbounds double, ptr [[TMP69]], i64 [[MUL86]] +// CHECK2-NEXT: store ptr [[ADD_PTR87]], ptr [[__BEGIN225]], align 8 +// CHECK2-NEXT: [[TMP71:%.*]] = load ptr, ptr [[__BEGIN225]], align 8 +// CHECK2-NEXT: store ptr [[TMP71]], ptr [[VV]], align 8 +// CHECK2-NEXT: [[TMP72:%.*]] = load i32, ptr [[CC]], align 4 +// CHECK2-NEXT: [[TMP73:%.*]] = load ptr, ptr [[VV]], align 8 +// CHECK2-NEXT: [[TMP74:%.*]] = load double, ptr [[TMP73]], align 8 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP72]], double noundef [[TMP74]]) +// CHECK2-NEXT: br label %[[IF_END88]] +// CHECK2: [[IF_END88]]: +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP75:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[INC:%.*]] = add nsw i64 [[TMP75]], 1 +// CHECK2-NEXT: store i64 [[INC]], ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @foo4( +// CHECK2-SAME: ) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: store i32 
128, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[K]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: store i32 64, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label %[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP5]], 128 +// CHECK2-NEXT: br i1 [[CMP1]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP6]]) +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK2-NEXT: store i32 [[INC]], ptr [[I]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND2:.*]] +// CHECK2: [[FOR_COND2]]: +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: [[CMP3:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]] +// CHECK2-NEXT: br i1 [[CMP3]], label %[[FOR_BODY4:.*]], label %[[FOR_END17:.*]] +// CHECK2: [[FOR_BODY4]]: +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP10]], [[TMP11]] +// CHECK2-NEXT: br i1 [[CMP5]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK2: [[IF_THEN]]: +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[MUL]] +// CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 2 +// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] +// CHECK2-NEXT: store i32 [[ADD7]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP16]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP8:%.*]] = icmp slt i32 [[TMP17]], [[TMP18]] +// CHECK2-NEXT: br i1 [[CMP8]], label %[[IF_THEN9:.*]], label %[[IF_END14:.*]] +// CHECK2: [[IF_THEN9]]: +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP20]], [[TMP21]] +// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP19]], [[MUL10]] +// CHECK2-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP22]], 1 +// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] +// CHECK2-NEXT: store i32 [[ADD13]], ptr [[K]], align 4 +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[K]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP23]]) +// CHECK2-NEXT: br label %[[IF_END14]] +// CHECK2: [[IF_END14]]: +// CHECK2-NEXT: br label %[[FOR_INC15:.*]] +// CHECK2: [[FOR_INC15]]: +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[INC16:%.*]] = add nsw i32 [[TMP24]], 1 +// CHECK2-NEXT: store i32 [[INC16]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND2]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK2: [[FOR_END17]]: +// CHECK2-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[TMP25:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP25]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP26:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY18:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP26]], i64 0, i64 0 +// CHECK2-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY18]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND19:.*]] +// CHECK2: [[FOR_COND19]]: +// CHECK2-NEXT: [[TMP27:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP28:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK2-NEXT: [[CMP20:%.*]] = icmp ne ptr [[TMP27]], [[TMP28]] +// CHECK2-NEXT: br i1 [[CMP20]], label %[[FOR_BODY21:.*]], label %[[FOR_END23:.*]] +// CHECK2: [[FOR_BODY21]]: +// CHECK2-NEXT: [[TMP29:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: store ptr [[TMP29]], ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[C]], align 4 +// CHECK2-NEXT: [[TMP31:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = load double, ptr [[TMP31]], align 8 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP30]], double noundef [[TMP32]]) +// CHECK2-NEXT: br label %[[FOR_INC22:.*]] +// CHECK2: [[FOR_INC22]]: +// CHECK2-NEXT: [[TMP33:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds nuw double, ptr [[TMP33]], i32 1 +// CHECK2-NEXT: store ptr [[INCDEC_PTR]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND19]] +// CHECK2: [[FOR_END23]]: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @foo5( +// CHECK2-SAME: ) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB03:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST04:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI05:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV06:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_LB116:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_ST117:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_NI118:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV120:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_TEMP_121:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX22:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX29:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[CC:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE264:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN265:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END267:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[VV:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[K]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: store i32 512, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 
+// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label %[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB03]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST04]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK2-NEXT: [[CONV:%.*]] = sext i32 [[ADD]] to i64 +// CHECK2-NEXT: store i64 [[CONV]], ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP8]], i64 0, i64 0 +// CHECK2-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP9]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY7]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP10]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY9]], ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK2-NEXT: store ptr [[TMP11]], ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP12]] to i64 +// CHECK2-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK2-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]] +// CHECK2-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8 +// CHECK2-NEXT: [[SUB12:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1 +// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i64 [[SUB12]], 1 +// CHECK2-NEXT: [[DIV14:%.*]] = sdiv i64 [[ADD13]], 1 +// CHECK2-NEXT: [[SUB15:%.*]] = sub nsw i64 [[DIV14]], 1 +// CHECK2-NEXT: store i64 [[SUB15]], ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK2-NEXT: store i64 0, ptr 
[[DOTOMP_LB116]], align 8 +// CHECK2-NEXT: store i64 1, ptr [[DOTOMP_ST117]], align 8 +// CHECK2-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK2-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP14]], 1 +// CHECK2-NEXT: store i64 [[ADD19]], ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK2-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[CMP23:%.*]] = icmp sgt i64 [[TMP16]], [[TMP17]] +// CHECK2-NEXT: br i1 [[CMP23]], label %[[COND_TRUE24:.*]], label %[[COND_FALSE25:.*]] +// CHECK2: [[COND_TRUE24]]: +// CHECK2-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK2-NEXT: br label %[[COND_END26:.*]] +// CHECK2: [[COND_FALSE25]]: +// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: br label %[[COND_END26]] +// CHECK2: [[COND_END26]]: +// CHECK2-NEXT: [[COND27:%.*]] = phi i64 [ [[TMP18]], %[[COND_TRUE24]] ], [ [[TMP19]], %[[COND_FALSE25]] ] +// CHECK2-NEXT: store i64 [[COND27]], ptr [[DOTOMP_FUSE_MAX22]], align 8 +// CHECK2-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP20]], 128 +// CHECK2-NEXT: br i1 [[CMP28]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP21]]) +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP22]], 1 +// CHECK2-NEXT: store i32 [[INC]], ptr [[I]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND30:.*]] +// CHECK2: [[FOR_COND30]]: +// CHECK2-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_FUSE_MAX22]], align 8 +// CHECK2-NEXT: [[CMP31:%.*]] = icmp slt i64 [[TMP23]], [[TMP24]] +// CHECK2-NEXT: br i1 [[CMP31]], label %[[FOR_BODY32:.*]], label %[[FOR_END63:.*]] +// CHECK2: [[FOR_BODY32]]: +// CHECK2-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: [[CMP33:%.*]] = icmp slt i64 [[TMP25]], [[TMP26]] +// CHECK2-NEXT: br i1 [[CMP33]], label %[[IF_THEN:.*]], label %[[IF_END53:.*]] +// CHECK2: [[IF_THEN]]: +// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB03]], align 4 +// CHECK2-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64 +// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_ST04]], align 4 +// CHECK2-NEXT: [[CONV35:%.*]] = sext i32 [[TMP28]] to i64 +// CHECK2-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV35]], [[TMP29]] +// CHECK2-NEXT: [[ADD36:%.*]] = add nsw i64 [[CONV34]], [[MUL]] +// CHECK2-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 +// CHECK2-NEXT: store i32 [[CONV37]], ptr [[DOTOMP_IV06]], align 4 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV06]], align 4 +// CHECK2-NEXT: 
[[MUL38:%.*]] = mul nsw i32 [[TMP30]], 1 +// CHECK2-NEXT: [[ADD39:%.*]] = add nsw i32 0, [[MUL38]] +// CHECK2-NEXT: store i32 [[ADD39]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP40:%.*]] = icmp slt i32 [[TMP31]], [[TMP32]] +// CHECK2-NEXT: br i1 [[CMP40]], label %[[IF_THEN41:.*]], label %[[IF_END:.*]] +// CHECK2: [[IF_THEN41]]: +// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL42:%.*]] = mul nsw i32 [[TMP34]], [[TMP35]] +// CHECK2-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP33]], [[MUL42]] +// CHECK2-NEXT: store i32 [[ADD43]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[MUL44:%.*]] = mul nsw i32 [[TMP36]], 2 +// CHECK2-NEXT: [[ADD45:%.*]] = add nsw i32 0, [[MUL44]] +// CHECK2-NEXT: store i32 [[ADD45]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP37]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP46:%.*]] = icmp slt i32 [[TMP38]], [[TMP39]] +// CHECK2-NEXT: br i1 [[CMP46]], label %[[IF_THEN47:.*]], label %[[IF_END52:.*]] +// CHECK2: [[IF_THEN47]]: +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL48:%.*]] = mul nsw i32 [[TMP41]], [[TMP42]] +// CHECK2-NEXT: [[ADD49:%.*]] = add nsw i32 [[TMP40]], [[MUL48]] +// CHECK2-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[MUL50:%.*]] = mul nsw i32 [[TMP43]], 1 +// CHECK2-NEXT: [[ADD51:%.*]] = add nsw i32 0, [[MUL50]] +// CHECK2-NEXT: store i32 [[ADD51]], ptr [[K]], align 4 +// CHECK2-NEXT: [[TMP44:%.*]] = load i32, ptr [[K]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP44]]) +// CHECK2-NEXT: br label %[[IF_END52]] +// CHECK2: [[IF_END52]]: +// CHECK2-NEXT: br label %[[IF_END53]] +// CHECK2: [[IF_END53]]: +// CHECK2-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[CMP54:%.*]] = icmp slt i64 [[TMP45]], [[TMP46]] +// CHECK2-NEXT: br i1 [[CMP54]], label %[[IF_THEN55:.*]], label %[[IF_END60:.*]] +// CHECK2: [[IF_THEN55]]: +// CHECK2-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTOMP_LB116]], align 8 +// CHECK2-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTOMP_ST117]], align 8 +// CHECK2-NEXT: [[TMP49:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[MUL56:%.*]] = mul nsw i64 [[TMP48]], [[TMP49]] +// CHECK2-NEXT: [[ADD57:%.*]] = add nsw i64 [[TMP47]], [[MUL56]] +// CHECK2-NEXT: store i64 [[ADD57]], ptr [[DOTOMP_IV120]], align 8 +// CHECK2-NEXT: [[TMP50:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV120]], align 8 +// CHECK2-NEXT: [[MUL58:%.*]] = mul nsw i64 [[TMP51]], 1 +// CHECK2-NEXT: [[ADD_PTR59:%.*]] = getelementptr inbounds double, ptr [[TMP50]], i64 [[MUL58]] +// CHECK2-NEXT: store ptr [[ADD_PTR59]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP52:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: store ptr [[TMP52]], ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP53:%.*]] = load i32, ptr [[C]], align 4 +// CHECK2-NEXT: [[TMP54:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP55:%.*]] = load double, ptr [[TMP54]], align 8 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP53]], double noundef [[TMP55]]) +// CHECK2-NEXT: br label %[[IF_END60]] +// CHECK2: [[IF_END60]]: +// CHECK2-NEXT: br label %[[FOR_INC61:.*]] +// CHECK2: [[FOR_INC61]]: +// CHECK2-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[INC62:%.*]] = add nsw i64 [[TMP56]], 1 +// CHECK2-NEXT: store i64 [[INC62]], ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND30]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK2: [[FOR_END63]]: +// CHECK2-NEXT: store i32 37, ptr [[CC]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE264]], align 8 +// CHECK2-NEXT: [[TMP57:%.*]] = load ptr, ptr [[__RANGE264]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY66:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP57]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY66]], ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: [[TMP58:%.*]] = load ptr, ptr [[__RANGE264]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY68:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP58]], i64 0, i64 0 +// CHECK2-NEXT: [[ADD_PTR69:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY68]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR69]], ptr [[__END267]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND70:.*]] +// CHECK2: [[FOR_COND70]]: +// CHECK2-NEXT: [[TMP59:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: [[TMP60:%.*]] = load ptr, ptr [[__END267]], align 8 +// CHECK2-NEXT: [[CMP71:%.*]] = icmp ne ptr [[TMP59]], [[TMP60]] +// CHECK2-NEXT: br i1 [[CMP71]], label %[[FOR_BODY72:.*]], label %[[FOR_END74:.*]] +// CHECK2: [[FOR_BODY72]]: +// CHECK2-NEXT: [[TMP61:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: store ptr [[TMP61]], ptr [[VV]], align 8 +// CHECK2-NEXT: [[TMP62:%.*]] = load i32, ptr [[CC]], align 4 +// CHECK2-NEXT: [[TMP63:%.*]] = load ptr, ptr [[VV]], align 8 +// CHECK2-NEXT: [[TMP64:%.*]] = load double, ptr 
[[TMP63]], align 8 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP62]], double noundef [[TMP64]]) +// CHECK2-NEXT: br label %[[FOR_INC73:.*]] +// CHECK2: [[FOR_INC73]]: +// CHECK2-NEXT: [[TMP65:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds nuw double, ptr [[TMP65]], i32 1 +// CHECK2-NEXT: store ptr [[INCDEC_PTR]], ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND70]] +// CHECK2: [[FOR_END74]]: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @tfoo2( +// CHECK2-SAME: ) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: call void @_Z4foo2IiEvT_S0_S0_(i32 noundef 0, i32 noundef 64, i32 noundef 4) +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define linkonce_odr void @_Z4foo2IiEvT_S0_S0_( +// CHECK2-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] comdat { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[STEP_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP8:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP21:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: store i32 
[[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]] +// CHECK2-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]] +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]] +// CHECK2-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1 +// CHECK2-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP9]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP12]], ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK2-NEXT: [[SUB10:%.*]] = sub i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], [[TMP15]] +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], [[TMP16]] +// CHECK2-NEXT: [[SUB14:%.*]] = sub i32 [[DIV13]], 1 +// CHECK2-NEXT: store i32 [[SUB14]], ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK2-NEXT: [[ADD15:%.*]] = add i32 [[TMP17]], 1 +// CHECK2-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK2-NEXT: store i32 [[ADD16]], ptr [[K]], align 4 +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK2-NEXT: store i32 [[ADD18]], ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: [[ADD20:%.*]] = add nsw i32 [[TMP22]], [[TMP23]] +// CHECK2-NEXT: store i32 [[ADD20]], ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[STEP_ADDR]], 
align 4 +// CHECK2-NEXT: store i32 [[TMP24]], ptr [[DOTNEW_STEP21]], align 4 +// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK2-NEXT: [[SUB23:%.*]] = sub i32 [[TMP25]], [[TMP26]] +// CHECK2-NEXT: [[SUB24:%.*]] = sub i32 [[SUB23]], 1 +// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK2-NEXT: [[ADD25:%.*]] = add i32 [[SUB24]], [[TMP27]] +// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK2-NEXT: [[DIV26:%.*]] = udiv i32 [[ADD25]], [[TMP28]] +// CHECK2-NEXT: [[SUB27:%.*]] = sub i32 [[DIV26]], 1 +// CHECK2-NEXT: store i32 [[SUB27]], ptr [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB2]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST2]], align 4 +// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK2-NEXT: [[ADD28:%.*]] = add i32 [[TMP29]], 1 +// CHECK2-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_NI2]], align 4 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 [[TMP30]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP31]], [[TMP32]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label %[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP33]], %[[COND_TRUE]] ], [ [[TMP34]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK2-NEXT: [[CMP29:%.*]] = icmp ugt i32 [[TMP35]], [[TMP36]] +// CHECK2-NEXT: br i1 [[CMP29]], label %[[COND_TRUE30:.*]], label %[[COND_FALSE31:.*]] +// CHECK2: [[COND_TRUE30]]: +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK2-NEXT: br label %[[COND_END32:.*]] +// CHECK2: [[COND_FALSE31]]: +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK2-NEXT: br label %[[COND_END32]] +// CHECK2: [[COND_END32]]: +// CHECK2-NEXT: [[COND33:%.*]] = phi i32 [ [[TMP37]], %[[COND_TRUE30]] ], [ [[TMP38]], %[[COND_FALSE31]] ] +// CHECK2-NEXT: store i32 [[COND33]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: [[CMP34:%.*]] = icmp ult i32 [[TMP39]], [[TMP40]] +// CHECK2-NEXT: br i1 [[CMP34]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP35:%.*]] = icmp ult i32 [[TMP41]], [[TMP42]] +// CHECK2-NEXT: br i1 [[CMP35]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK2: 
[[IF_THEN]]: +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP44]], [[TMP45]] +// CHECK2-NEXT: [[ADD36:%.*]] = add i32 [[TMP43]], [[MUL]] +// CHECK2-NEXT: store i32 [[ADD36]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[MUL37:%.*]] = mul i32 [[TMP47]], [[TMP48]] +// CHECK2-NEXT: [[ADD38:%.*]] = add i32 [[TMP46]], [[MUL37]] +// CHECK2-NEXT: store i32 [[ADD38]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP49:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP49]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP39:%.*]] = icmp ult i32 [[TMP50]], [[TMP51]] +// CHECK2-NEXT: br i1 [[CMP39]], label %[[IF_THEN40:.*]], label %[[IF_END45:.*]] +// CHECK2: [[IF_THEN40]]: +// CHECK2-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL41:%.*]] = mul i32 [[TMP53]], [[TMP54]] +// CHECK2-NEXT: [[ADD42:%.*]] = add i32 [[TMP52]], [[MUL41]] +// CHECK2-NEXT: store i32 [[ADD42]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[MUL43:%.*]] = mul i32 [[TMP56]], [[TMP57]] +// CHECK2-NEXT: [[SUB44:%.*]] = sub i32 [[TMP55]], [[MUL43]] +// CHECK2-NEXT: store i32 [[SUB44]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP58:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP58]]) +// CHECK2-NEXT: br label %[[IF_END45]] +// CHECK2: [[IF_END45]]: +// CHECK2-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK2-NEXT: [[CMP46:%.*]] = icmp ult i32 [[TMP59]], [[TMP60]] +// CHECK2-NEXT: br i1 [[CMP46]], label %[[IF_THEN47:.*]], label %[[IF_END52:.*]] +// CHECK2: [[IF_THEN47]]: +// CHECK2-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTOMP_LB2]], align 4 +// CHECK2-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTOMP_ST2]], align 4 +// CHECK2-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL48:%.*]] = mul i32 [[TMP62]], [[TMP63]] +// CHECK2-NEXT: [[ADD49:%.*]] = add i32 [[TMP61]], [[MUL48]] +// CHECK2-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV2]], align 4 +// CHECK2-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK2-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTOMP_IV2]], align 4 +// CHECK2-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK2-NEXT: [[MUL50:%.*]] = mul i32 [[TMP65]], [[TMP66]] +// CHECK2-NEXT: [[ADD51:%.*]] = add i32 [[TMP64]], [[MUL50]] +// CHECK2-NEXT: store i32 [[ADD51]], ptr [[K]], align 4 +// CHECK2-NEXT: [[TMP67:%.*]] = load i32, ptr [[K]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP67]]) +// CHECK2-NEXT: br label %[[IF_END52]] +// CHECK2: [[IF_END52]]: +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP68:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add i32 [[TMP68]], 1 +// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: ret void +// +//. +// CHECK1: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} +// CHECK1: [[META4]] = !{!"llvm.loop.mustprogress"} +// CHECK1: [[LOOP5]] = distinct !{[[LOOP5]], [[META4]]} +// CHECK1: [[LOOP6]] = distinct !{[[LOOP6]], [[META4]]} +// CHECK1: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]]} +// CHECK1: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]} +// CHECK1: [[LOOP9]] = distinct !{[[LOOP9]], [[META4]]} +// CHECK1: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]]} +//. +// CHECK2: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} +// CHECK2: [[META4]] = !{!"llvm.loop.mustprogress"} +// CHECK2: [[LOOP5]] = distinct !{[[LOOP5]], [[META4]]} +// CHECK2: [[LOOP6]] = distinct !{[[LOOP6]], [[META4]]} +// CHECK2: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]]} +// CHECK2: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]} +// CHECK2: [[LOOP9]] = distinct !{[[LOOP9]], [[META4]]} +// CHECK2: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]]} +//. 
diff --git a/clang/test/OpenMP/fuse_messages.cpp b/clang/test/OpenMP/fuse_messages.cpp new file mode 100644 index 0000000000000..b86ce95cfe9bc --- /dev/null +++ b/clang/test/OpenMP/fuse_messages.cpp @@ -0,0 +1,209 @@ +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -std=c++20 -fopenmp -fopenmp-version=60 -fsyntax-only -Wuninitialized -verify %s + +void func() { + + // expected-error@+2 {{statement after '#pragma omp fuse' must be a loop sequence containing canonical loops or loop-generating constructs}} + #pragma omp fuse + ; + + // expected-error@+2 {{statement after '#pragma omp fuse' must be a for loop}} + #pragma omp fuse + {int bar = 0;} + + // expected-error@+4 {{statement after '#pragma omp fuse' must be a for loop}} + #pragma omp fuse + { + for(int i = 0; i < 10; ++i); + int x = 2; + } + + // expected-error@+2 {{statement after '#pragma omp fuse' must be a loop sequence containing canonical loops or loop-generating constructs}} + #pragma omp fuse + #pragma omp for + for (int i = 0; i < 7; ++i) + ; + + { + // expected-error@+2 {{expected statement}} + #pragma omp fuse + } + + // expected-warning@+1 {{extra tokens at the end of '#pragma omp fuse' are ignored}} + #pragma omp fuse foo + { + for (int i = 0; i < 7; ++i) + ; + for(int j = 0; j < 100; ++j); + + } + + + // expected-error@+1 {{unexpected OpenMP clause 'final' in directive '#pragma omp fuse'}} + #pragma omp fuse final(0) + { + for (int i = 0; i < 7; ++i) + ; + for(int j = 0; j < 100; ++j); + + } + + //expected-error@+3 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'i'}} + #pragma omp fuse + { + for(int i = 0; i < 10; i*=2) { + ; + } + for(int j = 0; j < 100; ++j); + } + + //expected-error@+2 {{loop sequence after '#pragma omp fuse' must contain at least 1 canonical loop or loop-generating construct}} + #pragma omp fuse + {} + + //expected-error@+3 {{statement after '#pragma omp fuse' must be a for loop}} + #pragma omp fuse + { + #pragma omp unroll full + for(int i = 0; i < 10; ++i); + + for(int j = 0; j < 10; ++j); + } + + //expected-warning@+2 {{looprange clause selects a single loop, resulting in redundant fusion}} + #pragma omp fuse + { + for(int i = 0; i < 10; ++i); + } + + //expected-warning@+1 {{looprange clause selects a single loop, resulting in redundant fusion}} + #pragma omp fuse looprange(1, 1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + } + + //expected-error@+1 {{argument to 'looprange' clause must be a strictly positive integer value}} + #pragma omp fuse looprange(1, -1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + } + + //expected-error@+1 {{argument to 'looprange' clause must be a strictly positive integer value}} + #pragma omp fuse looprange(1, 0) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + } + + const int x = 1; + constexpr int y = 4; + //expected-error@+1 {{looprange clause selects loops from 1 to 4 but this exceeds the number of loops (3) in the loop sequence}} + #pragma omp fuse looprange(x,y) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + + //expected-error@+1 {{looprange clause selects loops from 1 to 420 but this exceeds the number of loops (3) in the loop sequence}} + #pragma omp fuse looprange(1,420) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + + //expected-error@+1 {{looprange clause selects loops from 1 to 6 but this exceeds the number of loops (5) in the loop 
sequence}} + #pragma omp fuse looprange(1,6) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + // This fusion results in 2 loops + #pragma omp fuse looprange(1,2) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + } + + //expected-error@+1 {{looprange clause selects loops from 2 to 4 but this exceeds the number of loops (3) in the loop sequence}} + #pragma omp fuse looprange(2,3) + { + #pragma omp unroll partial(2) + for(int i = 0; i < 10; ++i); + + #pragma omp reverse + for(int j = 0; j < 10; ++j); + + #pragma omp fuse + { + { + #pragma omp reverse + for(int j = 0; j < 10; ++j); + } + for(int k = 0; k < 50; ++k); + } + } +} + +// In a template context, but expression itself not instantiation-dependent +template <typename T> +static void templated_func() { + + //expected-warning@+1 {{looprange clause selects a single loop, resulting in redundant fusion}} + #pragma omp fuse looprange(2,1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + + //expected-error@+1 {{looprange clause selects loops from 3 to 5 but this exceeds the number of loops (3) in the loop sequence}} + #pragma omp fuse looprange(3,3) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + +} + +template <int V> +static void templated_func_value_dependent() { + + //expected-warning@+1 {{looprange clause selects a single loop, resulting in redundant fusion}} + #pragma omp fuse looprange(V,1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } +} + +template <typename T> +static void templated_func_type_dependent() { + constexpr T s = 1; + + //expected-error@+1 {{argument to 'looprange' clause must be a strictly positive integer value}} + #pragma omp fuse looprange(s,s-1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } +} + + +void template_inst() { + // expected-note@+1 {{in instantiation of function template specialization 'templated_func<int>' requested here}} + templated_func<int>(); + // expected-note@+1 {{in instantiation of function template specialization 'templated_func_value_dependent<1>' requested here}} + templated_func_value_dependent<1>(); + // expected-note@+1 {{in instantiation of function template specialization 'templated_func_type_dependent<int>' requested here}} + templated_func_type_dependent<int>(); +} + + diff --git a/clang/test/OpenMP/nvptx_parallel_num_threads_strict_messages.cpp b/clang/test/OpenMP/nvptx_parallel_num_threads_strict_messages.cpp new file mode 100644 index 0000000000000..a1a29fee5a69f --- /dev/null +++ b/clang/test/OpenMP/nvptx_parallel_num_threads_strict_messages.cpp @@ -0,0 +1,108 @@ +// RUN: %clang_cc1 -DF1 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF1 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null +// RUN: %clang_cc1 -DF2 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF2 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device
-fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null +// RUN: %clang_cc1 -DF3 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF3 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null + +#ifndef TARGET +// expected-no-diagnostics +#endif + +#ifdef F3 +template <typename tx> +tx ftemplate(int n) { + tx a = 0; + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: tx(20)) severity(fatal) message("msg") + { + } + + short b = 1; +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: b) severity(warning) message("msg") + { + a += b; + } + + return a; +} +#endif + +#ifdef F2 +static +int fstatic(int n) { + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp target parallel num_threads(strict: n) message("msg") + { + } + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp target parallel num_threads(strict: 32+n) severity(warning) + { + } + + return n+1; +} +#endif + +#ifdef F1 +struct S1 { + double a; + + int r1(int n){ + int b = 1; + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: n-b) severity(warning) message("msg") + { + this->a = (double)b + 1.5; + } + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: 1024) severity(fatal) + { + this->a = 2.5; + } + + return (int)a; + } +}; +#endif + +int bar(int n){ + int a = 0; + +#ifdef F1 + #pragma omp target + { + S1 S; + a += S.r1(n); + } +#endif + +#ifdef F2 + a += fstatic(n); +#endif + +#ifdef F3 + #pragma omp target + a += ftemplate<int>(n); +#endif + + return a; +} diff --git a/clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp b/clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp new file mode 100644 index 0000000000000..84e4b909d3f6a --- /dev/null +++ b/clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp @@ -0,0 +1,1828 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ +// Test host codegen.
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1 +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3 + +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" + +// Test target codegen - host bc file has to be created first. 
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK9 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10 +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK11 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12 + +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device 
-fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" + +// expected-no-diagnostics + +#ifndef HEADER +#define HEADER + +template <typename tx> +tx ftemplate(int n) { + tx a = 0; + + #pragma omp parallel num_threads(strict: tx(20)) severity(fatal) message("msg") + { + } + + short b = 1; + #pragma omp parallel num_threads(strict: b) severity(warning) message("msg") + { + a += b; + } + + return a; +} + +static +int fstatic(int n) { + + #pragma omp target parallel num_threads(strict: n) message("msg") + { + } + + #pragma omp target parallel num_threads(strict: 32+n) severity(warning) + { + } + + return n+1; +} + +struct S1 { + double a; + + int r1(int n){ + int b = 1; + + #pragma omp parallel num_threads(strict: n-b) severity(warning) message("msg") + { + this->a = (double)b + 1.5; + } + + #pragma omp parallel num_threads(strict: 1024) severity(fatal) + { + this->a = 2.5; + } + + return (int)a; + } +}; + +int bar(int n){ + int a = 0; + + #pragma omp target + { + S1 S; + a += S.r1(n); + } + + a += fstatic(n); + + #pragma omp target + a += ftemplate<int>(n); + + return a; +} + +#endif +// CHECK1-LABEL: define {{[^@]+}}@_Z3bari +// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[A_CASTED1:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[N_CASTED2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[N_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 2, ptr [[TMP13]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP10]], ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 0, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP22]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP23]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP24]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 +// CHECK1-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95(i64 [[TMP1]], i64 
[[TMP3]]) #[[ATTR2:[0-9]+]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP27]]) +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP28]], [[CALL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: store i32 [[TMP29]], ptr [[A_CASTED1]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[A_CASTED1]], align 8 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP31]], ptr [[N_CASTED2]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr [[N_CASTED2]], align 8 +// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP30]], ptr [[TMP33]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP30]], ptr [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP35]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 1 +// CHECK1-NEXT: store i64 [[TMP32]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 1 +// CHECK1-NEXT: store i64 [[TMP32]], ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP38]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP41]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1 +// CHECK1-NEXT: store i32 2, ptr [[TMP42]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP39]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP40]], ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7 +// 
CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8 +// CHECK1-NEXT: store i64 0, ptr [[TMP49]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP50]], align 8 +// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP51]], align 4 +// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP52]], align 4 +// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP53]], align 4 +// CHECK1-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103.region_id, ptr [[KERNEL_ARGS6]]) +// CHECK1-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK1-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] +// CHECK1: omp_offload.failed7: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103(i64 [[TMP30]], i64 [[TMP32]]) #[[ATTR2]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT8]] +// CHECK1: omp_offload.cont8: +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: ret i32 [[TMP56]] +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR1:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP0]]) +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK1-NEXT: call void 
@__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.3, ptr [[THIS1]]) +// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK1-NEXT: ret i32 [[CONV]] +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZL7fstatici +// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED3:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_1]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META11:![0-9]+]] +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META11]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] 
= getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP13]], 0 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 2, ptr [[TMP16]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP12]], ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.4, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 0, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP25]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] [[TMP14]], ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP13]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61(i64 [[TMP3]], ptr [[TMP4]]) #[[ATTR2]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// 
CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP30]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8 +// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP32]], ptr [[TMP33]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP32]], ptr [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP35]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP39:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP38]], 0 +// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP40]], align 4 +// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP41]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP36]], ptr [[TMP42]], align 8 +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP37]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.6, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.7, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8 +// CHECK1-NEXT: store i64 0, ptr [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP49]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP50]], align 4 +// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS7]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] [[TMP39]], ptr [[TMP51]], align 4 +// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP52]], align 4 +// CHECK1-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP38]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.region_id, ptr [[KERNEL_ARGS7]]) +// CHECK1-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK1-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]] +// CHECK1: omp_offload.failed8: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65(i64 [[TMP32]]) #[[ATTR2]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT9]] +// CHECK1: omp_offload.cont9: +// CHECK1-NEXT: [[TMP55:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP55]], 1 +// CHECK1-NEXT: ret i32 [[ADD10]] +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP0]]) +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK1-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.8, ptr [[A]], ptr [[B]]) +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: ret i32 [[TMP3]] +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META11]], !align [[META12:![0-9]+]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK1-NEXT: store double [[ADD]], ptr [[A]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.3 +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK1-NEXT: store double 2.500000e+00, ptr [[A]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META11]] +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META11]] +// 
CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.8 +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr 
[[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META11]], !align [[META12]] +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META11]], !align [[META15:![0-9]+]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK1-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_Z3bari +// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[A_CASTED1:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[N_CASTED2:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 2, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP10]], ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP11]], ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP17]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 0, ptr [[TMP20]], align 8 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP21]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP23]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 +// CHECK3-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95(i32 [[TMP1]], i32 [[TMP3]]) #[[ATTR2:[0-9]+]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP27]]) +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP28]], [[CALL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: store i32 [[TMP29]], ptr [[A_CASTED1]], align 
4 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[A_CASTED1]], align 4 +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[TMP31]], ptr [[N_CASTED2]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[N_CASTED2]], align 4 +// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP30]], ptr [[TMP33]], align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP30]], ptr [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP35]], align 4 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[TMP36]], align 4 +// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[TMP37]], align 4 +// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP38]], align 4 +// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP41]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1 +// CHECK3-NEXT: store i32 2, ptr [[TMP42]], align 4 +// CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP39]], ptr [[TMP43]], align 4 +// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP40]], ptr [[TMP44]], align 4 +// CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.1, ptr [[TMP45]], align 4 +// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP46]], align 4 +// CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP47]], align 4 +// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP48]], align 4 +// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8 +// CHECK3-NEXT: store i64 0, ptr [[TMP49]], align 8 +// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP50]], align 8 +// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] [i32 
-1, i32 0, i32 0], ptr [[TMP51]], align 4 +// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP52]], align 4 +// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP53]], align 4 +// CHECK3-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103.region_id, ptr [[KERNEL_ARGS6]]) +// CHECK3-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK3-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] +// CHECK3: omp_offload.failed7: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103(i32 [[TMP30]], i32 [[TMP32]]) #[[ATTR2]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT8]] +// CHECK3: omp_offload.cont8: +// CHECK3-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: ret i32 [[TMP56]] +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR1:[0-9]+]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 +// CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP0]]) +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.3, ptr [[THIS1]]) +// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 4 +// CHECK3-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK3-NEXT: ret i32 [[CONV]] +// +// +// CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici +// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED3:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x ptr], align 4 +// CHECK3-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 4, !nonnull [[META12:![0-9]+]] +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META12]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP13]], 0 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 2, ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP11]], ptr [[TMP17]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP12]], ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.4, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP20]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 0, ptr [[TMP23]], align 8 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] [[TMP14]], ptr [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP13]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61(i32 [[TMP3]], ptr [[TMP4]]) #[[ATTR2]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP30]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK3-NEXT: 
[[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[TMP33]], align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP35]], align 4 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP39:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP38]], 0 +// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP40]], align 4 +// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP41]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP36]], ptr [[TMP42]], align 4 +// CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP37]], ptr [[TMP43]], align 4 +// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.6, ptr [[TMP44]], align 4 +// CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.7, ptr [[TMP45]], align 4 +// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP46]], align 4 +// CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP47]], align 4 +// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8 +// CHECK3-NEXT: store i64 0, ptr [[TMP48]], align 8 +// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP49]], align 8 +// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP50]], align 4 +// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] [[TMP39]], ptr [[TMP51]], align 4 +// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP52]], align 4 +// CHECK3-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP38]], ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.region_id, ptr [[KERNEL_ARGS7]]) +// CHECK3-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK3-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]] +// CHECK3: omp_offload.failed8: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65(i32 [[TMP32]]) #[[ATTR2]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT9]] +// CHECK3: omp_offload.cont9: +// CHECK3-NEXT: [[TMP55:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP55]], 1 +// CHECK3-NEXT: ret i32 [[ADD10]] +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP0]]) +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK3-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.8, ptr [[A]], ptr [[B]]) +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: ret i32 [[TMP3]] +// +// +// CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META13:![0-9]+]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK3-NEXT: store double [[ADD]], ptr [[A]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.3 +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK3-NEXT: store double 2.500000e+00, ptr [[A]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META12]] +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META12]] +// 
CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.8 +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr 
[[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META12]], !align [[META13]] +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META16:![0-9]+]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[TMP:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) +// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META12:![0-9]+]] +// CHECK9-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META12]] +// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i64 0, i64 0 +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 +// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP0]]) +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) 
#[[ATTR2:[0-9]+]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.1, ptr [[THIS1]]) +// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK9-NEXT: ret i32 [[CONV]] +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP0]]) +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR2]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK9-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK9-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.2, ptr [[A]], ptr [[B]]) +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: ret i32 [[TMP3]] +// +// +// CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META15:![0-9]+]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK9-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK9-NEXT: store double [[ADD]], ptr [[A]], align 8 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.1 +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK9-NEXT: store double 2.500000e+00, ptr [[A]], align 8 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.2 +// CHECK9-SAME: 
(ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META12]], !align [[META15]] +// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META16:![0-9]+]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK9-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 +// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP0]]) +// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK10-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK10-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR1:[0-9]+]] comdat { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) +// CHECK10-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.1, ptr [[THIS1]]) +// CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 8 +// CHECK10-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK10-NEXT: ret i32 [[CONV]] +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP0]]) +// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK10-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR1]] comdat { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK10-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK10-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK10-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK10-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.2, ptr [[A]], ptr [[B]]) +// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK10-NEXT: ret i32 [[TMP3]] +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[TMP:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 +// CHECK10-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META12:![0-9]+]] +// CHECK10-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8 +// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK10-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META12]] +// CHECK10-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i64 0, i64 0 +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META15:![0-9]+]] +// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK10-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK10-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK10-NEXT: store double [[ADD]], ptr [[A]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.1 +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK10-NEXT: store double 2.500000e+00, ptr [[A]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr 
[[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.2 +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK10-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META12]], !align [[META15]] +// CHECK10-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META16:![0-9]+]] +// CHECK10-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK10-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK10-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK10-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) +// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META13:![0-9]+]] +// CHECK11-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META13]] +// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i32 0, i32 0 +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 +// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP0]]) +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK11-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 
noundef [[N:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.1, ptr [[THIS1]]) +// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 4 +// CHECK11-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK11-NEXT: ret i32 [[CONV]] +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP0]]) +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR2]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK11-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK11-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.2, ptr [[A]], ptr [[B]]) +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: ret i32 [[TMP3]] +// +// +// CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META13]], !align [[META16:![0-9]+]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK11-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK11-NEXT: store double [[ADD]], ptr [[A]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.1 +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK11-NEXT: store double 2.500000e+00, ptr [[A]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define 
{{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.2 +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META13]], !align [[META16]] +// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META13]], !align [[META17:![0-9]+]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 +// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP0]]) +// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK12-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK12-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR1:[0-9]+]] comdat align 2 { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) +// CHECK12-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK12-NEXT: call void (ptr, 
i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.1, ptr [[THIS1]]) +// CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 4 +// CHECK12-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK12-NEXT: ret i32 [[CONV]] +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP0]]) +// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK12-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR1]] comdat { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK12-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK12-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK12-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.2, ptr [[A]], ptr [[B]]) +// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK12-NEXT: ret i32 [[TMP3]] +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[TMP:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK12-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META13:![0-9]+]] +// CHECK12-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4 +// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK12-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META13]] +// CHECK12-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i32 0, i32 0 +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META13]], !align [[META16:![0-9]+]] +// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK12-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK12-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK12-NEXT: store double [[ADD]], ptr [[A]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.1 +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK12-NEXT: store double 2.500000e+00, ptr [[A]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr 
[[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.2 +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META13]], !align [[META16]] +// CHECK12-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META13]], !align [[META17:![0-9]+]] +// CHECK12-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK12-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK12-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK12-NEXT: ret void +// diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp deleted file mode 100644 index 02b712679ad19..0000000000000 --- a/clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp +++ /dev/null @@ -1,1447 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ -// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1 -// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1 - -// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" - -// 
expected-no-diagnostics -#ifndef HEADER -#define HEADER - -typedef __INTPTR_TYPE__ intptr_t; - - -void foo(); - -struct S { - intptr_t a, b, c; - S(intptr_t a) : a(a) {} - operator char() { return a; } - ~S() {} -}; - -template -int tmain() { - char str[] = "msg1"; -#pragma omp target -#pragma omp teams distribute parallel for num_threads(strict: C) severity(fatal) message("msg") - for (int i = 0; i < 100; i++) - foo(); -#pragma omp target -#pragma omp teams distribute parallel for num_threads(strict: T(23)) severity(warning) message(str) - for (int i = 0; i < 100; i++) - foo(); - return 0; -} - -int main() { - S s(0); - char a = s; - char str[] = "msg2"; -#pragma omp target -#pragma omp teams distribute parallel for num_threads(strict: 2) severity(warning) message("msg") - for (int i = 0; i < 100; i++) { - foo(); - } -#pragma omp target - -#pragma omp teams distribute parallel for num_threads(strict: a) severity(fatal) message(str) - for (int i = 0; i < 100; i++) { - foo(); - } - return a + tmain() + tmain(); -} - -#endif -// CHECK1-LABEL: define {{[^@]+}}@main -// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const.main.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 
-// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 2, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 2, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44() #[[ATTR5:[0-9]+]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: lpad: -// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: cleanup -// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5]] -// CHECK1-NEXT: br label [[EH_RESUME:%.*]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = 
getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP28:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP28]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP30:%.*]] = zext i8 [[TMP29]] to i32 -// CHECK1-NEXT: [[TMP31:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP30]], 0 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 2, ptr [[TMP33]], align 4 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP26]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP36]], align 8 -// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP40]], align 8 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP42]], align 4 -// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [[TMP31]], ptr [[TMP43]], align 4 -// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP44]], align 4 -// CHECK1-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP30]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: 
[[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 -// CHECK1-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49(i64 [[TMP19]], ptr [[STR]]) #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: [[TMP47:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP47]] to i32 -// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef signext i32 @_Z5tmainIcLi5EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont5: -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef signext i32 @_Z5tmainI1SLi1EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont7: -// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5]] -// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: ret i32 [[TMP48]] -// CHECK1: eh.resume: -// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK1-NEXT: ret i8 [[CONV]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44 -// CHECK1-SAME: () #[[ATTR4:[0-9]+]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15:![0-9]+]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr 
@[[GLOB3]], i32 [[TMP2]], i32 2, i32 1, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8:[0-9]+]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR7:[0-9]+]] comdat { -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR5]] -// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49 -// CHECK1-SAME: (i64 noundef [[A:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[A_ADDR]], align 1 -// CHECK1-NEXT: store i8 [[TMP1]], ptr 
[[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined, i64 [[TMP3]], ptr [[TMP4]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr 
[[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 -// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP10]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]], i32 2, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined, i64 [[TMP12]], i64 [[TMP14]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// 
CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK1-SAME: () #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: 
[[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 5, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 5, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29() #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], 
i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 23, i32 0, i32 0], ptr [[TMP31]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 23, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33(ptr [[STR]]) #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK1-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// 
CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29() #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR5]] -// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP21:%.*]] = zext i8 [[TMP20]] to i32 -// CHECK1-NEXT: [[TMP22:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP21]], 0 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP23]], align 4 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP24]], align 4 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 
-// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [[TMP22]], ptr [[TMP34]], align 4 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP35]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP21]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0 -// CHECK1-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33(ptr [[STR]]) #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP38:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP39:%.*]] = extractvalue { ptr, i32 } [[TMP38]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP39]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR5]] -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29 -// CHECK1-SAME: () #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr 
[[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = 
getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 5, i32 2, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr 
[[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33 -// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined, ptr [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void 
@__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 1, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 
1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29 -// CHECK1-SAME: () #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void 
@__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 2, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 
1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33 -// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: call void 
@_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR5]] -// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined, i64 [[TMP2]], ptr [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP4]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP5]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] 
-// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 -// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP10]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]], i32 1, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP12]], i64 [[TMP14]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr 
[[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = 
extractvalue { ptr, i32 } [[TMP11]], 0
-// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]]
-// CHECK1-NEXT: unreachable
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev
-// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
-// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK1-NEXT: ret void
-//
diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp
deleted file mode 100644
index 559cfeef49080..0000000000000
--- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp
+++ /dev/null
@@ -1,1911 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
-// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
-// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s
-// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
-
-// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3
-// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s
-// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
-
-// expected-no-diagnostics
-#ifndef HEADER
-#define HEADER
-
-typedef __INTPTR_TYPE__ intptr_t;
-
-
-void foo();
-
-struct S {
-  intptr_t a, b, c;
-  S(intptr_t a) : a(a) {}
-  operator char() { return a; }
-  ~S() {}
-};
-
-template <typename T, int C>
-int tmain() {
-  char str[] = "msg";
-#pragma omp target
-#pragma omp teams distribute parallel for simd num_threads(strict: C) severity(warning) message("msg")
-  for (int i = 0; i < 100; i++)
-    foo();
-#pragma omp target
-#pragma omp teams distribute parallel for simd num_threads(strict: T(23)) severity(fatal) message(str)
-  for (int i = 0; i < 100; i++)
-    foo();
-  return 0;
-}
-
-int main() {
-  S s(0);
-  char a = s;
-  const char *str = "msg";
-#pragma omp target
-#pragma omp teams distribute parallel for simd num_threads(strict: 2) severity(fatal) message("msg")
-  for (int i = 0; i < 100; i++) {
-    foo();
-  }
-#pragma omp target
-
-#pragma omp teams distribute parallel for simd num_threads(strict: a) severity(warning) message(str)
-  for (int i = 0; i < 100; i++) {
-    foo();
-  }
-  return a + tmain<char, 5>() + tmain<S, 1>();
-}
-
-#endif
-// CHECK1-LABEL: define {{[^@]+}}@main
-// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
-// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1
-// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
-// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[STR:%.*]] = alloca ptr, align 8
-// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
-// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
-// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8
-// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8
-// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8
-// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1
-// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
-// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0)
-// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]])
-// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
-// CHECK1: invoke.cont:
-// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1
-// CHECK1-NEXT: store ptr @.str, ptr [[STR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
-// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
-// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
-// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
-// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
-// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
-// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
-// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
-// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
-// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
-// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
-// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
-// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
-// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
-// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
-// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
-// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
-// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
-// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
-// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
-// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 2, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 2, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44() #[[ATTR4:[0-9]+]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: lpad: -// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: cleanup -// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR4]] -// CHECK1-NEXT: br label [[EH_RESUME:%.*]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[STR]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP29:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP29]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP31:%.*]] = zext i8 [[TMP30]] to i32 -// CHECK1-NEXT: [[TMP32:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP31]], 0 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP33]], align 4 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 2, ptr [[TMP34]], align 4 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP28]], ptr [[TMP36]], align 8 -// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP40]], align 8 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP42]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP43]], align 4 -// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [[TMP32]], ptr [[TMP44]], align 4 -// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP45]], align 4 -// CHECK1-NEXT: [[TMP46:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP31]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP47:%.*]] = icmp ne i32 [[TMP46]], 0 -// CHECK1-NEXT: br i1 [[TMP47]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49(i64 [[TMP19]], ptr [[TMP20]]) #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: [[TMP48:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP48]] to i32 -// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef signext i32 @_Z5tmainIcLi5EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont5: -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef signext i32 @_Z5tmainI1SLi1EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont7: -// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], 
[[CALL8]] -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR4]] -// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: ret i32 [[TMP49]] -// CHECK1: eh.resume: -// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK1-NEXT: ret i8 [[CONV]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44 -// CHECK1-SAME: () #[[ATTR3:[0-9]+]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15:![0-9]+]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr 
inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 2, i32 2, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: 
[[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP20]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// 
CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8:[0-9]+]], !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] comdat { -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR4]] -// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49 -// CHECK1-SAME: (i64 noundef [[A:%.*]], ptr noundef [[STR:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1 -// CHECK1-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined, i64 [[TMP3]], ptr [[TMP4]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP8:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr 
[[DOTCAPTURE_EXPR__ADDR2]], align 8, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP8]], i32 1, ptr [[TMP9]]), !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 
[[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP28]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: 
terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK1-SAME: () #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 4, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 5, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// 
CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 5, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29() #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 23, i32 0, i32 
0], ptr [[TMP31]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 23, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33(ptr [[STR]]) #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK1-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 4, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29() #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR4]] -// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP21:%.*]] = zext i8 [[TMP20]] to i32 -// CHECK1-NEXT: [[TMP22:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP21]], 0 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP23]], align 4 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP24]], align 4 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [[TMP22]], ptr [[TMP34]], align 4 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP35]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP21]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0 -// CHECK1-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33(ptr [[STR]]) #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP38:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP39:%.*]] = extractvalue { ptr, i32 } [[TMP38]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP39]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]] -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// 
CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29 -// CHECK1-SAME: () #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = 
load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 5, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = 
alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP34]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 
[[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33 -// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(4) [[STR:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined, ptr [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = 
getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 2, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = 
trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP40]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// 
CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29 -// CHECK1-SAME: () #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: 
[[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: 
[[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP46]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// 
CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33 -// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(4) [[STR:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR4]] -// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined, i64 [[TMP2]], ptr [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP4]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP5]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], 
align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP10]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]], i32 2, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP12]], i64 [[TMP14]]), !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK1-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca 
i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP52]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP52]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@main -// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB7:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB8:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV9:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I10:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK3-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META2:![0-9]+]] -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// 
CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP3]] -// CHECK3: invoke.cont2: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] -// CHECK3: lpad: -// CHECK3-NEXT: [[TMP6:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: cleanup -// CHECK3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 0 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 1 -// CHECK3-NEXT: store i32 [[TMP8]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6:[0-9]+]] -// CHECK3-NEXT: br label [[EH_RESUME:%.*]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: store i8 [[TMP9]], ptr [[DOTCAPTURE_EXPR_4]], align 1 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[STR]], align 8 -// CHECK3-NEXT: store ptr [[TMP10]], ptr [[DOTCAPTURE_EXPR_5]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB7]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB8]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB7]], align 4 -// CHECK3-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV9]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND11:%.*]] -// CHECK3: omp.inner.for.cond11: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]] -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB8]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK3-NEXT: [[CMP12:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END20:%.*]] -// CHECK3: omp.inner.for.body13: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK3-NEXT: [[MUL14:%.*]] = mul nsw i32 [[TMP14]], 1 -// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]] -// CHECK3-NEXT: store i32 [[ADD15]], ptr [[I10]], align 4, !llvm.access.group [[ACC_GRP7]] 
-// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT16:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP7]] -// CHECK3: invoke.cont16: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE17:%.*]] -// CHECK3: omp.body.continue17: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC18:%.*]] -// CHECK3: omp.inner.for.inc18: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK3-NEXT: [[ADD19:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK3-NEXT: store i32 [[ADD19]], ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND11]], !llvm.loop [[LOOP8:![0-9]+]] -// CHECK3: omp.inner.for.end20: -// CHECK3-NEXT: store i32 100, ptr [[I10]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP16]] to i32 -// CHECK3-NEXT: [[CALL22:%.*]] = invoke noundef signext i32 @_Z5tmainIcLi5EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont21: -// CHECK3-NEXT: [[ADD23:%.*]] = add nsw i32 [[CONV]], [[CALL22]] -// CHECK3-NEXT: [[CALL25:%.*]] = invoke noundef signext i32 @_Z5tmainI1SLi1EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT24:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont24: -// CHECK3-NEXT: [[ADD26:%.*]] = add nsw i32 [[ADD23]], [[CALL25]] -// CHECK3-NEXT: store i32 [[ADD26]], ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6]] -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: ret i32 [[TMP17]] -// CHECK3: eh.resume: -// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK3-NEXT: [[LPAD_VAL27:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL27]] -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP18:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP19:%.*]] = extractvalue { ptr, i32 } [[TMP18]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP19]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw 
[[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK3-NEXT: ret i8 [[CONV]] -// -// -// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat { -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR6]] -// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK3-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP4:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP5:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB7:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV8:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I9:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 4, i1 false) -// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META2]] -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP10]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8, !nonnull [[META2]] -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[_TMP4]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB6]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB7]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB6]], align 4 -// CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV8]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] -// CHECK3: omp.inner.for.cond10: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB7]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] -// CHECK3-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]] -// CHECK3: omp.inner.for.body12: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] -// CHECK3-NEXT: store i32 [[ADD14]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP13]] -// CHECK3: invoke.cont15: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] -// CHECK3: omp.body.continue16: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] -// CHECK3: omp.inner.for.inc17: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP14:![0-9]+]] -// CHECK3: omp.inner.for.end19: -// CHECK3-NEXT: store i32 100, ptr [[I9]], align 4 -// CHECK3-NEXT: ret i32 0 -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP12:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP13:%.*]] = extractvalue { ptr, i32 } [[TMP12]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP13]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK3-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP6:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB8:%.*]] = alloca i32, align 4 -// 
CHECK3-NEXT: [[DOTOMP_UB9:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV10:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I11:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 4, i1 false) -// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META2]] -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]] -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP16]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK3-NEXT: to label [[INVOKE_CONT4:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK3: invoke.cont4: -// CHECK3-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR6]] -// CHECK3-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_3]], align 1 -// CHECK3-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_5]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_5]], align 8, !nonnull [[META2]] -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[_TMP6]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB8]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB9]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB8]], align 4 -// CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV10]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND12:%.*]] -// CHECK3: omp.inner.for.cond12: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// 
CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB9]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: [[CMP13:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] -// CHECK3-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY14:%.*]], label [[OMP_INNER_FOR_END21:%.*]] -// CHECK3: omp.inner.for.body14: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: [[MUL15:%.*]] = mul nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]] -// CHECK3-NEXT: store i32 [[ADD16]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT17:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP19]] -// CHECK3: invoke.cont17: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE18:%.*]] -// CHECK3: omp.body.continue18: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC19:%.*]] -// CHECK3: omp.inner.for.inc19: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: [[ADD20:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD20]], ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND12]], !llvm.loop [[LOOP20:![0-9]+]] -// CHECK3: omp.inner.for.end21: -// CHECK3-NEXT: store i32 100, ptr [[I11]], align 4 -// CHECK3-NEXT: ret i32 0 -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP12:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP13:%.*]] = extractvalue { ptr, i32 } [[TMP12]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP13]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR6]] -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: ret void -// diff --git a/clang/test/Parser/cxx1z-decomposition.cpp b/clang/test/Parser/cxx1z-decomposition.cpp 
index b7a8d30bd16c5..274e24ea55522 100644 --- a/clang/test/Parser/cxx1z-decomposition.cpp +++ b/clang/test/Parser/cxx1z-decomposition.cpp @@ -83,11 +83,19 @@ namespace BadSpecifiers { friend auto &[g] = n; // expected-error {{'auto' not allowed}} expected-error {{friends can only be classes or functions}} }; typedef auto &[h] = n; // expected-error {{cannot be declared 'typedef'}} - constexpr auto &[i] = n; // expected-error {{cannot be declared 'constexpr'}} + constexpr auto &[i] = n; // pre2c-error {{cannot be declared 'constexpr'}} } - static constexpr inline thread_local auto &[j1] = n; // expected-error {{cannot be declared with 'constexpr inline' specifiers}} - static thread_local auto &[j2] = n; // cxx17-warning {{declared with 'static thread_local' specifiers is a C++20 extension}} + static constexpr inline thread_local auto &[j1] = n; + // pre2c-error@-1 {{cannot be declared 'constexpr'}} \ + // expected-error@-1 {{cannot be declared 'inline'}} \ + // cxx17-warning@-1 {{declared 'static' is a C++20 extension}} \ + // cxx17-warning@-1 {{declared 'thread_local' is a C++20 extension}} + + static thread_local auto &[j2] = n; + // cxx17-warning@-1 {{declared 'static' is a C++20 extension}}\ + // cxx17-warning@-1 {{declared 'thread_local' is a C++20 extension}} + inline auto &[k] = n; // expected-error {{cannot be declared 'inline'}} diff --git a/clang/test/Parser/cxx2b-lambdas-ext-warns.cpp b/clang/test/Parser/cxx2b-lambdas-ext-warns.cpp index 7ffb7aae9d391..8c7a77815d47c 100644 --- a/clang/test/Parser/cxx2b-lambdas-ext-warns.cpp +++ b/clang/test/Parser/cxx2b-lambdas-ext-warns.cpp @@ -1,9 +1,7 @@ -// RUN: %clang_cc1 -std=c++20 %s -verify=cxx20 -// RUN: %clang_cc1 -std=c++23 %s -verify=cxx23 -// RUN: %clang_cc1 -std=c++23 -Wpre-c++23-compat %s -verify=precxx23 -// RUN: %clang_cc1 -std=c++23 -pedantic %s -verify=cxx23 - -//cxx23-no-diagnostics +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -target-feature +sme -std=c++20 %s -verify=cxx20 +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -target-feature +sme -std=c++23 %s -verify=cxx23 +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -target-feature +sme -std=c++23 -Wpre-c++23-compat %s -verify=precxx23 +// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -target-feature +sme -std=c++23 -pedantic %s -verify=cxx23 auto L1 = [] constexpr {}; // cxx20-warning@-1 {{lambda without a parameter clause is a C++23 extension}} @@ -14,3 +12,25 @@ auto L3 = [] static {}; // cxx20-warning@-1 {{lambda without a parameter clause is a C++23 extension}} // cxx20-warning@-2 {{static lambdas are a C++23 extension}} // precxx23-warning@-3 {{static lambdas are incompatible with C++ standards before C++23}} + +namespace GH161070 { +void t1() { int a = [] __arm_streaming; } +// precxx23-error@-1 {{'__arm_streaming' cannot be applied to a declaration}} +// precxx23-error@-2 {{expected body of lambda expression}} +// cxx23-error@-3 {{'__arm_streaming' cannot be applied to a declaration}} +// cxx23-error@-4 {{expected body of lambda expression}} +// cxx20-error@-5 {{'__arm_streaming' cannot be applied to a declaration}} +// cxx20-error@-6 {{expected body of lambda expression}} +// cxx20-warning@-7 {{'__arm_streaming' in this position is a C++23 extension}} +// precxx23-warning@-8 {{'__arm_streaming' in this position is incompatible with C++ standards before C++23}} + +void t2() { int a = [] [[assume(true)]]; } +// precxx23-error@-1 {{'assume' attribute cannot be applied to a declaration}} +// precxx23-error@-2 {{expected body of lambda 
expression}} +// cxx23-error@-3 {{'assume' attribute cannot be applied to a declaration}} +// cxx23-error@-4 {{expected body of lambda expression}} +// cxx20-error@-5 {{'assume' attribute cannot be applied to a declaration}} +// cxx20-error@-6 {{expected body of lambda expression}} +// cxx20-warning@-7 {{an attribute specifier sequence in this position is a C++23 extension}} +// precxx23-warning@-8 {{an attribute specifier sequence in this position is incompatible with C++ standards before C++23}} +} diff --git a/clang/test/Parser/recovery-after-expected-unqualified-id.cpp b/clang/test/Parser/recovery-after-expected-unqualified-id.cpp new file mode 100644 index 0000000000000..8019b46df1e7b --- /dev/null +++ b/clang/test/Parser/recovery-after-expected-unqualified-id.cpp @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -verify %s + +3.2 // expected-error {{expected unqualified-id}} + +extern "C" { + typedef int Int; +} + +Int foo(); // Ok diff --git a/clang/test/Preprocessor/Inputs/print-header-json/module.modulemap b/clang/test/Preprocessor/Inputs/print-header-json/module.modulemap new file mode 100644 index 0000000000000..024c43d89e278 --- /dev/null +++ b/clang/test/Preprocessor/Inputs/print-header-json/module.modulemap @@ -0,0 +1,5 @@ +module module0 { + header "header0.h" + header "header1.h" + export * +} diff --git a/clang/test/Preprocessor/Inputs/print-header-json/system/module.modulemap b/clang/test/Preprocessor/Inputs/print-header-json/system/module.modulemap new file mode 100644 index 0000000000000..8ed45ab4dcdbe --- /dev/null +++ b/clang/test/Preprocessor/Inputs/print-header-json/system/module.modulemap @@ -0,0 +1,4 @@ +module systemmodule0 { + header "system2.h" + export * +} diff --git a/clang/test/Preprocessor/print-header-json.c b/clang/test/Preprocessor/print-header-json.c index bb1830e5030d8..057dcc27d8238 100644 --- a/clang/test/Preprocessor/print-header-json.c +++ b/clang/test/Preprocessor/print-header-json.c @@ -21,8 +21,13 @@ #include "header0.h" #include "system2.h" +// RUN: rm %t.txt +// RUN: env CC_PRINT_HEADERS_FORMAT=json CC_PRINT_HEADERS_FILTERING=direct-per-file CC_PRINT_HEADERS_FILE=%t.txt %clang -fsyntax-only -I %S/Inputs/print-header-json -isystem %S/Inputs/print-header-json/system -fmodules -fimplicit-module-maps -fmodules-cache-path=%t %s -o /dev/null +// RUN: cat %t.txt | FileCheck %s --check-prefix=SUPPORTED_PERFILE_MODULES + // SUPPORTED: {"source":"{{[^,]*}}print-header-json.c","includes":["{{[^,]*}}system0.h","{{[^,]*}}system3.h","{{[^,]*}}system2.h"]} -// SUPPORTED_PERFILE: [{"source":"{{[^,]*}}print-header-json.c","includes":["{{[^,]*}}system0.h","{{[^,]*}}header0.h","{{[^,]*}}system2.h"]},{"source":"{{[^,]*}}header0.h","includes":["{{[^,]*}}system3.h","{{[^,]*}}header1.h","{{[^,]*}}header2.h"]}] +// SUPPORTED_PERFILE: {"version":"2.0.0","dependencies":[{"source":"{{[^,]*}}print-header-json.c","includes":[{"location":"{{[^,]*}}print-header-json.c:20:1","file":"{{[^,]*}}system0.h"},{"location":"{{[^,]*}}print-header-json.c:21:1","file":"{{[^,]*}}header0.h"},{"location":"{{[^,]*}}print-header-json.c:22:1","file":"{{[^,]*}}system2.h"}],"imports":[]},{"source":"{{[^,]*}}header0.h","includes":[{"location":"{{[^,]*}}header0.h:1:1","file":"{{[^,]*}}system3.h"},{"location":"{{[^,]*}}header0.h:2:1","file":"{{[^,]*}}header1.h"},{"location":"{{[^,]*}}header0.h:3:1","file":"{{[^,]*}}header2.h"}],"imports":[]}]} +// SUPPORTED_PERFILE_MODULES: 
{"version":"2.0.0","dependencies":[{"source":"{{[^,]*}}print-header-json.c","includes":[{"location":"{{[^,]*}}print-header-json.c:20:1","file":"{{[^,]*}}system0.h"}],"imports":[{"location":"{{[^,]*}}print-header-json.c:21:1","module":"module0","file":"{{[^,]*}}print-header-json{{[\/\\]+}}module.modulemap"},{"location":"{{[^,]*}}print-header-json.c:22:1","module":"systemmodule0","file":"{{[^,]*}}print-header-json{{[\/\\]+}}system{{[\/\\]+}}module.modulemap"}]}]} // UNSUPPORTED0: error: unsupported combination: -header-include-format=textual and -header-include-filtering=only-direct-system // UNSUPPORTED1: error: unsupported combination: -header-include-format=json and -header-include-filtering=none diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c index 4090f3de3075d..71d8453cdd655 100644 --- a/clang/test/Preprocessor/riscv-target-features.c +++ b/clang/test/Preprocessor/riscv-target-features.c @@ -21,6 +21,8 @@ // CHECK-NOT: __riscv_mul {{.*$}} // CHECK-NOT: __riscv_muldiv {{.*$}} // CHECK-NOT: __riscv_q {{.*$}} +// CHECK-NOT: __riscv_sdext{{.*$}} +// CHECK-NOT: __riscv_sdtrig{{.*$}} // CHECK-NOT: __riscv_sha {{.*$}} // CHECK-NOT: __riscv_shcounterenw {{.*$}} // CHECK-NOT: __riscv_shgatpa {{.*$}} @@ -33,8 +35,11 @@ // CHECK-NOT: __riscv_smcdeleg {{.*$}} // CHECK-NOT: __riscv_smcntrpmf {{.*$}} // CHECK-NOT: __riscv_smcsrind {{.*$}} +// CHECK-NOT: __riscv_smctr{{.*$}} // CHECK-NOT: __riscv_smdbltrp {{.*$}} // CHECK-NOT: __riscv_smepmp {{.*$}} +// CHECK-NOT: __riscv_smmpm{{.*$}} +// CHECK-NOT: __riscv_smnpm{{.*$}} // CHECK-NOT: __riscv_smrnmi {{.*$}} // CHECK-NOT: __riscv_smstateen {{.*$}} // CHECK-NOT: __riscv_ssaia {{.*$}} @@ -43,7 +48,10 @@ // CHECK-NOT: __riscv_sscofpmf {{.*$}} // CHECK-NOT: __riscv_sscounterenw {{.*$}} // CHECK-NOT: __riscv_sscsrind {{.*$}} +// CHECK-NOT: __riscv_ssctr{{.*$}} // CHECK-NOT: __riscv_ssdbltrp {{.*$}} +// CHECK-NOT: __riscv_ssnpm{{.*$}} +// CHECK-NOT: __riscv_sspm{{.*$}} // CHECK-NOT: __riscv_ssqosid{{.*$}} // CHECK-NOT: __riscv_ssstateen {{.*$}} // CHECK-NOT: __riscv_ssstrict {{.*$}} @@ -51,6 +59,7 @@ // CHECK-NOT: __riscv_sstvala {{.*$}} // CHECK-NOT: __riscv_sstvecd {{.*$}} // CHECK-NOT: __riscv_ssu64xl {{.*$}} +// CHECK-NOT: __riscv_supm{{.*$}} // CHECK-NOT: __riscv_svade {{.*$}} // CHECK-NOT: __riscv_svadu {{.*$}} // CHECK-NOT: __riscv_svbare {{.*$}} @@ -91,6 +100,7 @@ // CHECK-NOT: __riscv_zcmt {{.*$}} // CHECK-NOT: __riscv_zdinx {{.*$}} // CHECK-NOT: __riscv_zfa {{.*$}} +// CHECK-NOT: __riscv_zfbfmin {{.*$}} // CHECK-NOT: __riscv_zfh {{.*$}} // CHECK-NOT: __riscv_zfhmin {{.*$}} // CHECK-NOT: __riscv_zfinx {{.*$}} @@ -126,6 +136,7 @@ // CHECK-NOT: __riscv_zksh {{.*$}} // CHECK-NOT: __riscv_zkt {{.*$}} // CHECK-NOT: __riscv_zmmul {{.*$}} +// CHECK-NOT: __riscv_ztso {{.*$}} // CHECK-NOT: __riscv_zvbb {{.*$}} // CHECK-NOT: __riscv_zvbc {{.*$}} // CHECK-NOT: __riscv_zve32f {{.*$}} @@ -133,6 +144,8 @@ // CHECK-NOT: __riscv_zve64d {{.*$}} // CHECK-NOT: __riscv_zve64f {{.*$}} // CHECK-NOT: __riscv_zve64x {{.*$}} +// CHECK-NOT: __riscv_zvfbfmin {{.*$}} +// CHECK-NOT: __riscv_zvfbfwma {{.*$}} // CHECK-NOT: __riscv_zvfh {{.*$}} // CHECK-NOT: __riscv_zvkb {{.*$}} // CHECK-NOT: __riscv_zvkg {{.*$}} @@ -163,25 +176,12 @@ // Experimental extensions -// CHECK-NOT: __riscv_sdext{{.*$}} -// CHECK-NOT: __riscv_sdtrig{{.*$}} -// CHECK-NOT: __riscv_smctr{{.*$}} -// CHECK-NOT: __riscv_smmpm{{.*$}} -// CHECK-NOT: __riscv_smnpm{{.*$}} -// CHECK-NOT: __riscv_ssctr{{.*$}} -// CHECK-NOT: __riscv_ssnpm{{.*$}} -// 
CHECK-NOT: __riscv_sspm{{.*$}} -// CHECK-NOT: __riscv_supm{{.*$}} // CHECK-NOT: __riscv_zalasr {{.*$}} -// CHECK-NOT: __riscv_zfbfmin {{.*$}} // CHECK-NOT: __riscv_zicfilp {{.*$}} // CHECK-NOT: __riscv_zicfiss {{.*$}} -// CHECK-NOT: __riscv_ztso {{.*$}} // CHECK-NOT: __riscv_zvbc32e {{.*$}} // CHECK-NOT: __riscv_zvfbfa {{.*$}} // CHECK-NOT: __riscv_zvfofp8min {{.*$}} -// CHECK-NOT: __riscv_zvfbfmin {{.*$}} -// CHECK-NOT: __riscv_zvfbfwma {{.*$}} // CHECK-NOT: __riscv_zvkgs {{.*$}} // CHECK-NOT: __riscv_zvqdotq {{.*$}} diff --git a/clang/test/Sema/attr-format.c b/clang/test/Sema/attr-format.c index 5b9e4d02bbaf9..820abd8ec527e 100644 --- a/clang/test/Sema/attr-format.c +++ b/clang/test/Sema/attr-format.c @@ -106,3 +106,11 @@ void b2(const char *a, ...) __attribute__((format(syslog, 1, 1))); // expecte void c2(const char *a, ...) __attribute__((format(syslog, 0, 2))); // expected-error {{'format' attribute parameter 2 is out of bounds}} void d2(const char *a, int c) __attribute__((format(syslog, 1, 2))); // expected-warning {{GCC requires a function with the 'format' attribute to be variadic}} void e2(char *str, int c, ...) __attribute__((format(syslog, 2, 3))); // expected-error {{format argument not a string type}} + +// gnu_printf +// same as format(printf(...))... +void a2(const char *a, ...) __attribute__((format(gnu_printf, 1, 2))); // no-error +void b2(const char *a, ...) __attribute__((format(gnu_printf, 1, 1))); // expected-error {{'format' attribute parameter 3 is out of bounds}} +void c2(const char *a, ...) __attribute__((format(gnu_printf, 0, 2))); // expected-error {{'format' attribute parameter 2 is out of bounds}} +void d2(const char *a, int c) __attribute__((format(gnu_printf, 1, 2))); // expected-warning {{GCC requires a function with the 'format' attribute to be variadic}} +void e2(char *str, int c, ...) 
__attribute__((format(gnu_printf, 2, 3))); // expected-error {{format argument not a string type}} diff --git a/clang/test/Sema/builtin-masked.c b/clang/test/Sema/builtin-masked.c index eb0070b0276af..af555785aca7a 100644 --- a/clang/test/Sema/builtin-masked.c +++ b/clang/test/Sema/builtin-masked.c @@ -5,44 +5,34 @@ typedef _Bool v8b __attribute__((ext_vector_type(8))); typedef _Bool v2b __attribute__((ext_vector_type(2))); typedef float v8f __attribute__((ext_vector_type(8))); -void test_masked_load(v8i *pf, v8b mask, v2b mask2, v2b thru) { +void test_masked_load(int *pf, v8b mask, v2b mask2, v2b thru) { (void)__builtin_masked_load(mask); // expected-error {{too few arguments to function call, expected 2, have 1}} (void)__builtin_masked_load(mask, pf, pf, pf); // expected-error {{too many arguments to function call, expected at most 3, have 4}} - (void)__builtin_masked_load(mask2, pf); // expected-error {{all arguments to '__builtin_masked_load' must have the same number of elements}} - (void)__builtin_masked_load(mask, mask); // expected-error {{2nd argument must be a pointer to vector}} - (void)__builtin_masked_load(mask, (void *)0); // expected-error {{2nd argument must be a pointer to vector}} - (void)__builtin_masked_load(mask2, pf, thru); // expected-error {{3rd argument must be a 'v8i' (vector of 8 'int' values)}} - (void)__builtin_masked_load(mask2, pf); // expected-error {{all arguments to '__builtin_masked_load' must have the same number of elements}} + (void)__builtin_masked_load(mask, mask); // expected-error {{2nd argument must be a scalar pointer}} + (void)__builtin_masked_load(mask2, pf, thru); // expected-error {{3rd argument must be a 'int __attribute__((ext_vector_type(2)))' (vector of 2 'int' values)}} } -void test_masked_store(v8i *pf, v8f *pf2, v8b mask, v2b mask2) { +void test_masked_store(int *pf, v8f *pf2, v8b mask, v2b mask2) { __builtin_masked_store(mask); // expected-error {{too few arguments to function call, expected 3, have 1}} __builtin_masked_store(mask, 0, 0, 0); // expected-error {{too many arguments to function call, expected 3, have 4}} __builtin_masked_store(0, 0, pf); // expected-error {{1st argument must be a vector of boolean types (was 'int')}} __builtin_masked_store(mask, 0, pf); // expected-error {{2nd argument must be a vector}} - __builtin_masked_store(mask, *pf, 0); // expected-error {{3rd argument must be a pointer to vector}} - __builtin_masked_store(mask2, *pf, pf); // expected-error {{all arguments to '__builtin_masked_store' must have the same number of elements}} - __builtin_masked_store(mask, *pf, pf2); // expected-error {{last two arguments to '__builtin_masked_store' must have the same type}} + __builtin_masked_store(mask, *pf, 0); // expected-error {{3rd argument must be a scalar pointer}} } -void test_masked_expand_load(v8i *pf, v8b mask, v2b mask2, v2b thru) { +void test_masked_expand_load(int *pf, v8b mask, v2b mask2, v2b thru) { (void)__builtin_masked_expand_load(mask); // expected-error {{too few arguments to function call, expected 2, have 1}} (void)__builtin_masked_expand_load(mask, pf, pf, pf); // expected-error {{too many arguments to function call, expected at most 3, have 4}} - (void)__builtin_masked_expand_load(mask2, pf); // expected-error {{all arguments to '__builtin_masked_expand_load' must have the same number of elements}} - (void)__builtin_masked_expand_load(mask, mask); // expected-error {{2nd argument must be a pointer to vector}} - (void)__builtin_masked_expand_load(mask, (void *)0); // expected-error {{2nd 
argument must be a pointer to vector}} - (void)__builtin_masked_expand_load(mask2, pf, thru); // expected-error {{3rd argument must be a 'v8i' (vector of 8 'int' values)}} - (void)__builtin_masked_expand_load(mask2, pf); // expected-error {{all arguments to '__builtin_masked_expand_load' must have the same number of elements}} + (void)__builtin_masked_expand_load(mask, mask); // expected-error {{2nd argument must be a scalar pointer}} + (void)__builtin_masked_expand_load(mask2, pf, thru); // expected-error {{3rd argument must be a 'int __attribute__((ext_vector_type(2)))' (vector of 2 'int' values)}} } -void test_masked_compress_store(v8i *pf, v8f *pf2, v8b mask, v2b mask2) { +void test_masked_compress_store(int *pf, v8f *pf2, v8b mask, v2b mask2) { __builtin_masked_compress_store(mask); // expected-error {{too few arguments to function call, expected 3, have 1}} __builtin_masked_compress_store(mask, 0, 0, 0); // expected-error {{too many arguments to function call, expected 3, have 4}} __builtin_masked_compress_store(0, 0, pf); // expected-error {{1st argument must be a vector of boolean types (was 'int')}} __builtin_masked_compress_store(mask, 0, pf); // expected-error {{2nd argument must be a vector}} - __builtin_masked_compress_store(mask, *pf, 0); // expected-error {{3rd argument must be a pointer to vector}} - __builtin_masked_compress_store(mask2, *pf, pf); // expected-error {{all arguments to '__builtin_masked_compress_store' must have the same number of elements}} - __builtin_masked_compress_store(mask, *pf, pf2); // expected-error {{last two arguments to '__builtin_masked_compress_store' must have the same type}} + __builtin_masked_compress_store(mask, *pf, 0); // expected-error {{3rd argument must be a scalar pointer}} } void test_masked_gather(int *p, v8i idx, v8b mask, v2b mask2, v2b thru) { @@ -61,6 +51,53 @@ void test_masked_scatter(int *p, v8i idx, v8b mask, v2b mask2, v8i val) { __builtin_masked_scatter(p, p, p, p); // expected-error {{1st argument must be a vector of boolean types (was 'int *')}} __builtin_masked_scatter(mask, p, p, p); // expected-error {{2nd argument must be a vector of integer types (was 'int *')}} __builtin_masked_scatter(mask, idx, mask, p); // expected-error {{last two arguments to '__builtin_masked_scatter' must have the same type}} - __builtin_masked_scatter(mask, idx, val, idx); // expected-error {{3rd argument must be a scalar pointer}} - __builtin_masked_scatter(mask, idx, val, &idx); // expected-error {{3rd argument must be a scalar pointer}} + __builtin_masked_scatter(mask, idx, val, idx); // expected-error {{4th argument must be a scalar pointer}} + __builtin_masked_scatter(mask, idx, val, &idx); // expected-error {{4th argument must be a scalar pointer}} +} + +void a(v8b mask, v8i v, const int *ptr) { + __builtin_masked_load(mask, ptr, v); + (void)__builtin_masked_load(mask, (volatile int *)ptr, v); // expected-error {{sending 'volatile int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('volatile int *' vs 'int *')}} +} + +void b(v8b mask, v8i idx, const int *ptr) { + (void)__builtin_masked_gather(mask, idx, ptr); + (void)__builtin_masked_gather(mask, idx, (volatile int *)ptr); // expected-error {{sending 'volatile int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('volatile int *' vs 'int *')}} +} + +void c(v8b mask, const v8i v, int *ptr) { + __builtin_masked_store(mask, v, ptr); +} + +void readonly(v8b mask, v8i v, const int *ptr, const int *s) { + 
(void)__builtin_masked_store(mask, v, ptr); // expected-error {{sending 'const int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('const int *' vs 'int *')}} + (void)__builtin_masked_compress_store(mask, v, ptr); // expected-error {{sending 'const int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('const int *' vs 'int *')}} + (void)__builtin_masked_scatter(mask, v, v, s); // expected-error {{sending 'const int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('const int *' vs 'int *')}} +} + +void vol(v8b mask, v8i v, volatile int *ptr, volatile int *s) { + (void)__builtin_masked_load(mask, ptr); // expected-error {{sending 'volatile int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('volatile int *' vs 'int *')}} + (void)__builtin_masked_store(mask, v, ptr); // expected-error {{sending 'volatile int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('volatile int *' vs 'int *')}} + (void)__builtin_masked_expand_load(mask, ptr); // expected-error {{sending 'volatile int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('volatile int *' vs 'int *')}} + (void)__builtin_masked_compress_store(mask, v, ptr); // expected-error {{sending 'volatile int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('volatile int *' vs 'int *')}} + (void)__builtin_masked_gather(mask, v, ptr);// expected-error {{sending 'volatile int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('volatile int *' vs 'int *')}} + (void)__builtin_masked_scatter(mask, v, v, s); // expected-error {{sending 'volatile int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('volatile int *' vs 'int *')}} +} + +void as(v8b mask, int [[clang::address_space(999)]] * ptr, v8i v) { + (void)__builtin_masked_load(mask, ptr); + (void)__builtin_masked_store(mask, v, ptr); + (void)__builtin_masked_expand_load(mask, ptr); // expected-error {{sending '__attribute__((address_space(999))) int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('__attribute__((address_space(999))) int *' vs 'int *')}} + (void)__builtin_masked_compress_store(mask, v, ptr); // expected-error {{sending '__attribute__((address_space(999))) int *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('__attribute__((address_space(999))) int *' vs 'int *')}} + (void)__builtin_masked_gather(mask, v, ptr); + (void)__builtin_masked_scatter(mask, v, v, ptr); +} + +void atom(v8b mask, _Atomic int * ptr, v8i v) { + (void)__builtin_masked_load(mask, ptr); // expected-error {{'_Atomic(int) *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('_Atomic(int) *' vs 'int *')}} + (void)__builtin_masked_store(mask, v, ptr); // expected-error {{'_Atomic(int) *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('_Atomic(int) *' vs 'int *')}} + (void)__builtin_masked_expand_load(mask, ptr); // expected-error {{'_Atomic(int) *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('_Atomic(int) *' vs 'int *')}} + (void)__builtin_masked_compress_store(mask, v, ptr); // expected-error {{'_Atomic(int) *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('_Atomic(int) *' vs 'int *')}} + (void)__builtin_masked_gather(mask, v, ptr); // expected-error {{'_Atomic(int) *' to parameter of incompatible 
type 'int *': type mismatch at 2nd parameter ('_Atomic(int) *' vs 'int *')}} + (void)__builtin_masked_scatter(mask, v, v, ptr); // expected-error {{'_Atomic(int) *' to parameter of incompatible type 'int *': type mismatch at 2nd parameter ('_Atomic(int) *' vs 'int *')}} } diff --git a/clang/test/Sema/builtin-masked.cpp b/clang/test/Sema/builtin-masked.cpp new file mode 100644 index 0000000000000..748c89939c918 --- /dev/null +++ b/clang/test/Sema/builtin-masked.cpp @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify %s + +using v8i = int [[clang::ext_vector_type(8)]]; +using v8b = bool [[clang::ext_vector_type(8)]]; + +template <typename V, typename T> +static void load(v8b mask, V value, const T *ptr) { + (void)__builtin_masked_load(mask, ptr, value); // expected-error {{2nd argument must be a scalar pointer}} + (void)__builtin_masked_expand_load(mask, ptr, value); // expected-error {{2nd argument must be a scalar pointer}} + (void)__builtin_masked_gather(mask, value, ptr); // expected-error {{3rd argument must be a scalar pointer}} +} + +template <typename V, typename T> +static void store(v8b mask, V value, T *ptr) { + (void)__builtin_masked_store(mask, value, ptr); // expected-error {{3rd argument must be a scalar pointer}} + (void)__builtin_masked_compress_store(mask, value, ptr); // expected-error {{3rd argument must be a scalar pointer}} + (void)__builtin_masked_scatter(mask, value, value, ptr); // expected-error {{4th argument must be a scalar pointer}} +} + +void test_masked(v8b mask, v8i v, int *ptr) { + load(mask, v, ptr); + store(mask, v, ptr); + load(mask, v, &v); // expected-note {{in instantiation of function template specialization 'load' requested here}} + store(mask, v, &v); // expected-note {{in instantiation of function template specialization 'store' requested here}} +} diff --git a/clang/test/Sema/const-eval.c b/clang/test/Sema/const-eval.c index 11cc7fbc0feb3..53face901d75e 100644 --- a/clang/test/Sema/const-eval.c +++ b/clang/test/Sema/const-eval.c @@ -138,7 +138,7 @@ EVAL_EXPR(52, &pr24622 == (void *)&PR24622); // We evaluate these by providing 2s' complement semantics in constant // expressions, like we do for integers. -void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // expected-warning {{the pointer incremented by 18446744073709551615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2305843009213693952 elements)}} +void *PR28739a = (__int128)(unsigned long)-1 + &PR28739a; // expected-warning {{the pointer incremented by 18'446'744'073'709'551'615 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2'305'843'009'213'693'952 elements)}} void *PR28739b = &PR28739b + (__int128)(unsigned long)-1; // expected-warning {{refers past the last possible element}} __int128 PR28739c = (&PR28739c + (__int128)(unsigned long)-1) - &PR28739c; // expected-warning {{refers past the last possible element}} void *PR28739d = &(&PR28739d)[(__int128)(unsigned long)-1]; // expected-warning {{refers past the last possible element}} diff --git a/clang/test/Sema/format-strings-scanf.c b/clang/test/Sema/format-strings-scanf.c index d1f694f3595cf..22c1cce2f989b 100644 --- a/clang/test/Sema/format-strings-scanf.c +++ b/clang/test/Sema/format-strings-scanf.c @@ -30,6 +30,7 @@ int fscanf(FILE * restrict, const char * restrict, ...) ; int scanf(const char * restrict, ...) ; int sscanf(const char * restrict, const char * restrict, ...) 
; int my_scanf(const char * restrict, ...) __attribute__((__format__(__scanf__, 1, 2))); +int my_gnu_scanf(const char * restrict, ...) __attribute__((__format__(gnu_scanf, 1, 2))); int vscanf(const char * restrict, va_list); int vfscanf(FILE * restrict, const char * restrict, va_list); @@ -98,6 +99,7 @@ void test_variants(int *i, const char *s, ...) { fscanf(f, "%ld", i); // expected-warning{{format specifies type 'long *' but the argument has type 'int *'}} sscanf(buf, "%ld", i); // expected-warning{{format specifies type 'long *' but the argument has type 'int *'}} my_scanf("%ld", i); // expected-warning{{format specifies type 'long *' but the argument has type 'int *'}} + my_gnu_scanf("%ld", i); // expected-warning{{format specifies type 'long *' but the argument has type 'int *'}} va_list ap; va_start(ap, s); diff --git a/clang/test/Sema/format-strings.c b/clang/test/Sema/format-strings.c index 4bff30c313c8f..103dd8ab5a85c 100644 --- a/clang/test/Sema/format-strings.c +++ b/clang/test/Sema/format-strings.c @@ -678,15 +678,21 @@ void pr18905(void) { } void __attribute__((format(strfmon,1,2))) monformat(const char *fmt, ...); +void __attribute__((format(gnu_strfmon,1,2))) gnu_monformat(const char *fmt, ...); void __attribute__((format(strftime,1,0))) dateformat(const char *fmt); +void __attribute__((format(gnu_strftime,1,0))) gnu_dateformat(const char *fmt); // Other formats void test_other_formats(void) { char *str = ""; monformat("", 1); // expected-warning{{format string is empty}} monformat(str); // expected-warning{{format string is not a string literal (potentially insecure)}} + gnu_monformat("", 1); // expected-warning{{format string is empty}} + gnu_monformat(str); // expected-warning{{format string is not a string literal (potentially insecure)}} dateformat(""); // expected-warning{{format string is empty}} dateformat(str); // no-warning (using strftime non-literal is not unsafe) + gnu_dateformat(""); // expected-warning{{format string is empty}} + gnu_dateformat(str); // no-warning (using strftime non-literal is not unsafe) } // Do not warn about unused arguments coming from system headers. 
diff --git a/clang/test/Sema/integer-overflow.c b/clang/test/Sema/integer-overflow.c index 30a47aa5f6ad6..ba943f0927a22 100644 --- a/clang/test/Sema/integer-overflow.c +++ b/clang/test/Sema/integer-overflow.c @@ -143,7 +143,7 @@ uint64_t check_integer_overflows(int i) { (__imag__ x) = 4608 * 1024 * 1024; // expected-warning@+4 {{overflow in expression; result is 536'870'912 with type 'int'}} -// expected-warning@+3 {{array index 536870912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}} +// expected-warning@+3 {{array index 536'870'912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}} // expected-note@+1 {{array 'a' declared here}} uint64_t a[10]; a[4608 * 1024 * 1024] = 1i; diff --git a/clang/test/Sema/ppc-dmf-types.c b/clang/test/Sema/ppc-dmf-types.c index b3da72df25081..88926acf2d3fb 100644 --- a/clang/test/Sema/ppc-dmf-types.c +++ b/clang/test/Sema/ppc-dmf-types.c @@ -12,47 +12,86 @@ // typedef typedef __dmr1024 dmr_t; +typedef __dmr2048 dmrp_t; // function argument -void testDmrArg1(__dmr1024 vdmr, int *ptr) { // expected-error {{invalid use of PPC MMA type}} - __dmr1024 *vdmrp = (__dmr1024 *)ptr; +void testDmrArg1(dmr_t vdmr, int *ptr) { // expected-error {{invalid use of PPC MMA type}} + dmr_t *vdmrp = (dmr_t *)ptr; *vdmrp = vdmr; } -void testDmrArg2(const __dmr1024 vdmr, int *ptr) { // expected-error {{invalid use of PPC MMA type}} - __dmr1024 *vdmrp = (__dmr1024 *)ptr; +void testDmrArg2(const dmr_t vdmr, int *ptr) { // expected-error {{invalid use of PPC MMA type}} + dmr_t *vdmrp = (dmr_t *)ptr; *vdmrp = vdmr; } void testDmrArg3(const dmr_t vdmr, int *ptr) { // expected-error {{invalid use of PPC MMA type}} - __dmr1024 *vdmrp = (__dmr1024 *)ptr; + dmr_t *vdmrp = (dmr_t *)ptr; *vdmrp = vdmr; } +void testDmrPArg1(const dmrp_t vdmrp, int *ptr) { // expected-error {{invalid use of PPC MMA type}} + dmrp_t *vdmrpp = (dmrp_t *)ptr; + *vdmrpp = vdmrp; +} + +void testDmrPArg2(const dmrp_t vdmrp, int *ptr) { // expected-error {{invalid use of PPC MMA type}} + dmrp_t *vdmrpp = (dmrp_t *)ptr; + *vdmrpp = vdmrp; +} + +void testDmrPArg3(const dmrp_t vdmrp, int *ptr) { // expected-error {{invalid use of PPC MMA type}} + dmrp_t *vdmrpp = (dmrp_t *)ptr; + *vdmrpp = vdmrp; +} + // function return -__dmr1024 testDmrRet1(int *ptr) { // expected-error {{invalid use of PPC MMA type}} - __dmr1024 *vdmrp = (__dmr1024 *)ptr; +dmr_t testDmrRet1(int *ptr) { // expected-error {{invalid use of PPC MMA type}} + dmr_t *vdmrp = (dmr_t *)ptr; return *vdmrp; // expected-error {{invalid use of PPC MMA type}} } const dmr_t testDmrRet4(int *ptr) { // expected-error {{invalid use of PPC MMA type}} - __dmr1024 *vdmrp = (__dmr1024 *)ptr; + dmr_t *vdmrp = (dmr_t *)ptr; return *vdmrp; // expected-error {{invalid use of PPC MMA type}} } +dmrp_t testDmrPRet1(int *ptr) { // expected-error {{invalid use of PPC MMA type}} + dmrp_t *vdmrpp = (dmrp_t *)ptr; + return *vdmrpp; // expected-error {{invalid use of PPC MMA type}} +} + +const dmrp_t testDmrPRet4(int *ptr) { // expected-error {{invalid use of PPC MMA type}} + dmrp_t *vdmrpp = (dmrp_t *)ptr; + return *vdmrpp; // expected-error {{invalid use of PPC MMA type}} +} + // global -__dmr1024 globalvdmr; // expected-error {{invalid use of PPC MMA type}} -const __dmr1024 globalvdmr2; // expected-error {{invalid use of PPC MMA type}} -__dmr1024 *globalvdmrp; -const __dmr1024 *const globalvdmrp2; +dmr_t globalvdmr; // expected-error {{invalid use of PPC MMA type}} +const dmr_t globalvdmr2; // 
expected-error {{invalid use of PPC MMA type}} +dmr_t *globalvdmrp; +const dmr_t *const globalvdmrp2; dmr_t globalvdmr_t; // expected-error {{invalid use of PPC MMA type}} +dmrp_t globalvdmrp; // expected-error {{invalid use of PPC MMA type}} +const dmrp_t globalvdmrp2; // expected-error {{invalid use of PPC MMA type}} +dmrp_t *globalvdmrpp; +const dmrp_t *const globalvdmrpp2; +dmrp_t globalvdmrp_t; // expected-error {{invalid use of PPC MMA type}} + // struct field struct TestDmrStruct { int a; float b; - __dmr1024 c; // expected-error {{invalid use of PPC MMA type}} - __dmr1024 *vq; + dmr_t c; // expected-error {{invalid use of PPC MMA type}} + dmr_t *vq; +}; + +struct TestDmrPStruct { + int a; + float b; + dmrp_t c; // expected-error {{invalid use of PPC MMA type}} + dmrp_t *vq; }; // operators @@ -101,3 +140,50 @@ void testDmrOperators4(int v, void *ptr) { __dmr1024 vdmr1 = (__dmr1024)v; // expected-error {{used type '__dmr1024' where arithmetic or pointer type is required}} __dmr1024 vdmr2 = (__dmr1024)vdmrp; // expected-error {{used type '__dmr1024' where arithmetic or pointer type is required}} } + +int testDmrPOperators1(int *ptr) { + __dmr2048 *vdmrpp = (__dmr2048 *)ptr; + __dmr2048 vdmrp1 = *(vdmrpp + 0); + __dmr2048 vdmrp2 = *(vdmrpp + 1); + __dmr2048 vdmrp3 = *(vdmrpp + 2); + if (vdmrp1) // expected-error {{statement requires expression of scalar type ('__dmr2048' invalid)}} + *(vdmrpp + 10) = vdmrp1; + if (!vdmrp2) // expected-error {{invalid argument type '__dmr2048' to unary expression}} + *(vdmrpp + 11) = vdmrp3; + int c1 = vdmrp1 && vdmrp2; // expected-error {{invalid operands to binary expression ('__dmr2048' and '__dmr2048')}} + int c2 = vdmrp2 == vdmrp3; // expected-error {{invalid operands to binary expression ('__dmr2048' and '__dmr2048')}} + int c3 = vdmrp2 < vdmrp1; // expected-error {{invalid operands to binary expression ('__dmr2048' and '__dmr2048')}} + return c1 || c2 || c3; +} + +void testDmrPOperators2(int *ptr) { + __dmr2048 *vdmrpp = (__dmr2048 *)ptr; + __dmr2048 vdmrp1 = *(vdmrpp + 0); + __dmr2048 vdmrp2 = *(vdmrpp + 1); + __dmr2048 vdmrp3 = *(vdmrpp + 2); + vdmrp1 = -vdmrp1; // expected-error {{invalid argument type '__dmr2048' to unary expression}} + vdmrp2 = vdmrp1 + vdmrp3; // expected-error {{invalid operands to binary expression ('__dmr2048' and '__dmr2048')}} + vdmrp2 = vdmrp2 * vdmrp3; // expected-error {{invalid operands to binary expression ('__dmr2048' and '__dmr2048')}} + vdmrp3 = vdmrp3 | vdmrp3; // expected-error {{invalid operands to binary expression ('__dmr2048' and '__dmr2048')}} + vdmrp3 = vdmrp3 << 2; // expected-error {{invalid operands to binary expression ('__dmr2048' and 'int')}} + *(vdmrpp + 10) = vdmrp1; + *(vdmrpp + 11) = vdmrp2; + *(vdmrpp + 12) = vdmrp3; +} + + +vector unsigned char testDmrPOperators3(int *ptr) { + __dmr2048 *vdmrpp = (__dmr2048 *)ptr; + __dmr2048 vdmrp1 = *(vdmrpp + 0); + __dmr2048 vdmrp2 = *(vdmrpp + 1); + __dmr2048 vdmrp3 = *(vdmrpp + 2); + vdmrp1 ? 
*(vdmrpp + 10) = vdmrp2 : *(vdmrpp + 11) = vdmrp3; // expected-error {{used type '__dmr2048' where arithmetic or pointer type is required}} + vdmrp2 = vdmrp3; + return vdmrp2[1]; // expected-error {{subscripted value is not an array, pointer, or vector}} +} + +void testDmrPOperators4(int v, void *ptr) { + __dmr2048 *vdmrpp = (__dmr2048 *)ptr; + __dmr2048 vdmrp1 = (__dmr2048)v; // expected-error {{used type '__dmr2048' where arithmetic or pointer type is required}} + __dmr2048 vdmrp2 = (__dmr2048)vdmrpp; // expected-error {{used type '__dmr2048' where arithmetic or pointer type is required}} +} diff --git a/clang/test/Sema/unbounded-array-bounds.c b/clang/test/Sema/unbounded-array-bounds.c index b22261a3eaeb5..909286b283852 100644 --- a/clang/test/Sema/unbounded-array-bounds.c +++ b/clang/test/Sema/unbounded-array-bounds.c @@ -14,11 +14,11 @@ struct S s[]; // expected-warning {{tentative array definition}} expected-note { void f1(void) { ++s[3].a; ++s[7073650413200313099].b; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178956970 elements)}} - // addr64-warning@-3 {{array index 7073650413200313099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576460752303423488 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178'956'970 elements)}} + // addr64-warning@-3 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576'460'752'303'423'488 elements)}} ++s[7073650].c; - // addr16-warning@-1 {{array index 7073650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} + // addr16-warning@-1 {{array index 7'073'650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} } long long ll[]; // expected-warning {{tentative array definition}} expected-note {{declared here}} addr16-note {{declared here}} addr32-note {{declared here}} @@ -26,32 +26,32 @@ long long ll[]; // expected-warning {{tentative array definition}} expected-note void f2(void) { ++ll[3]; ++ll[2705843009213693952]; - // addr16-warning@-1 {{array index 2705843009213693952 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8192 elements)}} - // addr32-warning@-2 {{array index 2705843009213693952 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536870912 elements)}} - // addr64-warning@-3 {{array index 2705843009213693952 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 
2305843009213693952 elements)}} + // addr16-warning@-1 {{array index 2'705'843'009'213'693'952 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8'192 elements)}} + // addr32-warning@-2 {{array index 2'705'843'009'213'693'952 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536'870'912 elements)}} + // addr64-warning@-3 {{array index 2'705'843'009'213'693'952 refers past the last possible element for an array in 64-bit address space containing 64-bit (8-byte) elements (max possible 2'305'843'009'213'693'952 elements)}} ++ll[847073650]; - // addr16-warning@-1 {{array index 847073650 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8192 elements)}} - // addr32-warning@-2 {{array index 847073650 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536870912 elements)}} + // addr16-warning@-1 {{array index 847'073'650 refers past the last possible element for an array in 16-bit address space containing 64-bit (8-byte) elements (max possible 8'192 elements)}} + // addr32-warning@-2 {{array index 847'073'650 refers past the last possible element for an array in 32-bit address space containing 64-bit (8-byte) elements (max possible 536'870'912 elements)}} } void f3(struct S p[]) { // expected-note {{declared here}} addr16-note {{declared here}} ++p[3].a; ++p[7073650413200313099].b; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178956970 elements)}} - // addr64-warning@-3 {{array index 7073650413200313099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576460752303423488 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178'956'970 elements)}} + // addr64-warning@-3 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576'460'752'303'423'488 elements)}} ++p[7073650].c; - // addr16-warning@-1 {{array index 7073650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} + // addr16-warning@-1 {{array index 7'073'650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} } void f4(struct S *p) { // expected-note {{declared here}} addr16-note {{declared here}} p += 3; p += 7073650413200313099; - // addr16-warning@-1 {{the pointer incremented by 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 152-bit 
(19-byte) elements (max possible 3449 elements)}} - // addr32-warning@-2 {{the pointer incremented by 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178956970 elements)}} - // addr64-warning@-3 {{the pointer incremented by 7073650413200313099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576460752303423488 elements)}} + // addr16-warning@-1 {{the pointer incremented by 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} + // addr32-warning@-2 {{the pointer incremented by 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 192-bit (24-byte) elements (max possible 178'956'970 elements)}} + // addr64-warning@-3 {{the pointer incremented by 7'073'650'413'200'313'099 refers past the last possible element for an array in 64-bit address space containing 256-bit (32-byte) elements (max possible 576'460'752'303'423'488 elements)}} p += 7073650; - // addr16-warning@-1 {{the pointer incremented by 7073650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3449 elements)}} + // addr16-warning@-1 {{the pointer incremented by 7'073'650 refers past the last possible element for an array in 16-bit address space containing 152-bit (19-byte) elements (max possible 3'449 elements)}} } struct BQ { @@ -63,7 +63,7 @@ struct BQ bq[]; // expected-warning {{tentative array definition}} addr16-note { void f5(void) { ++bq[0].bigblock[0].a; ++bq[1].bigblock[0].a; - // addr16-warning@-1 {{array index 1 refers past the last possible element for an array in 16-bit address space containing 497952-bit (62244-byte) elements (max possible 1 element)}} + // addr16-warning@-1 {{array index 1 refers past the last possible element for an array in 16-bit address space containing 497952-bit (62'244-byte) elements (max possible 1 element)}} } void f6(void) { @@ -102,15 +102,15 @@ struct { void fam_ily() { ++fam.tail[7073650413200313099]; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65536 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4294967296 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65'536 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4'294'967'296 elements)}} // No warning for addr64 because the array index is inbound in that case. 
++fam0.tail[7073650413200313099]; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65536 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4294967296 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65'536 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4'294'967'296 elements)}} // No warning for addr64 because the array index is inbound in that case. ++fam1.tail[7073650413200313099]; - // addr16-warning@-1 {{array index 7073650413200313099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65536 elements)}} - // addr32-warning@-2 {{array index 7073650413200313099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4294967296 elements)}} + // addr16-warning@-1 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 16-bit address space containing 8-bit (1-byte) elements (max possible 65'536 elements)}} + // addr32-warning@-2 {{array index 7'073'650'413'200'313'099 refers past the last possible element for an array in 32-bit address space containing 8-bit (1-byte) elements (max possible 4'294'967'296 elements)}} // No warning for addr64 because the array index is inbound in that case. 
} diff --git a/clang/test/Sema/warn-double-promotion.c b/clang/test/Sema/warn-double-promotion.c index 5742a4fb3cbd4..7b06658bf4cdf 100644 --- a/clang/test/Sema/warn-double-promotion.c +++ b/clang/test/Sema/warn-double-promotion.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin -verify -fsyntax-only %s -Wdouble-promotion +// RUN: %clang_cc1 -triple x86_64-apple-darwin -verify -fsyntax-only -x c++ %s -Wdouble-promotion float ReturnFloatFromDouble(double d) { return d; @@ -24,15 +25,39 @@ long double ReturnLongDoubleFromDouble(double d) { return d; //expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} } +double ReturnDoubleFromFloatWithExplicitCast(float f) { + return (double)f; +} + +long double ReturnLongDoubleFromFloatWithExplicitCast(float f) { + return (long double)f; +} + +long double ReturnLongDoubleFromDoubleWithExplicitCast(double d) { + return (long double)d; +} + void Assignment(float f, double d, long double ld) { d = f; //expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} ld = f; //expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} ld = d; //expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} + d = (double)f; + ld = (long double)f; + ld = (long double)d; f = d; f = ld; d = ld; } +void AssignmentWithExtraParens(float f, double d, long double ld) { + d = (f); //expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} + ld = (f); //expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} + ld = (d); //expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} + d = (double)(f); + ld = (long double)(f); + ld = (long double)(d); +} + extern void DoubleParameter(double); extern void LongDoubleParameter(long double); @@ -40,6 +65,9 @@ void ArgumentPassing(float f, double d) { DoubleParameter(f); // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} LongDoubleParameter(f); // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} LongDoubleParameter(d); // expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} + DoubleParameter((double)f); + LongDoubleParameter((long double)f); + LongDoubleParameter((long double)d); } void BinaryOperator(float f, double d, long double ld) { @@ -49,12 +77,21 @@ void BinaryOperator(float f, double d, long double ld) { f = ld * f; // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} d = d * ld; // expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} d = ld * d; // expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} + f = (double)f * d; + f = d * (double)f; + f = (long double)f * ld; + f = ld * (long double)f; + d = (long double)d * ld; + d = ld * (long double)d; } void MultiplicationAssignment(float f, double d, long double ld) { d *= f; // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} ld *= f; // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} ld *= d; // expected-warning{{implicit conversion increases floating-point precision: 'double' to 
'long double'}} + d *= (double)f; + ld *= (long double)f; + ld *= (long double)d; // FIXME: These cases should produce warnings as above. f *= d; diff --git a/clang/test/Sema/warn-double-promotion.cpp b/clang/test/Sema/warn-double-promotion.cpp new file mode 100644 index 0000000000000..886911244fbd7 --- /dev/null +++ b/clang/test/Sema/warn-double-promotion.cpp @@ -0,0 +1,256 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin -verify -fsyntax-only %s -Wdouble-promotion + +using LongDouble = long double; + +double ReturnDoubleFromFloatWithExplicitCast(float f) { + return static_cast<double>(f); +} + +long double ReturnLongDoubleFromFloatWithExplicitCast(float f) { + return static_cast<long double>(f); +} + +long double ReturnLongDoubleFromDoubleWithExplicitCast(double d) { + return static_cast<long double>(d); +} + +double ReturnDoubleFromFloatWithExplicitListInitialization(float f) { + return double{f}; +} + +long double ReturnLongDoubleFromFloatWithExplicitListInitialization(float f) { + return LongDouble{f}; +} + +long double ReturnLongDoubleFromDoubleWithExplicitListInitialization(double d) { + return LongDouble{d}; +} + +double ReturnDoubleFromFloatWithFunctionStyleCast(float f) { + return double(f); +} + +long double ReturnLongDoubleFromFloatWithFunctionStyleCast(float f) { + return LongDouble(f); +} + +long double ReturnLongDoubleFromDoubleWithFunctionStyleCast(double d) { + return LongDouble(d); +} + +void InitializationWithParens(float f, double d) { + { + double d(f); // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} + long double ld0(f); // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} + long double ld1(d); // expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} + } + { + double d(static_cast<double>(f)); + long double ld0(static_cast<long double>(f)); + long double ld1(static_cast<long double>(d)); + } + { + double d(double{f}); + long double ld0(LongDouble{f}); + long double ld1(LongDouble{d}); + } + { + double d((double(f))); + long double ld0((LongDouble(f))); + long double ld1((LongDouble(d))); + } +} + +void InitializationWithBraces(float f, double d) { + { + double d{f}; // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} + long double ld0{f}; // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} + long double ld1{d}; // expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} + } + { + double d{static_cast<double>(f)}; + long double ld0{static_cast<long double>(f)}; + long double ld1{static_cast<long double>(d)}; + } + { + double d{double{f}}; + long double ld0{LongDouble{f}}; + long double ld1{LongDouble{d}}; + } + { + double d{double(f)}; + long double ld0{LongDouble(f)}; + long double ld1{LongDouble(d)}; + } +} + +void Assignment(float f, double d, long double ld) { + d = static_cast<double>(f); + ld = static_cast<long double>(f); + ld = static_cast<long double>(d); + d = double{f}; + ld = LongDouble{f}; + ld = LongDouble{d}; + d = double(f); + ld = LongDouble(f); + ld = LongDouble(d); +} + +void AssignmentWithExtraParens(float f, double d, long double ld) { + d = static_cast<double>((f)); + ld = static_cast<long double>((f)); + ld = static_cast<long double>((d)); + d = double{(f)}; + ld = LongDouble{(f)}; + ld = LongDouble{(d)}; + d = double((f)); + ld = LongDouble((f)); + ld = LongDouble((d)); +} + +void AssignmentWithExtraBraces(float f, double d, long double ld) { + d = double{{f}}; // expected-warning{{too many braces around scalar 
initializer}} + ld = LongDouble{{f}}; // expected-warning{{too many braces around scalar initializer}} + ld = LongDouble{{d}}; // expected-warning{{too many braces around scalar initializer}} +} + +extern void DoubleParameter(double); +extern void LongDoubleParameter(long double); + +void ArgumentPassing(float f, double d) { + DoubleParameter(static_cast<double>(f)); + LongDoubleParameter(static_cast<long double>(f)); + LongDoubleParameter(static_cast<long double>(d)); + DoubleParameter(double{f}); + LongDoubleParameter(LongDouble{f}); + LongDoubleParameter(LongDouble{d}); + DoubleParameter(double(f)); + LongDoubleParameter(LongDouble(f)); + LongDoubleParameter(LongDouble(d)); +} + +void BinaryOperator(float f, double d, long double ld) { + f = static_cast<double>(f) * d; + f = d * static_cast<double>(f); + f = static_cast<long double>(f) * ld; + f = ld * static_cast<long double>(f); + d = static_cast<long double>(d) * ld; + d = ld * static_cast<long double>(d); + f = double{f} * d; + f = d * double{f}; + f = LongDouble{f} * ld; + f = ld * LongDouble{f}; + d = LongDouble{d} * ld; + d = ld * LongDouble{d}; + f = double(f) * d; + f = d * double(f); + f = LongDouble(f) * ld; + f = ld * LongDouble(f); + d = LongDouble(d) * ld; + d = ld * LongDouble(d); +} + +void MultiplicationAssignment(float f, double d, long double ld) { + d *= static_cast<double>(f); + ld *= static_cast<long double>(f); + ld *= static_cast<long double>(d); + d *= double{f}; + ld *= LongDouble{f}; + ld *= LongDouble{d}; + d *= double(f); + ld *= LongDouble(f); + ld *= LongDouble(d); +} + +struct ConstructWithDouble { + ConstructWithDouble(double); +}; + +struct ConstructWithLongDouble { + ConstructWithLongDouble(long double); +}; + +void Construct(float f, double d) { + ConstructWithDouble{f}; // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} + ConstructWithLongDouble{f}; // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} + ConstructWithLongDouble{d}; // expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} + ConstructWithDouble{static_cast<double>(f)}; + ConstructWithLongDouble{static_cast<long double>(f)}; + ConstructWithLongDouble{static_cast<long double>(d)}; + ConstructWithDouble{double{f}}; + ConstructWithLongDouble{LongDouble{f}}; + ConstructWithLongDouble{LongDouble{d}}; + ConstructWithDouble{double(f)}; + ConstructWithLongDouble{LongDouble(f)}; + ConstructWithLongDouble{LongDouble(d)}; +} + +template <typename T> T ReturnTFromFloat(float f) { + return f; // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} \ + // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} +} + +template <typename T> T ReturnTFromDouble(double d) { + return d; // expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} +} + +template <typename T> T ReturnTFromFloatWithStaticCast(float f) { + return static_cast<T>(f); +} + +template <typename T> T ReturnTFromDoubleWithStaticCast(double d) { + return static_cast<T>(d); +} + +template <typename T> T ReturnTFromFloatWithExplicitListInitialization(float f) { + return T{f}; +} + +template <typename T> T ReturnTFromDoubleWithExplicitListInitialization(double d) { + return T{d}; +} + +template <typename T> T ReturnTFromFloatWithFunctionStyleCast(float f) { + return T(f); +} + +template <typename T> T ReturnTFromDoubleWithFunctionStyleCast(double d) { + return T(d); +} + +void TestTemplate(float f, double d) { + ReturnTFromFloat<double>(f); // expected-note{{in instantiation of function template specialization 'ReturnTFromFloat' requested here}} + ReturnTFromFloat<long double>(f); // expected-note{{in 
instantiation of function template specialization 'ReturnTFromFloat' requested here}} + ReturnTFromDouble<long double>(d); // expected-note{{in instantiation of function template specialization 'ReturnTFromDouble' requested here}} + ReturnTFromFloatWithStaticCast<double>(f); + ReturnTFromFloatWithStaticCast<long double>(f); + ReturnTFromDoubleWithStaticCast<long double>(d); + ReturnTFromFloatWithExplicitListInitialization<double>(f); + ReturnTFromFloatWithExplicitListInitialization<long double>(f); + ReturnTFromDoubleWithExplicitListInitialization<long double>(d); + ReturnTFromFloatWithFunctionStyleCast<double>(f); + ReturnTFromFloatWithFunctionStyleCast<long double>(f); + ReturnTFromDoubleWithFunctionStyleCast<long double>(d); +} + +struct MemberInitializerListParens { + double m_d; + long double m_ld0; + long double m_ld1; + MemberInitializerListParens(float f, double d): + m_d(f), // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} + m_ld0(f), // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} + m_ld1(d) // expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} + {} +}; + +struct MemberInitializerListBraces { + double m_d; + long double m_ld0; + long double m_ld1; + MemberInitializerListBraces(float f, double d): + m_d{f}, // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'double'}} + m_ld0{f}, // expected-warning{{implicit conversion increases floating-point precision: 'float' to 'long double'}} + m_ld1{d} // expected-warning{{implicit conversion increases floating-point precision: 'double' to 'long double'}} + {} +}; diff --git a/clang/test/Sema/warn-lifetime-safety-dataflow.cpp b/clang/test/Sema/warn-lifetime-safety-dataflow.cpp index 7dac27506fb6b..31148b990d6bd 100644 --- a/clang/test/Sema/warn-lifetime-safety-dataflow.cpp +++ b/clang/test/Sema/warn-lifetime-safety-dataflow.cpp @@ -12,12 +12,12 @@ MyObj* return_local_addr() { MyObj x {10}; // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_X:[0-9]+]] (Path: x), ToOrigin: [[O_DRE_X:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_X:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_X]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_X:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_X]] (Expr: DeclRefExpr)) MyObj* p = &x; -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_X]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_X]] (Expr: UnaryOperator)) return p; // CHECK: Use ([[O_P]] (Decl: p), Read) -// CHECK: AssignOrigin (Dest: [[O_RET_VAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P]] (Decl: p)) +// CHECK: OriginFlow (Dest: [[O_RET_VAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P]] (Decl: p)) // CHECK: ReturnOfOrigin ([[O_RET_VAL]] (Expr: ImplicitCastExpr)) // CHECK: Expire ([[L_X]] (Path: x)) } @@ -29,26 +29,26 @@ MyObj* assign_and_return_local_addr() { MyObj y{20}; // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_Y:[0-9]+]] (Path: y), ToOrigin: [[O_DRE_Y:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_Y:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_Y]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_Y:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_Y]] (Expr: DeclRefExpr)) MyObj* ptr1 = &y; -// CHECK: AssignOrigin (Dest: [[O_PTR1:[0-9]+]] (Decl: ptr1), Src: [[O_ADDR_Y]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_PTR1:[0-9]+]] (Decl: ptr1), Src: [[O_ADDR_Y]] (Expr: UnaryOperator)) MyObj* ptr2 = ptr1; // CHECK: Use ([[O_PTR1]] (Decl: 
ptr1), Read) -// CHECK: AssignOrigin (Dest: [[O_PTR1_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR1]] (Decl: ptr1)) -// CHECK: AssignOrigin (Dest: [[O_PTR2:[0-9]+]] (Decl: ptr2), Src: [[O_PTR1_RVAL]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_PTR1_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR1]] (Decl: ptr1)) +// CHECK: OriginFlow (Dest: [[O_PTR2:[0-9]+]] (Decl: ptr2), Src: [[O_PTR1_RVAL]] (Expr: ImplicitCastExpr)) ptr2 = ptr1; // CHECK: Use ([[O_PTR1]] (Decl: ptr1), Read) -// CHECK: AssignOrigin (Dest: [[O_PTR1_RVAL_2:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR1]] (Decl: ptr1)) +// CHECK: OriginFlow (Dest: [[O_PTR1_RVAL_2:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR1]] (Decl: ptr1)) // CHECK: Use ({{[0-9]+}} (Decl: ptr2), Write) -// CHECK: AssignOrigin (Dest: [[O_PTR2]] (Decl: ptr2), Src: [[O_PTR1_RVAL_2]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_PTR2]] (Decl: ptr2), Src: [[O_PTR1_RVAL_2]] (Expr: ImplicitCastExpr)) ptr2 = ptr2; // Self assignment. // CHECK: Use ([[O_PTR2]] (Decl: ptr2), Read) -// CHECK: AssignOrigin (Dest: [[O_PTR2_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR2]] (Decl: ptr2)) +// CHECK: OriginFlow (Dest: [[O_PTR2_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR2]] (Decl: ptr2)) // CHECK: Use ([[O_PTR2]] (Decl: ptr2), Write) -// CHECK: AssignOrigin (Dest: [[O_PTR2]] (Decl: ptr2), Src: [[O_PTR2_RVAL]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_PTR2]] (Decl: ptr2), Src: [[O_PTR2_RVAL]] (Expr: ImplicitCastExpr)) return ptr2; // CHECK: Use ([[O_PTR2]] (Decl: ptr2), Read) -// CHECK: AssignOrigin (Dest: [[O_RET_VAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR2]] (Decl: ptr2)) +// CHECK: OriginFlow (Dest: [[O_RET_VAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_PTR2]] (Decl: ptr2)) // CHECK: ReturnOfOrigin ([[O_RET_VAL]] (Expr: ImplicitCastExpr)) // CHECK: Expire ([[L_Y]] (Path: y)) } @@ -70,9 +70,9 @@ void loan_expires_cpp() { MyObj obj{1}; // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_OBJ:[0-9]+]] (Path: obj), ToOrigin: [[O_DRE_OBJ:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_OBJ:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_OBJ]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_OBJ:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_OBJ]] (Expr: DeclRefExpr)) MyObj* pObj = &obj; -// CHECK: AssignOrigin (Dest: {{[0-9]+}} (Decl: pObj), Src: [[O_ADDR_OBJ]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: {{[0-9]+}} (Decl: pObj), Src: [[O_ADDR_OBJ]] (Expr: UnaryOperator)) // CHECK: Expire ([[L_OBJ]] (Path: obj)) } @@ -83,9 +83,9 @@ void loan_expires_trivial() { int trivial_obj = 1; // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_TRIVIAL_OBJ:[0-9]+]] (Path: trivial_obj), ToOrigin: [[O_DRE_TRIVIAL:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_TRIVIAL_OBJ:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_TRIVIAL]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_TRIVIAL_OBJ:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_TRIVIAL]] (Expr: DeclRefExpr)) int* pTrivialObj = &trivial_obj; -// CHECK: AssignOrigin (Dest: {{[0-9]+}} (Decl: pTrivialObj), Src: [[O_ADDR_TRIVIAL_OBJ]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: {{[0-9]+}} (Decl: pTrivialObj), Src: [[O_ADDR_TRIVIAL_OBJ]] (Expr: UnaryOperator)) // CHECK-NOT: Expire // CHECK-NEXT: End of Block // FIXME: Add check for Expire once trivial destructors are handled for expiration. 
@@ -100,20 +100,20 @@ void conditional(bool condition) { if (condition) // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_A:[0-9]+]] (Path: a), ToOrigin: [[O_DRE_A:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_A:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_A]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_A]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_A:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_A]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_A]] (Expr: UnaryOperator)) p = &a; else // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_B:[0-9]+]] (Path: b), ToOrigin: [[O_DRE_B:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_B:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_B]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_B]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_B:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_B]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_B]] (Expr: UnaryOperator)) p = &b; // CHECK: Block B{{[0-9]+}}: int *q = p; // CHECK: Use ([[O_P]] (Decl: p), Read) -// CHECK: AssignOrigin (Dest: [[O_P_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P]] (Decl: p)) -// CHECK: AssignOrigin (Dest: [[O_Q:[0-9]+]] (Decl: q), Src: [[O_P_RVAL]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_P_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P]] (Decl: p)) +// CHECK: OriginFlow (Dest: [[O_Q:[0-9]+]] (Decl: q), Src: [[O_P_RVAL]] (Expr: ImplicitCastExpr)) } @@ -128,36 +128,36 @@ void pointers_in_a_cycle(bool condition) { MyObj* p3 = &v3; // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_V1:[0-9]+]] (Path: v1), ToOrigin: [[O_DRE_V1:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_V1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_V1]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P1:[0-9]+]] (Decl: p1), Src: [[O_ADDR_V1]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_V1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_V1]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P1:[0-9]+]] (Decl: p1), Src: [[O_ADDR_V1]] (Expr: UnaryOperator)) // CHECK: Issue ([[L_V2:[0-9]+]] (Path: v2), ToOrigin: [[O_DRE_V2:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_V2:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_V2]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P2:[0-9]+]] (Decl: p2), Src: [[O_ADDR_V2]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_V2:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_V2]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P2:[0-9]+]] (Decl: p2), Src: [[O_ADDR_V2]] (Expr: UnaryOperator)) // CHECK: Issue ([[L_V3:[0-9]+]] (Path: v3), ToOrigin: [[O_DRE_V3:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_V3:[0-g]+]] (Expr: UnaryOperator), Src: [[O_DRE_V3]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P3:[0-9]+]] (Decl: p3), Src: [[O_ADDR_V3]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_V3:[0-g]+]] (Expr: UnaryOperator), Src: [[O_DRE_V3]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P3:[0-9]+]] (Decl: p3), Src: [[O_ADDR_V3]] (Expr: UnaryOperator)) while (condition) { // CHECK: Block B{{[0-9]+}}: MyObj* temp = p1; // CHECK: Use ([[O_P1]] (Decl: p1), Read) -// CHECK: AssignOrigin (Dest: [[O_P1_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P1]] (Decl: p1)) -// CHECK: 
AssignOrigin (Dest: [[O_TEMP:[0-9]+]] (Decl: temp), Src: [[O_P1_RVAL]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_P1_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P1]] (Decl: p1)) +// CHECK: OriginFlow (Dest: [[O_TEMP:[0-9]+]] (Decl: temp), Src: [[O_P1_RVAL]] (Expr: ImplicitCastExpr)) p1 = p2; // CHECK: Use ([[O_P2:[0-9]+]] (Decl: p2), Read) -// CHECK: AssignOrigin (Dest: [[O_P2_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P2]] (Decl: p2)) +// CHECK: OriginFlow (Dest: [[O_P2_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P2]] (Decl: p2)) // CHECK: Use ({{[0-9]+}} (Decl: p1), Write) -// CHECK: AssignOrigin (Dest: [[O_P1]] (Decl: p1), Src: [[O_P2_RVAL]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_P1]] (Decl: p1), Src: [[O_P2_RVAL]] (Expr: ImplicitCastExpr)) p2 = p3; // CHECK: Use ([[O_P3:[0-9]+]] (Decl: p3), Read) -// CHECK: AssignOrigin (Dest: [[O_P3_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P3]] (Decl: p3)) +// CHECK: OriginFlow (Dest: [[O_P3_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P3]] (Decl: p3)) // CHECK: Use ({{[0-9]+}} (Decl: p2), Write) -// CHECK: AssignOrigin (Dest: [[O_P2]] (Decl: p2), Src: [[O_P3_RVAL]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_P2]] (Decl: p2), Src: [[O_P3_RVAL]] (Expr: ImplicitCastExpr)) p3 = temp; // CHECK: Use ([[O_TEMP]] (Decl: temp), Read) -// CHECK: AssignOrigin (Dest: [[O_TEMP_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_TEMP]] (Decl: temp)) +// CHECK: OriginFlow (Dest: [[O_TEMP_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_TEMP]] (Decl: temp)) // CHECK: Use ({{[0-9]+}} (Decl: p3), Write) -// CHECK: AssignOrigin (Dest: [[O_P3]] (Decl: p3), Src: [[O_TEMP_RVAL]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_P3]] (Decl: p3), Src: [[O_TEMP_RVAL]] (Expr: ImplicitCastExpr)) } } @@ -168,13 +168,13 @@ void overwrite_origin() { // CHECK: Block B{{[0-9]+}}: MyObj* p = &s1; // CHECK: Issue ([[L_S1:[0-9]+]] (Path: s1), ToOrigin: [[O_DRE_S1:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S1]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S1]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) p = &s2; // CHECK: Issue ([[L_S2:[0-9]+]] (Path: s2), ToOrigin: [[O_DRE_S2:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S2]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S2]] (Expr: DeclRefExpr)) // CHECK: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator)) // CHECK: Expire ([[L_S2]] (Path: s2)) // CHECK: Expire ([[L_S1]] (Path: s1)) } @@ -185,12 +185,12 @@ void reassign_to_null() { // CHECK: Block B{{[0-9]+}}: MyObj* p = &s1; // CHECK: Issue ([[L_S1:[0-9]+]] (Path: s1), ToOrigin: [[O_DRE_S1:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S1]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: 
[[O_DRE_S1]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) p = nullptr; -// CHECK: AssignOrigin (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: {{[0-9]+}} (Expr: CXXNullPtrLiteralExpr)) +// CHECK: OriginFlow (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: {{[0-9]+}} (Expr: CXXNullPtrLiteralExpr)) // CHECK: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr)) // CHECK: Expire ([[L_S1]] (Path: s1)) } // FIXME: Have a better representation for nullptr than just an empty origin. @@ -204,15 +204,15 @@ void reassign_in_if(bool condition) { MyObj* p = &s1; // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_S1:[0-9]+]] (Path: s1), ToOrigin: [[O_DRE_S1:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S1]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S1]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) if (condition) { // CHECK: Block B{{[0-9]+}}: p = &s2; // CHECK: Issue ([[L_S2:[0-9]+]] (Path: s2), ToOrigin: [[O_DRE_S2:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S2]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S2]] (Expr: DeclRefExpr)) // CHECK: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator)) } // CHECK: Block B{{[0-9]+}}: // CHECK: Expire ([[L_S2]] (Path: s2)) @@ -227,32 +227,32 @@ void assign_in_switch(int mode) { MyObj s3; MyObj* p = nullptr; // CHECK: Block B{{[0-9]+}}: -// CHECK: AssignOrigin (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_NULLPTR:[0-9]+]] (Expr: CXXNullPtrLiteralExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_NULLPTR:[0-9]+]] (Expr: CXXNullPtrLiteralExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr)) switch (mode) { case 1: // CHECK-DAG: Block B{{[0-9]+}}: p = &s1; // CHECK-DAG: Issue ([[L_S1:[0-9]+]] (Path: s1), ToOrigin: [[O_DRE_S1:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK-DAG: AssignOrigin (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S1]] (Expr: DeclRefExpr)) +// CHECK-DAG: OriginFlow (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S1]] (Expr: DeclRefExpr)) // CHECK-DAG: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK-DAG: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) +// CHECK-DAG: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) break; case 2: // CHECK-DAG: Block B{{[0-9]+}}: p = &s2; // CHECK-DAG: Issue ([[L_S2:[0-9]+]] (Path: s2), ToOrigin: [[O_DRE_S2:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK-DAG: AssignOrigin (Dest: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator), Src: 
[[O_DRE_S2]] (Expr: DeclRefExpr)) +// CHECK-DAG: OriginFlow (Dest: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S2]] (Expr: DeclRefExpr)) // CHECK-DAG: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK-DAG: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator)) +// CHECK-DAG: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator)) break; default: // CHECK: Block B{{[0-9]+}}: p = &s3; // CHECK: Issue ([[L_S3:[0-9]+]] (Path: s3), ToOrigin: [[O_DRE_S3:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_S3:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S3]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_S3:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S3]] (Expr: DeclRefExpr)) // CHECK: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S3]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S3]] (Expr: UnaryOperator)) break; } // CHECK: Block B{{[0-9]+}}: @@ -265,16 +265,16 @@ void assign_in_switch(int mode) { void loan_in_loop(bool condition) { MyObj* p = nullptr; // CHECK: Block B{{[0-9]+}}: -// CHECK: AssignOrigin (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_NULLPTR:[0-9]+]] (Expr: CXXNullPtrLiteralExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_NULLPTR:[0-9]+]] (Expr: CXXNullPtrLiteralExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr)) while (condition) { MyObj inner; // CHECK: Block B{{[0-9]+}}: p = &inner; // CHECK: Issue ([[L_INNER:[0-9]+]] (Path: inner), ToOrigin: [[O_DRE_INNER:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_INNER:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_INNER]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_INNER:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_INNER]] (Expr: DeclRefExpr)) // CHECK: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_INNER]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_INNER]] (Expr: UnaryOperator)) // CHECK: Expire ([[L_INNER]] (Path: inner)) } } @@ -286,16 +286,16 @@ void loop_with_break(int count) { MyObj* p = &s1; // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_S1:[0-9]+]] (Path: s1), ToOrigin: [[O_DRE_S1:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S1]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_S1:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S1]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_S1]] (Expr: UnaryOperator)) for (int i = 0; i < count; ++i) { if (i == 5) { // CHECK: Block B{{[0-9]+}}: p = &s2; // CHECK: Issue ([[L_S2:[0-9]+]] (Path: s2), ToOrigin: [[O_DRE_S2:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S2]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_S2:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_S2]] (Expr: DeclRefExpr)) // CHECK: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: 
[[O_P]] (Decl: p), Src: [[O_ADDR_S2]] (Expr: UnaryOperator)) break; } } @@ -308,22 +308,22 @@ void loop_with_break(int count) { void nested_scopes() { MyObj* p = nullptr; // CHECK: Block B{{[0-9]+}}: -// CHECK: AssignOrigin (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_NULLPTR:[0-9]+]] (Expr: CXXNullPtrLiteralExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_NULLPTR_CAST:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_NULLPTR:[0-9]+]] (Expr: CXXNullPtrLiteralExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_NULLPTR_CAST]] (Expr: ImplicitCastExpr)) { MyObj outer; p = &outer; // CHECK: Issue ([[L_OUTER:[0-9]+]] (Path: outer), ToOrigin: [[O_DRE_OUTER:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_OUTER:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_OUTER]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_OUTER:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_OUTER]] (Expr: DeclRefExpr)) // CHECK: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_OUTER]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_OUTER]] (Expr: UnaryOperator)) { MyObj inner; p = &inner; // CHECK: Issue ([[L_INNER:[0-9]+]] (Path: inner), ToOrigin: [[O_DRE_INNER:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_INNER:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_INNER]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_INNER:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_INNER]] (Expr: DeclRefExpr)) // CHECK: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_INNER]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_INNER]] (Expr: UnaryOperator)) } // CHECK: Expire ([[L_INNER]] (Path: inner)) } @@ -336,14 +336,14 @@ void pointer_indirection() { int *p = &a; // CHECK: Block B{{[0-9]+}}: // CHECK: Issue ([[L_A:[0-9]+]] (Path: a), ToOrigin: [[O_DRE_A:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_A:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_A]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_A]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_ADDR_A:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_A]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_A]] (Expr: UnaryOperator)) int **pp = &p; // Note: No facts are generated for &p because the subexpression is a pointer type, // which is not yet supported by the origin model. This is expected. 
int *q = *pp; // CHECK: Use ([[O_PP:[0-9]+]] (Decl: pp), Read) -// CHECK: AssignOrigin (Dest: {{[0-9]+}} (Decl: q), Src: {{[0-9]+}} (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: {{[0-9]+}} (Decl: q), Src: {{[0-9]+}} (Expr: ImplicitCastExpr)) } // CHECK-LABEL: Function: ternary_operator @@ -360,7 +360,7 @@ void ternary_operator() { // CHECK: Block B{{[0-9]+}}: // CHECK: Use ({{[0-9]+}} (Decl: p), Write) -// CHECK: AssignOrigin (Dest: {{[0-9]+}} (Decl: p), Src: {{[0-9]+}} (Expr: ConditionalOperator)) +// CHECK: OriginFlow (Dest: {{[0-9]+}} (Decl: p), Src: {{[0-9]+}} (Expr: ConditionalOperator)) } // CHECK-LABEL: Function: test_use_facts @@ -371,9 +371,9 @@ void test_use_facts() { // CHECK: Block B{{[0-9]+}}: p = &x; // CHECK: Issue ([[L_X:[0-9]+]] (Path: x), ToOrigin: [[O_DRE_X:[0-9]+]] (Expr: DeclRefExpr)) -// CHECK: AssignOrigin (Dest: [[O_ADDR_X:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_X]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_X:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_X]] (Expr: DeclRefExpr)) // CHECK: Use ([[O_P:[0-9]+]] (Decl: p), Write) -// CHECK: AssignOrigin (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_X]] (Expr: UnaryOperator)) +// CHECK: OriginFlow (Dest: [[O_P]] (Decl: p), Src: [[O_ADDR_X]] (Expr: UnaryOperator)) (void)*p; // CHECK: Use ([[O_P]] (Decl: p), Read) usePointer(p); @@ -389,4 +389,28 @@ void test_use_facts() { q->id = 2; // CHECK: Use ([[O_Q]] (Decl: q), Read) // CHECK: Expire ([[L_X]] (Path: x)) -} \ No newline at end of file +} + +// CHECK-LABEL: Function: test_use_lifetimebound_call +MyObj* LifetimeBoundCall(MyObj* x [[clang::lifetimebound]], MyObj* y [[clang::lifetimebound]]); +void test_use_lifetimebound_call() { + MyObj x, y; + MyObj *p = &x; +// CHECK: Issue ([[L_X:[0-9]+]] (Path: x), ToOrigin: [[O_DRE_X:[0-9]+]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_X:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_X]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_P:[0-9]+]] (Decl: p), Src: [[O_ADDR_X]] (Expr: UnaryOperator)) + MyObj *q = &y; +// CHECK: Issue ([[L_Y:[0-9]+]] (Path: y), ToOrigin: [[O_DRE_Y:[0-9]+]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_ADDR_Y:[0-9]+]] (Expr: UnaryOperator), Src: [[O_DRE_Y]] (Expr: DeclRefExpr)) +// CHECK: OriginFlow (Dest: [[O_Q:[0-9]+]] (Decl: q), Src: [[O_ADDR_Y]] (Expr: UnaryOperator)) + MyObj* r = LifetimeBoundCall(p, q); +// CHECK: Use ([[O_P]] (Decl: p), Read) +// CHECK: OriginFlow (Dest: [[O_P_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_P]] (Decl: p)) +// CHECK: Use ([[O_Q]] (Decl: q), Read) +// CHECK: OriginFlow (Dest: [[O_Q_RVAL:[0-9]+]] (Expr: ImplicitCastExpr), Src: [[O_Q]] (Decl: q)) +// CHECK: OriginFlow (Dest: [[O_CALL_EXPR:[0-9]+]] (Expr: CallExpr), Src: [[O_P_RVAL]] (Expr: ImplicitCastExpr)) +// CHECK: OriginFlow (Dest: [[O_CALL_EXPR]] (Expr: CallExpr), Src: [[O_Q_RVAL]] (Expr: ImplicitCastExpr), Merge) +// CHECK: OriginFlow (Dest: [[O_R:[0-9]+]] (Decl: r), Src: [[O_CALL_EXPR]] (Expr: CallExpr)) +// CHECK: Expire ([[L_Y]] (Path: y)) +// CHECK: Expire ([[L_X]] (Path: x)) +} diff --git a/clang/test/Sema/warn-lifetime-safety.cpp b/clang/test/Sema/warn-lifetime-safety.cpp index bc8a5f3f7150f..f6bec6e66c365 100644 --- a/clang/test/Sema/warn-lifetime-safety.cpp +++ b/clang/test/Sema/warn-lifetime-safety.cpp @@ -371,3 +371,152 @@ void no_error_if_dangle_then_rescue_gsl() { v = safe; // 'v' is "rescued" before use by reassigning to a valid object. v.use(); // This is safe. 
} + + +//===----------------------------------------------------------------------===// +// Lifetimebound Attribute Tests +//===----------------------------------------------------------------------===// + +View Identity(View v [[clang::lifetimebound]]); +View Choose(bool cond, View a [[clang::lifetimebound]], View b [[clang::lifetimebound]]); +MyObj* GetPointer(const MyObj& obj [[clang::lifetimebound]]); + +struct [[gsl::Pointer()]] LifetimeBoundView { + LifetimeBoundView(); + LifetimeBoundView(const MyObj& obj [[clang::lifetimebound]]); + LifetimeBoundView pass() [[clang::lifetimebound]] { return *this; } + operator View() const [[clang::lifetimebound]]; +}; + +void lifetimebound_simple_function() { + View v; + { + MyObj obj; + v = Identity(obj); // expected-warning {{object whose reference is captured does not live long enough}} + } // expected-note {{destroyed here}} + v.use(); // expected-note {{later used here}} +} + +void lifetimebound_multiple_args_definite() { + View v; + { + MyObj obj1, obj2; + v = Choose(true, + obj1, // expected-warning {{object whose reference is captured does not live long enough}} + obj2); // expected-warning {{object whose reference is captured does not live long enough}} + } // expected-note 2 {{destroyed here}} + v.use(); // expected-note 2 {{later used here}} +} + +void lifetimebound_multiple_args_potential(bool cond) { + MyObj safe; + View v = safe; + { + MyObj obj1; + if (cond) { + MyObj obj2; + v = Choose(true, + obj1, // expected-warning {{object whose reference is captured may not live long enough}} + obj2); // expected-warning {{object whose reference is captured may not live long enough}} + } // expected-note {{destroyed here}} + } // expected-note {{destroyed here}} + v.use(); // expected-note 2 {{later used here}} +} + +View SelectFirst(View a [[clang::lifetimebound]], View b); +void lifetimebound_mixed_args() { + View v; + { + MyObj obj1, obj2; + v = SelectFirst(obj1, // expected-warning {{object whose reference is captured does not live long enough}} + obj2); + } // expected-note {{destroyed here}} + v.use(); // expected-note {{later used here}} +} + +void lifetimebound_member_function() { + LifetimeBoundView lbv, lbv2; + { + MyObj obj; + lbv = obj; // expected-warning {{object whose reference is captured does not live long enough}} + lbv2 = lbv.pass(); + } // expected-note {{destroyed here}} + View v = lbv2; // expected-note {{later used here}} + v.use(); +} + +void lifetimebound_conversion_operator() { + View v; + { + MyObj obj; + LifetimeBoundView lbv = obj; // expected-warning {{object whose reference is captured does not live long enough}} + v = lbv; // Conversion operator is lifetimebound + } // expected-note {{destroyed here}} + v.use(); // expected-note {{later used here}} +} + +void lifetimebound_chained_calls() { + View v; + { + MyObj obj; + v = Identity(Identity(Identity(obj))); // expected-warning {{object whose reference is captured does not live long enough}} + } // expected-note {{destroyed here}} + v.use(); // expected-note {{later used here}} +} + +void lifetimebound_with_pointers() { + MyObj* ptr; + { + MyObj obj; + ptr = GetPointer(obj); // expected-warning {{object whose reference is captured does not live long enough}} + } // expected-note {{destroyed here}} + (void)*ptr; // expected-note {{later used here}} +} + +void lifetimebound_no_error_safe_usage() { + MyObj obj; + View v1 = Identity(obj); // No warning - obj lives long enough + View v2 = Choose(true, v1, Identity(obj)); // No warning - all args are safe + v2.use(); 
// Safe usage +} + +void lifetimebound_partial_safety(bool cond) { + MyObj safe_obj; + View v = safe_obj; + + if (cond) { + MyObj temp_obj; + v = Choose(true, + safe_obj, + temp_obj); // expected-warning {{object whose reference is captured may not live long enough}} + } // expected-note {{destroyed here}} + v.use(); // expected-note {{later used here}} +} + +// FIXME: Creating reference from lifetimebound call doesn't propagate loans. +const MyObj& GetObject(View v [[clang::lifetimebound]]); +void lifetimebound_return_reference() { + View v; + const MyObj* ptr; + { + MyObj obj; + View temp_v = obj; + const MyObj& ref = GetObject(temp_v); + ptr = &ref; + } + (void)*ptr; +} + +// FIXME: No warning for non gsl::Pointer types. Origin tracking is only supported for pointer types. +struct LifetimeBoundCtor { + LifetimeBoundCtor(); + LifetimeBoundCtor(const MyObj& obj [[clang::lifetimebound]]); +}; +void lifetimebound_ctor() { + LifetimeBoundCtor v; + { + MyObj obj; + v = obj; + } + (void)v; +} diff --git a/clang/test/SemaCUDA/vararg.cu b/clang/test/SemaCUDA/vararg.cu index 34ef367d89820..0238f42dc40a9 100644 --- a/clang/test/SemaCUDA/vararg.cu +++ b/clang/test/SemaCUDA/vararg.cu @@ -10,7 +10,7 @@ #include #include "Inputs/cuda.h" -__device__ void foo() { +__global__ void foo() { va_list list; va_arg(list, int); #ifdef EXPECT_VA_ARG_ERR diff --git a/clang/test/SemaCXX/amdgpu-image-rsrc.cpp b/clang/test/SemaCXX/amdgpu-image-rsrc.cpp new file mode 100644 index 0000000000000..61a82d47cf1c0 --- /dev/null +++ b/clang/test/SemaCXX/amdgpu-image-rsrc.cpp @@ -0,0 +1,17 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: amdgpu-registered-target +// RUN: %clang_cc1 -fsyntax-only -verify -std=gnu++11 -triple amdgcn -Wno-unused-value %s + +void foo() { + int n = 100; + __amdgpu_texture_t v = 0; // expected-error {{cannot initialize a variable of type '__amdgpu_texture_t' with an rvalue of type 'int'}} + static_cast<__amdgpu_texture_t>(n); // expected-error {{static_cast from 'int' to '__amdgpu_texture_t' is not allowed}} + reinterpret_cast<__amdgpu_texture_t>(n); // expected-error {{reinterpret_cast from 'int' to '__amdgpu_texture_t' is not allowed}} + (void)(v + v); // expected-error {{invalid operands to binary expression ('__amdgpu_texture_t' and '__amdgpu_texture_t')}} + int x(v); // expected-error {{cannot initialize a variable of type 'int' with an lvalue of type '__amdgpu_texture_t'}} + __amdgpu_texture_t k; +} + +template void bar(T); +void use(__amdgpu_texture_t r) { bar(r); } +struct S { __amdgpu_texture_t r; int a; }; diff --git a/clang/test/SemaCXX/array-bounds.cpp b/clang/test/SemaCXX/array-bounds.cpp index b584e1e7cd453..6a40d1db0a6fd 100644 --- a/clang/test/SemaCXX/array-bounds.cpp +++ b/clang/test/SemaCXX/array-bounds.cpp @@ -237,7 +237,7 @@ void test_pr10771() { ((char*)foo)[sizeof(foo) - 1] = '\0'; // no-warning *(((char*)foo) + sizeof(foo) - 1) = '\0'; // no-warning - ((char*)foo)[sizeof(foo)] = '\0'; // expected-warning {{array index 32768 is past the end of the array (that has type 'double[4096]', cast to 'char *')}} + ((char*)foo)[sizeof(foo)] = '\0'; // expected-warning {{array index 32'768 is past the end of the array (that has type 'double[4096]', cast to 'char *')}} // TODO: This should probably warn, too. *(((char*)foo) + sizeof(foo)) = '\0'; // no-warning @@ -248,7 +248,7 @@ int test_pr11007_aux(const char * restrict, ...); // Test checking with varargs. 
void test_pr11007() { double a[5]; // expected-note {{array 'a' declared here}} - test_pr11007_aux("foo", a[1000]); // expected-warning {{array index 1000 is past the end of the array (that has type 'double[5]')}} + test_pr11007_aux("foo", a[1000]); // expected-warning {{array index 1'000 is past the end of the array (that has type 'double[5]')}} } void test_rdar10916006(void) diff --git a/clang/test/SemaCXX/bitfield-layout.cpp b/clang/test/SemaCXX/bitfield-layout.cpp index 7efd1d38c682f..f30218be01c56 100644 --- a/clang/test/SemaCXX/bitfield-layout.cpp +++ b/clang/test/SemaCXX/bitfield-layout.cpp @@ -35,7 +35,7 @@ CHECK_SIZE(Test4, 8); CHECK_ALIGN(Test4, 8); struct Test5 { - char c : 0x100000001; // expected-warning {{width of bit-field 'c' (4294967297 bits) exceeds the width of its type; value will be truncated to 8 bits}} + char c : 0x100000001; // expected-warning {{width of bit-field 'c' (4'294'967'297 bits) exceeds the width of its type; value will be truncated to 8 bits}} }; // Size and align don't really matter here, just make sure we don't crash. CHECK_SIZE(Test5, 1); diff --git a/clang/test/SemaCXX/constant-expression-cxx14.cpp b/clang/test/SemaCXX/constant-expression-cxx14.cpp index 1743e0e3ac4b5..bea90ff7eaf8a 100644 --- a/clang/test/SemaCXX/constant-expression-cxx14.cpp +++ b/clang/test/SemaCXX/constant-expression-cxx14.cpp @@ -1047,7 +1047,7 @@ constexpr int S = sum(Cs); // expected-error{{must be initialized by a constant constexpr void PR28739(int n) { // cxx14_20-error {{never produces a constant}} int *p = &n; // expected-note {{array 'p' declared here}} p += (__int128)(unsigned long)-1; // cxx14_20-note {{cannot refer to element 18446744073709551615 of non-array object in a constant expression}} - // expected-warning@-1 {{the pointer incremented by 18446744073709551615 refers past the last possible element for an array in 64-bit address space containing 32-bit (4-byte) elements (max possible 4611686018427387904 elements)}} + // expected-warning@-1 {{the pointer incremented by 18'446'744'073'709'551'615 refers past the last possible element for an array in 64-bit address space containing 32-bit (4-byte) elements (max possible 4'611'686'018'427'387'904 elements)}} } constexpr void Void(int n) { diff --git a/clang/test/SemaCXX/ctad.cpp b/clang/test/SemaCXX/ctad.cpp index 8380b564bdcdd..7de7f50337e8c 100644 --- a/clang/test/SemaCXX/ctad.cpp +++ b/clang/test/SemaCXX/ctad.cpp @@ -190,3 +190,10 @@ namespace GH136624 { foo::Alias t = 0; // expected-error@-1 {{no viable conversion from 'int' to 'GH136624::A' (aka 'A')}} } // namespace GH136624 + +namespace GH131342 { + template constexpr int val{0}; + template struct A { A(T) {} }; + template using AA = A>; + AA a{0}; +} // namespace GH131342 diff --git a/clang/test/SemaCXX/cxx17-compat.cpp b/clang/test/SemaCXX/cxx17-compat.cpp index 81b3e1fde5493..99e41d818a6c3 100644 --- a/clang/test/SemaCXX/cxx17-compat.cpp +++ b/clang/test/SemaCXX/cxx17-compat.cpp @@ -83,9 +83,11 @@ static auto [cx, cy, cz] = C(); void f() { static thread_local auto [cx, cy, cz] = C(); #if __cplusplus <= 201703L - // expected-warning@-2 {{decomposition declaration declared with 'static thread_local' specifiers is a C++20 extension}} + // expected-warning@-2 {{decomposition declaration declared 'static' is a C++20 extension}} + // expected-warning@-3 {{decomposition declaration declared 'thread_local' is a C++20 extension}} #else - // expected-warning@-4 {{decomposition declaration declared with 'static thread_local' specifiers is incompatible with C++ 
standards before C++20}} + // expected-warning@-5 {{decomposition declaration declared 'static' is incompatible with C++ standards before C++20}} + // expected-warning@-6 {{decomposition declaration declared 'thread_local' is incompatible with C++ standards before C++20}} #endif } diff --git a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp index 1f4d44218ad1f..fd1a5c01233d5 100644 --- a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp +++ b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp @@ -113,7 +113,7 @@ using Bar = Foo; // expected-note {{candidate template ignored: co // expected-note {{implicit deduction guide declared as 'template requires __is_deducible(test9::Bar, test9::Foo) Bar(test9::Foo) -> test9::Foo'}} \ // expected-note {{implicit deduction guide declared as 'template requires __is_deducible(test9::Bar, test9::Foo) Bar(const X (&)[sizeof(X)]) -> test9::Foo'}} \ // expected-note {{candidate template ignored: constraints not satisfied [with X = int]}} \ - // expected-note {{cannot deduce template arguments for 'test9::Bar' from 'test9::Foo'}} + // expected-note {{cannot deduce template arguments for 'test9::Bar' from 'test9::Foo'}} Bar s = {{1}}; // expected-error {{no viable constructor or deduction guide }} @@ -586,3 +586,18 @@ Baz a{}; static_assert(__is_same(decltype(a), A>)); } // namespace GH133132 + +namespace GH130604 { +template struct A { + A(T); +}; + +template class TT = A> using Alias = TT; // #gh130604-alias +template using Alias2 = Alias; + +Alias2 a(42); +// expected-error@-1 {{no viable constructor or deduction guide for deduction of template arguments of 'Alias2'}} +Alias b(42); +// expected-error@-1 {{alias template 'Alias' requires template arguments; argument deduction only allowed for class templates or alias template}} +// expected-note@#gh130604-alias {{template is declared here}} +} diff --git a/clang/test/SemaCXX/cxx2c-binding-pack-nontemplate.cpp b/clang/test/SemaCXX/cxx2c-binding-pack-nontemplate.cpp index a4f0bcdb4270b..638a2d805c2c5 100644 --- a/clang/test/SemaCXX/cxx2c-binding-pack-nontemplate.cpp +++ b/clang/test/SemaCXX/cxx2c-binding-pack-nontemplate.cpp @@ -10,8 +10,8 @@ void decompose_array() { auto [x, ...rest, y] = arr; // cxx26-warning@+4 {{structured binding packs are incompatible with C++ standards before C++2c}} - // cxx23-warning@+3 {{structured binding packs are a C++2c extension}} - // nontemplate-error@+2 {{decomposition declaration cannot be declared 'constexpr'}} + // cxx23-error@+3 {{decomposition declaration cannot be declared 'constexpr'}} + // cxx23-warning@+2 {{structured binding packs are a C++2c extension}} // nontemplate-error@+1 {{pack declaration outside of template}} constexpr auto [x_c, ...rest_c, y_c] = arr; } diff --git a/clang/test/SemaCXX/cxx2c-decomposition.cpp b/clang/test/SemaCXX/cxx2c-decomposition.cpp new file mode 100644 index 0000000000000..99278c6575ef1 --- /dev/null +++ b/clang/test/SemaCXX/cxx2c-decomposition.cpp @@ -0,0 +1,76 @@ +// RUN: %clang_cc1 -std=c++2c %s -triple x86_64-unknown-linux-gnu -verify=expected +// RUN: %clang_cc1 -std=c++2c %s -triple x86_64-unknown-linux-gnu -verify=expected -fexperimental-new-constant-interpreter + +namespace std { + using size_t = decltype(sizeof(0)); + template struct tuple_size; + template struct tuple_element; +} + +struct Y { int n = 0; }; +struct X { X(); X(Y); X(const X&); ~X(); int k = 42;}; // #X-decl +struct Z { constexpr Z(): i (43){}; int i;}; // #Z-decl +struct Z2 { constexpr Z2(): i (0){}; int i; ~Z2();}; // 
#Z2-decl + +struct Bit { constexpr Bit(): i(1), j(1){}; int i: 2; int j:2;}; + +struct A { int a : 13; bool b; }; + +struct B {}; +template<> struct std::tuple_size { enum { value = 2 }; }; +template<> struct std::tuple_size { enum { value = 2 }; }; +template<> struct std::tuple_element<0, const B> { using type = Y; }; +template<> struct std::tuple_element<1, const B> { using type = const int&; }; +template +constexpr auto get(B) { + if constexpr (N == 0) + return Y(); + else + return 0.0; +} + + +constexpr auto [t1] = Y {42}; +static_assert(t1 == 42); + +constexpr int i[] = {1, 2}; +constexpr auto [t2, t3] = i; +static_assert(t2 == 1); +static_assert(t3 == 2); + +constexpr auto [t4] = X(); +// expected-error@-1 {{constexpr variable cannot have non-literal type 'const X'}} \ +// expected-note@#X-decl {{'X' is not literal because it is not an aggregate and has no constexpr constructors other than copy or move constructors}} + +constexpr auto [t5] = Z(); +static_assert(t5 == 43); + +constexpr auto [t6] = Z2(); +//expected-error@-1 {{constexpr variable cannot have non-literal type 'const Z2'}} +// expected-note@#Z2-decl {{'Z2' is not literal because its destructor is not constexpr}} + +constexpr auto [t7, t8] = Bit(); +static_assert(t7 == 1); +static_assert(t8 == 1); + +void test_tpl(auto) { + constexpr auto [...p] = Bit(); + static_assert(((p == 1) && ...)); +} + +void test() { + test_tpl(0); +} + +// FIXME : support tuple +constexpr auto [a, b] = B{}; +static_assert(a.n == 0); +// expected-error@-1 {{static assertion expression is not an integral constant expression}} \ +// expected-note@-1 {{read of temporary is not allowed in a constant expression outside the expression that created the temporary}}\ +// expected-note@-2 {{temporary created here}} + +constinit auto [init1] = Y {42}; +constinit auto [init2] = X {}; // expected-error {{variable does not have a constant initializer}} \ +// expected-note {{required by 'constinit' specifier here}} \ +// expected-note {{non-constexpr constructor 'X' cannot be used in a constant expression}} \ +// expected-note@#X-decl {{declared here}} diff --git a/clang/test/SemaCXX/cxx98-compat.cpp b/clang/test/SemaCXX/cxx98-compat.cpp index 8e7acf73923e5..587c242271a02 100644 --- a/clang/test/SemaCXX/cxx98-compat.cpp +++ b/clang/test/SemaCXX/cxx98-compat.cpp @@ -1,6 +1,7 @@ -// RUN: %clang_cc1 -fsyntax-only -std=c++11 -Wc++98-compat -verify %s -// RUN: %clang_cc1 -fsyntax-only -std=c++14 -Wc++98-compat -verify %s -DCXX14COMPAT -// RUN: %clang_cc1 -fsyntax-only -std=c++17 -Wc++98-compat -verify %s -DCXX14COMPAT -DCXX17COMPAT +// RUN: %clang_cc1 -fsyntax-only -std=c++11 -Wc++98-compat -verify=expected,not-cpp20 %s +// RUN: %clang_cc1 -fsyntax-only -std=c++14 -Wc++98-compat -verify=expected,not-cpp20 %s -DCXX14COMPAT +// RUN: %clang_cc1 -fsyntax-only -std=c++17 -Wc++98-compat -verify=expected,not-cpp20 %s -DCXX14COMPAT -DCXX17COMPAT +// RUN: %clang_cc1 -fsyntax-only -std=c++20 -Wc++98-compat -verify=expected,cpp20 %s -DCXX14COMPAT -DCXX17COMPAT namespace std { struct type_info; @@ -226,7 +227,8 @@ void TrivialButNonPODThroughEllipsis() { } struct HasExplicitConversion { - explicit operator bool(); // expected-warning {{explicit conversion functions are incompatible with C++98}} + // FIXME I think we should generate this diagnostic in C++20 + explicit operator bool(); // not-cpp20-warning {{explicit conversion functions are incompatible with C++98}} }; struct Struct {}; @@ -430,3 +432,12 @@ void ctad_test() { CTAD t = s; // expected-warning {{class template 
argument deduction is incompatible with C++ standards before C++17}} } #endif + +namespace GH161702 { +struct S { + enum E { A }; + using E::A; // expected-warning {{enumeration type in nested name specifier is incompatible with C++98}} + // not-cpp20-error@-1 {{using declaration refers to its own class}} + // cpp20-warning@-2 {{member using declaration naming non-class ''E'' enumerator is incompatible with C++ standards before C++20}} +}; +} diff --git a/clang/test/SemaCXX/decltype.cpp b/clang/test/SemaCXX/decltype.cpp index 739485b57a3ec..45a4c4cf1ac86 100644 --- a/clang/test/SemaCXX/decltype.cpp +++ b/clang/test/SemaCXX/decltype.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify -Wno-c99-designator %s +// RUN: %clang_cc1 -std=c++17 -fsyntax-only -verify -Wno-c99-designator %s // PR5290 int const f0(); @@ -156,6 +157,8 @@ struct A { } }; + + // This shouldn't crash. static_assert(A().f() == 0, ""); // The result should not be dependent. @@ -163,6 +166,81 @@ static_assert(A().f() != 0, ""); // expected-error {{static assertion // expected-note@-1 {{expression evaluates to '0 != 0'}} } + +#if __cplusplus >= 201703L +namespace GH160497 { + +template struct S { + template + inline static auto mem = + [] { static_assert(false); // expected-error {{static assertion failed}} \ + // expected-note {{while substituting into a lambda expression here}} + return 42; + }(); +}; + +using T = decltype(S::mem); + // expected-note@-1 {{in instantiation of static data member 'GH160497::S::mem' requested here}} + + +template struct S2 { + template + inline static auto* mem = + [] { static_assert(false); // expected-error {{static assertion failed}} \ + // expected-note {{while substituting into a lambda expression here}} + return static_cast(nullptr); + }(); +}; + +using T2 = decltype(S2::mem); +//expected-note@-1 {{in instantiation of static data member 'GH160497::S2::mem' requested here}} + +template struct S3 { + template + inline static int mem = // Check we don't instantiate when the type is not deduced. 
+ [] { static_assert(false); + return 42; + }(); +}; + +using T = decltype(S3::mem); +} + +namespace N1 { + +template +struct S { + template + inline static auto mem = 42; +}; + +using T = decltype(S::mem); + +T y = 42; + +} + +namespace GH161196 { + +template struct A { + static constexpr int digits = 0; +}; + +template struct B { + template ::digits> + static constexpr auto XBitMask = 0; +}; + +struct C { + using ReferenceHost = B; + template static decltype(ReferenceHost::XBitMask<0>) XBitMask; +}; + +void test() { (void)C::XBitMask<0>; } + +} +#endif + template class conditional { }; diff --git a/clang/test/SemaCXX/integer-overflow.cpp b/clang/test/SemaCXX/integer-overflow.cpp index 73a4e88ee6c09..214dc11bf3ead 100644 --- a/clang/test/SemaCXX/integer-overflow.cpp +++ b/clang/test/SemaCXX/integer-overflow.cpp @@ -171,7 +171,7 @@ uint64_t check_integer_overflows(int i) { //expected-note 0+{{declared here}} uint64_t a[10]; a[4608 * 1024 * 1024] = 1; #if __cplusplus < 201103L -// expected-warning@-2 {{array index 536870912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}} +// expected-warning@-2 {{array index 536'870'912 is past the end of the array (that has type 'uint64_t[10]' (aka 'unsigned long long[10]'))}} // expected-note@-4 {{array 'a' declared here}} #endif diff --git a/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp b/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp index 097ada3caa135..436dfb9aac0a7 100644 --- a/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp +++ b/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp @@ -17,8 +17,7 @@ constexpr bool A::far() { b.data_member; requires A::far(); // #Invalid // expected-error@#Invalid {{recursive template instantiation exceeded maximum depth}} - // expected-note@#Invalid {{in instantiation}} - // expected-note@#Invalid 2 {{while}} + // expected-note@#Invalid 3 {{while}} // expected-note@#Invalid {{contexts in backtrace}} // expected-note@#Invalid {{increase recursive template instantiation depth}} }; diff --git a/clang/test/SemaCXX/type-trait-synthesises-from-spaceship.cpp b/clang/test/SemaCXX/type-trait-synthesizes-from-spaceship.cpp similarity index 57% rename from clang/test/SemaCXX/type-trait-synthesises-from-spaceship.cpp rename to clang/test/SemaCXX/type-trait-synthesizes-from-spaceship.cpp index ba581475bb4c7..be312f453f4be 100644 --- a/clang/test/SemaCXX/type-trait-synthesises-from-spaceship.cpp +++ b/clang/test/SemaCXX/type-trait-synthesizes-from-spaceship.cpp @@ -1,24 +1,24 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++20 %s -static_assert(!__builtin_lt_synthesises_from_spaceship()); // expected-error {{expected a type}} -static_assert(!__builtin_lt_synthesises_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}} -static_assert(!__builtin_lt_synthesises_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 argument}} -static_assert(!__builtin_lt_synthesises_from_spaceship(int, 0)); // expected-error {{expected a type}} - -static_assert(!__builtin_le_synthesises_from_spaceship()); // expected-error {{expected a type}} -static_assert(!__builtin_le_synthesises_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}} -static_assert(!__builtin_le_synthesises_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 argument}} -static_assert(!__builtin_le_synthesises_from_spaceship(int, 0)); // 
expected-error {{expected a type}} - -static_assert(!__builtin_gt_synthesises_from_spaceship()); // expected-error {{expected a type}} -static_assert(!__builtin_gt_synthesises_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}} -static_assert(!__builtin_gt_synthesises_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 argument}} -static_assert(!__builtin_gt_synthesises_from_spaceship(int, 0)); // expected-error {{expected a type}} - -static_assert(!__builtin_ge_synthesises_from_spaceship()); // expected-error {{expected a type}} -static_assert(!__builtin_ge_synthesises_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}} -static_assert(!__builtin_ge_synthesises_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 argument}} -static_assert(!__builtin_ge_synthesises_from_spaceship(int, 0)); // expected-error {{expected a type}} +static_assert(!__builtin_lt_synthesizes_from_spaceship()); // expected-error {{expected a type}} +static_assert(!__builtin_lt_synthesizes_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}} +static_assert(!__builtin_lt_synthesizes_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 argument}} +static_assert(!__builtin_lt_synthesizes_from_spaceship(int, 0)); // expected-error {{expected a type}} + +static_assert(!__builtin_le_synthesizes_from_spaceship()); // expected-error {{expected a type}} +static_assert(!__builtin_le_synthesizes_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}} +static_assert(!__builtin_le_synthesizes_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 argument}} +static_assert(!__builtin_le_synthesizes_from_spaceship(int, 0)); // expected-error {{expected a type}} + +static_assert(!__builtin_gt_synthesizes_from_spaceship()); // expected-error {{expected a type}} +static_assert(!__builtin_gt_synthesizes_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}} +static_assert(!__builtin_gt_synthesizes_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 argument}} +static_assert(!__builtin_gt_synthesizes_from_spaceship(int, 0)); // expected-error {{expected a type}} + +static_assert(!__builtin_ge_synthesizes_from_spaceship()); // expected-error {{expected a type}} +static_assert(!__builtin_ge_synthesizes_from_spaceship(int)); // expected-error {{type trait requires 2 arguments; have 1 argument}} +static_assert(!__builtin_ge_synthesizes_from_spaceship(int, int, int)); // expected-error {{type trait requires 2 arguments; have 3 argument}} +static_assert(!__builtin_ge_synthesizes_from_spaceship(int, 0)); // expected-error {{expected a type}} namespace std { struct strong_ordering { @@ -35,10 +35,10 @@ struct DefaultSpaceship { friend auto operator<=>(DefaultSpaceship, DefaultSpaceship) = default; }; -static_assert(__builtin_lt_synthesises_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&)); -static_assert(__builtin_le_synthesises_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&)); -static_assert(__builtin_gt_synthesises_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&)); -static_assert(__builtin_ge_synthesises_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&)); 
+static_assert(__builtin_lt_synthesizes_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&)); +static_assert(__builtin_le_synthesizes_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&)); +static_assert(__builtin_gt_synthesizes_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&)); +static_assert(__builtin_ge_synthesizes_from_spaceship(const DefaultSpaceship&, const DefaultSpaceship&)); struct CustomSpaceship { int i; @@ -48,10 +48,10 @@ struct CustomSpaceship { } }; -static_assert(__builtin_lt_synthesises_from_spaceship(const CustomSpaceship&, const CustomSpaceship&)); -static_assert(__builtin_le_synthesises_from_spaceship(const CustomSpaceship&, const CustomSpaceship&)); -static_assert(__builtin_gt_synthesises_from_spaceship(const CustomSpaceship&, const CustomSpaceship&)); -static_assert(__builtin_ge_synthesises_from_spaceship(const CustomSpaceship&, const CustomSpaceship&)); +static_assert(__builtin_lt_synthesizes_from_spaceship(const CustomSpaceship&, const CustomSpaceship&)); +static_assert(__builtin_le_synthesizes_from_spaceship(const CustomSpaceship&, const CustomSpaceship&)); +static_assert(__builtin_gt_synthesizes_from_spaceship(const CustomSpaceship&, const CustomSpaceship&)); +static_assert(__builtin_ge_synthesizes_from_spaceship(const CustomSpaceship&, const CustomSpaceship&)); struct CustomLT { int i; @@ -61,10 +61,10 @@ struct CustomLT { } }; -static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomLT&, const CustomLT&)); -static_assert(!__builtin_le_synthesises_from_spaceship(const CustomLT&, const CustomLT&)); -static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomLT&, const CustomLT&)); -static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomLT&, const CustomLT&)); +static_assert(!__builtin_lt_synthesizes_from_spaceship(const CustomLT&, const CustomLT&)); +static_assert(!__builtin_le_synthesizes_from_spaceship(const CustomLT&, const CustomLT&)); +static_assert(!__builtin_gt_synthesizes_from_spaceship(const CustomLT&, const CustomLT&)); +static_assert(!__builtin_ge_synthesizes_from_spaceship(const CustomLT&, const CustomLT&)); struct CustomLE { int i; @@ -74,10 +74,10 @@ struct CustomLE { } }; -static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomLE&, const CustomLE&)); -static_assert(!__builtin_le_synthesises_from_spaceship(const CustomLE&, const CustomLE&)); -static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomLE&, const CustomLE&)); -static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomLE&, const CustomLE&)); +static_assert(!__builtin_lt_synthesizes_from_spaceship(const CustomLE&, const CustomLE&)); +static_assert(!__builtin_le_synthesizes_from_spaceship(const CustomLE&, const CustomLE&)); +static_assert(!__builtin_gt_synthesizes_from_spaceship(const CustomLE&, const CustomLE&)); +static_assert(!__builtin_ge_synthesizes_from_spaceship(const CustomLE&, const CustomLE&)); struct CustomGT { int i; @@ -87,10 +87,10 @@ struct CustomGT { } }; -static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomGT&, const CustomGT&)); -static_assert(!__builtin_le_synthesises_from_spaceship(const CustomGT&, const CustomGT&)); -static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomGT&, const CustomGT&)); -static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomGT&, const CustomGT&)); +static_assert(!__builtin_lt_synthesizes_from_spaceship(const CustomGT&, const CustomGT&)); +static_assert(!__builtin_le_synthesizes_from_spaceship(const 
CustomGT&, const CustomGT&)); +static_assert(!__builtin_gt_synthesizes_from_spaceship(const CustomGT&, const CustomGT&)); +static_assert(!__builtin_ge_synthesizes_from_spaceship(const CustomGT&, const CustomGT&)); struct CustomGE { int i; @@ -100,10 +100,10 @@ struct CustomGE { } }; -static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomGE&, const CustomGE&)); -static_assert(!__builtin_le_synthesises_from_spaceship(const CustomGE&, const CustomGE&)); -static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomGE&, const CustomGE&)); -static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomGE&, const CustomGE&)); +static_assert(!__builtin_lt_synthesizes_from_spaceship(const CustomGE&, const CustomGE&)); +static_assert(!__builtin_le_synthesizes_from_spaceship(const CustomGE&, const CustomGE&)); +static_assert(!__builtin_gt_synthesizes_from_spaceship(const CustomGE&, const CustomGE&)); +static_assert(!__builtin_ge_synthesizes_from_spaceship(const CustomGE&, const CustomGE&)); struct CustomLTAndSpaceship { int i; @@ -117,10 +117,10 @@ struct CustomLTAndSpaceship { } }; -static_assert(!__builtin_lt_synthesises_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&)); -static_assert(__builtin_le_synthesises_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&)); -static_assert(__builtin_gt_synthesises_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&)); -static_assert(__builtin_ge_synthesises_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&)); +static_assert(!__builtin_lt_synthesizes_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&)); +static_assert(__builtin_le_synthesizes_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&)); +static_assert(__builtin_gt_synthesizes_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&)); +static_assert(__builtin_ge_synthesizes_from_spaceship(const CustomLTAndSpaceship&, const CustomLTAndSpaceship&)); struct CustomLEAndSpaceship { int i; @@ -134,10 +134,10 @@ struct CustomLEAndSpaceship { } }; -static_assert(__builtin_lt_synthesises_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&)); -static_assert(!__builtin_le_synthesises_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&)); -static_assert(__builtin_gt_synthesises_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&)); -static_assert(__builtin_ge_synthesises_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&)); +static_assert(__builtin_lt_synthesizes_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&)); +static_assert(!__builtin_le_synthesizes_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&)); +static_assert(__builtin_gt_synthesizes_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&)); +static_assert(__builtin_ge_synthesizes_from_spaceship(const CustomLEAndSpaceship&, const CustomLEAndSpaceship&)); struct CustomGTAndSpaceship { int i; @@ -151,10 +151,10 @@ struct CustomGTAndSpaceship { } }; -static_assert(__builtin_lt_synthesises_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&)); -static_assert(__builtin_le_synthesises_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&)); -static_assert(!__builtin_gt_synthesises_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&)); -static_assert(__builtin_ge_synthesises_from_spaceship(const 
CustomGTAndSpaceship&, const CustomGTAndSpaceship&)); +static_assert(__builtin_lt_synthesizes_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&)); +static_assert(__builtin_le_synthesizes_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&)); +static_assert(!__builtin_gt_synthesizes_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&)); +static_assert(__builtin_ge_synthesizes_from_spaceship(const CustomGTAndSpaceship&, const CustomGTAndSpaceship&)); struct CustomGEAndSpaceship { int i; @@ -168,10 +168,10 @@ struct CustomGEAndSpaceship { } }; -static_assert(__builtin_lt_synthesises_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&)); -static_assert(__builtin_le_synthesises_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&)); -static_assert(__builtin_gt_synthesises_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&)); -static_assert(!__builtin_ge_synthesises_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&)); +static_assert(__builtin_lt_synthesizes_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&)); +static_assert(__builtin_le_synthesizes_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&)); +static_assert(__builtin_gt_synthesizes_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&)); +static_assert(!__builtin_ge_synthesizes_from_spaceship(const CustomGEAndSpaceship&, const CustomGEAndSpaceship&)); struct DefaultedCmpAndSpaceship { int i; @@ -187,10 +187,10 @@ struct DefaultedCmpAndSpaceship { }; // TODO: This should probably return true -static_assert(!__builtin_lt_synthesises_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&)); -static_assert(!__builtin_le_synthesises_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&)); -static_assert(!__builtin_gt_synthesises_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&)); -static_assert(!__builtin_ge_synthesises_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&)); +static_assert(!__builtin_lt_synthesizes_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&)); +static_assert(!__builtin_le_synthesizes_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&)); +static_assert(!__builtin_gt_synthesizes_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&)); +static_assert(!__builtin_ge_synthesizes_from_spaceship(const DefaultedCmpAndSpaceship&, const DefaultedCmpAndSpaceship&)); struct DifferentTypes { int i; @@ -200,13 +200,13 @@ struct DifferentTypes { } }; -static_assert(__builtin_lt_synthesises_from_spaceship(const DifferentTypes&, const int&)); -static_assert(__builtin_le_synthesises_from_spaceship(const DifferentTypes&, const int&)); -static_assert(__builtin_gt_synthesises_from_spaceship(const DifferentTypes&, const int&)); -static_assert(__builtin_ge_synthesises_from_spaceship(const DifferentTypes&, const int&)); +static_assert(__builtin_lt_synthesizes_from_spaceship(const DifferentTypes&, const int&)); +static_assert(__builtin_le_synthesizes_from_spaceship(const DifferentTypes&, const int&)); +static_assert(__builtin_gt_synthesizes_from_spaceship(const DifferentTypes&, const int&)); +static_assert(__builtin_ge_synthesizes_from_spaceship(const DifferentTypes&, const int&)); // TODO: Should this return true? 
It's technically not synthesized from spaceship, but it behaves exactly as-if it was -static_assert(!__builtin_lt_synthesises_from_spaceship(int, int)); -static_assert(!__builtin_le_synthesises_from_spaceship(int, int)); -static_assert(!__builtin_gt_synthesises_from_spaceship(int, int)); -static_assert(!__builtin_ge_synthesises_from_spaceship(int, int)); +static_assert(!__builtin_lt_synthesizes_from_spaceship(int, int)); +static_assert(!__builtin_le_synthesizes_from_spaceship(int, int)); +static_assert(!__builtin_gt_synthesizes_from_spaceship(int, int)); +static_assert(!__builtin_ge_synthesizes_from_spaceship(int, int)); diff --git a/clang/test/SemaCXX/type-traits-unsatisfied-diags-std.cpp b/clang/test/SemaCXX/type-traits-unsatisfied-diags-std.cpp index ba9fe3d24a7e6..3e03a79275232 100644 --- a/clang/test/SemaCXX/type-traits-unsatisfied-diags-std.cpp +++ b/clang/test/SemaCXX/type-traits-unsatisfied-diags-std.cpp @@ -66,6 +66,13 @@ struct is_final { template constexpr bool is_final_v = __is_final(T); +template +struct is_abstract { + static constexpr bool value = __is_abstract(T); +}; +template +constexpr bool is_abstract_v = __is_abstract(T); + #endif #ifdef STD2 @@ -151,6 +158,15 @@ using is_final = __details_is_final; template constexpr bool is_final_v = __is_final(T); +template +struct __details_is_abstract { + static constexpr bool value = __is_abstract(T); +}; +template +using is_abstract = __details_is_abstract; +template +constexpr bool is_abstract_v = __is_abstract(T); + #endif @@ -229,6 +245,13 @@ using is_final = __details_is_final; template constexpr bool is_final_v = is_final::value; +template +struct __details_is_abstract : bool_constant<__is_abstract(T)> {}; +template +using is_abstract = __details_is_abstract; +template +constexpr bool is_abstract_v = is_abstract::value; + #endif } @@ -336,6 +359,22 @@ static_assert(std::is_aggregate_v); // expected-note@-1 {{'void' is not aggregate}} \ // expected-note@-1 {{because it is a cv void type}} + +static_assert(!std::is_abstract::value); + +static_assert(std::is_abstract::value); +// expected-error-re@-1 {{static assertion failed due to requirement 'std::{{.*}}is_abstract::value'}} \ +// expected-note@-1 {{'int &' is not abstract}} \ +// expected-note@-1 {{because it is a reference type}} \ +// expected-note@-1 {{because it is not a struct or class type}} + +static_assert(std::is_abstract_v); +// expected-error@-1 {{static assertion failed due to requirement 'std::is_abstract_v'}} \ +// expected-note@-1 {{'int &' is not abstract}} \ +// expected-note@-1 {{because it is a reference type}} \ +// expected-note@-1 {{because it is not a struct or class type}} + + namespace test_namespace { using namespace std; static_assert(is_trivially_relocatable::value); @@ -388,7 +427,7 @@ namespace test_namespace { static_assert(is_constructible_v); // expected-error@-1 {{static assertion failed due to requirement 'is_constructible_v'}} \ // expected-note@-1 {{because it is a cv void type}} - + static_assert(std::is_aggregate::value); // expected-error-re@-1 {{static assertion failed due to requirement 'std::{{.*}}is_aggregate::value'}} \ // expected-note@-1 {{'void' is not aggregate}} \ @@ -422,6 +461,18 @@ namespace test_namespace { // expected-note@-1 {{'Fn' (aka 'void ()') is not final}} \ // expected-note@-1 {{because it is a function type}} \ // expected-note@-1 {{because it is not a class or union type}} + + static_assert(is_abstract::value); + // expected-error-re@-1 {{static assertion failed due to requirement '{{.*}}is_abstract::value'}} 
\ + // expected-note@-1 {{'int &' is not abstract}} \ + // expected-note@-1 {{because it is a reference type}} \ + // expected-note@-1 {{because it is not a struct or class type}} + + static_assert(is_abstract_v); + // expected-error@-1 {{static assertion failed due to requirement 'is_abstract_v'}} \ + // expected-note@-1 {{'int &' is not abstract}} \ + // expected-note@-1 {{because it is a reference type}} \ + // expected-note@-1 {{because it is not a struct or class type}} } diff --git a/clang/test/SemaCXX/type-traits-unsatisfied-diags.cpp b/clang/test/SemaCXX/type-traits-unsatisfied-diags.cpp index 6f504f2c8b931..22740418f09f5 100644 --- a/clang/test/SemaCXX/type-traits-unsatisfied-diags.cpp +++ b/clang/test/SemaCXX/type-traits-unsatisfied-diags.cpp @@ -964,3 +964,91 @@ namespace is_aggregate { static_assert(__is_aggregate(S7[10])); } + +namespace is_abstract_tests { +struct Abstract1 { + virtual void fn1() = 0; +}; + +struct Abstract2 { + virtual void fn2() = 0; +}; + +struct NonAbstract +{ + virtual void f() {} +}; + +// Multiple inheritance reports all abstract base classes that had their pure virtual functions overridden. +struct Overrides : Abstract1, Abstract2, NonAbstract { + void fn1() override {} + void fn2() override {} +}; + +static_assert(__is_abstract(Overrides)); +// expected-error@-1 {{static assertion failed due to requirement '__is_abstract(is_abstract_tests::Overrides)'}} \ +// expected-note@-1 {{'Overrides' is not abstract}} \ +// expected-note@-1 {{because it overrides all pure virtual functions from base class 'Abstract1'}} \ +// expected-note@-1 {{because it overrides all pure virtual functions from base class 'Abstract2'}} \ + +static_assert(__is_abstract(NonAbstract)); +// expected-error@-1 {{static assertion failed due to requirement '__is_abstract(is_abstract_tests::NonAbstract)'}} \ +// expected-note@-1 {{'NonAbstract' is not abstract}} + +// Inheriting over two levels reports the last class only although the source of the pure virtual function +// is the top-most base. 
+struct Derived : Abstract1 { +}; + +struct Derived2 : Derived { + void fn1() override {} +}; + +static_assert(__is_abstract(Derived2)); +// expected-error@-1 {{static assertion failed due to requirement '__is_abstract(is_abstract_tests::Derived2)'}} \ +// expected-note@-1 {{'Derived2' is not abstract}} \ +// expected-note@-1 {{because it overrides all pure virtual functions from base class 'Derived'}} \ + + +using I = int; +static_assert(__is_abstract(I)); +// expected-error@-1 {{static assertion failed due to requirement '__is_abstract(int)'}} \ +// expected-note@-1 {{'I' (aka 'int') is not abstract}} \ +// expected-note@-1 {{because it is not a struct or class type}} + +using Fty = void(); +static_assert(__is_abstract(Fty)); +// expected-error@-1 {{static assertion failed due to requirement '__is_abstract(void ())'}} \ +// expected-note@-1 {{'Fty' (aka 'void ()') is not abstract}} \ +// expected-note@-1 {{because it is a function type}} \ +// expected-note@-1 {{because it is not a struct or class type}} + +using Arr = int[3]; +static_assert(__is_abstract(Arr)); +// expected-error@-1 {{static assertion failed due to requirement '__is_abstract(int[3])'}} \ +// expected-note@-1 {{'Arr' (aka 'int[3]') is not abstract}} \ +// expected-note@-1 {{because it is an array type}} \ +// expected-note@-1 {{because it is not a struct or class type}} + +using Ref = int&; +static_assert(__is_abstract(Ref)); +// expected-error@-1 {{static assertion failed due to requirement '__is_abstract(int &)'}} \ +// expected-note@-1 {{'Ref' (aka 'int &') is not abstract}} \ +// expected-note@-1 {{because it is a reference type}} \ +// expected-note@-1 {{because it is not a struct or class type}} + +using Ptr = int*; +static_assert(__is_abstract(Ptr)); +// expected-error@-1 {{static assertion failed due to requirement '__is_abstract(int *)'}} \ +// expected-note@-1 {{'Ptr' (aka 'int *') is not abstract}} \ +// expected-note@-1 {{because it is a pointer type}} \ +// expected-note@-1 {{because it is not a struct or class type}} + +union U { int x; float y;}; +static_assert(__is_abstract(U)); +// expected-error@-1 {{static assertion failed due to requirement '__is_abstract(is_abstract_tests::U)'}} \ +// expected-note@-1 {{'U' is not abstract}} \ +// expected-note@-1 {{because it is a union type}} \ +// expected-note@-1 {{because it is not a struct or class type}} + +} diff --git a/clang/test/SemaCXX/type-traits.cpp b/clang/test/SemaCXX/type-traits.cpp index 3f0124755c674..d49330f97fad0 100644 --- a/clang/test/SemaCXX/type-traits.cpp +++ b/clang/test/SemaCXX/type-traits.cpp @@ -2038,6 +2038,49 @@ void is_implicit_lifetime(int n) { static_assert(__builtin_is_implicit_lifetime(int * __restrict)); } +namespace GH160610 { +class NonAggregate { +public: + NonAggregate() = default; + + NonAggregate(const NonAggregate&) = delete; + NonAggregate& operator=(const NonAggregate&) = delete; +private: + int num; +}; + +class DataMemberInitializer { +public: + DataMemberInitializer() = default; + + DataMemberInitializer(const DataMemberInitializer&) = delete; + DataMemberInitializer& operator=(const DataMemberInitializer&) = delete; +private: + int num = 0; +}; + +class UserProvidedConstructor { +public: + UserProvidedConstructor() {} + + UserProvidedConstructor(const UserProvidedConstructor&) = delete; + UserProvidedConstructor& operator=(const UserProvidedConstructor&) = delete; +}; + +static_assert(__builtin_is_implicit_lifetime(NonAggregate)); +static_assert(!__builtin_is_implicit_lifetime(DataMemberInitializer)); 
+static_assert(!__builtin_is_implicit_lifetime(UserProvidedConstructor)); + +#if __cplusplus >= 202002L +template +class Tpl { + Tpl() requires false = default ; +}; +static_assert(!__builtin_is_implicit_lifetime(Tpl)); + +#endif +} + void is_signed() { //static_assert(__is_signed(char)); diff --git a/clang/test/SemaCXX/verbose-trap.cpp b/clang/test/SemaCXX/verbose-trap.cpp index 2503f9860d9c3..0dd090e57d016 100644 --- a/clang/test/SemaCXX/verbose-trap.cpp +++ b/clang/test/SemaCXX/verbose-trap.cpp @@ -1,6 +1,9 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -fcxx-exceptions -verify %s // RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -verify %s +// RUN: %clang_cc1 -std=c++11 -fsyntax-only -fcxx-exceptions -verify %s -fexperimental-new-constant-interpreter +// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -verify %s -fexperimental-new-constant-interpreter + #if !__has_builtin(__builtin_verbose_trap) #error #endif @@ -45,3 +48,14 @@ void f2() { void test() { f(nullptr); } + +/// Arguments must be null terminated. +int foo() { + constexpr char K[] = {'a', 'b'}; + __builtin_verbose_trap("hehe", K); // expected-error {{argument to __builtin_verbose_trap must be a pointer to a constant string}} + __builtin_verbose_trap(K, "hehe"); //expected-error {{argument to __builtin_verbose_trap must be a pointer to a constant string}} + + constexpr char K2[] = {'a', 'b', '\0'}; + __builtin_verbose_trap("hehe", K2); + __builtin_verbose_trap(K2, "hehe"); +} diff --git a/clang/test/SemaHIP/amdgpu-builtin-in-lambda.hip b/clang/test/SemaHIP/amdgpu-builtin-in-lambda.hip index 8f0b14b7379d2..f89fc7b971e16 100644 --- a/clang/test/SemaHIP/amdgpu-builtin-in-lambda.hip +++ b/clang/test/SemaHIP/amdgpu-builtin-in-lambda.hip @@ -10,7 +10,7 @@ struct S { }; static constexpr auto global_load_lds_lambda = [](void* src, __shared__ void *dst) { - __builtin_amdgcn_global_load_lds(src, dst, 16, 0, 0); // gfx90a-error{{invalid size value}} gfx90a-note{{size must be 1, 2, or 4}} + __builtin_amdgcn_global_load_lds(src, dst, 16, 0, 0); // gfx90a-error{{invalid size value}} }; }; @@ -19,7 +19,7 @@ __device__ __amdgpu_buffer_rsrc_t test_simple_builtin(void *p, short stride, int } __device__ void test_target_dependant_builtin(void *src, __shared__ void *dst) { - S::global_load_lds_lambda(src, dst); + S::global_load_lds_lambda(src, dst); // gfx90a-note{{called by 'test_target_dependant_builtin'}} } constexpr auto make_buffer_rsrc_lambda = [](void *p, short stride, int num, int flags) { @@ -27,7 +27,7 @@ constexpr auto make_buffer_rsrc_lambda = [](void *p, short stride, int num, int }; constexpr auto global_load_lds_lambda = [](void* src, __shared__ void *dst) { - __builtin_amdgcn_global_load_lds(src, dst, 16, 0, 0); // gfx90a-error{{invalid size value}} gfx90a-note{{size must be 1, 2, or 4}} + __builtin_amdgcn_global_load_lds(src, dst, 16, 0, 0); // gfx90a-error{{invalid size value}} }; __device__ __amdgpu_buffer_rsrc_t global_test_simple_builtin(void *p, short stride, int num, int flags) { @@ -35,7 +35,7 @@ __device__ __amdgpu_buffer_rsrc_t global_test_simple_builtin(void *p, short stri } __device__ void global_test_target_dependant_builtin(void *src, __shared__ void *dst) { - global_load_lds_lambda(src, dst); + global_load_lds_lambda(src, dst); // gfx90a-note{{called by 'global_test_target_dependant_builtin'}} } __device__ __amdgpu_buffer_rsrc_t local_test_simple_builtin(void *p, short stride, int num, int flags) { @@ -47,7 +47,7 @@ __device__ __amdgpu_buffer_rsrc_t local_test_simple_builtin(void *p, short 
strid __device__ void local_test_target_dependant_builtin(void *src, __shared__ void *dst) { constexpr auto f = [](void* src, __shared__ void *dst) { - __builtin_amdgcn_global_load_lds(src, dst, 16, 0, 0); // gfx90a-error{{invalid size value}} gfx90a-note{{size must be 1, 2, or 4}} + __builtin_amdgcn_global_load_lds(src, dst, 16, 0, 0); // gfx90a-error{{invalid size value}} }; - f(src, dst); + f(src, dst); // gfx90a-note{{called by 'local_test_target_dependant_builtin'}} } diff --git a/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip b/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip new file mode 100644 index 0000000000000..366278f648939 --- /dev/null +++ b/clang/test/SemaHIP/amdgpu-gfx950-load-to-lds.hip @@ -0,0 +1,60 @@ +// REQUIRES: amdgpu-registered-target +// RUN: %clang_cc1 -fsyntax-only -triple amdgcn -target-cpu gfx950 -verify=device %s -fcuda-is-device +// RUN: %clang_cc1 -fsyntax-only -triple x86_64 -aux-triple amdgcn -verify=host %s +// device-no-diagnostics + +#define __device__ __attribute__((device)) +#define __global__ __attribute__((global)) +#define __shared__ __attribute__((shared)) + +__device__ void i_am_device(void* src, __amdgpu_buffer_rsrc_t rsrc, __shared__ void* dst, int vindex, int voffset, int soffset) { + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 1, voffset, soffset, 0, 0); + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 2, voffset, soffset, 0, 0); + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 4, voffset, soffset, 0, 0); + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 12, voffset, soffset, 0, 0); + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 16, voffset, soffset, 0, 0); + + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 1, vindex, voffset, soffset, 0, 0); + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 2, vindex, voffset, soffset, 0, 0); + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 4, vindex, voffset, soffset, 0, 0); + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 12, vindex, voffset, soffset, 0, 0); + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 16, vindex, voffset, soffset, 0, 0); + + __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + + __builtin_amdgcn_global_load_lds(src, dst, 1, 0 , 0); + __builtin_amdgcn_global_load_lds(src, dst, 2, 0 , 0); + __builtin_amdgcn_global_load_lds(src, dst, 4, 0 , 0); + __builtin_amdgcn_global_load_lds(src, dst, 12, 0 , 0); + __builtin_amdgcn_global_load_lds(src, dst, 16, 0 , 0); +} + +__global__ void i_am_kernel(void* src, __amdgpu_buffer_rsrc_t rsrc, __shared__ void* dst, int vindex, int voffset, int soffset) { + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 1, voffset, soffset, 0, 0); + 
__builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 2, voffset, soffset, 0, 0); + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 4, voffset, soffset, 0, 0); + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 12, voffset, soffset, 0, 0); + __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, dst, 16, voffset, soffset, 0, 0); + + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 1, vindex, voffset, soffset, 0, 0); + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 2, vindex, voffset, soffset, 0, 0); + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 4, vindex, voffset, soffset, 0, 0); + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 12, vindex, voffset, soffset, 0, 0); + __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, dst, 16, vindex, voffset, soffset, 0, 0); + + __builtin_amdgcn_load_to_lds(src, dst, 1, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 2, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 4, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 12, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + __builtin_amdgcn_load_to_lds(src, dst, 16, 0, 0); // host-error{{cannot initialize a parameter of type '__attribute__((address_space(3))) void *' with an lvalue of type 'void *'}} + + __builtin_amdgcn_global_load_lds(src, dst, 1, 0 , 0); + __builtin_amdgcn_global_load_lds(src, dst, 2, 0 , 0); + __builtin_amdgcn_global_load_lds(src, dst, 4, 0 , 0); + __builtin_amdgcn_global_load_lds(src, dst, 12, 0 , 0); + __builtin_amdgcn_global_load_lds(src, dst, 16, 0 , 0); +} diff --git a/clang/test/SemaHLSL/BuiltIns/clamp-errors-16bit.hlsl b/clang/test/SemaHLSL/BuiltIns/clamp-errors-16bit.hlsl index 41c66a8631fad..7a6341659493b 100644 --- a/clang/test/SemaHLSL/BuiltIns/clamp-errors-16bit.hlsl +++ b/clang/test/SemaHLSL/BuiltIns/clamp-errors-16bit.hlsl @@ -1,6 +1,9 @@ -// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=half -// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=int16_t -// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=uint16_t +// RUN: not %clang_cc1 -fnative-half-type -std=hlsl202x -triple dxilv1.0-unknown-shadermodel6.0-compute \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s -DTEST_TYPE=half +// RUN: not %clang_cc1 -fnative-half-type -std=hlsl202x -triple dxilv1.0-unknown-shadermodel6.0-compute \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s -DTEST_TYPE=int16_t +// RUN: not %clang_cc1 -fnative-half-type -std=hlsl202x -triple dxilv1.0-unknown-shadermodel6.0-compute \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s -DTEST_TYPE=uint16_t // check we error on 16 bit type if shader model is too old // CHECK: '-enable-16bit-types' option requires target HLSL Version >= 2018 and shader model >= 6.2, but HLSL Version is 'hlsl202x' and shader model is '6.0' diff --git a/clang/test/SemaHLSL/BuiltIns/isnan-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/isnan-errors.hlsl new file mode 100644 index 
0000000000000..a6be28117af4f --- /dev/null +++ b/clang/test/SemaHLSL/BuiltIns/isnan-errors.hlsl @@ -0,0 +1,38 @@ + +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm-only -disable-llvm-passes -verify + +bool test_too_few_arg() { + return __builtin_hlsl_elementwise_isnan(); + // expected-error@-1 {{too few arguments to function call, expected 1, have 0}} +} + +bool2 test_too_many_arg(float2 p0) { + return __builtin_hlsl_elementwise_isnan(p0, p0); + // expected-error@-1 {{too many arguments to function call, expected 1, have 2}} +} + +bool builtin_bool_to_float_type_promotion(bool p1) { + return __builtin_hlsl_elementwise_isnan(p1); + // expected-error@-1 {{1st argument must be a scalar or vector of 16 or 32 bit floating-point types (was 'bool')}} +} + +bool builtin_isnan_int_to_float_promotion(int p1) { + return __builtin_hlsl_elementwise_isnan(p1); + // expected-error@-1 {{1st argument must be a scalar or vector of 16 or 32 bit floating-point types (was 'int')}} +} + +bool2 builtin_isnan_int2_to_float2_promotion(int2 p1) { + return __builtin_hlsl_elementwise_isnan(p1); + // expected-error@-1 {{1st argument must be a scalar or vector of 16 or 32 bit floating-point types (was 'int2' (aka 'vector'))}} +} + +// builtins are variadic functions and so are subject to DefaultVariadicArgumentPromotion +half builtin_isnan_half_scalar (half p0) { + return __builtin_hlsl_elementwise_isnan (p0); + // expected-error@-1 {{1st argument must be a scalar or vector of 16 or 32 bit floating-point types (was 'double')}} +} + +float builtin_isnan_float_scalar ( float p0) { + return __builtin_hlsl_elementwise_isnan (p0); + // expected-error@-1 {{1st argument must be a scalar or vector of 16 or 32 bit floating-point types (was 'double')}} +} diff --git a/clang/test/SemaHLSL/BuiltIns/matrix-basic_types-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/matrix-basic_types-errors.hlsl new file mode 100644 index 0000000000000..6f6b01bac829e --- /dev/null +++ b/clang/test/SemaHLSL/BuiltIns/matrix-basic_types-errors.hlsl @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -emit-llvm-only -disable-llvm-passes -verify + +uint64_t5x5 mat; +// expected-error@-1 {{unknown type name 'uint64_t5x5'}} + +// Note: this one only fails because -fnative-half-type is not set +uint16_t4x4 mat2; +// expected-error@-1 {{unknown type name 'uint16_t4x4'}} + +matrix mat3; +// expected-error@-1 {{constraints not satisfied for alias template 'matrix' [with element = int, rows_count = 5, cols_count = 5]}} +// expected-note@* {{because '5 <= 4' (5 <= 4) evaluated to false}} diff --git a/clang/test/SemaHLSL/BuiltIns/matrix-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/matrix-errors.hlsl new file mode 100644 index 0000000000000..03751878bbb98 --- /dev/null +++ b/clang/test/SemaHLSL/BuiltIns/matrix-errors.hlsl @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -x hlsl -fsyntax-only -verify %s + +// Some bad declarations +hlsl::matrix ShouldWorkSomeday; // expected-error{{use of alias template 'hlsl::matrix' requires template arguments}} +// expected-note@*:* {{template declaration from hidden source: template requires rows_count <= 4 && cols_count <= 4 using matrix = element __attribute__((matrix_type(rows_count, cols_count)))}} + +hlsl::matrix<1,1,1> BadMat; // expected-error{{template argument for template type parameter must be a type}} +// expected-note@*:* {{template parameter from hidden source: class element 
= float}} + +hlsl::matrix AnotherBadMat; // expected-error{{template argument for non-type template parameter must be an expression}} +// expected-note@*:* {{template parameter from hidden source: int rows_count = 4}} + +hlsl::matrix YABV; // expected-error{{too many template arguments for alias template 'matrix'}} +// expected-note@*:* {{template declaration from hidden source: template requires rows_count <= 4 && cols_count <= 4 using matrix = element __attribute__((matrix_type(rows_count, cols_count)))}} + +// This code is rejected by clang because clang puts the HLSL built-in types +// into the HLSL namespace. +namespace hlsl { + struct matrix {}; // expected-error {{redefinition of 'matrix'}} +} + +// This code is rejected by dxc because dxc puts the HLSL built-in types +// into the global space, but clang will allow it even though it will shadow the +// matrix template. +struct matrix {}; // expected-note {{candidate found by name lookup is 'matrix'}} + +matrix matInt2x2; // expected-error {{reference to 'matrix' is ambiguous}} + +// expected-note@*:* {{candidate found by name lookup is 'hlsl::matrix'}} diff --git a/clang/test/SemaHLSL/BuiltIns/max-errors-16bit.hlsl b/clang/test/SemaHLSL/BuiltIns/max-errors-16bit.hlsl index e6f6eb00aa408..32a4bbd42e5ec 100644 --- a/clang/test/SemaHLSL/BuiltIns/max-errors-16bit.hlsl +++ b/clang/test/SemaHLSL/BuiltIns/max-errors-16bit.hlsl @@ -1,6 +1,9 @@ -// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=half -// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=int16_t -// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=uint16_t +// RUN: not %clang_cc1 -fnative-half-type -std=hlsl202x -triple dxilv1.0-unknown-shadermodel6.0-compute \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s -DTEST_TYPE=half +// RUN: not %clang_cc1 -fnative-half-type -std=hlsl202x -triple dxilv1.0-unknown-shadermodel6.0-compute \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s -DTEST_TYPE=int16_t +// RUN: not %clang_cc1 -fnative-half-type -std=hlsl202x -triple dxilv1.0-unknown-shadermodel6.0-compute \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s -DTEST_TYPE=uint16_t // check we error on 16 bit type if shader model is too old // CHECK: '-enable-16bit-types' option requires target HLSL Version >= 2018 and shader model >= 6.2, but HLSL Version is 'hlsl202x' and shader model is '6.0' diff --git a/clang/test/SemaHLSL/BuiltIns/min-errors-16bit.hlsl b/clang/test/SemaHLSL/BuiltIns/min-errors-16bit.hlsl index 6891a1db38605..eb0066835689a 100644 --- a/clang/test/SemaHLSL/BuiltIns/min-errors-16bit.hlsl +++ b/clang/test/SemaHLSL/BuiltIns/min-errors-16bit.hlsl @@ -1,6 +1,9 @@ -// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=half -// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=int16_t -// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=uint16_t +// RUN: not %clang_cc1 -fnative-half-type -std=hlsl202x -triple dxilv1.0-unknown-shadermodel6.0-compute \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s -DTEST_TYPE=half +// RUN: not %clang_cc1 -fnative-half-type -std=hlsl202x -triple dxilv1.0-unknown-shadermodel6.0-compute \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s -DTEST_TYPE=int16_t +// RUN: not %clang_cc1 -fnative-half-type 
-std=hlsl202x -triple dxilv1.0-unknown-shadermodel6.0-compute \ +// RUN: -finclude-default-header -S -o - %s 2>&1 | FileCheck %s -DTEST_TYPE=uint16_t // check we error on 16 bit type if shader model is too old // CHECK: '-enable-16bit-types' option requires target HLSL Version >= 2018 and shader model >= 6.2, but HLSL Version is 'hlsl202x' and shader model is '6.0' diff --git a/clang/test/SemaHLSL/RootSignature-err.hlsl b/clang/test/SemaHLSL/RootSignature-err.hlsl index 89c684cd8d11f..debeafe7ee446 100644 --- a/clang/test/SemaHLSL/RootSignature-err.hlsl +++ b/clang/test/SemaHLSL/RootSignature-err.hlsl @@ -191,6 +191,10 @@ void basic_validation_5() {} [RootSignature("StaticSampler(s0, mipLODBias = 15.990001)")] void basic_validation_6() {} +// expected-error@+1 {{invalid value of flags}} +[RootSignature("StaticSampler(s0, flags = FLAG_TYPO)")] +void basic_validation_7() {} + // expected-error@+1 {{sampler and non-sampler resource mixed in descriptor table}} [RootSignature("DescriptorTable(Sampler(s0), CBV(b0))")] void mixed_resource_table() {} diff --git a/clang/test/SemaHLSL/RootSignature-flags-err.hlsl b/clang/test/SemaHLSL/RootSignature-flags-err.hlsl index 9449d33cee1ad..c79e692202ded 100644 --- a/clang/test/SemaHLSL/RootSignature-flags-err.hlsl +++ b/clang/test/SemaHLSL/RootSignature-flags-err.hlsl @@ -2,7 +2,8 @@ // RUN: -fdx-rootsignature-version=rootsig_1_0 %s -verify=v10 // RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -x hlsl -fsyntax-only \ // RUN: -fdx-rootsignature-version=rootsig_1_1 %s -verify=v11 - +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.3-library -x hlsl -fsyntax-only \ +// RUN: -fdx-rootsignature-version=rootsig_1_2 %s -verify=v12 // Root Descriptor Flags: // v10-error@+1 {{invalid flags for version 1.0}} @@ -13,8 +14,9 @@ void bad_root_descriptor_flags_0() {} [RootSignature("CBV(b0, flags = DATA_STATIC_WHILE_SET_AT_EXECUTE)")] void bad_root_descriptor_flags_1() {} -// v10-error@+2 {{invalid flags for version 1.0}} -// v11-error@+1 {{invalid flags for version 1.1}} +// v10-error@+3 {{invalid flags for version 1.0}} +// v11-error@+2 {{invalid flags for version 1.1}} +// v12-error@+1 {{invalid flags for version 1.2}} [RootSignature("CBV(b0, flags = DATA_STATIC | DATA_VOLATILE)")] void bad_root_descriptor_flags_2() {} @@ -40,18 +42,20 @@ void bad_descriptor_range_flags_3() {} [RootSignature("DescriptorTable(CBV(b0, flags = DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS))")] void bad_descriptor_range_flags_4() {} -// v10-error@+2 {{invalid flags for version 1.0}} -// v11-error@+1 {{invalid flags for version 1.1}} +// v10-error@+3 {{invalid flags for version 1.0}} +// v11-error@+2 {{invalid flags for version 1.1}} +// v12-error@+1 {{invalid flags for version 1.2}} [RootSignature("DescriptorTable(CBV(b0, flags = DATA_STATIC | DATA_STATIC_WHILE_SET_AT_EXECUTE))")] void bad_descriptor_range_flags_5() {} -// v10-error@+2 {{invalid flags for version 1.0}} -// v11-error@+1 {{invalid flags for version 1.1}} +// v10-error@+3 {{invalid flags for version 1.0}} +// v11-error@+2 {{invalid flags for version 1.1}} +// v12-error@+1 {{invalid flags for version 1.2}} [RootSignature("DescriptorTable(CBV(b0, flags = DESCRIPTORS_VOLATILE | DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS))")] void bad_descriptor_range_flags_6() {} -// v10-error@+2 {{invalid flags for version 1.0}} -// v11-error@+1 {{invalid flags for version 1.1}} +// v10-error@+3 {{invalid flags for version 1.0}} +// v11-error@+2 {{invalid flags for version 1.1}} +// v12-error@+1 {{invalid flags for version 
1.2}} [RootSignature("DescriptorTable(CBV(b0, flags = DESCRIPTORS_VOLATILE | DATA_STATIC))")] void bad_descriptor_range_flags_7() {} - diff --git a/clang/test/SemaObjC/builtin-masked.m b/clang/test/SemaObjC/builtin-masked.m new file mode 100644 index 0000000000000..254737a9a37fe --- /dev/null +++ b/clang/test/SemaObjC/builtin-masked.m @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fsyntax-only -verify %s + +// expected-no-diagnostics +typedef int v8i __attribute__((ext_vector_type(8))); +typedef _Bool v8b __attribute__((ext_vector_type(8))); + +__attribute__((objc_root_class)) +@interface Obj +@property int *ptr; +@end + +void good(v8b mask, Obj *ptr, v8i v) { + (void)__builtin_masked_load(mask, ptr.ptr); + (void)__builtin_masked_store(mask, v, ptr.ptr); + (void)__builtin_masked_expand_load(mask, ptr.ptr); + (void)__builtin_masked_compress_store(mask, v, ptr.ptr); + (void)__builtin_masked_gather(mask, v, ptr.ptr); + (void)__builtin_masked_scatter(mask, v, v, ptr.ptr); +} diff --git a/clang/test/SemaObjC/os_log.m b/clang/test/SemaObjC/os_log.m new file mode 100644 index 0000000000000..3a8a550eb2009 --- /dev/null +++ b/clang/test/SemaObjC/os_log.m @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 -verify %s + +struct S { + int a[4]; +}; + +struct S s; +_Complex float cf; + +void test_builtin_os_log_invalid_arg(void *buf) { + __builtin_os_log_format(buf, "%*.*f", s, 5, 1.3); // expected-error {{field width should have type 'int', but argument has type 'struct S'}} + __builtin_os_log_format(buf, "%*.*f", 1, s, 1.3); // expected-error {{field precision should have type 'int', but argument has type 'struct S'}} + __builtin_os_log_format(buf, "%*.*f", 1, 5, s); // expected-error {{format specifies type 'double' but the argument has type 'struct S'}} + + __builtin_os_log_format(buf, "%*.*f", cf, 5, 1.3); // expected-error {{field width should have type 'int', but argument has type '_Complex float'}} + __builtin_os_log_format(buf, "%*.*f", 1, cf, 1.3); // expected-error {{field precision should have type 'int', but argument has type '_Complex float'}} + __builtin_os_log_format(buf, "%*.*f", 1, 5, cf); // expected-error {{format specifies type 'double' but the argument has type '_Complex float'}} + + __builtin_os_log_format(buf, "%*.*f", (void *)0, 5, 1.3); // expected-warning {{field width should have type 'int', but argument has type 'void *'}} + __builtin_os_log_format(buf, "%*.*f", 1, (void *)0, 1.3); // expected-warning {{field precision should have type 'int', but argument has type 'void *'}} + __builtin_os_log_format(buf, "%*.*f", 1, 5, (void *)0); // expected-warning {{format specifies type 'double' but the argument has type 'void *'}} +} diff --git a/clang/test/SemaOpenCL/amdgpu-image-rsrc.cl b/clang/test/SemaOpenCL/amdgpu-image-rsrc.cl new file mode 100644 index 0000000000000..dc56494d3c2c1 --- /dev/null +++ b/clang/test/SemaOpenCL/amdgpu-image-rsrc.cl @@ -0,0 +1,13 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: amdgpu-registered-target +// RUN: %clang_cc1 -verify -cl-std=CL1.2 -triple amdgcn-amd-amdhsa %s +// RUN: %clang_cc1 -verify -cl-std=CL2.0 -triple amdgcn-amd-amdhsa %s + +void f() { + int n = 3; + __amdgpu_texture_t v = (__amdgpu_texture_t)0; // expected-error {{used type '__amdgpu_texture_t' where arithmetic or pointer type is required}} + int k = v; // expected-error {{initializing '__private int' with an expression of incompatible type '__private __amdgpu_texture_t'}} + (void)(v + v); // expected-error 
{{invalid operands}} + __amdgpu_texture_t r; + int *p = (int*)r; // expected-error {{operand of type '__amdgpu_texture_t' where arithmetic or pointer type is required}} +} diff --git a/clang/test/SemaOpenMP/amdgpu-image-rsrc.cpp b/clang/test/SemaOpenMP/amdgpu-image-rsrc.cpp new file mode 100644 index 0000000000000..51b3f72d12e12 --- /dev/null +++ b/clang/test/SemaOpenMP/amdgpu-image-rsrc.cpp @@ -0,0 +1,12 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: amdgpu-registered-target +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-targets=amdgcn-amd-amdhsa -triple amdgcn-amd-amdhsa -fopenmp-is-target-device -Wno-unused-value %s + +void foo() { +#pragma omp target + { + int n = 5; + __amdgpu_texture_t v = 0; // expected-error {{cannot initialize a variable of type '__amdgpu_texture_t' with an rvalue of type 'int'}} + (void)(v + v); // expected-error {{invalid operands to binary expression}} + } +} diff --git a/clang/test/SemaTemplate/GH161657.cpp b/clang/test/SemaTemplate/GH161657.cpp new file mode 100644 index 0000000000000..6ec793115db12 --- /dev/null +++ b/clang/test/SemaTemplate/GH161657.cpp @@ -0,0 +1,11 @@ +// RUN: %clang_cc1 -fsyntax-only -std=c++20 -ffp-exception-behavior=strict -verify %s +// expected-no-diagnostics + +template struct S { + template using type1 = decltype([] { return U{}; }); +}; + +void foo() { + using T1 = S::type1; + int x = T1()(); +} diff --git a/clang/test/SemaTemplate/instantiation-depth-subst-2.cpp b/clang/test/SemaTemplate/instantiation-depth-subst-2.cpp index 2b519e974a907..66fd1af0d1429 100644 --- a/clang/test/SemaTemplate/instantiation-depth-subst-2.cpp +++ b/clang/test/SemaTemplate/instantiation-depth-subst-2.cpp @@ -2,5 +2,6 @@ template struct S { }; template S operator+(T, T); // expected-error {{instantiation exceeded maximum depth}} expected-note 2{{while substituting}} +// expected-note@-1 {{use -ftemplate-depth=N to increase recursive template instantiation depth}} S<0> s; int k = s + s; // expected-note {{while substituting}} diff --git a/clang/test/SemaTemplate/instantiation-depth-subst.cpp b/clang/test/SemaTemplate/instantiation-depth-subst.cpp index 062a8ed08bb64..17944bc3aaa40 100644 --- a/clang/test/SemaTemplate/instantiation-depth-subst.cpp +++ b/clang/test/SemaTemplate/instantiation-depth-subst.cpp @@ -3,7 +3,8 @@ // PR9793 template auto f(T t) -> decltype(f(t)); // \ // expected-error {{recursive template instantiation exceeded maximum depth of 2}} \ -// expected-note 2 {{while substituting}} +// expected-note 2 {{while substituting}} \ +// expected-note {{use -ftemplate-depth=N to increase recursive template instantiation depth}} struct S {}; int k = f(S{}); // expected-note {{while substituting}} diff --git a/clang/test/SemaTemplate/temp_arg_nontype_cxx2c.cpp b/clang/test/SemaTemplate/temp_arg_nontype_cxx2c.cpp index e74c031eba4c1..c4ac36e263bc8 100644 --- a/clang/test/SemaTemplate/temp_arg_nontype_cxx2c.cpp +++ b/clang/test/SemaTemplate/temp_arg_nontype_cxx2c.cpp @@ -123,3 +123,14 @@ Set sf; // expected-note@#C {{evaluated to false}} } // namespace GH84052 + +namespace error_on_type_instantiation { + int f(int) = delete; + // expected-note@-1 {{candidate function has been explicitly deleted}} + template struct X {}; + // expected-error@-1 {{call to deleted function 'f'}} + template void g() { X x; } + // expected-note@-1 {{while substituting prior template arguments into non-type template parameter [with T = int]}} + template void g(); + // expected-note@-1 {{in 
instantiation of function template specialization}} +} diff --git a/clang/test/SemaTemplate/temp_arg_template_p0522.cpp b/clang/test/SemaTemplate/temp_arg_template_p0522.cpp index d8a81bb363112..60d98a653ff02 100644 --- a/clang/test/SemaTemplate/temp_arg_template_p0522.cpp +++ b/clang/test/SemaTemplate/temp_arg_template_p0522.cpp @@ -83,11 +83,11 @@ namespace DependentType { namespace Auto { template typename T> struct TInt {}; // #TInt template typename T> struct TIntPtr {}; // #TIntPtr - template typename T> struct TAuto {}; + template typename T> struct TAuto {}; // #TAuto template typename T> struct TAutoPtr {}; - template typename T> struct TDecltypeAuto {}; + template typename T> struct TDecltypeAuto {}; // #TDecltypeAuto template struct Auto; - template struct AutoPtr; // #AutoPtr + template struct AutoPtr; template struct DecltypeAuto; template struct Int; template struct IntPtr; @@ -108,7 +108,7 @@ namespace Auto { TIntPtr ipip; TAuto aa; - TAuto aap; // expected-error@#AutoPtr {{could not match 'auto *' against 'auto'}} + TAuto aap; // expected-error@#TAuto {{non-type template parameter '' with type 'auto *' has incompatible initializer of type 'auto'}} // expected-note@-1 {{different template parameters}} TAuto ai; // FIXME: ill-formed (?) TAuto aip; // FIXME: ill-formed (?) @@ -130,7 +130,7 @@ namespace Auto { // parameters (such as 'user-defined-type &') that are not valid 'auto' // parameters. TDecltypeAuto daa; - TDecltypeAuto daap; // expected-error@#AutoPtr {{could not match 'auto *' against 'decltype(auto)'}} + TDecltypeAuto daap; // expected-error@#TDecltypeAuto {{non-type template parameter '' with type 'auto *' has incompatible initializer of type 'decltype(auto)'}} // expected-note@-1 {{different template parameters}} int n; diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py index 4a5d9e582b54c..e6c79d7a71b51 100644 --- a/clang/test/lit.cfg.py +++ b/clang/test/lit.cfg.py @@ -140,24 +140,29 @@ def have_host_out_of_process_jit_feature_support(): return False -def have_host_jit_feature_support(feature_name): + +def run_clang_repl(args): clang_repl_exe = lit.util.which("clang-repl", config.clang_tools_dir) if not clang_repl_exe: - return False + return "" try: clang_repl_cmd = subprocess.Popen( - [clang_repl_exe, "--host-supports-" + feature_name], stdout=subprocess.PIPE + [clang_repl_exe, args], stdout=subprocess.PIPE ) except OSError: print("could not exec clang-repl") - return False + return "" clang_repl_out = clang_repl_cmd.stdout.read().decode("ascii") clang_repl_cmd.wait() - return "true" in clang_repl_out + return clang_repl_out + + +def have_host_jit_feature_support(feature_name): + return "true" in run_clang_repl("--host-supports-" + feature_name) def have_host_clang_repl_cuda(): clang_repl_exe = lit.util.which('clang-repl', config.clang_tools_dir) @@ -191,6 +196,8 @@ def have_host_clang_repl_cuda(): if have_host_clang_repl_cuda(): config.available_features.add('host-supports-cuda') + hosttriple = run_clang_repl("--host-jit-triple") + config.substitutions.append(("%host-jit-triple", hosttriple.strip())) if have_host_out_of_process_jit_feature_support(): config.available_features.add("host-supports-out-of-process-jit") diff --git a/clang/tools/clang-import-test/clang-import-test.cpp b/clang/tools/clang-import-test/clang-import-test.cpp index 910e08ca4dffa..977cec1d53157 100644 --- a/clang/tools/clang-import-test/clang-import-test.cpp +++ b/clang/tools/clang-import-test/clang-import-test.cpp @@ -216,7 +216,7 @@ std::unique_ptr BuildCompilerInstance() { 
Ins->getTarget().adjust(Ins->getDiagnostics(), Ins->getLangOpts(), /*AuxTarget=*/nullptr); Ins->createFileManager(); - Ins->createSourceManager(Ins->getFileManager()); + Ins->createSourceManager(); Ins->createPreprocessor(TU_Complete); return Ins; diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp index a56e758fb75d8..1419b8c90a625 100644 --- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp +++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp @@ -608,10 +608,10 @@ Expected linkDevice(ArrayRef InputFiles, Error containerizeRawImage(std::unique_ptr &Img, OffloadKind Kind, const ArgList &Args) { llvm::Triple Triple(Args.getLastArgValue(OPT_triple_EQ)); - if (Kind != OFK_OpenMP || !Triple.isSPIRV() || - Triple.getVendor() != llvm::Triple::Intel) - return Error::success(); - return offloading::intel::containerizeOpenMPSPIRVImage(Img); + if (Kind == OFK_OpenMP && Triple.isSPIRV() && + Triple.getVendor() == llvm::Triple::Intel) + return offloading::intel::containerizeOpenMPSPIRVImage(Img); + return Error::success(); } Expected writeOffloadFile(const OffloadFile &File) { @@ -717,6 +717,14 @@ wrapDeviceImages(ArrayRef> Buffers, M, BuffersToWrap.front(), offloading::getOffloadEntryArray(M))) return std::move(Err); break; + case OFK_SYCL: { + // TODO: fill these options once the Driver supports them. + offloading::SYCLJITOptions Options; + if (Error Err = + offloading::wrapSYCLBinaries(M, BuffersToWrap.front(), Options)) + return std::move(Err); + break; + } default: return createStringError(getOffloadKindName(Kind) + " wrapping is not supported"); @@ -754,6 +762,32 @@ bundleOpenMP(ArrayRef Images) { return std::move(Buffers); } +Expected>> +bundleSYCL(ArrayRef Images) { + SmallVector> Buffers; + if (DryRun) { + // In dry-run mode there is an empty input which is insufficient for the + // testing. Therefore, we return here a stub image. + OffloadingImage Image; + Image.TheImageKind = IMG_None; + Image.TheOffloadKind = OffloadKind::OFK_SYCL; + Image.StringData["symbols"] = "stub"; + Image.Image = MemoryBuffer::getMemBufferCopy(""); + SmallString<0> SerializedImage = OffloadBinary::write(Image); + Buffers.emplace_back(MemoryBuffer::getMemBufferCopy(SerializedImage)); + return std::move(Buffers); + } + + for (const OffloadingImage &Image : Images) { + // clang-sycl-linker packs outputs into one binary blob. Therefore, it is + // passed to Offload Wrapper as is. 
+ StringRef S(Image.Image->getBufferStart(), Image.Image->getBufferSize()); + Buffers.emplace_back(MemoryBuffer::getMemBufferCopy(S)); + } + + return std::move(Buffers); +} + Expected>> bundleCuda(ArrayRef Images, const ArgList &Args) { SmallVector, 4> InputFiles; @@ -806,8 +840,9 @@ bundleLinkedOutput(ArrayRef Images, const ArgList &Args, llvm::TimeTraceScope TimeScope("Bundle linked output"); switch (Kind) { case OFK_OpenMP: - case OFK_SYCL: return bundleOpenMP(Images); + case OFK_SYCL: + return bundleSYCL(Images); case OFK_Cuda: return bundleCuda(Images, Args); case OFK_HIP: diff --git a/clang/tools/clang-repl/ClangRepl.cpp b/clang/tools/clang-repl/ClangRepl.cpp index 1d508816d7047..c7879422cd7df 100644 --- a/clang/tools/clang-repl/ClangRepl.cpp +++ b/clang/tools/clang-repl/ClangRepl.cpp @@ -85,6 +85,8 @@ static llvm::cl::list llvm::cl::CommaSeparated); static llvm::cl::opt OptHostSupportsJit("host-supports-jit", llvm::cl::Hidden); +static llvm::cl::opt OptHostJitTriple("host-jit-triple", + llvm::cl::Hidden); static llvm::cl::list OptInputs(llvm::cl::Positional, llvm::cl::desc("[code to run]")); @@ -279,6 +281,11 @@ int main(int argc, const char **argv) { llvm::outs() << "false\n"; } return 0; + } else if (OptHostJitTriple) { + auto J = ExitOnErr(llvm::orc::LLJITBuilder().create()); + auto T = J->getTargetTriple(); + llvm::outs() << T.normalize() << '\n'; + return 0; } clang::IncrementalCompilerBuilder CB; diff --git a/clang/tools/clang-scan-deps/ClangScanDeps.cpp b/clang/tools/clang-scan-deps/ClangScanDeps.cpp index 0e2758d123edc..e41f4eb7999ae 100644 --- a/clang/tools/clang-scan-deps/ClangScanDeps.cpp +++ b/clang/tools/clang-scan-deps/ClangScanDeps.cpp @@ -420,7 +420,7 @@ class FullDeps { std::vector NewMDs; { std::unique_lock ul(Lock); - for (const ModuleDeps &MD : Graph) { + for (ModuleDeps &MD : Graph) { auto I = Modules.find({MD.ID, 0}); if (I != Modules.end()) { I->first.InputIndex = std::min(I->first.InputIndex, InputIndex); diff --git a/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp b/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp index fde6b55165868..de20e74360fbc 100644 --- a/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp +++ b/clang/tools/clang-sycl-linker/ClangSYCLLinker.cpp @@ -27,22 +27,16 @@ #include "llvm/LTO/LTO.h" #include "llvm/Linker/Linker.h" #include "llvm/MC/TargetRegistry.h" -#include "llvm/Object/Archive.h" -#include "llvm/Object/ArchiveWriter.h" #include "llvm/Object/Binary.h" -#include "llvm/Object/ELFObjectFile.h" #include "llvm/Object/IRObjectFile.h" -#include "llvm/Object/ObjectFile.h" #include "llvm/Object/OffloadBinary.h" #include "llvm/Option/ArgList.h" #include "llvm/Option/OptTable.h" #include "llvm/Option/Option.h" -#include "llvm/Remarks/HotnessThresholdParser.h" #include "llvm/Support/CommandLine.h" -#include "llvm/Support/FileOutputBuffer.h" #include "llvm/Support/FileSystem.h" +#include "llvm/Support/FormatVariadic.h" #include "llvm/Support/InitLLVM.h" -#include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" #include "llvm/Support/Program.h" #include "llvm/Support/Signals.h" @@ -466,6 +460,14 @@ static Error runAOTCompile(StringRef InputFile, StringRef OutputFile, return createStringError(inconvertibleErrorCode(), "Unsupported arch"); } +// TODO: Consider using LLVM-IR metadata to identify globals of interest +bool isKernel(const Function &F) { + const llvm::CallingConv::ID CC = F.getCallingConv(); + return CC == llvm::CallingConv::SPIR_KERNEL || + CC == llvm::CallingConv::AMDGPU_KERNEL || + CC == 
llvm::CallingConv::PTX_Kernel; +} + /// Performs the following steps: /// 1. Link input device code (user code and SYCL device library code). /// 2. Run SPIR-V code generation. @@ -486,6 +488,22 @@ Error runSYCLLink(ArrayRef Files, const ArgList &Args) { SmallVector SplitModules; SplitModules.emplace_back(*LinkedFile); + // Generate symbol table. + SmallVector SymbolTable; + for (size_t I = 0, E = SplitModules.size(); I != E; ++I) { + Expected> ModOrErr = + getBitcodeModule(SplitModules[I], C); + if (!ModOrErr) + return ModOrErr.takeError(); + + SmallVector Symbols; + for (Function &F : **ModOrErr) { + if (isKernel(F)) + Symbols.push_back(F.getName()); + } + SymbolTable.emplace_back(llvm::join(Symbols.begin(), Symbols.end(), "\n")); + } + bool IsAOTCompileNeeded = IsIntelOffloadArch( StringToOffloadArch(Args.getLastArgValue(OPT_arch_EQ))); @@ -523,12 +541,19 @@ Error runSYCLLink(ArrayRef Files, const ArgList &Args) { return createFileError(File, EC); } OffloadingImage TheImage{}; - TheImage.TheImageKind = IMG_Object; + // TODO: TheImageKind should be + // `IsAOTCompileNeeded ? IMG_Object : IMG_SPIRV;` + // For that we need to update SYCL Runtime to align with the ImageKind enum. + // Temporarily it is initalized to IMG_None, because in that case, SYCL + // Runtime has a heuristic to understand what the Image Kind is, so at least + // it works. + TheImage.TheImageKind = IMG_None; TheImage.TheOffloadKind = OFK_SYCL; TheImage.StringData["triple"] = Args.MakeArgString(Args.getLastArgValue(OPT_triple_EQ)); TheImage.StringData["arch"] = Args.MakeArgString(Args.getLastArgValue(OPT_arch_EQ)); + TheImage.StringData["symbols"] = SymbolTable[I]; TheImage.Image = std::move(*FileOrErr); llvm::SmallString<0> Buffer = OffloadBinary::write(TheImage); diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp index 5aab74348967d..c39f337665a40 100644 --- a/clang/tools/libclang/CIndex.cpp +++ b/clang/tools/libclang/CIndex.cpp @@ -2148,6 +2148,9 @@ class EnqueueVisitor : public ConstStmtVisitor, void VisitOMPUnrollDirective(const OMPUnrollDirective *D); void VisitOMPReverseDirective(const OMPReverseDirective *D); void VisitOMPInterchangeDirective(const OMPInterchangeDirective *D); + void VisitOMPCanonicalLoopSequenceTransformationDirective( + const OMPCanonicalLoopSequenceTransformationDirective *D); + void VisitOMPFuseDirective(const OMPFuseDirective *D); void VisitOMPForDirective(const OMPForDirective *D); void VisitOMPForSimdDirective(const OMPForSimdDirective *D); void VisitOMPSectionsDirective(const OMPSectionsDirective *D); @@ -2353,6 +2356,11 @@ void OMPClauseEnqueue::VisitOMPPartialClause(const OMPPartialClause *C) { Visitor->AddStmt(C->getFactor()); } +void OMPClauseEnqueue::VisitOMPLoopRangeClause(const OMPLoopRangeClause *C) { + Visitor->AddStmt(C->getFirst()); + Visitor->AddStmt(C->getCount()); +} + void OMPClauseEnqueue::VisitOMPAllocatorClause(const OMPAllocatorClause *C) { Visitor->AddStmt(C->getAllocator()); } @@ -2824,10 +2832,8 @@ void OpenACCClauseEnqueue::VisitTileClause(const OpenACCTileClause &C) { void OpenACCClauseEnqueue::VisitPrivateClause(const OpenACCPrivateClause &C) { VisitVarList(C); - for (const OpenACCPrivateRecipe &R : C.getInitRecipes()) { + for (const OpenACCPrivateRecipe &R : C.getInitRecipes()) Visitor.AddDecl(R.AllocaDecl); - Visitor.AddStmt(R.InitExpr); - } } void OpenACCClauseEnqueue::VisitHostClause(const OpenACCHostClause &C) { @@ -2843,7 +2849,6 @@ void OpenACCClauseEnqueue::VisitFirstPrivateClause( VisitVarList(C); for (const 
OpenACCFirstPrivateRecipe &R : C.getInitRecipes()) { Visitor.AddDecl(R.AllocaDecl); - Visitor.AddStmt(R.InitExpr); Visitor.AddDecl(R.InitFromTemporary); } } @@ -2919,10 +2924,8 @@ void OpenACCClauseEnqueue::VisitDeviceTypeClause( void OpenACCClauseEnqueue::VisitReductionClause( const OpenACCReductionClause &C) { VisitVarList(C); - for (const OpenACCReductionRecipe &R : C.getRecipes()) { + for (const OpenACCReductionRecipe &R : C.getRecipes()) Visitor.AddDecl(R.AllocaDecl); - Visitor.AddStmt(R.InitExpr); - } } void OpenACCClauseEnqueue::VisitAutoClause(const OpenACCAutoClause &C) {} void OpenACCClauseEnqueue::VisitIndependentClause( @@ -3317,6 +3320,15 @@ void EnqueueVisitor::VisitOMPInterchangeDirective( VisitOMPCanonicalLoopNestTransformationDirective(D); } +void EnqueueVisitor::VisitOMPCanonicalLoopSequenceTransformationDirective( + const OMPCanonicalLoopSequenceTransformationDirective *D) { + VisitOMPExecutableDirective(D); +} + +void EnqueueVisitor::VisitOMPFuseDirective(const OMPFuseDirective *D) { + VisitOMPCanonicalLoopSequenceTransformationDirective(D); +} + void EnqueueVisitor::VisitOMPForDirective(const OMPForDirective *D) { VisitOMPLoopDirective(D); } @@ -6275,6 +6287,8 @@ CXString clang_getCursorKindSpelling(enum CXCursorKind Kind) { return cxstring::createRef("OMPReverseDirective"); case CXCursor_OMPInterchangeDirective: return cxstring::createRef("OMPInterchangeDirective"); + case CXCursor_OMPFuseDirective: + return cxstring::createRef("OMPFuseDirective"); case CXCursor_OMPForDirective: return cxstring::createRef("OMPForDirective"); case CXCursor_OMPForSimdDirective: diff --git a/clang/tools/libclang/CXCursor.cpp b/clang/tools/libclang/CXCursor.cpp index 3c4062410eac1..56f113c1dc309 100644 --- a/clang/tools/libclang/CXCursor.cpp +++ b/clang/tools/libclang/CXCursor.cpp @@ -687,6 +687,9 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent, case Stmt::OMPInterchangeDirectiveClass: K = CXCursor_OMPInterchangeDirective; break; + case Stmt::OMPFuseDirectiveClass: + K = CXCursor_OMPFuseDirective; + break; case Stmt::OMPForDirectiveClass: K = CXCursor_OMPForDirective; break; diff --git a/clang/tools/offload-arch/CMakeLists.txt b/clang/tools/offload-arch/CMakeLists.txt index cb50b9c1d6dde..f7d7012cf7272 100644 --- a/clang/tools/offload-arch/CMakeLists.txt +++ b/clang/tools/offload-arch/CMakeLists.txt @@ -1,7 +1,9 @@ set(LLVM_LINK_COMPONENTS Support) -add_clang_tool(offload-arch OffloadArch.cpp NVPTXArch.cpp AMDGPUArchByKFD.cpp AMDGPUArchByHIP.cpp) +add_clang_tool(offload-arch OffloadArch.cpp NVPTXArch.cpp AMDGPUArchByKFD.cpp + AMDGPUArchByHIP.cpp LevelZeroArch.cpp) +# Legacy binary names. add_clang_symlink(amdgpu-arch offload-arch) add_clang_symlink(nvptx-arch offload-arch) diff --git a/clang/tools/offload-arch/LevelZeroArch.cpp b/clang/tools/offload-arch/LevelZeroArch.cpp new file mode 100644 index 0000000000000..5e543e3231c11 --- /dev/null +++ b/clang/tools/offload-arch/LevelZeroArch.cpp @@ -0,0 +1,181 @@ +//===- LevelZeroArch.cpp - list installed Level Zero devices ---*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements a tool for detecting Level Zero devices installed in the +// system +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/DynamicLibrary.h" +#include "llvm/Support/Error.h" +#include + +#define ZE_MAX_DEVICE_NAME 256 +#define ZE_MAX_DEVICE_UUID_SIZE 16 + +using ze_driver_handle_t = void *; +using ze_device_handle_t = void *; + +enum ze_result_t { + ZE_RESULT_SUCCESS = 0, + ZE_RESULT_ERROR_UNKNOWN = 0x7ffffffe +}; + +enum ze_structure_type_t { + ZE_STRUCTURE_TYPE_INIT_DRIVER_TYPE_DESC = 0x00020021, + ZE_STRUCTURE_TYPE_DEVICE_PROPERTIES = 0x3, + ZE_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff +}; + +enum ze_init_driver_type_flags_t { ZE_INIT_DRIVER_TYPE_FLAG_GPU = 1 }; + +using ze_device_type_t = uint32_t; +using ze_device_property_flags_t = uint32_t; + +struct ze_init_driver_type_desc_t { + ze_structure_type_t stype; + const void *pNext; + ze_init_driver_type_flags_t flags; +}; + +struct ze_device_uuid_t { + uint8_t id[ZE_MAX_DEVICE_UUID_SIZE]; +}; + +struct ze_device_properties_t { + ze_structure_type_t stype; + void *pNext; + ze_device_type_t type; + uint32_t vendorId; + uint32_t deviceId; + ze_device_property_flags_t flags; + uint32_t subdeviceId; + uint32_t coreClockRate; + uint64_t maxMemAllocSize; + uint32_t maxHardwareContexts; + uint32_t maxCommandQueuePriority; + uint32_t numThreadsPerEU; + uint32_t physicalEUSimdWidth; + uint32_t numEUsPerSubslice; + uint32_t numSubslicesPerSlice; + uint32_t numSlices; + uint64_t timerResolution; + uint32_t timestampValidBits; + uint32_t kernelTimestampValidBits; + ze_device_uuid_t uuid; + char name[ZE_MAX_DEVICE_NAME]; +}; + +ze_result_t zeInitDrivers(uint32_t *pCount, ze_driver_handle_t *phDrivers, + ze_init_driver_type_desc_t *desc); +ze_result_t zeDeviceGet(ze_driver_handle_t hDriver, uint32_t *pCount, + void *phDevices); +ze_result_t zeDeviceGetProperties(void *hDevice, void *pProperties); + +using namespace llvm; +extern cl::opt Verbose; + +#define DEFINE_WRAPPER(NAME) \ + using NAME##_ty = decltype(NAME); \ + void *NAME##Ptr = nullptr; \ + template ze_result_t NAME##Wrapper(Ts... args) { \ + if (!NAME##Ptr) { \ + return ZE_RESULT_ERROR_UNKNOWN; \ + } \ + return reinterpret_cast(NAME##Ptr)(args...); \ + } + +DEFINE_WRAPPER(zeInitDrivers) +DEFINE_WRAPPER(zeDeviceGet) +DEFINE_WRAPPER(zeDeviceGetProperties) + +static bool loadLevelZero() { + constexpr const char *L0Library = "libze_loader.so"; + std::string ErrMsg; + + auto DynlibHandle = std::make_unique( + llvm::sys::DynamicLibrary::getPermanentLibrary(L0Library, &ErrMsg)); + if (!DynlibHandle->isValid()) { + if (ErrMsg.empty()) + ErrMsg = "unknown error"; + if (Verbose) + llvm::errs() << "Unable to load library '" << L0Library << "': " << ErrMsg + << "\n"; + return false; + } + + constexpr struct { + const char *Name; + void **FuncPtr; + } Wrappers[] = { + {"zeInitDrivers", &zeInitDriversPtr}, + {"zeDeviceGet", &zeDeviceGetPtr}, + {"zeDeviceGetProperties", &zeDeviceGetPropertiesPtr}, + }; + + for (auto Entry : Wrappers) { + void *P = DynlibHandle->getAddressOfSymbol(Entry.Name); + if (P == nullptr) { + if (Verbose) + llvm::errs() << "Unable to find '" << Entry.Name << "' in '" + << L0Library << "'\n"; + return false; + } + *(Entry.FuncPtr) = P; + } + + return true; +} + +#define CALL_ZE_AND_CHECK(Fn, ...) 
\ + do { \ + ze_result_t Rc = Fn##Wrapper(__VA_ARGS__); \ + if (Rc != ZE_RESULT_SUCCESS) { \ + if (Verbose) \ + llvm::errs() << "Error: " << __func__ << ":" << #Fn \ + << " failed with error code " << Rc << "\n"; \ + return 1; \ + } \ + } while (0) + +int printGPUsByLevelZero() { + if (!loadLevelZero()) + return 1; + + ze_init_driver_type_desc_t DriverType = {}; + DriverType.stype = ZE_STRUCTURE_TYPE_INIT_DRIVER_TYPE_DESC; + DriverType.flags = ZE_INIT_DRIVER_TYPE_FLAG_GPU; + DriverType.pNext = nullptr; + uint32_t DriverCount{0}; + + // Initialize and find all drivers. + CALL_ZE_AND_CHECK(zeInitDrivers, &DriverCount, nullptr, &DriverType); + + llvm::SmallVector Drivers(DriverCount); + CALL_ZE_AND_CHECK(zeInitDrivers, &DriverCount, Drivers.data(), &DriverType); + + for (auto Driver : Drivers) { + // Discover all the devices for a given driver. + uint32_t DeviceCount = 0; + CALL_ZE_AND_CHECK(zeDeviceGet, Driver, &DeviceCount, nullptr); + + llvm::SmallVector Devices(DeviceCount); + CALL_ZE_AND_CHECK(zeDeviceGet, Driver, &DeviceCount, Devices.data()); + + for (auto Device : Devices) { + ze_device_properties_t DeviceProperties = {}; + DeviceProperties.stype = ZE_STRUCTURE_TYPE_DEVICE_PROPERTIES; + DeviceProperties.pNext = nullptr; + CALL_ZE_AND_CHECK(zeDeviceGetProperties, Device, &DeviceProperties); + llvm::outs() << DeviceProperties.name << '\n'; + } + } + + return 0; +} diff --git a/clang/tools/offload-arch/OffloadArch.cpp b/clang/tools/offload-arch/OffloadArch.cpp index 74be40214a0ec..3c5131eb7c06c 100644 --- a/clang/tools/offload-arch/OffloadArch.cpp +++ b/clang/tools/offload-arch/OffloadArch.cpp @@ -21,6 +21,7 @@ enum VendorName { all, amdgpu, nvptx, + intel, }; static cl::opt @@ -28,7 +29,8 @@ static cl::opt cl::init(all), cl::values(clEnumVal(all, "Print all GPUs (default)"), clEnumVal(amdgpu, "Only print AMD GPUs"), - clEnumVal(nvptx, "Only print NVIDIA GPUs"))); + clEnumVal(nvptx, "Only print NVIDIA GPUs"), + clEnumVal(intel, "Only print Intel GPUs"))); cl::opt Verbose("verbose", cl::desc("Enable verbose output"), cl::init(false), cl::cat(OffloadArchCategory)); @@ -40,6 +42,7 @@ static void PrintVersion(raw_ostream &OS) { int printGPUsByKFD(); int printGPUsByHIP(); int printGPUsByCUDA(); +int printGPUsByLevelZero(); static int printAMD() { #ifndef _WIN32 @@ -51,6 +54,12 @@ static int printAMD() { } static int printNVIDIA() { return printGPUsByCUDA(); } +static int printIntel() { return printGPUsByLevelZero(); } + +const std::array>, 3> VendorTable{ + {{VendorName::amdgpu, printAMD}, + {VendorName::nvptx, printNVIDIA}, + {VendorName::intel, printIntel}}}; int main(int argc, char *argv[]) { cl::HideUnrelatedOptions(OffloadArchCategory); @@ -68,20 +77,17 @@ int main(int argc, char *argv[]) { return 0; } - // If this was invoked from the legacy symlinks provide the same behavior. - bool AMDGPUOnly = Only == VendorName::amdgpu || - sys::path::stem(argv[0]).starts_with("amdgpu-arch"); - bool NVIDIAOnly = Only == VendorName::nvptx || - sys::path::stem(argv[0]).starts_with("nvptx-arch"); - - int NVIDIAResult = 0; - if (!AMDGPUOnly) - NVIDIAResult = printNVIDIA(); + // Support legacy binaries. 
+ if (sys::path::stem(argv[0]).starts_with("amdgpu-arch")) + Only = VendorName::amdgpu; + if (sys::path::stem(argv[0]).starts_with("nvptx-arch")) + Only = VendorName::nvptx; - int AMDResult = 0; - if (!NVIDIAOnly) - AMDResult = printAMD(); + int Result = 1; + for (auto [Name, Func] : VendorTable) { + if (Only == VendorName::all || Only == Name) + Result &= Func(); + } - // We only failed if all cases returned an error. - return AMDResult && NVIDIAResult; + return Result; } diff --git a/clang/unittests/Analysis/CFGTest.cpp b/clang/unittests/Analysis/CFGTest.cpp index 46a6751391cf5..6aa09a8ff61a3 100644 --- a/clang/unittests/Analysis/CFGTest.cpp +++ b/clang/unittests/Analysis/CFGTest.cpp @@ -93,6 +93,159 @@ TEST(CFG, DependantBaseAddImplicitDtors) { .getStatus()); } +TEST(CFG, SwitchCoveredEnumNoDefault) { + const char *Code = R"( + enum class E {E1, E2}; + int f(E e) { + switch(e) { + case E::E1: + return 1; + case E::E2: + return 2; + } + return 0; + } + )"; + CFG::BuildOptions Options; + Options.AssumeReachableDefaultInSwitchStatements = true; + BuildResult B = BuildCFG(Code, Options); + ASSERT_EQ(BuildResult::BuiltCFG, B.getStatus()); + + // [B5 (ENTRY)] + // Succs (1): B2 + // + // [B1] + // 1: 0 + // 2: return [B1.1]; + // Preds (1): B2 + // Succs (1): B0 + // + // [B2] + // 1: e (ImplicitCastExpr, LValueToRValue, E) + // T: switch [B2.1] + // Preds (1): B5 + // Succs (3): B3 B4 B1 + // + // [B3] + // case E::E2: + // 1: 2 + // 2: return [B3.1]; + // Preds (1): B2 + // Succs (1): B0 + // + // [B4] + // case E::E1: + // 1: 1 + // 2: return [B4.1]; + // Preds (1): B2 + // Succs (1): B0 + // + // [B0 (EXIT)] + // Preds (3): B1 B3 B4 + + auto *CFG = B.getCFG(); + const auto &Entry = CFG->getEntry(); + ASSERT_EQ(1u, Entry.succ_size()); + // First successor of Entry is the switch + CFGBlock *SwitchBlock = *Entry.succ_begin(); + ASSERT_EQ(3u, SwitchBlock->succ_size()); + // Last successor of the switch is after the switch + auto NoCaseSucc = SwitchBlock->succ_rbegin(); + EXPECT_TRUE(NoCaseSucc->isReachable()); + + // Checking that the same node is Unreachable without this setting + Options.AssumeReachableDefaultInSwitchStatements = false; + B = BuildCFG(Code, Options); + ASSERT_EQ(BuildResult::BuiltCFG, B.getStatus()); + + const auto &Entry2 = B.getCFG()->getEntry(); + ASSERT_EQ(1u, Entry2.succ_size()); + CFGBlock *SwitchBlock2 = *Entry2.succ_begin(); + ASSERT_EQ(3u, SwitchBlock2->succ_size()); + auto NoCaseSucc2 = SwitchBlock2->succ_rbegin(); + EXPECT_FALSE(NoCaseSucc2->isReachable()); +} + +TEST(CFG, SwitchCoveredEnumWithDefault) { + const char *Code = R"( + enum class E {E1, E2}; + int f(E e) { + switch(e) { + case E::E1: + return 1; + case E::E2: + return 2; + default: + return 0; + } + return -1; + } + )"; + CFG::BuildOptions Options; + Options.AssumeReachableDefaultInSwitchStatements = true; + BuildResult B = BuildCFG(Code, Options); + ASSERT_EQ(BuildResult::BuiltCFG, B.getStatus()); + + // [B6 (ENTRY)] + // Succs (1): B2 + // + // [B1] + // 1: -1 + // 2: return [B1.1]; + // Succs (1): B0 + // + // [B2] + // 1: e (ImplicitCastExpr, LValueToRValue, E) + // T: switch [B2.1] + // Preds (1): B6 + // Succs (3): B4 B5 B3 + // + // [B3] + // default: + // 1: 0 + // 2: return [B3.1]; + // Preds (1): B2 + // Succs (1): B0 + // + // [B4] + // case E::E2: + // 1: 2 + // 2: return [B4.1]; + // Preds (1): B2 + // Succs (1): B0 + // + // [B5] + // case E::E1: + // 1: 1 + // 2: return [B5.1]; + // Preds (1): B2 + // Succs (1): B0 + // + // [B0 (EXIT)] + // Preds (4): B1 B3 B4 B5 + + const auto &Entry 
= B.getCFG()->getEntry(); + ASSERT_EQ(1u, Entry.succ_size()); + // First successor of Entry is the switch + CFGBlock *SwitchBlock = *Entry.succ_begin(); + ASSERT_EQ(3u, SwitchBlock->succ_size()); + // Last successor of the switch is the default branch + auto defaultBlock = SwitchBlock->succ_rbegin(); + EXPECT_TRUE(defaultBlock->isReachable()); + + // Checking that the same node is Unreachable without this setting + Options.AssumeReachableDefaultInSwitchStatements = false; + B = BuildCFG(Code, Options); + ASSERT_EQ(BuildResult::BuiltCFG, B.getStatus()); + + const auto &Entry2 = B.getCFG()->getEntry(); + ASSERT_EQ(1u, Entry2.succ_size()); + CFGBlock *SwitchBlock2 = *Entry2.succ_begin(); + ASSERT_EQ(3u, SwitchBlock2->succ_size()); + auto defaultBlock2 = SwitchBlock2->succ_rbegin(); + EXPECT_FALSE(defaultBlock2->isReachable()); +} + TEST(CFG, IsLinear) { auto expectLinear = [](bool IsLinear, const char *Code) { BuildResult B = BuildCFG(Code); diff --git a/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp b/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp index fb3ab7c0dc9b4..67b471e328b5e 100644 --- a/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp +++ b/clang/unittests/Analysis/FlowSensitive/CachedConstAccessorsLatticeTest.cpp @@ -42,7 +42,7 @@ using ast_matchers::selectFirst; using dataflow::DataflowAnalysisContext; using dataflow::Environment; -using dataflow::LatticeJoinEffect; +using dataflow::LatticeEffect; using dataflow::RecordStorageLocation; using dataflow::Value; using dataflow::WatchedLiteralsSolver; @@ -270,11 +270,11 @@ TEST_F(CachedConstAccessorsLatticeTest, JoinSameNoop) { LatticeT EmptyLattice; LatticeT EmptyLattice2; - EXPECT_EQ(EmptyLattice.join(EmptyLattice2), LatticeJoinEffect::Unchanged); + EXPECT_EQ(EmptyLattice.join(EmptyLattice2), LatticeEffect::Unchanged); LatticeT Lattice1; Lattice1.getOrCreateConstMethodReturnValue(Loc, CE, Env); - EXPECT_EQ(Lattice1.join(Lattice1), LatticeJoinEffect::Unchanged); + EXPECT_EQ(Lattice1.join(Lattice1), LatticeEffect::Unchanged); } TEST_F(CachedConstAccessorsLatticeTest, ProducesNewValueAfterJoinDistinct) { @@ -289,7 +289,7 @@ TEST_F(CachedConstAccessorsLatticeTest, ProducesNewValueAfterJoinDistinct) { LatticeT EmptyLattice; - EXPECT_EQ(Lattice1.join(EmptyLattice), LatticeJoinEffect::Changed); + EXPECT_EQ(Lattice1.join(EmptyLattice), LatticeEffect::Changed); Value *ValAfterJoin = Lattice1.getOrCreateConstMethodReturnValue(Loc, CE, Env); @@ -299,7 +299,7 @@ TEST_F(CachedConstAccessorsLatticeTest, ProducesNewValueAfterJoinDistinct) { LatticeT Lattice3; Value *Val3 = Lattice3.getOrCreateConstMethodReturnValue(Loc, CE, Env); - EXPECT_EQ(Lattice1.join(Lattice3), LatticeJoinEffect::Changed); + EXPECT_EQ(Lattice1.join(Lattice3), LatticeEffect::Changed); Value *ValAfterJoin2 = Lattice1.getOrCreateConstMethodReturnValue(Loc, CE, Env); diff --git a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp index d97e2b0c2425a..cbd55966a3d88 100644 --- a/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp +++ b/clang/unittests/Analysis/FlowSensitive/TransferTest.cpp @@ -1554,8 +1554,8 @@ TEST(TransferTest, BaseClassInitializerFromSiblingDerivedInstance) { struct DerivedTwo : public Base { int DerivedTwoField; - DerivedTwo(const DerivedOne& d1) - : Base(d1), DerivedTwoField(d1.DerivedOneField) { + DerivedTwo(const DerivedOne& D1) + : Base(D1), DerivedTwoField(D1.DerivedOneField) { (void)BaseField; } }; @@ -1565,7 
+1565,34 @@ TEST(TransferTest, BaseClassInitializerFromSiblingDerivedInstance) { [](const llvm::StringMap> &Results, ASTContext &ASTCtx) { // Regression test only; we used to crash when transferring the base - // class initializer from the DerivedToBase-cast `d1`. + // class initializer from the DerivedToBase-cast `D1`. + }, + LangStandard::lang_cxx17, /*ApplyBuiltinTransfer=*/true, "DerivedTwo"); +} + +TEST(TransferTest, CopyAssignmentToDerivedToBase) { + std::string Code = R"cc( + struct Base {}; + +struct DerivedOne : public Base { + int DerivedOneField; +}; + +struct DerivedTwo : public Base { + int DerivedTwoField; + + explicit DerivedTwo(const DerivedOne& D1) { + *static_cast(this) = D1; + } +}; +)cc"; + + runDataflow( + Code, + [](const llvm::StringMap> &Results, + ASTContext &ASTCtx) { + // Regression test only; we used to crash when transferring the copy + // assignment operator in the constructor for `DerivedTwo`. }, LangStandard::lang_cxx17, /*ApplyBuiltinTransfer=*/true, "DerivedTwo"); } diff --git a/clang/unittests/Analysis/LifetimeSafetyTest.cpp b/clang/unittests/Analysis/LifetimeSafetyTest.cpp index bff5378c0a8a9..3821015f07fb1 100644 --- a/clang/unittests/Analysis/LifetimeSafetyTest.cpp +++ b/clang/unittests/Analysis/LifetimeSafetyTest.cpp @@ -68,6 +68,7 @@ class LifetimeTestRunner { LifetimeSafetyAnalysis &getAnalysis() { return *Analysis; } ASTContext &getASTContext() { return *ASTCtx; } + AnalysisDeclContext &getAnalysisContext() { return *AnalysisCtx; } ProgramPoint getProgramPoint(llvm::StringRef Annotation) { auto It = AnnotationToPointMap.find(Annotation); @@ -106,7 +107,7 @@ class LifetimeTestHelper { std::vector getLoansForVar(llvm::StringRef VarName) { auto *VD = findDecl(VarName); if (!VD) { - ADD_FAILURE() << "No VarDecl found for '" << VarName << "'"; + ADD_FAILURE() << "Failed to find VarDecl for '" << VarName << "'"; return {}; } std::vector LID = Analysis.getLoanIDForVar(VD); @@ -136,11 +137,20 @@ class LifetimeTestHelper { private: template DeclT *findDecl(llvm::StringRef Name) { auto &Ctx = Runner.getASTContext(); - auto Results = match(valueDecl(hasName(Name)).bind("v"), Ctx); + const auto *TargetFunc = Runner.getAnalysisContext().getDecl(); + auto Results = + match(valueDecl(hasName(Name), + hasAncestor(functionDecl(equalsNode(TargetFunc)))) + .bind("v"), + Ctx); if (Results.empty()) { ADD_FAILURE() << "Declaration '" << Name << "' not found in AST."; return nullptr; } + if (Results.size() > 1) { + ADD_FAILURE() << "Multiple declarations found for '" << Name << "'"; + return nullptr; + } return const_cast(selectFirst("v", Results)); } @@ -208,6 +218,19 @@ MATCHER_P2(HasLoansToImpl, LoanVars, Annotation, "") { ExpectedLoans.insert(ExpectedLoans.end(), ExpectedLIDs.begin(), ExpectedLIDs.end()); } + std::sort(ExpectedLoans.begin(), ExpectedLoans.end()); + std::sort(ActualLoans.begin(), ActualLoans.end()); + if (ExpectedLoans != ActualLoans) { + *result_listener << "Expected: "; + for (const auto &LoanID : ExpectedLoans) { + *result_listener << LoanID.Value << ", "; + } + *result_listener << "Actual: "; + for (const auto &LoanID : ActualLoans) { + *result_listener << LoanID.Value << ", "; + } + return false; + } return ExplainMatchResult(UnorderedElementsAreArray(ExpectedLoans), ActualLoans, result_listener); @@ -921,5 +944,235 @@ TEST_F(LifetimeAnalysisTest, GslPointerConversionOperator) { EXPECT_THAT(Origin("y"), HasLoansTo({"yl"}, "p1")); } +TEST_F(LifetimeAnalysisTest, LifetimeboundSimple) { + SetupTest(R"( + View Identity(View v 
[[clang::lifetimebound]]); + void target() { + MyObj a, b; + View v1 = a; + POINT(p1); + + View v2 = Identity(v1); + View v3 = Identity(b); + POINT(p2); + } + )"); + EXPECT_THAT(Origin("v1"), HasLoansTo({"a"}, "p1")); + // The origin of v2 should now contain the loan to 'o' from v1. + EXPECT_THAT(Origin("v2"), HasLoansTo({"a"}, "p2")); + EXPECT_THAT(Origin("v3"), HasLoansTo({"b"}, "p2")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundMemberFunction) { + SetupTest(R"( + struct [[gsl::Pointer()]] MyView { + MyView(const MyObj& o) {} + MyView pass() [[clang::lifetimebound]] { return *this; } + }; + void target() { + MyObj o; + MyView v1 = o; + POINT(p1); + MyView v2 = v1.pass(); + POINT(p2); + } + )"); + EXPECT_THAT(Origin("v1"), HasLoansTo({"o"}, "p1")); + // The call v1.pass() is bound to 'v1'. The origin of v2 should get the loans + // from v1. + EXPECT_THAT(Origin("v2"), HasLoansTo({"o"}, "p2")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundMultipleArgs) { + SetupTest(R"( + View Choose(bool cond, View a [[clang::lifetimebound]], View b [[clang::lifetimebound]]); + void target() { + MyObj o1, o2; + View v1 = o1; + View v2 = o2; + POINT(p1); + + View v3 = Choose(true, v1, v2); + POINT(p2); + } + )"); + EXPECT_THAT(Origin("v1"), HasLoansTo({"o1"}, "p1")); + EXPECT_THAT(Origin("v2"), HasLoansTo({"o2"}, "p2")); + // v3 should have loans from both v1 and v2, demonstrating the union of + // loans. + EXPECT_THAT(Origin("v3"), HasLoansTo({"o1", "o2"}, "p2")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundMixedArgs) { + SetupTest(R"( + View Choose(bool cond, View a [[clang::lifetimebound]], View b); + void target() { + MyObj o1, o2; + View v1 = o1; + View v2 = o2; + POINT(p1); + + View v3 = Choose(true, v1, v2); + POINT(p2); + } + )"); + EXPECT_THAT(Origin("v1"), HasLoansTo({"o1"}, "p1")); + EXPECT_THAT(Origin("v2"), HasLoansTo({"o2"}, "p1")); + // v3 should only have loans from v1, as v2 is not lifetimebound. + EXPECT_THAT(Origin("v3"), HasLoansTo({"o1"}, "p2")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundChainOfViews) { + SetupTest(R"( + View Identity(View v [[clang::lifetimebound]]); + View DoubleIdentity(View v [[clang::lifetimebound]]); + + void target() { + MyObj obj; + View v1 = obj; + POINT(p1); + View v2 = DoubleIdentity(Identity(v1)); + POINT(p2); + } + )"); + EXPECT_THAT(Origin("v1"), HasLoansTo({"obj"}, "p1")); + // v2 should inherit the loan from v1 through the chain of calls. + EXPECT_THAT(Origin("v2"), HasLoansTo({"obj"}, "p2")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundRawPointerParameter) { + SetupTest(R"( + View ViewFromPtr(const MyObj* p [[clang::lifetimebound]]); + MyObj* PtrFromPtr(const MyObj* p [[clang::lifetimebound]]); + MyObj* PtrFromView(View v [[clang::lifetimebound]]); + + void target() { + MyObj a; + View v = ViewFromPtr(&a); + POINT(p1); + + MyObj b; + MyObj* ptr1 = PtrFromPtr(&b); + MyObj* ptr2 = PtrFromPtr(PtrFromPtr(PtrFromPtr(ptr1))); + POINT(p2); + + MyObj c; + View v2 = ViewFromPtr(PtrFromView(c)); + POINT(p3); + } + )"); + EXPECT_THAT(Origin("v"), HasLoansTo({"a"}, "p1")); + EXPECT_THAT(Origin("ptr1"), HasLoansTo({"b"}, "p2")); + EXPECT_THAT(Origin("ptr2"), HasLoansTo({"b"}, "p2")); + EXPECT_THAT(Origin("v2"), HasLoansTo({"c"}, "p3")); +} + +// FIXME: This can be controversial and may be revisited in the future. 
+TEST_F(LifetimeAnalysisTest, LifetimeboundConstRefViewParameter) { + SetupTest(R"( + View Identity(const View& v [[clang::lifetimebound]]); + void target() { + MyObj o; + View v1 = o; + View v2 = Identity(v1); + POINT(p1); + } + )"); + EXPECT_THAT(Origin("v2"), HasLoansTo({"o"}, "p1")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundConstRefObjParam) { + SetupTest(R"( + View Identity(const MyObj& o [[clang::lifetimebound]]); + void target() { + MyObj a; + View v1 = Identity(a); + POINT(p1); + } + )"); + EXPECT_THAT(Origin("v1"), HasLoansTo({"a"}, "p1")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundReturnReference) { + SetupTest(R"( + const MyObj& Identity(View v [[clang::lifetimebound]]); + void target() { + MyObj a; + View v1 = a; + POINT(p1); + + View v2 = Identity(v1); + + const MyObj& b = Identity(v1); + View v3 = Identity(b); + POINT(p2); + + MyObj c; + View v4 = Identity(c); + POINT(p3); + } + )"); + EXPECT_THAT(Origin("v1"), HasLoansTo({"a"}, "p1")); + EXPECT_THAT(Origin("v2"), HasLoansTo({"a"}, "p2")); + + // FIXME: Handle reference types. 'v3' should have loan to 'a' instead of 'b'. + EXPECT_THAT(Origin("v3"), HasLoansTo({"b"}, "p2")); + + EXPECT_THAT(Origin("v4"), HasLoansTo({"c"}, "p3")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundTemplateFunction) { + SetupTest(R"( + template + const T& Identity(T&& v [[clang::lifetimebound]]); + void target() { + MyObj a; + View v1 = Identity(a); + POINT(p1); + + View v2 = Identity(v1); + const View& v3 = Identity(v1); + POINT(p2); + } + )"); + EXPECT_THAT(Origin("v1"), HasLoansTo({"a"}, "p1")); + EXPECT_THAT(Origin("v2"), HasLoansTo({"a"}, "p2")); + EXPECT_THAT(Origin("v3"), HasLoansTo({"a"}, "p2")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundTemplateClass) { + SetupTest(R"( + template + struct [[gsl::Pointer()]] MyTemplateView { + MyTemplateView(const T& o) {} + MyTemplateView pass() [[clang::lifetimebound]] { return *this; } + }; + void target() { + MyObj o; + MyTemplateView v1 = o; + POINT(p1); + MyTemplateView v2 = v1.pass(); + POINT(p2); + } + )"); + EXPECT_THAT(Origin("v1"), HasLoansTo({"o"}, "p1")); + EXPECT_THAT(Origin("v2"), HasLoansTo({"o"}, "p2")); +} + +TEST_F(LifetimeAnalysisTest, LifetimeboundConversionOperator) { + SetupTest(R"( + struct MyOwner { + MyObj o; + operator View() const [[clang::lifetimebound]]; + }; + + void target() { + MyOwner owner; + View v = owner; + POINT(p1); + } + )"); + EXPECT_THAT(Origin("v"), HasLoansTo({"owner"}, "p1")); +} } // anonymous namespace } // namespace clang::lifetimes::internal diff --git a/clang/unittests/CodeGen/TestCompiler.h b/clang/unittests/CodeGen/TestCompiler.h index 57b5b079a2e30..9bd90609fcd29 100644 --- a/clang/unittests/CodeGen/TestCompiler.h +++ b/clang/unittests/CodeGen/TestCompiler.h @@ -52,7 +52,7 @@ struct TestCompiler { PtrSize = TInfo.getPointerWidth(clang::LangAS::Default) / 8; compiler.createFileManager(); - compiler.createSourceManager(compiler.getFileManager()); + compiler.createSourceManager(); compiler.createPreprocessor(clang::TU_Prefix); compiler.createASTContext(); diff --git a/clang/unittests/Format/ConfigParseTest.cpp b/clang/unittests/Format/ConfigParseTest.cpp index bb4d38bb741ec..6111e86ff7076 100644 --- a/clang/unittests/Format/ConfigParseTest.cpp +++ b/clang/unittests/Format/ConfigParseTest.cpp @@ -1284,7 +1284,7 @@ TEST(ConfigParseTest, AllowCppForC) { ParseError::Success); } -TEST(ConfigParseTest, HandleNonCppDotHFile) { +TEST(ConfigParseTest, HandleDotHFile) { FormatStyle Style = {}; Style.Language = FormatStyle::LK_Cpp; 
EXPECT_EQ(parseConfiguration("Language: C", &Style, @@ -1295,11 +1295,14 @@ TEST(ConfigParseTest, HandleNonCppDotHFile) { Style = {}; Style.Language = FormatStyle::LK_Cpp; - EXPECT_EQ(parseConfiguration("Language: ObjC", &Style, + EXPECT_EQ(parseConfiguration("Language: Cpp\n" + "...\n" + "Language: C", + &Style, /*AllowUnknownOptions=*/false, /*IsDotHFile=*/true), ParseError::Success); - EXPECT_EQ(Style.Language, FormatStyle::LK_ObjC); + EXPECT_EQ(Style.Language, FormatStyle::LK_Cpp); } TEST(ConfigParseTest, UsesLanguageForBasedOnStyle) { diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp index 7d550143be5df..fef70365b5e18 100644 --- a/clang/unittests/Format/FormatTest.cpp +++ b/clang/unittests/Format/FormatTest.cpp @@ -1364,6 +1364,27 @@ TEST_F(FormatTest, FormatIfWithoutCompoundStatementButElseWith) { AllowsMergedIf); } +TEST_F(FormatTest, WrapMultipleStatementIfAndElseBraces) { + auto Style = getLLVMStyle(); + Style.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Always; + Style.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_AllIfsAndElse; + Style.BreakBeforeBraces = FormatStyle::BS_Custom; + Style.BraceWrapping.AfterControlStatement = FormatStyle::BWACS_Always; + Style.BraceWrapping.BeforeElse = true; + + verifyFormat("if (x)\n" + "{\n" + " ++x;\n" + " --y;\n" + "}\n" + "else\n" + "{\n" + " --x;\n" + " ++y;\n" + "}", + Style); +} + TEST_F(FormatTest, FormatLoopsWithoutCompoundStatement) { verifyFormat("while (true)\n" " ;"); @@ -20312,6 +20333,11 @@ TEST_F(FormatTest, AlignConsecutiveDeclarations) { " return 2;\n" "} };", BracedAlign); + verifyFormat("const volatile auto result{ []() {\n" + " const auto something = 1;\n" + " return 2;\n" + "} };", + BracedAlign); verifyFormat("int foo{ []() {\n" " int bar{ 0 };\n" " return 0;\n" diff --git a/clang/unittests/Format/QualifierFixerTest.cpp b/clang/unittests/Format/QualifierFixerTest.cpp index f42f2e307f713..58e64ff368946 100644 --- a/clang/unittests/Format/QualifierFixerTest.cpp +++ b/clang/unittests/Format/QualifierFixerTest.cpp @@ -1195,6 +1195,41 @@ TEST_F(QualifierFixerTest, QualifiersBrokenUpByPPDirectives) { Style); } +TEST_F(QualifierFixerTest, QualifierOrderingAfterPreprocessorDirectives) { + auto Style = getLLVMStyle(); + Style.QualifierAlignment = FormatStyle::QAS_Custom; + Style.QualifierOrder = {"static", "inline", "const", "type"}; + + verifyFormat("#if 1\n" + "void foo(const int par);\n" + "const int var1;\n" + "#endif\n" + "\n" + "const int var2;\n" + "const int var3;", + "#if 1\n" + "void foo(int const par);\n" + "int const var1;\n" + "#endif\n" + "\n" + "int const var2;\n" + "int const var3;", + Style); + verifyFormat("#if defined(FOO)\n" + "static const int x = 1;\n" + "#else\n" + "static const int x = 2;\n" + "#endif\n" + "static const int y = 3;", + "#if defined(FOO)\n" + "const static int x = 1;\n" + "#else\n" + "const static int x = 2;\n" + "#endif\n" + "const static int y = 3;", + Style); +} + TEST_F(QualifierFixerTest, UnsignedQualifier) { FormatStyle Style = getLLVMStyle(); diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp index 4c43a963632a6..4a8f27f656f1d 100644 --- a/clang/unittests/Format/TokenAnnotatorTest.cpp +++ b/clang/unittests/Format/TokenAnnotatorTest.cpp @@ -2237,6 +2237,12 @@ TEST_F(TokenAnnotatorTest, UnderstandsLambdas) { ASSERT_EQ(Tokens.size(), 21u) << Tokens; EXPECT_TOKEN(Tokens[11], tok::l_square, TT_LambdaLSquare); EXPECT_TOKEN(Tokens[13], tok::l_brace, TT_LambdaLBrace); + + Tokens = 
annotate("SomeFunction({[]() -> int *[] { return {}; }});"); + ASSERT_EQ(Tokens.size(), 22u) << Tokens; + EXPECT_TOKEN(Tokens[3], tok::l_square, TT_LambdaLSquare); + EXPECT_TOKEN(Tokens[5], tok::l_paren, TT_LambdaDefinitionLParen); + EXPECT_TOKEN(Tokens[10], tok::l_square, TT_ArraySubscriptLSquare); } TEST_F(TokenAnnotatorTest, UnderstandsFunctionAnnotations) { @@ -4159,7 +4165,15 @@ TEST_F(TokenAnnotatorTest, LineCommentTrailingBackslash) { EXPECT_TOKEN(Tokens[1], tok::comment, TT_LineComment); } -TEST_F(TokenAnnotatorTest, KeywordedFunctionLikeMacro) { +TEST_F(TokenAnnotatorTest, ArrowAfterSubscript) { + auto Tokens = + annotate("return (getStructType()->getElements())[eIdx]->getName();"); + ASSERT_EQ(Tokens.size(), 19u) << Tokens; + // Not TT_LambdaArrow. + EXPECT_TOKEN(Tokens[13], tok::arrow, TT_Unknown); +} + +TEST_F(TokenAnnotatorTest, QtProperty) { auto Style = getLLVMStyle(); Style.AllowBreakBeforeQtProperty = true; diff --git a/clang/unittests/Frontend/CompilerInstanceTest.cpp b/clang/unittests/Frontend/CompilerInstanceTest.cpp index 36cac5a5dd010..cd3fefa1ea994 100644 --- a/clang/unittests/Frontend/CompilerInstanceTest.cpp +++ b/clang/unittests/Frontend/CompilerInstanceTest.cpp @@ -33,7 +33,7 @@ TEST(CompilerInstance, DefaultVFSOverlayFromInvocation) { SmallString<256> CurrentPath; sys::fs::current_path(CurrentPath); - sys::fs::make_absolute(CurrentPath, FileName); + sys::path::make_absolute(CurrentPath, FileName); // Mount the VFS file itself on the path 'virtual.file'. Makes this test // a bit shorter than creating a new dummy file just for this purpose. diff --git a/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp b/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp index 01f8d4f97b092..82f19686167da 100644 --- a/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp +++ b/clang/unittests/Lex/LexHLSLRootSignatureTest.cpp @@ -226,6 +226,9 @@ TEST_F(LexHLSLRootSignatureTest, ValidLexAllTokensTest) { STATIC_BORDER_COLOR_OPAQUE_WHITE STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT STATIC_BORDER_COLOR_OPAQUE_WHITE_UINT + + UINT_BORDER_COLOR + NON_NORMALIZED_COORDINATES )cc"; hlsl::RootSignatureLexer Lexer(Source); diff --git a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp index 9b9f5dd8a63bb..f7e9d2d32c3f4 100644 --- a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp +++ b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp @@ -263,7 +263,8 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseStaticSamplerTest) { filter = FILTER_MAXIMUM_MIN_POINT_MAG_LINEAR_MIP_POINT, maxLOD = 9000, addressU = TEXTURE_ADDRESS_MIRROR, comparisonFunc = COMPARISON_NOT_EQUAL, - borderColor = STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT + borderColor = STATIC_BORDER_COLOR_OPAQUE_BLACK_UINT, + flags = 0 ) )cc"; @@ -336,6 +337,37 @@ TEST_F(ParseHLSLRootSignatureTest, ValidParseStaticSamplerTest) { ASSERT_TRUE(Consumer->isSatisfied()); } +TEST_F(ParseHLSLRootSignatureTest, ValidStaticSamplerFlagsTest) { + const llvm::StringLiteral Source = R"cc( + StaticSampler(s0, flags = UINT_BORDER_COLOR | NON_NORMALIZED_COORDINATES) + )cc"; + + auto Ctx = createMinimalASTContext(); + StringLiteral *Signature = wrapSource(Ctx, Source); + + TrivialModuleLoader ModLoader; + auto PP = createPP(Source, ModLoader); + + hlsl::RootSignatureParser Parser(RootSignatureVersion::V1_1, Signature, *PP); + + // Test no diagnostics produced + Consumer->setNoDiag(); + + ASSERT_FALSE(Parser.parse()); + + auto Elements = Parser.getElements(); + ASSERT_EQ(Elements.size(), 1u); + + RootElement Elem = 
Elements[0].getElement(); + ASSERT_TRUE(std::holds_alternative(Elem)); + auto ValidStaticSamplerFlags = + llvm::dxbc::StaticSamplerFlags::NonNormalizedCoordinates | + llvm::dxbc::StaticSamplerFlags::UintBorderColor; + ASSERT_EQ(std::get(Elem).Flags, ValidStaticSamplerFlags); + + ASSERT_TRUE(Consumer->isSatisfied()); +} + TEST_F(ParseHLSLRootSignatureTest, ValidParseFloatsTest) { const llvm::StringLiteral Source = R"cc( StaticSampler(s0, mipLODBias = 0), diff --git a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp index 24e2fd65f3c0a..edf33ae04230b 100644 --- a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp +++ b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp @@ -122,8 +122,8 @@ export int aa = 43; Clang.setDiagnostics(Diags); Clang.createVirtualFileSystem(CIOpts.VFS); - FileManager *FM = Clang.createFileManager(); - Clang.createSourceManager(*FM); + Clang.createFileManager(); + Clang.createSourceManager(); EXPECT_TRUE(Clang.createTarget()); Clang.createPreprocessor(TU_Complete); diff --git a/clang/unittests/StaticAnalyzer/CallEventTest.cpp b/clang/unittests/StaticAnalyzer/CallEventTest.cpp index 8b5289ea7472b..f42689218bb1a 100644 --- a/clang/unittests/StaticAnalyzer/CallEventTest.cpp +++ b/clang/unittests/StaticAnalyzer/CallEventTest.cpp @@ -84,6 +84,47 @@ TEST(CXXDeallocatorCall, SimpleDestructor) { #endif } +TEST(PrivateMethodCache, NeverReturnDanglingPointersWithMultipleASTs) { + // Each iteration will load and unload an AST multiple times. Since the code + // is always the same, we increase the chance of hitting a bug in the private + // method cache, returning a dangling pointer and crashing the process. If the + // cache is properly cleared between runs, the test should pass. 
+ for (int I = 0; I < 100; ++I) { + auto const *Code = R"( + typedef __typeof(sizeof(int)) size_t; + + extern void *malloc(size_t size); + extern void *memcpy(void *dest, const void *src, size_t n); + + @interface SomeMoreData { + char const* _buffer; + int _size; + } + @property(nonatomic, readonly) const char* buffer; + @property(nonatomic) int size; + + - (void)appendData:(SomeMoreData*)other; + + @end + + @implementation SomeMoreData + @synthesize size = _size; + @synthesize buffer = _buffer; + + - (void)appendData:(SomeMoreData*)other { + int const len = (_size + other.size); // implicit self._length + char* d = malloc(sizeof(char) * len); + memcpy(d + 20, other.buffer, len); + } + + @end + )"; + std::string Diags; + EXPECT_TRUE(runCheckerOnCodeWithArgs( + Code, {"-x", "objective-c", "-Wno-objc-root-class"}, Diags)); + } +} + } // namespace } // namespace ento } // namespace clang diff --git a/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp b/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp index 80289efd374cf..aa32bb3d39f6d 100644 --- a/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp +++ b/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp @@ -65,7 +65,7 @@ class TestDependencyScanningAction : public tooling::ToolAction { if (!Compiler.hasDiagnostics()) return false; - Compiler.createSourceManager(*FileMgr); + Compiler.createSourceManager(); Compiler.addDependencyCollector(std::make_shared( Compiler.getInvocation().getDependencyOutputOpts(), Deps)); diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index f73b0aecc3b6d..74f29ac43ac40 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -133,28 +133,20 @@ static BasicType ParseBasicType(char c) { switch (c) { case 'c': return BasicType::Int8; - break; case 's': return BasicType::Int16; - break; case 'i': return BasicType::Int32; - break; case 'l': return BasicType::Int64; - break; case 'x': return BasicType::Float16; - break; case 'f': return BasicType::Float32; - break; case 'd': return BasicType::Float64; - break; case 'y': return BasicType::BFloat16; - break; default: return BasicType::Unknown; } diff --git a/clang/utils/perf-training/CMakeLists.txt b/clang/utils/perf-training/CMakeLists.txt index 1d7bb788a15ed..2cd4c4c29c2bb 100644 --- a/clang/utils/perf-training/CMakeLists.txt +++ b/clang/utils/perf-training/CMakeLists.txt @@ -6,6 +6,10 @@ set(CLANG_PGO_TRAINING_DATA "${CMAKE_CURRENT_SOURCE_DIR}" CACHE PATH set(CLANG_PGO_TRAINING_DATA_SOURCE_DIR OFF CACHE STRING "Path to source directory containing cmake project with source files to use for generating pgo data") set(CLANG_PGO_TRAINING_DEPS "" CACHE STRING "Extra dependencies needed to build the PGO training data.") +add_custom_target(clear-perf-data + COMMAND "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py clean ${CMAKE_CURRENT_BINARY_DIR} perf.data + COMMENT "Clearing old perf data") + option(CLANG_PGO_TRAINING_USE_LLVM_BUILD "Use LLVM build for generating PGO data" ON) llvm_canonicalize_cmake_booleans( @@ -21,7 +25,7 @@ if(LLVM_BUILD_INSTRUMENTED) add_lit_testsuite(generate-profraw "Generating clang PGO data" ${CMAKE_CURRENT_BINARY_DIR}/pgo-data/ EXCLUDE_FROM_CHECK_ALL - DEPENDS clear-profraw + DEPENDS clear-profraw clang ) add_custom_target(clear-profraw @@ -55,6 +59,32 @@ if(LLVM_BUILD_INSTRUMENTED) USE_TOOLCHAIN EXLUDE_FROM_ALL NO_INSTALL DEPENDS generate-profraw) 
add_dependencies(generate-profdata generate-profraw-external) endif() + + if(NOT LLVM_PROFGEN) + find_program(LLVM_PROFGEN llvm-profgen) + endif() + + if(NOT LLVM_PROFGEN) + message(STATUS "To enable converting CSSPGO samples LLVM_PROFGEN has to point to llvm-profgen") + elseif(NOT CLANG_PGO_TRAINING_DATA_SOURCE_DIR) + message(STATUS "CLANG_PGO_TRAINING_DATA_SOURCE_DIR must be set to collect CSSPGO samples") + else() + set(PERF_HELPER "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py) + set(CLANG_SPROFDATA ${CMAKE_CURRENT_BINARY_DIR}/clang.sprofdata) + add_custom_command( + OUTPUT ${CLANG_SPROFDATA} + # Execute generate-profraw-external under perf + COMMAND ${PERF_HELPER} perf --csspgo -- ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target generate-profraw-external + # Convert perf profile into profraw + COMMAND ${PERF_HELPER} perf2prof ${LLVM_PROFGEN} $ ${CMAKE_CURRENT_BINARY_DIR} + # Merge profdata + COMMAND ${PERF_HELPER} merge --sample ${LLVM_PROFDATA} ${CLANG_SPROFDATA} ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS clang ${CLANG_PGO_TRAINING_DEPS} clear-perf-data generate-profraw-external-clean + VERBATIM + USES_TERMINAL + ) + add_custom_target(generate-sprofdata DEPENDS ${CLANG_SPROFDATA}) + endif() endif() endif() @@ -104,8 +134,4 @@ if(CLANG_BOLT AND NOT LLVM_BUILD_INSTRUMENTED) COMMAND "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py clean ${CMAKE_CURRENT_BINARY_DIR} fdata COMMENT "Clearing old BOLT fdata") - add_custom_target(clear-perf-data - COMMAND "${Python3_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/perf-helper.py clean ${CMAKE_CURRENT_BINARY_DIR} perf.data - COMMENT "Clearing old perf data") - endif() diff --git a/clang/utils/perf-training/perf-helper.py b/clang/utils/perf-training/perf-helper.py index ab4491d2a6b6d..1c7904ec62163 100644 --- a/clang/utils/perf-training/perf-helper.py +++ b/clang/utils/perf-training/perf-helper.py @@ -45,14 +45,22 @@ def clean(args): def merge(args): - if len(args) < 3: - print( - "Usage: %s merge \n" % __file__ - + "\tMerges all profraw files from path into output." 
- ) - return 1 - cmd = [args[0], "merge", "-o", args[1]] - for path in args[2:]: + parser = argparse.ArgumentParser( + prog="perf-helper merge", + description="Merges all profraw files from path(s) into output", + ) + parser.add_argument("profdata", help="Path to llvm-profdata tool") + parser.add_argument("output", help="Output filename") + parser.add_argument( + "paths", nargs="+", help="Folder(s) containing input profraw files" + ) + parser.add_argument("--sample", action="store_true", help="Sample profile") + opts = parser.parse_args(args) + + cmd = [opts.profdata, "merge", "-o", opts.output] + if opts.sample: + cmd += ["--sample"] + for path in opts.paths: cmd.extend(findFilesWithExtension(path, "profraw")) subprocess.check_call(cmd) return 0 @@ -73,25 +81,30 @@ def merge_fdata(args): def perf(args): parser = argparse.ArgumentParser( - prog="perf-helper perf", description="perf wrapper for BOLT profile collection" + prog="perf-helper perf", + description="perf wrapper for BOLT/CSSPGO profile collection", ) parser.add_argument( "--lbr", action="store_true", help="Use perf with branch stacks" ) + parser.add_argument("--csspgo", action="store_true", help="Enable CSSPGO flags") parser.add_argument("cmd", nargs=argparse.REMAINDER, help="") opts = parser.parse_args(args) cmd = opts.cmd[1:] + event = "br_inst_retired.near_taken:uppp" if opts.csspgo else "cycles:u" perf_args = [ "perf", "record", - "--event=cycles:u", + f"--event={event}", "--freq=max", "--output=%d.perf.data" % os.getpid(), ] - if opts.lbr: + if opts.lbr or opts.csspgo: perf_args += ["--branch-filter=any,u"] + if opts.csspgo: + perf_args += ["-g", "--call-graph=fp"] perf_args.extend(cmd) start_time = time.time() @@ -127,6 +140,30 @@ def perf2bolt(args): return 0 +def perf2prof(args): + parser = argparse.ArgumentParser( + prog="perf-helper perf2prof", + description="perf to CSSPGO prof conversion wrapper", + ) + parser.add_argument("profgen", help="Path to llvm-profgen binary") + parser.add_argument("binary", help="Input binary") + parser.add_argument("paths", nargs="+", help="Path containing perf.data files") + opts = parser.parse_args(args) + + profgen_args = [opts.profgen, f"--binary={opts.binary}"] + for path in opts.paths: + for filename in findFilesWithExtension(path, "perf.data"): + subprocess.run( + [ + *profgen_args, + f"--perfdata={filename}", + f"--output={filename}.profraw", + ], + check=True, + ) + return 0 + + def dtrace(args): parser = argparse.ArgumentParser( prog="perf-helper dtrace", @@ -660,7 +697,10 @@ def bolt_optimize(args): process.check_returncode() if opts.method in ["PERF", "LBR"]: - perf2bolt([opts.bolt, opts.perf_training_binary_dir, opts.input]) + args = [opts.bolt, opts.perf_training_binary_dir, opts.input] + if opts.method == "LBR": + args.extend("--lbr") + perf2bolt(args) merge_fdata([opts.merge_fdata, opts.fdata, opts.perf_training_binary_dir]) @@ -707,6 +747,7 @@ def bolt_optimize(args): "merge-fdata": merge_fdata, "perf": perf, "perf2bolt": perf2bolt, + "perf2prof": perf2prof, } diff --git a/compiler-rt/include/xray/xray_interface.h b/compiler-rt/include/xray/xray_interface.h index 675ea0cbc48c8..3ef8ee348540f 100644 --- a/compiler-rt/include/xray/xray_interface.h +++ b/compiler-rt/include/xray/xray_interface.h @@ -1,4 +1,4 @@ -//===- xray_interface.h -----------------------------------------*- C++ -*-===// +//===- xray_interface.h ---------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
// See https://llvm.org/LICENSE.txt for license information. @@ -14,10 +14,17 @@ #ifndef XRAY_XRAY_INTERFACE_H #define XRAY_XRAY_INTERFACE_H +#ifdef __cplusplus #include #include +#else +#include +#include +#endif +#ifdef __cplusplus extern "C" { +#endif /// Synchronize this with AsmPrinter::SledKind in LLVM. enum XRayEntryType { @@ -49,7 +56,7 @@ enum XRayEntryType { /// achieved by marking them all with: __attribute__((xray_never_instrument)) /// /// Returns 1 on success, 0 on error. -extern int __xray_set_handler(void (*entry)(int32_t, XRayEntryType)); +extern int __xray_set_handler(void (*entry)(int32_t, enum XRayEntryType)); /// This removes whatever the currently provided handler is. Returns 1 on /// success, 0 on error. @@ -60,7 +67,7 @@ extern int __xray_remove_handler(); /// start logging their subsequent affected function calls (if patched). /// /// Returns 1 on success, 0 on error. -extern int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, +extern int __xray_set_handler_arg1(void (*entry)(int32_t, enum XRayEntryType, uint64_t)); /// Disables the XRay handler used to log first arguments of function calls. @@ -68,7 +75,7 @@ extern int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, extern int __xray_remove_handler_arg1(); /// Provide a function to invoke when XRay encounters a custom event. -extern int __xray_set_customevent_handler(void (*entry)(void *, std::size_t)); +extern int __xray_set_customevent_handler(void (*entry)(void *, size_t)); /// This removes whatever the currently provided custom event handler is. /// Returns 1 on success, 0 on error. @@ -95,39 +102,39 @@ enum XRayPatchingStatus { /// This tells XRay to patch the instrumentation points in all currently loaded /// objects. See XRayPatchingStatus for possible result values. -extern XRayPatchingStatus __xray_patch(); +extern enum XRayPatchingStatus __xray_patch(); /// This tells XRay to patch the instrumentation points in the given object. /// See XRayPatchingStatus for possible result values. -extern XRayPatchingStatus __xray_patch_object(int32_t ObjId); +extern enum XRayPatchingStatus __xray_patch_object(int32_t ObjId); /// Reverses the effect of __xray_patch(). See XRayPatchingStatus for possible /// result values. -extern XRayPatchingStatus __xray_unpatch(); +extern enum XRayPatchingStatus __xray_unpatch(); /// Reverses the effect of __xray_patch_object. See XRayPatchingStatus for /// possible result values. -extern XRayPatchingStatus __xray_unpatch_object(int32_t ObjId); +extern enum XRayPatchingStatus __xray_unpatch_object(int32_t ObjId); /// This unpacks the given (packed) function id and patches /// the corresponding function. See XRayPatchingStatus for possible /// result values. -extern XRayPatchingStatus __xray_patch_function(int32_t FuncId); +extern enum XRayPatchingStatus __xray_patch_function(int32_t FuncId); /// This patches a specific function in the given object. See XRayPatchingStatus /// for possible result values. -extern XRayPatchingStatus __xray_patch_function_in_object(int32_t FuncId, - int32_t ObjId); +extern enum XRayPatchingStatus __xray_patch_function_in_object(int32_t FuncId, + int32_t ObjId); /// This unpacks the given (packed) function id and unpatches /// the corresponding function. See XRayPatchingStatus for possible /// result values. -extern XRayPatchingStatus __xray_unpatch_function(int32_t FuncId); +extern enum XRayPatchingStatus __xray_unpatch_function(int32_t FuncId); /// This unpatches a specific function in the given object. 
/// See XRayPatchingStatus for possible result values. -extern XRayPatchingStatus __xray_unpatch_function_in_object(int32_t FuncId, - int32_t ObjId); +extern enum XRayPatchingStatus __xray_unpatch_function_in_object(int32_t FuncId, + int32_t ObjId); /// This function unpacks the given (packed) function id and returns the address /// of the corresponding function. We return 0 if we encounter any error, even @@ -173,6 +180,8 @@ extern int32_t __xray_pack_id(int32_t FuncId, int32_t ObjId); /// Calling __xray_init() more than once is safe across multiple threads. extern void __xray_init(); +#ifdef __cplusplus } // end extern "C" +#endif #endif // XRAY_XRAY_INTERFACE_H diff --git a/compiler-rt/lib/asan/asan_interceptors.cpp b/compiler-rt/lib/asan/asan_interceptors.cpp index 7c9a08b9083a2..0f613f0fdc30b 100644 --- a/compiler-rt/lib/asan/asan_interceptors.cpp +++ b/compiler-rt/lib/asan/asan_interceptors.cpp @@ -58,13 +58,20 @@ namespace __asan { static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) { #if SANITIZER_INTERCEPT_STRNLEN - if (REAL(strnlen)) { + if (REAL(strnlen)) return REAL(strnlen)(s, maxlen); - } -#endif +# endif return internal_strnlen(s, maxlen); } +static inline uptr MaybeRealWcsnlen(const wchar_t* s, uptr maxlen) { +# if SANITIZER_INTERCEPT_WCSNLEN + if (REAL(wcsnlen)) + return REAL(wcsnlen)(s, maxlen); +# endif + return internal_wcsnlen(s, maxlen); +} + void SetThreadName(const char *name) { AsanThread *t = GetCurrentThread(); if (t) @@ -570,6 +577,20 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) { return REAL(strcpy)(to, from); } +INTERCEPTOR(wchar_t*, wcscpy, wchar_t* to, const wchar_t* from) { + void* ctx; + ASAN_INTERCEPTOR_ENTER(ctx, wcscpy); + if (!TryAsanInitFromRtl()) + return REAL(wcscpy)(to, from); + if (flags()->replace_str) { + uptr size = (internal_wcslen(from) + 1) * sizeof(wchar_t); + CHECK_RANGES_OVERLAP("wcscpy", to, size, from, size); + ASAN_READ_RANGE(ctx, from, size); + ASAN_WRITE_RANGE(ctx, to, size); + } + return REAL(wcscpy)(to, from); +} + // Windows doesn't always define the strdup identifier, // and when it does it's a macro defined to either _strdup // or _strdup_dbg, _strdup_dbg ends up calling _strdup, so @@ -633,6 +654,20 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, usize size) { return REAL(strncpy)(to, from, size); } +INTERCEPTOR(wchar_t*, wcsncpy, wchar_t* to, const wchar_t* from, uptr size) { + void* ctx; + ASAN_INTERCEPTOR_ENTER(ctx, wcsncpy); + AsanInitFromRtl(); + if (flags()->replace_str) { + uptr from_size = + Min(size, MaybeRealWcsnlen(from, size) + 1) * sizeof(wchar_t); + CHECK_RANGES_OVERLAP("wcsncpy", to, from_size, from, from_size); + ASAN_READ_RANGE(ctx, from, from_size); + ASAN_WRITE_RANGE(ctx, to, size * sizeof(wchar_t)); + } + return REAL(wcsncpy)(to, from, size); +} + template static ALWAYS_INLINE auto StrtolImpl(void *ctx, Fn real, const char *nptr, char **endptr, int base) @@ -809,6 +844,11 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(strncat); ASAN_INTERCEPT_FUNC(strncpy); ASAN_INTERCEPT_FUNC(strdup); + + // Intercept wcs* functions. 
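A small standalone program (illustrative only, not part of the patch) showing the byte ranges the new wcscpy and wcsncpy interceptors above validate; the subtle point is that wcsncpy's size argument counts wide characters, so the checked ranges are scaled by sizeof(wchar_t):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <cwchar>

int main() {
  const wchar_t *from = L"dog";
  std::size_t n = 2; // element count passed to wcsncpy

  // wcscpy: both the source read and the destination write cover the full
  // string including the terminator, measured in bytes.
  std::size_t cpyBytes = (std::wcslen(from) + 1) * sizeof(wchar_t);

  // wcsncpy: the read covers min(n, wcsnlen(from, n) + 1) elements, while the
  // write always covers n elements. (wcsnlen is open-coded here because it is
  // not part of standard C++.)
  std::size_t srcLen = 0;
  while (srcLen < n && from[srcLen] != L'\0')
    ++srcLen;
  std::size_t ncpyReadBytes = std::min(n, srcLen + 1) * sizeof(wchar_t);
  std::size_t ncpyWriteBytes = n * sizeof(wchar_t);

  std::printf("wcscpy checks %zu bytes; wcsncpy reads %zu and writes %zu\n",
              cpyBytes, ncpyReadBytes, ncpyWriteBytes);
  return 0;
}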
+ ASAN_INTERCEPT_FUNC(wcscpy); + ASAN_INTERCEPT_FUNC(wcsncpy); + # if ASAN_INTERCEPT___STRDUP ASAN_INTERCEPT_FUNC(__strdup); #endif diff --git a/compiler-rt/lib/asan/asan_interceptors.h b/compiler-rt/lib/asan/asan_interceptors.h index 3e2386eaf8092..2d551cfafd1f5 100644 --- a/compiler-rt/lib/asan/asan_interceptors.h +++ b/compiler-rt/lib/asan/asan_interceptors.h @@ -129,6 +129,7 @@ DECLARE_REAL(char*, strchr, const char *str, int c) DECLARE_REAL(SIZE_T, strlen, const char *s) DECLARE_REAL(char*, strncpy, char *to, const char *from, SIZE_T size) DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen) +DECLARE_REAL(SIZE_T, wcsnlen, const wchar_t* s, SIZE_T maxlen) DECLARE_REAL(char*, strstr, const char *s1, const char *s2) # if !SANITIZER_APPLE diff --git a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp index bdf328f892063..f52ae9ae8d17c 100644 --- a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp +++ b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp @@ -55,8 +55,10 @@ using namespace __asan; if (LIKELY(replace_intrin_cached)) { \ ASAN_READ_RANGE(ctx, from, size); \ ASAN_WRITE_RANGE(ctx, to, size); \ + } else if (UNLIKELY(!AsanInited())) { \ + return internal_memmove(to, from, size); \ } \ - return internal_memmove(to, from, size); \ + return REAL(memmove)(to, from, size); \ } while (0) void *__asan_memcpy(void *to, const void *from, uptr size) { diff --git a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h index 14727a5d665ed..ec988cff51c59 100644 --- a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h +++ b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h @@ -20,6 +20,7 @@ DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size) DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size) +DECLARE_REAL(void *, memmove, void *to, const void *from, SIZE_T size) namespace __asan { diff --git a/compiler-rt/lib/asan/asan_win_static_runtime_thunk.cpp b/compiler-rt/lib/asan/asan_win_static_runtime_thunk.cpp index 4a69b66574039..46e0e90738f24 100644 --- a/compiler-rt/lib/asan/asan_win_static_runtime_thunk.cpp +++ b/compiler-rt/lib/asan/asan_win_static_runtime_thunk.cpp @@ -63,6 +63,10 @@ INTERCEPT_LIBRARY_FUNCTION_ASAN(strpbrk); INTERCEPT_LIBRARY_FUNCTION_ASAN(strspn); INTERCEPT_LIBRARY_FUNCTION_ASAN(strstr); INTERCEPT_LIBRARY_FUNCTION_ASAN(strtok); +INTERCEPT_LIBRARY_FUNCTION_ASAN(wcscat); +INTERCEPT_LIBRARY_FUNCTION_ASAN(wcscpy); +INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsncat); +INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsncpy); INTERCEPT_LIBRARY_FUNCTION_ASAN(wcslen); INTERCEPT_LIBRARY_FUNCTION_ASAN(wcsnlen); diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt index 0d7fc65cfd3e9..9095b056ae782 100644 --- a/compiler-rt/lib/builtins/CMakeLists.txt +++ b/compiler-rt/lib/builtins/CMakeLists.txt @@ -816,14 +816,15 @@ set(s390x_SOURCES ${GENERIC_TF_SOURCES} ) -set(wasm32_SOURCES - ${GENERIC_TF_SOURCES} - ${GENERIC_SOURCES} -) -set(wasm64_SOURCES + +set(wasm_SOURCES + wasm/__c_longjmp.S + wasm/__cpp_exceptions.S ${GENERIC_TF_SOURCES} ${GENERIC_SOURCES} ) +set(wasm32_SOURCES ${wasm_SOURCES}) +set(wasm64_SOURCES ${wasm_SOURCES}) set(ve_SOURCES ve/grow_stack.S diff --git a/compiler-rt/lib/builtins/cpu_model/aarch64.c b/compiler-rt/lib/builtins/cpu_model/aarch64.c index d7880529ebe70..8af736d0ffe93 100644 --- a/compiler-rt/lib/builtins/cpu_model/aarch64.c +++ 
b/compiler-rt/lib/builtins/cpu_model/aarch64.c @@ -69,15 +69,15 @@ struct { #if defined(__APPLE__) #include "aarch64/fmv/apple.inc" #elif defined(__FreeBSD__) || defined(__OpenBSD__) -#include "aarch64/fmv/mrs.inc" +#include "aarch64/fmv/hwcap.inc" #include "aarch64/fmv/elf_aux_info.inc" #elif defined(__Fuchsia__) #include "aarch64/fmv/fuchsia.inc" #elif defined(__ANDROID__) -#include "aarch64/fmv/mrs.inc" +#include "aarch64/fmv/hwcap.inc" #include "aarch64/fmv/android.inc" #elif defined(__linux__) && __has_include() -#include "aarch64/fmv/mrs.inc" +#include "aarch64/fmv/hwcap.inc" #include "aarch64/fmv/getauxval.inc" #elif defined(_WIN32) #include "aarch64/fmv/windows.inc" diff --git a/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc b/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/hwcap.inc similarity index 94% rename from compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc rename to compiler-rt/lib/builtins/cpu_model/aarch64/fmv/hwcap.inc index afe9d4efd6af5..0f56cef97d4a3 100644 --- a/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc +++ b/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/hwcap.inc @@ -7,9 +7,6 @@ static void __init_cpu_features_constructor(unsigned long hwcap, const __ifunc_arg_t *arg) { unsigned long long feat = 0; #define setCPUFeature(F) feat |= 1ULL << F -#define getCPUFeature(id, ftr) __asm__("mrs %0, " #id : "=r"(ftr)) -#define extractBits(val, start, number) \ - (val & ((1ULL << number) - 1ULL) << start) >> start unsigned long hwcap2 = 0; if (hwcap & _IFUNC_ARG_HWCAP) hwcap2 = arg->_hwcap2; diff --git a/compiler-rt/lib/builtins/wasm/__c_longjmp.S b/compiler-rt/lib/builtins/wasm/__c_longjmp.S new file mode 100644 index 0000000000000..d130862fd5c41 --- /dev/null +++ b/compiler-rt/lib/builtins/wasm/__c_longjmp.S @@ -0,0 +1,26 @@ +//===-- __c_longjmp.S - Implement __c_longjmp -----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements __c_longjmp which LLVM uses to implenmet setjmp/longjmp +// when Wasm EH is enabled. +// +//===----------------------------------------------------------------------===// + +#ifdef __wasm_exception_handling__ + +#ifdef __wasm64__ +#define PTR i64 +#else +#define PTR i32 +#endif + +.globl __c_longjmp +.tagtype __c_longjmp PTR +__c_longjmp: + +#endif // !__wasm_exception_handling__ diff --git a/compiler-rt/lib/builtins/wasm/__cpp_exception.S b/compiler-rt/lib/builtins/wasm/__cpp_exception.S new file mode 100644 index 0000000000000..0496e1dbf6158 --- /dev/null +++ b/compiler-rt/lib/builtins/wasm/__cpp_exception.S @@ -0,0 +1,26 @@ +//===-- __cpp_exception.S - Implement __cpp_exception ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements __cpp_exception which LLVM uses to implement exception +// handling when Wasm EH is enabled. 
+// +//===----------------------------------------------------------------------===// + +#ifdef __wasm_exception_handling__ + +#ifdef __wasm64__ +#define PTR i64 +#else +#define PTR i32 +#endif + +.globl __cpp_exception +.tagtype __cpp_exception PTR +__cpp_exception: + +#endif // !__wasm_exception_handling__ diff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h index 12a75fea24fba..3ea30630e1827 100644 --- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h +++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h @@ -35,7 +35,7 @@ struct InputInfo { size_t Tmp = 0; // Used by ValidateFeatureSet. // Stats. size_t NumExecutedMutations = 0; - size_t NumSuccessfullMutations = 0; + size_t NumSuccessfulMutations = 0; bool NeverReduce = false; bool MayDeleteFile = false; bool Reduced = false; @@ -328,7 +328,7 @@ class InputCorpus { const auto &II = *Inputs[i]; Printf(" [% 3zd %s] sz: % 5zd runs: % 5zd succ: % 5zd focus: %d\n", i, Sha1ToString(II.Sha1).c_str(), II.U.size(), - II.NumExecutedMutations, II.NumSuccessfullMutations, + II.NumExecutedMutations, II.NumSuccessfulMutations, II.HasFocusFunction); } } diff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp index 6b25aa9942d2e..5928d1d96acd1 100644 --- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp @@ -609,7 +609,7 @@ int AnalyzeDictionary(Fuzzer *F, const std::vector &Dict, return 0; } -std::vector ParseSeedInuts(const char *seed_inputs) { +std::vector ParseSeedInputs(const char *seed_inputs) { // Parse -seed_inputs=file1,file2,... or -seed_inputs=@seed_inputs_file std::vector Files; if (!seed_inputs) return Files; @@ -919,7 +919,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { exit(0); } - auto CorporaFiles = ReadCorpora(*Inputs, ParseSeedInuts(Flags.seed_inputs)); + auto CorporaFiles = ReadCorpora(*Inputs, ParseSeedInputs(Flags.seed_inputs)); F->Loop(CorporaFiles); if (Flags.verbosity) diff --git a/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp b/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp index 566820ae6d198..cb29af9329bb2 100644 --- a/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp @@ -35,7 +35,7 @@ using namespace fuzzer; #define WIN_SYM_PREFIX #endif -// Declare external functions as having alternativenames, so that we can +// Declare external functions as having alternative names, so that we can // determine if they are not defined. 
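The alternate-name mechanism referred to in the comment above can be illustrated with a reduced standalone sketch (not from the patch; the hook names are invented, and it assumes an MSVC-style linker where /alternatename is available):

#include <cstdio>

// Default implementation, always present.
extern "C" int DefaultHook(void) { return 0; }

// Optionally provided by the user; only declared here.
extern "C" int UserHook(void);

#if defined(_MSC_VER)
// If no strong definition of UserHook is linked in, resolve the symbol to
// DefaultHook instead of failing the link. (x64 symbols have no leading
// underscore; 32-bit x86 would need "_UserHook=_DefaultHook".)
#pragma comment(linker, "/alternatename:UserHook=DefaultHook")
#endif

int main() {
  // When the alternate name kicks in, UserHook and DefaultHook are the same
  // symbol, so comparing addresses reveals whether the user supplied a hook.
  std::printf("user-provided hook: %s\n",
              &UserHook == &DefaultHook ? "no" : "yes");
  return UserHook();
}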
#define EXTERNAL_FUNC(Name, Default) \ __pragma(comment(linker, "/alternatename:" WIN_SYM_PREFIX STRINGIFY( \ diff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp index 75c2fb71eb070..a93cd16b87934 100644 --- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp @@ -448,9 +448,9 @@ void Fuzzer::PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size) { if (!(TotalNumberOfRuns & (TotalNumberOfRuns - 1)) && secondsSinceProcessStartUp() >= 2) PrintStats("pulse "); - auto Threshhold = + auto Threshold = static_cast(static_cast(TimeOfLongestUnitInSeconds) * 1.1); - if (TimeOfUnit > Threshhold && TimeOfUnit >= Options.ReportSlowUnits) { + if (TimeOfUnit > Threshold && TimeOfUnit >= Options.ReportSlowUnits) { TimeOfLongestUnitInSeconds = TimeOfUnit; Printf("Slowest unit: %ld s:\n", TimeOfLongestUnitInSeconds); WriteUnitToFileWithPrefix({Data, Data + Size}, "slow-unit-"); @@ -666,7 +666,7 @@ void Fuzzer::PrintStatusForNewUnit(const Unit &U, const char *Text) { } void Fuzzer::ReportNewCoverage(InputInfo *II, const Unit &U) { - II->NumSuccessfullMutations++; + II->NumSuccessfulMutations++; MD.RecordSuccessfulMutationSequence(); PrintStatusForNewUnit(U, II->Reduced ? "REDUCE" : "NEW "); WriteToOutputCorpus(U); diff --git a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp index 1abce16d70d94..4587f8616b019 100644 --- a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp @@ -101,7 +101,7 @@ size_t MutationDispatcher::Mutate_CustomCrossOver(uint8_t *Data, size_t Size, if (!NewSize) return 0; - assert(NewSize <= MaxSize && "CustomCrossOver returned overisized unit"); + assert(NewSize <= MaxSize && "CustomCrossOver returned oversized unit"); memcpy(Data, U.data(), NewSize); return NewSize; } @@ -413,9 +413,9 @@ size_t ChangeBinaryInteger(uint8_t *Data, size_t Size, Random &Rand) { T Add = static_cast(Rand(21)); Add -= 10; if (Rand.RandBool()) - Val = Bswap(T(Bswap(Val) + Add)); // Add assuming different endiannes. + Val = Bswap(T(Bswap(Val) + Add)); // Add assuming different endianness. else - Val = Val + Add; // Add assuming current endiannes. + Val = Val + Add; // Add assuming current endianness. if (Add == 0 || Rand.RandBool()) // Maybe negate. Val = -Val; } @@ -463,7 +463,7 @@ size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size, default: assert(0); } assert(NewSize > 0 && "CrossOver returned empty unit"); - assert(NewSize <= MaxSize && "CrossOver returned overisized unit"); + assert(NewSize <= MaxSize && "CrossOver returned oversized unit"); return NewSize; } diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp index 2db2ea98d5c4f..45ee15526b275 100644 --- a/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp @@ -24,7 +24,7 @@ // clang-format off #include // These must be included after windows.h. -// archicture need to be set before including +// architecture need to be set before including // libloaderapi #include #include diff --git a/compiler-rt/lib/msan/msan_linux.cpp b/compiler-rt/lib/msan/msan_linux.cpp index 7140de7e9c543..f08a7c98a4847 100644 --- a/compiler-rt/lib/msan/msan_linux.cpp +++ b/compiler-rt/lib/msan/msan_linux.cpp @@ -190,7 +190,15 @@ bool InitShadowWithReExec(bool init_origins) { "possibly due to high-entropy ASLR.\n" "Re-execing with fixed virtual address space.\n" "N.B. 
reducing ASLR entropy is preferable.\n"); - CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1); + + if (personality(old_personality | ADDR_NO_RANDOMIZE) == -1) { + Printf( + "FATAL: MemorySanitizer: unable to disable ASLR (perhaps " + "sandboxing is enabled?).\n"); + Printf("FATAL: Please rerun without sandboxing and/or ASLR.\n"); + Die(); + } + ReExec(); } # endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc index a96d325d08983..b10ce7fa44afc 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc @@ -1326,7 +1326,7 @@ PRCTL_INTERCEPTOR(int, prctl, int option, unsigned long arg2, static const int PR_SET_SECCOMP = 22; static const int SECCOMP_MODE_FILTER = 2; # endif - if (option == PR_SET_VMA && arg2 == 0UL) { + if (option == PR_SET_VMA && arg2 == 0UL && arg5 != 0UL) { char *name = (char *)arg5; COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h index 29987decdff45..88ecd7e16306a 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h @@ -551,7 +551,7 @@ SANITIZER_WEAK_IMPORT void *aligned_alloc(__sanitizer::usize __alignment, #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD) #define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_WCSLEN 1 -#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX +#define SANITIZER_INTERCEPT_WCSCAT (SI_POSIX || SI_WINDOWS) #define SANITIZER_INTERCEPT_WCSDUP SI_POSIX #define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA) #define SANITIZER_INTERCEPT_BSD_SIGNAL SI_ANDROID diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h index c9ba28a52f780..329ec4596482b 100644 --- a/compiler-rt/lib/scudo/standalone/combined.h +++ b/compiler-rt/lib/scudo/standalone/combined.h @@ -101,7 +101,7 @@ class Allocator { Chunk::UnpackedHeader Header = {}; Header.ClassId = QuarantineClassId & Chunk::ClassIdMask; Header.SizeOrUnusedBytes = sizeof(QuarantineBatch); - Header.State = Chunk::State::Allocated; + Header.State = Chunk::State::Quarantined; Chunk::storeHeader(Allocator.Cookie, Ptr, &Header); // Reset tag to 0 as this chunk may have been previously used for a tagged @@ -120,7 +120,7 @@ class Allocator { Chunk::UnpackedHeader Header; Chunk::loadHeader(Allocator.Cookie, Ptr, &Header); - if (UNLIKELY(Header.State != Chunk::State::Allocated)) + if (UNLIKELY(Header.State != Chunk::State::Quarantined)) reportInvalidChunkState(AllocatorAction::Deallocating, Ptr); DCHECK_EQ(Header.ClassId, QuarantineClassId); DCHECK_EQ(Header.Offset, 0); diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h index d08103008ef7c..747b1a2233d32 100644 --- a/compiler-rt/lib/scudo/standalone/primary64.h +++ b/compiler-rt/lib/scudo/standalone/primary64.h @@ -1565,6 +1565,13 @@ bool SizeClassAllocator64::hasChanceToReleasePages( if (DiffSinceLastReleaseNs < 2 * IntervalNs) return false; } else if (DiffSinceLastReleaseNs < IntervalNs) { + // `TryReleaseThreshold` is capped by (1UL << GroupSizeLog) / 2). 
If + // RegionPushedBytesDelta grows to twice the threshold, it implies some + // huge deallocations have happened so we better try to release some + // pages. Note this tends to happen for larger block sizes. + if (RegionPushedBytesDelta > (1ULL << GroupSizeLog)) + return true; + // In this case, we are over the threshold but we just did some page // release in the same release interval. This is a hint that we may want // a higher threshold so that we can release more memory at once. diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp index 1eff9ebcb7a4f..5fdfd1e7c55cc 100644 --- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc; @@ -150,14 +151,8 @@ void TestAllocator::operator delete(void *ptr) { } template struct ScudoCombinedTest : public Test { - ScudoCombinedTest() { - UseQuarantine = std::is_same::value; - Allocator = std::make_unique(); - } - ~ScudoCombinedTest() { - Allocator->releaseToOS(scudo::ReleaseToOS::Force); - UseQuarantine = true; - } + ScudoCombinedTest() { Allocator = std::make_unique(); } + ~ScudoCombinedTest() { Allocator->releaseToOS(scudo::ReleaseToOS::Force); } void RunTest(); @@ -525,30 +520,25 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) { auto *Allocator = this->Allocator.get(); // Allocates a bunch of chunks, then iterate over all the chunks, ensuring // they are the ones we allocated. This requires the allocator to not have any - // other allocated chunk at this point (eg: won't work with the Quarantine). - // FIXME: Make it work with UseQuarantine and tagging enabled. Internals of - // iterateOverChunks reads header by tagged and non-tagger pointers so one of - // them will fail. - if (!UseQuarantine) { - std::vector V; - for (scudo::uptr I = 0; I < 64U; I++) - V.push_back(Allocator->allocate( - static_cast(std::rand()) % - (TypeParam::Primary::SizeClassMap::MaxSize / 2U), - Origin)); - Allocator->disable(); - Allocator->iterateOverChunks( - 0U, static_cast(SCUDO_MMAP_RANGE_SIZE - 1), - [](uintptr_t Base, UNUSED size_t Size, void *Arg) { - std::vector *V = reinterpret_cast *>(Arg); - void *P = reinterpret_cast(Base); - EXPECT_NE(std::find(V->begin(), V->end(), P), V->end()); - }, - reinterpret_cast(&V)); - Allocator->enable(); - for (auto P : V) - Allocator->deallocate(P, Origin); - } + // other allocated chunk at this point. + std::vector V; + for (scudo::uptr I = 0; I < 64U; I++) + V.push_back(Allocator->allocate( + static_cast(std::rand()) % + (TypeParam::Primary::SizeClassMap::MaxSize / 2U), + Origin)); + Allocator->disable(); + Allocator->iterateOverChunks( + 0U, static_cast(SCUDO_MMAP_RANGE_SIZE - 1), + [](uintptr_t Base, UNUSED size_t Size, void *Arg) { + std::vector *V = reinterpret_cast *>(Arg); + void *P = reinterpret_cast(Base); + EXPECT_NE(std::find(V->begin(), V->end(), P), V->end()); + }, + reinterpret_cast(&V)); + Allocator->enable(); + for (auto P : V) + Allocator->deallocate(P, Origin); } SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) { @@ -1053,7 +1043,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) { // which covers only simple operations and ensure the configuration is able to // compile. 
TEST(ScudoCombinedTest, BasicTrustyConfig) { - using AllocatorT = scudo::Allocator; + using AllocatorT = TestAllocator; auto Allocator = std::unique_ptr(new AllocatorT()); for (scudo::uptr ClassId = 1U; @@ -1117,7 +1107,7 @@ struct TestQuarantineConfig { // Verify that the quarantine exists by default. TEST(ScudoCombinedTest, QuarantineEnabled) { - using AllocatorT = scudo::Allocator; + using AllocatorT = TestAllocator; auto Allocator = std::unique_ptr(new AllocatorT()); const scudo::uptr Size = 1000U; @@ -1142,7 +1132,7 @@ struct TestQuarantineDisabledConfig : TestQuarantineConfig { }; TEST(ScudoCombinedTest, QuarantineDisabled) { - using AllocatorT = scudo::Allocator; + using AllocatorT = TestAllocator; auto Allocator = std::unique_ptr(new AllocatorT()); const scudo::uptr Size = 1000U; @@ -1161,3 +1151,34 @@ TEST(ScudoCombinedTest, QuarantineDisabled) { // No quarantine stats should not be present. EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos); } + +// Verify that no special quarantine blocks appear in iterateOverChunks. +TEST(ScudoCombinedTest, QuarantineIterateOverChunks) { + using AllocatorT = TestAllocator; + auto Allocator = std::unique_ptr(new AllocatorT()); + + // Do a bunch of allocations and deallocations. At the end there should + // be no special quarantine blocks in our callbacks, and no blocks at all. + std::vector Sizes = {128, 128, 256, 256}; + for (auto const Size : Sizes) { + void *Ptr = Allocator->allocate(Size, Origin); + EXPECT_NE(Ptr, nullptr); + Allocator->deallocate(Ptr, Origin); + } + std::unordered_map Pointers; + Allocator->disable(); + Allocator->iterateOverChunks( + 0, static_cast(SCUDO_MMAP_RANGE_SIZE - 1), + [](uintptr_t Base, size_t Size, void *Arg) { + std::unordered_map *Pointers = + reinterpret_cast *>(Arg); + (*Pointers)[Base] = Size; + }, + reinterpret_cast(&Pointers)); + Allocator->enable(); + + for (const auto [Base, Size] : Pointers) { + EXPECT_TRUE(false) << "Unexpected pointer found in iterateOverChunks " + << std::hex << Base << " Size " << std::dec << Size; + } +} diff --git a/compiler-rt/test/asan/TestCases/Posix/coverage-reset.cpp b/compiler-rt/test/asan/TestCases/Posix/coverage-reset.cpp index e3524fced6b4e..48319906a99f1 100644 --- a/compiler-rt/test/asan/TestCases/Posix/coverage-reset.cpp +++ b/compiler-rt/test/asan/TestCases/Posix/coverage-reset.cpp @@ -1,7 +1,7 @@ // RUN: rm -rf %t.dir && mkdir -p %t.dir && cd %t.dir // RUN: %clangxx_asan -fsanitize-coverage=func,trace-pc-guard -DSHARED %s -shared -o %dynamiclib -fPIC %ld_flags_rpath_so -// RUN: %clangxx_asan -fsanitize-coverage=func,trace-pc-guard %s %ld_flags_rpath_exe -o %t.dir/EXE -// RUN: %env_asan_opts=coverage=1:verbosity=1 %run %t.dir/EXE 2>&1 | tee /tmp/test +// RUN: %clangxx_asan -fsanitize-coverage=func,trace-pc-guard %s %ld_flags_rpath_exe -o %t.dir/coverage-reset +// RUN: %env_asan_opts=coverage=1:verbosity=1 %run %t.dir/coverage-reset 2>&1 | FileCheck %s // // UNSUPPORTED: ios @@ -27,7 +27,7 @@ int main(int argc, char **argv) { bar2(); __sanitizer_cov_dump(); // CHECK: RESET -// CHECK-DAG: SanitizerCoverage: ./coverage-reset.cpp{{.*}}.sancov: 2 PCs written +// CHECK-DAG: SanitizerCoverage: ./coverage-reset{{.*}}.sancov: 2 PCs written // CHECK-DAG: SanitizerCoverage: ./libcoverage-reset.cpp{{.*}}.sancov: 2 PCs written fprintf(stderr, "RESET\n"); @@ -36,7 +36,7 @@ int main(int argc, char **argv) { bar1(); __sanitizer_cov_dump(); // CHECK: RESET -// CHECK-DAG: SanitizerCoverage: ./coverage-reset.cpp{{.*}}.sancov: 1 PCs written +// CHECK-DAG: SanitizerCoverage: 
./coverage-reset{{.*}}.sancov: 1 PCs written // CHECK-DAG: SanitizerCoverage: ./libcoverage-reset.cpp{{.*}}.sancov: 1 PCs written fprintf(stderr, "RESET\n"); @@ -45,7 +45,7 @@ int main(int argc, char **argv) { foo2(); __sanitizer_cov_dump(); // CHECK: RESET -// CHECK: SanitizerCoverage: ./coverage-reset.cpp{{.*}}.sancov: 2 PCs written +// CHECK: SanitizerCoverage: ./coverage-reset{{.*}}.sancov: 2 PCs written fprintf(stderr, "RESET\n"); __sanitizer_cov_reset(); diff --git a/compiler-rt/test/asan/TestCases/wcscat.cpp b/compiler-rt/test/asan/TestCases/wcscat.cpp new file mode 100644 index 0000000000000..f0a8ec12580b3 --- /dev/null +++ b/compiler-rt/test/asan/TestCases/wcscat.cpp @@ -0,0 +1,26 @@ +// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK + +#include +#include + +int main() { + const wchar_t *start = L"X means "; + const wchar_t *append = L"dog"; + wchar_t goodDst[12]; + wcscpy(goodDst, start); + wcscat(goodDst, append); + + wchar_t badDst[9]; + wcscpy(badDst, start); + fprintf(stderr, "Good so far.\n"); + // CHECK: Good so far. + fflush(stderr); + wcscat(badDst, append); // Boom! + // CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} + // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0 + // CHECK: #0 {{0x[0-9a-f]+}} in wcscat + printf("Should have failed with ASAN error.\n"); +} \ No newline at end of file diff --git a/compiler-rt/test/asan/TestCases/wcscpy.cpp b/compiler-rt/test/asan/TestCases/wcscpy.cpp new file mode 100644 index 0000000000000..a280d29289e37 --- /dev/null +++ b/compiler-rt/test/asan/TestCases/wcscpy.cpp @@ -0,0 +1,23 @@ +// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK + +#include +#include + +int main() { + const wchar_t *src = L"X means dog"; + wchar_t goodDst[12]; + wcscpy(goodDst, src); + + wchar_t badDst[7]; + fprintf(stderr, "Good so far.\n"); + // CHECK: Good so far. + fflush(stderr); + wcscpy(badDst, src); // Boom! 
+ // CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} + // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0 + // CHECK: #0 {{0x[0-9a-f]+}} in wcscpy + printf("Should have failed with ASAN error.\n"); +} \ No newline at end of file diff --git a/compiler-rt/test/asan/TestCases/wcsncat.cpp b/compiler-rt/test/asan/TestCases/wcsncat.cpp new file mode 100644 index 0000000000000..eb7d095e45c7a --- /dev/null +++ b/compiler-rt/test/asan/TestCases/wcsncat.cpp @@ -0,0 +1,27 @@ +// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK + +#include +#include + +int main() { + const wchar_t *start = L"X means "; + const wchar_t *append = L"dog"; + wchar_t goodDst[15]; + wcscpy(goodDst, start); + wcsncat(goodDst, append, 5); + + wchar_t badDst[11]; + wcscpy(badDst, start); + wcsncat(badDst, append, 1); + fprintf(stderr, "Good so far.\n"); + // CHECK: Good so far. + fflush(stderr); + wcsncat(badDst, append, 3); // Boom! + // CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} + // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0 + // CHECK: #0 {{0x[0-9a-f]+}} in wcsncat + printf("Should have failed with ASAN error.\n"); +} \ No newline at end of file diff --git a/compiler-rt/test/asan/TestCases/wcsncpy.cpp b/compiler-rt/test/asan/TestCases/wcsncpy.cpp new file mode 100644 index 0000000000000..1106bf5d264e5 --- /dev/null +++ b/compiler-rt/test/asan/TestCases/wcsncpy.cpp @@ -0,0 +1,25 @@ +// RUN: %clangxx_asan -O0 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O1 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O2 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK +// RUN: %clangxx_asan -O3 %s -o %t && not %env_asan_opts=log_to_stderr=1 %run %t 2>&1 | FileCheck %s --check-prefix=CHECK + +#include +#include + +int main() { + const wchar_t *src = L"X means dog"; + wchar_t goodDst[12]; + wcsncpy(goodDst, src, 12); + + wchar_t badDst[7]; + wcsncpy(badDst, src, 7); // This should still work. + fprintf(stderr, "Good so far.\n"); + // CHECK: Good so far. + fflush(stderr); + + wcsncpy(badDst, src, 15); // Boom! 
+ // CHECK: ERROR: AddressSanitizer: stack-buffer-overflow on address [[ADDR:0x[0-9a-f]+]] at pc {{0x[0-9a-f]+}} bp {{0x[0-9a-f]+}} sp {{0x[0-9a-f]+}} + // CHECK: WRITE of size {{[0-9]+}} at [[ADDR]] thread T0 + // CHECK: #0 {{0x[0-9a-f]+}} in wcsncpy + printf("Should have failed with ASAN error.\n"); +} \ No newline at end of file diff --git a/compiler-rt/test/builtins/Unit/fixunstfdi_test.c b/compiler-rt/test/builtins/Unit/fixunstfdi_test.c index 982f3a4629dbd..526ba5ca80cf6 100644 --- a/compiler-rt/test/builtins/Unit/fixunstfdi_test.c +++ b/compiler-rt/test/builtins/Unit/fixunstfdi_test.c @@ -1,24 +1,25 @@ -// XFAIL: target=aarch64-{{.*}}-windows-{{.*}} // RUN: %clang_builtins %s %librt -o %t && %run %t // REQUIRES: librt_has_fixunstfdi #include +#include "int_lib.h" -#if _ARCH_PPC || __aarch64__ || __arm64ec__ +#if defined(CRT_HAS_TF_MODE) -#include "int_lib.h" +#define QUAD_PRECISION +#include "fp_lib.h" // Returns: convert a to a unsigned long long, rounding toward zero. // Negative values all become zero. -// Assumption: long double is a 128 bit floating point type +// Assumption: fp_t is a 128 bit floating point type // du_int is a 64 bit integral type -// value in long double is representable in du_int or is negative +// value in fp_t is representable in du_int or is negative // (no range checking performed) -COMPILER_RT_ABI du_int __fixunstfdi(long double a); +COMPILER_RT_ABI du_int __fixunstfdi(fp_t a); -int test__fixunstfdi(long double a, du_int expected) +int test__fixunstfdi(fp_t a, du_int expected) { du_int x = __fixunstfdi(a); if (x != expected) @@ -29,13 +30,13 @@ int test__fixunstfdi(long double a, du_int expected) char assumption_1[sizeof(du_int) == 2*sizeof(su_int)] = {0}; char assumption_2[sizeof(du_int)*CHAR_BIT == 64] = {0}; -char assumption_3[sizeof(long double)*CHAR_BIT == 128] = {0}; +char assumption_3[sizeof(fp_t)*CHAR_BIT == 128] = {0}; #endif int main() { -#if _ARCH_PPC || __aarch64__ || __arm64ec__ +#if defined(CRT_HAS_TF_MODE) if (test__fixunstfdi(0.0, 0)) return 1; diff --git a/compiler-rt/test/builtins/Unit/multc3_test.c b/compiler-rt/test/builtins/Unit/multc3_test.c index e9c99a72be35e..18561cc344437 100644 --- a/compiler-rt/test/builtins/Unit/multc3_test.c +++ b/compiler-rt/test/builtins/Unit/multc3_test.c @@ -1,24 +1,26 @@ -// XFAIL: target=aarch64-{{.*}}-windows-{{.*}} // RUN: %clang_builtins %s %librt -o %t && %run %t // REQUIRES: librt_has_multc3 #include +#include "int_lib.h" -#if _ARCH_PPC || __aarch64__ || __arm64ec__ +#if defined(CRT_HAS_128BIT) && defined(CRT_HAS_F128) + +#define QUAD_PRECISION +#include "fp_lib.h" -#include "int_lib.h" #include #include // Returns: the product of a + ib and c + id -COMPILER_RT_ABI long double _Complex -__multc3(long double __a, long double __b, long double __c, long double __d); +COMPILER_RT_ABI Qcomplex +__multc3(fp_t __a, fp_t __b, fp_t __c, fp_t __d); enum {zero, non_zero, inf, NaN, non_zero_nan}; int -classify(long double _Complex x) +classify(Qcomplex x) { if (x == 0) return zero; @@ -41,13 +43,13 @@ classify(long double _Complex x) return non_zero; } -int test__multc3(long double a, long double b, long double c, long double d) +int test__multc3(fp_t a, fp_t b, fp_t c, fp_t d) { - long double _Complex r = __multc3(a, b, c, d); + Qcomplex r = __multc3(a, b, c, d); // printf("test__multc3(%Lf, %Lf, %Lf, %Lf) = %Lf + I%Lf\n", // a, b, c, d, creall(r), cimagl(r)); - long double _Complex dividend; - long double _Complex divisor; + Qcomplex dividend; + Qcomplex divisor; __real__ dividend = a; __imag__ dividend = 
b; @@ -188,7 +190,7 @@ int test__multc3(long double a, long double b, long double c, long double d) return 0; } -long double x[][2] = +fp_t x[][2] = { { 1.e-6, 1.e-6}, {-1.e-6, 1.e-6}, @@ -348,7 +350,7 @@ long double x[][2] = int main() { -#if _ARCH_PPC || __aarch64__ || __arm64ec__ +#if defined(CRT_HAS_128BIT) && defined(CRT_HAS_F128) const unsigned N = sizeof(x) / sizeof(x[0]); unsigned i, j; for (i = 0; i < N; ++i) diff --git a/compiler-rt/test/sanitizer_common/TestCases/Linux/prctl.cpp b/compiler-rt/test/sanitizer_common/TestCases/Linux/prctl.cpp index dab1d1b48f868..afce9dc03dada 100644 --- a/compiler-rt/test/sanitizer_common/TestCases/Linux/prctl.cpp +++ b/compiler-rt/test/sanitizer_common/TestCases/Linux/prctl.cpp @@ -88,5 +88,8 @@ int main() { res = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &pr); assert(res == -1); + unsigned long name = reinterpret_cast(nullptr); + prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, 0, nullptr, name); + return 0; } diff --git a/compiler-rt/test/xray/TestCases/Posix/patching-unpatching.c b/compiler-rt/test/xray/TestCases/Posix/patching-unpatching.c new file mode 100644 index 0000000000000..2dbc68142dbc5 --- /dev/null +++ b/compiler-rt/test/xray/TestCases/Posix/patching-unpatching.c @@ -0,0 +1,56 @@ +// Check that we can patch and un-patch on demand, and that logging gets invoked +// appropriately. +// +// Do not run on powerpc64le, as linking XRay with C compiler causes linker error +// due to std::__throw_system_error(int) being present in XRay libraries. +// See https://github.com/llvm/llvm-project/issues/141598 +// +// RUN: %clang_xray -fxray-instrument -std=c23 %s -o %t +// RUN: env XRAY_OPTIONS="patch_premain=false" %run %t 2>&1 | FileCheck %s +// RUN: %clang_xray -fxray-instrument -fno-xray-function-index -std=c23 %s -o %t +// RUN: env XRAY_OPTIONS="patch_premain=false" %run %t 2>&1 | FileCheck %s + +// UNSUPPORTED: target-is-mips64,target-is-mips64el +// UNSUPPORTED: target=powerpc64le-{{.*}} + +#include "xray/xray_interface.h" + +#include + +bool called = false; + +void test_handler(int32_t fid, enum XRayEntryType type) { + printf("called: %d, type=%d\n", fid, (int32_t)(type)); + called = true; +} + +[[clang::xray_always_instrument]] void always_instrument() { + printf("always instrumented called\n"); +} + +int main() { + __xray_set_handler(test_handler); + always_instrument(); + // CHECK: always instrumented called + auto status = __xray_patch(); + printf("patching status: %d\n", (int32_t)status); + // CHECK-NEXT: patching status: 1 + always_instrument(); + // CHECK-NEXT: called: {{.*}}, type=0 + // CHECK-NEXT: always instrumented called + // CHECK-NEXT: called: {{.*}}, type=1 + status = __xray_unpatch(); + printf("patching status: %d\n", (int32_t)status); + // CHECK-NEXT: patching status: 1 + always_instrument(); + // CHECK-NEXT: always instrumented called + status = __xray_patch(); + printf("patching status: %d\n", (int32_t)status); + // CHECK-NEXT: patching status: 1 + __xray_remove_handler(); + always_instrument(); + // CHECK-NEXT: always instrumented called + status = __xray_unpatch(); + printf("patching status: %d\n", (int32_t)status); + // CHECK-NEXT: patching status: 1 +} diff --git a/cross-project-tests/CMakeLists.txt b/cross-project-tests/CMakeLists.txt index 192db87043177..8e94579736537 100644 --- a/cross-project-tests/CMakeLists.txt +++ b/cross-project-tests/CMakeLists.txt @@ -21,9 +21,11 @@ set(CROSS_PROJECT_TEST_DEPS count llvm-ar llvm-config + llvm-dis llvm-dwarfdump llvm-objdump not + obj2yaml split-file ) diff --git 
a/flang-rt/cmake/modules/AddFlangRT.cmake b/flang-rt/cmake/modules/AddFlangRT.cmake index 43b1451d65e32..ab383bcbe2cdf 100644 --- a/flang-rt/cmake/modules/AddFlangRT.cmake +++ b/flang-rt/cmake/modules/AddFlangRT.cmake @@ -251,8 +251,15 @@ function (add_flangrt_library name) $<$:-nogpulib -flto -fvisibility=hidden -Wno-unknown-cuda-version --cuda-feature=+ptx63> ) elseif (APPLE) + # Clang on Darwin enables non-POSIX extensions by default. + # This causes some macros to leak, such as HUGE from , which + # causes some conflicts with Flang symbols (but not with Flang-RT, for + # now). + # It also causes some Flang-RT extensions to be disabled, such as fdate, + # that checks for _POSIX_C_SOURCE. + # Setting _POSIX_C_SOURCE avoids these issues. target_compile_options(${tgtname} PRIVATE - $<$:${DARWIN_osx_BUILTIN_MIN_VER_FLAG}> + $<$:${DARWIN_osx_BUILTIN_MIN_VER_FLAG} -D_POSIX_C_SOURCE=200809> ) endif () diff --git a/flang-rt/include/flang-rt/runtime/format-implementation.h b/flang-rt/include/flang-rt/runtime/format-implementation.h index de06524de32d3..46134146f5c13 100644 --- a/flang-rt/include/flang-rt/runtime/format-implementation.h +++ b/flang-rt/include/flang-rt/runtime/format-implementation.h @@ -532,7 +532,7 @@ RT_API_ATTRS common::optional FormatControl::GetNextDataEdit( ReportBadFormat(context, "Excessive DT'iotype' in FORMAT", start); return common::nullopt; } - edit.ioType[edit.ioTypeChars++] = ch; + context.ioType[edit.ioTypeChars++] = ch; if (ch == quote) { ++offset_; } @@ -556,7 +556,7 @@ RT_API_ATTRS common::optional FormatControl::GetNextDataEdit( ReportBadFormat(context, "Excessive DT(v_list) in FORMAT", start); return common::nullopt; } - edit.vList[edit.vListEntries++] = n; + context.vList[edit.vListEntries++] = n; auto ch{static_cast(GetNextChar(context))}; if (ch != ',') { ok = ch == ')'; diff --git a/flang-rt/include/flang-rt/runtime/format.h b/flang-rt/include/flang-rt/runtime/format.h index 34e33edae546d..79a7dd713b1a1 100644 --- a/flang-rt/include/flang-rt/runtime/format.h +++ b/flang-rt/include/flang-rt/runtime/format.h @@ -86,12 +86,11 @@ struct DataEdit { // defined I/O data edit descriptor RT_OFFLOAD_VAR_GROUP_BEGIN static constexpr std::size_t maxIoTypeChars{32}; - static constexpr std::size_t maxVListEntries{4}; + static constexpr std::size_t maxVListEntries{16}; RT_OFFLOAD_VAR_GROUP_END std::uint8_t ioTypeChars{0}; std::uint8_t vListEntries{0}; char ioType[maxIoTypeChars]; - int vList[maxVListEntries]; }; // Generates a sequence of DataEdits from a FORMAT statement or diff --git a/flang-rt/include/flang-rt/runtime/io-error.h b/flang-rt/include/flang-rt/runtime/io-error.h index 3e8401036f289..0ac1183131808 100644 --- a/flang-rt/include/flang-rt/runtime/io-error.h +++ b/flang-rt/include/flang-rt/runtime/io-error.h @@ -67,6 +67,17 @@ class IoErrorHandler : public Terminator { RT_API_ATTRS int GetIoStat() const { return ioStat_; } RT_API_ATTRS bool GetIoMsg(char *, std::size_t); + // Sets the HasEnd flag so that EOF isn't fatal; used to peek ahead + RT_API_ATTRS bool SetHasEnd(bool yes = true) { + bool oldValue{(flags_ & hasEnd) != 0}; + if (yes) { + flags_ |= hasEnd; + } else { + flags_ &= ~hasEnd; + } + return oldValue; + } + private: enum Flag : std::uint8_t { hasIoStat = 1, // IOSTAT= diff --git a/flang-rt/include/flang-rt/runtime/io-stmt.h b/flang-rt/include/flang-rt/runtime/io-stmt.h index 03b6efd65ddfc..3de2309954a40 100644 --- a/flang-rt/include/flang-rt/runtime/io-stmt.h +++ b/flang-rt/include/flang-rt/runtime/io-stmt.h @@ -61,8 +61,14 @@ using 
IoDirectionState = std::conditional_t; // Common state for all kinds of formatted I/O -template class FormattedIoStatementState {}; -template <> class FormattedIoStatementState { +struct DefinedIoArgs { + char ioType[DataEdit::maxIoTypeChars]; // IOTYPE string + int vList[DataEdit::maxVListEntries]; // V_LIST(:) values +}; +template +class FormattedIoStatementState : public DefinedIoArgs {}; +template <> +class FormattedIoStatementState : public DefinedIoArgs { public: RT_API_ATTRS std::size_t GetEditDescriptorChars() const; RT_API_ATTRS void GotChar(int); @@ -149,9 +155,7 @@ class IoStatementState { : connection_{connection} {} RT_API_ATTRS FastAsciiField( ConnectionState &connection, const char *start, std::size_t bytes) - : connection_{connection}, at_{start}, limit_{start + bytes} { - CheckForAsterisk(); - } + : connection_{connection}, at_{start}, limit_{start + bytes} {} RT_API_ATTRS ConnectionState &connection() { return connection_; } RT_API_ATTRS std::size_t got() const { return got_; } @@ -168,7 +172,6 @@ class IoStatementState { if (at_) { if (std::size_t bytes{io.GetNextInputBytes(at_)}) { limit_ = at_ + bytes; - CheckForAsterisk(); } else { at_ = limit_ = nullptr; } @@ -181,19 +184,28 @@ class IoStatementState { } connection_.HandleRelativePosition(bytes); } - RT_API_ATTRS bool MightHaveAsterisk() const { return !at_ || hasAsterisk_; } - private: - RT_API_ATTRS void CheckForAsterisk() { - hasAsterisk_ = at_ && at_ < limit_ && - runtime::memchr(at_, '*', limit_ - at_) != nullptr; + // Could there be a list-directed repetition count here? + RT_API_ATTRS bool MightBeRepetitionCount() const { + if (!at_) { + return true; // must use slow path for internal KIND/=1 input + } else { + if (const char *p{at_}; *p >= '0' && *p <= '9') { + while (++p < limit_) { + if (*p < '0' || *p > '9') { + return *p == '*'; + } + } + } + return false; + } } + private: ConnectionState &connection_; const char *at_{nullptr}; const char *limit_{nullptr}; std::size_t got_{0}; // for READ(..., SIZE=) - bool hasAsterisk_{false}; }; RT_API_ATTRS FastAsciiField GetUpcomingFastAsciiField(); @@ -697,6 +709,13 @@ class ChildListIoStatementState : public ChildIoStatementState, using ListDirectedStatementState::GetNextDataEdit; RT_API_ATTRS bool AdvanceRecord(int = 1); RT_API_ATTRS int EndIoStatement(); + RT_API_ATTRS bool CanAdvance() { + return DIR == Direction::Input && + (canAdvance_ || this->mutableModes().inNamelist); + } + +private: + bool canAdvance_{false}; }; template diff --git a/flang-rt/lib/cuda/memory.cpp b/flang-rt/lib/cuda/memory.cpp index d830580e6a066..78270fef07c36 100644 --- a/flang-rt/lib/cuda/memory.cpp +++ b/flang-rt/lib/cuda/memory.cpp @@ -25,23 +25,22 @@ extern "C" { void *RTDEF(CUFMemAlloc)( std::size_t bytes, unsigned type, const char *sourceFile, int sourceLine) { void *ptr = nullptr; - if (bytes != 0) { - if (type == kMemTypeDevice) { - if (Fortran::runtime::executionEnvironment.cudaDeviceIsManaged) { - CUDA_REPORT_IF_ERROR( - cudaMallocManaged((void **)&ptr, bytes, cudaMemAttachGlobal)); - } else { - CUDA_REPORT_IF_ERROR(cudaMalloc((void **)&ptr, bytes)); - } - } else if (type == kMemTypeManaged || type == kMemTypeUnified) { + bytes = bytes ? 
bytes : 1; + if (type == kMemTypeDevice) { + if (Fortran::runtime::executionEnvironment.cudaDeviceIsManaged) { CUDA_REPORT_IF_ERROR( cudaMallocManaged((void **)&ptr, bytes, cudaMemAttachGlobal)); - } else if (type == kMemTypePinned) { - CUDA_REPORT_IF_ERROR(cudaMallocHost((void **)&ptr, bytes)); } else { - Terminator terminator{sourceFile, sourceLine}; - terminator.Crash("unsupported memory type"); + CUDA_REPORT_IF_ERROR(cudaMalloc((void **)&ptr, bytes)); } + } else if (type == kMemTypeManaged || type == kMemTypeUnified) { + CUDA_REPORT_IF_ERROR( + cudaMallocManaged((void **)&ptr, bytes, cudaMemAttachGlobal)); + } else if (type == kMemTypePinned) { + CUDA_REPORT_IF_ERROR(cudaMallocHost((void **)&ptr, bytes)); + } else { + Terminator terminator{sourceFile, sourceLine}; + terminator.Crash("unsupported memory type"); } return ptr; } diff --git a/flang-rt/lib/runtime/derived-api.cpp b/flang-rt/lib/runtime/derived-api.cpp index bb08e0397fe9c..fe6868292f019 100644 --- a/flang-rt/lib/runtime/derived-api.cpp +++ b/flang-rt/lib/runtime/derived-api.cpp @@ -118,14 +118,26 @@ bool RTDEF(SameTypeAs)(const Descriptor &a, const Descriptor &b) { } bool RTDEF(ExtendsTypeOf)(const Descriptor &a, const Descriptor &mold) { + // The wording of the standard indicates null or unallocated checks take + // precedence over the extension checks which take precedence over any + // compiler specific behavior. + // F'23 16.9.86 p 5 + // If MOLD is unlimited polymorphic and is either a disassociated pointer or + // unallocated allocatable variable, the result is true; auto aType{a.raw().type}; auto moldType{mold.raw().type}; if ((aType != CFI_type_struct && aType != CFI_type_other) || (moldType != CFI_type_struct && moldType != CFI_type_other)) { - // If either type is intrinsic, they must match. - return aType == moldType; - } else if (const typeInfo::DerivedType * - derivedTypeMold{GetDerivedType(mold)}) { + if (!mold.IsAllocated()) { + return true; + } else if (!a.IsAllocated()) { + return false; + } else { + // If either type is intrinsic and not a pointer or allocatable + // then they must match. + return aType == moldType; + } + } else if (const auto *derivedTypeMold{GetDerivedType(mold)}) { // If A is unlimited polymorphic and is either a disassociated pointer or // unallocated allocatable, the result is false. // Otherwise if the dynamic type of A or MOLD is extensible, the result is diff --git a/flang-rt/lib/runtime/descriptor-io.cpp b/flang-rt/lib/runtime/descriptor-io.cpp index e00072510aff7..e599e624fe02e 100644 --- a/flang-rt/lib/runtime/descriptor-io.cpp +++ b/flang-rt/lib/runtime/descriptor-io.cpp @@ -47,9 +47,11 @@ static RT_API_ATTRS common::optional DefinedFormattedIo( const typeInfo::DerivedType &derived, const typeInfo::SpecialBinding &special, const SubscriptValue subscripts[]) { - // Look at the next data edit descriptor. If this is list-directed I/O, the - // "maxRepeat=0" argument will prevent the input from advancing over an + // Look at the next data edit descriptor. If this is list-directed input, + // the "maxRepeat=0" argument will prevent the input from advancing over an // initial '(' that shouldn't be consumed now as the start of a real part. + // It also allows reaching EOF without crashing, since the EOF only matters + // if a child READ is actually performed. 
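The ExtendsTypeOf() rework in flang-rt/lib/runtime/derived-api.cpp above makes the null/unallocated checks of F'23 16.9.86 take precedence over the intrinsic-type comparison. A minimal Fortran sketch of the resulting behavior (not taken from the patch's tests; variable names are illustrative):

    program demo
      class(*), allocatable :: a, mold
      allocate(integer :: a)
      ! MOLD is an unallocated unlimited polymorphic: result is .true.
      print *, extends_type_of(a, mold)
      allocate(real :: mold)
      ! Both allocated with intrinsic dynamic types: they must match, so .false.
      print *, extends_type_of(a, mold)
      deallocate(a)
      ! A is an unallocated unlimited polymorphic and MOLD is not: .false.
      print *, extends_type_of(a, mold)
    end program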
common::optional peek{io.GetNextDataEdit(/*maxRepeat=*/0)}; if (peek && (peek->descriptor == DataEdit::DefinedDerivedType || @@ -62,10 +64,11 @@ static RT_API_ATTRS common::optional DefinedFormattedIo( : *io.GetNextDataEdit(1)}; char ioType[2 + edit.maxIoTypeChars]; auto ioTypeLen{std::size_t{2} /*"DT"*/ + edit.ioTypeChars}; + auto &definedIoArgs{*io.get_if()}; if (edit.descriptor == DataEdit::DefinedDerivedType) { ioType[0] = 'D'; ioType[1] = 'T'; - runtime::memcpy(ioType + 2, edit.ioType, edit.ioTypeChars); + runtime::memcpy(ioType + 2, definedIoArgs.ioType, edit.ioTypeChars); } else { runtime::strcpy( ioType, io.mutableModes().inNamelist ? "NAMELIST" : "LISTDIRECTED"); @@ -79,7 +82,7 @@ static RT_API_ATTRS common::optional DefinedFormattedIo( if (integer8) { // Convert v_list values to INTEGER(8) for (int j{0}; j < edit.vListEntries; ++j) { - vList64[j] = edit.vList[j]; + vList64[j] = definedIoArgs.vList[j]; } vListDesc.Establish( TypeCategory::Integer, sizeof(std::int64_t), nullptr, 1); @@ -89,7 +92,7 @@ static RT_API_ATTRS common::optional DefinedFormattedIo( static_cast(sizeof(std::int64_t))); } else { vListDesc.Establish(TypeCategory::Integer, sizeof(int), nullptr, 1); - vListDesc.set_base_addr(edit.vList); + vListDesc.set_base_addr(definedIoArgs.vList); vListDesc.GetDimension(0).SetBounds(1, edit.vListEntries); vListDesc.GetDimension(0).SetByteStride( static_cast(sizeof(int))); diff --git a/flang-rt/lib/runtime/edit-input.cpp b/flang-rt/lib/runtime/edit-input.cpp index 6ab546ee59f74..436fc3894d902 100644 --- a/flang-rt/lib/runtime/edit-input.cpp +++ b/flang-rt/lib/runtime/edit-input.cpp @@ -53,11 +53,13 @@ static RT_API_ATTRS bool EditBOZInput( IoStatementState &io, const DataEdit &edit, void *n, std::size_t bytes) { // Skip leading white space & zeroes common::optional remaining{io.CueUpInput(edit)}; - auto start{io.GetConnectionState().positionInRecord}; + const ConnectionState &connection{io.GetConnectionState()}; + auto leftTabLimit{connection.leftTabLimit.value_or(0)}; + auto start{connection.positionInRecord - leftTabLimit}; common::optional next{io.NextInField(remaining, edit)}; if (next.value_or('?') == '0') { do { - start = io.GetConnectionState().positionInRecord; + start = connection.positionInRecord - leftTabLimit; next = io.NextInField(remaining, edit); } while (next && *next == '0'); } @@ -447,7 +449,9 @@ static RT_API_ATTRS ScannedRealInput ScanRealInput( } // In list-directed input, a bad exponent is not consumed. auto nextBeforeExponent{next}; - auto startExponent{io.GetConnectionState().positionInRecord}; + const ConnectionState &connection{io.GetConnectionState()}; + auto leftTabLimit{connection.leftTabLimit.value_or(0)}; + auto startExponent{connection.positionInRecord - leftTabLimit}; bool hasGoodExponent{false}; if (next) { if (isHexadecimal) { diff --git a/flang-rt/lib/runtime/io-stmt.cpp b/flang-rt/lib/runtime/io-stmt.cpp index e260c0ca7511d..b958f23cf5342 100644 --- a/flang-rt/lib/runtime/io-stmt.cpp +++ b/flang-rt/lib/runtime/io-stmt.cpp @@ -880,6 +880,9 @@ ListDirectedStatementState::GetNextDataEdit( edit.descriptor = DataEdit::ListDirectedImaginaryPart; } auto fastField{io.GetUpcomingFastAsciiField()}; + // Reaching EOF is okay when peeking at list-directed defined input; + // pretend that there's an END= in that case. + bool oldHasEnd{maxRepeat == 0 && !io.GetIoErrorHandler().SetHasEnd()}; auto ch{io.GetNextNonBlank(byteCount, &fastField)}; if (ch && *ch == comma && eatComma_) { // Consume comma & whitespace after previous item. 
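The DefinedFormattedIo() changes above read the DT edit descriptor's iotype and v_list out of the per-statement DefinedIoArgs storage (and format.h now allows 16 v_list entries). A hedged sketch of the Fortran-level feature being exercised, with hypothetical names, where the child read is itself list-directed:

    module m
      type :: t
        integer :: n = 0
      contains
        procedure :: rd
        generic :: read(formatted) => rd
      end type
    contains
      ! Standard defined-input interface: for the format below, iotype is
      ! "DTmytype" and v_list is [4, 2].
      subroutine rd(dtv, unit, iotype, v_list, iostat, iomsg)
        class(t), intent(inout) :: dtv
        integer, intent(in) :: unit
        character(*), intent(in) :: iotype
        integer, intent(in) :: v_list(:)
        integer, intent(out) :: iostat
        character(*), intent(inout) :: iomsg
        read (unit, *, iostat=iostat, iomsg=iomsg) dtv%n   ! child list-directed read
      end subroutine
    end module

    program p
      use m
      type(t) :: x
      read (*, "(DT'mytype'(4,2))") x
    end program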
@@ -890,23 +893,27 @@ ListDirectedStatementState::GetNextDataEdit( ch = io.GetNextNonBlank(byteCount, &fastField); } eatComma_ = true; - if (!ch) { - return common::nullopt; + if (maxRepeat == 0 && !oldHasEnd) { + io.GetIoErrorHandler().SetHasEnd(false); } - if (*ch == '/') { + if (!ch) { // EOF + if (maxRepeat == 0) { + return edit; // DataEdit::ListDirected for look-ahead + } else { + return common::nullopt; + } + } else if (*ch == '/') { hitSlash_ = true; edit.descriptor = DataEdit::ListDirectedNullValue; return edit; - } - if (*ch == comma) { // separator: null value + } else if (*ch == comma) { // separator: null value edit.descriptor = DataEdit::ListDirectedNullValue; return edit; - } - if (imaginaryPart_) { // can't repeat components + } else if (imaginaryPart_) { // can't repeat components return edit; } - if (*ch >= '0' && *ch <= '9' && fastField.MightHaveAsterisk()) { - // look for "r*" repetition count + if (*ch >= '0' && *ch <= '9' && fastField.MightBeRepetitionCount()) { + // There's decimal digits followed by '*'. auto start{fastField.connection().positionInRecord}; int r{0}; do { @@ -1103,10 +1110,19 @@ ChildListIoStatementState::ChildListIoStatementState( : ChildIoStatementState{child, sourceFile, sourceLine} { #if !defined(RT_DEVICE_AVOID_RECURSION) if constexpr (DIR == Direction::Input) { - if (auto *listInput{child.parent() + if (const auto *listInput{child.parent() .get_if>()}) { this->set_eatComma(listInput->eatComma()); this->namelistGroup_ = listInput->namelistGroup(); + if (auto *childListInput{child.parent() + .get_if>()}) { + // Child list input whose parent is child list input: can advance + // if the parent can. + this->canAdvance_ = childListInput->CanAdvance(); + } else { + // Child list input of top-level list input: can advance. + this->canAdvance_ = true; + } } } #else @@ -1117,12 +1133,7 @@ ChildListIoStatementState::ChildListIoStatementState( template bool ChildListIoStatementState::AdvanceRecord(int n) { #if !defined(RT_DEVICE_AVOID_RECURSION) - // Allow child NAMELIST input to advance - if (DIR == Direction::Input && this->mutableModes().inNamelist) { - return this->child().parent().AdvanceRecord(n); - } else { - return false; - } + return this->CanAdvance() && this->child().parent().AdvanceRecord(n); #else this->ReportUnsupportedChildIo(); #endif diff --git a/flang-rt/unittests/CMakeLists.txt b/flang-rt/unittests/CMakeLists.txt index fd63ad11dcf43..53cd54dfd215e 100644 --- a/flang-rt/unittests/CMakeLists.txt +++ b/flang-rt/unittests/CMakeLists.txt @@ -78,6 +78,15 @@ function(add_flangrt_dependent_libs target) instead falls back to builtins from Compiler-RT. Linking with ${tgtname} may result in a linker error.") endif () + elseif (APPLE) + # Clang on Darwin enables non-POSIX extensions by default. + # This causes some macros to leak, such as HUGE from , which + # causes some conflicts with Flang symbols (but not with Flang-RT, for + # now). + # It also causes some Flang-RT extensions to be disabled, such as fdate, + # that checks for _POSIX_C_SOURCE. + # Setting _POSIX_C_SOURCE avoids these issues. 
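The MightBeRepetitionCount() fast-path check used above serves list-directed input like the following sketch: digits are a repetition count only when a '*' follows them, and the scanner must not confuse them with an ordinary numeric value.

    program p
      real :: x(3)
      ! With an input record of "3*1.25", list-directed input repeats the value
      ! three times; a record such as "31.25 0 0" has no '*' after the digits
      ! and is read as ordinary values.
      read (*, *) x
      print *, x   ! 1.25 1.25 1.25 for the "3*1.25" record
    end program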
+ target_compile_options(${target} PRIVATE "-D_POSIX_C_SOURCE=200809") endif () endfunction() diff --git a/flang-rt/unittests/Runtime/CUDA/Memory.cpp b/flang-rt/unittests/Runtime/CUDA/Memory.cpp index f2e17870f7999..c84c54a1376e5 100644 --- a/flang-rt/unittests/Runtime/CUDA/Memory.cpp +++ b/flang-rt/unittests/Runtime/CUDA/Memory.cpp @@ -35,6 +35,12 @@ TEST(MemoryCUFTest, SimpleAllocTramsferFree) { RTNAME(CUFMemFree)((void *)dev, kMemTypeDevice, __FILE__, __LINE__); } +TEST(MemoryCUFTest, AllocZero) { + int *dev = (int *)RTNAME(CUFMemAlloc)(0, kMemTypeDevice, __FILE__, __LINE__); + EXPECT_TRUE(dev != 0); + RTNAME(CUFMemFree)((void *)dev, kMemTypeDevice, __FILE__, __LINE__); +} + static OwningPtr createAllocatable( Fortran::common::TypeCategory tc, int kind, int rank = 1) { return Descriptor::Create(TypeCode{tc, kind}, kind, nullptr, rank, nullptr, diff --git a/flang-rt/unittests/Runtime/Format.cpp b/flang-rt/unittests/Runtime/Format.cpp index fe7403f26700b..cd52dc8c54ed5 100644 --- a/flang-rt/unittests/Runtime/Format.cpp +++ b/flang-rt/unittests/Runtime/Format.cpp @@ -22,7 +22,7 @@ using namespace std::literals::string_literals; using ResultsTy = std::vector; // A test harness context for testing FormatControl -class TestFormatContext : public IoErrorHandler { +class TestFormatContext : public IoErrorHandler, public DefinedIoArgs { public: using CharType = char; TestFormatContext() : IoErrorHandler{"format.cpp", 1} {} diff --git a/flang/docs/ComplexOperations.md b/flang/docs/ComplexOperations.md index 3ebeea5e0a540..1b6ec527b446a 100644 --- a/flang/docs/ComplexOperations.md +++ b/flang/docs/ComplexOperations.md @@ -93,7 +93,9 @@ While [the same option in clang][2] allows specifying `promoted`, this is not implemented in Flang. Also, in the case of `improved`, clang does not handle NaN and infinite values, but Flang does. These behavioral differences arise because the transformation of complex division calculations depends on the implementation -of ComplexToStandard, which may change in the future. +of ComplexToStandard, which may change in the future. If you specify +`-ffast-math`, the lowering is the same as specifiying +`-fcomplex-arithmetic=basic`. [1]: https://discourse.llvm.org/t/rfc-change-lowering-of-fortran-math-intrinsics/63971 [2]: https://clang.llvm.org/docs/UsersManual.html#cmdoption-fcomplex-arithmetic diff --git a/flang/docs/Extensions.md b/flang/docs/Extensions.md index c442a9cd6859e..420b7517922b7 100644 --- a/flang/docs/Extensions.md +++ b/flang/docs/Extensions.md @@ -557,6 +557,17 @@ end generic intrinsic function's inferred result type does not match an explicit declaration. This message is a warning. +* There is no restriction in the standard against assigning + to a whole polymorphic allocatable under control of a `WHERE` + construct or statement, but there is no good portable + behavior to implement and the standard isn't entirely clear + what it should mean. + (Other compilers allow it, but the results are never meaningful; + some never change the type, some change the type according to + the value of the last mask element, some treat these + assignment statements as no-ops, and the rest crash during compilation.) + The compiler flags this case as an error. + ## Standard features that might as well not be * f18 supports designators with constant expressions, properly @@ -917,6 +928,17 @@ print *, [(j,j=1,10)] and the portable interpretation across the most common Fortran compilers. +* `NAMELIST` child input statements are allowed to advance to further + input records. 
+ Further, advancement is allowed when the parent input statement is + a non-child (top level) list-directed input statement, or, recursively, + an intermediate child list-directed input statement that can advance. + This means that non-`NAMELIST` list-directed child input statements are + not allowed to advance when they have an ancestor formatted input statement + that is not list-directed and there is no intervening `NAMELIST`. + This design allows format-driven input with `DT` editing to retain + control over advancement in child input, while otherwise allowing it. + ## De Facto Standard Features * `EXTENDS_TYPE_OF()` returns `.TRUE.` if both of its arguments have the diff --git a/flang/docs/FlangDriver.md b/flang/docs/FlangDriver.md index 2b7d9d4ae6908..3286171bb1499 100644 --- a/flang/docs/FlangDriver.md +++ b/flang/docs/FlangDriver.md @@ -573,6 +573,9 @@ documentation for more details. These correspond to LLVM IR Fast Math attributes: https://llvm.org/docs/LangRef.html#fast-math-flags +In addition to the above, `-ffast-math` also enables +`-fcomplex-arithmetic=basic`. + When `-ffast-math` is specified, any linker steps generated by the compiler driver will also link to `crtfastmath.o`, which adds a static constructor that sets the FTZ/DAZ bits in MXCSR, affecting not only the current only the diff --git a/flang/docs/ParserCombinators.md b/flang/docs/ParserCombinators.md index 076e76f703c49..72cc4ba00307b 100644 --- a/flang/docs/ParserCombinators.md +++ b/flang/docs/ParserCombinators.md @@ -63,6 +63,7 @@ These objects and functions are (or return) the fundamental parsers: the value that the parser never returns. * `nextCh` consumes the next character and returns its location, and fails at EOF. +* `consumedAllInput` is equivalent, but preferable, to `!nextCh`. * `"xyz"_ch` succeeds if the next character consumed matches any of those in the string and returns its location. Be advised that the source will have been normalized to lower case (miniscule) letters outside diff --git a/flang/examples/FeatureList/FeatureList.cpp b/flang/examples/FeatureList/FeatureList.cpp index 569d2b2307f36..daa012e3eb08b 100644 --- a/flang/examples/FeatureList/FeatureList.cpp +++ b/flang/examples/FeatureList/FeatureList.cpp @@ -451,9 +451,6 @@ struct NodeVisitor { READ_FEATURE(OmpBlockConstruct) READ_FEATURE(OmpClause) READ_FEATURE(OmpClauseList) - READ_FEATURE(OmpDeclareTargetSpecifier) - READ_FEATURE(OmpDeclareTargetWithClause) - READ_FEATURE(OmpDeclareTargetWithList) READ_FEATURE(OmpDefaultClause) READ_FEATURE(OmpDefaultClause::DataSharingAttribute) READ_FEATURE(OmpDefaultmapClause) diff --git a/flang/include/flang/Evaluate/intrinsics.h b/flang/include/flang/Evaluate/intrinsics.h index dbe1ba7fe7ec1..fc1c8b2ba6ab7 100644 --- a/flang/include/flang/Evaluate/intrinsics.h +++ b/flang/include/flang/Evaluate/intrinsics.h @@ -86,6 +86,7 @@ class IntrinsicProcTable { bool IsIntrinsic(const std::string &) const; bool IsIntrinsicFunction(const std::string &) const; bool IsIntrinsicSubroutine(const std::string &) const; + bool IsDualIntrinsic(const std::string &) const; // Inquiry intrinsics are defined in section 16.7, table 16.1 IntrinsicClass GetIntrinsicClass(const std::string &) const; diff --git a/flang/include/flang/Evaluate/tools.h b/flang/include/flang/Evaluate/tools.h index 5f2f199e778c7..d8d0956369e40 100644 --- a/flang/include/flang/Evaluate/tools.h +++ b/flang/include/flang/Evaluate/tools.h @@ -1521,6 +1521,9 @@ bool IsVarSubexpressionOf( // it returns std::nullopt. 
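A minimal example of the construct that the new Extensions.md note above says is now rejected: masked assignment to a whole polymorphic allocatable, whose meaning the standard leaves unclear. (Illustrative only; the type names are hypothetical.)

    program p
      type :: base
      end type
      type, extends(base) :: ext
      end type
      class(base), allocatable :: a(:)
      logical :: mask(2) = [.true., .false.]
      allocate(base :: a(2))
      where (mask)
        a = [ext(), ext()]   ! flagged as an error: whole polymorphic allocatable under WHERE
      end where
    end program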
std::optional> GetConvertInput(const Expr &x); +// How many ancestors does have a derived type have? +std::optional CountDerivedTypeAncestors(const semantics::Scope &); + } // namespace Fortran::evaluate namespace Fortran::semantics { diff --git a/flang/include/flang/Frontend/CodeGenOptions.h b/flang/include/flang/Frontend/CodeGenOptions.h index df6063cc90340..3dca169d43b39 100644 --- a/flang/include/flang/Frontend/CodeGenOptions.h +++ b/flang/include/flang/Frontend/CodeGenOptions.h @@ -168,6 +168,13 @@ class CodeGenOptions : public CodeGenOptionsBase { /// by -fprofile-sample-use or -fprofile-instr-use. std::string ProfileRemappingFile; + /// The name for the split debug info file used for the DW_AT_[GNU_]dwo_name + /// attribute in the skeleton CU. + std::string SplitDwarfFile; + + /// Output filename for the split debug info, not used in the skeleton CU. + std::string SplitDwarfOutput; + /// Check if Clang profile instrumenation is on. bool hasProfileClangInstr() const { return getProfileInstr() == llvm::driver::ProfileClangInstr; diff --git a/flang/include/flang/Lower/OpenACC.h b/flang/include/flang/Lower/OpenACC.h index 19d759479abaf..4622dbc8ccf64 100644 --- a/flang/include/flang/Lower/OpenACC.h +++ b/flang/include/flang/Lower/OpenACC.h @@ -77,7 +77,8 @@ static constexpr llvm::StringRef privatizationRecipePrefix = "privatization"; mlir::Value genOpenACCConstruct(AbstractConverter &, Fortran::semantics::SemanticsContext &, pft::Evaluation &, - const parser::OpenACCConstruct &); + const parser::OpenACCConstruct &, + Fortran::lower::SymMap &localSymbols); void genOpenACCDeclarativeConstruct( AbstractConverter &, Fortran::semantics::SemanticsContext &, StatementContext &, const parser::OpenACCDeclarativeConstruct &); diff --git a/flang/include/flang/Lower/OpenMP/Clauses.h b/flang/include/flang/Lower/OpenMP/Clauses.h index 5267a58c7e7f7..5cd196a7869a2 100644 --- a/flang/include/flang/Lower/OpenMP/Clauses.h +++ b/flang/include/flang/Lower/OpenMP/Clauses.h @@ -243,6 +243,7 @@ using Initializer = tomp::clause::InitializerT; using InReduction = tomp::clause::InReductionT; using IsDevicePtr = tomp::clause::IsDevicePtrT; using Lastprivate = tomp::clause::LastprivateT; +using LoopRange = tomp::clause::LoopRangeT; using Linear = tomp::clause::LinearT; using Link = tomp::clause::LinkT; using Map = tomp::clause::MapT; diff --git a/flang/include/flang/Lower/SymbolMap.h b/flang/include/flang/Lower/SymbolMap.h index 813df777d7a39..e57b6a42d3cc1 100644 --- a/flang/include/flang/Lower/SymbolMap.h +++ b/flang/include/flang/Lower/SymbolMap.h @@ -260,6 +260,10 @@ class SymMap { return lookupSymbol(*sym); } + /// Find a symbol by name and return its value if it appears in the current + /// mappings. This lookup is more expensive as it iterates over the map. + const semantics::Symbol *lookupSymbolByName(llvm::StringRef symName); + /// Find `symbol` and return its value if it appears in the inner-most level /// map. 
SymbolBox shallowLookupSymbol(semantics::SymbolRef sym); diff --git a/flang/include/flang/Optimizer/Builder/FIRBuilder.h b/flang/include/flang/Optimizer/Builder/FIRBuilder.h index 4b3087ed45788..d3af3bafbf279 100644 --- a/flang/include/flang/Optimizer/Builder/FIRBuilder.h +++ b/flang/include/flang/Optimizer/Builder/FIRBuilder.h @@ -959,6 +959,15 @@ mlir::Value genLifetimeStart(mlir::OpBuilder &builder, mlir::Location loc, void genLifetimeEnd(mlir::OpBuilder &builder, mlir::Location loc, mlir::Value mem); +/// Given a fir.box or fir.class \p box describing an entity and a raw address +/// \p newAddr for an entity with the same Fortran properties (rank, dynamic +/// type, length parameters and bounds) and attributes (POINTER or ALLOCATABLE), +/// create a box for \p newAddr with the same type as \p box. This assumes \p +/// newAddr is for contiguous storage (\p box does not have to be contiguous). +mlir::Value getDescriptorWithNewBaseAddress(fir::FirOpBuilder &builder, + mlir::Location loc, mlir::Value box, + mlir::Value newAddr); + } // namespace fir::factory #endif // FORTRAN_OPTIMIZER_BUILDER_FIRBUILDER_H diff --git a/flang/include/flang/Optimizer/HLFIR/Passes.td b/flang/include/flang/Optimizer/HLFIR/Passes.td index 04d7aec5fe489..bfff458f7a6c5 100644 --- a/flang/include/flang/Optimizer/HLFIR/Passes.td +++ b/flang/include/flang/Optimizer/HLFIR/Passes.td @@ -61,6 +61,10 @@ def SimplifyHLFIRIntrinsics : Pass<"simplify-hlfir-intrinsics"> { "the hlfir.matmul.">]; } +def ExpressionSimplification : Pass<"hlfir-expression-simplification"> { + let summary = "Simplify Fortran expressions"; +} + def InlineElementals : Pass<"inline-elementals"> { let summary = "Inline chained hlfir.elemental operations"; } diff --git a/flang/include/flang/Optimizer/Passes/Pipelines.h b/flang/include/flang/Optimizer/Passes/Pipelines.h index f9c41b382abe5..682dd829239ef 100644 --- a/flang/include/flang/Optimizer/Passes/Pipelines.h +++ b/flang/include/flang/Optimizer/Passes/Pipelines.h @@ -158,7 +158,8 @@ void createOpenMPFIRPassPipeline(mlir::PassManager &pm, void createDebugPasses(mlir::PassManager &pm, llvm::codegenoptions::DebugInfoKind debugLevel, llvm::OptimizationLevel OptLevel, - llvm::StringRef inputFilename, int32_t dwarfVersion); + llvm::StringRef inputFilename, int32_t dwarfVersion, + llvm::StringRef splitDwarfFile); void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm, MLIRToLLVMPassPipelineConfig config, diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td index 88573fa9dff7d..e2ba9c3522837 100644 --- a/flang/include/flang/Optimizer/Transforms/Passes.td +++ b/flang/include/flang/Optimizer/Transforms/Passes.td @@ -246,6 +246,10 @@ def AddDebugInfo : Pass<"add-debug-info", "mlir::ModuleOp"> { "int32_t", /*default=*/"0", "dwarf version">, + Option<"splitDwarfFile", "split-dwarf-file", + "std::string", /*default=*/"std::string{}", + "Name of the split dwarf file"> + ]; } diff --git a/flang/include/flang/Parser/dump-parse-tree.h b/flang/include/flang/Parser/dump-parse-tree.h index b2341226c7688..14885293fd5eb 100644 --- a/flang/include/flang/Parser/dump-parse-tree.h +++ b/flang/include/flang/Parser/dump-parse-tree.h @@ -522,6 +522,7 @@ class ParseTreeDumper { NODE(parser, OmpAtomicDefaultMemOrderClause) NODE(parser, OmpAutomapModifier) NODE_ENUM(OmpAutomapModifier, Value) + NODE(parser, OmpBaseVariantNames) NODE(parser, OmpBeginDirective) NODE(parser, OmpBeginLoopDirective) NODE(parser, OmpBeginSectionsDirective) @@ -537,9 +538,6 @@ 
class ParseTreeDumper { NODE_ENUM(OmpCloseModifier, Value) NODE(parser, OmpContainsClause) NODE(parser, OmpContextSelectorSpecification) - NODE(parser, OmpDeclareTargetSpecifier) - NODE(parser, OmpDeclareTargetWithClause) - NODE(parser, OmpDeclareTargetWithList) NODE(parser, OmpDeclareVariantDirective) NODE(parser, OmpDefaultClause) NODE_ENUM(OmpDefaultClause, DataSharingAttribute) @@ -615,6 +613,7 @@ class ParseTreeDumper { NODE_ENUM(OmpLinearModifier, Value) NODE(parser, OmpLocator) NODE(parser, OmpLocatorList) + NODE(parser, OmpLoopRangeClause) NODE(parser, OmpMapClause) NODE(OmpMapClause, Modifier) NODE(parser, OmpMapper) diff --git a/flang/include/flang/Parser/message.h b/flang/include/flang/Parser/message.h index 7da9e12999db1..224263e4be860 100644 --- a/flang/include/flang/Parser/message.h +++ b/flang/include/flang/Parser/message.h @@ -65,6 +65,8 @@ class MessageFixedText { return severity_ == Severity::Error || severity_ == Severity::Todo; } + static const MessageFixedText endOfFileMessage; // "end of file"_err_en_US + private: CharBlock text_; Severity severity_{Severity::None}; diff --git a/flang/include/flang/Parser/openmp-utils.h b/flang/include/flang/Parser/openmp-utils.h index 2e4fa4093b87c..f761332c9cfd7 100644 --- a/flang/include/flang/Parser/openmp-utils.h +++ b/flang/include/flang/Parser/openmp-utils.h @@ -38,15 +38,8 @@ struct ConstructId { static constexpr llvm::omp::Directive id{Id}; \ } -MAKE_CONSTR_ID(OmpDeclareVariantDirective, D::OMPD_declare_variant); MAKE_CONSTR_ID(OpenMPDeclarativeAllocate, D::OMPD_allocate); -MAKE_CONSTR_ID(OpenMPDeclarativeAssumes, D::OMPD_assumes); -MAKE_CONSTR_ID(OpenMPDeclareMapperConstruct, D::OMPD_declare_mapper); -MAKE_CONSTR_ID(OpenMPDeclareReductionConstruct, D::OMPD_declare_reduction); -MAKE_CONSTR_ID(OpenMPDeclareSimdConstruct, D::OMPD_declare_simd); -MAKE_CONSTR_ID(OpenMPDeclareTargetConstruct, D::OMPD_declare_target); MAKE_CONSTR_ID(OpenMPExecutableAllocate, D::OMPD_allocate); -MAKE_CONSTR_ID(OpenMPRequiresConstruct, D::OMPD_requires); #undef MAKE_CONSTR_ID @@ -59,6 +52,10 @@ struct DirectiveNameScope { return name; } + static OmpDirectiveName GetOmpDirectiveName(const OmpDirectiveName &x) { + return x; + } + static OmpDirectiveName GetOmpDirectiveName(const OmpBeginLoopDirective &x) { return x.DirName(); } @@ -93,15 +90,8 @@ struct DirectiveNameScope { } else if constexpr (TupleTrait) { if constexpr (std::is_base_of_v) { return std::get(x.t).DirName(); - } else if constexpr (std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v) { + } else if constexpr (std::is_same_v || + std::is_same_v) { return MakeName(std::get(x.t).source, ConstructId::id); } else { return GetFromTuple( diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h index 516dd298f8beb..325ca9b4a227b 100644 --- a/flang/include/flang/Parser/parse-tree.h +++ b/flang/include/flang/Parser/parse-tree.h @@ -3555,6 +3555,18 @@ struct OmpLocator { WRAPPER_CLASS(OmpLocatorList, std::list); +// Ref: [4.5:58-60], [5.0:58-60], [5.1:63-68], [5.2:197-198], [6.0:334-336] +// +// Argument to DECLARE VARIANT with the base-name present. (When only +// variant-name is present, it is a simple OmpObject). 
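OmpBaseVariantNames above represents the optional `base-name :` prefix in a DECLARE VARIANT argument. A schematic sketch with hypothetical procedure names (with only `f_gpu` between the parentheses, the argument would remain a plain OmpObject):

    subroutine g()
      interface
        subroutine f()
        end subroutine
        subroutine f_gpu()
        end subroutine
      end interface
      ! base-name : variant-name form of the argument
      !$omp declare variant(f: f_gpu) match(construct={target})
      call f()
    end subroutine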
+// +// base-name-variant-name -> // since 4.5 +// base-name : variant-name +struct OmpBaseVariantNames { + TUPLE_CLASS_BOILERPLATE(OmpBaseVariantNames); + std::tuple t; +}; + // Ref: [5.0:326:10-16], [5.1:359:5-11], [5.2:163:2-7], [6.0:293:16-21] // // mapper-specifier -> @@ -3584,6 +3596,7 @@ struct OmpArgument { CharBlock source; UNION_CLASS_BOILERPLATE(OmpArgument); std::variant u; }; @@ -4533,6 +4546,15 @@ struct OmpLinearClause { std::tuple t; }; +// Ref: [6.0:207-208] +// +// loop-range-clause -> +// LOOPRANGE(first, count) // since 6.0 +struct OmpLoopRangeClause { + TUPLE_CLASS_BOILERPLATE(OmpLoopRangeClause); + std::tuple t; +}; + // Ref: [4.5:216-219], [5.0:315-324], [5.1:347-355], [5.2:150-158] // // map-clause -> @@ -4864,8 +4886,8 @@ struct OpenMPUtilityConstruct { // ASSUMES absent-clause | contains-clause | holds-clause | no-openmp-clause | // no-openmp-routines-clause | no-parallelism-clause struct OpenMPDeclarativeAssumes { - TUPLE_CLASS_BOILERPLATE(OpenMPDeclarativeAssumes); - std::tuple t; + WRAPPER_CLASS_BOILERPLATE( + OpenMPDeclarativeAssumes, OmpDirectiveSpecification); CharBlock source; }; @@ -4920,61 +4942,51 @@ struct OpenMPSectionsConstruct { t; }; +// Ref: [4.5:58-60], [5.0:58-60], [5.1:63-68], [5.2:197-198], [6.0:334-336] +// +// declare-variant-directive -> +// DECLARE_VARIANT([base-name:]variant-name) // since 4.5 struct OmpDeclareVariantDirective { - TUPLE_CLASS_BOILERPLATE(OmpDeclareVariantDirective); - CharBlock source; - std::tuple, Name, OmpClauseList> t; -}; - -// 2.10.6 declare-target -> DECLARE TARGET (extended-list) | -// DECLARE TARGET [declare-target-clause[ [,] -// declare-target-clause]...] -struct OmpDeclareTargetWithList { - WRAPPER_CLASS_BOILERPLATE(OmpDeclareTargetWithList, OmpObjectList); - CharBlock source; -}; - -struct OmpDeclareTargetWithClause { - WRAPPER_CLASS_BOILERPLATE(OmpDeclareTargetWithClause, OmpClauseList); + WRAPPER_CLASS_BOILERPLATE( + OmpDeclareVariantDirective, OmpDirectiveSpecification); CharBlock source; }; -struct OmpDeclareTargetSpecifier { - UNION_CLASS_BOILERPLATE(OmpDeclareTargetSpecifier); - std::variant u; -}; - +// Ref: [4.5:110-113], [5.0:180-185], [5.1:210-216], [5.2:206-207], +// [6.0:346-348] +// +// declare-target-directive -> // since 4.5 +// DECLARE_TARGET[(extended-list)] | +// DECLARE_TARGET clause-list struct OpenMPDeclareTargetConstruct { - TUPLE_CLASS_BOILERPLATE(OpenMPDeclareTargetConstruct); + WRAPPER_CLASS_BOILERPLATE( + OpenMPDeclareTargetConstruct, OmpDirectiveSpecification); CharBlock source; - std::tuple t; }; // OMP v5.2: 5.8.8 // declare-mapper -> DECLARE MAPPER ([mapper-name :] type :: var) map-clauses struct OpenMPDeclareMapperConstruct { - TUPLE_CLASS_BOILERPLATE(OpenMPDeclareMapperConstruct); + WRAPPER_CLASS_BOILERPLATE( + OpenMPDeclareMapperConstruct, OmpDirectiveSpecification); CharBlock source; - std::tuple t; }; // ref: 5.2: Section 5.5.11 139-141 // 2.16 declare-reduction -> DECLARE REDUCTION (reduction-identifier : type-list // : combiner) [initializer-clause] struct OpenMPDeclareReductionConstruct { - TUPLE_CLASS_BOILERPLATE(OpenMPDeclareReductionConstruct); + WRAPPER_CLASS_BOILERPLATE( + OpenMPDeclareReductionConstruct, OmpDirectiveSpecification); CharBlock source; - std::tuple, - std::optional> - t; }; // 2.8.2 declare-simd -> DECLARE SIMD [(proc-name)] [declare-simd-clause[ [,] // declare-simd-clause]...] 
struct OpenMPDeclareSimdConstruct { - TUPLE_CLASS_BOILERPLATE(OpenMPDeclareSimdConstruct); + WRAPPER_CLASS_BOILERPLATE( + OpenMPDeclareSimdConstruct, OmpDirectiveSpecification); CharBlock source; - std::tuple, OmpClauseList> t; }; // ref: [6.0:301-303] @@ -4988,9 +5000,8 @@ struct OpenMPGroupprivate { // 2.4 requires -> REQUIRES requires-clause[ [ [,] requires-clause]...] struct OpenMPRequiresConstruct { - TUPLE_CLASS_BOILERPLATE(OpenMPRequiresConstruct); + WRAPPER_CLASS_BOILERPLATE(OpenMPRequiresConstruct, OmpDirectiveSpecification); CharBlock source; - std::tuple t; }; // 2.15.2 threadprivate -> THREADPRIVATE (variable-name-list) diff --git a/flang/include/flang/Semantics/openmp-utils.h b/flang/include/flang/Semantics/openmp-utils.h index 08b67167f5de2..2954a1c4769f7 100644 --- a/flang/include/flang/Semantics/openmp-utils.h +++ b/flang/include/flang/Semantics/openmp-utils.h @@ -37,6 +37,8 @@ template > U AsRvalue(T &t) { template T &&AsRvalue(T &&t) { return std::move(t); } +const Scope &GetScopingUnit(const Scope &scope); + // There is no consistent way to get the source of an ActionStmt, but there // is "source" in Statement. This structure keeps the ActionStmt with the // extracted source for further use. diff --git a/flang/include/flang/Semantics/symbol.h b/flang/include/flang/Semantics/symbol.h index e90e9c617805d..a0d5ae7176141 100644 --- a/flang/include/flang/Semantics/symbol.h +++ b/flang/include/flang/Semantics/symbol.h @@ -801,7 +801,7 @@ class Symbol { AccPrivate, AccFirstPrivate, AccShared, // OpenACC data-mapping attribute AccCopy, AccCopyIn, AccCopyInReadOnly, AccCopyOut, AccCreate, AccDelete, - AccPresent, AccLink, AccDeviceResident, AccDevicePtr, + AccPresent, AccLink, AccDeviceResident, AccDevicePtr, AccUseDevice, // OpenACC declare AccDeclare, // OpenACC data-movement attribute diff --git a/flang/include/flang/Support/Fortran-features.h b/flang/include/flang/Support/Fortran-features.h index 2bbc2385777da..51364d552be64 100644 --- a/flang/include/flang/Support/Fortran-features.h +++ b/flang/include/flang/Support/Fortran-features.h @@ -76,7 +76,7 @@ ENUM_CLASS(UsageWarning, Portability, PointerToUndefinable, IndexVarRedefinition, IncompatibleImplicitInterfaces, CdefinedInit, VectorSubscriptFinalization, UndefinedFunctionResult, UselessIomsg, MismatchingDummyProcedure, SubscriptedEmptyArray, UnsignedLiteralTruncation, - CompatibleDeclarationsFromDistinctModules, + CompatibleDeclarationsFromDistinctModules, ConstantIsContiguous, NullActualForDefaultIntentAllocatable, UseAssociationIntoSameNameSubprogram, HostAssociatedIntentOutInSpecExpr, NonVolatilePointerToVolatile, RealConstantWidening, VolatileOrAsynchronousTemporary) diff --git a/flang/include/flang/Support/LangOptions.def b/flang/include/flang/Support/LangOptions.def index ba72d7b4b7212..e7185c836f45b 100644 --- a/flang/include/flang/Support/LangOptions.def +++ b/flang/include/flang/Support/LangOptions.def @@ -60,7 +60,8 @@ LANGOPT(OpenMPNoThreadState, 1, 0) LANGOPT(OpenMPNoNestedParallelism, 1, 0) /// Use SIMD only OpenMP support. 
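The new UsageWarning::ConstantIsContiguous added above is emitted when IS_CONTIGUOUS() is folded over a named constant (see the fold-logical.cpp change later in this patch). A small example that would now draw the warning:

    program p
      integer, parameter :: a(5) = [1, 2, 3, 4, 5]
      ! Folds to .true. at compilation: the argument is a subobject of a named
      ! constant, so the inquiry is always true.
      print *, is_contiguous(a(1:5:2))
    end program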
LANGOPT(OpenMPSimd, 1, false) - +/// Enable fast MOD operations for REAL +LANGOPT(NoFastRealMod, 1, false) LANGOPT(VScaleMin, 32, 0) ///< Minimum vscale range value LANGOPT(VScaleMax, 32, 0) ///< Maximum vscale range value diff --git a/flang/include/flang/Tools/CrossToolHelpers.h b/flang/include/flang/Tools/CrossToolHelpers.h index 01c34eee014f3..850bd1f0940f7 100644 --- a/flang/include/flang/Tools/CrossToolHelpers.h +++ b/flang/include/flang/Tools/CrossToolHelpers.h @@ -109,6 +109,7 @@ struct MLIRToLLVMPassPipelineConfig : public FlangEPCallBacks { InstrumentFunctionExit = "__cyg_profile_func_exit"; } DwarfVersion = opts.DwarfVersion; + SplitDwarfFile = opts.SplitDwarfFile; } llvm::OptimizationLevel OptLevel; ///< optimisation level @@ -146,6 +147,7 @@ struct MLIRToLLVMPassPipelineConfig : public FlangEPCallBacks { Fortran::frontend::CodeGenOptions::ComplexRangeKind:: CX_Full; ///< Method for calculating complex number division int32_t DwarfVersion = 0; ///< Version of DWARF debug info to generate + std::string SplitDwarfFile = ""; ///< File name for the split debug info }; struct OffloadModuleOpts { diff --git a/flang/lib/Evaluate/constant.cpp b/flang/lib/Evaluate/constant.cpp index 990339958399e..7fe000892ac1a 100644 --- a/flang/lib/Evaluate/constant.cpp +++ b/flang/lib/Evaluate/constant.cpp @@ -9,6 +9,7 @@ #include "flang/Evaluate/constant.h" #include "flang/Evaluate/expression.h" #include "flang/Evaluate/shape.h" +#include "flang/Evaluate/tools.h" #include "flang/Evaluate/type.h" #include @@ -390,6 +391,17 @@ std::size_t Constant::CopyFrom(const Constant &source, } bool ComponentCompare::operator()(SymbolRef x, SymbolRef y) const { + if (&x->owner() != &y->owner()) { + // Not components of the same derived type; put ancestors' components first. + if (auto xDepth{CountDerivedTypeAncestors(x->owner())}) { + if (auto yDepth{CountDerivedTypeAncestors(y->owner())}) { + if (*xDepth != *yDepth) { + return *xDepth < *yDepth; + } + } + } + } + // Same derived type, distinct instantiations, or error recovery. 
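The ComponentCompare change above orders components from distinct derived-type scopes by ancestor depth, so inherited components sort ahead of the extension's own components. A small illustration with hypothetical types:

    program p
      type :: base
        integer :: i
      end type
      type, extends(base) :: ext
        integer :: j
      end type
      type(ext) :: x
      ! i lives in BASE's scope (depth 0) and j in EXT's (depth 1), matching the
      ! canonical order in which parent components come first.
      x = ext(i = 1, j = 2)
      print *, x
    end program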
return semantics::SymbolSourcePositionCompare{}(x, y); } diff --git a/flang/lib/Evaluate/fold-logical.cpp b/flang/lib/Evaluate/fold-logical.cpp index c64f79e06a8ac..449c316802d6a 100644 --- a/flang/lib/Evaluate/fold-logical.cpp +++ b/flang/lib/Evaluate/fold-logical.cpp @@ -799,12 +799,20 @@ Expr> FoldIntrinsicFunction( } } else if (name == "is_contiguous") { if (args.at(0)) { + auto warnContiguous{[&]() { + if (auto source{args[0]->sourceLocation()}) { + context.Warn(common::UsageWarning::ConstantIsContiguous, *source, + "is_contiguous() is always true for named constants and subobjects of named constants"_warn_en_US); + } + }}; if (auto *expr{args[0]->UnwrapExpr()}) { if (auto contiguous{IsContiguous(*expr, context)}) { + warnContiguous(); return Expr{*contiguous}; } } else if (auto *assumedType{args[0]->GetAssumedTypeDummy()}) { if (auto contiguous{IsContiguous(*assumedType, context)}) { + warnContiguous(); return Expr{*contiguous}; } } diff --git a/flang/lib/Evaluate/intrinsics.cpp b/flang/lib/Evaluate/intrinsics.cpp index c7f174f7989dd..fe679da4ff98b 100644 --- a/flang/lib/Evaluate/intrinsics.cpp +++ b/flang/lib/Evaluate/intrinsics.cpp @@ -1674,7 +1674,7 @@ static const IntrinsicInterface intrinsicSubroutine[]{ {"to", SameIntOrUnsigned, Rank::elemental, Optionality::required, common::Intent::Out}, {"topos", AnyInt}}, - {}, Rank::elemental, IntrinsicClass::elementalSubroutine}, // elemental + {}, Rank::elemental, IntrinsicClass::elementalSubroutine}, {"random_init", {{"repeatable", AnyLogical, Rank::scalar}, {"image_distinct", AnyLogical, Rank::scalar}}, @@ -2903,7 +2903,7 @@ bool IntrinsicProcTable::Implementation::IsDualIntrinsic( // Collection for some intrinsics with function and subroutine form, // in order to pass the semantic check. static const std::string dualIntrinsic[]{{"chdir"}, {"etime"}, {"fseek"}, - {"ftell"}, {"getcwd"}, {"hostnm"}, {"putenv"s}, {"rename"}, {"second"}, + {"ftell"}, {"getcwd"}, {"hostnm"}, {"putenv"}, {"rename"}, {"second"}, {"system"}, {"unlink"}}; return llvm::is_contained(dualIntrinsic, name); } @@ -3766,6 +3766,9 @@ bool IntrinsicProcTable::IsIntrinsicFunction(const std::string &name) const { bool IntrinsicProcTable::IsIntrinsicSubroutine(const std::string &name) const { return DEREF(impl_.get()).IsIntrinsicSubroutine(name); } +bool IntrinsicProcTable::IsDualIntrinsic(const std::string &name) const { + return DEREF(impl_.get()).IsDualIntrinsic(name); +} IntrinsicClass IntrinsicProcTable::GetIntrinsicClass( const std::string &name) const { diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp index 1f3cbbf6a0c36..3cfad03648aee 100644 --- a/flang/lib/Evaluate/tools.cpp +++ b/flang/lib/Evaluate/tools.cpp @@ -1950,6 +1950,33 @@ bool IsVarSubexpressionOf( return VariableFinder{sub}(super); } +std::optional CountDerivedTypeAncestors(const semantics::Scope &scope) { + if (scope.IsDerivedType()) { + for (auto iter{scope.cbegin()}; iter != scope.cend(); ++iter) { + const Symbol &symbol{*iter->second}; + if (symbol.test(Symbol::Flag::ParentComp)) { + if (const semantics::DeclTypeSpec *type{symbol.GetType()}) { + if (const semantics::DerivedTypeSpec *derived{type->AsDerived()}) { + const semantics::Scope *parent{derived->scope()}; + if (!parent) { + parent = derived->typeSymbol().scope(); + } + if (parent) { + if (auto parentDepth{CountDerivedTypeAncestors(*parent)}) { + return 1 + *parentDepth; + } + } + } + } + return std::nullopt; // error recovery + } + } + return 0; + } else { + return std::nullopt; // error recovery + } +} + } // 
namespace Fortran::evaluate namespace Fortran::semantics { diff --git a/flang/lib/Frontend/CompilerInvocation.cpp b/flang/lib/Frontend/CompilerInvocation.cpp index 09b51730d6216..548ca675db5ea 100644 --- a/flang/lib/Frontend/CompilerInvocation.cpp +++ b/flang/lib/Frontend/CompilerInvocation.cpp @@ -160,6 +160,12 @@ static bool parseDebugArgs(Fortran::frontend::CodeGenOptions &opts, opts.DwarfVersion = getLastArgIntValue(args, clang::driver::options::OPT_dwarf_version_EQ, /*Default=*/0, diags); + if (const llvm::opt::Arg *a = + args.getLastArg(clang::driver::options::OPT_split_dwarf_file)) + opts.SplitDwarfFile = a->getValue(); + if (const llvm::opt::Arg *a = + args.getLastArg(clang::driver::options::OPT_split_dwarf_output)) + opts.SplitDwarfOutput = a->getValue(); } return true; } @@ -1419,6 +1425,9 @@ static bool parseFloatingPointArgs(CompilerInvocation &invoc, opts.setFPContractMode(Fortran::common::LangOptions::FPM_Fast); } + if (args.hasArg(clang::driver::options::OPT_fno_fast_real_mod)) + opts.NoFastRealMod = true; + return true; } diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp index db6b98998785c..0c630d2ba876d 100644 --- a/flang/lib/Frontend/FrontendActions.cpp +++ b/flang/lib/Frontend/FrontendActions.cpp @@ -277,6 +277,14 @@ bool CodeGenAction::beginSourceFileAction() { ci.getInvocation().getLangOpts().OpenMPVersion); } + if (ci.getInvocation().getLangOpts().NoFastRealMod) { + mlir::ModuleOp mod = lb.getModule(); + mod.getOperation()->setAttr( + mlir::StringAttr::get(mod.getContext(), + llvm::Twine{"fir.no_fast_real_mod"}), + mlir::BoolAttr::get(mod.getContext(), true)); + } + // Create a parse tree and lower it to FIR parseAndLowerTree(ci, lb); @@ -898,7 +906,19 @@ static void generateMachineCodeOrAssemblyImpl(clang::DiagnosticsEngine &diags, llvm::CodeGenFileType cgft = (act == BackendActionTy::Backend_EmitAssembly) ? llvm::CodeGenFileType::AssemblyFile : llvm::CodeGenFileType::ObjectFile; - if (tm.addPassesToEmitFile(codeGenPasses, os, nullptr, cgft)) { + std::unique_ptr dwoOS; + if (!codeGenOpts.SplitDwarfOutput.empty()) { + std::error_code ec; + dwoOS = std::make_unique(codeGenOpts.SplitDwarfOutput, + ec, llvm::sys::fs::OF_None); + if (ec) { + diags.Report(clang::diag::err_fe_unable_to_open_output) + << codeGenOpts.SplitDwarfOutput << ec.message(); + return; + } + } + if (tm.addPassesToEmitFile(codeGenPasses, os, dwoOS ? &dwoOS->os() : nullptr, + cgft)) { unsigned diagID = diags.getCustomDiagID(clang::DiagnosticsEngine::Error, "emission of this file type is not supported"); @@ -909,6 +929,9 @@ static void generateMachineCodeOrAssemblyImpl(clang::DiagnosticsEngine &diags, // Run the passes codeGenPasses.run(llvmModule); + if (dwoOS) + dwoOS->keep(); + // Cleanup delete tlii; } @@ -936,20 +959,18 @@ void CodeGenAction::runOptimizationPipeline(llvm::raw_pwrite_stream &os) { pgoOpt = llvm::PGOOptions(opts.InstrProfileOutput.empty() ? llvm::driver::getDefaultProfileGenName() : opts.InstrProfileOutput, - "", "", opts.MemoryProfileUsePath, nullptr, + "", "", opts.MemoryProfileUsePath, llvm::PGOOptions::IRInstr, llvm::PGOOptions::NoCSAction, llvm::PGOOptions::ColdFuncOpt::Default, false, /*PseudoProbeForProfiling=*/false, false); } else if (opts.hasProfileIRUse()) { - llvm::IntrusiveRefCntPtr VFS = - llvm::vfs::getRealFileSystem(); // -fprofile-use. auto CSAction = opts.hasProfileCSIRUse() ? 
llvm::PGOOptions::CSIRUse : llvm::PGOOptions::NoCSAction; pgoOpt = llvm::PGOOptions( opts.ProfileInstrumentUsePath, "", opts.ProfileRemappingFile, - opts.MemoryProfileUsePath, VFS, llvm::PGOOptions::IRUse, CSAction, + opts.MemoryProfileUsePath, llvm::PGOOptions::IRUse, CSAction, llvm::PGOOptions::ColdFuncOpt::Default, false); } @@ -1324,6 +1345,7 @@ void CodeGenAction::executeAction() { llvm::TargetMachine &targetMachine = ci.getTargetMachine(); targetMachine.Options.MCOptions.AsmVerbose = targetOpts.asmVerbose; + targetMachine.Options.MCOptions.SplitDwarfFile = codeGenOpts.SplitDwarfFile; const llvm::Triple &theTriple = targetMachine.getTargetTriple(); diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index 4a5b9885bb7c4..780d56f085f69 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -2544,7 +2544,7 @@ class FirConverter : public Fortran::lower::AbstractConverter { auto loopOp = fir::DoLoopOp::create( *builder, loc, lowerValue, upperValue, stepValue, /*unordered=*/false, - /*finalCountValue=*/true, + /*finalCountValue=*/false, builder->createConvert(loc, loopVarType, lowerValue)); info.loopOp = loopOp; builder->setInsertionPointToStart(loopOp.getBody()); @@ -2696,22 +2696,18 @@ class FirConverter : public Fortran::lower::AbstractConverter { // Decrement tripVariable. auto doLoopOp = mlir::cast(info.loopOp); builder->setInsertionPointToEnd(doLoopOp.getBody()); - llvm::SmallVector results; - results.push_back(mlir::arith::AddIOp::create( - *builder, loc, doLoopOp.getInductionVar(), doLoopOp.getStep(), - iofAttr)); // Step loopVariable to help optimizations such as vectorization. // Induction variable elimination will clean up as necessary. mlir::Value step = builder->createConvert( loc, info.getLoopVariableType(), doLoopOp.getStep()); mlir::Value loopVar = fir::LoadOp::create(*builder, loc, info.loopVariable); - results.push_back( - mlir::arith::AddIOp::create(*builder, loc, loopVar, step, iofAttr)); - fir::ResultOp::create(*builder, loc, results); + mlir::Value loopVarInc = + mlir::arith::AddIOp::create(*builder, loc, loopVar, step, iofAttr); + fir::ResultOp::create(*builder, loc, loopVarInc); builder->setInsertionPointAfter(doLoopOp); // The loop control variable may be used after the loop. 
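The Bridge.cpp change above drops the separate trip-count result from fir.do_loop, leaving a single result (the stepped loop variable) that is stored back because the DO variable stays live after the loop, as in this small sketch:

    program p
      integer :: i, total
      total = 0
      do i = 1, 10, 3
        total = total + i
      end do
      ! After normal termination the DO variable holds the first value past the
      ! limit: iterations used 1, 4, 7, 10, so i is 13 here (and total is 22).
      print *, i, total
    end program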
- fir::StoreOp::create(*builder, loc, doLoopOp.getResult(1), + fir::StoreOp::create(*builder, loc, doLoopOp.getResult(0), info.loopVariable); continue; } @@ -3186,7 +3182,7 @@ class FirConverter : public Fortran::lower::AbstractConverter { mlir::OpBuilder::InsertPoint insertPt = builder->saveInsertionPoint(); localSymbols.pushScope(); mlir::Value exitCond = genOpenACCConstruct( - *this, bridge.getSemanticsContext(), getEval(), acc); + *this, bridge.getSemanticsContext(), getEval(), acc, localSymbols); const Fortran::parser::OpenACCLoopConstruct *accLoop = std::get_if(&acc.u); diff --git a/flang/lib/Lower/ConvertCall.cpp b/flang/lib/Lower/ConvertCall.cpp index a5a954a5ccea5..fb72040f9ea14 100644 --- a/flang/lib/Lower/ConvertCall.cpp +++ b/flang/lib/Lower/ConvertCall.cpp @@ -571,6 +571,8 @@ Fortran::lower::genCallOpAndResult( !cuf::isCUDADeviceContext(builder.getRegion())) { for (auto [oper, arg] : llvm::zip(operands, caller.getPassedArguments())) { + if (arg.testTKR(Fortran::common::IgnoreTKR::Contiguous)) + continue; if (auto boxTy = mlir::dyn_cast(oper.getType())) { const Fortran::semantics::Symbol *sym = caller.getDummySymbol(arg); if (sym && Fortran::evaluate::IsCUDADeviceSymbol(*sym)) diff --git a/flang/lib/Lower/ConvertVariable.cpp b/flang/lib/Lower/ConvertVariable.cpp index da964c956dbd0..00ec1b51e5400 100644 --- a/flang/lib/Lower/ConvertVariable.cpp +++ b/flang/lib/Lower/ConvertVariable.cpp @@ -511,6 +511,9 @@ fir::GlobalOp Fortran::lower::defineGlobal( Fortran::semantics::IsProcedurePointer(sym)) TODO(loc, "procedure pointer globals"); + const auto *oeDetails = + sym.detailsIf(); + // If this is an array, check to see if we can use a dense attribute // with a tensor mlir type. This optimization currently only supports // Fortran arrays of integer, real, complex, or logical. 
The tensor @@ -520,12 +523,10 @@ fir::GlobalOp Fortran::lower::defineGlobal( mlir::Type eleTy = mlir::cast(symTy).getElementType(); if (mlir::isa(eleTy)) { - const auto *details = - sym.detailsIf(); - if (details->init()) { + if (oeDetails && oeDetails->init()) { global = Fortran::lower::tryCreatingDenseGlobal( builder, loc, symTy, globalName, linkage, isConst, - details->init().value(), dataAttr); + oeDetails->init().value(), dataAttr); if (global) { global.setVisibility(mlir::SymbolTable::Visibility::Public); return global; @@ -539,10 +540,8 @@ fir::GlobalOp Fortran::lower::defineGlobal( isConst, var.isTarget(), dataAttr); if (Fortran::semantics::IsAllocatableOrPointer(sym) && !Fortran::semantics::IsProcedure(sym)) { - const auto *details = - sym.detailsIf(); - if (details && details->init()) { - auto expr = *details->init(); + if (oeDetails && oeDetails->init()) { + auto expr = *oeDetails->init(); createGlobalInitialization(builder, global, [&](fir::FirOpBuilder &b) { mlir::Value box = Fortran::lower::genInitialDataTarget(converter, loc, symTy, expr); @@ -558,15 +557,14 @@ fir::GlobalOp Fortran::lower::defineGlobal( fir::HasValueOp::create(b, loc, box); }); } - } else if (const auto *details = - sym.detailsIf()) { - if (details->init()) { + } else if (oeDetails) { + if (oeDetails->init()) { createGlobalInitialization( builder, global, [&](fir::FirOpBuilder &builder) { Fortran::lower::StatementContext stmtCtx( /*cleanupProhibited=*/true); fir::ExtendedValue initVal = genInitializerExprValue( - converter, loc, details->init().value(), stmtCtx); + converter, loc, oeDetails->init().value(), stmtCtx); mlir::Value castTo = builder.createConvert(loc, symTy, fir::getBase(initVal)); fir::HasValueOp::create(builder, loc, castTo); @@ -615,28 +613,32 @@ fir::GlobalOp Fortran::lower::defineGlobal( TODO(loc, "global"); // Something else } // Creates zero initializer for globals without initializers, this is a common - // and expected behavior (although not required by the standard) + // and expected behavior (although not required by the standard). + // Exception: CDEFINED globals are treated as "extern" in C and don't need + // initializer. if (!globalIsInitialized(global)) { - // Fortran does not provide means to specify that a BIND(C) module - // uninitialized variables will be defined in C. - // Add the common linkage to those to allow some level of support - // for this use case. Note that this use case will not work if the Fortran - // module code is placed in a shared library since, at least for the ELF - // format, common symbols are assigned a section in shared libraries. - // The best is still to declare C defined variables in a Fortran module file - // with no other definitions, and to never link the resulting module object - // file. - if (sym.attrs().test(Fortran::semantics::Attr::BIND_C)) - global.setLinkName(builder.createCommonLinkage()); - createGlobalInitialization( - builder, global, [&](fir::FirOpBuilder &builder) { - mlir::Value initValue; - if (converter.getLoweringOptions().getInitGlobalZero()) - initValue = fir::ZeroOp::create(builder, loc, symTy); - else - initValue = fir::UndefOp::create(builder, loc, symTy); - fir::HasValueOp::create(builder, loc, initValue); - }); + if (!oeDetails || !oeDetails->isCDefined()) { + // Fortran does not provide means to specify that a BIND(C) module + // uninitialized variables will be defined in C. + // Add the common linkage to those to allow some level of support + // for this use case. 
Note that this use case will not work if the Fortran + // module code is placed in a shared library since, at least for the ELF + // format, common symbols are assigned a section in shared libraries. The + // best is still to declare C defined variables in a Fortran module file + // with no other definitions, and to never link the resulting module + // object file. + if (sym.attrs().test(Fortran::semantics::Attr::BIND_C)) + global.setLinkName(builder.createCommonLinkage()); + createGlobalInitialization( + builder, global, [&](fir::FirOpBuilder &builder) { + mlir::Value initValue; + if (converter.getLoweringOptions().getInitGlobalZero()) + initValue = fir::ZeroOp::create(builder, loc, symTy); + else + initValue = fir::UndefOp::create(builder, loc, symTy); + fir::HasValueOp::create(builder, loc, initValue); + }); + } } // Set public visibility to prevent global definition to be optimized out // even if they have no initializer and are unused in this compilation unit. diff --git a/flang/lib/Lower/IO.cpp b/flang/lib/Lower/IO.cpp index 4ad2ac01334fa..98dc78f625b9e 100644 --- a/flang/lib/Lower/IO.cpp +++ b/flang/lib/Lower/IO.cpp @@ -977,9 +977,9 @@ static void genIoLoop(Fortran::lower::AbstractConverter &converter, fir::StoreOp::create(builder, loc, lcv, loopVar); genItemList(ioImpliedDo); builder.setInsertionPointToEnd(doLoopOp.getBody()); - mlir::Value result = mlir::arith::AddIOp::create( - builder, loc, doLoopOp.getInductionVar(), doLoopOp.getStep(), iofAttr); - fir::ResultOp::create(builder, loc, result); + // fir.do_loop's induction variable's increment is implied, + // so we do not need to increment it explicitly. + fir::ResultOp::create(builder, loc, doLoopOp.getInductionVar()); builder.setInsertionPointAfter(doLoopOp); // The loop control variable may be used after the loop. lcv = builder.createConvert(loc, fir::unwrapRefType(loopVar.getType()), diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp index 07234663cbef6..f9b9b850ad839 100644 --- a/flang/lib/Lower/OpenACC.cpp +++ b/flang/lib/Lower/OpenACC.cpp @@ -44,12 +44,6 @@ #define DEBUG_TYPE "flang-lower-openacc" -static llvm::cl::opt unwrapFirBox( - "openacc-unwrap-fir-box", - llvm::cl::desc( - "Whether to use the address from fix.box in data clause operations."), - llvm::cl::init(false)); - static llvm::cl::opt generateDefaultBounds( "openacc-generate-default-bounds", llvm::cl::desc("Whether to generate default bounds for arrays."), @@ -73,7 +67,6 @@ static unsigned routineCounter = 0; static constexpr llvm::StringRef accRoutinePrefix = "acc_routine_"; static constexpr llvm::StringRef accPrivateInitName = "acc.private.init"; static constexpr llvm::StringRef accReductionInitName = "acc.reduction.init"; -static constexpr llvm::StringRef accFirDescriptorPostfix = "_desc"; static mlir::Location genOperandLocation(Fortran::lower::AbstractConverter &converter, @@ -120,43 +113,6 @@ createDataEntryOp(fir::FirOpBuilder &builder, mlir::Location loc, llvm::ArrayRef asyncOnlyDeviceTypes, bool unwrapBoxAddr = false, mlir::Value isPresent = {}) { mlir::Value varPtrPtr; - // The data clause may apply to either the box reference itself or the - // pointer to the data it holds. So use `unwrapBoxAddr` to decide. - // When we have a box value - assume it refers to the data inside box. 
- if (unwrapFirBox && - ((fir::isBoxAddress(baseAddr.getType()) && unwrapBoxAddr) || - fir::isa_box_type(baseAddr.getType()))) { - if (isPresent) { - mlir::Type ifRetTy = - mlir::cast(fir::unwrapRefType(baseAddr.getType())) - .getEleTy(); - if (!fir::isa_ref_type(ifRetTy)) - ifRetTy = fir::ReferenceType::get(ifRetTy); - baseAddr = - builder - .genIfOp(loc, {ifRetTy}, isPresent, - /*withElseRegion=*/true) - .genThen([&]() { - if (fir::isBoxAddress(baseAddr.getType())) - baseAddr = fir::LoadOp::create(builder, loc, baseAddr); - mlir::Value boxAddr = - fir::BoxAddrOp::create(builder, loc, baseAddr); - fir::ResultOp::create(builder, loc, mlir::ValueRange{boxAddr}); - }) - .genElse([&] { - mlir::Value absent = - fir::AbsentOp::create(builder, loc, ifRetTy); - fir::ResultOp::create(builder, loc, mlir::ValueRange{absent}); - }) - .getResults()[0]; - } else { - if (fir::isBoxAddress(baseAddr.getType())) - baseAddr = fir::LoadOp::create(builder, loc, baseAddr); - baseAddr = fir::BoxAddrOp::create(builder, loc, baseAddr); - } - retTy = baseAddr.getType(); - } - llvm::SmallVector operands; llvm::SmallVector operandSegments; @@ -246,46 +202,14 @@ static void createDeclareAllocFuncWithArg(mlir::OpBuilder &modBuilder, llvm::SmallVector bounds; std::stringstream asFortranDesc; asFortranDesc << asFortran.str(); - if (unwrapFirBox) - asFortranDesc << accFirDescriptorPostfix.str(); - - // For descriptor, preserve old behavior when unwrapping FIR box: update. - if (unwrapFirBox) { - mlir::acc::UpdateDeviceOp updateDeviceOp = - createDataEntryOp( - builder, loc, registerFuncOp.getArgument(0), asFortranDesc, bounds, - /*structured=*/false, /*implicit=*/true, - mlir::acc::DataClause::acc_update_device, descTy, - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - llvm::SmallVector operandSegments{0, 0, 0, 1}; - llvm::SmallVector operands{updateDeviceOp.getResult()}; - createSimpleOp(builder, loc, operands, - operandSegments); - } else { - // New behavior: start a structured region with declare_enter. - EntryOp descEntryOp = createDataEntryOp( - builder, loc, registerFuncOp.getArgument(0), asFortranDesc, bounds, - /*structured=*/false, /*implicit=*/true, clause, descTy, - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - mlir::acc::DeclareEnterOp::create( - builder, loc, - mlir::acc::DeclareTokenType::get(descEntryOp.getContext()), - mlir::ValueRange(descEntryOp.getAccVar())); - } - - if (unwrapFirBox) { - mlir::Value desc = - fir::LoadOp::create(builder, loc, registerFuncOp.getArgument(0)); - fir::BoxAddrOp boxAddrOp = fir::BoxAddrOp::create(builder, loc, desc); - addDeclareAttr(builder, boxAddrOp.getOperation(), clause); - EntryOp entryOp = createDataEntryOp( - builder, loc, boxAddrOp.getResult(), asFortran, bounds, - /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - mlir::acc::DeclareEnterOp::create( - builder, loc, mlir::acc::DeclareTokenType::get(entryOp.getContext()), - mlir::ValueRange(entryOp.getAccVar())); - } + // Start a structured region with declare_enter. 
+ EntryOp descEntryOp = createDataEntryOp( + builder, loc, registerFuncOp.getArgument(0), asFortranDesc, bounds, + /*structured=*/false, /*implicit=*/true, clause, descTy, + /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); + mlir::acc::DeclareEnterOp::create( + builder, loc, mlir::acc::DeclareTokenType::get(descEntryOp.getContext()), + mlir::ValueRange(descEntryOp.getAccVar())); modBuilder.setInsertionPointAfter(registerFuncOp); builder.restoreInsertionPoint(crtInsPt); @@ -307,67 +231,32 @@ static void createDeclareDeallocFuncWithArg( modBuilder, builder, loc, preDeallocFuncName.str(), {descTy}, {loc}); mlir::Value var = preDeallocOp.getArgument(0); - if (unwrapFirBox) { - mlir::Value loadOp = - fir::LoadOp::create(builder, loc, preDeallocOp.getArgument(0)); - fir::BoxAddrOp boxAddrOp = fir::BoxAddrOp::create(builder, loc, loadOp); - addDeclareAttr(builder, boxAddrOp.getOperation(), clause); - var = boxAddrOp.getResult(); - } llvm::SmallVector bounds; - if (unwrapFirBox) { - // Unwrap: delete device payload using getdeviceptr + declare_exit + ExitOp - mlir::acc::GetDevicePtrOp entryOp = - createDataEntryOp( - builder, loc, var, asFortran, bounds, - /*structured=*/false, /*implicit=*/false, clause, var.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{}, - mlir::ValueRange(entryOp.getAccVar())); - - if constexpr (std::is_same_v || - std::is_same_v) - ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(), - entryOp.getVar(), entryOp.getVarType(), - entryOp.getBounds(), entryOp.getAsyncOperands(), - entryOp.getAsyncOperandsDeviceTypeAttr(), - entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(), - /*structured=*/false, /*implicit=*/false, - builder.getStringAttr(*entryOp.getName())); - else - ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(), - entryOp.getBounds(), entryOp.getAsyncOperands(), - entryOp.getAsyncOperandsDeviceTypeAttr(), - entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(), - /*structured=*/false, /*implicit=*/false, - builder.getStringAttr(*entryOp.getName())); - } else { - mlir::acc::GetDevicePtrOp entryOp = - createDataEntryOp( - builder, loc, var, asFortran, bounds, - /*structured=*/false, /*implicit=*/false, clause, var.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{}, - mlir::ValueRange(entryOp.getAccVar())); - - if constexpr (std::is_same_v || - std::is_same_v) - ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(), - entryOp.getVar(), entryOp.getVarType(), - entryOp.getBounds(), entryOp.getAsyncOperands(), - entryOp.getAsyncOperandsDeviceTypeAttr(), - entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(), - /*structured=*/false, /*implicit=*/false, - builder.getStringAttr(*entryOp.getName())); - else - ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(), - entryOp.getBounds(), entryOp.getAsyncOperands(), - entryOp.getAsyncOperandsDeviceTypeAttr(), - entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(), - /*structured=*/false, /*implicit=*/false, - builder.getStringAttr(*entryOp.getName())); - } + mlir::acc::GetDevicePtrOp entryOp = + createDataEntryOp( + builder, loc, var, asFortran, bounds, + /*structured=*/false, /*implicit=*/false, clause, var.getType(), + /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); + mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{}, + 
mlir::ValueRange(entryOp.getAccVar())); + + if constexpr (std::is_same_v || + std::is_same_v) + ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(), + entryOp.getVar(), entryOp.getVarType(), entryOp.getBounds(), + entryOp.getAsyncOperands(), + entryOp.getAsyncOperandsDeviceTypeAttr(), + entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(), + /*structured=*/false, /*implicit=*/false, + builder.getStringAttr(*entryOp.getName())); + else + ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(), + entryOp.getBounds(), entryOp.getAsyncOperands(), + entryOp.getAsyncOperandsDeviceTypeAttr(), + entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(), + /*structured=*/false, /*implicit=*/false, + builder.getStringAttr(*entryOp.getName())); // Generate the post dealloc function. modBuilder.setInsertionPointAfter(preDeallocOp); @@ -378,33 +267,14 @@ static void createDeclareDeallocFuncWithArg( modBuilder, builder, loc, postDeallocFuncName.str(), {descTy}, {loc}); var = postDeallocOp.getArgument(0); - if (unwrapFirBox) { - var = fir::LoadOp::create(builder, loc, postDeallocOp.getArgument(0)); - asFortran << accFirDescriptorPostfix.str(); - } - - if (unwrapFirBox) { - // Old behavior: update descriptor after deallocation. - mlir::acc::UpdateDeviceOp updateDeviceOp = - createDataEntryOp( - builder, loc, var, asFortran, bounds, - /*structured=*/false, /*implicit=*/true, - mlir::acc::DataClause::acc_update_device, var.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - llvm::SmallVector operandSegments{0, 0, 0, 1}; - llvm::SmallVector operands{updateDeviceOp.getResult()}; - createSimpleOp(builder, loc, operands, - operandSegments); - } else { - // New behavior: end structured region with declare_exit. - mlir::acc::GetDevicePtrOp postEntryOp = - createDataEntryOp( - builder, loc, var, asFortran, bounds, - /*structured=*/false, /*implicit=*/true, clause, var.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{}, - mlir::ValueRange(postEntryOp.getAccVar())); - } + // End structured region with declare_exit. 
+ mlir::acc::GetDevicePtrOp postEntryOp = + createDataEntryOp( + builder, loc, var, asFortran, bounds, + /*structured=*/false, /*implicit=*/true, clause, var.getType(), + /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); + mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{}, + mlir::ValueRange(postEntryOp.getAccVar())); modBuilder.setInsertionPointAfter(postDeallocOp); builder.restoreInsertionPoint(crtInsPt); } @@ -780,7 +650,7 @@ genDataOperandOperations(const Fortran::parser::AccObjectList &objectList, mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>( converter, builder, semanticsContext, stmtCtx, symbol, designator, operandLocation, asFortran, bounds, - /*treatIndexAsSection=*/true, /*unwrapFirBox=*/unwrapFirBox, + /*treatIndexAsSection=*/true, /*unwrapFirBox=*/false, /*genDefaultBounds=*/generateDefaultBounds, /*strideIncludeLowerExtent=*/strideIncludeLowerExtent); LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs())); @@ -839,7 +709,7 @@ static void genDeclareDataOperandOperations( mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>( converter, builder, semanticsContext, stmtCtx, symbol, designator, operandLocation, asFortran, bounds, - /*treatIndexAsSection=*/true, /*unwrapFirBox=*/unwrapFirBox, + /*treatIndexAsSection=*/true, /*unwrapFirBox=*/false, /*genDefaultBounds=*/generateDefaultBounds, /*strideIncludeLowerExtent=*/strideIncludeLowerExtent); LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs())); @@ -1409,7 +1279,7 @@ static void genPrivatizationRecipes( mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>( converter, builder, semanticsContext, stmtCtx, symbol, designator, operandLocation, asFortran, bounds, - /*treatIndexAsSection=*/true, /*unwrapFirBox=*/unwrapFirBox, + /*treatIndexAsSection=*/true, /*unwrapFirBox=*/false, /*genDefaultBounds=*/generateDefaultBounds, /*strideIncludeLowerExtent=*/strideIncludeLowerExtent); LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs())); @@ -1842,7 +1712,7 @@ genReductions(const Fortran::parser::AccObjectListWithReduction &objectList, mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>( converter, builder, semanticsContext, stmtCtx, symbol, designator, operandLocation, asFortran, bounds, - /*treatIndexAsSection=*/true, /*unwrapFirBox=*/unwrapFirBox, + /*treatIndexAsSection=*/true, /*unwrapFirBox=*/false, /*genDefaultBounds=*/generateDefaultBounds, /*strideIncludeLowerExtent=*/strideIncludeLowerExtent); LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs())); @@ -3314,7 +3184,8 @@ genACCHostDataOp(Fortran::lower::AbstractConverter &converter, Fortran::lower::pft::Evaluation &eval, Fortran::semantics::SemanticsContext &semanticsContext, Fortran::lower::StatementContext &stmtCtx, - const Fortran::parser::AccClauseList &accClauseList) { + const Fortran::parser::AccClauseList &accClauseList, + Fortran::lower::SymMap &localSymbols) { mlir::Value ifCond; llvm::SmallVector dataOperands; bool addIfPresentAttr = false; @@ -3329,6 +3200,19 @@ genACCHostDataOp(Fortran::lower::AbstractConverter &converter, } else if (const auto *useDevice = std::get_if( &clause.u)) { + // When CUDA Fortran is enabled, extra symbols are used in the host_data + region. Look for them and bind their values with the symbols in the + outer scope.
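Illustrative aside, not part of the patch: the binding step below relies on the new SymMap::lookupSymbolByName (added in flang/lib/Lower/SymbolMap.cpp further down), which searches the scope stack innermost-first by name. A standalone C++ model of that lookup, using hypothetical stand-in types:

    #include <map>
    #include <optional>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for the lowering symbol map: a stack of scopes
    // searched from the innermost scope outward.
    class ScopedSymbols {
      std::vector<std::map<std::string, int>> scopes; // name -> binding id
    public:
      void pushScope() { scopes.emplace_back(); }
      void popScope() { scopes.pop_back(); }
      void bind(const std::string &name, int id) { scopes.back()[name] = id; }
      std::optional<int> lookupByName(const std::string &name) const {
        for (auto scope = scopes.rbegin(); scope != scopes.rend(); ++scope)
          if (auto it = scope->find(name); it != scope->end())
            return it->second;
        return std::nullopt;
      }
    };

    int main() {
      ScopedSymbols symbols;
      symbols.pushScope();
      symbols.bind("a_dev", 1); // outer-scope binding
      symbols.pushScope();      // host_data region scope
      return symbols.lookupByName("a_dev").value_or(-1) == 1 ? 0 : 1;
    }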
+ if (semanticsContext.IsEnabled(Fortran::common::LanguageFeature::CUDA)) { + const Fortran::parser::AccObjectList &objectList{useDevice->v}; + for (const auto &accObject : objectList.v) { + Fortran::semantics::Symbol &symbol = + getSymbolFromAccObject(accObject); + const Fortran::semantics::Symbol *baseSym = + localSymbols.lookupSymbolByName(symbol.name().ToString()); + localSymbols.copySymbolBinding(*baseSym, symbol); + } + } genDataOperandOperations( useDevice->v, converter, semanticsContext, stmtCtx, dataOperands, mlir::acc::DataClause::acc_use_device, @@ -3369,11 +3253,11 @@ genACCHostDataOp(Fortran::lower::AbstractConverter &converter, hostDataOp.setIfPresentAttr(builder.getUnitAttr()); } -static void -genACC(Fortran::lower::AbstractConverter &converter, - Fortran::semantics::SemanticsContext &semanticsContext, - Fortran::lower::pft::Evaluation &eval, - const Fortran::parser::OpenACCBlockConstruct &blockConstruct) { +static void genACC(Fortran::lower::AbstractConverter &converter, + Fortran::semantics::SemanticsContext &semanticsContext, + Fortran::lower::pft::Evaluation &eval, + const Fortran::parser::OpenACCBlockConstruct &blockConstruct, + Fortran::lower::SymMap &localSymbols) { const auto &beginBlockDirective = std::get(blockConstruct.t); const auto &blockDirective = @@ -3403,7 +3287,7 @@ genACC(Fortran::lower::AbstractConverter &converter, accClauseList); } else if (blockDirective.v == llvm::acc::ACCD_host_data) { genACCHostDataOp(converter, currentLocation, eval, semanticsContext, - stmtCtx, accClauseList); + stmtCtx, accClauseList, localSymbols); } } @@ -4052,45 +3936,15 @@ static void createDeclareAllocFunc(mlir::OpBuilder &modBuilder, asFortran << Fortran::lower::mangle::demangleName(globalOp.getSymName()); std::stringstream asFortranDesc; asFortranDesc << asFortran.str(); - if (unwrapFirBox) - asFortranDesc << accFirDescriptorPostfix.str(); llvm::SmallVector bounds; - // For unwrapFirBox=false this remains declare_enter; for unwrapFirBox=true, - // the descriptor post-alloc remains update behavior. 
- if (unwrapFirBox) { - mlir::acc::UpdateDeviceOp updDesc = - createDataEntryOp( - builder, loc, addrOp, asFortranDesc, bounds, - /*structured=*/false, /*implicit=*/true, - mlir::acc::DataClause::acc_update_device, addrOp.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - llvm::SmallVector seg{0, 0, 0, 1}; - llvm::SmallVector ops{updDesc.getResult()}; - createSimpleOp(builder, loc, ops, seg); - } else { - EntryOp descEntryOp = createDataEntryOp( - builder, loc, addrOp, asFortranDesc, bounds, - /*structured=*/false, /*implicit=*/true, clause, addrOp.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - mlir::acc::DeclareEnterOp::create( - builder, loc, - mlir::acc::DeclareTokenType::get(descEntryOp.getContext()), - mlir::ValueRange(descEntryOp.getAccVar())); - } - - if (unwrapFirBox) { - auto loadOp = fir::LoadOp::create(builder, loc, addrOp.getResult()); - fir::BoxAddrOp boxAddrOp = fir::BoxAddrOp::create(builder, loc, loadOp); - addDeclareAttr(builder, boxAddrOp.getOperation(), clause); - EntryOp entryOp = createDataEntryOp( - builder, loc, boxAddrOp.getResult(), asFortran, bounds, - /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - mlir::acc::DeclareEnterOp::create( - builder, loc, mlir::acc::DeclareTokenType::get(entryOp.getContext()), - mlir::ValueRange(entryOp.getAccVar())); - } + EntryOp descEntryOp = createDataEntryOp( + builder, loc, addrOp, asFortranDesc, bounds, + /*structured=*/false, /*implicit=*/true, clause, addrOp.getType(), + /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); + mlir::acc::DeclareEnterOp::create( + builder, loc, mlir::acc::DeclareTokenType::get(descEntryOp.getContext()), + mlir::ValueRange(descEntryOp.getAccVar())); modBuilder.setInsertionPointAfter(registerFuncOp); } @@ -4108,56 +3962,6 @@ static void createDeclareDeallocFunc(mlir::OpBuilder &modBuilder, std::stringstream asFortran; asFortran << Fortran::lower::mangle::demangleName(globalOp.getSymName()); - // If FIR box semantics are being unwrapped, then a pre-dealloc function - // needs generated to ensure to delete the device data pointed to by the - // descriptor before this information is lost. - if (unwrapFirBox) { - // Generate the pre dealloc function. 
- std::stringstream preDeallocFuncName; - preDeallocFuncName << globalOp.getSymName().str() - << Fortran::lower::declarePreDeallocSuffix.str(); - auto preDeallocOp = - createDeclareFunc(modBuilder, builder, loc, preDeallocFuncName.str()); - - fir::AddrOfOp addrOp = fir::AddrOfOp::create( - builder, loc, fir::ReferenceType::get(globalOp.getType()), - globalOp.getSymbol()); - auto loadOp = fir::LoadOp::create(builder, loc, addrOp.getResult()); - fir::BoxAddrOp boxAddrOp = fir::BoxAddrOp::create(builder, loc, loadOp); - mlir::Value var = boxAddrOp.getResult(); - addDeclareAttr(builder, var.getDefiningOp(), clause); - - llvm::SmallVector bounds; - mlir::acc::GetDevicePtrOp entryOp = - createDataEntryOp( - builder, loc, var, asFortran, bounds, - /*structured=*/false, /*implicit=*/false, clause, var.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - - mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{}, - mlir::ValueRange(entryOp.getAccVar())); - - if constexpr (std::is_same_v || - std::is_same_v) - ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(), - entryOp.getVar(), entryOp.getBounds(), - entryOp.getAsyncOperands(), - entryOp.getAsyncOperandsDeviceTypeAttr(), - entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(), - /*structured=*/false, /*implicit=*/false, - builder.getStringAttr(*entryOp.getName())); - else - ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(), - entryOp.getBounds(), entryOp.getAsyncOperands(), - entryOp.getAsyncOperandsDeviceTypeAttr(), - entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(), - /*structured=*/false, /*implicit=*/false, - builder.getStringAttr(*entryOp.getName())); - - // Generate the post dealloc function. - modBuilder.setInsertionPointAfter(preDeallocOp); - } - std::stringstream postDeallocFuncName; postDeallocFuncName << globalOp.getSymName().str() << Fortran::lower::declarePostDeallocSuffix.str(); @@ -4167,30 +3971,15 @@ static void createDeclareDeallocFunc(mlir::OpBuilder &modBuilder, fir::AddrOfOp addrOp = fir::AddrOfOp::create( builder, loc, fir::ReferenceType::get(globalOp.getType()), globalOp.getSymbol()); - if (unwrapFirBox) - asFortran << accFirDescriptorPostfix.str(); llvm::SmallVector bounds; - if (unwrapFirBox) { - // Unwrap mode: update the descriptor after deallocation (no declare_exit). - mlir::acc::UpdateDeviceOp updDesc = - createDataEntryOp( - builder, loc, addrOp, asFortran, bounds, - /*structured=*/false, /*implicit=*/true, - mlir::acc::DataClause::acc_update_device, addrOp.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - llvm::SmallVector seg{0, 0, 0, 1}; - llvm::SmallVector ops{updDesc.getResult()}; - createSimpleOp(builder, loc, ops, seg); - } else { - // Default: end the structured declare region using declare_exit. - mlir::acc::GetDevicePtrOp descEntryOp = - createDataEntryOp( - builder, loc, addrOp, asFortran, bounds, - /*structured=*/false, /*implicit=*/true, clause, addrOp.getType(), - /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); - mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{}, - mlir::ValueRange(descEntryOp.getAccVar())); - } + // End the structured declare region using declare_exit. 
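Illustrative aside, not part of the patch: the generated post-allocate and post-deallocate functions now pair acc.declare_enter with acc.declare_exit around the variable's lifetime instead of issuing descriptor updates. A loose RAII analogy in standalone C++ (purely conceptual, not the emitted IR):

    #include <iostream>

    // Conceptual analogy only: "enter" maps the variable when its lifetime
    // begins, "exit" unmaps it when the lifetime ends.
    struct DeclareRegion {
      const char *name;
      explicit DeclareRegion(const char *n) : name(n) {
        std::cout << "acc.declare_enter " << name << "\n";
      }
      ~DeclareRegion() { std::cout << "acc.declare_exit " << name << "\n"; }
    };

    int main() {
      DeclareRegion guard("module_variable_descriptor");
      // ... the variable is usable on the device within this span ...
      return 0;
    }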
+ mlir::acc::GetDevicePtrOp descEntryOp = + createDataEntryOp( + builder, loc, addrOp, asFortran, bounds, + /*structured=*/false, /*implicit=*/true, clause, addrOp.getType(), + /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{}); + mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{}, + mlir::ValueRange(descEntryOp.getAccVar())); modBuilder.setInsertionPointAfter(postDeallocOp); } @@ -4872,13 +4661,15 @@ mlir::Value Fortran::lower::genOpenACCConstruct( Fortran::lower::AbstractConverter &converter, Fortran::semantics::SemanticsContext &semanticsContext, Fortran::lower::pft::Evaluation &eval, - const Fortran::parser::OpenACCConstruct &accConstruct) { + const Fortran::parser::OpenACCConstruct &accConstruct, + Fortran::lower::SymMap &localSymbols) { mlir::Value exitCond; Fortran::common::visit( common::visitors{ [&](const Fortran::parser::OpenACCBlockConstruct &blockConstruct) { - genACC(converter, semanticsContext, eval, blockConstruct); + genACC(converter, semanticsContext, eval, blockConstruct, + localSymbols); }, [&](const Fortran::parser::OpenACCCombinedConstruct &combinedConstruct) { diff --git a/flang/lib/Lower/OpenMP/Clauses.cpp b/flang/lib/Lower/OpenMP/Clauses.cpp index 48b90ccea2f2a..fac37a372caaf 100644 --- a/flang/lib/Lower/OpenMP/Clauses.cpp +++ b/flang/lib/Lower/OpenMP/Clauses.cpp @@ -1036,6 +1036,11 @@ Link make(const parser::OmpClause::Link &inp, return Link{/*List=*/makeObjects(inp.v, semaCtx)}; } +LoopRange make(const parser::OmpClause::Looprange &inp, + semantics::SemanticsContext &semaCtx) { + llvm_unreachable("Unimplemented: looprange"); +} + Map make(const parser::OmpClause::Map &inp, semantics::SemanticsContext &semaCtx) { // inp.v -> parser::OmpMapClause diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp index 5681be664d450..1cb3335abbd06 100644 --- a/flang/lib/Lower/OpenMP/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP/OpenMP.cpp @@ -761,19 +761,17 @@ static void promoteNonCPtrUseDevicePtrArgsToUseDeviceAddr( static void getDeclareTargetInfo( lower::AbstractConverter &converter, semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval, - const parser::OpenMPDeclareTargetConstruct &declareTargetConstruct, + const parser::OpenMPDeclareTargetConstruct &construct, mlir::omp::DeclareTargetOperands &clauseOps, llvm::SmallVectorImpl &symbolAndClause) { - const auto &spec = - std::get(declareTargetConstruct.t); - if (const auto *objectList{parser::Unwrap(spec.u)}) { - ObjectList objects{makeObjects(*objectList, semaCtx)}; + + if (!construct.v.Arguments().v.empty()) { + ObjectList objects{makeObjects(construct.v.Arguments(), semaCtx)}; // Case: declare target(func, var1, var2) gatherFuncAndVarSyms(objects, mlir::omp::DeclareTargetCaptureClause::to, symbolAndClause, /*automap=*/false); - } else if (const auto *clauseList{ - parser::Unwrap(spec.u)}) { - List clauses = makeClauses(*clauseList, semaCtx); + } else { + List clauses = makeClauses(construct.v.Clauses(), semaCtx); if (clauses.empty()) { Fortran::lower::pft::FunctionLikeUnit *owningProc = eval.getOwningProcedure(); @@ -3441,18 +3439,20 @@ genOMP(lower::AbstractConverter &converter, lower::SymMap &symTable, TODO(converter.getCurrentLocation(), "OpenMPDeclareSimdConstruct"); } -static void -genOMP(lower::AbstractConverter &converter, lower::SymMap &symTable, - semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval, - const parser::OpenMPDeclareMapperConstruct &declareMapperConstruct) { - mlir::Location loc = 
converter.genLocation(declareMapperConstruct.source); +static void genOMP(lower::AbstractConverter &converter, lower::SymMap &symTable, + semantics::SemanticsContext &semaCtx, + lower::pft::Evaluation &eval, + const parser::OpenMPDeclareMapperConstruct &construct) { + mlir::Location loc = converter.genLocation(construct.source); fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder(); + const parser::OmpArgumentList &args = construct.v.Arguments(); + assert(args.v.size() == 1 && "Expecting single argument"); lower::StatementContext stmtCtx; - const auto &spec = - std::get(declareMapperConstruct.t); - const auto &mapperName{std::get(spec.t)}; - const auto &varType{std::get(spec.t)}; - const auto &varName{std::get(spec.t)}; + const auto *spec = std::get_if(&args.v.front().u); + assert(spec && "Expecting mapper specifier"); + const auto &mapperName{std::get(spec->t)}; + const auto &varType{std::get(spec->t)}; + const auto &varName{std::get(spec->t)}; assert(varType.declTypeSpec->category() == semantics::DeclTypeSpec::Category::TypeDerived && "Expected derived type"); @@ -3476,9 +3476,7 @@ genOMP(lower::AbstractConverter &converter, lower::SymMap &symTable, // Populate the declareMapper region with the map information. mlir::omp::DeclareMapperInfoOperands clauseOps; - const auto *clauseList{ - parser::Unwrap(declareMapperConstruct.t)}; - List clauses = makeClauses(*clauseList, semaCtx); + List clauses = makeClauses(construct.v.Clauses(), semaCtx); ClauseProcessor cp(converter, semaCtx, clauses); cp.processMap(loc, stmtCtx, clauseOps); mlir::omp::DeclareMapperInfoOp::create(firOpBuilder, loc, clauseOps.mapVars); diff --git a/flang/lib/Lower/SymbolMap.cpp b/flang/lib/Lower/SymbolMap.cpp index 080f21ec67400..78529e0d539fb 100644 --- a/flang/lib/Lower/SymbolMap.cpp +++ b/flang/lib/Lower/SymbolMap.cpp @@ -45,6 +45,16 @@ Fortran::lower::SymMap::lookupSymbol(Fortran::semantics::SymbolRef symRef) { return SymbolBox::None{}; } +const Fortran::semantics::Symbol * +Fortran::lower::SymMap::lookupSymbolByName(llvm::StringRef symName) { + for (auto jmap = symbolMapStack.rbegin(), jend = symbolMapStack.rend(); + jmap != jend; ++jmap) + for (auto const &[sym, symBox] : *jmap) + if (sym->name().ToString() == symName) + return sym; + return nullptr; +} + Fortran::lower::SymbolBox Fortran::lower::SymMap::shallowLookupSymbol( Fortran::semantics::SymbolRef symRef) { auto *sym = symRef->HasLocalLocality() ? 
&*symRef : &symRef->GetUltimate(); diff --git a/flang/lib/Optimizer/Builder/FIRBuilder.cpp b/flang/lib/Optimizer/Builder/FIRBuilder.cpp index b6501fd530992..5da27d1713825 100644 --- a/flang/lib/Optimizer/Builder/FIRBuilder.cpp +++ b/flang/lib/Optimizer/Builder/FIRBuilder.cpp @@ -1943,7 +1943,7 @@ void fir::factory::genDimInfoFromBox( return; unsigned rank = fir::getBoxRank(boxType); - assert(rank != 0 && "must be an array of known rank"); + assert(!boxType.isAssumedRank() && "must be an array of known rank"); mlir::Type idxTy = builder.getIndexType(); for (unsigned i = 0; i < rank; ++i) { mlir::Value dim = builder.createIntegerConstant(loc, idxTy, i); @@ -1974,3 +1974,25 @@ void fir::factory::genLifetimeEnd(mlir::OpBuilder &builder, mlir::Location loc, mlir::Value cast) { mlir::LLVM::LifetimeEndOp::create(builder, loc, cast); } + +mlir::Value fir::factory::getDescriptorWithNewBaseAddress( + fir::FirOpBuilder &builder, mlir::Location loc, mlir::Value box, + mlir::Value newAddr) { + auto boxType = llvm::dyn_cast(box.getType()); + assert(boxType && + "expected a box type input in getDescriptorWithNewBaseAddress"); + if (boxType.isAssumedRank()) + TODO(loc, "changing descriptor base address for an assumed rank entity"); + llvm::SmallVector lbounds; + fir::factory::genDimInfoFromBox(builder, loc, box, &lbounds, + /*extents=*/nullptr, /*strides=*/nullptr); + fir::BoxValue inputBoxValue(box, lbounds, /*explicitParams=*/{}); + fir::ExtendedValue openedInput = + fir::factory::readBoxValue(builder, loc, inputBoxValue); + mlir::Value shape = fir::isArray(openedInput) + ? builder.createShape(loc, openedInput) + : mlir::Value{}; + mlir::Value typeMold = fir::isPolymorphicType(boxType) ? box : mlir::Value{}; + return builder.createBox(loc, boxType, newAddr, shape, /*slice=*/{}, + fir::getTypeParams(openedInput), typeMold); +} diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp index 71d35e37bbe94..de7694ffd468c 100644 --- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp +++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp @@ -6989,8 +6989,33 @@ mlir::Value IntrinsicLibrary::genMergeBits(mlir::Type resultType, } // MOD +static mlir::Value genFastMod(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value a, mlir::Value p) { + auto fastmathFlags = mlir::arith::FastMathFlags::contract; + auto fastmathAttr = + mlir::arith::FastMathFlagsAttr::get(builder.getContext(), fastmathFlags); + mlir::Value divResult = + mlir::arith::DivFOp::create(builder, loc, a, p, fastmathAttr); + mlir::Type intType = builder.getIntegerType( + a.getType().getIntOrFloatBitWidth(), /*signed=*/true); + mlir::Value intResult = builder.createConvert(loc, intType, divResult); + mlir::Value cnvResult = builder.createConvert(loc, a.getType(), intResult); + mlir::Value mulResult = + mlir::arith::MulFOp::create(builder, loc, cnvResult, p, fastmathAttr); + mlir::Value subResult = + mlir::arith::SubFOp::create(builder, loc, a, mulResult, fastmathAttr); + return subResult; +} + mlir::Value IntrinsicLibrary::genMod(mlir::Type resultType, llvm::ArrayRef args) { + auto mod = builder.getModule(); + bool dontUseFastRealMod = false; + bool canUseApprox = mlir::arith::bitEnumContainsAny( + builder.getFastMathFlags(), mlir::arith::FastMathFlags::afn); + if (auto attr = mod->getAttrOfType("fir.no_fast_real_mod")) + dontUseFastRealMod = attr.getValue(); + assert(args.size() == 2); if (resultType.isUnsignedInteger()) { mlir::Type signlessType = mlir::IntegerType::get( @@ -7002,9 +7027,16 @@ 
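Illustrative aside, not part of the patch: the genFastMod helper added above computes MOD(a, p) inline as a - AINT(a / p) * p, and is only used when the afn (approximate functions) fast-math flag is set and the fir.no_fast_real_mod module attribute is absent. A standalone C++ sketch of the same arithmetic, assuming float operands in range:

    #include <cmath>
    #include <cstdio>

    // MOD(a, p) = a - AINT(a / p) * p; the truncation mirrors the
    // float -> signed integer -> float conversion pair emitted by genFastMod.
    static float fastRealMod(float a, float p) {
      float truncatedQuotient = std::trunc(a / p);
      return a - truncatedQuotient * p;
    }

    int main() {
      std::printf("%f\n", fastRealMod(7.5f, 2.0f));  // 1.500000
      std::printf("%f\n", fastRealMod(-7.5f, 2.0f)); // -1.500000, sign follows a
      return 0;
    }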
mlir::Value IntrinsicLibrary::genMod(mlir::Type resultType, if (mlir::isa(resultType)) return mlir::arith::RemSIOp::create(builder, loc, args[0], args[1]); - // Use runtime. - return builder.createConvert( - loc, resultType, fir::runtime::genMod(builder, loc, args[0], args[1])); + if (resultType.isFloat() && canUseApprox && !dontUseFastRealMod) { + // Treat MOD as an approximate function and code-gen inline code + // instead of calling into the Fortran runtime library. + return builder.createConvert(loc, resultType, + genFastMod(builder, loc, args[0], args[1])); + } else { + // Use runtime. + return builder.createConvert( + loc, resultType, fir::runtime::genMod(builder, loc, args[0], args[1])); + } } // MODULO diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp index 9d707250d11d9..50603cb86e4a5 100644 --- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp @@ -12,6 +12,7 @@ #include "flang/Optimizer/CodeGen/CodeGen.h" +#include "flang/Optimizer/Builder/CUFCommon.h" #include "flang/Optimizer/CodeGen/CodeGenOpenMP.h" #include "flang/Optimizer/CodeGen/FIROpPatterns.h" #include "flang/Optimizer/CodeGen/LLVMInsertChainFolder.h" @@ -1846,6 +1847,18 @@ struct EmboxOpConversion : public EmboxCommonConversion { }; static bool isDeviceAllocation(mlir::Value val, mlir::Value adaptorVal) { + if (val.getDefiningOp() && + val.getDefiningOp()->getParentOfType()) + return false; + // Check if the global symbol is in the device module. + if (auto addr = mlir::dyn_cast_or_null(val.getDefiningOp())) + if (auto gpuMod = + addr->getParentOfType() + .lookupSymbol(cudaDeviceModuleName)) + if (gpuMod.lookupSymbol(addr.getSymbol()) || + gpuMod.lookupSymbol(addr.getSymbol())) + return true; + if (auto loadOp = mlir::dyn_cast_or_null(val.getDefiningOp())) return isDeviceAllocation(loadOp.getMemref(), {}); if (auto boxAddrOp = diff --git a/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt b/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt index 3775a13e31e95..5c24fe58b05c4 100644 --- a/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt +++ b/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt @@ -3,6 +3,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) add_flang_library(HLFIRTransforms BufferizeHLFIR.cpp ConvertToFIR.cpp + ExpressionSimplification.cpp InlineElementals.cpp InlineHLFIRAssign.cpp InlineHLFIRCopyIn.cpp diff --git a/flang/lib/Optimizer/HLFIR/Transforms/ExpressionSimplification.cpp b/flang/lib/Optimizer/HLFIR/Transforms/ExpressionSimplification.cpp new file mode 100644 index 0000000000000..0559b49d8ecba --- /dev/null +++ b/flang/lib/Optimizer/HLFIR/Transforms/ExpressionSimplification.cpp @@ -0,0 +1,99 @@ +//===- ExpressionSimplification.cpp - Simplify HLFIR expressions ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "flang/Optimizer/Builder/FIRBuilder.h" +#include "flang/Optimizer/HLFIR/HLFIROps.h" +#include "flang/Optimizer/HLFIR/Passes.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" + +namespace hlfir { +#define GEN_PASS_DEF_EXPRESSIONSIMPLIFICATION +#include "flang/Optimizer/HLFIR/Passes.h.inc" +} // namespace hlfir + +// Get the first user of `op`. 
+// Note that we consider the first user to be the one on the lowest line of +// the emitted HLFIR. The user iterator considers the opposite. +template +static UserOp getFirstUser(mlir::Operation *op) { + auto it = op->user_begin(), end = op->user_end(), prev = it; + for (; it != end; prev = it++) + ; + if (prev != end) + if (auto userOp = mlir::dyn_cast(*prev)) + return userOp; + return {}; +} + +// Get the last user of `op`. +// Note that we consider the last user to be the one on the highest line of +// the emitted HLFIR. The user iterator considers the opposite. +template +static UserOp getLastUser(mlir::Operation *op) { + if (!op->getUsers().empty()) + if (auto userOp = mlir::dyn_cast(*op->user_begin())) + return userOp; + return {}; +} + +namespace { + +// Trim operations can be erased in certain expressions, such as character +// comparisons. +// Since a character comparison appends spaces to the shorter character, +// calls to trim() that are used only in the comparison can be eliminated. +// +// Example: +// `trim(x) == trim(y)` +// can be simplified to +// `x == y` +class EraseTrim : public mlir::OpRewritePattern { +public: + using mlir::OpRewritePattern::OpRewritePattern; + + llvm::LogicalResult + matchAndRewrite(hlfir::CharTrimOp trimOp, + mlir::PatternRewriter &rewriter) const override { + int trimUses = std::distance(trimOp->use_begin(), trimOp->use_end()); + auto cmpCharOp = getFirstUser(trimOp); + auto destroyOp = getLastUser(trimOp); + if (!cmpCharOp || !destroyOp || trimUses != 2) + return rewriter.notifyMatchFailure( + trimOp, "hlfir.char_trim is not used (only) by hlfir.cmpchar"); + + rewriter.eraseOp(destroyOp); + rewriter.replaceOp(trimOp, trimOp.getChr()); + return mlir::success(); + } +}; + +class ExpressionSimplificationPass + : public hlfir::impl::ExpressionSimplificationBase< + ExpressionSimplificationPass> { +public: + void runOnOperation() override { + mlir::MLIRContext *context = &getContext(); + + mlir::GreedyRewriteConfig config; + // Prevent the pattern driver from merging blocks. + config.setRegionSimplificationLevel( + mlir::GreedySimplifyRegionLevel::Disabled); + + mlir::RewritePatternSet patterns(context); + patterns.insert(context); + + if (mlir::failed(mlir::applyPatternsGreedily( + getOperation(), std::move(patterns), config))) { + mlir::emitError(getOperation()->getLoc(), + "failure in HLFIR expression simplification"); + signalPassFailure(); + } + } +}; + +} // namespace diff --git a/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp b/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp index 684de4b2fd4a5..89aa010e7d9a1 100644 --- a/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp +++ b/flang/lib/Optimizer/OpenACC/Support/FIROpenACCTypeInterfaces.cpp @@ -365,6 +365,14 @@ getBaseRef(mlir::TypedValue varPtr) { // object, get the base object. 
return op.getRef(); }) + .Case([&](auto op) -> mlir::Value { + // Strip the conversion and recursively check the operand + if (auto ptrLikeOperand = mlir::dyn_cast_if_present< + mlir::TypedValue>( + op.getValue())) + return getBaseRef(ptrLikeOperand); + return varPtr; + }) .Default([&](mlir::Operation *) { return varPtr; }); return baseRef; diff --git a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp index 57be863cfa1b8..e595e6129c6c3 100644 --- a/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp +++ b/flang/lib/Optimizer/OpenMP/MapInfoFinalization.cpp @@ -41,7 +41,9 @@ #include "mlir/Pass/Pass.h" #include "mlir/Support/LLVM.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" +#include "llvm/Support/raw_ostream.h" #include #include #include @@ -75,6 +77,112 @@ class MapInfoFinalizationPass /// | | std::map localBoxAllocas; + /// Return true if the given path exists in a list of paths. + static bool + containsPath(const llvm::SmallVectorImpl> &paths, + llvm::ArrayRef path) { + return llvm::any_of(paths, [&](const llvm::SmallVector &p) { + return p.size() == path.size() && + std::equal(p.begin(), p.end(), path.begin()); + }); + } + + /// Return true if the given path is already present in + /// op.getMembersIndexAttr(). + static bool mappedIndexPathExists(mlir::omp::MapInfoOp op, + llvm::ArrayRef indexPath) { + if (mlir::ArrayAttr attr = op.getMembersIndexAttr()) { + for (mlir::Attribute list : attr) { + auto listAttr = mlir::cast(list); + if (listAttr.size() != indexPath.size()) + continue; + bool allEq = true; + for (auto [i, val] : llvm::enumerate(listAttr)) { + if (mlir::cast(val).getInt() != indexPath[i]) { + allEq = false; + break; + } + } + if (allEq) + return true; + } + } + return false; + } + + /// Build a compact string key for an index path for set-based + /// deduplication. Format: "N:v0,v1,..." where N is the length. + static void buildPathKey(llvm::ArrayRef path, + llvm::SmallString<64> &outKey) { + outKey.clear(); + llvm::raw_svector_ostream os(outKey); + os << path.size() << ':'; + for (size_t i = 0; i < path.size(); ++i) { + if (i) + os << ','; + os << path[i]; + } + } + + /// Create the member map for coordRef and append it (and its index + /// path) to the provided new* vectors, if it is not already present. + void appendMemberMapIfNew( + mlir::omp::MapInfoOp op, fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value coordRef, llvm::ArrayRef indexPath, + llvm::StringRef memberName, + llvm::SmallVectorImpl &newMapOpsForFields, + llvm::SmallVectorImpl> &newMemberIndexPaths) { + // Local de-dup within this op invocation. + if (containsPath(newMemberIndexPaths, indexPath)) + return; + // Global de-dup against already present member indices. + if (mappedIndexPathExists(op, indexPath)) + return; + + if (op.getMapperId()) { + mlir::omp::DeclareMapperOp symbol = + mlir::SymbolTable::lookupNearestSymbolFrom< + mlir::omp::DeclareMapperOp>(op, op.getMapperIdAttr()); + assert(symbol && "missing symbol for declare mapper identifier"); + mlir::omp::DeclareMapperInfoOp mapperInfo = symbol.getDeclareMapperInfo(); + // TODO: Probably a way to cache these keys in someway so we don't + // constantly go through the process of rebuilding them on every check, to + // save some cycles, but it can wait for a subsequent patch. 
+ for (auto v : mapperInfo.getMapVars()) { + mlir::omp::MapInfoOp map = + mlir::cast(v.getDefiningOp()); + if (!map.getMembers().empty() && mappedIndexPathExists(map, indexPath)) + return; + } + } + + builder.setInsertionPoint(op); + fir::factory::AddrAndBoundsInfo info = fir::factory::getDataOperandBaseAddr( + builder, coordRef, /*isOptional=*/false, loc); + llvm::SmallVector bounds = fir::factory::genImplicitBoundsOps< + mlir::omp::MapBoundsOp, mlir::omp::MapBoundsType>( + builder, info, + hlfir::translateToExtendedValue(loc, builder, hlfir::Entity{coordRef}) + .first, + /*dataExvIsAssumedSize=*/false, loc); + + mlir::omp::MapInfoOp fieldMapOp = mlir::omp::MapInfoOp::create( + builder, loc, coordRef.getType(), coordRef, + mlir::TypeAttr::get(fir::unwrapRefType(coordRef.getType())), + op.getMapTypeAttr(), + builder.getAttr( + mlir::omp::VariableCaptureKind::ByRef), + /*varPtrPtr=*/mlir::Value{}, /*members=*/mlir::ValueRange{}, + /*members_index=*/mlir::ArrayAttr{}, bounds, + /*mapperId=*/mlir::FlatSymbolRefAttr(), + builder.getStringAttr(op.getNameAttr().strref() + "." + memberName + + ".implicit_map"), + /*partial_map=*/builder.getBoolAttr(false)); + + newMapOpsForFields.emplace_back(fieldMapOp); + newMemberIndexPaths.emplace_back(indexPath.begin(), indexPath.end()); + } + /// getMemberUserList gathers all users of a particular MapInfoOp that are /// other MapInfoOp's and places them into the mapMemberUsers list, which /// records the map that the current argument MapInfoOp "op" is part of @@ -363,7 +471,7 @@ class MapInfoFinalizationPass mlir::ArrayAttr newMembersAttr; mlir::SmallVector newMembers; llvm::SmallVector> memberIndices; - bool IsHasDeviceAddr = isHasDeviceAddr(op, target); + bool isHasDeviceAddrFlag = isHasDeviceAddr(op, target); if (!mapMemberUsers.empty() || !op.getMembers().empty()) getMemberIndicesAsVectors( @@ -406,7 +514,7 @@ class MapInfoFinalizationPass mapUser.parent.getMembersMutable().assign(newMemberOps); mapUser.parent.setMembersIndexAttr( builder.create2DI64ArrayAttr(memberIndices)); - } else if (!IsHasDeviceAddr) { + } else if (!isHasDeviceAddrFlag) { auto baseAddr = genBaseAddrMap(descriptor, op.getBounds(), op.getMapType(), builder); newMembers.push_back(baseAddr); @@ -429,7 +537,7 @@ class MapInfoFinalizationPass // The contents of the descriptor (the base address in particular) will // remain unchanged though. uint64_t mapType = op.getMapType(); - if (IsHasDeviceAddr) { + if (isHasDeviceAddrFlag) { mapType |= llvm::to_underlying( llvm::omp::OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS); } @@ -701,94 +809,134 @@ class MapInfoFinalizationPass auto recordType = mlir::cast(underlyingType); llvm::SmallVector newMapOpsForFields; - llvm::SmallVector fieldIndicies; + llvm::SmallVector> newMemberIndexPaths; + // 1) Handle direct top-level allocatable fields. for (auto fieldMemTyPair : recordType.getTypeList()) { auto &field = fieldMemTyPair.first; auto memTy = fieldMemTyPair.second; - bool shouldMapField = - llvm::find_if(mapVarForwardSlice, [&](mlir::Operation *sliceOp) { - if (!fir::isAllocatableType(memTy)) - return false; - - auto designateOp = mlir::dyn_cast(sliceOp); - if (!designateOp) - return false; - - return designateOp.getComponent() && - designateOp.getComponent()->strref() == field; - }) != mapVarForwardSlice.end(); - - // TODO Handle recursive record types. Adapting - // `createParentSymAndGenIntermediateMaps` to work direclty on MLIR - // entities might be helpful here. 
- - if (!shouldMapField) + if (!fir::isAllocatableType(memTy)) continue; - int32_t fieldIdx = recordType.getFieldIndex(field); - bool alreadyMapped = [&]() { - if (op.getMembersIndexAttr()) - for (auto indexList : op.getMembersIndexAttr()) { - auto indexListAttr = mlir::cast(indexList); - if (indexListAttr.size() == 1 && - mlir::cast(indexListAttr[0]).getInt() == - fieldIdx) - return true; - } - - return false; - }(); - - if (alreadyMapped) + bool referenced = llvm::any_of(mapVarForwardSlice, [&](auto *opv) { + auto designateOp = mlir::dyn_cast(opv); + return designateOp && designateOp.getComponent() && + designateOp.getComponent()->strref() == field; + }); + if (!referenced) continue; + int32_t fieldIdx = recordType.getFieldIndex(field); builder.setInsertionPoint(op); fir::IntOrValue idxConst = mlir::IntegerAttr::get(builder.getI32Type(), fieldIdx); auto fieldCoord = fir::CoordinateOp::create( builder, op.getLoc(), builder.getRefType(memTy), op.getVarPtr(), llvm::SmallVector{idxConst}); - fir::factory::AddrAndBoundsInfo info = - fir::factory::getDataOperandBaseAddr( - builder, fieldCoord, /*isOptional=*/false, op.getLoc()); - llvm::SmallVector bounds = - fir::factory::genImplicitBoundsOps( - builder, info, - hlfir::translateToExtendedValue(op.getLoc(), builder, - hlfir::Entity{fieldCoord}) - .first, - /*dataExvIsAssumedSize=*/false, op.getLoc()); - - mlir::omp::MapInfoOp fieldMapOp = mlir::omp::MapInfoOp::create( - builder, op.getLoc(), fieldCoord.getResult().getType(), - fieldCoord.getResult(), - mlir::TypeAttr::get( - fir::unwrapRefType(fieldCoord.getResult().getType())), - op.getMapTypeAttr(), - builder.getAttr( - mlir::omp::VariableCaptureKind::ByRef), - /*varPtrPtr=*/mlir::Value{}, /*members=*/mlir::ValueRange{}, - /*members_index=*/mlir::ArrayAttr{}, bounds, - /*mapperId=*/mlir::FlatSymbolRefAttr(), - builder.getStringAttr(op.getNameAttr().strref() + "." + field + - ".implicit_map"), - /*partial_map=*/builder.getBoolAttr(false)); - newMapOpsForFields.emplace_back(fieldMapOp); - fieldIndicies.emplace_back(fieldIdx); + int64_t fieldIdx64 = static_cast(fieldIdx); + llvm::SmallVector idxPath{fieldIdx64}; + appendMemberMapIfNew(op, builder, op.getLoc(), fieldCoord, idxPath, + field, newMapOpsForFields, newMemberIndexPaths); + } + + // Handle nested allocatable fields along any component chain + // referenced in the region via HLFIR designates. + llvm::SmallVector> seenIndexPaths; + for (mlir::Operation *sliceOp : mapVarForwardSlice) { + auto designateOp = mlir::dyn_cast(sliceOp); + if (!designateOp || !designateOp.getComponent()) + continue; + llvm::SmallVector compPathReversed; + compPathReversed.push_back(designateOp.getComponent()->strref()); + mlir::Value curBase = designateOp.getMemref(); + bool rootedAtMapArg = false; + while (true) { + if (auto parentDes = curBase.getDefiningOp()) { + if (!parentDes.getComponent()) + break; + compPathReversed.push_back(parentDes.getComponent()->strref()); + curBase = parentDes.getMemref(); + continue; + } + if (auto decl = curBase.getDefiningOp()) { + if (auto barg = + mlir::dyn_cast(decl.getMemref())) + rootedAtMapArg = (barg == opBlockArg); + } else if (auto blockArg = + mlir::dyn_cast_or_null( + curBase)) { + rootedAtMapArg = (blockArg == opBlockArg); + } + break; + } + // Only process nested paths (2+ components). Single-component paths + // for direct fields are handled above. 
+ if (!rootedAtMapArg || compPathReversed.size() < 2) + continue; + builder.setInsertionPoint(op); + llvm::SmallVector indexPath; + mlir::Type curTy = underlyingType; + mlir::Value coordRef = op.getVarPtr(); + bool validPath = true; + for (llvm::StringRef compName : llvm::reverse(compPathReversed)) { + auto recTy = mlir::dyn_cast(curTy); + if (!recTy) { + validPath = false; + break; + } + int32_t idx = recTy.getFieldIndex(compName); + if (idx < 0) { + validPath = false; + break; + } + indexPath.push_back(idx); + mlir::Type memTy = recTy.getType(idx); + fir::IntOrValue idxConst = + mlir::IntegerAttr::get(builder.getI32Type(), idx); + coordRef = fir::CoordinateOp::create( + builder, op.getLoc(), builder.getRefType(memTy), coordRef, + llvm::SmallVector{idxConst}); + curTy = memTy; + } + if (!validPath) + continue; + if (auto finalRefTy = + mlir::dyn_cast(coordRef.getType())) { + mlir::Type eleTy = finalRefTy.getElementType(); + if (fir::isAllocatableType(eleTy)) { + if (!containsPath(seenIndexPaths, indexPath)) { + seenIndexPaths.emplace_back(indexPath.begin(), indexPath.end()); + appendMemberMapIfNew(op, builder, op.getLoc(), coordRef, + indexPath, compPathReversed.front(), + newMapOpsForFields, newMemberIndexPaths); + } + } + } } if (newMapOpsForFields.empty()) return mlir::WalkResult::advance(); - op.getMembersMutable().append(newMapOpsForFields); + // Deduplicate by index path to avoid emitting duplicate members for + // the same component. Use a set-based key to keep this near O(n). + llvm::SmallVector dedupMapOps; + llvm::SmallVector> dedupIndexPaths; + llvm::StringSet<> seenKeys; + for (auto [i, mapOp] : llvm::enumerate(newMapOpsForFields)) { + const auto &path = newMemberIndexPaths[i]; + llvm::SmallString<64> key; + buildPathKey(path, key); + if (seenKeys.contains(key)) + continue; + seenKeys.insert(key); + dedupMapOps.push_back(mapOp); + dedupIndexPaths.emplace_back(path.begin(), path.end()); + } + op.getMembersMutable().append(dedupMapOps); llvm::SmallVector> newMemberIndices; - mlir::ArrayAttr oldMembersIdxAttr = op.getMembersIndexAttr(); - - if (oldMembersIdxAttr) - for (mlir::Attribute indexList : oldMembersIdxAttr) { + if (mlir::ArrayAttr oldAttr = op.getMembersIndexAttr()) + for (mlir::Attribute indexList : oldAttr) { llvm::SmallVector listVec; for (mlir::Attribute index : mlir::cast(indexList)) @@ -796,10 +944,8 @@ class MapInfoFinalizationPass newMemberIndices.emplace_back(std::move(listVec)); } - - for (int64_t newFieldIdx : fieldIndicies) - newMemberIndices.emplace_back( - llvm::SmallVector(1, newFieldIdx)); + for (auto &path : dedupIndexPaths) + newMemberIndices.emplace_back(path); op.setMembersIndexAttr(builder.create2DI64ArrayAttr(newMemberIndices)); op.setPartialMap(true); diff --git a/flang/lib/Optimizer/Passes/Pipelines.cpp b/flang/lib/Optimizer/Passes/Pipelines.cpp index fd7d521722a42..a83b0665eaf1f 100644 --- a/flang/lib/Optimizer/Passes/Pipelines.cpp +++ b/flang/lib/Optimizer/Passes/Pipelines.cpp @@ -95,12 +95,14 @@ getEmissionKind(llvm::codegenoptions::DebugInfoKind kind) { void addDebugInfoPass(mlir::PassManager &pm, llvm::codegenoptions::DebugInfoKind debugLevel, llvm::OptimizationLevel optLevel, - llvm::StringRef inputFilename, int32_t dwarfVersion) { + llvm::StringRef inputFilename, int32_t dwarfVersion, + llvm::StringRef splitDwarfFile) { fir::AddDebugInfoOptions options; options.debugLevel = getEmissionKind(debugLevel); options.isOptimized = optLevel != llvm::OptimizationLevel::O0; options.inputFilename = inputFilename; options.dwarfVersion = dwarfVersion; + 
options.splitDwarfFile = splitDwarfFile; addPassConditionally(pm, disableDebugInfo, [&]() { return fir::createAddDebugInfoPass(options); }); } @@ -245,6 +247,10 @@ void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm, void createHLFIRToFIRPassPipeline(mlir::PassManager &pm, EnableOpenMP enableOpenMP, llvm::OptimizationLevel optLevel) { + if (optLevel.getSizeLevel() > 0 || optLevel.getSpeedupLevel() > 0) { + addNestedPassToAllTopLevelOperations( + pm, hlfir::createExpressionSimplification); + } if (optLevel.isOptimizingForSpeed()) { addCanonicalizerPassWithoutRegionSimplification(pm); addNestedPassToAllTopLevelOperations( @@ -336,9 +342,11 @@ void createOpenMPFIRPassPipeline(mlir::PassManager &pm, void createDebugPasses(mlir::PassManager &pm, llvm::codegenoptions::DebugInfoKind debugLevel, llvm::OptimizationLevel OptLevel, - llvm::StringRef inputFilename, int32_t dwarfVersion) { + llvm::StringRef inputFilename, int32_t dwarfVersion, + llvm::StringRef splitDwarfFile) { if (debugLevel != llvm::codegenoptions::NoDebugInfo) - addDebugInfoPass(pm, debugLevel, OptLevel, inputFilename, dwarfVersion); + addDebugInfoPass(pm, debugLevel, OptLevel, inputFilename, dwarfVersion, + splitDwarfFile); } void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm, @@ -356,7 +364,7 @@ void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm, pm, (config.DebugInfo != llvm::codegenoptions::NoDebugInfo)); fir::addExternalNameConversionPass(pm, config.Underscoring); fir::createDebugPasses(pm, config.DebugInfo, config.OptLevel, inputFilename, - config.DwarfVersion); + config.DwarfVersion, config.SplitDwarfFile); fir::addTargetRewritePass(pm); fir::addCompilerGeneratedNamesConversionPass(pm); diff --git a/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp b/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp index af96c0be6fae9..bdf7e4a366cf1 100644 --- a/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp +++ b/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp @@ -694,7 +694,10 @@ void AddDebugInfoPass::runOnOperation() { mlir::LLVM::DICompileUnitAttr cuAttr = mlir::LLVM::DICompileUnitAttr::get( mlir::DistinctAttr::create(mlir::UnitAttr::get(context)), llvm::dwarf::getLanguage("DW_LANG_Fortran95"), fileAttr, producer, - isOptimized, debugLevel); + isOptimized, debugLevel, + /*nameTableKind=*/mlir::LLVM::DINameTableKind::Default, + splitDwarfFile.empty() ? 
mlir::StringAttr() + : mlir::StringAttr::get(context, splitDwarfFile)); module.walk([&](mlir::func::FuncOp funcOp) { handleFuncOp(funcOp, fileAttr, cuAttr, typeGen, &symbolTable); diff --git a/flang/lib/Optimizer/Transforms/ConvertComplexPow.cpp b/flang/lib/Optimizer/Transforms/ConvertComplexPow.cpp index 127f8720ae524..97386a209b25f 100644 --- a/flang/lib/Optimizer/Transforms/ConvertComplexPow.cpp +++ b/flang/lib/Optimizer/Transforms/ConvertComplexPow.cpp @@ -83,9 +83,7 @@ void ConvertComplexPowPass::runOnOperation() { call.setFastmathAttr(fmf); powIop.replaceAllUsesWith(call.getResult(0)); powIop.erase(); - } - - if (auto powOp = dyn_cast(op)) { + } else if (auto powOp = dyn_cast(op)) { builder.setInsertionPoint(powOp); Location loc = powOp.getLoc(); auto complexTy = cast(powOp.getType()); diff --git a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp index d038c467b166a..00fdb5a4516bd 100644 --- a/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp +++ b/flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp @@ -679,26 +679,38 @@ mlir::LLVM::DITypeAttr DebugTypeGenerator::convertPointerLikeType( /*optional
=*/std::nullopt, /*extra data=*/nullptr); } +static mlir::StringAttr getBasicTypeName(mlir::MLIRContext *context, + llvm::StringRef baseName, + unsigned bitSize) { + std::ostringstream oss; + oss << baseName.str(); + if (bitSize != 32) + oss << "(kind=" << (bitSize / 8) << ")"; + return mlir::StringAttr::get(context, oss.str()); +} + mlir::LLVM::DITypeAttr DebugTypeGenerator::convertType(mlir::Type Ty, mlir::LLVM::DIFileAttr fileAttr, mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp) { mlir::MLIRContext *context = module.getContext(); if (Ty.isInteger()) { - return genBasicType(context, mlir::StringAttr::get(context, "integer"), - Ty.getIntOrFloatBitWidth(), llvm::dwarf::DW_ATE_signed); + unsigned bitWidth = Ty.getIntOrFloatBitWidth(); + return genBasicType(context, getBasicTypeName(context, "integer", bitWidth), + bitWidth, llvm::dwarf::DW_ATE_signed); } else if (mlir::isa(Ty)) { - return genBasicType(context, mlir::StringAttr::get(context, "real"), - Ty.getIntOrFloatBitWidth(), llvm::dwarf::DW_ATE_float); + unsigned bitWidth = Ty.getIntOrFloatBitWidth(); + return genBasicType(context, getBasicTypeName(context, "real", bitWidth), + bitWidth, llvm::dwarf::DW_ATE_float); } else if (auto logTy = mlir::dyn_cast_if_present(Ty)) { - return genBasicType(context, - mlir::StringAttr::get(context, logTy.getMnemonic()), - kindMapping.getLogicalBitsize(logTy.getFKind()), - llvm::dwarf::DW_ATE_boolean); + unsigned bitWidth = kindMapping.getLogicalBitsize(logTy.getFKind()); + return genBasicType( + context, getBasicTypeName(context, logTy.getMnemonic(), bitWidth), + bitWidth, llvm::dwarf::DW_ATE_boolean); } else if (auto cplxTy = mlir::dyn_cast_if_present(Ty)) { auto floatTy = mlir::cast(cplxTy.getElementType()); unsigned bitWidth = floatTy.getWidth(); - return genBasicType(context, mlir::StringAttr::get(context, "complex"), + return genBasicType(context, getBasicTypeName(context, "complex", bitWidth), bitWidth * 2, llvm::dwarf::DW_ATE_complex_float); } else if (auto seqTy = mlir::dyn_cast_if_present(Ty)) { return convertSequenceType(seqTy, fileAttr, scope, declOp); diff --git a/flang/lib/Parser/basic-parsers.h b/flang/lib/Parser/basic-parsers.h index 7e69d41debfcd..46d5168c80fe7 100644 --- a/flang/lib/Parser/basic-parsers.h +++ b/flang/lib/Parser/basic-parsers.h @@ -828,7 +828,7 @@ struct NextCh { if (std::optional result{state.GetNextChar()}) { return result; } - state.Say("end of file"_err_en_US); + state.Say(MessageFixedText::endOfFileMessage); return std::nullopt; } }; diff --git a/flang/lib/Parser/executable-parsers.cpp b/flang/lib/Parser/executable-parsers.cpp index ecabbe1db6def..fadec1f11d1db 100644 --- a/flang/lib/Parser/executable-parsers.cpp +++ b/flang/lib/Parser/executable-parsers.cpp @@ -65,21 +65,26 @@ constexpr auto obsoleteExecutionPartConstruct{recovery(ignoredStatementPrefix >> statement("REDIMENSION" >> name / parenthesized(nonemptyList(Parser{}))))))}; -TYPE_PARSER(recovery( - CONTEXT_PARSER("execution part construct"_en_US, - first(construct(executableConstruct), - construct(statement(indirect(formatStmt))), - construct(statement(indirect(entryStmt))), - construct(statement(indirect(dataStmt))), - extension( - "nonstandard usage: NAMELIST in execution part"_port_en_US, +// The "!consumedAllInput >>" test prevents a cascade of errors at EOF. 
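Regarding the getBasicTypeName helper in the DebugTypeGenerator hunk above: as a hedged illustration (not part of the patch, and assuming the usual flang kind mapping where the kind number equals the size in bytes), the basic type names one would expect in the emitted debug info are:

    ! Illustrative declarations and the expected basic type names.
    integer          :: i   ! 32-bit        -> "integer"
    integer(kind=8)  :: j   ! 64-bit        -> "integer(kind=8)"
    real(kind=8)     :: x   ! 64-bit        -> "real(kind=8)"
    complex(kind=8)  :: z   ! 64-bit parts  -> "complex(kind=8)"

Only non-default (non-32-bit) widths get the "(kind=N)" suffix; default kinds keep the bare name.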
+TYPE_PARSER(!consumedAllInput >> + recovery( + CONTEXT_PARSER("execution part construct"_en_US, + first(construct(executableConstruct), construct( - statement(indirect(Parser{})))), - obsoleteExecutionPartConstruct, - lookAhead(declarationConstruct) >> SkipTo<'\n'>{} >> - fail( - "misplaced declaration in the execution part"_err_en_US))), - construct(executionPartErrorRecovery))) + statement(indirect(formatStmt))), + construct( + statement(indirect(entryStmt))), + construct( + statement(indirect(dataStmt))), + extension( + "nonstandard usage: NAMELIST in execution part"_port_en_US, + construct( + statement(indirect(Parser{})))), + obsoleteExecutionPartConstruct, + lookAhead(declarationConstruct) >> SkipTo<'\n'>{} >> + fail( + "misplaced declaration in the execution part"_err_en_US))), + construct(executionPartErrorRecovery))) // R509 execution-part -> executable-construct [execution-part-construct]... TYPE_CONTEXT_PARSER("execution part"_en_US, diff --git a/flang/lib/Parser/message.cpp b/flang/lib/Parser/message.cpp index 2a8101dd0b810..2c4f930c0b088 100644 --- a/flang/lib/Parser/message.cpp +++ b/flang/lib/Parser/message.cpp @@ -21,6 +21,10 @@ namespace Fortran::parser { +// The nextCh parser emits this, and Message::GetProvenanceRange() looks for it. +const MessageFixedText MessageFixedText::endOfFileMessage{ + "end of file"_err_en_US}; + llvm::raw_ostream &operator<<(llvm::raw_ostream &o, const MessageFixedText &t) { std::size_t n{t.text().size()}; for (std::size_t j{0}; j < n; ++j) { @@ -232,7 +236,20 @@ std::optional Message::GetProvenanceRange( const AllCookedSources &allCooked) const { return common::visit( common::visitors{ - [&](CharBlock cb) { return allCooked.GetProvenanceRange(cb); }, + [&](CharBlock cb) -> std::optional { + if (auto pr{allCooked.GetProvenanceRange(cb)}) { + return pr; + } else if (const auto *fixed{std::get_if(&text_)}; + fixed && + fixed->text() == MessageFixedText::endOfFileMessage.text() && + cb.begin() && cb.size() == 1) { + // Failure from "nextCh" due to reaching EOF. Back up one byte + // to the terminal newline so that the output looks better. + return allCooked.GetProvenanceRange(CharBlock{cb.begin() - 1, 1}); + } else { + return std::nullopt; + } + }, [](const ProvenanceRange &pr) { return std::make_optional(pr); }, }, location_); diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp index fbdb2a2faa715..9507021057476 100644 --- a/flang/lib/Parser/openmp-parsers.cpp +++ b/flang/lib/Parser/openmp-parsers.cpp @@ -315,15 +315,56 @@ TYPE_PARSER( // construct(Parser{}) || construct(Parser{})) -TYPE_PARSER(sourced( // - construct(Parser{}) || - construct(Parser{}) || - construct(Parser{}))) +TYPE_PARSER(construct( + Parser{} / ":", Parser{})) + +// Make the parsing of OmpArgument directive-sensitive. The issue is that +// name1:name2 can match either OmpBaseVariantNames or OmpReductionSpecifier. +// In the former case, "name2" is a name of a function, in the latter, of a +// type. To resolve the conflict we need information provided by name +// resolution, but by that time we can't modify the AST anymore, and the +// name resolution may have implicitly declared a symbol, or issued a message. +template +struct OmpArgumentParser { + using resultType = OmpArgument; + + std::optional Parse(ParseState &state) const { + constexpr auto parser{sourced(first( // + construct(Parser{}), + // By default, prefer OmpReductionSpecifier over OmpBaseVariantNames. 
+ construct(Parser{}), + construct(Parser{})))}; + return parser.Parse(state); + } +}; + +template <> +struct OmpArgumentParser { + using resultType = OmpArgument; + + std::optional Parse(ParseState &state) const { + constexpr auto parser{sourced(first( // + construct(Parser{}), + // In DECLARE_VARIANT parse OmpBaseVariantNames instead of + // OmpReductionSpecifier. + construct(Parser{}), + construct(Parser{})))}; + return parser.Parse(state); + } +}; TYPE_PARSER(construct(nonemptyList(Parser{}))) -TYPE_PARSER(sourced( // - construct(nonemptyList(Parser{})))) +template +struct OmpArgumentListParser { + using resultType = OmpArgumentList; + + std::optional Parse(ParseState &state) const { + return sourced( + construct(nonemptyList(OmpArgumentParser{}))) + .Parse(state); + } +}; TYPE_PARSER( // construct(Parser{}) || @@ -982,6 +1023,9 @@ TYPE_PARSER( maybe(":"_tok >> nonemptyList(Parser{})), /*PostModified=*/pure(true))) +TYPE_PARSER(construct( + scalarIntConstantExpr, "," >> scalarIntConstantExpr)) + // OpenMPv5.2 12.5.2 detach-clause -> DETACH (event-handle) TYPE_PARSER(construct(Parser{})) @@ -1166,6 +1210,8 @@ TYPE_PARSER( // parenthesized(Parser{}))) || "LINK" >> construct(construct( parenthesized(Parser{}))) || + "LOOPRANGE" >> construct(construct( + parenthesized(Parser{}))) || "MAP" >> construct(construct( parenthesized(Parser{}))) || "MATCH" >> construct(construct( @@ -1312,12 +1358,23 @@ TYPE_PARSER( applyFunction(makeFlushFromOldSyntax, verbatim("FLUSH"_tok) / !lookAhead("("_tok), maybe(Parser{}), - maybe(parenthesized(Parser{})), + maybe(parenthesized( + OmpArgumentListParser{})), pure(OmpDirectiveSpecification::Flags::DeprecatedSyntax)))) || + // Parse DECLARE_VARIANT individually, because the "[base:]variant" + // argument will conflict with DECLARE_REDUCTION's "ident:types...". 
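A hedged illustration of that conflict (the procedure, reduction, and type names below are made up): both directives take an argument of the colon-separated shape name1:name2, but in one it names a base procedure and a variant procedure, while in the other it names a reduction identifier and a type, so only the directive name can disambiguate the parse.

    !$omp declare_variant(base_proc:fast_proc) match(construct={parallel})
    !$omp declare_reduction(my_add : integer : omp_out = omp_out + omp_in)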
+ predicated(Parser{}, + IsDirective(llvm::omp::Directive::OMPD_declare_variant)) >= + sourced(construct( + sourced(OmpDirectiveNameParser{}), + maybe(parenthesized(OmpArgumentListParser< + llvm::omp::Directive::OMPD_declare_variant>{})), + maybe(Parser{}), + pure(OmpDirectiveSpecification::Flags::None))) || // Parse the standard syntax: directive [(arguments)] [clauses] sourced(construct( // sourced(OmpDirectiveNameParser{}), - maybe(parenthesized(Parser{})), + maybe(parenthesized(OmpArgumentListParser<>{})), maybe(Parser{}), pure(OmpDirectiveSpecification::Flags::None)))) @@ -1711,32 +1768,21 @@ TYPE_PARSER(construct( // OpenMP 5.2: 7.5.4 Declare Variant directive TYPE_PARSER(sourced(construct( - verbatim("DECLARE VARIANT"_tok) || verbatim("DECLARE_VARIANT"_tok), - "(" >> maybe(name / ":"), name / ")", Parser{}))) + predicated(Parser{}, + IsDirective(llvm::omp::Directive::OMPD_declare_variant)) >= + Parser{}))) // 2.16 Declare Reduction Construct TYPE_PARSER(sourced(construct( - verbatim("DECLARE REDUCTION"_tok) || verbatim("DECLARE_REDUCTION"_tok), - "(" >> indirect(Parser{}) / ")", - maybe(Parser{})))) - -// declare-target with list -TYPE_PARSER(sourced(construct( - parenthesized(Parser{})))) - -// declare-target with clause -TYPE_PARSER( - sourced(construct(Parser{}))) - -// declare-target-specifier -TYPE_PARSER( - construct(Parser{}) || - construct(Parser{})) + predicated(Parser{}, + IsDirective(llvm::omp::Directive::OMPD_declare_reduction)) >= + Parser{}))) // 2.10.6 Declare Target Construct TYPE_PARSER(sourced(construct( - verbatim("DECLARE TARGET"_tok) || verbatim("DECLARE_TARGET"_tok), - Parser{}))) + predicated(Parser{}, + IsDirective(llvm::omp::Directive::OMPD_declare_target)) >= + Parser{}))) static OmpMapperSpecifier ConstructOmpMapperSpecifier( std::optional &&mapperName, TypeSpec &&typeSpec, Name &&varName) { @@ -1764,8 +1810,9 @@ TYPE_PARSER(applyFunction(ConstructOmpMapperSpecifier, // OpenMP 5.2: 5.8.8 Declare Mapper Construct TYPE_PARSER(sourced(construct( - verbatim("DECLARE MAPPER"_tok) || verbatim("DECLARE_MAPPER"_tok), - parenthesized(Parser{}), Parser{}))) + predicated(Parser{}, + IsDirective(llvm::omp::Directive::OMPD_declare_mapper)) >= + Parser{}))) TYPE_PARSER(construct(Parser{}) || construct(Parser{})) @@ -1782,8 +1829,9 @@ TYPE_PARSER( // 2.8.2 Declare Simd construct TYPE_PARSER(sourced(construct( - verbatim("DECLARE SIMD"_tok) || verbatim("DECLARE_SIMD"_tok), - maybe(parenthesized(name)), Parser{}))) + predicated(Parser{}, + IsDirective(llvm::omp::Directive::OMPD_declare_simd)) >= + Parser{}))) TYPE_PARSER(sourced( // construct( @@ -1793,7 +1841,9 @@ TYPE_PARSER(sourced( // // 2.4 Requires construct TYPE_PARSER(sourced(construct( - verbatim("REQUIRES"_tok), Parser{}))) + predicated(OmpDirectiveNameParser{}, + IsDirective(llvm::omp::Directive::OMPD_requires)) >= + Parser{}))) // 2.15.2 Threadprivate directive TYPE_PARSER(sourced( // @@ -1810,7 +1860,9 @@ TYPE_PARSER( // Assumes Construct TYPE_PARSER(sourced(construct( - verbatim("ASSUMES"_tok), Parser{}))) + predicated(OmpDirectiveNameParser{}, + IsDirective(llvm::omp::Directive::OMPD_assumes)) >= + Parser{}))) // Declarative constructs TYPE_PARSER( diff --git a/flang/lib/Parser/parsing.cpp b/flang/lib/Parser/parsing.cpp index 8a8c6ef673a8c..2df6881146af8 100644 --- a/flang/lib/Parser/parsing.cpp +++ b/flang/lib/Parser/parsing.cpp @@ -85,6 +85,7 @@ const SourceFile *Parsing::Prescan(const std::string &path, Options options) { if (options.features.IsEnabled(LanguageFeature::OpenACC) || 
(options.prescanAndReformat && noneOfTheAbove)) { prescanner.AddCompilerDirectiveSentinel("$acc"); + prescanner.AddCompilerDirectiveSentinel("@acc"); } if (options.features.IsEnabled(LanguageFeature::OpenMP) || (options.prescanAndReformat && noneOfTheAbove)) { diff --git a/flang/lib/Parser/prescan.cpp b/flang/lib/Parser/prescan.cpp index 3a9a475c365ee..66e5b2cbd5c7f 100644 --- a/flang/lib/Parser/prescan.cpp +++ b/flang/lib/Parser/prescan.cpp @@ -97,17 +97,7 @@ void Prescanner::Prescan(ProvenanceRange range) { while (!IsAtEnd()) { Statement(); } - if (inFixedForm_ != beganInFixedForm) { - std::string dir{"!dir$ "}; - if (beganInFixedForm) { - dir += "fixed"; - } else { - dir += "free"; - } - dir += '\n'; - TokenSequence tokens{dir, allSources_.AddCompilerInsertion(dir).start()}; - tokens.Emit(cooked_); - } + inFixedForm_ = beganInFixedForm; } void Prescanner::Statement() { @@ -157,6 +147,11 @@ void Prescanner::Statement() { directiveSentinel_[4] == '\0') { // CUDA conditional compilation line. condOffset = 5; + } else if (directiveSentinel_[0] == '@' && directiveSentinel_[1] == 'a' && + directiveSentinel_[2] == 'c' && directiveSentinel_[3] == 'c' && + directiveSentinel_[4] == '\0') { + // OpenACC conditional compilation line. + condOffset = 5; } if (condOffset && !preprocessingOnly_) { at_ += *condOffset, column_ += *condOffset; @@ -324,10 +319,11 @@ void Prescanner::Statement() { } NormalizeCompilerDirectiveCommentMarker(*preprocessed); preprocessed->ToLowerCase(); - SourceFormChange(preprocessed->ToString()); - CheckAndEmitLine( - preprocessed->ClipComment(*this, true /* skip first ! */), - newlineProvenance); + if (!SourceFormChange(preprocessed->ToString())) { + CheckAndEmitLine( + preprocessed->ClipComment(*this, true /* skip first ! */), + newlineProvenance); + } break; case LineClassification::Kind::Source: if (inFixedForm_) { @@ -370,14 +366,16 @@ void Prescanner::Statement() { } } tokens.ToLowerCase(); - SourceFormChange(tokens.ToString()); + if (!SourceFormChange(tokens.ToString())) { + CheckAndEmitLine(tokens, newlineProvenance); + } } else { // Kind::Source tokens.ToLowerCase(); if (inFixedForm_) { EnforceStupidEndStatementRules(tokens); } + CheckAndEmitLine(tokens, newlineProvenance); } - CheckAndEmitLine(tokens, newlineProvenance); } directiveSentinel_ = nullptr; } @@ -1774,11 +1772,15 @@ Prescanner::LineClassification Prescanner::ClassifyLine( return classification; } -void Prescanner::SourceFormChange(std::string &&dir) { +bool Prescanner::SourceFormChange(std::string &&dir) { if (dir == "!dir$ free") { inFixedForm_ = false; + return true; } else if (dir == "!dir$ fixed") { inFixedForm_ = true; + return true; + } else { + return false; } } diff --git a/flang/lib/Parser/prescan.h b/flang/lib/Parser/prescan.h index c181c03273ccc..fc38adb926530 100644 --- a/flang/lib/Parser/prescan.h +++ b/flang/lib/Parser/prescan.h @@ -225,7 +225,7 @@ class Prescanner { LineClassification ClassifyLine(const char *) const; LineClassification ClassifyLine( TokenSequence &, Provenance newlineProvenance) const; - void SourceFormChange(std::string &&); + bool SourceFormChange(std::string &&); bool CompilerDirectiveContinuation(TokenSequence &, const char *sentinel); bool SourceLineContinuation(TokenSequence &); diff --git a/flang/lib/Parser/program-parsers.cpp b/flang/lib/Parser/program-parsers.cpp index 5f4e62ffdbbf2..92c0a64b39a9d 100644 --- a/flang/lib/Parser/program-parsers.cpp +++ b/flang/lib/Parser/program-parsers.cpp @@ -67,8 +67,11 @@ static constexpr auto programUnit{ 
lookAhead(maybe(label) >> validFunctionStmt) >> construct(indirect(functionSubprogram)) || construct(indirect(Parser{}))}; -static constexpr auto normalProgramUnit{StartNewSubprogram{} >> programUnit / - skipMany(";"_tok) / space / recovery(endOfLine, SkipPast<'\n'>{})}; + +static constexpr auto normalProgramUnit{ + !consumedAllInput >> StartNewSubprogram{} >> programUnit / + skipMany(";"_tok) / space / recovery(endOfLine, skipToNextLineIfAny)}; + static constexpr auto globalCompilerDirective{ construct(indirect(compilerDirective))}; @@ -86,7 +89,7 @@ static constexpr auto globalOpenACCCompilerDirective{ TYPE_PARSER( construct(extension( "nonstandard usage: empty source file"_port_en_US, - skipStuffBeforeStatement >> !nextCh >> + skipStuffBeforeStatement >> consumedAllInput >> pure>()) || some(globalCompilerDirective || globalOpenACCCompilerDirective || normalProgramUnit) / @@ -107,7 +110,7 @@ constexpr auto actionStmtLookAhead{first(actionStmt >> ok, // first in the execution part "ALLOCATE ("_tok, "CALL" >> name >> "("_tok, "GO TO"_tok, "OPEN ("_tok, "PRINT"_tok / space / !"("_tok, "READ ("_tok, "WRITE ("_tok)}; -constexpr auto execPartLookAhead{first(actionStmtLookAhead >> ok, +constexpr auto execPartLookAhead{first(actionStmtLookAhead, openaccConstruct >> ok, openmpExecDirective >> ok, "ASSOCIATE ("_tok, "BLOCK"_tok, "SELECT"_tok, "CHANGE TEAM"_sptok, "CRITICAL"_tok, "DO"_tok, "IF ("_tok, "WHERE ("_tok, "FORALL ("_tok, "!$CUF"_tok)}; diff --git a/flang/lib/Parser/stmt-parser.h b/flang/lib/Parser/stmt-parser.h index ee45c6fd5d38c..b2bb8dd843642 100644 --- a/flang/lib/Parser/stmt-parser.h +++ b/flang/lib/Parser/stmt-parser.h @@ -27,21 +27,22 @@ template inline constexpr auto unterminatedStatement(const PA &p) { return skipStuffBeforeStatement >> sourced(construct>( - maybe(label), space >> p)); + maybe(label / space), p)); } constexpr auto atEndOfStmt{space >> withMessage("expected end of statement"_err_en_US, lookAhead(";\n"_ch))}; constexpr auto checkEndOfKnownStmt{recovery(atEndOfStmt, SkipTo<'\n'>{})}; -constexpr auto endOfLine{ - "\n"_ch >> ok || fail("expected end of line"_err_en_US)}; +constexpr auto endOfLine{consumedAllInput || + withMessage("expected end of line"_err_en_US, "\n"_ch >> ok)}; constexpr auto semicolons{";"_ch >> skipMany(";"_tok) / space / maybe("\n"_ch)}; constexpr auto endOfStmt{ space >> withMessage("expected end of statement"_err_en_US, semicolons || endOfLine)}; -constexpr auto forceEndOfStmt{recovery(endOfStmt, SkipPast<'\n'>{})}; +constexpr auto skipToNextLineIfAny{consumedAllInput || SkipPast<'\n'>{}}; +constexpr auto forceEndOfStmt{recovery(endOfStmt, skipToNextLineIfAny)}; template inline constexpr auto statement(const PA &p) { return unterminatedStatement(p) / endOfStmt; @@ -70,17 +71,17 @@ constexpr auto ignoredStatementPrefix{ // Error recovery within a statement() call: skip *to* the end of the line, // unless at an END or CONTAINS statement. constexpr auto inStmtErrorRecovery{!"END"_tok >> !"CONTAINS"_tok >> - SkipTo<'\n'>{} >> construct()}; + (consumedAllInput || SkipTo<'\n'>{}) >> construct()}; // Error recovery within statement sequences: skip *past* the end of the line, // but not over an END or CONTAINS statement. constexpr auto skipStmtErrorRecovery{!"END"_tok >> !"CONTAINS"_tok >> - SkipPast<'\n'>{} >> construct()}; + (consumedAllInput || SkipPast<'\n'>{}) >> construct()}; // Error recovery across statements: skip the line, unless it looks // like it might end the containing construct. 
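Circling back to the prescanner changes in this group of hunks: the new "@acc" sentinel is registered alongside "$acc", and — assuming it mirrors the existing "!@cuf" behaviour — provides OpenACC conditional compilation lines, while "!dir$ free"/"!dir$ fixed" lines now switch the source form without being echoed into the cooked character stream. A small hedged sketch of the source-level effect:

    !@acc use openacc   ! rest of the line is compiled only when OpenACC is enabled
    !dir$ fixed         ! switches the prescanner to fixed form; the line itself is consumed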
constexpr auto stmtErrorRecoveryStart{ignoredStatementPrefix}; -constexpr auto skipBadLine{SkipPast<'\n'>{} >> construct()}; +constexpr auto skipBadLine{skipToNextLineIfAny >> construct()}; constexpr auto executionPartErrorRecovery{stmtErrorRecoveryStart >> !"END"_tok >> !"CONTAINS"_tok >> !"ELSE"_tok >> !"CASE"_tok >> !"TYPE IS"_tok >> !"CLASS"_tok >> !"RANK"_tok >> @@ -93,7 +94,7 @@ constexpr auto noNameEnd{"END" >> missingOptionalName}; // For unrecognizable construct END statements. Be sure to not consume // a program unit's END statement. -constexpr auto progUnitEndStmt{ +constexpr auto progUnitEndStmt{consumedAllInput || "END" >> (lookAhead("\n"_ch) || "SUBROUTINE"_tok || "FUNCTION"_tok || "PROCEDURE"_tok || "MODULE"_tok || "SUBMODULE"_tok || "PROGRAM"_tok || "BLOCK DATA"_tok)}; @@ -103,9 +104,8 @@ constexpr auto namedConstructEndStmtErrorRecovery{ constructEndStmtErrorRecovery >> missingOptionalName}; constexpr auto progUnitEndStmtErrorRecovery{ - (many(!"END"_tok >> SkipPast<'\n'>{}) >> - ("END"_tok >> SkipTo<'\n'>{} || consumedAllInput)) >> - missingOptionalName}; + many(!"END"_tok >> SkipPast<'\n'>{}) >> + maybe("END"_tok >> SkipTo<'\n'>{}) >> missingOptionalName}; constexpr auto beginDirective{skipStuffBeforeStatement >> "!"_ch}; constexpr auto endDirective{space >> endOfLine}; diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp index cf7915886be09..0511f5bdf7478 100644 --- a/flang/lib/Parser/unparse.cpp +++ b/flang/lib/Parser/unparse.cpp @@ -2089,6 +2089,11 @@ class UnparseVisitor { // OpenMP Clauses & Directives void Unparse(const OmpArgumentList &x) { Walk(x.v, ", "); } + void Unparse(const OmpBaseVariantNames &x) { + Walk(std::get<0>(x.t)); // OmpObject + Put(":"); + Walk(std::get<1>(x.t)); // OmpObject + } void Unparse(const OmpTypeNameList &x) { // Walk(x.v, ","); } @@ -2106,7 +2111,7 @@ class UnparseVisitor { Walk(std::get(x.t)); Put(":"); Walk(std::get(x.t)); - Walk(":", std::get>(x.t)); + Walk(": ", std::get>(x.t)); } void Unparse(const llvm::omp::Directive &x) { unsigned ompVersion{langOpts_.OpenMPVersion}; @@ -2340,6 +2345,13 @@ class UnparseVisitor { } } } + void Unparse(const OmpLoopRangeClause &x) { + Word("LOOPRANGE("); + Walk(std::get<0>(x.t)); + Put(", "); + Walk(std::get<1>(x.t)); + Put(")"); + } void Unparse(const OmpReductionClause &x) { using Modifier = OmpReductionClause::Modifier; Walk(std::get>>(x.t), ": "); @@ -2487,9 +2499,6 @@ class UnparseVisitor { void Unparse(const OpenMPCriticalConstruct &x) { Unparse(static_cast(x)); } - void Unparse(const OmpDeclareTargetWithList &x) { - Put("("), Walk(x.v), Put(")"); - } void Unparse(const OmpInitializerProc &x) { Walk(std::get(x.t)); Put("("); @@ -2500,18 +2509,24 @@ class UnparseVisitor { // Don't let the visitor go to the normal AssignmentStmt Unparse function, // it adds an extra newline that we don't want. if (const auto *assignment{std::get_if(&x.u)}) { - Walk(assignment->t, "="); + Walk(assignment->t, " = "); + } else { + Walk(x.u); + } + } + void Unparse(const OmpReductionCombiner &x) { + // Don't let the visitor go to the normal AssignmentStmt Unparse function, + // it adds an extra newline that we don't want. 
+ if (const auto *assignment{std::get_if(&x.u)}) { + Walk(assignment->t, " = "); } else { Walk(x.u); } } void Unparse(const OpenMPDeclareReductionConstruct &x) { BeginOpenMP(); - Word("!$OMP DECLARE REDUCTION "); - Put("("); - Walk(std::get>(x.t)); - Put(")"); - Walk(std::get>(x.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); EndOpenMP(); } @@ -2528,12 +2543,8 @@ class UnparseVisitor { } void Unparse(const OmpDeclareVariantDirective &x) { BeginOpenMP(); - Word("!$OMP DECLARE VARIANT "); - Put("("); - Walk(std::get>(x.t), ":"); - Walk(std::get(x.t)); - Put(")"); - Walk(std::get(x.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); EndOpenMP(); } @@ -2554,41 +2565,29 @@ class UnparseVisitor { void Unparse(const OpenMPDeclarativeAssumes &x) { BeginOpenMP(); - Word("!$OMP ASSUMES "); - Walk(std::get(x.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); EndOpenMP(); } - void Unparse(const OpenMPDeclareMapperConstruct &z) { + void Unparse(const OpenMPDeclareMapperConstruct &x) { BeginOpenMP(); - Word("!$OMP DECLARE MAPPER ("); - const auto &spec{std::get(z.t)}; - const auto &mapperName{std::get(spec.t)}; - if (mapperName.find(llvm::omp::OmpDefaultMapperName) == std::string::npos) { - Walk(mapperName); - Put(":"); - } - Walk(std::get(spec.t)); - Put("::"); - Walk(std::get(spec.t)); - Put(")"); - - Walk(std::get(z.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); EndOpenMP(); } - void Unparse(const OpenMPDeclareSimdConstruct &y) { + void Unparse(const OpenMPDeclareSimdConstruct &x) { BeginOpenMP(); - Word("!$OMP DECLARE SIMD "); - Walk("(", std::get>(y.t), ")"); - Walk(std::get(y.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); EndOpenMP(); } void Unparse(const OpenMPDeclareTargetConstruct &x) { BeginOpenMP(); - Word("!$OMP DECLARE TARGET "); - Walk(std::get(x.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); EndOpenMP(); } @@ -2602,10 +2601,10 @@ class UnparseVisitor { Put("\n"); EndOpenMP(); } - void Unparse(const OpenMPRequiresConstruct &y) { + void Unparse(const OpenMPRequiresConstruct &x) { BeginOpenMP(); - Word("!$OMP REQUIRES "); - Walk(std::get(y.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); EndOpenMP(); } diff --git a/flang/lib/Semantics/assignment.cpp b/flang/lib/Semantics/assignment.cpp index 88e08887160d9..f4aa496e485e1 100644 --- a/flang/lib/Semantics/assignment.cpp +++ b/flang/lib/Semantics/assignment.cpp @@ -41,7 +41,6 @@ class AssignmentContext { void PopWhereContext(); void Analyze(const parser::AssignmentStmt &); void Analyze(const parser::PointerAssignmentStmt &); - void Analyze(const parser::ConcurrentControl &); SemanticsContext &context() { return context_; } private: @@ -76,6 +75,11 @@ void AssignmentContext::Analyze(const parser::AssignmentStmt &stmt) { whole{evaluate::UnwrapWholeSymbolOrComponentDataRef(lhs)}) { if (IsAllocatable(whole->GetUltimate())) { flags.set(DefinabilityFlag::PotentialDeallocation); + if (IsPolymorphic(*whole) && whereDepth_ > 0) { + Say(lhsLoc, + "Assignment to whole polymorphic allocatable '%s' may not be nested in a WHERE statement or construct"_err_en_US, + whole->name()); + } } } if (auto whyNot{WhyNotDefinable(lhsLoc, scope, flags, lhs)}) { diff --git a/flang/lib/Semantics/check-call.cpp b/flang/lib/Semantics/check-call.cpp index 797fd067b8185..4939d8d64a999 100644 --- a/flang/lib/Semantics/check-call.cpp +++ b/flang/lib/Semantics/check-call.cpp @@ -1538,6 +1538,10 @@ static bool CheckElementalConformance(parser::ContextualMessages &messages, evaluate::SayWithDeclaration(messages, *wholeSymbol, "Whole assumed-size array '%s' may not be used as an argument to an 
elemental procedure"_err_en_US, wholeSymbol->name()); + } else if (IsAssumedRank(*wholeSymbol)) { + evaluate::SayWithDeclaration(messages, *wholeSymbol, + "Assumed-rank array '%s' may not be used as an argument to an elemental procedure"_err_en_US, + wholeSymbol->name()); } } if (auto argShape{evaluate::GetShape(context, *expr)}) { diff --git a/flang/lib/Semantics/check-cuda.cpp b/flang/lib/Semantics/check-cuda.cpp index 9b48432e049b9..3d2db6a9c8aa9 100644 --- a/flang/lib/Semantics/check-cuda.cpp +++ b/flang/lib/Semantics/check-cuda.cpp @@ -774,4 +774,33 @@ void CUDAChecker::Enter(const parser::AssignmentStmt &x) { } } +void CUDAChecker::Enter(const parser::PrintStmt &x) { + CHECK(context_.location()); + const Scope &scope{context_.FindScope(*context_.location())}; + const Scope &progUnit{GetProgramUnitContaining(scope)}; + if (IsCUDADeviceContext(&progUnit) || deviceConstructDepth_ > 0) { + return; + } + + auto &outputItemList{std::get>(x.t)}; + for (const auto &item : outputItemList) { + if (const auto *x{std::get_if(&item.u)}) { + if (const auto *expr{GetExpr(context_, *x)}) { + for (const Symbol &sym : CollectCudaSymbols(*expr)) { + if (const auto *details = sym.GetUltimate() + .detailsIf()) { + if (details->cudaDataAttr() && + (*details->cudaDataAttr() == common::CUDADataAttr::Device || + *details->cudaDataAttr() == + common::CUDADataAttr::Constant)) { + context_.Say(parser::FindSourceLocation(*x), + "device data not allowed in I/O statements"_err_en_US); + } + } + } + } + } + } +} + } // namespace Fortran::semantics diff --git a/flang/lib/Semantics/check-cuda.h b/flang/lib/Semantics/check-cuda.h index 10000253ffe5a..ef5e57ab41b81 100644 --- a/flang/lib/Semantics/check-cuda.h +++ b/flang/lib/Semantics/check-cuda.h @@ -49,6 +49,7 @@ class CUDAChecker : public virtual BaseChecker { void Leave(const parser::OpenACCLoopConstruct &); void Enter(const parser::DoConstruct &); void Leave(const parser::DoConstruct &); + void Enter(const parser::PrintStmt &); private: SemanticsContext &context_; diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp index 1049a6d2c1b2e..7b881008219df 100644 --- a/flang/lib/Semantics/check-declarations.cpp +++ b/flang/lib/Semantics/check-declarations.cpp @@ -1189,7 +1189,8 @@ void CheckHelper::CheckObjectEntity( } } else if (!subpDetails && symbol.owner().kind() != Scope::Kind::Module && symbol.owner().kind() != Scope::Kind::MainProgram && - symbol.owner().kind() != Scope::Kind::BlockConstruct) { + symbol.owner().kind() != Scope::Kind::BlockConstruct && + symbol.owner().kind() != Scope::Kind::OpenACCConstruct) { messages_.Say( "ATTRIBUTES(%s) may apply only to module, host subprogram, block, or device subprogram data"_err_en_US, parser::ToUpperCaseLetters(common::EnumToString(attr))); diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp index ae14afdd9962d..1f059f747bad0 100644 --- a/flang/lib/Semantics/check-omp-structure.cpp +++ b/flang/lib/Semantics/check-omp-structure.cpp @@ -351,6 +351,17 @@ bool OmpStructureChecker::IsCloselyNestedRegion(const OmpDirectiveSet &set) { return false; } +bool OmpStructureChecker::IsNestedInDirective(llvm::omp::Directive directive) { + if (dirContext_.size() >= 1) { + for (size_t i = dirContext_.size() - 1; i > 0; --i) { + if (dirContext_[i - 1].directive == directive) { + return true; + } + } + } + return false; +} + void OmpStructureChecker::CheckVariableListItem( const SymbolSourceMap &symbols) { for (auto &[symbol, source] : 
symbols) { @@ -620,43 +631,10 @@ template struct DirectiveSpellingVisitor { checker_(GetDirName(x.t).source, Directive::OMPD_allocators); return false; } - bool Pre(const parser::OpenMPDeclarativeAssumes &x) { - checker_(std::get(x.t).source, Directive::OMPD_assumes); - return false; - } - bool Pre(const parser::OpenMPDeclareMapperConstruct &x) { - checker_( - std::get(x.t).source, Directive::OMPD_declare_mapper); - return false; - } - bool Pre(const parser::OpenMPDeclareReductionConstruct &x) { - checker_(std::get(x.t).source, - Directive::OMPD_declare_reduction); - return false; - } - bool Pre(const parser::OpenMPDeclareSimdConstruct &x) { - checker_( - std::get(x.t).source, Directive::OMPD_declare_simd); - return false; - } - bool Pre(const parser::OpenMPDeclareTargetConstruct &x) { - checker_( - std::get(x.t).source, Directive::OMPD_declare_target); - return false; - } - bool Pre(const parser::OmpDeclareVariantDirective &x) { - checker_(std::get(x.t).source, - Directive::OMPD_declare_variant); - return false; - } bool Pre(const parser::OpenMPGroupprivate &x) { checker_(x.v.DirName().source, Directive::OMPD_groupprivate); return false; } - bool Pre(const parser::OpenMPRequiresConstruct &x) { - checker_(std::get(x.t).source, Directive::OMPD_requires); - return false; - } bool Pre(const parser::OmpBeginDirective &x) { checker_(x.DirName().source, x.DirId()); return false; @@ -1371,8 +1349,42 @@ void OmpStructureChecker::Leave(const parser::OpenMPThreadprivate &x) { } void OmpStructureChecker::Enter(const parser::OpenMPDeclareSimdConstruct &x) { - const auto &dir{std::get(x.t)}; - PushContextAndClauseSets(dir.source, llvm::omp::Directive::OMPD_declare_simd); + const parser::OmpDirectiveName &dirName{x.v.DirName()}; + PushContextAndClauseSets(dirName.source, dirName.v); + + const parser::OmpArgumentList &args{x.v.Arguments()}; + if (args.v.empty()) { + return; + } else if (args.v.size() > 1) { + context_.Say(args.source, + "DECLARE_SIMD directive should have at most one argument"_err_en_US); + return; + } + + auto isValidSymbol{[](const Symbol *sym) { + if (IsProcedure(*sym) || IsFunction(*sym)) { + return true; + } + if (const Symbol *owner{GetScopingUnit(sym->owner()).symbol()}) { + return IsProcedure(*owner) || IsFunction(*owner); + } + return false; + }}; + + const parser::OmpArgument &arg{args.v.front()}; + if (auto *sym{GetArgumentSymbol(arg)}) { + if (!isValidSymbol(sym)) { + auto &msg{context_.Say(arg.source, + "The name '%s' should refer to a procedure"_err_en_US, sym->name())}; + if (sym->test(Symbol::Flag::Implicit)) { + msg.Attach(arg.source, + "The name '%s' has been implicitly declared"_en_US, sym->name()); + } + } + } else { + context_.Say(arg.source, + "The argument to the DECLARE_SIMD directive should be a procedure name"_err_en_US); + } } void OmpStructureChecker::Leave(const parser::OpenMPDeclareSimdConstruct &) { @@ -1380,9 +1392,50 @@ void OmpStructureChecker::Leave(const parser::OpenMPDeclareSimdConstruct &) { } void OmpStructureChecker::Enter(const parser::OmpDeclareVariantDirective &x) { - const auto &dir{std::get(x.t)}; - PushContextAndClauseSets( - dir.source, llvm::omp::Directive::OMPD_declare_variant); + const parser::OmpDirectiveName &dirName{x.v.DirName()}; + PushContextAndClauseSets(dirName.source, dirName.v); + + const parser::OmpArgumentList &args{x.v.Arguments()}; + if (args.v.size() != 1) { + context_.Say(args.source, + "DECLARE_VARIANT directive should have a single argument"_err_en_US); + return; + } + + auto InvalidArgument{[&](parser::CharBlock source) 
{ + context_.Say(source, + "The argument to the DECLARE_VARIANT directive should be [base-name:]variant-name"_err_en_US); + }}; + + auto CheckSymbol{[&](const Symbol *sym, parser::CharBlock source) { + if (sym) { + if (!IsProcedure(*sym) && !IsFunction(*sym)) { + auto &msg{context_.Say(source, + "The name '%s' should refer to a procedure"_err_en_US, + sym->name())}; + if (sym->test(Symbol::Flag::Implicit)) { + msg.Attach(source, "The name '%s' has been implicitly declared"_en_US, + sym->name()); + } + } + } else { + InvalidArgument(source); + } + }}; + + const parser::OmpArgument &arg{args.v.front()}; + common::visit( // + common::visitors{ + [&](const parser::OmpBaseVariantNames &y) { + CheckSymbol(GetObjectSymbol(std::get<0>(y.t)), arg.source); + CheckSymbol(GetObjectSymbol(std::get<1>(y.t)), arg.source); + }, + [&](const parser::OmpLocator &y) { + CheckSymbol(GetArgumentSymbol(arg), arg.source); + }, + [&](auto &&y) { InvalidArgument(arg.source); }, + }, + arg.u); } void OmpStructureChecker::Leave(const parser::OmpDeclareVariantDirective &) { @@ -1462,14 +1515,13 @@ void OmpStructureChecker::Leave(const parser::OpenMPDepobjConstruct &x) { } void OmpStructureChecker::Enter(const parser::OpenMPRequiresConstruct &x) { - const auto &dir{std::get(x.t)}; - PushContextAndClauseSets(dir.source, llvm::omp::Directive::OMPD_requires); + const auto &dirName{x.v.DirName()}; + PushContextAndClauseSets(dirName.source, dirName.v); if (visitedAtomicSource_.empty()) { return; } - const auto &clauseList{std::get(x.t)}; - for (const parser::OmpClause &clause : clauseList.v) { + for (const parser::OmpClause &clause : x.v.Clauses().v) { llvm::omp::Clause id{clause.Id()}; if (id == llvm::omp::Clause::OMPC_atomic_default_mem_order) { parser::MessageFormattedText txt( @@ -1570,46 +1622,26 @@ void OmpStructureChecker::Enter(const parser::OmpClause::Allocate &x) { } } -void OmpStructureChecker::Enter(const parser::OmpDeclareTargetWithClause &x) { - SetClauseSets(llvm::omp::Directive::OMPD_declare_target); -} +void OmpStructureChecker::Enter(const parser::OpenMPDeclareMapperConstruct &x) { + const parser::OmpDirectiveName &dirName{x.v.DirName()}; + PushContextAndClauseSets(dirName.source, dirName.v); -void OmpStructureChecker::Leave(const parser::OmpDeclareTargetWithClause &x) { - if (x.v.v.size() > 0) { - const parser::OmpClause *enterClause = - FindClause(llvm::omp::Clause::OMPC_enter); - const parser::OmpClause *toClause = FindClause(llvm::omp::Clause::OMPC_to); - const parser::OmpClause *linkClause = - FindClause(llvm::omp::Clause::OMPC_link); - const parser::OmpClause *indirectClause = - FindClause(llvm::omp::Clause::OMPC_indirect); - if (!enterClause && !toClause && !linkClause) { - context_.Say(x.source, - "If the DECLARE TARGET directive has a clause, it must contain at least one ENTER clause or LINK clause"_err_en_US); - } - if (indirectClause && !enterClause) { - context_.Say(x.source, - "The INDIRECT clause cannot be used without the ENTER clause with the DECLARE TARGET directive."_err_en_US); - } - unsigned version{context_.langOptions().OpenMPVersion}; - if (toClause && version >= 52) { - context_.Warn(common::UsageWarning::OpenMPUsage, toClause->source, - "The usage of TO clause on DECLARE TARGET directive has been deprecated. 
Use ENTER clause instead."_warn_en_US); - } - if (indirectClause) { - CheckAllowedClause(llvm::omp::Clause::OMPC_indirect); - } + const parser::OmpArgumentList &args{x.v.Arguments()}; + if (args.v.size() != 1) { + context_.Say(args.source, + "DECLARE_MAPPER directive should have a single argument"_err_en_US); + return; } -} -void OmpStructureChecker::Enter(const parser::OpenMPDeclareMapperConstruct &x) { - const auto &dir{std::get(x.t)}; - PushContextAndClauseSets( - dir.source, llvm::omp::Directive::OMPD_declare_mapper); - const auto &spec{std::get(x.t)}; - const auto &type = std::get(spec.t); - if (!std::get_if(&type.u)) { - context_.Say(dir.source, "Type is not a derived type"_err_en_US); + const parser::OmpArgument &arg{args.v.front()}; + if (auto *spec{std::get_if(&arg.u)}) { + const auto &type = std::get(spec->t); + if (!std::get_if(&type.u)) { + context_.Say(arg.source, "Type is not a derived type"_err_en_US); + } + } else { + context_.Say(arg.source, + "The argument to the DECLARE_MAPPER directive should be a mapper-specifier"_err_en_US); } } @@ -1619,9 +1651,21 @@ void OmpStructureChecker::Leave(const parser::OpenMPDeclareMapperConstruct &) { void OmpStructureChecker::Enter( const parser::OpenMPDeclareReductionConstruct &x) { - const auto &dir{std::get(x.t)}; - PushContextAndClauseSets( - dir.source, llvm::omp::Directive::OMPD_declare_reduction); + const parser::OmpDirectiveName &dirName{x.v.DirName()}; + PushContextAndClauseSets(dirName.source, dirName.v); + + const parser::OmpArgumentList &args{x.v.Arguments()}; + if (args.v.size() != 1) { + context_.Say(args.source, + "DECLARE_REDUCTION directive should have a single argument"_err_en_US); + return; + } + + const parser::OmpArgument &arg{args.v.front()}; + if (!std::holds_alternative(arg.u)) { + context_.Say(arg.source, + "The argument to the DECLARE_REDUCTION directive should be a reduction-specifier"_err_en_US); + } } void OmpStructureChecker::Leave( @@ -1629,42 +1673,6 @@ void OmpStructureChecker::Leave( dirContext_.pop_back(); } -void OmpStructureChecker::Enter(const parser::OpenMPDeclareTargetConstruct &x) { - const auto &dir{std::get(x.t)}; - PushContext(dir.source, llvm::omp::Directive::OMPD_declare_target); -} - -void OmpStructureChecker::Enter(const parser::OmpDeclareTargetWithList &x) { - SymbolSourceMap symbols; - GetSymbolsInObjectList(x.v, symbols); - for (auto &[symbol, source] : symbols) { - const GenericDetails *genericDetails = symbol->detailsIf(); - if (genericDetails) { - context_.Say(source, - "The procedure '%s' in DECLARE TARGET construct cannot be a generic name."_err_en_US, - symbol->name()); - genericDetails->specific(); - } - if (IsProcedurePointer(*symbol)) { - context_.Say(source, - "The procedure '%s' in DECLARE TARGET construct cannot be a procedure pointer."_err_en_US, - symbol->name()); - } - const SubprogramDetails *entryDetails = - symbol->detailsIf(); - if (entryDetails && entryDetails->entryScope()) { - context_.Say(source, - "The procedure '%s' in DECLARE TARGET construct cannot be an entry name."_err_en_US, - symbol->name()); - } - if (IsStmtFunction(*symbol)) { - context_.Say(source, - "The procedure '%s' in DECLARE TARGET construct cannot be a statement function."_err_en_US, - symbol->name()); - } - } -} - void OmpStructureChecker::CheckSymbolName( const parser::CharBlock &source, const parser::OmpObject &object) { common::visit( @@ -1697,62 +1705,138 @@ void OmpStructureChecker::CheckSymbolNames( } } +void OmpStructureChecker::Enter(const parser::OpenMPDeclareTargetConstruct &x) { + const 
parser::OmpDirectiveName &dirName{x.v.DirName()}; + PushContext(dirName.source, dirName.v); + + // Check if arguments are extended-list-items. + for (const parser::OmpArgument &arg : x.v.Arguments().v) { + const Symbol *symbol{GetArgumentSymbol(arg)}; + if (!symbol) { + context_.Say(arg.source, + "An argument to the DECLARE TARGET directive should be an extended-list-item"_err_en_US); + continue; + } + const GenericDetails *genericDetails = symbol->detailsIf(); + if (genericDetails) { + context_.Say(arg.source, + "The procedure '%s' in DECLARE TARGET construct cannot be a generic name."_err_en_US, + symbol->name()); + genericDetails->specific(); + } + if (IsProcedurePointer(*symbol)) { + context_.Say(arg.source, + "The procedure '%s' in DECLARE TARGET construct cannot be a procedure pointer."_err_en_US, + symbol->name()); + } + const SubprogramDetails *entryDetails = + symbol->detailsIf(); + if (entryDetails && entryDetails->entryScope()) { + context_.Say(arg.source, + "The procedure '%s' in DECLARE TARGET construct cannot be an entry name."_err_en_US, + symbol->name()); + } + if (IsStmtFunction(*symbol)) { + context_.Say(arg.source, + "The procedure '%s' in DECLARE TARGET construct cannot be a statement function."_err_en_US, + symbol->name()); + } + } + + // Check if there are arguments or clauses, but not both. + if (!x.v.Clauses().v.empty()) { + if (!x.v.Arguments().v.empty()) { + context_.Say(x.source, + "DECLARE TARGET directive can have argument or clauses, but not both"_err_en_US); + } + SetClauseSets(llvm::omp::Directive::OMPD_declare_target); + } +} + void OmpStructureChecker::Leave(const parser::OpenMPDeclareTargetConstruct &x) { - const auto &dir{std::get(x.t)}; - const auto &spec{std::get(x.t)}; + const parser::OmpDirectiveName &dirName{x.v.DirName()}; + // Handle both forms of DECLARE TARGET. // - Extended list: It behaves as if there was an ENTER/TO clause with the // list of objects as argument. It accepts no explicit clauses. // - With clauses. - if (const auto *objectList{parser::Unwrap(spec.u)}) { - deviceConstructFound_ = true; - CheckSymbolNames(dir.source, *objectList); - CheckVarIsNotPartOfAnotherVar(dir.source, *objectList); - CheckThreadprivateOrDeclareTargetVar(*objectList); - } else if (const auto *clauseList{ - parser::Unwrap(spec.u)}) { - bool toClauseFound{false}, deviceTypeClauseFound{false}, - enterClauseFound{false}; - for (const auto &clause : clauseList->v) { - common::visit( - common::visitors{ - [&](const parser::OmpClause::To &toClause) { - toClauseFound = true; - auto &objList{std::get(toClause.v.t)}; - CheckSymbolNames(dir.source, objList); - CheckVarIsNotPartOfAnotherVar(dir.source, objList); - CheckThreadprivateOrDeclareTargetVar(objList); - }, - [&](const parser::OmpClause::Link &linkClause) { - CheckSymbolNames(dir.source, linkClause.v); - CheckVarIsNotPartOfAnotherVar(dir.source, linkClause.v); - CheckThreadprivateOrDeclareTargetVar(linkClause.v); - }, - [&](const parser::OmpClause::Enter &enterClause) { - enterClauseFound = true; - auto &objList{std::get(enterClause.v.t)}; - CheckSymbolNames(dir.source, objList); - CheckVarIsNotPartOfAnotherVar(dir.source, objList); - CheckThreadprivateOrDeclareTargetVar(objList); - }, - [&](const parser::OmpClause::DeviceType &deviceTypeClause) { - deviceTypeClauseFound = true; - if (deviceTypeClause.v.v != - parser::OmpDeviceTypeClause::DeviceTypeDescription::Host) { - // Function / subroutine explicitly marked as runnable by the - // target device. 
- deviceConstructFound_ = true; - } - }, - [&](const auto &) {}, - }, - clause.u); + for (const parser::OmpArgument &arg : x.v.Arguments().v) { + if (auto *object{GetArgumentObject(arg)}) { + deviceConstructFound_ = true; + CheckSymbolName(dirName.source, *object); + CheckVarIsNotPartOfAnotherVar(dirName.source, *object); + CheckThreadprivateOrDeclareTargetVar(*object); + } + } - if ((toClauseFound || enterClauseFound) && !deviceTypeClauseFound) { - deviceConstructFound_ = true; - } + if (!x.v.Clauses().v.empty()) { + const parser::OmpClause *enterClause = + FindClause(llvm::omp::Clause::OMPC_enter); + const parser::OmpClause *toClause = FindClause(llvm::omp::Clause::OMPC_to); + const parser::OmpClause *linkClause = + FindClause(llvm::omp::Clause::OMPC_link); + const parser::OmpClause *indirectClause = + FindClause(llvm::omp::Clause::OMPC_indirect); + if (!enterClause && !toClause && !linkClause) { + context_.Say(x.source, + "If the DECLARE TARGET directive has a clause, it must contain at least one ENTER clause or LINK clause"_err_en_US); + } + if (indirectClause && !enterClause) { + context_.Say(x.source, + "The INDIRECT clause cannot be used without the ENTER clause with the DECLARE TARGET directive."_err_en_US); + } + unsigned version{context_.langOptions().OpenMPVersion}; + if (toClause && version >= 52) { + context_.Warn(common::UsageWarning::OpenMPUsage, toClause->source, + "The usage of TO clause on DECLARE TARGET directive has been deprecated. Use ENTER clause instead."_warn_en_US); + } + if (indirectClause) { + CheckAllowedClause(llvm::omp::Clause::OMPC_indirect); } } + + bool toClauseFound{false}, deviceTypeClauseFound{false}, + enterClauseFound{false}; + for (const parser::OmpClause &clause : x.v.Clauses().v) { + common::visit( + common::visitors{ + [&](const parser::OmpClause::To &toClause) { + toClauseFound = true; + auto &objList{std::get(toClause.v.t)}; + CheckSymbolNames(dirName.source, objList); + CheckVarIsNotPartOfAnotherVar(dirName.source, objList); + CheckThreadprivateOrDeclareTargetVar(objList); + }, + [&](const parser::OmpClause::Link &linkClause) { + CheckSymbolNames(dirName.source, linkClause.v); + CheckVarIsNotPartOfAnotherVar(dirName.source, linkClause.v); + CheckThreadprivateOrDeclareTargetVar(linkClause.v); + }, + [&](const parser::OmpClause::Enter &enterClause) { + enterClauseFound = true; + auto &objList{std::get(enterClause.v.t)}; + CheckSymbolNames(dirName.source, objList); + CheckVarIsNotPartOfAnotherVar(dirName.source, objList); + CheckThreadprivateOrDeclareTargetVar(objList); + }, + [&](const parser::OmpClause::DeviceType &deviceTypeClause) { + deviceTypeClauseFound = true; + if (deviceTypeClause.v.v != + parser::OmpDeviceTypeClause::DeviceTypeDescription::Host) { + // Function / subroutine explicitly marked as runnable by the + // target device. 
+ deviceConstructFound_ = true; + } + }, + [&](const auto &) {}, + }, + clause.u); + + if ((toClauseFound || enterClauseFound) && !deviceTypeClauseFound) { + deviceConstructFound_ = true; + } + } + dirContext_.pop_back(); } @@ -1817,12 +1901,89 @@ void OmpStructureChecker::Enter(const parser::OmpClause::At &x) { } } +// Goes through the names in an OmpObjectList and checks if each name appears +// in the given allocate statement +void OmpStructureChecker::CheckAllNamesInAllocateStmt( + const parser::CharBlock &source, const parser::OmpObjectList &ompObjectList, + const parser::AllocateStmt &allocate) { + for (const auto &obj : ompObjectList.v) { + if (const auto *d{std::get_if(&obj.u)}) { + if (const auto *ref{std::get_if(&d->u)}) { + if (const auto *n{std::get_if(&ref->u)}) { + CheckNameInAllocateStmt(source, *n, allocate); + } + } + } + } +} + +void OmpStructureChecker::CheckNameInAllocateStmt( + const parser::CharBlock &source, const parser::Name &name, + const parser::AllocateStmt &allocate) { + for (const auto &allocation : + std::get>(allocate.t)) { + const auto &allocObj = std::get(allocation.t); + if (const auto *n{std::get_if(&allocObj.u)}) { + if (n->source == name.source) { + return; + } + } + } + unsigned version{context_.langOptions().OpenMPVersion}; + context_.Say(source, + "Object '%s' in %s directive not " + "found in corresponding ALLOCATE statement"_err_en_US, + name.ToString(), + parser::ToUpperCaseLetters( + llvm::omp::getOpenMPDirectiveName(GetContext().directive, version) + .str())); +} + void OmpStructureChecker::Enter(const parser::OpenMPExecutableAllocate &x) { - isPredefinedAllocator = true; const auto &dir{std::get(x.t)}; - const auto &objectList{std::get>(x.t)}; PushContextAndClauseSets(dir.source, llvm::omp::Directive::OMPD_allocate); + + unsigned version{context_.langOptions().OpenMPVersion}; + if (version >= 52) { + context_.Warn(common::UsageWarning::OpenMPUsage, x.source, + "The executable form of the OpenMP ALLOCATE directive has been deprecated, please use ALLOCATORS instead"_warn_en_US); + } + + bool hasAllocator = false; + // TODO: Investigate whether searching the clause list can be done with + // parser::Unwrap instead of the following loop const auto &clauseList{std::get(x.t)}; + for (const auto &clause : clauseList.v) { + if (std::get_if(&clause.u)) { + hasAllocator = true; + } + } + + if (IsNestedInDirective(llvm::omp::Directive::OMPD_target) && !hasAllocator) { + // TODO: expand this check to exclude the case when a requires + // directive with the dynamic_allocators clause is present + // in the same compilation unit (OMP5.0 2.11.3). + context_.Say(x.source, + "ALLOCATE directives that appear in a TARGET region must specify an allocator clause"_err_en_US); + } + + const auto &allocateStmt = + std::get>(x.t).statement; + if (const auto &list{std::get>(x.t)}) { + CheckAllNamesInAllocateStmt( + std::get(x.t).source, *list, allocateStmt); + } + if (const auto &subDirs{ + std::get>>( + x.t)}) { + for (const auto &dalloc : *subDirs) { + CheckAllNamesInAllocateStmt(std::get(dalloc.t).source, + std::get(dalloc.t), allocateStmt); + } + } + + isPredefinedAllocator = true; + const auto &objectList{std::get>(x.t)}; for (const auto &clause : clauseList.v) { CheckAlignValue(clause); } @@ -1857,7 +2018,31 @@ void OmpStructureChecker::Enter(const parser::OpenMPAllocatorsConstruct &x) { const auto *allocate{ action ? 
parser::Unwrap(action.stmt) : nullptr}; - if (!allocate) { + if (allocate) { + for (const auto &clause : dirSpec.Clauses().v) { + if (auto *alloc{std::get_if(&clause.u)}) { + CheckAllNamesInAllocateStmt( + x.source, std::get(alloc->v.t), *allocate); + + using OmpAllocatorSimpleModifier = parser::OmpAllocatorSimpleModifier; + using OmpAllocatorComplexModifier = parser::OmpAllocatorComplexModifier; + + auto &modifiers{OmpGetModifiers(alloc->v)}; + bool hasAllocator{ + OmpGetUniqueModifier(modifiers) || + OmpGetUniqueModifier(modifiers)}; + + // TODO: As with allocate directive, exclude the case when a requires + // directive with the dynamic_allocators clause is present in + // the same compilation unit (OMP5.0 2.11.3). + if (IsNestedInDirective(llvm::omp::Directive::OMPD_target) && + !hasAllocator) { + context_.Say(x.source, + "ALLOCATORS directives that appear in a TARGET region must specify an allocator"_err_en_US); + } + } + } + } else { const parser::CharBlock &source = action ? action.source : x.source; context_.Say(source, "The body of the ALLOCATORS construct should be an ALLOCATE statement"_err_en_US); @@ -3043,6 +3228,12 @@ CHECK_REQ_CONSTANT_SCALAR_INT_CLAUSE(Collapse, OMPC_collapse) CHECK_REQ_CONSTANT_SCALAR_INT_CLAUSE(Safelen, OMPC_safelen) CHECK_REQ_CONSTANT_SCALAR_INT_CLAUSE(Simdlen, OMPC_simdlen) +void OmpStructureChecker::Enter(const parser::OmpClause::Looprange &x) { + context_.Say(GetContext().clauseSource, + "LOOPRANGE clause is not implemented yet"_err_en_US, + ContextDirectiveAsFortran()); +} + // Restrictions specific to each clause are implemented apart from the // generalized restrictions. diff --git a/flang/lib/Semantics/check-omp-structure.h b/flang/lib/Semantics/check-omp-structure.h index 176f6568814c5..f507278fba5f2 100644 --- a/flang/lib/Semantics/check-omp-structure.h +++ b/flang/lib/Semantics/check-omp-structure.h @@ -113,9 +113,6 @@ class OmpStructureChecker void Leave(const parser::OpenMPDeclareTargetConstruct &); void Enter(const parser::OpenMPDepobjConstruct &); void Leave(const parser::OpenMPDepobjConstruct &); - void Enter(const parser::OmpDeclareTargetWithList &); - void Enter(const parser::OmpDeclareTargetWithClause &); - void Leave(const parser::OmpDeclareTargetWithClause &); void Enter(const parser::OpenMPDispatchConstruct &); void Leave(const parser::OpenMPDispatchConstruct &); void Enter(const parser::OmpErrorDirective &); @@ -180,6 +177,7 @@ class OmpStructureChecker bool HasInvalidWorksharingNesting( const parser::CharBlock &, const OmpDirectiveSet &); bool IsCloselyNestedRegion(const OmpDirectiveSet &set); + bool IsNestedInDirective(llvm::omp::Directive directive); void HasInvalidTeamsNesting( const llvm::omp::Directive &dir, const parser::CharBlock &source); void HasInvalidDistributeNesting(const parser::OpenMPLoopConstruct &x); @@ -312,6 +310,11 @@ class OmpStructureChecker const std::optional &maybeClauses); void CheckCancellationNest( const parser::CharBlock &source, llvm::omp::Directive type); + void CheckAllNamesInAllocateStmt(const parser::CharBlock &source, + const parser::OmpObjectList &ompObjectList, + const parser::AllocateStmt &allocate); + void CheckNameInAllocateStmt(const parser::CharBlock &source, + const parser::Name &ompObject, const parser::AllocateStmt &allocate); std::int64_t GetOrdCollapseLevel(const parser::OpenMPLoopConstruct &x); void CheckReductionObjects( const parser::OmpObjectList &objects, llvm::omp::Clause clauseId); diff --git a/flang/lib/Semantics/data-to-inits.cpp b/flang/lib/Semantics/data-to-inits.cpp index 
1c454385e6989..1e46dabe30c89 100644 --- a/flang/lib/Semantics/data-to-inits.cpp +++ b/flang/lib/Semantics/data-to-inits.cpp @@ -943,10 +943,19 @@ void ConstructInitializer(const Symbol &symbol, void ConvertToInitializers( DataInitializations &inits, evaluate::ExpressionAnalyzer &exprAnalyzer) { + // Process DATA-style component /initializers/ now, so that they appear as + // default values in time for EQUIVALENCE processing in ProcessScopes. + for (auto &[symbolPtr, initialization] : inits) { + if (symbolPtr->owner().IsDerivedType()) { + ConstructInitializer(*symbolPtr, initialization, exprAnalyzer); + } + } if (ProcessScopes( exprAnalyzer.context().globalScope(), exprAnalyzer, inits)) { for (auto &[symbolPtr, initialization] : inits) { - ConstructInitializer(*symbolPtr, initialization, exprAnalyzer); + if (!symbolPtr->owner().IsDerivedType()) { + ConstructInitializer(*symbolPtr, initialization, exprAnalyzer); + } } } } diff --git a/flang/lib/Semantics/expression.cpp b/flang/lib/Semantics/expression.cpp index 3f048ab6f7a4d..836500145e4a2 100644 --- a/flang/lib/Semantics/expression.cpp +++ b/flang/lib/Semantics/expression.cpp @@ -3644,19 +3644,24 @@ std::optional ExpressionAnalyzer::CheckCall( Say(callSite, "Assumed-length character function must be defined with a length to be called"_err_en_US); } + if (!chars->IsPure()) { + if (const semantics::Scope *pure{semantics::FindPureProcedureContaining( + context_.FindScope(callSite))}) { + std::string name; + if (procSymbol) { + name = "'"s + procSymbol->name().ToString() + "'"; + } else if (const auto *intrinsic{proc.GetSpecificIntrinsic()}) { + name = "'"s + intrinsic->name + "'"; + } + Say(callSite, + "Procedure %s referenced in pure subprogram '%s' must be pure too"_err_en_US, + name, DEREF(pure->symbol()).name()); + } + } ok &= semantics::CheckArguments(*chars, arguments, context_, context_.FindScope(callSite), treatExternalAsImplicit, /*ignoreImplicitVsExplicit=*/false, specificIntrinsic); } - if (procSymbol && !IsPureProcedure(*procSymbol)) { - if (const semantics::Scope * - pure{semantics::FindPureProcedureContaining( - context_.FindScope(callSite))}) { - Say(callSite, - "Procedure '%s' referenced in pure subprogram '%s' must be pure too"_err_en_US, - procSymbol->name(), DEREF(pure->symbol()).name()); - } - } if (ok && !treatExternalAsImplicit && procSymbol && !(chars && chars->HasExplicitInterface())) { if (const Symbol *global{FindGlobal(*procSymbol)}; diff --git a/flang/lib/Semantics/openmp-utils.cpp b/flang/lib/Semantics/openmp-utils.cpp index 35b7718715071..a8ec4d6c24beb 100644 --- a/flang/lib/Semantics/openmp-utils.cpp +++ b/flang/lib/Semantics/openmp-utils.cpp @@ -41,6 +41,24 @@ namespace Fortran::semantics::omp { using namespace Fortran::parser::omp; +const Scope &GetScopingUnit(const Scope &scope) { + const Scope *iter{&scope}; + for (; !iter->IsTopLevel(); iter = &iter->parent()) { + switch (iter->kind()) { + case Scope::Kind::BlockConstruct: + case Scope::Kind::BlockData: + case Scope::Kind::DerivedType: + case Scope::Kind::MainProgram: + case Scope::Kind::Module: + case Scope::Kind::Subprogram: + return *iter; + default: + break; + } + } + return *iter; +} + SourcedActionStmt GetActionStmt(const parser::ExecutionPartConstruct *x) { if (x == nullptr) { return SourcedActionStmt{}; diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp index 1f8d9285c1c4b..624b89005c809 100644 --- a/flang/lib/Semantics/resolve-directives.cpp +++ b/flang/lib/Semantics/resolve-directives.cpp @@ -149,7 
+149,24 @@ template class DirectiveAttributeVisitor { dataSharingAttributeObjects_.clear(); } bool HasDataSharingAttributeObject(const Symbol &); + + /// Extract the iv and bounds of a DO loop: + /// 1. The loop index/induction variable + /// 2. The lower bound + /// 3. The upper bound + /// 4. The step/increment (or nullptr if not present) + /// + /// Each returned tuple value can be nullptr if not present. Diagnoses an + /// error if the DO loop is a DO WHILE or DO CONCURRENT loop. + std::tuple + GetLoopBounds(const parser::DoConstruct &); + + /// Extract the loop index/induction variable from a DO loop. Diagnoses an + /// error if the DO loop is a DO WHILE or DO CONCURRENT loop and returns + /// nullptr. const parser::Name *GetLoopIndex(const parser::DoConstruct &); + const parser::DoConstruct *GetDoConstructIf( const parser::ExecutionPartConstruct &); Symbol *DeclareNewAccessEntity(const Symbol &, Symbol::Flag, Scope &); @@ -311,6 +328,11 @@ class AccAttributeVisitor : DirectiveAttributeVisitor { return false; } + bool Pre(const parser::AccClause::UseDevice &x) { + ResolveAccObjectList(x.v, Symbol::Flag::AccUseDevice); + return false; + } + void Post(const parser::Name &); private: @@ -473,9 +495,10 @@ class OmpAttributeVisitor : DirectiveAttributeVisitor { bool Pre(const parser::OpenMPDeclareSimdConstruct &x) { PushContext(x.source, llvm::omp::Directive::OMPD_declare_simd); - const auto &name{std::get>(x.t)}; - if (name) { - ResolveOmpName(*name, Symbol::Flag::OmpDeclareSimd); + for (const parser::OmpArgument &arg : x.v.Arguments().v) { + if (auto *object{omp::GetArgumentObject(arg)}) { + ResolveOmpObject(*object, Symbol::Flag::OmpDeclareSimd); + } } return true; } @@ -522,7 +545,7 @@ class OmpAttributeVisitor : DirectiveAttributeVisitor { // Gather information from the clauses. Flags flags; std::optional memOrder; - for (const auto &clause : std::get(x.t).v) { + for (const parser::OmpClause &clause : x.v.Clauses().v) { flags |= common::visit( common::visitors{ [&memOrder]( @@ -934,6 +957,13 @@ class OmpAttributeVisitor : DirectiveAttributeVisitor { privateDataSharingAttributeObjects_.clear(); } + /// Check that loops in the loop nest are perfectly nested, as well as that lower + /// bound, upper bound, and step expressions do not use the iv + /// of a surrounding loop of the associated loop nest. 
+ /// We do not support non-perfectly nested loops nor non-rectangular loops yet + /// (both introduced in OpenMP 5.0) + void CheckPerfectNestAndRectangularLoop(const parser::OpenMPLoopConstruct &x); + // Predetermined DSA rules void PrivatizeAssociatedLoopIndexAndCheckLoopLevel( const parser::OpenMPLoopConstruct &); @@ -951,7 +981,6 @@ class OmpAttributeVisitor : DirectiveAttributeVisitor { void ResolveOmpNameList(const std::list &, Symbol::Flag); void ResolveOmpName(const parser::Name &, Symbol::Flag); Symbol *ResolveName(const parser::Name *); - Symbol *ResolveOmpObjectScope(const parser::Name *); Symbol *DeclareOrMarkOtherAccessEntity(const parser::Name &, Symbol::Flag); Symbol *DeclareOrMarkOtherAccessEntity(Symbol &, Symbol::Flag); void CheckMultipleAppearances( @@ -969,11 +998,6 @@ class OmpAttributeVisitor : DirectiveAttributeVisitor { sourceLabels_.clear(); targetLabels_.clear(); }; - void CheckAllNamesInAllocateStmt(const parser::CharBlock &source, - const parser::OmpObjectList &ompObjectList, - const parser::AllocateStmt &allocate); - void CheckNameInAllocateStmt(const parser::CharBlock &source, - const parser::Name &ompObject, const parser::AllocateStmt &allocate); std::int64_t ordCollapseLevel{0}; @@ -1010,14 +1034,15 @@ bool DirectiveAttributeVisitor::HasDataSharingAttributeObject( } template -const parser::Name *DirectiveAttributeVisitor::GetLoopIndex( - const parser::DoConstruct &x) { +std::tuple +DirectiveAttributeVisitor::GetLoopBounds(const parser::DoConstruct &x) { using Bounds = parser::LoopControl::Bounds; if (x.GetLoopControl()) { if (const Bounds * b{std::get_if(&x.GetLoopControl()->u)}) { - return &b->name.thing; - } else { - return nullptr; + auto &step = b->step; + return {&b->name.thing, &b->lower, &b->upper, + step.has_value() ? 
&step.value() : nullptr}; } } else { context_ @@ -1025,8 +1050,14 @@ const parser::Name *DirectiveAttributeVisitor::GetLoopIndex( "Loop control is not present in the DO LOOP"_err_en_US) .Attach(GetContext().directiveSource, "associated with the enclosing LOOP construct"_en_US); - return nullptr; } + return {nullptr, nullptr, nullptr, nullptr}; +} + +template +const parser::Name *DirectiveAttributeVisitor::GetLoopIndex( + const parser::DoConstruct &x) { + return std::get(GetLoopBounds(x)); } template @@ -1972,6 +2003,10 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPLoopConstruct &x) { } } } + + // Must be done before iv privatization + CheckPerfectNestAndRectangularLoop(x); + PrivatizeAssociatedLoopIndexAndCheckLoopLevel(x); ordCollapseLevel = GetNumAffectedLoopsFromLoopConstruct(x) + 1; return true; @@ -2167,6 +2202,119 @@ void OmpAttributeVisitor::CollectNumAffectedLoopsFromClauses( } } +void OmpAttributeVisitor::CheckPerfectNestAndRectangularLoop( + const parser::OpenMPLoopConstruct &x) { + auto &dirContext{GetContext()}; + std::int64_t dirDepth{dirContext.associatedLoopLevel}; + if (dirDepth <= 0) + return; + + auto checkExprHasSymbols = [&](llvm::SmallVector &ivs, + const parser::ScalarExpr *bound) { + if (ivs.empty()) + return; + auto boundExpr{semantics::AnalyzeExpr(context_, *bound)}; + if (!boundExpr) + return; + semantics::UnorderedSymbolSet boundSyms{ + evaluate::CollectSymbols(*boundExpr)}; + if (boundSyms.empty()) + return; + for (Symbol *iv : ivs) { + if (boundSyms.count(*iv) != 0) { + // TODO: Point to occurrence of iv in boundExpr, directiveSource as a + // note + context_.Say(dirContext.directiveSource, + "Trip count must be computable and invariant"_err_en_US); + } + } + }; + + // Find the associated region by skipping nested loop-associated constructs + // such as loop transformations + const parser::NestedConstruct *innermostAssocRegion{nullptr}; + const parser::OpenMPLoopConstruct *innermostConstruct{&x}; + while (const auto &innerAssocStmt{ + std::get>( + innermostConstruct->t)}) { + innermostAssocRegion = &(innerAssocStmt.value()); + if (const auto *innerConstruct{ + std::get_if>( + innermostAssocRegion)}) { + innermostConstruct = &innerConstruct->value(); + } else { + break; + } + } + + if (!innermostAssocRegion) + return; + const auto &outer{std::get_if(innermostAssocRegion)}; + if (!outer) + return; + + llvm::SmallVector ivs; + int curLevel{0}; + const parser::DoConstruct *loop{outer}; + while (true) { + auto [iv, lb, ub, step] = GetLoopBounds(*loop); + + if (lb) + checkExprHasSymbols(ivs, lb); + if (ub) + checkExprHasSymbols(ivs, ub); + if (step) + checkExprHasSymbols(ivs, step); + if (iv) { + if (auto *symbol{currScope().FindSymbol(iv->source)}) + ivs.push_back(symbol); + } + + // Stop after processing all affected loops + if (curLevel + 1 >= dirDepth) + break; + + // Recurse into nested loop + const auto &block{std::get(loop->t)}; + if (block.empty()) { + // Insufficient number of nested loops already reported by + // CheckAssocLoopLevel() + break; + } + + loop = GetDoConstructIf(block.front()); + if (!loop) { + // Insufficient number of nested loops already reported by + // CheckAssocLoopLevel() + break; + } + + auto checkPerfectNest = [&, this]() { + if (block.empty()) + return; + auto last = block.end(); + --last; + + // A trailing CONTINUE is not considered part of the loop body + if (parser::Unwrap(*last)) + --last; + + // In a perfectly nested loop, the nested loop must be the only statement + if (last == block.begin()) + return; + + // Non-perfectly 
nested loop + // TODO: Point to non-DO statement, directiveSource as a note + context_.Say(dirContext.directiveSource, + "Canonical loop nest must be perfectly nested."_err_en_US); + }; + + checkPerfectNest(); + + ++curLevel; + } +} + // 2.15.1.1 Data-sharing Attribute Rules - Predetermined // - The loop iteration variable(s) in the associated do-loop(s) of a do, // parallel do, taskloop, or distribute construct is (are) private. @@ -2323,22 +2471,17 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPCriticalConstruct &x) { bool OmpAttributeVisitor::Pre(const parser::OpenMPDeclareTargetConstruct &x) { PushContext(x.source, llvm::omp::Directive::OMPD_declare_target); - const auto &spec{std::get(x.t)}; - if (const auto *objectList{parser::Unwrap(spec.u)}) { - ResolveOmpObjectList(*objectList, Symbol::Flag::OmpDeclareTarget); - } else if (const auto *clauseList{ - parser::Unwrap(spec.u)}) { - for (const auto &clause : clauseList->v) { - if (const auto *toClause{std::get_if(&clause.u)}) { - auto &objList{std::get(toClause->v.t)}; - ResolveOmpObjectList(objList, Symbol::Flag::OmpDeclareTarget); - } else if (const auto *linkClause{ - std::get_if(&clause.u)}) { - ResolveOmpObjectList(linkClause->v, Symbol::Flag::OmpDeclareTarget); - } else if (const auto *enterClause{ - std::get_if(&clause.u)}) { - ResolveOmpObjectList(std::get(enterClause->v.t), - Symbol::Flag::OmpDeclareTarget); + + for (const parser::OmpArgument &arg : x.v.Arguments().v) { + if (auto *object{omp::GetArgumentObject(arg)}) { + ResolveOmpObject(*object, Symbol::Flag::OmpDeclareTarget); + } + } + + for (const parser::OmpClause &clause : x.v.Clauses().v) { + if (auto *objects{parser::omp::GetOmpObjectList(clause)}) { + for (const parser::OmpObject &object : objects->v) { + ResolveOmpObject(object, Symbol::Flag::OmpDeclareTarget); } } } @@ -2346,7 +2489,8 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPDeclareTargetConstruct &x) { } bool OmpAttributeVisitor::Pre(const parser::OpenMPDeclareMapperConstruct &x) { - PushContext(x.source, llvm::omp::Directive::OMPD_declare_mapper); + const parser::OmpDirectiveName &dirName{x.v.DirName()}; + PushContext(dirName.source, dirName.v); return true; } @@ -2391,8 +2535,6 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPDispatchConstruct &x) { } bool OmpAttributeVisitor::Pre(const parser::OpenMPExecutableAllocate &x) { - IssueNonConformanceWarning(llvm::omp::Directive::OMPD_allocate, x.source, 52); - PushContext(x.source, llvm::omp::Directive::OMPD_allocate); const auto &list{std::get>(x.t)}; if (list) { @@ -2473,83 +2615,10 @@ bool OmpAttributeVisitor::IsNestedInDirective(llvm::omp::Directive directive) { } void OmpAttributeVisitor::Post(const parser::OpenMPExecutableAllocate &x) { - bool hasAllocator = false; - // TODO: Investigate whether searching the clause list can be done with - // parser::Unwrap instead of the following loop - const auto &clauseList{std::get(x.t)}; - for (const auto &clause : clauseList.v) { - if (std::get_if(&clause.u)) { - hasAllocator = true; - } - } - - if (IsNestedInDirective(llvm::omp::Directive::OMPD_target) && !hasAllocator) { - // TODO: expand this check to exclude the case when a requires - // directive with the dynamic_allocators clause is present - // in the same compilation unit (OMP5.0 2.11.3). 
- context_.Say(x.source, - "ALLOCATE directives that appear in a TARGET region " - "must specify an allocator clause"_err_en_US); - } - - const auto &allocateStmt = - std::get>(x.t).statement; - if (const auto &list{std::get>(x.t)}) { - CheckAllNamesInAllocateStmt( - std::get(x.t).source, *list, allocateStmt); - } - if (const auto &subDirs{ - std::get>>( - x.t)}) { - for (const auto &dalloc : *subDirs) { - CheckAllNamesInAllocateStmt(std::get(dalloc.t).source, - std::get(dalloc.t), allocateStmt); - } - } PopContext(); } void OmpAttributeVisitor::Post(const parser::OpenMPAllocatorsConstruct &x) { - const parser::OmpDirectiveSpecification &dirSpec{x.BeginDir()}; - auto &block{std::get(x.t)}; - - omp::SourcedActionStmt action{omp::GetActionStmt(block)}; - const parser::AllocateStmt *allocate{[&]() { - if (action) { - if (auto *alloc{std::get_if>( - &action.stmt->u)}) { - return &alloc->value(); - } - } - return static_cast(nullptr); - }()}; - - if (allocate) { - for (const auto &clause : dirSpec.Clauses().v) { - if (auto *alloc{std::get_if(&clause.u)}) { - CheckAllNamesInAllocateStmt( - x.source, std::get(alloc->v.t), *allocate); - - using OmpAllocatorSimpleModifier = parser::OmpAllocatorSimpleModifier; - using OmpAllocatorComplexModifier = parser::OmpAllocatorComplexModifier; - - auto &modifiers{OmpGetModifiers(alloc->v)}; - bool hasAllocator{ - OmpGetUniqueModifier(modifiers) || - OmpGetUniqueModifier(modifiers)}; - - // TODO: As with allocate directive, exclude the case when a requires - // directive with the dynamic_allocators clause is present in - // the same compilation unit (OMP5.0 2.11.3). - if (IsNestedInDirective(llvm::omp::Directive::OMPD_target) && - !hasAllocator) { - context_.Say(x.source, - "ALLOCATORS directives that appear in a TARGET region " - "must specify an allocator"_err_en_US); - } - } - } - } PopContext(); } @@ -2923,31 +2992,6 @@ Symbol *OmpAttributeVisitor::ResolveOmpCommonBlockName( return nullptr; } -// Use this function over ResolveOmpName when an omp object's scope needs -// resolving, it's symbol flag isn't important and a simple check for resolution -// failure is desired. Using ResolveOmpName means needing to work with the -// context to check for failure, whereas here a pointer comparison is all that's -// needed. -Symbol *OmpAttributeVisitor::ResolveOmpObjectScope(const parser::Name *name) { - - // TODO: Investigate whether the following block can be replaced by, or - // included in, the ResolveOmpName function - if (auto *prev{name ? GetContext().scope.parent().FindSymbol(name->source) - : nullptr}) { - name->symbol = prev; - return nullptr; - } - - // TODO: Investigate whether the following block can be replaced by, or - // included in, the ResolveOmpName function - if (auto *ompSymbol{ - name ? 
GetContext().scope.FindSymbol(name->source) : nullptr}) { - name->symbol = ompSymbol; - return ompSymbol; - } - return nullptr; -} - void OmpAttributeVisitor::ResolveOmpObjectList( const parser::OmpObjectList &ompObjectList, Symbol::Flag ompFlag) { for (const auto &ompObject : ompObjectList.v) { @@ -3026,13 +3070,19 @@ void OmpAttributeVisitor::ResolveOmpDesignator( context_.Say(designator.source, "List items specified in the ALLOCATE directive must not have the ALLOCATABLE attribute unless the directive is associated with an ALLOCATE statement"_err_en_US); } - if ((ompFlag == Symbol::Flag::OmpDeclarativeAllocateDirective || - ompFlag == Symbol::Flag::OmpExecutableAllocateDirective) && - ResolveOmpObjectScope(name) == nullptr) { - context_.Say(designator.source, // 2.15.3 - "List items must be declared in the same scoping unit in which the %s directive appears"_err_en_US, - parser::ToUpperCaseLetters( - llvm::omp::getOpenMPDirectiveName(directive, version))); + bool checkScope{ompFlag == Symbol::Flag::OmpDeclarativeAllocateDirective}; + // In 5.1 the scope check only applies to declarative allocate. + if (version == 50 && !checkScope) { + checkScope = ompFlag == Symbol::Flag::OmpExecutableAllocateDirective; + } + if (checkScope) { + if (omp::GetScopingUnit(GetContext().scope) != + omp::GetScopingUnit(symbol->GetUltimate().owner())) { + context_.Say(designator.source, // 2.15.3 + "List items must be declared in the same scoping unit in which the %s directive appears"_err_en_US, + parser::ToUpperCaseLetters( + llvm::omp::getOpenMPDirectiveName(directive, version))); + } } if (ompFlag == Symbol::Flag::OmpReduction) { // Using variables inside of a namelist in OpenMP reductions @@ -3488,44 +3538,6 @@ void OmpAttributeVisitor::CheckLabelContext(const parser::CharBlock source, } } -// Goes through the names in an OmpObjectList and checks if each name appears -// in the given allocate statement -void OmpAttributeVisitor::CheckAllNamesInAllocateStmt( - const parser::CharBlock &source, const parser::OmpObjectList &ompObjectList, - const parser::AllocateStmt &allocate) { - for (const auto &obj : ompObjectList.v) { - if (const auto *d{std::get_if(&obj.u)}) { - if (const auto *ref{std::get_if(&d->u)}) { - if (const auto *n{std::get_if(&ref->u)}) { - CheckNameInAllocateStmt(source, *n, allocate); - } - } - } - } -} - -void OmpAttributeVisitor::CheckNameInAllocateStmt( - const parser::CharBlock &source, const parser::Name &name, - const parser::AllocateStmt &allocate) { - for (const auto &allocation : - std::get>(allocate.t)) { - const auto &allocObj = std::get(allocation.t); - if (const auto *n{std::get_if(&allocObj.u)}) { - if (n->source == name.source) { - return; - } - } - } - unsigned version{context_.langOptions().OpenMPVersion}; - context_.Say(source, - "Object '%s' in %s directive not " - "found in corresponding ALLOCATE statement"_err_en_US, - name.ToString(), - parser::ToUpperCaseLetters( - llvm::omp::getOpenMPDirectiveName(GetContext().directive, version) - .str())); -} - void OmpAttributeVisitor::AddOmpRequiresToScope(Scope &scope, WithOmpDeclarative::RequiresFlags flags, std::optional memOrder) { diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp index 396025ff0a007..5041a6a08fc3c 100644 --- a/flang/lib/Semantics/resolve-names.cpp +++ b/flang/lib/Semantics/resolve-names.cpp @@ -24,6 +24,7 @@ #include "flang/Evaluate/intrinsics.h" #include "flang/Evaluate/tools.h" #include "flang/Evaluate/type.h" +#include "flang/Parser/openmp-utils.h" #include 
"flang/Parser/parse-tree-visitor.h" #include "flang/Parser/parse-tree.h" #include "flang/Parser/tools.h" @@ -955,7 +956,7 @@ class SubprogramVisitor : public virtual ScopeHandler, public InterfaceVisitor { bool HandlePreviousCalls(const parser::Name &, Symbol &, Symbol::Flag); const Symbol *CheckExtantProc(const parser::Name &, Symbol::Flag); // Create a subprogram symbol in the current scope and push a new scope. - Symbol &PushSubprogramScope(const parser::Name &, Symbol::Flag, + Symbol *PushSubprogramScope(const parser::Name &, Symbol::Flag, const parser::LanguageBindingSpec * = nullptr, bool hasModulePrefix = false); Symbol *GetSpecificFromGeneric(const parser::Name &); @@ -1386,6 +1387,8 @@ class ConstructVisitor : public virtual DeclarationVisitor { // Create scopes for OpenACC constructs class AccVisitor : public virtual DeclarationVisitor { public: + explicit AccVisitor(SemanticsContext &context) : context_{context} {} + void AddAccSourceRange(const parser::CharBlock &); static bool NeedsScope(const parser::OpenACCBlockConstruct &); @@ -1394,6 +1397,7 @@ class AccVisitor : public virtual DeclarationVisitor { void Post(const parser::OpenACCBlockConstruct &); bool Pre(const parser::OpenACCCombinedConstruct &); void Post(const parser::OpenACCCombinedConstruct &); + bool Pre(const parser::AccClause::UseDevice &x); bool Pre(const parser::AccBeginBlockDirective &x) { AddAccSourceRange(x.source); return true; @@ -1429,6 +1433,11 @@ class AccVisitor : public virtual DeclarationVisitor { void Post(const parser::AccBeginLoopDirective &x) { messageHandler().set_currStmtSource(std::nullopt); } + + void CopySymbolWithDevice(const parser::Name *name); + +private: + SemanticsContext &context_; }; bool AccVisitor::NeedsScope(const parser::OpenACCBlockConstruct &x) { @@ -1458,6 +1467,60 @@ bool AccVisitor::Pre(const parser::OpenACCBlockConstruct &x) { return true; } +void AccVisitor::CopySymbolWithDevice(const parser::Name *name) { + // When CUDA Fortran is enabled together with OpenACC, new + // symbols are created for the one appearing in the use_device + // clause. These new symbols have the CUDA Fortran device + // attribute. + if (context_.languageFeatures().IsEnabled(common::LanguageFeature::CUDA)) { + name->symbol = currScope().CopySymbol(*name->symbol); + if (auto *object{name->symbol->detailsIf()}) { + object->set_cudaDataAttr(common::CUDADataAttr::Device); + } + } +} + +bool AccVisitor::Pre(const parser::AccClause::UseDevice &x) { + for (const auto &accObject : x.v.v) { + common::visit( + common::visitors{ + [&](const parser::Designator &designator) { + if (const auto *name{ + semantics::getDesignatorNameIfDataRef(designator)}) { + Symbol *prev{currScope().FindSymbol(name->source)}; + if (prev != name->symbol) { + name->symbol = prev; + } + CopySymbolWithDevice(name); + } else { + if (const auto *dataRef{ + std::get_if(&designator.u)}) { + using ElementIndirection = + common::Indirection; + if (auto *ind{std::get_if(&dataRef->u)}) { + const parser::ArrayElement &arrayElement{ind->value()}; + Walk(arrayElement.subscripts); + const parser::DataRef &base{arrayElement.base}; + if (auto *name{std::get_if(&base.u)}) { + Symbol *prev{currScope().FindSymbol(name->source)}; + if (prev != name->symbol) { + name->symbol = prev; + } + CopySymbolWithDevice(name); + } + } + } + } + }, + [&](const parser::Name &name) { + // TODO: common block in use_device? 
+ }, + }, + accObject.u); + } + return false; +} + void AccVisitor::Post(const parser::OpenACCBlockConstruct &x) { if (NeedsScope(x)) { PopScope(); @@ -1486,34 +1549,16 @@ class OmpVisitor : public virtual DeclarationVisitor { bool Pre(const parser::OmpBlockConstruct &); void Post(const parser::OmpBlockConstruct &); bool Pre(const parser::OmpBeginDirective &x) { - AddOmpSourceRange(x.source); - // Manually resolve names in CRITICAL directives. This is because these - // names do not denote Fortran objects, and the CRITICAL directive causes - // them to be "auto-declared", i.e. inserted into the global scope. - // More specifically, they are not expected to have explicit declarations, - // and if they do the behavior is unspeficied. - if (x.DirName().v == llvm::omp::Directive::OMPD_critical) { - for (const parser::OmpArgument &arg : x.Arguments().v) { - ResolveCriticalName(arg); - } - } - return true; + return Pre(static_cast(x)); } - void Post(const parser::OmpBeginDirective &) { - messageHandler().set_currStmtSource(std::nullopt); + void Post(const parser::OmpBeginDirective &x) { + Post(static_cast(x)); } bool Pre(const parser::OmpEndDirective &x) { - AddOmpSourceRange(x.source); - // Manually resolve names in CRITICAL directives. - if (x.DirName().v == llvm::omp::Directive::OMPD_critical) { - for (const parser::OmpArgument &arg : x.Arguments().v) { - ResolveCriticalName(arg); - } - } - return true; + return Pre(static_cast(x)); } - void Post(const parser::OmpEndDirective &) { - messageHandler().set_currStmtSource(std::nullopt); + void Post(const parser::OmpEndDirective &x) { + Post(static_cast(x)); } bool Pre(const parser::OpenMPLoopConstruct &) { @@ -1522,15 +1567,21 @@ class OmpVisitor : public virtual DeclarationVisitor { } void Post(const parser::OpenMPLoopConstruct &) { PopScope(); } bool Pre(const parser::OmpBeginLoopDirective &x) { - AddOmpSourceRange(x.source); - return true; + return Pre(static_cast(x)); + } + void Post(const parser::OmpBeginLoopDirective &x) { + Post(static_cast(x)); + } + bool Pre(const parser::OmpEndLoopDirective &x) { + return Pre(static_cast(x)); + } + void Post(const parser::OmpEndLoopDirective &x) { + Post(static_cast(x)); } bool Pre(const parser::OpenMPDeclareMapperConstruct &x) { AddOmpSourceRange(x.source); - ProcessMapperSpecifier(std::get(x.t), - std::get(x.t)); - return false; + return true; } bool Pre(const parser::OpenMPDeclareSimdConstruct &x) { @@ -1552,44 +1603,14 @@ class OmpVisitor : public virtual DeclarationVisitor { bool Pre(const parser::OmpDeclareVariantDirective &x) { AddOmpSourceRange(x.source); - auto FindSymbolOrError = [&](const parser::Name &procName) { - auto *symbol{FindSymbol(NonDerivedTypeScope(), procName)}; - if (!symbol) { - context().Say(procName.source, - "Implicit subroutine declaration '%s' in !$OMP DECLARE VARIANT"_err_en_US, - procName.source); - } - }; - auto &baseProcName = std::get>(x.t); - if (baseProcName) { - FindSymbolOrError(*baseProcName); - } - auto &varProcName = std::get(x.t); - FindSymbolOrError(varProcName); return true; } bool Pre(const parser::OpenMPDeclareReductionConstruct &x) { - AddOmpSourceRange(x.source); - parser::OmpClauseList empty(std::list{}); - auto &maybeClauses{std::get>(x.t)}; - ProcessReductionSpecifier( - std::get>(x.t).value(), - maybeClauses ? 
*maybeClauses : empty, declaratives_.back()); - return false; - } - bool Pre(const parser::OmpMapClause &); - - void Post(const parser::OmpBeginLoopDirective &) { - messageHandler().set_currStmtSource(std::nullopt); - } - bool Pre(const parser::OmpEndLoopDirective &x) { AddOmpSourceRange(x.source); return true; } - void Post(const parser::OmpEndLoopDirective &) { - messageHandler().set_currStmtSource(std::nullopt); - } + bool Pre(const parser::OmpMapClause &); bool Pre(const parser::OpenMPSectionsConstruct &) { PushScope(Scope::Kind::OtherConstruct, nullptr); @@ -1597,18 +1618,16 @@ class OmpVisitor : public virtual DeclarationVisitor { } void Post(const parser::OpenMPSectionsConstruct &) { PopScope(); } bool Pre(const parser::OmpBeginSectionsDirective &x) { - AddOmpSourceRange(x.source); - return true; + return Pre(static_cast(x)); } - void Post(const parser::OmpBeginSectionsDirective &) { - messageHandler().set_currStmtSource(std::nullopt); + void Post(const parser::OmpBeginSectionsDirective &x) { + Post(static_cast(x)); } bool Pre(const parser::OmpEndSectionsDirective &x) { - AddOmpSourceRange(x.source); - return true; + return Pre(static_cast(x)); } - void Post(const parser::OmpEndSectionsDirective &) { - messageHandler().set_currStmtSource(std::nullopt); + void Post(const parser::OmpEndSectionsDirective &x) { + Post(static_cast(x)); } bool Pre(const parser::OpenMPThreadprivate &) { SkipImplicitTyping(true); @@ -1616,51 +1635,41 @@ class OmpVisitor : public virtual DeclarationVisitor { } void Post(const parser::OpenMPThreadprivate &) { SkipImplicitTyping(false); } bool Pre(const parser::OpenMPDeclareTargetConstruct &x) { - const auto &spec{std::get(x.t)}; - auto populateDeclareTargetNames{[this](const parser::OmpObjectList - &objectList) { - for (const auto &ompObject : objectList.v) { - common::visit( - common::visitors{ - [&](const parser::Designator &designator) { - if (const auto *name{ - semantics::getDesignatorNameIfDataRef(designator)}) { - specPartState_.declareTargetNames.insert(name->source); - } - }, - [&](const parser::Name &name) { - specPartState_.declareTargetNames.insert(name.source); - }, - [&](const parser::OmpObject::Invalid &invalid) { - switch (invalid.v) { - SWITCH_COVERS_ALL_CASES - case parser::OmpObject::Invalid::Kind::BlankCommonBlock: - context().Say(invalid.source, - "Blank common blocks are not allowed as directive or clause arguments"_err_en_US); - break; - } - }, - }, - ompObject.u); - } + auto addObjectName{[&](const parser::OmpObject &object) { + common::visit( + common::visitors{ + [&](const parser::Designator &designator) { + if (const auto *name{ + semantics::getDesignatorNameIfDataRef(designator)}) { + specPartState_.declareTargetNames.insert(name->source); + } + }, + [&](const parser::Name &name) { + specPartState_.declareTargetNames.insert(name.source); + }, + [&](const parser::OmpObject::Invalid &invalid) { + switch (invalid.v) { + SWITCH_COVERS_ALL_CASES + case parser::OmpObject::Invalid::Kind::BlankCommonBlock: + context().Say(invalid.source, + "Blank common blocks are not allowed as directive or clause arguments"_err_en_US); + break; + } + }, + }, + object.u); }}; - if (const auto *objectList{parser::Unwrap(spec.u)}) { - populateDeclareTargetNames(*objectList); - } else if (const auto *clauseList{ - parser::Unwrap(spec.u)}) { - for (const auto &clause : clauseList->v) { - if (const auto *toClause{ - std::get_if(&clause.u)}) { - populateDeclareTargetNames( - std::get(toClause->v.t)); - } else if (const auto *linkClause{ - 
std::get_if(&clause.u)}) { - populateDeclareTargetNames(linkClause->v); - } else if (const auto *enterClause{ - std::get_if(&clause.u)}) { - populateDeclareTargetNames( - std::get(enterClause->v.t)); + for (const parser::OmpArgument &arg : x.v.Arguments().v) { + if (auto *object{omp::GetArgumentObject(arg)}) { + addObjectName(*object); + } + } + + for (const parser::OmpClause &clause : x.v.Clauses().v) { + if (auto *objects{parser::omp::GetOmpObjectList(clause)}) { + for (const parser::OmpObject &object : objects->v) { + addObjectName(object); } } } @@ -1671,12 +1680,14 @@ class OmpVisitor : public virtual DeclarationVisitor { void Post(const parser::OpenMPDeclareTargetConstruct &) { SkipImplicitTyping(false); } - bool Pre(const parser::OpenMPDeclarativeAllocate &) { + bool Pre(const parser::OpenMPDeclarativeAllocate &x) { + AddOmpSourceRange(x.source); SkipImplicitTyping(true); return true; } void Post(const parser::OpenMPDeclarativeAllocate &) { SkipImplicitTyping(false); + messageHandler().set_currStmtSource(std::nullopt); } bool Pre(const parser::OpenMPDeclarativeConstruct &x) { AddOmpSourceRange(x.source); @@ -1717,7 +1728,23 @@ class OmpVisitor : public virtual DeclarationVisitor { PopScope(); } } + + // These objects are handled explicitly, and the AST traversal should not + // reach a point where it calls the Pre functions for them. + bool Pre(const parser::OmpMapperSpecifier &x) { + llvm_unreachable("This function should not be reached by AST traversal"); + } + bool Pre(const parser::OmpReductionSpecifier &x) { + llvm_unreachable("This function should not be reached by AST traversal"); + } + bool Pre(const parser::OmpBaseVariantNames &x) { + llvm_unreachable("This function should not be reached by AST traversal"); + } + bool Pre(const parser::OmpDirectiveSpecification &x); + void Post(const parser::OmpDirectiveSpecification &) { + messageHandler().set_currStmtSource(std::nullopt); + } bool Pre(const parser::OmpTypeSpecifier &x) { BeginDeclTypeSpec(); @@ -1727,12 +1754,21 @@ class OmpVisitor : public virtual DeclarationVisitor { EndDeclTypeSpec(); } + bool Pre(const parser::OpenMPConstruct &x) { + // Indicate that the current directive is not a declarative one. + declaratives_.push_back(nullptr); + return true; + } + void Post(const parser::OpenMPConstruct &) { + // Pop the null pointer. 
+ declaratives_.pop_back(); + } + private: void ProcessMapperSpecifier(const parser::OmpMapperSpecifier &spec, const parser::OmpClauseList &clauses); void ProcessReductionSpecifier(const parser::OmpReductionSpecifier &spec, - const parser::OmpClauseList &clauses, - const parser::OpenMPDeclarativeConstruct *wholeConstruct); + const parser::OmpClauseList &clauses); void ResolveCriticalName(const parser::OmpArgument &arg); @@ -1863,8 +1899,7 @@ std::string MangleDefinedOperator(const parser::CharBlock &name) { void OmpVisitor::ProcessReductionSpecifier( const parser::OmpReductionSpecifier &spec, - const parser::OmpClauseList &clauses, - const parser::OpenMPDeclarativeConstruct *construct) { + const parser::OmpClauseList &clauses) { const parser::Name *name{nullptr}; parser::CharBlock mangledName; UserReductionDetails reductionDetailsTemp; @@ -1951,7 +1986,7 @@ void OmpVisitor::ProcessReductionSpecifier( PopScope(); } - reductionDetails->AddDecl(construct); + reductionDetails->AddDecl(declaratives_.back()); if (!symbol) { symbol = &MakeSymbol(mangledName, Attrs{}, std::move(*reductionDetails)); @@ -1994,30 +2029,44 @@ bool OmpVisitor::Pre(const parser::OmpDirectiveSpecification &x) { const parser::OmpArgumentList &args{x.Arguments()}; const parser::OmpClauseList &clauses{x.Clauses()}; + bool visitClauses{true}; + + for (const parser::OmpArgument &arg : args.v) { + common::visit( // + common::visitors{ + [&](const parser::OmpMapperSpecifier &spec) { + ProcessMapperSpecifier(spec, clauses); + visitClauses = false; + }, + [&](const parser::OmpReductionSpecifier &spec) { + ProcessReductionSpecifier(spec, clauses); + visitClauses = false; + }, + [&](const parser::OmpBaseVariantNames &names) { + Walk(std::get<0>(names.t)); + Walk(std::get<1>(names.t)); + }, + [&](const parser::OmpLocator &locator) { + // Manually resolve names in CRITICAL directives. This is because + // these names do not denote Fortran objects, and the CRITICAL + // directive causes them to be "auto-declared", i.e. inserted into + // the global scope. More specifically, they are not expected to + // have explicit declarations, and if they do the behavior is + // unspecified. + if (x.DirId() == llvm::omp::Directive::OMPD_critical) { + ResolveCriticalName(arg); + } else { + Walk(locator); + } + }, + }, + arg.u); + } - switch (x.DirId()) { - case llvm::omp::Directive::OMPD_declare_mapper: - if (!args.v.empty()) { - const parser::OmpArgument &first{args.v.front()}; - if (auto *spec{std::get_if(&first.u)}) { - ProcessMapperSpecifier(*spec, clauses); - } - } - break; - case llvm::omp::Directive::OMPD_declare_reduction: - if (!args.v.empty()) { - const parser::OmpArgument &first{args.v.front()}; - if (auto *spec{std::get_if(&first.u)}) { - ProcessReductionSpecifier(*spec, clauses, declaratives_.back()); - } - } - break; - default: - // Default processing. 
- Walk(args); + if (visitClauses) { Walk(clauses); - break; } + return false; } @@ -2051,7 +2100,8 @@ class ResolveNamesVisitor : public virtual ScopeHandler, ResolveNamesVisitor( SemanticsContext &context, ImplicitRulesMap &rules, Scope &top) - : BaseVisitor{context, *this, rules}, topScope_{top} { + : BaseVisitor{context, *this, rules}, AccVisitor(context), + topScope_{top} { PushScope(top); } @@ -3358,7 +3408,8 @@ bool ScopeHandler::CheckPossibleBadForwardRef(const Symbol &symbol) { context().SetError(symbol); return true; } - if ((IsDummy(symbol) || FindCommonBlockContaining(symbol)) && + if ((IsDummy(symbol) || + (!symbol.has() && FindCommonBlockContaining(symbol))) && isImplicitNoneType() && symbol.test(Symbol::Flag::Implicit) && !context().HasError(symbol)) { // Dummy or COMMON was implicitly typed despite IMPLICIT NONE(TYPE) in @@ -4491,10 +4542,13 @@ bool SubprogramVisitor::HandleStmtFunction(const parser::StmtFunctionStmt &x) { "'%s' has not been declared as an array or pointer-valued function"_err_en_US); return false; } - auto &symbol{PushSubprogramScope(name, Symbol::Flag::Function)}; - symbol.set(Symbol::Flag::StmtFunction); - EraseSymbol(symbol); // removes symbol added by PushSubprogramScope - auto &details{symbol.get()}; + Symbol *symbol{PushSubprogramScope(name, Symbol::Flag::Function)}; + if (!symbol) { + return false; + } + symbol->set(Symbol::Flag::StmtFunction); + EraseSymbol(*symbol); // removes symbol added by PushSubprogramScope + auto &details{symbol->get()}; for (const auto &dummyName : std::get>(x.t)) { ObjectEntityDetails dummyDetails{true}; if (auto *dummySymbol{FindInScope(currScope().parent(), dummyName)}) { @@ -5123,19 +5177,22 @@ bool SubprogramVisitor::BeginSubprogram(const parser::Name &name, } } } - Symbol &newSymbol{ + Symbol *newSymbol{ PushSubprogramScope(name, subpFlag, bindingSpec, hasModulePrefix)}; + if (!newSymbol) { + return false; + } if (moduleInterface) { - newSymbol.get().set_moduleInterface(*moduleInterface); + newSymbol->get().set_moduleInterface(*moduleInterface); if (moduleInterface->attrs().test(Attr::PRIVATE)) { - SetImplicitAttr(newSymbol, Attr::PRIVATE); + SetImplicitAttr(*newSymbol, Attr::PRIVATE); } else if (moduleInterface->attrs().test(Attr::PUBLIC)) { - SetImplicitAttr(newSymbol, Attr::PUBLIC); + SetImplicitAttr(*newSymbol, Attr::PUBLIC); } } if (entryStmts) { for (const auto &ref : *entryStmts) { - CreateEntry(*ref, newSymbol); + CreateEntry(*ref, *newSymbol); } } return true; @@ -5242,12 +5299,16 @@ const Symbol *SubprogramVisitor::CheckExtantProc( return prev; } -Symbol &SubprogramVisitor::PushSubprogramScope(const parser::Name &name, +Symbol *SubprogramVisitor::PushSubprogramScope(const parser::Name &name, Symbol::Flag subpFlag, const parser::LanguageBindingSpec *bindingSpec, bool hasModulePrefix) { Symbol *symbol{GetSpecificFromGeneric(name)}; const DeclTypeSpec *previousImplicitType{nullptr}; SourceName previousName; + if (symbol && inInterfaceBlock() && !symbol->has()) { + SayAlreadyDeclared(name, *symbol); + return nullptr; + } if (!symbol) { if (bindingSpec && currScope().IsGlobal() && std::get>( @@ -5276,9 +5337,7 @@ Symbol &SubprogramVisitor::PushSubprogramScope(const parser::Name &name, if (subpFlag == Symbol::Flag::Function) { auto &funcResultTop{funcResultStack().Push(currScope(), name.source)}; funcResultTop.previousImplicitType = previousImplicitType; - ; funcResultTop.previousName = previousName; - ; } if (inInterfaceBlock()) { auto &details{symbol->get()}; @@ -5304,7 +5363,7 @@ Symbol 
&SubprogramVisitor::PushSubprogramScope(const parser::Name &name, found && found->has()) { found->set(subpFlag); // PushScope() created symbol } - return *symbol; + return symbol; } void SubprogramVisitor::PushBlockDataScope(const parser::Name &name) { @@ -5730,7 +5789,8 @@ void DeclarationVisitor::DeclareIntrinsic(const parser::Name &name) { } } if (!symbol.test(Symbol::Flag::Function) && - !symbol.test(Symbol::Flag::Subroutine)) { + !symbol.test(Symbol::Flag::Subroutine) && + !context().intrinsics().IsDualIntrinsic(name.source.ToString())) { if (context().intrinsics().IsIntrinsicFunction(name.source.ToString())) { symbol.set(Symbol::Flag::Function); } else if (context().intrinsics().IsIntrinsicSubroutine( diff --git a/flang/test/Driver/complex-range.f90 b/flang/test/Driver/complex-range.f90 index e5a1ba9068ac9..575fa0437fd0d 100644 --- a/flang/test/Driver/complex-range.f90 +++ b/flang/test/Driver/complex-range.f90 @@ -15,6 +15,83 @@ ! RUN: not %flang -### -fcomplex-arithmetic=foo -c %s 2>&1 \ ! RUN: | FileCheck %s --check-prefix=ERR +! RUN: %flang -### -ffast-math -c %s 2>&1 \ +! RUN: | FileCheck %s --check-prefix=BASIC + +! RUN: %flang -### -fno-fast-math -c %s 2>&1 \ +! RUN: | FileCheck %s --check-prefix=RANGE + +! RUN: %flang -### -Werror -ffast-math -fno-fast-math -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=RANGE %s + +! RUN: %flang -### -ffast-math -fcomplex-arithmetic=full -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=FULL,ARITH-FULL-OVERRIDING,FAST-OVERRIDDEN %s + +! RUN: %flang -### -ffast-math -fcomplex-arithmetic=improved -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=IMPRVD,ARITH-IMPROVED-OVERRIDING,FAST-OVERRIDDEN %s + +! RUN: %flang -### -Werror -ffast-math -fcomplex-arithmetic=basic -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=BASIC %s + +! RUN: %flang -### -Werror -fno-fast-math -ffast-math -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=BASIC %s + +! RUN: %flang -### -Werror -fno-fast-math -fcomplex-arithmetic=full -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=FULL %s + +! RUN: %flang -### -Werror -fno-fast-math -fcomplex-arithmetic=improved -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=IMPRVD %s + +! RUN: %flang -### -Werror -fno-fast-math -fcomplex-arithmetic=basic -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=BASIC %s + +! RUN: %flang -### -fcomplex-arithmetic=full -ffast-math -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=BASIC,FAST-OVERRIDING,ARITH-FULL-OVERRIDDEN %s + +! RUN: %flang -### -Werror -fcomplex-arithmetic=full -fno-fast-math -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=RANGE %s + +! RUN: %flang -### -Werror -fcomplex-arithmetic=full -fcomplex-arithmetic=improved -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=IMPRVD %s + +! RUN: %flang -### -Werror -fcomplex-arithmetic=full -fcomplex-arithmetic=basic -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=BASIC %s + +! RUN: %flang -### -fcomplex-arithmetic=improved -ffast-math -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=BASIC,FAST-OVERRIDING,ARITH-IMPROVED-OVERRIDDEN %s + +! RUN: %flang -### -fcomplex-arithmetic=improved -fno-fast-math -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=RANGE,NOFAST-OVERRIDING,ARITH-IMPROVED-OVERRIDDEN %s + +! RUN: %flang -### -Werror -fcomplex-arithmetic=improved -fcomplex-arithmetic=full -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=FULL %s + +! RUN: %flang -### -Werror -fcomplex-arithmetic=improved -fcomplex-arithmetic=basic -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=BASIC %s + +! 
RUN: %flang -### -Werror -fcomplex-arithmetic=basic -ffast-math -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=BASIC %s + +! RUN: %flang -### -fcomplex-arithmetic=basic -fno-fast-math -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=RANGE,NOFAST-OVERRIDING,ARITH-BASIC-OVERRIDDEN %s + +! RUN: %flang -### -Werror -fcomplex-arithmetic=basic -fcomplex-arithmetic=full -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=FULL %s + +! RUN: %flang -### -Werror -fcomplex-arithmetic=basic -fcomplex-arithmetic=improved -c %s 2>&1 \ +! RUN: | FileCheck --check-prefixes=IMPRVD %s + + +! FAST-OVERRIDING: warning: '-ffast-math' sets complex range to "basic" +! NOFAST-OVERRIDING: warning: '-fno-fast-math' sets complex range to "none" +! ARITH-FULL-OVERRIDING: warning: '-fcomplex-arithmetic=full' sets complex range to "full" +! ARITH-IMPROVED-OVERRIDING: warning: '-fcomplex-arithmetic=improved' sets complex range to "improved" + +! FAST-OVERRIDDEN: overriding the setting of "basic" that was implied by '-ffast-math' [-Woverriding-complex-range] +! ARITH-FULL-OVERRIDDEN: overriding the setting of "full" that was implied by '-fcomplex-arithmetic=full' [-Woverriding-complex-range] +! ARITH-IMPROVED-OVERRIDDEN: overriding the setting of "improved" that was implied by '-fcomplex-arithmetic=improved' [-Woverriding-complex-range] +! ARITH-BASIC-OVERRIDDEN: overriding the setting of "basic" that was implied by '-fcomplex-arithmetic=basic' [-Woverriding-complex-range] + ! RANGE-NOT: -complex-range= ! FULL: -complex-range=full ! IMPRVD: -complex-range=improved diff --git a/flang/test/Driver/fast-real-mod.f90 b/flang/test/Driver/fast-real-mod.f90 new file mode 100644 index 0000000000000..4ea9b26e64753 --- /dev/null +++ b/flang/test/Driver/fast-real-mod.f90 @@ -0,0 +1,7 @@ +! RUN: %flang -fno-fast-real-mod -### -c %s 2>&1 | FileCheck %s -check-prefix CHECK-NO-FAST-REAL-MOD + +! CHECK-NO-FAST-REAL-MOD: "-fno-fast-real-mod" + +program test + ! nothing to be done in here +end program test diff --git a/flang/test/Driver/flang-dwarf-version.f90 b/flang/test/Driver/flang-dwarf-version.f90 index dc69140a7eda1..d860c970a91f8 100644 --- a/flang/test/Driver/flang-dwarf-version.f90 +++ b/flang/test/Driver/flang-dwarf-version.f90 @@ -1,9 +1,18 @@ +// RUN: %if !target={{.*aix.*}} %{ \ // RUN: %flang -### -S %s -g -gdwarf-5 2>&1 \ -// RUN: | FileCheck --check-prefix=CHECK-DWARF5 %s +// RUN: | FileCheck --check-prefix=CHECK-DWARF5 %s \ +// RUN: %} + +// RUN: %if !target={{.*aix.*}} %{ \ // RUN: %flang -### -S %s -gdwarf-5 2>&1 \ -// RUN: | FileCheck --check-prefix=CHECK-DWARF5 %s +// RUN: | FileCheck --check-prefix=CHECK-DWARF5 %s \ +// RUN: %} + +// RUN: %if !target={{.*aix.*}} %{ \ // RUN: %flang -### -S %s -g1 -gdwarf-5 2>&1 \ -// RUN: | FileCheck --check-prefix=CHECK-WITH-G1-DWARF5 %s +// RUN: | FileCheck --check-prefix=CHECK-WITH-G1-DWARF5 %s \ +// RUN: %} + // RUN: %flang -### -S %s -gdwarf-4 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-DWARF4 %s // RUN: %flang -### -S %s -gdwarf-3 2>&1 \ diff --git a/flang/test/Driver/lto-lld-flags.f90 b/flang/test/Driver/lto-lld-flags.f90 index 055526ab02a9d..851ed0913cdec 100644 --- a/flang/test/Driver/lto-lld-flags.f90 +++ b/flang/test/Driver/lto-lld-flags.f90 @@ -1,4 +1,5 @@ ! UNSUPPORTED: system-windows +! REQUIRES: lld ! check flto-partitions is passed to lld, and not to fc1 ! 
RUN: %flang -### -fuse-ld=lld -flto=full -flto-partitions=16 %s 2>&1 | FileCheck %s --check-prefixes=LLD-PART,FC1-PART diff --git a/flang/test/Driver/mlir-pass-pipeline.f90 b/flang/test/Driver/mlir-pass-pipeline.f90 index 4fd89d6f15d46..df558392c5fc2 100644 --- a/flang/test/Driver/mlir-pass-pipeline.f90 +++ b/flang/test/Driver/mlir-pass-pipeline.f90 @@ -15,6 +15,15 @@ ! ALL: Pass statistics report ! ALL: Fortran::lower::VerifierPass +! O2-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private'] +! O2-NEXT: 'fir.global' Pipeline +! O2-NEXT: ExpressionSimplification +! O2-NEXT: 'func.func' Pipeline +! O2-NEXT: ExpressionSimplification +! O2-NEXT: 'omp.declare_reduction' Pipeline +! O2-NEXT: ExpressionSimplification +! O2-NEXT: 'omp.private' Pipeline +! O2-NEXT: ExpressionSimplification ! O2-NEXT: Canonicalizer ! ALL: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private'] ! ALL-NEXT:'fir.global' Pipeline diff --git a/flang/test/Driver/split-debug.f90 b/flang/test/Driver/split-debug.f90 new file mode 100644 index 0000000000000..1cb9f84d7a5d2 --- /dev/null +++ b/flang/test/Driver/split-debug.f90 @@ -0,0 +1,44 @@ +! Test -gsplit-dwarf and -gsplit-dwarf={split,single}. + +! RUN: %flang -### -c -target x86_64 -g -gsplit-dwarf %s 2>&1 | FileCheck %s --check-prefixes=SPLIT +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf -g %s 2>&1 | FileCheck %s --check-prefixes=SPLIT +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf=split -g %s 2>&1 | FileCheck %s --check-prefixes=SPLIT + +! SPLIT: "-split-dwarf-file" "split-debug.dwo" "-split-dwarf-output" "split-debug.dwo" + +! Check warning on non-supported platforms. +! RUN: %flang -### -c -target x86_64-apple-darwin -gsplit-dwarf -g %s 2>&1 | FileCheck %s --check-prefix=WARN +! WARN: warning: debug information option '-gsplit-dwarf' is not supported for target 'x86_64-apple-darwin' + +! -gno-split-dwarf disables debug fission. +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf -g -gno-split-dwarf %s 2>&1 | FileCheck %s --check-prefix=NOSPLIT +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf=single -g -gno-split-dwarf %s 2>&1 | FileCheck %s --check-prefix=NOSPLIT +! RUN: %flang -### -c -target x86_64 -gno-split-dwarf -g -gsplit-dwarf %s 2>&1 | FileCheck %s --check-prefixes=SPLIT + +! NOSPLIT-NOT: "-split-dwarf + +! Test -gsplit-dwarf=single. +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf=single -g %s 2>&1 | FileCheck %s --check-prefix=SINGLE + +! SINGLE: "-split-dwarf-file" "split-debug.o" +! SINGLE-NOT: "-split-dwarf-output" + +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf=single -g -o %tfoo.o %s 2>&1 | FileCheck %s --check-prefix=SINGLE_WITH_FILENAME +! SINGLE_WITH_FILENAME: "-split-dwarf-file" "{{.*}}foo.o" +! SINGLE_WITH_FILENAME-NOT: "-split-dwarf-output" + + +! Invoke objcopy if not using the integrated assembler. +! RUN: %flang -### -c -target x86_64-unknown-linux-gnu -fno-integrated-as -gsplit-dwarf -g %s 2>&1 | FileCheck %s --check-prefix=OBJCOPY +! OBJCOPY: objcopy{{(.exe)?}} +! OBJCOPY-SAME: --extract-dwo +! OBJCOPY-NEXT: objcopy{{(.exe)?}} +! OBJCOPY-SAME: --strip-dwo + +! RUN: not %flang -target powerpc-ibm-aix -gdwarf-4 -gsplit-dwarf %s 2>&1 \ +! RUN: | FileCheck %s --check-prefix=UNSUP_OPT_AIX +! RUN: not %flang -target powerpc64-ibm-aix -gdwarf-4 -gsplit-dwarf %s 2>&1 \ +! RUN: | FileCheck %s --check-prefix=UNSUP_OPT_AIX64 + +! UNSUP_OPT_AIX: error: unsupported option '-gsplit-dwarf' for target 'powerpc-ibm-aix' +! 
UNSUP_OPT_AIX64: error: unsupported option '-gsplit-dwarf' for target 'powerpc64-ibm-aix' diff --git a/flang/test/Evaluate/Inputs/comporder1.mod b/flang/test/Evaluate/Inputs/comporder1.mod new file mode 100644 index 0000000000000..5c1a3c89d5e1e --- /dev/null +++ b/flang/test/Evaluate/Inputs/comporder1.mod @@ -0,0 +1,6 @@ +!mod$ v1 sum:64657f78d85da21d +module comporder1 +type::base +integer(4)::c1 +end type +end diff --git a/flang/test/Evaluate/Inputs/comporder2.mod b/flang/test/Evaluate/Inputs/comporder2.mod new file mode 100644 index 0000000000000..e228639669642 --- /dev/null +++ b/flang/test/Evaluate/Inputs/comporder2.mod @@ -0,0 +1,8 @@ +!mod$ v1 sum:3235f4a02cdad423 +!need$ 64657f78d85da21d n comporder1 +module comporder2 +use comporder1,only:base +type,extends(base)::intermediate +integer(4)::c2 +end type +end diff --git a/flang/test/Evaluate/comporder.f90 b/flang/test/Evaluate/comporder.f90 new file mode 100644 index 0000000000000..c57f68137e11b --- /dev/null +++ b/flang/test/Evaluate/comporder.f90 @@ -0,0 +1,41 @@ +!RUN: %flang_fc1 -fdebug-unparse -I%S/Inputs %s | FileCheck %s +program main + use comporder2 + type, extends(intermediate) :: last + integer c3 + end type + !CHECK:PRINT *, last(c1=1_4,c2=2_4,c3=3_4) + print *, last(1,2,3) + !CHECK:PRINT *, last(c1=11_4,c2=12_4,c3=13_4) + print *, last(c3=13,c2=12,c1=11) + !CHECK:PRINT *, last(c1=21_4,c2=22_4,c3=23_4) + print *, last(c3=23,c1=21,c2=22) + !CHECK:PRINT *, last(c1=31_4,c2=32_4,c3=33_4) + print *, last(c2=32,c3=33,c1=31) + !CHECK:PRINT *, last(c1=41_4,c2=42_4,c3=43_4) + print *, last(c2=42,c1=41,c3=43) + !CHECK:PRINT *, last(c1=51_4,c2=52_4,c3=53_4) + print *, last(c1=51,c3=53,c2=52) + !CHECK:PRINT *, last(c1=61_4,c2=62_4,c3=63_4) + print *, last(c1=61,c2=62,c3=63) + !CHECK:PRINT *, last(intermediate=intermediate(c1=71_4,c2=72_4),c3=73_4) + print *, last(c3=73,intermediate=intermediate(c2=72,c1=71)) + !CHECK:PRINT *, last(intermediate=intermediate(c1=81_4,c2=82_4),c3=83_4) + print *, last(c3=83,intermediate=intermediate(c1=81,c2=82)) + !CHECK:PRINT *, last(intermediate=intermediate(c1=91_4,c2=92_4),c3=93_4) + print *, last(intermediate(c2=92,c1=91),c3=93) + !CHECK:PRINT *, last(intermediate=intermediate(c1=101_4,c2=102_4),c3=103_4) + print *, last(intermediate(c1=101,c2=102),c3=103) + !CHECK:PRINT *, last(intermediate=intermediate(base=base(c1=111_4),c2=112_4),c3=113_4) + print *, last(c3=113,intermediate=intermediate(c2=112,base=base(c1=111))) + !CHECK:PRINT *, last(intermediate=intermediate(base=base(c1=121_4),c2=122_4),c3=123_4) + print *, last(c3=123,intermediate=intermediate(base(c1=121),c2=122)) + !CHECK:PRINT *, last(intermediate=intermediate(base=base(c1=131_4),c2=132_4),c3=133_4) + print *, last(intermediate(c2=132,base=base(c1=131)),c3=133) + !CHECK:PRINT *, last(intermediate=intermediate(base=base(c1=141_4),c2=142_4),c3=143_4) + print *, last(intermediate(base(c1=141),c2=142),c3=143) + !CHECK:PRINT *, last(base=base(c1=151_4),c2=152_4,c3=153_4) + print *, last(base(151),c3=153,c2=152) + !CHECK:PRINT *, last(base=base(c1=161_4),c2=162_4,c3=163_4) + print *, last(base(161),c2=162,c3=163) +end diff --git a/flang/test/Fir/CUDA/cuda-code-gen.mlir b/flang/test/Fir/CUDA/cuda-code-gen.mlir index 672be13beae24..bbd3f9fbd351b 100644 --- a/flang/test/Fir/CUDA/cuda-code-gen.mlir +++ b/flang/test/Fir/CUDA/cuda-code-gen.mlir @@ -221,3 +221,66 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : func.func private @__tgt_acc_get_deviceptr() -> !fir.ref> } + +// ----- + +module attributes {gpu.container_module, 
dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>} { + fir.global @_QMm1Eda {data_attr = #cuf.cuda} : !fir.box>> { + %c0 = arith.constant 0 : index + %0 = fir.zero_bits !fir.heap> + %1 = fircg.ext_embox %0(%c0, %c0) {allocator_idx = 2 : i32} : (!fir.heap>, index, index) -> !fir.box>> + fir.has_value %1 : !fir.box>> + } + func.func @_QQmain() attributes {fir.bindc_name = "P", target_cpu = "x86-64", target_features = #llvm.target_features<["+cmov", "+mmx", "+sse", "+sse2", "+cx8", "+x87", "+fxsr"]>} { + %c64 = arith.constant 64 : index + %c1 = arith.constant 1 : index + %c0_i32 = arith.constant 0 : i32 + %0 = fir.address_of(@_QMm1Eda) : !fir.ref>>> + %8 = fir.load %0 : !fir.ref>>> + %9 = fircg.ext_rebox %8 : (!fir.box>>) -> !fir.box> + gpu.launch_func @cuda_device_mod::@_QMm1Psub2 blocks in (%c1, %c1, %c1) threads in (%c64, %c1, %c1) dynamic_shared_memory_size %c0_i32 args(%9 : !fir.box>) {cuf.proc_attr = #cuf.cuda_proc} + return + } + gpu.module @cuda_device_mod { + fir.global @_QMm1Eda {data_attr = #cuf.cuda} : !fir.box>> { + %c0 = arith.constant 0 : index + %0 = fir.zero_bits !fir.heap> + %1 = fircg.ext_embox %0(%c0, %c0) {allocator_idx = 2 : i32} : (!fir.heap>, index, index) -> !fir.box>> + fir.has_value %1 : !fir.box>> + } + gpu.func @_QMm1Psub2(%arg0: !fir.box>) kernel { + gpu.return + } + } +} + +// CHECK-LABEL: llvm.func @_QQmain() +// CHECK: llvm.call @_FortranACUFAllocDescriptor + +// ----- + +module attributes {gpu.container_module, dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry, dense<64> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry, dense<32> : vector<4xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<2xi64>>, #dlti.dl_entry : vector<4xi64>>, #dlti.dl_entry<"dlti.endianness", "little">, #dlti.dl_entry<"dlti.stack_alignment", 128 : i64>>} { + fir.global @_QMm1Eda {data_attr = #cuf.cuda} : !fir.box>> { + %c0 = arith.constant 0 : index + %0 = fir.zero_bits !fir.heap> + %1 = fircg.ext_embox %0(%c0, %c0) {allocator_idx = 2 : i32} : (!fir.heap>, index, index) -> !fir.box>> + fir.has_value %1 : !fir.box>> + } + gpu.module @cuda_device_mod { + fir.global @_QMm1Eda {data_attr = #cuf.cuda} : !fir.box>> { + %c0 = arith.constant 0 : index + %0 = fir.zero_bits !fir.heap> + %1 = fircg.ext_embox %0(%c0, %c0) {allocator_idx = 2 : i32} : (!fir.heap>, index, index) -> !fir.box>> + fir.has_value %1 : !fir.box>> + } + func.func @_QQxxx() { + %0 = fir.address_of(@_QMm1Eda) : !fir.ref>>> + %8 = fir.load %0 : !fir.ref>>> + return + } + } +} + +// CHECK-LABEL: llvm.func @_QQxxx() +// CHECK: llvm.alloca %{{.*}} x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr +// CHECK-NOT: 
llvm.call @_FortranACUFAllocDescriptor diff --git a/flang/test/Fir/OpenACC/openacc-type-categories.mlir b/flang/test/Fir/OpenACC/openacc-type-categories.mlir new file mode 100644 index 0000000000000..2275039dc3aff --- /dev/null +++ b/flang/test/Fir/OpenACC/openacc-type-categories.mlir @@ -0,0 +1,36 @@ +// Use --mlir-disable-threading so that the diagnostic printing is serialized. +// RUN: fir-opt %s -pass-pipeline='builtin.module(test-fir-openacc-interfaces)' -split-input-file --mlir-disable-threading 2>&1 | FileCheck %s + +module { + fir.global linkonce @test_string constant : !fir.char<1,26> { + %0 = fir.string_lit "hello_world_test_string\00"(26) : !fir.char<1,26> + fir.has_value %0 : !fir.char<1,26> + } + + // Test global constant string with pointer conversion + func.func @_QPtest_global_string_ptr() { + %0 = fir.address_of(@test_string) : !fir.ref> + %1 = fir.convert %0 : (!fir.ref>) -> !fir.ref + %2 = acc.copyin varPtr(%1 : !fir.ref) -> !fir.ref {name = "test_string", structured = false} + acc.enter_data dataOperands(%2 : !fir.ref) + return + } + + // CHECK: Visiting: %{{.*}} = acc.copyin varPtr(%{{.*}} : !fir.ref) -> !fir.ref {name = "test_string", structured = false} + // CHECK: Pointer-like and Mappable: !fir.ref + // CHECK: Type category: nonscalar + + // Test array with pointer conversion + func.func @_QPtest_alloca_array_ptr() { + %c10 = arith.constant 10 : index + %0 = fir.alloca !fir.array<10xf32> {bindc_name = "local_array", uniq_name = "_QFtest_alloca_array_ptrElocal_array"} + %1 = fir.convert %0 : (!fir.ref>) -> !fir.ref + %2 = acc.copyin varPtr(%1 : !fir.ref) -> !fir.ref {name = "local_array", structured = false} + acc.enter_data dataOperands(%2 : !fir.ref) + return + } + + // CHECK: Visiting: %{{.*}} = acc.copyin varPtr(%{{.*}} : !fir.ref) -> !fir.ref {name = "local_array", structured = false} + // CHECK: Pointer-like and Mappable: !fir.ref + // CHECK: Type category: array +} diff --git a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir index 24e5cad84b709..38d51110bbde3 100644 --- a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir +++ b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir @@ -349,7 +349,7 @@ func.func @_QPopenmp_target_data_region() { %3 = fir.convert %c1024_i32 : (i32) -> index %c1 = arith.constant 1 : index %4 = fir.convert %2 : (index) -> i32 - %5:2 = fir.do_loop %arg0 = %2 to %3 step %c1 iter_args(%arg1 = %4) -> (index, i32) { + %5 = fir.do_loop %arg0 = %2 to %3 step %c1 iter_args(%arg1 = %4) -> (i32) { fir.store %arg1 to %1 : !fir.ref %6 = fir.load %1 : !fir.ref %7 = fir.load %1 : !fir.ref @@ -358,13 +358,12 @@ func.func @_QPopenmp_target_data_region() { %9 = arith.subi %8, %c1_i64 : i64 %10 = fir.coordinate_of %0, %9 : (!fir.ref>, i64) -> !fir.ref fir.store %6 to %10 : !fir.ref - %11 = arith.addi %arg0, %c1 overflow : index %12 = fir.convert %c1 : (index) -> i32 %13 = fir.load %1 : !fir.ref %14 = arith.addi %13, %12 overflow : i32 - fir.result %11, %14 : index, i32 + fir.result %14 : i32 } - fir.store %5#1 to %1 : !fir.ref + fir.store %5 to %1 : !fir.ref omp.terminator } return @@ -404,7 +403,6 @@ func.func @_QPopenmp_target_data_region() { // CHECK: %[[VAL_21:.*]] = llvm.sub %[[VAL_19]], %[[VAL_20]] : i64 // CHECK: %[[VAL_22:.*]] = llvm.getelementptr %[[VAL_1]][0, %[[VAL_21]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK: llvm.store %[[VAL_17]], %[[VAL_22]] : i32, !llvm.ptr -// CHECK: %[[VAL_23:.*]] = llvm.add %[[VAL_12]], %[[VAL_8]] overflow : i64 // CHECK: %[[VAL_24:.*]] = llvm.trunc %[[VAL_8]] : 
i64 to i32 // CHECK: %[[VAL_25:.*]] = llvm.load %[[VAL_3]] : !llvm.ptr -> i32 // CHECK: %[[VAL_26:.*]] = llvm.add %[[VAL_25]], %[[VAL_24]] overflow : i32 @@ -653,18 +651,17 @@ func.func @_QPsb() { omp.sections { omp.section { %2 = fir.convert %c1 : (index) -> i32 - %3:2 = fir.do_loop %arg0 = %c1 to %c10 step %c1 iter_args(%arg1 = %2) -> (index, i32) { + %3 = fir.do_loop %arg0 = %c1 to %c10 step %c1 iter_args(%arg1 = %2) -> (i32) { fir.store %arg1 to %0 : !fir.ref %4 = fir.load %1 : !fir.ref %5 = arith.addi %4, %c1_i32 : i32 fir.store %5 to %1 : !fir.ref - %6 = arith.addi %arg0, %c1 : index %7 = fir.convert %c1 : (index) -> i32 %8 = fir.load %0 : !fir.ref %9 = arith.addi %8, %7 : i32 - fir.result %6, %9 : index, i32 + fir.result %9 : i32 } - fir.store %3#1 to %0 : !fir.ref + fir.store %3 to %0 : !fir.ref omp.terminator } omp.terminator diff --git a/flang/test/HLFIR/expression-simplification.fir b/flang/test/HLFIR/expression-simplification.fir new file mode 100644 index 0000000000000..15d1550f1f172 --- /dev/null +++ b/flang/test/HLFIR/expression-simplification.fir @@ -0,0 +1,101 @@ +// RUN: fir-opt %s --hlfir-expression-simplification | FileCheck %s + +// Test removal of trim() calls. + +// logical function test_char_cmp(x, y) result(cmp) +// character(*) :: x, y +// cmp = trim(x) == trim(y) +// end function + +func.func @_QPtest_char_cmp(%arg0: !fir.boxchar<1> {fir.bindc_name = "x"}, + %arg1: !fir.boxchar<1> {fir.bindc_name = "y"}) -> !fir.logical<4> { + %0 = fir.dummy_scope : !fir.dscope + %1 = fir.alloca !fir.logical<4> {bindc_name = "cmp", uniq_name = "_QFtest_char_cmpEcmp"} + %2:2 = hlfir.declare %1 {uniq_name = "_QFtest_char_cmpEcmp"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) + %3:2 = fir.unboxchar %arg0 : (!fir.boxchar<1>) -> (!fir.ref>, index) + %4:2 = hlfir.declare %3#0 typeparams %3#1 dummy_scope %0 {uniq_name = "_QFtest_char_cmpEx"} : (!fir.ref>, index, !fir.dscope) -> (!fir.boxchar<1>, !fir.ref>) + %5:2 = fir.unboxchar %arg1 : (!fir.boxchar<1>) -> (!fir.ref>, index) + %6:2 = hlfir.declare %5#0 typeparams %5#1 dummy_scope %0 {uniq_name = "_QFtest_char_cmpEy"} : (!fir.ref>, index, !fir.dscope) -> (!fir.boxchar<1>, !fir.ref>) + %7 = hlfir.char_trim %4#0 : (!fir.boxchar<1>) -> !hlfir.expr> + %8 = hlfir.char_trim %6#0 : (!fir.boxchar<1>) -> !hlfir.expr> + %9 = hlfir.cmpchar eq %7 %8 : (!hlfir.expr>, !hlfir.expr>) -> i1 + %10 = fir.convert %9 : (i1) -> !fir.logical<4> + hlfir.assign %10 to %2#0 : !fir.logical<4>, !fir.ref> + hlfir.destroy %8 : !hlfir.expr> + hlfir.destroy %7 : !hlfir.expr> + %11 = fir.load %2#0 : !fir.ref> + return %11 : !fir.logical<4> +} + +// CHECK-LABEL: func.func @_QPtest_char_cmp( +// CHECK-SAME: %[[ARG_0:.*]]: !fir.boxchar<1> {fir.bindc_name = "x"}, +// CHECK-SAME: %[[ARG_1:.*]]: !fir.boxchar<1> {fir.bindc_name = "y"}) -> !fir.logical<4> { +// CHECK: %[[VAL_0:.*]] = fir.dummy_scope : !fir.dscope +// CHECK: %[[VAL_1:.*]] = fir.alloca !fir.logical<4> {bindc_name = "cmp", uniq_name = "_QFtest_char_cmpEcmp"} +// CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFtest_char_cmpEcmp"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) +// CHECK: %[[VAL_3:.*]]:2 = fir.unboxchar %[[ARG_0]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +// CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_3]]#0 typeparams %[[VAL_3]]#1 dummy_scope %[[VAL_0]] {uniq_name = "_QFtest_char_cmpEx"} : (!fir.ref>, index, !fir.dscope) -> (!fir.boxchar<1>, !fir.ref>) +// CHECK: %[[VAL_6:.*]]:2 = fir.unboxchar %[[ARG_1]] : (!fir.boxchar<1>) -> (!fir.ref>, index) +// CHECK: %[[VAL_8:.*]]:2 = hlfir.declare 
%[[VAL_6]]#0 typeparams %[[VAL_6]]#1 dummy_scope %[[VAL_0]] {uniq_name = "_QFtest_char_cmpEy"} : (!fir.ref>, index, !fir.dscope) -> (!fir.boxchar<1>, !fir.ref>) +// CHECK: %[[VAL_9:.*]] = hlfir.cmpchar eq %[[VAL_5]]#0 %[[VAL_8]]#0 : (!fir.boxchar<1>, !fir.boxchar<1>) -> i1 +// CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i1) -> !fir.logical<4> +// CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_2]]#0 : !fir.logical<4>, !fir.ref> +// CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_2]]#0 : !fir.ref> +// CHECK: return %[[VAL_11]] : !fir.logical<4> +// CHECK: } + +// Check that trim() is not removed when its result is stored. + +// logical function test_char_cmp2(x, y) result(res) +// character(*) :: x, y +// character(:), allocatable :: tx +// +// tx = trim(x) +// res = tx == y +// end function + +func.func @_QPtest_char_cmp2(%arg0: !fir.boxchar<1> {fir.bindc_name = "x"}, %arg1: !fir.boxchar<1> {fir.bindc_name = "y"}) -> !fir.logical<4> { + %0 = fir.dummy_scope : !fir.dscope + %1 = fir.alloca !fir.logical<4> {bindc_name = "res", uniq_name = "_QFtest_char_cmp2Eres"} + %2:2 = hlfir.declare %1 {uniq_name = "_QFtest_char_cmp2Eres"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) + %3 = fir.alloca !fir.box>> {bindc_name = "tx", uniq_name = "_QFtest_char_cmp2Etx"} + %4 = fir.zero_bits !fir.heap> + %c0 = arith.constant 0 : index + %5 = fir.embox %4 typeparams %c0 : (!fir.heap>, index) -> !fir.box>> + fir.store %5 to %3 : !fir.ref>>> + %6:2 = hlfir.declare %3 {fortran_attrs = #fir.var_attrs, uniq_name = "_QFtest_char_cmp2Etx"} : (!fir.ref>>>) -> (!fir.ref>>>, !fir.ref>>>) + %7:2 = fir.unboxchar %arg0 : (!fir.boxchar<1>) -> (!fir.ref>, index) + %8:2 = hlfir.declare %7#0 typeparams %7#1 dummy_scope %0 {uniq_name = "_QFtest_char_cmp2Ex"} : (!fir.ref>, index, !fir.dscope) -> (!fir.boxchar<1>, !fir.ref>) + %9:2 = fir.unboxchar %arg1 : (!fir.boxchar<1>) -> (!fir.ref>, index) + %10:2 = hlfir.declare %9#0 typeparams %9#1 dummy_scope %0 {uniq_name = "_QFtest_char_cmp2Ey"} : (!fir.ref>, index, !fir.dscope) -> (!fir.boxchar<1>, !fir.ref>) + %11 = hlfir.char_trim %8#0 : (!fir.boxchar<1>) -> !hlfir.expr> + hlfir.assign %11 to %6#0 realloc : !hlfir.expr>, !fir.ref>>> + hlfir.destroy %11 : !hlfir.expr> + %12 = fir.load %6#0 : !fir.ref>>> + %13 = fir.box_addr %12 : (!fir.box>>) -> !fir.heap> + %14 = fir.load %6#0 : !fir.ref>>> + %15 = fir.box_elesize %14 : (!fir.box>>) -> index + %16 = fir.emboxchar %13, %15 : (!fir.heap>, index) -> !fir.boxchar<1> + %17 = hlfir.cmpchar eq %16 %10#0 : (!fir.boxchar<1>, !fir.boxchar<1>) -> i1 + %18 = fir.convert %17 : (i1) -> !fir.logical<4> + hlfir.assign %18 to %2#0 : !fir.logical<4>, !fir.ref> + %19 = fir.load %2#0 : !fir.ref> + %20 = fir.load %6#0 : !fir.ref>>> + %21 = fir.box_addr %20 : (!fir.box>>) -> !fir.heap> + %22 = fir.convert %21 : (!fir.heap>) -> i64 + %c0_i64 = arith.constant 0 : i64 + %23 = arith.cmpi ne, %22, %c0_i64 : i64 + fir.if %23 { + %24 = fir.load %6#0 : !fir.ref>>> + %25 = fir.box_addr %24 : (!fir.box>>) -> !fir.heap> + fir.freemem %25 : !fir.heap> + %26 = fir.zero_bits !fir.heap> + %c0_0 = arith.constant 0 : index + %27 = fir.embox %26 typeparams %c0_0 : (!fir.heap>, index) -> !fir.box>> + fir.store %27 to %6#0 : !fir.ref>>> + } + return %19 : !fir.logical<4> +} + +// CHECK-LABEL: func.func @_QPtest_char_cmp2( +// CHECK: hlfir.char_trim diff --git a/flang/test/Integration/debug-complex-1.f90 b/flang/test/Integration/debug-complex-1.f90 index 1ec4b7fe33990..1d70140a202d7 100644 --- a/flang/test/Integration/debug-complex-1.f90 +++ b/flang/test/Integration/debug-complex-1.f90 @@ 
-17,8 +17,8 @@ function fn1(a, b) result (c) end program ! CHECK-DAG: ![[C4:.*]] = !DIBasicType(name: "complex", size: 64, encoding: DW_ATE_complex_float) -! CHECK-DAG: ![[C8:.*]] = !DIBasicType(name: "complex", size: 128, encoding: DW_ATE_complex_float) -! CHECK-DAG: ![[C16:.*]] = !DIBasicType(name: "complex", size: 256, encoding: DW_ATE_complex_float) +! CHECK-DAG: ![[C8:.*]] = !DIBasicType(name: "complex(kind=8)", size: 128, encoding: DW_ATE_complex_float) +! CHECK-DAG: ![[C16:.*]] = !DIBasicType(name: "complex(kind=16)", size: 256, encoding: DW_ATE_complex_float) ! CHECK-DAG: !DILocalVariable(name: "c4"{{.*}}type: ![[C4]]) ! CHECK-DAG: !DILocalVariable(name: "c8"{{.*}}type: ![[C8]]) ! CHECK-DAG: !DILocalVariable(name: "r"{{.*}}type: ![[C16]]) diff --git a/flang/test/Integration/debug-dwarf-flags.f90 b/flang/test/Integration/debug-dwarf-flags.f90 index ac5b1c0d8d4b2..3307ef6f0a971 100644 --- a/flang/test/Integration/debug-dwarf-flags.f90 +++ b/flang/test/Integration/debug-dwarf-flags.f90 @@ -1,7 +1,13 @@ +! RUN: %if !target={{.*aix.*}} %{ \ ! RUN: %flang_fc1 -emit-llvm -debug-info-kind=standalone -dwarf-version=5 %s \ -! RUN: -o - | FileCheck --check-prefix=CHECK-DWARF5 %s +! RUN: -o - | FileCheck --check-prefix=CHECK-DWARF5 %s \ +! RUN: %} + +! RUN: %if !target={{.*aix.*}} %{ \ ! RUN: %flang_fc1 -emit-llvm -debug-info-kind=line-tables-only -dwarf-version=5 \ -! RUN: %s -o - | FileCheck --check-prefix=CHECK-DWARF5 %s +! RUN: %s -o - | FileCheck --check-prefix=CHECK-DWARF5 %s \ +! RUN: %} + ! RUN: %flang_fc1 -emit-llvm -debug-info-kind=standalone -dwarf-version=4 %s \ ! RUN: -o - | FileCheck --check-prefix=CHECK-DWARF4 %s ! RUN: %flang_fc1 -emit-llvm -debug-info-kind=standalone -dwarf-version=3 %s \ diff --git a/flang/test/Integration/debug-local-var-2.f90 b/flang/test/Integration/debug-local-var-2.f90 index 0ddac633a5b1e..e95263e6841ad 100644 --- a/flang/test/Integration/debug-local-var-2.f90 +++ b/flang/test/Integration/debug-local-var-2.f90 @@ -40,11 +40,11 @@ program mn ! BOTH-DAG: ![[MAIN:.*]] = distinct !DISubprogram(name: "MN", {{.*}}) ! BOTH-DAG: ![[TYI32:.*]] = !DIBasicType(name: "integer", size: 32, encoding: DW_ATE_signed) -! BOTH-DAG: ![[TYI64:.*]] = !DIBasicType(name: "integer", size: 64, encoding: DW_ATE_signed) -! BOTH-DAG: ![[TYL8:.*]] = !DIBasicType(name: "logical", size: 8, encoding: DW_ATE_boolean) +! BOTH-DAG: ![[TYI64:.*]] = !DIBasicType(name: "integer(kind=8)", size: 64, encoding: DW_ATE_signed) +! BOTH-DAG: ![[TYL8:.*]] = !DIBasicType(name: "logical(kind=1)", size: 8, encoding: DW_ATE_boolean) ! BOTH-DAG: ![[TYL32:.*]] = !DIBasicType(name: "logical", size: 32, encoding: DW_ATE_boolean) ! BOTH-DAG: ![[TYR32:.*]] = !DIBasicType(name: "real", size: 32, encoding: DW_ATE_float) -! BOTH-DAG: ![[TYR64:.*]] = !DIBasicType(name: "real", size: 64, encoding: DW_ATE_float) +! BOTH-DAG: ![[TYR64:.*]] = !DIBasicType(name: "real(kind=8)", size: 64, encoding: DW_ATE_float) ! BOTH-DAG: ![[I4]] = !DILocalVariable(name: "i4", scope: ![[MAIN]], file: !{{.*}}, line: [[@LINE+6]], type: ![[TYI32]]) ! BOTH-DAG: ![[I8]] = !DILocalVariable(name: "i8", scope: ![[MAIN]], file: !{{.*}}, line: [[@LINE+6]], type: ![[TYI64]]) diff --git a/flang/test/Integration/debug-split-dwarf.f90 b/flang/test/Integration/debug-split-dwarf.f90 new file mode 100644 index 0000000000000..ebfa040a42632 --- /dev/null +++ b/flang/test/Integration/debug-split-dwarf.f90 @@ -0,0 +1,29 @@ +! REQUIRES: x86-registered-target + +! Testing to ensure that setting only -split-dwarf-file allows to place +! 
.dwo sections into regular output object. +! RUN: %flang_fc1 -debug-info-kind=standalone -triple x86_64-unknown-linux \ +! RUN: -split-dwarf-file %t.o -emit-obj -o %t.o %s +! RUN: llvm-readobj -S %t.o | FileCheck --check-prefix=DWO %s + +! Testing to ensure that setting both -split-dwarf-file and -split-dwarf-output +! does not place .dwo sections into regular output object but in a separate +! file. +! RUN: %flang_fc1 -debug-info-kind=standalone -triple x86_64-unknown-linux \ +! RUN: -split-dwarf-file %t.dwo -split-dwarf-output %t.dwo -emit-obj -o %t.o %s +! RUN: llvm-readobj -S %t.dwo | FileCheck --check-prefix=DWO %s +! RUN: llvm-readobj -S %t.o | FileCheck --check-prefix=SPLIT %s + +! Test that splitDebugFilename field of the DICompileUnit get correctly +! generated. +! RUN: %flang_fc1 -debug-info-kind=standalone -triple x86_64-unknown-linux \ +! RUN: -split-dwarf-file %t.test_dwo -split-dwarf-output %t.test_dwo \ +! RUN: -emit-llvm %s -o - | FileCheck --check-prefix=CU %s + +! DWO: .dwo +! SPLIT-NOT: .dwo +! CU: !DICompileUnit +! CU-SAME: splitDebugFilename: "{{.*}}test_dwo" + +program test +end program test diff --git a/flang/test/Lower/CUDA/cuda-runtime-check.cuf b/flang/test/Lower/CUDA/cuda-runtime-check.cuf index 1aa95ec0ff405..02bb593db7305 100644 --- a/flang/test/Lower/CUDA/cuda-runtime-check.cuf +++ b/flang/test/Lower/CUDA/cuda-runtime-check.cuf @@ -16,6 +16,10 @@ contains call foo(a(1:10,1:10:2)) end subroutine +! CHECK-LABEL: func.func @_QMsection_testPtest_host() +! CHECK: fir.call @_FortranACUFDescriptorCheckSection +! CHECK: fir.call @_QMsection_testPfoo + attributes(device) subroutine zoo(a) real, device, dimension(:,:) :: a end subroutine @@ -25,12 +29,23 @@ contains allocate(a(10,10)) call zoo(a(1:10,1:10:2)) end subroutine -end module - -! CHECK-LABEL: func.func @_QMsection_testPtest_host() -! CHECK: fir.call @_FortranACUFDescriptorCheckSection -! CHECK: fir.call @_QMsection_testPfoo ! CHECK-LABEL: func.func @_QMsection_testPtest_device() ! CHECK-NOT: fir.call @_FortranACUFDescriptorCheckSection ! CHECK: fir.call @_QMsection_testPzoo + + subroutine ignore(a) + real, device, dimension(:,:) :: a + !dir$ ignore_tkr(c) a + end subroutine + + subroutine test_host2() + real, device, allocatable, dimension(:,:) :: a + allocate(a(10,10)) + call ignore(a(1:10,1:10:2)) + end subroutine + +! CHECK-LABEL: func.func @_QMsection_testPtest_host2() +! CHECK-NOT: fir.call @_FortranACUFDescriptorCheckSection +! CHECK: fir.call @_QMsection_testPignore +end module diff --git a/flang/test/Lower/Intrinsics/fast-real-mod.f90 b/flang/test/Lower/Intrinsics/fast-real-mod.f90 new file mode 100644 index 0000000000000..f80f7203ad1a2 --- /dev/null +++ b/flang/test/Lower/Intrinsics/fast-real-mod.f90 @@ -0,0 +1,83 @@ +! RUN: %flang_fc1 -ffast-math -emit-mlir -o - %s | FileCheck %s --check-prefixes=CHECK%if target=x86_64{{.*}} %{,CHECK-KIND10%}%if flang-supports-f128-math %{,CHECK-KIND16%} +! RUN: %flang_fc1 -ffast-math -fno-fast-real-mod -emit-mlir -o - %s | FileCheck %s --check-prefixes=CHECK-NFRM%if target=x86_64{{.*}} %{,CHECK-NFRM-KIND10%}%if flang-supports-f128-math %{,CHECK-NFRM-KIND16%} + +! TODO: check line that fir.fast_real_mod is not there +! CHECK-NFRM: module attributes {{{.*}}fir.no_fast_real_mod = true{{.*}}} + +! CHECK-LABEL: @_QPmod_real4 +subroutine mod_real4(r, a, p) + implicit none + real(kind=4) :: r, a, p +! CHECK: %[[A:.*]] = fir.declare{{.*}}a" +! CHECK: %[[P:.*]] = fir.declare{{.*}}p" +! CHECK: %[[R:.*]] = fir.declare{{.*}}r" +! CHECK: %[[A_LOAD:.*]] = fir.load %[[A]] +! 
CHECK: %[[P_LOAD:.*]] = fir.load %[[P]] +! CHECK: %[[DIV:.*]] = arith.divf %[[A_LOAD]], %[[P_LOAD]] fastmath : f32 +! CHECK: %[[CV1:.*]] = fir.convert %[[DIV]] : (f32) -> si32 +! CHECK: %[[CV2:.*]] = fir.convert %[[CV1]] : (si32) -> f32 +! CHECK: %[[MUL:.*]] = arith.mulf %[[CV2]], %[[P_LOAD]] fastmath : f32 +! CHECK: %[[SUB:.*]] = arith.subf %[[A_LOAD]], %[[MUL]] fastmath : f32 +! CHECK: fir.store %[[SUB]] to %[[R]] : !fir.ref +! CHECK-NFRM: fir.call @_FortranAModReal4(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (f32, f32, !fir.ref, i32) -> f32 + r = mod(a, p) +end subroutine mod_real4 + +! CHECK-LABEL: @_QPmod_real8 +subroutine mod_real8(r, a, p) + implicit none + real(kind=8) :: r, a, p +! CHECK: %[[A:.*]] = fir.declare{{.*}}a" +! CHECK: %[[P:.*]] = fir.declare{{.*}}p" +! CHECK: %[[R:.*]] = fir.declare{{.*}}r" +! CHECK: %[[A_LOAD:.*]] = fir.load %[[A]] +! CHECK: %[[P_LOAD:.*]] = fir.load %[[P]] +! CHECK: %[[DIV:.*]] = arith.divf %[[A_LOAD]], %[[P_LOAD]] fastmath : f64 +! CHECK: %[[CV1:.*]] = fir.convert %[[DIV]] : (f64) -> si64 +! CHECK: %[[CV2:.*]] = fir.convert %[[CV1]] : (si64) -> f64 +! CHECK: %[[MUL:.*]] = arith.mulf %[[CV2]], %[[P_LOAD]] fastmath : f64 +! CHECK: %[[SUB:.*]] = arith.subf %[[A_LOAD]], %[[MUL]] fastmath : f64 +! CHECK: fir.store %[[SUB]] to %[[R]] : !fir.ref +! CHECK-NFRM: fir.call @_FortranAModReal8(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (f64, f64, !fir.ref, i32) -> f64 + r = mod(a, p) +end subroutine mod_real8 + +! CHECK-LABEL: @_QPmod_real10 +subroutine mod_real10(r, a, p) + implicit none + integer, parameter :: kind10 = merge(10, 4, selected_real_kind(p=18).eq.10) + real(kind=kind10) :: r, a, p +! CHECK-KIND10: %[[A:.*]] = fir.declare{{.*}}a" +! CHECK-KIND10: %[[P:.*]] = fir.declare{{.*}}p" +! CHECK-KIND10: %[[R:.*]] = fir.declare{{.*}}r" +! CHECK-KIND10: %[[A_LOAD:.*]] = fir.load %[[A]] +! CHECK-KIND10: %[[P_LOAD:.*]] = fir.load %[[P]] +! CHECK-KIND10: %[[DIV:.*]] = arith.divf %[[A_LOAD]], %[[P_LOAD]] fastmath : f80 +! CHECK-KIND10: %[[CV1:.*]] = fir.convert %[[DIV]] : (f80) -> si80 +! CHECK-KIND10: %[[CV2:.*]] = fir.convert %[[CV1]] : (si80) -> f80 +! CHECK-KIND10: %[[MUL:.*]] = arith.mulf %[[CV2]], %[[P_LOAD]] fastmath : f80 +! CHECK-KIND10: %[[SUB:.*]] = arith.subf %[[A_LOAD]], %[[MUL]] fastmath : f80 +! CHECK-KIND10: fir.store %[[SUB]] to %[[R]] : !fir.ref +! CHECK-NFRM-KIND10: fir.call @_FortranAModReal10(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (f80, f80, !fir.ref, i32) -> f80 + r = mod(a, p) +end subroutine mod_real10 + +! CHECK-LABEL: @_QPmod_real16 +subroutine mod_real16(r, a, p) + implicit none + integer, parameter :: kind16 = merge(16, 4, selected_real_kind(p=33).eq.16) + real(kind=kind16) :: r, a, p +! CHECK-KIND16: %[[A:.*]] = fir.declare{{.*}}a" +! CHECK-KIND16: %[[P:.*]] = fir.declare{{.*}}p" +! CHECK-KIND16: %[[R:.*]] = fir.declare{{.*}}r" +! CHECK-KIND16: %[[A_LOAD:.*]] = fir.load %[[A]] +! CHECK-KIND16: %[[P_LOAD:.*]] = fir.load %[[P]] +! CHECK-KIND16: %[[DIV:.*]] = arith.divf %[[A_LOAD]], %[[P_LOAD]] fastmath : f128 +! CHECK-KIND16: %[[CV1:.*]] = fir.convert %[[DIV]] : (f128) -> si128 +! CHECK-KIND16: %[[CV2:.*]] = fir.convert %[[CV1]] : (si128) -> f128 +! CHECK-KIND16: %[[MUL:.*]] = arith.mulf %[[CV2]], %[[P_LOAD]] fastmath : f128 +! CHECK-KIND16: %[[SUB:.*]] = arith.subf %[[A_LOAD]], %[[MUL]] fastmath : f128 +! CHECK-KIND16: fir.store %[[SUB]] to %[[R]] : !fir.ref +! 
CHECK-NFRM-KIND16: fir.call @_FortranAModReal16(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}}: (f128, f128, !fir.ref, i32) -> f128 + r = mod(a, p) +end subroutine mod_real16 diff --git a/flang/test/Lower/OpenACC/acc-bounds.f90 b/flang/test/Lower/OpenACC/acc-bounds.f90 index cff53a2bfd122..f6996df6d2454 100644 --- a/flang/test/Lower/OpenACC/acc-bounds.f90 +++ b/flang/test/Lower/OpenACC/acc-bounds.f90 @@ -1,6 +1,6 @@ ! This test checks lowering of OpenACC data bounds operation. -! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s +! RUN: bbc -fopenacc -emit-hlfir --openacc-generate-default-bounds=true %s -o - | FileCheck %s module openacc_bounds @@ -23,18 +23,12 @@ subroutine acc_derived_type_component_pointer_array() end subroutine ! CHECK-LABEL: func.func @_QMopenacc_boundsPacc_derived_type_component_pointer_array() { -! CHECK: %[[D:.*]] = fir.alloca !fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box>>}> {bindc_name = "d", uniq_name = "_QMopenacc_boundsFacc_derived_type_component_pointer_arrayEd"} -! CHECK: %[[DECL_D:.*]]:2 = hlfir.declare %[[D]] {uniq_name = "_QMopenacc_boundsFacc_derived_type_component_pointer_arrayEd"} : (!fir.ref>>}>>) -> (!fir.ref>>}>>, !fir.ref>>}>>) -! CHECK: %[[COORD:.*]] = hlfir.designate %[[DECL_D]]#0{"array_comp"} {fortran_attrs = #fir.var_attrs} : (!fir.ref>>}>>) -> !fir.ref>>> -! CHECK: %[[LOAD:.*]] = fir.load %[[COORD]] : !fir.ref>>> -! CHECK: %[[BOX_DIMS0:.*]]:3 = fir.box_dims %[[LOAD]], %c0{{.*}} : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[BOX_DIMS1:.*]]:3 = fir.box_dims %[[LOAD]], %c0{{.*}} : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[UB:.*]] = arith.subi %[[BOX_DIMS1]]#1, %[[C1]] : index -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[BOX_DIMS1]]#1 : index) stride(%[[BOX_DIMS1]]#2 : index) startIdx(%[[BOX_DIMS0]]#0 : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box>>) -> !fir.ptr> -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ptr>) bounds(%[[BOUND]]) -> !fir.ptr> {name = "d%array_comp", structured = false} -! CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ptr>) +! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box>>}> {bindc_name = "d", uniq_name = "_QMopenacc_boundsFacc_derived_type_component_pointer_arrayEd"} +! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QMopenacc_boundsFacc_derived_type_component_pointer_arrayEd"} : (!fir.ref>>}>>) -> (!fir.ref>>}>>, !fir.ref>>}>>) +! CHECK: %[[VAL_4:.*]] = hlfir.designate %[[VAL_2]]#0{"array_comp"} {fortran_attrs = #fir.var_attrs} : (!fir.ref>>}>>) -> !fir.ref>>> +! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_4]] : !fir.ref>>> +! CHECK: %[[VAL_6:.*]] = acc.create var(%[[VAL_5]] : !fir.box>>) -> !fir.box>> {name = "d%[[VAL_7:.*]]", structured = false} +! CHECK: acc.enter_data dataOperands(%[[VAL_6]] : !fir.box>>) ! CHECK: return ! CHECK: } @@ -73,9 +67,8 @@ subroutine acc_derived_type_component_allocatable_array() ! CHECK: %[[BOX_DIMS1:.*]]:3 = fir.box_dims %[[LOAD]], %c0{{.*}} : (!fir.box>>, index) -> (index, index, index) ! CHECK: %[[UB:.*]] = arith.subi %[[BOX_DIMS1]]#1, %[[C1]] : index ! 
CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[BOX_DIMS1]]#1 : index) stride(%[[BOX_DIMS1]]#2 : index) startIdx(%[[BOX_DIMS0]]#0 : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box>>) -> !fir.heap> -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%[[BOUND]]) -> !fir.heap> {name = "d%array_comp", structured = false} -! CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap>) +! CHECK: %[[CREATE:.*]] = acc.create var(%[[LOAD]] : !fir.box>>) bounds(%[[BOUND]]) -> !fir.box>> {name = "d%[[VAL_15:.*]]", structured = false} +! CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.box>>) ! CHECK: return ! CHECK: } @@ -92,9 +85,8 @@ subroutine acc_undefined_extent(a) ! CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECL_ARG0]]#0, %c0{{.*}} : (!fir.box>, index) -> (index, index, index) ! CHECK: %[[UB:.*]] = arith.subi %[[DIMS0]]#1, %c1{{.*}} : index ! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS0]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%c1{{.*}} : index) {strideInBytes = true} -! CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECL_ARG0]]#0 : (!fir.box>) -> !fir.ref> -! CHECK: %[[PRESENT:.*]] = acc.present varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a"} -! CHECK: acc.kernels dataOperands(%[[PRESENT]] : !fir.ref>) +! CHECK: %[[PRESENT:.*]] = acc.present var(%[[DECL_ARG0]]#0 : !fir.box>) bounds(%[[BOUND]]) -> !fir.box> {name = "a"} +! CHECK: acc.kernels dataOperands(%[[PRESENT]] : !fir.box>) { subroutine acc_multi_strides(a) real, dimension(:,:,:) :: a @@ -114,9 +106,8 @@ subroutine acc_multi_strides(a) ! CHECK: %[[STRIDE2:.*]] = arith.muli %[[STRIDE1]], %[[BOX_DIMS1]]#1 : index ! CHECK: %[[BOX_DIMS2:.*]]:3 = fir.box_dims %[[DECL_ARG0]]#0, %c2{{.*}} : (!fir.box>, index) -> (index, index, index) ! CHECK: %[[BOUNDS2:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[BOX_DIMS2]]#1 : index) stride(%[[STRIDE2]] : index) startIdx(%{{.*}} : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL_ARG0]]#0 : (!fir.box>) -> !fir.ref> -! CHECK: %[[PRESENT:.*]] = acc.present varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUNDS0]], %[[BOUNDS1]], %[[BOUNDS2]]) -> !fir.ref> {name = "a"} -! CHECK: acc.kernels dataOperands(%[[PRESENT]] : !fir.ref>) { +! CHECK: %[[PRESENT:.*]] = acc.present var(%[[DECL_ARG0]]#0 : !fir.box>) bounds(%[[BOUNDS0]], %[[BOUNDS1]], %[[BOUNDS2]]) -> !fir.box> {name = "a"} +! CHECK: acc.kernels dataOperands(%[[PRESENT]] : !fir.box>) { subroutine acc_optional_data(a) real, pointer, optional :: a(:) @@ -137,16 +128,8 @@ subroutine acc_optional_data(a) ! CHECK: fir.result %[[C0]], %[[CM1]], %[[C0]], %[[C0]], %[[C0]] : index, index, index, index, index ! CHECK: } ! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[RES]]#0 : index) upperbound(%[[RES]]#1 : index) extent(%[[RES]]#2 : index) stride(%[[RES]]#3 : index) startIdx(%[[RES]]#4 : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.if %[[IS_PRESENT]] -> (!fir.ptr>) { -! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref>>> -! CHECK: %[[ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box>>) -> !fir.ptr> -! CHECK: fir.result %[[ADDR]] : !fir.ptr> -! CHECK: } else { -! CHECK: %[[ABSENT:.*]] = fir.absent !fir.ptr> -! CHECK: fir.result %[[ABSENT]] : !fir.ptr> -! CHECK: } -! 
CHECK: %[[ATTACH:.*]] = acc.attach varPtr(%[[BOX_ADDR]] : !fir.ptr>) bounds(%[[BOUND]]) -> !fir.ptr> {name = "a"} -! CHECK: acc.data dataOperands(%[[ATTACH]] : !fir.ptr>) +! CHECK: %[[ATTACH:.*]] = acc.attach varPtr(%[[ARG0_DECL]]#0 : !fir.ref>>>) bounds(%[[BOUND]]) -> !fir.ref>>> {name = "a"} +! CHECK: acc.data dataOperands(%[[ATTACH]] : !fir.ref>>>) { subroutine acc_optional_data2(a, n) integer :: n diff --git a/flang/test/Lower/OpenACC/acc-data-operands-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-data-operands-unwrap-defaultbounds.f90 deleted file mode 100644 index 7f89fe2dd523e..0000000000000 --- a/flang/test/Lower/OpenACC/acc-data-operands-unwrap-defaultbounds.f90 +++ /dev/null @@ -1,152 +0,0 @@ -! This test checks lowering of complex OpenACC data operands and checks -! that default bounds are generated. - -! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s - -module acc_data_operand - - type wrapper - real :: data(100) - end type - -contains - -! Testing derived-type component without section -subroutine acc_operand_derived_type_component() - type(wrapper) :: w - - !$acc data copy(w%data) - !$acc end data -end subroutine - -! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_derived_type_component() { -! CHECK: %[[W:.*]] = fir.alloca !fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}> {bindc_name = "w", uniq_name = "_QMacc_data_operandFacc_operand_derived_type_componentEw"} -! CHECK: %[[DECLW:.*]]:2 = hlfir.declare %[[W]] -! CHECK: %[[EXT:.*]] = arith.constant 100 : index -! CHECK: %[[COORD_DATA:.*]] = hlfir.designate %[[DECLW]]#0{"data"} shape %{{.*}} : (!fir.ref}>>, !fir.shape<1>) -> !fir.ref> -! CHECK: %[[ONE:.*]] = arith.constant 1 : index -! CHECK: %[[LB:.*]] = arith.constant 0 : index -! CHECK: %[[UB:.*]] = arith.subi %[[EXT]], %[[ONE]] : index -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index) -! CHECK: %[[COPY_COPYIN:.*]] = acc.copyin varPtr(%[[COORD_DATA]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {dataClause = #acc, name = "w%data"} -! CHECK: acc.data dataOperands(%[[COPY_COPYIN]] : !fir.ref>) { -! CHECK: acc.terminator -! CHECK: } -! CHECK: acc.copyout accPtr(%[[COPY_COPYIN]] : !fir.ref>) bounds(%[[BOUND]]) to varPtr(%[[COORD_DATA]] : !fir.ref>) {dataClause = #acc, name = "w%data"} - - -! Testing array of derived-type component without section -subroutine acc_operand_array_derived_type_component() - type(wrapper) :: w(10) - - !$acc data copy(w(1)%data) - !$acc end data -end subroutine - -! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_array_derived_type_component() { -! CHECK: %[[W:.*]] = fir.alloca !fir.array<10x!fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}>> {bindc_name = "w", uniq_name = "_QMacc_data_operandFacc_operand_array_derived_type_componentEw"} -! CHECK: %[[DECLW:.*]]:2 = hlfir.declare %[[W]] -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[W_1:.*]] = hlfir.designate %[[DECLW]]#0 (%[[C1]]) : (!fir.ref}>>>, index) -> !fir.ref}>> -! CHECK: %[[EXT:.*]] = arith.constant 100 : index -! CHECK: %[[COORD_W1_DATA:.*]] = hlfir.designate %[[W_1]]{"data"} shape %{{.*}} : (!fir.ref}>>, !fir.shape<1>) -> !fir.ref> -! CHECK: %[[ONE:.*]] = arith.constant 1 : index -! CHECK: %[[LB:.*]] = arith.constant 0 : index -! CHECK: %[[UB:.*]] = arith.subi %[[EXT]], %[[ONE]] : index -! 
CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index) -! CHECK: %[[COPY_COPYIN:.*]] = acc.copyin varPtr(%[[COORD_W1_DATA]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {dataClause = #acc, name = "w(1_8)%data"} -! CHECK: acc.data dataOperands(%[[COPY_COPYIN]] : !fir.ref>) { -! CHECK: acc.terminator -! CHECK: } -! CHECK: acc.copyout accPtr(%[[COPY_COPYIN]] : !fir.ref>) bounds(%[[BOUND]]) to varPtr(%[[COORD_W1_DATA]] : !fir.ref>) {dataClause = #acc, name = "w(1_8)%data"} - -! Testing array sections on allocatable array -subroutine acc_operand_array_section_allocatable() - real, allocatable :: a(:) - - allocate(a(100)) - - !$acc data copyin(a(1:50)) copyout(a(51:100)) - !$acc end data - - deallocate(a) -end subroutine - -! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_array_section_allocatable() { -! CHECK: %[[A:.*]] = fir.alloca !fir.box>> {bindc_name = "a", uniq_name = "_QMacc_data_operandFacc_operand_array_section_allocatableEa"} -! CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] {fortran_attrs = #fir.var_attrs -! CHECK: %[[LOAD_BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -! CHECK: %[[LOAD_BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[DIMS0_0:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_1]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[DIMS0_1:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_0]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[LB:.*]] = arith.subi %[[C1]], %[[DIMS0_0]]#0 : index -! CHECK: %[[C50:.*]] = arith.constant 50 : index -! CHECK: %[[UB:.*]] = arith.subi %[[C50]], %[[DIMS0_0]]#0 : index -! CHECK: %[[LOAD_BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[DIMS0_2:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_2]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[BOUND_1_50:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_2]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD_BOX_A_0]] : (!fir.box>>) -> !fir.heap> -! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%[[BOUND_1_50]]) -> !fir.heap> {name = "a(1:50)"} -! CHECK: %[[LOAD_BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -! CHECK: %[[LOAD_BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[DIMS0_0:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_1]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[DIMS0_1:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_0]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[C51:.*]] = arith.constant 51 : index -! CHECK: %[[LB:.*]] = arith.subi %[[C51]], %[[DIMS0_0]]#0 : index -! CHECK: %[[C100:.*]] = arith.constant 100 : index -! CHECK: %[[UB:.*]] = arith.subi %[[C100]], %[[DIMS0_0]]#0 : index -! CHECK: %[[LOAD_BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[DIMS0_2:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_2]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! 
CHECK: %[[BOUND_51_100:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_2]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD_BOX_A_0]] : (!fir.box>>) -> !fir.heap> -! CHECK: %[[COPYOUT_CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%[[BOUND_51_100]]) -> !fir.heap> {dataClause = #acc, name = "a(51:100)"} -! CHECK: acc.data dataOperands(%[[COPYIN]], %[[COPYOUT_CREATE]] : !fir.heap>, !fir.heap>) { -! CHECK: acc.terminator -! CHECK: } -! CHECK: acc.delete accPtr(%[[COPYIN]] : !fir.heap>) bounds(%[[BOUND_1_50]]) {dataClause = #acc, name = "a(1:50)"} -! CHECK: acc.copyout accPtr(%[[COPYOUT_CREATE]] : !fir.heap>) bounds(%[[BOUND_51_100]]) to varPtr(%[[BOX_ADDR]] : !fir.heap>) {name = "a(51:100)"} - - -! Testing array sections on pointer array -subroutine acc_operand_array_section_pointer() - real, target :: a(100) - real, pointer :: p(:) - - p => a - - !$acc data copyin(p(1:50)) - !$acc end data -end subroutine - -! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_array_section_pointer() { -! CHECK: %[[P:.*]] = fir.alloca !fir.box>> {bindc_name = "p", uniq_name = "_QMacc_data_operandFacc_operand_array_section_pointerEp"} -! CHECK: %[[DECLP:.*]]:2 = hlfir.declare %[[P]] {fortran_attrs = #fir.var_attrs -! CHECK: %[[LOAD_BOX_P_0:.*]] = fir.load %[[DECLP]]#0 : !fir.ref>>> -! CHECK: %[[LOAD_BOX_P_1:.*]] = fir.load %[[DECLP]]#0 : !fir.ref>>> -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[DIMS0_0:.*]]:3 = fir.box_dims %[[LOAD_BOX_P_1]], %[[C0:.*]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[DIMS0_1:.*]]:3 = fir.box_dims %[[LOAD_BOX_P_0]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[LB:.*]] = arith.subi %[[C1]], %[[DIMS0_0]]#0 : index -! CHECK: %[[C50:.*]] = arith.constant 50 : index -! CHECK: %[[UB:.*]] = arith.subi %[[C50]], %[[DIMS0_0]]#0 : index -! CHECK: %[[LOAD_BOX_P_2:.*]] = fir.load %[[DECLP]]#0 : !fir.ref>>> -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[DIMS0_2:.*]]:3 = fir.box_dims %[[LOAD_BOX_P_2]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_2]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD_BOX_P_0]] : (!fir.box>>) -> !fir.ptr> -! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[BOX_ADDR]] : !fir.ptr>) bounds(%[[BOUND]]) -> !fir.ptr> {name = "p(1:50)"} -! CHECK: acc.data dataOperands(%[[COPYIN]] : !fir.ptr>) { -! CHECK: acc.terminator -! CHECK: } -! CHECK: acc.delete accPtr(%[[COPYIN]] : !fir.ptr>) bounds(%[[BOUND]]) {dataClause = #acc, name = "p(1:50)"} - -end module diff --git a/flang/test/Lower/OpenACC/acc-data-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-data-unwrap-defaultbounds.f90 deleted file mode 100644 index 789db34adefee..0000000000000 --- a/flang/test/Lower/OpenACC/acc-data-unwrap-defaultbounds.f90 +++ /dev/null @@ -1,205 +0,0 @@ -! This test checks lowering of OpenACC data directive. - -! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s - -subroutine acc_data - real, dimension(10, 10) :: a, b, c - real, pointer :: d, e - logical :: ifCondition = .TRUE. - -! 
CHECK: %[[A:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ea"} -! CHECK:%[[DECLA:.*]]:2 = hlfir.declare %[[A]] -! CHECK: %[[B:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Eb"} -! CHECK:%[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[C:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ec"} -! CHECK:%[[DECLC:.*]]:2 = hlfir.declare %[[C]] -! CHECK: %[[D:.*]] = fir.alloca !fir.box> {bindc_name = "d", uniq_name = "{{.*}}Ed"} -! CHECK:%[[DECLD:.*]]:2 = hlfir.declare %[[D]] -! CHECK: %[[E:.*]] = fir.alloca !fir.box> {bindc_name = "e", uniq_name = "{{.*}}Ee"} -! CHECK:%[[DECLE:.*]]:2 = hlfir.declare %[[E]] - - !$acc data if(.TRUE.) copy(a) - !$acc end data - -! CHECK: %[[IF1:.*]] = arith.constant true -! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a"} -! CHECK: acc.data if(%[[IF1]]) dataOperands(%[[COPYIN]] : !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} -! CHECK:acc.copyout accPtr(%[[COPYIN]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref>) {dataClause = #acc, name = "a"} - - !$acc data copy(a) if(ifCondition) - !$acc end data - -! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a"} -! CHECK: %[[IFCOND:.*]] = fir.load %{{.*}} : !fir.ref> -! CHECK: %[[IF2:.*]] = fir.convert %[[IFCOND]] : (!fir.logical<4>) -> i1 -! CHECK: acc.data if(%[[IF2]]) dataOperands(%[[COPYIN]] : !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} -! CHECK:acc.copyout accPtr(%[[COPYIN]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref>) {dataClause = #acc, name = "a"} - - !$acc data copy(a, b, c) - !$acc end data - -! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a"} -! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "b"} -! CHECK: %[[COPYIN_C:.*]] = acc.copyin varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "c"} -! CHECK: acc.data dataOperands(%[[COPYIN_A]], %[[COPYIN_B]], %[[COPYIN_C]] : !fir.ref>, !fir.ref>, !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} -! CHECK:acc.copyout accPtr(%[[COPYIN_A]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref>) {dataClause = #acc, name = "a"} -! CHECK:acc.copyout accPtr(%[[COPYIN_B]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLB]]#0 : !fir.ref>) {dataClause = #acc, name = "b"} -! CHECK:acc.copyout accPtr(%[[COPYIN_C]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLC]]#0 : !fir.ref>) {dataClause = #acc, name = "c"} - - !$acc data copy(a) copy(b) copy(c) - !$acc end data - -! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a"} -! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "b"} -! CHECK: %[[COPYIN_C:.*]] = acc.copyin varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "c"} -! CHECK: acc.data dataOperands(%[[COPYIN_A]], %[[COPYIN_B]], %[[COPYIN_C]] : !fir.ref>, !fir.ref>, !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} -! 
CHECK:acc.copyout accPtr(%[[COPYIN_A]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref>) {dataClause = #acc, name = "a"} -! CHECK:acc.copyout accPtr(%[[COPYIN_B]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLB]]#0 : !fir.ref>) {dataClause = #acc, name = "b"} -! CHECK:acc.copyout accPtr(%[[COPYIN_C]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLC]]#0 : !fir.ref>) {dataClause = #acc, name = "c"} - - !$acc data copyin(a) copyin(readonly: b, c) - !$acc end data - -! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "b"} -! CHECK: %[[COPYIN_C:.*]] = acc.copyin varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "c"} -! CHECK: acc.data dataOperands(%[[COPYIN_A]], %[[COPYIN_B]], %[[COPYIN_C]] : !fir.ref>, !fir.ref>, !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} -! CHECK: acc.delete accPtr(%[[COPYIN_A]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc, name = "a"} -! CHECK: acc.delete accPtr(%[[COPYIN_B]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc, name = "b"} -! CHECK: acc.delete accPtr(%[[COPYIN_C]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc, name = "c"} - - !$acc data copyout(a) copyout(zero: b) copyout(c) - !$acc end data - -! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a"} -! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "b"} -! CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "c"} -! CHECK: acc.data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref>, !fir.ref>, !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} -! CHECK:acc.copyout accPtr(%[[CREATE_A]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref>) {name = "a"} -! CHECK:acc.copyout accPtr(%[[CREATE_B]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLB]]#0 : !fir.ref>) {dataClause = #acc, name = "b"} -! CHECK:acc.copyout accPtr(%[[CREATE_C]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLC]]#0 : !fir.ref>) {name = "c"} - - !$acc data create(a, b) create(zero: c) - !$acc end data - -! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "c"} -! CHECK: acc.data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref>, !fir.ref>, !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} -! CHECK: acc.delete accPtr(%[[CREATE_A]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc, name = "a"} -! CHECK: acc.delete accPtr(%[[CREATE_B]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc, name = "b"} -! CHECK: acc.delete accPtr(%[[CREATE_C]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc, name = "c"} - - !$acc data create(c) copy(b) create(a) - !$acc end data -! 
CHECK:%[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "c"} -! CHECK:%[[COPY_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "b"} -! CHECK:%[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: acc.data dataOperands(%[[CREATE_C]], %[[COPY_B]], %[[CREATE_A]] : !fir.ref>, !fir.ref>, !fir.ref>) { - - !$acc data no_create(a, b) create(zero: c) - !$acc end data - -! CHECK: %[[NO_CREATE_A:.*]] = acc.nocreate varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: %[[NO_CREATE_B:.*]] = acc.nocreate varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "c"} -! CHECK: acc.data dataOperands(%[[NO_CREATE_A]], %[[NO_CREATE_B]], %[[CREATE_C]] : !fir.ref>, !fir.ref>, !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} -! CHECK: acc.delete accPtr(%[[CREATE_C]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc, name = "c"} - - !$acc data present(a, b, c) - !$acc end data - -! CHECK: %[[PRESENT_A:.*]] = acc.present varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: %[[PRESENT_B:.*]] = acc.present varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: %[[PRESENT_C:.*]] = acc.present varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "c"} -! CHECK: acc.data dataOperands(%[[PRESENT_A]], %[[PRESENT_B]], %[[PRESENT_C]] : !fir.ref>, !fir.ref>, !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} - - !$acc data deviceptr(b, c) - !$acc end data - -! CHECK: %[[DEVICEPTR_B:.*]] = acc.deviceptr varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: %[[DEVICEPTR_C:.*]] = acc.deviceptr varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "c"} -! CHECK: acc.data dataOperands(%[[DEVICEPTR_B]], %[[DEVICEPTR_C]] : !fir.ref>, !fir.ref>) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} - - !$acc data attach(d, e) - !$acc end data - -! CHECK: %[[ATTACH_D:.*]] = acc.attach varPtr(%{{.*}} : !fir.ptr) -> !fir.ptr {name = "d"} -! CHECK: %[[ATTACH_E:.*]] = acc.attach varPtr(%{{.*}} : !fir.ptr) -> !fir.ptr {name = "e"} -! CHECK: acc.data dataOperands(%[[ATTACH_D]], %[[ATTACH_E]] : !fir.ptr, !fir.ptr) { -! CHECK: acc.terminator -! CHECK-NEXT: }{{$}} -! CHECK: acc.detach accPtr(%[[ATTACH_D]] : !fir.ptr) {dataClause = #acc, name = "d"} -! CHECK: acc.detach accPtr(%[[ATTACH_E]] : !fir.ptr) {dataClause = #acc, name = "e"} - - !$acc data present(a) async - !$acc end data - -! CHECK: acc.data async dataOperands(%{{.*}}) { -! CHECK: } - - !$acc data copy(a) async(1) - !$acc end data - -! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%{{.*}} : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC:.*]] : i32) -> !fir.ref> {dataClause = #acc, name = "a"} -! CHECK: acc.data async(%[[ASYNC]] : i32) dataOperands(%[[COPYIN]] : !fir.ref>) { -! CHECK: }{{$}} -! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC]] : i32) to varPtr(%{{.*}} : !fir.ref>) {dataClause = #acc, name = "a"} - - !$acc data present(a) wait - !$acc end data - -! CHECK: acc.data dataOperands(%{{.*}}) wait { -! 
CHECK: } - - !$acc data present(a) wait(1) - !$acc end data - -! CHECK: acc.data dataOperands(%{{.*}}) wait({%{{.*}} : i32}) { -! CHECK: }{{$}} - - !$acc data present(a) wait(devnum: 0: 1) - !$acc end data - -! CHECK: acc.data dataOperands(%{{.*}}) wait({devnum: %{{.*}} : i32, %{{.*}} : i32}) { -! CHECK: }{{$}} - - !$acc data default(none) - !$acc end data - -! CHECK: acc.data { -! CHECK: acc.terminator -! CHECK: } attributes {defaultAttr = #acc} - - !$acc data default(present) - !$acc end data - -! CHECK: acc.data { -! CHECK: acc.terminator -! CHECK: } attributes {defaultAttr = #acc} - - !$acc data - !$acc end data -! CHECK-NOT: acc.data - -end subroutine acc_data diff --git a/flang/test/Lower/OpenACC/acc-declare-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-declare-unwrap-defaultbounds.f90 deleted file mode 100644 index 4b181f8a26987..0000000000000 --- a/flang/test/Lower/OpenACC/acc-declare-unwrap-defaultbounds.f90 +++ /dev/null @@ -1,478 +0,0 @@ -! This test checks lowering of OpenACC declare directive in function and -! subroutine specification parts. - -! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s - -module acc_declare - contains - - subroutine acc_declare_copy() - integer :: a(100), i - !$acc declare copy(a) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_copy() -! CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index -! CHECK-DAG: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_copyEa"} -! CHECK-DAG: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_copyEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECL]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {dataClause = #acc, name = "a"} -! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[COPYIN]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { -! CHECK: } -! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[COPYIN]] : !fir.ref>) -! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref>) bounds(%[[BOUND]]) to varPtr(%[[DECL]]#0 : !fir.ref>) {dataClause = #acc, name = "a"} -! CHECK: return - - subroutine acc_declare_create() - integer :: a(100), i - !$acc declare create(a) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_create() { -! CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index -! CHECK-DAG: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_createEa"} -! CHECK-DAG: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_createEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a"} -! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref>) -! 
CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { -! CHECK: } -! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: acc.delete accPtr(%[[CREATE]] : !fir.ref>) bounds(%[[BOUND]]) {dataClause = #acc, name = "a"} -! CHECK: return - - subroutine acc_declare_present(a) - integer :: a(100), i - !$acc declare present(a) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_present( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref> {fir.bindc_name = "a"}) -! CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index -! CHECK-DAG: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_presentEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%[[C1]] : index) -! CHECK: %[[PRESENT:.*]] = acc.present varPtr(%[[DECL]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a"} -! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[PRESENT]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) -! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[PRESENT]] : !fir.ref>) -! CHECK: acc.delete accPtr(%[[PRESENT]] : !fir.ref>) bounds(%[[BOUND]]) {dataClause = #acc, name = "a"} - - subroutine acc_declare_copyin() - integer :: a(100), b(10), i - !$acc declare copyin(a) copyin(readonly: b) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_copyin() -! CHECK: %[[A:.*]] = fir.alloca !fir.array<100xi32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_copyinEa"} -! CHECK: %[[ADECL:.*]]:2 = hlfir.declare %[[A]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_copyinEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[B:.*]] = fir.alloca !fir.array<10xi32> {bindc_name = "b", uniq_name = "_QMacc_declareFacc_declare_copyinEb"} -! CHECK: %[[BDECL:.*]]:2 = hlfir.declare %[[B]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_copyinEb"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[BOUND_A:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index) -! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[ADECL]]#0 : !fir.ref>) bounds(%[[BOUND_A]]) -> !fir.ref> {name = "a"} -! CHECK: %[[BOUND_B:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index) -! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[BDECL]]#0 : !fir.ref>) bounds(%[[BOUND_B]]) -> !fir.ref> {dataClause = #acc, name = "b"} -! CHECK: acc.declare_enter dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref>, !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) -! CHECK: acc.delete accPtr(%[[COPYIN_A]] : !fir.ref>) bounds(%[[BOUND_A]]) {dataClause = #acc, name = "a"} -! 
CHECK: acc.delete accPtr(%[[COPYIN_B]] : !fir.ref>) bounds(%[[BOUND_B]]) {dataClause = #acc, name = "b"} - - subroutine acc_declare_copyout() - integer :: a(100), i - !$acc declare copyout(a) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_copyout() -! CHECK: %[[A:.*]] = fir.alloca !fir.array<100xi32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_copyoutEa"} -! CHECK: %[[ADECL:.*]]:2 = hlfir.declare %[[A]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_copyoutEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADECL]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a"} -! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) -! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: acc.copyout accPtr(%[[CREATE]] : !fir.ref>) bounds(%{{.*}}) to varPtr(%[[ADECL]]#0 : !fir.ref>) {name = "a"} -! CHECK: return - - subroutine acc_declare_deviceptr(a) - integer :: a(100), i - !$acc declare deviceptr(a) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_deviceptr( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref> {fir.bindc_name = "a"}) { -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_deviceptrEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[DEVICEPTR:.*]] = acc.deviceptr varPtr(%[[DECL]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: acc.declare_enter dataOperands(%[[DEVICEPTR]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) - - subroutine acc_declare_link(a) - integer :: a(100), i - !$acc declare link(a) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_link( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref> {fir.bindc_name = "a"}) -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_linkEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[LINK:.*]] = acc.declare_link varPtr(%[[DECL]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: acc.declare_enter dataOperands(%[[LINK]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) - - subroutine acc_declare_device_resident(a) - integer :: a(100), i - !$acc declare device_resident(a) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_device_resident( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref> {fir.bindc_name = "a"}) -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_device_residentEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[DEVICERES:.*]] = acc.declare_device_resident varPtr(%[[DECL]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "a"} -! 
CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[DEVICERES]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) -! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[DEVICERES]] : !fir.ref>) -! CHECK: acc.delete accPtr(%[[DEVICERES]] : !fir.ref>) bounds(%{{.*}}) {dataClause = #acc, name = "a"} - - subroutine acc_declare_device_resident2() - integer, parameter :: n = 100 - real, dimension(n) :: dataparam - !$acc declare device_resident(dataparam) - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_device_resident2() -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> {bindc_name = "dataparam", uniq_name = "_QMacc_declareFacc_declare_device_resident2Edataparam"} -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_device_resident2Edataparam"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[DEVICERES:.*]] = acc.declare_device_resident varPtr(%[[DECL]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "dataparam"} -! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[DEVICERES]] : !fir.ref>) -! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[DEVICERES]] : !fir.ref>) -! CHECK: acc.delete accPtr(%[[DEVICERES]] : !fir.ref>) bounds(%{{.*}}) {dataClause = #acc, name = "dataparam"} - - subroutine acc_declare_link2() - integer, parameter :: n = 100 - real, dimension(n) :: dataparam - !$acc declare link(dataparam) - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_link2() -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> {bindc_name = "dataparam", uniq_name = "_QMacc_declareFacc_declare_link2Edataparam"} -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_link2Edataparam"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[LINK:.*]] = acc.declare_link varPtr(%[[DECL]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "dataparam"} -! CHECK: acc.declare_enter dataOperands(%[[LINK]] : !fir.ref>) - - subroutine acc_declare_deviceptr2() - integer, parameter :: n = 100 - real, dimension(n) :: dataparam - !$acc declare deviceptr(dataparam) - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_deviceptr2() -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> {bindc_name = "dataparam", uniq_name = "_QMacc_declareFacc_declare_deviceptr2Edataparam"} -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_deviceptr2Edataparam"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[DEVICEPTR:.*]] = acc.deviceptr varPtr(%[[DECL]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "dataparam"} -! CHECK: acc.declare_enter dataOperands(%[[DEVICEPTR]] : !fir.ref>) - - function acc_declare_in_func() - real :: a(1024) - !$acc declare device_resident(a) - end function - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_in_func() -> f32 { -! CHECK: %[[DEVICE_RESIDENT:.*]] = acc.declare_device_resident varPtr(%{{.*}}#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[DEVICE_RESIDENT]] : !fir.ref>) -! CHECK: %[[LOAD:.*]] = fir.load %{{.*}}#0 : !fir.ref -! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[DEVICE_RESIDENT]] : !fir.ref>) -! 
CHECK: acc.delete accPtr(%[[DEVICE_RESIDENT]] : !fir.ref>) bounds(%{{.*}}) {dataClause = #acc, name = "a"} -! CHECK: return %[[LOAD]] : f32 -! CHECK: } - - function acc_declare_in_func2(i) - real :: a(1024) - integer :: i - !$acc declare create(a) - return - end function - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_in_func2(%arg0: !fir.ref {fir.bindc_name = "i"}) -> f32 { -! CHECK: %[[ALLOCA_A:.*]] = fir.alloca !fir.array<1024xf32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_in_func2Ea"} -! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ALLOCA_A]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_in_func2Ea"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL_A]]#0 : !fir.ref>) bounds(%{{[0-9]+}}) -> !fir.ref> {name = "a"} -! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: cf.br ^bb1 -! CHECK: ^bb1: -! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: acc.delete accPtr(%[[CREATE]] : !fir.ref>) bounds(%{{[0-9]+}}) {dataClause = #acc, name = "a"} -! CHECK: return %{{.*}} : f32 -! CHECK: } - - subroutine acc_declare_allocate() - integer, allocatable :: a(:) - !$acc declare create(a) - - allocate(a(100)) - -! CHECK: %{{.*}} = fir.allocmem !fir.array, %{{.*}} {fir.must_be_heap = true, uniq_name = "_QMacc_declareFacc_declare_allocateEa.alloc"} -! CHECK: fir.store %{{.*}} to %{{.*}} {acc.declare_action = #acc.declare_action} : !fir.ref>>> - - deallocate(a) - -! CHECK: %{{.*}} = fir.box_addr %{{.*}} {acc.declare_action = #acc.declare_action} : (!fir.box>>) -> !fir.heap> - -! CHECK: fir.freemem %{{.*}} : !fir.heap> -! CHECK: fir.store %{{.*}} to %{{.*}} {acc.declare_action = #acc.declare_action} : !fir.ref>>> - -! CHECK: fir.if -! CHECK: fir.freemem %{{.*}} : !fir.heap> -! CHECK: fir.store %{{.*}} to %{{.*}}#0 {acc.declare_action = #acc.declare_action} : !fir.ref>>> -! CHECK: } - - end subroutine - -! CHECK-LABEL: func.func private @_QMacc_declareFacc_declare_allocateEa_acc_declare_post_alloc( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref>>>) { -! CHECK: %[[UPDATE:.*]] = acc.update_device varPtr(%[[ARG0]] : !fir.ref>>>) -> !fir.ref>>> {implicit = true, name = "a_desc", structured = false} -! CHECK: acc.update dataOperands(%[[UPDATE]] : !fir.ref>>>) -! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0]] : !fir.ref>>> -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] {acc.declare = #acc.declare} : (!fir.box>>) -> !fir.heap> -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap>) -> !fir.heap> {name = "a", structured = false} -! CHECK: acc.declare_enter dataOperands(%[[CREATE]] : !fir.heap>) -! CHECK: return -! CHECK: } - -! CHECK-LABEL: func.func private @_QMacc_declareFacc_declare_allocateEa_acc_declare_pre_dealloc( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref>>>) { -! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0]] : !fir.ref>>> -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] {acc.declare = #acc.declare} : (!fir.box>>) -> !fir.heap> -! CHECK: %[[GETDEVICEPTR:.*]] = acc.getdeviceptr varPtr(%[[BOX_ADDR]] : !fir.heap>) -> !fir.heap> {dataClause = #acc, name = "a", structured = false} -! CHECK: acc.declare_exit dataOperands(%[[GETDEVICEPTR]] : !fir.heap>) -! CHECK: acc.delete accPtr(%[[GETDEVICEPTR]] : !fir.heap>) {dataClause = #acc, name = "a", structured = false} -! CHECK: return -! CHECK: } - -! CHECK-LABEL: func.func private @_QMacc_declareFacc_declare_allocateEa_acc_declare_post_dealloc( -! 
CHECK-SAME: %[[ARG0:.*]]: !fir.ref>>>) { -! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0]] : !fir.ref>>> -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box>>) -> !fir.heap> -! CHECK: %[[UPDATE:.*]] = acc.update_device varPtr(%[[BOX_ADDR]] : !fir.heap>) -> !fir.heap> {implicit = true, name = "a_desc", structured = false} -! CHECK: acc.update dataOperands(%[[UPDATE]] : !fir.heap>) -! CHECK: return -! CHECK: } - - subroutine acc_declare_multiple_directive(a, b) - integer :: a(100), b(100), i - !$acc declare copy(a) - !$acc declare copyout(b) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_multiple_directive( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref> {fir.bindc_name = "b"}) { -! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_multiple_directiveEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[DECL_B:.*]]:2 = hlfir.declare %[[ARG1]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_multiple_directiveEb"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECL_A]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a"} -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL_B]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {dataClause = #acc, name = "b"} -! CHECK: acc.declare_enter dataOperands(%[[COPYIN]], %[[CREATE]] : !fir.ref>, !fir.ref>) -! CHECK: %{{.*}}:{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { - - -! CHECK: acc.copyout accPtr(%[[CREATE]] : !fir.ref>) bounds(%{{.*}}) to varPtr(%[[DECL_B]]#0 : !fir.ref>) {name = "b"} -! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref>) bounds(%{{.*}}) to varPtr(%[[DECL_A]]#0 : !fir.ref>) {dataClause = #acc, name = "a"} - - subroutine acc_declare_array_section(a) - integer :: a(:) - !$acc declare copy(a(1:10)) - - do i = 1, 100 - a(i) = i - end do - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_array_section( -! CHECK-SAME: %[[ARG0:.*]]: !fir.box> {fir.bindc_name = "a"}) { -! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {uniq_name = "_QMacc_declareFacc_declare_array_sectionEa"} : (!fir.box>, !fir.dscope) -> (!fir.box>, !fir.box>) -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 {acc.declare = #acc.declare} : (!fir.box>) -> !fir.ref> -! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a(1:10)"} -! CHECK: acc.declare_enter dataOperands(%[[COPYIN]] : !fir.ref>) - -! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref>) bounds(%{{.*}}) to varPtr(%[[BOX_ADDR]] : !fir.ref>) {dataClause = #acc, name = "a(1:10)"} - - subroutine acc_declare_allocate_with_stat() - integer :: status - real, pointer, dimension(:) :: localptr - !$acc declare create(localptr) - allocate(localptr(n), stat=status) - - deallocate(localptr, stat=status) - end subroutine - -! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_allocate_with_stat() -! CHECK: fir.call @_FortranAPointerAllocate(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} {acc.declare_action = #acc.declare_action} -! 
CHECK: fir.call @_FortranAPointerDeallocate(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} {acc.declare_action = #acc.declare_action} -end module - -module acc_declare_allocatable_test - integer, allocatable :: data1(:) - !$acc declare create(data1) -end module - -! CHECK-LABEL: acc.global_ctor @_QMacc_declare_allocatable_testEdata1_acc_ctor { -! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) {acc.declare = #acc.declare} : !fir.ref>>> -! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[GLOBAL_ADDR]] : !fir.ref>>>) -> !fir.ref>>> {dataClause = #acc, implicit = true, name = "data1", structured = false} -! CHECK: acc.declare_enter dataOperands(%[[COPYIN]] : !fir.ref>>>) -! CHECK: acc.terminator -! CHECK: } - -! CHECK-LABEL: func.func private @_QMacc_declare_allocatable_testEdata1_acc_declare_post_alloc() { -! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) : !fir.ref>>> -! CHECK: %[[UPDATE:.*]] = acc.update_device varPtr(%[[GLOBAL_ADDR]] : !fir.ref>>>) -> !fir.ref>>> {implicit = true, name = "data1_desc", structured = false} -! CHECK: acc.update dataOperands(%[[UPDATE]] : !fir.ref>>>) -! CHECK: %[[LOAD:.*]] = fir.load %[[GLOBAL_ADDR]] : !fir.ref>>> -! CHECK: %[[BOXADDR:.*]] = fir.box_addr %[[LOAD]] {acc.declare = #acc.declare} : (!fir.box>>) -> !fir.heap> -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOXADDR]] : !fir.heap>) -> !fir.heap> {name = "data1", structured = false} -! CHECK: acc.declare_enter dataOperands(%[[CREATE]] : !fir.heap>) -! CHECK: return -! CHECK: } - -! CHECK-LABEL: func.func private @_QMacc_declare_allocatable_testEdata1_acc_declare_pre_dealloc() { -! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) : !fir.ref>>> -! CHECK: %[[LOAD:.*]] = fir.load %[[GLOBAL_ADDR]] : !fir.ref>>> -! CHECK: %[[BOXADDR:.*]] = fir.box_addr %[[LOAD]] {acc.declare = #acc.declare} : (!fir.box>>) -> !fir.heap> -! CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[BOXADDR]] : !fir.heap>) -> !fir.heap> {dataClause = #acc, name = "data1", structured = false} -! CHECK: acc.declare_exit dataOperands(%[[DEVPTR]] : !fir.heap>) -! CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.heap>) {dataClause = #acc, name = "data1", structured = false} -! CHECK: return -! CHECK: } - -! CHECK-LABEL: func.func private @_QMacc_declare_allocatable_testEdata1_acc_declare_post_dealloc() { -! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) : !fir.ref>>> -! CHECK: %[[UPDATE:.*]] = acc.update_device varPtr(%[[GLOBAL_ADDR]] : !fir.ref>>>) -> !fir.ref>>> {implicit = true, name = "data1_desc", structured = false} -! CHECK: acc.update dataOperands(%[[UPDATE]] : !fir.ref>>>) -! CHECK: return -! CHECK: } - -! CHECK-LABEL: acc.global_dtor @_QMacc_declare_allocatable_testEdata1_acc_dtor { -! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) {acc.declare = #acc.declare} : !fir.ref>>> -! CHECK: %[[DEVICEPTR:.*]] = acc.getdeviceptr varPtr(%[[GLOBAL_ADDR]] : !fir.ref>>>) -> !fir.ref>>> {dataClause = #acc, name = "data1", structured = false} -! CHECK: acc.declare_exit dataOperands(%[[DEVICEPTR]] : !fir.ref>>>) -! CHECK: acc.delete accPtr(%[[DEVICEPTR]] : !fir.ref>>>) {dataClause = #acc, name = "data1", structured = false} -! CHECK: acc.terminator -! CHECK: } - - -module acc_declare_equivalent - integer, parameter :: n = 10 - real :: v1(n) - real :: v2(n) - equivalence(v1(1), v2(1)) - !$acc declare create(v2) -end module - -! 
CHECK-LABEL: acc.global_ctor @_QMacc_declare_equivalentEv2_acc_ctor { -! CHECK: %[[ADDR:.*]] = fir.address_of(@_QMacc_declare_equivalentEv1) {acc.declare = #acc.declare} : !fir.ref> -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) -> !fir.ref> {name = "v2", structured = false} -! CHECK: acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: acc.terminator -! CHECK: } -! CHECK-LABEL: acc.global_dtor @_QMacc_declare_equivalentEv2_acc_dtor { -! CHECK: %[[ADDR:.*]] = fir.address_of(@_QMacc_declare_equivalentEv1) {acc.declare = #acc.declare} : !fir.ref> -! CHECK: %[[DEVICEPTR:.*]] = acc.getdeviceptr varPtr(%[[ADDR]] : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "v2", structured = false} -! CHECK: acc.declare_exit dataOperands(%[[DEVICEPTR]] : !fir.ref>) -! CHECK: acc.delete accPtr(%[[DEVICEPTR]] : !fir.ref>) {dataClause = #acc, name = "v2", structured = false} -! CHECK: acc.terminator -! CHECK: } - -module acc_declare_equivalent2 - real :: v1(10) - real :: v2(5) - equivalence(v1(6), v2(1)) - !$acc declare create(v2) -end module - -! CHECK-LABEL: acc.global_ctor @_QMacc_declare_equivalent2Ev2_acc_ctor { -! CHECK: %[[ADDR:.*]] = fir.address_of(@_QMacc_declare_equivalent2Ev1) {acc.declare = #acc.declare} : !fir.ref> -! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) -> !fir.ref> {name = "v2", structured = false} -! CHECK: acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: acc.terminator -! CHECK: } -! CHECK-LABEL: acc.global_dtor @_QMacc_declare_equivalent2Ev2_acc_dtor { -! CHECK: %[[ADDR:.*]] = fir.address_of(@_QMacc_declare_equivalent2Ev1) {acc.declare = #acc.declare} : !fir.ref> -! CHECK: %[[DEVICEPTR:.*]] = acc.getdeviceptr varPtr(%[[ADDR]] : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "v2", structured = false} -! CHECK: acc.declare_exit dataOperands(%[[DEVICEPTR]] : !fir.ref>) -! CHECK: acc.delete accPtr(%[[DEVICEPTR]] : !fir.ref>) {dataClause = #acc, name = "v2", structured = false} -! CHECK: acc.terminator -! CHECK: } - -! Test that the pre/post alloc/dealloc attributes are set when the -! allocate/deallocate statement are in a different module. -module acc_declare_allocatable_test2 -contains - subroutine init() - use acc_declare_allocatable_test - allocate(data1(100)) -! CHECK: fir.store %{{.*}} to %{{.*}} {acc.declare_action = #acc.declare_action} : !fir.ref>>> - end subroutine - - subroutine finalize() - use acc_declare_allocatable_test - deallocate(data1) -! CHECK: %{{.*}} = fir.box_addr %{{.*}} {acc.declare_action = #acc.declare_action} : (!fir.box>>) -> !fir.heap> -! CHECK: fir.store %{{.*}} to %{{.*}} {acc.declare_action = #acc.declare_action} : !fir.ref>>> - end subroutine -end module - -module acc_declare_allocatable_test3 - integer, allocatable :: data1(:) - integer, allocatable :: data2(:) - !$acc declare create(data1, data2, data1) -end module - -! CHECK-LABEL: acc.global_ctor @_QMacc_declare_allocatable_test3Edata1_acc_ctor -! CHECK-LABEL: acc.global_ctor @_QMacc_declare_allocatable_test3Edata2_acc_ctor - -module acc_declare_post_action_stat - real, dimension(:), allocatable :: x, y - !$acc declare create(x,y) - -contains - - subroutine init() - integer :: stat - allocate(x(10), y(10), stat=stat) - end subroutine -end module - -! CHECK-LABEL: func.func @_QMacc_declare_post_action_statPinit() -! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath {acc.declare_action = #acc.declare_action} : (!fir.ref>, !fir.ref, i1, !fir.box, !fir.ref, i32) -> i32 -! CHECK: fir.if -! 
CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath {acc.declare_action = #acc.declare_action} : (!fir.ref>, !fir.ref, i1, !fir.box, !fir.ref, i32) -> i32 diff --git a/flang/test/Lower/OpenACC/acc-declare.f90 b/flang/test/Lower/OpenACC/acc-declare.f90 index edae0e6a4d37e..46c4365f23fd6 100644 --- a/flang/test/Lower/OpenACC/acc-declare.f90 +++ b/flang/test/Lower/OpenACC/acc-declare.f90 @@ -20,7 +20,7 @@ subroutine acc_declare_copy() ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_copyEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[COPYIN]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (i32) { ! CHECK: } ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[COPYIN]] : !fir.ref>) ! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref>) to varPtr(%[[DECL]]#0 : !fir.ref>) {dataClause = #acc, name = "a"} @@ -40,7 +40,7 @@ subroutine acc_declare_create() ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_createEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (i32) { ! CHECK: } ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref>) ! CHECK: acc.delete accPtr(%[[CREATE]] : !fir.ref>) {dataClause = #acc, name = "a"} @@ -60,7 +60,7 @@ subroutine acc_declare_present(a) ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_presentEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[PRESENT:.*]] = acc.present varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[PRESENT]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[PRESENT]] : !fir.ref>) ! CHECK: acc.delete accPtr(%[[PRESENT]] : !fir.ref>) {dataClause = #acc, name = "a"} @@ -81,7 +81,7 @@ subroutine acc_declare_copyin() ! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[ADECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[BDECL]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "b"} ! CHECK: acc.declare_enter dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref>, !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! 
CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) ! CHECK: acc.delete accPtr(%[[COPYIN_A]] : !fir.ref>) {dataClause = #acc, name = "a"} ! CHECK: acc.delete accPtr(%[[COPYIN_B]] : !fir.ref>) {dataClause = #acc, name = "b"} @@ -99,7 +99,7 @@ subroutine acc_declare_copyout() ! CHECK: %[[ADECL:.*]]:2 = hlfir.declare %[[A]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_copyoutEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADECL]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref>) ! CHECK: acc.copyout accPtr(%[[CREATE]] : !fir.ref>) to varPtr(%[[ADECL]]#0 : !fir.ref>) {name = "a"} ! CHECK: return @@ -118,7 +118,7 @@ subroutine acc_declare_deviceptr(a) ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_deviceptrEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[DEVICEPTR:.*]] = acc.deviceptr varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: acc.declare_enter dataOperands(%[[DEVICEPTR]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) subroutine acc_declare_link(a) integer :: a(100), i @@ -134,7 +134,7 @@ subroutine acc_declare_link(a) ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_linkEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[LINK:.*]] = acc.declare_link varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: acc.declare_enter dataOperands(%[[LINK]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) subroutine acc_declare_device_resident(a) integer :: a(100), i @@ -150,7 +150,7 @@ subroutine acc_declare_device_resident(a) ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_device_residentEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[DEVICERES:.*]] = acc.declare_device_resident varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[DEVICERES]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[DEVICERES]] : !fir.ref>) ! 
CHECK: acc.delete accPtr(%[[DEVICERES]] : !fir.ref>) {dataClause = #acc, name = "a"} @@ -279,7 +279,7 @@ subroutine acc_declare_multiple_directive(a, b) ! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECL_A]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "a"} ! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL_B]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "b"} ! CHECK: acc.declare_enter dataOperands(%[[COPYIN]], %[[CREATE]] : !fir.ref>, !fir.ref>) -! CHECK: %{{.*}}:{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (i32) { ! CHECK: acc.copyout accPtr(%[[CREATE]] : !fir.ref>) to varPtr(%[[DECL_B]]#0 : !fir.ref>) {name = "b"} diff --git a/flang/test/Lower/OpenACC/acc-enter-data-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-enter-data-unwrap-defaultbounds.f90 deleted file mode 100644 index 3e08068bdec44..0000000000000 --- a/flang/test/Lower/OpenACC/acc-enter-data-unwrap-defaultbounds.f90 +++ /dev/null @@ -1,818 +0,0 @@ -! This test checks lowering of OpenACC enter data directive. - -! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s - -subroutine acc_enter_data - integer :: async = 1 - real, dimension(10, 10) :: a, b, c - real, pointer :: d - logical :: ifCondition = .TRUE. - -!CHECK: %[[C10:.*]] = arith.constant 10 : index -!CHECK: %[[EXTENT_C10:.*]] = arith.constant 10 : index -!CHECK: %[[A:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ea"} -!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] -!CHECK: %[[B:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Eb"} -!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -!CHECK: %[[C:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ec"} -!CHECK: %[[DECLC:.*]]:2 = hlfir.declare %[[C]] -!CHECK: %[[D:.*]] = fir.alloca !fir.box> {bindc_name = "d", uniq_name = "{{.*}}Ed"} -!CHECK: %[[DECLD:.*]]:2 = hlfir.declare %[[D]] - - !$acc enter data create(a) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[ONE]] : index -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index) -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE_A]] : !fir.ref>){{$}} - - !$acc enter data create(a) if(.true.) 
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[ONE]] : index -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index) -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: [[IF1:%.*]] = arith.constant true -!CHECK: acc.enter_data if([[IF1]]) dataOperands(%[[CREATE_A]] : !fir.ref>){{$}} - - !$acc enter data create(a) if(ifCondition) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[ONE]] : index -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index) -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: [[IFCOND:%.*]] = fir.load %{{.*}} : !fir.ref> -!CHECK: [[IF2:%.*]] = fir.convert [[IFCOND]] : (!fir.logical<4>) -> i1 -!CHECK: acc.enter_data if([[IF2]]) dataOperands(%[[CREATE_A]] : !fir.ref>){{$}} - - !$acc enter data create(a) create(b) create(c) -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "b", structured = false} -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "c", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref>, 
!fir.ref>, !fir.ref>){{$}} - - !$acc enter data create(a) create(b) create(zero: c) -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "b", structured = false} -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {dataClause = #acc, name = "c", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref>, !fir.ref>, !fir.ref>){{$}} - - !$acc enter data copyin(a) create(b) attach(d) -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "b", structured = false} -!CHECK: %[[BOX_D:.*]] = fir.load %[[DECLD]]#0 : !fir.ref>> -!CHECK: %[[BOX_ADDR_D:.*]] = fir.box_addr %[[BOX_D]] : (!fir.box>) -> !fir.ptr -!CHECK: %[[ATTACH_D:.*]] = acc.attach varPtr(%[[BOX_ADDR_D]] : !fir.ptr) -> !fir.ptr {name = "d", structured = false} -!CHECK: acc.enter_data dataOperands(%[[COPYIN_A]], %[[CREATE_B]], %[[ATTACH_D]] : !fir.ref>, !fir.ref>, !fir.ptr){{$}} - - !$acc enter data create(a) async -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_A:.*]] = 
acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) async -> !fir.ref> {name = "a", structured = false} -!CHECK: acc.enter_data async dataOperands(%[[CREATE_A]] : !fir.ref>) - - !$acc enter data create(a) wait -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: acc.enter_data wait dataOperands(%[[CREATE_A]] : !fir.ref>) - - !$acc enter data create(a) async wait -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) async -> !fir.ref> {name = "a", structured = false} -!CHECK: acc.enter_data async wait dataOperands(%[[CREATE_A]] : !fir.ref>) - - !$acc enter data create(a) async(1) -!CHECK: %[[ASYNC1:.*]] = arith.constant 1 : i32 -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) async(%[[ASYNC1]] : i32) -> !fir.ref> {name = "a", structured = false} -!CHECK: acc.enter_data async(%[[ASYNC1]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref>) - - !$acc enter data create(a) async(async) -!CHECK: %[[ASYNC2:.*]] = fir.load %{{.*}} : !fir.ref -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) - -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) async(%[[ASYNC2]] : i32) -> !fir.ref> {name = "a", structured = false} -!CHECK: acc.enter_data async(%[[ASYNC2]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref>) - - !$acc enter data create(a) wait(1) -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: %[[WAIT1:.*]] = arith.constant 1 : i32 -!CHECK: acc.enter_data wait(%[[WAIT1]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref>) - - !$acc enter data create(a) wait(queues: 1, 
2) -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: %[[WAIT2:.*]] = arith.constant 1 : i32 -!CHECK: %[[WAIT3:.*]] = arith.constant 2 : i32 -!CHECK: acc.enter_data wait(%[[WAIT2]], %[[WAIT3]] : i32, i32) dataOperands(%[[CREATE_A]] : !fir.ref>) - - !$acc enter data create(a) wait(devnum: 1: queues: 1, 2) -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: %[[WAIT4:.*]] = arith.constant 1 : i32 -!CHECK: %[[WAIT5:.*]] = arith.constant 2 : i32 -!CHECK: %[[WAIT6:.*]] = arith.constant 1 : i32 -!CHECK: acc.enter_data wait_devnum(%[[WAIT6]] : i32) wait(%[[WAIT4]], %[[WAIT5]] : i32, i32) dataOperands(%[[CREATE_A]] : !fir.ref>) - - !$acc enter data copyin(a(1:10,1:5)) -!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref> {name = "a(1:10,1:5)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[COPYIN_A]] : !fir.ref>) - - !$acc enter data copyin(a(1:,1:5)) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[LB1:.*]] = arith.constant 0 : index -!CHECK: %[[UB1:.*]] = arith.subi %c10{{.*}}, %[[ONE]] : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB1]] : index) upperbound(%[[UB1]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%c1{{.*}} : index) -!CHECK: %[[LB2:.*]] = arith.constant 0 : index -!CHECK: %[[UB2:.*]] = arith.constant 4 : index -!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB2]] : index) upperbound(%[[UB2]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%c1{{.*}} : index) -!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND1]], %[[BOUND2]]) -> !fir.ref> {name = "a(1:,1:5)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[COPYIN_A]] : !fir.ref>) - - !$acc enter data copyin(a(:10,1:5)) -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[UB1:.*]] = arith.constant 9 : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB1]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index) -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB2:.*]] = arith.constant 4 : index -!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB]] : index) 
upperbound(%[[UB2]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index) -!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND1]], %[[BOUND2]]) -> !fir.ref> {name = "a(:10,1:5)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[COPYIN_A]] : !fir.ref>) - - !$acc enter data copyin(a(:,:)) -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[UB:.*]] = arith.subi %c10{{.*}}, %[[ONE]] : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index) -!CHECK: %[[UB:.*]] = arith.subi %c10{{.*}}, %[[ONE]] : index -!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index) -!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND1]], %[[BOUND2]]) -> !fir.ref> {name = "a(:,:)", structured = false} -end subroutine acc_enter_data - -subroutine acc_enter_data_dummy(a, b, n, m) - integer :: n, m - real :: a(1:10) - real :: b(n:m) - -!CHECK-LABEL: func.func @_QPacc_enter_data_dummy -!CHECK-SAME: %[[A:.*]]: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref> {fir.bindc_name = "b"}, %[[N:.*]]: !fir.ref {fir.bindc_name = "n"}, %[[M:.*]]: !fir.ref {fir.bindc_name = "m"} -!CHECK: %[[C10:.*]] = arith.constant 10 : index -!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] -!CHECK: %[[DECLN:.*]]:2 = hlfir.declare %[[N]] -!CHECK: %[[DECLM:.*]]:2 = hlfir.declare %[[M]] -!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref -!CHECK: %[[N_I64:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64 -!CHECK: %[[N_IDX:.*]] = fir.convert %[[N_I64]] : (i64) -> index -!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref -!CHECK: %[[M_I64:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64 -!CHECK: %[[M_IDX:.*]] = fir.convert %[[M_I64]] : (i64) -> index -!CHECK: %[[C1:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[M_N:.*]] = arith.subi %[[M_IDX]], %[[N_IDX]] : index -!CHECK: %[[M_N_1:.*]] = arith.addi %[[M_N]], %[[C1]] : index -!CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[M_N_1]], %[[C0]] : index -!CHECK: %[[EXT_B:.*]] = arith.select %[[CMP]], %[[M_N_1]], %[[C0]] : index -!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] - - !$acc enter data create(a) -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index) -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(b) -!CHECK: %[[DIMS:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0{{.*}} : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[DIMS]]#1 : index) stride(%[[DIMS]]#2 : index) startIdx(%{{.*}} : index) {strideInBytes = true} -!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "b", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(5:10)) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: 
%[[LB1:.*]] = arith.constant 4 : index -!CHECK: %[[UB1:.*]] = arith.constant 9 : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB1]] : index) upperbound(%[[UB1]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%c1{{.*}} : index) -!CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND1]]) -> !fir.ref> {name = "a(5:10)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref>) - - !$acc enter data create(b(n:m)) -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0{{.*}} : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref -!CHECK: %[[N_CONV1:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64 -!CHECK: %[[N_CONV2:.*]] = fir.convert %[[N_CONV1]] : (i64) -> index -!CHECK: %[[LB:.*]] = arith.subi %[[N_CONV2]], %[[N_IDX]] : index -!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref -!CHECK: %[[M_CONV1:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64 -!CHECK: %[[M_CONV2:.*]] = fir.convert %[[M_CONV1]] : (i64) -> index -!CHECK: %[[UB:.*]] = arith.subi %[[M_CONV2]], %[[N_IDX]] : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT_B]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[N_IDX]] : index) {strideInBytes = true} -!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND1]]) -> !fir.ref> {name = "b(n:m)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref>) - - !$acc enter data create(b(n:)) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0_8 : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref -!CHECK: %[[CONVERT1_N:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64 -!CHECK: %[[CONVERT2_N:.*]] = fir.convert %[[CONVERT1_N]] : (i64) -> index -!CHECK: %[[LB:.*]] = arith.subi %[[CONVERT2_N]], %[[N_IDX]] : index -!CHECK: %[[UB:.*]] = arith.subi %[[EXT_B]], %c1{{.*}} : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT_B]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[N_IDX]] : index) {strideInBytes = true} -!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND1]]) -> !fir.ref> {name = "b(n:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref>) - - !$acc enter data create(b(:)) -!CHECK: %[[ZERO:.*]] = arith.constant 0 : index -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0{{.*}} : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[UB:.*]] = arith.subi %[[EXT_B]], %[[ONE]] : index -!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[EXT_B]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[N_IDX]] : index) {strideInBytes = true} -!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND1]]) -> !fir.ref> {name = "b(:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref>) - -end subroutine - -! Test lowering of array section for non default lower bound. 
-subroutine acc_enter_data_non_default_lb() - integer :: a(0:9) - integer :: b(11:20) - -!CHECK-LABEL: func.func @_QPacc_enter_data_non_default_lb() { -!CHECK: %[[BASELB:.*]] = arith.constant 0 : index -!CHECK: %[[EXTENT_C10:.*]] = arith.constant 10 : index -!CHECK: %[[A:.*]] = fir.alloca !fir.array<10xi32> {bindc_name = "a", uniq_name = "_QFacc_enter_data_non_default_lbEa"} -!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] -!CHECK: %[[B:.*]] = fir.alloca !fir.array<10xi32> {bindc_name = "b", uniq_name = "_QFacc_enter_data_non_default_lbEb"} -!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] - - !$acc enter data create(a(5:9)) -!CHECK: %[[SECTIONLB:.*]] = arith.constant 5 : index -!CHECK: %[[LB:.*]] = arith.subi %[[SECTIONLB]], %[[BASELB]] : index -!CHECK: %[[SECTIONUB:.*]] = arith.constant 9 : index -!CHECK: %[[UB:.*]] = arith.subi %[[SECTIONUB]], %[[BASELB]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%{{.*}} : index) startIdx(%[[BASELB]] : index) -!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(5:9)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(:)) -!CHECK: %[[ZERO:.*]] = arith.constant 0 : index -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%{{.*}} : index) startIdx(%[[BASELB]] : index) -!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(:6)) -!CHECK: %[[ZERO:.*]] = arith.constant 0 : index -!CHECK: %[[SECTIONUB:.*]] = arith.constant 6 : index -!CHECK: %[[UB:.*]] = arith.subi %[[SECTIONUB]], %[[BASELB]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%{{.*}} : index) startIdx(%[[BASELB]] : index) -!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(:6)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(4:)) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[SECTIONLB:.*]] = arith.constant 4 : index -!CHECK: %[[LB:.*]] = arith.subi %[[SECTIONLB]], %[[BASELB]] : index -!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%{{.*}} : index) startIdx(%[[BASELB]] : index) -!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(4:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(b) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0{{.*}} : (!fir.box>, index) -> (index, index, index) 
-!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[DIMS0]]#1, %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS0]]#1 : index) stride(%{{.*}} : index) startIdx(%c11{{.*}} : index) {strideInBytes = true} -!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "b", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - -end subroutine - -! Test lowering of assumed size arrays. -subroutine acc_enter_data_assumed(a, b, n, m) - integer :: n, m - real :: a(:) - real :: b(10:) - -!CHECK-LABEL: func.func @_QPacc_enter_data_assumed( -!CHECK-SAME: %[[A:.*]]: !fir.box> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.box> {fir.bindc_name = "b"}, %[[N:.*]]: !fir.ref {fir.bindc_name = "n"}, %[[M:.*]]: !fir.ref {fir.bindc_name = "m"}) { -!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] -!CHECK: %[[LB_C10:.*]] = arith.constant 10 : i64 -!CHECK: %[[LB_C10_IDX:.*]] = fir.convert %[[LB_C10]] : (i64) -> index -!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -!CHECK: %[[DECLM:.*]]:2 = hlfir.declare %[[M]] -!CHECK: %[[DECLN:.*]]:2 = hlfir.declare %[[N]] - - !$acc enter data create(a) -!CHECK: %[[C1:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[DIMS]]#1, %[[C1]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS]]#1 : index) stride(%[[DIMS]]#2 : index) startIdx(%[[C1]] : index) {strideInBytes = true} -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(:)) -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %[[C0]] : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[UB:.*]] = arith.subi %[[DIMS1]]#1, %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true} - -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(2:)) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[LB:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %[[C0]] : (!fir.box>, index) -> (index, index, index) 
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS1]]#1, %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true} - -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(2:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(:4)) -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[UB:.*]] = arith.constant 3 : index -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true} - -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(:4)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(6:10)) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[LB:.*]] = arith.constant 5 : index -!CHECK: %[[UB:.*]] = arith.constant 9 : index -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true} - -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(6:10)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(n:)) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) - -!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref -!CHECK: %[[CONVERT1_N:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64 -!CHECK: %[[CONVERT2_N:.*]] = fir.convert %[[CONVERT1_N]] : (i64) -> index -!CHECK: %[[LB:.*]] = arith.subi %[[CONVERT2_N]], %[[ONE]] : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS:.*]]:3 = fir.box_dims %[[DECLA]]#1, %[[C0]] : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[UB:.*]] = arith.subi %[[DIMS]]#1, %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true} - -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(n:)", structured = 
false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(:m)) -!CHECK: %[[BASELB:.*]] = arith.constant 0 : index -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) - -!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref -!CHECK: %[[CONVERT1_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64 -!CHECK: %[[CONVERT2_M:.*]] = fir.convert %[[CONVERT1_M]] : (i64) -> index -!CHECK: %[[UB:.*]] = arith.subi %[[CONVERT2_M]], %[[ONE]] : index -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[BASELB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true} - -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(:m)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a(n:m)) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) - -!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref -!CHECK: %[[CONVERT1_N:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64 -!CHECK: %[[CONVERT2_N:.*]] = fir.convert %[[CONVERT1_N]] : (i64) -> index -!CHECK: %[[LB:.*]] = arith.subi %[[CONVERT2_N]], %[[ONE]] : index - -!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref -!CHECK: %[[CONVERT1_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64 -!CHECK: %[[CONVERT2_M:.*]] = fir.convert %[[CONVERT1_M]] : (i64) -> index -!CHECK: %[[UB:.*]] = arith.subi %[[CONVERT2_M]], %[[ONE]] : index -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true} - -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(n:m)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(b(:m)) -!CHECK: %[[ZERO:.*]] = arith.constant 0 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) - -!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref -!CHECK: %[[CONVERT1_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64 -!CHECK: %[[CONVERT2_M:.*]] = fir.convert %[[CONVERT1_M]] : (i64) -> index -!CHECK: %[[UB:.*]] = arith.subi %[[CONVERT2_M]], %[[LB_C10_IDX]] : index -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLB]]#1, %{{.*}} : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[LB_C10_IDX]] : index) {strideInBytes = true} - -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = 
acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "b(:m)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(b) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index - -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %[[C0]] : (!fir.box>, index) -> (index, index, index) -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[DIMS0]]#1, %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[C0]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[LB_C10_IDX]] : index) {strideInBytes = true} - -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box>) -> !fir.ref> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "b", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - -end subroutine - -subroutine acc_enter_data_allocatable() - real, allocatable :: a(:) - integer, allocatable :: i - -!CHECK-LABEL: func.func @_QPacc_enter_data_allocatable() { -!CHECK: %[[A:.*]] = fir.alloca !fir.box>> {bindc_name = "a", uniq_name = "_QFacc_enter_data_allocatableEa"} -!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] -!CHECK: %[[I:.*]] = fir.alloca !fir.box> {bindc_name = "i", uniq_name = "_QFacc_enter_data_allocatableEi"} -!CHECK: %[[DECLI:.*]]:2 = hlfir.declare %[[I]] - - !$acc enter data create(a) - -!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0_0:.*]] = arith.constant 0 : index -!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0_1:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %c0{{.*}} : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %c0{{.*}} : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[UB:.*]] = arith.subi %[[DIMS1]]#1, %c1{{.*}} : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true} -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box>>) -> !fir.heap> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%[[BOUND]]) -> !fir.heap> {name = "a", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap>) - - !$acc enter data create(a(:)) - -!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[ZERO:.*]] = arith.constant 0 : index -!CHECK: %[[ONE:.*]] = arith.constant 1 : index - -!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) - -!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[UB:.*]] = arith.subi %[[DIMS2]]#1, %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB:.*]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) 
startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true} -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box>>) -> !fir.heap> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%[[BOUND]]) -> !fir.heap> {name = "a(:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap>) - - !$acc enter data create(a(2:5)) - -!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> - -!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[C2:.*]] = arith.constant 2 : index -!CHECK: %[[LB:.*]] = arith.subi %[[C2]], %[[DIMS0]]#0 : index -!CHECK: %[[C5:.*]] = arith.constant 5 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C5]], %[[DIMS0]]#0 : index -!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true} -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box>>) -> !fir.heap> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%[[BOUND]]) -> !fir.heap> {name = "a(2:5)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap>) - - !$acc enter data create(a(3:)) - -!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[ONE:.*]] = arith.constant 1 : index - -!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[C3:.*]] = arith.constant 3 : index -!CHECK: %[[LB:.*]] = arith.subi %[[C3]], %[[DIMS0]]#0 : index - -!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[UB:.*]] = arith.subi %[[DIMS2]]#1, %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true} -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box>>) -> !fir.heap> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%[[BOUND]]) -> !fir.heap> {name = "a(3:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap>) - - !$acc enter data create(a(:7)) - -!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[ZERO:.*]] = arith.constant 0 : index - -!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[C0:.*]] = 
arith.constant 0 : index -!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[C7:.*]] = arith.constant 7 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C7]], %[[DIMS0]]#0 : index -!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref>>> -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true} -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box>>) -> !fir.heap> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%[[BOUND]]) -> !fir.heap> {name = "a(:7)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap>) - - !$acc enter data create(i) - -!CHECK: %[[BOX_I:.*]] = fir.load %[[DECLI]]#0 : !fir.ref>> -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_I]] : (!fir.box>) -> !fir.heap -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap) -> !fir.heap {name = "i", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap) - -end subroutine - -subroutine acc_enter_data_derived_type() - type :: dt - real :: data - real :: array(1:10) - end type - - type :: t - type(dt) :: d - end type - - type :: z - integer, allocatable :: data(:) - end type - - type :: tt - type(dt) :: d(10) - end type - - type(dt) :: a - type(t) :: b - type(dt) :: aa(10) - type(z) :: c - type(tt) :: d - -!CHECK-LABEL: func.func @_QPacc_enter_data_derived_type() { -!CHECK: %[[A:.*]] = fir.alloca !fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}> {bindc_name = "a", uniq_name = "_QFacc_enter_data_derived_typeEa"} -!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] -!CHECK: %[[AA:.*]] = fir.alloca !fir.array<10x!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>> {bindc_name = "aa", uniq_name = "_QFacc_enter_data_derived_typeEaa"} -!CHECK: %[[DECLAA:.*]]:2 = hlfir.declare %[[AA]] -!CHECK: %[[B:.*]] = fir.alloca !fir.type<_QFacc_enter_data_derived_typeTt{d:!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>}> {bindc_name = "b", uniq_name = "_QFacc_enter_data_derived_typeEb"} -!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -!CHECK: %[[C:.*]] = fir.alloca !fir.type<_QFacc_enter_data_derived_typeTz{data:!fir.box>>}> {bindc_name = "c", uniq_name = "_QFacc_enter_data_derived_typeEc"} -!CHECK: %[[DECLC:.*]]:2 = hlfir.declare %[[C]] -!CHECK: %[[D:.*]] = fir.alloca !fir.type<_QFacc_enter_data_derived_typeTtt{d:!fir.array<10x!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>}> {bindc_name = "d", uniq_name = "_QFacc_enter_data_derived_typeEd"} -!CHECK: %[[DECLD:.*]]:2 = hlfir.declare %[[D]] - - !$acc enter data create(a%data) - - -!CHECK: %[[DATA_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"data"} : (!fir.ref}>>) -> !fir.ref -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DATA_COORD]] : !fir.ref) -> !fir.ref {name = "a%data", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref) - - !$acc enter data create(b%d%data) - - - -!CHECK: %[[D_COORD:.*]] = hlfir.designate %[[DECLB]]#0{"d"} : (!fir.ref}>}>>) -> !fir.ref}>> -!CHECK: %[[DATA_COORD:.*]] = hlfir.designate %[[D_COORD]]{"data"} : (!fir.ref}>>) -> !fir.ref -!CHECK: %[[CREATE:.*]] = 
acc.create varPtr(%[[DATA_COORD]] : !fir.ref) -> !fir.ref {name = "b%d%data", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref) - - !$acc enter data create(a%array) - - -!CHECK: %[[C10:.*]] = arith.constant 10 : index -!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref}>>, !fir.shape<1>) -> !fir.ref> -!CHECK: %[[C1:.*]] = arith.constant 1 : index -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[C1]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a%array", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a%array(:)) - - -!CHECK: %[[C10:.*]] = arith.constant 10 : index -!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref}>>, !fir.shape<1>) -> !fir.ref> -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[C1:.*]] = arith.constant 1 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[C1]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a%array(:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a%array(1:5)) - -!CHECK: %[[C10:.*]] = arith.constant 10 : index -!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref}>>, !fir.shape<1>) -> !fir.ref> -!CHECK: %[[C1:.*]] = arith.constant 1 : index -!CHECK: %[[C0:.*]] = arith.constant 0 : index -!CHECK: %[[C4:.*]] = arith.constant 4 : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[C0]] : index) upperbound(%[[C4]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a%array(1:5)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a%array(:5)) - -!CHECK: %[[C10:.*]] = arith.constant 10 : index -!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref}>>, !fir.shape<1>) -> !fir.ref> -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[C1:.*]] = arith.constant 1 : index -!CHECK: %[[C4:.*]] = arith.constant 4 : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[C4]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a%array(:5)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - - !$acc enter data create(a%array(2:)) - - -!CHECK: %[[C10:.*]] = arith.constant 10 : index -!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref}>>, !fir.shape<1>) -> !fir.ref> -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[LB:.*]] = arith.constant 1 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds 
lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index) -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a%array(2:)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - -!$acc enter data create(b%d%array) - - - -!CHECK: %[[D_COORD:.*]] = hlfir.designate %[[DECLB]]#0{"d"} : (!fir.ref}>}>>) -> !fir.ref}>> -!CHECK: %[[C10:.*]] = arith.constant 10 : index -!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[D_COORD]]{"array"} shape %{{.*}} : (!fir.ref}>>, !fir.shape<1>) -> !fir.ref> -!CHECK: %[[C1:.*]] = arith.constant 1 : index -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[C1]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "b%d%array", structured = false} - - !$acc enter data create(c%data) - - -!CHECK: %[[DATA_COORD:.*]] = hlfir.designate %[[DECLC]]#0{"data"} {fortran_attrs = #fir.var_attrs} : (!fir.ref>>}>>) -> !fir.ref>>> -!CHECK: %[[DATA_BOX:.*]] = fir.load %[[DATA_COORD]] : !fir.ref>>> -!CHECK: %[[DIM0:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DATA_BOX]], %[[DIM0]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[DIM0_1:.*]] = arith.constant 0 : index -!CHECK: %[[DIMS0_1:.*]]:3 = fir.box_dims %[[DATA_BOX]], %[[DIM0_1]] : (!fir.box>>, index) -> (index, index, index) -!CHECK: %[[UB:.*]] = arith.subi %[[DIMS0_1]]#1, %[[ONE]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_1]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true} -!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DATA_BOX]] : (!fir.box>>) -> !fir.heap> -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%[[BOUND]]) -> !fir.heap> {name = "c%data", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap>) - - !$acc enter data create (d%d(1)%array) - - - - - - -!CHECK: %[[ONE:.*]] = arith.constant 1 : index -!CHECK: %[[D1_COORD:.*]] = hlfir.designate %[[DECLD]]#0{"d"} <%{{.*}}> (%[[ONE]]) : (!fir.ref}>>}>>, !fir.shape<1>, index) -> !fir.ref}>> - - -!CHECK: %[[C10:.*]] = arith.constant 10 : index -!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[D1_COORD]]{"array"} shape %{{.*}} : (!fir.ref}>>, !fir.shape<1>) -> !fir.ref> -!CHECK: %[[C1:.*]] = arith.constant 1 : index -!CHECK: %[[LB:.*]] = arith.constant 0 : index -!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[C1]] : index -!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "d%d(1_8)%array", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref>) - -end subroutine - -subroutine acc_enter_data_single_array_element() - type t1 - real, allocatable :: a(:, :) - end type t1 - type(t1), allocatable :: e(:) - allocate(e(10)%a(5,5)) - - !$acc enter data create(e(2)%a(1,2)) - -!CHECK-LABEL: func.func 
@_QPacc_enter_data_single_array_element() { -!CHECK-DAG: %[[VAL_38:.*]]:3 = fir.box_dims %[[BOX:.*]], %[[VAL_37:.*]] : (!fir.box>>, index) -> (index, index, index) -!CHECK-DAG: %[[VAL_37]] = arith.constant 0 : index -!CHECK-DAG: %[[VAL_40:.*]]:3 = fir.box_dims %[[BOX]], %[[VAL_39:.*]] : (!fir.box>>, index) -> (index, index, index) -!CHECK-DAG: %[[VAL_39]] = arith.constant 1 : index -!CHECK-DAG: %[[VAL_41:.*]] = fir.box_addr %[[BOX]] : (!fir.box>>) -> !fir.heap> -!CHECK: %[[VAL_42:.*]] = arith.constant 1 : index -!CHECK: %[[VAL_43:.*]] = arith.constant 1 : index -!CHECK: %[[VAL_44:.*]] = arith.subi %[[VAL_43]], %[[VAL_38]]#0 : index -!CHECK: %[[VAL_45:.*]] = acc.bounds lowerbound(%[[VAL_44]] : index) upperbound(%[[VAL_44]] : index) extent(%[[VAL_42]] : index) stride(%[[VAL_42]] : index) startIdx(%[[VAL_38]]#0 : index) -!CHECK: %[[VAL_46:.*]] = arith.constant 2 : index -!CHECK: %[[VAL_47:.*]] = arith.subi %[[VAL_46]], %[[VAL_40]]#0 : index -!CHECK: %[[VAL_48:.*]] = acc.bounds lowerbound(%[[VAL_47]] : index) upperbound(%[[VAL_47]] : index) extent(%[[VAL_42]] : index) stride(%[[VAL_42]] : index) startIdx(%[[VAL_40]]#0 : index) -!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[VAL_41]] : !fir.heap>) bounds(%[[VAL_45]], %[[VAL_48]]) -> !fir.heap> {name = "e(2_8)%a(1,2)", structured = false} -!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap>) - -end subroutine diff --git a/flang/test/Lower/OpenACC/acc-exit-data-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-exit-data-unwrap-defaultbounds.f90 deleted file mode 100644 index fd942173b637a..0000000000000 --- a/flang/test/Lower/OpenACC/acc-exit-data-unwrap-defaultbounds.f90 +++ /dev/null @@ -1,107 +0,0 @@ -! This test checks lowering of OpenACC exit data directive. - -! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s - -subroutine acc_exit_data - integer :: async = 1 - real, dimension(10, 10) :: a, b, c - real, pointer :: d - logical :: ifCondition = .TRUE. - -!CHECK: %[[A:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ea"} -!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] -!CHECK: %[[B:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Eb"} -!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -!CHECK: %[[C:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ec"} -!CHECK: %[[DECLC:.*]]:2 = hlfir.declare %[[C]] -!CHECK: %[[D:.*]] = fir.alloca !fir.box> {bindc_name = "d", uniq_name = "{{.*}}Ed"} -!CHECK: %[[DECLD:.*]]:2 = hlfir.declare %[[D]] - - !$acc exit data delete(a) -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: acc.exit_data dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false} - - !$acc exit data delete(a) if(.true.) 
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: %[[IF1:.*]] = arith.constant true -!CHECK: acc.exit_data if(%[[IF1]]) dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false} - - !$acc exit data delete(a) if(ifCondition) -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: %[[IFCOND:.*]] = fir.load %{{.*}} : !fir.ref> -!CHECK: %[[IF2:.*]] = fir.convert %[[IFCOND]] : (!fir.logical<4>) -> i1 -!CHECK: acc.exit_data if(%[[IF2]]) dataOperands(%[[DEVPTR]] : !fir.ref>){{$}} -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false} - - !$acc exit data delete(a) delete(b) delete(c) -!CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: %[[DEVPTR_B:.*]] = acc.getdeviceptr varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "b", structured = false} -!CHECK: %[[DEVPTR_C:.*]] = acc.getdeviceptr varPtr(%[[DECLC]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "c", structured = false} -!CHECK: acc.exit_data dataOperands(%[[DEVPTR_A]], %[[DEVPTR_B]], %[[DEVPTR_C]] : !fir.ref>, !fir.ref>, !fir.ref>){{$}} -!CHECK: acc.delete accPtr(%[[DEVPTR_A]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false} -!CHECK: acc.delete accPtr(%[[DEVPTR_B]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "b", structured = false} -!CHECK: acc.delete accPtr(%[[DEVPTR_C]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "c", structured = false} - - !$acc exit data copyout(a) delete(b) detach(d) -!CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: %[[DEVPTR_B:.*]] = acc.getdeviceptr varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "b", structured = false} -!CHECK: %[[BOX_D:.*]] = fir.load %[[DECLD]]#0 : !fir.ref>> -!CHECK: %[[D_ADDR:.*]] = fir.box_addr %[[BOX_D]] : (!fir.box>) -> !fir.ptr -!CHECK: %[[DEVPTR_D:.*]] = acc.getdeviceptr varPtr(%[[D_ADDR]] : !fir.ptr) -> !fir.ptr {dataClause = #acc, name = "d", structured = false} -!CHECK: acc.exit_data dataOperands(%[[DEVPTR_A]], %[[DEVPTR_B]], %[[DEVPTR_D]] : !fir.ref>, !fir.ref>, !fir.ptr) -!CHECK: acc.copyout accPtr(%[[DEVPTR_A]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref>) {name = "a", structured = false} -!CHECK: acc.delete accPtr(%[[DEVPTR_B]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "b", structured = false} -!CHECK: acc.detach accPtr(%[[DEVPTR_D]] : !fir.ptr) {name = "d", structured = false} - - !$acc exit data delete(a) async -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: acc.exit_data async dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async {name = "a", structured = false} - - !$acc exit data delete(a) wait -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : 
!fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: acc.exit_data wait dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false} - - !$acc exit data delete(a) async wait -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: acc.exit_data async wait dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async {name = "a", structured = false} - - !$acc exit data delete(a) async(1) -!CHECK: %[[ASYNC1:.*]] = arith.constant 1 : i32 -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC1]] : i32) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: acc.exit_data async(%[[ASYNC1]] : i32) dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC1]] : i32) {name = "a", structured = false} - - - !$acc exit data delete(a) async(async) -!CHECK: %[[ASYNC2:.*]] = fir.load %{{.*}} : !fir.ref -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC2]] : i32) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: acc.exit_data async(%[[ASYNC2]] : i32) dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC2]] : i32) {name = "a", structured = false} - - !$acc exit data delete(a) wait(1) -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: %[[WAIT1:.*]] = arith.constant 1 : i32 -!CHECK: acc.exit_data wait(%[[WAIT1]] : i32) dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false} - - !$acc exit data delete(a) wait(queues: 1, 2) -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: %[[WAIT2:.*]] = arith.constant 1 : i32 -!CHECK: %[[WAIT3:.*]] = arith.constant 2 : i32 -!CHECK: acc.exit_data wait(%[[WAIT2]], %[[WAIT3]] : i32, i32) dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false} - - !$acc exit data delete(a) wait(devnum: 1: queues: 1, 2) -!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {dataClause = #acc, name = "a", structured = false} -!CHECK: %[[WAIT4:.*]] = arith.constant 1 : i32 -!CHECK: %[[WAIT5:.*]] = arith.constant 2 : i32 -!CHECK: %[[WAIT6:.*]] = arith.constant 1 : i32 -!CHECK: acc.exit_data wait_devnum(%[[WAIT6]] : i32) wait(%[[WAIT4]], %[[WAIT5]] : i32, i32) dataOperands(%[[DEVPTR]] : !fir.ref>) -!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false} - -end subroutine acc_exit_data diff --git a/flang/test/Lower/OpenACC/acc-host-data-cuda-device.f90 b/flang/test/Lower/OpenACC/acc-host-data-cuda-device.f90 new file mode 100644 index 0000000000000..da034adec39c4 --- /dev/null +++ 
b/flang/test/Lower/OpenACC/acc-host-data-cuda-device.f90 @@ -0,0 +1,43 @@ + +! RUN: bbc -fopenacc -fcuda -emit-hlfir %s -o - | FileCheck %s + +module m + +interface doit +subroutine __device_sub(a) + real(4), device, intent(in) :: a(:,:,:) + !dir$ ignore_tkr(c) a +end +subroutine __host_sub(a) + real(4), intent(in) :: a(:,:,:) + !dir$ ignore_tkr(c) a +end +end interface +end module + +program testex1 +integer, parameter :: ntimes = 10 +integer, parameter :: ni=128 +integer, parameter :: nj=256 +integer, parameter :: nk=64 +real(4), dimension(ni,nj,nk) :: a + +!$acc enter data copyin(a) + +block; use m +!$acc host_data use_device(a) +do nt = 1, ntimes + call doit(a) +end do +!$acc end host_data +end block + +block; use m +do nt = 1, ntimes + call doit(a) +end do +end block +end + +! CHECK: fir.call @_QP__device_sub +! CHECK: fir.call @_QP__host_sub diff --git a/flang/test/Lower/OpenACC/acc-host-data-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-host-data-unwrap-defaultbounds.f90 deleted file mode 100644 index 2de7cc5761a2b..0000000000000 --- a/flang/test/Lower/OpenACC/acc-host-data-unwrap-defaultbounds.f90 +++ /dev/null @@ -1,54 +0,0 @@ -! This test checks lowering of OpenACC host_data directive. - -! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s - -subroutine acc_host_data() - real, dimension(10) :: a - logical :: ifCondition = .TRUE. - -! CHECK: %[[A:.*]] = fir.alloca !fir.array<10xf32> {bindc_name = "a", uniq_name = "_QFacc_host_dataEa"} -! CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] -! CHECK: %[[IFCOND:.*]] = fir.address_of(@_QFacc_host_dataEifcondition) : !fir.ref> -! CHECK: %[[DECLIFCOND:.*]]:2 = hlfir.declare %[[IFCOND]] - - !$acc host_data use_device(a) - !$acc end host_data - -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index) -! CHECK: %[[DA0:.*]] = acc.use_device varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a"} -! CHECK: %[[DA1:.*]] = acc.use_device varPtr(%[[DECLA]]#1 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a"} - ! CHECK: acc.host_data dataOperands(%[[DA0]], %[[DA1]] : !fir.ref>, !fir.ref>) - - !$acc host_data use_device(a) if_present - !$acc end host_data - -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index) -! CHECK: %[[DA0:.*]] = acc.use_device varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a"} -! CHECK: %[[DA1:.*]] = acc.use_device varPtr(%[[DECLA]]#1 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a"} -! CHECK: acc.host_data dataOperands(%[[DA0]], %[[DA1]] : !fir.ref>{{.*}}) { -! CHECK: } attributes {ifPresent} - - !$acc host_data use_device(a) if(ifCondition) - !$acc end host_data - -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index) -! CHECK: %[[DA:.*]] = acc.use_device varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a"} -! CHECK: %[[LOAD_IFCOND:.*]] = fir.load %[[DECLIFCOND]]#0 : !fir.ref> -! CHECK: %[[IFCOND_I1:.*]] = fir.convert %[[LOAD_IFCOND]] : (!fir.logical<4>) -> i1 -! CHECK: acc.host_data if(%[[IFCOND_I1]]) dataOperands(%[[DA]]{{.*}} : !fir.ref>{{.*}}) - - !$acc host_data use_device(a) if(.true.) - !$acc end host_data - -! 
CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index) -! CHECK: %[[DA:.*]] = acc.use_device varPtr(%[[DECLA]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a"} -! CHECK: acc.host_data dataOperands(%[[DA]]{{.*}} : !fir.ref>{{.*}}) - - !$acc host_data use_device(a) if(.false.) - a = 1.0 - !$acc end host_data - -! CHECK-NOT: acc.host_data -! CHECK: hlfir.assign %{{.*}} to %[[DECLA]]#0 - -end subroutine diff --git a/flang/test/Lower/OpenACC/acc-private-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-private-unwrap-defaultbounds.f90 deleted file mode 100644 index b1dc4e79f9f73..0000000000000 --- a/flang/test/Lower/OpenACC/acc-private-unwrap-defaultbounds.f90 +++ /dev/null @@ -1,403 +0,0 @@ -! This test checks lowering of OpenACC loop directive. - -! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s - -! CHECK-LABEL: acc.private.recipe @privatization_ref_10xf32 : !fir.ref> init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>): -! CHECK: %[[C10:.*]] = arith.constant 10 : index -! CHECK: %[[SHAPE:.*]] = fir.shape %[[C10]] : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<10xf32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_box_UxUx2xi32 : !fir.box> init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>): -! CHECK: %[[DIM0:.*]]:3 = fir.box_dims %arg0, %c0{{.*}} : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[DIM1:.*]]:3 = fir.box_dims %arg0, %c1{{.*}} : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[DIM0]]#1, %[[DIM1]]#1, %c2{{.*}} : (index, index, index) -> !fir.shape<3> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %[[DIM0]]#1, %[[DIM1]]#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<3>) -> (!fir.box>, !fir.heap>) -! CHECK: acc.yield %[[DECL]]#0 : !fir.box> -! CHECK: } copy { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>, %[[ARG1:.*]]: !fir.box>, %[[LB0:.*]]: index, %[[UB0:.*]]: index, %[[STEP0:.*]]: index, %[[LB1:.*]]: index, %[[UB1:.*]]: index, %[[STEP1:.*]]: index, %[[LB2:.*]]: index, %[[UB2:.*]]: index, %[[STEP2:.*]]: index): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}}, %{{.*}} : (index, index, index) -> !fir.shape<3> -! CHECK: %[[DES_SRC:.*]] = hlfir.designate %[[ARG0]] (%[[LB0]]:%[[UB0]]:%[[STEP0]], %[[LB1]]:%[[UB1]]:%[[STEP1]], %[[LB2]]:%[[UB2]]:%[[STEP2]]) shape %[[SHAPE]] : (!fir.box>, index, index, index, index, index, index, index, index, index, !fir.shape<3>) -> !fir.box> -! CHECK: %[[DES_DST:.*]] = hlfir.designate %[[ARG1]] (%[[LB0]]:%[[UB0]]:%[[STEP0]], %[[LB1]]:%[[UB1]]:%[[STEP1]], %[[LB2]]:%[[UB2]]:%[[STEP2]]) shape %[[SHAPE]] : (!fir.box>, index, index, index, index, index, index, index, index, index, !fir.shape<3>) -> !fir.box> -! CHECK: hlfir.assign %[[DES_SRC]] to %[[DES_DST]] : !fir.box>, !fir.box> -! CHECK: acc.terminator -! CHECK: } - -! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_section_lb4.ub9_box_Uxi32 : !fir.box> init { -! CHECK: ^bb0(%{{.*}}: !fir.box>): -! CHECK: } copy { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>, %[[ARG1:.*]]: !fir.box>): -! CHECK: %[[LB:.*]] = arith.constant 4 : index -! 
CHECK: %[[UB:.*]] = arith.constant 9 : index -! CHECK: %[[STEP:.*]] = arith.constant 1 : index -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[EXT0:.*]] = arith.subi %[[UB]], %[[LB]] : index -! CHECK: %[[EXT1:.*]] = arith.addi %[[EXT0]], %[[C1]] : index -! CHECK: %[[EXT2:.*]] = arith.divsi %[[EXT1]], %[[STEP]] : index -! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[EXT2]], %[[C0]] : index -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[EXT2]], %[[C0]] : index -! CHECK: %[[SHAPE:.*]] = fir.shape %[[SELECT]] : (index) -> !fir.shape<1> -! CHECK: %[[LEFT:.*]] = hlfir.designate %[[ARG0]] shape %[[SHAPE]] : (!fir.box>, !fir.shape<1>) -> !fir.box> -! CHECK: %[[RIGHT:.*]] = hlfir.designate %[[ARG1]] shape %[[SHAPE]] : (!fir.box>, !fir.shape<1>) -> !fir.box> -! CHECK: hlfir.assign %[[LEFT]] to %[[RIGHT]] : !fir.box>, !fir.box> -! CHECK: acc.terminator -! CHECK: } - -! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_box_Uxi32 : !fir.box> init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>): -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %c0 : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %[[BOX_DIMS]]#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<1>) -> (!fir.box>, !fir.heap>) -! CHECK: acc.yield %[[DECL]]#0 : !fir.box> -! CHECK: } copy { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>, %[[ARG1:.*]]: !fir.box>, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index, %[[ARG4:.*]]: index): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[ARG0]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %[[SHAPE]] : (!fir.box>, index, index, index, !fir.shape<1>) -> !fir.box> -! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[ARG1]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %[[SHAPE]] : (!fir.box>, index, index, index, !fir.shape<1>) -> !fir.box> -! CHECK: hlfir.assign %[[DES_V1]] to %[[DES_V2]] : !fir.box>, !fir.box> -! CHECK: acc.terminator -! CHECK: } - -! CHECK-LABEL: acc.private.recipe @privatization_box_UxUx2xi32 : !fir.box> init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>): -! CHECK: %[[DIM0:.*]]:3 = fir.box_dims %arg0, %c0{{.*}} : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[DIM1:.*]]:3 = fir.box_dims %arg0, %c1{{.*}} : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[DIM0]]#1, %[[DIM1]]#1, %c2{{.*}} : (index, index, index) -> !fir.shape<3> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %[[DIM0]]#1, %[[DIM1]]#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<3>) -> (!fir.box>, !fir.heap>) -! CHECK: acc.yield %[[DECL]]#0 : !fir.box> -! CHECK: } - -! CHECK-LABEL: acc.private.recipe @privatization_box_ptr_Uxi32 : !fir.box>> init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>>): -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %arg0, %c0 : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %0#1 {bindc_name = ".tmp", uniq_name = ""} -! 
CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<1>) -> (!fir.box>, !fir.heap>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box> -! CHECK: } - -! CHECK-LABEL: acc.private.recipe @privatization_box_heap_Uxi32 : !fir.box>> init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>>): -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %[[BOX_DIMS]]#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<1>) -> (!fir.box>, !fir.heap>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box> -! CHECK: } - -! CHECK-LABEL: acc.private.recipe @privatization_box_Uxi32 : !fir.box> init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>): -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %0#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<1>) -> (!fir.box>, !fir.heap>) -! CHECK: acc.yield %[[DECLARE:.*]]#0 : !fir.box> -! CHECK: } - -! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_section_lb50.ub99_ref_50xf32 : !fir.ref> init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<50xf32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } copy { -! CHECK: ^bb0(%[[SRC:.*]]: !fir.ref>, %[[DST:.*]]: !fir.ref>): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[DECL_SRC:.*]]:2 = hlfir.declare %[[SRC]](%[[SHAPE]]) {uniq_name = ""} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[DECL_DST:.*]]:2 = hlfir.declare %[[DST]](%[[SHAPE]]) {uniq_name = ""} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[DES_SRC:.*]] = hlfir.designate %[[DECL_SRC]]#0 shape %[[SHAPE:.*]] : (!fir.ref>, !fir.shape<1>) -> !fir.ref> -! CHECK: %[[DES_DST:.*]] = hlfir.designate %[[DECL_DST]]#0 shape %[[SHAPE:.*]] : (!fir.ref>, !fir.shape<1>) -> !fir.ref> -! CHECK: hlfir.assign %[[DES_SRC]] to %[[DES_DST]] : !fir.ref>, !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_section_ext100_ref_100xf32 : !fir.ref> init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } copy { -! CHECK: ^bb0(%[[SRC:.*]]: !fir.ref>, %[[DST:.*]]: !fir.ref>): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[DECL_SRC:.*]]:2 = hlfir.declare %[[SRC]](%[[SHAPE]]) {uniq_name = ""} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! 
CHECK: %[[DECL_DST:.*]]:2 = hlfir.declare %[[DST]](%[[SHAPE]]) {uniq_name = ""} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[DES_SRC:.*]] = hlfir.designate %[[DECL_SRC]]#0 shape %[[SHAPE]] : (!fir.ref>, !fir.shape<1>) -> !fir.ref> -! CHECK: %[[DES_DST:.*]] = hlfir.designate %[[DECL_DST]]#0 shape %[[SHAPE]] : (!fir.ref>, !fir.shape<1>) -> !fir.ref> -! CHECK: hlfir.assign %[[DES_SRC]] to %[[DES_DST]] : !fir.ref>, !fir.ref> -! CHECK: acc.terminator -! CHECK: } - -! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_ref_i32 : !fir.ref init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.private.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } copy { -! CHECK: ^bb0(%[[SRC:.*]]: !fir.ref, %[[DST:.*]]: !fir.ref): -! CHECK: %[[VALUE:.*]] = fir.load %[[SRC]] : !fir.ref -! CHECK: fir.store %[[VALUE]] to %[[DST]] : !fir.ref -! CHECK: acc.terminator -! CHECK: } - -! CHECK-LABEL: acc.private.recipe @privatization_ref_50xf32 : !fir.ref> init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<50xf32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.private.recipe @privatization_ref_100xf32 : !fir.ref> init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.private.recipe @privatization_ref_i32 : !fir.ref init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.private.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } - -program acc_private - integer :: i, c - integer, parameter :: n = 100 - real, dimension(n) :: a, b - -! CHECK: %[[B:.*]] = fir.address_of(@_QFEb) : !fir.ref> -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[C:.*]] = fir.alloca i32 {bindc_name = "c", uniq_name = "_QFEc"} -! CHECK: %[[DECLC:.*]]:2 = hlfir.declare %[[C]] - - !$acc loop private(c) - DO i = 1, n - c = i - a(i) = b(i) + c - END DO - -! CHECK: %[[C_PRIVATE:.*]] = acc.private varPtr(%[[DECLC]]#0 : !fir.ref) -> !fir.ref {name = "c"} -! CHECK: acc.loop private({{.*}}@privatization_ref_i32 -> %[[C_PRIVATE]] : !fir.ref{{.*}}) -! CHECK: acc.yield - - !$acc loop private(b) - DO i = 1, n - c = i - a(i) = b(i) + c - END DO - -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[LB:.*]] = arith.constant 0 : index -! CHECK: %[[UB:.*]] = arith.subi %{{.*}}, %[[C1]] : index -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -! CHECK: %[[B_PRIVATE:.*]] = acc.private varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "b"} -! CHECK: acc.loop private({{.*}}@privatization_ref_100xf32 -> %[[B_PRIVATE]] : !fir.ref>{{.*}}) -! 
CHECK: acc.yield - - !$acc loop private(b(1:50)) - DO i = 1, n - c = i - a(i) = b(i) + c - END DO - -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[LB:.*]] = arith.constant 0 : index -! CHECK: %[[UB:.*]] = arith.constant 49 : index -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -! CHECK: %[[B_PRIVATE:.*]] = acc.private varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "b(1:50)"} -! CHECK: acc.loop private({{.*}}@privatization_ref_50xf32 -> %[[B_PRIVATE]] : !fir.ref>{{.*}}) - - !$acc parallel loop firstprivate(c) - DO i = 1, n - c = i - a(i) = b(i) + c - END DO - -! CHECK: %[[FP_C:.*]] = acc.firstprivate varPtr(%[[DECLC]]#0 : !fir.ref) -> !fir.ref {name = "c"} -! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_ref_i32 -> %[[FP_C]] : !fir.ref) -! CHECK: acc.yield - - !$acc parallel loop firstprivate(b) - DO i = 1, n - c = i - a(i) = b(i) + c - END DO - -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[LB:.*]] = arith.constant 0 : index -! CHECK: %[[UB:.*]] = arith.subi %{{.*}}, %[[C1]] : index -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -! CHECK: %[[FP_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "b"} -! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_section_ext100_ref_100xf32 -> %[[FP_B]] : !fir.ref>) -! CHECK: acc.yield - - !$acc parallel loop firstprivate(b(51:100)) - DO i = 1, n - c = i - a(i) = b(i) + c - END DO - -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[LB:.*]] = arith.constant 50 : index -! CHECK: %[[UB:.*]] = arith.constant 99 : index -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -! CHECK: %[[FP_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "b(51:100)"} -! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_section_lb50.ub99_ref_50xf32 -> %[[FP_B]] : !fir.ref>) - -end program - -subroutine acc_private_assumed_shape(a, n) - integer :: a(:), i, n - - !$acc parallel loop private(a) - do i = 1, n - a(i) = i - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_private_assumed_shape( -! CHECK-SAME: %[[ARG0:.*]]: !fir.box> {fir.bindc_name = "a"} -! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_private_assumed_shapeEa"} : (!fir.box>, !fir.dscope) -> (!fir.box>, !fir.box>) -! CHECK: acc.parallel {{.*}} { -! CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 : (!fir.box>) -> !fir.ref> -! CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[ADDR]] : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_Uxi32 -> %[[PRIVATE]] : !fir.ref>{{.*}}) - -subroutine acc_private_allocatable_array(a, n) - integer, allocatable :: a(:) - integer :: i, n - - !$acc parallel loop private(a) - do i = 1, n - a(i) = i - end do - - !$acc serial private(a) - a(i) = 1 - !$acc end serial -end subroutine - -! CHECK-LABEL: func.func @_QPacc_private_allocatable_array( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref>>> {fir.bindc_name = "a"} -! 
CHECK: %[[DECLA_A:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFacc_private_allocatable_arrayEa"} : (!fir.ref>>>, !fir.dscope) -> (!fir.ref>>>, !fir.ref>>>) -! CHECK: acc.parallel {{.*}} { -! CHECK: %[[BOX:.*]] = fir.load %[[DECLA_A]]#0 : !fir.ref>>> -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box>>) -> !fir.heap> -! CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%{{.*}}) -> !fir.heap> {name = "a"} -! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_heap_Uxi32 -> %[[PRIVATE]] : !fir.heap>{{.*}}) -! CHECK: acc.serial private(@privatization_box_heap_Uxi32 -> %{{.*}} : !fir.heap>) - -subroutine acc_private_pointer_array(a, n) - integer, pointer :: a(:) - integer :: i, n - - !$acc parallel loop private(a) - do i = 1, n - a(i) = i - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_private_pointer_array( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref>>> {fir.bindc_name = "a"}, %arg1: !fir.ref {fir.bindc_name = "n"}) { -! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %arg0 dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFacc_private_pointer_arrayEa"} : (!fir.ref>>>, !fir.dscope) -> (!fir.ref>>>, !fir.ref>>>) -! CHECK: acc.parallel {{.*}} { -! CHECK: %[[BOX:.*]] = fir.load %[[DECLA_A]]#0 : !fir.ref>>> -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box>>) -> !fir.ptr> -! CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[BOX_ADDR]] : !fir.ptr>) bounds(%{{.*}}) -> !fir.ptr> {name = "a"} -! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_ptr_Uxi32 -> %[[PRIVATE]] : !fir.ptr>{{.*}}) - -subroutine acc_private_dynamic_extent(a, n) - integer :: n, i - integer :: a(n, n, 2) - - !$acc parallel loop private(a) - do i = 1, n - a(i, i, 1) = i - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_private_dynamic_extent( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref {fir.bindc_name = "n"}) { -! CHECK: %[[DECL_N:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_private_dynamic_extentEn"} : (!fir.ref, !fir.dscope) -> (!fir.ref, !fir.ref) -! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_private_dynamic_extentEa"} : (!fir.ref>, !fir.shape<3>, !fir.dscope) -> (!fir.box>, !fir.ref>) -! CHECK: acc.parallel {{.*}} { -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 : (!fir.box>) -> !fir.ref> -! CHECK: %[[PRIV:.*]] = acc.private varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}, %{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_UxUx2xi32 -> %[[PRIV]] : !fir.ref>{{.*}}) - -subroutine acc_firstprivate_assumed_shape(a, n) - integer :: a(:), i, n - - !$acc parallel loop firstprivate(a) - do i = 1, n - a(i) = i - end do -end subroutine - -subroutine acc_firstprivate_assumed_shape_with_section(a, n) - integer :: a(:), i, n - - !$acc parallel loop firstprivate(a(5:10)) - do i = 1, n - a(i) = i - end do -end subroutine - -subroutine acc_firstprivate_dynamic_extent(a, n) - integer :: n, i - integer :: a(n, n, 2) - - !$acc parallel loop firstprivate(a) - do i = 1, n - a(i, i, 1) = i - end do -end subroutine - -! 
CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_box_UxUx2xi32 -> %{{.*}} : !fir.ref>) - -module acc_declare_equivalent - integer, parameter :: n = 10 - real :: v1(n) - real :: v2(n) - equivalence(v1(1), v2(1)) -contains - subroutine sub1() - !$acc parallel private(v2) - !$acc end parallel - end subroutine -end module - -! CHECK: acc.parallel private(@privatization_ref_10xf32 -> %{{.*}} : !fir.ref>) - -subroutine acc_private_use() - integer :: i, j - - !$acc parallel loop - do i = 1, 10 - j = i - end do -end - -! CHECK-LABEL: func.func @_QPacc_private_use() -! CHECK: %[[I:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFacc_private_useEi"} -! CHECK: %[[DECL_I:.*]]:2 = hlfir.declare %[[I]] {uniq_name = "_QFacc_private_useEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: acc.parallel -! CHECK: %[[PRIV_I:.*]] = acc.private varPtr(%[[DECL_I]]#0 : !fir.ref) -> !fir.ref {implicit = true, name = "i"} -! CHECK: %[[DECL_PRIV_I:.*]]:2 = hlfir.declare %[[PRIV_I]] {uniq_name = "_QFacc_private_useEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: acc.loop {{.*}} private(@privatization_ref_i32 -> %[[PRIV_I]] : !fir.ref) control(%[[IV0:.*]] : i32) = (%c1{{.*}} : i32) to (%c10{{.*}} : i32) step (%c1{{.*}} : i32) -! CHECK: fir.store %[[IV0]] to %[[DECL_PRIV_I]]#0 : !fir.ref -! CHECK: %{{.*}} = fir.load %[[DECL_PRIV_I]]#0 : !fir.ref diff --git a/flang/test/Lower/OpenACC/acc-reduction-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-reduction-unwrap-defaultbounds.f90 deleted file mode 100644 index b48f530473740..0000000000000 --- a/flang/test/Lower/OpenACC/acc-reduction-unwrap-defaultbounds.f90 +++ /dev/null @@ -1,1227 +0,0 @@ -! This test checks lowering of OpenACC reduction clause. - -! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s - -! CHECK-LABEL: acc.reduction.recipe @reduction_max_box_UxUxf32 : !fir.box> reduction_operator init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>): -! CHECK: %[[CST:.*]] = arith.constant -1.401300e-45 : f32 -! CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[ARG0]], %c0{{.*}} : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[ARG0]], %c1 : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[DIMS0]]#1, %[[DIMS1]]#1 : (index, index) -> !fir.shape<2> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %[[DIMS0]]#1, %[[DIMS1]]#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<2>) -> (!fir.box>, !fir.heap>) -! CHECK: hlfir.assign %[[CST]] to %[[DECL]]#0 : f32, !fir.box> -! CHECK: acc.yield %[[DECL]]#0 : !fir.box> -! CHECK: } combiner { -! CHECK: ^bb0(%[[V1:.*]]: !fir.box>, %[[V2:.*]]: !fir.box>, %[[LB0:.*]]: index, %[[UB0:.*]]: index, %[[STEP0:.*]]: index, %[[LB1:.*]]: index, %[[UB1:.*]]: index, %[[STEP1:.*]]: index): - -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}} : (index, index) -> !fir.shape<2> -! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[V1]] (%[[LB0]]:%[[UB0]]:%[[STEP0]], %[[LB1]]:%[[UB1]]:%[[STEP1]]) shape %[[SHAPE]] : (!fir.box>, index, index, index, index, index, index, !fir.shape<2>) -> !fir.box> -! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[V2]] (%[[LB0]]:%[[UB0]]:%[[STEP0]], %[[LB1]]:%[[UB1]]:%[[STEP1]]) shape %[[SHAPE]] : (!fir.box>, index, index, index, index, index, index, !fir.shape<2>) -> !fir.box> -! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %[[SHAPE]] unordered : (!fir.shape<2>) -> !hlfir.expr { -! 
CHECK: ^bb0(%[[ARG0:.*]]: index, %[[ARG1:.*]]: index): -! CHECK: %[[D1:.*]] = hlfir.designate %[[DES_V1]] (%[[ARG0]], %[[ARG1]]) : (!fir.box>, index, index) -> !fir.ref -! CHECK: %[[D2:.*]] = hlfir.designate %[[DES_V2]] (%[[ARG0]], %[[ARG1]]) : (!fir.box>, index, index) -> !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[D1]] : !fir.ref -! CHECK: %[[LOAD2:.*]] = fir.load %[[D2]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[LOAD1]], %[[LOAD2]] {{.*}} : f32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : f32 -! CHECK: hlfir.yield_element %[[SELECT]] : f32 -! CHECK: } -! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[V1]] : !hlfir.expr, !fir.box> -! CHECK: acc.yield %[[V1]] : !fir.box> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_max_box_ptr_Uxf32 : !fir.box>> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.box>>): -! CHECK: } combiner { -! CHECK: ^bb0(%{{.*}}: !fir.box>>, %{{.*}}: !fir.box>>, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index): -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_max_box_heap_Uxf32 : !fir.box>> reduction_operator init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>>): -! CHECK: %[[CST:.*]] = arith.constant -1.401300e-45 : f32 -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box>>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %[[BOX_DIMS]]#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %2(%1) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<1>) -> (!fir.box>, !fir.heap>) -! CHECK: hlfir.assign %[[CST]] to %[[DECLARE]]#0 : f32, !fir.box> -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>>, %[[ARG1:.*]]: !fir.box>>, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index, %[[ARG4:.*]]: index): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[ARG0]] (%[[ARG2]]:%[[ARG3]]:%[[ARG4]]) shape %[[SHAPE]] : (!fir.box>>, index, index, index, !fir.shape<1>) -> !fir.box>> -! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[ARG1]] (%[[ARG2]]:%[[ARG3]]:%[[ARG4]]) shape %[[SHAPE]] : (!fir.box>>, index, index, index, !fir.shape<1>) -> !fir.box>> -! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %[[SHAPE]] unordered : (!fir.shape<1>) -> !hlfir.expr { -! CHECK: ^bb0(%[[IV:.*]]: index): -! CHECK: %[[V1:.*]] = hlfir.designate %[[DES_V1]] (%[[IV]]) : (!fir.box>>, index) -> !fir.ref -! CHECK: %[[V2:.*]] = hlfir.designate %[[DES_V2]] (%[[IV]]) : (!fir.box>>, index) -> !fir.ref -! CHECK: %[[LOAD_V1:.*]] = fir.load %[[V1]] : !fir.ref -! CHECK: %[[LOAD_V2:.*]] = fir.load %[[V2]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[LOAD_V1]], %[[LOAD_V2]] {{.*}} : f32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD_V1]], %[[LOAD_V2]] : f32 -! CHECK: hlfir.yield_element %[[SELECT]] : f32 -! CHECK: } -! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[ARG0]] : !hlfir.expr, !fir.box>> -! CHECK: acc.yield %[[ARG0]] : !fir.box>> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_lb1.ub3_box_Uxi32 : !fir.box> reduction_operator init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>): -! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %c0{{.*}} : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1> -! 
CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %0#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<1>) -> (!fir.box>, !fir.heap>) -! CHECK: hlfir.assign %c0{{.*}} to %[[DECLARE]]#0 : i32, !fir.box> -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>, %[[ARG1:.*]]: !fir.box>): -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[DES1:.*]] = hlfir.designate %[[ARG0]] shape %[[SHAPE]] : (!fir.box>, !fir.shape<1>) -> !fir.box> -! CHECK: %[[DES2:.*]] = hlfir.designate %[[ARG1]] shape %[[SHAPE]] : (!fir.box>, !fir.shape<1>) -> !fir.box> -! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %[[SHAPE]] unordered : (!fir.shape<1>) -> !hlfir.expr { -! CHECK: ^bb0(%[[IV:.*]]: index): -! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[DES1]] (%[[IV]]) : (!fir.box>, index) -> !fir.ref -! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[DES2]] (%[[IV]]) : (!fir.box>, index) -> !fir.ref -! CHECK: %[[LOAD_V1:.*]] = fir.load %[[DES_V1]] : !fir.ref -! CHECK: %[[LOAD_V2:.*]] = fir.load %[[DES_V2]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD_V1]], %[[LOAD_V2]] : i32 -! CHECK: hlfir.yield_element %[[COMBINED]] : i32 -! CHECK: } -! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[ARG0]] : !hlfir.expr, !fir.box> -! CHECK: acc.yield %[[ARG0]] : !fir.box> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_max_box_Uxf32 : !fir.box> reduction_operator init { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>): -! CHECK: %[[INIT_VALUE:.*]] = arith.constant -1.401300e-45 : f32 -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %0#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<1>) -> (!fir.box>, !fir.heap>) -! CHECK: hlfir.assign %[[INIT_VALUE]] to %[[DECLARE]]#0 : f32, !fir.box> -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>, %[[ARG1:.*]]: !fir.box> -! CHECK: %[[LEFT:.*]] = hlfir.designate %[[ARG0]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %{{.*}} : (!fir.box>, index, index, index, !fir.shape<1>) -> !fir.box> -! CHECK: %[[RIGHT:.*]] = hlfir.designate %[[ARG1]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %{{.*}} : (!fir.box>, index, index, index, !fir.shape<1>) -> !fir.box> -! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %{{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr { -! CHECK: ^bb0(%{{.*}}: index): -! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[LEFT]] (%{{.*}}) : (!fir.box>, index) -> !fir.ref -! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[RIGHT]] (%{{.*}}) : (!fir.box>, index) -> !fir.ref -! CHECK: %[[LOAD_V1:.*]] = fir.load %[[DES_V1]] : !fir.ref -! CHECK: %[[LOAD_V2:.*]] = fir.load %[[DES_V2]] : !fir.ref -! CHECK: %[[CMPF:.*]] = arith.cmpf ogt, %[[LOAD_V1]], %[[LOAD_V2]] {{.*}} : f32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMPF]], %[[LOAD_V1]], %[[LOAD_V2]] : f32 -! CHECK: hlfir.yield_element %[[SELECT]] : f32 -! CHECK: } -! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[ARG0]] : !hlfir.expr, !fir.box> -! CHECK: acc.yield %[[ARG0]] : !fir.box> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_add_box_Uxi32 : !fir.box> reduction_operator init { -! 
CHECK: ^bb0(%[[ARG0:.*]]: !fir.box>): -! CHECK: %[[INIT_VALUE:.*]] = arith.constant 0 : i32 -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box>, index) -> (index, index, index) -! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1> -! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array, %[[BOX_DIMS]]#1 {bindc_name = ".tmp", uniq_name = ""} -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap>, !fir.shape<1>) -> (!fir.box>, !fir.heap>) -! CHECK: hlfir.assign %[[INIT_VALUE]] to %[[DECLARE]]#0 : i32, !fir.box> -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box> -! CHECK: } combiner { -! CHECK: ^bb0(%[[V1:.*]]: !fir.box>, %[[V2:.*]]: !fir.box> -! CHECK: %[[LEFT:.*]] = hlfir.designate %[[ARG0]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %{{.*}} : (!fir.box>, index, index, index, !fir.shape<1>) -> !fir.box> -! CHECK: %[[RIGHT:.*]] = hlfir.designate %[[ARG1]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %{{.*}} : (!fir.box>, index, index, index, !fir.shape<1>) -> !fir.box> -! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %{{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr { -! CHECK: ^bb0(%{{.*}}: index): -! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[LEFT]] (%{{.*}}) : (!fir.box>, index) -> !fir.ref -! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[RIGHT]] (%{{.*}}) : (!fir.box>, index) -> !fir.ref -! CHECK: %[[LOAD_V1:.*]] = fir.load %[[DES_V1]] : !fir.ref -! CHECK: %[[LOAD_V2:.*]] = fir.load %[[DES_V2]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD_V1]], %[[LOAD_V2]] : i32 -! CHECK: hlfir.yield_element %[[COMBINED]] : i32 -! CHECK: } -! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[V1]] : !hlfir.expr, !fir.box> -! CHECK: acc.yield %arg0 : !fir.box> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_mul_ref_z32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[REAL:.*]] = arith.constant 1.000000e+00 : f32 -! CHECK: %[[IMAG:.*]] = arith.constant 0.000000e+00 : f32 -! CHECK: %[[UNDEF:.*]] = fir.undefined complex -! CHECK: %[[UNDEF1:.*]] = fir.insert_value %[[UNDEF]], %[[REAL]], [0 : index] : (complex, f32) -> complex -! CHECK: %[[UNDEF2:.*]] = fir.insert_value %[[UNDEF1]], %[[IMAG]], [1 : index] : (complex, f32) -> complex -! CHECK: %[[ALLOCA:.*]] = fir.alloca complex -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) -! CHECK: fir.store %[[UNDEF2]] to %[[DECLARE]]#0 : !fir.ref> -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref> -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref> -! CHECK: %[[COMBINED:.*]] = fir.mulc %[[LOAD0]], %[[LOAD1]] {fastmath = #arith.fastmath} : complex -! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref> -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_add_ref_z32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[REAL:.*]] = arith.constant 0.000000e+00 : f32 -! CHECK: %[[IMAG:.*]] = arith.constant 0.000000e+00 : f32 -! CHECK: %[[UNDEF:.*]] = fir.undefined complex -! CHECK: %[[UNDEF1:.*]] = fir.insert_value %[[UNDEF]], %[[REAL]], [0 : index] : (complex, f32) -> complex -! CHECK: %[[UNDEF2:.*]] = fir.insert_value %[[UNDEF1]], %[[IMAG]], [1 : index] : (complex, f32) -> complex -! 
CHECK: %[[ALLOCA:.*]] = fir.alloca complex -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) -! CHECK: fir.store %[[UNDEF2]] to %[[DECLARE]]#0 : !fir.ref> -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref> -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref> -! CHECK: %[[COMBINED:.*]] = fir.addc %[[LOAD0]], %[[LOAD1]] {fastmath = #arith.fastmath} : complex -! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref> -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_neqv_ref_l32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[CST:.*]] = arith.constant false -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.logical<4> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[CONVERT:.*]] = fir.convert %[[CST]] : (i1) -> !fir.logical<4> -! CHECK: fir.store %[[CONVERT]] to %[[DECLARE]]#0 : !fir.ref> -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref> -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref> -! CHECK: %[[CONV0:.*]] = fir.convert %[[LOAD0]] : (!fir.logical<4>) -> i1 -! CHECK: %[[CONV1:.*]] = fir.convert %[[LOAD1]] : (!fir.logical<4>) -> i1 -! CHECK: %[[CMP:.*]] = arith.cmpi ne, %[[CONV0]], %[[CONV1]] : i1 -! CHECK: %[[CMP_CONV:.*]] = fir.convert %[[CMP]] : (i1) -> !fir.logical<4> -! CHECK: fir.store %[[CMP_CONV]] to %[[ARG0]] : !fir.ref> -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_eqv_ref_l32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[CST:.*]] = arith.constant true -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.logical<4> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[CONVERT:.*]] = fir.convert %[[CST]] : (i1) -> !fir.logical<4> -! CHECK: fir.store %[[CONVERT]] to %[[DECLARE]]#0 : !fir.ref> -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref> -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref> -! CHECK: %[[CONV0:.*]] = fir.convert %[[LOAD0]] : (!fir.logical<4>) -> i1 -! CHECK: %[[CONV1:.*]] = fir.convert %[[LOAD1]] : (!fir.logical<4>) -> i1 -! CHECK: %[[CMP:.*]] = arith.cmpi eq, %[[CONV0]], %[[CONV1]] : i1 -! CHECK: %[[CMP_CONV:.*]] = fir.convert %[[CMP]] : (i1) -> !fir.logical<4> -! CHECK: fir.store %[[CMP_CONV]] to %[[ARG0]] : !fir.ref> -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_lor_ref_l32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[CST:.*]] = arith.constant false -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.logical<4> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[CONVERT:.*]] = fir.convert %[[CST]] : (i1) -> !fir.logical<4> -! CHECK: fir.store %[[CONVERT]] to %[[DECLARE]]#0 : !fir.ref> -! 
CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref> -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref> -! CHECK: %[[CONV0:.*]] = fir.convert %[[LOAD0]] : (!fir.logical<4>) -> i1 -! CHECK: %[[CONV1:.*]] = fir.convert %[[LOAD1]] : (!fir.logical<4>) -> i1 -! CHECK: %[[CMP:.*]] = arith.ori %[[CONV0]], %[[CONV1]] : i1 -! CHECK: %[[CMP_CONV:.*]] = fir.convert %[[CMP]] : (i1) -> !fir.logical<4> -! CHECK: fir.store %[[CMP_CONV]] to %[[ARG0]] : !fir.ref> -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_land_ref_l32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[CST:.*]] = arith.constant true -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.logical<4> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[CONVERT:.*]] = fir.convert %[[CST]] : (i1) -> !fir.logical<4> -! CHECK: fir.store %[[CONVERT]] to %[[DECLARE]]#0 : !fir.ref> -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref> -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref> -! CHECK: %[[CONV0:.*]] = fir.convert %[[LOAD0]] : (!fir.logical<4>) -> i1 -! CHECK: %[[CONV1:.*]] = fir.convert %[[LOAD1]] : (!fir.logical<4>) -> i1 -! CHECK: %[[CMP:.*]] = arith.andi %[[CONV0]], %[[CONV1]] : i1 -! CHECK: %[[CMP_CONV:.*]] = fir.convert %[[CMP]] : (i1) -> !fir.logical<4> -! CHECK: fir.store %[[CMP_CONV]] to %[[ARG0]] : !fir.ref> -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_xor_ref_i32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[CST:.*]] = arith.constant 0 : i32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 -! CHECK: %[[DECLARE]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[CST]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.xori %[[LOAD0]], %[[LOAD1]] : i32 -! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_ior_ref_i32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[CST:.*]] = arith.constant 0 : i32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[CST]] to %[[DECLARE:.*]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE:.*]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.ori %[[LOAD0]], %[[LOAD1]] : i32 -! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! 
CHECK-LABEL: acc.reduction.recipe @reduction_iand_ref_i32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[CST:.*]] = arith.constant -1 : i32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[CST]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.andi %[[LOAD0]], %[[LOAD1]] : i32 -! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_max_section_ext100_ref_100xf32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[INIT:.*]] = arith.constant -1.401300e-45 : f32 -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[LB:.*]] = arith.constant 0 : index -! CHECK: %[[UB:.*]] = arith.constant 99 : index -! CHECK: %[[STEP:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] { -! CHECK: %[[COORD:.*]] = fir.coordinate_of %[[DECLARE]]#0, %[[IV]] : (!fir.ref>, index) -> !fir.ref -! CHECK: fir.store %[[INIT]] to %[[COORD]] : !fir.ref -! CHECK: } -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LB0:.*]] = arith.constant 0 : index -! CHECK: %[[UB0:.*]] = arith.constant 99 : index -! CHECK: %[[STEP0:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] { -! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV0]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV0]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref -! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[LOAD1]], %[[LOAD2]] {{.*}} : f32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : f32 -! CHECK: fir.store %[[SELECT]] to %[[COORD1]] : !fir.ref -! CHECK: } -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_max_ref_f32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[INIT:.*]] = arith.constant -1.401300e-45 : f32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca f32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %0 {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[LOAD0]], %[[LOAD1]] {{.*}} : f32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD0]], %[[LOAD1]] : f32 -! CHECK: fir.store %[[SELECT]] to %[[ARG0]] : !fir.ref -! 
CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_max_section_ext100xext10_ref_100x10xi32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%arg0: !fir.ref>): -! CHECK: %[[INIT:.*]] = arith.constant -2147483648 : i32 -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}} : (index, index) -> !fir.shape<2> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100x10xi32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref>, !fir.shape<2>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LB0:.*]] = arith.constant 0 : index -! CHECK: %[[UB0:.*]] = arith.constant 9 : index -! CHECK: %[[STEP0:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] { -! CHECK: %[[LB1:.*]] = arith.constant 0 : index -! CHECK: %[[UB1:.*]] = arith.constant 99 : index -! CHECK: %[[STEP1:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] { -! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0:.*]], %[[IV1]], %[[IV0]] : (!fir.ref>, index, index) -> !fir.ref -! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1:.*]], %[[IV1]], %[[IV0]] : (!fir.ref>, index, index) -> !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref -! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[LOAD1]], %[[LOAD2]] : i32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : i32 -! CHECK: fir.store %[[SELECT]] to %[[COORD1]] : !fir.ref -! CHECK: } -! CHECK: } -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_max_ref_i32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%arg0: !fir.ref): -! CHECK: %[[INIT:.*]] = arith.constant -2147483648 : i32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[LOAD0]], %[[LOAD1]] : i32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD0]], %[[LOAD1]] : i32 -! CHECK: fir.store %[[SELECT]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_min_section_ext100xext10_ref_100x10xf32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[INIT:.*]] = arith.constant 3.40282347E+38 : f32 -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}} : (index, index) -> !fir.shape<2> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100x10xf32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref>, !fir.shape<2>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LB0:.*]] = arith.constant 0 : index -! CHECK: %[[UB0:.*]] = arith.constant 9 : index -! CHECK: %[[STEP0:.*]] = arith.constant 1 : index -! 
CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] { -! CHECK: %[[LB1:.*]] = arith.constant 0 : index -! CHECK: %[[UB1:.*]] = arith.constant 99 : index -! CHECK: %[[STEP1:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] { -! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV1]], %[[IV0]] : (!fir.ref>, index, index) -> !fir.ref -! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV1]], %[[IV0]] : (!fir.ref>, index, index) -> !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref -! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpf olt, %[[LOAD1]], %[[LOAD2]] {{.*}} : f32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : f32 -! CHECK: fir.store %[[SELECT]] to %[[COORD1]] : !fir.ref -! CHECK: } -! CHECK: } -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_min_ref_f32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[INIT:.*]] = arith.constant 3.40282347E+38 : f32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca f32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpf olt, %[[LOAD0]], %[[LOAD1]] {{.*}} : f32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD0]], %[[LOAD1]] : f32 -! CHECK: fir.store %[[SELECT]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_min_section_ext100_ref_100xi32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[INIT:.*]] = arith.constant 2147483647 : i32 -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LB0:.*]] = arith.constant 0 : index -! CHECK: %[[UB0:.*]] = arith.constant 99 : index -! CHECK: %[[STEP0:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] { -! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV0]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV0]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref -! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[LOAD1]], %[[LOAD2]] : i32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : i32 -! CHECK: fir.store %[[SELECT]] to %[[COORD1]] : !fir.ref -! CHECK: } -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_min_ref_i32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[INIT:.*]] = arith.constant 2147483647 : i32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 -! 
CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[LOAD0]], %[[LOAD1]] : i32 -! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD0]], %[[LOAD1]] : i32 -! CHECK: fir.store %[[SELECT]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_mul_ref_f32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[INIT:.*]] = arith.constant 1.000000e+00 : f32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca f32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.mulf %[[LOAD0]], %[[LOAD1]] fastmath : f32 -! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_mul_section_ext100_ref_100xi32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[INIT:.*]] = arith.constant 1 : i32 -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LB:.*]] = arith.constant 0 : index -! CHECK: %[[UB:.*]] = arith.constant 99 : index -! CHECK: %[[STEP:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] { -! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref -! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.muli %[[LOAD1]], %[[LOAD2]] : i32 -! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref -! CHECK: } -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_mul_ref_i32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[INIT:.*]] = arith.constant 1 : i32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! 
CHECK: %[[COMBINED:.*]] = arith.muli %[[LOAD0]], %[[LOAD1]] : i32 -! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_ext100_ref_100xf32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[INIT:.*]] = arith.constant 0.000000e+00 : f32 -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LB:.*]] = arith.constant 0 : index -! CHECK: %[[UB:.*]] = arith.constant 99 : index -! CHECK: %[[STEP:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] { -! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref -! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.addf %[[LOAD1]], %[[LOAD2]] fastmath : f32 -! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref -! CHECK: } -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_add_ref_f32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[INIT:.*]] = arith.constant 0.000000e+00 : f32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca f32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.addf %[[LOAD0]], %[[LOAD1]] fastmath : f32 -! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_ext100xext10xext2_ref_100x10x2xi32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[INIT:.*]] = arith.constant 0 : i32 -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}}, %{{.*}} : (index, index, index) -> !fir.shape<3> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100x10x2xi32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref>, !fir.shape<3>) -> (!fir.ref>, !fir.ref>) -! CHECK: %[[LB0:.*]] = arith.constant 0 : index -! CHECK: %[[UB0:.*]] = arith.constant 1 : index -! CHECK: %[[STEP0:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] { -! CHECK: %[[LB1:.*]] = arith.constant 0 : index -! CHECK: %[[UB1:.*]] = arith.constant 9 : index -! CHECK: %[[STEP1:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] { -! CHECK: %[[LB2:.*]] = arith.constant 0 : index -! CHECK: %[[UB2:.*]] = arith.constant 99 : index -! CHECK: %[[STEP2:.*]] = arith.constant 1 : index -! 
CHECK: fir.do_loop %[[IV2:.*]] = %[[LB2]] to %[[UB2]] step %[[STEP2]] { -! CHECK: %[[COORD]] = fir.coordinate_of %[[DECLARE]]#0, %[[IV2]], %[[IV1]], %[[IV0]] : (!fir.ref>, index, index, index) -> !fir.ref -! CHECK: fir.store %[[INIT]] to %[[COORD]] : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LB0:.*]] = arith.constant 0 : index -! CHECK: %[[UB0:.*]] = arith.constant 1 : index -! CHECK: %[[STEP0:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] { -! CHECK: %[[LB1:.*]] = arith.constant 0 : index -! CHECK: %[[UB1:.*]] = arith.constant 9 : index -! CHECK: %[[STEP1:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] { -! CHECK: %[[LB2:.*]] = arith.constant 0 : index -! CHECK: %[[UB2:.*]] = arith.constant 99 : index -! CHECK: %[[STEP2:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV2:.*]] = %[[LB2]] to %[[UB2]] step %[[STEP2]] { -! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV2]], %[[IV1]], %[[IV0]] : (!fir.ref>, index, index, index) -> !fir.ref -! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV2]], %[[IV1]], %[[IV0]] : (!fir.ref>, index, index, index) -> !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref -! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD1]], %[[LOAD2]] : i32 -! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref -! CHECK: } -! CHECK: } -! CHECK: } -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_ext100xext10_ref_100x10xi32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[INIT:.*]] = arith.constant 0 : i32 -! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}} : (index, index) -> !fir.shape<2> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100x10xi32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref>, !fir.shape<2>) -> (!fir.ref>, !fir.ref>) -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LB0:.*]] = arith.constant 0 : index -! CHECK: %[[UB0:.*]] = arith.constant 9 : index -! CHECK: %[[STEP0:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] { -! CHECK: %[[LB1:.*]] = arith.constant 0 : index -! CHECK: %[[UB1:.*]] = arith.constant 99 : index -! CHECK: %[[STEP1:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] { -! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV1]], %[[IV0]] : (!fir.ref>, index, index) -> !fir.ref -! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV1]], %[[IV0]] : (!fir.ref>, index, index) -> !fir.ref -! CHECK: %[[LOAD1]] = fir.load %[[COORD1]] : !fir.ref -! CHECK: %[[LOAD2]] = fir.load %[[COORD2]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD1]], %[[LOAD2]] : i32 -! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref -! CHECK: } -! CHECK: } -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_ext100_ref_100xi32 : !fir.ref> reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref>): -! CHECK: %[[INIT:.*]] = arith.constant 0 : i32 -! 
CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1> -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32> -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -! HFLIR: acc.yield %[[DECLARE]]#0 : !fir.ref> -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref>, %[[ARG1:.*]]: !fir.ref>): -! CHECK: %[[LB:.*]] = arith.constant 0 : index -! CHECK: %[[UB:.*]] = arith.constant 99 : index -! CHECK: %[[STEP:.*]] = arith.constant 1 : index -! CHECK: fir.do_loop %[[IV:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] { -! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV]] : (!fir.ref>, index) -> !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref -! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD1]], %[[LOAD2]] : i32 -! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref -! CHECK: } -! CHECK: acc.yield %[[ARG0]] : !fir.ref> -! CHECK: } - -! CHECK-LABEL: acc.reduction.recipe @reduction_add_ref_i32 : !fir.ref reduction_operator init { -! CHECK: ^bb0(%{{.*}}: !fir.ref): -! CHECK: %[[INIT:.*]] = arith.constant 0 : i32 -! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 -! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref) -> (!fir.ref, !fir.ref) -! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref -! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref -! CHECK: } combiner { -! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref, %[[ARG1:.*]]: !fir.ref): -! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref -! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref -! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD0]], %[[LOAD1]] : i32 -! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref -! CHECK: acc.yield %[[ARG0]] : !fir.ref -! CHECK: } - -subroutine acc_reduction_add_int(a, b) - integer :: a(100) - integer :: i, b - - !$acc loop reduction(+:b) - do i = 1, 100 - b = b + a(i) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_int( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref) -> !fir.ref {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_add_ref_i32 -> %[[RED_B]] : !fir.ref) - -subroutine acc_reduction_add_int_array_1d(a, b) - integer :: a(100) - integer :: i, b(100) - - !$acc loop reduction(+:b) - do i = 1, 100 - b(i) = b(i) + a(i) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_int_array_1d( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref> {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_add_section_ext100_ref_100xi32 -> %[[RED_B]] : !fir.ref>) - -subroutine acc_reduction_add_int_array_2d(a, b) - integer :: a(100, 10), b(100, 10) - integer :: i, j - - !$acc loop collapse(2) reduction(+:b) - do i = 1, 100 - do j = 1, 10 - b(i, j) = b(i, j) + a(i, j) - end do - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_int_array_2d( -! 
CHECK-SAME: %[[ARG0:.*]]: !fir.ref> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref> {fir.bindc_name = "b"}) { -! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]] -! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_add_section_ext100xext10_ref_100x10xi32 -> %[[RED_ARG1]] : !fir.ref>) -! CHECK: } attributes {collapse = [2]{{.*}} - -subroutine acc_reduction_add_int_array_3d(a, b) - integer :: a(100, 10, 2), b(100, 10, 2) - integer :: i, j, k - - !$acc loop collapse(3) reduction(+:b) - do i = 1, 100 - do j = 1, 10 - do k = 1, 2 - b(i, j, k) = b(i, j, k) + a(i, j, k) - end do - end do - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_int_array_3d( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref> {fir.bindc_name = "b"}) -! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]] -! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}, %{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_add_section_ext100xext10xext2_ref_100x10x2xi32 -> %[[RED_ARG1]] : !fir.ref>) -! CHECK: } attributes {collapse = [3]{{.*}} - -subroutine acc_reduction_add_float(a, b) - real :: a(100), b - integer :: i - - !$acc loop reduction(+:b) - do i = 1, 100 - b = b + a(i) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_float( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref) -> !fir.ref {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_add_ref_f32 -> %[[RED_B]] : !fir.ref) - -subroutine acc_reduction_add_float_array_1d(a, b) - real :: a(100), b(100) - integer :: i - - !$acc loop reduction(+:b) - do i = 1, 100 - b(i) = b(i) + a(i) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_float_array_1d( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref> {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_add_section_ext100_ref_100xf32 -> %[[RED_B]] : !fir.ref>) - -subroutine acc_reduction_mul_int(a, b) - integer :: a(100) - integer :: i, b - - !$acc loop reduction(*:b) - do i = 1, 100 - b = b * a(i) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_mul_int( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref) -> !fir.ref {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_mul_ref_i32 -> %[[RED_B]] : !fir.ref) - -subroutine acc_reduction_mul_int_array_1d(a, b) - integer :: a(100) - integer :: i, b(100) - - !$acc loop reduction(*:b) - do i = 1, 100 - b(i) = b(i) * a(i) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_mul_int_array_1d( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref> {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "b"} -! 
CHECK: acc.loop {{.*}} reduction(@reduction_mul_section_ext100_ref_100xi32 -> %[[RED_B]] : !fir.ref>) - -subroutine acc_reduction_mul_float(a, b) - real :: a(100), b - integer :: i - - !$acc loop reduction(*:b) - do i = 1, 100 - b = b * a(i) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_mul_float( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref) -> !fir.ref {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_mul_ref_f32 -> %[[RED_B]] : !fir.ref) - -subroutine acc_reduction_mul_float_array_1d(a, b) - real :: a(100), b(100) - integer :: i - - !$acc loop reduction(*:b) - do i = 1, 100 - b(i) = b(i) * a(i) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_mul_float_array_1d( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref> {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_mul_section_ext100_ref_100xf32 -> %[[RED_B]] : !fir.ref>) - -subroutine acc_reduction_min_int(a, b) - integer :: a(100) - integer :: i, b - - !$acc loop reduction(min:b) - do i = 1, 100 - b = min(b, a(i)) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_min_int( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref) -> !fir.ref {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_min_ref_i32 -> %[[RED_B]] : !fir.ref) - -subroutine acc_reduction_min_int_array_1d(a, b) - integer :: a(100), b(100) - integer :: i - - !$acc loop reduction(min:b) - do i = 1, 100 - b(i) = min(b(i), a(i)) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_min_int_array_1d( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref> {fir.bindc_name = "b"}) -! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]] -! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_min_section_ext100_ref_100xi32 -> %[[RED_ARG1]] : !fir.ref>) - -subroutine acc_reduction_min_float(a, b) - real :: a(100), b - integer :: i - - !$acc loop reduction(min:b) - do i = 1, 100 - b = min(b, a(i)) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_min_float( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref) -> !fir.ref {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_min_ref_f32 -> %[[RED_B]] : !fir.ref) - -subroutine acc_reduction_min_float_array2d(a, b) - real :: a(100, 10), b(100, 10) - integer :: i, j - - !$acc loop reduction(min:b) collapse(2) - do i = 1, 100 - do j = 1, 10 - b(i, j) = min(b(i, j), a(i, j)) - end do - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_min_float_array2d( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref> {fir.bindc_name = "b"}) -! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]] -! 
CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_min_section_ext100xext10_ref_100x10xf32 -> %[[RED_ARG1]] : !fir.ref>) -! CHECK: attributes {collapse = [2]{{.*}} - -subroutine acc_reduction_max_int(a, b) - integer :: a(100) - integer :: i, b - - !$acc loop reduction(max:b) - do i = 1, 100 - b = max(b, a(i)) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_max_int( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref) -> !fir.ref {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_max_ref_i32 -> %[[RED_B]] : !fir.ref) - -subroutine acc_reduction_max_int_array2d(a, b) - integer :: a(100, 10), b(100, 10) - integer :: i, j - - !$acc loop reduction(max:b) collapse(2) - do i = 1, 100 - do j = 1, 10 - b(i, j) = max(b(i, j), a(i, j)) - end do - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_max_int_array2d( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref> {fir.bindc_name = "b"}) -! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]] -! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_max_section_ext100xext10_ref_100x10xi32 -> %[[RED_ARG1]] : !fir.ref>) - -subroutine acc_reduction_max_float(a, b) - real :: a(100), b - integer :: i - - !$acc loop reduction(max:b) - do i = 1, 100 - b = max(b, a(i)) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_max_float( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref {fir.bindc_name = "b"}) -! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]] -! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref) -> !fir.ref {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_max_ref_f32 -> %[[RED_B]] : !fir.ref) - -subroutine acc_reduction_max_float_array1d(a, b) - real :: a(100), b(100) - integer :: i - - !$acc loop reduction(max:b) - do i = 1, 100 - b(i) = max(b(i), a(i)) - end do -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_max_float_array1d( -! CHECK-SAME: %{{.*}}: !fir.ref> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref> {fir.bindc_name = "b"}) -! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]] -! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "b"} -! CHECK: acc.loop {{.*}} reduction(@reduction_max_section_ext100_ref_100xf32 -> %[[RED_ARG1]] : !fir.ref>) - -subroutine acc_reduction_iand() - integer :: i - !$acc parallel reduction(iand:i) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_iand() -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref) -> !fir.ref {name = "i"} -! CHECK: acc.parallel reduction(@reduction_iand_ref_i32 -> %[[RED]] : !fir.ref) - -subroutine acc_reduction_ior() - integer :: i - !$acc parallel reduction(ior:i) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_ior() -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref) -> !fir.ref {name = "i"} -! 
CHECK: acc.parallel reduction(@reduction_ior_ref_i32 -> %[[RED]] : !fir.ref) - -subroutine acc_reduction_ieor() - integer :: i - !$acc parallel reduction(ieor:i) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_ieor() -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref) -> !fir.ref {name = "i"} -! CHECK: acc.parallel reduction(@reduction_xor_ref_i32 -> %[[RED]] : !fir.ref) - -subroutine acc_reduction_and() - logical :: l - !$acc parallel reduction(.and.:l) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_and() -! CHECK: %[[L:.*]] = fir.alloca !fir.logical<4> {bindc_name = "l", uniq_name = "_QFacc_reduction_andEl"} -! CHECK: %[[DECLL:.*]]:2 = hlfir.declare %[[L]] -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[DECLL]]#0 : !fir.ref>) -> !fir.ref> {name = "l"} -! CHECK: acc.parallel reduction(@reduction_land_ref_l32 -> %[[RED]] : !fir.ref>) - -subroutine acc_reduction_or() - logical :: l - !$acc parallel reduction(.or.:l) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_or() -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref>) -> !fir.ref> {name = "l"} -! CHECK: acc.parallel reduction(@reduction_lor_ref_l32 -> %[[RED]] : !fir.ref>) - -subroutine acc_reduction_eqv() - logical :: l - !$acc parallel reduction(.eqv.:l) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_eqv() -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref>) -> !fir.ref> {name = "l"} -! CHECK: acc.parallel reduction(@reduction_eqv_ref_l32 -> %[[RED]] : !fir.ref>) - -subroutine acc_reduction_neqv() - logical :: l - !$acc parallel reduction(.neqv.:l) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_neqv() -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref>) -> !fir.ref> {name = "l"} -! CHECK: acc.parallel reduction(@reduction_neqv_ref_l32 -> %[[RED]] : !fir.ref>) - -subroutine acc_reduction_add_cmplx() - complex :: c - !$acc parallel reduction(+:c) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_cmplx() -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref>) -> !fir.ref> {name = "c"} -! CHECK: acc.parallel reduction(@reduction_add_ref_z32 -> %[[RED]] : !fir.ref>) - -subroutine acc_reduction_mul_cmplx() - complex :: c - !$acc parallel reduction(*:c) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_mul_cmplx() -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref>) -> !fir.ref> {name = "c"} -! CHECK: acc.parallel reduction(@reduction_mul_ref_z32 -> %[[RED]] : !fir.ref>) - -subroutine acc_reduction_add_alloc() - integer, allocatable :: i - allocate(i) - !$acc parallel reduction(+:i) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_alloc() -! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.box> {bindc_name = "i", uniq_name = "_QFacc_reduction_add_allocEi"} -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]] -! CHECK: %[[LOAD:.*]] = fir.load %[[DECL]]#0 : !fir.ref>> -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box>) -> !fir.heap -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.heap) -> !fir.heap {name = "i"} -! CHECK: acc.parallel reduction(@reduction_add_heap_i32 -> %[[RED]] : !fir.heap) - -subroutine acc_reduction_add_pointer(i) - integer, pointer :: i - !$acc parallel reduction(+:i) - !$acc end parallel -end subroutine - -! 
CHECK-LABEL: func.func @_QPacc_reduction_add_pointer( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref>> {fir.bindc_name = "i"}) -! CHECK: %[[DECLARG0:.*]]:2 = hlfir.declare %[[ARG0]] -! CHECK: %[[LOAD:.*]] = fir.load %[[DECLARG0]]#0 : !fir.ref>> -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box>) -> !fir.ptr -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.ptr) -> !fir.ptr {name = "i"} -! CHECK: acc.parallel reduction(@reduction_add_ptr_i32 -> %[[RED]] : !fir.ptr) - -subroutine acc_reduction_add_static_slice(a) - integer :: a(100) - !$acc parallel reduction(+:a(11:20)) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_static_slice( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref> {fir.bindc_name = "a"}) -! CHECK: %[[C100:.*]] = arith.constant 100 : index -! CHECK: %[[DECLARG0:.*]]:2 = hlfir.declare %[[ARG0]] -! CHECK: %[[C1:.*]] = arith.constant 1 : index -! CHECK: %[[LB:.*]] = arith.constant 10 : index -! CHECK: %[[UB:.*]] = arith.constant 19 : index -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C100]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index) -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[DECLARG0]]#0 : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(11:20)"} -! CHECK: acc.parallel reduction(@reduction_add_section_lb10.ub19_ref_100xi32 -> %[[RED]] : !fir.ref>) - -subroutine acc_reduction_add_dynamic_extent_add(a) - integer :: a(:) - !$acc parallel reduction(+:a) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_dynamic_extent_add( -! CHECK-SAME: %[[ARG0:.*]]: !fir.box> {fir.bindc_name = "a"}) -! CHECK: %[[DECLARG0:.*]]:2 = hlfir.declare %[[ARG0]] -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: acc.parallel reduction(@reduction_add_box_Uxi32 -> %[[RED:.*]] : !fir.ref>) - -subroutine acc_reduction_add_assumed_shape_max(a) - real :: a(:) - !$acc parallel reduction(max:a) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_assumed_shape_max( -! CHECK-SAME: %[[ARG0:.*]]: !fir.box> {fir.bindc_name = "a"}) -! CHECK: %[[DECLARG0:.*]]:2 = hlfir.declare %[[ARG0]] -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref>) bounds(%{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: acc.parallel reduction(@reduction_max_box_Uxf32 -> %[[RED]] : !fir.ref>) { - -subroutine acc_reduction_add_dynamic_extent_add_with_section(a) - integer :: a(:) - !$acc parallel reduction(+:a(2:4)) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_dynamic_extent_add_with_section( -! CHECK-SAME: %[[ARG0:.*]]: !fir.box> {fir.bindc_name = "a"}) -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_reduction_add_dynamic_extent_add_with_sectionEa"} : (!fir.box>, !fir.dscope) -> (!fir.box>, !fir.box>) -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c1{{.*}} : index) upperbound(%c3{{.*}} : index) extent(%{{.*}}#1 : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}} : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL]]#0 : (!fir.box>) -> !fir.ref> -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.ref>) bounds(%[[BOUND]]) -> !fir.ref> {name = "a(2:4)"} -! 
CHECK: acc.parallel reduction(@reduction_add_section_lb1.ub3_box_Uxi32 -> %[[RED]] : !fir.ref>) - -subroutine acc_reduction_add_allocatable(a) - real, allocatable :: a(:) - !$acc parallel reduction(max:a) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_allocatable( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref>>> {fir.bindc_name = "a"}) -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFacc_reduction_add_allocatableEa"} : (!fir.ref>>>, !fir.dscope) -> (!fir.ref>>>, !fir.ref>>>) -! CHECK: %[[BOX:.*]] = fir.load %[[DECL]]#0 : !fir.ref>>> -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}}#1 : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}}#0 : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box>>) -> !fir.heap> -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.heap>) bounds(%{{[0-9]+}}) -> !fir.heap> {name = "a"} -! CHECK: acc.parallel reduction(@reduction_max_box_heap_Uxf32 -> %[[RED]] : !fir.heap>) - -subroutine acc_reduction_add_pointer_array(a) - real, pointer :: a(:) - !$acc parallel reduction(max:a) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_add_pointer_array( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref>>> {fir.bindc_name = "a"}) -! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs, uniq_name = "_QFacc_reduction_add_pointer_arrayEa"} : (!fir.ref>>>, !fir.dscope) -> (!fir.ref>>>, !fir.ref>>>) -! CHECK: %[[BOX:.*]] = fir.load %[[DECL]]#0 : !fir.ref>>> -! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}}#1 : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}}#0 : index) {strideInBytes = true} -! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box>>) -> !fir.ptr> -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.ptr>) bounds(%[[BOUND]]) -> !fir.ptr> {name = "a"} -! CHECK: acc.parallel reduction(@reduction_max_box_ptr_Uxf32 -> %[[RED]] : !fir.ptr>) - -subroutine acc_reduction_max_dynamic_extent_max(a, n) - integer :: n - real :: a(n, n) - !$acc parallel reduction(max:a) - !$acc end parallel -end subroutine - -! CHECK-LABEL: func.func @_QPacc_reduction_max_dynamic_extent_max( -! CHECK-SAME: %[[ARG0:.*]]: !fir.ref> {fir.bindc_name = "a"}, %{{.*}}: !fir.ref {fir.bindc_name = "n"}) -! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_reduction_max_dynamic_extent_maxEa"} : (!fir.ref>, !fir.shape<2>, !fir.dscope) -> (!fir.box>, !fir.ref>) -! CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 : (!fir.box>) -> !fir.ref> -! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[ADDR]] : !fir.ref>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref> {name = "a"} -! CHECK: acc.parallel reduction(@reduction_max_box_UxUxf32 -> %[[RED]] : !fir.ref>) diff --git a/flang/test/Lower/OpenMP/declare-mapper.f90 b/flang/test/Lower/OpenMP/declare-mapper.f90 index 8a98c68a8d582..3d4d0da1e18a3 100644 --- a/flang/test/Lower/OpenMP/declare-mapper.f90 +++ b/flang/test/Lower/OpenMP/declare-mapper.f90 @@ -6,6 +6,7 @@ ! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-3.f90 -o - | FileCheck %t/omp-declare-mapper-3.f90 ! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-4.f90 -o - | FileCheck %t/omp-declare-mapper-4.f90 ! 
RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=50 %t/omp-declare-mapper-5.f90 -o - | FileCheck %t/omp-declare-mapper-5.f90 +! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=51 %t/omp-declare-mapper-6.f90 -o - | FileCheck %t/omp-declare-mapper-6.f90 !--- omp-declare-mapper-1.f90 subroutine declare_mapper_1 @@ -262,3 +263,41 @@ subroutine use_inner() !$omp end target end subroutine end program declare_mapper_5 + +!--- omp-declare-mapper-6.f90 +subroutine declare_mapper_nested_parent + type :: inner_t + real, allocatable :: deep_arr(:) + end type inner_t + + type, abstract :: base_t + real, allocatable :: base_arr(:) + type(inner_t) :: inner + end type base_t + + type, extends(base_t) :: real_t + real, allocatable :: real_arr(:) + end type real_t + + !$omp declare mapper (custommapper : real_t :: t) map(tofrom: t%base_arr, t%real_arr) + ! CHECK: omp.declare_mapper @{{.*custommapper}} + ! CHECK-DAG: omp.map.info {{.*}} {name = "t%base_t%base_arr"} + ! CHECK-DAG: omp.map.info {{.*}} {name = "t%real_arr"} + ! CHECK: omp.declare_mapper.info + + type(real_t) :: r + + allocate(r%base_arr(10)) + allocate(r%inner%deep_arr(10)) + allocate(r%real_arr(10)) + r%base_arr = 1.0 + r%inner%deep_arr = 4.0 + r%real_arr = 0.0 + + ! Check implicit maps for deep nested allocatable payloads not covered by mapper + ! CHECK-DAG: omp.map.info {{.*}} {name = "r.deep_arr.implicit_map"} + ! CHECK: omp.target + !$omp target map(mapper(custommapper), tofrom: r) + r%real_arr = r%base_arr(1) + r%inner%deep_arr(1) + !$omp end target +end subroutine declare_mapper_nested_parent diff --git a/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90 b/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90 index 0c0d877a17b00..642b11bcd6b75 100644 --- a/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90 +++ b/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90 @@ -22,10 +22,10 @@ subroutine sb1 !CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_ADDR]] {uniq_name = "_QFsb1Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: omp.parallel private({{.*}} %[[I_DECL]]#0 -> %[[I_PVT_ADDR:.*]] : {{.*}}) { !CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_ADDR]] {uniq_name = "_QFsb1Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: %[[I_FINAL_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (index, i32) { +!CHECK: %[[I_FINAL_VAL:.*]] = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (i32) { !CHECK: fir.store %[[I_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: fir.store %[[I_FINAL_VAL]]#1 to %[[I_PVT_DECL]]#0 : !fir.ref +!CHECK: fir.store %[[I_FINAL_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: omp.terminator !CHECK: } !CHECK: return @@ -58,20 +58,20 @@ subroutine sb2 !CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_ADDR]] {uniq_name = "_QFsb2Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: %[[FINAL_J_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[J_VAL:.*]] = %{{.*}}) -> (index, i32) { +!CHECK: %[[FINAL_J_VAL:.*]] = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[J_VAL:.*]] = %{{.*}}) -> (i32) { !CHECK: fir.store %[[J_VAL]] to %[[J_PVT_DECL]]#0 : !fir.ref !CHECK: fir.if %{{.*}} { -!CHECK: %[[FINAL_I_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (index, i32) { +!CHECK: %[[FINAL_I_VAL:.*]] = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (i32) { !CHECK: fir.store %[[I_VAL]] 
to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: fir.store %[[FINAL_I_VAL]]#1 to %[[I_PVT_DECL]]#0 : !fir.ref +!CHECK: fir.store %[[FINAL_I_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: %[[FINAL_I_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (index, i32) { +!CHECK: %[[FINAL_I_VAL:.*]] = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (i32) { !CHECK: fir.store %[[I_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: fir.store %[[FINAL_I_VAL]]#1 to %[[I_PVT_DECL]]#0 : !fir.ref +!CHECK: fir.store %[[FINAL_I_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: fir.store %[[FINAL_J_VAL]]#1 to %[[J_PVT_DECL]]#0 : !fir.ref +!CHECK: fir.store %[[FINAL_J_VAL]] to %[[J_PVT_DECL]]#0 : !fir.ref !CHECK: omp.terminator !CHECK: } !CHECK: return diff --git a/flang/test/Lower/OpenMP/infinite-loop-in-construct.f90 b/flang/test/Lower/OpenMP/infinite-loop-in-construct.f90 index 16b400a231860..f02d0e5ccc53c 100644 --- a/flang/test/Lower/OpenMP/infinite-loop-in-construct.f90 +++ b/flang/test/Lower/OpenMP/infinite-loop-in-construct.f90 @@ -8,8 +8,10 @@ ! CHECK: cf.cond_br %{{[0-9]+}}, ^bb1, ^bb2 ! CHECK-NEXT: ^bb1: // pred: ^bb0 ! CHECK: cf.br ^bb2 -! CHECK-NEXT: ^bb2: // 3 preds: ^bb0, ^bb1, ^bb2 -! CHECK-NEXT: cf.br ^bb2 +! CHECK-NEXT: ^bb2: // 2 preds: ^bb0, ^bb1 +! CHECK: cf.br ^bb3 +! CHECK-NEXT: ^bb3: // 2 preds: ^bb2, ^bb3 +! CHECK: cf.br ^bb3 ! CHECK-NEXT: } subroutine sb(ninter, numnod) diff --git a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 index 4d1023925fd88..3bb40834afe4c 100644 --- a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 +++ b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 @@ -59,21 +59,20 @@ ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i32) -> index ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : index ! CHECK: %[[LB:.*]] = fir.convert %[[VAL_8]] : (index) -> i32 -! CHECK: %[[VAL_12:.*]]:2 = fir.do_loop %[[VAL_13:[^ ]*]] = +! CHECK: %[[VAL_12:.*]] = fir.do_loop %[[VAL_13:[^ ]*]] = ! CHECK-SAME: %[[VAL_8]] to %[[VAL_10]] step %[[VAL_11]] -! CHECK-SAME: iter_args(%[[IV:.*]] = %[[LB]]) -> (index, i32) { +! CHECK-SAME: iter_args(%[[IV:.*]] = %[[LB]]) -> (i32) { ! CHECK: fir.store %[[IV]] to %[[PRIV_J_DECL]]#0 : !fir.ref ! CHECK: %[[LOAD:.*]] = fir.load %[[PRIV_I_DECL]]#0 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[PRIV_J_DECL]]#0 : !fir.ref ! CHECK: %[[VAL_16:.*]] = arith.addi %[[LOAD]], %[[VAL_15]] : i32 ! CHECK: hlfir.assign %[[VAL_16]] to %[[PRIV_X_DECL]]#0 : i32, !fir.ref -! CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_13]], %[[VAL_11]] overflow : index ! CHECK: %[[STEPCAST:.*]] = fir.convert %[[VAL_11]] : (index) -> i32 ! CHECK: %[[IVLOAD:.*]] = fir.load %[[PRIV_J_DECL]]#0 : !fir.ref ! CHECK: %[[IVINC:.*]] = arith.addi %[[IVLOAD]], %[[STEPCAST]] overflow : -! CHECK: fir.result %[[VAL_17]], %[[IVINC]] : index, i32 +! CHECK: fir.result %[[IVINC]] : i32 ! CHECK: } -! CHECK: fir.store %[[VAL_12]]#1 to %[[PRIV_J_DECL]]#0 : !fir.ref +! CHECK: fir.store %[[VAL_12]] to %[[PRIV_J_DECL]]#0 : !fir.ref ! CHECK: omp.yield ! CHECK: } ! CHECK: } diff --git a/flang/test/Lower/OpenMP/sections-predetermined-private.f90 b/flang/test/Lower/OpenMP/sections-predetermined-private.f90 index 3ca3b2219c91b..3313feb3d7021 100644 --- a/flang/test/Lower/OpenMP/sections-predetermined-private.f90 +++ b/flang/test/Lower/OpenMP/sections-predetermined-private.f90 @@ -15,15 +15,15 @@ ! 
CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: omp.sections { ! CHECK: omp.section { -! CHECK: %[[VAL_11:.*]]:2 = fir.do_loop %[[VAL_12:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}} -> (index, i32) { +! CHECK: %[[VAL_11:.*]] = fir.do_loop %[[VAL_12:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}} -> (i32) { ! CHECK: } -! CHECK: fir.store %[[VAL_11]]#1 to %[[VAL_4]]#0 : !fir.ref +! CHECK: fir.store %[[VAL_11]] to %[[VAL_4]]#0 : !fir.ref ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.section { -! CHECK: %[[VAL_25:.*]]:2 = fir.do_loop %[[VAL_26:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { +! CHECK: %[[VAL_25:.*]] = fir.do_loop %[[VAL_26:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (i32) { ! CHECK: } -! CHECK: fir.store %[[VAL_25]]#1 to %[[VAL_4]]#0 : !fir.ref +! CHECK: fir.store %[[VAL_25]] to %[[VAL_4]]#0 : !fir.ref ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.terminator diff --git a/flang/test/Lower/OpenMP/shared-loop.f90 b/flang/test/Lower/OpenMP/shared-loop.f90 index a26cbd0b9e90e..48ad553752e4a 100644 --- a/flang/test/Lower/OpenMP/shared-loop.f90 +++ b/flang/test/Lower/OpenMP/shared-loop.f90 @@ -9,14 +9,14 @@ ! CHECK: omp.parallel { ! CHECK: omp.sections { ! CHECK: omp.section { -! CHECK: %[[RES:.*]]:2 = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = +! CHECK: %[[RES:.*]] = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = ! CHECK: fir.store %[[ARG1]] to %[[DECL_I]]#0 -! CHECK: %[[UPDATE_ITER:.*]] = arith.addi %[[ARG0]], %{{.*}} +! CHECK: hlfir.assign ! CHECK: %[[LOAD_I:.*]] = fir.load %[[DECL_I]]#0 ! CHECK: %[[RES_I:.*]] = arith.addi %[[LOAD_I]], %{{.*}} -! CHECK: fir.result %[[UPDATE_ITER]], %[[RES_I]] +! CHECK: fir.result %[[RES_I]] ! CHECK: } -! CHECK: fir.store %[[RES]]#1 to %[[DECL_I]]#0 +! CHECK: fir.store %[[RES]] to %[[DECL_I]]#0 ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.terminator @@ -47,15 +47,15 @@ subroutine omploop ! CHECK: %[[DECL_PRIV_I:.*]]:2 = hlfir.declare %[[ALLOC_PRIV_I]] ! CHECK: omp.sections { ! CHECK: omp.section { -! CHECK: %[[RES:.*]]:2 = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = +! CHECK: %[[RES:.*]] = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = ! CHECK-NOT: fir.store %[[ARG1]] to %[[DECL_I]]#1 ! CHECK: fir.store %[[ARG1]] to %[[DECL_PRIV_I]]#0 -! CHECK: %[[UPDATE_ITER:.*]] = arith.addi %[[ARG0]], %{{.*}} +! CHECK: hlfir.assign ! CHECK: %[[LOAD_I:.*]] = fir.load %[[DECL_PRIV_I]]#0 ! CHECK: %[[RES_I:.*]] = arith.addi %[[LOAD_I]], %{{.*}} -! CHECK: fir.result %[[UPDATE_ITER]], %[[RES_I]] +! CHECK: fir.result %[[RES_I]] ! CHECK: } -! CHECK: fir.store %[[RES]]#1 to %[[DECL_PRIV_I]]#0 +! CHECK: fir.store %[[RES]] to %[[DECL_PRIV_I]]#0 ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.terminator @@ -87,15 +87,15 @@ subroutine omploop2 ! CHECK: %[[DECL_PRIV_I:.*]]:2 = hlfir.declare %[[ALLOC_PRIV_I]] ! CHECK: omp.sections { ! CHECK: omp.section { -! CHECK: %[[RES:.*]]:2 = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = +! CHECK: %[[RES:.*]] = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = ! CHECK-NOT: fir.store %[[ARG1]] to %[[DECL_I]]#1 ! CHECK: fir.store %[[ARG1]] to %[[DECL_PRIV_I]]#0 -! CHECK: %[[UPDATE_ITER:.*]] = arith.addi %[[ARG0]], %{{.*}} +! 
CHECK: hlfir.assign ! CHECK: %[[LOAD_I:.*]] = fir.load %[[DECL_PRIV_I]]#0 ! CHECK: %[[RES_I:.*]] = arith.addi %[[LOAD_I]], %{{.*}} -! CHECK: fir.result %[[UPDATE_ITER]], %[[RES_I]] +! CHECK: fir.result %[[RES_I]] ! CHECK: } -! CHECK: fir.store %[[RES]]#1 to %[[DECL_PRIV_I]]#0 +! CHECK: fir.store %[[RES]] to %[[DECL_PRIV_I]]#0 ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.terminator diff --git a/flang/test/Lower/OpenMP/wsloop-collapse-continue.f90 b/flang/test/Lower/OpenMP/wsloop-collapse-continue.f90 new file mode 100644 index 0000000000000..fea7a8b335d63 --- /dev/null +++ b/flang/test/Lower/OpenMP/wsloop-collapse-continue.f90 @@ -0,0 +1,19 @@ +! RUN: bbc -fopenmp -emit-hlfir %s -o - | FileCheck %s + +program wsloop_collapse_continue + integer i, j + +! CHECK: omp.wsloop {{.*}} { +! CHECK: omp.loop_nest ({{.*}}) : i32 = ({{.*}}) to ({{.*}}) inclusive step ({{.*}}) collapse(2) { + !$omp do collapse(2) + do 50 i = 1, 42 + do 51 j = 1, 84 +! CHECK: fir.call @_FortranAioOutputInteger32( + print *, i +! CHECK: fir.call @_FortranAioOutputInteger32( + print *, j + 51 continue + 50 continue + !$omp end do + +end program wsloop_collapse_continue diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 index e2f75bc8e4481..a02188a7fb0db 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 @@ -204,7 +204,7 @@ program reduce15 ! CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_38]] : (i32) -> index ! CHECK: %[[VAL_40:.*]] = arith.constant 1 : index ! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_37]] : (index) -> i32 -! CHECK: %[[VAL_42:.*]]:2 = fir.do_loop %[[VAL_43:.*]] = %[[VAL_37]] to %[[VAL_39]] step %[[VAL_40]] iter_args(%[[VAL_44:.*]] = %[[VAL_41]]) -> (index, i32) { +! CHECK: %[[VAL_42:.*]] = fir.do_loop %[[VAL_43:.*]] = %[[VAL_37]] to %[[VAL_39]] step %[[VAL_40]] iter_args(%[[VAL_44:.*]] = %[[VAL_41]]) -> (i32) { ! CHECK: fir.store %[[VAL_44]] to %[[VAL_3]]#0 : !fir.ref ! CHECK: %[[VAL_45:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref ! CHECK: %[[VAL_46:.*]] = fir.load %[[VAL_1]]#0 : !fir.ref>>> @@ -212,13 +212,12 @@ program reduce15 ! CHECK: %[[VAL_48:.*]] = fir.convert %[[VAL_47]] : (i32) -> i64 ! CHECK: %[[VAL_49:.*]] = hlfir.designate %[[VAL_46]] (%[[VAL_48]]) : (!fir.box>>, i64) -> !fir.ref ! CHECK: hlfir.assign %[[VAL_45]] to %[[VAL_49]] : i32, !fir.ref -! CHECK: %[[VAL_50:.*]] = arith.addi %[[VAL_43]], %[[VAL_40]] overflow : index ! CHECK: %[[VAL_51:.*]] = fir.convert %[[VAL_40]] : (index) -> i32 ! CHECK: %[[VAL_52:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref ! CHECK: %[[VAL_53:.*]] = arith.addi %[[VAL_52]], %[[VAL_51]] overflow : i32 -! CHECK: fir.result %[[VAL_50]], %[[VAL_53]] : index, i32 +! CHECK: fir.result %[[VAL_53]] : i32 ! CHECK: } -! CHECK: fir.store %[[VAL_54:.*]]#1 to %[[VAL_3]]#0 : !fir.ref +! CHECK: fir.store %[[VAL_54:.*]] to %[[VAL_3]]#0 : !fir.ref ! CHECK: omp.parallel { ! CHECK: %[[VAL_57:.*]] = arith.constant 1 : i32 ! 
CHECK: %[[VAL_58:.*]] = arith.constant 10 : i32 diff --git a/flang/test/Lower/OpenMP/wsloop-variable.f90 b/flang/test/Lower/OpenMP/wsloop-variable.f90 index 0f4aafb10ded3..60d970f3f0bac 100644 --- a/flang/test/Lower/OpenMP/wsloop-variable.f90 +++ b/flang/test/Lower/OpenMP/wsloop-variable.f90 @@ -139,7 +139,7 @@ subroutine wsloop_variable_sub !CHECK: %[[VAL_33:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref !CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_33]] : (i32) -> index !CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_30]] : (index) -> i64 -!CHECK: %[[VAL_36:.*]]:2 = fir.do_loop %[[VAL_37:.*]] = %[[VAL_30]] to %[[VAL_32]] step %[[VAL_34]] iter_args(%[[VAL_38:.*]] = %[[VAL_35]]) -> (index, i64) { +!CHECK: %[[VAL_36:.*]] = fir.do_loop %[[VAL_37:.*]] = %[[VAL_30]] to %[[VAL_32]] step %[[VAL_34]] iter_args(%[[VAL_38:.*]] = %[[VAL_35]]) -> (i64) { !CHECK: fir.store %[[VAL_38]] to %[[VAL_17]]#0 : !fir.ref !CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref !CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_39]] : (i16) -> i64 @@ -147,13 +147,12 @@ subroutine wsloop_variable_sub !CHECK: %[[VAL_42:.*]] = arith.addi %[[VAL_40]], %[[VAL_41]] : i64 !CHECK: %[[VAL_43:.*]] = fir.convert %[[VAL_42]] : (i64) -> f32 !CHECK: hlfir.assign %[[VAL_43]] to %[[VAL_21]]#0 : f32, !fir.ref -!CHECK: %[[VAL_44:.*]] = arith.addi %[[VAL_37]], %[[VAL_34]] overflow : index !CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_34]] : (index) -> i64 !CHECK: %[[VAL_46:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref !CHECK: %[[VAL_47:.*]] = arith.addi %[[VAL_46]], %[[VAL_45]] overflow : i64 -!CHECK: fir.result %[[VAL_44]], %[[VAL_47]] : index, i64 +!CHECK: fir.result %[[VAL_47]] : i64 !CHECK: } -!CHECK: fir.store %[[VAL_48:.*]]#1 to %[[VAL_17]]#0 : !fir.ref +!CHECK: fir.store %[[VAL_48:.*]] to %[[VAL_17]]#0 : !fir.ref !CHECK: omp.yield !CHECK: } !CHECK: } diff --git a/flang/test/Lower/cdefined.f90 b/flang/test/Lower/cdefined.f90 new file mode 100644 index 0000000000000..89599442589eb --- /dev/null +++ b/flang/test/Lower/cdefined.f90 @@ -0,0 +1,9 @@ +! RUN: bbc -emit-hlfir -o - %s | FileCheck %s +! Ensure that CDEFINED variable has external (default) linkage and that +! it doesn't have an initializer +module m + use iso_c_binding + integer(c_int), bind(C, name='c_global', CDEFINED) :: c = 42 + ! CHECK: fir.global @c_global : i32 + ! CHECK-NOT: fir.zero_bits +end diff --git a/flang/test/Lower/do_loop.f90 b/flang/test/Lower/do_loop.f90 index 5d8343b8d68a4..065324ac200da 100644 --- a/flang/test/Lower/do_loop.f90 +++ b/flang/test/Lower/do_loop.f90 @@ -20,19 +20,18 @@ subroutine simple_loop ! CHECK: %[[C5_CVT:.*]] = fir.convert %c5_i32 : (i32) -> index ! CHECK: %[[C1:.*]] = arith.constant 1 : index ! CHECK: %[[LB:.*]] = fir.convert %[[C1_CVT]] : (index) -> i32 - ! CHECK: %[[LI_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[LI_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[C1_CVT]] to %[[C5_CVT]] step %[[C1]] - ! CHECK-SAME: iter_args(%[[IV:.*]] = %[[LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[IV:.*]] = %[[LB]]) -> (i32) { do i=1,5 ! CHECK: fir.store %[[IV]] to %[[I_REF]] : !fir.ref - ! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[C1]] overflow : index ! CHECK: %[[STEPCAST:.*]] = fir.convert %[[C1]] : (index) -> i32 ! CHECK: %[[IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[IVINC:.*]] = arith.addi %[[IVLOAD]], %[[STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[LI_NEXT]], %[[IVINC]] : index, i32 + ! CHECK: fir.result %[[IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[LI_RES]]#1 to %[[I_REF]] : !fir.ref + ! 
CHECK: fir.store %[[LI_RES]] to %[[I_REF]] : !fir.ref ! CHECK: %[[I:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %{{.*}} = fir.call @_FortranAioOutputInteger32(%{{.*}}, %[[I]]) {{.*}}: (!fir.ref, i32) -> i1 print *, i @@ -54,9 +53,9 @@ subroutine nested_loop ! CHECK: %[[E_I_CVT:.*]] = fir.convert %[[E_I]] : (i32) -> index ! CHECK: %[[ST_I:.*]] = arith.constant 1 : index ! CHECK: %[[I_LB:.*]] = fir.convert %[[S_I_CVT]] : (index) -> i32 - ! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[S_I_CVT]] to %[[E_I_CVT]] step %[[ST_I]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { do i=1,5 ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref ! CHECK: %[[S_J:.*]] = arith.constant 1 : i32 @@ -65,9 +64,9 @@ subroutine nested_loop ! CHECK: %[[E_J_CVT:.*]] = fir.convert %[[E_J]] : (i32) -> index ! CHECK: %[[ST_J:.*]] = arith.constant 1 : index ! CHECK: %[[J_LB:.*]] = fir.convert %[[S_J_CVT]] : (index) -> i32 - ! CHECK: %[[J_RES:.*]]:2 = fir.do_loop %[[LJ:[^ ]*]] = + ! CHECK: %[[J_RES:.*]] = fir.do_loop %[[LJ:[^ ]*]] = ! CHECK-SAME: %[[S_J_CVT]] to %[[E_J_CVT]] step %[[ST_J]] - ! CHECK-SAME: iter_args(%[[J_IV:.*]] = %[[J_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[J_IV:.*]] = %[[J_LB]]) -> (i32) { do j=1,5 ! CHECK: fir.store %[[J_IV]] to %[[J_REF]] : !fir.ref ! CHECK: %[[ASUM:.*]] = fir.load %[[ASUM_REF]] : !fir.ref @@ -84,22 +83,20 @@ subroutine nested_loop ! CHECK: %[[ASUM_NEW:.*]] = arith.addi %[[ASUM]], %[[ARR_VAL]] : i32 ! CHECK: fir.store %[[ASUM_NEW]] to %[[ASUM_REF]] : !fir.ref asum = asum + arr(i,j) - ! CHECK: %[[LJ_NEXT:.*]] = arith.addi %[[LJ]], %[[ST_J]] overflow : index ! CHECK: %[[J_STEPCAST:.*]] = fir.convert %[[ST_J]] : (index) -> i32 ! CHECK: %[[J_IVLOAD:.*]] = fir.load %[[J_REF]] : !fir.ref ! CHECK: %[[J_IVINC:.*]] = arith.addi %[[J_IVLOAD]], %[[J_STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[LJ_NEXT]], %[[J_IVINC]] : index, i32 + ! CHECK: fir.result %[[J_IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[J_RES]]#1 to %[[J_REF]] : !fir.ref - ! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[ST_I]] overflow : index + ! CHECK: fir.store %[[J_RES]] to %[[J_REF]] : !fir.ref ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[ST_I]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i32 + ! CHECK: fir.result %[[I_IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[I_RES]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[I_RES]] to %[[I_REF]] : !fir.ref end subroutine ! Test a downcounting loop @@ -115,19 +112,18 @@ subroutine down_counting_loop() ! CHECK: %[[CMINUS1:.*]] = arith.constant -1 : i32 ! CHECK: %[[CMINUS1_STEP_CVT:.*]] = fir.convert %[[CMINUS1]] : (i32) -> index ! CHECK: %[[I_LB:.*]] = fir.convert %[[C5_CVT]] : (index) -> i32 - ! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[C5_CVT]] to %[[C1_CVT]] step %[[CMINUS1_STEP_CVT]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { do i=5,1,-1 ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref - ! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[CMINUS1_STEP_CVT]] overflow : index ! 
CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[CMINUS1_STEP_CVT]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i32 + ! CHECK: fir.result %[[I_IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[I_RES]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[I_RES]] to %[[I_REF]] : !fir.ref end subroutine ! Test a general loop with a variable step @@ -143,19 +139,18 @@ subroutine loop_with_variable_step(s,e,st) ! CHECK: %[[ST:.*]] = fir.load %[[ST_REF]] : !fir.ref ! CHECK: %[[ST_CVT:.*]] = fir.convert %[[ST]] : (i32) -> index ! CHECK: %[[I_LB:.*]] = fir.convert %[[S_CVT]] : (index) -> i32 - ! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[S_CVT]] to %[[E_CVT]] step %[[ST_CVT]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { do i=s,e,st ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref - ! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[ST_CVT]] overflow : index ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[ST_CVT]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i32 + ! CHECK: fir.result %[[I_IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[I_RES]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[I_RES]] to %[[I_REF]] : !fir.ref end subroutine ! Test usage of pointer variables as index, start, end and step variables @@ -195,19 +190,18 @@ subroutine loop_with_pointer_variables(s,e,st) ! CHECK: %[[ST:.*]] = fir.load %[[ST_PTR]] : !fir.ptr ! CHECK: %[[ST_CVT:.*]] = fir.convert %[[ST]] : (i32) -> index ! CHECK: %[[I_LB:.*]] = fir.convert %[[S_CVT]] : (index) -> i32 -! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = +! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[S_CVT]] to %[[E_CVT]] step %[[ST_CVT]] -! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { +! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { do iptr=sptr,eptr,stptr ! CHECK: fir.store %[[I_IV]] to %[[I_PTR]] : !fir.ptr -! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[ST_CVT]] overflow : index ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[ST_CVT]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_PTR]] : !fir.ptr ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 -! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i32 +! CHECK: fir.result %[[I_IVINC]] : i32 end do ! CHECK: } -! CHECK: fir.store %[[I_RES]]#1 to %[[I_PTR]] : !fir.ptr +! CHECK: fir.store %[[I_RES]] to %[[I_PTR]] : !fir.ptr end subroutine ! Test usage of non-default integer kind for loop control and loop index variable @@ -225,19 +219,18 @@ subroutine loop_with_non_default_integer(s,e,st) integer(kind=8) :: s, e, st ! CHECK: %[[I_LB:.*]] = fir.convert %[[S_CVT]] : (index) -> i64 - ! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[S_CVT]] to %[[E_CVT]] step %[[ST_CVT]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i64) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i64) { do i=s,e,st ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref - ! 
CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[ST_CVT]] overflow : index ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[ST_CVT]] : (index) -> i64 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i64 - ! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i64 + ! CHECK: fir.result %[[I_IVINC]] : i64 end do ! CHECK: } - ! CHECK: fir.store %[[I_RES]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[I_RES]] to %[[I_REF]] : !fir.ref end subroutine ! Test real loop control. diff --git a/flang/test/Lower/do_loop_unstructured.f90 b/flang/test/Lower/do_loop_unstructured.f90 index 176ea5ccee129..3b03850b43bb2 100644 --- a/flang/test/Lower/do_loop_unstructured.f90 +++ b/flang/test/Lower/do_loop_unstructured.f90 @@ -215,9 +215,8 @@ subroutine nested_structured_in_unstructured() ! CHECK: ^[[BODY]]: ! CHECK: %{{.*}} = fir.do_loop %[[J_INDEX:[^ ]*]] = ! CHECK-SAME: %{{.*}} to %{{.*}} step %[[ST:[^ ]*]] -! CHECK-SAME: iter_args(%[[J_IV:.*]] = %{{.*}}) -> (index, i32) { +! CHECK-SAME: iter_args(%[[J_IV:.*]] = %{{.*}}) -> (i32) { ! CHECK: fir.store %[[J_IV]] to %[[LOOP_VAR_J_REF]] : !fir.ref -! CHECK: %[[J_INDEX_NEXT:.*]] = arith.addi %[[J_INDEX]], %[[ST]] overflow : index ! CHECK: %[[LOOP_VAR_J:.*]] = fir.load %[[LOOP_VAR_J_REF]] : !fir.ref ! CHECK: %[[LOOP_VAR_J_NEXT:.*]] = arith.addi %[[LOOP_VAR_J]], %{{[^ ]*}} overflow : i32 ! CHECK: } diff --git a/flang/test/Lower/infinite_loop.f90 b/flang/test/Lower/infinite_loop.f90 index de0bee779c5b6..18309777e2928 100644 --- a/flang/test/Lower/infinite_loop.f90 +++ b/flang/test/Lower/infinite_loop.f90 @@ -94,17 +94,16 @@ subroutine structured_loop_in_infinite(i) ! CHECK: %[[C10_INDEX:.*]] = fir.convert %[[C10]] : (i32) -> index ! CHECK: %[[C1_1:.*]] = arith.constant 1 : index ! CHECK: %[[J_LB:.*]] = fir.convert %[[C1_INDEX]] : (index) -> i32 -! CHECK: %[[J_FINAL:.*]]:2 = fir.do_loop %[[J:[^ ]*]] = +! CHECK: %[[J_FINAL:.*]] = fir.do_loop %[[J:[^ ]*]] = ! CHECK-SAME: %[[C1_INDEX]] to %[[C10_INDEX]] step %[[C1_1]] -! CHECK-SAME: iter_args(%[[J_IV:.*]] = %[[J_LB]]) -> (index, i32) { +! CHECK-SAME: iter_args(%[[J_IV:.*]] = %[[J_LB]]) -> (i32) { ! CHECK: fir.store %[[J_IV]] to %[[J_REF]] : !fir.ref -! CHECK: %[[J_NEXT:.*]] = arith.addi %[[J]], %[[C1_1]] overflow : index ! CHECK: %[[J_STEPCAST:.*]] = fir.convert %[[C1_1]] : (index) -> i32 ! CHECK: %[[J_IVLOAD:.*]] = fir.load %[[J_REF]] : !fir.ref ! CHECK: %[[J_IVINC:.*]] = arith.addi %[[J_IVLOAD]], %[[J_STEPCAST]] overflow : i32 -! CHECK: fir.result %[[J_NEXT]], %[[J_IVINC]] : index, i32 +! CHECK: fir.result %[[J_IVINC]] : i32 ! CHECK: } -! CHECK: fir.store %[[J_FINAL]]#1 to %[[J_REF]] : !fir.ref +! CHECK: fir.store %[[J_FINAL]] to %[[J_REF]] : !fir.ref ! CHECK: cf.br ^[[BODY1]] ! CHECK: ^[[RETURN]]: ! CHECK: return diff --git a/flang/test/Lower/io-implied-do-fixes.f90 b/flang/test/Lower/io-implied-do-fixes.f90 index cd4fd43e05194..91e8cbc9868ff 100644 --- a/flang/test/Lower/io-implied-do-fixes.f90 +++ b/flang/test/Lower/io-implied-do-fixes.f90 @@ -10,8 +10,7 @@ ! CHECK: %[[J_VAL_FINAL:.*]] = fir.do_loop %[[J_VAL:.*]] = %{{.*}} to %{{.*}} step %{{.*}} -> index { ! CHECK: %[[J_VAL_CVT1:.*]] = fir.convert %[[J_VAL]] : (index) -> i32 ! CHECK: fir.store %[[J_VAL_CVT1]] to %[[J_ADDR]] : !fir.ptr -! CHECK: %[[J_VAL_NEXT:.*]] = arith.addi %[[J_VAL]], %{{[^ ]*}} overflow : index -! CHECK: fir.result %[[J_VAL_NEXT]] : index +! CHECK: fir.result %[[J_VAL]] : index ! CHECK: } ! 
CHECK: %[[J_VAL_CVT2:.*]] = fir.convert %[[J_VAL_FINAL]] : (index) -> i32 ! CHECK: fir.store %[[J_VAL_CVT2]] to %[[J_ADDR]] : !fir.ptr @@ -28,8 +27,7 @@ subroutine ido1 ! CHECK: %[[J_VAL_FINAL:.*]] = fir.do_loop %[[J_VAL:.*]] = %{{.*}} to %{{.*}} step %{{.*}} -> index { ! CHECK: %[[J_VAL_CVT1:.*]] = fir.convert %[[J_VAL]] : (index) -> i32 ! CHECK: fir.store %[[J_VAL_CVT1]] to %[[J_ADDR]] : !fir.heap -! CHECK: %[[J_VAL_NEXT:.*]] = arith.addi %[[J_VAL]], %{{[^ ]*}} overflow : index -! CHECK: fir.result %[[J_VAL_NEXT]] : index +! CHECK: fir.result %[[J_VAL]] : index ! CHECK: } ! CHECK: %[[J_VAL_CVT2:.*]] = fir.convert %[[J_VAL_FINAL]] : (index) -> i32 ! CHECK: fir.store %[[J_VAL_CVT2]] to %[[J_ADDR]] : !fir.heap diff --git a/flang/test/Lower/loops.f90 b/flang/test/Lower/loops.f90 index 64f14ff972272..2fea84b03891a 100644 --- a/flang/test/Lower/loops.f90 +++ b/flang/test/Lower/loops.f90 @@ -31,7 +31,7 @@ subroutine loop_test a(i,j,k) = a(i,j,k) + 1 enddo - ! CHECK-COUNT-3: fir.do_loop {{[^un]*}} -> (index, i32) + ! CHECK-COUNT-3: fir.do_loop {{[^un]*}} -> (i32) asum = 0 do i=1,5 do j=1,5 @@ -120,7 +120,7 @@ subroutine lis(n) ! CHECK: %[[V_95:[0-9]+]] = fir.alloca !fir.array, %{{.*}}, %{{.*}} {bindc_name = "t", pinned, uniq_name = "_QFlisEt"} ! CHECK: %[[V_96:[0-9]+]] = fir.alloca !fir.box>> {bindc_name = "p", pinned, uniq_name = "_QFlisEp"} ! CHECK: fir.store %{{.*}} to %[[V_96]] : !fir.ref>>> - ! CHECK: fir.do_loop %arg3 = %{{.*}} to %{{.*}} step %c1{{.*}} iter_args(%arg4 = %{{.*}}) -> (index, i32) { + ! CHECK: fir.do_loop %arg3 = %{{.*}} to %{{.*}} step %c1{{.*}} iter_args(%arg4 = %{{.*}}) -> (i32) { ! CHECK: fir.do_concurrent { ! CHECK: fir.alloca i32 {bindc_name = "m"} ! CHECK: fir.do_concurrent.loop (%{{.*}}) = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) { diff --git a/flang/test/Lower/loops2.f90 b/flang/test/Lower/loops2.f90 index 60a6bf6c53119..cdd5c986bce71 100644 --- a/flang/test/Lower/loops2.f90 +++ b/flang/test/Lower/loops2.f90 @@ -15,10 +15,10 @@ subroutine test_pointer() ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QMtest_loop_varEi_pointer) : !fir.ref>> ! CHECK: %[[VAL_1:.*]] = fir.load %[[VAL_0]] : !fir.ref>> ! CHECK: %[[VAL_2:.*]] = fir.box_addr %[[VAL_1]] : (!fir.box>) -> !fir.ptr -! CHECK: %[[VAL_9:.*]]:2 = fir.do_loop{{.*}}iter_args(%[[IV:.*]] = {{.*}}) +! CHECK: %[[VAL_9:.*]] = fir.do_loop{{.*}}iter_args(%[[IV:.*]] = {{.*}}) ! CHECK: fir.store %[[IV]] to %[[VAL_2]] : !fir.ptr ! CHECK: } -! CHECK: fir.store %[[VAL_9]]#1 to %[[VAL_2]] : !fir.ptr +! CHECK: fir.store %[[VAL_9]] to %[[VAL_2]] : !fir.ptr end subroutine ! CHECK-LABEL: func @_QMtest_loop_varPtest_allocatable @@ -28,10 +28,10 @@ subroutine test_allocatable() ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QMtest_loop_varEi_allocatable) : !fir.ref>> ! CHECK: %[[VAL_1:.*]] = fir.load %[[VAL_0]] : !fir.ref>> ! CHECK: %[[VAL_2:.*]] = fir.box_addr %[[VAL_1]] : (!fir.box>) -> !fir.heap -! CHECK: %[[VAL_9:.*]]:2 = fir.do_loop{{.*}}iter_args(%[[IV:.*]] = {{.*}}) +! CHECK: %[[VAL_9:.*]] = fir.do_loop{{.*}}iter_args(%[[IV:.*]] = {{.*}}) ! CHECK: fir.store %[[IV]] to %[[VAL_2]] : !fir.heap ! CHECK: } -! CHECK: fir.store %[[VAL_9]]#1 to %[[VAL_2]] : !fir.heap +! CHECK: fir.store %[[VAL_9]] to %[[VAL_2]] : !fir.heap end subroutine ! CHECK-LABEL: func @_QMtest_loop_varPtest_real_pointer diff --git a/flang/test/Lower/mixed_loops.f90 b/flang/test/Lower/mixed_loops.f90 index 991fd7aa82bb9..b0e1114b8dcf1 100644 --- a/flang/test/Lower/mixed_loops.f90 +++ b/flang/test/Lower/mixed_loops.f90 @@ -92,23 +92,22 @@ subroutine do_inside_while_loop ! 
CHECK-DAG: %[[C13:.*]] = fir.convert %[[C13_I32]] : (i32) -> index ! CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index ! CHECK: %[[I_LB:.*]] = fir.convert %[[C8]] : (index) -> i32 - ! CHECK: %[[RESULT:.*]]:2 = fir.do_loop %[[IDX:[^ ]*]] = + ! CHECK: %[[RESULT:.*]] = fir.do_loop %[[IDX:[^ ]*]] = ! CHECK-SAME: %[[C8]] to %[[C13]] step %[[C1]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref ! CHECK-DAG: %[[J2:.*]] = fir.load %[[J_REF]] : !fir.ref ! CHECK-DAG: %[[C2:.*]] = arith.constant 2 : i32 ! CHECK: %[[JINC:.*]] = arith.muli %[[C2]], %[[J2]] : i32 ! CHECK: fir.store %[[JINC]] to %[[J_REF]] : !fir.ref - ! CHECK: %[[IINC:.*]] = arith.addi %[[IDX]], %[[C1]] overflow : index ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[C1]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[IINC]], %[[I_IVINC]] : index, i32 + ! CHECK: fir.result %[[I_IVINC]] : i32 do i=8,13 j=j*2 - ! CHECK: fir.store %[[RESULT]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[RESULT]] to %[[I_REF]] : !fir.ref end do ! CHECK: br ^[[HDR1]] diff --git a/flang/test/Lower/nsw.f90 b/flang/test/Lower/nsw.f90 index 2ec1efb2af42a..e113c26a9dc80 100644 --- a/flang/test/Lower/nsw.f90 +++ b/flang/test/Lower/nsw.f90 @@ -84,7 +84,7 @@ subroutine loop_params(a,lb,ub,st) ! CHECK: %[[VAL_30:.*]] = arith.muli %[[VAL_29]], %[[VAL_4]] overflow : i32 ! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_30]] : (i32) -> index ! CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_26]] : (index) -> i32 -! CHECK: %[[VAL_33:.*]]:2 = fir.do_loop %[[VAL_34:.*]] = %[[VAL_26]] to %[[VAL_28]] step %[[VAL_31]] iter_args(%[[VAL_35:.*]] = %[[VAL_32]]) -> (index, i32) { +! 
CHECK: %[[VAL_33:.*]] = fir.do_loop %[[VAL_34:.*]] = %[[VAL_26]] to %[[VAL_28]] step %[[VAL_31]] iter_args(%[[VAL_35:.*]] = %[[VAL_32]]) -> (i32) { subroutine loop_params2(a,lb,ub,st) integer :: i, lb, ub, st diff --git a/flang/test/Parser/OpenMP/assumption.f90 b/flang/test/Parser/OpenMP/assumption.f90 index 0f333f99f9085..86cbad9e42f78 100644 --- a/flang/test/Parser/OpenMP/assumption.f90 +++ b/flang/test/Parser/OpenMP/assumption.f90 @@ -141,9 +141,11 @@ program p end program p !UNPARSE: PROGRAM p -!UNPARSE: !$OMP ASSUMES NO_OPENMP +!UNPARSE: !$OMP ASSUMES NO_OPENMP !UNPARSE: END PROGRAM p -!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclarativeAssumes -!PARSE-TREE: | Verbatim +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclarativeAssumes -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = assumes !PARSE-TREE: | OmpClauseList -> OmpClause -> NoOpenmp +!PARSE-TREE: | Flags = None +!PARSE-TREE: ImplicitPart -> diff --git a/flang/test/Parser/OpenMP/declare-mapper-unparse.f90 b/flang/test/Parser/OpenMP/declare-mapper-unparse.f90 index 30d75d02736f3..b53bf5ce10557 100644 --- a/flang/test/Parser/OpenMP/declare-mapper-unparse.f90 +++ b/flang/test/Parser/OpenMP/declare-mapper-unparse.f90 @@ -9,7 +9,7 @@ program main end type ty -!CHECK: !$OMP DECLARE MAPPER (mymapper:ty::mapped) MAP(mapped,mapped%x) +!CHECK: !$OMP DECLARE MAPPER(mymapper:ty::mapped) MAP(mapped,mapped%x) !$omp declare mapper(mymapper : ty :: mapped) map(mapped, mapped%x) !PARSE-TREE: OpenMPDeclareMapperConstruct @@ -24,7 +24,7 @@ program main !PARSE-TREE: DataRef -> Name = 'mapped' !PARSE-TREE: Name = 'x' -!CHECK: !$OMP DECLARE MAPPER (ty::mapped) MAP(mapped,mapped%x) +!CHECK: !$OMP DECLARE MAPPER(ty::mapped) MAP(mapped,mapped%x) !$omp declare mapper(ty :: mapped) map(mapped, mapped%x) !PARSE-TREE: OpenMPDeclareMapperConstruct diff --git a/flang/test/Parser/OpenMP/declare-reduction-multi.f90 b/flang/test/Parser/OpenMP/declare-reduction-multi.f90 index 693e69d8896be..0af3ed6e78571 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-multi.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-multi.f90 @@ -26,111 +26,123 @@ program omp_examples type(tt) :: values(n), sum, prod, big, small !$omp declare reduction(+:tt:omp_out%r = omp_out%r + omp_in%r) initializer(omp_priv%r = 0) -!CHECK: !$OMP DECLARE REDUCTION (+:tt: omp_out%r=omp_out%r+omp_in%r -!CHECK-NEXT: ) INITIALIZER(omp_priv%r=0_4) -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -!PARSE-TREE: Verbatim -!PARSE-TREE: OmpReductionSpecifier -!PARSE-TREE-NEXT: OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add -!PARSE-TREE: OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec -!PARSE-TREE-NEXT: Name = 'tt' -!PARSE-TREE: OmpReductionCombiner -> AssignmentStmt = 'omp_out%r=omp_out%r+omp_in%r' -!PARSE-TREE: OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=0._4 +!CHECK: !$OMP DECLARE REDUCTION(+:tt: omp_out%r = omp_out%r+omp_in%r) INITIALIZER(omp_priv%r = 0_4) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier +!PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add +!PARSE-TREE: | | OmpTypeNameList -> 
OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec +!PARSE-TREE: | | | Name = 'tt' +!PARSE-TREE: | | OmpReductionCombiner -> AssignmentStmt = 'omp_out%r=omp_out%r+omp_in%r' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=0._4' + !$omp declare reduction(*:tt:omp_out%r = omp_out%r * omp_in%r) initializer(omp_priv%r = 1) -!CHECK-NEXT: !$OMP DECLARE REDUCTION (*:tt: omp_out%r=omp_out%r*omp_in%r -!CHECK-NEXT: ) INITIALIZER(omp_priv%r=1_4) -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -!PARSE-TREE: Verbatim -!PARSE-TREE: OmpReductionSpecifier -!PARSE-TREE: OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Multiply -!PARSE-TREE: OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec -!PARSE-TREE-NEXT: Name = 'tt' -!PARSE-TREE: OmpReductionCombiner -> AssignmentStmt = 'omp_out%r=omp_out%r*omp_in%r' -!PARSE-TREE: OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=1._4' +!CHECK-NEXT: !$OMP DECLARE REDUCTION(*:tt: omp_out%r = omp_out%r*omp_in%r) INITIALIZER(omp_priv%r = 1_4) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier +!PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Multiply +!PARSE-TREE: | | OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec +!PARSE-TREE: | | | Name = 'tt' +!PARSE-TREE: | | OmpReductionCombiner -> AssignmentStmt = 'omp_out%r=omp_out%r*omp_in%r' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=1._4' + !$omp declare reduction(max:tt:omp_out = mymax(omp_out, omp_in)) initializer(omp_priv%r = 0) -!CHECK-NEXT: !$OMP DECLARE REDUCTION (max:tt: omp_out=mymax(omp_out,omp_in) -!CHECK-NEXT: ) INITIALIZER(omp_priv%r=0_4) -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -!PARSE-TREE: Verbatim -!PARSE-TREE: OmpReductionSpecifier -!PARSE-TREE: OmpReductionIdentifier -> ProcedureDesignator -> Name = 'max' -!PARSE-TREE: OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec -!PARSE-TREE: Name = 'tt' -!PARSE-TREE: OmpReductionCombiner -> AssignmentStmt = 'omp_out=mymax(omp_out,omp_in)' -!PARSE-TREE: OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=0._4' +!CHECK-NEXT: !$OMP DECLARE REDUCTION(max:tt: omp_out = mymax(omp_out,omp_in)) INITIALIZER(omp_priv%r = 0_4) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier +!PARSE-TREE: | | OmpReductionIdentifier -> ProcedureDesignator -> Name = 'max' +!PARSE-TREE: | | OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec +!PARSE-TREE: | | | Name = 'tt' +!PARSE-TREE: | | OmpReductionCombiner -> AssignmentStmt = 'omp_out=mymax(omp_out,omp_in)' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=0._4' + !$omp declare 
reduction(min:tt:omp_out%r = min(omp_out%r, omp_in%r)) initializer(omp_priv%r = 1) -!CHECK-NEXT: !$OMP DECLARE REDUCTION (min:tt: omp_out%r=min(omp_out%r,omp_in%r) -!CHECK-NEXT: ) INITIALIZER(omp_priv%r=1_4) -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -!PARSE-TREE: Verbatim -!PARSE-TREE: OmpReductionSpecifier -!PARSE-TREE: OmpReductionIdentifier -> ProcedureDesignator -> Name = 'min' -!PARSE-TREE: OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec -!PARSE-TREE: Name = 'tt' -!PARSE-TREE: OmpReductionCombiner -> AssignmentStmt = 'omp_out%r=min(omp_out%r,omp_in%r)' -!PARSE-TREE: OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=1._4' +!CHECK-NEXT: !$OMP DECLARE REDUCTION(min:tt: omp_out%r = min(omp_out%r,omp_in%r)) INITIALIZER(omp_priv%r = 1_4) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier +!PARSE-TREE: | | OmpReductionIdentifier -> ProcedureDesignator -> Name = 'min' +!PARSE-TREE: | | OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec +!PARSE-TREE: | | | Name = 'tt' +!PARSE-TREE: | | OmpReductionCombiner -> AssignmentStmt = 'omp_out%r=min(omp_out%r,omp_in%r)' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv%r=1._4' + call random_number(values%r) sum%r = 0 !$omp parallel do reduction(+:sum) -!CHECK: !$OMP PARALLEL DO REDUCTION(+: sum) +!CHECK: !$OMP PARALLEL DO REDUCTION(+: sum) + !PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct -!PARSE-TREE: OmpBeginLoopDirective -!PARSE-TREE: OmpDirectiveName -> llvm::omp::Directive = parallel do -!PARSE-TREE: OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause -!PARSE-TREE: Modifier -> OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add -!PARSE-TREE: OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'sum -!PARSE-TREE: Flags = None -!PARSE-TREE: DoConstruct +!PARSE-TREE: | OmpBeginLoopDirective +!PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = parallel do +!PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause +!PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add +!PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'sum' +!PARSE-TREE: | | Flags = None +!PARSE-TREE: | DoConstruct + do i = 1, n sum%r = sum%r + values(i)%r end do prod%r = 1 !$omp parallel do reduction(*:prod) -!CHECK: !$OMP PARALLEL DO REDUCTION(*: prod) -!PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct -!PARSE-TREE: OmpBeginLoopDirective -!PARSE-TREE: OmpDirectiveName -> llvm::omp::Directive = parallel do -!PARSE-TREE: OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause -!PARSE-TREE: Modifier -> OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Multiply -!PARSE-TREE: OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'prod' -!PARSE-TREE: Flags = None -!PARSE-TREE: DoConstruct +!CHECK: !$OMP PARALLEL DO REDUCTION(*: prod) + +!PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct 
+!PARSE-TREE: | OmpBeginLoopDirective +!PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = parallel do +!PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause +!PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Multiply +!PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'prod' +!PARSE-TREE: | | Flags = None +!PARSE-TREE: | DoConstruct + do i = 1, n prod%r = prod%r * (values(i)%r+0.6) end do big%r = 0 !$omp parallel do reduction(max:big) -!CHECK: $OMP PARALLEL DO REDUCTION(max: big) +!CHECK: $OMP PARALLEL DO REDUCTION(max: big) + !PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct -!PARSE-TREE: OmpBeginLoopDirective -!PARSE-TREE: OmpDirectiveName -> llvm::omp::Directive = parallel do -!PARSE-TREE: OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause -!PARSE-TREE: Modifier -> OmpReductionIdentifier -> ProcedureDesignator -> Name = 'max' -!PARSE-TREE: OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'big' -!PARSE-TREE: Flags = None -!PARSE-TREE: DoConstruct +!PARSE-TREE: | OmpBeginLoopDirective +!PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = parallel do +!PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause +!PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> ProcedureDesignator -> Name = 'max' +!PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'big' +!PARSE-TREE: | | Flags = None +!PARSE-TREE: | DoConstruct + do i = 1, n big = mymax(values(i), big) end do small%r = 1 !$omp parallel do reduction(min:small) -!CHECK: !$OMP PARALLEL DO REDUCTION(min: small) -!CHECK-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct -!CHECK-TREE: OmpBeginLoopDirective -!CHECK-TREE: OmpDirectiveName -> llvm::omp::Directive = parallel do -!CHECK-TREE: OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause -!CHECK-TREE: Modifier -> OmpReductionIdentifier -> ProcedureDesignator -> Name = 'min' -!CHECK-TREE: OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'small' -!PARSE-TREE: Flags = None -!CHECK-TREE: DoConstruct +!CHECK: !$OMP PARALLEL DO REDUCTION(min: small) + +!PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct +!PARSE-TREE: | OmpBeginLoopDirective +!PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = parallel do +!PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause +!PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> ProcedureDesignator -> Name = 'min' +!PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'small' +!PARSE-TREE: | | Flags = None +!PARSE-TREE: | DoConstruct + do i = 1, n small%r = min(values(i)%r, small%r) end do - + print *, values%r print *, "sum=", sum%r print *, "prod=", prod%r diff --git a/flang/test/Parser/OpenMP/declare-reduction-operator.f90 b/flang/test/Parser/OpenMP/declare-reduction-operator.f90 index 7bfb78115b10d..347588468617b 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-operator.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-operator.f90 @@ -16,27 +16,31 @@ subroutine reduce_1 ( n, tts ) type(tt) :: tts(n) type(tt2) :: tts2(n) -!CHECK: !$OMP DECLARE REDUCTION (+:tt: omp_out=tt(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y) -!CHECK: ) INITIALIZER(omp_priv=tt(x=0_4,y=0_4)) -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> 
OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -!PARSE-TREE: Verbatim -!PARSE-TREE: OmpReductionSpecifier -!PARSE-TREE: OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add -!PARSE-TREE: OmpReductionCombiner -> AssignmentStmt = 'omp_out=tt(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' -!PARSE-TREE: OmpInitializerClause -> AssignmentStmt = 'omp_priv=tt(x=0_4,y=0_4)' - +!CHECK: !$OMP DECLARE REDUCTION(+:tt: omp_out = tt(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)) INITIALIZER(omp_priv = tt(x=0_4,y=0_4)) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier +!PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add +!PARSE-TREE: | | OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec +!PARSE-TREE: | | | Name = 'tt' +!PARSE-TREE: | | OmpReductionCombiner -> AssignmentStmt = 'omp_out=tt(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv=tt(x=0_4,y=0_4)' + !$omp declare reduction(+ : tt : omp_out = tt(omp_out%x - omp_in%x , omp_out%y - omp_in%y)) initializer(omp_priv = tt(0,0)) -!CHECK: !$OMP DECLARE REDUCTION (+:tt2: omp_out=tt2(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y) -!CHECK: ) INITIALIZER(omp_priv=tt2(x=0._8,y=0._8) -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -!PARSE-TREE: Verbatim -!PARSE-TREE: OmpReductionSpecifier -!PARSE-TREE: OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add -!PARSE-TREE: OmpReductionCombiner -> AssignmentStmt = 'omp_out=tt2(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' -!PARSE-TREE: OmpInitializerClause -> AssignmentStmt = 'omp_priv=tt2(x=0._8,y=0._8)' - +!CHECK: !$OMP DECLARE REDUCTION(+:tt2: omp_out = tt2(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)) INITIALIZER(omp_priv = tt2(x=0._8,y=0._8) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier +!PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add +!PARSE-TREE: | | OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec +!PARSE-TREE: | | | Name = 'tt2' +!PARSE-TREE: | | OmpReductionCombiner -> AssignmentStmt = 'omp_out=tt2(x=omp_out%x-omp_in%x,y=omp_out%y-omp_in%y)' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv=tt2(x=0._8,y=0._8)' + !$omp declare reduction(+ :tt2 : omp_out = tt2(omp_out%x - omp_in%x , omp_out%y - omp_in%y)) initializer(omp_priv = tt2(0,0)) type(tt) :: diffp = tt( 0, 0 ) diff --git a/flang/test/Parser/OpenMP/declare-reduction-unparse-with-symbols.f90 b/flang/test/Parser/OpenMP/declare-reduction-unparse-with-symbols.f90 index fbcd5b62821a3..455fc17871ad3 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-unparse-with-symbols.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-unparse-with-symbols.f90 @@ -8,6 +8,6 @@ subroutine f00 !CHECK: !DEF: /f00 (Subroutine) Subprogram !CHECK: subroutine f00 -!CHECK: !$omp declare 
reduction (fred:integer,real:omp_out = omp_in+omp_out) +!CHECK: !$omp declare reduction(fred:integer,real: omp_out = omp_in+omp_out) !CHECK: end subroutine diff --git a/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 b/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 index 0ed693e5821d6..7514f0cf83877 100644 --- a/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 +++ b/flang/test/Parser/OpenMP/declare-reduction-unparse.f90 @@ -18,21 +18,37 @@ subroutine initme(x,n) integer x,n end subroutine initme end interface -!CHECK: !$OMP DECLARE REDUCTION (red_add:INTEGER(KIND=4_4): omp_out=omp_out+omp_in -!CHECK: ) INITIALIZER(initme(omp_priv, 0_4)) !$omp declare reduction(red_add:integer(4):omp_out=omp_out+omp_in) initializer(initme(omp_priv,0)) -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -!PARSE-TREE: OmpReductionCombiner -> AssignmentStmt = 'omp_out=omp_out+omp_in' -!PARSE-TREE: OmpInitializerClause -> OmpInitializerProc -!PARSE-TREE-NEXT: ProcedureDesignator -> Name = 'initme' +!CHECK: !$OMP DECLARE REDUCTION(red_add:INTEGER(KIND=4_4): omp_out = omp_out+omp_in) INITIALIZER(initme(omp_priv, 0_4)) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier +!PARSE-TREE: | | OmpReductionIdentifier -> ProcedureDesignator -> Name = 'red_add' +!PARSE-TREE: | | OmpTypeNameList -> OmpTypeSpecifier -> DeclarationTypeSpec -> IntrinsicTypeSpec -> IntegerTypeSpec -> KindSelector -> Scalar -> Integer -> Constant -> Expr = '4_4' +!PARSE-TREE: | | | LiteralConstant -> IntLiteralConstant = '4' +!PARSE-TREE: | | OmpReductionCombiner -> AssignmentStmt = 'omp_out=omp_out+omp_in' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> OmpInitializerProc +!PARSE-TREE: | | ProcedureDesignator -> Name = 'initme' + res=init !$omp simd reduction(red_add:res) !CHECK: !$OMP SIMD REDUCTION(red_add: res) + +!PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> AssignmentStmt = 'res=init' +!PARSE-TREE: | Variable = 'res' +!PARSE-TREE: | | Designator -> DataRef -> Name = 'res' +!PARSE-TREE: | Expr = 'init' +!PARSE-TREE: | | Designator -> DataRef -> Name = 'init' !PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPLoopConstruct -!PARSE-TREE: OmpBeginLoopDirective -!PARSE-TREE: OmpDirectiveName -> llvm::omp::Directive = simd -!PARSE-TREE: OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause -!PARSE-TREE: Modifier -> OmpReductionIdentifier -> ProcedureDesignator -> Name = 'red_add +!PARSE-TREE: | OmpBeginLoopDirective +!PARSE-TREE: | | OmpDirectiveName -> llvm::omp::Directive = simd +!PARSE-TREE: | | OmpClauseList -> OmpClause -> Reduction -> OmpReductionClause +!PARSE-TREE: | | | Modifier -> OmpReductionIdentifier -> ProcedureDesignator -> Name = 'red_add' +!PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'res' +!PARSE-TREE: | | Flags = None +!PARSE-TREE: | DoConstruct + do i=1,n res=res+x(i) enddo @@ -43,8 +59,7 @@ end function func !CHECK-LABEL: program main program main integer :: my_var -!CHECK: !$OMP DECLARE REDUCTION (my_add_red:INTEGER: omp_out=omp_out+omp_in -!CHECK-NEXT: ) INITIALIZER(omp_priv=0_4) +!CHECK: !$OMP DECLARE REDUCTION(my_add_red:INTEGER: 
omp_out = omp_out+omp_in) INITIALIZER(omp_priv = 0_4) !$omp declare reduction (my_add_red : integer : omp_out = omp_out + omp_in) initializer (omp_priv=0) my_var = 0 @@ -54,8 +69,10 @@ program main print *, "sum of thread numbers is ", my_var end program main -!PARSE-TREE: OpenMPDeclareReductionConstruct -!PARSE-TREE: OmpReductionIdentifier -> ProcedureDesignator -> Name = 'my_add_red' -!PARSE-TREE: DeclarationTypeSpec -> IntrinsicTypeSpec -> IntegerTypeSpec -!PARSE-TREE: OmpReductionCombiner -> AssignmentStmt = 'omp_out=omp_out+omp_in' -!PARSE-TREE: OmpInitializerClause -> AssignmentStmt = 'omp_priv=0_4' +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier +!PARSE-TREE: | | OmpReductionIdentifier -> ProcedureDesignator -> Name = 'my_add_red' +!PARSE-TREE: | | OmpTypeNameList -> OmpTypeSpecifier -> DeclarationTypeSpec -> IntrinsicTypeSpec -> IntegerTypeSpec -> +!PARSE-TREE: | | OmpReductionCombiner -> AssignmentStmt = 'omp_out=omp_out+omp_in' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Initializer -> OmpInitializerClause -> AssignmentStmt = 'omp_priv=0_4' diff --git a/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 b/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 index 916bd66017ce0..16dc4eb44e6fd 100644 --- a/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 +++ b/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 @@ -1,5 +1,3 @@ -! REQUIRES: openmp_runtime - ! RUN: %flang_fc1 %openmp_flags -fopenmp-version=52 -fdebug-dump-parse-tree %s | FileCheck %s ! RUN: %flang_fc1 %openmp_flags -fdebug-unparse -fopenmp-version=52 %s | FileCheck %s --check-prefix="UNPARSE" @@ -15,11 +13,14 @@ function func() result(i) contains function func1() result(i) !$omp declare target enter(func1) indirect(.true.) 
- !CHECK: | | | | | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> Enter -> OmpEnterClause - !CHECK-NEXT: | | | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func1' - !CHECK-NEXT: | | | | | OmpClause -> Indirect -> OmpIndirectClause -> Scalar -> Logical -> Expr = '.true._4' - !CHECK-NEXT: | | | | | | LiteralConstant -> LogicalLiteralConstant - !CHECK-NEXT: | | | | | | | bool = 'true' + !CHECK: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification + !CHECK-NEXT: | OmpDirectiveName -> llvm::omp::Directive = declare target + !CHECK-NEXT: | OmpClauseList -> OmpClause -> Enter -> OmpEnterClause + !CHECK-NEXT: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func1' + !CHECK-NEXT: | OmpClause -> Indirect -> OmpIndirectClause -> Scalar -> Logical -> Expr = '.true._4' + !CHECK-NEXT: | | LiteralConstant -> LogicalLiteralConstant + !CHECK-NEXT: | | | bool = 'true' + !CHECK-NEXT: | Flags = None character(1) :: i i = 'a' return @@ -27,9 +28,12 @@ function func1() result(i) function func2() result(i) !$omp declare target enter(func2) indirect - !CHECK: | | | | | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> Enter -> OmpEnterClause - !CHECK-NEXT: | | | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func2' - !CHECK-NEXT: | | | | | OmpClause -> Indirect -> OmpIndirectClause -> + !CHECK: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification + !CHECK-NEXT: | OmpDirectiveName -> llvm::omp::Directive = declare target + !CHECK-NEXT: | OmpClauseList -> OmpClause -> Enter -> OmpEnterClause + !CHECK-NEXT: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func2' + !CHECK-NEXT: | OmpClause -> Indirect -> OmpIndirectClause -> + !CHECK-NEXT: | Flags = None character(1) :: i i = 'b' return @@ -51,5 +55,5 @@ program main end program -!UNPARSE: !$OMP DECLARE TARGET ENTER(func1) INDIRECT(.true._4) -!UNPARSE: !$OMP DECLARE TARGET ENTER(func2) INDIRECT() +!UNPARSE: !$OMP DECLARE TARGET ENTER(func1) INDIRECT(.true._4) +!UNPARSE: !$OMP DECLARE TARGET ENTER(func2) INDIRECT() diff --git a/flang/test/Parser/OpenMP/declare-target-to-clause.f90 b/flang/test/Parser/OpenMP/declare-target-to-clause.f90 index bcb23f821e403..8198f44bcec18 100644 --- a/flang/test/Parser/OpenMP/declare-target-to-clause.f90 +++ b/flang/test/Parser/OpenMP/declare-target-to-clause.f90 @@ -9,11 +9,13 @@ module m !UNPARSE: MODULE m !UNPARSE: INTEGER x, y -!UNPARSE: !$OMP DECLARE TARGET TO(x,y) +!UNPARSE: !$OMP DECLARE TARGET TO(x,y) !UNPARSE: END MODULE -!PARSE-TREE: OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> To -> OmpToClause -!PARSE-TREE: | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' -!PARSE-TREE: | OmpObject -> Designator -> DataRef -> Name = 'y' -!PARSE-TREE: | bool = 'true' - +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | OmpObject -> Designator -> DataRef -> Name = 'y' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None diff --git a/flang/test/Parser/OpenMP/declare-variant.f90 
b/flang/test/Parser/OpenMP/declare-variant.f90 index 3366b143e62e6..f5c34abd84ac7 100644 --- a/flang/test/Parser/OpenMP/declare-variant.f90 +++ b/flang/test/Parser/OpenMP/declare-variant.f90 @@ -2,15 +2,19 @@ ! RUN: %flang_fc1 -fdebug-dump-parse-tree-no-sema -fopenmp %s | FileCheck --check-prefix="PARSE-TREE" %s subroutine sub0 -!CHECK: !$OMP DECLARE VARIANT (sub:vsub) MATCH(CONSTRUCT={PARALLEL}) -!PARSE-TREE: OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -!PARSE-TREE: | Verbatim -!PARSE-TREE: | Name = 'sub' -!PARSE-TREE: | Name = 'vsub' +!CHECK: !$OMP DECLARE VARIANT(sub:vsub) MATCH(CONSTRUCT={PARALLEL}) + +!PARSE-TREE: OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare variant +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpBaseVariantNames +!PARSE-TREE: | | OmpObject -> Designator -> DataRef -> Name = 'sub' +!PARSE-TREE: | | OmpObject -> Designator -> DataRef -> Name = 'vsub' !PARSE-TREE: | OmpClauseList -> OmpClause -> Match -> OmpMatchClause -> OmpContextSelectorSpecification -> OmpTraitSetSelector !PARSE-TREE: | | OmpTraitSetSelectorName -> Value = Construct !PARSE-TREE: | | OmpTraitSelector !PARSE-TREE: | | | OmpTraitSelectorName -> llvm::omp::Directive = parallel +!PARSE-TREE: | Flags = None + !$omp declare variant (sub:vsub) match (construct={parallel}) contains subroutine vsub @@ -30,14 +34,17 @@ subroutine vsub (v1) integer, value :: v1 end subroutine sub (v1) -!CHECK: !$OMP DECLARE VARIANT (vsub) MATCH(CONSTRUCT={DISPATCH} -!PARSE-TREE: OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -!PARSE-TREE: | Verbatim -!PARSE-TREE: | Name = 'vsub' +!CHECK: !$OMP DECLARE VARIANT(vsub) MATCH(CONSTRUCT={DISPATCH}) + +!PARSE-TREE: OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare variant +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'vsub' !PARSE-TREE: | OmpClauseList -> OmpClause -> Match -> OmpMatchClause -> OmpContextSelectorSpecification -> OmpTraitSetSelector !PARSE-TREE: | | OmpTraitSetSelectorName -> Value = Construct !PARSE-TREE: | | OmpTraitSelector !PARSE-TREE: | | | OmpTraitSelectorName -> llvm::omp::Directive = dispatch +!PARSE-TREE: | Flags = None + !$omp declare variant(vsub), match(construct={dispatch}) integer, value :: v1 end @@ -56,17 +63,20 @@ subroutine vsub (v1, a1, a2) integer(omp_interop_kind), value :: a2 end subroutine sub (v1) -!CHECK: !$OMP DECLARE VARIANT (vsub) MATCH(CONSTRUCT={DISPATCH}) APPEND_ARGS(INTEROP(T& -!CHECK: !$OMP&ARGET),INTEROP(TARGET)) -!PARSE-TREE: OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -!PARSE-TREE: | Verbatim -!PARSE-TREE: | Name = 'vsub' +!CHECK: !$OMP DECLARE VARIANT(vsub) MATCH(CONSTRUCT={DISPATCH}) APPEND_ARGS(INTEROP(TA& +!CHECK: !$OMP&RGET),INTEROP(TARGET)) + +!PARSE-TREE: OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare variant +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'vsub' !PARSE-TREE: | OmpClauseList -> OmpClause -> Match -> OmpMatchClause -> OmpContextSelectorSpecification -> OmpTraitSetSelector !PARSE-TREE: | | OmpTraitSetSelectorName -> Value = Construct !PARSE-TREE: | | OmpTraitSelector !PARSE-TREE: | | | OmpTraitSelectorName -> llvm::omp::Directive = 
dispatch !PARSE-TREE: | OmpClause -> AppendArgs -> OmpAppendArgsClause -> OmpAppendOp -> OmpInteropType -> Value = Target !PARSE-TREE: | OmpAppendOp -> OmpInteropType -> Value = Target +!PARSE-TREE: | Flags = None + !$omp declare variant(vsub), match(construct={dispatch}), append_args (interop(target), interop(target)) integer, value :: v1 end @@ -81,11 +91,12 @@ subroutine sb3 (x1, x2) contains subroutine sub (v1, v2) type(c_ptr), value :: v1, v2 -!CHECK: !$OMP DECLARE VARIANT (vsub) MATCH(CONSTRUCT={DISPATCH}) ADJUST_ARGS(NOTHING:v& -!CHECK: !$OMP&1) ADJUST_ARGS(NEED_DEVICE_PTR:v2) -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -!PARSE-TREE: | Verbatim -!PARSE-TREE: | Name = 'vsub' +!CHECK: !$OMP DECLARE VARIANT(vsub) MATCH(CONSTRUCT={DISPATCH}) ADJUST_ARGS(NOTHING:v1& +!CHECK: !$OMP&) ADJUST_ARGS(NEED_DEVICE_PTR:v2) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare variant +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'vsub' !PARSE-TREE: | OmpClauseList -> OmpClause -> Match -> OmpMatchClause -> OmpContextSelectorSpecification -> OmpTraitSetSelector !PARSE-TREE: | | OmpTraitSetSelectorName -> Value = Construct !PARSE-TREE: | | OmpTraitSelector @@ -96,6 +107,8 @@ subroutine sub (v1, v2) !PARSE-TREE: | OmpClause -> AdjustArgs -> OmpAdjustArgsClause !PARSE-TREE: | | OmpAdjustOp -> Value = Need_Device_Ptr !PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'v2' +!PARSE-TREE: | Flags = None + !$omp declare variant(vsub) match ( construct = { dispatch } ) adjust_args(nothing : v1 ) adjust_args(need_device_ptr : v2) end subroutine vsub(v1, v2) @@ -119,13 +132,15 @@ subroutine f2 (x, y) !$omp declare variant (f1) match (construct={simd(uniform(y))}) end end subroutine -!CHECK: !$OMP DECLARE VARIANT (f1) MATCH(CONSTRUCT={SIMD(UNIFORM(y))}) -!PARSE-TREE: | | | | DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -!PARSE-TREE-NEXT: | | | | | Verbatim -!PARSE-TREE-NEXT: | | | | | Name = 'f1' -!PARSE-TREE-NEXT: | | | | | OmpClauseList -> OmpClause -> Match -> OmpMatchClause -> OmpContextSelectorSpecification -> OmpTraitSetSelector -!PARSE-TREE-NEXT: | | | | | | OmpTraitSetSelectorName -> Value = Construct -!PARSE-TREE-NEXT: | | | | | | OmpTraitSelector -!PARSE-TREE-NEXT: | | | | | | | OmpTraitSelectorName -> Value = Simd -!PARSE-TREE-NEXT: | | | | | | | Properties -!PARSE-TREE-NEXT: | | | | | | | | OmpTraitProperty -> OmpClause -> Uniform -> Name = 'y' +!CHECK: !$OMP DECLARE VARIANT(f1) MATCH(CONSTRUCT={SIMD(UNIFORM(y))}) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare variant +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'f1' +!PARSE-TREE: | OmpClauseList -> OmpClause -> Match -> OmpMatchClause -> OmpContextSelectorSpecification -> OmpTraitSetSelector +!PARSE-TREE: | | OmpTraitSetSelectorName -> Value = Construct +!PARSE-TREE: | | OmpTraitSelector +!PARSE-TREE: | | | OmpTraitSelectorName -> Value = Simd +!PARSE-TREE: | | | Properties +!PARSE-TREE: | | | | OmpTraitProperty -> OmpClause -> 
Uniform -> Name = 'y' +!PARSE-TREE: | Flags = None diff --git a/flang/test/Parser/OpenMP/declare_target-device_type.f90 b/flang/test/Parser/OpenMP/declare_target-device_type.f90 index b6903614a628e..7df796288f4d4 100644 --- a/flang/test/Parser/OpenMP/declare_target-device_type.f90 +++ b/flang/test/Parser/OpenMP/declare_target-device_type.f90 @@ -3,35 +3,113 @@ subroutine openmp_declare_target integer, save :: x, y - !CHECK: !$omp declare target device_type(host) enter(x) +!CHECK: !$omp declare target device_type(host) enter(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Host +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | Flags = None !$omp declare target device_type(host) enter(x) - !CHECK: !$omp declare target device_type(nohost) enter(x) + +!CHECK: !$omp declare target device_type(nohost) enter(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Nohost +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | Flags = None !$omp declare target device_type(nohost) enter(x) - !CHECK: !$omp declare target device_type(any) enter(x) + +!CHECK: !$omp declare target device_type(any) enter(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Any +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | Flags = None !$omp declare target device_type(any) enter(x) - !CHECK: !$omp declare target device_type(host) to(x) +!CHECK: !$omp declare target device_type(host) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Host +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(host) to(x) - !CHECK: !$omp declare target device_type(nohost) to(x) + +!CHECK: !$omp declare target device_type(nohost) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> 
DeviceTypeDescription = Nohost +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(nohost) to(x) - !CHECK: !$omp declare target device_type(any) to(x) + +!CHECK: !$omp declare target device_type(any) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Any +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(any) to(x) - !CHECK: !$omp declare target device_type(host) enter(y) to(x) +!CHECK: !$omp declare target device_type(host) enter(y) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Host +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'y' +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(host) enter(y) to(x) - !CHECK: !$omp declare target device_type(nohost) enter(y) to(x) + +!CHECK: !$omp declare target device_type(nohost) enter(y) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Nohost +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'y' +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(nohost) enter(y) to(x) - !CHECK: !$omp declare target device_type(any) enter(y) to(x) + +!CHECK: !$omp declare target device_type(any) enter(y) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Any +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'y' +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(any) enter(y) to(x) + integer :: a(1024), i - !CHECK: do 
+!CHECK: do do i = 1, 1024 a(i) = i - !CHECK: end do +!CHECK: end do end do -!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -!PARSE-TREE: OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Host -!PARSE-TREE: OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Nohost -!PARSE-TREE: OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Any END subroutine openmp_declare_target diff --git a/flang/test/Parser/OpenMP/enter-automap-modifier.f90 b/flang/test/Parser/OpenMP/enter-automap-modifier.f90 index 1f361ca5c2f06..bc5b5eb3e7ef3 100644 --- a/flang/test/Parser/OpenMP/enter-automap-modifier.f90 +++ b/flang/test/Parser/OpenMP/enter-automap-modifier.f90 @@ -8,9 +8,12 @@ program automap !UNPARSE: PROGRAM AUTOMAP !UNPARSE: INTEGER x -!UNPARSE: !$OMP DECLARE TARGET ENTER(AUTOMAP: x) +!UNPARSE: !$OMP DECLARE_TARGET ENTER(AUTOMAP: x) !UNPARSE: END PROGRAM -!PARSE-TREE: OmpClauseList -> OmpClause -> Enter -> OmpEnterClause -!PARSE-TREE-NEXT: | Modifier -> OmpAutomapModifier -> Value = Automap -!PARSE-TREE-NEXT: | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | Modifier -> OmpAutomapModifier -> Value = Automap +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | Flags = None diff --git a/flang/test/Parser/OpenMP/linear-clause.f90 b/flang/test/Parser/OpenMP/linear-clause.f90 index 5ea31ce58fc5a..b53dfe5f941a3 100644 --- a/flang/test/Parser/OpenMP/linear-clause.f90 +++ b/flang/test/Parser/OpenMP/linear-clause.f90 @@ -84,18 +84,16 @@ subroutine f03(x) !UNPARSE: SUBROUTINE f03 (x) !UNPARSE: INTEGER x -!UNPARSE: !$OMP DECLARE SIMD LINEAR(x: UVAL) +!UNPARSE: !$OMP DECLARE SIMD LINEAR(x: UVAL) !UNPARSE: END SUBROUTINE -!PARSE-TREE: SpecificationPart -![...] 
-!PARSE-TREE: | DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareSimdConstruct -!PARSE-TREE: | | Verbatim -!PARSE-TREE: | | OmpClauseList -> OmpClause -> Linear -> OmpLinearClause -!PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' -!PARSE-TREE: | | | Modifier -> OmpLinearModifier -> Value = Uval -!PARSE-TREE: | | | bool = 'true' -!PARSE-TREE: ExecutionPart -> Block +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareSimdConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare simd +!PARSE-TREE: | OmpClauseList -> OmpClause -> Linear -> OmpLinearClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | Modifier -> OmpLinearModifier -> Value = Uval +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None subroutine f04(x) integer :: x @@ -104,17 +102,15 @@ subroutine f04(x) !UNPARSE: SUBROUTINE f04 (x) !UNPARSE: INTEGER x -!UNPARSE: !$OMP DECLARE SIMD LINEAR(x: UVAL, STEP(3_4)) +!UNPARSE: !$OMP DECLARE SIMD LINEAR(x: UVAL, STEP(3_4)) !UNPARSE: END SUBROUTINE -!PARSE-TREE: SpecificationPart -![...] -!PARSE-TREE: | DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareSimdConstruct -!PARSE-TREE: | | Verbatim -!PARSE-TREE: | | OmpClauseList -> OmpClause -> Linear -> OmpLinearClause -!PARSE-TREE: | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' -!PARSE-TREE: | | | Modifier -> OmpLinearModifier -> Value = Uval -!PARSE-TREE: | | | Modifier -> OmpStepComplexModifier -> Scalar -> Integer -> Expr = '3_4' -!PARSE-TREE: | | | | LiteralConstant -> IntLiteralConstant = '3' -!PARSE-TREE: | | | bool = 'true' -!PARSE-TREE: ExecutionPart -> Block +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareSimdConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare simd +!PARSE-TREE: | OmpClauseList -> OmpClause -> Linear -> OmpLinearClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | Modifier -> OmpLinearModifier -> Value = Uval +!PARSE-TREE: | | Modifier -> OmpStepComplexModifier -> Scalar -> Integer -> Expr = '3_4' +!PARSE-TREE: | | | LiteralConstant -> IntLiteralConstant = '3' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None diff --git a/flang/test/Parser/OpenMP/metadirective-dirspec.f90 b/flang/test/Parser/OpenMP/metadirective-dirspec.f90 index 84064a0353678..baf969317c18f 100644 --- a/flang/test/Parser/OpenMP/metadirective-dirspec.f90 +++ b/flang/test/Parser/OpenMP/metadirective-dirspec.f90 @@ -105,8 +105,8 @@ subroutine f03 !UNPARSE: TYPE :: tt2 !UNPARSE: REAL :: x !UNPARSE: END TYPE -!UNPARSE: !$OMP METADIRECTIVE WHEN(USER={CONDITION(.true._4)}: DECLARE REDUCTION(+:tt1,tt2: omp_out%x=omp_in%x+omp_out%x -!UNPARSE: )) +!UNPARSE: !$OMP METADIRECTIVE WHEN(USER={CONDITION(.true._4)}: DECLARE REDUCTION(+:tt1,tt2: omp_out%x = omp_in%x+omp_out%x)& +!UNPARSE: !$OMP&) !UNPARSE: END SUBROUTINE !PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OmpMetadirectiveDirective diff --git a/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 b/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 index c2498c878f559..f4cdd556bd4e5 100644 --- a/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 +++ 
b/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 @@ -51,12 +51,12 @@ subroutine f01 !UNPARSE: TYPE :: t !UNPARSE: INTEGER :: x !UNPARSE: END TYPE -!UNPARSE: !$OMP DECLARE MAPPER (t::v) MAP(v%x) +!UNPARSE: !$OMP DECLARE_MAPPER(t::v) MAP(v%x) !UNPARSE: END SUBROUTINE -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareMapperConstruct -!PARSE-TREE: | Verbatim -!PARSE-TREE: | OmpMapperSpecifier +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareMapperConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare mapper +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpMapperSpecifier !PARSE-TREE: | | string = 't.omp.default.mapper' !PARSE-TREE: | | TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | Name = 't' @@ -66,6 +66,7 @@ subroutine f01 !PARSE-TREE: | | | DataRef -> Name = 'v' !PARSE-TREE: | | | Name = 'x' !PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None subroutine f02 type :: t @@ -78,13 +79,12 @@ subroutine f02 !UNPARSE: TYPE :: t !UNPARSE: INTEGER :: x !UNPARSE: END TYPE -!UNPARSE: !$OMP DECLARE REDUCTION (+:t: omp_out%x=omp_out%x+omp_in%x -!UNPARSE: ) +!UNPARSE: !$OMP DECLARE_REDUCTION(+:t: omp_out%x = omp_out%x+omp_in%x) !UNPARSE: END SUBROUTINE -!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -!PARSE-TREE: | Verbatim -!PARSE-TREE: | OmpReductionSpecifier +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareReductionConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare reduction +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpReductionSpecifier !PARSE-TREE: | | OmpReductionIdentifier -> DefinedOperator -> IntrinsicOperator = Add !PARSE-TREE: | | OmpTypeNameList -> OmpTypeSpecifier -> TypeSpec -> DerivedTypeSpec !PARSE-TREE: | | | Name = 't' @@ -104,30 +104,33 @@ subroutine f02 !PARSE-TREE: | | | | | | | DataRef -> Name = 'omp_in' !PARSE-TREE: | | | | | | | Name = 'x' !PARSE-TREE: | OmpClauseList -> +!PARSE-TREE: | Flags = None subroutine f03 !$omp declare_simd end !UNPARSE: SUBROUTINE f03 -!UNPARSE: !$OMP DECLARE SIMD +!UNPARSE: !$OMP DECLARE_SIMD !UNPARSE: END SUBROUTINE -!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareSimdConstruct -!PARSE-TREE: | Verbatim +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareSimdConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare simd !PARSE-TREE: | OmpClauseList -> +!PARSE-TREE: | Flags = None subroutine f04 !$omp declare_target end !UNPARSE: SUBROUTINE f04 -!UNPARSE: !$OMP DECLARE TARGET +!UNPARSE: !$OMP DECLARE_TARGET !UNPARSE: END SUBROUTINE -!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -!PARSE-TREE: | Verbatim -!PARSE-TREE: | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> +!PARSE-TREE: | Flags = None subroutine f05 implicit none @@ -144,12 +147,12 @@ subroutine g05 !UNPARSE: SUBROUTINE g05 !UNPARSE: END SUBROUTINE !UNPARSE: END INTERFACE -!UNPARSE: !$OMP DECLARE VARIANT (g05) MATCH(USER={CONDITION(.true._4)}) +!UNPARSE: !$OMP DECLARE_VARIANT(g05) 
MATCH(USER={CONDITION(.true._4)}) !UNPARSE: END SUBROUTINE -!PARSE-TREE: OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -!PARSE-TREE: | Verbatim -!PARSE-TREE: | Name = 'g05' +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OmpDeclareVariantDirective -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare variant +!PARSE-TREE: | OmpArgumentList -> OmpArgument -> OmpLocator -> OmpObject -> Designator -> DataRef -> Name = 'g05' !PARSE-TREE: | OmpClauseList -> OmpClause -> Match -> OmpMatchClause -> OmpContextSelectorSpecification -> OmpTraitSetSelector !PARSE-TREE: | | OmpTraitSetSelectorName -> Value = User !PARSE-TREE: | | OmpTraitSelector @@ -158,6 +161,7 @@ subroutine g05 !PARSE-TREE: | | | | OmpTraitProperty -> Scalar -> Expr = '.true._4' !PARSE-TREE: | | | | | LiteralConstant -> LogicalLiteralConstant !PARSE-TREE: | | | | | | bool = 'true' +!PARSE-TREE: | Flags = None subroutine f06 implicit none diff --git a/flang/test/Parser/OpenMP/requires.f90 b/flang/test/Parser/OpenMP/requires.f90 new file mode 100644 index 0000000000000..6cbb06eaf93c0 --- /dev/null +++ b/flang/test/Parser/OpenMP/requires.f90 @@ -0,0 +1,33 @@ +!RUN: %flang_fc1 -fdebug-unparse -fopenmp -fopenmp-version=50 %s | FileCheck --ignore-case --check-prefix="UNPARSE" %s +!RUN: %flang_fc1 -fdebug-dump-parse-tree -fopenmp -fopenmp-version=50 %s | FileCheck --check-prefix="PARSE-TREE" %s + +!$omp requires atomic_default_mem_order(seq_cst) + +!UNPARSE: !$OMP REQUIRES ATOMIC_DEFAULT_MEM_ORDER(SEQ_CST) + +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPRequiresConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires +!PARSE-TREE: | OmpClauseList -> OmpClause -> AtomicDefaultMemOrder -> OmpAtomicDefaultMemOrderClause -> OmpMemoryOrderType = Seq_Cst +!PARSE-TREE: | Flags = None + +!$omp requires unified_shared_memory unified_address + +!UNPARSE: !$OMP REQUIRES UNIFIED_SHARED_MEMORY UNIFIED_ADDRESS + +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPRequiresConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires +!PARSE-TREE: | OmpClauseList -> OmpClause -> UnifiedSharedMemory +!PARSE-TREE: | OmpClause -> UnifiedAddress +!PARSE-TREE: | Flags = None + +!$omp requires dynamic_allocators reverse_offload + +!UNPARSE: !$OMP REQUIRES DYNAMIC_ALLOCATORS REVERSE_OFFLOAD + +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPRequiresConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires +!PARSE-TREE: | OmpClauseList -> OmpClause -> DynamicAllocators +!PARSE-TREE: | OmpClause -> ReverseOffload +!PARSE-TREE: | Flags = None + +end diff --git a/flang/test/Parser/at-process.f b/flang/test/Parser/at-process.f index 4f54c6b65638b..e1abedf04ec84 100644 --- a/flang/test/Parser/at-process.f +++ b/flang/test/Parser/at-process.f @@ -19,4 +19,4 @@ subroutine f() !CHECK: Character in fixed-form label field must be a digit @precoss -!CHECK: at-process.f:14:1: error: parser FAIL (final position) + end diff --git a/flang/test/Parser/come-to-a-bad-end.f90 b/flang/test/Parser/come-to-a-bad-end.f90 new file mode 100644 index 0000000000000..f23e4a9ce779a --- /dev/null +++ b/flang/test/Parser/come-to-a-bad-end.f90 @@ -0,0 +1,13 @@ +!RUN: not %flang_fc1 -fsyntax-only %s 2>&1 | FileCheck %s +!CHECK:come-to-a-bad-end.f90:13:4: error: expected '(' +!CHECK: in the context: statement function definition +!CHECK: in the context: 
SUBROUTINE subprogram +!CHECK:error: expected declaration construct +!CHECK:come-to-a-bad-end.f90:13:1: in the context: specification part +!CHECK: in the context: SUBROUTINE subprogram +!CHECK:error: end of file +!CHECK: in the context: SUBROUTINE subprogram +subroutine a +end +subroutine b +gnd diff --git a/flang/test/Parser/recovery08.f90 b/flang/test/Parser/recovery08.f90 new file mode 100644 index 0000000000000..978e42bab9344 --- /dev/null +++ b/flang/test/Parser/recovery08.f90 @@ -0,0 +1,11 @@ +! RUN: not %flang_fc1 -fsyntax-only %s 2>&1 | FileCheck %s +! CHECK: error: end of file +! CHECK: ^ +! CHECK: in the context: END PROGRAM statement +! CHECK: in the context: main program + + integer :: i + + ! Add empty lines for emphasis + + i = 5 diff --git a/flang/test/Parser/unparseable.f90 b/flang/test/Parser/unparseable.f90 index 9e7a890b67a34..07126d0ffb27a 100644 --- a/flang/test/Parser/unparseable.f90 +++ b/flang/test/Parser/unparseable.f90 @@ -1,5 +1,9 @@ ! RUN: not %flang_fc1 -fsyntax-only %s 2>&1 | FileCheck %s -! CHECK: unparseable.f90:5:1: error: parser FAIL (final position) +! CHECK:error: end of file +! CHECK:in the context: END PROGRAM statement +! CHECK:unparseable.f90:9:1: in the context: main program +! CHECK:error: end of file +! CHECK:unparseable.f90:9:1: in the context: SELECT TYPE construct module m end select type (barf) diff --git a/flang/test/Preprocessing/fixed-free.f b/flang/test/Preprocessing/fixed-free.f new file mode 100644 index 0000000000000..95f63a4d71e4c --- /dev/null +++ b/flang/test/Preprocessing/fixed-free.f @@ -0,0 +1,8 @@ +!RUN: %flang -E %s 2>&1 | FileCheck %s +!RUN: %flang -fc1 -fsyntax-only %s 2>&1 | FileCheck --allow-empty %s +!CHECK-NOT: dir$ +!CHECK-NOT: error: +!dir$ fixed + continue +!dir$ free + end diff --git a/flang/test/Semantics/OpenACC/acc-sentinel.f90 b/flang/test/Semantics/OpenACC/acc-sentinel.f90 new file mode 100644 index 0000000000000..d34d97e8b1b5d --- /dev/null +++ b/flang/test/Semantics/OpenACC/acc-sentinel.f90 @@ -0,0 +1,14 @@ +! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenacc + +subroutine test_sentinel() +! Test for error since we currently do not have an OpenACC module upstream. +!ERROR: Cannot parse module file for module 'openacc': Source file 'openacc.mod' was not found + !@acc use openacc + integer :: i + + !$acc parallel loop + do i = 1, 10 + end do + !$acc end parallel + +end subroutine diff --git a/flang/test/Semantics/OpenMP/allocate-align01.f90 b/flang/test/Semantics/OpenMP/allocate-align01.f90 index 508efa82f12a0..4967330e37b48 100644 --- a/flang/test/Semantics/OpenMP/allocate-align01.f90 +++ b/flang/test/Semantics/OpenMP/allocate-align01.f90 @@ -13,7 +13,7 @@ program allocate_align_tree z = 3 !ERROR: The alignment value should be a constant positive integer !$omp allocate(j) align(xx) - !WARNING: OpenMP directive ALLOCATE has been deprecated, please use ALLOCATORS instead. 
[-Wopen-mp-usage] + !WARNING: The executable form of the OpenMP ALLOCATE directive has been deprecated, please use ALLOCATORS instead [-Wopen-mp-usage] !ERROR: The alignment value should be a constant positive integer !$omp allocate(xarray) align(-32) allocator(omp_large_cap_mem_alloc) allocate(j(z), xarray(t)) diff --git a/flang/test/Semantics/OpenMP/allocate01.f90 b/flang/test/Semantics/OpenMP/allocate01.f90 index e0b084ff0030b..1d99811156438 100644 --- a/flang/test/Semantics/OpenMP/allocate01.f90 +++ b/flang/test/Semantics/OpenMP/allocate01.f90 @@ -19,8 +19,7 @@ subroutine sema() !$omp allocate(y) print *, a - !WARNING: OpenMP directive ALLOCATE has been deprecated, please use ALLOCATORS instead. [-Wopen-mp-usage] - !ERROR: List items must be declared in the same scoping unit in which the ALLOCATE directive appears + !WARNING: The executable form of the OpenMP ALLOCATE directive has been deprecated, please use ALLOCATORS instead [-Wopen-mp-usage] !$omp allocate(x) allocator(omp_default_mem_alloc) allocate ( x(a), darray(a, b) ) end subroutine sema diff --git a/flang/test/Semantics/OpenMP/allocate08.f90 b/flang/test/Semantics/OpenMP/allocate08.f90 index fc950ea4fca36..5bfa918be4cad 100644 --- a/flang/test/Semantics/OpenMP/allocate08.f90 +++ b/flang/test/Semantics/OpenMP/allocate08.f90 @@ -27,10 +27,12 @@ subroutine allocate() !$omp allocate(x) allocator(omp_default_mem_alloc) !$omp allocate(y) allocator(omp_default_mem_alloc) + !ERROR: List items must be declared in the same scoping unit in which the ALLOCATE directive appears !$omp allocate(z) allocator(omp_default_mem_alloc) !$omp allocate(x) !$omp allocate(y) + !ERROR: List items must be declared in the same scoping unit in which the ALLOCATE directive appears !$omp allocate(z) !$omp allocate(w) allocator(custom_allocator) @@ -40,5 +42,6 @@ subroutine allocate() !ERROR: If list items within the ALLOCATE directive have the SAVE attribute, are a common block name, or are declared in the scope of a module, then only predefined memory allocator parameters can be used in the allocator clause !$omp allocate(y) allocator(custom_allocator) !ERROR: If list items within the ALLOCATE directive have the SAVE attribute, are a common block name, or are declared in the scope of a module, then only predefined memory allocator parameters can be used in the allocator clause + !ERROR: List items must be declared in the same scoping unit in which the ALLOCATE directive appears !$omp allocate(z) allocator(custom_allocator) end subroutine allocate diff --git a/flang/test/Semantics/OpenMP/allocators04.f90 b/flang/test/Semantics/OpenMP/allocators04.f90 index 212e48fbd1b26..c71c7ca8466ba 100644 --- a/flang/test/Semantics/OpenMP/allocators04.f90 +++ b/flang/test/Semantics/OpenMP/allocators04.f90 @@ -22,10 +22,12 @@ subroutine allocate() trait(1)%value = default_mem_fb custom_allocator = omp_init_allocator(omp_default_mem_space, 1, trait) + !ERROR: List items must be declared in the same scoping unit in which the ALLOCATORS directive appears !$omp allocators allocate(omp_default_mem_alloc: a) allocate(a) !ERROR: If list items within the ALLOCATORS directive have the SAVE attribute, are a common block name, or are declared in the scope of a module, then only predefined memory allocator parameters can be used in the allocator clause + !ERROR: List items must be declared in the same scoping unit in which the ALLOCATORS directive appears !$omp allocators allocate(custom_allocator: b) allocate(b) end subroutine diff --git 
a/flang/test/Semantics/OpenMP/allocators05.f90 b/flang/test/Semantics/OpenMP/allocators05.f90 index 0e8366a2461e6..efacdfaec7647 100644 --- a/flang/test/Semantics/OpenMP/allocators05.f90 +++ b/flang/test/Semantics/OpenMP/allocators05.f90 @@ -15,11 +15,9 @@ subroutine allocate() integer, parameter :: LEN = 2 !$omp target private(a, b) - !ERROR: List items must be declared in the same scoping unit in which the ALLOCATORS directive appears !$omp allocators allocate(omp_default_mem_alloc: a) allocate(a(LEN)) !ERROR: ALLOCATORS directives that appear in a TARGET region must specify an allocator - !ERROR: List items must be declared in the same scoping unit in which the ALLOCATORS directive appears !$omp allocators allocate(b) allocate(b(LEN)) !$omp end target diff --git a/flang/test/Semantics/OpenMP/blank-common-block.f90 b/flang/test/Semantics/OpenMP/blank-common-block.f90 index 4a217fced0ff7..e410f0c56d1c0 100644 --- a/flang/test/Semantics/OpenMP/blank-common-block.f90 +++ b/flang/test/Semantics/OpenMP/blank-common-block.f90 @@ -4,6 +4,7 @@ module m integer :: a common // a !ERROR: Blank common blocks are not allowed as directive or clause arguments + !ERROR: An argument to the DECLARE TARGET directive should be an extended-list-item !$omp declare_target(//) !ERROR: Blank common blocks are not allowed as directive or clause arguments !$omp threadprivate(//) diff --git a/flang/test/Semantics/OpenMP/declare-mapper04.f90 b/flang/test/Semantics/OpenMP/declare-mapper04.f90 new file mode 100644 index 0000000000000..2f45e230c3513 --- /dev/null +++ b/flang/test/Semantics/OpenMP/declare-mapper04.f90 @@ -0,0 +1,18 @@ +! RUN: %python %S/../test_errors.py %s %flang -fopenmp -fopenmp-version=60 + +type :: t1 + integer :: y +end type + +type :: t2 + integer :: y +end type + +!ERROR: DECLARE_MAPPER directive should have a single argument +!$omp declare mapper(m1:t1::x, m2:t2::x) map(x, x%y) + +integer :: x(10) +!ERROR: The argument to the DECLARE_MAPPER directive should be a mapper-specifier +!$omp declare mapper(x) map(to: x) + +end diff --git a/flang/test/Semantics/OpenMP/declare-reduction-modfile.f90 b/flang/test/Semantics/OpenMP/declare-reduction-modfile.f90 index f80eb1097e18a..0882de80fdcc6 100644 --- a/flang/test/Semantics/OpenMP/declare-reduction-modfile.f90 +++ b/flang/test/Semantics/OpenMP/declare-reduction-modfile.f90 @@ -6,12 +6,12 @@ !type::t1 !integer(4)::val !endtype -!!$OMP DECLARE REDUCTION (*:t1:omp_out = omp_out*omp_in) INITIALIZER(omp_priv=t& -!!$OMP&1(1)) +!!$OMP DECLARE REDUCTION(*:t1:omp_out=omp_out*omp_in)INITIALIZER(omp_priv=& +!!$OMP&t1(1)) !!$OMP METADIRECTIVE OTHERWISE(DECLARE REDUCTION(+:INTEGER)) -!!$OMP DECLARE REDUCTION (.fluffy.:t1:omp_out = omp_out.fluffy.omp_in) INITIALI& +!!$OMP DECLARE REDUCTION(.fluffy.:t1:omp_out=omp_out.fluffy.omp_in)INITIALI& !!$OMP&ZER(omp_priv=t1(0)) -!!$OMP DECLARE REDUCTION (.mul.:t1:omp_out = omp_out.mul.omp_in) INITIALIZER(om& +!!$OMP DECLARE REDUCTION(.mul.:t1:omp_out=omp_out.mul.omp_in)INITIALIZER(om& !!$OMP&p_priv=t1(1)) !interface operator(.mul.) !procedure::mul diff --git a/flang/test/Semantics/OpenMP/declare-simd.f90 b/flang/test/Semantics/OpenMP/declare-simd.f90 new file mode 100644 index 0000000000000..bb259b8722ca2 --- /dev/null +++ b/flang/test/Semantics/OpenMP/declare-simd.f90 @@ -0,0 +1,27 @@ +! 
RUN: %python %S/../test_errors.py %s %flang -fopenmp -fopenmp-version=60 + +module m + +!ERROR: The name 'x' should refer to a procedure +!$omp declare_simd(x) + +!ERROR: DECLARE_SIMD directive should have at most one argument +!$omp declare_simd(f00, f01) + +!ERROR: The argument to the DECLARE_SIMD directive should be a procedure name +!$omp declare_simd(v : integer) + +contains + +subroutine f00 +end + +subroutine f01 +end + +integer function f02 +!Ok, expect no diagnostics +!$omp declare_simd(f02) +end + +end module diff --git a/flang/test/Semantics/OpenMP/declare-target-function-name-with-symbols.f90 b/flang/test/Semantics/OpenMP/declare-target-function-name-with-symbols.f90 index 9a0acdb3dd100..3439e6fd13981 100644 --- a/flang/test/Semantics/OpenMP/declare-target-function-name-with-symbols.f90 +++ b/flang/test/Semantics/OpenMP/declare-target-function-name-with-symbols.f90 @@ -19,7 +19,7 @@ end module test !CHECK: !DEF: /test/ex/b ObjectEntity INTEGER(4) !CHECK: !DEF: /test/ex/c ObjectEntity INTEGER(4) !CHECK: function ex(a, b, c) -!CHECK: !$omp declare target (ex) +!CHECK: !$omp declare target(ex) !CHECK: !REF: /test/ex/a !CHECK: !REF: /test/ex/b !CHECK: !REF: /test/ex/c diff --git a/flang/test/Semantics/OpenMP/declare-variant.f90 b/flang/test/Semantics/OpenMP/declare-variant.f90 index 84a0cdcd10d91..6fc94a4fb837f 100644 --- a/flang/test/Semantics/OpenMP/declare-variant.f90 +++ b/flang/test/Semantics/OpenMP/declare-variant.f90 @@ -1,9 +1,9 @@ ! RUN: %python %S/../test_errors.py %s %flang -fopenmp -fopenmp-version=51 subroutine sub0 -!ERROR: Implicit subroutine declaration 'vsub1' in !$OMP DECLARE VARIANT +!ERROR: The name 'vsub1' should refer to a procedure !$omp declare variant (sub:vsub1) match (construct={parallel}) -!ERROR: Implicit subroutine declaration 'sub1' in !$OMP DECLARE VARIANT +!ERROR: The name 'sub1' should refer to a procedure !$omp declare variant (sub1:vsub) match (construct={parallel}) contains subroutine vsub diff --git a/flang/test/Semantics/OpenMP/do22.f90 b/flang/test/Semantics/OpenMP/do22.f90 new file mode 100644 index 0000000000000..9d96d3af54e5c --- /dev/null +++ b/flang/test/Semantics/OpenMP/do22.f90 @@ -0,0 +1,73 @@ +! RUN: %python %S/../test_errors.py %s %flang -fopenmp +! Check for existence of loop following a DO directive + +subroutine do_imperfectly_nested_before + integer i, j + + !ERROR: The value of the parameter in the COLLAPSE or ORDERED clause must not be larger than the number of nested loops following the construct. + !$omp do collapse(2) + do i = 1, 10 + print *, i + do j = 1, 10 + print *, i, j + end do + end do + !$omp end do +end subroutine + + +subroutine do_imperfectly_nested_behind + integer i, j + + !ERROR: Canonical loop nest must be perfectly nested. 
+ !$omp do collapse(2) + do i = 1, 10 + do j = 1, 10 + print *, i, j + end do + print *, i + end do + !$omp end do +end subroutine + + +subroutine do_nonrectangular_lb + integer i, j + + !ERROR: Trip count must be computable and invariant + !$omp do collapse(2) + do i = 1, 10 + do j = i, 10 + print *, i, j + end do + end do + !$omp end do +end subroutine + + +subroutine do_nonrectangular_ub + integer i, j + + !ERROR: Trip count must be computable and invariant + !$omp do collapse(2) + do i = 1, 10 + do j = 0, i + print *, i, j + end do + end do + !$omp end do +end subroutine + + +subroutine do_nonrectangular_step + integer i, j + + !ERROR: Trip count must be computable and invariant + !$omp do collapse(2) + do i = 1, 10 + do j = 1, 10, i + print *, i, j + end do + end do + !$omp end do +end subroutine diff --git a/flang/test/Semantics/bug133669.f90 b/flang/test/Semantics/bug133669.f90 new file mode 100644 index 0000000000000..b4d55db193a2c --- /dev/null +++ b/flang/test/Semantics/bug133669.f90 @@ -0,0 +1,51 @@ +!RUN: %python %S/test_errors.py %s %flang_fc1 +module m + contains + subroutine s(x, y, mask) + class(*), allocatable, intent(in out) :: x(:), y(:) + logical, intent(in) :: mask(:) + select type(x) + type is(integer) + print *, 'before, x is integer', x + type is(real) + print *, 'before, x is real', x + class default + print *, 'before, x has some other type' + end select + select type(y) + type is(integer) + print *, 'y is integer', y + type is(real) + print *, 'y is real', y + end select + print *, 'mask', mask + !ERROR: Assignment to whole polymorphic allocatable 'x' may not be nested in a WHERE statement or construct + where(mask) x = y + select type(x) + type is(integer) + print *, 'after, x is integer', x + type is(real) + print *, 'after, x is real', x + class default + print *, 'before, x has some other type' + end select + print * + end +end + +program main + use m + class(*), allocatable :: x(:), y(:) + x = [1, 2] + y = [3., 4.] + call s(x, y, [.false., .false.]) + x = [1, 2] + y = [3., 4.] + call s(x, y, [.false., .true.]) + x = [1, 2] + y = [3., 4.] + call s(x, y, [.true., .false.]) + x = [1, 2] + y = [3., 4.] + call s(x, y, [.true., .true.]) +end program main diff --git a/flang/test/Semantics/bug157124.f90 b/flang/test/Semantics/bug157124.f90 new file mode 100644 index 0000000000000..92326dc9e7b69 --- /dev/null +++ b/flang/test/Semantics/bug157124.f90 @@ -0,0 +1,11 @@ +! RUN: %python %S/test_errors.py %s %flang_fc1 +pure subroutine puresub + intrinsic sleep, chdir, get_command + character(80) str + !ERROR: Procedure 'impureexternal' referenced in pure subprogram 'puresub' must be pure too + call impureExternal ! implicit interface + !ERROR: Procedure 'sleep' referenced in pure subprogram 'puresub' must be pure too + call sleep(1) ! intrinsic subroutine, debatably impure + !ERROR: Procedure 'chdir' referenced in pure subprogram 'puresub' must be pure too + call chdir('.') ! 
"dual" function/subroutine, impure +end diff --git a/flang/test/Semantics/bug159554.f90 b/flang/test/Semantics/bug159554.f90 new file mode 100644 index 0000000000000..f5a51ebc0b5cb --- /dev/null +++ b/flang/test/Semantics/bug159554.f90 @@ -0,0 +1,8 @@ +!RUN: %python %S/test_errors.py %s %flang_fc1 +use, intrinsic :: iso_c_binding +interface c_funloc +!ERROR: 'c_funloc' is already declared in this scoping unit + function c_funloc() + end function +end interface +end diff --git a/flang/test/Semantics/bug159977.f90 b/flang/test/Semantics/bug159977.f90 new file mode 100644 index 0000000000000..ee731c9fb170e --- /dev/null +++ b/flang/test/Semantics/bug159977.f90 @@ -0,0 +1,11 @@ +! RUN: %flang_fc1 -fsyntax-only -pedantic %s 2>&1 | FileCheck %s --allow-empty +! Ensure no bogus "no explicit type for ..." error on USE-associated +! implicitly-typed COMMON block object in scope with IMPLICIT NONE. +! CHECK-NOT: error: +module m + common /block/ var +end +subroutine test + use m + implicit none +end diff --git a/flang/test/Semantics/contiguous-warn.f90 b/flang/test/Semantics/contiguous-warn.f90 new file mode 100644 index 0000000000000..2eb1f1c0857f7 --- /dev/null +++ b/flang/test/Semantics/contiguous-warn.f90 @@ -0,0 +1,6 @@ +! RUN: %python %S/test_errors.py %s %flang_fc1 -pedantic -Werror +integer, parameter :: num = 3 +integer, parameter :: arr(num)=[(i, i=1,num)] +!WARNING: is_contiguous() is always true for named constants and subobjects of named constants [-Wconstant-is-contiguous] +logical, parameter :: result=is_contiguous(arr(num:1:-1)) +end diff --git a/flang/test/Semantics/cuf23.cuf b/flang/test/Semantics/cuf23.cuf new file mode 100644 index 0000000000000..8c03c18d9b0db --- /dev/null +++ b/flang/test/Semantics/cuf23.cuf @@ -0,0 +1,55 @@ +! RUN: %python %S/test_errors.py %s %flang_fc1 -fopenacc + +module devicemod + real, constant :: c(10) +end module + +program test + use devicemod + real, device :: a(10) + real, managed :: m(10) + a = 1.0 +!ERROR: device data not allowed in I/O statements + print *, a(1) +!ERROR: device data not allowed in I/O statements + print *, a + + print*, m(9) ! ok + print*, m ! ok + +!ERROR: device data not allowed in I/O statements + print*, c +!ERROR: device data not allowed in I/O statements + print*, c(5) +end + +subroutine host() + integer :: i + real, device :: a(10) + !$acc parallel loop + do i = 1, 10 + print*, a(i) ! ok + end do + + !$cuf kernel do + do i = 1, 10 + print*, a(i) ! ok + end do +end subroutine + +attributes(global) subroutine global1() + real, device :: a(10) + print*, a ! ok +end subroutine + +attributes(device) subroutine device1() + real, device :: a(10) + print*, a ! ok +end subroutine + +attributes(global) subroutine global_with_block() + block + real, device :: a(10) + print*, a ! ok + end block +end subroutine diff --git a/flang/test/Semantics/data24.f90 b/flang/test/Semantics/data24.f90 new file mode 100644 index 0000000000000..b645bd1ff5bc6 --- /dev/null +++ b/flang/test/Semantics/data24.f90 @@ -0,0 +1,16 @@ +! RUN: %flang_fc1 -fdebug-dump-symbols %s 2>&1 | FileCheck %s +! Ensure that DATA-style default component /initializers/ are processed +! before they are needed to handle EQUIVALENCE'd storage. 
+type t + sequence + integer :: j(10) /1,2,3,4,5,6,7,8,9,10/ +end type +type(t) :: A +integer arr(10) +equivalence (A, arr) +end + +!CHECK: .F18.0, SAVE (CompilerCreated) size=40 offset=0: ObjectEntity type: INTEGER(4) shape: 1_8:10_8 init:[INTEGER(4)::1_4,2_4,3_4,4_4,5_4,6_4,7_4,8_4,9_4,10_4] +!CHECK: a size=40 offset=0: ObjectEntity type: TYPE(t) +!CHECK: arr size=40 offset=0: ObjectEntity type: INTEGER(4) shape: 1_8:10_8 +!CHECK: Equivalence Sets: (a,arr(1)) (.F18.0,a) diff --git a/flang/test/Semantics/elemental03.f90 b/flang/test/Semantics/elemental03.f90 new file mode 100644 index 0000000000000..1a2e22065b418 --- /dev/null +++ b/flang/test/Semantics/elemental03.f90 @@ -0,0 +1,13 @@ +!RUN: %python %S/test_errors.py %s %flang_fc1 +module m + contains + elemental real function f(x) + real, intent(in) :: x + f = x + end + subroutine s(a) + real a(..) + !ERROR: Assumed-rank array 'a' may not be used as an argument to an elemental procedure + print *, f(a) + end +end diff --git a/flang/test/Transforms/OpenMP/simd-only.mlir b/flang/test/Transforms/OpenMP/simd-only.mlir index 0025d10fbd21a..a550d5660d224 100644 --- a/flang/test/Transforms/OpenMP/simd-only.mlir +++ b/flang/test/Transforms/OpenMP/simd-only.mlir @@ -65,10 +65,10 @@ func.func @parallel(%arg0: i32, %arg1: !fir.ref) { // CHECK: fir.convert %16 = fir.convert %c100000_i32 : (i32) -> index // CHECK: fir.do_loop - %18:2 = fir.do_loop %arg4 = %15 to %16 step %c1 iter_args(%arg2 = %arg0) -> (index, i32) { + %18 = fir.do_loop %arg4 = %15 to %16 step %c1 iter_args(%arg2 = %arg0) -> (i32) { // CHECK: fir.store fir.store %arg0 to %arg1 : !fir.ref - fir.result %arg4, %arg2 : index, i32 + fir.result %arg2 : i32 } // CHECK-NOT: omp.terminator omp.terminator diff --git a/flang/test/Transforms/debug-complex-1.fir b/flang/test/Transforms/debug-complex-1.fir index f7be6b2d4a931..6e2c6c5bdb354 100644 --- a/flang/test/Transforms/debug-complex-1.fir +++ b/flang/test/Transforms/debug-complex-1.fir @@ -26,9 +26,9 @@ module { #loc3 = loc("./simple.f90":8:1) #loc4 = loc("./simple.f90":11:1) -// CHECK-DAG: #[[CMPX8:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[CMPX8:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[CMPX4:.*]] = #llvm.di_basic_type -// CHECK-DAG: #[[CMPX16:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[CMPX16:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[TY1:.*]] = #llvm.di_subroutine_type<{{.*}}types = #[[CMPX8]], #[[CMPX4]]> // CHECK-DAG: #[[TY2:.*]] = #llvm.di_subroutine_type<{{.*}}types = #[[CMPX16]], #[[CMPX4]]> diff --git a/flang/test/Transforms/debug-derived-type-1.fir b/flang/test/Transforms/debug-derived-type-1.fir index cfbd361a91e72..22832b67742c8 100644 --- a/flang/test/Transforms/debug-derived-type-1.fir +++ b/flang/test/Transforms/debug-derived-type-1.fir @@ -45,12 +45,12 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry, d // CHECK-DAG: #[[INT_TY:.*]] = #llvm.di_basic_type -// CHECK-DAG: #[[INT8_TY:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[INT8_TY:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[REAL4_TY:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[CMX8_TY:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[CMX_ARR:.*]] = #llvm.di_composite_type -// CHECK-DAG: #[[LOG_TY:.*]] = #llvm.di_basic_type -// CHECK-DAG: #[[REAL8_TY:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[LOG_TY:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[REAL8_TY:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[STR_TY:.*]] = #llvm.di_string_type // CHECK-DAG: #[[MOD:.*]] = #llvm.di_module<{{.*}}name = "m_employee"{{.*}}> // CHECK-DAG: #[[MOD1:.*]] = 
#llvm.di_module<{{.*}}name = "t1"{{.*}}> diff --git a/flang/test/Transforms/debug-fn-info.fir b/flang/test/Transforms/debug-fn-info.fir index c02835be50af5..e42beb1f748f1 100644 --- a/flang/test/Transforms/debug-fn-info.fir +++ b/flang/test/Transforms/debug-fn-info.fir @@ -64,10 +64,10 @@ module { #loc4 = loc("test2.f90":53:22) -// CHECK-DAG: #[[INT8:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[INT8:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[INT4:.*]] = #llvm.di_basic_type -// CHECK-DAG: #[[REAL8:.*]] = #llvm.di_basic_type -// CHECK-DAG: #[[LOG1:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[REAL8:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[LOG1:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[REAL4:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[LOG4:.*]] = #llvm.di_basic_type // CHECK: #[[TY0:.*]] = #llvm.di_subroutine_type diff --git a/flang/test/Transforms/debug-local-var.fir b/flang/test/Transforms/debug-local-var.fir index 06c9b01e75a61..d39017e6dd62a 100644 --- a/flang/test/Transforms/debug-local-var.fir +++ b/flang/test/Transforms/debug-local-var.fir @@ -71,10 +71,10 @@ module { #loc15 = loc("test.f90":21:24) #loc16 = loc("test.f90":22:5) -// CHECK-DAG: #[[INT8:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[INT8:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[INT4:.*]] = #llvm.di_basic_type -// CHECK-DAG: #[[REAL8:.*]] = #llvm.di_basic_type -// CHECK-DAG: #[[LOG1:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[REAL8:.*]] = #llvm.di_basic_type +// CHECK-DAG: #[[LOG1:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[REAL4:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[LOG4:.*]] = #llvm.di_basic_type // CHECK-DAG: #[[MAIN:.*]] = #llvm.di_subprogram<{{.*}}name = "mn"{{.*}}> diff --git a/flang/test/Transforms/debug-ref-type.fir b/flang/test/Transforms/debug-ref-type.fir index 745aebee778be..daffa293ba2e3 100644 --- a/flang/test/Transforms/debug-ref-type.fir +++ b/flang/test/Transforms/debug-ref-type.fir @@ -5,6 +5,6 @@ module { } #loc1 = loc("test.f90":5:1) -// CHECK: #[[INT8_TY:.*]] = #llvm.di_basic_type +// CHECK: #[[INT8_TY:.*]] = #llvm.di_basic_type // CHECK: #[[REF_TY:.*]] = #llvm.di_derived_type // CHECK: #llvm.di_subroutine_type<{{.*}}types = #[[REF_TY]], #[[INT8_TY]]> diff --git a/flang/test/Transforms/debug-split-dwarf.fir b/flang/test/Transforms/debug-split-dwarf.fir new file mode 100644 index 0000000000000..9c095457fb117 --- /dev/null +++ b/flang/test/Transforms/debug-split-dwarf.fir @@ -0,0 +1,12 @@ +// RUN: fir-opt --add-debug-info="split-dwarf-file=test.dwo" \ +// RUN: --mlir-print-debuginfo %s -o - | FileCheck %s + +module { + func.func @test() { + return + } loc(#loc1) +} +#loc1 = loc("test.f90":15:1) + +// CHECK: llvm.di_compile_unit +// CHECK-SAME: splitDebugFilename = "test.dwo" diff --git a/flang/test/Transforms/debug-tuple-type.fir b/flang/test/Transforms/debug-tuple-type.fir index e3b0bafdf3cd4..73a07333b3aef 100644 --- a/flang/test/Transforms/debug-tuple-type.fir +++ b/flang/test/Transforms/debug-tuple-type.fir @@ -5,7 +5,7 @@ module { func.func private @_FortranAioOutputDerivedType(!fir.ref>) } -// CHECK: #[[F64:.*]] = #llvm.di_basic_type +// CHECK: #[[F64:.*]] = #llvm.di_basic_type // CHECK: #[[CU:.*]] = #llvm.di_compile_unit<{{.*}}> // CHECK: #[[DTY1:.*]] = #llvm.di_derived_type // CHECK: #[[DTY2:.*]] = #llvm.di_derived_type diff --git a/flang/test/Transforms/debug-vector-type.fir b/flang/test/Transforms/debug-vector-type.fir index d3e1f6ec28d0f..9e41d90f407b9 100644 --- a/flang/test/Transforms/debug-vector-type.fir +++ b/flang/test/Transforms/debug-vector-type.fir @@ -2,22 +2,22 @@ 
module { func.func private @foo1(%arg0: !fir.vector<20:bf16>) -// CHECK-DAG: #[[F16:.*]] = #llvm.di_basic_type -// CHECK-DAG: #llvm.di_composite_type> +// CHECK-DAG: #[[F16:.*]] = #llvm.di_basic_type +// CHECK-DAG: #llvm.di_composite_type> func.func private @foo2(%arg0: !fir.vector<30:f32>) // CHECK-DAG: #[[F32:.*]] = #llvm.di_basic_type // CHECK-DAG: #llvm.di_composite_type> func.func private @foo3(%arg0: !fir.vector<10:f64>) -// CHECK-DAG: #[[F64:.*]] = #llvm.di_basic_type -// CHECK-DAG: #llvm.di_composite_type> +// CHECK-DAG: #[[F64:.*]] = #llvm.di_basic_type +// CHECK-DAG: #llvm.di_composite_type> func.func private @foo4(%arg0: !fir.vector<5:i32>) // CHECK-DAG: #[[I32:.*]] = #llvm.di_basic_type // CHECK-DAG: #llvm.di_composite_type> func.func private @foo5(%arg0: !fir.vector<2:i64>) -// CHECK-DAG: #[[I64:.*]] = #llvm.di_basic_type -// CHECK-DAG: #llvm.di_composite_type> +// CHECK-DAG: #[[I64:.*]] = #llvm.di_basic_type +// CHECK-DAG: #llvm.di_composite_type> } diff --git a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp index e3e364720af67..10a7ddf339133 100644 --- a/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp +++ b/flang/unittests/Optimizer/Builder/FIRBuilderTest.cpp @@ -644,3 +644,87 @@ TEST_F(FIRBuilderTest, genArithIntegerOverflow) { auto op4_ioff = op4_iofi.getOverflowAttr().getValue(); EXPECT_EQ(op4_ioff, nsw); } + +TEST_F(FIRBuilderTest, getDescriptorWithNewBaseAddress) { + auto builder = getBuilder(); + auto loc = builder.getUnknownLoc(); + + // Build an input fir.box for a 1-D array of i64 with constant extent 10. + auto i64Ty = builder.getI64Type(); + auto seqTy = fir::SequenceType::get({10}, i64Ty); + auto refArrTy = fir::ReferenceType::get(seqTy); + auto ptrTy = fir::PointerType::get(seqTy); + auto boxTy = fir::BoxType::get(ptrTy); + // Create an undef box descriptor value (descriptor contents are unspecified). + mlir::Value inputBox = fir::UndefOp::create(builder, loc, boxTy); + + // New base address (same element type and properties). + mlir::Value addr2 = fir::UndefOp::create(builder, loc, refArrTy); + + mlir::Value newBox = fir::factory::getDescriptorWithNewBaseAddress( + builder, loc, inputBox, addr2); + + // The returned descriptor must have the same type as the input box. + EXPECT_EQ(newBox.getType(), inputBox.getType()); + + // It must be constructed by an embox using the new base address. + ASSERT_TRUE(llvm::isa_and_nonnull(newBox.getDefiningOp())); + auto embox = llvm::dyn_cast(newBox.getDefiningOp()); + EXPECT_EQ(embox.getMemref(), addr2); + + // The shape should be derived from the input box; expect a fir.shape with one + // extent that comes from a fir.box_dims reading from the original input box. + mlir::Value shape = embox.getShape(); + ASSERT_TRUE(shape); + ASSERT_TRUE(llvm::isa_and_nonnull(shape.getDefiningOp())); + auto shapeOp = llvm::dyn_cast(shape.getDefiningOp()); + ASSERT_EQ(shapeOp.getExtents().size(), 1u); + mlir::Value extent0 = shapeOp.getExtents()[0]; + ASSERT_TRUE(llvm::isa_and_nonnull(extent0.getDefiningOp())); + auto dimOp = llvm::dyn_cast(extent0.getDefiningOp()); + EXPECT_EQ(dimOp.getVal(), inputBox); + + // Also verify the origin comes from a BoxDims on the same input box. 
+ ASSERT_EQ(shapeOp.getOrigins().size(), 1u); + mlir::Value origin0 = shapeOp.getOrigins()[0]; + ASSERT_TRUE(llvm::isa_and_nonnull(origin0.getDefiningOp())); + auto lbOp = llvm::dyn_cast(origin0.getDefiningOp()); + EXPECT_EQ(lbOp.getVal(), inputBox); +} + +TEST_F(FIRBuilderTest, getDescriptorWithNewBaseAddress_PolymorphicScalar) { + auto builder = getBuilder(); + auto loc = builder.getUnknownLoc(); + + // Build a polymorphic scalar: fir.class>>. + auto recTy = fir::RecordType::get(builder.getContext(), "poly_rec"); + auto ptrRecTy = fir::PointerType::get(recTy); + auto classTy = fir::ClassType::get(ptrRecTy); + + // Input descriptor is an undefined fir.class value. + mlir::Value inputBox = fir::UndefOp::create(builder, loc, classTy); + + // New base address of the same element type (reference to the record). + auto refRecTy = fir::ReferenceType::get(recTy); + mlir::Value newAddr = fir::UndefOp::create(builder, loc, refRecTy); + + mlir::Value newBox = fir::factory::getDescriptorWithNewBaseAddress( + builder, loc, inputBox, newAddr); + + // Same descriptor type must be preserved. + EXPECT_EQ(newBox.getType(), inputBox.getType()); + + // Must be an embox using the new base address and carrying the original box + // as mold. + ASSERT_TRUE(llvm::isa_and_nonnull(newBox.getDefiningOp())); + auto embox = llvm::dyn_cast(newBox.getDefiningOp()); + EXPECT_EQ(embox.getMemref(), newAddr); + + // Polymorphic scalar should have no shape operand. + mlir::Value shape = embox.getShape(); + EXPECT_TRUE(shape == nullptr); + + // The type descriptor/mold must be the original input box. + mlir::Value tdesc = embox.getSourceBox(); + EXPECT_EQ(tdesc, inputBox); +} diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt index 4f3704ec9aa9b..14718e2090bde 100644 --- a/libc/CMakeLists.txt +++ b/libc/CMakeLists.txt @@ -246,7 +246,7 @@ else() set(LIBC_INCLUDE_DIR ${CMAKE_BINARY_DIR}/include) set(LIBC_LIBRARY_DIR ${CMAKE_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX}) endif() - if(LIBC_TARGET_OS_IS_GPU) + if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR) if(LIBC_TARGET_TRIPLE) set(LIBC_INSTALL_INCLUDE_DIR ${CMAKE_INSTALL_INCLUDEDIR}/${LIBC_TARGET_TRIPLE}) else() diff --git a/libc/benchmarks/LibcMemoryBenchmark.cpp b/libc/benchmarks/LibcMemoryBenchmark.cpp index 3ced306584d15..9307ee45b2853 100644 --- a/libc/benchmarks/LibcMemoryBenchmark.cpp +++ b/libc/benchmarks/LibcMemoryBenchmark.cpp @@ -53,8 +53,8 @@ MismatchOffsetDistribution::MismatchOffsetDistribution(size_t BufferSize, : MismatchAt(MismatchAt) { if (MismatchAt <= 1) return; - for (size_t I = MaxSizeValue + 1; I < BufferSize; I += MaxSizeValue) - MismatchIndices.push_back(I); + for (size_t i = MaxSizeValue + 1; i < BufferSize; i += MaxSizeValue) + MismatchIndices.push_back(i); if (MismatchIndices.empty()) report_fatal_error("Unable to generate mismatch"); MismatchIndexSelector = diff --git a/libc/benchmarks/LibcMemoryBenchmarkMain.cpp b/libc/benchmarks/LibcMemoryBenchmarkMain.cpp index c042b29cad98e..613160d72c17f 100644 --- a/libc/benchmarks/LibcMemoryBenchmarkMain.cpp +++ b/libc/benchmarks/LibcMemoryBenchmarkMain.cpp @@ -161,11 +161,11 @@ struct MemfunctionBenchmarkBase : public BenchmarkSetup { if (Percent == LastPercent) return; LastPercent = Percent; - size_t I = 0; + size_t i = 0; errs() << '['; - for (; I <= Percent; ++I) + for (; i <= Percent; ++i) errs() << '#'; - for (; I <= 100; ++I) + for (; i <= 100; ++i) errs() << '_'; errs() << "] " << Percent << '%' << '\r'; } diff --git a/libc/benchmarks/LibcMemoryBenchmarkTest.cpp b/libc/benchmarks/LibcMemoryBenchmarkTest.cpp index 
00866e5a65c27..3c796c8ecc284 100644 --- a/libc/benchmarks/LibcMemoryBenchmarkTest.cpp +++ b/libc/benchmarks/LibcMemoryBenchmarkTest.cpp @@ -38,7 +38,7 @@ TEST(OffsetDistribution, AlignToBegin) { const size_t BufferSize = 8192; OffsetDistribution OD(BufferSize, 1024, std::nullopt); std::default_random_engine Gen; - for (size_t I = 0; I <= 10; ++I) + for (size_t i = 0; i <= 10; ++i) EXPECT_EQ(OD(Gen), 0U); } @@ -46,7 +46,7 @@ TEST(OffsetDistribution, NoAlignment) { const size_t BufferSize = 8192; OffsetDistribution OD(BufferSize, 1, Align(1)); std::default_random_engine Gen; - for (size_t I = 0; I <= 10; ++I) + for (size_t i = 0; i <= 10; ++i) EXPECT_THAT(OD(Gen), AllOf(Ge(0U), Lt(8192U))); } @@ -59,7 +59,7 @@ TEST(OffsetDistribution, Aligned) { const size_t BufferSize = 8192; OffsetDistribution OD(BufferSize, 1, Align(16)); std::default_random_engine Gen; - for (size_t I = 0; I <= 10; ++I) + for (size_t i = 0; i <= 10; ++i) EXPECT_THAT(OD(Gen), AllOf(Ge(0U), Lt(8192U), IsDivisibleBy(16U))); } diff --git a/libc/cmake/modules/LLVMLibCTestRules.cmake b/libc/cmake/modules/LLVMLibCTestRules.cmake index 9f00875390b53..19da0ad29cd84 100644 --- a/libc/cmake/modules/LLVMLibCTestRules.cmake +++ b/libc/cmake/modules/LLVMLibCTestRules.cmake @@ -39,7 +39,7 @@ function(_get_common_test_compile_options output_var c_test flags) list(APPEND compile_options "-ffixed-point") endif() - # list(APPEND compile_options "-Wall") + list(APPEND compile_options "-Wall") list(APPEND compile_options "-Wextra") # -DLIBC_WNO_ERROR=ON if you can't build cleanly with -Werror. if(NOT LIBC_WNO_ERROR) diff --git a/libc/config/baremetal/aarch64/entrypoints.txt b/libc/config/baremetal/aarch64/entrypoints.txt index 685a80b4002a3..935c95af0d4af 100644 --- a/libc/config/baremetal/aarch64/entrypoints.txt +++ b/libc/config/baremetal/aarch64/entrypoints.txt @@ -803,6 +803,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/baremetal/arm/entrypoints.txt b/libc/config/baremetal/arm/entrypoints.txt index 8ec972fc58411..82e257c1d2b0d 100644 --- a/libc/config/baremetal/arm/entrypoints.txt +++ b/libc/config/baremetal/arm/entrypoints.txt @@ -803,6 +803,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt index 2f1d930497734..c10cc1162cc5a 100644 --- a/libc/config/baremetal/riscv/entrypoints.txt +++ b/libc/config/baremetal/riscv/entrypoints.txt @@ -803,6 +803,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/darwin/aarch64/entrypoints.txt b/libc/config/darwin/aarch64/entrypoints.txt index 3af3f7ff66874..e3c6c2b30c415 100644 --- a/libc/config/darwin/aarch64/entrypoints.txt +++ b/libc/config/darwin/aarch64/entrypoints.txt @@ -633,6 +633,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/darwin/x86_64/entrypoints.txt 
b/libc/config/darwin/x86_64/entrypoints.txt index a0881e5b02fe8..e899bf97ea3f6 100644 --- a/libc/config/darwin/x86_64/entrypoints.txt +++ b/libc/config/darwin/x86_64/entrypoints.txt @@ -276,6 +276,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/gpu/amdgpu/entrypoints.txt b/libc/config/gpu/amdgpu/entrypoints.txt index 7e4b1ab6d253f..0dda7d5c683ec 100644 --- a/libc/config/gpu/amdgpu/entrypoints.txt +++ b/libc/config/gpu/amdgpu/entrypoints.txt @@ -659,6 +659,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/gpu/nvptx/entrypoints.txt b/libc/config/gpu/nvptx/entrypoints.txt index 72a6257283475..6070fb5b17b3c 100644 --- a/libc/config/gpu/nvptx/entrypoints.txt +++ b/libc/config/gpu/nvptx/entrypoints.txt @@ -661,6 +661,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt index e38fc857d4e16..4824684103983 100644 --- a/libc/config/linux/aarch64/entrypoints.txt +++ b/libc/config/linux/aarch64/entrypoints.txt @@ -325,6 +325,8 @@ set(TARGET_LIBC_ENTRYPOINTS libc.src.unistd.dup2 libc.src.unistd.dup3 libc.src.unistd.execve + # Disabled while SYS_faccessat2 is unavailable on the buildbot. + # libc.src.unistd.faccessat libc.src.unistd.fchdir libc.src.unistd.fpathconf libc.src.unistd.fsync @@ -332,6 +334,7 @@ set(TARGET_LIBC_ENTRYPOINTS libc.src.unistd.getcwd libc.src.unistd.getentropy libc.src.unistd.geteuid + libc.src.unistd.gethostname libc.src.unistd.getpid libc.src.unistd.getppid libc.src.unistd.getsid @@ -885,6 +888,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/linux/arm/entrypoints.txt b/libc/config/linux/arm/entrypoints.txt index 97857986d3874..f04ac40145d3a 100644 --- a/libc/config/linux/arm/entrypoints.txt +++ b/libc/config/linux/arm/entrypoints.txt @@ -503,6 +503,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/linux/riscv/entrypoints.txt b/libc/config/linux/riscv/entrypoints.txt index 89e3653186d13..5f407e842121e 100644 --- a/libc/config/linux/riscv/entrypoints.txt +++ b/libc/config/linux/riscv/entrypoints.txt @@ -336,6 +336,7 @@ set(TARGET_LIBC_ENTRYPOINTS libc.src.unistd.getcwd libc.src.unistd.getentropy libc.src.unistd.geteuid + libc.src.unistd.gethostname libc.src.unistd.getpid libc.src.unistd.getppid libc.src.unistd.getsid @@ -907,6 +908,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt index 0bb8a683c5b01..87b78a337b875 100644 --- 
a/libc/config/linux/x86_64/entrypoints.txt +++ b/libc/config/linux/x86_64/entrypoints.txt @@ -331,6 +331,7 @@ set(TARGET_LIBC_ENTRYPOINTS libc.src.unistd.dup2 libc.src.unistd.dup3 libc.src.unistd.execve + libc.src.unistd.faccessat libc.src.unistd.fchdir libc.src.unistd.fpathconf libc.src.unistd.fsync @@ -338,6 +339,7 @@ set(TARGET_LIBC_ENTRYPOINTS libc.src.unistd.getcwd libc.src.unistd.getentropy libc.src.unistd.geteuid + libc.src.unistd.gethostname libc.src.unistd.getpid libc.src.unistd.getppid libc.src.unistd.getsid @@ -942,6 +944,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/config/windows/entrypoints.txt b/libc/config/windows/entrypoints.txt index b7e6f7be128c4..3a76595b258e2 100644 --- a/libc/config/windows/entrypoints.txt +++ b/libc/config/windows/entrypoints.txt @@ -349,6 +349,7 @@ list(APPEND TARGET_LIBM_ENTRYPOINTS libc.src.math.llogbbf16 libc.src.math.llrintbf16 libc.src.math.llroundbf16 + libc.src.math.log_bf16 libc.src.math.logbbf16 libc.src.math.lrintbf16 libc.src.math.lroundbf16 diff --git a/libc/docs/headers/math/index.rst b/libc/docs/headers/math/index.rst index 7d5b341ba674a..51bf238b950b0 100644 --- a/libc/docs/headers/math/index.rst +++ b/libc/docs/headers/math/index.rst @@ -319,7 +319,7 @@ Higher Math Functions +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+------------------------+----------------------------+ | lgamma | | | | | | | 7.12.8.3 | F.10.5.3 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+------------------------+----------------------------+ -| log | |check| | |check| | | |check| | | | 7.12.6.11 | F.10.3.11 | +| log | |check| | |check| | | |check| | | |check| ? | 7.12.6.11 | F.10.3.11 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+------------------------+----------------------------+ | log10 | |check| | |check| | | |check| | | | 7.12.6.12 | F.10.3.12 | +-----------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+------------------------+----------------------------+ @@ -373,6 +373,7 @@ Legends: * x ULPs: largest errors recorded. * N/A: Not defined in the standard or will not be added. * \*: LLVM libc extension. +* ? Because of a conflict between float16 logb function and bfloat16 log function, the latter is implemented as `log_bf16`. .. TODO(lntue): Add a new page to discuss about the algorithms used in the diff --git a/libc/fuzzing/stdlib/strtointeger_differential_fuzz.cpp b/libc/fuzzing/stdlib/strtointeger_differential_fuzz.cpp index 097e6193ee6ef..2fabbba231167 100644 --- a/libc/fuzzing/stdlib/strtointeger_differential_fuzz.cpp +++ b/libc/fuzzing/stdlib/strtointeger_differential_fuzz.cpp @@ -44,6 +44,10 @@ // greater than 50% chance for each character to end the string, making the odds // of getting long numbers very low. extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { + if (size < 2) // Needs at least one byte for the base and one byte for the + // string. 
+ return 0; + uint8_t *container = new uint8_t[size + 1]; if (!container) __builtin_trap(); diff --git a/libc/include/llvm-libc-macros/linux/fcntl-macros.h b/libc/include/llvm-libc-macros/linux/fcntl-macros.h index aec8a0d2da0b5..74d406f742f38 100644 --- a/libc/include/llvm-libc-macros/linux/fcntl-macros.h +++ b/libc/include/llvm-libc-macros/linux/fcntl-macros.h @@ -61,6 +61,9 @@ // Allow empty relative pathname. #define AT_EMPTY_PATH 0x1000 +// Perform access checks using the effective user and group IDs. +#define AT_EACCESS 0x200 + // Values of SYS_fcntl commands. #define F_DUPFD 0 #define F_GETFD 1 diff --git a/libc/include/sys/syscall.h.def b/libc/include/sys/syscall.h.def index 6d74cc6f78556..60e5024e500e3 100644 --- a/libc/include/sys/syscall.h.def +++ b/libc/include/sys/syscall.h.def @@ -309,6 +309,10 @@ #define SYS_faccessat __NR_faccessat #endif +#ifdef __NR_faccessat2 +#define SYS_faccessat2 __NR_faccessat2 +#endif + #ifdef __NR_fadvise64 #define SYS_fadvise64 __NR_fadvise64 #endif diff --git a/libc/include/unistd.yaml b/libc/include/unistd.yaml index 051e92b006741..2ff86eafaf550 100644 --- a/libc/include/unistd.yaml +++ b/libc/include/unistd.yaml @@ -96,6 +96,15 @@ functions: - type: const char * - type: __exec_argv_t - type: __exec_envp_t + - name: faccessat + standards: + - POSIX + return_type: int + arguments: + - type: int + - type: const char * + - type: int + - type: int - name: fchdir standards: - POSIX @@ -141,6 +150,13 @@ functions: return_type: uid_t arguments: - type: void + - name: gethostname + standards: + - POSIX + return_type: int + arguments: + - type: char * + - type: size_t - name: getopt standards: - POSIX diff --git a/libc/shared/math.h b/libc/shared/math.h index 9ba898ea6dac9..4b2a0d8c712ad 100644 --- a/libc/shared/math.h +++ b/libc/shared/math.h @@ -45,6 +45,8 @@ #include "math/exp10.h" #include "math/exp10f.h" #include "math/exp10f16.h" +#include "math/exp10m1f.h" +#include "math/exp10m1f16.h" #include "math/expf.h" #include "math/expf16.h" #include "math/frexpf.h" diff --git a/libc/shared/math/exp10m1f.h b/libc/shared/math/exp10m1f.h new file mode 100644 index 0000000000000..9093705ce801b --- /dev/null +++ b/libc/shared/math/exp10m1f.h @@ -0,0 +1,23 @@ +//===-- Shared exp10m1f function --------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SHARED_MATH_EXP10M1F_H +#define LLVM_LIBC_SHARED_MATH_EXP10M1F_H + +#include "shared/libc_common.h" +#include "src/__support/math/exp10m1f.h" + +namespace LIBC_NAMESPACE_DECL { +namespace shared { + +using math::exp10m1f; + +} // namespace shared +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SHARED_MATH_EXP10M1F_H diff --git a/libc/shared/math/exp10m1f16.h b/libc/shared/math/exp10m1f16.h new file mode 100644 index 0000000000000..5f18f2986207e --- /dev/null +++ b/libc/shared/math/exp10m1f16.h @@ -0,0 +1,29 @@ +//===-- Shared exp10m1f16 function ------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SHARED_MATH_EXP10M1F16_H +#define LLVM_LIBC_SHARED_MATH_EXP10M1F16_H + +#include "include/llvm-libc-macros/float16-macros.h" +#include "shared/libc_common.h" + +#ifdef LIBC_TYPES_HAS_FLOAT16 + +#include "src/__support/math/exp10m1f16.h" + +namespace LIBC_NAMESPACE_DECL { +namespace shared { + +using math::exp10m1f16; + +} // namespace shared +} // namespace LIBC_NAMESPACE_DECL + +#endif // LIBC_TYPES_HAS_FLOAT16 + +#endif // LLVM_LIBC_SHARED_MATH_EXP10M1F16_H diff --git a/libc/src/__support/CPP/simd.h b/libc/src/__support/CPP/simd.h index d2a5b17fa4b9f..422d2f4c8433d 100644 --- a/libc/src/__support/CPP/simd.h +++ b/libc/src/__support/CPP/simd.h @@ -287,34 +287,72 @@ LIBC_INLINE constexpr static T hmax(simd v) { } // Accessor helpers. -template = 0> -LIBC_INLINE T load_unaligned(const void *ptr) { +template +LIBC_INLINE T constexpr static load(const void *ptr, bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); T tmp; - __builtin_memcpy(&tmp, ptr, sizeof(T)); + __builtin_memcpy_inline( + &tmp, reinterpret_cast *>(ptr), sizeof(T)); return tmp; } template = 0> -LIBC_INLINE T load_aligned(const void *ptr) { - return load_unaligned(__builtin_assume_aligned(ptr, alignof(T))); +LIBC_INLINE constexpr static void store(T v, void *ptr, bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + __builtin_memcpy_inline(ptr, &v, sizeof(T)); } template = 0> -LIBC_INLINE T store_unaligned(T v, void *ptr) { - __builtin_memcpy(ptr, &v, sizeof(T)); +LIBC_INLINE constexpr static T +load_masked(simd> mask, const void *ptr, + T passthru = internal::poison(), bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + return __builtin_masked_load( + mask, reinterpret_cast *>(ptr), passthru); } template = 0> -LIBC_INLINE T store_aligned(T v, void *ptr) { - store_unaligned(v, __builtin_assume_aligned(ptr, alignof(T))); +LIBC_INLINE constexpr static void store_masked(simd> mask, + T v, void *ptr, + bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + __builtin_masked_store(mask, v, + reinterpret_cast *>(ptr)); +} +template = 0> +LIBC_INLINE constexpr static T gather(simd> mask, Idx idx, + const void *base, bool aligned = false) { + if (aligned) + base = __builtin_assume_aligned(base, alignof(T)); + return __builtin_masked_gather( + mask, idx, reinterpret_cast *>(base)); +} +template = 0> +LIBC_INLINE constexpr static void scatter(simd> mask, + Idx idx, T v, void *base, + bool aligned = false) { + if (aligned) + base = __builtin_assume_aligned(base, alignof(T)); + __builtin_masked_scatter(mask, idx, v, + reinterpret_cast *>(base)); } template = 0> -LIBC_INLINE T -masked_load(simd> m, void *ptr, - T passthru = internal::poison>()) { - return __builtin_masked_load(m, ptr, passthru); +LIBC_INLINE constexpr static T +expand(simd> mask, const void *ptr, + T passthru = internal::poison(), bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + return __builtin_masked_expand_load( + mask, reinterpret_cast *>(ptr), passthru); } template = 0> -LIBC_INLINE T masked_store(simd> m, T v, void *ptr) { - __builtin_masked_store( - m, v, static_cast(__builtin_assume_aligned(ptr, alignof(T)))); +LIBC_INLINE constexpr static void compress(simd> mask, T v, + void *ptr, bool aligned = false) { + 
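+  // A compress store writes only the lanes selected by the mask, packed
+  // contiguously starting at ptr; e.g. with mask = {true, false, true, false}
+  // and v = {1, 2, 3, 4}, only {1, 3} is written. The optional 'aligned'
+  // flag follows the same convention as the other accessors above.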
if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + __builtin_masked_compress_store( + mask, v, reinterpret_cast *>(ptr)); } // Construction helpers. diff --git a/libc/src/__support/CPP/tuple.h b/libc/src/__support/CPP/tuple.h index cce8e0ef2bfae..fa4fcd08cc04f 100644 --- a/libc/src/__support/CPP/tuple.h +++ b/libc/src/__support/CPP/tuple.h @@ -48,33 +48,33 @@ template LIBC_INLINE constexpr auto tie(Ts &...args) { return tuple(args...); } -template +template LIBC_INLINE constexpr auto &get(tuple &t) { - if constexpr (I == 0) + if constexpr (Idx == 0) return t.get_head(); else - return get(t.get_tail()); + return get(t.get_tail()); } -template +template LIBC_INLINE constexpr const auto &get(const tuple &t) { - if constexpr (I == 0) + if constexpr (Idx == 0) return t.get_head(); else - return get(t.get_tail()); + return get(t.get_tail()); } -template +template LIBC_INLINE constexpr auto &&get(tuple &&t) { - if constexpr (I == 0) + if constexpr (Idx == 0) return static_cast(t.get_head()); else - return get(static_cast &&>(t.get_tail())); + return get(static_cast &&>(t.get_tail())); } -template +template LIBC_INLINE constexpr const auto &&get(const tuple &&t) { - if constexpr (I == 0) + if constexpr (Idx == 0) return static_cast(t.get_head()); else - return get(static_cast &&>(t.get_tail())); + return get(static_cast &&>(t.get_tail())); } template struct tuple_size; @@ -82,21 +82,21 @@ template struct tuple_size> { static constexpr size_t value = sizeof...(Ts); }; -template struct tuple_element; -template -struct tuple_element> - : tuple_element> {}; +template struct tuple_element; +template +struct tuple_element> + : tuple_element> {}; template struct tuple_element<0, tuple> { using type = cpp::remove_cv_t>; }; namespace internal { -template +template LIBC_INLINE constexpr auto tuple_cat(const tuple &a, const tuple &b, - cpp::index_sequence, cpp::index_sequence) { - return tuple(get(a)..., get(b)...); + cpp::index_sequence, cpp::index_sequence) { + return tuple(get(a)..., get(b)...); } template @@ -128,16 +128,16 @@ LIBC_INLINE constexpr auto tuple_cat(const Tuples &...tuples) { namespace std { template struct tuple_size; -template struct tuple_element; +template struct tuple_element; template struct tuple_size> : LIBC_NAMESPACE::cpp::tuple_size> {}; -template -struct tuple_element> - : LIBC_NAMESPACE::cpp::tuple_element> { -}; +template +struct tuple_element> + : LIBC_NAMESPACE::cpp::tuple_element> {}; } // namespace std diff --git a/libc/src/__support/macros/attributes.h b/libc/src/__support/macros/attributes.h index 145aa3b65057c..d5ff028634940 100644 --- a/libc/src/__support/macros/attributes.h +++ b/libc/src/__support/macros/attributes.h @@ -81,4 +81,14 @@ LIBC_THREAD_MODE_EXTERNAL. #define LIBC_HAS_VECTOR_TYPE 0 #endif +#if __has_attribute(no_sanitize) +// Disable regular and hardware-supported ASan for functions that may +// intentionally make out-of-bounds access. Disable TSan as well, as it detects +// out-of-bounds accesses to heap memory. 
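+// Intended for routines such as the wide-read string helpers, which may load
+// a whole word or vector that extends past the terminating null byte.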
+#define LIBC_NO_SANITIZE_OOB_ACCESS \ + __attribute__((no_sanitize("address", "hwaddress", "thread"))) +#else +#define LIBC_NO_SANITIZE_OOB_ACCESS +#endif + #endif // LLVM_LIBC_SRC___SUPPORT_MACROS_ATTRIBUTES_H diff --git a/libc/src/__support/math/CMakeLists.txt b/libc/src/__support/math/CMakeLists.txt index 12ffa2ab456e7..98f9bb42f91f4 100644 --- a/libc/src/__support/math/CMakeLists.txt +++ b/libc/src/__support/math/CMakeLists.txt @@ -481,6 +481,40 @@ add_header_library( libc.src.__support.FPUtil.generic.sqrt ) +add_header_library( + exp10m1f + HDRS + exp10m1f.h + DEPENDS + .exp10f_utils + libc.src.errno.errno + libc.src.__support.common + libc.src.__support.FPUtil.except_value_utils + libc.src.__support.FPUtil.fenv_impl + libc.src.__support.FPUtil.fp_bits + libc.src.__support.FPUtil.multiply_add + libc.src.__support.FPUtil.polyeval + libc.src.__support.FPUtil.rounding_mode + libc.src.__support.macros.optimization +) + +add_header_library( + exp10m1f16 + HDRS + exp10m1f16.h + DEPENDS + .exp10f16_utils + libc.src.__support.FPUtil.cast + libc.src.__support.FPUtil.except_value_utils + libc.src.__support.FPUtil.fenv_impl + libc.src.__support.FPUtil.fp_bits + libc.src.__support.FPUtil.multiply_add + libc.src.__support.FPUtil.polyeval + libc.src.__support.FPUtil.rounding_mode + libc.src.__support.macros.optimization + libc.src.__support.macros.properties.cpu_features +) + add_header_library( erff HDRS diff --git a/libc/src/__support/math/exp10m1f.h b/libc/src/__support/math/exp10m1f.h new file mode 100644 index 0000000000000..9fe4ff774ec68 --- /dev/null +++ b/libc/src/__support/math/exp10m1f.h @@ -0,0 +1,234 @@ +//===-- Implementation header for exp10m1f ----------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F_H +#define LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F_H + +#include "exp10f_utils.h" +#include "src/__support/FPUtil/FEnvImpl.h" +#include "src/__support/FPUtil/FPBits.h" +#include "src/__support/FPUtil/PolyEval.h" +#include "src/__support/FPUtil/except_value_utils.h" +#include "src/__support/FPUtil/multiply_add.h" +#include "src/__support/FPUtil/rounding_mode.h" +#include "src/__support/common.h" +#include "src/__support/libc_errno.h" +#include "src/__support/macros/config.h" +#include "src/__support/macros/optimization.h" + +namespace LIBC_NAMESPACE_DECL { + +namespace math { + +namespace exp10m1f_internal { + +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +static constexpr size_t N_EXCEPTS_LO = 11; + +static constexpr fputil::ExceptValues EXP10M1F_EXCEPTS_LO = + {{ + // x = 0x1.0fe54ep-11, exp10m1f(x) = 0x1.3937eep-10 (RZ) + {0x3a07'f2a7U, 0x3a9c'9bf7U, 1U, 0U, 1U}, + // x = 0x1.80e6eap-11, exp10m1f(x) = 0x1.bb8272p-10 (RZ) + {0x3a40'7375U, 0x3add'c139U, 1U, 0U, 1U}, + // x = -0x1.2a33bcp-51, exp10m1f(x) = -0x1.57515ep-50 (RZ) + {0xa615'19deU, 0xa6ab'a8afU, 0U, 1U, 0U}, + // x = -0x0p+0, exp10m1f(x) = -0x0p+0 (RZ) + {0x8000'0000U, 0x8000'0000U, 0U, 0U, 0U}, + // x = -0x1.b59e08p-31, exp10m1f(x) = -0x1.f7d356p-30 (RZ) + {0xb05a'cf04U, 0xb0fb'e9abU, 0U, 1U, 1U}, + // x = -0x1.bf342p-12, exp10m1f(x) = -0x1.014e02p-10 (RZ) + {0xb9df'9a10U, 0xba80'a701U, 0U, 1U, 0U}, + // x = -0x1.6207fp-11, exp10m1f(x) = -0x1.9746cap-10 (RZ) + {0xba31'03f8U, 0xbacb'a365U, 0U, 1U, 1U}, + // x = -0x1.bd0c66p-11, exp10m1f(x) = -0x1.ffe168p-10 (RZ) + {0xba5e'8633U, 0xbaff'f0b4U, 0U, 1U, 1U}, + // x = -0x1.ffd84cp-10, exp10m1f(x) = -0x1.25faf2p-8 (RZ) + {0xbaff'ec26U, 0xbb92'fd79U, 0U, 1U, 0U}, + // x = -0x1.a74172p-9, exp10m1f(x) = -0x1.e57be2p-8 (RZ) + {0xbb53'a0b9U, 0xbbf2'bdf1U, 0U, 1U, 1U}, + // x = -0x1.cb694cp-9, exp10m1f(x) = -0x1.0764e4p-7 (RZ) + {0xbb65'b4a6U, 0xbc03'b272U, 0U, 1U, 0U}, + }}; + +static constexpr size_t N_EXCEPTS_HI = 19; + +static constexpr fputil::ExceptValues EXP10M1F_EXCEPTS_HI = + {{ + // (input, RZ output, RU offset, RD offset, RN offset) + // x = 0x1.8d31eep-8, exp10m1f(x) = 0x1.cc7e4cp-7 (RZ) + {0x3bc6'98f7U, 0x3c66'3f26U, 1U, 0U, 1U}, + // x = 0x1.915fcep-8, exp10m1f(x) = 0x1.d15f72p-7 (RZ) + {0x3bc8'afe7U, 0x3c68'afb9U, 1U, 0U, 0U}, + // x = 0x1.bcf982p-8, exp10m1f(x) = 0x1.022928p-6 (RZ) + {0x3bde'7cc1U, 0x3c81'1494U, 1U, 0U, 1U}, + // x = 0x1.99ff0ap-7, exp10m1f(x) = 0x1.dee416p-6 (RZ) + {0x3c4c'ff85U, 0x3cef'720bU, 1U, 0U, 0U}, + // x = 0x1.75ea14p-6, exp10m1f(x) = 0x1.b9ff16p-5 (RZ) + {0x3cba'f50aU, 0x3d5c'ff8bU, 1U, 0U, 0U}, + // x = 0x1.f81b64p-6, exp10m1f(x) = 0x1.2cb6bcp-4 (RZ) + {0x3cfc'0db2U, 0x3d96'5b5eU, 1U, 0U, 0U}, + // x = 0x1.fafecp+3, exp10m1f(x) = 0x1.8c880ap+52 (RZ) + {0x417d'7f60U, 0x59c6'4405U, 1U, 0U, 0U}, + // x = -0x1.3bf094p-8, exp10m1f(x) = -0x1.69ba4ap-7 (RZ) + {0xbb9d'f84aU, 0xbc34'dd25U, 0U, 1U, 0U}, + // x = -0x1.4558bcp-8, exp10m1f(x) = -0x1.746fb8p-7 (RZ) + {0xbba2'ac5eU, 0xbc3a'37dcU, 0U, 1U, 1U}, + // x = -0x1.4bb43p-8, exp10m1f(x) = -0x1.7babe4p-7 (RZ) + {0xbba5'da18U, 0xbc3d'd5f2U, 0U, 1U, 1U}, + // x = -0x1.776cc8p-8, exp10m1f(x) = -0x1.ad62c4p-7 (RZ) + {0xbbbb'b664U, 0xbc56'b162U, 0U, 1U, 0U}, + // x = -0x1.f024cp-8, exp10m1f(x) = -0x1.1b20d6p-6 (RZ) + {0xbbf8'1260U, 0xbc8d'906bU, 0U, 1U, 1U}, + // x = -0x1.f510eep-8, exp10m1f(x) = -0x1.1de9aap-6 (RZ) + 
{0xbbfa'8877U, 0xbc8e'f4d5U, 0U, 1U, 0U}, + // x = -0x1.0b43c4p-7, exp10m1f(x) = -0x1.30d418p-6 (RZ) + {0xbc05'a1e2U, 0xbc98'6a0cU, 0U, 1U, 0U}, + // x = -0x1.245ee4p-7, exp10m1f(x) = -0x1.4d2b86p-6 (RZ) + {0xbc12'2f72U, 0xbca6'95c3U, 0U, 1U, 0U}, + // x = -0x1.f9f2dap-7, exp10m1f(x) = -0x1.1e2186p-5 (RZ) + {0xbc7c'f96dU, 0xbd0f'10c3U, 0U, 1U, 0U}, + // x = -0x1.08e42p-6, exp10m1f(x) = -0x1.2b5c4p-5 (RZ) + {0xbc84'7210U, 0xbd15'ae20U, 0U, 1U, 1U}, + // x = -0x1.0cdc44p-5, exp10m1f(x) = -0x1.2a2152p-4 (RZ) + {0xbd06'6e22U, 0xbd95'10a9U, 0U, 1U, 1U}, + // x = -0x1.ca4322p-5, exp10m1f(x) = -0x1.ef073p-4 (RZ) + {0xbd65'2191U, 0xbdf7'8398U, 0U, 1U, 1U}, + }}; +#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS + +} // namespace exp10m1f_internal + +LIBC_INLINE static constexpr float exp10m1f(float x) { + using namespace exp10m1f_internal; + using FPBits = fputil::FPBits; + FPBits xbits(x); + + uint32_t x_u = xbits.uintval(); + uint32_t x_abs = x_u & 0x7fff'ffffU; + + // When x >= log10(2^128), or x is nan + if (LIBC_UNLIKELY(xbits.is_pos() && x_u >= 0x421a'209bU)) { + if (xbits.is_finite()) { + int rounding = fputil::quick_get_round(); + if (rounding == FE_DOWNWARD || rounding == FE_TOWARDZERO) + return FPBits::max_normal().get_val(); + + fputil::set_errno_if_required(ERANGE); + fputil::raise_except_if_required(FE_OVERFLOW); + } + + // x >= log10(2^128) and 10^x - 1 rounds to +inf, or x is +inf or nan + return x + FPBits::inf().get_val(); + } + + // When |x| <= log10(2) * 2^(-6) + if (LIBC_UNLIKELY(x_abs <= 0x3b9a'209bU)) { +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + if (auto r = EXP10M1F_EXCEPTS_LO.lookup(x_u); LIBC_UNLIKELY(r.has_value())) + return r.value(); +#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS + + double dx = x; + double dx_sq = dx * dx; + double c0 = dx * Exp10Base::COEFFS[0]; + double c1 = + fputil::multiply_add(dx, Exp10Base::COEFFS[2], Exp10Base::COEFFS[1]); + double c2 = + fputil::multiply_add(dx, Exp10Base::COEFFS[4], Exp10Base::COEFFS[3]); + // 10^dx - 1 ~ (1 + COEFFS[0] * dx + ... + COEFFS[4] * dx^5) - 1 + // = COEFFS[0] * dx + ... + COEFFS[4] * dx^5 + return static_cast(fputil::polyeval(dx_sq, c0, c1, c2)); + } + + // When x <= log10(2^-25), or x is nan + if (LIBC_UNLIKELY(x_u >= 0xc0f0d2f1)) { + // exp10m1(-inf) = -1 + if (xbits.is_inf()) + return -1.0f; + // exp10m1(nan) = nan + if (xbits.is_nan()) + return x; + + int rounding = fputil::quick_get_round(); + if (rounding == FE_UPWARD || rounding == FE_TOWARDZERO || + (rounding == FE_TONEAREST && x_u == 0xc0f0d2f1)) + return -0x1.ffff'fep-1f; // -1.0f + 0x1.0p-24f + + fputil::set_errno_if_required(ERANGE); + fputil::raise_except_if_required(FE_UNDERFLOW); + return -1.0f; + } + + // Exact outputs when x = 1, 2, ..., 10. + // Quick check mask: 0x800f'ffffU = ~(bits of 1.0f | ... 
| bits of 10.0f) + if (LIBC_UNLIKELY((x_u & 0x800f'ffffU) == 0)) { + switch (x_u) { + case 0x3f800000U: // x = 1.0f + return 9.0f; + case 0x40000000U: // x = 2.0f + return 99.0f; + case 0x40400000U: // x = 3.0f + return 999.0f; + case 0x40800000U: // x = 4.0f + return 9'999.0f; + case 0x40a00000U: // x = 5.0f + return 99'999.0f; + case 0x40c00000U: // x = 6.0f + return 999'999.0f; + case 0x40e00000U: // x = 7.0f + return 9'999'999.0f; + case 0x41000000U: { // x = 8.0f + int rounding = fputil::quick_get_round(); + if (rounding == FE_UPWARD || rounding == FE_TONEAREST) + return 100'000'000.0f; + return 99'999'992.0f; + } + case 0x41100000U: { // x = 9.0f + int rounding = fputil::quick_get_round(); + if (rounding == FE_UPWARD || rounding == FE_TONEAREST) + return 1'000'000'000.0f; + return 999'999'936.0f; + } + case 0x41200000U: { // x = 10.0f + int rounding = fputil::quick_get_round(); + if (rounding == FE_UPWARD || rounding == FE_TONEAREST) + return 10'000'000'000.0f; + return 9'999'998'976.0f; + } + } + } + +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + if (auto r = EXP10M1F_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value())) + return r.value(); +#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS + + // Range reduction: 10^x = 2^(mid + hi) * 10^lo + // rr = (2^(mid + hi), lo) + auto rr = exp_b_range_reduc(x); + + // The low part is approximated by a degree-5 minimax polynomial. + // 10^lo ~ 1 + COEFFS[0] * lo + ... + COEFFS[4] * lo^5 + double lo_sq = rr.lo * rr.lo; + double c0 = fputil::multiply_add(rr.lo, Exp10Base::COEFFS[0], 1.0); + double c1 = + fputil::multiply_add(rr.lo, Exp10Base::COEFFS[2], Exp10Base::COEFFS[1]); + double c2 = + fputil::multiply_add(rr.lo, Exp10Base::COEFFS[4], Exp10Base::COEFFS[3]); + double exp10_lo = fputil::polyeval(lo_sq, c0, c1, c2); + // 10^x - 1 = 2^(mid + hi) * 10^lo - 1 + // ~ mh * exp10_lo - 1 + return static_cast(fputil::multiply_add(exp10_lo, rr.mh, -1.0)); +} + +} // namespace math + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F_H diff --git a/libc/src/__support/math/exp10m1f16.h b/libc/src/__support/math/exp10m1f16.h new file mode 100644 index 0000000000000..6367a857fa98a --- /dev/null +++ b/libc/src/__support/math/exp10m1f16.h @@ -0,0 +1,185 @@ +//===-- Implementation header for exp10m1f16 --------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F16_H +#define LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F16_H + +#include "include/llvm-libc-macros/float16-macros.h" + +#ifdef LIBC_TYPES_HAS_FLOAT16 + +#include "exp10f16_utils.h" +#include "src/__support/FPUtil/FEnvImpl.h" +#include "src/__support/FPUtil/FPBits.h" +#include "src/__support/FPUtil/PolyEval.h" +#include "src/__support/FPUtil/cast.h" +#include "src/__support/FPUtil/except_value_utils.h" +#include "src/__support/FPUtil/multiply_add.h" +#include "src/__support/FPUtil/rounding_mode.h" +#include "src/__support/common.h" +#include "src/__support/macros/config.h" +#include "src/__support/macros/optimization.h" +#include "src/__support/macros/properties/cpu_features.h" + +namespace LIBC_NAMESPACE_DECL { + +namespace math { + +LIBC_INLINE static constexpr float16 exp10m1f16(float16 x) { + +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + constexpr fputil::ExceptValues EXP10M1F16_EXCEPTS_LO = {{ + // (input, RZ output, RU offset, RD offset, RN offset) + // x = 0x1.5c4p-4, exp10m1f16(x) = 0x1.bacp-3 (RZ) + {0x2d71U, 0x32ebU, 1U, 0U, 0U}, + // x = -0x1.5ep-13, exp10m1f16(x) = -0x1.92cp-12 (RZ) + {0x8978U, 0x8e4bU, 0U, 1U, 0U}, + // x = -0x1.e2p-10, exp10m1f16(x) = -0x1.14cp-8 (RZ) + {0x9788U, 0x9c53U, 0U, 1U, 0U}, + }}; + +#ifdef LIBC_TARGET_CPU_HAS_FMA_FLOAT + constexpr size_t N_EXP10M1F16_EXCEPTS_HI = 3; +#else + constexpr size_t N_EXP10M1F16_EXCEPTS_HI = 6; +#endif + + constexpr fputil::ExceptValues + EXP10M1F16_EXCEPTS_HI = {{ + // (input, RZ output, RU offset, RD offset, RN offset) + // x = 0x1.8f4p-2, exp10m1f16(x) = 0x1.744p+0 (RZ) + {0x363dU, 0x3dd1U, 1U, 0U, 0U}, + // x = 0x1.95cp-2, exp10m1f16(x) = 0x1.7d8p+0 (RZ) + {0x3657U, 0x3df6U, 1U, 0U, 0U}, + // x = 0x1.d04p-2, exp10m1f16(x) = 0x1.d7p+0 (RZ) + {0x3741U, 0x3f5cU, 1U, 0U, 1U}, +#ifndef LIBC_TARGET_CPU_HAS_FMA_FLOAT + // x = 0x1.0cp+1, exp10m1f16(x) = 0x1.ec4p+6 (RZ) + {0x4030U, 0x57b1U, 1U, 0U, 1U}, + // x = 0x1.1b8p+1, exp10m1f16(x) = 0x1.45cp+7 (RZ) + {0x406eU, 0x5917U, 1U, 0U, 1U}, + // x = 0x1.2f4p+2, exp10m1f16(x) = 0x1.ab8p+15 (RZ) + {0x44bdU, 0x7aaeU, 1U, 0U, 1U}, +#endif + }}; +#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS + + using FPBits = fputil::FPBits; + FPBits x_bits(x); + + uint16_t x_u = x_bits.uintval(); + uint16_t x_abs = x_u & 0x7fffU; + + // When |x| <= 2^(-3), or |x| >= 11 * log10(2), or x is NaN. + if (LIBC_UNLIKELY(x_abs <= 0x3000U || x_abs >= 0x429fU)) { + // exp10m1(NaN) = NaN + if (x_bits.is_nan()) { + if (x_bits.is_signaling_nan()) { + fputil::raise_except_if_required(FE_INVALID); + return FPBits::quiet_nan().get_val(); + } + + return x; + } + + // When x >= 16 * log10(2). + if (x_u >= 0x44d1U && x_bits.is_pos()) { + // exp10m1(+inf) = +inf + if (x_bits.is_inf()) + return FPBits::inf().get_val(); + + switch (fputil::quick_get_round()) { + case FE_TONEAREST: + case FE_UPWARD: + fputil::set_errno_if_required(ERANGE); + fputil::raise_except_if_required(FE_OVERFLOW | FE_INEXACT); + return FPBits::inf().get_val(); + default: + return FPBits::max_normal().get_val(); + } + } + + // When x < -11 * log10(2). + if (x_u > 0xc29fU) { + // exp10m1(-inf) = -1 + if (x_bits.is_inf()) + return FPBits::one(Sign::NEG).get_val(); + + // When x >= -0x1.ce4p+1, round(10^x - 1, HP, RN) = -0x1.ffcp-1. 
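+      // (0xc339 is the float16 encoding of -0x1.ce4p+1 ~= -3.6113.)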
+ if (x_u <= 0xc339U) { + return fputil::round_result_slightly_down( + fputil::cast(-0x1.ffcp-1)); + } + + // When x < -0x1.ce4p+1, round(10^x - 1, HP, RN) = -1. + switch (fputil::quick_get_round()) { + case FE_TONEAREST: + case FE_DOWNWARD: + return FPBits::one(Sign::NEG).get_val(); + default: + return fputil::cast(-0x1.ffcp-1); + } + } + + // When |x| <= 2^(-3). + if (x_abs <= 0x3000U) { + if (LIBC_UNLIKELY(x_abs == 0)) + return x; + +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + if (auto r = EXP10M1F16_EXCEPTS_LO.lookup(x_u); + LIBC_UNLIKELY(r.has_value())) + return r.value(); +#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS + + float xf = x; + // Degree-5 minimax polynomial generated by Sollya with the following + // commands: + // > display = hexadecimal; + // > P = fpminimax((10^x - 1)/x, 4, [|SG...|], [-2^-3, 2^-3]); + // > x * P; + return fputil::cast( + xf * fputil::polyeval(xf, 0x1.26bb1cp+1f, 0x1.5351c8p+1f, + 0x1.04704p+1f, 0x1.2ce084p+0f, 0x1.14a6bep-1f)); + } + } + + // When x is 1, 2, or 3. These are hard-to-round cases with exact results. + // 10^4 - 1 = 9'999 is not exactly representable as a float16, but luckily the + // polynomial approximation gives the correct result for x = 4 in all + // rounding modes. + if (LIBC_UNLIKELY((x_u & ~(0x3c00U | 0x4000U | 0x4200U | 0x4400U)) == 0)) { + switch (x_u) { + case 0x3c00U: // x = 1.0f16 + return fputil::cast(9.0); + case 0x4000U: // x = 2.0f16 + return fputil::cast(99.0); + case 0x4200U: // x = 3.0f16 + return fputil::cast(999.0); + } + } + +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + if (auto r = EXP10M1F16_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value())) + return r.value(); +#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS + + // exp10(x) = exp2((hi + mid) * log2(10)) * exp10(lo) + auto [exp2_hi_mid, exp10_lo] = exp10_range_reduction(x); + // exp10m1(x) = exp2((hi + mid) * log2(lo)) * exp10(lo) - 1 + return fputil::cast( + fputil::multiply_add(exp2_hi_mid, exp10_lo, -1.0f)); +} + +} // namespace math + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LIBC_TYPES_HAS_FLOAT16 + +#endif // LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F16_H diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt index a6f400c873b7e..3c7e99f4a9c46 100644 --- a/libc/src/math/CMakeLists.txt +++ b/libc/src/math/CMakeLists.txt @@ -392,6 +392,7 @@ add_math_entrypoint_object(log2f16) add_math_entrypoint_object(log) add_math_entrypoint_object(logf) add_math_entrypoint_object(logf16) +add_math_entrypoint_object(log_bf16) add_math_entrypoint_object(logb) add_math_entrypoint_object(logbf) diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt index 0830c7a72a6ec..99c1b08326d53 100644 --- a/libc/src/math/generic/CMakeLists.txt +++ b/libc/src/math/generic/CMakeLists.txt @@ -1593,16 +1593,7 @@ add_entrypoint_object( HDRS ../exp10m1f.h DEPENDS - libc.src.errno.errno - libc.src.__support.common - libc.src.__support.FPUtil.except_value_utils - libc.src.__support.FPUtil.fenv_impl - libc.src.__support.FPUtil.fp_bits - libc.src.__support.FPUtil.multiply_add - libc.src.__support.FPUtil.polyeval - libc.src.__support.FPUtil.rounding_mode - libc.src.__support.macros.optimization - libc.src.__support.math.exp10f_utils + libc.src.__support.math.exp10m1f ) add_entrypoint_object( @@ -1612,18 +1603,7 @@ add_entrypoint_object( HDRS ../exp10m1f16.h DEPENDS - libc.hdr.errno_macros - libc.hdr.fenv_macros - libc.src.__support.FPUtil.cast - libc.src.__support.FPUtil.except_value_utils - libc.src.__support.FPUtil.fenv_impl - 
libc.src.__support.FPUtil.fp_bits - libc.src.__support.FPUtil.multiply_add - libc.src.__support.FPUtil.polyeval - libc.src.__support.FPUtil.rounding_mode - libc.src.__support.macros.optimization - libc.src.__support.macros.properties.cpu_features - libc.src.__support.math.exp10f16_utils + libc.src.__support.math.exp10m1f16 ) add_entrypoint_object( @@ -2261,6 +2241,7 @@ add_entrypoint_object( libc.src.__support.FPUtil.multiply_add libc.src.__support.FPUtil.polyeval libc.src.__support.macros.optimization + libc.src.__support.macros.properties.cpu_features ) add_entrypoint_object( @@ -2283,6 +2264,22 @@ add_entrypoint_object( libc.src.__support.math.expxf16_utils ) +add_entrypoint_object( + log_bf16 + SRCS + log_bf16.cpp + HDRS + ../log_bf16.h + DEPENDS + libc.src.__support.common + libc.src.__support.FPUtil.bfloat16 + libc.src.__support.FPUtil.cast + libc.src.__support.FPUtil.fp_bits + libc.src.__support.FPUtil.multiply_add + libc.src.__support.macros.config + libc.src.__support.macros.optimization +) + add_entrypoint_object( logb SRCS diff --git a/libc/src/math/generic/exp10m1f.cpp b/libc/src/math/generic/exp10m1f.cpp index 8589e3fb6639d..87980b7753b40 100644 --- a/libc/src/math/generic/exp10m1f.cpp +++ b/libc/src/math/generic/exp10m1f.cpp @@ -7,215 +7,10 @@ //===----------------------------------------------------------------------===// #include "src/math/exp10m1f.h" -#include "src/__support/FPUtil/FEnvImpl.h" -#include "src/__support/FPUtil/FPBits.h" -#include "src/__support/FPUtil/PolyEval.h" -#include "src/__support/FPUtil/except_value_utils.h" -#include "src/__support/FPUtil/multiply_add.h" -#include "src/__support/FPUtil/rounding_mode.h" -#include "src/__support/common.h" -#include "src/__support/libc_errno.h" -#include "src/__support/macros/config.h" -#include "src/__support/macros/optimization.h" -#include "src/__support/math/exp10f_utils.h" +#include "src/__support/math/exp10m1f.h" namespace LIBC_NAMESPACE_DECL { -#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS -static constexpr size_t N_EXCEPTS_LO = 11; - -static constexpr fputil::ExceptValues EXP10M1F_EXCEPTS_LO = - {{ - // x = 0x1.0fe54ep-11, exp10m1f(x) = 0x1.3937eep-10 (RZ) - {0x3a07'f2a7U, 0x3a9c'9bf7U, 1U, 0U, 1U}, - // x = 0x1.80e6eap-11, exp10m1f(x) = 0x1.bb8272p-10 (RZ) - {0x3a40'7375U, 0x3add'c139U, 1U, 0U, 1U}, - // x = -0x1.2a33bcp-51, exp10m1f(x) = -0x1.57515ep-50 (RZ) - {0xa615'19deU, 0xa6ab'a8afU, 0U, 1U, 0U}, - // x = -0x0p+0, exp10m1f(x) = -0x0p+0 (RZ) - {0x8000'0000U, 0x8000'0000U, 0U, 0U, 0U}, - // x = -0x1.b59e08p-31, exp10m1f(x) = -0x1.f7d356p-30 (RZ) - {0xb05a'cf04U, 0xb0fb'e9abU, 0U, 1U, 1U}, - // x = -0x1.bf342p-12, exp10m1f(x) = -0x1.014e02p-10 (RZ) - {0xb9df'9a10U, 0xba80'a701U, 0U, 1U, 0U}, - // x = -0x1.6207fp-11, exp10m1f(x) = -0x1.9746cap-10 (RZ) - {0xba31'03f8U, 0xbacb'a365U, 0U, 1U, 1U}, - // x = -0x1.bd0c66p-11, exp10m1f(x) = -0x1.ffe168p-10 (RZ) - {0xba5e'8633U, 0xbaff'f0b4U, 0U, 1U, 1U}, - // x = -0x1.ffd84cp-10, exp10m1f(x) = -0x1.25faf2p-8 (RZ) - {0xbaff'ec26U, 0xbb92'fd79U, 0U, 1U, 0U}, - // x = -0x1.a74172p-9, exp10m1f(x) = -0x1.e57be2p-8 (RZ) - {0xbb53'a0b9U, 0xbbf2'bdf1U, 0U, 1U, 1U}, - // x = -0x1.cb694cp-9, exp10m1f(x) = -0x1.0764e4p-7 (RZ) - {0xbb65'b4a6U, 0xbc03'b272U, 0U, 1U, 0U}, - }}; - -static constexpr size_t N_EXCEPTS_HI = 19; - -static constexpr fputil::ExceptValues EXP10M1F_EXCEPTS_HI = - {{ - // (input, RZ output, RU offset, RD offset, RN offset) - // x = 0x1.8d31eep-8, exp10m1f(x) = 0x1.cc7e4cp-7 (RZ) - {0x3bc6'98f7U, 0x3c66'3f26U, 1U, 0U, 1U}, - // x = 0x1.915fcep-8, exp10m1f(x) = 
0x1.d15f72p-7 (RZ) - {0x3bc8'afe7U, 0x3c68'afb9U, 1U, 0U, 0U}, - // x = 0x1.bcf982p-8, exp10m1f(x) = 0x1.022928p-6 (RZ) - {0x3bde'7cc1U, 0x3c81'1494U, 1U, 0U, 1U}, - // x = 0x1.99ff0ap-7, exp10m1f(x) = 0x1.dee416p-6 (RZ) - {0x3c4c'ff85U, 0x3cef'720bU, 1U, 0U, 0U}, - // x = 0x1.75ea14p-6, exp10m1f(x) = 0x1.b9ff16p-5 (RZ) - {0x3cba'f50aU, 0x3d5c'ff8bU, 1U, 0U, 0U}, - // x = 0x1.f81b64p-6, exp10m1f(x) = 0x1.2cb6bcp-4 (RZ) - {0x3cfc'0db2U, 0x3d96'5b5eU, 1U, 0U, 0U}, - // x = 0x1.fafecp+3, exp10m1f(x) = 0x1.8c880ap+52 (RZ) - {0x417d'7f60U, 0x59c6'4405U, 1U, 0U, 0U}, - // x = -0x1.3bf094p-8, exp10m1f(x) = -0x1.69ba4ap-7 (RZ) - {0xbb9d'f84aU, 0xbc34'dd25U, 0U, 1U, 0U}, - // x = -0x1.4558bcp-8, exp10m1f(x) = -0x1.746fb8p-7 (RZ) - {0xbba2'ac5eU, 0xbc3a'37dcU, 0U, 1U, 1U}, - // x = -0x1.4bb43p-8, exp10m1f(x) = -0x1.7babe4p-7 (RZ) - {0xbba5'da18U, 0xbc3d'd5f2U, 0U, 1U, 1U}, - // x = -0x1.776cc8p-8, exp10m1f(x) = -0x1.ad62c4p-7 (RZ) - {0xbbbb'b664U, 0xbc56'b162U, 0U, 1U, 0U}, - // x = -0x1.f024cp-8, exp10m1f(x) = -0x1.1b20d6p-6 (RZ) - {0xbbf8'1260U, 0xbc8d'906bU, 0U, 1U, 1U}, - // x = -0x1.f510eep-8, exp10m1f(x) = -0x1.1de9aap-6 (RZ) - {0xbbfa'8877U, 0xbc8e'f4d5U, 0U, 1U, 0U}, - // x = -0x1.0b43c4p-7, exp10m1f(x) = -0x1.30d418p-6 (RZ) - {0xbc05'a1e2U, 0xbc98'6a0cU, 0U, 1U, 0U}, - // x = -0x1.245ee4p-7, exp10m1f(x) = -0x1.4d2b86p-6 (RZ) - {0xbc12'2f72U, 0xbca6'95c3U, 0U, 1U, 0U}, - // x = -0x1.f9f2dap-7, exp10m1f(x) = -0x1.1e2186p-5 (RZ) - {0xbc7c'f96dU, 0xbd0f'10c3U, 0U, 1U, 0U}, - // x = -0x1.08e42p-6, exp10m1f(x) = -0x1.2b5c4p-5 (RZ) - {0xbc84'7210U, 0xbd15'ae20U, 0U, 1U, 1U}, - // x = -0x1.0cdc44p-5, exp10m1f(x) = -0x1.2a2152p-4 (RZ) - {0xbd06'6e22U, 0xbd95'10a9U, 0U, 1U, 1U}, - // x = -0x1.ca4322p-5, exp10m1f(x) = -0x1.ef073p-4 (RZ) - {0xbd65'2191U, 0xbdf7'8398U, 0U, 1U, 1U}, - }}; -#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS - -LLVM_LIBC_FUNCTION(float, exp10m1f, (float x)) { - using FPBits = fputil::FPBits; - FPBits xbits(x); - - uint32_t x_u = xbits.uintval(); - uint32_t x_abs = x_u & 0x7fff'ffffU; - - // When x >= log10(2^128), or x is nan - if (LIBC_UNLIKELY(xbits.is_pos() && x_u >= 0x421a'209bU)) { - if (xbits.is_finite()) { - int rounding = fputil::quick_get_round(); - if (rounding == FE_DOWNWARD || rounding == FE_TOWARDZERO) - return FPBits::max_normal().get_val(); - - fputil::set_errno_if_required(ERANGE); - fputil::raise_except_if_required(FE_OVERFLOW); - } - - // x >= log10(2^128) and 10^x - 1 rounds to +inf, or x is +inf or nan - return x + FPBits::inf().get_val(); - } - - // When |x| <= log10(2) * 2^(-6) - if (LIBC_UNLIKELY(x_abs <= 0x3b9a'209bU)) { -#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS - if (auto r = EXP10M1F_EXCEPTS_LO.lookup(x_u); LIBC_UNLIKELY(r.has_value())) - return r.value(); -#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS - - double dx = x; - double dx_sq = dx * dx; - double c0 = dx * Exp10Base::COEFFS[0]; - double c1 = - fputil::multiply_add(dx, Exp10Base::COEFFS[2], Exp10Base::COEFFS[1]); - double c2 = - fputil::multiply_add(dx, Exp10Base::COEFFS[4], Exp10Base::COEFFS[3]); - // 10^dx - 1 ~ (1 + COEFFS[0] * dx + ... + COEFFS[4] * dx^5) - 1 - // = COEFFS[0] * dx + ... 
+ COEFFS[4] * dx^5 - return static_cast(fputil::polyeval(dx_sq, c0, c1, c2)); - } - - // When x <= log10(2^-25), or x is nan - if (LIBC_UNLIKELY(x_u >= 0xc0f0d2f1)) { - // exp10m1(-inf) = -1 - if (xbits.is_inf()) - return -1.0f; - // exp10m1(nan) = nan - if (xbits.is_nan()) - return x; - - int rounding = fputil::quick_get_round(); - if (rounding == FE_UPWARD || rounding == FE_TOWARDZERO || - (rounding == FE_TONEAREST && x_u == 0xc0f0d2f1)) - return -0x1.ffff'fep-1f; // -1.0f + 0x1.0p-24f - - fputil::set_errno_if_required(ERANGE); - fputil::raise_except_if_required(FE_UNDERFLOW); - return -1.0f; - } - - // Exact outputs when x = 1, 2, ..., 10. - // Quick check mask: 0x800f'ffffU = ~(bits of 1.0f | ... | bits of 10.0f) - if (LIBC_UNLIKELY((x_u & 0x800f'ffffU) == 0)) { - switch (x_u) { - case 0x3f800000U: // x = 1.0f - return 9.0f; - case 0x40000000U: // x = 2.0f - return 99.0f; - case 0x40400000U: // x = 3.0f - return 999.0f; - case 0x40800000U: // x = 4.0f - return 9'999.0f; - case 0x40a00000U: // x = 5.0f - return 99'999.0f; - case 0x40c00000U: // x = 6.0f - return 999'999.0f; - case 0x40e00000U: // x = 7.0f - return 9'999'999.0f; - case 0x41000000U: { // x = 8.0f - int rounding = fputil::quick_get_round(); - if (rounding == FE_UPWARD || rounding == FE_TONEAREST) - return 100'000'000.0f; - return 99'999'992.0f; - } - case 0x41100000U: { // x = 9.0f - int rounding = fputil::quick_get_round(); - if (rounding == FE_UPWARD || rounding == FE_TONEAREST) - return 1'000'000'000.0f; - return 999'999'936.0f; - } - case 0x41200000U: { // x = 10.0f - int rounding = fputil::quick_get_round(); - if (rounding == FE_UPWARD || rounding == FE_TONEAREST) - return 10'000'000'000.0f; - return 9'999'998'976.0f; - } - } - } - -#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS - if (auto r = EXP10M1F_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value())) - return r.value(); -#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS - - // Range reduction: 10^x = 2^(mid + hi) * 10^lo - // rr = (2^(mid + hi), lo) - auto rr = exp_b_range_reduc(x); - - // The low part is approximated by a degree-5 minimax polynomial. - // 10^lo ~ 1 + COEFFS[0] * lo + ... 
+ COEFFS[4] * lo^5 - double lo_sq = rr.lo * rr.lo; - double c0 = fputil::multiply_add(rr.lo, Exp10Base::COEFFS[0], 1.0); - double c1 = - fputil::multiply_add(rr.lo, Exp10Base::COEFFS[2], Exp10Base::COEFFS[1]); - double c2 = - fputil::multiply_add(rr.lo, Exp10Base::COEFFS[4], Exp10Base::COEFFS[3]); - double exp10_lo = fputil::polyeval(lo_sq, c0, c1, c2); - // 10^x - 1 = 2^(mid + hi) * 10^lo - 1 - // ~ mh * exp10_lo - 1 - return static_cast(fputil::multiply_add(exp10_lo, rr.mh, -1.0)); -} +LLVM_LIBC_FUNCTION(float, exp10m1f, (float x)) { return math::exp10m1f(x); } } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/math/generic/exp10m1f16.cpp b/libc/src/math/generic/exp10m1f16.cpp index 6c2fdbea418df..8a3c4abf1f10e 100644 --- a/libc/src/math/generic/exp10m1f16.cpp +++ b/libc/src/math/generic/exp10m1f16.cpp @@ -7,166 +7,12 @@ //===----------------------------------------------------------------------===// #include "src/math/exp10m1f16.h" -#include "hdr/errno_macros.h" -#include "hdr/fenv_macros.h" -#include "src/__support/FPUtil/FEnvImpl.h" -#include "src/__support/FPUtil/FPBits.h" -#include "src/__support/FPUtil/PolyEval.h" -#include "src/__support/FPUtil/cast.h" -#include "src/__support/FPUtil/except_value_utils.h" -#include "src/__support/FPUtil/multiply_add.h" -#include "src/__support/FPUtil/rounding_mode.h" -#include "src/__support/common.h" -#include "src/__support/macros/config.h" -#include "src/__support/macros/optimization.h" -#include "src/__support/macros/properties/cpu_features.h" -#include "src/__support/math/exp10f16_utils.h" +#include "src/__support/math/exp10m1f16.h" namespace LIBC_NAMESPACE_DECL { -#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS -static constexpr fputil::ExceptValues EXP10M1F16_EXCEPTS_LO = {{ - // (input, RZ output, RU offset, RD offset, RN offset) - // x = 0x1.5c4p-4, exp10m1f16(x) = 0x1.bacp-3 (RZ) - {0x2d71U, 0x32ebU, 1U, 0U, 0U}, - // x = -0x1.5ep-13, exp10m1f16(x) = -0x1.92cp-12 (RZ) - {0x8978U, 0x8e4bU, 0U, 1U, 0U}, - // x = -0x1.e2p-10, exp10m1f16(x) = -0x1.14cp-8 (RZ) - {0x9788U, 0x9c53U, 0U, 1U, 0U}, -}}; - -#ifdef LIBC_TARGET_CPU_HAS_FMA_FLOAT -static constexpr size_t N_EXP10M1F16_EXCEPTS_HI = 3; -#else -static constexpr size_t N_EXP10M1F16_EXCEPTS_HI = 6; -#endif - -static constexpr fputil::ExceptValues - EXP10M1F16_EXCEPTS_HI = {{ - // (input, RZ output, RU offset, RD offset, RN offset) - // x = 0x1.8f4p-2, exp10m1f16(x) = 0x1.744p+0 (RZ) - {0x363dU, 0x3dd1U, 1U, 0U, 0U}, - // x = 0x1.95cp-2, exp10m1f16(x) = 0x1.7d8p+0 (RZ) - {0x3657U, 0x3df6U, 1U, 0U, 0U}, - // x = 0x1.d04p-2, exp10m1f16(x) = 0x1.d7p+0 (RZ) - {0x3741U, 0x3f5cU, 1U, 0U, 1U}, -#ifndef LIBC_TARGET_CPU_HAS_FMA_FLOAT - // x = 0x1.0cp+1, exp10m1f16(x) = 0x1.ec4p+6 (RZ) - {0x4030U, 0x57b1U, 1U, 0U, 1U}, - // x = 0x1.1b8p+1, exp10m1f16(x) = 0x1.45cp+7 (RZ) - {0x406eU, 0x5917U, 1U, 0U, 1U}, - // x = 0x1.2f4p+2, exp10m1f16(x) = 0x1.ab8p+15 (RZ) - {0x44bdU, 0x7aaeU, 1U, 0U, 1U}, -#endif - }}; -#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS - LLVM_LIBC_FUNCTION(float16, exp10m1f16, (float16 x)) { - using FPBits = fputil::FPBits; - FPBits x_bits(x); - - uint16_t x_u = x_bits.uintval(); - uint16_t x_abs = x_u & 0x7fffU; - - // When |x| <= 2^(-3), or |x| >= 11 * log10(2), or x is NaN. - if (LIBC_UNLIKELY(x_abs <= 0x3000U || x_abs >= 0x429fU)) { - // exp10m1(NaN) = NaN - if (x_bits.is_nan()) { - if (x_bits.is_signaling_nan()) { - fputil::raise_except_if_required(FE_INVALID); - return FPBits::quiet_nan().get_val(); - } - - return x; - } - - // When x >= 16 * log10(2). 
- if (x_u >= 0x44d1U && x_bits.is_pos()) { - // exp10m1(+inf) = +inf - if (x_bits.is_inf()) - return FPBits::inf().get_val(); - - switch (fputil::quick_get_round()) { - case FE_TONEAREST: - case FE_UPWARD: - fputil::set_errno_if_required(ERANGE); - fputil::raise_except_if_required(FE_OVERFLOW | FE_INEXACT); - return FPBits::inf().get_val(); - default: - return FPBits::max_normal().get_val(); - } - } - - // When x < -11 * log10(2). - if (x_u > 0xc29fU) { - // exp10m1(-inf) = -1 - if (x_bits.is_inf()) - return FPBits::one(Sign::NEG).get_val(); - - // When x >= -0x1.ce4p+1, round(10^x - 1, HP, RN) = -0x1.ffcp-1. - if (x_u <= 0xc339U) { - return fputil::round_result_slightly_down( - fputil::cast(-0x1.ffcp-1)); - } - - // When x < -0x1.ce4p+1, round(10^x - 1, HP, RN) = -1. - switch (fputil::quick_get_round()) { - case FE_TONEAREST: - case FE_DOWNWARD: - return FPBits::one(Sign::NEG).get_val(); - default: - return fputil::cast(-0x1.ffcp-1); - } - } - - // When |x| <= 2^(-3). - if (x_abs <= 0x3000U) { - if (LIBC_UNLIKELY(x_abs == 0)) - return x; - -#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS - if (auto r = EXP10M1F16_EXCEPTS_LO.lookup(x_u); - LIBC_UNLIKELY(r.has_value())) - return r.value(); -#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS - - float xf = x; - // Degree-5 minimax polynomial generated by Sollya with the following - // commands: - // > display = hexadecimal; - // > P = fpminimax((10^x - 1)/x, 4, [|SG...|], [-2^-3, 2^-3]); - // > x * P; - return fputil::cast( - xf * fputil::polyeval(xf, 0x1.26bb1cp+1f, 0x1.5351c8p+1f, - 0x1.04704p+1f, 0x1.2ce084p+0f, 0x1.14a6bep-1f)); - } - } - - // When x is 1, 2, or 3. These are hard-to-round cases with exact results. - // 10^4 - 1 = 9'999 is not exactly representable as a float16, but luckily the - // polynomial approximation gives the correct result for x = 4 in all - // rounding modes. - if (LIBC_UNLIKELY((x_u & ~(0x3c00U | 0x4000U | 0x4200U | 0x4400U)) == 0)) { - switch (x_u) { - case 0x3c00U: // x = 1.0f16 - return fputil::cast(9.0); - case 0x4000U: // x = 2.0f16 - return fputil::cast(99.0); - case 0x4200U: // x = 3.0f16 - return fputil::cast(999.0); - } - } - -#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS - if (auto r = EXP10M1F16_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value())) - return r.value(); -#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS - - // exp10(x) = exp2((hi + mid) * log2(10)) * exp10(lo) - auto [exp2_hi_mid, exp10_lo] = exp10_range_reduction(x); - // exp10m1(x) = exp2((hi + mid) * log2(lo)) * exp10(lo) - 1 - return fputil::cast( - fputil::multiply_add(exp2_hi_mid, exp10_lo, -1.0f)); + return math::exp10m1f16(x); } } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/math/generic/log_bf16.cpp b/libc/src/math/generic/log_bf16.cpp new file mode 100644 index 0000000000000..213dccca0fb9e --- /dev/null +++ b/libc/src/math/generic/log_bf16.cpp @@ -0,0 +1,137 @@ +//===-- BFloat16 log(x) function ------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/math/log_bf16.h" +#include "src/__support/FPUtil/FPBits.h" +#include "src/__support/FPUtil/bfloat16.h" +#include "src/__support/FPUtil/cast.h" +#include "src/__support/FPUtil/multiply_add.h" +#include "src/__support/common.h" +#include "src/__support/macros/config.h" +#include "src/__support/macros/optimization.h" +#include "src/__support/macros/properties/cpu_features.h" + +namespace LIBC_NAMESPACE_DECL { + +// Generated by Sollya with the following commands: +// > display = hexadecimal; +// > round(log(2), SG, RN); +static constexpr float LOGF_2 = 0x1.62e43p-1f; + +// Generated by Sollya with the following commands: +// > display = hexadecimal; +// > for i from 0 to 127 do print(round(log(1 + i * 2^-7), SG, RN)); +static constexpr float LOG_1_PLUS_M[128] = { + 0x0.0p0f, 0x1.fe02a6p-8f, 0x1.fc0a8cp-7f, 0x1.7b91bp-6f, + 0x1.f829bp-6f, 0x1.39e87cp-5f, 0x1.77459p-5f, 0x1.b42dd8p-5f, + 0x1.f0a30cp-5f, 0x1.16536ep-4f, 0x1.341d7ap-4f, 0x1.51b074p-4f, + 0x1.6f0d28p-4f, 0x1.8c345ep-4f, 0x1.a926d4p-4f, 0x1.c5e548p-4f, + 0x1.e27076p-4f, 0x1.fec914p-4f, 0x1.0d77e8p-3f, 0x1.1b72aep-3f, + 0x1.29553p-3f, 0x1.371fc2p-3f, 0x1.44d2b6p-3f, 0x1.526e5ep-3f, + 0x1.5ff308p-3f, 0x1.6d60fep-3f, 0x1.7ab89p-3f, 0x1.87fa06p-3f, + 0x1.9525aap-3f, 0x1.a23bc2p-3f, 0x1.af3c94p-3f, 0x1.bc2868p-3f, + 0x1.c8ff7cp-3f, 0x1.d5c216p-3f, 0x1.e27076p-3f, 0x1.ef0adcp-3f, + 0x1.fb9186p-3f, 0x1.04025ap-2f, 0x1.0a324ep-2f, 0x1.1058cp-2f, + 0x1.1675cap-2f, 0x1.1c898cp-2f, 0x1.22942p-2f, 0x1.2895a2p-2f, + 0x1.2e8e2cp-2f, 0x1.347ddap-2f, 0x1.3a64c6p-2f, 0x1.404308p-2f, + 0x1.4618bcp-2f, 0x1.4be5fap-2f, 0x1.51aad8p-2f, 0x1.576772p-2f, + 0x1.5d1bdcp-2f, 0x1.62c83p-2f, 0x1.686c82p-2f, 0x1.6e08eap-2f, + 0x1.739d8p-2f, 0x1.792a56p-2f, 0x1.7eaf84p-2f, 0x1.842d1ep-2f, + 0x1.89a338p-2f, 0x1.8f11e8p-2f, 0x1.947942p-2f, 0x1.99d958p-2f, + 0x1.9f323ep-2f, 0x1.a4840ap-2f, 0x1.a9cecap-2f, 0x1.af1294p-2f, + 0x1.b44f78p-2f, 0x1.b9858ap-2f, 0x1.beb4dap-2f, 0x1.c3dd7ap-2f, + 0x1.c8ff7cp-2f, 0x1.ce1afp-2f, 0x1.d32fe8p-2f, 0x1.d83e72p-2f, + 0x1.dd46ap-2f, 0x1.e24882p-2f, 0x1.e74426p-2f, 0x1.ec399ep-2f, + 0x1.f128f6p-2f, 0x1.f6124p-2f, 0x1.faf588p-2f, 0x1.ffd2ep-2f, + 0x1.02552ap-1f, 0x1.04bdfap-1f, 0x1.0723e6p-1f, 0x1.0986f4p-1f, + 0x1.0be72ep-1f, 0x1.0e4498p-1f, 0x1.109f3ap-1f, 0x1.12f71ap-1f, + 0x1.154c3ep-1f, 0x1.179eacp-1f, 0x1.19ee6cp-1f, 0x1.1c3b82p-1f, + 0x1.1e85f6p-1f, 0x1.20cdcep-1f, 0x1.23130ep-1f, 0x1.2555bcp-1f, + 0x1.2795e2p-1f, 0x1.29d38p-1f, 0x1.2c0e9ep-1f, 0x1.2e4744p-1f, + 0x1.307d74p-1f, 0x1.32b134p-1f, 0x1.34e28ap-1f, 0x1.37117cp-1f, + 0x1.393e0ep-1f, 0x1.3b6844p-1f, 0x1.3d9026p-1f, 0x1.3fb5b8p-1f, + 0x1.41d8fep-1f, 0x1.43f9fep-1f, 0x1.4618bcp-1f, 0x1.48353ep-1f, + 0x1.4a4f86p-1f, 0x1.4c679ap-1f, 0x1.4e7d82p-1f, 0x1.50913cp-1f, + 0x1.52a2d2p-1f, 0x1.54b246p-1f, 0x1.56bf9ep-1f, 0x1.58cadcp-1f, + 0x1.5ad404p-1f, 0x1.5cdb1ep-1f, 0x1.5ee02ap-1f, 0x1.60e33p-1f, +}; + +LLVM_LIBC_FUNCTION(bfloat16, log_bf16, (bfloat16 x)) { + using FPBits = fputil::FPBits; + FPBits x_bits(x); + + uint16_t x_u = x_bits.uintval(); + + // If x <= 0, or x is 1, or x is +inf, or x is NaN. 
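+  // In bfloat16 bits, 0x3f80 is 1.0 and 0x7f80 is +inf; every encoding with
+  // the sign bit set is also >= 0x7f80, so this single check catches -0.0,
+  // all negative inputs, -inf and NaN as well.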
+ if (LIBC_UNLIKELY(x_u == 0U || x_u == 0x3f80U || x_u >= 0x7f80U)) { + // log(NaN) = NaN + if (x_bits.is_nan()) { + if (x_bits.is_signaling_nan()) { + fputil::raise_except_if_required(FE_INVALID); + return FPBits::quiet_nan().get_val(); + } + + return x; + } + + // log(+/-0) = −inf + if ((x_u & 0x7fffU) == 0U) { + fputil::raise_except_if_required(FE_DIVBYZERO); + return FPBits::inf(Sign::NEG).get_val(); + } + + // log(1) = 0 + if (x_u == 0x3f80U) + return FPBits::zero().get_val(); + + // x < 0 + if (x_u > 0x8000U) { + fputil::set_errno_if_required(EDOM); + fputil::raise_except_if_required(FE_INVALID); + return FPBits::quiet_nan().get_val(); + } + + // log(+inf) = +inf + return FPBits::inf().get_val(); + } + +#ifndef LIBC_TARGET_CPU_HAS_FMA + // log(0.00000000000000171390679426508540927898138761520386) + // ~= -34.00000095 + if (LIBC_UNLIKELY(x_u == 0x26F7U)) + return bfloat16(-34.0000009); +#endif // LIBC_TARGET_CPU_HAS_FMA + + int e = -FPBits::EXP_BIAS; + + // When x is subnormal, normalize it. + if ((x_u & FPBits::EXP_MASK) == 0U) { + // Can't pass an integer to fputil::cast directly. + constexpr float NORMALIZE_EXP = 1U << FPBits::FRACTION_LEN; + x_bits = FPBits(x_bits.get_val() * fputil::cast(NORMALIZE_EXP)); + x_u = x_bits.uintval(); + e -= FPBits::FRACTION_LEN; + } + + // To compute log(x), we perform the following range reduction: + // x = 2^e * (1 + m), + // log(x) = e * log(2) + log(1 + m). + // for BFloat16, mantissa is at most 7 explicit bits, so we lookup + // log(1 + m) in LOG_1_PLUS_M table using `m` as key. + + // Get the 7-bit mantissa directly as the table index + uint16_t m = x_bits.get_mantissa(); + + // Get unbiased exponent + e += x_u >> FPBits::FRACTION_LEN; + + return fputil::cast( + fputil::multiply_add(static_cast(e), LOGF_2, LOG_1_PLUS_M[m])); +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/math/log_bf16.h b/libc/src/math/log_bf16.h new file mode 100644 index 0000000000000..6f6b8e4cc55fe --- /dev/null +++ b/libc/src/math/log_bf16.h @@ -0,0 +1,21 @@ +//===-- Implementation header for BFloat16 log(x) function ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_MATH_LOG_BF16_H +#define LLVM_LIBC_SRC_MATH_LOG_BF16_H + +#include "src/__support/macros/config.h" +#include "src/__support/macros/properties/types.h" + +namespace LIBC_NAMESPACE_DECL { + +bfloat16 log_bf16(bfloat16 x); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_MATH_LOG_BF16_H diff --git a/libc/src/string/CMakeLists.txt b/libc/src/string/CMakeLists.txt index b8cdb2a7d3538..83c956429be24 100644 --- a/libc/src/string/CMakeLists.txt +++ b/libc/src/string/CMakeLists.txt @@ -22,6 +22,7 @@ add_header_library( libc.src.__support.CPP.type_traits libc.src.__support.CPP.simd libc.src.__support.common + libc.src.__support.macros.attributes libc.src.string.memory_utils.inline_memcpy ${string_config_options} ) diff --git a/libc/src/string/memory_utils/aarch64/inline_strlen.h b/libc/src/string/memory_utils/aarch64/inline_strlen.h index 36fd1aa636b54..87f5ccdd56e23 100644 --- a/libc/src/string/memory_utils/aarch64/inline_strlen.h +++ b/libc/src/string/memory_utils/aarch64/inline_strlen.h @@ -17,7 +17,7 @@ namespace LIBC_NAMESPACE_DECL { namespace neon { -[[gnu::no_sanitize_address]] [[maybe_unused]] LIBC_INLINE static size_t +[[maybe_unused]] LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE static size_t string_length(const char *src) { using Vector __attribute__((may_alias)) = uint8x8_t; diff --git a/libc/src/string/memory_utils/generic/inline_strlen.h b/libc/src/string/memory_utils/generic/inline_strlen.h index 5e553e301d4da..69700e801bcea 100644 --- a/libc/src/string/memory_utils/generic/inline_strlen.h +++ b/libc/src/string/memory_utils/generic/inline_strlen.h @@ -24,22 +24,22 @@ LIBC_INLINE constexpr cpp::simd_mask shift_mask(cpp::simd_mask m, return cpp::bit_cast>(r); } -[[clang::no_sanitize("address")]] LIBC_INLINE size_t -string_length(const char *src) { +LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE size_t string_length(const char *src) { constexpr cpp::simd null_byte = cpp::splat('\0'); size_t alignment = alignof(cpp::simd); const cpp::simd *aligned = reinterpret_cast *>( __builtin_align_down(src, alignment)); - cpp::simd chars = cpp::load_aligned>(aligned); + cpp::simd chars = cpp::load>(aligned, /*aligned=*/true); cpp::simd_mask mask = chars == null_byte; size_t offset = src - reinterpret_cast(aligned); if (cpp::any_of(shift_mask(mask, offset))) return cpp::find_first_set(shift_mask(mask, offset)); for (;;) { - cpp::simd chars = cpp::load_aligned>(++aligned); + cpp::simd chars = cpp::load>(++aligned, + /*aligned=*/true); cpp::simd_mask mask = chars == null_byte; if (cpp::any_of(mask)) return (reinterpret_cast(aligned) - src) + diff --git a/libc/src/string/memory_utils/op_generic.h b/libc/src/string/memory_utils/op_generic.h index 010f2187a4ffd..a86cbd8bcfc72 100644 --- a/libc/src/string/memory_utils/op_generic.h +++ b/libc/src/string/memory_utils/op_generic.h @@ -41,12 +41,22 @@ static_assert((UINTPTR_MAX == 4294967295U) || "We currently only support 32- or 64-bit platforms"); #ifdef LIBC_COMPILER_IS_MSVC - +#ifdef LIBC_TARGET_ARCH_IS_X86 namespace LIBC_NAMESPACE_DECL { using generic_v128 = __m128i; using generic_v256 = __m256i; using generic_v512 = __m512i; } // namespace LIBC_NAMESPACE_DECL +#else +// Special handling when target does not have real vector types. +// We can potentially use uint8x16_t etc. However, MSVC does not provide +// subscript operation. 
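// Sketch of the fallback described in the comment above: when the compiler
// offers no native vector type, a suitably aligned byte array can stand in
// for one, because the generic memory ops only need whole-block loads and
// stores plus per-byte subscripting (which is what MSVC's NEON types lack).
// fake_v128 below is a stand-in for illustration, not the real generic_v128.
#include <cstdint>
#include <cstring>

struct alignas(16) fake_v128 {
  uint8_t bytes[16];
};

// splat: fill every lane of the "vector" with the same byte.
static fake_v128 splat_sketch(uint8_t value) {
  fake_v128 v;
  std::memset(v.bytes, value, sizeof(v.bytes));
  return v;
}

// store: one 16-byte block store, as used by the block memset/memcpy ops.
static void store_sketch(void *dst, const fake_v128 &v) {
  std::memcpy(dst, v.bytes, sizeof(v.bytes));
}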
+namespace LIBC_NAMESPACE_DECL { +struct alignas(16) generic_v128 : public cpp::array {}; +struct alignas(32) generic_v256 : public cpp::array {}; +struct alignas(64) generic_v512 : public cpp::array {}; +} // namespace LIBC_NAMESPACE_DECL +#endif #else namespace LIBC_NAMESPACE_DECL { @@ -159,7 +169,8 @@ template struct Memset { LIBC_INLINE static void block(Ptr dst, uint8_t value) { if constexpr (is_scalar_v || is_vector_v) { - store(dst, splat(value)); + // Avoid ambiguous call due to ADL + generic::store(dst, splat(value)); } else if constexpr (is_array_v) { using value_type = typename T::value_type; const auto Splat = splat(value); diff --git a/libc/src/string/memory_utils/x86_64/inline_strlen.h b/libc/src/string/memory_utils/x86_64/inline_strlen.h index 739f8c1aaddbc..9e10d58363393 100644 --- a/libc/src/string/memory_utils/x86_64/inline_strlen.h +++ b/libc/src/string/memory_utils/x86_64/inline_strlen.h @@ -18,12 +18,12 @@ namespace LIBC_NAMESPACE_DECL { namespace string_length_internal { // Return a bit-mask with the nth bit set if the nth-byte in block_ptr is zero. template -[[gnu::no_sanitize_address]] LIBC_INLINE static Mask +LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE static Mask compare_and_mask(const Vector *block_ptr); template )> -[[gnu::no_sanitize_address]] LIBC_INLINE static size_t +LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE static size_t string_length_vector(const char *src) { uintptr_t misalign_bytes = reinterpret_cast(src) % sizeof(Vector); diff --git a/libc/src/string/string_utils.h b/libc/src/string/string_utils.h index 9d636d02f4756..7feef56fb3676 100644 --- a/libc/src/string/string_utils.h +++ b/libc/src/string/string_utils.h @@ -19,6 +19,7 @@ #include "hdr/types/size_t.h" #include "src/__support/CPP/bitset.h" #include "src/__support/CPP/type_traits.h" // cpp::is_same_v +#include "src/__support/macros/attributes.h" #include "src/__support/macros/config.h" #include "src/__support/macros/optimization.h" // LIBC_UNLIKELY #include "src/string/memory_utils/inline_memcpy.h" @@ -119,7 +120,7 @@ template LIBC_INLINE size_t string_length(const T *src) { } template -[[gnu::no_sanitize_address]] LIBC_INLINE void * +LIBC_NO_SANITIZE_OOB_ACCESS LIBC_INLINE void * find_first_character_wide_read(const unsigned char *src, unsigned char ch, size_t n) { const unsigned char *char_ptr = src; diff --git a/libc/src/sys/mman/linux/CMakeLists.txt b/libc/src/sys/mman/linux/CMakeLists.txt index 89a0ad1527a06..7181bb98a187f 100644 --- a/libc/src/sys/mman/linux/CMakeLists.txt +++ b/libc/src/sys/mman/linux/CMakeLists.txt @@ -184,11 +184,10 @@ add_header_library( HDRS shm_common.h DEPENDS + libc.hdr.errno_macros libc.src.__support.CPP.array libc.src.__support.CPP.string_view - libc.src.__support.CPP.optional - libc.src.__support.common - libc.src.errno.errno + libc.src.__support.error_or libc.src.string.memory_utils.inline_memcpy ) @@ -199,8 +198,8 @@ add_entrypoint_object( HDRS ../shm_open.h DEPENDS - libc.src.fcntl.open libc.hdr.types.mode_t + libc.src.errno.errno .shm_common ) @@ -211,6 +210,6 @@ add_entrypoint_object( HDRS ../shm_unlink.h DEPENDS - libc.src.unistd.unlink + libc.src.errno.errno .shm_common ) diff --git a/libc/src/sys/mman/linux/shm_common.h b/libc/src/sys/mman/linux/shm_common.h index 29d1401821e49..9ba8fd1ea100c 100644 --- a/libc/src/sys/mman/linux/shm_common.h +++ b/libc/src/sys/mman/linux/shm_common.h @@ -6,18 +6,13 @@ // //===----------------------------------------------------------------------===// +#include "hdr/errno_macros.h" #include "src/__support/CPP/array.h" -#include 
"src/__support/CPP/optional.h" #include "src/__support/CPP/string_view.h" -#include "src/__support/libc_errno.h" +#include "src/__support/error_or.h" #include "src/__support/macros/config.h" #include "src/string/memory_utils/inline_memcpy.h" -// TODO: clean this up. -// 1. Change from optional to ErrorOr, and return the errno instead of setting -// it here. -// 2. Replace inline memcpy with __builtin_memcpy - // TODO: Get PATH_MAX via https://github.com/llvm/llvm-project/issues/85121 #include @@ -28,24 +23,18 @@ namespace shm_common { LIBC_INLINE_VAR constexpr cpp::string_view SHM_PREFIX = "/dev/shm/"; using SHMPath = cpp::array; -LIBC_INLINE cpp::optional translate_name(cpp::string_view name) { +LIBC_INLINE ErrorOr translate_name(cpp::string_view name) { // trim leading slashes size_t offset = name.find_first_not_of('/'); - if (offset == cpp::string_view::npos) { - libc_errno = EINVAL; - return cpp::nullopt; - } + if (offset == cpp::string_view::npos) + return Error(EINVAL); name = name.substr(offset); // check the name - if (name.size() > NAME_MAX) { - libc_errno = ENAMETOOLONG; - return cpp::nullopt; - } - if (name == "." || name == ".." || name.contains('/')) { - libc_errno = EINVAL; - return cpp::nullopt; - } + if (name.size() > NAME_MAX) + return Error(ENAMETOOLONG); + if (name == "." || name == ".." || name.contains('/')) + return Error(EINVAL); // prepend the prefix SHMPath buffer; diff --git a/libc/src/sys/mman/linux/shm_open.cpp b/libc/src/sys/mman/linux/shm_open.cpp index 3099062eace98..46231ba1279a8 100644 --- a/libc/src/sys/mman/linux/shm_open.cpp +++ b/libc/src/sys/mman/linux/shm_open.cpp @@ -7,9 +7,11 @@ //===----------------------------------------------------------------------===// #include "src/sys/mman/shm_open.h" + #include "hdr/fcntl_macros.h" #include "hdr/types/mode_t.h" #include "src/__support/OSUtil/fcntl.h" +#include "src/__support/libc_errno.h" #include "src/__support/macros/config.h" #include "src/sys/mman/linux/shm_common.h" @@ -18,17 +20,19 @@ namespace LIBC_NAMESPACE_DECL { static constexpr int DEFAULT_OFLAGS = O_NOFOLLOW | O_CLOEXEC | O_NONBLOCK; LLVM_LIBC_FUNCTION(int, shm_open, (const char *name, int oflags, mode_t mode)) { - if (cpp::optional buffer = - shm_common::translate_name(name)) { - auto result = internal::open(buffer->data(), oflags | DEFAULT_OFLAGS, mode); + auto path_result = shm_common::translate_name(name); + if (!path_result.has_value()) { + libc_errno = path_result.error(); + return -1; + } - if (!result.has_value()) { - libc_errno = result.error(); - return -1; - } - return result.value(); + auto open_result = + internal::open(path_result->data(), oflags | DEFAULT_OFLAGS, mode); + if (!open_result.has_value()) { + libc_errno = open_result.error(); + return -1; } - return -1; + return open_result.value(); } } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/sys/mman/linux/shm_unlink.cpp b/libc/src/sys/mman/linux/shm_unlink.cpp index 4c61c7cd16bad..7671b1918b83c 100644 --- a/libc/src/sys/mman/linux/shm_unlink.cpp +++ b/libc/src/sys/mman/linux/shm_unlink.cpp @@ -7,20 +7,38 @@ //===----------------------------------------------------------------------===// #include "src/sys/mman/shm_unlink.h" + +#include "hdr/fcntl_macros.h" +#include "src/__support/OSUtil/syscall.h" // For internal syscall function. +#include "src/__support/libc_errno.h" // For internal errno. 
#include "src/__support/macros/config.h" #include "src/sys/mman/linux/shm_common.h" -#include "src/unistd/unlink.h" +#include // For SYS_unlink, SYS_unlinkat namespace LIBC_NAMESPACE_DECL { -// TODO: stop calling the public unlink function. It should be calling an -// internal shared utility. +// TODO: move the unlink syscall to a shared utility. LLVM_LIBC_FUNCTION(int, shm_unlink, (const char *name)) { - if (cpp::optional buffer = - shm_common::translate_name(name)) - return LIBC_NAMESPACE::unlink(buffer->data()); - return -1; + auto path_result = shm_common::translate_name(name); + if (!path_result.has_value()) { + libc_errno = path_result.error(); + return -1; + } +#ifdef SYS_unlink + int ret = LIBC_NAMESPACE::syscall_impl(SYS_unlink, path_result->data()); +#elif defined(SYS_unlinkat) + int ret = LIBC_NAMESPACE::syscall_impl(SYS_unlinkat, AT_FDCWD, + path_result->data(), 0); +#else +#error "unlink and unlinkat syscalls not available." +#endif + + if (ret < 0) { + libc_errno = -ret; + return -1; + } + return ret; } } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/unistd/CMakeLists.txt b/libc/src/unistd/CMakeLists.txt index b1a1716aa85c6..78c3bf8442fab 100644 --- a/libc/src/unistd/CMakeLists.txt +++ b/libc/src/unistd/CMakeLists.txt @@ -55,6 +55,13 @@ add_entrypoint_object( .${LIBC_TARGET_OS}.dup3 ) +add_entrypoint_object( + faccessat + ALIAS + DEPENDS + .${LIBC_TARGET_OS}.faccessat +) + add_entrypoint_object( fchdir ALIAS @@ -111,6 +118,13 @@ add_entrypoint_object( .${LIBC_TARGET_OS}.getcwd ) +add_entrypoint_object( + gethostname + ALIAS + DEPENDS + .${LIBC_TARGET_OS}.gethostname +) + add_entrypoint_object( getpid ALIAS diff --git a/libc/src/unistd/faccessat.h b/libc/src/unistd/faccessat.h new file mode 100644 index 0000000000000..0dc834dbd7cae --- /dev/null +++ b/libc/src/unistd/faccessat.h @@ -0,0 +1,20 @@ +//===-- Implementation header for faccessat ---------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_UNISTD_FACCESSAT_H +#define LLVM_LIBC_SRC_UNISTD_FACCESSAT_H + +#include "src/__support/macros/config.h" + +namespace LIBC_NAMESPACE_DECL { + +int faccessat(int fd, const char *path, int amode, int flag); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_UNISTD_FACCESSAT_H diff --git a/libc/src/unistd/gethostname.h b/libc/src/unistd/gethostname.h new file mode 100644 index 0000000000000..cf67bdbd5c320 --- /dev/null +++ b/libc/src/unistd/gethostname.h @@ -0,0 +1,21 @@ +//===-- Implementation header for gethostname -------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC_UNISTD_GETHOSTNAME_H +#define LLVM_LIBC_SRC_UNISTD_GETHOSTNAME_H + +#include "hdr/types/size_t.h" +#include "src/__support/macros/config.h" + +namespace LIBC_NAMESPACE_DECL { + +int gethostname(char *name, size_t len); + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC_UNISTD_GETHOSTNAME_H diff --git a/libc/src/unistd/linux/CMakeLists.txt b/libc/src/unistd/linux/CMakeLists.txt index 382a61fea8b66..dff6ba2db8a38 100644 --- a/libc/src/unistd/linux/CMakeLists.txt +++ b/libc/src/unistd/linux/CMakeLists.txt @@ -80,6 +80,19 @@ add_entrypoint_object( libc.src.errno.errno ) +add_entrypoint_object( + faccessat + SRCS + faccessat.cpp + HDRS + ../faccessat.h + DEPENDS + libc.hdr.fcntl_macros + libc.include.sys_syscall + libc.src.__support.OSUtil.osutil + libc.src.errno.errno +) + add_entrypoint_object( fchdir SRCS @@ -194,6 +207,20 @@ add_entrypoint_object( libc.src.errno.errno ) +add_entrypoint_object( + gethostname + SRCS + gethostname.cpp + HDRS + ../gethostname.h + DEPENDS + libc.hdr.types.size_t + libc.include.sys_syscall + libc.include.sys_utsname + libc.src.__support.OSUtil.osutil + libc.src.errno.errno +) + add_entrypoint_object( geteuid SRCS diff --git a/libc/src/unistd/linux/access.cpp b/libc/src/unistd/linux/access.cpp index 55cd6adca779d..f06eec5a8db6a 100644 --- a/libc/src/unistd/linux/access.cpp +++ b/libc/src/unistd/linux/access.cpp @@ -23,7 +23,7 @@ LLVM_LIBC_FUNCTION(int, access, (const char *path, int mode)) { int ret = LIBC_NAMESPACE::syscall_impl(SYS_access, path, mode); #elif defined(SYS_faccessat) int ret = - LIBC_NAMESPACE::syscall_impl(SYS_faccessat, AT_FDCWD, path, mode, 0); + LIBC_NAMESPACE::syscall_impl(SYS_faccessat, AT_FDCWD, path, mode); #else #error "access and faccessat syscalls not available." #endif diff --git a/libc/src/unistd/linux/faccessat.cpp b/libc/src/unistd/linux/faccessat.cpp new file mode 100644 index 0000000000000..7a2a29cb0e901 --- /dev/null +++ b/libc/src/unistd/linux/faccessat.cpp @@ -0,0 +1,37 @@ +//===-- Linux implementation of faccessat ---------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/unistd/faccessat.h" + +#include "src/__support/OSUtil/syscall.h" // For internal syscall function. +#include "src/__support/common.h" + +#include "hdr/fcntl_macros.h" +#include "src/__support/libc_errno.h" +#include "src/__support/macros/config.h" +#include // For syscall numbers. + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, faccessat, + (int fd, const char *path, int amode, int flag)) { +#ifdef SYS_faccessat2 + int ret = + LIBC_NAMESPACE::syscall_impl(SYS_faccessat2, fd, path, amode, flag); +#else +#error "faccessat2 syscall is not available." 
+#endif + + if (ret < 0) { + libc_errno = -ret; + return -1; + } + return 0; +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/unistd/linux/gethostname.cpp b/libc/src/unistd/linux/gethostname.cpp new file mode 100644 index 0000000000000..60a12a4d6f8ea --- /dev/null +++ b/libc/src/unistd/linux/gethostname.cpp @@ -0,0 +1,53 @@ +//===-- Linux implementation of gethostname -------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/unistd/gethostname.h" + +#include "hdr/types/size_t.h" +#include "src/__support/OSUtil/syscall.h" // For internal syscall function. +#include "src/__support/common.h" +#include "src/__support/libc_errno.h" +#include "src/__support/macros/config.h" +#include "src/string/string_utils.h" + +#include // For syscall numbers. +#include + +namespace LIBC_NAMESPACE_DECL { + +LLVM_LIBC_FUNCTION(int, gethostname, (char *name, size_t size)) { + // Check for invalid pointer + if (name == nullptr) { + libc_errno = EFAULT; + return -1; + } + + // Because there is no SYS_gethostname syscall, we use uname to get the + // hostname. + utsname unameData; + int ret = LIBC_NAMESPACE::syscall_impl(SYS_uname, &unameData); + if (ret < 0) { + libc_errno = static_cast(-ret); + return -1; + } + + // Guarantee that the name will be null terminated. + // The amount of bytes copied is min(size + 1, strlen(nodename) + 1) + // +1 to account for the null terminator (the last copied byte is a NULL). + internal::strlcpy(name, unameData.nodename, size + 1); + + // Checks if the length of the hostname was greater than or equal to size + if (internal::string_length(unameData.nodename) >= size) { + libc_errno = ENAMETOOLONG; + return -1; + } + + return 0; +} + +} // namespace LIBC_NAMESPACE_DECL diff --git a/libc/test/UnitTest/FEnvSafeTest.cpp b/libc/test/UnitTest/FEnvSafeTest.cpp index 2730de350b39a..4393f9d5e5c3b 100644 --- a/libc/test/UnitTest/FEnvSafeTest.cpp +++ b/libc/test/UnitTest/FEnvSafeTest.cpp @@ -43,7 +43,7 @@ void FEnvSafeTest::set_fenv(const fenv_t &fenv) { void FEnvSafeTest::expect_fenv_eq(const fenv_t &before_fenv, const fenv_t &after_fenv) { -#if defined(LIBC_TARGET_ARCH_IS_AARCH64) +#if defined(LIBC_TARGET_ARCH_IS_AARCH64) && !defined(LIBC_COMPILER_IS_MSVC) using FPState = LIBC_NAMESPACE::fputil::FEnv::FPState; const FPState &before_state = reinterpret_cast(before_fenv); const FPState &after_state = reinterpret_cast(after_fenv); diff --git a/libc/test/UnitTest/MemoryMatcher.cpp b/libc/test/UnitTest/MemoryMatcher.cpp index 3cd5174fd7f75..6e375768e9333 100644 --- a/libc/test/UnitTest/MemoryMatcher.cpp +++ b/libc/test/UnitTest/MemoryMatcher.cpp @@ -40,9 +40,9 @@ bool MemoryMatcher::match(MemoryView actualValue) { } static void display(char C) { - const auto print = [](unsigned char I) { + const auto print = [](unsigned char i) { tlog << static_cast(LIBC_NAMESPACE::internal::toupper( - LIBC_NAMESPACE::internal::int_to_b36_char(I))); + LIBC_NAMESPACE::internal::int_to_b36_char(i))); }; print(static_cast(C) / 16); print(static_cast(C) & 15); diff --git a/libc/test/integration/src/pthread/pthread_create_test.cpp b/libc/test/integration/src/pthread/pthread_create_test.cpp index abd348e707c09..d436cc3270d9c 100644 --- a/libc/test/integration/src/pthread/pthread_create_test.cpp +++ 
b/libc/test/integration/src/pthread/pthread_create_test.cpp @@ -108,14 +108,14 @@ static void *successThread(void *Arg) { volatile uint8_t *bytes_on_stack = (volatile uint8_t *)__builtin_alloca(test_stacksize); - for (size_t I = 0; I < test_stacksize; ++I) { + for (size_t i = 0; i < test_stacksize; ++i) { // Write permissions - bytes_on_stack[I] = static_cast(I); + bytes_on_stack[i] = static_cast(i); } - for (size_t I = 0; I < test_stacksize; ++I) { + for (size_t i = 0; i < test_stacksize; ++i) { // Read/write permissions - bytes_on_stack[I] += static_cast(I); + bytes_on_stack[i] += static_cast(i); } } diff --git a/libc/test/shared/CMakeLists.txt b/libc/test/shared/CMakeLists.txt index 9f3e9838d6b78..ea4634cbe7f9f 100644 --- a/libc/test/shared/CMakeLists.txt +++ b/libc/test/shared/CMakeLists.txt @@ -36,6 +36,8 @@ add_fp_unittest( libc.src.__support.math.cospif libc.src.__support.math.cospif16 libc.src.__support.math.dsqrtl + libc.src.__support.math.exp10m1f + libc.src.__support.math.exp10m1f16 libc.src.__support.math.erff libc.src.__support.math.exp libc.src.__support.math.exp10 diff --git a/libc/test/shared/shared_math_test.cpp b/libc/test/shared/shared_math_test.cpp index 655e7fb48230e..17221932927b0 100644 --- a/libc/test/shared/shared_math_test.cpp +++ b/libc/test/shared/shared_math_test.cpp @@ -27,6 +27,7 @@ TEST(LlvmLibcSharedMathTest, AllFloat16) { EXPECT_FP_EQ(0x1p+0f16, LIBC_NAMESPACE::shared::coshf16(0.0f16)); EXPECT_FP_EQ(0x1p+0f16, LIBC_NAMESPACE::shared::cospif16(0.0f16)); EXPECT_FP_EQ(0x1p+0f16, LIBC_NAMESPACE::shared::exp10f16(0.0f16)); + EXPECT_FP_EQ(0x0p+0f16, LIBC_NAMESPACE::shared::exp10m1f16(0.0f16)); EXPECT_FP_EQ(0x1p+0f16, LIBC_NAMESPACE::shared::expf16(0.0f16)); @@ -57,6 +58,7 @@ TEST(LlvmLibcSharedMathTest, AllFloat) { EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::cosf(0.0f)); EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::coshf(0.0f)); EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::cospif(0.0f)); + EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::exp10m1f(0.0f)); EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::erff(0.0f)); EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::exp10f(0.0f)); EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::expf(0.0f)); diff --git a/libc/test/src/__support/CPP/simd_test.cpp b/libc/test/src/__support/CPP/simd_test.cpp index c8f34df8ab028..8bead8461d649 100644 --- a/libc/test/src/__support/CPP/simd_test.cpp +++ b/libc/test/src/__support/CPP/simd_test.cpp @@ -86,3 +86,65 @@ TEST(LlvmLibcSIMDTest, SplitConcat) { cpp::simd n = cpp::concat(c, c, c, c, c, c, c, c); EXPECT_TRUE(cpp::all_of(n == ~0)); } + +TEST(LlvmLibcSIMDTest, LoadStore) { + constexpr size_t SIZE = cpp::simd_size_v>; + alignas(alignof(cpp::simd)) int buf[SIZE]; + + cpp::simd v1 = cpp::splat(1); + cpp::store(v1, buf); + cpp::simd v2 = cpp::load>(buf); + + EXPECT_TRUE(cpp::all_of(v1 == 1)); + EXPECT_TRUE(cpp::all_of(v2 == 1)); + + cpp::simd v3 = cpp::splat(2); + cpp::store(v3, buf, /*aligned=*/true); + cpp::simd v4 = cpp::load>(buf, /*aligned=*/true); + + EXPECT_TRUE(cpp::all_of(v3 == 2)); + EXPECT_TRUE(cpp::all_of(v4 == 2)); +} + +TEST(LlvmLibcSIMDTest, MaskedLoadStore) { + constexpr size_t SIZE = cpp::simd_size_v>; + alignas(alignof(cpp::simd)) int buf[SIZE] = {0}; + + cpp::simd mask = cpp::iota(0) % 2 == 0; + cpp::simd v1 = cpp::splat(1); + + cpp::store_masked>(mask, v1, buf); + cpp::simd v2 = cpp::load_masked>(mask, buf); + + EXPECT_TRUE(cpp::all_of((v2 == 1) == mask)); +} + +TEST(LlvmLibcSIMDTest, GatherScatter) { + constexpr int SIZE = cpp::simd_size_v>; + alignas(alignof(cpp::simd)) int 
buf[SIZE]; + + cpp::simd mask = cpp::iota(1); + cpp::simd idx = cpp::iota(0); + cpp::simd v1 = cpp::splat(1); + + cpp::scatter>(mask, idx, v1, buf); + cpp::simd v2 = cpp::gather>(mask, idx, buf); + + EXPECT_TRUE(cpp::all_of(v1 == 1)); + EXPECT_TRUE(cpp::all_of(v2 == 1)); +} + +TEST(LlvmLibcSIMDTest, MaskedCompressExpand) { + constexpr size_t SIZE = cpp::simd_size_v>; + alignas(alignof(cpp::simd)) int buf[SIZE] = {0}; + + cpp::simd mask_expand = cpp::iota(0) % 2 == 0; + cpp::simd mask_compress = 1; + + cpp::simd v1 = cpp::iota(0); + + cpp::compress>(mask_compress, v1, buf); + cpp::simd v2 = cpp::expand>(mask_expand, buf); + + EXPECT_TRUE(cpp::all_of(!mask_expand || v2 <= SIZE / 2)); +} diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt index 9d644703a61ae..2d2d5287bb384 100644 --- a/libc/test/src/math/CMakeLists.txt +++ b/libc/test/src/math/CMakeLists.txt @@ -2041,6 +2041,18 @@ add_fp_unittest( libc.src.math.logf16 ) +add_fp_unittest( + log_bf16_test + NEED_MPFR + SUITE + libc-math-unittests + SRCS + log_bf16_test.cpp + DEPENDS + libc.src.math.log_bf16 + libc.src.__support.FPUtil.bfloat16 +) + add_fp_unittest( log2_test NEED_MPFR diff --git a/libc/test/src/math/log_bf16_test.cpp b/libc/test/src/math/log_bf16_test.cpp new file mode 100644 index 0000000000000..ab91ebef4342f --- /dev/null +++ b/libc/test/src/math/log_bf16_test.cpp @@ -0,0 +1,41 @@ +//===-- Full range tests for BFloat16 log(x) function ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/__support/FPUtil/bfloat16.h" +#include "src/math/log_bf16.h" +#include "test/UnitTest/FPMatcher.h" +#include "test/UnitTest/Test.h" +#include "utils/MPFRWrapper/MPFRUtils.h" + +using LlvmLibcLogBf16Test = LIBC_NAMESPACE::testing::FPTest; + +namespace mpfr = LIBC_NAMESPACE::testing::mpfr; + +// range: [0, inf] +static constexpr uint16_t POS_START = 0x0000U; +static constexpr uint16_t POS_STOP = 0x7f80U; + +// range: [-0, -inf] +static constexpr uint16_t NEG_START = 0x8000U; +static constexpr uint16_t NEG_STOP = 0xff80U; + +TEST_F(LlvmLibcLogBf16Test, PositiveRange) { + for (uint16_t v = POS_START; v <= POS_STOP; ++v) { + bfloat16 x = FPBits(v).get_val(); + EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log, x, + LIBC_NAMESPACE::log_bf16(x), 0.5); + } +} + +TEST_F(LlvmLibcLogBf16Test, NegativeRange) { + for (uint16_t v = NEG_START; v <= NEG_STOP; ++v) { + bfloat16 x = FPBits(v).get_val(); + EXPECT_MPFR_MATCH_ALL_ROUNDING(mpfr::Operation::Log, x, + LIBC_NAMESPACE::log_bf16(x), 0.5); + } +} diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt index eadd5f0970722..42a97ba10b601 100644 --- a/libc/test/src/math/smoke/CMakeLists.txt +++ b/libc/test/src/math/smoke/CMakeLists.txt @@ -4342,6 +4342,19 @@ add_fp_unittest( libc.src.__support.FPUtil.cast ) +add_fp_unittest( + log_bf16_test + SUITE + libc-math-smoke-tests + SRCS + log_bf16_test.cpp + DEPENDS + libc.hdr.errno_macros + libc.hdr.fenv_macros + libc.src.math.log_bf16 + libc.src.__support.FPUtil.bfloat16 +) + add_fp_unittest( log2_test SUITE diff --git a/libc/test/src/math/smoke/RoundToIntegerTest.h b/libc/test/src/math/smoke/RoundToIntegerTest.h index f8be5a5bcc737..c0b326e5dcbb0 100644 --- a/libc/test/src/math/smoke/RoundToIntegerTest.h +++ 
b/libc/test/src/math/smoke/RoundToIntegerTest.h @@ -22,25 +22,26 @@ static constexpr int ROUNDING_MODES[4] = {FE_UPWARD, FE_DOWNWARD, FE_TOWARDZERO, FE_TONEAREST}; -template +template class RoundToIntegerTestTemplate : public LIBC_NAMESPACE::testing::FEnvSafeTest { public: - typedef I (*RoundToIntegerFunc)(F); + typedef IntType (*RoundToIntegerFunc)(FloatType); private: - DECLARE_SPECIAL_CONSTANTS(F) + DECLARE_SPECIAL_CONSTANTS(FloatType) static constexpr StorageType MAX_SUBNORMAL = FPBits::max_subnormal().uintval(); static constexpr StorageType MIN_SUBNORMAL = FPBits::min_subnormal().uintval(); - static constexpr I INTEGER_MIN = I(1) << (sizeof(I) * 8 - 1); - static constexpr I INTEGER_MAX = -(INTEGER_MIN + 1); + static constexpr IntType INTEGER_MIN = IntType(1) + << (sizeof(IntType) * 8 - 1); + static constexpr IntType INTEGER_MAX = -(INTEGER_MIN + 1); - void test_one_input(RoundToIntegerFunc func, F input, I expected, - bool expectError) { + void test_one_input(RoundToIntegerFunc func, FloatType input, + IntType expected, bool expectError) { libc_errno = 0; LIBC_NAMESPACE::fputil::clear_except(FE_ALL_EXCEPT); @@ -92,14 +93,14 @@ class RoundToIntegerTestTemplate } void do_round_numbers_test(RoundToIntegerFunc func) { - test_one_input(func, zero, I(0), false); - test_one_input(func, neg_zero, I(0), false); - test_one_input(func, F(1.0), I(1), false); - test_one_input(func, F(-1.0), I(-1), false); - test_one_input(func, F(10.0), I(10), false); - test_one_input(func, F(-10.0), I(-10), false); - test_one_input(func, F(1232.0), I(1232), false); - test_one_input(func, F(-1232.0), I(-1232), false); + test_one_input(func, zero, IntType(0), false); + test_one_input(func, neg_zero, IntType(0), false); + test_one_input(func, FloatType(1.0), IntType(1), false); + test_one_input(func, FloatType(-1.0), IntType(-1), false); + test_one_input(func, FloatType(10.0), IntType(10), false); + test_one_input(func, FloatType(-10.0), IntType(-10), false); + test_one_input(func, FloatType(1232.0), IntType(1232), false); + test_one_input(func, FloatType(-1232.0), IntType(-1232), false); } void testRoundNumbers(RoundToIntegerFunc func) { @@ -120,29 +121,29 @@ class RoundToIntegerTestTemplate static_cast((MAX_SUBNORMAL - MIN_SUBNORMAL) / COUNT), StorageType(1)); for (StorageType i = MIN_SUBNORMAL; i <= MAX_SUBNORMAL; i += STEP) { - F x = FPBits(i).get_val(); - if (x == F(0.0)) + FloatType x = FPBits(i).get_val(); + if (x == FloatType(0.0)) continue; // All subnormal numbers should round to zero. 
if (TestModes) { if (x > zero) { LIBC_NAMESPACE::fputil::set_round(FE_UPWARD); - test_one_input(func, x, I(1), false); + test_one_input(func, x, IntType(1), false); LIBC_NAMESPACE::fputil::set_round(FE_DOWNWARD); - test_one_input(func, x, I(0), false); + test_one_input(func, x, IntType(0), false); LIBC_NAMESPACE::fputil::set_round(FE_TOWARDZERO); - test_one_input(func, x, I(0), false); + test_one_input(func, x, IntType(0), false); LIBC_NAMESPACE::fputil::set_round(FE_TONEAREST); - test_one_input(func, x, I(0), false); + test_one_input(func, x, IntType(0), false); } else { LIBC_NAMESPACE::fputil::set_round(FE_UPWARD); - test_one_input(func, x, I(0), false); + test_one_input(func, x, IntType(0), false); LIBC_NAMESPACE::fputil::set_round(FE_DOWNWARD); - test_one_input(func, x, I(-1), false); + test_one_input(func, x, IntType(-1), false); LIBC_NAMESPACE::fputil::set_round(FE_TOWARDZERO); - test_one_input(func, x, I(0), false); + test_one_input(func, x, IntType(0), false); LIBC_NAMESPACE::fputil::set_round(FE_TONEAREST); - test_one_input(func, x, I(0), false); + test_one_input(func, x, IntType(0), false); } } else { test_one_input(func, x, 0L, false); @@ -151,9 +152,10 @@ class RoundToIntegerTestTemplate } }; -#define LIST_ROUND_TO_INTEGER_TESTS_HELPER(F, I, func, TestModes) \ +#define LIST_ROUND_TO_INTEGER_TESTS_HELPER(FloatType, IntType, func, \ + TestModes) \ using LlvmLibcRoundToIntegerTest = \ - RoundToIntegerTestTemplate; \ + RoundToIntegerTestTemplate; \ TEST_F(LlvmLibcRoundToIntegerTest, InfinityAndNaN) { \ testInfinityAndNaN(&func); \ } \ @@ -164,16 +166,16 @@ class RoundToIntegerTestTemplate testSubnormalRange(&func); \ } -#define LIST_ROUND_TO_INTEGER_TESTS(F, I, func) \ - LIST_ROUND_TO_INTEGER_TESTS_HELPER(F, I, func, false) +#define LIST_ROUND_TO_INTEGER_TESTS(FloatType, IntType, func) \ + LIST_ROUND_TO_INTEGER_TESTS_HELPER(FloatType, IntType, func, false) // The GPU target does not support different rounding modes. #ifdef LIBC_TARGET_ARCH_IS_GPU -#define LIST_ROUND_TO_INTEGER_TESTS_WITH_MODES(F, I, func) \ - LIST_ROUND_TO_INTEGER_TESTS_HELPER(F, I, func, false) +#define LIST_ROUND_TO_INTEGER_TESTS_WITH_MODES(FloatType, IntType, func) \ + LIST_ROUND_TO_INTEGER_TESTS_HELPER(FloatType, IntType, func, false) #else -#define LIST_ROUND_TO_INTEGER_TESTS_WITH_MODES(F, I, func) \ - LIST_ROUND_TO_INTEGER_TESTS_HELPER(F, I, func, true) +#define LIST_ROUND_TO_INTEGER_TESTS_WITH_MODES(FloatType, IntType, func) \ + LIST_ROUND_TO_INTEGER_TESTS_HELPER(FloatType, IntType, func, true) #endif #endif // LLVM_LIBC_TEST_SRC_MATH_SMOKE_ROUNDTOINTEGERTEST_H diff --git a/libc/test/src/math/smoke/log_bf16_test.cpp b/libc/test/src/math/smoke/log_bf16_test.cpp new file mode 100644 index 0000000000000..ec3b8eb6205fa --- /dev/null +++ b/libc/test/src/math/smoke/log_bf16_test.cpp @@ -0,0 +1,52 @@ +//===-- Unittests for BFloat16 log(x) function =---------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
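// Standalone sketch of the behaviour the subnormal-range cases above encode:
// std::lrint rounds according to the current rounding mode, so a tiny
// positive subnormal goes to 1 under FE_UPWARD and to 0 under the other
// modes (negative subnormals mirror this, with FE_DOWNWARD giving -1).
#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
  const float tiny = 1e-42f; // a positive subnormal in IEEE-754 binary32
  std::fesetround(FE_UPWARD);
  std::printf("upward:     %ld\n", std::lrint(tiny)); // 1
  std::fesetround(FE_DOWNWARD);
  std::printf("downward:   %ld\n", std::lrint(tiny)); // 0
  std::fesetround(FE_TOWARDZERO);
  std::printf("towardzero: %ld\n", std::lrint(tiny)); // 0
  std::fesetround(FE_TONEAREST);
  std::printf("tonearest:  %ld\n", std::lrint(tiny)); // 0
  return 0;
}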
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "hdr/errno_macros.h" +#include "hdr/fenv_macros.h" +#include "src/__support/FPUtil/bfloat16.h" +#include "src/__support/macros/properties/types.h" +#include "src/math/log_bf16.h" +#include "test/UnitTest/FEnvSafeTest.h" +#include "test/UnitTest/FPMatcher.h" +#include "test/UnitTest/Test.h" + +class LlvmLibcLogBf16Test : public LIBC_NAMESPACE::testing::FEnvSafeTest { + DECLARE_SPECIAL_CONSTANTS(bfloat16) + +public: + void test_special_numbers() { + EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::log_bf16(aNaN)); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_WITH_EXCEPTION(aNaN, LIBC_NAMESPACE::log_bf16(sNaN), + FE_INVALID); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_ALL_ROUNDING(inf, LIBC_NAMESPACE::log_bf16(inf)); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::log_bf16(neg_inf)); + EXPECT_MATH_ERRNO(EDOM); + + EXPECT_FP_EQ_WITH_EXCEPTION_ALL_ROUNDING( + neg_inf, LIBC_NAMESPACE::log_bf16(zero), FE_DIVBYZERO); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_WITH_EXCEPTION_ALL_ROUNDING( + neg_inf, LIBC_NAMESPACE::log_bf16(neg_zero), FE_DIVBYZERO); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_ALL_ROUNDING(zero, LIBC_NAMESPACE::log_bf16(bfloat16(1.0))); + EXPECT_MATH_ERRNO(0); + + EXPECT_FP_EQ_ALL_ROUNDING(aNaN, LIBC_NAMESPACE::log_bf16(bfloat16(-1.0))); + EXPECT_MATH_ERRNO(EDOM); + } +}; + +TEST_F(LlvmLibcLogBf16Test, SpecialNumbers) { test_special_numbers(); } diff --git a/libc/test/src/string/memory_utils/utils_test.cpp b/libc/test/src/string/memory_utils/utils_test.cpp index 4dff0684b9111..2ab2e8d3ad867 100644 --- a/libc/test/src/string/memory_utils/utils_test.cpp +++ b/libc/test/src/string/memory_utils/utils_test.cpp @@ -47,14 +47,14 @@ TEST(LlvmLibcUtilsTest, DistanceToAlignDown) { TEST(LlvmLibcUtilsTest, Adjust2) { char a, b; const size_t base_size = 10; - for (uintptr_t I = 0; I < 4; ++I) { + for (uintptr_t i = 0; i < 4; ++i) { auto *p1 = &a; auto *p2 = &b; size_t size = base_size; - adjust(static_cast(I), p1, p2, size); - EXPECT_EQ(intptr_t(p1), intptr_t(&a + I)); - EXPECT_EQ(intptr_t(p2), intptr_t(&b + I)); - EXPECT_EQ(size, base_size - I); + adjust(static_cast(i), p1, p2, size); + EXPECT_EQ(intptr_t(p1), intptr_t(&a + i)); + EXPECT_EQ(intptr_t(p2), intptr_t(&b + i)); + EXPECT_EQ(size, base_size - i); } } diff --git a/libc/test/src/sys/mman/linux/madvise_test.cpp b/libc/test/src/sys/mman/linux/madvise_test.cpp index 6671050a28038..b7c3f0571571c 100644 --- a/libc/test/src/sys/mman/linux/madvise_test.cpp +++ b/libc/test/src/sys/mman/linux/madvise_test.cpp @@ -13,8 +13,6 @@ #include "test/UnitTest/ErrnoSetterMatcher.h" #include "test/UnitTest/Test.h" -#include - using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Fails; using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; using LlvmLibcMadviseTest = LIBC_NAMESPACE::testing::ErrnoCheckingTest; diff --git a/libc/test/src/sys/mman/linux/mincore_test.cpp b/libc/test/src/sys/mman/linux/mincore_test.cpp index ade620b838a38..3a15291564922 100644 --- a/libc/test/src/sys/mman/linux/mincore_test.cpp +++ b/libc/test/src/sys/mman/linux/mincore_test.cpp @@ -6,7 +6,6 @@ // //===----------------------------------------------------------------------===// -#include "src/__support/OSUtil/syscall.h" // For internal syscall function. 
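// The special values exercised by the log_bf16 smoke test above follow the
// usual log() contract, which can also be observed with the standard float
// overload; note that the errno side is only guaranteed on targets where
// math_errhandling includes MATH_ERRNO.
#include <cerrno>
#include <cmath>
#include <cstdio>

int main() {
  errno = 0;
  const float at_zero = std::log(0.0f); // -inf, raises FE_DIVBYZERO
  const float at_neg = std::log(-1.0f); // NaN, domain error (EDOM)
  const float at_one = std::log(1.0f);  // exactly +0.0
  std::printf("log(0)=%f  log(-1)=%f  log(1)=%f  errno==EDOM? %d\n", at_zero,
              at_neg, at_one, errno == EDOM);
  return 0;
}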
#include "src/sys/mman/madvise.h" #include "src/sys/mman/mincore.h" #include "src/sys/mman/mlock.h" @@ -18,10 +17,6 @@ #include "test/UnitTest/ErrnoSetterMatcher.h" #include "test/UnitTest/Test.h" -#include -#include -#include - using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Fails; using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; using LlvmLibcMincoreTest = LIBC_NAMESPACE::testing::ErrnoCheckingTest; diff --git a/libc/test/src/sys/mman/linux/mlock_test.cpp b/libc/test/src/sys/mman/linux/mlock_test.cpp index 6b81411ca604a..cd374222680f8 100644 --- a/libc/test/src/sys/mman/linux/mlock_test.cpp +++ b/libc/test/src/sys/mman/linux/mlock_test.cpp @@ -6,6 +6,10 @@ // //===----------------------------------------------------------------------===// +// TODO: Simplify these tests and split them up. mlock, mlock2, mlockall, +// munlock, and munlockall should have separate test files which only need to +// check our code paths (succeeds and errors). + #include "src/__support/OSUtil/syscall.h" // For internal syscall function. #include "src/__support/libc_errno.h" #include "src/sys/mman/madvise.h" @@ -24,10 +28,7 @@ #include "test/UnitTest/Test.h" #include -#include -#include #include -#include using namespace LIBC_NAMESPACE::testing::ErrnoSetterMatcher; using LlvmLibcMlockTest = LIBC_NAMESPACE::testing::ErrnoCheckingTest; diff --git a/libc/test/src/sys/mman/linux/mremap_test.cpp b/libc/test/src/sys/mman/linux/mremap_test.cpp index 5ff774d57614a..620292a2d0109 100644 --- a/libc/test/src/sys/mman/linux/mremap_test.cpp +++ b/libc/test/src/sys/mman/linux/mremap_test.cpp @@ -13,8 +13,6 @@ #include "test/UnitTest/ErrnoSetterMatcher.h" #include "test/UnitTest/Test.h" -#include - using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Fails; using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; using LlvmLibcMremapTest = LIBC_NAMESPACE::testing::ErrnoCheckingTest; diff --git a/libc/test/src/sys/mman/linux/shm_test.cpp b/libc/test/src/sys/mman/linux/shm_test.cpp index ae555fa2f1aff..48bdf84c7270d 100644 --- a/libc/test/src/sys/mman/linux/shm_test.cpp +++ b/libc/test/src/sys/mman/linux/shm_test.cpp @@ -7,7 +7,6 @@ //===----------------------------------------------------------------------===// #include "hdr/fcntl_macros.h" -#include "src/__support/OSUtil/syscall.h" #include "src/fcntl/fcntl.h" #include "src/sys/mman/mmap.h" #include "src/sys/mman/munmap.h" @@ -18,7 +17,6 @@ #include "test/UnitTest/ErrnoCheckingTest.h" #include "test/UnitTest/ErrnoSetterMatcher.h" #include "test/UnitTest/Test.h" -#include using namespace LIBC_NAMESPACE::testing::ErrnoSetterMatcher; using LlvmLibcShmTest = LIBC_NAMESPACE::testing::ErrnoCheckingTest; diff --git a/libc/test/src/unistd/CMakeLists.txt b/libc/test/src/unistd/CMakeLists.txt index 9aec6150af0ec..44f28fff9ad39 100644 --- a/libc/test/src/unistd/CMakeLists.txt +++ b/libc/test/src/unistd/CMakeLists.txt @@ -93,6 +93,23 @@ add_libc_unittest( libc.test.UnitTest.ErrnoSetterMatcher ) +add_libc_unittest( + faccessat_test + SUITE + libc_unistd_unittests + SRCS + faccessat_test.cpp + DEPENDS + libc.include.unistd + libc.src.errno.errno + libc.src.fcntl.open + libc.src.unistd.faccessat + libc.src.unistd.close + libc.src.unistd.unlink + libc.test.UnitTest.ErrnoCheckingTest + libc.test.UnitTest.ErrnoSetterMatcher +) + add_libc_unittest( fchdir_test SUITE @@ -408,6 +425,18 @@ add_libc_unittest( libc.test.UnitTest.ErrnoSetterMatcher ) +add_libc_unittest( + gethostname_test + SUITE + libc_unistd_unittests + SRCS + gethostname_test.cpp + DEPENDS + 
libc.src.unistd.gethostname + libc.src.errno.errno + libc.test.UnitTest.ErrnoCheckingTest +) + add_libc_unittest( getpid_test SUITE diff --git a/libc/test/src/unistd/faccessat_test.cpp b/libc/test/src/unistd/faccessat_test.cpp new file mode 100644 index 0000000000000..6280b147c6d58 --- /dev/null +++ b/libc/test/src/unistd/faccessat_test.cpp @@ -0,0 +1,115 @@ +//===-- Unittests for faccessat -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/fcntl/open.h" +#include "src/unistd/close.h" +#include "src/unistd/faccessat.h" +#include "src/unistd/unlink.h" +#include "test/UnitTest/ErrnoCheckingTest.h" +#include "test/UnitTest/ErrnoSetterMatcher.h" +#include "test/UnitTest/Test.h" + +#include +#include +#include + +namespace { + +using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Fails; +using LIBC_NAMESPACE::testing::ErrnoSetterMatcher::Succeeds; + +using LlvmLibcFaccessatTest = LIBC_NAMESPACE::testing::ErrnoCheckingTest; + +TEST_F(LlvmLibcFaccessatTest, WithAtFdcwd) { + // Test access checks on a file with AT_FDCWD and no flags, equivalent to + // access(). + constexpr const char *FILENAME = "faccessat_basic.test"; + auto TEST_FILE = libc_make_test_file_path(FILENAME); + + // Check permissions on a file with full permissions + int fd = LIBC_NAMESPACE::open(TEST_FILE, O_WRONLY | O_CREAT, S_IRWXU); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd, 0); + ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0)); + + ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, F_OK, 0), + Succeeds(0)); + ASSERT_THAT( + LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, X_OK | W_OK | R_OK, 0), + Succeeds(0)); + ASSERT_THAT(LIBC_NAMESPACE::unlink(TEST_FILE), Succeeds(0)); + + // Check permissions on a file with execute-only permission + fd = LIBC_NAMESPACE::open(TEST_FILE, O_WRONLY | O_CREAT, S_IXUSR); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd, 0); + ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0)); + + ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, F_OK, 0), + Succeeds(0)); + ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, X_OK, 0), + Succeeds(0)); + ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, R_OK, 0), + Fails(EACCES)); + ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, W_OK, 0), + Fails(EACCES)); + ASSERT_THAT(LIBC_NAMESPACE::unlink(TEST_FILE), Succeeds(0)); +} + +TEST_F(LlvmLibcFaccessatTest, NonExistentFile) { + ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, "faccessat_nonexistent.test", + F_OK, 0), + Fails(ENOENT)); +} + +TEST_F(LlvmLibcFaccessatTest, AtEaccess) { + // With AT_EACCESS, faccessat checks permissions using the effective user ID, + // but the effective and real user ID will be the same here and changing that + // is not feasible in a test, so this is just a basic sanity check. 
+ constexpr const char *FILENAME = "faccessat_eaccess.test"; + auto TEST_FILE = libc_make_test_file_path(FILENAME); + + int fd = LIBC_NAMESPACE::open(TEST_FILE, O_WRONLY | O_CREAT, S_IRWXU); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd, 0); + ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0)); + + ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, TEST_FILE, X_OK | W_OK | R_OK, + AT_EACCESS), + Succeeds(0)); + + ASSERT_THAT(LIBC_NAMESPACE::unlink(TEST_FILE), Succeeds(0)); +} + +TEST_F(LlvmLibcFaccessatTest, AtEmptyPath) { + constexpr const char *FILENAME = "faccessat_atemptypath.test"; + auto TEST_FILE = libc_make_test_file_path(FILENAME); + + int fd = LIBC_NAMESPACE::open(TEST_FILE, O_WRONLY | O_CREAT, S_IRWXU); + ASSERT_ERRNO_SUCCESS(); + ASSERT_GT(fd, 0); + + // Check permissions on the file referred to by fd + ASSERT_THAT(LIBC_NAMESPACE::faccessat(fd, "", F_OK, AT_EMPTY_PATH), + Succeeds(0)); + ASSERT_THAT( + LIBC_NAMESPACE::faccessat(fd, "", X_OK | W_OK | R_OK, AT_EMPTY_PATH), + Succeeds(0)); + + ASSERT_THAT(LIBC_NAMESPACE::close(fd), Succeeds(0)); + ASSERT_THAT(LIBC_NAMESPACE::unlink(TEST_FILE), Succeeds(0)); + + // Check permissions on the current working directory + ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, "", F_OK, AT_EMPTY_PATH), + Succeeds(0)); + ASSERT_THAT(LIBC_NAMESPACE::faccessat(AT_FDCWD, "", X_OK | W_OK | R_OK, + AT_EMPTY_PATH), + Succeeds(0)); +} + +} // namespace diff --git a/libc/test/src/unistd/gethostname_test.cpp b/libc/test/src/unistd/gethostname_test.cpp new file mode 100644 index 0000000000000..a0e57ff0df333 --- /dev/null +++ b/libc/test/src/unistd/gethostname_test.cpp @@ -0,0 +1,31 @@ +//===-- Unittests for gethostname -----------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "src/unistd/gethostname.h" + +#include "test/UnitTest/ErrnoCheckingTest.h" +#include "test/UnitTest/Test.h" + +using LlvmLibcGetHostNameTest = LIBC_NAMESPACE::testing::ErrnoCheckingTest; + +TEST(LlvmLibcGetHostNameTest, GetCurrHostName) { + char hostbuffer[1024]; + int ret = LIBC_NAMESPACE::gethostname(hostbuffer, sizeof(hostbuffer)); + ASSERT_NE(ret, -1); + ASSERT_ERRNO_SUCCESS(); + + ret = LIBC_NAMESPACE::gethostname(hostbuffer, 0); + ASSERT_EQ(ret, -1); + ASSERT_ERRNO_EQ(ENAMETOOLONG); + + // test for invalid pointer + char *nptr = nullptr; + ret = LIBC_NAMESPACE::gethostname(nptr, 1); + ASSERT_EQ(ret, -1); + ASSERT_ERRNO_EQ(EFAULT); +} diff --git a/libclc/Maintainers.md b/libclc/Maintainers.md index ac869b6945db5..cdd84e059a796 100644 --- a/libclc/Maintainers.md +++ b/libclc/Maintainers.md @@ -10,8 +10,17 @@ The following people are the active maintainers for the project. Please reach out to them for code reviews, questions about their area of expertise, or other assistance. -Fraser Cormack \ -fraser@codeplay.com (email), [frasercrmck](https://github.com/frasercrmck) (GitHub) +Wenju He \ +wenju.he@intel.com (email), [wenju-he](https://github.com/wenju-he) (GitHub) Tom Stellard \ tstellar@redhat.com (email), [tstellar](https://github.com/tstellar) (GitHub) + +## Inactive Maintainers + +The following people have graciously spent time performing maintainership +responsibilities but are no longer active in that role. Thank you for all your +help with the success of the project! 
+ +Fraser Cormack \ +frasercrmck@pm.me (email), [frasercrmck](https://github.com/frasercrmck) (GitHub) diff --git a/libcxx/docs/Contributing.rst b/libcxx/docs/Contributing.rst index ac856195ad68e..4e9d1ba52b47e 100644 --- a/libcxx/docs/Contributing.rst +++ b/libcxx/docs/Contributing.rst @@ -330,6 +330,17 @@ has been merged, an LLVM premerge maintainer (a Google employee) must use terraform to apply the change to the running GKE cluster. +Monitoring premerge testing performance +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The llvm-premerge-libcxx runners mentioned above collect metrics regarding the +time the tests spend queued up before they start running and also the time it +takes the tests to actually complete running. These metrics are collected and +aggregated (based on stage and PR), and the results can be seen at the +`Libc++ Premerge Testing dashboard +`__ +. + run-buildbot-container ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/libcxx/docs/FeatureTestMacroTable.rst b/libcxx/docs/FeatureTestMacroTable.rst index 358889d8dbc37..3c7175c73f21e 100644 --- a/libcxx/docs/FeatureTestMacroTable.rst +++ b/libcxx/docs/FeatureTestMacroTable.rst @@ -506,6 +506,8 @@ Status ---------------------------------------------------------- ----------------- ``__cpp_lib_sstream_from_string_view`` ``202306L`` ---------------------------------------------------------- ----------------- + ``__cpp_lib_string_subview`` ``202506L`` + ---------------------------------------------------------- ----------------- ``__cpp_lib_string_view`` ``202403L`` ---------------------------------------------------------- ----------------- ``__cpp_lib_submdspan`` *unimplemented* diff --git a/libcxx/docs/ReleaseNotes/22.rst b/libcxx/docs/ReleaseNotes/22.rst index 509ead64ee525..8d023a14e89e6 100644 --- a/libcxx/docs/ReleaseNotes/22.rst +++ b/libcxx/docs/ReleaseNotes/22.rst @@ -40,6 +40,8 @@ Implemented Papers - P2321R2: ``zip`` (`Github `__) (The paper is partially implemented. ``zip_transform_view`` is implemented in this release) +- P3044R2: sub-``string_view`` from ``string`` (`Github `__) +- P3223R2: Making ``std::istream::ignore`` less surprising (`Github `__) - P3168R2: Give ``std::optional`` Range Support (`Github `__) Improvements and New Features @@ -63,6 +65,8 @@ Improvements and New Features - Multiple internal types have been refactored to use ``[[no_unique_address]]``, resulting in faster compile times and reduced debug information. 
+- The performance of ``std::find`` has been improved by up to 2x for integral types + Deprecations and Removals ------------------------- diff --git a/libcxx/docs/Status/Cxx2cIssues.csv b/libcxx/docs/Status/Cxx2cIssues.csv index aaf561982e120..7bf7bc95c8281 100644 --- a/libcxx/docs/Status/Cxx2cIssues.csv +++ b/libcxx/docs/Status/Cxx2cIssues.csv @@ -78,7 +78,7 @@ "","","","","","" "`LWG3216 `__","Rebinding the allocator before calling ``construct``/``destroy`` in ``allocate_shared``","2024-11 (Wrocław)","","","`#118332 `__","" "`LWG3436 `__","``std::construct_at`` should support arrays","2024-11 (Wrocław)","","","`#118335 `__","" -"`LWG3886 `__","Monad mo' problems","2024-11 (Wrocław)","","","`#118336 `__","" +"`LWG3886 `__","Monad mo' problems","2024-11 (Wrocław)","|Complete|","22","`#118336 `__","" "`LWG3899 `__","``co_yield``\ing elements of an lvalue generator is unnecessarily inefficient","2024-11 (Wrocław)","","","`#118337 `__","" "`LWG3900 `__","The ``allocator_arg_t`` overloads of ``generator::promise_type::operator new`` should not be constrained","2024-11 (Wrocław)","","","`#118338 `__","" "`LWG3918 `__","``std::uninitialized_move/_n`` and guaranteed copy elision","2024-11 (Wrocław)","","","`#118339 `__","" diff --git a/libcxx/docs/Status/Cxx2cPapers.csv b/libcxx/docs/Status/Cxx2cPapers.csv index f873d16808afe..4e0918b0246c1 100644 --- a/libcxx/docs/Status/Cxx2cPapers.csv +++ b/libcxx/docs/Status/Cxx2cPapers.csv @@ -129,7 +129,7 @@ "`P3179R9 `__","Parallel Range Algorithms","2025-06 (Sofia)","","","`#148137 `__","" "`P3709R2 `__","Reconsider parallel ``ranges::rotate_copy`` and ``ranges::reverse_copy``","2025-06 (Sofia)","","","`#148138 `__","" "`P3641R0 `__","Rename ``std::observable`` to ``std::observable_checkpoint``, and add a feature-test macro","2025-06 (Sofia)","","","`#148139 `__","" -"`P3044R2 `__","sub-``string_view`` from ``string``","2025-06 (Sofia)","","","`#148140 `__","" +"`P3044R2 `__","sub-``string_view`` from ``string``","2025-06 (Sofia)","|Complete|","22","`#148140 `__","" "`P2876R3 `__","Proposal to extend ``std::simd`` with more constructors and accessors","2025-06 (Sofia)","","","`#148143 `__","" "`P3480R6 `__","``std::simd`` is a range","2025-06 (Sofia)","","","`#148144 `__","" "`P2664R11 `__","Extend ``std::simd`` with permutation API","2025-06 (Sofia)","","","`#148145 `__","" @@ -151,7 +151,7 @@ "`P3111R8 `__","Atomic Reduction Operations","2025-06 (Sofia)","","","`#148174 `__","" "`P3060R3 `__","Add ``std::views::indices(n)``","2025-06 (Sofia)","","","`#148175 `__","" "`P2319R5 `__","Prevent ``path`` presentation problems","2025-06 (Sofia)","","","`#148177 `__","" -"`P3223R2 `__","Making ``std::istream::ignore`` less surprising","2025-06 (Sofia)","","","`#148178 `__","" +"`P3223R2 `__","Making ``std::istream::ignore`` less surprising","2025-06 (Sofia)","|Complete|","22","`#148178 `__","" "`P2781R9 `__","``std::constant_wrapper``","2025-06 (Sofia)","","","`#148179 `__","" "`P3697R1 `__","Minor additions to C++26 standard library hardening","2025-06 (Sofia)","","","`#148180 `__","" "`P3552R3 `__","Add a Coroutine Task Type","2025-06 (Sofia)","","","`#148182 `__","" diff --git a/libcxx/docs/TestingLibcxx.rst b/libcxx/docs/TestingLibcxx.rst index 227791031bab0..6171629185af2 100644 --- a/libcxx/docs/TestingLibcxx.rst +++ b/libcxx/docs/TestingLibcxx.rst @@ -482,7 +482,7 @@ when running the benchmarks. For example, .. 
code-block:: bash - $ libcxx/utils/libcxx-lit libcxx/test/benchmarks/string.bench.cpp --show-all --param optimization=speed + $ libcxx/utils/libcxx-lit libcxx/test/benchmarks/containers/string.bench.cpp --show-all --param optimization=speed Note that benchmarks are only dry-run when run via the ``check-cxx`` target since we only want to make sure they don't rot. Do not rely on the results of benchmarks @@ -504,7 +504,7 @@ more benchmarks, as usual: .. code-block:: bash $ cmake -S runtimes -B [...] - $ libcxx/utils/libcxx-lit libcxx/test/benchmarks/string.bench.cpp --param optimization=speed + $ libcxx/utils/libcxx-lit libcxx/test/benchmarks/containers/string.bench.cpp --param optimization=speed Then, get the consolidated benchmark output for that run using ``consolidate-benchmarks``: diff --git a/libcxx/docs/index.rst b/libcxx/docs/index.rst index a44c3161534b3..4d5064bfd7f3b 100644 --- a/libcxx/docs/index.rst +++ b/libcxx/docs/index.rst @@ -133,7 +133,7 @@ velocity, libc++ drops support for older compilers as newer ones are released. Compiler Versions Restrictions Support policy ============ =================== ========================== ===================== Clang 19, 20, 21-git latest two stable releases per `LLVM's release page `_ and the development version -AppleClang 16.4 latest stable release per `Xcode's release page `_ +AppleClang 26.0 latest stable release per `Xcode's release page `_ Open XL 17.1.3 (AIX) latest stable release per `Open XL's documentation page `_ GCC 15 In C++11 or later only latest stable release per `GCC's release page `_ ============ =================== ========================== ===================== diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt index db918a16e9a61..ddace8bf8c728 100644 --- a/libcxx/include/CMakeLists.txt +++ b/libcxx/include/CMakeLists.txt @@ -216,6 +216,7 @@ set(files __atomic/check_memory_order.h __atomic/contention_t.h __atomic/fence.h + __atomic/floating_point_helper.h __atomic/is_always_lock_free.h __atomic/kill_dependency.h __atomic/memory_order.h @@ -838,6 +839,7 @@ set(files __type_traits/is_floating_point.h __type_traits/is_function.h __type_traits/is_fundamental.h + __type_traits/is_generic_transparent_comparator.h __type_traits/is_implicit_lifetime.h __type_traits/is_implicitly_default_constructible.h __type_traits/is_integral.h @@ -880,6 +882,7 @@ set(files __type_traits/make_32_64_or_128_bit.h __type_traits/make_const_lvalue_ref.h __type_traits/make_signed.h + __type_traits/make_transparent.h __type_traits/make_unsigned.h __type_traits/maybe_const.h __type_traits/nat.h diff --git a/libcxx/include/__algorithm/comp.h b/libcxx/include/__algorithm/comp.h index ab3c598418828..38e2fb9f5e744 100644 --- a/libcxx/include/__algorithm/comp.h +++ b/libcxx/include/__algorithm/comp.h @@ -11,6 +11,7 @@ #include <__config> #include <__type_traits/desugars_to.h> +#include <__type_traits/is_generic_transparent_comparator.h> #include <__type_traits/is_integral.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -48,6 +49,9 @@ inline const bool __desugars_to_v<__less_tag, __less<>, _Tp, _Tp> = true; template inline const bool __desugars_to_v<__totally_ordered_less_tag, __less<>, _Tp, _Tp> = is_integral<_Tp>::value; +template <> +inline const bool __is_generic_transparent_comparator_v<__less<> > = true; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___ALGORITHM_COMP_H diff --git a/libcxx/include/__algorithm/find.h b/libcxx/include/__algorithm/find.h index 8c8cb5820fee3..72e201a3c693b 100644 --- 
a/libcxx/include/__algorithm/find.h +++ b/libcxx/include/__algorithm/find.h @@ -12,6 +12,7 @@ #include <__algorithm/find_segment_if.h> #include <__algorithm/min.h> +#include <__algorithm/simd_utils.h> #include <__algorithm/unwrap_iter.h> #include <__bit/countr.h> #include <__bit/invert_if.h> @@ -44,39 +45,102 @@ _LIBCPP_BEGIN_NAMESPACE_STD // generic implementation template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Iter -__find(_Iter __first, _Sent __last, const _Tp& __value, _Proj& __proj) { +__find_loop(_Iter __first, _Sent __last, const _Tp& __value, _Proj& __proj) { for (; __first != __last; ++__first) if (std::__invoke(__proj, *__first) == __value) break; return __first; } -// trivially equality comparable implementations -template ::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value && - sizeof(_Tp) == 1, - int> = 0> -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* __find(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { - if (auto __ret = std::__constexpr_memchr(__first, __value, __last - __first)) - return __ret; - return __last; +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Iter +__find(_Iter __first, _Sent __last, const _Tp& __value, _Proj& __proj) { + return std::__find_loop(std::move(__first), std::move(__last), __value, __proj); } -#if _LIBCPP_HAS_WIDE_CHARACTERS -template ::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value && - sizeof(_Tp) == sizeof(wchar_t) && _LIBCPP_ALIGNOF(_Tp) >= _LIBCPP_ALIGNOF(wchar_t), - int> = 0> +#if _LIBCPP_VECTORIZE_ALGORITHMS +template +[[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* __find_vectorized(_Tp* __first, _Tp* __last, _Up __value) { + if (!__libcpp_is_constant_evaluated()) { + constexpr size_t __unroll_count = 4; + constexpr size_t __vec_size = __native_vector_size<_Tp>; + using __vec = __simd_vector<_Tp, __vec_size>; + + auto __orig_first = __first; + + auto __values = static_cast<__simd_vector<_Tp, __vec_size>>(__value); // broadcast the value + while (static_cast(__last - __first) >= __unroll_count * __vec_size) [[__unlikely__]] { + __vec __lhs[__unroll_count]; + + for (size_t __i = 0; __i != __unroll_count; ++__i) + __lhs[__i] = std::__load_vector<__vec>(__first + __i * __vec_size); + + for (size_t __i = 0; __i != __unroll_count; ++__i) { + if (auto __cmp_res = __lhs[__i] == __values; std::__any_of(__cmp_res)) { + auto __offset = __i * __vec_size + std::__find_first_set(__cmp_res); + return __first + __offset; + } + } + + __first += __unroll_count * __vec_size; + } + + // check the remaining 0-3 vectors + while (static_cast(__last - __first) >= __vec_size) { + if (auto __cmp_res = std::__load_vector<__vec>(__first) == __values; std::__any_of(__cmp_res)) { + return __first + std::__find_first_set(__cmp_res); + } + __first += __vec_size; + } + + if (__last - __first == 0) + return __first; + + // Check if we can load elements in front of the current pointer. 
If that's the case load a vector at + // (last - vector_size) to check the remaining elements + if (static_cast(__first - __orig_first) >= __vec_size) { + __first = __last - __vec_size; + return __first + std::__find_first_set(std::__load_vector<__vec>(__first) == __values); + } + } + + __identity __proj; + return std::__find_loop(__first, __last, __value, __proj); +} +#endif + +#ifndef _LIBCPP_CXX03_LANG +// trivially equality comparable implementations +template < + class _Tp, + class _Up, + class _Proj, + __enable_if_t<__is_identity<_Proj>::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* __find(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { - if (auto __ret = std::__constexpr_wmemchr(__first, __value, __last - __first)) - return __ret; - return __last; + if constexpr (sizeof(_Tp) == 1) { + if (auto __ret = std::__constexpr_memchr(__first, __value, __last - __first)) + return __ret; + return __last; + } +# if _LIBCPP_HAS_WIDE_CHARACTERS + else if constexpr (sizeof(_Tp) == sizeof(wchar_t) && _LIBCPP_ALIGNOF(_Tp) >= _LIBCPP_ALIGNOF(wchar_t)) { + if (auto __ret = std::__constexpr_wmemchr(__first, __value, __last - __first)) + return __ret; + return __last; + } +# endif +# if _LIBCPP_VECTORIZE_ALGORITHMS + else if constexpr (is_integral<_Tp>::value) { + return std::__find_vectorized(__first, __last, __value); + } +# endif + else { + __identity __proj; + return std::__find_loop(__first, __last, __value, __proj); + } } -#endif // _LIBCPP_HAS_WIDE_CHARACTERS +#endif // TODO: This should also be possible to get right with different signedness // cast integral types to allow vectorization diff --git a/libcxx/include/__algorithm/make_heap.h b/libcxx/include/__algorithm/make_heap.h index 8cfeda2b59811..8aff8ce588568 100644 --- a/libcxx/include/__algorithm/make_heap.h +++ b/libcxx/include/__algorithm/make_heap.h @@ -36,7 +36,7 @@ __make_heap(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compar using __diff_t = __iter_diff_t<_RandomAccessIterator>; const __diff_t __n = __last - __first; - static const bool __assume_both_children = is_arithmetic<__iter_value_type<_RandomAccessIterator> >::value; + const bool __assume_both_children = is_arithmetic<__iter_value_type<_RandomAccessIterator> >::value; // While it would be correct to always assume we have both children, in practice we observed this to be a performance // improvement only for arithmetic types. 
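The tail handling in the new `__find_vectorized` above is worth spelling out: the main loop only ever processes full vectors, and once at least one full vector's worth of elements has already been scanned, the leftover elements are handled by re-loading a full vector that ends exactly at `last`, overlapping data that was already checked (and known not to match). Below is a minimal standalone sketch of that idea, using a fixed block size of 8 and a plain loop in place of the SIMD compare; all names are illustrative, not libc++ internals.

#include <cstddef>

// Hypothetical block-wise find over ints: scan [first, last) in blocks of 8,
// then re-scan one overlapping block at the end instead of a scalar tail loop.
const int* block_find(const int* first, const int* last, int value) {
  constexpr std::size_t block_size = 8;
  const int* orig_first = first;

  // Stand-in for "load a vector, compare, find the first matching lane".
  auto scan_block = [&](const int* p) -> const int* {
    for (std::size_t i = 0; i != block_size; ++i)
      if (p[i] == value)
        return p + i;
    return nullptr;
  };

  while (static_cast<std::size_t>(last - first) >= block_size) {
    if (const int* hit = scan_block(first))
      return hit;
    first += block_size;
  }

  if (first == last)
    return last;

  if (static_cast<std::size_t>(first - orig_first) >= block_size) {
    // Enough elements precede `first`, so a block ending at `last` stays in
    // bounds. Re-checking already-scanned elements is harmless: they are known
    // not to match, so any hit must lie in the unscanned tail.
    if (const int* hit = scan_block(last - block_size))
      return hit;
    return last;
  }

  // Fewer than 8 elements in total: fall back to the plain loop.
  for (; first != last; ++first)
    if (*first == value)
      return first;
  return last;
}

Compared to a scalar tail loop, the overlapping load keeps the tail on the same fast path at the cost of re-testing at most one block of elements.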
diff --git a/libcxx/include/__algorithm/simd_utils.h b/libcxx/include/__algorithm/simd_utils.h index 96b074c063a5d..aaeb8a881df18 100644 --- a/libcxx/include/__algorithm/simd_utils.h +++ b/libcxx/include/__algorithm/simd_utils.h @@ -114,6 +114,11 @@ template }(make_index_sequence<__simd_vector_size_v<_VecT>>{}); } +template +[[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool __any_of(__simd_vector<_Tp, _Np> __vec) noexcept { + return __builtin_reduce_or(__builtin_convertvector(__vec, __simd_vector)); +} + template [[__nodiscard__]] _LIBCPP_HIDE_FROM_ABI bool __all_of(__simd_vector<_Tp, _Np> __vec) noexcept { return __builtin_reduce_and(__builtin_convertvector(__vec, __simd_vector)); diff --git a/libcxx/include/__atomic/atomic.h b/libcxx/include/__atomic/atomic.h index 3554ff5169954..b424427e65c33 100644 --- a/libcxx/include/__atomic/atomic.h +++ b/libcxx/include/__atomic/atomic.h @@ -11,6 +11,7 @@ #include <__atomic/atomic_sync.h> #include <__atomic/check_memory_order.h> +#include <__atomic/floating_point_helper.h> #include <__atomic/is_always_lock_free.h> #include <__atomic/memory_order.h> #include <__atomic/support.h> @@ -332,41 +333,17 @@ template requires is_floating_point_v<_Tp> struct atomic<_Tp> : __atomic_base<_Tp> { private: - _LIBCPP_HIDE_FROM_ABI static constexpr bool __is_fp80_long_double() { - // Only x87-fp80 long double has 64-bit mantissa - return __LDBL_MANT_DIG__ == 64 && std::is_same_v<_Tp, long double>; - } - - _LIBCPP_HIDE_FROM_ABI static constexpr bool __has_rmw_builtin() { -# ifndef _LIBCPP_COMPILER_CLANG_BASED - return false; -# else - // The builtin __cxx_atomic_fetch_add errors during compilation for - // long double on platforms with fp80 format. - // For more details, see - // lib/Sema/SemaChecking.cpp function IsAllowedValueType - // LLVM Parser does not allow atomicrmw with x86_fp80 type. 
- // if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && - // &Context.getTargetInfo().getLongDoubleFormat() == - // &llvm::APFloat::x87DoubleExtended()) - // For more info - // https://llvm.org/PR68602 - // https://reviews.llvm.org/D53965 - return !__is_fp80_long_double(); -# endif - } - template _LIBCPP_HIDE_FROM_ABI static _Tp __rmw_op(_This&& __self, _Tp __operand, memory_order __m, _Operation __operation, _BuiltinOp __builtin_op) { - if constexpr (__has_rmw_builtin()) { + if constexpr (std::__has_rmw_builtin<_Tp>()) { return __builtin_op(std::addressof(std::forward<_This>(__self).__a_), __operand, __m); } else { _Tp __old = __self.load(memory_order_relaxed); _Tp __new = __operation(__old, __operand); while (!__self.compare_exchange_weak(__old, __new, __m, memory_order_relaxed)) { # ifdef _LIBCPP_COMPILER_CLANG_BASED - if constexpr (__is_fp80_long_double()) { + if constexpr (std::__is_fp80_long_double<_Tp>()) { // https://llvm.org/PR47978 // clang bug: __old is not updated on failure for atomic::compare_exchange_weak // Note __old = __self.load(memory_order_relaxed) will not work diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h index b5493662c518e..9bdc6b1160d2c 100644 --- a/libcxx/include/__atomic/atomic_ref.h +++ b/libcxx/include/__atomic/atomic_ref.h @@ -20,6 +20,7 @@ #include <__assert> #include <__atomic/atomic_sync.h> #include <__atomic/check_memory_order.h> +#include <__atomic/floating_point_helper.h> #include <__atomic/memory_order.h> #include <__atomic/to_gcc_order.h> #include <__concepts/arithmetic.h> @@ -322,20 +323,28 @@ struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> { atomic_ref& operator=(const atomic_ref&) = delete; _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { - _Tp __old = this->load(memory_order_relaxed); - _Tp __new = __old + __arg; - while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { - __new = __old + __arg; + if constexpr (std::__has_rmw_builtin<_Tp>()) { + return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } else { + _Tp __old = this->load(memory_order_relaxed); + _Tp __new = __old + __arg; + while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { + __new = __old + __arg; + } + return __old; } - return __old; } _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { - _Tp __old = this->load(memory_order_relaxed); - _Tp __new = __old - __arg; - while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { - __new = __old - __arg; + if constexpr (std::__has_rmw_builtin<_Tp>()) { + return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } else { + _Tp __old = this->load(memory_order_relaxed); + _Tp __new = __old - __arg; + while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { + __new = __old - __arg; + } + return __old; } - return __old; } _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; } diff --git a/libcxx/include/__atomic/floating_point_helper.h b/libcxx/include/__atomic/floating_point_helper.h new file mode 100644 index 0000000000000..8762ec234b189 --- /dev/null +++ b/libcxx/include/__atomic/floating_point_helper.h @@ -0,0 +1,55 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with 
LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ATOMIC_FLOATING_POINT_HELPER_H +#define _LIBCPP___ATOMIC_FLOATING_POINT_HELPER_H + +#include <__config> +#include <__type_traits/is_floating_point.h> +#include <__type_traits/is_same.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +#if _LIBCPP_STD_VER >= 20 + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool __is_fp80_long_double() { + // Only x87-fp80 long double has 64-bit mantissa + return __LDBL_MANT_DIG__ == 64 && std::is_same_v<_Tp, long double>; +} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool __has_rmw_builtin() { + static_assert(std::is_floating_point_v<_Tp>); +# ifndef _LIBCPP_COMPILER_CLANG_BASED + return false; +# else + // The builtin __cxx_atomic_fetch_add errors during compilation for + // long double on platforms with fp80 format. + // For more details, see + // lib/Sema/SemaChecking.cpp function IsAllowedValueType + // LLVM Parser does not allow atomicrmw with x86_fp80 type. + // if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && + // &Context.getTargetInfo().getLongDoubleFormat() == + // &llvm::APFloat::x87DoubleExtended()) + // For more info + // https://llvm.org/PR68602 + // https://reviews.llvm.org/D53965 + return !std::__is_fp80_long_double<_Tp>(); +# endif +} + +#endif + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___ATOMIC_FLOATING_POINT_HELPER_H diff --git a/libcxx/include/__bit/countl.h b/libcxx/include/__bit/countl.h index 075914020879a..29b01277fb0eb 100644 --- a/libcxx/include/__bit/countl.h +++ b/libcxx/include/__bit/countl.h @@ -37,7 +37,7 @@ template <__unsigned_integer _Tp> template <__unsigned_integer _Tp> [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countl_one(_Tp __t) noexcept { - return __t != numeric_limits<_Tp>::max() ? std::countl_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits; + return std::countl_zero(static_cast<_Tp>(~__t)); } #endif // _LIBCPP_STD_VER >= 20 diff --git a/libcxx/include/__bit/countr.h b/libcxx/include/__bit/countr.h index f6c98695d3d06..4de887ad4f67c 100644 --- a/libcxx/include/__bit/countr.h +++ b/libcxx/include/__bit/countr.h @@ -37,7 +37,7 @@ template <__unsigned_integer _Tp> template <__unsigned_integer _Tp> [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countr_one(_Tp __t) noexcept { - return __t != numeric_limits<_Tp>::max() ? 
std::countr_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits; + return std::countr_zero(static_cast<_Tp>(~__t)); } #endif // _LIBCPP_STD_VER >= 20 diff --git a/libcxx/include/__bit/has_single_bit.h b/libcxx/include/__bit/has_single_bit.h index b43e69323e77b..d10ab7d6c1791 100644 --- a/libcxx/include/__bit/has_single_bit.h +++ b/libcxx/include/__bit/has_single_bit.h @@ -25,7 +25,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template <__unsigned_integer _Tp> [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool has_single_bit(_Tp __t) noexcept { - return __t != 0 && (((__t & (__t - 1)) == 0)); + return __t != 0 && ((__t & (__t - 1)) == 0); } _LIBCPP_END_NAMESPACE_STD diff --git a/libcxx/include/__bit/rotate.h b/libcxx/include/__bit/rotate.h index c6f34bdaf6e63..fde9058887779 100644 --- a/libcxx/include/__bit/rotate.h +++ b/libcxx/include/__bit/rotate.h @@ -22,46 +22,35 @@ _LIBCPP_BEGIN_NAMESPACE_STD // Writing two full functions for rotl and rotr makes it easier for the compiler // to optimize the code. On x86 this function becomes the ROL instruction and // the rotr function becomes the ROR instruction. -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotl(_Tp __x, int __s) _NOEXCEPT { - static_assert(__is_unsigned_integer_v<_Tp>, "__rotl requires an unsigned integer type"); + +#if _LIBCPP_STD_VER >= 20 + +template <__unsigned_integer _Tp> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp rotl(_Tp __t, int __cnt) noexcept { const int __n = numeric_limits<_Tp>::digits; - int __r = __s % __n; + int __r = __cnt % __n; if (__r == 0) - return __x; + return __t; if (__r > 0) - return (__x << __r) | (__x >> (__n - __r)); + return (__t << __r) | (__t >> (__n - __r)); - return (__x >> -__r) | (__x << (__n + __r)); + return (__t >> -__r) | (__t << (__n + __r)); } -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotr(_Tp __x, int __s) _NOEXCEPT { - static_assert(__is_unsigned_integer_v<_Tp>, "__rotr requires an unsigned integer type"); +template <__unsigned_integer _Tp> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp rotr(_Tp __t, int __cnt) noexcept { const int __n = numeric_limits<_Tp>::digits; - int __r = __s % __n; + int __r = __cnt % __n; if (__r == 0) - return __x; + return __t; if (__r > 0) - return (__x >> __r) | (__x << (__n - __r)); - - return (__x << -__r) | (__x >> (__n + __r)); -} + return (__t >> __r) | (__t << (__n - __r)); -#if _LIBCPP_STD_VER >= 20 - -template <__unsigned_integer _Tp> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp rotl(_Tp __t, int __cnt) noexcept { - return std::__rotl(__t, __cnt); -} - -template <__unsigned_integer _Tp> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp rotr(_Tp __t, int __cnt) noexcept { - return std::__rotr(__t, __cnt); + return (__t << -__r) | (__t >> (__n + __r)); } #endif // _LIBCPP_STD_VER >= 20 diff --git a/libcxx/include/__chrono/time_point.h b/libcxx/include/__chrono/time_point.h index fc4408d23dbf1..bc2c7798a630b 100644 --- a/libcxx/include/__chrono/time_point.h +++ b/libcxx/include/__chrono/time_point.h @@ -95,7 +95,7 @@ struct common_type, chrono::time_point<_C namespace chrono { -template +template , int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 time_point<_Clock, _ToDuration> time_point_cast(const time_point<_Clock, _Duration>& __t) { return time_point<_Clock, _ToDuration>(chrono::duration_cast<_ToDuration>(__t.time_since_epoch())); diff --git a/libcxx/include/__cxx03/__chrono/time_point.h b/libcxx/include/__cxx03/__chrono/time_point.h index 8ec687d837717..7f2a73af839ca 
100644 --- a/libcxx/include/__cxx03/__chrono/time_point.h +++ b/libcxx/include/__cxx03/__chrono/time_point.h @@ -81,7 +81,7 @@ common_type, chrono::time_point<_Clock, _ namespace chrono { -template +template ::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI time_point<_Clock, _ToDuration> time_point_cast(const time_point<_Clock, _Duration>& __t) { return time_point<_Clock, _ToDuration>(chrono::duration_cast<_ToDuration>(__t.time_since_epoch())); } diff --git a/libcxx/include/__cxx03/__locale b/libcxx/include/__cxx03/__locale index 86160bcdcbd59..70dd1e65cfba9 100644 --- a/libcxx/include/__cxx03/__locale +++ b/libcxx/include/__cxx03/__locale @@ -578,7 +578,7 @@ public: #ifdef _CACHED_RUNES static const size_t table_size = _CACHED_RUNES; #else - static const size_t table_size = 256; // FIXME: Don't hardcode this. + static const size_t table_size = 256; #endif _LIBCPP_HIDE_FROM_ABI const mask* table() const _NOEXCEPT { return __tab_; } static const mask* classic_table() _NOEXCEPT; diff --git a/libcxx/include/__cxx03/__math/traits.h b/libcxx/include/__cxx03/__math/traits.h index 0d27680d579a4..250e88beb7d70 100644 --- a/libcxx/include/__cxx03/__math/traits.h +++ b/libcxx/include/__cxx03/__math/traits.h @@ -12,7 +12,6 @@ #include <__cxx03/__config> #include <__cxx03/__type_traits/enable_if.h> #include <__cxx03/__type_traits/is_arithmetic.h> -#include <__cxx03/__type_traits/is_floating_point.h> #include <__cxx03/__type_traits/is_integral.h> #include <__cxx03/__type_traits/is_signed.h> #include <__cxx03/__type_traits/promote.h> @@ -28,8 +27,21 @@ namespace __math { // signbit -template ::value, int> = 0> -_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI bool signbit(_A1 __x) _NOEXCEPT { +// The universal C runtime (UCRT) in the WinSDK provides floating point overloads +// for std::signbit(). By defining our overloads as templates, we can work around +// this issue as templates are less preferred than non-template functions. 
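The comment above about templates being "less preferred" refers to a standard overload-resolution rule: when a non-template function and a function template specialization are otherwise equally good matches, the non-template wins. A small self-contained illustration of that rule follows; the names are invented for this sketch and are not the UCRT or libc++ declarations.

#include <cstdio>

// Stand-in for the overload the Windows UCRT already provides.
bool is_negative(float) {
  std::puts("non-template overload chosen");
  return false;
}

// Stand-in for the library's own overload. Because it is a template, it loses
// the tie against the non-template above instead of producing an ambiguity.
template <int = 0>
bool is_negative(float) {
  std::puts("template overload chosen");
  return false;
}

int main() {
  is_negative(-1.0f); // prints "non-template overload chosen"
}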
+template +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI bool signbit(float __x) _NOEXCEPT { + return __builtin_signbit(__x); +} + +template +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI bool signbit(double __x) _NOEXCEPT { + return __builtin_signbit(__x); +} + +template +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI bool signbit(long double __x) _NOEXCEPT { return __builtin_signbit(__x); } @@ -109,16 +121,19 @@ _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI bool isnan(long double __x) _NOEX // isnormal -template ::value, int> = 0> -_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool isnormal(_A1 __x) _NOEXCEPT { - return __builtin_isnormal(__x); -} - template ::value, int> = 0> _LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool isnormal(_A1 __x) _NOEXCEPT { return __x != 0; } +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI bool isnormal(float __x) _NOEXCEPT { return __builtin_isnormal(__x); } + +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI bool isnormal(double __x) _NOEXCEPT { return __builtin_isnormal(__x); } + +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI bool isnormal(long double __x) _NOEXCEPT { + return __builtin_isnormal(__x); +} + // isgreater template ::value && is_arithmetic<_A2>::value, int> = 0> diff --git a/libcxx/include/__cxx03/locale b/libcxx/include/__cxx03/locale index 57267ab390227..79cd50e0e2419 100644 --- a/libcxx/include/__cxx03/locale +++ b/libcxx/include/__cxx03/locale @@ -2030,10 +2030,6 @@ protected: _LIBCPP_HIDE_FROM_ABI ~__time_get_storage() {} time_base::dateorder __do_date_order() const; - -private: - void init(const ctype<_CharT>&); - string_type __analyze(char __fmt, const ctype<_CharT>&); }; # define _LIBCPP_TIME_GET_STORAGE_EXPLICIT_INSTANTIATION(_CharT) \ @@ -2043,19 +2039,10 @@ private: _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::__time_get_storage(const char*); \ template <> \ _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::__time_get_storage(const string&); \ - template <> \ - _LIBCPP_EXPORTED_FROM_ABI void __time_get_storage<_CharT>::init(const ctype<_CharT>&); \ - template <> \ - _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::string_type __time_get_storage<_CharT>::__analyze( \ - char, const ctype<_CharT>&); \ extern template _LIBCPP_EXPORTED_FROM_ABI time_base::dateorder __time_get_storage<_CharT>::__do_date_order() \ const; \ extern template _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::__time_get_storage(const char*); \ - extern template _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::__time_get_storage(const string&); \ - extern template _LIBCPP_EXPORTED_FROM_ABI void __time_get_storage<_CharT>::init(const ctype<_CharT>&); \ - extern template _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::string_type \ - __time_get_storage<_CharT>::__analyze(char, const ctype<_CharT>&); \ - /**/ + extern template _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::__time_get_storage(const string&); _LIBCPP_TIME_GET_STORAGE_EXPLICIT_INSTANTIATION(char) # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS diff --git a/libcxx/include/__cxx03/vector b/libcxx/include/__cxx03/vector index 8192ffc1a0dae..4b62e0bf33c46 100644 --- a/libcxx/include/__cxx03/vector +++ b/libcxx/include/__cxx03/vector @@ -1891,7 +1891,7 @@ vector::__recommend(size_type __new_size) const { const size_type __cap = capacity(); if (__cap >= __ms / 2) return __ms; - return std::max(2 * __cap, __align_it(__new_size)); + return std::max(2 * __cap, __align_it(__new_size)); } // Default constructs __n objects starting at __end_ diff --git 
a/libcxx/include/__expected/expected.h b/libcxx/include/__expected/expected.h index 38a34121040f6..8b3eeebd38ae7 100644 --- a/libcxx/include/__expected/expected.h +++ b/libcxx/include/__expected/expected.h @@ -555,7 +555,7 @@ class expected : private __expected_base<_Tp, _Err> { is_nothrow_constructible_v<_Tp, _Up> && is_nothrow_constructible_v<_Err, _OtherErr>) // strengthened : __base(__other.__has_val(), std::move(__other.__union())) {} - template + template > requires(!is_same_v, in_place_t> && !is_same_v> && !is_same_v, unexpect_t> && is_constructible_v<_Tp, _Up> && !__is_std_unexpected>::value && @@ -669,7 +669,7 @@ class expected : private __expected_base<_Tp, _Err> { return *this; } - template + template > _LIBCPP_HIDE_FROM_ABI constexpr expected& operator=(_Up&& __v) requires(!is_same_v> && !__is_std_unexpected>::value && is_constructible_v<_Tp, _Up> && is_assignable_v<_Tp&, _Up> && @@ -887,14 +887,14 @@ class expected : private __expected_base<_Tp, _Err> { return std::move(this->__unex()); } - template + template > _LIBCPP_HIDE_FROM_ABI constexpr _Tp value_or(_Up&& __v) const& { static_assert(is_copy_constructible_v<_Tp>, "value_type has to be copy constructible"); static_assert(is_convertible_v<_Up, _Tp>, "argument has to be convertible to value_type"); return this->__has_val() ? this->__val() : static_cast<_Tp>(std::forward<_Up>(__v)); } - template + template > _LIBCPP_HIDE_FROM_ABI constexpr _Tp value_or(_Up&& __v) && { static_assert(is_move_constructible_v<_Tp>, "value_type has to be move constructible"); static_assert(is_convertible_v<_Up, _Tp>, "argument has to be convertible to value_type"); diff --git a/libcxx/include/__flat_map/flat_map.h b/libcxx/include/__flat_map/flat_map.h index bf193f6d3c62f..7bb235ba76503 100644 --- a/libcxx/include/__flat_map/flat_map.h +++ b/libcxx/include/__flat_map/flat_map.h @@ -29,7 +29,6 @@ #include <__flat_map/key_value_iterator.h> #include <__flat_map/sorted_unique.h> #include <__flat_map/utils.h> -#include <__functional/invoke.h> #include <__functional/is_transparent.h> #include <__functional/operations.h> #include <__fwd/memory.h> @@ -48,7 +47,6 @@ #include <__ranges/container_compatible_range.h> #include <__ranges/drop_view.h> #include <__ranges/from_range.h> -#include <__ranges/ref_view.h> #include <__ranges/size.h> #include <__ranges/subrange.h> #include <__ranges/zip_view.h> @@ -1125,8 +1123,7 @@ class flat_map { }; template > - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && - !__is_allocator<_MappedContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> && is_invocable_v) @@ -1139,7 +1136,7 @@ flat_map(_KeyContainer, _MappedContainer, _Compare = _Compare()) template requires(uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> && - !__is_allocator<_KeyContainer>::value && !__is_allocator<_MappedContainer>::value) + !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer>) flat_map(_KeyContainer, _MappedContainer, _Allocator) -> flat_map; template - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && - !__is_allocator<_MappedContainer>::value && uses_allocator_v<_KeyContainer, _Allocator> && - uses_allocator_v<_MappedContainer, _Allocator> && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> && + uses_allocator_v<_KeyContainer, _Allocator> && 
uses_allocator_v<_MappedContainer, _Allocator> && is_invocable_v) @@ -1162,8 +1158,7 @@ flat_map(_KeyContainer, _MappedContainer, _Compare, _Allocator) _MappedContainer>; template > - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && - !__is_allocator<_MappedContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> && is_invocable_v) @@ -1176,7 +1171,7 @@ flat_map(sorted_unique_t, _KeyContainer, _MappedContainer, _Compare = _Compare() template requires(uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> && - !__is_allocator<_KeyContainer>::value && !__is_allocator<_MappedContainer>::value) + !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer>) flat_map(sorted_unique_t, _KeyContainer, _MappedContainer, _Allocator) -> flat_map; template - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && - !__is_allocator<_MappedContainer>::value && uses_allocator_v<_KeyContainer, _Allocator> && - uses_allocator_v<_MappedContainer, _Allocator> && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> && + uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> && is_invocable_v) @@ -1199,19 +1193,19 @@ flat_map(sorted_unique_t, _KeyContainer, _MappedContainer, _Compare, _Allocator) _MappedContainer>; template >> - requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value) + requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>) flat_map(_InputIterator, _InputIterator, _Compare = _Compare()) -> flat_map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare>; template >> - requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value) + requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>) flat_map(sorted_unique_t, _InputIterator, _InputIterator, _Compare = _Compare()) -> flat_map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare>; template >, class _Allocator = allocator, - class = __enable_if_t::value && __is_allocator<_Allocator>::value>> + class = __enable_if_t && __is_allocator_v<_Allocator>>> flat_map(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> flat_map< __range_key_type<_Range>, __range_mapped_type<_Range>, @@ -1219,7 +1213,7 @@ flat_map(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator( vector<__range_key_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_key_type<_Range>>>, vector<__range_mapped_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_mapped_type<_Range>>>>; -template ::value>> +template >> flat_map(from_range_t, _Range&&, _Allocator) -> flat_map< __range_key_type<_Range>, __range_mapped_type<_Range>, @@ -1228,11 +1222,11 @@ flat_map(from_range_t, _Range&&, _Allocator) -> flat_map< vector<__range_mapped_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_mapped_type<_Range>>>>; template > - requires(!__is_allocator<_Compare>::value) + requires(!__is_allocator_v<_Compare>) flat_map(initializer_list>, _Compare = _Compare()) -> flat_map<_Key, _Tp, _Compare>; template > - requires(!__is_allocator<_Compare>::value) + requires(!__is_allocator_v<_Compare>) flat_map(sorted_unique_t, initializer_list>, 
_Compare = _Compare()) -> flat_map<_Key, _Tp, _Compare>; template diff --git a/libcxx/include/__flat_map/flat_multimap.h b/libcxx/include/__flat_map/flat_multimap.h index 260d93ed25785..96d945405cffe 100644 --- a/libcxx/include/__flat_map/flat_multimap.h +++ b/libcxx/include/__flat_map/flat_multimap.h @@ -22,7 +22,6 @@ #include <__algorithm/upper_bound.h> #include <__assert> #include <__compare/synth_three_way.h> -#include <__concepts/convertible_to.h> #include <__concepts/swappable.h> #include <__config> #include <__cstddef/byte.h> @@ -30,7 +29,6 @@ #include <__flat_map/key_value_iterator.h> #include <__flat_map/sorted_equivalent.h> #include <__flat_map/utils.h> -#include <__functional/invoke.h> #include <__functional/is_transparent.h> #include <__functional/operations.h> #include <__fwd/vector.h> @@ -47,7 +45,6 @@ #include <__ranges/container_compatible_range.h> #include <__ranges/drop_view.h> #include <__ranges/from_range.h> -#include <__ranges/ref_view.h> #include <__ranges/size.h> #include <__ranges/subrange.h> #include <__ranges/zip_view.h> @@ -57,14 +54,12 @@ #include <__type_traits/is_allocator.h> #include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/is_same.h> -#include <__type_traits/maybe_const.h> #include <__utility/exception_guard.h> #include <__utility/move.h> #include <__utility/pair.h> #include <__utility/scope_guard.h> #include <__vector/vector.h> #include -#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -928,8 +923,7 @@ class flat_multimap { }; template > - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && - !__is_allocator<_MappedContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> && is_invocable_v) @@ -942,7 +936,7 @@ flat_multimap(_KeyContainer, _MappedContainer, _Compare = _Compare()) template requires(uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> && - !__is_allocator<_KeyContainer>::value && !__is_allocator<_MappedContainer>::value) + !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer>) flat_multimap(_KeyContainer, _MappedContainer, _Allocator) -> flat_multimap; template - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && - !__is_allocator<_MappedContainer>::value && uses_allocator_v<_KeyContainer, _Allocator> && - uses_allocator_v<_MappedContainer, _Allocator> && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> && + uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> && is_invocable_v) @@ -965,8 +958,7 @@ flat_multimap(_KeyContainer, _MappedContainer, _Compare, _Allocator) _MappedContainer>; template > - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && - !__is_allocator<_MappedContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> && is_invocable_v) @@ -979,7 +971,7 @@ flat_multimap(sorted_equivalent_t, _KeyContainer, _MappedContainer, _Compare = _ template requires(uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> && - !__is_allocator<_KeyContainer>::value && !__is_allocator<_MappedContainer>::value) + !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer>) flat_multimap(sorted_equivalent_t, 
_KeyContainer, _MappedContainer, _Allocator) -> flat_multimap; template - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && - !__is_allocator<_MappedContainer>::value && uses_allocator_v<_KeyContainer, _Allocator> && - uses_allocator_v<_MappedContainer, _Allocator> && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> && + uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> && is_invocable_v) @@ -1002,19 +993,19 @@ flat_multimap(sorted_equivalent_t, _KeyContainer, _MappedContainer, _Compare, _A _MappedContainer>; template >> - requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value) + requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>) flat_multimap(_InputIterator, _InputIterator, _Compare = _Compare()) -> flat_multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare>; template >> - requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value) + requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>) flat_multimap(sorted_equivalent_t, _InputIterator, _InputIterator, _Compare = _Compare()) -> flat_multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare>; template >, class _Allocator = allocator, - class = __enable_if_t::value && __is_allocator<_Allocator>::value>> + class = __enable_if_t && __is_allocator_v<_Allocator>>> flat_multimap(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> flat_multimap< __range_key_type<_Range>, __range_mapped_type<_Range>, @@ -1022,7 +1013,7 @@ flat_multimap(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Alloc vector<__range_key_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_key_type<_Range>>>, vector<__range_mapped_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_mapped_type<_Range>>>>; -template ::value>> +template >> flat_multimap(from_range_t, _Range&&, _Allocator) -> flat_multimap< __range_key_type<_Range>, __range_mapped_type<_Range>, @@ -1031,11 +1022,11 @@ flat_multimap(from_range_t, _Range&&, _Allocator) -> flat_multimap< vector<__range_mapped_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_mapped_type<_Range>>>>; template > - requires(!__is_allocator<_Compare>::value) + requires(!__is_allocator_v<_Compare>) flat_multimap(initializer_list>, _Compare = _Compare()) -> flat_multimap<_Key, _Tp, _Compare>; template > - requires(!__is_allocator<_Compare>::value) + requires(!__is_allocator_v<_Compare>) flat_multimap(sorted_equivalent_t, initializer_list>, _Compare = _Compare()) -> flat_multimap<_Key, _Tp, _Compare>; diff --git a/libcxx/include/__flat_map/key_value_iterator.h b/libcxx/include/__flat_map/key_value_iterator.h index d04a23d1f8606..795651a07937b 100644 --- a/libcxx/include/__flat_map/key_value_iterator.h +++ b/libcxx/include/__flat_map/key_value_iterator.h @@ -20,7 +20,6 @@ #include <__type_traits/conditional.h> #include <__utility/forward.h> #include <__utility/move.h> -#include <__utility/pair.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/libcxx/include/__flat_set/flat_multiset.h b/libcxx/include/__flat_set/flat_multiset.h index 44d8af05a56af..b1a4917659c49 100644 --- a/libcxx/include/__flat_set/flat_multiset.h +++ b/libcxx/include/__flat_set/flat_multiset.h @@ 
-13,54 +13,40 @@ #include <__algorithm/equal_range.h> #include <__algorithm/lexicographical_compare_three_way.h> #include <__algorithm/lower_bound.h> -#include <__algorithm/min.h> #include <__algorithm/ranges_equal.h> #include <__algorithm/ranges_inplace_merge.h> #include <__algorithm/ranges_is_sorted.h> #include <__algorithm/ranges_sort.h> -#include <__algorithm/ranges_unique.h> #include <__algorithm/remove_if.h> #include <__algorithm/upper_bound.h> #include <__assert> #include <__compare/synth_three_way.h> -#include <__concepts/convertible_to.h> #include <__concepts/swappable.h> #include <__config> -#include <__cstddef/byte.h> -#include <__cstddef/ptrdiff_t.h> -#include <__flat_map/key_value_iterator.h> #include <__flat_map/sorted_equivalent.h> #include <__flat_set/ra_iterator.h> #include <__flat_set/utils.h> -#include <__functional/invoke.h> #include <__functional/is_transparent.h> #include <__functional/operations.h> #include <__fwd/vector.h> #include <__iterator/concepts.h> -#include <__iterator/distance.h> #include <__iterator/iterator_traits.h> #include <__iterator/prev.h> -#include <__iterator/ranges_iterator_traits.h> #include <__iterator/reverse_iterator.h> #include <__memory/allocator_traits.h> #include <__memory/uses_allocator.h> #include <__memory/uses_allocator_construction.h> -#include <__ranges/access.h> #include <__ranges/concepts.h> #include <__ranges/container_compatible_range.h> #include <__ranges/drop_view.h> #include <__ranges/from_range.h> -#include <__ranges/ref_view.h> #include <__ranges/size.h> #include <__ranges/subrange.h> -#include <__ranges/zip_view.h> -#include <__type_traits/conjunction.h> #include <__type_traits/container_traits.h> #include <__type_traits/invoke.h> #include <__type_traits/is_allocator.h> #include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/is_same.h> -#include <__type_traits/maybe_const.h> #include <__utility/as_const.h> #include <__utility/exception_guard.h> #include <__utility/move.h> @@ -689,7 +675,7 @@ class flat_multiset { }; template > - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && is_invocable_v) @@ -697,12 +683,12 @@ flat_multiset(_KeyContainer, _Compare = _Compare()) -> flat_multiset; template - requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator<_KeyContainer>::value) + requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator_v<_KeyContainer>) flat_multiset(_KeyContainer, _Allocator) -> flat_multiset, _KeyContainer>; template - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && uses_allocator_v<_KeyContainer, _Allocator> && is_invocable_v flat_multiset; template > - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && is_invocable_v) @@ -719,12 +705,12 @@ flat_multiset(sorted_equivalent_t, _KeyContainer, _Compare = _Compare()) -> flat_multiset; template - requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator<_KeyContainer>::value) + requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator_v<_KeyContainer>) flat_multiset(sorted_equivalent_t, _KeyContainer, _Allocator) -> flat_multiset, _KeyContainer>; template - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && + 
requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && uses_allocator_v<_KeyContainer, _Allocator> && is_invocable_v flat_multiset; template >> - requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value) + requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>) flat_multiset(_InputIterator, _InputIterator, _Compare = _Compare()) -> flat_multiset<__iter_value_type<_InputIterator>, _Compare>; template >> - requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value) + requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>) flat_multiset(sorted_equivalent_t, _InputIterator, _InputIterator, _Compare = _Compare()) -> flat_multiset<__iter_value_type<_InputIterator>, _Compare>; template >, class _Allocator = allocator>, - class = __enable_if_t::value && __is_allocator<_Allocator>::value>> + class = __enable_if_t && __is_allocator_v<_Allocator>>> flat_multiset(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> flat_multiset< ranges::range_value_t<_Range>, _Compare, vector, __allocator_traits_rebind_t<_Allocator, ranges::range_value_t<_Range>>>>; -template ::value>> +template >> flat_multiset(from_range_t, _Range&&, _Allocator) -> flat_multiset< ranges::range_value_t<_Range>, less>, vector, __allocator_traits_rebind_t<_Allocator, ranges::range_value_t<_Range>>>>; template > - requires(!__is_allocator<_Compare>::value) + requires(!__is_allocator_v<_Compare>) flat_multiset(initializer_list<_Key>, _Compare = _Compare()) -> flat_multiset<_Key, _Compare>; template > - requires(!__is_allocator<_Compare>::value) + requires(!__is_allocator_v<_Compare>) flat_multiset(sorted_equivalent_t, initializer_list<_Key>, _Compare = _Compare()) -> flat_multiset<_Key, _Compare>; template diff --git a/libcxx/include/__flat_set/flat_set.h b/libcxx/include/__flat_set/flat_set.h index 95cb998459bc8..5fa1f2d8acb9b 100644 --- a/libcxx/include/__flat_set/flat_set.h +++ b/libcxx/include/__flat_set/flat_set.h @@ -12,7 +12,6 @@ #include <__algorithm/lexicographical_compare_three_way.h> #include <__algorithm/lower_bound.h> -#include <__algorithm/min.h> #include <__algorithm/ranges_adjacent_find.h> #include <__algorithm/ranges_equal.h> #include <__algorithm/ranges_inplace_merge.h> @@ -24,20 +23,16 @@ #include <__compare/synth_three_way.h> #include <__concepts/swappable.h> #include <__config> -#include <__cstddef/ptrdiff_t.h> #include <__flat_map/sorted_unique.h> #include <__flat_set/ra_iterator.h> #include <__flat_set/utils.h> -#include <__functional/invoke.h> #include <__functional/is_transparent.h> #include <__functional/operations.h> #include <__fwd/vector.h> #include <__iterator/concepts.h> -#include <__iterator/distance.h> #include <__iterator/iterator_traits.h> #include <__iterator/next.h> #include <__iterator/prev.h> -#include <__iterator/ranges_iterator_traits.h> #include <__iterator/reverse_iterator.h> #include <__memory/allocator_traits.h> #include <__memory/uses_allocator.h> @@ -47,10 +42,7 @@ #include <__ranges/container_compatible_range.h> #include <__ranges/drop_view.h> #include <__ranges/from_range.h> -#include <__ranges/ref_view.h> #include <__ranges/size.h> -#include <__ranges/subrange.h> -#include <__type_traits/conjunction.h> #include <__type_traits/container_traits.h> #include <__type_traits/invoke.h> #include <__type_traits/is_allocator.h> @@ -774,19 +766,19 @@ class flat_set { }; template > - 
requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && is_invocable_v) flat_set(_KeyContainer, _Compare = _Compare()) -> flat_set; template - requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator<_KeyContainer>::value) + requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator_v<_KeyContainer>) flat_set(_KeyContainer, _Allocator) -> flat_set, _KeyContainer>; template - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && uses_allocator_v<_KeyContainer, _Allocator> && is_invocable_v flat_set(_KeyContainer, _Compare, _Allocator) -> flat_set; template > - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && is_invocable_v) @@ -802,12 +794,12 @@ flat_set(sorted_unique_t, _KeyContainer, _Compare = _Compare()) -> flat_set; template - requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator<_KeyContainer>::value) + requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator_v<_KeyContainer>) flat_set(sorted_unique_t, _KeyContainer, _Allocator) -> flat_set, _KeyContainer>; template - requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value && + requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && uses_allocator_v<_KeyContainer, _Allocator> && is_invocable_v flat_set; template >> - requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value) + requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>) flat_set(_InputIterator, _InputIterator, _Compare = _Compare()) -> flat_set<__iter_value_type<_InputIterator>, _Compare>; template >> - requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value) + requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>) flat_set(sorted_unique_t, _InputIterator, _InputIterator, _Compare = _Compare()) -> flat_set<__iter_value_type<_InputIterator>, _Compare>; template >, class _Allocator = allocator>, - class = __enable_if_t::value && __is_allocator<_Allocator>::value>> + class = __enable_if_t && __is_allocator_v<_Allocator>>> flat_set(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> flat_set< ranges::range_value_t<_Range>, _Compare, vector, __allocator_traits_rebind_t<_Allocator, ranges::range_value_t<_Range>>>>; -template ::value>> +template >> flat_set(from_range_t, _Range&&, _Allocator) -> flat_set< ranges::range_value_t<_Range>, less>, vector, __allocator_traits_rebind_t<_Allocator, ranges::range_value_t<_Range>>>>; template > - requires(!__is_allocator<_Compare>::value) + requires(!__is_allocator_v<_Compare>) flat_set(initializer_list<_Key>, _Compare = _Compare()) -> flat_set<_Key, _Compare>; template > - requires(!__is_allocator<_Compare>::value) + requires(!__is_allocator_v<_Compare>) flat_set(sorted_unique_t, initializer_list<_Key>, _Compare = _Compare()) -> flat_set<_Key, _Compare>; template diff --git a/libcxx/include/__functional/is_transparent.h b/libcxx/include/__functional/is_transparent.h index 567df1a662f54..c2c6fbce2465b 100644 --- a/libcxx/include/__functional/is_transparent.h +++ b/libcxx/include/__functional/is_transparent.h @@ -29,6 +29,14 @@ 
inline const bool __is_transparent_v<_Tp, _Key, __void_t(arg))`. +// +// This is different from `__is_transparent_v`, which is only a property of the comparator and doesn't provide +// additional semantic guarantees. +template +inline const bool __is_transparently_comparable_v = false; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___FUNCTIONAL_IS_TRANSPARENT diff --git a/libcxx/include/__functional/operations.h b/libcxx/include/__functional/operations.h index 7b0ea11db5844..7f315ca851c08 100644 --- a/libcxx/include/__functional/operations.h +++ b/libcxx/include/__functional/operations.h @@ -15,7 +15,9 @@ #include <__functional/unary_function.h> #include <__fwd/functional.h> #include <__type_traits/desugars_to.h> +#include <__type_traits/is_generic_transparent_comparator.h> #include <__type_traits/is_integral.h> +#include <__type_traits/make_transparent.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -377,6 +379,14 @@ struct less { typedef void is_transparent; }; +template +struct __make_transparent > { + using type _LIBCPP_NODEBUG = less<>; +}; + +template <> +inline const bool __is_generic_transparent_comparator_v> = true; + template inline const bool __desugars_to_v<__less_tag, less<>, _Tp, _Up> = true; @@ -466,6 +476,14 @@ struct greater { template inline const bool __desugars_to_v<__greater_tag, greater<>, _Tp, _Up> = true; + +template +struct __make_transparent> { + using type _LIBCPP_NODEBUG = greater<>; +}; + +template <> +inline const bool __is_generic_transparent_comparator_v> = true; #endif // Logical operations diff --git a/libcxx/include/__functional/ranges_operations.h b/libcxx/include/__functional/ranges_operations.h index df95843e7c9af..dc9da061af264 100644 --- a/libcxx/include/__functional/ranges_operations.h +++ b/libcxx/include/__functional/ranges_operations.h @@ -14,6 +14,7 @@ #include <__concepts/totally_ordered.h> #include <__config> #include <__type_traits/desugars_to.h> +#include <__type_traits/is_generic_transparent_comparator.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -108,6 +109,12 @@ inline const bool __desugars_to_v<__less_tag, ranges::less, _Tp, _Up> = true; template inline const bool __desugars_to_v<__greater_tag, ranges::greater, _Tp, _Up> = true; +template <> +inline const bool __is_generic_transparent_comparator_v = true; + +template <> +inline const bool __is_generic_transparent_comparator_v = true; + #endif // _LIBCPP_STD_VER >= 20 _LIBCPP_END_NAMESPACE_STD diff --git a/libcxx/include/__locale b/libcxx/include/__locale index 757a53951f66e..eb7b7786208e8 100644 --- a/libcxx/include/__locale +++ b/libcxx/include/__locale @@ -585,7 +585,7 @@ public: # ifdef _CACHED_RUNES static const size_t table_size = _CACHED_RUNES; # else - static const size_t table_size = 256; // FIXME: Don't hardcode this. 
+ static const size_t table_size = 256; # endif _LIBCPP_HIDE_FROM_ABI const mask* table() const _NOEXCEPT { return __tab_; } static const mask* classic_table() _NOEXCEPT; diff --git a/libcxx/include/__locale_dir/time.h b/libcxx/include/__locale_dir/time.h index 5f60d5f36b349..78698e9651918 100644 --- a/libcxx/include/__locale_dir/time.h +++ b/libcxx/include/__locale_dir/time.h @@ -601,17 +601,13 @@ class __time_get_storage : public __time_get { template <> \ _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::__time_get_storage(const string&); \ template <> \ - _LIBCPP_EXPORTED_FROM_ABI void __time_get_storage<_CharT>::init(const ctype<_CharT>&); \ + void __time_get_storage<_CharT>::init(const ctype<_CharT>&); \ template <> \ - _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::string_type __time_get_storage<_CharT>::__analyze( \ - char, const ctype<_CharT>&); \ + __time_get_storage<_CharT>::string_type __time_get_storage<_CharT>::__analyze(char, const ctype<_CharT>&); \ extern template _LIBCPP_EXPORTED_FROM_ABI time_base::dateorder __time_get_storage<_CharT>::__do_date_order() \ const; \ extern template _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::__time_get_storage(const char*); \ - extern template _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::__time_get_storage(const string&); \ - extern template _LIBCPP_EXPORTED_FROM_ABI void __time_get_storage<_CharT>::init(const ctype<_CharT>&); \ - extern template _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::string_type \ - __time_get_storage<_CharT>::__analyze(char, const ctype<_CharT>&); + extern template _LIBCPP_EXPORTED_FROM_ABI __time_get_storage<_CharT>::__time_get_storage(const string&); _LIBCPP_TIME_GET_STORAGE_EXPLICIT_INSTANTIATION(char) # if _LIBCPP_HAS_WIDE_CHARACTERS diff --git a/libcxx/include/__memory/compressed_pair.h b/libcxx/include/__memory/compressed_pair.h index d9d2720f9c9e4..0388d752ccc8b 100644 --- a/libcxx/include/__memory/compressed_pair.h +++ b/libcxx/include/__memory/compressed_pair.h @@ -28,8 +28,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD // understand how it works). // // ================================================================================================================== // -// The first member is aligned to the alignment of the second member to force padding in front of the compressed pair -// in case there are members before it. +// On GCC, the first member is aligned to the alignment of the second member to force padding in front of the compressed +// pair in case there are members before it. // // For example: // (assuming x86-64 linux) @@ -53,6 +53,9 @@ _LIBCPP_BEGIN_NAMESPACE_STD // Furthermore, that alignment must be the same as what was used in the old __compressed_pair layout, so we must // handle reference types specially since alignof(T&) == alignof(T). // See https://llvm.org/PR118559. +// +// On Clang, this is unnecessary, since we use anonymous structs instead, which automatically handle the alignment +// correctly. 
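For context on the macro being simplified here: the point of `_LIBCPP_COMPRESSED_PAIR` is to let an empty member (an allocator, comparator, deleter, and so on) occupy no storage. A rough sketch of the underlying `[[no_unique_address]]` idea, independent of the exact libc++ macro expansion:

#include <cstdio>

struct empty_allocator {}; // empty class, like std::allocator<T>

// Without compression the empty member still needs its own address, so the
// struct grows beyond a single pointer (typically 16 bytes on x86-64).
struct plain_pair {
  int* data;
  empty_allocator alloc;
};

// With [[no_unique_address]] the empty member may share an address with
// another member and usually contributes no storage (typically 8 bytes).
struct compressed_pair {
  int* data;
  [[no_unique_address]] empty_allocator alloc;
};

int main() {
  std::printf("plain_pair:      %zu\n", sizeof(plain_pair));
  std::printf("compressed_pair: %zu\n", sizeof(compressed_pair));
}

The alignment and padding machinery in the real macro exists, per the comment above, to keep this compressed layout ABI-compatible with the layout of the old __compressed_pair class template.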
#ifndef _LIBCPP_ABI_NO_COMPRESSED_PAIR_PADDING @@ -104,8 +107,7 @@ class __compressed_pair_padding<_ToPad, true> {}; # else # define _LIBCPP_COMPRESSED_PAIR(T1, Initializer1, T2, Initializer2) \ struct { \ - _LIBCPP_NO_UNIQUE_ADDRESS \ - __attribute__((__aligned__(::std::__compressed_pair_alignment))) T1 Initializer1; \ + _LIBCPP_NO_UNIQUE_ADDRESS T1 Initializer1; \ _LIBCPP_NO_UNIQUE_ADDRESS ::std::__compressed_pair_padding _LIBCPP_CONCAT3(__padding1_, __LINE__, _); \ _LIBCPP_NO_UNIQUE_ADDRESS T2 Initializer2; \ _LIBCPP_NO_UNIQUE_ADDRESS ::std::__compressed_pair_padding _LIBCPP_CONCAT3(__padding2_, __LINE__, _); \ @@ -113,9 +115,7 @@ class __compressed_pair_padding<_ToPad, true> {}; # define _LIBCPP_COMPRESSED_TRIPLE(T1, Initializer1, T2, Initializer2, T3, Initializer3) \ struct { \ - _LIBCPP_NO_UNIQUE_ADDRESS \ - __attribute__((__aligned__(::std::__compressed_pair_alignment), \ - __aligned__(::std::__compressed_pair_alignment))) T1 Initializer1; \ + _LIBCPP_NO_UNIQUE_ADDRESS T1 Initializer1; \ _LIBCPP_NO_UNIQUE_ADDRESS ::std::__compressed_pair_padding _LIBCPP_CONCAT3(__padding1_, __LINE__, _); \ _LIBCPP_NO_UNIQUE_ADDRESS T2 Initializer2; \ _LIBCPP_NO_UNIQUE_ADDRESS ::std::__compressed_pair_padding _LIBCPP_CONCAT3(__padding2_, __LINE__, _); \ diff --git a/libcxx/include/__memory/shared_count.h b/libcxx/include/__memory/shared_count.h index dad20bcabd7ea..b40d8c9cf77d1 100644 --- a/libcxx/include/__memory/shared_count.h +++ b/libcxx/include/__memory/shared_count.h @@ -22,37 +22,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD // NOTE: Relaxed and acq/rel atomics (for increment and decrement respectively) // should be sufficient for thread safety. // See https://llvm.org/PR22803 -#if (defined(__clang__) && __has_builtin(__atomic_add_fetch) && defined(__ATOMIC_RELAXED) && \ - defined(__ATOMIC_ACQ_REL)) || \ - defined(_LIBCPP_COMPILER_GCC) -# define _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT 1 -#else -# define _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT 0 -#endif - -template -inline _LIBCPP_HIDE_FROM_ABI _ValueType __libcpp_relaxed_load(_ValueType const* __value) { -#if _LIBCPP_HAS_THREADS && defined(__ATOMIC_RELAXED) && \ - (__has_builtin(__atomic_load_n) || defined(_LIBCPP_COMPILER_GCC)) - return __atomic_load_n(__value, __ATOMIC_RELAXED); -#else - return *__value; -#endif -} - -template -inline _LIBCPP_HIDE_FROM_ABI _ValueType __libcpp_acquire_load(_ValueType const* __value) { -#if _LIBCPP_HAS_THREADS && defined(__ATOMIC_ACQUIRE) && \ - (__has_builtin(__atomic_load_n) || defined(_LIBCPP_COMPILER_GCC)) - return __atomic_load_n(__value, __ATOMIC_ACQUIRE); -#else - return *__value; -#endif -} template inline _LIBCPP_HIDE_FROM_ABI _Tp __libcpp_atomic_refcount_increment(_Tp& __t) _NOEXCEPT { -#if _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT && _LIBCPP_HAS_THREADS +#if _LIBCPP_HAS_THREADS return __atomic_add_fetch(std::addressof(__t), 1, __ATOMIC_RELAXED); #else return __t += 1; @@ -61,7 +34,7 @@ inline _LIBCPP_HIDE_FROM_ABI _Tp __libcpp_atomic_refcount_increment(_Tp& __t) _N template inline _LIBCPP_HIDE_FROM_ABI _Tp __libcpp_atomic_refcount_decrement(_Tp& __t) _NOEXCEPT { -#if _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT && _LIBCPP_HAS_THREADS +#if _LIBCPP_HAS_THREADS return __atomic_add_fetch(std::addressof(__t), -1, __ATOMIC_ACQ_REL); #else return __t -= 1; @@ -95,7 +68,13 @@ class _LIBCPP_EXPORTED_FROM_ABI __shared_count { return false; } #endif - _LIBCPP_HIDE_FROM_ABI long use_count() const _NOEXCEPT { return __libcpp_relaxed_load(&__shared_owners_) + 1; } + _LIBCPP_HIDE_FROM_ABI long use_count() const _NOEXCEPT { +#if _LIBCPP_HAS_THREADS + 
return __atomic_load_n(&__shared_owners_, __ATOMIC_RELAXED) + 1; +#else + return __shared_owners_ + 1; +#endif + } }; class _LIBCPP_EXPORTED_FROM_ABI __shared_weak_count : private __shared_count { diff --git a/libcxx/include/__mutex/once_flag.h b/libcxx/include/__mutex/once_flag.h index e384c15a9f9b6..808b1ea99cc0b 100644 --- a/libcxx/include/__mutex/once_flag.h +++ b/libcxx/include/__mutex/once_flag.h @@ -10,10 +10,9 @@ #define _LIBCPP___MUTEX_ONCE_FLAG_H #include <__config> -#include <__functional/invoke.h> #include <__memory/addressof.h> -#include <__memory/shared_count.h> // __libcpp_acquire_load #include <__tuple/tuple_size.h> +#include <__type_traits/invoke.h> #include <__utility/forward.h> #include <__utility/integer_sequence.h> #include <__utility/move.h> @@ -118,6 +117,15 @@ void _LIBCPP_HIDE_FROM_ABI __call_once_proxy(void* __vp) { _LIBCPP_EXPORTED_FROM_ABI void __call_once(volatile once_flag::_State_type&, void*, void (*)(void*)); +template +inline _LIBCPP_HIDE_FROM_ABI _ValueType __libcpp_acquire_load(_ValueType const* __value) { +#if _LIBCPP_HAS_THREADS + return __atomic_load_n(__value, __ATOMIC_ACQUIRE); +#else + return *__value; +#endif +} + #ifndef _LIBCPP_CXX03_LANG template diff --git a/libcxx/include/__ranges/join_view.h b/libcxx/include/__ranges/join_view.h index 327b349f476a7..364f056d8d2cf 100644 --- a/libcxx/include/__ranges/join_view.h +++ b/libcxx/include/__ranges/join_view.h @@ -410,8 +410,13 @@ struct __segmented_iterator_traits<_JoinViewIterator> { static constexpr _LIBCPP_HIDE_FROM_ABI _JoinViewIterator __compose(__segment_iterator __seg_iter, __local_iterator __local_iter) { - return _JoinViewIterator( - std::move(__seg_iter).__get_data(), std::move(__seg_iter).__get_iter(), std::move(__local_iter)); + auto&& __parent = std::move(__seg_iter).__get_data(); + auto&& __outer = std::move(__seg_iter).__get_iter(); + if (__local_iter == ranges::end(*__outer)) { + ++__outer; + return _JoinViewIterator(*__parent, __outer); + } + return _JoinViewIterator(__parent, __outer, std::move(__local_iter)); } }; diff --git a/libcxx/include/__string/char_traits.h b/libcxx/include/__string/char_traits.h index 86c92477cbfeb..8292750919427 100644 --- a/libcxx/include/__string/char_traits.h +++ b/libcxx/include/__string/char_traits.h @@ -369,6 +369,13 @@ _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR_SINCE_CXX14 const _CharT* __searc if (__len1 < __len2) return __last1; + if (__builtin_constant_p(__len2 == 1) && __len2 == 1) { + auto __res = _Traits::find(__first1, __len1, *__first2); + if (__res == nullptr) + return __last1; + return __res; + } + // First element of __first2 is loop invariant. 
_CharT __f2 = *__first2; while (true) { diff --git a/libcxx/include/__tree b/libcxx/include/__tree index 61c910c52c536..ef960d481cb7b 100644 --- a/libcxx/include/__tree +++ b/libcxx/include/__tree @@ -34,6 +34,7 @@ #include <__type_traits/is_same.h> #include <__type_traits/is_specialization.h> #include <__type_traits/is_swappable.h> +#include <__type_traits/make_transparent.h> #include <__type_traits/remove_const.h> #include <__utility/forward.h> #include <__utility/lazy_synth_three_way_comparator.h> @@ -1749,7 +1750,8 @@ __tree<_Tp, _Compare, _Allocator>::__find_equal(const _Key& __v) { } __node_base_pointer* __node_ptr = __root_ptr(); - auto __comp = __lazy_synth_three_way_comparator<_Compare, _Key, value_type>(value_comp()); + auto&& __transparent = std::__as_transparent(value_comp()); + auto __comp = __lazy_synth_three_way_comparator<__make_transparent_t<_Compare>, _Key, value_type>(__transparent); while (true) { auto __comp_res = __comp(__v, __nd->__get_value()); diff --git a/libcxx/include/__type_traits/is_allocator.h b/libcxx/include/__type_traits/is_allocator.h index 191eeb9a1f522..f37c029a2aa89 100644 --- a/libcxx/include/__type_traits/is_allocator.h +++ b/libcxx/include/__type_traits/is_allocator.h @@ -11,7 +11,6 @@ #include <__config> #include <__cstddef/size_t.h> -#include <__type_traits/integral_constant.h> #include <__type_traits/void_t.h> #include <__utility/declval.h> @@ -21,13 +20,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD -template -struct __is_allocator : false_type {}; +template +inline const bool __is_allocator_v = false; -template -struct __is_allocator<_Alloc, - __void_t, - __void_t().allocate(size_t(0)))> > : true_type {}; +template +inline const bool __is_allocator_v<_Alloc, + __void_t, + __void_t().allocate(size_t()))> > = true; _LIBCPP_END_NAMESPACE_STD diff --git a/libcxx/include/__type_traits/is_generic_transparent_comparator.h b/libcxx/include/__type_traits/is_generic_transparent_comparator.h new file mode 100644 index 0000000000000..fd02c0b0423d1 --- /dev/null +++ b/libcxx/include/__type_traits/is_generic_transparent_comparator.h @@ -0,0 +1,30 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___TYPE_TRAITS_IS_GENERIC_TRANSPARENT_COMPARATOR_H +#define _LIBCPP___TYPE_TRAITS_IS_GENERIC_TRANSPARENT_COMPARATOR_H + +#include <__config> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +// This trait returns true if the given _Comparator is known to accept any two types for comparison. This is separate +// from `__is_transparent_v`, since that only enables overloads of specific functions, but doesn't give any semantic +// guarantees. This trait guarantees that the comparator simply calls the appropriate comparison functions for any two +// types.
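The practical effect of this trait, together with `__make_transparent` introduced below, is that the library can compare a lookup key against the stored keys directly instead of converting it to the key type first. The user-visible analogue is heterogeneous lookup with `std::less<>`, sketched here; this is ordinary user code, not the internal machinery.

#include <map>
#include <string>

int main() {
  // Default comparator less<std::string>: find(const key_type&) forces the
  // string literal to be converted to a temporary std::string first.
  std::map<std::string, int> m1{{"hello", 1}};
  (void)m1.find("hello");

  // Transparent comparator less<>: the template find() overload compares the
  // const char* against the stored keys directly, with no temporary string.
  std::map<std::string, int, std::less<>> m2{{"hello", 1}};
  (void)m2.find("hello");
}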
+ +template +inline const bool __is_generic_transparent_comparator_v = false; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___TYPE_TRAITS_IS_GENERIC_TRANSPARENT_COMPARATOR_H diff --git a/libcxx/include/__type_traits/make_transparent.h b/libcxx/include/__type_traits/make_transparent.h new file mode 100644 index 0000000000000..4d3207a807fa7 --- /dev/null +++ b/libcxx/include/__type_traits/make_transparent.h @@ -0,0 +1,48 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___TYPE_TRAITS_MAKE_TRANSPARENT_H +#define _LIBCPP___TYPE_TRAITS_MAKE_TRANSPARENT_H + +#include <__config> +#include <__type_traits/enable_if.h> +#include <__type_traits/is_empty.h> +#include <__type_traits/is_same.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +// __make_transparent tries to create a transparent comparator from its non-transparent counterpart, e.g. obtain +// `less<>` from `less`. This is useful in cases where conversions can be avoided (e.g. a string literal to a +// std::string). + +template +struct __make_transparent { + using type _LIBCPP_NODEBUG = _Comparator; +}; + +template +using __make_transparent_t _LIBCPP_NODEBUG = typename __make_transparent<_Comparator>::type; + +template >::value, int> = 0> +_LIBCPP_HIDE_FROM_ABI _Comparator& __as_transparent(_Comparator& __comp) { + return __comp; +} + +template >::value, int> = 0> +_LIBCPP_HIDE_FROM_ABI __make_transparent_t<_Comparator> __as_transparent(_Comparator&) { + static_assert(is_empty<_Comparator>::value); + return __make_transparent_t<_Comparator>(); +} + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___TYPE_TRAITS_MAKE_TRANSPARENT_H diff --git a/libcxx/include/__utility/default_three_way_comparator.h b/libcxx/include/__utility/default_three_way_comparator.h index 438ab55b43230..92cdce6aae117 100644 --- a/libcxx/include/__utility/default_three_way_comparator.h +++ b/libcxx/include/__utility/default_three_way_comparator.h @@ -40,13 +40,13 @@ struct __default_three_way_comparator<_LHS, } }; -#if _LIBCPP_STD_VER >= 20 && __has_builtin(__builtin_lt_synthesises_from_spaceship) +#if _LIBCPP_STD_VER >= 20 && __has_builtin(__builtin_lt_synthesizes_from_spaceship) template struct __default_three_way_comparator< _LHS, _RHS, __enable_if_t::value && is_arithmetic<_RHS>::value) && - __builtin_lt_synthesises_from_spaceship(const _LHS&, const _RHS&)>> { + __builtin_lt_synthesizes_from_spaceship(const _LHS&, const _RHS&)>> { _LIBCPP_HIDE_FROM_ABI static int operator()(const _LHS& __lhs, const _RHS& __rhs) { auto __res = __lhs <=> __rhs; if (__res < 0) diff --git a/libcxx/include/__vector/vector.h b/libcxx/include/__vector/vector.h index a69aa9145e638..707aff3e7c3d3 100644 --- a/libcxx/include/__vector/vector.h +++ b/libcxx/include/__vector/vector.h @@ -176,7 +176,7 @@ class vector { __guard.__complete(); } - template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0> + template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0> _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI vector(size_type __n, const value_type& __x, const allocator_type& __a) : __alloc_(__a) { @@ -846,20 +846,20 @@ class vector { template >, class = 
enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> vector(_InputIterator, _InputIterator) -> vector<__iter_value_type<_InputIterator>, _Alloc>; template ::value>, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> vector(_InputIterator, _InputIterator, _Alloc) -> vector<__iter_value_type<_InputIterator>, _Alloc>; #endif #if _LIBCPP_STD_VER >= 23 template >, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> vector(from_range_t, _Range&&, _Alloc = _Alloc()) -> vector, _Alloc>; #endif diff --git a/libcxx/include/__vector/vector_bool.h b/libcxx/include/__vector/vector_bool.h index 7b82906769255..66f5fd9498eec 100644 --- a/libcxx/include/__vector/vector_bool.h +++ b/libcxx/include/__vector/vector_bool.h @@ -478,7 +478,6 @@ class vector { return (__new_size + (__bits_per_word - 1)) & ~((size_type)__bits_per_word - 1); } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 size_type __recommend(size_type __new_size) const; - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __construct_at_end(size_type __n, bool __x); template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __construct_at_end(_InputIterator __first, _Sentinel __last, size_type __n); @@ -567,20 +566,6 @@ vector::__recommend(size_type __new_size) const { return std::max(2 * __cap, __align_it(__new_size)); } -// Default constructs __n objects starting at __end_ -// Precondition: size() + __n <= capacity() -// Postcondition: size() == size() + __n -template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void -vector::__construct_at_end(size_type __n, bool __x) { - _LIBCPP_ASSERT_INTERNAL( - capacity() >= size() + __n, "vector::__construct_at_end called with insufficient capacity"); - std::fill_n(end(), __n, __x); - this->__size_ += __n; - if (end().__ctz_ != 0) // Ensure uninitialized leading bits in the last word are set to zero - std::fill_n(end(), __bits_per_word - end().__ctz_, 0); -} - template template _LIBCPP_CONSTEXPR_SINCE_CXX20 void @@ -613,7 +598,8 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 vector::vector(size_type __n) : __begin_(nullptr), __size_(0), __cap_(0) { if (__n > 0) { __vallocate(__n); - __construct_at_end(__n, false); + std::fill_n(__begin_, __external_cap_to_internal(__n), __storage_type(0)); + __size_ = __n; } } @@ -623,7 +609,8 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 vector::vector(size_type __n, co : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(static_cast<__storage_allocator>(__a)) { if (__n > 0) { __vallocate(__n); - __construct_at_end(__n, false); + std::fill_n(__begin_, __external_cap_to_internal(__n), __storage_type(0)); + __size_ = __n; } } #endif @@ -633,7 +620,8 @@ _LIBCPP_CONSTEXPR_SINCE_CXX20 vector::vector(size_type __n, co : __begin_(nullptr), __size_(0), __cap_(0) { if (__n > 0) { __vallocate(__n); - __construct_at_end(__n, __x); + std::fill_n(__begin_, __external_cap_to_internal(__n), __storage_type(0) - __x); + __size_ = __n; } } @@ -643,7 +631,8 @@ vector::vector(size_type __n, const value_type& __x, const all : __begin_(nullptr), __size_(0), __cap_(0), __alloc_(static_cast<__storage_allocator>(__a)) { if (__n > 0) { __vallocate(__n); - __construct_at_end(__n, __x); + std::fill_n(__begin_, __external_cap_to_internal(__n), __storage_type(0) - __x); + __size_ = __n; } } diff --git a/libcxx/include/bitset b/libcxx/include/bitset index 
e2b46154ae730..3453c2fcde71e 100644 --- a/libcxx/include/bitset +++ b/libcxx/include/bitset @@ -867,7 +867,16 @@ bitset<_Size>::to_string(char __zero, char __one) const { template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX23 size_t bitset<_Size>::count() const _NOEXCEPT { - return static_cast(std::count(__base::__make_iter(0), __base::__make_iter(_Size), true)); +# if defined(_LIBCPP_COMPILER_CLANG_BASED) && !defined(_LIBCPP_CXX03_LANG) + if constexpr (_Size == 0) { + return 0; + } else if constexpr (_Size <= __base::__bits_per_word) { + return __builtin_popcountg(static_cast(__base::__first_)); + } else +# endif + { + return static_cast(std::count(__base::__make_iter(0), __base::__make_iter(_Size), true)); + } } template diff --git a/libcxx/include/deque b/libcxx/include/deque index 98d1dbbddb7e8..cfb64b4f07332 100644 --- a/libcxx/include/deque +++ b/libcxx/include/deque @@ -637,7 +637,7 @@ public: # endif _LIBCPP_HIDE_FROM_ABI deque(size_type __n, const value_type& __v); - template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0> + template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0> _LIBCPP_HIDE_FROM_ABI deque(size_type __n, const value_type& __v, const allocator_type& __a) : __map_(__pointer_allocator(__a)), __start_(0), __size_(0), __alloc_(__a) { __annotate_new(0); @@ -1260,20 +1260,20 @@ _LIBCPP_CONSTEXPR const typename allocator_traits<_Alloc>::difference_type deque template >, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> deque(_InputIterator, _InputIterator) -> deque<__iter_value_type<_InputIterator>, _Alloc>; template ::value>, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> deque(_InputIterator, _InputIterator, _Alloc) -> deque<__iter_value_type<_InputIterator>, _Alloc>; # endif # if _LIBCPP_STD_VER >= 23 template >, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> deque(from_range_t, _Range&&, _Alloc = _Alloc()) -> deque, _Alloc>; # endif diff --git a/libcxx/include/ext/hash_map b/libcxx/include/ext/hash_map index 70c2fbeec2959..01ca7498f0cc1 100644 --- a/libcxx/include/ext/hash_map +++ b/libcxx/include/ext/hash_map @@ -787,10 +787,7 @@ hash_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::hash_multimap( } template -hash_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::hash_multimap(const hash_multimap& __u) : __table_(__u.__table_) { - __table_.__rehash_multi(__u.bucket_count()); - insert(__u.begin(), __u.end()); -} +hash_multimap<_Key, _Tp, _Hash, _Pred, _Alloc>::hash_multimap(const hash_multimap& __u) : __table_(__u.__table_) {} template template diff --git a/libcxx/include/ext/hash_set b/libcxx/include/ext/hash_set index 62a7a0dbcffb9..2796774fee24a 100644 --- a/libcxx/include/ext/hash_set +++ b/libcxx/include/ext/hash_set @@ -534,10 +534,7 @@ hash_multiset<_Value, _Hash, _Pred, _Alloc>::hash_multiset( } template -hash_multiset<_Value, _Hash, _Pred, _Alloc>::hash_multiset(const hash_multiset& __u) : __table_(__u.__table_) { - __table_.__rehash_multi(__u.bucket_count()); - insert(__u.begin(), __u.end()); -} +hash_multiset<_Value, _Hash, _Pred, _Alloc>::hash_multiset(const hash_multiset& __u) : __table_(__u.__table_) {} template template diff --git a/libcxx/include/forward_list b/libcxx/include/forward_list index 6daa7fbbc03c2..0a0bfa7a7f037 100644 --- a/libcxx/include/forward_list +++ 
b/libcxx/include/forward_list @@ -680,7 +680,7 @@ public: # endif _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI forward_list(size_type __n, const value_type& __v); - template <__enable_if_t<__is_allocator<_Alloc>::value, int> = 0> + template <__enable_if_t<__is_allocator_v<_Alloc>, int> = 0> _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI forward_list(size_type __n, const value_type& __v, const allocator_type& __a) : __base(__a) { @@ -920,20 +920,20 @@ private: template >, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> forward_list(_InputIterator, _InputIterator) -> forward_list<__iter_value_type<_InputIterator>, _Alloc>; template ::value>, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> forward_list(_InputIterator, _InputIterator, _Alloc) -> forward_list<__iter_value_type<_InputIterator>, _Alloc>; # endif # if _LIBCPP_STD_VER >= 23 template >, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> forward_list(from_range_t, _Range&&, _Alloc = _Alloc()) -> forward_list, _Alloc>; # endif diff --git a/libcxx/include/istream b/libcxx/include/istream index 93def61a8b477..7f15521f91a8a 100644 --- a/libcxx/include/istream +++ b/libcxx/include/istream @@ -70,6 +70,7 @@ public: basic_istream& getline(char_type* s, streamsize n, char_type delim); basic_istream& ignore(streamsize n = 1, int_type delim = traits_type::eof()); + basic_istream& ignore(streamsize n, char_type delim); // Since C++26, implemented as a DR int_type peek(); basic_istream& read (char_type* s, streamsize n); streamsize readsome(char_type* s, streamsize n); @@ -172,6 +173,7 @@ template # include <__type_traits/conjunction.h> # include <__type_traits/enable_if.h> # include <__type_traits/is_base_of.h> +# include <__type_traits/is_same.h> # include <__type_traits/make_unsigned.h> # include <__utility/declval.h> # include <__utility/forward.h> @@ -292,6 +294,10 @@ public: basic_istream& getline(char_type* __s, streamsize __n, char_type __dlm); basic_istream& ignore(streamsize __n = 1, int_type __dlm = traits_type::eof()); + template ::value, int> = 0> + _LIBCPP_HIDE_FROM_ABI basic_istream& ignore(streamsize __n, char_type __delim) { + return ignore(__n, traits_type::to_int_type(__delim)); + } int_type peek(); basic_istream& read(char_type* __s, streamsize __n); streamsize readsome(char_type* __s, streamsize __n); diff --git a/libcxx/include/list b/libcxx/include/list index 2896231203d9b..5d8067545b9c7 100644 --- a/libcxx/include/list +++ b/libcxx/include/list @@ -724,7 +724,7 @@ public: _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI explicit list(size_type __n, const allocator_type& __a); # endif _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI list(size_type __n, const value_type& __x); - template <__enable_if_t<__is_allocator<_Alloc>::value, int> = 0> + template <__enable_if_t<__is_allocator_v<_Alloc>, int> = 0> _LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI list(size_type __n, const value_type& __x, const allocator_type& __a) : __base(__a) { @@ -1002,20 +1002,20 @@ private: template >, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> list(_InputIterator, _InputIterator) -> list<__iter_value_type<_InputIterator>, _Alloc>; template ::value>, - 
class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> list(_InputIterator, _InputIterator, _Alloc) -> list<__iter_value_type<_InputIterator>, _Alloc>; # endif # if _LIBCPP_STD_VER >= 23 template >, - class = enable_if_t<__is_allocator<_Alloc>::value> > + class = enable_if_t<__is_allocator_v<_Alloc>>> list(from_range_t, _Range&&, _Alloc = _Alloc()) -> list, _Alloc>; # endif diff --git a/libcxx/include/map b/libcxx/include/map index 5f906bb0106c1..035f913bd3497 100644 --- a/libcxx/include/map +++ b/libcxx/include/map @@ -600,7 +600,10 @@ erase_if(multimap& c, Predicate pred); // C++20 # include <__ranges/from_range.h> # include <__tree> # include <__type_traits/container_traits.h> +# include <__type_traits/desugars_to.h> # include <__type_traits/is_allocator.h> +# include <__type_traits/is_convertible.h> +# include <__type_traits/make_transparent.h> # include <__type_traits/remove_const.h> # include <__type_traits/type_identity.h> # include <__utility/forward.h> @@ -666,6 +669,11 @@ public: # endif }; +template +struct __make_transparent<__map_value_compare<_Key, _MapValueT, _Compare> > { + using type _LIBCPP_NODEBUG = __map_value_compare<_Key, _MapValueT, __make_transparent_t<_Compare> >; +}; + # if _LIBCPP_STD_VER >= 14 template struct __lazy_synth_three_way_comparator<__map_value_compare<_Key, _MapValueT, _Compare>, _MapValueT, _MapValueT> { @@ -1048,6 +1056,24 @@ public: _LIBCPP_HIDE_FROM_ABI mapped_type& operator[](key_type&& __k); # endif + template >, int> = 0> + _LIBCPP_HIDE_FROM_ABI mapped_type& at(_Arg&& __arg) { + auto [_, __child] = __tree_.__find_equal(__arg); + if (__child == nullptr) + std::__throw_out_of_range("map::at: key not found"); + return static_cast<__node_pointer>(__child)->__get_value().second; + } + + template >, int> = 0> + _LIBCPP_HIDE_FROM_ABI const mapped_type& at(_Arg&& __arg) const { + auto [_, __child] = __tree_.__find_equal(__arg); + if (__child == nullptr) + std::__throw_out_of_range("map::at: key not found"); + return static_cast<__node_pointer>(__child)->__get_value().second; + } + _LIBCPP_HIDE_FROM_ABI mapped_type& at(const key_type& __k); _LIBCPP_HIDE_FROM_ABI const mapped_type& at(const key_type& __k) const; @@ -1242,11 +1268,15 @@ public: _LIBCPP_HIDE_FROM_ABI iterator find(const key_type& __k) { return __tree_.find(__k); } _LIBCPP_HIDE_FROM_ABI const_iterator find(const key_type& __k) const { return __tree_.find(__k); } # if _LIBCPP_STD_VER >= 14 - template , int> = 0> + template || __is_transparently_comparable_v<_Compare, key_type, _K2>, + int> = 0> _LIBCPP_HIDE_FROM_ABI iterator find(const _K2& __k) { return __tree_.find(__k); } - template , int> = 0> + template || __is_transparently_comparable_v<_Compare, key_type, _K2>, + int> = 0> _LIBCPP_HIDE_FROM_ABI const_iterator find(const _K2& __k) const { return __tree_.find(__k); } @@ -1262,7 +1292,9 @@ public: # if _LIBCPP_STD_VER >= 20 _LIBCPP_HIDE_FROM_ABI bool contains(const key_type& __k) const { return find(__k) != end(); } - template , int> = 0> + template || __is_transparently_comparable_v<_Compare, key_type, _K2>, + int> = 0> _LIBCPP_HIDE_FROM_ABI bool contains(const _K2& __k) const { return find(__k) != end(); } @@ -1271,12 +1303,16 @@ public: _LIBCPP_HIDE_FROM_ABI iterator lower_bound(const key_type& __k) { return __tree_.lower_bound(__k); } _LIBCPP_HIDE_FROM_ABI const_iterator lower_bound(const key_type& __k) const { return __tree_.lower_bound(__k); } # if _LIBCPP_STD_VER >= 14 - template , int> = 0> + template || 
__is_transparently_comparable_v<_Compare, key_type, _K2>, + int> = 0> _LIBCPP_HIDE_FROM_ABI iterator lower_bound(const _K2& __k) { return __tree_.lower_bound(__k); } - template , int> = 0> + template || __is_transparently_comparable_v<_Compare, key_type, _K2>, + int> = 0> _LIBCPP_HIDE_FROM_ABI const_iterator lower_bound(const _K2& __k) const { return __tree_.lower_bound(__k); } @@ -1285,11 +1321,15 @@ public: _LIBCPP_HIDE_FROM_ABI iterator upper_bound(const key_type& __k) { return __tree_.upper_bound(__k); } _LIBCPP_HIDE_FROM_ABI const_iterator upper_bound(const key_type& __k) const { return __tree_.upper_bound(__k); } # if _LIBCPP_STD_VER >= 14 - template , int> = 0> + template || __is_transparently_comparable_v<_Compare, key_type, _K2>, + int> = 0> _LIBCPP_HIDE_FROM_ABI iterator upper_bound(const _K2& __k) { return __tree_.upper_bound(__k); } - template , int> = 0> + template || __is_transparently_comparable_v<_Compare, key_type, _K2>, + int> = 0> _LIBCPP_HIDE_FROM_ABI const_iterator upper_bound(const _K2& __k) const { return __tree_.upper_bound(__k); } @@ -1332,8 +1372,8 @@ template >, class _Allocator = allocator<__iter_to_alloc_type<_InputIterator>>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>, - class = enable_if_t::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> map(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocator()) -> map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare, _Allocator>; @@ -1341,8 +1381,8 @@ map(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocat template >, class _Allocator = allocator<__range_to_alloc_type<_Range>>, - class = enable_if_t::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> map(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> map<__range_key_type<_Range>, __range_mapped_type<_Range>, _Compare, _Allocator>; # endif @@ -1351,15 +1391,15 @@ template >, class _Allocator = allocator>, - class = enable_if_t::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> map(initializer_list>, _Compare = _Compare(), _Allocator = _Allocator()) -> map, _Tp, _Compare, _Allocator>; template ::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t<__is_allocator_v<_Allocator>>> map(_InputIterator, _InputIterator, _Allocator) -> map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, @@ -1367,12 +1407,12 @@ map(_InputIterator, _InputIterator, _Allocator) _Allocator>; # if _LIBCPP_STD_VER >= 23 -template ::value, void>> +template >> map(from_range_t, _Range&&, _Allocator) -> map<__range_key_type<_Range>, __range_mapped_type<_Range>, less<__range_key_type<_Range>>, _Allocator>; # endif -template ::value, void>> +template >> map(initializer_list>, _Allocator) -> map, _Tp, less>, _Allocator>; # endif @@ -1889,8 +1929,8 @@ template >, class _Allocator = allocator<__iter_to_alloc_type<_InputIterator>>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>, - class = enable_if_t::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> 
multimap(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocator()) -> multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare, _Allocator>; @@ -1898,8 +1938,8 @@ multimap(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Al template >, class _Allocator = allocator<__range_to_alloc_type<_Range>>, - class = enable_if_t::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> multimap(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> multimap<__range_key_type<_Range>, __range_mapped_type<_Range>, _Compare, _Allocator>; # endif @@ -1908,15 +1948,15 @@ template >, class _Allocator = allocator>, - class = enable_if_t::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> multimap(initializer_list>, _Compare = _Compare(), _Allocator = _Allocator()) -> multimap, _Tp, _Compare, _Allocator>; template ::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t<__is_allocator_v<_Allocator>>> multimap(_InputIterator, _InputIterator, _Allocator) -> multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, @@ -1924,12 +1964,12 @@ multimap(_InputIterator, _InputIterator, _Allocator) _Allocator>; # if _LIBCPP_STD_VER >= 23 -template ::value, void>> +template >> multimap(from_range_t, _Range&&, _Allocator) -> multimap<__range_key_type<_Range>, __range_mapped_type<_Range>, less<__range_key_type<_Range>>, _Allocator>; # endif -template ::value, void>> +template >> multimap(initializer_list>, _Allocator) -> multimap, _Tp, less>, _Allocator>; # endif diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in index 63cf8e847751f..894093b409e11 100644 --- a/libcxx/include/module.modulemap.in +++ b/libcxx/include/module.modulemap.in @@ -103,10 +103,7 @@ module std_core [system] { header "__type_traits/is_aggregate.h" export std_core.type_traits.integral_constant } - module is_allocator { - header "__type_traits/is_allocator.h" - export std_core.type_traits.integral_constant - } + module is_allocator { header "__type_traits/is_allocator.h" } module is_always_bitcastable { header "__type_traits/is_always_bitcastable.h" export std_core.type_traits.integral_constant @@ -203,6 +200,7 @@ module std_core [system] { header "__type_traits/is_fundamental.h" export std_core.type_traits.integral_constant } + module is_generic_transparent_comparator { header "__type_traits/is_generic_transparent_comparator.h" } module is_implicit_lifetime { header "__type_traits/is_implicit_lifetime.h" export std_core.type_traits.integral_constant @@ -356,6 +354,7 @@ module std_core [system] { module make_32_64_or_128_bit { header "__type_traits/make_32_64_or_128_bit.h" } module make_const_lvalue_ref { header "__type_traits/make_const_lvalue_ref.h" } module make_signed { header "__type_traits/make_signed.h" } + module make_transparent { header "__type_traits/make_transparent.h" } module make_unsigned { header "__type_traits/make_unsigned.h" } module maybe_const { header "__type_traits/maybe_const.h" } module nat { header "__type_traits/nat.h" } @@ -885,6 +884,7 @@ module std [system] { module check_memory_order { header "__atomic/check_memory_order.h" } module contention_t { header "__atomic/contention_t.h" } module fence { header "__atomic/fence.h" } + module 
floating_point_helper { header "__atomic/floating_point_helper.h" } module is_always_lock_free { header "__atomic/is_always_lock_free.h" } module kill_dependency { header "__atomic/kill_dependency.h" } module memory_order { header "__atomic/memory_order.h" } @@ -1227,6 +1227,7 @@ module std [system] { header "deque" export * export std.iterator.reverse_iterator + export std.algorithm.simd_utils // This is a workaround for https://llvm.org/PR120108. } module exception { @@ -1849,7 +1850,10 @@ module std [system] { module ranges { module access { header "__ranges/access.h" } - module all { header "__ranges/all.h" } + module all { + header "__ranges/all.h" + export std.ranges.ref_view + } module as_rvalue_view { header "__ranges/as_rvalue_view.h" } module chunk_by_view { header "__ranges/chunk_by_view.h" @@ -2237,6 +2241,7 @@ module std [system] { header "vector" export std.iterator.reverse_iterator export * + export std.algorithm.simd_utils // This is a workaround for https://llvm.org/PR120108. } // Experimental C++ Standard Library interfaces diff --git a/libcxx/include/mutex b/libcxx/include/mutex index 58474e0ca2b7a..0b81f1bb1c8a6 100644 --- a/libcxx/include/mutex +++ b/libcxx/include/mutex @@ -500,6 +500,10 @@ _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS +# if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 23 +# include +# endif + # if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20 # include # include @@ -513,7 +517,6 @@ _LIBCPP_POP_MACROS # include # include # include -# include # endif #endif // __cplusplus < 201103L && defined(_LIBCPP_USE_FROZEN_CXX03_HEADERS) diff --git a/libcxx/include/optional b/libcxx/include/optional index 39fcaa2c2ec18..ef1bfd3ec44c0 100644 --- a/libcxx/include/optional +++ b/libcxx/include/optional @@ -119,7 +119,7 @@ namespace std { constexpr explicit optional(in_place_t, Args &&...); template constexpr explicit optional(in_place_t, initializer_list, Args &&...); - template + template> constexpr explicit(see-below) optional(U &&); template explicit(see-below) optional(const optional &); // constexpr in C++20 @@ -133,7 +133,7 @@ namespace std { optional &operator=(nullopt_t) noexcept; // constexpr in C++20 constexpr optional &operator=(const optional &); constexpr optional &operator=(optional &&) noexcept(see below); - template optional &operator=(U &&); // constexpr in C++20 + template> optional &operator=(U &&); // constexpr in C++20 template optional &operator=(const optional &); // constexpr in C++20 template optional &operator=(optional &&); // constexpr in C++20 template T& emplace(Args &&...); // constexpr in C++20 @@ -161,8 +161,8 @@ namespace std { constexpr T &value() &; constexpr T &&value() &&; constexpr const T &&value() const &&; - template constexpr T value_or(U &&) const &; - template constexpr T value_or(U &&) &&; + template> constexpr T value_or(U &&) const &; + template> constexpr T value_or(U &&) &&; // [optional.monadic], monadic operations template constexpr auto and_then(F&& f) &; // since C++23 @@ -730,7 +730,8 @@ public: enable_if_t<_CheckOptionalArgsCtor<_Up>::template __enable_implicit<_Up>(), int> = 0> _LIBCPP_HIDE_FROM_ABI constexpr optional(_Up&& __v) : __base(in_place, std::forward<_Up>(__v)) {} - template ::template __enable_explicit<_Up>(), int> = 0> + template , + enable_if_t<_CheckOptionalArgsCtor<_Up>::template __enable_explicit<_Up>(), int> = 0> _LIBCPP_HIDE_FROM_ABI constexpr explicit optional(_Up&& __v) : __base(in_place, std::forward<_Up>(__v)) {} // LWG2756: conditionally 
explicit conversion from const optional<_Up>& @@ -771,7 +772,7 @@ public: _LIBCPP_HIDE_FROM_ABI constexpr optional& operator=(optional&&) = default; // LWG2756 - template , enable_if_t<_And<_IsNotSame<__remove_cvref_t<_Up>, optional>, _Or<_IsNotSame<__remove_cvref_t<_Up>, value_type>, _Not>>, is_constructible, @@ -919,14 +920,14 @@ public: return std::move(this->__get()); } - template + template > _LIBCPP_HIDE_FROM_ABI constexpr value_type value_or(_Up&& __v) const& { static_assert(is_copy_constructible_v, "optional::value_or: T must be copy constructible"); static_assert(is_convertible_v<_Up, value_type>, "optional::value_or: U must be convertible to T"); return this->has_value() ? this->__get() : static_cast(std::forward<_Up>(__v)); } - template + template > _LIBCPP_HIDE_FROM_ABI constexpr value_type value_or(_Up&& __v) && { static_assert(is_move_constructible_v, "optional::value_or: T must be move constructible"); static_assert(is_convertible_v<_Up, value_type>, "optional::value_or: U must be convertible to T"); diff --git a/libcxx/include/queue b/libcxx/include/queue index c33afc892dda8..65936250c66a1 100644 --- a/libcxx/include/queue +++ b/libcxx/include/queue @@ -437,12 +437,12 @@ public: }; # if _LIBCPP_STD_VER >= 17 -template ::value> > +template >> queue(_Container) -> queue; template ::value>, + class = enable_if_t>, class = enable_if_t::value> > queue(_Container, _Alloc) -> queue; # endif @@ -457,11 +457,11 @@ queue(from_range_t, _Range&&) -> queue>; template ::value, int> = 0, - __enable_if_t<__is_allocator<_Alloc>::value, int> = 0> + __enable_if_t<__is_allocator_v<_Alloc>, int> = 0> queue(_InputIterator, _InputIterator, _Alloc) -> queue<__iter_value_type<_InputIterator>, deque<__iter_value_type<_InputIterator>, _Alloc>>; -template ::value, int> = 0> +template , int> = 0> queue(from_range_t, _Range&&, _Alloc) -> queue, deque, _Alloc>>; # endif @@ -700,31 +700,31 @@ public: # if _LIBCPP_STD_VER >= 17 template ::value>, - class = enable_if_t::value> > + class = enable_if_t>, + class = enable_if_t>> priority_queue(_Compare, _Container) -> priority_queue; template >, class _Container = vector<__iter_value_type<_InputIterator>>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t::value>, - class = enable_if_t::value> > + class = enable_if_t>, + class = enable_if_t>> priority_queue(_InputIterator, _InputIterator, _Compare = _Compare(), _Container = _Container()) -> priority_queue<__iter_value_type<_InputIterator>, _Container, _Compare>; template ::value>, - class = enable_if_t::value>, - class = enable_if_t::value> > + class = enable_if_t>, + class = enable_if_t>, + class = enable_if_t::value>> priority_queue(_Compare, _Container, _Alloc) -> priority_queue; template ::value>, - class = enable_if_t<__is_allocator<_Allocator>::value> > + class = enable_if_t<__is_allocator_v<_Allocator>>> priority_queue(_InputIterator, _InputIterator, _Allocator) -> priority_queue<__iter_value_type<_InputIterator>, vector<__iter_value_type<_InputIterator>, _Allocator>, @@ -734,8 +734,8 @@ template ::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value> > + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> priority_queue(_InputIterator, _InputIterator, _Compare, _Allocator) -> priority_queue<__iter_value_type<_InputIterator>, vector<__iter_value_type<_InputIterator>, _Allocator>, @@ -746,8 +746,8 @@ template ::value>, - class = enable_if_t::value>, - class = enable_if_t::value>, + class = 
enable_if_t>, + class = enable_if_t>, class = enable_if_t::value> > priority_queue(_InputIterator, _InputIterator, _Compare, _Container, _Alloc) -> priority_queue; @@ -757,19 +757,19 @@ priority_queue(_InputIterator, _InputIterator, _Compare, _Container, _Alloc) template >, - class = enable_if_t::value>> + class = enable_if_t>> priority_queue(from_range_t, _Range&&, _Compare = _Compare()) -> priority_queue, vector>, _Compare>; template ::value>, - class = enable_if_t<__is_allocator<_Alloc>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Alloc>>> priority_queue(from_range_t, _Range&&, _Compare, _Alloc) -> priority_queue, vector, _Alloc>, _Compare>; -template ::value>> +template >> priority_queue(from_range_t, _Range&&, _Alloc) -> priority_queue, vector, _Alloc>>; diff --git a/libcxx/include/set b/libcxx/include/set index 81c3de7343ee5..75529e7bac6ff 100644 --- a/libcxx/include/set +++ b/libcxx/include/set @@ -899,8 +899,8 @@ template >, class _Allocator = allocator<__iter_value_type<_InputIterator>>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>, - class = enable_if_t::value, void>> + class = enable_if_t<__is_allocator_v<_Allocator>>, + class = enable_if_t>> set(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocator()) -> set<__iter_value_type<_InputIterator>, _Compare, _Allocator>; @@ -908,8 +908,8 @@ set(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocat template >, class _Allocator = allocator>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>, - class = enable_if_t::value, void>> + class = enable_if_t<__is_allocator_v<_Allocator>>, + class = enable_if_t>> set(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> set, _Compare, _Allocator>; # endif @@ -917,24 +917,24 @@ set(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) template , class _Allocator = allocator<_Key>, - class = enable_if_t::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> set(initializer_list<_Key>, _Compare = _Compare(), _Allocator = _Allocator()) -> set<_Key, _Compare, _Allocator>; template ::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t<__is_allocator_v<_Allocator>>> set(_InputIterator, _InputIterator, _Allocator) -> set<__iter_value_type<_InputIterator>, less<__iter_value_type<_InputIterator>>, _Allocator>; # if _LIBCPP_STD_VER >= 23 -template ::value, void>> +template >> set(from_range_t, _Range&&, _Allocator) -> set, less>, _Allocator>; # endif -template ::value, void>> +template >> set(initializer_list<_Key>, _Allocator) -> set<_Key, less<_Key>, _Allocator>; # endif @@ -1351,8 +1351,8 @@ template >, class _Allocator = allocator<__iter_value_type<_InputIterator>>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>, - class = enable_if_t::value, void>> + class = enable_if_t<__is_allocator_v<_Allocator>>, + class = enable_if_t>> multiset(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocator()) -> multiset<__iter_value_type<_InputIterator>, _Compare, _Allocator>; @@ -1360,8 +1360,8 @@ multiset(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Al template >, class _Allocator = allocator>, - class 
= enable_if_t<__is_allocator<_Allocator>::value, void>, - class = enable_if_t::value, void>> + class = enable_if_t<__is_allocator_v<_Allocator>>, + class = enable_if_t>> multiset(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> multiset, _Compare, _Allocator>; # endif @@ -1369,25 +1369,25 @@ multiset(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator( template , class _Allocator = allocator<_Key>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>, - class = enable_if_t::value, void>> + class = enable_if_t<__is_allocator_v<_Allocator>>, + class = enable_if_t>> multiset(initializer_list<_Key>, _Compare = _Compare(), _Allocator = _Allocator()) -> multiset<_Key, _Compare, _Allocator>; template ::value, void>, - class = enable_if_t<__is_allocator<_Allocator>::value, void>> + class = enable_if_t<__is_allocator_v<_Allocator>>> multiset(_InputIterator, _InputIterator, _Allocator) -> multiset<__iter_value_type<_InputIterator>, less<__iter_value_type<_InputIterator>>, _Allocator>; # if _LIBCPP_STD_VER >= 23 -template ::value, void>> +template >> multiset(from_range_t, _Range&&, _Allocator) -> multiset, less>, _Allocator>; # endif -template ::value, void>> +template >> multiset(initializer_list<_Key>, _Allocator) -> multiset<_Key, less<_Key>, _Allocator>; # endif diff --git a/libcxx/include/sstream b/libcxx/include/sstream index 682a28fd4dbb8..c42dbff9eee5f 100644 --- a/libcxx/include/sstream +++ b/libcxx/include/sstream @@ -484,7 +484,7 @@ public: # if _LIBCPP_STD_VER >= 20 template - requires __is_allocator<_SAlloc>::value + requires __is_allocator_v<_SAlloc> _LIBCPP_HIDE_FROM_ABI basic_string str(const _SAlloc& __sa) const { return basic_string<_CharT, _Traits, _SAlloc>(view(), __sa); } @@ -963,7 +963,7 @@ public: # if _LIBCPP_STD_VER >= 20 template - requires __is_allocator<_SAlloc>::value + requires __is_allocator_v<_SAlloc> _LIBCPP_HIDE_FROM_ABI basic_string str(const _SAlloc& __sa) const { return __sb_.str(__sa); } @@ -1101,7 +1101,7 @@ public: # if _LIBCPP_STD_VER >= 20 template - requires __is_allocator<_SAlloc>::value + requires __is_allocator_v<_SAlloc> _LIBCPP_HIDE_FROM_ABI basic_string str(const _SAlloc& __sa) const { return __sb_.str(__sa); } @@ -1241,7 +1241,7 @@ public: # if _LIBCPP_STD_VER >= 20 template - requires __is_allocator<_SAlloc>::value + requires __is_allocator_v<_SAlloc> _LIBCPP_HIDE_FROM_ABI basic_string str(const _SAlloc& __sa) const { return __sb_.str(__sa); } diff --git a/libcxx/include/stack b/libcxx/include/stack index 985813fcf578a..3d7187ddb1630 100644 --- a/libcxx/include/stack +++ b/libcxx/include/stack @@ -294,12 +294,12 @@ public: }; # if _LIBCPP_STD_VER >= 17 -template ::value> > +template >> stack(_Container) -> stack; template ::value>, + class = enable_if_t>, class = enable_if_t::value> > stack(_Container, _Alloc) -> stack; # endif @@ -314,11 +314,11 @@ stack(from_range_t, _Range&&) -> stack>; template ::value, int> = 0, - __enable_if_t<__is_allocator<_Alloc>::value, int> = 0> + __enable_if_t<__is_allocator_v<_Alloc>, int> = 0> stack(_InputIterator, _InputIterator, _Alloc) -> stack<__iter_value_type<_InputIterator>, deque<__iter_value_type<_InputIterator>, _Alloc>>; -template ::value, int> = 0> +template , int> = 0> stack(from_range_t, _Range&&, _Alloc) -> stack, deque, _Alloc>>; diff --git a/libcxx/include/string b/libcxx/include/string index 081467edfe3fb..dc562e0207630 100644 --- a/libcxx/include/string +++ b/libcxx/include/string @@ -280,6 +280,8 @@ public: basic_string substr(size_type pos = 
0, size_type n = npos) const; // constexpr in C++20, removed in C++23 basic_string substr(size_type pos = 0, size_type n = npos) const&; // since C++23 constexpr basic_string substr(size_type pos = 0, size_type n = npos) &&; // since C++23 + constexpr basic_string_view subview(size_type pos = 0, + size_type n = npos) const; // since C++26 void swap(basic_string& str) noexcept(allocator_traits::propagate_on_container_swap::value || allocator_traits::is_always_equal::value); // C++17, constexpr since C++20 @@ -598,6 +600,7 @@ basic_string operator""s( const char32_t *str, size_t len ); # include <__debug_utils/sanitizers.h> # include <__format/enable_insertable.h> # include <__functional/hash.h> +# include <__functional/is_transparent.h> # include <__functional/unary_function.h> # include <__fwd/string.h> # include <__iterator/bounded_iter.h> @@ -626,6 +629,7 @@ basic_string operator""s( const char32_t *str, size_t len ); # include <__type_traits/is_allocator.h> # include <__type_traits/is_array.h> # include <__type_traits/is_convertible.h> +# include <__type_traits/is_generic_transparent_comparator.h> # include <__type_traits/is_nothrow_assignable.h> # include <__type_traits/is_nothrow_constructible.h> # include <__type_traits/is_replaceable.h> @@ -1055,13 +1059,13 @@ public: } # endif // _LIBCPP_CXX03_LANG - template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0> + template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(const _CharT* _LIBCPP_DIAGNOSE_NULLPTR __s) { _LIBCPP_ASSERT_NON_NULL(__s != nullptr, "basic_string(const char*) detected nullptr"); __init(__s, traits_type::length(__s)); } - template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0> + template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(const _CharT* _LIBCPP_DIAGNOSE_NULLPTR __s, const _Allocator& __a) : __alloc_(__a) { @@ -1110,7 +1114,7 @@ public: } # endif - template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0> + template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(size_type __n, _CharT __c, const _Allocator& __a) : __alloc_(__a) { __init(__n, __c); @@ -1758,6 +1762,11 @@ public: return basic_string(std::move(*this), __pos, __n); } # endif +# if _LIBCPP_STD_VER >= 26 + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr __self_view subview(size_type __pos = 0, size_type __n = npos) const { + return __self_view(*this).subview(__pos, __n); + } +# endif _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void swap(basic_string& __str) # if _LIBCPP_STD_VER >= 14 @@ -2545,7 +2554,7 @@ _LIBCPP_STRING_V1_EXTERN_TEMPLATE_LIST(_LIBCPP_DECLARE, wchar_t) # endif # undef _LIBCPP_DECLARE -# if _LIBCPP_STD_VER <= 17 || !__has_builtin(__builtin_lt_synthesises_from_spaceship) +# if _LIBCPP_STD_VER <= 17 || !__has_builtin(__builtin_lt_synthesizes_from_spaceship) template struct __default_three_way_comparator, basic_string<_CharT, _Traits, _Alloc> > { using __string_t _LIBCPP_NODEBUG = basic_string<_CharT, _Traits, _Alloc>; @@ -2560,26 +2569,40 @@ struct __default_three_way_comparator, bas }; # endif +template +inline const bool __is_transparently_comparable_v<_Comparator, + basic_string<_CharT, _Traits, _Alloc>, + const _CharT*, + __enable_if_t<__is_generic_transparent_comparator_v<_Comparator> > > = + true; + +template +inline const bool __is_transparently_comparable_v<_Comparator, 
+ basic_string<_CharT, _Traits, _Alloc>, + _CharT[_Np], + __enable_if_t<__is_generic_transparent_comparator_v<_Comparator> > > = + true; + # if _LIBCPP_STD_VER >= 17 template , class _Allocator = allocator<_CharT>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t<__is_allocator<_Allocator>::value> > + class = enable_if_t<__is_allocator_v<_Allocator>>> basic_string(_InputIterator, _InputIterator, _Allocator = _Allocator()) -> basic_string<_CharT, char_traits<_CharT>, _Allocator>; template , - class = enable_if_t<__is_allocator<_Allocator>::value> > + class = enable_if_t<__is_allocator_v<_Allocator>>> explicit basic_string(basic_string_view<_CharT, _Traits>, const _Allocator& = _Allocator()) -> basic_string<_CharT, _Traits, _Allocator>; template , - class = enable_if_t<__is_allocator<_Allocator>::value>, + class = enable_if_t<__is_allocator_v<_Allocator>>, class _Sz = typename allocator_traits<_Allocator>::size_type > basic_string(basic_string_view<_CharT, _Traits>, _Sz, _Sz, const _Allocator& = _Allocator()) -> basic_string<_CharT, _Traits, _Allocator>; @@ -2588,7 +2611,7 @@ basic_string(basic_string_view<_CharT, _Traits>, _Sz, _Sz, const _Allocator& = _ # if _LIBCPP_STD_VER >= 23 template >, - class = enable_if_t<__is_allocator<_Allocator>::value> > + class = enable_if_t<__is_allocator_v<_Allocator>>> basic_string(from_range_t, _Range&&, _Allocator = _Allocator()) -> basic_string, char_traits>, _Allocator>; # endif diff --git a/libcxx/include/string_view b/libcxx/include/string_view index 983e5852015e0..5ecaa3de7deba 100644 --- a/libcxx/include/string_view +++ b/libcxx/include/string_view @@ -130,6 +130,8 @@ namespace std { size_type copy(charT* s, size_type n, size_type pos = 0) const; // constexpr in C++20 constexpr basic_string_view substr(size_type pos = 0, size_type n = npos) const; + constexpr basic_string_view subview(size_type pos = 0, + size_type n = npos) const; // freestanding-deleted, since C++26 constexpr int compare(basic_string_view s) const noexcept; constexpr int compare(size_type pos1, size_type n1, basic_string_view s) const; constexpr int compare(size_type pos1, size_type n1, @@ -465,6 +467,13 @@ public: : basic_string_view(__assume_valid(), data() + __pos, std::min(__n, size() - __pos)); } +# if _LIBCPP_STD_VER >= 26 + [[nodiscard]] + _LIBCPP_HIDE_FROM_ABI constexpr basic_string_view subview(size_type __pos = 0, size_type __n = npos) const { + return substr(__pos, __n); + } +# endif + _LIBCPP_CONSTEXPR_SINCE_CXX14 int compare(basic_string_view __sv) const _NOEXCEPT { size_type __rlen = std::min(size(), __sv.size()); int __retval = _Traits::compare(data(), __sv.data(), __rlen); diff --git a/libcxx/include/unordered_map b/libcxx/include/unordered_map index 43a2245c5acc0..2afc8805cb4c7 100644 --- a/libcxx/include/unordered_map +++ b/libcxx/include/unordered_map @@ -1297,10 +1297,10 @@ template >, class _Allocator = allocator<__iter_to_alloc_type<_InputIterator>>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_map(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type = 0, @@ -1314,10 +1314,10 @@ template >, class _Pred = equal_to<__range_key_type<_Range>>, class _Allocator = allocator<__range_to_alloc_type<_Range>>, 
- class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_map(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type = 0, @@ -1332,10 +1332,10 @@ template >, class _Pred = equal_to>, class _Allocator = allocator>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_map(initializer_list>, typename allocator_traits<_Allocator>::size_type = 0, _Hash = _Hash(), @@ -1345,7 +1345,7 @@ unordered_map(initializer_list>, template ::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_map(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, @@ -1356,7 +1356,7 @@ unordered_map(_InputIterator, _InputIterator, typename allocator_traits<_Allocat template ::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_map(_InputIterator, _InputIterator, _Allocator) -> unordered_map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, @@ -1368,9 +1368,9 @@ template ::value>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_map(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, @@ -1380,7 +1380,7 @@ unordered_map(_InputIterator, _InputIterator, typename allocator_traits<_Allocat # if _LIBCPP_STD_VER >= 23 -template ::value>> +template >> unordered_map(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_map<__range_key_type<_Range>, __range_mapped_type<_Range>, @@ -1388,7 +1388,7 @@ unordered_map(from_range_t, _Range&&, typename allocator_traits<_Allocator>::siz equal_to<__range_key_type<_Range>>, _Allocator>; -template ::value>> +template >> unordered_map(from_range_t, _Range&&, _Allocator) -> unordered_map<__range_key_type<_Range>, __range_mapped_type<_Range>, @@ -1399,9 +1399,9 @@ unordered_map(from_range_t, _Range&&, _Allocator) template ::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_map(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_map<__range_key_type<_Range>, __range_mapped_type<_Range>, @@ -1411,11 +1411,11 @@ unordered_map(from_range_t, _Range&&, typename allocator_traits<_Allocator>::siz # endif -template ::value>> +template >> unordered_map(initializer_list>, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_map, _Tp, hash>, equal_to>, _Allocator>; -template ::value>> +template >> unordered_map(initializer_list>, _Allocator) -> unordered_map, _Tp, hash>, equal_to>, _Allocator>; @@ -1423,9 +1423,9 @@ template ::value>, + class = enable_if_t>, class 
= enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_map(initializer_list>, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_map, _Tp, _Hash, equal_to>, _Allocator>; # endif @@ -1992,10 +1992,10 @@ template >, class _Allocator = allocator<__iter_to_alloc_type<_InputIterator>>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multimap(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type = 0, @@ -2013,10 +2013,10 @@ template >, class _Pred = equal_to<__range_key_type<_Range>>, class _Allocator = allocator<__range_to_alloc_type<_Range>>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multimap(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type = 0, @@ -2031,10 +2031,10 @@ template >, class _Pred = equal_to>, class _Allocator = allocator>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multimap(initializer_list>, typename allocator_traits<_Allocator>::size_type = 0, _Hash = _Hash(), @@ -2045,7 +2045,7 @@ unordered_multimap(initializer_list>, template ::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multimap(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, @@ -2056,7 +2056,7 @@ unordered_multimap(_InputIterator, _InputIterator, typename allocator_traits<_Al template ::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multimap(_InputIterator, _InputIterator, _Allocator) -> unordered_multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, @@ -2068,9 +2068,9 @@ template ::value>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multimap(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, @@ -2080,7 +2080,7 @@ unordered_multimap(_InputIterator, _InputIterator, typename allocator_traits<_Al # if _LIBCPP_STD_VER >= 23 -template ::value>> +template >> unordered_multimap(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_multimap<__range_key_type<_Range>, __range_mapped_type<_Range>, @@ -2088,7 +2088,7 @@ unordered_multimap(from_range_t, _Range&&, typename allocator_traits<_Allocator> equal_to<__range_key_type<_Range>>, _Allocator>; -template ::value>> +template >> 
unordered_multimap(from_range_t, _Range&&, _Allocator) -> unordered_multimap<__range_key_type<_Range>, __range_mapped_type<_Range>, @@ -2099,9 +2099,9 @@ unordered_multimap(from_range_t, _Range&&, _Allocator) template ::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multimap(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_multimap<__range_key_type<_Range>, __range_mapped_type<_Range>, @@ -2111,7 +2111,7 @@ unordered_multimap(from_range_t, _Range&&, typename allocator_traits<_Allocator> # endif -template ::value>> +template >> unordered_multimap(initializer_list>, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_multimap, _Tp, @@ -2119,7 +2119,7 @@ unordered_multimap(initializer_list>, typename allocator_traits< equal_to>, _Allocator>; -template ::value>> +template >> unordered_multimap(initializer_list>, _Allocator) -> unordered_multimap, _Tp, @@ -2131,9 +2131,9 @@ template ::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multimap( initializer_list>, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_multimap, _Tp, _Hash, equal_to>, _Allocator>; diff --git a/libcxx/include/unordered_set b/libcxx/include/unordered_set index c6ee0ffdec6af..6b81fc318e3a1 100644 --- a/libcxx/include/unordered_set +++ b/libcxx/include/unordered_set @@ -917,10 +917,10 @@ template >, class _Allocator = allocator<__iter_value_type<_InputIterator>>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_set(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type = 0, @@ -933,10 +933,10 @@ template >, class _Pred = equal_to>, class _Allocator = allocator>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_set(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type = 0, @@ -950,10 +950,10 @@ template , class _Pred = equal_to<_Tp>, class _Allocator = allocator<_Tp>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_set(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type = 0, _Hash = _Hash(), @@ -963,7 +963,7 @@ unordered_set(initializer_list<_Tp>, template ::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_set(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_set<__iter_value_type<_InputIterator>, hash<__iter_value_type<_InputIterator>>, @@ -974,22 +974,22 @@ template ::value>, - class = enable_if_t::value>, + class = enable_if_t>, class = 
enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_set(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_set<__iter_value_type<_InputIterator>, _Hash, equal_to<__iter_value_type<_InputIterator>>, _Allocator>; # if _LIBCPP_STD_VER >= 23 -template ::value>> +template >> unordered_set(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_set, hash>, equal_to>, _Allocator>; -template ::value>> +template >> unordered_set(from_range_t, _Range&&, _Allocator) -> unordered_set, hash>, @@ -999,24 +999,24 @@ unordered_set(from_range_t, _Range&&, _Allocator) template ::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_set(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_set, _Hash, equal_to>, _Allocator>; # endif -template ::value>> +template >> unordered_set(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_set<_Tp, hash<_Tp>, equal_to<_Tp>, _Allocator>; template ::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_set(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_set<_Tp, _Hash, equal_to<_Tp>, _Allocator>; # endif @@ -1502,10 +1502,10 @@ template >, class _Allocator = allocator<__iter_value_type<_InputIterator>>, class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multiset( _InputIterator, _InputIterator, @@ -1519,10 +1519,10 @@ template >, class _Pred = equal_to>, class _Allocator = allocator>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multiset( from_range_t, _Range&&, @@ -1536,10 +1536,10 @@ template , class _Pred = equal_to<_Tp>, class _Allocator = allocator<_Tp>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t>, + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multiset(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type = 0, _Hash = _Hash(), @@ -1549,7 +1549,7 @@ unordered_multiset(initializer_list<_Tp>, template ::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multiset(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_multiset<__iter_value_type<_InputIterator>, hash<__iter_value_type<_InputIterator>>, @@ -1560,9 +1560,9 @@ template ::value>, - class = enable_if_t::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = 
enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multiset(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_multiset<__iter_value_type<_InputIterator>, _Hash, @@ -1571,14 +1571,14 @@ unordered_multiset(_InputIterator, _InputIterator, typename allocator_traits<_Al # if _LIBCPP_STD_VER >= 23 -template ::value>> +template >> unordered_multiset(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_multiset, hash>, equal_to>, _Allocator>; -template ::value>> +template >> unordered_multiset(from_range_t, _Range&&, _Allocator) -> unordered_multiset, hash>, @@ -1588,24 +1588,24 @@ unordered_multiset(from_range_t, _Range&&, _Allocator) template ::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multiset(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_multiset, _Hash, equal_to>, _Allocator>; # endif -template ::value>> +template >> unordered_multiset(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type, _Allocator) -> unordered_multiset<_Tp, hash<_Tp>, equal_to<_Tp>, _Allocator>; template ::value>, + class = enable_if_t>, class = enable_if_t::value>, - class = enable_if_t<__is_allocator<_Allocator>::value>> + class = enable_if_t<__is_allocator_v<_Allocator>>> unordered_multiset(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator) -> unordered_multiset<_Tp, _Hash, equal_to<_Tp>, _Allocator>; # endif diff --git a/libcxx/include/version b/libcxx/include/version index 16917a3bd9ddd..a132f0808aeb8 100644 --- a/libcxx/include/version +++ b/libcxx/include/version @@ -245,6 +245,7 @@ __cpp_lib_starts_ends_with 201711L __cpp_lib_string_contains 202011L __cpp_lib_string_resize_and_overwrite 202110L +__cpp_lib_string_subview 202506L __cpp_lib_string_udls 201304L __cpp_lib_string_view 202403L 201803L // C++20 @@ -599,6 +600,7 @@ __cpp_lib_void_t 201411L # define __cpp_lib_span_at 202311L # define __cpp_lib_span_initializer_list 202311L # define __cpp_lib_sstream_from_string_view 202306L +# define __cpp_lib_string_subview 202506L # undef __cpp_lib_string_view # define __cpp_lib_string_view 202403L // # define __cpp_lib_submdspan 202306L diff --git a/libcxx/lib/abi/CHANGELOG.TXT b/libcxx/lib/abi/CHANGELOG.TXT index 8c1841648f821..968dc7a22a8c7 100644 --- a/libcxx/lib/abi/CHANGELOG.TXT +++ b/libcxx/lib/abi/CHANGELOG.TXT @@ -12,6 +12,21 @@ To generate a summary, re-generate the new ABI list using the New entries should be added directly below the "Version" header. +------------ +Version 22.0 +------------ + +* [libc++] Remove __time_get_storage::{__analyze,init} from the ABI + + These functions have never been used outside the dylib, so there is no point in exporting them. 
+ + All platforms + ------------- + Symbol removed: _ZNSt3__118__time_get_storageIcE4initERKNS_5ctypeIcEE + Symbol removed: _ZNSt3__118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE + Symbol removed: _ZNSt3__118__time_get_storageIwE4initERKNS_5ctypeIwEE + Symbol removed: _ZNSt3__118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE + ------------ Version 21.0 ------------ diff --git a/libcxx/lib/abi/arm64-apple-darwin.libcxxabi.v1.stable.exceptions.nonew.abilist b/libcxx/lib/abi/arm64-apple-darwin.libcxxabi.v1.stable.exceptions.nonew.abilist index 162757c7e37ec..3a1d8950c2db0 100644 --- a/libcxx/lib/abi/arm64-apple-darwin.libcxxabi.v1.stable.exceptions.nonew.abilist +++ b/libcxx/lib/abi/arm64-apple-darwin.libcxxabi.v1.stable.exceptions.nonew.abilist @@ -1515,14 +1515,10 @@ {'is_defined': True, 'name': '__ZNSt3__117moneypunct_bynameIwLb0EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__117moneypunct_bynameIwLb1EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__get_ostream_fileERNS_13basic_ostreamIcNS_11char_traitsIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcE4initERKNS_5ctypeIcEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcEC2EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwE4initERKNS_5ctypeIwEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwEC2EPKc', 'type': 'FUNC'} diff --git a/libcxx/lib/abi/i686-linux-android21.libcxxabi.v1.stable.exceptions.nonew.abilist b/libcxx/lib/abi/i686-linux-android21.libcxxabi.v1.stable.exceptions.nonew.abilist index 4b6f3548ce495..313de84df20e8 100644 --- a/libcxx/lib/abi/i686-linux-android21.libcxxabi.v1.stable.exceptions.nonew.abilist +++ b/libcxx/lib/abi/i686-linux-android21.libcxxabi.v1.stable.exceptions.nonew.abilist @@ -1151,14 +1151,10 @@ {'is_defined': True, 'name': '_ZNSt6__ndk117moneypunct_bynameIwLb0EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk117moneypunct_bynameIwLb1EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__get_ostream_fileERNS_13basic_ostreamIcNS_11char_traitsIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcE4initERKNS_5ctypeIcEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': 
'_ZNSt6__ndk118__time_get_storageIcEC2EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwE4initERKNS_5ctypeIwEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwEC2EPKc', 'type': 'FUNC'} diff --git a/libcxx/lib/abi/powerpc-ibm-aix.libcxxabi.v1.stable.exceptions.nonew.abilist b/libcxx/lib/abi/powerpc-ibm-aix.libcxxabi.v1.stable.exceptions.nonew.abilist index 2b85596bd87f6..99cde72885cf2 100644 --- a/libcxx/lib/abi/powerpc-ibm-aix.libcxxabi.v1.stable.exceptions.nonew.abilist +++ b/libcxx/lib/abi/powerpc-ibm-aix.libcxxabi.v1.stable.exceptions.nonew.abilist @@ -510,14 +510,10 @@ {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb0EE4initEPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb1EE4initEPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__get_ostream_fileERNS_13basic_ostreamIcNS_11char_traitsIcEEEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} -{'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE4initERKNS_5ctypeIcEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} -{'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1EPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2EPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} -{'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE4initERKNS_5ctypeIwEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} -{'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1EPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC2EPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} diff --git a/libcxx/lib/abi/powerpc64-ibm-aix.libcxxabi.v1.stable.exceptions.nonew.abilist 
b/libcxx/lib/abi/powerpc64-ibm-aix.libcxxabi.v1.stable.exceptions.nonew.abilist index 6ebdab96ed455..577d6cf759a77 100644 --- a/libcxx/lib/abi/powerpc64-ibm-aix.libcxxabi.v1.stable.exceptions.nonew.abilist +++ b/libcxx/lib/abi/powerpc64-ibm-aix.libcxxabi.v1.stable.exceptions.nonew.abilist @@ -510,14 +510,10 @@ {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb0EE4initEPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb1EE4initEPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__get_ostream_fileERNS_13basic_ostreamIcNS_11char_traitsIcEEEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} -{'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE4initERKNS_5ctypeIcEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} -{'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1EPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2EPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} -{'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE4initERKNS_5ctypeIwEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} -{'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1EPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'storage_mapping_class': 'DS', 'type': 'FUNC'} {'import_export': 'EXP', 'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC2EPKc', 'storage_mapping_class': 'DS', 'type': 'FUNC'} diff --git a/libcxx/lib/abi/x86_64-apple-darwin.libcxxabi.v1.stable.exceptions.nonew.abilist b/libcxx/lib/abi/x86_64-apple-darwin.libcxxabi.v1.stable.exceptions.nonew.abilist index f6f7d7fd8265a..5173a1a76b81a 100644 --- a/libcxx/lib/abi/x86_64-apple-darwin.libcxxabi.v1.stable.exceptions.nonew.abilist +++ b/libcxx/lib/abi/x86_64-apple-darwin.libcxxabi.v1.stable.exceptions.nonew.abilist @@ -1514,14 +1514,10 @@ {'is_defined': True, 'name': '__ZNSt3__117moneypunct_bynameIwLb0EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__117moneypunct_bynameIwLb1EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__get_ostream_fileERNS_13basic_ostreamIcNS_11char_traitsIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcE4initERKNS_5ctypeIcEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE', 'type': 'FUNC'} {'is_defined': True, 
'name': '__ZNSt3__118__time_get_storageIcEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcEC2EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwE4initERKNS_5ctypeIwEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '__ZNSt3__118__time_get_storageIwEC2EPKc', 'type': 'FUNC'} diff --git a/libcxx/lib/abi/x86_64-linux-android21.libcxxabi.v1.stable.exceptions.nonew.abilist b/libcxx/lib/abi/x86_64-linux-android21.libcxxabi.v1.stable.exceptions.nonew.abilist index 45f3d7c5904e8..1be7d8a2ac20b 100644 --- a/libcxx/lib/abi/x86_64-linux-android21.libcxxabi.v1.stable.exceptions.nonew.abilist +++ b/libcxx/lib/abi/x86_64-linux-android21.libcxxabi.v1.stable.exceptions.nonew.abilist @@ -1151,14 +1151,10 @@ {'is_defined': True, 'name': '_ZNSt6__ndk117moneypunct_bynameIwLb0EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk117moneypunct_bynameIwLb1EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__get_ostream_fileERNS_13basic_ostreamIcNS_11char_traitsIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcE4initERKNS_5ctypeIcEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcEC2EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwE4initERKNS_5ctypeIwEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt6__ndk118__time_get_storageIwEC2EPKc', 'type': 'FUNC'} diff --git a/libcxx/lib/abi/x86_64-unknown-freebsd.libcxxabi.v1.stable.exceptions.nonew.abilist b/libcxx/lib/abi/x86_64-unknown-freebsd.libcxxabi.v1.stable.exceptions.nonew.abilist index de8cf6deef1df..40ae625d3bd69 100644 --- a/libcxx/lib/abi/x86_64-unknown-freebsd.libcxxabi.v1.stable.exceptions.nonew.abilist +++ b/libcxx/lib/abi/x86_64-unknown-freebsd.libcxxabi.v1.stable.exceptions.nonew.abilist @@ -1165,14 +1165,10 @@ {'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb0EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb1EE4initEPKc', 
'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__get_ostream_fileERNS_13basic_ostreamIcNS_11char_traitsIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE4initERKNS_5ctypeIcEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE4initERKNS_5ctypeIwEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC2EPKc', 'type': 'FUNC'} diff --git a/libcxx/lib/abi/x86_64-unknown-linux-gnu.libcxxabi.v1.stable.exceptions.nonew.abilist b/libcxx/lib/abi/x86_64-unknown-linux-gnu.libcxxabi.v1.stable.exceptions.nonew.abilist index 8c55c4385f6f6..90166073b135f 100644 --- a/libcxx/lib/abi/x86_64-unknown-linux-gnu.libcxxabi.v1.stable.exceptions.nonew.abilist +++ b/libcxx/lib/abi/x86_64-unknown-linux-gnu.libcxxabi.v1.stable.exceptions.nonew.abilist @@ -1163,14 +1163,10 @@ {'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb0EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb1EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__get_ostream_fileERNS_13basic_ostreamIcNS_11char_traitsIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE4initERKNS_5ctypeIcEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE4initERKNS_5ctypeIwEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC2EPKc', 'type': 'FUNC'} diff --git a/libcxx/lib/abi/x86_64-unknown-linux-gnu.libcxxabi.v1.stable.noexceptions.nonew.abilist b/libcxx/lib/abi/x86_64-unknown-linux-gnu.libcxxabi.v1.stable.noexceptions.nonew.abilist index 51caa07a74330..5855c17cf11ed 100644 --- 
a/libcxx/lib/abi/x86_64-unknown-linux-gnu.libcxxabi.v1.stable.noexceptions.nonew.abilist +++ b/libcxx/lib/abi/x86_64-unknown-linux-gnu.libcxxabi.v1.stable.noexceptions.nonew.abilist @@ -1134,14 +1134,10 @@ {'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb0EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__117moneypunct_bynameIwLb1EE4initEPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__get_ostream_fileERNS_13basic_ostreamIcNS_11char_traitsIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE4initERKNS_5ctypeIcEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcE9__analyzeEcRKNS_5ctypeIcEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIcEC2ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE4initERKNS_5ctypeIwEE', 'type': 'FUNC'} -{'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwE9__analyzeEcRKNS_5ctypeIwEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1EPKc', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC1ERKNS_12basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE', 'type': 'FUNC'} {'is_defined': True, 'name': '_ZNSt3__118__time_get_storageIwEC2EPKc', 'type': 'FUNC'} diff --git a/libcxx/test/benchmarks/GenerateInput.h b/libcxx/test/benchmarks/GenerateInput.h index 06387852f76a6..d07cb8857579d 100644 --- a/libcxx/test/benchmarks/GenerateInput.h +++ b/libcxx/test/benchmarks/GenerateInput.h @@ -25,7 +25,7 @@ static const char Letters[] = { static const std::size_t LettersSize = sizeof(Letters); inline std::default_random_engine& getRandomEngine() { - static std::default_random_engine RandEngine(std::random_device{}()); + static std::default_random_engine RandEngine(123456); return RandEngine; } diff --git a/libcxx/test/benchmarks/algorithms/nonmodifying/find.bench.cpp b/libcxx/test/benchmarks/algorithms/nonmodifying/find.bench.cpp index b2ead1cc75585..afea31fb59e95 100644 --- a/libcxx/test/benchmarks/algorithms/nonmodifying/find.bench.cpp +++ b/libcxx/test/benchmarks/algorithms/nonmodifying/find.bench.cpp @@ -51,6 +51,7 @@ int main(int argc, char** argv) { // find bm.template operator()>("std::find(vector) (" + comment + ")", std_find); bm.template operator()>("std::find(vector) (" + comment + ")", std_find); + bm.template operator()>("std::find(vector) (" + comment + ")", std_find); bm.template operator()>("std::find(deque) (" + comment + ")", std_find); bm.template operator()>("std::find(list) (" + comment + ")", std_find); diff --git a/libcxx/test/benchmarks/algorithms/sorting/partial_sort.bench.cpp b/libcxx/test/benchmarks/algorithms/sorting/partial_sort.bench.cpp index 7000be66920d0..db90428a39bd8 100644 --- a/libcxx/test/benchmarks/algorithms/sorting/partial_sort.bench.cpp +++ b/libcxx/test/benchmarks/algorithms/sorting/partial_sort.bench.cpp @@ -68,15 +68,15 @@ int main(int argc, char** argv) { return real_data; }; auto name = [variant](std::string op) { return op + " (" + variant + ")"; }; - bm.operator()>(name("std::partial_sort(vector"), std_partial_sort, 
generate); + bm.operator()>(name("std::partial_sort(vector)"), std_partial_sort, generate); bm.operator()>( - name("std::partial_sort(vector"), std_partial_sort, gen2); - bm.operator()>(name("std::partial_sort(deque"), std_partial_sort, generate); + name("std::partial_sort(vector)"), std_partial_sort, gen2); + bm.operator()>(name("std::partial_sort(deque)"), std_partial_sort, generate); - bm.operator()>(name("rng::partial_sort(vector"), std::ranges::partial_sort, generate); + bm.operator()>(name("rng::partial_sort(vector)"), std::ranges::partial_sort, generate); bm.operator()>( - name("rng::partial_sort(vector"), std::ranges::partial_sort, gen2); - bm.operator()>(name("rng::partial_sort(deque"), std::ranges::partial_sort, generate); + name("rng::partial_sort(vector)"), std::ranges::partial_sort, gen2); + bm.operator()>(name("rng::partial_sort(deque)"), std::ranges::partial_sort, generate); }; register_bm(support::quicksort_adversarial_data, "qsort adversarial"); diff --git a/libcxx/test/benchmarks/containers/associative/map.bench.cpp b/libcxx/test/benchmarks/containers/associative/map.bench.cpp index bd664dbb56ee7..142229ae64cad 100644 --- a/libcxx/test/benchmarks/containers/associative/map.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/map.bench.cpp @@ -16,6 +16,19 @@ #include "../../GenerateInput.h" #include "benchmark/benchmark.h" +static void BM_map_find_string_literal(benchmark::State& state) { + std::map map; + map.emplace("Something very very long to show a long string situation", 1); + map.emplace("Something Else", 2); + + for (auto _ : state) { + benchmark::DoNotOptimize(map); + benchmark::DoNotOptimize(map.find("Something very very long to show a long string situation")); + } +} + +BENCHMARK(BM_map_find_string_literal); + template struct support::adapt_operations> { using ValueType = typename std::map::value_type; diff --git a/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp b/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp index 57adec2d214d4..d670c531910ea 100644 --- a/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp +++ b/libcxx/test/benchmarks/containers/associative/unordered_map.bench.cpp @@ -15,6 +15,19 @@ #include "../../GenerateInput.h" #include "benchmark/benchmark.h" +static void BM_map_find_string_literal(benchmark::State& state) { + std::unordered_map map; + map.emplace("Something very very long to show a long string situation", 1); + map.emplace("Something Else", 2); + + for (auto _ : state) { + benchmark::DoNotOptimize(map); + benchmark::DoNotOptimize(map.find("Something very very long to show a long string situation")); + } +} + +BENCHMARK(BM_map_find_string_literal); + template struct support::adapt_operations> { using ValueType = typename std::unordered_map::value_type; diff --git a/libcxx/test/benchmarks/containers/sequence/vector_bool.bench.cpp b/libcxx/test/benchmarks/containers/sequence/vector_bool.bench.cpp new file mode 100644 index 0000000000000..6ecb268208cc8 --- /dev/null +++ b/libcxx/test/benchmarks/containers/sequence/vector_bool.bench.cpp @@ -0,0 +1,20 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include +#include + +static void BM_vector_bool_size_ctor(benchmark::State& state) { + for (auto _ : state) { + std::vector vec(100, true); + benchmark::DoNotOptimize(vec); + } +} +BENCHMARK(BM_vector_bool_size_ctor); + +BENCHMARK_MAIN(); diff --git a/libcxx/test/benchmarks/containers/string.bench.cpp b/libcxx/test/benchmarks/containers/string.bench.cpp index 966775d31a8cf..2484ec8fd955f 100644 --- a/libcxx/test/benchmarks/containers/string.bench.cpp +++ b/libcxx/test/benchmarks/containers/string.bench.cpp @@ -60,6 +60,45 @@ static void BM_StringFindMatch2(benchmark::State& state) { } BENCHMARK(BM_StringFindMatch2)->Range(1, MAX_STRING_LEN / 4); +static void BM_StringFindStringLiteral(benchmark::State& state) { + std::string s; + + for (int i = 0; i < state.range(0); i++) + s += 'a'; + + s += 'b'; + + benchmark::DoNotOptimize(s.data()); + benchmark::ClobberMemory(); + size_t pos; + + for (auto _ : state) { + benchmark::DoNotOptimize(pos = s.find("b")); + benchmark::ClobberMemory(); + } +} + +BENCHMARK(BM_StringFindStringLiteral)->RangeMultiplier(2)->Range(8, 8 << 10); + +static void BM_StringFindCharLiteral(benchmark::State& state) { + std::string s; + + for (int i = 0; i < state.range(0); i++) + s += 'a'; + + s += 'b'; + + benchmark::DoNotOptimize(s.data()); + benchmark::ClobberMemory(); + size_t pos; + + for (auto _ : state) { + benchmark::DoNotOptimize(pos = s.find('b')); + benchmark::ClobberMemory(); + } +} +BENCHMARK(BM_StringFindCharLiteral)->RangeMultiplier(2)->Range(8, 8 << 10); + static void BM_StringCtorDefault(benchmark::State& state) { for (auto _ : state) { std::string Default; diff --git a/libcxx/test/benchmarks/shared_mutex_vs_mutex.bench.cpp b/libcxx/test/benchmarks/shared_mutex_vs_mutex.bench.cpp index 84a49a8d07d04..84729ebe013df 100644 --- a/libcxx/test/benchmarks/shared_mutex_vs_mutex.bench.cpp +++ b/libcxx/test/benchmarks/shared_mutex_vs_mutex.bench.cpp @@ -8,6 +8,10 @@ // UNSUPPORTED: c++03, c++11, c++14 +// This benchmark is very expensive and we don't want to run it on a regular basis, +// only to ensure the code doesn't rot. +// REQUIRES: enable-benchmarks=dry-run + // This benchmark compares the performance of std::mutex and std::shared_mutex in contended scenarios. // it's meant to establish a baseline overhead for std::shared_mutex and std::mutex, and to help inform decisions about // which mutex to use when selecting a mutex type for a given use case. diff --git a/libcxx/test/benchmarks/spec.gen.py b/libcxx/test/benchmarks/spec.gen.py index ea7b75b3d2085..c36dd0a3faba2 100644 --- a/libcxx/test/benchmarks/spec.gen.py +++ b/libcxx/test/benchmarks/spec.gen.py @@ -45,7 +45,7 @@ tune = base copies = 1 threads = 1 - CC = cc -O3 + CC = cc -O3 -std=c18 -Wno-implicit-function-declaration CXX = {cxx} {compile_flags} {flags} {link_flags} -Wno-error CC_VERSION_OPTION = --version CXX_VERSION_OPTION = --version @@ -72,7 +72,12 @@ print(f'RUN: %{{spec_dir}}/bin/runcpu --config %T/spec-config.cfg --size train --output-root %T --rebuild {benchmark}') print(f'RUN: rm -rf %T/benchspec') # remove the temporary directory, which can become quite large - # Parse the results into a LNT-compatible format. This also errors out if there are no CSV files, which - # means that the benchmark didn't run properly (the `runcpu` command above never reports a failure). 
- print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-results %T/result/*.train.csv --output-format=lnt > %T/results.lnt || ! cat %T/result/*.log') + # The `runcpu` command above doesn't fail even if the benchmark fails to run. To determine failure, parse the CSV + # results and ensure there are no compilation errors or runtime errors in the status row. Also print the logs and + # fail if there are no CSV files at all, which implies a SPEC error. + print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-results --extract "Base Status" --keep-failed %T/result/*.train.csv > %T/status || ! cat %T/result/*.log') + print(f'RUN: ! grep -E "CE|RE" %T/status || ! cat %T/result/*.log') + + # If there were no errors, parse the results into LNT-compatible format and print them. + print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-results %T/result/*.train.csv --output-format=lnt > %T/results.lnt') print(f'RUN: cat %T/results.lnt') diff --git a/libcxx/test/std/time/time.point/time.point.cast/ceil.compile.fail.cpp b/libcxx/test/extensions/gnu/hash_multimap/copy.pass.cpp similarity index 52% rename from libcxx/test/std/time/time.point/time.point.cast/ceil.compile.fail.cpp rename to libcxx/test/extensions/gnu/hash_multimap/copy.pass.cpp index fb82fdffe4d2a..9f9737b28e44d 100644 --- a/libcxx/test/std/time/time.point/time.point.cast/ceil.compile.fail.cpp +++ b/libcxx/test/extensions/gnu/hash_multimap/copy.pass.cpp @@ -6,22 +6,22 @@ // //===----------------------------------------------------------------------===// -// UNSUPPORTED: c++03, c++11, c++14 -// +// ADDITIONAL_COMPILE_FLAGS: -Wno-deprecated -// ceil +// hash_multimap::hash_multimap(const hash_multimap&) -// template -// time_point -// ceil(const time_point& t); +#include +#include -// ToDuration shall be an instantiation of duration. +int main(int, char**) { + __gnu_cxx::hash_multimap map; -#include + map.insert(std::make_pair(1, 1)); + map.insert(std::make_pair(1, 1)); -int main(int, char**) -{ - std::chrono::ceil(std::chrono::system_clock::now()); + auto map2 = map; + + assert(map2.size() == 2); return 0; } diff --git a/libcxx/test/std/time/time.point/time.point.cast/round.compile.fail.cpp b/libcxx/test/extensions/gnu/hash_multiset/copy.pass.cpp similarity index 52% rename from libcxx/test/std/time/time.point/time.point.cast/round.compile.fail.cpp rename to libcxx/test/extensions/gnu/hash_multiset/copy.pass.cpp index a5436c684040d..84c14bd18b085 100644 --- a/libcxx/test/std/time/time.point/time.point.cast/round.compile.fail.cpp +++ b/libcxx/test/extensions/gnu/hash_multiset/copy.pass.cpp @@ -6,22 +6,22 @@ // //===----------------------------------------------------------------------===// -// UNSUPPORTED: c++03, c++11, c++14 -// +// ADDITIONAL_COMPILE_FLAGS: -Wno-deprecated -// round +// hash_multiset::hash_multiset(const hash_multiset&) -// template -// time_point -// round(const time_point& t); +#include +#include -// ToDuration shall be an instantiation of duration. 
+int main(int, char**) { + __gnu_cxx::hash_multiset set; -#include + set.insert(1); + set.insert(1); -int main(int, char**) -{ - std::chrono::round(std::chrono::system_clock::now()); + auto set2 = set; + + assert(set2.size() == 2); return 0; } diff --git a/libcxx/test/libcxx/diagnostics/string.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/string.nodiscard.verify.cpp index 746ae633fff44..d421eaf5cb8f1 100644 --- a/libcxx/test/libcxx/diagnostics/string.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/string.nodiscard.verify.cpp @@ -12,7 +12,12 @@ #include +#include "test_macros.h" + void test() { std::string string; string.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} +#if TEST_STD_VER >= 26 + string.subview(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} +#endif } diff --git a/libcxx/test/libcxx/diagnostics/string_view.nodiscard.verify.cpp b/libcxx/test/libcxx/diagnostics/string_view.nodiscard.verify.cpp index b5548bc34fddc..e5b2258315fe4 100644 --- a/libcxx/test/libcxx/diagnostics/string_view.nodiscard.verify.cpp +++ b/libcxx/test/libcxx/diagnostics/string_view.nodiscard.verify.cpp @@ -12,7 +12,12 @@ #include +#include "test_macros.h" + void test() { std::string_view string_view; string_view.empty(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} +#if TEST_STD_VER >= 26 + string_view.subview(); // expected-warning {{ignoring return value of function declared with 'nodiscard' attribute}} +#endif } diff --git a/libcxx/test/libcxx/memory/is_allocator.pass.cpp b/libcxx/test/libcxx/memory/is_allocator.pass.cpp index cf11d077bf086..ad01b93a449e1 100644 --- a/libcxx/test/libcxx/memory/is_allocator.pass.cpp +++ b/libcxx/test/libcxx/memory/is_allocator.pass.cpp @@ -11,7 +11,7 @@ // UNSUPPORTED: c++03, c++11, c++14 // template -// struct __is_allocator; +// inline const bool __is_allocator_v; // Is either true_type or false_type depending on if A is an allocator. 
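// Illustrative sketch only (not libc++'s actual implementation; the names below
// are made up for this example): the trait exercised by the test above reports
// whether a type "looks like" an allocator, i.e. has a value_type and an
// allocate(size_t) member. A minimal detection-idiom version in that spirit:
#include <cstddef>
#include <memory>
#include <type_traits>
#include <utility>

template <class Alloc, class = void>
struct is_allocator_like : std::false_type {};

// True when the type exposes value_type and a callable allocate(std::size_t).
template <class Alloc>
struct is_allocator_like<Alloc,
                         std::void_t<typename Alloc::value_type,
                                     decltype(std::declval<Alloc&>().allocate(std::size_t(0)))>>
    : std::true_type {};

template <class Alloc>
inline constexpr bool is_allocator_like_v = is_allocator_like<Alloc>::value;

static_assert(!is_allocator_like_v<int>, "a plain int is not an allocator");
static_assert(is_allocator_like_v<std::allocator<int>>, "std::allocator<int> is");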
@@ -23,15 +23,13 @@ #include "test_allocator.h" template -void test_allocators() -{ - static_assert(!std::__is_allocator::value, "" ); - static_assert( std::__is_allocator>::value, "" ); - static_assert( std::__is_allocator>::value, "" ); - static_assert( std::__is_allocator>::value, "" ); +void test_allocators() { + static_assert(!std::__is_allocator_v, ""); + static_assert(std::__is_allocator_v>, ""); + static_assert(std::__is_allocator_v>, ""); + static_assert(std::__is_allocator_v>, ""); } - int main(int, char**) { // test_allocators(); diff --git a/libcxx/test/libcxx/numerics/bit.ops.pass.cpp b/libcxx/test/libcxx/numerics/bit.ops.pass.cpp index 7f502d6e01d1e..061f7030eca0b 100644 --- a/libcxx/test/libcxx/numerics/bit.ops.pass.cpp +++ b/libcxx/test/libcxx/numerics/bit.ops.pass.cpp @@ -11,7 +11,6 @@ #include <__bit/bit_log2.h> #include <__bit/countl.h> -#include <__bit/rotate.h> #include #include "test_macros.h" @@ -19,10 +18,8 @@ TEST_CONSTEXPR_CXX14 bool test() { const unsigned v = 0x12345678; - ASSERT_SAME_TYPE(unsigned, decltype(std::__rotr(v, 3))); ASSERT_SAME_TYPE(int, decltype(std::__countl_zero(v))); - assert(std::__rotr(v, 3) == 0x02468acfU); assert(std::__countl_zero(v) == 3); #if TEST_STD_VER > 17 diff --git a/libcxx/test/libcxx/transitive_includes/cxx26.csv b/libcxx/test/libcxx/transitive_includes/cxx26.csv index 5f906338f4b7c..81c8c41d88756 100644 --- a/libcxx/test/libcxx/transitive_includes/cxx26.csv +++ b/libcxx/test/libcxx/transitive_includes/cxx26.csv @@ -669,7 +669,6 @@ mutex ctime mutex limits mutex ratio mutex tuple -mutex typeinfo mutex version new version numbers version diff --git a/libcxx/test/libcxx/utilities/utility/has_default_three_way.compile.pass.cpp b/libcxx/test/libcxx/utilities/utility/has_default_three_way.compile.pass.cpp index 42b4855a9fddd..625b194b1eb1a 100644 --- a/libcxx/test/libcxx/utilities/utility/has_default_three_way.compile.pass.cpp +++ b/libcxx/test/libcxx/utilities/utility/has_default_three_way.compile.pass.cpp @@ -18,7 +18,7 @@ static_assert(std::__has_default_three_way_comparator::value); static_assert(std::__has_default_three_way_comparator::value); static_assert(std::__has_default_three_way_comparator::value); -#if __has_builtin(__builtin_lt_synthesises_from_spaceship) +#if __has_builtin(__builtin_lt_synthesizes_from_spaceship) static_assert(std::__has_default_three_way_comparator::value); static_assert(std::__has_default_three_way_comparator::value); static_assert(std::__has_default_three_way_comparator::value); diff --git a/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/ranges.find.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/ranges.find.pass.cpp index 5b4abc45b6f4f..3303b4a76f467 100644 --- a/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/ranges.find.pass.cpp +++ b/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/ranges.find.pass.cpp @@ -272,57 +272,100 @@ class Comparable { friend bool operator==(const Comparable& lhs, long long rhs) { return comparable_data[lhs.index_] == rhs; } }; -void test_deque() { - { // empty deque - std::deque data; - assert(std::ranges::find(data, 4) == data.end()); - assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); - } - - { // single element - match - std::deque data = {4}; - assert(std::ranges::find(data, 4) == data.begin()); - assert(std::ranges::find(data.begin(), data.end(), 4) == data.begin()); - } - - { // single element - no match - std::deque data = {3}; - assert(std::ranges::find(data, 4) == data.end()); - 
assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); - } - - // many elements - for (auto size : {2, 3, 1023, 1024, 1025, 2047, 2048, 2049}) { - { // last element match +void test_segmented_iterator_types() { + // Test the optimized find algorithm for types that implement the segment iterator trait + // deque + { + { // empty deque std::deque data; - data.resize(size); - std::fill(data.begin(), data.end(), 3); - data[size - 1] = 4; - assert(std::ranges::find(data, 4) == data.end() - 1); - assert(std::ranges::find(data.begin(), data.end(), 4) == data.end() - 1); + assert(std::ranges::find(data, 4) == data.end()); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); } - { // second-last element match - std::deque data; - data.resize(size); - std::fill(data.begin(), data.end(), 3); - data[size - 2] = 4; - assert(std::ranges::find(data, 4) == data.end() - 2); - assert(std::ranges::find(data.begin(), data.end(), 4) == data.end() - 2); + { // single element - match + std::deque data = {4}; + assert(std::ranges::find(data, 4) == data.begin()); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.begin()); } - { // no match - std::deque data; - data.resize(size); - std::fill(data.begin(), data.end(), 3); + { // single element - no match + std::deque data = {3}; assert(std::ranges::find(data, 4) == data.end()); assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); } + + // many elements + for (auto size : {2, 3, 1023, 1024, 1025, 2047, 2048, 2049}) { + { // last element match + std::deque data; + data.resize(size); + std::fill(data.begin(), data.end(), 3); + data[size - 1] = 4; + assert(std::ranges::find(data, 4) == data.end() - 1); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.end() - 1); + } + + { // second-last element match + std::deque data; + data.resize(size); + std::fill(data.begin(), data.end(), 3); + data[size - 2] = 4; + assert(std::ranges::find(data, 4) == data.end() - 2); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.end() - 2); + } + + { // no match + std::deque data; + data.resize(size); + std::fill(data.begin(), data.end(), 3); + assert(std::ranges::find(data, 4) == data.end()); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); + } + } + } + // join_view ranges adaptor + { + { // single element - match + int data[1][1] = {{4}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::begin(joined)); + } + { // single element - no match + // (reproducer for https://llvm.org/PR158279, where the iterator would never reach the end sentinel) + int data[1][1] = {{3}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::end(joined)); + } + { // several sub-arrays of size 1 - match + int data[3][1] = {{0}, {4}, {0}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::next(std::ranges::begin(joined))); + } + { // several sub-arrays of size 2 - match in second element of an array + int data[3][2] = {{0, 0}, {0, 4}, {0, 0}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::next(std::ranges::begin(joined), 3)); + } + { // vector of empty vectors + std::vector> data = {{}, {}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::end(joined)); + } + { // vector of variably sized vectors - match + std::vector> data = {{}, {}, {3, 4}, {}, {}}; + auto joined = std::views::join(data); 
+ assert(std::ranges::find(joined, 4) == std::ranges::next(std::ranges::begin(joined))); + } + { // vector of variably sized vectors - no match + std::vector> data = {{}, {}, {3, 5}, {}, {}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::end(joined)); + } } } int main(int, char**) { - test_deque(); + test_segmented_iterator_types(); test(); static_assert(test()); diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp index 8e2605fef9d31..65a457a6129d5 100644 --- a/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp +++ b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp @@ -7,6 +7,7 @@ // UNSUPPORTED: c++03, c++11, c++14, c++17 // XFAIL: !has-64-bit-atomics +// XFAIL: target={{x86_64-.*}} && tsan // integral-type fetch_add(integral-type, memory_order = memory_order::seq_cst) const noexcept; // floating-point-type fetch_add(floating-point-type, memory_order = memory_order::seq_cst) const noexcept; diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp index 04def076a301f..ab89ebdbde261 100644 --- a/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp +++ b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp @@ -7,6 +7,7 @@ // UNSUPPORTED: c++03, c++11, c++14, c++17 // XFAIL: !has-64-bit-atomics +// XFAIL: target={{x86_64-.*}} && tsan // integral-type fetch_sub(integral-type, memory_order = memory_order::seq_cst) const noexcept; // floating-point-type fetch_sub(floating-point-type, memory_order = memory_order::seq_cst) const noexcept; diff --git a/libcxx/test/std/containers/sequences/vector.bool/small_allocator_size.pass.cpp b/libcxx/test/std/containers/sequences/vector.bool/small_allocator_size.pass.cpp index 0136fb0631604..7017351d47865 100644 --- a/libcxx/test/std/containers/sequences/vector.bool/small_allocator_size.pass.cpp +++ b/libcxx/test/std/containers/sequences/vector.bool/small_allocator_size.pass.cpp @@ -9,8 +9,6 @@ // // vector -// XFAIL: FROZEN-CXX03-HEADERS-FIXME - // This test ensures that std::vector handles allocator types with small size types // properly. Related issue: https://llvm.org/PR121713. 
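// Illustrative sketch only, not part of the test suite: the names
// (tiny_size_allocator, etc.) are hypothetical and chosen for this example.
// It shows the kind of allocator with a deliberately narrow size_type that the
// vector<bool> test above guards against mishandling (see llvm.org/PR121713).
// Requires C++11 or later.
#include <cassert>
#include <cstdint>
#include <memory>
#include <vector>

template <class T>
struct tiny_size_allocator {
  using value_type      = T;
  using size_type       = std::uint16_t; // much narrower than std::size_t
  using difference_type = std::int16_t;

  tiny_size_allocator() = default;
  template <class U>
  tiny_size_allocator(const tiny_size_allocator<U>&) {}

  T* allocate(size_type n) { return std::allocator<T>().allocate(n); }
  void deallocate(T* p, size_type n) { std::allocator<T>().deallocate(p, n); }

  friend bool operator==(const tiny_size_allocator&, const tiny_size_allocator&) { return true; }
  friend bool operator!=(const tiny_size_allocator&, const tiny_size_allocator&) { return false; }
};

int main() {
  // The container must cope with the allocator's small size/difference types.
  std::vector<bool, tiny_size_allocator<bool>> v(100, true);
  assert(v.size() == 100);
  v.push_back(false);
  assert(!v.back());
  return 0;
}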
diff --git a/libcxx/test/std/containers/sequences/vector/common.h b/libcxx/test/std/containers/sequences/vector/common.h index 4af6559a06e73..34453f8889b73 100644 --- a/libcxx/test/std/containers/sequences/vector/common.h +++ b/libcxx/test/std/containers/sequences/vector/common.h @@ -214,10 +214,10 @@ struct throwing_iterator { }; inline void check_new_delete_called() { - assert(globalMemCounter.new_called == globalMemCounter.delete_called); - assert(globalMemCounter.new_array_called == globalMemCounter.delete_array_called); - assert(globalMemCounter.aligned_new_called == globalMemCounter.aligned_delete_called); - assert(globalMemCounter.aligned_new_array_called == globalMemCounter.aligned_delete_array_called); + ASSERT_WITH_LIBRARY_INTERNAL_ALLOCATIONS(globalMemCounter.new_called == globalMemCounter.delete_called); + ASSERT_WITH_LIBRARY_INTERNAL_ALLOCATIONS(globalMemCounter.new_array_called == globalMemCounter.delete_array_called); + ASSERT_WITH_LIBRARY_INTERNAL_ALLOCATIONS(globalMemCounter.aligned_new_called == globalMemCounter.aligned_delete_called); + ASSERT_WITH_LIBRARY_INTERNAL_ALLOCATIONS(globalMemCounter.aligned_new_array_called == globalMemCounter.aligned_delete_array_called); } template diff --git a/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp b/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp index 056d6f65fc368..2c3751a97cf4e 100644 --- a/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp +++ b/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp @@ -12,6 +12,9 @@ // Assertion failed: N->getValueType(0) == MVT::v1i1 && "Expected v1i1 type" // XFAIL: target=armv7-unknown-linux-gnueabihf +// FIXME: This should work with -flax-vector-conversions=none +// ADDITIONAL_COMPILE_FLAGS(clang): -flax-vector-conversions=integer + // // // [simd.class] diff --git a/libcxx/test/std/input.output/filesystems/class.path/path.member/path.append.pass.cpp b/libcxx/test/std/input.output/filesystems/class.path/path.member/path.append.pass.cpp index 3442019a8360c..b3d96c283c9b7 100644 --- a/libcxx/test/std/input.output/filesystems/class.path/path.member/path.append.pass.cpp +++ b/libcxx/test/std/input.output/filesystems/class.path/path.member/path.append.pass.cpp @@ -12,6 +12,11 @@ // These tests require locale for non-char paths // UNSUPPORTED: no-localization +// In MinGW mode, with optimizations enabled with a DLL, the number of counted +// allocations mismatches, as some ctor/dtor calls are generated in the +// calling code, and some are called from the DLL. +// ADDITIONAL_COMPILE_FLAGS: -DALLOW_MISMATCHING_LIBRRARY_INTERNAL_ALLOCATIONS + // // class path diff --git a/libcxx/test/std/input.output/filesystems/class.path/path.member/path.concat.pass.cpp b/libcxx/test/std/input.output/filesystems/class.path/path.member/path.concat.pass.cpp index 5596de7328da4..570d303985e86 100644 --- a/libcxx/test/std/input.output/filesystems/class.path/path.member/path.concat.pass.cpp +++ b/libcxx/test/std/input.output/filesystems/class.path/path.member/path.concat.pass.cpp @@ -12,6 +12,11 @@ // These tests require locale for non-char paths // UNSUPPORTED: no-localization +// In MinGW mode, with optimizations enabled with a DLL, the number of counted +// allocations mismatches, as some ctor/dtor calls are generated in the +// calling code, and some are called from the DLL. 
+// ADDITIONAL_COMPILE_FLAGS: -DALLOW_MISMATCHING_LIBRRARY_INTERNAL_ALLOCATIONS + // // class path diff --git a/libcxx/test/std/input.output/iostream.format/input.streams/istream.unformatted/ignore.char_type.pass.cpp b/libcxx/test/std/input.output/iostream.format/input.streams/istream.unformatted/ignore.char_type.pass.cpp new file mode 100644 index 0000000000000..d0d174c1d4d87 --- /dev/null +++ b/libcxx/test/std/input.output/iostream.format/input.streams/istream.unformatted/ignore.char_type.pass.cpp @@ -0,0 +1,41 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// Requires 396145d in the built library. +// XFAIL: using-built-library-before-llvm-9 +// XFAIL: FROZEN-CXX03-HEADERS-FIXME + +// + +// basic_istream& ignore(streamsize n, char_type delim); + +#include +#include +#include + +#include "test_macros.h" + +int main(int, char**) { + std::istringstream in("\xF0\x9F\xA4\xA1 Clown Face"); + in.ignore(100, '\xA1'); // Ignore up to '\xA1' delimiter, + // previously might have ignored to EOF. + + assert(in.gcount() == 4); // 4 bytes were ignored. + assert(in.peek() == ' '); // Next character is a space. + + std::string str; // Read the next word. + in >> str; + assert(str == "Clown"); + + // Parameter value "-1L" doesn't cause ambiguity with the char_type overload. + in.ignore(100, -1L); // Ignore up to EOF, which is the default behavior. + assert(in.eof()); // Stream should be at EOF now. + assert(in.gcount() == 5); + + return 0; +} diff --git a/libcxx/test/std/language.support/support.exception/propagation/make_exception_ptr.objc.pass.mm b/libcxx/test/std/language.support/support.exception/propagation/make_exception_ptr.objc.pass.mm index 05a6698ea1a59..de383051543be 100644 --- a/libcxx/test/std/language.support/support.exception/propagation/make_exception_ptr.objc.pass.mm +++ b/libcxx/test/std/language.support/support.exception/propagation/make_exception_ptr.objc.pass.mm @@ -17,7 +17,8 @@ // out-of-the-box. 
// REQUIRES: has-fobjc-arc && darwin -// ADDITIONAL_COMPILE_FLAGS: -fobjc-arc +// FIXME: including seems to be currently broken with modules enabled +// ADDITIONAL_COMPILE_FLAGS: -fobjc-arc -fno-modules #include #include diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/string.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/string.version.compile.pass.cpp index 7236d5d7f2aca..147854eead2cc 100644 --- a/libcxx/test/std/language.support/support.limits/support.limits.general/string.version.compile.pass.cpp +++ b/libcxx/test/std/language.support/support.limits/support.limits.general/string.version.compile.pass.cpp @@ -60,6 +60,10 @@ # error "__cpp_lib_string_resize_and_overwrite should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifdef __cpp_lib_string_udls # error "__cpp_lib_string_udls should not be defined before c++14" # endif @@ -114,6 +118,10 @@ # error "__cpp_lib_string_resize_and_overwrite should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in c++14" # endif @@ -177,6 +185,10 @@ # error "__cpp_lib_string_resize_and_overwrite should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in c++17" # endif @@ -261,6 +273,10 @@ # error "__cpp_lib_string_resize_and_overwrite should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in c++20" # endif @@ -354,6 +370,10 @@ # error "__cpp_lib_string_resize_and_overwrite should have the value 202110L in c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in c++23" # endif @@ -456,6 +476,13 @@ # error "__cpp_lib_string_resize_and_overwrite should have the value 202110L in c++26" # endif +# ifndef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should be defined in c++26" +# endif +# if __cpp_lib_string_subview != 202506L +# error "__cpp_lib_string_subview should have the value 202506L in c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in c++26" # endif diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/string_view.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/string_view.version.compile.pass.cpp index c7bafb0bf059c..2c3716111102a 100644 --- a/libcxx/test/std/language.support/support.limits/support.limits.general/string_view.version.compile.pass.cpp +++ b/libcxx/test/std/language.support/support.limits/support.limits.general/string_view.version.compile.pass.cpp @@ -40,6 +40,10 @@ # error "__cpp_lib_string_contains should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifdef __cpp_lib_string_view # error 
"__cpp_lib_string_view should not be defined before c++17" # endif @@ -66,6 +70,10 @@ # error "__cpp_lib_string_contains should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifdef __cpp_lib_string_view # error "__cpp_lib_string_view should not be defined before c++17" # endif @@ -92,6 +100,10 @@ # error "__cpp_lib_string_contains should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_view # error "__cpp_lib_string_view should be defined in c++17" # endif @@ -136,6 +148,10 @@ # error "__cpp_lib_string_contains should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_view # error "__cpp_lib_string_view should be defined in c++20" # endif @@ -183,6 +199,10 @@ # error "__cpp_lib_string_contains should have the value 202011L in c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_view # error "__cpp_lib_string_view should be defined in c++23" # endif @@ -239,6 +259,13 @@ # error "__cpp_lib_string_contains should have the value 202011L in c++26" # endif +# ifndef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should be defined in c++26" +# endif +# if __cpp_lib_string_subview != 202506L +# error "__cpp_lib_string_subview should have the value 202506L in c++26" +# endif + # ifndef __cpp_lib_string_view # error "__cpp_lib_string_view should be defined in c++26" # endif diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp index cde2f258b7732..6aa704a3ead3f 100644 --- a/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp +++ b/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp @@ -820,6 +820,10 @@ # error "__cpp_lib_string_resize_and_overwrite should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifdef __cpp_lib_string_udls # error "__cpp_lib_string_udls should not be defined before c++14" # endif @@ -1775,6 +1779,10 @@ # error "__cpp_lib_string_resize_and_overwrite should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in c++14" # endif @@ -2916,6 +2924,10 @@ # error "__cpp_lib_string_resize_and_overwrite should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in c++17" # endif @@ -4330,6 +4342,10 @@ # error "__cpp_lib_string_resize_and_overwrite should not be defined before c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in 
c++20" # endif @@ -5978,6 +5994,10 @@ # error "__cpp_lib_string_resize_and_overwrite should have the value 202110L in c++23" # endif +# ifdef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should not be defined before c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in c++23" # endif @@ -7935,6 +7955,13 @@ # error "__cpp_lib_string_resize_and_overwrite should have the value 202110L in c++26" # endif +# ifndef __cpp_lib_string_subview +# error "__cpp_lib_string_subview should be defined in c++26" +# endif +# if __cpp_lib_string_subview != 202506L +# error "__cpp_lib_string_subview should have the value 202506L in c++26" +# endif + # ifndef __cpp_lib_string_udls # error "__cpp_lib_string_udls should be defined in c++26" # endif diff --git a/libcxx/test/std/numerics/c.math/isnormal.pass.cpp b/libcxx/test/std/numerics/c.math/isnormal.pass.cpp index 03b086fa3f48e..76c3d13520d99 100644 --- a/libcxx/test/std/numerics/c.math/isnormal.pass.cpp +++ b/libcxx/test/std/numerics/c.math/isnormal.pass.cpp @@ -11,8 +11,6 @@ // We don't control the implementation on windows // UNSUPPORTED: windows -// XFAIL: FROZEN-CXX03-HEADERS-FIXME - #include #include #include diff --git a/libcxx/test/std/numerics/c.math/signbit.pass.cpp b/libcxx/test/std/numerics/c.math/signbit.pass.cpp index 44ce32581cca1..7571ced2e4431 100644 --- a/libcxx/test/std/numerics/c.math/signbit.pass.cpp +++ b/libcxx/test/std/numerics/c.math/signbit.pass.cpp @@ -17,8 +17,6 @@ // GCC warns about signbit comparing `bool_v < 0`, which we're testing // ADDITIONAL_COMPILE_FLAGS(gcc): -Wno-bool-compare -// XFAIL: FROZEN-CXX03-HEADERS-FIXME - #include #include #include diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find/string_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find/string_size.pass.cpp index 60ed469ce991b..4aa4a3f339142 100644 --- a/libcxx/test/std/strings/basic.string/string.ops/string_find/string_size.pass.cpp +++ b/libcxx/test/std/strings/basic.string/string.ops/string_find/string_size.pass.cpp @@ -43,14 +43,17 @@ TEST_CONSTEXPR_CXX20 void test0() { test(S(""), S("abcdeabcde"), 1, S::npos); test(S(""), S("abcdeabcdeabcdeabcde"), 1, S::npos); test(S("abcde"), S(""), 0, 0); + test(S("abcde"), S("a"), 0, 0); test(S("abcde"), S("abcde"), 0, 0); test(S("abcde"), S("abcdeabcde"), 0, S::npos); test(S("abcde"), S("abcdeabcdeabcdeabcde"), 0, S::npos); test(S("abcde"), S(""), 1, 1); + test(S("abcde"), S("a"), 1, S::npos); test(S("abcde"), S("abcde"), 1, S::npos); test(S("abcde"), S("abcdeabcde"), 1, S::npos); test(S("abcde"), S("abcdeabcdeabcdeabcde"), 1, S::npos); test(S("abcde"), S(""), 2, 2); + test(S("abcde"), S("a"), 2, S::npos); test(S("abcde"), S("abcde"), 2, S::npos); test(S("abcde"), S("abcdeabcde"), 2, S::npos); test(S("abcde"), S("abcdeabcdeabcdeabcde"), 2, S::npos); @@ -59,58 +62,72 @@ TEST_CONSTEXPR_CXX20 void test0() { test(S("abcde"), S("abcdeabcde"), 4, S::npos); test(S("abcde"), S("abcdeabcdeabcdeabcde"), 4, S::npos); test(S("abcde"), S(""), 5, 5); + test(S("abcde"), S("a"), 5, S::npos); test(S("abcde"), S("abcde"), 5, S::npos); test(S("abcde"), S("abcdeabcde"), 5, S::npos); test(S("abcde"), S("abcdeabcdeabcdeabcde"), 5, S::npos); test(S("abcde"), S(""), 6, S::npos); + test(S("abcde"), S("a"), 6, S::npos); test(S("abcde"), S("abcde"), 6, S::npos); test(S("abcde"), S("abcdeabcde"), 6, S::npos); test(S("abcde"), S("abcdeabcdeabcdeabcde"), 6, S::npos); test(S("abcdeabcde"), S(""), 0, 0); + test(S("abcdeabcde"), S("a"), 0, 
0); test(S("abcdeabcde"), S("abcde"), 0, 0); test(S("abcdeabcde"), S("abcdeabcde"), 0, 0); test(S("abcdeabcde"), S("abcdeabcdeabcdeabcde"), 0, S::npos); test(S("abcdeabcde"), S(""), 1, 1); + test(S("abcdeabcde"), S("a"), 1, 5); test(S("abcdeabcde"), S("abcde"), 1, 5); test(S("abcdeabcde"), S("abcdeabcde"), 1, S::npos); test(S("abcdeabcde"), S("abcdeabcdeabcdeabcde"), 1, S::npos); test(S("abcdeabcde"), S(""), 5, 5); + test(S("abcdeabcde"), S("a"), 5, 5); test(S("abcdeabcde"), S("abcde"), 5, 5); test(S("abcdeabcde"), S("abcdeabcde"), 5, S::npos); test(S("abcdeabcde"), S("abcdeabcdeabcdeabcde"), 5, S::npos); test(S("abcdeabcde"), S(""), 9, 9); + test(S("abcdeabcde"), S("a"), 9, S::npos); test(S("abcdeabcde"), S("abcde"), 9, S::npos); test(S("abcdeabcde"), S("abcdeabcde"), 9, S::npos); test(S("abcdeabcde"), S("abcdeabcdeabcdeabcde"), 9, S::npos); test(S("abcdeabcde"), S(""), 10, 10); + test(S("abcdeabcde"), S("a"), 10, S::npos); test(S("abcdeabcde"), S("abcde"), 10, S::npos); test(S("abcdeabcde"), S("abcdeabcde"), 10, S::npos); test(S("abcdeabcde"), S("abcdeabcdeabcdeabcde"), 10, S::npos); test(S("abcdeabcde"), S(""), 11, S::npos); + test(S("abcdeabcde"), S("a"), 11, S::npos); test(S("abcdeabcde"), S("abcde"), 11, S::npos); test(S("abcdeabcde"), S("abcdeabcde"), 11, S::npos); test(S("abcdeabcde"), S("abcdeabcdeabcdeabcde"), 11, S::npos); test(S("abcdeabcdeabcdeabcde"), S(""), 0, 0); + test(S("abcdeabcdeabcdeabcde"), S("a"), 0, 0); test(S("abcdeabcdeabcdeabcde"), S("abcde"), 0, 0); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcde"), 0, 0); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcdeabcdeabcde"), 0, 0); test(S("abcdeabcdeabcdeabcde"), S(""), 1, 1); + test(S("abcdeabcdeabcdeabcde"), S("a"), 1, 5); test(S("abcdeabcdeabcdeabcde"), S("abcde"), 1, 5); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcde"), 1, 5); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcdeabcdeabcde"), 1, S::npos); test(S("abcdeabcdeabcdeabcde"), S(""), 10, 10); + test(S("abcdeabcdeabcdeabcde"), S("a"), 10, 10); test(S("abcdeabcdeabcdeabcde"), S("abcde"), 10, 10); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcde"), 10, 10); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcdeabcdeabcde"), 10, S::npos); test(S("abcdeabcdeabcdeabcde"), S(""), 19, 19); + test(S("abcdeabcdeabcdeabcde"), S("a"), 19, S::npos); test(S("abcdeabcdeabcdeabcde"), S("abcde"), 19, S::npos); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcde"), 19, S::npos); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcdeabcdeabcde"), 19, S::npos); test(S("abcdeabcdeabcdeabcde"), S(""), 20, 20); + test(S("abcdeabcdeabcdeabcde"), S("a"), 20, S::npos); test(S("abcdeabcdeabcdeabcde"), S("abcde"), 20, S::npos); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcde"), 20, S::npos); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcdeabcdeabcde"), 20, S::npos); test(S("abcdeabcdeabcdeabcde"), S(""), 21, S::npos); + test(S("abcdeabcdeabcdeabcde"), S("a"), 21, S::npos); test(S("abcdeabcdeabcdeabcde"), S("abcde"), 21, S::npos); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcde"), 21, S::npos); test(S("abcdeabcdeabcdeabcde"), S("abcdeabcdeabcdeabcde"), 21, S::npos); diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_substr/subview.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_substr/subview.pass.cpp new file mode 100644 index 0000000000000..3cbf5270faee6 --- /dev/null +++ b/libcxx/test/std/strings/basic.string/string.ops/string_substr/subview.pass.cpp @@ -0,0 +1,123 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the 
Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// REQUIRES: std-at-least-c++26 + +// + +// constexpr basic_string_view subview(size_type pos = 0, +// size_type n = npos) const; + +#include +#include +#include +#include +#include + +#include "constexpr_char_traits.h" +#include "make_string.h" +#include "min_allocator.h" +#include "test_allocator.h" +#include "test_macros.h" +#include "type_algorithms.h" + +#define CS(S) MAKE_CSTRING(CharT, S) + +template +constexpr void test() { + std::basic_string s{CS("Hello cruel world!"), AllocT{}}; + + { // With a default position and a character length. + + // Also check if subview() is a const-qualified. + assert(std::as_const(s).subview() == CS("Hello cruel world!")); + + // Check it the return type of subview() is correct. + std::same_as> decltype(auto) sv = s.subview(); + assert(sv == CS("Hello cruel world!")); + } + + { // Check with different position and length. + + // With a explict position and a character length. + assert(s.subview(6, 5) == CS("cruel")); + + // From the beginning of the string with a explicit character length. + assert(s.subview(0, 5) == CS("Hello")); + + // To the end of string with the default character length. + assert(s.subview(12) == CS("world!")); + + // From the beginning to the end of the string with explicit values. + assert(s.subview(0, s.size()) == CS("Hello cruel world!")); + } + + // Test if exceptions are thrown correctly. +#ifndef TEST_HAS_NO_EXCEPTIONS + if (!std::is_constant_evaluated()) { + { // With a position that is out of range. + try { + std::ignore = s.subview(s.size() + 1); + assert(false); + } catch ([[maybe_unused]] const std::out_of_range& ex) { + LIBCPP_ASSERT(std::string(ex.what()) == "string_view::substr"); + } catch (...) { + assert(false); + } + } + + { // With a position that is out of range and a 0 character length. + try { + std::ignore = s.subview(s.size() + 1, 0); + assert(false); + } catch ([[maybe_unused]] const std::out_of_range& ex) { + LIBCPP_ASSERT(std::string(ex.what()) == "string_view::substr"); + } catch (...) { + assert(false); + } + } + + { // With a position that is out of range and a some character length. + try { + std::ignore = s.subview(s.size() + 1, 1); + assert(false); + } catch ([[maybe_unused]] const std::out_of_range& ex) { + LIBCPP_ASSERT(std::string(ex.what()) == "string_view::substr"); + } catch (...) { + assert(false); + } + } + } +#endif +} + +template +constexpr void test() { + test, std::allocator>(); + test, min_allocator>(); + test, safe_allocator>(); + test, test_allocator>(); + + test, std::allocator>(); + test, min_allocator>(); + test, safe_allocator>(); + test, test_allocator>(); +} + +constexpr bool test() { + types::for_each(types::character_types(), [] { test(); }); + + return true; +} + +int main(int, char**) { + test(); + static_assert(test()); + + return 0; +} diff --git a/libcxx/test/std/strings/string.view/string.view.ops/subview.pass.cpp b/libcxx/test/std/strings/string.view/string.view.ops/subview.pass.cpp new file mode 100644 index 0000000000000..b781754c9ba96 --- /dev/null +++ b/libcxx/test/std/strings/string.view/string.view.ops/subview.pass.cpp @@ -0,0 +1,120 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// REQUIRES: std-at-least-c++26 + +// + +// constexpr basic_string_view subview(size_type pos = 0, +// size_type n = npos) const; // freestanding-deleted + +#include +#include +#include +#include + +#include "constexpr_char_traits.h" +#include "make_string.h" +#include "test_macros.h" +#include "type_algorithms.h" + +#define CS(S) MAKE_CSTRING(CharT, S) + +template +constexpr void test() { + std::basic_string_view sv{CS("Hello cruel world!")}; + + { // With a default position and a character length. + + // Check if subview() is a const-qualified. + assert(std::as_const(sv).subview() == CS("Hello cruel world!")); + + // Check if the return type of subview() is correct. + std::same_as> decltype(auto) subsv = sv.subview(); + assert(subsv == CS("Hello cruel world!")); + } + + { // Check with different position and length. + + // With a explict position and a character length. + assert(sv.subview(6, 5) == CS("cruel")); + + // From the beginning of the string with a explicit character length. + assert(sv.subview(0, 5) == CS("Hello")); + + // To the end of string with the default character length. + assert(sv.subview(12) == CS("world!")); + + // From the beginning to the end of the string with explicit values. + assert(sv.subview(0, sv.size()) == CS("Hello cruel world!")); + } + + // Test if exceptions are thrown correctly. +#ifndef TEST_HAS_NO_EXCEPTIONS + if (!std::is_constant_evaluated()) { + { // With a position that is out of range. + try { + std::ignore = sv.subview(sv.size() + 1); + assert(false); + } catch ([[maybe_unused]] const std::out_of_range& ex) { + LIBCPP_ASSERT(std::string(ex.what()) == "string_view::substr"); + } catch (...) { + assert(false); + } + } + + { // With a position that is out of range and a 0 character length. + try { + std::ignore = sv.subview(sv.size() + 1, 0); + assert(false); + } catch ([[maybe_unused]] const std::out_of_range& ex) { + LIBCPP_ASSERT(std::string(ex.what()) == "string_view::substr"); + } catch (...) { + assert(false); + } + } + + { // With a position that is out of range and a some character length. + try { + std::ignore = sv.subview(sv.size() + 1, 1); + assert(false); + } catch ([[maybe_unused]] const std::out_of_range& ex) { + LIBCPP_ASSERT(std::string(ex.what()) == "string_view::substr"); + } catch (...) { + assert(false); + } + } + } +#endif +} + +template +constexpr void test() { + test>(); + test>(); + test>(); + test>(); + + test>(); + test>(); + test>(); + test>(); +} + +constexpr bool test() { + types::for_each(types::character_types(), [] { test(); }); + + return true; +} + +int main(int, char**) { + test(); + static_assert(test()); + + return 0; +} diff --git a/libcxx/test/std/time/time.duration/time.duration.alg/abs.compile.fail.cpp b/libcxx/test/std/time/time.duration/time.duration.alg/abs.compile.fail.cpp deleted file mode 100644 index 8d807c7a9b395..0000000000000 --- a/libcxx/test/std/time/time.duration/time.duration.alg/abs.compile.fail.cpp +++ /dev/null @@ -1,28 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++03, c++11, c++14 -// - -// ceil - -// template -// constexpr duration abs(duration d) - -// This function shall not participate in overload resolution unless numeric_limits::is_signed is true. - -#include - -typedef std::chrono::duration unsigned_secs; - -int main(int, char**) -{ - std::chrono::abs(unsigned_secs(0)); - - return 0; -} diff --git a/libcxx/test/std/time/time.duration/time.duration.alg/abs.pass.cpp b/libcxx/test/std/time/time.duration/time.duration.alg/abs.pass.cpp index f95d42b52638e..42f4323b5bd27 100644 --- a/libcxx/test/std/time/time.duration/time.duration.alg/abs.pass.cpp +++ b/libcxx/test/std/time/time.duration/time.duration.alg/abs.pass.cpp @@ -19,9 +19,19 @@ #include #include #include +#include #include "test_macros.h" +template +inline constexpr bool has_abs_v = false; + +template +inline constexpr bool has_abs_v()))> = true; + +static_assert(has_abs_v); +static_assert(!has_abs_v>); + template void test(const Duration& f, const Duration& d) diff --git a/libcxx/test/std/time/time.point/time.point.cast/ceil.pass.cpp b/libcxx/test/std/time/time.point/time.point.cast/ceil.pass.cpp index 58bcf73234f91..4e383ba923759 100644 --- a/libcxx/test/std/time/time.point/time.point.cast/ceil.pass.cpp +++ b/libcxx/test/std/time/time.point/time.point.cast/ceil.pass.cpp @@ -21,6 +21,15 @@ #include "test_macros.h" +template +inline constexpr bool has_ceil_v = false; + +template +inline constexpr bool has_ceil_v(std::chrono::system_clock::now()))> = true; + +static_assert(has_ceil_v); +static_assert(!has_ceil_v); + template void test(const FromDuration& df, const ToDuration& d) diff --git a/libcxx/test/std/time/time.point/time.point.cast/floor.compile.fail.cpp b/libcxx/test/std/time/time.point/time.point.cast/floor.compile.fail.cpp deleted file mode 100644 index 12b1dec9fd509..0000000000000 --- a/libcxx/test/std/time/time.point/time.point.cast/floor.compile.fail.cpp +++ /dev/null @@ -1,27 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++03, c++11, c++14 -// - -// floor - -// template -// time_point -// floor(const time_point& t); - -// ToDuration shall be an instantiation of duration. 
- -#include - -int main(int, char**) -{ - std::chrono::floor(std::chrono::system_clock::now()); - - return 0; -} diff --git a/libcxx/test/std/time/time.point/time.point.cast/floor.pass.cpp b/libcxx/test/std/time/time.point/time.point.cast/floor.pass.cpp index e53ce86869f7a..69e4d096b7441 100644 --- a/libcxx/test/std/time/time.point/time.point.cast/floor.pass.cpp +++ b/libcxx/test/std/time/time.point/time.point.cast/floor.pass.cpp @@ -21,6 +21,15 @@ #include "test_macros.h" +template +inline constexpr bool has_floor_v = false; + +template +inline constexpr bool has_floor_v(std::chrono::system_clock::now()))> = true; + +static_assert(has_floor_v); +static_assert(!has_floor_v); + template void test(const FromDuration& df, const ToDuration& d) diff --git a/libcxx/test/std/time/time.point/time.point.cast/round.pass.cpp b/libcxx/test/std/time/time.point/time.point.cast/round.pass.cpp index 4523abd27d035..74f05f72daf7c 100644 --- a/libcxx/test/std/time/time.point/time.point.cast/round.pass.cpp +++ b/libcxx/test/std/time/time.point/time.point.cast/round.pass.cpp @@ -21,6 +21,15 @@ #include "test_macros.h" +template +inline constexpr bool has_round_v = false; + +template +inline constexpr bool has_round_v(std::chrono::system_clock::now()))> = true; + +static_assert(has_round_v); +static_assert(!has_round_v); + template void test(const FromDuration& df, const ToDuration& d) diff --git a/libcxx/test/std/time/time.point/time.point.cast/time_point_cast.pass.cpp b/libcxx/test/std/time/time.point/time.point.cast/time_point_cast.pass.cpp index 0d82e2db8b7b7..d82cf42c145f9 100644 --- a/libcxx/test/std/time/time.point/time.point.cast/time_point_cast.pass.cpp +++ b/libcxx/test/std/time/time.point/time.point.cast/time_point_cast.pass.cpp @@ -21,6 +21,16 @@ #include "test_macros.h" +template +struct has_time_point_cast : std::false_type {}; + +template +struct has_time_point_cast(std::chrono::system_clock::now()))> + : std::true_type {}; + +static_assert(has_time_point_cast::value, ""); +static_assert(!has_time_point_cast::value, ""); + template void test(const FromDuration& df, const ToDuration& d) diff --git a/libcxx/test/std/time/time.point/time.point.cast/toduration.compile.fail.cpp b/libcxx/test/std/time/time.point/time.point.cast/toduration.compile.fail.cpp deleted file mode 100644 index c16492f730a17..0000000000000 --- a/libcxx/test/std/time/time.point/time.point.cast/toduration.compile.fail.cpp +++ /dev/null @@ -1,29 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -// - -// time_point - -// template -// time_point -// time_point_cast(const time_point& t); - -// ToDuration shall be an instantiation of duration. 
- -#include - -int main(int, char**) -{ - typedef std::chrono::system_clock Clock; - typedef std::chrono::time_point FromTimePoint; - typedef std::chrono::time_point ToTimePoint; - std::chrono::time_point_cast(FromTimePoint(std::chrono::milliseconds(3))); - - return 0; -} diff --git a/libcxx/test/std/utilities/expected/expected.expected/assign/assign.U.pass.cpp b/libcxx/test/std/utilities/expected/expected.expected/assign/assign.U.pass.cpp index 807a8af5bb5f6..795fa77e8338c 100644 --- a/libcxx/test/std/utilities/expected/expected.expected/assign/assign.U.pass.cpp +++ b/libcxx/test/std/utilities/expected/expected.expected/assign/assign.U.pass.cpp @@ -325,6 +325,44 @@ constexpr bool test() { } } + // Check move constructor selection + { + struct MoveOnlyMulti { + bool used_move1 = false; + bool used_move2 = false; + + constexpr MoveOnlyMulti() = default; + constexpr MoveOnlyMulti(const MoveOnlyMulti&) = delete; + constexpr MoveOnlyMulti& operator=(const MoveOnlyMulti&) = delete; + constexpr MoveOnlyMulti& operator=(MoveOnlyMulti&&) { + used_move1 = true; + return *this; + } + constexpr MoveOnlyMulti& operator=(const MoveOnlyMulti&&) { + used_move2 = true; + return *this; + }; + constexpr MoveOnlyMulti(MoveOnlyMulti&&) : used_move1(true) {} + constexpr MoveOnlyMulti(const MoveOnlyMulti&&) : used_move2(true) {} + }; + + { + MoveOnlyMulti t{}; + std::expected e1(std::unexpect); + static_assert(std::is_same_v); + e1 = {std::move(t)}; + assert(e1.value().used_move1); + } + { + const MoveOnlyMulti t{}; + std::expected e1(std::unexpect); + static_assert(std::is_same_v); + // _Up = remove_cv_t --> should use MoveOnlyMulti(MoveOnlyMulti&&) + e1 = {std::move(t)}; + assert(e1.value().used_move1); + } + } + return true; } diff --git a/libcxx/test/std/utilities/expected/expected.expected/ctor/ctor.u.pass.cpp b/libcxx/test/std/utilities/expected/expected.expected/ctor/ctor.u.pass.cpp index 13c0da27bc533..fe664dfc97cfe 100644 --- a/libcxx/test/std/utilities/expected/expected.expected/ctor/ctor.u.pass.cpp +++ b/libcxx/test/std/utilities/expected/expected.expected/ctor/ctor.u.pass.cpp @@ -80,6 +80,17 @@ struct CopyOnly { friend constexpr bool operator==(const CopyOnly& mi, int ii) { return mi.i == ii; } }; +struct MoveOnly2 { + int j; + bool used_move1 = false; + bool used_move2 = false; + + constexpr explicit MoveOnly2(int jj) : j(jj) {} + constexpr MoveOnly2(const MoveOnly2&) = delete; + constexpr MoveOnly2(MoveOnly2&& m) : j(m.j), used_move1(true) {} + constexpr MoveOnly2(const MoveOnly2&& m) : j(m.j), used_move2(true) {} +}; + struct BaseError {}; struct DerivedError : BaseError {}; @@ -164,6 +175,22 @@ constexpr bool test() { assert(e2.has_value()); assert(!e2.value()); // yes, e2 holds "false" since LWG3836 } + + // Check move constructor selection + { + MoveOnly2 t{1}; + std::expected e1(std::move(t)); + assert(e1.has_value()); + assert(e1.value().used_move1 == true); + assert(e1.value().j == 1); + } + { + const MoveOnly2 t2{2}; + std::expected e1(std::move(t2)); + assert(e1.has_value()); + assert(e1.value().used_move2 == true); + assert(e1.value().j == 2); + } return true; } diff --git a/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/is_implicit_lifetime.pass.cpp b/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/is_implicit_lifetime.pass.cpp index a68f3f40e3647..5264e7700e3d9 100644 --- a/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/is_implicit_lifetime.pass.cpp +++ b/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/is_implicit_lifetime.pass.cpp @@ -17,9 
+17,12 @@ #include #include +#include +#include #include #include #include +#include #include "test_macros.h" #include "type_algorithms.h" @@ -216,8 +219,16 @@ constexpr bool test() { // C++ standard library types + // These types are guaranteed to be implicit-lifetime. + test_is_implicit_lifetime>(); + test_is_implicit_lifetime>(); + test_is_implicit_lifetime>(); + +#ifdef _LIBCPP_VERSION + // These types should be implicit-lifetime, but they are not guaranteed to be so. test_is_implicit_lifetime>(); test_is_implicit_lifetime>(); +#endif // Standard C23 types diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/U.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/U.pass.cpp index a5ee602ab7bce..a90fecfd075fe 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/U.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.ctor/U.pass.cpp @@ -59,7 +59,8 @@ constexpr bool explicit_conversion(Input&& in, const Expect& v) static_assert(!std::is_constructible::value, ""); static_assert(!std::is_constructible::value, ""); optional opt(std::forward(in)); - return opt && *opt == static_cast(v); + optional opt2{std::forward(in)}; + return opt && *opt == static_cast(v) && (opt2 && *opt2 == static_cast(v)); } void test_implicit() @@ -83,6 +84,11 @@ void test_implicit() using T = TestTypes::TestType; assert(implicit_conversion(3, T(3))); } + { + using T = TestTypes::TestType; + optional opt({3}); + assert(opt && *opt == static_cast(3)); + } { using O = optional; static_assert(!test_convertible(), ""); diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.observe/value_or.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.observe/value_or.pass.cpp index 4f9b6993c6f4f..8c063ae1a799c 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.observe/value_or.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.observe/value_or.pass.cpp @@ -40,6 +40,12 @@ struct X {return x.i_ == y.i_;} }; +struct Z { + int i_, j_; + constexpr Z(int i, int j) : i_(i), j_(j) {} + friend constexpr bool operator==(const Z& z1, const Z& z2) { return z1.i_ == z2.i_ && z1.j_ == z2.j_; } +}; + constexpr int test() { { @@ -64,6 +70,16 @@ constexpr int test() assert(std::move(opt).value_or(Y(3)) == 4); assert(!opt); } + { + optional opt; + assert(std::move(opt).value_or({Y(3)}) == 4); + assert(!opt); + } + { + optional opt; + assert((std::move(opt).value_or({2, 3}) == Z{2, 3})); + assert(!opt); + } return 0; } diff --git a/libcxx/test/std/utilities/optional/optional.object/optional.object.observe/value_or_const.pass.cpp b/libcxx/test/std/utilities/optional/optional.object/optional.object.observe/value_or_const.pass.cpp index cf782f1137876..ec42890a3b995 100644 --- a/libcxx/test/std/utilities/optional/optional.object/optional.object.observe/value_or_const.pass.cpp +++ b/libcxx/test/std/utilities/optional/optional.object/optional.object.observe/value_or_const.pass.cpp @@ -75,6 +75,10 @@ int main(int, char**) const optional opt; assert(opt.value_or(Y(3)) == 4); } + { + const optional opt; + assert(opt.value_or({Y(3)}) == 4); + } return 0; } diff --git a/libcxx/test/support/count_new.h b/libcxx/test/support/count_new.h index c8169d3acceab..f175bc2ffcd44 100644 --- a/libcxx/test/support/count_new.h +++ b/libcxx/test/support/count_new.h @@ -626,7 +626,11 @@ struct RequireAllocationGuard { void requireExactly(std::size_t 
N) { m_req_alloc = N; m_exactly = true; } ~RequireAllocationGuard() { +#ifdef ALLOW_MISMATCHING_LIBRRARY_INTERNAL_ALLOCATIONS + ASSERT_WITH_LIBRARY_INTERNAL_ALLOCATIONS(globalMemCounter.checkOutstandingNewEq(static_cast(m_outstanding_new_on_init))); +#else assert(globalMemCounter.checkOutstandingNewEq(static_cast(m_outstanding_new_on_init))); +#endif std::size_t Expect = m_new_count_on_init + m_req_alloc; assert(globalMemCounter.checkNewCalledEq(static_cast(Expect)) || (!m_exactly && globalMemCounter.checkNewCalledGreaterThan(static_cast(Expect)))); diff --git a/libcxx/utils/benchmark-historical b/libcxx/utils/benchmark-historical index 7bba2128cf4d9..c1f9d11a6e800 100755 --- a/libcxx/utils/benchmark-historical +++ b/libcxx/utils/benchmark-historical @@ -91,13 +91,13 @@ def main(argv): subprocess.call(test_cmd) output_file.parent.mkdir(parents=True, exist_ok=True) + mode = 'a' if args.existing == 'append' else 'w' if output_file.exists() and args.existing == 'append': logging.info(f'Appending to existing data for {commit}') - mode = 'a' - else: - assert args.existing == 'overwrite' + elif output_file.exists() and args.existing == 'overwrite': logging.info(f'Overwriting existing data for {commit}') - mode = 'w' + else: + logging.info(f'Writing data for {commit}') with open(output_file, mode) as out: subprocess.check_call([(PARENT_DIR / 'consolidate-benchmarks'), build_dir], stdout=out) diff --git a/libcxx/utils/compare-benchmarks b/libcxx/utils/compare-benchmarks index c56f5581b0ae7..18a448ab434c7 100755 --- a/libcxx/utils/compare-benchmarks +++ b/libcxx/utils/compare-benchmarks @@ -1,32 +1,40 @@ #!/usr/bin/env python3 import argparse +import functools +import pathlib import re import statistics import sys +import tempfile -import plotly +import numpy +import pandas +import plotly.express import tabulate -def parse_lnt(lines): +def parse_lnt(lines, aggregate=statistics.median): """ - Parse lines in LNT format and return a dictionnary of the form: + Parse lines in LNT format and return a list of dictionnaries of the form: - { - 'benchmark1': { - 'metric1': [float], - 'metric2': [float], + [ + { + 'benchmark': , + : float, + : float, ... }, - 'benchmark2': { - 'metric1': [float], - 'metric2': [float], + { + 'benchmark': , + : float, + : float, ... }, ... - } + ] - Each metric may have multiple values. + If a metric has multiple values associated to it, they are aggregated into a single + value using the provided aggregation function. """ results = {} for line in lines: @@ -35,90 +43,123 @@ def parse_lnt(lines): continue (identifier, value) = line.split(' ') - (name, metric) = identifier.split('.') - if name not in results: - results[name] = {} - if metric not in results[name]: - results[name][metric] = [] - results[name][metric].append(float(value)) - return results - -def plain_text_comparison(benchmarks, baseline, candidate): + (benchmark, metric) = identifier.split('.') + if benchmark not in results: + results[benchmark] = {'benchmark': benchmark} + + entry = results[benchmark] + if metric not in entry: + entry[metric] = [] + entry[metric].append(float(value)) + + for (bm, entry) in results.items(): + for metric in entry: + if isinstance(entry[metric], list): + entry[metric] = aggregate(entry[metric]) + + return list(results.values()) + +def plain_text_comparison(data, metric, baseline_name=None, candidate_name=None): """ - Create a tabulated comparison of the baseline and the candidate. + Create a tabulated comparison of the baseline and the candidate for the given metric. 
""" - headers = ['Benchmark', 'Baseline', 'Candidate', 'Difference', '% Difference'] + # Compute additional info in new columns. In text mode, we can assume that we are + # comparing exactly two data sets (suffixed _0 and _1). + data['difference'] = data[f'{metric}_1'] - data[f'{metric}_0'] + data['percent'] = 100 * (data['difference'] / data[f'{metric}_0']) + + data = data.replace(numpy.nan, None).sort_values(by='benchmark') # avoid NaNs in tabulate output + headers = ['Benchmark', baseline_name, candidate_name, 'Difference', '% Difference'] fmt = (None, '.2f', '.2f', '.2f', '.2f') - table = [] - for (bm, base, cand) in zip(benchmarks, baseline, candidate): - diff = (cand - base) if base and cand else None - percent = 100 * (diff / base) if base and cand else None - row = [bm, base, cand, diff, percent] - table.append(row) + table = data[['benchmark', f'{metric}_0', f'{metric}_1', 'difference', 'percent']].set_index('benchmark') return tabulate.tabulate(table, headers=headers, floatfmt=fmt, numalign='right') -def create_chart(benchmarks, baseline, candidate): +def create_chart(data, metric, subtitle=None, series_names=None): """ - Create a bar chart comparing 'baseline' and 'candidate'. + Create a bar chart comparing the given metric across the provided series. """ - figure = plotly.graph_objects.Figure() - figure.add_trace(plotly.graph_objects.Bar(x=benchmarks, y=baseline, name='Baseline')) - figure.add_trace(plotly.graph_objects.Bar(x=benchmarks, y=candidate, name='Candidate')) + data = data.sort_values(by='benchmark').rename(columns={f'{metric}_{i}': series_names[i] for i in range(len(series_names))}) + title = ' vs '.join(series_names) + figure = plotly.express.bar(data, title=title, subtitle=subtitle, x='benchmark', y=series_names, barmode='group') + figure.update_layout(xaxis_title='', yaxis_title='', legend_title='') return figure -def prepare_series(baseline, candidate, metric, aggregate=statistics.median): - """ - Prepare the data for being formatted or displayed as a chart. - - Metrics that have more than one value are aggregated using the given aggregation function. - """ - all_benchmarks = sorted(list(set(baseline.keys()) | set(candidate.keys()))) - baseline_series = [] - candidate_series = [] - for bm in all_benchmarks: - baseline_series.append(aggregate(baseline[bm][metric]) if bm in baseline and metric in baseline[bm] else None) - candidate_series.append(aggregate(candidate[bm][metric]) if bm in candidate and metric in candidate[bm] else None) - return (all_benchmarks, baseline_series, candidate_series) - def main(argv): parser = argparse.ArgumentParser( prog='compare-benchmarks', - description='Compare the results of two sets of benchmarks in LNT format.', - epilog='This script requires the `tabulate` and the `plotly` Python modules.') - parser.add_argument('baseline', type=argparse.FileType('r'), - help='Path to a LNT format file containing the benchmark results for the baseline.') - parser.add_argument('candidate', type=argparse.FileType('r'), - help='Path to a LNT format file containing the benchmark results for the candidate.') - parser.add_argument('--output', '-o', type=argparse.FileType('w'), default=sys.stdout, - help='Path of a file where to output the resulting comparison. 
Default to stdout.') + description='Compare the results of multiple sets of benchmarks in LNT format.', + epilog='This script depends on the modules listed in `libcxx/utils/requirements.txt`.') + parser.add_argument('files', type=argparse.FileType('r'), nargs='+', + help='Path to LNT format files containing the benchmark results to compare. In the text format, ' + 'exactly two files must be compared.') + parser.add_argument('--output', '-o', type=pathlib.Path, required=False, + help='Path of a file where to output the resulting comparison. If the output format is `text`, ' + 'default to stdout. If the output format is `chart`, default to a temporary file which is ' + 'opened automatically once generated, but not removed after creation.') parser.add_argument('--metric', type=str, default='execution_time', help='The metric to compare. LNT data may contain multiple metrics (e.g. code size, execution time, etc) -- ' - 'this option allows selecting which metric is being analyzed. The default is "execution_time".') + 'this option allows selecting which metric is being analyzed. The default is `execution_time`.') parser.add_argument('--filter', type=str, required=False, help='An optional regular expression used to filter the benchmarks included in the comparison. ' 'Only benchmarks whose names match the regular expression will be included.') parser.add_argument('--format', type=str, choices=['text', 'chart'], default='text', - help='Select the output format. "text" generates a plain-text comparison in tabular form, and "chart" ' - 'generates a self-contained HTML graph that can be opened in a browser. The default is text.') + help='Select the output format. `text` generates a plain-text comparison in tabular form, and `chart` ' + 'generates a self-contained HTML graph that can be opened in a browser. The default is `text`.') + parser.add_argument('--open', action='store_true', + help='Whether to automatically open the generated HTML file when finished. This option only makes sense ' + 'when the output format is `chart`.') + parser.add_argument('--series-names', type=str, required=False, + help='Optional comma-delimited list of names to use for the various series. By default, we use ' + 'Baseline and Candidate for two input files, and CandidateN for subsequent inputs.') + parser.add_argument('--subtitle', type=str, required=False, + help='Optional subtitle to use for the chart. This can be used to help identify the contents of the chart. 
' + 'This option cannot be used with the plain text output.') args = parser.parse_args(argv) - baseline = parse_lnt(args.baseline.readlines()) - candidate = parse_lnt(args.candidate.readlines()) + if args.format == 'text': + if len(args.files) != 2: + parser.error('--format=text requires exactly two input files to compare') + if args.subtitle is not None: + parser.error('Passing --subtitle makes no sense with --format=text') + if args.open: + parser.error('Passing --open makes no sense with --format=text') + + if args.series_names is None: + args.series_names = ['Baseline'] + if len(args.files) == 2: + args.series_names += ['Candidate'] + elif len(args.files) > 2: + args.series_names.extend(f'Candidate{n}' for n in range(1, len(args.files))) + else: + args.series_names = args.series_names.split(',') + if len(args.series_names) != len(args.files): + parser.error(f'Passed incorrect number of series names: got {len(args.series_names)} series names but {len(args.files)} inputs to compare') + + # Parse the raw LNT data and store each input in a dataframe + lnt_inputs = [parse_lnt(file.readlines()) for file in args.files] + inputs = [pandas.DataFrame(lnt).rename(columns={args.metric: f'{args.metric}_{i}'}) for (i, lnt) in enumerate(lnt_inputs)] - if args.filter is not None: - regex = re.compile(args.filter) - baseline = {k: v for (k, v) in baseline.items() if regex.search(k)} - candidate = {k: v for (k, v) in candidate.items() if regex.search(k)} + # Join the inputs into a single dataframe + data = functools.reduce(lambda a, b: a.merge(b, how='outer', on='benchmark'), inputs) - (benchmarks, baseline_series, candidate_series) = prepare_series(baseline, candidate, args.metric) + if args.filter is not None: + keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None] + data = data[data['benchmark'].isin(keeplist)] if args.format == 'chart': - figure = create_chart(benchmarks, baseline_series, candidate_series) - plotly.io.write_html(figure, file=args.output) + figure = create_chart(data, args.metric, subtitle=args.subtitle, series_names=args.series_names) + do_open = args.output is None or args.open + output = args.output or tempfile.NamedTemporaryFile(suffix='.html').name + plotly.io.write_html(figure, file=output, auto_open=do_open) else: - diff = plain_text_comparison(benchmarks, baseline_series, candidate_series) - args.output.write(diff) - args.output.write('\n') + diff = plain_text_comparison(data, args.metric, baseline_name=args.series_names[0], + candidate_name=args.series_names[1]) + diff += '\n' + if args.output is not None: + with open(args.output, 'w') as out: + out.write(diff) + else: + sys.stdout.write(diff) if __name__ == '__main__': main(sys.argv[1:]) diff --git a/libcxx/utils/find-rerun-candidates b/libcxx/utils/find-rerun-candidates new file mode 100755 index 0000000000000..5ac2644005aac --- /dev/null +++ b/libcxx/utils/find-rerun-candidates @@ -0,0 +1,242 @@ +#!/usr/bin/env python3 + +import argparse +import datetime +import functools +import os +import pathlib +import re +import statistics +import subprocess +import sys + +import git +import pandas +import tqdm + +@functools.total_ordering +class Commit: + """ + This class represents a commit inside a given Git repository. + """ + + def __init__(self, git_repo, sha): + self._git_repo = git_repo + self._sha = sha + + def __eq__(self, other): + """ + Return whether two commits refer to the same commit. 
+ + This doesn't take into account the content of the Git tree at those commits, only the + 'identity' of the commits themselves. + """ + return self.fullrev == other.fullrev + + def __lt__(self, other): + """ + Return whether a commit is an ancestor of another commit in the Git repository. + """ + # Is self._sha an ancestor of other._sha? + res = subprocess.run(['git', '-C', self._git_repo, 'merge-base', '--is-ancestor', self._sha, other._sha]) + if res.returncode not in (0, 1): + raise RuntimeError(f'Error when trying to obtain the commit order for {self._sha} and {other._sha}') + return res.returncode == 0 + + def __hash__(self): + """ + Return the full revision for this commit. + """ + return hash(self.fullrev) + + @functools.cache + def show(self, include_diff=False): + """ + Return the commit information equivalent to `git show` associated to this commit. + """ + cmd = ['git', '-C', self._git_repo, 'show', self._sha] + if not include_diff: + cmd.append('--no-patch') + return subprocess.check_output(cmd, text=True) + + @functools.cached_property + def shortrev(self): + """ + Return the shortened version of the given SHA. + """ + return subprocess.check_output(['git', '-C', self._git_repo, 'rev-parse', '--short', self._sha], text=True).strip() + + @functools.cached_property + def fullrev(self): + """ + Return the full SHA associated to this commit. + """ + return subprocess.check_output(['git', '-C', self._git_repo, 'rev-parse', self._sha], text=True).strip() + + @functools.cached_property + def commit_date(self): + """ + Return the date of the commit as a `datetime.datetime` object. + """ + repo = git.Repo(self._git_repo) + return datetime.datetime.fromtimestamp(repo.commit(self._sha).committed_date) + + def prefetch(self): + """ + Prefetch cached properties associated to this commit object. + + This makes it possible to control when time is spent recovering that information from Git for + e.g. better reporting to the user. + """ + self.commit_date + self.fullrev + self.shortrev + self.show() + + def __str__(self): + return self._sha + +def directory_path(string): + if os.path.isdir(string): + return pathlib.Path(string) + else: + raise NotADirectoryError(string) + +def parse_lnt(lines, aggregate=statistics.median): + """ + Parse lines in LNT format and return a list of dictionnaries of the form: + + [ + { + 'benchmark': , + : [float], + : [float], + 'data_points': int, + ... + }, + { + 'benchmark': , + : [float], + : [float], + 'data_points': int, + ... + }, + ... + ] + + If a metric has multiple values associated to it, they are aggregated into a single + value using the provided aggregation function. + """ + results = {} + for line in lines: + line = line.strip() + if not line: + continue + + (identifier, value) = line.split(' ') + (benchmark, metric) = identifier.split('.') + if benchmark not in results: + results[benchmark] = {'benchmark': benchmark} + + entry = results[benchmark] + if metric not in entry: + entry[metric] = [] + entry[metric].append(float(value)) + + for (bm, entry) in results.items(): + metrics = [key for key in entry if isinstance(entry[key], list)] + min_data_points = min(len(entry[metric]) for metric in metrics) + for metric in metrics: + entry[metric] = aggregate(entry[metric]) + entry['data_points'] = min_data_points + + return list(results.values()) + +def sorted_revlist(git_repo, commits): + """ + Return the list of commits sorted by their chronological order (from oldest to newest) in the + provided Git repository. 
Items earlier in the list are older than items later in the list. + """ + revlist_cmd = ['git', '-C', git_repo, 'rev-list', '--no-walk'] + list(commits) + revlist = subprocess.check_output(revlist_cmd, text=True).strip().splitlines() + return list(reversed(revlist)) + +def main(argv): + parser = argparse.ArgumentParser( + prog='find-rerun-candidates', + description='Find benchmarking data points that are good candidates for additional runs, to reduce noise.') + parser.add_argument('directory', type=directory_path, + help='Path to a valid directory containing benchmark data in LNT format, each file being named .lnt. ' + 'This is also the format generated by the `benchmark-historical` utility.') + parser.add_argument('--metric', type=str, default='execution_time', + help='The metric to analyze. LNT data may contain multiple metrics (e.g. code size, execution time, etc) -- ' + 'this option allows selecting which metric is analyzed for rerun candidates. The default is "execution_time".') + parser.add_argument('--filter', type=str, required=False, + help='An optional regular expression used to filter the benchmarks included in the analysis. ' + 'Only benchmarks whose names match the regular expression will be analyzed.') + parser.add_argument('--outlier-threshold', metavar='FLOAT', type=float, default=0.1, + help='Relative difference from the previous points for considering a data point as an outlier. This threshold is ' + 'expressed as a floating point number, e.g. 0.25 will detect points that differ by more than 25%% from their ' + 'previous result.') + parser.add_argument('--data-points-threshold', type=int, required=False, + help='Number of data points above which an outlier is not considered an outlier. If an outlier has more than ' + 'that number of data points yet its relative difference is above the threshold, it is not considered an ' + 'outlier. This can be used to re-run noisy data points until we have at least N samples, at which point ' + 'we consider the data to be accurate, even if the result is beyond the threshold. By default, there is ' + 'no limit on the number of data points.') + parser.add_argument('--git-repo', type=directory_path, default=pathlib.Path(os.getcwd()), + help='Path to the git repository to use for ordering commits in time. ' + 'By default, the current working directory is used.') + args = parser.parse_args(argv) + + # Extract benchmark data from the directory. + data = {} + files = [f for f in args.directory.glob('*.lnt')] + for file in tqdm.tqdm(files, desc='Parsing LNT files'): + rows = parse_lnt(file.read_text().splitlines()) + (commit, _) = os.path.splitext(os.path.basename(file)) + commit = Commit(args.git_repo, commit) + data[commit] = rows + + # Obtain commit information which is then cached throughout the program. Do this + # eagerly so we can provide a progress bar. + for commit in tqdm.tqdm(data.keys(), desc='Prefetching Git information'): + commit.prefetch() + + # Create a dataframe from the raw data and add some columns to it: + # - 'commit' represents the Commit object associated to the results in that row + # - `revlist_order` represents the order of the commit within the Git repository. + revlist = sorted_revlist(args.git_repo, [c.fullrev for c in data.keys()]) + data = pandas.DataFrame([row | {'commit': c} for (c, rows) in data.items() for row in rows]) + data = data.join(pandas.DataFrame([{'revlist_order': revlist.index(c.fullrev)} for c in data['commit']])) + + # Filter the benchmarks if needed. 
+ if args.filter is not None: + keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None] + data = data[data['benchmark'].isin(keeplist)] + + # Detect outliers by selecting all benchmarks whose change percentage is beyond the threshold. + # If we have a max number of points, also take that into account. + if args.data_points_threshold is not None: + print(f'Generating outliers with more than {args.outlier_threshold * 100}% relative difference and less than {args.data_points_threshold} data points') + else: + print(f'Generating outliers with more than {args.outlier_threshold * 100}% relative difference') + + overall = set() + for (benchmark, series) in data.sort_values(by='revlist_order').groupby('benchmark'): + pct_change = series[args.metric].pct_change() + outliers = series[pct_change.abs() > args.outlier_threshold] + if args.data_points_threshold is not None: + outliers = outliers[outliers['data_points'] < args.data_points_threshold] + outliers = set(outliers['commit']) + overall |= outliers + if len(outliers) > 0: + print(f'{benchmark}: {" ".join(c.shortrev for c in outliers)}') + + if len(overall) > 0: + print(f'Summary: {" ".join(c.shortrev for c in overall)}') + else: + print(f'No outliers') + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/libcxx/utils/generate_feature_test_macro_components.py b/libcxx/utils/generate_feature_test_macro_components.py index c1e579c775746..5d469d4914b0b 100644 --- a/libcxx/utils/generate_feature_test_macro_components.py +++ b/libcxx/utils/generate_feature_test_macro_components.py @@ -1334,6 +1334,11 @@ def add_version_header(tc): "values": {"c++23": 202110}, "headers": ["string"], }, + { + "name": "__cpp_lib_string_subview", + "values": {"c++26": 202506}, + "headers": ["string", "string_view"], + }, { "name": "__cpp_lib_string_udls", "values": {"c++14": 201304}, diff --git a/libcxx/utils/libcxx/test/format.py b/libcxx/utils/libcxx/test/format.py index 5765afec399cf..c9dffd1bb7971 100644 --- a/libcxx/utils/libcxx/test/format.py +++ b/libcxx/utils/libcxx/test/format.py @@ -92,6 +92,7 @@ def parseScript(test, preamble): # errors, which doesn't make sense for clang-verify tests because we may want to check # for specific warning diagnostics. _checkBaseSubstitutions(substitutions) + substitutions.append(("%T", tmpDir)) substitutions.append( ("%{build}", "%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe") ) diff --git a/libcxx/utils/libcxx/test/params.py b/libcxx/utils/libcxx/test/params.py index 6f013a75195a8..c02d6df1c47a4 100644 --- a/libcxx/utils/libcxx/test/params.py +++ b/libcxx/utils/libcxx/test/params.py @@ -75,6 +75,9 @@ # We're not annotating all the APIs, since that's a lot of annotations compared to how many we actually care about "-Wno-nullability-completeness", + + # Technically not a warning flag, but might as well be: + "-flax-vector-conversions=none", ] _allStandards = ["c++03", "c++11", "c++14", "c++17", "c++20", "c++23", "c++26"] diff --git a/libcxx/utils/parse-spec-results b/libcxx/utils/parse-spec-results index 3aff206f8959c..2c3c279622ad5 100755 --- a/libcxx/utils/parse-spec-results +++ b/libcxx/utils/parse-spec-results @@ -58,7 +58,10 @@ def main(argv): 'sure to use appropriate quoting for header names that contain spaces. This option only makes sense ' 'when the output format is CSV.') parser.add_argument('--keep-not-run', action='store_true', - help='Keep entries whose \'Base Status\' is marked as \'NR\', aka \'Not Run\'. 
By default, such entries are discarded.') + help='Keep entries whose "Base Status" is marked as "NR" (aka "Not Run"). By default, such entries are discarded.') + parser.add_argument('--keep-failed', action='store_true', + help='Keep entries whose "Base Status" is marked as "CE" (aka "Compilation Error") or "RE" (aka "Runtime Error"). ' + 'By default, such entries are discarded.') args = parser.parse_args(argv) if args.table == 'full': @@ -76,10 +79,12 @@ def main(argv): headers = parsed_headers rows.extend(parsed_rows) - # Remove rows that were not run unless we were asked to keep them + # Remove rows that were not run (or failed) unless we were asked to keep them + status = headers.index('Base Status') if not args.keep_not_run: - not_run = headers.index('Base Status') - rows = [row for row in rows if row[not_run] != 'NR'] + rows = [row for row in rows if row[status] != 'NR'] + if not args.keep_failed: + rows = [row for row in rows if row[status] not in ('CE', 'RE')] if args.extract is not None: if args.output_format != 'csv': diff --git a/libcxx/utils/requirements.txt b/libcxx/utils/requirements.txt index 0c76714849281..1ec769c8693dc 100644 --- a/libcxx/utils/requirements.txt +++ b/libcxx/utils/requirements.txt @@ -1,3 +1,7 @@ +GitPython +numpy +pandas plotly +statsmodels tabulate tqdm diff --git a/libcxx/utils/visualize-historical b/libcxx/utils/visualize-historical index 7bea83ebfbf00..114c7e81f29e7 100755 --- a/libcxx/utils/visualize-historical +++ b/libcxx/utils/visualize-historical @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import argparse +import datetime import functools import os import pathlib @@ -10,7 +11,10 @@ import subprocess import sys import tempfile +import git +import pandas import plotly +import plotly.express import tqdm @functools.total_ordering @@ -48,6 +52,7 @@ class Commit: """ return hash(self.fullrev) + @functools.cache def show(self, include_diff=False): """ Return the commit information equivalent to `git show` associated to this commit. @@ -71,6 +76,14 @@ class Commit: """ return subprocess.check_output(['git', '-C', self._git_repo, 'rev-parse', self._sha], text=True).strip() + @functools.cached_property + def commit_date(self): + """ + Return the date of the commit as a `datetime.datetime` object. + """ + repo = git.Repo(self._git_repo) + return datetime.datetime.fromtimestamp(repo.commit(self._sha).committed_date) + def prefetch(self): """ Prefetch cached properties associated to this commit object. @@ -78,8 +91,10 @@ class Commit: This makes it possible to control when time is spent recovering that information from Git for e.g. better reporting to the user. """ - self.shortrev + self.commit_date self.fullrev + self.shortrev + self.show() def __str__(self): return self._sha @@ -97,25 +112,21 @@ def truncate_lines(string, n, marker=None): assert len(truncated) <= n, "broken post-condition" return '\n'.join(truncated) -def create_plot(commits, benchmarks, data): +def create_plot(data, metric, subtitle=None): """ - Create a plot object showing the evolution of each benchmark throughout the given commits. + Create a plot object showing the evolution of each benchmark throughout the given commits for + the given metric. """ - figure = plotly.graph_objects.Figure(layout_title_text=f'{commits[0].shortrev} to {commits[-1].shortrev}') - - # Create the X axis and the hover information - x_axis = [commit.shortrev for commit in commits] - hover_info = [truncate_lines(commit.show(), 30, marker='...').replace('\n', '
') for commit in commits]
-
-    # For each benchmark, get the metric for that benchmark for each commit.
-    #
-    # Some commits may not have any data associated to a benchmark (e.g. runtime or compilation error).
-    # Use None, which is handled properly by plotly.
-    for benchmark in benchmarks:
-        series = [commit_data.get(benchmark, None) for commit_data in data]
-        scatter = plotly.graph_objects.Scatter(x=x_axis, y=series, text=hover_info, name=benchmark)
-        figure.add_trace(scatter)
-
+    data = data.sort_values(by=['revlist_order', 'benchmark'])
+    revlist = pandas.unique(data['commit']) # list of all commits in chronological order
+    hover_info = {c: truncate_lines(c.show(), 30, marker='...').replace('\n', '<br>') for c in revlist}
+    figure = plotly.express.scatter(data, title=f"{revlist[0].shortrev} to {revlist[-1].shortrev}",
+                                    subtitle=subtitle,
+                                    x='revlist_order', y=metric,
+                                    symbol='benchmark',
+                                    color='benchmark',
+                                    hover_name=[hover_info[c] for c in data['commit']],
+                                    trendline="lowess")
     return figure
 
 def directory_path(string):
@@ -124,25 +135,28 @@ def directory_path(string):
     else:
         raise NotADirectoryError(string)
 
-def parse_lnt(lines):
+def parse_lnt(lines, aggregate=statistics.median):
     """
-    Parse lines in LNT format and return a dictionnary of the form:
+    Parse lines in LNT format and return a list of dictionaries of the form:
 
-    {
-        'benchmark1': {
-            'metric1': [float],
-            'metric2': [float],
+    [
+        {
+            'benchmark': <name>,
+            <metric1>: float,
+            <metric2>: float,
             ...
         },
-        'benchmark2': {
-            'metric1': [float],
-            'metric2': [float],
+        {
+            'benchmark': <name>,
+            <metric1>: float,
+            <metric2>: float,
             ...
         },
         ...
-    }
+    ]
 
-    Each metric may have multiple values.
+    If a metric has multiple values associated with it, they are aggregated into a single
+    value using the provided aggregation function.
     """
     results = {}
     for line in lines:
@@ -151,36 +165,30 @@ def parse_lnt(lines):
             continue
 
         (identifier, value) = line.split(' ')
-        (name, metric) = identifier.split('.')
-        if name not in results:
-            results[name] = {}
-        if metric not in results[name]:
-            results[name][metric] = []
-        results[name][metric].append(float(value))
-    return results
-
-def find_outliers(xs, ys, threshold):
-    """
-    Given a list of x coordinates and a list of y coordinates, find (x, y) pairs where the y
-    value differs from the previous y value by more than the given relative difference.
+        (benchmark, metric) = identifier.split('.')
+        if benchmark not in results:
+            results[benchmark] = {'benchmark': benchmark}
 
-    The threshold is given as a floating point representing a percentage, e.g. 0.25 will result in
-    detecting points that differ from their previous value by more than 25%. The difference is in
-    absolute value, i.e. both positive and negative spikes are detected.
-    """
-    outliers = []
-    previous = None
-    for (x, y) in zip(xs, ys):
-        if y is None: # skip data points that don't contain values
-            continue
+        entry = results[benchmark]
+        if metric not in entry:
+            entry[metric] = []
+        entry[metric].append(float(value))
 
-        if previous is not None:
-            diff = y - previous
-            if (diff / previous) > threshold:
-                outliers.append((x, y))
-        previous = y
-    return outliers
+    for (bm, entry) in results.items():
+        for metric in entry:
+            if isinstance(entry[metric], list):
+                entry[metric] = aggregate(entry[metric])
+    return list(results.values())
+
+def sorted_revlist(git_repo, commits):
+    """
+    Return the list of commits sorted by their chronological order (from oldest to newest) in the
+    provided Git repository. Items earlier in the list are older than items later in the list.
+    """
+    revlist_cmd = ['git', '-C', git_repo, 'rev-list', '--no-walk'] + list(commits)
+    revlist = subprocess.check_output(revlist_cmd, text=True).strip().splitlines()
+    return list(reversed(revlist))
 
 def main(argv):
     parser = argparse.ArgumentParser(
@@ -188,7 +196,7 @@ def main(argv):
         description='Visualize historical data in LNT format. This program generates a HTML file that embeds an '
                     'interactive plot with the provided data. The HTML file can then be opened in a browser to '
                     'visualize the data as a chart.',
-        epilog='This script depends on the `plotly` and the `tqdm` Python modules.')
+        epilog='This script depends on the modules listed in `libcxx/utils/requirements.txt`.')
     parser.add_argument('directory', type=directory_path,
                         help='Path to a valid directory containing benchmark data in LNT format, each file being named <commit>.lnt. '
                              'This is also the format generated by the `benchmark-historical` utility.')
@@ -205,13 +213,8 @@ def main(argv):
                              'Since the chart is interactive, it generally makes most sense to include all the benchmarks '
                              'and to then filter them in the browser, but in some cases producing a chart with a reduced '
                              'number of data series is useful.')
-    parser.add_argument('--find-outliers', metavar='FLOAT', type=float, required=False,
-                        help='When building the chart, detect commits that show a large spike (more than the given relative threshold) '
-                             'with the previous result and print those to standard output. This can be used to generate a list of '
-                             'potential outliers that we might want to re-generate the data for. The threshold is expressed as a '
-                             'floating point number, e.g. 0.25 will detect points that differ by more than 25%% from their previous '
-                             'result. This option respects --filter, i.e. only benchmarks that match the filter will be analyzed for '
-                             'outliers.')
+    parser.add_argument('--subtitle', type=str, required=False,
+                        help='Optional subtitle for the chart. This can be used to help identify the contents of the chart.')
     parser.add_argument('--git-repo', type=directory_path, default=pathlib.Path(os.getcwd()),
                         help='Path to the git repository to use for ordering commits in time. '
                              'By default, the current working directory is used.')
@@ -220,50 +223,36 @@ def main(argv):
                              'the resulting benchmark is opened automatically by default.')
     args = parser.parse_args(argv)
 
-    # Extract benchmark data from the directory and keep only the metric we're interested in.
-    #
-    # Some data points may have multiple values associated to the metric (e.g. if we performed
-    # multiple runs to reduce noise), in which case we aggregate them using a median.
-    historical_data = []
+    # Extract benchmark data from the directory.
+    data = {}
     files = [f for f in args.directory.glob('*.lnt')]
     for file in tqdm.tqdm(files, desc='Parsing LNT files'):
+        rows = parse_lnt(file.read_text().splitlines())
         (commit, _) = os.path.splitext(os.path.basename(file))
         commit = Commit(args.git_repo, commit)
-        with open(file, 'r') as f:
-            lnt_data = parse_lnt(f.readlines())
-            commit_data = {}
-            for (bm, metrics) in lnt_data.items():
-                commit_data[bm] = statistics.median(metrics[args.metric]) if args.metric in metrics else None
-        historical_data.append((commit, commit_data))
+        data[commit] = rows
 
     # Obtain commit information which is then cached throughout the program. Do this
     # eagerly so we can provide a progress bar.
-    for (commit, _) in tqdm.tqdm(historical_data, desc='Prefetching Git information'):
+    for commit in tqdm.tqdm(data.keys(), desc='Prefetching Git information'):
         commit.prefetch()
 
-    # Sort the data based on the ordering of commits inside the provided Git repository
-    historical_data.sort(key=lambda x: x[0])
+    # Create a dataframe from the raw data and add some columns to it:
+    # - 'commit' represents the Commit object associated with the results in that row
+    # - `revlist_order` represents the order of the commit within the Git repository.
+    # - `date` represents the commit date
+    revlist = sorted_revlist(args.git_repo, [c.fullrev for c in data.keys()])
+    data = pandas.DataFrame([row | {'commit': c} for (c, rows) in data.items() for row in rows])
+    data = data.join(pandas.DataFrame([{'revlist_order': revlist.index(c.fullrev)} for c in data['commit']]))
+    data = data.join(pandas.DataFrame([{'date': c.commit_date} for c in data['commit']]))
 
-    # Filter the benchmarks if needed
-    benchmarks = {b for (_, commit_data) in historical_data for b in commit_data.keys()}
+    # Filter the benchmarks if needed.
     if args.filter is not None:
-        regex = re.compile(args.filter)
-        benchmarks = {b for b in benchmarks if regex.search(b)}
-
-    # If requested, perform a basic pass to detect outliers
-    if args.find_outliers is not None:
-        threshold = args.find_outliers
-        outliers = set()
-        for benchmark in benchmarks:
-            commits = [commit for (commit, _) in historical_data]
-            series = [commit_data.get(benchmark, None) for (_, commit_data) in historical_data]
-            outliers |= set(commit for (commit, _) in find_outliers(commits, series, threshold=threshold))
-        print(f'Outliers (more than {threshold * 100}%): {" ".join(str(x) for x in outliers)}')
-
-    # Plot the data for all the required benchmarks
-    figure = create_plot([commit for (commit, _) in historical_data],
-                         sorted(list(benchmarks)),
-                         [commit_data for (_, commit_data) in historical_data])
+        keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None]
+        data = data[data['benchmark'].isin(keeplist)]
+
+    # Plot the data for all the required benchmarks.
+    figure = create_plot(data, args.metric, subtitle=args.subtitle)
     do_open = args.output is None or args.open
     output = args.output if args.output is not None else tempfile.NamedTemporaryFile(suffix='.html').name
     plotly.io.write_html(figure, file=output, auto_open=do_open)
diff --git a/libunwind/src/UnwindRegistersRestore.S b/libunwind/src/UnwindRegistersRestore.S
index 5e199188945df..1bcd205be260d 100644
--- a/libunwind/src/UnwindRegistersRestore.S
+++ b/libunwind/src/UnwindRegistersRestore.S
@@ -1044,9 +1044,10 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
   lw $27, (4 * 27)($4)
   lw $28, (4 * 28)($4)
   lw $29, (4 * 29)($4)
-  lw $30, (4 * 30)($4)
   // load new pc into ra
   lw $31, (4 * 32)($4)
+  // MIPS 1 has a load delay slot. Ensure lw $31 and jr are separated by an instruction.
+  lw $30, (4 * 30)($4)
   // jump to ra, load a0 in the delay slot
   jr $31
   lw $4, (4 * 4)($4)
@@ -1082,11 +1083,13 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
   ld $2, (8 * 2)($4)
   ld $3, (8 * 3)($4)
   // skip a0 for now
-  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
     ld $\i, (8 * \i)($4)
   .endr
   // load new pc into ra
   ld $31, (8 * 32)($4)
+  // MIPS 1 has a load delay slot. Ensure ld $31 and jr are separated by an instruction.
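+  // (Illustrative note, added for clarity: on MIPS I the register written by a load is
+  //  not available to the instruction that immediately follows the load, so the otherwise
+  //  unrelated restore of $30 below fills that delay slot before the jr through $31.)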
+ ld $30, (8 * 30)($4) // jump to ra, load a0 in the delay slot jr $31 ld $4, (8 * 4)($4) diff --git a/libunwind/test/configs/cmake-bridge.cfg.in b/libunwind/test/configs/cmake-bridge.cfg.in index b804c210f0bbc..e40497bfa9976 100644 --- a/libunwind/test/configs/cmake-bridge.cfg.in +++ b/libunwind/test/configs/cmake-bridge.cfg.in @@ -14,6 +14,7 @@ import os, site site.addsitedir(os.path.join('@LIBUNWIND_LIBCXX_PATH@', 'utils')) import libcxx.test.format +from lit.util import which # Basic configuration of the test suite config.name = os.path.basename('@LIBUNWIND_TEST_CONFIG@') @@ -33,3 +34,13 @@ config.substitutions.append(('%{install-prefix}', '@LIBUNWIND_TESTING_INSTALL_PR config.substitutions.append(('%{include}', '@LIBUNWIND_TESTING_INSTALL_PREFIX@/include')) config.substitutions.append(('%{lib}', '@LIBUNWIND_TESTING_INSTALL_PREFIX@/@LIBUNWIND_INSTALL_LIBRARY_DIR@')) config.substitutions.append(('%{benchmark_flags}', '')) + +# Check for objcopy tools +objcopy_path = which('llvm-objcopy', '@LLVM_BUILD_BINARY_DIR@/bin') +if not objcopy_path: + objcopy_path = which('llvm-objcopy') +if not objcopy_path: + objcopy_path = which('objcopy') +if objcopy_path: + config.substitutions.append(('%{objcopy}', objcopy_path)) + config.available_features.add('objcopy-available') diff --git a/libunwind/test/eh_frame_fde_pc_range.pass.cpp b/libunwind/test/eh_frame_fde_pc_range.pass.cpp index 39c8e8066264d..852612bd9a6e4 100644 --- a/libunwind/test/eh_frame_fde_pc_range.pass.cpp +++ b/libunwind/test/eh_frame_fde_pc_range.pass.cpp @@ -14,16 +14,15 @@ // clang-format off // REQUIRES: target={{x86_64-.+-linux-gnu}} -// aarch64,arm have a cross toolchain build(llvm-clang-win-x-aarch64, etc) -// where objdump is not available. +// REQUIRES: objcopy-available // TODO: Figure out why this fails with Memory Sanitizer. 
// XFAIL: msan // RUN: %{build} -// RUN: objcopy --dump-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe +// RUN: %{objcopy} --dump-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe // RUN: echo -ne '\xFF' | dd of=%t_ehf_hdr.bin bs=1 seek=2 count=2 conv=notrunc status=none -// RUN: objcopy --update-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe +// RUN: %{objcopy} --update-section .eh_frame_hdr=%t_ehf_hdr.bin %t.exe // RUN: %{exec} %t.exe // clang-format on diff --git a/lld/COFF/Driver.cpp b/lld/COFF/Driver.cpp index a59cc06d51836..3676b8881016b 100644 --- a/lld/COFF/Driver.cpp +++ b/lld/COFF/Driver.cpp @@ -2104,18 +2104,18 @@ void LinkerDriver::linkerMain(ArrayRef argsArr) { config->dtltoDistributor = args.getLastArgValue(OPT_thinlto_distributor); // Handle /thinlto-distributor-arg: - for (auto *arg : args.filtered(OPT_thinlto_distributor_arg)) - config->dtltoDistributorArgs.push_back(arg->getValue()); + config->dtltoDistributorArgs = + args::getStrings(args, OPT_thinlto_distributor_arg); // Handle /thinlto-remote-compiler: - config->dtltoCompiler = args.getLastArgValue(OPT_thinlto_compiler); + config->dtltoCompiler = args.getLastArgValue(OPT_thinlto_remote_compiler); if (!config->dtltoDistributor.empty() && config->dtltoCompiler.empty()) Err(ctx) << "A value must be specified for /thinlto-remote-compiler if " "/thinlto-distributor is specified."; // Handle /thinlto-remote-compiler-arg: - for (auto *arg : args.filtered(OPT_thinlto_compiler_arg)) - config->dtltoCompilerArgs.push_back(arg->getValue()); + config->dtltoCompilerArgs = + args::getStrings(args, OPT_thinlto_remote_compiler_arg); // Handle /dwodir config->dwoDir = args.getLastArgValue(OPT_dwodir); diff --git a/lld/COFF/Options.td b/lld/COFF/Options.td index 485db5a8b21c1..f3d0eb3356200 100644 --- a/lld/COFF/Options.td +++ b/lld/COFF/Options.td @@ -289,10 +289,10 @@ def thinlto_distributor : P<"thinlto-distributor", "backend compilations will be distributed">; def thinlto_distributor_arg : P<"thinlto-distributor-arg", "Arguments to pass to the ThinLTO distributor">; -def thinlto_compiler : P<"thinlto-remote-compiler", +def thinlto_remote_compiler : P<"thinlto-remote-compiler", "Compiler for the ThinLTO distributor to invoke for ThinLTO backend " "compilations">; -def thinlto_compiler_arg : P<"thinlto-remote-compiler-arg", +def thinlto_remote_compiler_arg : P<"thinlto-remote-compiler-arg", "Compiler arguments for the ThinLTO distributor to pass for ThinLTO backend " "compilations">; def lto_obj_path : P< diff --git a/lld/COFF/Writer.cpp b/lld/COFF/Writer.cpp index b4f00996319b1..258a82e371f3a 100644 --- a/lld/COFF/Writer.cpp +++ b/lld/COFF/Writer.cpp @@ -1620,7 +1620,7 @@ void Writer::createSymbolAndStringTable() { dthunk->wrappedSym->writtenToSymtab = true; if (std::optional sym = createSymbol(dthunk->wrappedSym)) { - if (d->getName().size() > COFF::NameSize) + if (dthunk->wrappedSym->getName().size() > COFF::NameSize) longNameSymbols.emplace_back(outputSymtab.size(), dthunk->wrappedSym->getName()); outputSymtab.push_back(*sym); diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp index 1beab8d33f4ba..62f7fffce7dbe 100644 --- a/lld/ELF/Driver.cpp +++ b/lld/ELF/Driver.cpp @@ -1399,8 +1399,9 @@ static void readConfigs(Ctx &ctx, opt::InputArgList &args) { ctx.arg.dtltoDistributor = args.getLastArgValue(OPT_thinlto_distributor_eq); ctx.arg.dtltoDistributorArgs = args::getStrings(args, OPT_thinlto_distributor_arg); - ctx.arg.dtltoCompiler = args.getLastArgValue(OPT_thinlto_compiler_eq); - ctx.arg.dtltoCompilerArgs = args::getStrings(args, 
OPT_thinlto_compiler_arg); + ctx.arg.dtltoCompiler = args.getLastArgValue(OPT_thinlto_remote_compiler_eq); + ctx.arg.dtltoCompilerArgs = + args::getStrings(args, OPT_thinlto_remote_compiler_arg); ctx.arg.dwoDir = args.getLastArgValue(OPT_plugin_opt_dwo_dir_eq); ctx.arg.dynamicLinker = getDynamicLinker(ctx, args); ctx.arg.ehFrameHdr = diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp index 98267d1e081db..ff7ef2dce5c79 100644 --- a/lld/ELF/InputSection.cpp +++ b/lld/ELF/InputSection.cpp @@ -1357,21 +1357,24 @@ SyntheticSection *EhInputSection::getParent() const { // .eh_frame is a sequence of CIE or FDE records. // This function splits an input section into records and returns them. +// In rare cases (.eh_frame pieces are reordered by a linker script), the +// relocations may be unordered. template void EhInputSection::split() { - const RelsOrRelas rels = relsOrRelas(/*supportsCrel=*/false); - // getReloc expects the relocations to be sorted by r_offset. See the comment - // in scanRelocs. - if (rels.areRelocsRel()) { - SmallVector storage; - split(sortRels(rels.rels, storage)); - } else { - SmallVector storage; - split(sortRels(rels.relas, storage)); - } -} + const RelsOrRelas elfRels = relsOrRelas(); + if (elfRels.areRelocsCrel()) + preprocessRelocs(elfRels.crels); + else if (elfRels.areRelocsRel()) + preprocessRelocs(elfRels.rels); + else + preprocessRelocs(elfRels.relas); + + // The loop below expects the relocations to be sorted by offset. + auto cmp = [](const Relocation &a, const Relocation &b) { + return a.offset < b.offset; + }; + if (!llvm::is_sorted(rels, cmp)) + llvm::stable_sort(rels, cmp); -template -void EhInputSection::split(ArrayRef rels) { ArrayRef d = content(); const char *msg = nullptr; unsigned relI = 0; @@ -1397,10 +1400,10 @@ void EhInputSection::split(ArrayRef rels) { // Find the first relocation that points to [off,off+size). Relocations // have been sorted by r_offset. const uint64_t off = d.data() - content().data(); - while (relI != rels.size() && rels[relI].r_offset < off) + while (relI != rels.size() && rels[relI].offset < off) ++relI; unsigned firstRel = -1; - if (relI != rels.size() && rels[relI].r_offset < off + size) + if (relI != rels.size() && rels[relI].offset < off + size) firstRel = relI; (id == 0 ? cies : fdes).emplace_back(off, this, size, firstRel); d = d.slice(size); @@ -1410,6 +1413,23 @@ void EhInputSection::split(ArrayRef rels) { << getObjMsg(d.data() - content().data()); } +template +void EhInputSection::preprocessRelocs(Relocs elfRels) { + Ctx &ctx = file->ctx; + rels.reserve(elfRels.size()); + for (auto rel : elfRels) { + uint64_t offset = rel.r_offset; + Symbol &sym = file->getSymbol(rel.getSymbol(ctx.arg.isMips64EL)); + RelType type = rel.getType(ctx.arg.isMips64EL); + RelExpr expr = ctx.target->getRelExpr(type, sym, content().data() + offset); + int64_t addend = + RelTy::HasAddend + ? getAddend(rel) + : ctx.target->getImplicitAddend(content().data() + offset, type); + rels.push_back({expr, type, offset, addend, &sym}); + } +} + // Return the offset in an output section for a given input offset. 
uint64_t EhInputSection::getParentOffset(uint64_t offset) const { auto it = partition_point( diff --git a/lld/ELF/InputSection.h b/lld/ELF/InputSection.h index 8462f03bdb77e..dc29fedbc5c53 100644 --- a/lld/ELF/InputSection.h +++ b/lld/ELF/InputSection.h @@ -394,7 +394,7 @@ class EhInputSection : public InputSectionBase { StringRef name); static bool classof(const SectionBase *s) { return s->kind() == EHFrame; } template void split(); - template void split(ArrayRef rels); + template void preprocessRelocs(Relocs rels); // Splittable sections are handled as a sequence of data // rather than a single large blob of data. @@ -402,6 +402,10 @@ class EhInputSection : public InputSectionBase { SyntheticSection *getParent() const; uint64_t getParentOffset(uint64_t offset) const; + + // Preprocessed relocations in uniform format to avoid REL/RELA/CREL + // relocation format handling throughout the codebase. + SmallVector rels; }; // This is a section that is added directly to an output section diff --git a/lld/ELF/MarkLive.cpp b/lld/ELF/MarkLive.cpp index 83ae9fb7689e0..a7b0f08c8d954 100644 --- a/lld/ELF/MarkLive.cpp +++ b/lld/ELF/MarkLive.cpp @@ -68,10 +68,9 @@ template class MarkLive { void mark(); template - void resolveReloc(InputSectionBase &sec, RelTy &rel, bool fromFDE); + void resolveReloc(InputSectionBase &sec, const RelTy &rel, bool fromFDE); - template - void scanEhFrameSection(EhInputSection &eh, ArrayRef rels); + void scanEhFrameSection(EhInputSection &eh); Ctx &ctx; // The index of the partition that we are currently processing. @@ -115,23 +114,38 @@ static uint64_t getAddend(Ctx &, InputSectionBase &sec, template template void MarkLive::resolveReloc(InputSectionBase &sec, - RelTy &rel, bool fromFDE) { + const RelTy &rel, + bool fromFDE) { // If a symbol is referenced in a live section, it is used. - Symbol &sym = sec.file->getRelocTargetSym(rel); - sym.used = true; + Symbol *sym; + if constexpr (std::is_same_v) { + assert(isa(sec)); + sym = rel.sym; + } else { + sym = &sec.file->getRelocTargetSym(rel); + } + sym->used = true; LiveReason reason; - if (TrackWhyLive) - reason = {SecOffset(&sec, rel.r_offset), "referenced by"}; + if (TrackWhyLive) { + if constexpr (std::is_same_v) + reason = {SecOffset(&sec, rel.offset), "referenced by"}; + else + reason = {SecOffset(&sec, rel.r_offset), "referenced by"}; + } - if (auto *d = dyn_cast(&sym)) { + if (auto *d = dyn_cast(sym)) { auto *relSec = dyn_cast_or_null(d->section); if (!relSec) return; uint64_t offset = d->value; - if (d->isSection()) - offset += getAddend(ctx, sec, rel); + if (d->isSection()) { + if constexpr (std::is_same_v) + offset += rel.addend; + else + offset += getAddend(ctx, sec, rel); + } // fromFDE being true means this is referenced by a FDE in a .eh_frame // piece. The relocation points to the described function or to a LSDA. We @@ -141,8 +155,9 @@ void MarkLive::resolveReloc(InputSectionBase &sec, // associated text section is live, the LSDA will be retained due to section // group/SHF_LINK_ORDER rules (b) if the associated text section should be // discarded, marking the LSDA will unnecessarily retain the text section. 
- if (!(fromFDE && ((relSec->flags & (SHF_EXECINSTR | SHF_LINK_ORDER)) || - relSec->nextInSectionGroup))) { + if (!(fromFDE && std::is_same_v && + ((relSec->flags & (SHF_EXECINSTR | SHF_LINK_ORDER)) || + relSec->nextInSectionGroup))) { Symbol *canonicalSym = d; if (TrackWhyLive && d->isSection()) { // This is expensive, so ideally this would be deferred until it's known @@ -159,15 +174,15 @@ void MarkLive::resolveReloc(InputSectionBase &sec, return; } - if (auto *ss = dyn_cast(&sym)) { + if (auto *ss = dyn_cast(sym)) { if (!ss->isWeak()) { cast(ss->file)->isNeeded = true; if (TrackWhyLive) - whyLive.try_emplace(&sym, reason); + whyLive.try_emplace(sym, reason); } } - for (InputSectionBase *sec : cNamedSections.lookup(sym.getName())) + for (InputSectionBase *sec : cNamedSections.lookup(sym->getName())) enqueue(sec, /*offset=*/0, /*sym=*/nullptr, reason); } @@ -186,9 +201,8 @@ void MarkLive::resolveReloc(InputSectionBase &sec, // the gc pass. With that we would be able to also gc some sections holding // LSDAs and personality functions if we found that they were unused. template -template -void MarkLive::scanEhFrameSection(EhInputSection &eh, - ArrayRef rels) { +void MarkLive::scanEhFrameSection(EhInputSection &eh) { + ArrayRef rels = eh.rels; for (const EhSectionPiece &cie : eh.cies) if (cie.firstRelocation != unsigned(-1)) resolveReloc(eh, rels[cie.firstRelocation], false); @@ -198,7 +212,7 @@ void MarkLive::scanEhFrameSection(EhInputSection &eh, continue; uint64_t pieceEnd = fde.inputOff + fde.size; for (size_t j = firstRelI, end2 = rels.size(); - j < end2 && rels[j].r_offset < pieceEnd; ++j) + j < end2 && rels[j].offset < pieceEnd; ++j) resolveReloc(eh, rels[j], true); } } @@ -360,14 +374,8 @@ void MarkLive::run() { // that point to .eh_frames. Otherwise, the garbage collector would drop // all of them. We also want to preserve personality routines and LSDA // referenced by .eh_frame sections, so we scan them for that here. 
- for (EhInputSection *eh : ctx.ehInputSections) { - const RelsOrRelas rels = - eh->template relsOrRelas(/*supportsCrel=*/false); - if (rels.areRelocsRel()) - scanEhFrameSection(*eh, rels.rels); - else if (rels.relas.size()) - scanEhFrameSection(*eh, rels.relas); - } + for (EhInputSection *eh : ctx.ehInputSections) + scanEhFrameSection(*eh); for (InputSectionBase *sec : ctx.inputSections) { if (sec->flags & SHF_GNU_RETAIN) { enqueue(sec, /*offset=*/0, /*sym=*/nullptr, {std::nullopt, "retained"}); diff --git a/lld/ELF/Options.td b/lld/ELF/Options.td index f0523185a0a31..0d6dda4b60d3a 100644 --- a/lld/ELF/Options.td +++ b/lld/ELF/Options.td @@ -722,11 +722,11 @@ def thinlto_distributor_eq: JJ<"thinlto-distributor=">, "ThinLTO backend compilations will be distributed">; defm thinlto_distributor_arg: EEq<"thinlto-distributor-arg", "Arguments to " "pass to the ThinLTO distributor">; -def thinlto_compiler_eq: JJ<"thinlto-remote-compiler=">, +def thinlto_remote_compiler_eq: JJ<"thinlto-remote-compiler=">, HelpText<"Compiler for the ThinLTO distributor to invoke for ThinLTO backend " "compilations">; -defm thinlto_compiler_arg: EEq<"thinlto-remote-compiler-arg", "Compiler " - "arguments for the ThinLTO distributor to pass for ThinLTO backend " +defm thinlto_remote_compiler_arg: EEq<"thinlto-remote-compiler-arg", + "Compiler arguments for the ThinLTO distributor to pass for ThinLTO backend " "compilations">; defm fat_lto_objects: BB<"fat-lto-objects", "Use the .llvm.lto section, which contains LLVM bitcode, in fat LTO object files to perform LTO.", diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp index bd96c051d160d..84b9b5e983662 100644 --- a/lld/ELF/Relocations.cpp +++ b/lld/ELF/Relocations.cpp @@ -6,37 +6,22 @@ // //===----------------------------------------------------------------------===// // -// This file contains platform-independent functions to process relocations. -// I'll describe the overview of this file here. +// This file implements the core relocation processing logic. It analyzes +// relocations and determines what auxiliary data structures (GOT, PLT, copy +// relocations) need to be created during linking. // -// Simple relocations are easy to handle for the linker. For example, -// for R_X86_64_PC64 relocs, the linker just has to fix up locations -// with the relative offsets to the target symbols. It would just be -// reading records from relocation sections and applying them to output. +// The main entry point is scanRelocations(), which calls scanSection() +// to process all relocations within an input section. For each relocation, +// scan() analyzes the type and target, and determines whether a synthetic +// section entry or dynamic relocation is needed. // -// But not all relocations are that easy to handle. For example, for -// R_386_GOTOFF relocs, the linker has to create new GOT entries for -// symbols if they don't exist, and fix up locations with GOT entry -// offsets from the beginning of GOT section. So there is more than -// fixing addresses in relocation processing. +// Note: This file analyzes what needs to be done but doesn't apply the +// actual relocations - that happens later in InputSection::writeTo(). +// Instead, it populates Relocation objects in InputSectionBase::relocations +// and creates necessary synthetic sections (GOT, PLT, etc.). // -// ELF defines a large number of complex relocations. -// -// The functions in this file analyze relocations and do whatever needs -// to be done. It includes, but not limited to, the following. 
-// -// - create GOT/PLT entries -// - create new relocations in .dynsym to let the dynamic linker resolve -// them at runtime (since ELF supports dynamic linking, not all -// relocations can be resolved at link-time) -// - create COPY relocs and reserve space in .bss -// - replace expensive relocs (in terms of runtime cost) with cheap ones -// - error out infeasible combinations such as PIC and non-relative relocs -// -// Note that the functions in this file don't actually apply relocations -// because it doesn't know about the output file nor the output file buffer. -// It instead stores Relocation objects to InputSection's Relocations -// vector to let it apply later in InputSection::writeTo. +// In addition, this file implements the core Thunk creation logic, called +// during finalizeAddressDependentContent(). // //===----------------------------------------------------------------------===// @@ -405,22 +390,17 @@ namespace { class OffsetGetter { public: OffsetGetter() = default; - explicit OffsetGetter(InputSectionBase &sec) { - if (auto *eh = dyn_cast(&sec)) { - cies = eh->cies; - fdes = eh->fdes; - i = cies.begin(); - j = fdes.begin(); - } + explicit OffsetGetter(EhInputSection &sec) { + cies = sec.cies; + fdes = sec.fdes; + i = cies.begin(); + j = fdes.begin(); } // Translates offsets in input sections to offsets in output sections. // Given offset must increase monotonically. We assume that Piece is // sorted by inputOff. uint64_t get(Ctx &ctx, uint64_t off) { - if (cies.empty()) - return off; - while (j != fdes.end() && j->inputOff <= off) ++j; auto it = j; @@ -450,13 +430,12 @@ class OffsetGetter { class RelocationScanner { public: RelocationScanner(Ctx &ctx) : ctx(ctx) {} - template - void scanSection(InputSectionBase &s, bool isEH = false); + template void scanSection(InputSectionBase &s); + template void scanEhSection(EhInputSection &s); private: Ctx &ctx; InputSectionBase *sec; - OffsetGetter getter; // End of relocations, used by Mips/PPC64. const void *end = nullptr; @@ -466,14 +445,14 @@ class RelocationScanner { int64_t computeMipsAddend(const RelTy &rel, RelExpr expr, bool isLocal) const; bool isStaticLinkTimeConstant(RelExpr e, RelType type, const Symbol &sym, uint64_t relOff) const; - void processAux(RelExpr expr, RelType type, uint64_t offset, Symbol &sym, - int64_t addend) const; + void process(RelExpr expr, RelType type, uint64_t offset, Symbol &sym, + int64_t addend) const; unsigned handleTlsRelocation(RelExpr expr, RelType type, uint64_t offset, Symbol &sym, int64_t addend); template - void scanOne(typename Relocs::const_iterator &i); - template void scan(Relocs rels); + void scan(typename Relocs::const_iterator &i); + template void scanSectionImpl(Relocs rels); }; } // namespace @@ -961,7 +940,7 @@ static bool canDefineSymbolInExecutable(Ctx &ctx, Symbol &sym) { } // Returns true if a given relocation can be computed at link-time. -// This only handles relocation types expected in processAux. +// This only handles relocation types expected in process(). // // For instance, we know the offset from a relocation to its target at // link-time if the relocation is PC-relative and refers a @@ -1052,8 +1031,8 @@ bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type, // sections. Given that it is ro, we will need an extra PT_LOAD. This // complicates things for the dynamic linker and means we would have to reserve // space for the extra PT_LOAD even if we end up not using it. 
-void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset, - Symbol &sym, int64_t addend) const { +void RelocationScanner::process(RelExpr expr, RelType type, uint64_t offset, + Symbol &sym, int64_t addend) const { // If non-ifunc non-preemptible, change PLT to direct call and optimize GOT // indirection. const bool isIfunc = sym.isGnuIFunc(); @@ -1493,7 +1472,7 @@ unsigned RelocationScanner::handleTlsRelocation(RelExpr expr, RelType type, } template -void RelocationScanner::scanOne(typename Relocs::const_iterator &i) { +void RelocationScanner::scan(typename Relocs::const_iterator &i) { const RelTy &rel = *i; uint32_t symIndex = rel.getSymbol(ctx.arg.isMips64EL); Symbol &sym = sec->getFile()->getSymbol(symIndex); @@ -1511,9 +1490,7 @@ void RelocationScanner::scanOne(typename Relocs::const_iterator &i) { } } // Get an offset in an output section this relocation is applied to. - uint64_t offset = getter.get(ctx, rel.r_offset); - if (offset == uint64_t(-1)) - return; + uint64_t offset = rel.r_offset; RelExpr expr = ctx.target->getRelExpr(type, sym, sec->content().data() + offset); @@ -1587,7 +1564,7 @@ void RelocationScanner::scanOne(typename Relocs::const_iterator &i) { } // Process TLS relocations, including TLS optimizations. Note that - // R_TPREL and R_TPREL_NEG relocations are resolved in processAux. + // R_TPREL and R_TPREL_NEG relocations are resolved in process(). // // Some RISCV TLSDESC relocations reference a local NOTYPE symbol, // but we need to process them in handleTlsRelocation. @@ -1599,7 +1576,7 @@ void RelocationScanner::scanOne(typename Relocs::const_iterator &i) { } } - processAux(expr, type, offset, sym, addend); + process(expr, type, offset, sym, addend); } // R_PPC64_TLSGD/R_PPC64_TLSLD is required to mark `bl __tls_get_addr` for @@ -1642,30 +1619,27 @@ static void checkPPC64TLSRelax(InputSectionBase &sec, Relocs rels) { } template -void RelocationScanner::scan(Relocs rels) { +void RelocationScanner::scanSectionImpl(Relocs rels) { // Not all relocations end up in Sec->Relocations, but a lot do. sec->relocations.reserve(rels.size()); if (ctx.arg.emachine == EM_PPC64) checkPPC64TLSRelax(*sec, rels); - // For EhInputSection, OffsetGetter expects the relocations to be sorted by - // r_offset. In rare cases (.eh_frame pieces are reordered by a linker - // script), the relocations may be unordered. // On SystemZ, all sections need to be sorted by r_offset, to allow TLS // relaxation to be handled correctly - see SystemZ::getTlsGdRelaxSkip. SmallVector storage; - if (isa(sec) || ctx.arg.emachine == EM_S390) + if (ctx.arg.emachine == EM_S390) rels = sortRels(rels, storage); if constexpr (RelTy::IsCrel) { for (auto i = rels.begin(); i != rels.end();) - scanOne(i); + scan(i); } else { // The non-CREL code path has additional check for PPC64 TLS. 
end = static_cast(rels.end()); for (auto i = rels.begin(); i != end;) - scanOne(i); + scan(i); } // Sort relocations by offset for more efficient searching for @@ -1680,17 +1654,36 @@ void RelocationScanner::scan(Relocs rels) { }); } -template -void RelocationScanner::scanSection(InputSectionBase &s, bool isEH) { +template void RelocationScanner::scanSection(InputSectionBase &s) { sec = &s; - getter = OffsetGetter(s); - const RelsOrRelas rels = s.template relsOrRelas(!isEH); + const RelsOrRelas rels = s.template relsOrRelas(); if (rels.areRelocsCrel()) - scan(rels.crels); + scanSectionImpl(rels.crels); else if (rels.areRelocsRel()) - scan(rels.rels); + scanSectionImpl(rels.rels); else - scan(rels.relas); + scanSectionImpl(rels.relas); +} + +template void RelocationScanner::scanEhSection(EhInputSection &s) { + sec = &s; + OffsetGetter getter(s); + auto rels = s.rels; + s.relocations.reserve(rels.size()); + for (auto &r : rels) { + // Ignore R_*_NONE and other marker relocations. + if (r.expr == R_NONE) + continue; + uint64_t offset = getter.get(ctx, r.offset); + // Skip if the relocation offset is within a dead piece. + if (offset == uint64_t(-1)) + continue; + Symbol *sym = r.sym; + if (sym->isUndefined() && + maybeReportUndefined(ctx, cast(*sym), *sec, offset)) + continue; + process(r.expr, r.type, offset, *sym, r.addend); + } } template void elf::scanRelocations(Ctx &ctx) { @@ -1725,7 +1718,7 @@ template void elf::scanRelocations(Ctx &ctx) { RelocationScanner scanner(ctx); for (Partition &part : ctx.partitions) { for (EhInputSection *sec : part.ehFrame->sections) - scanner.template scanSection(*sec, /*isEH=*/true); + scanner.template scanEhSection(*sec); if (part.armExidx && part.armExidx->isLive()) for (InputSection *sec : part.armExidx->exidxSections) if (sec->isLive()) diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp index 0e96e37ecb9c1..bbf4b29a9fda5 100644 --- a/lld/ELF/SyntheticSections.cpp +++ b/lld/ELF/SyntheticSections.cpp @@ -403,12 +403,12 @@ EhFrameSection::EhFrameSection(Ctx &ctx) // Search for an existing CIE record or create a new one. // CIE records from input object files are uniquified by their contents // and where their relocations point to. -template -CieRecord *EhFrameSection::addCie(EhSectionPiece &cie, ArrayRef rels) { +CieRecord *EhFrameSection::addCie(EhSectionPiece &cie, + ArrayRef rels) { Symbol *personality = nullptr; unsigned firstRelI = cie.firstRelocation; if (firstRelI != (unsigned)-1) - personality = &cie.sec->file->getRelocTargetSym(rels[firstRelI]); + personality = rels[firstRelI].sym; // Search for an existing CIE by CIE contents/relocation target pair. CieRecord *&rec = cieMap[{cie.data(), personality}]; @@ -424,25 +424,20 @@ CieRecord *EhFrameSection::addCie(EhSectionPiece &cie, ArrayRef rels) { // There is one FDE per function. Returns a non-null pointer to the function // symbol if the given FDE points to a live function. -template -Defined *EhFrameSection::isFdeLive(EhSectionPiece &fde, ArrayRef rels) { - auto *sec = cast(fde.sec); - unsigned firstRelI = fde.firstRelocation; - +Defined *EhFrameSection::isFdeLive(EhSectionPiece &fde, + ArrayRef rels) { // An FDE should point to some function because FDEs are to describe // functions. That's however not always the case due to an issue of // ld.gold with -r. ld.gold may discard only functions and leave their // corresponding FDEs, which results in creating bad .eh_frame sections. // To deal with that, we ignore such FDEs. 
+ unsigned firstRelI = fde.firstRelocation; if (firstRelI == (unsigned)-1) return nullptr; - const RelTy &rel = rels[firstRelI]; - Symbol &b = sec->file->getRelocTargetSym(rel); - // FDEs for garbage-collected or merged-by-ICF sections, or sections in // another partition, are dead. - if (auto *d = dyn_cast(&b)) + if (auto *d = dyn_cast(rels[firstRelI].sym)) if (!d->folded && d->section && d->section->partition == partition) return d; return nullptr; @@ -452,41 +447,29 @@ Defined *EhFrameSection::isFdeLive(EhSectionPiece &fde, ArrayRef rels) { // is one CIE record per input object file which is followed by // a list of FDEs. This function searches an existing CIE or create a new // one and associates FDEs to the CIE. -template -void EhFrameSection::addRecords(EhInputSection *sec, ArrayRef rels) { +template void EhFrameSection::addRecords(EhInputSection *sec) { + auto rels = sec->rels; offsetToCie.clear(); for (EhSectionPiece &cie : sec->cies) - offsetToCie[cie.inputOff] = addCie(cie, rels); + offsetToCie[cie.inputOff] = addCie(cie, rels); for (EhSectionPiece &fde : sec->fdes) { - uint32_t id = endian::read32(fde.data().data() + 4); + uint32_t id = endian::read32(fde.data().data() + 4); CieRecord *rec = offsetToCie[fde.inputOff + 4 - id]; if (!rec) Fatal(ctx) << sec << ": invalid CIE reference"; - if (!isFdeLive(fde, rels)) + if (!isFdeLive(fde, rels)) continue; rec->fdes.push_back(&fde); numFdes++; } } -template -void EhFrameSection::addSectionAux(EhInputSection *sec) { - if (!sec->isLive()) - return; - const RelsOrRelas rels = - sec->template relsOrRelas(/*supportsCrel=*/false); - if (rels.areRelocsRel()) - addRecords(sec, rels.rels); - else - addRecords(sec, rels.relas); -} - // Used by ICF::handleLSDA(). This function is very similar to // EhFrameSection::addRecords(). -template +template void EhFrameSection::iterateFDEWithLSDAAux( - EhInputSection &sec, ArrayRef rels, DenseSet &ciesWithLSDA, + EhInputSection &sec, DenseSet &ciesWithLSDA, llvm::function_ref fn) { for (EhSectionPiece &cie : sec.cies) if (hasLSDA(cie)) @@ -497,7 +480,7 @@ void EhFrameSection::iterateFDEWithLSDAAux( continue; // The CIE has a LSDA argument. Call fn with d's section. 
- if (Defined *d = isFdeLive(fde, rels)) + if (Defined *d = isFdeLive(fde, sec.rels)) if (auto *s = dyn_cast_or_null(d->section)) fn(*s); } @@ -509,12 +492,7 @@ void EhFrameSection::iterateFDEWithLSDA( DenseSet ciesWithLSDA; for (EhInputSection *sec : sections) { ciesWithLSDA.clear(); - const RelsOrRelas rels = - sec->template relsOrRelas(/*supportsCrel=*/false); - if (rels.areRelocsRel()) - iterateFDEWithLSDAAux(*sec, rels.rels, ciesWithLSDA, fn); - else - iterateFDEWithLSDAAux(*sec, rels.relas, ciesWithLSDA, fn); + iterateFDEWithLSDAAux(*sec, ciesWithLSDA, fn); } } @@ -531,20 +509,16 @@ void EhFrameSection::finalizeContents() { case ELFNoneKind: llvm_unreachable("invalid ekind"); case ELF32LEKind: - for (EhInputSection *sec : sections) - addSectionAux(sec); - break; - case ELF32BEKind: - for (EhInputSection *sec : sections) - addSectionAux(sec); - break; case ELF64LEKind: for (EhInputSection *sec : sections) - addSectionAux(sec); + if (sec->isLive()) + addRecords(sec); break; + case ELF32BEKind: case ELF64BEKind: for (EhInputSection *sec : sections) - addSectionAux(sec); + if (sec->isLive()) + addRecords(sec); break; } diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h index 223dfe3b67b10..ac3ec63f0a7a5 100644 --- a/lld/ELF/SyntheticSections.h +++ b/lld/ELF/SyntheticSections.h @@ -80,19 +80,14 @@ class EhFrameSection final : public SyntheticSection { uint64_t size = 0; - template - void addRecords(EhInputSection *s, llvm::ArrayRef rels); - template void addSectionAux(EhInputSection *s); - template - void iterateFDEWithLSDAAux(EhInputSection &sec, ArrayRef rels, + template void addRecords(EhInputSection *s); + template + void iterateFDEWithLSDAAux(EhInputSection &sec, llvm::DenseSet &ciesWithLSDA, llvm::function_ref fn); - template - CieRecord *addCie(EhSectionPiece &piece, ArrayRef rels); - - template - Defined *isFdeLive(EhSectionPiece &piece, ArrayRef rels); + CieRecord *addCie(EhSectionPiece &piece, ArrayRef rels); + Defined *isFdeLive(EhSectionPiece &piece, ArrayRef rels); uint64_t getFdePc(uint8_t *buf, size_t off, uint8_t enc) const; diff --git a/lld/MachO/SyntheticSections.cpp b/lld/MachO/SyntheticSections.cpp index 979a4ee6d8133..903ba78a27c75 100644 --- a/lld/MachO/SyntheticSections.cpp +++ b/lld/MachO/SyntheticSections.cpp @@ -848,8 +848,7 @@ void ObjCSelRefsHelper::initialize() { void ObjCSelRefsHelper::cleanup() { methnameToSelref.clear(); } ConcatInputSection *ObjCSelRefsHelper::makeSelRef(StringRef methname) { - auto methnameOffset = - in.objcMethnameSection->getStringOffset(methname).outSecOff; + auto methnameOffset = in.objcMethnameSection->getStringOffset(methname); size_t wordSize = target->wordSize; uint8_t *selrefData = bAlloc().Allocate(wordSize); @@ -1685,28 +1684,7 @@ void CStringSection::writeTo(uint8_t *buf) const { } } -void CStringSection::finalizeContents() { - uint64_t offset = 0; - for (CStringInputSection *isec : inputs) { - for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) { - if (!piece.live) - continue; - // See comment above DeduplicatedCStringSection for how alignment is - // handled. - uint32_t pieceAlign = 1 - << llvm::countr_zero(isec->align | piece.inSecOff); - offset = alignToPowerOf2(offset, pieceAlign); - piece.outSecOff = offset; - isec->isFinal = true; - StringRef string = isec->getStringRef(i); - offset += string.size() + 1; // account for null terminator - } - } - size = offset; -} - -// Mergeable cstring literals are found under the __TEXT,__cstring section. 
In -// contrast to ELF, which puts strings that need different alignments into +// In contrast to ELF, which puts strings that need different alignments into // different sections, clang's Mach-O backend puts them all in one section. // Strings that need to be aligned have the .p2align directive emitted before // them, which simply translates into zero padding in the object file. In other @@ -1741,21 +1719,43 @@ void CStringSection::finalizeContents() { // requires its operand addresses to be 16-byte aligned). However, there will // typically also be other cstrings in the same file that aren't used via SIMD // and don't need this alignment. They will be emitted at some arbitrary address -// `A`, but ld64 will treat them as being 16-byte aligned with an offset of `16 -// % A`. +// `A`, but ld64 will treat them as being 16-byte aligned with an offset of +// `16 % A`. +static Align getStringPieceAlignment(const CStringInputSection *isec, + const StringPiece &piece) { + return llvm::Align(1ULL << llvm::countr_zero(isec->align | piece.inSecOff)); +} + +void CStringSection::finalizeContents() { + size = 0; + // TODO: Call buildCStringPriorities() to support cstring ordering when + // deduplication is off, although this may negatively impact build + // performance. + for (CStringInputSection *isec : inputs) { + for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) { + if (!piece.live) + continue; + piece.outSecOff = alignTo(size, getStringPieceAlignment(isec, piece)); + StringRef string = isec->getStringRef(i); + size = piece.outSecOff + string.size() + 1; // account for null terminator + } + isec->isFinal = true; + } +} + void DeduplicatedCStringSection::finalizeContents() { // Find the largest alignment required for each string. + DenseMap strToAlignment; for (const CStringInputSection *isec : inputs) { for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) { if (!piece.live) continue; auto s = isec->getCachedHashStringRef(i); assert(isec->align != 0); - uint8_t trailingZeros = llvm::countr_zero(isec->align | piece.inSecOff); - auto it = stringOffsetMap.insert( - std::make_pair(s, StringOffset(trailingZeros))); - if (!it.second && it.first->second.trailingZeros < trailingZeros) - it.first->second.trailingZeros = trailingZeros; + auto align = getStringPieceAlignment(isec, piece); + auto [it, wasInserted] = strToAlignment.try_emplace(s, align); + if (!wasInserted && it->second < align) + it->second = align; } } @@ -1765,38 +1765,31 @@ void DeduplicatedCStringSection::finalizeContents() { for (auto &[isec, i] : priorityBuilder.buildCStringPriorities(inputs)) { auto &piece = isec->pieces[i]; auto s = isec->getCachedHashStringRef(i); - auto it = stringOffsetMap.find(s); - assert(it != stringOffsetMap.end()); - lld::macho::DeduplicatedCStringSection::StringOffset &offsetInfo = - it->second; - if (offsetInfo.outSecOff == UINT64_MAX) { - offsetInfo.outSecOff = - alignToPowerOf2(size, 1ULL << offsetInfo.trailingZeros); - size = offsetInfo.outSecOff + s.size() + 1; // account for null terminator + auto [it, wasInserted] = stringOffsetMap.try_emplace(s, /*placeholder*/ 0); + if (wasInserted) { + // Avoid computing the offset until we are sure we will need to + uint64_t offset = alignTo(size, strToAlignment.at(s)); + it->second = offset; + size = offset + s.size() + 1; // account for null terminator } - piece.outSecOff = offsetInfo.outSecOff; + // If the string was already in stringOffsetMap, it is a duplicate and we + // only need to assign the offset. 
+ piece.outSecOff = it->second; } for (CStringInputSection *isec : inputs) isec->isFinal = true; } void DeduplicatedCStringSection::writeTo(uint8_t *buf) const { - for (const auto &p : stringOffsetMap) { - StringRef data = p.first.val(); - uint64_t off = p.second.outSecOff; - if (!data.empty()) - memcpy(buf + off, data.data(), data.size()); - } + for (const auto &[s, outSecOff] : stringOffsetMap) + if (s.size()) + memcpy(buf + outSecOff, s.data(), s.size()); } -DeduplicatedCStringSection::StringOffset -DeduplicatedCStringSection::getStringOffset(StringRef str) const { +uint64_t DeduplicatedCStringSection::getStringOffset(StringRef str) const { // StringPiece uses 31 bits to store the hashes, so we replicate that uint32_t hash = xxh3_64bits(str) & 0x7fffffff; - auto offset = stringOffsetMap.find(CachedHashStringRef(str, hash)); - assert(offset != stringOffsetMap.end() && - "Looked-up strings should always exist in section"); - return offset->second; + return stringOffsetMap.at(CachedHashStringRef(str, hash)); } // This section is actually emitted as __TEXT,__const by ld64, but clang may diff --git a/lld/MachO/SyntheticSections.h b/lld/MachO/SyntheticSections.h index 1abf3c210a64e..a37dd66107ee7 100644 --- a/lld/MachO/SyntheticSections.h +++ b/lld/MachO/SyntheticSections.h @@ -571,18 +571,10 @@ class DeduplicatedCStringSection final : public CStringSection { uint64_t getSize() const override { return size; } void finalizeContents() override; void writeTo(uint8_t *buf) const override; - - struct StringOffset { - uint8_t trailingZeros; - uint64_t outSecOff = UINT64_MAX; - - explicit StringOffset(uint8_t zeros) : trailingZeros(zeros) {} - }; - - StringOffset getStringOffset(StringRef str) const; + uint64_t getStringOffset(StringRef str) const; private: - llvm::DenseMap stringOffsetMap; + llvm::DenseMap stringOffsetMap; size_t size = 0; }; diff --git a/lld/docs/ReleaseNotes.rst b/lld/docs/ReleaseNotes.rst index 6ea1ea0fd6c2f..566dde6e08115 100644 --- a/lld/docs/ReleaseNotes.rst +++ b/lld/docs/ReleaseNotes.rst @@ -44,6 +44,9 @@ MinGW Improvements MachO Improvements ------------------ +* ``--separate-cstring-literal-sections`` emits cstring literal sections into sections defined by their section name. + (`#158720 `_) + WebAssembly Improvements ------------------------ diff --git a/lld/test/COFF/strtab.s b/lld/test/COFF/strtab.s index fbdd8df52d540..9edc13e19e825 100644 --- a/lld/test/COFF/strtab.s +++ b/lld/test/COFF/strtab.s @@ -1,17 +1,32 @@ # REQUIRES: x86 # RUN: llvm-mc -triple=x86_64-windows-msvc %s -filetype=obj -o %t.obj -# RUN: lld-link -out:%t.exe -entry:main %t.obj -debug:dwarf +# RUN: lld-link -machine:x64 -def:%S/Inputs/library.def -implib:%t.lib +# RUN: lld-link -out:%t.exe -entry:main %t.obj %t.lib -debug:dwarf # RUN: llvm-readobj --string-table %t.exe | FileCheck %s +# RUN: llvm-nm %t.exe | FileCheck %s --check-prefix=SYMBOLS + +# Note, for this test to have the intended test coverage, the imported symbol +# "function" needs to be such that the symbol name itself is <= 8 chars, while +# "__imp_"+name is >8 chars. 
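+# For example, "function" is exactly 8 characters and fits in the fixed 8-byte COFF
+# symbol name field, whereas "__imp_function" is 14 characters and must therefore be
+# referenced through the string table, which is what the __imp_function entry checked
+# below exercises. (Illustrative note added for clarity.)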
# CHECK: StringTable { -# CHECK-NEXT: Length: 87 +# CHECK-NEXT: Length: 102 # CHECK-NEXT: [ 4] .debug_abbrev # CHECK-NEXT: [ 12] .debug_line # CHECK-NEXT: [ 1e] long_name_symbolz # CHECK-NEXT: [ 30] .debug_abbrez -# CHECK-NEXT: [ 3e] __impl_long_name_symbolA +# CHECK-NEXT: [ 3e] __imp_function +# CHECK-NEXT: [ 4d] __impl_long_name_symbolA # CHECK-NEXT: } +# SYMBOLS: 140001000 N .debug_abbrez +# SYMBOLS-NEXT: 140002070 R __imp_function +# SYMBOLS-NEXT: 140001000 t __impl_long_name_symbolA +# SYMBOLS-NEXT: 140001010 T function +# SYMBOLS-NEXT: 140001000 t long_name_symbolA +# SYMBOLS-NEXT: 140001000 t long_name_symbolz +# SYMBOLS-NEXT: 140001000 T main +# SYMBOLS-NEXT: 140001000 t name_symbolA .global main .text @@ -21,6 +36,7 @@ long_name_symbolA: __impl_long_name_symbolA: name_symbolA: .debug_abbrez: + call function ret .section .debug_abbrev,"dr" diff --git a/lld/test/ELF/eh-frame-relocation.s b/lld/test/ELF/eh-frame-relocation.s new file mode 100644 index 0000000000000..9c1fe40dba7d3 --- /dev/null +++ b/lld/test/ELF/eh-frame-relocation.s @@ -0,0 +1,29 @@ +# REQUIRES: x86 +## Test that marker relocations are ignored and undefined symbols lead to errors. + +# RUN: rm -rf %t && split-file %s %t && cd %t +# RUN: llvm-mc -filetype=obj -triple=x86_64 a.s -o a.o +# RUN: llvm-mc -filetype=obj -triple=x86_64 abi.s -o abi.o +# RUN: ld.lld a.o abi.o -o a +# RUN: llvm-readelf -s a | FileCheck %s + +# CHECK: 00000000002{{.*}} 0 FUNC GLOBAL DEFAULT [[#]] __gxx_personality_v0 + +# RUN: not ld.lld a.o 2>&1 | FileCheck %s --check-prefix=ERR + +# ERR: error: undefined symbol: __gxx_personality_v0 +# ERR-NEXT: >>> referenced by a.o:(.eh_frame+0x12) + +#--- a.s +.cfi_startproc +.cfi_personality 0, __gxx_personality_v0 + ret +.cfi_endproc + +.section .eh_frame,"a",@unwind +.reloc ., BFD_RELOC_NONE, ignore + +#--- abi.s +.globl __gxx_personality_v0 +.type __gxx_personality_v0, @function +__gxx_personality_v0: diff --git a/lld/test/MachO/ordre-file-cstring.s b/lld/test/MachO/order-file-cstring.s similarity index 100% rename from lld/test/MachO/ordre-file-cstring.s rename to lld/test/MachO/order-file-cstring.s diff --git a/lld/test/wasm/archive-export.test b/lld/test/wasm/archive-export.test index 9a76d60d63d91..c67e500e46dd2 100644 --- a/lld/test/wasm/archive-export.test +++ b/lld/test/wasm/archive-export.test @@ -14,6 +14,9 @@ CHECK: Exports: CHECK-NEXT: - Name: memory CHECK-NEXT: Kind: MEMORY CHECK-NEXT: Index: 0 +CHECK-NEXT: - Name: __stack_pointer +CHECK-NEXT: Kind: GLOBAL +CHECK-NEXT: Index: 0 CHECK-NEXT: - Name: foo CHECK-NEXT: Kind: FUNCTION CHECK-NEXT: Index: 1 diff --git a/lld/test/wasm/comdats.ll b/lld/test/wasm/comdats.ll index 8fc301e9a10e0..2dd687fbad1ef 100644 --- a/lld/test/wasm/comdats.ll +++ b/lld/test/wasm/comdats.ll @@ -35,6 +35,9 @@ entry: ; CHECK-NEXT: - Name: memory ; CHECK-NEXT: Kind: MEMORY ; CHECK-NEXT: Index: 0 +; CHECK-NEXT: - Name: __stack_pointer +; CHECK-NEXT: Kind: GLOBAL +; CHECK-NEXT: Index: 0 ; CHECK-NEXT: - Name: _start ; CHECK-NEXT: Kind: FUNCTION ; CHECK-NEXT: Index: 1 diff --git a/lld/test/wasm/memory-naming.test b/lld/test/wasm/memory-naming.test index b4aabaeeac357..766d9cd59050b 100644 --- a/lld/test/wasm/memory-naming.test +++ b/lld/test/wasm/memory-naming.test @@ -65,6 +65,21 @@ # CHECK-IMPORT-NEXT: Index: 0 # CHECK-IMPORT-NEXT: - Type: +# RUN:wasm-ld --import-memory=foo -o %t.import.wasm %t.start.o +# RUN: obj2yaml %t.import.wasm | FileCheck -check-prefix=CHECK-IMPORT-DEFAULT %s + +# Verify that memory import module defaults to `env`, which is the default +# module 
for all imports. + +# CHECK-IMPORT-DEFAULT: - Type: IMPORT +# CHECK-IMPORT-DEFAULT-NEXT: Imports: +# CHECK-IMPORT-DEFAULT-NEXT: - Module: env +# CHECK-IMPORT-DEFAULT-NEXT: Field: foo +# CHECK-IMPORT-DEFAULT-NEXT: Kind: MEMORY +# CHECK-IMPORT-DEFAULT-NEXT: Memory: +# CHECK-IMPORT-DEFAULT-NEXT: Minimum: 0x2 +# CHECK-IMPORT-DEFAULT-NEXT: - Type: + # RUN:wasm-ld --import-memory=foo,bar --export-memory=qux -o %t.both.wasm %t.start.o # RUN: obj2yaml %t.both.wasm | FileCheck -check-prefix=CHECK-BOTH %s diff --git a/lld/test/wasm/mutable-global-exports.s b/lld/test/wasm/mutable-global-exports.s index 59308496ab4cc..1c10e92083b5c 100644 --- a/lld/test/wasm/mutable-global-exports.s +++ b/lld/test/wasm/mutable-global-exports.s @@ -16,6 +16,10 @@ .globl _start .globl foo_global +.globl bar_global + +.globaltype bar_global, i32, immutable +bar_global: .globaltype foo_global, i32 foo_global: @@ -33,6 +37,7 @@ _start: .ascii "atomics" # CHECK-ERR: mutable global exported but 'mutable-globals' feature not present in inputs: `foo_global`. Use --no-check-features to suppress +# CHECK-ERR-NOT: bar_global # CHECK: - Type: EXPORT # CHECK-NEXT: Exports: @@ -68,42 +73,48 @@ _start: # CHECK-ALL-NEXT: - Name: __wasm_call_ctors # CHECK-ALL-NEXT: Kind: FUNCTION # CHECK-ALL-NEXT: Index: 0 +# CHECK-ALL-NEXT: - Name: __stack_pointer +# CHECK-ALL-NEXT: Kind: GLOBAL +# CHECK-ALL-NEXT: Index: 0 # CHECK-ALL-NEXT: - Name: _start # CHECK-ALL-NEXT: Kind: FUNCTION # CHECK-ALL-NEXT: Index: 1 # CHECK-ALL-NEXT: - Name: foo_global # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 1 -# CHECK-ALL-NEXT: - Name: __dso_handle +# CHECK-ALL-NEXT: - Name: bar_global # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 2 -# CHECK-ALL-NEXT: - Name: __data_end +# CHECK-ALL-NEXT: - Name: __dso_handle # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 3 -# CHECK-ALL-NEXT: - Name: __stack_low +# CHECK-ALL-NEXT: - Name: __data_end # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 4 -# CHECK-ALL-NEXT: - Name: __stack_high +# CHECK-ALL-NEXT: - Name: __stack_low # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 5 -# CHECK-ALL-NEXT: - Name: __global_base +# CHECK-ALL-NEXT: - Name: __stack_high # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 6 -# CHECK-ALL-NEXT: - Name: __heap_base +# CHECK-ALL-NEXT: - Name: __global_base # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 7 -# CHECK-ALL-NEXT: - Name: __heap_end +# CHECK-ALL-NEXT: - Name: __heap_base # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 8 -# CHECK-ALL-NEXT: - Name: __memory_base +# CHECK-ALL-NEXT: - Name: __heap_end # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 9 -# CHECK-ALL-NEXT: - Name: __table_base +# CHECK-ALL-NEXT: - Name: __memory_base # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 10 -# CHECK-ALL-NEXT: - Name: __wasm_first_page_end +# CHECK-ALL-NEXT: - Name: __table_base # CHECK-ALL-NEXT: Kind: GLOBAL # CHECK-ALL-NEXT: Index: 11 +# CHECK-ALL-NEXT: - Name: __wasm_first_page_end +# CHECK-ALL-NEXT: Kind: GLOBAL +# CHECK-ALL-NEXT: Index: 12 # CHECK-ALL-NEXT: - Type: CODE # CHECK-ALL: Name: target_features diff --git a/lld/test/wasm/visibility-hidden.ll b/lld/test/wasm/visibility-hidden.ll index 36c29a8e47385..6ed7ba3afdc02 100644 --- a/lld/test/wasm/visibility-hidden.ll +++ b/lld/test/wasm/visibility-hidden.ll @@ -43,6 +43,9 @@ entry: ; CHECK-NEXT: - Name: memory ; CHECK-NEXT: Kind: MEMORY ; CHECK-NEXT: Index: 0 +; CHECK-NEXT: - Name: __stack_pointer +; CHECK-NEXT: Kind: GLOBAL +; CHECK-NEXT: Index: 0 ; CHECK-NEXT: - 
Name: objectDefault ; CHECK-NEXT: Kind: FUNCTION ; CHECK-NEXT: Index: 1 diff --git a/lld/wasm/Driver.cpp b/lld/wasm/Driver.cpp index b57d77457b83a..46c848d5c1232 100644 --- a/lld/wasm/Driver.cpp +++ b/lld/wasm/Driver.cpp @@ -542,22 +542,19 @@ static void readConfigs(opt::InputArgList &args) { ctx.arg.noinhibitExec = args.hasArg(OPT_noinhibit_exec); if (args.hasArg(OPT_import_memory_with_name)) { - ctx.arg.memoryImport = - args.getLastArgValue(OPT_import_memory_with_name).split(","); + auto argValue = args.getLastArgValue(OPT_import_memory_with_name); + if (argValue.contains(',')) + ctx.arg.memoryImport = argValue.split(","); + else + ctx.arg.memoryImport = {defaultModule, argValue}; } else if (args.hasArg(OPT_import_memory)) { - ctx.arg.memoryImport = - std::pair(defaultModule, memoryName); - } else { - ctx.arg.memoryImport = - std::optional>(); + ctx.arg.memoryImport = {defaultModule, memoryName}; } if (args.hasArg(OPT_export_memory_with_name)) { ctx.arg.memoryExport = args.getLastArgValue(OPT_export_memory_with_name); } else if (args.hasArg(OPT_export_memory)) { ctx.arg.memoryExport = memoryName; - } else { - ctx.arg.memoryExport = std::optional(); } ctx.arg.sharedMemory = args.hasArg(OPT_shared_memory); @@ -748,8 +745,7 @@ static void setConfigs() { error("--export-memory is incompatible with --shared"); } if (!ctx.arg.memoryImport.has_value()) { - ctx.arg.memoryImport = std::pair( - defaultModule, memoryName); + ctx.arg.memoryImport = {defaultModule, memoryName}; } } @@ -918,9 +914,10 @@ static InputGlobal *createGlobal(StringRef name, bool isMutable) { return make(wasmGlobal, nullptr); } -static GlobalSymbol *createGlobalVariable(StringRef name, bool isMutable) { +static GlobalSymbol *createGlobalVariable(StringRef name, bool isMutable, + uint32_t flags = 0) { InputGlobal *g = createGlobal(name, isMutable); - return symtab->addSyntheticGlobal(name, WASM_SYMBOL_VISIBILITY_HIDDEN, g); + return symtab->addSyntheticGlobal(name, flags, g); } static GlobalSymbol *createOptionalGlobal(StringRef name, bool isMutable) { @@ -970,9 +967,13 @@ static void createSyntheticSymbols() { } if (ctx.arg.sharedMemory) { - ctx.sym.tlsBase = createGlobalVariable("__tls_base", true); - ctx.sym.tlsSize = createGlobalVariable("__tls_size", false); - ctx.sym.tlsAlign = createGlobalVariable("__tls_align", false); + // TLS symbols are all hidden/dso-local + ctx.sym.tlsBase = + createGlobalVariable("__tls_base", true, WASM_SYMBOL_VISIBILITY_HIDDEN); + ctx.sym.tlsSize = createGlobalVariable("__tls_size", false, + WASM_SYMBOL_VISIBILITY_HIDDEN); + ctx.sym.tlsAlign = createGlobalVariable("__tls_align", false, + WASM_SYMBOL_VISIBILITY_HIDDEN); ctx.sym.initTLS = symtab->addSyntheticFunction( "__wasm_init_tls", WASM_SYMBOL_VISIBILITY_HIDDEN, make(is64 ? i64ArgSignature : i32ArgSignature, diff --git a/lld/wasm/Writer.cpp b/lld/wasm/Writer.cpp index b704677d36c93..9a5b56fc52e2f 100644 --- a/lld/wasm/Writer.cpp +++ b/lld/wasm/Writer.cpp @@ -576,7 +576,7 @@ void Writer::populateTargetFeatures() { if (ctx.isPic) { // This should not be necessary because all PIC objects should - // contain the mutable-globals feature. + // contain the `mutable-globals` feature. 
// TODO (https://github.com/llvm/llvm-project/issues/51681) allowed.insert("mutable-globals"); } @@ -703,10 +703,12 @@ void Writer::checkImportExportTargetFeatures() { } } for (const Symbol *sym : out.exportSec->exportedSymbols) { - if (isa(sym)) { - error(Twine("mutable global exported but 'mutable-globals' feature " - "not present in inputs: `") + - toString(*sym) + "`. Use --no-check-features to suppress."); + if (auto *global = dyn_cast(sym)) { + if (global->getGlobalType()->Mutable) { + error(Twine("mutable global exported but 'mutable-globals' feature " + "not present in inputs: `") + + toString(*sym) + "`. Use --no-check-features to suppress."); + } } } } @@ -782,6 +784,9 @@ void Writer::calculateExports() { unsigned globalIndex = out.importSec->getNumImportedGlobals() + out.globalSec->numGlobals(); + bool hasMutableGlobals = + out.targetFeaturesSec->features.count("mutable-globals") > 0; + for (Symbol *sym : symtab->symbols()) { if (!sym->isExported()) continue; @@ -799,7 +804,8 @@ void Writer::calculateExports() { } export_ = {name, WASM_EXTERNAL_FUNCTION, f->getExportedFunctionIndex()}; } else if (auto *g = dyn_cast(sym)) { - if (g->getGlobalType()->Mutable && !g->getFile() && !g->forceExport) { + if (!hasMutableGlobals && g->getGlobalType()->Mutable && !g->getFile() && + !g->isExportedExplicit()) { // Avoid exporting mutable globals are linker synthesized (e.g. // __stack_pointer or __tls_base) unless they are explicitly exported // from the command line. diff --git a/lldb/include/lldb/Core/Debugger.h b/lldb/include/lldb/Core/Debugger.h index 250ad64b76d9a..06136ed40471d 100644 --- a/lldb/include/lldb/Core/Debugger.h +++ b/lldb/include/lldb/Core/Debugger.h @@ -181,7 +181,15 @@ class Debugger : public std::enable_shared_from_this, return m_target_list.GetSelectedTarget(); } + /// Get the execution context representing the selected entities in the + /// selected target. ExecutionContext GetSelectedExecutionContext(); + + /// Similar to GetSelectedExecutionContext but returns a + /// ExecutionContextRef, and will hold the dummy target if no target is + /// currently selected. + ExecutionContextRef GetSelectedExecutionContextRef(); + /// Get accessor for the target list. /// /// The target list is part of the global debugger object. This the single @@ -419,7 +427,7 @@ class Debugger : public std::enable_shared_from_this, void CancelInterruptRequest(); /// Redraw the statusline if enabled. - void RedrawStatusline(bool update = true); + void RedrawStatusline(std::optional exe_ctx_ref); /// This is the correct way to query the state of Interruption. /// If you are on the RunCommandInterpreter thread, it will check the @@ -701,9 +709,9 @@ class Debugger : public std::enable_shared_from_this, void HandleBreakpointEvent(const lldb::EventSP &event_sp); - void HandleProcessEvent(const lldb::EventSP &event_sp); + lldb::ProcessSP HandleProcessEvent(const lldb::EventSP &event_sp); - void HandleThreadEvent(const lldb::EventSP &event_sp); + lldb::ThreadSP HandleThreadEvent(const lldb::EventSP &event_sp); void HandleProgressEvent(const lldb::EventSP &event_sp); diff --git a/lldb/include/lldb/Core/Mangled.h b/lldb/include/lldb/Core/Mangled.h index 47f1c6a8d80b7..546d7a9b409ed 100644 --- a/lldb/include/lldb/Core/Mangled.h +++ b/lldb/include/lldb/Core/Mangled.h @@ -148,13 +148,7 @@ class Mangled { /// Mangled name get accessor. /// /// \return - /// A reference to the mangled name string object. - ConstString &GetMangledName() { return m_mangled; } - - /// Mangled name get accessor. 
- /// - /// \return - /// A const reference to the mangled name string object. + /// The mangled name string object. ConstString GetMangledName() const { return m_mangled; } /// Best name get accessor. @@ -251,7 +245,7 @@ class Mangled { /// \return /// eManglingSchemeNone if no known mangling scheme could be identified /// for s, otherwise the enumerator for the mangling scheme detected. - static Mangled::ManglingScheme GetManglingScheme(llvm::StringRef const name); + static Mangled::ManglingScheme GetManglingScheme(llvm::StringRef name); static bool IsMangledName(llvm::StringRef name); diff --git a/lldb/include/lldb/Core/Statusline.h b/lldb/include/lldb/Core/Statusline.h index 6bda153f822d2..a5ab1927b57f5 100644 --- a/lldb/include/lldb/Core/Statusline.h +++ b/lldb/include/lldb/Core/Statusline.h @@ -9,6 +9,8 @@ #ifndef LLDB_CORE_STATUSLINE_H #define LLDB_CORE_STATUSLINE_H +#include "lldb/Symbol/SymbolContext.h" +#include "lldb/Target/ExecutionContext.h" #include "lldb/lldb-forward.h" #include #include @@ -19,15 +21,16 @@ class Statusline { Statusline(Debugger &debugger); ~Statusline(); + using Context = std::pair; + /// Reduce the scroll window and draw the statusline. - void Enable(); + void Enable(std::optional exe_ctx_ref); /// Hide the statusline and extend the scroll window. void Disable(); - /// Redraw the statusline. If update is false, this will redraw the last - /// string. - void Redraw(bool update = true); + /// Redraw the statusline. + void Redraw(std::optional exe_ctx_ref); /// Inform the statusline that the terminal dimensions have changed. void TerminalSizeChanged(); @@ -46,7 +49,11 @@ class Statusline { void UpdateScrollWindow(ScrollWindowMode mode); Debugger &m_debugger; - std::string m_last_str; + + /// Cached copy of the execution context that allows us to redraw the + /// statusline. + ExecutionContextRef m_exe_ctx_ref; + uint64_t m_terminal_width = 0; uint64_t m_terminal_height = 0; }; diff --git a/lldb/include/lldb/Target/ExecutionContext.h b/lldb/include/lldb/Target/ExecutionContext.h index f105e38fa69aa..fe8bce7f69713 100644 --- a/lldb/include/lldb/Target/ExecutionContext.h +++ b/lldb/include/lldb/Target/ExecutionContext.h @@ -92,10 +92,21 @@ class ExecutionContextRef { /// Construct using the target and all the selected items inside of it (the /// process and its selected thread, and the thread's selected frame). If - /// there is no selected thread, default to the first thread If there is no + /// there is no selected thread, default to the first thread. If there is no /// selected frame, default to the first frame. ExecutionContextRef(Target *target, bool adopt_selected); + /// Construct using the process and all the selected items inside of it ( + /// the selected thread, and the thread's selected frame). If + /// there is no selected thread, default to the first thread. If there is no + /// selected frame, default to the first frame. + ExecutionContextRef(Process *process, bool adopt_selected); + + /// Construct using the thread and all the selected items inside of it ( the + /// selected frame). If there is no selected frame, default to the first + /// frame. + ExecutionContextRef(Thread *thread, bool adopt_selected); + /// Construct using an execution context scope. 
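The new Process and Thread overloads above let callers snapshot the currently selected thread and frame into an ExecutionContextRef at the moment an event is handled, and re-lock that snapshot later. A minimal usage sketch, assuming only the declarations shown in these hunks; the surrounding plumbing is paraphrased:

  // Illustrative use of the new constructors and of ExecutionContextRef::Lock.
  using namespace lldb_private;

  ExecutionContextRef captureSelected(Process *process) {
    // Adopt the process's selected thread and that thread's selected frame.
    return ExecutionContextRef(process, /*adopt_selected=*/true);
  }

  ExecutionContext lockForRedraw(const ExecutionContextRef &ref) {
    // Re-resolve the weak references into strong ones when it is time to draw.
    return ref.Lock(/*thread_and_frame_only_if_stopped=*/false);
  }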
/// /// If the ExecutionContextScope object is valid and refers to a frame, make @@ -199,9 +210,9 @@ class ExecutionContextRef { void SetTargetPtr(Target *target, bool adopt_selected); - void SetProcessPtr(Process *process); + void SetProcessPtr(Process *process, bool adopt_selected = false); - void SetThreadPtr(Thread *thread); + void SetThreadPtr(Thread *thread, bool adopt_selected = false); void SetFramePtr(StackFrame *frame); diff --git a/lldb/include/lldb/Target/Statistics.h b/lldb/include/lldb/Target/Statistics.h index d6983bb0b9d24..2653835206ec7 100644 --- a/lldb/include/lldb/Target/Statistics.h +++ b/lldb/include/lldb/Target/Statistics.h @@ -322,12 +322,14 @@ class TargetStats { void IncreaseSourceRealpathCompatibleCount(uint32_t count); StatsDuration &GetCreateTime() { return m_create_time; } + StatsDuration &GetLoadCoreTime() { return m_load_core_time; } StatsSuccessFail &GetExpressionStats() { return m_expr_eval; } StatsSuccessFail &GetFrameVariableStats() { return m_frame_var; } void Reset(Target &target); protected: StatsDuration m_create_time; + StatsDuration m_load_core_time; std::optional m_launch_or_attach_time; std::optional m_first_private_stop_time; std::optional m_first_public_stop_time; diff --git a/lldb/packages/Python/lldbsuite/test/cpu_feature.py b/lldb/packages/Python/lldbsuite/test/cpu_feature.py new file mode 100644 index 0000000000000..d7668c1884e40 --- /dev/null +++ b/lldb/packages/Python/lldbsuite/test/cpu_feature.py @@ -0,0 +1,79 @@ +""" +Platform-agnostic helper to query for CPU features. +""" + +import re + + +class CPUFeature: + def __init__(self, linux_cpu_info_flag: str = None, darwin_sysctl_key: str = None): + self.cpu_info_flag = linux_cpu_info_flag + self.sysctl_key = darwin_sysctl_key + + def __str__(self): + for arch_class in ALL_ARCHS: + for feat_var in dir(arch_class): + if self == getattr(arch_class, feat_var): + return f"{arch_class.__name__}.{feat_var}" + raise AssertionError("unreachable") + + def is_supported(self, triple, cmd_runner): + if re.match(".*-.*-linux", triple): + err_msg, res = self._is_supported_linux(cmd_runner) + elif re.match(".*-apple-.*", triple): + err_msg, res = self._is_supported_darwin(cmd_runner) + else: + err_msg, res = None, False + + if err_msg: + print(f"CPU feature check failed: {err_msg}") + + return res + + def _is_supported_linux(self, cmd_runner): + if not self.cpu_info_flag: + return f"Unspecified cpuinfo flag for {self}", False + + cmd = "cat /proc/cpuinfo" + err, retcode, output = cmd_runner(cmd) + if err.Fail() or retcode != 0: + return output, False + + # Assume that every processor presents the same features. + # Look for the first "Features: ...." line. Features are space separated. 
+ if m := re.search(r"Features\s*: (.*)\n", output): + features = m.group(1).split() + return None, (self.cpu_info_flag in features) + + return 'No "Features:" line found in /proc/cpuinfo', False + + def _is_supported_darwin(self, cmd_runner): + if not self.sysctl_key: + return f"Unspecified sysctl key for {self}", False + + cmd = f"sysctl -n {self.sysctl_key}" + err, retcode, output = cmd_runner(cmd) + if err.Fail() or retcode != 0: + return output, False + + return None, (output.strip() == "1") + + +class AArch64: + FPMR = CPUFeature("fpmr") + GCS = CPUFeature("gcs") + MTE = CPUFeature("mte", "hw.optional.arm.FEAT_MTE4") + MTE_STORE_ONLY = CPUFeature("mtestoreonly") + PTR_AUTH = CPUFeature("paca", "hw.optional.arm.FEAT_PAuth2") + SME = CPUFeature("sme", "hw.optional.arm.FEAT_SME") + SME_FA64 = CPUFeature("smefa64") + SME2 = CPUFeature("sme2", "hw.optional.arm.FEAT_SME2") + SVE = CPUFeature("sve") + + +class Loong: + LASX = CPUFeature("lasx") + LSX = CPUFeature("lsx") + + +ALL_ARCHS = [AArch64, Loong] diff --git a/lldb/packages/Python/lldbsuite/test/decorators.py b/lldb/packages/Python/lldbsuite/test/decorators.py index 16a58cfc10b9a..454196e1b0264 100644 --- a/lldb/packages/Python/lldbsuite/test/decorators.py +++ b/lldb/packages/Python/lldbsuite/test/decorators.py @@ -27,6 +27,7 @@ from lldbsuite.support import temp_file from lldbsuite.test import lldbplatform from lldbsuite.test import lldbplatformutil +from lldbsuite.test.cpu_feature import CPUFeature class DecorateMode: @@ -1131,24 +1132,13 @@ def skipIfLLVMTargetMissing(target): return unittest.skipIf(not found, "requires " + target) -# Call sysctl on darwin to see if a specified hardware feature is available on this machine. -def skipUnlessFeature(feature): - def is_feature_enabled(): - if platform.system() == "Darwin": - try: - output = subprocess.check_output( - ["/usr/sbin/sysctl", feature], stderr=subprocess.DEVNULL - ).decode("utf-8") - # If 'feature: 1' was output, then this feature is available and - # the test should not be skipped. - if re.match(r"%s: 1\s*" % feature, output): - return None - else: - return "%s is not supported on this system." % feature - except subprocess.CalledProcessError: - return "%s is not supported on this system." % feature +def skipUnlessFeature(cpu_feature: CPUFeature): + def hasFeature(test_case): + if not test_case.isSupported(cpu_feature): + return f"Unsupported CPU feature: {cpu_feature}" + return None - return skipTestIfFn(is_feature_enabled) + return skipTestIfFn(hasFeature) def skipIfBuildType(types: list[str]): diff --git a/lldb/packages/Python/lldbsuite/test/lldbtest.py b/lldb/packages/Python/lldbsuite/test/lldbtest.py index b7077f8d8cc5c..8074922723440 100644 --- a/lldb/packages/Python/lldbsuite/test/lldbtest.py +++ b/lldb/packages/Python/lldbsuite/test/lldbtest.py @@ -48,6 +48,7 @@ # LLDB modules import lldb from . import configuration +from . import cpu_feature from . import decorators from . import lldbplatformutil from . 
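The cpu_feature.py helper above decides Linux support by scanning the first "Features" line of /proc/cpuinfo for a space-separated flag, and Darwin support by asking sysctl for the matching key. Purely for illustration, the same Linux scan expressed as a small standalone C++ routine; this is not part of the test suite:

  #include <fstream>
  #include <sstream>
  #include <string>

  // Illustrative only: the "Features:" scan performed by the Python helper,
  // written as a standalone function. Assumes all cores report the same set.
  static bool cpuHasFeature(const std::string &flag) {
    std::ifstream cpuinfo("/proc/cpuinfo");
    std::string line;
    while (std::getline(cpuinfo, line)) {
      if (line.rfind("Features", 0) != 0) // only the "Features : ..." line
        continue;
      std::istringstream features(line.substr(line.find(':') + 1));
      std::string f;
      while (features >> f)
        if (f == flag)
          return true;
      return false; // first Features line is representative
    }
    return false;
  }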
import lldbtest_config @@ -1315,39 +1316,6 @@ def isPPC64le(self): return True return False - def getCPUInfo(self): - triple = self.dbg.GetSelectedPlatform().GetTriple() - - # TODO other platforms, please implement this function - if not re.match(".*-.*-linux", triple): - return "" - - # Need to do something different for non-Linux/Android targets - cpuinfo_path = self.getBuildArtifact("cpuinfo") - if configuration.lldb_platform_name: - self.runCmd( - 'platform get-file "/proc/cpuinfo" ' + cpuinfo_path, check=False - ) - if not self.res.Succeeded(): - if self.TraceOn(): - print( - 'Failed to get /proc/cpuinfo from remote: "{}"'.format( - self.res.GetOutput().strip() - ) - ) - print("All cpuinfo feature checks will fail.") - return "" - else: - cpuinfo_path = "/proc/cpuinfo" - - try: - with open(cpuinfo_path, "r") as f: - cpuinfo = f.read() - except: - return "" - - return cpuinfo - def isAArch64(self): """Returns true if the architecture is AArch64.""" arch = self.getArchitecture().lower() @@ -1360,39 +1328,47 @@ def isARM(self): self.getArchitecture().lower().startswith("arm") ) + def isSupported(self, cpu_feature: cpu_feature.CPUFeature): + triple = self.dbg.GetSelectedPlatform().GetTriple() + cmd_runner = self.run_platform_command + return cpu_feature.is_supported(triple, cmd_runner) + def isAArch64SVE(self): - return self.isAArch64() and "sve" in self.getCPUInfo() + return self.isAArch64() and self.isSupported(cpu_feature.AArch64.SVE) def isAArch64SME(self): - return self.isAArch64() and "sme" in self.getCPUInfo() + return self.isAArch64() and self.isSupported(cpu_feature.AArch64.SME) def isAArch64SME2(self): # If you have sme2, you also have sme. - return self.isAArch64() and "sme2" in self.getCPUInfo() + return self.isAArch64() and self.isSupported(cpu_feature.AArch64.SME2) def isAArch64SMEFA64(self): # smefa64 allows the use of the full A64 instruction set in streaming # mode. This is required by certain test programs to setup register # state. 
- cpuinfo = self.getCPUInfo() - return self.isAArch64() and "sme" in cpuinfo and "smefa64" in cpuinfo + return ( + self.isAArch64() + and self.isSupported(cpu_feature.AArch64.SME) + and self.isSupported(cpu_feature.AArch64.SME_FA64) + ) def isAArch64MTE(self): - return self.isAArch64() and "mte" in self.getCPUInfo() + return self.isAArch64() and self.isSupported(cpu_feature.AArch64.MTE) def isAArch64MTEStoreOnly(self): - return self.isAArch64() and "mtestoreonly" in self.getCPUInfo() + return self.isAArch64() and self.isSupported(cpu_feature.AArch64.MTE_STORE_ONLY) def isAArch64GCS(self): - return self.isAArch64() and "gcs" in self.getCPUInfo() + return self.isAArch64() and self.isSupported(cpu_feature.AArch64.GCS) def isAArch64PAuth(self): if self.getArchitecture() == "arm64e": return True - return self.isAArch64() and "paca" in self.getCPUInfo() + return self.isAArch64() and self.isSupported(cpu_feature.AArch64.PTR_AUTH) def isAArch64FPMR(self): - return self.isAArch64() and "fpmr" in self.getCPUInfo() + return self.isAArch64() and self.isSupported(cpu_feature.AArch64.FPMR) def isAArch64Windows(self): """Returns true if the architecture is AArch64 and platform windows.""" @@ -1407,10 +1383,10 @@ def isLoongArch(self): return arch in ["loongarch64", "loongarch32"] def isLoongArchLSX(self): - return self.isLoongArch() and "lsx" in self.getCPUInfo() + return self.isLoongArch() and self.isSupported(cpu_feature.Loong.LSX) def isLoongArchLASX(self): - return self.isLoongArch() and "lasx" in self.getCPUInfo() + return self.isLoongArch() and self.isSupported(cpu_feature.Loong.LASX) def isRISCV(self): """Returns true if the architecture is RISCV64 or RISCV32.""" diff --git a/lldb/source/API/CMakeLists.txt b/lldb/source/API/CMakeLists.txt index fdd6b3b077463..ce59ee505cd3d 100644 --- a/lldb/source/API/CMakeLists.txt +++ b/lldb/source/API/CMakeLists.txt @@ -299,6 +299,8 @@ set(generated_public_headers ${LLDB_OBJ_DIR}/include/lldb/API/SBLanguages.h) file(GLOB root_public_headers ${LLDB_SOURCE_DIR}/include/lldb/lldb-*.h) file(GLOB root_private_headers ${LLDB_SOURCE_DIR}/include/lldb/lldb-private*.h) list(REMOVE_ITEM root_public_headers ${root_private_headers}) +# Skip the initial copy of lldb-defines.h. The fixed version is generated at build time. +list(REMOVE_ITEM root_public_headers ${LLDB_SOURCE_DIR}/include/lldb/lldb-defines.h) find_program(unifdef_EXECUTABLE unifdef) diff --git a/lldb/source/API/SBTarget.cpp b/lldb/source/API/SBTarget.cpp index eb56337de3c44..0d03250753802 100644 --- a/lldb/source/API/SBTarget.cpp +++ b/lldb/source/API/SBTarget.cpp @@ -255,6 +255,7 @@ SBProcess SBTarget::LoadCore(const char *core_file, lldb::SBError &error) { ProcessSP process_sp(target_sp->CreateProcess( target_sp->GetDebugger().GetListener(), "", &filespec, false)); if (process_sp) { + ElapsedTime load_core_time(target_sp->GetStatistics().GetLoadCoreTime()); error.SetError(process_sp->LoadCore()); if (error.Success()) sb_process.SetSP(process_sp); diff --git a/lldb/source/Commands/CommandObjectTarget.cpp b/lldb/source/Commands/CommandObjectTarget.cpp index 940be42d1b6e3..c59d02812f328 100644 --- a/lldb/source/Commands/CommandObjectTarget.cpp +++ b/lldb/source/Commands/CommandObjectTarget.cpp @@ -418,7 +418,11 @@ class CommandObjectTargetCreate : public CommandObjectParsed { if (process_sp) { // Seems weird that we Launch a core file, but that is what we // do! 
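The SBTarget::LoadCore hunk above times core-file loading by constructing an ElapsedTime object tied to the new load-core StatsDuration; the measurement ends when the object goes out of scope. A rough standalone analogue of that RAII pattern, with illustrative names rather than the LLDB classes:

  #include <chrono>

  // Scoped timer that adds its own lifetime to an accumulator on destruction,
  // in the spirit of LLDB's ElapsedTime/StatsDuration pairing.
  class ScopedTimer {
    using Clock = std::chrono::steady_clock;
    Clock::duration &total;
    Clock::time_point start = Clock::now();

  public:
    explicit ScopedTimer(Clock::duration &accumulator) : total(accumulator) {}
    ~ScopedTimer() { total += Clock::now() - start; }
  };

  // Usage shaped like the LoadCore call sites above:
  //   ScopedTimer timer(load_core_time); // starts the clock
  //   ... load the core file ...         // work being measured
  //   // destructor adds the elapsed time to load_core_time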
- error = process_sp->LoadCore(); + { + ElapsedTime load_core_time( + target_sp->GetStatistics().GetLoadCoreTime()); + error = process_sp->LoadCore(); + } if (error.Fail()) { result.AppendError(error.AsCString("unknown core file format")); diff --git a/lldb/source/Core/Debugger.cpp b/lldb/source/Core/Debugger.cpp index ed674ee1275c7..568cd9d3d03b6 100644 --- a/lldb/source/Core/Debugger.cpp +++ b/lldb/source/Core/Debugger.cpp @@ -253,16 +253,18 @@ Status Debugger::SetPropertyValue(const ExecutionContext *exe_ctx, // Statusline setting changed. If we have a statusline instance, update it // now. Otherwise it will get created in the default event handler. std::lock_guard guard(m_statusline_mutex); - if (StatuslineSupported()) + if (StatuslineSupported()) { m_statusline.emplace(*this); - else + m_statusline->Enable(GetSelectedExecutionContextRef()); + } else { m_statusline.reset(); + } } else if (property_path == g_debugger_properties[ePropertyStatuslineFormat].name || property_path == g_debugger_properties[ePropertySeparator].name) { // Statusline format changed. Redraw the statusline. - RedrawStatusline(); + RedrawStatusline(std::nullopt); } else if (property_path == g_debugger_properties[ePropertyUseSourceCache].name) { // use-source-cache changed. Wipe out the cache contents if it was @@ -501,7 +503,7 @@ FormatEntity::Entry Debugger::GetStatuslineFormat() const { bool Debugger::SetStatuslineFormat(const FormatEntity::Entry &format) { constexpr uint32_t idx = ePropertyStatuslineFormat; bool ret = SetPropertyAtIndex(idx, format); - RedrawStatusline(); + RedrawStatusline(std::nullopt); return ret; } @@ -526,7 +528,7 @@ llvm::StringRef Debugger::GetDisabledAnsiSuffix() const { bool Debugger::SetSeparator(llvm::StringRef s) { constexpr uint32_t idx = ePropertySeparator; bool ret = SetPropertyAtIndex(idx, s); - RedrawStatusline(); + RedrawStatusline(std::nullopt); return ret; } @@ -1210,14 +1212,18 @@ void Debugger::RestoreInputTerminalState() { { std::lock_guard guard(m_statusline_mutex); if (m_statusline) - m_statusline->Enable(); + m_statusline->Enable(GetSelectedExecutionContext()); } } -void Debugger::RedrawStatusline(bool update) { +void Debugger::RedrawStatusline( + std::optional exe_ctx_ref) { std::lock_guard guard(m_statusline_mutex); - if (m_statusline) - m_statusline->Redraw(update); + + if (!m_statusline) + return; + + m_statusline->Redraw(exe_ctx_ref); } ExecutionContext Debugger::GetSelectedExecutionContext() { @@ -1226,6 +1232,13 @@ ExecutionContext Debugger::GetSelectedExecutionContext() { return ExecutionContext(exe_ctx_ref); } +ExecutionContextRef Debugger::GetSelectedExecutionContextRef() { + if (TargetSP selected_target_sp = GetSelectedTarget()) + return ExecutionContextRef(selected_target_sp.get(), + /*adopt_selected=*/true); + return ExecutionContextRef(m_dummy_target_sp.get(), /*adopt_selected=*/false); +} + void Debugger::DispatchInputInterrupt() { std::lock_guard guard(m_io_handler_stack.GetMutex()); IOHandlerSP reader_sp(m_io_handler_stack.Top()); @@ -1941,8 +1954,7 @@ void Debugger::FlushProcessOutput(Process &process, bool flush_stdout, } // This function handles events that were broadcast by the process. 
-void Debugger::HandleProcessEvent(const EventSP &event_sp) { - using namespace lldb; +ProcessSP Debugger::HandleProcessEvent(const EventSP &event_sp) { const uint32_t event_type = event_sp->GetType(); ProcessSP process_sp = (event_type == Process::eBroadcastBitStructuredData) @@ -2024,23 +2036,24 @@ void Debugger::HandleProcessEvent(const EventSP &event_sp) { if (pop_process_io_handler) process_sp->PopProcessIOHandler(); } + return process_sp; } -void Debugger::HandleThreadEvent(const EventSP &event_sp) { +ThreadSP Debugger::HandleThreadEvent(const EventSP &event_sp) { // At present the only thread event we handle is the Frame Changed event, and // all we do for that is just reprint the thread status for that thread. - using namespace lldb; const uint32_t event_type = event_sp->GetType(); const bool stop_format = true; + ThreadSP thread_sp; if (event_type == Thread::eBroadcastBitStackChanged || event_type == Thread::eBroadcastBitThreadSelected) { - ThreadSP thread_sp( - Thread::ThreadEventData::GetThreadFromEvent(event_sp.get())); + thread_sp = Thread::ThreadEventData::GetThreadFromEvent(event_sp.get()); if (thread_sp) { thread_sp->GetStatus(*GetAsyncOutputStream(), 0, 1, 1, stop_format, /*show_hidden*/ true); } } + return thread_sp; } bool Debugger::IsForwardingEvents() { return (bool)m_forward_listener_sp; } @@ -2068,6 +2081,11 @@ bool Debugger::StatuslineSupported() { return false; } +static bool RequiresFollowChildWorkaround(const Process &process) { + // FIXME: https://github.com/llvm/llvm-project/issues/160216 + return process.GetFollowForkMode() == eFollowChild; +} + lldb::thread_result_t Debugger::DefaultEventHandler() { ListenerSP listener_sp(GetListener()); ConstString broadcaster_class_target(Target::GetStaticBroadcasterClass()); @@ -2109,28 +2127,37 @@ lldb::thread_result_t Debugger::DefaultEventHandler() { if (StatuslineSupported()) { std::lock_guard guard(m_statusline_mutex); - if (!m_statusline) + if (!m_statusline) { m_statusline.emplace(*this); + m_statusline->Enable(GetSelectedExecutionContextRef()); + } } bool done = false; while (!done) { EventSP event_sp; if (listener_sp->GetEvent(event_sp, std::nullopt)) { + std::optional exe_ctx_ref = std::nullopt; if (event_sp) { Broadcaster *broadcaster = event_sp->GetBroadcaster(); if (broadcaster) { uint32_t event_type = event_sp->GetType(); ConstString broadcaster_class(broadcaster->GetBroadcasterClass()); if (broadcaster_class == broadcaster_class_process) { - HandleProcessEvent(event_sp); + if (ProcessSP process_sp = HandleProcessEvent(event_sp)) + if (!RequiresFollowChildWorkaround(*process_sp)) + exe_ctx_ref = ExecutionContextRef(process_sp.get(), + /*adopt_selected=*/true); } else if (broadcaster_class == broadcaster_class_target) { if (Breakpoint::BreakpointEventData::GetEventDataFromEvent( event_sp.get())) { HandleBreakpointEvent(event_sp); } } else if (broadcaster_class == broadcaster_class_thread) { - HandleThreadEvent(event_sp); + if (ThreadSP thread_sp = HandleThreadEvent(event_sp)) + if (!RequiresFollowChildWorkaround(*thread_sp->GetProcess())) + exe_ctx_ref = ExecutionContextRef(thread_sp.get(), + /*adopt_selected=*/true); } else if (broadcaster == m_command_interpreter_up.get()) { if (event_type & CommandInterpreter::eBroadcastBitQuitCommandReceived) { @@ -2168,7 +2195,7 @@ lldb::thread_result_t Debugger::DefaultEventHandler() { if (m_forward_listener_sp) m_forward_listener_sp->AddEvent(event_sp); } - RedrawStatusline(); + RedrawStatusline(exe_ctx_ref); } } diff --git a/lldb/source/Core/IOHandler.cpp 
b/lldb/source/Core/IOHandler.cpp index f65a1113f3592..57819eeade6e8 100644 --- a/lldb/source/Core/IOHandler.cpp +++ b/lldb/source/Core/IOHandler.cpp @@ -442,7 +442,7 @@ void IOHandlerEditline::AutoCompleteCallback(CompletionRequest &request) { } void IOHandlerEditline::RedrawCallback() { - m_debugger.RedrawStatusline(/*update=*/false); + m_debugger.RedrawStatusline(std::nullopt); } #endif diff --git a/lldb/source/Core/Mangled.cpp b/lldb/source/Core/Mangled.cpp index 91b9c0007617d..0780846b0ed60 100644 --- a/lldb/source/Core/Mangled.cpp +++ b/lldb/source/Core/Mangled.cpp @@ -40,7 +40,7 @@ bool Mangled::IsMangledName(llvm::StringRef name) { return Mangled::GetManglingScheme(name) != Mangled::eManglingSchemeNone; } -Mangled::ManglingScheme Mangled::GetManglingScheme(llvm::StringRef const name) { +Mangled::ManglingScheme Mangled::GetManglingScheme(llvm::StringRef name) { if (name.empty()) return Mangled::eManglingSchemeNone; diff --git a/lldb/source/Core/Statusline.cpp b/lldb/source/Core/Statusline.cpp index 393d427241021..bfbd190fba27c 100644 --- a/lldb/source/Core/Statusline.cpp +++ b/lldb/source/Core/Statusline.cpp @@ -35,9 +35,7 @@ using namespace lldb_private; Statusline::Statusline(Debugger &debugger) : m_debugger(debugger), m_terminal_width(m_debugger.GetTerminalWidth()), - m_terminal_height(m_debugger.GetTerminalHeight()) { - Enable(); -} + m_terminal_height(m_debugger.GetTerminalHeight()) {} Statusline::~Statusline() { Disable(); } @@ -47,16 +45,16 @@ void Statusline::TerminalSizeChanged() { UpdateScrollWindow(ResizeStatusline); - // Draw the old statusline. - Redraw(/*update=*/false); + // Redraw the old statusline. + Redraw(std::nullopt); } -void Statusline::Enable() { +void Statusline::Enable(std::optional exe_ctx_ref) { // Reduce the scroll window to make space for the status bar below. UpdateScrollWindow(EnableStatusline); // Draw the statusline. - Redraw(/*update=*/true); + Redraw(exe_ctx_ref); } void Statusline::Disable() { @@ -69,8 +67,6 @@ void Statusline::Draw(std::string str) { if (!stream_sp) return; - m_last_str = str; - str = ansi::TrimAndPad(str, m_terminal_width); LockedStreamFile locked_stream = stream_sp->Lock(); @@ -127,33 +123,32 @@ void Statusline::UpdateScrollWindow(ScrollWindowMode mode) { m_debugger.RefreshIOHandler(); } -void Statusline::Redraw(bool update) { - if (!update) { - Draw(m_last_str); - return; - } - - ExecutionContext exe_ctx = m_debugger.GetSelectedExecutionContext(); - - // For colors and progress events, the format entity needs access to the - // debugger, which requires a target in the execution context. - if (!exe_ctx.HasTargetScope()) - exe_ctx.SetTargetPtr(&m_debugger.GetSelectedOrDummyTarget()); - - SymbolContext symbol_ctx; - if (ProcessSP process_sp = exe_ctx.GetProcessSP()) { - // Check if the process is stopped, and if it is, make sure it remains - // stopped until we've computed the symbol context. - Process::StopLocker stop_locker; - if (stop_locker.TryLock(&process_sp->GetRunLock())) { - if (auto frame_sp = exe_ctx.GetFrameSP()) - symbol_ctx = frame_sp->GetSymbolContext(eSymbolContextEverything); - } +void Statusline::Redraw(std::optional exe_ctx_ref) { + // Update the cached execution context. + if (exe_ctx_ref) + m_exe_ctx_ref = *exe_ctx_ref; + + // Lock the execution context. + ExecutionContext exe_ctx = + m_exe_ctx_ref.Lock(/*thread_and_frame_only_if_stopped=*/false); + + // Compute the symbol context if we're stopped. 
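The Statusline changes above drop the cached last-rendered string (m_last_str) and instead cache the inputs, an ExecutionContextRef, so a redraw with std::nullopt simply re-renders from the stored context. A simplified, self-contained illustration of that "cache the inputs, not the output" shape, with made-up types:

  #include <optional>
  #include <string>

  // Simplified sketch of the caching change; Context stands in for the
  // execution context, render() for the format-entity evaluation.
  struct Context { std::string summary; };

  class MiniStatusline {
    Context cached; // inputs survive a std::nullopt redraw

  public:
    std::string Redraw(std::optional<Context> fresh) {
      if (fresh)
        cached = *fresh;                    // a new event updates the cache
      return "status: " + cached.summary;   // always re-rendered from inputs
    }
  };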
+ SymbolContext sym_ctx; + llvm::Expected stopped_exe_ctx = + GetStoppedExecutionContext(&m_exe_ctx_ref); + if (stopped_exe_ctx) { + // The StoppedExecutionContext only ensures that we hold the run lock. + // The process could be in an exited or unloaded state and have no frame. + if (auto frame_sp = stopped_exe_ctx->GetFrameSP()) + sym_ctx = frame_sp->GetSymbolContext(eSymbolContextEverything); + } else { + // We can draw the statusline without being stopped. + llvm::consumeError(stopped_exe_ctx.takeError()); } StreamString stream; FormatEntity::Entry format = m_debugger.GetStatuslineFormat(); - FormatEntity::Format(format, stream, &symbol_ctx, &exe_ctx, nullptr, nullptr, + FormatEntity::Format(format, stream, &sym_ctx, &exe_ctx, nullptr, nullptr, false, false); Draw(stream.GetString().str()); diff --git a/lldb/source/Expression/IRExecutionUnit.cpp b/lldb/source/Expression/IRExecutionUnit.cpp index 25d4a87b89ef2..60b9de0d21b2e 100644 --- a/lldb/source/Expression/IRExecutionUnit.cpp +++ b/lldb/source/Expression/IRExecutionUnit.cpp @@ -751,7 +751,12 @@ ResolveFunctionCallLabel(FunctionCallLabel &label, sc_list.Append(*sc_or_err); LoadAddressResolver resolver(*sc.target_sp, symbol_was_missing_weak); - return resolver.Resolve(sc_list).value_or(LLDB_INVALID_ADDRESS); + lldb::addr_t resolved_addr = + resolver.Resolve(sc_list).value_or(LLDB_INVALID_ADDRESS); + if (resolved_addr == LLDB_INVALID_ADDRESS) + return llvm::createStringError("couldn't resolve address for function"); + + return resolved_addr; } lldb::addr_t diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp index 924953cc43fa2..3c49c911108a3 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp @@ -792,7 +792,7 @@ ClangExpressionParser::ClangExpressionParser( // 6. 
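Two hunks above share an error-handling theme: ResolveFunctionCallLabel now reports a failed lookup as an llvm::Error instead of silently returning LLDB_INVALID_ADDRESS, and the statusline deliberately drops the "not stopped" error with llvm::consumeError. A small sketch of that Expected-based pattern; resolveOrError and kInvalidAddr are hypothetical stand-ins:

  #include "llvm/Support/Error.h"
  #include <cstdint>

  // Report resolution failure as an error rather than a sentinel the caller
  // might overlook.
  constexpr uint64_t kInvalidAddr = UINT64_MAX;

  llvm::Expected<uint64_t> resolveOrError(uint64_t raw) {
    if (raw == kInvalidAddr)
      return llvm::createStringError("couldn't resolve address for function");
    return raw;
  }

  // Callers either handle the error or deliberately drop it, as the
  // statusline does for the non-fatal "not stopped" case:
  //   if (auto addr = resolveOrError(raw)) { /* use *addr */ }
  //   else llvm::consumeError(addr.takeError());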
Set up the source management objects inside the compiler m_compiler->createFileManager(); if (!m_compiler->hasSourceManager()) - m_compiler->createSourceManager(m_compiler->getFileManager()); + m_compiler->createSourceManager(); m_compiler->createPreprocessor(TU_Complete); switch (expr.Language().AsLanguageType()) { diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ModuleDependencyCollector.h b/lldb/source/Plugins/ExpressionParser/Clang/ModuleDependencyCollector.h index 4fe727460fdb9..dcba0d9c34962 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ModuleDependencyCollector.h +++ b/lldb/source/Plugins/ExpressionParser/Clang/ModuleDependencyCollector.h @@ -19,8 +19,8 @@ class ModuleDependencyCollectorAdaptor public: ModuleDependencyCollectorAdaptor( std::shared_ptr file_collector) - : clang::ModuleDependencyCollector(""), m_file_collector(file_collector) { - } + : clang::ModuleDependencyCollector("", llvm::vfs::getRealFileSystem()), + m_file_collector(file_collector) {} void addFile(llvm::StringRef Filename, llvm::StringRef FileDst = {}) override { diff --git a/lldb/source/Plugins/Instruction/RISCV/EmulateInstructionRISCV.cpp b/lldb/source/Plugins/Instruction/RISCV/EmulateInstructionRISCV.cpp index 20661290ca4c6..5c1b7d4943b3f 100644 --- a/lldb/source/Plugins/Instruction/RISCV/EmulateInstructionRISCV.cpp +++ b/lldb/source/Plugins/Instruction/RISCV/EmulateInstructionRISCV.cpp @@ -804,7 +804,7 @@ class Executor { return transformOptional( inst.rs1.ReadI64(m_emu), [&](int64_t rs1) { - int64_t result = rs1 + int64_t(SignExt(inst.imm)); + uint64_t result = rs1 + uint64_t(SignExt(inst.imm)); // Check if this is a stack pointer adjustment. if (inst.rd.rd == RISCV_GPR_SP && inst.rs1.rs == RISCV_GPR_SP) { diff --git a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp index 4e8a430af8c6c..a2199cb65cd35 100644 --- a/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp +++ b/lldb/source/Plugins/Language/CPlusPlus/CPlusPlusLanguage.cpp @@ -104,10 +104,10 @@ CPlusPlusLanguage::GetFunctionNameInfo(ConstString name) const { } bool CPlusPlusLanguage::SymbolNameFitsToLanguage(Mangled mangled) const { - const char *mangled_name = mangled.GetMangledName().GetCString(); - auto mangling_scheme = Mangled::GetManglingScheme(mangled_name); - return mangled_name && (mangling_scheme == Mangled::eManglingSchemeItanium || - mangling_scheme == Mangled::eManglingSchemeMSVC); + auto mangling_scheme = + Mangled::GetManglingScheme(mangled.GetMangledName().GetStringRef()); + return mangling_scheme == Mangled::eManglingSchemeItanium || + mangling_scheme == Mangled::eManglingSchemeMSVC; } ConstString CPlusPlusLanguage::GetDemangledFunctionNameWithoutArguments( diff --git a/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp b/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp index 931baf5927a04..097c91b623e8f 100644 --- a/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp +++ b/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp @@ -826,6 +826,24 @@ bool ObjectFileELF::ParseHeader() { } UUID ObjectFileELF::GetUUID() { + if (m_uuid) + return m_uuid; + + // Try loading note info from any PT_NOTE program headers. This is more + // friendly to ELF files that have no section headers, like ELF files that + // are loaded from memory. 
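The EmulateInstructionRISCV hunk above computes the ADDI result in uint64_t rather than int64_t. The reason is that signed overflow is undefined behaviour in C++, while unsigned arithmetic wraps modulo 2^64, which matches the hardware register semantics. A minimal illustration of the same idea (emulateAddi is a made-up name, not the plugin's API):

  #include <cstdint>

  // Compute in unsigned space to get well-defined wraparound, then
  // reinterpret the 64-bit pattern as the signed register value.
  int64_t emulateAddi(int64_t rs1, int32_t imm) {
    uint64_t wrapped =
        static_cast<uint64_t>(rs1) + static_cast<uint64_t>(imm);
    return static_cast<int64_t>(wrapped);
  }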
+ for (const ELFProgramHeader &H : ProgramHeaders()) { + if (H.p_type == llvm::ELF::PT_NOTE) { + DataExtractor note_data = GetSegmentData(H); + if (note_data.GetByteSize()) { + lldb_private::ArchSpec arch_spec; + RefineModuleDetailsFromNote(note_data, arch_spec, m_uuid); + if (m_uuid) + return m_uuid; + } + } + } + // Need to parse the section list to get the UUIDs, so make sure that's been // done. if (!ParseSectionHeaders() && GetType() != ObjectFile::eTypeCoreFile) diff --git a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp index fada1fda2b4bc..9cdb8467bfc60 100644 --- a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp +++ b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp @@ -2067,6 +2067,43 @@ static bool ParseTrieEntries(DataExtractor &data, lldb::offset_t offset, return true; } +static bool +TryParseV2ObjCMetadataSymbol(const char *&symbol_name, + const char *&symbol_name_non_abi_mangled, + SymbolType &type) { + static constexpr llvm::StringLiteral g_objc_v2_prefix_class("_OBJC_CLASS_$_"); + static constexpr llvm::StringLiteral g_objc_v2_prefix_metaclass( + "_OBJC_METACLASS_$_"); + static constexpr llvm::StringLiteral g_objc_v2_prefix_ivar("_OBJC_IVAR_$_"); + + llvm::StringRef symbol_name_ref(symbol_name); + if (symbol_name_ref.empty()) + return false; + + if (symbol_name_ref.starts_with(g_objc_v2_prefix_class)) { + symbol_name_non_abi_mangled = symbol_name + 1; + symbol_name = symbol_name + g_objc_v2_prefix_class.size(); + type = eSymbolTypeObjCClass; + return true; + } + + if (symbol_name_ref.starts_with(g_objc_v2_prefix_metaclass)) { + symbol_name_non_abi_mangled = symbol_name + 1; + symbol_name = symbol_name + g_objc_v2_prefix_metaclass.size(); + type = eSymbolTypeObjCMetaClass; + return true; + } + + if (symbol_name_ref.starts_with(g_objc_v2_prefix_ivar)) { + symbol_name_non_abi_mangled = symbol_name + 1; + symbol_name = symbol_name + g_objc_v2_prefix_ivar.size(); + type = eSymbolTypeObjCIVar; + return true; + } + + return false; +} + static SymbolType GetSymbolType(const char *&symbol_name, bool &demangled_is_synthesized, const SectionSP &text_section_sp, @@ -2183,9 +2220,6 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { lldb::offset_t offset = MachHeaderSizeFromMagic(m_header.magic); uint32_t i; FileSpecList dylib_files; - llvm::StringRef g_objc_v2_prefix_class("_OBJC_CLASS_$_"); - llvm::StringRef g_objc_v2_prefix_metaclass("_OBJC_METACLASS_$_"); - llvm::StringRef g_objc_v2_prefix_ivar("_OBJC_IVAR_$_"); UUID image_uuid; for (i = 0; i < m_header.ncmds; ++i) { @@ -2805,36 +2839,15 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { is_gsym = true; sym[sym_idx].SetExternal(true); - if (symbol_name && symbol_name[0] == '_' && - symbol_name[1] == 'O') { - llvm::StringRef symbol_name_ref(symbol_name); - if (symbol_name_ref.starts_with( - g_objc_v2_prefix_class)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = - symbol_name + g_objc_v2_prefix_class.size(); - type = eSymbolTypeObjCClass; - demangled_is_synthesized = true; - - } else if (symbol_name_ref.starts_with( - g_objc_v2_prefix_metaclass)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = - symbol_name + g_objc_v2_prefix_metaclass.size(); - type = eSymbolTypeObjCMetaClass; - demangled_is_synthesized = true; - } else if (symbol_name_ref.starts_with( - g_objc_v2_prefix_ivar)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = - symbol_name + g_objc_v2_prefix_ivar.size(); - type = 
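The new TryParseV2ObjCMetadataSymbol helper above folds three duplicated prefix checks (_OBJC_CLASS_$_, _OBJC_METACLASS_$_, _OBJC_IVAR_$_) into one place. A compact sketch of the same table-driven prefix classification using llvm::StringRef; SymbolKind and the function name are placeholders, not the LLDB enums:

  #include "llvm/ADT/StringRef.h"
  #include <utility>

  enum class SymbolKind { None, ObjCClass, ObjCMetaClass, ObjCIVar };

  // Classify a symbol by a table of known prefixes and return the trailing
  // class/ivar name through `payload`.
  SymbolKind classifyObjCSymbol(llvm::StringRef name, llvm::StringRef &payload) {
    static const std::pair<llvm::StringRef, SymbolKind> table[] = {
        {"_OBJC_CLASS_$_", SymbolKind::ObjCClass},
        {"_OBJC_METACLASS_$_", SymbolKind::ObjCMetaClass},
        {"_OBJC_IVAR_$_", SymbolKind::ObjCIVar},
    };
    for (const auto &[prefix, kind] : table)
      if (name.consume_front(prefix)) { // strips the prefix on success
        payload = name;
        return kind;
      }
    return SymbolKind::None;
  }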
eSymbolTypeObjCIVar; - demangled_is_synthesized = true; - } + if (TryParseV2ObjCMetadataSymbol( + symbol_name, symbol_name_non_abi_mangled, + type)) { + demangled_is_synthesized = true; } else { if (nlist.n_value != 0) symbol_section = section_info.GetSection( nlist.n_sect, nlist.n_value); + type = eSymbolTypeData; } break; @@ -3320,48 +3333,10 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { symbol_sect_name) { type = eSymbolTypeRuntime; - if (symbol_name) { - llvm::StringRef symbol_name_ref(symbol_name); - if (symbol_name_ref.starts_with("_OBJC_")) { - llvm::StringRef - g_objc_v2_prefix_class( - "_OBJC_CLASS_$_"); - llvm::StringRef - g_objc_v2_prefix_metaclass( - "_OBJC_METACLASS_$_"); - llvm::StringRef - g_objc_v2_prefix_ivar("_OBJC_IVAR_$_"); - if (symbol_name_ref.starts_with( - g_objc_v2_prefix_class)) { - symbol_name_non_abi_mangled = - symbol_name + 1; - symbol_name = - symbol_name + - g_objc_v2_prefix_class.size(); - type = eSymbolTypeObjCClass; - demangled_is_synthesized = true; - } else if ( - symbol_name_ref.starts_with( - g_objc_v2_prefix_metaclass)) { - symbol_name_non_abi_mangled = - symbol_name + 1; - symbol_name = - symbol_name + - g_objc_v2_prefix_metaclass.size(); - type = eSymbolTypeObjCMetaClass; - demangled_is_synthesized = true; - } else if (symbol_name_ref.starts_with( - g_objc_v2_prefix_ivar)) { - symbol_name_non_abi_mangled = - symbol_name + 1; - symbol_name = - symbol_name + - g_objc_v2_prefix_ivar.size(); - type = eSymbolTypeObjCIVar; - demangled_is_synthesized = true; - } - } - } + if (TryParseV2ObjCMetadataSymbol( + symbol_name, + symbol_name_non_abi_mangled, type)) + demangled_is_synthesized = true; } else if (symbol_sect_name && ::strstr(symbol_sect_name, "__gcc_except_tab") == @@ -3652,7 +3627,7 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { if (is_debug) { switch (nlist.n_type) { - case N_GSYM: + case N_GSYM: { // global symbol: name,,NO_SECT,type,0 // Sometimes the N_GSYM value contains the address. 
@@ -3668,33 +3643,17 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { is_gsym = true; sym[sym_idx].SetExternal(true); - if (symbol_name && symbol_name[0] == '_' && symbol_name[1] == 'O') { - llvm::StringRef symbol_name_ref(symbol_name); - if (symbol_name_ref.starts_with(g_objc_v2_prefix_class)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = symbol_name + g_objc_v2_prefix_class.size(); - type = eSymbolTypeObjCClass; - demangled_is_synthesized = true; - - } else if (symbol_name_ref.starts_with( - g_objc_v2_prefix_metaclass)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = symbol_name + g_objc_v2_prefix_metaclass.size(); - type = eSymbolTypeObjCMetaClass; - demangled_is_synthesized = true; - } else if (symbol_name_ref.starts_with(g_objc_v2_prefix_ivar)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = symbol_name + g_objc_v2_prefix_ivar.size(); - type = eSymbolTypeObjCIVar; - demangled_is_synthesized = true; - } + if (TryParseV2ObjCMetadataSymbol(symbol_name, + symbol_name_non_abi_mangled, type)) { + demangled_is_synthesized = true; } else { if (nlist.n_value != 0) symbol_section = section_info.GetSection(nlist.n_sect, nlist.n_value); + type = eSymbolTypeData; } - break; + } break; case N_FNAME: // procedure name (f77 kludge): name,,NO_SECT,0,0 @@ -4130,38 +4089,9 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) { ::strstr(symbol_sect_name, "__objc") == symbol_sect_name) { type = eSymbolTypeRuntime; - if (symbol_name) { - llvm::StringRef symbol_name_ref(symbol_name); - if (symbol_name_ref.starts_with("_OBJC_")) { - llvm::StringRef g_objc_v2_prefix_class( - "_OBJC_CLASS_$_"); - llvm::StringRef g_objc_v2_prefix_metaclass( - "_OBJC_METACLASS_$_"); - llvm::StringRef g_objc_v2_prefix_ivar( - "_OBJC_IVAR_$_"); - if (symbol_name_ref.starts_with(g_objc_v2_prefix_class)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = - symbol_name + g_objc_v2_prefix_class.size(); - type = eSymbolTypeObjCClass; - demangled_is_synthesized = true; - } else if (symbol_name_ref.starts_with( - g_objc_v2_prefix_metaclass)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = - symbol_name + g_objc_v2_prefix_metaclass.size(); - type = eSymbolTypeObjCMetaClass; - demangled_is_synthesized = true; - } else if (symbol_name_ref.starts_with( - g_objc_v2_prefix_ivar)) { - symbol_name_non_abi_mangled = symbol_name + 1; - symbol_name = - symbol_name + g_objc_v2_prefix_ivar.size(); - type = eSymbolTypeObjCIVar; - demangled_is_synthesized = true; - } - } - } + if (TryParseV2ObjCMetadataSymbol( + symbol_name, symbol_name_non_abi_mangled, type)) + demangled_is_synthesized = true; } else if (symbol_sect_name && ::strstr(symbol_sect_name, "__gcc_except_tab") == symbol_sect_name) { diff --git a/lldb/source/Plugins/Platform/Android/AdbClient.cpp b/lldb/source/Plugins/Platform/Android/AdbClient.cpp index a179260ca15f6..0fbb48a2e16a0 100644 --- a/lldb/source/Plugins/Platform/Android/AdbClient.cpp +++ b/lldb/source/Plugins/Platform/Android/AdbClient.cpp @@ -8,61 +8,48 @@ #include "AdbClient.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/StringRef.h" -#include "llvm/Support/FileUtilities.h" - #include "lldb/Host/ConnectionFileDescriptor.h" #include "lldb/Host/FileSystem.h" -#include "lldb/Host/PosixApi.h" -#include "lldb/Utility/DataBuffer.h" -#include "lldb/Utility/DataBufferHeap.h" +#include "lldb/Utility/Connection.h" #include "lldb/Utility/DataEncoder.h" #include "lldb/Utility/DataExtractor.h" 
#include "lldb/Utility/FileSpec.h" +#include "lldb/Utility/LLDBLog.h" +#include "lldb/Utility/Log.h" +#include "lldb/Utility/Status.h" #include "lldb/Utility/StreamString.h" #include "lldb/Utility/Timeout.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/FileUtilities.h" +#include #include - -#include #include #include #include -// On Windows, transitive dependencies pull in , which defines a -// macro that clashes with a method name. -#ifdef SendMessage -#undef SendMessage -#endif - using namespace lldb; using namespace lldb_private; using namespace lldb_private::platform_android; using namespace std::chrono; +using namespace llvm; -static const seconds kReadTimeout(20); +static const char *kSocketNamespaceAbstract = "localabstract"; +static const char *kSocketNamespaceFileSystem = "localfilesystem"; +const seconds kReadTimeout(20); static const char *kOKAY = "OKAY"; static const char *kFAIL = "FAIL"; static const char *kDATA = "DATA"; static const char *kDONE = "DONE"; - static const char *kSEND = "SEND"; static const char *kRECV = "RECV"; static const char *kSTAT = "STAT"; - static const size_t kSyncPacketLen = 8; -// Maximum size of a filesync DATA packet. static const size_t kMaxPushData = 2 * 1024; -// Default mode for pushed files. -static const uint32_t kDefaultMode = 0100770; // S_IFREG | S_IRWXU | S_IRWXG - -static const char *kSocketNamespaceAbstract = "localabstract"; -static const char *kSocketNamespaceFileSystem = "localfilesystem"; +static const uint32_t kDefaultMode = 0100770; static Status ReadAllBytes(Connection &conn, void *buffer, size_t size) { - Status error; ConnectionStatus status; char *read_buffer = static_cast(buffer); @@ -85,86 +72,215 @@ static Status ReadAllBytes(Connection &conn, void *buffer, size_t size) { error = Status::FromErrorStringWithFormat( "Unable to read requested number of bytes. 
Connection status: %d.", status); + return error; } -Status AdbClient::CreateByDeviceID(const std::string &device_id, - AdbClient &adb) { - Status error; - std::string android_serial; - if (!device_id.empty()) - android_serial = device_id; - else if (const char *env_serial = std::getenv("ANDROID_SERIAL")) - android_serial = env_serial; +static Status ReadAdbMessage(Connection &conn, std::vector &message) { + message.clear(); - if (android_serial.empty()) { - DeviceIDList connected_devices; - error = adb.GetDevices(connected_devices); - if (error.Fail()) - return error; + char buffer[5]; + buffer[4] = 0; + + auto error = ReadAllBytes(conn, buffer, 4); + if (error.Fail()) + return error; + + unsigned int packet_len = 0; + sscanf(buffer, "%x", &packet_len); + + message.resize(packet_len, 0); + error = ReadAllBytes(conn, &message[0], packet_len); + if (error.Fail()) + message.clear(); - if (connected_devices.size() != 1) - return Status::FromErrorStringWithFormat( - "Expected a single connected device, got instead %zu - try " - "setting 'ANDROID_SERIAL'", - connected_devices.size()); - adb.SetDeviceID(connected_devices.front()); - } else { - adb.SetDeviceID(android_serial); - } return error; } -AdbClient::AdbClient() = default; - -AdbClient::AdbClient(const std::string &device_id) : m_device_id(device_id) {} +static Status GetResponseError(Connection &conn, const char *response_id) { + if (strcmp(response_id, kFAIL) != 0) + return Status::FromErrorStringWithFormat( + "Got unexpected response id from adb: \"%s\"", response_id); -AdbClient::~AdbClient() = default; + std::vector error_message; + auto error = ReadAdbMessage(conn, error_message); + if (!error.Success()) + return error; -void AdbClient::SetDeviceID(const std::string &device_id) { - m_device_id = device_id; + std::string error_str(&error_message[0], error_message.size()); + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF(log, "ADB error: %s", error_str.c_str()); + return Status(error_str); } -const std::string &AdbClient::GetDeviceID() const { return m_device_id; } +static Status ReadResponseStatus(Connection &conn) { + char response_id[5]; -Status AdbClient::Connect() { + const size_t packet_len = 4; + response_id[packet_len] = 0; + + auto error = ReadAllBytes(conn, response_id, packet_len); + if (error.Fail()) + return error; + + if (strncmp(response_id, kOKAY, packet_len) != 0) + return GetResponseError(conn, response_id); + + return error; +} + +static Status SendAdbMessage(Connection &conn, llvm::StringRef packet) { Status error; - m_conn = std::make_unique(); + + char length_buffer[5]; + snprintf(length_buffer, sizeof(length_buffer), "%04x", + static_cast(packet.size())); + + ConnectionStatus status; + + conn.Write(length_buffer, 4, status, &error); + if (error.Fail()) + return error; + + conn.Write(packet.str().c_str(), packet.size(), status, &error); + return error; +} + +static Status ConnectToAdb(Connection &conn) { std::string port = "5037"; - if (const char *env_port = std::getenv("ANDROID_ADB_SERVER_PORT")) { + if (const char *env_port = std::getenv("ANDROID_ADB_SERVER_PORT")) port = env_port; - } std::string uri = "connect://127.0.0.1:" + port; - m_conn->Connect(uri.c_str(), &error); + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF(log, "Connecting to ADB server at %s", uri.c_str()); + + Status error; + conn.Connect(uri.c_str(), &error); return error; } -Status AdbClient::GetDevices(DeviceIDList &device_list) { - device_list.clear(); - - auto error = SendMessage("host:devices"); +static Status 
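SendAdbMessage and ReadAdbMessage above implement the adb smart-socket framing: each message is a 4-character lowercase-hex length prefix followed by the payload, and replies begin with "OKAY" or "FAIL". A standalone sketch of the encoding side only, independent of the LLDB Connection classes:

  #include <cstdio>
  #include <string>

  // Frame a payload for the adb host protocol: 4 hex digits of length,
  // then the payload bytes.
  std::string frameAdbMessage(const std::string &payload) {
    char len[5];
    std::snprintf(len, sizeof(len), "%04zx", payload.size());
    return std::string(len, 4) + payload;
  }

  // Example: frameAdbMessage("host:devices") produces "000chost:devices".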
EnterSyncMode(Connection &conn) { + auto error = SendAdbMessage(conn, "sync:"); if (error.Fail()) return error; - error = ReadResponseStatus(); + return ReadResponseStatus(conn); +} + +static Status SelectTargetDevice(Connection &conn, llvm::StringRef device_id) { + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOG(log, "Selecting device: {0}", device_id); + + std::ostringstream msg; + msg << "host:transport:" << device_id.str(); + + auto error = SendAdbMessage(conn, msg.str()); if (error.Fail()) return error; - std::vector in_buffer; - error = ReadMessage(in_buffer); + return ReadResponseStatus(conn); +} - llvm::StringRef response(&in_buffer[0], in_buffer.size()); - llvm::SmallVector devices; - response.split(devices, "\n", -1, false); +Expected AdbClient::ResolveDeviceID(StringRef device_id) { + StringRef preferred_serial; + if (!device_id.empty()) + preferred_serial = device_id; + else if (const char *env_serial = std::getenv("ANDROID_SERIAL")) + preferred_serial = env_serial; - for (const auto &device : devices) - device_list.push_back(std::string(device.split('\t').first)); + if (preferred_serial.empty()) { + DeviceIDList connected_devices; - // Force disconnect since ADB closes connection after host:devices response - // is sent. - m_conn.reset(); - return error; + auto GetDevices = [](DeviceIDList &device_list) -> Status { + device_list.clear(); + + // Create temporary ADB client for this operation only + auto temp_conn = std::make_unique(); + auto error = ConnectToAdb(*temp_conn); + if (error.Fail()) + return error; + + // NOTE: ADB closes the connection after host:devices response. + // The connection is no longer valid + error = SendAdbMessage(*temp_conn, "host:devices"); + if (error.Fail()) + return error; + + error = ReadResponseStatus(*temp_conn); + if (error.Fail()) + return error; + + std::vector in_buffer; + error = ReadAdbMessage(*temp_conn, in_buffer); + + StringRef response(&in_buffer[0], in_buffer.size()); + SmallVector devices; + response.split(devices, "\n", -1, false); + + for (const auto &device : devices) + device_list.push_back(std::string(device.split('\t').first)); + return error; + }; + + Status error = GetDevices(connected_devices); + if (error.Fail()) + return error.ToError(); + + if (connected_devices.size() != 1) + return createStringError( + inconvertibleErrorCode(), + "Expected a single connected device, got instead %zu - try " + "setting 'ANDROID_SERIAL'", + connected_devices.size()); + + std::string resolved_device_id = std::move(connected_devices.front()); + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF(log, "AdbClient::ResolveDeviceID Resolved device ID: %s", + resolved_device_id.c_str()); + return resolved_device_id; + } + + std::string resolved_device_id = preferred_serial.str(); + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF(log, "AdbClient::ResolveDeviceID Resolved device ID: %s", + resolved_device_id.c_str()); + return resolved_device_id; +} + +AdbClient::AdbClient(llvm::StringRef device_id) : m_device_id(device_id) { + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF(log, + "AdbClient::AdbClient(device_id='%s') - Creating AdbClient with " + "device ID", + device_id.str().c_str()); + m_conn = std::make_unique(); + Connect(); +} + +AdbClient::AdbClient() { + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF( + log, + "AdbClient::AdbClient() - Creating AdbClient with default constructor"); + m_conn = std::make_unique(); + Connect(); +} + +AdbClient::~AdbClient() { + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF(log, + 
"AdbClient::~AdbClient() - Destroying AdbClient for device: %s", + m_device_id.c_str()); +} + +llvm::StringRef AdbClient::GetDeviceID() const { return m_device_id; } + +Status AdbClient::Connect() { + if (m_conn->IsConnected()) + return Status(); + + return ConnectToAdb(*m_conn); } Status AdbClient::SetPortForwarding(const uint16_t local_port, @@ -177,7 +293,7 @@ Status AdbClient::SetPortForwarding(const uint16_t local_port, if (error.Fail()) return error; - return ReadResponseStatus(); + return ReadResponseStatus(*m_conn); } Status @@ -196,7 +312,7 @@ AdbClient::SetPortForwarding(const uint16_t local_port, if (error.Fail()) return error; - return ReadResponseStatus(); + return ReadResponseStatus(*m_conn); } Status AdbClient::DeletePortForwarding(const uint16_t local_port) { @@ -207,56 +323,13 @@ Status AdbClient::DeletePortForwarding(const uint16_t local_port) { if (error.Fail()) return error; - return ReadResponseStatus(); -} - -Status AdbClient::SendMessage(const std::string &packet, const bool reconnect) { - Status error; - if (!m_conn || reconnect) { - error = Connect(); - if (error.Fail()) - return error; - } - - char length_buffer[5]; - snprintf(length_buffer, sizeof(length_buffer), "%04x", - static_cast(packet.size())); - - ConnectionStatus status; - - m_conn->Write(length_buffer, 4, status, &error); - if (error.Fail()) - return error; - - m_conn->Write(packet.c_str(), packet.size(), status, &error); - return error; + return ReadResponseStatus(*m_conn); } -Status AdbClient::SendDeviceMessage(const std::string &packet) { +Status AdbClient::SendDeviceMessage(llvm::StringRef packet) { std::ostringstream msg; - msg << "host-serial:" << m_device_id << ":" << packet; - return SendMessage(msg.str()); -} - -Status AdbClient::ReadMessage(std::vector &message) { - message.clear(); - - char buffer[5]; - buffer[4] = 0; - - auto error = ReadAllBytes(buffer, 4); - if (error.Fail()) - return error; - - unsigned int packet_len = 0; - sscanf(buffer, "%x", &packet_len); - - message.resize(packet_len, 0); - error = ReadAllBytes(&message[0], packet_len); - if (error.Fail()) - message.clear(); - - return error; + msg << "host-serial:" << m_device_id << ":" << packet.str(); + return SendAdbMessage(*m_conn, msg.str()); } Status AdbClient::ReadMessageStream(std::vector &message, @@ -264,6 +337,9 @@ Status AdbClient::ReadMessageStream(std::vector &message, auto start = steady_clock::now(); message.clear(); + if (!m_conn) + return Status::FromErrorString("No connection available"); + Status error; lldb::ConnectionStatus status = lldb::eConnectionStatusSuccess; char buffer[1024]; @@ -282,87 +358,22 @@ Status AdbClient::ReadMessageStream(std::vector &message, return error; } -Status AdbClient::ReadResponseStatus() { - char response_id[5]; - - static const size_t packet_len = 4; - response_id[packet_len] = 0; - - auto error = ReadAllBytes(response_id, packet_len); - if (error.Fail()) - return error; - - if (strncmp(response_id, kOKAY, packet_len) != 0) - return GetResponseError(response_id); - - return error; -} - -Status AdbClient::GetResponseError(const char *response_id) { - if (strcmp(response_id, kFAIL) != 0) - return Status::FromErrorStringWithFormat( - "Got unexpected response id from adb: \"%s\"", response_id); - - std::vector error_message; - auto error = ReadMessage(error_message); - if (!error.Success()) - return error; - return Status(std::string(&error_message[0], error_message.size())); -} - -Status AdbClient::SwitchDeviceTransport() { - std::ostringstream msg; - msg << "host:transport:" << 
m_device_id; - - auto error = SendMessage(msg.str()); - if (error.Fail()) - return error; - - return ReadResponseStatus(); -} - -Status AdbClient::StartSync() { - auto error = SwitchDeviceTransport(); - if (error.Fail()) - return Status::FromErrorStringWithFormat( - "Failed to switch to device transport: %s", error.AsCString()); - - error = Sync(); - if (error.Fail()) - return Status::FromErrorStringWithFormat("Sync failed: %s", - error.AsCString()); - - return error; -} - -Status AdbClient::Sync() { - auto error = SendMessage("sync:", false); - if (error.Fail()) - return error; - - return ReadResponseStatus(); -} - -Status AdbClient::ReadAllBytes(void *buffer, size_t size) { - return ::ReadAllBytes(*m_conn, buffer, size); -} - Status AdbClient::internalShell(const char *command, milliseconds timeout, std::vector &output_buf) { output_buf.clear(); - auto error = SwitchDeviceTransport(); + auto error = SelectTargetDevice(*m_conn, m_device_id); if (error.Fail()) return Status::FromErrorStringWithFormat( - "Failed to switch to device transport: %s", error.AsCString()); + "Failed to select target device: %s", error.AsCString()); StreamString adb_command; adb_command.Printf("shell:%s", command); - error = SendMessage(std::string(adb_command.GetString()), false); + error = SendAdbMessage(*m_conn, std::string(adb_command.GetString())); if (error.Fail()) return error; - error = ReadResponseStatus(); + error = ReadResponseStatus(*m_conn); if (error.Fail()) return error; @@ -417,18 +428,8 @@ Status AdbClient::ShellToFile(const char *command, milliseconds timeout, return Status(); } -std::unique_ptr -AdbClient::GetSyncService(Status &error) { - std::unique_ptr sync_service; - error = StartSync(); - if (error.Success()) - sync_service.reset(new SyncService(std::move(m_conn))); - - return sync_service; -} - -Status AdbClient::SyncService::internalPullFile(const FileSpec &remote_file, - const FileSpec &local_file) { +Status AdbSyncService::PullFileImpl(const FileSpec &remote_file, + const FileSpec &local_file) { const auto local_file_path = local_file.GetPath(); llvm::FileRemover local_file_remover(local_file_path); @@ -462,8 +463,8 @@ Status AdbClient::SyncService::internalPullFile(const FileSpec &remote_file, return error; } -Status AdbClient::SyncService::internalPushFile(const FileSpec &local_file, - const FileSpec &remote_file) { +Status AdbSyncService::PushFileImpl(const FileSpec &local_file, + const FileSpec &remote_file) { const auto local_file_path(local_file.GetPath()); std::ifstream src(local_file_path.c_str(), std::ios::in | std::ios::binary); if (!src.is_open()) @@ -487,7 +488,9 @@ Status AdbClient::SyncService::internalPushFile(const FileSpec &local_file, error.AsCString()); } error = SendSyncRequest( - kDONE, llvm::sys::toTimeT(FileSystem::Instance().GetModificationTime(local_file)), + kDONE, + llvm::sys::toTimeT( + FileSystem::Instance().GetModificationTime(local_file)), nullptr); if (error.Fail()) return error; @@ -500,7 +503,7 @@ Status AdbClient::SyncService::internalPushFile(const FileSpec &local_file, error.AsCString()); if (response_id == kFAIL) { std::string error_message(data_len, 0); - error = ReadAllBytes(&error_message[0], data_len); + error = ReadAllBytes(*m_conn, &error_message[0], data_len); if (error.Fail()) return Status::FromErrorStringWithFormat( "Failed to read DONE error message: %s", error.AsCString()); @@ -518,9 +521,8 @@ Status AdbClient::SyncService::internalPushFile(const FileSpec &local_file, return error; } -Status AdbClient::SyncService::internalStat(const 
FileSpec &remote_file, - uint32_t &mode, uint32_t &size, - uint32_t &mtime) { +Status AdbSyncService::StatImpl(const FileSpec &remote_file, uint32_t &mode, + uint32_t &size, uint32_t &mtime) { const std::string remote_file_path(remote_file.GetPath(false)); auto error = SendSyncRequest(kSTAT, remote_file_path.length(), remote_file_path.c_str()); @@ -532,7 +534,7 @@ Status AdbClient::SyncService::internalStat(const FileSpec &remote_file, static const size_t response_len = stat_len + (sizeof(uint32_t) * 3); std::vector buffer(response_len); - error = ReadAllBytes(&buffer[0], buffer.size()); + error = ReadAllBytes(*m_conn, &buffer[0], buffer.size()); if (error.Fail()) return Status::FromErrorStringWithFormat("Failed to read response: %s", error.AsCString()); @@ -555,51 +557,57 @@ Status AdbClient::SyncService::internalStat(const FileSpec &remote_file, return Status(); } -Status AdbClient::SyncService::PullFile(const FileSpec &remote_file, - const FileSpec &local_file) { - return executeCommand([this, &remote_file, &local_file]() { - return internalPullFile(remote_file, local_file); +Status AdbSyncService::PullFile(const FileSpec &remote_file, + const FileSpec &local_file) { + return ExecuteCommand([this, &remote_file, &local_file]() { + return PullFileImpl(remote_file, local_file); }); } -Status AdbClient::SyncService::PushFile(const FileSpec &local_file, - const FileSpec &remote_file) { - return executeCommand([this, &local_file, &remote_file]() { - return internalPushFile(local_file, remote_file); +Status AdbSyncService::PushFile(const FileSpec &local_file, + const FileSpec &remote_file) { + return ExecuteCommand([this, &local_file, &remote_file]() { + return PushFileImpl(local_file, remote_file); }); } -Status AdbClient::SyncService::Stat(const FileSpec &remote_file, uint32_t &mode, - uint32_t &size, uint32_t &mtime) { - return executeCommand([this, &remote_file, &mode, &size, &mtime]() { - return internalStat(remote_file, mode, size, mtime); +Status AdbSyncService::Stat(const FileSpec &remote_file, uint32_t &mode, + uint32_t &size, uint32_t &mtime) { + return ExecuteCommand([this, &remote_file, &mode, &size, &mtime]() { + return StatImpl(remote_file, mode, size, mtime); }); } -bool AdbClient::SyncService::IsConnected() const { +bool AdbSyncService::IsConnected() const { return m_conn && m_conn->IsConnected(); } -AdbClient::SyncService::SyncService(std::unique_ptr &&conn) - : m_conn(std::move(conn)) {} - -Status -AdbClient::SyncService::executeCommand(const std::function &cmd) { - if (!m_conn) - return Status::FromErrorString("SyncService is disconnected"); +AdbSyncService::AdbSyncService(const std::string device_id) + : m_device_id(device_id) { + m_conn = std::make_unique(); + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF(log, + "AdbSyncService::AdbSyncService() - Creating AdbSyncService for " + "device: %s", + m_device_id.c_str()); +} +Status AdbSyncService::ExecuteCommand(const std::function &cmd) { Status error = cmd(); - if (error.Fail()) - m_conn.reset(); - return error; } -AdbClient::SyncService::~SyncService() = default; +AdbSyncService::~AdbSyncService() { + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF(log, + "AdbSyncService::~AdbSyncService() - Destroying AdbSyncService for " + "device: %s", + m_device_id.c_str()); +} -Status AdbClient::SyncService::SendSyncRequest(const char *request_id, - const uint32_t data_len, - const void *data) { +Status AdbSyncService::SendSyncRequest(const char *request_id, + const uint32_t data_len, + const void *data) { DataEncoder 
encoder(eByteOrderLittle, sizeof(void *)); encoder.AppendData(llvm::StringRef(request_id)); encoder.AppendU32(data_len); @@ -615,11 +623,11 @@ Status AdbClient::SyncService::SendSyncRequest(const char *request_id, return error; } -Status AdbClient::SyncService::ReadSyncHeader(std::string &response_id, - uint32_t &data_len) { +Status AdbSyncService::ReadSyncHeader(std::string &response_id, + uint32_t &data_len) { char buffer[kSyncPacketLen]; - auto error = ReadAllBytes(buffer, kSyncPacketLen); + auto error = ReadAllBytes(*m_conn, buffer, kSyncPacketLen); if (error.Success()) { response_id.assign(&buffer[0], 4); DataExtractor extractor(&buffer[4], 4, eByteOrderLittle, sizeof(void *)); @@ -630,8 +638,7 @@ Status AdbClient::SyncService::ReadSyncHeader(std::string &response_id, return error; } -Status AdbClient::SyncService::PullFileChunk(std::vector &buffer, - bool &eof) { +Status AdbSyncService::PullFileChunk(std::vector &buffer, bool &eof) { buffer.clear(); std::string response_id; @@ -642,14 +649,14 @@ Status AdbClient::SyncService::PullFileChunk(std::vector &buffer, if (response_id == kDATA) { buffer.resize(data_len, 0); - error = ReadAllBytes(&buffer[0], data_len); + error = ReadAllBytes(*m_conn, &buffer[0], data_len); if (error.Fail()) buffer.clear(); } else if (response_id == kDONE) { eof = true; } else if (response_id == kFAIL) { std::string error_message(data_len, 0); - error = ReadAllBytes(&error_message[0], data_len); + error = ReadAllBytes(*m_conn, &error_message[0], data_len); if (error.Fail()) return Status::FromErrorStringWithFormat( "Failed to read pull error message: %s", error.AsCString()); @@ -662,6 +669,15 @@ Status AdbClient::SyncService::PullFileChunk(std::vector &buffer, return Status(); } -Status AdbClient::SyncService::ReadAllBytes(void *buffer, size_t size) { - return ::ReadAllBytes(*m_conn, buffer, size); +Status AdbSyncService::SetupSyncConnection() { + Status error = ConnectToAdb(*m_conn); + if (error.Fail()) + return error; + + error = SelectTargetDevice(*m_conn, m_device_id); + if (error.Fail()) + return error; + + error = EnterSyncMode(*m_conn); + return error; } diff --git a/lldb/source/Plugins/Platform/Android/AdbClient.h b/lldb/source/Plugins/Platform/Android/AdbClient.h index 851c09957bd4a..341a9fa4b93ad 100644 --- a/lldb/source/Plugins/Platform/Android/AdbClient.h +++ b/lldb/source/Plugins/Platform/Android/AdbClient.h @@ -10,6 +10,7 @@ #define LLDB_SOURCE_PLUGINS_PLATFORM_ANDROID_ADBCLIENT_H #include "lldb/Utility/Status.h" +#include "llvm/Support/Error.h" #include #include #include @@ -32,59 +33,21 @@ class AdbClient { using DeviceIDList = std::list; - class SyncService { - friend class AdbClient; - - public: - virtual ~SyncService(); - - virtual Status PullFile(const FileSpec &remote_file, - const FileSpec &local_file); - - Status PushFile(const FileSpec &local_file, const FileSpec &remote_file); - - virtual Status Stat(const FileSpec &remote_file, uint32_t &mode, - uint32_t &size, uint32_t &mtime); - - bool IsConnected() const; - - protected: - explicit SyncService(std::unique_ptr &&conn); - - private: - Status SendSyncRequest(const char *request_id, const uint32_t data_len, - const void *data); - - Status ReadSyncHeader(std::string &response_id, uint32_t &data_len); - - Status PullFileChunk(std::vector &buffer, bool &eof); - - Status ReadAllBytes(void *buffer, size_t size); - - Status internalPullFile(const FileSpec &remote_file, - const FileSpec &local_file); - - Status internalPushFile(const FileSpec &local_file, - const FileSpec &remote_file); - - 
Status internalStat(const FileSpec &remote_file, uint32_t &mode, - uint32_t &size, uint32_t &mtime); - - Status executeCommand(const std::function &cmd); - - std::unique_ptr m_conn; - }; - - static Status CreateByDeviceID(const std::string &device_id, AdbClient &adb); + /// Resolves a device identifier to its canonical form. + /// + /// \param device_id the device identifier to resolve (may be empty). + /// + /// \returns Expected containing the resolved device ID on + /// success, or an Error if the device ID cannot be resolved or + /// is ambiguous. + static llvm::Expected ResolveDeviceID(llvm::StringRef device_id); AdbClient(); - explicit AdbClient(const std::string &device_id); + explicit AdbClient(llvm::StringRef device_id); virtual ~AdbClient(); - const std::string &GetDeviceID() const; - - Status GetDevices(DeviceIDList &device_list); + llvm::StringRef GetDeviceID() const; Status SetPortForwarding(const uint16_t local_port, const uint16_t remote_port); @@ -102,39 +65,50 @@ class AdbClient { std::chrono::milliseconds timeout, const FileSpec &output_file_spec); - virtual std::unique_ptr GetSyncService(Status &error); - - Status SwitchDeviceTransport(); - -private: Status Connect(); - void SetDeviceID(const std::string &device_id); - - Status SendMessage(const std::string &packet, const bool reconnect = true); - - Status SendDeviceMessage(const std::string &packet); - - Status ReadMessage(std::vector &message); +private: + Status SendDeviceMessage(llvm::StringRef packet); Status ReadMessageStream(std::vector &message, std::chrono::milliseconds timeout); - Status GetResponseError(const char *response_id); + Status internalShell(const char *command, std::chrono::milliseconds timeout, + std::vector &output_buf); - Status ReadResponseStatus(); + std::string m_device_id; + std::unique_ptr m_conn; +}; - Status Sync(); +class AdbSyncService { +public: + explicit AdbSyncService(const std::string device_id); + virtual ~AdbSyncService(); + Status SetupSyncConnection(); - Status StartSync(); + virtual Status PullFile(const FileSpec &remote_file, + const FileSpec &local_file); + virtual Status PushFile(const FileSpec &local_file, + const FileSpec &remote_file); + virtual Status Stat(const FileSpec &remote_file, uint32_t &mode, + uint32_t &size, uint32_t &mtime); + virtual bool IsConnected() const; - Status internalShell(const char *command, std::chrono::milliseconds timeout, - std::vector &output_buf); + llvm::StringRef GetDeviceId() const { return m_device_id; } - Status ReadAllBytes(void *buffer, size_t size); +private: + Status SendSyncRequest(const char *request_id, const uint32_t data_len, + const void *data); + Status ReadSyncHeader(std::string &response_id, uint32_t &data_len); + Status PullFileChunk(std::vector &buffer, bool &eof); + Status PullFileImpl(const FileSpec &remote_file, const FileSpec &local_file); + Status PushFileImpl(const FileSpec &local_file, const FileSpec &remote_file); + Status StatImpl(const FileSpec &remote_file, uint32_t &mode, uint32_t &size, + uint32_t &mtime); + Status ExecuteCommand(const std::function &cmd); - std::string m_device_id; std::unique_ptr m_conn; + std::string m_device_id; }; } // namespace platform_android diff --git a/lldb/source/Plugins/Platform/Android/PlatformAndroid.cpp b/lldb/source/Plugins/Platform/Android/PlatformAndroid.cpp index 5bc9cc133fbd3..600cc0a04cd22 100644 --- a/lldb/source/Plugins/Platform/Android/PlatformAndroid.cpp +++ b/lldb/source/Plugins/Platform/Android/PlatformAndroid.cpp @@ -9,10 +9,8 @@ #include "lldb/Core/Module.h" 
#include "lldb/Core/PluginManager.h" #include "lldb/Core/Section.h" -#include "lldb/Host/HostInfo.h" #include "lldb/Utility/LLDBLog.h" #include "lldb/Utility/Log.h" -#include "lldb/Utility/Scalar.h" #include "lldb/Utility/UriParser.h" #include "lldb/ValueObject/ValueObject.h" @@ -194,12 +192,10 @@ Status PlatformAndroid::ConnectRemote(Args &args) { auto error = PlatformLinux::ConnectRemote(args); if (error.Success()) { - AdbClient adb; - error = AdbClient::CreateByDeviceID(m_device_id, adb); - if (error.Fail()) - return error; - - m_device_id = adb.GetDeviceID(); + auto resolved_device_id_or_error = AdbClient::ResolveDeviceID(m_device_id); + if (!resolved_device_id_or_error) + return Status::FromError(resolved_device_id_or_error.takeError()); + m_device_id = *resolved_device_id_or_error; } return error; } @@ -216,29 +212,33 @@ Status PlatformAndroid::GetFile(const FileSpec &source, Status error; auto sync_service = GetSyncService(error); - if (error.Fail()) - return error; - - uint32_t mode = 0, size = 0, mtime = 0; - error = sync_service->Stat(source_spec, mode, size, mtime); - if (error.Fail()) - return error; - if (mode != 0) - return sync_service->PullFile(source_spec, destination); + // If sync service is available, try to use it + if (error.Success() && sync_service) { + uint32_t mode = 0, size = 0, mtime = 0; + error = sync_service->Stat(source_spec, mode, size, mtime); + if (error.Success()) { + if (mode != 0) + return sync_service->PullFile(source_spec, destination); + + // mode == 0 can signify that adbd cannot access the file due security + // constraints - fall through to try "cat ..." as a fallback. + Log *log = GetLog(LLDBLog::Platform); + LLDB_LOGF(log, "Got mode == 0 on '%s': try to get file via 'shell cat'", + source_spec.GetPath(false).c_str()); + } + } + // Fallback to shell cat command if sync service failed or returned mode == 0 std::string source_file = source_spec.GetPath(false); Log *log = GetLog(LLDBLog::Platform); - LLDB_LOGF(log, "Got mode == 0 on '%s': try to get file via 'shell cat'", - source_file.c_str()); + LLDB_LOGF(log, "Using shell cat fallback for '%s'", source_file.c_str()); if (strchr(source_file.c_str(), '\'') != nullptr) return Status::FromErrorString( "Doesn't support single-quotes in filenames"); - // mode == 0 can signify that adbd cannot access the file due security - // constraints - try "cat ..." as a fallback. AdbClientUP adb(GetAdbClient(error)); if (error.Fail()) return error; @@ -275,12 +275,19 @@ Status PlatformAndroid::DownloadModuleSlice(const FileSpec &src_file_spec, const uint64_t src_offset, const uint64_t src_size, const FileSpec &dst_file_spec) { + std::string source_file = src_file_spec.GetPath(false); + if (source_file.empty()) + return Status::FromErrorString("Source file path cannot be empty"); + + std::string destination_file = dst_file_spec.GetPath(false); + if (destination_file.empty()) + return Status::FromErrorString("Destination file path cannot be empty"); + // In Android API level 23 and above, dynamic loader is able to load .so // file directly from APK. In that case, src_offset will be an non-zero. if (src_offset == 0) // Use GetFile for a normal file. 
return GetFile(src_file_spec, dst_file_spec); - std::string source_file = src_file_spec.GetPath(false); if (source_file.find('\'') != std::string::npos) return Status::FromErrorString( "Doesn't support single-quotes in filenames"); @@ -424,7 +431,7 @@ PlatformAndroid::GetLibdlFunctionDeclarations(lldb_private::Process *process) { std::vector dl_open_names = {"__dl_dlopen", "dlopen"}; const char *dl_open_name = nullptr; Target &target = process->GetTarget(); - for (auto name : dl_open_names) { + for (auto *name : dl_open_names) { target.GetImages().FindFunctionSymbols( ConstString(name), eFunctionNameTypeFull, matching_symbols); if (matching_symbols.GetSize()) { @@ -445,11 +452,8 @@ PlatformAndroid::GetLibdlFunctionDeclarations(lldb_private::Process *process) { } PlatformAndroid::AdbClientUP PlatformAndroid::GetAdbClient(Status &error) { - AdbClientUP adb(std::make_unique(m_device_id)); - if (adb) - error.Clear(); - else - error = Status::FromErrorString("Failed to create AdbClient"); + AdbClientUP adb = std::make_unique(m_device_id); + error = adb->Connect(); return adb; } @@ -473,14 +477,10 @@ std::string PlatformAndroid::GetRunAs() { } return run_as.str(); } - -AdbClient::SyncService *PlatformAndroid::GetSyncService(Status &error) { - if (m_adb_sync_svc && m_adb_sync_svc->IsConnected()) - return m_adb_sync_svc.get(); - - AdbClientUP adb(GetAdbClient(error)); +std::unique_ptr PlatformAndroid::GetSyncService(Status &error) { + auto sync_service = std::make_unique(m_device_id); + error = sync_service->SetupSyncConnection(); if (error.Fail()) return nullptr; - m_adb_sync_svc = adb->GetSyncService(error); - return (error.Success()) ? m_adb_sync_svc.get() : nullptr; + return sync_service; } diff --git a/lldb/source/Plugins/Platform/Android/PlatformAndroid.h b/lldb/source/Plugins/Platform/Android/PlatformAndroid.h index 5602edf73c1d3..3384525362ecf 100644 --- a/lldb/source/Plugins/Platform/Android/PlatformAndroid.h +++ b/lldb/source/Plugins/Platform/Android/PlatformAndroid.h @@ -75,14 +75,15 @@ class PlatformAndroid : public platform_linux::PlatformLinux { typedef std::unique_ptr AdbClientUP; virtual AdbClientUP GetAdbClient(Status &error); + std::string GetRunAs(); + +public: virtual llvm::StringRef GetPropertyPackageName(); - std::string GetRunAs(); +protected: + virtual std::unique_ptr GetSyncService(Status &error); private: - AdbClient::SyncService *GetSyncService(Status &error); - - std::unique_ptr m_adb_sync_svc; std::string m_device_id; uint32_t m_sdk_version; }; diff --git a/lldb/source/Plugins/Platform/Android/PlatformAndroidRemoteGDBServer.cpp b/lldb/source/Plugins/Platform/Android/PlatformAndroidRemoteGDBServer.cpp index 0cf64807ec0d6..461ee8e3b1826 100644 --- a/lldb/source/Plugins/Platform/Android/PlatformAndroidRemoteGDBServer.cpp +++ b/lldb/source/Plugins/Platform/Android/PlatformAndroidRemoteGDBServer.cpp @@ -21,6 +21,7 @@ using namespace lldb; using namespace lldb_private; using namespace platform_android; +using namespace llvm; static const lldb::pid_t g_remote_platform_pid = 0; // Alias for the process id of lldb-platform @@ -32,12 +33,12 @@ static Status ForwardPortWithAdb( std::string &device_id) { Log *log = GetLog(LLDBLog::Platform); - AdbClient adb; - auto error = AdbClient::CreateByDeviceID(device_id, adb); - if (error.Fail()) - return error; + auto resolved_device_id_or_error = AdbClient::ResolveDeviceID(device_id); + if (!resolved_device_id_or_error) + return Status::FromError(resolved_device_id_or_error.takeError()); + device_id = *resolved_device_id_or_error; - 
device_id = adb.GetDeviceID(); + AdbClient adb(device_id); LLDB_LOGF(log, "Connected to Android device \"%s\"", device_id.c_str()); if (remote_port != 0) { diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp index 5ffb4423969ca..82e9d867c3ac0 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp @@ -3126,39 +3126,6 @@ void DWARFASTParserClang::ParseSingleMember( if (!member_clang_type.IsCompleteType()) member_clang_type.GetCompleteType(); - { - // Older versions of clang emit the same DWARF for array[0] and array[1]. If - // the current field is at the end of the structure, then there is - // definitely no room for extra elements and we override the type to - // array[0]. This was fixed by f454dfb6b5af. - CompilerType member_array_element_type; - uint64_t member_array_size; - bool member_array_is_incomplete; - - if (member_clang_type.IsArrayType(&member_array_element_type, - &member_array_size, - &member_array_is_incomplete) && - !member_array_is_incomplete) { - uint64_t parent_byte_size = - parent_die.GetAttributeValueAsUnsigned(DW_AT_byte_size, UINT64_MAX); - - if (attrs.member_byte_offset >= parent_byte_size) { - if (member_array_size != 1 && - (member_array_size != 0 || - attrs.member_byte_offset > parent_byte_size)) { - module_sp->ReportError( - "{0:x8}: DW_TAG_member '{1}' refers to type {2:x16}" - " which extends beyond the bounds of {3:x8}", - die.GetID(), attrs.name, - attrs.encoding_form.Reference().GetOffset(), parent_die.GetID()); - } - - member_clang_type = - m_ast.CreateArrayType(member_array_element_type, 0, false); - } - } - } - TypeSystemClang::RequireCompleteType(member_clang_type); clang::FieldDecl *field_decl = TypeSystemClang::AddFieldToRecordType( diff --git a/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp b/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp index 9f4eb1c21711d..3b936c06b1072 100644 --- a/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp +++ b/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp @@ -139,8 +139,8 @@ class PluginProperties : public Properties { if (!IsNativeReaderRequested()) { static std::once_flag g_warning_shown; Debugger::ReportWarning( - "The DIA PDB reader was explicitly requested, but LLDB was built " - "without the DIA SDK. The native reader will be used instead.", + "the DIA PDB reader was explicitly requested, but LLDB was built " + "without the DIA SDK. 
The native reader will be used instead", {}, &g_warning_shown); } return true; diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp index 1948f51c3f2e1..21c265ede0bc5 100644 --- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp +++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp @@ -960,6 +960,12 @@ CompilerType TypeSystemClang::GetBuiltinTypeForDWARFEncodingAndBitSize( if (type_name == "long double" && QualTypeMatchesBitSize(bit_size, ast, ast.LongDoubleTy)) return GetType(ast.LongDoubleTy); + if (type_name == "__bf16" && + QualTypeMatchesBitSize(bit_size, ast, ast.BFloat16Ty)) + return GetType(ast.BFloat16Ty); + if (type_name == "_Float16" && + QualTypeMatchesBitSize(bit_size, ast, ast.Float16Ty)) + return GetType(ast.Float16Ty); // As Rust currently uses `TypeSystemClang`, match `f128` here as well so it // doesn't get misinterpreted as `long double` on targets where they are // the same size but different formats. @@ -1792,6 +1798,8 @@ bool TypeSystemClang::RecordHasFields(const RecordDecl *record_decl) { for (base_class = cxx_record_decl->bases_begin(), base_class_end = cxx_record_decl->bases_end(); base_class != base_class_end; ++base_class) { + assert(record_decl != base_class->getType()->getAsCXXRecordDecl() && + "Base can't inherit from itself."); if (RecordHasFields(base_class->getType()->getAsCXXRecordDecl())) return true; } @@ -5033,6 +5041,7 @@ lldb::Encoding TypeSystemClang::GetEncoding(lldb::opaque_compiler_type_t type, case clang::BuiltinType::VectorPair: case clang::BuiltinType::VectorQuad: case clang::BuiltinType::DMR1024: + case clang::BuiltinType::DMR2048: break; // ARM -- Scalable Vector Extension diff --git a/lldb/source/Symbol/DWARFCallFrameInfo.cpp b/lldb/source/Symbol/DWARFCallFrameInfo.cpp index 2f8f9e9182fb2..b490045cb3818 100644 --- a/lldb/source/Symbol/DWARFCallFrameInfo.cpp +++ b/lldb/source/Symbol/DWARFCallFrameInfo.cpp @@ -20,6 +20,8 @@ #include "lldb/Utility/LLDBLog.h" #include "lldb/Utility/Log.h" #include "lldb/Utility/Timer.h" +#include "llvm/BinaryFormat/Dwarf.h" +#include #include #include #include @@ -147,6 +149,23 @@ GetGNUEHPointer(const DataExtractor &DE, lldb::offset_t *offset_ptr, return baseAddress + addressValue; } +// Check if the given cie_id value indicates a CIE (Common Information Entry) +// as opposed to an FDE (Frame Description Entry). 
+static bool IsCIEMarker(uint64_t cie_id, bool is_64bit, + DWARFCallFrameInfo::Type type) { + // Check eh_frame CIE marker + if (type == DWARFCallFrameInfo::EH) + return cie_id == 0; + + // Check debug_frame CIE marker + // DWARF64 + if (is_64bit) + return cie_id == llvm::dwarf::DW64_CIE_ID; + + // DWARF32 + return cie_id == llvm::dwarf::DW_CIE_ID; +} + DWARFCallFrameInfo::DWARFCallFrameInfo(ObjectFile &objfile, SectionSP §ion_sp, Type type) : m_objfile(objfile), m_section_sp(section_sp), m_type(type) {} @@ -283,7 +302,7 @@ DWARFCallFrameInfo::ParseCIE(const dw_offset_t cie_offset) { GetCFIData(); uint32_t length = m_cfi_data.GetU32(&offset); dw_offset_t cie_id, end_offset; - bool is_64bit = (length == UINT32_MAX); + bool is_64bit = (length == llvm::dwarf::DW_LENGTH_DWARF64); if (is_64bit) { length = m_cfi_data.GetU64(&offset); cie_id = m_cfi_data.GetU64(&offset); @@ -292,8 +311,9 @@ DWARFCallFrameInfo::ParseCIE(const dw_offset_t cie_offset) { cie_id = m_cfi_data.GetU32(&offset); end_offset = cie_offset + length + 4; } - if (length > 0 && ((m_type == DWARF && cie_id == UINT32_MAX) || - (m_type == EH && cie_id == 0ul))) { + + // Check if this is a CIE or FDE based on the CIE ID marker + if (length > 0 && IsCIEMarker(cie_id, is_64bit, m_type)) { size_t i; // cie.offset = cie_offset; // cie.length = length; @@ -470,7 +490,7 @@ void DWARFCallFrameInfo::GetFDEIndex() { const dw_offset_t current_entry = offset; dw_offset_t cie_id, next_entry, cie_offset; uint32_t len = m_cfi_data.GetU32(&offset); - bool is_64bit = (len == UINT32_MAX); + bool is_64bit = (len == llvm::dwarf::DW_LENGTH_DWARF64); if (is_64bit) { len = m_cfi_data.GetU64(&offset); cie_id = m_cfi_data.GetU64(&offset); @@ -493,11 +513,8 @@ void DWARFCallFrameInfo::GetFDEIndex() { return; } - // An FDE entry contains CIE_pointer in debug_frame in same place as cie_id - // in eh_frame. CIE_pointer is an offset into the .debug_frame section. So, - // variable cie_offset should be equal to cie_id for debug_frame. - // FDE entries with cie_id == 0 shouldn't be ignored for it. - if ((cie_id == 0 && m_type == EH) || cie_id == UINT32_MAX || len == 0) { + // Check if this is a CIE or FDE based on the CIE ID marker + if (IsCIEMarker(cie_id, is_64bit, m_type) || len == 0) { auto cie_sp = ParseCIE(current_entry); if (!cie_sp) { // Cannot parse, the reason is already logged @@ -568,7 +585,7 @@ DWARFCallFrameInfo::ParseFDE(dw_offset_t dwarf_offset, uint32_t length = m_cfi_data.GetU32(&offset); dw_offset_t cie_offset; - bool is_64bit = (length == UINT32_MAX); + bool is_64bit = (length == llvm::dwarf::DW_LENGTH_DWARF64); if (is_64bit) { length = m_cfi_data.GetU64(&offset); cie_offset = m_cfi_data.GetU64(&offset); @@ -577,7 +594,9 @@ DWARFCallFrameInfo::ParseFDE(dw_offset_t dwarf_offset, } // FDE entries with zeroth cie_offset may occur for debug_frame. - assert(!(m_type == EH && 0 == cie_offset) && cie_offset != UINT32_MAX); + assert(!(m_type == EH && 0 == cie_offset) && + cie_offset != + (is_64bit ? 
llvm::dwarf::DW64_CIE_ID : llvm::dwarf::DW_CIE_ID)); // Translate the CIE_id from the eh_frame format, which is relative to the // FDE offset, into a __eh_frame section offset diff --git a/lldb/source/Target/ExecutionContext.cpp b/lldb/source/Target/ExecutionContext.cpp index 9d232e420f71c..a795913047639 100644 --- a/lldb/source/Target/ExecutionContext.cpp +++ b/lldb/source/Target/ExecutionContext.cpp @@ -429,6 +429,16 @@ ExecutionContextRef::ExecutionContextRef(Target *target, bool adopt_selected) SetTargetPtr(target, adopt_selected); } +ExecutionContextRef::ExecutionContextRef(Process *process, bool adopt_selected) + : m_target_wp(), m_process_wp(), m_thread_wp(), m_stack_id() { + SetProcessPtr(process, adopt_selected); +} + +ExecutionContextRef::ExecutionContextRef(Thread *thread, bool adopt_selected) + : m_target_wp(), m_process_wp(), m_thread_wp(), m_stack_id() { + SetThreadPtr(thread, adopt_selected); +} + ExecutionContextRef::ExecutionContextRef(const ExecutionContextRef &rhs) = default; @@ -513,55 +523,66 @@ void ExecutionContextRef::SetFrameSP(const lldb::StackFrameSP &frame_sp) { void ExecutionContextRef::SetTargetPtr(Target *target, bool adopt_selected) { Clear(); if (target) { - lldb::TargetSP target_sp(target->shared_from_this()); - if (target_sp) { - m_target_wp = target_sp; - if (adopt_selected) { - lldb::ProcessSP process_sp(target_sp->GetProcessSP()); - if (process_sp) { - m_process_wp = process_sp; - if (process_sp) { - // Only fill in the thread and frame if our process is stopped - // Don't just check the state, since we might be in the middle of - // resuming. - Process::StopLocker stop_locker; - - if (stop_locker.TryLock(&process_sp->GetRunLock()) && - StateIsStoppedState(process_sp->GetState(), true)) { - lldb::ThreadSP thread_sp( - process_sp->GetThreadList().GetSelectedThread()); - if (!thread_sp) - thread_sp = process_sp->GetThreadList().GetThreadAtIndex(0); - - if (thread_sp) { - SetThreadSP(thread_sp); - lldb::StackFrameSP frame_sp( - thread_sp->GetSelectedFrame(DoNoSelectMostRelevantFrame)); - if (!frame_sp) - frame_sp = thread_sp->GetStackFrameAtIndex(0); - if (frame_sp) - SetFrameSP(frame_sp); - } - } - } - } - } + lldb::TargetSP target_sp = target->shared_from_this(); + SetTargetSP(target_sp); + if (adopt_selected) { + if (lldb::ProcessSP process_sp = target_sp->GetProcessSP()) + SetProcessPtr(process_sp.get(), adopt_selected); } } } -void ExecutionContextRef::SetProcessPtr(Process *process) { +void ExecutionContextRef::SetProcessPtr(Process *process, bool adopt_selected) { if (process) { - SetProcessSP(process->shared_from_this()); + lldb::ProcessSP process_sp = process->shared_from_this(); + SetProcessSP(process_sp); + if (adopt_selected) { + // Only fill in the thread if our process is stopped. + // Don't just check the state, since we might be in the middle of + // resuming. 
+ Process::StopLocker stop_locker; + if (stop_locker.TryLock(&process_sp->GetRunLock()) && + StateIsStoppedState(process_sp->GetState(), true)) { + lldb::ThreadSP thread_sp( + process_sp->GetThreadList().GetSelectedThread()); + if (!thread_sp) + thread_sp = process_sp->GetThreadList().GetThreadAtIndex(0); + if (thread_sp) { + SetThreadSP(thread_sp); + lldb::StackFrameSP frame_sp = + thread_sp->GetSelectedFrame(DoNoSelectMostRelevantFrame); + if (!frame_sp) + frame_sp = thread_sp->GetStackFrameAtIndex(0); + if (frame_sp) + SetFrameSP(frame_sp); + } + } + } } else { m_process_wp.reset(); m_target_wp.reset(); } } -void ExecutionContextRef::SetThreadPtr(Thread *thread) { +void ExecutionContextRef::SetThreadPtr(Thread *thread, bool adopt_selected) { if (thread) { - SetThreadSP(thread->shared_from_this()); + lldb::ThreadSP thread_sp = thread->shared_from_this(); + SetThreadSP(thread_sp); + if (adopt_selected) { + // Only fill in the frame if our process is stopped. + // Don't just check the state, since we might be in the middle of + // resuming. + Process::StopLocker stop_locker; + if (stop_locker.TryLock(&thread->GetProcess()->GetRunLock()) && + StateIsStoppedState(thread->GetProcess()->GetState(), true)) { + lldb::StackFrameSP frame_sp = + thread_sp->GetSelectedFrame(DoNoSelectMostRelevantFrame); + if (!frame_sp) + frame_sp = thread_sp->GetStackFrameAtIndex(0); + if (frame_sp) + SetFrameSP(frame_sp); + } + } } else { ClearThread(); m_process_wp.reset(); diff --git a/lldb/source/Target/Statistics.cpp b/lldb/source/Target/Statistics.cpp index 8ad8d507268e2..f7311a8b24416 100644 --- a/lldb/source/Target/Statistics.cpp +++ b/lldb/source/Target/Statistics.cpp @@ -148,6 +148,11 @@ TargetStats::ToJSON(Target &target, target_metrics_json.try_emplace("targetCreateTime", m_create_time.get().count()); + if (m_load_core_time.get().count() > 0) { + target_metrics_json.try_emplace("loadCoreTime", + m_load_core_time.get().count()); + } + json::Array breakpoints_array; double totalBreakpointResolveTime = 0.0; // Report both the normal breakpoint list and the internal breakpoint list. 
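Note (illustrative sketch, not part of the patch): the Statistics.cpp hunk above emits an optional "loadCoreTime" entry in the per-target statistics JSON, and only when loading a core file took measurable time. A minimal way to read it through the SB API, mirroring the new test_core_load_time test added later in this patch and assuming a core-file target already created via target.LoadCore(...):

    import json
    import lldb

    def get_load_core_time(target):
        # Dump the target statistics to JSON, the same way TestStatisticsAPI.py does.
        stream = lldb.SBStream()
        target.GetStatistics(lldb.SBStatisticsOptions()).GetAsJSON(stream)
        stats = json.loads(stream.GetData())
        # "loadCoreTime" is only present for core-file targets with a non-zero load time;
        # live-process targets omit the key entirely.
        return stats["targets"][0].get("loadCoreTime")
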
diff --git a/lldb/source/Utility/Scalar.cpp b/lldb/source/Utility/Scalar.cpp index c8766bdf2aee7..f2c18cdd896da 100644 --- a/lldb/source/Utility/Scalar.cpp +++ b/lldb/source/Utility/Scalar.cpp @@ -471,24 +471,10 @@ bool Scalar::ShiftRightLogical(const Scalar &rhs) { } Scalar &Scalar::operator>>=(const Scalar &rhs) { - switch (m_type) { - case e_void: - case e_float: + if (m_type == e_int && rhs.m_type == e_int) + m_integer >>= rhs.m_integer.getZExtValue(); + else m_type = e_void; - break; - - case e_int: - switch (rhs.m_type) { - case e_void: - case e_float: - m_type = e_void; - break; - case e_int: - m_integer = m_integer.ashr(rhs.m_integer); - break; - } - break; - } return *this; } diff --git a/lldb/test/API/commands/register/register/register_command/TestRegisters.py b/lldb/test/API/commands/register/register/register_command/TestRegisters.py index 0134139892794..29d090a279070 100644 --- a/lldb/test/API/commands/register/register/register_command/TestRegisters.py +++ b/lldb/test/API/commands/register/register/register_command/TestRegisters.py @@ -21,24 +21,6 @@ def tearDown(self): self.dbg.GetSelectedTarget().GetProcess().Destroy() TestBase.tearDown(self) - # on macOS, detect if the current machine is arm64 and supports SME - def get_sme_available(self): - if self.getArchitecture() != "arm64": - return None - try: - sysctl_output = subprocess.check_output( - ["sysctl", "hw.optional.arm.FEAT_SME"] - ).decode("utf-8") - except subprocess.CalledProcessError: - return None - m = re.match(r"hw\.optional\.arm\.FEAT_SME: (\w+)", sysctl_output) - if m: - if int(m.group(1)) == 1: - return True - else: - return False - return None - @skipIfiOSSimulator @skipIf(archs=no_match(["amd64", "arm$", "i386", "x86_64"])) @expectedFailureAll(oslist=["freebsd", "netbsd"], bugnumber="llvm.org/pr48371") @@ -51,7 +33,7 @@ def test_register_commands(self): self.log_enable("registers") error_str_matched = False - if self.get_sme_available() and self.platformIsDarwin(): + if self.isAArch64SME() and self.platformIsDarwin(): # On Darwin AArch64 SME machines, we will have unavailable # registers when not in Streaming SVE Mode/SME, so # `register read -a` will report that some registers diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/unordered_map-iterator/TestDataFormatterStdUnorderedMap.py b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/unordered_map-iterator/TestDataFormatterStdUnorderedMap.py index 1e920faab6397..45f7b5be465c5 100644 --- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/unordered_map-iterator/TestDataFormatterStdUnorderedMap.py +++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/generic/unordered_map-iterator/TestDataFormatterStdUnorderedMap.py @@ -124,11 +124,6 @@ def do_test_ptr(self): self.check_ptr_ptr("ptr5") self.check_ptr_ptr("ptr6") - @expectedFailureAll( - bugnumber="https://github.com/llvm/llvm-project/issues/146040", - compiler="clang", - compiler_version=["<", "21"], - ) @add_test_categories(["libc++"]) def test_ptr_libcxx(self): self.build(dictionary={"USE_LIBCPP": 1}) diff --git a/lldb/test/API/functionalities/gdb_remote_client/TestMemoryRegionDirtyPages.py b/lldb/test/API/functionalities/gdb_remote_client/TestMemoryRegionDirtyPages.py index 9d7e0c0f7af6c..695faf896ef5d 100644 --- a/lldb/test/API/functionalities/gdb_remote_client/TestMemoryRegionDirtyPages.py +++ b/lldb/test/API/functionalities/gdb_remote_client/TestMemoryRegionDirtyPages.py @@ -5,60 +5,102 @@ from 
lldbsuite.test.lldbgdbclient import GDBRemoteTestBase +class TestRegion(object): + def __init__(self, start_addr, size, dirty_pages): + self.start_addr = start_addr + self.size = size + self.dirty_pages = dirty_pages + + def as_packet(self): + dirty_pages = "" + if self.dirty_pages is not None: + dirty_pages = ( + "dirty-pages:" + + ",".join([format(a, "x") for a in self.dirty_pages]) + + ";" + ) + return f"start:{self.start_addr:x};size:{self.size};permissions:r;{dirty_pages}" + + def expected_command_output(self): + if self.dirty_pages is None: + return [ + "Modified memory (dirty) page list provided", + "Dirty pages:", + ], False + + expected = [ + f"Modified memory (dirty) page list provided, {len(self.dirty_pages)} entries." + ] + if self.dirty_pages: + expected.append( + "Dirty pages: " + + ", ".join([format(a, "#x") for a in self.dirty_pages]) + + "." + ) + return expected, True + + class TestMemoryRegionDirtyPages(GDBRemoteTestBase): @skipIfXmlSupportMissing def test(self): + test_regions = [ + # A memory region where we don't know anything about dirty pages + TestRegion(0, 0x100000000, None), + # A memory region with dirty page information -- and zero dirty pages + TestRegion(0x100000000, 4000, []), + # A memory region with one dirty page + TestRegion(0x100004000, 4000, [0x100004000]), + # A memory region with multple dirty pages + TestRegion( + 0x1000A2000, + 5000, + [0x1000A2000, 0x1000A3000, 0x1000A4000, 0x1000A5000, 0x1000A6000], + ), + ] + class MyResponder(MockGDBServerResponder): def qHostInfo(self): return "ptrsize:8;endian:little;vm-page-size:4096;" def qMemoryRegionInfo(self, addr): - if addr == 0: - return "start:0;size:100000000;" - if addr == 0x100000000: - return "start:100000000;size:4000;permissions:rx;dirty-pages:;" - if addr == 0x100004000: - return ( - "start:100004000;size:4000;permissions:r;dirty-pages:100004000;" - ) - if addr == 0x1000A2000: - return "start:1000a2000;size:5000;permissions:r;dirty-pages:1000a2000,1000a3000,1000a4000,1000a5000,1000a6000;" + for region in test_regions: + if region.start_addr == addr: + return region.as_packet() self.server.responder = MyResponder() target = self.dbg.CreateTarget("") if self.TraceOn(): self.runCmd("log enable gdb-remote packets") self.addTearDownHook(lambda: self.runCmd("log disable gdb-remote packets")) + process = self.connect(target) + lldbutil.expect_state_changes( + self, self.dbg.GetListener(), process, [lldb.eStateStopped] + ) - # A memory region where we don't know anything about dirty pages - region = lldb.SBMemoryRegionInfo() - err = process.GetMemoryRegionInfo(0, region) - self.assertSuccess(err) - self.assertFalse(region.HasDirtyMemoryPageList()) - self.assertEqual(region.GetNumDirtyPages(), 0) - region.Clear() + for test_region in test_regions: + region = lldb.SBMemoryRegionInfo() + err = process.GetMemoryRegionInfo(test_region.start_addr, region) + self.assertSuccess(err) + self.assertEqual(region.GetPageSize(), 4096) - # A memory region with dirty page information -- and zero dirty pages - err = process.GetMemoryRegionInfo(0x100000000, region) - self.assertSuccess(err) - self.assertTrue(region.HasDirtyMemoryPageList()) - self.assertEqual(region.GetNumDirtyPages(), 0) - self.assertEqual(region.GetPageSize(), 4096) - region.Clear() + if test_region.dirty_pages is None: + self.assertFalse(region.HasDirtyMemoryPageList()) + self.assertEqual(0, region.GetNumDirtyPages()) + else: + self.assertTrue(region.HasDirtyMemoryPageList()) + self.assertEqual( + len(test_region.dirty_pages), 
region.GetNumDirtyPages() + ) - # A memory region with one dirty page - err = process.GetMemoryRegionInfo(0x100004000, region) - self.assertSuccess(err) - self.assertTrue(region.HasDirtyMemoryPageList()) - self.assertEqual(region.GetNumDirtyPages(), 1) - self.assertEqual(region.GetDirtyPageAddressAtIndex(0), 0x100004000) - region.Clear() + for i, expected_dirty_page in enumerate(test_region.dirty_pages): + self.assertEqual( + expected_dirty_page, region.GetDirtyPageAddressAtIndex(i) + ) - # A memory region with multple dirty pages - err = process.GetMemoryRegionInfo(0x1000A2000, region) - self.assertSuccess(err) - self.assertTrue(region.HasDirtyMemoryPageList()) - self.assertEqual(region.GetNumDirtyPages(), 5) - self.assertEqual(region.GetDirtyPageAddressAtIndex(4), 0x1000A6000) - region.Clear() + substrs, matching = test_region.expected_command_output() + self.expect( + f"memory region 0x{test_region.start_addr:x}", + substrs=substrs, + matching=matching, + ) diff --git a/lldb/test/API/functionalities/json/symbol-file/Makefile b/lldb/test/API/functionalities/json/symbol-file/Makefile index 13bc164582eee..5d05d95fc8428 100644 --- a/lldb/test/API/functionalities/json/symbol-file/Makefile +++ b/lldb/test/API/functionalities/json/symbol-file/Makefile @@ -1,4 +1,5 @@ C_SOURCES := main.c +CFLAGS_EXTRAS := -no-pie all: stripped.out diff --git a/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py b/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py index f06c9ae14bb7a..d7249df350fc1 100644 --- a/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py +++ b/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py @@ -1,6 +1,7 @@ # Test the SBAPI for GetStatistics() import json + import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * @@ -54,6 +55,11 @@ def test_stats_api(self): stats_json, 'Make sure the "frameVariable" key in in target.GetStatistics()["targets"][0]', ) + self.assertNotIn( + "loadCoreTime", + stats_json, + "LoadCoreTime should not be present in a live, non-coredump target", + ) expressionEvaluation = stats_json["expressionEvaluation"] self.assertIn( "successes", @@ -157,3 +163,25 @@ def test_command_stats_force(self): stats_force.GetAsJSON(stream_force) debug_stats_force = json.loads(stream_force.GetData()) self.assertEqual(debug_stats_force["totalDebugInfoByteSize"], 445) + + def test_core_load_time(self): + """ + Test to see if the coredump path is included in statistics dump. 
+ """ + yaml_file = "arm64-minidump-build-ids.yaml" + src_dir = self.getSourceDir() + minidump_path = self.getBuildArtifact(os.path.basename(yaml_file) + ".dmp") + self.yaml2obj(os.path.join(src_dir, yaml_file), minidump_path) + target = self.dbg.CreateTarget(None) + process = target.LoadCore(minidump_path) + self.assertTrue(process.IsValid()) + + stats_options = lldb.SBStatisticsOptions() + stats = target.GetStatistics(stats_options) + stream = lldb.SBStream() + stats.GetAsJSON(stream) + debug_stats = json.loads(stream.GetData()) + self.assertTrue("targets" in debug_stats) + target_info = debug_stats["targets"][0] + self.assertTrue("loadCoreTime" in target_info) + self.assertTrue(float(target_info["loadCoreTime"]) > 0.0) diff --git a/lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml b/lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml new file mode 100644 index 0000000000000..4acbc409d8082 --- /dev/null +++ b/lldb/test/API/functionalities/stats_api/arm64-minidump-build-ids.yaml @@ -0,0 +1,19 @@ +--- !minidump +Streams: + - Type: SystemInfo + Processor Arch: ARM + Platform ID: Linux + CSD Version: '15E216' + CPU: + CPUID: 0x00000000 + - Type: ModuleList + Modules: + - Base of Image: 0x0000000000001000 + Size of Image: 0x00001000 + Module Name: '/tmp/a' + CodeView Record: 4C4570420102030405060708090A0B0C0D0E0F1011121314 + - Base of Image: 0x0000000000001000 + Size of Image: 0x00001000 + Module Name: '/tmp/b' + CodeView Record: 4C4570420A141E28323C46505A646E78828C96A0AAB4BEC8 +... diff --git a/lldb/test/API/lang/cpp/abi_tag_structors/TestAbiTagStructors.py b/lldb/test/API/lang/cpp/abi_tag_structors/TestAbiTagStructors.py index 87d8adb42b82e..2d3e4f7cdd472 100644 --- a/lldb/test/API/lang/cpp/abi_tag_structors/TestAbiTagStructors.py +++ b/lldb/test/API/lang/cpp/abi_tag_structors/TestAbiTagStructors.py @@ -10,6 +10,11 @@ class AbiTagStructorsTestCase(TestBase): + @skipIf( + compiler="clang", + compiler_version=["<", "22"], + bugnumber="Required Clang flag not supported", + ) @expectedFailureAll(oslist=["windows"]) def test_with_structor_linkage_names(self): self.build(dictionary={"CXXFLAGS_EXTRAS": "-gstructor-decl-linkage-names"}) @@ -73,7 +78,16 @@ def test_no_structor_linkage_names(self): Test that without linkage names on structor declarations we can't call ABI-tagged structors. """ - self.build(dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"}) + # In older versions of Clang the -gno-structor-decl-linkage-names + # behaviour was the default. 
+ if self.expectedCompiler(["clang"]) and self.expectedCompilerVersion( + [">=", "22.0"] + ): + self.build( + dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"} + ) + else: + self.build() lldbutil.run_to_source_breakpoint( self, "Break here", lldb.SBFileSpec("main.cpp", False) @@ -105,12 +119,23 @@ def do_nested_structor_test(self): "expression TaggedLocal()", error=True, substrs=["Couldn't look up symbols"] ) + @skipIf(compiler="clang", compiler_version=["<", "22"]) @expectedFailureAll(oslist=["windows"]) - def test_nested_no_structor_linkage_names(self): + def test_nested_with_structor_linkage_names(self): self.build(dictionary={"CXXFLAGS_EXTRAS": "-gstructor-decl-linkage-names"}) self.do_nested_structor_test() @expectedFailureAll(oslist=["windows"]) - def test_nested_with_structor_linkage_names(self): - self.build(dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"}) + def test_nested_no_structor_linkage_names(self): + # In older versions of Clang the -gno-structor-decl-linkage-names + # behaviour was the default. + if self.expectedCompiler(["clang"]) and self.expectedCompilerVersion( + [">=", "22.0"] + ): + self.build( + dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"} + ) + else: + self.build() + self.do_nested_structor_test() diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py b/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py index c0545c70c84ea..b3bed43c75873 100644 --- a/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py +++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py @@ -6,6 +6,11 @@ class ExprDefinitionInDylibTestCase(TestBase): + @skipIf( + compiler="clang", + compiler_version=["<", "22"], + bugnumber="Required Clang flag not supported", + ) @skipIfWindows def test_with_structor_linkage_names(self): """ @@ -74,7 +79,16 @@ def test_no_structor_linkage_names(self): Tests that if structor declarations don't have linkage names, we can't call ABI-tagged constructors. But non-tagged ones are fine. """ - self.build(dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"}) + # In older versions of Clang the -gno-structor-decl-linkage-names + # behaviour was the default. 
+ if self.expectedCompiler(["clang"]) and self.expectedCompilerVersion( + [">=", "22.0"] + ): + self.build( + dictionary={"CXXFLAGS_EXTRAS": "-gno-structor-decl-linkage-names"} + ) + else: + self.build() target = self.dbg.CreateTarget(self.getBuildArtifact("a.out")) self.assertTrue(target, VALID_TARGET) @@ -95,6 +109,6 @@ def test_no_structor_linkage_names(self): self.expect_expr("Foo(10)", result_type="Foo") - self.expect("Base()", error=True) + self.expect("expr Base()", error=True) - self.expect("Bar()", error=True) + self.expect("expr Bar()", error=True) diff --git a/lldb/test/API/lang/cpp/floating-types-specialization/Makefile b/lldb/test/API/lang/cpp/floating-types-specialization/Makefile new file mode 100644 index 0000000000000..99998b20bcb05 --- /dev/null +++ b/lldb/test/API/lang/cpp/floating-types-specialization/Makefile @@ -0,0 +1,3 @@ +CXX_SOURCES := main.cpp + +include Makefile.rules diff --git a/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py b/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py new file mode 100644 index 0000000000000..f4530cd545046 --- /dev/null +++ b/lldb/test/API/lang/cpp/floating-types-specialization/TestCppFloatingTypesSpecialization.py @@ -0,0 +1,36 @@ +import lldb +import lldbsuite.test.lldbplatformutil as lldbplatformutil +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestCase(TestBase): + def test(self): + self.build() + lldbutil.run_to_source_breakpoint( + self, "// break here", lldb.SBFileSpec("main.cpp", False) + ) + + # On 32-bit Arm, you have to have the bfloat16 extension, or an FPU while + # not using the soft float mode. The target we assume has none of that + # so instead of __bf16 we get __fp16. + is_arm_32_bit = lldbplatformutil.getArchitecture() == "arm" + + self.expect_expr( + "f0", result_type=("Foo<__fp16>" if is_arm_32_bit else "Foo<__bf16>") + ) + + # When __bf16 is actually __fp16, f1 looks like it inherits from itself. + # Which clang allows but LLDB fails to evaluate. + if not is_arm_32_bit: + self.expect_expr("f1", result_type="Foo<__fp16>") + + # Test sizeof to ensure while computing layout we don't do + # infinite recursion. 
+ v = self.frame().EvaluateExpression("sizeof(f0)") + self.assertEqual(v.GetValueAsUnsigned() > 0, True) + + if not is_arm_32_bit: + v = self.frame().EvaluateExpression("sizeof(f1)") + self.assertEqual(v.GetValueAsUnsigned() > 0, True) diff --git a/lldb/test/API/lang/cpp/floating-types-specialization/main.cpp b/lldb/test/API/lang/cpp/floating-types-specialization/main.cpp new file mode 100644 index 0000000000000..e3e8a3767fef8 --- /dev/null +++ b/lldb/test/API/lang/cpp/floating-types-specialization/main.cpp @@ -0,0 +1,11 @@ +template struct Foo; + +template <> struct Foo<__bf16> {}; + +template <> struct Foo<_Float16> : Foo<__bf16> {}; + +int main() { + Foo<__bf16> f0; + Foo<_Float16> f1; + return 0; // break here +} diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/Makefile b/lldb/test/API/lang/cpp/function-call-from-object-file/Makefile new file mode 100644 index 0000000000000..285bbfbbca4fe --- /dev/null +++ b/lldb/test/API/lang/cpp/function-call-from-object-file/Makefile @@ -0,0 +1,3 @@ +CXX_SOURCES := main.cpp lib1.cpp lib2.cpp + +include Makefile.rules diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/TestFunctionCallFromObjectFile.py b/lldb/test/API/lang/cpp/function-call-from-object-file/TestFunctionCallFromObjectFile.py new file mode 100644 index 0000000000000..f0a7aef182a67 --- /dev/null +++ b/lldb/test/API/lang/cpp/function-call-from-object-file/TestFunctionCallFromObjectFile.py @@ -0,0 +1,29 @@ +""" +Tests that we can call functions that have definitions in multiple +CUs in the debug-info (which is the case for functions defined in headers). +The linker will most likely de-duplicate the functiond definitions when linking +the final executable. On Darwin, this will create a debug-map that LLDB will use +to fix up object file addresses to addresses in the linked executable. However, +if we parsed the DIE from the object file whose functiond definition got stripped +by the linker, LLDB needs to ensure it can still resolve the function symbol it +got for it. +""" + +import lldb +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestFunctionCallFromObjectFile(TestBase): + def test_lib1(self): + self.build() + lldbutil.run_to_name_breakpoint(self, "lib1_func") + + self.expect_expr("Foo{}.foo()", result_type="int", result_value="15") + + def test_lib2(self): + self.build() + lldbutil.run_to_name_breakpoint(self, "lib2_func") + + self.expect_expr("Foo{}.foo()", result_type="int", result_value="15") diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/common.h b/lldb/test/API/lang/cpp/function-call-from-object-file/common.h new file mode 100644 index 0000000000000..76e23be6b97a6 --- /dev/null +++ b/lldb/test/API/lang/cpp/function-call-from-object-file/common.h @@ -0,0 +1,8 @@ +#ifndef COMMON_H_IN +#define COMMON_H_IN + +struct Foo { + int foo() { return 15; } +}; + +#endif // COMMON_H_IN diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/lib1.cpp b/lldb/test/API/lang/cpp/function-call-from-object-file/lib1.cpp new file mode 100644 index 0000000000000..b97bcc1b712b6 --- /dev/null +++ b/lldb/test/API/lang/cpp/function-call-from-object-file/lib1.cpp @@ -0,0 +1,8 @@ +#include "common.h" + +// Parameter "Foo*" forces LLDB to parse "Foo" from the object +// file that it is stopped in. +void lib1_func(Foo *) { + // Force definition into lib1.o debug-info. 
+ Foo{}.foo(); +} diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/lib2.cpp b/lldb/test/API/lang/cpp/function-call-from-object-file/lib2.cpp new file mode 100644 index 0000000000000..2f9d81a8bdf4c --- /dev/null +++ b/lldb/test/API/lang/cpp/function-call-from-object-file/lib2.cpp @@ -0,0 +1,6 @@ +#include "common.h" + +void lib2_func(Foo *) { + // Force definition into lib2.o debug-info. + Foo{}.foo(); +} diff --git a/lldb/test/API/lang/cpp/function-call-from-object-file/main.cpp b/lldb/test/API/lang/cpp/function-call-from-object-file/main.cpp new file mode 100644 index 0000000000000..61ca798daf1df --- /dev/null +++ b/lldb/test/API/lang/cpp/function-call-from-object-file/main.cpp @@ -0,0 +1,10 @@ +struct Foo; + +extern void lib1_func(Foo *); +extern void lib2_func(Foo *); + +int main() { + lib1_func(nullptr); + lib2_func(nullptr); + return 0; +} diff --git a/lldb/test/API/lang/cpp/structured-binding/TestStructuredBinding.py b/lldb/test/API/lang/cpp/structured-binding/TestStructuredBinding.py index 5f939ecfbef29..882c91d1ce8c8 100644 --- a/lldb/test/API/lang/cpp/structured-binding/TestStructuredBinding.py +++ b/lldb/test/API/lang/cpp/structured-binding/TestStructuredBinding.py @@ -99,16 +99,21 @@ def test(self): self.expect_expr("ty2", result_value="'z'") self.expect_expr("tz2", result_value="10") - self.expect( - "frame variable", - substrs=[ - "tx1 =", - "ty1 =", - "tz1 =", - "tx2 =", - "ty2 =", - "tz2 =", - "mp1 =", - "mp2 =", - ], - ) + # Older versions of Clang marked structured binding variables + # as artificial, and thus LLDB wouldn't display them. + if self.expectedCompiler(["clang"]) and self.expectedCompilerVersion( + [">=", "22.0"] + ): + self.expect( + "frame variable", + substrs=[ + "tx1 =", + "ty1 =", + "tz1 =", + "tx2 =", + "ty2 =", + "tz2 =", + "mp1 =", + "mp2 =", + ], + ) diff --git a/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py b/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py index eac7b5ef1099a..83c057220410a 100644 --- a/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py +++ b/lldb/test/API/lang/cpp/template-arguments/TestCppTemplateArguments.py @@ -1,4 +1,5 @@ import lldb +import lldbsuite.test.lldbplatformutil as lldbplatformutil from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil @@ -82,8 +83,12 @@ def test(self): value = self.expect_expr("temp7", result_type="Foo<__fp16, __fp16>") self.assertFalse(value.GetType().GetTemplateArgumentValue(target, 1)) - value = self.expect_expr("temp8", result_type="Foo<__fp16, __fp16>") - self.assertFalse(value.GetType().GetTemplateArgumentValue(target, 1)) + # The target we use when evaluating these expressions for Arm leads to there + # not being a __bf16 type in the AST so we fall back to __fp16 and evaluating + # this fails. 
+ if lldbplatformutil.getArchitecture() != "arm": + value = self.expect_expr("temp8", result_type="Foo<__bf16, __bf16>") + self.assertFalse(value.GetType().GetTemplateArgumentValue(target, 1)) value = self.expect_expr("temp9", result_type="Bar") template_param_value = value.GetType().GetTemplateArgumentValue(target, 1) diff --git a/lldb/test/API/macosx/mte/Makefile b/lldb/test/API/macosx/mte/Makefile new file mode 100644 index 0000000000000..cb20942805e2a --- /dev/null +++ b/lldb/test/API/macosx/mte/Makefile @@ -0,0 +1,12 @@ +C_SOURCES := main.c + +EXE := uaf_mte + +all: uaf_mte sign + +include Makefile.rules + +sign: mte-entitlements.plist uaf_mte +ifeq ($(OS),Darwin) + codesign -s - -f --entitlements $^ +endif diff --git a/lldb/test/API/macosx/mte/TestDarwinMTE.py b/lldb/test/API/macosx/mte/TestDarwinMTE.py new file mode 100644 index 0000000000000..ef858b1fc2710 --- /dev/null +++ b/lldb/test/API/macosx/mte/TestDarwinMTE.py @@ -0,0 +1,110 @@ +"""Test MTE Memory Tagging on Apple platforms""" + +import lldb +import re +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil +import lldbsuite.test.cpu_feature as cpu_feature + +exe_name = "uaf_mte" # Must match Makefile + + +class TestDarwinMTE(TestBase): + NO_DEBUG_INFO_TESTCASE = True + + @skipUnlessFeature(cpu_feature.AArch64.MTE) + def test_tag_fault(self): + self.build() + exe = self.getBuildArtifact(exe_name) + + target = self.dbg.CreateTarget(exe) + self.assertTrue(target, VALID_TARGET) + + process = target.LaunchSimple(None, None, None) + self.assertState(process.GetState(), lldb.eStateStopped, PROCESS_STOPPED) + + self.expect( + "thread info", + substrs=[ + "stop reason = EXC_ARM_MTE_TAG_FAULT", + "MTE tag mismatch detected", + ], + ) + + @skipUnlessFeature(cpu_feature.AArch64.MTE) + def test_memory_region(self): + self.build() + lldbutil.run_to_source_breakpoint( + self, "// before free", lldb.SBFileSpec("main.c"), exe_name=exe_name + ) + + # (lldb) memory region ptr + # [0x00000001005ec000-0x00000001009ec000) rw- + # memory tagging: enabled + # Modified memory (dirty) page list provided, 2 entries. + # Dirty pages: 0x1005ec000, 0x1005fc000. + self.expect("memory region ptr", substrs=["memory tagging: enabled"]) + + @skipUnlessFeature(cpu_feature.AArch64.MTE) + def test_memory_read_with_tags(self): + self.build() + lldbutil.run_to_source_breakpoint( + self, "// before free", lldb.SBFileSpec("main.c"), exe_name=exe_name + ) + + # (lldb) memory read ptr-16 ptr+48 --show-tags + # 0x7d2c00930: 00 00 00 00 00 00 00 00 d0 e3 a5 0a 02 00 00 00 ................ (tag: 0x3) + # 0x7d2c00940: 48 65 6c 6c 6f 00 00 00 00 00 00 00 00 00 00 00 Hello........... (tag: 0xb) + # 0x7d2c00950: 57 6f 72 6c 64 00 00 00 00 00 00 00 00 00 00 00 World........... (tag: 0xb) + # 0x7d2c00960: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 
(tag: 0x9) + self.expect( + "memory read ptr-16 ptr+48 --show-tags", + substrs=[" Hello...........", " World..........."], + patterns=[r"(.*\(tag: 0x[0-9a-f]\)\n){4}"], + ) + + def _parse_pointer_tag(self, output): + return re.search(r"Logical tag: (0x[0-9a-f])", output).group(1) + + def _parse_memory_tags(self, output, expected_tag_count): + tags = re.findall(r"\): (0x[0-9a-f])", output) + self.assertEqual(len(tags), expected_tag_count) + return tags + + @skipUnlessFeature(cpu_feature.AArch64.MTE) + def test_memory_tag_read(self): + self.build() + lldbutil.run_to_source_breakpoint( + self, "// before free", lldb.SBFileSpec("main.c"), exe_name=exe_name + ) + + # (lldb) memory tag read ptr-1 ptr+33 + # Logical tag: 0x5 + # Allocation tags: + # [0x100a65a40, 0x100a65a50): 0xf (mismatch) + # [0x100a65a50, 0x100a65a60): 0x5 + # [0x100a65a60, 0x100a65a70): 0x5 + # [0x100a65a70, 0x100a65a80): 0x2 (mismatch) + self.expect( + "memory tag read ptr-1 ptr+33", + substrs=["Logical tag: 0x", "Allocation tags:", "(mismatch)"], + patterns=[r"(\[.*\): 0x[0-9a-f].*\n){4}"], + ) + output = self.res.GetOutput() + self.assertEqual(output.count("(mismatch)"), 2) + ptr_tag = self._parse_pointer_tag(output) + tags = self._parse_memory_tags(output, 4) + self.assertEqual(tags[1], ptr_tag) + self.assertEqual(tags[2], ptr_tag) + self.assertNotEqual(tags[0], ptr_tag) # Memory that comes before/after + self.assertNotEqual(tags[3], ptr_tag) # allocation has different tag. + + # Continue running until MTE fault + self.expect("process continue", substrs=["stop reason = EXC_ARM_MTE_TAG_FAULT"]) + + self.runCmd("memory tag read ptr-1 ptr+33") + output = self.res.GetOutput() + self.assertEqual(output.count("(mismatch)"), 4) + tags = self._parse_memory_tags(output, 4) + self.assertTrue(all(t != ptr_tag for t in tags)) diff --git a/lldb/test/API/macosx/mte/main.c b/lldb/test/API/macosx/mte/main.c new file mode 100644 index 0000000000000..f9f6b1594ef41 --- /dev/null +++ b/lldb/test/API/macosx/mte/main.c @@ -0,0 +1,28 @@ +#include +#include +#include +#include + +// Produce some names on the trace +const size_t tag_granule = 16; +static uint8_t *my_malloc(void) { return malloc(2 * tag_granule); } +static uint8_t *allocate(void) { return my_malloc(); } + +static void my_free(void *ptr) { free(ptr); } +static void deallocate(void *ptr) { my_free(ptr); } + +static void touch_memory(uint8_t *ptr) { ptr[7] = 1; } // invalid access +static void modify(uint8_t *ptr) { touch_memory(ptr); } + +int main() { + uint8_t *ptr = allocate(); + + strncpy((char *)ptr, "Hello", 16); + strncpy((char *)ptr + 16, "World", 16); + + deallocate(ptr); // before free + + modify(ptr); // use-after-free + + return 0; +} diff --git a/lldb/test/API/macosx/mte/mte-entitlements.plist b/lldb/test/API/macosx/mte/mte-entitlements.plist new file mode 100644 index 0000000000000..6de5d5634d878 --- /dev/null +++ b/lldb/test/API/macosx/mte/mte-entitlements.plist @@ -0,0 +1,10 @@ + + + + + com.apple.security.hardened-process + + com.apple.security.hardened-process.checked-allocations + + + diff --git a/lldb/test/API/macosx/sme-registers/TestSMERegistersDarwin.py b/lldb/test/API/macosx/sme-registers/TestSMERegistersDarwin.py index 6f9d055cef506..c762c8da78ca8 100644 --- a/lldb/test/API/macosx/sme-registers/TestSMERegistersDarwin.py +++ b/lldb/test/API/macosx/sme-registers/TestSMERegistersDarwin.py @@ -1,6 +1,7 @@ import lldb from lldbsuite.test.lldbtest import * from lldbsuite.test.decorators import * +import lldbsuite.test.cpu_feature as cpu_feature import 
lldbsuite.test.lldbutil as lldbutil import os @@ -9,10 +10,9 @@ class TestSMERegistersDarwin(TestBase): NO_DEBUG_INFO_TESTCASE = True mydir = TestBase.compute_mydir(__file__) - @skipIfRemote @skipUnlessDarwin - @skipUnlessFeature("hw.optional.arm.FEAT_SME") - @skipUnlessFeature("hw.optional.arm.FEAT_SME2") + @skipUnlessFeature(cpu_feature.AArch64.SME) + @skipUnlessFeature(cpu_feature.AArch64.SME2) # thread_set_state/thread_get_state only avail in macOS 15.4+ @skipIf(macos_version=["<", "15.4"]) def test(self): diff --git a/lldb/test/API/tools/lldb-dap/module/TestDAP_module.py b/lldb/test/API/tools/lldb-dap/module/TestDAP_module.py index 74743d9182ab4..c5a68372d8221 100644 --- a/lldb/test/API/tools/lldb-dap/module/TestDAP_module.py +++ b/lldb/test/API/tools/lldb-dap/module/TestDAP_module.py @@ -7,7 +7,8 @@ import lldbdap_testcase import re - +# Flakey in Github CI runs, see https://github.com/llvm/llvm-project/issues/137660. +@skipIfLinux class TestDAP_module(lldbdap_testcase.DAPTestCaseBase): def run_test(self, symbol_basename, expect_debug_info_size): program_basename = "a.out.stripped" diff --git a/lldb/test/CMakeLists.txt b/lldb/test/CMakeLists.txt index 8116f4c3c823a..513d1ec493ee1 100644 --- a/lldb/test/CMakeLists.txt +++ b/lldb/test/CMakeLists.txt @@ -164,9 +164,14 @@ if(TARGET clang) if (TARGET libcxx OR ("libcxx" IN_LIST LLVM_ENABLE_RUNTIMES)) set(LLDB_HAS_LIBCXX ON) if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE) - set(LIBCXX_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}/${LLVM_DEFAULT_TARGET_TRIPLE}) + set(LIBCXX_TARGET_SUBDIR ${LLVM_DEFAULT_TARGET_TRIPLE}) + if(LIBCXX_LIBDIR_SUBDIR) + string(APPEND LIBCXX_TARGET_SUBDIR /${LIBCXX_LIBDIR_SUBDIR}) + endif() + cmake_path(NORMAL_PATH LIBCXX_TARGET_SUBDIR) + set(LIBCXX_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}/${LIBCXX_TARGET_SUBDIR}) set(LIBCXX_GENERATED_INCLUDE_DIR "${LLVM_BINARY_DIR}/include/c++/v1") - set(LIBCXX_GENERATED_INCLUDE_TARGET_DIR "${LLVM_BINARY_DIR}/include/${LLVM_DEFAULT_TARGET_TRIPLE}/c++/v1") + set(LIBCXX_GENERATED_INCLUDE_TARGET_DIR "${LLVM_BINARY_DIR}/include/${LIBCXX_TARGET_SUBDIR}/c++/v1") else() set(LIBCXX_LIBRARY_DIR ${CMAKE_BINARY_DIR}/lib${LIBCXX_LIBDIR_SUFFIX}) set(LIBCXX_GENERATED_INCLUDE_DIR "${CMAKE_BINARY_DIR}/include/c++/v1") diff --git a/lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c b/lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c new file mode 100644 index 0000000000000..8f1bb62874a12 --- /dev/null +++ b/lldb/test/Shell/Expr/TestGlobalSymbolObjCConflict.c @@ -0,0 +1,35 @@ +// XFAIL: target-windows + +// Tests that LLDB correctly parses global symbols +// starting with 'O'. On some platforms (e.g., Darwin) +// C-symbols are prefixed with a '_'. The LLDB Macho-O +// parses handles Objective-C metadata symbols starting +// with '_OBJC' specially. This test ensures that we don't +// lose track of regular global symbols with a '_O' prefix +// in this. 
+ +// RUN: %clang_host -c -g -fno-common %s -o %t.o +// RUN: %clang_host %t.o -o %t.out +// RUN: %lldb -b -x %t.out \ +// RUN: -o "b 29" \ +// RUN: -o "run" \ +// RUN: -o "p OglobalVar" \ +// RUN: -o "p Oabc" | FileCheck %s + +typedef struct { + int a; +} Oabc_t; + +Oabc_t Oabc; +int OglobalVar; + +int main(int argc, const char *argv[]) { + Oabc.a = 15; + OglobalVar = 10; + return OglobalVar + Oabc.a; +} + +// CHECK: (lldb) p OglobalVar +// CHECK: (int) 10 +// CHECK: (lldb) p Oabc +// CHECK: (Oabc_t) (a = 15) diff --git a/lldb/test/Shell/ObjectFile/ELF/elf-no-shdrs-pt-notes.yaml b/lldb/test/Shell/ObjectFile/ELF/elf-no-shdrs-pt-notes.yaml new file mode 100644 index 0000000000000..1e9c5dfaeab1b --- /dev/null +++ b/lldb/test/Shell/ObjectFile/ELF/elf-no-shdrs-pt-notes.yaml @@ -0,0 +1,706 @@ +## This test verifies that loading an ELF file, that has no section headers but +## has a PT_NOTE program header with a GNU Build ID, can properly extract the +## UUID value. + +# RUN: yaml2obj %s -o %t +# RUN: llvm-strip --strip-sections %t + +# RUN: %lldb -b \ +# RUN: -o "target create -d '%t'" \ +# RUN: -o "image list" \ +# RUN: | FileCheck %s + +# CHECK: Current executable set to '{{.*}}elf-no-shdrs-pt-notes.yaml.tmp' (x86_64). +# CHECK: [ 0] 7F1F56D6-7DBB-17BA-C9A3-4417DB52F097-2548414F 0x0000000000000000 {{.*}}elf-no-shdrs-pt-notes.yaml.tmp + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_DYN + Machine: EM_X86_64 + Entry: 0x1040 +ProgramHeaders: + - Type: PT_PHDR + Flags: [ PF_R ] + VAddr: 0x40 + Align: 0x8 + Offset: 0x40 + - Type: PT_INTERP + Flags: [ PF_R ] + FirstSec: .interp + LastSec: .interp + VAddr: 0x318 + Offset: 0x318 + - Type: PT_LOAD + Flags: [ PF_R ] + FirstSec: .interp + LastSec: .rela.plt + Align: 0x1000 + Offset: 0x0 + - Type: PT_LOAD + Flags: [ PF_X, PF_R ] + FirstSec: .init + LastSec: .fini + VAddr: 0x1000 + Align: 0x1000 + Offset: 0x1000 + - Type: PT_LOAD + Flags: [ PF_R ] + FirstSec: .rodata + LastSec: .eh_frame + VAddr: 0x2000 + Align: 0x1000 + Offset: 0x2000 + - Type: PT_LOAD + Flags: [ PF_W, PF_R ] + FirstSec: .init_array + LastSec: .bss + VAddr: 0x3DB0 + Align: 0x1000 + Offset: 0x2DB0 + - Type: PT_DYNAMIC + Flags: [ PF_W, PF_R ] + FirstSec: .dynamic + LastSec: .dynamic + VAddr: 0x3DC8 + Align: 0x8 + Offset: 0x2DC8 + - Type: PT_NOTE + Flags: [ PF_R ] + FirstSec: .note.gnu.property + LastSec: .note.gnu.property + VAddr: 0x338 + Align: 0x8 + Offset: 0x338 + - Type: PT_NOTE + Flags: [ PF_R ] + FirstSec: .note.gnu.build-id + LastSec: .note.ABI-tag + VAddr: 0x358 + Align: 0x4 + Offset: 0x358 + - Type: PT_GNU_PROPERTY + Flags: [ PF_R ] + FirstSec: .note.gnu.property + LastSec: .note.gnu.property + VAddr: 0x338 + Align: 0x8 + Offset: 0x338 + - Type: PT_GNU_EH_FRAME + Flags: [ PF_R ] + FirstSec: .eh_frame_hdr + LastSec: .eh_frame_hdr + VAddr: 0x2004 + Align: 0x4 + Offset: 0x2004 + - Type: PT_GNU_STACK + Flags: [ PF_W, PF_R ] + Align: 0x10 + Offset: 0x0 + - Type: PT_GNU_RELRO + Flags: [ PF_R ] + FirstSec: .init_array + LastSec: .got + VAddr: 0x3DB0 + Offset: 0x2DB0 +Sections: + - Name: .interp + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC ] + Address: 0x318 + AddressAlign: 0x1 + Content: 2F6C696236342F6C642D6C696E75782D7838362D36342E736F2E3200 + - Name: .note.gnu.property + Type: SHT_NOTE + Flags: [ SHF_ALLOC ] + Address: 0x338 + AddressAlign: 0x8 + Notes: + - Name: GNU + Desc: 028000C0040000000300000000000000 + Type: NT_GNU_PROPERTY_TYPE_0 + - Name: .note.gnu.build-id + Type: SHT_NOTE + Flags: [ SHF_ALLOC ] + Address: 0x358 + AddressAlign: 0x4 + Notes: + - Name: 
GNU + Desc: 7F1F56D67DBB17BAC9A34417DB52F0972548414F + Type: NT_PRPSINFO + - Name: .note.ABI-tag + Type: SHT_NOTE + Flags: [ SHF_ALLOC ] + Address: 0x37C + AddressAlign: 0x4 + Notes: + - Name: GNU + Desc: '00000000030000000200000000000000' + Type: NT_VERSION + - Name: .gnu.hash + Type: SHT_GNU_HASH + Flags: [ SHF_ALLOC ] + Address: 0x3A0 + Link: .dynsym + AddressAlign: 0x8 + Header: + SymNdx: 0x1 + Shift2: 0x0 + BloomFilter: [ 0x0 ] + HashBuckets: [ 0x0 ] + HashValues: [ ] + - Name: .dynsym + Type: SHT_DYNSYM + Flags: [ SHF_ALLOC ] + Address: 0x3C0 + Link: .dynstr + AddressAlign: 0x8 + - Name: .dynstr + Type: SHT_STRTAB + Flags: [ SHF_ALLOC ] + Address: 0x450 + AddressAlign: 0x1 + - Name: .gnu.version + Type: SHT_GNU_versym + Flags: [ SHF_ALLOC ] + Address: 0x500 + Link: .dynsym + AddressAlign: 0x2 + Entries: [ 0, 2, 3, 0, 0, 0 ] + - Name: .gnu.version_r + Type: SHT_GNU_verneed + Flags: [ SHF_ALLOC ] + Address: 0x510 + Link: .dynstr + AddressAlign: 0x8 + Dependencies: + - Version: 1 + File: libc.so.6 + Entries: + - Name: GLIBC_2.34 + Hash: 110530996 + Flags: 0 + Other: 3 + - Name: GLIBC_2.2.5 + Hash: 157882997 + Flags: 0 + Other: 2 + - Name: .rela.dyn + Type: SHT_RELA + Flags: [ SHF_ALLOC ] + Address: 0x540 + Link: .dynsym + AddressAlign: 0x8 + Relocations: + - Offset: 0x3DB0 + Type: R_X86_64_RELATIVE + Addend: 4384 + - Offset: 0x3DB8 + Type: R_X86_64_RELATIVE + Addend: 4320 + - Offset: 0x3DC0 + Type: R_X86_64_RELATIVE + Addend: 15808 + - Offset: 0x3FD8 + Symbol: __cxa_finalize + Type: R_X86_64_GLOB_DAT + - Offset: 0x3FE0 + Symbol: __libc_start_main + Type: R_X86_64_GLOB_DAT + - Offset: 0x3FE8 + Symbol: _ITM_deregisterTMCloneTable + Type: R_X86_64_GLOB_DAT + - Offset: 0x3FF0 + Symbol: __gmon_start__ + Type: R_X86_64_GLOB_DAT + - Offset: 0x3FF8 + Symbol: _ITM_registerTMCloneTable + Type: R_X86_64_GLOB_DAT + - Name: .rela.plt + Type: SHT_RELA + Flags: [ SHF_ALLOC, SHF_INFO_LINK ] + Address: 0x600 + Link: .dynsym + AddressAlign: 0x8 + Info: .got.plt + Relocations: + - Offset: 0x4018 + Symbol: __cxa_finalize + Type: R_X86_64_JUMP_SLOT + - Name: .init + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x1000 + AddressAlign: 0x4 + Offset: 0x1000 + Content: F30F1EFA4883EC08488B05E12F00004885C07402FFD04883C408C3 + - Name: .plt + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x1020 + AddressAlign: 0x10 + EntSize: 0x10 + Content: FF35E22F0000FF25E42F00000F1F4000FF25E22F00006800000000E9E0FFFFFF + - Name: .text + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x1040 + AddressAlign: 0x10 + Content: F30F1EFA31ED4989D15E4889E24883E4F050544531C031C9488D3DD1000000FF157B2F0000F4662E0F1F840000000000488D3DB12F0000488D05AA2F00004839F87415488B055E2F00004885C07409FFE00F1F8000000000C30F1F8000000000488D3D812F0000488D357A2F00004829FE4889F048C1EE3F48C1F8034801C648D1FE7414488B052D2F00004885C07408FFE0660F1F440000C30F1F8000000000F30F1EFA803D392F000000752B5548833DE22E0000004889E5740C488D3DBE2C0000E829FFFFFFE864FFFFFFC605112F0000015DC30F1F00C30F1F8000000000F30F1EFAE977FFFFFF0F1F8000000000554889E5C745FC0000000031C05DC3 + - Name: .fini + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x1140 + AddressAlign: 0x4 + Content: F30F1EFA4883EC084883C408C3 + - Name: .rodata + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_MERGE ] + Address: 0x2000 + AddressAlign: 0x4 + EntSize: 0x4 + Offset: 0x2000 + Content: '01000200' + - Name: .eh_frame_hdr + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC ] + Address: 0x2004 + AddressAlign: 0x4 + Content: 
011B033B20000000030000001CF0FFFF540000003CF0FFFF3C0000002CF1FFFF7C000000 + - Name: .eh_frame + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC ] + Address: 0x2028 + AddressAlign: 0x8 + Content: 1400000000000000017A5200017810011B0C070890010000140000001C000000F8EFFFFF2600000000440710000000002400000034000000C0EFFFFF20000000000E10460E184A0F0B770880003F1A3B2A332422000000001C0000005C000000A8F0FFFF0F00000000410E108602430D064A0C070800000000000000 + - Name: .init_array + Type: SHT_INIT_ARRAY + Flags: [ SHF_WRITE, SHF_ALLOC ] + Address: 0x3DB0 + AddressAlign: 0x8 + EntSize: 0x8 + Offset: 0x2DB0 + Content: '2011000000000000' + - Name: .fini_array + Type: SHT_FINI_ARRAY + Flags: [ SHF_WRITE, SHF_ALLOC ] + Address: 0x3DB8 + AddressAlign: 0x8 + EntSize: 0x8 + Content: E010000000000000 + - Name: .data.rel.ro + Type: SHT_PROGBITS + Flags: [ SHF_WRITE, SHF_ALLOC ] + Address: 0x3DC0 + AddressAlign: 0x8 + Content: C03D000000000000 + - Name: .dynamic + Type: SHT_DYNAMIC + Flags: [ SHF_WRITE, SHF_ALLOC ] + Address: 0x3DC8 + Link: .dynstr + AddressAlign: 0x8 + Entries: + - Tag: DT_NEEDED + Value: 0x67 + - Tag: DT_NEEDED + Value: 0x76 + - Tag: DT_NEEDED + Value: 0x80 + - Tag: DT_NEEDED + Value: 0x8E + - Tag: DT_INIT + Value: 0x1000 + - Tag: DT_FINI + Value: 0x1140 + - Tag: DT_INIT_ARRAY + Value: 0x3DB0 + - Tag: DT_INIT_ARRAYSZ + Value: 0x8 + - Tag: DT_FINI_ARRAY + Value: 0x3DB8 + - Tag: DT_FINI_ARRAYSZ + Value: 0x8 + - Tag: DT_GNU_HASH + Value: 0x3A0 + - Tag: DT_STRTAB + Value: 0x450 + - Tag: DT_SYMTAB + Value: 0x3C0 + - Tag: DT_STRSZ + Value: 0xAF + - Tag: DT_SYMENT + Value: 0x18 + - Tag: DT_DEBUG + Value: 0x0 + - Tag: DT_PLTGOT + Value: 0x4000 + - Tag: DT_PLTRELSZ + Value: 0x18 + - Tag: DT_PLTREL + Value: 0x7 + - Tag: DT_JMPREL + Value: 0x600 + - Tag: DT_RELA + Value: 0x540 + - Tag: DT_RELASZ + Value: 0xC0 + - Tag: DT_RELAENT + Value: 0x18 + - Tag: DT_FLAGS_1 + Value: 0x8000000 + - Tag: DT_VERNEED + Value: 0x510 + - Tag: DT_VERNEEDNUM + Value: 0x1 + - Tag: DT_VERSYM + Value: 0x500 + - Tag: DT_RELACOUNT + Value: 0x3 + - Tag: DT_NULL + Value: 0x0 + - Tag: DT_NULL + Value: 0x0 + - Tag: DT_NULL + Value: 0x0 + - Tag: DT_NULL + Value: 0x0 + - Tag: DT_NULL + Value: 0x0 + - Name: .got + Type: SHT_PROGBITS + Flags: [ SHF_WRITE, SHF_ALLOC ] + Address: 0x3FD8 + AddressAlign: 0x8 + EntSize: 0x8 + Content: '00000000000000000000000000000000000000000000000000000000000000000000000000000000' + - Name: .got.plt + Type: SHT_PROGBITS + Flags: [ SHF_WRITE, SHF_ALLOC ] + Address: 0x4000 + AddressAlign: 0x8 + EntSize: 0x8 + Content: C83D000000000000000000000000000000000000000000003610000000000000 + - Name: .data + Type: SHT_PROGBITS + Flags: [ SHF_WRITE, SHF_ALLOC ] + Address: 0x4020 + AddressAlign: 0x1 + Content: '00000000' + - Name: .bss + Type: SHT_NOBITS + Flags: [ SHF_WRITE, SHF_ALLOC ] + Address: 0x4024 + AddressAlign: 0x1 + Size: 0x4 + - Name: .comment + Type: SHT_PROGBITS + Flags: [ SHF_MERGE, SHF_STRINGS ] + AddressAlign: 0x1 + EntSize: 0x1 + Content: 4743433A2028474E55292031312E352E302032303234303731392028526564204861742031312E352E302D3929004743433A2028474E55292031312E352E302032303234303731392028526564204861742031312E352E302D3131290046616365626F6F6B20636C616E672076657273696F6E2031352E38302E31202868747470733A2F2F6769742E696E7465726E616C2E7466626E772E6E65742F7265706F732F6769742F726F2F6F736D6574612F65787465726E616C2F6C6C766D2D70726F6A65637420626632333164636436353637396532643466616461623562353363353264623734666237653133362900 + - Name: .annobin.notes + Type: SHT_PROGBITS + Flags: [ SHF_MERGE, SHF_STRINGS ] + AddressAlign: 0x1 + 
EntSize: 0x1 + Content: 41563A3470313239380052563A72756E6E696E67206763632031312E352E302032303234303731390042563A616E6E6F62696E206763632031312E352E302032303234303731390047573A307833643230353661202E2E2F737973646570732F7838362F6162692D6E6F74652E630053503A330053433A310043463A38202E2E2F737973646570732F7838362F6162692D6E6F74652E6300464C3A2D31202E2E2F737973646570732F7838362F6162692D6E6F74652E630047413A310050493A330053453A300069533A300047573A30783364323035366120696E69742E630043463A3820696E69742E6300464C3A2D3120696E69742E6300 + - Name: .gnu.build.attributes + Type: SHT_NOTE + Address: 0x6028 + AddressAlign: 0x4 + Notes: + - Name: "GA$\x013a1" + Desc: '40100000000000006610000000000000' + Type: NT_GNU_BUILD_ATTRIBUTE_OPEN + - Name: "GA$\x013a1" + Desc: '66100000000000006610000000000000' + Type: NT_GNU_BUILD_ATTRIBUTE_OPEN + - Name: "GA$\x013a1" + Desc: '00100000000000001610000000000000' + Type: NT_GNU_BUILD_ATTRIBUTE_OPEN + - Name: "GA$\x013a1" + Desc: '40110000000000004811000000000000' + Type: NT_GNU_BUILD_ATTRIBUTE_OPEN + - Name: "GA$\x013a1" + Desc: '70100000000000002911000000000000' + Type: NT_GNU_BUILD_ATTRIBUTE_OPEN + - Name: "GA$\x013a1" + Desc: 3F110000000000003F11000000000000 + Type: NT_GNU_BUILD_ATTRIBUTE_OPEN + - Name: "GA$\x013a1" + Desc: 3F110000000000003F11000000000000 + Type: NT_GNU_BUILD_ATTRIBUTE_OPEN + - Name: "GA$\x013a1" + Desc: 16100000000000001B10000000000000 + Type: NT_GNU_BUILD_ATTRIBUTE_OPEN + - Name: "GA$\x013a1" + Desc: 48110000000000004D11000000000000 + Type: NT_GNU_BUILD_ATTRIBUTE_OPEN +Symbols: + - Name: .interp + Type: STT_SECTION + Section: .interp + Value: 0x318 + - Name: .note.gnu.property + Type: STT_SECTION + Section: .note.gnu.property + Value: 0x338 + - Name: .note.gnu.build-id + Type: STT_SECTION + Section: .note.gnu.build-id + Value: 0x358 + - Name: .note.ABI-tag + Type: STT_SECTION + Section: .note.ABI-tag + Value: 0x37C + - Name: .gnu.hash + Type: STT_SECTION + Section: .gnu.hash + Value: 0x3A0 + - Name: .dynsym + Type: STT_SECTION + Section: .dynsym + Value: 0x3C0 + - Name: .dynstr + Type: STT_SECTION + Section: .dynstr + Value: 0x450 + - Name: .gnu.version + Type: STT_SECTION + Section: .gnu.version + Value: 0x500 + - Name: .gnu.version_r + Type: STT_SECTION + Section: .gnu.version_r + Value: 0x510 + - Name: .rela.dyn + Type: STT_SECTION + Section: .rela.dyn + Value: 0x540 + - Name: .rela.plt + Type: STT_SECTION + Section: .rela.plt + Value: 0x600 + - Name: .init + Type: STT_SECTION + Section: .init + Value: 0x1000 + - Name: .plt + Type: STT_SECTION + Section: .plt + Value: 0x1020 + - Name: .text + Type: STT_SECTION + Section: .text + Value: 0x1040 + - Name: .fini + Type: STT_SECTION + Section: .fini + Value: 0x1140 + - Name: .rodata + Type: STT_SECTION + Section: .rodata + Value: 0x2000 + - Name: .eh_frame_hdr + Type: STT_SECTION + Section: .eh_frame_hdr + Value: 0x2004 + - Name: .eh_frame + Type: STT_SECTION + Section: .eh_frame + Value: 0x2028 + - Name: .init_array + Type: STT_SECTION + Section: .init_array + Value: 0x3DB0 + - Name: .fini_array + Type: STT_SECTION + Section: .fini_array + Value: 0x3DB8 + - Name: .data.rel.ro + Type: STT_SECTION + Section: .data.rel.ro + Value: 0x3DC0 + - Name: .dynamic + Type: STT_SECTION + Section: .dynamic + Value: 0x3DC8 + - Name: .got + Type: STT_SECTION + Section: .got + Value: 0x3FD8 + - Name: .got.plt + Type: STT_SECTION + Section: .got.plt + Value: 0x4000 + - Name: .data + Type: STT_SECTION + Section: .data + Value: 0x4020 + - Name: .bss + Type: STT_SECTION + Section: .bss + Value: 0x4024 + - Name: .comment + 
Type: STT_SECTION + Section: .comment + - Name: .annobin.notes + Type: STT_SECTION + Section: .annobin.notes + - Name: .gnu.build.attributes + Type: STT_SECTION + Section: .gnu.build.attributes + Value: 0x6028 + - Name: '/usr/lib/gcc/x86_64-redhat-linux/11/../../../../lib64/Scrt1.o' + Type: STT_FILE + Index: SHN_ABS + - Name: __abi_tag + Type: STT_OBJECT + Section: .note.ABI-tag + Value: 0x37C + Size: 0x20 + - Name: crtstuff.c + Type: STT_FILE + Index: SHN_ABS + - Name: deregister_tm_clones + Type: STT_FUNC + Section: .text + Value: 0x1070 + - Name: register_tm_clones + Type: STT_FUNC + Section: .text + Value: 0x10A0 + - Name: __do_global_dtors_aux + Type: STT_FUNC + Section: .text + Value: 0x10E0 + - Name: completed.0 + Type: STT_OBJECT + Section: .bss + Value: 0x4024 + Size: 0x1 + - Name: __do_global_dtors_aux_fini_array_entry + Type: STT_OBJECT + Section: .fini_array + Value: 0x3DB8 + - Name: frame_dummy + Type: STT_FUNC + Section: .text + Value: 0x1120 + - Name: __frame_dummy_init_array_entry + Type: STT_OBJECT + Section: .init_array + Value: 0x3DB0 + - Name: main.cpp + Type: STT_FILE + Index: SHN_ABS + - Name: 'crtstuff.c (1)' + Type: STT_FILE + Index: SHN_ABS + - Name: __FRAME_END__ + Type: STT_OBJECT + Section: .eh_frame + Value: 0x20A0 + - Type: STT_FILE + Index: SHN_ABS + - Name: __GNU_EH_FRAME_HDR + Section: .eh_frame_hdr + Value: 0x2004 + - Name: _DYNAMIC + Type: STT_OBJECT + Section: .dynamic + Value: 0x3DC8 + - Name: _GLOBAL_OFFSET_TABLE_ + Type: STT_OBJECT + Section: .got.plt + Value: 0x4000 + - Name: _edata + Section: .data + Binding: STB_GLOBAL + Value: 0x4024 + - Name: data_start + Section: .data + Binding: STB_WEAK + Value: 0x4020 + - Name: _IO_stdin_used + Type: STT_OBJECT + Section: .rodata + Binding: STB_GLOBAL + Value: 0x2000 + Size: 0x4 + - Name: '__cxa_finalize@GLIBC_2.2.5' + Type: STT_FUNC + Binding: STB_WEAK + - Name: main + Type: STT_FUNC + Section: .text + Binding: STB_GLOBAL + Value: 0x1130 + Size: 0xF + - Name: __dso_handle + Type: STT_OBJECT + Section: .data.rel.ro + Binding: STB_GLOBAL + Value: 0x3DC0 + Other: [ STV_HIDDEN ] + - Name: _fini + Type: STT_FUNC + Section: .fini + Binding: STB_GLOBAL + Value: 0x1140 + Other: [ STV_HIDDEN ] + - Name: '__libc_start_main@GLIBC_2.34' + Type: STT_FUNC + Binding: STB_GLOBAL + - Name: _start + Type: STT_FUNC + Section: .text + Binding: STB_GLOBAL + Value: 0x1040 + Size: 0x26 + - Name: _init + Type: STT_FUNC + Section: .init + Binding: STB_GLOBAL + Value: 0x1000 + Other: [ STV_HIDDEN ] + - Name: __TMC_END__ + Type: STT_OBJECT + Section: .data + Binding: STB_GLOBAL + Value: 0x4028 + Other: [ STV_HIDDEN ] + - Name: __data_start + Section: .data + Binding: STB_GLOBAL + Value: 0x4020 + - Name: _end + Section: .bss + Binding: STB_GLOBAL + Value: 0x4028 + - Name: __bss_start + Section: .bss + Binding: STB_GLOBAL + Value: 0x4024 + - Name: _ITM_deregisterTMCloneTable + Binding: STB_WEAK + - Name: __gmon_start__ + Binding: STB_WEAK + - Name: _ITM_registerTMCloneTable + Binding: STB_WEAK +DynamicSymbols: + - Name: __cxa_finalize + Type: STT_FUNC + Binding: STB_WEAK + - Name: __libc_start_main + Type: STT_FUNC + Binding: STB_GLOBAL + - Name: _ITM_deregisterTMCloneTable + Binding: STB_WEAK + - Name: __gmon_start__ + Binding: STB_WEAK + - Name: _ITM_registerTMCloneTable + Binding: STB_WEAK +... 
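For reference, the CHECK line of this test already shows how the 20-byte GNU build-id carried in the PT_NOTE segment is rendered as the module UUID by `image list`: the first 16 bytes use the standard 8-4-4-4-12 hex grouping and the remaining bytes are appended after one more dash. A minimal Python sketch of that formatting (the helper name is made up for illustration; this is not LLDB's implementation):

```python
# Format a GNU build-id (hex string) the way the test's CHECK line shows it:
# standard UUID grouping for the first 16 bytes, extra bytes appended after
# a final dash.
def build_id_to_uuid(desc_hex: str) -> str:
    data = bytes.fromhex(desc_hex)
    groups = [data[0:4], data[4:6], data[6:8], data[8:10], data[10:16]]
    uuid = "-".join(g.hex().upper() for g in groups)
    if len(data) > 16:
        uuid += "-" + data[16:].hex().upper()
    return uuid

print(build_id_to_uuid("7F1F56D67DBB17BAC9A34417DB52F0972548414F"))
# 7F1F56D6-7DBB-17BA-C9A3-4417DB52F097-2548414F
```

This matches the UUID expected by the `# CHECK: [ 0] ...` line above.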
diff --git a/lldb/test/Shell/SymbolFile/DWARF/incomplete-member-beyond-parent-bounds.yaml b/lldb/test/Shell/SymbolFile/DWARF/incomplete-member-beyond-parent-bounds.yaml new file mode 100644 index 0000000000000..4e659d02b3cd4 --- /dev/null +++ b/lldb/test/Shell/SymbolFile/DWARF/incomplete-member-beyond-parent-bounds.yaml @@ -0,0 +1,104 @@ +# This is DWARF where we placed an incomplete type +# at an offset that is the parent DW_AT_byte_size. Check +# that we don't report an error in such cases. +# +# DW_TAG_compile_unit +# DW_AT_name ("main.cpp") +# DW_AT_language (DW_LANG_C) +# +# DW_TAG_structure_type +# DW_AT_name ("Incomplete") +# DW_AT_external (true) +# +# DW_TAG_structure_type +# DW_AT_name ("Foo") +# DW_AT_byte_size (0x04) +# +# DW_TAG_member +# DW_AT_name ("mem") +# DW_AT_data_member_location ("0x04") +# DW_AT_type (0x00000011 "Incomplete") +# +# NULL +# +# NULL + +# RUN: yaml2obj %s > %t +# RUN: lldb-test symbols --name=Foo --find=type %t 2>&1 | FileCheck %s + +# CHECK: Found 1 types: + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +DWARF: + debug_str: + - main.cpp + - Incomplete + - Foo + - mem + debug_abbrev: + - ID: 0 + Table: + - Code: 0x1 + Tag: DW_TAG_compile_unit + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_language + Form: DW_FORM_udata + - Code: 0x2 + Tag: DW_TAG_structure_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_external + Form: DW_FORM_flag_present + - Code: 0x3 + Tag: DW_TAG_structure_type + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x4 + Tag: DW_TAG_member + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_data_member_location + Form: DW_FORM_data1 + debug_info: + - Version: 4 + AbbrevTableID: 0 + AbbrOffset: 0x0 + AddrSize: 8 + Entries: + - AbbrCode: 0x1 + Values: + - Value: 0x0 + - Value: 0x2 + - AbbrCode: 0x2 + Values: + - Value: 0x9 + - AbbrCode: 0x3 + Values: + - Value: 0x14 + - Value: 0x04 + - AbbrCode: 0x4 + Values: + - Value: 0x18 + - Value: 0x11 + - Value: 0x04 + - AbbrCode: 0x0 + - AbbrCode: 0x0 +... diff --git a/lldb/test/Shell/SymbolFile/DWARF/member-beyond-parent-bounds.yaml b/lldb/test/Shell/SymbolFile/DWARF/member-beyond-parent-bounds.yaml new file mode 100644 index 0000000000000..2ac538ed1a851 --- /dev/null +++ b/lldb/test/Shell/SymbolFile/DWARF/member-beyond-parent-bounds.yaml @@ -0,0 +1,109 @@ +# This is malformed DWARF where we placed a non-zero sized type +# at an offset that is larger the parent DW_AT_byte_size. Check +# that we report an error in such cases. 
+# +# DW_TAG_compile_unit +# DW_AT_name ("main.cpp") +# DW_AT_language (DW_LANG_C) +# +# DW_TAG_base_type +# DW_AT_name ("int") +# DW_AT_encoding (DW_ATE_signed) +# DW_AT_byte_size (0x04) +# +# DW_TAG_structure_type +# DW_AT_name ("Foo") +# DW_AT_byte_size (0x04) +# +# DW_TAG_member +# DW_AT_name ("mem") +# DW_AT_data_member_location ("0x05") +# DW_AT_type (0x00000011 "int") +# +# NULL +# +# NULL + +# RUN: yaml2obj %s > %t +# RUN: lldb-test symbols --name=Foo --find=type %t 2>&1 | FileCheck %s + +# CHECK: Found 1 types: + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +DWARF: + debug_str: + - main.cpp + - int + - Foo + - mem + debug_abbrev: + - ID: 0 + Table: + - Code: 0x1 + Tag: DW_TAG_compile_unit + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_language + Form: DW_FORM_udata + - Code: 0x2 + Tag: DW_TAG_base_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_encoding + Form: DW_FORM_data1 + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x3 + Tag: DW_TAG_structure_type + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x4 + Tag: DW_TAG_member + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_data_member_location + Form: DW_FORM_data1 + debug_info: + - Version: 4 + AbbrevTableID: 0 + AbbrOffset: 0x0 + AddrSize: 8 + Entries: + - AbbrCode: 0x1 + Values: + - Value: 0x0 + - Value: 0x2 + - AbbrCode: 0x2 + Values: + - Value: 0x9 + - Value: 0x5 + - Value: 0x4 + - AbbrCode: 0x3 + Values: + - Value: 0x0d + - Value: 0x04 + - AbbrCode: 0x4 + Values: + - Value: 0x11 + - Value: 0x11 + - Value: 0x05 + - AbbrCode: 0x0 + - AbbrCode: 0x0 +... diff --git a/lldb/test/Shell/SymbolFile/DWARF/member-on-parent-bounds.yaml b/lldb/test/Shell/SymbolFile/DWARF/member-on-parent-bounds.yaml new file mode 100644 index 0000000000000..736697c002ee6 --- /dev/null +++ b/lldb/test/Shell/SymbolFile/DWARF/member-on-parent-bounds.yaml @@ -0,0 +1,109 @@ +# This is malformed DWARF where we placed a non-zero sized type +# at an offset that is the parent DW_AT_byte_size. Check +# that we report an error in such cases. 
+# +# DW_TAG_compile_unit +# DW_AT_name ("main.cpp") +# DW_AT_language (DW_LANG_C) +# +# DW_TAG_base_type +# DW_AT_name ("int") +# DW_AT_encoding (DW_ATE_signed) +# DW_AT_byte_size (0x04) +# +# DW_TAG_structure_type +# DW_AT_name ("Foo") +# DW_AT_byte_size (0x04) +# +# DW_TAG_member +# DW_AT_name ("mem") +# DW_AT_data_member_location ("0x04") +# DW_AT_type (0x00000011 "int") +# +# NULL +# +# NULL + +# RUN: yaml2obj %s > %t +# RUN: lldb-test symbols --name=Foo --find=type %t 2>&1 | FileCheck %s + +# CHECK: Found 1 types: + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +DWARF: + debug_str: + - main.cpp + - int + - Foo + - mem + debug_abbrev: + - ID: 0 + Table: + - Code: 0x1 + Tag: DW_TAG_compile_unit + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_language + Form: DW_FORM_udata + - Code: 0x2 + Tag: DW_TAG_base_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_encoding + Form: DW_FORM_data1 + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x3 + Tag: DW_TAG_structure_type + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x4 + Tag: DW_TAG_member + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_data_member_location + Form: DW_FORM_data1 + debug_info: + - Version: 4 + AbbrevTableID: 0 + AbbrOffset: 0x0 + AddrSize: 8 + Entries: + - AbbrCode: 0x1 + Values: + - Value: 0x0 + - Value: 0x2 + - AbbrCode: 0x2 + Values: + - Value: 0x9 + - Value: 0x5 + - Value: 0x4 + - AbbrCode: 0x3 + Values: + - Value: 0x0d + - Value: 0x04 + - AbbrCode: 0x4 + Values: + - Value: 0x11 + - Value: 0x11 + - Value: 0x04 + - AbbrCode: 0x0 + - AbbrCode: 0x0 +... diff --git a/lldb/test/Shell/SymbolFile/DWARF/union-types-no-member-location.yaml b/lldb/test/Shell/SymbolFile/DWARF/union-types-no-member-location.yaml new file mode 100644 index 0000000000000..1d1e129cdb7c0 --- /dev/null +++ b/lldb/test/Shell/SymbolFile/DWARF/union-types-no-member-location.yaml @@ -0,0 +1,178 @@ +# This test produces DWARF that contains a union type whose DW_TAG_member does +# not have a DW_AT_data_member_location set to zero. This is how GCC emits +# debug information for unions. There was code in the DWARFASTParserClang that +# was emitting an invalid error in this case. This test verifies that this +# error does not get emitted. 
+# +# 0x0000000b: DW_TAG_compile_unit +# DW_AT_name ("main.cpp") +# DW_AT_language (DW_LANG_C) +# +# 0x00000011: DW_TAG_base_type +# DW_AT_name ("int") +# DW_AT_encoding (DW_ATE_signed_char) +# DW_AT_byte_size (0x04) +# +# 0x00000018: DW_TAG_base_type +# DW_AT_name ("__ARRAY_SIZE_TYPE__") +# DW_AT_encoding (DW_ATE_unsigned) +# DW_AT_byte_size (0x08) +# +# 0x0000001f: DW_TAG_array_type +# DW_AT_type (0x00000011 "int") +# +# 0x00000024: DW_TAG_subrange_type +# DW_AT_type (0x00000018 "__ARRAY_SIZE_TYPE__") +# DW_AT_count (0x20) +# +# 0x0000002a: NULL +# +# 0x0000002b: DW_TAG_union_type +# DW_AT_name ("UnionType") +# DW_AT_byte_size (0x20) +# +# 0x00000031: DW_TAG_member +# DW_AT_name ("array") +# DW_AT_type (0x0000001f "int[32]") +# +# 0x0000003a: NULL +# +# 0x0000003b: DW_TAG_subprogram +# DW_AT_low_pc (0x0000000000001000) +# DW_AT_high_pc (0x0000000000001050) +# DW_AT_name ("foo") +# DW_AT_type (0x00000031 "array") +# +# 0x00000054: NULL + +# RUN: yaml2obj %s > %t +# RUN: lldb-test symbols --name=UnionType --find=type %t > %t.stdout +# RUN: cat %t.stdout | FileCheck --check-prefix=STDOUT %s + +# STDOUT: Found 1 types: +# STDOUT: {{(0x)?[0-9a-fA-F]+}}: Type{0x0000002b} , name = "UnionType", size = 32, compiler_type = 0x{{[0-9a-fA-F]+}} union UnionType { + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +DWARF: + debug_str: + - '' + - main.cpp + - int + - __ARRAY_SIZE_TYPE__ + - UnionType + - array + debug_abbrev: + - ID: 0 + Table: + - Code: 0x1 + Tag: DW_TAG_compile_unit + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_language + Form: DW_FORM_udata + - Code: 0x2 + Tag: DW_TAG_base_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_encoding + Form: DW_FORM_data1 + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x3 + Tag: DW_TAG_array_type + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0x4 + Tag: DW_TAG_subrange_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_count + Form: DW_FORM_data1 + - Code: 0x5 + Tag: DW_TAG_union_type + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x6 + Tag: DW_TAG_member + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0x7 + Tag: DW_TAG_subprogram + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_low_pc + Form: DW_FORM_addr + - Attribute: DW_AT_high_pc + Form: DW_FORM_addr + - Attribute: DW_AT_name + Form: DW_FORM_string + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + debug_info: + - Length: 0x51 + Version: 4 + AbbrevTableID: 0 + AbbrOffset: 0x0 + AddrSize: 8 + Entries: + - AbbrCode: 0x1 + Values: + - Value: 0x1 + - Value: 0x2 + - AbbrCode: 0x2 + Values: + - Value: 0xA + - Value: 0x6 + - Value: 0x4 + - AbbrCode: 0x2 + Values: + - Value: 0xE + - Value: 0x7 + - Value: 0x8 + - AbbrCode: 0x3 + Values: + - Value: 0x11 + - AbbrCode: 0x4 + Values: + - Value: 0x18 + - Value: 0x20 + - AbbrCode: 0x0 + - AbbrCode: 0x5 + Values: + - Value: 0x22 + - Value: 0x20 + - AbbrCode: 0x6 + Values: + - Value: 0x2C + - Value: 0x1F + - AbbrCode: 0x0 + - AbbrCode: 0x7 + Values: + - Value: 0x1000 + - Value: 0x1050 + - Value: 0xDEADBEEFDEADBEEF + CStr: foo + - Value: 0x31 + - 
AbbrCode: 0x0 +... diff --git a/lldb/test/Shell/SymbolFile/DWARF/zero-sized-member-in-parent-bounds.yaml b/lldb/test/Shell/SymbolFile/DWARF/zero-sized-member-in-parent-bounds.yaml new file mode 100644 index 0000000000000..a98f62cd34056 --- /dev/null +++ b/lldb/test/Shell/SymbolFile/DWARF/zero-sized-member-in-parent-bounds.yaml @@ -0,0 +1,105 @@ +# This is DWARF where we placed a zero-sized type +# at an offset that is the parent DW_AT_byte_size. Check +# that we don't report an error in such cases. +# +# DW_TAG_compile_unit +# DW_AT_name ("main.cpp") +# DW_AT_language (DW_LANG_C) +# +# DW_TAG_structure_type +# DW_AT_name ("Bar") +# DW_AT_byte_size (0x00) +# +# DW_TAG_structure_type +# DW_AT_name ("Foo") +# DW_AT_byte_size (0x04) +# +# DW_TAG_member +# DW_AT_name ("mem") +# DW_AT_data_member_location ("0x04") +# DW_AT_type (0x00000011 "Bar") +# +# NULL +# +# NULL + +# RUN: yaml2obj %s > %t +# RUN: lldb-test symbols --name=Foo --find=type %t 2>&1 | FileCheck %s + +# CHECK: Found 1 types: + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +DWARF: + debug_str: + - main.cpp + - Bar + - Foo + - mem + debug_abbrev: + - ID: 0 + Table: + - Code: 0x1 + Tag: DW_TAG_compile_unit + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_language + Form: DW_FORM_udata + - Code: 0x2 + Tag: DW_TAG_structure_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x3 + Tag: DW_TAG_structure_type + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x4 + Tag: DW_TAG_member + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_data_member_location + Form: DW_FORM_data1 + debug_info: + - Version: 4 + AbbrevTableID: 0 + AbbrOffset: 0x0 + AddrSize: 8 + Entries: + - AbbrCode: 0x1 + Values: + - Value: 0x0 + - Value: 0x2 + - AbbrCode: 0x2 + Values: + - Value: 0x9 + - Value: 0x0 + - AbbrCode: 0x3 + Values: + - Value: 0x0d + - Value: 0x04 + - AbbrCode: 0x4 + Values: + - Value: 0x11 + - Value: 0x11 + - Value: 0x04 + - AbbrCode: 0x0 + - AbbrCode: 0x0 +... diff --git a/lldb/test/Shell/SymbolFile/NativePDB/native-setting.cpp b/lldb/test/Shell/SymbolFile/NativePDB/native-setting.cpp index 41ddba746b4ac..dc26ec8d30cb4 100644 --- a/lldb/test/Shell/SymbolFile/NativePDB/native-setting.cpp +++ b/lldb/test/Shell/SymbolFile/NativePDB/native-setting.cpp @@ -38,7 +38,7 @@ // NO-ENV: Dumping debug symbols for 1 modules. // NO-ENV: SymbolFile native-pdb -// ENV0: warning: The DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. The native reader will be used instead. +// ENV0: warning: the DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. The native reader will be used instead // ENV0: (lldb) target modules dump symfile // ENV0: Dumping debug symbols for 1 modules. // ENV0: SymbolFile native-pdb @@ -48,12 +48,12 @@ // ENV1: Dumping debug symbols for 1 modules. // ENV1: SymbolFile native-pdb -// ENV0-SET-DIA: warning: The DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. The native reader will be used instead. +// ENV0-SET-DIA: warning: the DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. 
The native reader will be used instead // ENV0-SET-DIA: (lldb) target modules dump symfile // ENV0-SET-DIA: Dumping debug symbols for 1 modules. // ENV0-SET-DIA: SymbolFile native-pdb -// ENV1-SET-DIA: warning: The DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. The native reader will be used instead. +// ENV1-SET-DIA: warning: the DIA PDB reader was explicitly requested, but LLDB was built without the DIA SDK. The native reader will be used instead // ENV1-SET-DIA: (lldb) target modules dump symfile // ENV1-SET-DIA: Dumping debug symbols for 1 modules. // ENV1-SET-DIA: SymbolFile native-pdb diff --git a/lldb/test/Shell/SymbolFile/NativePDB/symtab.cpp b/lldb/test/Shell/SymbolFile/NativePDB/symtab.cpp index 81d643d9572d8..beb5ae2f90256 100644 --- a/lldb/test/Shell/SymbolFile/NativePDB/symtab.cpp +++ b/lldb/test/Shell/SymbolFile/NativePDB/symtab.cpp @@ -1,4 +1,4 @@ -// REQUIRES: x86 +// REQUIRES: lld, x86 // Test symtab reading // RUN: %build --compiler=clang-cl --arch=64 --nodefaultlib -o %t.exe -- %s diff --git a/lldb/test/Shell/lit.cfg.py b/lldb/test/Shell/lit.cfg.py index 505847fb763e0..cdc0cfe51f7c6 100644 --- a/lldb/test/Shell/lit.cfg.py +++ b/lldb/test/Shell/lit.cfg.py @@ -33,7 +33,7 @@ # suffixes: A list of file extensions to treat as test files. This is overriden # by individual lit.local.cfg files in the test subdirectories. -config.suffixes = [".test", ".cpp", ".s", ".m", ".ll"] +config.suffixes = [".test", ".cpp", ".s", ".m", ".ll", ".c"] # excludes: A list of directories to exclude from the testsuite. The 'Inputs' # subdirectories contain auxiliary inputs for various tests in their parent diff --git a/lldb/tools/debugserver/source/DNB.cpp b/lldb/tools/debugserver/source/DNB.cpp index f541134b43a1b..0cd48d91a682a 100644 --- a/lldb/tools/debugserver/source/DNB.cpp +++ b/lldb/tools/debugserver/source/DNB.cpp @@ -1386,6 +1386,16 @@ int DNBProcessMemoryRegionInfo(nub_process_t pid, nub_addr_t addr, return -1; } +nub_bool_t DNBProcessGetMemoryTags(nub_process_t pid, nub_addr_t addr, + nub_size_t size, + std::vector &tags) { + MachProcessSP procSP; + if (GetProcessSP(pid, procSP)) + return procSP->Task().GetMemoryTags(addr, size, tags); + + return false; +} + std::string DNBProcessGetProfileData(nub_process_t pid, DNBProfileDataScanType scanType) { MachProcessSP procSP; diff --git a/lldb/tools/debugserver/source/DNB.h b/lldb/tools/debugserver/source/DNB.h index 10d1f68794355..1f3d5392c588f 100644 --- a/lldb/tools/debugserver/source/DNB.h +++ b/lldb/tools/debugserver/source/DNB.h @@ -105,6 +105,9 @@ nub_bool_t DNBProcessMemoryDeallocate(nub_process_t pid, nub_addr_t addr) DNB_EXPORT; int DNBProcessMemoryRegionInfo(nub_process_t pid, nub_addr_t addr, DNBRegionInfo *region_info) DNB_EXPORT; +nub_bool_t DNBProcessGetMemoryTags(nub_process_t pid, nub_addr_t addr, + nub_size_t size, + std::vector &tags) DNB_EXPORT; std::string DNBProcessGetProfileData(nub_process_t pid, DNBProfileDataScanType scanType) DNB_EXPORT; diff --git a/lldb/tools/debugserver/source/DNBDefs.h b/lldb/tools/debugserver/source/DNBDefs.h index df8ca809d412c..d98399aed5e19 100644 --- a/lldb/tools/debugserver/source/DNBDefs.h +++ b/lldb/tools/debugserver/source/DNBDefs.h @@ -358,10 +358,11 @@ struct DNBExecutableImageInfo { struct DNBRegionInfo { public: DNBRegionInfo() - : addr(0), size(0), permissions(0), dirty_pages(), vm_types() {} + : addr(0), size(0), permissions(0), flags(), dirty_pages(), vm_types() {} nub_addr_t addr; nub_addr_t size; uint32_t permissions; + std::vector flags; 
std::vector dirty_pages; std::vector vm_types; }; diff --git a/lldb/tools/debugserver/source/MacOSX/MachTask.h b/lldb/tools/debugserver/source/MacOSX/MachTask.h index 2284f6b99de91..c4a20b80fda95 100644 --- a/lldb/tools/debugserver/source/MacOSX/MachTask.h +++ b/lldb/tools/debugserver/source/MacOSX/MachTask.h @@ -56,6 +56,8 @@ class MachTask { nub_size_t ReadMemory(nub_addr_t addr, nub_size_t size, void *buf); nub_size_t WriteMemory(nub_addr_t addr, nub_size_t size, const void *buf); int GetMemoryRegionInfo(nub_addr_t addr, DNBRegionInfo *region_info); + nub_bool_t GetMemoryTags(nub_addr_t addr, nub_size_t size, + std::vector &tags); std::string GetProfileData(DNBProfileDataScanType scanType); nub_addr_t AllocateMemory(nub_size_t size, uint32_t permissions); diff --git a/lldb/tools/debugserver/source/MacOSX/MachTask.mm b/lldb/tools/debugserver/source/MacOSX/MachTask.mm index 8ae9d4df99657..21156feecba2c 100644 --- a/lldb/tools/debugserver/source/MacOSX/MachTask.mm +++ b/lldb/tools/debugserver/source/MacOSX/MachTask.mm @@ -213,7 +213,7 @@ } //---------------------------------------------------------------------- -// MachTask::MemoryRegionInfo +// MachTask::GetMemoryRegionInfo //---------------------------------------------------------------------- int MachTask::GetMemoryRegionInfo(nub_addr_t addr, DNBRegionInfo *region_info) { task_t task = TaskPort(); @@ -221,14 +221,31 @@ return -1; int ret = m_vm_memory.GetMemoryRegionInfo(task, addr, region_info); - DNBLogThreadedIf(LOG_MEMORY, "MachTask::MemoryRegionInfo ( addr = 0x%8.8llx " - ") => %i (start = 0x%8.8llx, size = 0x%8.8llx, " - "permissions = %u)", + DNBLogThreadedIf(LOG_MEMORY, + "MachTask::GetMemoryRegionInfo ( addr = 0x%8.8llx ) => %i " + "(start = 0x%8.8llx, size = 0x%8.8llx, permissions = %u)", (uint64_t)addr, ret, (uint64_t)region_info->addr, (uint64_t)region_info->size, region_info->permissions); return ret; } +//---------------------------------------------------------------------- +// MachTask::GetMemoryTags +//---------------------------------------------------------------------- +nub_bool_t MachTask::GetMemoryTags(nub_addr_t addr, nub_size_t size, + std::vector &tags) { + task_t task = TaskPort(); + if (task == TASK_NULL) + return false; + + bool ok = m_vm_memory.GetMemoryTags(task, addr, size, tags); + DNBLogThreadedIf(LOG_MEMORY, "MachTask::GetMemoryTags ( addr = 0x%8.8llx, " + "size = 0x%8.8llx ) => %s ( tag count = %llu)", + (uint64_t)addr, (uint64_t)size, (ok ? 
"ok" : "err"), + (uint64_t)tags.size()); + return ok; +} + #define TIME_VALUE_TO_TIMEVAL(a, r) \ do { \ (r)->tv_sec = (a)->seconds; \ diff --git a/lldb/tools/debugserver/source/MacOSX/MachVMMemory.cpp b/lldb/tools/debugserver/source/MacOSX/MachVMMemory.cpp index f3aa4d7d980fd..bb57245e7da48 100644 --- a/lldb/tools/debugserver/source/MacOSX/MachVMMemory.cpp +++ b/lldb/tools/debugserver/source/MacOSX/MachVMMemory.cpp @@ -13,6 +13,7 @@ #include "MachVMMemory.h" #include "DNBLog.h" #include "MachVMRegion.h" +#include #include #include #include @@ -123,6 +124,7 @@ nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, region_info->addr = vmRegion.StartAddress(); region_info->size = vmRegion.GetByteSize(); region_info->permissions = vmRegion.GetDNBPermissions(); + region_info->flags = vmRegion.GetFlags(); region_info->dirty_pages = get_dirty_pages(task, vmRegion.StartAddress(), vmRegion.GetByteSize()); region_info->vm_types = vmRegion.GetMemoryTypes(); @@ -150,6 +152,63 @@ nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, return true; } +// API availability: +// mach_vm_update_pointers_with_remote_tags() - 26.0 +// VM_OFFSET_LIST_MAX macro - 26.1 +#ifndef VM_OFFSET_LIST_MAX +#define VM_OFFSET_LIST_MAX 512 +#endif +using mach_vm_offset_list_t = mach_vm_offset_t *; +using mach_vm_update_pointers_with_remote_tags_t = kern_return_t( + mach_port_name_t target, mach_vm_offset_list_t in_pointer_list, + mach_msg_type_number_t in_pointer_listCnt, + mach_vm_offset_list_t out_pointer_list, + mach_msg_type_number_t *out_pointer_listCnt); + +nub_bool_t MachVMMemory::GetMemoryTags(task_t task, nub_addr_t address, + nub_size_t size, + std::vector &tags) { + static auto mach_vm_update_pointers_with_remote_tags = + (mach_vm_update_pointers_with_remote_tags_t *)dlsym( + RTLD_DEFAULT, "mach_vm_update_pointers_with_remote_tags"); + assert(mach_vm_update_pointers_with_remote_tags); + + // Max batch size supported by mach_vm_update_pointers_with_remote_tags. + constexpr uint32_t max_ptr_count = VM_OFFSET_LIST_MAX; + constexpr uint32_t tag_shift = 56; + constexpr nub_addr_t tag_mask = + ((nub_addr_t)0x0f << tag_shift); // Lower half of top byte. + constexpr uint32_t tag_granule = 16; + + mach_msg_type_number_t ptr_count = + (size / tag_granule) + ((size % tag_granule > 0) ? 1 : 0); + ptr_count = std::min(ptr_count, max_ptr_count); + + auto ptr_arr = std::make_unique(ptr_count); + for (size_t i = 0; i < ptr_count; i++) + ptr_arr[i] = (address + i * tag_granule); + + mach_msg_type_number_t ptr_count_out = ptr_count; + m_err = mach_vm_update_pointers_with_remote_tags( + task, ptr_arr.get(), ptr_count, ptr_arr.get(), &ptr_count_out); + + const bool failed = (m_err.Fail() || (ptr_count != ptr_count_out)); + if (failed || DNBLogCheckLogBit(LOG_MEMORY)) + m_err.LogThreaded("::mach_vm_update_pointers_with_remote_tags ( task = " + "0x%4.4x, ptr_count = %d ) => %i ( ptr_count_out = %d)", + task, ptr_count, m_err.Status(), ptr_count_out); + if (failed) + return false; + + tags.reserve(ptr_count); + for (size_t i = 0; i < ptr_count; i++) { + nub_addr_t tag = (ptr_arr[i] & tag_mask) >> tag_shift; + tags.push_back(tag); + } + + return true; +} + static uint64_t GetPhysicalMemory() { // This doesn't change often at all. No need to poll each time. 
static uint64_t physical_memory = 0; diff --git a/lldb/tools/debugserver/source/MacOSX/MachVMMemory.h b/lldb/tools/debugserver/source/MacOSX/MachVMMemory.h index 05d2c029b9980..8a7616091fbb3 100644 --- a/lldb/tools/debugserver/source/MacOSX/MachVMMemory.h +++ b/lldb/tools/debugserver/source/MacOSX/MachVMMemory.h @@ -28,6 +28,8 @@ class MachVMMemory { nub_size_t PageSize(task_t task); nub_bool_t GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info); + nub_bool_t GetMemoryTags(task_t task, nub_addr_t address, nub_size_t size, + std::vector &tags); nub_bool_t GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo, diff --git a/lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp b/lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp index 97908b4acaf28..9d0d60fdaaed9 100644 --- a/lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp +++ b/lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp @@ -114,6 +114,11 @@ bool MachVMRegion::RestoreProtections() { return false; } +#ifdef VM_REGION_FLAG_JIT_ENABLED +#define VM_REGION_HAS_FLAGS 1 +#else +#define VM_REGION_HAS_FLAGS 0 +#endif bool MachVMRegion::GetRegionForAddress(nub_addr_t addr) { // Restore any original protections and clear our vars Clear(); @@ -140,6 +145,30 @@ bool MachVMRegion::GetRegionForAddress(nub_addr_t addr) { if (failed) return false; if (log_protections) { +#if VM_REGION_HAS_FLAGS + DNBLogThreaded("info = { prot = %u, " + "max_prot = %u, " + "inheritance = 0x%8.8x, " + "offset = 0x%8.8llx, " + "user_tag = 0x%8.8x, " + "ref_count = %u, " + "shadow_depth = %u, " + "ext_pager = %u, " + "share_mode = %u, " + "is_submap = %d, " + "behavior = %d, " + "object_id = 0x%8.8x, " + "user_wired_count = 0x%4.4x, " + "flags = %d }", + m_data.protection, m_data.max_protection, m_data.inheritance, + (uint64_t)m_data.offset, m_data.user_tag, m_data.ref_count, + m_data.shadow_depth, m_data.external_pager, + m_data.share_mode, m_data.is_submap, m_data.behavior, + m_data.object_id, m_data.user_wired_count, m_data.flags); +#else + // Duplicate log call instead of #if-defing printing of flags to avoid + // compiler warning: 'embedding a directive within macro arguments has + // undefined behavior' DNBLogThreaded("info = { prot = %u, " "max_prot = %u, " "inheritance = 0x%8.8x, " @@ -158,6 +187,7 @@ bool MachVMRegion::GetRegionForAddress(nub_addr_t addr) { m_data.shadow_depth, m_data.external_pager, m_data.share_mode, m_data.is_submap, m_data.behavior, m_data.object_id, m_data.user_wired_count); +#endif } m_curr_protection = m_data.protection; @@ -183,6 +213,22 @@ uint32_t MachVMRegion::GetDNBPermissions() const { return dnb_permissions; } +#ifndef VM_REGION_FLAG_MTE_ENABLED +#define VM_REGION_FLAG_MTE_ENABLED 0x4 +#endif +std::vector MachVMRegion::GetFlags() const { + std::vector flags; +#if VM_REGION_HAS_FLAGS + if (m_data.flags & VM_REGION_FLAG_JIT_ENABLED) + flags.push_back("jit"); + if (m_data.flags & VM_REGION_FLAG_TPRO_ENABLED) + flags.push_back("tpro"); + if (m_data.flags & VM_REGION_FLAG_MTE_ENABLED) + flags.push_back("mt"); +#endif + return flags; +} + std::vector MachVMRegion::GetMemoryTypes() const { std::vector types; if (m_data.user_tag == VM_MEMORY_STACK) { diff --git a/lldb/tools/debugserver/source/MacOSX/MachVMRegion.h b/lldb/tools/debugserver/source/MacOSX/MachVMRegion.h index cb7705893c7ed..ba6e1f3bfa70e 100644 --- a/lldb/tools/debugserver/source/MacOSX/MachVMRegion.h +++ 
b/lldb/tools/debugserver/source/MacOSX/MachVMRegion.h @@ -40,9 +40,10 @@ class MachVMRegion { vm_prot_t prot); bool RestoreProtections(); bool GetRegionForAddress(nub_addr_t addr); - std::vector GetMemoryTypes() const; uint32_t GetDNBPermissions() const; + std::vector GetFlags() const; + std::vector GetMemoryTypes() const; const DNBError &GetError() { return m_err; } diff --git a/lldb/tools/debugserver/source/RNBRemote.cpp b/lldb/tools/debugserver/source/RNBRemote.cpp index d9fb22c6a1c06..434e9cfa40fb4 100644 --- a/lldb/tools/debugserver/source/RNBRemote.cpp +++ b/lldb/tools/debugserver/source/RNBRemote.cpp @@ -22,6 +22,9 @@ #include #include #include +#if __has_include() +#include +#endif #include #include #include @@ -502,6 +505,8 @@ void RNBRemote::CreatePacketTable() { memory_region_info, &RNBRemote::HandlePacket_MemoryRegionInfo, NULL, "qMemoryRegionInfo", "Return size and attributes of a memory region that " "contains the given address")); + t.push_back(Packet(get_memory_tags, &RNBRemote::HandlePacket_qMemTags, NULL, + "qMemTags", "Return tags for a region of memory")); t.push_back(Packet(get_profile_data, &RNBRemote::HandlePacket_GetProfileData, NULL, "qGetProfileData", "Return profiling data of the current target.")); @@ -3475,6 +3480,18 @@ static bool GetProcessNameFrom_vAttach(const char *&p, return return_val; } +static bool supports_memory_tagging() { + const char *name = "hw.optional.arm.FEAT_MTE4"; + uint32_t val; + size_t len = sizeof(val); + int ret = ::sysctlbyname(name, &val, &len, nullptr, 0); + if (ret != 0) + return false; + + assert(len == sizeof(val)); + return val; +} + rnb_err_t RNBRemote::HandlePacket_qSupported(const char *p) { uint32_t max_packet_size = 128 * 1024; // 128 KiB is a reasonable max packet // size--debugger can always use less @@ -3505,6 +3522,9 @@ rnb_err_t RNBRemote::HandlePacket_qSupported(const char *p) { reply << "SupportedWatchpointTypes=x86_64;"; #endif + if (supports_memory_tagging()) + reply << "memory-tagging+;"; + return SendPacket(reply.str().c_str()); } @@ -4251,7 +4271,6 @@ rnb_err_t RNBRemote::HandlePacket_MemoryRegionInfo(const char *p) { is in unmapped memory Region lookup cannot be performed on this platform or process is not yet launched - This packet isn't implemented Examples of use: qMemoryRegionInfo:3a55140 @@ -4303,6 +4322,16 @@ rnb_err_t RNBRemote::HandlePacket_MemoryRegionInfo(const char *p) { ostrm << 'x'; ostrm << ';'; + if (!region_info.flags.empty()) { + ostrm << "flags:"; + for (size_t i = 0; i < region_info.flags.size(); i++) { + if (i != 0) + ostrm << " "; // Separator is whitespace + ostrm << region_info.flags[i]; + } + ostrm << ";"; + } + ostrm << "dirty-pages:"; if (region_info.dirty_pages.size() > 0) { bool first = true; @@ -4327,6 +4356,62 @@ rnb_err_t RNBRemote::HandlePacket_MemoryRegionInfo(const char *p) { return SendPacket(ostrm.str()); } +// qMemTags:,: +rnb_err_t RNBRemote::HandlePacket_qMemTags(const char *p) { + nub_process_t pid = m_ctx.ProcessID(); + if (pid == INVALID_NUB_PROCESS) + return SendPacket("OK"); + + StdStringExtractor packet(p); + packet.SetFilePos(strlen("qMemTags:")); + + // Address + nub_addr_t addr = + packet.GetHexMaxU64(StdStringExtractor::BigEndian, INVALID_NUB_ADDRESS); + if (addr == INVALID_NUB_ADDRESS) + return HandlePacket_ILLFORMED(__FILE__, __LINE__, p, + "Invalid/missing address in qMemTags packet"); + // , + if (packet.GetChar() != ',') + return HandlePacket_ILLFORMED(__FILE__, __LINE__, p, + "Invalid qMemTags packet format"); + // Length + uint64_t length = 
packet.GetHexMaxU64(StdStringExtractor::BigEndian, 0); + if (length == 0) + return HandlePacket_ILLFORMED(__FILE__, __LINE__, p, + "Invalid/missing length in qMemTags packet"); + // : + if (packet.GetChar() != ':') + return HandlePacket_ILLFORMED(__FILE__, __LINE__, p, + "Invalid qMemTags packet format"); + // Type + // On the LLDB side this is a `int32_t` serialized as (unsigned) hex, which + // means negative values will show up as large positive values here. Right + // now, we only support MTE (type 1), so we can ignore this complication. + uint32_t type = packet.GetHexMaxU32(StdStringExtractor::BigEndian, 0); + if (type != 1 /* MTE */) + return HandlePacket_ILLFORMED(__FILE__, __LINE__, p, + "Invalid/missing type in qMemTags packet, " + "only MTE (type 1) is supported"); + // + if (packet.GetBytesLeft() != 0) + return HandlePacket_ILLFORMED(__FILE__, __LINE__, p, + "Invalid qMemTags packet format"); + + std::vector tags; + bool ok = DNBProcessGetMemoryTags(pid, addr, length, tags); + if (!ok) + return SendErrorPacket("E91"); + + std::ostringstream ostrm; + ostrm << "m"; // Multi part replies + for (uint8_t tag : tags) { + ostrm << RAWHEX8(tag); // 2 hex chars per tag + } + + return SendPacket(ostrm.str()); +} + // qGetProfileData;scan_type:0xYYYYYYY rnb_err_t RNBRemote::HandlePacket_GetProfileData(const char *p) { nub_process_t pid = m_ctx.ProcessID(); @@ -6162,6 +6247,21 @@ GetCPUTypesFromHost(nub_process_t pid) { return {cputype, cpusubtype}; } +static bool ProcessRunningWithMemoryTagging(pid_t pid) { +#if __has_include() + if (__builtin_available(macOS 26.0, iOS 26.0, tvOS 26.0, watchOS 26.0, + visionOS 26.0, driverkit 25.0, *)) { + os_security_config_t config; + int ret = ::os_security_config_get_for_proc(pid, &config); + if (ret != 0) + return false; + + return (config & OS_SECURITY_CONFIG_MTE); + } +#endif + return false; +} + // Note that all numeric values returned by qProcessInfo are hex encoded, // including the pid and the cpu type. 
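The qMemTags handler added above expects a packet of the form `qMemTags:<addr>,<length>:<type>` with all values hex encoded, accepts only type 1 (MTE), and replies with `m` followed by two hex characters per allocation tag (one tag per 16-byte granule). A rough client-side sketch of that exchange, using hypothetical helper names that are not part of lldb or debugserver:

```python
# Illustrative only: encode a qMemTags request for MTE (type 1) and decode
# debugserver's reply, following the handler's parsing logic.
def encode_qmemtags(addr: int, length: int, mte_type: int = 1) -> str:
    # Address, length, and type are all serialized as hex.
    return f"qMemTags:{addr:x},{length:x}:{mte_type:x}"

def decode_qmemtags_reply(reply: str) -> list[int]:
    # Replies start with 'm' (multi-part), then two hex chars per tag.
    assert reply.startswith("m")
    payload = reply[1:]
    return [int(payload[i:i + 2], 16) for i in range(0, len(payload), 2)]

print(encode_qmemtags(0x100A65A40, 0x30))   # qMemTags:100a65a40,30:1
print(decode_qmemtags_reply("m0f050502"))   # [15, 5, 5, 2]
```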
@@ -6338,6 +6438,9 @@ rnb_err_t RNBRemote::HandlePacket_qProcessInfo(const char *p) { rep << "vendor:apple;"; + if (ProcessRunningWithMemoryTagging(pid)) + rep << "mte:enabled;"; + #if defined(__LITTLE_ENDIAN__) rep << "endian:little;"; #elif defined(__BIG_ENDIAN__) diff --git a/lldb/tools/debugserver/source/RNBRemote.h b/lldb/tools/debugserver/source/RNBRemote.h index ad254ae90e2f7..cf1c978afcd23 100644 --- a/lldb/tools/debugserver/source/RNBRemote.h +++ b/lldb/tools/debugserver/source/RNBRemote.h @@ -121,6 +121,7 @@ class RNBRemote { set_list_threads_in_stop_reply, // 'QListThreadsInStopReply:' sync_thread_state, // 'QSyncThreadState:' memory_region_info, // 'qMemoryRegionInfo:' + get_memory_tags, // 'qMemTags:' get_profile_data, // 'qGetProfileData' set_enable_profiling, // 'QSetEnableAsyncProfiling' enable_compression, // 'QEnableCompression:' @@ -237,6 +238,7 @@ class RNBRemote { rnb_err_t HandlePacket_SaveRegisterState(const char *p); rnb_err_t HandlePacket_RestoreRegisterState(const char *p); rnb_err_t HandlePacket_MemoryRegionInfo(const char *p); + rnb_err_t HandlePacket_qMemTags(const char *p); rnb_err_t HandlePacket_GetProfileData(const char *p); rnb_err_t HandlePacket_SetEnableAsyncProfiling(const char *p); rnb_err_t HandlePacket_QEnableCompression(const char *p); diff --git a/lldb/tools/lldb-dap/package-lock.json b/lldb/tools/lldb-dap/package-lock.json index f3ae6b76be6d0..826f29f70106c 100644 --- a/lldb/tools/lldb-dap/package-lock.json +++ b/lldb/tools/lldb-dap/package-lock.json @@ -8,6 +8,9 @@ "name": "lldb-dap", "version": "0.2.16", "license": "Apache 2.0 License with LLVM exceptions", + "dependencies": { + "chokidar": "^4.0.3" + }, "devDependencies": { "@types/node": "^18.19.41", "@types/tabulator-tables": "^6.2.10", @@ -1301,6 +1304,21 @@ "url": "https://github.com/sponsors/fb55" } }, + "node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, "node_modules/chownr": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", @@ -2746,6 +2764,19 @@ "node": ">= 6" } }, + "node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", diff --git a/lldb/tools/lldb-dap/package.json b/lldb/tools/lldb-dap/package.json index 6566ba3bdee13..e961c2e48b258 100644 --- a/lldb/tools/lldb-dap/package.json +++ b/lldb/tools/lldb-dap/package.json @@ -1,7 +1,7 @@ { "name": "lldb-dap", "displayName": "LLDB DAP", - "version": "0.2.16", + "version": "0.2.18", "publisher": "llvm-vs-code-extensions", "homepage": "https://lldb.llvm.org", "description": "Debugging with LLDB in Visual Studio Code", @@ -27,6 +27,9 @@ "categories": [ "Debuggers" ], + "dependencies": { + "chokidar": "^4.0.3" + }, "devDependencies": { "@types/node": "^18.19.41", "@types/tabulator-tables": 
"^6.2.10", @@ -46,13 +49,14 @@ ], "main": "./out/extension", "scripts": { + "bundle-extension": "npx tsc -p ./ --noEmit && npx esbuild src-ts/extension.ts --bundle --outfile=out/extension.js --external:vscode --format=cjs --platform=node --target=node22 --minify", "bundle-symbols-table-view": "npx tsc -p src-ts/webview --noEmit && npx esbuild src-ts/webview/symbols-table-view.ts --bundle --format=iife --outdir=./out/webview", "bundle-tabulator": "cp node_modules/tabulator-tables/dist/js/tabulator.min.js ./out/webview/ && cp node_modules/tabulator-tables/dist/css/tabulator_midnight.min.css ./out/webview/ && cp node_modules/tabulator-tables/dist/css/tabulator_simple.min.css ./out/webview/", "bundle-webview": "npm run bundle-symbols-table-view && npm run bundle-tabulator", - "vscode:prepublish": "npm run bundle-webview && tsc -p ./", + "vscode:prepublish": "npm run bundle-webview && npm run bundle-extension", "watch": "npm run bundle-webview && tsc -watch -p ./", "format": "npx prettier './src-ts/' --write", - "package": "rm -rf ./out/lldb-dap.vsix && vsce package --out ./out/lldb-dap.vsix", + "package": "rm -rf ./out && vsce package --out ./out/lldb-dap.vsix", "publish": "vsce publish", "vscode-uninstall": "code --uninstall-extension llvm-vs-code-extensions.lldb-dap", "vscode-install": "code --install-extension ./out/lldb-dap.vsix" @@ -347,6 +351,9 @@ { "language": "objective-c" }, + { + "language": "objective-cpp" + }, { "language": "objectpascal" }, @@ -375,6 +382,7 @@ "fortran-modern", "nim", "objective-c", + "objective-cpp", "objectpascal", "pascal", "rust", diff --git a/lldb/tools/lldb-dap/src-ts/lldb-dap-server.ts b/lldb/tools/lldb-dap/src-ts/lldb-dap-server.ts index 280a11d807f6a..4e348965930d9 100644 --- a/lldb/tools/lldb-dap/src-ts/lldb-dap-server.ts +++ b/lldb/tools/lldb-dap/src-ts/lldb-dap-server.ts @@ -1,4 +1,6 @@ +import { FSWatcher, watch as chokidarWatch } from 'chokidar'; import * as child_process from "node:child_process"; +import * as path from "path"; import { isDeepStrictEqual } from "util"; import * as vscode from "vscode"; @@ -12,6 +14,10 @@ export class LLDBDapServer implements vscode.Disposable { private serverProcess?: child_process.ChildProcessWithoutNullStreams; private serverInfo?: Promise<{ host: string; port: number }>; private serverSpawnInfo?: string[]; + // Detects changes to the lldb-dap executable file since the server's startup. + private serverFileWatcher?: FSWatcher; + // Indicates whether the lldb-dap executable file has changed since the server's startup. 
+ private serverFileChanged?: boolean; constructor() { vscode.commands.registerCommand( @@ -83,6 +89,11 @@ export class LLDBDapServer implements vscode.Disposable { }); this.serverProcess = process; this.serverSpawnInfo = this.getSpawnInfo(dapPath, dapArgs, options?.env); + this.serverFileChanged = false; + this.serverFileWatcher = chokidarWatch(dapPath); + this.serverFileWatcher + .on('change', () => this.serverFileChanged = true) + .on('unlink', () => this.serverFileChanged = true); }); return this.serverInfo; } @@ -100,21 +111,27 @@ export class LLDBDapServer implements vscode.Disposable { args: string[], env: NodeJS.ProcessEnv | { [key: string]: string } | undefined, ): Promise { - if (!this.serverProcess || !this.serverInfo || !this.serverSpawnInfo) { + if ( + !this.serverProcess || + !this.serverInfo || + !this.serverSpawnInfo || + !this.serverFileWatcher || + this.serverFileChanged === undefined + ) { return true; } - const newSpawnInfo = this.getSpawnInfo(dapPath, args, env); - if (isDeepStrictEqual(this.serverSpawnInfo, newSpawnInfo)) { - return true; - } + const changeTLDR = []; + const changeDetails = []; - const userInput = await vscode.window.showInformationMessage( - "The arguments to lldb-dap have changed. Would you like to restart the server?", - { - modal: true, - detail: `An existing lldb-dap server (${this.serverProcess.pid}) is running with different arguments. + if (this.serverFileChanged) { + changeTLDR.push("an old binary"); + } + const newSpawnInfo = this.getSpawnInfo(dapPath, args, env); + if (!isDeepStrictEqual(this.serverSpawnInfo, newSpawnInfo)) { + changeTLDR.push("different arguments"); + changeDetails.push(` The previous lldb-dap server was started with: ${this.serverSpawnInfo.join(" ")} @@ -122,7 +139,22 @@ ${this.serverSpawnInfo.join(" ")} The new lldb-dap server will be started with: ${newSpawnInfo.join(" ")} +` + ); + } + + // If the server hasn't changed, continue startup without killing it. + if (changeTLDR.length === 0) { + return true; + } + // The server has changed. Prompt the user to restart it. + const userInput = await vscode.window.showInformationMessage( + "The lldb-dap server has changed. Would you like to restart the server?", + { + modal: true, + detail: `An existing lldb-dap server (${this.serverProcess.pid}) is running with ${changeTLDR.map(s => `*${s}*`).join(" and ")}. 
+${changeDetails.join("\n")} Restarting the server will interrupt any existing debug sessions and start a new server.`, }, "Restart", @@ -130,9 +162,7 @@ Restarting the server will interrupt any existing debug sessions and start a new ); switch (userInput) { case "Restart": - this.serverProcess.kill(); - this.serverProcess = undefined; - this.serverInfo = undefined; + this.dispose(); return true; case "Use Existing": return true; @@ -156,6 +186,10 @@ Restarting the server will interrupt any existing debug sessions and start a new if (this.serverProcess === process) { this.serverProcess = undefined; this.serverInfo = undefined; + this.serverSpawnInfo = undefined; + this.serverFileWatcher?.close(); + this.serverFileWatcher = undefined; + this.serverFileChanged = undefined; } } diff --git a/lldb/unittests/Host/posix/HostTest.cpp b/lldb/unittests/Host/posix/HostTest.cpp index 082edccf4e774..dc75b288ba76a 100644 --- a/lldb/unittests/Host/posix/HostTest.cpp +++ b/lldb/unittests/Host/posix/HostTest.cpp @@ -15,6 +15,10 @@ #include #include +#ifdef __linux__ +#include +#endif // __linux__ + using namespace lldb_private; namespace { @@ -116,7 +120,12 @@ TEST_F(HostTest, GetProcessInfoSetsPriority) { ASSERT_TRUE(Info.IsZombie().has_value()); ASSERT_FALSE(Info.IsZombie().value()); + // CoreDumping was added in kernel version 4.15. +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) ASSERT_TRUE(Info.IsCoreDumping().has_value()); ASSERT_FALSE(Info.IsCoreDumping().value()); +#else + ASSERT_FALSE(Info.IsCoreDumping().has_value()); +#endif } #endif diff --git a/lldb/unittests/Platform/Android/AdbClientTest.cpp b/lldb/unittests/Platform/Android/AdbClientTest.cpp index 0808b96f69fc8..9b3a6fa9ceb33 100644 --- a/lldb/unittests/Platform/Android/AdbClientTest.cpp +++ b/lldb/unittests/Platform/Android/AdbClientTest.cpp @@ -6,8 +6,11 @@ // //===----------------------------------------------------------------------===// -#include "gtest/gtest.h" #include "Plugins/Platform/Android/AdbClient.h" +#include "lldb/Host/Socket.h" +#include "lldb/Host/common/TCPSocket.h" +#include "gtest/gtest.h" +#include #include static void set_env(const char *var, const char *value) { @@ -20,32 +23,121 @@ static void set_env(const char *var, const char *value) { using namespace lldb; using namespace lldb_private; - -namespace lldb_private { -namespace platform_android { +using namespace lldb_private::platform_android; class AdbClientTest : public ::testing::Test { public: - void SetUp() override { set_env("ANDROID_SERIAL", ""); } + void SetUp() override { + set_env("ANDROID_SERIAL", ""); + set_env("ANDROID_ADB_SERVER_PORT", ""); + } - void TearDown() override { set_env("ANDROID_SERIAL", ""); } + void TearDown() override { + set_env("ANDROID_SERIAL", ""); + set_env("ANDROID_ADB_SERVER_PORT", ""); + } }; -TEST(AdbClientTest, CreateByDeviceId) { - AdbClient adb; - Status error = AdbClient::CreateByDeviceID("device1", adb); - EXPECT_TRUE(error.Success()); - EXPECT_EQ("device1", adb.GetDeviceID()); +TEST_F(AdbClientTest, ResolveDeviceId_ExplicitDeviceId) { + auto result = AdbClient::ResolveDeviceID("device1"); + EXPECT_TRUE(static_cast(result)); + EXPECT_EQ("device1", *result); } -TEST(AdbClientTest, CreateByDeviceId_ByEnvVar) { +TEST_F(AdbClientTest, ResolveDeviceId_ByEnvVar) { set_env("ANDROID_SERIAL", "device2"); - AdbClient adb; - Status error = AdbClient::CreateByDeviceID("", adb); - EXPECT_TRUE(error.Success()); - EXPECT_EQ("device2", adb.GetDeviceID()); + auto result = AdbClient::ResolveDeviceID(""); + 
EXPECT_TRUE(static_cast(result)); + EXPECT_EQ("device2", *result); +} + +TEST_F(AdbClientTest, ResolveDeviceId_PrefersExplicitOverEnvVar) { + set_env("ANDROID_SERIAL", "env_device"); + + // Explicit device ID should take precedence over environment variable + auto result = AdbClient::ResolveDeviceID("explicit_device"); + EXPECT_TRUE(static_cast(result)); + EXPECT_EQ("explicit_device", *result); +} + +TEST_F(AdbClientTest, AdbClient_Constructor_StoresDeviceId) { + AdbClient client("test_device_123"); + EXPECT_EQ(client.GetDeviceID(), "test_device_123"); +} + +TEST_F(AdbClientTest, AdbClient_DefaultConstructor) { + AdbClient client; + EXPECT_EQ(client.GetDeviceID(), ""); } -} // end namespace platform_android -} // end namespace lldb_private +TEST_F(AdbClientTest, AdbSyncService_Constructor_StoresDeviceId) { + AdbSyncService sync("device123"); + EXPECT_EQ(sync.GetDeviceId(), "device123"); +} + +TEST_F(AdbClientTest, AdbSyncService_OperationsFailWhenNotConnected) { + AdbSyncService sync_service("test_device"); + + // Verify service is not connected initially + EXPECT_FALSE(sync_service.IsConnected()); + + // File operations should fail when not connected + FileSpec remote_file("/data/test.txt"); + FileSpec local_file("/tmp/test.txt"); + uint32_t mode, size, mtime; + + Status stat_result = sync_service.Stat(remote_file, mode, size, mtime); + EXPECT_TRUE(stat_result.Fail()); + + Status pull_result = sync_service.PullFile(remote_file, local_file); + EXPECT_TRUE(pull_result.Fail()); + + Status push_result = sync_service.PushFile(local_file, remote_file); + EXPECT_TRUE(push_result.Fail()); +} + +static uint16_t FindUnusedPort() { + auto temp_socket = std::make_unique(true); + Status error = temp_socket->Listen("localhost:0", 1); + if (error.Fail()) { + return 0; // fallback + } + uint16_t port = temp_socket->GetLocalPortNumber(); + temp_socket.reset(); // Close the socket to free the port + return port; +} + +#ifndef _WIN32 +// This test is disabled on Windows due to platform-specific socket behavior +// that causes assertion failures in TCPSocket::Listen() +TEST_F(AdbClientTest, RealTcpConnection) { + uint16_t unused_port = FindUnusedPort(); + ASSERT_NE(unused_port, 0) << "Failed to find an unused port"; + + std::string port_str = std::to_string(unused_port); + set_env("ANDROID_ADB_SERVER_PORT", port_str.c_str()); + + AdbClient client; + const auto status1 = client.Connect(); + EXPECT_FALSE(status1.Success()) + << "Connection should fail when no server is listening on port " + << unused_port; + + // now start a server on the port and try again + auto listen_socket = std::make_unique(true); + std::string listen_address = "localhost:" + port_str; + Status error = listen_socket->Listen(listen_address.c_str(), 5); + ASSERT_TRUE(error.Success()) << "Failed to create listening socket on port " + << unused_port << ": " << error.AsCString(); + + // Verify the socket is listening on the expected port + ASSERT_EQ(listen_socket->GetLocalPortNumber(), unused_port) + << "Socket is not listening on the expected port"; + + const auto status2 = client.Connect(); + EXPECT_TRUE(status2.Success()) + << "Connection should succeed when server is listening on port " + << unused_port; +} +#endif // _WIN32 diff --git a/lldb/unittests/Platform/Android/PlatformAndroidTest.cpp b/lldb/unittests/Platform/Android/PlatformAndroidTest.cpp index d021562d94d28..514bce1c71576 100644 --- a/lldb/unittests/Platform/Android/PlatformAndroidTest.cpp +++ b/lldb/unittests/Platform/Android/PlatformAndroidTest.cpp @@ -8,8 +8,6 @@ #include 
"Plugins/Platform/Android/PlatformAndroid.h" #include "Plugins/Platform/Android/PlatformAndroidRemoteGDBServer.h" -#include "TestingSupport/SubsystemRAII.h" -#include "TestingSupport/TestUtilities.h" #include "lldb/Utility/Connection.h" #include "gmock/gmock.h" @@ -20,212 +18,281 @@ using namespace testing; namespace { -class MockSyncService : public AdbClient::SyncService { -public: - MockSyncService() : SyncService(std::unique_ptr()) {} - - MOCK_METHOD2(PullFile, - Status(const FileSpec &remote_file, const FileSpec &local_file)); - MOCK_METHOD4(Stat, Status(const FileSpec &remote_file, uint32_t &mode, - uint32_t &size, uint32_t &mtime)); -}; - -typedef std::unique_ptr SyncServiceUP; - class MockAdbClient : public AdbClient { public: - explicit MockAdbClient() : AdbClient("mock") {} + explicit MockAdbClient() : AdbClient() {} MOCK_METHOD3(ShellToFile, Status(const char *command, std::chrono::milliseconds timeout, const FileSpec &output_file_spec)); - MOCK_METHOD1(GetSyncService, SyncServiceUP(Status &error)); }; class PlatformAndroidTest : public PlatformAndroid, public ::testing::Test { public: PlatformAndroidTest() : PlatformAndroid(false) { m_remote_platform_sp = PlatformSP(new PlatformAndroidRemoteGDBServer()); + + // Set up default mock behavior to avoid uninteresting call warnings + ON_CALL(*this, GetSyncService(_)) + .WillByDefault([](Status &error) -> std::unique_ptr { + error = Status::FromErrorString("Sync service unavailable"); + return nullptr; + }); } MOCK_METHOD1(GetAdbClient, AdbClientUP(Status &error)); MOCK_METHOD0(GetPropertyPackageName, llvm::StringRef()); + MOCK_METHOD1(GetSyncService, std::unique_ptr(Status &error)); + + // Make GetSyncService public for testing + using PlatformAndroid::GetSyncService; }; } // namespace -TEST_F(PlatformAndroidTest, DownloadModuleSliceWithAdbClientError) { +TEST_F(PlatformAndroidTest, + DownloadModuleSlice_AdbClientError_FailsGracefully) { EXPECT_CALL(*this, GetAdbClient(_)) - .Times(1) .WillOnce(DoAll(WithArg<0>([](auto &arg) { arg = Status::FromErrorString( "Failed to create AdbClient"); }), Return(ByMove(AdbClientUP())))); - EXPECT_TRUE( - DownloadModuleSlice( - FileSpec("/system/app/Test/Test.apk!/lib/arm64-v8a/libtest.so"), 4096, - 3600, FileSpec()) - .Fail()); -} - -TEST_F(PlatformAndroidTest, DownloadModuleSliceWithNormalFile) { - auto sync_service = new MockSyncService(); - EXPECT_CALL(*sync_service, Stat(FileSpec("/system/lib64/libc.so"), _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgReferee<1>(1), Return(Status()))); - EXPECT_CALL(*sync_service, PullFile(FileSpec("/system/lib64/libc.so"), _)) - .Times(1) - .WillOnce(Return(Status())); - - auto adb_client = new MockAdbClient(); - EXPECT_CALL(*adb_client, GetSyncService(_)) - .Times(1) - .WillOnce(Return(ByMove(SyncServiceUP(sync_service)))); - - EXPECT_CALL(*this, GetAdbClient(_)) - .Times(1) - .WillOnce(Return(ByMove(AdbClientUP(adb_client)))); + Status result = DownloadModuleSlice( + FileSpec("/system/app/Test/Test.apk!/lib/arm64-v8a/libtest.so"), 4096, + 3600, FileSpec("/tmp/libtest.so")); - EXPECT_TRUE( - DownloadModuleSlice(FileSpec("/system/lib64/libc.so"), 0, 0, FileSpec()) - .Success()); + EXPECT_TRUE(result.Fail()); + EXPECT_THAT(result.AsCString(), HasSubstr("Failed to create AdbClient")); } -TEST_F(PlatformAndroidTest, DownloadModuleSliceWithZipFile) { - auto adb_client = new MockAdbClient(); +TEST_F(PlatformAndroidTest, DownloadModuleSlice_ZipFile_UsesCorrectDdCommand) { + auto *adb_client = new MockAdbClient(); EXPECT_CALL(*adb_client, ShellToFile(StrEq("dd 
if='/system/app/Test/Test.apk' " "iflag=skip_bytes,count_bytes " "skip=4096 count=3600 status=none"), _, _)) - .Times(1) .WillOnce(Return(Status())); + EXPECT_CALL(*this, GetPropertyPackageName()) + .WillOnce(Return(llvm::StringRef(""))); + EXPECT_CALL(*this, GetAdbClient(_)) - .Times(1) .WillOnce(Return(ByMove(AdbClientUP(adb_client)))); - EXPECT_TRUE( - DownloadModuleSlice( - FileSpec("/system/app/Test/Test.apk!/lib/arm64-v8a/libtest.so"), 4096, - 3600, FileSpec()) - .Success()); + Status result = DownloadModuleSlice( + FileSpec("/system/app/Test/Test.apk!/lib/arm64-v8a/libtest.so"), 4096, + 3600, FileSpec("/tmp/libtest.so")); + + EXPECT_TRUE(result.Success()); } -TEST_F(PlatformAndroidTest, DownloadModuleSliceWithZipFileAndRunAs) { - auto adb_client = new MockAdbClient(); +TEST_F(PlatformAndroidTest, + DownloadModuleSlice_ZipFileWithRunAs_UsesRunAsCommand) { + auto *adb_client = new MockAdbClient(); EXPECT_CALL(*adb_client, ShellToFile(StrEq("run-as 'com.example.test' " "dd if='/system/app/Test/Test.apk' " "iflag=skip_bytes,count_bytes " "skip=4096 count=3600 status=none"), _, _)) - .Times(1) .WillOnce(Return(Status())); EXPECT_CALL(*this, GetPropertyPackageName()) - .Times(1) .WillOnce(Return(llvm::StringRef("com.example.test"))); EXPECT_CALL(*this, GetAdbClient(_)) - .Times(1) .WillOnce(Return(ByMove(AdbClientUP(adb_client)))); - EXPECT_TRUE( - DownloadModuleSlice( - FileSpec("/system/app/Test/Test.apk!/lib/arm64-v8a/libtest.so"), 4096, - 3600, FileSpec()) - .Success()); + Status result = DownloadModuleSlice( + FileSpec("/system/app/Test/Test.apk!/lib/arm64-v8a/libtest.so"), 4096, + 3600, FileSpec("/tmp/libtest.so")); + + EXPECT_TRUE(result.Success()); } -TEST_F(PlatformAndroidTest, GetFileWithNormalFile) { - auto sync_service = new MockSyncService(); - EXPECT_CALL(*sync_service, Stat(FileSpec("/data/local/tmp/test"), _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgReferee<1>(1), Return(Status()))); - EXPECT_CALL(*sync_service, PullFile(FileSpec("/data/local/tmp/test"), _)) - .Times(1) +TEST_F(PlatformAndroidTest, + DownloadModuleSlice_LargeFile_CalculatesParametersCorrectly) { + const uint64_t large_offset = 100 * 1024 * 1024; // 100MB offset + const uint64_t large_size = 50 * 1024 * 1024; // 50MB size + + auto *adb_client = new MockAdbClient(); + EXPECT_CALL(*adb_client, + ShellToFile(StrEq("dd if='/system/app/Large.apk' " + "iflag=skip_bytes,count_bytes " + "skip=104857600 count=52428800 status=none"), + _, _)) .WillOnce(Return(Status())); - auto adb_client = new MockAdbClient(); - EXPECT_CALL(*adb_client, GetSyncService(_)) - .Times(1) - .WillOnce(Return(ByMove(SyncServiceUP(sync_service)))); + EXPECT_CALL(*this, GetPropertyPackageName()) + .WillOnce(Return(llvm::StringRef(""))); EXPECT_CALL(*this, GetAdbClient(_)) - .Times(1) .WillOnce(Return(ByMove(AdbClientUP(adb_client)))); - EXPECT_TRUE(GetFile(FileSpec("/data/local/tmp/test"), FileSpec()).Success()); + Status result = DownloadModuleSlice( + FileSpec("/system/app/Large.apk!/lib/arm64-v8a/large.so"), large_offset, + large_size, FileSpec("/tmp/large.so")); + + EXPECT_TRUE(result.Success()); } -TEST_F(PlatformAndroidTest, GetFileWithCatFallback) { - auto sync_service = new MockSyncService(); - EXPECT_CALL( - *sync_service, - Stat(FileSpec("/data/data/com.example.app/lib-main/libtest.so"), _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgReferee<1>(0), Return(Status()))); +TEST_F(PlatformAndroidTest, + GetFile_SyncServiceUnavailable_FallsBackToShellCat) { + auto *adb_client = new MockAdbClient(); + EXPECT_CALL(*adb_client, + 
ShellToFile(StrEq("cat '/data/local/tmp/test'"), _, _)) + .WillOnce(Return(Status())); - auto adb_client0 = new MockAdbClient(); - EXPECT_CALL(*adb_client0, GetSyncService(_)) - .Times(1) - .WillOnce(Return(ByMove(SyncServiceUP(sync_service)))); + EXPECT_CALL(*this, GetPropertyPackageName()) + .WillOnce(Return(llvm::StringRef(""))); + + EXPECT_CALL(*this, GetAdbClient(_)) + .WillOnce(DoAll(WithArg<0>([](auto &arg) { arg.Clear(); }), + Return(ByMove(AdbClientUP(adb_client))))); + + EXPECT_CALL(*this, GetSyncService(_)) + .WillOnce([](Status &error) -> std::unique_ptr { + error = Status::FromErrorString("Sync service unavailable"); + return nullptr; + }); + + Status result = + GetFile(FileSpec("/data/local/tmp/test"), FileSpec("/tmp/test")); + EXPECT_TRUE(result.Success()); +} - auto adb_client1 = new MockAdbClient(); +TEST_F(PlatformAndroidTest, GetFile_WithRunAs_UsesRunAsInShellCommand) { + auto *adb_client = new MockAdbClient(); EXPECT_CALL( - *adb_client1, - ShellToFile(StrEq("cat '/data/data/com.example.app/lib-main/libtest.so'"), + *adb_client, + ShellToFile(StrEq("run-as 'com.example.app' " + "cat '/data/data/com.example.app/lib-main/libtest.so'"), _, _)) - .Times(1) .WillOnce(Return(Status())); + EXPECT_CALL(*this, GetPropertyPackageName()) + .WillOnce(Return(llvm::StringRef("com.example.app"))); + EXPECT_CALL(*this, GetAdbClient(_)) - .Times(2) - .WillOnce(Return(ByMove(AdbClientUP(adb_client0)))) - .WillOnce(Return(ByMove(AdbClientUP(adb_client1)))); + .WillOnce(DoAll(WithArg<0>([](auto &arg) { arg.Clear(); }), + Return(ByMove(AdbClientUP(adb_client))))); + + EXPECT_CALL(*this, GetSyncService(_)) + .WillOnce([](Status &error) -> std::unique_ptr { + error = Status::FromErrorString("Sync service unavailable"); + return nullptr; + }); - EXPECT_TRUE( + Status result = GetFile(FileSpec("/data/data/com.example.app/lib-main/libtest.so"), - FileSpec()) - .Success()); + FileSpec("/tmp/libtest.so")); + EXPECT_TRUE(result.Success()); } -TEST_F(PlatformAndroidTest, GetFileWithCatFallbackAndRunAs) { - auto sync_service = new MockSyncService(); - EXPECT_CALL( - *sync_service, - Stat(FileSpec("/data/data/com.example.app/lib-main/libtest.so"), _, _, _)) - .Times(1) - .WillOnce(DoAll(SetArgReferee<1>(0), Return(Status()))); +TEST_F(PlatformAndroidTest, GetFile_FilenameWithSingleQuotes_Rejected) { + EXPECT_CALL(*this, GetSyncService(_)) + .WillOnce([](Status &error) -> std::unique_ptr { + error = Status::FromErrorString("Sync service unavailable"); + return nullptr; + }); - auto adb_client0 = new MockAdbClient(); - EXPECT_CALL(*adb_client0, GetSyncService(_)) - .Times(1) - .WillOnce(Return(ByMove(SyncServiceUP(sync_service)))); + Status result = + GetFile(FileSpec("/test/file'with'quotes"), FileSpec("/tmp/output")); - auto adb_client1 = new MockAdbClient(); - EXPECT_CALL( - *adb_client1, - ShellToFile(StrEq("run-as 'com.example.app' " - "cat '/data/data/com.example.app/lib-main/libtest.so'"), - _, _)) - .Times(1) + EXPECT_TRUE(result.Fail()); + EXPECT_THAT(result.AsCString(), HasSubstr("single-quotes")); +} + +TEST_F(PlatformAndroidTest, + DownloadModuleSlice_FilenameWithSingleQuotes_Rejected) { + Status result = DownloadModuleSlice(FileSpec("/test/file'with'quotes"), 100, + 200, FileSpec("/tmp/output")); + + EXPECT_TRUE(result.Fail()); + EXPECT_THAT(result.AsCString(), HasSubstr("single-quotes")); +} + +TEST_F(PlatformAndroidTest, GetFile_NetworkTimeout_PropagatesErrorCorrectly) { + auto *adb_client = new MockAdbClient(); + EXPECT_CALL(*adb_client, ShellToFile(_, _, _)) + 
.WillOnce(Return(Status::FromErrorString("Network timeout"))); + + EXPECT_CALL(*this, GetPropertyPackageName()) + .WillOnce(Return(llvm::StringRef(""))); + + EXPECT_CALL(*this, GetAdbClient(_)) + .WillOnce(DoAll(WithArg<0>([](auto &arg) { arg.Clear(); }), + Return(ByMove(AdbClientUP(adb_client))))); + + EXPECT_CALL(*this, GetSyncService(_)) + .WillOnce([](Status &error) -> std::unique_ptr { + error = Status::FromErrorString("Sync service unavailable"); + return nullptr; + }); + + Status result = + GetFile(FileSpec("/data/large/file.so"), FileSpec("/tmp/large.so")); + EXPECT_TRUE(result.Fail()); + EXPECT_THAT(result.AsCString(), HasSubstr("Network timeout")); +} + +TEST_F(PlatformAndroidTest, SyncService_ConnectionFailsGracefully) { + // Constructor should succeed even with a failing connection + AdbSyncService sync_service("test-device"); + + // The service should report as not connected initially + EXPECT_FALSE(sync_service.IsConnected()); + EXPECT_EQ(sync_service.GetDeviceId(), "test-device"); + + // Operations should fail gracefully when connection setup fails + FileSpec remote_file("/data/test.txt"); + FileSpec local_file("/tmp/test.txt"); + uint32_t mode, size, mtime; + + Status result = sync_service.Stat(remote_file, mode, size, mtime); + EXPECT_TRUE(result.Fail()); +} + +TEST_F(PlatformAndroidTest, GetRunAs_FormatsPackageNameCorrectly) { + // Empty package name + EXPECT_CALL(*this, GetPropertyPackageName()) + .WillOnce(Return(llvm::StringRef(""))); + EXPECT_EQ(this->GetRunAs(), ""); + + // Valid package name + EXPECT_CALL(*this, GetPropertyPackageName()) + .WillOnce(Return(llvm::StringRef("com.example.test"))); + EXPECT_EQ(this->GetRunAs(), "run-as 'com.example.test' "); +} + +TEST_F(PlatformAndroidTest, + DownloadModuleSlice_ZeroOffset_CallsGetFileInsteadOfDd) { + // When offset=0, DownloadModuleSlice calls GetFile which uses 'cat', not 'dd' + // We need to ensure the sync service fails so GetFile falls back to shell cat + auto *adb_client = new MockAdbClient(); + EXPECT_CALL(*adb_client, + ShellToFile(StrEq("cat '/system/lib64/libc.so'"), _, _)) .WillOnce(Return(Status())); EXPECT_CALL(*this, GetPropertyPackageName()) - .Times(1) - .WillOnce(Return(llvm::StringRef("com.example.app"))); + .WillOnce(Return(llvm::StringRef(""))); EXPECT_CALL(*this, GetAdbClient(_)) - .Times(2) - .WillOnce(Return(ByMove(AdbClientUP(adb_client0)))) - .WillOnce(Return(ByMove(AdbClientUP(adb_client1)))); + .WillOnce(DoAll(WithArg<0>([](auto &arg) { arg.Clear(); }), + Return(ByMove(AdbClientUP(adb_client))))); - EXPECT_TRUE( - GetFile(FileSpec("/data/data/com.example.app/lib-main/libtest.so"), - FileSpec()) - .Success()); + // Mock GetSyncService to fail, forcing GetFile to use shell cat fallback + EXPECT_CALL(*this, GetSyncService(_)) + .WillOnce(DoAll(WithArg<0>([](auto &arg) { + arg = + Status::FromErrorString("Sync service unavailable"); + }), + Return(ByMove(std::unique_ptr())))); + + Status result = DownloadModuleSlice(FileSpec("/system/lib64/libc.so"), 0, 0, + FileSpec("/tmp/libc.so")); + EXPECT_TRUE(result.Success()); } diff --git a/lldb/unittests/Symbol/TestDWARFCallFrameInfo.cpp b/lldb/unittests/Symbol/TestDWARFCallFrameInfo.cpp index e113b8ca99341..c52e9a7387e14 100644 --- a/lldb/unittests/Symbol/TestDWARFCallFrameInfo.cpp +++ b/lldb/unittests/Symbol/TestDWARFCallFrameInfo.cpp @@ -380,3 +380,288 @@ void DWARFCallFrameInfoTest::TestValOffset(DWARFCallFrameInfo::Type type, TEST_F(DWARFCallFrameInfoTest, ValOffset_dwarf3) { TestValOffset(DWARFCallFrameInfo::DWARF, "debug_frame3"); } + +// Test 
that we correctly handle invalid FDE entries that have CIE ID values +TEST_F(DWARFCallFrameInfoTest, InvalidFDEWithCIEID_dwarf32) { + // Create an FDE with cie_offset of 0xFFFFFFFF (DW_CIE_ID) which is invalid + auto ExpectedFile = TestFile::fromYaml(R"( +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_X86_64 +Sections: + - Name: .text + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x0000000000000260 + AddressAlign: 0x0000000000000010 + Content: 554889E5897DFC8B45FC5DC3 + - Name: .debug_frame + Type: SHT_PROGBITS + AddressAlign: 0x0000000000000008 + # First, a valid CIE + # 00000000 0000000000000014 ffffffff CIE + # Version: 3 + # Augmentation: "" + # Code alignment factor: 1 + # Data alignment factor: -8 + # Return address column: 16 + Content: 14000000FFFFFFFF03000178100C0708900100000000000018000000FFFFFFFF60020000000000000C00000000000000 + # Then an invalid FDE with CIE pointer = 0xFFFFFFFF (which would make it look like a CIE) + # 00000018 0000000000000018 ffffffff FDE cie=ffffffff pc=0000000000000260..000000000000026c + # The cie offset of 0xFFFFFFFF is invalid for an FDE in debug_frame +Symbols: + - Name: test_invalid + Type: STT_FUNC + Section: .text + Value: 0x0000000000000260 + Size: 0x000000000000000C + Binding: STB_GLOBAL +... +)"); + ASSERT_THAT_EXPECTED(ExpectedFile, llvm::Succeeded()); + + auto module_sp = std::make_shared(ExpectedFile->moduleSpec()); + SectionList *list = module_sp->GetSectionList(); + ASSERT_NE(nullptr, list); + + auto section_sp = list->FindSectionByType(eSectionTypeDWARFDebugFrame, false); + ASSERT_NE(nullptr, section_sp); + + DWARFCallFrameInfo cfi(*module_sp->GetObjectFile(), section_sp, + DWARFCallFrameInfo::DWARF); + + // This should trigger our assertion or return nullptr because the FDE is + // invalid + const Symbol *sym = module_sp->FindFirstSymbolWithNameAndType( + ConstString("test_invalid"), eSymbolTypeAny); + ASSERT_NE(nullptr, sym); + + std::unique_ptr plan_up = cfi.GetUnwindPlan(sym->GetAddress()); + // The plan should be null because we have an invalid FDE + EXPECT_EQ(nullptr, plan_up); +} + +// Test that we correctly handle invalid FDE entries that have CIE ID values +TEST_F(DWARFCallFrameInfoTest, InvalidFDEWithCIEID_dwarf64) { + // Create an FDE with cie_offset of 0xFFFFFFFFFFFFFFFF (DW64_CIE_ID) which is + // invalid + auto ExpectedFile = TestFile::fromYaml(R"( +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_X86_64 +Sections: + - Name: .text + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x0000000000000260 + AddressAlign: 0x0000000000000010 + Content: 554889E5897DFC8B45FC5DC3 + - Name: .debug_frame + Type: SHT_PROGBITS + AddressAlign: 0x0000000000000008 + # DWARF64 format CIE + # Initial length: 0xFFFFFFFF followed by 64-bit length + # 00000000 ffffffff 0000000000000014 ffffffffffffffff CIE + Content: FFFFFFFF1400000000000000FFFFFFFFFFFFFFFF03000178100C0708900100000000FFFFFFFF1800000000000000FFFFFFFFFFFFFFFF60020000000000000C00000000000000 + # DWARF64 FDE with invalid CIE pointer = 0xFFFFFFFFFFFFFFFF + # Initial length: 0xFFFFFFFF, followed by 64-bit length (0x18) + # Then 64-bit CIE pointer: 0xFFFFFFFFFFFFFFFF (which is DW64_CIE_ID, invalid for FDE) +Symbols: + - Name: test_invalid64 + Type: STT_FUNC + Section: .text + Value: 0x0000000000000260 + Size: 0x000000000000000C + Binding: STB_GLOBAL +... 
+)"); + ASSERT_THAT_EXPECTED(ExpectedFile, llvm::Succeeded()); + + auto module_sp = std::make_shared(ExpectedFile->moduleSpec()); + SectionList *list = module_sp->GetSectionList(); + ASSERT_NE(nullptr, list); + + auto section_sp = list->FindSectionByType(eSectionTypeDWARFDebugFrame, false); + ASSERT_NE(nullptr, section_sp); + + DWARFCallFrameInfo cfi(*module_sp->GetObjectFile(), section_sp, + DWARFCallFrameInfo::DWARF); + + const Symbol *sym = module_sp->FindFirstSymbolWithNameAndType( + ConstString("test_invalid64"), eSymbolTypeAny); + ASSERT_NE(nullptr, sym); + + std::unique_ptr plan_up = cfi.GetUnwindPlan(sym->GetAddress()); + // The plan should be null because we have an invalid FDE + EXPECT_EQ(nullptr, plan_up); +} + +// Test valid CIE markers in eh_frame format +TEST_F(DWARFCallFrameInfoTest, ValidCIEMarkers_eh_frame) { + auto ExpectedFile = TestFile::fromYaml(R"( +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_DYN + Machine: EM_X86_64 + Entry: 0x0000000000000260 +Sections: + - Name: .text + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x0000000000000260 + AddressAlign: 0x0000000000000010 + Content: 554889E5897DFC8B45FC5DC3 + - Name: .eh_frame + Type: SHT_X86_64_UNWIND + Flags: [ SHF_ALLOC ] + Address: 0x0000000000000290 + AddressAlign: 0x0000000000000008 + # eh_frame content + # CIE + FDE that works with address 0x260 + Content: 1400000000000000017A5200017810011B0C0708900100001C0000001C000000B0FFFFFF0C00000000410E108602430D0600000000000000 +Symbols: + - Name: simple_function + Type: STT_FUNC + Section: .text + Value: 0x0000000000000260 + Size: 0x000000000000000F + Binding: STB_GLOBAL +... +)"); + ASSERT_THAT_EXPECTED(ExpectedFile, llvm::Succeeded()); + + auto module_sp = std::make_shared(ExpectedFile->moduleSpec()); + SectionList *list = module_sp->GetSectionList(); + ASSERT_NE(nullptr, list); + + auto section_sp = list->FindSectionByType(eSectionTypeEHFrame, false); + ASSERT_NE(nullptr, section_sp); + + DWARFCallFrameInfo cfi(*module_sp->GetObjectFile(), section_sp, + DWARFCallFrameInfo::EH); + + const Symbol *sym = module_sp->FindFirstSymbolWithNameAndType( + ConstString("simple_function"), eSymbolTypeAny); + ASSERT_NE(nullptr, sym); + + std::unique_ptr plan_up = cfi.GetUnwindPlan(sym->GetAddress()); + // Should succeed with valid CIE and FDE + ASSERT_NE(nullptr, plan_up); + EXPECT_GE(plan_up->GetRowCount(), 1); +} + +// Test valid CIE markers in debug_frame DWARF32 format +TEST_F(DWARFCallFrameInfoTest, ValidCIEMarkers_dwarf32) { + auto ExpectedFile = TestFile::fromYaml(R"( +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_X86_64 +Sections: + - Name: .text + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x0000000000001130 + AddressAlign: 0x0000000000000010 + Content: 554889E5897DFC8B45FC83C0015DC3 + - Name: .debug_frame + Type: SHT_PROGBITS + AddressAlign: 0x0000000000000008 + # debug_frame content in DWARF32 format + # CIE (length=0x14, CIE_id=0xFFFFFFFF, version=4) + # FDE (length=0x24, CIE_offset=0) + Content: 14000000FFFFFFFF040008000178100C0708900100000000240000000000000030110000000000000F00000000000000410E108602430D064A0C070800000000 +Symbols: + - Name: simple_function + Type: STT_FUNC + Section: .text + Value: 0x0000000000001130 + Size: 0x000000000000000F + Binding: STB_GLOBAL +... 
+)"); + ASSERT_THAT_EXPECTED(ExpectedFile, llvm::Succeeded()); + + auto module_sp = std::make_shared(ExpectedFile->moduleSpec()); + SectionList *list = module_sp->GetSectionList(); + ASSERT_NE(nullptr, list); + + auto section_sp = list->FindSectionByType(eSectionTypeDWARFDebugFrame, false); + ASSERT_NE(nullptr, section_sp); + + DWARFCallFrameInfo cfi(*module_sp->GetObjectFile(), section_sp, + DWARFCallFrameInfo::DWARF); + + const Symbol *sym = module_sp->FindFirstSymbolWithNameAndType( + ConstString("simple_function"), eSymbolTypeAny); + ASSERT_NE(nullptr, sym); + + std::unique_ptr plan_up = cfi.GetUnwindPlan(sym->GetAddress()); + // Should succeed with valid CIE and FDE + ASSERT_NE(nullptr, plan_up); + EXPECT_GE(plan_up->GetRowCount(), 1); +} + +// Test valid CIE markers in debug_frame DWARF64 format +TEST_F(DWARFCallFrameInfoTest, ValidCIEMarkers_dwarf64) { + auto ExpectedFile = TestFile::fromYaml(R"( +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_X86_64 +Sections: + - Name: .text + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC, SHF_EXECINSTR ] + Address: 0x0000000000001130 + AddressAlign: 0x0000000000000010 + Content: 554889E5897DFC8B45FC83C0015DC3 + - Name: .debug_frame + Type: SHT_PROGBITS + AddressAlign: 0x0000000000000008 + # debug_frame content in DWARF64 format + # CIE: length_marker=0xFFFFFFFF, length=0x14, CIE_id=0xFFFFFFFFFFFFFFFF, version=4 + # FDE: length_marker=0xFFFFFFFF, length=0x24, CIE_offset=0x0 (points to CIE) + Content: FFFFFFFF1400000000000000FFFFFFFFFFFFFFFF040008000178100C07089001FFFFFFFF2400000000000000000000000000000030110000000000000F00000000000000410E108602430D064A0C0708 +Symbols: + - Name: simple_function + Type: STT_FUNC + Section: .text + Value: 0x0000000000001130 + Size: 0x000000000000000F + Binding: STB_GLOBAL +... 
+)"); + ASSERT_THAT_EXPECTED(ExpectedFile, llvm::Succeeded()); + + auto module_sp = std::make_shared(ExpectedFile->moduleSpec()); + SectionList *list = module_sp->GetSectionList(); + ASSERT_NE(nullptr, list); + + auto section_sp = list->FindSectionByType(eSectionTypeDWARFDebugFrame, false); + ASSERT_NE(nullptr, section_sp); + + DWARFCallFrameInfo cfi(*module_sp->GetObjectFile(), section_sp, + DWARFCallFrameInfo::DWARF); + + const Symbol *sym = module_sp->FindFirstSymbolWithNameAndType( + ConstString("simple_function"), eSymbolTypeAny); + ASSERT_NE(nullptr, sym); + + std::unique_ptr plan_up = cfi.GetUnwindPlan(sym->GetAddress()); + // Should succeed with valid CIE and FDE + ASSERT_NE(nullptr, plan_up); + EXPECT_GE(plan_up->GetRowCount(), 1); +} diff --git a/lldb/unittests/Utility/ScalarTest.cpp b/lldb/unittests/Utility/ScalarTest.cpp index 6d5caef42bee4..869a5809e6d14 100644 --- a/lldb/unittests/Utility/ScalarTest.cpp +++ b/lldb/unittests/Utility/ScalarTest.cpp @@ -118,11 +118,17 @@ TEST(ScalarTest, RightShiftOperator) { int a = 0x00001000; int b = 0xFFFFFFFF; int c = 4; + unsigned d = 0xFFFFFFFF; + unsigned short e = 0xFFFF; Scalar a_scalar(a); Scalar b_scalar(b); Scalar c_scalar(c); + Scalar d_scalar(d); + Scalar e_scalar(e); ASSERT_EQ(a >> c, a_scalar >> c_scalar); ASSERT_EQ(b >> c, b_scalar >> c_scalar); + ASSERT_EQ(d >> c, d_scalar >> c_scalar); + ASSERT_EQ(e >> c, e_scalar >> c_scalar); } TEST(ScalarTest, GetBytes) { diff --git a/lldb/utils/lldb-dotest/CMakeLists.txt b/lldb/utils/lldb-dotest/CMakeLists.txt index 3b8c88b6dc78c..f3f75015637f4 100644 --- a/lldb/utils/lldb-dotest/CMakeLists.txt +++ b/lldb/utils/lldb-dotest/CMakeLists.txt @@ -15,9 +15,14 @@ llvm_canonicalize_cmake_booleans( if ("libcxx" IN_LIST LLVM_ENABLE_RUNTIMES) set(LLDB_HAS_LIBCXX ON) if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE) - set(LIBCXX_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}/${LLVM_DEFAULT_TARGET_TRIPLE}) + set(LIBCXX_TARGET_SUBDIR ${LLVM_DEFAULT_TARGET_TRIPLE}) + if(LIBCXX_LIBDIR_SUBDIR) + string(APPEND LIBCXX_TARGET_SUBDIR /${LIBCXX_LIBDIR_SUBDIR}) + endif() + cmake_path(NORMAL_PATH LIBCXX_TARGET_SUBDIR) + set(LIBCXX_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}/${LIBCXX_TARGET_SUBDIR}) set(LIBCXX_GENERATED_INCLUDE_DIR "${LLVM_BINARY_DIR}/include/c++/v1") - set(LIBCXX_GENERATED_INCLUDE_TARGET_DIR "${LLVM_BINARY_DIR}/include/${LLVM_DEFAULT_TARGET_TRIPLE}/c++/v1") + set(LIBCXX_GENERATED_INCLUDE_TARGET_DIR "${LLVM_BINARY_DIR}/include/${LIBCXX_TARGET_SUBDIR}/c++/v1") else() set(LIBCXX_LIBRARY_DIR ${CMAKE_BINARY_DIR}/lib${LIBCXX_LIBDIR_SUFFIX}) set(LIBCXX_GENERATED_INCLUDE_DIR "${CMAKE_BINARY_DIR}/include/c++/v1") diff --git a/llvm/.clang-format b/llvm/.clang-format index 5bead5f39dd3c..ecb44bfabd9aa 100644 --- a/llvm/.clang-format +++ b/llvm/.clang-format @@ -1,2 +1,2 @@ BasedOnStyle: LLVM - +LineEnding: LF diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt index b98192968a3ab..c450ee5a3d72e 100644 --- a/llvm/CMakeLists.txt +++ b/llvm/CMakeLists.txt @@ -1011,6 +1011,9 @@ set(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR ${LLVM_ENABLE_PER_TARGET_RUNTIME_DIR_defa set(LLVM_PROFDATA_FILE "" CACHE FILEPATH "Profiling data file to use when compiling in order to improve runtime performance.") +set(LLVM_SPROFDATA_FILE "" CACHE FILEPATH + "Sampling profiling data file to use when compiling in order to improve runtime performance.") + if(LLVM_INCLUDE_TESTS) # All LLVM Python files should be compatible down to this minimum version. 
set(LLVM_MINIMUM_PYTHON_VERSION 3.8) diff --git a/llvm/Maintainers.md b/llvm/Maintainers.md index 5afdd1519c96f..e52259236fc19 100644 --- a/llvm/Maintainers.md +++ b/llvm/Maintainers.md @@ -123,6 +123,13 @@ a.bataev@outlook.com (email), [alexey-bataev](https://github.com/alexey-bataev) Chandler Carruth \ chandlerc@gmail.com, chandlerc@google.com (email), [chandlerc](https://github.com/chandlerc) (GitHub) +#### DFAJumpThreading + +Hongyu Chen \ +xxs\_chy@outlook.com (email), [XChy](https://github.com/XChy) (Github) \ +Usman Nadeem \ +mnadeem@quicinc.com (email), [UsmanNadeem](https://github.com/UsmanNadeem) (Github) + ### Instrumentation and sanitizers #### Sanitizers not covered by someone else diff --git a/llvm/benchmarks/CMakeLists.txt b/llvm/benchmarks/CMakeLists.txt index 57d49ed153749..3cbfb0d44e5a3 100644 --- a/llvm/benchmarks/CMakeLists.txt +++ b/llvm/benchmarks/CMakeLists.txt @@ -10,6 +10,7 @@ add_benchmark(GetIntrinsicForClangBuiltin GetIntrinsicForClangBuiltin.cpp PARTIA add_benchmark(FormatVariadicBM FormatVariadicBM.cpp PARTIAL_SOURCES_INTENDED) add_benchmark(GetIntrinsicInfoTableEntriesBM GetIntrinsicInfoTableEntriesBM.cpp PARTIAL_SOURCES_INTENDED) add_benchmark(SandboxIRBench SandboxIRBench.cpp PARTIAL_SOURCES_INTENDED) +add_benchmark(MustacheBench Mustache.cpp PARTIAL_SOURCES_INTENDED) add_benchmark(RuntimeLibcallsBench RuntimeLibcalls.cpp PARTIAL_SOURCES_INTENDED) diff --git a/llvm/benchmarks/Mustache.cpp b/llvm/benchmarks/Mustache.cpp new file mode 100644 index 0000000000000..6d24f5442e274 --- /dev/null +++ b/llvm/benchmarks/Mustache.cpp @@ -0,0 +1,256 @@ +#include "llvm/Support/Mustache.h" +#include "benchmark/benchmark.h" +#include "llvm/Support/JSON.h" +#include "llvm/Support/raw_ostream.h" +#include + +// A large, raw string with many characters that require HTML escaping. +static const std::string LongHtmlString = [] { + std::string S; + S.reserve(500000); + for (int i = 0; i < 50000; ++i) { + S += ""; + } + return S; +}(); + +// A deep AND wide JSON object for testing traversal. +static const llvm::json::Value DeepJsonData = [] { + llvm::json::Value Root = llvm::json::Object(); + llvm::json::Object *Current = Root.getAsObject(); + for (int i = 0; i < 50; ++i) { // 50 levels deep + for (int j = 0; j < 100; ++j) { + (*Current)["sibling_" + std::to_string(j)] = llvm::json::Value("noise"); + } + std::string Key = "level_" + std::to_string(i); + (*Current)[Key] = llvm::json::Object(); + Current = (*Current)[Key].getAsObject(); + } + (*Current)["final_value"] = llvm::json::Value("Success!"); + + llvm::json::Array Arr; + for (int i = 0; i < 5000; ++i) { // 5,000 iterations + Arr.push_back(llvm::json::Value(i)); + } + + llvm::json::Object NewRoot; + NewRoot["deep_data"] = std::move(Root); + NewRoot["loop_array"] = std::move(Arr); + return llvm::json::Value(std::move(NewRoot)); +}(); + +// A huge array for testing iteration performance. +static const llvm::json::Value HugeArrayData = [] { + llvm::json::Array Arr; + for (int i = 0; i < 100000; ++i) { // 100,000 array items + Arr.push_back(llvm::json::Object( + {{"id", llvm::json::Value(static_cast(i))}, + {"is_even", llvm::json::Value(i % 2 == 0)}, + {"data", llvm::json::Value("Item data for " + std::to_string(i))}})); + } + return llvm::json::Object({{"items", std::move(Arr)}}); +}(); + +// The main template that includes a partial within a loop. 
+static const std::string ComplexPartialTemplate = + "Header\n" + "{{#items}}{{> item_partial}}{{/items}}\n" + "Footer"; + +// The partial template is now more complex, rendering multiple fields and a +// conditional section. +static const std::string ItemPartialTemplate = + "
\n" + "

{{data}}

\n" + " {{#is_even}}(Even){{/is_even}}\n" + "
\n"; + +// A single large string to stress the output buffer. +static const llvm::json::Value LargeOutputData = llvm::json::Object({ + {"long_string", + llvm::json::Value(std::string(1024 * 1024, 'A'))} // 1MB string +}); + +// --- Static Data (Templates) --- + +static const std::string BulkEscapingTemplate = "{{content}}"; +static const std::string BulkUnescapedTemplate = "{{{content}}}"; +static const std::string BulkUnescapedAmpersandTemplate = "{{& content}}"; + +static const std::string DeepTraversalTemplate = [] { + std::string LongKey = + "deep_data.level_0.level_1.level_2.level_3.level_4.level_5." + "level_6.level_7.level_8.level_9." + "level_10.level_11.level_12.level_13.level_14.level_" + "15.level_16.level_17.level_18.level_19." + "level_20.level_21.level_22.level_23.level_24.level_" + "25.level_26.level_27.level_28.level_29." + "level_30.level_31.level_32.level_33.level_34.level_" + "35.level_36.level_37.level_38.level_39." + "level_40.level_41.level_42.level_43.level_44.level_" + "45.level_46.level_47.level_48.level_49.final_value"; + return "{{#loop_array}}{{" + LongKey + "}}{{/loop_array}}"; +}(); + +static const std::string DeeplyNestedRenderingTemplate = [] { + std::string NestedTemplate = "{{#deep_data}}"; + for (int i = 0; i < 50; ++i) { + NestedTemplate += "{{#level_" + std::to_string(i) + "}}"; + } + NestedTemplate += "{{final_value}}"; + for (int i = 49; i >= 0; --i) { + NestedTemplate += "{{/level_" + std::to_string(i) + "}}"; + } + NestedTemplate += "{{/deep_data}}"; + return NestedTemplate; +}(); + +static const std::string HugeArrayIterationTemplate = + "{{#items}}ID: {{id}}.{{/items}}"; + +static const std::string ComplexTemplateParsingTemplate = [] { + std::string LargeTemplate; + LargeTemplate.reserve(100000); + for (int i = 0; i < 1000; ++i) { + LargeTemplate += "{{var_" + std::to_string(i) + + "}}" + "{{#section_" + + std::to_string(i) + "}}Content{{/section_" + + std::to_string(i) + + "}}" + "{{!comment_" + + std::to_string(i) + + "}}" + "{{=<% %>=}}" + "<%var_tag_changed_to_percent_sign_" + + std::to_string(i) + + "%>" + "<%={{ }}=%>" + "{{^inverted_" + + std::to_string(i) + "}}Not Present{{/inverted_" + + std::to_string(i) + "}}"; + } + return LargeTemplate; +}(); + +static const std::string SmallTemplateParsingTemplate = + "{{level_0.sibling_99}}\n" + "{{level_0.level_1.level_2.level_3.level_4.level_5.sibling_50}}\n" + "{{level_0.level_1.level_2.level_3.level_4.level_5." + "level_6.level_7.level_8.level_9." + "level_10.level_11.level_12.level_13.level_14.level_" + "15.level_16.level_17.level_18.level_19." + "level_20.level_21.level_22.level_23.level_24.level_" + "25.level_26.level_27.level_28.level_29." + "level_30.level_31.level_32.level_33.level_34.level_" + "35.level_36.level_37.level_38.level_39." + "level_40.level_41.level_42.level_43.level_44.level_" + "45.level_46.level_47.level_48.level_49.final_value}}\n"; + +static const std::string LargeOutputStringTemplate = "{{long_string}}"; + +// Tests the performance of rendering a large string with various escaping +// syntaxes. 
+static void BM_Mustache_StringRendering(benchmark::State &state, + const std::string &TplStr) { + llvm::mustache::Template Tpl(TplStr); + llvm::json::Value Data = + llvm::json::Object({{"content", llvm::json::Value(LongHtmlString)}}); + for (auto _ : state) { + std::string Result; + llvm::raw_string_ostream OS(Result); + Tpl.render(Data, OS); + benchmark::DoNotOptimize(Result); + } +} +BENCHMARK_CAPTURE(BM_Mustache_StringRendering, Escaped, BulkEscapingTemplate); +BENCHMARK_CAPTURE(BM_Mustache_StringRendering, Unescaped_Triple, + BulkUnescapedTemplate); +BENCHMARK_CAPTURE(BM_Mustache_StringRendering, Unescaped_Ampersand, + BulkUnescapedAmpersandTemplate); + +// Tests the "hot render" cost of repeatedly traversing a deep and wide +// JSON object. +static void BM_Mustache_DeepTraversal(benchmark::State &state) { + llvm::mustache::Template Tpl(DeepTraversalTemplate); + for (auto _ : state) { + std::string Result; + llvm::raw_string_ostream OS(Result); + Tpl.render(DeepJsonData, OS); + benchmark::DoNotOptimize(Result); + } +} +BENCHMARK(BM_Mustache_DeepTraversal); + +// Tests the "hot render" cost of pushing and popping a deep context stack. +static void BM_Mustache_DeeplyNestedRendering(benchmark::State &state) { + llvm::mustache::Template Tpl(DeeplyNestedRenderingTemplate); + for (auto _ : state) { + std::string Result; + llvm::raw_string_ostream OS(Result); + Tpl.render(DeepJsonData, OS); + benchmark::DoNotOptimize(Result); + } +} +BENCHMARK(BM_Mustache_DeeplyNestedRendering); + +// Tests the performance of the loop logic when iterating over a huge number of +// items. +static void BM_Mustache_HugeArrayIteration(benchmark::State &state) { + llvm::mustache::Template Tpl(HugeArrayIterationTemplate); + for (auto _ : state) { + std::string Result; + llvm::raw_string_ostream OS(Result); + Tpl.render(HugeArrayData, OS); + benchmark::DoNotOptimize(Result); + } +} +BENCHMARK(BM_Mustache_HugeArrayIteration); + +// Tests the performance of the parser on a large, "wide" template. +static void BM_Mustache_ComplexTemplateParsing(benchmark::State &state) { + for (auto _ : state) { + llvm::mustache::Template Tpl(ComplexTemplateParsingTemplate); + benchmark::DoNotOptimize(Tpl); + } +} +BENCHMARK(BM_Mustache_ComplexTemplateParsing); + +// Tests the performance of the parser on a small, "deep" template. +static void BM_Mustache_SmallTemplateParsing(benchmark::State &state) { + for (auto _ : state) { + llvm::mustache::Template Tpl(SmallTemplateParsingTemplate); + benchmark::DoNotOptimize(Tpl); + } +} +BENCHMARK(BM_Mustache_SmallTemplateParsing); + +// Tests the performance of rendering a template that includes a partial. +static void BM_Mustache_PartialsRendering(benchmark::State &state) { + llvm::mustache::Template Tpl(ComplexPartialTemplate); + Tpl.registerPartial("item_partial", ItemPartialTemplate); + llvm::json::Value Data = HugeArrayData; + + for (auto _ : state) { + std::string Result; + llvm::raw_string_ostream OS(Result); + Tpl.render(Data, OS); + benchmark::DoNotOptimize(Result); + } +} +BENCHMARK(BM_Mustache_PartialsRendering); + +// Tests the performance of the underlying buffer management when generating a +// very large output. 
+static void BM_Mustache_LargeOutputString(benchmark::State &state) { + llvm::mustache::Template Tpl(LargeOutputStringTemplate); + for (auto _ : state) { + std::string Result; + llvm::raw_string_ostream OS(Result); + Tpl.render(LargeOutputData, OS); + benchmark::DoNotOptimize(Result); + } +} +BENCHMARK(BM_Mustache_LargeOutputString); + +BENCHMARK_MAIN(); diff --git a/llvm/cmake/modules/HandleLLVMOptions.cmake b/llvm/cmake/modules/HandleLLVMOptions.cmake index 8eca29f8a03f5..d4195db6368d7 100644 --- a/llvm/cmake/modules/HandleLLVMOptions.cmake +++ b/llvm/cmake/modules/HandleLLVMOptions.cmake @@ -1184,7 +1184,7 @@ if(LLVM_ENABLE_EH AND NOT LLVM_ENABLE_RTTI) message(FATAL_ERROR "Exception handling requires RTTI. You must set LLVM_ENABLE_RTTI to ON") endif() -set(LLVM_BUILD_INSTRUMENTED OFF CACHE STRING "Build LLVM and tools with PGO instrumentation. May be specified as IR or Frontend") +set(LLVM_BUILD_INSTRUMENTED OFF CACHE STRING "Build LLVM and tools with PGO instrumentation. May be specified as IR, Frontend, CSIR, CSSPGO") set(LLVM_VP_COUNTERS_PER_SITE "1.5" CACHE STRING "Value profile counters to use per site for IR PGO with Clang") mark_as_advanced(LLVM_BUILD_INSTRUMENTED LLVM_VP_COUNTERS_PER_SITE) string(TOUPPER "${LLVM_BUILD_INSTRUMENTED}" uppercase_LLVM_BUILD_INSTRUMENTED) @@ -1217,6 +1217,19 @@ if (LLVM_BUILD_INSTRUMENTED) CMAKE_EXE_LINKER_FLAGS CMAKE_SHARED_LINKER_FLAGS) endif() + elseif(uppercase_LLVM_BUILD_INSTRUMENTED STREQUAL "CSSPGO") + if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") + append("-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -fno-optimize-sibling-calls -fpseudo-probe-for-profiling -fdebug-info-for-profiling" + CMAKE_CXX_FLAGS + CMAKE_C_FLAGS) + if(NOT LINKER_IS_LLD_LINK) + append("-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer -fno-optimize-sibling-calls -fpseudo-probe-for-profiling -fdebug-info-for-profiling" + CMAKE_EXE_LINKER_FLAGS + CMAKE_SHARED_LINKER_FLAGS) + endif() + else() + message(FATAL_ERROR "LLVM_BUILD_INSTRUMENTED=CSSPGO can only be specified when compiling with clang") + endif() else() append("-fprofile-instr-generate=\"${LLVM_PROFILE_FILE_PATTERN}\"" CMAKE_CXX_FLAGS @@ -1269,6 +1282,21 @@ elseif(LLVM_PROFDATA_FILE) message(WARNING "LLVM_PROFDATA_FILE specified, but ${LLVM_PROFDATA_FILE} not found") endif() +if(LLVM_SPROFDATA_FILE AND EXISTS ${LLVM_SPROFDATA_FILE}) + if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" ) + append("-fpseudo-probe-for-profiling -fprofile-sample-use=\"${LLVM_SPROFDATA_FILE}\"" + CMAKE_CXX_FLAGS + CMAKE_C_FLAGS) + if(NOT LINKER_IS_LLD_LINK) + append("-fpseudo-probe-for-profiling -fprofile-sample-use=\"${LLVM_SPROFDATA_FILE}\"" + CMAKE_EXE_LINKER_FLAGS + CMAKE_SHARED_LINKER_FLAGS) + endif() + else() + message(FATAL_ERROR "LLVM_SPROFDATA_FILE can only be specified when compiling with clang") + endif() +endif() + option(LLVM_BUILD_INSTRUMENTED_COVERAGE "Build LLVM and tools with Code Coverage instrumentation" Off) option(LLVM_INDIVIDUAL_TEST_COVERAGE "Emit individual coverage file for each test case." OFF) mark_as_advanced(LLVM_BUILD_INSTRUMENTED_COVERAGE) diff --git a/llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst b/llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst new file mode 100644 index 0000000000000..7259ee8731300 --- /dev/null +++ b/llvm/docs/AMDGPU/AMDGPUAsmGFX12.rst @@ -0,0 +1,2002 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +==================================================================================== +Syntax of GFX12 Instructions +==================================================================================== + +.. contents:: + :local: + +Introduction +============ + +This document describes the syntax of GFX12 instructions. + +Notation +======== + +Notation used in this document is explained :ref:`here`. + +Overview +======== + +An overview of generic syntax and other features of AMDGPU instructions may be found :ref:`in this document`. + +Instructions +============ + + +SMEM +---- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **SRC3** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_atc_probe :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_atc_probe_buffer :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_b128 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_b256 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_b32 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_b512 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_b64 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_b96 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_i16 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_i8 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_u16 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_load_u8 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_nop :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_buffer_prefetch_data :ref:`sbase`, :ref:`ioffset`, :ref:`soffset`, :ref:`sdata` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_dcache_inv :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_load_b128 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_load_b256 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_load_b32 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_load_b512 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_load_b64 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` 
:ref:`th` :ref:`scope` :ref:`nv` + s_load_b96 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_load_i16 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_load_i8 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_load_u16 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_load_u8 :ref:`sdata`, :ref:`sbase`, :ref:`soffset` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_prefetch_data :ref:`sbase`, :ref:`ioffset`, :ref:`soffset`, :ref:`sdata` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_prefetch_data_pc_rel :ref:`ioffset`, :ref:`soffset`, :ref:`sdata` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_prefetch_inst :ref:`sbase`, :ref:`ioffset`, :ref:`soffset`, :ref:`sdata` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + s_prefetch_inst_pc_rel :ref:`ioffset`, :ref:`soffset`, :ref:`sdata` :ref:`offset24s` :ref:`th` :ref:`scope` :ref:`nv` + +SOP1 +---- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_abs_i32 :ref:`sdst`, :ref:`ssrc0` + s_alloc_vgpr :ref:`ssrc0` + s_and_not0_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_and_not0_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_and_not0_wrexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_and_not0_wrexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_and_not1_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_and_not1_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_and_not1_wrexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_and_not1_wrexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_and_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_and_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_barrier_init :ref:`ssrc0` + s_barrier_join :ref:`ssrc0` + s_barrier_signal :ref:`ssrc0` + s_barrier_signal_isfirst :ref:`ssrc0` + s_bcnt0_i32_b32 :ref:`sdst`, :ref:`ssrc0` + s_bcnt0_i32_b64 :ref:`sdst`, :ref:`ssrc0` + s_bcnt1_i32_b32 :ref:`sdst`, :ref:`ssrc0` + s_bcnt1_i32_b64 :ref:`sdst`, :ref:`ssrc0` + s_bitreplicate_b64_b32 :ref:`sdst`, :ref:`ssrc0` + s_bitset0_b32 :ref:`sdst`, :ref:`ssrc0` + s_bitset0_b64 :ref:`sdst`, :ref:`ssrc0` + s_bitset1_b32 :ref:`sdst`, :ref:`ssrc0` + s_bitset1_b64 :ref:`sdst`, :ref:`ssrc0` + s_brev_b32 :ref:`sdst`, :ref:`ssrc0` + s_brev_b64 :ref:`sdst`, :ref:`ssrc0` + s_ceil_f16 :ref:`sdst`, :ref:`ssrc0` + s_ceil_f32 :ref:`sdst`, :ref:`ssrc0` + s_cls_i32 :ref:`sdst`, :ref:`ssrc0` + s_cls_i32_i64 :ref:`sdst`, :ref:`ssrc0` + s_clz_i32_u32 :ref:`sdst`, :ref:`ssrc0` + s_clz_i32_u64 :ref:`sdst`, :ref:`ssrc0` + s_cmov_b32 :ref:`sdst`, :ref:`ssrc0` + s_cmov_b64 :ref:`sdst`, :ref:`ssrc0` + s_ctz_i32_b32 :ref:`sdst`, :ref:`ssrc0` + s_ctz_i32_b64 :ref:`sdst`, :ref:`ssrc0` + s_cvt_f16_f32 :ref:`sdst`, :ref:`ssrc0` + s_cvt_f32_f16 :ref:`sdst`, :ref:`ssrc0` + s_cvt_f32_i32 :ref:`sdst`, :ref:`ssrc0` + s_cvt_f32_u32 :ref:`sdst`, :ref:`ssrc0` + s_cvt_hi_f32_f16 :ref:`sdst`, :ref:`ssrc0` + s_cvt_i32_f32 :ref:`sdst`, :ref:`ssrc0` + s_cvt_u32_f32 :ref:`sdst`, :ref:`ssrc0` + s_floor_f16 :ref:`sdst`, :ref:`ssrc0` + s_floor_f32 :ref:`sdst`, :ref:`ssrc0` + s_get_barrier_state :ref:`sdst`, :ref:`ssrc0` + s_get_lock_state :ref:`sdst`, :ref:`ssrc0` + s_getpc_b64 
:ref:`sdst` + s_mov_b32 :ref:`sdst`, :ref:`ssrc0` + s_mov_b64 :ref:`sdst`, :ref:`ssrc0` + s_mov_fed_b32 :ref:`sdst`, :ref:`ssrc0` + s_mov_from_global_b32 :ref:`sdst`, :ref:`ssrc0` + s_mov_from_global_b64 :ref:`sdst`, :ref:`ssrc0` + s_mov_regrd_b32 :ref:`sdst`, :ref:`ssrc0` + s_mov_to_global_b32 :ref:`sdst`, :ref:`ssrc0` + s_mov_to_global_b64 :ref:`sdst`, :ref:`ssrc0` + s_movreld_b32 :ref:`sdst`, :ref:`ssrc0` + s_movreld_b64 :ref:`sdst`, :ref:`ssrc0` + s_movrels_b32 :ref:`sdst`, :ref:`ssrc0` + s_movrels_b64 :ref:`sdst`, :ref:`ssrc0` + s_movrelsd_2_b32 :ref:`sdst`, :ref:`ssrc0` + s_nand_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_nand_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_nor_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_nor_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_not_b32 :ref:`sdst`, :ref:`ssrc0` + s_not_b64 :ref:`sdst`, :ref:`ssrc0` + s_or_not0_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_or_not0_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_or_not1_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_or_not1_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_or_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_or_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_quadmask_b32 :ref:`sdst`, :ref:`ssrc0` + s_quadmask_b64 :ref:`sdst`, :ref:`ssrc0` + s_rfe_b64 :ref:`ssrc0` + s_rndne_f16 :ref:`sdst`, :ref:`ssrc0` + s_rndne_f32 :ref:`sdst`, :ref:`ssrc0` + s_sendmsg_rtn_b32 :ref:`sdst`, :ref:`ssrc0` + s_sendmsg_rtn_b64 :ref:`sdst`, :ref:`ssrc0` + s_setpc_b64 :ref:`ssrc0` + s_sext_i32_i16 :ref:`sdst`, :ref:`ssrc0` + s_sext_i32_i8 :ref:`sdst`, :ref:`ssrc0` + s_sleep_var :ref:`ssrc0` + s_swap_to_global_b32 :ref:`sdst`, :ref:`ssrc0` + s_swappc_b64 :ref:`sdst`, :ref:`ssrc0` + s_trunc_f16 :ref:`sdst`, :ref:`ssrc0` + s_trunc_f32 :ref:`sdst`, :ref:`ssrc0` + s_try_lock :ref:`ssrc0` + s_unlock :ref:`ssrc0` + s_wakeup_barrier :ref:`ssrc0` + s_wqm_b32 :ref:`sdst`, :ref:`ssrc0` + s_wqm_b64 :ref:`sdst`, :ref:`ssrc0` + s_xnor_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_xnor_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + s_xor_saveexec_b32 :ref:`sdst`, :ref:`ssrc0` + s_xor_saveexec_b64 :ref:`sdst`, :ref:`ssrc0` + +SOP2 +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_absdiff_i32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_add_co_ci_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_add_co_i32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_add_co_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_add_f16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_add_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_add_nc_u64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_and_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_and_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_and_not1_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_and_not1_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_ashr_i32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_ashr_i64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_bfe_i32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_bfe_i64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_bfe_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_bfe_u64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_bfm_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_bfm_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_cselect_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_cselect_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_cvt_pk_rtz_f16_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_fmaak_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1`, :ref:`literal` + s_fmac_f16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_fmac_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_fmamk_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`literal`, :ref:`ssrc1` + s_lshl1_add_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_lshl2_add_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_lshl3_add_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_lshl4_add_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_lshl_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_lshl_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_lshr_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_lshr_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_max_i32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_max_num_f16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_max_num_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_max_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_maximum_f16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_maximum_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_min_i32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_min_num_f16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_min_num_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_min_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_minimum_f16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_minimum_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_mul_f16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_mul_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_mul_hi_i32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_mul_hi_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_mul_i32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_mul_u64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_nand_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_nand_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_nor_b32 
:ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_nor_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_or_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_or_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_or_not1_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_or_not1_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_pack_hh_b32_b16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_pack_hl_b32_b16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_pack_lh_b32_b16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_pack_ll_b32_b16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_sub_co_ci_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_sub_co_i32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_sub_co_u32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_sub_f16 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_sub_f32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_sub_nc_u64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_xnor_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_xnor_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_xor_b32 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + s_xor_b64 :ref:`sdst`, :ref:`ssrc0`, :ref:`ssrc1` + +SOPC +---- + +.. parsed-literal:: + + **INSTRUCTION** **SRC0** **SRC1** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_bitcmp0_b32 :ref:`ssrc0`, :ref:`ssrc1` + s_bitcmp0_b64 :ref:`ssrc0`, :ref:`ssrc1` + s_bitcmp1_b32 :ref:`ssrc0`, :ref:`ssrc1` + s_bitcmp1_b64 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_eq_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_eq_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_eq_i32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_eq_u32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_eq_u64 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_ge_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_ge_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_ge_i32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_ge_u32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_gt_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_gt_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_gt_i32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_gt_u32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_le_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_le_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_le_i32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_le_u32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_lg_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_lg_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_lg_i32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_lg_u32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_lg_u64 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_lt_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_lt_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_lt_i32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_lt_u32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_neq_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_neq_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_nge_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_nge_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_ngt_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_ngt_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_nle_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_nle_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_nlg_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_nlg_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_nlt_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_nlt_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_o_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_o_f32 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_u_f16 :ref:`ssrc0`, :ref:`ssrc1` + s_cmp_u_f32 :ref:`ssrc0`, :ref:`ssrc1` + +SOPK +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_addk_co_i32 :ref:`sdst`, :ref:`simm16` + s_call_b64 :ref:`sdst`, :ref:`simm16` + s_cmovk_i32 :ref:`sdst`, :ref:`simm16` + s_cmpk_eq_i32 :ref:`sdst`, :ref:`simm16` + s_cmpk_eq_u32 :ref:`sdst`, :ref:`simm16` + s_cmpk_ge_i32 :ref:`sdst`, :ref:`simm16` + s_cmpk_ge_u32 :ref:`sdst`, :ref:`simm16` + s_cmpk_gt_i32 :ref:`sdst`, :ref:`simm16` + s_cmpk_gt_u32 :ref:`sdst`, :ref:`simm16` + s_cmpk_le_i32 :ref:`sdst`, :ref:`simm16` + s_cmpk_le_u32 :ref:`sdst`, :ref:`simm16` + s_cmpk_lg_i32 :ref:`sdst`, :ref:`simm16` + s_cmpk_lg_u32 :ref:`sdst`, :ref:`simm16` + s_cmpk_lt_i32 :ref:`sdst`, :ref:`simm16` + s_cmpk_lt_u32 :ref:`sdst`, :ref:`simm16` + s_getreg_b32 :ref:`sdst`, :ref:`simm16` + s_getreg_regrd_b32 :ref:`sdst`, :ref:`simm16` + s_movk_i32 :ref:`sdst`, :ref:`simm16` + s_mulk_i32 :ref:`sdst`, :ref:`simm16` + s_setreg_b32 :ref:`simm16`, :ref:`sdst` + s_setreg_imm32_b32 :ref:`simm16`, :ref:`literal` + s_subvector_loop_begin :ref:`sdst`, :ref:`simm16` + s_subvector_loop_end :ref:`sdst`, :ref:`simm16` + s_version :ref:`simm16` + +SOPP +---- + +.. parsed-literal:: + + **INSTRUCTION** **SRC** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + s_barrier + s_barrier_leave + s_barrier_wait :ref:`simm16` + s_branch :ref:`simm16` + s_cbranch_cdbgsys :ref:`simm16` + s_cbranch_cdbgsys_and_user :ref:`simm16` + s_cbranch_cdbgsys_or_user :ref:`simm16` + s_cbranch_cdbguser :ref:`simm16` + s_cbranch_execnz :ref:`simm16` + s_cbranch_execz :ref:`simm16` + s_cbranch_scc0 :ref:`simm16` + s_cbranch_scc1 :ref:`simm16` + s_cbranch_vccnz :ref:`simm16` + s_cbranch_vccz :ref:`simm16` + s_clause :ref:`simm16` + s_code_end + s_decperflevel :ref:`simm16` + s_delay_alu :ref:`simm16` + s_denorm_mode :ref:`simm16` + s_endpgm + s_endpgm_ordered_ps_done + s_endpgm_saved + s_icache_inv + s_incperflevel :ref:`simm16` + s_nop :ref:`simm16` + s_round_mode :ref:`simm16` + s_sendmsg :ref:`simm16` + s_sendmsghalt :ref:`simm16` + s_set_inst_prefetch_distance :ref:`simm16` + s_sethalt :ref:`simm16` + s_setkill :ref:`simm16` + s_setprio :ref:`simm16` + s_singleuse_vdst :ref:`simm16` + s_sleep :ref:`simm16` + s_trap :ref:`simm16` + s_ttracedata + s_ttracedata_imm :ref:`simm16` + s_wait_alu :ref:`simm16` + s_wait_bvhcnt :ref:`simm16` + s_wait_dscnt :ref:`simm16` + s_wait_event :ref:`simm16` + s_wait_expcnt :ref:`simm16` + s_wait_idle + s_wait_kmcnt :ref:`simm16` + s_wait_loadcnt :ref:`simm16` + s_wait_loadcnt_dscnt :ref:`simm16` + s_wait_samplecnt :ref:`simm16` + s_wait_storecnt :ref:`simm16` + s_wait_storecnt_dscnt :ref:`simm16` + s_waitcnt :ref:`simm16` + s_wakeup + +VBUFFER +------- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + buffer_atomic_add_f32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_add_u32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_add_u64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_and_b32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_and_b64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_cmpswap_b32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_cmpswap_b64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_cond_sub_u32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_dec_u32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_dec_u64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_inc_u32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_inc_u64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_max_i32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_max_i64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_max_num_f32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_max_u32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_max_u64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` 
:ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_min_i32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_min_i64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_min_num_f32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_min_u32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_min_u64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_or_b32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_or_b64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_pk_add_bf16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_pk_add_f16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_sub_clamp_u32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_sub_u32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_sub_u64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_swap_b32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_swap_b64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_xor_b32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_atomic_xor_b64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_gl0_inv :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_gl1_inv :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_load_b128 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_load_b32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_load_b64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_load_b96 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + 
buffer_load_block :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_b16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_format_xy :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_format_xyz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_format_xyzw :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_hi_b16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_hi_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_hi_i8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_hi_u8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_i8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_d16_u8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_format_xy :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_format_xyz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_format_xyzw :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_i16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_i8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_lds_b32 :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_lds_format_x :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_lds_i16 :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+ buffer_load_lds_i8 :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv`
+
buffer_load_lds_u16 :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_load_lds_u8 :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_load_u16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_load_u8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_nop :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_b128 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_b16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_b32 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_b64 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_b8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_b96 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_block :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_d16_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_d16_format_xy :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_d16_format_xyz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_d16_format_xyzw :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_d16_hi_b16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_d16_hi_b8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_d16_hi_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_format_xy :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_format_xyz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + buffer_store_format_xyzw :ref:`vdata`, :ref:`vaddr`, 
:ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_load_d16_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_load_d16_format_xy :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_load_d16_format_xyz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_load_d16_format_xyzw :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_load_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_load_format_xy :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_load_format_xyz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_load_format_xyzw :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_store_d16_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_store_d16_format_xy :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_store_d16_format_xyz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_store_d16_format_xyzw :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_store_format_x :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_store_format_xy :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_store_format_xyz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + tbuffer_store_format_xyzw :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`soffset` :ref:`offset` :ref:`idxen` :ref:`offen` :ref:`tfe` :ref:`th` :ref:`scope` :ref:`nv` + +VDS +--- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + ds_add_f32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_add_f64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_add_rtn_f32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_add_rtn_u32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_add_rtn_u64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_add_u32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_add_u64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_and_b32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_and_b64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_and_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_and_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_append :ref:`vdst` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bpermute_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bpermute_fi_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bpermute_fi_from_global_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bpermute_fi_to_global_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bpermute_fi_to_simd_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bpermute_from_global_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bpermute_to_global_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bpermute_to_simd_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bvh_stack_push4_pop1_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bvh_stack_push8_pop1_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_bvh_stack_push8_pop2_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_cmpstore_b32 :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_cmpstore_b64 :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_cmpstore_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_cmpstore_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` 
:ref:`offset` :ref:`offset0` :ref:`offset1` + ds_cond_sub_rtn_u32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_cond_sub_u32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_condxchg32_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_consume :ref:`vdst` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_dec_rtn_u32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_dec_rtn_u64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_dec_u32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_dec_u64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_inc_rtn_u32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_inc_rtn_u64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_inc_u32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_inc_u64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_2addr_b32 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_2addr_b64 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_2addr_stride64_b32 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_2addr_stride64_b64 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_addtid_b32 :ref:`vdst` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_b128 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_b32 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_b64 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_b96 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_i16 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_i8 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_i8_d16 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_i8_d16_hi :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_u16 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_u16_d16 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_u16_d16_hi :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_u8 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_u8_d16 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_load_u8_d16_hi :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_i32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_i64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_num_f32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_num_f64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_num_rtn_f32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_num_rtn_f64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_rtn_i32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_rtn_i64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_rtn_u32 
:ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_rtn_u64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_u32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_max_u64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_i32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_i64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_num_f32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_num_f64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_num_rtn_f32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_num_rtn_f64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_rtn_i32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_rtn_i64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_rtn_u32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_rtn_u64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_u32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_min_u64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_mskor_b32 :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_mskor_b64 :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_mskor_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_mskor_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_nop :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_or_b32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_or_b64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_or_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_or_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_permute_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_permute_from_global_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_permute_to_global_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_permute_to_simd_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_pk_add_bf16 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_pk_add_f16 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_pk_add_rtn_bf16 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_pk_add_rtn_f16 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_rsub_rtn_u32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_rsub_rtn_u64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_rsub_u32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_rsub_u64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_2addr_b32 :ref:`addr`, :ref:`data0`, :ref:`data1` 
:ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_2addr_b64 :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_2addr_stride64_b32 :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_2addr_stride64_b64 :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_addtid_b32 :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_b128 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_b16 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_b16_d16_hi :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_b32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_b64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_b8 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_b8_d16_hi :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_store_b96 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_storexchg_2addr_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_storexchg_2addr_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_storexchg_2addr_stride64_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_storexchg_2addr_stride64_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_storexchg_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_storexchg_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_sub_clamp_rtn_u32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_sub_clamp_u32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_sub_rtn_u32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_sub_rtn_u64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_sub_u32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_sub_u64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_swizzle_b32 :ref:`vdst`, :ref:`addr` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_wrap_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0`, :ref:`data1` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_xor_b32 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_xor_b64 :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_xor_rtn_b32 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + ds_xor_rtn_b64 :ref:`vdst`, :ref:`addr`, :ref:`data0` :ref:`offset` :ref:`offset0` :ref:`offset1` + +VDSDIR +------ + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + ds_direct_load :ref:`vdst` :ref:`wait_va_vdst` :ref:`wait_vdst` :ref:`wait_vm_vsrc` + ds_param_load :ref:`vdst`, :ref:`attr` :ref:`wait_va_vdst` :ref:`wait_vdst` :ref:`wait_vm_vsrc` + +VERIF +----- + +.. parsed-literal:: + + **INSTRUCTION** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + fake_s_delay_alu + fake_s_nop + fake_s_wait_alu + fake_s_wait_bvhcnt + fake_s_wait_dscnt + fake_s_wait_expcnt + fake_s_wait_kmcnt + fake_s_wait_loadcnt + fake_s_wait_samplecnt + fake_s_wait_storecnt + fake_s_waitcnt + fake_v_nop + ill_0 + ill_1 + ill_beef + metadata + verif_s_adjdelay_alu + +VEXPORT +------- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **SRC3** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + export :ref:`tgt`, :ref:`vsrc0`, :ref:`vsrc1`, :ref:`vsrc2`, :ref:`vsrc3` :ref:`done` :ref:`row_en` + +VFLAT +----- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + flat_atomic_add_f32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_add_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_add_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_and_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_and_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_cmpswap_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_cmpswap_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_cond_sub_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_dec_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_dec_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_inc_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_inc_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_max_i32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_max_i64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_max_num_f32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_max_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_max_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_min_i32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_min_i64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_min_num_f32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_min_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_min_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_or_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_or_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_pk_add_bf16 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_pk_add_f16 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_sub_clamp_u32 :ref:`vdst`, 
:ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_sub_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_sub_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_swap_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_swap_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_xor_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_atomic_xor_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_b128 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_b32 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_b64 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_b96 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_d16_b16 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_d16_hi_b16 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_d16_hi_i8 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_d16_hi_u8 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_d16_i8 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_d16_u8 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_i16 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_i8 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_u16 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_load_u8 :ref:`vdst`, :ref:`vaddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_store_b128 :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_store_b16 :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_store_b32 :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_store_b64 :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_store_b8 :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_store_b96 :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_store_d16_hi_b16 :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + flat_store_d16_hi_b8 :ref:`vaddr`, :ref:`vsrc` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + +VGLOBAL +------- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + global_atomic_add_f32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_add_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_add_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_and_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_and_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_cmpswap_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_cmpswap_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_cond_sub_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_dec_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_dec_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_inc_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_inc_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_max_i32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_max_i64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_max_num_f32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_max_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_max_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_min_i32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_min_i64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_min_num_f32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_min_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_min_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_atomic_or_b32 :ref:`vdst`, 
:ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_or_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_ordered_add_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_pk_add_bf16 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_pk_add_f16 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_sub_clamp_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_sub_u32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_sub_u64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_swap_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_swap_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_xor_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_atomic_xor_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_inv :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_addtid_b32 :ref:`vdst`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_b128 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_b96 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_block :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_d16_b16 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_d16_hi_b16 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_d16_hi_i8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_d16_hi_u8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_d16_i8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_d16_u8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_i16 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_i8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_lds_addtid_b32 :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_lds_b32 :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_lds_i16 :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_lds_i8 :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_lds_u16 :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv`
+ global_load_lds_u8 :ref:`vaddr`, :ref:`saddr`
:ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_load_tr_b128 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_load_tr_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_load_u16 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_load_u8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_addtid_b32 :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_b128 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_b16 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_b32 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_b64 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_b8 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_b96 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_block :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_d16_hi_b16 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_store_d16_hi_b8 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_wb :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + global_wbinv :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + +VIMAGE +------ + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + image_atomic_add_flt :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_add_uint :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_and :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_cmpswap :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_dec_uint :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_inc_uint :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_max_flt :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` 
:ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_max_int :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_max_uint :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_min_flt :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_min_int :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_min_uint :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_or :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_pk_add_bf16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_pk_add_f16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_sub_uint :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_swap :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_atomic_xor :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_bvh64_intersect_ray :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_bvh8_intersect_ray :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_bvh_dual_intersect_ray :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_bvh_intersect_ray :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_get_resinfo :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_load :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_load_mip :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_load_mip_pck :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_load_mip_pck_sgn :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_load_pck :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + 
image_load_pck_sgn :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_rsvd_atomic_umax_8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_rsvd_atomic_umin_8 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_store :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_store_mip :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_store_mip_pck :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_store_pck :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + +VINTERP +------- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_interp_p10_f16_f32 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` :ref:`clamp` :ref:`wait_exp` + v_interp_p10_f32 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` :ref:`clamp` :ref:`wait_exp` + v_interp_p10_rtz_f16_f32 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` :ref:`clamp` :ref:`wait_exp` + v_interp_p2_f16_f32 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` :ref:`clamp` :ref:`wait_exp` + v_interp_p2_f32 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` :ref:`clamp` :ref:`wait_exp` + v_interp_p2_rtz_f16_f32 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` :ref:`clamp` :ref:`wait_exp` + +VOP1 +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_bfrev_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_ceil_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_ceil_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_ceil_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cls_i32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_clz_i32_u32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cos_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cos_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_ctz_i32_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f16_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f16_i16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f16_u16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_bf8 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_fp8 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_i32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_u32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_ubyte0 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_ubyte1 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_ubyte2 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f32_ubyte3 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f64_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f64_i32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_f64_u32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_floor_i32_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_i16_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_i32_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_i32_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_i32_i16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_nearest_i32_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_norm_i16_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_norm_u16_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_off_f32_i4 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_f32_bf8 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_f32_fp8 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_u16_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_u32_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_u32_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_u32_u16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` 
+ v_exp_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_exp_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_floor_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_floor_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_floor_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_fract_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_fract_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_fract_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_frexp_exp_i16_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_frexp_exp_i32_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_frexp_exp_i32_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_frexp_mant_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_frexp_mant_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_frexp_mant_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_log_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_log_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_mov_b16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_mov_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_mov_fed_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_mov_from_global_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_mov_to_global_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_movreld_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_movrels_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_movrelsd_2_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_movrelsd_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_nop :ref:`omod` :ref:`clamp` + v_not_b16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_not_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_permlane64_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_pipeflush :ref:`omod` :ref:`clamp` + v_rcp_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_rcp_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_rcp_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_rcp_iflag_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_readfirstlane_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_rndne_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_rndne_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_rndne_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_rsq_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_rsq_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_rsq_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_sat_pk_u8_i16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_sin_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_sin_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_sqrt_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_sqrt_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_sqrt_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_swap_b16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_swap_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` 
:ref:`clamp` + v_swaprel_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_trunc_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_trunc_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_trunc_f64 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_writelane_regwr_b32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + +VOP2 +---- + +.. parsed-literal:: + + **INSTRUCTION** **DST0** **DST1** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_add_co_ci_u32 :ref:`vdst`, :ref:`sdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m`, :ref:`vcc`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_nc_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_nc_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_and_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_ashrrev_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cndmask_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m`, :ref:`vcc`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_rtz_f16_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_fmaak_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m`, :ref:`literal`::ref:`m` :ref:`omod` :ref:`clamp` + v_fmaak_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m`, :ref:`literal`::ref:`m` :ref:`omod` :ref:`clamp` + v_fmaak_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m`, :ref:`literal`::ref:`m` :ref:`omod` :ref:`clamp` + v_fmac_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_fmac_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_fmac_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_fmamk_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`literal`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_fmamk_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`literal`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_fmamk_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`literal`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_illegal :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_ldexp_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_lshlrev_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_lshlrev_b64 :ref:`vdst`, 
:ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_lshrrev_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_max_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_max_num_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_max_num_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_max_num_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_max_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_min_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_min_num_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_min_num_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_min_num_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_min_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_dx9_zero_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_hi_i32_i24 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_hi_u32_u24 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_i32_i24 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_u32_u24 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_or_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_pk_fmac_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_sub_co_ci_u32 :ref:`vdst`, :ref:`sdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m`, :ref:`vcc`::ref:`m` :ref:`omod` :ref:`clamp` + v_sub_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_sub_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_sub_nc_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_sub_nc_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_subrev_co_ci_u32 :ref:`vdst`, :ref:`sdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m`, :ref:`vcc`::ref:`m` :ref:`omod` :ref:`clamp` + v_subrev_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_subrev_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_subrev_nc_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_xnor_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_xor_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + +VOP3 +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST0** **DST1** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_add3_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_co_u32 :ref:`vdst`, :ref:`sdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_lshl_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_nc_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_nc_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_add_nc_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_alignbit_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_alignbyte_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_and_b16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_and_or_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_ashrrev_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_ashrrev_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_bcnt_u32_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_bfe_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_bfe_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_bfi_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_bfm_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cndmask_b16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_cubeid_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_cubema_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_cubesc_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_cubetc_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_bf8_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_fp8_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_i16_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_i16_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` 
:ref:`omod` :ref:`clamp` + v_cvt_pk_norm_i16_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_norm_i16_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_norm_u16_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_norm_u16_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_u16_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_u16_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_pk_u8_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_sr_bf8_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cvt_sr_fp8_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_div_fixup_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_div_fixup_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_div_fixup_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_div_fmas_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_div_fmas_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_div_scale_f32 :ref:`vdst`, :ref:`sdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_div_scale_f64 :ref:`vdst`, :ref:`sdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_dot2_bf16_bf16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_dot2_f16_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_fma_dx9_zero_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_fma_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_fma_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_fma_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_ldexp_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_ldexp_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_lerp_u8 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_lshl_add_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_lshl_add_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_lshl_or_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_lshlrev_b16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_lshrrev_b16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_lshrrev_b64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mad_co_i64_i32 :ref:`vdst`, :ref:`sdst`, 
:ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mad_co_u64_u32 :ref:`vdst`, :ref:`sdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mad_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mad_i32_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mad_i32_i24 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mad_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mad_u32_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mad_u32_u24 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_max3_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_max3_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_max3_num_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_max3_num_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_max3_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_max3_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_max_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_max_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_maximum3_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_maximum3_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_maximum_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_maximum_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_maximum_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_maximumminimum_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_maximumminimum_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_maxmin_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_maxmin_num_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_maxmin_num_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_maxmin_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mbcnt_hi_u32_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mbcnt_lo_u32_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_med3_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_med3_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_med3_num_f16 :ref:`vdst`, 
:ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_med3_num_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_med3_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_med3_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_min3_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_min3_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_min3_num_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_min3_num_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_min3_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_min3_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_min_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_min_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_minimum3_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_minimum3_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_minimum_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_minimum_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_minimum_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_minimummaximum_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_minimummaximum_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_minmax_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_minmax_num_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_minmax_num_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_minmax_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mqsad_pk_u16_u8 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mqsad_u32_u8 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_msad_u8 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_hi_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_hi_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_lo_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mul_lo_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_mullit_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_or3_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` 
:ref:`omod` :ref:`clamp` + v_or_b16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_pack_b32_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_perm_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_permlane16_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_permlane16_var_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_permlanex16_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_permlanex16_var_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_qsad_pk_u16_u8 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_readlane_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_readlane_regrd_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_exp_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_exp_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_log_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_log_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_rcp_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_rcp_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_rsq_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_rsq_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_sqrt_f16 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_s_sqrt_f32 :ref:`vdst`, :ref:`src0`::ref:`m` :ref:`omod` :ref:`clamp` + v_sad_hi_u8 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_sad_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_sad_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_sad_u8 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_sub_co_u32 :ref:`vdst`, :ref:`sdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_sub_nc_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_sub_nc_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_sub_nc_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_subrev_co_u32 :ref:`vdst`, :ref:`sdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_trig_preop_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_writelane_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + v_xad_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_xor3_b32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m`, :ref:`src2`::ref:`m` :ref:`omod` :ref:`clamp` + v_xor_b16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`src1`::ref:`m` :ref:`omod` :ref:`clamp` + +VOP3P +----- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_dot2_f32_bf16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_dot2_f32_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_dot4_f32_bf8_bf8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_dot4_f32_bf8_fp8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_dot4_f32_fp8_bf8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_dot4_f32_fp8_fp8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_dot4_i32_iu8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_dot4_u32_u8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_dot8_i32_iu4 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_dot8_u32_u4 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_fma_mix_f32 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_fma_mixhi_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_fma_mixlo_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_pk_add_bf16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_add_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_add_i16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_add_u16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_ashrrev_i16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_fma_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_pk_fma_f32 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_pk_lshlrev_b16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_lshrrev_b16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_mad_i16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_pk_mad_u16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_pk_max_i16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_max_num_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_max_u16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_maximum_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_min_i16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_min_num_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_min_u16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_minimum_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_mul_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_mul_lo_u16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_sub_i16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_pk_sub_u16 :ref:`vdst`, :ref:`src0`, :ref:`src1` + v_swmmac_bf16_16x16x32_bf16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_f16_16x16x32_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_f32_16x16x32_bf16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_f32_16x16x32_bf8_bf8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_f32_16x16x32_bf8_fp8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_f32_16x16x32_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_f32_16x16x32_fp8_bf8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_f32_16x16x32_fp8_fp8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_i32_16x16x32_iu4 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_i32_16x16x32_iu8 :ref:`vdst`, 
:ref:`src0`, :ref:`src1`, :ref:`src2` + v_swmmac_i32_16x16x64_iu4 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_bf16_16x16x16_bf16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_f16_16x16x16_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_f32_16x16x16_bf16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_f32_16x16x16_bf8_bf8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_f32_16x16x16_bf8_fp8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_f32_16x16x16_f16 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_f32_16x16x16_fp8_bf8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_f32_16x16x16_fp8_fp8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_i32_16x16x16_iu4 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_i32_16x16x16_iu8 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + v_wmma_i32_16x16x32_iu4 :ref:`vdst`, :ref:`src0`, :ref:`src1`, :ref:`src2` + +VOPC +---- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_cmp_class_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_class_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_class_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_eq_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_eq_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_eq_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_eq_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_eq_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_eq_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_eq_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_eq_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_eq_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_f_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_f_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_f_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_f_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_f_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_f_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_f_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ge_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` 
:ref:`clamp` + v_cmp_ge_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ge_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ge_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ge_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ge_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ge_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ge_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ge_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_gt_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_gt_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_gt_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_gt_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_gt_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_gt_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_gt_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_gt_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_gt_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_le_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_le_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_le_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_le_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_le_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_le_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_le_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_le_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_le_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lg_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lg_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lg_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lt_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lt_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lt_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lt_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lt_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lt_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lt_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lt_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, 
:ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_lt_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ne_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ne_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ne_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ne_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ne_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ne_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_neq_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_neq_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_neq_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nge_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nge_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nge_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ngt_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ngt_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_ngt_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nle_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nle_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nle_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nlg_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nlg_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nlg_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nlt_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nlt_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_nlt_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_o_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_o_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_o_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_t_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_t_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_t_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_t_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_t_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_t_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_t_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_u_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + 
v_cmp_u_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmp_u_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_class_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_class_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_class_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_eq_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_eq_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_eq_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_eq_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_eq_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_eq_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_eq_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_eq_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_eq_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_f_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_f_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_f_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_f_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_f_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_f_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_f_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ge_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ge_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ge_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ge_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ge_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ge_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ge_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ge_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ge_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_gt_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_gt_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_gt_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_gt_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_gt_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_gt_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_gt_u16 :ref:`vdst`, 
:ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_gt_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_gt_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_le_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_le_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_le_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_le_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_le_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_le_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_le_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_le_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_le_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lg_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lg_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lg_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lt_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lt_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lt_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lt_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lt_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lt_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lt_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lt_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_lt_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ne_i16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ne_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ne_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ne_u16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ne_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ne_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_neq_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_neq_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_neq_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nge_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nge_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nge_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ngt_f16 :ref:`vdst`, 
:ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ngt_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_ngt_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nle_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nle_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nle_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nlg_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nlg_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nlg_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nlt_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nlt_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_nlt_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_o_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_o_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_o_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_t_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_t_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_t_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_t_i32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_t_i64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_t_u32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_t_u64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_u_f16 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_u_f32 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + v_cmpx_u_f64 :ref:`vdst`, :ref:`src0`::ref:`m`, :ref:`vsrc1`::ref:`m` :ref:`omod` :ref:`clamp` + +VOPD +---- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST0** **DST1** **SRC0** **SRC1** **SRC2** **SRC3** **SRC4** **SRC5** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_dual_add_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_add_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_add_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_add_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_add_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_add_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_cndmask_b32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, 
:ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc`, :ref:`literal` + v_dual_cndmask_b32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc`, :ref:`literal` + v_dual_cndmask_b32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vcc` + v_dual_cndmask_b32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_cndmask_b32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_dot2acc_f32_bf16_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_dot2acc_f32_bf16_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_dot2acc_f32_bf16_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_dot2acc_f32_bf16_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_dot2acc_f32_bf16_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_bf16_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + 
v_dual_dot2acc_f32_bf16_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_dot2acc_f32_f16_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_dot2acc_f32_f16_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_dot2acc_f32_f16_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_dot2acc_f32_f16_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_dot2acc_f32_f16_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmaak_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc`, :ref:`literal` + v_dual_fmaak_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, 
:ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`literal` + v_dual_fmaak_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmaak_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmac_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_fmac_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmac_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmac_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_fmac_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmac_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_fmamk_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc`, :ref:`literal` + v_dual_fmamk_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, 
:ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`literal` + v_dual_fmamk_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_fmamk_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_max_num_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_max_num_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_max_num_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_max_num_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_max_num_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, 
:ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_max_num_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_min_num_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_min_num_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_min_num_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_min_num_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_min_num_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_mov_b32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_mov_b32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_mov_b32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, 
:ref:`srcy0` + v_dual_mov_b32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mov_b32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_mul_dx9_zero_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_mul_dx9_zero_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_mul_dx9_zero_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_mul_dx9_zero_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_dx9_zero_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_mul_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_mul_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_fmamk_f32 :ref:`vdstx`, 
:ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_mul_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_mul_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_mul_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_sub_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_sub_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_sub_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_sub_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_sub_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_add_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_add_nc_u32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_and_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_cndmask_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`vcc` + v_dual_subrev_f32_x_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, 
:ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_dot2acc_f32_f16 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_fmaak_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_subrev_f32_x_fmac_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_fmamk_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1`, :ref:`literal` + v_dual_subrev_f32_x_lshlrev_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_max_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_min_num_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_mov_b32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0` + v_dual_subrev_f32_x_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_mul_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_sub_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_subrev_f32_x_subrev_f32 :ref:`vdstx`, :ref:`vdsty`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`srcy0`, :ref:`vsrcy1` + +VOPDX +----- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_dual_add_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + v_dual_cndmask_b32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`vcc` + v_dual_dot2acc_f32_bf16 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + v_dual_dot2acc_f32_f16 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + v_dual_fmaak_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1`, :ref:`literal` + v_dual_fmac_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + v_dual_fmamk_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`literal`, :ref:`vsrcx1` + v_dual_max_num_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + v_dual_min_num_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + v_dual_mov_b32 :ref:`vdstx`, :ref:`srcx0` + v_dual_mul_dx9_zero_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + v_dual_mul_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + v_dual_sub_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + v_dual_subrev_f32 :ref:`vdstx`, :ref:`srcx0`, :ref:`vsrcx1` + +VOPDY +----- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + v_dual_add_nc_u32 :ref:`vdsty`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_and_b32 :ref:`vdsty`, :ref:`srcy0`, :ref:`vsrcy1` + v_dual_lshlrev_b32 :ref:`vdsty`, :ref:`srcy0`, :ref:`vsrcy1` + +VSAMPLE +------- + +.. parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + image_gather4 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_b :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_b_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_c :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_c_b :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_c_b_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_c_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_c_l :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_c_lz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_c_lz_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, 
:ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_l :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_lz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_lz_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_gather4h :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_get_lod :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_msaa_load :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_b :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_b_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_b_cl_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_b_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_b :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_b_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_b_cl_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_b_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, 
:ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_cl_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_d :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_d_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_d_cl_g16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_d_cl_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_d_cl_o_g16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_d_g16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_d_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_d_o_g16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_l :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_l_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_lz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_lz_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_c_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_cl_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_d :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + 
image_sample_d_cl :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_d_cl_g16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_d_cl_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_d_cl_o_g16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_d_g16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_d_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_d_o_g16 :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_l :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_l_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_lz :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_lz_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + image_sample_o :ref:`vdata`, :ref:`vaddr`, :ref:`rsrc`, :ref:`samp` :ref:`dmask` :ref:`tfe` :ref:`unorm` :ref:`lwe` :ref:`dim` :ref:`r128` :ref:`a16` :ref:`d16` :ref:`th` :ref:`scope` :ref:`nv` + +VSCRATCH +-------- + +.. 
parsed-literal:: + + **INSTRUCTION** **DST** **SRC0** **SRC1** **SRC2** **MODIFIERS** + \ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---|\ |---| + scratch_load_b128 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_b32 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_b64 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_b96 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_block :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_d16_b16 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_d16_hi_b16 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_d16_hi_i8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_d16_hi_u8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_d16_i8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_d16_u8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_i16 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_i8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_lds_b32 :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_lds_i16 :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_lds_i8 :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_lds_u16 :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_lds_u8 :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_u16 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_load_u8 :ref:`vdst`, :ref:`vaddr`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_store_b128 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_store_b16 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_store_b32 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_store_b64 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_store_b8 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_store_b96 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_store_block :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` 
:ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_store_d16_hi_b16 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + scratch_store_d16_hi_b8 :ref:`vaddr`, :ref:`vsrc`, :ref:`saddr` :ref:`offset` :ref:`th` :ref:`scope` :ref:`nv` + +.. |---| unicode:: U+02014 .. em dash + +.. toctree:: + :hidden: + + gfx12_addr + gfx12_attr + gfx12_data0_56f215 + gfx12_data0_6802ce + gfx12_data0_e016a1 + gfx12_data0_fd235e + gfx12_data1_6802ce + gfx12_data1_731030 + gfx12_data1_e016a1 + gfx12_data1_fd235e + gfx12_ioffset + gfx12_literal_1f74c7 + gfx12_literal_81e671 + gfx12_m + gfx12_rsrc_5fe6d8 + gfx12_rsrc_c9f929 + gfx12_saddr_cdc95c + gfx12_saddr_d42b64 + gfx12_samp + gfx12_sbase_453b95 + gfx12_sbase_47adb7 + gfx12_sdata_0974a4 + gfx12_sdata_354189 + gfx12_sdata_4585b8 + gfx12_sdata_5c7b50 + gfx12_sdata_6c003b + gfx12_sdata_836716 + gfx12_sdata_d725ab + gfx12_sdata_dd9dd8 + gfx12_sdst_006c40 + gfx12_sdst_20064d + gfx12_sdst_354189 + gfx12_sdst_836716 + gfx12_sdst_ced58d + gfx12_sdst_e701cc + gfx12_simm16_15ccdd + gfx12_simm16_218bea + gfx12_simm16_39b593 + gfx12_simm16_3d2a4f + gfx12_simm16_730a13 + gfx12_simm16_7ed651 + gfx12_simm16_81e671 + gfx12_simm16_c98889 + gfx12_simm16_cc1716 + gfx12_simm16_ee8b30 + gfx12_soffset_8ec073 + gfx12_soffset_c5b88c + gfx12_soffset_ec005a + gfx12_src0_5727cf + gfx12_src0_5cae62 + gfx12_src0_6802ce + gfx12_src0_85aab6 + gfx12_src0_c4593f + gfx12_src0_e016a1 + gfx12_src0_fd235e + gfx12_src1_5727cf + gfx12_src1_5cae62 + gfx12_src1_6802ce + gfx12_src1_731030 + gfx12_src1_977794 + gfx12_src1_c4593f + gfx12_src1_e016a1 + gfx12_src1_fd235e + gfx12_src2_2797bc + gfx12_src2_5727cf + gfx12_src2_5cae62 + gfx12_src2_6802ce + gfx12_src2_7b936a + gfx12_src2_96fbd3 + gfx12_src2_c4593f + gfx12_src2_e016a1 + gfx12_srcx0 + gfx12_srcy0 + gfx12_ssrc0_007f9c + gfx12_ssrc0_1a9ca5 + gfx12_ssrc0_245536 + gfx12_ssrc0_2797bc + gfx12_ssrc0_bbb4c6 + gfx12_ssrc0_c4593f + gfx12_ssrc1_bbb4c6 + gfx12_ssrc1_c4593f + gfx12_tgt + gfx12_vaddr_a972b9 + gfx12_vaddr_c12f43 + gfx12_vaddr_c8b8d4 + gfx12_vaddr_d82160 + gfx12_vaddr_f2b449 + gfx12_vcc + gfx12_vdata_2eda77 + gfx12_vdata_48e42f + gfx12_vdata_69a144 + gfx12_vdata_89680f + gfx12_vdata_aac3e8 + gfx12_vdata_bdb32f + gfx12_vdst_006c40 + gfx12_vdst_227281 + gfx12_vdst_2eda77 + gfx12_vdst_47d3bc + gfx12_vdst_48e42f + gfx12_vdst_69a144 + gfx12_vdst_7de8e7 + gfx12_vdst_836716 + gfx12_vdst_89680f + gfx12_vdst_bdb32f + gfx12_vdstx + gfx12_vdsty + gfx12_vsrc0 + gfx12_vsrc1_6802ce + gfx12_vsrc1_fd235e + gfx12_vsrc2 + gfx12_vsrc3 + gfx12_vsrc_56f215 + gfx12_vsrc_6802ce + gfx12_vsrc_89fd7b + gfx12_vsrc_e016a1 + gfx12_vsrc_fd235e + gfx12_vsrcx1 + gfx12_vsrcy1 + gfx12_clause + gfx12_delay + gfx12_hwreg + gfx12_imm16 + gfx12_label + gfx12_sendmsg + gfx12_sendmsg_rtn + gfx12_version + gfx12_waitcnt diff --git a/llvm/docs/AMDGPU/gfx12_addr.rst b/llvm/docs/AMDGPU/gfx12_addr.rst new file mode 100644 index 0000000000000..d2fc0e0cb2f4b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_addr.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_addr: + +addr +==== + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_attr.rst b/llvm/docs/AMDGPU/gfx12_attr.rst new file mode 100644 index 0000000000000..a6c5c275b349f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_attr.rst @@ -0,0 +1,28 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_attr: + +attr +==== + +Interpolation attribute and channel: + + ============== =================================== + Syntax Description + ============== =================================== + attr{0..32}.x Attribute 0..32 with *x* channel. + attr{0..32}.y Attribute 0..32 with *y* channel. + attr{0..32}.z Attribute 0..32 with *z* channel. + attr{0..32}.w Attribute 0..32 with *w* channel. + ============== =================================== + +Examples: + +.. parsed-literal:: + + ds_param_load v1, attr0.x diff --git a/llvm/docs/AMDGPU/gfx12_clause.rst b/llvm/docs/AMDGPU/gfx12_clause.rst new file mode 100644 index 0000000000000..88feb3b1d9974 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_clause.rst @@ -0,0 +1,7 @@ +.. _amdgpu_synid_clause: + +clause +====== + +Description of a clause following this instruction. + diff --git a/llvm/docs/AMDGPU/gfx12_data0_56f215.rst b/llvm/docs/AMDGPU/gfx12_data0_56f215.rst new file mode 100644 index 0000000000000..d8dde0013ed64 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data0_56f215.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data0_56f215: + +data0 +===== + +Instruction input. + +*Size:* 3 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_data0_6802ce.rst b/llvm/docs/AMDGPU/gfx12_data0_6802ce.rst new file mode 100644 index 0000000000000..02fe36f489229 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data0_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data0_6802ce: + +data0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_data0_e016a1.rst b/llvm/docs/AMDGPU/gfx12_data0_e016a1.rst new file mode 100644 index 0000000000000..914715bf30ea9 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data0_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data0_e016a1: + +data0 +===== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_data0_fd235e.rst b/llvm/docs/AMDGPU/gfx12_data0_fd235e.rst new file mode 100644 index 0000000000000..7617c61a94be3 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data0_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data0_fd235e: + +data0 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_data1_6802ce.rst b/llvm/docs/AMDGPU/gfx12_data1_6802ce.rst new file mode 100644 index 0000000000000..318db2daaeec3 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data1_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. 
_amdgpu_synid_gfx12_data1_6802ce: + +data1 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_data1_731030.rst b/llvm/docs/AMDGPU/gfx12_data1_731030.rst new file mode 100644 index 0000000000000..1a6eda65328ae --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data1_731030.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data1_731030: + +data1 +===== + +Instruction input. + +*Size:* 8 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_data1_e016a1.rst b/llvm/docs/AMDGPU/gfx12_data1_e016a1.rst new file mode 100644 index 0000000000000..dee4148c3d6d1 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data1_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data1_e016a1: + +data1 +===== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_data1_fd235e.rst b/llvm/docs/AMDGPU/gfx12_data1_fd235e.rst new file mode 100644 index 0000000000000..c8d4a88857d1f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_data1_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_data1_fd235e: + +data1 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_delay.rst b/llvm/docs/AMDGPU/gfx12_delay.rst new file mode 100644 index 0000000000000..600ece7fccfc5 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_delay.rst @@ -0,0 +1,74 @@ +.. _amdgpu_synid_delay: + +delay +===== + +A delay between dependent SALU/VALU instructions. +This operand may specify a delay for 2 instructions: +the one after the current *s_delay_alu* instruction +and for the second instruction indicated by *SKIP*. + +The bits of this operand have the following meaning: + + ===== ========================================================== ============ + Bits Description Value Range + ===== ========================================================== ============ + 3:0 ID0: indicates a delay for the first instruction. 0..11 + 6:4 SKIP: indicates the position of the second instruction. 0..5 + 10:7 ID1: indicates a delay for the second instruction. 0..11 + ===== ========================================================== ============ + +This operand may be specified as one of the following: + +* An :ref:`integer_number` or an :ref:`absolute_expression`. The value must be in the range 0..0xFFFF. +* A combination of *instid0*, *instskip*, *instid1* values described below. + + ======================== =========================== =============== + Syntax Description Default Value + ======================== =========================== =============== + instid0(<*ID name*>) A symbolic *ID0* value. instid0(NO_DEP) + instskip(<*SKIP name*>) A symbolic *SKIP* value. instskip(SAME) + instid1(<*ID name*>) A symbolic *ID1* value. instid1(NO_DEP) + ======================== =========================== =============== + +These values may be specified in any order. +When more than one value is specified, the values must be separated from each other by a '|'. + +Valid *ID names* are defined below. 
+ + =================== =================================================================== + Name Description + =================== =================================================================== + NO_DEP No dependency on any prior instruction. This is the default value. + VALU_DEP_1 Dependency on a previous VALU instruction, 1 opcode back. + VALU_DEP_2 Dependency on a previous VALU instruction, 2 opcodes back. + VALU_DEP_3 Dependency on a previous VALU instruction, 3 opcodes back. + VALU_DEP_4 Dependency on a previous VALU instruction, 4 opcodes back. + TRANS32_DEP_1 Dependency on a previous TRANS32 instruction, 1 opcode back. + TRANS32_DEP_2 Dependency on a previous TRANS32 instruction, 2 opcodes back. + TRANS32_DEP_3 Dependency on a previous TRANS32 instruction, 3 opcodes back. + FMA_ACCUM_CYCLE_1 Single cycle penalty for FMA accumulation. + SALU_CYCLE_1 1 cycle penalty for a prior SALU instruction. + SALU_CYCLE_2 2 cycle penalty for a prior SALU instruction. + SALU_CYCLE_3 3 cycle penalty for a prior SALU instruction. + =================== =================================================================== + +Legal *SKIP names* are described in the following table. + + ======== ============================================================================ + Name Description + ======== ============================================================================ + SAME Apply second dependency to the same instruction. This is the default value. + NEXT Apply second dependency to the next instruction. + SKIP_1 Skip 1 instruction then apply dependency. + SKIP_2 Skip 2 instructions then apply dependency. + SKIP_3 Skip 3 instructions then apply dependency. + SKIP_4 Skip 4 instructions then apply dependency. + ======== ============================================================================ + +Examples: + +.. parsed-literal:: + + s_delay_alu instid0(VALU_DEP_1) + s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) diff --git a/llvm/docs/AMDGPU/gfx12_hwreg.rst b/llvm/docs/AMDGPU/gfx12_hwreg.rst new file mode 100644 index 0000000000000..d99cb20df24ae --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_hwreg.rst @@ -0,0 +1,76 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_hwreg: + +hwreg +===== + +Bits of a hardware register being accessed. + +The bits of this operand have the following meaning: + + ======= ===================== ============ + Bits Description Value Range + ======= ===================== ============ + 5:0 Register *id*. 0..63 + 10:6 First bit *offset*. 0..31 + 15:11 *Size* in bits. 1..32 + ======= ===================== ============ + +This operand may be specified as one of the following: + +* An :ref:`integer_number` or an :ref:`absolute_expression`. The value must be in the range 0..0xFFFF. +* An *hwreg* value described below. + + ==================================== ============================================================================ + Hwreg Value Syntax Description + ==================================== ============================================================================ + hwreg({0..63}) All bits of a register indicated by its *id*. + hwreg(<*name*>) All bits of a register indicated by its *name*. + hwreg({0..63}, {0..31}, {1..32}) Register bits indicated by register *id*, first bit *offset* and *size*. 
+ hwreg(<*name*>, {0..31}, {1..32}) Register bits indicated by register *name*, first bit *offset* and *size*. + ==================================== ============================================================================ + +Numeric values may be specified as positive :ref:`integer numbers` +or :ref:`absolute expressions`. + +Defined register *names* include: + + =================== ========================================== + Name Description + =================== ========================================== + HW_REG_MODE Shader writeable mode bits. + HW_REG_STATUS Shader read-only status. + HW_REG_TRAPSTS Trap status. + HW_REG_HW_ID1 Id of wave, simd, compute unit, etc. + HW_REG_HW_ID2 Id of queue, pipeline, etc. + HW_REG_GPR_ALLOC Per-wave SGPR and VGPR allocation. + HW_REG_LDS_ALLOC Per-wave LDS allocation. + HW_REG_IB_STS Counters of outstanding instructions. + HW_REG_SH_MEM_BASES Memory aperture. + HW_REG_FLAT_SCR_LO flat_scratch_lo register. + HW_REG_FLAT_SCR_HI flat_scratch_hi register. + =================== ========================================== + +Examples: + +.. parsed-literal:: + + reg = 1 + offset = 2 + size = 4 + hwreg_enc = reg | (offset << 6) | ((size - 1) << 11) + + s_getreg_b32 s2, 0x1881 + s_getreg_b32 s2, hwreg_enc // the same as above + s_getreg_b32 s2, hwreg(1, 2, 4) // the same as above + s_getreg_b32 s2, hwreg(reg, offset, size) // the same as above + + s_getreg_b32 s2, hwreg(15) + s_getreg_b32 s2, hwreg(51, 1, 31) + s_getreg_b32 s2, hwreg(HW_REG_LDS_ALLOC, 0, 1) diff --git a/llvm/docs/AMDGPU/gfx12_imm16.rst b/llvm/docs/AMDGPU/gfx12_imm16.rst new file mode 100644 index 0000000000000..44e6d5856a558 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_imm16.rst @@ -0,0 +1,7 @@ +.. _amdgpu_synid_imm16: + +imm16 +====== + +An :ref:`integer_number` or an :ref:`absolute_expression`. The value must be in the range -32768..65535. + diff --git a/llvm/docs/AMDGPU/gfx12_ioffset.rst b/llvm/docs/AMDGPU/gfx12_ioffset.rst new file mode 100644 index 0000000000000..0901b774f8144 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ioffset.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ioffset: + +ioffset +======= + +*Size:* 1 dword. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_label.rst b/llvm/docs/AMDGPU/gfx12_label.rst new file mode 100644 index 0000000000000..bdd6e1cb1ee8d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_label.rst @@ -0,0 +1,29 @@ +.. _amdgpu_synid_label: + +label +===== + +A branch target which is a 16-bit signed integer treated as a PC-relative dword offset. + +This operand may be specified as one of the following: + +* An :ref:`integer_number` or an :ref:`absolute_expression`. The value must be in the range -32768..65535. +* A :ref:`symbol` (for example, a label) representing a relocatable address in the same compilation unit where it is referred from. The value is handled as a 16-bit PC-relative dword offset to be resolved by a linker. + +Examples: + +.. parsed-literal:: + + offset = 30 + label_1: + label_2 = . + 4 + + s_branch 32 + s_branch offset + 2 + s_branch label_1 + s_branch label_2 + s_branch label_3 + s_branch label_4 + + label_3 = label_2 + 4 + label_4: diff --git a/llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst b/llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst new file mode 100644 index 0000000000000..7442c5d5c89dc --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_literal_1f74c7.rst @@ -0,0 +1,15 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_literal_1f74c7: + +literal +======= + +*Size:* 2 dwords. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_literal_81e671.rst b/llvm/docs/AMDGPU/gfx12_literal_81e671.rst new file mode 100644 index 0000000000000..ab1b05601ff68 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_literal_81e671.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_literal_81e671: + +literal +======= + +*Size:* 1 dword. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_m.rst b/llvm/docs/AMDGPU/gfx12_m.rst new file mode 100644 index 0000000000000..7cfee90bae2ce --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_m.rst @@ -0,0 +1,13 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_m: + +m += + +This operand may be used with floating point operand modifiers :ref:`abs` and :ref:`neg`. diff --git a/llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst b/llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst new file mode 100644 index 0000000000000..d1a475f205329 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_rsrc_5fe6d8.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_rsrc_5fe6d8: + +rsrc +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst b/llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst new file mode 100644 index 0000000000000..180ae068d2ceb --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_rsrc_c9f929.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_rsrc_c9f929: + +rsrc +==== + +Instruction input. + +*Size:* 8 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst b/llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst new file mode 100644 index 0000000000000..4b3511fc76671 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_saddr_cdc95c.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_saddr_cdc95c: + +saddr +===== + +*Size:* 2 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst b/llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst new file mode 100644 index 0000000000000..d3de11dfaade8 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_saddr_d42b64.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_saddr_d42b64: + +saddr +===== + +*Size:* 1 dword. 
+ +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_samp.rst b/llvm/docs/AMDGPU/gfx12_samp.rst new file mode 100644 index 0000000000000..2bb15e58db5fe --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_samp.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_samp: + +samp +==== + +*Size:* 4 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sbase_453b95.rst b/llvm/docs/AMDGPU/gfx12_sbase_453b95.rst new file mode 100644 index 0000000000000..54c2deed4e5f1 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sbase_453b95.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sbase_453b95: + +sbase +===== + +A 128-bit buffer resource constant for scalar memory operations which provides a base address, a size and a stride. + +*Size:* 4 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst b/llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst new file mode 100644 index 0000000000000..2308b3d44585c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sbase_47adb7.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sbase_47adb7: + +sbase +===== + +A 64-bit base address for scalar memory operations. + +*Size:* 2 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst b/llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst new file mode 100644 index 0000000000000..d498f8c705210 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_0974a4.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_0974a4: + +sdata +===== + +Instruction output. + +*Size:* 8 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_354189.rst b/llvm/docs/AMDGPU/gfx12_sdata_354189.rst new file mode 100644 index 0000000000000..c50665474c461 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_354189.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_354189: + +sdata +===== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst b/llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst new file mode 100644 index 0000000000000..42f66f33e6ad4 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_4585b8.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_4585b8: + +sdata +===== + +Instruction output. + +*Size:* 4 dwords. 
+ +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst b/llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst new file mode 100644 index 0000000000000..028461a4a07da --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_5c7b50.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_5c7b50: + +sdata +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst b/llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst new file mode 100644 index 0000000000000..87e19a95f2b8b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_6c003b.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_6c003b: + +sdata +===== + +Instruction output. + +*Size:* 16 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_836716.rst b/llvm/docs/AMDGPU/gfx12_sdata_836716.rst new file mode 100644 index 0000000000000..be1bce94e7062 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_836716.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_836716: + +sdata +===== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst b/llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst new file mode 100644 index 0000000000000..c882df8dad6c1 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_d725ab.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_d725ab: + +sdata +===== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`simm8` diff --git a/llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst b/llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst new file mode 100644 index 0000000000000..64658894fee95 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdata_dd9dd8.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdata_dd9dd8: + +sdata +===== + +Instruction output. + +*Size:* 3 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_006c40.rst b/llvm/docs/AMDGPU/gfx12_sdst_006c40.rst new file mode 100644 index 0000000000000..f269b05c65edc --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_006c40.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_006c40: + +sdst +==== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`vcc` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_20064d.rst b/llvm/docs/AMDGPU/gfx12_sdst_20064d.rst new file mode 100644 index 0000000000000..83c11a2e03eae --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_20064d.rst @@ -0,0 +1,15 @@ +.. 
+ ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_20064d: + +sdst +==== + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_354189.rst b/llvm/docs/AMDGPU/gfx12_sdst_354189.rst new file mode 100644 index 0000000000000..8433406a20591 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_354189.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_354189: + +sdst +==== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_836716.rst b/llvm/docs/AMDGPU/gfx12_sdst_836716.rst new file mode 100644 index 0000000000000..abce5696f6716 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_836716.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_836716: + +sdst +==== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst b/llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst new file mode 100644 index 0000000000000..e0072d90a4cfd --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_ced58d.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_ced58d: + +sdst +==== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst b/llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst new file mode 100644 index 0000000000000..33e8c376af67f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sdst_e701cc.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_sdst_e701cc: + +sdst +==== + +Instruction output. + +*Size:* 1 dword if wavefront size is 32, otherwise 2 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_sendmsg.rst b/llvm/docs/AMDGPU/gfx12_sendmsg.rst new file mode 100644 index 0000000000000..cb51be04555fe --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sendmsg.rst @@ -0,0 +1,48 @@ +.. _amdgpu_synid_sendmsg: + +sendmsg +======= + +An 8-bit value in simm16[7:0] encodes the message type. + +This operand may be specified as one of the following: + +* An :ref:`integer_number` or an :ref:`absolute_expression`. The value must be in the range 0..0xFFFF. +* A *sendmsg* value described below. + + + ==================================== ==================================================== + Sendmsg Value Syntax Description + ==================================== ==================================================== + sendmsg(<*type*>) A message identified by its *type*. 
+ ==================================== ==================================================== + +*Type* may be specified using message *name* or message *id*. + +Numeric values may be specified as positive :ref:`integer numbers` +or :ref:`absolute expressions`. + + +Only the following message types are valid. + + ====================== =========== + Message type simm16[7:0] + ====================== =========== + Reserved 0 + MSG_INTERRUPT 1 + MSG_HS_TESSFACTOR 2 + MSG_DEALLOC_VGPRS 3 + MSG_GS_ALLOC_REQ 9 + ====================== =========== + +Examples: + +.. parsed-literal:: + + // numeric message code + msg = 0x1 + s_sendmsg 0x3 + s_sendmsg msg + 2 + + // sendmsg with strict arguments validation + s_sendmsg sendmsg(MSG_INTERRUPT) diff --git a/llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst b/llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst new file mode 100644 index 0000000000000..ebb591dc101e7 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_sendmsg_rtn.rst @@ -0,0 +1,30 @@ +.. _amdgpu_synid_sendmsg_rtn: + +sendmsg_rtn +=========== + +An 8-bit value in the instruction to encode the message type. + +This operand may be specified as one of the following: + + * An :ref:`integer_number` or an :ref:`absolute_expression`. The value must be in the range 0..0xFFFF. + * A *sendmsg* value described below. + + ==================================== ==================================================== + Sendmsg Value Syntax Description + ==================================== ==================================================== + sendmsg(MSG_RTN_GET_DOORBELL) Get doorbell ID. + sendmsg(MSG_RTN_GET_DDID) Get Draw/Dispatch ID. + sendmsg(MSG_RTN_GET_TMA) Get TMA value. + sendmsg(MSG_RTN_GET_TBA) Get TBA value. + sendmsg(MSG_RTN_GET_REALTIME) Get REALTIME value. + sendmsg(MSG_RTN_SAVE_WAVE) Report that this wave is ready to be context-saved. + ==================================== ==================================================== + +Examples: + +.. parsed-literal:: + + s_sendmsg_rtn_b32 s0, 132 + s_sendmsg_rtn_b32 s0, sendmsg(MSG_RTN_GET_REALTIME) + diff --git a/llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst b/llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst new file mode 100644 index 0000000000000..0cb123393a309 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_15ccdd.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_15ccdd: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`version` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_218bea.rst b/llvm/docs/AMDGPU/gfx12_simm16_218bea.rst new file mode 100644 index 0000000000000..e08605e0bfbfb --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_218bea.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_218bea: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`waitcnt` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_39b593.rst b/llvm/docs/AMDGPU/gfx12_simm16_39b593.rst new file mode 100644 index 0000000000000..babb4b689a519 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_39b593.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_39b593: + +simm16 +====== + +*Size:* 1 dword.
+ +*Operands:* :ref:`imm16` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst b/llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst new file mode 100644 index 0000000000000..cc8dbc6742803 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_3d2a4f.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_3d2a4f: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`label` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_730a13.rst b/llvm/docs/AMDGPU/gfx12_simm16_730a13.rst new file mode 100644 index 0000000000000..93596db9287be --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_730a13.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_730a13: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`clause` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst b/llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst new file mode 100644 index 0000000000000..fc63930c30334 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_7ed651.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_7ed651: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`hwreg` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_81e671.rst b/llvm/docs/AMDGPU/gfx12_simm16_81e671.rst new file mode 100644 index 0000000000000..16dcf397b48cf --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_81e671.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_81e671: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_simm16_c98889.rst b/llvm/docs/AMDGPU/gfx12_simm16_c98889.rst new file mode 100644 index 0000000000000..03e007af73690 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_c98889.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_c98889: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`delay` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst b/llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst new file mode 100644 index 0000000000000..e53f8125c3398 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_cc1716.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_simm16_cc1716: + +simm16 +====== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`hwreg` diff --git a/llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst b/llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst new file mode 100644 index 0000000000000..9bdac9b6056e7 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_simm16_ee8b30.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. 
_amdgpu_synid_gfx12_simm16_ee8b30: + +simm16 +====== + +*Size:* 1 dword. + +*Operands:* :ref:`sendmsg` diff --git a/llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst b/llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst new file mode 100644 index 0000000000000..44de0304b46cf --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_soffset_8ec073.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_soffset_8ec073: + +soffset +======= + +An unsigned 20-bit offset added to the base address to get memory address. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst b/llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst new file mode 100644 index 0000000000000..d115150d11d71 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_soffset_c5b88c.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_soffset_c5b88c: + +soffset +======= + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst b/llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst new file mode 100644 index 0000000000000..bd571b6499603 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_soffset_ec005a.rst @@ -0,0 +1,20 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_soffset_ec005a: + +soffset +======= + +An offset added to the base address to get memory address. + +* If offset is specified as a register, it supplies an unsigned byte offset. +* If offset is specified as a 21-bit immediate, it supplies a signed byte offset. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_src0_5727cf.rst b/llvm/docs/AMDGPU/gfx12_src0_5727cf.rst new file mode 100644 index 0000000000000..15fde5c33daab --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_5727cf.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_5727cf: + +src0 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v`, :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_src0_5cae62.rst b/llvm/docs/AMDGPU/gfx12_src0_5cae62.rst new file mode 100644 index 0000000000000..fa02f046b1804 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_5cae62.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_5cae62: + +src0 +==== + +Instruction input. + +*Size:* 2 dwords. 
+ +*Operands:* :ref:`v`, :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`scc`, :ref:`fconst`, :ref:`literal` diff --git a/llvm/docs/AMDGPU/gfx12_src0_6802ce.rst b/llvm/docs/AMDGPU/gfx12_src0_6802ce.rst new file mode 100644 index 0000000000000..e17a719c8b02c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_6802ce: + +src0 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_src0_85aab6.rst b/llvm/docs/AMDGPU/gfx12_src0_85aab6.rst new file mode 100644 index 0000000000000..effa6f69c6acb --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_85aab6.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_85aab6: + +src0 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`literal`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_src0_c4593f.rst b/llvm/docs/AMDGPU/gfx12_src0_c4593f.rst new file mode 100644 index 0000000000000..bbe6191f49944 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_c4593f: + +src0 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_src0_e016a1.rst b/llvm/docs/AMDGPU/gfx12_src0_e016a1.rst new file mode 100644 index 0000000000000..c2d23d737610d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_e016a1: + +src0 +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_src0_fd235e.rst b/llvm/docs/AMDGPU/gfx12_src0_fd235e.rst new file mode 100644 index 0000000000000..dc048af280704 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src0_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src0_fd235e: + +src0 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_src1_5727cf.rst b/llvm/docs/AMDGPU/gfx12_src1_5727cf.rst new file mode 100644 index 0000000000000..d1d08370eab76 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_5727cf.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_5727cf: + +src1 +==== + +Instruction input. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`v`, :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_src1_5cae62.rst b/llvm/docs/AMDGPU/gfx12_src1_5cae62.rst new file mode 100644 index 0000000000000..3ad591ce779a7 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_5cae62.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_5cae62: + +src1 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v`, :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`scc`, :ref:`fconst`, :ref:`literal` diff --git a/llvm/docs/AMDGPU/gfx12_src1_6802ce.rst b/llvm/docs/AMDGPU/gfx12_src1_6802ce.rst new file mode 100644 index 0000000000000..84ff631fc275d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_6802ce: + +src1 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_src1_731030.rst b/llvm/docs/AMDGPU/gfx12_src1_731030.rst new file mode 100644 index 0000000000000..8c67699145a1f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_731030.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_731030: + +src1 +==== + +Instruction input. + +*Size:* 8 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_src1_977794.rst b/llvm/docs/AMDGPU/gfx12_src1_977794.rst new file mode 100644 index 0000000000000..765134002d94b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_977794.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_977794: + +src1 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_src1_c4593f.rst b/llvm/docs/AMDGPU/gfx12_src1_c4593f.rst new file mode 100644 index 0000000000000..aba4da84faee5 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_c4593f: + +src1 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_src1_e016a1.rst b/llvm/docs/AMDGPU/gfx12_src1_e016a1.rst new file mode 100644 index 0000000000000..438585390ec88 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_e016a1: + +src1 +==== + +Instruction input. + +*Size:* 4 dwords. 
+ +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_src1_fd235e.rst b/llvm/docs/AMDGPU/gfx12_src1_fd235e.rst new file mode 100644 index 0000000000000..5863e93170ef6 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src1_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src1_fd235e: + +src1 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_src2_2797bc.rst b/llvm/docs/AMDGPU/gfx12_src2_2797bc.rst new file mode 100644 index 0000000000000..b393e2ac36b96 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_2797bc.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_2797bc: + +src2 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_src2_5727cf.rst b/llvm/docs/AMDGPU/gfx12_src2_5727cf.rst new file mode 100644 index 0000000000000..9ffaa079f6b91 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_5727cf.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_5727cf: + +src2 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v`, :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_src2_5cae62.rst b/llvm/docs/AMDGPU/gfx12_src2_5cae62.rst new file mode 100644 index 0000000000000..46d65cb3bad5b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_5cae62.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_5cae62: + +src2 +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v`, :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`scc`, :ref:`fconst`, :ref:`literal` diff --git a/llvm/docs/AMDGPU/gfx12_src2_6802ce.rst b/llvm/docs/AMDGPU/gfx12_src2_6802ce.rst new file mode 100644 index 0000000000000..0ad2ede9df4ac --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_6802ce: + +src2 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_src2_7b936a.rst b/llvm/docs/AMDGPU/gfx12_src2_7b936a.rst new file mode 100644 index 0000000000000..9f1ea3c3ab944 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_7b936a.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_7b936a: + +src2 +==== + +Instruction input. + +*Size:* 4 dwords. 
+ +*Operands:* :ref:`v`, :ref:`fconst` diff --git a/llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst b/llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst new file mode 100644 index 0000000000000..884d089d544c2 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_96fbd3.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_96fbd3: + +src2 +==== + +Instruction input. + +*Size:* 8 dwords. + +*Operands:* :ref:`v`, :ref:`fconst` diff --git a/llvm/docs/AMDGPU/gfx12_src2_c4593f.rst b/llvm/docs/AMDGPU/gfx12_src2_c4593f.rst new file mode 100644 index 0000000000000..849230b5a56a2 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_c4593f: + +src2 +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_src2_e016a1.rst b/llvm/docs/AMDGPU/gfx12_src2_e016a1.rst new file mode 100644 index 0000000000000..266c4eaedf72d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_src2_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_src2_e016a1: + +src2 +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_srcx0.rst b/llvm/docs/AMDGPU/gfx12_srcx0.rst new file mode 100644 index 0000000000000..57b05a18c3100 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_srcx0.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_srcx0: + +srcx0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v`, :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_srcy0.rst b/llvm/docs/AMDGPU/gfx12_srcy0.rst new file mode 100644 index 0000000000000..350b7428668ba --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_srcy0.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_srcy0: + +srcy0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v`, :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst new file mode 100644 index 0000000000000..c3f33e4f78fdd --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_007f9c.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_007f9c: + +ssrc0 +===== + +Instruction input. + +*Size:* 1 dword. 
+ +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst new file mode 100644 index 0000000000000..5aa3f2d3585ac --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_1a9ca5.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_1a9ca5: + +ssrc0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`m0` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst new file mode 100644 index 0000000000000..36925daf7a86c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_245536.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_245536: + +ssrc0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`sendmsg_rtn` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst new file mode 100644 index 0000000000000..4eae7050ea714 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_2797bc.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_2797bc: + +ssrc0 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst new file mode 100644 index 0000000000000..a29f83d36d48f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_bbb4c6.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_bbb4c6: + +ssrc0 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`scc`, :ref:`fconst`, :ref:`literal` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst b/llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst new file mode 100644 index 0000000000000..33ca4d608d7df --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc0_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc0_c4593f: + +ssrc0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst b/llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst new file mode 100644 index 0000000000000..1f3ea343f3a09 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc1_bbb4c6.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc1_bbb4c6: + +ssrc1 +===== + +Instruction input. + +*Size:* 2 dwords. 
+ +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`scc`, :ref:`fconst`, :ref:`literal` diff --git a/llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst b/llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst new file mode 100644 index 0000000000000..f81d0f203f07b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_ssrc1_c4593f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_ssrc1_c4593f: + +ssrc1 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`m0`, :ref:`scc`, :ref:`fconst`, :ref:`literal`, :ref:`exec_hi`, :ref:`exec_lo`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_tgt.rst b/llvm/docs/AMDGPU/gfx12_tgt.rst new file mode 100644 index 0000000000000..83a25aa466bfb --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_tgt.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_tgt: + +tgt +=== + +Instruction output. + +*Size:* 4 dwords. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst b/llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst new file mode 100644 index 0000000000000..223b50d6ef205 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_a972b9.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_a972b9: + +vaddr +===== + +*Size:* 11 dwords. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst b/llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst new file mode 100644 index 0000000000000..5a93efec9f86a --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_c12f43.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_c12f43: + +vaddr +===== + +*Size:* 12 dwords. + +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst b/llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst new file mode 100644 index 0000000000000..1998e1ddc9504 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_c8b8d4.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_c8b8d4: + +vaddr +===== + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst b/llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst new file mode 100644 index 0000000000000..92d09a2399a2f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_d82160.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_d82160: + +vaddr +===== + +*Size:* 4 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst b/llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst new file mode 100644 index 0000000000000..10d7e0ad1fce4 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vaddr_f2b449.rst @@ -0,0 +1,15 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vaddr_f2b449: + +vaddr +===== + +*Size:* 2 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vcc.rst b/llvm/docs/AMDGPU/gfx12_vcc.rst new file mode 100644 index 0000000000000..e8509ff50a32f --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vcc.rst @@ -0,0 +1,16 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vcc: + +vcc +=== + +Vector condition code. This operand depends on wavefront size: + +* Should be :ref:`vcc_lo` if wavefront size is 32. +* Should be :ref:`vcc` if wavefront size is 64. diff --git a/llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst b/llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst new file mode 100644 index 0000000000000..839ec86ce2634 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_2eda77.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_2eda77: + +vdata +===== + +Instruction output. + +*Size:* 32 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst b/llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst new file mode 100644 index 0000000000000..d2ab49a951684 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_48e42f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_48e42f: + +vdata +===== + +Instruction output. + +*Size:* 3 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdata_69a144.rst b/llvm/docs/AMDGPU/gfx12_vdata_69a144.rst new file mode 100644 index 0000000000000..22ac087b51e8e --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_69a144.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_69a144: + +vdata +===== + +Instruction output. + +*Size:* 4 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdata_89680f.rst b/llvm/docs/AMDGPU/gfx12_vdata_89680f.rst new file mode 100644 index 0000000000000..5f4f4782e410d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_89680f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_89680f: + +vdata +===== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst b/llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst new file mode 100644 index 0000000000000..2e285ef86eebc --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_aac3e8.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_aac3e8: + +vdata +===== + +Instruction output. + +*Size:* 10 dwords. 
+ +*Operands:* diff --git a/llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst b/llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst new file mode 100644 index 0000000000000..109c7672541a5 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdata_bdb32f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdata_bdb32f: + +vdata +===== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_006c40.rst b/llvm/docs/AMDGPU/gfx12_vdst_006c40.rst new file mode 100644 index 0000000000000..dc3ac95500037 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_006c40.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_006c40: + +vdst +==== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`vcc` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_227281.rst b/llvm/docs/AMDGPU/gfx12_vdst_227281.rst new file mode 100644 index 0000000000000..13fd9513245dd --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_227281.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_227281: + +vdst +==== + +Instruction output. + +*Size:* 4 dwords if wavefront size is 64, otherwise 8 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst b/llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst new file mode 100644 index 0000000000000..9372e484cf5d9 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_2eda77.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_2eda77: + +vdst +==== + +Instruction output. + +*Size:* 32 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst b/llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst new file mode 100644 index 0000000000000..056fe3f197417 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_47d3bc.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_47d3bc: + +vdst +==== + +Instruction output. + +*Size:* 8 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst b/llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst new file mode 100644 index 0000000000000..84ab35b36b7b3 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_48e42f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_48e42f: + +vdst +==== + +Instruction output. + +*Size:* 3 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_69a144.rst b/llvm/docs/AMDGPU/gfx12_vdst_69a144.rst new file mode 100644 index 0000000000000..70873ff9502b8 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_69a144.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! 
* + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_69a144: + +vdst +==== + +Instruction output. + +*Size:* 4 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst b/llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst new file mode 100644 index 0000000000000..7248ea9449236 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_7de8e7.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_7de8e7: + +vdst +==== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`exec` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_836716.rst b/llvm/docs/AMDGPU/gfx12_vdst_836716.rst new file mode 100644 index 0000000000000..1cd43ee9f620a --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_836716.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_836716: + +vdst +==== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`s`, :ref:`ttmp`, :ref:`null`, :ref:`vcc_hi`, :ref:`vcc_lo` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_89680f.rst b/llvm/docs/AMDGPU/gfx12_vdst_89680f.rst new file mode 100644 index 0000000000000..b4f055cc1574d --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_89680f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_89680f: + +vdst +==== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst b/llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst new file mode 100644 index 0000000000000..e2a4a47987b7c --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdst_bdb32f.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdst_bdb32f: + +vdst +==== + +Instruction output. + +*Size:* 2 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdstx.rst b/llvm/docs/AMDGPU/gfx12_vdstx.rst new file mode 100644 index 0000000000000..4b95d4d0d84ba --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdstx.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdstx: + +vdstx +===== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vdsty.rst b/llvm/docs/AMDGPU/gfx12_vdsty.rst new file mode 100644 index 0000000000000..cf0b4641308be --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vdsty.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vdsty: + +vdsty +===== + +Instruction output. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_version.rst b/llvm/docs/AMDGPU/gfx12_version.rst new file mode 100644 index 0000000000000..4e490ca4954a9 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_version.rst @@ -0,0 +1,7 @@ +.. 
_amdgpu_synid_version: + +version +======= + +Microcode version header. + diff --git a/llvm/docs/AMDGPU/gfx12_vsrc0.rst b/llvm/docs/AMDGPU/gfx12_vsrc0.rst new file mode 100644 index 0000000000000..fb381690c3692 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc0.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc0: + +vsrc0 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst b/llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst new file mode 100644 index 0000000000000..449054574be9b --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc1_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc1_6802ce: + +vsrc1 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst b/llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst new file mode 100644 index 0000000000000..d6567c2fd9cef --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc1_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc1_fd235e: + +vsrc1 +===== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc2.rst b/llvm/docs/AMDGPU/gfx12_vsrc2.rst new file mode 100644 index 0000000000000..fe20832437431 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc2.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc2: + +vsrc2 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc3.rst b/llvm/docs/AMDGPU/gfx12_vsrc3.rst new file mode 100644 index 0000000000000..18df9e4418f0e --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc3.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc3: + +vsrc3 +===== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst b/llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst new file mode 100644 index 0000000000000..166da38acb079 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_56f215.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc_56f215: + +vsrc +==== + +Instruction input. + +*Size:* 3 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst b/llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst new file mode 100644 index 0000000000000..e879c2bad2038 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_6802ce.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. 
_amdgpu_synid_gfx12_vsrc_6802ce: + +vsrc +==== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst b/llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst new file mode 100644 index 0000000000000..c521e7261c59e --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_89fd7b.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc_89fd7b: + +vsrc +==== + +Instruction input. + +*Size:* 32 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst b/llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst new file mode 100644 index 0000000000000..84eb2eda944b7 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_e016a1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc_e016a1: + +vsrc +==== + +Instruction input. + +*Size:* 4 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst b/llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst new file mode 100644 index 0000000000000..640a235730f93 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrc_fd235e.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrc_fd235e: + +vsrc +==== + +Instruction input. + +*Size:* 2 dwords. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrcx1.rst b/llvm/docs/AMDGPU/gfx12_vsrcx1.rst new file mode 100644 index 0000000000000..9dab58c459b2e --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrcx1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrcx1: + +vsrcx1 +====== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_vsrcy1.rst b/llvm/docs/AMDGPU/gfx12_vsrcy1.rst new file mode 100644 index 0000000000000..496b2d66d2ff5 --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_vsrcy1.rst @@ -0,0 +1,17 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_gfx12_vsrcy1: + +vsrcy1 +====== + +Instruction input. + +*Size:* 1 dword. + +*Operands:* :ref:`v` diff --git a/llvm/docs/AMDGPU/gfx12_waitcnt.rst b/llvm/docs/AMDGPU/gfx12_waitcnt.rst new file mode 100644 index 0000000000000..454122212f64e --- /dev/null +++ b/llvm/docs/AMDGPU/gfx12_waitcnt.rst @@ -0,0 +1,55 @@ +.. + ************************************************** + * * + * Automatically generated file, do not edit! * + * * + ************************************************** + +.. _amdgpu_synid_waitcnt: + +waitcnt +======= + +Counts of outstanding instructions to wait for. + +The bits of this operand have the following meaning: + + ===== ================================================ ============ + Bits Description Value Range + ===== ================================================ ============ + 2:0 EXP_CNT: export and LDSDIR count. 0..7 + 3:3 Unused \- + 9:4 LGKM_CNT: LDS, GDS, Constant and Message count. 0..63 + 15:10 VM_CNT: vector memory operations count. 
0..63 + ===== ================================================ ============ + +This operand may be specified as one of the following: + +* An :ref:`integer_number` or an :ref:`absolute_expression`. The value must be in the range 0..0xFFFF. +* A combination of *vmcnt*, *expcnt*, *lgkmcnt* and other values described below. + + ====================== ====================================================================== + Syntax Description + ====================== ====================================================================== + vmcnt(<*N*>) A VM_CNT value. *N* must not exceed the largest VM_CNT value. + expcnt(<*N*>) An EXP_CNT value. *N* must not exceed the largest EXP_CNT value. + lgkmcnt(<*N*>) An LGKM_CNT value. *N* must not exceed the largest LGKM_CNT value. + vmcnt_sat(<*N*>) A VM_CNT value computed as min(*N*, the largest VM_CNT value). + expcnt_sat(<*N*>) An EXP_CNT value computed as min(*N*, the largest EXP_CNT value). + lgkmcnt_sat(<*N*>) An LGKM_CNT value computed as min(*N*, the largest LGKM_CNT value). + ====================== ====================================================================== + +These values may be specified in any order. Spaces, ampersands and commas may be used as optional separators. + +*N* is either an +:ref:`integer number` or an +:ref:`absolute expression`. + +Examples: + +.. parsed-literal:: + + s_waitcnt vmcnt(1) + s_waitcnt expcnt(2) lgkmcnt(3) + s_waitcnt vmcnt(1), expcnt(2), lgkmcnt(3) + s_waitcnt vmcnt(1) & lgkmcnt_sat(100) & expcnt(2) diff --git a/llvm/docs/AMDGPUModifierSyntax.rst b/llvm/docs/AMDGPUModifierSyntax.rst index 334bdafefbbe2..8a60663b7303c 100644 --- a/llvm/docs/AMDGPUModifierSyntax.rst +++ b/llvm/docs/AMDGPUModifierSyntax.rst @@ -1078,6 +1078,73 @@ Examples: offset:0xfffff offset:-x +.. _amdgpu_synid_smem_offset24s: + +offset24s +~~~~~~~~~ + +Specifies a signed 24-bit offset, in bytes. The default value is 0. + + ============================= ==================================================================== + Syntax Description + ============================= ==================================================================== + offset:{-0x1000000..0xFFFFFF} Specifies an offset as an + :ref:`integer number ` + or an :ref:`absolute expression`. + ============================= ==================================================================== + +Examples: + +.. parsed-literal:: + + offset:-1 + offset:0xfffff + offset:-x + +.. _amdgpu_synid_th: + +th +~~ + +Specifies temporal hint of memory operation. + + =============================== ========================================================= + Syntax Description + =============================== ========================================================= + TH_{LOAD|STORE}_RT Regular + TH_{LOAD|STORE}_NT Non-temporal + TH_{LOAD|STORE}_HT High-temporal + TH_{LOAD|STORE}_LU Last use. Not available in SYS scope. + TH_{LOAD|STORE}_WB Regular (CU, SE); High-temporal with write-back (MALL) + TH_{LOAD|STORE}_NT_RT Non-temporal (CU, SE); Regular (MALL) + TH_{LOAD|STORE}_RT_NT Regular (CU, SE); Non-temporal (MALL) + TH_{LOAD|STORE}_NT_HT Non-temporal (CU, SE); High-temporal (MALL) + TH_{LOAD|STORE}_NT_WB Non-temporal (CU, SE); High-temporal with write-back (MALL) + TH_{LOAD|STORE}_BYPASS Available for SYS scope only. + TH_ATOMIC_RT Regular + TH_ATOMIC_RT_RETURN Regular. For atomic instructions that return values. + TH_ATOMIC_NT Non-temporal + TH_ATOMIC_NT_RETURN Non-temporal. For atomic instructions that return values. + TH_ATOMIC_CASCADE_RT Cascading atomic; Regular. 
+ TH_ATOMIC_CASCADE_NT Cascading atomic; Non-temporal. + =============================== ========================================================= + +.. _amdgpu_synid_scope: + +scope +~~~~~ + +Specifies scope of memory operation. + + =============================== ========================================================= + Syntax Description + =============================== ========================================================= + SCOPE_CU Coherency within a Compute Unit. + SCOPE_SE Coherency within a Shader Engine. + SCOPE_DEV Coherency within a single device. + SCOPE_SYS Coherency across the full system. + =============================== ========================================================= + VINTRP/VINTERP/LDSDIR Modifiers ------------------------------- @@ -1117,6 +1184,27 @@ The default value is zero. This is a safe value, but it may be suboptimal. issuing this instruction. ================ ====================================================== +.. _amdgpu_synid_wait_va_vdst: + +wait_va_vdst +~~~~~~~~~~~~ + +Manually specify a wait on the VA_VDST counter before issuing this instruction. VA_VDST must be less +than or equal to this value before the instruction is issued. If set to 15, no wait is performed. + +If unspecified the current default is zero. This is a safe value but may have poor performance characteristics. + +This modifier is a shorthand for the WAR hazard where VALU reads a VGPR that is written by a parameter +load. Since there is no VA_VSRC counter we must use VA_VDST as a proxy to detect when the +VALU instruction has completed: + +Examples: + +.. parsed-literal:: + + v_mov_b32 v1, v0 + ds_param_load v0, . . . wait_va_vdst:0 + .. _amdgpu_synid_wait_vdst: wait_vdst @@ -1135,6 +1223,27 @@ The default value is zero. This is a safe value, but it may be suboptimal. issuing this instruction. ================== ====================================================== +.. _amdgpu_synid_wait_vm_vsrc: + +wait_vm_vsrc +~~~~~~~~~~~~ + +Manually specify a wait on the VM_VSRC counter before issuing this instruction. VM_VSRC must be less +than or equal to this value before the instruction is issued. If set to 1, no wait is performed. + +If unspecified the current default is zero. This is a safe value but may have poor performance characteristics. + +This modifier is a shorthand for the WAR hazard where VMEM reads a VGPR that is written by a parameter +load. + +Examples: + +.. parsed-literal:: + + buffer_load_b32 v1, v0, s0, 0 + ds_param_load v0, . . . wait_vm_vsrc:0 + + DPP8 Modifiers -------------- diff --git a/llvm/docs/AMDGPUOperandSyntax.rst b/llvm/docs/AMDGPUOperandSyntax.rst index e8a76322fe76a..722290fb72e16 100644 --- a/llvm/docs/AMDGPUOperandSyntax.rst +++ b/llvm/docs/AMDGPUOperandSyntax.rst @@ -479,6 +479,7 @@ High and low 32 bits of *xnack mask* may be accessed as separate registers: .. _amdgpu_synid_vcc: .. _amdgpu_synid_vcc_lo: +.. _amdgpu_synid_vcc_hi: vcc --- @@ -523,6 +524,8 @@ including register indexing and bounds checking. =========== =================================================== .. _amdgpu_synid_exec: +.. _amdgpu_synid_exec_lo: +.. _amdgpu_synid_exec_hi: exec ---- @@ -752,6 +755,14 @@ or an :ref:`absolute expression`. The value must be in the range -0x100000..0x0FFFFF. +.. _amdgpu_synid_simm8: + +simm8 +----- + +An 8-bit :ref:`integer number` +or an :ref:`absolute expression`. + .. 
_amdgpu_synid_off: off diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst index edabdc595a1f0..a4d110fbbb38b 100644 --- a/llvm/docs/AMDGPUUsage.rst +++ b/llvm/docs/AMDGPUUsage.rst @@ -22,6 +22,7 @@ User Guide for AMDGPU Backend AMDGPU/AMDGPUAsmGFX1013 AMDGPU/AMDGPUAsmGFX1030 AMDGPU/AMDGPUAsmGFX11 + AMDGPU/AMDGPUAsmGFX12 AMDGPUModifierSyntax AMDGPUOperandSyntax AMDGPUInstructionSyntax @@ -979,11 +980,13 @@ supported for the ``amdgcn`` target. access is not supported except by flat and scratch instructions in GFX9-GFX11. - Code that manipulates the stack values in other lanes of a wavefront, - such as by ``addrspacecast``-ing stack pointers to generic ones and taking offsets - that reach other lanes or by explicitly constructing the scratch buffer descriptor, - triggers undefined behavior when it modifies the scratch values of other lanes. - The compiler may assume that such modifications do not occur. + On targets without "Globally Accessible Scratch" (introduced in GFX125x), code that + manipulates the stack values in other lanes of a wavefront, such as by + ``addrspacecast``-ing stack pointers to generic ones and taking offsets that reach other + lanes or by explicitly constructing the scratch buffer descriptor, triggers undefined + behavior when it modifies the scratch values of other lanes. The compiler may assume + that such modifications do not occur for such targets. + When using code object V5 ``LIBOMPTARGET_STACK_SIZE`` may be used to provide the private segment size in bytes, for cases where a dynamic stack is used. @@ -1515,6 +1518,88 @@ The AMDGPU backend implements the following LLVM IR intrinsics. List AMDGPU intrinsics. +'``llvm.amdgcn.cooperative.atomic``' Intrinsics +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``llvm.amdgcn.cooperative.atomic`` :ref:`family of intrinsics` +provide atomic load and store operations to a naturally-aligned contiguous memory regions. +Memory is accessed cooperatively by a collection of convergent threads, with each thread accessing +a fraction of the contiguous memory region. + + .. TODO:: + + The memory model described here is imprecise; see SWDEV-536264. + +This intrinsic has a memory ordering and may be used to synchronize-with another cooperative atomic. +If the memory ordering is relaxed, it may pair with a fence if that same fence is executed by +all participating threads with the same synchronization scope and set of address spaces. + +In both cases, a synchronize-with relation can only be established between cooperative atomics with the +same total access size. + +Each target may have additional restrictions on how the intrinsic may be used; see +:ref:`the table below`. +Targets not covered in the table do not support these intrinsics. + + .. table:: AMDGPU Cooperative Atomic Intrinsics Availability + :name: amdgpu-llvm-ir-cooperative-atomic-intrinsics-availability + + =============== ============================================================= + GFX Version Target Restrictions + =============== ============================================================= + GFX 12.5 :ref:`amdgpu-amdhsa-memory-model-gfx125x-cooperative-atomics` + =============== ============================================================= + +If the intrinsic is used without meeting all of the above conditions, or the target-specific conditions, +then this intrinsic causes undefined behavior. + + .. 
table:: AMDGPU Cooperative Atomic Intrinsics + :name: amdgpu-cooperative-atomic-intrinsics-table + + ======================================================= =========== ============ ========== + LLVM Intrinsic Number of Access Size Total Size + Threads Per Thread + Used + ======================================================= =========== ============ ========== + ``llvm.amdgcn.cooperative.atomic.store.32x4B`` 32 4B 128B + + ``llvm.amdgcn.cooperative.atomic.load.32x4B`` 32 4B 128B + + ``llvm.amdgcn.cooperative.atomic.store.16x8B`` 16 8B 128B + + ``llvm.amdgcn.cooperative.atomic.load.16x8B`` 16 8B 128B + + ``llvm.amdgcn.cooperative.atomic.store.8x16B`` 8 16B 128B + + ``llvm.amdgcn.cooperative.atomic.load.8x16B`` 8 16B 128B + + ======================================================= =========== ============ ========== + +The intrinsics are available for the global (``.p1`` suffix) and generic (``.p0`` suffix) address spaces. + +The atomic ordering operand (3rd operand for ``.store``, 2nd for ``.load``) is an integer that follows the +C ABI encoding of atomic memory orderings. The supported values are in +:ref:`the table below`. + + .. table:: AMDGPU Cooperative Atomic Intrinsics Atomic Memory Orderings + :name: amdgpu-cooperative-atomic-intrinsics-atomic-memory-orderings-table + + ====== ================ ================================= + Value Atomic Memory Notes + Ordering + ====== ================ ================================= + ``0`` ``relaxed`` The default for unsupported values. + + ``2`` ``acquire`` Only for ``.load`` + + ``3`` ``release`` Only for ``.store`` + + ``5`` ``seq_cst`` + ====== ================ ================================= + +The last argument of the intrinsic is the synchronization scope +as a metadata string, which must be one of the supported :ref:`memory scopes`. + .. _amdgpu_metadata: LLVM IR Metadata @@ -1843,6 +1928,7 @@ The AMDGPU backend supports the following LLVM IR attributes. This is only relevant on targets with cluster support. + ================================================ ========================================================== Calling Conventions @@ -5261,6 +5347,9 @@ The fields used by CP for code objects before V3 also match those specified in GFX10-GFX12 (wavefront size 32) - max_vgpr 1..256 - max(0, ceil(vgprs_used / 8) - 1) + GFX125X (wavefront size 32) + - max_vgpr 1..1024 + - max(0, ceil(vgprs_used / 16) - 1) Where vgprs_used is defined as the highest VGPR number @@ -6491,6 +6580,7 @@ following sections: * :ref:`amdgpu-amdhsa-memory-model-gfx942` * :ref:`amdgpu-amdhsa-memory-model-gfx10-gfx11` * :ref:`amdgpu-amdhsa-memory-model-gfx12` +* :ref:`amdgpu-amdhsa-memory-model-gfx125x` .. _amdgpu-fence-as: @@ -16617,6 +16707,2022 @@ the instruction in the code sequence that references the table. - system for OpenCL.* ============ ============ ============== ========== ================================ +.. _amdgpu-amdhsa-memory-model-gfx125x: + +Memory Model GFX125x +++++++++++++++++++++++++ + +For GFX125x: + +**Device Structure:** + +* Each agent has multiple shader engines (SE). +* Each SE has multiple shader arrays (SA). +* Each SA has multiple work-group processors (WGP). +* Each WGP has 4 SIMD32 (2 SIMD32-pairs) that execute wavefronts. +* The wavefronts for a single work-group are executed in the same + WGP. + +**Device Memory:** + +* Each WGP has a single write-through WGP cache (WGP$) shared by the wavefronts of the + work-groups executing on it. The WGP$ is divided between LDS and vector L0 memory. 
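Stepping back to the ``llvm.amdgcn.cooperative.atomic`` intrinsics described above: their ordering operand uses the C ABI integer encoding from the orderings table. The following is a minimal C++ sketch that restates that table; the helper name is hypothetical and is not part of any LLVM or ROCm API.

.. code-block:: c++

   #include <atomic>
   #include <cstdint>

   // Hypothetical helper (illustration only): map a C++ memory ordering to the
   // integer encoding documented for the llvm.amdgcn.cooperative.atomic.*
   // intrinsics (0 = relaxed, 2 = acquire, 3 = release, 5 = seq_cst).
   // Unsupported values fall back to 0 (relaxed), matching the documented default.
   constexpr uint32_t encodeCooperativeAtomicOrdering(std::memory_order Order,
                                                      bool IsLoad) {
     switch (Order) {
     case std::memory_order_acquire:
       return IsLoad ? 2u : 0u; // acquire is only supported for .load
     case std::memory_order_release:
       return IsLoad ? 0u : 3u; // release is only supported for .store
     case std::memory_order_seq_cst:
       return 5u;
     default:
       return 0u;               // relaxed
     }
   }

   static_assert(encodeCooperativeAtomicOrdering(std::memory_order_acquire,
                                                 /*IsLoad=*/true) == 2u, "");
   static_assert(encodeCooperativeAtomicOrdering(std::memory_order_release,
                                                 /*IsLoad=*/false) == 3u, "");

The acq_rel encoding (4) is absent from the table, presumably because each intrinsic is either a pure load or a pure store.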
+ + * Vector L0 memory holds clean data only. + +* Each WGP$ has two request queues; one per SIMD32-pair. + Each queue can handle both LDS and vector L0 requests. Requests in one queue + are executed serially and in-order, but are not kept in order with the other queue. +* The scalar memory operations access a scalar L0 cache shared by all wavefronts + on a WGP. The scalar and vector L0 caches are not kept coherent by hardware. However, scalar + operations are used in a restricted way so do not impact the memory model. See + :ref:`amdgpu-amdhsa-memory-spaces`. +* The vector and scalar memory L0 caches are both clients of an L1 buffer shared by + all WGPs on the same SE. +* L1 buffers have separate request queues for each WGP$ it serves. Requests in one queue + are executed serially and in-order, but are not kept in order with other queues. +* L1 buffers are clients of the L2 cache. +* There may be multiple L2 caches per agent. Ranges of virtual addresses can be set up as follows: + + * Be non-hardware-coherent; copies of the data are not coherent between multiple L2s. + * Be read-write hardware-coherent with other L2 caches on the same or other agents. + * Bypass L2 entirely to ensure system coherence. + +* L2 caches have multiple memory channels to service disjoint ranges of virtual + addresses. + +**Memory Model:** + +.. note:: + + This section is currently incomplete as work on the compiler is still ongoing. + The following is a non-exhaustive list of unimplemented/undocumented features: + non-volatile bit code sequences, monitor and wait, globally accessing scratch atomics, + multicast loads, barriers (including split barriers) and cooperative atomics. + Scalar operations memory model needs more elaboration as well. + +* Vector memory operations are performed as wavefront wide operations, with the + ``EXEC`` mask predicating which lanes execute. +* Consecutive vector memory operations from the same wavefront are issued in program order. + Vector memory operations are issued (and executed) in no particular order between wavefronts. +* Wave execution of a vector memory operation instruction issues (initiates) the operation, + but completion occurs an unspecified amount of time later. + The ``s_wait_*cnt`` instructions must be used to determine if the operation has completed. +* The types of vector memory operations (and their associated ``s_wait_*cnt`` instructions) are: + + * Load (global, scratch, flat, buffer): ``s_wait_loadcnt`` + * Store (global, scratch, flat, buffer): ``s_wait_storecnt`` + * non-ASYNC LDS: ``s_wait_dscnt`` + * ASYNC LDS: ``s_wait_asynccnt`` + * Tensor: ``s_wait_tensorcnt`` + +* ``s_wait_xcnt`` is a counter that is incremented when a memory operation is issued, and + decremented when memory address translation for that instruction is completed. + Waiting on a memory counter ``s_wait_*cnt N`` also waits on ``s_wait_xcnt N``. + + * ``s_wait_xcnt 0x0`` is required before flat and global atomic stores/read-modify-write + operations to guarantee atomicity during a xnack replay. + +* Within a wavefront, vector memory operation completion (``s_wait_*cnt`` decrement) is + reported in order of issue within a type, but in no particular order between types. +* Within a wavefront, the order in which data is returned to registers by a vector memory + operation can be different from the order in which the vector memory operations were issued. 
+ + * Thus, an ``s_wait_*cnt`` instruction must be used to prevent multiple vector memory operations + that return results to the same register from executing concurrently as they may not return + their results in instruction issue order, even though they will be reported as completed in + instruction issue order by the decrementing of the counter. + +* Within a wavefront, consecutive loads and stores to the same address will be processed in program order + by the memory subsystem. Loads and stores to different addresses may be processed + out of order with respect to each other. +* All non-ASYNC LDS vector memory operations of a WGP are performed as wavefront wide + operations in a global order and involve no caching. Completion is reported to a wavefront in + execution order. +* ASYNC LDS and tensor vector memory operations are not covered by the memory model implemented + by the AMDGPU backend. Neither ``s_wait_asynccnt`` nor ``s_wait_tensorcnt`` are inserted + automatically. They must be emitted using compiler built-in calls. +* Some vector memory operations contain a ``SCOPE`` field with values + corresponding to each cache level. The ``SCOPE`` determines whether a cache + can complete an operation locally or whether it needs to forward the operation + to the next cache level. The ``SCOPE`` values are: + + * ``SCOPE_CU``: WGP + * ``SCOPE_SE``: Shader Engine + * ``SCOPE_DEV``: Device/Agent + * ``SCOPE_SYS``: System + +* Each cache is assigned a ``SCOPE`` by the hardware depending on the agent's + configuration. + + * This ensures that ``SCOPE_DEV`` can always be used to implement agent coherence, + even in the presence of multiple non-coherent L2 caches on the same agent. + +* When a vector memory operation with a given ``SCOPE`` reaches a cache with a smaller + ``SCOPE`` value, it is forwarded to the next level of cache. +* When a vector memory operation with a given ``SCOPE`` reaches a cache with a ``SCOPE`` + value greater than or equal to its own, the operation can proceed: + + * Reads can hit into the cache. + * Writes can happen in this cache and completion (``s_wait`` decrement) can be + reported. + * RMW operations can be done locally. + +* Some memory operations contain an ``nv`` bit, for "non-volatile", which indicates + memory that is not expected to change during a kernel's execution. + This information is propagated to the cache lines for that address + (referred to as ``$nv``). + + * When ``nv=0`` reads hit dirty ``$nv=1`` data in cache, the hardware will + write back the data to the next level in the hierarchy and then subsequently read + it again, updating the cache line with a clean ``$nv=0`` copy of the data. + +* ``global_inv``, ``global_wb`` and ``global_wbinv`` are cache control instructions. + The affected cache(s) are controlled by the ``SCOPE`` of the instruction. + Only caches whose scope is strictly smaller than the instruction's are affected. + + * ``global_inv`` invalidates the data in affected caches so that subsequent reads + will re-read from the next level in the cache hierarchy. + The invalidation requests cannot be reordered with pending or upcoming + memory operations. Instruction completion is reported using ``s_wait_loadcnt``. + * ``global_wb`` flushes the dirty data in affected caches to the next level in + the cache hierarchy. This instruction additionally ensures that previous + memory operations done at a lower scope level have reached the desired + ``SCOPE:``.
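The two ``SCOPE`` rules above, that an operation is serviced by the first cache whose assigned scope is at least the operation's scope, and that the cache control instructions ``global_inv``/``global_wb``/``global_wbinv`` only affect caches of strictly smaller scope, can be restated in a short conceptual sketch. This is an illustration only; the enum and function names are invented and do not correspond to LLVM or hardware sources.

.. code-block:: c++

   #include <cstdint>

   // Scope values ordered from closest to the wave (CU) to furthest (SYS).
   enum class Scope : uint8_t { CU = 0, SE = 1, DEV = 2, SYS = 3 };

   // A load/store/atomic with scope OpScope can be completed by a cache whose
   // assigned scope is >= OpScope; caches with a smaller scope forward it on.
   constexpr bool cacheCanService(Scope CacheScope, Scope OpScope) {
     return CacheScope >= OpScope;
   }

   // global_inv / global_wb / global_wbinv affect only caches whose scope is
   // strictly smaller than the instruction's scope.
   constexpr bool cacheControlAffects(Scope CacheScope, Scope InstScope) {
     return CacheScope < InstScope;
   }

   // Example: a scope:SCOPE_DEV cache-control instruction affects CU- and
   // SE-scoped caches, while a DEV-scoped cache can service a scope:SCOPE_DEV
   // atomic locally, and a CU-scoped cache must forward a SCOPE_SYS operation.
   static_assert(cacheControlAffects(Scope::SE, Scope::DEV), "");
   static_assert(cacheCanService(Scope::DEV, Scope::DEV), "");
   static_assert(!cacheCanService(Scope::CU, Scope::SYS), "");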
Instruction completion is reported using ``s_wait_storecnt`` once + all data has been acknowledged by the next level in the cache hierarchy. + * ``global_wbinv`` performs a ``global_inv`` then a ``global_wb``. + Instruction completion is reported using ``s_wait_storecnt``. + * ``global_inv``, ``global_wb`` and ``global_wbinv`` with ``nv=0`` can only + affect ``$nv=0`` cache lines, whereas ``nv=1`` can affect all cache lines. + * ``global_inv``, ``global_wb`` and ``global_wbinv`` behave like memory operations + issued to every address at the same time. They are kept in order with other + memory operations from the same wave. + +Scalar memory operations are only used to access memory that is proven to not +change during the execution of the kernel dispatch. This includes constant +address space and global address space for program scope ``const`` variables. +Therefore, the kernel machine code does not have to maintain the scalar cache to +ensure it is coherent with the vector caches. The scalar and vector caches are +invalidated between kernel dispatches by CP since constant address space data +may change between kernel dispatch executions. See +:ref:`amdgpu-amdhsa-memory-spaces`. + +Atomics in the scratch address space are handled as follows: + +* Data types <= 32 bits: The instruction is converted into an atomic in the + generic (``flat``) address space. All properties of the atomic + (atomic ordering, volatility, alignment, etc.) are preserved. + Refer to the generic address space code sequences for further information. +* Data types >32 bits: unsupported and an error is emitted. + +The code sequences used to implement the memory model for GFX125x are defined in +table :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-table`. + +The mapping of LLVM IR syncscope to GFX125x instruction ``scope`` operands is +defined in :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + +The table only applies if and only if it is directly referenced by an entry in +:ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-table`, and it only applies to +the instruction in the code sequence that references the table. + + .. table:: AMDHSA Memory Model Code Sequences GFX125x - Instruction Scopes + :name: amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table + + ================================= ======================= + LLVM syncscope ISA + + + ================================= ======================= + *none*, one-as ``scope:SCOPE_SYS`` + system, system-one-as ``scope:SCOPE_SYS`` + agent, agent-one-as ``scope:SCOPE_DEV`` + cluster, cluster-one-as ``scope:SCOPE_SE`` + workgroup, workgroup-one-as ``scope:SCOPE_CU`` [1]_ + wavefront, wavefront-one-as ``scope:SCOPE_CU`` [1]_ + singlethread, singlethread-one-as ``scope:SCOPE_CU`` [1]_ + ================================= ======================= + + .. [1] ``SCOPE_CU`` is the default ``scope:`` emitted by the compiler. + It will be omitted when instructions are emitted in textual form by the compiler. + + .. 
table:: AMDHSA Memory Model Code Sequences GFX125x + :name: amdgpu-amdhsa-memory-model-code-sequences-gfx125x-table + + ============ ============ ============== ========== ================================ + LLVM Instr LLVM Memory LLVM Memory AMDGPU AMDGPU Machine Code + Ordering Sync Scope Address GFX125x + Space + ============ ============ ============== ========== ================================ + **Non-Atomic** + ------------------------------------------------------------------------------------ + load *none* *none* - global - !volatile & !nontemporal + - generic + - private 1. buffer/global/flat_load + - constant + - !volatile & nontemporal + + 1. buffer/global/flat_load + ``th:TH_LOAD_NT`` + + - volatile + + 1. buffer/global/flat_load + ``scope:SCOPE_SYS`` + + 2. ``s_wait_loadcnt 0x0`` + + - Must happen before + any following volatile + global/generic + load/store. + - Ensures that + volatile + operations to + different + addresses will not + be reordered by + hardware. + + load *none* *none* - local 1. ds_load + store *none* *none* - global - !volatile & !nontemporal + - generic + - private 1. buffer/global/flat_store + - constant + - !volatile & nontemporal + + 1. buffer/global/flat_store + ``th:TH_STORE_NT`` + + - volatile + + 1. buffer/global/flat_store + ``scope:SCOPE_SYS`` + + 2. ``s_wait_storecnt 0x0`` + + - Must happen before + any following volatile + global/generic + load/store. + - Ensures that + volatile + operations to + different + addresses will not + be reordered by + hardware. + + store *none* *none* - local 1. ds_store + **Unordered Atomic** + ------------------------------------------------------------------------------------ + load atomic unordered *any* *any* *Same as non-atomic*. + store atomic unordered *any* *any* *Same as non-atomic*. + atomicrmw unordered *any* *any* *Same as monotonic atomic*. + **Monotonic Atomic** + ------------------------------------------------------------------------------------ + load atomic monotonic - singlethread - global 1. buffer/global/flat_load + - wavefront - generic + - workgroup - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - cluster + - agent + - system + load atomic monotonic - singlethread - local 1. ds_load + - wavefront + - workgroup + store atomic monotonic - singlethread - global 1. ``s_wait_xcnt 0x0`` + - wavefront - generic + - workgroup - Ensure operation remains atomic even during a xnack replay. + - cluster - Only needed for ``flat`` and ``global`` operations. + - agent + - system 2. buffer/global/flat_store + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + store atomic monotonic - singlethread - local 1. ds_store + - wavefront + - workgroup + atomicrmw monotonic - singlethread - global 1. ``s_wait_xcnt 0x0`` + - wavefront - generic + - workgroup - Ensure operation remains atomic even during a xnack replay. + - cluster - Only needed for ``flat`` and ``global`` operations. + - agent + - system 2. buffer/global/flat_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + atomicrmw monotonic - singlethread - local 1. ds_atomic + - wavefront + - workgroup + **Acquire Atomic** + ------------------------------------------------------------------------------------ + load atomic acquire - singlethread - global 1. buffer/global/ds/flat_load + - wavefront - local + - generic + load atomic acquire - workgroup - global 1. 
buffer/global_load + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. ``s_wait_loadcnt 0x0`` + + - Must happen before any following + global/generic + load/load + atomic/store/store + atomic/atomicrmw. + + + load atomic acquire - workgroup - local 1. ds_load + 2. ``s_wait_dscnt 0x0`` + + - If OpenCL, omit. + - Must happen before any following + global/generic load/load + atomic/store/store + atomic/atomicrmw. + - Ensures any + following global + data read is no + older than the local load + atomic value being + acquired. + + + load atomic acquire - workgroup - generic 1. flat_load + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0`` + - Must happen before any + following global/generic + load/load + atomic/store/store + atomic/atomicrmw. + - Ensures any + following global + data read is no + older than a local load + atomic value being + acquired. + + load atomic acquire - cluster - global 1. buffer/global_load + - agent + - system - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. ``s_wait_loadcnt 0x0`` + + - Must happen before + following + ``global_inv``. + - Ensures the load + has completed + before invalidating + the caches. + + 3. ``global_inv`` + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - Must happen before + any following + global/generic + load/load + atomic/atomicrmw. + - Ensures that + following + loads will not see + stale global data. + + load atomic acquire - cluster - generic 1. flat_load + - agent + - system - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0`` + - Must happen before + following + ``global_inv``. + - Ensures the flat_load + has completed + before invalidating + the caches. + + 3. ``global_inv`` + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - Must happen before + any following + global/generic + load/load + atomic/atomicrmw. + - Ensures that + following loads + will not see stale + global data. + + atomicrmw acquire - singlethread - global 1. ``s_wait_xcnt 0x0`` + - wavefront - local + - generic - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 2. buffer/global/ds/flat_atomic + + atomicrmw acquire - workgroup - global 1. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 2. buffer/global_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - If atomic with return, + use ``th:TH_ATOMIC_RETURN`` + + 3. | **Atomic with return:** + | ``s_wait_loadcnt 0x0`` + | **Atomic without return:** + | ``s_wait_storecnt 0x0`` + + - Must happen before any following + global/generic + load/load + atomic/store/store + atomic/atomicrmw. + + atomicrmw acquire - workgroup - local 1. ds_atomic + 2. ``s_wait_dscnt 0x0`` + + - If OpenCL, omit. + - Ensures any + following global + data read is no + older than the local + atomicrmw value + being acquired. + + + atomicrmw acquire - workgroup - generic 1. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + + 2. 
flat_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - If atomic with return, + use ``th:TH_ATOMIC_RETURN`` + + 3. | **Atomic with return:** + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + | **Atomic without return:** + | ``s_wait_storecnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0`` + - Ensures any + following global + data read is no + older than the local + atomicrmw value + being acquired. + + atomicrmw acquire - cluster - global 1. ``s_wait_xcnt 0x0`` + - agent + - system - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``global`` operations. + + 2. buffer/global_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - If atomic with return, + use ``th:TH_ATOMIC_RETURN`` + + 3. | **Atomic with return:** + | ``s_wait_loadcnt 0x0`` + | **Atomic without return:** + | ``s_wait_storecnt 0x0`` + + - Must happen before + following ``global_inv``. + - Ensures the + atomicrmw has + completed before + invalidating the + caches. + + 4. ``global_inv`` + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - Must happen before + any following + global/generic + load/load + atomic/atomicrmw. + - Ensures that + following loads + will not see stale + global data. + + atomicrmw acquire - cluster - generic 1. ``s_wait_xcnt 0x0`` + - agent + - system - Ensure operation remains atomic even during a xnack replay. + + 2. flat_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - If atomic with return, + use ``th:TH_ATOMIC_RETURN`` + + 3. | **Atomic with return:** + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + | **Atomic without return:** + | ``s_wait_storecnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit dscnt + - Must happen before + following + global_inv + - Ensures the + atomicrmw has + completed before + invalidating the + caches. + + 4. ``global_inv`` + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - Must happen before + any following + global/generic + load/load + atomic/atomicrmw. + - Ensures that + following loads + will not see stale + global data. + + fence acquire - singlethread *none* *none* + - wavefront + fence acquire - workgroup *none* 1. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0`` + - If OpenCL and address space is local, + omit all. + - See :ref:`amdgpu-fence-as` for + more details on fencing specific + address spaces. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic load + atomic/ + atomicrmw-with-return-value + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + fence-paired-atomic). + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + atomicrmw-no-return-value + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + fence-paired-atomic). + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic load + atomic/atomicrmw + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + fence-paired-atomic). + - Ensures that the + fence-paired atomic + has completed + before invalidating + the + cache. 
Therefore + any following + locations read must + be no older than + the value read by + the + fence-paired-atomic. + + + fence acquire - cluster *none* 1. | ``s_wait_storecnt 0x0`` + - agent | ``s_wait_loadcnt 0x0`` + - system | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0``. + - If OpenCL and address space is + local, omit all. + - See :ref:`amdgpu-fence-as` for + more details on fencing specific + address spaces. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic load + atomic/ + atomicrmw-with-return-value + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + fence-paired-atomic). + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + atomicrmw-no-return-value + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + fence-paired-atomic). + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic load + atomic/atomicrmw + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + fence-paired-atomic). + - Must happen before + the following + ``global_inv`` + - Ensures that the + fence-paired atomic + has completed + before invalidating the + caches. Therefore + any following + locations read must + be no older than + the value read by + the + fence-paired-atomic. + + 2. ``global_inv`` + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - Ensures that + following + loads will not see + stale data. + + **Release Atomic** + ------------------------------------------------------------------------------------ + store atomic release - singlethread - global 1. ``s_wait_xcnt 0x0`` + - wavefront - local + - generic - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 2. buffer/global/ds/flat_store + + store atomic release - workgroup - global 1. | ``s_wait_storecnt 0x0`` + - cluster - generic | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0``. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic load/load + atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - Must happen before the + following store. + - Ensures that all + memory operations + have + completed before + performing the + store that is being + released. + + 2. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 3. buffer/global/flat_store + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + store atomic release - workgroup - local 1. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic load/load + atomic/ + atomicrmw-with-return-value. 
+ - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - Must happen before the + following store. + - Ensures that all + global memory + operations have + completed before + performing the + store that is being + released. + + 2. ds_store + store atomic release - agent - global 1. ``global_wb`` + - system - generic + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0``. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic + load/load + atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + ``global_wb`` or + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - Must happen before the + following store. + - Ensures that all + memory operations + have + completed before + performing the + store that is being + released. + + 3. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 4. buffer/global/flat_store + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + atomicrmw release - singlethread - global 1. ``s_wait_xcnt 0x0`` + - wavefront - local + - generic - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 2. buffer/global/ds/flat_atomic + atomicrmw release - workgroup - global 1. | ``s_wait_storecnt 0x0`` + - cluster - generic | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0``. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic load/load + atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - Must happen before the + following atomic. + - Ensures that all + memory operations + have + completed before + performing the + atomicrmw that is + being released. + + 2. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 3. buffer/global/flat_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + atomicrmw release - workgroup - local 1. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit all. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic load/load + atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - Must happen before the + following atomic. + - Ensures that all + global memory + operations have + completed before + performing the + store that is being + released. 
+ + 2. ds_atomic + atomicrmw release - agent - global 1. ``global_wb`` + - system - generic + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0``. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic + load/load atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + ``global_wb`` or + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - Must happen before the + following atomic. + - Ensures that all + memory operations + to global and local + have completed + before performing + the atomicrmw that + is being released. + + 3. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 4. buffer/global/flat_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + fence release - singlethread *none* *none* + - wavefront + fence release - workgroup *none* 1. | ``s_wait_storecnt 0x0`` + - cluster | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0``. + - If OpenCL and + address space is + local, omit all. + - See :ref:`amdgpu-fence-as` for + more details on fencing specific + address spaces. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic + load/load + atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store atomic/ + atomicrmw. + - Must happen before + any following store + atomic/atomicrmw + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + fence-paired-atomic). + - Ensures that all + memory operations + have + completed before + performing the + following + fence-paired-atomic. + + fence release - agent *none* 1. ``global_wb`` + - system + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + | **OpenCL:** + | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + + - If OpenCl, omit ``s_wait_dscnt 0x0``. + - If OpenCL and address space is local, + omit all. + - See :ref:`amdgpu-fence-as` for + more details on fencing specific + address spaces. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic + load/load atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + ``global_wb`` or + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. 
+ - Must happen before + any following store + atomic/atomicrmw + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + fence-paired-atomic). + - Ensures that all + memory operations + have + completed before + performing the + following + fence-paired-atomic. + + **Acquire-Release Atomic** + ------------------------------------------------------------------------------------ + atomicrmw acq_rel - singlethread - global 1. ``s_wait_xcnt 0x0`` + - wavefront - local + - generic - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 2. buffer/global/ds/flat_atomic + atomicrmw acq_rel - workgroup - global 1. | ``s_wait_storecnt 0x0`` + - cluster | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0``. + - Must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic load/load + atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - Must happen before + the following + atomicrmw. + - Ensures that all + memory operations + have + completed before + performing the + atomicrmw that is + being released. + + 2. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``flat`` and ``global`` operations. + + 3. buffer/global_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - If atomic with return, use + ``th:TH_ATOMIC_RETURN``. + + 4. | **Atomic with return:** + | ``s_wait_loadcnt 0x0`` + | **Atomic without return:** + | ``s_wait_storecnt 0x0`` + + - Ensures any + following global + data read is no + older than the + atomicrmw value + being acquired. + + atomicrmw acq_rel - workgroup - local 1 | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic load/load + atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. + - Must happen before + the following + store. + - Ensures that all + global memory + operations have + completed before + performing the + store that is being + released. + + 2. ds_atomic + 3. ``s_wait_dscnt 0x0`` + + - If OpenCL, omit. + - Ensures any + following global + data read is no + older than the local load + atomic value being + acquired. + + atomicrmw acq_rel - workgroup - generic 1. | ``s_wait_storecnt 0x0`` + - cluster | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_loadcnt 0x0``. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic load/load + atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + store/store + atomic/ + atomicrmw-no-return-value. 
+ - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - Must happen before + the following + atomicrmw. + - Ensures that all + memory operations + have + completed before + performing the + atomicrmw that is + being released. + + 2. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + + 3. flat_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - If atomic with return, + use ``th:TH_ATOMIC_RETURN``. + + 4. | **Atomic without return:** + | ``s_wait_dscnt 0x0`` + | ``s_wait_storecnt 0x0`` + | **Atomic with return:** + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit ``s_wait_dscnt 0x0`` + - Ensures any + following global + data read is no + older than the load + atomic value being + acquired. + + + atomicrmw acq_rel - agent - global 1. ``global_wb`` + - system + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit + ``s_wait_dscnt 0x0`` + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic + load/load atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + ``global_wb``. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - Must happen before + the following + atomicrmw. + - Ensures that all + memory operations + to global have + completed before + performing the + atomicrmw that is + being released. + + 2. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + - Only needed for ``global`` operations. + + 3. buffer/global_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - If atomic with return, use + ``th:TH_ATOMIC_RETURN``. + + 4. | **Atomic with return:** + | ``s_wait_loadcnt 0x0`` + | **Atomic without return:** + | ``s_wait_storecnt 0x0`` + + - Must happen before + following + ``global_inv``. + - Ensures the + atomicrmw has + completed before + invalidating the + caches. + + 5. ``global_inv`` + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - Must happen before + any following + global/generic + load/load + atomic/atomicrmw. + - Ensures that + following loads + will not see stale + global data. + + atomicrmw acq_rel - agent - generic 1. ``global_wb`` + - system + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit + ``s_wait_dscnt 0x0`` + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic + load/load atomic + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + ``global_wb``. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - Must happen before + the following + atomicrmw. + - Ensures that all + memory operations + have + completed before + performing the + atomicrmw that is + being released. + + 3. ``s_wait_xcnt 0x0`` + + - Ensure operation remains atomic even during a xnack replay. + + 4. 
flat_atomic + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - If atomic with return, use + ``th:TH_ATOMIC_RETURN``. + + 5. | **Atomic with return:** + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + | **Atomic without return:** + | ``s_wait_storecnt 0x0`` + | ``s_wait_dscnt 0x0`` + + + - If OpenCL, omit + ``s_wait_dscnt 0x0``. + - Must happen before + following + ``global_inv``. + - Ensures the + atomicrmw has + completed before + invalidating the + caches. + + 5. ``global_inv`` + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - Must happen before + any following + global/generic + load/load + atomic/atomicrmw. + - Ensures that + following loads + will not see stale + global data. + + fence acq_rel - singlethread *none* *none* + - wavefront + fence acq_rel - workgroup *none* 1. | ``s_wait_storecnt 0x0`` + - cluster | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL and + address space is + not generic, omit + ``s_wait_dscnt 0x0`` + - If OpenCL and + address space is + local, omit + all but ``s_wait_dscnt 0x0``. + - See :ref:`amdgpu-fence-as` for + more details on fencing specific + address spaces. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic + load/load + atomic/ + atomicrmw-with-return-value. + - ``s_wait_storecnt 0x0`` + must happen after + any preceding + global/generic + store/store atomic/ + atomicrmw-no-return-value. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store atomic/ + atomicrmw. + - Must happen before + any following + global/generic + load/load + atomic/store/store + atomic/atomicrmw. + - Ensures that all + memory operations + have + completed before + performing any + following global + memory operations. + - Ensures that the + preceding + local/generic load + atomic/atomicrmw + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + acquire-fence-paired-atomic) + has completed + before following + global memory + operations. This + satisfies the + requirements of + acquire. + - Ensures that all + previous memory + operations have + completed before a + following + local/generic store + atomic/atomicrmw + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + release-fence-paired-atomic). + This satisfies the + requirements of + release. + - Ensures that the + acquire-fence-paired + atomic has completed + before invalidating + the + cache. Therefore + any following + locations read must + be no older than + the value read by + the + acquire-fence-paired-atomic. + + fence acq_rel - agent *none* 1. ``global_wb`` + - system + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + + 2. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL and + address space is + not generic, omit + ``s_wait_dscnt 0x0`` + - If OpenCL and + address space is + local, omit + all but ``s_wait_dscnt 0x0``. + - See :ref:`amdgpu-fence-as` for + more details on fencing specific + address spaces. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + any preceding + global/generic + load/load + atomic/ + atomicrmw-with-return-value. 
+ - ``s_wait_storecnt 0x0`` + must happen after + ``global_wb``. + - ``s_wait_dscnt 0x0`` + must happen after + any preceding + local/generic + load/store/load + atomic/store + atomic/atomicrmw. + - Must happen before + the following + ``global_inv`` + - Ensures that the + preceding + global/local/generic + load + atomic/atomicrmw + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + acquire-fence-paired-atomic) + has completed + before invalidating + the caches. This + satisfies the + requirements of + acquire. + - Ensures that all + previous memory + operations have + completed before a + following + global/local/generic + store + atomic/atomicrmw + with an equal or + wider sync scope + and memory ordering + stronger than + unordered (this is + termed the + release-fence-paired-atomic). + This satisfies the + requirements of + release. + + 3. ``global_inv`` + + - Apply :ref:`amdgpu-amdhsa-memory-model-code-sequences-gfx125x-scopes-table`. + - Must happen before + any following + global/generic + load/load + atomic/store/store + atomic/atomicrmw. + - Ensures that + following loads + will not see stale + global data. This + satisfies the + requirements of + acquire. + + **Sequential Consistent Atomic** + ------------------------------------------------------------------------------------ + load atomic seq_cst - singlethread - global *Same as corresponding + - wavefront - local load atomic acquire, + - generic except must generate + all instructions even + for OpenCL.* + load atomic seq_cst - workgroup - global 1. | ``s_wait_storecnt 0x0`` + - generic | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit + ``s_wait_dscnt 0x0`` + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_dscnt 0x0`` must + happen after + preceding + local/generic load + atomic/store + atomic/atomicrmw + with memory + ordering of seq_cst + and with equal or + wider sync scope. + (Note that seq_cst + fences have their + own ``s_wait_dscnt 0x0`` + and so do not need to be + considered.) + - ``s_wait_loadcnt 0x0`` + must happen after + preceding + global/generic load + atomic/ + atomicrmw-with-return-value + with memory + ordering of seq_cst + and with equal or + wider sync scope. + (Note that seq_cst + fences have their + own waits and so do + not need to be + considered.) + - ``s_wait_storecnt 0x0`` + Must happen after + preceding + global/generic store + atomic/ + atomicrmw-no-return-value + with memory + ordering of seq_cst + and with equal or + wider sync scope. + (Note that seq_cst + fences have their + own ``s_wait_storecnt 0x0`` + and so do not need to be + considered.) + - Ensures any + preceding + sequential + consistent global/local + memory instructions + have completed + before executing + this sequentially + consistent + instruction. This + prevents reordering + a seq_cst store + followed by a + seq_cst load. (Note + that seq_cst is + stronger than + acquire/release as + the reordering of + load acquire + followed by a store + release is + prevented by the + ``s_wait``\s of + the release, but + there is nothing + preventing a store + release followed by + load acquire from + completing out of + order. The ``s_wait``\s + could be placed after + seq_store or before + the seq_load. We + choose the load to + make the ``s_wait``\s be + as late as possible + so that the store + may have already + completed.) + + 2. 
*Following + instructions same as + corresponding load + atomic acquire, + except must generate + all instructions even + for OpenCL.* + load atomic seq_cst - workgroup - local 1. | ``s_wait_storecnt 0x0`` + | ``s_wait_loadcnt 0x0`` + | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit all. + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_loadcnt 0x0`` + must happen after + preceding + global/generic load + atomic/ + atomicrmw-with-return-value + with memory + ordering of seq_cst + and with equal or + wider sync scope. + (Note that seq_cst + fences have their + own ``s_wait``\s and so do + not need to be + considered.) + - ``s_wait_storecnt 0x0`` + Must happen after + preceding + global/generic store + atomic/ + atomicrmw-no-return-value + with memory + ordering of seq_cst + and with equal or + wider sync scope. + (Note that seq_cst + fences have their + own ``s_wait_storecnt 0x0`` + and so do + not need to be + considered.) + - Ensures any + preceding + sequential + consistent global + memory instructions + have completed + before executing + this sequentially + consistent + instruction. This + prevents reordering + a seq_cst store + followed by a + seq_cst load. (Note + that seq_cst is + stronger than + acquire/release as + the reordering of + load acquire + followed by a store + release is + prevented by the + ``s_wait``\s of + the release, but + there is nothing + preventing a store + release followed by + load acquire from + completing out of + order. The s_waitcnt + could be placed after + seq_store or before + the seq_load. We + choose the load to + make the ``s_wait``\s be + as late as possible + so that the store + may have already + completed.) + + 2. *Following + instructions same as + corresponding load + atomic acquire, + except must generate + all instructions even + for OpenCL.* + + load atomic seq_cst - cluster - global 1. | ``s_wait_storecnt 0x0`` + - agent - generic | ``s_wait_loadcnt 0x0`` + - system | ``s_wait_dscnt 0x0`` + + - If OpenCL, omit + ``s_wait_dscnt 0x0`` + - The waits can be + independently moved + according to the + following rules: + - ``s_wait_dscnt 0x0`` + must happen after + preceding + local load + atomic/store + atomic/atomicrmw + with memory + ordering of seq_cst + and with equal or + wider sync scope. + (Note that seq_cst + fences have their + own ``s_wait_dscnt 0x0`` + and so do + not need to be + considered.) + - ``s_wait_loadcnt 0x0`` + must happen after + preceding + global/generic load + atomic/ + atomicrmw-with-return-value + with memory + ordering of seq_cst + and with equal or + wider sync scope. + (Note that seq_cst + fences have their + own ``s_wait``\s and so do + not need to be + considered.) + - ``s_wait_storecnt 0x0`` + Must happen after + preceding + global/generic store + atomic/ + atomicrmw-no-return-value + with memory + ordering of seq_cst + and with equal or + wider sync scope. + (Note that seq_cst + fences have their + own + ``s_wait_storecnt 0x0`` and so do + not need to be + considered.) + - Ensures any + preceding + sequential + consistent global + memory instructions + have completed + before executing + this sequentially + consistent + instruction. This + prevents reordering + a seq_cst store + followed by a + seq_cst load. 
(Note + that seq_cst is + stronger than + acquire/release as + the reordering of + load acquire + followed by a store + release is + prevented by the + ``s_wait``\s of + the release, but + there is nothing + preventing a store + release followed by + load acquire from + completing out of + order. The ``s_wait``\s + could be placed after + seq_store or before + the seq_load. We + choose the load to + make the ``s_wait``\s be + as late as possible + so that the store + may have already + completed.) + + 2. *Following + instructions same as + corresponding load + atomic acquire, + except must generate + all instructions even + for OpenCL.* + store atomic seq_cst - singlethread - global *Same as corresponding + - wavefront - local store atomic release, + - workgroup - generic except must generate + - cluster all instructions even + - agent for OpenCL.* + - system + atomicrmw seq_cst - singlethread - global *Same as corresponding + - wavefront - local atomicrmw acq_rel, + - workgroup - generic except must generate + - cluster all instructions even + - agent for OpenCL.* + - system + fence seq_cst - singlethread *none* *Same as corresponding + - wavefront fence acq_rel, + - workgroup except must generate + - cluster all instructions even + - agent for OpenCL.* + - system + ============ ============ ============== ========== ================================ + +.. _amdgpu-amdhsa-memory-model-gfx125x-cooperative-atomics: + +'``llvm.amdgcn.cooperative.atomic``' Intrinsics +############################################### + +The collection of convergent threads participating in a cooperative atomic must belong +to the same wave32. + +Only naturally-aligned, contiguous groups of lanes may be used; +see :ref:`the table below` for the set of +possible lane groups. +Cooperative atomics may be executed by more than one group per wave. +Using an unsupported lane group, or using more lane groups per wave than the maximum will +cause undefined behavior. + +Using the intrinsic also causes undefined behavior if it loads or stores to addresses that: + +* Are not in the global address space (e.g.: private and local addresses spaces). +* Are only reachable through a bus that does not support 128B/256B requests + (e.g.: host memory over PCIe) +* Any other unsupported addresses (TBD, needs refinement) + +.. TODO:: + + Enumerate all cases where UB is invoked when using this intrinsic instead of hand-waving + "specific global memory locations". + +.. table:: GFX125x Cooperative Atomic Intrinsics + :name: gfx125x-cooperative-atomic-intrinsics-table + + ======================================================= ======================================= + LLVM Intrinsic Lane Groups + ======================================================= ======================================= + ``llvm.amdgcn.cooperative.atomic.store.32x4B`` ``0-31`` + + ``llvm.amdgcn.cooperative.atomic.load.32x4B`` ``0-31`` + + ``llvm.amdgcn.cooperative.atomic.store.16x8B`` ``0-15``, ``16-31`` + + ``llvm.amdgcn.cooperative.atomic.load.16x8B`` ``0-15``, ``16-31`` + + ``llvm.amdgcn.cooperative.atomic.store.8x16B`` ``0-7``, ``8-15``, ``16-23``, ``24-31`` + + ``llvm.amdgcn.cooperative.atomic.load.8x16B`` ``0-7``, ``8-15``, ``16-23``, ``24-31`` + + ======================================================= ======================================= + .. _amdgpu-amdhsa-trap-handler-abi: Trap Handler ABI @@ -17803,6 +19909,7 @@ in this description. 
:doc:`gfx1102` :doc:`gfx1103` + RDNA 4 :doc:`GFX12` :doc:`gfx1200` ============= ============================================= ======================================= For more information about instructions, their semantics and supported diff --git a/llvm/docs/BugLifeCycle.rst b/llvm/docs/BugLifeCycle.rst index 9bf13e64ed56e..1215af9e47e08 100644 --- a/llvm/docs/BugLifeCycle.rst +++ b/llvm/docs/BugLifeCycle.rst @@ -16,7 +16,7 @@ consistency helps reporters, developers and others to gain a better understanding of what a particular bug state actually means and what to expect might happen next. -At the same time, we aim to not over-specify the life cycle of bugs in +At the same time, we aim not to over-specify the life cycle of bugs in `the LLVM Bug Tracking System `_, as the overall goal is to make it easier to work with and understand the bug reports. @@ -39,7 +39,7 @@ use, needs to be maintained. See the following for details: Reporting bugs ============== -See :doc:`HowToSubmitABug` on further details on how to submit good bug reports. +See :doc:`HowToSubmitABug` for further details on how to submit good bug reports. You can apply `labels `_ to the bug to provide extra information to make the bug easier to discover, such @@ -50,9 +50,9 @@ as a label for the part of the project the bug pertains to. Triaging bugs ============= -Open bugs that have not been marked with the ``confirmed`` label are bugs that +Open bugs that have not been marked with the ``confirmed`` label still need to be triaged. When triage is complete, the ``confirmed`` label -should be added along with any other labels that help to classify the report, +should be added along with any other labels that help classify the report, unless the issue is being :ref:`closed`. The goal of triaging a bug is to make sure a newly reported bug ends up in a @@ -124,13 +124,13 @@ Examples of reasons for resolving are: ``duplicate`` label with a comment pointing to the issue it duplicates. * If there is a sound reason for not fixing the issue (difficulty, ABI, open - research questions, etc), add the ``wontfix`` label and a comment explaining + research questions, etc.), add the ``wontfix`` label and a comment explaining why no changes are expected. * If there is a specific and plausible reason to think that a given bug is otherwise inapplicable or obsolete. One example is an open bug that doesn't contain enough information to clearly understand the problem being reported - (e.g. not reproducible). It is fine to close such a bug, adding with the + (e.g., not reproducible). It is fine to close such a bug, adding the ``worksforme`` label and leaving a comment to encourage the reporter to reopen the bug with more information if it's still reproducible for them. @@ -140,7 +140,7 @@ Examples of reasons for resolving are: Maintenance of metadata ======================= -Project member with write access to the project can create new labels, but we +Project members with write access to the project can create new labels, but we discourage adding ad hoc labels because we want to control the proliferation of labels and avoid single-use labels. If you would like a new label added, please open an issue asking to create an issue label and add the ``infrastructure`` diff --git a/llvm/docs/BuildingADistribution.rst b/llvm/docs/BuildingADistribution.rst index 10e571cdea3f9..81ed8b8723a26 100644 --- a/llvm/docs/BuildingADistribution.rst +++ b/llvm/docs/BuildingADistribution.rst @@ -13,8 +13,8 @@ combination of its sub-project tools for distribution. 
This document covers useful features of the LLVM build system as well as best practices and general information about packaging LLVM. -If you are new to CMake you may find the :doc:`CMake` or :doc:`CMakePrimer` -documentation useful. Some of the things covered in this document are the inner +If you are new to CMake, you may find the :doc:`CMake` or :doc:`CMakePrimer` +documentation useful. This document covers some of the inner workings of the builds described in the :doc:`AdvancedBuilds` document. General Distribution Guidance @@ -27,7 +27,7 @@ compiler. This is done so that the compiler you distribute benefits from all the bug fixes, performance optimizations and general improvements provided by the new compiler. -In deciding how to build your distribution there are a few trade-offs that you +In deciding how to build your distribution, there are a few trade-offs that you will need to evaluate. The big two are: #. Compile time of the distribution against performance of the built compiler @@ -41,8 +41,8 @@ opportunity for the compiler to optimize. The guidance for minimizing distribution size is to dynamically link LLVM and Clang libraries into the tools to reduce code duplication. This will come at a -substantial performance penalty to the generated binary both because it reduces -optimization opportunity, and because dynamic linking requires resolving symbols +substantial performance penalty to the generated binary, both because it reduces +optimization opportunities and because dynamic linking requires resolving symbols at process launch time, which can be very slow for C++ code. .. _shared_libs: @@ -76,7 +76,7 @@ LLVM testing tools. Alternatively the ``install-distribution`` target, which is recommended for building distributions, only installs specific parts of LLVM as specified at configuration time by *LLVM_DISTRIBUTION_COMPONENTS*. -Additionally by default the ``install`` target will install the LLVM testing +Additionally, by default, the ``install`` target will install the LLVM testing tools as the public tools. This can be changed well by setting *LLVM_INSTALL_TOOLCHAIN_ONLY* to ``On``. The LLVM tools are intended for development and testing of LLVM, and should only be included in distributions diff --git a/llvm/docs/CIBestPractices.rst b/llvm/docs/CIBestPractices.rst index 8301b95f54938..da92ed3660e55 100644 --- a/llvm/docs/CIBestPractices.rst +++ b/llvm/docs/CIBestPractices.rst @@ -9,11 +9,11 @@ This document contains a list of guidelines and best practices to use when working on LLVM's CI systems. These are intended to keep our actions reliable, consistent, and secure. -Github Actions Best Practices +GitHub Actions Best Practices ============================= This section contains information on best practices/guidelines when working on -LLVM's github actions workflows. +LLVM's GitHub actions workflows. Disabling Jobs In Forks ----------------------- @@ -35,7 +35,7 @@ jobs specified within a workflow: if: github.repository_owner == 'llvm' We choose to use ``github.repository_owner`` rather than ``github.repository`` -to enable these workflows to run in forks inside the LLVM organization such as +to enable these workflows to run in forks inside the LLVM organization, such as the ClangIR fork. There are some exceptions to this rule where ``github.repository`` might be @@ -46,7 +46,7 @@ release tasks, which should not run anywhere else. 
Hash Pinning Dependencies ------------------------- -Github Actions allows the use of actions from other repositories as steps in +GitHub Actions allows the use of actions from other repositories as steps in jobs. We take advantage of various actions for a variety of different tasks, but especially tasks like checking out the repository, and downloading/uploading build caches. These actions are typically versioned with @@ -59,9 +59,9 @@ just a release, which looks like the following: uses: actions/checkout@v4 However, it is best practice to specify an exact commit SHA from which to pull -the action from, noting the version in a comment: +the action, noting the version in a comment: -We plan on revisiting this recommendation once Github's immutable actions have +We plan on revisiting this recommendation once GitHub's immutable actions have been rolled out as GA. .. code-block:: yaml @@ -72,11 +72,11 @@ been rolled out as GA. This is beneficial for two reasons: reliability and security. Specifying an exact SHA rather than just a major version ensures we end up running the same -action originally specified when the workflow as authored and/or updated, +action originally specified when the workflow was authored and/or updated, and that no breaking changes sneak in from new versions of a workflow being released. However, this effect could also be achieved by specifying an exact dot release. The biggest reason to prefer hash pinned dependencies is security. -Release assets on Github are mutable, allowing an attacker to change the code +Release assets on GitHub are mutable, allowing an attacker to change the code within a specific version of an action after the fact, potentially stealing sensitive tokens and credentials. Hash pinning the dependencies prevents this as the hash would change with the code. @@ -84,10 +84,10 @@ as the hash would change with the code. Using Versioned Runner Images ----------------------------- -Github actions allows the use of either specifically versioned runner images +GitHub actions allows the use of either specifically versioned runner images (e.g., ``ubuntu-22.04``), or just the latest runner image (e.g., ``ubuntu-latest``). It is best practice to use explicitly versioned -runner images. This prevents breakages when Github rolls the latest runner +runner images. This prevents breakages when GitHub rolls the latest runner image to a new version with potentially breaking changes, instead allowing us to explicitly opt-in to using the new image when we have done sufficient testing to ensure that our existing workflows work as expected in the new @@ -112,7 +112,7 @@ the principle of least privilege. Ensuring Workflows Run on the Correct Events -------------------------------------------- -Github allows workflows to run on a multitude of events and it is important to +GitHub allows workflows to run on a multitude of events, and it is important to configure a workflow such that it triggers on the correct events. There are two main best practices around events that trigger workflows: diff --git a/llvm/docs/CodingStandards.rst b/llvm/docs/CodingStandards.rst index dd275f292967b..65dd794103ac3 100644 --- a/llvm/docs/CodingStandards.rst +++ b/llvm/docs/CodingStandards.rst @@ -1790,6 +1790,12 @@ would help to avoid running into a "dangling else" situation. markAsIgnored(D); } +Use Unix line endings for files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Use Unix line endings for all files. 
CRLF line endings are allowed as an +exception for test files that intend to test CRLF handling or when the file +format requires it (like ``.bat`` or ``.rc`` files). See Also ======== diff --git a/llvm/docs/CommandGuide/lit.rst b/llvm/docs/CommandGuide/lit.rst index 359e0c3e81d0e..6a721ebf9cad0 100644 --- a/llvm/docs/CommandGuide/lit.rst +++ b/llvm/docs/CommandGuide/lit.rst @@ -630,13 +630,11 @@ TestRunner.py: %{fs-sep} file system path separator %t temporary file name unique to the test %basename_t The last path component of %t but without the ``.tmp`` extension (deprecated, use ``%{t:stem}`` instead) - %T parent directory of %t (not unique, deprecated, do not use) %% % %/s %s but ``\`` is replaced by ``/`` %/S %S but ``\`` is replaced by ``/`` %/p %p but ``\`` is replaced by ``/`` %/t %t but ``\`` is replaced by ``/`` - %/T %T but ``\`` is replaced by ``/`` %{s:basename} The last path component of %s %{t:stem} The last path component of %t but without the ``.tmp`` extension (alias for %basename_t) %{s:real} %s after expanding all symbolic links and substitute drives @@ -648,12 +646,10 @@ TestRunner.py: %{/S:real} %/S after expanding all symbolic links and substitute drives %{/p:real} %/p after expanding all symbolic links and substitute drives %{/t:real} %/t after expanding all symbolic links and substitute drives - %{/T:real} %/T after expanding all symbolic links and substitute drives %{/s:regex_replacement} %/s but escaped for use in the replacement of a ``s@@@`` command in sed %{/S:regex_replacement} %/S but escaped for use in the replacement of a ``s@@@`` command in sed %{/p:regex_replacement} %/p but escaped for use in the replacement of a ``s@@@`` command in sed %{/t:regex_replacement} %/t but escaped for use in the replacement of a ``s@@@`` command in sed - %{/T:regex_replacement} %/T but escaped for use in the replacement of a ``s@@@`` command in sed %:s On Windows, %/s but a ``:`` is removed if its the second character. Otherwise, %s but with a single leading ``/`` removed. %:S On Windows, %/S but a ``:`` is removed if its the second character. @@ -662,8 +658,6 @@ TestRunner.py: Otherwise, %p but with a single leading ``/`` removed. %:t On Windows, %/t but a ``:`` is removed if its the second character. Otherwise, %t but with a single leading ``/`` removed. - %:T On Windows, %/T but a ``:`` is removed if its the second character. - Otherwise, %T but with a single leading ``/`` removed. %{readfile:} Reads the file specified. ======================= ============== diff --git a/llvm/docs/CommandGuide/llvm-readelf.rst b/llvm/docs/CommandGuide/llvm-readelf.rst index 284c3aa470a6f..5403fea60d5ee 100644 --- a/llvm/docs/CommandGuide/llvm-readelf.rst +++ b/llvm/docs/CommandGuide/llvm-readelf.rst @@ -143,6 +143,10 @@ OPTIONS Display all notes. +.. option:: --offloading + + Display list of HIP offload bundles. + .. option:: --pretty-print When used with :option:`--elf-output-style`, JSON output will be formatted in diff --git a/llvm/docs/CommandGuide/llvm-readobj.rst b/llvm/docs/CommandGuide/llvm-readobj.rst index 8bd29eafbbfcf..0d05b947a6b3e 100644 --- a/llvm/docs/CommandGuide/llvm-readobj.rst +++ b/llvm/docs/CommandGuide/llvm-readobj.rst @@ -104,6 +104,10 @@ file formats. Do not demangle symbol names in the output. This option is only for ELF and XCOFF file formats. The option is enabled by default. +.. option:: --offloading + + Display list of HIP offload bundles. + .. option:: --relocations, --relocs, -r Display the relocation entries in the file. 
diff --git a/llvm/docs/CommandGuide/llvm-size.rst b/llvm/docs/CommandGuide/llvm-size.rst index f244769545b31..12e7c58c5776d 100644 --- a/llvm/docs/CommandGuide/llvm-size.rst +++ b/llvm/docs/CommandGuide/llvm-size.rst @@ -41,6 +41,13 @@ OPTIONS as a separate section entry for ``sysv`` output. If not specified, these symbols are ignored. +.. option:: --exclude-pagezero + + Do not include the ``__PAGEZERO`` segment when calculating size information + for Mach-O files. The ``__PAGEZERO`` segment is a virtual memory region used + for memory protection that does not contribute to actual size, and excluding + can provide a better representation of actual size. + .. option:: -d Equivalent to :option:`--radix` with a value of ``10``. diff --git a/llvm/docs/Coroutines.rst b/llvm/docs/Coroutines.rst index dde73c9c3cc23..13d2da42eaca7 100644 --- a/llvm/docs/Coroutines.rst +++ b/llvm/docs/Coroutines.rst @@ -303,7 +303,7 @@ The LLVM IR for this coroutine looks like this: call void @free(ptr %mem) br label %suspend suspend: - %unused = call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret ptr %hdl } @@ -637,7 +637,7 @@ store the current value produced by a coroutine. call void @free(ptr %mem) br label %suspend suspend: - %unused = call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret ptr %hdl } @@ -806,7 +806,7 @@ The LLVM IR for a coroutine using a Coroutine with a custom ABI looks like: call void @free(ptr %mem) br label %suspend suspend: - %unused = call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret ptr %hdl } @@ -1444,7 +1444,7 @@ A frontend should emit function attribute `presplitcoroutine` for the coroutine. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :: - declare i1 @llvm.coro.end(ptr , i1 , token ) + declare void @llvm.coro.end(ptr , i1 , token ) Overview: """"""""" @@ -1502,8 +1502,9 @@ For landingpad based exception model, it is expected that frontend uses the .. code-block:: llvm ehcleanup: - %InResumePart = call i1 @llvm.coro.end(ptr null, i1 true, token none) - br i1 %InResumePart, label %eh.resume, label %cleanup.cont + call void @llvm.coro.end(ptr null, i1 true, token none) + %InRamp = call i1 @llvm.coro.is_in_ramp() + br i1 %InRamp, label %cleanup.cont, label %eh.resume cleanup.cont: ; rest of the cleanup @@ -1515,10 +1516,10 @@ For landingpad based exception model, it is expected that frontend uses the %lpad.val29 = insertvalue { ptr, i32 } %lpad.val, i32 %sel, 1 resume { ptr, i32 } %lpad.val29 -The `CoroSpit` pass replaces `coro.end` with ``True`` in the resume functions, -thus leading to immediate unwind to the caller, whereas in start function it -is replaced with ``False``, thus allowing to proceed to the rest of the cleanup -code that is only needed during initial invocation of the coroutine. +The `CoroSpit` pass replaces `coro.is_in_ramp` with ``True`` in the ramp functions, +thus allowing to proceed to the rest of the cleanup code that is only needed during +initial invocation of the coroutine. Otherwise, it is replaced with ``False``, +thus leading to immediate unwind to the caller. 
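+For illustration (the label names reuse the example above, and the lowering of
+``llvm.coro.end`` itself is omitted), the branch above conceptually folds as
+follows once `CoroSplit` has replaced the intrinsic with a constant: the ramp
+clone continues with the cleanup, while the resume/destroy clones immediately
+resume the exception to the caller.
+
+.. code-block:: llvm
+
+  ; Ramp clone: llvm.coro.is_in_ramp is replaced by true, so the branch folds to
+  ehcleanup:
+    br label %cleanup.cont
+
+  ; Resume/destroy clones: the intrinsic is replaced by false, so the branch folds to
+  ehcleanup:
+    br label %eh.resume
+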
For Windows Exception handling model, a frontend should attach a funclet bundle referring to an enclosing cleanuppad as follows: @@ -1527,7 +1528,7 @@ referring to an enclosing cleanuppad as follows: ehcleanup: %tok = cleanuppad within none [] - %unused = call i1 @llvm.coro.end(ptr null, i1 true, token none) [ "funclet"(token %tok) ] + call void @llvm.coro.end(ptr null, i1 true, token none) [ "funclet"(token %tok) ] cleanupret from %tok unwind label %RestOfTheCleanup The `CoroSplit` pass, if the funclet bundle is present, will insert @@ -1592,7 +1593,7 @@ The number of arguments must match the return type of the continuation function: cleanup: %tok = call token (...) @llvm.coro.end.results(i8 %val) - call i1 @llvm.coro.end(ptr %hdl, i1 0, token %tok) + call void @llvm.coro.end(ptr %hdl, i1 0, token %tok) unreachable ... @@ -1604,7 +1605,7 @@ The number of arguments must match the return type of the continuation function: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :: - declare i1 @llvm.coro.end.async(ptr , i1 , ...) + declare void @llvm.coro.end.async(ptr , i1 , ...) Overview: """"""""" @@ -1635,10 +1636,10 @@ the function call. .. code-block:: llvm - call i1 (ptr, i1, ...) @llvm.coro.end.async( - ptr %hdl, i1 0, - ptr @must_tail_call_return, - ptr %ctxt, ptr %task, ptr %actor) + call void (ptr, i1, ...) @llvm.coro.end.async( + ptr %hdl, i1 0, + ptr @must_tail_call_return, + ptr %ctxt, ptr %task, ptr %actor) unreachable .. _coro.suspend: @@ -2117,6 +2118,30 @@ Example: %hdl.result = ... ; get address of returned coroutine handle ret ptr %hdl.result +'llvm.coro.is_in_ramp' Intrinsic +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:: + + declare i1 @llvm.coro.is_in_ramp() + +Overview: +""""""""" + +The '``llvm.coro.is_in_ramp``' intrinsic returns a bool value that marks coroutine ramp +function and resume/destroy function. + +Arguments: +"""""""""" + +None + +Semantics: +"""""""""" + +The `CoroSpit` pass replaces `coro.is_in_ramp` with ``True`` ramp functions. +Otherwise, it is replaced with ``False``, allowing the frontend to separate +ramp function and resume/destroy function. + Coroutine Transformation Passes =============================== CoroEarly diff --git a/llvm/docs/DirectXUsage.rst b/llvm/docs/DirectXUsage.rst index 1d964e6d54dae..78f27d89c1f8a 100644 --- a/llvm/docs/DirectXUsage.rst +++ b/llvm/docs/DirectXUsage.rst @@ -29,7 +29,7 @@ Initially the backend is aimed at supporting DirectX 12, and support for DirectX 11 is planned at a later date. The DirectX backend is currently experimental and is not shipped with any -release builds of LLVM tools. To enable building the DirectX backend locally add +release builds of LLVM tools. To build the DirectX backend locally, add ``DirectX`` to the ``LLVM_EXPERIMENTAL_TARGETS_TO_BUILD`` CMake option. For more information on building LLVM see the :doc:`CMake` documentation. @@ -38,7 +38,7 @@ information on building LLVM see the :doc:`CMake` documentation. Target Triples ============== -At present the DirectX target only supports the ``dxil`` architecture, which +At present, the DirectX target only supports the ``dxil`` architecture, which generates code for the `DirectX Intermediate Language. `_ @@ -46,8 +46,8 @@ In addition to target architecture, the DirectX backend also needs to know the target runtime version and pipeline stage. These are expressed using the OS and Environment triple component. 
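+For example (an illustrative sketch; the shader model version and pipeline
+stage are placeholders, and the vendor field is left as ``pc``), a compute
+shader built for shader model 6.3 would carry a triple such as the following
+in its LLVM IR module:
+
+.. code-block:: llvm
+
+  ; dxil architecture, shadermodel 6.3 as the OS component, and "compute" as
+  ; the pipeline-stage environment component.
+  target triple = "dxil-pc-shadermodel6.3-compute"
+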
-Presently the DirectX backend requires targeting the ``shadermodel`` OS, and -supports versions 6.0+ (at time of writing the latest announced version is 6.7). +Presently, the DirectX backend requires targeting the ``shadermodel`` OS, and +supports versions 6.0+ (as of writing, the latest announced version is 6.7). .. table:: DirectX Environments diff --git a/llvm/docs/Docker.rst b/llvm/docs/Docker.rst index 5d976eddb3130..5f8e619d8b5eb 100644 --- a/llvm/docs/Docker.rst +++ b/llvm/docs/Docker.rst @@ -27,8 +27,8 @@ to get a very basic explanation of it. `Docker `_ is a popular solution for running programs in an isolated and reproducible environment, especially to maintain releases for software deployed to large distributed fleets. -It uses linux kernel namespaces and cgroups to provide a lightweight isolation -inside currently running linux kernel. +It uses Linux kernel namespaces and cgroups to provide a lightweight isolation +inside currently running Linux kernel. A single active instance of dockerized environment is called a *docker container*. A snapshot of a docker container filesystem is called a *docker image*. @@ -127,17 +127,17 @@ Which image should I choose? We currently provide two images: Debian12-based and nvidia-cuda-based. They differ in the base image that they use, i.e. they have a different set of preinstalled binaries. Debian8 is very minimal, nvidia-cuda is larger, but has -preinstalled CUDA libraries and allows to access a GPU, installed on your +preinstalled CUDA libraries and allows access to a GPU, installed on your machine. -If you need a minimal linux distribution with only clang and libstdc++ included, +If you need a minimal Linux distribution with only clang and libstdc++ included, you should try Debian12-based image. If you want to use CUDA libraries and have access to a GPU on your machine, you should choose nvidia-cuda-based image and use `nvidia-docker `_ to run your docker containers. Note that you don't need nvidia-docker to build the images, but you need it in order -to have an access to GPU from a docker container that is running the built +to have access to a GPU from a docker container that is running the built image. If you have a different use-case, you could create your own image based on @@ -176,4 +176,4 @@ The first image is only used during build and does not have a descriptive name, i.e. it is only accessible via the hash value after the build is finished. The second image is our resulting image. It contains only the built binaries and not any build dependencies. It is also accessible via a descriptive name -(specified by -d and -t flags). +(specified by ``-d`` and ``-t`` flags). diff --git a/llvm/docs/ExtendingLLVM.rst b/llvm/docs/ExtendingLLVM.rst index 50f0af3fafc4c..019fdf5fc3278 100644 --- a/llvm/docs/ExtendingLLVM.rst +++ b/llvm/docs/ExtendingLLVM.rst @@ -13,7 +13,7 @@ function, or a whole new instruction. When you come to this realization, stop and think. Do you really need to extend LLVM? Is it a new fundamental capability that LLVM does not support at its -current incarnation or can it be synthesized from already pre-existing LLVM +current incarnation or can it be synthesized from existing LLVM elements? If you are not sure, ask on the `LLVM forums `_. The reason is that extending LLVM will get involved as you need to update all the different passes @@ -27,7 +27,7 @@ method of choice for LLVM extension. 
Before you invest a significant amount of effort into a non-trivial extension, **ask on the list** if what you are looking to do can be done with -already-existing infrastructure, or if maybe someone else is already working on +existing infrastructure, or if maybe someone else is already working on it. You will save yourself a lot of time and effort by doing so. .. _intrinsic function: @@ -57,12 +57,12 @@ function and then be turned into an instruction if warranted. #. ``llvm/lib/Analysis/ConstantFolding.cpp``: - If it is possible to constant fold your intrinsic, add support to it in the + If it is possible to constant fold your intrinsic, add support for it in the ``canConstantFoldCallTo`` and ``ConstantFoldCall`` functions. #. ``llvm/test/*``: - Add test cases for your test cases to the test suite + Add test cases for your intrinsic to the test suite Once the intrinsic has been added to the system, you must add code generator support for it. Generally you must do the following steps: @@ -72,7 +72,7 @@ Add support to the .td file for the target(s) of your choice in This is usually a matter of adding a pattern to the .td file that matches the intrinsic, though it may obviously require adding the instructions you want to - generate as well. There are lots of examples in the PowerPC and X86 backend + generate as well. There are lots of examples in the PowerPC and X86 backends to follow. Adding a new SelectionDAG node @@ -194,7 +194,7 @@ Adding a new instruction #. ``llvm/lib/AsmParser/LLLexer.cpp``: - add a new token to parse your instruction from assembly text file + add a new token to parse your instruction from an assembly text file #. ``llvm/lib/AsmParser/LLParser.cpp``: @@ -207,7 +207,7 @@ Adding a new instruction #. ``llvm/lib/Bitcode/Writer/BitcodeWriter.cpp``: - add a case for your instruction and how it will be parsed from bitcode + add a case for your instruction and how it will be written to bitcode #. ``llvm/lib/IR/Instruction.cpp``: @@ -236,7 +236,7 @@ Adding a new type .. warning:: Adding new types changes the bitcode format, and will break compatibility with - currently-existing LLVM installations. Only add new types if it is absolutely + existing LLVM installations. Only add new types if it is absolutely necessary. Adding a fundamental type @@ -284,17 +284,17 @@ Adding a derived type #. ``llvm/include/llvm/IR/DerivedTypes.h``: - add new class to represent new class in the hierarchy; add forward + add a new class to represent the new class in the hierarchy; add forward declaration to the TypeMap value type #. ``llvm/lib/IR/Type.cpp`` and ``llvm/lib/CodeGen/ValueTypes.cpp``: - add support for derived type, notably `enum TypeID` and `is`, `get` methods. + add support for derived type, notably ``enum TypeID`` and ``is``, ``get`` methods. #. ``llvm/include/llvm-c/Core.h`` and ``llvm/lib/IR/Core.cpp``: add enum ``LLVMTypeKind`` and modify - `LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty)` for the new type + ``LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty)`` for the new type #. ``llvm/lib/AsmParser/LLLexer.cpp``: diff --git a/llvm/docs/FuzzingLLVM.rst b/llvm/docs/FuzzingLLVM.rst index a0355d7014c8d..76eb4288a1f2c 100644 --- a/llvm/docs/FuzzingLLVM.rst +++ b/llvm/docs/FuzzingLLVM.rst @@ -33,7 +33,7 @@ clang-proto-fuzzer A |protobuf fuzzer| that compiles valid C++ programs generated from a protobuf class that describes a subset of the C++ language. -This fuzzer accepts clang command line options after `ignore_remaining_args=1`. 
+This fuzzer accepts clang command-line options after `ignore_remaining_args=1`. For example, the following command will fuzz clang with a higher optimization level: @@ -106,7 +106,7 @@ llvm-opt-fuzzer A |LLVM IR fuzzer| aimed at finding bugs in optimization passes. -It receives optimization pipeline and runs it for each fuzzer input. +It receives an optimization pipeline and runs it for each fuzzer input. Interface of this fuzzer almost directly mirrors ``llvm-isel-fuzzer``. Both ``mtriple`` and ``passes`` arguments are required. Passes are specified in a @@ -117,7 +117,7 @@ this format in the doxygen for ``PassBuilder::parsePassPipeline``. % bin/llvm-opt-fuzzer -ignore_remaining_args=1 -mtriple x86_64 -passes instcombine -Similarly to the ``llvm-isel-fuzzer`` arguments in some predefined configurations +Similarly to the ``llvm-isel-fuzzer``, arguments in some predefined configurations might be embedded directly into the binary file name: .. code-block:: shell @@ -176,7 +176,7 @@ mutations that a fuzzer in LLVM might want. Generic Random Fuzzing ---------------------- -The most basic form of input mutation is to use the built in mutators of +The most basic form of input mutation is to use the built-in mutators of LibFuzzer. These simply treat the input corpus as a bag of bits and make random mutations. This type of fuzzer is good for stressing the surface layers of a program, and is good at testing things like lexers, parsers, or binary @@ -244,7 +244,7 @@ by adding the following two flags to your CMake invocation: to avoid building the sanitizers themselves with sanitizers enabled. .. note:: You may run into issues if you build with BFD ld, which is the - default linker on many unix systems. These issues are being tracked + default linker on many Unix systems. These issues are being tracked in https://llvm.org/PR34636. Continuously Running and Finding Bugs @@ -280,6 +280,6 @@ your fuzzer can be built and tested when not built against libFuzzer. There is also some handling of the CMake config for fuzzers, where you should use the ``add_llvm_fuzzer`` to set up fuzzer targets. This function works -similarly to functions such as ``add_llvm_tool``, but they take care of linking +similarly to functions such as ``add_llvm_tool``, but it takes care of linking to LibFuzzer when appropriate and can be passed the ``DUMMY_MAIN`` argument to enable standalone testing. diff --git a/llvm/docs/GetElementPtr.rst b/llvm/docs/GetElementPtr.rst index 25a24c6403091..6831a8e6e81eb 100644 --- a/llvm/docs/GetElementPtr.rst +++ b/llvm/docs/GetElementPtr.rst @@ -10,8 +10,8 @@ Introduction This document seeks to dispel the mystery and confusion surrounding LLVM's `GetElementPtr `_ (GEP) instruction. -Questions about the wily GEP instruction are probably the most frequently -occurring questions once a developer gets down to coding with LLVM. Here we lay +Questions about the wily GEP instruction are probably the most frequent +questions once a developer gets down to coding with LLVM. Here we lay out the sources of confusion and show that the GEP instruction is really quite simple. @@ -30,8 +30,8 @@ What is the first index of the GEP instruction? Quick answer: The index stepping through the second operand. The confusion with the first index usually arises from thinking about the -GetElementPtr instruction as if it was a C index operator. They aren't the -same. For example, when we write, in "C": +GetElementPtr instruction as if it were a C index operator. They aren't the +same. For example, when we write, in C: .. 
code-block:: c++ @@ -62,7 +62,7 @@ The answer is simply because memory does not have to be accessed to perform the computation. The second operand to the GEP instruction must be a value of a pointer type. The value of the pointer is provided directly to the GEP instruction as an operand without any need for accessing memory. It must, -therefore be indexed and requires an index operand. Consider this example: +therefore, be indexed and requires an index operand. Consider this example: .. code-block:: c++ @@ -285,7 +285,7 @@ I'm writing a backend for a target which needs custom lowering for GEP. How do I ----------------------------------------------------------------------------------------- You don't. The integer computation implied by a GEP is target-independent. -Typically what you'll need to do is make your backend pattern-match expressions +Typically what you'll need to do is make your backend pattern-match expression trees involving ADD, MUL, etc., which are what GEP is lowered into. This has the advantage of letting your code work correctly in more cases. @@ -377,7 +377,7 @@ the underlying object. Furthermore, loads and stores don't have to use the same types as the type of the underlying object. Types in this context serve only to specify memory size -and alignment. Beyond that there are merely a hint to the optimizer indicating +and alignment. Beyond that they are merely a hint to the optimizer indicating how the value will likely be used. Can I cast an object's address to integer and add it to null? @@ -506,7 +506,7 @@ sufficient to preserve the pointer aliasing guarantees that GEP provides. Summary ======= -In summary, here's some things to always remember about the GetElementPtr +In summary, here are some things to always remember about the GetElementPtr instruction: diff --git a/llvm/docs/GettingInvolved.rst b/llvm/docs/GettingInvolved.rst index d87a8bd81cc7b..4b4b09ad87aba 100644 --- a/llvm/docs/GettingInvolved.rst +++ b/llvm/docs/GettingInvolved.rst @@ -42,7 +42,7 @@ LLVM welcomes contributions of all kinds. To get started, please review the foll in the LLVM system. :doc:`BugLifeCycle` - Describes how bugs are reported, triaged and closed. + Describes how bugs are reported, triaged, and closed. :doc:`CodingStandards` Details the LLVM coding standards and provides useful information on writing @@ -108,7 +108,7 @@ The :doc:`CodeOfConduct` applies to all these forums and mailing lists. `Commits Archive (llvm-commits)`__ This list contains all commit messages that are made when LLVM developers commit code changes to the repository. It also serves as a forum for - patch review (i.e. send patches here). It is useful for those who want to + patch review (i.e., send patches here). It is useful for those who want to stay on the bleeding edge of LLVM development. This list is very high volume. @@ -121,7 +121,7 @@ The :doc:`CodeOfConduct` applies to all these forums and mailing lists. .. __: http://lists.llvm.org/pipermail/llvm-bugs/ `LLVM Announcements`__ - If you just want project wide announcements such as releases, developers meetings, or blog posts, then you should check out the Announcement category on LLVM Discourse. + If you just want project-wide announcements such as releases, developers meetings, or blog posts, then you should check out the Announcement category on LLVM Discourse. .. __: https://discourse.llvm.org/c/announce/46 @@ -208,11 +208,21 @@ what to add to your calendar invite. 
- 3rd Tuesday of the month - `ics `__ - `Meeting details/agenda: `__ - * - LLVM Qualification Working Group + * - `LLVM Qualification Working Group `__ - 1st Tuesday/Wednesday of the month - `ics `__ `gcal `__ - `Minutes/docs `__ + * - MLIR C/C++ Frontend Working Group + - Monthly, usually 1st Monday of the month + - `ics `__ + `gcal `__ + - `Minutes/docs `__ + * - ClangIR Upstreaming Coordination Meeting + - Every 2 weeks on Mondays + - `ics `__ + `gcal `__ + - For event owners, our Discord bot also supports sending automated announcements @@ -473,7 +483,7 @@ join one in your city. Or start a new one if there is none: Community wide proposals ------------------------ -Proposals for massive changes in how the community behaves and how the work flow +Proposals for large-scale changes in how the community behaves and how the work flow can be better. .. toctree:: @@ -518,7 +528,7 @@ also be seen inline below: Note that the web view of the LLVM community calendar shows events in Coordinated Universal Time (UTC). If you use Google Calendar, consider subscribing to it with the + button in the bottom-right corner to view all -events in your local timezone alongside your other calendars. +events in your local time zone alongside your other calendars. .. _llvm-community-calendar-host-guidance: @@ -554,9 +564,9 @@ An example invite looks as follows This event is a meetup for all developers of LLDB. Meeting agendas are posted on discourse before the event. - Attendees are required to adhere to the LLVM Code of Conduct + Attendees must adhere to the LLVM Code of Conduct (https://llvm.org/docs/CodeOfConduct.html). For any Code of Conduct reports, - please contact the organizers, and also email conduct@llvm.org. + please contact the organizers and also email conduct@llvm.org. Agenda/Meeting Minutes: Link to minutes diff --git a/llvm/docs/GlobalISel/InstructionSelect.rst b/llvm/docs/GlobalISel/InstructionSelect.rst index 9798ae7a596ca..5513824cf190c 100644 --- a/llvm/docs/GlobalISel/InstructionSelect.rst +++ b/llvm/docs/GlobalISel/InstructionSelect.rst @@ -5,8 +5,22 @@ InstructionSelect ----------------- This pass transforms generic machine instructions into equivalent -target-specific instructions. It traverses the ``MachineFunction`` bottom-up, -selecting uses before definitions, enabling trivial dead code elimination. +target-specific instructions. + +The legacy instruction selector, SelectionDAG, iterated over each function's +basic block and constructed a dataflow graph. Every backend defines +tree patterns in the ``XXXInstrInfo.td``. The legacy selector started +at the bottom and replaced the SDNodes greedily. + +The GlobalISel's instruction selector traverses the ``MachineFunction`` +bottom-up, selecting uses before definitions, enabling trivial dead code +elimination. It does that by iterating over the basic blocks in post-order. +Each gMIR instruction is then replaced by a MIR instruction when a matching +pattern is found. So, when there is a 1:1 mapping between gMIR and MIR, where +is the benefit of the global scope? Even in the case of a 1:1 mapping, +GlobalISel includes a combiner that can match and fuse multiple gMIR +instructions. The scope of the combination is not limited to a basic block, +but can extend across the entire function. .. 
_api-instructionselector: diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst index e6713c827d6ab..22b58bf0f5735 100644 --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -660,19 +660,60 @@ Non-Integral Pointer Type Note: non-integral pointer types are a work in progress, and they should be considered experimental at this time. -LLVM IR optionally allows the frontend to denote pointers in certain address -spaces as "non-integral" via the :ref:`datalayout string`. -Non-integral pointer types represent pointers that have an *unspecified* bitwise -representation; that is, the integral representation may be target dependent or -unstable (not backed by a fixed integer). +For most targets, the pointer representation is a direct mapping from the +bitwise representation to the address of the underlying memory location. +Such pointers are considered "integral", and any pointers where the +representation is not just an integer address are called "non-integral". + +Non-integral pointers have at least one of the following three properties: + +* the pointer representation contains non-address bits +* the pointer representation is unstable (may changed at any time in a + target-specific way) +* the pointer representation has external state + +These properties (or combinations thereof) can be applied to pointers via the +:ref:`datalayout string`. + +The exact implications of these properties are target-specific. The following +subsections describe the IR semantics and restrictions to optimization passes +for each of these properties. + +Pointers with non-address bits +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pointers in this address space have a bitwise representation that not only +has address bits, but also some other target-specific metadata. +In most cases pointers with non-address bits behave exactly the same as +integral pointers, the only difference is that it is not possible to create a +pointer just from an address unless all the non-address bits are also recreated +correctly in a target-specific way. + +An example of pointers with non-address bits are the AMDGPU buffer descriptors +which are 160 bits: a 128-bit fat pointer and a 32-bit offset. +Similarly, CHERI capabilities contain a 32 or 64 bit address as well as the +same number of metadata bits, but unlike the AMDGPU buffer descriptors they have +external state in addition to non-address bits. + + +Unstable pointer representation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pointers in this address space have an *unspecified* bitwise representation +(i.e. not backed by a fixed integer). The bitwise pattern of such pointers is +allowed to change in a target-specific way. For example, this could be a pointer +type used with copying garbage collection where the garbage collector could +update the pointer at any time in the collection sweep. ``inttoptr`` and ``ptrtoint`` instructions have the same semantics as for integral (i.e., normal) pointers in that they convert integers to and from -corresponding pointer types, but there are additional implications to be -aware of. Because the bit-representation of a non-integral pointer may -not be stable, two identical casts of the same operand may or may not +corresponding pointer types, but there are additional implications to be aware +of. + +For "unstable" pointer representations, the bit-representation of the pointer +may not be stable, so two identical casts of the same operand may or may not return the same value. 
Said differently, the conversion to or from the -non-integral type depends on environmental state in an implementation +"unstable" pointer type depends on environmental state in an implementation defined manner. If the frontend wishes to observe a *particular* value following a cast, the @@ -681,21 +722,72 @@ defined manner. (In practice, this tends to require ``noinline`` routines for such operations.) From the perspective of the optimizer, ``inttoptr`` and ``ptrtoint`` for -non-integral types are analogous to ones on integral types with one +"unstable" pointer types are analogous to ones on integral types with one key exception: the optimizer may not, in general, insert new dynamic occurrences of such casts. If a new cast is inserted, the optimizer would need to either ensure that a) all possible values are valid, or b) appropriate fencing is inserted. Since the appropriate fencing is implementation defined, the optimizer can't do the latter. The former is challenging as many commonly expected properties, such as -``ptrtoint(v)-ptrtoint(v) == 0``, don't hold for non-integral types. +``ptrtoint(v)-ptrtoint(v) == 0``, don't hold for "unstable" pointer types. Similar restrictions apply to intrinsics that might examine the pointer bits, such as :ref:`llvm.ptrmask`. -The alignment information provided by the frontend for a non-integral pointer +The alignment information provided by the frontend for an "unstable" pointer (typically using attributes or metadata) must be valid for every possible representation of the pointer. +Pointers with external state +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A further special case of non-integral pointers is ones that include external +state (such as bounds information or a type tag) with a target-defined size. +An example of such a type is a CHERI capability, where there is an additional +validity bit that is part of all pointer-typed registers, but is located in +memory at an implementation-defined address separate from the pointer itself. +Another example would be a fat-pointer scheme where pointers remain plain +integers, but the associated bounds are stored in an out-of-band table. + +Unless also marked as "unstable", the bit-wise representation of pointers with +external state is stable and ``ptrtoint(x)`` always yields a deterministic +value. This means transformation passes are still permitted to insert new +``ptrtoint`` instructions. + +The following restrictions apply to IR level optimization passes: + +The ``inttoptr`` instruction does not recreate the external state and therefore +it is target dependent whether it can be used to create a dereferenceable +pointer. In general passes should assume that the result of such an inttoptr +is not dereferenceable. For example, on CHERI targets an ``inttoptr`` will +yield a capability with the external state (the validity tag bit) set to zero, +which will cause any dereference to trap. +The ``ptrtoint`` instruction also only returns the "in-band" state and omits +all external state. + +When a ``store ptr addrspace(N) %p, ptr @dst`` of such a non-integral pointer +is performed, the external metadata is also stored to an implementation-defined +location. Similarly, a ``%val = load ptr addrspace(N), ptr @dst`` will fetch the +external metadata and make it available for all uses of ``%val``. +Similarly, the ``llvm.memcpy`` and ``llvm.memmove`` intrinsics also transfer the +external state. 
This is essential to allow frontends to efficiently emit copies +of structures containing such pointers, since expanding all these copies as +individual loads and stores would affect compilation speed and inhibit +optimizations. + +Notionally, these external bits are part of the pointer, but since +``inttoptr`` / ``ptrtoint``` only operate on the "in-band" bits of the pointer +and the external bits are not explicitly exposed, they are not included in the +size specified in the :ref:`datalayout string`. + +When a pointer type has external state, all roundtrips via memory must +be performed as loads and stores of the correct type since stores of other +types may not propagate the external data. +Therefore it is not legal to convert an existing load/store (or a +``llvm.memcpy`` / ``llvm.memmove`` intrinsic) of pointer types with external +state to a load/store of an integer type with same bitwidth, as that may drop +the external state. + + .. _globalvars: Global Variables @@ -1397,6 +1489,8 @@ Currently, only the following parameter attributes are defined: function, returning a pointer to allocated storage disjoint from the storage for any other object accessible to the caller. +.. _captures_attr: + ``captures(...)`` This attribute restricts the ways in which the callee may capture the pointer. This is not a valid attribute for return values. This attribute @@ -2921,6 +3015,8 @@ assumptions, such as that a :ref:`parameter attribute ` or a location. Operand bundles enable assumptions that are either hard or impossible to represent as a boolean argument of an :ref:`llvm.assume `. +Assumes with operand bundles must have ``i1 true`` as the condition operand. + An assume operand bundle has the form: :: @@ -2953,7 +3049,7 @@ allows the optimizer to assume that at location of call to .. code-block:: llvm - call void @llvm.assume(i1 %cond) ["cold"(), "nonnull"(ptr %val)] + call void @llvm.assume(i1 true) ["cold"(), "nonnull"(ptr %val)] allows the optimizer to assume that the :ref:`llvm.assume ` call location is cold and that ``%val`` may not be null. @@ -3179,8 +3275,8 @@ as follows: ``A
`` Specifies the address space of objects created by '``alloca``'. Defaults to the default address space of 0. -``p[n]::[:[:]]`` - This specifies the properties of a pointer in address space ``n``. +``p[][]::[:[:]]`` + This specifies the properties of a pointer in address space ``as``. The ```` parameter specifies the size of the bitwise representation. For :ref:`non-integral pointers ` the representation size may be larger than the address width of the underlying address space (e.g. to @@ -3193,9 +3289,13 @@ as follows: default index size is equal to the pointer size. The index size also specifies the width of addresses in this address space. All sizes are in bits. - The address space, ``n``, is optional, and if not specified, - denotes the default address space 0. The value of ``n`` must be - in the range [1,2^24). + The address space, ````, is optional, and if not specified, denotes the + default address space 0. The value of ```` must be in the range [1,2^24). + The optional ```` are used to specify properties of pointers in this + address space: the character ``u`` marks pointers as having an unstable + representation, and ``e`` marks pointers having external state. See + :ref:`Non-Integral Pointer Types `. + ``i:[:]`` This specifies the alignment for an integer type of a given bit ````. The value of ```` must be in the range [1,2^24). @@ -3248,9 +3348,11 @@ as follows: this set are considered to support most general arithmetic operations efficiently. ``ni:
<address space0>:<address space1>:<address space2>
...`` - This specifies pointer types with the specified address spaces - as :ref:`Non-Integral Pointer Type ` s. The ``0`` - address space cannot be specified as non-integral. + This marks pointer types with the specified address spaces + as :ref:`unstable `. + The ``0`` address space cannot be specified as non-integral. + It is only supported for backwards compatibility, the flags of the ``p`` + specifier should be used instead for new code. ```` is a lower bound on what is required for a type to be considered aligned. This is used in various places, such as: @@ -7443,6 +7545,33 @@ The number of bytes known to be dereferenceable is specified by the integer value in the metadata node. This is analogous to the ''dereferenceable_or_null'' attribute on parameters and return values. +'``captures``' Metadata +^^^^^^^^^^^^^^^^^^^^^^^ + +The ``!captures`` metadata can only be applied to ``store`` instructions with +a pointer-typed value operand. It restricts the capturing behavior of the store +value operand in the same way the ``captures(...)`` attribute would do on a +call. See the :ref:`pointer capture section ` for a detailed +discussion of capture semantics. + +The ``!captures`` metadata accepts a non-empty list of strings from the same +set as the :ref:`captures attribute `: +``!"address"``, ``!"address_is_null"``, ``!"provenance"`` and +``!"read_provenance"``. ``!"none"`` is not supported. + +For example ``store ptr %x, ptr %y, !captures !{!"address"}`` indicates that +the copy of pointer ``%x`` stored to location ``%y`` will only be used to +inspect its integral address value, and not dereferenced. Dereferencing the +pointer would result in undefined behavior. + +Similarly ``store ptr %x, ptr %y, !captures !{!"address", !"read_provenance"}`` +indicates that while reads through the stored pointer are allowed, writes would +result in undefined behavior. + +The ``!captures`` attribute makes no statement about other uses of ``%x``, or +uses of the stored-to memory location after it has been overwritten with a +different value. + .. _llvm.loop: '``llvm.loop``' @@ -8764,6 +8893,28 @@ For example, the following metadata section contains two library specifiers:: Each library specifier will be handled independently by the consuming linker. The effect of the library specifiers are defined by the consuming linker. +'``llvm.errno.tbaa``' Named Metadata +==================================== + +The module-level ``!llvm.errno.tbaa`` metadata specifies the TBAA nodes used +for accessing ``errno``. These nodes are guaranteed to represent int-compatible +accesses according to C/C++ strict aliasing rules. This should let LLVM alias +analyses to reason about aliasing with ``errno`` when calling library functions +that may set ``errno``, allowing optimizations such as store-to-load forwarding +across such routines. + +For example, the following is a valid metadata specifying the TBAA information +for an integer access: + + !llvm.errno.tbaa = !{!0} + !0 = !{!1, !1, i64 0} + !1 = !{!"int", !2, i64 0} + !2 = !{!"omnipotent char", !3, i64 0} + !3 = !{!"Simple C/C++ TBAA"} + +Multiple TBAA operands are allowed to support merging of modules that may use +different TBAA hierarchies (e.g., when mixing C and C++). + .. _summary: ThinLTO Summary @@ -11299,11 +11450,9 @@ responsibility of the code emitter to ensure that the alignment information is correct. Overestimating the alignment results in undefined behavior. Underestimating the alignment may produce less efficient code. An alignment of 1 is always safe. 
The maximum possible alignment is ``1 << 32``. An alignment -value higher than the size of the loaded type implies memory up to the -alignment value bytes can be safely loaded without trapping in the default -address space. Access of the high bytes can interfere with debugging tools, so -should not be accessed if the function has the ``sanitize_thread`` or -``sanitize_address`` attributes. +value higher than the size of the loaded type does *not* imply (without target +specific knowledge) that memory up to the alignment value bytes can be safely +loaded without trapping. The alignment is only optional when parsing textual IR; for in-memory IR, it is always present. An omitted ``align`` argument means that the operation has the @@ -11439,12 +11588,10 @@ operation (that is, the alignment of the memory address). It is the responsibility of the code emitter to ensure that the alignment information is correct. Overestimating the alignment results in undefined behavior. Underestimating the alignment may produce less efficient code. An alignment of -1 is always safe. The maximum possible alignment is ``1 << 32``. An alignment -value higher than the size of the loaded type implies memory up to the -alignment value bytes can be safely loaded without trapping in the default -address space. Access of the high bytes can interfere with debugging tools, so -should not be accessed if the function has the ``sanitize_thread`` or -``sanitize_address`` attributes. +1 is always safe. The maximum possible alignment is ``1 << 32``. An alignment +value higher than the size of the stored type does *not* imply (without target +specific knowledge) that memory up to the alignment value bytes can be safely +loaded without trapping. The alignment is only optional when parsing textual IR; for in-memory IR, it is always present. An omitted ``align`` argument means that the operation has the @@ -24468,7 +24615,7 @@ Overview: The '``llvm.vp.load.ff.*``' intrinsic is similar to '``llvm.vp.load.*``', but will not trap if there are not ``evl`` readable -lanes at the pointer. '``ff``' stands for fault-first or fault-only-first. +lanes at the pointer. '``ff``' stands for first-fault or fault-only-first. Arguments: """""""""" @@ -31402,4 +31549,3 @@ Semantics: The '``llvm.preserve.struct.access.index``' intrinsic produces the same result as a getelementptr with base ``base`` and access operands ``{0, gep_index}``. - diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst index 4c8c605edfdd6..e8dceb836f98a 100644 --- a/llvm/docs/NVPTXUsage.rst +++ b/llvm/docs/NVPTXUsage.rst @@ -1971,6 +1971,464 @@ The last argument `i1 %unpack` is a compile-time constant which when set, indica For more information, refer to the `PTX ISA `__. +tcgen05.mma Intrinsics +---------------------- + +Overview +^^^^^^^^ + +`tcgen05.mma` operation of shape `M x N x K` perform matrix multiplication and +accumulation of the form: `D = A * B + D` where: + + - the `A` matrix has shape `M x K`, in either `Tensor Memory` or `Shared Memory` + - the `B` matrix has shape `K x N`, in `Shared Memory` of the current CTA and, optionally in peer CTA + - the `D` matrix is of the shape `M x N`, in `Tensor Memory` + +Optionally an input predicate can be used to disable the input (`%enable_inp_d`) +from the accumulator matrix and the following operation can be performed as `D = A * B` + +The matrix multiplication and accumulation operations are categorized into various +kinds based on input types and the throughput of the multiplication operation. 
+The following table shows the different kinds of MMA operations that are supported:
+
++------------+--------------------------------------------+
+| .kind      | Supported Input Types                      |
++============+============================================+
+| f16        | F16 and BF16                               |
++------------+--------------------------------------------+
+| tf32       | TF32                                       |
++------------+--------------------------------------------+
+| f8f6f4     | All combinations of F8, F6, and F4         |
++------------+--------------------------------------------+
+| i8         | Signed and Unsigned 8-bit Integers         |
++------------+--------------------------------------------+
+| mxf8f6f4   | MX-floating point formats                  |
++------------+--------------------------------------------+
+| mxf4       | MX-floating point formats (FP4)            |
++------------+--------------------------------------------+
+| mxf4nvf4   | MXF4 + custom NVIDIA 4-bit floating point  |
+|            | (with common scaling factor)               |
++------------+--------------------------------------------+
+
+`tcgen05.mma.sp` supports a sparse variant of `A` with shape `M x K`, stored in packed
+form as `M x (K / 2)` in memory. The `%spmetadata` operand specifies the mapping of the
+`K / 2` non-zero elements to the `K` elements before the MMA operation is performed.
+
+`tcgen05.mma.block_scale` performs matrix multiplication with block scaling,
+`D = (A * scale_A) * (B * scale_B) + D`, where the input matrices read from memory are
+scaled to form matrix `A` and matrix `B` before the MMA operation is performed.
+Scale factors for the `A` and `B` matrices need to be duplicated to all 32 lane partitions
+of tensor memory. The shapes of the `%scale_a` and `%scale_b` matrices depend on the
+`.scale_vectorsize` described `here `__.
+
+The sparsity metadata (`%spmetadata`) as well as the block-scale inputs for the `A` / `B`
+matrices (`%scale_a` and `%scale_b`) reside in Tensor Memory.
+
+To facilitate opportunistic re-use of `A` / `B` matrix data across a sequence of MMA
+operations, the `A` / `B` matrices are loaded into a collector buffer. The collector
+usage flags in the intrinsics (`%collector_usage_a_op_flag`,
+`%collector_usage_b_buffer_flag`, and `%collector_usage_b_op_flag`) specify the nature
+of this re-use.
+
+There are three kinds of matrix descriptors used by the tcgen05 family of instructions:
+
++----------------------------+-----------------------------------------------+-------------+
+| Descriptor                 | Description                                   | Size (bits) |
++============================+===============================================+=============+
+| Shared Memory Descriptor   | Describes properties of the multiplicand      | 64          |
+|                            | matrix in shared memory, including its        |             |
+|                            | location within the CTA's shared memory.      |             |
+|                            | `PTX ISA `__                                  |             |
++----------------------------+-----------------------------------------------+-------------+
+| Instruction Descriptor     | Describes shapes, types, and details of       | 32          |
+|                            | all matrices and the MMA operation.           |             |
+|                            | `PTX ISA `__                                  |             |
++----------------------------+-----------------------------------------------+-------------+
+| Zero-Column Mask Descriptor| Generates a mask specifying which columns of  | 64          |
+|                            | the B matrix are zeroed in the MMA operation, |             |
+|                            | regardless of values in shared memory.        |             |
+|                            | Total mask size = N bits                      |             |
+|                            | `PTX ISA `__                                  |             |
++----------------------------+-----------------------------------------------+-------------+
+
+`tcgen05.mma` can be used for general matrix multiplication or for convolution
+operations. In the case of convolutions, the `activations` can be stored in either
+matrix `A` or matrix `B`, while the `weights` will be stored in the other matrix.
+
+`tcgen05.mma` has an optional collector qualifier to specify whether an `A` or `B`
+matrix is new to the sequence and should be loaded, is unchanged within the sequence
+and should be reused, or is the last use in the sequence and should be discarded.
+The collector qualifier gives the TensorCore permission to reuse a previously loaded
+`A` or `B` matrix; however, reuse is opportunistic in that the TensorCore may reload
+a matrix even when it has permission to reuse that matrix. Thus, the source memory of
+an `A` or `B` matrix must not be modified until the MMA instruction using those
+matrices has completed, regardless of collector qualifier permissions.
+
+`cta_group::1` specifies that the operation is performed on the Tensor Memory of the
+executing thread’s CTA only. `cta_group::2` specifies that the MMA operation is
+performed on the Tensor Memory of the executing thread’s CTA and its peer CTA.
+
+The vector operand `%disable_output_lane` specifies the lane(s) in the Tensor Memory
+that should not be updated with the resultant matrix `D`. The elements of
+`%disable_output_lane` form a mask where each bit corresponds to a lane of the Tensor
+Memory, with the least significant bit of the first element of the vector (leftmost
+in syntax) corresponding to lane 0 of the Tensor Memory. If a bit in the mask is 1,
+then the corresponding lane in the Tensor Memory for the resultant matrix `D` will
+not be updated.
+
+Intrinsic Design:
+^^^^^^^^^^^^^^^^^
+
+Given the broad feature set of the `tcgen05.mma` instruction, modeling it through
+intrinsics is highly complex. The following table outlines the large number of
+intrinsics required to fully support the `tcgen05.mma` instruction set.
+ ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| variant | Configuration | Total Variants | ++====================================+===================================================================================================+================+ +| tcgen05.mma.shared | 2 (space) x 2 (sp) x 4 (kind) x 2 (cta_group) x 4 (collector_usage) | 128 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.tensor.ashift | 2 (sp) x 4 (kind) x 2 (cta_group) x 2 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.scale_d | 2 (space) x 2 (sp) x 2 (kind) x 2 (cta_group) x 4 (collector_usage) | 128 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.scale_d.tensor.ashift | 2 (sp) x 2 (kind) x 2 (cta_group) x 2 (collector_usage) | 16 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.disable_output_lane | 2 (space) x 2 (sp) x 4 (kind) x 2 (cta_group) x 4 (collector_usage) | 128 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.disable_output_lane... | 2 (sp) x 4 (kind) x 2 (cta_group) x 2 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.block_scale | 2 (space) x 1 (mxf4nvf4) x 2 (cta_group) x 2 (scale_vec_size) x 4 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.block_scale | 2 (space) x 1 (mxf4) x 2 (cta_group) x 2 (scale_vec_size) x 4 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.block_scale | 2 (space) x 1 (mxf8f6f4) x 2 (cta_group) x 2 (scale_vec_size) x 4 (collector_usage) | 32 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| tcgen05.mma.ws | 2 (space) x 2 (sp) x 4 (kind) x 2 (zero_col_mask) x 4 (collector_usage_op) x 4 (collector_buffer) | 256 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ +| Total | | 816 | ++------------------------------------+---------------------------------------------------------------------------------------------------+----------------+ + + +To reduce the number of possible intrinsic variations, we've modeled the `tcgen05.mma` +instructions using flag operands. We've added range checks to these flags to prevent +invalid values. We also expanded some flags back into intrinsic modifiers to avoid +supporting invalid combinations of features. 
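+As a minimal illustrative sketch (not taken from the PTX specification), the
+flag-operand modeling means that a plain `f16` MMA on shared-memory `A`/`B`
+operands, issued for the current CTA with the `DISCARD` collector policy, could
+be emitted as the call below. The destination `%d` and the descriptors `%adesc`,
+`%bdesc`, and `%idesc` are assumed to have been produced earlier; the immediate
+flag operands use the encodings listed in the tables that follow.
+
+.. code-block:: llvm
+
+  ; D = A * B + D (accumulation enabled via i1 true);
+  ; kind_flag = 0 (F16), cta_group_flag = 1 (CG1),
+  ; collector_usage_a_op_flag = 0 (DISCARD).
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 true, i32 0, i32 1, i32 0)
+
+The other variants listed below follow the same pattern; only the intrinsic name
+and the set of flag operands change.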
+ + +'``llvm.nvvm.tcgen05.mma.*``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. code-block:: llvm + + declare void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_a_op_flag) + + ; .sp variants + declare void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, i32 %kind_flag, i32 %cta_group_flag, i32 %collector_usage_a_op_flag) + + ; .scale_d variants + declare void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group_flag, i32 %kind_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.scale_d<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group_flag, i32 %kind_flag, i32 %collector_usage_a_op_flag) + + ; sp.scale_d variants + declare void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group_flag, i32 %collector_usage_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, i64 %scale_d_imm, i32 %cta_group, i32 %collector_usage_a_op_flag) + +Overview: +""""""""" + +`nvvm.tcgen05.mma` is an asynchronous intrinsic which initiates an `M x N x K` matrix +multiply and accumulate operation, `D = A * B + D` where the `A` matrix is `M x K`, +the `B` matrix is `K x N`, and the `D` matrix is `M x N`. The operation of the form +`D = A*B` is issued when the input predicate argument `%enable_inp_d` is false. +The optional immediate argument `%scale_d_imm` can be specified to scale the input +matrix `D` as follows: `D = A * B + D * (2 ^ - %scale_d_imm)`. The valid range of +values for argument `%scale_d_imm` is `[0, 15]`. The 32-bit register operand idesc +is the instruction descriptor as described in `Instruction descriptor `__ + +`nvvm.tcgen05.mma` has single thread semantics, unlike the collective instructions +`nvvm.mma.sync` or the PTX `wgmma.mma_async` instruction. So, a single thread issuing +the `nvvm.tcgen05.mma` will result in the initiation of the whole matrix and accumulate +operation + +When `.sp` is specifed, the dimension of A matrix is `M x (K/2)` and requires +specifiying an additional `%spmetadata` argument + +`.ashift` shifts the rows of the A matrix down by one row, except for the last row +in the Tensor Memory. `.ashift` is only allowed with M = 128 or M = 256. + +The `%collector_usage_a_op_flag` flag specifies the usage of collector buffer for +matrix `A`. 
It is illegal to specify either of `USE` or `FILL` for `%collector_usage_a_op_flag` +along with `.ashift` + +For more information, refer to the +`PTX ISA `__ + +The following tables describes the possible values of the flag arguments + +`%kind_flag` flag: + +============= ========== + `kind_flag` value +============= ========== + F16 0 + TF32 1 + F8F6F4 2 + I8 3 +============= ========== + +`%cta_group_flag` flag: + +================= ========== + `cta_group_flag` value +================= ========== + CG1 1 + CG2 2 +================= ========== + +`%collector_usage_a_op_flag` flag: + +============================= ========== + `collector_usage_a_op_flag` value +============================= ========== + DISCARD 0 + LASTUSE 1 + USE 2 + FILL 3 +============================= ========== + +'``llvm.nvvm.tcgen05.mma.block_scale*``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. code-block:: llvm + + ; mxf8f6f4 + declare void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + + ; mxf4 + declare void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr 
addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale.block32(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale.block32(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale.block32(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + + ; mxf4nvf4 + declare void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 
%idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 cta_group_flag, i32 %collector_usage_a_op_flag) + +Overview: +""""""""" +`nvvm.tcgen05.mma.block_scale` is an asynchronous intrinsic which initiates an `M x N x K` matrix multiply and accumulate operation, `D = (A * scale_a) * (B * scale_b) + D` where the `A` matrix is `M x K`, the `B` matrix is `K x N`, and the `D` matrix is `M x N`. The matrices `A` and `B` are scaled with `%scale_A` and `%scale_B` matrices respectively before performing the matrix multiply and accumulate operation. The operation of the form `D = A*B` is issued when the input predicate argument `%enable_inp_d` is false. The 32-bit register operand idesc is the instruction descriptor as described in `Instruction descriptor `__ + +`nvvm.tcgen05.mma.block_scale` has single thread semantics, unlike the collective instructions `nvvm.mma.sync` or the PTX `wgmma.mma_async` instruction. So, a single thread issuing the `nvvm.tcgen05.mma.block_scale` will result in the initiation of the whole matrix multiply and accumulate operation + +When `.sp` is specifed, the dimension of A matrix is `M x (K / 2)` and requires specifiying an additional `%spmetadata` argument + +The `%collector_usage_a_op_flag` flag specifies the usage of collector buffer for matrix `A` + +For more information, refer to the +`PTX ISA `__ + +The following tables describes the possible values of the flag arguments + +`%cta_group`: + +============= ========== + `cta_group` value +============= ========== + CG1 1 + CG2 2 +============= ========== + +`%collector_usage_a_op_flag`: + +============================= ========== + `collector_usage_a_op_flag` value +============================= ========== + DISCARD 0 + LASTUSE 1 + USE 2 + FILL 3 +============================= ========== + +'``llvm.nvvm.tcgen05.mma.disable_output_lane*``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. 
code-block:: llvm + + declare void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag) + + ; .sp variants + declare void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag) + + ; .scale_d variants + declare void @llvm.nvvm.tcgen05.mma.shared.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.shared.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg1<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg2<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag) + + ; .sp.scale_d variants + declare void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d.disable_output_lane.cg1(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_op_flag) + declare void 
@llvm.nvvm.tcgen05.mma.sp.shared.scale_d.disable_output_lane.cg2(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.disable_output_lane.cg1<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, i64 %scale_d_imm, <4 x i32> %disable_output_lane_v4, i32 %kind_flag, i32 %collector_usage_a_op_flag) + declare void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.disable_output_lane.cg2<.ashift>(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, ptr addrspace(6) %spmetadata, i1 %enable_inp_d, i64 %scale_d_imm, <8 x i32> %disable_output_lane_v8, i32 %kind_flag, i32 %collector_usage_a_op_flag) + +Overview: +""""""""" + +`nvvm.tcgen05.mma.disable_output_lane` is an asynchronous intrinsic which initiates an `M x N x K` matrix multiply and accumulate operation, `D = A * B + D` where the `A` matrix is `M x K`, the `B` matrix is `K x N`, and the `D` matrix is `M x N`. The operation of the form `D = A*B` is issued when the input predicate argument `%enable_inp_d` is false. The optional immediate argument `%scale_d_imm` can be specified to scale the input matrix `D` as follows: `D = A*B+D * (2 ^ - %scale_d_imm)`. The valid range of values for argument `%scale_d_imm` is `[0, 15]`. The 32-bit register operand idesc is the instruction descriptor as described in `Instruction descriptor `__ + +The vector operand `%disable_output_lane` specifies the lane(s) in the Tensor Memory that should be not be updated with the resultant matrix `D`. Elements of the vector operand `%disable_output_lane` forms a mask where each bit corresponds to a lane of the Tensor Memory, with least significant bit of the first element of the vector corresponding to the `lane 0` of the Tensor Memory. If a bit in the mask is 1, then the corresponding lane in the Tensor Memory for the resultant matrix `D` will not be updated + +`nvvm.tcgen05.mma.disable_output_lane` has single thread semantics, unlike the collective instructions `nvvm.mma.sync` or the PTX `wgmma.mma_async` instruction. So, a single thread issuing the `nvvm.tcgen05.mma.disable_output_lane` will result in the initiation of the whole matrix multiply and accumulate operation + +When `.sp` is specifed, the dimension of A matrix is `M x (K / 2)` and requires specifiying an additional `%spmetadata` argument + +`.ashift` shifts the rows of the A matrix down by one row, except for the last row in the Tensor Memory. `.ashift` is only allowed with M = 128 or M = 256. + +The `%collector_usage_a_op_flag` flag specifies the usage of collector buffer for matrix `A`. 
It is illegal to specify either of `USE` or `FILL` for `%collector_usage_a_op_flag` along with `.ashift` + +For more information, refer to the `PTX ISA `__ + +The following tables describes the possible values of the flag arguments + +`%kind_flag`: + +============= ========== + `kind_flag` value +============= ========== + F16 0 + TF32 1 + F8F6F4 2 + I8 3 +============= ========== + +`%cta_group_flag`: + +================= ========== + `cta_group_flag` value +================= ========== + CG1 1 + CG2 2 +================= ========== + +`%collector_usage_a_op_flag`: + +============================= ========== + `collector_usage_a_op_flag` value +============================= ========== + DISCARD 0 + LASTUSE 1 + USE 2 + FILL 3 +============================= ========== + + +'``llvm.nvvm.tcgen05.mma.ws*``' +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" + +.. code-block:: llvm + + // tcgen05.mma.ws + declare void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag) + declare void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag) + declare void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag) + declare void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag) + declare void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag) + + ; .sp variants + declare void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag) + declare void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag) + declare void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %d, i64 %adesc, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %zero_col_mask, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag) + declare void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %d, ptr addrspace(6) %atensor, i64 %bdesc, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 %zero_col_mask, i32 %kind_flag, i32 %collector_usage_b_buffer_flag, i32 %collector_usage_b_op_flag) + +Overview: +""""""""" + +`nvvm.tcgen05.mma.ws` is an asynchronous intrinsic which initiates an `M x N x K` weight stationary convolution matrix multiply and accumulate operation, `D = A * B + D` where the `A` matrix is `M x K`, the `B` matrix is `K x N`, and the `D` matrix is `M x N`. 
The operation of the form `D = A*B` is issued when the input predicate argument `%enable_inp_d` is false. The optional immediate argument `%scale_d_imm` can be specified to scale the input matrix `D` as follows: `D = A*B+D * (2 ^ - %scale_d_imm)`. The valid range of values for argument `%scale_d_imm` is `[0, 15]`. The 32-bit register operand idesc is the instruction descriptor as described in `Instruction descriptor `__ + +`nvvm.tcgen05.mma` has single thread semantics, unlike the collective instructions `nvvm.mma.sync` or the PTX `wgmma.mma_async` instruction. So, a single thread issuing the `nvvm.tcgen05.mma` will result in the initiation of the whole matrix multiply and accumulate operation + +When `.sp` is specifed, the dimension of A matrix is `M x (K / 2)` and requires specifiying an additional `%spmetadata` argument + +The operand `%zero_col_mask` is a 64-bit register which specifies the `Zero-Column Mask Descriptor `__. The zero-column mask descriptor is used to generate a mask that specifies which columns of `B` matrix will have zero value for the matrix multiply and accumulate operation regardless of the values present in the shared memory. + +The `%collector_usage_b_buffer_flag` and `%collector_usage_b_op_flag` together flag specifies the usage of collector buffer for Matrix `B` + +For more information, refer to the +`PTX ISA `__ + +The following tables describes the possible values of the flag arguments + +`%kind_flag`: + +============= ========== + `kind_flag` value +============= ========== + F16 0 + TF32 1 + F8F6F4 2 + I8 3 +============= ========== + +`%collector_usage_b_buffer_flag`: + +================================ ========== + `collector_usage_b_buffer_flag` value +================================ ========== + B0 0 + B1 1 + B2 2 + B3 3 +================================ ========== + +`%collector_usage_b_op_flag`: + +============================= ========== + `collector_usage_b_op_flag` value +============================= ========== + DISCARD 0 + LASTUSE 1 + USE 2 + FILL 3 +============================= ========== + Store Intrinsics ---------------- diff --git a/llvm/docs/ProgrammersManual.rst b/llvm/docs/ProgrammersManual.rst index 602922fcb3b9c..f2b31211cf0dc 100644 --- a/llvm/docs/ProgrammersManual.rst +++ b/llvm/docs/ProgrammersManual.rst @@ -3832,7 +3832,7 @@ Important Subclasses of the ``Instruction`` class * ``BinaryOperator`` - This subclasses represents all two operand instructions whose operands must be + This subclass represents all two operand instructions whose operands must be the same type, except for the comparison instructions. .. _CastInst: diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst index 7b1a6ce834919..f9e2e4a5f02c3 100644 --- a/llvm/docs/RISCVUsage.rst +++ b/llvm/docs/RISCVUsage.rst @@ -120,6 +120,8 @@ on support follow. ``H`` Assembly Support ``M`` Supported ``Q`` Assembly Support + ``Sdext`` Assembly Support (`See note <#riscv-debug-specification-note>`__) + ``Sdtrig`` Assembly Support (`See note <#riscv-debug-specification-note>`__) ``Sha`` Supported ``Shcounterenw`` Assembly Support (`See note <#riscv-profiles-extensions-note>`__) ``Shgatpa`` Assembly Support (`See note <#riscv-profiles-extensions-note>`__) @@ -132,6 +134,7 @@ on support follow. ``Smcdeleg`` Supported ``Smcntrpmf`` Supported ``Smcsrind`` Supported + ``Smctr`` Assembly Support ``Smdbltrp`` Supported ``Smepmp`` Supported ``Smmpm`` Supported @@ -144,6 +147,7 @@ on support follow. 
``Sscofpmf`` Assembly Support ``Sscounterenw`` Assembly Support (`See note <#riscv-profiles-extensions-note>`__) ``Sscsrind`` Supported + ``Ssctr`` Assembly Support ``Ssdbltrp`` Supported ``Ssnpm`` Supported ``Sspm`` Supported @@ -306,6 +310,10 @@ Supported ``Za128rs``, ``Za64rs``, ``Zama16b``, ``Zic64b``, ``Ziccamoa``, ``Ziccamoc``, ``Ziccif``, ``Zicclsm``, ``Ziccrse``, ``Shcounterenvw``, ``Shgatpa``, ``Shtvala``, ``Shvsatpa``, ``Shvstvala``, ``Shvstvecd``, ``Ssccptr``, ``Sscounterenw``, ``Ssstateen``, ``Ssstrict``, ``Sstvala``, ``Sstvecd``, ``Ssu64xl``, ``Svade``, ``Svbare`` These extensions are defined as part of the `RISC-V Profiles specification `__. They do not introduce any new features themselves, but instead describe existing hardware features. +.. _riscv-debug-specification-note: + +``Sdext``, ``Sdtrig`` `The RISC-V Debug Specification `__. + .. _riscv-zacas-note: ``Zacas`` @@ -337,12 +345,6 @@ The primary goal of experimental support is to assist in the process of ratifica ``experimental-zvbc32e``, ``experimental-zvkgs`` LLVM implements the `0.7 release specification `__. -``experimental-sdext``, ``experimental-sdtrig`` - LLVM implements the `1.0-rc4 specification `__. - -``experimental-smctr``, ``experimental-ssctr`` - LLVM implements the `1.0-rc3 specification `__. - ``experimental-svukte`` LLVM implements the `0.3 draft specification `__. diff --git a/llvm/docs/Reference.rst b/llvm/docs/Reference.rst index 7d0fdd78dc96d..5d842d339f8c9 100644 --- a/llvm/docs/Reference.rst +++ b/llvm/docs/Reference.rst @@ -46,6 +46,7 @@ LLVM and API reference documentation. ScudoHardenedAllocator MemoryModelRelaxationAnnotations MemTagSanitizer + QualGroup Security SecurityTransparencyReports SegmentedStacks diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md index 40cddb45df84d..85c16b9c33f10 100644 --- a/llvm/docs/ReleaseNotes.md +++ b/llvm/docs/ReleaseNotes.md @@ -160,6 +160,7 @@ Changes to the LLVM tools * `llvm-readelf` now dumps all hex format values in lower-case mode. * Some code paths for supporting Python 2.7 in `llvm-lit` have been removed. +* Support for `%T` in lit has been removed. Changes to LLDB --------------------------------- diff --git a/llvm/docs/SPIRVUsage.rst b/llvm/docs/SPIRVUsage.rst index fdefc53b32aba..b6cd4b4feb46b 100644 --- a/llvm/docs/SPIRVUsage.rst +++ b/llvm/docs/SPIRVUsage.rst @@ -232,7 +232,7 @@ Below is a list of supported SPIR-V extensions, sorted alphabetically by their e * - ``SPV_INTEL_int4`` - Adds support for 4-bit integer type, and allow this type to be used in cooperative matrices. * - ``SPV_KHR_float_controls2`` - - Adds ability to specify the floating-point environment in shaders. It can be used on whole modules and individual instructions. + - Adds execution modes and decorations to control floating-point computations in both kernels and shaders. It can be used on whole modules and individual instructions. SPIR-V representation in LLVM IR ================================ @@ -589,3 +589,31 @@ Group and Subgroup Operations For workgroup and subgroup operations, LLVM uses function calls to represent SPIR-V's group-based instructions. These builtins facilitate group synchronization, data sharing, and collective operations essential for efficient parallel computation. + +SPIR-V Instructions Mapped to LLVM Metadata +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Some SPIR-V instructions don't have a direct equivalent in the LLVM IR language. 
To +address this, the SPIR-V Target uses different specific LLVM named metadata to convey +the necessary information. The SPIR-V specification allows multiple module-scope +instructions, where as LLVM named metadata must be unique. Therefore, the encoding of +such instructions has the following format: + +.. code-block:: llvm + + !spirv. = !{!, !, ..} + ! = !{, , ..} + ! = !{, , ..} + +Below, you will find the mappings between SPIR-V instruction and their corresponding +LLVM IR representations. + ++--------------------+---------------------------------------------------------+ +| SPIR-V instruction | LLVM IR | ++====================+=========================================================+ +| OpExecutionMode | .. code-block:: llvm | +| | | +| | !spirv.ExecutionMode = !{!0} | +| | !0 = !{void @worker, i32 30, i32 262149} | +| | ; Set execution mode with id 30 (VecTypeHint) and | +| | ; literal `262149` operand. | ++--------------------+---------------------------------------------------------+ diff --git a/llvm/docs/TableGen/ProgRef.rst b/llvm/docs/TableGen/ProgRef.rst index 2b1af05794021..0ff4cc764eaaf 100644 --- a/llvm/docs/TableGen/ProgRef.rst +++ b/llvm/docs/TableGen/ProgRef.rst @@ -64,7 +64,7 @@ Classes and concrete records have a unique *name*, either chosen by the programmer or generated by TableGen. Associated with that name is a list of *fields* with values and an optional list of *parent classes* (sometimes called base or super classes). The fields are the primary data that -backends will process. Note that TableGen assigns no meanings to fields; the +backends will process. Note that TableGen assigns no meaning to fields; the meanings are entirely up to the backends and the programs that incorporate the output of those backends. @@ -243,7 +243,7 @@ Include files ------------- TableGen has an include mechanism. The content of the included file -lexically replaces the ``include`` directive and is then parsed as if it was +lexically replaces the ``include`` directive and is then parsed as if it were originally in the main file. .. productionlist:: @@ -670,17 +670,17 @@ name of a multiclass. The argument values can be specified in two forms: * Positional argument (``value``). The value is assigned to the argument in the - corresponding position. For ``Foo``, ``a0`` will be assigned to first - argument and ``a1`` will be assigned to second argument. + corresponding position. For ``Foo``, ``a0`` will be assigned to the first + argument and ``a1`` will be assigned to the second argument. * Named argument (``name=value``). The value is assigned to the argument with the specified name. For ``Foo``, ``a0`` will be assigned to the argument with name ``a`` and ``a1`` will be assigned to the argument with name ``b``. -Required arguments can also be specified as named argument. +Required arguments can also be specified as a named argument. Note that the argument can only be specified once regardless of the way (named -or positional) to specify and positional arguments should be put before named +or positional) to specify and positional arguments should precede named arguments. .. productionlist:: @@ -817,7 +817,7 @@ type. It provides a single field, ``Value``, which holds a 3-bit number. Its template argument, ``val``, is used to set the ``Value`` field. Each of the eight records is defined with ``FPFormat`` as its parent class. The enumeration value is passed in angle brackets as the template argument. 
Each -record will inherent the ``Value`` field with the appropriate enumeration +record will inherit the ``Value`` field with the appropriate enumeration value. Here is a more complex example of classes with template arguments. First, we @@ -1308,7 +1308,7 @@ with ``F0``, ``F1``, ``F2``, and ``F3``. ------------------------------------- A ``dump`` statement prints the input string to standard error -output. It is intended for debugging purpose. +output. It is intended for debugging purposes. * At top level, the message is printed immediately. @@ -1727,7 +1727,7 @@ and non-0 as true. ``!div(``\ *a*\ ``,`` *b*\ ``)`` This operator performs signed division of *a* by *b*, and produces the quotient. - Division by 0 produces an error. Division of INT64_MIN by -1 produces an error. + Division by 0 produces an error. Division of ``INT64_MIN`` by -1 produces an error. ``!empty(``\ *a*\ ``)`` This operator produces 1 if the string, list, or DAG *a* is empty; 0 otherwise. @@ -1914,7 +1914,7 @@ and non-0 as true. ``!or(``\ *a*\ ``,`` *b*\ ``, ...)`` This operator does a bitwise OR on *a*, *b*, etc., and produces the result. A logical OR can be performed if all the arguments are either - 0 or 1. This operator is short-circuit to -1 (all ones) the left-most + 0 or 1. This operator is short-circuit to -1 (all ones) when the left-most operand is -1. ``!range([``\ *start*\ ``,]`` *end*\ ``[,``\ *step*\ ``])`` @@ -1937,7 +1937,7 @@ and non-0 as true. Equivalent to ``!range(0, !size(list))``. ``!repr(``\ *value*\ ``)`` - Represents *value* as a string. String format for the value is not + Represents *value* as a string. The string format for the value is not guaranteed to be stable. Intended for debugging purposes only. ``!setdagarg(``\ *dag*\ ``,``\ *key*\ ``,``\ *arg*\ ``)`` diff --git a/llvm/docs/index.rst b/llvm/docs/index.rst index 28ca6bf6316f4..b480729aaa5d9 100644 --- a/llvm/docs/index.rst +++ b/llvm/docs/index.rst @@ -86,7 +86,6 @@ LLVM welcomes contributions of all kinds. To learn more, see the following artic :hidden: GettingInvolved - QualGroup * :doc:`GettingInvolved` * :ref:`development-process` @@ -98,8 +97,6 @@ LLVM welcomes contributions of all kinds. 
To learn more, see the following artic * :ref:`report-security-issue` -* :doc:`QualGroup` - Indices and tables ================== diff --git a/llvm/include/llvm/ADT/AllocatorList.h b/llvm/include/llvm/ADT/AllocatorList.h index 04d0afc9d076e..2716b83ca224a 100644 --- a/llvm/include/llvm/ADT/AllocatorList.h +++ b/llvm/include/llvm/ADT/AllocatorList.h @@ -155,8 +155,8 @@ template class AllocatorList : AllocatorT { std::swap(getAlloc(), RHS.getAlloc()); } - bool empty() { return List.empty(); } - size_t size() { return List.size(); } + [[nodiscard]] bool empty() const { return List.empty(); } + [[nodiscard]] size_t size() const { return List.size(); } iterator begin() { return iterator(List.begin()); } iterator end() { return iterator(List.end()); } diff --git a/llvm/include/llvm/ADT/ArrayRef.h b/llvm/include/llvm/ADT/ArrayRef.h index fb91690bb0eb3..448d10013d371 100644 --- a/llvm/include/llvm/ADT/ArrayRef.h +++ b/llvm/include/llvm/ADT/ArrayRef.h @@ -547,7 +547,8 @@ namespace llvm { } template - inline bool operator==(SmallVectorImpl &LHS, ArrayRef RHS) { + [[nodiscard]] inline bool operator==(const SmallVectorImpl &LHS, + ArrayRef RHS) { return ArrayRef(LHS).equals(RHS); } @@ -557,7 +558,8 @@ namespace llvm { } template - inline bool operator!=(SmallVectorImpl &LHS, ArrayRef RHS) { + [[nodiscard]] inline bool operator!=(const SmallVectorImpl &LHS, + ArrayRef RHS) { return !(LHS == RHS); } diff --git a/llvm/include/llvm/ADT/BitVector.h b/llvm/include/llvm/ADT/BitVector.h index 72da2343fae13..9e81a4b735e7f 100644 --- a/llvm/include/llvm/ADT/BitVector.h +++ b/llvm/include/llvm/ADT/BitVector.h @@ -40,12 +40,20 @@ template class const_set_bits_iterator_impl { Current = Parent.find_next(Current); } + void retreat() { + if (Current == -1) { + Current = Parent.find_last(); + } else { + Current = Parent.find_prev(Current); + } + } + public: - using iterator_category = std::forward_iterator_tag; - using difference_type = std::ptrdiff_t; - using value_type = int; - using pointer = value_type*; - using reference = value_type&; + using iterator_category = std::bidirectional_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = unsigned; + using pointer = const value_type *; + using reference = value_type; const_set_bits_iterator_impl(const BitVectorT &Parent, int Current) : Parent(Parent), Current(Current) {} @@ -64,6 +72,17 @@ template class const_set_bits_iterator_impl { return *this; } + const_set_bits_iterator_impl operator--(int) { + auto Prev = *this; + retreat(); + return Prev; + } + + const_set_bits_iterator_impl &operator--() { + retreat(); + return *this; + } + unsigned operator*() const { return Current; } bool operator==(const const_set_bits_iterator_impl &Other) const { @@ -551,10 +570,7 @@ class BitVector { template static BitVector &apply(F &&f, BitVector &Out, BitVector const &Arg, ArgTys const &...Args) { - assert(llvm::all_of( - std::initializer_list{Args.size()...}, - [&Arg](auto const &BV) { return Arg.size() == BV; }) && - "consistent sizes"); + assert(((Arg.size() == Args.size()) && ...) 
&& "consistent sizes"); Out.resize(Arg.size()); for (size_type I = 0, E = Arg.Bits.size(); I != E; ++I) Out.Bits[I] = f(Arg.Bits[I], Args.Bits[I]...); diff --git a/llvm/include/llvm/ADT/CoalescingBitVector.h b/llvm/include/llvm/ADT/CoalescingBitVector.h index 4940bc1c2c18b..b126fc699ad87 100644 --- a/llvm/include/llvm/ADT/CoalescingBitVector.h +++ b/llvm/include/llvm/ADT/CoalescingBitVector.h @@ -194,10 +194,7 @@ template class CoalescingBitVector { // Delete the overlapping intervals. Split up intervals that only partially // intersect an overlap. - for (IntervalT Overlap : Overlaps) { - IndexT OlapStart, OlapStop; - std::tie(OlapStart, OlapStop) = Overlap; - + for (auto [OlapStart, OlapStop] : Overlaps) { auto It = Intervals.find(OlapStart); IndexT CurrStart = It.start(); IndexT CurrStop = It.stop(); @@ -420,10 +417,7 @@ template class CoalescingBitVector { const SmallVectorImpl &Overlaps, SmallVectorImpl &NonOverlappingParts) { IndexT NextUncoveredBit = Start; - for (IntervalT Overlap : Overlaps) { - IndexT OlapStart, OlapStop; - std::tie(OlapStart, OlapStop) = Overlap; - + for (auto [OlapStart, OlapStop] : Overlaps) { // [Start;Stop] and [OlapStart;OlapStop] overlap iff OlapStart <= Stop // and Start <= OlapStop. bool DoesOverlap = OlapStart <= Stop && Start <= OlapStop; diff --git a/llvm/include/llvm/ADT/ConcurrentHashtable.h b/llvm/include/llvm/ADT/ConcurrentHashtable.h index 6de194db9ba7a..6a943c5b062e7 100644 --- a/llvm/include/llvm/ADT/ConcurrentHashtable.h +++ b/llvm/include/llvm/ADT/ConcurrentHashtable.h @@ -253,9 +253,8 @@ class ConcurrentHashTableByPtr { OS << "\nOverall number of entries = " << OverallNumberOfEntries; OS << "\nOverall number of non empty buckets = " << NumberOfNonEmptyBuckets; - for (auto &BucketSize : BucketSizesMap) - OS << "\n Number of buckets with size " << BucketSize.first << ": " - << BucketSize.second; + for (auto [Size, Count] : BucketSizesMap) + OS << "\n Number of buckets with size " << Size << ": " << Count; std::stringstream stream; stream << std::fixed << std::setprecision(2) diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h index e13a2cb09a412..4bda50f5a5cc0 100644 --- a/llvm/include/llvm/ADT/DenseMap.h +++ b/llvm/include/llvm/ADT/DenseMap.h @@ -75,37 +75,39 @@ class DenseMapBase : public DebugEpochBase { using const_iterator = DenseMapIterator; - inline iterator begin() { + [[nodiscard]] inline iterator begin() { return iterator::makeBegin(buckets(), empty(), *this); } - inline iterator end() { return iterator::makeEnd(buckets(), *this); } - inline const_iterator begin() const { + [[nodiscard]] inline iterator end() { + return iterator::makeEnd(buckets(), *this); + } + [[nodiscard]] inline const_iterator begin() const { return const_iterator::makeBegin(buckets(), empty(), *this); } - inline const_iterator end() const { + [[nodiscard]] inline const_iterator end() const { return const_iterator::makeEnd(buckets(), *this); } // Return an iterator to iterate over keys in the map. - inline auto keys() { + [[nodiscard]] inline auto keys() { return map_range(*this, [](const BucketT &P) { return P.getFirst(); }); } // Return an iterator to iterate over values in the map. 
- inline auto values() { + [[nodiscard]] inline auto values() { return map_range(*this, [](const BucketT &P) { return P.getSecond(); }); } - inline auto keys() const { + [[nodiscard]] inline auto keys() const { return map_range(*this, [](const BucketT &P) { return P.getFirst(); }); } - inline auto values() const { + [[nodiscard]] inline auto values() const { return map_range(*this, [](const BucketT &P) { return P.getSecond(); }); } [[nodiscard]] bool empty() const { return getNumEntries() == 0; } - unsigned size() const { return getNumEntries(); } + [[nodiscard]] unsigned size() const { return getNumEntries(); } /// Grow the densemap so that it can contain at least \p NumEntries items /// before resizing again. @@ -153,30 +155,35 @@ class DenseMapBase : public DebugEpochBase { } /// Return true if the specified key is in the map, false otherwise. - bool contains(const_arg_type_t Val) const { + [[nodiscard]] bool contains(const_arg_type_t Val) const { return doFind(Val) != nullptr; } /// Return 1 if the specified key is in the map, 0 otherwise. - size_type count(const_arg_type_t Val) const { + [[nodiscard]] size_type count(const_arg_type_t Val) const { return contains(Val) ? 1 : 0; } - iterator find(const_arg_type_t Val) { return find_as(Val); } - const_iterator find(const_arg_type_t Val) const { return find_as(Val); } + [[nodiscard]] iterator find(const_arg_type_t Val) { + return find_as(Val); + } + [[nodiscard]] const_iterator find(const_arg_type_t Val) const { + return find_as(Val); + } /// Alternate version of find() which allows a different, and possibly /// less expensive, key type. /// The DenseMapInfo is responsible for supplying methods /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key /// type used. - template iterator find_as(const LookupKeyT &Val) { + template + [[nodiscard]] iterator find_as(const LookupKeyT &Val) { if (BucketT *Bucket = doFind(Val)) return makeIterator(Bucket); return end(); } template - const_iterator find_as(const LookupKeyT &Val) const { + [[nodiscard]] const_iterator find_as(const LookupKeyT &Val) const { if (const BucketT *Bucket = doFind(Val)) return makeConstIterator(Bucket); return end(); @@ -184,7 +191,7 @@ class DenseMapBase : public DebugEpochBase { /// lookup - Return the entry for the specified key, or a default /// constructed value if no such entry exists. - ValueT lookup(const_arg_type_t Val) const { + [[nodiscard]] ValueT lookup(const_arg_type_t Val) const { if (const BucketT *Bucket = doFind(Val)) return Bucket->getSecond(); return ValueT(); @@ -194,7 +201,8 @@ class DenseMapBase : public DebugEpochBase { // useful, because `lookup` cannot be used with non-default-constructible // values. template > - ValueT lookup_or(const_arg_type_t Val, U &&Default) const { + [[nodiscard]] ValueT lookup_or(const_arg_type_t Val, + U &&Default) const { if (const BucketT *Bucket = doFind(Val)) return Bucket->getSecond(); return Default; @@ -202,7 +210,7 @@ class DenseMapBase : public DebugEpochBase { /// at - Return the entry for the specified key, or abort if no such /// entry exists. - const ValueT &at(const_arg_type_t Val) const { + [[nodiscard]] const ValueT &at(const_arg_type_t Val) const { auto Iter = this->find(std::move(Val)); assert(Iter != this->end() && "DenseMap::at failed due to a missing key"); return Iter->second; @@ -330,14 +338,16 @@ class DenseMapBase : public DebugEpochBase { /// isPointerIntoBucketsArray - Return true if the specified pointer points /// somewhere into the DenseMap's array of buckets (i.e. 
either to a key or /// value in the DenseMap). - bool isPointerIntoBucketsArray(const void *Ptr) const { + [[nodiscard]] bool isPointerIntoBucketsArray(const void *Ptr) const { return Ptr >= getBuckets() && Ptr < getBucketsEnd(); } /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets /// array. In conjunction with the previous method, this can be used to /// determine whether an insertion caused the DenseMap to reallocate. - const void *getPointerIntoBucketsArray() const { return getBuckets(); } + [[nodiscard]] const void *getPointerIntoBucketsArray() const { + return getBuckets(); + } protected: DenseMapBase() = default; @@ -430,10 +440,6 @@ class DenseMapBase : public DebugEpochBase { } } - static unsigned getHashValue(const KeyT &Val) { - return KeyInfoT::getHashValue(Val); - } - template static unsigned getHashValue(const LookupKeyT &Val) { return KeyInfoT::getHashValue(Val); @@ -448,6 +454,11 @@ class DenseMapBase : public DebugEpochBase { static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); } private: + DerivedT &derived() { return *static_cast(this); } + const DerivedT &derived() const { + return *static_cast(this); + } + template std::pair lookupOrInsertIntoBucket(KeyArgT &&Key, Ts &&...Args) { @@ -477,39 +488,27 @@ class DenseMapBase : public DebugEpochBase { return const_iterator::makeIterator(TheBucket, buckets(), *this); } - unsigned getNumEntries() const { - return static_cast(this)->getNumEntries(); - } + unsigned getNumEntries() const { return derived().getNumEntries(); } - void setNumEntries(unsigned Num) { - static_cast(this)->setNumEntries(Num); - } + void setNumEntries(unsigned Num) { derived().setNumEntries(Num); } void incrementNumEntries() { setNumEntries(getNumEntries() + 1); } void decrementNumEntries() { setNumEntries(getNumEntries() - 1); } - unsigned getNumTombstones() const { - return static_cast(this)->getNumTombstones(); - } + unsigned getNumTombstones() const { return derived().getNumTombstones(); } - void setNumTombstones(unsigned Num) { - static_cast(this)->setNumTombstones(Num); - } + void setNumTombstones(unsigned Num) { derived().setNumTombstones(Num); } void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); } void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); } - const BucketT *getBuckets() const { - return static_cast(this)->getBuckets(); - } + const BucketT *getBuckets() const { return derived().getBuckets(); } - BucketT *getBuckets() { return static_cast(this)->getBuckets(); } + BucketT *getBuckets() { return derived().getBuckets(); } - unsigned getNumBuckets() const { - return static_cast(this)->getNumBuckets(); - } + unsigned getNumBuckets() const { return derived().getNumBuckets(); } BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); } @@ -525,9 +524,9 @@ class DenseMapBase : public DebugEpochBase { return llvm::make_range(getBuckets(), getBucketsEnd()); } - void grow(unsigned AtLeast) { static_cast(this)->grow(AtLeast); } + void grow(unsigned AtLeast) { derived().grow(AtLeast); } - void shrink_and_clear() { static_cast(this)->shrink_and_clear(); } + void shrink_and_clear() { derived().shrink_and_clear(); } template BucketT *findBucketForInsertion(const LookupKeyT &Lookup, @@ -656,7 +655,9 @@ class DenseMapBase : public DebugEpochBase { /// This is just the raw memory used by DenseMap. /// If entries are pointers to objects, the size of the referenced objects /// are not included. 
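// --- Illustrative aside (not part of the patch) -----------------------------
// A rough usage sketch of the query members annotated above; the point of the
// [[nodiscard]] additions is that these calls are pure lookups whose results
// are meaningless if ignored. Assumes the LLVM ADT headers are on the include
// path; the function name queryDemo is hypothetical.
#include "llvm/ADT/DenseMap.h"

int queryDemo(const llvm::DenseMap<int, int> &M) {
  int A = M.lookup(1);          // value, or a default-constructed int (0)
  int B = M.lookup_or(2, -1);   // value, or the caller-supplied fallback
  bool Present = M.contains(3); // discarding this result would now warn
  return A + B + (Present ? 1 : 0);
}
// -----------------------------------------------------------------------------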
- size_t getMemorySize() const { return getNumBuckets() * sizeof(BucketT); } + [[nodiscard]] size_t getMemorySize() const { + return getNumBuckets() * sizeof(BucketT); + } }; /// Equality comparison for DenseMap. @@ -667,9 +668,9 @@ class DenseMapBase : public DebugEpochBase { /// complexity is linear, worst case is O(N^2) (if every hash collides). template -bool operator==( - const DenseMapBase &LHS, - const DenseMapBase &RHS) { +[[nodiscard]] bool +operator==(const DenseMapBase &LHS, + const DenseMapBase &RHS) { if (LHS.size() != RHS.size()) return false; @@ -687,9 +688,9 @@ bool operator==( /// Equivalent to !(LHS == RHS). See operator== for performance notes. template -bool operator!=( - const DenseMapBase &LHS, - const DenseMapBase &RHS) { +[[nodiscard]] bool +operator!=(const DenseMapBase &LHS, + const DenseMapBase &RHS) { return !(LHS == RHS); } @@ -1009,21 +1010,13 @@ class SmallDenseMap void copyFrom(const SmallDenseMap &other) { this->destroyAll(); deallocateBuckets(); - Small = true; - if (other.getNumBuckets() > InlineBuckets) { - Small = false; - new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets())); - } + allocateBuckets(other.getNumBuckets()); this->BaseT::copyFrom(other); } void init(unsigned InitNumEntries) { auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries); - Small = true; - if (InitBuckets > InlineBuckets) { - Small = false; - new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); - } + allocateBuckets(InitBuckets); this->BaseT::initEmpty(); } @@ -1057,21 +1050,14 @@ class SmallDenseMap // AtLeast == InlineBuckets can happen if there are many tombstones, // and grow() is used to remove them. Usually we always switch to the // large rep here. - if (AtLeast > InlineBuckets) { - Small = false; - new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); - } + allocateBuckets(AtLeast); this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd)); return; } LargeRep OldRep = std::move(*getLargeRep()); getLargeRep()->~LargeRep(); - if (AtLeast <= InlineBuckets) { - Small = true; - } else { - new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); - } + allocateBuckets(AtLeast); this->moveFromOldBuckets(OldRep.buckets()); @@ -1166,12 +1152,15 @@ class SmallDenseMap getLargeRep()->~LargeRep(); } - LargeRep allocateBuckets(unsigned Num) { - assert(Num > InlineBuckets && "Must allocate more buckets than are inline"); - LargeRep Rep = {static_cast(allocate_buffer( - sizeof(BucketT) * Num, alignof(BucketT))), - Num}; - return Rep; + void allocateBuckets(unsigned Num) { + if (Num <= InlineBuckets) { + Small = true; + } else { + Small = false; + BucketT *NewBuckets = static_cast( + allocate_buffer(sizeof(BucketT) * Num, alignof(BucketT))); + new (getLargeRep()) LargeRep{NewBuckets, Num}; + } } }; @@ -1239,15 +1228,15 @@ class DenseMapIterator : DebugEpochBase::HandleBase { const DenseMapIterator &I) : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {} - reference operator*() const { + [[nodiscard]] reference operator*() const { assert(isHandleInSync() && "invalid iterator access!"); assert(Ptr != End && "dereferencing end() iterator"); return *Ptr; } - pointer operator->() const { return &operator*(); } + [[nodiscard]] pointer operator->() const { return &operator*(); } - friend bool operator==(const DenseMapIterator &LHS, - const DenseMapIterator &RHS) { + [[nodiscard]] friend bool operator==(const DenseMapIterator &LHS, + const DenseMapIterator &RHS) { assert((!LHS.getEpochAddress() || LHS.isHandleInSync()) && "handle not in sync!"); 
assert((!RHS.getEpochAddress() || RHS.isHandleInSync()) && @@ -1257,8 +1246,8 @@ class DenseMapIterator : DebugEpochBase::HandleBase { return LHS.Ptr == RHS.Ptr; } - friend bool operator!=(const DenseMapIterator &LHS, - const DenseMapIterator &RHS) { + [[nodiscard]] friend bool operator!=(const DenseMapIterator &LHS, + const DenseMapIterator &RHS) { return !(LHS == RHS); } @@ -1296,7 +1285,8 @@ class DenseMapIterator : DebugEpochBase::HandleBase { }; template -inline size_t capacity_in_bytes(const DenseMap &X) { +[[nodiscard]] inline size_t +capacity_in_bytes(const DenseMap &X) { return X.getMemorySize(); } diff --git a/llvm/include/llvm/ADT/DenseMapInfo.h b/llvm/include/llvm/ADT/DenseMapInfo.h index 57a8674e35015..f24aeb4371e7f 100644 --- a/llvm/include/llvm/ADT/DenseMapInfo.h +++ b/llvm/include/llvm/ADT/DenseMapInfo.h @@ -139,13 +139,11 @@ struct DenseMapInfo> { using SecondInfo = DenseMapInfo; static constexpr Pair getEmptyKey() { - return std::make_pair(FirstInfo::getEmptyKey(), - SecondInfo::getEmptyKey()); + return {FirstInfo::getEmptyKey(), SecondInfo::getEmptyKey()}; } static constexpr Pair getTombstoneKey() { - return std::make_pair(FirstInfo::getTombstoneKey(), - SecondInfo::getTombstoneKey()); + return {FirstInfo::getTombstoneKey(), SecondInfo::getTombstoneKey()}; } static unsigned getHashValue(const Pair& PairVal) { diff --git a/llvm/include/llvm/ADT/DenseSet.h b/llvm/include/llvm/ADT/DenseSet.h index 60ad9b2eb7762..eec800d07b6df 100644 --- a/llvm/include/llvm/ADT/DenseSet.h +++ b/llvm/include/llvm/ADT/DenseSet.h @@ -83,9 +83,9 @@ class DenseSetImpl { DenseSetImpl(llvm::from_range_t, Range &&R) : DenseSetImpl(adl_begin(R), adl_end(R)) {} - bool empty() const { return TheMap.empty(); } - size_type size() const { return TheMap.size(); } - size_t getMemorySize() const { return TheMap.getMemorySize(); } + [[nodiscard]] bool empty() const { return TheMap.empty(); } + [[nodiscard]] size_type size() const { return TheMap.size(); } + [[nodiscard]] size_t getMemorySize() const { return TheMap.getMemorySize(); } /// Grow the DenseSet so that it has at least Size buckets. Will not shrink /// the Size of the set. @@ -154,14 +154,20 @@ class DenseSetImpl { using iterator = DenseSetIterator; using const_iterator = DenseSetIterator; - iterator begin() { return iterator(TheMap.begin()); } - iterator end() { return iterator(TheMap.end()); } + [[nodiscard]] iterator begin() { return iterator(TheMap.begin()); } + [[nodiscard]] iterator end() { return iterator(TheMap.end()); } - const_iterator begin() const { return const_iterator(TheMap.begin()); } - const_iterator end() const { return const_iterator(TheMap.end()); } + [[nodiscard]] const_iterator begin() const { + return const_iterator(TheMap.begin()); + } + [[nodiscard]] const_iterator end() const { + return const_iterator(TheMap.end()); + } - iterator find(const_arg_type_t V) { return iterator(TheMap.find(V)); } - const_iterator find(const_arg_type_t V) const { + [[nodiscard]] iterator find(const_arg_type_t V) { + return iterator(TheMap.find(V)); + } + [[nodiscard]] const_iterator find(const_arg_type_t V) const { return const_iterator(TheMap.find(V)); } @@ -180,10 +186,12 @@ class DenseSetImpl { /// The DenseMapInfo is responsible for supplying methods /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type /// used. 
- template iterator find_as(const LookupKeyT &Val) { + template + [[nodiscard]] iterator find_as(const LookupKeyT &Val) { return iterator(TheMap.find_as(Val)); } template + [[nodiscard]] const_iterator find_as(const LookupKeyT &Val) const { return const_iterator(TheMap.find_as(Val)); } @@ -229,8 +237,9 @@ class DenseSetImpl { /// Equivalent to N calls to RHS.count. Amortized complexity is linear, worst /// case is O(N^2) (if every hash collides). template -bool operator==(const DenseSetImpl &LHS, - const DenseSetImpl &RHS) { +[[nodiscard]] bool +operator==(const DenseSetImpl &LHS, + const DenseSetImpl &RHS) { if (LHS.size() != RHS.size()) return false; @@ -245,8 +254,9 @@ bool operator==(const DenseSetImpl &LHS, /// /// Equivalent to !(LHS == RHS). See operator== for performance notes. template -bool operator!=(const DenseSetImpl &LHS, - const DenseSetImpl &RHS) { +[[nodiscard]] bool +operator!=(const DenseSetImpl &LHS, + const DenseSetImpl &RHS) { return !(LHS == RHS); } diff --git a/llvm/include/llvm/ADT/DirectedGraph.h b/llvm/include/llvm/ADT/DirectedGraph.h index 83c0bea6393c4..fb6b180f77e6b 100644 --- a/llvm/include/llvm/ADT/DirectedGraph.h +++ b/llvm/include/llvm/ADT/DirectedGraph.h @@ -181,16 +181,6 @@ template class DirectedGraph { DirectedGraph() = default; explicit DirectedGraph(NodeType &N) : Nodes() { addNode(N); } - DirectedGraph(const DGraphType &G) : Nodes(G.Nodes) {} - DirectedGraph(DGraphType &&RHS) : Nodes(std::move(RHS.Nodes)) {} - DGraphType &operator=(const DGraphType &G) { - Nodes = G.Nodes; - return *this; - } - DGraphType &operator=(const DGraphType &&G) { - Nodes = std::move(G.Nodes); - return *this; - } const_iterator begin() const { return Nodes.begin(); } const_iterator end() const { return Nodes.end(); } diff --git a/llvm/include/llvm/ADT/EquivalenceClasses.h b/llvm/include/llvm/ADT/EquivalenceClasses.h index 1a2331c1a0322..90d8948734729 100644 --- a/llvm/include/llvm/ADT/EquivalenceClasses.h +++ b/llvm/include/llvm/ADT/EquivalenceClasses.h @@ -180,8 +180,8 @@ template class EquivalenceClasses { } /// Returns true if \p V is contained an equivalence class. - bool contains(const ElemTy &V) const { - return TheMapping.find(V) != TheMapping.end(); + [[nodiscard]] bool contains(const ElemTy &V) const { + return TheMapping.contains(V); } /// getLeaderValue - Return the leader for the specified value that is in the @@ -256,9 +256,11 @@ template class EquivalenceClasses { } if (!Next) { // If the current element is the last element(not leader), set the - // successor of the current element's predecessor to null, and set - // the 'Leader' field of the class leader to the predecessor element. - Pre->Next = nullptr; + // successor of the current element's predecessor to null while + // preserving the leader bit, and set the 'Leader' field of the class + // leader to the predecessor element. 
+ Pre->Next = reinterpret_cast( + static_cast(Pre->isLeader())); Leader->Leader = Pre; } else { // If the current element is in the middle of class, then simply diff --git a/llvm/include/llvm/ADT/FunctionExtras.h b/llvm/include/llvm/ADT/FunctionExtras.h index 1311452a17bb3..2498cb7796f1f 100644 --- a/llvm/include/llvm/ADT/FunctionExtras.h +++ b/llvm/include/llvm/ADT/FunctionExtras.h @@ -58,10 +58,6 @@ template class unique_function; namespace detail { -template -using EnableIfTrivial = - std::enable_if_t::value && - std::is_trivially_destructible::value>; template using EnableUnlessSameType = std::enable_if_t, ThisT>::value>; @@ -94,13 +90,12 @@ template class UniqueFunctionBase { template struct AdjustedParamTBase { static_assert(!std::is_reference::value, "references should be handled by template specialization"); - template - using IsSizeLessThanThresholdT = - std::bool_constant; + static constexpr bool IsSizeLessThanThreshold = + sizeof(T) <= 2 * sizeof(void *); using type = std::conditional_t::value && std::is_trivially_move_constructible::value && - IsSizeLessThanThresholdT::value, + IsSizeLessThanThreshold, T, T &>; }; @@ -236,17 +231,17 @@ template class UniqueFunctionBase { // type erased behaviors needed. Create a static instance of the struct type // here and each instance will contain a pointer to it. // Wrap in a struct to avoid https://gcc.gnu.org/PR71954 - template - struct CallbacksHolder { - inline static NonTrivialCallbacks Callbacks = { - &CallImpl, &MoveImpl, &DestroyImpl}; - }; - // See if we can create a trivial callback. We need the callable to be - // trivially moved and trivially destroyed so that we don't have to store - // type erased callbacks for those operations. - template - struct CallbacksHolder> { - inline static TrivialCallback Callbacks = {&CallImpl}; + template struct CallbacksHolder { + inline static auto Callbacks = []() constexpr { + // For trivial callables, we don't need to store move and destroy + // callbacks. + if constexpr (std::is_trivially_move_constructible_v && + std::is_trivially_destructible_v) + return TrivialCallback{&CallImpl}; + else + return NonTrivialCallbacks{&CallImpl, &MoveImpl, + &DestroyImpl}; + }(); }; // A simple tag type so the call-as type to be passed to the constructor. diff --git a/llvm/include/llvm/ADT/GenericSSAContext.h b/llvm/include/llvm/ADT/GenericSSAContext.h index 6aa3a8b9b6e0b..e9f99bafe9f1e 100644 --- a/llvm/include/llvm/ADT/GenericSSAContext.h +++ b/llvm/include/llvm/ADT/GenericSSAContext.h @@ -54,7 +54,7 @@ template class GenericSSAContext { // The null value for ValueRefT. For LLVM IR and MIR, this is simply the // default constructed value. - static constexpr ValueRefT *ValueRefNull = {}; + static constexpr ValueRefT ValueRefNull = {}; // An InstructionT usually defines one or more ValueT objects. 
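// --- Illustrative aside (not part of the patch) -----------------------------
// Sketch of the technique from the FunctionExtras hunk above: a single inline
// static member is initialized by an immediately-invoked constexpr lambda, and
// `if constexpr` on the callable's triviality decides which struct type the
// member gets, so the old partial specialization of CallbacksHolder is no
// longer needed. The types and names below are simplified stand-ins, not the
// real LLVM ones.
#include <type_traits>

struct TrivialOps { void (*Call)(); };
struct FullOps { void (*Call)(); void (*Move)(); void (*Destroy)(); };

inline void callStub() {}
inline void moveStub() {}
inline void destroyStub() {}

template <typename CallableT> struct OpsHolder {
  inline static auto Ops = []() constexpr {
    if constexpr (std::is_trivially_move_constructible_v<CallableT> &&
                  std::is_trivially_destructible_v<CallableT>)
      return TrivialOps{&callStub}; // no move/destroy thunks needed
    else
      return FullOps{&callStub, &moveStub, &destroyStub};
  }();
};
// -----------------------------------------------------------------------------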
using InstructionT = typename SSATraits::InstructionT; diff --git a/llvm/include/llvm/ADT/GenericUniformityImpl.h b/llvm/include/llvm/ADT/GenericUniformityImpl.h index 3b9b7f2633771..7fb0dbe22f12f 100644 --- a/llvm/include/llvm/ADT/GenericUniformityImpl.h +++ b/llvm/include/llvm/ADT/GenericUniformityImpl.h @@ -310,7 +310,7 @@ template class GenericSyncDependenceAnalysis { const DivergenceDescriptor &getJoinBlocks(const BlockT *DivTermBlock); private: - static DivergenceDescriptor EmptyDivergenceDesc; + static inline DivergenceDescriptor EmptyDivergenceDesc; ModifiedPO CyclePO; @@ -408,15 +408,6 @@ template class GenericUniformityAnalysisImpl { const CycleT *); protected: - /// \brief Value/block pair representing a single phi input. - struct PhiInput { - ConstValueRefT value; - BlockT *predBlock; - - PhiInput(ConstValueRefT value, BlockT *predBlock) - : value(value), predBlock(predBlock) {} - }; - const ContextT &Context; const FunctionT &F; const CycleInfoT &CI; @@ -741,10 +732,6 @@ template class DivergencePropagator { } }; -template -typename llvm::GenericSyncDependenceAnalysis::DivergenceDescriptor - llvm::GenericSyncDependenceAnalysis::EmptyDivergenceDesc; - template llvm::GenericSyncDependenceAnalysis::GenericSyncDependenceAnalysis( const ContextT &Context, const DominatorTreeT &DT, const CycleInfoT &CI) diff --git a/llvm/include/llvm/ADT/ImmutableMap.h b/llvm/include/llvm/ADT/ImmutableMap.h index 3d19ca41a5be0..32634a96ee9ea 100644 --- a/llvm/include/llvm/ADT/ImmutableMap.h +++ b/llvm/include/llvm/ADT/ImmutableMap.h @@ -111,25 +111,25 @@ class ImmutableMap { } }; - bool contains(key_type_ref K) const { + [[nodiscard]] bool contains(key_type_ref K) const { return Root ? Root->contains(K) : false; } - bool operator==(const ImmutableMap &RHS) const { + [[nodiscard]] bool operator==(const ImmutableMap &RHS) const { return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root; } - bool operator!=(const ImmutableMap &RHS) const { + [[nodiscard]] bool operator!=(const ImmutableMap &RHS) const { return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get()) : Root != RHS.Root; } - TreeTy *getRoot() const { + [[nodiscard]] TreeTy *getRoot() const { if (Root) { Root->retain(); } return Root.get(); } - TreeTy *getRootWithoutRetain() const { return Root.get(); } + [[nodiscard]] TreeTy *getRootWithoutRetain() const { return Root.get(); } void manualRetain() { if (Root) Root->retain(); @@ -139,7 +139,7 @@ class ImmutableMap { if (Root) Root->release(); } - bool isEmpty() const { return !Root; } + [[nodiscard]] bool isEmpty() const { return !Root; } public: //===--------------------------------------------------===// @@ -163,10 +163,10 @@ class ImmutableMap { data_type_ref getData() const { return (*this)->second; } }; - iterator begin() const { return iterator(Root.get()); } - iterator end() const { return iterator(); } + [[nodiscard]] iterator begin() const { return iterator(Root.get()); } + [[nodiscard]] iterator end() const { return iterator(); } - data_type* lookup(key_type_ref K) const { + [[nodiscard]] data_type *lookup(key_type_ref K) const { if (Root) { TreeTy* T = Root->find(K); if (T) return &T->getValue().second; @@ -178,7 +178,7 @@ class ImmutableMap { /// getMaxElement - Returns the pair in the ImmutableMap for /// which key is the highest in the ordering of keys in the map. This /// method returns NULL if the map is empty. - value_type* getMaxElement() const { + [[nodiscard]] value_type *getMaxElement() const { return Root ? 
&(Root->getMaxElement()->getValue()) : nullptr; } @@ -186,7 +186,9 @@ class ImmutableMap { // Utility methods. //===--------------------------------------------------===// - unsigned getHeight() const { return Root ? Root->getHeight() : 0; } + [[nodiscard]] unsigned getHeight() const { + return Root ? Root->getHeight() : 0; + } static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) { ID.AddPointer(M.Root.get()); @@ -250,7 +252,7 @@ class ImmutableMapRef { return ImmutableMapRef(NewT, Factory); } - bool contains(key_type_ref K) const { + [[nodiscard]] bool contains(key_type_ref K) const { return Root ? Root->contains(K) : false; } @@ -258,16 +260,16 @@ class ImmutableMapRef { return ImmutableMap(Factory->getCanonicalTree(Root.get())); } - bool operator==(const ImmutableMapRef &RHS) const { + [[nodiscard]] bool operator==(const ImmutableMapRef &RHS) const { return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root; } - bool operator!=(const ImmutableMapRef &RHS) const { + [[nodiscard]] bool operator!=(const ImmutableMapRef &RHS) const { return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get()) : Root != RHS.Root; } - bool isEmpty() const { return !Root; } + [[nodiscard]] bool isEmpty() const { return !Root; } //===--------------------------------------------------===// // For testing. @@ -293,10 +295,10 @@ class ImmutableMapRef { data_type_ref getData() const { return (*this)->second; } }; - iterator begin() const { return iterator(Root.get()); } - iterator end() const { return iterator(); } + [[nodiscard]] iterator begin() const { return iterator(Root.get()); } + [[nodiscard]] iterator end() const { return iterator(); } - data_type *lookup(key_type_ref K) const { + [[nodiscard]] data_type *lookup(key_type_ref K) const { if (Root) { TreeTy* T = Root->find(K); if (T) return &T->getValue().second; @@ -308,7 +310,7 @@ class ImmutableMapRef { /// getMaxElement - Returns the pair in the ImmutableMap for /// which key is the highest in the ordering of keys in the map. This /// method returns NULL if the map is empty. - value_type* getMaxElement() const { + [[nodiscard]] value_type *getMaxElement() const { return Root ? &(Root->getMaxElement()->getValue()) : nullptr; } @@ -316,7 +318,9 @@ class ImmutableMapRef { // Utility methods. //===--------------------------------------------------===// - unsigned getHeight() const { return Root ? Root->getHeight() : 0; } + [[nodiscard]] unsigned getHeight() const { + return Root ? Root->getHeight() : 0; + } static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) { ID.AddPointer(M.Root.get()); diff --git a/llvm/include/llvm/ADT/ImmutableSet.h b/llvm/include/llvm/ADT/ImmutableSet.h index ac86f43b2048e..017585a47ddd6 100644 --- a/llvm/include/llvm/ADT/ImmutableSet.h +++ b/llvm/include/llvm/ADT/ImmutableSet.h @@ -531,7 +531,7 @@ class ImutAVLFactory { /// add_internal - Creates a new tree that includes the specified /// data and the data from the original tree. If the original tree /// already contained the data item, the original tree is returned. - TreeTy* add_internal(value_type_ref V, TreeTy* T) { + TreeTy *add_internal(value_type_ref V, TreeTy *T) { if (isEmpty(T)) return createNode(T, V, T); assert(!T->isMutable()); @@ -539,19 +539,34 @@ class ImutAVLFactory { key_type_ref K = ImutInfo::KeyOfValue(V); key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T)); - if (ImutInfo::isEqual(K,KCurrent)) + if (ImutInfo::isEqual(K, KCurrent)) { + // If both key and value are same, return the original tree. 
+ if (ImutInfo::isDataEqual(ImutInfo::DataOfValue(V), + ImutInfo::DataOfValue(getValue(T)))) + return T; + // Otherwise create a new node with the new value. return createNode(getLeft(T), V, getRight(T)); - else if (ImutInfo::isLess(K,KCurrent)) - return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T)); + } + + TreeTy *NewL = getLeft(T); + TreeTy *NewR = getRight(T); + if (ImutInfo::isLess(K, KCurrent)) + NewL = add_internal(V, NewL); else - return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T))); + NewR = add_internal(V, NewR); + + // If no changes were made, return the original tree. Otherwise, balance the + // tree and return the new root. + return NewL == getLeft(T) && NewR == getRight(T) + ? T + : balanceTree(NewL, getValue(T), NewR); } /// remove_internal - Creates a new tree that includes all the data /// from the original tree except the specified data. If the /// specified data did not exist in the original tree, the original /// tree is returned. - TreeTy* remove_internal(key_type_ref K, TreeTy* T) { + TreeTy *remove_internal(key_type_ref K, TreeTy *T) { if (isEmpty(T)) return T; @@ -559,15 +574,21 @@ class ImutAVLFactory { key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T)); - if (ImutInfo::isEqual(K,KCurrent)) { + if (ImutInfo::isEqual(K, KCurrent)) return combineTrees(getLeft(T), getRight(T)); - } else if (ImutInfo::isLess(K,KCurrent)) { - return balanceTree(remove_internal(K, getLeft(T)), - getValue(T), getRight(T)); - } else { - return balanceTree(getLeft(T), getValue(T), - remove_internal(K, getRight(T))); - } + + TreeTy *NewL = getLeft(T); + TreeTy *NewR = getRight(T); + if (ImutInfo::isLess(K, KCurrent)) + NewL = remove_internal(K, NewL); + else + NewR = remove_internal(K, NewR); + + // If no changes were made, return the original tree. Otherwise, balance the + // tree and return the new root. + return NewL == getLeft(T) && NewR == getRight(T) + ? T + : balanceTree(NewL, getValue(T), NewR); } TreeTy* combineTrees(TreeTy* L, TreeTy* R) { diff --git a/llvm/include/llvm/ADT/IntervalTree.h b/llvm/include/llvm/ADT/IntervalTree.h index 918c86227576e..d14de06f26dc3 100644 --- a/llvm/include/llvm/ADT/IntervalTree.h +++ b/llvm/include/llvm/ADT/IntervalTree.h @@ -236,8 +236,7 @@ template class IntervalData { //===----------------------------------------------------------------------===// // Helper class template that is used by the IntervalTree to ensure that one // does instantiate using only fundamental and/or pointer types. -template -using PointTypeIsValid = std::bool_constant::value>; +template using PointTypeIsValid = std::is_fundamental; template using ValueTypeIsValid = std::bool_constant::value || diff --git a/llvm/include/llvm/ADT/MapVector.h b/llvm/include/llvm/ADT/MapVector.h index 4a50126ff5aad..82f2c4977e01d 100644 --- a/llvm/include/llvm/ADT/MapVector.h +++ b/llvm/include/llvm/ADT/MapVector.h @@ -45,15 +45,15 @@ class MapVector { using const_reverse_iterator = typename VectorType::const_reverse_iterator; /// Clear the MapVector and return the underlying vector. - VectorType takeVector() { + [[nodiscard]] VectorType takeVector() { Map.clear(); return std::move(Vector); } /// Returns an array reference of the underlying vector. 
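// --- Illustrative aside (not part of the patch) -----------------------------
// Minimal sketch of the identity-preservation idea from the ImmutableSet
// add_internal/remove_internal hunks above: when an insertion or removal did
// not actually change a subtree, the original node is returned instead of a
// freshly balanced copy, so untouched trees keep sharing structure (and keep
// pointer identity). This toy unbalanced tree is a stand-in for the real AVL
// factory; makeNode is a hypothetical allocator.
struct Node {
  int Key;
  Node *Left, *Right;
};

inline Node *makeNode(int Key, Node *L, Node *R) { return new Node{Key, L, R}; }

inline Node *insertKey(Node *T, int Key) {
  if (!T)
    return makeNode(Key, nullptr, nullptr);
  if (Key == T->Key)
    return T; // already present: reuse the existing node untouched
  Node *NewL = T->Left, *NewR = T->Right;
  if (Key < T->Key)
    NewL = insertKey(NewL, Key);
  else
    NewR = insertKey(NewR, Key);
  // Only allocate when a child actually changed; otherwise return T itself.
  return (NewL == T->Left && NewR == T->Right) ? T
                                               : makeNode(T->Key, NewL, NewR);
}
// -----------------------------------------------------------------------------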
- ArrayRef getArrayRef() const { return Vector; } + [[nodiscard]] ArrayRef getArrayRef() const { return Vector; } - size_type size() const { return Vector.size(); } + [[nodiscard]] size_type size() const { return Vector.size(); } /// Grow the MapVector so that it can contain at least \p NumEntries items /// before resizing again. @@ -62,24 +62,28 @@ class MapVector { Vector.reserve(NumEntries); } - iterator begin() { return Vector.begin(); } - const_iterator begin() const { return Vector.begin(); } - iterator end() { return Vector.end(); } - const_iterator end() const { return Vector.end(); } + [[nodiscard]] iterator begin() { return Vector.begin(); } + [[nodiscard]] const_iterator begin() const { return Vector.begin(); } + [[nodiscard]] iterator end() { return Vector.end(); } + [[nodiscard]] const_iterator end() const { return Vector.end(); } - reverse_iterator rbegin() { return Vector.rbegin(); } - const_reverse_iterator rbegin() const { return Vector.rbegin(); } - reverse_iterator rend() { return Vector.rend(); } - const_reverse_iterator rend() const { return Vector.rend(); } - - bool empty() const { - return Vector.empty(); + [[nodiscard]] reverse_iterator rbegin() { return Vector.rbegin(); } + [[nodiscard]] const_reverse_iterator rbegin() const { + return Vector.rbegin(); } + [[nodiscard]] reverse_iterator rend() { return Vector.rend(); } + [[nodiscard]] const_reverse_iterator rend() const { return Vector.rend(); } + + [[nodiscard]] bool empty() const { return Vector.empty(); } - std::pair &front() { return Vector.front(); } - const std::pair &front() const { return Vector.front(); } - std::pair &back() { return Vector.back(); } - const std::pair &back() const { return Vector.back(); } + [[nodiscard]] std::pair &front() { return Vector.front(); } + [[nodiscard]] const std::pair &front() const { + return Vector.front(); + } + [[nodiscard]] std::pair &back() { return Vector.back(); } + [[nodiscard]] const std::pair &back() const { + return Vector.back(); + } void clear() { Map.clear(); @@ -96,7 +100,7 @@ class MapVector { } // Returns a copy of the value. Only allowed if ValueT is copyable. - ValueT lookup(const KeyT &Key) const { + [[nodiscard]] ValueT lookup(const KeyT &Key) const { static_assert(std::is_copy_constructible_v, "Cannot call lookup() if ValueT is not copyable."); typename MapType::const_iterator Pos = Map.find(Key); @@ -134,17 +138,21 @@ class MapVector { return Ret; } - bool contains(const KeyT &Key) const { return Map.find(Key) != Map.end(); } + [[nodiscard]] bool contains(const KeyT &Key) const { + return Map.find(Key) != Map.end(); + } - size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; } + [[nodiscard]] size_type count(const KeyT &Key) const { + return contains(Key) ? 1 : 0; + } - iterator find(const KeyT &Key) { + [[nodiscard]] iterator find(const KeyT &Key) { typename MapType::const_iterator Pos = Map.find(Key); return Pos == Map.end()? Vector.end() : (Vector.begin() + Pos->second); } - const_iterator find(const KeyT &Key) const { + [[nodiscard]] const_iterator find(const KeyT &Key) const { typename MapType::const_iterator Pos = Map.find(Key); return Pos == Map.end()? 
Vector.end() : (Vector.begin() + Pos->second); diff --git a/llvm/include/llvm/ADT/PackedVector.h b/llvm/include/llvm/ADT/PackedVector.h index 1146cc4bd6d23..09c20e39d1552 100644 --- a/llvm/include/llvm/ADT/PackedVector.h +++ b/llvm/include/llvm/ADT/PackedVector.h @@ -20,51 +20,6 @@ namespace llvm { -template -class PackedVectorBase; - -// This won't be necessary if we can specialize members without specializing -// the parent template. -template -class PackedVectorBase { -protected: - static T getValue(const BitVectorTy &Bits, unsigned Idx) { - T val = T(); - for (unsigned i = 0; i != BitNum; ++i) - val = T(val | ((Bits[(Idx * BitNum) + i] ? 1UL : 0UL) << i)); - return val; - } - - static void setValue(BitVectorTy &Bits, unsigned Idx, T val) { - assert((val >> BitNum) == 0 && "value is too big"); - for (unsigned i = 0; i != BitNum; ++i) - Bits[(Idx * BitNum) + i] = val & (T(1) << i); - } -}; - -template -class PackedVectorBase { -protected: - static T getValue(const BitVectorTy &Bits, unsigned Idx) { - T val = T(); - for (unsigned i = 0; i != BitNum-1; ++i) - val = T(val | ((Bits[(Idx * BitNum) + i] ? 1UL : 0UL) << i)); - if (Bits[(Idx * BitNum) + BitNum - 1]) - val = ~val; - return val; - } - - static void setValue(BitVectorTy &Bits, unsigned Idx, T val) { - if (val < 0) { - val = ~val; - Bits.set((Idx * BitNum) + BitNum - 1); - } - assert((val >> (BitNum-1)) == 0 && "value is too big"); - for (unsigned i = 0; i != BitNum-1; ++i) - Bits[(Idx * BitNum) + i] = val & (T(1) << i); - } -}; - /// Store a vector of values using a specific number of bits for each /// value. Both signed and unsigned types can be used, e.g /// @code @@ -73,15 +28,46 @@ class PackedVectorBase { /// will create a vector accepting values -2, -1, 0, 1. Any other value will hit /// an assertion. template -class PackedVector : public PackedVectorBase::is_signed> { +class PackedVector { BitVectorTy Bits; // Keep track of the number of elements on our own. // We always maintain Bits.size() == NumElements * BitNum. // Used to avoid an integer division in size(). unsigned NumElements = 0; - using base = PackedVectorBase::is_signed>; + + static T getValue(const BitVectorTy &Bits, unsigned Idx) { + if constexpr (std::numeric_limits::is_signed) { + T val = T(); + for (unsigned i = 0; i != BitNum - 1; ++i) + val = T(val | ((Bits[(Idx * BitNum) + i] ? 1UL : 0UL) << i)); + if (Bits[(Idx * BitNum) + BitNum - 1]) + val = ~val; + return val; + } else { + T val = T(); + for (unsigned i = 0; i != BitNum; ++i) + val = T(val | ((Bits[(Idx * BitNum) + i] ? 1UL : 0UL) << i)); + return val; + } + } + + static void setValue(BitVectorTy &Bits, unsigned Idx, T val) { + if constexpr (std::numeric_limits::is_signed) { + if (val < 0) { + val = ~val; + Bits.set((Idx * BitNum) + BitNum - 1); + } else { + Bits.reset((Idx * BitNum) + BitNum - 1); + } + assert((val >> (BitNum - 1)) == 0 && "value is too big"); + for (unsigned i = 0; i != BitNum - 1; ++i) + Bits[(Idx * BitNum) + i] = val & (T(1) << i); + } else { + assert((val >> BitNum) == 0 && "value is too big"); + for (unsigned i = 0; i != BitNum; ++i) + Bits[(Idx * BitNum) + i] = val & (T(1) << i); + } + } public: class reference { @@ -97,9 +83,7 @@ class PackedVector : public PackedVectorBase &base, ptrdiff_t index) { // We encode the internal base as a pair of the derived base and a start // index into the derived base. - return std::make_pair(base.first, base.second + index); + return {base.first, base.second + index}; } /// See `detail::indexed_accessor_range_base` for details. 
static ReferenceT diff --git a/llvm/include/llvm/ADT/SetVector.h b/llvm/include/llvm/ADT/SetVector.h index 5f6db9a78a003..c129f3a695b9e 100644 --- a/llvm/include/llvm/ADT/SetVector.h +++ b/llvm/include/llvm/ADT/SetVector.h @@ -87,72 +87,54 @@ class SetVector { SetVector(llvm::from_range_t, Range &&R) : SetVector(adl_begin(R), adl_end(R)) {} - ArrayRef getArrayRef() const { return vector_; } + [[nodiscard]] ArrayRef getArrayRef() const { return vector_; } /// Clear the SetVector and return the underlying vector. - Vector takeVector() { + [[nodiscard]] Vector takeVector() { set_.clear(); return std::move(vector_); } /// Determine if the SetVector is empty or not. - bool empty() const { - return vector_.empty(); - } + [[nodiscard]] bool empty() const { return vector_.empty(); } /// Determine the number of elements in the SetVector. - size_type size() const { - return vector_.size(); - } + [[nodiscard]] size_type size() const { return vector_.size(); } /// Get an iterator to the beginning of the SetVector. - iterator begin() { - return vector_.begin(); - } + [[nodiscard]] iterator begin() { return vector_.begin(); } /// Get a const_iterator to the beginning of the SetVector. - const_iterator begin() const { - return vector_.begin(); - } + [[nodiscard]] const_iterator begin() const { return vector_.begin(); } /// Get an iterator to the end of the SetVector. - iterator end() { - return vector_.end(); - } + [[nodiscard]] iterator end() { return vector_.end(); } /// Get a const_iterator to the end of the SetVector. - const_iterator end() const { - return vector_.end(); - } + [[nodiscard]] const_iterator end() const { return vector_.end(); } /// Get an reverse_iterator to the end of the SetVector. - reverse_iterator rbegin() { - return vector_.rbegin(); - } + [[nodiscard]] reverse_iterator rbegin() { return vector_.rbegin(); } /// Get a const_reverse_iterator to the end of the SetVector. - const_reverse_iterator rbegin() const { + [[nodiscard]] const_reverse_iterator rbegin() const { return vector_.rbegin(); } /// Get a reverse_iterator to the beginning of the SetVector. - reverse_iterator rend() { - return vector_.rend(); - } + [[nodiscard]] reverse_iterator rend() { return vector_.rend(); } /// Get a const_reverse_iterator to the beginning of the SetVector. - const_reverse_iterator rend() const { - return vector_.rend(); - } + [[nodiscard]] const_reverse_iterator rend() const { return vector_.rend(); } /// Return the first element of the SetVector. - const value_type &front() const { + [[nodiscard]] const value_type &front() const { assert(!empty() && "Cannot call front() on empty SetVector!"); return vector_.front(); } /// Return the last element of the SetVector. 
- const value_type &back() const { + [[nodiscard]] const value_type &back() const { assert(!empty() && "Cannot call back() on empty SetVector!"); return vector_.back(); } @@ -299,11 +281,11 @@ class SetVector { return Ret; } - bool operator==(const SetVector &that) const { + [[nodiscard]] bool operator==(const SetVector &that) const { return vector_ == that.vector_; } - bool operator!=(const SetVector &that) const { + [[nodiscard]] bool operator!=(const SetVector &that) const { return vector_ != that.vector_; } diff --git a/llvm/include/llvm/ADT/SmallPtrSet.h b/llvm/include/llvm/ADT/SmallPtrSet.h index 16ad3973e054d..f588a77a53b2a 100644 --- a/llvm/include/llvm/ADT/SmallPtrSet.h +++ b/llvm/include/llvm/ADT/SmallPtrSet.h @@ -96,8 +96,8 @@ class SmallPtrSetImplBase : public DebugEpochBase { SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete; [[nodiscard]] bool empty() const { return size() == 0; } - size_type size() const { return NumEntries; } - size_type capacity() const { return CurArraySize; } + [[nodiscard]] size_type size() const { return NumEntries; } + [[nodiscard]] size_type capacity() const { return CurArraySize; } void clear() { incrementEpoch(); @@ -136,12 +136,12 @@ class SmallPtrSetImplBase : public DebugEpochBase { } protected: - static void *getTombstoneMarker() { return reinterpret_cast(-2); } + static void *getTombstoneMarker() { return reinterpret_cast(-2); } static void *getEmptyMarker() { // Note that -1 is chosen to make clear() efficiently implementable with // memset and because it's not a valid pointer value. - return reinterpret_cast(-1); + return reinterpret_cast(-1); } const void **EndPointer() const { @@ -190,7 +190,7 @@ class SmallPtrSetImplBase : public DebugEpochBase { /// return true, otherwise return false. This is hidden from the client so /// that the derived class can check that the right type of pointer is passed /// in. - bool erase_imp(const void * Ptr) { + bool erase_imp(const void *Ptr) { if (isSmall()) { for (const void *&Bucket : small_buckets()) { if (Bucket == Ptr) { @@ -218,7 +218,7 @@ class SmallPtrSetImplBase : public DebugEpochBase { /// Returns the raw pointer needed to construct an iterator. If element not /// found, this will be EndPointer. Otherwise, it will be a pointer to the /// slot which stores Ptr; - const void *const * find_imp(const void * Ptr) const { + const void *const *find_imp(const void *Ptr) const { if (isSmall()) { // Linear search for the item. for (const void *const &Bucket : small_buckets()) @@ -251,7 +251,7 @@ class SmallPtrSetImplBase : public DebugEpochBase { LLVM_ABI std::pair insert_imp_big(const void *Ptr); LLVM_ABI const void *const *doFind(const void *Ptr) const; - const void * const *FindBucketFor(const void *Ptr) const; + const void *const *FindBucketFor(const void *Ptr) const; LLVM_ABI void shrink_and_clear(); /// Grow - Allocate a larger backing store for the buckets and move it over. @@ -279,18 +279,12 @@ class SmallPtrSetImplBase : public DebugEpochBase { /// SmallPtrSetIteratorImpl - This is the common base class shared between all /// instances of SmallPtrSetIterator. 
-class SmallPtrSetIteratorImpl { -protected: - const void *const *Bucket; - const void *const *End; - +class LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE SmallPtrSetIteratorImpl + : public DebugEpochBase::HandleBase { public: - explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E) - : Bucket(BP), End(E) { - if (shouldReverseIterate()) { - RetreatIfNotValid(); - return; - } + explicit SmallPtrSetIteratorImpl(const void *const *BP, const void *const *E, + const DebugEpochBase &Epoch) + : DebugEpochBase::HandleBase(&Epoch), Bucket(BP), End(E) { AdvanceIfNotValid(); } @@ -302,6 +296,18 @@ class SmallPtrSetIteratorImpl { } protected: + void *dereference() const { + assert(isHandleInSync() && "invalid iterator access!"); + assert(Bucket < End); + return const_cast(*Bucket); + } + void increment() { + assert(isHandleInSync() && "invalid iterator access!"); + ++Bucket; + AdvanceIfNotValid(); + } + +private: /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket /// that is. This is guaranteed to stop because the end() bucket is marked /// valid. @@ -312,21 +318,19 @@ class SmallPtrSetIteratorImpl { *Bucket == SmallPtrSetImplBase::getTombstoneMarker())) ++Bucket; } - void RetreatIfNotValid() { - assert(Bucket >= End); - while (Bucket != End && - (Bucket[-1] == SmallPtrSetImplBase::getEmptyMarker() || - Bucket[-1] == SmallPtrSetImplBase::getTombstoneMarker())) { - --Bucket; - } - } + + using BucketItTy = + std::conditional_t, + const void *const *>; + + BucketItTy Bucket; + BucketItTy End; }; /// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet. template -class LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE SmallPtrSetIterator - : public SmallPtrSetIteratorImpl, - DebugEpochBase::HandleBase { +class SmallPtrSetIterator : public SmallPtrSetIteratorImpl { using PtrTraits = PointerLikeTypeTraits; public: @@ -336,37 +340,22 @@ class LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE SmallPtrSetIterator using difference_type = std::ptrdiff_t; using iterator_category = std::forward_iterator_tag; - explicit SmallPtrSetIterator(const void *const *BP, const void *const *E, - const DebugEpochBase &Epoch) - : SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {} + using SmallPtrSetIteratorImpl::SmallPtrSetIteratorImpl; // Most methods are provided by the base class. - const PtrTy operator*() const { - assert(isHandleInSync() && "invalid iterator access!"); - if (shouldReverseIterate()) { - assert(Bucket > End); - return PtrTraits::getFromVoidPointer(const_cast(Bucket[-1])); - } - assert(Bucket < End); - return PtrTraits::getFromVoidPointer(const_cast(*Bucket)); + [[nodiscard]] const PtrTy operator*() const { + return PtrTraits::getFromVoidPointer(dereference()); } - inline SmallPtrSetIterator& operator++() { // Preincrement - assert(isHandleInSync() && "invalid iterator access!"); - if (shouldReverseIterate()) { - --Bucket; - RetreatIfNotValid(); - return *this; - } - ++Bucket; - AdvanceIfNotValid(); + inline SmallPtrSetIterator &operator++() { // Preincrement + increment(); return *this; } - SmallPtrSetIterator operator++(int) { // Postincrement + SmallPtrSetIterator operator++(int) { // Postincrement SmallPtrSetIterator tmp = *this; - ++*this; + increment(); return tmp; } }; @@ -376,8 +365,7 @@ class LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE SmallPtrSetIterator /// /// This is particularly useful for passing around between interface boundaries /// to avoid encoding a particular small size in the interface boundary. 
-template -class SmallPtrSetImpl : public SmallPtrSetImplBase { +template class SmallPtrSetImpl : public SmallPtrSetImplBase { using ConstPtrType = typename add_const_past_pointer::type; using PtrTraits = PointerLikeTypeTraits; using ConstPtrTraits = PointerLikeTypeTraits; @@ -406,9 +394,7 @@ class SmallPtrSetImpl : public SmallPtrSetImplBase { /// Insert the given pointer with an iterator hint that is ignored. This is /// identical to calling insert(Ptr), but allows SmallPtrSet to be used by /// std::insert_iterator and std::inserter(). - iterator insert(iterator, PtrType Ptr) { - return insert(Ptr).first; - } + iterator insert(iterator, PtrType Ptr) { return insert(Ptr).first; } /// Remove pointer from the set. /// @@ -431,8 +417,7 @@ class SmallPtrSetImpl : public SmallPtrSetImplBase { /// Returns whether anything was removed. It is safe to read the set inside /// the predicate function. However, the predicate must not modify the set /// itself, only indicate a removal by returning true. - template - bool remove_if(UnaryPredicate P) { + template bool remove_if(UnaryPredicate P) { bool Removed = false; if (isSmall()) { auto Buckets = small_buckets(); @@ -467,18 +452,17 @@ class SmallPtrSetImpl : public SmallPtrSetImplBase { } /// count - Return 1 if the specified pointer is in the set, 0 otherwise. - size_type count(ConstPtrType Ptr) const { + [[nodiscard]] size_type count(ConstPtrType Ptr) const { return contains_imp(ConstPtrTraits::getAsVoidPointer(Ptr)); } - iterator find(ConstPtrType Ptr) const { + [[nodiscard]] iterator find(ConstPtrType Ptr) const { return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr))); } - bool contains(ConstPtrType Ptr) const { + [[nodiscard]] bool contains(ConstPtrType Ptr) const { return contains_imp(ConstPtrTraits::getAsVoidPointer(Ptr)); } - template - void insert(IterT I, IterT E) { + template void insert(IterT I, IterT E) { for (; I != E; ++I) insert(*I); } @@ -491,19 +475,21 @@ class SmallPtrSetImpl : public SmallPtrSetImplBase { insert(adl_begin(R), adl_end(R)); } - iterator begin() const { - if (shouldReverseIterate()) + [[nodiscard]] iterator begin() const { + if constexpr (shouldReverseIterate()) return makeIterator(EndPointer() - 1); - return makeIterator(CurArray); + else + return makeIterator(CurArray); } - iterator end() const { return makeIterator(EndPointer()); } + [[nodiscard]] iterator end() const { return makeIterator(EndPointer()); } private: /// Create an iterator that dereferences to same place as the given pointer. iterator makeIterator(const void *const *P) const { - if (shouldReverseIterate()) + if constexpr (shouldReverseIterate()) return iterator(P == EndPointer() ? CurArray : P + 1, CurArray, *this); - return iterator(P, EndPointer(), *this); + else + return iterator(P, EndPointer(), *this); } }; @@ -512,8 +498,8 @@ class SmallPtrSetImpl : public SmallPtrSetImplBase { /// Iterates over elements of LHS confirming that each value from LHS is also in /// RHS, and that no additional values are in RHS. template -bool operator==(const SmallPtrSetImpl &LHS, - const SmallPtrSetImpl &RHS) { +[[nodiscard]] bool operator==(const SmallPtrSetImpl &LHS, + const SmallPtrSetImpl &RHS) { if (LHS.size() != RHS.size()) return false; @@ -528,8 +514,8 @@ bool operator==(const SmallPtrSetImpl &LHS, /// /// Equivalent to !(LHS == RHS). 
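// --- Illustrative aside (not part of the patch) -----------------------------
// The SmallPtrSetIteratorImpl hunk above appears to choose its BucketItTy with
// std::conditional_t (the exact template arguments are not visible in this
// rendering). The general idiom is sketched here with a hypothetical Walker
// type: a compile-time flag selects between a plain forward pointer and a
// std::reverse_iterator over the same storage, and the traversal code is
// written once against the chosen alias.
#include <iterator>
#include <type_traits>

template <bool Reverse> struct Walker {
  using SlotIt = std::conditional_t<Reverse,
                                    std::reverse_iterator<const void *const *>,
                                    const void *const *>;
  SlotIt Cur;
  SlotIt End;

  bool done() const { return Cur == End; }
  const void *get() const { return *Cur; }
  void next() { ++Cur; } // with Reverse, ++ walks backwards through the array
};
// -----------------------------------------------------------------------------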
template -bool operator!=(const SmallPtrSetImpl &LHS, - const SmallPtrSetImpl &RHS) { +[[nodiscard]] bool operator!=(const SmallPtrSetImpl &LHS, + const SmallPtrSetImpl &RHS) { return !(LHS == RHS); } @@ -537,7 +523,7 @@ bool operator!=(const SmallPtrSetImpl &LHS, /// SmallSize or less elements. This internally rounds up SmallSize to the next /// power of two if it is not already a power of two. See the comments above /// SmallPtrSetImplBase for details of the algorithm. -template +template class SmallPtrSet : public SmallPtrSetImpl { // In small mode SmallPtrSet uses linear search for the elements, so it is // not a good idea to choose this value too high. You may consider using a @@ -568,7 +554,7 @@ class SmallPtrSet : public SmallPtrSetImpl { : BaseT(SmallStorage, SmallSizePowTwo, that.SmallStorage, std::move(that)) {} - template + template SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) { this->insert(I, E); } @@ -610,16 +596,16 @@ class SmallPtrSet : public SmallPtrSetImpl { } }; -} // end namespace llvm +} // namespace llvm namespace std { - /// Implement std::swap in terms of SmallPtrSet swap. - template - inline void swap(llvm::SmallPtrSet &LHS, llvm::SmallPtrSet &RHS) { - LHS.swap(RHS); - } +/// Implement std::swap in terms of SmallPtrSet swap. +template +inline void swap(llvm::SmallPtrSet &LHS, llvm::SmallPtrSet &RHS) { + LHS.swap(RHS); +} -} // end namespace std +} // namespace std #endif // LLVM_ADT_SMALLPTRSET_H diff --git a/llvm/include/llvm/ADT/SmallSet.h b/llvm/include/llvm/ADT/SmallSet.h index 0e90293352630..3ca833f15eed3 100644 --- a/llvm/include/llvm/ADT/SmallSet.h +++ b/llvm/include/llvm/ADT/SmallSet.h @@ -167,12 +167,14 @@ class SmallSet { [[nodiscard]] bool empty() const { return Vector.empty() && Set.empty(); } - size_type size() const { + [[nodiscard]] size_type size() const { return isSmall() ? Vector.size() : Set.size(); } /// count - Return 1 if the element is in the set, 0 otherwise. - size_type count(const T &V) const { return contains(V) ? 1 : 0; } + [[nodiscard]] size_type count(const T &V) const { + return contains(V) ? 1 : 0; + } /// insert - Insert an element into the set if it isn't already there. /// Returns a pair. The first value of it is an iterator to the inserted @@ -210,20 +212,20 @@ class SmallSet { Set.clear(); } - const_iterator begin() const { + [[nodiscard]] const_iterator begin() const { if (isSmall()) return {Vector.begin()}; return {Set.begin()}; } - const_iterator end() const { + [[nodiscard]] const_iterator end() const { if (isSmall()) return {Vector.end()}; return {Set.end()}; } /// Check if the SmallSet contains the given element. - bool contains(const T &V) const { + [[nodiscard]] bool contains(const T &V) const { if (isSmall()) return vfind(V) != Vector.end(); return Set.find(V) != Set.end(); @@ -279,7 +281,8 @@ class SmallSet : public SmallPtrSet {}; /// For large-set mode amortized complexity is linear, worst case is O(N^2) (if /// every hash collides). template -bool operator==(const SmallSet &LHS, const SmallSet &RHS) { +[[nodiscard]] bool operator==(const SmallSet &LHS, + const SmallSet &RHS) { if (LHS.size() != RHS.size()) return false; @@ -291,7 +294,8 @@ bool operator==(const SmallSet &LHS, const SmallSet &RHS) { /// /// Equivalent to !(LHS == RHS). See operator== for performance notes. 
template -bool operator!=(const SmallSet &LHS, const SmallSet &RHS) { +[[nodiscard]] bool operator!=(const SmallSet &LHS, + const SmallSet &RHS) { return !(LHS == RHS); } diff --git a/llvm/include/llvm/ADT/SmallVector.h b/llvm/include/llvm/ADT/SmallVector.h index 36b324355ee10..efae6f339f9de 100644 --- a/llvm/include/llvm/ADT/SmallVector.h +++ b/llvm/include/llvm/ADT/SmallVector.h @@ -199,17 +199,18 @@ class SmallVectorTemplateCommon } /// Check whether any part of the range will be invalidated by clearing. - void assertSafeToReferenceAfterClear(const T *From, const T *To) { - if (From == To) - return; - this->assertSafeToReferenceAfterResize(From, 0); - this->assertSafeToReferenceAfterResize(To - 1, 0); + template + void assertSafeToReferenceAfterClear(ItTy From, ItTy To) { + if constexpr (std::is_pointer_v && + std::is_same_v< + std::remove_const_t>, + std::remove_const_t>) { + if (From == To) + return; + this->assertSafeToReferenceAfterResize(From, 0); + this->assertSafeToReferenceAfterResize(To - 1, 0); + } } - template < - class ItTy, - std::enable_if_t, T *>::value, - bool> = false> - void assertSafeToReferenceAfterClear(ItTy, ItTy) {} /// Check whether any part of the range will be invalidated by growing. template void assertSafeToAddRange(ItTy From, ItTy To) { @@ -501,25 +502,22 @@ class SmallVectorTemplateBase : public SmallVectorTemplateCommon { /// Copy the range [I, E) onto the uninitialized memory /// starting with "Dest", constructing elements into it as needed. - template + template static void uninitialized_copy(It1 I, It1 E, It2 Dest) { - // Arbitrary iterator types; just use the basic implementation. - std::uninitialized_copy(I, E, Dest); - } - - /// Copy the range [I, E) onto the uninitialized memory - /// starting with "Dest", constructing elements into it as needed. - template - static void uninitialized_copy( - T1 *I, T1 *E, T2 *Dest, - std::enable_if_t, T2>::value> * = - nullptr) { - // Use memcpy for PODs iterated by pointers (which includes SmallVector - // iterators): std::uninitialized_copy optimizes to memmove, but we can - // use memcpy here. Note that I and E are iterators and thus might be - // invalid for memcpy if they are equal. - if (I != E) - std::memcpy(reinterpret_cast(Dest), I, (E - I) * sizeof(T)); + if constexpr (std::is_pointer_v && std::is_pointer_v && + std::is_same_v< + std::remove_const_t>, + std::remove_pointer_t>) { + // Use memcpy for PODs iterated by pointers (which includes SmallVector + // iterators): std::uninitialized_copy optimizes to memmove, but we can + // use memcpy here. Note that I and E are iterators and thus might be + // invalid for memcpy if they are equal. + if (I != E) + std::memcpy(reinterpret_cast(Dest), I, (E - I) * sizeof(T)); + } else { + // Arbitrary iterator types; just use the basic implementation. + std::uninitialized_copy(I, E, Dest); + } } /// Double the size of the allocated memory, guaranteeing space for at diff --git a/llvm/include/llvm/ADT/SparseMultiSet.h b/llvm/include/llvm/ADT/SparseMultiSet.h index cf7603158b28b..0aa7edbcea673 100644 --- a/llvm/include/llvm/ADT/SparseMultiSet.h +++ b/llvm/include/llvm/ADT/SparseMultiSet.h @@ -400,7 +400,7 @@ class SparseMultiSet { RangePair equal_range(const KeyT &K) { iterator B = find(K); iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx); - return std::make_pair(B, E); + return {B, E}; } /// Insert a new element at the tail of the subset list. 
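The SmallVector hunk above replaces two SFINAE-selected uninitialized_copy overloads with a single function that branches with if constexpr: memcpy when both iterators are raw pointers to a trivially copyable element, the generic algorithm otherwise. A self-contained sketch of that dispatch (a hypothetical free function; the real member also checks that the two pointee types match):

    #include <cstring>
    #include <iterator>
    #include <memory>
    #include <type_traits>

    template <typename It1, typename It2>
    void uninitialized_copy_sketch(It1 I, It1 E, It2 Dest) {
      using T = typename std::iterator_traits<It2>::value_type;
      if constexpr (std::is_pointer_v<It1> && std::is_pointer_v<It2> &&
                    std::is_trivially_copyable_v<T>) {
        // I and E are iterators and may not be dereferenceable when equal,
        // so guard the memcpy.
        if (I != E)
          std::memcpy(Dest, I, (E - I) * sizeof(T));
      } else {
        // Arbitrary iterator types: fall back to the generic algorithm.
        std::uninitialized_copy(I, E, Dest);
      }
    }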
Returns an iterator diff --git a/llvm/include/llvm/ADT/SparseSet.h b/llvm/include/llvm/ADT/SparseSet.h index 395cfc3ebfd43..9783301be4b64 100644 --- a/llvm/include/llvm/ADT/SparseSet.h +++ b/llvm/include/llvm/ADT/SparseSet.h @@ -171,23 +171,23 @@ class SparseSet { using iterator = typename DenseT::iterator; using const_iterator = typename DenseT::const_iterator; - const_iterator begin() const { return Dense.begin(); } - const_iterator end() const { return Dense.end(); } - iterator begin() { return Dense.begin(); } - iterator end() { return Dense.end(); } + [[nodiscard]] const_iterator begin() const { return Dense.begin(); } + [[nodiscard]] const_iterator end() const { return Dense.end(); } + [[nodiscard]] iterator begin() { return Dense.begin(); } + [[nodiscard]] iterator end() { return Dense.end(); } /// empty - Returns true if the set is empty. /// /// This is not the same as BitVector::empty(). /// - bool empty() const { return Dense.empty(); } + [[nodiscard]] bool empty() const { return Dense.empty(); } /// size - Returns the number of elements in the set. /// /// This is not the same as BitVector::size() which returns the size of the /// universe. /// - size_type size() const { return Dense.size(); } + [[nodiscard]] size_type size() const { return Dense.size(); } /// clear - Clears the set. This is a very fast constant time operation. /// @@ -222,21 +222,27 @@ class SparseSet { /// @param Key A valid key to find. /// @returns An iterator to the element identified by key, or end(). /// - iterator find(const KeyT &Key) { return findIndex(KeyIndexOf(Key)); } + [[nodiscard]] iterator find(const KeyT &Key) { + return findIndex(KeyIndexOf(Key)); + } - const_iterator find(const KeyT &Key) const { + [[nodiscard]] const_iterator find(const KeyT &Key) const { return const_cast(this)->findIndex(KeyIndexOf(Key)); } /// Check if the set contains the given \c Key. /// /// @param Key A valid key to find. - bool contains(const KeyT &Key) const { return find(Key) != end(); } + [[nodiscard]] bool contains(const KeyT &Key) const { + return find(Key) != end(); + } /// count - Returns 1 if this set contains an element identified by Key, /// 0 otherwise. /// - size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; } + [[nodiscard]] size_type count(const KeyT &Key) const { + return contains(Key) ? 1 : 0; + } /// insert - Attempts to insert a new element. /// diff --git a/llvm/include/llvm/ADT/StringMap.h b/llvm/include/llvm/ADT/StringMap.h index 2c146fbf08df1..01cbf2d3fff71 100644 --- a/llvm/include/llvm/ADT/StringMap.h +++ b/llvm/include/llvm/ADT/StringMap.h @@ -102,18 +102,18 @@ class StringMapImpl { return reinterpret_cast(TombstoneIntVal); } - unsigned getNumBuckets() const { return NumBuckets; } - unsigned getNumItems() const { return NumItems; } + [[nodiscard]] unsigned getNumBuckets() const { return NumBuckets; } + [[nodiscard]] unsigned getNumItems() const { return NumItems; } - bool empty() const { return NumItems == 0; } - unsigned size() const { return NumItems; } + [[nodiscard]] bool empty() const { return NumItems == 0; } + [[nodiscard]] unsigned size() const { return NumItems; } /// Returns the hash value that will be used for the given string. /// This allows precomputing the value and passing it explicitly /// to some of the functions. /// The implementation of this function is not guaranteed to be stable /// and may change. 
- LLVM_ABI static uint32_t hash(StringRef Key); + [[nodiscard]] LLVM_ABI static uint32_t hash(StringRef Key); void swap(StringMapImpl &Other) { std::swap(TheTable, Other.TheTable); @@ -220,30 +220,35 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap using const_iterator = StringMapIterBase; using iterator = StringMapIterBase; - iterator begin() { return iterator(TheTable, NumBuckets != 0); } - iterator end() { return iterator(TheTable + NumBuckets); } - const_iterator begin() const { + [[nodiscard]] iterator begin() { return iterator(TheTable, NumBuckets != 0); } + [[nodiscard]] iterator end() { return iterator(TheTable + NumBuckets); } + [[nodiscard]] const_iterator begin() const { return const_iterator(TheTable, NumBuckets != 0); } - const_iterator end() const { return const_iterator(TheTable + NumBuckets); } + [[nodiscard]] const_iterator end() const { + return const_iterator(TheTable + NumBuckets); + } - iterator_range> keys() const { + [[nodiscard]] iterator_range> keys() const { return make_range(StringMapKeyIterator(begin()), StringMapKeyIterator(end())); } - iterator find(StringRef Key) { return find(Key, hash(Key)); } + [[nodiscard]] iterator find(StringRef Key) { return find(Key, hash(Key)); } - iterator find(StringRef Key, uint32_t FullHashValue) { + [[nodiscard]] iterator find(StringRef Key, uint32_t FullHashValue) { int Bucket = FindKey(Key, FullHashValue); if (Bucket == -1) return end(); return iterator(TheTable + Bucket); } - const_iterator find(StringRef Key) const { return find(Key, hash(Key)); } + [[nodiscard]] const_iterator find(StringRef Key) const { + return find(Key, hash(Key)); + } - const_iterator find(StringRef Key, uint32_t FullHashValue) const { + [[nodiscard]] const_iterator find(StringRef Key, + uint32_t FullHashValue) const { int Bucket = FindKey(Key, FullHashValue); if (Bucket == -1) return end(); @@ -252,7 +257,7 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap /// lookup - Return the entry for the specified key, or a default /// constructed value if no such entry exists. - ValueTy lookup(StringRef Key) const { + [[nodiscard]] ValueTy lookup(StringRef Key) const { const_iterator Iter = find(Key); if (Iter != end()) return Iter->second; @@ -261,7 +266,7 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap /// at - Return the entry for the specified key, or abort if no such /// entry exists. - const ValueTy &at(StringRef Val) const { + [[nodiscard]] const ValueTy &at(StringRef Val) const { auto Iter = this->find(Val); assert(Iter != this->end() && "StringMap::at failed due to a missing key"); return Iter->second; @@ -272,18 +277,22 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap ValueTy &operator[](StringRef Key) { return try_emplace(Key).first->second; } /// contains - Return true if the element is in the map, false otherwise. - bool contains(StringRef Key) const { return find(Key) != end(); } + [[nodiscard]] bool contains(StringRef Key) const { + return find(Key) != end(); + } /// count - Return 1 if the element is in the map, 0 otherwise. - size_type count(StringRef Key) const { return contains(Key) ? 1 : 0; } + [[nodiscard]] size_type count(StringRef Key) const { + return contains(Key) ? 1 : 0; + } template - size_type count(const StringMapEntry &MapEntry) const { + [[nodiscard]] size_type count(const StringMapEntry &MapEntry) const { return count(MapEntry.getKey()); } /// equal - check whether both of the containers are equal. 
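The hash()/find(Key, FullHashValue) pair kept above exists so callers can hash a key once and reuse the value across several lookups. A hedged usage sketch; MapA, MapB, and Key are assumed variables, and both maps must be llvm::StringMap instances so they share the same hash function:

    uint32_t H = llvm::StringMap<int>::hash(Key); // hash computed once
    auto ItA = MapA.find(Key, H);
    auto ItB = MapB.find(Key, H);
    if (ItA != MapA.end() && ItB != MapB.end()) {
      // both maps contain Key, and the string was hashed only once
    }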
- bool operator==(const StringMap &RHS) const { + [[nodiscard]] bool operator==(const StringMap &RHS) const { if (size() != RHS.size()) return false; @@ -302,7 +311,9 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap return true; } - bool operator!=(const StringMap &RHS) const { return !(*this == RHS); } + [[nodiscard]] bool operator!=(const StringMap &RHS) const { + return !(*this == RHS); + } /// insert - Insert the specified key/value pair into the map. If the key /// already exists in the map, return false and ignore the request, otherwise @@ -447,8 +458,12 @@ template class StringMapIterBase { AdvancePastEmptyBuckets(); } - reference operator*() const { return *static_cast(*Ptr); } - pointer operator->() const { return static_cast(*Ptr); } + [[nodiscard]] reference operator*() const { + return *static_cast(*Ptr); + } + [[nodiscard]] pointer operator->() const { + return static_cast(*Ptr); + } StringMapIterBase &operator++() { // Preincrement ++Ptr; diff --git a/llvm/include/llvm/ADT/StringRef.h b/llvm/include/llvm/ADT/StringRef.h index 49a52fbe1a6f7..7aee2aa67ddec 100644 --- a/llvm/include/llvm/ADT/StringRef.h +++ b/llvm/include/llvm/ADT/StringRef.h @@ -717,8 +717,8 @@ namespace llvm { split(StringRef Separator) const { size_t Idx = find(Separator); if (Idx == npos) - return std::make_pair(*this, StringRef()); - return std::make_pair(slice(0, Idx), substr(Idx + Separator.size())); + return {*this, StringRef()}; + return {slice(0, Idx), substr(Idx + Separator.size())}; } /// Split into two substrings around the last occurrence of a separator @@ -735,8 +735,8 @@ namespace llvm { rsplit(StringRef Separator) const { size_t Idx = rfind(Separator); if (Idx == npos) - return std::make_pair(*this, StringRef()); - return std::make_pair(slice(0, Idx), substr(Idx + Separator.size())); + return {*this, StringRef()}; + return {slice(0, Idx), substr(Idx + Separator.size())}; } /// Split into substrings around the occurrences of a separator string. diff --git a/llvm/include/llvm/ADT/StringSet.h b/llvm/include/llvm/ADT/StringSet.h index b4853423a1ef3..c8be3f2a503e4 100644 --- a/llvm/include/llvm/ADT/StringSet.h +++ b/llvm/include/llvm/ADT/StringSet.h @@ -57,7 +57,9 @@ class StringSet : public StringMap { } /// Check if the set contains the given \c key. - bool contains(StringRef key) const { return Base::contains(key); } + [[nodiscard]] bool contains(StringRef key) const { + return Base::contains(key); + } }; } // end namespace llvm diff --git a/llvm/include/llvm/ADT/StringTable.h b/llvm/include/llvm/ADT/StringTable.h index 575b3c929e40c..9422a6da1ce8e 100644 --- a/llvm/include/llvm/ADT/StringTable.h +++ b/llvm/include/llvm/ADT/StringTable.h @@ -118,12 +118,8 @@ class StringTable { constexpr Iterator(const Iterator &RHS) = default; constexpr Iterator(Iterator &&RHS) = default; - Iterator &operator=(const Iterator &RHS) { - Table = RHS.Table; - O = RHS.O; - S = RHS.S; - return *this; - } + constexpr Iterator &operator=(const Iterator &RHS) = default; + constexpr Iterator &operator=(Iterator &&RHS) = default; bool operator==(const Iterator &RHS) const { assert(Table == RHS.Table && "Compared iterators for unrelated tables!"); diff --git a/llvm/include/llvm/ADT/ilist_node.h b/llvm/include/llvm/ADT/ilist_node.h index 8d78d5dbbda44..2af1c6ebbffce 100644 --- a/llvm/include/llvm/ADT/ilist_node.h +++ b/llvm/include/llvm/ADT/ilist_node.h @@ -51,12 +51,11 @@ class ilist_iterator_w_bits; template class ilist_sentinel; // Selector for which iterator type to pick given the iterator-bits node option. 
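The StringRef split()/rsplit() hunks above only change the return expression (braced initialization instead of std::make_pair); call sites are untouched. A tiny usage reminder with an illustrative string:

    llvm::StringRef Line = "key=value";
    auto [Key, Value] = Line.split("=");   // Key == "key", Value == "value"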
-template -struct ilist_select_iterator_type { - using type = std::conditional_t, - ilist_iterator>; -}; +template +using ilist_select_iterator_type = + std::conditional_t, + ilist_iterator>; /// Implementation for an ilist node. /// @@ -91,18 +90,12 @@ class ilist_node_impl friend class ilist_iterator_w_bits; protected: - using self_iterator = - typename ilist_select_iterator_type::type; - using const_self_iterator = - typename ilist_select_iterator_type::type; + using self_iterator = ilist_select_iterator_type; + using const_self_iterator = ilist_select_iterator_type; using reverse_self_iterator = - typename ilist_select_iterator_type::type; + ilist_select_iterator_type; using const_reverse_self_iterator = - typename ilist_select_iterator_type::type; + ilist_select_iterator_type; ilist_node_impl() = default; diff --git a/llvm/include/llvm/ADT/simple_ilist.h b/llvm/include/llvm/ADT/simple_ilist.h index 7236b3fa5a7d2..fcb2e41f62bf0 100644 --- a/llvm/include/llvm/ADT/simple_ilist.h +++ b/llvm/include/llvm/ADT/simple_ilist.h @@ -92,18 +92,11 @@ class simple_ilist using reference = typename OptionsT::reference; using const_pointer = typename OptionsT::const_pointer; using const_reference = typename OptionsT::const_reference; - using iterator = - typename ilist_select_iterator_type::type; - using const_iterator = - typename ilist_select_iterator_type::type; - using reverse_iterator = - typename ilist_select_iterator_type::type; + using iterator = ilist_select_iterator_type; + using const_iterator = ilist_select_iterator_type; + using reverse_iterator = ilist_select_iterator_type; using const_reverse_iterator = - typename ilist_select_iterator_type::type; + ilist_select_iterator_type; using size_type = size_t; using difference_type = ptrdiff_t; diff --git a/llvm/include/llvm/Analysis/AssumptionCache.h b/llvm/include/llvm/Analysis/AssumptionCache.h index 1b026ef76a45e..5656729d20366 100644 --- a/llvm/include/llvm/Analysis/AssumptionCache.h +++ b/llvm/include/llvm/Analysis/AssumptionCache.h @@ -28,6 +28,7 @@ namespace llvm { class AssumeInst; +struct OperandBundleUse; class Function; class raw_ostream; class TargetTransformInfo; @@ -65,7 +66,7 @@ class AssumptionCache { /// Vector of weak value handles to calls of the \@llvm.assume /// intrinsic. - SmallVector AssumeHandles; + SmallVector AssumeHandles; class LLVM_ABI AffectedValueCallbackVH final : public CallbackVH { AssumptionCache *AC; @@ -148,7 +149,7 @@ class AssumptionCache { /// FIXME: We should replace this with pointee_iterator> /// when we can write that to filter out the null values. Then caller code /// will become simpler. - MutableArrayRef assumptions() { + MutableArrayRef assumptions() { if (!Scanned) scanFunction(); return AssumeHandles; @@ -165,6 +166,11 @@ class AssumptionCache { return AVI->second; } + + /// Determine which values are affected by this assume operand bundle. + static void + findValuesAffectedByOperandBundle(OperandBundleUse Bundle, + function_ref InsertAffected); }; /// A function analysis which provides an \c AssumptionCache. 
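The ilist_select_iterator_type rewrite above is the standard "class template exposing ::type" to "alias template" simplification, which also removes the typename ...::type spelling at every use site (as the ilist_node_impl and simple_ilist hunks show). A generic illustration of the same refactor, using made-up names:

    #include <type_traits>

    // Before: users must spell typename select_iterator<IsConst>::type.
    template <bool IsConst> struct select_iterator {
      using type = std::conditional_t<IsConst, const int *, int *>;
    };

    // After: users simply write select_iterator_t<IsConst>.
    template <bool IsConst>
    using select_iterator_t = std::conditional_t<IsConst, const int *, int *>;

    static_assert(std::is_same_v<typename select_iterator<true>::type,
                                 select_iterator_t<true>>);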
diff --git a/llvm/include/llvm/Analysis/IR2Vec.h b/llvm/include/llvm/Analysis/IR2Vec.h index 3671c1c71ac0b..ed43f19b4a7d3 100644 --- a/llvm/include/llvm/Analysis/IR2Vec.h +++ b/llvm/include/llvm/Analysis/IR2Vec.h @@ -36,6 +36,7 @@ #define LLVM_ANALYSIS_IR2VEC_H #include "llvm/ADT/DenseMap.h" +#include "llvm/IR/Instructions.h" #include "llvm/IR/PassManager.h" #include "llvm/IR/Type.h" #include "llvm/Support/CommandLine.h" @@ -44,6 +45,7 @@ #include "llvm/Support/JSON.h" #include #include +#include namespace llvm { @@ -143,6 +145,80 @@ struct Embedding { using InstEmbeddingsMap = DenseMap; using BBEmbeddingsMap = DenseMap; +/// Generic storage class for section-based vocabularies. +/// VocabStorage provides a generic foundation for storing and accessing +/// embeddings organized into sections. +class VocabStorage { +private: + /// Section-based storage + std::vector> Sections; + + const size_t TotalSize; + const unsigned Dimension; + +public: + /// Default constructor creates empty storage (invalid state) + VocabStorage() : Sections(), TotalSize(0), Dimension(0) {} + + /// Create a VocabStorage with pre-organized section data + VocabStorage(std::vector> &&SectionData); + + VocabStorage(VocabStorage &&) = default; + VocabStorage &operator=(VocabStorage &&) = delete; + + VocabStorage(const VocabStorage &) = delete; + VocabStorage &operator=(const VocabStorage &) = delete; + + /// Get total number of entries across all sections + size_t size() const { return TotalSize; } + + /// Get number of sections + unsigned getNumSections() const { + return static_cast(Sections.size()); + } + + /// Section-based access: Storage[sectionId][localIndex] + const std::vector &operator[](unsigned SectionId) const { + assert(SectionId < Sections.size() && "Invalid section ID"); + return Sections[SectionId]; + } + + /// Get vocabulary dimension + unsigned getDimension() const { return Dimension; } + + /// Check if vocabulary is valid (has data) + bool isValid() const { return TotalSize > 0; } + + /// Iterator support for section-based access + class const_iterator { + const VocabStorage *Storage; + unsigned SectionId = 0; + size_t LocalIndex = 0; + + public: + const_iterator(const VocabStorage *Storage, unsigned SectionId, + size_t LocalIndex) + : Storage(Storage), SectionId(SectionId), LocalIndex(LocalIndex) {} + + LLVM_ABI const Embedding &operator*() const; + LLVM_ABI const_iterator &operator++(); + LLVM_ABI bool operator==(const const_iterator &Other) const; + LLVM_ABI bool operator!=(const const_iterator &Other) const; + }; + + const_iterator begin() const { return const_iterator(this, 0, 0); } + const_iterator end() const { + return const_iterator(this, getNumSections(), 0); + } + + using VocabMap = std::map; + /// Parse a vocabulary section from JSON and populate the target vocabulary + /// map. + static Error parseVocabSection(StringRef Key, + const json::Value &ParsedVocabValue, + VocabMap &TargetVocab, unsigned &Dim); +}; + /// Class for storing and accessing the IR2Vec vocabulary. /// The Vocabulary class manages seed embeddings for LLVM IR entities. The /// seed embeddings are the initial learned representations of the entities @@ -162,15 +238,42 @@ using BBEmbeddingsMap = DenseMap; /// embeddings. class Vocabulary { friend class llvm::IR2VecVocabAnalysis; - using VocabVector = std::vector; - VocabVector Vocab; -public: - // Slot layout: - // [0 .. MaxOpcodes-1] => Instruction opcodes - // [MaxOpcodes .. MaxOpcodes+MaxCanonicalTypeIDs-1] => Canonicalized types - // [MaxOpcodes+MaxCanonicalTypeIDs .. 
NumCanonicalEntries-1] => Operand kinds + // Vocabulary Layout: + // +----------------+------------------------------------------------------+ + // | Entity Type | Index Range | + // +----------------+------------------------------------------------------+ + // | Opcodes | [0 .. (MaxOpcodes-1)] | + // | Canonical Types| [MaxOpcodes .. (MaxOpcodes+MaxCanonicalTypeIDs-1)] | + // | Operands | [(MaxOpcodes+MaxCanonicalTypeIDs) .. NumCanEntries] | + // +----------------+------------------------------------------------------+ + // Note: MaxOpcodes is the number of unique opcodes supported by LLVM IR. + // MaxCanonicalTypeIDs is the number of canonicalized type IDs. + // "Similar" LLVM Types are grouped/canonicalized together. E.g., all + // float variants (FloatTy, DoubleTy, HalfTy, etc.) map to + // CanonicalTypeID::FloatTy. This helps reduce the vocabulary size + // and improves learning. Operands include Comparison predicates + // (ICmp/FCmp) along with other operand types. This can be extended to + // include other specializations in future. + enum class Section : unsigned { + Opcodes = 0, + CanonicalTypes = 1, + Operands = 2, + Predicates = 3, + MaxSections + }; + + // Use section-based storage for better organization and efficiency + VocabStorage Storage; + + static constexpr unsigned NumICmpPredicates = + static_cast(CmpInst::LAST_ICMP_PREDICATE) - + static_cast(CmpInst::FIRST_ICMP_PREDICATE) + 1; + static constexpr unsigned NumFCmpPredicates = + static_cast(CmpInst::LAST_FCMP_PREDICATE) - + static_cast(CmpInst::FIRST_FCMP_PREDICATE) + 1; +public: /// Canonical type IDs supported by IR2Vec Vocabulary enum class CanonicalTypeID : unsigned { FloatTy, @@ -207,59 +310,114 @@ class Vocabulary { static_cast(CanonicalTypeID::MaxCanonicalType); static constexpr unsigned MaxOperandKinds = static_cast(OperandKind::MaxOperandKind); + // CmpInst::Predicate has gaps. We want the vocabulary to be dense without + // empty slots. 
+ static constexpr unsigned MaxPredicateKinds = + NumICmpPredicates + NumFCmpPredicates; Vocabulary() = default; - LLVM_ABI Vocabulary(VocabVector &&Vocab) : Vocab(std::move(Vocab)) {} + LLVM_ABI Vocabulary(VocabStorage &&Storage) : Storage(std::move(Storage)) {} + + Vocabulary(const Vocabulary &) = delete; + Vocabulary &operator=(const Vocabulary &) = delete; - LLVM_ABI bool isValid() const { return Vocab.size() == NumCanonicalEntries; }; - LLVM_ABI unsigned getDimension() const; - /// Total number of entries (opcodes + canonicalized types + operand kinds) + Vocabulary(Vocabulary &&) = default; + Vocabulary &operator=(Vocabulary &&Other) = delete; + + LLVM_ABI bool isValid() const { + return Storage.size() == NumCanonicalEntries; + } + + LLVM_ABI unsigned getDimension() const { + assert(isValid() && "IR2Vec Vocabulary is invalid"); + return Storage.getDimension(); + } + + /// Total number of entries (opcodes + canonicalized types + operand kinds + + /// predicates) static constexpr size_t getCanonicalSize() { return NumCanonicalEntries; } /// Function to get vocabulary key for a given Opcode LLVM_ABI static StringRef getVocabKeyForOpcode(unsigned Opcode); /// Function to get vocabulary key for a given TypeID - LLVM_ABI static StringRef getVocabKeyForTypeID(Type::TypeID TypeID); + LLVM_ABI static StringRef getVocabKeyForTypeID(Type::TypeID TypeID) { + return getVocabKeyForCanonicalTypeID(getCanonicalTypeID(TypeID)); + } /// Function to get vocabulary key for a given OperandKind - LLVM_ABI static StringRef getVocabKeyForOperandKind(OperandKind Kind); + LLVM_ABI static StringRef getVocabKeyForOperandKind(OperandKind Kind) { + unsigned Index = static_cast(Kind); + assert(Index < MaxOperandKinds && "Invalid OperandKind"); + return OperandKindNames[Index]; + } /// Function to classify an operand into OperandKind LLVM_ABI static OperandKind getOperandKind(const Value *Op); - /// Functions to return the slot index or position of a given Opcode, TypeID, - /// or OperandKind in the vocabulary. - LLVM_ABI static unsigned getSlotIndex(unsigned Opcode); - LLVM_ABI static unsigned getSlotIndex(Type::TypeID TypeID); - LLVM_ABI static unsigned getSlotIndex(const Value &Op); + /// Function to get vocabulary key for a given predicate + LLVM_ABI static StringRef getVocabKeyForPredicate(CmpInst::Predicate P); + + /// Functions to return flat index + LLVM_ABI static unsigned getIndex(unsigned Opcode) { + assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode"); + return Opcode - 1; // Convert to zero-based index + } + + LLVM_ABI static unsigned getIndex(Type::TypeID TypeID) { + assert(static_cast(TypeID) < MaxTypeIDs && "Invalid type ID"); + return MaxOpcodes + static_cast(getCanonicalTypeID(TypeID)); + } + + LLVM_ABI static unsigned getIndex(const Value &Op) { + unsigned Index = static_cast(getOperandKind(&Op)); + assert(Index < MaxOperandKinds && "Invalid OperandKind"); + return OperandBaseOffset + Index; + } + + LLVM_ABI static unsigned getIndex(CmpInst::Predicate P) { + return PredicateBaseOffset + getPredicateLocalIndex(P); + } /// Accessors to get the embedding for a given entity. 
- LLVM_ABI const ir2vec::Embedding &operator[](unsigned Opcode) const; - LLVM_ABI const ir2vec::Embedding &operator[](Type::TypeID TypeId) const; - LLVM_ABI const ir2vec::Embedding &operator[](const Value &Arg) const; + LLVM_ABI const ir2vec::Embedding &operator[](unsigned Opcode) const { + assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode"); + return Storage[static_cast(Section::Opcodes)][Opcode - 1]; + } + + LLVM_ABI const ir2vec::Embedding &operator[](Type::TypeID TypeID) const { + assert(static_cast(TypeID) < MaxTypeIDs && "Invalid type ID"); + unsigned LocalIndex = static_cast(getCanonicalTypeID(TypeID)); + return Storage[static_cast(Section::CanonicalTypes)][LocalIndex]; + } + + LLVM_ABI const ir2vec::Embedding &operator[](const Value &Arg) const { + unsigned LocalIndex = static_cast(getOperandKind(&Arg)); + assert(LocalIndex < MaxOperandKinds && "Invalid OperandKind"); + return Storage[static_cast(Section::Operands)][LocalIndex]; + } + + LLVM_ABI const ir2vec::Embedding &operator[](CmpInst::Predicate P) const { + unsigned LocalIndex = getPredicateLocalIndex(P); + return Storage[static_cast(Section::Predicates)][LocalIndex]; + } /// Const Iterator type aliases - using const_iterator = VocabVector::const_iterator; + using const_iterator = VocabStorage::const_iterator; + const_iterator begin() const { assert(isValid() && "IR2Vec Vocabulary is invalid"); - return Vocab.begin(); + return Storage.begin(); } - const_iterator cbegin() const { - assert(isValid() && "IR2Vec Vocabulary is invalid"); - return Vocab.cbegin(); - } + const_iterator cbegin() const { return begin(); } const_iterator end() const { assert(isValid() && "IR2Vec Vocabulary is invalid"); - return Vocab.end(); + return Storage.end(); } - const_iterator cend() const { - assert(isValid() && "IR2Vec Vocabulary is invalid"); - return Vocab.cend(); - } + const_iterator cend() const { return end(); } /// Returns the string key for a given index position in the vocabulary. /// This is useful for debugging or printing the vocabulary. Do not use this @@ -267,14 +425,24 @@ class Vocabulary { LLVM_ABI static StringRef getStringKey(unsigned Pos); /// Create a dummy vocabulary for testing purposes. 
- LLVM_ABI static VocabVector createDummyVocabForTest(unsigned Dim = 1); + LLVM_ABI static VocabStorage createDummyVocabForTest(unsigned Dim = 1); LLVM_ABI bool invalidate(Module &M, const PreservedAnalyses &PA, ModuleAnalysisManager::Invalidator &Inv) const; private: constexpr static unsigned NumCanonicalEntries = - MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds; + MaxOpcodes + MaxCanonicalTypeIDs + MaxOperandKinds + MaxPredicateKinds; + + // Base offsets for flat index computation + constexpr static unsigned OperandBaseOffset = + MaxOpcodes + MaxCanonicalTypeIDs; + constexpr static unsigned PredicateBaseOffset = + OperandBaseOffset + MaxOperandKinds; + + /// Functions for predicate index calculations + static unsigned getPredicateLocalIndex(CmpInst::Predicate P); + static CmpInst::Predicate getPredicateFromLocalIndex(unsigned LocalIndex); /// String mappings for CanonicalTypeID values static constexpr StringLiteral CanonicalTypeNames[] = { @@ -322,10 +490,26 @@ class Vocabulary { /// Function to get vocabulary key for canonical type by enum LLVM_ABI static StringRef - getVocabKeyForCanonicalTypeID(CanonicalTypeID CType); + getVocabKeyForCanonicalTypeID(CanonicalTypeID CType) { + unsigned Index = static_cast(CType); + assert(Index < MaxCanonicalTypeIDs && "Invalid CanonicalTypeID"); + return CanonicalTypeNames[Index]; + } /// Function to convert TypeID to CanonicalTypeID - LLVM_ABI static CanonicalTypeID getCanonicalTypeID(Type::TypeID TypeID); + LLVM_ABI static CanonicalTypeID getCanonicalTypeID(Type::TypeID TypeID) { + unsigned Index = static_cast(TypeID); + assert(Index < MaxTypeIDs && "Invalid TypeID"); + return TypeIDMapping[Index]; + } + + /// Function to get the predicate enum value for a given index. Index is + /// relative to the predicates section of the vocabulary. E.g., Index 0 + /// corresponds to the first predicate. + LLVM_ABI static CmpInst::Predicate getPredicate(unsigned Index) { + assert(Index < MaxPredicateKinds && "Invalid predicate index"); + return getPredicateFromLocalIndex(Index); + } }; /// Embedder provides the interface to generate embeddings (vector @@ -418,22 +602,20 @@ class LLVM_ABI FlowAwareEmbedder : public Embedder { /// mapping between an entity of the IR (like opcode, type, argument, etc.) and /// its corresponding embedding. 
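The base-offset constants above define the flat index space that the new getIndex() overloads map into: opcodes first, then canonical types, then operand kinds, then predicates. A small worked example of that arithmetic with made-up counts (only the offset relationships mirror the header):

    constexpr unsigned MaxOpcodes = 60, MaxCanonicalTypeIDs = 10,
                       MaxOperandKinds = 4, MaxPredicateKinds = 26;    // illustrative
    constexpr unsigned OperandBaseOffset =
        MaxOpcodes + MaxCanonicalTypeIDs;                  // operands start at 70
    constexpr unsigned PredicateBaseOffset =
        OperandBaseOffset + MaxOperandKinds;               // predicates start at 74
    constexpr unsigned NumCanonicalEntries =
        PredicateBaseOffset + MaxPredicateKinds;           // 100 total slots
    static_assert(NumCanonicalEntries == MaxOpcodes + MaxCanonicalTypeIDs +
                                             MaxOperandKinds + MaxPredicateKinds);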
class IR2VecVocabAnalysis : public AnalysisInfoMixin { - using VocabVector = std::vector; using VocabMap = std::map; - VocabMap OpcVocab, TypeVocab, ArgVocab; - VocabVector Vocab; + std::optional Vocab; - Error readVocabulary(); - Error parseVocabSection(StringRef Key, const json::Value &ParsedVocabValue, - VocabMap &TargetVocab, unsigned &Dim); - void generateNumMappedVocab(); + Error readVocabulary(VocabMap &OpcVocab, VocabMap &TypeVocab, + VocabMap &ArgVocab); + void generateVocabStorage(VocabMap &OpcVocab, VocabMap &TypeVocab, + VocabMap &ArgVocab); void emitError(Error Err, LLVMContext &Ctx); public: LLVM_ABI static AnalysisKey Key; IR2VecVocabAnalysis() = default; - LLVM_ABI explicit IR2VecVocabAnalysis(const VocabVector &Vocab); - LLVM_ABI explicit IR2VecVocabAnalysis(VocabVector &&Vocab); + LLVM_ABI explicit IR2VecVocabAnalysis(ir2vec::VocabStorage &&Vocab) + : Vocab(std::move(Vocab)) {} using Result = ir2vec::Vocabulary; LLVM_ABI Result run(Module &M, ModuleAnalysisManager &MAM); }; diff --git a/llvm/include/llvm/Analysis/InterestingMemoryOperand.h b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h index abcf9a841d5fc..e8124f72a1a81 100644 --- a/llvm/include/llvm/Analysis/InterestingMemoryOperand.h +++ b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h @@ -32,14 +32,19 @@ class InterestingMemoryOperand { Value *MaybeEVL; // The Stride Value, if we're looking at a strided load/store. Value *MaybeStride; + // The Offset Value, if we're looking at a indexed load/store. The + // offset actually means byte-offset instead of array index. + Value *MaybeByteOffset; InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite, class Type *OpType, MaybeAlign Alignment, Value *MaybeMask = nullptr, Value *MaybeEVL = nullptr, - Value *MaybeStride = nullptr) + Value *MaybeStride = nullptr, + Value *MaybeByteOffset = nullptr) : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment), - MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) { + MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride), + MaybeByteOffset(MaybeByteOffset) { const DataLayout &DL = I->getDataLayout(); TypeStoreSize = DL.getTypeStoreSizeInBits(OpType); PtrUse = &I->getOperandUse(OperandNo); diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h index 49a795b5fd6a7..84b4ad7c1d5a9 100644 --- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h +++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h @@ -413,30 +413,29 @@ class MemoryDepChecker { uint64_t MaxStride; std::optional CommonStride; - /// TypeByteSize is a pair of alloc sizes of the source and sink. - std::pair TypeByteSize; - - // HasSameSize is a boolean indicating whether the store sizes of the source - // and sink are equal. - // TODO: Remove this. - bool HasSameSize; + /// TypeByteSize is either the common store size of both accesses, or 0 when + /// store sizes mismatch. 
+ uint64_t TypeByteSize; bool AIsWrite; bool BIsWrite; DepDistanceStrideAndSizeInfo(const SCEV *Dist, uint64_t MaxStride, std::optional CommonStride, - std::pair TypeByteSize, - bool HasSameSize, bool AIsWrite, bool BIsWrite) + uint64_t TypeByteSize, bool AIsWrite, + bool BIsWrite) : Dist(Dist), MaxStride(MaxStride), CommonStride(CommonStride), - TypeByteSize(TypeByteSize), HasSameSize(HasSameSize), - AIsWrite(AIsWrite), BIsWrite(BIsWrite) {} + TypeByteSize(TypeByteSize), AIsWrite(AIsWrite), BIsWrite(BIsWrite) {} }; /// Get the dependence distance, strides, type size and whether it is a write - /// for the dependence between A and B. Returns either a DepType, the - /// dependence result, if it could already be determined, or a - /// DepDistanceStrideAndSizeInfo struct. + /// for the dependence between A and B. Returns a DepType, if we can prove + /// there's no dependence or the analysis fails. Outlined to lambda to limit + /// he scope of various temporary variables, like A/BPtr, StrideA/BPtr and + /// others. Returns either the dependence result, if it could already be + /// determined, or a DepDistanceStrideAndSizeInfo struct, noting that + /// TypeByteSize could be 0 when store sizes mismatch, and this should be + /// checked in the caller. std::variant getDependenceDistanceStrideAndSize(const MemAccessInfo &A, Instruction *AInst, const MemAccessInfo &B, @@ -725,8 +724,9 @@ class LoopAccessInfo { /// Return true if the block BB needs to be predicated in order for the loop /// to be vectorized. - LLVM_ABI static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, - DominatorTree *DT); + LLVM_ABI static bool blockNeedsPredication(const BasicBlock *BB, + const Loop *TheLoop, + const DominatorTree *DT); /// Returns true if value \p V is loop invariant. LLVM_ABI bool isInvariant(Value *V) const; diff --git a/llvm/include/llvm/Analysis/MemoryProfileInfo.h b/llvm/include/llvm/Analysis/MemoryProfileInfo.h index be690a49767b4..571caf95f275d 100644 --- a/llvm/include/llvm/Analysis/MemoryProfileInfo.h +++ b/llvm/include/llvm/Analysis/MemoryProfileInfo.h @@ -59,14 +59,6 @@ LLVM_ABI std::string getAllocTypeAttributeString(AllocationType Type); /// True if the AllocTypes bitmask contains just a single type. LLVM_ABI bool hasSingleAllocType(uint8_t AllocTypes); -/// Removes any existing "ambiguous" memprof attribute. Called before we apply a -/// specific allocation type such as "cold", "notcold", or "hot". -LLVM_ABI void removeAnyExistingAmbiguousAttribute(CallBase *CB); - -/// Adds an "ambiguous" memprof attribute to call with a matched allocation -/// profile but that we haven't yet been able to disambiguate. -LLVM_ABI void addAmbiguousAttribute(CallBase *CB); - /// Class to build a trie of call stack contexts for a particular profiled /// allocation call, along with their associated allocation types. /// The allocation will be at the root of the trie, which is then used to diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h index 7a45ae93b185b..164b46b54890b 100644 --- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h +++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h @@ -184,6 +184,7 @@ m_scev_PtrToInt(const Op0_t &Op0) { /// Match a binary SCEV. 
template struct SCEVBinaryExpr_match { Op0_t Op0; @@ -192,6 +193,10 @@ struct SCEVBinaryExpr_match { SCEVBinaryExpr_match(Op0_t Op0, Op1_t Op1) : Op0(Op0), Op1(Op1) {} bool match(const SCEV *S) const { + if (auto WrappingS = dyn_cast(S)) + if (WrappingS->getNoWrapFlags(WrapFlags) != WrapFlags) + return false; + auto *E = dyn_cast(S); return E && E->getNumOperands() == 2 && ((Op0.match(E->getOperand(0)) && Op1.match(E->getOperand(1))) || @@ -201,10 +206,12 @@ struct SCEVBinaryExpr_match { }; template -inline SCEVBinaryExpr_match +inline SCEVBinaryExpr_match m_scev_Binary(const Op0_t &Op0, const Op1_t &Op1) { - return SCEVBinaryExpr_match(Op0, Op1); + return SCEVBinaryExpr_match(Op0, + Op1); } template @@ -220,9 +227,17 @@ m_scev_Mul(const Op0_t &Op0, const Op1_t &Op1) { } template -inline SCEVBinaryExpr_match +inline SCEVBinaryExpr_match m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1) { - return m_scev_Binary(Op0, Op1); + return m_scev_Binary(Op0, + Op1); +} + +template +inline SCEVBinaryExpr_match +m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1) { + return m_scev_Binary(Op0, + Op1); } template diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h index c04380667a640..8944e73688eed 100644 --- a/llvm/include/llvm/BinaryFormat/DXContainer.h +++ b/llvm/include/llvm/BinaryFormat/DXContainer.h @@ -185,6 +185,15 @@ enum class DescriptorRangeFlags : uint32_t { LLVM_ABI ArrayRef> getDescriptorRangeFlags(); +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) Enum = Num, +enum class StaticSamplerFlags : uint32_t { +#include "DXContainerConstants.def" + + LLVM_MARK_AS_BITMASK_ENUM(NonNormalizedCoordinates) +}; + +LLVM_ABI ArrayRef> getStaticSamplerFlags(); + #define ROOT_PARAMETER(Val, Enum) Enum = Val, enum class RootParameterType : uint32_t { #include "DXContainerConstants.def" @@ -813,12 +822,29 @@ struct DescriptorRange { } }; } // namespace v2 + +namespace v3 { +struct StaticSampler : public v1::StaticSampler { + uint32_t Flags; + + StaticSampler() = default; + explicit StaticSampler(v1::StaticSampler &Base) + : v1::StaticSampler(Base), Flags(0U) {} + + void swapBytes() { + v1::StaticSampler::swapBytes(); + sys::swapByteOrder(Flags); + } +}; + +} // namespace v3 } // namespace RTS0 // D3D_ROOT_SIGNATURE_VERSION enum class RootSignatureVersion { V1_0 = 0x1, V1_1 = 0x2, + V1_2 = 0x3, }; } // namespace dxbc diff --git a/llvm/include/llvm/BinaryFormat/DXContainerConstants.def b/llvm/include/llvm/BinaryFormat/DXContainerConstants.def index 889653611d79a..f576d958037cd 100644 --- a/llvm/include/llvm/BinaryFormat/DXContainerConstants.def +++ b/llvm/include/llvm/BinaryFormat/DXContainerConstants.def @@ -104,6 +104,16 @@ DESCRIPTOR_RANGE_FLAG(0x10000, DescriptorsStaticKeepingBufferBoundsChecks, DESCR #undef DESCRIPTOR_RANGE_FLAG #endif // DESCRIPTOR_RANGE_FLAG +// STATIC_SAMPLER_FLAG(flag value, name, flag). 
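The StaticSamplerFlags enum introduced above (its values appear in the .def block that follows) is a scoped bitmask enum; LLVM_MARK_AS_BITMASK_ENUM from llvm/ADT/BitmaskEnum.h enables the bitwise operators, so flags compose without casts. A hedged usage sketch:

    // Assumed to be written inside namespace llvm (or with using namespace llvm)
    // so the BitmaskEnum operators are visible.
    using llvm::dxbc::StaticSamplerFlags;

    StaticSamplerFlags F = StaticSamplerFlags::UintBorderColor |
                           StaticSamplerFlags::NonNormalizedCoordinates;
    bool NonNormalized =
        (F & StaticSamplerFlags::NonNormalizedCoordinates) !=
        StaticSamplerFlags::None;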
+#ifdef STATIC_SAMPLER_FLAG + +STATIC_SAMPLER_FLAG(0x0, None, SAMPLER_FLAG_NONE) +STATIC_SAMPLER_FLAG(0x1, UintBorderColor, SAMPLER_FLAG_UINT_BORDER_COLOR) +STATIC_SAMPLER_FLAG(0x2, NonNormalizedCoordinates, SAMPLER_FLAG_NON_NORMALIZED_COORDINATES) + +#undef STATIC_SAMPLER_FLAG +#endif // STATIC_SAMPLER_FLAG + #ifdef ROOT_PARAMETER ROOT_PARAMETER(0, DescriptorTable) diff --git a/llvm/include/llvm/Bitstream/BitstreamWriter.h b/llvm/include/llvm/Bitstream/BitstreamWriter.h index 5f53681320ce4..a2938642f824a 100644 --- a/llvm/include/llvm/Bitstream/BitstreamWriter.h +++ b/llvm/include/llvm/Bitstream/BitstreamWriter.h @@ -87,7 +87,7 @@ class BitstreamWriter { void WriteWord(unsigned Value) { Value = - support::endian::byte_swap(Value); + support::endian::byte_swap(Value, llvm::endianness::little); Buffer.append(reinterpret_cast(&Value), reinterpret_cast(&Value + 1)); } diff --git a/llvm/include/llvm/CAS/FileOffset.h b/llvm/include/llvm/CAS/FileOffset.h new file mode 100644 index 0000000000000..21d045e8c9d78 --- /dev/null +++ b/llvm/include/llvm/CAS/FileOffset.h @@ -0,0 +1,39 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// \file +/// This file declares interface for FileOffset that represent stored data at an +/// offset from the beginning of a file. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CAS_FILEOFFSET_H +#define LLVM_CAS_FILEOFFSET_H + +#include + +namespace llvm::cas { + +/// FileOffset is a wrapper around `uint64_t` to represent the offset of data +/// from the beginning of the file. +class FileOffset { +public: + uint64_t get() const { return Offset; } + + explicit operator bool() const { return Offset; } + + FileOffset() = default; + explicit FileOffset(uint64_t Offset) : Offset(Offset) {} + +private: + uint64_t Offset = 0; +}; + +} // namespace llvm::cas + +#endif // LLVM_CAS_FILEOFFSET_H diff --git a/llvm/include/llvm/CAS/OnDiskTrieRawHashMap.h b/llvm/include/llvm/CAS/OnDiskTrieRawHashMap.h new file mode 100644 index 0000000000000..5e41bf6ab571e --- /dev/null +++ b/llvm/include/llvm/CAS/OnDiskTrieRawHashMap.h @@ -0,0 +1,236 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// \file +/// This file declares interface for OnDiskTrieRawHashMap, a thread-safe and +/// (mostly) lock-free hash map stored as trie and backed by persistent files on +/// disk. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CAS_ONDISKTRIERAWHASHMAP_H +#define LLVM_CAS_ONDISKTRIERAWHASHMAP_H + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/STLFunctionalExtras.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/CAS/FileOffset.h" +#include "llvm/Support/Error.h" +#include + +namespace llvm { + +class raw_ostream; + +namespace cas { + +/// OnDiskTrieRawHashMap is a persistent trie data structure used as hash maps. +/// The keys are fixed length, and are expected to be binary hashes with a +/// normal distribution. +/// +/// - Thread-safety is achieved through the use of atomics within a shared +/// memory mapping. Atomic access does not work on networked filesystems. +/// - Filesystem locks are used, but only sparingly: +/// - during initialization, for creating / opening an existing store; +/// - for the lifetime of the instance, a shared/reader lock is held +/// - during destruction, if there are no concurrent readers, to shrink the +/// files to their minimum size. +/// - Path is used as a directory: +/// - "index" stores the root trie and subtries. +/// - "data" stores (most of) the entries, like a bump-ptr-allocator. +/// - Large entries are stored externally in a file named by the key. +/// - Code is system-dependent and binary format itself is not portable. These +/// are not artifacts that can/should be moved between different systems; they +/// are only appropriate for local storage. +class OnDiskTrieRawHashMap { +public: + LLVM_DUMP_METHOD void dump() const; + void + print(raw_ostream &OS, + function_ref)> PrintRecordData = nullptr) const; + +public: + /// Const value proxy to access the records stored in TrieRawHashMap. + struct ConstValueProxy { + ConstValueProxy() = default; + ConstValueProxy(ArrayRef Hash, ArrayRef Data) + : Hash(Hash), Data(Data) {} + ConstValueProxy(ArrayRef Hash, StringRef Data) + : Hash(Hash), Data(Data.begin(), Data.size()) {} + + ArrayRef Hash; + ArrayRef Data; + }; + + /// Value proxy to access the records stored in TrieRawHashMap. + struct ValueProxy { + operator ConstValueProxy() const { return ConstValueProxy(Hash, Data); } + + ValueProxy() = default; + ValueProxy(ArrayRef Hash, MutableArrayRef Data) + : Hash(Hash), Data(Data) {} + + ArrayRef Hash; + MutableArrayRef Data; + }; + + /// Validate the trie data structure. + /// + /// Callback receives the file offset to the data entry and the data stored. + Error validate( + function_ref RecordVerifier) const; + + /// Check the valid range of file offset for OnDiskTrieRawHashMap. + static bool validOffset(FileOffset Offset) { + return Offset.get() < (1LL << 48); + } + +public: + /// Template class to implement a `pointer` type into the trie data structure. + /// + /// It provides pointer-like operation, e.g., dereference to get underlying + /// data. It also reserves the top 16 bits of the pointer value, which can be + /// used to pack additional information if needed. 
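The PointerImpl described above reserves the top 16 bits of the 64-bit value by splitting the file offset into 32 low bits and 16 high bits, which is also why validOffset() caps offsets at 2^48. A worked example of the split performed by the class below and the reassembly its getOffset() does (values illustrative):

    #include <cassert>
    #include <cstdint>

    void packingExample() {
      uint64_t Offset = 0x000012345678ABCDull;          // must stay below 1ULL << 48
      uint32_t OffsetLow32  = (uint32_t)Offset;         // 0x5678ABCD
      uint16_t OffsetHigh16 = (uint16_t)(Offset >> 32); // 0x1234
      uint64_t Rebuilt = OffsetLow32 | (uint64_t)OffsetHigh16 << 32;
      assert(Offset < (1ULL << 48) && Rebuilt == Offset);
    }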
+ template class PointerImpl { + public: + FileOffset getOffset() const { + return FileOffset(OffsetLow32 | (uint64_t)OffsetHigh16 << 32); + } + + explicit operator bool() const { return IsValue; } + + const ProxyT &operator*() const { + assert(IsValue); + return Value; + } + const ProxyT *operator->() const { + assert(IsValue); + return &Value; + } + + PointerImpl() = default; + + protected: + PointerImpl(ProxyT Value, FileOffset Offset, bool IsValue = true) + : Value(Value), OffsetLow32((uint64_t)Offset.get()), + OffsetHigh16((uint64_t)Offset.get() >> 32), IsValue(IsValue) { + if (IsValue) + assert(validOffset(Offset)); + } + + ProxyT Value; + uint32_t OffsetLow32 = 0; + uint16_t OffsetHigh16 = 0; + + // True if points to a value (not a "nullptr"). Use an extra field because + // 0 can be a valid offset. + bool IsValue = false; + }; + + class pointer; + class const_pointer : public PointerImpl { + public: + const_pointer() = default; + + private: + friend class pointer; + friend class OnDiskTrieRawHashMap; + using const_pointer::PointerImpl::PointerImpl; + }; + + class pointer : public PointerImpl { + public: + operator const_pointer() const { + return const_pointer(Value, getOffset(), IsValue); + } + + pointer() = default; + + private: + friend class OnDiskTrieRawHashMap; + using pointer::PointerImpl::PointerImpl; + }; + + /// Find the value from hash. + /// + /// \returns pointer to the value if exists, otherwise returns a non-value + /// pointer that evaluates to `false` when convert to boolean. + const_pointer find(ArrayRef Hash) const; + + /// Helper function to recover a pointer into the trie from file offset. + Expected recoverFromFileOffset(FileOffset Offset) const; + + using LazyInsertOnConstructCB = + function_ref; + using LazyInsertOnLeakCB = + function_ref; + + /// Insert lazily. + /// + /// \p OnConstruct is called when ready to insert a value, after allocating + /// space for the data. It is called at most once. + /// + /// \p OnLeak is called only if \p OnConstruct has been called and a race + /// occurred before insertion, causing the tentative offset and data to be + /// abandoned. This allows clients to clean up other results or update any + /// references. + /// + /// NOTE: Does *not* guarantee that \p OnConstruct is only called on success. + /// The in-memory \a TrieRawHashMap uses LazyAtomicPointer to synchronize + /// simultaneous writes, but that seems dangerous to use in a memory-mapped + /// file in case a process crashes in the busy state. + Expected insertLazy(ArrayRef Hash, + LazyInsertOnConstructCB OnConstruct = nullptr, + LazyInsertOnLeakCB OnLeak = nullptr); + + Expected insert(const ConstValueProxy &Value) { + return insertLazy(Value.Hash, [&](FileOffset, ValueProxy Allocated) { + assert(Allocated.Hash == Value.Hash); + assert(Allocated.Data.size() == Value.Data.size()); + llvm::copy(Value.Data, Allocated.Data.begin()); + }); + } + + size_t size() const; + size_t capacity() const; + + /// Gets or creates a file at \p Path with a hash-mapped trie named \p + /// TrieName. The hash size is \p NumHashBits (in bits) and the records store + /// data of size \p DataSize (in bytes). + /// + /// \p MaxFileSize controls the maximum file size to support, limiting the + /// size of the \a mapped_file_region. \p NewFileInitialSize is the starting + /// size if a new file is created. + /// + /// \p NewTableNumRootBits and \p NewTableNumSubtrieBits are hints to + /// configure the trie, if it doesn't already exist. 
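Putting the pieces above together, a hedged sketch of creating a map and probing it. The element type of the hash ArrayRef and the exact Expected<...> payloads are assumptions (this excerpt omits the template arguments); the path and sizes are illustrative, and the fragment is assumed to live in a function that can return an llvm::Error:

    auto Trie = llvm::cas::OnDiskTrieRawHashMap::create(
        "/tmp/example.cas", "example-trie", /*NumHashBits=*/160, /*DataSize=*/8,
        /*MaxFileSize=*/1ULL << 30, /*NewFileInitialSize=*/std::nullopt);
    if (!Trie)
      return Trie.takeError();

    std::array<uint8_t, 20> Hash = {/* 160-bit key */};
    if (llvm::cas::OnDiskTrieRawHashMap::const_pointer P = Trie->find(Hash)) {
      llvm::cas::FileOffset Off = P.getOffset(); // reusable via recoverFromFileOffset()
      (void)Off;                                 // the record bytes are in P->Data
    }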
+ /// + /// \pre NumHashBits is a multiple of 8 (byte-aligned). + static Expected + create(const Twine &Path, const Twine &TrieName, size_t NumHashBits, + uint64_t DataSize, uint64_t MaxFileSize, + std::optional NewFileInitialSize, + std::optional NewTableNumRootBits = std::nullopt, + std::optional NewTableNumSubtrieBits = std::nullopt); + + OnDiskTrieRawHashMap(OnDiskTrieRawHashMap &&RHS); + OnDiskTrieRawHashMap &operator=(OnDiskTrieRawHashMap &&RHS); + ~OnDiskTrieRawHashMap(); + +private: + struct ImplType; + explicit OnDiskTrieRawHashMap(std::unique_ptr Impl); + std::unique_ptr Impl; +}; + +} // namespace cas +} // namespace llvm + +#endif // LLVM_CAS_ONDISKTRIERAWHASHMAP_H diff --git a/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h b/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h index f0cfa7663c5fa..82dd5feb31dba 100644 --- a/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h +++ b/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h @@ -50,6 +50,10 @@ struct FunctionPathAndClusterInfo { // the edge a -> b (a is not cloned). The index of the path in this vector // determines the `UniqueBBID::CloneID` of the cloned blocks in that path. SmallVector> ClonePaths; + // Node counts for each basic block. + DenseMap NodeCounts; + // Edge counts for each edge, stored as a nested map. + DenseMap> EdgeCounts; }; class BasicBlockSectionsProfileReader { @@ -77,6 +81,11 @@ class BasicBlockSectionsProfileReader { SmallVector> getClonePathsForFunction(StringRef FuncName) const; + // Returns the profile count for the edge from `SrcBBID` to `SinkBBID` in + // function `FuncName` or zero if it does not exist. + uint64_t getEdgeCount(StringRef FuncName, const UniqueBBID &SrcBBID, + const UniqueBBID &SinkBBID) const; + private: StringRef getAliasName(StringRef FuncName) const { auto R = FuncAliasMap.find(FuncName); @@ -183,6 +192,9 @@ class BasicBlockSectionsProfileReaderWrapperPass : public ImmutablePass { SmallVector> getClonePathsForFunction(StringRef FuncName) const; + uint64_t getEdgeCount(StringRef FuncName, const UniqueBBID &SrcBBID, + const UniqueBBID &DestBBID) const; + // Initializes the FunctionNameToDIFilename map for the current module and // then reads the profile for the matching functions. bool doInitialization(Module &M) override; diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h index dce423fc1b18b..42ddb32d24093 100644 --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -2929,7 +2929,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase { CostKind); EVT VT = TLI->getValueType(DL, CmpTy, true); - if (TLI->shouldExpandCmpUsingSelects(VT)) { + if (TLI->preferSelectsOverBooleanArithmetic(VT)) { // x < y ? -1 : (x > y ? 1 : 0) Cost += 2 * thisT()->getCmpSelInstrCost( BinaryOperator::Select, RetTy, CondTy, diff --git a/llvm/include/llvm/CodeGen/CalcSpillWeights.h b/llvm/include/llvm/CodeGen/CalcSpillWeights.h index 5a86dd9650fbd..11d1c16561507 100644 --- a/llvm/include/llvm/CodeGen/CalcSpillWeights.h +++ b/llvm/include/llvm/CodeGen/CalcSpillWeights.h @@ -81,6 +81,14 @@ class VirtRegMap; static bool isRematerializable(const LiveInterval &LI, const LiveIntervals &LIS, const VirtRegMap &VRM, + const MachineRegisterInfo &MRI, + const TargetInstrInfo &TII); + + /// \returns true if all registers used by \p MI are also available with the + /// same value at \p UseIdx. 
+ static bool allUsesAvailableAt(const MachineInstr *MI, SlotIndex UseIdx, + const LiveIntervals &LIS, + const MachineRegisterInfo &MRI, const TargetInstrInfo &TII); protected: diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h index 22569aab236af..c0e426c4a8db3 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h @@ -300,6 +300,10 @@ class LegalizerHelper { Type *OpType, LostDebugLocObserver &LocObserver); + LegalizeResult emitModfLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, + unsigned Size, Type *OpType, + LostDebugLocObserver &LocObserver); + public: /// Return the alignment to use for a stack temporary object with the given /// type. diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h index 99d3cd0aac85c..40c7792f7e8a2 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -2184,6 +2184,13 @@ class LLVM_ABI MachineIRBuilder { return buildInstr(TargetOpcode::G_FSINCOS, {Sin, Cos}, {Src}, Flags); } + /// Build and insert \p Fract, \p Int = G_FMODF \p Src + MachineInstrBuilder buildModf(const DstOp &Fract, const DstOp &Int, + const SrcOp &Src, + std::optional Flags = std::nullopt) { + return buildInstr(TargetOpcode::G_FMODF, {Fract, Int}, {Src}, Flags); + } + /// Build and insert \p Res = G_FCOPYSIGN \p Op0, \p Op1 MachineInstrBuilder buildFCopysign(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1) { @@ -2463,6 +2470,11 @@ class LLVM_ABI MachineIRBuilder { return buildInstr(TargetOpcode::G_GET_ROUNDING, {Dst}, {}); } + /// Build and insert G_SET_ROUNDING + MachineInstrBuilder buildSetRounding(const SrcOp &Src) { + return buildInstr(TargetOpcode::G_SET_ROUNDING, {}, {Src}); + } + virtual MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef DstOps, ArrayRef SrcOps, std::optional Flags = std::nullopt); diff --git a/llvm/include/llvm/CodeGen/LiveInterval.h b/llvm/include/llvm/CodeGen/LiveInterval.h index e1c5717f5face..f18c177b1c35b 100644 --- a/llvm/include/llvm/CodeGen/LiveInterval.h +++ b/llvm/include/llvm/CodeGen/LiveInterval.h @@ -83,8 +83,16 @@ namespace llvm { /// Mark this value as unused. void markUnused() { def = SlotIndex(); } + + LLVM_ABI void print(raw_ostream &OS) const; + LLVM_ABI void dump() const; }; + inline raw_ostream &operator<<(raw_ostream &OS, const VNInfo &VNI) { + VNI.print(OS); + return OS; + } + /// Result of a LiveRange query. This class hides the implementation details /// of live ranges, and it should be used as the primary interface for /// examining live ranges around instructions. diff --git a/llvm/include/llvm/CodeGen/LiveRangeEdit.h b/llvm/include/llvm/CodeGen/LiveRangeEdit.h index db1785de255f0..6473138a801f7 100644 --- a/llvm/include/llvm/CodeGen/LiveRangeEdit.h +++ b/llvm/include/llvm/CodeGen/LiveRangeEdit.h @@ -189,11 +189,6 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate { explicit Remat(const VNInfo *ParentVNI) : ParentVNI(ParentVNI) {} }; - /// allUsesAvailableAt - Return true if all registers used by OrigMI at - /// OrigIdx are also available with the same value at UseIdx. - bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx, - SlotIndex UseIdx) const; - /// canRematerializeAt - Determine if ParentVNI can be rematerialized at /// UseIdx. It is assumed that parent_.getVNINfoAt(UseIdx) == ParentVNI. 
bool canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx); diff --git a/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/llvm/include/llvm/CodeGen/MIRYamlMapping.h index c7304e386b542..e80c13885805b 100644 --- a/llvm/include/llvm/CodeGen/MIRYamlMapping.h +++ b/llvm/include/llvm/CodeGen/MIRYamlMapping.h @@ -378,6 +378,8 @@ struct ScalarEnumerationTraits { IO.enumCase(ID, "default", TargetStackID::Default); IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill); IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector); + IO.enumCase(ID, "scalable-predicate-vector", + TargetStackID::ScalablePredicateVector); IO.enumCase(ID, "wasm-local", TargetStackID::WasmLocal); IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc); } diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h index 94139b64a3e30..71739278cf513 100644 --- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h +++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h @@ -505,6 +505,11 @@ class MachineBasicBlock LLVM_ABI void removeLiveIn(MCRegister Reg, LaneBitmask LaneMask = LaneBitmask::getAll()); + /// Remove the specified register from any overlapping live-in. The method is + /// subreg-aware and removes Reg and its subregs from the live-in set. It also + /// clears the corresponding bitmask from its live-in super registers. + LLVM_ABI void removeLiveInOverlappedWith(MCRegister Reg); + /// Return true if the specified register is in the live in set. LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask = LaneBitmask::getAll()) const; @@ -1035,7 +1040,9 @@ class MachineBasicBlock /// Succ, can be split. If this returns true a subsequent call to /// SplitCriticalEdge is guaranteed to return a valid basic block if /// no changes occurred in the meantime. - LLVM_ABI bool canSplitCriticalEdge(const MachineBasicBlock *Succ) const; + LLVM_ABI bool + canSplitCriticalEdge(const MachineBasicBlock *Succ, + const MachineLoopInfo *MLI = nullptr) const; void pop_front() { Insts.pop_front(); } void pop_back() { Insts.pop_back(); } @@ -1287,6 +1294,15 @@ class MachineBasicBlock // Helper function for MIRPrinter. LLVM_ABI bool canPredictBranchProbabilities() const; + /// Iterate over block PHI instructions and remove all incoming values for + /// PredMBB. + /// + /// This method does not erase PHI instructions even if they are left with a + /// single incoming value or with no incoming values at all. It is the caller's + /// responsibility to decide how to process such PHI instructions afterwards. + LLVM_ABI void + removePHIsIncomingValuesForPredecessor(const MachineBasicBlock &PredMBB); + private: /// Return probability iterator corresponding to the I successor iterator. probability_iterator getProbabilityIterator(succ_iterator I); diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h index 00c734330a40b..50ce93104ab53 100644 --- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h +++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h @@ -497,7 +497,18 @@ class MachineFrameInfo { /// Should this stack ID be considered in MaxAlignment.
bool contributesToMaxAlignment(uint8_t StackID) { return StackID == TargetStackID::Default || - StackID == TargetStackID::ScalableVector; + StackID == TargetStackID::ScalableVector || + StackID == TargetStackID::ScalablePredicateVector; + } + + bool hasScalableStackID(int ObjectIdx) const { + uint8_t StackID = getStackID(ObjectIdx); + return isScalableStackID(StackID); + } + + bool isScalableStackID(uint8_t StackID) const { + return StackID == TargetStackID::ScalableVector || + StackID == TargetStackID::ScalablePredicateVector; } /// setObjectAlignment - Change the alignment of the specified stack object. diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h index 10a9b1ff1411d..4fcb7f36e0238 100644 --- a/llvm/include/llvm/CodeGen/MachineInstr.h +++ b/llvm/include/llvm/CodeGen/MachineInstr.h @@ -1229,7 +1229,7 @@ class MachineInstr /// Returns true if this instruction is a candidate for remat. /// This flag is deprecated, please don't use it anymore. If this - /// flag is set, the isReallyTriviallyReMaterializable() method is called to + /// flag is set, the isReMaterializableImpl() method is called to /// verify the instruction is really rematerializable. bool isRematerializable(QueryType Type = AllInBundle) const { // It's only possible to re-mat a bundle if all bundled instructions are @@ -2000,6 +2000,15 @@ class MachineInstr /// and point them to \p Reg instead. LLVM_ABI void changeDebugValuesDefReg(Register Reg); + /// Remove all incoming values of this PHI instruction for the given block. + /// + /// Returns the number of operands deleted. + /// + /// This method does not erase the PHI instruction even if it is left with a + /// single incoming value or with no incoming values at all. It is the + /// caller's responsibility to decide how to process the PHI afterwards. + LLVM_ABI unsigned removePHIIncomingValueFor(const MachineBasicBlock &MBB); + /// Sets all register debug operands in this debug value instruction to be /// undef. void setDebugValueUndef() { diff --git a/llvm/include/llvm/CodeGen/RDFGraph.h b/llvm/include/llvm/CodeGen/RDFGraph.h index 8a93afbcb5491..6bb6033a8a2f2 100644 --- a/llvm/include/llvm/CodeGen/RDFGraph.h +++ b/llvm/include/llvm/CodeGen/RDFGraph.h @@ -447,7 +447,7 @@ struct NodeAllocator { AllocatorTy MemPool; }; -using RegisterSet = std::set; +using RegisterSet = std::set; struct TargetOperandInfo { TargetOperandInfo(const TargetInstrInfo &tii) : TII(tii) {} diff --git a/llvm/include/llvm/CodeGen/RDFRegisters.h b/llvm/include/llvm/CodeGen/RDFRegisters.h index 4a9a4063c9e83..82027cad53bdb 100644 --- a/llvm/include/llvm/CodeGen/RDFRegisters.h +++ b/llvm/include/llvm/CodeGen/RDFRegisters.h @@ -199,6 +199,33 @@ struct PhysicalRegisterInfo { std::vector AliasInfos; }; +struct RegisterRefEqualTo { + constexpr RegisterRefEqualTo(const llvm::rdf::PhysicalRegisterInfo &pri) + : PRI(&pri) {} + + bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const { + return PRI->equal_to(A, B); + } + +private: + // Make it a pointer just in case. See comment in `RegisterRefLess` below. + const llvm::rdf::PhysicalRegisterInfo *PRI; +}; + +struct RegisterRefLess { + constexpr RegisterRefLess(const llvm::rdf::PhysicalRegisterInfo &pri) + : PRI(&pri) {} + + bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const { + return PRI->less(A, B); + } + +private: + // Make it a pointer because apparently some versions of MSVC use std::swap + // on the comparator object.
+ const llvm::rdf::PhysicalRegisterInfo *PRI; +}; + struct RegisterAggr { RegisterAggr(const PhysicalRegisterInfo &pri) : Units(pri.getTRI().getNumRegUnits()), PRI(pri) {} @@ -334,18 +361,6 @@ template <> struct hash { } }; -template <> struct equal_to { - constexpr equal_to(const llvm::rdf::PhysicalRegisterInfo &pri) : PRI(&pri) {} - - bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const { - return PRI->equal_to(A, B); - } - -private: - // Make it a pointer just in case. See comment in `less` below. - const llvm::rdf::PhysicalRegisterInfo *PRI; -}; - template <> struct equal_to { bool operator()(const llvm::rdf::RegisterAggr &A, const llvm::rdf::RegisterAggr &B) const { @@ -353,23 +368,10 @@ template <> struct equal_to { } }; -template <> struct less { - constexpr less(const llvm::rdf::PhysicalRegisterInfo &pri) : PRI(&pri) {} - - bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const { - return PRI->less(A, B); - } - -private: - // Make it a pointer because apparently some versions of MSVC use std::swap - // on the std::less specialization. - const llvm::rdf::PhysicalRegisterInfo *PRI; -}; - } // namespace std namespace llvm::rdf { -using RegisterSet = std::set>; +using RegisterSet = std::set; } // namespace llvm::rdf #endif // LLVM_CODEGEN_RDFREGISTERS_H diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h index 0e29e45752a9f..75696faf114cc 100644 --- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h +++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h @@ -32,6 +32,7 @@ enum Value { SGPRSpill = 1, ScalableVector = 2, WasmLocal = 3, + ScalablePredicateVector = 4, NoAlloc = 255 }; } diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h index f2ad5ee249b46..175f205328361 100644 --- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h +++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h @@ -168,10 +168,22 @@ class LLVM_ABI TargetInstrInfo : public MCInstrInfo { /// registers so that the instructions result is independent of the place /// in the function. bool isTriviallyReMaterializable(const MachineInstr &MI) const { + if (!isReMaterializable(MI)) + return false; + for (const MachineOperand &MO : MI.all_uses()) { + if (MO.getReg().isVirtual()) + return false; + } + return true; + } + + /// Return true if the instruction would be materializable at a point + /// in the containing function where all virtual register uses were + /// known to be live and available in registers. + bool isReMaterializable(const MachineInstr &MI) const { return (MI.getOpcode() == TargetOpcode::IMPLICIT_DEF && MI.getNumOperands() == 1) || - (MI.getDesc().isRematerializable() && - isReallyTriviallyReMaterializable(MI)); + (MI.getDesc().isRematerializable() && isReMaterializableImpl(MI)); } /// Given \p MO is a PhysReg use return if it can be ignored for the purpose @@ -194,11 +206,10 @@ class LLVM_ABI TargetInstrInfo : public MCInstrInfo { protected: /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is /// set, this hook lets the target specify whether the instruction is actually - /// trivially rematerializable, taking into consideration its operands. This + /// rematerializable, taking into consideration its operands. This /// predicate must return false if the instruction has any side effects other - /// than producing a value, or if it requres any address registers that are - /// not always available. 
- virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const; + /// than producing a value. + virtual bool isReMaterializableImpl(const MachineInstr &MI) const; /// This method commutes the operands of the given machine instruction MI. /// The operands to be commuted are specified by their indices OpIdx1 and diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index 4c2d991308d30..7bbad172b2d42 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -480,13 +480,6 @@ class LLVM_ABI TargetLoweringBase { return true; } - /// Return true if the @llvm.vector.partial.reduce.* intrinsic - /// should be expanded using generic code in SelectionDAGBuilder. - virtual bool - shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const { - return true; - } - /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded /// using generic code in SelectionDAGBuilder. virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const { @@ -3455,6 +3448,10 @@ class LLVM_ABI TargetLoweringBase { /// matching of other patterns. virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const { + // Form it if it is legal. + if (isOperationLegal(Opcode, VT)) + return true; + // TODO: The default logic is inherited from code in CodeGenPrepare. // The opcode should not make a difference by default? if (Opcode != ISD::UADDO) @@ -3505,9 +3502,10 @@ class LLVM_ABI TargetLoweringBase { return isOperationLegalOrCustom(Op, VT); } - /// Should we expand [US]CMP nodes using two selects and two compares, or by - /// doing arithmetic on boolean types - virtual bool shouldExpandCmpUsingSelects(EVT VT) const { return false; } + /// Should we prefer selects to doing arithmetic on boolean types + virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const { + return false; + } /// True if target has some particular form of dealing with pointer arithmetic /// semantics for pointers with the given value type. False if pointer diff --git a/llvm/include/llvm/CodeGen/ValueTypes.td b/llvm/include/llvm/CodeGen/ValueTypes.td index 44edec98d20f3..300addd7d4daf 100644 --- a/llvm/include/llvm/CodeGen/ValueTypes.td +++ b/llvm/include/llvm/CodeGen/ValueTypes.td @@ -368,6 +368,10 @@ def c64 : VTCheriCapability<64, 254>; // 64-bit CHERI capability value def c128 : VTCheriCapability<128, 255>; // 128-bit CHERI capability value let isNormalValueType = false in { +// Pseudo valuetype mapped to the current CHERI capability pointer size. +// Should only be used in TableGen. 
+def cPTR : VTAny<503>; + def token : ValueType<0, 504>; // TokenTy def MetadataVT : ValueType<0, 505> { // Metadata let LLVMName = "Metadata"; diff --git a/llvm/include/llvm/CodeGenTypes/MachineValueType.h b/llvm/include/llvm/CodeGenTypes/MachineValueType.h index e4114ae957c70..69d52e33d900f 100644 --- a/llvm/include/llvm/CodeGenTypes/MachineValueType.h +++ b/llvm/include/llvm/CodeGenTypes/MachineValueType.h @@ -582,6 +582,12 @@ namespace llvm { MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE, force_iteration_on_noniterable_enum); } + + static auto cheri_capability_valuetypes() { + return enum_seq_inclusive(MVT::FIRST_CHERI_CAPABILITY_VALUETYPE, + MVT::LAST_CHERI_CAPABILITY_VALUETYPE, + force_iteration_on_noniterable_enum); + } /// @} }; diff --git a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h index 52af205257627..ffe0b50b036ac 100644 --- a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h +++ b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h @@ -179,6 +179,7 @@ class DWARFDataExtractorBase : public DataExtractor { class DWARFDataExtractorSimple : public DWARFDataExtractorBase { +public: using DWARFDataExtractorBase::DWARFDataExtractorBase; LLVM_ABI uint64_t getRelocatedValueImpl(uint32_t Size, uint64_t *Off, diff --git a/llvm/include/llvm/DebugInfo/LogicalView/Core/LVObject.h b/llvm/include/llvm/DebugInfo/LogicalView/Core/LVObject.h index ee9a87e25e15d..4caf1236dc0fb 100644 --- a/llvm/include/llvm/DebugInfo/LogicalView/Core/LVObject.h +++ b/llvm/include/llvm/DebugInfo/LogicalView/Core/LVObject.h @@ -82,6 +82,8 @@ using LVScopes = SmallVector; using LVSymbols = SmallVector; using LVTypes = SmallVector; +using LVElementsView = detail::concat_range; using LVOffsets = SmallVector; // The following DWARF documents detail the 'tombstone' concept: diff --git a/llvm/include/llvm/DebugInfo/LogicalView/Core/LVScope.h b/llvm/include/llvm/DebugInfo/LogicalView/Core/LVScope.h index a453923d032e4..f4f3516769938 100644 --- a/llvm/include/llvm/DebugInfo/LogicalView/Core/LVScope.h +++ b/llvm/include/llvm/DebugInfo/LogicalView/Core/LVScope.h @@ -14,6 +14,7 @@ #ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSCOPE_H #define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSCOPE_H +#include "llvm/ADT/STLExtras.h" #include "llvm/DebugInfo/LogicalView/Core/LVElement.h" #include "llvm/DebugInfo/LogicalView/Core/LVLocation.h" #include "llvm/DebugInfo/LogicalView/Core/LVSort.h" @@ -94,6 +95,11 @@ class LLVM_ABI LVScope : public LVElement { LVProperties Kinds; LVProperties Properties; static LVScopeDispatch Dispatch; + // Empty containers used in `getChildren()` in case there is no Types, + // Symbols, or Scopes. + static const LVTypes EmptyTypes; + static const LVSymbols EmptySymbols; + static const LVScopes EmptyScopes; // Size in bits if this scope represents also a compound type. uint32_t BitSize = 0; @@ -128,14 +134,6 @@ class LLVM_ABI LVScope : public LVElement { std::unique_ptr Lines; std::unique_ptr Ranges; - // Vector of elements (types, scopes and symbols). - // It is the union of (*Types, *Symbols and *Scopes) to be used for - // the following reasons: - // - Preserve the order the logical elements are read in. - // - To have a single container with all the logical elements, when - // the traversal does not require any specific element kind. - std::unique_ptr Children; - // Resolve the template parameters/arguments relationship. 
void resolveTemplate(); void printEncodedArgs(raw_ostream &OS, bool Full) const; @@ -213,7 +211,23 @@ class LLVM_ABI LVScope : public LVElement { const LVScopes *getScopes() const { return Scopes.get(); } const LVSymbols *getSymbols() const { return Symbols.get(); } const LVTypes *getTypes() const { return Types.get(); } - const LVElements *getChildren() const { return Children.get(); } + // Return view over union of child Scopes, Types, and Symbols, in that order. + // + // Calling `LVScope::sort()` ensures that each of groups is sorted according + // to the given criteria (see also `LVOptions::setSortMode()`). Because + // `getChildren()` iterates over the concatenation, the result returned by + // this function is not necessarily sorted. If order is important, use + // `getSortedChildren()`. + LVElementsView getChildren() const { + return llvm::concat(Scopes ? *Scopes : EmptyScopes, + Types ? *Types : EmptyTypes, + Symbols ? *Symbols : EmptySymbols); + } + // Return vector of child Scopes, Types, and Symbols that is sorted using + // `SortFunction`. This requires copy + sort; if order is not important, + // use `getChildren()` instead. + LVElements getSortedChildren( + LVSortFunction SortFunction = llvm::logicalview::getSortFunction()) const; void addElement(LVElement *Element); void addElement(LVLine *Line); @@ -222,7 +236,6 @@ class LLVM_ABI LVScope : public LVElement { void addElement(LVType *Type); void addObject(LVLocation *Location); void addObject(LVAddress LowerAddress, LVAddress UpperAddress); - void addToChildren(LVElement *Element); // Add the missing elements from the given 'Reference', which is the // scope associated with any DW_AT_specification, DW_AT_abstract_origin. diff --git a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h index 87777fddc9157..edee6a7dec6fc 100644 --- a/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h +++ b/llvm/include/llvm/Frontend/HLSL/HLSLRootSignature.h @@ -56,7 +56,8 @@ struct RootDescriptor { return; } - assert(Version == llvm::dxbc::RootSignatureVersion::V1_1 && + assert((Version == llvm::dxbc::RootSignatureVersion::V1_1 || + Version == llvm::dxbc::RootSignatureVersion::V1_2) && "Specified an invalid root signature version"); switch (Type) { case dxil::ResourceClass::CBuffer: @@ -100,7 +101,8 @@ struct DescriptorTableClause { return; } - assert(Version == dxbc::RootSignatureVersion::V1_1 && + assert((Version == dxbc::RootSignatureVersion::V1_1 || + Version == dxbc::RootSignatureVersion::V1_2) && "Specified an invalid root signature version"); switch (Type) { case dxil::ResourceClass::CBuffer: @@ -131,6 +133,7 @@ struct StaticSampler { float MaxLOD = std::numeric_limits::max(); uint32_t Space = 0; dxbc::ShaderVisibility Visibility = dxbc::ShaderVisibility::All; + dxbc::StaticSamplerFlags Flags = dxbc::StaticSamplerFlags::None; }; /// Models RootElement : RootFlags | RootConstants | RootParam diff --git a/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h b/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h index c6d7c32c4ad95..bfcbf728d415c 100644 --- a/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h +++ b/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h @@ -48,6 +48,91 @@ class RootSignatureValidationError } }; +class OffsetAppendAfterOverflow : public ErrorInfo { +public: + static char ID; + dxil::ResourceClass Type; + uint32_t Register; + uint32_t Space; + + OffsetAppendAfterOverflow(dxil::ResourceClass Type, uint32_t Register, + uint32_t Space) + : 
Type(Type), Register(Register), Space(Space) {} + + void log(raw_ostream &OS) const override { + OS << "Range " << getResourceClassName(Type) << "(register=" << Register + << ", space=" << Space << ") " + << "cannot be appended after an unbounded range "; + } + + std::error_code convertToErrorCode() const override { + return llvm::inconvertibleErrorCode(); + } +}; + +class ShaderRegisterOverflowError + : public ErrorInfo { +public: + static char ID; + dxil::ResourceClass Type; + uint32_t Register; + uint32_t Space; + + ShaderRegisterOverflowError(dxil::ResourceClass Type, uint32_t Register, + uint32_t Space) + : Type(Type), Register(Register), Space(Space) {} + + void log(raw_ostream &OS) const override { + OS << "Overflow for shader register range: " << getResourceClassName(Type) + << "(register=" << Register << ", space=" << Space << ")."; + } + + std::error_code convertToErrorCode() const override { + return llvm::inconvertibleErrorCode(); + } +}; + +class OffsetOverflowError : public ErrorInfo { +public: + static char ID; + dxil::ResourceClass Type; + uint32_t Register; + uint32_t Space; + + OffsetOverflowError(dxil::ResourceClass Type, uint32_t Register, + uint32_t Space) + : Type(Type), Register(Register), Space(Space) {} + + void log(raw_ostream &OS) const override { + OS << "Offset overflow for descriptor range: " << getResourceClassName(Type) + << "(register=" << Register << ", space=" << Space << ")."; + } + + std::error_code convertToErrorCode() const override { + return llvm::inconvertibleErrorCode(); + } +}; + +class TableSamplerMixinError : public ErrorInfo { +public: + static char ID; + dxil::ResourceClass Type; + uint32_t Location; + + TableSamplerMixinError(dxil::ResourceClass Type, uint32_t Location) + : Type(Type), Location(Location) {} + + void log(raw_ostream &OS) const override { + OS << "Samplers cannot be mixed with other " + << "resource types in a descriptor table, " << getResourceClassName(Type) + << "(location=" << Location << ")"; + } + + std::error_code convertToErrorCode() const override { + return llvm::inconvertibleErrorCode(); + } +}; + class GenericRSMetadataError : public ErrorInfo { public: LLVM_ABI static char ID; diff --git a/llvm/include/llvm/Frontend/HLSL/RootSignatureValidations.h b/llvm/include/llvm/Frontend/HLSL/RootSignatureValidations.h index 49c5967aebd3e..4dd18111b0c9d 100644 --- a/llvm/include/llvm/Frontend/HLSL/RootSignatureValidations.h +++ b/llvm/include/llvm/Frontend/HLSL/RootSignatureValidations.h @@ -33,6 +33,7 @@ LLVM_ABI bool verifyRangeType(uint32_t Type); LLVM_ABI bool verifyDescriptorRangeFlag(uint32_t Version, dxil::ResourceClass Type, dxbc::DescriptorRangeFlags FlagsVal); +LLVM_ABI bool verifyStaticSamplerFlags(uint32_t Version, uint32_t FlagsNumber); LLVM_ABI bool verifyNumDescriptors(uint32_t NumDescriptors); LLVM_ABI bool verifyMipLODBias(float MipLODBias); LLVM_ABI bool verifyMaxAnisotropy(uint32_t MaxAnisotropy); diff --git a/llvm/include/llvm/Frontend/Offloading/OffloadWrapper.h b/llvm/include/llvm/Frontend/Offloading/OffloadWrapper.h index 6b9da06707261..24017492e30b2 100644 --- a/llvm/include/llvm/Frontend/Offloading/OffloadWrapper.h +++ b/llvm/include/llvm/Frontend/Offloading/OffloadWrapper.h @@ -13,6 +13,8 @@ #include "llvm/IR/Module.h" #include "llvm/Support/Compiler.h" +#include + namespace llvm { namespace offloading { using EntryArrayTy = std::pair; @@ -52,6 +54,24 @@ LLVM_ABI llvm::Error wrapHIPBinary(llvm::Module &M, llvm::ArrayRef Images, EntryArrayTy EntryArray, llvm::StringRef Suffix = "", bool 
EmitSurfacesAndTextures = true); + +struct SYCLJITOptions { + // Target/compiler specific options that are suggested to use to "compile" + // program at runtime. + std::string CompileOptions; + // Target/compiler specific options that are suggested to use to "link" + // program at runtime. + std::string LinkOptions; +}; + +/// Wraps OffloadBinaries in the given \p Buffers into the module \p M +/// as global symbols and registers the images with the SYCL Runtime. +/// \param Options Compiler and linker options to be encoded for the later +/// use by a runtime for JIT compilation. +LLVM_ABI llvm::Error +wrapSYCLBinaries(llvm::Module &M, llvm::ArrayRef Buffer, + SYCLJITOptions Options = SYCLJITOptions()); + } // namespace offloading } // namespace llvm diff --git a/llvm/include/llvm/Frontend/Offloading/Utility.h b/llvm/include/llvm/Frontend/Offloading/Utility.h index f8a2b1237b5e1..23e6702beb476 100644 --- a/llvm/include/llvm/Frontend/Offloading/Utility.h +++ b/llvm/include/llvm/Frontend/Offloading/Utility.h @@ -82,7 +82,8 @@ LLVM_ABI StructType *getEntryTy(Module &M); /// \param Data Extra data storage associated with the entry. /// \param SectionName The section this entry will be placed at. /// \param AuxAddr An extra pointer if needed. -LLVM_ABI void +/// \return The emitted global variable containing the offloading entry. +LLVM_ABI GlobalVariable * emitOffloadingEntry(Module &M, object::OffloadKind Kind, Constant *Addr, StringRef Name, uint64_t Size, uint32_t Flags, uint64_t Data, Constant *AuxAddr = nullptr, diff --git a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h index 1ade9ce0c3a7d..db781b58944bc 100644 --- a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h +++ b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h @@ -1268,6 +1268,15 @@ struct WriteT { using EmptyTrait = std::true_type; }; +// V6: [6.4.7] Looprange clause +template struct LoopRangeT { + using Begin = E; + using End = E; + + using TupleTrait = std::true_type; + std::tuple t; +}; + // --- template @@ -1300,8 +1309,8 @@ using TupleClausesT = DoacrossT, DynGroupprivateT, FromT, GrainsizeT, IfT, InitT, InReductionT, LastprivateT, LinearT, - MapT, NumTasksT, OrderT, - ReductionT, ScheduleT, + LoopRangeT, MapT, NumTasksT, + OrderT, ReductionT, ScheduleT, TaskReductionT, ToT>; template diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td index 4d9b8f8a6c51e..38f95a11bf85f 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMP.td +++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td @@ -284,6 +284,10 @@ def OMPC_Linear : Clause<[Spelling<"linear">]> { def OMPC_Link : Clause<[Spelling<"link">]> { let flangClass = "OmpObjectList"; } +def OMPC_LoopRange : Clause<[Spelling<"looprange">]> { + let clangClass = "OMPLoopRangeClause"; + let flangClass = "OmpLoopRangeClause"; +} def OMPC_Map : Clause<[Spelling<"map">]> { let clangClass = "OMPMapClause"; let flangClass = "OmpMapClause"; @@ -902,6 +906,11 @@ def OMP_Groupprivate : Directive<[Spelling<"groupprivate">]> { let category = CA_Declarative; let languages = [L_C, L_Fortran]; } +def OMP_Fuse : Directive<[Spelling<"fuse">]> { + let allowedOnceClauses = [VersionedClause]; + let association = AS_Block; + let category = CA_Executable; +} def OMP_Interchange : Directive<[Spelling<"interchange">]> { let allowedOnceClauses = [ VersionedClause, @@ -2087,9 +2096,11 @@ def OMP_TargetParallel : Directive<[Spelling<"target parallel">]> { let allowedOnceClauses = [ VersionedClause, VersionedClause, + VersionedClause, 
VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, ]; let leafConstructs = [OMP_Target, OMP_Parallel]; @@ -2117,12 +2128,14 @@ def OMP_TargetParallelDo : Directive<[Spelling<"target parallel do">]> { VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, + VersionedClause, ]; let leafConstructs = [OMP_Target, OMP_Parallel, OMP_Do]; let category = CA_Executable; @@ -2146,6 +2159,7 @@ def OMP_TargetParallelDoSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2156,6 +2170,7 @@ def OMP_TargetParallelDoSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2180,6 +2195,7 @@ def OMP_TargetParallelFor : Directive<[Spelling<"target parallel for">]> { VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2189,6 +2205,7 @@ def OMP_TargetParallelFor : Directive<[Spelling<"target parallel for">]> { VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, ]; @@ -2218,6 +2235,7 @@ def OMP_TargetParallelForSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2229,6 +2247,7 @@ def OMP_TargetParallelForSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2264,11 +2283,13 @@ def OMP_target_parallel_loop : Directive<[Spelling<"target parallel loop">]> { VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, ]; let leafConstructs = [OMP_Target, OMP_Parallel, OMP_loop]; @@ -2299,12 +2320,14 @@ def OMP_TargetSimd : Directive<[Spelling<"target simd">]> { VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, ]; @@ -2397,12 +2420,14 @@ def OMP_TargetTeamsDistributeParallelDo VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, ]; let leafConstructs = @@ -2436,6 +2461,7 @@ def OMP_TargetTeamsDistributeParallelDoSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2443,6 +2469,7 @@ def OMP_TargetTeamsDistributeParallelDoSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, ]; @@ -2468,6 +2495,7 @@ def OMP_TargetTeamsDistributeParallelFor VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2477,6 +2505,7 @@ def OMP_TargetTeamsDistributeParallelFor VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2508,6 +2537,7 @@ def OMP_TargetTeamsDistributeParallelForSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2519,6 +2549,7 @@ def 
OMP_TargetTeamsDistributeParallelForSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h index f43ef932e965a..0a11617ea971c 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h @@ -1085,11 +1085,13 @@ class OpenMPIRBuilder { /// preheader of the loop. /// \param LoopType Information about type of loop worksharing. /// It corresponds to type of loop workshare OpenMP pragma. + /// \param NoLoop If true, no-loop code is generated. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, - omp::WorksharingLoopType LoopType); + omp::WorksharingLoopType LoopType, + bool NoLoop); /// Modifies the canonical loop to be a statically-scheduled workshare loop. /// @@ -1209,6 +1211,7 @@ class OpenMPIRBuilder { /// present. /// \param LoopType Information about type of loop worksharing. /// It corresponds to type of loop workshare OpenMP pragma. + /// \param NoLoop If true, no-loop code is generated. /// /// \returns Point where to insert code after the workshare construct. LLVM_ABI InsertPointOrErrorTy applyWorkshareLoop( @@ -1219,7 +1222,8 @@ class OpenMPIRBuilder { bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false, bool HasOrderedClause = false, omp::WorksharingLoopType LoopType = - omp::WorksharingLoopType::ForStaticLoop); + omp::WorksharingLoopType::ForStaticLoop, + bool NoLoop = false); /// Tile a loop nest. /// @@ -1402,7 +1406,7 @@ class OpenMPIRBuilder { /// any. LLVM_ABI static TargetRegionEntryInfo getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack, - StringRef ParentName = ""); + vfs::FileSystem &VFS, StringRef ParentName = ""); /// Enum class for the RedctionGen CallBack type to be used. enum class ReductionGenCBKind { Clang, MLIR }; diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h index 5653ee7b6837d..56fc749838ef9 100644 --- a/llvm/include/llvm/IR/DataLayout.h +++ b/llvm/include/llvm/IR/DataLayout.h @@ -77,12 +77,21 @@ class DataLayout { uint32_t BitWidth; Align ABIAlign; Align PrefAlign; + /// The index bit width also defines the address size in this address space. + /// If the index width is less than the representation bit width, the + /// pointer is non-integral and bits beyond the index width could be used + /// for additional metadata (e.g. AMDGPU buffer fat pointers with bounds + /// and other flags or CHERI capabilities that contain bounds+permissions). uint32_t IndexBitWidth; /// Pointers in this address space don't have a well-defined bitwise - /// representation (e.g. may be relocated by a copying garbage collector). - /// Additionally, they may also be non-integral (i.e. containing additional - /// metadata such as bounds information/permissions). - bool IsNonIntegral; + /// representation (e.g. they may be relocated by a copying garbage + /// collector and thus have different addresses at different times). + bool HasUnstableRepresentation; + /// Pointers in this address space have additional state bits that are + /// located at a target-defined location when stored in memory. An example + /// of this would be CHERI capabilities where the validity bit is stored + /// separately from the pointer address+bounds information. 
+ bool HasExternalState; LLVM_ABI bool operator==(const PointerSpec &Other) const; }; @@ -149,7 +158,7 @@ class DataLayout { /// Sets or updates the specification for pointer in the given address space. void setPointerSpec(uint32_t AddrSpace, uint32_t BitWidth, Align ABIAlign, Align PrefAlign, uint32_t IndexBitWidth, - bool IsNonIntegral); + bool HasUnstableRepr, bool HasExternalState); /// Internal helper to get alignment for integer of given bitwidth. LLVM_ABI Align getIntegerAlignment(uint32_t BitWidth, bool abi_or_pref) const; @@ -355,19 +364,91 @@ class DataLayout { /// \sa DataLayout::getAddressSizeInBits unsigned getAddressSize(unsigned AS) const { return getIndexSize(AS); } - /// Return the address spaces containing non-integral pointers. Pointers in - /// this address space don't have a well-defined bitwise representation. - SmallVector getNonIntegralAddressSpaces() const { + /// Return the address spaces with special pointer semantics (such as being + /// unstable or non-integral). + SmallVector getNonStandardAddressSpaces() const { SmallVector AddrSpaces; for (const PointerSpec &PS : PointerSpecs) { - if (PS.IsNonIntegral) + if (PS.HasUnstableRepresentation || PS.HasExternalState || + PS.BitWidth != PS.IndexBitWidth) AddrSpaces.push_back(PS.AddrSpace); } return AddrSpaces; } + /// Returns whether this address space has a non-integral pointer + /// representation, i.e. the pointer is not just an integer address but some + /// other bitwise representation. When true, passes cannot assume that all + /// bits of the representation map directly to the allocation address. + /// NOTE: This also returns true for "unstable" pointers where the + /// representation may be just an address, but this value can change at any + /// given time (e.g. due to copying garbage collection). + /// Examples include AMDGPU buffer descriptors with a 128-bit fat pointer + /// and a 32-bit offset or CHERI capabilities that contain bounds, permissions + /// and an out-of-band validity bit. + /// + /// In general, more specialized functions such as mustNotIntroduceIntToPtr(), + /// mustNotIntroducePtrToInt(), or hasExternalState() should be + /// preferred over this one when reasoning about the behavior of IR + /// analysis/transforms. + /// TODO: should remove/deprecate this once all uses have migrated. bool isNonIntegralAddressSpace(unsigned AddrSpace) const { - return getPointerSpec(AddrSpace).IsNonIntegral; + const auto &PS = getPointerSpec(AddrSpace); + return PS.BitWidth != PS.IndexBitWidth || PS.HasUnstableRepresentation || + PS.HasExternalState; + } + + /// Returns whether this address space has an "unstable" pointer + /// representation. The bitwise pattern of such pointers is allowed to change + /// in a target-specific way. For example, this could be used for copying + /// garbage collection where the garbage collector could update the pointer + /// value as part of the collection sweep. + bool hasUnstableRepresentation(unsigned AddrSpace) const { + return getPointerSpec(AddrSpace).HasUnstableRepresentation; + } + bool hasUnstableRepresentation(Type *Ty) const { + auto *PTy = dyn_cast(Ty->getScalarType()); + return PTy && hasUnstableRepresentation(PTy->getPointerAddressSpace()); + } + + /// Returns whether this address space has external state (implies having + /// a non-integral pointer representation). + /// These pointer types must be loaded and stored using appropriate + /// instructions and cannot use integer loads/stores as this would not + /// propagate the out-of-band state. 
An example of such a pointer type is a + /// CHERI capability that contain bounds, permissions and an out-of-band + /// validity bit that is invalidated whenever an integer/FP store is performed + /// to the associated memory location. + bool hasExternalState(unsigned AddrSpace) const { + return getPointerSpec(AddrSpace).HasExternalState; + } + bool hasExternalState(Type *Ty) const { + auto *PTy = dyn_cast(Ty->getScalarType()); + return PTy && hasExternalState(PTy->getPointerAddressSpace()); + } + + /// Returns whether passes must avoid introducing `inttoptr` instructions + /// for this address space (unless they have target-specific knowledge). + /// + /// This is currently the case for non-integral pointer representations with + /// external state (hasExternalState()) since `inttoptr` cannot recreate the + /// external state bits. + /// New `inttoptr` instructions should also be avoided for "unstable" bitwise + /// representations (hasUnstableRepresentation()) unless the pass knows it is + /// within a critical section that retains the current representation. + bool mustNotIntroduceIntToPtr(unsigned AddrSpace) const { + return hasUnstableRepresentation(AddrSpace) || hasExternalState(AddrSpace); + } + + /// Returns whether passes must avoid introducing `ptrtoint` instructions + /// for this address space (unless they have target-specific knowledge). + /// + /// This is currently the case for pointer address spaces that have an + /// "unstable" representation (hasUnstableRepresentation()) since the + /// bitwise pattern of such pointers could change unless the pass knows it is + /// within a critical section that retains the current representation. + bool mustNotIntroducePtrToInt(unsigned AddrSpace) const { + return hasUnstableRepresentation(AddrSpace); } bool isNonIntegralPointerType(PointerType *PT) const { @@ -375,10 +456,20 @@ class DataLayout { } bool isNonIntegralPointerType(Type *Ty) const { - auto *PTy = dyn_cast(Ty); + auto *PTy = dyn_cast(Ty->getScalarType()); return PTy && isNonIntegralPointerType(PTy); } + bool mustNotIntroducePtrToInt(Type *Ty) const { + auto *PTy = dyn_cast(Ty->getScalarType()); + return PTy && mustNotIntroducePtrToInt(PTy->getPointerAddressSpace()); + } + + bool mustNotIntroduceIntToPtr(Type *Ty) const { + auto *PTy = dyn_cast(Ty->getScalarType()); + return PTy && mustNotIntroduceIntToPtr(PTy->getPointerAddressSpace()); + } + /// The size in bits of the pointer representation in a given address space. /// This is not necessarily the same as the integer address of a pointer (e.g. /// for fat pointers). 
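A minimal sketch of how the new DataLayout predicates above might be consumed by a transform that wants to commute a pointer through an integer round-trip; this is not part of the patch, and the helper name canRoundTripThroughInt is hypothetical:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

// Safe only when neither direction of the round-trip is forbidden for PtrTy:
// ptrtoint must be avoided for "unstable" representations (e.g. a copying GC
// may relocate the pointee), and inttoptr must be avoided when out-of-band
// state (e.g. a CHERI validity tag) cannot be recreated from an integer.
static bool canRoundTripThroughInt(const llvm::DataLayout &DL,
                                   llvm::Type *PtrTy) {
  return !DL.mustNotIntroducePtrToInt(PtrTy) &&
         !DL.mustNotIntroduceIntToPtr(PtrTy);
}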
diff --git a/llvm/include/llvm/IR/FixedMetadataKinds.def b/llvm/include/llvm/IR/FixedMetadataKinds.def index d09cc15d65ff6..0603abcd6a4da 100644 --- a/llvm/include/llvm/IR/FixedMetadataKinds.def +++ b/llvm/include/llvm/IR/FixedMetadataKinds.def @@ -55,3 +55,4 @@ LLVM_FIXED_MD_KIND(MD_mmra, "mmra", 40) LLVM_FIXED_MD_KIND(MD_noalias_addrspace, "noalias.addrspace", 41) LLVM_FIXED_MD_KIND(MD_callee_type, "callee_type", 42) LLVM_FIXED_MD_KIND(MD_nofree, "nofree", 43) +LLVM_FIXED_MD_KIND(MD_captures, "captures", 44) diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h index 95a0a7fd2f97e..de7a237098594 100644 --- a/llvm/include/llvm/IR/Instructions.h +++ b/llvm/include/llvm/IR/Instructions.h @@ -32,6 +32,7 @@ #include "llvm/IR/Instruction.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/OperandTraits.h" +#include "llvm/IR/ProfDataUtils.h" #include "llvm/IR/Use.h" #include "llvm/IR/User.h" #include "llvm/Support/AtomicOrdering.h" @@ -3536,8 +3537,6 @@ class SwitchInstProfUpdateWrapper { bool Changed = false; protected: - LLVM_ABI MDNode *buildProfBranchWeightsMD(); - LLVM_ABI void init(); public: @@ -3549,8 +3548,8 @@ class SwitchInstProfUpdateWrapper { SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } ~SwitchInstProfUpdateWrapper() { - if (Changed) - SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); + if (Changed && Weights.has_value() && Weights->size() >= 2) + setBranchWeights(SI, Weights.value(), /*IsExpected=*/false); } /// Delegate the call to the underlying SwitchInst::removeCase() and remove diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h index eb0440f500735..0622bfae2c845 100644 --- a/llvm/include/llvm/IR/IntrinsicInst.h +++ b/llvm/include/llvm/IR/IntrinsicInst.h @@ -810,6 +810,26 @@ class MinMaxIntrinsic : public IntrinsicInst { /// Whether the intrinsic is signed or unsigned. bool isSigned() const { return isSigned(getIntrinsicID()); }; + /// Whether the intrinsic is a smin or umin. + static bool isMin(Intrinsic::ID ID) { + switch (ID) { + case Intrinsic::umin: + case Intrinsic::smin: + return true; + case Intrinsic::umax: + case Intrinsic::smax: + return false; + default: + llvm_unreachable("Invalid intrinsic"); + } + } + + /// Whether the intrinsic is a smin or a umin. + bool isMin() const { return isMin(getIntrinsicID()); } + + /// Whether the intrinsic is a smax or a umax. + bool isMax() const { return !isMin(getIntrinsicID()); } + /// Min/max intrinsics are monotonic, they operate on a fixed-bitwidth values, /// so there is a certain threshold value, upon reaching which, /// their value can no longer change. Return said threshold. 
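A hedged sketch of how the new MinMaxIntrinsic::isMin()/isMax() helpers above could be used to classify a call without switching over the four intrinsic IDs; describeMinMax is a made-up name and not part of the patch:

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/raw_ostream.h"

// Prints whether II is a min or a max and whether it is signed, relying on
// the MinMaxIntrinsic helpers instead of enumerating smin/smax/umin/umax.
static void describeMinMax(const llvm::IntrinsicInst &II) {
  if (const auto *MM = llvm::dyn_cast<llvm::MinMaxIntrinsic>(&II))
    llvm::errs() << (MM->isMin() ? "min" : "max")
                 << (MM->isSigned() ? ", signed\n" : ", unsigned\n");
}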
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td index 585371a6a4423..96da698538314 100644 --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -297,46 +297,39 @@ def IIT_MMX : IIT_VT; def IIT_TOKEN : IIT_VT; def IIT_METADATA : IIT_VT; def IIT_EMPTYSTRUCT : IIT_VT; -def IIT_STRUCT2 : IIT_Base<21>; -def IIT_STRUCT3 : IIT_Base<22>; -def IIT_STRUCT4 : IIT_Base<23>; -def IIT_STRUCT5 : IIT_Base<24>; -def IIT_EXTEND_ARG : IIT_Base<25>; -def IIT_TRUNC_ARG : IIT_Base<26>; -def IIT_ANYPTR : IIT_Base<27>; -def IIT_V1 : IIT_Vec<1, 28>; -def IIT_VARARG : IIT_VT; -def IIT_ONE_NTH_ELTS_VEC_ARG : IIT_Base<30>; -def IIT_SAME_VEC_WIDTH_ARG : IIT_Base<31>; -def IIT_VEC_OF_ANYPTRS_TO_ELT : IIT_Base<34>; -def IIT_I128 : IIT_Int<128, 35>; -def IIT_V512 : IIT_Vec<512, 36>; -def IIT_V1024 : IIT_Vec<1024, 37>; -def IIT_STRUCT6 : IIT_Base<38>; -def IIT_STRUCT7 : IIT_Base<39>; -def IIT_STRUCT8 : IIT_Base<40>; -def IIT_F128 : IIT_VT; -def IIT_VEC_ELEMENT : IIT_Base<42>; -def IIT_SCALABLE_VEC : IIT_Base<43>; -def IIT_SUBDIVIDE2_ARG : IIT_Base<44>; -def IIT_SUBDIVIDE4_ARG : IIT_Base<45>; -def IIT_VEC_OF_BITCASTS_TO_INT : IIT_Base<46>; -def IIT_V128 : IIT_Vec<128, 47>; -def IIT_BF16 : IIT_VT; -def IIT_STRUCT9 : IIT_Base<49>; -def IIT_V256 : IIT_Vec<256, 50>; -def IIT_AMX : IIT_VT; -def IIT_PPCF128 : IIT_VT; -def IIT_V3 : IIT_Vec<3, 53>; -def IIT_EXTERNREF : IIT_VT; -def IIT_FUNCREF : IIT_VT; -def IIT_I2 : IIT_Int<2, 57>; -def IIT_I4 : IIT_Int<4, 58>; -def IIT_AARCH64_SVCOUNT : IIT_VT; -def IIT_V6 : IIT_Vec<6, 60>; -def IIT_V10 : IIT_Vec<10, 61>; -def IIT_V2048 : IIT_Vec<2048, 62>; -def IIT_V4096 : IIT_Vec<4096, 63>; +def IIT_STRUCT : IIT_Base<21>; +def IIT_EXTEND_ARG : IIT_Base<22>; +def IIT_TRUNC_ARG : IIT_Base<23>; +def IIT_ANYPTR : IIT_Base<24>; +def IIT_V1 : IIT_Vec<1, 25>; +def IIT_VARARG : IIT_VT; +def IIT_ONE_NTH_ELTS_VEC_ARG : IIT_Base<27>; +def IIT_SAME_VEC_WIDTH_ARG : IIT_Base<28>; +def IIT_VEC_OF_ANYPTRS_TO_ELT : IIT_Base<29>; +def IIT_I128 : IIT_Int<128, 30>; +def IIT_V512 : IIT_Vec<512, 31>; +def IIT_V1024 : IIT_Vec<1024, 32>; +def IIT_F128 : IIT_VT; +def IIT_VEC_ELEMENT : IIT_Base<34>; +def IIT_SCALABLE_VEC : IIT_Base<35>; +def IIT_SUBDIVIDE2_ARG : IIT_Base<36>; +def IIT_SUBDIVIDE4_ARG : IIT_Base<37>; +def IIT_VEC_OF_BITCASTS_TO_INT : IIT_Base<38>; +def IIT_V128 : IIT_Vec<128, 39>; +def IIT_BF16 : IIT_VT; +def IIT_V256 : IIT_Vec<256, 41>; +def IIT_AMX : IIT_VT; +def IIT_PPCF128 : IIT_VT; +def IIT_V3 : IIT_Vec<3, 44>; +def IIT_EXTERNREF : IIT_VT; +def IIT_FUNCREF : IIT_VT; +def IIT_I2 : IIT_Int<2, 47>; +def IIT_I4 : IIT_Int<4, 48>; +def IIT_AARCH64_SVCOUNT : IIT_VT; +def IIT_V6 : IIT_Vec<6, 50>; +def IIT_V10 : IIT_Vec<10, 51>; +def IIT_V2048 : IIT_Vec<2048, 52>; +def IIT_V4096 : IIT_Vec<4096, 53>; } defvar IIT_all_FixedTypes = !filter(iit, IIT_all, @@ -345,19 +338,6 @@ defvar IIT_all_FixedTypes = !filter(iit, IIT_all, defvar IIT_all_VectorTypes = !filter(iit, IIT_all, !isa(iit)); -defvar IIT_RetNumbers = [ - [IIT_Done.Number], - [], - [IIT_STRUCT2.Number], - [IIT_STRUCT3.Number], - [IIT_STRUCT4.Number], - [IIT_STRUCT5.Number], - [IIT_STRUCT6.Number], - [IIT_STRUCT7.Number], - [IIT_STRUCT8.Number], - [IIT_STRUCT9.Number], -]; - //===----------------------------------------------------------------------===// // Types used by intrinsics. 
//===----------------------------------------------------------------------===// @@ -663,7 +643,10 @@ class TypeInfoGen< !if(!isa(ty), ACTys[MappingRIdxs[ty.Number]], ty)); list TypeSig = !listflatten(!listconcat( - [IIT_RetNumbers[!size(RetTypes)]], + [!cond( + !eq(!size(RetTypes), 0): [IIT_Done.Number], + !eq(!size(RetTypes), 1): [], + true: [IIT_STRUCT.Number, !sub(!size(RetTypes), 2)])], !foreach(i, !range(AllTypes), !foreach(a, AllTypes[i].Sig, ResolveArgCode< @@ -977,8 +960,12 @@ def int_instrprof_mcdc_tvbitmap_update : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i32_ty, llvm_ptr_ty]>; -def int_call_preallocated_setup : DefaultAttrsIntrinsic<[llvm_token_ty], [llvm_i32_ty]>; -def int_call_preallocated_arg : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_i32_ty]>; +def int_call_preallocated_setup + : DefaultAttrsIntrinsic<[llvm_token_ty], [llvm_i32_ty], + [ImmArg>]>; +def int_call_preallocated_arg + : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_i32_ty], + [ImmArg>]>; def int_call_preallocated_teardown : DefaultAttrsIntrinsic<[], [llvm_token_ty]>; // This intrinsic is intentionally undocumented and users shouldn't call it; @@ -1775,12 +1762,13 @@ def int_coro_free : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly, ReadOnly>, NoCapture>]>; -def int_coro_end : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_i1_ty, llvm_token_ty], []>; +def int_coro_end : Intrinsic<[], [llvm_ptr_ty, llvm_i1_ty, llvm_token_ty], []>; def int_coro_end_results : Intrinsic<[llvm_token_ty], [llvm_vararg_ty]>; def int_coro_end_async - : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_i1_ty, llvm_vararg_ty], []>; + : Intrinsic<[], [llvm_ptr_ty, llvm_i1_ty, llvm_vararg_ty], []>; def int_coro_frame : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>; +def int_coro_is_in_ramp : Intrinsic<[llvm_i1_ty], [], [IntrNoMem], "llvm.coro.is_in_ramp">; def int_coro_noop : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>; def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>; def int_coro_align : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>; diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td index 7c9aef52b3acf..fbc92d77da1ab 100644 --- a/llvm/include/llvm/IR/IntrinsicsAArch64.td +++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td @@ -130,8 +130,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>; class AdvSIMD_1VectorArg_Expand_Intrinsic : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>; - class AdvSIMD_1VectorArg_Long_Intrinsic - : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>; class AdvSIMD_1IntArg_Narrow_Intrinsic : DefaultAttrsIntrinsic<[llvm_any_ty], [llvm_any_ty], [IntrNoMem]>; class AdvSIMD_1VectorArg_Narrow_Intrinsic @@ -150,9 +148,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". class AdvSIMD_2VectorArg_Intrinsic : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>; - class AdvSIMD_2VectorArg_Compare_Intrinsic - : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>], - [IntrNoMem]>; class AdvSIMD_2Arg_FloatCompare_Intrinsic : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>], [IntrNoMem]>; @@ -160,10 +155,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". 
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>, LLVMTruncatedType<0>], [IntrNoMem]>; - class AdvSIMD_2VectorArg_Wide_Intrinsic - : DefaultAttrsIntrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMTruncatedType<0>], - [IntrNoMem]>; class AdvSIMD_2VectorArg_Narrow_Intrinsic : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMExtendedType<0>, LLVMExtendedType<0>], @@ -172,10 +163,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>, llvm_i32_ty], [IntrNoMem]>; - class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic - : DefaultAttrsIntrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty], - [IntrNoMem]>; class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], @@ -184,10 +171,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>, llvm_i32_ty], [IntrNoMem]>; - class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic - : DefaultAttrsIntrinsic<[llvm_anyvector_ty], - [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty], - [IntrNoMem]>; class AdvSIMD_2VectorArg_Lane_Intrinsic : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty], @@ -205,14 +188,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>; - class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic - : DefaultAttrsIntrinsic<[llvm_anyvector_ty], - [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty, - LLVMMatchType<1>], [IntrNoMem]>; - class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic - : DefaultAttrsIntrinsic<[llvm_anyvector_ty], - [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty, llvm_i32_ty], - [IntrNoMem]>; class AdvSIMD_CvtFxToFP_Intrinsic : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>; @@ -238,11 +213,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>], [IntrNoMem]>; - class AdvSIMD_FML_Intrinsic - : DefaultAttrsIntrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>], - [IntrNoMem]>; - class AdvSIMD_BF16FML_Intrinsic : DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty], diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td index afce1fe6af854..ded00b1274670 100644 --- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td +++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td @@ -1431,7 +1431,7 @@ def int_amdgcn_make_buffer_rsrc : DefaultAttrsIntrinsic < [llvm_anyptr_ty], [llvm_anyptr_ty, // base llvm_i16_ty, // stride (and swizzle control) - llvm_i32_ty, // NumRecords / extent + llvm_i64_ty, // NumRecords / extent llvm_i32_ty], // flags // Attributes lifted from ptrmask + some extra argument attributes. [IntrNoMem, ReadNone>, @@ -3808,6 +3808,7 @@ class AMDGPUCooperativeAtomicLoad : Intrinsic < [SDNPMemOperand, SDNPMayLoad] >; +// TODO: We may want to drop _relaxed and use an atomic ordering operand instead. 
def int_amdgcn_cooperative_atomic_load_32x4B : AMDGPUCooperativeAtomicLoad; def int_amdgcn_cooperative_atomic_store_32x4B : AMDGPUCooperativeAtomicStore; def int_amdgcn_cooperative_atomic_load_16x8B : AMDGPUCooperativeAtomicLoad; diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td index d27d42841e012..570d6bc35cbd0 100644 --- a/llvm/include/llvm/IR/IntrinsicsDirectX.td +++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td @@ -134,6 +134,8 @@ def int_dx_degrees : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty def int_dx_isinf : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [llvm_anyfloat_ty], [IntrNoMem]>; +def int_dx_isnan : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], + [llvm_anyfloat_ty], [IntrNoMem]>; def int_dx_lerp : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty, LLVMMatchType<0>,LLVMMatchType<0>], [IntrNoMem]>; diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td index 7b40841e45d0d..9cfab26fffa54 100644 --- a/llvm/include/llvm/IR/IntrinsicsNVVM.td +++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td @@ -947,6 +947,78 @@ class NVVM_TCGEN05_LDST_ACCESS_SIZE { true : llvm_void_ty); } +class NVVM_TCGEN05_MMA_BASE { + LLVMType a_operand_type = !if(!eq(Space, "tensor"), + llvm_tmem_ptr_ty, llvm_i64_ty); + list common_args = !listconcat( + [llvm_tmem_ptr_ty, // d + a_operand_type, // a + llvm_i64_ty, // b + llvm_i32_ty, // idesc + llvm_i1_ty], // enable_input_d + !if(!eq(Sp, 1), [llvm_tmem_ptr_ty], [])); // spmetadata + list common_intr_props = !listconcat( + [IntrArgMemOnly, WriteOnly>], + !if(!eq(Space, "tensor"), [ReadOnly>], []) + ); +} + +class NVVM_TCGEN05_MMA: + NVVM_TCGEN05_MMA_BASE { + string intr = "llvm.nvvm.tcgen05.mma" + # !if(!eq(Sp, 1), ".sp", "") + # "." # Space + # !if(!eq(ScaleInputD, 1), ".scale_d", "") + # !if(!eq(AShift, 1), ".ashift", ""); + string record = !subst(".", "_", !subst("llvm.", "int_", intr)); +} + +class NVVM_TCGEN05_MMA_BLOCKSCALE: + NVVM_TCGEN05_MMA_BASE { + string intr = "llvm.nvvm.tcgen05.mma" + # !if(!eq(Sp, 1), ".sp", "") + # "." # Space + # "." # Kind + # ".block_scale" # ScaleVecSize; + string record = !subst(".", "_", !subst("llvm.", "int_", intr)); +} + +class NVVM_TCGEN05_MMA_WS: + NVVM_TCGEN05_MMA_BASE { + string intr = "llvm.nvvm.tcgen05.mma.ws" + # !if(!eq(Sp, 1), ".sp", "") + # "." # Space + # !if(!eq(ZeroColMask, 1), ".zero_col_mask", ""); + string record = !subst(".", "_", !subst("llvm.", "int_", intr)); +} + +class NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE: + NVVM_TCGEN05_MMA_BASE { + string intr = "llvm.nvvm.tcgen05.mma" + # !if(!eq(Sp, 1), ".sp", "") + # "." 
# Space + # !if(!eq(ScaleInputD, 1), ".scale_d", "") + # ".disable_output_lane.cg" # CtaGroup + # !if(!eq(AShift, 1), ".ashift", ""); + string record = !subst(".", "_", !subst("llvm.", "int_", intr)); +} + +class NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED { + bit ret = !cond( + !and(!eq(Kind, "mxf8f6f4"), !eq(ScaleVecSize, "")) : true, + !and(!eq(Kind, "mxf4"), !eq(ScaleVecSize, "")) : true, + !and(!eq(Kind, "mxf4nvf4"), !eq(ScaleVecSize, ".block16")) : true, + !and(!eq(Kind, "mxf4"), !eq(ScaleVecSize, ".block32")) : true, + !and(!eq(Kind, "mxf4nvf4"), !eq(ScaleVecSize, ".block32")) : true, + !and(!eq(Kind, "mxf8f6f4"), !eq(ScaleVecSize, ".block32")) : true, + true: false + ); +} + class TexVector types> { string Name = name; list Types = types; @@ -2268,13 +2340,15 @@ def int_nvvm_exit : NVVMBuiltin, class DefaultAttrsIntrinsicFlags ret_types, list param_types, list flags, - list intr_properties> + list intr_properties, + string name = ""> : DefaultAttrsIntrinsic< ret_types, !listconcat(param_types, flags), !listconcat(intr_properties, !foreach(i, !range(flags), - ImmArg>))>; + ImmArg>)), + name>; // TMA Tensor Copy Intrinsics: S2G -> From Shared to Global memory variants foreach dim = 1...5 in { @@ -2663,4 +2737,136 @@ foreach dim = ["x", "y", "z"] in : PureIntrinsic<[llvm_i32_ty], [llvm_i128_ty], [], "llvm.nvvm.clusterlaunchcontrol.query_cancel.get_first_ctaid." # dim>; -} // let TargetPrefix = "nvvm" +// +// tcgen05.mma intrinsics +// + +foreach sp = [0, 1] in { + foreach space = ["tensor", "shared"] in { + foreach scale_d = [0, 1] in { + foreach ashift = !if(!eq(space, "tensor"), [0, 1], [0]) in { + defvar mma = NVVM_TCGEN05_MMA; + defvar args = !listconcat( + mma.common_args, + !if(!eq(scale_d, 1), [llvm_i64_ty], []) // scale_d_imm + ); + defvar flags = [llvm_i32_ty, // kind + llvm_i32_ty, // cta_group + llvm_i32_ty]; // collector_usage_a + defvar nargs = !size(args); + defvar scale_d_imm = ArgIndex; + defvar scale_d_imm_range = [ImmArg, Range]; + defvar intrinsic_properties = !listconcat( + mma.common_intr_props, + !if(!eq(scale_d, 1), scale_d_imm_range, []), + [Range, 0, !if(!eq(scale_d, 1), 2, 4)>, // kind + Range, 1, 3>, // cta_group + Range, 0, + !if(!eq(ashift, 1), 2, 4)> // collector_usage + ] + ); + + def mma.record: + DefaultAttrsIntrinsicFlags<[], args, flags, intrinsic_properties, + mma.intr>; + } + } + } +} + +// +// tcgen05.mma disable_output_lane intrinsics +// +foreach sp = [0, 1] in { + foreach space = ["tensor", "shared"] in { + foreach cta_group = [1, 2] in { + foreach scale_d = [0, 1] in { + foreach ashift = !if(!eq(space, "tensor"), [0, 1], [0]) in { + defvar mma = NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE< + sp, space, cta_group, ashift, scale_d>; + defvar disable_output_lane_type = + !if(!eq(cta_group, 1), llvm_v4i32_ty, llvm_v8i32_ty); + defvar args = !listconcat( + mma.common_args, + !if(!eq(scale_d, 1), [llvm_i64_ty], []), + [disable_output_lane_type] + ); + defvar flags = [llvm_i32_ty, // kind_flag + llvm_i32_ty]; // collector_usage_a_flag + defvar nargs = !size(args); + defvar scale_d_imm = ArgIndex; + defvar scale_d_imm_range = [ImmArg, Range]; + defvar intrinsic_properties = !listconcat( + mma.common_intr_props, + !if(!eq(scale_d, 1), scale_d_imm_range, []), + [Range, 0, !if(!eq(scale_d, 1), 2, 4)>, + Range, 0, !if(!eq(ashift, 1), 2, 4)>] + ); + + def mma.record: DefaultAttrsIntrinsicFlags<[], args, flags, intrinsic_properties, + mma.intr>; + } // ashift + } // scale_d + } // cta_group + } // space +} // sp + +// +// tcgen05.mma block_scale intrinsics +// +foreach 
sp = [0, 1] in { + foreach space = ["tensor", "shared"] in { + foreach kind = ["mxf8f6f4", "mxf4", "mxf4nvf4"] in { + foreach scale_vec_size = ["", ".block16", ".block32"] in { + defvar mma = NVVM_TCGEN05_MMA_BLOCKSCALE; + defvar args = !listconcat(mma.common_args, + [llvm_tmem_ptr_ty, // scale_a + llvm_tmem_ptr_ty]); // scale_b + defvar flags = [llvm_i32_ty, // cta_group + llvm_i32_ty]; // collector_usage_a + defvar nargs = !size(args); + defvar cta_group = ArgIndex; + defvar collector_usage = ArgIndex; + + if NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED.ret then { + def mma.record: DefaultAttrsIntrinsicFlags<[], args, flags, + !listconcat(mma.common_intr_props, + [Range, + Range]), + mma.intr>; + } + } + } + } +} + +// +// tcgen05.mma ws intrinsics +// +foreach sp = [0, 1] in { + foreach space = ["tensor", "shared"] in { + foreach zero_col_mask = [0, 1] in { + defvar mma = NVVM_TCGEN05_MMA_WS; + defvar args = !listconcat( + mma.common_args, + !if(!eq(zero_col_mask, 1), [llvm_i64_ty], []) + ); + defvar flags = [llvm_i32_ty, // kind + llvm_i32_ty, // collector_buffer_b + llvm_i32_ty]; // collector_usage_b_op + defvar nargs = !size(args); + defvar intrinsic_properties = !listconcat( + mma.common_intr_props, + [Range, 0, 4>, + Range, 0, 4>, + Range, 0, 4>] + ); + + def mma.record: + DefaultAttrsIntrinsicFlags<[], args, flags, intrinsic_properties, + mma.intr>; + } + } +} + +} // let TargetPrefix = "nvvm" \ No newline at end of file diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td index b89fa87bf77b9..823c491e1bfee 100644 --- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td +++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td @@ -87,6 +87,8 @@ let TargetPrefix = "spv" in { def int_spv_frac : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty], [IntrNoMem]>; def int_spv_isinf : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [llvm_anyfloat_ty], [IntrNoMem]>; + def int_spv_isnan : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], + [llvm_anyfloat_ty], [IntrNoMem]>; def int_spv_lerp : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty, LLVMMatchType<0>,LLVMMatchType<0>], [IntrNoMem] >; def int_spv_length : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>], [llvm_anyfloat_ty], [IntrNoMem]>; diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td index 4af9ffc52ba6b..81fbfbf0bb1b4 100644 --- a/llvm/include/llvm/IR/IntrinsicsX86.td +++ b/llvm/include/llvm/IR/IntrinsicsX86.td @@ -1919,62 +1919,62 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
def int_x86_avx2_vpdpbssd_128 : ClangBuiltin<"__builtin_ia32_vpdpbssd128">, DefaultAttrsIntrinsic<[llvm_v4i32_ty], - [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], + [llvm_v4i32_ty, llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbssd_256 : ClangBuiltin<"__builtin_ia32_vpdpbssd256">, DefaultAttrsIntrinsic<[llvm_v8i32_ty], - [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty], + [llvm_v8i32_ty, llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbssds_128 : ClangBuiltin<"__builtin_ia32_vpdpbssds128">, DefaultAttrsIntrinsic<[llvm_v4i32_ty], - [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], + [llvm_v4i32_ty, llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbssds_256 : ClangBuiltin<"__builtin_ia32_vpdpbssds256">, DefaultAttrsIntrinsic<[llvm_v8i32_ty], - [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty], + [llvm_v8i32_ty, llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbsud_128 : ClangBuiltin<"__builtin_ia32_vpdpbsud128">, DefaultAttrsIntrinsic<[llvm_v4i32_ty], - [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], + [llvm_v4i32_ty, llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbsud_256 : ClangBuiltin<"__builtin_ia32_vpdpbsud256">, DefaultAttrsIntrinsic<[llvm_v8i32_ty], - [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty], + [llvm_v8i32_ty, llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbsuds_128 : ClangBuiltin<"__builtin_ia32_vpdpbsuds128">, DefaultAttrsIntrinsic<[llvm_v4i32_ty], - [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], + [llvm_v4i32_ty, llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbsuds_256 : ClangBuiltin<"__builtin_ia32_vpdpbsuds256">, DefaultAttrsIntrinsic<[llvm_v8i32_ty], - [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty], + [llvm_v8i32_ty, llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbuud_128 : ClangBuiltin<"__builtin_ia32_vpdpbuud128">, DefaultAttrsIntrinsic<[llvm_v4i32_ty], - [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], + [llvm_v4i32_ty, llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbuud_256 : ClangBuiltin<"__builtin_ia32_vpdpbuud256">, DefaultAttrsIntrinsic<[llvm_v8i32_ty], - [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty], + [llvm_v8i32_ty, llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbuuds_128 : ClangBuiltin<"__builtin_ia32_vpdpbuuds128">, DefaultAttrsIntrinsic<[llvm_v4i32_ty], - [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty], + [llvm_v4i32_ty, llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpbuuds_256 : ClangBuiltin<"__builtin_ia32_vpdpbuuds256">, DefaultAttrsIntrinsic<[llvm_v8i32_ty], - [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty], + [llvm_v8i32_ty, llvm_v32i8_ty, llvm_v32i8_ty], [IntrNoMem]>; def int_x86_avx2_vpdpwsud_128 @@ -5000,32 +5000,32 @@ let TargetPrefix = "x86" in { def int_x86_avx10_vpdpbssd_512 : ClangBuiltin<"__builtin_ia32_vpdpbssd512">, DefaultAttrsIntrinsic<[llvm_v16i32_ty], - [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty], + [llvm_v16i32_ty, llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>; def int_x86_avx10_vpdpbssds_512 : ClangBuiltin<"__builtin_ia32_vpdpbssds512">, DefaultAttrsIntrinsic<[llvm_v16i32_ty], - [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty], + [llvm_v16i32_ty, llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>; def int_x86_avx10_vpdpbsud_512 : ClangBuiltin<"__builtin_ia32_vpdpbsud512">, DefaultAttrsIntrinsic<[llvm_v16i32_ty], - [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty], + [llvm_v16i32_ty, llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>; def int_x86_avx10_vpdpbsuds_512 : 
ClangBuiltin<"__builtin_ia32_vpdpbsuds512">, DefaultAttrsIntrinsic<[llvm_v16i32_ty], - [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty], + [llvm_v16i32_ty, llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>; def int_x86_avx10_vpdpbuud_512 : ClangBuiltin<"__builtin_ia32_vpdpbuud512">, DefaultAttrsIntrinsic<[llvm_v16i32_ty], - [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty], + [llvm_v16i32_ty, llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>; def int_x86_avx10_vpdpbuuds_512 : ClangBuiltin<"__builtin_ia32_vpdpbuuds512">, DefaultAttrsIntrinsic<[llvm_v16i32_ty], - [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty], + [llvm_v16i32_ty, llvm_v64i8_ty, llvm_v64i8_ty], [IntrNoMem]>; // VNNI INT16 def int_x86_avx10_vpdpwsud_512 : diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h index 852a3a4e2f638..5972dcb637dfa 100644 --- a/llvm/include/llvm/IR/LLVMContext.h +++ b/llvm/include/llvm/IR/LLVMContext.h @@ -97,6 +97,8 @@ class LLVMContext { OB_ptrauth = 7, // "ptrauth" OB_kcfi = 8, // "kcfi" OB_convergencectrl = 9, // "convergencectrl" + OB_align = 10, // "align" + OB_LastBundleID = OB_align // Marker for last bundle ID }; /// getMDKindID - Return a unique non-zero ID for the specified metadata kind. diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h index 990bdc618f240..85a7f8fd373c0 100644 --- a/llvm/include/llvm/IR/Metadata.h +++ b/llvm/include/llvm/IR/Metadata.h @@ -41,6 +41,7 @@ namespace llvm { +enum class CaptureComponents : uint8_t; class Module; class ModuleSlotTracker; class raw_ostream; @@ -1480,6 +1481,13 @@ class MDNode : public Metadata { LLVM_ABI static MDNode *getMergedCallsiteMetadata(MDNode *A, MDNode *B); LLVM_ABI static MDNode *getMergedCalleeTypeMetadata(const MDNode *A, const MDNode *B); + + /// Convert !captures metadata to CaptureComponents. MD may be nullptr. + LLVM_ABI static CaptureComponents toCaptureComponents(const MDNode *MD); + /// Convert CaptureComponents to !captures metadata. The return value may be + /// nullptr. + LLVM_ABI static MDNode *fromCaptureComponents(LLVMContext &Ctx, + CaptureComponents CC); }; /// Tuple of metadata. 
diff --git a/llvm/include/llvm/IR/NVVMIntrinsicUtils.h b/llvm/include/llvm/IR/NVVMIntrinsicUtils.h index cc4929a1ff8da..d55100e5e709d 100644 --- a/llvm/include/llvm/IR/NVVMIntrinsicUtils.h +++ b/llvm/include/llvm/IR/NVVMIntrinsicUtils.h @@ -47,6 +47,15 @@ enum class CTAGroupKind : uint8_t { CG_2 = 2, // cta_group::2 modifier }; +enum class Tcgen05MMAKind : uint8_t { F16 = 0, TF32 = 1, F8F6F4 = 2, I8 = 3 }; + +enum class Tcgen05CollectorUsageOp : uint8_t { + DISCARD = 0, + LASTUSE = 1, + FILL = 2, + USE = 3, +}; + inline bool FPToIntegerIntrinsicShouldFTZ(Intrinsic::ID IntrinsicID) { switch (IntrinsicID) { case Intrinsic::nvvm_f2i_rm_ftz: @@ -180,6 +189,70 @@ inline bool FPToIntegerIntrinsicResultIsSigned(Intrinsic::ID IntrinsicID) { "Checking invalid f2i/d2i intrinsic for signed int conversion"); } +inline bool FPToIntegerIntrinsicNaNZero(Intrinsic::ID IntrinsicID) { + switch (IntrinsicID) { + // f2i + case Intrinsic::nvvm_f2i_rm: + case Intrinsic::nvvm_f2i_rn: + case Intrinsic::nvvm_f2i_rp: + case Intrinsic::nvvm_f2i_rz: + case Intrinsic::nvvm_f2i_rm_ftz: + case Intrinsic::nvvm_f2i_rn_ftz: + case Intrinsic::nvvm_f2i_rp_ftz: + case Intrinsic::nvvm_f2i_rz_ftz: + // f2ui + case Intrinsic::nvvm_f2ui_rm: + case Intrinsic::nvvm_f2ui_rn: + case Intrinsic::nvvm_f2ui_rp: + case Intrinsic::nvvm_f2ui_rz: + case Intrinsic::nvvm_f2ui_rm_ftz: + case Intrinsic::nvvm_f2ui_rn_ftz: + case Intrinsic::nvvm_f2ui_rp_ftz: + case Intrinsic::nvvm_f2ui_rz_ftz: + return true; + // d2i + case Intrinsic::nvvm_d2i_rm: + case Intrinsic::nvvm_d2i_rn: + case Intrinsic::nvvm_d2i_rp: + case Intrinsic::nvvm_d2i_rz: + // d2ui + case Intrinsic::nvvm_d2ui_rm: + case Intrinsic::nvvm_d2ui_rn: + case Intrinsic::nvvm_d2ui_rp: + case Intrinsic::nvvm_d2ui_rz: + // f2ll + case Intrinsic::nvvm_f2ll_rm: + case Intrinsic::nvvm_f2ll_rn: + case Intrinsic::nvvm_f2ll_rp: + case Intrinsic::nvvm_f2ll_rz: + case Intrinsic::nvvm_f2ll_rm_ftz: + case Intrinsic::nvvm_f2ll_rn_ftz: + case Intrinsic::nvvm_f2ll_rp_ftz: + case Intrinsic::nvvm_f2ll_rz_ftz: + // f2ull + case Intrinsic::nvvm_f2ull_rm: + case Intrinsic::nvvm_f2ull_rn: + case Intrinsic::nvvm_f2ull_rp: + case Intrinsic::nvvm_f2ull_rz: + case Intrinsic::nvvm_f2ull_rm_ftz: + case Intrinsic::nvvm_f2ull_rn_ftz: + case Intrinsic::nvvm_f2ull_rp_ftz: + case Intrinsic::nvvm_f2ull_rz_ftz: + // d2ll + case Intrinsic::nvvm_d2ll_rm: + case Intrinsic::nvvm_d2ll_rn: + case Intrinsic::nvvm_d2ll_rp: + case Intrinsic::nvvm_d2ll_rz: + // d2ull + case Intrinsic::nvvm_d2ull_rm: + case Intrinsic::nvvm_d2ull_rn: + case Intrinsic::nvvm_d2ull_rp: + case Intrinsic::nvvm_d2ull_rz: + return false; + } + llvm_unreachable("Checking NaN result for invalid f2i/d2i intrinsic"); +} + inline APFloat::roundingMode GetFPToIntegerRoundingMode(Intrinsic::ID IntrinsicID) { switch (IntrinsicID) { diff --git a/llvm/include/llvm/IR/ProfDataUtils.h b/llvm/include/llvm/IR/ProfDataUtils.h index de9675f48c79b..a0876b169e0b8 100644 --- a/llvm/include/llvm/IR/ProfDataUtils.h +++ b/llvm/include/llvm/IR/ProfDataUtils.h @@ -145,7 +145,13 @@ LLVM_ABI bool extractProfTotalWeight(const Instruction &I, /// \param Weights an array of weights to set on instruction I. /// \param IsExpected were these weights added from an llvm.expect* intrinsic. LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef Weights, - bool IsExpected); + bool IsExpected, bool ElideAllZero = false); + +/// Variant of `setBranchWeights` where the `Weights` will be fit first to +/// uint32_t by shifting right. 
+LLVM_ABI void setFittedBranchWeights(Instruction &I, ArrayRef Weights, + bool IsExpected, + bool ElideAllZero = false); /// downscale the given weights preserving the ratio. If the maximum value is /// not already known and not provided via \param KnownMaxCount , it will be @@ -185,6 +191,14 @@ inline uint32_t scaleBranchCount(uint64_t Count, uint64_t Scale) { LLVM_ABI void setExplicitlyUnknownBranchWeights(Instruction &I, StringRef PassName); +/// Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch +/// weights in the new instruction if the parent function of the original +/// instruction has an entry count. This is to not confuse users by injecting +/// profile data into non-profiled functions. +LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, + Function &F, + StringRef PassName); + /// Analogous to setExplicitlyUnknownBranchWeights, but for functions and their /// entry counts. LLVM_ABI void setExplicitlyUnknownFunctionEntryCount(Function &F, diff --git a/llvm/include/llvm/IR/ValueMap.h b/llvm/include/llvm/IR/ValueMap.h index 1a11718bfcdae..97653c2282aba 100644 --- a/llvm/include/llvm/IR/ValueMap.h +++ b/llvm/include/llvm/IR/ValueMap.h @@ -42,18 +42,15 @@ namespace llvm { -template +template class ValueMapCallbackVH; -template -class ValueMapIterator; -template -class ValueMapConstIterator; +template class ValueMapIterator; +template class ValueMapConstIterator; /// This class defines the default behavior for configurable aspects of /// ValueMap<>. User Configs should inherit from this class to be as compatible /// as possible with future versions of ValueMap. -template -struct ValueMapConfig { +template struct ValueMapConfig { using mutex_type = MutexT; /// If FollowRAUW is true, the ValueMap will update mappings on RAUW. If it's @@ -66,21 +63,24 @@ struct ValueMapConfig { // override all the defaults. struct ExtraData {}; - template + template static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {} - template - static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {} + template + static void onDelete(const ExtraDataT & /*Data*/, KeyT /*Old*/) {} /// Returns a mutex that should be acquired around any changes to the map. /// This is only acquired from the CallbackVH (and held around calls to onRAUW /// and onDelete) and not inside other ValueMap methods. NULL means that no /// mutex is necessary. - template - static mutex_type *getMutex(const ExtraDataT &/*Data*/) { return nullptr; } + template + static mutex_type *getMutex(const ExtraDataT & /*Data*/) { + return nullptr; + } }; /// See the file comment. -template> +template > class ValueMap { friend class ValueMapCallbackVH; @@ -157,9 +157,7 @@ class ValueMap { return Map.find_as(Val) == Map.end() ? 0 : 1; } - iterator find(const KeyT &Val) { - return iterator(Map.find_as(Val)); - } + iterator find(const KeyT &Val) { return iterator(Map.find_as(Val)); } const_iterator find(const KeyT &Val) const { return const_iterator(Map.find_as(Val)); } @@ -186,8 +184,7 @@ class ValueMap { } /// insert - Range insertion of pairs. 
- template - void insert(InputIt I, InputIt E) { + template void insert(InputIt I, InputIt E) { for (; I != E; ++I) insert(*I); } @@ -200,17 +197,13 @@ class ValueMap { Map.erase(I); return true; } - void erase(iterator I) { - return Map.erase(I.base()); - } + void erase(iterator I) { return Map.erase(I.base()); } - value_type& FindAndConstruct(const KeyT &Key) { + value_type &FindAndConstruct(const KeyT &Key) { return Map.FindAndConstruct(Wrap(Key)); } - ValueT &operator[](const KeyT &Key) { - return Map[Wrap(Key)]; - } + ValueT &operator[](const KeyT &Key) { return Map[Wrap(Key)]; } /// isPointerIntoBucketsArray - Return true if the specified pointer points /// somewhere into the ValueMap's array of buckets (i.e. either to a key or @@ -235,7 +228,7 @@ class ValueMap { // the const_cast incorrect) is if it gets inserted into the map. But then // this function must have been called from a non-const method, making the // const_cast ok. - return ValueMapCVH(key, const_cast(this)); + return ValueMapCVH(key, const_cast(this)); } }; @@ -252,7 +245,7 @@ class ValueMapCallbackVH final : public CallbackVH { ValueMapT *Map; ValueMapCallbackVH(KeyT Key, ValueMapT *Map) - : CallbackVH(const_cast(static_cast(Key))), + : CallbackVH(const_cast(static_cast(Key))), Map(Map) {} // Private constructor used to create empty/tombstone DenseMap keys. @@ -268,8 +261,8 @@ class ValueMapCallbackVH final : public CallbackVH { std::unique_lock Guard; if (M) Guard = std::unique_lock(*M); - Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this. - Copy.Map->Map.erase(Copy); // Definitely destroys *this. + Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this. + Copy.Map->Map.erase(Copy); // Definitely destroys *this. } void allUsesReplacedWith(Value *new_key) override { @@ -291,14 +284,14 @@ class ValueMapCallbackVH final : public CallbackVH { // removed the old mapping. if (I != Copy.Map->Map.end()) { ValueT Target(std::move(I->second)); - Copy.Map->Map.erase(I); // Definitely destroys *this. + Copy.Map->Map.erase(I); // Definitely destroys *this. 
Copy.Map->insert(std::make_pair(typed_new_key, std::move(Target))); } } } }; -template +template struct DenseMapInfo> { using VH = ValueMapCallbackVH; @@ -318,9 +311,7 @@ struct DenseMapInfo> { return DenseMapInfo::getHashValue(Val); } - static bool isEqual(const VH &LHS, const VH &RHS) { - return LHS == RHS; - } + static bool isEqual(const VH &LHS, const VH &RHS) { return LHS == RHS; } static bool isEqual(const KeyT &LHS, const VH &RHS) { return LHS == RHS.getValPtr(); @@ -347,7 +338,7 @@ template class ValueMapIterator { struct ValueTypeProxy { const KeyT first; - ValueT& second; + ValueT &second; ValueTypeProxy *operator->() { return this; } @@ -361,23 +352,19 @@ template class ValueMapIterator { return Result; } - ValueTypeProxy operator->() const { - return operator*(); - } + ValueTypeProxy operator->() const { return operator*(); } - bool operator==(const ValueMapIterator &RHS) const { - return I == RHS.I; - } - bool operator!=(const ValueMapIterator &RHS) const { - return I != RHS.I; - } + bool operator==(const ValueMapIterator &RHS) const { return I == RHS.I; } + bool operator!=(const ValueMapIterator &RHS) const { return I != RHS.I; } - inline ValueMapIterator& operator++() { // Preincrement + inline ValueMapIterator &operator++() { // Preincrement ++I; return *this; } - ValueMapIterator operator++(int) { // Postincrement - ValueMapIterator tmp = *this; ++*this; return tmp; + ValueMapIterator operator++(int) { // Postincrement + ValueMapIterator tmp = *this; + ++*this; + return tmp; } }; @@ -397,13 +384,13 @@ template class ValueMapConstIterator { ValueMapConstIterator() : I() {} ValueMapConstIterator(BaseT I) : I(I) {} ValueMapConstIterator(ValueMapIterator Other) - : I(Other.base()) {} + : I(Other.base()) {} BaseT base() const { return I; } struct ValueTypeProxy { const KeyT first; - const ValueT& second; + const ValueT &second; ValueTypeProxy *operator->() { return this; } operator std::pair() const { return std::make_pair(first, second); @@ -415,23 +402,19 @@ template class ValueMapConstIterator { return Result; } - ValueTypeProxy operator->() const { - return operator*(); - } + ValueTypeProxy operator->() const { return operator*(); } - bool operator==(const ValueMapConstIterator &RHS) const { - return I == RHS.I; - } - bool operator!=(const ValueMapConstIterator &RHS) const { - return I != RHS.I; - } + bool operator==(const ValueMapConstIterator &RHS) const { return I == RHS.I; } + bool operator!=(const ValueMapConstIterator &RHS) const { return I != RHS.I; } - inline ValueMapConstIterator& operator++() { // Preincrement + inline ValueMapConstIterator &operator++() { // Preincrement ++I; return *this; } - ValueMapConstIterator operator++(int) { // Postincrement - ValueMapConstIterator tmp = *this; ++*this; return tmp; + ValueMapConstIterator operator++(int) { // Postincrement + ValueMapConstIterator tmp = *this; + ++*this; + return tmp; } }; diff --git a/llvm/include/llvm/IR/Verifier.h b/llvm/include/llvm/IR/Verifier.h index 8dbb9c8a41d7e..b15b4d8ca99fe 100644 --- a/llvm/include/llvm/IR/Verifier.h +++ b/llvm/include/llvm/IR/Verifier.h @@ -60,12 +60,13 @@ class TBAAVerifier { /// \name Helper functions used by \c visitTBAAMetadata. 
/// @{ - MDNode *getFieldNodeFromTBAABaseNode(Instruction &I, const MDNode *BaseNode, - APInt &Offset, bool IsNewFormat); - TBAAVerifier::TBAABaseNodeSummary verifyTBAABaseNode(Instruction &I, + MDNode *getFieldNodeFromTBAABaseNode(const Instruction *I, + const MDNode *BaseNode, APInt &Offset, + bool IsNewFormat); + TBAAVerifier::TBAABaseNodeSummary verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode, bool IsNewFormat); - TBAABaseNodeSummary verifyTBAABaseNodeImpl(Instruction &I, + TBAABaseNodeSummary verifyTBAABaseNodeImpl(const Instruction *I, const MDNode *BaseNode, bool IsNewFormat); @@ -75,9 +76,9 @@ class TBAAVerifier { public: TBAAVerifier(VerifierSupport *Diagnostic = nullptr) : Diagnostic(Diagnostic) {} - /// Visit an instruction and return true if it is valid, return false if an - /// invalid TBAA is attached. - LLVM_ABI bool visitTBAAMetadata(Instruction &I, const MDNode *MD); + /// Visit an instruction, or a TBAA node itself as part of a metadata, and + /// return true if it is valid, return false if an invalid TBAA is attached. + LLVM_ABI bool visitTBAAMetadata(const Instruction *I, const MDNode *MD); }; /// Check a function for errors, useful for use when debugging a diff --git a/llvm/include/llvm/MC/DXContainerRootSignature.h b/llvm/include/llvm/MC/DXContainerRootSignature.h index 54677ef70244f..2b08b2439d2c0 100644 --- a/llvm/include/llvm/MC/DXContainerRootSignature.h +++ b/llvm/include/llvm/MC/DXContainerRootSignature.h @@ -74,6 +74,8 @@ struct StaticSampler { uint32_t ShaderRegister; uint32_t RegisterSpace; dxbc::ShaderVisibility ShaderVisibility; + // Version 3 onwards: + uint32_t Flags = 0; }; struct RootParametersContainer { diff --git a/llvm/include/llvm/MC/MCCodeEmitter.h b/llvm/include/llvm/MC/MCCodeEmitter.h index 1c454c3795c2c..5f288e9e45c4b 100644 --- a/llvm/include/llvm/MC/MCCodeEmitter.h +++ b/llvm/include/llvm/MC/MCCodeEmitter.h @@ -16,7 +16,6 @@ namespace llvm { class MCFixup; class MCInst; class MCSubtargetInfo; -class raw_ostream; template class SmallVectorImpl; /// MCCodeEmitter - Generic instruction encoding interface. @@ -36,6 +35,12 @@ class LLVM_ABI MCCodeEmitter { virtual void encodeInstruction(const MCInst &Inst, SmallVectorImpl &CB, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const = 0; + +protected: + // Helper function used by CodeEmitterGen for error reporting. + [[noreturn]] static void reportUnsupportedInst(const MCInst &Inst); + [[noreturn]] static void reportUnsupportedOperand(const MCInst &Inst, + unsigned OpNum); }; } // end namespace llvm diff --git a/llvm/include/llvm/MC/MCInstrDesc.h b/llvm/include/llvm/MC/MCInstrDesc.h index 0a4bd17e20738..c2f15b81da02c 100644 --- a/llvm/include/llvm/MC/MCInstrDesc.h +++ b/llvm/include/llvm/MC/MCInstrDesc.h @@ -532,7 +532,7 @@ class MCInstrDesc { /// Returns true if this instruction is a candidate for remat. This /// flag is only used in TargetInstrInfo method isTriviallyRematerializable. /// - /// If this flag is set, the isReallyTriviallyReMaterializable() method is + /// If this flag is set, the isReMaterializableImpl() method is /// called to verify the instruction is really rematerializable. 
bool isRematerializable() const { return Flags & (1ULL << MCID::Rematerializable); diff --git a/llvm/include/llvm/MCA/CustomBehaviour.h b/llvm/include/llvm/MCA/CustomBehaviour.h index 0ce3993be95ba..8ad674c4ecf13 100644 --- a/llvm/include/llvm/MCA/CustomBehaviour.h +++ b/llvm/include/llvm/MCA/CustomBehaviour.h @@ -49,8 +49,7 @@ class InstrPostProcess { /// object after it has been lowered from the MCInst. /// This is generally a less disruptive alternative to modifying the /// scheduling model. - virtual void postProcessInstruction(std::unique_ptr &Inst, - const MCInst &MCI) {} + virtual void postProcessInstruction(Instruction &Inst, const MCInst &MCI) {} // The resetState() method gets invoked at the beginning of each code region // so that targets that override this function can clear any state that they diff --git a/llvm/include/llvm/Object/DXContainer.h b/llvm/include/llvm/Object/DXContainer.h index 9bc1918852335..5a5a4dbaae2ad 100644 --- a/llvm/include/llvm/Object/DXContainer.h +++ b/llvm/include/llvm/Object/DXContainer.h @@ -228,11 +228,11 @@ class RootSignature { uint32_t Flags; ViewArray ParametersHeaders; StringRef PartData; - ViewArray StaticSamplers; + ViewArray StaticSamplers; using param_header_iterator = ViewArray::iterator; - using samplers_iterator = ViewArray::iterator; + using samplers_iterator = ViewArray::iterator; public: RootSignature(StringRef PD) : PartData(PD) {} diff --git a/llvm/include/llvm/Object/ELF.h b/llvm/include/llvm/Object/ELF.h index 0b362d389c177..59f63eb6b5bb6 100644 --- a/llvm/include/llvm/Object/ELF.h +++ b/llvm/include/llvm/Object/ELF.h @@ -407,7 +407,8 @@ class ELFFile { Elf_Note_Iterator notes_begin(const Elf_Phdr &Phdr, Error &Err) const { assert(Phdr.p_type == ELF::PT_NOTE && "Phdr is not of type PT_NOTE"); ErrorAsOutParameter ErrAsOutParam(Err); - if (Phdr.p_offset + Phdr.p_filesz > getBufSize()) { + if (Phdr.p_offset + Phdr.p_filesz > getBufSize() || + Phdr.p_offset + Phdr.p_filesz < Phdr.p_offset) { Err = createError("invalid offset (0x" + Twine::utohexstr(Phdr.p_offset) + ") or size (0x" + Twine::utohexstr(Phdr.p_filesz) + ")"); @@ -435,7 +436,8 @@ class ELFFile { Elf_Note_Iterator notes_begin(const Elf_Shdr &Shdr, Error &Err) const { assert(Shdr.sh_type == ELF::SHT_NOTE && "Shdr is not of type SHT_NOTE"); ErrorAsOutParameter ErrAsOutParam(Err); - if (Shdr.sh_offset + Shdr.sh_size > getBufSize()) { + if (Shdr.sh_offset + Shdr.sh_size > getBufSize() || + Shdr.sh_offset + Shdr.sh_size < Shdr.sh_offset) { Err = createError("invalid offset (0x" + Twine::utohexstr(Shdr.sh_offset) + ") or size (0x" + Twine::utohexstr(Shdr.sh_size) + ")"); diff --git a/llvm/include/llvm/Object/OffloadBinary.h b/llvm/include/llvm/Object/OffloadBinary.h index b5c845fa8eb70..ac2dbf60e2aec 100644 --- a/llvm/include/llvm/Object/OffloadBinary.h +++ b/llvm/include/llvm/Object/OffloadBinary.h @@ -48,6 +48,7 @@ enum ImageKind : uint16_t { IMG_Cubin, IMG_Fatbinary, IMG_PTX, + IMG_SPIRV, IMG_LAST, }; @@ -70,9 +71,9 @@ class OffloadBinary : public Binary { /// The offloading metadata that will be serialized to a memory buffer. 
struct OffloadingImage { - ImageKind TheImageKind; - OffloadKind TheOffloadKind; - uint32_t Flags; + ImageKind TheImageKind = ImageKind::IMG_None; + OffloadKind TheOffloadKind = OffloadKind::OFK_None; + uint32_t Flags = 0; MapVector StringData; std::unique_ptr Image; }; diff --git a/llvm/include/llvm/Object/OffloadBundle.h b/llvm/include/llvm/Object/OffloadBundle.h index f4d5a1d878b8d..18be62b10c518 100644 --- a/llvm/include/llvm/Object/OffloadBundle.h +++ b/llvm/include/llvm/Object/OffloadBundle.h @@ -161,7 +161,7 @@ struct OffloadBundleURI { OffsetStr.getAsInteger(10, O); Str = Str.drop_front(OffsetStr.size()); - if (Str.consume_front("&size=")) + if (!Str.consume_front("&size=")) return createStringError(object_error::parse_failed, "Reading 'size' in URI"); diff --git a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h index 62bfee7693db1..b5b110d0f59a1 100644 --- a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h +++ b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h @@ -178,6 +178,11 @@ struct StaticSamplerYamlDesc { uint32_t ShaderRegister; uint32_t RegisterSpace; dxbc::ShaderVisibility ShaderVisibility; + + LLVM_ABI uint32_t getEncodedFlags() const; + +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) bool Enum = false; +#include "llvm/BinaryFormat/DXContainerConstants.def" }; struct RootSignatureYamlDesc { diff --git a/llvm/include/llvm/Passes/PassBuilder.h b/llvm/include/llvm/Passes/PassBuilder.h index 2742ec1b71b7e..8538a8b2afe14 100644 --- a/llvm/include/llvm/Passes/PassBuilder.h +++ b/llvm/include/llvm/Passes/PassBuilder.h @@ -23,6 +23,7 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/Error.h" #include "llvm/Support/PGOOptions.h" +#include "llvm/Support/VirtualFileSystem.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/IPO/Inliner.h" #include "llvm/Transforms/IPO/ModuleInliner.h" @@ -35,10 +36,6 @@ class StringRef; class AAManager; class TargetMachine; class ModuleSummaryIndex; -template class IntrusiveRefCntPtr; -namespace vfs { -class FileSystem; -} // namespace vfs /// Tunable parameters for passes in the default pipelines. class PipelineTuningOptions { @@ -115,6 +112,7 @@ class PassBuilder { PipelineTuningOptions PTO; std::optional PGOOpt; PassInstrumentationCallbacks *PIC; + IntrusiveRefCntPtr FS; public: /// A struct to capture parsed pass pipeline names. @@ -134,7 +132,8 @@ class PassBuilder { TargetMachine *TM = nullptr, PipelineTuningOptions PTO = PipelineTuningOptions(), std::optional PGOOpt = std::nullopt, - PassInstrumentationCallbacks *PIC = nullptr); + PassInstrumentationCallbacks *PIC = nullptr, + IntrusiveRefCntPtr FS = vfs::getRealFileSystem()); /// Cross register the analysis managers through their proxies. /// @@ -632,8 +631,7 @@ class PassBuilder { bool RunProfileGen, bool IsCS, bool AtomicCounterUpdate, std::string ProfileFile, - std::string ProfileRemappingFile, - IntrusiveRefCntPtr FS); + std::string ProfileRemappingFile); /// Returns PIC. External libraries can use this to register pass /// instrumentation callbacks. @@ -641,6 +639,11 @@ class PassBuilder { return PIC; } + /// Returns the virtual file system. + IntrusiveRefCntPtr getVirtualFileSystemPtr() const { + return FS; + } + // Invoke the callbacks registered for the various extension points. // Custom pipelines should use these to invoke the callbacks registered // by TargetMachines and other clients. 
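The PassBuilder hunks above move the virtual file system into the builder itself: the constructor gains an IntrusiveRefCntPtr<vfs::FileSystem> parameter defaulting to the real file system, getVirtualFileSystemPtr() exposes it, and addPGOInstrPasses() no longer threads FS through as an argument. A minimal usage sketch, assuming only the signatures visible in the hunks; the helper name and the in-memory VFS choice are illustrative, not part of the patch:

#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <optional>

// Sketch only: build a PassBuilder whose profile reads go through an
// in-memory VFS instead of the real file system.
void buildWithInMemoryFS(llvm::TargetMachine *TM) {
  auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
  llvm::PassBuilder PB(TM, llvm::PipelineTuningOptions(), std::nullopt,
                       /*PIC=*/nullptr, FS);
  // Downstream consumers (e.g. the PGO instrumentation passes) now fetch the
  // same file system from the builder rather than receiving it as a parameter.
  llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> SameFS =
      PB.getVirtualFileSystemPtr();
  (void)SameFS;
}
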
@@ -772,8 +775,7 @@ class PassBuilder { void addPGOInstrPasses(ModulePassManager &MPM, OptimizationLevel Level, bool RunProfileGen, bool IsCS, bool AtomicCounterUpdate, std::string ProfileFile, - std::string ProfileRemappingFile, - IntrusiveRefCntPtr FS); + std::string ProfileRemappingFile); void addPostPGOLoopRotation(ModulePassManager &MPM, OptimizationLevel Level); bool isInstrumentedPGOUse() const; diff --git a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h index 7d1a85ba528fc..e09958160b9a0 100644 --- a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h +++ b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h @@ -1215,19 +1215,19 @@ namespace accessors { /// Return the structural hash associated with the function. template uint64_t getFuncHash(const FuncRecordTy *Record) { - return support::endian::byte_swap(Record->FuncHash); + return support::endian::byte_swap(Record->FuncHash, Endian); } /// Return the coverage map data size for the function. template uint64_t getDataSize(const FuncRecordTy *Record) { - return support::endian::byte_swap(Record->DataSize); + return support::endian::byte_swap(Record->DataSize, Endian); } /// Return the function lookup key. The value is considered opaque. template uint64_t getFuncNameRef(const FuncRecordTy *Record) { - return support::endian::byte_swap(Record->NameRef); + return support::endian::byte_swap(Record->NameRef, Endian); } /// Return the PGO name of the function. Used for formats in which the name is @@ -1280,14 +1280,14 @@ struct CovMapFunctionRecordV1 { /// Return function lookup key. The value is consider opaque. template IntPtrT getFuncNameRef() const { - return support::endian::byte_swap(NamePtr); + return support::endian::byte_swap(NamePtr, Endian); } /// Return the PGO name of the function. template Error getFuncName(InstrProfSymtab &ProfileNames, StringRef &FuncName) const { IntPtrT NameRef = getFuncNameRef(); - uint32_t NameS = support::endian::byte_swap(NameSize); + uint32_t NameS = support::endian::byte_swap(NameSize, Endian); FuncName = ProfileNames.getFuncName(NameRef, NameS); if (NameS && FuncName.empty()) return make_error(coveragemap_error::malformed, @@ -1385,7 +1385,7 @@ struct CovMapFunctionRecordV3 { /// Get the filename set reference. template uint64_t getFilenamesRef() const { - return support::endian::byte_swap(FilenamesRef); + return support::endian::byte_swap(FilenamesRef, Endian); } /// Read the inline coverage mapping. 
Ignore the buffer parameter, it is for @@ -1416,19 +1416,19 @@ struct CovMapHeader { #define COVMAP_HEADER(Type, LLVMType, Name, Init) Type Name; #include "llvm/ProfileData/InstrProfData.inc" template uint32_t getNRecords() const { - return support::endian::byte_swap(NRecords); + return support::endian::byte_swap(NRecords, Endian); } template uint32_t getFilenamesSize() const { - return support::endian::byte_swap(FilenamesSize); + return support::endian::byte_swap(FilenamesSize, Endian); } template uint32_t getCoverageSize() const { - return support::endian::byte_swap(CoverageSize); + return support::endian::byte_swap(CoverageSize, Endian); } template uint32_t getVersion() const { - return support::endian::byte_swap(Version); + return support::endian::byte_swap(Version, Endian); } }; diff --git a/llvm/include/llvm/Support/Alignment.h b/llvm/include/llvm/Support/Alignment.h index 84773f1d9c37b..a4ca54e26f18d 100644 --- a/llvm/include/llvm/Support/Alignment.h +++ b/llvm/include/llvm/Support/Alignment.h @@ -52,16 +52,8 @@ struct Align { friend unsigned encode(struct MaybeAlign A); friend struct MaybeAlign decodeMaybeAlign(unsigned Value); - /// A trivial type to allow construction of constexpr Align. - /// This is currently needed to workaround a bug in GCC 5.3 which prevents - /// definition of constexpr assign operators. - /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic - /// FIXME: Remove this, make all assign operators constexpr and introduce user - /// defined literals when we don't have to support GCC 5.3 anymore. - /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain - struct LogValue { - uint8_t Log; - }; + struct FromShiftValue {}; + constexpr Align(FromShiftValue, uint8_t Shift) : ShiftValue(Shift) {} public: /// Default is byte-aligned. @@ -70,8 +62,8 @@ struct Align { /// checks have been performed when building `Other`. constexpr Align(const Align &Other) = default; constexpr Align(Align &&Other) = default; - Align &operator=(const Align &Other) = default; - Align &operator=(Align &&Other) = default; + constexpr Align &operator=(const Align &Other) = default; + constexpr Align &operator=(Align &&Other) = default; explicit Align(uint64_t Value) { assert(Value > 0 && "Value must not be 0"); @@ -82,7 +74,7 @@ struct Align { /// This is a hole in the type system and should not be abused. /// Needed to interact with C for instance. - uint64_t value() const { return uint64_t(1) << ShiftValue; } + constexpr uint64_t value() const { return uint64_t(1) << ShiftValue; } // Returns the previous alignment. Align previous() const { @@ -94,7 +86,7 @@ struct Align { /// Allow constructions of constexpr Align. template constexpr static Align Constant() { - return LogValue{static_cast(ConstantLog2())}; + return Align(FromShiftValue{}, ConstantLog2()); } /// Allow constructions of constexpr Align from types. @@ -102,9 +94,6 @@ struct Align { template constexpr static Align Of() { return Constant>(); } - - /// Constexpr constructor from LogValue type. - constexpr Align(LogValue CA) : ShiftValue(CA.Log) {} }; /// Treats the value 0 as a 1, so Align is always at least 1. 
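Since the Alignment.h hunk above makes the assignment operators and value() constexpr and replaces the old LogValue workaround with FromShiftValue, Align values can now be formed and inspected entirely at compile time. A small illustrative check, not part of the patch:

#include "llvm/Support/Alignment.h"

// Compile-time use of Align enabled by the constexpr additions above.
constexpr llvm::Align Sixteen = llvm::Align::Constant<16>();
static_assert(Sixteen.value() == 16,
              "value() is now usable in constant expressions");
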
diff --git a/llvm/include/llvm/Support/AllocatorBase.h b/llvm/include/llvm/Support/AllocatorBase.h index 0442432250069..6414c5dc5122c 100644 --- a/llvm/include/llvm/Support/AllocatorBase.h +++ b/llvm/include/llvm/Support/AllocatorBase.h @@ -28,6 +28,7 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/MemAlloc.h" #include +#include namespace llvm { @@ -111,7 +112,7 @@ template class AllocatorHolder : Alloc { public: AllocatorHolder() = default; AllocatorHolder(const Alloc &A) : Alloc(A) {} - AllocatorHolder(Alloc &&A) : Alloc(static_cast(A)) {} + AllocatorHolder(Alloc &&A) : Alloc(std::move(A)) {} Alloc &getAllocator() { return *this; } const Alloc &getAllocator() const { return *this; } }; diff --git a/llvm/include/llvm/Support/BinaryStreamRef.h b/llvm/include/llvm/Support/BinaryStreamRef.h index 47009ff0b96fc..8ca312daa3bd7 100644 --- a/llvm/include/llvm/Support/BinaryStreamRef.h +++ b/llvm/include/llvm/Support/BinaryStreamRef.h @@ -209,7 +209,7 @@ struct BinarySubstreamRef { BinarySubstreamRef keep_front(uint64_t N) const { return slice(0, N); } std::pair split(uint64_t Off) const { - return std::make_pair(keep_front(Off), drop_front(Off)); + return {keep_front(Off), drop_front(Off)}; } uint64_t size() const { return StreamData.getLength(); } diff --git a/llvm/include/llvm/Support/Casting.h b/llvm/include/llvm/Support/Casting.h index 66fdcb44ea2c0..2a9a149327d83 100644 --- a/llvm/include/llvm/Support/Casting.h +++ b/llvm/include/llvm/Support/Casting.h @@ -544,14 +544,9 @@ struct CastInfo> : public OptionalValueCast { /// /// if (isa(myVal)) { ... } /// if (isa(myVal)) { ... } -template -[[nodiscard]] inline bool isa(const From &Val) { - return CastInfo::isPossible(Val); -} - -template +template [[nodiscard]] inline bool isa(const From &Val) { - return isa(Val) || isa(Val); + return (CastInfo::isPossible(Val) || ...); } /// cast - Return the argument parameter cast to the specified type. This diff --git a/llvm/include/llvm/Support/CommandLine.h b/llvm/include/llvm/Support/CommandLine.h index b81df756247c9..dd05c530cc06e 100644 --- a/llvm/include/llvm/Support/CommandLine.h +++ b/llvm/include/llvm/Support/CommandLine.h @@ -1193,6 +1193,31 @@ class LLVM_ABI parser : public basic_parser { //-------------------------------------------------- +template <> +class LLVM_ABI parser> + : public basic_parser> { +public: + parser(Option &O) : basic_parser(O) {} + + // Return true on error. + bool parse(Option &, StringRef, StringRef Arg, + std::optional &Value) { + Value = Arg.str(); + return false; + } + + // Overload in subclass to provide a better default value. + StringRef getValueName() const override { return "optional string"; } + + void printOptionDiff(const Option &O, std::optional V, + const OptVal &Default, size_t GlobalWidth) const; + + // An out-of-line virtual method to provide a 'home' for this class. + void anchor() override; +}; + +//-------------------------------------------------- + extern template class LLVM_TEMPLATE_ABI basic_parser; template <> class LLVM_ABI parser : public basic_parser { diff --git a/llvm/include/llvm/Support/DebugCounter.h b/llvm/include/llvm/Support/DebugCounter.h index 89349d1ebffee..48fc60035b189 100644 --- a/llvm/include/llvm/Support/DebugCounter.h +++ b/llvm/include/llvm/Support/DebugCounter.h @@ -136,7 +136,7 @@ class DebugCounter { // Return the name and description of the counter with the given ID. 
std::pair getCounterInfo(unsigned ID) const { - return std::make_pair(RegisteredCounters[ID], Counters.lookup(ID).Desc); + return {RegisteredCounters[ID], Counters.lookup(ID).Desc}; } // Iterate through the registered counters diff --git a/llvm/include/llvm/Support/Endian.h b/llvm/include/llvm/Support/Endian.h index 7eb1d7e8dfe7f..51db225841dbe 100644 --- a/llvm/include/llvm/Support/Endian.h +++ b/llvm/include/llvm/Support/Endian.h @@ -49,7 +49,9 @@ template /// Swap the bytes of value to match the given endianness. template -[[nodiscard]] inline value_type byte_swap(value_type value) { +[[nodiscard]] +LLVM_DEPRECATED("Pass endian as a function argument instead", + "byte_swap") inline value_type byte_swap(value_type value) { return byte_swap(value, endian); } @@ -66,7 +68,9 @@ template } template -[[nodiscard]] inline value_type read(const void *memory) { +[[nodiscard]] LLVM_DEPRECATED("Pass endian as a function argument instead", + "read") inline value_type + read(const void *memory) { return read(memory, endian); } @@ -127,7 +131,7 @@ template uint64_t startBit) { assert(startBit < 8); if (startBit == 0) - return read(memory); + return read(memory, endian); else { // Read two values and compose the result from them. value_type val[2]; @@ -135,8 +139,8 @@ template LLVM_ASSUME_ALIGNED( memory, (detail::PickAlignment::value)), sizeof(value_type) * 2); - val[0] = byte_swap(val[0]); - val[1] = byte_swap(val[1]); + val[0] = byte_swap(val[0], endian); + val[1] = byte_swap(val[1], endian); // Shift bits from the lower value into place. make_unsigned_t lowerVal = val[0] >> startBit; @@ -170,8 +174,8 @@ inline void writeAtBitAlignment(void *memory, value_type value, LLVM_ASSUME_ALIGNED( memory, (detail::PickAlignment::value)), sizeof(value_type) * 2); - val[0] = byte_swap(val[0]); - val[1] = byte_swap(val[1]); + val[0] = byte_swap(val[0], endian); + val[1] = byte_swap(val[1], endian); // Mask off any existing bits in the upper part of the lower value that // we want to replace. @@ -199,8 +203,8 @@ inline void writeAtBitAlignment(void *memory, value_type value, val[1] |= upperVal; // Finally, rewrite values. - val[0] = byte_swap(val[0]); - val[1] = byte_swap(val[1]); + val[0] = byte_swap(val[0], endian); + val[1] = byte_swap(val[1], endian); memcpy(LLVM_ASSUME_ALIGNED( memory, (detail::PickAlignment::value)), &val[0], sizeof(value_type) * 2); @@ -223,8 +227,8 @@ struct packed_endian_specific_integral { explicit packed_endian_specific_integral(value_type val) { *this = val; } value_type value() const { - return endian::read( - (const void*)Value.buffer); + return endian::read((const void *)Value.buffer, + endian); } operator value_type() const { return value(); } @@ -263,7 +267,7 @@ struct packed_endian_specific_integral { explicit ref(void *Ptr) : Ptr(Ptr) {} operator value_type() const { - return endian::read(Ptr); + return endian::read(Ptr, endian); } void operator=(value_type NewValue) { diff --git a/llvm/include/llvm/Support/FileCollector.h b/llvm/include/llvm/Support/FileCollector.h index b00bf3174e654..9fa11ba362241 100644 --- a/llvm/include/llvm/Support/FileCollector.h +++ b/llvm/include/llvm/Support/FileCollector.h @@ -81,19 +81,28 @@ class LLVM_ABI FileCollector : public FileCollectorBase { /// Canonicalize a pair of virtual and real paths. LLVM_ABI PathStorage canonicalize(StringRef SrcPath); + /// Return the underlying file system. 
+ vfs::FileSystem &getFileSystem() const { return *VFS; }; + + explicit PathCanonicalizer(IntrusiveRefCntPtr VFS) + : VFS(std::move(VFS)) {} + private: /// Replace with a (mostly) real path, or don't modify. Resolves symlinks /// in the directory, using \a CachedDirs to avoid redundant lookups, but /// leaves the filename as a possible symlink. void updateWithRealPath(SmallVectorImpl &Path); + IntrusiveRefCntPtr VFS; + StringMap CachedDirs; }; /// \p Root is the directory where collected files are will be stored. /// \p OverlayRoot is VFS mapping root. /// \p Root directory gets created in copyFiles unless it already exists. - FileCollector(std::string Root, std::string OverlayRoot); + FileCollector(std::string Root, std::string OverlayRoot, + IntrusiveRefCntPtr VFS); /// Write the yaml mapping (for the VFS) to the given file. std::error_code writeMapping(StringRef MappingFile); diff --git a/llvm/include/llvm/Support/FileSystem.h b/llvm/include/llvm/Support/FileSystem.h index c203779307840..cf2a8104ac813 100644 --- a/llvm/include/llvm/Support/FileSystem.h +++ b/llvm/include/llvm/Support/FileSystem.h @@ -266,18 +266,6 @@ class file_status : public basic_file_status { /// @name Physical Operators /// @{ -/// Make \a path an absolute path. -/// -/// Makes \a path absolute using the \a current_directory if it is not already. -/// An empty \a path will result in the \a current_directory. -/// -/// /absolute/path => /absolute/path -/// relative/../path => /relative/../path -/// -/// @param path A path that is modified to be an absolute path. -LLVM_ABI void make_absolute(const Twine ¤t_directory, - SmallVectorImpl &path); - /// Make \a path an absolute path. /// /// Makes \a path absolute using the current directory if it is not already. An diff --git a/llvm/include/llvm/Support/Format.h b/llvm/include/llvm/Support/Format.h index 2553002b37899..34b224dba5407 100644 --- a/llvm/include/llvm/Support/Format.h +++ b/llvm/include/llvm/Support/Format.h @@ -78,16 +78,6 @@ class LLVM_ABI format_object_base { /// printed, this synthesizes the string into a temporary buffer provided and /// returns whether or not it is big enough. -// Helper to validate that format() parameters are scalars or pointers. -template struct validate_format_parameters; -template -struct validate_format_parameters { - static_assert(std::is_scalar_v, - "format can't be used with non fundamental / non pointer type"); - validate_format_parameters() { validate_format_parameters(); } -}; -template <> struct validate_format_parameters<> {}; - template class format_object final : public format_object_base { std::tuple Vals; @@ -105,7 +95,9 @@ class format_object final : public format_object_base { public: format_object(const char *fmt, const Ts &... vals) : format_object_base(fmt), Vals(vals...) 
{ - validate_format_parameters(); + static_assert( + (std::is_scalar_v && ...), + "format can't be used with non fundamental / non pointer type"); } int snprint(char *Buffer, unsigned BufferSize) const override { diff --git a/llvm/include/llvm/Support/FormatProviders.h b/llvm/include/llvm/Support/FormatProviders.h index 3e0800e1efe6c..8eaa5e382c73e 100644 --- a/llvm/include/llvm/Support/FormatProviders.h +++ b/llvm/include/llvm/Support/FormatProviders.h @@ -29,22 +29,18 @@ namespace support { namespace detail { template struct use_integral_formatter - : public std::bool_constant< - is_one_of::value> {}; + : public is_one_of {}; template -struct use_char_formatter : public std::bool_constant> { -}; +struct use_char_formatter : public std::is_same {}; template -struct is_cstring - : public std::bool_constant::value> {}; +struct is_cstring : public is_one_of {}; template -struct use_string_formatter - : public std::bool_constant> {}; +struct use_string_formatter : public std::is_convertible {}; template struct use_pointer_formatter @@ -52,8 +48,7 @@ struct use_pointer_formatter }; template -struct use_double_formatter - : public std::bool_constant> {}; +struct use_double_formatter : public std::is_floating_point {}; class HelperFunctions { protected: @@ -389,7 +384,7 @@ template class format_provider> { StringRef Sep = consumeOneOption(Style, '$', ", "); StringRef Args = consumeOneOption(Style, '@', ""); assert(Style.empty() && "Unexpected text in range option string!"); - return std::make_pair(Sep, Args); + return {Sep, Args}; } public: diff --git a/llvm/include/llvm/Support/FormatVariadicDetails.h b/llvm/include/llvm/Support/FormatVariadicDetails.h index 4002caf76675c..0fdc7b6f94da7 100644 --- a/llvm/include/llvm/Support/FormatVariadicDetails.h +++ b/llvm/include/llvm/Support/FormatVariadicDetails.h @@ -92,8 +92,7 @@ template class has_StreamOperator { // based format() invocation. template struct uses_format_member - : public std::bool_constant< - std::is_base_of_v>> {}; + : public std::is_base_of> {}; // Simple template that decides whether a type T should use the format_provider // based format() invocation. The member function takes priority, so this test diff --git a/llvm/include/llvm/Support/HashBuilder.h b/llvm/include/llvm/Support/HashBuilder.h index ae266d3f19a1a..d0130d61af59b 100644 --- a/llvm/include/llvm/Support/HashBuilder.h +++ b/llvm/include/llvm/Support/HashBuilder.h @@ -31,8 +31,7 @@ namespace llvm { namespace hashbuilder_detail { /// Trait to indicate whether a type's bits can be hashed directly (after /// endianness correction). 
-template -struct IsHashableData : std::bool_constant::value> {}; +template struct IsHashableData : is_integral_or_enum {}; } // namespace hashbuilder_detail diff --git a/llvm/include/llvm/Support/InstructionCost.h b/llvm/include/llvm/Support/InstructionCost.h index ab1c8ebc8c95e..507c16666b958 100644 --- a/llvm/include/llvm/Support/InstructionCost.h +++ b/llvm/include/llvm/Support/InstructionCost.h @@ -59,8 +59,8 @@ class InstructionCost { State = Invalid; } - static CostType getMaxValue() { return std::numeric_limits::max(); } - static CostType getMinValue() { return std::numeric_limits::min(); } + static constexpr CostType MaxValue = std::numeric_limits::max(); + static constexpr CostType MinValue = std::numeric_limits::min(); public: // A default constructed InstructionCost is a valid zero cost @@ -69,8 +69,8 @@ class InstructionCost { InstructionCost(CostState) = delete; InstructionCost(CostType Val) : Value(Val), State(Valid) {} - static InstructionCost getMax() { return getMaxValue(); } - static InstructionCost getMin() { return getMinValue(); } + static InstructionCost getMax() { return MaxValue; } + static InstructionCost getMin() { return MinValue; } static InstructionCost getInvalid(CostType Val = 0) { InstructionCost Tmp(Val); Tmp.setInvalid(); @@ -102,7 +102,7 @@ class InstructionCost { // Saturating addition. InstructionCost::CostType Result; if (AddOverflow(Value, RHS.Value, Result)) - Result = RHS.Value > 0 ? getMaxValue() : getMinValue(); + Result = RHS.Value > 0 ? MaxValue : MinValue; Value = Result; return *this; @@ -120,7 +120,7 @@ class InstructionCost { // Saturating subtract. InstructionCost::CostType Result; if (SubOverflow(Value, RHS.Value, Result)) - Result = RHS.Value > 0 ? getMinValue() : getMaxValue(); + Result = RHS.Value > 0 ? MinValue : MaxValue; Value = Result; return *this; } @@ -138,9 +138,9 @@ class InstructionCost { InstructionCost::CostType Result; if (MulOverflow(Value, RHS.Value, Result)) { if ((Value > 0 && RHS.Value > 0) || (Value < 0 && RHS.Value < 0)) - Result = getMaxValue(); + Result = MaxValue; else - Result = getMinValue(); + Result = MinValue; } Value = Result; diff --git a/llvm/include/llvm/Support/MD5.h b/llvm/include/llvm/Support/MD5.h index 66e2119f8a132..ed29826bab0cb 100644 --- a/llvm/include/llvm/Support/MD5.h +++ b/llvm/include/llvm/Support/MD5.h @@ -57,7 +57,7 @@ class MD5 { } std::pair words() const { using namespace support; - return std::make_pair(high(), low()); + return {high(), low()}; } }; diff --git a/llvm/include/llvm/Support/Mustache.h b/llvm/include/llvm/Support/Mustache.h index 781ec557950ec..ee9f40638fd12 100644 --- a/llvm/include/llvm/Support/Mustache.h +++ b/llvm/include/llvm/Support/Mustache.h @@ -85,6 +85,14 @@ using SectionLambda = std::function; class ASTNode; using AstPtr = std::unique_ptr; +using EscapeMap = DenseMap; + +struct MustacheContext { + StringMap Partials; + StringMap Lambdas; + StringMap SectionLambdas; + EscapeMap Escapes; +}; // A Template represents the container for the AST and the partials // and Lambdas that are registered with it. 
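The Mustache.h hunk above folds the four per-template registries into a single MustacheContext. A rough stand-in showing the shape of that grouping; the element types are assumed here because the hunk elides template arguments, so treat the names as illustrative only:

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include <string>

// Stand-in mirroring the new MustacheContext idea (element types assumed):
// one aggregate owns the partials and escape table instead of the Template
// class carrying several separate members.
struct SketchContext {
  llvm::StringMap<std::string> Partials;     // assumed value type
  llvm::DenseMap<char, std::string> Escapes; // assumed EscapeMap definition
};

inline SketchContext makeHtmlEscapingContext() {
  SketchContext Ctx;
  Ctx.Escapes['&'] = "&amp;";
  Ctx.Escapes['<'] = "&lt;";
  Ctx.Escapes['>'] = "&gt;";
  return Ctx;
}
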
@@ -118,10 +126,7 @@ class Template { LLVM_ABI void overrideEscapeCharacters(DenseMap Escapes); private: - StringMap Partials; - StringMap Lambdas; - StringMap SectionLambdas; - DenseMap Escapes; + MustacheContext Ctx; AstPtr Tree; }; } // namespace llvm::mustache diff --git a/llvm/include/llvm/Support/OnDiskHashTable.h b/llvm/include/llvm/Support/OnDiskHashTable.h index f6b4055e74de7..d7d72cfbbc649 100644 --- a/llvm/include/llvm/Support/OnDiskHashTable.h +++ b/llvm/include/llvm/Support/OnDiskHashTable.h @@ -309,7 +309,7 @@ template class OnDiskChainedHashTable { offset_type NumEntries = endian::readNext( Buckets); - return std::make_pair(NumBuckets, NumEntries); + return {NumBuckets, NumEntries}; } offset_type getNumBuckets() const { return NumBuckets; } diff --git a/llvm/include/llvm/Support/PGOOptions.h b/llvm/include/llvm/Support/PGOOptions.h index 6527a18258bf8..fb1dc0cf4aa0a 100644 --- a/llvm/include/llvm/Support/PGOOptions.h +++ b/llvm/include/llvm/Support/PGOOptions.h @@ -14,16 +14,10 @@ #ifndef LLVM_SUPPORT_PGOOPTIONS_H #define LLVM_SUPPORT_PGOOPTIONS_H -#include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Error.h" namespace llvm { - -namespace vfs { -class FileSystem; -} // namespace vfs - /// A struct capturing PGO tunables. struct PGOOptions { enum PGOAction { NoAction, IRInstr, IRUse, SampleUse }; @@ -31,9 +25,7 @@ struct PGOOptions { enum class ColdFuncOpt { Default, OptSize, MinSize, OptNone }; LLVM_ABI PGOOptions(std::string ProfileFile, std::string CSProfileGenFile, std::string ProfileRemappingFile, - std::string MemoryProfile, - IntrusiveRefCntPtr FS, - PGOAction Action = NoAction, + std::string MemoryProfile, PGOAction Action = NoAction, CSPGOAction CSAction = NoCSAction, ColdFuncOpt ColdType = ColdFuncOpt::Default, bool DebugInfoForProfiling = false, @@ -53,7 +45,6 @@ struct PGOOptions { bool DebugInfoForProfiling; bool PseudoProbeForProfiling; bool AtomicCounterUpdate; - IntrusiveRefCntPtr FS; }; } // namespace llvm diff --git a/llvm/include/llvm/Support/Path.h b/llvm/include/llvm/Support/Path.h index 0cb517146c04b..a8e0f338ec203 100644 --- a/llvm/include/llvm/Support/Path.h +++ b/llvm/include/llvm/Support/Path.h @@ -566,6 +566,18 @@ LLVM_ABI bool is_absolute_gnu(const Twine &path, Style style = Style::native); /// @result True if the path is relative, false if it is not. LLVM_ABI bool is_relative(const Twine &path, Style style = Style::native); +/// Make \a path an absolute path. +/// +/// Makes \a path absolute using the \a current_directory if it is not already. +/// An empty \a path will result in the \a current_directory. +/// +/// /absolute/path => /absolute/path +/// relative/../path => /relative/../path +/// +/// @param path A path that is modified to be an absolute path. 
+LLVM_ABI void make_absolute(const Twine ¤t_directory, + SmallVectorImpl &path); + } // end namespace path } // end namespace sys } // end namespace llvm diff --git a/llvm/include/llvm/Support/ProgramStack.h b/llvm/include/llvm/Support/ProgramStack.h index 0dd8235b90c06..13729a2990588 100644 --- a/llvm/include/llvm/Support/ProgramStack.h +++ b/llvm/include/llvm/Support/ProgramStack.h @@ -46,17 +46,15 @@ LLVM_ABI unsigned getDefaultStackSize(); LLVM_ABI void runOnNewStack(unsigned StackSize, function_ref Fn); template -std::enable_if_t, R> -runOnNewStack(unsigned StackSize, function_ref Fn, Ts &&...Args) { - std::optional Ret; - runOnNewStack(StackSize, [&]() { Ret = Fn(std::forward(Args)...); }); - return std::move(*Ret); -} - -template -void runOnNewStack(unsigned StackSize, function_ref Fn, +auto runOnNewStack(unsigned StackSize, function_ref Fn, Ts &&...Args) { - runOnNewStack(StackSize, [&]() { Fn(std::forward(Args)...); }); + if constexpr (std::is_same_v) { + runOnNewStack(StackSize, [&]() { Fn(std::forward(Args)...); }); + } else { + std::optional Ret; + runOnNewStack(StackSize, [&]() { Ret = Fn(std::forward(Args)...); }); + return std::move(*Ret); + } } } // namespace llvm diff --git a/llvm/include/llvm/Support/Registry.h b/llvm/include/llvm/Support/Registry.h index ff9226c39359c..c02f15e5e32b8 100644 --- a/llvm/include/llvm/Support/Registry.h +++ b/llvm/include/llvm/Support/Registry.h @@ -58,8 +58,8 @@ namespace llvm { // declaration causing error C2487 "member of dll interface class may not // be declared with dll interface". // https://developercommunity.visualstudio.com/t/c2487-in-dllexport-class-with-static-members/69878 - static node *Head; - static node *Tail; + static inline node *Head = nullptr; + static inline node *Tail = nullptr; public: /// Node in linked list of entries. @@ -143,19 +143,11 @@ namespace llvm { /// Instantiate a registry class. #define LLVM_INSTANTIATE_REGISTRY(REGISTRY_CLASS) \ namespace llvm { \ - template \ - typename Registry::node *Registry::Head = nullptr; \ - template \ - typename Registry::node *Registry::Tail = nullptr; \ template class LLVM_ABI_EXPORT Registry; \ } #else #define LLVM_INSTANTIATE_REGISTRY(REGISTRY_CLASS) \ namespace llvm { \ - template \ - typename Registry::node *Registry::Head = nullptr; \ - template \ - typename Registry::node *Registry::Tail = nullptr; \ template class Registry; \ } #endif diff --git a/llvm/include/llvm/Support/SMLoc.h b/llvm/include/llvm/Support/SMLoc.h index d7dde81ce0be7..c80969b1d83dc 100644 --- a/llvm/include/llvm/Support/SMLoc.h +++ b/llvm/include/llvm/Support/SMLoc.h @@ -28,8 +28,8 @@ class SMLoc { constexpr bool isValid() const { return Ptr != nullptr; } - constexpr bool operator==(const SMLoc &RHS) const { return RHS.Ptr == Ptr; } - constexpr bool operator!=(const SMLoc &RHS) const { return RHS.Ptr != Ptr; } + constexpr bool operator==(SMLoc RHS) const { return RHS.Ptr == Ptr; } + constexpr bool operator!=(SMLoc RHS) const { return RHS.Ptr != Ptr; } constexpr const char *getPointer() const { return Ptr; } diff --git a/llvm/include/llvm/Support/ScaledNumber.h b/llvm/include/llvm/Support/ScaledNumber.h index 87a56809976a3..07baf153e10c6 100644 --- a/llvm/include/llvm/Support/ScaledNumber.h +++ b/llvm/include/llvm/Support/ScaledNumber.h @@ -57,8 +57,8 @@ inline std::pair getRounded(DigitsT Digits, int16_t Scale, if (ShouldRound) if (!++Digits) // Overflow. 
- return std::make_pair(DigitsT(1) << (getWidth() - 1), Scale + 1); - return std::make_pair(Digits, Scale); + return {DigitsT(1) << (getWidth() - 1), Scale + 1}; + return {Digits, Scale}; } /// Convenience helper for 32-bit rounding. @@ -83,7 +83,7 @@ inline std::pair getAdjusted(uint64_t Digits, const int Width = getWidth(); if (Width == 64 || Digits <= std::numeric_limits::max()) - return std::make_pair(Digits, Scale); + return {Digits, Scale}; // Shift right and round. int Shift = llvm::bit_width(Digits) - Width; @@ -160,9 +160,9 @@ std::pair getQuotient(DigitsT Dividend, DigitsT Divisor) { // Check for zero. if (!Dividend) - return std::make_pair(0, 0); + return {0, 0}; if (!Divisor) - return std::make_pair(std::numeric_limits::max(), MaxScale); + return {std::numeric_limits::max(), MaxScale}; if (getWidth() == 64) return divide64(Dividend, Divisor); @@ -192,7 +192,7 @@ inline std::pair getLgImpl(DigitsT Digits, int16_t Scale) { static_assert(!std::numeric_limits::is_signed, "expected unsigned"); if (!Digits) - return std::make_pair(INT32_MIN, 0); + return {INT32_MIN, 0}; // Get the floor of the lg of Digits. static_assert(sizeof(Digits) <= sizeof(uint64_t)); @@ -201,12 +201,12 @@ inline std::pair getLgImpl(DigitsT Digits, int16_t Scale) { // Get the actual floor. int32_t Floor = Scale + LocalFloor; if (Digits == UINT64_C(1) << LocalFloor) - return std::make_pair(Floor, 0); + return {Floor, 0}; // Round based on the next digit. assert(LocalFloor >= 1); bool Round = Digits & UINT64_C(1) << (LocalFloor - 1); - return std::make_pair(Floor + Round, Round ? 1 : -1); + return {Floor + Round, Round ? 1 : -1}; } /// Get the lg (rounded) of a scaled number. @@ -348,11 +348,11 @@ std::pair getSum(DigitsT LDigits, int16_t LScale, // Compute sum. DigitsT Sum = LDigits + RDigits; if (Sum >= RDigits) - return std::make_pair(Sum, Scale); + return {Sum, Scale}; // Adjust sum after arithmetic overflow. DigitsT HighBit = DigitsT(1) << (getWidth() - 1); - return std::make_pair(HighBit | Sum >> 1, Scale + 1); + return {HighBit | Sum >> 1, Scale + 1}; } /// Convenience helper for 32-bit sum. @@ -384,18 +384,18 @@ std::pair getDifference(DigitsT LDigits, int16_t LScale, // Compute difference. if (LDigits <= RDigits) - return std::make_pair(0, 0); + return {0, 0}; if (RDigits || !SavedRDigits) - return std::make_pair(LDigits - RDigits, LScale); + return {LDigits - RDigits, LScale}; // Check if RDigits just barely lost its last bit. E.g., for 32-bit: // // 1*2^32 - 1*2^0 == 0xffffffff != 1*2^32 const auto RLgFloor = getLgFloor(SavedRDigits, SavedRScale); if (!compare(LDigits, LScale, DigitsT(1), RLgFloor + getWidth())) - return std::make_pair(std::numeric_limits::max(), RLgFloor); + return {std::numeric_limits::max(), RLgFloor}; - return std::make_pair(LDigits, LScale); + return {LDigits, LScale}; } /// Convenience helper for 32-bit difference. @@ -435,9 +435,9 @@ class ScaledNumberBase { static std::pair splitSigned(int64_t N) { if (N >= 0) - return std::make_pair(N, false); + return {N, false}; uint64_t Unsigned = N == INT64_MIN ? 
UINT64_C(1) << 63 : uint64_t(-N); - return std::make_pair(Unsigned, true); + return {Unsigned, true}; } static int64_t joinSigned(uint64_t U, bool IsNeg) { if (U > uint64_t(INT64_MAX)) diff --git a/llvm/include/llvm/Support/SipHash.h b/llvm/include/llvm/Support/SipHash.h index 910cf59432c69..b090565641526 100644 --- a/llvm/include/llvm/Support/SipHash.h +++ b/llvm/include/llvm/Support/SipHash.h @@ -33,6 +33,13 @@ LLVM_ABI void getSipHash_2_4_64(ArrayRef In, const uint8_t (&K)[16], LLVM_ABI void getSipHash_2_4_128(ArrayRef In, const uint8_t (&K)[16], uint8_t (&Out)[16]); +/// Compute a stable 64-bit hash of the given string. +/// +/// The exact algorithm is the little-endian interpretation of the +/// non-doubled (i.e. 64-bit) result of applying a SipHash-2-4 using +/// a specific seed value which can be found in the source. +LLVM_ABI uint64_t getStableSipHash(StringRef Str); + /// Compute a stable non-zero 16-bit hash of the given string. /// /// The exact algorithm is the little-endian interpretation of the diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def index b905576b61791..e55314568d683 100644 --- a/llvm/include/llvm/Support/TargetOpcodes.def +++ b/llvm/include/llvm/Support/TargetOpcodes.def @@ -650,6 +650,9 @@ HANDLE_TARGET_OPCODE(G_FDIV) /// Generic FP remainder. HANDLE_TARGET_OPCODE(G_FREM) +/// Generic FP modf +HANDLE_TARGET_OPCODE(G_FMODF) + /// Generic FP exponentiation. HANDLE_TARGET_OPCODE(G_FPOW) @@ -745,6 +748,7 @@ HANDLE_TARGET_OPCODE(G_SET_FPMODE) HANDLE_TARGET_OPCODE(G_RESET_FPMODE) HANDLE_TARGET_OPCODE(G_GET_ROUNDING) +HANDLE_TARGET_OPCODE(G_SET_ROUNDING) /// Generic pointer offset HANDLE_TARGET_OPCODE(G_PTR_ADD) diff --git a/llvm/include/llvm/Support/TrailingObjects.h b/llvm/include/llvm/Support/TrailingObjects.h index d7211a930ae49..dc03285c4994b 100644 --- a/llvm/include/llvm/Support/TrailingObjects.h +++ b/llvm/include/llvm/Support/TrailingObjects.h @@ -57,25 +57,9 @@ namespace llvm { namespace trailing_objects_internal { -/// Helper template to calculate the max alignment requirement for a set of -/// objects. -template class AlignmentCalcHelper { -private: - enum { - FirstAlignment = alignof(First), - RestAlignment = AlignmentCalcHelper::Alignment, - }; - -public: - enum { - Alignment = FirstAlignment > RestAlignment ? FirstAlignment : RestAlignment - }; -}; -template class AlignmentCalcHelper { -public: - enum { Alignment = alignof(First) }; -}; +template +inline constexpr size_t MaxAlignment = std::max({alignof(T)...}); /// The base class for TrailingObjects* classes. class TrailingObjectsBase { @@ -209,11 +193,10 @@ class alignas(Align) TrailingObjectsImpl /// See the file comment for details on the usage of the /// TrailingObjects type. 
template -class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl< - trailing_objects_internal::AlignmentCalcHelper< - TrailingTys...>::Alignment, - BaseTy, TrailingObjects, - BaseTy, TrailingTys...> { +class TrailingObjects + : private trailing_objects_internal::TrailingObjectsImpl< + trailing_objects_internal::MaxAlignment, BaseTy, + TrailingObjects, BaseTy, TrailingTys...> { template friend class trailing_objects_internal::TrailingObjectsImpl; @@ -221,8 +204,8 @@ class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl< template class Foo {}; typedef trailing_objects_internal::TrailingObjectsImpl< - trailing_objects_internal::AlignmentCalcHelper::Alignment, - BaseTy, TrailingObjects, BaseTy, TrailingTys...> + trailing_objects_internal::MaxAlignment, BaseTy, + TrailingObjects, BaseTy, TrailingTys...> ParentType; using TrailingObjectsBase = trailing_objects_internal::TrailingObjectsBase; @@ -301,11 +284,8 @@ class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl< /// (which must be one of those specified in the class template). The /// array may have zero or more elements in it. template T *getTrailingObjects() { - verifyTrailingObjectsAssertions(); - // Forwards to an impl function with overloads, since member - // function templates can't be specialized. - return this->getTrailingObjectsImpl( - static_cast(this), TrailingObjectsBase::OverloadToken()); + return const_cast( + static_cast(this)->getTrailingObjects()); } // getTrailingObjects() specialization for a single trailing type. @@ -323,13 +303,8 @@ class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl< } FirstTrailingType *getTrailingObjects() { - static_assert(sizeof...(TrailingTys) == 1, - "Can use non-templated getTrailingObjects() only when there " - "is a single trailing type"); - verifyTrailingObjectsAssertions(); - return this->getTrailingObjectsImpl( - static_cast(this), - TrailingObjectsBase::OverloadToken()); + return const_cast( + static_cast(this)->getTrailingObjects()); } // Functions that return the trailing objects as ArrayRefs. @@ -359,9 +334,8 @@ class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl< } template T *getTrailingObjectsNonStrict() { - verifyTrailingObjectsAssertions(); - return this->getTrailingObjectsImpl( - static_cast(this), TrailingObjectsBase::OverloadToken()); + return const_cast(static_cast(this) + ->getTrailingObjectsNonStrict()); } template diff --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h index 29d1c6894b4b6..0a7ae15edbb33 100644 --- a/llvm/include/llvm/Support/TypeSize.h +++ b/llvm/include/llvm/Support/TypeSize.h @@ -179,7 +179,7 @@ template class FixedOrScalableQuantity { /// This function tells the caller whether the element count is known at /// compile time to be a multiple of the scalar value RHS. constexpr bool isKnownMultipleOf(ScalarTy RHS) const { - return getKnownMinValue() % RHS == 0; + return RHS != 0 && getKnownMinValue() % RHS == 0; } /// Returns whether or not the callee is known to be a multiple of RHS. @@ -191,7 +191,8 @@ template class FixedOrScalableQuantity { // x % y == 0 !=> x % (vscale * y) == 0 if (!isScalable() && RHS.isScalable()) return false; - return getKnownMinValue() % RHS.getKnownMinValue() == 0; + return RHS.getKnownMinValue() != 0 && + getKnownMinValue() % RHS.getKnownMinValue() == 0; } // Return the minimum value with the assumption that the count is exact. 
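The isKnownMultipleOf changes above guard against a zero divisor instead of evaluating x % 0. A minimal sketch of the resulting behaviour, using illustrative element counts (the helper name knownMultipleExample is hypothetical):

#include "llvm/Support/TypeSize.h"
#include <cassert>

void knownMultipleExample() {
  llvm::ElementCount EC = llvm::ElementCount::getFixed(8);
  assert(EC.isKnownMultipleOf(4));   // 8 is a known multiple of 4.
  // A zero divisor previously hit a modulo-by-zero; it now simply returns false.
  assert(!EC.isKnownMultipleOf(0));
  // The two-argument form is guarded the same way for a zero known-min value.
  assert(!EC.isKnownMultipleOf(llvm::ElementCount::getFixed(0)));
}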
diff --git a/llvm/include/llvm/Support/YAMLTraits.h b/llvm/include/llvm/Support/YAMLTraits.h index 81e3e2e41e86d..3d36f41ca1a04 100644 --- a/llvm/include/llvm/Support/YAMLTraits.h +++ b/llvm/include/llvm/Support/YAMLTraits.h @@ -705,7 +705,7 @@ class LLVM_ABI IO { virtual bool mapTag(StringRef Tag, bool Default = false) = 0; virtual void beginMapping() = 0; virtual void endMapping() = 0; - virtual bool preflightKey(const char *, bool, bool, bool &, void *&) = 0; + virtual bool preflightKey(StringRef, bool, bool, bool &, void *&) = 0; virtual void postflightKey(void *) = 0; virtual std::vector keys() = 0; @@ -713,12 +713,12 @@ class LLVM_ABI IO { virtual void endFlowMapping() = 0; virtual void beginEnumScalar() = 0; - virtual bool matchEnumScalar(const char *, bool) = 0; + virtual bool matchEnumScalar(StringRef, bool) = 0; virtual bool matchEnumFallback() = 0; virtual void endEnumScalar() = 0; virtual bool beginBitSetScalar(bool &) = 0; - virtual bool bitSetMatch(const char *, bool) = 0; + virtual bool bitSetMatch(StringRef, bool) = 0; virtual void endBitSetScalar() = 0; virtual void scalarString(StringRef &, QuotingType) = 0; @@ -731,8 +731,7 @@ class LLVM_ABI IO { virtual std::error_code error() = 0; virtual void setAllowUnknownKeys(bool Allow); - template - void enumCase(T &Val, const char *Str, const T ConstVal) { + template void enumCase(T &Val, StringRef Str, const T ConstVal) { if (matchEnumScalar(Str, outputting() && Val == ConstVal)) { Val = ConstVal; } @@ -740,7 +739,7 @@ class LLVM_ABI IO { // allow anonymous enum values to be used with LLVM_YAML_STRONG_TYPEDEF template - void enumCase(T &Val, const char *Str, const uint32_t ConstVal) { + void enumCase(T &Val, StringRef Str, const uint32_t ConstVal) { if (matchEnumScalar(Str, outputting() && Val == static_cast(ConstVal))) { Val = ConstVal; } @@ -757,7 +756,7 @@ class LLVM_ABI IO { } template - void bitSetCase(T &Val, const char *Str, const T ConstVal) { + void bitSetCase(T &Val, StringRef Str, const T ConstVal) { if (bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal)) { Val = static_cast(Val | ConstVal); } @@ -765,20 +764,20 @@ class LLVM_ABI IO { // allow anonymous enum values to be used with LLVM_YAML_STRONG_TYPEDEF template - void bitSetCase(T &Val, const char *Str, const uint32_t ConstVal) { + void bitSetCase(T &Val, StringRef Str, const uint32_t ConstVal) { if (bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal)) { Val = static_cast(Val | ConstVal); } } template - void maskedBitSetCase(T &Val, const char *Str, T ConstVal, T Mask) { + void maskedBitSetCase(T &Val, StringRef Str, T ConstVal, T Mask) { if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal)) Val = Val | ConstVal; } template - void maskedBitSetCase(T &Val, const char *Str, uint32_t ConstVal, + void maskedBitSetCase(T &Val, StringRef Str, uint32_t ConstVal, uint32_t Mask) { if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal)) Val = Val | ConstVal; @@ -787,29 +786,29 @@ class LLVM_ABI IO { void *getContext() const; void setContext(void *); - template void mapRequired(const char *Key, T &Val) { + template void mapRequired(StringRef Key, T &Val) { EmptyContext Ctx; this->processKey(Key, Val, true, Ctx); } template - void mapRequired(const char *Key, T &Val, Context &Ctx) { + void mapRequired(StringRef Key, T &Val, Context &Ctx) { this->processKey(Key, Val, true, Ctx); } - template void mapOptional(const char *Key, T &Val) { + template void mapOptional(StringRef Key, T &Val) { EmptyContext Ctx; mapOptionalWithContext(Key, Val, 
Ctx); } template - void mapOptional(const char *Key, T &Val, const DefaultT &Default) { + void mapOptional(StringRef Key, T &Val, const DefaultT &Default) { EmptyContext Ctx; mapOptionalWithContext(Key, Val, Default, Ctx); } template - void mapOptionalWithContext(const char *Key, T &Val, Context &Ctx) { + void mapOptionalWithContext(StringRef Key, T &Val, Context &Ctx) { if constexpr (has_SequenceTraits::value) { // omit key/value instead of outputting empty sequence if (this->canElideEmptySequence() && Val.begin() == Val.end()) @@ -819,14 +818,14 @@ class LLVM_ABI IO { } template - void mapOptionalWithContext(const char *Key, std::optional &Val, + void mapOptionalWithContext(StringRef Key, std::optional &Val, Context &Ctx) { this->processKeyWithDefault(Key, Val, std::optional(), /*Required=*/false, Ctx); } template - void mapOptionalWithContext(const char *Key, T &Val, const DefaultT &Default, + void mapOptionalWithContext(StringRef Key, T &Val, const DefaultT &Default, Context &Ctx) { static_assert(std::is_convertible::value, "Default type must be implicitly convertible to value type!"); @@ -836,12 +835,12 @@ class LLVM_ABI IO { private: template - void processKeyWithDefault(const char *Key, std::optional &Val, + void processKeyWithDefault(StringRef Key, std::optional &Val, const std::optional &DefaultValue, bool Required, Context &Ctx); template - void processKeyWithDefault(const char *Key, T &Val, const T &DefaultValue, + void processKeyWithDefault(StringRef Key, T &Val, const T &DefaultValue, bool Required, Context &Ctx) { void *SaveInfo; bool UseDefault; @@ -857,7 +856,7 @@ class LLVM_ABI IO { } template - void processKey(const char *Key, T &Val, bool Required, Context &Ctx) { + void processKey(StringRef Key, T &Val, bool Required, Context &Ctx) { void *SaveInfo; bool UseDefault; if (this->preflightKey(Key, Required, false, UseDefault, SaveInfo)) { @@ -1332,7 +1331,7 @@ class LLVM_ABI Input : public IO { bool mapTag(StringRef, bool) override; void beginMapping() override; void endMapping() override; - bool preflightKey(const char *, bool, bool, bool &, void *&) override; + bool preflightKey(StringRef Key, bool, bool, bool &, void *&) override; void postflightKey(void *) override; std::vector keys() override; void beginFlowMapping() override; @@ -1346,11 +1345,11 @@ class LLVM_ABI Input : public IO { void postflightFlowElement(void *) override; void endFlowSequence() override; void beginEnumScalar() override; - bool matchEnumScalar(const char *, bool) override; + bool matchEnumScalar(StringRef, bool) override; bool matchEnumFallback() override; void endEnumScalar() override; bool beginBitSetScalar(bool &) override; - bool bitSetMatch(const char *, bool) override; + bool bitSetMatch(StringRef, bool) override; void endBitSetScalar() override; void scalarString(StringRef &, QuotingType) override; void blockScalarString(StringRef &) override; @@ -1483,7 +1482,7 @@ class LLVM_ABI Output : public IO { bool mapTag(StringRef, bool) override; void beginMapping() override; void endMapping() override; - bool preflightKey(const char *key, bool, bool, bool &, void *&) override; + bool preflightKey(StringRef Key, bool, bool, bool &, void *&) override; void postflightKey(void *) override; std::vector keys() override; void beginFlowMapping() override; @@ -1497,11 +1496,11 @@ class LLVM_ABI Output : public IO { void postflightFlowElement(void *) override; void endFlowSequence() override; void beginEnumScalar() override; - bool matchEnumScalar(const char *, bool) override; + bool 
matchEnumScalar(StringRef, bool) override; bool matchEnumFallback() override; void endEnumScalar() override; bool beginBitSetScalar(bool &) override; - bool bitSetMatch(const char *, bool) override; + bool bitSetMatch(StringRef, bool) override; void endBitSetScalar() override; void scalarString(StringRef &, QuotingType) override; void blockScalarString(StringRef &) override; @@ -1558,7 +1557,7 @@ class LLVM_ABI Output : public IO { }; template -void IO::processKeyWithDefault(const char *Key, std::optional &Val, +void IO::processKeyWithDefault(StringRef Key, std::optional &Val, const std::optional &DefaultValue, bool Required, Context &Ctx) { assert(!DefaultValue && "std::optional shouldn't have a value!"); diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h index d4fa1e5d65749..cb2721aba4f25 100644 --- a/llvm/include/llvm/TableGen/Record.h +++ b/llvm/include/llvm/TableGen/Record.h @@ -1577,7 +1577,7 @@ class RecordVal { } /// Get the source location of the point where the field was defined. - const SMLoc &getLoc() const { return Loc; } + SMLoc getLoc() const { return Loc; } /// Is this a field where nonconcrete values are okay? bool isNonconcreteOK() const { diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td index ce4750db88c9a..faf77880e4614 100644 --- a/llvm/include/llvm/Target/GenericOpcodes.td +++ b/llvm/include/llvm/Target/GenericOpcodes.td @@ -981,6 +981,13 @@ def G_FREM : GenericInstruction { let hasSideEffects = false; } +/// Generic FP modf +def G_FMODF : GenericInstruction { + let OutOperandList = (outs type0:$dst1, type0:$dst2); + let InOperandList = (ins type0:$src1); + let hasSideEffects = false; +} + // Floating point exponentiation. def G_FPOW : GenericInstruction { let OutOperandList = (outs type0:$dst); @@ -1273,6 +1280,12 @@ def G_GET_ROUNDING : GenericInstruction { let hasSideEffects = true; } +def G_SET_ROUNDING : GenericInstruction { + let OutOperandList = (outs); + let InOperandList = (ins type0:$src); + let hasSideEffects = true; +} + //------------------------------------------------------------------------------ // Memory ops //------------------------------------------------------------------------------ diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h index bf4e490554723..d0fd483a8ddaa 100644 --- a/llvm/include/llvm/Target/TargetMachine.h +++ b/llvm/include/llvm/Target/TargetMachine.h @@ -29,10 +29,10 @@ #include #include -LLVM_ABI extern llvm::cl::opt NoKernelInfoEndLTO; - namespace llvm { +LLVM_ABI extern llvm::cl::opt NoKernelInfoEndLTO; + class AAManager; using ModulePassManager = PassManager; diff --git a/llvm/include/llvm/TextAPI/SymbolSet.h b/llvm/include/llvm/TextAPI/SymbolSet.h index 42c411acb6f9d..22f4124f40313 100644 --- a/llvm/include/llvm/TextAPI/SymbolSet.h +++ b/llvm/include/llvm/TextAPI/SymbolSet.h @@ -92,6 +92,8 @@ class SymbolSet { public: SymbolSet() = default; + SymbolSet(const SymbolSet &other) = delete; + SymbolSet &operator=(const SymbolSet &other) = delete; LLVM_ABI ~SymbolSet(); LLVM_ABI Symbol *addGlobal(EncodeKind Kind, StringRef Name, SymbolFlags Flags, const Target &Targ); diff --git a/llvm/include/llvm/Transforms/Coroutines/CoroInstr.h b/llvm/include/llvm/Transforms/Coroutines/CoroInstr.h index 0688068167ae6..38daf25cacd83 100644 --- a/llvm/include/llvm/Transforms/Coroutines/CoroInstr.h +++ b/llvm/include/llvm/Transforms/Coroutines/CoroInstr.h @@ -428,6 +428,18 @@ class CoroFrameInst : public 
IntrinsicInst { } }; +/// This represents the llvm.coro.is_in_ramp instruction. +class CoroIsInRampInst : public IntrinsicInst { +public: + // Methods to support type inquiry through isa, cast, and dyn_cast: + static bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::coro_is_in_ramp; + } + static bool classof(const Value *V) { + return isa(V) && classof(cast(V)); + } +}; + /// This represents the llvm.coro.free instruction. class CoroFreeInst : public IntrinsicInst { enum { IdArg, FrameArg }; diff --git a/llvm/include/llvm/Transforms/Coroutines/CoroShape.h b/llvm/include/llvm/Transforms/Coroutines/CoroShape.h index c54081de2d9da..11b004572957f 100644 --- a/llvm/include/llvm/Transforms/Coroutines/CoroShape.h +++ b/llvm/include/llvm/Transforms/Coroutines/CoroShape.h @@ -53,6 +53,7 @@ enum class ABI { struct Shape { CoroBeginInst *CoroBegin = nullptr; SmallVector CoroEnds; + SmallVector CoroIsInRampInsts; SmallVector CoroSizes; SmallVector CoroAligns; SmallVector CoroSuspends; @@ -65,6 +66,7 @@ struct Shape { void clear() { CoroBegin = nullptr; CoroEnds.clear(); + CoroIsInRampInsts.clear(); CoroSizes.clear(); CoroAligns.clear(); CoroSuspends.clear(); diff --git a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h index fa313f5290773..d6c2d7fc48bda 100644 --- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h +++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h @@ -64,6 +64,8 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner { /// A worklist of the instructions that need to be simplified. InstructionWorklist &Worklist; + Function &F; + // Mode in which we are running the combiner. const bool MinimizeSize; @@ -98,17 +100,17 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner { bool ComputedBackEdges = false; public: - InstCombiner(InstructionWorklist &Worklist, BuilderTy &Builder, - bool MinimizeSize, AAResults *AA, AssumptionCache &AC, - TargetLibraryInfo &TLI, TargetTransformInfo &TTI, - DominatorTree &DT, OptimizationRemarkEmitter &ORE, - BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, - ProfileSummaryInfo *PSI, const DataLayout &DL, + InstCombiner(InstructionWorklist &Worklist, BuilderTy &Builder, Function &F, + AAResults *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, + TargetTransformInfo &TTI, DominatorTree &DT, + OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, + BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, + const DataLayout &DL, ReversePostOrderTraversal &RPOT) : TTIForTargetIntrinsicsOnly(TTI), Builder(Builder), Worklist(Worklist), - MinimizeSize(MinimizeSize), AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL), - SQ(DL, &TLI, &DT, &AC, nullptr, /*UseInstrInfo*/ true, - /*CanUseUndef*/ true, &DC), + F(F), MinimizeSize(F.hasMinSize()), AA(AA), AC(AC), TLI(TLI), DT(DT), + DL(DL), SQ(DL, &TLI, &DT, &AC, nullptr, /*UseInstrInfo*/ true, + /*CanUseUndef*/ true, &DC), ORE(ORE), BFI(BFI), BPI(BPI), PSI(PSI), RPOT(RPOT) {} virtual ~InstCombiner() = default; diff --git a/llvm/include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h b/llvm/include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h index af3662e4a6565..9c9d6afe1872f 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h +++ b/llvm/include/llvm/Transforms/Instrumentation/DataFlowSanitizer.h @@ -10,6 +10,7 @@ #include "llvm/IR/PassManager.h" #include "llvm/Support/Compiler.h" +#include "llvm/Support/VirtualFileSystem.h" #include #include @@ -19,11 +20,13 @@ class Module; class 
DataFlowSanitizerPass : public PassInfoMixin { private: std::vector ABIListFiles; + IntrusiveRefCntPtr FS; public: DataFlowSanitizerPass( - const std::vector &ABIListFiles = std::vector()) - : ABIListFiles(ABIListFiles) {} + const std::vector &ABIListFiles = std::vector(), + IntrusiveRefCntPtr FS = vfs::getRealFileSystem()) + : ABIListFiles(ABIListFiles), FS(std::move(FS)) {} LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM); static bool isRequired() { return true; } }; diff --git a/llvm/include/llvm/Transforms/Instrumentation/GCOVProfiler.h b/llvm/include/llvm/Transforms/Instrumentation/GCOVProfiler.h index 4d3ead29af0d2..43ef86b08517a 100644 --- a/llvm/include/llvm/Transforms/Instrumentation/GCOVProfiler.h +++ b/llvm/include/llvm/Transforms/Instrumentation/GCOVProfiler.h @@ -14,17 +14,22 @@ #include "llvm/IR/PassManager.h" #include "llvm/Support/Compiler.h" +#include "llvm/Support/VirtualFileSystem.h" #include "llvm/Transforms/Utils/Instrumentation.h" namespace llvm { /// The gcov-style instrumentation pass class GCOVProfilerPass : public PassInfoMixin { public: - GCOVProfilerPass(const GCOVOptions &Options = GCOVOptions::getDefault()) : GCOVOpts(Options) { } + GCOVProfilerPass( + const GCOVOptions &Options = GCOVOptions::getDefault(), + IntrusiveRefCntPtr VFS = vfs::getRealFileSystem()) + : GCOVOpts(Options), VFS(std::move(VFS)) {} LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM); private: GCOVOptions GCOVOpts; + IntrusiveRefCntPtr VFS; }; } // namespace llvm diff --git a/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h b/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h index 61786227d7a33..dfd6e2f3d03ae 100644 --- a/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h +++ b/llvm/include/llvm/Transforms/Scalar/JumpTableToSwitch.h @@ -15,7 +15,12 @@ namespace llvm { class Function; -struct JumpTableToSwitchPass : PassInfoMixin { +class JumpTableToSwitchPass : public PassInfoMixin { + // Necessary until we switch to GUIDs as metadata, after which we can drop it. + const bool InLTO; + +public: + explicit JumpTableToSwitchPass(bool InLTO = false) : InLTO(InLTO) {} /// Run the pass over the function. PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); }; diff --git a/llvm/include/llvm/Transforms/Scalar/SROA.h b/llvm/include/llvm/Transforms/Scalar/SROA.h index c03cdf48fb1c6..8bb65bf7225e0 100644 --- a/llvm/include/llvm/Transforms/Scalar/SROA.h +++ b/llvm/include/llvm/Transforms/Scalar/SROA.h @@ -1,4 +1,4 @@ -//===- SROA.h - Scalar Replacement Of Aggregates ----------------*- C++ -*-===// +//===- SROA.h - Scalar Replacement Of Aggregates ----------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/include/llvm/Transforms/Vectorize/EVLIndVarSimplify.h b/llvm/include/llvm/Transforms/Vectorize/EVLIndVarSimplify.h deleted file mode 100644 index 3178dc762a195..0000000000000 --- a/llvm/include/llvm/Transforms/Vectorize/EVLIndVarSimplify.h +++ /dev/null @@ -1,31 +0,0 @@ -//===------ EVLIndVarSimplify.h - Optimize vectorized loops w/ EVL IV------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This pass optimizes a vectorized loop with canonical IV to using EVL-based -// IV if it was tail-folded by predicated EVL. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_TRANSFORMS_VECTORIZE_EVLINDVARSIMPLIFY_H -#define LLVM_TRANSFORMS_VECTORIZE_EVLINDVARSIMPLIFY_H - -#include "llvm/Analysis/LoopAnalysisManager.h" -#include "llvm/IR/PassManager.h" - -namespace llvm { -class Loop; -class LPMUpdater; - -/// Turn vectorized loops with canonical induction variables into loops that -/// only use a single EVL-based induction variable. -struct EVLIndVarSimplifyPass : public PassInfoMixin { - PreservedAnalyses run(Loop &L, LoopAnalysisManager &LAM, - LoopStandardAnalysisResults &AR, LPMUpdater &U); -}; -} // namespace llvm -#endif diff --git a/llvm/lib/Analysis/AssumptionCache.cpp b/llvm/lib/Analysis/AssumptionCache.cpp index 980a891266e50..61b7b3fa9e2c4 100644 --- a/llvm/lib/Analysis/AssumptionCache.cpp +++ b/llvm/lib/Analysis/AssumptionCache.cpp @@ -53,6 +53,22 @@ AssumptionCache::getOrInsertAffectedValues(Value *V) { return AffectedValues[AffectedValueCallbackVH(V, this)]; } +void AssumptionCache::findValuesAffectedByOperandBundle( + OperandBundleUse Bundle, function_ref InsertAffected) { + auto AddAffectedVal = [&](Value *V) { + if (isa(V)) + InsertAffected(V); + }; + + if (Bundle.getTagName() == "separate_storage") { + assert(Bundle.Inputs.size() == 2 && "separate_storage must have two args"); + AddAffectedVal(getUnderlyingObject(Bundle.Inputs[0])); + AddAffectedVal(getUnderlyingObject(Bundle.Inputs[1])); + } else if (Bundle.Inputs.size() > ABA_WasOn && + Bundle.getTagName() != IgnoreBundleTag) + AddAffectedVal(Bundle.Inputs[ABA_WasOn]); +} + static void findAffectedValues(CallBase *CI, TargetTransformInfo *TTI, SmallVectorImpl &Affected) { @@ -69,17 +85,10 @@ findAffectedValues(CallBase *CI, TargetTransformInfo *TTI, } }; - for (unsigned Idx = 0; Idx != CI->getNumOperandBundles(); Idx++) { - OperandBundleUse Bundle = CI->getOperandBundleAt(Idx); - if (Bundle.getTagName() == "separate_storage") { - assert(Bundle.Inputs.size() == 2 && - "separate_storage must have two args"); - AddAffectedVal(getUnderlyingObject(Bundle.Inputs[0]), Idx); - AddAffectedVal(getUnderlyingObject(Bundle.Inputs[1]), Idx); - } else if (Bundle.Inputs.size() > ABA_WasOn && - Bundle.getTagName() != IgnoreBundleTag) - AddAffectedVal(Bundle.Inputs[ABA_WasOn], Idx); - } + for (unsigned Idx = 0; Idx != CI->getNumOperandBundles(); Idx++) + AssumptionCache::findValuesAffectedByOperandBundle( + CI->getOperandBundleAt(Idx), + [&](Value *V) { Affected.push_back({V, Idx}); }); Value *Cond = CI->getArgOperand(0); findValuesAffectedByCondition(Cond, /*IsAssume=*/true, InsertAffected); @@ -172,7 +181,7 @@ void AssumptionCache::scanFunction() { for (BasicBlock &B : F) for (Instruction &I : B) if (isa(&I)) - AssumeHandles.push_back({&I, ExprResultIdx}); + AssumeHandles.push_back(&I); // Mark the scan as complete. 
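The new AssumptionCache::findValuesAffectedByOperandBundle helper above factors the operand-bundle handling out of findAffectedValues so other clients can reuse it. A hedged sketch of a caller, assuming the helper is declared publicly in AssumptionCache.h as the definition here suggests; the function name collectBundleAffected is hypothetical:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/IntrinsicInst.h"

// Collect every value affected by the operand bundles of an assume.
void collectBundleAffected(llvm::AssumeInst *CI,
                           llvm::SmallVectorImpl<llvm::Value *> &Affected) {
  for (unsigned Idx = 0; Idx != CI->getNumOperandBundles(); ++Idx)
    llvm::AssumptionCache::findValuesAffectedByOperandBundle(
        CI->getOperandBundleAt(Idx),
        [&](llvm::Value *V) { Affected.push_back(V); });
}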
Scanned = true; @@ -188,7 +197,7 @@ void AssumptionCache::registerAssumption(AssumeInst *CI) { if (!Scanned) return; - AssumeHandles.push_back({CI, ExprResultIdx}); + AssumeHandles.push_back(CI); #ifndef NDEBUG assert(CI->getParent() && diff --git a/llvm/lib/Analysis/CaptureTracking.cpp b/llvm/lib/Analysis/CaptureTracking.cpp index a0fe7f9037e47..22229d9c26b3b 100644 --- a/llvm/lib/Analysis/CaptureTracking.cpp +++ b/llvm/lib/Analysis/CaptureTracking.cpp @@ -320,8 +320,12 @@ UseCaptureInfo llvm::DetermineUseCaptureKind(const Use &U, const Value *Base) { return CaptureComponents::None; case Instruction::Store: // Stored the pointer - conservatively assume it may be captured. + if (U.getOperandNo() == 0) + return MDNode::toCaptureComponents( + I->getMetadata(LLVMContext::MD_captures)); + // Volatile stores make the address observable. - if (U.getOperandNo() == 0 || cast(I)->isVolatile()) + if (cast(I)->isVolatile()) return CaptureComponents::All; return CaptureComponents::None; case Instruction::AtomicRMW: { diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp index a3b2e62a1b8ba..d52b073854630 100755 --- a/llvm/lib/Analysis/ConstantFolding.cpp +++ b/llvm/lib/Analysis/ConstantFolding.cpp @@ -951,21 +951,21 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP, // If the base value for this address is a literal integer value, fold the // getelementptr to the resulting integer value casted to the pointer type. - APInt BasePtr(DL.getPointerTypeSizeInBits(Ptr->getType()), 0); + APInt BaseIntVal(DL.getPointerTypeSizeInBits(Ptr->getType()), 0); if (auto *CE = dyn_cast(Ptr)) { if (CE->getOpcode() == Instruction::IntToPtr) { if (auto *Base = dyn_cast(CE->getOperand(0))) - BasePtr = Base->getValue().zextOrTrunc(BasePtr.getBitWidth()); + BaseIntVal = Base->getValue().zextOrTrunc(BaseIntVal.getBitWidth()); } } - auto *PTy = cast(Ptr->getType()); - if ((Ptr->isNullValue() || BasePtr != 0) && - !DL.isNonIntegralPointerType(PTy)) { + if ((Ptr->isNullValue() || BaseIntVal != 0) && + !DL.mustNotIntroduceIntToPtr(Ptr->getType())) { + // If the index size is smaller than the pointer size, add to the low // bits only. - BasePtr.insertBits(BasePtr.trunc(BitWidth) + Offset, 0); - Constant *C = ConstantInt::get(Ptr->getContext(), BasePtr); + BaseIntVal.insertBits(BaseIntVal.trunc(BitWidth) + Offset, 0); + Constant *C = ConstantInt::get(Ptr->getContext(), BaseIntVal); return ConstantExpr::getIntToPtr(C, ResTy); } @@ -2625,8 +2625,17 @@ static Constant *ConstantFoldScalarCall1(StringRef Name, case Intrinsic::nvvm_d2ull_rp: case Intrinsic::nvvm_d2ull_rz: { // In float-to-integer conversion, NaN inputs are converted to 0. - if (U.isNaN()) - return ConstantInt::get(Ty, 0); + if (U.isNaN()) { + // In float-to-integer conversion, NaN inputs are converted to 0 + // when the source and destination bitwidths are both less than 64. + if (nvvm::FPToIntegerIntrinsicNaNZero(IntrinsicID)) + return ConstantInt::get(Ty, 0); + + // Otherwise, the most significant bit is set. + unsigned BitWidth = Ty->getIntegerBitWidth(); + uint64_t Val = 1ULL << (BitWidth - 1); + return ConstantInt::get(Ty, APInt(BitWidth, Val, /*IsSigned=*/false)); + } APFloat::roundingMode RMode = nvvm::GetFPToIntegerRoundingMode(IntrinsicID); @@ -2636,13 +2645,11 @@ static Constant *ConstantFoldScalarCall1(StringRef Name, APSInt ResInt(Ty->getIntegerBitWidth(), !IsSigned); auto FloatToRound = IsFTZ ? 
FTZPreserveSign(U) : U; + // Return max/min value for integers if the result is +/-inf or + // is too large to fit in the result's integer bitwidth. bool IsExact = false; - APFloat::opStatus Status = - FloatToRound.convertToInteger(ResInt, RMode, &IsExact); - - if (Status != APFloat::opInvalidOp) - return ConstantInt::get(Ty, ResInt); - return nullptr; + FloatToRound.convertToInteger(ResInt, RMode, &IsExact); + return ConstantInt::get(Ty, ResInt); } } diff --git a/llvm/lib/Analysis/CtxProfAnalysis.cpp b/llvm/lib/Analysis/CtxProfAnalysis.cpp index a363bce0570e7..c4abec02e765a 100644 --- a/llvm/lib/Analysis/CtxProfAnalysis.cpp +++ b/llvm/lib/Analysis/CtxProfAnalysis.cpp @@ -30,6 +30,9 @@ #define DEBUG_TYPE "ctx_prof" using namespace llvm; + +namespace llvm { + cl::opt UseCtxProfile("use-ctx-profile", cl::init(""), cl::Hidden, cl::desc("Use the specified contextual profile file")); @@ -50,7 +53,6 @@ static cl::opt ForceIsInSpecializedModule( const char *AssignGUIDPass::GUIDMetadataName = "guid"; -namespace llvm { class ProfileAnnotatorImpl final { friend class ProfileAnnotator; class BBInfo; diff --git a/llvm/lib/Analysis/IR2Vec.cpp b/llvm/lib/Analysis/IR2Vec.cpp index 99afc0601d523..295b6d33525d9 100644 --- a/llvm/lib/Analysis/IR2Vec.cpp +++ b/llvm/lib/Analysis/IR2Vec.cpp @@ -15,6 +15,7 @@ #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/Sequence.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Module.h" @@ -216,6 +217,8 @@ void SymbolicEmbedder::computeEmbeddings(const BasicBlock &BB) const { ArgEmb += Vocab[*Op]; auto InstVector = Vocab[I.getOpcode()] + Vocab[I.getType()->getTypeID()] + ArgEmb; + if (const auto *IC = dyn_cast(&I)) + InstVector += Vocab[IC->getPredicate()]; InstVecMap[&I] = InstVector; BBVector += InstVector; } @@ -250,6 +253,9 @@ void FlowAwareEmbedder::computeEmbeddings(const BasicBlock &BB) const { // embeddings auto InstVector = Vocab[I.getOpcode()] + Vocab[I.getType()->getTypeID()] + ArgEmb; + // Add compare predicate embedding as an additional operand if applicable + if (const auto *IC = dyn_cast(&I)) + InstVector += Vocab[IC->getPredicate()]; InstVecMap[&I] = InstVector; BBVector += InstVector; } @@ -257,42 +263,114 @@ void FlowAwareEmbedder::computeEmbeddings(const BasicBlock &BB) const { } // ==----------------------------------------------------------------------===// -// Vocabulary +// VocabStorage //===----------------------------------------------------------------------===// -unsigned Vocabulary::getDimension() const { - assert(isValid() && "IR2Vec Vocabulary is invalid"); - return Vocab[0].size(); +VocabStorage::VocabStorage(std::vector> &&SectionData) + : Sections(std::move(SectionData)), TotalSize([&] { + assert(!Sections.empty() && "Vocabulary has no sections"); + // Compute total size across all sections + size_t Size = 0; + for (const auto &Section : Sections) { + assert(!Section.empty() && "Vocabulary section is empty"); + Size += Section.size(); + } + return Size; + }()), + Dimension([&] { + // Get dimension from the first embedding in the first section - all + // embeddings must have the same dimension + assert(!Sections.empty() && "Vocabulary has no sections"); + assert(!Sections[0].empty() && "First section of vocabulary is empty"); + unsigned ExpectedDim = static_cast(Sections[0][0].size()); + + // Verify that all embeddings across all sections have the same + // dimension + [[maybe_unused]] auto allSameDim = + [ExpectedDim](const std::vector &Section) { + return 
std::all_of(Section.begin(), Section.end(), + [ExpectedDim](const Embedding &Emb) { + return Emb.size() == ExpectedDim; + }); + }; + assert(std::all_of(Sections.begin(), Sections.end(), allSameDim) && + "All embeddings must have the same dimension"); + + return ExpectedDim; + }()) {} + +const Embedding &VocabStorage::const_iterator::operator*() const { + assert(SectionId < Storage->Sections.size() && "Invalid section ID"); + assert(LocalIndex < Storage->Sections[SectionId].size() && + "Local index out of range"); + return Storage->Sections[SectionId][LocalIndex]; +} + +VocabStorage::const_iterator &VocabStorage::const_iterator::operator++() { + ++LocalIndex; + // Check if we need to move to the next section + if (SectionId < Storage->getNumSections() && + LocalIndex >= Storage->Sections[SectionId].size()) { + assert(LocalIndex == Storage->Sections[SectionId].size() && + "Local index should be at the end of the current section"); + LocalIndex = 0; + ++SectionId; + } + return *this; } -unsigned Vocabulary::getSlotIndex(unsigned Opcode) { - assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode"); - return Opcode - 1; // Convert to zero-based index +bool VocabStorage::const_iterator::operator==( + const const_iterator &Other) const { + return Storage == Other.Storage && SectionId == Other.SectionId && + LocalIndex == Other.LocalIndex; } -unsigned Vocabulary::getSlotIndex(Type::TypeID TypeID) { - assert(static_cast(TypeID) < MaxTypeIDs && "Invalid type ID"); - return MaxOpcodes + static_cast(getCanonicalTypeID(TypeID)); +bool VocabStorage::const_iterator::operator!=( + const const_iterator &Other) const { + return !(*this == Other); } -unsigned Vocabulary::getSlotIndex(const Value &Op) { - unsigned Index = static_cast(getOperandKind(&Op)); - assert(Index < MaxOperandKinds && "Invalid OperandKind"); - return MaxOpcodes + MaxCanonicalTypeIDs + Index; -} +Error VocabStorage::parseVocabSection(StringRef Key, + const json::Value &ParsedVocabValue, + VocabMap &TargetVocab, unsigned &Dim) { + json::Path::Root Path(""); + const json::Object *RootObj = ParsedVocabValue.getAsObject(); + if (!RootObj) + return createStringError(errc::invalid_argument, + "JSON root is not an object"); -const Embedding &Vocabulary::operator[](unsigned Opcode) const { - return Vocab[getSlotIndex(Opcode)]; -} + const json::Value *SectionValue = RootObj->get(Key); + if (!SectionValue) + return createStringError(errc::invalid_argument, + "Missing '" + std::string(Key) + + "' section in vocabulary file"); + if (!json::fromJSON(*SectionValue, TargetVocab, Path)) + return createStringError(errc::illegal_byte_sequence, + "Unable to parse '" + std::string(Key) + + "' section from vocabulary"); -const Embedding &Vocabulary::operator[](Type::TypeID TypeID) const { - return Vocab[getSlotIndex(TypeID)]; -} + Dim = TargetVocab.begin()->second.size(); + if (Dim == 0) + return createStringError(errc::illegal_byte_sequence, + "Dimension of '" + std::string(Key) + + "' section of the vocabulary is zero"); + + if (!std::all_of(TargetVocab.begin(), TargetVocab.end(), + [Dim](const std::pair &Entry) { + return Entry.second.size() == Dim; + })) + return createStringError( + errc::illegal_byte_sequence, + "All vectors in the '" + std::string(Key) + + "' section of the vocabulary are not of the same dimension"); -const ir2vec::Embedding &Vocabulary::operator[](const Value &Arg) const { - return Vocab[getSlotIndex(Arg)]; + return Error::success(); } +// ==----------------------------------------------------------------------===// +// 
Vocabulary +//===----------------------------------------------------------------------===// + StringRef Vocabulary::getVocabKeyForOpcode(unsigned Opcode) { assert(Opcode >= 1 && Opcode <= MaxOpcodes && "Invalid opcode"); #define HANDLE_INST(NUM, OPCODE, CLASS) \ @@ -304,29 +382,6 @@ StringRef Vocabulary::getVocabKeyForOpcode(unsigned Opcode) { return "UnknownOpcode"; } -StringRef Vocabulary::getVocabKeyForCanonicalTypeID(CanonicalTypeID CType) { - unsigned Index = static_cast(CType); - assert(Index < MaxCanonicalTypeIDs && "Invalid CanonicalTypeID"); - return CanonicalTypeNames[Index]; -} - -Vocabulary::CanonicalTypeID -Vocabulary::getCanonicalTypeID(Type::TypeID TypeID) { - unsigned Index = static_cast(TypeID); - assert(Index < MaxTypeIDs && "Invalid TypeID"); - return TypeIDMapping[Index]; -} - -StringRef Vocabulary::getVocabKeyForTypeID(Type::TypeID TypeID) { - return getVocabKeyForCanonicalTypeID(getCanonicalTypeID(TypeID)); -} - -StringRef Vocabulary::getVocabKeyForOperandKind(Vocabulary::OperandKind Kind) { - unsigned Index = static_cast(Kind); - assert(Index < MaxOperandKinds && "Invalid OperandKind"); - return OperandKindNames[Index]; -} - // Helper function to classify an operand into OperandKind Vocabulary::OperandKind Vocabulary::getOperandKind(const Value *Op) { if (isa(Op)) @@ -338,18 +393,50 @@ Vocabulary::OperandKind Vocabulary::getOperandKind(const Value *Op) { return OperandKind::VariableID; } +unsigned Vocabulary::getPredicateLocalIndex(CmpInst::Predicate P) { + if (P >= CmpInst::FIRST_FCMP_PREDICATE && P <= CmpInst::LAST_FCMP_PREDICATE) + return P - CmpInst::FIRST_FCMP_PREDICATE; + else + return P - CmpInst::FIRST_ICMP_PREDICATE + + (CmpInst::LAST_FCMP_PREDICATE - CmpInst::FIRST_FCMP_PREDICATE + 1); +} + +CmpInst::Predicate Vocabulary::getPredicateFromLocalIndex(unsigned LocalIndex) { + unsigned fcmpRange = + CmpInst::LAST_FCMP_PREDICATE - CmpInst::FIRST_FCMP_PREDICATE + 1; + if (LocalIndex < fcmpRange) + return static_cast(CmpInst::FIRST_FCMP_PREDICATE + + LocalIndex); + else + return static_cast(CmpInst::FIRST_ICMP_PREDICATE + + LocalIndex - fcmpRange); +} + +StringRef Vocabulary::getVocabKeyForPredicate(CmpInst::Predicate Pred) { + static SmallString<16> PredNameBuffer; + if (Pred < CmpInst::FIRST_ICMP_PREDICATE) + PredNameBuffer = "FCMP_"; + else + PredNameBuffer = "ICMP_"; + PredNameBuffer += CmpInst::getPredicateName(Pred); + return PredNameBuffer; +} + StringRef Vocabulary::getStringKey(unsigned Pos) { assert(Pos < NumCanonicalEntries && "Position out of bounds in vocabulary"); // Opcode if (Pos < MaxOpcodes) return getVocabKeyForOpcode(Pos + 1); // Type - if (Pos < MaxOpcodes + MaxCanonicalTypeIDs) + if (Pos < OperandBaseOffset) return getVocabKeyForCanonicalTypeID( static_cast(Pos - MaxOpcodes)); // Operand - return getVocabKeyForOperandKind( - static_cast(Pos - MaxOpcodes - MaxCanonicalTypeIDs)); + if (Pos < PredicateBaseOffset) + return getVocabKeyForOperandKind( + static_cast(Pos - OperandBaseOffset)); + // Predicates + return getVocabKeyForPredicate(getPredicate(Pos - PredicateBaseOffset)); } // For now, assume vocabulary is stable unless explicitly invalidated. 
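With the section-based vocabulary, compare instructions now also contribute a predicate embedding (see the computeEmbeddings changes earlier in this file). A minimal sketch of how that lookup composes with the existing opcode, type, and operand lookups; embedCompare is a hypothetical helper and assumes an already-populated, valid Vocabulary:

#include "llvm/Analysis/IR2Vec.h"
#include "llvm/IR/Instructions.h"

// Sketch: mirrors the per-instruction accumulation in computeEmbeddings,
// including the new predicate lookup for compare instructions.
llvm::ir2vec::Embedding embedCompare(const llvm::ir2vec::Vocabulary &Vocab,
                                     const llvm::CmpInst &IC) {
  auto InstVector =
      Vocab[IC.getOpcode()] + Vocab[IC.getType()->getTypeID()];
  for (const llvm::Value *Op : IC.operand_values())
    InstVector += Vocab[*Op];             // operand-kind section
  InstVector += Vocab[IC.getPredicate()]; // new predicate section
  return InstVector;
}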
@@ -359,65 +446,62 @@ bool Vocabulary::invalidate(Module &M, const PreservedAnalyses &PA, return !(PAC.preservedWhenStateless()); } -Vocabulary::VocabVector Vocabulary::createDummyVocabForTest(unsigned Dim) { - VocabVector DummyVocab; - DummyVocab.reserve(NumCanonicalEntries); +VocabStorage Vocabulary::createDummyVocabForTest(unsigned Dim) { float DummyVal = 0.1f; - // Create a dummy vocabulary with entries for all opcodes, types, and - // operands - for ([[maybe_unused]] unsigned _ : - seq(0u, Vocabulary::MaxOpcodes + Vocabulary::MaxCanonicalTypeIDs + - Vocabulary::MaxOperandKinds)) { - DummyVocab.push_back(Embedding(Dim, DummyVal)); - DummyVal += 0.1f; - } - return DummyVocab; -} -// ==----------------------------------------------------------------------===// -// IR2VecVocabAnalysis -//===----------------------------------------------------------------------===// + // Create sections for opcodes, types, operands, and predicates + // Order must match Vocabulary::Section enum + std::vector> Sections; + Sections.reserve(4); -Error IR2VecVocabAnalysis::parseVocabSection( - StringRef Key, const json::Value &ParsedVocabValue, VocabMap &TargetVocab, - unsigned &Dim) { - json::Path::Root Path(""); - const json::Object *RootObj = ParsedVocabValue.getAsObject(); - if (!RootObj) - return createStringError(errc::invalid_argument, - "JSON root is not an object"); + // Opcodes section + std::vector OpcodeSec; + OpcodeSec.reserve(MaxOpcodes); + for (unsigned I = 0; I < MaxOpcodes; ++I) { + OpcodeSec.emplace_back(Dim, DummyVal); + DummyVal += 0.1f; + } + Sections.push_back(std::move(OpcodeSec)); - const json::Value *SectionValue = RootObj->get(Key); - if (!SectionValue) - return createStringError(errc::invalid_argument, - "Missing '" + std::string(Key) + - "' section in vocabulary file"); - if (!json::fromJSON(*SectionValue, TargetVocab, Path)) - return createStringError(errc::illegal_byte_sequence, - "Unable to parse '" + std::string(Key) + - "' section from vocabulary"); + // Types section + std::vector TypeSec; + TypeSec.reserve(MaxCanonicalTypeIDs); + for (unsigned I = 0; I < MaxCanonicalTypeIDs; ++I) { + TypeSec.emplace_back(Dim, DummyVal); + DummyVal += 0.1f; + } + Sections.push_back(std::move(TypeSec)); - Dim = TargetVocab.begin()->second.size(); - if (Dim == 0) - return createStringError(errc::illegal_byte_sequence, - "Dimension of '" + std::string(Key) + - "' section of the vocabulary is zero"); + // Operands section + std::vector OperandSec; + OperandSec.reserve(MaxOperandKinds); + for (unsigned I = 0; I < MaxOperandKinds; ++I) { + OperandSec.emplace_back(Dim, DummyVal); + DummyVal += 0.1f; + } + Sections.push_back(std::move(OperandSec)); - if (!std::all_of(TargetVocab.begin(), TargetVocab.end(), - [Dim](const std::pair &Entry) { - return Entry.second.size() == Dim; - })) - return createStringError( - errc::illegal_byte_sequence, - "All vectors in the '" + std::string(Key) + - "' section of the vocabulary are not of the same dimension"); + // Predicates section + std::vector PredicateSec; + PredicateSec.reserve(MaxPredicateKinds); + for (unsigned I = 0; I < MaxPredicateKinds; ++I) { + PredicateSec.emplace_back(Dim, DummyVal); + DummyVal += 0.1f; + } + Sections.push_back(std::move(PredicateSec)); - return Error::success(); + return VocabStorage(std::move(Sections)); } +// ==----------------------------------------------------------------------===// +// IR2VecVocabAnalysis +//===----------------------------------------------------------------------===// + // FIXME: Make this optional. 
We can avoid file reads // by auto-generating a default vocabulary during the build time. -Error IR2VecVocabAnalysis::readVocabulary() { +Error IR2VecVocabAnalysis::readVocabulary(VocabMap &OpcVocab, + VocabMap &TypeVocab, + VocabMap &ArgVocab) { auto BufOrError = MemoryBuffer::getFileOrSTDIN(VocabFile, /*IsText=*/true); if (!BufOrError) return createFileError(VocabFile, BufOrError.getError()); @@ -429,16 +513,16 @@ Error IR2VecVocabAnalysis::readVocabulary() { return ParsedVocabValue.takeError(); unsigned OpcodeDim = 0, TypeDim = 0, ArgDim = 0; - if (auto Err = - parseVocabSection("Opcodes", *ParsedVocabValue, OpcVocab, OpcodeDim)) + if (auto Err = VocabStorage::parseVocabSection("Opcodes", *ParsedVocabValue, + OpcVocab, OpcodeDim)) return Err; - if (auto Err = - parseVocabSection("Types", *ParsedVocabValue, TypeVocab, TypeDim)) + if (auto Err = VocabStorage::parseVocabSection("Types", *ParsedVocabValue, + TypeVocab, TypeDim)) return Err; - if (auto Err = - parseVocabSection("Arguments", *ParsedVocabValue, ArgVocab, ArgDim)) + if (auto Err = VocabStorage::parseVocabSection("Arguments", *ParsedVocabValue, + ArgVocab, ArgDim)) return Err; if (!(OpcodeDim == TypeDim && TypeDim == ArgDim)) @@ -448,7 +532,9 @@ Error IR2VecVocabAnalysis::readVocabulary() { return Error::success(); } -void IR2VecVocabAnalysis::generateNumMappedVocab() { +void IR2VecVocabAnalysis::generateVocabStorage(VocabMap &OpcVocab, + VocabMap &TypeVocab, + VocabMap &ArgVocab) { // Helper for handling missing entities in the vocabulary. // Currently, we use a zero vector. In the future, we will throw an error to @@ -466,7 +552,6 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() { // Handle Opcodes std::vector NumericOpcodeEmbeddings(Vocabulary::MaxOpcodes, Embedding(Dim)); - NumericOpcodeEmbeddings.reserve(Vocabulary::MaxOpcodes); for (unsigned Opcode : seq(0u, Vocabulary::MaxOpcodes)) { StringRef VocabKey = Vocabulary::getVocabKeyForOpcode(Opcode + 1); auto It = OpcVocab.find(VocabKey.str()); @@ -475,13 +560,10 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() { else handleMissingEntity(VocabKey.str()); } - Vocab.insert(Vocab.end(), NumericOpcodeEmbeddings.begin(), - NumericOpcodeEmbeddings.end()); // Handle Types - only canonical types are present in vocabulary std::vector NumericTypeEmbeddings(Vocabulary::MaxCanonicalTypeIDs, Embedding(Dim)); - NumericTypeEmbeddings.reserve(Vocabulary::MaxCanonicalTypeIDs); for (unsigned CTypeID : seq(0u, Vocabulary::MaxCanonicalTypeIDs)) { StringRef VocabKey = Vocabulary::getVocabKeyForCanonicalTypeID( static_cast(CTypeID)); @@ -491,13 +573,10 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() { } handleMissingEntity(VocabKey.str()); } - Vocab.insert(Vocab.end(), NumericTypeEmbeddings.begin(), - NumericTypeEmbeddings.end()); // Handle Arguments/Operands std::vector NumericArgEmbeddings(Vocabulary::MaxOperandKinds, Embedding(Dim)); - NumericArgEmbeddings.reserve(Vocabulary::MaxOperandKinds); for (unsigned OpKind : seq(0u, Vocabulary::MaxOperandKinds)) { Vocabulary::OperandKind Kind = static_cast(OpKind); StringRef VocabKey = Vocabulary::getVocabKeyForOperandKind(Kind); @@ -508,15 +587,37 @@ void IR2VecVocabAnalysis::generateNumMappedVocab() { } handleMissingEntity(VocabKey.str()); } - Vocab.insert(Vocab.end(), NumericArgEmbeddings.begin(), - NumericArgEmbeddings.end()); -} -IR2VecVocabAnalysis::IR2VecVocabAnalysis(const VocabVector &Vocab) - : Vocab(Vocab) {} + // Handle Predicates: part of Operands section. We look up predicate keys + // in ArgVocab. 
+ std::vector NumericPredEmbeddings(Vocabulary::MaxPredicateKinds, + Embedding(Dim, 0)); + for (unsigned PK : seq(0u, Vocabulary::MaxPredicateKinds)) { + StringRef VocabKey = + Vocabulary::getVocabKeyForPredicate(Vocabulary::getPredicate(PK)); + auto It = ArgVocab.find(VocabKey.str()); + if (It != ArgVocab.end()) { + NumericPredEmbeddings[PK] = It->second; + continue; + } + handleMissingEntity(VocabKey.str()); + } -IR2VecVocabAnalysis::IR2VecVocabAnalysis(VocabVector &&Vocab) - : Vocab(std::move(Vocab)) {} + // Create section-based storage instead of flat vocabulary + // Order must match Vocabulary::Section enum + std::vector> Sections(4); + Sections[static_cast(Vocabulary::Section::Opcodes)] = + std::move(NumericOpcodeEmbeddings); // Section::Opcodes + Sections[static_cast(Vocabulary::Section::CanonicalTypes)] = + std::move(NumericTypeEmbeddings); // Section::CanonicalTypes + Sections[static_cast(Vocabulary::Section::Operands)] = + std::move(NumericArgEmbeddings); // Section::Operands + Sections[static_cast(Vocabulary::Section::Predicates)] = + std::move(NumericPredEmbeddings); // Section::Predicates + + // Create VocabStorage from organized sections + Vocab.emplace(std::move(Sections)); +} void IR2VecVocabAnalysis::emitError(Error Err, LLVMContext &Ctx) { handleAllErrors(std::move(Err), [&](const ErrorInfoBase &EI) { @@ -528,8 +629,8 @@ IR2VecVocabAnalysis::Result IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) { auto Ctx = &M.getContext(); // If vocabulary is already populated by the constructor, use it. - if (!Vocab.empty()) - return Vocabulary(std::move(Vocab)); + if (Vocab.has_value()) + return Vocabulary(std::move(Vocab.value())); // Otherwise, try to read from the vocabulary file. if (VocabFile.empty()) { @@ -538,7 +639,9 @@ IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) { "set it using --ir2vec-vocab-path"); return Vocabulary(); // Return invalid result } - if (auto Err = readVocabulary()) { + + VocabMap OpcVocab, TypeVocab, ArgVocab; + if (auto Err = readVocabulary(OpcVocab, TypeVocab, ArgVocab)) { emitError(std::move(Err), *Ctx); return Vocabulary(); } @@ -553,9 +656,9 @@ IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) { scaleVocabSection(ArgVocab, ArgWeight); // Generate the numeric lookup vocabulary - generateNumMappedVocab(); + generateVocabStorage(OpcVocab, TypeVocab, ArgVocab); - return Vocabulary(std::move(Vocab)); + return Vocabulary(std::move(Vocab.value())); } // ==----------------------------------------------------------------------===// @@ -564,7 +667,7 @@ IR2VecVocabAnalysis::run(Module &M, ModuleAnalysisManager &AM) { PreservedAnalyses IR2VecPrinterPass::run(Module &M, ModuleAnalysisManager &MAM) { - auto Vocabulary = MAM.getResult(M); + auto &Vocabulary = MAM.getResult(M); assert(Vocabulary.isValid() && "IR2Vec Vocabulary is invalid"); for (Function &F : M) { @@ -606,7 +709,7 @@ PreservedAnalyses IR2VecPrinterPass::run(Module &M, PreservedAnalyses IR2VecVocabPrinterPass::run(Module &M, ModuleAnalysisManager &MAM) { - auto IR2VecVocabulary = MAM.getResult(M); + auto &IR2VecVocabulary = MAM.getResult(M); assert(IR2VecVocabulary.isValid() && "IR2Vec Vocabulary is invalid"); // Print each entry diff --git a/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp b/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp index 7b93474e4dc7b..25e7a97065b27 100644 --- a/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp +++ b/llvm/lib/Analysis/IndirectCallPromotionAnalysis.cpp @@ -22,6 +22,8 @@ using namespace llvm; #define 
DEBUG_TYPE "pgo-icall-prom-analysis" +namespace llvm { + // The percent threshold for the direct-call target (this call site vs the // remaining call count) for it to be considered as the promotion target. static cl::opt ICPRemainingPercentThreshold( @@ -54,6 +56,8 @@ cl::opt MaxNumVTableAnnotations( "icp-max-num-vtables", cl::init(6), cl::Hidden, cl::desc("Max number of vtables annotated for a vtable load instruction.")); +} // end namespace llvm + bool ICallPromotionAnalysis::isPromotionProfitable(uint64_t Count, uint64_t TotalCount, uint64_t RemainingCount) { diff --git a/llvm/lib/Analysis/InlineAdvisor.cpp b/llvm/lib/Analysis/InlineAdvisor.cpp index 28b14c2562df1..0fa804f2959e8 100644 --- a/llvm/lib/Analysis/InlineAdvisor.cpp +++ b/llvm/lib/Analysis/InlineAdvisor.cpp @@ -217,7 +217,7 @@ AnalysisKey PluginInlineAdvisorAnalysis::Key; bool InlineAdvisorAnalysis::initializeIR2VecVocabIfRequested( Module &M, ModuleAnalysisManager &MAM) { if (!IR2VecVocabFile.empty()) { - auto IR2VecVocabResult = MAM.getResult(M); + auto &IR2VecVocabResult = MAM.getResult(M); if (!IR2VecVocabResult.isValid()) { M.getContext().emitError("Failed to load IR2Vec vocabulary"); return false; diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp index a90b618607ad6..07f4a8e5c889e 100644 --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -6514,10 +6514,27 @@ Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, const CallBase *Call) { unsigned BitWidth = ReturnType->getScalarSizeInBits(); switch (IID) { - case Intrinsic::get_active_lane_mask: + case Intrinsic::get_active_lane_mask: { if (match(Op1, m_Zero())) return ConstantInt::getFalse(ReturnType); + + const Function *F = Call->getFunction(); + auto *ScalableTy = dyn_cast(ReturnType); + Attribute Attr = F->getFnAttribute(Attribute::VScaleRange); + if (ScalableTy && Attr.isValid()) { + std::optional VScaleMax = Attr.getVScaleRangeMax(); + if (!VScaleMax) + break; + uint64_t MaxPossibleMaskElements = + (uint64_t)ScalableTy->getMinNumElements() * (*VScaleMax); + + const APInt *Op1Val; + if (match(Op0, m_Zero()) && match(Op1, m_APInt(Op1Val)) && + Op1Val->uge(MaxPossibleMaskElements)) + return ConstantInt::getAllOnesValue(ReturnType); + } break; + } case Intrinsic::abs: // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here. // It is always ok to pick the earlier abs. We'll just lose nsw if its only diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp index 90bae77bcf703..0e5bc481383a0 100644 --- a/llvm/lib/Analysis/LazyValueInfo.cpp +++ b/llvm/lib/Analysis/LazyValueInfo.cpp @@ -59,6 +59,11 @@ INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info", "Lazy Value Information Analysis", false, true) +static cl::opt PerPredRanges( + "lvi-per-pred-ranges", cl::Hidden, cl::init(false), + cl::desc("Enable tracking of ranges for a value in a block for" + "each block predecessor (default = false)")); + namespace llvm { FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); @@ -103,6 +108,10 @@ namespace { namespace { using NonNullPointerSet = SmallDenseSet, 2>; +using BBLatticeElementMap = + SmallDenseMap, ValueLatticeElement, 4>; +using PredecessorValueLatticeMap = + SmallDenseMap, BBLatticeElementMap, 2>; /// This is the cache kept by LazyValueInfo which /// maintains information about queries across the clients' queries. 
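The new hidden option above (registered as lvi-per-pred-ranges, off by default) only changes how much per-predecessor information LVI caches internally; the intent appears to be tighter ranges for binary operators when every predecessor contributes a known range, while the queries a client pass makes stay the same. A minimal sketch of such a query, with rangeAt as a hypothetical helper:

#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/Instructions.h"

// Ask LVI for the range of V at a particular context instruction.
llvm::ConstantRange rangeAt(llvm::LazyValueInfo &LVI, llvm::Value *V,
                            llvm::Instruction *CtxI) {
  return LVI.getConstantRange(V, CtxI, /*UndefAllowed=*/false);
}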
@@ -117,6 +126,10 @@ class LazyValueInfoCache { // std::nullopt indicates that the nonnull pointers for this basic block // block have not been computed yet. std::optional NonNullPointers; + // This is an extension of the above LatticeElements, caching, for each + // Value, a ValueLatticeElement, for each predecessor of the BB tracked by + // this entry. + std::optional PredecessorLatticeElements; }; /// Cached information per basic block. @@ -134,8 +147,14 @@ class LazyValueInfoCache { BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) { auto It = BlockCache.find_as(BB); - if (It == BlockCache.end()) - It = BlockCache.insert({BB, std::make_unique()}).first; + if (It == BlockCache.end()) { + std::unique_ptr BCE = + std::make_unique(); + if (PerPredRanges) + BCE->PredecessorLatticeElements = + std::make_optional(); + It = BlockCache.insert({BB, std::move(BCE)}).first; + } return It->second.get(); } @@ -161,6 +180,28 @@ class LazyValueInfoCache { addValueHandle(Val); } + void insertPredecessorResults(Value *Val, BasicBlock *BB, + BBLatticeElementMap &PredLatticeElements) { + BlockCacheEntry *Entry = getOrCreateBlockEntry(BB); + + Entry->PredecessorLatticeElements->insert({Val, PredLatticeElements}); + + addValueHandle(Val); + } + + std::optional + getCachedPredecessorInfo(Value *V, BasicBlock *BB) const { + const BlockCacheEntry *Entry = getBlockEntry(BB); + if (!Entry) + return std::nullopt; + + auto LatticeIt = Entry->PredecessorLatticeElements->find_as(V); + if (LatticeIt == Entry->PredecessorLatticeElements->end()) + return std::nullopt; + + return LatticeIt->second; + } + std::optional getCachedValueInfo(Value *V, BasicBlock *BB) const { const BlockCacheEntry *Entry = getBlockEntry(BB); @@ -216,6 +257,8 @@ void LazyValueInfoCache::eraseValue(Value *V) { Pair.second->OverDefined.erase(V); if (Pair.second->NonNullPointers) Pair.second->NonNullPointers->erase(V); + if (PerPredRanges) + Pair.second->PredecessorLatticeElements->erase(V); } auto HandleIt = ValueHandles.find_as(V); @@ -230,6 +273,10 @@ void LVIValueHandle::deleted() { } void LazyValueInfoCache::eraseBlock(BasicBlock *BB) { + // Clear all when a BB is removed. + if (PerPredRanges) + for (auto &Pair : BlockCache) + Pair.second->PredecessorLatticeElements->clear(); BlockCache.erase(BB); } @@ -691,6 +738,9 @@ LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) { // find a path to function entry. TODO: We should consider explicitly // canonicalizing to make this true rather than relying on this happy // accident. + std::optional PredLatticeElements; + if (PerPredRanges) + PredLatticeElements = std::make_optional(); for (BasicBlock *Pred : predecessors(BB)) { // Skip self loops. if (Pred == BB) @@ -710,8 +760,13 @@ LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) { << Pred->getName() << "' (non local).\n"); return Result; } + if (PerPredRanges) + PredLatticeElements->insert({Pred, *EdgeResult}); } + if (PerPredRanges) + TheCache.insertPredecessorResults(Val, BB, *PredLatticeElements); + // Return the merged value, which is more precise than 'overdefined'. assert(!Result.isOverdefined()); return Result; @@ -724,6 +779,9 @@ LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) { // Loop over all of our predecessors, merging what we know from them into // result. See the comment about the chosen traversal order in // solveBlockValueNonLocal; the same reasoning applies here. 
+ std::optional PredLatticeElements; + if (PerPredRanges) + PredLatticeElements = std::make_optional(); for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { BasicBlock *PhiBB = PN->getIncomingBlock(i); Value *PhiVal = PN->getIncomingValue(i); @@ -746,8 +804,14 @@ LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) { return Result; } + + if (PerPredRanges) + PredLatticeElements->insert({PhiBB, *EdgeResult}); } + if (PerPredRanges) + TheCache.insertPredecessorResults(PN, BB, *PredLatticeElements); + // Return the merged value, which is more precise than 'overdefined'. assert(!Result.isOverdefined() && "Possible PHI in entry block?"); return Result; @@ -1002,7 +1066,77 @@ LazyValueInfoImpl::solveBlockValueBinaryOpImpl( const ConstantRange &LHSRange = *LHSRes; const ConstantRange &RHSRange = *RHSRes; - return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange)); + + std::optional MergedResult = + ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange)); + + if (!PerPredRanges) + return MergedResult; + + std::optional PredLHS = + TheCache.getCachedPredecessorInfo(LHS, BB); + if (!PredLHS) + return MergedResult; + std::optional PredRHS = + TheCache.getCachedPredecessorInfo(RHS, BB); + if (!PredRHS) + return MergedResult; + + const BBLatticeElementMap &LHSPredMap = *PredLHS; + const BBLatticeElementMap &RHSPredMap = *PredRHS; + + BBLatticeElementMap PredLatticeElements; + ValueLatticeElement OverallPredResult; + for (auto *Pred : predecessors(BB)) { + auto LHSIt = LHSPredMap.find_as(Pred); + if (LHSIt == LHSPredMap.end()) + return MergedResult; + const ValueLatticeElement &LHSFromPred = LHSIt->second; + std::optional LHSFromPredRes = + LHSFromPred.asConstantRange(LHS->getType()); + if (!LHSFromPredRes) + return MergedResult; + + auto RHSIt = RHSPredMap.find_as(Pred); + if (RHSIt == RHSPredMap.end()) + return MergedResult; + const ValueLatticeElement &RHSFromPred = RHSIt->second; + std::optional RHSFromPredRes = + RHSFromPred.asConstantRange(RHS->getType()); + if (!RHSFromPredRes) + return MergedResult; + + const ConstantRange &LHSFromPredRange = *LHSFromPredRes; + const ConstantRange &RHSFromPredRange = *RHSFromPredRes; + std::optional PredResult = + ValueLatticeElement::getRange(OpFn(LHSFromPredRange, RHSFromPredRange)); + if (!PredResult) + return MergedResult; + if (PredResult->isOverdefined()) { + LLVM_DEBUG( + dbgs() << " pred BB '" << Pred->getName() << "' for BB '" + << BB->getName() + << "' overdefined. Discarding all predecessor intervals.\n"); + return MergedResult; + } + PredLatticeElements.insert({Pred, *PredResult}); + OverallPredResult.mergeIn(*PredResult); + } + + // If this point is reached, all predecessors for both LHS and RHS have + // constant ranges previously computed. 
Can cache result and use the + // OverallPredResult; + TheCache.insertPredecessorResults(I, BB, PredLatticeElements); + + LLVM_DEBUG(dbgs() << " Using predecessor intervals, evaluated " << *I + << " to: " << OverallPredResult << ".\n"); + + if (!MergedResult) + return OverallPredResult; + + LLVM_DEBUG(dbgs() << " Intersecting intervals for " << *I << ": " + << OverallPredResult << " and " << MergedResult << ".\n"); + return MergedResult->intersect(OverallPredResult); } std::optional @@ -1498,19 +1632,25 @@ LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom, *getValueFromCondition(Usr->getOperand(0), Condition, isTrueDest, /*UseBlockValue*/ false); - if (!OpLatticeVal.isConstantRange()) - return OpLatticeVal; + if (OpLatticeVal.isConstantRange()) { + const unsigned ResultBitWidth = + Usr->getType()->getScalarSizeInBits(); + if (auto *Trunc = dyn_cast(Usr)) + return ValueLatticeElement::getRange( + OpLatticeVal.getConstantRange().truncate( + ResultBitWidth, Trunc->getNoWrapKind())); - const unsigned ResultBitWidth = - Usr->getType()->getScalarSizeInBits(); - if (auto *Trunc = dyn_cast(Usr)) return ValueLatticeElement::getRange( - OpLatticeVal.getConstantRange().truncate( - ResultBitWidth, Trunc->getNoWrapKind())); - - return ValueLatticeElement::getRange( - OpLatticeVal.getConstantRange().castOp( - cast(Usr)->getOpcode(), ResultBitWidth)); + OpLatticeVal.getConstantRange().castOp( + cast(Usr)->getOpcode(), ResultBitWidth)); + } + if (OpLatticeVal.isConstant()) { + Constant *C = OpLatticeVal.getConstant(); + if (auto *CastC = ConstantFoldCastOperand( + cast(Usr)->getOpcode(), C, Usr->getType(), DL)) + return ValueLatticeElement::get(CastC); + } + return ValueLatticeElement::getOverdefined(); } else { // If one of Val's operand has an inferred value, we may be able to // infer the value of Val. diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp index d6ad855cad9a7..47dccde45337b 100644 --- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp +++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp @@ -234,9 +234,14 @@ static bool evaluatePtrAddRecAtMaxBTCWillNotWrap( // Check if we have a suitable dereferencable assumption we can use. if (!StartPtrV->canBeFreed()) { + Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt(); + if (BasicBlock *LoopPred = L->getLoopPredecessor()) { + if (isa(LoopPred->getTerminator())) + CtxI = LoopPred->getTerminator(); + } + RetainedKnowledge DerefRK = getKnowledgeValidInContext( - StartPtrV, {Attribute::Dereferenceable}, *AC, - L->getLoopPredecessor()->getTerminator(), DT); + StartPtrV, {Attribute::Dereferenceable}, *AC, CtxI, DT); if (DerefRK) { DerefBytesSCEV = SE.getUMaxExpr(DerefBytesSCEV, SE.getSCEV(DerefRK.IRArgValue)); @@ -2090,12 +2095,14 @@ MemoryDepChecker::getDependenceDistanceStrideAndSize( return MemoryDepChecker::Dependence::Unknown; } + TypeSize AStoreSz = DL.getTypeStoreSize(ATy); + TypeSize BStoreSz = DL.getTypeStoreSize(BTy); + + // If store sizes are not the same, set TypeByteSize to zero, so we can check + // it in the caller isDependent. uint64_t ASz = DL.getTypeAllocSize(ATy); uint64_t BSz = DL.getTypeAllocSize(BTy); - - // Both the source and sink sizes are neeeded in dependence checks, depending - // on the use. - std::pair TypeByteSize(ASz, BSz); + uint64_t TypeByteSize = (AStoreSz == BStoreSz) ? 
BSz : 0; uint64_t StrideAScaled = std::abs(StrideAPtrInt) * ASz; uint64_t StrideBScaled = std::abs(StrideBPtrInt) * BSz; @@ -2117,23 +2124,8 @@ MemoryDepChecker::getDependenceDistanceStrideAndSize( return Dependence::Unknown; } - // When the distance is possibly zero, we're reading/writing the same memory - // location: if the store sizes are not equal, fail with an unknown - // dependence. - TypeSize AStoreSz = DL.getTypeStoreSize(ATy); - TypeSize BStoreSz = DL.getTypeStoreSize(BTy); - if (AStoreSz != BStoreSz && !SE.isKnownNonZero(Dist)) { - LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence distance with " - "different type sizes\n"); - return Dependence::Unknown; - } - - // TODO: Remove this. - bool HasSameSize = AStoreSz == BStoreSz; - return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride, - TypeByteSize, HasSameSize, AIsWrite, - BIsWrite); + TypeByteSize, AIsWrite, BIsWrite); } MemoryDepChecker::Dependence::DepType @@ -2165,8 +2157,9 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, return std::get(Res); } - auto &[Dist, MaxStride, CommonStride, TypeByteSize, HasSameSize, AIsWrite, - BIsWrite] = std::get(Res); + auto &[Dist, MaxStride, CommonStride, TypeByteSize, AIsWrite, BIsWrite] = + std::get(Res); + bool HasSameSize = TypeByteSize > 0; ScalarEvolution &SE = *PSE.getSE(); auto &DL = InnermostLoop->getHeader()->getDataLayout(); @@ -2192,8 +2185,7 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, // If the distance between accesses and their strides are known constants, // check whether the accesses interlace each other. if (ConstDist > 0 && CommonStride && CommonStride > 1 && HasSameSize && - areStridedAccessesIndependent(ConstDist, *CommonStride, - TypeByteSize.first)) { + areStridedAccessesIndependent(ConstDist, *CommonStride, TypeByteSize)) { LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n"); return Dependence::NoDep; } @@ -2207,9 +2199,13 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, // Negative distances are not plausible dependencies. if (SE.isKnownNonPositive(Dist)) { if (SE.isKnownNonNegative(Dist)) { - // Write to the same location with the same size. - assert(HasSameSize && "Accesses must have the same size"); - return Dependence::Forward; + if (HasSameSize) { + // Write to the same location with the same size. + return Dependence::Forward; + } + LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but " + "different type sizes\n"); + return Dependence::Unknown; } bool IsTrueDataDependence = (AIsWrite && !BIsWrite); @@ -2227,7 +2223,7 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, : Dependence::Unknown; } if (!HasSameSize || - couldPreventStoreLoadForward(ConstDist, TypeByteSize.first)) { + couldPreventStoreLoadForward(ConstDist, TypeByteSize)) { LLVM_DEBUG( dbgs() << "LAA: Forward but may prevent st->ld forwarding\n"); return Dependence::ForwardButPreventsForwarding; @@ -2293,8 +2289,7 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, // We know that Dist is positive, but it may not be constant. Use the signed // minimum for computations below, as this ensures we compute the closest // possible dependence distance. 
- uint64_t MinDistanceNeeded = - MaxStride * (MinNumIter - 1) + TypeByteSize.first; + uint64_t MinDistanceNeeded = MaxStride * (MinNumIter - 1) + TypeByteSize; if (MinDistanceNeeded > static_cast(MinDistance)) { if (!ConstDist) { // For non-constant distances, we checked the lower bound of the @@ -2322,15 +2317,14 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, bool IsTrueDataDependence = (!AIsWrite && BIsWrite); if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist && - couldPreventStoreLoadForward(MinDistance, TypeByteSize.first, - *CommonStride)) + couldPreventStoreLoadForward(MinDistance, TypeByteSize, *CommonStride)) return Dependence::BackwardVectorizableButPreventsForwarding; uint64_t MaxVF = MinDepDistBytes / MaxStride; LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance << " with max VF = " << MaxVF << '\n'); - uint64_t MaxVFInBits = MaxVF * TypeByteSize.first * 8; + uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8; if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) { // For non-constant distances, we checked the lower bound of the dependence // distance and the distance may be larger at runtime (and safe for @@ -2867,8 +2861,9 @@ void LoopAccessInfo::emitUnsafeDependenceRemark() { } } -bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, - DominatorTree *DT) { +bool LoopAccessInfo::blockNeedsPredication(const BasicBlock *BB, + const Loop *TheLoop, + const DominatorTree *DT) { assert(TheLoop->contains(BB) && "Unknown block used"); // Blocks that do not dominate the latch need predication. diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp index 67c2cfadb6533..9a022d9ed09ce 100644 --- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -80,6 +80,10 @@ static cl::opt cl::desc("The number of blocks to scan during memory " "dependency analysis (default = 200)")); +static cl::opt CacheGlobalLimit( + "memdep-cache-global-limit", cl::Hidden, cl::init(10000), + cl::desc("The max number of entries allowed in a cache (default = 10000)")); + // Limit on the number of memdep results to process. static const unsigned int NumResultsLimit = 100; @@ -1142,6 +1146,10 @@ bool MemoryDependenceResults::getNonLocalPointerDepFromBB( return true; } + // If the size of this cache has surpassed the global limit, stop here. + if (Cache->size() > CacheGlobalLimit) + return false; + // Otherwise, either this is a new block, a block with an invalid cache // pointer or one that we're about to invalidate by putting more info into // it than its valid cache info. 
If empty and not explicitly indicated as diff --git a/llvm/lib/Analysis/MemoryProfileInfo.cpp b/llvm/lib/Analysis/MemoryProfileInfo.cpp index b5ca6b13108fe..0c1f8dbd1119a 100644 --- a/llvm/lib/Analysis/MemoryProfileInfo.cpp +++ b/llvm/lib/Analysis/MemoryProfileInfo.cpp @@ -22,6 +22,8 @@ using namespace llvm::memprof; #define DEBUG_TYPE "memory-profile-info" +namespace llvm { + cl::opt MemProfReportHintedSizes( "memprof-report-hinted-sizes", cl::init(false), cl::Hidden, cl::desc("Report total allocation sizes of hinted allocations")); @@ -52,6 +54,8 @@ cl::opt MinPercentMaxColdSize( "memprof-min-percent-max-cold-size", cl::init(100), cl::Hidden, cl::desc("Min percent of max cold bytes for critical cold context")); +} // end namespace llvm + bool llvm::memprof::metadataIncludesAllContextSizeInfo() { return MemProfReportHintedSizes || MinClonedColdBytePercent < 100; } @@ -121,24 +125,6 @@ bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) { return NumAllocTypes == 1; } -void llvm::memprof::removeAnyExistingAmbiguousAttribute(CallBase *CB) { - if (!CB->hasFnAttr("memprof")) - return; - assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous"); - CB->removeFnAttr("memprof"); -} - -void llvm::memprof::addAmbiguousAttribute(CallBase *CB) { - // We may have an existing ambiguous attribute if we are reanalyzing - // after inlining. - if (CB->hasFnAttr("memprof")) { - assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous"); - } else { - auto A = llvm::Attribute::get(CB->getContext(), "memprof", "ambiguous"); - CB->addFnAttr(A); - } -} - void CallStackTrie::addCallStack( AllocationType AllocType, ArrayRef StackIds, std::vector ContextSizeInfo) { @@ -484,9 +470,6 @@ void CallStackTrie::addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT, StringRef Descriptor) { auto AllocTypeString = getAllocTypeAttributeString(AT); auto A = llvm::Attribute::get(CI->getContext(), "memprof", AllocTypeString); - // After inlining we may be able to convert an existing ambiguous allocation - // to an unambiguous one. - removeAnyExistingAmbiguousAttribute(CI); CI->addFnAttr(A); if (MemProfReportHintedSizes) { std::vector ContextSizeInfo; @@ -546,7 +529,6 @@ bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) { assert(MIBCallStack.size() == 1 && "Should only be left with Alloc's location in stack"); CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes)); - addAmbiguousAttribute(CI); return true; } // If there exists corner case that CallStackTrie has one chain to leaf diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp index a317ac471a231..a60a4bb1194e2 100644 --- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp +++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp @@ -67,7 +67,6 @@ using namespace llvm::memprof; namespace llvm { FunctionSummary::ForceSummaryHotnessType ForceSummaryEdgesCold = FunctionSummary::FSHT_None; -} // namespace llvm static cl::opt FSEC( "force-summary-edges-cold", cl::Hidden, cl::location(ForceSummaryEdgesCold), @@ -91,6 +90,7 @@ LLVM_ABI extern cl::opt ScalePartialSampleProfileWorkingSetSize; extern cl::opt MaxNumVTableAnnotations; extern cl::opt MemProfReportHintedSizes; +} // namespace llvm // Walk through the operands of a given User via worklist iteration and populate // the set of GlobalValue references encountered. 
Invoked either on an diff --git a/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/llvm/lib/Analysis/ProfileSummaryInfo.cpp index f1c3155f2f141..44d7a175cc7fe 100644 --- a/llvm/lib/Analysis/ProfileSummaryInfo.cpp +++ b/llvm/lib/Analysis/ProfileSummaryInfo.cpp @@ -24,6 +24,8 @@ #include using namespace llvm; +namespace llvm { + static cl::opt PartialProfile( "partial-profile", cl::Hidden, cl::init(false), cl::desc("Specify the current profile is used as a partial profile.")); @@ -44,6 +46,8 @@ static cl::opt PartialSampleProfileWorkingSetSizeScaleFactor( "and the factor to scale the working set size to use the same " "shared thresholds as PGO.")); +} // end namespace llvm + // The profile summary metadata may be attached either by the frontend or by // any backend passes (IR level instrumentation, for example). This method // checks if the Summary is null and if so checks if the summary metadata is now diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp index b08399b381f34..63e1b1462d007 100644 --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -3598,6 +3598,13 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, } } + // TODO: Generalize to handle any common factors. + // udiv (mul nuw a, vscale), (mul nuw b, vscale) --> udiv a, b + const SCEV *NewLHS, *NewRHS; + if (match(LHS, m_scev_c_NUWMul(m_SCEV(NewLHS), m_SCEVVScale())) && + match(RHS, m_scev_c_NUWMul(m_SCEV(NewRHS), m_SCEVVScale()))) + return getUDivExpr(NewLHS, NewRHS); + // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs // changes). Make sure we get a new one. IP = nullptr; diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 6f11b250cf21f..09a8fbea065ac 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -7651,25 +7651,26 @@ static bool isGuaranteedNotToBeUndefOrPoison( return true; } - if (const auto *PN = dyn_cast(V)) { - unsigned Num = PN->getNumIncomingValues(); - bool IsWellDefined = true; - for (unsigned i = 0; i < Num; ++i) { - if (PN == PN->getIncomingValue(i)) - continue; - auto *TI = PN->getIncomingBlock(i)->getTerminator(); - if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI, - DT, Depth + 1, Kind)) { - IsWellDefined = false; - break; + if (!::canCreateUndefOrPoison(Opr, Kind, + /*ConsiderFlagsAndMetadata=*/true)) { + if (const auto *PN = dyn_cast(V)) { + unsigned Num = PN->getNumIncomingValues(); + bool IsWellDefined = true; + for (unsigned i = 0; i < Num; ++i) { + if (PN == PN->getIncomingValue(i)) + continue; + auto *TI = PN->getIncomingBlock(i)->getTerminator(); + if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI, + DT, Depth + 1, Kind)) { + IsWellDefined = false; + break; + } } - } - if (IsWellDefined) + if (IsWellDefined) + return true; + } else if (all_of(Opr->operands(), OpCheck)) return true; - } else if (!::canCreateUndefOrPoison(Opr, Kind, - /*ConsiderFlagsAndMetadata*/ true) && - all_of(Opr->operands(), OpCheck)) - return true; + } } if (auto *I = dyn_cast(V)) diff --git a/llvm/lib/BinaryFormat/DXContainer.cpp b/llvm/lib/BinaryFormat/DXContainer.cpp index 36d10d0b63078..c06a3e34653f0 100644 --- a/llvm/lib/BinaryFormat/DXContainer.cpp +++ b/llvm/lib/BinaryFormat/DXContainer.cpp @@ -89,6 +89,15 @@ ArrayRef> dxbc::getDescriptorRangeFlags() { return ArrayRef(DescriptorRangeFlagNames); } +static const EnumEntry StaticSamplerFlagNames[] = { +#define STATIC_SAMPLER_FLAG(Val, Enum, Flag) 
{#Enum, StaticSamplerFlags::Enum}, +#include "llvm/BinaryFormat/DXContainerConstants.def" +}; + +ArrayRef> dxbc::getStaticSamplerFlags() { + return ArrayRef(StaticSamplerFlagNames); +} + #define SHADER_VISIBILITY(Val, Enum) {#Enum, ShaderVisibility::Enum}, static const EnumEntry ShaderVisibilityValues[] = { diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index 22a0d0ffdbaab..832aa9ff7ed3d 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -7024,7 +7024,7 @@ Error BitcodeReader::materialize(GlobalValue *GV) { if (!MDLoader->isStrippingTBAA()) { for (auto &I : instructions(F)) { MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa); - if (!TBAA || TBAAVerifyHelper.visitTBAAMetadata(I, TBAA)) + if (!TBAA || TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA)) continue; MDLoader->setStripTBAA(true); stripTBAA(F->getParent()); diff --git a/llvm/lib/CAS/CMakeLists.txt b/llvm/lib/CAS/CMakeLists.txt index 6ed724bc2fd76..7ae5f7e46418e 100644 --- a/llvm/lib/CAS/CMakeLists.txt +++ b/llvm/lib/CAS/CMakeLists.txt @@ -2,14 +2,19 @@ add_llvm_component_library(LLVMCAS ActionCache.cpp ActionCaches.cpp BuiltinCAS.cpp + DatabaseFile.cpp InMemoryCAS.cpp MappedFileRegionArena.cpp ObjectStore.cpp OnDiskCommon.cpp + OnDiskTrieRawHashMap.cpp ADDITIONAL_HEADER_DIRS ${LLVM_MAIN_INCLUDE_DIR}/llvm/CAS + LINK_LIBS + ${LLVM_PTHREAD_LIB} + LINK_COMPONENTS Support ) diff --git a/llvm/lib/CAS/DatabaseFile.cpp b/llvm/lib/CAS/DatabaseFile.cpp new file mode 100644 index 0000000000000..db8ce1dc5bb14 --- /dev/null +++ b/llvm/lib/CAS/DatabaseFile.cpp @@ -0,0 +1,123 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file This file implements the common abstractions for CAS database file. +/// +//===----------------------------------------------------------------------===// + +#include "DatabaseFile.h" + +using namespace llvm; +using namespace llvm::cas; +using namespace llvm::cas::ondisk; + +Error ondisk::createTableConfigError(std::errc ErrC, StringRef Path, + StringRef TableName, const Twine &Msg) { + return createStringError(make_error_code(ErrC), + Path + "[" + TableName + "]: " + Msg); +} + +Error ondisk::checkTable(StringRef Label, size_t Expected, size_t Observed, + StringRef Path, StringRef TrieName) { + if (Expected == Observed) + return Error::success(); + return createTableConfigError(std::errc::invalid_argument, Path, TrieName, + "mismatched " + Label + + " (expected: " + Twine(Expected) + + ", observed: " + Twine(Observed) + ")"); +} + +Expected +DatabaseFile::create(const Twine &Path, uint64_t Capacity, + function_ref NewDBConstructor) { + // Constructor for if the file doesn't exist. + auto NewFileConstructor = [&](MappedFileRegionArena &Alloc) -> Error { + if (Alloc.capacity() < + sizeof(Header) + sizeof(MappedFileRegionArena::Header)) + return createTableConfigError(std::errc::argument_out_of_domain, + Path.str(), "datafile", + "Allocator too small for header"); + (void)new (Alloc.data()) Header{getMagic(), getVersion(), {0}}; + DatabaseFile DB(Alloc); + return NewDBConstructor(DB); + }; + + // Get or create the file. 
+ MappedFileRegionArena Alloc; + if (Error E = MappedFileRegionArena::create(Path, Capacity, sizeof(Header), + NewFileConstructor) + .moveInto(Alloc)) + return std::move(E); + + return DatabaseFile::get( + std::make_unique(std::move(Alloc))); +} + +Error DatabaseFile::addTable(TableHandle Table) { + assert(Table); + assert(&Table.getRegion() == &getRegion()); + int64_t ExistingRootOffset = 0; + const int64_t NewOffset = + reinterpret_cast(&Table.getHeader()) - getRegion().data(); + if (H->RootTableOffset.compare_exchange_strong(ExistingRootOffset, NewOffset)) + return Error::success(); + + // Silently ignore attempts to set the root to itself. + if (ExistingRootOffset == NewOffset) + return Error::success(); + + // Return an proper error message. + TableHandle Root(getRegion(), ExistingRootOffset); + if (Root.getName() == Table.getName()) + return createStringError( + make_error_code(std::errc::not_supported), + "collision with existing table of the same name '" + Table.getName() + + "'"); + + return createStringError(make_error_code(std::errc::not_supported), + "cannot add new table '" + Table.getName() + + "'" + " to existing root '" + + Root.getName() + "'"); +} + +std::optional DatabaseFile::findTable(StringRef Name) { + int64_t RootTableOffset = H->RootTableOffset.load(); + if (!RootTableOffset) + return std::nullopt; + + TableHandle Root(getRegion(), RootTableOffset); + if (Root.getName() == Name) + return Root; + + return std::nullopt; +} + +Error DatabaseFile::validate(MappedFileRegion &Region) { + if (Region.size() < sizeof(Header)) + return createStringError(std::errc::invalid_argument, + "database: missing header"); + + // Check the magic and version. + auto *H = reinterpret_cast
(Region.data()); + if (H->Magic != getMagic()) + return createStringError(std::errc::invalid_argument, + "database: bad magic"); + if (H->Version != getVersion()) + return createStringError(std::errc::invalid_argument, + "database: wrong version"); + + auto *MFH = reinterpret_cast(Region.data() + + sizeof(Header)); + // Check the bump-ptr, which should point past the header. + if (MFH->BumpPtr.load() < (int64_t)sizeof(Header)) + return createStringError(std::errc::invalid_argument, + "database: corrupt bump-ptr"); + + return Error::success(); +} diff --git a/llvm/lib/CAS/DatabaseFile.h b/llvm/lib/CAS/DatabaseFile.h new file mode 100644 index 0000000000000..609e5f1357190 --- /dev/null +++ b/llvm/lib/CAS/DatabaseFile.h @@ -0,0 +1,153 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +/// \file +/// This file declares the common interface for a DatabaseFile that is used to +/// implement OnDiskCAS. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_CAS_DATABASEFILE_H +#define LLVM_LIB_CAS_DATABASEFILE_H + +#include "llvm/ADT/StringRef.h" +#include "llvm/CAS/MappedFileRegionArena.h" +#include "llvm/Support/Error.h" + +namespace llvm::cas::ondisk { + +using MappedFileRegion = MappedFileRegionArena::RegionT; + +/// Generic handle for a table. +/// +/// Generic table header layout: +/// - 2-bytes: TableKind +/// - 2-bytes: TableNameSize +/// - 4-bytes: TableNameRelOffset (relative to header) +class TableHandle { +public: + enum class TableKind : uint16_t { + TrieRawHashMap = 1, + DataAllocator = 2, + }; + struct Header { + TableKind Kind; + uint16_t NameSize; + int32_t NameRelOffset; ///< Relative to Header. + }; + + explicit operator bool() const { return H; } + const Header &getHeader() const { return *H; } + MappedFileRegion &getRegion() const { return *Region; } + + template static void check() { + static_assert( + std::is_same::value, + "T::GenericHeader should be of type TableHandle::Header"); + static_assert(offsetof(typename T::Header, GenericHeader) == 0, + "T::GenericHeader must be the head of T::Header"); + } + template bool is() const { return T::Kind == H->Kind; } + template T dyn_cast() const { + check(); + if (is()) + return T(*Region, *reinterpret_cast(H)); + return T(); + } + template T cast() const { + assert(is()); + return dyn_cast(); + } + + StringRef getName() const { + auto *Begin = reinterpret_cast(H) + H->NameRelOffset; + return StringRef(Begin, H->NameSize); + } + + TableHandle() = default; + TableHandle(MappedFileRegion &Region, Header &H) : Region(&Region), H(&H) {} + TableHandle(MappedFileRegion &Region, intptr_t HeaderOffset) + : TableHandle(Region, + *reinterpret_cast
(Region.data() + HeaderOffset)) { + } + +private: + MappedFileRegion *Region = nullptr; + Header *H = nullptr; +}; + +/// Encapsulate a database file, which: +/// - Sets/checks magic. +/// - Sets/checks version. +/// - Points at an arbitrary root table. +/// - Sets up a MappedFileRegionArena for allocation. +/// +/// Top-level layout: +/// - 4-bytes: Magic +/// - 4-bytes: Version +/// - 8-bytes: RootTableOffset (16-bits: Kind; 48-bits: Offset) +/// - 8-bytes: BumpPtr from MappedFileRegionArena +class DatabaseFile { +public: + static constexpr uint32_t getMagic() { return 0xDA7ABA53UL; } + static constexpr uint32_t getVersion() { return 1UL; } + struct Header { + uint32_t Magic; + uint32_t Version; + std::atomic RootTableOffset; + }; + + const Header &getHeader() { return *H; } + MappedFileRegionArena &getAlloc() { return Alloc; } + MappedFileRegion &getRegion() { return Alloc.getRegion(); } + + /// Add a table. This is currently not thread safe and should be called inside + /// NewDBConstructor. + Error addTable(TableHandle Table); + + /// Find a table. May return null. + std::optional findTable(StringRef Name); + + /// Create the DatabaseFile at Path with Capacity. + static Expected + create(const Twine &Path, uint64_t Capacity, + function_ref NewDBConstructor); + + size_t size() const { return Alloc.size(); } + +private: + static Expected + get(std::unique_ptr Alloc) { + if (Error E = validate(Alloc->getRegion())) + return std::move(E); + return DatabaseFile(std::move(Alloc)); + } + + static Error validate(MappedFileRegion &Region); + + DatabaseFile(MappedFileRegionArena &Alloc) + : H(reinterpret_cast
(Alloc.data())), Alloc(Alloc) {} + DatabaseFile(std::unique_ptr Alloc) + : DatabaseFile(*Alloc) { + OwnedAlloc = std::move(Alloc); + } + + Header *H = nullptr; + MappedFileRegionArena &Alloc; + std::unique_ptr OwnedAlloc; +}; + +Error createTableConfigError(std::errc ErrC, StringRef Path, + StringRef TableName, const Twine &Msg); + +Error checkTable(StringRef Label, size_t Expected, size_t Observed, + StringRef Path, StringRef TrieName); + +} // namespace llvm::cas::ondisk + +#endif diff --git a/llvm/lib/CAS/InMemoryCAS.cpp b/llvm/lib/CAS/InMemoryCAS.cpp index 255b89c15c4c5..c63ee70de0849 100644 --- a/llvm/lib/CAS/InMemoryCAS.cpp +++ b/llvm/lib/CAS/InMemoryCAS.cpp @@ -57,6 +57,9 @@ class InMemoryObject { InMemoryObject() = delete; InMemoryObject(InMemoryObject &&) = delete; InMemoryObject(const InMemoryObject &) = delete; + InMemoryObject &operator=(const InMemoryObject &) = delete; + InMemoryObject &operator=(InMemoryObject &&) = delete; + virtual ~InMemoryObject() = default; protected: InMemoryObject(Kind K, const InMemoryIndexValueT &I) : IndexAndKind(&I, K) {} diff --git a/llvm/lib/CAS/OnDiskTrieRawHashMap.cpp b/llvm/lib/CAS/OnDiskTrieRawHashMap.cpp new file mode 100644 index 0000000000000..940389336ce22 --- /dev/null +++ b/llvm/lib/CAS/OnDiskTrieRawHashMap.cpp @@ -0,0 +1,1179 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file Implements OnDiskTrieRawHashMap. +/// +//===----------------------------------------------------------------------===// + +#include "llvm/CAS/OnDiskTrieRawHashMap.h" +#include "DatabaseFile.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/TrieHashIndexGenerator.h" +#include "llvm/CAS/MappedFileRegionArena.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/Support/ThreadPool.h" +#include "llvm/Support/Threading.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; +using namespace llvm::cas; +using namespace llvm::cas::ondisk; + +#if LLVM_ENABLE_ONDISK_CAS + +//===----------------------------------------------------------------------===// +// TrieRawHashMap data structures. +//===----------------------------------------------------------------------===// + +namespace { + +class SubtrieHandle; +class TrieRawHashMapHandle; +class TrieVisitor; + +/// A value stored in the slots inside a SubTrie. A stored value can either be a +/// subtrie (encoded after negation) which is the file offset to another +/// subtrie, or it can be a fileset to a DataRecord. 
+class SubtrieSlotValue { +public: + explicit operator bool() const { return !isEmpty(); } + bool isEmpty() const { return !Offset; } + bool isData() const { return Offset > 0; } + bool isSubtrie() const { return Offset < 0; } + uint64_t asData() const { + assert(isData()); + return Offset; + } + uint64_t asSubtrie() const { + assert(isSubtrie()); + return -Offset; + } + + FileOffset asSubtrieFileOffset() const { return FileOffset(asSubtrie()); } + + FileOffset asDataFileOffset() const { return FileOffset(asData()); } + + int64_t getRawOffset() const { return Offset; } + + static SubtrieSlotValue getDataOffset(int64_t Offset) { + return SubtrieSlotValue(Offset); + } + + static SubtrieSlotValue getSubtrieOffset(int64_t Offset) { + return SubtrieSlotValue(-Offset); + } + + static SubtrieSlotValue getDataOffset(FileOffset Offset) { + return getDataOffset(Offset.get()); + } + + static SubtrieSlotValue getSubtrieOffset(FileOffset Offset) { + return getDataOffset(Offset.get()); + } + + static SubtrieSlotValue getFromSlot(std::atomic &Slot) { + return SubtrieSlotValue(Slot.load()); + } + + SubtrieSlotValue() = default; + +private: + friend class SubtrieHandle; + explicit SubtrieSlotValue(int64_t Offset) : Offset(Offset) {} + int64_t Offset = 0; +}; + +/// Subtrie layout: +/// - 2-bytes: StartBit +/// - 1-bytes: NumBits=lg(num-slots) +/// - 5-bytes: 0-pad +/// - +class SubtrieHandle { +public: + struct Header { + /// The bit this subtrie starts on. + uint16_t StartBit; + + /// The number of bits this subtrie handles. It has 2^NumBits slots. + uint8_t NumBits; + + /// 0-pad to 8B. + uint8_t ZeroPad1B; + uint32_t ZeroPad4B; + }; + + /// Slot storage: + /// - zero: Empty + /// - positive: RecordOffset + /// - negative: SubtrieOffset + using SlotT = std::atomic; + + static int64_t getSlotsSize(uint32_t NumBits) { + return sizeof(int64_t) * (1ull << NumBits); + } + + static int64_t getSize(uint32_t NumBits) { + return sizeof(SubtrieHandle::Header) + getSlotsSize(NumBits); + } + + int64_t getSize() const { return getSize(H->NumBits); } + size_t getNumSlots() const { return Slots.size(); } + + SubtrieSlotValue load(size_t I) const { + return SubtrieSlotValue(Slots[I].load()); + } + void store(size_t I, SubtrieSlotValue V) { + return Slots[I].store(V.getRawOffset()); + } + + void printHash(raw_ostream &OS, ArrayRef Bytes) const; + + /// Return None on success, or the existing offset on failure. + bool compare_exchange_strong(size_t I, SubtrieSlotValue &Expected, + SubtrieSlotValue New) { + return Slots[I].compare_exchange_strong(Expected.Offset, New.Offset); + } + + /// Sink \p V from \p I in this subtrie down to \p NewI in a new subtrie with + /// \p NumSubtrieBits. + /// + /// \p UnusedSubtrie maintains a 1-item "free" list of unused subtries. If a + /// new subtrie is created that isn't used because of a lost race, then it If + /// it's already valid, it should be used instead of allocating a new one. + /// should be returned as an out parameter to be passed back in the future. + /// If it's already valid, it should be used instead of allocating a new one. + /// + /// Returns the subtrie that now lives at \p I. + Expected sink(size_t I, SubtrieSlotValue V, + MappedFileRegionArena &Alloc, + size_t NumSubtrieBits, + SubtrieHandle &UnusedSubtrie, size_t NewI); + + /// Only safe if the subtrie is empty. 
+ void reinitialize(uint32_t StartBit, uint32_t NumBits); + + SubtrieSlotValue getOffset() const { + return SubtrieSlotValue::getSubtrieOffset( + reinterpret_cast(H) - Region->data()); + } + + FileOffset getFileOffset() const { return getOffset().asSubtrieFileOffset(); } + + explicit operator bool() const { return H; } + + Header &getHeader() const { return *H; } + uint32_t getStartBit() const { return H->StartBit; } + uint32_t getNumBits() const { return H->NumBits; } + + static Expected create(MappedFileRegionArena &Alloc, + uint32_t StartBit, uint32_t NumBits); + + static SubtrieHandle getFromFileOffset(MappedFileRegion &Region, + FileOffset Offset) { + return SubtrieHandle(Region, SubtrieSlotValue::getSubtrieOffset(Offset)); + } + + SubtrieHandle() = default; + SubtrieHandle(MappedFileRegion &Region, Header &H) + : Region(&Region), H(&H), Slots(getSlots(H)) {} + SubtrieHandle(MappedFileRegion &Region, SubtrieSlotValue Offset) + : SubtrieHandle(Region, *reinterpret_cast
( + Region.data() + Offset.asSubtrie())) {} + +private: + MappedFileRegion *Region = nullptr; + Header *H = nullptr; + MutableArrayRef Slots; + + static MutableArrayRef getSlots(Header &H) { + return MutableArrayRef(reinterpret_cast(&H + 1), + 1ull << H.NumBits); + } +}; + +/// Handle for a TrieRawHashMap table. +/// +/// TrieRawHashMap table layout: +/// - [8-bytes: Generic table header] +/// - 1-byte: NumSubtrieBits +/// - 1-byte: Flags (not used yet) +/// - 2-bytes: NumHashBits +/// - 4-bytes: RecordDataSize (in bytes) +/// - 8-bytes: RootTrieOffset +/// - 8-bytes: AllocatorOffset (reserved for implementing free lists) +/// - '\0' +/// +/// Record layout: +/// - +/// - +class TrieRawHashMapHandle { +public: + static constexpr TableHandle::TableKind Kind = + TableHandle::TableKind::TrieRawHashMap; + + struct Header { + TableHandle::Header GenericHeader; + uint8_t NumSubtrieBits; + uint8_t Flags; ///< None used yet. + uint16_t NumHashBits; + uint32_t RecordDataSize; + std::atomic RootTrieOffset; + std::atomic AllocatorOffset; + }; + + operator TableHandle() const { + if (!H) + return TableHandle(); + return TableHandle(*Region, H->GenericHeader); + } + + struct RecordData { + OnDiskTrieRawHashMap::ValueProxy Proxy; + SubtrieSlotValue Offset; + FileOffset getFileOffset() const { return Offset.asDataFileOffset(); } + }; + + enum Limits : size_t { + /// Seems like 65528 hash bits ought to be enough. + MaxNumHashBytes = UINT16_MAX >> 3, + MaxNumHashBits = MaxNumHashBytes << 3, + + /// 2^16 bits in a trie is 65536 slots. This restricts us to a 16-bit + /// index. This many slots is suspicously large anyway. + MaxNumRootBits = 16, + + /// 2^10 bits in a trie is 1024 slots. This many slots seems suspiciously + /// large for subtries. + MaxNumSubtrieBits = 10, + }; + + static constexpr size_t getNumHashBytes(size_t NumHashBits) { + assert(NumHashBits % 8 == 0); + return NumHashBits / 8; + } + static constexpr size_t getRecordSize(size_t RecordDataSize, + size_t NumHashBits) { + return RecordDataSize + getNumHashBytes(NumHashBits); + } + + RecordData getRecord(SubtrieSlotValue Offset); + Expected createRecord(MappedFileRegionArena &Alloc, + ArrayRef Hash); + + explicit operator bool() const { return H; } + const Header &getHeader() const { return *H; } + SubtrieHandle getRoot() const; + Expected getOrCreateRoot(MappedFileRegionArena &Alloc); + MappedFileRegion &getRegion() const { return *Region; } + + size_t getFlags() const { return H->Flags; } + size_t getNumSubtrieBits() const { return H->NumSubtrieBits; } + size_t getNumHashBits() const { return H->NumHashBits; } + size_t getNumHashBytes() const { return getNumHashBytes(H->NumHashBits); } + size_t getRecordDataSize() const { return H->RecordDataSize; } + size_t getRecordSize() const { + return getRecordSize(H->RecordDataSize, H->NumHashBits); + } + + TrieHashIndexGenerator getIndexGen(SubtrieHandle Root, + ArrayRef Hash) { + assert(Root.getStartBit() == 0); + assert(getNumHashBytes() == Hash.size()); + assert(getNumHashBits() == Hash.size() * 8); + return TrieHashIndexGenerator{Root.getNumBits(), getNumSubtrieBits(), Hash}; + } + + static Expected + create(MappedFileRegionArena &Alloc, StringRef Name, + std::optional NumRootBits, uint64_t NumSubtrieBits, + uint64_t NumHashBits, uint64_t RecordDataSize); + + void + print(raw_ostream &OS, + function_ref)> PrintRecordData = nullptr) const; + + Error validate( + function_ref + RecordVerifier) const; + TrieRawHashMapHandle() = default; + TrieRawHashMapHandle(MappedFileRegion &Region, Header &H) + 
: Region(&Region), H(&H) {} + TrieRawHashMapHandle(MappedFileRegion &Region, intptr_t HeaderOffset) + : TrieRawHashMapHandle( + Region, *reinterpret_cast
(Region.data() + HeaderOffset)) { + } + +private: + MappedFileRegion *Region = nullptr; + Header *H = nullptr; +}; + +} // end anonymous namespace + +struct OnDiskTrieRawHashMap::ImplType { + DatabaseFile File; + TrieRawHashMapHandle Trie; +}; + +Expected SubtrieHandle::create(MappedFileRegionArena &Alloc, + uint32_t StartBit, + uint32_t NumBits) { + assert(StartBit <= TrieRawHashMapHandle::MaxNumHashBits); + assert(NumBits <= UINT8_MAX); + assert(NumBits <= TrieRawHashMapHandle::MaxNumRootBits); + + auto Mem = Alloc.allocate(getSize(NumBits)); + if (LLVM_UNLIKELY(!Mem)) + return Mem.takeError(); + auto *H = + new (*Mem) SubtrieHandle::Header{(uint16_t)StartBit, (uint8_t)NumBits, + /*ZeroPad1B=*/0, /*ZeroPad4B=*/0}; + SubtrieHandle S(Alloc.getRegion(), *H); + for (auto I = S.Slots.begin(), E = S.Slots.end(); I != E; ++I) + new (I) SlotT(0); + return S; +} + +SubtrieHandle TrieRawHashMapHandle::getRoot() const { + if (int64_t Root = H->RootTrieOffset) + return SubtrieHandle(getRegion(), SubtrieSlotValue::getSubtrieOffset(Root)); + return SubtrieHandle(); +} + +Expected +TrieRawHashMapHandle::getOrCreateRoot(MappedFileRegionArena &Alloc) { + assert(&Alloc.getRegion() == &getRegion()); + if (SubtrieHandle Root = getRoot()) + return Root; + + int64_t Race = 0; + auto LazyRoot = SubtrieHandle::create(Alloc, 0, H->NumSubtrieBits); + if (LLVM_UNLIKELY(!LazyRoot)) + return LazyRoot.takeError(); + if (H->RootTrieOffset.compare_exchange_strong( + Race, LazyRoot->getOffset().asSubtrie())) + return *LazyRoot; + + // There was a race. Return the other root. + // + // TODO: Avoid leaking the lazy root by storing it in an allocator. + return SubtrieHandle(getRegion(), SubtrieSlotValue::getSubtrieOffset(Race)); +} + +Expected +TrieRawHashMapHandle::create(MappedFileRegionArena &Alloc, StringRef Name, + std::optional NumRootBits, + uint64_t NumSubtrieBits, uint64_t NumHashBits, + uint64_t RecordDataSize) { + // Allocate. + auto Offset = Alloc.allocateOffset(sizeof(Header) + Name.size() + 1); + if (LLVM_UNLIKELY(!Offset)) + return Offset.takeError(); + + // Construct the header and the name. + assert(Name.size() <= UINT16_MAX && "Expected smaller table name"); + assert(NumSubtrieBits <= UINT8_MAX && "Expected valid subtrie bits"); + assert(NumHashBits <= UINT16_MAX && "Expected valid hash size"); + assert(RecordDataSize <= UINT32_MAX && "Expected smaller table name"); + auto *H = new (Alloc.getRegion().data() + *Offset) + Header{{TableHandle::TableKind::TrieRawHashMap, (uint16_t)Name.size(), + (uint32_t)sizeof(Header)}, + (uint8_t)NumSubtrieBits, + /*Flags=*/0, + (uint16_t)NumHashBits, + (uint32_t)RecordDataSize, + /*RootTrieOffset=*/{0}, + /*AllocatorOffset=*/{0}}; + char *NameStorage = reinterpret_cast(H + 1); + llvm::copy(Name, NameStorage); + NameStorage[Name.size()] = 0; + + // Construct a root trie, if requested. 
+ TrieRawHashMapHandle Trie(Alloc.getRegion(), *H); + auto Sub = SubtrieHandle::create(Alloc, 0, *NumRootBits); + if (LLVM_UNLIKELY(!Sub)) + return Sub.takeError(); + if (NumRootBits) + H->RootTrieOffset = Sub->getOffset().asSubtrie(); + return Trie; +} + +TrieRawHashMapHandle::RecordData +TrieRawHashMapHandle::getRecord(SubtrieSlotValue Offset) { + char *Begin = Region->data() + Offset.asData(); + OnDiskTrieRawHashMap::ValueProxy Proxy; + Proxy.Data = MutableArrayRef(Begin, getRecordDataSize()); + Proxy.Hash = ArrayRef(reinterpret_cast(Proxy.Data.end()), + getNumHashBytes()); + return RecordData{Proxy, Offset}; +} + +Expected +TrieRawHashMapHandle::createRecord(MappedFileRegionArena &Alloc, + ArrayRef Hash) { + assert(&Alloc.getRegion() == Region); + assert(Hash.size() == getNumHashBytes()); + auto Offset = Alloc.allocateOffset(getRecordSize()); + if (LLVM_UNLIKELY(!Offset)) + return Offset.takeError(); + + RecordData Record = getRecord(SubtrieSlotValue::getDataOffset(*Offset)); + llvm::copy(Hash, const_cast(Record.Proxy.Hash.begin())); + return Record; +} + +Expected +OnDiskTrieRawHashMap::recoverFromFileOffset(FileOffset Offset) const { + // Check alignment. + if (!isAligned(MappedFileRegionArena::getAlign(), Offset.get())) + return createStringError(make_error_code(std::errc::protocol_error), + "unaligned file offset at 0x" + + utohexstr(Offset.get(), /*LowerCase=*/true)); + + // Check bounds. + // + // Note: There's no potential overflow when using \c uint64_t because Offset + // is in valid offset range and the record size is in \c [0,UINT32_MAX]. + if (!validOffset(Offset) || + Offset.get() + Impl->Trie.getRecordSize() > Impl->File.getAlloc().size()) + return createStringError(make_error_code(std::errc::protocol_error), + "file offset too large: 0x" + + utohexstr(Offset.get(), /*LowerCase=*/true)); + + // Looks okay... + TrieRawHashMapHandle::RecordData D = + Impl->Trie.getRecord(SubtrieSlotValue::getDataOffset(Offset)); + return const_pointer(D.Proxy, D.getFileOffset()); +} + +OnDiskTrieRawHashMap::const_pointer +OnDiskTrieRawHashMap::find(ArrayRef Hash) const { + TrieRawHashMapHandle Trie = Impl->Trie; + assert(Hash.size() == Trie.getNumHashBytes() && "Invalid hash"); + + SubtrieHandle S = Trie.getRoot(); + if (!S) + return const_pointer(); + + TrieHashIndexGenerator IndexGen = Trie.getIndexGen(S, Hash); + size_t Index = IndexGen.next(); + for (;;) { + // Try to set the content. + SubtrieSlotValue V = S.load(Index); + if (!V) + return const_pointer(); + + // Check for an exact match. + if (V.isData()) { + TrieRawHashMapHandle::RecordData D = Trie.getRecord(V); + return D.Proxy.Hash == Hash ? const_pointer(D.Proxy, D.getFileOffset()) + : const_pointer(); + } + + Index = IndexGen.next(); + S = SubtrieHandle(Trie.getRegion(), V); + } +} + +/// Only safe if the subtrie is empty. +void SubtrieHandle::reinitialize(uint32_t StartBit, uint32_t NumBits) { + assert(StartBit > H->StartBit); + assert(NumBits <= H->NumBits); + // Ideally would also assert that all slots are empty, but that's expensive. 
+ + H->StartBit = StartBit; + H->NumBits = NumBits; +} + +Expected +OnDiskTrieRawHashMap::insertLazy(ArrayRef Hash, + LazyInsertOnConstructCB OnConstruct, + LazyInsertOnLeakCB OnLeak) { + TrieRawHashMapHandle Trie = Impl->Trie; + assert(Hash.size() == Trie.getNumHashBytes() && "Invalid hash"); + + MappedFileRegionArena &Alloc = Impl->File.getAlloc(); + std::optional S; + auto Err = Trie.getOrCreateRoot(Alloc).moveInto(S); + if (LLVM_UNLIKELY(Err)) + return std::move(Err); + + TrieHashIndexGenerator IndexGen = Trie.getIndexGen(*S, Hash); + size_t Index = IndexGen.next(); + + // Walk through the hash bytes and insert into correct trie position. + std::optional NewRecord; + SubtrieHandle UnusedSubtrie; + for (;;) { + SubtrieSlotValue Existing = S->load(Index); + + // Try to set it, if it's empty. + if (!Existing) { + if (!NewRecord) { + auto Err = Trie.createRecord(Alloc, Hash).moveInto(NewRecord); + if (LLVM_UNLIKELY(Err)) + return std::move(Err); + if (OnConstruct) + OnConstruct(NewRecord->Offset.asDataFileOffset(), NewRecord->Proxy); + } + + if (S->compare_exchange_strong(Index, Existing, NewRecord->Offset)) + return pointer(NewRecord->Proxy, NewRecord->Offset.asDataFileOffset()); + + // Race means that Existing is no longer empty; fall through... + } + + if (Existing.isSubtrie()) { + S = SubtrieHandle(Trie.getRegion(), Existing); + Index = IndexGen.next(); + continue; + } + + // Check for an exact match. + TrieRawHashMapHandle::RecordData ExistingRecord = Trie.getRecord(Existing); + if (ExistingRecord.Proxy.Hash == Hash) { + if (NewRecord && OnLeak) + OnLeak(NewRecord->Offset.asDataFileOffset(), NewRecord->Proxy, + ExistingRecord.Offset.asDataFileOffset(), ExistingRecord.Proxy); + return pointer(ExistingRecord.Proxy, + ExistingRecord.Offset.asDataFileOffset()); + } + + // Sink the existing content as long as the indexes match. + for (;;) { + size_t NextIndex = IndexGen.next(); + size_t NewIndexForExistingContent = + IndexGen.getCollidingBits(ExistingRecord.Proxy.Hash); + + auto Err = S->sink(Index, Existing, Alloc, IndexGen.getNumBits(), + UnusedSubtrie, NewIndexForExistingContent) + .moveInto(S); + if (LLVM_UNLIKELY(Err)) + return std::move(Err); + Index = NextIndex; + + // Found the difference. + if (NextIndex != NewIndexForExistingContent) + break; + } + } +} + +Expected SubtrieHandle::sink(size_t I, SubtrieSlotValue V, + MappedFileRegionArena &Alloc, + size_t NumSubtrieBits, + SubtrieHandle &UnusedSubtrie, + size_t NewI) { + std::optional NewS; + if (UnusedSubtrie) { + // Steal UnusedSubtrie and initialize it. + NewS.emplace(); + std::swap(*NewS, UnusedSubtrie); + NewS->reinitialize(getStartBit() + getNumBits(), NumSubtrieBits); + } else { + // Allocate a new, empty subtrie. + auto Err = SubtrieHandle::create(Alloc, getStartBit() + getNumBits(), + NumSubtrieBits) + .moveInto(NewS); + if (LLVM_UNLIKELY(Err)) + return std::move(Err); + } + + NewS->store(NewI, V); + if (compare_exchange_strong(I, V, NewS->getOffset())) + return *NewS; // Success! + + // Raced. + assert(V.isSubtrie() && "Expected racing sink() to add a subtrie"); + + // Wipe out the new slot so NewS can be reused and set the out parameter. + NewS->store(NewI, SubtrieSlotValue()); + UnusedSubtrie = *NewS; + + // Return the subtrie added by the concurrent sink() call. 
+ return SubtrieHandle(Alloc.getRegion(), V); +} + +void OnDiskTrieRawHashMap::print( + raw_ostream &OS, function_ref)> PrintRecordData) const { + Impl->Trie.print(OS, PrintRecordData); +} + +Error OnDiskTrieRawHashMap::validate( + function_ref RecordVerifier) const { + return Impl->Trie.validate(RecordVerifier); +} + +// Helper function that prints hexdigit and have a sub-byte starting position. +static void printHexDigits(raw_ostream &OS, ArrayRef Bytes, + size_t StartBit, size_t NumBits) { + assert(StartBit % 4 == 0); + assert(NumBits % 4 == 0); + for (size_t I = StartBit, E = StartBit + NumBits; I != E; I += 4) { + uint8_t HexPair = Bytes[I / 8]; + uint8_t HexDigit = I % 8 == 0 ? HexPair >> 4 : HexPair & 0xf; + OS << hexdigit(HexDigit, /*LowerCase=*/true); + } +} + +static void printBits(raw_ostream &OS, ArrayRef Bytes, size_t StartBit, + size_t NumBits) { + assert(StartBit + NumBits <= Bytes.size() * 8u); + for (size_t I = StartBit, E = StartBit + NumBits; I != E; ++I) { + uint8_t Byte = Bytes[I / 8]; + size_t ByteOffset = I % 8; + if (size_t ByteShift = 8 - ByteOffset - 1) + Byte >>= ByteShift; + OS << (Byte & 0x1 ? '1' : '0'); + } +} + +void SubtrieHandle::printHash(raw_ostream &OS, ArrayRef Bytes) const { + // afb[1c:00*01110*0]def + size_t EndBit = getStartBit() + getNumBits(); + size_t HashEndBit = Bytes.size() * 8u; + + size_t FirstBinaryBit = getStartBit() & ~0x3u; + printHexDigits(OS, Bytes, 0, FirstBinaryBit); + + size_t LastBinaryBit = (EndBit + 3u) & ~0x3u; + OS << "["; + printBits(OS, Bytes, FirstBinaryBit, LastBinaryBit - FirstBinaryBit); + OS << "]"; + + printHexDigits(OS, Bytes, LastBinaryBit, HashEndBit - LastBinaryBit); +} + +static void appendIndexBits(std::string &Prefix, size_t Index, + size_t NumSlots) { + std::string Bits; + for (size_t NumBits = 1u; NumBits < NumSlots; NumBits <<= 1) { + Bits.push_back('0' + (Index & 0x1)); + Index >>= 1; + } + for (char Ch : llvm::reverse(Bits)) + Prefix += Ch; +} + +static void printPrefix(raw_ostream &OS, StringRef Prefix) { + while (Prefix.size() >= 4) { + uint8_t Digit; + bool ErrorParsingBinary = Prefix.take_front(4).getAsInteger(2, Digit); + assert(!ErrorParsingBinary); + (void)ErrorParsingBinary; + OS << hexdigit(Digit, /*LowerCase=*/true); + Prefix = Prefix.drop_front(4); + } + if (!Prefix.empty()) + OS << "[" << Prefix << "]"; +} + +LLVM_DUMP_METHOD void OnDiskTrieRawHashMap::dump() const { print(dbgs()); } + +static Expected checkParameter(StringRef Label, size_t Max, + std::optional Value, + std::optional Default, + StringRef Path, StringRef TableName) { + assert(Value || Default); + assert(!Default || *Default <= Max); + if (!Value) + return *Default; + + if (*Value <= Max) + return *Value; + return createTableConfigError( + std::errc::argument_out_of_domain, Path, TableName, + "invalid " + Label + ": " + Twine(*Value) + " (max: " + Twine(Max) + ")"); +} + +size_t OnDiskTrieRawHashMap::size() const { return Impl->File.size(); } +size_t OnDiskTrieRawHashMap::capacity() const { + return Impl->File.getRegion().size(); +} + +Expected +OnDiskTrieRawHashMap::create(const Twine &PathTwine, const Twine &TrieNameTwine, + size_t NumHashBits, uint64_t DataSize, + uint64_t MaxFileSize, + std::optional NewFileInitialSize, + std::optional NewTableNumRootBits, + std::optional NewTableNumSubtrieBits) { + SmallString<128> PathStorage; + StringRef Path = PathTwine.toStringRef(PathStorage); + SmallString<128> TrieNameStorage; + StringRef TrieName = TrieNameTwine.toStringRef(TrieNameStorage); + + constexpr size_t DefaultNumRootBits = 
10; + constexpr size_t DefaultNumSubtrieBits = 6; + + size_t NumRootBits; + if (Error E = checkParameter( + "root bits", TrieRawHashMapHandle::MaxNumRootBits, + NewTableNumRootBits, DefaultNumRootBits, Path, TrieName) + .moveInto(NumRootBits)) + return std::move(E); + + size_t NumSubtrieBits; + if (Error E = checkParameter("subtrie bits", + TrieRawHashMapHandle::MaxNumSubtrieBits, + NewTableNumSubtrieBits, DefaultNumSubtrieBits, + Path, TrieName) + .moveInto(NumSubtrieBits)) + return std::move(E); + + size_t NumHashBytes = NumHashBits >> 3; + if (Error E = + checkParameter("hash size", TrieRawHashMapHandle::MaxNumHashBits, + NumHashBits, std::nullopt, Path, TrieName) + .takeError()) + return std::move(E); + assert(NumHashBits == NumHashBytes << 3 && + "Expected hash size to be byte-aligned"); + if (NumHashBits != NumHashBytes << 3) + return createTableConfigError( + std::errc::argument_out_of_domain, Path, TrieName, + "invalid hash size: " + Twine(NumHashBits) + " (not byte-aligned)"); + + // Constructor for if the file doesn't exist. + auto NewDBConstructor = [&](DatabaseFile &DB) -> Error { + auto Trie = + TrieRawHashMapHandle::create(DB.getAlloc(), TrieName, NumRootBits, + NumSubtrieBits, NumHashBits, DataSize); + if (LLVM_UNLIKELY(!Trie)) + return Trie.takeError(); + + return DB.addTable(*Trie); + }; + + // Get or create the file. + Expected File = + DatabaseFile::create(Path, MaxFileSize, NewDBConstructor); + if (!File) + return File.takeError(); + + // Find the trie and validate it. + std::optional Table = File->findTable(TrieName); + if (!Table) + return createTableConfigError(std::errc::argument_out_of_domain, Path, + TrieName, "table not found"); + if (Error E = checkTable("table kind", (size_t)TrieRawHashMapHandle::Kind, + (size_t)Table->getHeader().Kind, Path, TrieName)) + return std::move(E); + auto Trie = Table->cast(); + assert(Trie && "Already checked the kind"); + + // Check the hash and data size. + if (Error E = checkTable("hash size", NumHashBits, Trie.getNumHashBits(), + Path, TrieName)) + return std::move(E); + if (Error E = checkTable("data size", DataSize, Trie.getRecordDataSize(), + Path, TrieName)) + return std::move(E); + + // No flags supported right now. Either corrupt, or coming from a future + // writer. + if (size_t Flags = Trie.getFlags()) + return createTableConfigError(std::errc::invalid_argument, Path, TrieName, + "unsupported flags: " + Twine(Flags)); + + // Success. + OnDiskTrieRawHashMap::ImplType Impl{DatabaseFile(std::move(*File)), Trie}; + return OnDiskTrieRawHashMap(std::make_unique(std::move(Impl))); +} + +static Error createInvalidTrieError(uint64_t Offset, const Twine &Msg) { + return createStringError(make_error_code(std::errc::protocol_error), + "invalid trie at 0x" + + utohexstr(Offset, /*LowerCase=*/true) + ": " + + Msg); +} + +//===----------------------------------------------------------------------===// +// TrieVisitor data structures. +//===----------------------------------------------------------------------===// + +namespace { +/// A multi-threaded vistior to traverse the Trie. +/// +/// TODO: add more sanity checks that isn't just plain data corruption. For +/// example, some ill-formed data can be constructed to form a cycle using +/// Sub-Tries and it can lead to inifinite loop when visiting (or inserting +/// data). 
+class TrieVisitor { +public: + TrieVisitor(TrieRawHashMapHandle Trie, unsigned ThreadCount = 0, + unsigned ErrorLimit = 50) + : Trie(Trie), ErrorLimit(ErrorLimit), + Threads(hardware_concurrency(ThreadCount)) {} + virtual ~TrieVisitor() = default; + Error visit(); + +private: + // Virtual method to implement the action when visiting a sub-trie. + virtual Error visitSubTrie(StringRef Prefix, SubtrieHandle SubTrie) { + return Error::success(); + } + + // Virtual method to implement the action when visiting a slot in a trie node. + virtual Error visitSlot(unsigned I, SubtrieHandle Subtrie, StringRef Prefix, + SubtrieSlotValue Slot) { + return Error::success(); + } + +protected: + TrieRawHashMapHandle Trie; + +private: + Error traverseTrieNode(SubtrieHandle Node, StringRef Prefix); + + Error validateSubTrie(SubtrieHandle Node, bool IsRoot); + + // Helper function to capture errors when visiting the trie nodes. + void addError(Error NewError) { + assert(NewError && "not an error"); + std::lock_guard ErrorLock(Lock); + if (NumError >= ErrorLimit) { + // Too many errors. + consumeError(std::move(NewError)); + return; + } + + if (Err) + Err = joinErrors(std::move(*Err), std::move(NewError)); + else + Err = std::move(NewError); + NumError++; + } + + bool tooManyErrors() { + std::lock_guard ErrorLock(Lock); + return (bool)Err && NumError >= ErrorLimit; + } + + const unsigned ErrorLimit; + std::optional Err; + unsigned NumError = 0; + std::mutex Lock; + DefaultThreadPool Threads; +}; + +/// A visitor that traverses and prints the Trie. +class TriePrinter : public TrieVisitor { +public: + TriePrinter(TrieRawHashMapHandle Trie, raw_ostream &OS, + function_ref)> PrintRecordData) + : TrieVisitor(Trie, /*ThreadCount=*/1), OS(OS), + PrintRecordData(PrintRecordData) {} + + Error printRecords() { + if (Records.empty()) + return Error::success(); + + OS << "records\n"; + llvm::sort(Records); + for (int64_t Offset : Records) { + TrieRawHashMapHandle::RecordData Record = + Trie.getRecord(SubtrieSlotValue::getDataOffset(Offset)); + if (auto Err = printRecord(Record)) + return Err; + } + return Error::success(); + } + + Error printRecord(TrieRawHashMapHandle::RecordData &Record) { + OS << "- addr=" << (void *)Record.getFileOffset().get() << " "; + if (PrintRecordData) { + PrintRecordData(Record.Proxy.Data); + } else { + OS << "bytes="; + ArrayRef Data( + reinterpret_cast(Record.Proxy.Data.data()), + Record.Proxy.Data.size()); + printHexDigits(OS, Data, 0, Data.size() * 8); + } + OS << "\n"; + return Error::success(); + } + + Error visitSubTrie(StringRef Prefix, SubtrieHandle SubTrie) override { + if (Prefix.empty()) { + OS << "root"; + } else { + OS << "subtrie="; + printPrefix(OS, Prefix); + } + + OS << " addr=" + << (void *)(reinterpret_cast(&SubTrie.getHeader()) - + Trie.getRegion().data()); + OS << " num-slots=" << SubTrie.getNumSlots() << "\n"; + return Error::success(); + } + + Error visitSlot(unsigned I, SubtrieHandle Subtrie, StringRef Prefix, + SubtrieSlotValue Slot) override { + OS << "- index="; + for (size_t Pad : {10, 100, 1000}) + if (I < Pad && Subtrie.getNumSlots() >= Pad) + OS << "0"; + OS << I << " "; + if (Slot.isSubtrie()) { + OS << "addr=" << (void *)Slot.asSubtrie(); + OS << " subtrie="; + printPrefix(OS, Prefix); + OS << "\n"; + return Error::success(); + } + TrieRawHashMapHandle::RecordData Record = Trie.getRecord(Slot); + OS << "addr=" << (void *)Record.getFileOffset().get(); + OS << " content="; + Subtrie.printHash(OS, Record.Proxy.Hash); + OS << "\n"; + Records.push_back(Slot.asData()); 
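    // Only the data offset is recorded at this point; the record payloads are
    // emitted later by printRecords(), which sorts Records and prints each
    // entry once the trie structure itself has been fully walked.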
+ return Error::success(); + } + +private: + raw_ostream &OS; + function_ref)> PrintRecordData; + SmallVector Records; +}; + +/// TrieVerifier that adds additional verification on top of the basic visitor. +class TrieVerifier : public TrieVisitor { +public: + TrieVerifier( + TrieRawHashMapHandle Trie, + function_ref + RecordVerifier) + : TrieVisitor(Trie), RecordVerifier(RecordVerifier) {} + +private: + Error visitSubTrie(StringRef Prefix, SubtrieHandle SubTrie) final { + return Error::success(); + } + + Error visitSlot(unsigned I, SubtrieHandle Subtrie, StringRef Prefix, + SubtrieSlotValue Slot) final { + if (RecordVerifier && Slot.isData()) { + if (!isAligned(MappedFileRegionArena::getAlign(), Slot.asData())) + return createInvalidTrieError(Slot.asData(), "mis-aligned data entry"); + + TrieRawHashMapHandle::RecordData Record = + Trie.getRecord(SubtrieSlotValue::getDataOffset(Slot.asData())); + return RecordVerifier(Slot.asDataFileOffset(), + OnDiskTrieRawHashMap::ConstValueProxy{ + Record.Proxy.Hash, Record.Proxy.Data}); + } + return Error::success(); + } + + function_ref + RecordVerifier; +}; +} // namespace + +Error TrieVisitor::visit() { + auto Root = Trie.getRoot(); + if (!Root) + return Error::success(); + + if (auto Err = validateSubTrie(Root, /*IsRoot=*/true)) + return Err; + + if (auto Err = visitSubTrie("", Root)) + return Err; + + SmallVector Subs; + SmallVector Prefixes; + const size_t NumSlots = Root.getNumSlots(); + for (size_t I = 0, E = NumSlots; I != E; ++I) { + SubtrieSlotValue Slot = Root.load(I); + if (!Slot) + continue; + uint64_t Offset = Slot.isSubtrie() ? Slot.asSubtrie() : Slot.asData(); + if (Offset >= (uint64_t)Trie.getRegion().size()) + return createInvalidTrieError(Offset, "slot points out of bound"); + std::string SubtriePrefix; + appendIndexBits(SubtriePrefix, I, NumSlots); + if (Slot.isSubtrie()) { + SubtrieHandle S(Trie.getRegion(), Slot); + Subs.push_back(S); + Prefixes.push_back(SubtriePrefix); + } + if (auto Err = visitSlot(I, Root, SubtriePrefix, Slot)) + return Err; + } + + for (size_t I = 0, E = Subs.size(); I != E; ++I) { + Threads.async( + [&](unsigned Idx) { + // Don't run if there is an error already. 
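              // (Each root slot's subtrie is traversed as its own thread-pool
              // task; addError() aggregates failures under Lock and stops
              // collecting once ErrorLimit is reached.)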
+ if (tooManyErrors()) + return; + if (auto Err = traverseTrieNode(Subs[Idx], Prefixes[Idx])) + addError(std::move(Err)); + }, + I); + } + + Threads.wait(); + if (Err) + return std::move(*Err); + return Error::success(); +} + +Error TrieVisitor::validateSubTrie(SubtrieHandle Node, bool IsRoot) { + char *Addr = reinterpret_cast(&Node.getHeader()); + const int64_t Offset = Node.getFileOffset().get(); + if (Addr + Node.getSize() >= + Trie.getRegion().data() + Trie.getRegion().size()) + return createInvalidTrieError(Offset, "subtrie node spans out of bound"); + + if (!IsRoot && + Node.getStartBit() + Node.getNumBits() > Trie.getNumHashBits()) { + return createInvalidTrieError(Offset, + "subtrie represents too many hash bits"); + } + + if (IsRoot) { + if (Node.getStartBit() != 0) + return createInvalidTrieError(Offset, + "root node doesn't start at 0 index"); + + return Error::success(); + } + + if (Node.getNumBits() > Trie.getNumSubtrieBits()) + return createInvalidTrieError(Offset, "subtrie has wrong number of slots"); + + return Error::success(); +} + +Error TrieVisitor::traverseTrieNode(SubtrieHandle Node, StringRef Prefix) { + if (auto Err = validateSubTrie(Node, /*IsRoot=*/false)) + return Err; + + if (auto Err = visitSubTrie(Prefix, Node)) + return Err; + + SmallVector Subs; + SmallVector Prefixes; + const size_t NumSlots = Node.getNumSlots(); + for (size_t I = 0, E = NumSlots; I != E; ++I) { + SubtrieSlotValue Slot = Node.load(I); + if (!Slot) + continue; + uint64_t Offset = Slot.isSubtrie() ? Slot.asSubtrie() : Slot.asData(); + if (Offset >= (uint64_t)Trie.getRegion().size()) + return createInvalidTrieError(Offset, "slot points out of bound"); + std::string SubtriePrefix = Prefix.str(); + appendIndexBits(SubtriePrefix, I, NumSlots); + if (Slot.isSubtrie()) { + SubtrieHandle S(Trie.getRegion(), Slot); + Subs.push_back(S); + Prefixes.push_back(SubtriePrefix); + } + if (auto Err = visitSlot(I, Node, SubtriePrefix, Slot)) + return Err; + } + for (size_t I = 0, E = Subs.size(); I != E; ++I) + if (auto Err = traverseTrieNode(Subs[I], Prefixes[I])) + return Err; + + return Error::success(); +} + +void TrieRawHashMapHandle::print( + raw_ostream &OS, function_ref)> PrintRecordData) const { + OS << "hash-num-bits=" << getNumHashBits() + << " hash-size=" << getNumHashBytes() + << " record-data-size=" << getRecordDataSize() << "\n"; + + TriePrinter Printer(*this, OS, PrintRecordData); + if (auto Err = Printer.visit()) + OS << "error: " << toString(std::move(Err)) << "\n"; + + if (auto Err = Printer.printRecords()) + OS << "error: " << toString(std::move(Err)) << "\n"; + + return; +} + +Error TrieRawHashMapHandle::validate( + function_ref + RecordVerifier) const { + // Use the base TrieVisitor to identify the errors inside trie first. + TrieVisitor BasicVerifier(*this); + if (auto Err = BasicVerifier.visit()) + return Err; + + // If the trie data structure is sound, do a second pass to verify data and + // verifier function can assume the index is correct. However, there can be + // newly added bad entries that can still produce error. 
+ TrieVerifier Verifier(*this, RecordVerifier); + return Verifier.visit(); +} + +#else // !LLVM_ENABLE_ONDISK_CAS + +struct OnDiskTrieRawHashMap::ImplType {}; + +Expected +OnDiskTrieRawHashMap::create(const Twine &PathTwine, const Twine &TrieNameTwine, + size_t NumHashBits, uint64_t DataSize, + uint64_t MaxFileSize, + std::optional NewFileInitialSize, + std::optional NewTableNumRootBits, + std::optional NewTableNumSubtrieBits) { + return createStringError(make_error_code(std::errc::not_supported), + "OnDiskTrieRawHashMap is not supported"); +} + +Expected +OnDiskTrieRawHashMap::insertLazy(ArrayRef Hash, + LazyInsertOnConstructCB OnConstruct, + LazyInsertOnLeakCB OnLeak) { + return createStringError(make_error_code(std::errc::not_supported), + "OnDiskTrieRawHashMap is not supported"); +} + +Expected +OnDiskTrieRawHashMap::recoverFromFileOffset(FileOffset Offset) const { + return createStringError(make_error_code(std::errc::not_supported), + "OnDiskTrieRawHashMap is not supported"); +} + +OnDiskTrieRawHashMap::const_pointer +OnDiskTrieRawHashMap::find(ArrayRef Hash) const { + return const_pointer(); +} + +void OnDiskTrieRawHashMap::print( + raw_ostream &OS, function_ref)> PrintRecordData) const { +} + +Error OnDiskTrieRawHashMap::validate( + function_ref + RecordVerifier) const { + return createStringError(make_error_code(std::errc::not_supported), + "OnDiskTrieRawHashMap is not supported"); +} + +size_t OnDiskTrieRawHashMap::size() const { return 0; } +size_t OnDiskTrieRawHashMap::capacity() const { return 0; } + +#endif // LLVM_ENABLE_ONDISK_CAS + +OnDiskTrieRawHashMap::OnDiskTrieRawHashMap(std::unique_ptr Impl) + : Impl(std::move(Impl)) {} +OnDiskTrieRawHashMap::OnDiskTrieRawHashMap(OnDiskTrieRawHashMap &&RHS) = + default; +OnDiskTrieRawHashMap & +OnDiskTrieRawHashMap::operator=(OnDiskTrieRawHashMap &&RHS) = default; +OnDiskTrieRawHashMap::~OnDiskTrieRawHashMap() = default; diff --git a/llvm/lib/CGData/CodeGenData.cpp b/llvm/lib/CGData/CodeGenData.cpp index b4f08c3d13b0d..7900dc7653c03 100644 --- a/llvm/lib/CGData/CodeGenData.cpp +++ b/llvm/lib/CGData/CodeGenData.cpp @@ -31,11 +31,14 @@ static cl::opt static cl::opt CodeGenDataUsePath("codegen-data-use-path", cl::init(""), cl::Hidden, cl::desc("File path to where .cgdata file is read")); + +namespace llvm { cl::opt CodeGenDataThinLTOTwoRounds( "codegen-data-thinlto-two-rounds", cl::init(false), cl::Hidden, cl::desc("Enable two-round ThinLTO code generation. The first round " "emits codegen data, while the second round uses the emitted " "codegen data for further optimizations.")); +} // end namespace llvm static std::string getCGDataErrString(cgdata_error Err, const std::string &ErrMsg = "") { diff --git a/llvm/lib/CGData/CodeGenDataReader.cpp b/llvm/lib/CGData/CodeGenDataReader.cpp index fc59be8df525a..b1cd939db9a4f 100644 --- a/llvm/lib/CGData/CodeGenDataReader.cpp +++ b/llvm/lib/CGData/CodeGenDataReader.cpp @@ -26,14 +26,14 @@ static cl::opt IndexedCodeGenDataReadFunctionMapNames( "disabled to save memory and time for final consumption of the " "indexed CodeGenData in production.")); +namespace llvm { + cl::opt IndexedCodeGenDataLazyLoading( "indexed-codegen-data-lazy-loading", cl::init(false), cl::Hidden, cl::desc( "Lazily load indexed CodeGenData. Enable to save memory and time " "for final consumption of the indexed CodeGenData in production.")); -namespace llvm { - static Expected> setupMemoryBuffer(const Twine &Filename, vfs::FileSystem &FS) { auto BufferOrErr = Filename.str() == "-" ? 
MemoryBuffer::getSTDIN() @@ -169,8 +169,8 @@ bool IndexedCodeGenDataReader::hasFormat(const MemoryBuffer &DataBuffer) { if (DataBuffer.getBufferSize() < sizeof(IndexedCGData::Magic)) return false; - uint64_t Magic = endian::read( - DataBuffer.getBufferStart()); + uint64_t Magic = endian::read(DataBuffer.getBufferStart(), + llvm::endianness::little); // Verify that it's magical. return Magic == IndexedCGData::Magic; } diff --git a/llvm/lib/CGData/CodeGenDataWriter.cpp b/llvm/lib/CGData/CodeGenDataWriter.cpp index 14a8558ba63b7..a2bbceebd0317 100644 --- a/llvm/lib/CGData/CodeGenDataWriter.cpp +++ b/llvm/lib/CGData/CodeGenDataWriter.cpp @@ -40,7 +40,7 @@ void CGDataOStream::patch(ArrayRef P) { for (const auto &K : P) { for (size_t I = 0; I < K.D.size(); ++I) { uint64_t Bytes = - endian::byte_swap(K.D[I]); + endian::byte_swap(K.D[I], llvm::endianness::little); Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t), reinterpret_cast(&Bytes), sizeof(uint64_t)); } @@ -52,7 +52,7 @@ void CGDataOStream::patch(ArrayRef P) { for (const auto &K : P) { for (size_t I = 0; I < K.D.size(); ++I) { uint64_t Bytes = - endian::byte_swap(K.D[I]); + endian::byte_swap(K.D[I], llvm::endianness::little); VOStream.pwrite(reinterpret_cast(&Bytes), sizeof(uint64_t), K.Pos + I * sizeof(uint64_t)); } diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 701a6a2f0f7a0..11efe492c57cc 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -473,11 +473,9 @@ bool AsmPrinter::doInitialization(Module &M) { AddrLabelSymbols = nullptr; // Initialize TargetLoweringObjectFile. - const_cast(getObjFileLowering()) - .Initialize(OutContext, TM); + TM.getObjFileLowering()->Initialize(OutContext, TM); - const_cast(getObjFileLowering()) - .getModuleMetadata(M); + TM.getObjFileLowering()->getModuleMetadata(M); // On AIX, we delay emitting any section information until // after emitting the .file pseudo-op. This allows additional diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index 7ce014e9fac9a..518121e200190 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -1836,8 +1836,12 @@ DIE *DwarfCompileUnit::getOrCreateSubprogramDIE(const DISubprogram *SP, if (!F && SP->isDefinition()) { F = DD->getLexicalScopes().getFunction(SP); - if (!F) - return &getCU().getOrCreateAbstractSubprogramDIE(SP); + if (!F) { + // SP may belong to another CU. Determine the CU similarly + // to DwarfDebug::constructAbstractSubprogramScopeDIE. + return &DD->getOrCreateAbstractSubprogramCU(SP, *this) + .getOrCreateAbstractSubprogramDIE(SP); + } } return DwarfUnit::getOrCreateSubprogramDIE(SP, F, Minimal); diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index 8efc6f124a55d..09d5f9c57a1a7 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -548,6 +548,16 @@ bool DwarfDebug::shareAcrossDWOCUs() const { return SplitDwarfCrossCuReferences; } +DwarfCompileUnit & +DwarfDebug::getOrCreateAbstractSubprogramCU(const DISubprogram *SP, + DwarfCompileUnit &SrcCU) { + auto &CU = getOrCreateDwarfCompileUnit(SP->getUnit()); + if (CU.getSkeleton()) + return shareAcrossDWOCUs() ? 
CU : SrcCU; + + return CU; +} + void DwarfDebug::constructAbstractSubprogramScopeDIE(DwarfCompileUnit &SrcCU, LexicalScope *Scope) { assert(Scope && Scope->getScopeNode()); @@ -559,14 +569,11 @@ void DwarfDebug::constructAbstractSubprogramScopeDIE(DwarfCompileUnit &SrcCU, // Find the subprogram's DwarfCompileUnit in the SPMap in case the subprogram // was inlined from another compile unit. auto &CU = getOrCreateDwarfCompileUnit(SP->getUnit()); - if (auto *SkelCU = CU.getSkeleton()) { - (shareAcrossDWOCUs() ? CU : SrcCU) - .constructAbstractSubprogramScopeDIE(Scope); + auto &TargetCU = getOrCreateAbstractSubprogramCU(SP, SrcCU); + TargetCU.constructAbstractSubprogramScopeDIE(Scope); + if (auto *SkelCU = CU.getSkeleton()) if (CU.getCUNode()->getSplitDebugInlining()) SkelCU->constructAbstractSubprogramScopeDIE(Scope); - } else { - CU.constructAbstractSubprogramScopeDIE(Scope); - } } /// Represents a parameter whose call site value can be described by applying a diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h index 89813dcf0fdab..1a1b28a6fc035 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h @@ -906,6 +906,10 @@ class DwarfDebug : public DebugHandlerBase { return CUDieMap.lookup(Die); } + /// Find the matching DwarfCompileUnit for the given SP referenced from SrcCU. + DwarfCompileUnit &getOrCreateAbstractSubprogramCU(const DISubprogram *SP, + DwarfCompileUnit &SrcCU); + unsigned getStringTypeLoc(const DIStringType *ST) const { return StringTypeLocMap.lookup(ST); } diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp index 8a30714db2fdf..1703b27d350f3 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp @@ -154,7 +154,7 @@ bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI, unsigned Size = TRI.getSubRegIdxSize(Idx); unsigned Offset = TRI.getSubRegIdxOffset(Idx); Reg = TRI.getDwarfRegNum(SR, false); - if (Reg < 0) + if (Reg < 0 || Offset + Size > RegSize) continue; // Used to build the intersection between the bits we already diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp index 62fb5eb011cf2..3cfe7cc12d5b6 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp @@ -1889,11 +1889,12 @@ DIE &DwarfUnit::constructMemberDIE(DIE &Buffer, const DIDerivedType *DT) { bool IsBitfield = DT->isBitField(); // Handle the size. - if (auto *Var = dyn_cast_or_null(DT->getRawSizeInBits())) { + if (DT->getRawSizeInBits() == nullptr) { + // No size, just ignore. 
+ } else if (auto *Var = dyn_cast(DT->getRawSizeInBits())) { if (auto *VarDIE = getDIE(Var)) addDIEEntry(MemberDie, dwarf::DW_AT_bit_size, *VarDIE); - } else if (auto *Exp = - dyn_cast_or_null(DT->getRawSizeInBits())) { + } else if (auto *Exp = dyn_cast(DT->getRawSizeInBits())) { DIELoc *Loc = new (DIEValueAllocator) DIELoc; DIEDwarfExpression DwarfExpr(*Asm, getCU(), *Loc); DwarfExpr.setMemoryLocationKind(); diff --git a/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp b/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp index 7baeb3fd7bcee..fbcd614b85d18 100644 --- a/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp +++ b/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp @@ -76,6 +76,21 @@ BasicBlockSectionsProfileReader::getClonePathsForFunction( return ProgramPathAndClusterInfo.lookup(getAliasName(FuncName)).ClonePaths; } +uint64_t BasicBlockSectionsProfileReader::getEdgeCount( + StringRef FuncName, const UniqueBBID &SrcBBID, + const UniqueBBID &SinkBBID) const { + auto It = ProgramPathAndClusterInfo.find(getAliasName(FuncName)); + if (It == ProgramPathAndClusterInfo.end()) + return 0; + auto NodeIt = It->second.EdgeCounts.find(SrcBBID); + if (NodeIt == It->second.EdgeCounts.end()) + return 0; + auto EdgeIt = NodeIt->second.find(SinkBBID); + if (EdgeIt == NodeIt->second.end()) + return 0; + return EdgeIt->second; +} + // Reads the version 1 basic block sections profile. Profile for each function // is encoded as follows: // m @@ -240,6 +255,38 @@ Error BasicBlockSectionsProfileReader::ReadV1Profile() { } continue; } + case 'g': { // CFG profile specifier. + // Skip the profile when the profile iterator (FI) refers to the + // past-the-end element. + if (FI == ProgramPathAndClusterInfo.end()) + continue; + // For each node, its CFG profile is encoded as + // :,:,:,... + for (auto BasicBlockEdgeProfile : Values) { + if (BasicBlockEdgeProfile.empty()) + continue; + SmallVector NodeEdgeCounts; + BasicBlockEdgeProfile.split(NodeEdgeCounts, ','); + UniqueBBID SrcBBID; + for (size_t i = 0; i < NodeEdgeCounts.size(); ++i) { + auto [BBIDStr, CountStr] = NodeEdgeCounts[i].split(':'); + auto BBID = parseUniqueBBID(BBIDStr); + if (!BBID) + return BBID.takeError(); + unsigned long long Count = 0; + if (getAsUnsignedInteger(CountStr, 10, Count)) + return createProfileParseError( + Twine("unsigned integer expected: '") + CountStr + "'"); + if (i == 0) { + // The first element represents the source and its total count. 
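              // For instance, a value such as "0:100,1:60,2:40" (BB ids shown
              // here as plain integers purely for illustration) records a total
              // count of 100 for node 0 below, and the remaining pairs then
              // record edge counts 0->1 = 60 and 0->2 = 40.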
+ FI->second.NodeCounts[SrcBBID = *BBID] = Count; + continue; + } + FI->second.EdgeCounts[SrcBBID][*BBID] = Count; + } + } + continue; + } default: return createProfileParseError(Twine("invalid specifier: '") + Twine(Specifier) + "'"); @@ -440,6 +487,12 @@ BasicBlockSectionsProfileReaderWrapperPass::getClonePathsForFunction( return BBSPR.getClonePathsForFunction(FuncName); } +uint64_t BasicBlockSectionsProfileReaderWrapperPass::getEdgeCount( + StringRef FuncName, const UniqueBBID &SrcBBID, + const UniqueBBID &SinkBBID) const { + return BBSPR.getEdgeCount(FuncName, SrcBBID, SinkBBID); +} + BasicBlockSectionsProfileReader & BasicBlockSectionsProfileReaderWrapperPass::getBBSPR() { return BBSPR; diff --git a/llvm/lib/CodeGen/CalcSpillWeights.cpp b/llvm/lib/CodeGen/CalcSpillWeights.cpp index b16694eafd90e..a77da01761579 100644 --- a/llvm/lib/CodeGen/CalcSpillWeights.cpp +++ b/llvm/lib/CodeGen/CalcSpillWeights.cpp @@ -81,12 +81,15 @@ Register VirtRegAuxInfo::copyHint(const MachineInstr *MI, Register Reg, bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI, const LiveIntervals &LIS, const VirtRegMap &VRM, + const MachineRegisterInfo &MRI, const TargetInstrInfo &TII) { Register Reg = LI.reg(); Register Original = VRM.getOriginal(Reg); + SmallDenseMap VNIDefs; for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end(); I != E; ++I) { const VNInfo *VNI = *I; + const VNInfo *OrigVNI = VNI; if (VNI->isUnused()) continue; if (VNI->isPHIDef()) @@ -122,8 +125,77 @@ bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI, assert(MI && "Dead valno in interval"); } - if (!TII.isTriviallyReMaterializable(*MI)) + if (!TII.isReMaterializable(*MI)) return false; + + VNIDefs[OrigVNI->id] = MI; + } + + // If MI has register uses, it will only be rematerializable if its uses are + // also live at the indices it will be rematerialized at. + for (MachineOperand &MO : MRI.reg_nodbg_operands(LI.reg())) { + if (!MO.readsReg()) + continue; + SlotIndex UseIdx = LIS.getInstructionIndex(*MO.getParent()); + MachineInstr *Def = VNIDefs[LI.getVNInfoAt(UseIdx)->id]; + assert(Def && "Use with no def"); + if (!allUsesAvailableAt(Def, UseIdx, LIS, MRI, TII)) + return false; + } + + return true; +} + +bool VirtRegAuxInfo::allUsesAvailableAt(const MachineInstr *MI, + SlotIndex UseIdx, + const LiveIntervals &LIS, + const MachineRegisterInfo &MRI, + const TargetInstrInfo &TII) { + SlotIndex OrigIdx = LIS.getInstructionIndex(*MI).getRegSlot(true); + UseIdx = std::max(UseIdx, UseIdx.getRegSlot(true)); + for (const MachineOperand &MO : MI->operands()) { + if (!MO.isReg() || !MO.getReg() || !MO.readsReg()) + continue; + + // We can't remat physreg uses, unless it is a constant or target wants + // to ignore this use. + if (MO.getReg().isPhysical()) { + if (MRI.isConstantPhysReg(MO.getReg()) || TII.isIgnorableUse(MO)) + continue; + return false; + } + + const LiveInterval &li = LIS.getInterval(MO.getReg()); + const VNInfo *OVNI = li.getVNInfoAt(OrigIdx); + if (!OVNI) + continue; + + // Don't allow rematerialization immediately after the original def. + // It would be incorrect if OrigMI redefines the register. + // See PR14098. + if (SlotIndex::isSameInstr(OrigIdx, UseIdx)) + return false; + + if (OVNI != li.getVNInfoAt(UseIdx)) + return false; + + // Check that subrange is live at UseIdx. + if (li.hasSubRanges()) { + const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); + unsigned SubReg = MO.getSubReg(); + LaneBitmask LM = SubReg ? 
TRI->getSubRegIndexLaneMask(SubReg) + : MRI.getMaxLaneMaskForVReg(MO.getReg()); + for (const LiveInterval::SubRange &SR : li.subranges()) { + if ((SR.LaneMask & LM).none()) + continue; + if (!SR.liveAt(UseIdx)) + return false; + // Early exit if all used lanes are checked. No need to continue. + LM &= ~SR.LaneMask; + if (LM.none()) + break; + } + } } return true; } @@ -339,7 +411,7 @@ float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start, // it is a preferred candidate for spilling. // FIXME: this gets much more complicated once we support non-trivial // re-materialization. - if (isRematerializable(LI, LIS, VRM, *MF.getSubtarget().getInstrInfo())) + if (isRematerializable(LI, LIS, VRM, MRI, *MF.getSubtarget().getInstrInfo())) TotalWeight *= 0.5F; // Finally, we scale the weight by the scale factor of register class. diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index d290f202f3cca..eb73d01b3558c 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -1749,6 +1749,12 @@ bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp, Sub->hasNUsesOrMore(1))) return false; + // We don't want to move around uses of condition values this late, so we + // check if it is legal to create the call to the intrinsic in the basic + // block containing the icmp. + if (Sub->getParent() != Cmp->getParent() && !Sub->hasOneUse()) + return false; + if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1), Cmp, Intrinsic::usub_with_overflow)) return false; diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 12b735e053bde..884c3f1692e94 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2362,6 +2362,13 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, MachineInstr::copyFlagsFromInstruction(CI)); return true; } + case Intrinsic::modf: { + ArrayRef VRegs = getOrCreateVRegs(CI); + MIRBuilder.buildModf(VRegs[0], VRegs[1], + getOrCreateVReg(*CI.getArgOperand(0)), + MachineInstr::copyFlagsFromInstruction(CI)); + return true; + } case Intrinsic::sincos: { ArrayRef VRegs = getOrCreateVRegs(CI); MIRBuilder.buildFSincos(VRegs[0], VRegs[1], @@ -2607,6 +2614,9 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID, case Intrinsic::get_rounding: MIRBuilder.buildGetRounding(getOrCreateVReg(CI)); return true; + case Intrinsic::set_rounding: + MIRBuilder.buildSetRounding(getOrCreateVReg(*CI.getOperand(0))); + return true; case Intrinsic::vscale: { MIRBuilder.buildVScale(getOrCreateVReg(CI), 1); return true; diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp index f3e036ed1b947..cffaf7ce5aa06 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -471,6 +471,8 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) { RTLIBCASE(TANH_F); case TargetOpcode::G_FSINCOS: RTLIBCASE(SINCOS_F); + case TargetOpcode::G_FMODF: + RTLIBCASE(MODF_F); case TargetOpcode::G_FLOG10: RTLIBCASE(LOG10_F); case TargetOpcode::G_FLOG: @@ -702,6 +704,46 @@ LegalizerHelper::LegalizeResult LegalizerHelper::emitSincosLibcall( return LegalizerHelper::Legalized; } +LegalizerHelper::LegalizeResult +LegalizerHelper::emitModfLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, + unsigned Size, Type *OpType, + 
LostDebugLocObserver &LocObserver) { + MachineFunction &MF = MIRBuilder.getMF(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + + Register DstFrac = MI.getOperand(0).getReg(); + Register DstInt = MI.getOperand(1).getReg(); + Register Src = MI.getOperand(2).getReg(); + LLT DstTy = MRI.getType(DstFrac); + + int MemSize = DstTy.getSizeInBytes(); + Align Alignment = getStackTemporaryAlignment(DstTy); + const DataLayout &DL = MIRBuilder.getDataLayout(); + unsigned AddrSpace = DL.getAllocaAddrSpace(); + MachinePointerInfo PtrInfo; + + Register StackPtrInt = + createStackTemporary(TypeSize::getFixed(MemSize), Alignment, PtrInfo) + .getReg(0); + + auto &Ctx = MF.getFunction().getContext(); + auto LibcallResult = createLibcall( + MIRBuilder, getRTLibDesc(MI.getOpcode(), Size), {DstFrac, OpType, 0}, + {{Src, OpType, 0}, {StackPtrInt, PointerType::get(Ctx, AddrSpace), 1}}, + LocObserver, &MI); + + if (LibcallResult != LegalizeResult::Legalized) + return LegalizerHelper::UnableToLegalize; + + MachineMemOperand *LoadMMOInt = MF.getMachineMemOperand( + PtrInfo, MachineMemOperand::MOLoad, MemSize, Alignment); + + MIRBuilder.buildLoad(DstInt, StackPtrInt, *LoadMMOInt); + MI.eraseFromParent(); + + return LegalizerHelper::Legalized; +} + LegalizerHelper::LegalizeResult llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, MachineInstr &MI, LostDebugLocObserver &LocObserver) { @@ -1341,6 +1383,16 @@ LegalizerHelper::libcall(MachineInstr &MI, LostDebugLocObserver &LocObserver) { } return emitSincosLibcall(MI, MIRBuilder, Size, HLTy, LocObserver); } + case TargetOpcode::G_FMODF: { + LLT LLTy = MRI.getType(MI.getOperand(0).getReg()); + unsigned Size = LLTy.getSizeInBits(); + Type *HLTy = getFloatTypeForLLT(Ctx, LLTy); + if (!HLTy || (Size != 32 && Size != 64 && Size != 80 && Size != 128)) { + LLVM_DEBUG(dbgs() << "No libcall available for type " << LLTy << ".\n"); + return UnableToLegalize; + } + return emitModfLibcall(MI, MIRBuilder, Size, HLTy, LocObserver); + } case TargetOpcode::G_LROUND: case TargetOpcode::G_LLROUND: case TargetOpcode::G_INTRINSIC_LRINT: @@ -2935,6 +2987,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_SEXT); widenScalarDst(MI, WideTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), --MIRBuilder.getInsertPt()); widenScalarDst(MI, WideTy, 1); Observer.changedInstr(MI); return Legalized; @@ -2972,6 +3025,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ZEXT); widenScalarDst(MI, WideTy); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), --MIRBuilder.getInsertPt()); widenScalarDst(MI, WideTy, 1); Observer.changedInstr(MI); return Legalized; @@ -3331,6 +3385,16 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); Observer.changedInstr(MI); return Legalized; + case TargetOpcode::G_FMODF: { + Observer.changingInstr(MI); + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT); + + widenScalarDst(MI, WideTy, 1, TargetOpcode::G_FPTRUNC); + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), --MIRBuilder.getInsertPt()); + widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); + Observer.changedInstr(MI); + return Legalized; + } case TargetOpcode::G_FPOWI: case TargetOpcode::G_FLDEXP: case TargetOpcode::G_STRICT_FLDEXP: { @@ -5470,6 +5534,7 
@@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, case G_LROUND: case G_LLROUND: case G_INTRINSIC_TRUNC: + case G_FMODF: case G_FCOS: case G_FSIN: case G_FTAN: @@ -8598,7 +8663,8 @@ LegalizerHelper::lowerThreewayCompare(MachineInstr &MI) { auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); auto BC = TLI.getBooleanContents(DstTy.isVector(), /*isFP=*/false); - if (TLI.shouldExpandCmpUsingSelects(getApproximateEVTForLLT(SrcTy, Ctx)) || + if (TLI.preferSelectsOverBooleanArithmetic( + getApproximateEVTForLLT(SrcTy, Ctx)) || BC == TargetLowering::UndefinedBooleanContent) { auto One = MIRBuilder.buildConstant(DstTy, 1); auto SelectZeroOrOne = MIRBuilder.buildSelect(DstTy, IsGT, One, Zero); diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp index 477e5c1559b26..c2d474fdde696 100644 --- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp @@ -34,7 +34,7 @@ cl::opt llvm::DisableGISelLegalityCheck( cl::desc("Don't verify that MIR is fully legal between GlobalISel passes"), cl::Hidden); -cl::opt VerboseVerifyLegalizerInfo( +static cl::opt VerboseVerifyLegalizerInfo( "verbose-gisel-verify-legalizer-info", cl::desc("Print more information to dbgs about GlobalISel legalizer rules " "being verified"), diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp index 98c56f739ad4e..0c2b74c907d2a 100644 --- a/llvm/lib/CodeGen/InlineSpiller.cpp +++ b/llvm/lib/CodeGen/InlineSpiller.cpp @@ -642,8 +642,12 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { SmallVector, 8> Ops; VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops); - if (!RI.Reads) + // Defs without reads will be deleted if unused after remat is + // completed for other users of the virtual register. + if (!RI.Reads) { + LLVM_DEBUG(dbgs() << "\tskipping remat of def " << MI); return false; + } SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true); VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex()); @@ -657,8 +661,13 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { return true; } - if (SnippetCopies.count(&MI)) + // Snippet copies are ignored for remat, and will be deleted if they + // don't feed a live user after rematerialization completes. 
+ if (SnippetCopies.count(&MI)) { + LLVM_DEBUG(dbgs() << "\tskipping remat snippet copy for " << UseIdx << '\t' + << MI); return false; + } LiveInterval &OrigLI = LIS.getInterval(Original); VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx); diff --git a/llvm/lib/CodeGen/LiveInterval.cpp b/llvm/lib/CodeGen/LiveInterval.cpp index b682998c329bc..299db85233c2d 100644 --- a/llvm/lib/CodeGen/LiveInterval.cpp +++ b/llvm/lib/CodeGen/LiveInterval.cpp @@ -996,6 +996,17 @@ LLVM_DUMP_METHOD void LiveRange::Segment::dump() const { } #endif +void VNInfo::print(raw_ostream &OS) const { + OS << id << '@'; + if (isUnused()) { + OS << 'x'; + } else { + OS << def; + if (isPHIDef()) + OS << "-phi"; + } +} + void LiveRange::print(raw_ostream &OS) const { if (empty()) OS << "EMPTY"; @@ -1013,15 +1024,10 @@ void LiveRange::print(raw_ostream &OS) const { for (const_vni_iterator i = vni_begin(), e = vni_end(); i != e; ++i, ++vnum) { const VNInfo *vni = *i; - if (vnum) OS << ' '; - OS << vnum << '@'; - if (vni->isUnused()) { - OS << 'x'; - } else { - OS << vni->def; - if (vni->isPHIDef()) - OS << "-phi"; - } + if (vnum) + OS << ' '; + OS << *vni; + assert(vnum == vni->id && "Bad VNInfo"); } } } @@ -1041,9 +1047,9 @@ void LiveInterval::print(raw_ostream &OS) const { } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -LLVM_DUMP_METHOD void LiveRange::dump() const { - dbgs() << *this << '\n'; -} +LLVM_DUMP_METHOD void VNInfo::dump() const { dbgs() << *this << '\n'; } + +LLVM_DUMP_METHOD void LiveRange::dump() const { dbgs() << *this << '\n'; } LLVM_DUMP_METHOD void LiveInterval::SubRange::dump() const { dbgs() << *this << '\n'; diff --git a/llvm/lib/CodeGen/LiveRangeEdit.cpp b/llvm/lib/CodeGen/LiveRangeEdit.cpp index 33e980a5993d3..59bc82dc267b5 100644 --- a/llvm/lib/CodeGen/LiveRangeEdit.cpp +++ b/llvm/lib/CodeGen/LiveRangeEdit.cpp @@ -80,7 +80,7 @@ void LiveRangeEdit::scanRemattable() { MachineInstr *DefMI = LIS.getInstructionFromIndex(OrigVNI->def); if (!DefMI) continue; - if (TII.isTriviallyReMaterializable(*DefMI)) + if (TII.isReMaterializable(*DefMI)) Remattable.insert(OrigVNI); } ScannedRemattable = true; @@ -92,60 +92,6 @@ bool LiveRangeEdit::anyRematerializable() { return !Remattable.empty(); } -/// allUsesAvailableAt - Return true if all registers used by OrigMI at -/// OrigIdx are also available with the same value at UseIdx. -bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI, - SlotIndex OrigIdx, - SlotIndex UseIdx) const { - OrigIdx = OrigIdx.getRegSlot(true); - UseIdx = std::max(UseIdx, UseIdx.getRegSlot(true)); - for (const MachineOperand &MO : OrigMI->operands()) { - if (!MO.isReg() || !MO.getReg() || !MO.readsReg()) - continue; - - // We can't remat physreg uses, unless it is a constant or target wants - // to ignore this use. - if (MO.getReg().isPhysical()) { - if (MRI.isConstantPhysReg(MO.getReg()) || TII.isIgnorableUse(MO)) - continue; - return false; - } - - LiveInterval &li = LIS.getInterval(MO.getReg()); - const VNInfo *OVNI = li.getVNInfoAt(OrigIdx); - if (!OVNI) - continue; - - // Don't allow rematerialization immediately after the original def. - // It would be incorrect if OrigMI redefines the register. - // See PR14098. - if (SlotIndex::isSameInstr(OrigIdx, UseIdx)) - return false; - - if (OVNI != li.getVNInfoAt(UseIdx)) - return false; - - // Check that subrange is live at UseIdx. - if (li.hasSubRanges()) { - const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); - unsigned SubReg = MO.getSubReg(); - LaneBitmask LM = SubReg ? 
TRI->getSubRegIndexLaneMask(SubReg) - : MRI.getMaxLaneMaskForVReg(MO.getReg()); - for (LiveInterval::SubRange &SR : li.subranges()) { - if ((SR.LaneMask & LM).none()) - continue; - if (!SR.liveAt(UseIdx)) - return false; - // Early exit if all used lanes are checked. No need to continue. - LM &= ~SR.LaneMask; - if (LM.none()) - break; - } - } - } - return true; -} - bool LiveRangeEdit::canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx) { assert(ScannedRemattable && "Call anyRematerializable first"); @@ -155,12 +101,10 @@ bool LiveRangeEdit::canRematerializeAt(Remat &RM, VNInfo *OrigVNI, return false; // No defining instruction provided. - SlotIndex DefIdx; assert(RM.OrigMI && "No defining instruction for remattable value"); - DefIdx = LIS.getInstructionIndex(*RM.OrigMI); // Verify that all used registers are available with the same values. - if (!allUsesAvailableAt(RM.OrigMI, DefIdx, UseIdx)) + if (!VirtRegAuxInfo::allUsesAvailableAt(RM.OrigMI, UseIdx, LIS, MRI, TII)) return false; return true; @@ -221,8 +165,8 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI, // Since we're moving the DefMI load, make sure we're not extending any live // ranges. - if (!allUsesAvailableAt(DefMI, LIS.getInstructionIndex(*DefMI), - LIS.getInstructionIndex(*UseMI))) + if (!VirtRegAuxInfo::allUsesAvailableAt( + DefMI, LIS.getInstructionIndex(*UseMI), LIS, MRI, TII)) return false; // We also need to make sure it is safe to move the load. @@ -387,7 +331,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) { // register uses. That may provoke RA to split an interval at the KILL // and later result in an invalid live segment end. if (isOrigDef && DeadRemats && !HasLiveVRegUses && - TII.isTriviallyReMaterializable(*MI)) { + TII.isReMaterializable(*MI)) { LiveInterval &NewLI = createEmptyIntervalFrom(Dest, false); VNInfo::Allocator &Alloc = LIS.getVNInfoAllocator(); VNInfo *VNI = NewLI.getNextValue(Idx, Alloc); diff --git a/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp index 1cde094d78e23..b2731b691d54c 100644 --- a/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp +++ b/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp @@ -927,7 +927,7 @@ MLEvictAdvisor::getLIFeatureComponents(const LiveInterval &LI) const { Ret.HintWeights += Freq; } Ret.IsRemat = VirtRegAuxInfo::isRematerializable( - LI, *LIS, *VRM, *MF.getSubtarget().getInstrInfo()); + LI, *LIS, *VRM, *MRI, *MF.getSubtarget().getInstrInfo()); return Ret; } diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp index 08a51b9b0242a..1cb57a4fa4258 100644 --- a/llvm/lib/CodeGen/MachineBasicBlock.cpp +++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp @@ -606,6 +606,26 @@ void MachineBasicBlock::removeLiveIn(MCRegister Reg, LaneBitmask LaneMask) { LiveIns.erase(I); } +void MachineBasicBlock::removeLiveInOverlappedWith(MCRegister Reg) { + const MachineFunction *MF = getParent(); + const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); + // Remove Reg and its subregs from live in set. + for (MCPhysReg S : TRI->subregs_inclusive(Reg)) + removeLiveIn(S); + + // Remove live-in bitmask in super registers as well. 
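  // (If the block is only live-in in a wider register, just the lane(s) that
  // correspond to Reg are cleared from that entry; the wider live-in itself
  // survives as long as other lanes remain live.)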
+ for (MCPhysReg Super : TRI->superregs(Reg)) { + for (MCSubRegIndexIterator SRI(Super, TRI); SRI.isValid(); ++SRI) { + if (Reg == SRI.getSubReg()) { + unsigned SubRegIndex = SRI.getSubRegIndex(); + LaneBitmask SubRegLaneMask = TRI->getSubRegIndexLaneMask(SubRegIndex); + removeLiveIn(Super, SubRegLaneMask); + break; + } + } + } +} + MachineBasicBlock::livein_iterator MachineBasicBlock::removeLiveIn(MachineBasicBlock::livein_iterator I) { // Get non-const version of iterator. @@ -1160,7 +1180,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge( MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge( MachineBasicBlock *Succ, const SplitCriticalEdgeAnalyses &Analyses, std::vector> *LiveInSets, MachineDomTreeUpdater *MDTU) { - if (!canSplitCriticalEdge(Succ)) + if (!canSplitCriticalEdge(Succ, Analyses.MLI)) return nullptr; MachineFunction *MF = getParent(); @@ -1388,8 +1408,8 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge( return NMBB; } -bool MachineBasicBlock::canSplitCriticalEdge( - const MachineBasicBlock *Succ) const { +bool MachineBasicBlock::canSplitCriticalEdge(const MachineBasicBlock *Succ, + const MachineLoopInfo *MLI) const { // Splitting the critical edge to a landing pad block is non-trivial. Don't do // it in this generic function. if (Succ->isEHPad()) @@ -1403,8 +1423,17 @@ bool MachineBasicBlock::canSplitCriticalEdge( const MachineFunction *MF = getParent(); // Performance might be harmed on HW that implements branching using exec mask // where both sides of the branches are always executed. - if (MF->getTarget().requiresStructuredCFG()) + + if (MF->getTarget().requiresStructuredCFG()) { + // If `Succ` is a loop header, splitting the critical edge will not + // break structured CFG. + if (MLI) { + const MachineLoop *L = MLI->getLoopFor(Succ); + return L && L->getHeader() == Succ; + } + return false; + } // Do we have an Indirect jump with a jumptable that we can rewrite? int JTI = findJumpTableIndex(*this); @@ -1802,6 +1831,12 @@ bool MachineBasicBlock::sizeWithoutDebugLargerThan(unsigned Limit) const { return false; } +void MachineBasicBlock::removePHIsIncomingValuesForPredecessor( + const MachineBasicBlock &PredMBB) { + for (MachineInstr &Phi : phis()) + Phi.removePHIIncomingValueFor(PredMBB); +} + const MBBSectionID MBBSectionID::ColdSectionID(MBBSectionID::SectionType::Cold); const MBBSectionID MBBSectionID::ExceptionSectionID(MBBSectionID::SectionType::Exception); diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp index 2c06c5ad4a5e4..8ad9245a47684 100644 --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -2747,3 +2747,18 @@ bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const { return F.getRegMayBeFolded(); return false; } + +unsigned MachineInstr::removePHIIncomingValueFor(const MachineBasicBlock &MBB) { + assert(isPHI()); + + // Phi might have multiple entries for MBB. Need to remove them all. 
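  // Walking the operand list from the back keeps the remaining indices stable
  // while pairs are erased: a PHI is laid out as
  //   %dst = PHI %val1, %bb1, %val2, %bb2, ...
  // so operands (N-2, N-1) always form a (value, predecessor-block) pair.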
+ unsigned RemovedCount = 0; + for (unsigned N = getNumOperands(); N > 2; N -= 2) { + if (getOperand(N - 1).getMBB() == &MBB) { + removeOperand(N - 1); + removeOperand(N - 2); + RemovedCount += 2; + } + } + return RemovedCount; +} diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp index 4f164e2d53460..7acddff753693 100644 --- a/llvm/lib/CodeGen/MachineLICM.cpp +++ b/llvm/lib/CodeGen/MachineLICM.cpp @@ -244,8 +244,6 @@ namespace { bool IsGuaranteedToExecute(MachineBasicBlock *BB, MachineLoop *CurLoop); - bool isTriviallyReMaterializable(const MachineInstr &MI) const; - void EnterScope(MachineBasicBlock *MBB); void ExitScope(MachineBasicBlock *MBB); @@ -771,23 +769,6 @@ bool MachineLICMImpl::IsGuaranteedToExecute(MachineBasicBlock *BB, return true; } -/// Check if \p MI is trivially remateralizable and if it does not have any -/// virtual register uses. Even though rematerializable RA might not actually -/// rematerialize it in this scenario. In that case we do not want to hoist such -/// instruction out of the loop in a belief RA will sink it back if needed. -bool MachineLICMImpl::isTriviallyReMaterializable( - const MachineInstr &MI) const { - if (!TII->isTriviallyReMaterializable(MI)) - return false; - - for (const MachineOperand &MO : MI.all_uses()) { - if (MO.getReg().isVirtual()) - return false; - } - - return true; -} - void MachineLICMImpl::EnterScope(MachineBasicBlock *MBB) { LLVM_DEBUG(dbgs() << "Entering " << printMBBReference(*MBB) << '\n'); @@ -1300,9 +1281,9 @@ bool MachineLICMImpl::IsProfitableToHoist(MachineInstr &MI, return false; } - // Rematerializable instructions should always be hoisted providing the - // register allocator can just pull them down again when needed. - if (isTriviallyReMaterializable(MI)) + // Trivially rematerializable instructions should always be hoisted + // providing the register allocator can just pull them down again when needed. + if (TII->isTriviallyReMaterializable(MI)) return true; // FIXME: If there are long latency loop-invariant instructions inside the @@ -1386,7 +1367,7 @@ bool MachineLICMImpl::IsProfitableToHoist(MachineInstr &MI, // High register pressure situation, only hoist if the instruction is going // to be remat'ed. 
- if (!isTriviallyReMaterializable(MI) && + if (!TII->isTriviallyReMaterializable(MI) && !MI.isDereferenceableInvariantLoad()) { LLVM_DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI); return false; diff --git a/llvm/lib/CodeGen/MachineRegionInfo.cpp b/llvm/lib/CodeGen/MachineRegionInfo.cpp index f8268b8894ca3..366755af08e49 100644 --- a/llvm/lib/CodeGen/MachineRegionInfo.cpp +++ b/llvm/lib/CodeGen/MachineRegionInfo.cpp @@ -10,6 +10,7 @@ #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/RegionInfoImpl.h" #include "llvm/CodeGen/MachinePostDominators.h" +#include "llvm/CodeGen/Passes.h" #include "llvm/Config/llvm-config.h" #include "llvm/InitializePasses.h" #include "llvm/Pass.h" @@ -127,7 +128,7 @@ LLVM_DUMP_METHOD void MachineRegionInfoPass::dump() const { #endif char MachineRegionInfoPass::ID = 0; -char &MachineRegionInfoPassID = MachineRegionInfoPass::ID; +char &llvm::MachineRegionInfoPassID = MachineRegionInfoPass::ID; INITIALIZE_PASS_BEGIN(MachineRegionInfoPass, DEBUG_TYPE, "Detect single entry single exit regions", true, true) diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp index c6fa8f42757db..299bcc46e4bd2 100644 --- a/llvm/lib/CodeGen/MachineScheduler.cpp +++ b/llvm/lib/CodeGen/MachineScheduler.cpp @@ -4157,33 +4157,32 @@ SUnit *GenericScheduler::pickNode(bool &IsTopNode) { return nullptr; } SUnit *SU; - do { - if (RegionPolicy.OnlyTopDown) { - SU = Top.pickOnlyChoice(); - if (!SU) { - CandPolicy NoPolicy; - TopCand.reset(NoPolicy); - pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand); - assert(TopCand.Reason != NoCand && "failed to find a candidate"); - tracePick(TopCand); - SU = TopCand.SU; - } - IsTopNode = true; - } else if (RegionPolicy.OnlyBottomUp) { - SU = Bot.pickOnlyChoice(); - if (!SU) { - CandPolicy NoPolicy; - BotCand.reset(NoPolicy); - pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand); - assert(BotCand.Reason != NoCand && "failed to find a candidate"); - tracePick(BotCand); - SU = BotCand.SU; - } - IsTopNode = false; - } else { - SU = pickNodeBidirectional(IsTopNode); + if (RegionPolicy.OnlyTopDown) { + SU = Top.pickOnlyChoice(); + if (!SU) { + CandPolicy NoPolicy; + TopCand.reset(NoPolicy); + pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand); + assert(TopCand.Reason != NoCand && "failed to find a candidate"); + tracePick(TopCand); + SU = TopCand.SU; } - } while (SU->isScheduled); + IsTopNode = true; + } else if (RegionPolicy.OnlyBottomUp) { + SU = Bot.pickOnlyChoice(); + if (!SU) { + CandPolicy NoPolicy; + BotCand.reset(NoPolicy); + pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand); + assert(BotCand.Reason != NoCand && "failed to find a candidate"); + tracePick(BotCand); + SU = BotCand.SU; + } + IsTopNode = false; + } else { + SU = pickNodeBidirectional(IsTopNode); + } + assert(!SU->isScheduled && "SUnit scheduled twice."); // If IsTopNode, then SU is in Top.Available and must be removed. Otherwise, // if isTopReady(), then SU is in either Top.Available or Top.Pending. @@ -4524,43 +4523,42 @@ SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) { return nullptr; } SUnit *SU; - do { - if (RegionPolicy.OnlyBottomUp) { - SU = Bot.pickOnlyChoice(); - if (SU) { - tracePick(Only1, /*IsTopNode=*/true, /*IsPostRA=*/true); - } else { - CandPolicy NoPolicy; - BotCand.reset(NoPolicy); - // Set the bottom-up policy based on the state of the current bottom - // zone and the instructions outside the zone, including the top zone. 
- setPolicy(BotCand.Policy, /*IsPostRA=*/true, Bot, nullptr); - pickNodeFromQueue(Bot, BotCand); - assert(BotCand.Reason != NoCand && "failed to find a candidate"); - tracePick(BotCand, /*IsPostRA=*/true); - SU = BotCand.SU; - } - IsTopNode = false; - } else if (RegionPolicy.OnlyTopDown) { - SU = Top.pickOnlyChoice(); - if (SU) { - tracePick(Only1, /*IsTopNode=*/true, /*IsPostRA=*/true); - } else { - CandPolicy NoPolicy; - TopCand.reset(NoPolicy); - // Set the top-down policy based on the state of the current top zone - // and the instructions outside the zone, including the bottom zone. - setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr); - pickNodeFromQueue(Top, TopCand); - assert(TopCand.Reason != NoCand && "failed to find a candidate"); - tracePick(TopCand, /*IsPostRA=*/true); - SU = TopCand.SU; - } - IsTopNode = true; + if (RegionPolicy.OnlyBottomUp) { + SU = Bot.pickOnlyChoice(); + if (SU) { + tracePick(Only1, /*IsTopNode=*/true, /*IsPostRA=*/true); } else { - SU = pickNodeBidirectional(IsTopNode); + CandPolicy NoPolicy; + BotCand.reset(NoPolicy); + // Set the bottom-up policy based on the state of the current bottom + // zone and the instructions outside the zone, including the top zone. + setPolicy(BotCand.Policy, /*IsPostRA=*/true, Bot, nullptr); + pickNodeFromQueue(Bot, BotCand); + assert(BotCand.Reason != NoCand && "failed to find a candidate"); + tracePick(BotCand, /*IsPostRA=*/true); + SU = BotCand.SU; } - } while (SU->isScheduled); + IsTopNode = false; + } else if (RegionPolicy.OnlyTopDown) { + SU = Top.pickOnlyChoice(); + if (SU) { + tracePick(Only1, /*IsTopNode=*/true, /*IsPostRA=*/true); + } else { + CandPolicy NoPolicy; + TopCand.reset(NoPolicy); + // Set the top-down policy based on the state of the current top zone + // and the instructions outside the zone, including the bottom zone. + setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr); + pickNodeFromQueue(Top, TopCand); + assert(TopCand.Reason != NoCand && "failed to find a candidate"); + tracePick(TopCand, /*IsPostRA=*/true); + SU = TopCand.SU; + } + IsTopNode = true; + } else { + SU = pickNodeBidirectional(IsTopNode); + } + assert(!SU->isScheduled && "SUnit scheduled twice."); if (SU->isTopReady()) Top.removeReady(SU); diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp index 9ec5151a039b7..d5153b7fb6207 100644 --- a/llvm/lib/CodeGen/MachineSink.cpp +++ b/llvm/lib/CodeGen/MachineSink.cpp @@ -2187,11 +2187,9 @@ static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB, static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB, const SmallVectorImpl &UsedOpsInCopy, const SmallVectorImpl &DefedRegsInCopy) { - MachineFunction &MF = *SuccBB->getParent(); - const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); for (Register DefReg : DefedRegsInCopy) - for (MCPhysReg S : TRI->subregs_inclusive(DefReg)) - SuccBB->removeLiveIn(S); + SuccBB->removeLiveInOverlappedWith(DefReg); + for (auto U : UsedOpsInCopy) SuccBB->addLiveIn(MI->getOperand(U).getReg()); SuccBB->sortUniqueLiveIns(); diff --git a/llvm/lib/CodeGen/MachineStripDebug.cpp b/llvm/lib/CodeGen/MachineStripDebug.cpp index ea291f64bff43..d54fe023a4a7e 100644 --- a/llvm/lib/CodeGen/MachineStripDebug.cpp +++ b/llvm/lib/CodeGen/MachineStripDebug.cpp @@ -58,7 +58,7 @@ struct StripDebugMachineModule : public ModulePass { // preservation. Preserve it for now. 
if (MI.getNumOperands() > 1) { LLVM_DEBUG(dbgs() << "Removing debug instruction " << MI); - MBB.erase(&MI); + MBB.erase_instr(&MI); Changed |= true; continue; } diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp index e911ce8a75828..115485509c4a5 100644 --- a/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/llvm/lib/CodeGen/MachineVerifier.cpp @@ -1549,7 +1549,7 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) { report("G_BUILD_VECTOR result element type must match source type", MI); if (DstTy.getNumElements() != MI->getNumOperands() - 1) - report("G_BUILD_VECTOR must have an operand for each elemement", MI); + report("G_BUILD_VECTOR must have an operand for each element", MI); for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2)) if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg())) @@ -2398,11 +2398,11 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { // The next two checks allow COPY between physical and virtual registers, // when the virtual register has a scalable size and the physical register - // has a fixed size. These checks allow COPY between *potentialy* mismatched - // sizes. However, once RegisterBankSelection occurs, MachineVerifier should - // be able to resolve a fixed size for the scalable vector, and at that - // point this function will know for sure whether the sizes are mismatched - // and correctly report a size mismatch. + // has a fixed size. These checks allow COPY between *potentially* + // mismatched sizes. However, once RegisterBankSelection occurs, + // MachineVerifier should be able to resolve a fixed size for the scalable + // vector, and at that point this function will know for sure whether the + // sizes are mismatched and correctly report a size mismatch. if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() && !SrcSize.isScalable()) break; @@ -3213,13 +3213,13 @@ struct VRegFilter { private: static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8; - // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyound - // are tracked by Dense. The only purpose of the threashold and the Dense set + // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond + // are tracked by Dense. The only purpose of the threshold and the Dense set // is to have a reasonably growing memory usage in pathological cases (large // number of very sparse VRegFilter instances live at the same time). In // practice even in the worst-by-execution time cases having all elements // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more - // space efficient than if tracked by Dense. The threashold is set to keep the + // space efficient than if tracked by Dense. The threshold is set to keep the // worst-case memory usage within 2x of figures determined empirically for // "all Dense" scenario in such worst-by-execution-time cases. BitVector Sparse; @@ -3459,7 +3459,7 @@ void MachineVerifier::visitMachineFunctionAfter() { // Check live-in list of each MBB. If a register is live into MBB, check // that the register is in regsLiveOut of each predecessor block. Since - // this must come from a definition in the predecesssor or its live-in + // this must come from a definition in the predecessor or its live-in // list, this will catch a live-through case where the predecessor does not // have the register in its live-in list. 
This currently only checks // registers that have no aliases, are not allocatable and are not diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp index 21bf052d1fdaf..d47ed65540cf4 100644 --- a/llvm/lib/CodeGen/ModuloSchedule.cpp +++ b/llvm/lib/CodeGen/ModuloSchedule.cpp @@ -10,6 +10,7 @@ #include "llvm/ADT/StringExtras.h" #include "llvm/Analysis/MemoryLocation.h" #include "llvm/CodeGen/LiveIntervals.h" +#include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineLoopInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -859,20 +860,6 @@ void ModuloScheduleExpander::splitLifetimes(MachineBasicBlock *KernelBB, } } -/// Remove the incoming block from the Phis in a basic block. -static void removePhis(MachineBasicBlock *BB, MachineBasicBlock *Incoming) { - for (MachineInstr &MI : *BB) { - if (!MI.isPHI()) - break; - for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) - if (MI.getOperand(i + 1).getMBB() == Incoming) { - MI.removeOperand(i + 1); - MI.removeOperand(i); - break; - } - } -} - /// Create branches from each prolog basic block to the appropriate epilog /// block. These edges are needed if the loop ends before reaching the /// kernel. @@ -906,7 +893,7 @@ void ModuloScheduleExpander::addBranches(MachineBasicBlock &PreheaderBB, Prolog->removeSuccessor(LastPro); LastEpi->removeSuccessor(Epilog); numAdded = TII->insertBranch(*Prolog, Epilog, nullptr, Cond, DebugLoc()); - removePhis(Epilog, LastEpi); + Epilog->removePHIsIncomingValuesForPredecessor(*LastEpi); // Remove the blocks that are no longer referenced. if (LastPro != LastEpi) { for (auto &MI : *LastEpi) @@ -924,7 +911,7 @@ void ModuloScheduleExpander::addBranches(MachineBasicBlock &PreheaderBB, LastPro->eraseFromParent(); } else { numAdded = TII->insertBranch(*Prolog, LastPro, nullptr, Cond, DebugLoc()); - removePhis(Epilog, Prolog); + Epilog->removePHIsIncomingValuesForPredecessor(*Prolog); } LastPro = Prolog; LastEpi = Epilog; diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp index 7bfc9dae59fcf..e1d39d64e9fb8 100644 --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -1004,9 +1004,8 @@ bool PeepholeOptimizer::findNextSource(const TargetRegisterClass *DefRC, // Thus, instead of maintaining untested code, we will revisit that if // that changes at some point. Register Reg = RegSubReg.Reg; - SmallVector SrcToLook; RegSubRegPair CurSrcPair = RegSubReg; - SrcToLook.push_back(CurSrcPair); + SmallVector SrcToLook = {CurSrcPair}; unsigned PHICount = 0; do { @@ -1204,6 +1203,18 @@ bool PeepholeOptimizer::optimizeCoalescableCopyImpl(Rewriter &&CpyRewriter) { if (!NewSrc.Reg) continue; + if (NewSrc.SubReg) { + // Verify the register class supports the subregister index. ARM's + // copy-like queries return register:subreg pairs where the register's + // current class does not directly support the subregister index. + const TargetRegisterClass *RC = MRI->getRegClass(NewSrc.Reg); + const TargetRegisterClass *WithSubRC = + TRI->getSubClassWithSubReg(RC, NewSrc.SubReg); + if (!MRI->constrainRegClass(NewSrc.Reg, WithSubRC)) + continue; + Changed = true; + } + // Rewrite source. if (CpyRewriter.RewriteCurrentSource(NewSrc.Reg, NewSrc.SubReg)) { // We may have extended the live-range of NewSrc, account for that. 
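Both PeepholeOptimizer hunks in this file hinge on the same constraint pattern (getSubClassWithSubReg followed by constrainRegClass); a minimal sketch of that pattern as a hypothetical standalone helper, shown purely for illustration and not part of this change:

    // Illustrative only: constrain a virtual register so it can legally be
    // accessed through SubIdx. Returns false when no subclass of the current
    // register class supports SubIdx, mirroring the bail-out ("continue") path
    // in optimizeCoalescableCopyImpl above.
    static bool constrainForSubReg(MachineRegisterInfo &MRI,
                                   const TargetRegisterInfo &TRI, Register Reg,
                                   unsigned SubIdx) {
      if (!SubIdx || !Reg.isVirtual())
        return true; // Nothing to constrain for full copies or physregs.
      const TargetRegisterClass *RC = MRI.getRegClass(Reg);
      const TargetRegisterClass *WithSubRC =
          TRI.getSubClassWithSubReg(RC, SubIdx);
      if (!WithSubRC)
        return false; // No class supports this subregister index.
      return MRI.constrainRegClass(Reg, WithSubRC) != nullptr;
    }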
@@ -1276,6 +1287,18 @@ MachineInstr &PeepholeOptimizer::rewriteSource(MachineInstr &CopyLike, const TargetRegisterClass *DefRC = MRI->getRegClass(Def.Reg); Register NewVReg = MRI->createVirtualRegister(DefRC); + if (NewSrc.SubReg) { + const TargetRegisterClass *NewSrcRC = MRI->getRegClass(NewSrc.Reg); + const TargetRegisterClass *WithSubRC = + TRI->getSubClassWithSubReg(NewSrcRC, NewSrc.SubReg); + + // The new source may not directly support the subregister, but we should be + // able to assume it is constrainable to support the subregister (otherwise + // ValueTracker was lying and reported a useless value). + if (!MRI->constrainRegClass(NewSrc.Reg, WithSubRC)) + llvm_unreachable("replacement register cannot support subregister"); + } + MachineInstr *NewCopy = BuildMI(*CopyLike.getParent(), &CopyLike, CopyLike.getDebugLoc(), TII->get(TargetOpcode::COPY), NewVReg) @@ -1906,7 +1929,27 @@ ValueTrackerResult ValueTracker::getNextSourceFromCopy() { const MachineOperand &Src = Def->getOperand(1); if (Src.isUndef()) return ValueTrackerResult(); - return ValueTrackerResult(Src.getReg(), Src.getSubReg()); + + Register SrcReg = Src.getReg(); + unsigned SubReg = Src.getSubReg(); + if (DefSubReg) { + const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); + SubReg = TRI->composeSubRegIndices(SubReg, DefSubReg); + + if (SrcReg.isVirtual()) { + // TODO: Try constraining on rewrite if we can + const TargetRegisterClass *RegRC = MRI.getRegClass(SrcReg); + const TargetRegisterClass *SrcWithSubRC = + TRI->getSubClassWithSubReg(RegRC, SubReg); + if (RegRC != SrcWithSubRC) + return ValueTrackerResult(); + } else { + if (!TRI->getSubReg(SrcReg, SubReg)) + return ValueTrackerResult(); + } + } + + return ValueTrackerResult(SrcReg, SubReg); } ValueTrackerResult ValueTracker::getNextSourceFromBitcast() { diff --git a/llvm/lib/CodeGen/RDFLiveness.cpp b/llvm/lib/CodeGen/RDFLiveness.cpp index 318422b46e811..2e1cf499eab41 100644 --- a/llvm/lib/CodeGen/RDFLiveness.cpp +++ b/llvm/lib/CodeGen/RDFLiveness.cpp @@ -652,8 +652,9 @@ void Liveness::computePhiInfo() { // defs, cache the result of subtracting these defs from a given register // ref. using RefHash = std::hash; - using RefEqual = std::equal_to; - using SubMap = std::unordered_map; + using RefEqual = RegisterRefEqualTo; + using SubMap = + std::unordered_map; std::unordered_map Subs; auto ClearIn = [](RegisterRef RR, const RegisterAggr &Mid, SubMap &SM) { if (Mid.empty()) @@ -868,7 +869,7 @@ void Liveness::computeLiveIns() { std::vector LV; for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins()) LV.push_back(RegisterRef(LI.PhysReg, LI.LaneMask)); - llvm::sort(LV, std::less(PRI)); + llvm::sort(LV, RegisterRefLess(PRI)); dbgs() << printMBBReference(B) << "\t rec = {"; for (auto I : LV) dbgs() << ' ' << Print(I, DFG); @@ -878,7 +879,7 @@ void Liveness::computeLiveIns() { LV.clear(); for (RegisterRef RR : LiveMap[&B].refs()) LV.push_back(RR); - llvm::sort(LV, std::less(PRI)); + llvm::sort(LV, RegisterRefLess(PRI)); dbgs() << "\tcomp = {"; for (auto I : LV) dbgs() << ' ' << Print(I, DFG); diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index d004815d2c17a..7fe13a316db95 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -1383,21 +1383,53 @@ bool RAGreedy::trySplitAroundHintReg(MCPhysReg Hint, // Compute the cost of assigning a non Hint physical register to VirtReg. 
// We define it as the total frequency of broken COPY instructions to/from // Hint register, and after split, they can be deleted. - for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) { - if (!TII->isFullCopyInstr(Instr)) + + // FIXME: This is miscounting the costs with subregisters. In particular, this + // should support recognizing SplitKit formed copy bundles instead of direct + // copy instructions, which will appear in the same block. + for (const MachineOperand &Opnd : MRI->reg_nodbg_operands(Reg)) { + const MachineInstr &Instr = *Opnd.getParent(); + if (!Instr.isCopy() || Opnd.isImplicit()) continue; - Register OtherReg = Instr.getOperand(1).getReg(); - if (OtherReg == Reg) { - OtherReg = Instr.getOperand(0).getReg(); - if (OtherReg == Reg) - continue; - // Check if VirtReg interferes with OtherReg after this COPY instruction. - if (VirtReg.liveAt(LIS->getInstructionIndex(Instr).getRegSlot())) - continue; + + // Look for the other end of the copy. + const bool IsDef = Opnd.isDef(); + const MachineOperand &OtherOpnd = Instr.getOperand(IsDef); + Register OtherReg = OtherOpnd.getReg(); + assert(Reg == Opnd.getReg()); + if (OtherReg == Reg) + continue; + + unsigned SubReg = Opnd.getSubReg(); + unsigned OtherSubReg = OtherOpnd.getSubReg(); + if (SubReg && OtherSubReg && SubReg != OtherSubReg) + continue; + + // Check if VirtReg interferes with OtherReg after this COPY instruction. + if (Opnd.readsReg()) { + SlotIndex Index = LIS->getInstructionIndex(Instr).getRegSlot(); + + if (SubReg) { + LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg); + if (IsDef) + Mask = ~Mask; + + if (any_of(VirtReg.subranges(), [=](const LiveInterval::SubRange &S) { + return (S.LaneMask & Mask).any() && S.liveAt(Index); + })) { + continue; + } + } else { + if (VirtReg.liveAt(Index)) + continue; + } } + MCRegister OtherPhysReg = OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg); - if (OtherPhysReg == Hint) + MCRegister ThisHint = + SubReg ? TRI->getSubReg(Hint, SubReg) : MCRegister(Hint); + if (OtherPhysReg == ThisHint) Cost += MBFI->getBlockFreq(Instr.getParent()); } @@ -2403,25 +2435,28 @@ void RAGreedy::collectHintInfo(Register Reg, HintsInfo &Out) { unsigned SubReg = Opnd.getSubReg(); // Get the current assignment. - MCRegister OtherPhysReg = - OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg); - if (OtherSubReg) { - if (OtherReg.isPhysical()) { - MCRegister Tuple = - TRI->getMatchingSuperReg(OtherPhysReg, OtherSubReg, RC); - if (!Tuple) - continue; - OtherPhysReg = Tuple; - } else { - // TODO: There should be a hinting mechanism for subregisters - if (SubReg != OtherSubReg) - continue; - } + MCRegister OtherPhysReg; + if (OtherReg.isPhysical()) { + if (OtherSubReg) + OtherPhysReg = TRI->getMatchingSuperReg(OtherReg, OtherSubReg, RC); + else if (SubReg) + OtherPhysReg = TRI->getMatchingSuperReg(OtherReg, SubReg, RC); + else + OtherPhysReg = OtherReg; + } else { + OtherPhysReg = VRM->getPhys(OtherReg); + // TODO: Should find matching superregister, but applying this in the + // non-hint case currently causes regressions + + if (SubReg && OtherSubReg && SubReg != OtherSubReg) + continue; } // Push the collected information. 
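The interference test added above in trySplitAroundHintReg reduces to bit arithmetic on lane masks: a sub-register copy only conflicts with live sub-ranges whose lanes overlap the sub-register's mask, or its complement when the operand is a def (only the lanes the def leaves untouched matter). A small standalone model with 8-lane masks, purely illustrative:

#include <cassert>
#include <cstdint>
#include <vector>

// Toy sub-range: which lanes it covers and whether it is live at the point
// of interest (the copy's slot index in the real code).
struct ToySubRange {
  uint8_t LaneMask;
  bool LiveAtCopy;
};

// Mirrors the any_of check above: a conflict needs both overlapping lanes
// and liveness at the copy.
static bool interferes(const std::vector<ToySubRange> &Ranges, uint8_t Mask,
                       bool IsDef) {
  if (IsDef)
    Mask = static_cast<uint8_t>(~Mask); // lanes *not* written by the def
  for (const ToySubRange &S : Ranges)
    if ((S.LaneMask & Mask) != 0 && S.LiveAtCopy)
      return true;
  return false;
}

int main() {
  std::vector<ToySubRange> Ranges = {{0x0F, true}, {0xF0, false}};
  // A use of the low lanes conflicts (they are live); a def of the low lanes
  // does not, because only the untouched high lanes matter and they are dead.
  assert(interferes(Ranges, 0x0F, /*IsDef=*/false));
  assert(!interferes(Ranges, 0x0F, /*IsDef=*/true));
  return 0;
}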
- Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg, - OtherPhysReg)); + if (OtherPhysReg) { + Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg, + OtherPhysReg)); + } } } @@ -2450,15 +2485,13 @@ void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) { // We have a broken hint, check if it is possible to fix it by // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted // some register and PhysReg may be available for the other live-ranges. - SmallSet Visited; - SmallVector RecoloringCandidates; HintsInfo Info; Register Reg = VirtReg.reg(); MCRegister PhysReg = VRM->getPhys(Reg); // Start the recoloring algorithm from the input live-interval, then // it will propagate to the ones that are copy-related with it. - Visited.insert(Reg); - RecoloringCandidates.push_back(Reg); + SmallSet Visited = {Reg}; + SmallVector RecoloringCandidates = {Reg}; LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI) << '(' << printReg(PhysReg, TRI) << ")\n"); @@ -2466,12 +2499,10 @@ void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) { do { Reg = RecoloringCandidates.pop_back_val(); - // We cannot recolor physical register. - if (Reg.isPhysical()) - continue; + MCRegister CurrPhys = VRM->getPhys(Reg); // This may be a skipped register. - if (!VRM->hasPhys(Reg)) { + if (!CurrPhys) { assert(!shouldAllocateRegister(Reg) && "We have an unallocated variable which should have been handled"); continue; @@ -2480,7 +2511,6 @@ void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) { // Get the live interval mapped with this virtual register to be able // to check for the interference with the new color. LiveInterval &LI = LIS->getInterval(Reg); - MCRegister CurrPhys = VRM->getPhys(Reg); // Check that the new color matches the register class constraints and // that it is free for this live range. if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) || @@ -2517,7 +2547,8 @@ void RAGreedy::tryHintRecoloring(const LiveInterval &VirtReg) { // Push all copy-related live-ranges to keep reconciling the broken // hints. for (const HintInfo &HI : Info) { - if (Visited.insert(HI.Reg).second) + // We cannot recolor physical register. 
+ if (HI.Reg.isVirtual() && Visited.insert(HI.Reg).second) RecoloringCandidates.push_back(HI.Reg); } } while (!RecoloringCandidates.empty()); diff --git a/llvm/lib/CodeGen/RegAllocScore.cpp b/llvm/lib/CodeGen/RegAllocScore.cpp index b86647dbe0a48..280946bb0d0b3 100644 --- a/llvm/lib/CodeGen/RegAllocScore.cpp +++ b/llvm/lib/CodeGen/RegAllocScore.cpp @@ -23,6 +23,8 @@ #include "llvm/Support/CommandLine.h" using namespace llvm; + +namespace llvm { LLVM_ABI cl::opt CopyWeight("regalloc-copy-weight", cl::init(0.2), cl::Hidden); LLVM_ABI cl::opt LoadWeight("regalloc-load-weight", cl::init(4.0), @@ -33,6 +35,8 @@ LLVM_ABI cl::opt CheapRematWeight("regalloc-cheap-remat-weight", cl::init(0.2), cl::Hidden); LLVM_ABI cl::opt ExpensiveRematWeight("regalloc-expensive-remat-weight", cl::init(1.0), cl::Hidden); +} // end namespace llvm + #define DEBUG_TYPE "regalloc-score" RegAllocScore &RegAllocScore::operator+=(const RegAllocScore &Other) { @@ -79,8 +83,7 @@ llvm::calculateRegAllocScore(const MachineFunction &MF, return MBFI.getBlockFreqRelativeToEntryBlock(&MBB); }, [&](const MachineInstr &MI) { - return MF.getSubtarget().getInstrInfo()->isTriviallyReMaterializable( - MI); + return MF.getSubtarget().getInstrInfo()->isReMaterializable(MI); }); } diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp index db00f54daeb62..ebfea8e5581bf 100644 --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -20,6 +20,7 @@ #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/CalcSpillWeights.h" #include "llvm/CodeGen/LiveInterval.h" #include "llvm/CodeGen/LiveIntervals.h" #include "llvm/CodeGen/LiveRangeEdit.h" @@ -294,10 +295,10 @@ class RegisterCoalescer : private LiveRangeEdit::Delegate { /// We found a copy which can be moved to its less frequent predecessor. bool removePartialRedundancy(const CoalescerPair &CP, MachineInstr &CopyMI); - /// If the source of a copy is defined by a - /// trivial computation, replace the copy by rematerialize the definition. - bool reMaterializeTrivialDef(const CoalescerPair &CP, MachineInstr *CopyMI, - bool &IsDefCopy); + /// If the source of a copy is defined by a CheapAsAMove computation, + /// replace the copy by rematerialize the definition. + bool reMaterializeDef(const CoalescerPair &CP, MachineInstr *CopyMI, + bool &IsDefCopy); /// Return true if a copy involving a physreg should be joined. bool canJoinPhys(const CoalescerPair &CP); @@ -583,14 +584,14 @@ bool CoalescerPair::isCoalescable(const MachineInstr *MI) const { return DstReg == Dst; // This is a partial register copy. Check that the parts match. return Register(TRI.getSubReg(DstReg, SrcSub)) == Dst; - } else { - // DstReg is virtual. - if (DstReg != Dst) - return false; - // Registers match, do the subregisters line up? - return TRI.composeSubRegIndices(SrcIdx, SrcSub) == - TRI.composeSubRegIndices(DstIdx, DstSub); } + + // DstReg is virtual. + if (DstReg != Dst) + return false; + // Registers match, do the subregisters line up? 
+ return TRI.composeSubRegIndices(SrcIdx, SrcSub) == + TRI.composeSubRegIndices(DstIdx, DstSub); } void RegisterCoalescerLegacy::getAnalysisUsage(AnalysisUsage &AU) const { @@ -1297,9 +1298,9 @@ static bool definesFullReg(const MachineInstr &MI, Register Reg) { return false; } -bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, - MachineInstr *CopyMI, - bool &IsDefCopy) { +bool RegisterCoalescer::reMaterializeDef(const CoalescerPair &CP, + MachineInstr *CopyMI, + bool &IsDefCopy) { IsDefCopy = false; Register SrcReg = CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg(); unsigned SrcIdx = CP.isFlipped() ? CP.getDstIdx() : CP.getSrcIdx(); @@ -1325,7 +1326,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, if (!TII->isAsCheapAsAMove(*DefMI)) return false; - if (!TII->isTriviallyReMaterializable(*DefMI)) + if (!TII->isReMaterializable(*DefMI)) return false; if (!definesFullReg(*DefMI, SrcReg)) @@ -1393,10 +1394,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, } } - SmallVector NewRegs; - LiveRangeEdit Edit(&SrcInt, NewRegs, *MF, *LIS, nullptr, this); - SlotIndex DefIdx = LIS->getInstructionIndex(*DefMI); - if (!Edit.allUsesAvailableAt(DefMI, DefIdx, CopyIdx)) + if (!VirtRegAuxInfo::allUsesAvailableAt(DefMI, CopyIdx, *LIS, *MRI, *TII)) return false; DebugLoc DL = CopyMI->getDebugLoc(); @@ -1405,6 +1403,8 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, std::next(MachineBasicBlock::iterator(CopyMI)); LiveRangeEdit::Remat RM(ValNo); RM.OrigMI = DefMI; + SmallVector NewRegs; + LiveRangeEdit Edit(&SrcInt, NewRegs, *MF, *LIS, nullptr, this); Edit.rematerializeAt(*MBB, MII, DstReg, RM, *TRI, false, SrcIdx, CopyMI); MachineInstr &NewMI = *std::prev(MII); NewMI.setDebugLoc(DL); @@ -1475,10 +1475,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, // // The implicit-def of the super register may have been reduced to // subregisters depending on the uses. - - bool NewMIDefinesFullReg = false; - - SmallVector NewMIImplDefs; + SmallVector, 4> NewMIImplDefs; for (unsigned i = NewMI.getDesc().getNumOperands(), e = NewMI.getNumOperands(); i != e; ++i) { @@ -1486,9 +1483,6 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, if (MO.isReg() && MO.isDef()) { assert(MO.isImplicit()); if (MO.getReg().isPhysical()) { - if (MO.getReg() == DstReg) - NewMIDefinesFullReg = true; - assert(MO.isImplicit() && MO.getReg().isPhysical() && (MO.isDead() || (DefSubIdx && @@ -1496,7 +1490,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, MCRegister((unsigned)NewMI.getOperand(0).getReg())) || TRI->isSubRegisterEq(NewMI.getOperand(0).getReg(), MO.getReg()))))); - NewMIImplDefs.push_back(MO.getReg().asMCReg()); + NewMIImplDefs.push_back({i, MO.getReg()}); } else { assert(MO.getReg() == NewMI.getOperand(0).getReg()); @@ -1641,12 +1635,30 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, // been asked for. If so it must implicitly define the whole thing. assert(DstReg.isPhysical() && "Only expect virtual or physical registers in remat"); + + // When we're rematerializing into a not-quite-right register we already add + // the real definition as an implicit-def, but we should also be marking the + // "official" register as dead, since nothing else is going to use it as a + // result of this remat. Not doing this can affect pressure tracking. 
NewMI.getOperand(0).setIsDead(true); - if (!NewMIDefinesFullReg) { + bool HasDefMatchingCopy = false; + for (auto [OpIndex, Reg] : NewMIImplDefs) { + if (Reg != DstReg) + continue; + // Also, if CopyDstReg is a sub-register of DstReg (and it is defined), we + // must mark DstReg as dead since it is not going to used as a result of + // this remat. + if (DstReg != CopyDstReg) + NewMI.getOperand(OpIndex).setIsDead(true); + else + HasDefMatchingCopy = true; + } + + // If NewMI does not already have an implicit-def CopyDstReg add one now. + if (!HasDefMatchingCopy) NewMI.addOperand(MachineOperand::CreateReg( CopyDstReg, true /*IsDef*/, true /*IsImp*/, false /*IsKill*/)); - } // Record small dead def live-ranges for all the subregisters // of the destination register. @@ -1677,8 +1689,8 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP, NewMI.addOperand(MO); SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI); - for (MCRegister Reg : NewMIImplDefs) { - for (MCRegUnit Unit : TRI->regunits(Reg)) + for (Register Reg : make_second_range(NewMIImplDefs)) { + for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) if (LiveRange *LR = LIS->getCachedRegUnit(Unit)) LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator()); } @@ -2128,10 +2140,10 @@ bool RegisterCoalescer::joinCopy( << printReg(CP.getSrcReg(), TRI) << " with " << printReg(CP.getDstReg(), TRI, CP.getSrcIdx()) << '\n'); if (!canJoinPhys(CP)) { - // Before giving up coalescing, if definition of source is defined by - // trivial computation, try rematerializing it. + // Before giving up coalescing, try rematerializing the source of + // the copy instead if it is cheap. bool IsDefCopy = false; - if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy)) + if (reMaterializeDef(CP, CopyMI, IsDefCopy)) return true; if (IsDefCopy) Again = true; // May be possible to coalesce later. @@ -2167,10 +2179,9 @@ bool RegisterCoalescer::joinCopy( if (!joinIntervals(CP)) { // Coalescing failed. - // If definition of source is defined by trivial computation, try - // rematerializing it. + // Try rematerializing the definition of the source if it is cheap. bool IsDefCopy = false; - if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy)) + if (reMaterializeDef(CP, CopyMI, IsDefCopy)) return true; // If we can eliminate the copy without merging the live segments, do so @@ -2903,8 +2914,7 @@ JoinVals::ConflictResolution JoinVals::analyzeValue(unsigned ValNo, if ((V.ValidLanes & OtherV.ValidLanes).any()) // Overlapping lanes can't be resolved. return CR_Impossible; - else - return CR_Merge; + return CR_Merge; } // No simultaneous def. Is Other live at the def? 
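The DAGCombiner and TargetLowering changes that follow drop the global NoSignedZerosFPMath checks in favour of the per-node nsz flag. As a reminder of why folds such as x + 0.0 -> x are gated on nsz at all, here is a minimal standalone check, assuming IEEE-754 doubles:

#include <cassert>
#include <cmath>

int main() {
  double NegZero = -0.0;
  // x + (+0.0) is not an identity for x == -0.0: the result is +0.0, so the
  // sign bit changes. Folding x + 0.0 -> x is therefore only sound when the
  // node carries the no-signed-zeros (nsz) flag.
  double Sum = NegZero + 0.0;
  assert(std::signbit(NegZero));
  assert(!std::signbit(Sum));
  // x + (-0.0) keeps the sign for both zeros, which is why that particular
  // fold stays unconditional in visitFADD.
  assert(std::signbit(NegZero + -0.0));
  assert(!std::signbit(0.0 + -0.0));
  return 0;
}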
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index a6ba6e518899f..558c5a0390228 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -11849,9 +11849,7 @@ static bool isLegalToCombineMinNumMaxNum(SelectionDAG &DAG, SDValue LHS, if (!VT.isFloatingPoint()) return false; - const TargetOptions &Options = DAG.getTarget().Options; - - return (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) && + return Flags.hasNoSignedZeros() && TLI.isProfitableToCombineMinNumMaxNum(VT) && (Flags.hasNoNaNs() || (DAG.isKnownNeverNaN(RHS) && DAG.isKnownNeverNaN(LHS))); @@ -12996,13 +12994,31 @@ SDValue DAGCombiner::foldPartialReduceMLAMulOp(SDNode *N) { SDValue Op1 = N->getOperand(1); SDValue Op2 = N->getOperand(2); - APInt C; - if (Op1->getOpcode() != ISD::MUL || - !ISD::isConstantSplatVector(Op2.getNode(), C) || !C.isOne()) + unsigned Opc = Op1->getOpcode(); + if (Opc != ISD::MUL && Opc != ISD::SHL) return SDValue(); SDValue LHS = Op1->getOperand(0); SDValue RHS = Op1->getOperand(1); + + // Try to treat (shl %a, %c) as (mul %a, (1 << %c)) for constant %c. + if (Opc == ISD::SHL) { + APInt C; + if (!ISD::isConstantSplatVector(RHS.getNode(), C)) + return SDValue(); + + RHS = + DAG.getSplatVector(RHS.getValueType(), DL, + DAG.getConstant(APInt(C.getBitWidth(), 1).shl(C), DL, + RHS.getValueType().getScalarType())); + Opc = ISD::MUL; + } + + APInt C; + if (Opc != ISD::MUL || !ISD::isConstantSplatVector(Op2.getNode(), C) || + !C.isOne()) + return SDValue(); + unsigned LHSOpcode = LHS->getOpcode(); if (!ISD::isExtOpcode(LHSOpcode)) return SDValue(); @@ -17351,7 +17367,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) { // Always prefer FMAD to FMA for precision. unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA; bool Aggressive = TLI.enableAggressiveFMAFusion(VT); - bool NoSignedZero = Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros(); + bool NoSignedZero = Flags.hasNoSignedZeros(); // Is the node an FMUL and contractable either due to global flags or // SDNodeFlags. @@ -17770,7 +17786,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { // N0 + -0.0 --> N0 (also allowed with +0.0 and fast-math) ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, true); if (N1C && N1C->isZero()) - if (N1C->isNegative() || Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) + if (N1C->isNegative() || Flags.hasNoSignedZeros()) return N0; if (SDValue NewSel = foldBinOpIntoSelect(N)) @@ -17823,11 +17839,10 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { return DAG.getConstantFP(0.0, DL, VT); } - // If 'unsafe math' or reassoc and nsz, fold lots of things. + // If reassoc and nsz, fold lots of things. 
// TODO: break out portions of the transformations below for which Unsafe is // considered and which do not require both nsz and reassoc - if ((Options.NoSignedZerosFPMath || - (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) && + if (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros() && AllowNewConst) { // fadd (fadd x, c1), c2 -> fadd x, c1 + c2 if (N1CFP && N0.getOpcode() == ISD::FADD && @@ -17911,10 +17926,9 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { DAG.getConstantFP(4.0, DL, VT)); } } - } // enable-unsafe-fp-math && AllowNewConst + } // reassoc && nsz && AllowNewConst - if ((Options.NoSignedZerosFPMath || - (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros()))) { + if (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros()) { // Fold fadd(vecreduce(x), vecreduce(y)) -> vecreduce(fadd(x, y)) if (SDValue SD = reassociateReduction(ISD::VECREDUCE_FADD, ISD::FADD, DL, VT, N0, N1, Flags)) @@ -17985,8 +17999,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) { // (fsub A, 0) -> A if (N1CFP && N1CFP->isZero()) { - if (!N1CFP->isNegative() || Options.NoSignedZerosFPMath || - Flags.hasNoSignedZeros()) { + if (!N1CFP->isNegative() || Flags.hasNoSignedZeros()) { return N0; } } @@ -17999,8 +18012,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) { // (fsub -0.0, N1) -> -N1 if (N0CFP && N0CFP->isZero()) { - if (N0CFP->isNegative() || - (Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros())) { + if (N0CFP->isNegative() || Flags.hasNoSignedZeros()) { // We cannot replace an FSUB(+-0.0,X) with FNEG(X) when denormals are // flushed to zero, unless all users treat denorms as zero (DAZ). // FIXME: This transform will change the sign of a NaN and the behavior @@ -18016,8 +18028,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) { } } - if ((Options.NoSignedZerosFPMath || - (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros())) && + if (Flags.hasAllowReassociation() && Flags.hasNoSignedZeros() && N1.getOpcode() == ISD::FADD) { // X - (X + Y) -> -Y if (N0 == N1->getOperand(0)) @@ -18332,11 +18343,9 @@ template SDValue DAGCombiner::visitFMA(SDNode *N) { return matcher.getNode(ISD::FMA, DL, VT, NegN0, NegN1, N2); } - // FIXME: use fast math flags instead of Options.UnsafeFPMath - // TODO: Finally migrate away from global TargetOptions. if ((Options.NoNaNsFPMath && Options.NoInfsFPMath) || (N->getFlags().hasNoNaNs() && N->getFlags().hasNoInfs())) { - if (Options.NoSignedZerosFPMath || N->getFlags().hasNoSignedZeros() || + if (N->getFlags().hasNoSignedZeros() || (N2CFP && !N2CFP->isExactlyValue(-0.0))) { if (N0CFP && N0CFP->isZero()) return N2; @@ -18641,8 +18650,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) { } // Fold X/Sqrt(X) -> Sqrt(X) - if ((Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros()) && - Flags.hasAllowReassociation()) + if (Flags.hasNoSignedZeros() && Flags.hasAllowReassociation()) if (N1.getOpcode() == ISD::FSQRT && N0 == N1.getOperand(0)) return N1; diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index ff7cd665446cc..87d5453cd98cf 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -6256,17 +6256,17 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) { // FIXME: Not all targets may support EVL in VP_LOAD. These will have been // removed from the IR by the ExpandVectorPredication pass but we're // reintroducing them here. 
- EVT LdVT = LD->getMemoryVT(); - EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), LdVT); - EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, - WideVT.getVectorElementCount()); + EVT VT = LD->getValueType(0); + EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); + EVT WideMaskVT = getSetCCResultType(WideVT); + if (ExtType == ISD::NON_EXTLOAD && TLI.isOperationLegalOrCustom(ISD::VP_LOAD, WideVT) && TLI.isTypeLegal(WideMaskVT)) { SDLoc DL(N); SDValue Mask = DAG.getAllOnesConstant(DL, WideMaskVT); SDValue EVL = DAG.getElementCount(DL, TLI.getVPExplicitVectorLengthTy(), - LdVT.getVectorElementCount()); + VT.getVectorElementCount()); SDValue NewLoad = DAG.getLoadVP(LD->getAddressingMode(), ISD::NON_EXTLOAD, WideVT, DL, LD->getChain(), LD->getBasePtr(), LD->getOffset(), Mask, @@ -6303,6 +6303,24 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) { return Result; } + if (VT.isVector()) { + // If all else fails replace the load with a wide masked load. + SDLoc DL(N); + EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout()); + + SDValue Len = DAG.getElementCount(DL, IdxVT, VT.getVectorElementCount()); + SDValue Mask = DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, WideMaskVT, + DAG.getConstant(0, DL, IdxVT), Len); + + SDValue NewLoad = DAG.getMaskedLoad( + WideVT, DL, LD->getChain(), LD->getBasePtr(), LD->getOffset(), Mask, + DAG.getPOISON(WideVT), LD->getMemoryVT(), LD->getMemOperand(), + LD->getAddressingMode(), LD->getExtensionType()); + + ReplaceValueWith(SDValue(N, 1), NewLoad.getValue(1)); + return NewLoad; + } + report_fatal_error("Unable to widen vector load"); } @@ -7516,8 +7534,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) { SDValue StVal = ST->getValue(); EVT StVT = StVal.getValueType(); EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StVT); - EVT WideMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, - WideVT.getVectorElementCount()); + EVT WideMaskVT = getSetCCResultType(WideVT); if (TLI.isOperationLegalOrCustom(ISD::VP_STORE, WideVT) && TLI.isTypeLegal(WideMaskVT)) { @@ -7540,6 +7557,22 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) { return DAG.getNode(ISD::TokenFactor, SDLoc(ST), MVT::Other, StChain); } + if (StVT.isVector()) { + // If all else fails replace the store with a wide masked store. 
+ SDLoc DL(N); + EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout()); + + SDValue WideStVal = GetWidenedVector(StVal); + SDValue Len = DAG.getElementCount(DL, IdxVT, StVT.getVectorElementCount()); + SDValue Mask = DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, WideMaskVT, + DAG.getConstant(0, DL, IdxVT), Len); + + return DAG.getMaskedStore(ST->getChain(), DL, WideStVal, ST->getBasePtr(), + ST->getOffset(), Mask, ST->getMemoryVT(), + ST->getMemOperand(), ST->getAddressingMode(), + ST->isTruncatingStore()); + } + report_fatal_error("Unable to widen vector store"); } @@ -8298,8 +8331,7 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl &LdChain, AAMDNodes AAInfo = LD->getAAInfo(); if (LdVT.isScalableVector()) - report_fatal_error("Generating widen scalable extending vector loads is " - "not yet supported"); + return SDValue(); EVT EltVT = WidenVT.getVectorElementType(); EVT LdEltVT = LdVT.getVectorElementType(); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 7aa293af963e6..95f53fe0bfdba 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -4762,6 +4762,11 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, case ISD::AssertZext: Tmp = cast(Op.getOperand(1))->getVT().getSizeInBits(); return VTBits-Tmp; + case ISD::FREEZE: + if (isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedElts, + /*PoisonOnly=*/false)) + return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); + break; case ISD::MERGE_VALUES: return ComputeNumSignBits(Op.getOperand(Op.getResNo()), DemandedElts, Depth + 1); @@ -11161,8 +11166,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, APFloat FrexpMant = frexp(C->getValueAPF(), FrexpExp, APFloat::rmNearestTiesToEven); SDValue Result0 = getConstantFP(FrexpMant, DL, VTList.VTs[0]); - SDValue Result1 = - getConstant(FrexpMant.isFinite() ? FrexpExp : 0, DL, VTList.VTs[1]); + SDValue Result1 = getSignedConstant(FrexpMant.isFinite() ? FrexpExp : 0, + DL, VTList.VTs[1]); return getNode(ISD::MERGE_VALUES, DL, VTList, {Result0, Result1}, Flags); } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index b5201a311c591..c21890a0d856f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -8103,10 +8103,6 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, return; } case Intrinsic::vector_partial_reduce_add: { - if (!TLI.shouldExpandPartialReductionIntrinsic(cast(&I))) { - visitTargetIntrinsic(I, Intrinsic); - return; - } SDValue Acc = getValue(I.getOperand(0)); SDValue Input = getValue(I.getOperand(1)); setValue(&I, diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 80500e48351e4..cc503d324e74b 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -7492,7 +7492,6 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, // Pre-increment recursion depth for use in recursive calls. 
++Depth; const SDNodeFlags Flags = Op->getFlags(); - const TargetOptions &Options = DAG.getTarget().Options; EVT VT = Op.getValueType(); unsigned Opcode = Op.getOpcode(); @@ -7572,7 +7571,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, return DAG.getBuildVector(VT, DL, Ops); } case ISD::FADD: { - if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) + if (!Flags.hasNoSignedZeros()) break; // After operation legalization, it might not be legal to create new FSUBs. @@ -7617,7 +7616,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, } case ISD::FSUB: { // We can't turn -(A-B) into B-A when we honor signed zeros. - if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) + if (!Flags.hasNoSignedZeros()) break; SDValue X = Op.getOperand(0), Y = Op.getOperand(1); @@ -7678,7 +7677,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, } case ISD::FMA: case ISD::FMAD: { - if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) + if (!Flags.hasNoSignedZeros()) break; SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2); @@ -8797,7 +8796,6 @@ SDValue TargetLowering::expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *Node, EVT VT = Node->getValueType(0); EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); bool IsMax = Opc == ISD::FMAXIMUMNUM; - const TargetOptions &Options = DAG.getTarget().Options; SDNodeFlags Flags = Node->getFlags(); unsigned NewOp = @@ -8839,7 +8837,9 @@ SDValue TargetLowering::expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *Node, return DAG.getNode(IEEE2008Op, DL, VT, LHS, RHS, Flags); } - if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) + if (VT.isVector() && + (isOperationLegalOrCustomOrPromote(Opc, VT.getVectorElementType()) || + !isOperationLegalOrCustom(ISD::VSELECT, VT))) return DAG.UnrollVectorOp(Node); // If only one operand is NaN, override it with another operand. @@ -8856,8 +8856,8 @@ SDValue TargetLowering::expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *Node, // TODO: We need quiet sNaN if strictfp. // Fixup signed zero behavior. 
- if (Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros() || - DAG.isKnownNeverZeroFloat(LHS) || DAG.isKnownNeverZeroFloat(RHS)) { + if (Flags.hasNoSignedZeros() || DAG.isKnownNeverZeroFloat(LHS) || + DAG.isKnownNeverZeroFloat(RHS)) { return MinMax; } SDValue TestZero = @@ -9775,11 +9775,12 @@ SDValue TargetLowering::expandABD(SDNode *N, SelectionDAG &DAG) const { return DAG.getNode(ISD::SUB, dl, VT, Cmp, Xor); } - // Similar to the branchless expansion, use the (sign-extended) usubo overflow - // flag if the (scalar) type is illegal as this is more likely to legalize - // cleanly: - // abdu(lhs, rhs) -> sub(xor(sub(lhs, rhs), uof(lhs, rhs)), uof(lhs, rhs)) - if (!IsSigned && VT.isScalarInteger() && !isTypeLegal(VT)) { + // Similar to the branchless expansion, if we don't prefer selects, use the + // (sign-extended) usubo overflow flag if the (scalar) type is illegal as this + // is more likely to legalize cleanly: abdu(lhs, rhs) -> sub(xor(sub(lhs, + // rhs), uof(lhs, rhs)), uof(lhs, rhs)) + if (!IsSigned && VT.isScalarInteger() && !isTypeLegal(VT) && + !preferSelectsOverBooleanArithmetic(VT)) { SDValue USubO = DAG.getNode(ISD::USUBO, dl, DAG.getVTList(VT, MVT::i1), {LHS, RHS}); SDValue Cmp = DAG.getNode(ISD::SIGN_EXTEND, dl, VT, USubO.getValue(1)); @@ -10974,7 +10975,8 @@ SDValue TargetLowering::expandCMP(SDNode *Node, SelectionDAG &DAG) const { // because one of the conditions can be merged with one of the selects. // And finally, if we don't know the contents of high bits of a boolean value // we can't perform any arithmetic either. - if (shouldExpandCmpUsingSelects(VT) || BoolVT.getScalarSizeInBits() == 1 || + if (preferSelectsOverBooleanArithmetic(VT) || + BoolVT.getScalarSizeInBits() == 1 || getBooleanContents(BoolVT) == UndefinedBooleanContent) { SDValue SelectZeroOrOne = DAG.getSelect(dl, ResVT, IsGT, DAG.getConstant(1, dl, ResVT), diff --git a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp index 096a33c17cb4b..64e5cd5cd19bb 100644 --- a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp +++ b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp @@ -72,7 +72,7 @@ struct StackFrameLayoutAnalysis { : Slot(Idx), Size(MFI.getObjectSize(Idx)), Align(MFI.getObjectAlign(Idx).value()), Offset(Offset), SlotTy(Invalid), Scalable(false) { - Scalable = MFI.getStackID(Idx) == TargetStackID::ScalableVector; + Scalable = MFI.hasScalableStackID(Idx); if (MFI.isSpillSlotObjectIndex(Idx)) SlotTy = SlotType::Spill; else if (MFI.isFixedObjectIndex(Idx)) diff --git a/llvm/lib/CodeGen/TailDuplicator.cpp b/llvm/lib/CodeGen/TailDuplicator.cpp index 9b1420a94142d..8e48d19537165 100644 --- a/llvm/lib/CodeGen/TailDuplicator.cpp +++ b/llvm/lib/CodeGen/TailDuplicator.cpp @@ -375,13 +375,7 @@ void TailDuplicator::processPHI( if (!Remove) return; - // MI might have multiple entries for PredBB. Need to remove them all. 
- for (unsigned N = MI->getNumOperands(); N > 2; N -= 2) { - if (MI->getOperand(N - 1).getMBB() == PredBB) { - MI->removeOperand(N - 1); - MI->removeOperand(N - 2); - } - } + MI->removePHIIncomingValueFor(*PredBB); if (MI->getNumOperands() == 1 && !TailBB->hasAddressTaken()) MI->eraseFromParent(); diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp index 5be89b49fb6ba..2f3b7a2c8fcdf 100644 --- a/llvm/lib/CodeGen/TargetInstrInfo.cpp +++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp @@ -1590,7 +1590,7 @@ MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const { return MachineTraceStrategy::TS_MinInstrCount; } -bool TargetInstrInfo::isReallyTriviallyReMaterializable( +bool TargetInstrInfo::isReMaterializableImpl( const MachineInstr &MI) const { const MachineFunction &MF = *MI.getMF(); const MachineRegisterInfo &MRI = MF.getRegInfo(); diff --git a/llvm/lib/CodeGen/UnreachableBlockElim.cpp b/llvm/lib/CodeGen/UnreachableBlockElim.cpp index 512e83db40a5a..cf8c1a7bd08d0 100644 --- a/llvm/lib/CodeGen/UnreachableBlockElim.cpp +++ b/llvm/lib/CodeGen/UnreachableBlockElim.cpp @@ -22,6 +22,7 @@ #include "llvm/CodeGen/UnreachableBlockElim.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineDominators.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -155,18 +156,7 @@ bool UnreachableMachineBlockElim::run(MachineFunction &F) { if (MDT && MDT->getNode(&BB)) MDT->eraseNode(&BB); while (!BB.succ_empty()) { - MachineBasicBlock* succ = *BB.succ_begin(); - - for (MachineInstr &Phi : succ->phis()) { - for (unsigned i = Phi.getNumOperands() - 1; i >= 2; i -= 2) { - if (Phi.getOperand(i).isMBB() && - Phi.getOperand(i).getMBB() == &BB) { - Phi.removeOperand(i); - Phi.removeOperand(i - 1); - } - } - } - + (*BB.succ_begin())->removePHIsIncomingValuesForPredecessor(BB); BB.removeSuccessor(BB.succ_begin()); } } diff --git a/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp b/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp index 8ec3f1729b974..5ab80e339a1ad 100644 --- a/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFVerifier.cpp @@ -851,6 +851,86 @@ unsigned DWARFVerifier::verifyDebugInfoAttribute(const DWARFDie &Die, } break; } + case DW_AT_LLVM_stmt_sequence: { + // Make sure the offset in the DW_AT_LLVM_stmt_sequence attribute is valid + // and points to a valid sequence offset in the line table. 
+ auto SectionOffset = AttrValue.Value.getAsSectionOffset(); + if (!SectionOffset) { + ReportError("Invalid DW_AT_LLVM_stmt_sequence encoding", + "DIE has invalid DW_AT_LLVM_stmt_sequence encoding"); + break; + } + if (*SectionOffset >= U->getLineSection().Data.size()) { + ReportError( + "DW_AT_LLVM_stmt_sequence offset out of bounds", + "DW_AT_LLVM_stmt_sequence offset is beyond .debug_line bounds: " + + llvm::formatv("{0:x8}", *SectionOffset)); + break; + } + + // Get the line table for this unit to validate bounds + const auto *LineTable = DCtx.getLineTableForUnit(U); + if (!LineTable) { + ReportError("DW_AT_LLVM_stmt_sequence without line table", + "DIE has DW_AT_LLVM_stmt_sequence but compile unit has no " + "line table"); + break; + } + + // Get the DW_AT_stmt_list offset from the compile unit DIE + DWARFDie CUDie = U->getUnitDIE(); + auto StmtListOffset = toSectionOffset(CUDie.find(DW_AT_stmt_list)); + if (!StmtListOffset) { + ReportError("DW_AT_LLVM_stmt_sequence without DW_AT_stmt_list", + "DIE has DW_AT_LLVM_stmt_sequence but compile unit has no " + "DW_AT_stmt_list"); + break; + } + + const int8_t DwarfOffset = + LineTable->Prologue.getFormParams().getDwarfOffsetByteSize(); + // Calculate the bounds of this specific line table + uint64_t LineTableStart = *StmtListOffset; + uint64_t PrologueLength = LineTable->Prologue.PrologueLength; + uint64_t TotalLength = LineTable->Prologue.TotalLength; + uint64_t LineTableEnd = LineTableStart + TotalLength + DwarfOffset; + + // See DWARF definition for this, the following three do not + // count toward prologue length. Calculate SequencesStart correctly + // according to DWARF specification: + uint64_t InitialLengthSize = DwarfOffset; + // Version field is always 2 bytes + uint64_t VersionSize = 2; + uint64_t PrologueLengthSize = DwarfOffset; + uint64_t SequencesStart = LineTableStart + InitialLengthSize + VersionSize + + PrologueLengthSize + PrologueLength; + + // Check if the offset is within the bounds of this specific line table + if (*SectionOffset < SequencesStart || *SectionOffset >= LineTableEnd) { + ReportError("DW_AT_LLVM_stmt_sequence offset out of line table bounds", + "DW_AT_LLVM_stmt_sequence offset " + + llvm::formatv("{0:x8}", *SectionOffset) + + " is not within the line table bounds [" + + llvm::formatv("{0:x8}", SequencesStart) + ", " + + llvm::formatv("{0:x8}", LineTableEnd) + ")"); + break; + } + + // Check if the offset matches any of the sequence offset. 
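Worked numbers for the bounds computed above, assuming a DWARF32 line-table header (4-byte offsets) at DW_AT_stmt_list 0x100 with TotalLength 0x200 and PrologueLength 0x30; the values only illustrate the formulas and are not taken from a real binary:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t LineTableStart = 0x100; // DW_AT_stmt_list
  const uint64_t TotalLength = 0x200;    // unit_length field value
  const uint64_t PrologueLength = 0x30;  // header_length field value
  const uint64_t DwarfOffset = 4;        // DWARF32 offset size

  // unit_length does not count its own field, so the table ends at:
  const uint64_t LineTableEnd = LineTableStart + TotalLength + DwarfOffset;
  // Sequences begin after unit_length, the 2-byte version, the header_length
  // field itself, and the PrologueLength bytes that field covers:
  const uint64_t SequencesStart =
      LineTableStart + DwarfOffset + 2 + DwarfOffset + PrologueLength;

  assert(LineTableEnd == 0x304);
  assert(SequencesStart == 0x13a);
  // A DW_AT_LLVM_stmt_sequence offset must land in [SequencesStart,
  // LineTableEnd) and additionally match one of the sequence start offsets.
  return 0;
}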
+ auto It = + std::find_if(LineTable->Sequences.begin(), LineTable->Sequences.end(), + [SectionOffset](const auto &Sequence) { + return Sequence.StmtSeqOffset == *SectionOffset; + }); + + if (It == LineTable->Sequences.end()) + ReportError( + "Invalid DW_AT_LLVM_stmt_sequence offset", + "DW_AT_LLVM_stmt_sequence offset " + + llvm::formatv("{0:x8}", *SectionOffset) + + " does not point to a valid sequence offset in the line table"); + break; + } default: break; } diff --git a/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp b/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp index c1017d8a3c22f..d973a47f68732 100644 --- a/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp +++ b/llvm/lib/DebugInfo/LogicalView/Core/LVReader.cpp @@ -148,7 +148,7 @@ std::error_code LVSplitContext::open(std::string ContextName, return std::error_code(); } -LVReader *CurrentReader = nullptr; +static LVReader *CurrentReader = nullptr; LVReader &LVReader::getInstance() { if (CurrentReader) return *CurrentReader; diff --git a/llvm/lib/DebugInfo/LogicalView/Core/LVScope.cpp b/llvm/lib/DebugInfo/LogicalView/Core/LVScope.cpp index 64f1bfc015380..e03932622b259 100644 --- a/llvm/lib/DebugInfo/LogicalView/Core/LVScope.cpp +++ b/llvm/lib/DebugInfo/LogicalView/Core/LVScope.cpp @@ -107,10 +107,16 @@ LVScopeDispatch LVScope::Dispatch = { {LVScopeKind::IsTryBlock, &LVScope::getIsTryBlock}, {LVScopeKind::IsUnion, &LVScope::getIsUnion}}; -void LVScope::addToChildren(LVElement *Element) { - if (!Children) - Children = std::make_unique(); - Children->push_back(Element); +const LVTypes LVScope::EmptyTypes{}; +const LVSymbols LVScope::EmptySymbols{}; +const LVScopes LVScope::EmptyScopes{}; + +LVElements LVScope::getSortedChildren(LVSortFunction SortFunction) const { + const auto UnsortedChildren = getChildren(); + LVElements Elements{UnsortedChildren.begin(), UnsortedChildren.end()}; + if (SortFunction) + llvm::stable_sort(Elements, SortFunction); + return Elements; } void LVScope::addElement(LVElement *Element) { @@ -175,7 +181,6 @@ void LVScope::addElement(LVScope *Scope) { // Add it to parent. Scopes->push_back(Scope); - addToChildren(Scope); Scope->setParent(this); // Notify the reader about the new element being added. @@ -202,7 +207,6 @@ void LVScope::addElement(LVSymbol *Symbol) { // Add it to parent. Symbols->push_back(Symbol); - addToChildren(Symbol); Symbol->setParent(this); // Notify the reader about the new element being added. @@ -229,7 +233,6 @@ void LVScope::addElement(LVType *Type) { // Add it to parent. Types->push_back(Type); - addToChildren(Type); Type->setParent(this); // Notify the reader about the new element being added. @@ -277,15 +280,12 @@ bool LVScope::removeElement(LVElement *Element) { if (Element->getIsLine()) return RemoveElement(Lines); - if (RemoveElement(Children)) { - if (Element->getIsSymbol()) - return RemoveElement(Symbols); - if (Element->getIsType()) - return RemoveElement(Types); - if (Element->getIsScope()) - return RemoveElement(Scopes); - llvm_unreachable("Invalid element."); - } + if (Element->getIsSymbol()) + return RemoveElement(Symbols); + if (Element->getIsType()) + return RemoveElement(Types); + if (Element->getIsScope()) + return RemoveElement(Scopes); return false; } @@ -356,9 +356,8 @@ void LVScope::updateLevel(LVScope *Parent, bool Moved) { setLevel(Parent->getLevel() + 1); // Update the children. 
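getSortedChildren() above replaces the persistent Children vector with a view that is materialized and sorted on demand from the Types, Symbols and Scopes lists, which is why the loops below switch from iterating a stored list to iterating getChildren()/getSortedChildren(). A standalone sketch of that pattern; the ToyScope names are illustrative:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Three separately owned element lists, as in LVScope.
struct ToyScope {
  std::vector<std::string> Types, Symbols, Scopes;

  // Build the combined view only when needed and sort it without touching
  // the underlying containers; stable_sort keeps insertion order for ties.
  std::vector<std::string>
  sortedChildren(bool (*Less)(const std::string &, const std::string &)) const {
    std::vector<std::string> All;
    All.insert(All.end(), Types.begin(), Types.end());
    All.insert(All.end(), Symbols.begin(), Symbols.end());
    All.insert(All.end(), Scopes.begin(), Scopes.end());
    if (Less)
      std::stable_sort(All.begin(), All.end(), Less);
    return All;
  }
};

int main() {
  ToyScope S{{"int"}, {"x", "y"}, {"inner"}};
  auto ByName = [](const std::string &A, const std::string &B) { return A < B; };
  std::vector<std::string> Sorted = S.sortedChildren(ByName);
  assert(Sorted.size() == 4);
  assert(std::is_sorted(Sorted.begin(), Sorted.end()));
  return 0;
}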
- if (Children) - for (LVElement *Element : *Children) - Element->updateLevel(this, Moved); + for (LVElement *Element : getChildren()) + Element->updateLevel(this, Moved); // Update any lines. if (Lines) @@ -374,13 +373,12 @@ void LVScope::resolve() { LVElement::resolve(); // Resolve the children. - if (Children) - for (LVElement *Element : *Children) { - if (getIsGlobalReference()) - // If the scope is a global reference, mark all its children as well. - Element->setIsGlobalReference(); - Element->resolve(); - } + for (LVElement *Element : getChildren()) { + if (getIsGlobalReference()) + // If the scope is a global reference, mark all its children as well. + Element->setIsGlobalReference(); + Element->resolve(); + } } void LVScope::resolveName() { @@ -633,14 +631,13 @@ Error LVScope::doPrint(bool Split, bool Match, bool Print, raw_ostream &OS, options().getPrintFormatting() && getLevel() < options().getOutputLevel()) { // Print the children. - if (Children) - for (const LVElement *Element : *Children) { - if (Match && !Element->getHasPattern()) - continue; - if (Error Err = - Element->doPrint(Split, Match, Print, *StreamSplit, Full)) - return Err; - } + for (const LVElement *Element : getSortedChildren()) { + if (Match && !Element->getHasPattern()) + continue; + if (Error Err = + Element->doPrint(Split, Match, Print, *StreamSplit, Full)) + return Err; + } // Print the line records. if (Lines) @@ -692,7 +689,6 @@ void LVScope::sort() { Traverse(Parent->Symbols, SortFunction); Traverse(Parent->Scopes, SortFunction); Traverse(Parent->Ranges, compareRange); - Traverse(Parent->Children, SortFunction); if (Parent->Scopes) for (LVScope *Scope : *Parent->Scopes) @@ -978,9 +974,8 @@ bool LVScope::equals(const LVScopes *References, const LVScopes *Targets) { void LVScope::report(LVComparePass Pass) { getComparator().printItem(this, Pass); getComparator().push(this); - if (Children) - for (LVElement *Element : *Children) - Element->report(Pass); + for (LVElement *Element : getSortedChildren()) + Element->report(Pass); if (Lines) for (LVLine *Line : *Lines) @@ -1656,9 +1651,8 @@ void LVScopeCompileUnit::printMatchedElements(raw_ostream &OS, // Print the view for the matched scopes. 
for (const LVScope *Scope : MatchedScopes) { Scope->print(OS); - if (const LVElements *Elements = Scope->getChildren()) - for (LVElement *Element : *Elements) - Element->print(OS); + for (LVElement *Element : Scope->getSortedChildren()) + Element->print(OS); } } diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_loongarch.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_loongarch.cpp index f23fb346c55f9..5f956b1e6a517 100644 --- a/llvm/lib/ExecutionEngine/JITLink/ELF_loongarch.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/ELF_loongarch.cpp @@ -365,6 +365,10 @@ class ELFLinkGraphBuilder_loongarch : public ELFLinkGraphBuilder { uint32_t Type = Rel.getType(false); int64_t Addend = Rel.r_addend; + // ignore + if (Type == ELF::R_LARCH_MARK_LA) + return Error::success(); + if (Type == ELF::R_LARCH_RELAX) { if (BlockToFix.edges_empty()) return make_error( diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp index 09ac0f19a7b07..f79478038c5cb 100644 --- a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp @@ -599,8 +599,7 @@ Expected> createLinkGraphFromMachOObject_arm64( } static Error applyPACSigningToModInitPointers(LinkGraph &G) { - assert(G.getTargetTriple().getSubArch() == Triple::AArch64SubArch_arm64e && - "PAC signing only valid for arm64e"); + assert(G.getTargetTriple().isArm64e() && "PAC signing only valid for arm64e"); if (auto *ModInitSec = G.findSectionByName("__DATA,__mod_init_func")) { for (auto *B : ModInitSec->blocks()) { diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp index d6268037dea86..dd1b1d3b2e943 100644 --- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp +++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp @@ -781,6 +781,9 @@ void RuntimeDyldELF::resolveLoongArch64Relocation(const SectionEntry &Section, default: report_fatal_error("Relocation type not implemented yet!"); break; + case ELF::R_LARCH_MARK_LA: + // ignore + break; case ELF::R_LARCH_32: support::ulittle32_t::ref{TargetPtr} = static_cast(Value + Addend); diff --git a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp index 92c62b83fadb0..2b33e560d74ac 100644 --- a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp +++ b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp @@ -113,6 +113,13 @@ static raw_ostream &operator<<(raw_ostream &OS, return OS; } +static raw_ostream &operator<<(raw_ostream &OS, + const llvm::dxbc::StaticSamplerFlags &Flags) { + printFlags(OS, Flags, dxbc::getStaticSamplerFlags()); + + return OS; +} + raw_ostream &operator<<(raw_ostream &OS, const dxbc::RootFlags &Flags) { OS << "RootFlags("; printFlags(OS, Flags, dxbc::getRootFlags()); @@ -172,7 +179,7 @@ raw_ostream &operator<<(raw_ostream &OS, const StaticSampler &Sampler) { << ", borderColor = " << Sampler.BorderColor << ", minLOD = " << Sampler.MinLOD << ", maxLOD = " << Sampler.MaxLOD << ", space = " << Sampler.Space << ", visibility = " << Sampler.Visibility - << ")"; + << ", flags = " << Sampler.Flags << ")"; return OS; } diff --git a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp index f29f2c7602fc6..7a0cf408968de 100644 --- a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp +++ b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp @@ -27,6 +27,11 @@ namespace rootsig { char GenericRSMetadataError::ID; char InvalidRSMetadataFormat::ID; char 
InvalidRSMetadataValue::ID; +char TableSamplerMixinError::ID; +char ShaderRegisterOverflowError::ID; +char OffsetOverflowError::ID; +char OffsetAppendAfterOverflow::ID; + template char RootSignatureValidationError::ID; static std::optional extractMdIntValue(MDNode *Node, @@ -55,8 +60,9 @@ static std::optional extractMdStringValue(MDNode *Node, template && std::is_same_v, uint32_t>>> -Expected extractEnumValue(MDNode *Node, unsigned int OpId, StringRef ErrText, - llvm::function_ref VerifyFn) { +static Expected +extractEnumValue(MDNode *Node, unsigned int OpId, StringRef ErrText, + llvm::function_ref VerifyFn) { if (std::optional Val = extractMdIntValue(Node, OpId)) { if (!VerifyFn(*Val)) return make_error>(ErrText, *Val); @@ -212,6 +218,7 @@ MDNode *MetadataBuilder::BuildStaticSampler(const StaticSampler &Sampler) { ConstantAsMetadata::get(Builder.getInt32(Sampler.Space)), ConstantAsMetadata::get( Builder.getInt32(to_underlying(Sampler.Visibility))), + ConstantAsMetadata::get(Builder.getInt32(to_underlying(Sampler.Flags))), }; return MDNode::get(Ctx, Operands); } @@ -411,7 +418,7 @@ Error MetadataParser::parseDescriptorTable(mcdxbc::RootSignatureDesc &RSD, Error MetadataParser::parseStaticSampler(mcdxbc::RootSignatureDesc &RSD, MDNode *StaticSamplerNode) { - if (StaticSamplerNode->getNumOperands() != 14) + if (StaticSamplerNode->getNumOperands() != 15) return make_error("Static Sampler"); mcdxbc::StaticSampler Sampler; @@ -495,6 +502,17 @@ Error MetadataParser::parseStaticSampler(mcdxbc::RootSignatureDesc &RSD, return Error(std::move(E)); Sampler.ShaderVisibility = *Visibility; + if (RSD.Version < 3) { + RSD.StaticSamplers.push_back(Sampler); + return Error::success(); + } + assert(RSD.Version >= 3); + + if (std::optional Val = extractMdIntValue(StaticSamplerNode, 14)) + Sampler.Flags = *Val; + else + return make_error("Static Sampler Flags"); + RSD.StaticSamplers.push_back(Sampler); return Error::success(); } @@ -538,6 +556,60 @@ Error MetadataParser::parseRootSignatureElement(mcdxbc::RootSignatureDesc &RSD, llvm_unreachable("Unhandled RootSignatureElementKind enum."); } +static Error +validateDescriptorTableSamplerMixin(const mcdxbc::DescriptorTable &Table, + uint32_t Location) { + dxil::ResourceClass CurrRC = dxil::ResourceClass::Sampler; + for (const mcdxbc::DescriptorRange &Range : Table.Ranges) { + if (Range.RangeType == dxil::ResourceClass::Sampler && + CurrRC != dxil::ResourceClass::Sampler) + return make_error(CurrRC, Location); + CurrRC = Range.RangeType; + } + return Error::success(); +} + +static Error +validateDescriptorTableRegisterOverflow(const mcdxbc::DescriptorTable &Table, + uint32_t Location) { + uint64_t Offset = 0; + bool IsPrevUnbound = false; + for (const mcdxbc::DescriptorRange &Range : Table.Ranges) { + // Validation of NumDescriptors should have happened by this point. 
+ if (Range.NumDescriptors == 0) + continue; + + const uint64_t RangeBound = llvm::hlsl::rootsig::computeRangeBound( + Range.BaseShaderRegister, Range.NumDescriptors); + + if (!verifyNoOverflowedOffset(RangeBound)) + return make_error( + Range.RangeType, Range.BaseShaderRegister, Range.RegisterSpace); + + bool IsAppending = + Range.OffsetInDescriptorsFromTableStart == DescriptorTableOffsetAppend; + if (!IsAppending) + Offset = Range.OffsetInDescriptorsFromTableStart; + + if (IsPrevUnbound && IsAppending) + return make_error( + Range.RangeType, Range.BaseShaderRegister, Range.RegisterSpace); + + const uint64_t OffsetBound = + llvm::hlsl::rootsig::computeRangeBound(Offset, Range.NumDescriptors); + + if (!verifyNoOverflowedOffset(OffsetBound)) + return make_error( + Range.RangeType, Range.BaseShaderRegister, Range.RegisterSpace); + + Offset = OffsetBound + 1; + IsPrevUnbound = + Range.NumDescriptors == llvm::hlsl::rootsig::NumDescriptorsUnbounded; + } + + return Error::success(); +} + Error MetadataParser::validateRootSignature( const mcdxbc::RootSignatureDesc &RSD) { Error DeferredErrs = Error::success(); @@ -611,6 +683,14 @@ Error MetadataParser::validateRootSignature( joinErrors(std::move(DeferredErrs), make_error>( "DescriptorFlag", Range.Flags)); + + if (Error Err = + validateDescriptorTableSamplerMixin(Table, Info.Location)) + DeferredErrs = joinErrors(std::move(DeferredErrs), std::move(Err)); + + if (Error Err = + validateDescriptorTableRegisterOverflow(Table, Info.Location)) + DeferredErrs = joinErrors(std::move(DeferredErrs), std::move(Err)); } break; } @@ -651,6 +731,12 @@ Error MetadataParser::validateRootSignature( joinErrors(std::move(DeferredErrs), make_error>( "RegisterSpace", Sampler.RegisterSpace)); + + if (!hlsl::rootsig::verifyStaticSamplerFlags(RSD.Version, Sampler.Flags)) + DeferredErrs = + joinErrors(std::move(DeferredErrs), + make_error>( + "Static Sampler Flag", Sampler.Flags)); } return DeferredErrs; diff --git a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp index c2c3bf6d1b8dc..8a2b03d9ede8b 100644 --- a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp +++ b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp @@ -20,7 +20,9 @@ namespace rootsig { bool verifyRootFlag(uint32_t Flags) { return (Flags & ~0xfff) == 0; } -bool verifyVersion(uint32_t Version) { return (Version == 1 || Version == 2); } +bool verifyVersion(uint32_t Version) { + return (Version == 1 || Version == 2 || Version == 3); +} bool verifyRegisterValue(uint32_t RegisterValue) { return RegisterValue != ~0U; @@ -38,7 +40,7 @@ bool verifyRootDescriptorFlag(uint32_t Version, uint32_t FlagsVal) { if (Version == 1) return Flags == FlagT::DataVolatile; - assert(Version == 2 && "Provided invalid root signature version"); + assert((Version <= 3) && "Provided invalid root signature version"); // The data-specific flags are mutually exclusive. 
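These validators lean on two idioms: reject any bit at or above the next power of two past the largest defined flag, and check that the remaining bits are a subset of an allowed mask; verifyRootFlag above uses the (Flags & ~Mask) == 0 form while verifyStaticSamplerFlags just below uses the equivalent (Flags | Mask) == Mask form. A standalone illustration with made-up flag values:

#include <cassert>
#include <cstdint>

// Made-up flag values, standing in for a bitmask enum such as
// dxbc::StaticSamplerFlags.
constexpr uint32_t FlagA = 0x1;
constexpr uint32_t FlagB = 0x2;
constexpr uint32_t LargestDefined = FlagB;

static constexpr uint32_t nextPowerOf2(uint32_t V) {
  uint32_t P = 1;
  while (P <= V)
    P <<= 1;
  return P;
}

static bool onlyAllowed(uint32_t Flags, uint32_t Mask) {
  // Subset-of-mask check; equivalent to (Flags & ~Mask) == 0.
  return (Flags | Mask) == Mask;
}

int main() {
  // Any bit at or above nextPowerOf2(LargestDefined) is undefined.
  assert(nextPowerOf2(LargestDefined) == 4);
  assert(!onlyAllowed(/*Flags=*/0x4, /*Mask=*/FlagA | FlagB));
  // Combinations of defined bits pass; a bit outside the allowed mask fails.
  assert(onlyAllowed(FlagA | FlagB, FlagA | FlagB));
  assert(!onlyAllowed(FlagA | 0x8, FlagA | FlagB));
  return 0;
}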
FlagT DataFlags = FlagT::DataVolatile | FlagT::DataStatic | @@ -111,6 +113,25 @@ bool verifyDescriptorRangeFlag(uint32_t Version, dxil::ResourceClass Type, return (Flags & ~Mask) == FlagT::None; } +bool verifyStaticSamplerFlags(uint32_t Version, uint32_t FlagsNumber) { + uint32_t LargestValue = llvm::to_underlying( + dxbc::StaticSamplerFlags::LLVM_BITMASK_LARGEST_ENUMERATOR); + if (FlagsNumber >= NextPowerOf2(LargestValue)) + return false; + + dxbc::StaticSamplerFlags Flags = dxbc::StaticSamplerFlags(FlagsNumber); + if (Version <= 2) + return Flags == dxbc::StaticSamplerFlags::None; + + assert(Version == 3 && "Provided invalid root signature version"); + + dxbc::StaticSamplerFlags Mask = + dxbc::StaticSamplerFlags::NonNormalizedCoordinates | + dxbc::StaticSamplerFlags::UintBorderColor | + dxbc::StaticSamplerFlags::None; + return (Flags | Mask) == Mask; +} + bool verifyNumDescriptors(uint32_t NumDescriptors) { return NumDescriptors > 0; } @@ -136,7 +157,6 @@ uint64_t computeRangeBound(uint64_t Offset, uint32_t Size) { return Offset + uint64_t(Size) - 1; } - } // namespace rootsig } // namespace hlsl } // namespace llvm diff --git a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp index cfddc06fbc00b..c4aa2c7638450 100644 --- a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp +++ b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp @@ -8,19 +8,32 @@ #include "llvm/Frontend/Offloading/OffloadWrapper.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" #include "llvm/BinaryFormat/Magic.h" #include "llvm/Frontend/Offloading/Utility.h" #include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" #include "llvm/Object/OffloadBinary.h" #include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LineIterator.h" +#include "llvm/Support/MemoryBufferRef.h" #include "llvm/TargetParser/Triple.h" #include "llvm/Transforms/Utils/ModuleUtils.h" +#include +#include +#include + using namespace llvm; +using namespace llvm::object; using namespace llvm::offloading; namespace { @@ -620,6 +633,384 @@ void createRegisterFatbinFunction(Module &M, GlobalVariable *FatbinDesc, // Add this function to constructors. appendToGlobalCtors(M, CtorFunc, /*Priority=*/101); } + +/// SYCLWrapper helper class that creates all LLVM IRs wrapping given images. +struct SYCLWrapper { + Module &M; + LLVMContext &C; + SYCLJITOptions Options; + + StructType *EntryTy = nullptr; + StructType *SyclDeviceImageTy = nullptr; + StructType *SyclBinDescTy = nullptr; + + SYCLWrapper(Module &M, const SYCLJITOptions &Options) + : M(M), C(M.getContext()), Options(Options) { + EntryTy = offloading::getEntryTy(M); + SyclDeviceImageTy = getSyclDeviceImageTy(); + SyclBinDescTy = getSyclBinDescTy(); + } + + IntegerType *getSizeTTy() { + switch (M.getDataLayout().getPointerSize()) { + case 4: + return Type::getInt32Ty(C); + case 8: + return Type::getInt64Ty(C); + } + llvm_unreachable("unsupported pointer type size"); + } + + SmallVector getSizetConstPair(size_t First, size_t Second) { + IntegerType *SizeTTy = getSizeTTy(); + return SmallVector{ConstantInt::get(SizeTTy, First), + ConstantInt::get(SizeTTy, Second)}; + } + + /// Note: Properties aren't supported and the support is going + /// to be added later. 
+ /// Creates a structure corresponding to: + /// SYCL specific image descriptor type. + /// \code + /// struct __sycl.tgt_device_image { + /// // version of this structure - for backward compatibility; + /// // all modifications which change order/type/offsets of existing fields + /// // should increment the version. + /// uint16_t Version; + /// // the kind of offload model the image employs. + /// uint8_t OffloadKind; + /// // format of the image data - SPIRV, LLVMIR bitcode, etc + /// uint8_t Format; + /// // null-terminated string representation of the device's target + /// // architecture + /// const char *Arch; + /// // a null-terminated string; target- and compiler-specific options + /// // which are suggested to use to "compile" program at runtime + /// const char *CompileOptions; + /// // a null-terminated string; target- and compiler-specific options + /// // which are suggested to use to "link" program at runtime + /// const char *LinkOptions; + /// // Pointer to the device binary image start + /// void *ImageStart; + /// // Pointer to the device binary image end + /// void *ImageEnd; + /// // the entry table + /// __tgt_offload_entry *EntriesBegin; + /// __tgt_offload_entry *EntriesEnd; + /// const char *PropertiesBegin; + /// const char *PropertiesEnd; + /// }; + /// \endcode + StructType *getSyclDeviceImageTy() { + return StructType::create( + { + Type::getInt16Ty(C), // Version + Type::getInt8Ty(C), // OffloadKind + Type::getInt8Ty(C), // Format + PointerType::getUnqual(C), // Arch + PointerType::getUnqual(C), // CompileOptions + PointerType::getUnqual(C), // LinkOptions + PointerType::getUnqual(C), // ImageStart + PointerType::getUnqual(C), // ImageEnd + PointerType::getUnqual(C), // EntriesBegin + PointerType::getUnqual(C), // EntriesEnd + PointerType::getUnqual(C), // PropertiesBegin + PointerType::getUnqual(C) // PropertiesEnd + }, + "__sycl.tgt_device_image"); + } + + /// Creates a structure for SYCL specific binary descriptor type. Corresponds + /// to: + /// + /// \code + /// struct __sycl.tgt_bin_desc { + /// // version of this structure - for backward compatibility; + /// // all modifications which change order/type/offsets of existing fields + /// // should increment the version. + /// uint16_t Version; + /// uint16_t NumDeviceImages; + /// __sycl.tgt_device_image *DeviceImages; + /// // the offload entry table + /// __tgt_offload_entry *HostEntriesBegin; + /// __tgt_offload_entry *HostEntriesEnd; + /// }; + /// \endcode + StructType *getSyclBinDescTy() { + return StructType::create( + {Type::getInt16Ty(C), Type::getInt16Ty(C), PointerType::getUnqual(C), + PointerType::getUnqual(C), PointerType::getUnqual(C)}, + "__sycl.tgt_bin_desc"); + } + + /// Adds a global readonly variable that is initialized by given + /// \p Initializer to the module. + GlobalVariable *addGlobalArrayVariable(const Twine &Name, + ArrayRef Initializer, + const Twine &Section = "") { + auto *Arr = ConstantDataArray::get(M.getContext(), Initializer); + auto *Var = new GlobalVariable(M, Arr->getType(), /*isConstant*/ true, + GlobalVariable::InternalLinkage, Arr, Name); + Var->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); + + SmallVector NameBuf; + auto SectionName = Section.toStringRef(NameBuf); + if (!SectionName.empty()) + Var->setSection(SectionName); + return Var; + } + + /// Adds given \p Buf as a global variable into the module. + /// \returns Pair of pointers that point at the beginning and the end of the + /// variable. 
+ std::pair + addArrayToModule(ArrayRef Buf, const Twine &Name, + const Twine &Section = "") { + auto *Var = addGlobalArrayVariable(Name, Buf, Section); + auto *ImageB = ConstantExpr::getGetElementPtr(Var->getValueType(), Var, + getSizetConstPair(0, 0)); + auto *ImageE = ConstantExpr::getGetElementPtr( + Var->getValueType(), Var, getSizetConstPair(0, Buf.size())); + return std::make_pair(ImageB, ImageE); + } + + /// Adds given \p Data as constant byte array in the module. + /// \returns Constant pointer to the added data. The pointer type does not + /// carry size information. + Constant *addRawDataToModule(ArrayRef Data, const Twine &Name) { + auto *Var = addGlobalArrayVariable(Name, Data); + auto *DataPtr = ConstantExpr::getGetElementPtr(Var->getValueType(), Var, + getSizetConstPair(0, 0)); + return DataPtr; + } + + /// Creates a global variable of const char* type and creates an + /// initializer that initializes it with \p Str. + /// + /// \returns Link-time constant pointer (constant expr) to that + /// variable. + Constant *addStringToModule(StringRef Str, const Twine &Name) { + auto *Arr = ConstantDataArray::getString(C, Str); + auto *Var = new GlobalVariable(M, Arr->getType(), /*isConstant*/ true, + GlobalVariable::InternalLinkage, Arr, Name); + Var->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); + auto *Zero = ConstantInt::get(getSizeTTy(), 0); + Constant *ZeroZero[] = {Zero, Zero}; + return ConstantExpr::getGetElementPtr(Var->getValueType(), Var, ZeroZero); + } + + /// Each image contains its own set of symbols, which may contain different + /// symbols than other images. This function constructs an array of + /// symbol entries for a particular image. + /// + /// \returns Pointers to the beginning and end of the array. + std::pair + initOffloadEntriesPerImage(StringRef Entries, const Twine &OffloadKindTag) { + SmallVector EntriesInits; + std::unique_ptr MB = MemoryBuffer::getMemBuffer( + Entries, /*BufferName*/ "", /*RequiresNullTerminator*/ false); + for (line_iterator LI(*MB); !LI.is_at_eof(); ++LI) { + GlobalVariable *GV = + emitOffloadingEntry(M, /*Kind*/ OffloadKind::OFK_SYCL, + Constant::getNullValue(PointerType::getUnqual(C)), + /*Name*/ *LI, /*Size*/ 0, + /*Flags*/ 0, /*Data*/ 0); + EntriesInits.push_back(GV->getInitializer()); + } + + auto *Arr = ConstantArray::get(ArrayType::get(EntryTy, EntriesInits.size()), + EntriesInits); + auto *EntriesGV = new GlobalVariable(M, Arr->getType(), /*isConstant*/ true, + GlobalVariable::InternalLinkage, Arr, + OffloadKindTag + "entries_arr"); + + auto *EntriesB = ConstantExpr::getGetElementPtr( + EntriesGV->getValueType(), EntriesGV, getSizetConstPair(0, 0)); + auto *EntriesE = ConstantExpr::getGetElementPtr( + EntriesGV->getValueType(), EntriesGV, + getSizetConstPair(0, EntriesInits.size())); + return std::make_pair(EntriesB, EntriesE); + } + + Constant *wrapImage(const OffloadBinary &OB, const Twine &ImageID, + StringRef OffloadKindTag) { + // Note: Intel DPC++ compiler had 2 versions of this structure + // and clang++ has a third different structure. To avoid ABI incompatibility + // between generated device images the Version here starts from 3. 
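Aside on initOffloadEntriesPerImage above: the symbol list arrives as one newline-separated string, and line_iterator walks it without copying. A minimal sketch of that pattern (forEachSymbol and its callback are illustrative, not part of the patch):

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>

// Invoke Fn once per non-blank line of Symbols, without copying the string.
static void forEachSymbol(llvm::StringRef Symbols,
                          llvm::function_ref<void(llvm::StringRef)> Fn) {
  std::unique_ptr<llvm::MemoryBuffer> MB = llvm::MemoryBuffer::getMemBuffer(
      Symbols, /*BufferName=*/"", /*RequiresNullTerminator=*/false);
  for (llvm::line_iterator LI(*MB, /*SkipBlanks=*/true); !LI.is_at_eof(); ++LI)
    Fn(*LI); // *LI is a StringRef covering one symbol name
}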
+ constexpr uint16_t DeviceImageStructVersion = 3; + Constant *Version = + ConstantInt::get(Type::getInt16Ty(C), DeviceImageStructVersion); + Constant *OffloadKindConstant = ConstantInt::get( + Type::getInt8Ty(C), static_cast(OB.getOffloadKind())); + Constant *ImageKindConstant = ConstantInt::get( + Type::getInt8Ty(C), static_cast(OB.getImageKind())); + StringRef Triple = OB.getString("triple"); + Constant *TripleConstant = + addStringToModule(Triple, Twine(OffloadKindTag) + "target." + ImageID); + Constant *CompileOptions = + addStringToModule(Options.CompileOptions, + Twine(OffloadKindTag) + "opts.compile." + ImageID); + Constant *LinkOptions = addStringToModule( + Options.LinkOptions, Twine(OffloadKindTag) + "opts.link." + ImageID); + + // Note: NULL for now. + std::pair PropertiesConstants = { + Constant::getNullValue(PointerType::getUnqual(C)), + Constant::getNullValue(PointerType::getUnqual(C))}; + + StringRef RawImage = OB.getImage(); + std::pair Binary = addArrayToModule( + ArrayRef(RawImage.begin(), RawImage.end()), + Twine(OffloadKindTag) + ImageID + ".data", ".llvm.offloading"); + + // For SYCL images offload entries are defined here per image. + std::pair ImageEntriesPtrs = + initOffloadEntriesPerImage(OB.getString("symbols"), OffloadKindTag); + Constant *WrappedBinary = ConstantStruct::get( + SyclDeviceImageTy, Version, OffloadKindConstant, ImageKindConstant, + TripleConstant, CompileOptions, LinkOptions, Binary.first, + Binary.second, ImageEntriesPtrs.first, ImageEntriesPtrs.second, + PropertiesConstants.first, PropertiesConstants.second); + + return WrappedBinary; + } + + GlobalVariable *combineWrappedImages(ArrayRef WrappedImages, + StringRef OffloadKindTag) { + auto *ImagesData = ConstantArray::get( + ArrayType::get(SyclDeviceImageTy, WrappedImages.size()), WrappedImages); + auto *ImagesGV = + new GlobalVariable(M, ImagesData->getType(), /*isConstant*/ true, + GlobalValue::InternalLinkage, ImagesData, + Twine(OffloadKindTag) + "device_images"); + ImagesGV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); + + auto *Zero = ConstantInt::get(getSizeTTy(), 0); + Constant *ZeroZero[] = {Zero, Zero}; + auto *ImagesB = ConstantExpr::getGetElementPtr(ImagesGV->getValueType(), + ImagesGV, ZeroZero); + + Constant *EntriesB = Constant::getNullValue(PointerType::getUnqual(C)); + Constant *EntriesE = Constant::getNullValue(PointerType::getUnqual(C)); + static constexpr uint16_t BinDescStructVersion = 1; + auto *DescInit = ConstantStruct::get( + SyclBinDescTy, + ConstantInt::get(Type::getInt16Ty(C), BinDescStructVersion), + ConstantInt::get(Type::getInt16Ty(C), WrappedImages.size()), ImagesB, + EntriesB, EntriesE); + + return new GlobalVariable(M, DescInit->getType(), /*isConstant*/ true, + GlobalValue::InternalLinkage, DescInit, + Twine(OffloadKindTag) + "descriptor"); + } + + /// Creates binary descriptor for the given device images. Binary descriptor + /// is an object that is passed to the offloading runtime at program startup + /// and it describes all device images available in the executable or shared + /// library. It is defined as follows: + /// + /// \code + /// __attribute__((visibility("hidden"))) + /// __tgt_offload_entry *__sycl_offload_entries_arr0[]; + /// ... + /// __attribute__((visibility("hidden"))) + /// __tgt_offload_entry *__sycl_offload_entries_arrN[]; + /// + /// __attribute__((visibility("hidden"))) + /// extern const char *CompileOptions = "..."; + /// ... + /// __attribute__((visibility("hidden"))) + /// extern const char *LinkOptions = "..."; + /// ... 
+ /// + /// static const char Image0[] = { ... }; + /// ... + /// static const char ImageN[] = { ... }; + /// + /// static const __sycl.tgt_device_image Images[] = { + /// { + /// Version, // Version + /// OffloadKind, // OffloadKind + /// Format, // Format of the image. + // TripleString, // Arch + /// CompileOptions, // CompileOptions + /// LinkOptions, // LinkOptions + /// Image0, // ImageStart + /// Image0 + IMAGE0_SIZE, // ImageEnd + /// __sycl_offload_entries_arr0, // EntriesBegin + /// __sycl_offload_entries_arr0 + ENTRIES0_SIZE, // EntriesEnd + /// NULL, // PropertiesBegin + /// NULL, // PropertiesEnd + /// }, + /// ... + /// }; + /// + /// static const __sycl.tgt_bin_desc FatbinDesc = { + /// Version, //Version + /// sizeof(Images) / sizeof(Images[0]), //NumDeviceImages + /// Images, //DeviceImages + /// NULL, //HostEntriesBegin + /// NULL //HostEntriesEnd + /// }; + /// \endcode + /// + /// \returns Global variable that represents FatbinDesc. + GlobalVariable *createFatbinDesc(ArrayRef OffloadFiles) { + StringRef OffloadKindTag = ".sycl_offloading."; + SmallVector WrappedImages; + WrappedImages.reserve(OffloadFiles.size()); + for (size_t I = 0, E = OffloadFiles.size(); I != E; ++I) + WrappedImages.push_back( + wrapImage(*OffloadFiles[I].getBinary(), Twine(I), OffloadKindTag)); + + return combineWrappedImages(WrappedImages, OffloadKindTag); + } + + void createRegisterFatbinFunction(GlobalVariable *FatbinDesc) { + auto *FuncTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg*/ false); + auto *Func = Function::Create(FuncTy, GlobalValue::InternalLinkage, + Twine("sycl") + ".descriptor_reg", &M); + Func->setSection(".text.startup"); + + // Get RegFuncName function declaration. + auto *RegFuncTy = + FunctionType::get(Type::getVoidTy(C), PointerType::getUnqual(C), + /*isVarArg=*/false); + FunctionCallee RegFuncC = + M.getOrInsertFunction("__sycl_register_lib", RegFuncTy); + + // Construct function body + IRBuilder Builder(BasicBlock::Create(C, "entry", Func)); + Builder.CreateCall(RegFuncC, FatbinDesc); + Builder.CreateRetVoid(); + + // Add this function to constructors. + appendToGlobalCtors(M, Func, /*Priority*/ 1); + } + + void createUnregisterFunction(GlobalVariable *FatbinDesc) { + auto *FuncTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg*/ false); + auto *Func = Function::Create(FuncTy, GlobalValue::InternalLinkage, + "sycl.descriptor_unreg", &M); + Func->setSection(".text.startup"); + + // Get UnregFuncName function declaration. + auto *UnRegFuncTy = + FunctionType::get(Type::getVoidTy(C), PointerType::getUnqual(C), + /*isVarArg=*/false); + FunctionCallee UnRegFuncC = + M.getOrInsertFunction("__sycl_unregister_lib", UnRegFuncTy); + + // Construct function body + IRBuilder<> Builder(BasicBlock::Create(C, "entry", Func)); + Builder.CreateCall(UnRegFuncC, FatbinDesc); + Builder.CreateRetVoid(); + + // Add this function to global destructors. 
+ appendToGlobalDtors(M, Func, /*Priority*/ 1); + } +}; // end of SYCLWrapper + } // namespace Error offloading::wrapOpenMPBinaries(Module &M, ArrayRef> Images, @@ -660,3 +1051,22 @@ Error offloading::wrapHIPBinary(Module &M, ArrayRef Image, EmitSurfacesAndTextures); return Error::success(); } + +Error llvm::offloading::wrapSYCLBinaries(llvm::Module &M, ArrayRef Buffer, + SYCLJITOptions Options) { + SYCLWrapper W(M, Options); + MemoryBufferRef MBR(StringRef(Buffer.begin(), Buffer.size()), + /*Identifier*/ ""); + SmallVector OffloadFiles; + if (Error E = extractOffloadBinaries(MBR, OffloadFiles)) + return E; + + GlobalVariable *Desc = W.createFatbinDesc(OffloadFiles); + if (!Desc) + return createStringError(inconvertibleErrorCode(), + "No binary descriptors created."); + + W.createRegisterFatbinFunction(Desc); + W.createUnregisterFunction(Desc); + return Error::success(); +} diff --git a/llvm/lib/Frontend/Offloading/Utility.cpp b/llvm/lib/Frontend/Offloading/Utility.cpp index 4b75307ca0b6c..5000488a52f37 100644 --- a/llvm/lib/Frontend/Offloading/Utility.cpp +++ b/llvm/lib/Frontend/Offloading/Utility.cpp @@ -82,11 +82,11 @@ offloading::getOffloadingEntryInitializer(Module &M, object::OffloadKind Kind, return {EntryInitializer, Str}; } -void offloading::emitOffloadingEntry(Module &M, object::OffloadKind Kind, - Constant *Addr, StringRef Name, - uint64_t Size, uint32_t Flags, - uint64_t Data, Constant *AuxAddr, - StringRef SectionName) { +GlobalVariable * +offloading::emitOffloadingEntry(Module &M, object::OffloadKind Kind, + Constant *Addr, StringRef Name, uint64_t Size, + uint32_t Flags, uint64_t Data, + Constant *AuxAddr, StringRef SectionName) { const llvm::Triple &Triple = M.getTargetTriple(); auto [EntryInitializer, NameGV] = getOffloadingEntryInitializer( @@ -106,6 +106,7 @@ void offloading::emitOffloadingEntry(Module &M, object::OffloadKind Kind, else Entry->setSection(SectionName); Entry->setAlignment(Align(object::OffloadBinary::getAlignment())); + return Entry; } std::pair diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp index 9b67465faab0b..5980ee35a5cd2 100644 --- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp +++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp @@ -4979,7 +4979,7 @@ static void createTargetLoopWorkshareCall(OpenMPIRBuilder *OMPBuilder, WorksharingLoopType LoopType, BasicBlock *InsertBlock, Value *Ident, Value *LoopBodyArg, Value *TripCount, - Function &LoopBodyFn) { + Function &LoopBodyFn, bool NoLoop) { Type *TripCountTy = TripCount->getType(); Module &M = OMPBuilder->M; IRBuilder<> &Builder = OMPBuilder->Builder; @@ -5007,8 +5007,10 @@ static void createTargetLoopWorkshareCall(OpenMPIRBuilder *OMPBuilder, RealArgs.push_back(ConstantInt::get(TripCountTy, 0)); if (LoopType == WorksharingLoopType::DistributeForStaticLoop) { RealArgs.push_back(ConstantInt::get(TripCountTy, 0)); + RealArgs.push_back(ConstantInt::get(Builder.getInt8Ty(), NoLoop)); + } else { + RealArgs.push_back(ConstantInt::get(Builder.getInt8Ty(), 0)); } - RealArgs.push_back(ConstantInt::get(Builder.getInt8Ty(), 0)); Builder.CreateCall(RTLFn, RealArgs); } @@ -5016,7 +5018,7 @@ static void createTargetLoopWorkshareCall(OpenMPIRBuilder *OMPBuilder, static void workshareLoopTargetCallback( OpenMPIRBuilder *OMPIRBuilder, CanonicalLoopInfo *CLI, Value *Ident, Function &OutlinedFn, const SmallVector &ToBeDeleted, - WorksharingLoopType LoopType) { + WorksharingLoopType LoopType, bool NoLoop) { IRBuilder<> &Builder = OMPIRBuilder->Builder; BasicBlock *Preheader = 
CLI->getPreheader(); Value *TripCount = CLI->getTripCount(); @@ -5063,17 +5065,16 @@ static void workshareLoopTargetCallback( OutlinedFnCallInstruction->eraseFromParent(); createTargetLoopWorkshareCall(OMPIRBuilder, LoopType, Preheader, Ident, - LoopBodyArg, TripCount, OutlinedFn); + LoopBodyArg, TripCount, OutlinedFn, NoLoop); for (auto &ToBeDeletedItem : ToBeDeleted) ToBeDeletedItem->eraseFromParent(); CLI->invalidate(); } -OpenMPIRBuilder::InsertPointTy -OpenMPIRBuilder::applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI, - InsertPointTy AllocaIP, - WorksharingLoopType LoopType) { +OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoopTarget( + DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, + WorksharingLoopType LoopType, bool NoLoop) { uint32_t SrcLocStrSize; Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); @@ -5156,7 +5157,7 @@ OpenMPIRBuilder::applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI, OI.PostOutlineCB = [=, ToBeDeletedVec = std::move(ToBeDeleted)](Function &OutlinedFn) { workshareLoopTargetCallback(this, CLI, Ident, OutlinedFn, ToBeDeletedVec, - LoopType); + LoopType, NoLoop); }; addOutlineInfo(std::move(OI)); return CLI->getAfterIP(); @@ -5167,9 +5168,9 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::applyWorkshareLoop( bool NeedsBarrier, omp::ScheduleKind SchedKind, Value *ChunkSize, bool HasSimdModifier, bool HasMonotonicModifier, bool HasNonmonotonicModifier, bool HasOrderedClause, - WorksharingLoopType LoopType) { + WorksharingLoopType LoopType, bool NoLoop) { if (Config.isTargetDevice()) - return applyWorkshareLoopTarget(DL, CLI, AllocaIP, LoopType); + return applyWorkshareLoopTarget(DL, CLI, AllocaIP, LoopType, NoLoop); OMPScheduleType EffectiveScheduleType = computeOpenMPScheduleType( SchedKind, ChunkSize, HasSimdModifier, HasMonotonicModifier, HasNonmonotonicModifier, HasOrderedClause); @@ -10312,17 +10313,19 @@ void OffloadEntriesInfoManager::getTargetRegionEntryFnName( TargetRegionEntryInfo OpenMPIRBuilder::getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack, + vfs::FileSystem &VFS, StringRef ParentName) { sys::fs::UniqueID ID(0xdeadf17e, 0); auto FileIDInfo = CallBack(); uint64_t FileID = 0; - std::error_code EC = sys::fs::getUniqueID(std::get<0>(FileIDInfo), ID); - // If the inode ID could not be determined, create a hash value - // the current file name and use that as an ID. - if (EC) + if (ErrorOr Status = VFS.status(std::get<0>(FileIDInfo))) { + ID = Status->getUniqueID(); + FileID = Status->getUniqueID().getFile(); + } else { + // If the inode ID could not be determined, create a hash value + // the current file name and use that as an ID. 
FileID = hash_value(std::get<0>(FileIDInfo)); - else - FileID = ID.getFile(); + } return TargetRegionEntryInfo(ParentName, ID.getDevice(), FileID, std::get<1>(FileIDInfo)); diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index 1a518305cffbe..245129f3f791f 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -127,7 +127,7 @@ static void orderValue(const Value *V, OrderMap &OM) { if (OM.lookup(V)) return; - if (const Constant *C = dyn_cast(V)) { + if (const auto *C = dyn_cast(V)) { if (isa(C)) return; @@ -146,17 +146,17 @@ static void orderValue(const Value *V, OrderMap &OM) { static OrderMap orderModule(const Module *M) { OrderMap OM; - auto orderConstantValue = [&OM](const Value *V) { + auto OrderConstantValue = [&OM](const Value *V) { if (isa(V) || isa(V)) orderValue(V, OM); }; auto OrderConstantFromMetadata = [&](Metadata *MD) { if (const auto *VAM = dyn_cast(MD)) { - orderConstantValue(VAM->getValue()); + OrderConstantValue(VAM->getValue()); } else if (const auto *AL = dyn_cast(MD)) { for (const auto *VAM : AL->getArgs()) - orderConstantValue(VAM->getValue()); + OrderConstantValue(VAM->getValue()); } }; @@ -302,18 +302,18 @@ static UseListOrderMap predictUseListOrder(const Module *M) { } static const Module *getModuleFromVal(const Value *V) { - if (const Argument *MA = dyn_cast(V)) + if (const auto *MA = dyn_cast(V)) return MA->getParent() ? MA->getParent()->getParent() : nullptr; - if (const BasicBlock *BB = dyn_cast(V)) + if (const auto *BB = dyn_cast(V)) return BB->getParent() ? BB->getParent()->getParent() : nullptr; - if (const Instruction *I = dyn_cast(V)) { + if (const auto *I = dyn_cast(V)) { const Function *M = I->getParent() ? I->getParent()->getParent() : nullptr; return M ? M->getParent() : nullptr; } - if (const GlobalValue *GV = dyn_cast(V)) + if (const auto *GV = dyn_cast(V)) return GV->getParent(); if (const auto *MAV = dyn_cast(V)) { @@ -337,7 +337,7 @@ static const Module *getModuleFromDPI(const DbgRecord *DR) { return DR->getMarker() ? getModuleFromDPI(DR->getMarker()) : nullptr; } -static void PrintCallingConv(unsigned cc, raw_ostream &Out) { +static void printCallingConv(unsigned cc, raw_ostream &Out) { switch (cc) { default: Out << "cc" << cc; break; case CallingConv::Fast: Out << "fastcc"; break; @@ -484,7 +484,7 @@ void llvm::printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name) { /// Turn the specified name into an 'LLVM name', which is either prefixed with % /// (if the string only contains simple characters) or is surrounded with ""'s /// (if it has special chars in it). Print it out. -static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) { +static void printLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) { switch (Prefix) { case NoPrefix: break; @@ -506,29 +506,25 @@ static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) { /// Turn the specified name into an 'LLVM name', which is either prefixed with % /// (if the string only contains simple characters) or is surrounded with ""'s /// (if it has special chars in it). Print it out. -static void PrintLLVMName(raw_ostream &OS, const Value *V) { - PrintLLVMName(OS, V->getName(), +static void printLLVMName(raw_ostream &OS, const Value *V) { + printLLVMName(OS, V->getName(), isa(V) ? 
GlobalPrefix : LocalPrefix); } -static void PrintShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef Mask) { +static void printShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef Mask) { Out << ", <"; if (isa(Ty)) Out << "vscale x "; Out << Mask.size() << " x i32> "; - bool FirstElt = true; if (all_of(Mask, [](int Elt) { return Elt == 0; })) { Out << "zeroinitializer"; } else if (all_of(Mask, [](int Elt) { return Elt == PoisonMaskElem; })) { Out << "poison"; } else { Out << "<"; + ListSeparator LS; for (int Elt : Mask) { - if (FirstElt) - FirstElt = false; - else - Out << ", "; - Out << "i32 "; + Out << LS << "i32 "; if (Elt == PoisonMaskElem) Out << "poison"; else @@ -672,7 +668,7 @@ void TypePrinting::print(Type *Ty, raw_ostream &OS) { return printStructBody(STy, OS); if (!STy->getName().empty()) - return PrintLLVMName(OS, STy->getName(), LocalPrefix); + return printLLVMName(OS, STy->getName(), LocalPrefix); incorporateTypes(); const auto I = Type2Number.find(STy); @@ -1003,26 +999,26 @@ void ModuleSlotTracker::setProcessHook( } static SlotTracker *createSlotTracker(const Value *V) { - if (const Argument *FA = dyn_cast(V)) + if (const auto *FA = dyn_cast(V)) return new SlotTracker(FA->getParent()); - if (const Instruction *I = dyn_cast(V)) + if (const auto *I = dyn_cast(V)) if (I->getParent()) return new SlotTracker(I->getParent()->getParent()); - if (const BasicBlock *BB = dyn_cast(V)) + if (const auto *BB = dyn_cast(V)) return new SlotTracker(BB->getParent()); - if (const GlobalVariable *GV = dyn_cast(V)) + if (const auto *GV = dyn_cast(V)) return new SlotTracker(GV->getParent()); - if (const GlobalAlias *GA = dyn_cast(V)) + if (const auto *GA = dyn_cast(V)) return new SlotTracker(GA->getParent()); - if (const GlobalIFunc *GIF = dyn_cast(V)) + if (const auto *GIF = dyn_cast(V)) return new SlotTracker(GIF->getParent()); - if (const Function *Func = dyn_cast(V)) + if (const auto *Func = dyn_cast(V)) return new SlotTracker(Func); return nullptr; @@ -1222,7 +1218,7 @@ void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) { // but we can have faulty metadata from debug-intrinsic days being // autoupgraded into debug records. This gets caught by the verifier, which // then will print the faulty IR, hitting this code path. - if (const DbgVariableRecord *DVR = dyn_cast(&DR)) { + if (const auto *DVR = dyn_cast(&DR)) { // Process metadata used by DbgRecords; we only specifically care about the // DILocalVariable, DILocation, and DIAssignID fields, as the Value and // Expression fields should only be printed inline and so do not use a slot. @@ -1237,7 +1233,7 @@ void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) { if (auto *Empty = dyn_cast_if_present(DVR->getRawAddress())) CreateMetadataSlot(Empty); } - } else if (const DbgLabelRecord *DLR = dyn_cast(&DR)) { + } else if (const auto *DLR = dyn_cast(&DR)) { CreateMetadataSlot(DLR->getRawLabel()); } else { llvm_unreachable("unsupported DbgRecord kind"); @@ -1248,12 +1244,12 @@ void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) { void SlotTracker::processInstructionMetadata(const Instruction &I) { // Process metadata used directly by intrinsics. - if (const CallInst *CI = dyn_cast(&I)) + if (const auto *CI = dyn_cast(&I)) if (Function *F = CI->getCalledFunction()) if (F->isIntrinsic()) for (auto &Op : I.operands()) if (auto *V = dyn_cast_or_null(Op)) - if (MDNode *N = dyn_cast(V->getMetadata())) + if (auto *N = dyn_cast(V->getMetadata())) CreateMetadataSlot(N); // Process metadata attached to this instruction. 
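Aside on the separator changes running through these AsmWriter hunks: the hand-rolled FirstElt/FirstBundle flags and the local FieldSeparator struct are replaced by ListSeparator, which prints nothing the first time it is streamed and the separator on every later use. A minimal sketch of the idiom (printCSV is illustrative):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"

// Prints "1, 2, 3" for {1, 2, 3}; no leading or trailing separator to manage.
static void printCSV(llvm::ArrayRef<int> Vals, llvm::raw_ostream &OS) {
  llvm::ListSeparator LS; // defaults to ", "; other strings such as " | " work too
  for (int V : Vals)
    OS << LS << V;
}

Some call sites in the patch (printArgs) go a step further and stream the whole range at once with llvm::interleaved(Args).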
@@ -1410,7 +1406,7 @@ void SlotTracker::CreateMetadataSlot(const MDNode *N) { // Recursively add any MDNodes referenced by operands. for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) - if (const MDNode *Op = dyn_cast_or_null(N->getOperand(i))) + if (const auto *Op = dyn_cast_or_null(N->getOperand(i))) CreateMetadataSlot(Op); } @@ -1468,32 +1464,30 @@ struct AsmWriterContext { // AsmWriter Implementation //===----------------------------------------------------------------------===// -static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, - AsmWriterContext &WriterCtx); +static void writeAsOperandInternal(raw_ostream &Out, const Value *V, + AsmWriterContext &WriterCtx, + bool PrintType = false); -static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, +static void writeAsOperandInternal(raw_ostream &Out, const Metadata *MD, AsmWriterContext &WriterCtx, bool FromValue = false); -static void WriteOptimizationInfo(raw_ostream &Out, const User *U) { - if (const FPMathOperator *FPO = dyn_cast(U)) +static void writeOptimizationInfo(raw_ostream &Out, const User *U) { + if (const auto *FPO = dyn_cast(U)) Out << FPO->getFastMathFlags(); - if (const OverflowingBinaryOperator *OBO = - dyn_cast(U)) { + if (const auto *OBO = dyn_cast(U)) { if (OBO->hasNoUnsignedWrap()) Out << " nuw"; if (OBO->hasNoSignedWrap()) Out << " nsw"; - } else if (const PossiblyExactOperator *Div = - dyn_cast(U)) { + } else if (const auto *Div = dyn_cast(U)) { if (Div->isExact()) Out << " exact"; - } else if (const PossiblyDisjointInst *PDI = - dyn_cast(U)) { + } else if (const auto *PDI = dyn_cast(U)) { if (PDI->isDisjoint()) Out << " disjoint"; - } else if (const GEPOperator *GEP = dyn_cast(U)) { + } else if (const auto *GEP = dyn_cast(U)) { if (GEP->isInBounds()) Out << " inbounds"; else if (GEP->hasNoUnsignedSignedWrap()) @@ -1518,7 +1512,7 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) { } } -static void WriteAPFloatInternal(raw_ostream &Out, const APFloat &APF) { +static void writeAPFloatInternal(raw_ostream &Out, const APFloat &APF) { if (&APF.getSemantics() == &APFloat::IEEEsingle() || &APF.getSemantics() == &APFloat::IEEEdouble()) { // We would like to output the FP constant value in exponential notation, @@ -1611,9 +1605,9 @@ static void WriteAPFloatInternal(raw_ostream &Out, const APFloat &APF) { llvm_unreachable("Unsupported floating point type"); } -static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, +static void writeConstantInternal(raw_ostream &Out, const Constant *CV, AsmWriterContext &WriterCtx) { - if (const ConstantInt *CI = dyn_cast(CV)) { + if (const auto *CI = dyn_cast(CV)) { Type *Ty = CI->getType(); if (Ty->isVectorTy()) { @@ -1633,7 +1627,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, return; } - if (const ConstantFP *CFP = dyn_cast(CV)) { + if (const auto *CFP = dyn_cast(CV)) { Type *Ty = CFP->getType(); if (Ty->isVectorTy()) { @@ -1642,7 +1636,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, Out << " "; } - WriteAPFloatInternal(Out, CFP->getValueAPF()); + writeAPFloatInternal(Out, CFP->getValueAPF()); if (Ty->isVectorTy()) Out << ")"; @@ -1655,28 +1649,28 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, return; } - if (const BlockAddress *BA = dyn_cast(CV)) { + if (const auto *BA = dyn_cast(CV)) { Out << "blockaddress("; - WriteAsOperandInternal(Out, BA->getFunction(), WriterCtx); + writeAsOperandInternal(Out, BA->getFunction(), 
WriterCtx); Out << ", "; - WriteAsOperandInternal(Out, BA->getBasicBlock(), WriterCtx); + writeAsOperandInternal(Out, BA->getBasicBlock(), WriterCtx); Out << ")"; return; } if (const auto *Equiv = dyn_cast(CV)) { Out << "dso_local_equivalent "; - WriteAsOperandInternal(Out, Equiv->getGlobalValue(), WriterCtx); + writeAsOperandInternal(Out, Equiv->getGlobalValue(), WriterCtx); return; } if (const auto *NC = dyn_cast(CV)) { Out << "no_cfi "; - WriteAsOperandInternal(Out, NC->getGlobalValue(), WriterCtx); + writeAsOperandInternal(Out, NC->getGlobalValue(), WriterCtx); return; } - if (const ConstantPtrAuth *CPA = dyn_cast(CV)) { + if (const auto *CPA = dyn_cast(CV)) { Out << "ptrauth ("; // ptrauth (ptr CST, i32 KEY[, i64 DISC[, ptr ADDRDISC]?]?) @@ -1689,31 +1683,25 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, ListSeparator LS; for (unsigned i = 0, e = NumOpsToWrite; i != e; ++i) { Out << LS; - WriterCtx.TypePrinter->print(CPA->getOperand(i)->getType(), Out); - Out << ' '; - WriteAsOperandInternal(Out, CPA->getOperand(i), WriterCtx); + writeAsOperandInternal(Out, CPA->getOperand(i), WriterCtx, + /*PrintType=*/true); } Out << ')'; return; } - if (const ConstantArray *CA = dyn_cast(CV)) { - Type *ETy = CA->getType()->getElementType(); + if (const auto *CA = dyn_cast(CV)) { Out << '['; - WriterCtx.TypePrinter->print(ETy, Out); - Out << ' '; - WriteAsOperandInternal(Out, CA->getOperand(0), WriterCtx); - for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) { - Out << ", "; - WriterCtx.TypePrinter->print(ETy, Out); - Out << ' '; - WriteAsOperandInternal(Out, CA->getOperand(i), WriterCtx); + ListSeparator LS; + for (const Value *Op : CA->operands()) { + Out << LS; + writeAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true); } Out << ']'; return; } - if (const ConstantDataArray *CA = dyn_cast(CV)) { + if (const auto *CA = dyn_cast(CV)) { // As a special case, print the array as a string if it is an array of // i8 with ConstantInt values. 
if (CA->isString()) { @@ -1723,43 +1711,30 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, return; } - Type *ETy = CA->getType()->getElementType(); Out << '['; - WriterCtx.TypePrinter->print(ETy, Out); - Out << ' '; - WriteAsOperandInternal(Out, CA->getElementAsConstant(0), WriterCtx); - for (uint64_t i = 1, e = CA->getNumElements(); i != e; ++i) { - Out << ", "; - WriterCtx.TypePrinter->print(ETy, Out); - Out << ' '; - WriteAsOperandInternal(Out, CA->getElementAsConstant(i), WriterCtx); + ListSeparator LS; + for (uint64_t i = 0, e = CA->getNumElements(); i != e; ++i) { + Out << LS; + writeAsOperandInternal(Out, CA->getElementAsConstant(i), WriterCtx, + /*PrintType=*/true); } Out << ']'; return; } - if (const ConstantStruct *CS = dyn_cast(CV)) { + if (const auto *CS = dyn_cast(CV)) { if (CS->getType()->isPacked()) Out << '<'; Out << '{'; - unsigned N = CS->getNumOperands(); - if (N) { + if (CS->getNumOperands() != 0) { Out << ' '; - WriterCtx.TypePrinter->print(CS->getOperand(0)->getType(), Out); - Out << ' '; - - WriteAsOperandInternal(Out, CS->getOperand(0), WriterCtx); - - for (unsigned i = 1; i < N; i++) { - Out << ", "; - WriterCtx.TypePrinter->print(CS->getOperand(i)->getType(), Out); - Out << ' '; - - WriteAsOperandInternal(Out, CS->getOperand(i), WriterCtx); + ListSeparator LS; + for (const Value *Op : CS->operands()) { + Out << LS; + writeAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true); } Out << ' '; } - Out << '}'; if (CS->getType()->isPacked()) Out << '>'; @@ -1768,7 +1743,6 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, if (isa(CV) || isa(CV)) { auto *CVVTy = cast(CV->getType()); - Type *ETy = CVVTy->getElementType(); // Use the same shorthand for splat vector (i.e. "splat(Ty val)") as is // permitted on IR input to reduce the output changes when enabling @@ -1778,23 +1752,18 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, if (auto *SplatVal = CV->getSplatValue()) { if (isa(SplatVal) || isa(SplatVal)) { Out << "splat ("; - WriterCtx.TypePrinter->print(ETy, Out); - Out << ' '; - WriteAsOperandInternal(Out, SplatVal, WriterCtx); + writeAsOperandInternal(Out, SplatVal, WriterCtx, /*PrintType=*/true); Out << ')'; return; } } Out << '<'; - WriterCtx.TypePrinter->print(ETy, Out); - Out << ' '; - WriteAsOperandInternal(Out, CV->getAggregateElement(0U), WriterCtx); - for (unsigned i = 1, e = CVVTy->getNumElements(); i != e; ++i) { - Out << ", "; - WriterCtx.TypePrinter->print(ETy, Out); - Out << ' '; - WriteAsOperandInternal(Out, CV->getAggregateElement(i), WriterCtx); + ListSeparator LS; + for (unsigned i = 0, e = CVVTy->getNumElements(); i != e; ++i) { + Out << LS; + writeAsOperandInternal(Out, CV->getAggregateElement(i), WriterCtx, + /*PrintType=*/true); } Out << '>'; return; @@ -1820,7 +1789,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, return; } - if (const ConstantExpr *CE = dyn_cast(CV)) { + if (const auto *CE = dyn_cast(CV)) { // Use the same shorthand for splat vector (i.e. "splat(Ty val)") as is // permitted on IR input to reduce the output changes when enabling // UseConstant{Int,FP}ForScalableSplat. 
@@ -1830,9 +1799,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, if (auto *SplatVal = CE->getSplatValue()) { if (isa(SplatVal) || isa(SplatVal)) { Out << "splat ("; - WriterCtx.TypePrinter->print(SplatVal->getType(), Out); - Out << ' '; - WriteAsOperandInternal(Out, SplatVal, WriterCtx); + writeAsOperandInternal(Out, SplatVal, WriterCtx, /*PrintType=*/true); Out << ')'; return; } @@ -1840,21 +1807,18 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, } Out << CE->getOpcodeName(); - WriteOptimizationInfo(Out, CE); + writeOptimizationInfo(Out, CE); Out << " ("; - if (const GEPOperator *GEP = dyn_cast(CE)) { + if (const auto *GEP = dyn_cast(CE)) { WriterCtx.TypePrinter->print(GEP->getSourceElementType(), Out); Out << ", "; } - for (User::const_op_iterator OI = CE->op_begin(); OI != CE->op_end(); - ++OI) { - WriterCtx.TypePrinter->print((*OI)->getType(), Out); - Out << ' '; - WriteAsOperandInternal(Out, *OI, WriterCtx); - if (OI+1 != CE->op_end()) - Out << ", "; + ListSeparator LS; + for (const Value *Op : CE->operands()) { + Out << LS; + writeAsOperandInternal(Out, Op, WriterCtx, /*PrintType=*/true); } if (CE->isCast()) { @@ -1863,7 +1827,7 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, } if (CE->getOpcode() == Instruction::ShuffleVector) - PrintShuffleMask(Out, CE->getType(), CE->getShuffleMask()); + printShuffleMask(Out, CE->getType(), CE->getShuffleMask()); Out << ')'; return; @@ -1875,21 +1839,18 @@ static void WriteConstantInternal(raw_ostream &Out, const Constant *CV, static void writeMDTuple(raw_ostream &Out, const MDTuple *Node, AsmWriterContext &WriterCtx) { Out << "!{"; - for (unsigned mi = 0, me = Node->getNumOperands(); mi != me; ++mi) { - const Metadata *MD = Node->getOperand(mi); - if (!MD) + ListSeparator LS; + for (const Metadata *MD : Node->operands()) { + Out << LS; + if (!MD) { Out << "null"; - else if (auto *MDV = dyn_cast(MD)) { + } else if (auto *MDV = dyn_cast(MD)) { Value *V = MDV->getValue(); - WriterCtx.TypePrinter->print(V->getType(), Out); - Out << ' '; - WriteAsOperandInternal(Out, V, WriterCtx); + writeAsOperandInternal(Out, V, WriterCtx, /*PrintType=*/true); } else { - WriteAsOperandInternal(Out, MD, WriterCtx); + writeAsOperandInternal(Out, MD, WriterCtx); WriterCtx.onWriteMetadataAsOperand(MD); } - if (mi + 1 != me) - Out << ", "; } Out << "}"; @@ -1897,24 +1858,9 @@ static void writeMDTuple(raw_ostream &Out, const MDTuple *Node, namespace { -struct FieldSeparator { - bool Skip = true; - const char *Sep; - - FieldSeparator(const char *Sep = ", ") : Sep(Sep) {} -}; - -raw_ostream &operator<<(raw_ostream &OS, FieldSeparator &FS) { - if (FS.Skip) { - FS.Skip = false; - return OS; - } - return OS << FS.Sep; -} - struct MDFieldPrinter { raw_ostream &Out; - FieldSeparator FS; + ListSeparator FS; AsmWriterContext &WriterCtx; explicit MDFieldPrinter(raw_ostream &Out) @@ -1990,7 +1936,7 @@ static void writeMetadataAsOperand(raw_ostream &Out, const Metadata *MD, Out << "null"; return; } - WriteAsOperandInternal(Out, MD, WriterCtx); + writeAsOperandInternal(Out, MD, WriterCtx); WriterCtx.onWriteMetadataAsOperand(MD); } @@ -2051,7 +1997,7 @@ void MDFieldPrinter::printDIFlags(StringRef Name, DINode::DIFlags Flags) { SmallVector SplitFlags; auto Extra = DINode::splitFlags(Flags, SplitFlags); - FieldSeparator FlagsFS(" | "); + ListSeparator FlagsFS(" | "); for (auto F : SplitFlags) { auto StringF = DINode::getFlagString(F); assert(!StringF.empty() && "Expected valid flag"); @@ -2075,7 +2021,7 @@ 
void MDFieldPrinter::printDISPFlags(StringRef Name, SmallVector SplitFlags; auto Extra = DISubprogram::splitFlags(Flags, SplitFlags); - FieldSeparator FlagsFS(" | "); + ListSeparator FlagsFS(" | "); for (auto F : SplitFlags) { auto StringF = DISubprogram::getFlagString(F); assert(!StringF.empty() && "Expected valid flag"); @@ -2124,7 +2070,7 @@ static void writeGenericDINode(raw_ostream &Out, const GenericDINode *N, Printer.printString("header", N->getHeader()); if (N->getNumDwarfOperands()) { Out << Printer.FS << "operands: {"; - FieldSeparator IFS; + ListSeparator IFS; for (auto &I : N->dwarf_operands()) { Out << IFS; writeMetadataAsOperand(Out, I, WriterCtx); @@ -2638,7 +2584,7 @@ static void writeDILabel(raw_ostream &Out, const DILabel *N, static void writeDIExpression(raw_ostream &Out, const DIExpression *N, AsmWriterContext &WriterCtx) { Out << "!DIExpression("; - FieldSeparator FS; + ListSeparator FS; if (N->isValid()) { for (const DIExpression::ExprOperand &Op : N->expr_ops()) { auto OpStr = dwarf::OperationEncodingString(Op.getOp()); @@ -2666,11 +2612,11 @@ static void writeDIArgList(raw_ostream &Out, const DIArgList *N, assert(FromValue && "Unexpected DIArgList metadata outside of value argument"); Out << "!DIArgList("; - FieldSeparator FS; + ListSeparator FS; MDFieldPrinter Printer(Out, WriterCtx); - for (Metadata *Arg : N->getArgs()) { + for (const Metadata *Arg : N->getArgs()) { Out << FS; - WriteAsOperandInternal(Out, Arg, WriterCtx, true); + writeAsOperandInternal(Out, Arg, WriterCtx, true); } Out << ")"; } @@ -2713,7 +2659,7 @@ static void writeDIImportedEntity(raw_ostream &Out, const DIImportedEntity *N, Out << ")"; } -static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node, +static void writeMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node, AsmWriterContext &Ctx) { if (Node->isDistinct()) Out << "distinct "; @@ -2733,21 +2679,27 @@ static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node, // Full implementation of printing a Value as an operand with support for // TypePrinting, etc. -static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, - AsmWriterContext &WriterCtx) { +static void writeAsOperandInternal(raw_ostream &Out, const Value *V, + AsmWriterContext &WriterCtx, + bool PrintType) { + if (PrintType) { + WriterCtx.TypePrinter->print(V->getType(), Out); + Out << ' '; + } + if (V->hasName()) { - PrintLLVMName(Out, V); + printLLVMName(Out, V); return; } - const Constant *CV = dyn_cast(V); + const auto *CV = dyn_cast(V); if (CV && !isa(CV)) { assert(WriterCtx.TypePrinter && "Constants require TypePrinting!"); - WriteConstantInternal(Out, CV, WriterCtx); + writeConstantInternal(Out, CV, WriterCtx); return; } - if (const InlineAsm *IA = dyn_cast(V)) { + if (const auto *IA = dyn_cast(V)) { Out << "asm "; if (IA->hasSideEffects()) Out << "sideeffect "; @@ -2767,7 +2719,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, } if (auto *MD = dyn_cast(V)) { - WriteAsOperandInternal(Out, MD->getMetadata(), WriterCtx, + writeAsOperandInternal(Out, MD->getMetadata(), WriterCtx, /* FromValue */ true); return; } @@ -2777,7 +2729,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, auto *Machine = WriterCtx.Machine; // If we have a SlotTracker, use it. 
if (Machine) { - if (const GlobalValue *GV = dyn_cast(V)) { + if (const auto *GV = dyn_cast(V)) { Slot = Machine->getGlobalSlot(GV); Prefix = '@'; } else { @@ -2794,7 +2746,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, } } else if ((Machine = createSlotTracker(V))) { // Otherwise, create one to get the # and then destroy it. - if (const GlobalValue *GV = dyn_cast(V)) { + if (const auto *GV = dyn_cast(V)) { Slot = Machine->getGlobalSlot(GV); Prefix = '@'; } else { @@ -2812,21 +2764,21 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, Out << ""; } -static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, +static void writeAsOperandInternal(raw_ostream &Out, const Metadata *MD, AsmWriterContext &WriterCtx, bool FromValue) { // Write DIExpressions and DIArgLists inline when used as a value. Improves // readability of debug info intrinsics. - if (const DIExpression *Expr = dyn_cast(MD)) { + if (const auto *Expr = dyn_cast(MD)) { writeDIExpression(Out, Expr, WriterCtx); return; } - if (const DIArgList *ArgList = dyn_cast(MD)) { + if (const auto *ArgList = dyn_cast(MD)) { writeDIArgList(Out, ArgList, WriterCtx, FromValue); return; } - if (const MDNode *N = dyn_cast(MD)) { + if (const auto *N = dyn_cast(MD)) { std::unique_ptr MachineStorage; SaveAndRestore SARMachine(WriterCtx.Machine); if (!WriterCtx.Machine) { @@ -2835,7 +2787,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, } int Slot = WriterCtx.Machine->getMetadataSlot(N); if (Slot == -1) { - if (const DILocation *Loc = dyn_cast(N)) { + if (const auto *Loc = dyn_cast(N)) { writeDILocation(Out, Loc, WriterCtx); return; } @@ -2847,7 +2799,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, return; } - if (const MDString *MDS = dyn_cast(MD)) { + if (const auto *MDS = dyn_cast(MD)) { Out << "!\""; printEscapedString(MDS->getString(), Out); Out << '"'; @@ -2859,9 +2811,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD, assert((FromValue || !isa(V)) && "Unexpected function-local metadata outside of value argument"); - WriterCtx.TypePrinter->print(V->getValue()->getType(), Out); - Out << ' '; - WriteAsOperandInternal(Out, V->getValue(), WriterCtx); + writeAsOperandInternal(Out, V->getValue(), WriterCtx, /*PrintType=*/true); } namespace { @@ -2936,7 +2886,7 @@ class AssemblyWriter { void printDbgRecord(const DbgRecord &DR); void printDbgRecordLine(const DbgRecord &DR); - void printUseListOrder(const Value *V, const std::vector &Shuffle); + void printUseListOrder(const Value *V, ArrayRef Shuffle); void printUseLists(const Function *F); void printModuleSummaryIndex(); @@ -2948,16 +2898,14 @@ class AssemblyWriter { void printTypeIdSummary(const TypeIdSummary &TIS); void printTypeIdCompatibleVtableSummary(const TypeIdCompatibleVtableInfo &TI); void printTypeTestResolution(const TypeTestResolution &TTRes); - void printArgs(const std::vector &Args); + void printArgs(ArrayRef Args); void printWPDRes(const WholeProgramDevirtResolution &WPDRes); void printTypeIdInfo(const FunctionSummary::TypeIdInfo &TIDInfo); void printVFuncId(const FunctionSummary::VFuncId VFId); - void - printNonConstVCalls(const std::vector &VCallList, - const char *Tag); - void - printConstVCalls(const std::vector &VCallList, - const char *Tag); + void printNonConstVCalls(ArrayRef VCallList, + const char *Tag); + void printConstVCalls(ArrayRef VCallList, + const char *Tag); private: /// Print out metadata attachments. 
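Aside on the signature changes just above (printUseListOrder, printArgs, printNonConstVCalls, printConstVCalls): taking ArrayRef instead of const std::vector& keeps the helpers read-only while letting any contiguous container bind to them without a copy. A minimal sketch (sum is illustrative):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>

// ArrayRef is a non-owning view over contiguous elements.
static int sum(llvm::ArrayRef<int> Vals) {
  int Total = 0;
  for (int V : Vals)
    Total += V;
  return Total;
}

// Both containers convert implicitly, so existing callers stay unchanged:
//   std::vector<int> A{1, 2};          sum(A) == 3
//   llvm::SmallVector<int, 4> B{3, 4}; sum(B) == 7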
@@ -2999,12 +2947,8 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) { Out << ""; return; } - if (PrintType) { - TypePrinter.print(Operand->getType(), Out); - Out << ' '; - } - auto WriterCtx = getContext(); - WriteAsOperandInternal(Out, Operand, WriterCtx); + auto WriteCtx = getContext(); + writeAsOperandInternal(Out, Operand, WriteCtx, PrintType); } void AssemblyWriter::writeSyncScope(const LLVMContext &Context, @@ -3064,7 +3008,7 @@ void AssemblyWriter::writeParamOperand(const Value *Operand, Out << ' '; // Print the operand auto WriterCtx = getContext(); - WriteAsOperandInternal(Out, Operand, WriterCtx); + writeAsOperandInternal(Out, Operand, WriterCtx); } void AssemblyWriter::writeOperandBundles(const CallBase *Call) { @@ -3073,34 +3017,24 @@ void AssemblyWriter::writeOperandBundles(const CallBase *Call) { Out << " [ "; - bool FirstBundle = true; + ListSeparator LS; for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i) { OperandBundleUse BU = Call->getOperandBundleAt(i); - if (!FirstBundle) - Out << ", "; - FirstBundle = false; - - Out << '"'; + Out << LS << '"'; printEscapedString(BU.getTagName(), Out); Out << '"'; Out << '('; - bool FirstInput = true; + ListSeparator InnerLS; auto WriterCtx = getContext(); for (const auto &Input : BU.Inputs) { - if (!FirstInput) - Out << ", "; - FirstInput = false; - + Out << InnerLS; if (Input == nullptr) Out << ""; - else { - TypePrinter.print(Input->getType(), Out); - Out << " "; - WriteAsOperandInternal(Out, Input, WriterCtx); - } + else + writeAsOperandInternal(Out, Input, WriterCtx, /*PrintType=*/true); } Out << ')'; @@ -3229,7 +3163,7 @@ void AssemblyWriter::printModuleSummaryIndex() { Out << "path: \""; printEscapedString(ModPair.first, Out); Out << "\", hash: ("; - FieldSeparator FS; + ListSeparator FS; for (auto Hash : ModPair.second) Out << FS << Hash; Out << "))\n"; @@ -3347,7 +3281,7 @@ void AssemblyWriter::printTypeIdSummary(const TypeIdSummary &TIS) { printTypeTestResolution(TIS.TTRes); if (!TIS.WPDRes.empty()) { Out << ", wpdResolutions: ("; - FieldSeparator FS; + ListSeparator FS; for (auto &WPDRes : TIS.WPDRes) { Out << FS; Out << "(offset: " << WPDRes.first << ", "; @@ -3362,7 +3296,7 @@ void AssemblyWriter::printTypeIdSummary(const TypeIdSummary &TIS) { void AssemblyWriter::printTypeIdCompatibleVtableSummary( const TypeIdCompatibleVtableInfo &TI) { Out << ", summary: ("; - FieldSeparator FS; + ListSeparator FS; for (auto &P : TI) { Out << FS; Out << "(offset: " << P.AddressPointOffset << ", "; @@ -3372,14 +3306,8 @@ void AssemblyWriter::printTypeIdCompatibleVtableSummary( Out << ")"; } -void AssemblyWriter::printArgs(const std::vector &Args) { - Out << "args: ("; - FieldSeparator FS; - for (auto arg : Args) { - Out << FS; - Out << arg; - } - Out << ")"; +void AssemblyWriter::printArgs(ArrayRef Args) { + Out << "args: (" << llvm::interleaved(Args) << ')'; } void AssemblyWriter::printWPDRes(const WholeProgramDevirtResolution &WPDRes) { @@ -3391,7 +3319,7 @@ void AssemblyWriter::printWPDRes(const WholeProgramDevirtResolution &WPDRes) { if (!WPDRes.ResByArg.empty()) { Out << ", resByArg: ("; - FieldSeparator FS; + ListSeparator FS; for (auto &ResByArg : WPDRes.ResByArg) { Out << FS; printArgs(ResByArg.first); @@ -3451,7 +3379,7 @@ void AssemblyWriter::printGlobalVarSummary(const GlobalVarSummary *GS) { if (!VTableFuncs.empty()) { Out << ", vTableFuncs: ("; - FieldSeparator FS; + ListSeparator FS; for (auto &P : VTableFuncs) { Out << FS; Out << "(virtFunc: ^" << 
Machine.getGUIDSlot(P.FuncVI.getGUID()) @@ -3528,7 +3456,7 @@ void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) { if (!FS->calls().empty()) { Out << ", calls: ("; - FieldSeparator IFS; + ListSeparator IFS; for (auto &Call : FS->calls()) { Out << IFS; Out << "(callee: ^" << Machine.getGUIDSlot(Call.first.getGUID()); @@ -3566,22 +3494,22 @@ void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) { if (!FS->allocs().empty()) { Out << ", allocs: ("; - FieldSeparator AFS; + ListSeparator AFS; for (auto &AI : FS->allocs()) { Out << AFS; Out << "(versions: ("; - FieldSeparator VFS; + ListSeparator VFS; for (auto V : AI.Versions) { Out << VFS; Out << AllocTypeName(V); } Out << "), memProf: ("; - FieldSeparator MIBFS; + ListSeparator MIBFS; for (auto &MIB : AI.MIBs) { Out << MIBFS; Out << "(type: " << AllocTypeName((uint8_t)MIB.AllocType); Out << ", stackIds: ("; - FieldSeparator SIDFS; + ListSeparator SIDFS; for (auto Id : MIB.StackIdIndices) { Out << SIDFS; Out << TheIndex->getStackIdAtIndex(Id); @@ -3595,7 +3523,7 @@ void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) { if (!FS->callsites().empty()) { Out << ", callsites: ("; - FieldSeparator SNFS; + ListSeparator SNFS; for (auto &CI : FS->callsites()) { Out << SNFS; if (CI.Callee) @@ -3603,13 +3531,13 @@ void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) { else Out << "(callee: null"; Out << ", clones: ("; - FieldSeparator VFS; + ListSeparator VFS; for (auto V : CI.Clones) { Out << VFS; Out << V; } Out << "), stackIds: ("; - FieldSeparator SIDFS; + ListSeparator SIDFS; for (auto Id : CI.StackIdIndices) { Out << SIDFS; Out << TheIndex->getStackIdAtIndex(Id); @@ -3625,7 +3553,7 @@ void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) { if (!FS->paramAccesses().empty()) { Out << ", params: ("; - FieldSeparator IFS; + ListSeparator IFS; for (auto &PS : FS->paramAccesses()) { Out << IFS; Out << "(param: " << PS.ParamNo; @@ -3633,7 +3561,7 @@ void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) { PrintRange(PS.Use); if (!PS.Calls.empty()) { Out << ", calls: ("; - FieldSeparator IFS; + ListSeparator IFS; for (auto &Call : PS.Calls) { Out << IFS; Out << "(callee: ^" << Machine.getGUIDSlot(Call.Callee.getGUID()); @@ -3653,11 +3581,11 @@ void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) { void AssemblyWriter::printTypeIdInfo( const FunctionSummary::TypeIdInfo &TIDInfo) { Out << ", typeIdInfo: ("; - FieldSeparator TIDFS; + ListSeparator TIDFS; if (!TIDInfo.TypeTests.empty()) { Out << TIDFS; Out << "typeTests: ("; - FieldSeparator FS; + ListSeparator FS; for (auto &GUID : TIDInfo.TypeTests) { auto TidIter = TheIndex->typeIds().equal_range(GUID); if (TidIter.first == TidIter.second) { @@ -3706,7 +3634,7 @@ void AssemblyWriter::printVFuncId(const FunctionSummary::VFuncId VFId) { return; } // Print all type id that correspond to this GUID. 
- FieldSeparator FS; + ListSeparator FS; for (const auto &[GUID, TypeIdPair] : make_range(TidIter)) { Out << FS; Out << "vFuncId: ("; @@ -3719,9 +3647,9 @@ void AssemblyWriter::printVFuncId(const FunctionSummary::VFuncId VFId) { } void AssemblyWriter::printNonConstVCalls( - const std::vector &VCallList, const char *Tag) { + ArrayRef VCallList, const char *Tag) { Out << Tag << ": ("; - FieldSeparator FS; + ListSeparator FS; for (auto &VFuncId : VCallList) { Out << FS; printVFuncId(VFuncId); @@ -3730,10 +3658,9 @@ void AssemblyWriter::printNonConstVCalls( } void AssemblyWriter::printConstVCalls( - const std::vector &VCallList, - const char *Tag) { + ArrayRef VCallList, const char *Tag) { Out << Tag << ": ("; - FieldSeparator FS; + ListSeparator FS; for (auto &ConstVCall : VCallList) { Out << FS; Out << "("; @@ -3774,7 +3701,7 @@ void AssemblyWriter::printSummary(const GlobalValueSummary &Summary) { auto RefList = Summary.refs(); if (!RefList.empty()) { Out << ", refs: ("; - FieldSeparator FS; + ListSeparator FS; for (auto &Ref : RefList) { Out << FS; if (Ref.isReadOnly()) @@ -3797,7 +3724,7 @@ void AssemblyWriter::printSummaryInfo(unsigned Slot, const ValueInfo &VI) { Out << "guid: " << VI.getGUID(); if (!VI.getSummaryList().empty()) { Out << ", summaries: ("; - FieldSeparator FS; + ListSeparator FS; for (auto &Summary : VI.getSummaryList()) { Out << FS; printSummary(*Summary); @@ -3835,13 +3762,11 @@ void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) { Out << '!'; printMetadataIdentifier(NMD->getName(), Out); Out << " = !{"; - for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) { - if (i) - Out << ", "; - + ListSeparator LS; + for (const MDNode *Op : NMD->operands()) { + Out << LS; // Write DIExpressions inline. // FIXME: Ban DIExpressions in NamedMDNodes, they will serve no purpose. 
- MDNode *Op = NMD->getOperand(i); if (auto *Expr = dyn_cast(Op)) { writeDIExpression(Out, Expr, AsmWriterContext::getEmpty()); continue; @@ -3856,7 +3781,7 @@ void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) { Out << "}\n"; } -static void PrintVisibility(GlobalValue::VisibilityTypes Vis, +static void printVisibility(GlobalValue::VisibilityTypes Vis, formatted_raw_ostream &Out) { switch (Vis) { case GlobalValue::DefaultVisibility: break; @@ -3865,13 +3790,13 @@ static void PrintVisibility(GlobalValue::VisibilityTypes Vis, } } -static void PrintDSOLocation(const GlobalValue &GV, +static void printDSOLocation(const GlobalValue &GV, formatted_raw_ostream &Out) { if (GV.isDSOLocal() && !GV.isImplicitDSOLocal()) Out << "dso_local "; } -static void PrintDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT, +static void printDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT, formatted_raw_ostream &Out) { switch (SCT) { case GlobalValue::DefaultStorageClass: break; @@ -3880,7 +3805,7 @@ static void PrintDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT, } } -static void PrintThreadLocalModel(GlobalVariable::ThreadLocalMode TLM, +static void printThreadLocalModel(GlobalVariable::ThreadLocalMode TLM, formatted_raw_ostream &Out) { switch (TLM) { case GlobalVariable::NotThreadLocal: @@ -3926,7 +3851,7 @@ static void maybePrintComdat(formatted_raw_ostream &Out, return; Out << '('; - PrintLLVMName(Out, C->getName(), ComdatPrefix); + printLLVMName(Out, C->getName(), ComdatPrefix); Out << ')'; } @@ -3935,17 +3860,17 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) { Out << "; Materializable\n"; AsmWriterContext WriterCtx(&TypePrinter, &Machine, GV->getParent()); - WriteAsOperandInternal(Out, GV, WriterCtx); + writeAsOperandInternal(Out, GV, WriterCtx); Out << " = "; if (!GV->hasInitializer() && GV->hasExternalLinkage()) Out << "external "; Out << getLinkageNameWithSpace(GV->getLinkage()); - PrintDSOLocation(*GV, Out); - PrintVisibility(GV->getVisibility(), Out); - PrintDLLStorageClass(GV->getDLLStorageClass(), Out); - PrintThreadLocalModel(GV->getThreadLocalMode(), Out); + printDSOLocation(*GV, Out); + printVisibility(GV->getVisibility(), Out); + printDLLStorageClass(GV->getDLLStorageClass(), Out); + printThreadLocalModel(GV->getThreadLocalMode(), Out); StringRef UA = getUnnamedAddrEncoding(GV->getUnnamedAddr()); if (!UA.empty()) Out << UA << ' '; @@ -4026,14 +3951,14 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) { Out << "; Materializable\n"; AsmWriterContext WriterCtx(&TypePrinter, &Machine, GA->getParent()); - WriteAsOperandInternal(Out, GA, WriterCtx); + writeAsOperandInternal(Out, GA, WriterCtx); Out << " = "; Out << getLinkageNameWithSpace(GA->getLinkage()); - PrintDSOLocation(*GA, Out); - PrintVisibility(GA->getVisibility(), Out); - PrintDLLStorageClass(GA->getDLLStorageClass(), Out); - PrintThreadLocalModel(GA->getThreadLocalMode(), Out); + printDSOLocation(*GA, Out); + printVisibility(GA->getVisibility(), Out); + printDLLStorageClass(GA->getDLLStorageClass(), Out); + printThreadLocalModel(GA->getThreadLocalMode(), Out); StringRef UA = getUnnamedAddrEncoding(GA->getUnnamedAddr()); if (!UA.empty()) Out << UA << ' '; @@ -4065,12 +3990,12 @@ void AssemblyWriter::printIFunc(const GlobalIFunc *GI) { Out << "; Materializable\n"; AsmWriterContext WriterCtx(&TypePrinter, &Machine, GI->getParent()); - WriteAsOperandInternal(Out, GI, WriterCtx); + writeAsOperandInternal(Out, GI, WriterCtx); Out << " = "; Out << getLinkageNameWithSpace(GI->getLinkage()); - 
PrintDSOLocation(*GI, Out); - PrintVisibility(GI->getVisibility(), Out); + printDSOLocation(*GI, Out); + printVisibility(GI->getVisibility(), Out); Out << "ifunc "; @@ -4122,7 +4047,7 @@ void AssemblyWriter::printTypeIdentities() { auto &NamedTypes = TypePrinter.getNamedTypes(); for (StructType *NamedType : NamedTypes) { - PrintLLVMName(Out, NamedType->getName(), LocalPrefix); + printLLVMName(Out, NamedType->getName(), LocalPrefix); Out << " = type "; // Make sure we print out at least one level of the type structure, so @@ -4170,13 +4095,13 @@ void AssemblyWriter::printFunction(const Function *F) { Out << "define "; Out << getLinkageNameWithSpace(F->getLinkage()); - PrintDSOLocation(*F, Out); - PrintVisibility(F->getVisibility(), Out); - PrintDLLStorageClass(F->getDLLStorageClass(), Out); + printDSOLocation(*F, Out); + printVisibility(F->getVisibility(), Out); + printDLLStorageClass(F->getDLLStorageClass(), Out); // Print the calling convention. if (F->getCallingConv() != CallingConv::C) { - PrintCallingConv(F->getCallingConv(), Out); + printCallingConv(F->getCallingConv(), Out); Out << " "; } @@ -4186,17 +4111,16 @@ void AssemblyWriter::printFunction(const Function *F) { TypePrinter.print(F->getReturnType(), Out); AsmWriterContext WriterCtx(&TypePrinter, &Machine, F->getParent()); Out << ' '; - WriteAsOperandInternal(Out, F, WriterCtx); + writeAsOperandInternal(Out, F, WriterCtx); Out << '('; // Loop over the arguments, printing them... if (F->isDeclaration() && !IsForDebug) { // We're only interested in the type here - don't print argument names. + ListSeparator LS; for (unsigned I = 0, E = FT->getNumParams(); I != E; ++I) { - // Insert commas as we go... the first arg doesn't get a comma - if (I) - Out << ", "; - // Output type... + Out << LS; + // Output type. TypePrinter.print(FT->getParamType(I), Out); AttributeSet ArgAttrs = Attrs.getParamAttrs(I); @@ -4207,10 +4131,9 @@ void AssemblyWriter::printFunction(const Function *F) { } } else { // The arguments are meaningful here, print them in detail. + ListSeparator LS; for (const Argument &Arg : F->args()) { - // Insert commas as we go... the first arg doesn't get a comma - if (Arg.getArgNo() != 0) - Out << ", "; + Out << LS; printArgument(&Arg, Attrs.getParamAttrs(Arg.getArgNo())); } } @@ -4304,7 +4227,7 @@ void AssemblyWriter::printArgument(const Argument *Arg, AttributeSet Attrs) { // Output name, if available... if (Arg->hasName()) { Out << ' '; - PrintLLVMName(Out, Arg); + printLLVMName(Out, Arg); } else { int Slot = Machine.getLocalSlot(Arg); assert(Slot != -1 && "expect argument in function here"); @@ -4317,7 +4240,7 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) { bool IsEntryBlock = BB->getParent() && BB->isEntryBlock(); if (BB->hasName()) { // Print out the label if it exists... Out << "\n"; - PrintLLVMName(Out, BB->getName(), LabelPrefix); + printLLVMName(Out, BB->getName(), LabelPrefix); Out << ':'; } else if (!IsEntryBlock) { Out << "\n"; @@ -4332,16 +4255,14 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) { // Output predecessors for the block. 
Out.PadToColumn(50); Out << ";"; - const_pred_iterator PI = pred_begin(BB), PE = pred_end(BB); - - if (PI == PE) { + if (pred_empty(BB)) { Out << " No predecessors!"; } else { Out << " preds = "; - writeOperand(*PI, false); - for (++PI; PI != PE; ++PI) { - Out << ", "; - writeOperand(*PI, false); + ListSeparator LS; + for (const BasicBlock *Pred : predecessors(BB)) { + Out << LS; + writeOperand(Pred, false); } } } @@ -4437,7 +4358,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { // Print out name if it exists... if (I.hasName()) { - PrintLLVMName(Out, &I); + printLLVMName(Out, &I); Out << " = "; } else if (!I.getType()->isVoidTy()) { // Print out the def slot taken. @@ -4448,7 +4369,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << '%' << SlotNum << " = "; } - if (const CallInst *CI = dyn_cast(&I)) { + if (const auto *CI = dyn_cast(&I)) { if (CI->isMustTailCall()) Out << "musttail "; else if (CI->isTailCall()) @@ -4476,14 +4397,14 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << " volatile"; // Print out optimization information. - WriteOptimizationInfo(Out, &I); + writeOptimizationInfo(Out, &I); // Print out the compare instruction predicates - if (const CmpInst *CI = dyn_cast(&I)) + if (const auto *CI = dyn_cast(&I)) Out << ' ' << CI->getPredicate(); // Print out the atomicrmw operation - if (const AtomicRMWInst *RMWI = dyn_cast(&I)) + if (const auto *RMWI = dyn_cast(&I)) Out << ' ' << AtomicRMWInst::getOperationName(RMWI->getOperation()); // Print out the type of the operands... @@ -4520,35 +4441,38 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeOperand(Operand, true); Out << ", ["; + ListSeparator LS; for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) { - if (i != 1) - Out << ", "; + Out << LS; writeOperand(I.getOperand(i), true); } Out << ']'; - } else if (const PHINode *PN = dyn_cast(&I)) { + } else if (const auto *PN = dyn_cast(&I)) { Out << ' '; TypePrinter.print(I.getType(), Out); Out << ' '; - for (unsigned op = 0, Eop = PN->getNumIncomingValues(); op < Eop; ++op) { - if (op) Out << ", "; - Out << "[ "; - writeOperand(PN->getIncomingValue(op), false); Out << ", "; - writeOperand(PN->getIncomingBlock(op), false); Out << " ]"; + ListSeparator LS; + for (const auto &[V, Block] : + zip_equal(PN->incoming_values(), PN->blocks())) { + Out << LS << "[ "; + writeOperand(V, false); + Out << ", "; + writeOperand(Block, false); + Out << " ]"; } - } else if (const ExtractValueInst *EVI = dyn_cast(&I)) { + } else if (const auto *EVI = dyn_cast(&I)) { Out << ' '; writeOperand(I.getOperand(0), true); - for (unsigned i : EVI->indices()) - Out << ", " << i; - } else if (const InsertValueInst *IVI = dyn_cast(&I)) { + Out << ", "; + Out << llvm::interleaved(EVI->indices()); + } else if (const auto *IVI = dyn_cast(&I)) { Out << ' '; writeOperand(I.getOperand(0), true); Out << ", "; writeOperand(I.getOperand(1), true); - for (unsigned i : IVI->indices()) - Out << ", " << i; - } else if (const LandingPadInst *LPI = dyn_cast(&I)) { + Out << ", "; + Out << llvm::interleaved(IVI->indices()); + } else if (const auto *LPI = dyn_cast(&I)) { Out << ' '; TypePrinter.print(I.getType(), Out); if (LPI->isCleanup() || LPI->getNumClauses() != 0) @@ -4570,12 +4494,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << " within "; writeOperand(CatchSwitch->getParentPad(), /*PrintType=*/false); Out << " ["; - unsigned Op = 0; + ListSeparator LS; for (const BasicBlock *PadBB : CatchSwitch->handlers()) { - 
if (Op > 0) - Out << ", "; + Out << LS; writeOperand(PadBB, /*PrintType=*/true); - ++Op; } Out << "] unwind "; if (const BasicBlock *UnwindDest = CatchSwitch->getUnwindDest()) @@ -4586,10 +4508,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << " within "; writeOperand(FPI->getParentPad(), /*PrintType=*/false); Out << " ["; - for (unsigned Op = 0, NumOps = FPI->arg_size(); Op < NumOps; ++Op) { - if (Op > 0) - Out << ", "; - writeOperand(FPI->getArgOperand(Op), /*PrintType=*/true); + ListSeparator LS; + for (const Value *Op : FPI->arg_operands()) { + Out << LS; + writeOperand(Op, /*PrintType=*/true); } Out << ']'; } else if (isa(I) && !Operand) { @@ -4609,11 +4531,11 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeOperand(CRI->getOperand(1), /*PrintType=*/true); else Out << "to caller"; - } else if (const CallInst *CI = dyn_cast(&I)) { + } else if (const auto *CI = dyn_cast(&I)) { // Print the calling convention being used. if (CI->getCallingConv() != CallingConv::C) { Out << " "; - PrintCallingConv(CI->getCallingConv(), Out); + printCallingConv(CI->getCallingConv(), Out); } Operand = CI->getCalledOperand(); @@ -4635,9 +4557,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << ' '; writeOperand(Operand, false); Out << '('; + ListSeparator LS; for (unsigned op = 0, Eop = CI->arg_size(); op < Eop; ++op) { - if (op > 0) - Out << ", "; + Out << LS; writeParamOperand(CI->getArgOperand(op), PAL.getParamAttrs(op)); } @@ -4656,7 +4578,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttrs()); writeOperandBundles(CI); - } else if (const InvokeInst *II = dyn_cast(&I)) { + } else if (const auto *II = dyn_cast(&I)) { Operand = II->getCalledOperand(); FunctionType *FTy = II->getFunctionType(); Type *RetTy = FTy->getReturnType(); @@ -4665,7 +4587,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { // Print the calling convention being used. if (II->getCallingConv() != CallingConv::C) { Out << " "; - PrintCallingConv(II->getCallingConv(), Out); + printCallingConv(II->getCallingConv(), Out); } if (PAL.hasRetAttrs()) @@ -4683,9 +4605,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << ' '; writeOperand(Operand, false); Out << '('; + ListSeparator LS; for (unsigned op = 0, Eop = II->arg_size(); op < Eop; ++op) { - if (op) - Out << ", "; + Out << LS; writeParamOperand(II->getArgOperand(op), PAL.getParamAttrs(op)); } @@ -4699,7 +4621,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeOperand(II->getNormalDest(), true); Out << " unwind "; writeOperand(II->getUnwindDest(), true); - } else if (const CallBrInst *CBI = dyn_cast(&I)) { + } else if (const auto *CBI = dyn_cast(&I)) { Operand = CBI->getCalledOperand(); FunctionType *FTy = CBI->getFunctionType(); Type *RetTy = FTy->getReturnType(); @@ -4708,7 +4630,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { // Print the calling convention being used. 
if (CBI->getCallingConv() != CallingConv::C) { Out << " "; - PrintCallingConv(CBI->getCallingConv(), Out); + printCallingConv(CBI->getCallingConv(), Out); } if (PAL.hasRetAttrs()) @@ -4723,9 +4645,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << ' '; writeOperand(Operand, false); Out << '('; + ListSeparator ArgLS; for (unsigned op = 0, Eop = CBI->arg_size(); op < Eop; ++op) { - if (op) - Out << ", "; + Out << ArgLS; writeParamOperand(CBI->getArgOperand(op), PAL.getParamAttrs(op)); } @@ -4738,13 +4660,13 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << "\n to "; writeOperand(CBI->getDefaultDest(), true); Out << " ["; - for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i) { - if (i != 0) - Out << ", "; - writeOperand(CBI->getIndirectDest(i), true); + ListSeparator DestLS; + for (const BasicBlock *Dest : CBI->getIndirectDests()) { + Out << DestLS; + writeOperand(Dest, true); } Out << ']'; - } else if (const AllocaInst *AI = dyn_cast(&I)) { + } else if (const auto *AI = dyn_cast(&I)) { Out << ' '; if (AI->isUsedWithInAlloca()) Out << "inalloca "; @@ -4766,9 +4688,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } unsigned AddrSpace = AI->getAddressSpace(); - if (AddrSpace != 0) { + if (AddrSpace != 0) Out << ", addrspace(" << AddrSpace << ')'; - } } else if (isa(I)) { if (Operand) { Out << ' '; @@ -4783,7 +4704,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } Out << ", "; TypePrinter.print(I.getType(), Out); - } else if (Operand) { // Print the normal way. + } else if (Operand) { // Print the normal way. if (const auto *GEP = dyn_cast(&I)) { Out << ' '; TypePrinter.print(GEP->getSourceElementType(), Out); @@ -4824,35 +4745,36 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } Out << ' '; - for (unsigned i = 0, E = I.getNumOperands(); i != E; ++i) { - if (i) Out << ", "; - writeOperand(I.getOperand(i), PrintAllTypes); + ListSeparator LS; + for (const Value *Op : I.operands()) { + Out << LS; + writeOperand(Op, PrintAllTypes); } } // Print atomic ordering/alignment for memory operations - if (const LoadInst *LI = dyn_cast(&I)) { + if (const auto *LI = dyn_cast(&I)) { if (LI->isAtomic()) writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID()); if (MaybeAlign A = LI->getAlign()) Out << ", align " << A->value(); - } else if (const StoreInst *SI = dyn_cast(&I)) { + } else if (const auto *SI = dyn_cast(&I)) { if (SI->isAtomic()) writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID()); if (MaybeAlign A = SI->getAlign()) Out << ", align " << A->value(); - } else if (const AtomicCmpXchgInst *CXI = dyn_cast(&I)) { + } else if (const auto *CXI = dyn_cast(&I)) { writeAtomicCmpXchg(CXI->getContext(), CXI->getSuccessOrdering(), CXI->getFailureOrdering(), CXI->getSyncScopeID()); Out << ", align " << CXI->getAlign().value(); - } else if (const AtomicRMWInst *RMWI = dyn_cast(&I)) { + } else if (const auto *RMWI = dyn_cast(&I)) { writeAtomic(RMWI->getContext(), RMWI->getOrdering(), RMWI->getSyncScopeID()); Out << ", align " << RMWI->getAlign().value(); - } else if (const FenceInst *FI = dyn_cast(&I)) { + } else if (const auto *FI = dyn_cast(&I)) { writeAtomic(FI->getContext(), FI->getOrdering(), FI->getSyncScopeID()); - } else if (const ShuffleVectorInst *SVI = dyn_cast(&I)) { - PrintShuffleMask(Out, SVI->getType(), SVI->getShuffleMask()); + } else if (const auto *SVI = dyn_cast(&I)) { + printShuffleMask(Out, SVI->getType(), SVI->getShuffleMask()); } // Print Metadata 
info. @@ -4908,7 +4830,7 @@ void AssemblyWriter::printDbgVariableRecord(const DbgVariableRecord &DVR) { if (!M) Out << "(null)"; else - WriteAsOperandInternal(Out, M, WriterCtx, true); + writeAsOperandInternal(Out, M, WriterCtx, true); }; Out << "("; @@ -4942,9 +4864,9 @@ void AssemblyWriter::printDbgRecordLine(const DbgRecord &DR) { void AssemblyWriter::printDbgLabelRecord(const DbgLabelRecord &Label) { auto WriterCtx = getContext(); Out << "#dbg_label("; - WriteAsOperandInternal(Out, Label.getRawLabel(), WriterCtx, true); + writeAsOperandInternal(Out, Label.getRawLabel(), WriterCtx, true); Out << ", "; - WriteAsOperandInternal(Out, Label.getDebugLoc(), WriterCtx, true); + writeAsOperandInternal(Out, Label.getDebugLoc(), WriterCtx, true); Out << ")"; } @@ -4967,7 +4889,7 @@ void AssemblyWriter::printMetadataAttachments( } else Out << "!"; Out << ' '; - WriteAsOperandInternal(Out, I.second, WriterCtx); + writeAsOperandInternal(Out, I.second, WriterCtx); } } @@ -4990,7 +4912,7 @@ void AssemblyWriter::writeAllMDNodes() { void AssemblyWriter::printMDNodeBody(const MDNode *Node) { auto WriterCtx = getContext(); - WriteMDNodeBodyInternal(Out, Node, WriterCtx); + writeMDNodeBodyInternal(Out, Node, WriterCtx); } void AssemblyWriter::writeAttribute(const Attribute &Attr, bool InAttrGroup) { @@ -5009,12 +4931,10 @@ void AssemblyWriter::writeAttribute(const Attribute &Attr, bool InAttrGroup) { void AssemblyWriter::writeAttributeSet(const AttributeSet &AttrSet, bool InAttrGroup) { - bool FirstAttr = true; + ListSeparator LS(" "); for (const auto &Attr : AttrSet) { - if (!FirstAttr) - Out << ' '; + Out << LS; writeAttribute(Attr, InAttrGroup); - FirstAttr = false; } } @@ -5031,7 +4951,7 @@ void AssemblyWriter::writeAllAttributeGroups() { } void AssemblyWriter::printUseListOrder(const Value *V, - const std::vector &Shuffle) { + ArrayRef Shuffle) { bool IsInFunction = Machine.getFunction(); if (IsInFunction) Out << " "; @@ -5120,7 +5040,7 @@ void NamedMDNode::print(raw_ostream &ROS, ModuleSlotTracker &MST, } void Comdat::print(raw_ostream &ROS, bool /*IsForDebug*/) const { - PrintLLVMName(ROS, getName(), ComdatPrefix); + printLLVMName(ROS, getName(), ComdatPrefix); ROS << " = comdat "; switch (getSelectionKind()) { @@ -5152,7 +5072,7 @@ void Type::print(raw_ostream &OS, bool /*IsForDebug*/, bool NoDetails) const { return; // If the type is a named struct type, print the body as well. - if (StructType *STy = dyn_cast(const_cast(this))) + if (auto *STy = dyn_cast(const_cast(this))) if (!STy->isLiteral()) { OS << " = type "; TP.printStructBody(STy, OS); @@ -5188,11 +5108,9 @@ void DbgMarker::print(raw_ostream &ROS, ModuleSlotTracker &MST, SlotTracker EmptySlotTable(static_cast(nullptr)); SlotTracker &SlotTable = MST.getMachine() ? *MST.getMachine() : EmptySlotTable; - auto incorporateFunction = [&](const Function *F) { - if (F) - MST.incorporateFunction(*F); - }; - incorporateFunction(getParent() ? getParent()->getParent() : nullptr); + const Function *F = getParent() ? getParent()->getParent() : nullptr; + if (F) + MST.incorporateFunction(*F); AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug); W.printDbgMarker(*this); } @@ -5209,13 +5127,11 @@ void DbgVariableRecord::print(raw_ostream &ROS, ModuleSlotTracker &MST, SlotTracker EmptySlotTable(static_cast(nullptr)); SlotTracker &SlotTable = MST.getMachine() ? 
*MST.getMachine() : EmptySlotTable; - auto incorporateFunction = [&](const Function *F) { - if (F) - MST.incorporateFunction(*F); - }; - incorporateFunction(Marker && Marker->getParent() + const Function *F = Marker && Marker->getParent() ? Marker->getParent()->getParent() - : nullptr); + : nullptr; + if (F) + MST.incorporateFunction(*F); AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug); W.printDbgVariableRecord(*this); } @@ -5226,12 +5142,11 @@ void DbgLabelRecord::print(raw_ostream &ROS, ModuleSlotTracker &MST, SlotTracker EmptySlotTable(static_cast(nullptr)); SlotTracker &SlotTable = MST.getMachine() ? *MST.getMachine() : EmptySlotTable; - auto incorporateFunction = [&](const Function *F) { - if (F) - MST.incorporateFunction(*F); - }; - incorporateFunction(Marker->getParent() ? Marker->getParent()->getParent() - : nullptr); + const Function *F = + Marker->getParent() ? Marker->getParent()->getParent() : nullptr; + if (F) + MST.incorporateFunction(*F); + AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug); W.printDbgLabelRecord(*this); } @@ -5253,39 +5168,39 @@ void Value::print(raw_ostream &ROS, ModuleSlotTracker &MST, SlotTracker EmptySlotTable(static_cast(nullptr)); SlotTracker &SlotTable = MST.getMachine() ? *MST.getMachine() : EmptySlotTable; - auto incorporateFunction = [&](const Function *F) { + auto IncorporateFunction = [&](const Function *F) { if (F) MST.incorporateFunction(*F); }; - if (const Instruction *I = dyn_cast(this)) { - incorporateFunction(I->getParent() ? I->getParent()->getParent() : nullptr); + if (const auto *I = dyn_cast(this)) { + IncorporateFunction(I->getParent() ? I->getParent()->getParent() : nullptr); AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), nullptr, IsForDebug); W.printInstruction(*I); - } else if (const BasicBlock *BB = dyn_cast(this)) { - incorporateFunction(BB->getParent()); + } else if (const auto *BB = dyn_cast(this)) { + IncorporateFunction(BB->getParent()); AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), nullptr, IsForDebug); W.printBasicBlock(BB); - } else if (const GlobalValue *GV = dyn_cast(this)) { + } else if (const auto *GV = dyn_cast(this)) { AssemblyWriter W(OS, SlotTable, GV->getParent(), nullptr, IsForDebug); - if (const GlobalVariable *V = dyn_cast(GV)) + if (const auto *V = dyn_cast(GV)) W.printGlobal(V); - else if (const Function *F = dyn_cast(GV)) + else if (const auto *F = dyn_cast(GV)) W.printFunction(F); - else if (const GlobalAlias *A = dyn_cast(GV)) + else if (const auto *A = dyn_cast(GV)) W.printAlias(A); - else if (const GlobalIFunc *I = dyn_cast(GV)) + else if (const auto *I = dyn_cast(GV)) W.printIFunc(I); else llvm_unreachable("Unknown GlobalValue to print out!"); - } else if (const MetadataAsValue *V = dyn_cast(this)) { + } else if (const auto *V = dyn_cast(this)) { V->getMetadata()->print(ROS, MST, getModuleFromVal(V)); - } else if (const Constant *C = dyn_cast(this)) { + } else if (const auto *C = dyn_cast(this)) { TypePrinting TypePrinter; TypePrinter.print(C->getType(), OS); OS << ' '; AsmWriterContext WriterCtx(&TypePrinter, MST.getMachine()); - WriteConstantInternal(OS, C, WriterCtx); + writeConstantInternal(OS, C, WriterCtx); } else if (isa(this) || isa(this)) { this->printAsOperand(OS, /* PrintType */ true, MST); } else { @@ -5301,7 +5216,7 @@ static bool printWithoutType(const Value &V, raw_ostream &O, if (V.hasName() || isa(V) || (!isa(V) && !isa(V))) { AsmWriterContext WriterCtx(nullptr, Machine, M); - WriteAsOperandInternal(O, &V, WriterCtx); 
+ writeAsOperandInternal(O, &V, WriterCtx); return true; } return false; @@ -5310,13 +5225,8 @@ static bool printWithoutType(const Value &V, raw_ostream &O, static void printAsOperandImpl(const Value &V, raw_ostream &O, bool PrintType, ModuleSlotTracker &MST) { TypePrinting TypePrinter(MST.getModule()); - if (PrintType) { - TypePrinter.print(V.getType(), O); - O << ' '; - } - AsmWriterContext WriterCtx(&TypePrinter, MST.getMachine(), MST.getModule()); - WriteAsOperandInternal(O, &V, WriterCtx); + writeAsOperandInternal(O, &V, WriterCtx, PrintType); } void Value::printAsOperand(raw_ostream &O, bool PrintType, @@ -5347,14 +5257,14 @@ void Value::printAsOperand(raw_ostream &O, bool PrintType, static void printMetadataImplRec(raw_ostream &ROS, const Metadata &MD, AsmWriterContext &WriterCtx) { formatted_raw_ostream OS(ROS); - WriteAsOperandInternal(OS, &MD, WriterCtx, /* FromValue */ true); + writeAsOperandInternal(OS, &MD, WriterCtx, /* FromValue */ true); auto *N = dyn_cast(&MD); if (!N || isa(MD)) return; OS << " = "; - WriteMDNodeBodyInternal(OS, N, WriterCtx); + writeMDNodeBodyInternal(OS, N, WriterCtx); } namespace { @@ -5415,14 +5325,14 @@ static void printMetadataImpl(raw_ostream &ROS, const Metadata &MD, WriterCtx = std::make_unique(&TypePrinter, MST.getMachine(), M); - WriteAsOperandInternal(OS, &MD, *WriterCtx, /* FromValue */ true); + writeAsOperandInternal(OS, &MD, *WriterCtx, /* FromValue */ true); auto *N = dyn_cast(&MD); if (OnlyAsOperand || !N || isa(MD)) return; OS << " = "; - WriteMDNodeBodyInternal(OS, N, *WriterCtx); + writeMDNodeBodyInternal(OS, N, *WriterCtx); } void Metadata::printAsOperand(raw_ostream &OS, const Module *M) const { diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp index 5385b1f8cac0b..f28b98957cae4 100644 --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -594,6 +594,42 @@ static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name, return false; // No other 'x86.avx512.*'. 
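(Illustrative sketch, not part of the patch.) The hunks just below add table-driven upgrades for the avx2/avx10 vpdpb* intrinsics using the same pattern as the existing avx512 handling: strip a known name prefix, then map the remaining suffix to the replacement intrinsic, treating an unknown suffix within a known family as "nothing to upgrade". The sketch uses hypothetical names (UpgradeKind, upgradeVpdpbName) rather than the real Intrinsic::* IDs.

#include <optional>
#include <string_view>

enum class UpgradeKind { None, Vpdpbssd128, Vpdpbssd256 };

// Strip the family prefix, then map the remaining suffix to the replacement.
// A different prefix means "not ours"; a known prefix with an unknown suffix
// means "known family, no upgrade needed".
static std::optional<UpgradeKind> upgradeVpdpbName(std::string_view Name) {
  constexpr std::string_view Prefix = "avx2.vpdpb";
  if (Name.substr(0, Prefix.size()) != Prefix)
    return std::nullopt; // Different family; let other handlers look at it.
  Name.remove_prefix(Prefix.size());
  if (Name == "ssd.128")
    return UpgradeKind::Vpdpbssd128;
  if (Name == "ssd.256")
    return UpgradeKind::Vpdpbssd256;
  return UpgradeKind::None; // Known family, nothing to upgrade.
}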
} + if (Name.consume_front("avx2.vpdpb")) { + // Added in 21.1 + ID = StringSwitch(Name) + .Case("ssd.128", Intrinsic::x86_avx2_vpdpbssd_128) + .Case("ssd.256", Intrinsic::x86_avx2_vpdpbssd_256) + .Case("ssds.128", Intrinsic::x86_avx2_vpdpbssds_128) + .Case("ssds.256", Intrinsic::x86_avx2_vpdpbssds_256) + .Case("sud.128", Intrinsic::x86_avx2_vpdpbsud_128) + .Case("sud.256", Intrinsic::x86_avx2_vpdpbsud_256) + .Case("suds.128", Intrinsic::x86_avx2_vpdpbsuds_128) + .Case("suds.256", Intrinsic::x86_avx2_vpdpbsuds_256) + .Case("uud.128", Intrinsic::x86_avx2_vpdpbuud_128) + .Case("uud.256", Intrinsic::x86_avx2_vpdpbuud_256) + .Case("uuds.128", Intrinsic::x86_avx2_vpdpbuuds_128) + .Case("uuds.256", Intrinsic::x86_avx2_vpdpbuuds_256) + .Default(Intrinsic::not_intrinsic); + if (ID != Intrinsic::not_intrinsic) + return upgradeX86MultiplyAddBytes(F, ID, NewFn); + return false; // No other 'x86.avx2.*' + } + + if (Name.consume_front("avx10.vpdpb")) { + // Added in 21.1 + ID = StringSwitch(Name) + .Case("ssd.512", Intrinsic::x86_avx10_vpdpbssd_512) + .Case("ssds.512", Intrinsic::x86_avx10_vpdpbssds_512) + .Case("sud.512", Intrinsic::x86_avx10_vpdpbsud_512) + .Case("suds.512", Intrinsic::x86_avx10_vpdpbsuds_512) + .Case("uud.512", Intrinsic::x86_avx10_vpdpbuud_512) + .Case("uuds.512", Intrinsic::x86_avx10_vpdpbuuds_512) + .Default(Intrinsic::not_intrinsic); + if (ID != Intrinsic::not_intrinsic) + return upgradeX86MultiplyAddBytes(F, ID, NewFn); + return false; // No other 'x86.avx10.*' + } + if (Name.consume_front("avx512bf16.")) { // Added in 9.0 ID = StringSwitch(Name) @@ -5224,7 +5260,25 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) { case Intrinsic::x86_avx512_vpdpbusd_512: case Intrinsic::x86_avx512_vpdpbusds_128: case Intrinsic::x86_avx512_vpdpbusds_256: - case Intrinsic::x86_avx512_vpdpbusds_512: { + case Intrinsic::x86_avx512_vpdpbusds_512: + case Intrinsic::x86_avx2_vpdpbssd_128: + case Intrinsic::x86_avx2_vpdpbssd_256: + case Intrinsic::x86_avx10_vpdpbssd_512: + case Intrinsic::x86_avx2_vpdpbssds_128: + case Intrinsic::x86_avx2_vpdpbssds_256: + case Intrinsic::x86_avx10_vpdpbssds_512: + case Intrinsic::x86_avx2_vpdpbsud_128: + case Intrinsic::x86_avx2_vpdpbsud_256: + case Intrinsic::x86_avx10_vpdpbsud_512: + case Intrinsic::x86_avx2_vpdpbsuds_128: + case Intrinsic::x86_avx2_vpdpbsuds_256: + case Intrinsic::x86_avx10_vpdpbsuds_512: + case Intrinsic::x86_avx2_vpdpbuud_128: + case Intrinsic::x86_avx2_vpdpbuud_256: + case Intrinsic::x86_avx10_vpdpbuud_512: + case Intrinsic::x86_avx2_vpdpbuuds_128: + case Intrinsic::x86_avx2_vpdpbuuds_256: + case Intrinsic::x86_avx10_vpdpbuuds_512: { unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() / 8; Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2)}; diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp index 77f9b997a2ebf..49e1f898ca594 100644 --- a/llvm/lib/IR/DataLayout.cpp +++ b/llvm/lib/IR/DataLayout.cpp @@ -151,7 +151,8 @@ bool DataLayout::PointerSpec::operator==(const PointerSpec &Other) const { return AddrSpace == Other.AddrSpace && BitWidth == Other.BitWidth && ABIAlign == Other.ABIAlign && PrefAlign == Other.PrefAlign && IndexBitWidth == Other.IndexBitWidth && - IsNonIntegral == Other.IsNonIntegral; + HasUnstableRepresentation == Other.HasUnstableRepresentation && + HasExternalState == Other.HasExternalState; } namespace { @@ -194,7 +195,7 @@ constexpr DataLayout::PrimitiveSpec DefaultVectorSpecs[] = { // Default pointer type specifications. 
constexpr DataLayout::PointerSpec DefaultPointerSpecs[] = { // p0:64:64:64:64 - {0, 64, Align::Constant<8>(), Align::Constant<8>(), 64, false}, + {0, 64, Align::Constant<8>(), Align::Constant<8>(), 64, false, false}, }; DataLayout::DataLayout() @@ -405,9 +406,29 @@ Error DataLayout::parsePointerSpec(StringRef Spec) { // Address space. Optional, defaults to 0. unsigned AddrSpace = 0; - if (!Components[0].empty()) - if (Error Err = parseAddrSpace(Components[0], AddrSpace)) - return Err; + bool ExternalState = false; + bool UnstableRepr = false; + StringRef AddrSpaceStr = Components[0]; + while (!AddrSpaceStr.empty()) { + char C = AddrSpaceStr.front(); + if (C == 'e') { + ExternalState = true; + } else if (C == 'u') { + UnstableRepr = true; + } else if (isAlpha(C)) { + return createStringError("'%c' is not a valid pointer specification flag", + C); + } else { + break; // not a valid flag, remaining must be the address space number. + } + AddrSpaceStr = AddrSpaceStr.drop_front(1); + } + if (!AddrSpaceStr.empty()) + if (Error Err = parseAddrSpace(AddrSpaceStr, AddrSpace)) + return Err; // Failed to parse the remaining characters as a number + if (AddrSpace == 0 && (ExternalState || UnstableRepr)) + return createStringError( + "address space 0 cannot be unstable or have external state"); // Size. Required, cannot be zero. unsigned BitWidth; @@ -441,7 +462,7 @@ Error DataLayout::parsePointerSpec(StringRef Spec) { "index size cannot be larger than the pointer size"); setPointerSpec(AddrSpace, BitWidth, ABIAlign, PrefAlign, IndexBitWidth, - false); + UnstableRepr, ExternalState); return Error::success(); } @@ -617,7 +638,7 @@ Error DataLayout::parseLayoutString(StringRef LayoutString) { // the spec for AS0, and we then update that to mark it non-integral. const PointerSpec &PS = getPointerSpec(AS); setPointerSpec(AS, PS.BitWidth, PS.ABIAlign, PS.PrefAlign, PS.IndexBitWidth, - true); + /*HasUnstableRepr=*/true, /*HasExternalState=*/false); } return Error::success(); @@ -665,17 +686,20 @@ DataLayout::getPointerSpec(uint32_t AddrSpace) const { void DataLayout::setPointerSpec(uint32_t AddrSpace, uint32_t BitWidth, Align ABIAlign, Align PrefAlign, - uint32_t IndexBitWidth, bool IsNonIntegral) { + uint32_t IndexBitWidth, bool HasUnstableRepr, + bool HasExternalState) { auto I = lower_bound(PointerSpecs, AddrSpace, LessPointerAddrSpace()); if (I == PointerSpecs.end() || I->AddrSpace != AddrSpace) { PointerSpecs.insert(I, PointerSpec{AddrSpace, BitWidth, ABIAlign, PrefAlign, - IndexBitWidth, IsNonIntegral}); + IndexBitWidth, HasUnstableRepr, + HasExternalState}); } else { I->BitWidth = BitWidth; I->ABIAlign = ABIAlign; I->PrefAlign = PrefAlign; I->IndexBitWidth = IndexBitWidth; - I->IsNonIntegral = IsNonIntegral; + I->HasUnstableRepresentation = HasUnstableRepr; + I->HasExternalState = HasExternalState; } } diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp index 166521a276643..f9ded507f8328 100644 --- a/llvm/lib/IR/DebugInfo.cpp +++ b/llvm/lib/IR/DebugInfo.cpp @@ -375,6 +375,38 @@ bool DebugInfoFinder::addScope(DIScope *Scope) { return true; } +/// Recursively handle DILocations in followup metadata etc. +/// +/// TODO: If for example a followup loop metadata would refence itself this +/// function would go into infinite recursion. We do not expect such cycles in +/// the loop metadata (except for the self-referencing first element +/// "LoopID"). However, we could at least handle such situations more gracefully +/// somehow (e.g. by keeping track of visited nodes and dropping metadata). 
+static Metadata *updateLoopMetadataDebugLocationsRecursive( + Metadata *MetadataIn, function_ref Updater) { + const MDTuple *M = dyn_cast_or_null(MetadataIn); + // The loop metadata options should start with a MDString. + if (!M || M->getNumOperands() < 1 || !isa(M->getOperand(0))) + return MetadataIn; + + bool Updated = false; + SmallVector MDs{M->getOperand(0)}; + for (Metadata *MD : llvm::drop_begin(M->operands())) { + if (!MD) { + MDs.push_back(nullptr); + continue; + } + Metadata *NewMD = + Updater(updateLoopMetadataDebugLocationsRecursive(MD, Updater)); + if (NewMD) + MDs.push_back(NewMD); + Updated |= NewMD != MD; + } + + assert(!M->isDistinct() && "M should not be distinct."); + return Updated ? MDNode::get(M->getContext(), MDs) : MetadataIn; +} + static MDNode *updateLoopMetadataDebugLocationsImpl( MDNode *OrigLoopID, function_ref Updater) { assert(OrigLoopID && OrigLoopID->getNumOperands() > 0 && @@ -385,11 +417,11 @@ static MDNode *updateLoopMetadataDebugLocationsImpl( // Save space for the self-referential LoopID. SmallVector MDs = {nullptr}; - for (unsigned i = 1; i < OrigLoopID->getNumOperands(); ++i) { - Metadata *MD = OrigLoopID->getOperand(i); + for (Metadata *MD : llvm::drop_begin(OrigLoopID->operands())) { if (!MD) MDs.push_back(nullptr); - else if (Metadata *NewMD = Updater(MD)) + else if (Metadata *NewMD = Updater( + updateLoopMetadataDebugLocationsRecursive(MD, Updater))) MDs.push_back(NewMD); } diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp index a8bb34f69c629..33ca46ca1c2c6 100644 --- a/llvm/lib/IR/Instruction.cpp +++ b/llvm/lib/IR/Instruction.cpp @@ -30,6 +30,8 @@ #include "llvm/Support/Compiler.h" using namespace llvm; +namespace llvm { + // FIXME: Flag used for an ablation performance test, Issue #147390. Placing it // here because referencing IR should be feasible from anywhere. Will be // removed after the ablation test. @@ -38,6 +40,8 @@ cl::opt ProfcheckDisableMetadataFixes( cl::desc( "Disable metadata propagation fixes discovered through Issue #147390")); +} // end namespace llvm + InsertPosition::InsertPosition(Instruction *InsertBefore) : InsertAt(InsertBefore ? 
InsertBefore->getIterator() : InstListType::iterator()) {} diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index daebf447a2107..941e41f3127d5 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -2847,6 +2847,7 @@ unsigned CastInst::isEliminableCastPair( // FPTRUNC > FloatPt n/a FloatPt n/a // FPEXT < FloatPt n/a FloatPt n/a // PTRTOINT n/a Pointer n/a Integral Unsigned + // PTRTOADDR n/a Pointer n/a Integral Unsigned // INTTOPTR n/a Integral Unsigned Pointer n/a // BITCAST = FirstClass n/a FirstClass n/a // ADDRSPCST n/a Pointer n/a Pointer n/a @@ -2878,7 +2879,7 @@ unsigned CastInst::isEliminableCastPair( { 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt | { 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt | { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr | - { 99,99,99,99,99,99,99,99,99,11,99,99,15, 0}, // IntToPtr | + { 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr | { 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast | { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+ }; @@ -2972,7 +2973,8 @@ unsigned CastInst::isEliminableCastPair( // zext, sext -> zext, because sext can't sign extend after zext return Instruction::ZExt; case 11: { - // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize + // inttoptr, ptrtoint/ptrtoaddr -> bitcast if SrcSize<=PtrSize and + // SrcSize==DstSize if (!MidIntPtrTy) return 0; unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits(); @@ -4139,23 +4141,6 @@ void SwitchInst::growOperands() { growHungoffUses(ReservedSpace); } -MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() { - assert(Changed && "called only if metadata has changed"); - - if (!Weights) - return nullptr; - - assert(SI.getNumSuccessors() == Weights->size() && - "num of prof branch_weights must accord with num of successors"); - - bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; }); - - if (AllZeroes || Weights->size() < 2) - return nullptr; - - return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights); -} - void SwitchInstProfUpdateWrapper::init() { MDNode *ProfileData = getBranchWeightMDNode(SI); if (!ProfileData) diff --git a/llvm/lib/IR/Intrinsics.cpp b/llvm/lib/IR/Intrinsics.cpp index 4d2e8fadff4f7..6797a100ff732 100644 --- a/llvm/lib/IR/Intrinsics.cpp +++ b/llvm/lib/IR/Intrinsics.cpp @@ -207,7 +207,6 @@ DecodeIITType(unsigned &NextElt, ArrayRef Infos, bool IsScalableVector = (LastInfo == IIT_SCALABLE_VEC); IIT_Info Info = IIT_Info(Infos[NextElt++]); - unsigned StructElts = 2; switch (Info) { case IIT_Done: @@ -390,28 +389,9 @@ DecodeIITType(unsigned &NextElt, ArrayRef Infos, case IIT_EMPTYSTRUCT: OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0)); return; - case IIT_STRUCT9: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT8: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT7: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT6: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT5: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT4: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT3: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT2: { + case IIT_STRUCT: { + unsigned StructElts = Infos[NextElt++] + 2; + OutputTable.push_back( IITDescriptor::get(IITDescriptor::Struct, StructElts)); diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp index 57532cd491dd6..335c210c10e1a 100644 --- a/llvm/lib/IR/LLVMContext.cpp +++ b/llvm/lib/IR/LLVMContext.cpp @@ -53,6 +53,8 @@ static 
StringRef knownBundleName(unsigned BundleTagID) { return "kcfi"; case LLVMContext::OB_convergencectrl: return "convergencectrl"; + case LLVMContext::OB_align: + return "align"; default: llvm_unreachable("unknown bundle id"); } @@ -76,7 +78,7 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) { } for (unsigned BundleTagID = LLVMContext::OB_deopt; - BundleTagID <= LLVMContext::OB_convergencectrl; ++BundleTagID) { + BundleTagID <= LLVMContext::OB_LastBundleID; ++BundleTagID) { [[maybe_unused]] const auto *Entry = pImpl->getOrInsertBundleTag(knownBundleName(BundleTagID)); assert(Entry->second == BundleTagID && "operand bundle id drifted!"); diff --git a/llvm/lib/IR/Metadata.cpp b/llvm/lib/IR/Metadata.cpp index 9cfb0ff4d689a..1add0c7930bc9 100644 --- a/llvm/lib/IR/Metadata.cpp +++ b/llvm/lib/IR/Metadata.cpp @@ -48,6 +48,7 @@ #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/ModRef.h" #include #include #include @@ -1435,6 +1436,40 @@ MDNode *MDNode::getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B) { return B; } +CaptureComponents MDNode::toCaptureComponents(const MDNode *MD) { + if (!MD) + return CaptureComponents::All; + + CaptureComponents CC = CaptureComponents::None; + for (Metadata *Op : MD->operands()) { + CaptureComponents Component = + StringSwitch(cast(Op)->getString()) + .Case("address", CaptureComponents::Address) + .Case("address_is_null", CaptureComponents::AddressIsNull) + .Case("provenance", CaptureComponents::Provenance) + .Case("read_provenance", CaptureComponents::ReadProvenance); + CC |= Component; + } + return CC; +} + +MDNode *MDNode::fromCaptureComponents(LLVMContext &Ctx, CaptureComponents CC) { + assert(!capturesNothing(CC) && "Can't encode captures(none)"); + if (capturesAll(CC)) + return nullptr; + + SmallVector Components; + if (capturesAddressIsNullOnly(CC)) + Components.push_back(MDString::get(Ctx, "address_is_null")); + else if (capturesAddress(CC)) + Components.push_back(MDString::get(Ctx, "address")); + if (capturesReadProvenanceOnly(CC)) + Components.push_back(MDString::get(Ctx, "read_provenance")); + else if (capturesFullProvenance(CC)) + Components.push_back(MDString::get(Ctx, "provenance")); + return MDNode::get(Ctx, Components); +} + //===----------------------------------------------------------------------===// // NamedMDNode implementation. // diff --git a/llvm/lib/IR/ProfDataUtils.cpp b/llvm/lib/IR/ProfDataUtils.cpp index 5827292cee39b..edeca976d293e 100644 --- a/llvm/lib/IR/ProfDataUtils.cpp +++ b/llvm/lib/IR/ProfDataUtils.cpp @@ -12,6 +12,7 @@ #include "llvm/IR/ProfDataUtils.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" @@ -19,6 +20,7 @@ #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Metadata.h" +#include "llvm/Support/CommandLine.h" using namespace llvm; @@ -84,10 +86,31 @@ static void extractFromBranchWeightMD(const MDNode *ProfileData, } } +/// Push the weights right to fit in uint32_t. 
+static SmallVector fitWeights(ArrayRef Weights) { + SmallVector Ret; + Ret.reserve(Weights.size()); + uint64_t Max = *llvm::max_element(Weights); + if (Max > UINT_MAX) { + unsigned Offset = 32 - llvm::countl_zero(Max); + for (const uint64_t &Value : Weights) + Ret.push_back(static_cast(Value >> Offset)); + } else { + append_range(Ret, Weights); + } + return Ret; +} + } // namespace namespace llvm { - +cl::opt ElideAllZeroBranchWeights("elide-all-zero-branch-weights", +#if defined(LLVM_ENABLE_PROFCHECK) + cl::init(false) +#else + cl::init(true) +#endif +); const char *MDProfLabels::BranchWeights = "branch_weights"; const char *MDProfLabels::ExpectedBranchWeights = "expected"; const char *MDProfLabels::ValueProfile = "VP"; @@ -252,6 +275,13 @@ void setExplicitlyUnknownBranchWeights(Instruction &I, StringRef PassName) { MDB.createString(PassName)})); } +void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, Function &F, + StringRef PassName) { + if (std::optional EC = F.getEntryCount(); + EC && EC->getCount() > 0) + setExplicitlyUnknownBranchWeights(I, PassName); +} + void setExplicitlyUnknownFunctionEntryCount(Function &F, StringRef PassName) { MDBuilder MDB(F.getContext()); F.setMetadata( @@ -275,12 +305,23 @@ bool hasExplicitlyUnknownBranchWeights(const Instruction &I) { } void setBranchWeights(Instruction &I, ArrayRef Weights, - bool IsExpected) { + bool IsExpected, bool ElideAllZero) { + if ((ElideAllZeroBranchWeights && ElideAllZero) && + llvm::all_of(Weights, [](uint32_t V) { return V == 0; })) { + I.setMetadata(LLVMContext::MD_prof, nullptr); + return; + } + MDBuilder MDB(I.getContext()); MDNode *BranchWeights = MDB.createBranchWeights(Weights, IsExpected); I.setMetadata(LLVMContext::MD_prof, BranchWeights); } +void setFittedBranchWeights(Instruction &I, ArrayRef Weights, + bool IsExpected, bool ElideAllZero) { + setBranchWeights(I, fitWeights(Weights), IsExpected, ElideAllZero); +} + SmallVector downscaleWeights(ArrayRef Weights, std::optional KnownMaxCount) { uint64_t MaxCount = KnownMaxCount.has_value() ? KnownMaxCount.value() diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp index 4e8f359481b81..a3476092253e7 100644 --- a/llvm/lib/IR/Value.cpp +++ b/llvm/lib/IR/Value.cpp @@ -36,7 +36,7 @@ using namespace llvm; -cl::opt UseDerefAtPointSemantics( +static cl::opt UseDerefAtPointSemantics( "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false), cl::desc("Deref attributes and metadata infer facts at definition only")); @@ -1000,14 +1000,12 @@ Align Value::getPointerAlignment(const DataLayout &DL) const { ConstantInt *CI = mdconst::extract(MD->getOperand(0)); return Align(CI->getLimitedValue()); } - } else if (auto *CstPtr = dyn_cast(this)) { - // Strip pointer casts to avoid creating unnecessary ptrtoint expression - // if the only "reduction" is combining a bitcast + ptrtoint. - CstPtr = CstPtr->stripPointerCasts(); - if (auto *CstInt = dyn_cast_or_null(ConstantExpr::getPtrToInt( - const_cast(CstPtr), DL.getIntPtrType(getType()), - /*OnlyIfReduced=*/true))) { - size_t TrailingZeros = CstInt->getValue().countr_zero(); + } else if (auto *CE = dyn_cast(this)) { + // Determine the alignment of inttoptr(C). + if (CE->getOpcode() == Instruction::IntToPtr && + isa(CE->getOperand(0))) { + ConstantInt *IntPtr = cast(CE->getOperand(0)); + size_t TrailingZeros = IntPtr->getValue().countr_zero(); // While the actual alignment may be large, elsewhere we have // an arbitrary upper alignmet limit, so let's clamp to it. 
return Align(TrailingZeros < Value::MaxAlignmentExponent diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 9bde965d660a4..6b3cd27b77a7a 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -480,6 +480,7 @@ class Verifier : public InstVisitor, VerifierSupport { visitModuleFlags(); visitModuleIdents(); visitModuleCommandLines(); + visitModuleErrnoTBAA(); verifyCompileUnits(); @@ -516,6 +517,7 @@ class Verifier : public InstVisitor, VerifierSupport { void visitComdat(const Comdat &C); void visitModuleIdents(); void visitModuleCommandLines(); + void visitModuleErrnoTBAA(); void visitModuleFlags(); void visitModuleFlag(const MDNode *Op, DenseMap &SeenIDs, @@ -540,6 +542,7 @@ class Verifier : public InstVisitor, VerifierSupport { void visitAliasScopeMetadata(const MDNode *MD); void visitAliasScopeListMetadata(const MDNode *MD); void visitAccessGroupMetadata(const MDNode *MD); + void visitCapturesMetadata(Instruction &I, const MDNode *Captures); template bool isValidMetadataArray(const MDTuple &N); #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N); @@ -1815,6 +1818,18 @@ void Verifier::visitModuleCommandLines() { } } +void Verifier::visitModuleErrnoTBAA() { + const NamedMDNode *ErrnoTBAA = M.getNamedMetadata("llvm.errno.tbaa"); + if (!ErrnoTBAA) + return; + + Check(ErrnoTBAA->getNumOperands() >= 1, + "llvm.errno.tbaa must have at least one operand", ErrnoTBAA); + + for (const MDNode *N : ErrnoTBAA->operands()) + TBAAVerifyHelper.visitTBAAMetadata(nullptr, N); +} + void Verifier::visitModuleFlags() { const NamedMDNode *Flags = M.getModuleFlagsMetadata(); if (!Flags) return; @@ -5359,6 +5374,27 @@ void Verifier::visitAccessGroupMetadata(const MDNode *MD) { } } +void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) { + static const char *ValidArgs[] = {"address_is_null", "address", + "read_provenance", "provenance"}; + + auto *SI = dyn_cast(&I); + Check(SI, "!captures metadata can only be applied to store instructions", &I); + Check(SI->getValueOperand()->getType()->isPointerTy(), + "!captures metadata can only be applied to store with value operand of " + "pointer type", + &I); + Check(Captures->getNumOperands() != 0, "!captures metadata cannot be empty", + &I); + + for (Metadata *Op : Captures->operands()) { + auto *Str = dyn_cast(Op); + Check(Str, "!captures metadata must be a list of strings", &I); + Check(is_contained(ValidArgs, Str->getString()), + "invalid entry in !captures metadata", &I, Str); + } +} + /// verifyInstruction - Verify that an instruction is well formed. 
/// void Verifier::visitInstruction(Instruction &I) { @@ -5537,7 +5573,7 @@ void Verifier::visitInstruction(Instruction &I) { visitNofreeMetadata(I, MD); if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa)) - TBAAVerifyHelper.visitTBAAMetadata(I, TBAA); + TBAAVerifyHelper.visitTBAAMetadata(&I, TBAA); if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias)) visitAliasScopeListMetadata(MD); @@ -5586,6 +5622,9 @@ void Verifier::visitInstruction(Instruction &I) { if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation)) visitAnnotationMetadata(Annotation); + if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures)) + visitCapturesMetadata(I, Captures); + if (MDNode *N = I.getDebugLoc().getAsMDNode()) { CheckDI(isa(N), "invalid !dbg metadata attachment", &I, N); visitMDNode(*N, AreDebugLocsAllowed::Yes); @@ -5675,6 +5714,11 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { default: break; case Intrinsic::assume: { + if (Call.hasOperandBundles()) { + auto *Cond = dyn_cast(Call.getArgOperand(0)); + Check(Cond && Cond->isOne(), + "assume with operand bundles must have i1 true condition", Call); + } for (auto &Elem : Call.bundle_op_infos()) { unsigned ArgCount = Elem.End - Elem.Begin; // Separate storage assumptions are special insofar as they're the only @@ -5850,9 +5894,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { break; } case Intrinsic::call_preallocated_setup: { - auto *NumArgs = dyn_cast(Call.getArgOperand(0)); - Check(NumArgs != nullptr, - "llvm.call.preallocated.setup argument must be a constant"); + auto *NumArgs = cast(Call.getArgOperand(0)); bool FoundCall = false; for (User *U : Call.users()) { auto *UseCall = dyn_cast(U); @@ -7655,10 +7697,10 @@ template void TBAAVerifier::CheckFailed(Tys &&... Args) { /// TBAA scheme. This means \p BaseNode is either a scalar node, or a /// struct-type node describing an aggregate data structure (like a struct). 
TBAAVerifier::TBAABaseNodeSummary -TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode, +TBAAVerifier::verifyTBAABaseNode(const Instruction *I, const MDNode *BaseNode, bool IsNewFormat) { if (BaseNode->getNumOperands() < 2) { - CheckFailed("Base nodes must have at least two operands", &I, BaseNode); + CheckFailed("Base nodes must have at least two operands", I, BaseNode); return {true, ~0u}; } @@ -7674,8 +7716,8 @@ TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode, } TBAAVerifier::TBAABaseNodeSummary -TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode, - bool IsNewFormat) { +TBAAVerifier::verifyTBAABaseNodeImpl(const Instruction *I, + const MDNode *BaseNode, bool IsNewFormat) { const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u}; if (BaseNode->getNumOperands() == 2) { @@ -7704,7 +7746,7 @@ TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode, auto *TypeSizeNode = mdconst::dyn_extract_or_null( BaseNode->getOperand(1)); if (!TypeSizeNode) { - CheckFailed("Type size nodes must be constants!", &I, BaseNode); + CheckFailed("Type size nodes must be constants!", I, BaseNode); return InvalidNode; } } @@ -7730,7 +7772,7 @@ TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode, const MDOperand &FieldTy = BaseNode->getOperand(Idx); const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1); if (!isa(FieldTy)) { - CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode); + CheckFailed("Incorrect field entry in struct type node!", I, BaseNode); Failed = true; continue; } @@ -7738,7 +7780,7 @@ TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode, auto *OffsetEntryCI = mdconst::dyn_extract_or_null(FieldOffset); if (!OffsetEntryCI) { - CheckFailed("Offset entries must be constants!", &I, BaseNode); + CheckFailed("Offset entries must be constants!", I, BaseNode); Failed = true; continue; } @@ -7748,7 +7790,7 @@ TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode, if (OffsetEntryCI->getBitWidth() != BitWidth) { CheckFailed( - "Bitwidth between the offsets and struct type entries must match", &I, + "Bitwidth between the offsets and struct type entries must match", I, BaseNode); Failed = true; continue; @@ -7763,7 +7805,7 @@ TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode, !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue()); if (!IsAscending) { - CheckFailed("Offsets must be increasing!", &I, BaseNode); + CheckFailed("Offsets must be increasing!", I, BaseNode); Failed = true; } @@ -7773,7 +7815,7 @@ TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode, auto *MemberSizeNode = mdconst::dyn_extract_or_null( BaseNode->getOperand(Idx + 2)); if (!MemberSizeNode) { - CheckFailed("Member size entries must be constants!", &I, BaseNode); + CheckFailed("Member size entries must be constants!", I, BaseNode); Failed = true; continue; } @@ -7825,7 +7867,7 @@ bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) { /// Offset in place to be the offset within the field node returned. /// /// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode. 
-MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I, +MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(const Instruction *I, const MDNode *BaseNode, APInt &Offset, bool IsNewFormat) { @@ -7845,7 +7887,7 @@ MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I, mdconst::extract(BaseNode->getOperand(Idx + 1)); if (OffsetEntryCI->getValue().ugt(Offset)) { if (Idx == FirstFieldOpNo) { - CheckFailed("Could not find TBAA parent in struct type node", &I, + CheckFailed("Could not find TBAA parent in struct type node", I, BaseNode, &Offset); return nullptr; } @@ -7874,21 +7916,22 @@ static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) { return isa_and_nonnull(Type->getOperand(0)); } -bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) { - CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", - &I, MD); +bool TBAAVerifier::visitTBAAMetadata(const Instruction *I, const MDNode *MD) { + CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands", I, + MD); - CheckTBAA(isa(I) || isa(I) || isa(I) || - isa(I) || isa(I) || - isa(I), - "This instruction shall not have a TBAA access tag!", &I); + if (I) + CheckTBAA(isa(I) || isa(I) || isa(I) || + isa(I) || isa(I) || + isa(I), + "This instruction shall not have a TBAA access tag!", I); bool IsStructPathTBAA = isa(MD->getOperand(0)) && MD->getNumOperands() >= 3; CheckTBAA(IsStructPathTBAA, "Old-style TBAA is no longer allowed, use struct-path TBAA instead", - &I); + I); MDNode *BaseNode = dyn_cast_or_null(MD->getOperand(0)); MDNode *AccessType = dyn_cast_or_null(MD->getOperand(1)); @@ -7897,17 +7940,17 @@ bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) { if (IsNewFormat) { CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5, - "Access tag metadata must have either 4 or 5 operands", &I, MD); + "Access tag metadata must have either 4 or 5 operands", I, MD); } else { CheckTBAA(MD->getNumOperands() < 5, - "Struct tag metadata must have either 3 or 4 operands", &I, MD); + "Struct tag metadata must have either 3 or 4 operands", I, MD); } // Check the access size field. if (IsNewFormat) { auto *AccessSizeNode = mdconst::dyn_extract_or_null( MD->getOperand(3)); - CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD); + CheckTBAA(AccessSizeNode, "Access size field must be a constant", I, MD); } // Check the immutability flag. 
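(Illustrative sketch, not part of the patch.) The TBAA verifier hunks around this point change the instruction argument from a reference to a nullable const Instruction *, so the same checks can also run on module-level metadata such as the new llvm.errno.tbaa named metadata, where there is no instruction to attach diagnostics to. The sketch below shows only that nullable-context shape; checkAccessTag and the opaque types are hypothetical stand-ins, not the Verifier API.

#include <cstdio>

struct Instruction; // opaque in this sketch
struct MDNode;      // opaque in this sketch

// With a pointer, "no instruction context" is a legal, explicit state that the
// diagnostic path can describe instead of requiring a dummy instruction.
static bool checkAccessTag(const Instruction *I, const MDNode *MD) {
  if (!MD) {
    std::fprintf(stderr, "malformed TBAA tag %s\n",
                 I ? "on instruction" : "on module-level metadata");
    return false;
  }
  return true;
}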
@@ -7916,27 +7959,27 @@ bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) { auto *IsImmutableCI = mdconst::dyn_extract_or_null( MD->getOperand(ImmutabilityFlagOpNo)); CheckTBAA(IsImmutableCI, - "Immutability tag on struct tag metadata must be a constant", &I, + "Immutability tag on struct tag metadata must be a constant", I, MD); CheckTBAA( IsImmutableCI->isZero() || IsImmutableCI->isOne(), - "Immutability part of the struct tag metadata must be either 0 or 1", - &I, MD); + "Immutability part of the struct tag metadata must be either 0 or 1", I, + MD); } CheckTBAA(BaseNode && AccessType, "Malformed struct tag metadata: base and access-type " "should be non-null and point to Metadata nodes", - &I, MD, BaseNode, AccessType); + I, MD, BaseNode, AccessType); if (!IsNewFormat) { CheckTBAA(isValidScalarTBAANode(AccessType), - "Access type node must be a valid scalar type", &I, MD, + "Access type node must be a valid scalar type", I, MD, AccessType); } auto *OffsetCI = mdconst::dyn_extract_or_null(MD->getOperand(2)); - CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD); + CheckTBAA(OffsetCI, "Offset must be constant integer", I, MD); APInt Offset = OffsetCI->getValue(); bool SeenAccessTypeInPath = false; @@ -7944,17 +7987,17 @@ bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) { SmallPtrSet StructPath; for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode); - BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, - IsNewFormat)) { + BaseNode = + getFieldNodeFromTBAABaseNode(I, BaseNode, Offset, IsNewFormat)) { if (!StructPath.insert(BaseNode).second) { - CheckFailed("Cycle detected in struct path", &I, MD); + CheckFailed("Cycle detected in struct path", I, MD); return false; } bool Invalid; unsigned BaseNodeBitWidth; - std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode, - IsNewFormat); + std::tie(Invalid, BaseNodeBitWidth) = + verifyTBAABaseNode(I, BaseNode, IsNewFormat); // If the base node is invalid in itself, then we've already printed all the // errors we wanted to print. @@ -7964,20 +8007,20 @@ bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) { SeenAccessTypeInPath |= BaseNode == AccessType; if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType) - CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", - &I, MD, &Offset); + CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access", I, + MD, &Offset); CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() || (BaseNodeBitWidth == 0 && Offset == 0) || (IsNewFormat && BaseNodeBitWidth == ~0u), - "Access bit-width not the same as description bit-width", &I, MD, + "Access bit-width not the same as description bit-width", I, MD, BaseNodeBitWidth, Offset.getBitWidth()); if (IsNewFormat && SeenAccessTypeInPath) break; } - CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I, + CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", I, MD); return true; } diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp index 7b252627d73f9..e6544f3bafff4 100644 --- a/llvm/lib/LTO/LTO.cpp +++ b/llvm/lib/LTO/LTO.cpp @@ -75,9 +75,10 @@ static cl::opt DumpThinCGSCCs("dump-thin-cg-sccs", cl::init(false), cl::Hidden, cl::desc("Dump the SCCs in the ThinLTO index's callgraph")); +namespace llvm { extern cl::opt CodeGenDataThinLTOTwoRounds; - extern cl::opt ForceImportAll; +} // end namespace llvm namespace llvm { /// Enable global value internalization in LTO. 
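(Illustrative sketch, not part of the patch.) The LTO.cpp hunk above wraps the extern option declarations in namespace llvm; an extern declaration refers to an existing definition only when both live in the same namespace, while a declaration at global scope would name a separate ::-scoped entity. The sketch uses a hypothetical ImportAllFlag variable to show the scoping rule only.

namespace llvm {
int ImportAllFlag = 0; // definition (stands in for a cl::opt defined elsewhere)
} // end namespace llvm

namespace llvm {
extern int ImportAllFlag; // OK: redeclares llvm::ImportAllFlag
} // end namespace llvm

// extern int ImportAllFlag;  // at global scope this would declare
//                            // ::ImportAllFlag, a distinct entity that never
//                            // resolves to the llvm:: definition.

int readFlag() { return llvm::ImportAllFlag; }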
diff --git a/llvm/lib/LTO/LTOBackend.cpp b/llvm/lib/LTO/LTOBackend.cpp index c126e8efe82b3..11a7b3221bec9 100644 --- a/llvm/lib/LTO/LTOBackend.cpp +++ b/llvm/lib/LTO/LTOBackend.cpp @@ -240,27 +240,26 @@ static void runNewPMPasses(const Config &Conf, Module &Mod, TargetMachine *TM, unsigned OptLevel, bool IsThinLTO, ModuleSummaryIndex *ExportSummary, const ModuleSummaryIndex *ImportSummary) { - auto FS = vfs::getRealFileSystem(); std::optional PGOOpt; if (!Conf.SampleProfile.empty()) PGOOpt = PGOOptions(Conf.SampleProfile, "", Conf.ProfileRemapping, - /*MemoryProfile=*/"", FS, PGOOptions::SampleUse, + /*MemoryProfile=*/"", PGOOptions::SampleUse, PGOOptions::NoCSAction, PGOOptions::ColdFuncOpt::Default, true); else if (Conf.RunCSIRInstr) { PGOOpt = PGOOptions("", Conf.CSIRProfile, Conf.ProfileRemapping, - /*MemoryProfile=*/"", FS, PGOOptions::IRUse, + /*MemoryProfile=*/"", PGOOptions::IRUse, PGOOptions::CSIRInstr, PGOOptions::ColdFuncOpt::Default, Conf.AddFSDiscriminator); } else if (!Conf.CSIRProfile.empty()) { - PGOOpt = PGOOptions(Conf.CSIRProfile, "", Conf.ProfileRemapping, - /*MemoryProfile=*/"", FS, PGOOptions::IRUse, - PGOOptions::CSIRUse, PGOOptions::ColdFuncOpt::Default, - Conf.AddFSDiscriminator); + PGOOpt = + PGOOptions(Conf.CSIRProfile, "", Conf.ProfileRemapping, + /*MemoryProfile=*/"", PGOOptions::IRUse, PGOOptions::CSIRUse, + PGOOptions::ColdFuncOpt::Default, Conf.AddFSDiscriminator); NoPGOWarnMismatch = !Conf.PGOWarnMismatch; } else if (Conf.AddFSDiscriminator) { - PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr, - PGOOptions::NoAction, PGOOptions::NoCSAction, + PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", PGOOptions::NoAction, + PGOOptions::NoCSAction, PGOOptions::ColdFuncOpt::Default, true); } TM->setPGOOption(PGOOpt); diff --git a/llvm/lib/MC/DXContainerRootSignature.cpp b/llvm/lib/MC/DXContainerRootSignature.cpp index b9ebb7a9e789c..713aa3d8143e8 100644 --- a/llvm/lib/MC/DXContainerRootSignature.cpp +++ b/llvm/lib/MC/DXContainerRootSignature.cpp @@ -23,19 +23,20 @@ static uint32_t writePlaceholder(raw_svector_ostream &Stream) { static uint32_t rewriteOffsetToCurrentByte(raw_svector_ostream &Stream, uint32_t Offset) { uint32_t ByteOffset = Stream.tell(); - uint32_t Value = - support::endian::byte_swap( - ByteOffset); + uint32_t Value = support::endian::byte_swap( + ByteOffset, llvm::endianness::little); Stream.pwrite(reinterpret_cast(&Value), sizeof(Value), Offset); return ByteOffset; } size_t RootSignatureDesc::getSize() const { uint32_t StaticSamplersOffset = computeStaticSamplersOffset(); - size_t StaticSamplersSize = - StaticSamplers.size() * sizeof(dxbc::RTS0::v1::StaticSampler); + size_t StaticSamplersSize = sizeof(dxbc::RTS0::v1::StaticSampler); + if (Version > 2) + StaticSamplersSize = sizeof(dxbc::RTS0::v3::StaticSampler); - return size_t(StaticSamplersOffset) + StaticSamplersSize; + return size_t(StaticSamplersOffset) + + (StaticSamplersSize * StaticSamplers.size()); } uint32_t RootSignatureDesc::computeRootParametersOffset() const { @@ -171,6 +172,9 @@ void RootSignatureDesc::write(raw_ostream &OS) const { support::endian::write(BOS, S.ShaderRegister, llvm::endianness::little); support::endian::write(BOS, S.RegisterSpace, llvm::endianness::little); support::endian::write(BOS, S.ShaderVisibility, llvm::endianness::little); + + if (Version > 2) + support::endian::write(BOS, S.Flags, llvm::endianness::little); } assert(Storage.size() == getSize()); OS.write(Storage.data(), Storage.size()); diff --git a/llvm/lib/MC/MCCodeEmitter.cpp 
b/llvm/lib/MC/MCCodeEmitter.cpp index 0d114f12d58c5..76a8406cee7bd 100644 --- a/llvm/lib/MC/MCCodeEmitter.cpp +++ b/llvm/lib/MC/MCCodeEmitter.cpp @@ -7,9 +7,28 @@ //===----------------------------------------------------------------------===// #include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCInst.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include using namespace llvm; MCCodeEmitter::MCCodeEmitter() = default; MCCodeEmitter::~MCCodeEmitter() = default; + +void MCCodeEmitter::reportUnsupportedInst(const MCInst &Inst) { + std::string Msg; + raw_string_ostream OS(Msg); + OS << "Unsupported instruction : " << Inst; + reportFatalInternalError(Msg.c_str()); +} + +void MCCodeEmitter::reportUnsupportedOperand(const MCInst &Inst, + unsigned OpNum) { + std::string Msg; + raw_string_ostream OS(Msg); + OS << "Unsupported instruction operand : \"" << Inst << "\"[" << OpNum << "]"; + reportFatalInternalError(Msg.c_str()); +} diff --git a/llvm/lib/MC/MCObjectFileInfo.cpp b/llvm/lib/MC/MCObjectFileInfo.cpp index a0cd09b11d8de..a755c22ab879a 100644 --- a/llvm/lib/MC/MCObjectFileInfo.cpp +++ b/llvm/lib/MC/MCObjectFileInfo.cpp @@ -776,10 +776,18 @@ void MCObjectFileInfo::initCOFFMCObjectFileInfo(const Triple &T) { ".debug_loc.dwo", COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | COFF::IMAGE_SCN_MEM_READ); + DwarfLoclistsDWOSection = Ctx->getCOFFSection( + ".debug_loclists.dwo", COFF::IMAGE_SCN_MEM_DISCARDABLE | + COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | + COFF::IMAGE_SCN_MEM_READ); DwarfStrOffDWOSection = Ctx->getCOFFSection( ".debug_str_offsets.dwo", COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | COFF::IMAGE_SCN_MEM_READ); + DwarfRnglistsDWOSection = Ctx->getCOFFSection( + ".debug_rnglists.dwo", COFF::IMAGE_SCN_MEM_DISCARDABLE | + COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | + COFF::IMAGE_SCN_MEM_READ); DwarfAddrSection = Ctx->getCOFFSection( ".debug_addr", COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | diff --git a/llvm/lib/MC/MCSFrame.cpp b/llvm/lib/MC/MCSFrame.cpp index 066d1a34e1548..d6fa54c087ca3 100644 --- a/llvm/lib/MC/MCSFrame.cpp +++ b/llvm/lib/MC/MCSFrame.cpp @@ -111,6 +111,8 @@ struct SFrameFDE { MCFragment *Frag; // Unwinding fres SmallVector FREs; + // .cfi_remember_state stack + SmallVector SaveState; SFrameFDE(const MCDwarfFrameInfo &DF, MCSymbol *FRES) : DFrame(DF), FREStart(FRES), Frag(nullptr) {} @@ -198,7 +200,7 @@ class SFrameEmitterImpl { return false; } - bool setCFAOffset(SFrameFRE &FRE, const SMLoc &Loc, size_t Offset) { + bool setCFAOffset(SFrameFRE &FRE, SMLoc Loc, size_t Offset) { if (!FRE.CFARegSet) { Streamer.getContext().reportWarning( Loc, "adjusting CFA offset without a base register. " @@ -237,13 +239,30 @@ class SFrameEmitterImpl { case MCCFIInstruction::OpAdjustCfaOffset: return setCFAOffset(FRE, CFI.getLoc(), FRE.CFAOffset + CFI.getOffset()); case MCCFIInstruction::OpRememberState: - // TODO: Implement. Will use FDE. + if (FDE.FREs.size() == 1) { + // Error for gas compatibility: If the initial FRE isn't complete, + // then any state is incomplete. FIXME: Dwarf doesn't error here. + // Why should sframe? + Streamer.getContext().reportWarning( + CFI.getLoc(), "skipping SFrame FDE; .cfi_remember_state without " + "prior SFrame FRE state"); + return false; + } + FDE.SaveState.push_back(FRE); return true; case MCCFIInstruction::OpRestore: - // TODO: Implement. Will use FDE. + // The first FRE generated has the original state. 
+ if (CFI.getRegister() == FPReg) + FRE.FPOffset = FDE.FREs.front().FPOffset; + else if (CFI.getRegister() == RAReg) + FRE.RAOffset = FDE.FREs.front().RAOffset; return true; case MCCFIInstruction::OpRestoreState: - // TODO: Implement. Will use FDE. + // The cfi parser will have caught unbalanced directives earlier, so a + // mismatch here is an implementation error. + assert(!FDE.SaveState.empty() && + "cfi_restore_state without cfi_save_state"); + FRE = FDE.SaveState.pop_back_val(); return true; case MCCFIInstruction::OpEscape: // TODO: Implement. Will use FDE. @@ -394,8 +413,8 @@ class SFrameEmitterImpl { // shf_fdeoff. With no sfh_auxhdr, these immediately follow this header. Streamer.emitInt32(0); // shf_freoff - Streamer.emitAbsoluteSymbolDiff(FRESubSectionStart, FDESubSectionStart, - sizeof(uint32_t)); + Streamer.emitInt32(FDEs.size() * + sizeof(sframe::FuncDescEntry)); } void emitFDEs() { diff --git a/llvm/lib/Object/ArchiveWriter.cpp b/llvm/lib/Object/ArchiveWriter.cpp index 6fc0889afc6a8..a11259748b9cc 100644 --- a/llvm/lib/Object/ArchiveWriter.cpp +++ b/llvm/lib/Object/ArchiveWriter.cpp @@ -1119,10 +1119,26 @@ Error writeArchiveToStream(raw_ostream &Out, // to switch to 64-bit. Note that the file can be larger than 4GB as long as // the last member starts before the 4GB offset. if (*HeadersSize + LastMemberHeaderOffset >= Sym64Threshold) { - if (Kind == object::Archive::K_DARWIN) + switch (Kind) { + case object::Archive::K_COFF: + // COFF format has no 64-bit version, so we use GNU64 instead. + if (!SymMap.Map.empty() && !SymMap.ECMap.empty()) + // Only the COFF format supports the ECSYMBOLS section, so don’t use + // GNU64 when two symbol maps are required. + return make_error( + "Archive is too large: ARM64X does not support archives larger " + "than 4GB"); + // Since this changes the headers, we need to recalculate everything. + return writeArchiveToStream(Out, NewMembers, WriteSymtab, + object::Archive::K_GNU64, Deterministic, + Thin, IsEC, Warn); + case object::Archive::K_DARWIN: Kind = object::Archive::K_DARWIN64; - else + break; + default: Kind = object::Archive::K_GNU64; + break; + } HeadersSize.reset(); } } diff --git a/llvm/lib/Object/DXContainer.cpp b/llvm/lib/Object/DXContainer.cpp index 031b9414f4c1a..7b7b8d88c63fc 100644 --- a/llvm/lib/Object/DXContainer.cpp +++ b/llvm/lib/Object/DXContainer.cpp @@ -276,10 +276,13 @@ Error DirectX::RootSignature::parse() { RootParametersOffset, NumParameters * sizeof(dxbc::RTS0::v1::RootParameterHeader)); - StaticSamplers.Stride = sizeof(dxbc::RTS0::v1::StaticSampler); - StaticSamplers.Data = PartData.substr( - StaticSamplersOffset, - NumStaticSamplers * sizeof(dxbc::RTS0::v1::StaticSampler)); + StaticSamplers.Stride = (Version <= 2) + ? 
sizeof(dxbc::RTS0::v1::StaticSampler) + : sizeof(dxbc::RTS0::v3::StaticSampler); + + StaticSamplers.Data = PartData.substr(StaticSamplersOffset, + static_cast(NumStaticSamplers) * + StaticSamplers.Stride); return Error::success(); } diff --git a/llvm/lib/Object/OffloadBundle.cpp b/llvm/lib/Object/OffloadBundle.cpp index 1e1042ce2bc21..329dcbf2d939a 100644 --- a/llvm/lib/Object/OffloadBundle.cpp +++ b/llvm/lib/Object/OffloadBundle.cpp @@ -89,17 +89,17 @@ Error OffloadBundleFatBin::readEntries(StringRef Buffer, uint64_t EntryIDSize; StringRef EntryID; - if (auto EC = Reader.readInteger(EntryOffset)) - return errorCodeToError(object_error::parse_failed); + if (Error Err = Reader.readInteger(EntryOffset)) + return Err; - if (auto EC = Reader.readInteger(EntrySize)) - return errorCodeToError(object_error::parse_failed); + if (Error Err = Reader.readInteger(EntrySize)) + return Err; - if (auto EC = Reader.readInteger(EntryIDSize)) - return errorCodeToError(object_error::parse_failed); + if (Error Err = Reader.readInteger(EntryIDSize)) + return Err; - if (auto EC = Reader.readFixedString(EntryID, EntryIDSize)) - return errorCodeToError(object_error::parse_failed); + if (Error Err = Reader.readFixedString(EntryID, EntryIDSize)) + return Err; auto Entry = std::make_unique( EntryOffset + SectionOffset, EntrySize, EntryIDSize, EntryID); @@ -120,14 +120,15 @@ OffloadBundleFatBin::create(MemoryBufferRef Buf, uint64_t SectionOffset, if (identify_magic(Buf.getBuffer()) != file_magic::offload_bundle) return errorCodeToError(object_error::parse_failed); - OffloadBundleFatBin *TheBundle = new OffloadBundleFatBin(Buf, FileName); + std::unique_ptr TheBundle( + new OffloadBundleFatBin(Buf, FileName)); // Read the Bundle Entries Error Err = TheBundle->readEntries(Buf.getBuffer(), SectionOffset); if (Err) - return errorCodeToError(object_error::parse_failed); + return Err; - return std::unique_ptr(TheBundle); + return std::move(TheBundle); } Error OffloadBundleFatBin::extractBundle(const ObjectFile &Source) { diff --git a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp index 910383816f43b..b00e45d912be1 100644 --- a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp +++ b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp @@ -343,6 +343,9 @@ Error DXContainerWriter::writeParts(raw_ostream &OS) { NewSampler.RegisterSpace = Param.RegisterSpace; NewSampler.ShaderVisibility = Param.ShaderVisibility; + if (RS.Version > 2) + NewSampler.Flags = Param.getEncodedFlags(); + RS.StaticSamplers.push_back(NewSampler); } diff --git a/llvm/lib/ObjectYAML/DXContainerYAML.cpp b/llvm/lib/ObjectYAML/DXContainerYAML.cpp index 22674b1ceb734..5dff9bad12b52 100644 --- a/llvm/lib/ObjectYAML/DXContainerYAML.cpp +++ b/llvm/lib/ObjectYAML/DXContainerYAML.cpp @@ -154,7 +154,7 @@ DXContainerYAML::RootSignatureYamlDesc::create( if (Error E = readDescriptorRanges( Header, RootSigDesc, DTV)) return std::move(E); - } else if (Version == 2) { + } else if (Version == 2 || Version == 3) { if (Error E = readDescriptorRanges( Header, RootSigDesc, DTV)) return std::move(E); @@ -209,6 +209,11 @@ DXContainerYAML::RootSignatureYamlDesc::create( NewS.RegisterSpace = S.RegisterSpace; NewS.ShaderVisibility = dxbc::ShaderVisibility(S.ShaderVisibility); + if (Version > 2) { +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) \ + NewS.Enum = (S.Flags & llvm::to_underlying(dxbc::StaticSamplerFlags::Enum)); +#include "llvm/BinaryFormat/DXContainerConstants.def" + } RootSigDesc.StaticSamplers.push_back(NewS); } @@ -245,6 +250,15 @@ uint32_t 
DXContainerYAML::DescriptorRangeYaml::getEncodedFlags() const { return Flags; } +uint32_t DXContainerYAML::StaticSamplerYamlDesc::getEncodedFlags() const { + uint64_t Flags = 0; +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) \ + if (Enum) \ + Flags |= (uint32_t)dxbc::StaticSamplerFlags::Enum; +#include "llvm/BinaryFormat/DXContainerConstants.def" + return Flags; +} + uint64_t DXContainerYAML::ShaderFeatureFlags::getEncodedFlags() { uint64_t Flag = 0; #define SHADER_FEATURE_FLAG(Num, DxilModuleNum, Val, Str) \ @@ -512,6 +526,9 @@ void MappingTraits::mapping( IO.mapRequired("ShaderRegister", S.ShaderRegister); IO.mapRequired("RegisterSpace", S.RegisterSpace); IO.mapRequired("ShaderVisibility", S.ShaderVisibility); +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) \ + IO.mapOptional(#Flag, S.Enum, false); +#include "llvm/BinaryFormat/DXContainerConstants.def" } void MappingTraits::mapping(IO &IO, diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index e4dab4acc0b4a..c234623caecf9 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -375,7 +375,6 @@ #include "llvm/Transforms/Utils/SymbolRewriter.h" #include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h" #include "llvm/Transforms/Utils/UnifyLoopExits.h" -#include "llvm/Transforms/Vectorize/EVLIndVarSimplify.h" #include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h" #include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h" #include "llvm/Transforms/Vectorize/LoopVectorize.h" @@ -504,8 +503,9 @@ static Expected parseOptLevelParam(StringRef S) { PassBuilder::PassBuilder(TargetMachine *TM, PipelineTuningOptions PTO, std::optional PGOOpt, - PassInstrumentationCallbacks *PIC) - : TM(TM), PTO(PTO), PGOOpt(PGOOpt), PIC(PIC) { + PassInstrumentationCallbacks *PIC, + IntrusiveRefCntPtr FS) + : TM(TM), PTO(PTO), PGOOpt(PGOOpt), PIC(PIC), FS(std::move(FS)) { if (TM) TM->registerPassBuilderCallbacks(*this); if (PIC) { diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp index 30c6f06be139d..7069e8d67c2f1 100644 --- a/llvm/lib/Passes/PassBuilderPipelines.cpp +++ b/llvm/lib/Passes/PassBuilderPipelines.cpp @@ -150,6 +150,8 @@ using namespace llvm; +namespace llvm { + static cl::opt UseInlineAdvisor( "enable-ml-inliner", cl::init(InliningAdvisorMode::Default), cl::Hidden, cl::desc("Enable ML policy for inliner. Currently trained for -Oz only"), @@ -305,7 +307,6 @@ static cl::opt InstrumentColdFuncOnlyPath( extern cl::opt UseCtxProfile; extern cl::opt PGOInstrumentColdFunctionOnly; -namespace llvm { extern cl::opt EnableMemProfContextDisambiguation; } // namespace llvm @@ -610,7 +611,9 @@ PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level, // Jump table to switch conversion. 
if (EnableJumpTableToSwitch) - FPM.addPass(JumpTableToSwitchPass()); + FPM.addPass(JumpTableToSwitchPass( + /*InLTO=*/Phase == ThinOrFullLTOPhase::ThinLTOPostLink || + Phase == ThinOrFullLTOPhase::FullLTOPostLink)); FPM.addPass( SimplifyCFGPass(SimplifyCFGOptions().convertSwitchRangeToICmp(true))); @@ -848,8 +851,7 @@ void PassBuilder::addPGOInstrPasses(ModulePassManager &MPM, OptimizationLevel Level, bool RunProfileGen, bool IsCS, bool AtomicCounterUpdate, std::string ProfileFile, - std::string ProfileRemappingFile, - IntrusiveRefCntPtr FS) { + std::string ProfileRemappingFile) { assert(Level != OptimizationLevel::O0 && "Not expecting O0 here!"); if (!RunProfileGen) { @@ -884,10 +886,11 @@ void PassBuilder::addPGOInstrPasses(ModulePassManager &MPM, MPM.addPass(InstrProfilingLoweringPass(Options, IsCS)); } -void PassBuilder::addPGOInstrPassesForO0( - ModulePassManager &MPM, bool RunProfileGen, bool IsCS, - bool AtomicCounterUpdate, std::string ProfileFile, - std::string ProfileRemappingFile, IntrusiveRefCntPtr FS) { +void PassBuilder::addPGOInstrPassesForO0(ModulePassManager &MPM, + bool RunProfileGen, bool IsCS, + bool AtomicCounterUpdate, + std::string ProfileFile, + std::string ProfileRemappingFile) { if (!RunProfileGen) { assert(!ProfileFile.empty() && "Profile use expecting a profile file!"); MPM.addPass( @@ -1133,8 +1136,8 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level, if (LoadSampleProfile) { // Annotate sample profile right after early FPM to ensure freshness of // the debug info. - MPM.addPass(SampleProfileLoaderPass(PGOOpt->ProfileFile, - PGOOpt->ProfileRemappingFile, Phase)); + MPM.addPass(SampleProfileLoaderPass( + PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile, Phase, FS)); // Cache ProfileSummaryAnalysis once to avoid the potential need to insert // RequireAnalysisPass for PSI before subsequent non-module passes. MPM.addPass(RequireAnalysisPass()); @@ -1230,8 +1233,7 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level, addPGOInstrPasses(MPM, Level, /*RunProfileGen=*/IsPGOInstrGen, /*IsCS=*/false, PGOOpt->AtomicCounterUpdate, - PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile, - PGOOpt->FS); + PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile); } else if (IsCtxProfGen || IsCtxProfUse) { MPM.addPass(PGOInstrumentationGen(PGOInstrumentationType::CTXPROF)); // In pre-link, we just want the instrumented IR. 
We use the contextual @@ -1254,10 +1256,10 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level, addPostPGOLoopRotation(MPM, Level); MPM.addPass(PGOCtxProfLoweringPass()); } else if (IsColdFuncOnlyInstrGen) { - addPGOInstrPasses( - MPM, Level, /* RunProfileGen */ true, /* IsCS */ false, - /* AtomicCounterUpdate */ false, InstrumentColdFuncOnlyPath, - /* ProfileRemappingFile */ "", IntrusiveRefCntPtr()); + addPGOInstrPasses(MPM, Level, /* RunProfileGen */ true, /* IsCS */ false, + /* AtomicCounterUpdate */ false, + InstrumentColdFuncOnlyPath, + /* ProfileRemappingFile */ ""); } if (IsPGOInstrGen || IsPGOInstrUse || IsCtxProfGen) @@ -1268,7 +1270,7 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level, EnableSampledInstr)); if (IsMemprofUse) - MPM.addPass(MemProfUsePass(PGOOpt->MemoryProfile, PGOOpt->FS)); + MPM.addPass(MemProfUsePass(PGOOpt->MemoryProfile, FS)); if (PGOOpt && (PGOOpt->Action == PGOOptions::IRUse || PGOOpt->Action == PGOOptions::SampleUse)) @@ -1477,13 +1479,11 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level, if (PGOOpt->CSAction == PGOOptions::CSIRInstr) addPGOInstrPasses(MPM, Level, /*RunProfileGen=*/true, /*IsCS=*/true, PGOOpt->AtomicCounterUpdate, - PGOOpt->CSProfileGenFile, PGOOpt->ProfileRemappingFile, - PGOOpt->FS); + PGOOpt->CSProfileGenFile, PGOOpt->ProfileRemappingFile); else if (PGOOpt->CSAction == PGOOptions::CSIRUse) addPGOInstrPasses(MPM, Level, /*RunProfileGen=*/false, /*IsCS=*/true, PGOOpt->AtomicCounterUpdate, - PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile, - PGOOpt->FS); + PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile); } // Re-compute GlobalsAA here prior to function passes. This is particularly @@ -1831,6 +1831,7 @@ ModulePassManager PassBuilder::buildThinLTODefaultPipeline( // in ICP. MPM.addPass(LowerTypeTestsPass(nullptr, nullptr, lowertypetests::DropTestKind::Assume)); + MPM.addPass(buildCoroWrapper(ThinOrFullLTOPhase::ThinLTOPostLink)); // Drop available_externally and unreferenced globals. This is necessary // with ThinLTO in order to avoid leaving undefined references to dead // globals in the object file. @@ -2070,13 +2071,11 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level, if (PGOOpt->CSAction == PGOOptions::CSIRInstr) addPGOInstrPasses(MPM, Level, /*RunProfileGen=*/true, /*IsCS=*/true, PGOOpt->AtomicCounterUpdate, - PGOOpt->CSProfileGenFile, PGOOpt->ProfileRemappingFile, - PGOOpt->FS); + PGOOpt->CSProfileGenFile, PGOOpt->ProfileRemappingFile); else if (PGOOpt->CSAction == PGOOptions::CSIRUse) addPGOInstrPasses(MPM, Level, /*RunProfileGen=*/false, /*IsCS=*/true, PGOOpt->AtomicCounterUpdate, - PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile, - PGOOpt->FS); + PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile); } // Break up allocas @@ -2236,7 +2235,7 @@ PassBuilder::buildO0DefaultPipeline(OptimizationLevel Level, MPM, /*RunProfileGen=*/(PGOOpt->Action == PGOOptions::IRInstr), /*IsCS=*/false, PGOOpt->AtomicCounterUpdate, PGOOpt->ProfileFile, - PGOOpt->ProfileRemappingFile, PGOOpt->FS); + PGOOpt->ProfileRemappingFile); // Instrument function entry and exit before all inlining. 
MPM.addPass(createModuleToFunctionPassAdaptor( diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def index 49d5d08474f0f..f0e7d36f78aab 100644 --- a/llvm/lib/Passes/PassRegistry.def +++ b/llvm/lib/Passes/PassRegistry.def @@ -755,7 +755,6 @@ LOOP_ANALYSIS("should-run-extra-simple-loop-unswitch", #endif LOOP_PASS("canon-freeze", CanonicalizeFreezeInLoopsPass()) LOOP_PASS("dot-ddg", DDGDotPrinterPass()) -LOOP_PASS("evl-iv-simplify", EVLIndVarSimplifyPass()) LOOP_PASS("guard-widening", GuardWideningPass()) LOOP_PASS("extra-simple-loop-unswitch-passes", ExtraLoopPassManager()) diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp index fc2577e6ada5d..075ad8d7aec8b 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp @@ -949,9 +949,9 @@ loadTestingFormat(StringRef Data, StringRef CompilationDir) { if (Data.size() < sizeof(uint64_t)) return make_error(coveragemap_error::malformed, "the size of data is too small"); - auto TestingVersion = - support::endian::byte_swap( - *reinterpret_cast(Data.data())); + auto TestingVersion = support::endian::byte_swap( + *reinterpret_cast(Data.data()), + llvm::endianness::little); Data = Data.substr(sizeof(uint64_t)); // Read the ProfileNames data. @@ -1274,9 +1274,9 @@ BinaryCoverageReader::create( std::vector> Readers; if (ObjectBuffer.getBuffer().size() > sizeof(TestingFormatMagic)) { - uint64_t Magic = - support::endian::byte_swap( - *reinterpret_cast(ObjectBuffer.getBufferStart())); + uint64_t Magic = support::endian::byte_swap( + *reinterpret_cast(ObjectBuffer.getBufferStart()), + llvm::endianness::little); if (Magic == TestingFormatMagic) { // This is a special format used for testing. auto ReaderOrErr = diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp index 12b1687af69db..3875f01c48528 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp @@ -292,7 +292,7 @@ void CoverageMappingWriter::write(raw_ostream &OS) { void TestingFormatWriter::write(raw_ostream &OS, TestingFormatVersion Version) { auto ByteSwap = [](uint64_t N) { - return support::endian::byte_swap(N); + return support::endian::byte_swap(N, llvm::endianness::little); }; // Output a 64bit magic number. 
diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp index e1c6315853b3b..3c8e44a18f533 100644 --- a/llvm/lib/ProfileData/InstrProf.cpp +++ b/llvm/lib/ProfileData/InstrProf.cpp @@ -292,7 +292,7 @@ void ProfOStream::patch(ArrayRef P) { for (const auto &K : P) { for (int I = 0, E = K.D.size(); I != E; I++) { uint64_t Bytes = - endian::byte_swap(K.D[I]); + endian::byte_swap(K.D[I], llvm::endianness::little); Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t), (const char *)&Bytes, sizeof(uint64_t)); } diff --git a/llvm/lib/ProfileData/InstrProfReader.cpp b/llvm/lib/ProfileData/InstrProfReader.cpp index 886add7131da2..d2ae4b5226ff6 100644 --- a/llvm/lib/ProfileData/InstrProfReader.cpp +++ b/llvm/lib/ProfileData/InstrProfReader.cpp @@ -1171,8 +1171,8 @@ bool IndexedInstrProfReader::hasFormat(const MemoryBuffer &DataBuffer) { if (DataBuffer.getBufferSize() < 8) return false; - uint64_t Magic = endian::read( - DataBuffer.getBufferStart()); + uint64_t Magic = endian::read(DataBuffer.getBufferStart(), + llvm::endianness::little); // Verify that it's magical. return Magic == IndexedInstrProf::Magic; } @@ -1186,10 +1186,10 @@ IndexedInstrProfReader::readSummary(IndexedInstrProf::ProfVersion Version, if (Version >= IndexedInstrProf::Version4) { const IndexedInstrProf::Summary *SummaryInLE = reinterpret_cast(Cur); - uint64_t NFields = endian::byte_swap( - SummaryInLE->NumSummaryFields); - uint64_t NEntries = endian::byte_swap( - SummaryInLE->NumCutoffEntries); + uint64_t NFields = endian::byte_swap( + SummaryInLE->NumSummaryFields, llvm::endianness::little); + uint64_t NEntries = endian::byte_swap( + SummaryInLE->NumCutoffEntries, llvm::endianness::little); uint32_t SummarySize = IndexedInstrProf::Summary::getSize(NFields, NEntries); std::unique_ptr SummaryData = @@ -1198,7 +1198,7 @@ IndexedInstrProfReader::readSummary(IndexedInstrProf::ProfVersion Version, const uint64_t *Src = reinterpret_cast(SummaryInLE); uint64_t *Dst = reinterpret_cast(SummaryData.get()); for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++) - Dst[I] = endian::byte_swap(Src[I]); + Dst[I] = endian::byte_swap(Src[I], llvm::endianness::little); SummaryEntryVector DetailedSummary; for (unsigned I = 0; I < SummaryData->NumCutoffEntries; I++) { @@ -1598,8 +1598,8 @@ Error IndexedInstrProfReader::getFunctionBitmap(StringRef FuncName, std::memset(W, 0, sizeof(W)); std::memcpy(W, &BitmapBytes[I], N); I += N; - return support::endian::read(W); + return support::endian::read( + W, llvm::endianness::little); }, Bitmap, Bitmap); assert(I == E); diff --git a/llvm/lib/ProfileData/MemProfCommon.cpp b/llvm/lib/ProfileData/MemProfCommon.cpp index a13a291a4cd27..cfd2efddce27b 100644 --- a/llvm/lib/ProfileData/MemProfCommon.cpp +++ b/llvm/lib/ProfileData/MemProfCommon.cpp @@ -20,6 +20,8 @@ using namespace llvm; using namespace llvm::memprof; +namespace llvm { + // Upper bound on lifetime access density (accesses per byte per lifetime sec) // for marking an allocation cold. 
LLVM_ABI cl::opt MemProfLifetimeAccessDensityColdThreshold( @@ -48,6 +50,8 @@ LLVM_ABI cl::opt cl::desc("Enable use of hot hints (only supported for " "unambigously hot allocations)")); +} // end namespace llvm + AllocationType llvm::memprof::getAllocType(uint64_t TotalLifetimeAccessDensity, uint64_t AllocCount, uint64_t TotalLifetime) { diff --git a/llvm/lib/ProfileData/SampleProfReader.cpp b/llvm/lib/ProfileData/SampleProfReader.cpp index 81ae792e70b99..766c0814ca067 100644 --- a/llvm/lib/ProfileData/SampleProfReader.cpp +++ b/llvm/lib/ProfileData/SampleProfReader.cpp @@ -1290,8 +1290,8 @@ SampleProfileReaderExtBinaryBase::readNameTableSec(bool IsMD5, NameTable.reserve(*Size); for (size_t I = 0; I < *Size; ++I) { using namespace support; - uint64_t FID = endian::read( - Data + I * sizeof(uint64_t)); + uint64_t FID = endian::read( + Data + I * sizeof(uint64_t), endianness::little); NameTable.emplace_back(FunctionId(FID)); } if (!ProfileIsCS) diff --git a/llvm/lib/Remarks/YAMLRemarkSerializer.cpp b/llvm/lib/Remarks/YAMLRemarkSerializer.cpp index f8b610dd7f73f..22e297040575c 100644 --- a/llvm/lib/Remarks/YAMLRemarkSerializer.cpp +++ b/llvm/lib/Remarks/YAMLRemarkSerializer.cpp @@ -114,11 +114,13 @@ template <> struct MappingTraits { static void mapping(IO &io, Argument &A) { assert(io.outputting() && "input not yet implemented"); + // NB: A.Key.data() is not necessarily null-terminated, as the StringRef may + // be a span into the middle of a string. if (StringRef(A.Val).count('\n') > 1) { StringBlockVal S(A.Val); - io.mapRequired(A.Key.data(), S); + io.mapRequired(A.Key, S); } else { - io.mapRequired(A.Key.data(), A.Val); + io.mapRequired(A.Key, A.Val); } io.mapOptional("DebugLoc", A.Loc); } diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp index d14abb4bd05b5..8623c06597f5c 100644 --- a/llvm/lib/Support/APFloat.cpp +++ b/llvm/lib/Support/APFloat.cpp @@ -5857,7 +5857,7 @@ DoubleAPFloat frexp(const DoubleAPFloat &Arg, int &Exp, // practice. 
if (Exp == APFloat::IEK_NaN) { DoubleAPFloat Quiet{Arg}; - Quiet.getFirst().makeQuiet(); + Quiet.getFirst() = Quiet.getFirst().makeQuiet(); return Quiet; } diff --git a/llvm/lib/Support/ARMWinEH.cpp b/llvm/lib/Support/ARMWinEH.cpp index 29c7a28541f23..fedea774b0da1 100644 --- a/llvm/lib/Support/ARMWinEH.cpp +++ b/llvm/lib/Support/ARMWinEH.cpp @@ -41,7 +41,7 @@ std::pair SavedRegisterMask(const RuntimeFunction &RF, GPRMask |= (((1 << ((RF.StackAdjust() & 0x3) + 1)) - 1) << (~RF.StackAdjust() & 0x3)); - return std::make_pair(GPRMask, VFPMask); + return {GPRMask, VFPMask}; } } // namespace WinEH } // namespace ARM diff --git a/llvm/lib/Support/BinaryStreamReader.cpp b/llvm/lib/Support/BinaryStreamReader.cpp index afc00864a5fb6..26ddf3f9c193d 100644 --- a/llvm/lib/Support/BinaryStreamReader.cpp +++ b/llvm/lib/Support/BinaryStreamReader.cpp @@ -174,5 +174,5 @@ BinaryStreamReader::split(uint64_t Off) const { First = First.keep_front(Off); BinaryStreamReader W1{First}; BinaryStreamReader W2{Second}; - return std::make_pair(W1, W2); + return {W1, W2}; } diff --git a/llvm/lib/Support/BinaryStreamWriter.cpp b/llvm/lib/Support/BinaryStreamWriter.cpp index dff08fee3fefa..0c399d5691f5b 100644 --- a/llvm/lib/Support/BinaryStreamWriter.cpp +++ b/llvm/lib/Support/BinaryStreamWriter.cpp @@ -89,7 +89,7 @@ BinaryStreamWriter::split(uint64_t Off) const { First = First.keep_front(Off); BinaryStreamWriter W1{First}; BinaryStreamWriter W2{Second}; - return std::make_pair(W1, W2); + return {W1, W2}; } Error BinaryStreamWriter::padToAlignment(uint32_t Align) { diff --git a/llvm/lib/Support/CommandLine.cpp b/llvm/lib/Support/CommandLine.cpp index 12a8d0c3a6bae..9491ec049f79d 100644 --- a/llvm/lib/Support/CommandLine.cpp +++ b/llvm/lib/Support/CommandLine.cpp @@ -101,6 +101,7 @@ void parser::anchor() {} void parser::anchor() {} void parser::anchor() {} void parser::anchor() {} +void parser>::anchor() {} void parser::anchor() {} // These anchor functions instantiate opt and reference its virtual @@ -2261,6 +2262,22 @@ void parser::printOptionDiff(const Option &O, StringRef V, outs() << ")\n"; } +void parser>::printOptionDiff( + const Option &O, std::optional V, + const OptionValue> &D, + size_t GlobalWidth) const { + printOptionName(O, GlobalWidth); + outs() << "= " << V; + size_t VSize = V.has_value() ? V.value().size() : 0; + size_t NumSpaces = MaxOptWidth > VSize ? MaxOptWidth - VSize : 0; + outs().indent(NumSpaces) << " (default: "; + if (D.hasValue() && D.getValue().has_value()) + outs() << D.getValue(); + else + outs() << "*no value*"; + outs() << ")\n"; +} + // Print a placeholder for options that don't yet support printOptionDiff(). void basic_parser_impl::printOptionNoValue(const Option &O, size_t GlobalWidth) const { diff --git a/llvm/lib/Support/DXILABI.cpp b/llvm/lib/Support/DXILABI.cpp index 082e32061bd45..ba6e16a0181c7 100644 --- a/llvm/lib/Support/DXILABI.cpp +++ b/llvm/lib/Support/DXILABI.cpp @@ -1,33 +1,33 @@ -//===-- DXILABI.cpp - ABI Sensitive Values for DXIL -----------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file contains definitions of various constants and enums that are -// required to remain stable as per the DXIL format's requirements. 
-// -// Documentation for DXIL can be found in -// https://github.com/Microsoft/DirectXShaderCompiler/blob/main/docs/DXIL.rst. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Support/DXILABI.h" -#include "llvm/Support/ErrorHandling.h" -using namespace llvm; - -StringRef dxil::getResourceClassName(dxil::ResourceClass RC) { - switch (RC) { - case dxil::ResourceClass::SRV: - return "SRV"; - case dxil::ResourceClass::UAV: - return "UAV"; - case dxil::ResourceClass::CBuffer: - return "CBV"; - case dxil::ResourceClass::Sampler: - return "Sampler"; - } - llvm_unreachable("Invalid ResourceClass enum value"); -} +//===-- DXILABI.cpp - ABI Sensitive Values for DXIL -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains definitions of various constants and enums that are +// required to remain stable as per the DXIL format's requirements. +// +// Documentation for DXIL can be found in +// https://github.com/Microsoft/DirectXShaderCompiler/blob/main/docs/DXIL.rst. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/DXILABI.h" +#include "llvm/Support/ErrorHandling.h" +using namespace llvm; + +StringRef dxil::getResourceClassName(dxil::ResourceClass RC) { + switch (RC) { + case dxil::ResourceClass::SRV: + return "SRV"; + case dxil::ResourceClass::UAV: + return "UAV"; + case dxil::ResourceClass::CBuffer: + return "CBV"; + case dxil::ResourceClass::Sampler: + return "Sampler"; + } + llvm_unreachable("Invalid ResourceClass enum value"); +} diff --git a/llvm/lib/Support/FileCollector.cpp b/llvm/lib/Support/FileCollector.cpp index edb5313d43eec..1e5de2c49a2b3 100644 --- a/llvm/lib/Support/FileCollector.cpp +++ b/llvm/lib/Support/FileCollector.cpp @@ -49,8 +49,9 @@ static bool isCaseSensitivePath(StringRef Path) { return true; } -FileCollector::FileCollector(std::string Root, std::string OverlayRoot) - : Root(Root), OverlayRoot(OverlayRoot) { +FileCollector::FileCollector(std::string Root, std::string OverlayRoot, + IntrusiveRefCntPtr VFS) + : Root(Root), OverlayRoot(OverlayRoot), Canonicalizer(std::move(VFS)) { assert(sys::path::is_absolute(Root) && "Root not absolute"); assert(sys::path::is_absolute(OverlayRoot) && "OverlayRoot not absolute"); } @@ -67,9 +68,8 @@ void FileCollector::PathCanonicalizer::updateWithRealPath( SmallString<256> RealPath; auto DirWithSymlink = CachedDirs.find(Directory); if (DirWithSymlink == CachedDirs.end()) { - // FIXME: Should this be a call to FileSystem::getRealpath(), in some - // cases? What if there is nothing on disk? - if (sys::fs::real_path(Directory, RealPath)) + // FIXME: What if there is nothing on disk? + if (VFS->getRealPath(Directory, RealPath)) return; CachedDirs[Directory] = std::string(RealPath); } else { @@ -88,9 +88,9 @@ void FileCollector::PathCanonicalizer::updateWithRealPath( } /// Make Path absolute. -static void makeAbsolute(SmallVectorImpl &Path) { +static void makeAbsolute(vfs::FileSystem &VFS, SmallVectorImpl &Path) { // We need an absolute src path to append to the root. - sys::fs::make_absolute(Path); + VFS.makeAbsolute(Path); // Canonicalize src to a native path to avoid mixed separator styles. 
sys::path::native(Path); @@ -105,7 +105,7 @@ FileCollector::PathCanonicalizer::PathStorage FileCollector::PathCanonicalizer::canonicalize(StringRef SrcPath) { PathStorage Paths; Paths.VirtualPath = SrcPath; - makeAbsolute(Paths.VirtualPath); + makeAbsolute(*VFS, Paths.VirtualPath); // If a ".." component is present after a symlink component, remove_dots may // lead to the wrong real destination path. Let the source be canonicalized diff --git a/llvm/lib/Support/Mustache.cpp b/llvm/lib/Support/Mustache.cpp index 6c2ed6c84c6cf..47860c00be610 100644 --- a/llvm/lib/Support/Mustache.cpp +++ b/llvm/lib/Support/Mustache.cpp @@ -7,9 +7,14 @@ //===----------------------------------------------------------------------===// #include "llvm/Support/Mustache.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" +#include +#include #include +#define DEBUG_TYPE "mustache" + using namespace llvm; using namespace llvm::mustache; @@ -51,6 +56,33 @@ static Accessor splitMustacheString(StringRef Str) { namespace llvm::mustache { +class MustacheOutputStream : public raw_ostream { +public: + MustacheOutputStream() = default; + ~MustacheOutputStream() override = default; + + virtual void suspendIndentation() {} + virtual void resumeIndentation() {} + +private: + void anchor() override; +}; + +void MustacheOutputStream::anchor() {} + +class RawMustacheOutputStream : public MustacheOutputStream { +public: + RawMustacheOutputStream(raw_ostream &OS) : OS(OS) { SetUnbuffered(); } + +private: + raw_ostream &OS; + + void write_impl(const char *Ptr, size_t Size) override { + OS.write(Ptr, Size); + } + uint64_t current_pos() const override { return OS.tell(); } +}; + class Token { public: enum class Type { @@ -62,6 +94,7 @@ class Token { InvertSectionOpen, UnescapeVariable, Comment, + SetDelimiter, }; Token(std::string Str) @@ -102,6 +135,8 @@ class Token { return Type::Partial; case '&': return Type::UnescapeVariable; + case '=': + return Type::SetDelimiter; default: return Type::Variable; } @@ -130,26 +165,17 @@ class ASTNode { InvertSection, }; - ASTNode(llvm::StringMap &Partials, llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, EscapeMap &Escapes) - : Partials(Partials), Lambdas(Lambdas), SectionLambdas(SectionLambdas), - Escapes(Escapes), Ty(Type::Root), Parent(nullptr), - ParentContext(nullptr) {} + ASTNode(MustacheContext &Ctx) + : Ctx(Ctx), Ty(Type::Root), Parent(nullptr), ParentContext(nullptr) {} - ASTNode(std::string Body, ASTNode *Parent, llvm::StringMap &Partials, - llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, EscapeMap &Escapes) - : Partials(Partials), Lambdas(Lambdas), SectionLambdas(SectionLambdas), - Escapes(Escapes), Ty(Type::Text), Body(std::move(Body)), Parent(Parent), + ASTNode(MustacheContext &Ctx, std::string Body, ASTNode *Parent) + : Ctx(Ctx), Ty(Type::Text), Body(std::move(Body)), Parent(Parent), ParentContext(nullptr) {} // Constructor for Section/InvertSection/Variable/UnescapeVariable Nodes - ASTNode(Type Ty, Accessor Accessor, ASTNode *Parent, - llvm::StringMap &Partials, llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, EscapeMap &Escapes) - : Partials(Partials), Lambdas(Lambdas), SectionLambdas(SectionLambdas), - Escapes(Escapes), Ty(Ty), Parent(Parent), - AccessorValue(std::move(Accessor)), ParentContext(nullptr) {} + ASTNode(MustacheContext &Ctx, Type Ty, Accessor Accessor, ASTNode *Parent) + : Ctx(Ctx), Ty(Ty), Parent(Parent), AccessorValue(std::move(Accessor)), + ParentContext(nullptr) {} void 
addChild(AstPtr Child) { Children.emplace_back(std::move(Child)); }; @@ -157,26 +183,33 @@ class ASTNode { void setIndentation(size_t NewIndentation) { Indentation = NewIndentation; }; - void render(const llvm::json::Value &Data, llvm::raw_ostream &OS); + void render(const llvm::json::Value &Data, MustacheOutputStream &OS); private: - void renderLambdas(const llvm::json::Value &Contexts, llvm::raw_ostream &OS, - Lambda &L); + void renderLambdas(const llvm::json::Value &Contexts, + MustacheOutputStream &OS, Lambda &L); void renderSectionLambdas(const llvm::json::Value &Contexts, - llvm::raw_ostream &OS, SectionLambda &L); + MustacheOutputStream &OS, SectionLambda &L); - void renderPartial(const llvm::json::Value &Contexts, llvm::raw_ostream &OS, - ASTNode *Partial); + void renderPartial(const llvm::json::Value &Contexts, + MustacheOutputStream &OS, ASTNode *Partial); - void renderChild(const llvm::json::Value &Context, llvm::raw_ostream &OS); + void renderChild(const llvm::json::Value &Context, MustacheOutputStream &OS); const llvm::json::Value *findContext(); - StringMap &Partials; - StringMap &Lambdas; - StringMap &SectionLambdas; - EscapeMap &Escapes; + void renderRoot(const json::Value &CurrentCtx, MustacheOutputStream &OS); + void renderText(MustacheOutputStream &OS); + void renderPartial(const json::Value &CurrentCtx, MustacheOutputStream &OS); + void renderVariable(const json::Value &CurrentCtx, MustacheOutputStream &OS); + void renderUnescapeVariable(const json::Value &CurrentCtx, + MustacheOutputStream &OS); + void renderSection(const json::Value &CurrentCtx, MustacheOutputStream &OS); + void renderInvertSection(const json::Value &CurrentCtx, + MustacheOutputStream &OS); + + MustacheContext &Ctx; Type Ty; size_t Indentation = 0; std::string RawBody; @@ -189,29 +222,18 @@ class ASTNode { }; // A wrapper for arena allocator for ASTNodes -AstPtr createRootNode(llvm::StringMap &Partials, - llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, - EscapeMap &Escapes) { - return std::make_unique(Partials, Lambdas, SectionLambdas, Escapes); +static AstPtr createRootNode(MustacheContext &Ctx) { + return std::make_unique(Ctx); } -AstPtr createNode(ASTNode::Type T, Accessor A, ASTNode *Parent, - llvm::StringMap &Partials, - llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, - EscapeMap &Escapes) { - return std::make_unique(T, std::move(A), Parent, Partials, Lambdas, - SectionLambdas, Escapes); +static AstPtr createNode(MustacheContext &Ctx, ASTNode::Type T, Accessor A, + ASTNode *Parent) { + return std::make_unique(Ctx, T, std::move(A), Parent); } -AstPtr createTextNode(std::string Body, ASTNode *Parent, - llvm::StringMap &Partials, - llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, - EscapeMap &Escapes) { - return std::make_unique(std::move(Body), Parent, Partials, Lambdas, - SectionLambdas, Escapes); +static AstPtr createTextNode(MustacheContext &Ctx, std::string Body, + ASTNode *Parent) { + return std::make_unique(Ctx, std::move(Body), Parent); } // Function to check if there is meaningful text behind. @@ -226,7 +248,7 @@ AstPtr createTextNode(std::string Body, ASTNode *Parent, // and the current token is the second token. // For example: // "{{#Section}}" -bool hasTextBehind(size_t Idx, const ArrayRef &Tokens) { +static bool hasTextBehind(size_t Idx, const ArrayRef &Tokens) { if (Idx == 0) return true; @@ -242,7 +264,7 @@ bool hasTextBehind(size_t Idx, const ArrayRef &Tokens) { // Function to check if there's no meaningful text ahead. 
// We determine if a token has text ahead if the left of previous // token does not start with a newline. -bool hasTextAhead(size_t Idx, const ArrayRef &Tokens) { +static bool hasTextAhead(size_t Idx, const ArrayRef &Tokens) { if (Idx >= Tokens.size() - 1) return true; @@ -255,11 +277,11 @@ bool hasTextAhead(size_t Idx, const ArrayRef &Tokens) { return !TokenBody.starts_with("\r\n") && !TokenBody.starts_with("\n"); } -bool requiresCleanUp(Token::Type T) { +static bool requiresCleanUp(Token::Type T) { // We must clean up all the tokens that could contain child nodes. return T == Token::Type::SectionOpen || T == Token::Type::InvertSectionOpen || T == Token::Type::SectionClose || T == Token::Type::Comment || - T == Token::Type::Partial; + T == Token::Type::Partial || T == Token::Type::SetDelimiter; } // Adjust next token body if there is no text ahead. @@ -268,7 +290,7 @@ bool requiresCleanUp(Token::Type T) { // "{{! Comment }} \nLine 2" // would be considered as no text ahead and should be rendered as // " Line 2" -void stripTokenAhead(SmallVectorImpl &Tokens, size_t Idx) { +static void stripTokenAhead(SmallVectorImpl &Tokens, size_t Idx) { Token &NextToken = Tokens[Idx + 1]; StringRef NextTokenBody = NextToken.TokenBody; // Cut off the leading newline which could be \n or \r\n. @@ -282,55 +304,167 @@ void stripTokenAhead(SmallVectorImpl &Tokens, size_t Idx) { // For example: // The template string // " \t{{#section}}A{{/section}}" -// would be considered as having no text ahead and would be render as +// would be considered as having no text ahead and would be render as: // "A" -// The exception for this is partial tag which requires us to -// keep track of the indentation once it's rendered. void stripTokenBefore(SmallVectorImpl &Tokens, size_t Idx, Token &CurrentToken, Token::Type CurrentType) { Token &PrevToken = Tokens[Idx - 1]; StringRef PrevTokenBody = PrevToken.TokenBody; StringRef Unindented = PrevTokenBody.rtrim(" \r\t\v"); size_t Indentation = PrevTokenBody.size() - Unindented.size(); - if (CurrentType != Token::Type::Partial) - PrevToken.TokenBody = Unindented.str(); + PrevToken.TokenBody = Unindented.str(); CurrentToken.setIndentation(Indentation); } +struct Tag { + enum class Kind { + None, + Normal, // {{...}} + Triple, // {{{...}}} + }; + + Kind TagKind = Kind::None; + StringRef Content; // The content between the delimiters. + StringRef FullMatch; // The entire tag, including delimiters. 
+ size_t StartPosition = StringRef::npos; +}; + +[[maybe_unused]] static const char *tagKindToString(Tag::Kind K) { + switch (K) { + case Tag::Kind::None: + return "None"; + case Tag::Kind::Normal: + return "Normal"; + case Tag::Kind::Triple: + return "Triple"; + } + llvm_unreachable("Unknown Tag::Kind"); +} + +[[maybe_unused]] static const char *jsonKindToString(json::Value::Kind K) { + switch (K) { + case json::Value::Kind::Null: + return "JSON_KIND_NULL"; + case json::Value::Kind::Boolean: + return "JSON_KIND_BOOLEAN"; + case json::Value::Kind::Number: + return "JSON_KIND_NUMBER"; + case json::Value::Kind::String: + return "JSON_KIND_STRING"; + case json::Value::Kind::Array: + return "JSON_KIND_ARRAY"; + case json::Value::Kind::Object: + return "JSON_KIND_OBJECT"; + } + llvm_unreachable("Unknown json::Value::Kind"); +} + +static Tag findNextTag(StringRef Template, size_t StartPos, StringRef Open, + StringRef Close) { + const StringLiteral TripleOpen("{{{"); + const StringLiteral TripleClose("}}}"); + + size_t NormalOpenPos = Template.find(Open, StartPos); + size_t TripleOpenPos = Template.find(TripleOpen, StartPos); + + Tag Result; + + // Determine which tag comes first. + if (TripleOpenPos != StringRef::npos && + (NormalOpenPos == StringRef::npos || TripleOpenPos <= NormalOpenPos)) { + // Found a triple mustache tag. + size_t EndPos = + Template.find(TripleClose, TripleOpenPos + TripleOpen.size()); + if (EndPos == StringRef::npos) + return Result; // No closing tag found. + + Result.TagKind = Tag::Kind::Triple; + Result.StartPosition = TripleOpenPos; + size_t ContentStart = TripleOpenPos + TripleOpen.size(); + Result.Content = Template.substr(ContentStart, EndPos - ContentStart); + Result.FullMatch = Template.substr( + TripleOpenPos, (EndPos + TripleClose.size()) - TripleOpenPos); + } else if (NormalOpenPos != StringRef::npos) { + // Found a normal mustache tag. + size_t EndPos = Template.find(Close, NormalOpenPos + Open.size()); + if (EndPos == StringRef::npos) + return Result; // No closing tag found. + + Result.TagKind = Tag::Kind::Normal; + Result.StartPosition = NormalOpenPos; + size_t ContentStart = NormalOpenPos + Open.size(); + Result.Content = Template.substr(ContentStart, EndPos - ContentStart); + Result.FullMatch = + Template.substr(NormalOpenPos, (EndPos + Close.size()) - NormalOpenPos); + } + + return Result; +} + +static std::optional> +processTag(const Tag &T, SmallVectorImpl &Tokens) { + LLVM_DEBUG(dbgs() << "[Tag] " << T.FullMatch << ", Content: " << T.Content + << ", Kind: " << tagKindToString(T.TagKind) << "\n"); + if (T.TagKind == Tag::Kind::Triple) { + Tokens.emplace_back(T.FullMatch.str(), "&" + T.Content.str(), '&'); + return std::nullopt; + } + StringRef Interpolated = T.Content; + std::string RawBody = T.FullMatch.str(); + if (!Interpolated.trim().starts_with("=")) { + char Front = Interpolated.empty() ? ' ' : Interpolated.trim().front(); + Tokens.emplace_back(RawBody, Interpolated.str(), Front); + return std::nullopt; + } + Tokens.emplace_back(RawBody, Interpolated.str(), '='); + StringRef DelimSpec = Interpolated.trim(); + DelimSpec = DelimSpec.drop_front(1); + DelimSpec = DelimSpec.take_until([](char C) { return C == '='; }); + DelimSpec = DelimSpec.trim(); + + std::pair Ret = DelimSpec.split(' '); + LLVM_DEBUG(dbgs() << "[Set Delimiter] NewOpen: " << Ret.first + << ", NewClose: " << Ret.second << "\n"); + return Ret; +} + // Simple tokenizer that splits the template into tokens. 
// The mustache spec allows {{{ }}} to unescape variables, // but we don't support that here. An unescape variable // is represented only by {{& variable}}. -SmallVector tokenize(StringRef Template) { +static SmallVector tokenize(StringRef Template) { + LLVM_DEBUG(dbgs() << "[Tokenize Template] \"" << Template << "\"\n"); SmallVector Tokens; - StringLiteral Open("{{"); - StringLiteral Close("}}"); + SmallString<8> Open("{{"); + SmallString<8> Close("}}"); size_t Start = 0; - size_t DelimiterStart = Template.find(Open); - if (DelimiterStart == StringRef::npos) { - Tokens.emplace_back(Template.str()); - return Tokens; - } - while (DelimiterStart != StringRef::npos) { - if (DelimiterStart != Start) - Tokens.emplace_back(Template.substr(Start, DelimiterStart - Start).str()); - size_t DelimiterEnd = Template.find(Close, DelimiterStart); - if (DelimiterEnd == StringRef::npos) + + while (Start < Template.size()) { + LLVM_DEBUG(dbgs() << "[Tokenize Loop] Start:" << Start << ", Open:'" << Open + << "', Close:'" << Close << "'\n"); + Tag T = findNextTag(Template, Start, Open, Close); + + if (T.TagKind == Tag::Kind::None) { + // No more tags, the rest is text. + Tokens.emplace_back(Template.substr(Start).str()); + LLVM_DEBUG(dbgs() << " No more tags. Created final Text token: \"" + << Template.substr(Start) << "\"\n"); break; + } - // Extract the Interpolated variable without delimiters. - size_t InterpolatedStart = DelimiterStart + Open.size(); - size_t InterpolatedEnd = DelimiterEnd - DelimiterStart - Close.size(); - std::string Interpolated = - Template.substr(InterpolatedStart, InterpolatedEnd).str(); - std::string RawBody = Open.str() + Interpolated + Close.str(); - Tokens.emplace_back(RawBody, Interpolated, Interpolated[0]); - Start = DelimiterEnd + Close.size(); - DelimiterStart = Template.find(Open, Start); - } + // Add the text before the tag. + if (T.StartPosition > Start) { + StringRef Text = Template.substr(Start, T.StartPosition - Start); + Tokens.emplace_back(Text.str()); + } - if (Start < Template.size()) - Tokens.emplace_back(Template.substr(Start).str()); + if (auto NewDelims = processTag(T, Tokens)) { + std::tie(Open, Close) = *NewDelims; + } + + // Move past the tag. + Start = T.StartPosition + T.FullMatch.size(); + } // Fix up white spaces for: // - open sections @@ -376,23 +510,36 @@ SmallVector tokenize(StringRef Template) { } // Custom stream to escape strings. -class EscapeStringStream : public raw_ostream { +class EscapeStringStream : public MustacheOutputStream { public: explicit EscapeStringStream(llvm::raw_ostream &WrappedStream, EscapeMap &Escape) - : Escape(Escape), WrappedStream(WrappedStream) { + : Escape(Escape), EscapeChars(Escape.keys().begin(), Escape.keys().end()), + WrappedStream(WrappedStream) { SetUnbuffered(); } protected: void write_impl(const char *Ptr, size_t Size) override { - llvm::StringRef Data(Ptr, Size); - for (char C : Data) { - auto It = Escape.find(C); - if (It != Escape.end()) - WrappedStream << It->getSecond(); - else - WrappedStream << C; + StringRef Data(Ptr, Size); + size_t Start = 0; + while (Start < Size) { + // Find the next character that needs to be escaped. + size_t Next = Data.find_first_of(EscapeChars.str(), Start); + + // If no escapable characters are found, write the rest of the string. + if (Next == StringRef::npos) { + WrappedStream << Data.substr(Start); + return; + } + + // Write the chunk of text before the escapable character. 
+ if (Next > Start) + WrappedStream << Data.substr(Start, Next - Start); + + // Look up and write the escaped version of the character. + WrappedStream << Escape[Data[Next]]; + Start = Next + 1; } } @@ -400,27 +547,40 @@ class EscapeStringStream : public raw_ostream { private: EscapeMap &Escape; + SmallString<8> EscapeChars; llvm::raw_ostream &WrappedStream; }; // Custom stream to add indentation used to for rendering partials. -class AddIndentationStringStream : public raw_ostream { +class AddIndentationStringStream : public MustacheOutputStream { public: - explicit AddIndentationStringStream(llvm::raw_ostream &WrappedStream, + explicit AddIndentationStringStream(raw_ostream &WrappedStream, size_t Indentation) - : Indentation(Indentation), WrappedStream(WrappedStream) { + : Indentation(Indentation), WrappedStream(WrappedStream), + NeedsIndent(true), IsSuspended(false) { SetUnbuffered(); } + void suspendIndentation() override { IsSuspended = true; } + void resumeIndentation() override { IsSuspended = false; } + protected: void write_impl(const char *Ptr, size_t Size) override { llvm::StringRef Data(Ptr, Size); SmallString<0> Indent; Indent.resize(Indentation, ' '); + for (char C : Data) { - WrappedStream << C; - if (C == '\n') + LLVM_DEBUG(dbgs() << "[Indentation Stream] NeedsIndent:" << NeedsIndent + << ", C:'" << C << "', Indentation:" << Indentation + << "\n"); + if (NeedsIndent && C != '\n') { WrappedStream << Indent; + NeedsIndent = false; + } + WrappedStream << C; + if (C == '\n' && !IsSuspended) + NeedsIndent = true; } } @@ -428,44 +588,50 @@ class AddIndentationStringStream : public raw_ostream { private: size_t Indentation; - llvm::raw_ostream &WrappedStream; + raw_ostream &WrappedStream; + bool NeedsIndent; + bool IsSuspended; }; class Parser { public: - Parser(StringRef TemplateStr) : TemplateStr(TemplateStr) {} + Parser(StringRef TemplateStr, MustacheContext &Ctx) + : Ctx(Ctx), TemplateStr(TemplateStr) {} - AstPtr parse(llvm::StringMap &Partials, - llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, - EscapeMap &Escapes); + AstPtr parse(); private: - void parseMustache(ASTNode *Parent, llvm::StringMap &Partials, - llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, - EscapeMap &Escapes); + void parseMustache(ASTNode *Parent); + void parseSection(ASTNode *Parent, ASTNode::Type Ty, const Accessor &A); + MustacheContext &Ctx; SmallVector Tokens; size_t CurrentPtr; StringRef TemplateStr; }; -AstPtr Parser::parse(llvm::StringMap &Partials, - llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, - EscapeMap &Escapes) { +void Parser::parseSection(ASTNode *Parent, ASTNode::Type Ty, + const Accessor &A) { + AstPtr CurrentNode = createNode(Ctx, Ty, A, Parent); + size_t Start = CurrentPtr; + parseMustache(CurrentNode.get()); + const size_t End = CurrentPtr - 1; + std::string RawBody; + for (std::size_t I = Start; I < End; I++) + RawBody += Tokens[I].RawBody; + CurrentNode->setRawBody(std::move(RawBody)); + Parent->addChild(std::move(CurrentNode)); +} + +AstPtr Parser::parse() { Tokens = tokenize(TemplateStr); CurrentPtr = 0; - AstPtr RootNode = createRootNode(Partials, Lambdas, SectionLambdas, Escapes); - parseMustache(RootNode.get(), Partials, Lambdas, SectionLambdas, Escapes); + AstPtr RootNode = createRootNode(Ctx); + parseMustache(RootNode.get()); return RootNode; } -void Parser::parseMustache(ASTNode *Parent, llvm::StringMap &Partials, - llvm::StringMap &Lambdas, - llvm::StringMap &SectionLambdas, - EscapeMap &Escapes) { +void Parser::parseMustache(ASTNode 
*Parent) { while (CurrentPtr < Tokens.size()) { Token CurrentToken = Tokens[CurrentPtr]; @@ -475,66 +641,48 @@ void Parser::parseMustache(ASTNode *Parent, llvm::StringMap &Partials, switch (CurrentToken.getType()) { case Token::Type::Text: { - CurrentNode = createTextNode(std::move(CurrentToken.TokenBody), Parent, - Partials, Lambdas, SectionLambdas, Escapes); + CurrentNode = + createTextNode(Ctx, std::move(CurrentToken.TokenBody), Parent); Parent->addChild(std::move(CurrentNode)); break; } case Token::Type::Variable: { - CurrentNode = createNode(ASTNode::Variable, std::move(A), Parent, - Partials, Lambdas, SectionLambdas, Escapes); + CurrentNode = createNode(Ctx, ASTNode::Variable, std::move(A), Parent); Parent->addChild(std::move(CurrentNode)); break; } case Token::Type::UnescapeVariable: { - CurrentNode = createNode(ASTNode::UnescapeVariable, std::move(A), Parent, - Partials, Lambdas, SectionLambdas, Escapes); + CurrentNode = + createNode(Ctx, ASTNode::UnescapeVariable, std::move(A), Parent); Parent->addChild(std::move(CurrentNode)); break; } case Token::Type::Partial: { - CurrentNode = createNode(ASTNode::Partial, std::move(A), Parent, Partials, - Lambdas, SectionLambdas, Escapes); + CurrentNode = createNode(Ctx, ASTNode::Partial, std::move(A), Parent); CurrentNode->setIndentation(CurrentToken.getIndentation()); Parent->addChild(std::move(CurrentNode)); break; } case Token::Type::SectionOpen: { - CurrentNode = createNode(ASTNode::Section, A, Parent, Partials, Lambdas, - SectionLambdas, Escapes); - size_t Start = CurrentPtr; - parseMustache(CurrentNode.get(), Partials, Lambdas, SectionLambdas, - Escapes); - const size_t End = CurrentPtr - 1; - std::string RawBody; - for (std::size_t I = Start; I < End; I++) - RawBody += Tokens[I].RawBody; - CurrentNode->setRawBody(std::move(RawBody)); - Parent->addChild(std::move(CurrentNode)); + parseSection(Parent, ASTNode::Section, A); break; } case Token::Type::InvertSectionOpen: { - CurrentNode = createNode(ASTNode::InvertSection, A, Parent, Partials, - Lambdas, SectionLambdas, Escapes); - size_t Start = CurrentPtr; - parseMustache(CurrentNode.get(), Partials, Lambdas, SectionLambdas, - Escapes); - const size_t End = CurrentPtr - 1; - std::string RawBody; - for (size_t Idx = Start; Idx < End; Idx++) - RawBody += Tokens[Idx].RawBody; - CurrentNode->setRawBody(std::move(RawBody)); - Parent->addChild(std::move(CurrentNode)); + parseSection(Parent, ASTNode::InvertSection, A); break; } case Token::Type::Comment: + case Token::Type::SetDelimiter: break; case Token::Type::SectionClose: return; } } } -void toMustacheString(const json::Value &Data, raw_ostream &OS) { +static void toMustacheString(const json::Value &Data, raw_ostream &OS) { + LLVM_DEBUG(dbgs() << "[To Mustache String] Kind: " + << jsonKindToString(Data.kind()) << ", Data: " << Data + << "\n"); switch (Data.kind()) { case json::Value::Null: return; @@ -566,74 +714,106 @@ void toMustacheString(const json::Value &Data, raw_ostream &OS) { } } -void ASTNode::render(const json::Value &CurrentCtx, raw_ostream &OS) { +void ASTNode::renderRoot(const json::Value &CurrentCtx, + MustacheOutputStream &OS) { + renderChild(CurrentCtx, OS); +} + +void ASTNode::renderText(MustacheOutputStream &OS) { OS << Body; } + +void ASTNode::renderPartial(const json::Value &CurrentCtx, + MustacheOutputStream &OS) { + LLVM_DEBUG(dbgs() << "[Render Partial] Accessor:" << AccessorValue[0] + << ", Indentation:" << Indentation << "\n"); + auto Partial = Ctx.Partials.find(AccessorValue[0]); + if (Partial != 
Ctx.Partials.end()) + renderPartial(CurrentCtx, OS, Partial->getValue().get()); +} + +void ASTNode::renderVariable(const json::Value &CurrentCtx, + MustacheOutputStream &OS) { + auto Lambda = Ctx.Lambdas.find(AccessorValue[0]); + if (Lambda != Ctx.Lambdas.end()) { + renderLambdas(CurrentCtx, OS, Lambda->getValue()); + } else if (const json::Value *ContextPtr = findContext()) { + EscapeStringStream ES(OS, Ctx.Escapes); + toMustacheString(*ContextPtr, ES); + } +} + +void ASTNode::renderUnescapeVariable(const json::Value &CurrentCtx, + MustacheOutputStream &OS) { + LLVM_DEBUG(dbgs() << "[Render UnescapeVariable] Accessor:" << AccessorValue[0] + << "\n"); + auto Lambda = Ctx.Lambdas.find(AccessorValue[0]); + if (Lambda != Ctx.Lambdas.end()) { + renderLambdas(CurrentCtx, OS, Lambda->getValue()); + } else if (const json::Value *ContextPtr = findContext()) { + OS.suspendIndentation(); + toMustacheString(*ContextPtr, OS); + OS.resumeIndentation(); + } +} + +void ASTNode::renderSection(const json::Value &CurrentCtx, + MustacheOutputStream &OS) { + auto SectionLambda = Ctx.SectionLambdas.find(AccessorValue[0]); + if (SectionLambda != Ctx.SectionLambdas.end()) { + renderSectionLambdas(CurrentCtx, OS, SectionLambda->getValue()); + return; + } + + const json::Value *ContextPtr = findContext(); + if (isContextFalsey(ContextPtr)) + return; + + if (const json::Array *Arr = ContextPtr->getAsArray()) { + for (const json::Value &V : *Arr) + renderChild(V, OS); + return; + } + renderChild(*ContextPtr, OS); +} + +void ASTNode::renderInvertSection(const json::Value &CurrentCtx, + MustacheOutputStream &OS) { + bool IsLambda = Ctx.SectionLambdas.contains(AccessorValue[0]); + const json::Value *ContextPtr = findContext(); + if (isContextFalsey(ContextPtr) && !IsLambda) { + renderChild(CurrentCtx, OS); + } +} + +void ASTNode::render(const llvm::json::Value &Data, MustacheOutputStream &OS) { + if (Ty != Root && Ty != Text && AccessorValue.empty()) + return; // Set the parent context to the incoming context so that we // can walk up the context tree correctly in findContext(). - ParentContext = &CurrentCtx; - const json::Value *ContextPtr = Ty == Root ? 
ParentContext : findContext(); + ParentContext = &Data; switch (Ty) { case Root: - renderChild(CurrentCtx, OS); + renderRoot(Data, OS); return; case Text: - OS << Body; + renderText(OS); return; - case Partial: { - auto Partial = Partials.find(AccessorValue[0]); - if (Partial != Partials.end()) - renderPartial(CurrentCtx, OS, Partial->getValue().get()); + case Partial: + renderPartial(Data, OS); return; - } - case Variable: { - auto Lambda = Lambdas.find(AccessorValue[0]); - if (Lambda != Lambdas.end()) { - renderLambdas(CurrentCtx, OS, Lambda->getValue()); - } else if (ContextPtr) { - EscapeStringStream ES(OS, Escapes); - toMustacheString(*ContextPtr, ES); - } + case Variable: + renderVariable(Data, OS); return; - } - case UnescapeVariable: { - auto Lambda = Lambdas.find(AccessorValue[0]); - if (Lambda != Lambdas.end()) { - renderLambdas(CurrentCtx, OS, Lambda->getValue()); - } else if (ContextPtr) { - toMustacheString(*ContextPtr, OS); - } + case UnescapeVariable: + renderUnescapeVariable(Data, OS); return; - } - case Section: { - auto SectionLambda = SectionLambdas.find(AccessorValue[0]); - bool IsLambda = SectionLambda != SectionLambdas.end(); - - if (IsLambda) { - renderSectionLambdas(CurrentCtx, OS, SectionLambda->getValue()); - return; - } - - if (isContextFalsey(ContextPtr)) - return; - - if (const json::Array *Arr = ContextPtr->getAsArray()) { - for (const json::Value &V : *Arr) - renderChild(V, OS); - return; - } - renderChild(*ContextPtr, OS); + case Section: + renderSection(Data, OS); return; - } - case InvertSection: { - bool IsLambda = SectionLambdas.contains(AccessorValue[0]); - if (isContextFalsey(ContextPtr) && !IsLambda) { - // The context for the children remains unchanged from the parent's, so - // we pass this node's original incoming context. 
- renderChild(CurrentCtx, OS); - } + case InvertSection: + renderInvertSection(Data, OS); return; } - } llvm_unreachable("Invalid ASTNode type"); } @@ -676,27 +856,29 @@ const json::Value *ASTNode::findContext() { return Context; } -void ASTNode::renderChild(const json::Value &Contexts, llvm::raw_ostream &OS) { +void ASTNode::renderChild(const json::Value &Contexts, + MustacheOutputStream &OS) { for (AstPtr &Child : Children) Child->render(Contexts, OS); } -void ASTNode::renderPartial(const json::Value &Contexts, llvm::raw_ostream &OS, - ASTNode *Partial) { +void ASTNode::renderPartial(const json::Value &Contexts, + MustacheOutputStream &OS, ASTNode *Partial) { + LLVM_DEBUG(dbgs() << "[Render Partial Indentation] Indentation: " << Indentation << "\n"); AddIndentationStringStream IS(OS, Indentation); Partial->render(Contexts, IS); } -void ASTNode::renderLambdas(const json::Value &Contexts, llvm::raw_ostream &OS, - Lambda &L) { +void ASTNode::renderLambdas(const json::Value &Contexts, + MustacheOutputStream &OS, Lambda &L) { json::Value LambdaResult = L(); std::string LambdaStr; raw_string_ostream Output(LambdaStr); toMustacheString(LambdaResult, Output); - Parser P = Parser(LambdaStr); - AstPtr LambdaNode = P.parse(Partials, Lambdas, SectionLambdas, Escapes); + Parser P(LambdaStr, Ctx); + AstPtr LambdaNode = P.parse(); - EscapeStringStream ES(OS, Escapes); + EscapeStringStream ES(OS, Ctx.Escapes); if (Ty == Variable) { LambdaNode->render(Contexts, ES); return; @@ -705,39 +887,44 @@ void ASTNode::renderLambdas(const json::Value &Contexts, llvm::raw_ostream &OS, } void ASTNode::renderSectionLambdas(const json::Value &Contexts, - llvm::raw_ostream &OS, SectionLambda &L) { + MustacheOutputStream &OS, SectionLambda &L) { json::Value Return = L(RawBody); if (isFalsey(Return)) return; std::string LambdaStr; raw_string_ostream Output(LambdaStr); toMustacheString(Return, Output); - Parser P = Parser(LambdaStr); - AstPtr LambdaNode = P.parse(Partials, Lambdas, SectionLambdas, Escapes); + Parser P(LambdaStr, Ctx); + AstPtr LambdaNode = P.parse(); LambdaNode->render(Contexts, OS); } void Template::render(const json::Value &Data, llvm::raw_ostream &OS) { - Tree->render(Data, OS); + RawMustacheOutputStream MOS(OS); + Tree->render(Data, MOS); } void Template::registerPartial(std::string Name, std::string Partial) { - Parser P = Parser(Partial); - AstPtr PartialTree = P.parse(Partials, Lambdas, SectionLambdas, Escapes); - Partials.insert(std::make_pair(Name, std::move(PartialTree))); + Parser P(Partial, Ctx); + AstPtr PartialTree = P.parse(); + Ctx.Partials.insert(std::make_pair(Name, std::move(PartialTree))); } -void Template::registerLambda(std::string Name, Lambda L) { Lambdas[Name] = L; } +void Template::registerLambda(std::string Name, Lambda L) { + Ctx.Lambdas[Name] = L; +} void Template::registerLambda(std::string Name, SectionLambda L) { - SectionLambdas[Name] = L; + Ctx.SectionLambdas[Name] = L; } -void Template::overrideEscapeCharacters(EscapeMap E) { Escapes = std::move(E); } +void Template::overrideEscapeCharacters(EscapeMap E) { + Ctx.Escapes = std::move(E); +} Template::Template(StringRef TemplateStr) { - Parser P = Parser(TemplateStr); - Tree = P.parse(Partials, Lambdas, SectionLambdas, Escapes); + Parser P(TemplateStr, Ctx); + Tree = P.parse(); // The default behavior is to escape html entities. 
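For reference, a minimal usage sketch of the Template API whose internals are rethreaded through Ctx above; the public surface (construction, registerPartial, render) is unchanged by this patch, and the include paths are the usual llvm/Support headers:

#include "llvm/Support/JSON.h"
#include "llvm/Support/Mustache.h"
#include "llvm/Support/raw_ostream.h"

static void emitGreeting(llvm::raw_ostream &OS) {
  // Parse the template once; HTML entities are escaped by default.
  llvm::mustache::Template T("Hello, {{> who}}!");
  // Partials are parsed and rendered against the same context.
  T.registerPartial("who", "{{name}}");
  llvm::json::Value Data = llvm::json::Object{{"name", "world"}};
  T.render(Data, OS); // prints "Hello, world!"
}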
const EscapeMap HtmlEntities = {{'&', "&"}, {'<', "<"}, @@ -748,21 +935,18 @@ Template::Template(StringRef TemplateStr) { } Template::Template(Template &&Other) noexcept - : Partials(std::move(Other.Partials)), Lambdas(std::move(Other.Lambdas)), - SectionLambdas(std::move(Other.SectionLambdas)), - Escapes(std::move(Other.Escapes)), Tree(std::move(Other.Tree)) {} + : Ctx(std::move(Other.Ctx)), Tree(std::move(Other.Tree)) {} Template::~Template() = default; Template &Template::operator=(Template &&Other) noexcept { if (this != &Other) { - Partials = std::move(Other.Partials); - Lambdas = std::move(Other.Lambdas); - SectionLambdas = std::move(Other.SectionLambdas); - Escapes = std::move(Other.Escapes); + Ctx = std::move(Other.Ctx); Tree = std::move(Other.Tree); Other.Tree = nullptr; } return *this; } } // namespace llvm::mustache + +#undef DEBUG_TYPE diff --git a/llvm/lib/Support/OptimizedStructLayout.cpp b/llvm/lib/Support/OptimizedStructLayout.cpp index 7b21f927a3462..a3b5c312beaa9 100644 --- a/llvm/lib/Support/OptimizedStructLayout.cpp +++ b/llvm/lib/Support/OptimizedStructLayout.cpp @@ -82,7 +82,7 @@ llvm::performOptimizedStructLayout(MutableArrayRef Fields) { #ifndef NDEBUG checkValidLayout(Fields, Size, MaxAlign); #endif - return std::make_pair(Size, MaxAlign); + return {Size, MaxAlign}; } // Walk over the flexible-offset fields, tracking MaxAlign and @@ -164,7 +164,7 @@ llvm::performOptimizedStructLayout(MutableArrayRef Fields) { #ifndef NDEBUG checkValidLayout(Fields, LastEnd, MaxAlign); #endif - return std::make_pair(LastEnd, MaxAlign); + return {LastEnd, MaxAlign}; } } @@ -452,5 +452,5 @@ llvm::performOptimizedStructLayout(MutableArrayRef Fields) { checkValidLayout(Fields, LastEnd, MaxAlign); #endif - return std::make_pair(LastEnd, MaxAlign); + return {LastEnd, MaxAlign}; } diff --git a/llvm/lib/Support/PGOOptions.cpp b/llvm/lib/Support/PGOOptions.cpp index 5981dff9e0946..ecfb0ca33f16c 100644 --- a/llvm/lib/Support/PGOOptions.cpp +++ b/llvm/lib/Support/PGOOptions.cpp @@ -13,8 +13,7 @@ using namespace llvm; PGOOptions::PGOOptions(std::string ProfileFile, std::string CSProfileGenFile, std::string ProfileRemappingFile, - std::string MemoryProfile, - IntrusiveRefCntPtr FS, PGOAction Action, + std::string MemoryProfile, PGOAction Action, CSPGOAction CSAction, ColdFuncOpt ColdType, bool DebugInfoForProfiling, bool PseudoProbeForProfiling, bool AtomicCounterUpdate) @@ -24,7 +23,7 @@ PGOOptions::PGOOptions(std::string ProfileFile, std::string CSProfileGenFile, DebugInfoForProfiling(DebugInfoForProfiling || (Action == SampleUse && !PseudoProbeForProfiling)), PseudoProbeForProfiling(PseudoProbeForProfiling), - AtomicCounterUpdate(AtomicCounterUpdate), FS(std::move(FS)) { + AtomicCounterUpdate(AtomicCounterUpdate) { // Note, we do allow ProfileFile.empty() for Action=IRUse LTO can // callback with IRUse action without ProfileFile. @@ -47,10 +46,6 @@ PGOOptions::PGOOptions(std::string ProfileFile, std::string CSProfileGenFile, assert(this->Action != NoAction || this->CSAction != NoCSAction || !this->MemoryProfile.empty() || this->DebugInfoForProfiling || this->PseudoProbeForProfiling); - - // If we need to use the profile, the VFS cannot be nullptr. 
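The std::make_pair -> braced-initializer change recurring in this patch (OptimizedStructLayout above; ScaledNumber, SmallPtrSet, SourceMgr, StringExtras, and YAMLParser below) is behavior-preserving at these call sites; a small standalone illustration with made-up names:

#include <cstdint>
#include <utility>

// Both return statements construct the same std::pair<uint64_t, int16_t>;
// the braced form simply reuses the function's declared return type.
static std::pair<uint64_t, int16_t> splitExample(uint64_t Value) {
  int16_t Shift = 0;
  if (Value == 0)
    return {0, Shift}; // was: return std::make_pair(0, Shift);
  return {Value, Shift};
}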
- assert(this->FS || !(this->Action == IRUse || this->CSAction == CSIRUse || - !this->MemoryProfile.empty())); } PGOOptions::PGOOptions(const PGOOptions &) = default; diff --git a/llvm/lib/Support/Path.cpp b/llvm/lib/Support/Path.cpp index 761d29e960887..3e066665f4155 100644 --- a/llvm/lib/Support/Path.cpp +++ b/llvm/lib/Support/Path.cpp @@ -700,6 +700,55 @@ bool is_relative(const Twine &path, Style style) { return !is_absolute(path, style); } +void make_absolute(const Twine ¤t_directory, + SmallVectorImpl &path) { + StringRef p(path.data(), path.size()); + + bool rootDirectory = has_root_directory(p); + bool rootName = has_root_name(p); + + // Already absolute. + if ((rootName || is_style_posix(Style::native)) && rootDirectory) + return; + + // All the following conditions will need the current directory. + SmallString<128> current_dir; + current_directory.toVector(current_dir); + + // Relative path. Prepend the current directory. + if (!rootName && !rootDirectory) { + // Append path to the current directory. + append(current_dir, p); + // Set path to the result. + path.swap(current_dir); + return; + } + + if (!rootName && rootDirectory) { + StringRef cdrn = root_name(current_dir); + SmallString<128> curDirRootName(cdrn.begin(), cdrn.end()); + append(curDirRootName, p); + // Set path to the result. + path.swap(curDirRootName); + return; + } + + if (rootName && !rootDirectory) { + StringRef pRootName = root_name(p); + StringRef bRootDirectory = root_directory(current_dir); + StringRef bRelativePath = relative_path(current_dir); + StringRef pRelativePath = relative_path(p); + + SmallString<128> res; + append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath); + path.swap(res); + return; + } + + llvm_unreachable("All rootName and rootDirectory combinations should have " + "occurred above!"); +} + StringRef remove_leading_dotslash(StringRef Path, Style style) { // Remove leading "./" (or ".//" or "././" etc.) while (Path.size() > 2 && Path[0] == '.' && is_separator(Path[1], style)) { @@ -903,55 +952,6 @@ getPotentiallyUniqueTempFileName(const Twine &Prefix, StringRef Suffix, return createTemporaryFile(Prefix, Suffix, Dummy, ResultPath, FS_Name); } -void make_absolute(const Twine ¤t_directory, - SmallVectorImpl &path) { - StringRef p(path.data(), path.size()); - - bool rootDirectory = path::has_root_directory(p); - bool rootName = path::has_root_name(p); - - // Already absolute. - if ((rootName || is_style_posix(Style::native)) && rootDirectory) - return; - - // All of the following conditions will need the current directory. - SmallString<128> current_dir; - current_directory.toVector(current_dir); - - // Relative path. Prepend the current directory. - if (!rootName && !rootDirectory) { - // Append path to the current directory. - path::append(current_dir, p); - // Set path to the result. - path.swap(current_dir); - return; - } - - if (!rootName && rootDirectory) { - StringRef cdrn = path::root_name(current_dir); - SmallString<128> curDirRootName(cdrn.begin(), cdrn.end()); - path::append(curDirRootName, p); - // Set path to the result. 
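A minimal sketch of the overload this hunk relocates into llvm::sys::path (the body above is unchanged apart from the now-unqualified path helpers); the concrete paths are illustrative:

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"

static void makeAbsoluteExample() {
  llvm::SmallString<128> P("relative/file.txt");
  // P has neither a root name nor a root directory, so the given current
  // directory is prepended.
  llvm::sys::path::make_absolute("/work/dir", P);
  // P is now "/work/dir/relative/file.txt" (POSIX-style separators assumed).
}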
- path.swap(curDirRootName); - return; - } - - if (rootName && !rootDirectory) { - StringRef pRootName = path::root_name(p); - StringRef bRootDirectory = path::root_directory(current_dir); - StringRef bRelativePath = path::relative_path(current_dir); - StringRef pRelativePath = path::relative_path(p); - - SmallString<128> res; - path::append(res, pRootName, bRootDirectory, bRelativePath, pRelativePath); - path.swap(res); - return; - } - - llvm_unreachable("All rootName and rootDirectory combinations should have " - "occurred above!"); -} - std::error_code make_absolute(SmallVectorImpl &path) { if (path::is_absolute(path)) return {}; @@ -960,7 +960,7 @@ std::error_code make_absolute(SmallVectorImpl &path) { if (std::error_code ec = current_path(current_dir)) return ec; - make_absolute(current_dir, path); + path::make_absolute(current_dir, path); return {}; } diff --git a/llvm/lib/Support/ScaledNumber.cpp b/llvm/lib/Support/ScaledNumber.cpp index 4d5923e3634b1..2c99e07660334 100644 --- a/llvm/lib/Support/ScaledNumber.cpp +++ b/llvm/lib/Support/ScaledNumber.cpp @@ -41,7 +41,7 @@ std::pair ScaledNumbers::multiply64(uint64_t LHS, // Check whether the upper digit is empty. if (!Upper) - return std::make_pair(Lower, 0); + return {Lower, 0}; // Shift as little as possible to maximize precision. unsigned LeadingZeros = llvm::countl_zero(Upper); @@ -91,7 +91,7 @@ std::pair ScaledNumbers::divide64(uint64_t Dividend, // Check for powers of two. if (Divisor == 1) - return std::make_pair(Dividend, Shift); + return {Dividend, Shift}; // Maximize size of dividend. if (int Zeros = llvm::countl_zero(Dividend)) { diff --git a/llvm/lib/Support/ScopedPrinter.cpp b/llvm/lib/Support/ScopedPrinter.cpp index a17e397c0aa58..efb61785d17b0 100644 --- a/llvm/lib/Support/ScopedPrinter.cpp +++ b/llvm/lib/Support/ScopedPrinter.cpp @@ -1,12 +1,17 @@ -#include "llvm/Support/ScopedPrinter.h" +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "llvm/Support/ScopedPrinter.h" #include "llvm/Support/Format.h" -using namespace llvm::support; +using namespace llvm; -namespace llvm { - -raw_ostream &operator<<(raw_ostream &OS, const HexNumber &Value) { +raw_ostream &llvm::operator<<(raw_ostream &OS, const HexNumber &Value) { OS << "0x" << utohexstr(Value.Value); return OS; } @@ -45,5 +50,3 @@ JSONScopedPrinter::JSONScopedPrinter( if (this->OuterScope) this->OuterScope->setPrinter(*this); } - -} // namespace llvm diff --git a/llvm/lib/Support/SipHash.cpp b/llvm/lib/Support/SipHash.cpp index 86dad66420435..382d36f0a8da5 100644 --- a/llvm/lib/Support/SipHash.cpp +++ b/llvm/lib/Support/SipHash.cpp @@ -35,14 +35,19 @@ void llvm::getSipHash_2_4_128(ArrayRef In, const uint8_t (&K)[16], siphash<2, 4>(In.data(), In.size(), K, Out); } -/// Compute an ABI-stable 16-bit hash of the given string. -uint16_t llvm::getPointerAuthStableSipHash(StringRef Str) { +/// Compute an ABI-stable 64-bit hash of the given string. 
+uint64_t llvm::getStableSipHash(StringRef Str) { static const uint8_t K[16] = {0xb5, 0xd4, 0xc9, 0xeb, 0x79, 0x10, 0x4a, 0x79, 0x6f, 0xec, 0x8b, 0x1b, 0x42, 0x87, 0x81, 0xd4}; uint8_t RawHashBytes[8]; getSipHash_2_4_64(arrayRefFromStringRef(Str), K, RawHashBytes); - uint64_t RawHash = endian::read64le(RawHashBytes); + return endian::read64le(RawHashBytes); +} + +/// Compute an ABI-stable 16-bit hash of the given string. +uint16_t llvm::getPointerAuthStableSipHash(StringRef Str) { + uint64_t RawHash = getStableSipHash(Str); // Produce a non-zero 16-bit discriminator. uint16_t Discriminator = (RawHash % 0xFFFF) + 1; diff --git a/llvm/lib/Support/SmallPtrSet.cpp b/llvm/lib/Support/SmallPtrSet.cpp index a602165a0753c..e377dbf4a6999 100644 --- a/llvm/lib/Support/SmallPtrSet.cpp +++ b/llvm/lib/Support/SmallPtrSet.cpp @@ -52,7 +52,7 @@ SmallPtrSetImplBase::insert_imp_big(const void *Ptr) { // Okay, we know we have space. Find a hash bucket. const void **Bucket = const_cast(FindBucketFor(Ptr)); if (*Bucket == Ptr) - return std::make_pair(Bucket, false); // Already inserted, good. + return {Bucket, false}; // Already inserted, good. // Otherwise, insert it! if (*Bucket == getTombstoneMarker()) @@ -60,7 +60,7 @@ SmallPtrSetImplBase::insert_imp_big(const void *Ptr) { ++NumEntries; *Bucket = Ptr; incrementEpoch(); - return std::make_pair(Bucket, true); + return {Bucket, true}; } const void *const *SmallPtrSetImplBase::doFind(const void *Ptr) const { diff --git a/llvm/lib/Support/SourceMgr.cpp b/llvm/lib/Support/SourceMgr.cpp index 3f97213d86c05..a43cf37a79824 100644 --- a/llvm/lib/Support/SourceMgr.cpp +++ b/llvm/lib/Support/SourceMgr.cpp @@ -202,7 +202,7 @@ SourceMgr::getLineAndColumn(SMLoc Loc, unsigned BufferID) const { size_t NewlineOffs = StringRef(BufStart, Ptr - BufStart).find_last_of("\n\r"); if (NewlineOffs == StringRef::npos) NewlineOffs = ~(size_t)0; - return std::make_pair(LineNo, Ptr - BufStart - NewlineOffs); + return {LineNo, Ptr - BufStart - NewlineOffs}; } // FIXME: Note that the formatting of source locations is spread between diff --git a/llvm/lib/Support/StringExtras.cpp b/llvm/lib/Support/StringExtras.cpp index 6ae26267337b1..5058c08aff64a 100644 --- a/llvm/lib/Support/StringExtras.cpp +++ b/llvm/lib/Support/StringExtras.cpp @@ -44,7 +44,7 @@ std::pair llvm::getToken(StringRef Source, // Find the next occurrence of the delimiter. StringRef::size_type End = Source.find_first_of(Delimiters, Start); - return std::make_pair(Source.slice(Start, End), Source.substr(End)); + return {Source.slice(Start, End), Source.substr(End)}; } /// SplitString - Split up the specified string according to the specified diff --git a/llvm/lib/Support/StringMap.cpp b/llvm/lib/Support/StringMap.cpp index 3432dc15ceef2..4aee30cd484e0 100644 --- a/llvm/lib/Support/StringMap.cpp +++ b/llvm/lib/Support/StringMap.cpp @@ -83,7 +83,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name, // Hash table unallocated so far? 
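The split above makes the derivation of the 16-bit pointer-auth discriminator from the new 64-bit stable hash explicit; a small sketch of that relationship, assuming getStableSipHash is declared alongside the existing functions in llvm/Support/SipHash.h:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SipHash.h"
#include <cassert>
#include <cstdint>

static void checkDiscriminator(llvm::StringRef Str) {
  uint64_t Hash64 = llvm::getStableSipHash(Str);
  uint16_t Disc = llvm::getPointerAuthStableSipHash(Str);
  // (Hash64 % 0xFFFF) lies in [0, 0xFFFE], so the discriminator is never zero.
  assert(Disc == static_cast<uint16_t>((Hash64 % 0xFFFF) + 1) && Disc != 0);
}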
if (NumBuckets == 0) init(16); - if (shouldReverseIterate()) + if constexpr (shouldReverseIterate()) FullHashValue = ~FullHashValue; unsigned BucketNo = FullHashValue & (NumBuckets - 1); unsigned *HashTable = getHashTable(TheTable, NumBuckets); @@ -142,7 +142,7 @@ int StringMapImpl::FindKey(StringRef Key, uint32_t FullHashValue) const { #ifdef EXPENSIVE_CHECKS assert(FullHashValue == hash(Key)); #endif - if (shouldReverseIterate()) + if constexpr (shouldReverseIterate()) FullHashValue = ~FullHashValue; unsigned BucketNo = FullHashValue & (NumBuckets - 1); unsigned *HashTable = getHashTable(TheTable, NumBuckets); diff --git a/llvm/lib/Support/TextEncoding.cpp b/llvm/lib/Support/TextEncoding.cpp index b4ee0f8ee8bfd..804ff07f6e9a8 100644 --- a/llvm/lib/Support/TextEncoding.cpp +++ b/llvm/lib/Support/TextEncoding.cpp @@ -161,7 +161,7 @@ TextEncodingConverterICU::convertString(StringRef Source, EC = U_ZERO_ERROR; const char *Input = In; - Output = InputLength ? static_cast(Result.data()) : nullptr; + Output = static_cast(Result.data()); ucnv_convertEx(&*ToConvDesc, &*FromConvDesc, &Output, Result.end(), &Input, In + InputLength, /*pivotStart=*/NULL, /*pivotSource=*/NULL, /*pivotTarget=*/NULL, @@ -172,8 +172,10 @@ TextEncodingConverterICU::convertString(StringRef Source, if (Capacity < Result.max_size()) { HandleOverflow(Capacity, Output, OutputLength, Result); continue; - } else + } else { + Result.resize(Output - Result.data()); return std::error_code(E2BIG, std::generic_category()); + } } // Some other error occured. Result.resize(Output - Result.data()); @@ -268,10 +270,8 @@ TextEncodingConverterIconv::convertString(StringRef Source, }; do { - // Setup the input. Use nullptr to reset iconv state if input length is - // zero. size_t InputLength = Source.size(); - char *Input = const_cast(InputLength ? 
Source.data() : ""); + char *Input = const_cast(Source.data()); Ret = iconv(ConvDesc, &Input, &InputLength, &Output, &OutputLength); if (Ret != 0) { if (auto EC = HandleError(Ret)) diff --git a/llvm/lib/Support/VirtualFileSystem.cpp b/llvm/lib/Support/VirtualFileSystem.cpp index cf784595c2f1c..c754b30d8de4a 100644 --- a/llvm/lib/Support/VirtualFileSystem.cpp +++ b/llvm/lib/Support/VirtualFileSystem.cpp @@ -133,7 +133,7 @@ std::error_code FileSystem::makeAbsolute(SmallVectorImpl &Path) const { if (!WorkingDir) return WorkingDir.getError(); - llvm::sys::fs::make_absolute(WorkingDir.get(), Path); + sys::path::make_absolute(WorkingDir.get(), Path); return {}; } @@ -300,7 +300,7 @@ class RealFileSystem : public FileSystem { if (!WD || !*WD) return Path; Path.toVector(Storage); - sys::fs::make_absolute(WD->get().Resolved, Storage); + sys::path::make_absolute(WD->get().Resolved, Storage); return Storage; } @@ -1908,7 +1908,12 @@ class llvm::vfs::RedirectingFileSystemParser { FullPath = FS->getOverlayFileDir(); assert(!FullPath.empty() && "External contents prefix directory must exist"); - llvm::sys::path::append(FullPath, Value); + SmallString<256> AbsFullPath = Value; + if (FS->makeAbsolute(FullPath, AbsFullPath)) { + error(N, "failed to make 'external-contents' absolute"); + return nullptr; + } + FullPath = AbsFullPath; } else { FullPath = Value; } @@ -1973,7 +1978,7 @@ class llvm::vfs::RedirectingFileSystemParser { EC = FS->makeAbsolute(FullPath, Name); Name = canonicalize(Name); } else { - EC = sys::fs::make_absolute(Name); + EC = FS->makeAbsolute(Name); } if (EC) { assert(NameValueNode && "Name presence should be checked earlier"); @@ -2204,7 +2209,7 @@ RedirectingFileSystem::create(std::unique_ptr Buffer, // FS->OverlayFileDir => //dummy.cache/vfs // SmallString<256> OverlayAbsDir = sys::path::parent_path(YAMLFilePath); - std::error_code EC = llvm::sys::fs::make_absolute(OverlayAbsDir); + std::error_code EC = FS->makeAbsolute(OverlayAbsDir); assert(!EC && "Overlay dir final path must be absolute"); (void)EC; FS->setOverlayFileDir(OverlayAbsDir); diff --git a/llvm/lib/Support/YAMLParser.cpp b/llvm/lib/Support/YAMLParser.cpp index fa5db46598905..6734877802caf 100644 --- a/llvm/lib/Support/YAMLParser.cpp +++ b/llvm/lib/Support/YAMLParser.cpp @@ -59,7 +59,7 @@ using EncodingInfo = std::pair; /// and how long the byte order mark is if one exists. 
static EncodingInfo getUnicodeEncoding(StringRef Input) { if (Input.empty()) - return std::make_pair(UEF_Unknown, 0); + return {UEF_Unknown, 0}; switch (uint8_t(Input[0])) { case 0x00: @@ -67,44 +67,44 @@ static EncodingInfo getUnicodeEncoding(StringRef Input) { if ( Input[1] == 0 && uint8_t(Input[2]) == 0xFE && uint8_t(Input[3]) == 0xFF) - return std::make_pair(UEF_UTF32_BE, 4); + return {UEF_UTF32_BE, 4}; if (Input[1] == 0 && Input[2] == 0 && Input[3] != 0) - return std::make_pair(UEF_UTF32_BE, 0); + return {UEF_UTF32_BE, 0}; } if (Input.size() >= 2 && Input[1] != 0) - return std::make_pair(UEF_UTF16_BE, 0); - return std::make_pair(UEF_Unknown, 0); + return {UEF_UTF16_BE, 0}; + return {UEF_Unknown, 0}; case 0xFF: if ( Input.size() >= 4 && uint8_t(Input[1]) == 0xFE && Input[2] == 0 && Input[3] == 0) - return std::make_pair(UEF_UTF32_LE, 4); + return {UEF_UTF32_LE, 4}; if (Input.size() >= 2 && uint8_t(Input[1]) == 0xFE) - return std::make_pair(UEF_UTF16_LE, 2); - return std::make_pair(UEF_Unknown, 0); + return {UEF_UTF16_LE, 2}; + return {UEF_Unknown, 0}; case 0xFE: if (Input.size() >= 2 && uint8_t(Input[1]) == 0xFF) - return std::make_pair(UEF_UTF16_BE, 2); - return std::make_pair(UEF_Unknown, 0); + return {UEF_UTF16_BE, 2}; + return {UEF_Unknown, 0}; case 0xEF: if ( Input.size() >= 3 && uint8_t(Input[1]) == 0xBB && uint8_t(Input[2]) == 0xBF) - return std::make_pair(UEF_UTF8, 3); - return std::make_pair(UEF_Unknown, 0); + return {UEF_UTF8, 3}; + return {UEF_Unknown, 0}; } // It could still be utf-32 or utf-16. if (Input.size() >= 4 && Input[1] == 0 && Input[2] == 0 && Input[3] == 0) - return std::make_pair(UEF_UTF32_LE, 0); + return {UEF_UTF32_LE, 0}; if (Input.size() >= 2 && Input[1] == 0) - return std::make_pair(UEF_UTF16_LE, 0); + return {UEF_UTF16_LE, 0}; - return std::make_pair(UEF_UTF8, 0); + return {UEF_UTF8, 0}; } /// Pin the vtables to this file. @@ -199,7 +199,7 @@ static UTF8Decoded decodeUTF8(StringRef Range) { // 1 byte: [0x00, 0x7f] // Bit pattern: 0xxxxxxx if (Position < End && (*Position & 0x80) == 0) { - return std::make_pair(*Position, 1); + return {*Position, 1}; } // 2 bytes: [0x80, 0x7ff] // Bit pattern: 110xxxxx 10xxxxxx @@ -208,7 +208,7 @@ static UTF8Decoded decodeUTF8(StringRef Range) { uint32_t codepoint = ((*Position & 0x1F) << 6) | (*(Position + 1) & 0x3F); if (codepoint >= 0x80) - return std::make_pair(codepoint, 2); + return {codepoint, 2}; } // 3 bytes: [0x8000, 0xffff] // Bit pattern: 1110xxxx 10xxxxxx 10xxxxxx @@ -222,7 +222,7 @@ static UTF8Decoded decodeUTF8(StringRef Range) { // they are high / low surrogate halves used by UTF-16. 
if (codepoint >= 0x800 && (codepoint < 0xD800 || codepoint > 0xDFFF)) - return std::make_pair(codepoint, 3); + return {codepoint, 3}; } // 4 bytes: [0x10000, 0x10FFFF] // Bit pattern: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx @@ -235,9 +235,9 @@ static UTF8Decoded decodeUTF8(StringRef Range) { ((*(Position + 2) & 0x3F) << 6) | (*(Position + 3) & 0x3F); if (codepoint >= 0x10000 && codepoint <= 0x10FFFF) - return std::make_pair(codepoint, 4); + return {codepoint, 4}; } - return std::make_pair(0, 0); + return {0, 0}; } namespace llvm { diff --git a/llvm/lib/Support/YAMLTraits.cpp b/llvm/lib/Support/YAMLTraits.cpp index 035828b594e84..95a41eafdf5e4 100644 --- a/llvm/lib/Support/YAMLTraits.cpp +++ b/llvm/lib/Support/YAMLTraits.cpp @@ -144,7 +144,7 @@ std::vector Input::keys() { return Ret; } -bool Input::preflightKey(const char *Key, bool Required, bool, bool &UseDefault, +bool Input::preflightKey(StringRef Key, bool Required, bool, bool &UseDefault, void *&SaveInfo) { UseDefault = false; if (EC) @@ -168,7 +168,7 @@ bool Input::preflightKey(const char *Key, bool Required, bool, bool &UseDefault, UseDefault = true; return false; } - MN->ValidKeys.push_back(Key); + MN->ValidKeys.push_back(Key.str()); HNode *Value = MN->Mapping[Key].first; if (!Value) { if (Required) @@ -266,7 +266,7 @@ void Input::beginEnumScalar() { ScalarMatchFound = false; } -bool Input::matchEnumScalar(const char *Str, bool) { +bool Input::matchEnumScalar(StringRef Str, bool) { if (ScalarMatchFound) return false; if (ScalarHNode *SN = dyn_cast(CurrentNode)) { @@ -302,7 +302,7 @@ bool Input::beginBitSetScalar(bool &DoClear) { return true; } -bool Input::bitSetMatch(const char *Str, bool) { +bool Input::bitSetMatch(StringRef Str, bool) { if (EC) return false; if (SequenceHNode *SQ = dyn_cast(CurrentNode)) { @@ -541,7 +541,7 @@ std::vector Output::keys() { report_fatal_error("invalid call"); } -bool Output::preflightKey(const char *Key, bool Required, bool SameAsDefault, +bool Output::preflightKey(StringRef Key, bool Required, bool SameAsDefault, bool &UseDefault, void *&SaveInfo) { UseDefault = false; SaveInfo = nullptr; @@ -666,7 +666,7 @@ void Output::beginEnumScalar() { EnumerationMatchFound = false; } -bool Output::matchEnumScalar(const char *Str, bool Match) { +bool Output::matchEnumScalar(StringRef Str, bool Match) { if (Match && !EnumerationMatchFound) { newLineCheck(); outputUpToEndOfLine(Str); @@ -695,7 +695,7 @@ bool Output::beginBitSetScalar(bool &DoClear) { return true; } -bool Output::bitSetMatch(const char *Str, bool Matches) { +bool Output::bitSetMatch(StringRef Str, bool Matches) { if (Matches) { if (NeedBitValueComma) output(", "); diff --git a/llvm/lib/Support/rpmalloc/rpmalloc.h b/llvm/lib/Support/rpmalloc/rpmalloc.h index 3911c53b779b3..5b7fe1ff4286b 100644 --- a/llvm/lib/Support/rpmalloc/rpmalloc.h +++ b/llvm/lib/Support/rpmalloc/rpmalloc.h @@ -1,428 +1,428 @@ -//===---------------------- rpmalloc.h ------------------*- C -*-=============// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This library provides a cross-platform lock free thread caching malloc -// implementation in C11. 
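The const char* -> StringRef migration in the YAMLTraits hunks above affects the key, enum, and bitset entry points that yaml::IO traits call into; a sketch of a typical MappingTraits specialization whose string-literal keys now bind to those StringRef overloads (the Point type is made up for illustration):

#include "llvm/Support/YAMLTraits.h"

struct Point {
  int X = 0;
  int Y = 0;
};

namespace llvm {
namespace yaml {
template <> struct MappingTraits<Point> {
  static void mapping(IO &IO, Point &P) {
    // Each key is forwarded to Input/Output::preflightKey(StringRef, ...).
    IO.mapRequired("x", P.X);
    IO.mapOptional("y", P.Y, 0);
  }
};
} // namespace yaml
} // namespace llvm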
-// -//===----------------------------------------------------------------------===// - -#pragma once - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#if defined(__clang__) || defined(__GNUC__) -#define RPMALLOC_EXPORT __attribute__((visibility("default"))) -#define RPMALLOC_ALLOCATOR -#if (defined(__clang_major__) && (__clang_major__ < 4)) || \ - (defined(__GNUC__) && defined(ENABLE_PRELOAD) && ENABLE_PRELOAD) -#define RPMALLOC_ATTRIB_MALLOC -#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) -#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) -#else -#define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__)) -#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size))) -#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) \ - __attribute__((alloc_size(count, size))) -#endif -#define RPMALLOC_CDECL -#elif defined(_MSC_VER) -#define RPMALLOC_EXPORT -#define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict) -#define RPMALLOC_ATTRIB_MALLOC -#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) -#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) -#define RPMALLOC_CDECL __cdecl -#else -#define RPMALLOC_EXPORT -#define RPMALLOC_ALLOCATOR -#define RPMALLOC_ATTRIB_MALLOC -#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) -#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) -#define RPMALLOC_CDECL -#endif - -//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes. Will introduce -// a very small overhead due to some size calculations not being compile time -// constants -#ifndef RPMALLOC_CONFIGURABLE -#define RPMALLOC_CONFIGURABLE 0 -#endif - -//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_* -//! functions). -// Will introduce a very small overhead to track fully allocated spans in heaps -#ifndef RPMALLOC_FIRST_CLASS_HEAPS -#define RPMALLOC_FIRST_CLASS_HEAPS 0 -#endif - -//! Flag to rpaligned_realloc to not preserve content in reallocation -#define RPMALLOC_NO_PRESERVE 1 -//! Flag to rpaligned_realloc to fail and return null pointer if grow cannot be -//! done in-place, -// in which case the original pointer is still valid (just like a call to -// realloc which failes to allocate a new block). -#define RPMALLOC_GROW_OR_FAIL 2 - -typedef struct rpmalloc_global_statistics_t { - //! Current amount of virtual memory mapped, all of which might not have been - //! committed (only if ENABLE_STATISTICS=1) - size_t mapped; - //! Peak amount of virtual memory mapped, all of which might not have been - //! committed (only if ENABLE_STATISTICS=1) - size_t mapped_peak; - //! Current amount of memory in global caches for small and medium sizes - //! (<32KiB) - size_t cached; - //! Current amount of memory allocated in huge allocations, i.e larger than - //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1) - size_t huge_alloc; - //! Peak amount of memory allocated in huge allocations, i.e larger than - //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1) - size_t huge_alloc_peak; - //! Total amount of memory mapped since initialization (only if - //! ENABLE_STATISTICS=1) - size_t mapped_total; - //! Total amount of memory unmapped since initialization (only if - //! ENABLE_STATISTICS=1) - size_t unmapped_total; -} rpmalloc_global_statistics_t; - -typedef struct rpmalloc_thread_statistics_t { - //! Current number of bytes available in thread size class caches for small - //! and medium sizes (<32KiB) - size_t sizecache; - //! Current number of bytes available in thread span caches for small and - //! 
medium sizes (<32KiB) - size_t spancache; - //! Total number of bytes transitioned from thread cache to global cache (only - //! if ENABLE_STATISTICS=1) - size_t thread_to_global; - //! Total number of bytes transitioned from global cache to thread cache (only - //! if ENABLE_STATISTICS=1) - size_t global_to_thread; - //! Per span count statistics (only if ENABLE_STATISTICS=1) - struct { - //! Currently used number of spans - size_t current; - //! High water mark of spans used - size_t peak; - //! Number of spans transitioned to global cache - size_t to_global; - //! Number of spans transitioned from global cache - size_t from_global; - //! Number of spans transitioned to thread cache - size_t to_cache; - //! Number of spans transitioned from thread cache - size_t from_cache; - //! Number of spans transitioned to reserved state - size_t to_reserved; - //! Number of spans transitioned from reserved state - size_t from_reserved; - //! Number of raw memory map calls (not hitting the reserve spans but - //! resulting in actual OS mmap calls) - size_t map_calls; - } span_use[64]; - //! Per size class statistics (only if ENABLE_STATISTICS=1) - struct { - //! Current number of allocations - size_t alloc_current; - //! Peak number of allocations - size_t alloc_peak; - //! Total number of allocations - size_t alloc_total; - //! Total number of frees - size_t free_total; - //! Number of spans transitioned to cache - size_t spans_to_cache; - //! Number of spans transitioned from cache - size_t spans_from_cache; - //! Number of spans transitioned from reserved state - size_t spans_from_reserved; - //! Number of raw memory map calls (not hitting the reserve spans but - //! resulting in actual OS mmap calls) - size_t map_calls; - } size_use[128]; -} rpmalloc_thread_statistics_t; - -typedef struct rpmalloc_config_t { - //! Map memory pages for the given number of bytes. The returned address MUST - //! be - // aligned to the rpmalloc span size, which will always be a power of two. - // Optionally the function can store an alignment offset in the offset - // variable in case it performs alignment and the returned pointer is offset - // from the actual start of the memory region due to this alignment. The - // alignment offset will be passed to the memory unmap function. The - // alignment offset MUST NOT be larger than 65535 (storable in an uint16_t), - // if it is you must use natural alignment to shift it into 16 bits. If you - // set a memory_map function, you must also set a memory_unmap function or - // else the default implementation will be used for both. This function must - // be thread safe, it can be called by multiple threads simultaneously. - void *(*memory_map)(size_t size, size_t *offset); - //! Unmap the memory pages starting at address and spanning the given number - //! of bytes. - // If release is set to non-zero, the unmap is for an entire span range as - // returned by a previous call to memory_map and that the entire range should - // be released. The release argument holds the size of the entire span range. - // If release is set to 0, the unmap is a partial decommit of a subset of the - // mapped memory range. If you set a memory_unmap function, you must also set - // a memory_map function or else the default implementation will be used for - // both. This function must be thread safe, it can be called by multiple - // threads simultaneously. - void (*memory_unmap)(void *address, size_t size, size_t offset, - size_t release); - //! 
Called when an assert fails, if asserts are enabled. Will use the standard - //! assert() - // if this is not set. - void (*error_callback)(const char *message); - //! Called when a call to map memory pages fails (out of memory). If this - //! callback is - // not set or returns zero the library will return a null pointer in the - // allocation call. If this callback returns non-zero the map call will be - // retried. The argument passed is the number of bytes that was requested in - // the map call. Only used if the default system memory map function is used - // (memory_map callback is not set). - int (*map_fail_callback)(size_t size); - //! Size of memory pages. The page size MUST be a power of two. All memory - //! mapping - // requests to memory_map will be made with size set to a multiple of the - // page size. Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system - // page size is used. - size_t page_size; - //! Size of a span of memory blocks. MUST be a power of two, and in - //! [4096,262144] - // range (unless 0 - set to 0 to use the default span size). Used if - // RPMALLOC_CONFIGURABLE is defined to 1. - size_t span_size; - //! Number of spans to map at each request to map new virtual memory blocks. - //! This can - // be used to minimize the system call overhead at the cost of virtual memory - // address space. The extra mapped pages will not be written until actually - // used, so physical committed memory should not be affected in the default - // implementation. Will be aligned to a multiple of spans that match memory - // page size in case of huge pages. - size_t span_map_count; - //! Enable use of large/huge pages. If this flag is set to non-zero and page - //! size is - // zero, the allocator will try to enable huge pages and auto detect the - // configuration. If this is set to non-zero and page_size is also non-zero, - // the allocator will assume huge pages have been configured and enabled - // prior to initializing the allocator. For Windows, see - // https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support - // For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt - int enable_huge_pages; - //! Respectively allocated pages and huge allocated pages names for systems - // supporting it to be able to distinguish among anonymous regions. - const char *page_name; - const char *huge_page_name; -} rpmalloc_config_t; - -//! Initialize allocator with default configuration -RPMALLOC_EXPORT int rpmalloc_initialize(void); - -//! Initialize allocator with given configuration -RPMALLOC_EXPORT int rpmalloc_initialize_config(const rpmalloc_config_t *config); - -//! Get allocator configuration -RPMALLOC_EXPORT const rpmalloc_config_t *rpmalloc_config(void); - -//! Finalize allocator -RPMALLOC_EXPORT void rpmalloc_finalize(void); - -//! Initialize allocator for calling thread -RPMALLOC_EXPORT void rpmalloc_thread_initialize(void); - -//! Finalize allocator for calling thread -RPMALLOC_EXPORT void rpmalloc_thread_finalize(int release_caches); - -//! Perform deferred deallocations pending for the calling thread heap -RPMALLOC_EXPORT void rpmalloc_thread_collect(void); - -//! Query if allocator is initialized for calling thread -RPMALLOC_EXPORT int rpmalloc_is_thread_initialized(void); - -//! Get per-thread statistics -RPMALLOC_EXPORT void -rpmalloc_thread_statistics(rpmalloc_thread_statistics_t *stats); - -//! Get global statistics -RPMALLOC_EXPORT void -rpmalloc_global_statistics(rpmalloc_global_statistics_t *stats); - -//! 
Dump all statistics in human readable format to file (should be a FILE*) -RPMALLOC_EXPORT void rpmalloc_dump_statistics(void *file); - -//! Allocate a memory block of at least the given size -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1); - -//! Free the given memory block -RPMALLOC_EXPORT void rpfree(void *ptr); - -//! Allocate a memory block of at least the given size and zero initialize it -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2); - -//! Reallocate the given block to at least the given size -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rprealloc(void *ptr, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(2); - -//! Reallocate the given block to at least the given size and alignment, -// with optional control flags (see RPMALLOC_NO_PRESERVE). -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpaligned_realloc(void *ptr, size_t alignment, size_t size, size_t oldsize, - unsigned int flags) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(3); - -//! Allocate a memory block of at least the given size and alignment. -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(2); - -//! Allocate a memory block of at least the given size and alignment, and zero -//! initialize it. -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpaligned_calloc(size_t alignment, size_t num, - size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); - -//! Allocate a memory block of at least the given size and alignment. -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(2); - -//! Allocate a memory block of at least the given size and alignment. -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT int rpposix_memalign(void **memptr, size_t alignment, - size_t size); - -//! Query the usable size of the given memory block (from given pointer to the -//! end of block) -RPMALLOC_EXPORT size_t rpmalloc_usable_size(void *ptr); - -//! Dummy empty function for forcing linker symbol inclusion -RPMALLOC_EXPORT void rpmalloc_linker_reference(void); - -#if RPMALLOC_FIRST_CLASS_HEAPS - -//! 
Heap type -typedef struct heap_t rpmalloc_heap_t; - -//! Acquire a new heap. Will reuse existing released heaps or allocate memory -//! for a new heap -// if none available. Heap API is implemented with the strict assumption that -// only one single thread will call heap functions for a given heap at any -// given time, no functions are thread safe. -RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_heap_acquire(void); - -//! Release a heap (does NOT free the memory allocated by the heap, use -//! rpmalloc_heap_free_all before destroying the heap). -// Releasing a heap will enable it to be reused by other threads. Safe to pass -// a null pointer. -RPMALLOC_EXPORT void rpmalloc_heap_release(rpmalloc_heap_t *heap); - -//! Allocate a memory block of at least the given size using the given heap. -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_alloc(rpmalloc_heap_t *heap, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(2); - -//! Allocate a memory block of at least the given size using the given heap. The -//! returned -// block will have the requested alignment. Alignment must be a power of two -// and a multiple of sizeof(void*), and should ideally be less than memory page -// size. A caveat of rpmalloc internals is that this must also be strictly less -// than the span size (default 64KiB). -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_aligned_alloc(rpmalloc_heap_t *heap, size_t alignment, - size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(3); - -//! Allocate a memory block of at least the given size using the given heap and -//! zero initialize it. -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_calloc(rpmalloc_heap_t *heap, size_t num, - size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); - -//! Allocate a memory block of at least the given size using the given heap and -//! zero initialize it. The returned -// block will have the requested alignment. Alignment must either be zero, or a -// power of two and a multiple of sizeof(void*), and should ideally be less -// than memory page size. A caveat of rpmalloc internals is that this must also -// be strictly less than the span size (default 64KiB). -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_aligned_calloc(rpmalloc_heap_t *heap, size_t alignment, - size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); - -//! Reallocate the given block to at least the given size. The memory block MUST -//! be allocated -// by the same heap given to this function. -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_realloc(rpmalloc_heap_t *heap, void *ptr, size_t size, - unsigned int flags) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(3); - -//! Reallocate the given block to at least the given size. The memory block MUST -//! be allocated -// by the same heap given to this function. The returned block will have the -// requested alignment. Alignment must be either zero, or a power of two and a -// multiple of sizeof(void*), and should ideally be less than memory page size. -// A caveat of rpmalloc internals is that this must also be strictly less than -// the span size (default 64KiB). -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *rpmalloc_heap_aligned_realloc( - rpmalloc_heap_t *heap, void *ptr, size_t alignment, size_t size, - unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4); - -//! Free the given memory block from the given heap. The memory block MUST be -//! 
allocated -// by the same heap given to this function. -RPMALLOC_EXPORT void rpmalloc_heap_free(rpmalloc_heap_t *heap, void *ptr); - -//! Free all memory allocated by the heap -RPMALLOC_EXPORT void rpmalloc_heap_free_all(rpmalloc_heap_t *heap); - -//! Set the given heap as the current heap for the calling thread. A heap MUST -//! only be current heap -// for a single thread, a heap can never be shared between multiple threads. -// The previous current heap for the calling thread is released to be reused by -// other threads. -RPMALLOC_EXPORT void rpmalloc_heap_thread_set_current(rpmalloc_heap_t *heap); - -//! Returns which heap the given pointer is allocated on -RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_get_heap_for_ptr(void *ptr); - -#endif - -#ifdef __cplusplus -} -#endif +//===---------------------- rpmalloc.h ------------------*- C -*-=============// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This library provides a cross-platform lock free thread caching malloc +// implementation in C11. +// +//===----------------------------------------------------------------------===// + +#pragma once + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__clang__) || defined(__GNUC__) +#define RPMALLOC_EXPORT __attribute__((visibility("default"))) +#define RPMALLOC_ALLOCATOR +#if (defined(__clang_major__) && (__clang_major__ < 4)) || \ + (defined(__GNUC__) && defined(ENABLE_PRELOAD) && ENABLE_PRELOAD) +#define RPMALLOC_ATTRIB_MALLOC +#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) +#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) +#else +#define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__)) +#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size))) +#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) \ + __attribute__((alloc_size(count, size))) +#endif +#define RPMALLOC_CDECL +#elif defined(_MSC_VER) +#define RPMALLOC_EXPORT +#define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict) +#define RPMALLOC_ATTRIB_MALLOC +#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) +#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) +#define RPMALLOC_CDECL __cdecl +#else +#define RPMALLOC_EXPORT +#define RPMALLOC_ALLOCATOR +#define RPMALLOC_ATTRIB_MALLOC +#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) +#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) +#define RPMALLOC_CDECL +#endif + +//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes. Will introduce +// a very small overhead due to some size calculations not being compile time +// constants +#ifndef RPMALLOC_CONFIGURABLE +#define RPMALLOC_CONFIGURABLE 0 +#endif + +//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_* +//! functions). +// Will introduce a very small overhead to track fully allocated spans in heaps +#ifndef RPMALLOC_FIRST_CLASS_HEAPS +#define RPMALLOC_FIRST_CLASS_HEAPS 0 +#endif + +//! Flag to rpaligned_realloc to not preserve content in reallocation +#define RPMALLOC_NO_PRESERVE 1 +//! Flag to rpaligned_realloc to fail and return null pointer if grow cannot be +//! done in-place, +// in which case the original pointer is still valid (just like a call to +// realloc which failes to allocate a new block). +#define RPMALLOC_GROW_OR_FAIL 2 + +typedef struct rpmalloc_global_statistics_t { + //! 
Current amount of virtual memory mapped, all of which might not have been + //! committed (only if ENABLE_STATISTICS=1) + size_t mapped; + //! Peak amount of virtual memory mapped, all of which might not have been + //! committed (only if ENABLE_STATISTICS=1) + size_t mapped_peak; + //! Current amount of memory in global caches for small and medium sizes + //! (<32KiB) + size_t cached; + //! Current amount of memory allocated in huge allocations, i.e larger than + //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1) + size_t huge_alloc; + //! Peak amount of memory allocated in huge allocations, i.e larger than + //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1) + size_t huge_alloc_peak; + //! Total amount of memory mapped since initialization (only if + //! ENABLE_STATISTICS=1) + size_t mapped_total; + //! Total amount of memory unmapped since initialization (only if + //! ENABLE_STATISTICS=1) + size_t unmapped_total; +} rpmalloc_global_statistics_t; + +typedef struct rpmalloc_thread_statistics_t { + //! Current number of bytes available in thread size class caches for small + //! and medium sizes (<32KiB) + size_t sizecache; + //! Current number of bytes available in thread span caches for small and + //! medium sizes (<32KiB) + size_t spancache; + //! Total number of bytes transitioned from thread cache to global cache (only + //! if ENABLE_STATISTICS=1) + size_t thread_to_global; + //! Total number of bytes transitioned from global cache to thread cache (only + //! if ENABLE_STATISTICS=1) + size_t global_to_thread; + //! Per span count statistics (only if ENABLE_STATISTICS=1) + struct { + //! Currently used number of spans + size_t current; + //! High water mark of spans used + size_t peak; + //! Number of spans transitioned to global cache + size_t to_global; + //! Number of spans transitioned from global cache + size_t from_global; + //! Number of spans transitioned to thread cache + size_t to_cache; + //! Number of spans transitioned from thread cache + size_t from_cache; + //! Number of spans transitioned to reserved state + size_t to_reserved; + //! Number of spans transitioned from reserved state + size_t from_reserved; + //! Number of raw memory map calls (not hitting the reserve spans but + //! resulting in actual OS mmap calls) + size_t map_calls; + } span_use[64]; + //! Per size class statistics (only if ENABLE_STATISTICS=1) + struct { + //! Current number of allocations + size_t alloc_current; + //! Peak number of allocations + size_t alloc_peak; + //! Total number of allocations + size_t alloc_total; + //! Total number of frees + size_t free_total; + //! Number of spans transitioned to cache + size_t spans_to_cache; + //! Number of spans transitioned from cache + size_t spans_from_cache; + //! Number of spans transitioned from reserved state + size_t spans_from_reserved; + //! Number of raw memory map calls (not hitting the reserve spans but + //! resulting in actual OS mmap calls) + size_t map_calls; + } size_use[128]; +} rpmalloc_thread_statistics_t; + +typedef struct rpmalloc_config_t { + //! Map memory pages for the given number of bytes. The returned address MUST + //! be + // aligned to the rpmalloc span size, which will always be a power of two. + // Optionally the function can store an alignment offset in the offset + // variable in case it performs alignment and the returned pointer is offset + // from the actual start of the memory region due to this alignment. 
The + // alignment offset will be passed to the memory unmap function. The + // alignment offset MUST NOT be larger than 65535 (storable in an uint16_t), + // if it is you must use natural alignment to shift it into 16 bits. If you + // set a memory_map function, you must also set a memory_unmap function or + // else the default implementation will be used for both. This function must + // be thread safe, it can be called by multiple threads simultaneously. + void *(*memory_map)(size_t size, size_t *offset); + //! Unmap the memory pages starting at address and spanning the given number + //! of bytes. + // If release is set to non-zero, the unmap is for an entire span range as + // returned by a previous call to memory_map and that the entire range should + // be released. The release argument holds the size of the entire span range. + // If release is set to 0, the unmap is a partial decommit of a subset of the + // mapped memory range. If you set a memory_unmap function, you must also set + // a memory_map function or else the default implementation will be used for + // both. This function must be thread safe, it can be called by multiple + // threads simultaneously. + void (*memory_unmap)(void *address, size_t size, size_t offset, + size_t release); + //! Called when an assert fails, if asserts are enabled. Will use the standard + //! assert() + // if this is not set. + void (*error_callback)(const char *message); + //! Called when a call to map memory pages fails (out of memory). If this + //! callback is + // not set or returns zero the library will return a null pointer in the + // allocation call. If this callback returns non-zero the map call will be + // retried. The argument passed is the number of bytes that was requested in + // the map call. Only used if the default system memory map function is used + // (memory_map callback is not set). + int (*map_fail_callback)(size_t size); + //! Size of memory pages. The page size MUST be a power of two. All memory + //! mapping + // requests to memory_map will be made with size set to a multiple of the + // page size. Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system + // page size is used. + size_t page_size; + //! Size of a span of memory blocks. MUST be a power of two, and in + //! [4096,262144] + // range (unless 0 - set to 0 to use the default span size). Used if + // RPMALLOC_CONFIGURABLE is defined to 1. + size_t span_size; + //! Number of spans to map at each request to map new virtual memory blocks. + //! This can + // be used to minimize the system call overhead at the cost of virtual memory + // address space. The extra mapped pages will not be written until actually + // used, so physical committed memory should not be affected in the default + // implementation. Will be aligned to a multiple of spans that match memory + // page size in case of huge pages. + size_t span_map_count; + //! Enable use of large/huge pages. If this flag is set to non-zero and page + //! size is + // zero, the allocator will try to enable huge pages and auto detect the + // configuration. If this is set to non-zero and page_size is also non-zero, + // the allocator will assume huge pages have been configured and enabled + // prior to initializing the allocator. For Windows, see + // https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support + // For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt + int enable_huge_pages; + //! 
Respectively allocated pages and huge allocated pages names for systems + // supporting it to be able to distinguish among anonymous regions. + const char *page_name; + const char *huge_page_name; +} rpmalloc_config_t; + +//! Initialize allocator with default configuration +RPMALLOC_EXPORT int rpmalloc_initialize(void); + +//! Initialize allocator with given configuration +RPMALLOC_EXPORT int rpmalloc_initialize_config(const rpmalloc_config_t *config); + +//! Get allocator configuration +RPMALLOC_EXPORT const rpmalloc_config_t *rpmalloc_config(void); + +//! Finalize allocator +RPMALLOC_EXPORT void rpmalloc_finalize(void); + +//! Initialize allocator for calling thread +RPMALLOC_EXPORT void rpmalloc_thread_initialize(void); + +//! Finalize allocator for calling thread +RPMALLOC_EXPORT void rpmalloc_thread_finalize(int release_caches); + +//! Perform deferred deallocations pending for the calling thread heap +RPMALLOC_EXPORT void rpmalloc_thread_collect(void); + +//! Query if allocator is initialized for calling thread +RPMALLOC_EXPORT int rpmalloc_is_thread_initialized(void); + +//! Get per-thread statistics +RPMALLOC_EXPORT void +rpmalloc_thread_statistics(rpmalloc_thread_statistics_t *stats); + +//! Get global statistics +RPMALLOC_EXPORT void +rpmalloc_global_statistics(rpmalloc_global_statistics_t *stats); + +//! Dump all statistics in human readable format to file (should be a FILE*) +RPMALLOC_EXPORT void rpmalloc_dump_statistics(void *file); + +//! Allocate a memory block of at least the given size +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1); + +//! Free the given memory block +RPMALLOC_EXPORT void rpfree(void *ptr); + +//! Allocate a memory block of at least the given size and zero initialize it +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2); + +//! Reallocate the given block to at least the given size +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rprealloc(void *ptr, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(2); + +//! Reallocate the given block to at least the given size and alignment, +// with optional control flags (see RPMALLOC_NO_PRESERVE). +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpaligned_realloc(void *ptr, size_t alignment, size_t size, size_t oldsize, + unsigned int flags) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(3); + +//! Allocate a memory block of at least the given size and alignment. +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(2); + +//! Allocate a memory block of at least the given size and alignment, and zero +//! initialize it. +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. 
A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpaligned_calloc(size_t alignment, size_t num, + size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); + +//! Allocate a memory block of at least the given size and alignment. +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(2); + +//! Allocate a memory block of at least the given size and alignment. +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT int rpposix_memalign(void **memptr, size_t alignment, + size_t size); + +//! Query the usable size of the given memory block (from given pointer to the +//! end of block) +RPMALLOC_EXPORT size_t rpmalloc_usable_size(void *ptr); + +//! Dummy empty function for forcing linker symbol inclusion +RPMALLOC_EXPORT void rpmalloc_linker_reference(void); + +#if RPMALLOC_FIRST_CLASS_HEAPS + +//! Heap type +typedef struct heap_t rpmalloc_heap_t; + +//! Acquire a new heap. Will reuse existing released heaps or allocate memory +//! for a new heap +// if none available. Heap API is implemented with the strict assumption that +// only one single thread will call heap functions for a given heap at any +// given time, no functions are thread safe. +RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_heap_acquire(void); + +//! Release a heap (does NOT free the memory allocated by the heap, use +//! rpmalloc_heap_free_all before destroying the heap). +// Releasing a heap will enable it to be reused by other threads. Safe to pass +// a null pointer. +RPMALLOC_EXPORT void rpmalloc_heap_release(rpmalloc_heap_t *heap); + +//! Allocate a memory block of at least the given size using the given heap. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_alloc(rpmalloc_heap_t *heap, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(2); + +//! Allocate a memory block of at least the given size using the given heap. The +//! returned +// block will have the requested alignment. Alignment must be a power of two +// and a multiple of sizeof(void*), and should ideally be less than memory page +// size. A caveat of rpmalloc internals is that this must also be strictly less +// than the span size (default 64KiB). +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_aligned_alloc(rpmalloc_heap_t *heap, size_t alignment, + size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(3); + +//! Allocate a memory block of at least the given size using the given heap and +//! zero initialize it. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_calloc(rpmalloc_heap_t *heap, size_t num, + size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); + +//! Allocate a memory block of at least the given size using the given heap and +//! zero initialize it. The returned +// block will have the requested alignment. Alignment must either be zero, or a +// power of two and a multiple of sizeof(void*), and should ideally be less +// than memory page size. 
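// Illustrative aside (editorial, not part of the patch): a hedged sketch of
// the per-thread lifecycle and the basic allocation entry points declared
// above; the 64-byte alignment respects the documented caveat of staying
// strictly below the span size (default 64KiB).
#include <stdio.h>
#include "rpmalloc.h"

static void example_basic_usage(void) {
  rpmalloc_initialize();        /* process-wide setup with default config */
  rpmalloc_thread_initialize(); /* per-thread setup before allocating */

  void *block = rpmalloc(100);
  printf("usable size: %zu\n", rpmalloc_usable_size(block)); /* >= 100 */
  block = rprealloc(block, 1000);

  void *aligned = rpaligned_alloc(64, 256); /* power-of-two alignment */
  rpfree(aligned);
  rpfree(block);

  rpmalloc_dump_statistics(stdout); /* takes a FILE*, per the comment above */
  rpmalloc_thread_finalize(1);      /* 1 = release thread caches */
  rpmalloc_finalize();
}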
A caveat of rpmalloc internals is that this must also +// be strictly less than the span size (default 64KiB). +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_aligned_calloc(rpmalloc_heap_t *heap, size_t alignment, + size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); + +//! Reallocate the given block to at least the given size. The memory block MUST +//! be allocated +// by the same heap given to this function. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_realloc(rpmalloc_heap_t *heap, void *ptr, size_t size, + unsigned int flags) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(3); + +//! Reallocate the given block to at least the given size. The memory block MUST +//! be allocated +// by the same heap given to this function. The returned block will have the +// requested alignment. Alignment must be either zero, or a power of two and a +// multiple of sizeof(void*), and should ideally be less than memory page size. +// A caveat of rpmalloc internals is that this must also be strictly less than +// the span size (default 64KiB). +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *rpmalloc_heap_aligned_realloc( + rpmalloc_heap_t *heap, void *ptr, size_t alignment, size_t size, + unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4); + +//! Free the given memory block from the given heap. The memory block MUST be +//! allocated +// by the same heap given to this function. +RPMALLOC_EXPORT void rpmalloc_heap_free(rpmalloc_heap_t *heap, void *ptr); + +//! Free all memory allocated by the heap +RPMALLOC_EXPORT void rpmalloc_heap_free_all(rpmalloc_heap_t *heap); + +//! Set the given heap as the current heap for the calling thread. A heap MUST +//! only be current heap +// for a single thread, a heap can never be shared between multiple threads. +// The previous current heap for the calling thread is released to be reused by +// other threads. +RPMALLOC_EXPORT void rpmalloc_heap_thread_set_current(rpmalloc_heap_t *heap); + +//! Returns which heap the given pointer is allocated on +RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_get_heap_for_ptr(void *ptr); + +#endif + +#ifdef __cplusplus +} +#endif diff --git a/llvm/lib/Support/rpmalloc/rpnew.h b/llvm/lib/Support/rpmalloc/rpnew.h index d8303c6f95652..a18f0799d56d1 100644 --- a/llvm/lib/Support/rpmalloc/rpnew.h +++ b/llvm/lib/Support/rpmalloc/rpnew.h @@ -1,113 +1,113 @@ -//===-------------------------- rpnew.h -----------------*- C -*-=============// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This library provides a cross-platform lock free thread caching malloc -// implementation in C11. 
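// Illustrative aside (editorial, not part of the patch): a sketch of the
// first-class heap API documented above, available when
// RPMALLOC_FIRST_CLASS_HEAPS is enabled. Per the comments, a heap must only
// ever be used by one thread at a time.
#include "rpmalloc.h"

#if RPMALLOC_FIRST_CLASS_HEAPS
static void example_heap_usage(void) {
  rpmalloc_heap_t *heap = rpmalloc_heap_acquire();

  void *a = rpmalloc_heap_alloc(heap, 128);
  void *b = rpmalloc_heap_aligned_alloc(heap, 64, 512);
  rpmalloc_heap_free(heap, a);  /* blocks must be freed via the owning heap */
  (void)b;

  rpmalloc_heap_free_all(heap); /* bulk-release everything the heap owns */
  rpmalloc_heap_release(heap);  /* make the heap reusable by other threads */
}
#endif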
-// -//===----------------------------------------------------------------------===// - -#ifdef __cplusplus - -#include -#include - -#ifndef __CRTDECL -#define __CRTDECL -#endif - -extern void __CRTDECL operator delete(void *p) noexcept { rpfree(p); } - -extern void __CRTDECL operator delete[](void *p) noexcept { rpfree(p); } - -extern void *__CRTDECL operator new(std::size_t size) noexcept(false) { - return rpmalloc(size); -} - -extern void *__CRTDECL operator new[](std::size_t size) noexcept(false) { - return rpmalloc(size); -} - -extern void *__CRTDECL operator new(std::size_t size, - const std::nothrow_t &tag) noexcept { - (void)sizeof(tag); - return rpmalloc(size); -} - -extern void *__CRTDECL operator new[](std::size_t size, - const std::nothrow_t &tag) noexcept { - (void)sizeof(tag); - return rpmalloc(size); -} - -#if (__cplusplus >= 201402L || _MSC_VER >= 1916) - -extern void __CRTDECL operator delete(void *p, std::size_t size) noexcept { - (void)sizeof(size); - rpfree(p); -} - -extern void __CRTDECL operator delete[](void *p, std::size_t size) noexcept { - (void)sizeof(size); - rpfree(p); -} - -#endif - -#if (__cplusplus > 201402L || defined(__cpp_aligned_new)) - -extern void __CRTDECL operator delete(void *p, - std::align_val_t align) noexcept { - (void)sizeof(align); - rpfree(p); -} - -extern void __CRTDECL operator delete[](void *p, - std::align_val_t align) noexcept { - (void)sizeof(align); - rpfree(p); -} - -extern void __CRTDECL operator delete(void *p, std::size_t size, - std::align_val_t align) noexcept { - (void)sizeof(size); - (void)sizeof(align); - rpfree(p); -} - -extern void __CRTDECL operator delete[](void *p, std::size_t size, - std::align_val_t align) noexcept { - (void)sizeof(size); - (void)sizeof(align); - rpfree(p); -} - -extern void *__CRTDECL operator new(std::size_t size, - std::align_val_t align) noexcept(false) { - return rpaligned_alloc(static_cast(align), size); -} - -extern void *__CRTDECL operator new[](std::size_t size, - std::align_val_t align) noexcept(false) { - return rpaligned_alloc(static_cast(align), size); -} - -extern void *__CRTDECL operator new(std::size_t size, std::align_val_t align, - const std::nothrow_t &tag) noexcept { - (void)sizeof(tag); - return rpaligned_alloc(static_cast(align), size); -} - -extern void *__CRTDECL operator new[](std::size_t size, std::align_val_t align, - const std::nothrow_t &tag) noexcept { - (void)sizeof(tag); - return rpaligned_alloc(static_cast(align), size); -} - -#endif - -#endif +//===-------------------------- rpnew.h -----------------*- C -*-=============// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This library provides a cross-platform lock free thread caching malloc +// implementation in C11. 
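// Illustrative aside (editorial, not part of the patch): the rpnew.h header in
// this hunk replaces the global operator new/delete family with rpmalloc
// calls. A hedged sketch of ordinary C++ code that would be rerouted once
// exactly one translation unit includes the header:
#include <new>
// #include "rpnew.h"  /* uncomment in the one TU that provides the operators */

struct alignas(64) Padded {
  char data[64];
};

void example_new_delete() {
  int *n = new int(42);            // would dispatch to rpmalloc(size)
  delete n;                        // would dispatch to rpfree(p)
  Padded *p = new Padded();        // over-aligned type: rpaligned_alloc path
  delete p;                        // sized/aligned delete: still rpfree
  int *q = new (std::nothrow) int; // nothrow overloads are also replaced
  delete q;
}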
+// +//===----------------------------------------------------------------------===// + +#ifdef __cplusplus + +#include +#include + +#ifndef __CRTDECL +#define __CRTDECL +#endif + +extern void __CRTDECL operator delete(void *p) noexcept { rpfree(p); } + +extern void __CRTDECL operator delete[](void *p) noexcept { rpfree(p); } + +extern void *__CRTDECL operator new(std::size_t size) noexcept(false) { + return rpmalloc(size); +} + +extern void *__CRTDECL operator new[](std::size_t size) noexcept(false) { + return rpmalloc(size); +} + +extern void *__CRTDECL operator new(std::size_t size, + const std::nothrow_t &tag) noexcept { + (void)sizeof(tag); + return rpmalloc(size); +} + +extern void *__CRTDECL operator new[](std::size_t size, + const std::nothrow_t &tag) noexcept { + (void)sizeof(tag); + return rpmalloc(size); +} + +#if (__cplusplus >= 201402L || _MSC_VER >= 1916) + +extern void __CRTDECL operator delete(void *p, std::size_t size) noexcept { + (void)sizeof(size); + rpfree(p); +} + +extern void __CRTDECL operator delete[](void *p, std::size_t size) noexcept { + (void)sizeof(size); + rpfree(p); +} + +#endif + +#if (__cplusplus > 201402L || defined(__cpp_aligned_new)) + +extern void __CRTDECL operator delete(void *p, + std::align_val_t align) noexcept { + (void)sizeof(align); + rpfree(p); +} + +extern void __CRTDECL operator delete[](void *p, + std::align_val_t align) noexcept { + (void)sizeof(align); + rpfree(p); +} + +extern void __CRTDECL operator delete(void *p, std::size_t size, + std::align_val_t align) noexcept { + (void)sizeof(size); + (void)sizeof(align); + rpfree(p); +} + +extern void __CRTDECL operator delete[](void *p, std::size_t size, + std::align_val_t align) noexcept { + (void)sizeof(size); + (void)sizeof(align); + rpfree(p); +} + +extern void *__CRTDECL operator new(std::size_t size, + std::align_val_t align) noexcept(false) { + return rpaligned_alloc(static_cast(align), size); +} + +extern void *__CRTDECL operator new[](std::size_t size, + std::align_val_t align) noexcept(false) { + return rpaligned_alloc(static_cast(align), size); +} + +extern void *__CRTDECL operator new(std::size_t size, std::align_val_t align, + const std::nothrow_t &tag) noexcept { + (void)sizeof(tag); + return rpaligned_alloc(static_cast(align), size); +} + +extern void *__CRTDECL operator new[](std::size_t size, std::align_val_t align, + const std::nothrow_t &tag) noexcept { + (void)sizeof(tag); + return rpaligned_alloc(static_cast(align), size); +} + +#endif + +#endif diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp index 79655e1c9529c..0f4bbfc3d610e 100644 --- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -1610,7 +1610,8 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB, int BaseOffset = -AFI->getTaggedBasePointerOffset(); Register FrameReg; StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference( - MF, BaseOffset, false /*isFixed*/, false /*isSVE*/, FrameReg, + MF, BaseOffset, false /*isFixed*/, TargetStackID::Default /*StackID*/, + FrameReg, /*PreferFP=*/false, /*ForSimm=*/true); Register SrcReg = FrameReg; diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index ab5c6f3c0a19d..8d6eb91d74375 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -56,15 +56,20 @@ // | async context if needed | // | (a.k.a. 
"frame record") | // |-----------------------------------| <- fp(=x29) -// | | -// |-----------------------------------| -// | | -// | callee-saved fp/simd/SVE regs | -// | | -// |-----------------------------------| -// | | -// | SVE stack objects | -// | | +// Default SVE stack layout Split SVE objects +// (aarch64-split-sve-objects=false) (aarch64-split-sve-objects=true) +// |-----------------------------------| |-----------------------------------| +// | | | callee-saved PPR registers | +// |-----------------------------------| |-----------------------------------| +// | | | PPR stack objects | +// | callee-saved fp/simd/SVE regs | |-----------------------------------| +// | | | | +// |-----------------------------------| |-----------------------------------| +// | | | callee-saved ZPR/FPR registers | +// | SVE stack objects | |-----------------------------------| +// | | | ZPR stack objects | +// |-----------------------------------| |-----------------------------------| +// ^ NB: FPR CSRs are promoted to ZPRs // |-----------------------------------| // |.empty.space.to.make.part.below....| // |.aligned.in.case.it.needs.more.than| (size of this area is unknown at @@ -274,6 +279,11 @@ static cl::opt OrderFrameObjects("aarch64-order-frame-objects", cl::desc("sort stack allocations"), cl::init(true), cl::Hidden); +static cl::opt + SplitSVEObjects("aarch64-split-sve-objects", + cl::desc("Split allocation of ZPR & PPR objects"), + cl::init(false), cl::Hidden); + cl::opt EnableHomogeneousPrologEpilog( "homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " @@ -324,7 +334,41 @@ AArch64FrameLowering::getArgumentStackToRestore(MachineFunction &MF, static bool produceCompactUnwindFrame(const AArch64FrameLowering &, MachineFunction &MF); -// Conservatively, returns true if the function is likely to have an SVE vectors +enum class AssignObjectOffsets { No, Yes }; +/// Process all the SVE stack objects and the SVE stack size and offsets for +/// each object. If AssignOffsets is "Yes", the offsets get assigned (and SVE +/// stack sizes set). Returns the size of the SVE stack. +static SVEStackSizes determineSVEStackSizes(MachineFunction &MF, + AssignObjectOffsets AssignOffsets); + +static unsigned getStackHazardSize(const MachineFunction &MF) { + return MF.getSubtarget().getStreamingHazardSize(); +} + +/// Returns true if PPRs are spilled as ZPRs. +static bool arePPRsSpilledAsZPR(const MachineFunction &MF) { + return MF.getSubtarget().getRegisterInfo()->getSpillSize( + AArch64::PPRRegClass) == 16; +} + +StackOffset +AArch64FrameLowering::getZPRStackSize(const MachineFunction &MF) const { + const AArch64FunctionInfo *AFI = MF.getInfo(); + return StackOffset::getScalable(AFI->getStackSizeZPR()); +} + +StackOffset +AArch64FrameLowering::getPPRStackSize(const MachineFunction &MF) const { + // With split SVE objects, the hazard padding is added to the PPR region, + // which places it between the [GPR, PPR] area and the [ZPR, FPR] area. This + // avoids hazards between both GPRs and FPRs and ZPRs and PPRs. + const AArch64FunctionInfo *AFI = MF.getInfo(); + return StackOffset::get(AFI->hasSplitSVEObjects() ? getStackHazardSize(MF) + : 0, + AFI->getStackSizePPR()); +} + +// Conservatively, returns true if the function is likely to have SVE vectors // on the stack. This function is safe to be called before callee-saves or // object offsets have been determined. 
static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL, @@ -338,7 +382,7 @@ static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL, const MachineFrameInfo &MFI = MF.getFrameInfo(); for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); FI++) { - if (MFI.getStackID(FI) == TargetStackID::ScalableVector) + if (MFI.hasScalableStackID(FI)) return true; } @@ -482,13 +526,6 @@ AArch64FrameLowering::getFixedObjectSize(const MachineFunction &MF, } } -/// Returns the size of the entire SVE stackframe (calleesaves + spills). -StackOffset -AArch64FrameLowering::getSVEStackSize(const MachineFunction &MF) const { - const AArch64FunctionInfo *AFI = MF.getInfo(); - return StackOffset::getScalable((int64_t)AFI->getStackSizeSVE()); -} - bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { if (!EnableRedZone) return false; @@ -514,7 +551,7 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { !Subtarget.hasSVE(); return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize || - getSVEStackSize(MF) || LowerQRegCopyThroughMem); + AFI->hasSVEStackSize() || LowerQRegCopyThroughMem); } /// hasFPImpl - Return true if the specified function should have a dedicated @@ -557,7 +594,7 @@ bool AArch64FrameLowering::hasFPImpl(const MachineFunction &MF) const { // CFA in either of these cases. if (AFI.needsDwarfUnwindInfo(MF) && ((requiresSaveVG(MF) || AFI.getSMEFnAttrs().hasStreamingBody()) && - (!AFI.hasCalculatedStackSizeSVE() || AFI.getStackSizeSVE() > 0))) + (!AFI.hasCalculatedStackSizeSVE() || AFI.hasSVEStackSize()))) return true; // With large callframes around we may need to use FP to access the scavenging // emergency spillslot. @@ -1126,10 +1163,6 @@ static bool isTargetWindows(const MachineFunction &MF) { return MF.getSubtarget().isTargetWindows(); } -static unsigned getStackHazardSize(const MachineFunction &MF) { - return MF.getSubtarget().getStreamingHazardSize(); -} - void AArch64FrameLowering::emitPacRetPlusLeafHardening( MachineFunction &MF) const { const AArch64Subtarget &Subtarget = MF.getSubtarget(); @@ -1212,7 +1245,9 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF, const auto &MFI = MF.getFrameInfo(); int64_t ObjectOffset = MFI.getObjectOffset(FI); - StackOffset SVEStackSize = getSVEStackSize(MF); + StackOffset ZPRStackSize = getZPRStackSize(MF); + StackOffset PPRStackSize = getPPRStackSize(MF); + StackOffset SVEStackSize = ZPRStackSize + PPRStackSize; // For VLA-area objects, just emit an offset at the end of the stack frame. // Whilst not quite correct, these objects do live at the end of the frame and @@ -1228,11 +1263,21 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF, const auto *AFI = MF.getInfo(); bool FPAfterSVECalleeSaves = isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize(); - if (MFI.getStackID(FI) == TargetStackID::ScalableVector) { + if (MFI.hasScalableStackID(FI)) { if (FPAfterSVECalleeSaves && - -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize()) + -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize()) { + assert(!AFI->hasSplitSVEObjects() && + "split-sve-objects not supported with FPAfterSVECalleeSaves"); return StackOffset::getScalable(ObjectOffset); - return StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()), + } + StackOffset AccessOffset{}; + // The scalable vectors are below (lower address) the scalable predicates + // with split SVE objects, so we must subtract the size of the predicates. 
+ if (AFI->hasSplitSVEObjects() && + MFI.getStackID(FI) == TargetStackID::ScalableVector) + AccessOffset = -PPRStackSize; + return AccessOffset + + StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()), ObjectOffset); } @@ -1294,14 +1339,15 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference( const auto &MFI = MF.getFrameInfo(); int64_t ObjectOffset = MFI.getObjectOffset(FI); bool isFixed = MFI.isFixedObjectIndex(FI); - bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector; - return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg, - PreferFP, ForSimm); + auto StackID = static_cast(MFI.getStackID(FI)); + return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, StackID, + FrameReg, PreferFP, ForSimm); } StackOffset AArch64FrameLowering::resolveFrameOffsetReference( - const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE, - Register &FrameReg, bool PreferFP, bool ForSimm) const { + const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, + TargetStackID::Value StackID, Register &FrameReg, bool PreferFP, + bool ForSimm) const { const auto &MFI = MF.getFrameInfo(); const auto *RegInfo = static_cast( MF.getSubtarget().getRegisterInfo()); @@ -1312,8 +1358,11 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed(); bool isCSR = !isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI)); + bool isSVE = MFI.isScalableStackID(StackID); - const StackOffset &SVEStackSize = getSVEStackSize(MF); + StackOffset ZPRStackSize = getZPRStackSize(MF); + StackOffset PPRStackSize = getPPRStackSize(MF); + StackOffset SVEStackSize = ZPRStackSize + PPRStackSize; // Use frame pointer to reference fixed objects. Use it for locals if // there are VLAs or a dynamically realigned SP (and thus the SP isn't @@ -1388,12 +1437,25 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize(); if (isSVE) { - StackOffset FPOffset = - StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset); + StackOffset FPOffset = StackOffset::get( + -AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset); StackOffset SPOffset = SVEStackSize + StackOffset::get(MFI.getStackSize() - AFI->getCalleeSavedStackSize(), ObjectOffset); + + // With split SVE objects the ObjectOffset is relative to the split area + // (i.e. the PPR area or ZPR area respectively). + if (AFI->hasSplitSVEObjects() && StackID == TargetStackID::ScalableVector) { + // If we're accessing an SVE vector with split SVE objects... + // - From the FP we need to move down past the PPR area: + FPOffset -= PPRStackSize; + // - From the SP we only need to move up to the ZPR area: + SPOffset -= PPRStackSize; + // Note: `SPOffset = SVEStackSize + ...`, so `-= PPRStackSize` results in + // `SPOffset = ZPRStackSize + ...`. + } + if (FPAfterSVECalleeSaves) { FPOffset += StackOffset::getScalable(AFI->getSVECalleeSavedStackSize()); if (-ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize()) { @@ -1401,6 +1463,7 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( SPOffset += StackOffset::getFixed(AFI->getCalleeSavedStackSize()); } } + // Always use the FP for SVE spills if available and beneficial. 
if (hasFP(MF) && (SPOffset.getFixed() || FPOffset.getScalable() < SPOffset.getScalable() || @@ -1408,13 +1471,13 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( FrameReg = RegInfo->getFrameRegister(MF); return FPOffset; } - FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister() : (unsigned)AArch64::SP; + return SPOffset; } - StackOffset ScalableOffset = {}; + StackOffset SVEAreaOffset = {}; if (FPAfterSVECalleeSaves) { // In this stack layout, the FP is in between the callee saves and other // SVE allocations. @@ -1422,25 +1485,25 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( StackOffset::getScalable(AFI->getSVECalleeSavedStackSize()); if (UseFP) { if (isFixed) - ScalableOffset = SVECalleeSavedStack; + SVEAreaOffset = SVECalleeSavedStack; else if (!isCSR) - ScalableOffset = SVECalleeSavedStack - SVEStackSize; + SVEAreaOffset = SVECalleeSavedStack - SVEStackSize; } else { if (isFixed) - ScalableOffset = SVEStackSize; + SVEAreaOffset = SVEStackSize; else if (isCSR) - ScalableOffset = SVEStackSize - SVECalleeSavedStack; + SVEAreaOffset = SVEStackSize - SVECalleeSavedStack; } } else { if (UseFP && !(isFixed || isCSR)) - ScalableOffset = -SVEStackSize; + SVEAreaOffset = -SVEStackSize; if (!UseFP && (isFixed || isCSR)) - ScalableOffset = SVEStackSize; + SVEAreaOffset = SVEStackSize; } if (UseFP) { FrameReg = RegInfo->getFrameRegister(MF); - return StackOffset::getFixed(FPOffset) + ScalableOffset; + return StackOffset::getFixed(FPOffset) + SVEAreaOffset; } // Use the base pointer if we have one. @@ -1457,7 +1520,7 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( Offset -= AFI->getLocalStackSize(); } - return StackOffset::getFixed(Offset) + ScalableOffset; + return StackOffset::getFixed(Offset) + SVEAreaOffset; } static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) { @@ -1614,11 +1677,25 @@ void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL, RegInc = -1; FirstReg = Count - 1; } + bool FPAfterSVECalleeSaves = IsWindows && AFI->getSVECalleeSavedStackSize(); - int ScalableByteOffset = - FPAfterSVECalleeSaves ? 0 : AFI->getSVECalleeSavedStackSize(); + + int ZPRByteOffset = 0; + int PPRByteOffset = 0; + bool SplitPPRs = AFI->hasSplitSVEObjects(); + if (SplitPPRs) { + ZPRByteOffset = AFI->getZPRCalleeSavedStackSize(); + PPRByteOffset = AFI->getPPRCalleeSavedStackSize(); + } else if (!FPAfterSVECalleeSaves) { + ZPRByteOffset = + AFI->getZPRCalleeSavedStackSize() + AFI->getPPRCalleeSavedStackSize(); + // Unused: Everything goes in ZPR space. + PPRByteOffset = 0; + } + bool NeedGapToAlignStack = AFI->hasCalleeSaveStackFreeSpace(); Register LastReg = 0; + bool HasCSHazardPadding = AFI->hasStackHazardSlotIndex() && !SplitPPRs; // When iterating backwards, the loop condition relies on unsigned wraparound. for (unsigned i = FirstReg; i < Count; i += RegInc) { @@ -1647,8 +1724,12 @@ void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL, llvm_unreachable("Unsupported register class."); } + int &ScalableByteOffset = RPI.Type == RegPairInfo::PPR && SplitPPRs + ? PPRByteOffset + : ZPRByteOffset; + // Add the stack hazard size as we transition from GPR->FPR CSRs. 
- if (AFI->hasStackHazardSlotIndex() && + if (HasCSHazardPadding && (!LastReg || !AArch64InstrInfo::isFpOrNEON(LastReg)) && AArch64InstrInfo::isFpOrNEON(RPI.Reg1)) ByteOffset += StackFillDir * StackHazardSize; @@ -1656,7 +1737,7 @@ void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL, int Scale = TRI->getSpillSize(*RPI.RC); // Add the next reg to the pair if it is in the same register class. - if (unsigned(i + RegInc) < Count && !AFI->hasStackHazardSlotIndex()) { + if (unsigned(i + RegInc) < Count && !HasCSHazardPadding) { MCRegister NextReg = CSI[i + RegInc].getReg(); bool IsFirst = i == FirstReg; switch (RPI.Type) { @@ -2021,10 +2102,14 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters( } // Update the StackIDs of the SVE stack slots. MachineFrameInfo &MFI = MF.getFrameInfo(); - if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) { + if (RPI.Type == RegPairInfo::ZPR) { MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector); if (RPI.isPaired()) MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector); + } else if (RPI.Type == RegPairInfo::PPR) { + MFI.setStackID(FrameIdxReg1, TargetStackID::ScalablePredicateVector); + if (RPI.isPaired()) + MFI.setStackID(FrameIdxReg2, TargetStackID::ScalablePredicateVector); } } return true; @@ -2199,6 +2284,13 @@ static std::optional getLdStFrameID(const MachineInstr &MI, return getMMOFrameID(*MI.memoperands_begin(), MFI); } +// Returns true if the LDST MachineInstr \p MI is a PPR access. +static bool isPPRAccess(const MachineInstr &MI) { + return MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO && + MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO && + AArch64::PPRRegClass.contains(MI.getOperand(0).getReg()); +} + // Check if a Hazard slot is needed for the current function, and if so create // one for it. The index is stored in AArch64FunctionInfo->StackHazardSlotIndex, // which can be used to determine if any hazard padding is needed. @@ -2222,26 +2314,50 @@ void AArch64FrameLowering::determineStackHazardSlot( bool HasFPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) { return AArch64::FPR64RegClass.contains(Reg) || AArch64::FPR128RegClass.contains(Reg) || - AArch64::ZPRRegClass.contains(Reg) || - AArch64::PPRRegClass.contains(Reg); + AArch64::ZPRRegClass.contains(Reg); + }); + bool HasPPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) { + return AArch64::PPRRegClass.contains(Reg); }); bool HasFPRStackObjects = false; - if (!HasFPRCSRs) { - std::vector FrameObjects(MFI.getObjectIndexEnd()); + bool HasPPRStackObjects = false; + if (!HasFPRCSRs || SplitSVEObjects) { + enum SlotType : uint8_t { + Unknown = 0, + ZPRorFPR = 1 << 0, + PPR = 1 << 1, + GPR = 1 << 2, + LLVM_MARK_AS_BITMASK_ENUM(GPR) + }; + + // Find stack slots solely used for one kind of register (ZPR, PPR, etc.), + // based on the kinds of accesses used in the function. + SmallVector SlotTypes(MFI.getObjectIndexEnd(), SlotType::Unknown); for (auto &MBB : MF) { for (auto &MI : MBB) { std::optional FI = getLdStFrameID(MI, MFI); - if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) { - if (MFI.getStackID(*FI) == TargetStackID::ScalableVector || - AArch64InstrInfo::isFpOrNEON(MI)) - FrameObjects[*FI] |= 2; - else - FrameObjects[*FI] |= 1; + if (!FI || FI < 0 || FI > int(SlotTypes.size())) + continue; + if (MFI.hasScalableStackID(*FI)) { + SlotTypes[*FI] |= + isPPRAccess(MI) ? SlotType::PPR : SlotType::ZPRorFPR; + } else { + SlotTypes[*FI] |= AArch64InstrInfo::isFpOrNEON(MI) + ? 
SlotType::ZPRorFPR + : SlotType::GPR; } } } - HasFPRStackObjects = - any_of(FrameObjects, [](unsigned B) { return (B & 3) == 2; }); + + for (int FI = 0; FI < int(SlotTypes.size()); ++FI) { + HasFPRStackObjects |= SlotTypes[FI] == SlotType::ZPRorFPR; + // For SplitSVEObjects remember that this stack slot is a predicate, this + // will be needed later when determining the frame layout. + if (SlotTypes[FI] == SlotType::PPR) { + MFI.setStackID(FI, TargetStackID::ScalablePredicateVector); + HasPPRStackObjects = true; + } + } } if (HasFPRCSRs || HasFPRStackObjects) { @@ -2250,6 +2366,78 @@ void AArch64FrameLowering::determineStackHazardSlot( << StackHazardSize << "\n"); AFI->setStackHazardSlotIndex(ID); } + + // Determine if we should use SplitSVEObjects. This should only be used if + // there's a possibility of a stack hazard between PPRs and ZPRs or FPRs. + if (SplitSVEObjects) { + if (!HasPPRCSRs && !HasPPRStackObjects) { + LLVM_DEBUG( + dbgs() << "Not using SplitSVEObjects as no PPRs are on the stack\n"); + return; + } + + if (!HasFPRCSRs && !HasFPRStackObjects) { + LLVM_DEBUG( + dbgs() + << "Not using SplitSVEObjects as no FPRs or ZPRs are on the stack\n"); + return; + } + + const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); + if (MFI.hasVarSizedObjects() || TRI->hasStackRealignment(MF)) { + LLVM_DEBUG(dbgs() << "SplitSVEObjects is not supported with variable " + "sized objects or realignment\n"); + return; + } + + if (arePPRsSpilledAsZPR(MF)) { + LLVM_DEBUG(dbgs() << "SplitSVEObjects is not supported with " + "-aarch64-enable-zpr-predicate-spills"); + return; + } + + // If another calling convention is explicitly set FPRs can't be promoted to + // ZPR callee-saves. + if (!is_contained({CallingConv::C, CallingConv::Fast, + CallingConv::AArch64_SVE_VectorCall}, + MF.getFunction().getCallingConv())) { + LLVM_DEBUG( + dbgs() << "Calling convention is not supported with SplitSVEObjects"); + return; + } + + [[maybe_unused]] const AArch64Subtarget &Subtarget = + MF.getSubtarget(); + assert(Subtarget.isSVEorStreamingSVEAvailable() && + "Expected SVE to be available for PPRs"); + + // With SplitSVEObjects the CS hazard padding is placed between the + // PPRs and ZPRs. If there are any FPR CS there would be a hazard between + // them and the CS GRPs. Avoid this by promoting all FPR CS to ZPRs. + BitVector FPRZRegs(SavedRegs.size()); + for (size_t Reg = 0, E = SavedRegs.size(); HasFPRCSRs && Reg < E; ++Reg) { + BitVector::reference RegBit = SavedRegs[Reg]; + if (!RegBit) + continue; + unsigned SubRegIdx = 0; + if (AArch64::FPR64RegClass.contains(Reg)) + SubRegIdx = AArch64::dsub; + else if (AArch64::FPR128RegClass.contains(Reg)) + SubRegIdx = AArch64::zsub; + else + continue; + // Clear the bit for the FPR save. + RegBit = false; + // Mark that we should save the corresponding ZPR. 
+ Register ZReg = + TRI->getMatchingSuperReg(Reg, SubRegIdx, &AArch64::ZPRRegClass); + FPRZRegs.set(ZReg); + } + SavedRegs |= FPRZRegs; + + AFI->setSplitSVEObjects(true); + LLVM_DEBUG(dbgs() << "SplitSVEObjects enabled!\n"); + } } void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, @@ -2260,10 +2448,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, if (MF.getFunction().getCallingConv() == CallingConv::GHC) return; + const AArch64Subtarget &Subtarget = MF.getSubtarget(); + TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); const AArch64RegisterInfo *RegInfo = static_cast( MF.getSubtarget().getRegisterInfo()); - const AArch64Subtarget &Subtarget = MF.getSubtarget(); AArch64FunctionInfo *AFI = MF.getInfo(); unsigned UnspilledCSGPR = AArch64::NoRegister; unsigned UnspilledCSGPRPaired = AArch64::NoRegister; @@ -2382,17 +2571,26 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, SavedRegs.set(AArch64::X18); } + // Determine if a Hazard slot should be used and where it should go. + // If SplitSVEObjects is used, the hazard padding is placed between the PPRs + // and ZPRs. Otherwise, it goes in the callee save area. + determineStackHazardSlot(MF, SavedRegs); + // Calculates the callee saved stack size. unsigned CSStackSize = 0; - unsigned SVECSStackSize = 0; + unsigned ZPRCSStackSize = 0; + unsigned PPRCSStackSize = 0; const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); for (unsigned Reg : SavedRegs.set_bits()) { auto *RC = TRI->getMinimalPhysRegClass(Reg); assert(RC && "expected register class!"); auto SpillSize = TRI->getSpillSize(*RC); - if (AArch64::PPRRegClass.contains(Reg) || - AArch64::ZPRRegClass.contains(Reg)) - SVECSStackSize += SpillSize; + bool IsZPR = AArch64::ZPRRegClass.contains(Reg); + bool IsPPR = !IsZPR && AArch64::PPRRegClass.contains(Reg); + if (IsZPR || (IsPPR && arePPRsSpilledAsZPR(MF))) + ZPRCSStackSize += SpillSize; + else if (IsPPR) + PPRCSStackSize += SpillSize; else CSStackSize += SpillSize; } @@ -2402,17 +2600,15 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, // only 64-bit GPRs can be added to SavedRegs. unsigned NumSavedRegs = SavedRegs.count(); + // If we have hazard padding in the CS area add that to the size. + if (AFI->isStackHazardIncludedInCalleeSaveArea()) + CSStackSize += getStackHazardSize(MF); + // Increase the callee-saved stack size if the function has streaming mode // changes, as we will need to spill the value of the VG register. if (requiresSaveVG(MF)) CSStackSize += 8; - // Determine if a Hazard slot should be used, and increase the CSStackSize by - // StackHazardSize if so. - determineStackHazardSlot(MF, SavedRegs); - if (AFI->hasStackHazardSlotIndex()) - CSStackSize += getStackHazardSize(MF); - // If we must call __arm_get_current_vg in the prologue preserve the LR. if (requiresSaveVG(MF) && !Subtarget.hasSVE()) SavedRegs.set(AArch64::LR); @@ -2433,8 +2629,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, }); // If any callee-saved registers are used, the frame cannot be eliminated. 
- int64_t SVEStackSize = - alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16); + auto [ZPRLocalStackSize, PPRLocalStackSize] = + determineSVEStackSizes(MF, AssignObjectOffsets::No); + uint64_t SVELocals = ZPRLocalStackSize + PPRLocalStackSize; + uint64_t SVEStackSize = + alignTo(ZPRCSStackSize + PPRCSStackSize + SVELocals, 16); bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize; // The CSR spill slots have not been allocated yet, so estimateStackSize @@ -2519,7 +2718,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, // instructions. AFI->setCalleeSavedStackSize(AlignedCSStackSize); AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize); - AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16)); + AFI->setSVECalleeSavedStackSize(ZPRCSStackSize, alignTo(PPRCSStackSize, 16)); } bool AArch64FrameLowering::assignCalleeSavedSpillSlots( @@ -2572,7 +2771,7 @@ bool AArch64FrameLowering::assignCalleeSavedSpillSlots( const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg); // Create a hazard slot as we switch between GPR and FPR CSRs. - if (AFI->hasStackHazardSlotIndex() && + if (AFI->isStackHazardIncludedInCalleeSaveArea() && (!LastReg || !AArch64InstrInfo::isFpOrNEON(LastReg)) && AArch64InstrInfo::isFpOrNEON(Reg)) { assert(HazardSlotIndex == std::numeric_limits::max() && @@ -2611,7 +2810,7 @@ bool AArch64FrameLowering::assignCalleeSavedSpillSlots( } // Add hazard slot in the case where no FPR CSRs are present. - if (AFI->hasStackHazardSlotIndex() && + if (AFI->isStackHazardIncludedInCalleeSaveArea() && HazardSlotIndex == std::numeric_limits::max()) { HazardSlotIndex = MFI.CreateStackObject(StackHazardSize, Align(8), true); LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex @@ -2658,7 +2857,6 @@ static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, assert((Max == std::numeric_limits::min() || Max + 1 == CS.getFrameIdx()) && "SVE CalleeSaves are not consecutive"); - Min = std::min(Min, CS.getFrameIdx()); Max = std::max(Max, CS.getFrameIdx()); } @@ -2666,43 +2864,64 @@ static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, return Min != std::numeric_limits::max(); } -// Process all the SVE stack objects and determine offsets for each -// object. If AssignOffsets is true, the offsets get assigned. -// Fills in the first and last callee-saved frame indices into -// Min/MaxCSFrameIndex, respectively. -// Returns the size of the stack. -static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI, - int &MinCSFrameIndex, - int &MaxCSFrameIndex, - bool AssignOffsets) { +static SVEStackSizes determineSVEStackSizes(MachineFunction &MF, + AssignObjectOffsets AssignOffsets) { + MachineFrameInfo &MFI = MF.getFrameInfo(); + auto *AFI = MF.getInfo(); + + SVEStackSizes SVEStack{}; + + // With SplitSVEObjects we maintain separate stack offsets for predicates + // (PPRs) and SVE vectors (ZPRs). When SplitSVEObjects is disabled predicates + // are included in the SVE vector area. + uint64_t &ZPRStackTop = SVEStack.ZPRStackSize; + uint64_t &PPRStackTop = + AFI->hasSplitSVEObjects() ? SVEStack.PPRStackSize : SVEStack.ZPRStackSize; + #ifndef NDEBUG // First process all fixed stack objects. 
for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) - assert(MFI.getStackID(I) != TargetStackID::ScalableVector && + assert(!MFI.hasScalableStackID(I) && "SVE vectors should never be passed on the stack by value, only by " "reference."); #endif - auto Assign = [&MFI](int FI, int64_t Offset) { + auto AllocateObject = [&](int FI) { + uint64_t &StackTop = MFI.getStackID(FI) == TargetStackID::ScalableVector + ? ZPRStackTop + : PPRStackTop; + + // FIXME: Given that the length of SVE vectors is not necessarily a power of + // two, we'd need to align every object dynamically at runtime if the + // alignment is larger than 16. This is not yet supported. + Align Alignment = MFI.getObjectAlign(FI); + if (Alignment > Align(16)) + report_fatal_error( + "Alignment of scalable vectors > 16 bytes is not yet supported"); + + StackTop += MFI.getObjectSize(FI); + StackTop = alignTo(StackTop, Alignment); + + assert(StackTop < std::numeric_limits::max() && + "SVE StackTop far too large?!"); + + int64_t Offset = -int64_t(StackTop); + if (AssignOffsets == AssignObjectOffsets::Yes) + MFI.setObjectOffset(FI, Offset); + LLVM_DEBUG(dbgs() << "alloc FI(" << FI << ") at SP[" << Offset << "]\n"); - MFI.setObjectOffset(FI, Offset); }; - int64_t Offset = 0; - // Then process all callee saved slots. + int MinCSFrameIndex, MaxCSFrameIndex; if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex)) { - // Assign offsets to the callee save slots. - for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) { - Offset += MFI.getObjectSize(I); - Offset = alignTo(Offset, MFI.getObjectAlign(I)); - if (AssignOffsets) - Assign(I, -Offset); - } + for (int FI = MinCSFrameIndex; FI <= MaxCSFrameIndex; ++FI) + AllocateObject(FI); } - // Ensure that the Callee-save area is aligned to 16bytes. - Offset = alignTo(Offset, Align(16U)); + // Ensure the CS area is 16-byte aligned. + PPRStackTop = alignTo(PPRStackTop, Align(16U)); + ZPRStackTop = alignTo(ZPRStackTop, Align(16U)); // Create a buffer of SVE objects to allocate and sort it. SmallVector ObjectsToAllocate; @@ -2715,48 +2934,31 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI, if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector) ObjectsToAllocate.push_back(StackProtectorFI); } - for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) { - unsigned StackID = MFI.getStackID(I); - if (StackID != TargetStackID::ScalableVector) - continue; - if (I == StackProtectorFI) + + for (int FI = 0, E = MFI.getObjectIndexEnd(); FI != E; ++FI) { + if (FI == StackProtectorFI || MFI.isDeadObjectIndex(FI)) continue; - if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex) + if (MaxCSFrameIndex >= FI && FI >= MinCSFrameIndex) continue; - if (MFI.isDeadObjectIndex(I)) + + if (MFI.getStackID(FI) != TargetStackID::ScalableVector && + MFI.getStackID(FI) != TargetStackID::ScalablePredicateVector) continue; - ObjectsToAllocate.push_back(I); + ObjectsToAllocate.push_back(FI); } // Allocate all SVE locals and spills - for (unsigned FI : ObjectsToAllocate) { - Align Alignment = MFI.getObjectAlign(FI); - // FIXME: Given that the length of SVE vectors is not necessarily a power of - // two, we'd need to align every object dynamically at runtime if the - // alignment is larger than 16. This is not yet supported. 
- if (Alignment > Align(16)) - report_fatal_error( - "Alignment of scalable vectors > 16 bytes is not yet supported"); - - Offset = alignTo(Offset + MFI.getObjectSize(FI), Alignment); - if (AssignOffsets) - Assign(FI, -Offset); - } + for (unsigned FI : ObjectsToAllocate) + AllocateObject(FI); - return Offset; -} + PPRStackTop = alignTo(PPRStackTop, Align(16U)); + ZPRStackTop = alignTo(ZPRStackTop, Align(16U)); -int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets( - MachineFrameInfo &MFI) const { - int MinCSFrameIndex, MaxCSFrameIndex; - return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex, false); -} + if (AssignOffsets == AssignObjectOffsets::Yes) + AFI->setStackSizeSVE(SVEStack.ZPRStackSize, SVEStack.PPRStackSize); -int64_t AArch64FrameLowering::assignSVEStackObjectOffsets( - MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const { - return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex, - true); + return SVEStack; } /// Attempts to scavenge a register from \p ScavengeableRegs given the used @@ -3070,12 +3272,7 @@ void AArch64FrameLowering::processFunctionBeforeFrameFinalized( assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown && "Upwards growing stack unsupported"); - int MinCSFrameIndex, MaxCSFrameIndex; - int64_t SVEStackSize = - assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex); - - AFI->setStackSizeSVE(alignTo(SVEStackSize, 16U)); - AFI->setMinMaxSVECSFrameIndex(MinCSFrameIndex, MaxCSFrameIndex); + (void)determineSVEStackSizes(MF, AssignObjectOffsets::Yes); // If this function isn't doing Win64-style C++ EH, we don't need to do // anything. @@ -3359,7 +3556,8 @@ void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI, Register Reg; FrameRegOffset = TFI->resolveFrameOffsetReference( - *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg, + *MF, FirstTagStore.Offset, false /*isFixed*/, + TargetStackID::Default /*StackID*/, Reg, /*PreferFP=*/false, /*ForSimm=*/true); FrameReg = Reg; FrameRegUpdate = std::nullopt; @@ -3597,7 +3795,7 @@ StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP( // Go to common code if we cannot provide sp + offset. if (MFI.hasVarSizedObjects() || - MF.getInfo()->getStackSizeSVE() || + MF.getInfo()->hasSVEStackSize() || MF.getSubtarget().getRegisterInfo()->hasStackRealignment(MF)) return getFrameIndexReference(MF, FI, FrameReg); @@ -3699,10 +3897,12 @@ bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) { void AArch64FrameLowering::orderFrameObjects( const MachineFunction &MF, SmallVectorImpl &ObjectsToAllocate) const { - if (!OrderFrameObjects || ObjectsToAllocate.empty()) + const AArch64FunctionInfo &AFI = *MF.getInfo(); + + if ((!OrderFrameObjects && !AFI.hasSplitSVEObjects()) || + ObjectsToAllocate.empty()) return; - const AArch64FunctionInfo &AFI = *MF.getInfo(); const MachineFrameInfo &MFI = MF.getFrameInfo(); std::vector FrameObjects(MFI.getObjectIndexEnd()); for (auto &Obj : ObjectsToAllocate) { @@ -4080,7 +4280,7 @@ void AArch64FrameLowering::emitRemarks( } unsigned RegTy = StackAccess::AccessType::GPR; - if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) { + if (MFI.hasScalableStackID(FrameIdx)) { // SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO // spill/fill the predicate as a data vector (so are an FPR access). 
if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO && diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h index 7bba053111e89..32a9bd831989c 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h @@ -24,6 +24,11 @@ class AArch64FunctionInfo; class AArch64PrologueEmitter; class AArch64EpilogueEmitter; +struct SVEStackSizes { + uint64_t ZPRStackSize{0}; + uint64_t PPRStackSize{0}; +}; + class AArch64FrameLowering : public TargetFrameLowering { public: explicit AArch64FrameLowering() @@ -64,8 +69,9 @@ class AArch64FrameLowering : public TargetFrameLowering { bool ForSimm) const; StackOffset resolveFrameOffsetReference(const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, - bool isSVE, Register &FrameReg, - bool PreferFP, bool ForSimm) const; + TargetStackID::Value StackID, + Register &FrameReg, bool PreferFP, + bool ForSimm) const; bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef CSI, @@ -124,6 +130,7 @@ class AArch64FrameLowering : public TargetFrameLowering { return false; case TargetStackID::Default: case TargetStackID::ScalableVector: + case TargetStackID::ScalablePredicateVector: case TargetStackID::NoAlloc: return true; } @@ -132,7 +139,8 @@ class AArch64FrameLowering : public TargetFrameLowering { bool isStackIdSafeForLocalArea(unsigned StackId) const override { // We don't support putting SVE objects into the pre-allocated local // frame block at the moment. - return StackId != TargetStackID::ScalableVector; + return (StackId != TargetStackID::ScalableVector && + StackId != TargetStackID::ScalablePredicateVector); } void @@ -145,7 +153,17 @@ class AArch64FrameLowering : public TargetFrameLowering { bool requiresSaveVG(const MachineFunction &MF) const; - StackOffset getSVEStackSize(const MachineFunction &MF) const; + /// Returns the size of the entire ZPR stackframe (calleesaves + spills). + StackOffset getZPRStackSize(const MachineFunction &MF) const; + + /// Returns the size of the entire PPR stackframe (calleesaves + spills + + /// hazard padding). + StackOffset getPPRStackSize(const MachineFunction &MF) const; + + /// Returns the size of the entire SVE stackframe (PPRs + ZPRs). + StackOffset getSVEStackSize(const MachineFunction &MF) const { + return getZPRStackSize(MF) + getPPRStackSize(MF); + } friend class AArch64PrologueEpilogueCommon; friend class AArch64PrologueEmitter; @@ -165,10 +183,6 @@ class AArch64FrameLowering : public TargetFrameLowering { /// Returns true if CSRs should be paired. bool producePairRegisters(MachineFunction &MF) const; - int64_t estimateSVEStackObjectOffsets(MachineFrameInfo &MF) const; - int64_t assignSVEStackObjectOffsets(MachineFrameInfo &MF, - int &MinCSFrameIndex, - int &MaxCSFrameIndex) const; /// Make a determination whether a Hazard slot is used and create it if /// needed. 
void determineStackHazardSlot(MachineFunction &MF, diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 54bdb8750f709..e7b2d20e2a6cb 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -2089,7 +2089,8 @@ void AArch64DAGToDAGISel::SelectMultiVectorLutiLane(SDNode *Node, if (!ImmToReg(Node->getOperand(2), ZtValue)) return; - SDValue Ops[] = {ZtValue, Node->getOperand(3), Node->getOperand(4)}; + SDValue Chain = Node->getOperand(0); + SDValue Ops[] = {ZtValue, Node->getOperand(3), Node->getOperand(4), Chain}; SDLoc DL(Node); EVT VT = Node->getValueType(0); @@ -2110,14 +2111,15 @@ void AArch64DAGToDAGISel::SelectMultiVectorLutiLane(SDNode *Node, void AArch64DAGToDAGISel::SelectMultiVectorLuti(SDNode *Node, unsigned NumOutVecs, unsigned Opc) { - SDValue ZtValue; - SmallVector Ops; if (!ImmToReg(Node->getOperand(2), ZtValue)) return; - Ops.push_back(ZtValue); - Ops.push_back(createZMulTuple({Node->getOperand(3), Node->getOperand(4)})); + SDValue Chain = Node->getOperand(0); + SDValue Ops[] = {ZtValue, + createZMulTuple({Node->getOperand(3), Node->getOperand(4)}), + Chain}; + SDLoc DL(Node); EVT VT = Node->getValueType(0); @@ -4346,34 +4348,14 @@ bool AArch64DAGToDAGISel::SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm, ->getAPIntValue() .trunc(VT.getFixedSizeInBits()) .getSExtValue(); + int32_t ImmVal, ShiftVal; + if (!AArch64_AM::isSVECpyDupImm(VT.getScalarSizeInBits(), Val, ImmVal, + ShiftVal)) + return false; - switch (VT.SimpleTy) { - case MVT::i8: - // All immediates are supported. - Shift = CurDAG->getTargetConstant(0, DL, MVT::i32); - Imm = CurDAG->getTargetConstant(Val & 0xFF, DL, MVT::i32); - return true; - case MVT::i16: - case MVT::i32: - case MVT::i64: - // Support 8bit signed immediates. - if (Val >= -128 && Val <= 127) { - Shift = CurDAG->getTargetConstant(0, DL, MVT::i32); - Imm = CurDAG->getTargetConstant(Val & 0xFF, DL, MVT::i32); - return true; - } - // Support 16bit signed immediates that are a multiple of 256. - if (Val >= -32768 && Val <= 32512 && Val % 256 == 0) { - Shift = CurDAG->getTargetConstant(8, DL, MVT::i32); - Imm = CurDAG->getTargetConstant((Val >> 8) & 0xFF, DL, MVT::i32); - return true; - } - break; - default: - break; - } - - return false; + Shift = CurDAG->getTargetConstant(ShiftVal, DL, MVT::i32); + Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32); + return true; } bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) { @@ -7515,7 +7497,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, int FI = cast(N)->getIndex(); // We can only encode VL scaled offsets, so only fold in frame indexes // referencing SVE objects. - if (MFI.getStackID(FI) == TargetStackID::ScalableVector) { + if (MFI.hasScalableStackID(FI)) { Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL)); OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64); return true; @@ -7561,7 +7543,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, int FI = cast(Base)->getIndex(); // We can only encode VL scaled offsets, so only fold in frame indexes // referencing SVE objects. 
- if (MFI.getStackID(FI) == TargetStackID::ScalableVector) + if (MFI.hasScalableStackID(FI)) Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL)); } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 09b31616e0882..70d5ad7d660f1 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1537,6 +1537,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setOperationAction(ISD::FP_TO_UINT, VT, Custom); setOperationAction(ISD::FP_TO_SINT, VT, Custom); setOperationAction(ISD::MLOAD, VT, Custom); + setOperationAction(ISD::MSTORE, VT, Legal); setOperationAction(ISD::MUL, VT, Custom); setOperationAction(ISD::MULHS, VT, Custom); setOperationAction(ISD::MULHU, VT, Custom); @@ -2182,13 +2183,6 @@ bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT, return false; } -bool AArch64TargetLowering::shouldExpandPartialReductionIntrinsic( - const IntrinsicInst *I) const { - assert(I->getIntrinsicID() == Intrinsic::vector_partial_reduce_add && - "Unexpected intrinsic!"); - return true; -} - bool AArch64TargetLowering::shouldExpandCttzElements(EVT VT) const { if (!Subtarget->isSVEorStreamingSVEAvailable()) return true; @@ -6624,7 +6618,6 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, "llvm.eh.recoverfp must take a function as the first argument"); return IncomingFPOp; } - case Intrinsic::aarch64_neon_vsri: case Intrinsic::aarch64_neon_vsli: case Intrinsic::aarch64_sve_sri: @@ -8093,13 +8086,76 @@ static SDValue getZT0FrameIndex(MachineFrameInfo &MFI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); } +// Emit a call to __arm_sme_save or __arm_sme_restore. +static SDValue emitSMEStateSaveRestore(const AArch64TargetLowering &TLI, + SelectionDAG &DAG, + AArch64FunctionInfo *Info, SDLoc DL, + SDValue Chain, bool IsSave) { + MachineFunction &MF = DAG.getMachineFunction(); + AArch64FunctionInfo *FuncInfo = MF.getInfo(); + FuncInfo->setSMESaveBufferUsed(); + TargetLowering::ArgListTy Args; + Args.emplace_back( + DAG.getCopyFromReg(Chain, DL, Info->getSMESaveBufferAddr(), MVT::i64), + PointerType::getUnqual(*DAG.getContext())); + + RTLIB::Libcall LC = + IsSave ? RTLIB::SMEABI_SME_SAVE : RTLIB::SMEABI_SME_RESTORE; + SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), + TLI.getPointerTy(DAG.getDataLayout())); + auto *RetTy = Type::getVoidTy(*DAG.getContext()); + TargetLowering::CallLoweringInfo CLI(DAG); + CLI.setDebugLoc(DL).setChain(Chain).setLibCallee( + TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)); + return TLI.LowerCallTo(CLI).second; +} + +static SDValue emitRestoreZALazySave(SDValue Chain, SDLoc DL, + const AArch64TargetLowering &TLI, + const AArch64RegisterInfo &TRI, + AArch64FunctionInfo &FuncInfo, + SelectionDAG &DAG) { + // Conditionally restore the lazy save using a pseudo node. + RTLIB::Libcall LC = RTLIB::SMEABI_TPIDR2_RESTORE; + TPIDR2Object &TPIDR2 = FuncInfo.getTPIDR2Obj(); + SDValue RegMask = DAG.getRegisterMask(TRI.getCallPreservedMask( + DAG.getMachineFunction(), TLI.getLibcallCallingConv(LC))); + SDValue RestoreRoutine = DAG.getTargetExternalSymbol( + TLI.getLibcallName(LC), TLI.getPointerTy(DAG.getDataLayout())); + SDValue TPIDR2_EL0 = DAG.getNode( + ISD::INTRINSIC_W_CHAIN, DL, MVT::i64, Chain, + DAG.getConstant(Intrinsic::aarch64_sme_get_tpidr2, DL, MVT::i32)); + // Copy the address of the TPIDR2 block into X0 before 'calling' the + // RESTORE_ZA pseudo. 
+ SDValue Glue; + SDValue TPIDR2Block = DAG.getFrameIndex( + TPIDR2.FrameIndex, + DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); + Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, TPIDR2Block, Glue); + Chain = + DAG.getNode(AArch64ISD::RESTORE_ZA, DL, MVT::Other, + {Chain, TPIDR2_EL0, DAG.getRegister(AArch64::X0, MVT::i64), + RestoreRoutine, RegMask, Chain.getValue(1)}); + // Finally reset the TPIDR2_EL0 register to 0. + Chain = DAG.getNode( + ISD::INTRINSIC_VOID, DL, MVT::Other, Chain, + DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32), + DAG.getConstant(0, DL, MVT::i64)); + TPIDR2.Uses++; + return Chain; +} + SDValue AArch64TargetLowering::lowerEHPadEntry(SDValue Chain, SDLoc const &DL, SelectionDAG &DAG) const { assert(Chain.getOpcode() == ISD::EntryToken && "Unexpected Chain value"); SDValue Glue = Chain.getValue(1); MachineFunction &MF = DAG.getMachineFunction(); - SMEAttrs SMEFnAttrs = MF.getInfo()->getSMEFnAttrs(); + auto &FuncInfo = *MF.getInfo(); + auto &Subtarget = DAG.getSubtarget(); + const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo(); + + SMEAttrs SMEFnAttrs = FuncInfo.getSMEFnAttrs(); // The following conditions are true on entry to an exception handler: // - PSTATE.SM is 0. @@ -8114,14 +8170,43 @@ SDValue AArch64TargetLowering::lowerEHPadEntry(SDValue Chain, SDLoc const &DL, // These mode changes are usually optimized away in catch blocks as they // occur before the __cxa_begin_catch (which is a non-streaming function), // but are necessary in some cases (such as for cleanups). + // + // Additionally, if the function has ZA or ZT0 state, we must restore it. + // [COND_]SMSTART SM if (SMEFnAttrs.hasStreamingInterfaceOrBody()) - return changeStreamingMode(DAG, DL, /*Enable=*/true, Chain, - /*Glue*/ Glue, AArch64SME::Always); + Chain = changeStreamingMode(DAG, DL, /*Enable=*/true, Chain, + /*Glue*/ Glue, AArch64SME::Always); + else if (SMEFnAttrs.hasStreamingCompatibleInterface()) + Chain = changeStreamingMode(DAG, DL, /*Enable=*/true, Chain, Glue, + AArch64SME::IfCallerIsStreaming); + + if (getTM().useNewSMEABILowering()) + return Chain; - if (SMEFnAttrs.hasStreamingCompatibleInterface()) - return changeStreamingMode(DAG, DL, /*Enable=*/true, Chain, Glue, - AArch64SME::IfCallerIsStreaming); + if (SMEFnAttrs.hasAgnosticZAInterface()) { + // Restore full ZA + Chain = emitSMEStateSaveRestore(*this, DAG, &FuncInfo, DL, Chain, + /*IsSave=*/false); + } else if (SMEFnAttrs.hasZAState() || SMEFnAttrs.hasZT0State()) { + // SMSTART ZA + Chain = DAG.getNode( + AArch64ISD::SMSTART, DL, DAG.getVTList(MVT::Other, MVT::Glue), Chain, + DAG.getTargetConstant(int32_t(AArch64SVCR::SVCRZA), DL, MVT::i32)); + + // Restore ZT0 + if (SMEFnAttrs.hasZT0State()) { + SDValue ZT0FrameIndex = + getZT0FrameIndex(MF.getFrameInfo(), FuncInfo, DAG); + Chain = + DAG.getNode(AArch64ISD::RESTORE_ZT, DL, DAG.getVTList(MVT::Other), + {Chain, DAG.getConstant(0, DL, MVT::i32), ZT0FrameIndex}); + } + + // Restore ZA + if (SMEFnAttrs.hasZAState()) + Chain = emitRestoreZALazySave(Chain, DL, *this, TRI, FuncInfo, DAG); + } return Chain; } @@ -9171,8 +9256,7 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, (MI.getOpcode() == AArch64::ADDXri || MI.getOpcode() == AArch64::SUBXri)) { const MachineOperand &MO = MI.getOperand(1); - if (MO.isFI() && MF.getFrameInfo().getStackID(MO.getIndex()) == - TargetStackID::ScalableVector) + if (MO.isFI() && MF.getFrameInfo().hasScalableStackID(MO.getIndex())) MI.addOperand(MachineOperand::CreateReg(AArch64::VG, 
/*IsDef=*/false, /*IsImplicit=*/true)); } @@ -9239,30 +9323,6 @@ SDValue AArch64TargetLowering::changeStreamingMode( return GetCheckVL(SMChange.getValue(0), SMChange.getValue(1)); } -// Emit a call to __arm_sme_save or __arm_sme_restore. -static SDValue emitSMEStateSaveRestore(const AArch64TargetLowering &TLI, - SelectionDAG &DAG, - AArch64FunctionInfo *Info, SDLoc DL, - SDValue Chain, bool IsSave) { - MachineFunction &MF = DAG.getMachineFunction(); - AArch64FunctionInfo *FuncInfo = MF.getInfo(); - FuncInfo->setSMESaveBufferUsed(); - TargetLowering::ArgListTy Args; - Args.emplace_back( - DAG.getCopyFromReg(Chain, DL, Info->getSMESaveBufferAddr(), MVT::i64), - PointerType::getUnqual(*DAG.getContext())); - - RTLIB::Libcall LC = - IsSave ? RTLIB::SMEABI_SME_SAVE : RTLIB::SMEABI_SME_RESTORE; - SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), - TLI.getPointerTy(DAG.getDataLayout())); - auto *RetTy = Type::getVoidTy(*DAG.getContext()); - TargetLowering::CallLoweringInfo CLI(DAG); - CLI.setDebugLoc(DL).setChain(Chain).setLibCallee( - TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)); - return TLI.LowerCallTo(CLI).second; -} - static AArch64SME::ToggleCondition getSMToggleCondition(const SMECallAttrs &CallAttrs) { if (!CallAttrs.caller().hasStreamingCompatibleInterface() || @@ -9643,8 +9703,12 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty); MachineFrameInfo &MFI = MF.getFrameInfo(); int FI = MFI.CreateStackObject(StoreSize, Alignment, false); - if (isScalable) - MFI.setStackID(FI, TargetStackID::ScalableVector); + if (isScalable) { + bool IsPred = VA.getValVT() == MVT::aarch64svcount || + VA.getValVT().getVectorElementType() == MVT::i1; + MFI.setStackID(FI, IsPred ? TargetStackID::ScalablePredicateVector + : TargetStackID::ScalableVector); + } MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); SDValue Ptr = DAG.getFrameIndex( @@ -10022,33 +10086,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, {Result, DAG.getConstant(0, DL, MVT::i32), ZTFrameIdx}); if (RequiresLazySave) { - // Conditionally restore the lazy save using a pseudo node. - RTLIB::Libcall LC = RTLIB::SMEABI_TPIDR2_RESTORE; - TPIDR2Object &TPIDR2 = FuncInfo->getTPIDR2Obj(); - SDValue RegMask = DAG.getRegisterMask( - TRI->getCallPreservedMask(MF, getLibcallCallingConv(LC))); - SDValue RestoreRoutine = DAG.getTargetExternalSymbol( - getLibcallName(LC), getPointerTy(DAG.getDataLayout())); - SDValue TPIDR2_EL0 = DAG.getNode( - ISD::INTRINSIC_W_CHAIN, DL, MVT::i64, Result, - DAG.getConstant(Intrinsic::aarch64_sme_get_tpidr2, DL, MVT::i32)); - // Copy the address of the TPIDR2 block into X0 before 'calling' the - // RESTORE_ZA pseudo. - SDValue Glue; - SDValue TPIDR2Block = DAG.getFrameIndex( - TPIDR2.FrameIndex, - DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); - Result = DAG.getCopyToReg(Result, DL, AArch64::X0, TPIDR2Block, Glue); - Result = - DAG.getNode(AArch64ISD::RESTORE_ZA, DL, MVT::Other, - {Result, TPIDR2_EL0, DAG.getRegister(AArch64::X0, MVT::i64), - RestoreRoutine, RegMask, Result.getValue(1)}); - // Finally reset the TPIDR2_EL0 register to 0. 
- Result = DAG.getNode( - ISD::INTRINSIC_VOID, DL, MVT::Other, Result, - DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32), - DAG.getConstant(0, DL, MVT::i64)); - TPIDR2.Uses++; + Result = emitRestoreZALazySave(Result, DL, *this, *TRI, *FuncInfo, DAG); } else if (RequiresSaveAllZA) { Result = emitSMEStateSaveRestore(*this, DAG, FuncInfo, DL, Result, /*IsSave=*/false); @@ -11743,6 +11781,28 @@ SDValue AArch64TargetLowering::LowerSELECT_CC( return DAG.getNode(ISD::AND, DL, VT, LHS, Shift); } + // Check for sign bit test patterns that can use TST optimization. + // (SELECT_CC setlt, sign_extend_inreg, 0, tval, fval) + // -> TST %operand, sign_bit; CSEL + // (SELECT_CC setlt, sign_extend, 0, tval, fval) + // -> TST %operand, sign_bit; CSEL + if (CC == ISD::SETLT && RHSC && RHSC->isZero() && LHS.hasOneUse() && + (LHS.getOpcode() == ISD::SIGN_EXTEND_INREG || + LHS.getOpcode() == ISD::SIGN_EXTEND)) { + + uint64_t SignBitPos; + std::tie(LHS, SignBitPos) = lookThroughSignExtension(LHS); + EVT TestVT = LHS.getValueType(); + SDValue SignBitConst = DAG.getConstant(1ULL << SignBitPos, DL, TestVT); + SDValue TST = + DAG.getNode(AArch64ISD::ANDS, DL, DAG.getVTList(TestVT, MVT::i32), + LHS, SignBitConst); + + SDValue Flags = TST.getValue(1); + return DAG.getNode(AArch64ISD::CSEL, DL, TVal.getValueType(), TVal, FVal, + DAG.getConstant(AArch64CC::NE, DL, MVT::i32), Flags); + } + // Canonicalise absolute difference patterns: // select_cc lhs, rhs, sub(lhs, rhs), sub(rhs, lhs), cc -> // select_cc lhs, rhs, sub(lhs, rhs), neg(sub(lhs, rhs)), cc @@ -15097,9 +15157,7 @@ static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) { : Shift.getOperand(1); unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI; - SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Imm); - - return ResultSLI; + return DAG.getNode(Inst, DL, VT, X, Y, Imm); } static SDValue tryLowerToBSL(SDValue N, SelectionDAG &DAG) { @@ -15277,6 +15335,27 @@ static SDValue NormalizeBuildVector(SDValue Op, return DAG.getBuildVector(VT, DL, Ops); } +static SDValue trySVESplat64(SDValue Op, SelectionDAG &DAG, + const AArch64Subtarget *ST, APInt &DefBits) { + EVT VT = Op.getValueType(); + // TODO: We should be able to support 64-bit destinations too + if (!ST->hasSVE() || !VT.is128BitVector() || + DefBits.getHiBits(64) != DefBits.getLoBits(64)) + return SDValue(); + + // See if we can make use of the SVE dup instruction. + APInt Val64 = DefBits.trunc(64); + int32_t ImmVal, ShiftVal; + if (!AArch64_AM::isSVECpyDupImm(64, Val64.getSExtValue(), ImmVal, ShiftVal)) + return SDValue(); + + SDLoc DL(Op); + SDValue SplatVal = DAG.getSplatVector(MVT::nxv2i64, DL, + DAG.getConstant(Val64, DL, MVT::i64)); + SDValue Res = convertFromScalableVector(DAG, MVT::v2i64, SplatVal); + return DAG.getNode(AArch64ISD::NVCAST, DL, VT, Res); +} + static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG, const AArch64Subtarget *ST) { EVT VT = Op.getValueType(); @@ -15316,6 +15395,10 @@ static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG, if (SDValue R = TryMOVIWithBits(UndefBits)) return R; + // Try to materialise the constant using SVE when available. 
+ if (SDValue R = trySVESplat64(Op, DAG, ST, DefBits)) + return R; + // See if a fneg of the constant can be materialized with a MOVI, etc auto TryWithFNeg = [&](APInt DefBits, MVT FVT) { // FNegate each sub-element of the constant @@ -18785,21 +18868,25 @@ performActiveLaneMaskCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, (!ST->hasSVE2p1() && !(ST->hasSME2() && ST->isStreaming()))) return SDValue(); - unsigned NumUses = N->use_size(); + // Count the number of users which are extract_vectors. + unsigned NumExts = count_if(N->users(), [](SDNode *Use) { + return Use->getOpcode() == ISD::EXTRACT_SUBVECTOR; + }); + auto MaskEC = N->getValueType(0).getVectorElementCount(); - if (!MaskEC.isKnownMultipleOf(NumUses)) + if (!MaskEC.isKnownMultipleOf(NumExts)) return SDValue(); - ElementCount ExtMinEC = MaskEC.divideCoefficientBy(NumUses); + ElementCount ExtMinEC = MaskEC.divideCoefficientBy(NumExts); if (ExtMinEC.getKnownMinValue() < 2) return SDValue(); - SmallVector Extracts(NumUses, nullptr); + SmallVector Extracts(NumExts, nullptr); for (SDNode *Use : N->users()) { if (Use->getOpcode() != ISD::EXTRACT_SUBVECTOR) - return SDValue(); + continue; - // Ensure the extract type is correct (e.g. if NumUses is 4 and + // Ensure the extract type is correct (e.g. if NumExts is 4 and // the mask return type is nxv8i1, each extract should be nxv2i1. if (Use->getValueType(0).getVectorElementCount() != ExtMinEC) return SDValue(); @@ -18820,32 +18907,39 @@ performActiveLaneMaskCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SDValue Idx = N->getOperand(0); SDValue TC = N->getOperand(1); - EVT OpVT = Idx.getValueType(); - if (OpVT != MVT::i64) { + if (Idx.getValueType() != MVT::i64) { Idx = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Idx); TC = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, TC); } // Create the whilelo_x2 intrinsics from each pair of extracts EVT ExtVT = Extracts[0]->getValueType(0); + EVT DoubleExtVT = ExtVT.getDoubleNumVectorElementsVT(*DAG.getContext()); auto R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, {ExtVT, ExtVT}, {ID, Idx, TC}); DCI.CombineTo(Extracts[0], R.getValue(0)); DCI.CombineTo(Extracts[1], R.getValue(1)); + SmallVector Concats = {DAG.getNode( + ISD::CONCAT_VECTORS, DL, DoubleExtVT, R.getValue(0), R.getValue(1))}; - if (NumUses == 2) - return SDValue(N, 0); + if (NumExts == 2) { + assert(N->getValueType(0) == DoubleExtVT); + return Concats[0]; + } - auto Elts = DAG.getElementCount(DL, OpVT, ExtVT.getVectorElementCount() * 2); - for (unsigned I = 2; I < NumUses; I += 2) { + auto Elts = + DAG.getElementCount(DL, MVT::i64, ExtVT.getVectorElementCount() * 2); + for (unsigned I = 2; I < NumExts; I += 2) { // After the first whilelo_x2, we need to increment the starting value. 
- Idx = DAG.getNode(ISD::UADDSAT, DL, OpVT, Idx, Elts); + Idx = DAG.getNode(ISD::UADDSAT, DL, MVT::i64, Idx, Elts); R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, {ExtVT, ExtVT}, {ID, Idx, TC}); DCI.CombineTo(Extracts[I], R.getValue(0)); DCI.CombineTo(Extracts[I + 1], R.getValue(1)); + Concats.push_back(DAG.getNode(ISD::CONCAT_VECTORS, DL, DoubleExtVT, + R.getValue(0), R.getValue(1))); } - return SDValue(N, 0); + return DAG.getNode(ISD::CONCAT_VECTORS, DL, N->getValueType(0), Concats); } // Turn a v8i8/v16i8 extended vecreduce into a udot/sdot and vecreduce @@ -25430,6 +25524,32 @@ SDValue performCONDCombine(SDNode *N, CmpIndex, CC)) return Val; + // X & M ?= C --> (C << clz(M)) ?= (X << clz(M)) where M is a non-empty + // sequence of ones starting at the least significant bit with the remainder + // zero and C is a constant s.t. (C & ~M) == 0 that cannot be materialised + // into a SUBS (immediate). The transformed form can be matched into a SUBS + // (shifted register). + if ((CC == AArch64CC::EQ || CC == AArch64CC::NE) && AndNode->hasOneUse() && + isa(AndNode->getOperand(1)) && + isa(SubsNode->getOperand(1))) { + SDValue X = AndNode->getOperand(0); + APInt M = AndNode->getConstantOperandAPInt(1); + APInt C = SubsNode->getConstantOperandAPInt(1); + + if (M.isMask() && C.isSubsetOf(M) && !isLegalArithImmed(C.getZExtValue())) { + SDLoc DL(SubsNode); + EVT VT = SubsNode->getValueType(0); + unsigned ShiftAmt = M.countl_zero(); + SDValue ShiftedX = DAG.getNode( + ISD::SHL, DL, VT, X, DAG.getShiftAmountConstant(ShiftAmt, VT, DL)); + SDValue ShiftedC = DAG.getConstant(C << ShiftAmt, DL, VT); + SDValue NewSubs = DAG.getNode(AArch64ISD::SUBS, DL, SubsNode->getVTList(), + ShiftedC, ShiftedX); + DCI.CombineTo(SubsNode, NewSubs, NewSubs.getValue(1)); + return SDValue(N, 0); + } + } + if (ConstantSDNode *CN = dyn_cast(AndNode->getOperand(1))) { uint32_t CNV = CN->getZExtValue(); if (CNV == 255) @@ -27115,6 +27235,21 @@ static bool isLanes1toNKnownZero(SDValue Op) { } } +// Return true if the vector operation can guarantee that the first lane of its +// result is active. 
+static bool isLane0KnownActive(SDValue Op) { + switch (Op.getOpcode()) { + default: + return false; + case AArch64ISD::REINTERPRET_CAST: + return isLane0KnownActive(Op->getOperand(0)); + case ISD::SPLAT_VECTOR: + return isOneConstant(Op.getOperand(0)); + case AArch64ISD::PTRUE: + return Op.getConstantOperandVal(0) == AArch64SVEPredPattern::all; + }; +} + static SDValue removeRedundantInsertVectorElt(SDNode *N) { assert(N->getOpcode() == ISD::INSERT_VECTOR_ELT && "Unexpected node!"); SDValue InsertVec = N->getOperand(0); @@ -27400,6 +27535,32 @@ static SDValue performMULLCombine(SDNode *N, return SDValue(); } +static SDValue performPTestFirstCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + if (DCI.isBeforeLegalize()) + return SDValue(); + + SDLoc DL(N); + auto Mask = N->getOperand(0); + auto Pred = N->getOperand(1); + + if (!isLane0KnownActive(Mask)) + return SDValue(); + + if (Pred->getOpcode() == AArch64ISD::REINTERPRET_CAST) + Pred = Pred->getOperand(0); + + if (Pred->getOpcode() == ISD::CONCAT_VECTORS) { + Pred = Pred->getOperand(0); + Pred = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, MVT::nxv16i1, Pred); + return DAG.getNode(AArch64ISD::PTEST_FIRST, DL, N->getValueType(0), Mask, + Pred); + } + + return SDValue(); +} + static SDValue performScalarToVectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) { @@ -27756,6 +27917,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, case AArch64ISD::UMULL: case AArch64ISD::PMULL: return performMULLCombine(N, DCI, DAG); + case AArch64ISD::PTEST_FIRST: + return performPTestFirstCombine(N, DCI, DAG); case ISD::INTRINSIC_VOID: case ISD::INTRINSIC_W_CHAIN: switch (N->getConstantOperandVal(1)) { @@ -29388,7 +29551,7 @@ bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT, return TargetLowering::shouldConvertFpToSat(Op, FPVT, VT); } -bool AArch64TargetLowering::shouldExpandCmpUsingSelects(EVT VT) const { +bool AArch64TargetLowering::preferSelectsOverBooleanArithmetic(EVT VT) const { // Expand scalar and SVE operations using selects. Neon vectors prefer sub to // avoid vselect becoming bsl / unrolling. return !VT.isFixedLengthVector(); @@ -29445,7 +29608,7 @@ void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const { // than doing it here in finalizeLowering. if (MFI.hasStackProtectorIndex()) { for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) { - if (MFI.getStackID(i) == TargetStackID::ScalableVector && + if (MFI.hasScalableStackID(i) && MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) { MFI.setStackID(MFI.getStackProtectorIndex(), TargetStackID::ScalableVector); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index ff073d3eafb1f..e472e7d565d9b 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -303,6 +303,16 @@ class AArch64TargetLowering : public TargetLowering { bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override; + /// Return true if it is profitable to fold a pair of shifts into a mask. 
+ bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override { + EVT VT = Y.getValueType(); + + if (VT.isVector()) + return false; + + return VT.getScalarSizeInBits() <= 64; + } + bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override; @@ -435,7 +445,7 @@ class AArch64TargetLowering : public TargetLowering { bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override; - bool shouldExpandCmpUsingSelects(EVT VT) const override; + bool preferSelectsOverBooleanArithmetic(EVT VT) const override; bool isComplexDeinterleavingSupported() const override; bool isComplexDeinterleavingOperationSupported( @@ -523,9 +533,6 @@ class AArch64TargetLowering : public TargetLowering { bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override; - bool - shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const override; - bool shouldExpandCttzElements(EVT VT) const override; bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const override; diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td index 78d683a4b4256..6ef0a95d7406d 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td +++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td @@ -5301,28 +5301,29 @@ multiclass FPToIntegerUnscaled rmode, bits<3> opcode, string asm, } } -multiclass FPToIntegerSIMDScalar rmode, bits<3> opcode, string asm> { +multiclass FPToIntegerSIMDScalar rmode, bits<3> opcode, string asm, + SDPatternOperator OpN = null_frag> { // double-precision to 32-bit SIMD/FPR def SDr : BaseFPToIntegerUnscaled<0b01, rmode, opcode, FPR64, FPR32, asm, - []> { + [(set FPR32:$Rd, (i32 (OpN (f64 FPR64:$Rn))))]> { let Inst{31} = 0; // 32-bit FPR flag } // half-precision to 32-bit SIMD/FPR def SHr : BaseFPToIntegerUnscaled<0b11, rmode, opcode, FPR16, FPR32, asm, - []> { + [(set FPR32:$Rd, (i32 (OpN (f16 FPR16:$Rn))))]> { let Inst{31} = 0; // 32-bit FPR flag } // half-precision to 64-bit SIMD/FPR def DHr : BaseFPToIntegerUnscaled<0b11, rmode, opcode, FPR16, FPR64, asm, - []> { + [(set FPR64:$Rd, (i64 (OpN (f16 FPR16:$Rn))))]> { let Inst{31} = 1; // 64-bit FPR flag } // single-precision to 64-bit SIMD/FPR def DSr : BaseFPToIntegerUnscaled<0b00, rmode, opcode, FPR32, FPR64, asm, - []> { + [(set FPR64:$Rd, (i64 (OpN (f32 FPR32:$Rn))))]> { let Inst{31} = 1; // 64-bit FPR flag } } @@ -7940,14 +7941,18 @@ multiclass SIMDTwoScalarD opc, string asm, } } -let mayRaiseFPException = 1, Uses = [FPCR] in -multiclass SIMDFPTwoScalar opc, string asm> { +let mayRaiseFPException = 1, Uses = [FPCR], FastISelShouldIgnore = 1 in +multiclass SIMDFPTwoScalar opc, string asm, + SDPatternOperator OpN = null_frag> { let Predicates = [HasNEONandIsStreamingSafe] in { - def v1i64 : BaseSIMDTwoScalar; - def v1i32 : BaseSIMDTwoScalar; + def v1i64 : BaseSIMDTwoScalar; + def v1i32 : BaseSIMDTwoScalar; } let Predicates = [HasNEONandIsStreamingSafe, HasFullFP16] in { - def v1f16 : BaseSIMDTwoScalar; + def v1f16 : BaseSIMDTwoScalar; } } @@ -10171,28 +10176,6 @@ multiclass SIMDScalarLShiftBHSD opc, string asm, (!cast(NAME # "d") FPR64:$Rn, vecshiftL64:$imm)>; } -multiclass SIMDScalarRShiftBHSD opc, string asm> { - def b : BaseSIMDScalarShift { - let Inst{18-16} = imm{2-0}; - } - - def h : BaseSIMDScalarShift { - let Inst{19-16} = imm{3-0}; - } - - def s : BaseSIMDScalarShift { - let Inst{20-16} = imm{4-0}; - } - - def d : BaseSIMDScalarShift { - let Inst{21-16} = imm{5-0}; - } -} - 
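The OpN pattern operands threaded through FPToIntegerSIMDScalar and SIMDFPTwoScalar above let the SIMD-scalar forms of the rounding conversions (fcvtas, fcvtau, fcvtms, and so on) be selected directly from the corresponding int_aarch64_neon_* intrinsics. A minimal sketch of source that exercises one of them follows; the expectation that the FPR-to-FPR form is picked when the result stays on the SIMD side is an assumption for illustration, not something the patch states.

#include <arm_neon.h>

// vcvtas_s32_f32 lowers to int_aarch64_neon_fcvtas on a scalar float. With the
// patterns above, the conversion can be matched to the SIMD-scalar FCVTAS
// (FPR source and destination) instead of always routing the result through a
// general-purpose register.
int32_t round_ties_to_away(float x) { return vcvtas_s32_f32(x); }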
//---------------------------------------------------------------------------- // AdvSIMD vector x indexed element //---------------------------------------------------------------------------- diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 5a51c812732e6..5a90da1fade39 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -1503,6 +1503,13 @@ AArch64InstrInfo::canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask, getElementSizeForOpcode(PredOpcode)) return PredOpcode; + // For PTEST_FIRST(PTRUE_ALL, WHILE), the PTEST_FIRST is redundant since + // WHILEcc performs an implicit PTEST with an all active mask, setting + // the N flag as the PTEST_FIRST would. + if (PTest->getOpcode() == AArch64::PTEST_PP_FIRST && + isPTrueOpcode(MaskOpcode) && Mask->getOperand(1).getImm() == 31) + return PredOpcode; + return {}; } @@ -5592,7 +5599,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, assert(Subtarget.isSVEorStreamingSVEAvailable() && "Unexpected register store without SVE store instructions"); Opc = AArch64::STR_PXI; - StackID = TargetStackID::ScalableVector; + StackID = TargetStackID::ScalablePredicateVector; } break; } @@ -5607,7 +5614,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, Opc = AArch64::STRSui; else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) { Opc = AArch64::STR_PPXI; - StackID = TargetStackID::ScalableVector; + StackID = TargetStackID::ScalablePredicateVector; } break; case 8: @@ -5777,7 +5784,7 @@ void AArch64InstrInfo::loadRegFromStackSlot( if (IsPNR) PNRReg = DestReg; Opc = AArch64::LDR_PXI; - StackID = TargetStackID::ScalableVector; + StackID = TargetStackID::ScalablePredicateVector; } break; } @@ -5792,7 +5799,7 @@ void AArch64InstrInfo::loadRegFromStackSlot( Opc = AArch64::LDRSui; else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) { Opc = AArch64::LDR_PPXI; - StackID = TargetStackID::ScalableVector; + StackID = TargetStackID::ScalablePredicateVector; } break; case 8: diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 980636c1b562b..f788c7510f80c 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -5231,18 +5231,19 @@ defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>; defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>; let Predicates = [HasNEON, HasFPRCVT] in{ - defm FCVTAS : FPToIntegerSIMDScalar<0b11, 0b010, "fcvtas">; - defm FCVTAU : FPToIntegerSIMDScalar<0b11, 0b011, "fcvtau">; - defm FCVTMS : FPToIntegerSIMDScalar<0b10, 0b100, "fcvtms">; - defm FCVTMU : FPToIntegerSIMDScalar<0b10, 0b101, "fcvtmu">; - defm FCVTNS : FPToIntegerSIMDScalar<0b01, 0b010, "fcvtns">; - defm FCVTNU : FPToIntegerSIMDScalar<0b01, 0b011, "fcvtnu">; - defm FCVTPS : FPToIntegerSIMDScalar<0b10, 0b010, "fcvtps">; - defm FCVTPU : FPToIntegerSIMDScalar<0b10, 0b011, "fcvtpu">; + defm FCVTAS : FPToIntegerSIMDScalar<0b11, 0b010, "fcvtas", int_aarch64_neon_fcvtas>; + defm FCVTAU : FPToIntegerSIMDScalar<0b11, 0b011, "fcvtau", int_aarch64_neon_fcvtau>; + defm FCVTMS : FPToIntegerSIMDScalar<0b10, 0b100, "fcvtms", int_aarch64_neon_fcvtms>; + defm FCVTMU : FPToIntegerSIMDScalar<0b10, 0b101, "fcvtmu", int_aarch64_neon_fcvtmu>; + defm FCVTNS : FPToIntegerSIMDScalar<0b01, 0b010, "fcvtns", int_aarch64_neon_fcvtns>; + defm FCVTNU : FPToIntegerSIMDScalar<0b01, 0b011, "fcvtnu", 
int_aarch64_neon_fcvtnu>; + defm FCVTPS : FPToIntegerSIMDScalar<0b10, 0b010, "fcvtps", int_aarch64_neon_fcvtps>; + defm FCVTPU : FPToIntegerSIMDScalar<0b10, 0b011, "fcvtpu", int_aarch64_neon_fcvtpu>; defm FCVTZS : FPToIntegerSIMDScalar<0b10, 0b110, "fcvtzs">; defm FCVTZU : FPToIntegerSIMDScalar<0b10, 0b111, "fcvtzu">; } + // AArch64's FCVT instructions saturate when out of range. multiclass FPToIntegerSatPats { let Predicates = [HasFullFP16] in { @@ -5309,35 +5310,6 @@ multiclass FPToIntegerSatPats; defm : FPToIntegerSatPats; -multiclass FPToIntegerIntPats { - let Predicates = [HasFullFP16] in { - def : Pat<(i32 (round f16:$Rn)), (!cast(INST # UWHr) $Rn)>; - def : Pat<(i64 (round f16:$Rn)), (!cast(INST # UXHr) $Rn)>; - } - def : Pat<(i32 (round f32:$Rn)), (!cast(INST # UWSr) $Rn)>; - def : Pat<(i64 (round f32:$Rn)), (!cast(INST # UXSr) $Rn)>; - def : Pat<(i32 (round f64:$Rn)), (!cast(INST # UWDr) $Rn)>; - def : Pat<(i64 (round f64:$Rn)), (!cast(INST # UXDr) $Rn)>; - - let Predicates = [HasFullFP16] in { - def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))), - (!cast(INST # SWHri) $Rn, $scale)>; - def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))), - (!cast(INST # SXHri) $Rn, $scale)>; - } - def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))), - (!cast(INST # SWSri) $Rn, $scale)>; - def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))), - (!cast(INST # SXSri) $Rn, $scale)>; - def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))), - (!cast(INST # SWDri) $Rn, $scale)>; - def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))), - (!cast(INST # SXDri) $Rn, $scale)>; -} - -defm : FPToIntegerIntPats; -defm : FPToIntegerIntPats; - multiclass FPToIntegerPats { def : Pat<(i32 (to_int (round f32:$Rn))), (!cast(INST # UWSr) f32:$Rn)>; @@ -6572,14 +6544,14 @@ defm FCMGE : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>; defm FCMGT : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>; defm FCMLE : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>; defm FCMLT : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>; -defm FCVTAS : SIMDFPTwoScalar< 0, 0, 0b11100, "fcvtas">; -defm FCVTAU : SIMDFPTwoScalar< 1, 0, 0b11100, "fcvtau">; -defm FCVTMS : SIMDFPTwoScalar< 0, 0, 0b11011, "fcvtms">; -defm FCVTMU : SIMDFPTwoScalar< 1, 0, 0b11011, "fcvtmu">; -defm FCVTNS : SIMDFPTwoScalar< 0, 0, 0b11010, "fcvtns">; -defm FCVTNU : SIMDFPTwoScalar< 1, 0, 0b11010, "fcvtnu">; -defm FCVTPS : SIMDFPTwoScalar< 0, 1, 0b11010, "fcvtps">; -defm FCVTPU : SIMDFPTwoScalar< 1, 1, 0b11010, "fcvtpu">; +defm FCVTAS : SIMDFPTwoScalar< 0, 0, 0b11100, "fcvtas", int_aarch64_neon_fcvtas>; +defm FCVTAU : SIMDFPTwoScalar< 1, 0, 0b11100, "fcvtau", int_aarch64_neon_fcvtau>; +defm FCVTMS : SIMDFPTwoScalar< 0, 0, 0b11011, "fcvtms", int_aarch64_neon_fcvtms>; +defm FCVTMU : SIMDFPTwoScalar< 1, 0, 0b11011, "fcvtmu", int_aarch64_neon_fcvtmu>; +defm FCVTNS : SIMDFPTwoScalar< 0, 0, 0b11010, "fcvtns", int_aarch64_neon_fcvtns>; +defm FCVTNU : SIMDFPTwoScalar< 1, 0, 0b11010, "fcvtnu", int_aarch64_neon_fcvtnu>; +defm FCVTPS : SIMDFPTwoScalar< 0, 1, 0b11010, "fcvtps", int_aarch64_neon_fcvtps>; +defm FCVTPU : SIMDFPTwoScalar< 1, 1, 0b11010, "fcvtpu", int_aarch64_neon_fcvtpu>; def FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">; defm FCVTZS : SIMDFPTwoScalar< 0, 1, 0b11011, "fcvtzs">; defm FCVTZU : SIMDFPTwoScalar< 1, 1, 0b11011, "fcvtzu">; @@ -6600,6 +6572,86 @@ defm UQXTN : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar defm 
USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd", int_aarch64_neon_usqadd>; +// Floating-point conversion patterns. +multiclass FPToIntegerSIMDScalarPatterns { + def : Pat<(f32 (bitconvert (i32 (OpN (f64 FPR64:$Rn))))), + (!cast(INST # SDr) FPR64:$Rn)>; + def : Pat<(f32 (bitconvert (i32 (OpN (f16 FPR16:$Rn))))), + (!cast(INST # SHr) FPR16:$Rn)>; + def : Pat<(f64 (bitconvert (i64 (OpN (f16 FPR16:$Rn))))), + (!cast(INST # DHr) FPR16:$Rn)>; + def : Pat<(f64 (bitconvert (i64 (OpN (f32 FPR32:$Rn))))), + (!cast(INST # DSr) FPR32:$Rn)>; + def : Pat<(f32 (bitconvert (i32 (OpN (f32 FPR32:$Rn))))), + (!cast(INST # v1i32) FPR32:$Rn)>; + def : Pat<(f64 (bitconvert (i64 (OpN (f64 FPR64:$Rn))))), + (!cast(INST # v1i64) FPR64:$Rn)>; + +} +defm: FPToIntegerSIMDScalarPatterns; +defm: FPToIntegerSIMDScalarPatterns; +defm: FPToIntegerSIMDScalarPatterns; +defm: FPToIntegerSIMDScalarPatterns; +defm: FPToIntegerSIMDScalarPatterns; +defm: FPToIntegerSIMDScalarPatterns; +defm: FPToIntegerSIMDScalarPatterns; +defm: FPToIntegerSIMDScalarPatterns; + +multiclass FPToIntegerIntPats { + let Predicates = [HasFullFP16] in { + def : Pat<(i32 (round f16:$Rn)), (!cast(INST # UWHr) $Rn)>; + def : Pat<(i64 (round f16:$Rn)), (!cast(INST # UXHr) $Rn)>; + } + def : Pat<(i32 (round f32:$Rn)), (!cast(INST # UWSr) $Rn)>; + def : Pat<(i64 (round f32:$Rn)), (!cast(INST # UXSr) $Rn)>; + def : Pat<(i32 (round f64:$Rn)), (!cast(INST # UWDr) $Rn)>; + def : Pat<(i64 (round f64:$Rn)), (!cast(INST # UXDr) $Rn)>; + + // For global-isel we can use register classes to determine + // which FCVT instruction to use. + let Predicates = [HasFPRCVT] in { + def : Pat<(i32 (round f16:$Rn)), (!cast(INST # SHr) $Rn)>; + def : Pat<(i64 (round f16:$Rn)), (!cast(INST # DHr) $Rn)>; + def : Pat<(i64 (round f32:$Rn)), (!cast(INST # DSr) $Rn)>; + def : Pat<(i32 (round f64:$Rn)), (!cast(INST # SDr) $Rn)>; + } + def : Pat<(i32 (round f32:$Rn)), (!cast(INST # v1i32) $Rn)>; + def : Pat<(i64 (round f64:$Rn)), (!cast(INST # v1i64) $Rn)>; + + let Predicates = [HasFPRCVT] in { + def : Pat<(f32 (bitconvert (i32 (round f16:$Rn)))), + (!cast(INST # SHr) $Rn)>; + def : Pat<(f64 (bitconvert (i64 (round f16:$Rn)))), + (!cast(INST # DHr) $Rn)>; + def : Pat<(f64 (bitconvert (i64 (round f32:$Rn)))), + (!cast(INST # DSr) $Rn)>; + def : Pat<(f32 (bitconvert (i32 (round f64:$Rn)))), + (!cast(INST # SDr) $Rn)>; + } + def : Pat<(f32 (bitconvert (i32 (round f32:$Rn)))), + (!cast(INST # v1i32) $Rn)>; + def : Pat<(f64 (bitconvert (i64 (round f64:$Rn)))), + (!cast(INST # v1i64) $Rn)>; + + let Predicates = [HasFullFP16] in { + def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))), + (!cast(INST # SWHri) $Rn, $scale)>; + def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))), + (!cast(INST # SXHri) $Rn, $scale)>; + } + def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))), + (!cast(INST # SWSri) $Rn, $scale)>; + def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))), + (!cast(INST # SXSri) $Rn, $scale)>; + def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))), + (!cast(INST # SWDri) $Rn, $scale)>; + def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))), + (!cast(INST # SXDri) $Rn, $scale)>; +} + +defm : FPToIntegerIntPats; +defm : FPToIntegerIntPats; + // f16 -> s16 conversions let Predicates = [HasFullFP16] in { def : Pat<(i16(fp_to_sint_sat_gi f16:$Rn)), (FCVTZSv1f16 f16:$Rn)>; @@ -9855,8 +9907,14 @@ def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 
(REV64v4i16 FPR64:$src))>; } -def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>; -def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>; +def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), + (v4f16 FPR64:$src)>; +def : Pat<(v4f16 (bitconvert (v4bf16 FPR64:$src))), + (v4f16 FPR64:$src)>; +def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), + (v4bf16 FPR64:$src)>; +def : Pat<(v4bf16 (bitconvert (v4f16 FPR64:$src))), + (v4bf16 FPR64:$src)>; let Predicates = [IsLE] in { def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>; @@ -10184,8 +10242,14 @@ def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 (REV32v8i16 FPR128:$src))>; } -def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>; -def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>; +def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), + (v8f16 FPR128:$src)>; +def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), + (v8bf16 FPR128:$src)>; +def : Pat<(v8f16 (bitconvert (v8bf16 FPR128:$src))), + (v8f16 FPR128:$src)>; +def : Pat<(v8bf16 (bitconvert (v8f16 FPR128:$src))), + (v8bf16 FPR128:$src)>; let Predicates = [IsLE] in { def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>; diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp index a81f5b3d436a9..b3c9656d4d80b 100644 --- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp @@ -23,12 +23,21 @@ using namespace llvm; +static std::optional +getSVEStackSize(const AArch64FunctionInfo &MFI, + uint64_t (AArch64FunctionInfo::*GetStackSize)() const) { + if (!MFI.hasCalculatedStackSizeSVE()) + return std::nullopt; + return (MFI.*GetStackSize)(); +} + yaml::AArch64FunctionInfo::AArch64FunctionInfo( const llvm::AArch64FunctionInfo &MFI) : HasRedZone(MFI.hasRedZone()), - StackSizeSVE(MFI.hasCalculatedStackSizeSVE() - ? std::optional(MFI.getStackSizeSVE()) - : std::nullopt), + StackSizeZPR( + getSVEStackSize(MFI, &llvm::AArch64FunctionInfo::getStackSizeZPR)), + StackSizePPR( + getSVEStackSize(MFI, &llvm::AArch64FunctionInfo::getStackSizePPR)), HasStackFrame(MFI.hasStackFrame() ? std::optional(MFI.hasStackFrame()) : std::nullopt) {} @@ -41,8 +50,9 @@ void AArch64FunctionInfo::initializeBaseYamlFields( const yaml::AArch64FunctionInfo &YamlMFI) { if (YamlMFI.HasRedZone) HasRedZone = YamlMFI.HasRedZone; - if (YamlMFI.StackSizeSVE) - setStackSizeSVE(*YamlMFI.StackSizeSVE); + if (YamlMFI.StackSizeZPR || YamlMFI.StackSizePPR) + setStackSizeSVE(YamlMFI.StackSizeZPR.value_or(0), + YamlMFI.StackSizePPR.value_or(0)); if (YamlMFI.HasStackFrame) setHasStackFrame(*YamlMFI.HasStackFrame); } diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h index 897c7e8539608..91e64e69de6d0 100644 --- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h +++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h @@ -74,13 +74,10 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { /// Amount of stack frame size, not including callee-saved registers. uint64_t LocalStackSize = 0; - /// The start and end frame indices for the SVE callee saves. - int MinSVECSFrameIndex = 0; - int MaxSVECSFrameIndex = 0; - /// Amount of stack frame size used for saving callee-saved registers. 
unsigned CalleeSavedStackSize = 0; - unsigned SVECalleeSavedStackSize = 0; + unsigned ZPRCalleeSavedStackSize = 0; + unsigned PPRCalleeSavedStackSize = 0; bool HasCalleeSavedStackSize = false; bool HasSVECalleeSavedStackSize = false; @@ -137,9 +134,14 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { /// SVE stack size (for predicates and data vectors) are maintained here /// rather than in FrameInfo, as the placement and Stack IDs are target /// specific. - uint64_t StackSizeSVE = 0; + uint64_t StackSizeZPR = 0; + uint64_t StackSizePPR = 0; + + /// Are SVE objects (vectors and predicates) split into separate regions on + /// the stack. + bool SplitSVEObjects = false; - /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid. + /// HasCalculatedStackSizeSVE indicates whether StackSizeZPR/PPR is valid. bool HasCalculatedStackSizeSVE = false; /// Has a value when it is known whether or not the function uses a @@ -312,16 +314,25 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { TailCallReservedStack = bytes; } - bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; } - - void setStackSizeSVE(uint64_t S) { + void setStackSizeSVE(uint64_t ZPR, uint64_t PPR) { + StackSizeZPR = ZPR; + StackSizePPR = PPR; HasCalculatedStackSizeSVE = true; - StackSizeSVE = S; } - uint64_t getStackSizeSVE() const { + uint64_t getStackSizeZPR() const { + assert(hasCalculatedStackSizeSVE()); + return StackSizeZPR; + } + uint64_t getStackSizePPR() const { assert(hasCalculatedStackSizeSVE()); - return StackSizeSVE; + return StackSizePPR; + } + + bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; } + + bool hasSVEStackSize() const { + return getStackSizeZPR() > 0 || getStackSizePPR() > 0; } bool hasStackFrame() const { return HasStackFrame; } @@ -329,7 +340,6 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { bool isStackRealigned() const { return StackRealigned; } void setStackRealigned(bool s) { StackRealigned = s; } - bool hasCalleeSaveStackFreeSpace() const { return CalleeSaveStackHasFreeSpace; } @@ -414,29 +424,37 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { } // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes' - void setSVECalleeSavedStackSize(unsigned Size) { - SVECalleeSavedStackSize = Size; + void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR) { + ZPRCalleeSavedStackSize = ZPR; + PPRCalleeSavedStackSize = PPR; HasSVECalleeSavedStackSize = true; } - unsigned getSVECalleeSavedStackSize() const { + unsigned getZPRCalleeSavedStackSize() const { assert(HasSVECalleeSavedStackSize && - "SVECalleeSavedStackSize has not been calculated"); - return SVECalleeSavedStackSize; + "ZPRCalleeSavedStackSize has not been calculated"); + return ZPRCalleeSavedStackSize; } - - void setMinMaxSVECSFrameIndex(int Min, int Max) { - MinSVECSFrameIndex = Min; - MaxSVECSFrameIndex = Max; + unsigned getPPRCalleeSavedStackSize() const { + assert(HasSVECalleeSavedStackSize && + "PPRCalleeSavedStackSize has not been calculated"); + return PPRCalleeSavedStackSize; } - int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; } - int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; } + unsigned getSVECalleeSavedStackSize() const { + assert(!hasSplitSVEObjects() && + "ZPRs and PPRs are split. 
Use get[ZPR|PPR]CalleeSavedStackSize()"); + return getZPRCalleeSavedStackSize() + getPPRCalleeSavedStackSize(); + } void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; } unsigned getNumLocalDynamicTLSAccesses() const { return NumLocalDynamicTLSAccesses; } + bool isStackHazardIncludedInCalleeSaveArea() const { + return hasStackHazardSlotIndex() && !hasSplitSVEObjects(); + } + std::optional hasRedZone() const { return HasRedZone; } void setHasRedZone(bool s) { HasRedZone = s; } @@ -472,6 +490,15 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { StackHazardCSRSlotIndex = Index; } + bool hasSplitSVEObjects() const { return SplitSVEObjects; } + void setSplitSVEObjects(bool s) { SplitSVEObjects = s; } + + bool hasSVE_AAPCS(const MachineFunction &MF) const { + return hasSplitSVEObjects() || isSVECC() || + MF.getFunction().getCallingConv() == + CallingConv::AArch64_SVE_VectorCall; + } + SMEAttrs getSMEFnAttrs() const { return SMEFnAttrs; } unsigned getSRetReturnReg() const { return SRetReturnReg; } @@ -611,7 +638,8 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { namespace yaml { struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo { std::optional HasRedZone; - std::optional StackSizeSVE; + std::optional StackSizeZPR; + std::optional StackSizePPR; std::optional HasStackFrame; AArch64FunctionInfo() = default; @@ -624,7 +652,8 @@ struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo { template <> struct MappingTraits { static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) { YamlIO.mapOptional("hasRedZone", MFI.HasRedZone); - YamlIO.mapOptional("stackSizeSVE", MFI.StackSizeSVE); + YamlIO.mapOptional("stackSizeZPR", MFI.StackSizeZPR); + YamlIO.mapOptional("stackSizePPR", MFI.StackSizePPR); YamlIO.mapOptional("hasStackFrame", MFI.HasStackFrame); } }; diff --git a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp index 7947469b6c04f..aed137c654d15 100644 --- a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp +++ b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp @@ -48,21 +48,19 @@ bool AArch64PrologueEpilogueCommon::isVGInstruction( return Opc == TargetOpcode::COPY; } -// Convenience function to determine whether I is an SVE callee save. -static bool isSVECalleeSave(MachineBasicBlock::iterator I) { +// Convenience function to determine whether I is part of the ZPR callee saves. +static bool isPartOfZPRCalleeSaves(MachineBasicBlock::iterator I) { switch (I->getOpcode()) { default: return false; - case AArch64::PTRUE_C_B: case AArch64::LD1B_2Z_IMM: case AArch64::ST1B_2Z_IMM: case AArch64::STR_ZXI: - case AArch64::STR_PXI: case AArch64::LDR_ZXI: - case AArch64::LDR_PXI: - case AArch64::PTRUE_B: case AArch64::CPY_ZPzI_B: case AArch64::CMPNE_PPzZI_B: + case AArch64::PTRUE_C_B: + case AArch64::PTRUE_B: return I->getFlag(MachineInstr::FrameSetup) || I->getFlag(MachineInstr::FrameDestroy); case AArch64::SEH_SavePReg: @@ -71,6 +69,23 @@ static bool isSVECalleeSave(MachineBasicBlock::iterator I) { } } +// Convenience function to determine whether I is part of the PPR callee saves. +static bool isPartOfPPRCalleeSaves(MachineBasicBlock::iterator I) { + switch (I->getOpcode()) { + default: + return false; + case AArch64::STR_PXI: + case AArch64::LDR_PXI: + return I->getFlag(MachineInstr::FrameSetup) || + I->getFlag(MachineInstr::FrameDestroy); + } +} + +// Convenience function to determine whether I is part of the SVE callee saves. 
+static bool isPartOfSVECalleeSaves(MachineBasicBlock::iterator I) { + return isPartOfZPRCalleeSaves(I) || isPartOfPPRCalleeSaves(I); +} + AArch64PrologueEpilogueCommon::AArch64PrologueEpilogueCommon( MachineFunction &MF, MachineBasicBlock &MBB, const AArch64FrameLowering &AFL) @@ -316,7 +331,7 @@ bool AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump( // When there is an SVE area on the stack, always allocate the // callee-saves and spills/locals separately. - if (AFL.getSVEStackSize(MF)) + if (AFI->hasSVEStackSize()) return false; return true; @@ -541,6 +556,13 @@ void AArch64PrologueEmitter::emitPrologue() { // to determine the end of the prologue. DebugLoc DL; + // In some cases, particularly with CallingConv::SwiftTail, it is possible to + // have a tail-call where the caller only needs to adjust the stack pointer in + // the epilogue. In this case, we still need to emit a SEH prologue sequence. + // See `seh-minimal-prologue-epilogue.ll` test cases. + if (AFI->getArgumentStackToRestore()) + HasWinCFI = true; + if (AFI->shouldSignReturnAddress(MF)) { // If pac-ret+leaf is in effect, PAUTH_PROLOGUE pseudo instructions // are inserted by emitPacRetPlusLeafHardening(). @@ -632,7 +654,7 @@ void AArch64PrologueEmitter::emitPrologue() { // Now allocate space for the GPR callee saves. MachineBasicBlock::iterator MBBI = PrologueBeginI; - while (MBBI != EndI && isSVECalleeSave(MBBI)) + while (MBBI != EndI && isPartOfSVECalleeSaves(MBBI)) ++MBBI; FirstGPRSaveI = convertCalleeSaveRestoreToSPPrePostIncDec( MBBI, DL, -AFI->getCalleeSavedStackSize(), EmitAsyncCFI); @@ -662,7 +684,7 @@ void AArch64PrologueEmitter::emitPrologue() { MachineBasicBlock::iterator AfterGPRSavesI = FirstGPRSaveI; while (AfterGPRSavesI != EndI && AfterGPRSavesI->getFlag(MachineInstr::FrameSetup) && - !isSVECalleeSave(AfterGPRSavesI)) { + !isPartOfSVECalleeSaves(AfterGPRSavesI)) { if (CombineSPBump && // Only fix-up frame-setup load/store instructions. (!AFL.requiresSaveVG(MF) || !isVGInstruction(AfterGPRSavesI, TLI))) @@ -693,56 +715,105 @@ void AArch64PrologueEmitter::emitPrologue() { if (AFL.windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) emitWindowsStackProbe(AfterGPRSavesI, DL, NumBytes, RealignmentPadding); - StackOffset SVEStackSize = AFL.getSVEStackSize(MF); - StackOffset SVECalleeSavesSize = {}, SVELocalsSize = SVEStackSize; - MachineBasicBlock::iterator CalleeSavesEnd = AfterGPRSavesI; + StackOffset PPRCalleeSavesSize = + StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize()); + StackOffset ZPRCalleeSavesSize = + StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize()); + StackOffset SVECalleeSavesSize = PPRCalleeSavesSize + ZPRCalleeSavesSize; + StackOffset PPRLocalsSize = AFL.getPPRStackSize(MF) - PPRCalleeSavesSize; + StackOffset ZPRLocalsSize = AFL.getZPRStackSize(MF) - ZPRCalleeSavesSize; + + std::optional ZPRCalleeSavesBegin, + ZPRCalleeSavesEnd, PPRCalleeSavesBegin, PPRCalleeSavesEnd; StackOffset CFAOffset = StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes); - - // Process the SVE callee-saves to determine what space needs to be - // allocated. MachineBasicBlock::iterator AfterSVESavesI = AfterGPRSavesI; - if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) { - LLVM_DEBUG(dbgs() << "SVECalleeSavedStackSize = " << CalleeSavedSize - << "\n"); - SVECalleeSavesSize = StackOffset::getScalable(CalleeSavedSize); - SVELocalsSize = SVEStackSize - SVECalleeSavesSize; - // Find callee save instructions in frame. 
- // Note: With FPAfterSVECalleeSaves the callee saves have already been - // allocated. - if (!FPAfterSVECalleeSaves) { - MachineBasicBlock::iterator CalleeSavesBegin = AfterGPRSavesI; - assert(isSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction"); - while (isSVECalleeSave(AfterSVESavesI) && + if (!FPAfterSVECalleeSaves) { + // Process the SVE callee-saves to find the starts/ends of the ZPR and PPR + // areas. + PPRCalleeSavesBegin = AfterGPRSavesI; + if (PPRCalleeSavesSize) { + LLVM_DEBUG(dbgs() << "PPRCalleeSavedStackSize = " + << PPRCalleeSavesSize.getScalable() << "\n"); + + assert(isPartOfPPRCalleeSaves(*PPRCalleeSavesBegin) && + "Unexpected instruction"); + while (isPartOfPPRCalleeSaves(AfterSVESavesI) && AfterSVESavesI != MBB.getFirstTerminator()) ++AfterSVESavesI; - CalleeSavesEnd = AfterSVESavesI; - - StackOffset LocalsSize = SVELocalsSize + StackOffset::getFixed(NumBytes); - // Allocate space for the callee saves (if any). - allocateStackSpace(CalleeSavesBegin, 0, SVECalleeSavesSize, - EmitAsyncCFI && !HasFP, CFAOffset, - MFI.hasVarSizedObjects() || LocalsSize); } + PPRCalleeSavesEnd = ZPRCalleeSavesBegin = AfterSVESavesI; + if (ZPRCalleeSavesSize) { + LLVM_DEBUG(dbgs() << "ZPRCalleeSavedStackSize = " + << ZPRCalleeSavesSize.getScalable() << "\n"); + assert(isPartOfZPRCalleeSaves(*ZPRCalleeSavesBegin) && + "Unexpected instruction"); + while (isPartOfZPRCalleeSaves(AfterSVESavesI) && + AfterSVESavesI != MBB.getFirstTerminator()) + ++AfterSVESavesI; + } + ZPRCalleeSavesEnd = AfterSVESavesI; } - CFAOffset += SVECalleeSavesSize; if (EmitAsyncCFI) - emitCalleeSavedSVELocations(CalleeSavesEnd); - - // Allocate space for the rest of the frame including SVE locals. Align the - // stack as necessary. - assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) && - "Cannot use redzone with stack realignment"); - if (!AFL.canUseRedZone(MF)) { - // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have - // the correct value here, as NumBytes also includes padding bytes, - // which shouldn't be counted here. - allocateStackSpace(CalleeSavesEnd, RealignmentPadding, - SVELocalsSize + StackOffset::getFixed(NumBytes), + emitCalleeSavedSVELocations(AfterSVESavesI); + + if (AFI->hasSplitSVEObjects()) { + assert(!FPAfterSVECalleeSaves && + "Cannot use FPAfterSVECalleeSaves with aarch64-split-sve-objects"); + assert(!AFL.canUseRedZone(MF) && + "Cannot use redzone with aarch64-split-sve-objects"); + // TODO: Handle HasWinCFI/NeedsWinCFI? + assert(!NeedsWinCFI && + "WinCFI with aarch64-split-sve-objects is not supported"); + + // Split ZPR and PPR allocation. + // Allocate PPR callee saves + allocateStackSpace(*PPRCalleeSavesBegin, 0, PPRCalleeSavesSize, + EmitAsyncCFI && !HasFP, CFAOffset, + MFI.hasVarSizedObjects() || ZPRCalleeSavesSize || + ZPRLocalsSize || PPRLocalsSize); + CFAOffset += PPRCalleeSavesSize; + + // Allocate PPR locals + ZPR callee saves + assert(PPRCalleeSavesEnd == ZPRCalleeSavesBegin && + "Expected ZPR callee saves after PPR locals"); + allocateStackSpace(*PPRCalleeSavesEnd, RealignmentPadding, + PPRLocalsSize + ZPRCalleeSavesSize, + EmitAsyncCFI && !HasFP, CFAOffset, + MFI.hasVarSizedObjects() || ZPRLocalsSize); + CFAOffset += PPRLocalsSize + ZPRCalleeSavesSize; + + // Allocate ZPR locals + allocateStackSpace(*ZPRCalleeSavesEnd, RealignmentPadding, + ZPRLocalsSize + StackOffset::getFixed(NumBytes), EmitAsyncCFI && !HasFP, CFAOffset, MFI.hasVarSizedObjects()); + } else { + // Allocate space for the callee saves (if any). 
+ StackOffset LocalsSize = + PPRLocalsSize + ZPRLocalsSize + StackOffset::getFixed(NumBytes); + if (!FPAfterSVECalleeSaves) + allocateStackSpace(AfterGPRSavesI, 0, SVECalleeSavesSize, + EmitAsyncCFI && !HasFP, CFAOffset, + MFI.hasVarSizedObjects() || LocalsSize); + CFAOffset += SVECalleeSavesSize; + + // Allocate space for the rest of the frame including SVE locals. Align the + // stack as necessary. + assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) && + "Cannot use redzone with stack realignment"); + if (!AFL.canUseRedZone(MF)) { + // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have + // the correct value here, as NumBytes also includes padding bytes, + // which shouldn't be counted here. + StackOffset SVELocalsSize = PPRLocalsSize + ZPRLocalsSize; + allocateStackSpace(AfterSVESavesI, RealignmentPadding, + SVELocalsSize + StackOffset::getFixed(NumBytes), + EmitAsyncCFI && !HasFP, CFAOffset, + MFI.hasVarSizedObjects()); + } } // If we need a base pointer, set it up here. It's whatever the value of the @@ -789,7 +860,8 @@ void AArch64PrologueEmitter::emitPrologue() { emitDefineCFAWithFP(AfterSVESavesI, FixedObject); } else { StackOffset TotalSize = - SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize()); + AFL.getSVEStackSize(MF) + + StackOffset::getFixed((int64_t)MFI.getStackSize()); CFIInstBuilder CFIBuilder(MBB, AfterSVESavesI, MachineInstr::FrameSetup); CFIBuilder.insertCFIInst( createDefCFA(RegInfo, /*FrameReg=*/AArch64::SP, /*Reg=*/AArch64::SP, @@ -1158,7 +1230,7 @@ void AArch64PrologueEmitter::emitCalleeSavedGPRLocations( CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup); for (const auto &Info : CSI) { unsigned FrameIdx = Info.getFrameIdx(); - if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) + if (MFI.hasScalableStackID(FrameIdx)) continue; assert(!Info.isSpilledToReg() && "Spilling to registers not implemented"); @@ -1184,8 +1256,10 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations( AFL.getOffsetOfLocalArea(); } + StackOffset PPRStackSize = AFL.getPPRStackSize(MF); for (const auto &Info : CSI) { - if (MFI.getStackID(Info.getFrameIdx()) != TargetStackID::ScalableVector) + int FI = Info.getFrameIdx(); + if (!MFI.hasScalableStackID(FI)) continue; // Not all unwinders may know about SVE registers, so assume the lowest @@ -1196,9 +1270,13 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations( continue; StackOffset Offset = - StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) - + StackOffset::getScalable(MFI.getObjectOffset(FI)) - StackOffset::getFixed(AFI->getCalleeSavedStackSize(MFI)); + if (AFI->hasSplitSVEObjects() && + MFI.getStackID(FI) == TargetStackID::ScalableVector) + Offset -= PPRStackSize; + CFIBuilder.insertCFIInst( createCFAOffset(RegInfo, Reg, Offset, IncomingVGOffsetFromDefCFA)); } @@ -1315,7 +1393,7 @@ void AArch64EpilogueEmitter::emitEpilogue() { while (FirstGPRRestoreI != Begin) { --FirstGPRRestoreI; if (!FirstGPRRestoreI->getFlag(MachineInstr::FrameDestroy) || - (!FPAfterSVECalleeSaves && isSVECalleeSave(FirstGPRRestoreI))) { + (!FPAfterSVECalleeSaves && isPartOfSVECalleeSaves(FirstGPRRestoreI))) { ++FirstGPRRestoreI; break; } else if (CombineSPBump) @@ -1339,7 +1417,9 @@ void AArch64EpilogueEmitter::emitEpilogue() { if (HasFP && AFI->hasSwiftAsyncContext()) emitSwiftAsyncContextFramePointer(EpilogueEndI, DL); - const StackOffset &SVEStackSize = AFL.getSVEStackSize(MF); + StackOffset ZPRStackSize = AFL.getZPRStackSize(MF); + StackOffset PPRStackSize = AFL.getPPRStackSize(MF); + 
StackOffset SVEStackSize = ZPRStackSize + PPRStackSize; // If there is a single SP update, insert it before the ret and we're done. if (CombineSPBump) { @@ -1360,106 +1440,188 @@ void AArch64EpilogueEmitter::emitEpilogue() { NumBytes -= PrologueSaveSize; assert(NumBytes >= 0 && "Negative stack allocation size!?"); - // Process the SVE callee-saves to determine what space needs to be - // deallocated. - StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize; - MachineBasicBlock::iterator RestoreBegin = FirstGPRRestoreI, - RestoreEnd = FirstGPRRestoreI; - if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) { - if (FPAfterSVECalleeSaves) - RestoreEnd = MBB.getFirstTerminator(); - - RestoreBegin = std::prev(RestoreEnd); - while (RestoreBegin != MBB.begin() && - isSVECalleeSave(std::prev(RestoreBegin))) - --RestoreBegin; - - assert(isSVECalleeSave(RestoreBegin) && - isSVECalleeSave(std::prev(RestoreEnd)) && "Unexpected instruction"); - - StackOffset CalleeSavedSizeAsOffset = - StackOffset::getScalable(CalleeSavedSize); - DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset; - DeallocateAfter = CalleeSavedSizeAsOffset; - } - - // Deallocate the SVE area. - if (FPAfterSVECalleeSaves) { - // If the callee-save area is before FP, restoring the FP implicitly - // deallocates non-callee-save SVE allocations. Otherwise, deallocate - // them explicitly. - if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) { - emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP, - DeallocateBefore, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI, &HasWinCFI); + if (!AFI->hasSplitSVEObjects()) { + // Process the SVE callee-saves to determine what space needs to be + // deallocated. + StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize; + MachineBasicBlock::iterator RestoreBegin = FirstGPRRestoreI, + RestoreEnd = FirstGPRRestoreI; + int64_t ZPRCalleeSavedSize = AFI->getZPRCalleeSavedStackSize(); + int64_t PPRCalleeSavedSize = AFI->getPPRCalleeSavedStackSize(); + int64_t SVECalleeSavedSize = ZPRCalleeSavedSize + PPRCalleeSavedSize; + + if (SVECalleeSavedSize) { + if (FPAfterSVECalleeSaves) + RestoreEnd = MBB.getFirstTerminator(); + + RestoreBegin = std::prev(RestoreEnd); + while (RestoreBegin != MBB.begin() && + isPartOfSVECalleeSaves(std::prev(RestoreBegin))) + --RestoreBegin; + + assert(isPartOfSVECalleeSaves(RestoreBegin) && + isPartOfSVECalleeSaves(std::prev(RestoreEnd)) && + "Unexpected instruction"); + + StackOffset CalleeSavedSizeAsOffset = + StackOffset::getScalable(SVECalleeSavedSize); + DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset; + DeallocateAfter = CalleeSavedSizeAsOffset; } - // Deallocate callee-save non-SVE registers. - emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, - StackOffset::getFixed(AFI->getCalleeSavedStackSize()), TII, - MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI); - - // Deallocate fixed objects. - emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, - StackOffset::getFixed(FixedObject), TII, - MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI); - - // Deallocate callee-save SVE registers. 
- emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, - DeallocateAfter, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI, &HasWinCFI); - } else if (SVEStackSize) { - int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize(); - // If we have stack realignment or variable-sized objects we must use the - // FP to restore SVE callee saves (as there is an unknown amount of - // data/padding between the SP and SVE CS area). - Register BaseForSVEDealloc = - (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP - : AArch64::SP; - if (SVECalleeSavedSize && BaseForSVEDealloc == AArch64::FP) { - Register CalleeSaveBase = AArch64::FP; - if (int64_t CalleeSaveBaseOffset = - AFI->getCalleeSaveBaseToFrameRecordOffset()) { - // If we have have an non-zero offset to the non-SVE CS base we need to - // compute the base address by subtracting the offest in a temporary - // register first (to avoid briefly deallocating the SVE CS). - CalleeSaveBase = - MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass); - emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP, - StackOffset::getFixed(-CalleeSaveBaseOffset), TII, - MachineInstr::FrameDestroy); - } - // The code below will deallocate the stack space space by moving the - // SP to the start of the SVE callee-save area. - emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase, - StackOffset::getScalable(-SVECalleeSavedSize), TII, - MachineInstr::FrameDestroy); - } else if (BaseForSVEDealloc == AArch64::SP) { - if (SVECalleeSavedSize) { - // Deallocate the non-SVE locals first before we can deallocate (and - // restore callee saves) from the SVE area. - emitFrameOffset( - MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, - StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy, - false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, - SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize)); - NumBytes = 0; + // Deallocate the SVE area. + if (FPAfterSVECalleeSaves) { + // If the callee-save area is before FP, restoring the FP implicitly + // deallocates non-callee-save SVE allocations. Otherwise, deallocate + // them explicitly. + if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) { + emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP, + DeallocateBefore, TII, MachineInstr::FrameDestroy, + false, NeedsWinCFI, &HasWinCFI); } + // Deallocate callee-save non-SVE registers. emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, - DeallocateBefore, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, - SVEStackSize + - StackOffset::getFixed(NumBytes + PrologueSaveSize)); + StackOffset::getFixed(AFI->getCalleeSavedStackSize()), + TII, MachineInstr::FrameDestroy, false, NeedsWinCFI, + &HasWinCFI); + // Deallocate fixed objects. + emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, + StackOffset::getFixed(FixedObject), TII, + MachineInstr::FrameDestroy, false, NeedsWinCFI, + &HasWinCFI); + + // Deallocate callee-save SVE registers. 
emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, DeallocateAfter, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, - DeallocateAfter + - StackOffset::getFixed(NumBytes + PrologueSaveSize)); + NeedsWinCFI, &HasWinCFI); + } else if (SVEStackSize) { + int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize(); + // If we have stack realignment or variable-sized objects we must use the + // FP to restore SVE callee saves (as there is an unknown amount of + // data/padding between the SP and SVE CS area). + Register BaseForSVEDealloc = + (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP + : AArch64::SP; + if (SVECalleeSavedSize && BaseForSVEDealloc == AArch64::FP) { + Register CalleeSaveBase = AArch64::FP; + if (int64_t CalleeSaveBaseOffset = + AFI->getCalleeSaveBaseToFrameRecordOffset()) { + // If we have a non-zero offset to the non-SVE CS base we need + // to compute the base address by subtracting the offset in a + // temporary register first (to avoid briefly deallocating the SVE + // CS). + CalleeSaveBase = MBB.getParent()->getRegInfo().createVirtualRegister( + &AArch64::GPR64RegClass); + emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP, + StackOffset::getFixed(-CalleeSaveBaseOffset), TII, + MachineInstr::FrameDestroy); + } + // The code below will deallocate the stack space by moving the + // SP to the start of the SVE callee-save area. + emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase, + StackOffset::getScalable(-SVECalleeSavedSize), TII, + MachineInstr::FrameDestroy); + } else if (BaseForSVEDealloc == AArch64::SP) { + if (SVECalleeSavedSize) { + // Deallocate the non-SVE locals first before we can deallocate (and + // restore callee saves) from the SVE area. + emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, + StackOffset::getFixed(NumBytes), TII, + MachineInstr::FrameDestroy, false, NeedsWinCFI, + &HasWinCFI, EmitCFI && !HasFP, + SVEStackSize + StackOffset::getFixed( + NumBytes + PrologueSaveSize)); + NumBytes = 0; + } + + emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, + DeallocateBefore, TII, MachineInstr::FrameDestroy, + false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, + SVEStackSize + + StackOffset::getFixed(NumBytes + PrologueSaveSize)); + + emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, + DeallocateAfter, TII, MachineInstr::FrameDestroy, false, + NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, + DeallocateAfter + + StackOffset::getFixed(NumBytes + PrologueSaveSize)); + } + + if (EmitCFI) + emitCalleeSavedSVERestores(RestoreEnd); + } + } else if (AFI->hasSplitSVEObjects() && SVEStackSize) { + // TODO: Support stack realignment and variable-sized objects. + assert(!AFI->isStackRealigned() && !MFI.hasVarSizedObjects() && + "unexpected stack realignment or variable sized objects with split " + "SVE stack objects"); + // SplitSVEObjects. Determine the sizes and starts/ends of the ZPR and PPR + // areas. 
+ auto ZPRCalleeSavedSize = + StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize()); + auto PPRCalleeSavedSize = + StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize()); + StackOffset PPRLocalsSize = PPRStackSize - PPRCalleeSavedSize; + StackOffset ZPRLocalsSize = ZPRStackSize - ZPRCalleeSavedSize; + + MachineBasicBlock::iterator PPRRestoreBegin = FirstGPRRestoreI, + PPRRestoreEnd = FirstGPRRestoreI; + if (PPRCalleeSavedSize) { + PPRRestoreBegin = std::prev(PPRRestoreEnd); + while (PPRRestoreBegin != MBB.begin() && + isPartOfPPRCalleeSaves(std::prev(PPRRestoreBegin))) + --PPRRestoreBegin; + } + + MachineBasicBlock::iterator ZPRRestoreBegin = PPRRestoreBegin, + ZPRRestoreEnd = PPRRestoreBegin; + if (ZPRCalleeSavedSize) { + ZPRRestoreBegin = std::prev(ZPRRestoreEnd); + while (ZPRRestoreBegin != MBB.begin() && + isPartOfZPRCalleeSaves(std::prev(ZPRRestoreBegin))) + --ZPRRestoreBegin; } + + auto CFAOffset = + SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize); + if (PPRCalleeSavedSize || ZPRCalleeSavedSize) { + // Deallocate the non-SVE locals first before we can deallocate (and + // restore callee saves) from the SVE area. + auto NonSVELocals = StackOffset::getFixed(NumBytes); + emitFrameOffset(MBB, ZPRRestoreBegin, DL, AArch64::SP, AArch64::SP, + NonSVELocals, TII, MachineInstr::FrameDestroy, false, + false, nullptr, EmitCFI && !HasFP, CFAOffset); + NumBytes = 0; + CFAOffset -= NonSVELocals; + } + + if (ZPRLocalsSize) { + emitFrameOffset(MBB, ZPRRestoreBegin, DL, AArch64::SP, AArch64::SP, + ZPRLocalsSize, TII, MachineInstr::FrameDestroy, false, + false, nullptr, EmitCFI && !HasFP, CFAOffset); + CFAOffset -= ZPRLocalsSize; + } + + if (PPRLocalsSize || ZPRCalleeSavedSize) { + assert(PPRRestoreBegin == ZPRRestoreEnd && + "Expected PPR restores after ZPR"); + emitFrameOffset(MBB, PPRRestoreBegin, DL, AArch64::SP, AArch64::SP, + PPRLocalsSize + ZPRCalleeSavedSize, TII, + MachineInstr::FrameDestroy, false, false, nullptr, + EmitCFI && !HasFP, CFAOffset); + CFAOffset -= PPRLocalsSize + ZPRCalleeSavedSize; + } + if (PPRCalleeSavedSize) { + emitFrameOffset(MBB, PPRRestoreEnd, DL, AArch64::SP, AArch64::SP, + PPRCalleeSavedSize, TII, MachineInstr::FrameDestroy, + false, false, nullptr, EmitCFI && !HasFP, CFAOffset); + } + + // We only emit CFI information for ZPRs so emit CFI after the ZPR restores. 
if (EmitCFI) - emitCalleeSavedSVERestores(RestoreEnd); + emitCalleeSavedSVERestores(ZPRRestoreEnd); } if (!HasFP) { @@ -1617,8 +1779,7 @@ void AArch64EpilogueEmitter::emitCalleeSavedRestores( CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy); for (const auto &Info : CSI) { - if (SVE != - (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector)) + if (SVE != MFI.hasScalableStackID(Info.getFrameIdx())) continue; MCRegister Reg = Info.getReg(); diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp index 2b0c8ad0578bc..79975b0256328 100644 --- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp @@ -71,6 +71,7 @@ bool AArch64RegisterInfo::regNeedsCFI(MCRegister Reg, const MCPhysReg * AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { assert(MF && "Invalid MachineFunction pointer."); + auto &AFI = *MF->getInfo(); if (MF->getFunction().getCallingConv() == CallingConv::GHC) // GHC set of callee saved regs is empty as all those regs are @@ -101,10 +102,7 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList; if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall) return CSR_Win_AArch64_AAVPCS_SaveList; - if (MF->getFunction().getCallingConv() == - CallingConv::AArch64_SVE_VectorCall) - return CSR_Win_AArch64_SVE_AAPCS_SaveList; - if (MF->getInfo()->isSVECC()) + if (AFI.hasSVE_AAPCS(*MF)) return CSR_Win_AArch64_SVE_AAPCS_SaveList; return CSR_Win_AArch64_AAPCS_SaveList; } @@ -148,7 +146,7 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { // This is for OSes other than Windows; Windows is a separate case further // above. return CSR_AArch64_AAPCS_X18_SaveList; - if (MF->getInfo()->isSVECC()) + if (AFI.hasSVE_AAPCS(*MF)) return CSR_AArch64_SVE_AAPCS_SaveList; return CSR_AArch64_AAPCS_SaveList; } @@ -158,6 +156,7 @@ AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const { assert(MF && "Invalid MachineFunction pointer."); assert(MF->getSubtarget().isTargetDarwin() && "Invalid subtarget for getDarwinCalleeSavedRegs"); + auto &AFI = *MF->getInfo(); if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check) report_fatal_error( @@ -205,7 +204,7 @@ AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const { return CSR_Darwin_AArch64_RT_AllRegs_SaveList; if (MF->getFunction().getCallingConv() == CallingConv::Win64) return CSR_Darwin_AArch64_AAPCS_Win64_SaveList; - if (MF->getInfo()->isSVECC()) + if (AFI.hasSVE_AAPCS(*MF)) return CSR_Darwin_AArch64_SVE_AAPCS_SaveList; return CSR_Darwin_AArch64_AAPCS_SaveList; } @@ -643,7 +642,7 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const { if (ST.hasSVE() || ST.isStreaming()) { // Frames that have variable sized objects and scalable SVE objects, // should always use a basepointer. 
- if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE()) + if (!AFI->hasCalculatedStackSizeSVE() || AFI->hasSVEStackSize()) return true; } @@ -783,7 +782,7 @@ AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const { assert((!MF.getSubtarget().hasSVE() || AFI->hasCalculatedStackSizeSVE()) && "Expected SVE area to be calculated by this point"); - return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE() && + return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->hasSVEStackSize() && !AFI->hasStackHazardSlotIndex(); } diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td index 7fe4f7acdbd49..36c9cb6c1d94f 100644 --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -707,16 +707,14 @@ let Predicates = [HasSVE_or_SME] in { defm SDOT_ZZZ : sve_intx_dot<0b0, "sdot", AArch64sdot>; defm UDOT_ZZZ : sve_intx_dot<0b1, "udot", AArch64udot>; - let Predicates = [HasSVE_or_SME] in { - def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)), - (UDOT_ZZZ_S $Acc, $MulLHS, $MulRHS)>; - def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)), - (SDOT_ZZZ_S $Acc, $MulLHS, $MulRHS)>; - def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), - (UDOT_ZZZ_D $Acc, $MulLHS, $MulRHS)>; - def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), - (SDOT_ZZZ_D $Acc, $MulLHS, $MulRHS)>; - } // End HasSVE_or_SME + def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)), + (UDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>; + def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)), + (SDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>; + def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), + (UDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>; + def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), + (SDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>; defm SDOT_ZZZI : sve_intx_dot_by_indexed_elem<0b0, "sdot", int_aarch64_sve_sdot_lane>; defm UDOT_ZZZI : sve_intx_dot_by_indexed_elem<0b1, "udot", int_aarch64_sve_udot_lane>; @@ -2541,7 +2539,7 @@ let Predicates = [HasBF16, HasSVE_or_SME] in { } // End HasBF16, HasSVE_or_SME let Predicates = [HasBF16, HasSVE] in { - defm BFMMLA_ZZZ : sve_fp_matrix_mla<0b01, "bfmmla", ZPR32, ZPR16, int_aarch64_sve_bfmmla, nxv4f32, nxv8bf16>; + defm BFMMLA_ZZZ_HtoS : sve_fp_matrix_mla<0b01, "bfmmla", ZPR32, ZPR16, int_aarch64_sve_bfmmla, nxv4f32, nxv8bf16>; } // End HasBF16, HasSVE let Predicates = [HasBF16, HasSVE_or_SME] in { @@ -3646,6 +3644,9 @@ let Predicates = [HasSVE_or_SME, HasMatMulInt8] in { defm USDOT_ZZZ : sve_int_dot_mixed<"usdot", AArch64usdot>; defm USDOT_ZZZI : sve_int_dot_mixed_indexed<0, "usdot", int_aarch64_sve_usdot_lane>; defm SUDOT_ZZZI : sve_int_dot_mixed_indexed<1, "sudot", int_aarch64_sve_sudot_lane>; + + def : Pat<(nxv4i32 (partial_reduce_sumla nxv4i32:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), + (USDOT_ZZZ $Acc, $RHS, $LHS)>; } // End HasSVE_or_SME, HasMatMulInt8 let Predicates = [HasSVE, HasMatMulFP32] in { @@ -3752,6 +3753,19 @@ let Predicates = [HasSVE2_or_SME] in { defm UMLSLB_ZZZ : sve2_int_mla_long<0b10110, "umlslb", int_aarch64_sve_umlslb>; defm UMLSLT_ZZZ : sve2_int_mla_long<0b10111, "umlslt", int_aarch64_sve_umlslt>; + def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv4i32:$LHS, 
nxv4i32:$RHS)), + (UMLALT_ZZZ_D (UMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)), + (SMLALT_ZZZ_D (SMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)), + (UMLALT_ZZZ_S (UMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)), + (SMLALT_ZZZ_S (SMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv8i16 (partial_reduce_umla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), + (UMLALT_ZZZ_H (UMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>; + def : Pat<(nxv8i16 (partial_reduce_smla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), + (SMLALT_ZZZ_H (SMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>; + // SVE2 saturating multiply-add long (indexed) defm SQDMLALB_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0100, "sqdmlalb", int_aarch64_sve_sqdmlalb_lane>; defm SQDMLALT_ZZZI : sve2_int_mla_long_by_indexed_elem<0b0101, "sqdmlalt", int_aarch64_sve_sqdmlalt_lane>; @@ -3880,19 +3894,6 @@ let Predicates = [HasSVE2_or_SME] in { def : Pat<(nxv8i16 (partial_reduce_smla nxv8i16:$Acc, nxv16i8:$Input, (nxv16i8 (splat_vector (i32 1))))), (SADDWT_ZZZ_H (SADDWB_ZZZ_H $Acc, $Input), $Input)>; - def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)), - (UMLALT_ZZZ_D (UMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv4i32:$LHS, nxv4i32:$RHS)), - (SMLALT_ZZZ_D (SMLALB_ZZZ_D $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)), - (UMLALT_ZZZ_S (UMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$LHS, nxv8i16:$RHS)), - (SMLALT_ZZZ_S (SMLALB_ZZZ_S $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv8i16 (partial_reduce_umla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), - (UMLALT_ZZZ_H (UMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>; - def : Pat<(nxv8i16 (partial_reduce_smla nxv8i16:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), - (SMLALT_ZZZ_H (SMLALB_ZZZ_H $Acc, $LHS, $RHS), $LHS, $RHS)>; - // SVE2 integer multiply long defm SQDMULLB_ZZZ : sve2_wide_int_arith_long<0b11000, "sqdmullb", int_aarch64_sve_sqdmullb>; defm SQDMULLT_ZZZ : sve2_wide_int_arith_long<0b11001, "sqdmullt", int_aarch64_sve_sqdmullt>; @@ -4200,11 +4201,6 @@ let Predicates = [HasSVEAES2, HasNonStreamingSVE_or_SSVE_AES] in { def PMULL_2ZZZ_Q : sve_crypto_pmull_multi<"pmull">; } -let Predicates = [HasSVE_or_SME, HasMatMulInt8] in { - def : Pat<(nxv4i32 (partial_reduce_sumla nxv4i32:$Acc, nxv16i8:$LHS, nxv16i8:$RHS)), - (USDOT_ZZZ $Acc, $RHS, $LHS)>; - } // End HasSVE_or_SME, HasMatMulInt8 - //===----------------------------------------------------------------------===// // SME or SVE2.1 instructions //===----------------------------------------------------------------------===// @@ -4238,12 +4234,10 @@ defm UDOT_ZZZ_HtoS : sve2p1_two_way_dot_vv<"udot", 0b1, int_aarch64_sve_udot_x2 defm SDOT_ZZZI_HtoS : sve2p1_two_way_dot_vvi<"sdot", 0b0, int_aarch64_sve_sdot_lane_x2>; defm UDOT_ZZZI_HtoS : sve2p1_two_way_dot_vvi<"udot", 0b1, int_aarch64_sve_udot_lane_x2>; -let Predicates = [HasSVE2p1_or_SME2] in { - def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), - (UDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>; - def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), - (SDOT_ZZZ_HtoS $Acc, 
$MulLHS, $MulRHS)>; -} // End HasSVE2p1_or_SME2 +def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), + (UDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>; +def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)), + (SDOT_ZZZ_HtoS $Acc, $MulLHS, $MulRHS)>; defm SQCVTN_Z2Z_StoH : sve2p1_multi_vec_extract_narrow<"sqcvtn", 0b00, int_aarch64_sve_sqcvtn_x2>; defm UQCVTN_Z2Z_StoH : sve2p1_multi_vec_extract_narrow<"uqcvtn", 0b01, int_aarch64_sve_uqcvtn_x2>; diff --git a/llvm/lib/Target/AArch64/AArch64SchedA320.td b/llvm/lib/Target/AArch64/AArch64SchedA320.td index 5ec95c707c28f..2c193e59cc417 100644 --- a/llvm/lib/Target/AArch64/AArch64SchedA320.td +++ b/llvm/lib/Target/AArch64/AArch64SchedA320.td @@ -826,13 +826,13 @@ def : InstRW<[CortexA320MCWrite<15, 12, CortexA320UnitVMC>], (instregex "^[SU]DI def : InstRW<[CortexA320MCWrite<26, 23, CortexA320UnitVMC>], (instregex "^[SU]DIVR?_(ZPmZ|ZPZZ)_D")>; // Dot product, 8 bit -def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_S")>; +def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_BtoS")>; // Dot product, 8 bit, using signed and unsigned integers def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>; // Dot product, 16 bit -def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_D")>; +def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_HtoD")>; // Duplicate, immediate and indexed form def : InstRW<[CortexA320Write<3, CortexA320UnitVALU>], (instregex "^DUP_ZI_[BHSD]", @@ -1182,7 +1182,7 @@ def : InstRW<[CortexA320Write<4, CortexA320UnitVALU>], (instrs BFCVT_ZPmZ, BFCVT def : InstRW<[CortexA320Write_11cyc_1VMAC_1VALU], (instrs BFDOT_ZZI, BFDOT_ZZZ)>; // Matrix multiply accumulate -def : InstRW<[CortexA320Write_16cyc_1VMAC_1VALU], (instrs BFMMLA_ZZZ)>; +def : InstRW<[CortexA320Write_16cyc_1VMAC_1VALU], (instrs BFMMLA_ZZZ_HtoS)>; // Multiply accumulate long def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^BFMLAL[BT]_ZZZ(I)?")>; diff --git a/llvm/lib/Target/AArch64/AArch64SchedA510.td b/llvm/lib/Target/AArch64/AArch64SchedA510.td index 356e3fa39c53f..66f49f040ad12 100644 --- a/llvm/lib/Target/AArch64/AArch64SchedA510.td +++ b/llvm/lib/Target/AArch64/AArch64SchedA510.td @@ -804,13 +804,13 @@ def : InstRW<[CortexA510MCWrite<15, 12, CortexA510UnitVMC>], (instregex "^[SU]DI def : InstRW<[CortexA510MCWrite<26, 23, CortexA510UnitVMC>], (instregex "^[SU]DIVR?_(ZPmZ|ZPZZ)_D")>; // Dot product, 8 bit -def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_S")>; +def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_BtoS")>; // Dot product, 8 bit, using signed and unsigned integers def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>; // Dot product, 16 bit -def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_D")>; +def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_HtoD")>; // Duplicate, immediate and indexed form def : InstRW<[CortexA510Write<3, CortexA510UnitVALU>], (instregex "^DUP_ZI_[BHSD]", @@ -1160,7 +1160,7 @@ def : InstRW<[CortexA510Write<4, CortexA510UnitVALU>], (instrs BFCVT_ZPmZ, BFCVT def : InstRW<[A510Write_10cyc_1VMAC_1VALU], (instrs BFDOT_ZZI, BFDOT_ZZZ)>; // Matrix multiply accumulate -def : InstRW<[A510Write_15cyc_1VMAC_1VALU], (instrs BFMMLA_ZZZ)>; 
+def : InstRW<[A510Write_15cyc_1VMAC_1VALU], (instrs BFMMLA_ZZZ_HtoS)>; // Multiply accumulate long def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^BFMLAL[BT]_ZZZ(I)?")>; diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td index e7982226ff3d1..50f10114989d0 100644 --- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td +++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td @@ -1764,13 +1764,13 @@ def : InstRW<[N2Write_20c_1V0], (instregex "^[SU]DIVR?_ZPmZ_D", "^[SU]DIV_ZPZZ_D")>; // Dot product, 8 bit -def : InstRW<[N2Write_3c_1V], (instregex "^[SU]DOT_ZZZI?_S$")>; +def : InstRW<[N2Write_3c_1V], (instregex "^[SU]DOT_ZZZI?_BtoS$")>; // Dot product, 8 bit, using signed and unsigned integers def : InstRW<[N2Write_3c_1V], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>; // Dot product, 16 bit -def : InstRW<[N2Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_D$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_HtoD$")>; // Duplicate, immediate and indexed form def : InstRW<[N2Write_2c_1V], (instregex "^DUP_ZI_[BHSD]$", @@ -2118,7 +2118,7 @@ def : InstRW<[N2Write_3c_1V0], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>; def : InstRW<[N2Write_4c_1V], (instrs BFDOT_ZZI, BFDOT_ZZZ)>; // Matrix multiply accumulate -def : InstRW<[N2Write_5c_1V], (instrs BFMMLA_ZZZ)>; +def : InstRW<[N2Write_5c_1V], (instrs BFMMLA_ZZZ_HtoS)>; // Multiply accumulate long def : InstRW<[N2Write_4c_1V], (instregex "^BFMLAL[BT]_ZZZ(I)?$")>; diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td index cd0d8a9186d5b..411b372a3f533 100644 --- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td +++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td @@ -1736,13 +1736,13 @@ def : InstRW<[N3Write_16c_16V0], (instregex "^[SU]DIVR?_ZPmZ_D", "^[SU]DIV_ZPZZ_D")>; // Dot product, 8 bit -def : InstRW<[N3Write_3c_1V], (instregex "^[SU]DOT_ZZZI?_S$")>; +def : InstRW<[N3Write_3c_1V], (instregex "^[SU]DOT_ZZZI?_BtoS$")>; // Dot product, 8 bit, using signed and unsigned integers def : InstRW<[N3Write_3c_1V], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>; // Dot product, 16 bit -def : InstRW<[N3Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_D$")>; +def : InstRW<[N3Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_HtoD$")>; // Duplicate, immediate and indexed form def : InstRW<[N3Write_2c_1V], (instregex "^DUP_ZI_[BHSD]$", @@ -2082,7 +2082,7 @@ def : InstRW<[N3Write_4c_2V0], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>; def : InstRW<[N3Write_4c_1V], (instrs BFDOT_ZZI, BFDOT_ZZZ)>; // Matrix multiply accumulate -def : InstRW<[N3Write_5c_1V], (instrs BFMMLA_ZZZ)>; +def : InstRW<[N3Write_5c_1V], (instrs BFMMLA_ZZZ_HtoS)>; // Multiply accumulate long def : InstRW<[N3Write_4c_1V], (instregex "^BFMLAL[BT]_ZZZ(I)?$")>; diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td index f28df44bfdb38..3cbfc59423c9a 100644 --- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td +++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td @@ -1555,14 +1555,14 @@ def : InstRW<[V1Write_20c7_1V0], (instregex "^[SU]DIVR?_ZPmZ_D", "^[SU]DIV_ZPZZ_D")>; // Dot product, 8 bit -def : InstRW<[V1Wr_ZDOTB, V1Rd_ZDOTB], (instregex "^[SU]DOT_ZZZI?_S$")>; +def : InstRW<[V1Wr_ZDOTB, V1Rd_ZDOTB], (instregex "^[SU]DOT_ZZZI?_BtoS$")>; // Dot product, 8 bit, using signed and unsigned integers def : InstRW<[V1Wr_ZUDOTB, V1Rd_ZUDOTB], (instrs SUDOT_ZZZI, USDOT_ZZZ, USDOT_ZZZI)>; // Dot product, 16 bit -def : 
InstRW<[V1Wr_ZDOTH, V1Rd_ZDOTH], (instregex "^[SU]DOT_ZZZI?_D$")>; +def : InstRW<[V1Wr_ZDOTH, V1Rd_ZDOTH], (instregex "^[SU]DOT_ZZZI?_HtoD$")>; // Duplicate, immediate and indexed form def : InstRW<[V1Write_2c_1V01], (instregex "^DUP_ZI_[BHSD]$", @@ -1808,7 +1808,7 @@ def : InstRW<[V1Write_4c_1V0], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>; def : InstRW<[V1Wr_ZBFDOT, V1Rd_ZBFDOT], (instrs BFDOT_ZZI, BFDOT_ZZZ)>; // Matrix multiply accumulate -def : InstRW<[V1Wr_ZBFMMA, V1Rd_ZBFMMA], (instrs BFMMLA_ZZZ)>; +def : InstRW<[V1Wr_ZBFMMA, V1Rd_ZBFMMA], (instrs BFMMLA_ZZZ_HtoS)>; // Multiply accumulate long def : InstRW<[V1Wr_ZBFMAL, V1Rd_ZBFMAL], (instregex "^BFMLAL[BT]_ZZZ(I)?$")>; diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td index 6261220082029..bdde8e388cccc 100644 --- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td +++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td @@ -2251,13 +2251,13 @@ def : InstRW<[V2Write_20c_1V0], (instregex "^[SU]DIVR?_ZPmZ_D", "^[SU]DIV_ZPZZ_D")>; // Dot product, 8 bit -def : InstRW<[V2Wr_ZDOTB, V2Rd_ZDOTB], (instregex "^[SU]DOT_ZZZI?_S")>; +def : InstRW<[V2Wr_ZDOTB, V2Rd_ZDOTB], (instregex "^[SU]DOT_ZZZI?_BtoS")>; // Dot product, 8 bit, using signed and unsigned integers def : InstRW<[V2Wr_ZDOTB, V2Rd_ZDOTB], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>; // Dot product, 16 bit -def : InstRW<[V2Wr_ZDOTH, V2Rd_ZDOTH], (instregex "^[SU]DOT_ZZZI?_D")>; +def : InstRW<[V2Wr_ZDOTH, V2Rd_ZDOTH], (instregex "^[SU]DOT_ZZZI?_HtoD")>; // Duplicate, immediate and indexed form def : InstRW<[V2Write_2c_1V], (instregex "^DUP_ZI_[BHSD]", @@ -2614,7 +2614,7 @@ def : InstRW<[V2Write_4c_1V02], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>; def : InstRW<[V2Wr_ZBFDOT, V2Rd_ZBFDOT], (instrs BFDOT_ZZI, BFDOT_ZZZ)>; // Matrix multiply accumulate -def : InstRW<[V2Wr_ZBFMMA, V2Rd_ZBFMMA], (instrs BFMMLA_ZZZ)>; +def : InstRW<[V2Wr_ZBFMMA, V2Rd_ZBFMMA], (instrs BFMMLA_ZZZ_HtoS)>; // Multiply accumulate long def : InstRW<[V2Wr_ZBFMAL, V2Rd_ZBFMAL], (instregex "^BFMLAL[BT]_ZZZI?")>; diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp index 5748556d07285..96cc3f3cac91c 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp @@ -2914,10 +2914,10 @@ bool AArch64InstructionSelector::select(MachineInstr &I) { } if (OpFlags & AArch64II::MO_GOT) { - I.setDesc(TII.get(MF.getInfo()->hasELFSignedGOT() - ? AArch64::LOADgotAUTH - : AArch64::LOADgot)); + bool IsGOTSigned = MF.getInfo()->hasELFSignedGOT(); + I.setDesc(TII.get(IsGOTSigned ? AArch64::LOADgotAUTH : AArch64::LOADgot)); I.getOperand(1).setTargetFlags(OpFlags); + I.addImplicitDefUseOperands(MF); } else if (TM.getCodeModel() == CodeModel::Large && !TM.isPositionIndependent()) { // Materialize the global using movz/movk instructions. 
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp index ea2196a584127..c197550ee38c7 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp @@ -438,13 +438,13 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FPOW, G_FLOG, G_FLOG2, G_FLOG10, G_FTAN, G_FEXP, G_FEXP2, G_FEXP10, G_FACOS, G_FASIN, G_FATAN, G_FATAN2, G_FCOSH, - G_FSINH, G_FTANH}) + G_FSINH, G_FTANH, G_FMODF}) // We need a call for these, so we always need to scalarize. .scalarize(0) // Regardless of FP16 support, widen 16-bit elements to 32-bits. .minScalar(0, s32) .libcallFor({s32, s64, s128}); - getActionDefinitionsBuilder(G_FPOWI) + getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP}) .scalarize(0) .minScalar(0, s32) .libcallFor({{s32, s32}, {s64, s32}, {s128, s32}}); diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp index eeb34e12993b9..f90bcc7a77cdf 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp @@ -573,9 +573,7 @@ bool AArch64RegisterBankInfo::onlyUsesFP(const MachineInstr &MI, case Intrinsic::aarch64_neon_fcvtnu: case Intrinsic::aarch64_neon_fcvtps: case Intrinsic::aarch64_neon_fcvtpu: - // Force FPR register bank for half types, as those types otherwise - // don't get legalized correctly resulting in fp16 <-> gpr32 COPY's. - return MRI.getType(MI.getOperand(2).getReg()) == LLT::float16(); + return true; default: break; } @@ -1148,6 +1146,34 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { case TargetOpcode::G_INTRINSIC: case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: { switch (cast(MI).getIntrinsicID()) { + case Intrinsic::aarch64_neon_fcvtas: + case Intrinsic::aarch64_neon_fcvtau: + case Intrinsic::aarch64_neon_fcvtzs: + case Intrinsic::aarch64_neon_fcvtzu: + case Intrinsic::aarch64_neon_fcvtms: + case Intrinsic::aarch64_neon_fcvtmu: + case Intrinsic::aarch64_neon_fcvtns: + case Intrinsic::aarch64_neon_fcvtnu: + case Intrinsic::aarch64_neon_fcvtps: + case Intrinsic::aarch64_neon_fcvtpu: { + OpRegBankIdx[2] = PMI_FirstFPR; + if (MRI.getType(MI.getOperand(0).getReg()).isVector()) { + OpRegBankIdx[0] = PMI_FirstFPR; + break; + } + TypeSize DstSize = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI); + TypeSize SrcSize = getSizeInBits(MI.getOperand(2).getReg(), MRI, TRI); + if (((DstSize == SrcSize) || STI.hasFeature(AArch64::FeatureFPRCVT)) && + all_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()), + [&](const MachineInstr &UseMI) { + return onlyUsesFP(UseMI, MRI, TRI) || + prefersFPUse(UseMI, MRI, TRI); + })) + OpRegBankIdx[0] = PMI_FirstFPR; + else + OpRegBankIdx[0] = PMI_FirstGPR; + break; + } case Intrinsic::aarch64_neon_vcvtfxs2fp: case Intrinsic::aarch64_neon_vcvtfxu2fp: case Intrinsic::aarch64_neon_vcvtfp2fxs: diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h index f542592d22c5f..4ae5d040d5e8a 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h @@ -871,6 +871,36 @@ inline static bool isAnyMOVWMovAlias(uint64_t Value, int RegWidth) { return isAnyMOVZMovAlias(Value, RegWidth); } +static inline bool isSVECpyDupImm(int SizeInBits, 
int64_t Val, int32_t &Imm, + int32_t &Shift) { + switch (SizeInBits) { + case 8: + // All immediates are supported. + Shift = 0; + Imm = Val & 0xFF; + return true; + case 16: + case 32: + case 64: + // Support 8bit signed immediates. + if (Val >= -128 && Val <= 127) { + Shift = 0; + Imm = Val & 0xFF; + return true; + } + // Support 16bit signed immediates that are a multiple of 256. + if (Val >= -32768 && Val <= 32512 && Val % 256 == 0) { + Shift = 8; + Imm = (Val >> 8) & 0xFF; + return true; + } + break; + default: + break; + } + return false; +} + } // end namespace AArch64_AM } // end namespace llvm diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp index 6e5a69030dbc6..21ff55e9d9a7f 100644 --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp @@ -25,7 +25,6 @@ #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/Support/Casting.h" #include "llvm/Support/EndianStream.h" -#include "llvm/Support/ErrorHandling.h" #include #include diff --git a/llvm/lib/Target/AArch64/MachineSMEABIPass.cpp b/llvm/lib/Target/AArch64/MachineSMEABIPass.cpp index cced0faa28889..474974893d945 100644 --- a/llvm/lib/Target/AArch64/MachineSMEABIPass.cpp +++ b/llvm/lib/Target/AArch64/MachineSMEABIPass.cpp @@ -22,7 +22,7 @@ // To handle ZA state across control flow, we make use of edge bundling. This // assigns each block an "incoming" and "outgoing" edge bundle (representing // incoming and outgoing edges). Initially, these are unique to each block; -// then, in the process of forming bundles, the outgoing block of a block is +// then, in the process of forming bundles, the outgoing bundle of a block is // joined with the incoming bundle of all successors. The result is that each // bundle can be assigned a single ZA state, which ensures the state required by // all a blocks' successors is the same, and that each basic block will always diff --git a/llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp b/llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp index 85cca1de47b78..2a563663a34d1 100644 --- a/llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp +++ b/llvm/lib/Target/AArch64/SMEPeepholeOpt.cpp @@ -184,6 +184,17 @@ bool SMEPeepholeOpt::optimizeStartStopPairs( isSVERegOp(TRI, MRI, MI.getOperand(1))) Prev = nullptr; break; + case AArch64::RestoreZAPseudo: + case AArch64::InOutZAUsePseudo: + case AArch64::CommitZASavePseudo: + case AArch64::SMEStateAllocPseudo: + case AArch64::RequiresZASavePseudo: + // These instructions only depend on the ZA state, not the streaming mode, + // so if the pair of smstart/stop is only changing the streaming mode, we + // can permit these instructions. 
+ if (Prev->getOperand(0).getImm() != AArch64SVCR::SVCRSM) + Prev = nullptr; + break; case AArch64::ADJCALLSTACKDOWN: case AArch64::ADJCALLSTACKUP: case AArch64::ANDXri: diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td index 7913e8ca8652e..9a23c35766cac 100644 --- a/llvm/lib/Target/AArch64/SVEInstrFormats.td +++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td @@ -3748,18 +3748,18 @@ multiclass sve2_int_mla_long_by_indexed_elem opc, string asm, // SVE Integer Dot Product Group //===----------------------------------------------------------------------===// -class sve_intx_dot +class sve_intx_dot sz, bits<5> op5, bit U, string asm, + ZPRRegOp zprty1, ZPRRegOp zprty2> : I<(outs zprty1:$Zda), (ins zprty1:$_Zda, zprty2:$Zn, zprty2:$Zm), asm, "\t$Zda, $Zn, $Zm", "", []>, Sched<[]> { bits<5> Zda; bits<5> Zn; bits<5> Zm; - let Inst{31-23} = 0b010001001; - let Inst{22} = sz; + let Inst{31-24} = 0b01000100; + let Inst{23-22} = sz; let Inst{21} = 0; let Inst{20-16} = Zm; - let Inst{15-11} = 0; + let Inst{15-11} = op5; let Inst{10} = U; let Inst{9-5} = Zn; let Inst{4-0} = Zda; @@ -3770,11 +3770,17 @@ class sve_intx_dot { - def _S : sve_intx_dot<0b0, opc, asm, ZPR32, ZPR8>; - def _D : sve_intx_dot<0b1, opc, asm, ZPR64, ZPR16>; + def _BtoS : sve_intx_dot<0b10, 0b00000, opc, asm, ZPR32, ZPR8>; + def _HtoD : sve_intx_dot<0b11, 0b00000, opc, asm, ZPR64, ZPR16>; + + def : SVE_3_Op_Pat(NAME # _BtoS)>; + def : SVE_3_Op_Pat(NAME # _HtoD)>; +} - def : SVE_3_Op_Pat(NAME # _S)>; - def : SVE_3_Op_Pat(NAME # _D)>; +multiclass sve2p1_two_way_dot_vv { + def NAME : sve_intx_dot<0b00, 0b11001, u, mnemonic, ZPR32, ZPR16>; + + def : SVE_3_Op_Pat(NAME)>; } //===----------------------------------------------------------------------===// @@ -3804,21 +3810,21 @@ class sve_intx_dot_by_indexed_elem { - def _S : sve_intx_dot_by_indexed_elem<0b0, opc, asm, ZPR32, ZPR8, ZPR3b8, VectorIndexS32b_timm> { + def _BtoS : sve_intx_dot_by_indexed_elem<0b0, opc, asm, ZPR32, ZPR8, ZPR3b8, VectorIndexS32b_timm> { bits<2> iop; bits<3> Zm; let Inst{20-19} = iop; let Inst{18-16} = Zm; } - def _D : sve_intx_dot_by_indexed_elem<0b1, opc, asm, ZPR64, ZPR16, ZPR4b16, VectorIndexD32b_timm> { + def _HtoD : sve_intx_dot_by_indexed_elem<0b1, opc, asm, ZPR64, ZPR16, ZPR4b16, VectorIndexD32b_timm> { bits<1> iop; bits<4> Zm; let Inst{20} = iop; let Inst{19-16} = Zm; } - def : SVE_4_Op_Imm_Pat(NAME # _S)>; - def : SVE_4_Op_Imm_Pat(NAME # _D)>; + def : SVE_4_Op_Imm_Pat(NAME # _BtoS)>; + def : SVE_4_Op_Imm_Pat(NAME # _HtoD)>; } //===----------------------------------------------------------------------===// @@ -9893,32 +9899,6 @@ multiclass sve_fp_clamp_bfloat { def : SVE_3_Op_Pat(NAME)>; } -// SVE two-way dot product -class sve2p1_two_way_dot_vv - : I<(outs ZPR32:$Zda), (ins ZPR32:$_Zda, ZPR16:$Zn, ZPR16:$Zm), - mnemonic, "\t$Zda, $Zn, $Zm", - "", []>, Sched<[]> { - bits<5> Zda; - bits<5> Zn; - bits<5> Zm; - let Inst{31-21} = 0b01000100000; - let Inst{20-16} = Zm; - let Inst{15-11} = 0b11001; - let Inst{10} = u; - let Inst{9-5} = Zn; - let Inst{4-0} = Zda; - - let Constraints = "$Zda = $_Zda"; - let DestructiveInstType = DestructiveOther; - let hasSideEffects = 0; -} - -multiclass sve2p1_two_way_dot_vv { - def NAME : sve2p1_two_way_dot_vv; - - def : SVE_3_Op_Pat(NAME)>; -} - // SVE two-way dot product (indexed) class sve2p1_two_way_dot_vvi : I<(outs ZPR32:$Zda), (ins ZPR32:$_Zda, ZPR16:$Zn, ZPR3b16:$Zm, VectorIndexS32b:$i2), diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td 
index 23339b2ad228e..7003a40a940aa 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPU.td +++ b/llvm/lib/Target/AMDGPU/AMDGPU.td @@ -1238,6 +1238,19 @@ def FeatureSetPrioIncWgInst : SubtargetFeature<"setprio-inc-wg-inst", // Subtarget Features (options and debugging) //===------------------------------------------------------------===// +// Ugly hack to accommodate assembling modules with mixed +// wavesizes. Ideally we would have a mapping symbol in assembly which +// would keep track of which sections of code should be treated as +// wave32 and wave64. Instead what users do is assemble with both +// wavesizes enabled. We translate this into this special mode so this +// only influences assembler behavior and nothing else. +def FeatureAssemblerPermissiveWavesize : SubtargetFeature< + "assembler-permissive-wavesize", + "AssemblerPermissiveWavesize", + "true", + "allow parsing wave32 and wave64 variants of instructions" +>; + class FeatureMaxPrivateElementSize : SubtargetFeature< "max-private-element-size-"#size, "MaxPrivateElementSize", @@ -1443,6 +1456,12 @@ def FeatureLdsBarrierArriveAtomic : SubtargetFeature< "lds-barrier-arrive-atomic "Has LDS barrier-arrive atomic instructions" >; +def Feature45BitNumRecordsBufferResource : SubtargetFeature< "45-bit-num-records-buffer-resource", + "Has45BitNumRecordsBufferResource", + "true", + "The buffer resource (V#) supports 45-bit num_records" +>; + // Dummy feature used to disable assembler instructions. def FeatureDisable : SubtargetFeature<"", "FeatureDisable","true", @@ -2106,6 +2125,7 @@ def FeatureISAVersion12_50 : FeatureSet< FeatureMadU32Inst, FeatureLdsBarrierArriveAtomic, FeatureSetPrioIncWgInst, + Feature45BitNumRecordsBufferResource, ]>; def FeatureISAVersion12_51 : FeatureSet< @@ -2569,6 +2589,8 @@ def NotHasTrue16BitInsts : True16PredicateClass<"!Subtarget->hasTrue16BitInsts() // only allow 32-bit registers in operands and use low halves thereof. def UseRealTrue16Insts : True16PredicateClass<"Subtarget->useRealTrue16Insts()">, AssemblerPredicate<(all_of FeatureTrue16BitInsts, FeatureRealTrue16Insts)>; +def NotUseRealTrue16Insts : True16PredicateClass<"!Subtarget->useRealTrue16Insts()">, + AssemblerPredicate<(not (all_of FeatureTrue16BitInsts, FeatureRealTrue16Insts))>; def UseFakeTrue16Insts : True16PredicateClass<"Subtarget->hasTrue16BitInsts() && " "!Subtarget->useRealTrue16Insts()">, AssemblerPredicate<(all_of FeatureTrue16BitInsts)>; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp index d4210b8bc9a87..2192a72bb27b7 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -1089,10 +1089,17 @@ void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) { for (SDNode::user_iterator UI = N->user_begin(), E = N->user_end(); UI != E; ++UI) if (UI.getUse().getResNo() == 1) { - if ((IsAdd && (UI->getOpcode() != ISD::UADDO_CARRY)) || - (!IsAdd && (UI->getOpcode() != ISD::USUBO_CARRY))) { - IsVALU = true; - break; + if (UI->isMachineOpcode()) { + if (UI->getMachineOpcode() != + (IsAdd ? AMDGPU::S_ADD_CO_PSEUDO : AMDGPU::S_SUB_CO_PSEUDO)) { + IsVALU = true; + break; + } + } else { + if (UI->getOpcode() != (IsAdd ? ISD::UADDO_CARRY : ISD::USUBO_CARRY)) { + IsVALU = true; + break; + } } } @@ -4078,18 +4085,26 @@ bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src, // register.
Mods |= SISrcMods::OP_SEL_1; - if (IsExtractHigh || - (Src.getValueSizeInBits() == 16 && isExtractHiElt(Src, Src))) { - Mods |= SISrcMods::OP_SEL_0; + if (Src.getValueSizeInBits() == 16) { + if (isExtractHiElt(Src, Src)) { + Mods |= SISrcMods::OP_SEL_0; - // TODO: Should we try to look for neg/abs here? - } + // TODO: Should we try to look for neg/abs here? + return true; + } + + if (Src.getOpcode() == ISD::TRUNCATE && + Src.getOperand(0).getValueType() == MVT::i32) { + Src = Src.getOperand(0); + return true; + } + + if (Subtarget->useRealTrue16Insts()) + // In true16 mode, pack src to a 32bit + Src = createVOP3PSrc32FromLo16(Src, In, CurDAG, Subtarget); + } else if (IsExtractHigh) + Mods |= SISrcMods::OP_SEL_0; - // Prevent unnecessary subreg COPY to VGPR_16 - if (Src.getOpcode() == ISD::TRUNCATE && - Src.getOperand(0).getValueType() == MVT::i32) { - Src = Src.getOperand(0); - } return true; } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp index f069b591eb315..a44af5f854c18 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -5287,30 +5287,6 @@ SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N, return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0)); } -bool AMDGPUTargetLowering::isInt64ImmLegal(SDNode *N, SelectionDAG &DAG) const { - if (!Subtarget->isGCN()) - return false; - - ConstantSDNode *SDConstant = dyn_cast(N); - ConstantFPSDNode *SDFPConstant = dyn_cast(N); - auto &ST = DAG.getSubtarget(); - const auto *TII = ST.getInstrInfo(); - - if (!ST.hasMovB64() || (!SDConstant && !SDFPConstant)) - return false; - - if (ST.has64BitLiterals()) - return true; - - if (SDConstant) { - const APInt &APVal = SDConstant->getAPIntValue(); - return isUInt<32>(APVal.getZExtValue()) || TII->isInlineConstant(APVal); - } - - APInt Val = SDFPConstant->getValueAPF().bitcastToAPInt(); - return isUInt<32>(Val.getZExtValue()) || TII->isInlineConstant(Val); -} - SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -5360,8 +5336,6 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N, SDValue Src = N->getOperand(0); if (ConstantSDNode *C = dyn_cast(Src)) { SDLoc SL(N); - if (isInt64ImmLegal(C, DAG)) - break; uint64_t CVal = C->getZExtValue(); SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, DAG.getConstant(Lo_32(CVal), SL, MVT::i32), @@ -5372,8 +5346,6 @@ SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N, if (ConstantFPSDNode *C = dyn_cast(Src)) { const APInt &Val = C->getValueAPF().bitcastToAPInt(); SDLoc SL(N); - if (isInt64ImmLegal(C, DAG)) - break; uint64_t CVal = Val.getZExtValue(); SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, DAG.getConstant(Lo_32(CVal), SL, MVT::i32), diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h index 610f0ebb4caa5..bdaf48652d107 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h @@ -103,9 +103,6 @@ class AMDGPUTargetLowering : public TargetLowering { SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const; protected: - /// Check whether value Val can be supported by v_mov_b64, for the current - /// target. 
- bool isInt64ImmLegal(SDNode *Val, SelectionDAG &DAG) const; bool shouldCombineMemoryType(EVT VT) const; SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const; SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const; diff --git a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp index 38718c43a61dd..7504f1a8cea09 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp @@ -150,7 +150,10 @@ class LiveRegOptimizer { if (!CVisited.insert(CII).second) continue; - if (CII->getParent() == II->getParent() && !IsLookThru(II)) + // Same-BB filter must look at the *user*; and allow non-lookthrough + // users when the def is a PHI (loop-header pattern). + if (CII->getParent() == II->getParent() && !IsLookThru(CII) && + !isa(II)) continue; if (isOpLegal(CII)) diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp index c690b2b7129b4..ee466ca20bde3 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -5905,33 +5905,50 @@ bool AMDGPULegalizerInfo::legalizePointerAsRsrcIntrin( Register Flags = MI.getOperand(5).getReg(); LLT S32 = LLT::scalar(32); + LLT S64 = LLT::scalar(64); B.setInsertPt(B.getMBB(), ++B.getInsertPt()); - auto Unmerge = B.buildUnmerge(S32, Pointer); - Register LowHalf = Unmerge.getReg(0); - Register HighHalf = Unmerge.getReg(1); - - auto AndMask = B.buildConstant(S32, 0x0000ffff); - auto Masked = B.buildAnd(S32, HighHalf, AndMask); - - MachineInstrBuilder NewHighHalf = Masked; - std::optional StrideConst = - getIConstantVRegValWithLookThrough(Stride, MRI); - if (!StrideConst || !StrideConst->Value.isZero()) { - MachineInstrBuilder ShiftedStride; - if (StrideConst) { - uint32_t StrideVal = StrideConst->Value.getZExtValue(); - uint32_t ShiftedStrideVal = StrideVal << 16; - ShiftedStride = B.buildConstant(S32, ShiftedStrideVal); - } else { - auto ExtStride = B.buildAnyExt(S32, Stride); - auto ShiftConst = B.buildConstant(S32, 16); - ShiftedStride = B.buildShl(S32, ExtStride, ShiftConst); - } - NewHighHalf = B.buildOr(S32, Masked, ShiftedStride); + + auto ExtStride = B.buildAnyExt(S32, Stride); + + if (ST.has45BitNumRecordsBufferResource()) { + Register Zero = B.buildConstant(S32, 0).getReg(0); + // Build the lower 64-bit value, which has a 57-bit base and the lower 7-bit + // num_records. + LLT PtrIntTy = LLT::scalar(MRI.getType(Pointer).getSizeInBits()); + auto PointerInt = B.buildPtrToInt(PtrIntTy, Pointer); + auto ExtPointer = B.buildAnyExtOrTrunc(S64, PointerInt); + auto NumRecordsLHS = B.buildShl(S64, NumRecords, B.buildConstant(S32, 57)); + Register LowHalf = B.buildOr(S64, ExtPointer, NumRecordsLHS).getReg(0); + + // Build the higher 64-bit value, which has the higher 38-bit num_records, + // 6-bit zero (omit), 16-bit stride and scale and 4-bit flag. 
+ auto NumRecordsRHS = B.buildLShr(S64, NumRecords, B.buildConstant(S32, 7)); + auto ShiftedStride = B.buildShl(S32, ExtStride, B.buildConstant(S32, 12)); + auto ExtShiftedStride = + B.buildMergeValues(S64, {Zero, ShiftedStride.getReg(0)}); + auto ShiftedFlags = B.buildShl(S32, Flags, B.buildConstant(S32, 28)); + auto ExtShiftedFlags = + B.buildMergeValues(S64, {Zero, ShiftedFlags.getReg(0)}); + auto CombinedFields = B.buildOr(S64, NumRecordsRHS, ExtShiftedStride); + Register HighHalf = + B.buildOr(S64, CombinedFields, ExtShiftedFlags).getReg(0); + B.buildMergeValues(Result, {LowHalf, HighHalf}); + } else { + NumRecords = B.buildTrunc(S32, NumRecords).getReg(0); + auto Unmerge = B.buildUnmerge(S32, Pointer); + auto LowHalf = Unmerge.getReg(0); + auto HighHalf = Unmerge.getReg(1); + + auto AndMask = B.buildConstant(S32, 0x0000ffff); + auto Masked = B.buildAnd(S32, HighHalf, AndMask); + auto ShiftConst = B.buildConstant(S32, 16); + auto ShiftedStride = B.buildShl(S32, ExtStride, ShiftConst); + auto NewHighHalf = B.buildOr(S32, Masked, ShiftedStride); + Register NewHighHalfReg = NewHighHalf.getReg(0); + B.buildMergeValues(Result, {LowHalf, NewHighHalfReg, NumRecords, Flags}); } - Register NewHighHalfReg = NewHighHalf.getReg(0); - B.buildMergeValues(Result, {LowHalf, NewHighHalfReg, NumRecords, Flags}); + MI.eraseFromParent(); return true; } diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp index d9bfeae52e213..0a5913293238a 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp @@ -2562,7 +2562,9 @@ bool AMDGPULowerBufferFatPointers::run(Module &M, const TargetMachine &TM) { for (Function *F : NeedsPostProcess) Splitter.processFunction(*F); for (Function *F : Intrinsics) { - if (isRemovablePointerIntrinsic(F->getIntrinsicID())) { + // use_empty() can also occur with cases like masked load, which will + // have been rewritten out of the module by now but not erased. + if (F->use_empty() || isRemovablePointerIntrinsic(F->getIntrinsicID())) { F->eraseFromParent(); } else { std::optional NewF = Intrinsic::remangleIntrinsicFunction(F); diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp index f01d5f6726822..6efa78ef902c0 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp @@ -608,6 +608,8 @@ class AMDGPULowerModuleLDS { ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot] : EmptySet; + const size_t HybridModuleRootKernelsSize = HybridModuleRootKernels.size(); + for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) { // Each iteration of this loop assigns exactly one global variable to // exactly one of the implementation strategies. 
@@ -647,7 +649,8 @@ class AMDGPULowerModuleLDS { ModuleScopeVariables.insert(GV); } else if (K.second.size() == 1) { KernelAccessVariables.insert(GV); - } else if (set_is_subset(K.second, HybridModuleRootKernels)) { + } else if (K.second.size() == HybridModuleRootKernelsSize && + set_is_subset(K.second, HybridModuleRootKernels)) { ModuleScopeVariables.insert(GV); } else { TableLookupVariables.insert(GV); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp index 6acbf52b97de5..680e7eb3de6be 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp @@ -175,6 +175,40 @@ void AMDGPUMCInstLower::lowerT16D16Helper(const MachineInstr *MI, } } +void AMDGPUMCInstLower::lowerT16FmaMixFP16(const MachineInstr *MI, + MCInst &OutMI) const { + unsigned Opcode = MI->getOpcode(); + const auto *TII = static_cast(ST.getInstrInfo()); + const SIRegisterInfo &TRI = TII->getRegisterInfo(); + + int VDstIdx = AMDGPU::getNamedOperandIdx(Opcode, llvm::AMDGPU::OpName::vdst); + const MachineOperand &VDst = MI->getOperand(VDstIdx); + bool IsHi = AMDGPU::isHi16Reg(VDst.getReg(), TRI); + switch (Opcode) { + case AMDGPU::V_FMA_MIX_F16_t16: + Opcode = IsHi ? AMDGPU::V_FMA_MIXHI_F16 : AMDGPU::V_FMA_MIXLO_F16; + break; + case AMDGPU::V_FMA_MIX_BF16_t16: + Opcode = IsHi ? AMDGPU::V_FMA_MIXHI_BF16 : AMDGPU::V_FMA_MIXLO_BF16; + break; + } + int MCOpcode = TII->pseudoToMCOpcode(Opcode); + assert(MCOpcode != -1 && + "Pseudo instruction doesn't have a target-specific version"); + OutMI.setOpcode(MCOpcode); + + // lower operands + for (int I = 0, E = MI->getNumExplicitOperands(); I < E; I++) { + const MachineOperand &MO = MI->getOperand(I); + MCOperand MCOp; + if (I == VDstIdx) + MCOp = MCOperand::createReg(TRI.get32BitRegister(VDst.getReg())); + else + lowerOperand(MO, MCOp); + OutMI.addOperand(MCOp); + } +} + void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { unsigned Opcode = MI->getOpcode(); const auto *TII = static_cast(ST.getInstrInfo()); @@ -201,6 +235,10 @@ void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { } else if (AMDGPU::getT16D16Helper(Opcode)) { lowerT16D16Helper(MI, OutMI); return; + } else if (Opcode == AMDGPU::V_FMA_MIX_F16_t16 || + Opcode == AMDGPU::V_FMA_MIX_BF16_t16) { + lowerT16FmaMixFP16(MI, OutMI); + return; } int MCOpcode = TII->pseudoToMCOpcode(Opcode); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h index 68b8d4e25a6cc..23ed55d45220f 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h +++ b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h @@ -38,6 +38,7 @@ class AMDGPUMCInstLower { void lower(const MachineInstr *MI, MCInst &OutMI) const; void lowerT16D16Helper(const MachineInstr *MI, MCInst &OutMI) const; + void lowerT16FmaMixFP16(const MachineInstr *MI, MCInst &OutMI) const; }; namespace { diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp index 0776d14a84067..f413bbcecb526 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp @@ -840,7 +840,9 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST, .Any({{B128, Ptr32}, {{}, {VgprB128, VgprPtr32}}}); // clang-format on - addRulesForGOpcs({G_AMDGPU_BUFFER_LOAD}, StandardB) + addRulesForGOpcs({G_AMDGPU_BUFFER_LOAD, G_AMDGPU_BUFFER_LOAD_FORMAT, + G_AMDGPU_TBUFFER_LOAD_FORMAT}, + StandardB) 
.Div(B32, {{VgprB32}, {SgprV4S32_WF, Vgpr32, Vgpr32, Sgpr32_WF}}) .Uni(B32, {{UniInVgprB32}, {SgprV4S32_WF, Vgpr32, Vgpr32, Sgpr32_WF}}) .Div(B64, {{VgprB64}, {SgprV4S32_WF, Vgpr32, Vgpr32, Sgpr32_WF}}) diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp index 92a587b5771b6..280fbe20667c6 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp @@ -1384,6 +1384,11 @@ void AMDGPUPassConfig::addCodeGenPrepare() { if (TM->getTargetTriple().isAMDGCN() && EnableLowerKernelArguments) addPass(createAMDGPULowerKernelArgumentsPass()); + TargetPassConfig::addCodeGenPrepare(); + + if (isPassEnabled(EnableLoadStoreVectorizer)) + addPass(createLoadStoreVectorizerPass()); + if (TM->getTargetTriple().isAMDGCN()) { // This lowering has been placed after codegenprepare to take advantage of // address mode matching (which is why it isn't put with the LDS lowerings). @@ -1392,15 +1397,6 @@ void AMDGPUPassConfig::addCodeGenPrepare() { // but has been put before switch lowering and CFG flattening so that those // passes can run on the more optimized control flow this pass creates in // many cases. - // - // FIXME: This should ideally be put after the LoadStoreVectorizer. - // However, due to some annoying facts about ResourceUsageAnalysis, - // (especially as exercised in the resource-usage-dead-function test), - // we need all the function passes codegenprepare all the way through - // said resource usage analysis to run on the call graph produced - // before codegenprepare runs (because codegenprepare will knock some - // nodes out of the graph, which leads to function-level passes not - // being run on them, which causes crashes in the resource usage analysis). addPass(createAMDGPULowerBufferFatPointersPass()); addPass(createAMDGPULowerIntrinsicsLegacyPass()); // In accordance with the above FIXME, manually force all the @@ -1408,11 +1404,6 @@ void AMDGPUPassConfig::addCodeGenPrepare() { addPass(new DummyCGSCCPass()); } - TargetPassConfig::addCodeGenPrepare(); - - if (isPassEnabled(EnableLoadStoreVectorizer)) - addPass(createLoadStoreVectorizerPass()); - // LowerSwitch pass may introduce unreachable blocks that can // cause unexpected behavior for subsequent passes. Placing it // here seems better that these blocks would get cleaned up by @@ -2125,6 +2116,11 @@ void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const { if (EnableLowerKernelArguments) addPass(AMDGPULowerKernelArgumentsPass(TM)); + Base::addCodeGenPrepare(addPass); + + if (isPassEnabled(EnableLoadStoreVectorizer)) + addPass(LoadStoreVectorizerPass()); + // This lowering has been placed after codegenprepare to take advantage of // address mode matching (which is why it isn't put with the LDS lowerings). // It could be placed anywhere before uniformity annotations (an analysis @@ -2132,25 +2128,11 @@ void AMDGPUCodeGenPassBuilder::addCodeGenPrepare(AddIRPass &addPass) const { // but has been put before switch lowering and CFG flattening so that those // passes can run on the more optimized control flow this pass creates in // many cases. - // - // FIXME: This should ideally be put after the LoadStoreVectorizer. 
- // However, due to some annoying facts about ResourceUsageAnalysis, - // (especially as exercised in the resource-usage-dead-function test), - // we need all the function passes codegenprepare all the way through - // said resource usage analysis to run on the call graph produced - // before codegenprepare runs (because codegenprepare will knock some - // nodes out of the graph, which leads to function-level passes not - // being run on them, which causes crashes in the resource usage analysis). addPass(AMDGPULowerBufferFatPointersPass(TM)); addPass.requireCGSCCOrder(); addPass(AMDGPULowerIntrinsicsPass(TM)); - Base::addCodeGenPrepare(addPass); - - if (isPassEnabled(EnableLoadStoreVectorizer)) - addPass(LoadStoreVectorizerPass()); - // LowerSwitch pass may introduce unreachable blocks that can cause unexpected // behavior for subsequent passes. Placing it here seems better that these // blocks would get cleaned up by UnreachableBlockElim inserted next in the diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp index 3e2b2c3510569..03d16fdd54c42 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp @@ -607,13 +607,15 @@ InstructionCost GCNTTIImpl::getArithmeticInstrCost( case ISD::FSUB: if (ST->hasPackedFP32Ops() && SLT == MVT::f32) NElts = (NElts + 1) / 2; + if (ST->hasBF16PackedInsts() && SLT == MVT::bf16) + NElts = (NElts + 1) / 2; if (SLT == MVT::f64) return LT.first * NElts * get64BitInstrCost(CostKind); if (ST->has16BitInsts() && SLT == MVT::f16) NElts = (NElts + 1) / 2; - if (SLT == MVT::f32 || SLT == MVT::f16) + if (SLT == MVT::f32 || SLT == MVT::f16 || SLT == MVT::bf16) return LT.first * NElts * getFullRateInstrCost(); break; case ISD::FDIV: @@ -746,7 +748,9 @@ GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy; - if ((ST->hasVOP3PInsts() && (SLT == MVT::f16 || SLT == MVT::i16)) || + if ((ST->hasVOP3PInsts() && + (SLT == MVT::f16 || SLT == MVT::i16 || + (SLT == MVT::bf16 && ST->hasBF16PackedInsts()))) || (ST->hasPackedFP32Ops() && SLT == MVT::f32)) NElts = (NElts + 1) / 2; diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp index 2ced4d6813766..a67a7bedf19a3 100644 --- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -80,8 +80,7 @@ class AMDGPUOperand : public MCParsedAsmOperand { bool Abs = false; bool Neg = false; bool Sext = false; - bool Lit = false; - bool Lit64 = false; + LitModifier Lit = LitModifier::None; bool hasFPModifiers() const { return Abs || Neg; } bool hasIntModifiers() const { return Sext; } @@ -1247,6 +1246,12 @@ raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) { // AsmParser //===----------------------------------------------------------------------===// +// TODO: define GET_SUBTARGET_FEATURE_NAME +#define GET_REGISTER_MATCHER +#include "AMDGPUGenAsmMatcher.inc" +#undef GET_REGISTER_MATCHER +#undef GET_SUBTARGET_FEATURE_NAME + // Holds info related to the current kernel, e.g. count of SGPRs used. // Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next // .amdgpu_hsa_kernel or at EOF. 
@@ -1537,6 +1542,10 @@ class AMDGPUAsmParser : public MCTargetAsmParser { return AMDGPU::isGFX10_BEncoding(getSTI()); } + bool isWave32() const { return getAvailableFeatures()[Feature_isWave32Bit]; } + + bool isWave64() const { return getAvailableFeatures()[Feature_isWave64Bit]; } + bool hasInv2PiInlineImm() const { return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm]; } @@ -1590,16 +1599,22 @@ class AMDGPUAsmParser : public MCTargetAsmParser { return static_cast(TS); } - const MCRegisterInfo *getMRI() const { + MCContext &getContext() const { // We need this const_cast because for some reason getContext() is not const // in MCAsmParser. - return const_cast(this)->getContext().getRegisterInfo(); + return const_cast(this)->MCTargetAsmParser::getContext(); + } + + const MCRegisterInfo *getMRI() const { + return getContext().getRegisterInfo(); } const MCInstrInfo *getMII() const { return &MII; } + // FIXME: This should not be used. Instead, should use queries derived from + // getAvailableFeatures(). const FeatureBitset &getFeatureBits() const { return getSTI().getFeatureBits(); } @@ -1675,10 +1690,10 @@ class AMDGPUAsmParser : public MCTargetAsmParser { bool isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const; bool parseSP3NegModifier(); ParseStatus parseImm(OperandVector &Operands, bool HasSP3AbsModifier = false, - bool HasLit = false, bool HasLit64 = false); + LitModifier Lit = LitModifier::None); ParseStatus parseReg(OperandVector &Operands); ParseStatus parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod = false, - bool HasLit = false, bool HasLit64 = false); + LitModifier Lit = LitModifier::None); ParseStatus parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true); ParseStatus parseRegOrImmWithIntInputMods(OperandVector &Operands, @@ -1792,7 +1807,8 @@ class AMDGPUAsmParser : public MCTargetAsmParser { const OperandVector &Operands) const; SMLoc getInstLoc(const OperandVector &Operands) const; - bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc, const OperandVector &Operands); + bool validateInstruction(const MCInst &Inst, SMLoc IDLoc, + const OperandVector &Operands); bool validateOffset(const MCInst &Inst, const OperandVector &Operands); bool validateFlatOffset(const MCInst &Inst, const OperandVector &Operands); bool validateSMEMOffset(const MCInst &Inst, const OperandVector &Operands); @@ -1809,8 +1825,8 @@ class AMDGPUAsmParser : public MCTargetAsmParser { bool validateMIMGAtomicDMask(const MCInst &Inst); bool validateMIMGGatherDMask(const MCInst &Inst); bool validateMovrels(const MCInst &Inst, const OperandVector &Operands); - bool validateMIMGDataSize(const MCInst &Inst, const SMLoc &IDLoc); - bool validateMIMGAddrSize(const MCInst &Inst, const SMLoc &IDLoc); + bool validateMIMGDataSize(const MCInst &Inst, SMLoc IDLoc); + bool validateMIMGAddrSize(const MCInst &Inst, SMLoc IDLoc); bool validateMIMGD16(const MCInst &Inst); bool validateMIMGDim(const MCInst &Inst, const OperandVector &Operands); bool validateTensorR128(const MCInst &Inst); @@ -1832,7 +1848,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser { bool validateDivScale(const MCInst &Inst); bool validateWaitCnt(const MCInst &Inst, const OperandVector &Operands); bool validateCoherencyBits(const MCInst &Inst, const OperandVector &Operands, - const SMLoc &IDLoc); + SMLoc IDLoc); bool validateTHAndScopeBits(const MCInst &Inst, const OperandVector &Operands, const unsigned CPol); bool validateTFE(const MCInst &Inst, const OperandVector &Operands); @@ 
-1849,7 +1865,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser { bool isSupportedMnemo(StringRef Mnemo, const FeatureBitset &FBS, ArrayRef Variants); - bool checkUnsupportedInstruction(StringRef Name, const SMLoc &IDLoc); + bool checkUnsupportedInstruction(StringRef Name, SMLoc IDLoc); bool isId(const StringRef Id) const; bool isId(const AsmToken &Token, const StringRef Id) const; @@ -2256,9 +2272,8 @@ bool AMDGPUOperand::isSDWAInt32Operand() const { } bool AMDGPUOperand::isBoolReg() const { - auto FB = AsmParser->getFeatureBits(); - return isReg() && ((FB[AMDGPU::FeatureWavefrontSize64] && isSCSrc_b64()) || - (FB[AMDGPU::FeatureWavefrontSize32] && isSCSrc_b32())); + return isReg() && ((AsmParser->isWave64() && isSCSrc_b64()) || + (AsmParser->isWave32() && isSCSrc_b32())); } uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const @@ -2312,6 +2327,11 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo APInt Literal(64, Val); uint8_t OpTy = InstDesc.operands()[OpNum].OperandType; + bool CanUse64BitLiterals = + AsmParser->has64BitLiterals() && + !(InstDesc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P)); + MCContext &Ctx = AsmParser->getContext(); + if (Imm.IsFPImm) { // We got fp literal token switch (OpTy) { case AMDGPU::OPERAND_REG_IMM_INT64: @@ -2341,7 +2361,15 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo Val &= 0xffffffff00000000u; } - Inst.addOperand(MCOperand::createImm(Val)); + if ((OpTy == AMDGPU::OPERAND_REG_IMM_FP64 || + OpTy == AMDGPU::OPERAND_REG_INLINE_C_FP64 || + OpTy == AMDGPU::OPERAND_REG_INLINE_AC_FP64) && + CanUse64BitLiterals && Lo_32(Val) != 0) { + Inst.addOperand(MCOperand::createExpr( + AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx))); + } else { + Inst.addOperand(MCOperand::createImm(Val)); + } return; } @@ -2351,7 +2379,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo llvm_unreachable("fp literal in 64-bit integer instruction."); case AMDGPU::OPERAND_KIMM64: - Inst.addOperand(MCOperand::createImm(Val)); + if (CanUse64BitLiterals && Lo_32(Val) != 0) { + Inst.addOperand(MCOperand::createExpr( + AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx))); + } else { + Inst.addOperand(MCOperand::createImm(Val)); + } return; case AMDGPU::OPERAND_REG_IMM_BF16: @@ -2437,10 +2470,16 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo // truncated to uint32_t), if the target doesn't support 64-bit literals, or // the lit modifier is explicitly used, we need to truncate it to the 32 // LSBs. - if (!AsmParser->has64BitLiterals() || getModifiers().Lit) + if (!AsmParser->has64BitLiterals() || + getModifiers().Lit == LitModifier::Lit) Val = Lo_32(Val); - Inst.addOperand(MCOperand::createImm(Val)); + if (CanUse64BitLiterals && (!isInt<32>(Val) || !isUInt<32>(Val))) { + Inst.addOperand(MCOperand::createExpr( + AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx))); + } else { + Inst.addOperand(MCOperand::createImm(Val)); + } return; case AMDGPU::OPERAND_REG_IMM_FP64: @@ -2461,12 +2500,18 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo // 1) explicitly forced by using lit modifier; // 2) the value is a valid 32-bit representation (signed or unsigned), // meanwhile not forced by lit64 modifier. 
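// Editor's sketch, not part of the patch: background for the lit64 handling in
// addLiteralImmOperand above. A 64-bit FP operand encoded with the classic
// 32-bit literal supplies only the high dword of the double (the low half is
// implicitly zero), so a value whose low 32 bits are nonzero is exact only via
// the new lit64() form on subtargets with 64-bit literals. Standalone
// illustration with stand-in helpers, not the LLVM MC API:
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t lo32(uint64_t V) { return static_cast<uint32_t>(V); }
static uint32_t hi32(uint64_t V) { return static_cast<uint32_t>(V >> 32); }

static void classify(double D, bool Has64BitLiterals) {
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof(Bits));
  if (lo32(Bits) == 0 || !Has64BitLiterals)
    std::printf("%g -> 32-bit literal 0x%08x%s\n", D, (unsigned)hi32(Bits),
                lo32(Bits) ? " (low half lost)" : "");
  else
    std::printf("%g -> lit64(0x%016llx)\n", D, (unsigned long long)Bits);
}

int main() {
  classify(2.0, true); // 0x4000000000000000: low half zero, 32-bit form is exact
  classify(0.1, true); // 0x3fb999999999999a: low half nonzero, needs lit64
}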
- if (getModifiers().Lit || - (!getModifiers().Lit64 && (isInt<32>(Val) || isUInt<32>(Val)))) + if (getModifiers().Lit == LitModifier::Lit || + (getModifiers().Lit != LitModifier::Lit64 && + (isInt<32>(Val) || isUInt<32>(Val)))) Val = static_cast(Val) << 32; } - Inst.addOperand(MCOperand::createImm(Val)); + if (CanUse64BitLiterals && Lo_32(Val) != 0) { + Inst.addOperand(MCOperand::createExpr( + AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx))); + } else { + Inst.addOperand(MCOperand::createImm(Val)); + } return; case AMDGPU::OPERAND_REG_IMM_INT16: @@ -2484,10 +2529,16 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo return; case AMDGPU::OPERAND_KIMM64: - if ((isInt<32>(Val) || isUInt<32>(Val)) && !getModifiers().Lit64) + if ((isInt<32>(Val) || isUInt<32>(Val)) && + getModifiers().Lit != LitModifier::Lit64) Val <<= 32; - Inst.addOperand(MCOperand::createImm(Val)); + if (CanUse64BitLiterals && Lo_32(Val) != 0) { + Inst.addOperand(MCOperand::createExpr( + AMDGPUMCExpr::createLit(LitModifier::Lit64, Val, Ctx))); + } else { + Inst.addOperand(MCOperand::createImm(Val)); + } return; default: @@ -3167,20 +3218,22 @@ AMDGPUAsmParser::parseRegister(bool RestoreOnFailure) { } ParseStatus AMDGPUAsmParser::parseImm(OperandVector &Operands, - bool HasSP3AbsModifier, bool HasLit, - bool HasLit64) { + bool HasSP3AbsModifier, LitModifier Lit) { // TODO: add syntactic sugar for 1/(2*PI) if (isRegister() || isModifier()) return ParseStatus::NoMatch; - if (!HasLit && !HasLit64) { - HasLit64 = trySkipId("lit64"); - HasLit = !HasLit64 && trySkipId("lit"); - if (HasLit || HasLit64) { + if (Lit == LitModifier::None) { + if (trySkipId("lit")) + Lit = LitModifier::Lit; + else if (trySkipId("lit64")) + Lit = LitModifier::Lit64; + + if (Lit != LitModifier::None) { if (!skipToken(AsmToken::LParen, "expected left paren after lit")) return ParseStatus::Failure; - ParseStatus S = parseImm(Operands, HasSP3AbsModifier, HasLit, HasLit64); + ParseStatus S = parseImm(Operands, HasSP3AbsModifier, Lit); if (S.isSuccess() && !skipToken(AsmToken::RParen, "expected closing parentheses")) return ParseStatus::Failure; @@ -3201,8 +3254,7 @@ ParseStatus AMDGPUAsmParser::parseImm(OperandVector &Operands, } AMDGPUOperand::Modifiers Mods; - Mods.Lit = HasLit; - Mods.Lit64 = HasLit64; + Mods.Lit = Lit; if (IsReal) { // Floating-point expressions are not supported. @@ -3253,7 +3305,7 @@ ParseStatus AMDGPUAsmParser::parseImm(OperandVector &Operands, AMDGPUOperand &Op = static_cast(*Operands.back()); Op.setModifiers(Mods); } else { - if (HasLit || HasLit64) + if (Lit != LitModifier::None) return ParseStatus::NoMatch; Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S)); } @@ -3277,14 +3329,13 @@ ParseStatus AMDGPUAsmParser::parseReg(OperandVector &Operands) { } ParseStatus AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, - bool HasSP3AbsMod, bool HasLit, - bool HasLit64) { + bool HasSP3AbsMod, LitModifier Lit) { ParseStatus Res = parseReg(Operands); if (!Res.isNoMatch()) return Res; if (isModifier()) return ParseStatus::NoMatch; - return parseImm(Operands, HasSP3AbsMod, HasLit, HasLit64); + return parseImm(Operands, HasSP3AbsMod, Lit); } bool @@ -3380,7 +3431,6 @@ AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm) { bool Neg, SP3Neg; bool Abs, SP3Abs; - bool Lit64, Lit; SMLoc Loc; // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead. 
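// Editor's sketch, not part of the patch: what the lit() and lit64() operand
// modifiers parsed above force. Without a modifier the assembler prefers an
// inline constant when one exists; lit() and lit64() force a 32-bit or 64-bit
// literal encoding instead. Simplified stand-ins below (the inline-constant
// check is reduced to the integer range -16..64), not the real encoder:
#include <cstdint>
#include <cstdio>

enum class LitModifier { None, Lit, Lit64 };
enum class Encoding { Inline, Lit32, Lit64 };

static bool isInlineImm(int64_t V) { return V >= -16 && V <= 64; }

static Encoding chooseEncoding(int64_t V, LitModifier Mod) {
  if (Mod == LitModifier::Lit64)
    return Encoding::Lit64;
  if (Mod == LitModifier::Lit)
    return Encoding::Lit32;
  return isInlineImm(V) ? Encoding::Inline : Encoding::Lit32;
}

int main() {
  // s_mov_b32 s0, 1      -> inline constant
  // s_mov_b32 s0, lit(1) -> forced 32-bit literal
  std::printf("%d %d\n", static_cast<int>(chooseEncoding(1, LitModifier::None)),
              static_cast<int>(chooseEncoding(1, LitModifier::Lit)));
}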
@@ -3400,18 +3450,19 @@ AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands, if (Abs && !skipToken(AsmToken::LParen, "expected left paren after abs")) return ParseStatus::Failure; - Lit64 = trySkipId("lit64"); - if (Lit64) { + LitModifier Lit = LitModifier::None; + if (trySkipId("lit")) { + Lit = LitModifier::Lit; + if (!skipToken(AsmToken::LParen, "expected left paren after lit")) + return ParseStatus::Failure; + } else if (trySkipId("lit64")) { + Lit = LitModifier::Lit64; if (!skipToken(AsmToken::LParen, "expected left paren after lit64")) return ParseStatus::Failure; if (!has64BitLiterals()) return Error(Loc, "lit64 is not supported on this GPU"); } - Lit = !Lit64 && trySkipId("lit"); - if (Lit && !skipToken(AsmToken::LParen, "expected left paren after lit")) - return ParseStatus::Failure; - Loc = getLoc(); SP3Abs = trySkipToken(AsmToken::Pipe); if (Abs && SP3Abs) @@ -3419,16 +3470,16 @@ AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands, ParseStatus Res; if (AllowImm) { - Res = parseRegOrImm(Operands, SP3Abs, Lit, Lit64); + Res = parseRegOrImm(Operands, SP3Abs, Lit); } else { Res = parseReg(Operands); } if (!Res.isSuccess()) - return (SP3Neg || Neg || SP3Abs || Abs || Lit || Lit64) + return (SP3Neg || Neg || SP3Abs || Abs || Lit != LitModifier::None) ? ParseStatus::Failure : Res; - if ((Lit || Lit64) && !Operands.back()->isImm()) + if (Lit != LitModifier::None && !Operands.back()->isImm()) Error(Loc, "expected immediate with lit modifier"); if (SP3Abs && !skipToken(AsmToken::Pipe, "expected vertical bar")) @@ -3437,7 +3488,7 @@ AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands, return ParseStatus::Failure; if (Neg && !skipToken(AsmToken::RParen, "expected closing parentheses")) return ParseStatus::Failure; - if ((Lit || Lit64) && + if (Lit != LitModifier::None && !skipToken(AsmToken::RParen, "expected closing parentheses")) return ParseStatus::Failure; @@ -3445,9 +3496,8 @@ AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands, Mods.Abs = Abs || SP3Abs; Mods.Neg = Neg || SP3Neg; Mods.Lit = Lit; - Mods.Lit64 = Lit64; - if (Mods.hasFPModifiers() || Lit || Lit64) { + if (Mods.hasFPModifiers() || Lit != LitModifier::None) { AMDGPUOperand &Op = static_cast(*Operands.back()); if (Op.isExpr()) return Error(Op.getStartLoc(), "expected an absolute expression"); @@ -3637,7 +3687,7 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst, const MCOperand &MO = Inst.getOperand(OpIdx); - int64_t Val = MO.getImm(); + int64_t Val = MO.isImm() ? 
MO.getImm() : getLitValue(MO.getExpr()); auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx); switch (OpSize) { // expected operand size @@ -4038,8 +4088,7 @@ bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) { constexpr uint64_t MIMGFlags = SIInstrFlags::MIMG | SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE; -bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst, - const SMLoc &IDLoc) { +bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst, SMLoc IDLoc) { const unsigned Opc = Inst.getOpcode(); const MCInstrDesc &Desc = MII.get(Opc); @@ -4086,8 +4135,7 @@ bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst, return false; } -bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst, - const SMLoc &IDLoc) { +bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst, SMLoc IDLoc) { const unsigned Opc = Inst.getOpcode(); const MCInstrDesc &Desc = MII.get(Opc); @@ -4765,16 +4813,26 @@ bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst, const MCOperand &MO = Inst.getOperand(OpIdx); // Exclude special imm operands (like that used by s_set_gpr_idx_on) if (AMDGPU::isSISrcOperand(Desc, OpIdx)) { - if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) { + std::optional Imm; + if (MO.isImm()) { + Imm = MO.getImm(); + } else if (MO.isExpr()) { + if (isLitExpr(MO.getExpr())) + Imm = getLitValue(MO.getExpr()); + } else { + continue; + } + + if (!Imm.has_value()) { + ++NumExprs; + } else if (!isInlineConstant(Inst, OpIdx)) { auto OpType = static_cast( Desc.operands()[OpIdx].OperandType); - int64_t Value = encode32BitLiteral(MO.getImm(), OpType); + int64_t Value = encode32BitLiteral(*Imm, OpType); if (NumLiterals == 0 || LiteralValue != Value) { LiteralValue = Value; ++NumLiterals; } - } else if (MO.isExpr()) { - ++NumExprs; } } } @@ -4977,9 +5035,8 @@ bool AMDGPUAsmParser::validateDPP(const MCInst &Inst, // Check if VCC register matches wavefront size bool AMDGPUAsmParser::validateVccOperand(MCRegister Reg) const { - auto FB = getFeatureBits(); - return (FB[AMDGPU::FeatureWavefrontSize64] && Reg == AMDGPU::VCC) || - (FB[AMDGPU::FeatureWavefrontSize32] && Reg == AMDGPU::VCC_LO); + return (Reg == AMDGPU::VCC && isWave64()) || + (Reg == AMDGPU::VCC_LO && isWave32()); } // One unique literal can be used. VOP3 literal is only allowed in GFX10+ @@ -5007,9 +5064,18 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst, if (!isSISrcOperand(Desc, OpIdx)) continue; + std::optional Imm; + if (MO.isImm()) + Imm = MO.getImm(); + else if (MO.isExpr() && isLitExpr(MO.getExpr())) + Imm = getLitValue(MO.getExpr()); + bool IsAnotherLiteral = false; - if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) { - uint64_t Value = static_cast(MO.getImm()); + if (!Imm.has_value()) { + // Literal value not known, so we conservately assume it's different. + IsAnotherLiteral = true; + } else if (!isInlineConstant(Inst, OpIdx)) { + uint64_t Value = *Imm; bool IsForcedFP64 = Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_KIMM64 || (Desc.operands()[OpIdx].OperandType == AMDGPU::OPERAND_REG_IMM_FP64 && @@ -5030,9 +5096,6 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst, IsAnotherLiteral = !LiteralValue || *LiteralValue != Value; LiteralValue = Value; - } else if (MO.isExpr()) { - // Literal value not known, so we conservately assume it's different. 
- IsAnotherLiteral = true; } if (IsAnotherLiteral && !HasMandatoryLiteral && @@ -5280,7 +5343,7 @@ bool AMDGPUAsmParser::validateGWS(const MCInst &Inst, bool AMDGPUAsmParser::validateCoherencyBits(const MCInst &Inst, const OperandVector &Operands, - const SMLoc &IDLoc) { + SMLoc IDLoc) { int CPolPos = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::cpol); if (CPolPos == -1) @@ -5477,8 +5540,7 @@ bool AMDGPUAsmParser::validateWMMA(const MCInst &Inst, validateFmt(AMDGPU::OpName::matrix_b_fmt, AMDGPU::OpName::src1); } -bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst, - const SMLoc &IDLoc, +bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst, SMLoc IDLoc, const OperandVector &Operands) { if (!validateLdsDirect(Inst, Operands)) return false; @@ -5640,7 +5702,7 @@ bool AMDGPUAsmParser::isSupportedMnemo(StringRef Mnemo, } bool AMDGPUAsmParser::checkUnsupportedInstruction(StringRef Mnemo, - const SMLoc &IDLoc) { + SMLoc IDLoc) { FeatureBitset FBS = ComputeAvailableFeatures(getFeatureBits()); // Check if requested instruction variant is supported. @@ -5663,7 +5725,7 @@ bool AMDGPUAsmParser::checkUnsupportedInstruction(StringRef Mnemo, // Check if this instruction may be used with a different wavesize. if (isGFX10Plus() && getFeatureBits()[AMDGPU::FeatureWavefrontSize64] && !getFeatureBits()[AMDGPU::FeatureWavefrontSize32]) { - + // FIXME: Use getAvailableFeatures, and do not manually recompute FeatureBitset FeaturesWS32 = getFeatureBits(); FeaturesWS32.flip(AMDGPU::FeatureWavefrontSize64) .flip(AMDGPU::FeatureWavefrontSize32); @@ -6418,10 +6480,10 @@ bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID, if (C.code_properties & AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32) { if (!isGFX10Plus()) return TokError("enable_wavefront_size32=1 is only allowed on GFX10+"); - if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32]) + if (!isWave32()) return TokError("enable_wavefront_size32=1 requires +WavefrontSize32"); } else { - if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize64]) + if (!isWave64()) return TokError("enable_wavefront_size32=0 requires +WavefrontSize64"); } } @@ -6430,10 +6492,10 @@ bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID, if (C.wavefront_size == 5) { if (!isGFX10Plus()) return TokError("wavefront_size=5 is only allowed on GFX10+"); - if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32]) + if (!isWave32()) return TokError("wavefront_size=5 requires +WavefrontSize32"); } else if (C.wavefront_size == 6) { - if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize64]) + if (!isWave64()) return TokError("wavefront_size=6 requires +WavefrontSize64"); } } @@ -10336,7 +10398,6 @@ LLVMInitializeAMDGPUAsmParser() { RegisterMCAsmParser B(getTheGCNTarget()); } -#define GET_REGISTER_MATCHER #define GET_MATCHER_IMPLEMENTATION #define GET_MNEMONIC_SPELL_CHECKER #define GET_MNEMONIC_CHECKER diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td index f2e432fa8d7f5..b2ff5a11aec6e 100644 --- a/llvm/lib/Target/AMDGPU/DSInstructions.td +++ b/llvm/lib/Target/AMDGPU/DSInstructions.td @@ -969,10 +969,9 @@ multiclass DSReadPat_t16 { } let OtherPredicates = [NotLDSRequiresM0Init] in { - foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in - let True16Predicate = p in { - def : DSReadPat(!cast(inst)#"_gfx9"), vt, !cast(frag)>; - } + let True16Predicate = NotUseRealTrue16Insts in { + def : DSReadPat(!cast(inst)#"_gfx9"), vt, !cast(frag)>; + } let True16Predicate = UseRealTrue16Insts in { def : 
DSReadPat(!cast(inst)#"_t16"), vt, !cast(frag)>; } @@ -1050,10 +1049,9 @@ multiclass DSWritePat_t16 { } let OtherPredicates = [NotLDSRequiresM0Init] in { - foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in - let True16Predicate = p in { - def : DSWritePat(!cast(inst)#"_gfx9"), vt, !cast(frag)>; - } + let True16Predicate = NotUseRealTrue16Insts in { + def : DSWritePat(!cast(inst)#"_gfx9"), vt, !cast(frag)>; + } let True16Predicate = UseRealTrue16Insts in { def : DSWritePat(!cast(inst)#"_t16"), vt, !cast(frag)>; } diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp index d3db1b7394675..2d5ae29c1037c 100644 --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -17,6 +17,7 @@ // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)? #include "Disassembler/AMDGPUDisassembler.h" +#include "MCTargetDesc/AMDGPUMCExpr.h" #include "MCTargetDesc/AMDGPUMCTargetDesc.h" #include "SIDefines.h" #include "SIRegisterInfo.h" @@ -123,14 +124,14 @@ static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm, uint64_t Addr, static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, uint64_t Addr, const MCDisassembler *Decoder) { const auto *DAsm = static_cast(Decoder); - return addOperand(Inst, DAsm->decodeBoolReg(Val)); + return addOperand(Inst, DAsm->decodeBoolReg(Inst, Val)); } static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val, uint64_t Addr, const MCDisassembler *Decoder) { const auto *DAsm = static_cast(Decoder); - return addOperand(Inst, DAsm->decodeSplitBarrier(Val)); + return addOperand(Inst, DAsm->decodeSplitBarrier(Inst, Val)); } static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr, @@ -164,7 +165,7 @@ static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr, const MCDisassembler *Decoder) { \ assert(Imm < (1 << EncSize) && #EncSize "-bit encoding"); \ auto DAsm = static_cast(Decoder); \ - return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm)); \ + return addOperand(Inst, DAsm->decodeSrcOp(Inst, OpWidth, EncImm)); \ } static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize, @@ -172,7 +173,7 @@ static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize, const MCDisassembler *Decoder) { assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!"); const auto *DAsm = static_cast(Decoder); - return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm)); + return addOperand(Inst, DAsm->decodeSrcOp(Inst, OpWidth, EncImm)); } // Decoder for registers. 
Imm(7-bit) is number of register, uses decodeSrcOp to @@ -317,7 +318,7 @@ static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm, unsigned RegIdx = Imm & 0x7f; return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi)); } - return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF)); + return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(Inst, OpWidth, Imm & 0xFF)); } template @@ -332,7 +333,7 @@ static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm, unsigned RegIdx = Imm & 0xff; return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi)); } - return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF)); + return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(Inst, OpWidth, Imm & 0xFF)); } static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm, @@ -371,7 +372,7 @@ static DecodeStatus decodeOperandVOPDDstY(MCInst &Inst, unsigned Val, static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm, unsigned Opw, const MCDisassembler *Decoder) { const auto *DAsm = static_cast(Decoder); - return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256)); + return addOperand(Inst, DAsm->decodeSrcOp(Inst, Opw, Imm | 256)); } template @@ -386,7 +387,7 @@ static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm, const MCDisassembler *Decoder) { assert(Imm < (1 << 9) && "9-bit encoding"); const auto *DAsm = static_cast(Decoder); - return addOperand(Inst, DAsm->decodeSrcOp(64, Imm)); + return addOperand(Inst, DAsm->decodeSrcOp(Inst, 64, Imm)); } #define DECODE_SDWA(DecName) \ @@ -510,8 +511,8 @@ void AMDGPUDisassembler::decodeImmOperands(MCInst &MI, } if (Imm == AMDGPU::EncValues::LITERAL_CONST) { - Op = decodeLiteralConstant(OpDesc.OperandType == - AMDGPU::OPERAND_REG_IMM_FP64); + Op = decodeLiteralConstant( + Desc, OpDesc, OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_FP64); continue; } @@ -1543,10 +1544,16 @@ AMDGPUDisassembler::decodeMandatoryLiteral64Constant(uint64_t Val) const { } HasLiteral = true; Literal = Literal64 = Val; - return MCOperand::createImm(Literal64); + + bool UseLit64 = Lo_32(Literal64) != 0; + return UseLit64 ? MCOperand::createExpr(AMDGPUMCExpr::createLit( + LitModifier::Lit64, Literal64, getContext())) + : MCOperand::createImm(Literal64); } -MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const { +MCOperand AMDGPUDisassembler::decodeLiteralConstant(const MCInstrDesc &Desc, + const MCOperandInfo &OpDesc, + bool ExtendFP64) const { // For now all literal constants are supposed to be unsigned integer // ToDo: deal with signed/unsigned 64-bit integer constants // ToDo: deal with float/double constants @@ -1560,10 +1567,31 @@ MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const { if (ExtendFP64) Literal64 <<= 32; } - return MCOperand::createImm(ExtendFP64 ? Literal64 : Literal); + + int64_t Val = ExtendFP64 ? Literal64 : Literal; + + bool CanUse64BitLiterals = + STI.hasFeature(AMDGPU::Feature64BitLiterals) && + !(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P)); + + bool UseLit64 = false; + if (CanUse64BitLiterals) { + if (OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_INT64 || + OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT64) + UseLit64 = !isInt<32>(Val) || !isUInt<32>(Val); + else if (OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_FP64 || + OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP64 || + OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_AC_FP64) + UseLit64 = Lo_32(Val) != 0; + } + + return UseLit64 ? 
MCOperand::createExpr(AMDGPUMCExpr::createLit( + LitModifier::Lit64, Val, getContext())) + : MCOperand::createImm(Val); } -MCOperand AMDGPUDisassembler::decodeLiteral64Constant() const { +MCOperand +AMDGPUDisassembler::decodeLiteral64Constant(const MCInst &Inst) const { assert(STI.hasFeature(AMDGPU::Feature64BitLiterals)); if (!HasLiteral) { @@ -1574,7 +1602,23 @@ MCOperand AMDGPUDisassembler::decodeLiteral64Constant() const { HasLiteral = true; Literal64 = eatBytes(Bytes); } - return MCOperand::createImm(Literal64); + + bool UseLit64 = false; + const MCInstrDesc &Desc = MCII->get(Inst.getOpcode()); + const MCOperandInfo &OpDesc = Desc.operands()[Inst.getNumOperands()]; + if (OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_INT64 || + OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT64) { + UseLit64 = !isInt<32>(Literal64) || !isUInt<32>(Literal64); + } else { + assert(OpDesc.OperandType == AMDGPU::OPERAND_REG_IMM_FP64 || + OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP64 || + OpDesc.OperandType == AMDGPU::OPERAND_REG_INLINE_AC_FP64); + UseLit64 = Lo_32(Literal64) != 0; + } + + return UseLit64 ? MCOperand::createExpr(AMDGPUMCExpr::createLit( + LitModifier::Lit64, Literal64, getContext())) + : MCOperand::createImm(Literal64); } MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) { @@ -1822,7 +1866,8 @@ int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const { return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1; } -MCOperand AMDGPUDisassembler::decodeSrcOp(unsigned Width, unsigned Val) const { +MCOperand AMDGPUDisassembler::decodeSrcOp(const MCInst &Inst, unsigned Width, + unsigned Val) const { using namespace AMDGPU::EncValues; assert(Val < 1024); // enum10 @@ -1834,10 +1879,11 @@ MCOperand AMDGPUDisassembler::decodeSrcOp(unsigned Width, unsigned Val) const { return createRegOperand(IsAGPR ? getAgprClassId(Width) : getVgprClassId(Width), Val - VGPR_MIN); } - return decodeNonVGPRSrcOp(Width, Val & 0xFF); + return decodeNonVGPRSrcOp(Inst, Width, Val & 0xFF); } -MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(unsigned Width, +MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(const MCInst &Inst, + unsigned Width, unsigned Val) const { // Cases when Val{8} is 1 (vgpr, agpr or true 16 vgpr) should have been // decoded earlier. @@ -1861,7 +1907,7 @@ MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(unsigned Width, return MCOperand::createImm(Val); if (Val == LITERAL64_CONST && STI.hasFeature(AMDGPU::Feature64BitLiterals)) { - return decodeLiteral64Constant(); + return decodeLiteral64Constant(Inst); } switch (Width) { @@ -2053,13 +2099,16 @@ MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const { return createRegOperand(IsWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC); } -MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const { - return STI.hasFeature(AMDGPU::FeatureWavefrontSize32) ? decodeSrcOp(32, Val) - : decodeSrcOp(64, Val); +MCOperand AMDGPUDisassembler::decodeBoolReg(const MCInst &Inst, + unsigned Val) const { + return STI.hasFeature(AMDGPU::FeatureWavefrontSize32) + ? 
decodeSrcOp(Inst, 32, Val) + : decodeSrcOp(Inst, 64, Val); } -MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const { - return decodeSrcOp(32, Val); +MCOperand AMDGPUDisassembler::decodeSplitBarrier(const MCInst &Inst, + unsigned Val) const { + return decodeSrcOp(Inst, 32, Val); } MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const { diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h index c1131c2936fc7..935c3836f2ed9 100644 --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h @@ -142,12 +142,15 @@ class AMDGPUDisassembler : public MCDisassembler { MCOperand decodeMandatoryLiteralConstant(unsigned Imm) const; MCOperand decodeMandatoryLiteral64Constant(uint64_t Imm) const; - MCOperand decodeLiteralConstant(bool ExtendFP64) const; - MCOperand decodeLiteral64Constant() const; + MCOperand decodeLiteralConstant(const MCInstrDesc &Desc, + const MCOperandInfo &OpDesc, + bool ExtendFP64) const; + MCOperand decodeLiteral64Constant(const MCInst &Inst) const; - MCOperand decodeSrcOp(unsigned Width, unsigned Val) const; + MCOperand decodeSrcOp(const MCInst &Inst, unsigned Width, unsigned Val) const; - MCOperand decodeNonVGPRSrcOp(unsigned Width, unsigned Val) const; + MCOperand decodeNonVGPRSrcOp(const MCInst &Inst, unsigned Width, + unsigned Val) const; MCOperand decodeVOPDDstYOp(MCInst &Inst, unsigned Val) const; MCOperand decodeSpecialReg32(unsigned Val) const; @@ -159,8 +162,8 @@ class AMDGPUDisassembler : public MCDisassembler { MCOperand decodeSDWASrc32(unsigned Val) const; MCOperand decodeSDWAVopcDst(unsigned Val) const; - MCOperand decodeBoolReg(unsigned Val) const; - MCOperand decodeSplitBarrier(unsigned Val) const; + MCOperand decodeBoolReg(const MCInst &Inst, unsigned Val) const; + MCOperand decodeSplitBarrier(const MCInst &Inst, unsigned Val) const; MCOperand decodeDpp8FI(unsigned Val) const; MCOperand decodeVersionImm(unsigned Imm) const; diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td index 7e5ae25ff30e6..5a22b23cecf86 100644 --- a/llvm/lib/Target/AMDGPU/FLATInstructions.td +++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td @@ -1982,8 +1982,7 @@ defm : FlatLoadPats ; defm : FlatLoadPats ; defm : FlatLoadPats ; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { defm : FlatLoadPats ; defm : FlatLoadPats ; defm : FlatLoadPats ; @@ -2009,8 +2008,8 @@ let OtherPredicates = [D16PreservesUnusedBits, HasFlatAddressSpace], True16Predi defm : FlatLoadPats_D16_t16; defm : FlatStorePats_t16 ; defm : FlatStorePats_t16 ; - def : FlatStorePat ; - def : FlatStorePat ; + defm : FlatStorePats_t16 ; + defm : FlatStorePats_t16 ; } // End let OtherPredicates = [D16PreservesUnusedBits, HasFlatAddressSpace], True16Predicate = UseRealTrue16Insts defm : FlatLoadPats ; @@ -2127,8 +2126,7 @@ defm : GlobalFLATLoadPats ; defm : GlobalFLATLoadPats ; defm : GlobalFLATLoadPats ; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { defm : GlobalFLATLoadPats ; defm : GlobalFLATLoadPats ; defm : GlobalFLATLoadPats ; @@ -2187,8 +2185,7 @@ defm : GlobalFLATStorePats ; defm : GlobalFLATStorePats ; defm : GlobalFLATStorePats ; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let OtherPredicates = [HasFlatGlobalInsts], 
True16Predicate = p in { +let OtherPredicates = [HasFlatGlobalInsts], True16Predicate = NotUseRealTrue16Insts in { defm : GlobalFLATStorePats ; defm : GlobalFLATStorePats ; defm : GlobalFLATStorePats ; @@ -2356,8 +2353,7 @@ defm : ScratchFLATLoadPats ; defm : ScratchFLATLoadPats ; defm : ScratchFLATLoadPats ; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { defm : ScratchFLATLoadPats ; defm : ScratchFLATLoadPats ; defm : ScratchFLATLoadPats ; diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp index a3b64aee297b2..1d9a427f2829b 100644 --- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp +++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp @@ -82,7 +82,7 @@ static bool isDivFMas(unsigned Opcode) { } static bool isSGetReg(unsigned Opcode) { - return Opcode == AMDGPU::S_GETREG_B32; + return Opcode == AMDGPU::S_GETREG_B32 || Opcode == AMDGPU::S_GETREG_B32_const; } static bool isSSetReg(unsigned Opcode) { @@ -443,40 +443,101 @@ using GetNumWaitStatesFn = function_ref; // Search for a hazard in a block and its predecessors. template static bool -hasHazard(StateT State, +hasHazard(StateT InitialState, function_ref IsHazard, function_ref UpdateState, - const MachineBasicBlock *MBB, - MachineBasicBlock::const_reverse_instr_iterator I, - DenseSet &Visited) { - for (auto E = MBB->instr_rend(); I != E; ++I) { - // No need to look at parent BUNDLE instructions. - if (I->isBundle()) - continue; - - switch (IsHazard(State, *I)) { - case HazardFound: - return true; - case HazardExpired: - return false; - default: - // Continue search - break; + const MachineBasicBlock *InitialMBB, + MachineBasicBlock::const_reverse_instr_iterator InitialI) { + struct StateMapKey { + SmallVectorImpl *States; + unsigned Idx; + static bool isEqual(const StateMapKey &LHS, const StateMapKey &RHS) { + return LHS.States == RHS.States && LHS.Idx == RHS.Idx; + } + }; + struct StateMapKeyTraits : DenseMapInfo { + static inline StateMapKey getEmptyKey() { + return {static_cast *>( + DenseMapInfo::getEmptyKey()), + DenseMapInfo::getEmptyKey()}; + } + static inline StateMapKey getTombstoneKey() { + return {static_cast *>( + DenseMapInfo::getTombstoneKey()), + DenseMapInfo::getTombstoneKey()}; + } + static unsigned getHashValue(const StateMapKey &Key) { + return StateT::getHashValue((*Key.States)[Key.Idx]); } + static unsigned getHashValue(const StateT &State) { + return StateT::getHashValue(State); + } + static bool isEqual(const StateMapKey &LHS, const StateMapKey &RHS) { + const auto EKey = getEmptyKey(); + const auto TKey = getTombstoneKey(); + if (StateMapKey::isEqual(LHS, EKey) || StateMapKey::isEqual(RHS, EKey) || + StateMapKey::isEqual(LHS, TKey) || StateMapKey::isEqual(RHS, TKey)) + return StateMapKey::isEqual(LHS, RHS); + return StateT::isEqual((*LHS.States)[LHS.Idx], (*RHS.States)[RHS.Idx]); + } + static bool isEqual(const StateT &LHS, const StateMapKey &RHS) { + if (StateMapKey::isEqual(RHS, getEmptyKey()) || + StateMapKey::isEqual(RHS, getTombstoneKey())) + return false; + return StateT::isEqual(LHS, (*RHS.States)[RHS.Idx]); + } + }; - if (I->isInlineAsm() || I->isMetaInstruction()) - continue; + SmallDenseMap StateMap; + SmallVector States; - UpdateState(State, *I); - } + MachineBasicBlock::const_reverse_instr_iterator I = InitialI; + const MachineBasicBlock *MBB = InitialMBB; + StateT State = InitialState; - for (MachineBasicBlock *Pred : MBB->predecessors()) { - if 
(!Visited.insert(Pred).second) - continue; + SmallSetVector, 16> Worklist; + unsigned WorkIdx = 0; + for (;;) { + bool Expired = false; + for (auto E = MBB->instr_rend(); I != E; ++I) { + // No need to look at parent BUNDLE instructions. + if (I->isBundle()) + continue; - if (hasHazard(State, IsHazard, UpdateState, Pred, Pred->instr_rbegin(), - Visited)) - return true; + auto Result = IsHazard(State, *I); + if (Result == HazardFound) + return true; + if (Result == HazardExpired) { + Expired = true; + break; + } + + if (I->isInlineAsm() || I->isMetaInstruction()) + continue; + + UpdateState(State, *I); + } + + if (!Expired) { + unsigned StateIdx = States.size(); + StateMapKey Key = {&States, StateIdx}; + auto Insertion = StateMap.insert_as(std::pair(Key, StateIdx), State); + if (Insertion.second) { + States.emplace_back(State); + } else { + StateIdx = Insertion.first->second; + } + for (MachineBasicBlock *Pred : MBB->predecessors()) + Worklist.insert(std::pair(Pred, StateIdx)); + } + + if (WorkIdx == Worklist.size()) + break; + + unsigned StateIdx; + std::tie(MBB, StateIdx) = Worklist[WorkIdx++]; + State = States[StateIdx]; + I = MBB->instr_rbegin(); } return false; @@ -1641,6 +1702,15 @@ bool GCNHazardRecognizer::fixVALUPartialForwardingHazard(MachineInstr *MI) { SmallDenseMap DefPos; int ExecPos = std::numeric_limits::max(); int VALUs = 0; + + static unsigned getHashValue(const StateType &State) { + return hash_combine(State.ExecPos, State.VALUs, + hash_combine_range(State.DefPos)); + } + static bool isEqual(const StateType &LHS, const StateType &RHS) { + return LHS.DefPos == RHS.DefPos && LHS.ExecPos == RHS.ExecPos && + LHS.VALUs == RHS.VALUs; + } }; StateType State; @@ -1735,9 +1805,8 @@ bool GCNHazardRecognizer::fixVALUPartialForwardingHazard(MachineInstr *MI) { State.VALUs += 1; }; - DenseSet Visited; if (!hasHazard(State, IsHazardFn, UpdateStateFn, MI->getParent(), - std::next(MI->getReverseIterator()), Visited)) + std::next(MI->getReverseIterator()))) return false; BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), @@ -1778,6 +1847,13 @@ bool GCNHazardRecognizer::fixVALUTransUseHazard(MachineInstr *MI) { struct StateType { int VALUs = 0; int TRANS = 0; + + static unsigned getHashValue(const StateType &State) { + return hash_combine(State.VALUs, State.TRANS); + } + static bool isEqual(const StateType &LHS, const StateType &RHS) { + return LHS.VALUs == RHS.VALUs && LHS.TRANS == RHS.TRANS; + } }; StateType State; @@ -1813,9 +1889,8 @@ bool GCNHazardRecognizer::fixVALUTransUseHazard(MachineInstr *MI) { State.TRANS += 1; }; - DenseSet Visited; if (!hasHazard(State, IsHazardFn, UpdateStateFn, MI->getParent(), - std::next(MI->getReverseIterator()), Visited)) + std::next(MI->getReverseIterator()))) return false; // Hazard is observed - insert a wait on va_dst counter to ensure hazard is diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp index 254b75b784e75..bdc08101c7119 100644 --- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp +++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp @@ -29,6 +29,7 @@ #include "SIMachineFunctionInfo.h" #include "Utils/AMDGPUBaseInfo.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/CalcSpillWeights.h" #include "llvm/CodeGen/RegisterClassInfo.h" #include "llvm/MC/LaneBitmask.h" #include "llvm/Support/ErrorHandling.h" @@ -1633,64 +1634,6 @@ void GCNSchedStage::revertScheduling() { DAG.Regions[RegionIdx] = std::pair(DAG.RegionBegin, DAG.RegionEnd); } -bool PreRARematStage::allUsesAvailableAt(const MachineInstr 
*InstToRemat, - SlotIndex OriginalIdx, - SlotIndex RematIdx) const { - - LiveIntervals *LIS = DAG.LIS; - MachineRegisterInfo &MRI = DAG.MRI; - OriginalIdx = OriginalIdx.getRegSlot(true); - RematIdx = std::max(RematIdx, RematIdx.getRegSlot(true)); - for (const MachineOperand &MO : InstToRemat->operands()) { - if (!MO.isReg() || !MO.getReg() || !MO.readsReg()) - continue; - - if (!MO.getReg().isVirtual()) { - // Do not attempt to reason about PhysRegs - // TODO: better analysis of PhysReg livness - if (!DAG.MRI.isConstantPhysReg(MO.getReg()) && - !DAG.TII->isIgnorableUse(MO)) - return false; - - // Constant PhysRegs and IgnorableUses are okay - continue; - } - - LiveInterval &LI = LIS->getInterval(MO.getReg()); - const VNInfo *OVNI = LI.getVNInfoAt(OriginalIdx); - assert(OVNI); - - // Don't allow rematerialization immediately after the original def. - // It would be incorrect if InstToRemat redefines the register. - // See PR14098. - if (SlotIndex::isSameInstr(OriginalIdx, RematIdx)) - return false; - - if (OVNI != LI.getVNInfoAt(RematIdx)) - return false; - - // Check that subrange is live at RematIdx. - if (LI.hasSubRanges()) { - const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); - unsigned SubReg = MO.getSubReg(); - LaneBitmask LM = SubReg ? TRI->getSubRegIndexLaneMask(SubReg) - : MRI.getMaxLaneMaskForVReg(MO.getReg()); - for (LiveInterval::SubRange &SR : LI.subranges()) { - if ((SR.LaneMask & LM).none()) - continue; - if (!SR.liveAt(RematIdx)) - return false; - - // Early exit if all used lanes are checked. No need to continue. - LM &= ~SR.LaneMask; - if (LM.none()) - break; - } - } - } - return true; -} - bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() { const Function &F = MF.getFunction(); @@ -1777,9 +1720,9 @@ bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() { for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) { auto Region = DAG.Regions[I]; for (auto MI = Region.first; MI != Region.second; ++MI) { - // The instruction must be trivially rematerializable. + // The instruction must be rematerializable. MachineInstr &DefMI = *MI; - if (!isTriviallyReMaterializable(DefMI)) + if (!isReMaterializable(DefMI)) continue; // We only support rematerializing virtual registers with one definition. @@ -1812,9 +1755,9 @@ bool PreRARematStage::canIncreaseOccupancyOrReduceSpill() { // Do not rematerialize an instruction it it uses registers that aren't // available at its use. This ensures that we are not extending any live // range while rematerializing. 
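// Editor's sketch, not part of the patch: the availability check described in
// the comment above, now delegated to VirtRegAuxInfo::allUsesAvailableAt(),
// only allows sinking a def to its use if every register the def reads still
// holds the same value at the insertion point. Simplified stand-in model (a
// register to version map), not the LiveIntervals API:
#include <cstdio>
#include <map>

using Reg = int;
using Version = int;
using ValueMap = std::map<Reg, Version>; // register -> value version at a point

static bool allUsesAvailableAt(const ValueMap &AtOrigDef,
                               const ValueMap &AtRematPoint) {
  for (auto [R, V] : AtOrigDef) {
    auto It = AtRematPoint.find(R);
    if (It == AtRematPoint.end() || It->second != V)
      return false; // an input was redefined or died, remat would be wrong
  }
  return true;
}

int main() {
  ValueMap AtDef{{1, 0}};    // %1 has version 0 at the original def
  ValueMap AtUseOk{{1, 0}};  // still version 0 at the use: remat is safe
  ValueMap AtUseBad{{1, 1}}; // %1 redefined in between: reject
  std::printf("%d %d\n", allUsesAvailableAt(AtDef, AtUseOk),
              allUsesAvailableAt(AtDef, AtUseBad)); // 1 0
}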
- SlotIndex DefIdx = DAG.LIS->getInstructionIndex(DefMI); SlotIndex UseIdx = DAG.LIS->getInstructionIndex(*UseMI).getRegSlot(true); - if (!allUsesAvailableAt(&DefMI, DefIdx, UseIdx)) + if (!VirtRegAuxInfo::allUsesAvailableAt(&DefMI, UseIdx, *DAG.LIS, DAG.MRI, + *DAG.TII)) continue; REMAT_DEBUG(dbgs() << "Region " << I << ": remat instruction " << DefMI); @@ -2002,8 +1945,8 @@ void PreRARematStage::rematerialize() { } // Copied from MachineLICM -bool PreRARematStage::isTriviallyReMaterializable(const MachineInstr &MI) { - if (!DAG.TII->isTriviallyReMaterializable(MI)) +bool PreRARematStage::isReMaterializable(const MachineInstr &MI) { + if (!DAG.TII->isReMaterializable(MI)) return false; for (const MachineOperand &MO : MI.all_uses()) { diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h index 790370ff8ab4d..8ea42677454e4 100644 --- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h +++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h @@ -433,7 +433,7 @@ class ClusteredLowOccStage : public GCNSchedStage { /// Attempts to reduce function spilling or, if there is no spilling, to /// increase function occupancy by one with respect to ArchVGPR usage by sinking -/// trivially rematerializable instructions to their use. When the stage +/// rematerializable instructions to their use. When the stage /// estimates reducing spilling or increasing occupancy is possible, as few /// instructions as possible are rematerialized to reduce potential negative /// effects on function latency. @@ -483,9 +483,8 @@ class PreRARematStage : public GCNSchedStage { /// PreRARematStage::TargetOccupancy. bool canIncreaseOccupancyOrReduceSpill(); - /// Whether the MI is trivially rematerializable and does not have any virtual - /// register use. - bool isTriviallyReMaterializable(const MachineInstr &MI); + /// Whether the MI is rematerializable + bool isReMaterializable(const MachineInstr &MI); /// Rematerializes all instructions in PreRARematStage::Rematerializations /// and stores the achieved occupancy after remat in @@ -497,12 +496,6 @@ class PreRARematStage : public GCNSchedStage { /// stage to their pre-stage values. void finalizeGCNSchedStage() override; - /// \p Returns true if all the uses in \p InstToRemat defined at \p - /// OriginalIdx are live at \p RematIdx. This only checks liveness of virtual - /// reg uses. - bool allUsesAvailableAt(const MachineInstr *InstToRemat, - SlotIndex OriginalIdx, SlotIndex RematIdx) const; - public: bool initGCNSchedStage() override; diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h index 920a47b5afe07..a54d6651c25c1 100644 --- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h +++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h @@ -99,6 +99,7 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo, bool EnableDS128 = false; bool EnablePRTStrictNull = false; bool DumpCode = false; + bool AssemblerPermissiveWavesize = false; // Subtarget statically properties set by tablegen bool FP64 = false; @@ -285,6 +286,8 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo, bool UseBlockVGPROpsForCSR = false; bool HasGloballyAddressableScratch = false; + bool Has45BitNumRecordsBufferResource = false; + // Dummy feature to use for assembler in tablegen. bool FeatureDisable = false; @@ -1849,6 +1852,12 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo, return 4; return 3; } + + /// \returns true if the sub-target supports buffer resource (V#) with 45-bit + /// num_records. 
+ bool has45BitNumRecordsBufferResource() const { + return Has45BitNumRecordsBufferResource; + } }; class GCNUserSGPRUsageInfo { diff --git a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp index b8f43c4550b7e..afaa19013bfc2 100644 --- a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp +++ b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp @@ -21,8 +21,8 @@ namespace llvm::mca { -void AMDGPUInstrPostProcess::postProcessInstruction( - std::unique_ptr &Inst, const MCInst &MCI) { +void AMDGPUInstrPostProcess::postProcessInstruction(Instruction &Inst, + const MCInst &MCI) { switch (MCI.getOpcode()) { case AMDGPU::S_WAITCNT: case AMDGPU::S_WAITCNT_soft: @@ -44,7 +44,7 @@ void AMDGPUInstrPostProcess::postProcessInstruction( // s_waitcnt instructions encode important information as immediate operands // which are lost during the MCInst -> mca::Instruction lowering. -void AMDGPUInstrPostProcess::processWaitCnt(std::unique_ptr &Inst, +void AMDGPUInstrPostProcess::processWaitCnt(Instruction &Inst, const MCInst &MCI) { for (int Idx = 0, N = MCI.size(); Idx < N; Idx++) { MCAOperand Op; @@ -55,7 +55,7 @@ void AMDGPUInstrPostProcess::processWaitCnt(std::unique_ptr &Inst, Op = MCAOperand::createImm(MCOp.getImm()); } Op.setIndex(Idx); - Inst->addOperand(Op); + Inst.addOperand(Op); } } diff --git a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h index 85b9c188b5d1a..cbc7427ce6cdf 100644 --- a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h +++ b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h @@ -26,7 +26,7 @@ namespace llvm { namespace mca { class AMDGPUInstrPostProcess : public InstrPostProcess { - void processWaitCnt(std::unique_ptr &Inst, const MCInst &MCI); + void processWaitCnt(Instruction &Inst, const MCInst &MCI); public: AMDGPUInstrPostProcess(const MCSubtargetInfo &STI, const MCInstrInfo &MCII) @@ -34,8 +34,7 @@ class AMDGPUInstrPostProcess : public InstrPostProcess { ~AMDGPUInstrPostProcess() = default; - void postProcessInstruction(std::unique_ptr &Inst, - const MCInst &MCI) override; + void postProcessInstruction(Instruction &Inst, const MCInst &MCI) override; }; struct WaitCntInfo { diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp index f098e7a3c6c67..d3b5718093997 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp @@ -80,9 +80,13 @@ void AMDGPUInstPrinter::printFP64ImmOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O) { // KIMM64 - const MCInstrDesc &Desc = MII.get(MI->getOpcode()); - uint64_t Imm = MI->getOperand(OpNo).getImm(); - printLiteral64(Desc, Imm, STI, O, /*IsFP=*/true); + const MCOperand &Op = MI->getOperand(OpNo); + if (Op.isExpr()) { + MAI.printExpr(O, *Op.getExpr()); + return; + } + + printLiteral64(Op.getImm(), O, /*IsFP=*/true); } void AMDGPUInstPrinter::printNamedBit(const MCInst *MI, unsigned OpNo, @@ -332,8 +336,16 @@ static MCPhysReg getRegForPrinting(MCPhysReg Reg, const MCRegisterInfo &MRI) { if (Idx < 0x100) return Reg; + unsigned RegNo = Idx % 0x100; const MCRegisterClass *RC = getVGPRPhysRegClass(Reg, MRI); - return RC->getRegister(Idx % 0x100); + if (RC->getID() == AMDGPU::VGPR_16RegClassID) { + // This class has 2048 registers with interleaved lo16 and hi16. 
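// Editor's sketch, not part of the patch: worked example of the interleaved
// lo16/hi16 numbering applied just below. With simplified stand-ins (not the
// MC register-class API), encoded VGPR index N maps to class register 2*N for
// v<N>.l and 2*N+1 for v<N>.h:
#include <cstdio>

static unsigned vgpr16ClassIndex(unsigned EncodedIdx, bool IsHi16) {
  unsigned RegNo = EncodedIdx * 2; // lo16 and hi16 halves interleave
  if (IsHi16)
    ++RegNo;
  return RegNo;
}

int main() {
  std::printf("v5.l -> %u, v5.h -> %u\n", vgpr16ClassIndex(5, false),
              vgpr16ClassIndex(5, true)); // 10 and 11
}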
+ RegNo *= 2; + if (Enc & AMDGPU::HWEncoding::IS_HI16) + ++RegNo; + } + + return RC->getRegister(RegNo); } // Restore MSBs of a VGPR above 255 from the MCInstrAnalysis. @@ -652,7 +664,7 @@ void AMDGPUInstPrinter::printImmediate32(uint32_t Imm, O << formatHex(static_cast(Imm)); } -void AMDGPUInstPrinter::printImmediate64(const MCInstrDesc &Desc, uint64_t Imm, +void AMDGPUInstPrinter::printImmediate64(uint64_t Imm, const MCSubtargetInfo &STI, raw_ostream &O, bool IsFP) { int64_t SImm = static_cast(Imm); @@ -683,27 +695,15 @@ void AMDGPUInstPrinter::printImmediate64(const MCInstrDesc &Desc, uint64_t Imm, STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm)) O << "0.15915494309189532"; else - printLiteral64(Desc, Imm, STI, O, IsFP); + printLiteral64(Imm, O, IsFP); } -void AMDGPUInstPrinter::printLiteral64(const MCInstrDesc &Desc, uint64_t Imm, - const MCSubtargetInfo &STI, - raw_ostream &O, bool IsFP) { - // This part needs to align with AMDGPUOperand::addLiteralImmOperand. - bool CanUse64BitLiterals = - STI.hasFeature(AMDGPU::Feature64BitLiterals) && - !(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P)); - if (IsFP) { - if (CanUse64BitLiterals && Lo_32(Imm)) - O << "lit64(" << formatHex(static_cast(Imm)) << ')'; - else - O << formatHex(static_cast(Hi_32(Imm))); - } else { - if (CanUse64BitLiterals && (!isInt<32>(Imm) || !isUInt<32>(Imm))) - O << "lit64(" << formatHex(static_cast(Imm)) << ')'; - else - O << formatHex(static_cast(Imm)); - } +void AMDGPUInstPrinter::printLiteral64(uint64_t Imm, raw_ostream &O, + bool IsFP) { + if (IsFP && Lo_32(Imm) == 0) + O << formatHex(static_cast(Hi_32(Imm))); + else + O << formatHex(Imm); } void AMDGPUInstPrinter::printBLGP(const MCInst *MI, unsigned OpNo, @@ -814,12 +814,12 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo, break; case AMDGPU::OPERAND_REG_IMM_INT64: case AMDGPU::OPERAND_REG_INLINE_C_INT64: - printImmediate64(Desc, Op.getImm(), STI, O, false); + printImmediate64(Op.getImm(), STI, O, false); break; case AMDGPU::OPERAND_REG_IMM_FP64: case AMDGPU::OPERAND_REG_INLINE_C_FP64: case AMDGPU::OPERAND_REG_INLINE_AC_FP64: - printImmediate64(Desc, Op.getImm(), STI, O, true); + printImmediate64(Op.getImm(), STI, O, true); break; case AMDGPU::OPERAND_REG_INLINE_C_INT16: case AMDGPU::OPERAND_REG_IMM_INT16: diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h index 21cc2f229de91..b27295e73ec99 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h @@ -89,10 +89,9 @@ class AMDGPUInstPrinter : public MCInstPrinter { raw_ostream &O); void printImmediate32(uint32_t Imm, const MCSubtargetInfo &STI, raw_ostream &O); - void printImmediate64(const MCInstrDesc &Desc, uint64_t Imm, - const MCSubtargetInfo &STI, raw_ostream &O, bool IsFP); - void printLiteral64(const MCInstrDesc &Desc, uint64_t Imm, - const MCSubtargetInfo &STI, raw_ostream &O, bool IsFP); + void printImmediate64(uint64_t Imm, const MCSubtargetInfo &STI, + raw_ostream &O, bool IsFP); + void printLiteral64(uint64_t Imm, raw_ostream &O, bool IsFP); void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O); void printRegularOperand(const MCInst *MI, unsigned OpNo, diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp index bf212bbca934c..f287911654c24 100644 --- 
a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp @@ -345,7 +345,7 @@ std::optional AMDGPUMCCodeEmitter::getLitEncoding( case AMDGPU::OPERAND_KIMM32: case AMDGPU::OPERAND_KIMM16: case AMDGPU::OPERAND_KIMM64: - return MO.getImm(); + return Imm; default: llvm_unreachable("invalid operand size"); } @@ -457,6 +457,8 @@ void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI, else if (Op.isExpr()) { if (const auto *C = dyn_cast(Op.getExpr())) Imm = C->getValue(); + else if (AMDGPU::isLitExpr(Op.getExpr())) + Imm = AMDGPU::getLitValue(Op.getExpr()); } else // Exprs will be replaced with a fixup value. llvm_unreachable("Must be immediate or expr"); diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp index 6638fa2f687d8..c27be0250e386 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp @@ -75,6 +75,12 @@ void AMDGPUMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const { case AGVK_Occupancy: OS << "occupancy("; break; + case AGVK_Lit: + OS << "lit("; + break; + case AGVK_Lit64: + OS << "lit64("; + break; } for (const auto *It = Args.begin(); It != Args.end(); ++It) { MAI->printExpr(OS, **It); @@ -259,6 +265,9 @@ bool AMDGPUMCExpr::evaluateAsRelocatableImpl(MCValue &Res, return evaluateTotalNumVGPR(Res, Asm); case AGVK_Occupancy: return evaluateOccupancy(Res, Asm); + case AGVK_Lit: + case AGVK_Lit64: + return Args[0]->evaluateAsRelocatable(Res, Asm); } for (const MCExpr *Arg : Args) { @@ -332,6 +341,14 @@ const AMDGPUMCExpr *AMDGPUMCExpr::createOccupancy( Ctx); } +const AMDGPUMCExpr *AMDGPUMCExpr::createLit(LitModifier Lit, int64_t Value, + MCContext &Ctx) { + assert(Lit == LitModifier::Lit || Lit == LitModifier::Lit64); + return create(Lit == LitModifier::Lit ? 
VariantKind::AGVK_Lit + : VariantKind::AGVK_Lit64, + {MCConstantExpr::create(Value, Ctx, /*PrintInHex=*/true)}, Ctx); +} + static KnownBits fromOptionalToKnownBits(std::optional CompareResult) { static constexpr unsigned BitWidth = 64; const APInt True(BitWidth, 1); @@ -513,7 +530,9 @@ static void targetOpKnownBitsMapHelper(const MCExpr *Expr, KnownBitsMap &KBM, case AMDGPUMCExpr::VariantKind::AGVK_ExtraSGPRs: case AMDGPUMCExpr::VariantKind::AGVK_TotalNumVGPRs: case AMDGPUMCExpr::VariantKind::AGVK_AlignTo: - case AMDGPUMCExpr::VariantKind::AGVK_Occupancy: { + case AMDGPUMCExpr::VariantKind::AGVK_Occupancy: + case AMDGPUMCExpr::VariantKind::AGVK_Lit: + case AMDGPUMCExpr::VariantKind::AGVK_Lit64: { int64_t Val; if (AGVK->evaluateAsAbsolute(Val)) { APInt APValue(BitWidth, Val); @@ -709,3 +728,15 @@ void llvm::AMDGPU::printAMDGPUMCExpr(const MCExpr *Expr, raw_ostream &OS, MAI->printExpr(OS, *Expr); } + +bool AMDGPU::isLitExpr(const MCExpr *Expr) { + const auto *E = dyn_cast(Expr); + return E && (E->getKind() == AMDGPUMCExpr::AGVK_Lit || + E->getKind() == AMDGPUMCExpr::AGVK_Lit64); +} + +int64_t AMDGPU::getLitValue(const MCExpr *Expr) { + assert(isLitExpr(Expr)); + return cast(cast(Expr)->getArgs()[0]) + ->getValue(); +} diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.h index bc6fdf7f2e4cd..54fcd2af49ecd 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.h +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.h @@ -17,6 +17,8 @@ namespace llvm { class Function; class GCNSubtarget; +enum class LitModifier { None, Lit, Lit64 }; + /// AMDGPU target specific MCExpr operations. /// /// Takes in a minimum of 1 argument to be used with an operation. The supported @@ -36,7 +38,9 @@ class AMDGPUMCExpr : public MCTargetExpr { AGVK_ExtraSGPRs, AGVK_TotalNumVGPRs, AGVK_AlignTo, - AGVK_Occupancy + AGVK_Occupancy, + AGVK_Lit, + AGVK_Lit64, }; // Relocation specifiers. @@ -99,6 +103,9 @@ class AMDGPUMCExpr : public MCTargetExpr { const MCExpr *NumVGPRs, unsigned DynamicVGPRBlockSize, const GCNSubtarget &STM, MCContext &Ctx); + static const AMDGPUMCExpr *createLit(LitModifier Lit, int64_t Value, + MCContext &Ctx); + ArrayRef getArgs() const { return Args; } VariantKind getKind() const { return Kind; } const MCExpr *getSubExpr(size_t Index) const; @@ -129,6 +136,11 @@ const MCExpr *foldAMDGPUMCExpr(const MCExpr *Expr, MCContext &Ctx); static inline AMDGPUMCExpr::Specifier getSpecifier(const MCSymbolRefExpr *SRE) { return AMDGPUMCExpr::Specifier(SRE->getKind()); } + +LLVM_READONLY bool isLitExpr(const MCExpr *Expr); + +LLVM_READONLY int64_t getLitValue(const MCExpr *Expr); + } // end namespace AMDGPU } // end namespace llvm diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp index f2e2d0ed3f8a6..013cfeb364048 100644 --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp @@ -82,20 +82,36 @@ createAMDGPUMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) { MCSubtargetInfo *STI = createAMDGPUMCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, FS); + bool IsWave64 = STI->hasFeature(AMDGPU::FeatureWavefrontSize64); + bool IsWave32 = STI->hasFeature(AMDGPU::FeatureWavefrontSize32); + // FIXME: We should error for the default target. 
if (STI->getFeatureBits().none()) STI->ToggleFeature(AMDGPU::FeatureSouthernIslands); - if (!STI->hasFeature(AMDGPU::FeatureWavefrontSize64) && - !STI->hasFeature(AMDGPU::FeatureWavefrontSize32)) { + if (!IsWave64 && !IsWave32) { // If there is no default wave size it must be a generation before gfx10, // these have FeatureWavefrontSize64 in their definition already. For gfx10+ // set wave32 as a default. STI->ToggleFeature(AMDGPU::isGFX10Plus(*STI) ? AMDGPU::FeatureWavefrontSize32 : AMDGPU::FeatureWavefrontSize64); + } else if (IsWave64 && IsWave32) { + // The wave size is mutually exclusive. If both somehow end up set, wave32 + // wins if supported. + STI->ToggleFeature(AMDGPU::supportsWave32(*STI) + ? AMDGPU::FeatureWavefrontSize64 + : AMDGPU::FeatureWavefrontSize32); + + // If both wavesizes were manually requested, hack in a feature to permit + // assembling modules with mixed wavesizes. + STI->ToggleFeature(AMDGPU::FeatureAssemblerPermissiveWavesize); } + assert((STI->hasFeature(AMDGPU::FeatureWavefrontSize64) != + STI->hasFeature(AMDGPU::FeatureWavefrontSize32)) && + "wavesize features are mutually exclusive"); + return STI; } diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp index 38331b614bceb..fed37788802b9 100644 --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -712,10 +712,15 @@ bool SIFoldOperandsImpl::updateOperand(FoldCandidate &Fold) const { TII->getRegClass(MI->getDesc(), Fold.UseOpNo, TRI)) { const TargetRegisterClass *NewRC = TRI->getRegClassForReg(*MRI, New->getReg()); - const TargetRegisterClass *ConstrainRC = - TRI->findCommonRegClass(OpRC, Old.getSubReg(), NewRC, New->getSubReg()); - if (!ConstrainRC) - return false; + + const TargetRegisterClass *ConstrainRC = OpRC; + if (New->getSubReg()) { + ConstrainRC = + TRI->getMatchingSuperRegClass(NewRC, OpRC, New->getSubReg()); + + if (!ConstrainRC) + return false; + } if (!MRI->constrainRegClass(New->getReg(), ConstrainRC)) { LLVM_DEBUG(dbgs() << "Cannot constrain " << printReg(New->getReg(), TRI) @@ -1308,6 +1313,15 @@ void SIFoldOperandsImpl::foldOperand( if (MovSrcRC) { if (UseSubReg) MovSrcRC = TRI->getMatchingSuperRegClass(SrcRC, MovSrcRC, UseSubReg); + + // FIXME: We should be able to directly check immediate operand legality + // for all cases, but gfx908 hacks break. 
+ if (MovOp == AMDGPU::AV_MOV_B32_IMM_PSEUDO && + (!OpToFold.isImm() || + !TII->isImmOperandLegal(MovDesc, SrcIdx, + *OpToFold.getEffectiveImmVal()))) + break; + if (!MRI->constrainRegClass(SrcReg, MovSrcRC)) break; diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp index 7c5d4fc2dacf6..e4b3528b432bb 100644 --- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp @@ -924,6 +924,7 @@ bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const { case TargetStackID::SGPRSpill: return true; case TargetStackID::ScalableVector: + case TargetStackID::ScalablePredicateVector: case TargetStackID::WasmLocal: return false; } diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 4fd703c1f810e..f7265c5fda9dc 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -64,14 +64,6 @@ static cl::opt UseDivergentRegisterIndexing( cl::desc("Use indirect register addressing for divergent indexes"), cl::init(false)); -// TODO: This option should be removed once we switch to always using PTRADD in -// the SelectionDAG. -static cl::opt UseSelectionDAGPTRADD( - "amdgpu-use-sdag-ptradd", cl::Hidden, - cl::desc("Generate ISD::PTRADD nodes for 64-bit pointer arithmetic in the " - "SelectionDAG ISel"), - cl::init(false)); - static bool denormalModeIsFlushAllF32(const MachineFunction &MF) { const SIMachineFunctionInfo *Info = MF.getInfo(); return Info->getMode().FP32Denormals == DenormalMode::getPreserveSign(); @@ -851,6 +843,13 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM, setOperationAction(ISD::SELECT, {MVT::v4i16, MVT::v4f16, MVT::v4bf16}, Custom); + if (Subtarget->hasBF16PackedInsts()) { + for (MVT VT : {MVT::v4bf16, MVT::v8bf16, MVT::v16bf16, MVT::v32bf16}) + // Split vector operations. + setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FCANONICALIZE}, + VT, Custom); + } + if (Subtarget->hasPackedFP32Ops()) { setOperationAction({ISD::FADD, ISD::FMUL, ISD::FMA, ISD::FNEG}, MVT::v2f32, Legal); @@ -5907,6 +5906,8 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, const GCNSubtarget &ST = MF->getSubtarget(); const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const DebugLoc &DL = MI.getDebugLoc(); switch (MI.getOpcode()) { case AMDGPU::WAVE_REDUCE_UMIN_PSEUDO_U32: @@ -5947,7 +5948,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_XOR_B64); case AMDGPU::S_UADDO_PSEUDO: case AMDGPU::S_USUBO_PSEUDO: { - const DebugLoc &DL = MI.getDebugLoc(); MachineOperand &Dest0 = MI.getOperand(0); MachineOperand &Dest1 = MI.getOperand(1); MachineOperand &Src0 = MI.getOperand(2); @@ -5962,9 +5962,9 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, .add(Src1); // clang-format on - BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CSELECT_B64), Dest1.getReg()) - .addImm(1) - .addImm(0); + unsigned SelOpc = + Subtarget->isWave64() ? 
AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32; + BuildMI(*BB, MI, DL, TII->get(SelOpc), Dest1.getReg()).addImm(-1).addImm(0); MI.eraseFromParent(); return BB; @@ -5975,9 +5975,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, } case AMDGPU::V_ADD_U64_PSEUDO: case AMDGPU::V_SUB_U64_PSEUDO: { - MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); - const DebugLoc &DL = MI.getDebugLoc(); - bool IsAdd = (MI.getOpcode() == AMDGPU::V_ADD_U64_PSEUDO); MachineOperand &Dest = MI.getOperand(0); @@ -6070,9 +6067,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, // This pseudo has a chance to be selected // only from uniform add/subcarry node. All the VGPR operands // therefore assumed to be splat vectors. - MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); MachineBasicBlock::iterator MII = MI; - const DebugLoc &DL = MI.getDebugLoc(); MachineOperand &Dest = MI.getOperand(0); MachineOperand &CarryDest = MI.getOperand(1); MachineOperand &Src0 = MI.getOperand(2); @@ -6136,7 +6131,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, // clang-format on unsigned SelOpc = - (ST.isWave64()) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32; + ST.isWave64() ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32; BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg()) .addImm(-1) @@ -6165,7 +6160,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, case AMDGPU::GET_GROUPSTATICSIZE: { assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA || getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL); - DebugLoc DL = MI.getDebugLoc(); BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) .add(MI.getOperand(0)) .addImm(MFI->getLDSSize()); @@ -6174,8 +6168,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, } case AMDGPU::GET_SHADERCYCLESHILO: { assert(MF->getSubtarget().hasShaderCyclesHiLoRegisters()); - MachineRegisterInfo &MRI = MF->getRegInfo(); - const DebugLoc &DL = MI.getDebugLoc(); // The algorithm is: // // hi1 = getreg(SHADER_CYCLES_HI) @@ -6238,12 +6230,9 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, case AMDGPU::SI_KILL_I1_PSEUDO: return splitKillBlock(MI, BB); case AMDGPU::V_CNDMASK_B64_PSEUDO: { - MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); - Register Dst = MI.getOperand(0).getReg(); const MachineOperand &Src0 = MI.getOperand(1); const MachineOperand &Src1 = MI.getOperand(2); - const DebugLoc &DL = MI.getDebugLoc(); Register SrcCond = MI.getOperand(3).getReg(); Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); @@ -6296,7 +6285,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, return BB; } case AMDGPU::SI_BR_UNDEF: { - const DebugLoc &DL = MI.getDebugLoc(); MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) .add(MI.getOperand(0)); Br->getOperand(1).setIsUndef(); // read undef SCC @@ -6312,8 +6300,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, return BB; } case AMDGPU::SI_CALL_ISEL: { - const DebugLoc &DL = MI.getDebugLoc(); - unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); MachineInstrBuilder MIB; @@ -6330,7 +6316,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, case AMDGPU::V_SUB_CO_U32_e32: case AMDGPU::V_SUBREV_CO_U32_e32: { // TODO: Define distinct V_*_I32_Pseudo instructions instead. 
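Illustrative aside (not part of the patch; plain C++ with invented names): the S_UADDO_PSEUDO/S_USUBO_PSEUDO change above now picks s_cselect_b32 or s_cselect_b64 from the wave size and, as I read it, materializes the carry-out as -1/0, i.e. as an all-ones or all-zero lane mask rather than the integer 1. A minimal sketch of that convention:

#include <cassert>
#include <cstdint>

// Toy model: the scalar carry-out is broadcast to every lane of the wave, so
// it is stored as a lane mask (all ones or all zeros), not as 0/1.
uint64_t carryLaneMask(bool wave64, bool scc) {
  const uint64_t allOnes = wave64 ? ~uint64_t(0) : uint64_t(0xffffffff);
  return scc ? allOnes : 0; // what "s_cselect_bN dst, -1, 0" computes
}

int main() {
  assert(carryLaneMask(true, true) == ~uint64_t(0));   // wave64: 64 ones
  assert(carryLaneMask(false, true) == 0xffffffffull); // wave32: 32 ones
  assert(carryLaneMask(true, false) == 0);
}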
- const DebugLoc &DL = MI.getDebugLoc(); unsigned Opc = MI.getOpcode(); bool NeedClampOperand = false; @@ -6411,7 +6396,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, } if (SetRoundOp || SetDenormOp) { - MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg()); if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) { unsigned ImmVal = Def->getOperand(1).getImm(); @@ -6448,7 +6432,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, MI.setDesc(TII->get(AMDGPU::COPY)); return BB; case AMDGPU::ENDPGM_TRAP: { - const DebugLoc &DL = MI.getDebugLoc(); if (BB->succ_empty() && std::next(MI.getIterator()) == BB->end()) { MI.setDesc(TII->get(AMDGPU::S_ENDPGM)); MI.addOperand(MachineOperand::CreateImm(0)); @@ -6475,7 +6458,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, } case AMDGPU::SIMULATED_TRAP: { assert(Subtarget->hasPrivEnabledTrap2NopBug()); - MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); MachineBasicBlock *SplitBB = TII->insertSimulatedTrap(MRI, *BB, MI, MI.getDebugLoc()); MI.eraseFromParent(); @@ -6638,10 +6620,12 @@ SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op, SelectionDAG &DAG) const { unsigned Opc = Op.getOpcode(); EVT VT = Op.getValueType(); - assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 || - VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i16 || - VT == MVT::v16f16 || VT == MVT::v8f32 || VT == MVT::v16f32 || - VT == MVT::v32f32 || VT == MVT::v32i16 || VT == MVT::v32f16); + assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16 || + VT == MVT::v4f32 || VT == MVT::v8i16 || VT == MVT::v8f16 || + VT == MVT::v8bf16 || VT == MVT::v16i16 || VT == MVT::v16f16 || + VT == MVT::v16bf16 || VT == MVT::v8f32 || VT == MVT::v16f32 || + VT == MVT::v32f32 || VT == MVT::v32i16 || VT == MVT::v32f16 || + VT == MVT::v32bf16); auto [Lo, Hi] = DAG.SplitVectorOperand(Op.getNode(), 0); @@ -7540,17 +7524,30 @@ SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, SelectionDAG &DAG) const { SDNode *BR = nullptr; SDNode *SetCC = nullptr; - if (Intr->getOpcode() == ISD::SETCC) { + switch (Intr->getOpcode()) { + case ISD::SETCC: { // As long as we negate the condition everything is fine SetCC = Intr; Intr = SetCC->getOperand(0).getNode(); - - } else { + break; + } + case ISD::XOR: { + // Similar to SETCC, if we have (xor c, -1), we will be fine. 
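Illustrative aside (not part of the patch): the new ISD::XOR case relies on the fact that xor-ing an i1 condition with an all-ones constant negates it, and branching on the negated condition is the same as branching on the original condition with the taken and fallthrough targets swapped; the lowering can therefore strip the xor just like the SETCC case and invert the branch later. A minimal check of that identity, with invented names:

#include <cassert>

struct Targets { int taken, fallthrough; };

// brcond(cond) transfers control to 'taken' when cond is true.
int branch(bool cond, Targets t) { return cond ? t.taken : t.fallthrough; }

int main() {
  Targets t{1, 2};
  for (bool c : {false, true}) {
    // br (xor c, -1) to T / fallthrough F  ==  br c with T and F swapped.
    assert(branch(!c, t) == branch(c, Targets{t.fallthrough, t.taken}));
  }
}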
+ SDValue LHS = Intr->getOperand(0); + SDValue RHS = Intr->getOperand(1); + if (auto *C = dyn_cast<ConstantSDNode>(RHS); C && C->getZExtValue()) { + Intr = LHS.getNode(); + break; + } + [[fallthrough]]; + } + default: { // Get the target from BR if we don't negate the condition BR = findUser(BRCOND, ISD::BR); assert(BR && "brcond missing unconditional branch user"); Target = BR->getOperand(1); } + } unsigned CFNode = isCFIntrinsic(Intr); if (CFNode == 0) { @@ -11461,7 +11458,7 @@ static bool isNoUnsignedWrap(SDValue Addr) { bool SITargetLowering::shouldPreservePtrArith(const Function &F, EVT PtrVT) const { - return UseSelectionDAGPTRADD && PtrVT == MVT::i64; + return PtrVT == MVT::i64; } bool SITargetLowering::canTransformPtrArithOutOfBounds(const Function &F, @@ -11590,29 +11587,61 @@ SDValue SITargetLowering::lowerPointerAsRsrcIntrin(SDNode *Op, SDValue NumRecords = Op->getOperand(3); SDValue Flags = Op->getOperand(4); - auto [LowHalf, HighHalf] = DAG.SplitScalar(Pointer, Loc, MVT::i32, MVT::i32); - SDValue Mask = DAG.getConstant(0x0000ffff, Loc, MVT::i32); - SDValue Masked = DAG.getNode(ISD::AND, Loc, MVT::i32, HighHalf, Mask); - std::optional ConstStride = std::nullopt; - if (auto *ConstNode = dyn_cast<ConstantSDNode>(Stride)) - ConstStride = ConstNode->getZExtValue(); - - SDValue NewHighHalf = Masked; - if (!ConstStride || *ConstStride != 0) { - SDValue ShiftedStride; - if (ConstStride) { - ShiftedStride = DAG.getConstant(*ConstStride << 16, Loc, MVT::i32); - } else { - SDValue ExtStride = DAG.getAnyExtOrTrunc(Stride, Loc, MVT::i32); - ShiftedStride = - DAG.getNode(ISD::SHL, Loc, MVT::i32, ExtStride, - DAG.getShiftAmountConstant(16, MVT::i32, Loc)); - } - NewHighHalf = DAG.getNode(ISD::OR, Loc, MVT::i32, Masked, ShiftedStride); + SDValue ExtStride = DAG.getAnyExtOrTrunc(Stride, Loc, MVT::i32); + SDValue Rsrc; + + if (Subtarget->has45BitNumRecordsBufferResource()) { + SDValue Zero = DAG.getConstant(0, Loc, MVT::i32); + // Build the lower 64-bit value, which has a 57-bit base and the lower 7 bits of + // num_records. + SDValue ExtPointer = DAG.getAnyExtOrTrunc(Pointer, Loc, MVT::i64); + SDValue NumRecordsLHS = + DAG.getNode(ISD::SHL, Loc, MVT::i64, NumRecords, + DAG.getShiftAmountConstant(57, MVT::i32, Loc)); + SDValue LowHalf = + DAG.getNode(ISD::OR, Loc, MVT::i64, ExtPointer, NumRecordsLHS); + + // Build the higher 64-bit value, which has the higher 38 bits of num_records, + // 6 zero bits (omitted), the 16-bit stride and scale, and the 4-bit flags.
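Illustrative aside (not part of the patch): as I read the has45BitNumRecordsBufferResource() path being built here, the 128-bit descriptor ends up with base[56:0] and num_records[6:0] in the low 64 bits, and num_records[44:7], six zero bits, the 16-bit stride/scale and the 4-bit flags in the high 64 bits. A standalone sketch of that packing (field names and positions are inferred from the comments above, so treat them as assumptions):

#include <cassert>
#include <cstdint>

struct Rsrc128 { uint64_t lo, hi; };

// Hypothetical packer mirroring the layout described in the lowering above.
Rsrc128 packRsrc45(uint64_t base57, uint64_t numRecords45, uint32_t stride16,
                   uint32_t flags4) {
  Rsrc128 r;
  r.lo = (base57 & ((uint64_t(1) << 57) - 1)) // bits 56:0  base address
         | (numRecords45 << 57);              // bits 63:57 num_records[6:0]
  r.hi = (numRecords45 >> 7)                  // bits 37:0  num_records[44:7]
         | (uint64_t(stride16) << 44)         // bits 59:44 stride and scale
         | (uint64_t(flags4) << 60);          // bits 63:60 flags
  return r;
}

int main() {
  Rsrc128 r = packRsrc45(0x123456789abcd, 0x1fffffffffff, 0x10, 0x3);
  assert((r.lo >> 57) == 0x7f);            // low 7 bits of num_records
  assert(((r.hi >> 44) & 0xffff) == 0x10); // stride
  assert((r.hi >> 60) == 0x3);             // flags
}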
+ SDValue NumRecordsRHS = + DAG.getNode(ISD::SRL, Loc, MVT::i64, NumRecords, + DAG.getShiftAmountConstant(7, MVT::i32, Loc)); + SDValue ShiftedStride = + DAG.getNode(ISD::SHL, Loc, MVT::i32, ExtStride, + DAG.getShiftAmountConstant(12, MVT::i32, Loc)); + SDValue ExtShiftedStrideVec = + DAG.getNode(ISD::BUILD_VECTOR, Loc, MVT::v2i32, Zero, ShiftedStride); + SDValue ExtShiftedStride = + DAG.getNode(ISD::BITCAST, Loc, MVT::i64, ExtShiftedStrideVec); + SDValue ShiftedFlags = + DAG.getNode(ISD::SHL, Loc, MVT::i32, Flags, + DAG.getShiftAmountConstant(28, MVT::i32, Loc)); + SDValue ExtShiftedFlagsVec = + DAG.getNode(ISD::BUILD_VECTOR, Loc, MVT::v2i32, Zero, ShiftedFlags); + SDValue ExtShiftedFlags = + DAG.getNode(ISD::BITCAST, Loc, MVT::i64, ExtShiftedFlagsVec); + SDValue CombinedFields = + DAG.getNode(ISD::OR, Loc, MVT::i64, NumRecordsRHS, ExtShiftedStride); + SDValue HighHalf = + DAG.getNode(ISD::OR, Loc, MVT::i64, CombinedFields, ExtShiftedFlags); + + Rsrc = DAG.getNode(ISD::BUILD_VECTOR, Loc, MVT::v2i64, LowHalf, HighHalf); + } else { + NumRecords = DAG.getAnyExtOrTrunc(NumRecords, Loc, MVT::i32); + auto [LowHalf, HighHalf] = + DAG.SplitScalar(Pointer, Loc, MVT::i32, MVT::i32); + SDValue Mask = DAG.getConstant(0x0000ffff, Loc, MVT::i32); + SDValue Masked = DAG.getNode(ISD::AND, Loc, MVT::i32, HighHalf, Mask); + SDValue ShiftedStride = + DAG.getNode(ISD::SHL, Loc, MVT::i32, ExtStride, + DAG.getShiftAmountConstant(16, MVT::i32, Loc)); + SDValue NewHighHalf = + DAG.getNode(ISD::OR, Loc, MVT::i32, Masked, ShiftedStride); + + Rsrc = DAG.getNode(ISD::BUILD_VECTOR, Loc, MVT::v4i32, LowHalf, NewHighHalf, + NumRecords, Flags); } - SDValue Rsrc = DAG.getNode(ISD::BUILD_VECTOR, Loc, MVT::v4i32, LowHalf, - NewHighHalf, NumRecords, Flags); SDValue RsrcPtr = DAG.getNode(ISD::BITCAST, Loc, MVT::i128, Rsrc); return RsrcPtr; } @@ -15198,36 +15227,13 @@ SITargetLowering::performExtractVectorEltCombine(SDNode *N, return V; } - // EXTRACT_VECTOR_ELT (v2i32 bitcast (i64/f64:k), Idx) - // => - // i32:Lo(k) if Idx == 0, or - // i32:Hi(k) if Idx == 1 - auto *Idx = dyn_cast(N->getOperand(1)); - if (Vec.getOpcode() == ISD::BITCAST && VecVT == MVT::v2i32 && Idx) { - SDLoc SL(N); - SDValue PeekThrough = Vec.getOperand(0); - auto *KImm = dyn_cast(PeekThrough); - if (KImm && KImm->getValueType(0).getSizeInBits() == 64) { - uint64_t KImmValue = KImm->getZExtValue(); - return DAG.getConstant( - (KImmValue >> (32 * Idx->getZExtValue())) & 0xffffffff, SL, MVT::i32); - } - auto *KFPImm = dyn_cast(PeekThrough); - if (KFPImm && KFPImm->getValueType(0).getSizeInBits() == 64) { - uint64_t KFPImmValue = - KFPImm->getValueAPF().bitcastToAPInt().getZExtValue(); - return DAG.getConstant((KFPImmValue >> (32 * Idx->getZExtValue())) & - 0xffffffff, - SL, MVT::i32); - } - } - if (!DCI.isBeforeLegalize()) return SDValue(); // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit // elements. This exposes more load reduction opportunities by replacing // multiple small extract_vector_elements with a single 32-bit extract. 
+ auto *Idx = dyn_cast(N->getOperand(1)); if (isa(Vec) && VecEltSize <= 16 && VecEltVT.isByteSized() && VecSize > 32 && VecSize % 32 == 0 && Idx) { EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT); diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp index 7ec98851d0bef..76bfce8c0f6f9 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -418,15 +418,14 @@ class WaitcntGeneratorGFX12Plus : public WaitcntGenerator { class SIInsertWaitcnts { public: const GCNSubtarget *ST; + const SIInstrInfo *TII = nullptr; + const SIRegisterInfo *TRI = nullptr; + const MachineRegisterInfo *MRI = nullptr; InstCounterType SmemAccessCounter; InstCounterType MaxCounter; const unsigned *WaitEventMaskForInst; private: - const SIInstrInfo *TII = nullptr; - const SIRegisterInfo *TRI = nullptr; - const MachineRegisterInfo *MRI = nullptr; - DenseMap SLoadAddresses; DenseMap PreheadersToFlush; MachineLoopInfo *MLI; @@ -495,13 +494,6 @@ class SIInsertWaitcnts { bool isVMEMOrFlatVMEM(const MachineInstr &MI) const; bool run(MachineFunction &MF); - bool isForceEmitWaitcnt() const { - for (auto T : inst_counter_types()) - if (ForceEmitWaitcnt[T]) - return true; - return false; - } - void setForceEmitWaitcnt() { // For non-debug builds, ForceEmitWaitcnt has been initialized to false; // For debug builds, get the debug counter info and adjust if need be @@ -570,10 +562,6 @@ class SIInsertWaitcnts { return VmemReadMapping[getVmemType(Inst)]; } - bool hasXcnt() const { return ST->hasWaitXCnt(); } - - bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const; - bool mayAccessLDSThroughFlat(const MachineInstr &MI) const; bool isVmemAccess(const MachineInstr &MI) const; bool generateWaitcntInstBefore(MachineInstr &MI, WaitcntBrackets &ScoreBrackets, @@ -591,7 +579,6 @@ class SIInsertWaitcnts { WaitcntBrackets &ScoreBrackets); bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block, WaitcntBrackets &ScoreBrackets); - static bool asynchronouslyWritesSCC(unsigned Opcode); }; // This objects maintains the current score brackets of each wait counter, and @@ -643,8 +630,6 @@ class WaitcntBrackets { bool merge(const WaitcntBrackets &Other); RegInterval getRegInterval(const MachineInstr *MI, - const MachineRegisterInfo *MRI, - const SIRegisterInfo *TRI, const MachineOperand &Op) const; bool counterOutOfOrder(InstCounterType T) const; @@ -662,9 +647,7 @@ class WaitcntBrackets { void applyWaitcnt(const AMDGPU::Waitcnt &Wait); void applyWaitcnt(InstCounterType T, unsigned Count); void applyXcnt(const AMDGPU::Waitcnt &Wait); - void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI, - const MachineRegisterInfo *MRI, WaitEventType E, - MachineInstr &MI); + void updateByEvent(WaitEventType E, MachineInstr &MI); unsigned hasPendingEvent() const { return PendingEvents; } unsigned hasPendingEvent(WaitEventType E) const { @@ -773,10 +756,8 @@ class WaitcntBrackets { void setScoreByInterval(RegInterval Interval, InstCounterType CntTy, unsigned Score); - void setScoreByOperand(const MachineInstr *MI, const SIRegisterInfo *TRI, - const MachineRegisterInfo *MRI, - const MachineOperand &Op, InstCounterType CntTy, - unsigned Val); + void setScoreByOperand(const MachineInstr *MI, const MachineOperand &Op, + InstCounterType CntTy, unsigned Val); const SIInsertWaitcnts *Context; @@ -833,12 +814,13 @@ class SIInsertWaitcntsLegacy : public MachineFunctionPass { } // end anonymous namespace RegInterval 
WaitcntBrackets::getRegInterval(const MachineInstr *MI, - const MachineRegisterInfo *MRI, - const SIRegisterInfo *TRI, const MachineOperand &Op) const { if (Op.getReg() == AMDGPU::SCC) return {SCC, SCC + 1}; + const SIRegisterInfo *TRI = Context->TRI; + const MachineRegisterInfo *MRI = Context->MRI; + if (!TRI->isInAllocatableClass(Op.getReg())) return {-1, -1}; @@ -903,11 +885,9 @@ void WaitcntBrackets::setScoreByInterval(RegInterval Interval, } void WaitcntBrackets::setScoreByOperand(const MachineInstr *MI, - const SIRegisterInfo *TRI, - const MachineRegisterInfo *MRI, const MachineOperand &Op, InstCounterType CntTy, unsigned Score) { - RegInterval Interval = getRegInterval(MI, MRI, TRI, Op); + RegInterval Interval = getRegInterval(MI, Op); setScoreByInterval(Interval, CntTy, Score); } @@ -939,10 +919,7 @@ bool WaitcntBrackets::hasPointSamplePendingVmemTypes( return hasOtherPendingVmemTypes(Interval, VMEM_NOSAMPLER); } -void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII, - const SIRegisterInfo *TRI, - const MachineRegisterInfo *MRI, - WaitEventType E, MachineInstr &Inst) { +void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) { InstCounterType T = eventCounter(Context->WaitEventMaskForInst, E); unsigned UB = getScoreUB(T); @@ -955,6 +932,10 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII, PendingEvents |= 1 << E; setScoreUB(T, CurrScore); + const SIRegisterInfo *TRI = Context->TRI; + const MachineRegisterInfo *MRI = Context->MRI; + const SIInstrInfo *TII = Context->TII; + if (T == EXP_CNT) { // Put score on the source vgprs. If this is a store, just use those // specific register(s). @@ -962,59 +943,56 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII, // All GDS operations must protect their address register (same as // export.) 
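Illustrative aside (not part of the patch): updateByEvent() above implements the score-bracket idea of this pass: each event bumps the counter's upper bound and stamps the registers it touches with the new score, and a later use only needs to wait until the outstanding-event count drops below the difference. A toy model of that bookkeeping, with made-up sizes:

#include <array>
#include <cassert>

// Toy score bracket: lb/ub are retired vs issued event counts, regScore is the
// per-register stamp; a wait is needed only if the stamp is newer than lb.
struct Bracket {
  unsigned lb = 0, ub = 0;
  std::array<unsigned, 8> regScore{};

  void updateByEvent(int reg) { regScore[reg] = ++ub; }
  unsigned waitNeededFor(int reg) const {
    // Wait until at most (ub - regScore[reg]) events are still outstanding.
    return regScore[reg] > lb ? ub - regScore[reg] : ~0u; // ~0u: no wait
  }
};

int main() {
  Bracket B;
  B.updateByEvent(3); // e.g. a VMEM load writing v3
  B.updateByEvent(5); // a later load writing v5
  assert(B.waitNeededFor(3) == 1);   // one later event may stay in flight
  assert(B.waitNeededFor(5) == 0);   // must drain everything
  assert(B.waitNeededFor(0) == ~0u); // never written: no wait needed
}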
if (const auto *AddrOp = TII->getNamedOperand(Inst, AMDGPU::OpName::addr)) - setScoreByOperand(&Inst, TRI, MRI, *AddrOp, EXP_CNT, CurrScore); + setScoreByOperand(&Inst, *AddrOp, EXP_CNT, CurrScore); if (Inst.mayStore()) { if (const auto *Data0 = TII->getNamedOperand(Inst, AMDGPU::OpName::data0)) - setScoreByOperand(&Inst, TRI, MRI, *Data0, EXP_CNT, CurrScore); + setScoreByOperand(&Inst, *Data0, EXP_CNT, CurrScore); if (const auto *Data1 = TII->getNamedOperand(Inst, AMDGPU::OpName::data1)) - setScoreByOperand(&Inst, TRI, MRI, *Data1, EXP_CNT, CurrScore); + setScoreByOperand(&Inst, *Data1, EXP_CNT, CurrScore); } else if (SIInstrInfo::isAtomicRet(Inst) && !SIInstrInfo::isGWS(Inst) && Inst.getOpcode() != AMDGPU::DS_APPEND && Inst.getOpcode() != AMDGPU::DS_CONSUME && Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) { for (const MachineOperand &Op : Inst.all_uses()) { if (TRI->isVectorRegister(*MRI, Op.getReg())) - setScoreByOperand(&Inst, TRI, MRI, Op, EXP_CNT, CurrScore); + setScoreByOperand(&Inst, Op, EXP_CNT, CurrScore); } } } else if (TII->isFLAT(Inst)) { if (Inst.mayStore()) { - setScoreByOperand(&Inst, TRI, MRI, + setScoreByOperand(&Inst, *TII->getNamedOperand(Inst, AMDGPU::OpName::data), EXP_CNT, CurrScore); } else if (SIInstrInfo::isAtomicRet(Inst)) { - setScoreByOperand(&Inst, TRI, MRI, + setScoreByOperand(&Inst, *TII->getNamedOperand(Inst, AMDGPU::OpName::data), EXP_CNT, CurrScore); } } else if (TII->isMIMG(Inst)) { if (Inst.mayStore()) { - setScoreByOperand(&Inst, TRI, MRI, Inst.getOperand(0), EXP_CNT, - CurrScore); + setScoreByOperand(&Inst, Inst.getOperand(0), EXP_CNT, CurrScore); } else if (SIInstrInfo::isAtomicRet(Inst)) { - setScoreByOperand(&Inst, TRI, MRI, + setScoreByOperand(&Inst, *TII->getNamedOperand(Inst, AMDGPU::OpName::data), EXP_CNT, CurrScore); } } else if (TII->isMTBUF(Inst)) { if (Inst.mayStore()) - setScoreByOperand(&Inst, TRI, MRI, Inst.getOperand(0), EXP_CNT, - CurrScore); + setScoreByOperand(&Inst, Inst.getOperand(0), EXP_CNT, CurrScore); } else if (TII->isMUBUF(Inst)) { if (Inst.mayStore()) { - setScoreByOperand(&Inst, TRI, MRI, Inst.getOperand(0), EXP_CNT, - CurrScore); + setScoreByOperand(&Inst, Inst.getOperand(0), EXP_CNT, CurrScore); } else if (SIInstrInfo::isAtomicRet(Inst)) { - setScoreByOperand(&Inst, TRI, MRI, + setScoreByOperand(&Inst, *TII->getNamedOperand(Inst, AMDGPU::OpName::data), EXP_CNT, CurrScore); } } else if (TII->isLDSDIR(Inst)) { // LDSDIR instructions attach the score to the destination. - setScoreByOperand(&Inst, TRI, MRI, + setScoreByOperand(&Inst, *TII->getNamedOperand(Inst, AMDGPU::OpName::vdst), EXP_CNT, CurrScore); } else { @@ -1025,18 +1003,18 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII, // score. for (MachineOperand &DefMO : Inst.all_defs()) { if (TRI->isVGPR(*MRI, DefMO.getReg())) { - setScoreByOperand(&Inst, TRI, MRI, DefMO, EXP_CNT, CurrScore); + setScoreByOperand(&Inst, DefMO, EXP_CNT, CurrScore); } } } for (const MachineOperand &Op : Inst.all_uses()) { if (TRI->isVectorRegister(*MRI, Op.getReg())) - setScoreByOperand(&Inst, TRI, MRI, Op, EXP_CNT, CurrScore); + setScoreByOperand(&Inst, Op, EXP_CNT, CurrScore); } } } else if (T == X_CNT) { for (const MachineOperand &Op : Inst.all_uses()) - setScoreByOperand(&Inst, TRI, MRI, Op, T, CurrScore); + setScoreByOperand(&Inst, Op, T, CurrScore); } else /* LGKM_CNT || EXP_CNT || VS_CNT || NUM_INST_CNTS */ { // Match the score to the destination registers. 
// @@ -1048,7 +1026,7 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII, // Special cases where implicit register defs exists, such as M0 or VCC, // but none with memory instructions. for (const MachineOperand &Op : Inst.defs()) { - RegInterval Interval = getRegInterval(&Inst, MRI, TRI, Op); + RegInterval Interval = getRegInterval(&Inst, Op); if (T == LOAD_CNT || T == SAMPLE_CNT || T == BVH_CNT) { if (Interval.first >= NUM_ALL_VGPRS) continue; @@ -1109,7 +1087,7 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII, setRegScore(FIRST_LDS_VGPR, T, CurrScore); } - if (Context->asynchronouslyWritesSCC(Inst.getOpcode())) { + if (SIInstrInfo::isSBarrierSCCWrite(Inst.getOpcode())) { setRegScore(SCC, T, CurrScore); PendingSCCWrite = &Inst; } @@ -1831,12 +1809,6 @@ bool WaitcntGeneratorGFX12Plus::createNewWaitcnt( return Modified; } -static bool readsVCCZ(const MachineInstr &MI) { - unsigned Opc = MI.getOpcode(); - return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) && - !MI.getOperand(1).isUndef(); -} - /// \returns true if the callee inserts an s_waitcnt 0 on function entry. static bool callWaitsOnFunctionEntry(const MachineInstr &MI) { // Currently all conventions wait, but this may not always be the case. @@ -1871,26 +1843,24 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, assert(!MI.isMetaInstruction()); AMDGPU::Waitcnt Wait; + const unsigned Opc = MI.getOpcode(); // FIXME: This should have already been handled by the memory legalizer. // Removing this currently doesn't affect any lit tests, but we need to // verify that nothing was relying on this. The number of buffer invalidates // being handled here should not be expanded. - if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 || - MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC || - MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL || - MI.getOpcode() == AMDGPU::BUFFER_GL0_INV || - MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) { + if (Opc == AMDGPU::BUFFER_WBINVL1 || Opc == AMDGPU::BUFFER_WBINVL1_SC || + Opc == AMDGPU::BUFFER_WBINVL1_VOL || Opc == AMDGPU::BUFFER_GL0_INV || + Opc == AMDGPU::BUFFER_GL1_INV) { Wait.LoadCnt = 0; } // All waits must be resolved at call return. // NOTE: this could be improved with knowledge of all call sites or // with knowledge of the called routines. - if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG || - MI.getOpcode() == AMDGPU::SI_RETURN || - MI.getOpcode() == AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN || - MI.getOpcode() == AMDGPU::S_SETPC_B64_return || + if (Opc == AMDGPU::SI_RETURN_TO_EPILOG || Opc == AMDGPU::SI_RETURN || + Opc == AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN || + Opc == AMDGPU::S_SETPC_B64_return || (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) { Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false)); } @@ -1902,8 +1872,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, // send a message to explicitly release all VGPRs before the stores have // completed, but it is only safe to do this if there are no outstanding // scratch stores. - else if (MI.getOpcode() == AMDGPU::S_ENDPGM || - MI.getOpcode() == AMDGPU::S_ENDPGM_SAVED) { + else if (Opc == AMDGPU::S_ENDPGM || Opc == AMDGPU::S_ENDPGM_SAVED) { if (!WCG->isOptNone() && (MI.getMF()->getInfo()->isDynamicVGPREnabled() || (ST->getGeneration() >= AMDGPUSubtarget::GFX11 && @@ -1912,8 +1881,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, ReleaseVGPRInsts.insert(&MI); } // Resolve vm waits before gs-done. 
- else if ((MI.getOpcode() == AMDGPU::S_SENDMSG || - MI.getOpcode() == AMDGPU::S_SENDMSGHALT) && + else if ((Opc == AMDGPU::S_SENDMSG || Opc == AMDGPU::S_SENDMSGHALT) && ST->hasLegacyGeometry() && ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_PreGFX11_) == AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) { @@ -1938,7 +1906,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, // Wait for any pending GDS instruction to complete before any // "Always GDS" instruction. - if (TII->isAlwaysGDS(MI.getOpcode()) && ScoreBrackets.hasPendingGDS()) + if (TII->isAlwaysGDS(Opc) && ScoreBrackets.hasPendingGDS()) addWait(Wait, DS_CNT, ScoreBrackets.getPendingGDSWait()); if (MI.isCall() && callWaitsOnFunctionEntry(MI)) { @@ -1950,7 +1918,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, const auto &CallAddrOp = *TII->getNamedOperand(MI, AMDGPU::OpName::src0); if (CallAddrOp.isReg()) { RegInterval CallAddrOpInterval = - ScoreBrackets.getRegInterval(&MI, MRI, TRI, CallAddrOp); + ScoreBrackets.getRegInterval(&MI, CallAddrOp); ScoreBrackets.determineWait(SmemAccessCounter, CallAddrOpInterval, Wait); @@ -1958,13 +1926,13 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, if (const auto *RtnAddrOp = TII->getNamedOperand(MI, AMDGPU::OpName::dst)) { RegInterval RtnAddrOpInterval = - ScoreBrackets.getRegInterval(&MI, MRI, TRI, *RtnAddrOp); + ScoreBrackets.getRegInterval(&MI, *RtnAddrOp); ScoreBrackets.determineWait(SmemAccessCounter, RtnAddrOpInterval, Wait); } } - } else if (MI.getOpcode() == AMDGPU::S_BARRIER_WAIT) { + } else if (Opc == AMDGPU::S_BARRIER_WAIT) { ScoreBrackets.tryClearSCCWriteEvent(&MI); } else { // FIXME: Should not be relying on memoperands. @@ -2022,7 +1990,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, if (Op.isTied() && Op.isUse() && TII->doesNotReadTiedSource(MI)) continue; - RegInterval Interval = ScoreBrackets.getRegInterval(&MI, MRI, TRI, Op); + RegInterval Interval = ScoreBrackets.getRegInterval(&MI, Op); const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg()); if (IsVGPR) { @@ -2061,7 +2029,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, ScoreBrackets.determineWait(SmemAccessCounter, Interval, Wait); } - if (hasXcnt() && Op.isDef()) + if (ST->hasWaitXCnt() && Op.isDef()) ScoreBrackets.determineWait(X_CNT, Interval, Wait); } } @@ -2079,18 +2047,17 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, // // In all other cases, ensure safety by ensuring that there are no outstanding // memory operations. - if (MI.getOpcode() == AMDGPU::S_BARRIER && - !ST->hasAutoWaitcntBeforeBarrier() && !ST->supportsBackOffBarrier()) { + if (Opc == AMDGPU::S_BARRIER && !ST->hasAutoWaitcntBeforeBarrier() && + !ST->supportsBackOffBarrier()) { Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/true)); } // TODO: Remove this work-around, enable the assert for Bug 457939 // after fixing the scheduler. Also, the Shader Compiler code is // independent of target. - if (readsVCCZ(MI) && ST->hasReadVCCZBug()) { - if (ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) { - Wait.DsCnt = 0; - } + if (SIInstrInfo::isCBranchVCCZRead(MI) && ST->hasReadVCCZBug() && + ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) { + Wait.DsCnt = 0; } // Verify that the wait is actually needed. @@ -2165,19 +2132,19 @@ bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait, } // XCnt may be already consumed by a load wait. 
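Illustrative aside (not part of the patch): in this pass a counter value of ~0u means "no wait requested", so the hunk below restructures the XCnt logic to evaluate its three elision rules only when an XCnt wait is actually pending. A compact model of those rules under that convention:

#include <cassert>

constexpr unsigned NoWait = ~0u; // convention: ~0u == no wait required

struct Wait { unsigned KmCnt = NoWait, LoadCnt = NoWait, XCnt = NoWait; };

// Toy version of the XCnt elision rules in the hunk below.
void elideXCnt(Wait &W, bool pendingSmemGroup, bool pendingVmemGroup,
               bool isVmemAccess) {
  if (W.XCnt == NoWait)
    return;                                // nothing requested, nothing to do
  if (W.KmCnt == 0 && !pendingSmemGroup)   // SMEM side already fully drained
    W.XCnt = NoWait;
  if (W.LoadCnt == 0 && !pendingVmemGroup) // VMEM side already fully drained
    W.XCnt = NoWait;
  if (isVmemAccess)                        // in-order VMEM translation covers it
    W.XCnt = NoWait;
}

int main() {
  Wait W;
  W.XCnt = 0;
  W.KmCnt = 0;
  elideXCnt(W, /*pendingSmemGroup=*/false, /*pendingVmemGroup=*/true,
            /*isVmemAccess=*/false);
  assert(W.XCnt == NoWait); // dropped: the zero km-cnt wait already covers it
}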
- if (Wait.KmCnt == 0 && Wait.XCnt != ~0u && - !ScoreBrackets.hasPendingEvent(SMEM_GROUP)) - Wait.XCnt = ~0u; + if (Wait.XCnt != ~0u) { + if (Wait.KmCnt == 0 && !ScoreBrackets.hasPendingEvent(SMEM_GROUP)) + Wait.XCnt = ~0u; - if (Wait.LoadCnt == 0 && Wait.XCnt != ~0u && - !ScoreBrackets.hasPendingEvent(VMEM_GROUP)) - Wait.XCnt = ~0u; + if (Wait.LoadCnt == 0 && !ScoreBrackets.hasPendingEvent(VMEM_GROUP)) + Wait.XCnt = ~0u; - // Since the translation for VMEM addresses occur in-order, we can skip the - // XCnt if the current instruction is of VMEM type and has a memory dependency - // with another VMEM instruction in flight. - if (Wait.XCnt != ~0u && isVmemAccess(*It)) - Wait.XCnt = ~0u; + // Since the translation for VMEM addresses occur in-order, we can skip the + // XCnt if the current instruction is of VMEM type and has a memory + // dependency with another VMEM instruction in flight. + if (isVmemAccess(*It)) + Wait.XCnt = ~0u; + } if (WCG->createNewWaitcnt(Block, It, Wait)) Modified = true; @@ -2185,75 +2152,11 @@ bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait, return Modified; } -// This is a flat memory operation. Check to see if it has memory tokens other -// than LDS. Other address spaces supported by flat memory operations involve -// global memory. -bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const { - assert(TII->isFLAT(MI)); - - // All flat instructions use the VMEM counter except prefetch. - if (!TII->usesVM_CNT(MI)) - return false; - - // If there are no memory operands then conservatively assume the flat - // operation may access VMEM. - if (MI.memoperands_empty()) - return true; - - // See if any memory operand specifies an address space that involves VMEM. - // Flat operations only supported FLAT, LOCAL (LDS), or address spaces - // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The REGION - // (GDS) address space is not supported by flat operations. Therefore, simply - // return true unless only the LDS address space is found. - for (const MachineMemOperand *Memop : MI.memoperands()) { - unsigned AS = Memop->getAddrSpace(); - assert(AS != AMDGPUAS::REGION_ADDRESS); - if (AS != AMDGPUAS::LOCAL_ADDRESS) - return true; - } - - return false; -} - -// This is a flat memory operation. Check to see if it has memory tokens for -// either LDS or FLAT. -bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const { - assert(TII->isFLAT(MI)); - - // Flat instruction such as SCRATCH and GLOBAL do not use the lgkm counter. - if (!TII->usesLGKM_CNT(MI)) - return false; - - // If in tgsplit mode then there can be no use of LDS. - if (ST->isTgSplitEnabled()) - return false; - - // If there are no memory operands then conservatively assume the flat - // operation may access LDS. - if (MI.memoperands_empty()) - return true; - - // See if any memory operand specifies an address space that involves LDS. 
- for (const MachineMemOperand *Memop : MI.memoperands()) { - unsigned AS = Memop->getAddrSpace(); - if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) - return true; - } - - return false; -} - bool SIInsertWaitcnts::isVmemAccess(const MachineInstr &MI) const { - return (TII->isFLAT(MI) && mayAccessVMEMThroughFlat(MI)) || + return (TII->isFLAT(MI) && TII->mayAccessVMEMThroughFlat(MI)) || (TII->isVMEM(MI) && !AMDGPU::getMUBUFIsBufferInv(MI.getOpcode())); } -static bool isGFX12CacheInvOrWBInst(MachineInstr &Inst) { - auto Opc = Inst.getOpcode(); - return Opc == AMDGPU::GLOBAL_INV || Opc == AMDGPU::GLOBAL_WB || - Opc == AMDGPU::GLOBAL_WBINV; -} - // Return true if the next instruction is S_ENDPGM, following fallthrough // blocks if necessary. bool SIInsertWaitcnts::isNextENDPGM(MachineBasicBlock::instr_iterator It, @@ -2324,16 +2227,15 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst, if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) { if (TII->isAlwaysGDS(Inst.getOpcode()) || TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) { - ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst); - ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst); + ScoreBrackets->updateByEvent(GDS_ACCESS, Inst); + ScoreBrackets->updateByEvent(GDS_GPR_LOCK, Inst); ScoreBrackets->setPendingGDS(); } else { - ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst); + ScoreBrackets->updateByEvent(LDS_ACCESS, Inst); } } else if (TII->isFLAT(Inst)) { - if (isGFX12CacheInvOrWBInst(Inst)) { - ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst), - Inst); + if (SIInstrInfo::isGFX12CacheInvOrWBInst(Inst.getOpcode())) { + ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst); return; } @@ -2341,16 +2243,15 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst, int FlatASCount = 0; - if (mayAccessVMEMThroughFlat(Inst)) { + if (TII->mayAccessVMEMThroughFlat(Inst)) { ++FlatASCount; IsVMEMAccess = true; - ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst), - Inst); + ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst); } - if (mayAccessLDSThroughFlat(Inst)) { + if (TII->mayAccessLDSThroughFlat(Inst)) { ++FlatASCount; - ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst); + ScoreBrackets->updateByEvent(LDS_ACCESS, Inst); } // This is a flat memory operation that access both VMEM and LDS, so note it @@ -2361,16 +2262,15 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst, } else if (SIInstrInfo::isVMEM(Inst) && !llvm::AMDGPU::getMUBUFIsBufferInv(Inst.getOpcode())) { IsVMEMAccess = true; - ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst), - Inst); + ScoreBrackets->updateByEvent(getVmemWaitEventType(Inst), Inst); if (ST->vmemWriteNeedsExpWaitcnt() && (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))) { - ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst); + ScoreBrackets->updateByEvent(VMW_GPR_LOCK, Inst); } } else if (TII->isSMRD(Inst)) { IsSMEMAccess = true; - ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst); + ScoreBrackets->updateByEvent(SMEM_ACCESS, Inst); } else if (Inst.isCall()) { if (callWaitsOnFunctionReturn(Inst)) { // Act as a wait on everything @@ -2382,45 +2282,45 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst, ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt()); } } else if (SIInstrInfo::isLDSDIR(Inst)) { - ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_LDS_ACCESS, Inst); + 
ScoreBrackets->updateByEvent(EXP_LDS_ACCESS, Inst); } else if (TII->isVINTERP(Inst)) { int64_t Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm(); ScoreBrackets->applyWaitcnt(EXP_CNT, Imm); } else if (SIInstrInfo::isEXP(Inst)) { unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm(); if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31) - ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst); + ScoreBrackets->updateByEvent(EXP_PARAM_ACCESS, Inst); else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST) - ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst); + ScoreBrackets->updateByEvent(EXP_POS_ACCESS, Inst); else - ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst); - } else if (asynchronouslyWritesSCC(Inst.getOpcode())) { - ScoreBrackets->updateByEvent(TII, TRI, MRI, SCC_WRITE, Inst); + ScoreBrackets->updateByEvent(EXP_GPR_LOCK, Inst); + } else if (SIInstrInfo::isSBarrierSCCWrite(Inst.getOpcode())) { + ScoreBrackets->updateByEvent(SCC_WRITE, Inst); } else { switch (Inst.getOpcode()) { case AMDGPU::S_SENDMSG: case AMDGPU::S_SENDMSG_RTN_B32: case AMDGPU::S_SENDMSG_RTN_B64: case AMDGPU::S_SENDMSGHALT: - ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst); + ScoreBrackets->updateByEvent(SQ_MESSAGE, Inst); break; case AMDGPU::S_MEMTIME: case AMDGPU::S_MEMREALTIME: case AMDGPU::S_GET_BARRIER_STATE_M0: case AMDGPU::S_GET_BARRIER_STATE_IMM: - ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst); + ScoreBrackets->updateByEvent(SMEM_ACCESS, Inst); break; } } - if (!hasXcnt()) + if (!ST->hasWaitXCnt()) return; if (IsVMEMAccess) - ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_GROUP, Inst); + ScoreBrackets->updateByEvent(VMEM_GROUP, Inst); if (IsSMEMAccess) - ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_GROUP, Inst); + ScoreBrackets->updateByEvent(SMEM_GROUP, Inst); } bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score, @@ -2478,9 +2378,8 @@ bool WaitcntBrackets::merge(const WaitcntBrackets &Other) { unsigned OldEventsHasSCCWrite = OldEvents & (1 << SCC_WRITE); if (!OldEventsHasSCCWrite) { PendingSCCWrite = Other.PendingSCCWrite; - } else { - if (PendingSCCWrite != Other.PendingSCCWrite) - PendingSCCWrite = nullptr; + } else if (PendingSCCWrite != Other.PendingSCCWrite) { + PendingSCCWrite = nullptr; } } } @@ -2516,12 +2415,6 @@ static bool isWaitInstr(MachineInstr &Inst) { counterTypeForInstr(Opcode).has_value(); } -bool SIInsertWaitcnts::asynchronouslyWritesSCC(unsigned Opcode) { - return Opcode == AMDGPU::S_BARRIER_LEAVE || - Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM || - Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_M0; -} - // Generate s_waitcnt instructions where needed. bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block, @@ -2578,7 +2471,7 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF, OldWaitcntInstr = nullptr; // Restore vccz if it's not known to be correct already. - bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst); + bool RestoreVCCZ = !VCCZCorrect && SIInstrInfo::isCBranchVCCZRead(Inst); // Don't examine operands unless we need to track vccz correctness. 
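Illustrative aside (not part of the patch): RestoreVCCZ above is driven by the fact that vccz is a cached "VCC == 0" bit that can go stale on subtargets with the read-vccz bug or with partial VCC writes, so it has to be refreshed (by rewriting VCC to itself) before a conditional branch reads it. A toy model of that bookkeeping, simplified and with invented names:

#include <cassert>
#include <cstdint>

struct WaveState {
  uint64_t vcc = 0;
  bool vccz = true;        // cached "vcc == 0"
  bool vcczCorrect = true; // does the cache currently match vcc?

  void writeVccLo(uint32_t lo) { // a partial write that may not update vccz
    vcc = (vcc & ~0xffffffffull) | lo;
    vcczCorrect = false;
  }
  void restoreVccz() { vccz = (vcc == 0); vcczCorrect = true; }
  bool branchOnVccz() {
    if (!vcczCorrect)
      restoreVccz();             // what inserting the VCC-to-VCC copy achieves
    return vccz;
  }
};

int main() {
  WaveState w;
  w.writeVccLo(1);
  assert(!w.branchOnVccz()); // correct only because vccz was restored first
}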
if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) { @@ -2701,7 +2594,7 @@ bool SIInsertWaitcnts::isPreheaderToFlush( bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const { if (SIInstrInfo::isFLAT(MI)) - return mayAccessVMEMThroughFlat(MI); + return TII->mayAccessVMEMThroughFlat(MI); return SIInstrInfo::isVMEM(MI); } @@ -2724,15 +2617,14 @@ bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML, for (MachineBasicBlock *MBB : ML->blocks()) { for (MachineInstr &MI : *MBB) { if (isVMEMOrFlatVMEM(MI)) { - if (MI.mayLoad()) - HasVMemLoad = true; - if (MI.mayStore()) - HasVMemStore = true; + HasVMemLoad |= MI.mayLoad(); + HasVMemStore |= MI.mayStore(); } + for (const MachineOperand &Op : MI.all_uses()) { - if (!TRI->isVectorRegister(*MRI, Op.getReg())) + if (Op.isDebug() || !TRI->isVectorRegister(*MRI, Op.getReg())) continue; - RegInterval Interval = Brackets.getRegInterval(&MI, MRI, TRI, Op); + RegInterval Interval = Brackets.getRegInterval(&MI, Op); // Vgpr use for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) { // If we find a register that is loaded inside the loop, 1. and 2. @@ -2757,7 +2649,7 @@ bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML, // VMem load vgpr def if (isVMEMOrFlatVMEM(MI) && MI.mayLoad()) { for (const MachineOperand &Op : MI.all_defs()) { - RegInterval Interval = Brackets.getRegInterval(&MI, MRI, TRI, Op); + RegInterval Interval = Brackets.getRegInterval(&MI, Op); for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) { // If we find a register that is loaded inside the loop, 1. and 2. // are invalidated and we can exit. diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 9b364fdab5fd4..56435a50c87ad 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -124,7 +124,7 @@ static bool canRemat(const MachineInstr &MI) { return false; } -bool SIInstrInfo::isReallyTriviallyReMaterializable( +bool SIInstrInfo::isReMaterializableImpl( const MachineInstr &MI) const { if (canRemat(MI)) { @@ -145,7 +145,7 @@ bool SIInstrInfo::isReallyTriviallyReMaterializable( return true; } - return TargetInstrInfo::isReallyTriviallyReMaterializable(MI); + return TargetInstrInfo::isReMaterializableImpl(MI); } // Returns true if the scalar result of a VALU instruction depends on exec. @@ -4344,6 +4344,59 @@ bool SIInstrInfo::mayAccessScratchThroughFlat(const MachineInstr &MI) const { }); } +bool SIInstrInfo::mayAccessVMEMThroughFlat(const MachineInstr &MI) const { + assert(isFLAT(MI)); + + // All flat instructions use the VMEM counter except prefetch. + if (!usesVM_CNT(MI)) + return false; + + // If there are no memory operands then conservatively assume the flat + // operation may access VMEM. + if (MI.memoperands_empty()) + return true; + + // See if any memory operand specifies an address space that involves VMEM. + // Flat operations only supported FLAT, LOCAL (LDS), or address spaces + // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The REGION + // (GDS) address space is not supported by flat operations. Therefore, simply + // return true unless only the LDS address space is found. 
+ for (const MachineMemOperand *Memop : MI.memoperands()) { + unsigned AS = Memop->getAddrSpace(); + assert(AS != AMDGPUAS::REGION_ADDRESS); + if (AS != AMDGPUAS::LOCAL_ADDRESS) + return true; + } + + return false; +} + +bool SIInstrInfo::mayAccessLDSThroughFlat(const MachineInstr &MI) const { + assert(isFLAT(MI)); + + // Flat instruction such as SCRATCH and GLOBAL do not use the lgkm counter. + if (!usesLGKM_CNT(MI)) + return false; + + // If in tgsplit mode then there can be no use of LDS. + if (ST.isTgSplitEnabled()) + return false; + + // If there are no memory operands then conservatively assume the flat + // operation may access LDS. + if (MI.memoperands_empty()) + return true; + + // See if any memory operand specifies an address space that involves LDS. + for (const MachineMemOperand *Memop : MI.memoperands()) { + unsigned AS = Memop->getAddrSpace(); + if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) + return true; + } + + return false; +} + bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) { // Skip the full operand and register alias search modifiesRegister // does. There's only a handful of instructions that touch this, it's only an @@ -9506,6 +9559,13 @@ unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { DescSize = Desc.getSize(); } + // If FMA Pseudo inst, get correct MC code size + if (Opc == AMDGPU::V_FMA_MIX_F16_t16 || Opc == AMDGPU::V_FMA_MIX_BF16_t16) { + // All potential lowerings are the same size; arbitrarily pick one. + const MCInstrDesc &Desc = getMCOpcodeFromPseudo(AMDGPU::V_FMA_MIXLO_F16); + DescSize = Desc.getSize(); + } + return DescSize; } } diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h index e249fc6cbb79d..a21089f8e0fcc 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h @@ -244,7 +244,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo { return ST; } - bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override; + bool isReMaterializableImpl(const MachineInstr &MI) const override; bool isIgnorableUse(const MachineOperand &MO) const override; @@ -688,6 +688,12 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo { /// to not hit scratch. bool mayAccessScratchThroughFlat(const MachineInstr &MI) const; + /// \returns true for FLAT instructions that can access VMEM. + bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const; + + /// \returns true for FLAT instructions that can access LDS. 
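Illustrative aside (not part of the patch): ignoring the usesVM_CNT/usesLGKM_CNT and tgsplit pre-checks, the two predicates hoisted into SIInstrInfo above reduce to a test over the address spaces recorded in the memory operands, with "no memory operands" treated conservatively. A standalone sketch of just that core:

#include <cassert>
#include <vector>

enum class AS { Global, Local, Flat, Scratch, Constant };

// Toy mayAccessVMEMThroughFlat: anything besides a pure-LDS access uses VMEM.
bool mayAccessVMEMThroughFlat(const std::vector<AS> &memOps) {
  if (memOps.empty())
    return true; // conservatively assume VMEM
  for (AS A : memOps)
    if (A != AS::Local)
      return true;
  return false;
}

// Toy mayAccessLDSThroughFlat: LDS or a generic flat pointer may touch LDS.
bool mayAccessLDSThroughFlat(const std::vector<AS> &memOps) {
  if (memOps.empty())
    return true; // conservatively assume LDS
  for (AS A : memOps)
    if (A == AS::Local || A == AS::Flat)
      return true;
  return false;
}

int main() {
  assert(mayAccessVMEMThroughFlat({}) && mayAccessLDSThroughFlat({}));
  assert(!mayAccessVMEMThroughFlat({AS::Local}));
  assert(mayAccessVMEMThroughFlat({AS::Flat}) && mayAccessLDSThroughFlat({AS::Flat}));
  assert(!mayAccessLDSThroughFlat({AS::Global, AS::Scratch}));
}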
+ bool mayAccessLDSThroughFlat(const MachineInstr &MI) const; + static bool isBlockLoadStore(uint16_t Opcode) { switch (Opcode) { case AMDGPU::SI_BLOCK_SPILL_V1024_SAVE: @@ -748,6 +754,18 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo { return isLDSDMA(MI) && MI.getOpcode() != AMDGPU::BUFFER_STORE_LDS_DWORD; } + static bool isSBarrierSCCWrite(unsigned Opcode) { + return Opcode == AMDGPU::S_BARRIER_LEAVE || + Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM || + Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_M0; + } + + static bool isCBranchVCCZRead(const MachineInstr &MI) { + unsigned Opc = MI.getOpcode(); + return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) && + !MI.getOperand(1).isUndef(); + } + static bool isWQM(const MachineInstr &MI) { return MI.getDesc().TSFlags & SIInstrFlags::WQM; } @@ -1006,9 +1024,13 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo { Opcode == AMDGPU::S_BARRIER_INIT_M0 || Opcode == AMDGPU::S_BARRIER_INIT_IMM || Opcode == AMDGPU::S_BARRIER_JOIN_IMM || - Opcode == AMDGPU::S_BARRIER_LEAVE || - Opcode == AMDGPU::S_BARRIER_LEAVE_IMM || - Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER; + Opcode == AMDGPU::S_BARRIER_LEAVE || Opcode == AMDGPU::DS_GWS_INIT || + Opcode == AMDGPU::DS_GWS_BARRIER; + } + + static bool isGFX12CacheInvOrWBInst(unsigned Opc) { + return Opc == AMDGPU::GLOBAL_INV || Opc == AMDGPU::GLOBAL_WB || + Opc == AMDGPU::GLOBAL_WBINV; } static bool isF16PseudoScalarTrans(unsigned Opcode) { diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td index fb2cd04b364d7..18a53931a6390 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td @@ -7,9 +7,11 @@ //===----------------------------------------------------------------------===// def isWave32 : Predicate<"Subtarget->isWave32()">, - AssemblerPredicate <(all_of FeatureWavefrontSize32)>; + AssemblerPredicate <(any_of FeatureWavefrontSize32, + FeatureAssemblerPermissiveWavesize)>; def isWave64 : Predicate<"Subtarget->isWave64()">, - AssemblerPredicate <(all_of FeatureWavefrontSize64)>; + AssemblerPredicate <(any_of FeatureWavefrontSize64, + FeatureAssemblerPermissiveWavesize)>; class AMDGPUMnemonicAlias : MnemonicAlias, PredicateControl; diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td index d4c1bc6d84384..be084a952bc41 100644 --- a/llvm/lib/Target/AMDGPU/SIInstructions.td +++ b/llvm/lib/Target/AMDGPU/SIInstructions.td @@ -1466,8 +1466,7 @@ class VOPSelectPat_t16 : GCNPat < def : VOPSelectModsPat ; def : VOPSelectModsPat ; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { def : VOPSelectPat ; def : VOPSelectPat ; } // End True16Predicate = p @@ -2137,8 +2136,7 @@ def : GCNPat < >; foreach fp16vt = [f16, bf16] in { -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let SubtargetPredicate = p in { +let SubtargetPredicate = NotUseRealTrue16Insts in { def : GCNPat < (fabs (fp16vt VGPR_32:$src)), (V_AND_B32_e64 (S_MOV_B32 (i32 0x00007fff)), VGPR_32:$src) @@ -2230,8 +2228,7 @@ def : GCNPat < } foreach fp16vt = [f16, bf16] in { -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { def : GCNPat < (fcopysign fp16vt:$src0, fp16vt:$src1), (V_BFI_B32_e64 (S_MOV_B32 (i32 0x00007fff)), $src0, $src1) @@ -2287,8 +2284,9 @@ def : GCNPat < def : GCNPat < (fcopysign fp16vt:$src0, 
f32:$src1), - (EXTRACT_SUBREG (V_BFI_B32_e64 (S_MOV_B32 (i32 0x7fff0000)), - (REG_SEQUENCE VGPR_32, (i16 (IMPLICIT_DEF)), lo16, $src0, hi16), $src1), hi16) + (EXTRACT_SUBREG (V_BFI_B32_e64 (S_MOV_B32 (i32 0x00007fff)), + (REG_SEQUENCE VGPR_32, $src0, lo16, (i16 (IMPLICIT_DEF)), hi16), + (V_LSHRREV_B32_e64 (i32 16), $src1)), lo16) >; def : GCNPat < @@ -2353,23 +2351,21 @@ def : GCNPat < (S_MOV_B32 $ga) >; -foreach pred = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in { - let True16Predicate = pred in { - def : GCNPat < - (VGPRImm<(i16 imm)>:$imm), - (V_MOV_B32_e32 imm:$imm) - >; +let True16Predicate = NotUseRealTrue16Insts in { + def : GCNPat < + (VGPRImm<(i16 imm)>:$imm), + (V_MOV_B32_e32 imm:$imm) + >; - // FIXME: Workaround for ordering issue with peephole optimizer where - // a register class copy interferes with immediate folding. Should - // use s_mov_b32, which can be shrunk to s_movk_i32 + // FIXME: Workaround for ordering issue with peephole optimizer where + // a register class copy interferes with immediate folding. Should + // use s_mov_b32, which can be shrunk to s_movk_i32 - foreach vt = [f16, bf16] in { - def : GCNPat < - (VGPRImm<(vt fpimm)>:$imm), - (V_MOV_B32_e32 (vt (bitcast_fpimm_to_i32 $imm))) - >; - } + foreach vt = [f16, bf16] in { + def : GCNPat < + (VGPRImm<(vt fpimm)>:$imm), + (V_MOV_B32_e32 (vt (bitcast_fpimm_to_i32 $imm))) + >; } } @@ -2660,11 +2656,11 @@ let True16Predicate = NotHasTrue16BitInsts in { let SubtargetPredicate = isNotGFX9Plus in { def : ROTRPattern ; -def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl i64:$src0, (and i32:$src1, (i32 31))))), (V_ALIGNBIT_B32_e64 (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), (i32 (EXTRACT_SUBREG (i64 $src0), sub0)), $src1)>; -def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), (V_ALIGNBIT_B32_e64 (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), (i32 (EXTRACT_SUBREG (i64 $src0), sub0)), $src1)>; } // isNotGFX9Plus @@ -2678,8 +2674,8 @@ def : GCNPat < $src1, /* clamp */ 0, /* op_sel */ 0) >; -foreach pat = [(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))), - (i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1))))] in +foreach pat = [(i32 (DivergentUnaryFrag (srl i64:$src0, (and i32:$src1, (i32 31))))), + (i32 (DivergentUnaryFrag (srl i64:$src0, (i32 ShiftAmt32Imm:$src1))))] in def : GCNPat; -def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), (V_ALIGNBIT_B32_t16_e64 0, /* src0_modifiers */ (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), 0, /* src1_modifiers */ @@ -2734,7 +2730,7 @@ def : GCNPat < $src1, /* clamp */ 0, /* op_sel */ 0) >; -def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl i64:$src0, (and i32:$src1, (i32 31))))), (V_ALIGNBIT_B32_fake16_e64 0, /* src0_modifiers */ (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), 0, /* src1_modifiers */ @@ -2743,7 +2739,7 @@ def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))), $src1, /* clamp */ 0, /* op_sel */ 0) >; -def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), (V_ALIGNBIT_B32_fake16_e64 0, /* src0_modifiers */ (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), 0, /* src1_modifiers */ @@ -2858,8 +2854,7 @@ def : GCNPat< (i32 
(DivergentSextInreg i32:$src)), (V_BFE_I32_e64 i32:$src, (i32 0), (i32 1))>; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { def : GCNPat < (i16 (DivergentSextInreg i16:$src)), (V_BFE_I32_e64 $src, (i32 0), (i32 1)) @@ -3204,8 +3199,7 @@ def : GCNPat< } } // AddedComplexity = 1 -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { def : GCNPat< (i32 (DivergentUnaryFrag i16:$src)), (V_AND_B32_e64 (S_MOV_B32 (i32 0xffff)), $src) @@ -3415,8 +3409,7 @@ def : GCNPat < // Magic number: 1 | (0 << 8) | (12 << 16) | (12 << 24) // The 12s emit 0s. -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { def : GCNPat < (i16 (bswap i16:$a)), (V_PERM_B32_e64 (i32 0), VSrc_b32:$a, (S_MOV_B32 (i32 0x0c0c0001))) @@ -3669,8 +3662,7 @@ def : GCNPat < (S_LSHL_B32 SReg_32:$src1, (i16 16)) >; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { def : GCNPat < (v2i16 (DivergentBinFrag (i16 0), (i16 VGPR_32:$src1))), (v2i16 (V_LSHLREV_B32_e64 (i16 16), VGPR_32:$src1)) @@ -3706,8 +3698,7 @@ def : GCNPat < (COPY_TO_REGCLASS SReg_32:$src0, SReg_32) >; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { def : GCNPat < (vecTy (DivergentBinFrag (Ty VGPR_32:$src0), (Ty undef))), (COPY_TO_REGCLASS VGPR_32:$src0, VGPR_32) @@ -3734,8 +3725,7 @@ def : GCNPat < >; let SubtargetPredicate = HasVOP3PInsts in { -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in +let True16Predicate = NotUseRealTrue16Insts in def : GCNPat < (v2i16 (DivergentBinFrag (i16 VGPR_32:$src0), (i16 VGPR_32:$src1))), (v2i16 (V_LSHL_OR_B32_e64 $src1, (i32 16), (i32 (V_AND_B32_e64 (i32 (V_MOV_B32_e32 (i32 0xffff))), $src0)))) @@ -3765,8 +3755,7 @@ def : GCNPat < (S_PACK_LL_B32_B16 SReg_32:$src0, SReg_32:$src1) >; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { // Take the lower 16 bits from each VGPR_32 and concat them def : GCNPat < (vecTy (DivergentBinFrag (Ty VGPR_32:$a), (Ty VGPR_32:$b))), @@ -3837,8 +3826,7 @@ def : GCNPat < >; // Take the upper 16 bits from each VGPR_32 and concat them -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in +let True16Predicate = NotUseRealTrue16Insts in def : GCNPat < (vecTy (DivergentBinFrag (Ty !if(!eq(Ty, i16), @@ -3880,8 +3868,7 @@ def : GCNPat < (v2i16 (S_PACK_HL_B32_B16 SReg_32:$src0, SReg_32:$src1)) >; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { def : GCNPat < (v2f16 (scalar_to_vector f16:$src0)), (COPY $src0) diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp index 115a020f44098..8586d6c18b361 100644 --- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp +++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp @@ -293,7 +293,6 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) { LIS->InsertMachineInstrInMaps(*SetExec); LIS->InsertMachineInstrInMaps(*NewBr); - LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC); MI.eraseFromParent(); // FIXME: Is there a better way of adjusting the liveness? 
It shouldn't be @@ -363,9 +362,6 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) { RecomputeRegs.insert(SrcReg); RecomputeRegs.insert(DstReg); LIS->createAndComputeVirtRegInterval(SaveReg); - - // Let this be recomputed. - LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC); } void SILowerControlFlow::emitIfBreak(MachineInstr &MI) { @@ -828,7 +824,10 @@ bool SILowerControlFlow::run(MachineFunction &MF) { optimizeEndCf(); - if (LIS) { + if (LIS && Changed) { + // These will need to be recomputed for insertions and removals. + LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC); + LIS->removeAllRegUnitsForPhysReg(AMDGPU::SCC); for (Register Reg : RecomputeRegs) { LIS->removeInterval(Reg); LIS->createAndComputeVirtRegInterval(Reg); diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp index c85d2bb9fe9ae..484861dcaac07 100644 --- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp +++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp @@ -106,6 +106,7 @@ class SIMemOpInfo final { bool IsLastUse = false; bool IsCooperative = false; + // TODO: Should we assume Cooperative=true if no MMO is present? SIMemOpInfo( const GCNSubtarget &ST, AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent, @@ -299,6 +300,10 @@ class SICacheControl { bool enableNamedBit(const MachineBasicBlock::iterator MI, AMDGPU::CPol::CPol Bit) const; + /// Check if any atomic operation on AS can affect memory accessible via the + /// global address space. + bool canAffectGlobalAddrSpace(SIAtomicAddrSpace AS) const; + public: /// Create a cache control for the subtarget \p ST. @@ -334,6 +339,11 @@ class SICacheControl { bool IsNonTemporal, bool IsLastUse = false) const = 0; + /// Add final touches to a `mayStore` instruction \p MI, which may be a + /// Store or RMW instruction. + /// FIXME: This takes a MI because iterators aren't handled properly. When + /// this is called, they often point to entirely different insts. Thus we back + /// up the inst early and pass it here instead. 
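Illustrative aside (not part of the patch): the finalizeStore() FIXME above describes a pattern worth spelling out: iterators that later code keeps advancing can end up pointing at a different instruction, so the instruction of interest is captured up front and passed by reference, which stays valid across insertions into the list. A toy demonstration with a std::list standing in for the instruction list:

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> block = {10, 20, 30}; // stand-in for a basic block
  auto pos = std::next(block.begin()); // iterator the caller keeps moving
  int &store = *pos;                   // capture the "instruction" early

  ++pos;                               // later code repositions the iterator
  block.insert(pos, 25);               // and inserts around it

  assert(store == 20); // the captured reference still names the right element
  assert(*pos == 30);  // while the iterator now points somewhere else
}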
virtual bool finalizeStore(MachineInstr &MI, bool Atomic) const { return false; }; @@ -991,6 +1001,15 @@ bool SICacheControl::enableNamedBit(const MachineBasicBlock::iterator MI, return true; } +bool SICacheControl::canAffectGlobalAddrSpace(SIAtomicAddrSpace AS) const { + assert((!ST.hasGloballyAddressableScratch() || + (AS & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE || + (AS & SIAtomicAddrSpace::SCRATCH) == SIAtomicAddrSpace::NONE) && + "scratch instructions should already be replaced by flat " + "instructions if GloballyAddressableScratch is enabled"); + return (AS & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE; +} + /* static */ std::unique_ptr SICacheControl::create(const GCNSubtarget &ST) { GCNSubtarget::Generation Generation = ST.getGeneration(); @@ -1016,7 +1035,7 @@ bool SIGfx6CacheControl::enableLoadCacheBypass( assert(MI->mayLoad() && !MI->mayStore()); bool Changed = false; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: case SIAtomicScope::AGENT: @@ -1239,7 +1258,7 @@ bool SIGfx6CacheControl::insertAcquire(MachineBasicBlock::iterator &MI, if (Pos == Position::AFTER) ++MI; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: case SIAtomicScope::AGENT: @@ -1299,7 +1318,7 @@ bool SIGfx7CacheControl::insertAcquire(MachineBasicBlock::iterator &MI, if (Pos == Position::AFTER) ++MI; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: case SIAtomicScope::AGENT: @@ -1336,7 +1355,7 @@ bool SIGfx90ACacheControl::enableLoadCacheBypass( assert(MI->mayLoad() && !MI->mayStore()); bool Changed = false; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: case SIAtomicScope::AGENT: @@ -1378,7 +1397,7 @@ bool SIGfx90ACacheControl::enableRMWCacheBypass( assert(MI->mayLoad() && MI->mayStore()); bool Changed = false; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: case SIAtomicScope::AGENT: @@ -1487,7 +1506,7 @@ bool SIGfx90ACacheControl::insertAcquire(MachineBasicBlock::iterator &MI, if (Pos == Position::AFTER) ++MI; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: // Ensures that following loads will not see stale remote VMEM data or @@ -1551,7 +1570,7 @@ bool SIGfx90ACacheControl::insertRelease(MachineBasicBlock::iterator &MI, if (Pos == Position::AFTER) ++MI; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: // Inserting a "S_WAITCNT vmcnt(0)" before is not required because the @@ -1594,7 +1613,7 @@ bool SIGfx940CacheControl::enableLoadCacheBypass( assert(MI->mayLoad() && !MI->mayStore()); bool Changed = false; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: // Set SC bits to indicate system scope. 
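Illustrative aside (not part of the patch): the assert in canAffectGlobalAddrSpace() above encodes an implication: when globally addressable scratch is enabled, any access set that still contains SCRATCH must also contain GLOBAL, because scratch instructions should already have been rewritten to flat/global form; the return value itself only looks at GLOBAL. A toy version with plain bit flags:

#include <cassert>

enum : unsigned { GLOBAL = 1, SCRATCH = 2, LDS = 4 };

// Toy canAffectGlobalAddrSpace(): the result only checks GLOBAL; the assert
// mirrors "scratch should have been lowered to flat/global first".
bool canAffectGlobalAS(unsigned AS, bool globallyAddressableScratch) {
  assert((!globallyAddressableScratch || (AS & GLOBAL) || !(AS & SCRATCH)) &&
         "scratch should already be expressed as a global access");
  return (AS & GLOBAL) != 0;
}

int main() {
  assert(canAffectGlobalAS(GLOBAL | SCRATCH, true));
  assert(!canAffectGlobalAS(LDS, true));      // LDS-only: no global effect
  assert(!canAffectGlobalAS(SCRATCH, false)); // plain scratch stays private
}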
@@ -1638,7 +1657,7 @@ bool SIGfx940CacheControl::enableStoreCacheBypass( assert(!MI->mayLoad() && MI->mayStore()); bool Changed = false; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: // Set SC bits to indicate system scope. @@ -1678,7 +1697,7 @@ bool SIGfx940CacheControl::enableRMWCacheBypass( assert(MI->mayLoad() && MI->mayStore()); bool Changed = false; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: // Set SC1 bit to indicate system scope. @@ -1756,7 +1775,7 @@ bool SIGfx940CacheControl::insertAcquire(MachineBasicBlock::iterator &MI, if (Pos == Position::AFTER) ++MI; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: // Ensures that following loads will not see stale remote VMEM data or @@ -1840,7 +1859,7 @@ bool SIGfx940CacheControl::insertRelease(MachineBasicBlock::iterator &MI, if (Pos == Position::AFTER) ++MI; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: // Inserting a "S_WAITCNT vmcnt(0)" before is not required because the @@ -1897,7 +1916,7 @@ bool SIGfx10CacheControl::enableLoadCacheBypass( assert(MI->mayLoad() && !MI->mayStore()); bool Changed = false; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: case SIAtomicScope::AGENT: @@ -2129,7 +2148,7 @@ bool SIGfx10CacheControl::insertAcquire(MachineBasicBlock::iterator &MI, if (Pos == Position::AFTER) ++MI; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: case SIAtomicScope::AGENT: @@ -2194,7 +2213,7 @@ bool SIGfx11CacheControl::enableLoadCacheBypass( assert(MI->mayLoad() && !MI->mayStore()); bool Changed = false; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: case SIAtomicScope::AGENT: @@ -2368,7 +2387,10 @@ bool SIGfx12CacheControl::insertWait(MachineBasicBlock::iterator &MI, // which shares the same L0. // // GFX12.5: - // TODO DOCS + // CU$ has two ports. To ensure operations are visible at the workgroup + // level, we need to ensure all operations in this port have completed + // so the other SIMDs in the WG can see them. There is no ordering + // guarantee between the ports. if (!ST.isCuModeEnabled() || ST.hasGFX1250Insts()) { if ((Op & SIMemOp::LOAD) != SIMemOp::NONE) LOADCnt |= true; @@ -2462,7 +2484,7 @@ bool SIGfx12CacheControl::insertAcquire(MachineBasicBlock::iterator &MI, /// memory. /// Other address spaces do not have a cache. - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) == SIAtomicAddrSpace::NONE) + if (!canAffectGlobalAddrSpace(AddrSpace)) return false; AMDGPU::CPol::CPol ScopeImm = AMDGPU::CPol::SCOPE_DEV; @@ -2483,8 +2505,7 @@ bool SIGfx12CacheControl::insertAcquire(MachineBasicBlock::iterator &MI, // Otherwise in CU mode all waves of a work-group are on the same CU, and // so the L0 does not need to be invalidated. 
// - // GFX12.5 - // TODO DOCS + // GFX12.5 has a shared WGP$, so no invalidates are required. if (ST.isCuModeEnabled()) return false; @@ -2523,12 +2544,13 @@ bool SIGfx12CacheControl::insertRelease(MachineBasicBlock::iterator &MI, // writeback as all memory operations by the same thread are // sequentially consistent, and no other thread can access scratch // memory. - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { if (Pos == Position::AFTER) ++MI; // global_wb is only necessary at system scope for GFX12.0, - // they're also necessary at device scope for GFX12.5. + // they're also necessary at device scope for GFX12.5 as stores + // cannot report completion earlier than L2. // // Emitting it for lower scopes is a slow no-op, so we omit it // for performance. @@ -2539,7 +2561,7 @@ bool SIGfx12CacheControl::insertRelease(MachineBasicBlock::iterator &MI, Changed = true; break; case SIAtomicScope::AGENT: - // TODO DOCS + // GFX12.5 may have >1 L2 per device so we must emit a device scope WB. if (ST.hasGFX1250Insts()) { BuildMI(MBB, MI, DL, TII->get(AMDGPU::GLOBAL_WB)) .addImm(AMDGPU::CPol::SCOPE_DEV); @@ -2655,7 +2677,7 @@ bool SIGfx12CacheControl::setAtomicScope(const MachineBasicBlock::iterator &MI, SIAtomicAddrSpace AddrSpace) const { bool Changed = false; - if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) != SIAtomicAddrSpace::NONE) { + if (canAffectGlobalAddrSpace(AddrSpace)) { switch (Scope) { case SIAtomicScope::SYSTEM: Changed |= setScope(MI, AMDGPU::CPol::SCOPE_SYS); diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td index 296ce5a46287c..84287b621fe78 100644 --- a/llvm/lib/Target/AMDGPU/SOPInstructions.td +++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td @@ -352,10 +352,12 @@ def S_XNOR_SAVEEXEC_B64 : SOP1_64 <"s_xnor_saveexec_b64">; } // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC] +let Defs = [SCC] in { def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32", [(set i32:$sdst, (int_amdgcn_s_quadmask i32:$src0))]>; def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64", [(set i64:$sdst, (int_amdgcn_s_quadmask i64:$src0))]>; +} let Uses = [M0] in { def S_MOVRELS_B32 : SOP1_32R <"s_movrels_b32">; @@ -1616,7 +1618,8 @@ def S_BARRIER_WAIT : SOPP_Pseudo <"s_barrier_wait", (ins i16imm:$simm16), "$simm let isConvergent = 1; } -def S_BARRIER_LEAVE : SOPP_Pseudo <"s_barrier_leave", (ins)> { + def S_BARRIER_LEAVE : SOPP_Pseudo <"s_barrier_leave", + (ins), "", [(int_amdgcn_s_barrier_leave (i16 srcvalue))] > { let SchedRW = [WriteBarrier]; let simm16 = 0; let fixed_imm = 1; @@ -1624,9 +1627,6 @@ def S_BARRIER_LEAVE : SOPP_Pseudo <"s_barrier_leave", (ins)> { let Defs = [SCC]; } -def S_BARRIER_LEAVE_IMM : SOPP_Pseudo <"s_barrier_leave", - (ins i16imm:$simm16), "$simm16", [(int_amdgcn_s_barrier_leave timm:$simm16)]>; - def S_WAKEUP : SOPP_Pseudo <"s_wakeup", (ins) > { let SubtargetPredicate = isGFX8Plus; let simm16 = 0; diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp index 14ebbf8e9c929..6489e63d4f6b8 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUAsmUtils.cpp @@ -14,11 +14,9 @@ namespace llvm::AMDGPU { //===----------------------------------------------------------------------===// // Custom Operands. // -// A table of custom operands shall describe "primary" operand names first -// followed by aliases if any. 
It is not required but recommended to arrange -// operands so that operand encoding match operand position in the table. This -// will make getNameFromOperandTable() a bit more efficient. Unused slots in the -// table shall have an empty name. +// A table of custom operands must be ordered by Encoding in ascending order +// to enable binary search lookup. Within entries that share the same encoding, +// "primary" operand names should be listed first followed by aliases if any. // //===----------------------------------------------------------------------===// @@ -27,21 +25,18 @@ template static StringRef getNameFromOperandTable(const CustomOperand (&Table)[N], unsigned Encoding, const MCSubtargetInfo &STI) { - auto isValidIndexForEncoding = [&](size_t Idx) { - return Idx < N && Table[Idx].Encoding == Encoding && - !Table[Idx].Name.empty() && - (!Table[Idx].Cond || Table[Idx].Cond(STI)); - }; - - // This is an optimization that should work in most cases. As a side effect, - // it may cause selection of an alias instead of a primary operand name in - // case of sparse tables. - if (isValidIndexForEncoding(Encoding)) - return Table[Encoding].Name; - - for (size_t Idx = 0; Idx != N; ++Idx) - if (isValidIndexForEncoding(Idx)) - return Table[Idx].Name; + // Find the first entry with the target encoding + auto First = + std::lower_bound(Table, Table + N, Encoding, + [](const CustomOperand &Entry, unsigned TargetEncoding) { + return Entry.Encoding < TargetEncoding; + }); + + // Search through entries with the same encoding to find the first valid one + for (auto It = First; It != Table + N && It->Encoding == Encoding; ++It) { + if (It->Encoding == Encoding && (!It->Cond || It->Cond(STI))) + return It->Name; + } return ""; } @@ -92,10 +87,11 @@ namespace SendMsg { // clang-format off static constexpr CustomOperand MsgOperands[] = { - {{""}}, {{"MSG_INTERRUPT"}, ID_INTERRUPT}, {{"MSG_GS"}, ID_GS_PreGFX11, isNotGFX11Plus}, + {{"MSG_HS_TESSFACTOR"}, ID_HS_TESSFACTOR_GFX11Plus, isGFX11Plus}, {{"MSG_GS_DONE"}, ID_GS_DONE_PreGFX11, isNotGFX11Plus}, + {{"MSG_DEALLOC_VGPRS"}, ID_DEALLOC_VGPRS_GFX11Plus, isGFX11Plus}, {{"MSG_SAVEWAVE"}, ID_SAVEWAVE, isGFX8_GFX9_GFX10}, {{"MSG_STALL_WAVE_GEN"}, ID_STALL_WAVE_GEN, isGFX9_GFX10_GFX11}, {{"MSG_HALT_WAVES"}, ID_HALT_WAVES, isGFX9_GFX10_GFX11}, @@ -103,10 +99,8 @@ static constexpr CustomOperand MsgOperands[] = { {{"MSG_EARLY_PRIM_DEALLOC"}, ID_EARLY_PRIM_DEALLOC, isGFX9_GFX10}, {{"MSG_GS_ALLOC_REQ"}, ID_GS_ALLOC_REQ, isGFX9Plus}, {{"MSG_GET_DOORBELL"}, ID_GET_DOORBELL, isGFX9_GFX10}, - {{"MSG_GET_DDID"}, ID_GET_DDID, isGFX10}, - {{"MSG_HS_TESSFACTOR"}, ID_HS_TESSFACTOR_GFX11Plus, isGFX11Plus}, - {{"MSG_DEALLOC_VGPRS"}, ID_DEALLOC_VGPRS_GFX11Plus, isGFX11Plus}, {{"MSG_SAVEWAVE_HAS_TDM"}, ID_SAVEWAVE_HAS_TDM, isGFX1250}, + {{"MSG_GET_DDID"}, ID_GET_DDID, isGFX10}, {{"MSG_SYSMSG"}, ID_SYSMSG}, {{"MSG_RTN_GET_DOORBELL"}, ID_RTN_GET_DOORBELL, isGFX11Plus}, {{"MSG_RTN_GET_DDID"}, ID_RTN_GET_DDID, isGFX11Plus}, @@ -121,7 +115,6 @@ static constexpr CustomOperand MsgOperands[] = { }; static constexpr CustomOperand SysMsgOperands[] = { - {{""}}, {{"SYSMSG_OP_ECC_ERR_INTERRUPT"}, OP_SYS_ECC_ERR_INTERRUPT}, {{"SYSMSG_OP_REG_RD"}, OP_SYS_REG_RD}, {{"SYSMSG_OP_HOST_TRAP_ACK"}, OP_SYS_HOST_TRAP_ACK, isNotGFX9Plus}, @@ -169,68 +162,67 @@ namespace Hwreg { // NOLINTBEGIN // clang-format off static constexpr CustomOperand Operands[] = { - {{""}}, - {{"HW_REG_MODE"}, ID_MODE}, - {{"HW_REG_STATUS"}, ID_STATUS}, - {{"HW_REG_TRAPSTS"}, ID_TRAPSTS, isNotGFX12Plus}, - {{"HW_REG_HW_ID"}, 
ID_HW_ID, isNotGFX10Plus}, - {{"HW_REG_GPR_ALLOC"}, ID_GPR_ALLOC}, - {{"HW_REG_LDS_ALLOC"}, ID_LDS_ALLOC}, - {{"HW_REG_IB_STS"}, ID_IB_STS}, - {{""}}, - {{""}}, - {{"HW_REG_PERF_SNAPSHOT_DATA"}, ID_PERF_SNAPSHOT_DATA_gfx12, isGFX12Plus}, - {{"HW_REG_PERF_SNAPSHOT_PC_LO"}, ID_PERF_SNAPSHOT_PC_LO_gfx12, isGFX12Plus}, - {{"HW_REG_PERF_SNAPSHOT_PC_HI"}, ID_PERF_SNAPSHOT_PC_HI_gfx12, isGFX12Plus}, - {{""}}, - {{""}}, - {{"HW_REG_SH_MEM_BASES"}, ID_MEM_BASES, isGFX9_GFX10_GFX11}, - {{"HW_REG_TBA_LO"}, ID_TBA_LO, isGFX9_GFX10}, - {{"HW_REG_TBA_HI"}, ID_TBA_HI, isGFX9_GFX10}, - {{"HW_REG_TMA_LO"}, ID_TMA_LO, isGFX9_GFX10}, - {{"HW_REG_TMA_HI"}, ID_TMA_HI, isGFX9_GFX10}, - {{"HW_REG_FLAT_SCR_LO"}, ID_FLAT_SCR_LO, isGFX10_GFX11}, - {{"HW_REG_FLAT_SCR_HI"}, ID_FLAT_SCR_HI, isGFX10_GFX11}, - {{"HW_REG_XNACK_MASK"}, ID_XNACK_MASK, isGFX10Before1030}, - {{"HW_REG_HW_ID1"}, ID_HW_ID1, isGFX10Plus}, - {{"HW_REG_HW_ID2"}, ID_HW_ID2, isGFX10Plus}, - {{"HW_REG_POPS_PACKER"}, ID_POPS_PACKER, isGFX10}, - {{""}}, - {{"HW_REG_PERF_SNAPSHOT_DATA"}, ID_PERF_SNAPSHOT_DATA_gfx11, isGFX11}, - {{"HW_REG_IB_STS2"}, ID_IB_STS2, isGFX1250}, - {{"HW_REG_SHADER_CYCLES"}, ID_SHADER_CYCLES, isGFX10_3_GFX11}, - {{"HW_REG_SHADER_CYCLES_HI"}, ID_SHADER_CYCLES_HI, isGFX12Plus}, - {{"HW_REG_DVGPR_ALLOC_LO"}, ID_DVGPR_ALLOC_LO, isGFX12Plus}, - {{"HW_REG_DVGPR_ALLOC_HI"}, ID_DVGPR_ALLOC_HI, isGFX12Plus}, - - // Register numbers reused in GFX11 - {{"HW_REG_PERF_SNAPSHOT_PC_LO"}, ID_PERF_SNAPSHOT_PC_LO_gfx11, isGFX11}, - {{"HW_REG_PERF_SNAPSHOT_PC_HI"}, ID_PERF_SNAPSHOT_PC_HI_gfx11, isGFX11}, - - // Register numbers reused in GFX12+ - {{"HW_REG_STATE_PRIV"}, ID_STATE_PRIV, isGFX12Plus}, - {{"HW_REG_PERF_SNAPSHOT_DATA1"}, ID_PERF_SNAPSHOT_DATA1, isGFX12Plus}, - {{"HW_REG_PERF_SNAPSHOT_DATA2"}, ID_PERF_SNAPSHOT_DATA2, isGFX12Plus}, - {{"HW_REG_EXCP_FLAG_PRIV"}, ID_EXCP_FLAG_PRIV, isGFX12Plus}, - {{"HW_REG_EXCP_FLAG_USER"}, ID_EXCP_FLAG_USER, isGFX12Plus}, - {{"HW_REG_TRAP_CTRL"}, ID_TRAP_CTRL, isGFX12Plus}, - {{"HW_REG_SCRATCH_BASE_LO"}, ID_FLAT_SCR_LO, isGFX12Plus}, - {{"HW_REG_SCRATCH_BASE_HI"}, ID_FLAT_SCR_HI, isGFX12Plus}, - {{"HW_REG_SHADER_CYCLES_LO"}, ID_SHADER_CYCLES, isGFX12Plus}, - - // GFX942 specific registers - {{"HW_REG_XCC_ID"}, ID_XCC_ID, isGFX940}, - {{"HW_REG_SQ_PERF_SNAPSHOT_DATA"}, ID_SQ_PERF_SNAPSHOT_DATA, isGFX940}, - {{"HW_REG_SQ_PERF_SNAPSHOT_DATA1"}, ID_SQ_PERF_SNAPSHOT_DATA1, isGFX940}, - {{"HW_REG_SQ_PERF_SNAPSHOT_PC_LO"}, ID_SQ_PERF_SNAPSHOT_PC_LO, isGFX940}, - {{"HW_REG_SQ_PERF_SNAPSHOT_PC_HI"}, ID_SQ_PERF_SNAPSHOT_PC_HI, isGFX940}, - - // GFX1250 - {{"HW_REG_XNACK_STATE_PRIV"}, ID_XNACK_STATE_PRIV, isGFX1250}, - {{"HW_REG_XNACK_MASK"}, ID_XNACK_MASK_gfx1250, isGFX1250}, - - // Aliases - {{"HW_REG_HW_ID"}, ID_HW_ID1, isGFX10}, + {{"HW_REG_WAVE_MODE"}, ID_MODE, isGFX12Plus}, + {{"HW_REG_MODE"}, ID_MODE}, + {{"HW_REG_WAVE_STATUS"}, ID_STATUS, isGFX12Plus}, + {{"HW_REG_STATUS"}, ID_STATUS}, + {{"HW_REG_TRAPSTS"}, ID_TRAPSTS, isNotGFX12Plus}, + {{"HW_REG_HW_ID"}, ID_HW_ID, isNotGFX10Plus}, + {{"HW_REG_WAVE_STATE_PRIV"}, ID_STATE_PRIV, isGFX12Plus}, + {{"HW_REG_STATE_PRIV"}, ID_STATE_PRIV, isGFX12Plus}, + {{"HW_REG_WAVE_GPR_ALLOC"}, ID_GPR_ALLOC, isGFX12Plus}, + {{"HW_REG_GPR_ALLOC"}, ID_GPR_ALLOC}, + {{"HW_REG_WAVE_LDS_ALLOC"}, ID_LDS_ALLOC, isGFX12Plus}, + {{"HW_REG_LDS_ALLOC"}, ID_LDS_ALLOC}, + {{"HW_REG_IB_STS"}, ID_IB_STS}, + {{"HW_REG_PERF_SNAPSHOT_DATA"}, ID_PERF_SNAPSHOT_DATA_gfx12, isGFX12Plus}, + {{"HW_REG_PERF_SNAPSHOT_PC_LO"}, ID_PERF_SNAPSHOT_PC_LO_gfx12, isGFX12Plus}, + 
{{"HW_REG_PERF_SNAPSHOT_PC_HI"}, ID_PERF_SNAPSHOT_PC_HI_gfx12, isGFX12Plus}, + {{"HW_REG_SH_MEM_BASES"}, ID_MEM_BASES, isGFX9_GFX10_GFX11}, + {{"HW_REG_PERF_SNAPSHOT_DATA1"}, ID_PERF_SNAPSHOT_DATA1, isGFX12Plus}, + {{"HW_REG_TBA_LO"}, ID_TBA_LO, isGFX9_GFX10}, + {{"HW_REG_PERF_SNAPSHOT_DATA2"}, ID_PERF_SNAPSHOT_DATA2, isGFX12Plus}, + {{"HW_REG_TBA_HI"}, ID_TBA_HI, isGFX9_GFX10}, + {{"HW_REG_WAVE_EXCP_FLAG_PRIV"}, ID_EXCP_FLAG_PRIV, isGFX12Plus}, + {{"HW_REG_EXCP_FLAG_PRIV"}, ID_EXCP_FLAG_PRIV, isGFX12Plus}, + {{"HW_REG_TMA_LO"}, ID_TMA_LO, isGFX9_GFX10}, + {{"HW_REG_PERF_SNAPSHOT_PC_LO"}, ID_PERF_SNAPSHOT_PC_LO_gfx11, isGFX11}, + {{"HW_REG_WAVE_EXCP_FLAG_USER"}, ID_EXCP_FLAG_USER, isGFX12Plus}, + {{"HW_REG_EXCP_FLAG_USER"}, ID_EXCP_FLAG_USER, isGFX12Plus}, + {{"HW_REG_TMA_HI"}, ID_TMA_HI, isGFX9_GFX10}, + {{"HW_REG_PERF_SNAPSHOT_PC_HI"}, ID_PERF_SNAPSHOT_PC_HI_gfx11, isGFX11}, + {{"HW_REG_WAVE_TRAP_CTRL"}, ID_TRAP_CTRL, isGFX12Plus}, + {{"HW_REG_TRAP_CTRL"}, ID_TRAP_CTRL, isGFX12Plus}, + {{"HW_REG_FLAT_SCR_LO"}, ID_FLAT_SCR_LO, isGFX10_GFX11}, + {{"HW_REG_WAVE_SCRATCH_BASE_LO"}, ID_FLAT_SCR_LO, isGFX12Plus}, + {{"HW_REG_SCRATCH_BASE_LO"}, ID_FLAT_SCR_LO, isGFX12Plus}, + {{"HW_REG_XCC_ID"}, ID_XCC_ID, isGFX940}, + {{"HW_REG_FLAT_SCR_HI"}, ID_FLAT_SCR_HI, isGFX10_GFX11}, + {{"HW_REG_WAVE_SCRATCH_BASE_HI"}, ID_FLAT_SCR_HI, isGFX12Plus}, + {{"HW_REG_SCRATCH_BASE_HI"}, ID_FLAT_SCR_HI, isGFX12Plus}, + {{"HW_REG_SQ_PERF_SNAPSHOT_DATA"}, ID_SQ_PERF_SNAPSHOT_DATA, isGFX940}, + {{"HW_REG_XNACK_MASK"}, ID_XNACK_MASK, isGFX10Before1030}, + {{"HW_REG_SQ_PERF_SNAPSHOT_DATA1"}, ID_SQ_PERF_SNAPSHOT_DATA1, isGFX940}, + {{"HW_REG_WAVE_HW_ID1"}, ID_HW_ID1, isGFX12Plus}, + {{"HW_REG_HW_ID1"}, ID_HW_ID1, isGFX10Plus}, + {{"HW_REG_HW_ID"}, ID_HW_ID1, isGFX10}, + {{"HW_REG_SQ_PERF_SNAPSHOT_PC_LO"}, ID_SQ_PERF_SNAPSHOT_PC_LO, isGFX940}, + {{"HW_REG_WAVE_HW_ID2"}, ID_HW_ID2, isGFX12Plus}, + {{"HW_REG_HW_ID2"}, ID_HW_ID2, isGFX10Plus}, + {{"HW_REG_SQ_PERF_SNAPSHOT_PC_HI"}, ID_SQ_PERF_SNAPSHOT_PC_HI, isGFX940}, + {{"HW_REG_POPS_PACKER"}, ID_POPS_PACKER, isGFX10}, + {{"HW_REG_PERF_SNAPSHOT_DATA"}, ID_PERF_SNAPSHOT_DATA_gfx11, isGFX11}, + {{"HW_REG_IB_STS2"}, ID_IB_STS2, isGFX1250}, + {{"HW_REG_SHADER_CYCLES"}, ID_SHADER_CYCLES, isGFX10_3_GFX11}, + {{"HW_REG_SHADER_CYCLES_LO"}, ID_SHADER_CYCLES, isGFX12Plus}, + {{"HW_REG_SHADER_CYCLES_HI"}, ID_SHADER_CYCLES_HI, isGFX12Plus}, + {{"HW_REG_WAVE_DVGPR_ALLOC_LO"}, ID_DVGPR_ALLOC_LO, isGFX12Plus}, + {{"HW_REG_DVGPR_ALLOC_LO"}, ID_DVGPR_ALLOC_LO, isGFX12Plus}, + {{"HW_REG_WAVE_DVGPR_ALLOC_HI"}, ID_DVGPR_ALLOC_HI, isGFX12Plus}, + {{"HW_REG_DVGPR_ALLOC_HI"}, ID_DVGPR_ALLOC_HI, isGFX12Plus}, + {{"HW_REG_XNACK_STATE_PRIV"}, ID_XNACK_STATE_PRIV, isGFX1250}, + {{"HW_REG_XNACK_MASK"}, ID_XNACK_MASK_gfx1250, isGFX1250}, + }; // clang-format on // NOLINTEND diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp index c80302e03beea..20fa1412a778e 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -3410,7 +3410,16 @@ MCPhysReg getVGPRWithMSBs(MCPhysReg Reg, unsigned MSBs, const MCRegisterClass *RC = getVGPRPhysRegClass(Reg, MRI); if (!RC) return AMDGPU::NoRegister; - return RC->getRegister(Idx | (MSBs << 8)); + + Idx |= MSBs << 8; + if (RC->getID() == AMDGPU::VGPR_16RegClassID) { + // This class has 2048 registers with interleaved lo16 and hi16. 
+ Idx *= 2; + if (Enc & AMDGPU::HWEncoding::IS_HI16) + ++Idx; + } + + return RC->getRegister(Idx); } std::pair diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h index 37b0262966160..2b9c063f42a5e 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h @@ -1568,6 +1568,11 @@ bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI); bool hasMAIInsts(const MCSubtargetInfo &STI); bool hasVOPD(const MCSubtargetInfo &STI); bool hasDPPSrc1SGPR(const MCSubtargetInfo &STI); + +inline bool supportsWave32(const MCSubtargetInfo &STI) { + return AMDGPU::isGFX10Plus(STI) && !AMDGPU::isGFX1250(STI); +} + int getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR, int32_t ArgNumVGPR); unsigned hasKernargPreload(const MCSubtargetInfo &STI); bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST); diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td index 6230c17e20804..54f57e02ed47e 100644 --- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td @@ -314,9 +314,10 @@ let SubtargetPredicate = HasGFX950Insts, OtherPredicates = [HasBF16ConversionIns defm V_CVT_F32_BF16 : VOP1Inst_t16 <"v_cvt_f32_bf16", VOP_F32_BF16>; } let SubtargetPredicate = isGFX1250Plus, OtherPredicates = [HasBF16ConversionInsts] in { - defm V_CVT_F32_BF16_gfx1250 : VOP1Inst_t16_with_profiles <"v_cvt_f32_bf16_gfx1250", VOP_F32_BF16, - VOPProfile_CVT_F32_BF16_gfx1250_t16, - VOPProfile_CVT_F32_BF16_gfx1250_fake16>; + let True16Predicate = UseRealTrue16Insts in + defm V_CVT_F32_BF16_gfx1250_t16 : VOP1Inst <"V_CVT_F32_BF16_gfx1250_t16", VOPProfile_CVT_F32_BF16_gfx1250_t16>; + let True16Predicate = UseFakeTrue16Insts in + defm V_CVT_F32_BF16_gfx1250_fake16 : VOP1Inst <"V_CVT_F32_BF16_gfx1250_fake16", VOPProfile_CVT_F32_BF16_gfx1250_fake16>; } let ReadsModeReg = 0, mayRaiseFPException = 0 in { @@ -899,6 +900,7 @@ class VOP1_DPP16_Gen op, VOP1_DPP_Pseudo ps, GFXGen Gen, VOPProfile p = let DecoderNamespace = Gen.DecoderNamespace; let OtherPredicates = !listconcat(ps.OtherPredicates, !if(p.HasExt64BitDPP, [HasDPALU_DPP], [])); + let True16Predicate = ps.True16Predicate; } class VOP1_DPP8 op, VOP1_Pseudo ps, VOPProfile p = ps.Pfl> : @@ -921,6 +923,7 @@ class VOP1_DPP8_Gen op, VOP1_Pseudo ps, GFXGen Gen, VOPProfile p = ps.Pf VOP1_DPP8 { let AssemblerPredicate = Gen.AssemblerPredicate; let DecoderNamespace = Gen.DecoderNamespace; + let True16Predicate = ps.True16Predicate; } //===----------------------------------------------------------------------===// @@ -1149,7 +1152,7 @@ defm V_TANH_F16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x01f>; defm V_PERMLANE16_SWAP_B32 : VOP1_Real_OpSelIsDPP_gfx1250<0x049>; defm V_TANH_BF16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x04a>; defm V_PRNG_B32 : VOP1_Real_FULL; -defm V_CVT_F32_BF16 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x072, "v_cvt_f32_bf16", "V_CVT_F32_BF16_gfx1250">; +defm V_CVT_F32_BF16_gfx1250 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x072, "v_cvt_f32_bf16">; defm V_SAT_PK4_I4_I8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x073>; defm V_SAT_PK4_U4_U8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x074>; defm V_CVT_PK_F16_FP8 : VOP1_Real_FULL_t16_and_fake16_gfx1250<0x075>; @@ -1561,8 +1564,7 @@ def : GCNPat < } // End OtherPredicates = [isGFX8Plus] -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let OtherPredicates = [isGFX8Plus, p] in { +let OtherPredicates = [isGFX8Plus, NotUseRealTrue16Insts] in { def : 
GCNPat< (i32 (anyext i16:$src)), (COPY $src) diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td index 37d92bc5076de..30dab55df7c29 100644 --- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td @@ -1378,8 +1378,7 @@ class ZExt_i16_i1_Pat : GCNPat < $src) >; -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in { +let True16Predicate = NotUseRealTrue16Insts in { def : GCNPat < (and i16:$src0, i16:$src1), (V_AND_B32_e64 VSrc_b32:$src0, VSrc_b32:$src1) diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td index e6a7c35dce0be..4a2b54dde68d3 100644 --- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td @@ -387,8 +387,7 @@ let SchedRW = [Write64Bit] in { } // End SchedRW = [Write64Bit] } // End isReMaterializable = 1 -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in +let True16Predicate = NotUseRealTrue16Insts in def : GCNPat< (i32 (DivergentUnaryFrag i16:$src)), (i32 (V_BFE_I32_e64 i16:$src, (i32 0), (i32 0x10))) @@ -501,8 +500,7 @@ def V_INTERP_P1LV_F16 : VOP3Interp <"v_interp_p1lv_f16", VOP3_INTERP16<[f32, f32 } // End SubtargetPredicate = Has16BitInsts, isCommutable = 1 -foreach p = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in -let True16Predicate = p in +let True16Predicate = NotUseRealTrue16Insts in def : GCNPat< (i64 (DivergentUnaryFrag i16:$src)), (REG_SEQUENCE VReg_64, diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td index f7279b664ed27..5daf860d540ca 100644 --- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td @@ -64,6 +64,13 @@ class VOP3P_Mix_Profile + : VOP3P_Mix_Profile { + let IsTrue16 = 1; + let IsRealTrue16 = 1; + let DstRC64 = getVALUDstForVT.ret; +} + multiclass VOP3PInst { def NAME : VOP3P_Pseudo { } // end SubtargetPredicate = isGFX11Plus } +multiclass VOP3_VOP3PInst_t16 { + def NAME : VOP3P_Pseudo; + + if P.HasExtVOP3DPP then + def _dpp : VOP3_DPP_Pseudo { + let VOP3P = 1; + let PseudoInstr = OpName#"_dpp"; + } +} + let isReMaterializable = 1 in { let isCommutable = 1 in { defm V_PK_MAD_I16 : VOP3PInst<"v_pk_mad_i16", VOP3P_Profile>; @@ -160,12 +177,9 @@ defm V_PK_MAXIMUM3_F16 : VOP3PInst<"v_pk_maximum3_f16", VOP3P_Profile { +multiclass MadFmaMixFP32Pats { defvar VOP3PMadMixModsPat = !if (!eq(VT, bf16), VOP3PMadMixBF16Mods, VOP3PMadMixMods); defvar VOP3PMadMixModsExtPat = !if (!eq(VT, bf16), VOP3PMadMixBF16ModsExt, VOP3PMadMixModsExt); // At least one of the operands needs to be an fpextend of an f16 @@ -189,7 +203,14 @@ multiclass MadFmaMixPats; +} +multiclass MadFmaMixFP16Pats { + defvar VOP3PMadMixModsPat = !if (!eq(VT, bf16), VOP3PMadMixBF16Mods, VOP3PMadMixMods); def : GCNPat < (AMDGPUclamp (build_vector (VT (fpround (fma_like (f32 (VOP3PMadMixModsPat VT:$lo_src0, i32:$lo_src0_modifiers)), @@ -243,9 +264,6 @@ multiclass MadFmaMixPats; +} - } // end True16Predicate +multiclass MadFmaMixFP16Pats_t16 { + defvar VOP3PMadMixModsPat = !if (!eq(VT, bf16), VOP3PMadMixBF16Mods, VOP3PMadMixMods); + def : GCNPat < + (VT (fpround (fmul (f32 (VOP3PMadMixModsPat f32:$src0, i32:$src0_modifiers)), + (f32 (VOP3PMadMixModsPat f32:$src1, i32:$src1_modifiers))))), + (mix_inst_16 $src0_modifiers, $src0, + $src1_modifiers, $src1, + (i32 0), (i32 0), + DSTCLAMP.NONE) + >; - let True16Predicate = UseRealTrue16Insts in { def : GCNPat < - 
(build_vector (VT (fpround (fma_like (f32 (VOP3PMadMixModsPat VT:$src0, i32:$src0_modifiers)), + (VT (fpround (fma_like (f32 (VOP3PMadMixModsPat VT:$src0, i32:$src0_modifiers)), (f32 (VOP3PMadMixModsPat VT:$src1, i32:$src1_modifiers)), - (f32 (VOP3PMadMixModsPat VT:$src2, i32:$src2_modifiers))))), VT:$elt1), - (vecVT (mixlo_inst $src0_modifiers, $src0, - $src1_modifiers, $src1, - $src2_modifiers, $src2, - DSTCLAMP.NONE, - (REG_SEQUENCE VGPR_32, (VT (IMPLICIT_DEF)), lo16, $elt1, hi16))) + (f32 (VOP3PMadMixModsPat VT:$src2, i32:$src2_modifiers))))), + (mix_inst_16 $src0_modifiers, $src0, + $src1_modifiers, $src1, + $src2_modifiers, $src2, + DSTCLAMP.NONE) >; + def : GCNPat < - (build_vector VT:$elt0, (VT (fpround (fma_like (f32 (VOP3PMadMixModsPat VT:$src0, i32:$src0_modifiers)), - (f32 (VOP3PMadMixModsPat VT:$src1, i32:$src1_modifiers)), - (f32 (VOP3PMadMixModsPat VT:$src2, i32:$src2_modifiers)))))), - (vecVT (mixhi_inst $src0_modifiers, $src0, - $src1_modifiers, $src1, - $src2_modifiers, $src2, - DSTCLAMP.NONE, - (REG_SEQUENCE VGPR_32, $elt0, lo16, (VT (IMPLICIT_DEF)), hi16))) + (AMDGPUclamp (VT (fpround (fma_like (f32 (VOP3PMadMixModsPat VT:$src0, i32:$src0_modifiers)), + (f32 (VOP3PMadMixModsPat VT:$src1, i32:$src1_modifiers)), + (f32 (VOP3PMadMixModsPat VT:$src2, i32:$src2_modifiers)))))), + (mix_inst_16 $src0_modifiers, $src0, + $src1_modifiers, $src1, + $src2_modifiers, $src2, + DSTCLAMP.ENABLE) >; def : GCNPat < - (build_vector - VT:$elt0, - (AMDGPUclamp (VT (fpround (fma_like (f32 (VOP3PMadMixModsPat VT:$src0, i32:$src0_modifiers)), - (f32 (VOP3PMadMixModsPat VT:$src1, i32:$src1_modifiers)), - (f32 (VOP3PMadMixModsPat VT:$src2, i32:$src2_modifiers))))))), - (vecVT (mixhi_inst $src0_modifiers, $src0, - $src1_modifiers, $src1, - $src2_modifiers, $src2, - DSTCLAMP.ENABLE, - (REG_SEQUENCE VGPR_32, $elt0, lo16, (VT (IMPLICIT_DEF)), hi16))) + (AMDGPUclamp (build_vector + (VT (fpround (fma_like (f32 (VOP3PMadMixModsPat VT:$lo_src0, i32:$lo_src0_modifiers)), + (f32 (VOP3PMadMixModsPat VT:$lo_src1, i32:$lo_src1_modifiers)), + (f32 (VOP3PMadMixModsPat VT:$lo_src2, i32:$lo_src2_modifiers))))), + (VT (fpround (fma_like (f32 (VOP3PMadMixModsPat VT:$hi_src0, i32:$hi_src0_modifiers)), + (f32 (VOP3PMadMixModsPat VT:$hi_src1, i32:$hi_src1_modifiers)), + (f32 (VOP3PMadMixModsPat VT:$hi_src2, i32:$hi_src2_modifiers))))))), + (vecVT (REG_SEQUENCE VGPR_32, (mix_inst_16 $lo_src0_modifiers, $lo_src0, + $lo_src1_modifiers, $lo_src1, + $lo_src2_modifiers, $lo_src2, + DSTCLAMP.ENABLE), lo16, + (mix_inst_16 $hi_src0_modifiers, $hi_src0, + $hi_src1_modifiers, $hi_src1, + $hi_src2_modifiers, $hi_src2, + DSTCLAMP.ENABLE), hi16)) >; - } // end True16Predicate } class MinimumMaximumByMinimum3Maximum3VOP3P; +defm : MadFmaMixFP32Pats; +defm : MadFmaMixFP16Pats; } // OtherPredicates = [NoFP32Denormals] } // End SubtargetPredicate = HasMadMixInsts @@ -360,10 +394,18 @@ defm V_FMA_MIXLO_F16 : VOP3_VOP3PInst<"v_fma_mixlo_f16", VOP3P_Mix_Profile>; } + +// Pseudo true16 inst for v_fma_mixlo/hi_f16 +defm V_FMA_MIX_F16_t16 : VOP3_VOP3PInst_t16<"v_fma_mix_f16_t16", VOP3P_Mix_Profile_t16>; } // End FPDPRounding = 1 } -defm : MadFmaMixPats; +defm : MadFmaMixFP32Pats; + +let True16Predicate = NotUseRealTrue16Insts in +defm : MadFmaMixFP16Pats; +let True16Predicate = UseRealTrue16Insts in +defm : MadFmaMixFP16Pats_t16; } let SubtargetPredicate = HasFmaMixBF16Insts in { @@ -378,10 +420,17 @@ defm V_FMA_MIXLO_BF16 : VOP3_VOP3PInst<"v_fma_mixlo_bf16", VOP3P_Mix_Profile>; } + +// Pseudo true16 inst for v_fma_mixlo/hi_bf16 +defm 
V_FMA_MIX_BF16_t16 : VOP3_VOP3PInst_t16<"v_fma_mix_bf16_t16", VOP3P_Mix_Profile_t16>; } // End FPDPRounding = 1 } // End isCommutable = 1 -defm : MadFmaMixPats; +defm : MadFmaMixFP32Pats; +let True16Predicate = NotUseRealTrue16Insts in +defm : MadFmaMixFP16Pats; +let True16Predicate = UseRealTrue16Insts in +defm : MadFmaMixFP16Pats_t16; } // End SubtargetPredicate = HasFmaMixBF16Insts def PK_ADD_MINMAX_Profile : VOP3P_Profile { diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp index 1c42f44765abf..1f773e2a7e0fc 100644 --- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -610,25 +610,41 @@ void ARMAsmPrinter::emitEndOfAsmFile(Module &M) { // to appear in the .ARM.attributes section in ELF. // Instead of subclassing the MCELFStreamer, we do the work here. - // Returns true if all functions have the same function attribute value. - // It also returns true when the module has no functions. +// Returns true if all function definitions have the same function attribute +// value. It also returns true when the module has no functions. static bool checkFunctionsAttributeConsistency(const Module &M, StringRef Attr, StringRef Value) { - return !any_of(M, [&](const Function &F) { - return F.getFnAttribute(Attr).getValueAsString() != Value; - }); + return !any_of(M, [&](const Function &F) { + if (F.isDeclaration()) + return false; + return F.getFnAttribute(Attr).getValueAsString() != Value; + }); } -// Returns true if all functions have the same denormal mode. +// Returns true if all function definitions have the same denormal mode. // It also returns true when the module has no functions. -static bool checkDenormalAttributeConsistency(const Module &M, - StringRef Attr, +static bool checkDenormalAttributeConsistency(const Module &M, StringRef Attr, DenormalMode Value) { return !any_of(M, [&](const Function &F) { + if (F.isDeclaration()) + return false; StringRef AttrVal = F.getFnAttribute(Attr).getValueAsString(); return parseDenormalFPAttribute(AttrVal) != Value; }); } +// Returns true if any two function definitions have different denormal modes.
+static bool checkDenormalAttributeInconsistency(const Module &M) { + auto F = M.functions().begin(); + auto E = M.functions().end(); + if (F == E) + return false; + DenormalMode Value = F->getDenormalModeRaw(); + ++F; + return std::any_of(F, E, [&](const Function &F) { + return !F.isDeclaration() && F.getDenormalModeRaw() != Value; + }); +} + void ARMAsmPrinter::emitAttributes() { MCTargetStreamer &TS = *OutStreamer->getTargetStreamer(); ARMTargetStreamer &ATS = static_cast<ARMTargetStreamer &>(TS); @@ -695,7 +711,9 @@ void ARMAsmPrinter::emitAttributes() { DenormalMode::getPositiveZero())) ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::PositiveZero); - else if (!TM.Options.UnsafeFPMath) + else if (checkDenormalAttributeInconsistency(*MMI->getModule()) || + checkDenormalAttributeConsistency( + *MMI->getModule(), "denormal-fp-math", DenormalMode::getIEEE())) ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::IEEEDenormals); else { @@ -730,7 +748,7 @@ void ARMAsmPrinter::emitAttributes() { TM.Options.NoTrappingFPMath) ATS.emitAttribute(ARMBuildAttrs::ABI_FP_exceptions, ARMBuildAttrs::Not_Allowed); - else if (!TM.Options.UnsafeFPMath) { + else { ATS.emitAttribute(ARMBuildAttrs::ABI_FP_exceptions, ARMBuildAttrs::Allowed); // If the user has permitted this code to choose the IEEE 754 diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp index 5c35b3327c16d..22769dbf38719 100644 --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -6510,14 +6510,14 @@ bool ARMBaseInstrInfo::shouldOutlineFromFunctionByDefault( return Subtarget.isMClass() && MF.getFunction().hasMinSize(); } -bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable( +bool ARMBaseInstrInfo::isReMaterializableImpl( const MachineInstr &MI) const { // Try hard to rematerialize any VCTPs because if we spill P0, it will block // the tail predication conversion. This means that the element count // register has to be live for longer, but that has to be better than // spill/restore and VPT predication. return (isVCTP(&MI) && !isPredicated(MI)) || - TargetInstrInfo::isReallyTriviallyReMaterializable(MI); + TargetInstrInfo::isReMaterializableImpl(MI); } unsigned llvm::getBLXOpcode(const MachineFunction &MF) { diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h index 71de3c6ad597a..2869e7f708046 100644 --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h @@ -479,7 +479,7 @@ class ARMBaseInstrInfo : public ARMGenInstrInfo { MachineInstr *canFoldIntoMOVCC(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII) const; - bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override; + bool isReMaterializableImpl(const MachineInstr &MI) const override; private: /// Modeling special VFP / NEON fp MLA / MLS hazards. diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp index e94220af05a0d..2e8a676269a74 100644 --- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -960,17 +960,3 @@ bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI, } return false; } - -bool ARMBaseRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC, - unsigned DefSubReg, - const TargetRegisterClass *SrcRC, - unsigned SrcSubReg) const { - // We can't extract an SPR from an arbitary DPR (as opposed to a DPR_VFP2).
- if (DefRC == &ARM::SPRRegClass && DefSubReg == 0 && - SrcRC == &ARM::DPRRegClass && - (SrcSubReg == ARM::ssub_0 || SrcSubReg == ARM::ssub_1)) - return false; - - return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg, - SrcRC, SrcSubReg); -} diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h index 5b67b34089d7e..03b0fa0d1ee08 100644 --- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h +++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h @@ -158,11 +158,6 @@ class ARMBaseRegisterInfo : public ARMGenRegisterInfo { const TargetRegisterClass *NewRC, LiveIntervals &LIS) const override; - bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC, - unsigned DefSubReg, - const TargetRegisterClass *SrcRC, - unsigned SrcSubReg) const override; - int getSEHRegNum(unsigned i) const { return getEncodingValue(i); } }; diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp index 57141ab69223f..9945ecc9c96e0 100644 --- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -27,6 +27,8 @@ #include "llvm/MC/MCAsmInfo.h" #include "llvm/Support/Debug.h" +#include + using namespace llvm; #define DEBUG_TYPE "arm-pseudo" diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 9052cbfa89deb..f4ac6bb76b3fe 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -1482,7 +1482,7 @@ bool ARMTargetLowering::useSoftFloat() const { return Subtarget->useSoftFloat(); } -bool ARMTargetLowering::shouldExpandCmpUsingSelects(EVT VT) const { +bool ARMTargetLowering::preferSelectsOverBooleanArithmetic(EVT VT) const { return !Subtarget->isThumb1Only() && VT.getSizeInBits() <= 32; } @@ -5573,7 +5573,7 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, llvm_unreachable("Unknown VFP cmp argument!"); } -/// OptimizeVFPBrcond - With nnan, it's legal to optimize some +/// OptimizeVFPBrcond - With nnan and without daz, it's legal to optimize some /// f32 and even f64 comparisons to integer ones. SDValue ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { @@ -5729,9 +5729,9 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { } SDNodeFlags Flags = Op->getFlags(); - if ((getTargetMachine().Options.UnsafeFPMath || Flags.hasNoNaNs()) && - (DAG.getDenormalMode(MVT::f32) == DenormalMode::getIEEE() && - DAG.getDenormalMode(MVT::f64) == DenormalMode::getIEEE()) && + if (Flags.hasNoNaNs() && + DAG.getDenormalMode(MVT::f32) == DenormalMode::getIEEE() && + DAG.getDenormalMode(MVT::f64) == DenormalMode::getIEEE() && (CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETNE || CC == ISD::SETUNE)) { if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) @@ -20428,9 +20428,9 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, if (CVal >= -255 && CVal <= -1) break; } else { - // This must be a constant between -4095 and 4095. It is not clear - // what this constraint is intended for. Implemented for - // compatibility with GCC. + // This must be a constant between -4095 and 4095. This is suitable + // for use as the immediate offset field in LDR and STR instructions + // such as LDR r0,[r1,#offset]. 
if (CVal >= -4095 && CVal <= 4095) break; } diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h index 8e417ac3e1a7b..fa130a153b0de 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.h +++ b/llvm/lib/Target/ARM/ARMISelLowering.h @@ -605,7 +605,7 @@ class VectorType; bool preferZeroCompareBranch() const override { return true; } - bool shouldExpandCmpUsingSelects(EVT VT) const override; + bool preferSelectsOverBooleanArithmetic(EVT VT) const override; bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override; diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp index 9f600e0c685ab..58bc338b25856 100644 --- a/llvm/lib/Target/ARM/ARMSubtarget.cpp +++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp @@ -88,18 +88,16 @@ ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU, ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS, const ARMBaseTargetMachine &TM, bool IsLittle, - bool MinSize) + bool MinSize, DenormalMode DM) : ARMGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS), UseMulOps(UseFusedMulOps), CPUString(CPU), OptMinSize(MinSize), - IsLittle(IsLittle), TargetTriple(TT), Options(TM.Options), TM(TM), + IsLittle(IsLittle), DM(DM), TargetTriple(TT), Options(TM.Options), TM(TM), FrameLowering(initializeFrameLowering(CPU, FS)), // At this point initializeSubtargetDependencies has been called so // we can query directly. - InstrInfo(isThumb1Only() - ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this) - : !isThumb() - ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this) - : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)), + InstrInfo(isThumb1Only() ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this) + : !isThumb() ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this) + : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)), TLInfo(TM, *this) { CallLoweringInfo.reset(new ARMCallLowering(*getTargetLowering())); @@ -224,10 +222,14 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) { // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default. const FeatureBitset &Bits = getFeatureBits(); if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters - (Options.UnsafeFPMath || isTargetDarwin())) + (isTargetDarwin() || DM == DenormalMode::getPreserveSign())) HasNEONForFP = true; - if (isRWPI()) + const ARM::ArchKind Arch = ARM::parseArch(TargetTriple.getArchName()); + if (isRWPI() || + (isTargetIOS() && + (Arch == ARM::ArchKind::ARMV6K || Arch == ARM::ArchKind::ARMV6) && + TargetTriple.isOSVersionLT(3, 0))) ReserveR9 = true; // If MVEVectorCostFactor is still 0 (has not been set to anything else), default it to 2 diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h index 637eb4560e0f1..b2d368e0ca175 100644 --- a/llvm/lib/Target/ARM/ARMSubtarget.h +++ b/llvm/lib/Target/ARM/ARMSubtarget.h @@ -186,6 +186,12 @@ class ARMSubtarget : public ARMGenSubtargetInfo { /// IsLittle - The target is Little Endian bool IsLittle; + /// DM - Denormal mode + /// NEON and VFP RunFast mode are not IEEE 754 compliant, + /// use this field to determine whether to generate NEON/VFP + /// instructions in related function. + DenormalMode DM; + /// TargetTriple - What processor and OS we're targeting. 
Triple TargetTriple; @@ -206,7 +212,7 @@ class ARMSubtarget : public ARMGenSubtargetInfo { /// ARMSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS, const ARMBaseTargetMachine &TM, bool IsLittle, - bool MinSize = false); + bool MinSize = false, DenormalMode DM = DenormalMode::getIEEE()); /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size /// that still makes it profitable to inline the call. diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp index 131b9332e9ade..86740a92b32c5 100644 --- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp +++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp @@ -229,6 +229,10 @@ ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const { if (F.hasMinSize()) Key += "+minsize"; + DenormalMode DM = F.getDenormalModeRaw(); + if (DM != DenormalMode::getIEEE()) + Key += "denormal-fp-math=" + DM.str(); + auto &I = SubtargetMap[Key]; if (!I) { // This needs to be done before we create a new subtarget since any @@ -236,7 +240,7 @@ ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const { // function that reside in TargetOptions. resetTargetOptions(F); I = std::make_unique(TargetTriple, CPU, FS, *this, isLittle, - F.hasMinSize()); + F.hasMinSize(), DM); if (!I->isThumb() && !I->hasARMOps()) F.getContext().emitError("Function '" + F.getName() + "' uses ARM " diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 0e974838a7c6b..f60660b12baca 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -135,17 +135,17 @@ class UnwindContext { MCRegister getFPReg() const { return FPReg; } void emitFnStartLocNotes() const { - for (const SMLoc &Loc : FnStartLocs) + for (SMLoc Loc : FnStartLocs) Parser.Note(Loc, ".fnstart was specified here"); } void emitCantUnwindLocNotes() const { - for (const SMLoc &Loc : CantUnwindLocs) + for (SMLoc Loc : CantUnwindLocs) Parser.Note(Loc, ".cantunwind was specified here"); } void emitHandlerDataLocNotes() const { - for (const SMLoc &Loc : HandlerDataLocs) + for (SMLoc Loc : HandlerDataLocs) Parser.Note(Loc, ".handlerdata was specified here"); } diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp index b25b7e7104f20..d358913d38af9 100644 --- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp +++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp @@ -157,12 +157,6 @@ class ARMDisassembler : public MCDisassembler { } // end anonymous namespace -// Forward declare these because the autogenerated code will reference them. -// Definitions are further down. 
-static DecodeStatus DecodeT2AddrModeImm8(MCInst &Inst, unsigned Val, - uint64_t Address, - const MCDisassembler *Decoder); - typedef DecodeStatus OperandDecoder(MCInst &Inst, unsigned Val, uint64_t Address, const MCDisassembler *Decoder); @@ -3167,6 +3161,65 @@ static DecodeStatus DecodeT2LoadShift(MCInst &Inst, unsigned Insn, return S; } +static DecodeStatus DecodeT2Imm8(MCInst &Inst, unsigned Val, uint64_t Address, + const MCDisassembler *Decoder) { + int imm = Val & 0xFF; + if (Val == 0) + imm = INT32_MIN; + else if (!(Val & 0x100)) + imm *= -1; + Inst.addOperand(MCOperand::createImm(imm)); + + return MCDisassembler::Success; +} + +static DecodeStatus DecodeT2AddrModeImm8(MCInst &Inst, unsigned Val, + uint64_t Address, + const MCDisassembler *Decoder) { + DecodeStatus S = MCDisassembler::Success; + + unsigned Rn = fieldFromInstruction(Val, 9, 4); + unsigned imm = fieldFromInstruction(Val, 0, 9); + + // Thumb stores cannot use PC as dest register. + switch (Inst.getOpcode()) { + case ARM::t2STRT: + case ARM::t2STRBT: + case ARM::t2STRHT: + case ARM::t2STRi8: + case ARM::t2STRHi8: + case ARM::t2STRBi8: + if (Rn == 15) + return MCDisassembler::Fail; + break; + default: + break; + } + + // Some instructions always use an additive offset. + switch (Inst.getOpcode()) { + case ARM::t2LDRT: + case ARM::t2LDRBT: + case ARM::t2LDRHT: + case ARM::t2LDRSBT: + case ARM::t2LDRSHT: + case ARM::t2STRT: + case ARM::t2STRBT: + case ARM::t2STRHT: + imm |= 0x100; + break; + default: + break; + } + + if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))) + return MCDisassembler::Fail; + if (!Check(S, DecodeT2Imm8(Inst, imm, Address, Decoder))) + return MCDisassembler::Fail; + + return S; +} + static DecodeStatus DecodeT2LoadImm8(MCInst &Inst, unsigned Insn, uint64_t Address, const MCDisassembler *Decoder) { @@ -3476,18 +3529,6 @@ static DecodeStatus DecodeT2AddrModeImm0_1020s4(MCInst &Inst, unsigned Val, return S; } -static DecodeStatus DecodeT2Imm8(MCInst &Inst, unsigned Val, uint64_t Address, - const MCDisassembler *Decoder) { - int imm = Val & 0xFF; - if (Val == 0) - imm = INT32_MIN; - else if (!(Val & 0x100)) - imm *= -1; - Inst.addOperand(MCOperand::createImm(imm)); - - return MCDisassembler::Success; -} - template static DecodeStatus DecodeT2Imm7(MCInst &Inst, unsigned Val, uint64_t Address, const MCDisassembler *Decoder) { @@ -3503,53 +3544,6 @@ static DecodeStatus DecodeT2Imm7(MCInst &Inst, unsigned Val, uint64_t Address, return MCDisassembler::Success; } -static DecodeStatus DecodeT2AddrModeImm8(MCInst &Inst, unsigned Val, - uint64_t Address, - const MCDisassembler *Decoder) { - DecodeStatus S = MCDisassembler::Success; - - unsigned Rn = fieldFromInstruction(Val, 9, 4); - unsigned imm = fieldFromInstruction(Val, 0, 9); - - // Thumb stores cannot use PC as dest register. - switch (Inst.getOpcode()) { - case ARM::t2STRT: - case ARM::t2STRBT: - case ARM::t2STRHT: - case ARM::t2STRi8: - case ARM::t2STRHi8: - case ARM::t2STRBi8: - if (Rn == 15) - return MCDisassembler::Fail; - break; - default: - break; - } - - // Some instructions always use an additive offset. 
- switch (Inst.getOpcode()) { - case ARM::t2LDRT: - case ARM::t2LDRBT: - case ARM::t2LDRHT: - case ARM::t2LDRSBT: - case ARM::t2LDRSHT: - case ARM::t2STRT: - case ARM::t2STRBT: - case ARM::t2STRHT: - imm |= 0x100; - break; - default: - break; - } - - if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))) - return MCDisassembler::Fail; - if (!Check(S, DecodeT2Imm8(Inst, imm, Address, Decoder))) - return MCDisassembler::Fail; - - return S; -} - template static DecodeStatus DecodeTAddrModeImm7(MCInst &Inst, unsigned Val, uint64_t Address, diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp index c56b589519533..4a87c638f5fc3 100644 --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp @@ -30,7 +30,6 @@ #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/EndianStream.h" -#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/TargetParser/Triple.h" #include diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td index c65ead45e2c7e..228114c5c24b2 100644 --- a/llvm/lib/Target/DirectX/DXIL.td +++ b/llvm/lib/Target/DirectX/DXIL.td @@ -424,6 +424,7 @@ def Saturate : DXILOp<7, unary> { def IsNaN : DXILOp<8, isSpecialFloat> { let Doc = "Determines if the specified value is NaN."; + let intrinsics = [IntrinSelect]; let arguments = [OverloadTy]; let result = Int1Ty; let overloads = [Overloads]; diff --git a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp index e2469d8df957f..ebb7c2607c0c8 100644 --- a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp +++ b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp @@ -213,6 +213,7 @@ static bool isIntrinsicExpansion(Function &F) { case Intrinsic::dx_nclamp: case Intrinsic::dx_degrees: case Intrinsic::dx_isinf: + case Intrinsic::dx_isnan: case Intrinsic::dx_lerp: case Intrinsic::dx_normalize: case Intrinsic::dx_fdot: @@ -1024,6 +1025,9 @@ static bool expandIntrinsic(Function &F, CallInst *Orig) { case Intrinsic::dx_isinf: Result = expand16BitIsInf(Orig); break; + case Intrinsic::dx_isnan: + Result = expand16BitIsNaN(Orig); + break; case Intrinsic::dx_lerp: Result = expandLerpIntrinsic(Orig); break; diff --git a/llvm/lib/Target/DirectX/DXILPostOptimizationValidation.cpp b/llvm/lib/Target/DirectX/DXILPostOptimizationValidation.cpp index 7e93474e73118..6e95a4232fabe 100644 --- a/llvm/lib/Target/DirectX/DXILPostOptimizationValidation.cpp +++ b/llvm/lib/Target/DirectX/DXILPostOptimizationValidation.cpp @@ -160,6 +160,41 @@ tripleToVisibility(llvm::Triple::EnvironmentType ET) { } } +static void reportIfDeniedShaderStageAccess(Module &M, + const dxbc::RootFlags &Flags, + const dxbc::RootFlags &Mask) { + if ((Flags & Mask) != Mask) + return; + + SmallString<128> Message; + raw_svector_ostream OS(Message); + OS << "Shader has root bindings but root signature uses a DENY flag to " + "disallow root binding access to the shader stage."; + M.getContext().diagnose(DiagnosticInfoGeneric(Message)); +} + +static std::optional +getEnvironmentDenyFlagMask(Triple::EnvironmentType ShaderProfile) { + switch (ShaderProfile) { + case Triple::Pixel: + return dxbc::RootFlags::DenyPixelShaderRootAccess; + case Triple::Vertex: + return dxbc::RootFlags::DenyVertexShaderRootAccess; + case Triple::Geometry: + return dxbc::RootFlags::DenyGeometryShaderRootAccess; + case Triple::Hull: + return 
dxbc::RootFlags::DenyHullShaderRootAccess; + case Triple::Domain: + return dxbc::RootFlags::DenyDomainShaderRootAccess; + case Triple::Mesh: + return dxbc::RootFlags::DenyMeshShaderRootAccess; + case Triple::Amplification: + return dxbc::RootFlags::DenyAmplificationShaderRootAccess; + default: + return std::nullopt; + } +} + static void validateRootSignature(Module &M, const mcdxbc::RootSignatureDesc &RSD, dxil::ModuleMetadataInfo &MMI, @@ -225,7 +260,9 @@ static void validateRootSignature(Module &M, Builder.findOverlapping(ReportedBinding); reportOverlappingRegisters(M, ReportedBinding, Overlaping); }); + const hlsl::BoundRegs &BoundRegs = Builder.takeBoundRegs(); + bool HasBindings = false; for (const ResourceInfo &RI : DRM) { const ResourceInfo::ResourceBinding &Binding = RI.getBinding(); const dxil::ResourceTypeInfo &RTI = DRTM[RI.getHandleTy()]; @@ -236,22 +273,33 @@ static void validateRootSignature(Module &M, BoundRegs.findBoundReg(RC, Binding.Space, Binding.LowerBound, Binding.LowerBound + Binding.Size - 1); - if (Reg != nullptr) { - const auto *ParamInfo = - static_cast(Reg->Cookie); - - if (RC != ResourceClass::SRV && RC != ResourceClass::UAV) - continue; + if (!Reg) { + reportRegNotBound(M, RC, Binding); + continue; + } - if (ParamInfo->Type == dxbc::RootParameterType::DescriptorTable) - continue; + const auto *ParamInfo = + static_cast(Reg->Cookie); - if (RK != ResourceKind::RawBuffer && RK != ResourceKind::StructuredBuffer) - reportInvalidHandleTyError(M, RC, Binding); - } else { - reportRegNotBound(M, RC, Binding); + bool IsSRVOrUAV = RC == ResourceClass::SRV || RC == ResourceClass::UAV; + bool IsDescriptorTable = + ParamInfo->Type == dxbc::RootParameterType::DescriptorTable; + bool IsRawOrStructuredBuffer = + RK != ResourceKind::RawBuffer && RK != ResourceKind::StructuredBuffer; + if (IsSRVOrUAV && !IsDescriptorTable && IsRawOrStructuredBuffer) { + reportInvalidHandleTyError(M, RC, Binding); + continue; } + + HasBindings = true; } + + if (!HasBindings) + return; + + if (std::optional Mask = + getEnvironmentDenyFlagMask(MMI.ShaderProfile)) + reportIfDeniedShaderStageAccess(M, dxbc::RootFlags(RSD.Flags), *Mask); } static mcdxbc::RootSignatureDesc * diff --git a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp index 5153d24070dc9..68fd3e0bc74c7 100644 --- a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp +++ b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp @@ -30,6 +30,7 @@ bool DirectXTTIImpl::isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, switch (ID) { case Intrinsic::dx_asdouble: case Intrinsic::dx_isinf: + case Intrinsic::dx_isnan: case Intrinsic::dx_firstbitlow: case Intrinsic::dx_firstbituhigh: case Intrinsic::dx_firstbitshigh: @@ -48,6 +49,7 @@ bool DirectXTTIImpl::isTargetIntrinsicTriviallyScalarizable( case Intrinsic::dx_firstbituhigh: case Intrinsic::dx_frac: case Intrinsic::dx_isinf: + case Intrinsic::dx_isnan: case Intrinsic::dx_rsqrt: case Intrinsic::dx_saturate: case Intrinsic::dx_splitdouble: diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp index df2cfd07d8cc0..4d96cfadc79ff 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp @@ -2483,8 +2483,15 @@ OpRef HvxSelector::perfect(ShuffleMask SM, OpRef Va, ResultStack &Results) { } ++I; + // Upper bits of the vdeal/vshuff parameter that do not cover any byte in + // the vector are ignored. 
Technically, A2_tfrsi takes a signed value, which + // is sign-extended to 32 bit if there is no extender. The practical + // advantages are that signed values are smaller in common use cases and are + // not sensitive to the vector size. + int SS = SignExtend32(S, HwLog); + NodeTemplate Res; - Results.push(Hexagon::A2_tfrsi, MVT::i32, {getConst32(S, dl)}); + Results.push(Hexagon::A2_tfrsi, MVT::i32, {getSignedConst32(SS, dl)}); Res.Opc = IsInc ? Hexagon::V6_vshuffvdd : Hexagon::V6_vdealvdd; Res.Ty = PairTy; Res.Ops = {OpRef::hi(Arg), OpRef::lo(Arg), OpRef::res(-1)}; diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp index 45d194e944fb9..939841ae817c3 100644 --- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -2804,6 +2804,7 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset, case Hexagon::V6_vL32b_nt_cur_npred_ai: case Hexagon::V6_vL32b_nt_tmp_pred_ai: case Hexagon::V6_vL32b_nt_tmp_npred_ai: + case Hexagon::V6_vS32Ub_npred_ai: case Hexagon::V6_vgathermh_pseudo: case Hexagon::V6_vgathermw_pseudo: case Hexagon::V6_vgathermhw_pseudo: diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp index dfe0fa973c9b3..021dceb0e0789 100644 --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp @@ -25,7 +25,6 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/EndianStream.h" -#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include #include diff --git a/llvm/lib/Target/Hexagon/RDFCopy.cpp b/llvm/lib/Target/Hexagon/RDFCopy.cpp index fafdad08909dd..3b1d3bd89680b 100644 --- a/llvm/lib/Target/Hexagon/RDFCopy.cpp +++ b/llvm/lib/Target/Hexagon/RDFCopy.cpp @@ -108,7 +108,7 @@ bool CopyPropagation::scanBlock(MachineBasicBlock *B) { for (NodeAddr IA : BA.Addr->members(DFG)) { if (DFG.IsCode(IA)) { NodeAddr SA = IA; - EqualityMap EM(std::less(DFG.getPRI())); + EqualityMap EM(RegisterRefLess(DFG.getPRI())); if (interpretAsCopy(SA.Addr->getCode(), EM)) recordCopy(SA, EM); } diff --git a/llvm/lib/Target/Hexagon/RDFCopy.h b/llvm/lib/Target/Hexagon/RDFCopy.h index e4fb89892831d..92b2c65982655 100644 --- a/llvm/lib/Target/Hexagon/RDFCopy.h +++ b/llvm/lib/Target/Hexagon/RDFCopy.h @@ -25,8 +25,8 @@ class MachineInstr; namespace rdf { struct CopyPropagation { - CopyPropagation(DataFlowGraph &dfg) : MDT(dfg.getDT()), DFG(dfg), - RDefMap(std::less(DFG.getPRI())) {} + CopyPropagation(DataFlowGraph &dfg) + : MDT(dfg.getDT()), DFG(dfg), RDefMap(RegisterRefLess(DFG.getPRI())) {} virtual ~CopyPropagation() = default; @@ -35,7 +35,7 @@ namespace rdf { bool trace() const { return Trace; } DataFlowGraph &getDFG() { return DFG; } - using EqualityMap = std::map; + using EqualityMap = std::map; virtual bool interpretAsCopy(const MachineInstr *MI, EqualityMap &EM); private: @@ -45,7 +45,7 @@ namespace rdf { bool Trace = false; // map: register -> (map: stmt -> reaching def) - std::map> RDefMap; + std::map, RegisterRefLess> RDefMap; // map: statement -> (map: dst reg -> src reg) std::map CopyMap; std::vector Copies; diff --git a/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp b/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp index 5be4713b349ee..9b11201d0312d 100644 --- a/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp +++ 
b/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp @@ -957,8 +957,10 @@ void LoongArchAsmParser::emitLoadAddressAbs(MCInst &Inst, SMLoc IDLoc, : Inst.getOperand(2).getExpr(); InstSeq Insts; + // To distinguish between la.abs and %abs_hi20, la.abs will generate + // R_LARCH_MARK_LA and R_LARCH_ABS_HI20 relocations. Insts.push_back( - LoongArchAsmParser::Inst(LoongArch::LU12I_W, ELF::R_LARCH_ABS_HI20)); + LoongArchAsmParser::Inst(LoongArch::LU12I_W, ELF::R_LARCH_MARK_LA)); Insts.push_back( LoongArchAsmParser::Inst(LoongArch::ORI, ELF::R_LARCH_ABS_LO12)); diff --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp index 07e722b9a6591..442f0a46a4983 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp @@ -113,10 +113,11 @@ void LoongArchDAGToDAGISel::Select(SDNode *Node) { APInt SplatValue, SplatUndef; unsigned SplatBitSize; bool HasAnyUndefs; - unsigned Op; + unsigned Op = 0; EVT ResTy = BVN->getValueType(0); bool Is128Vec = BVN->getValueType(0).is128BitVector(); bool Is256Vec = BVN->getValueType(0).is256BitVector(); + SDNode *Res; if (!Subtarget->hasExtLSX() || (!Is128Vec && !Is256Vec)) break; @@ -124,26 +125,25 @@ void LoongArchDAGToDAGISel::Select(SDNode *Node) { HasAnyUndefs, 8)) break; - switch (SplatBitSize) { - default: - break; - case 8: - Op = Is256Vec ? LoongArch::PseudoXVREPLI_B : LoongArch::PseudoVREPLI_B; - break; - case 16: - Op = Is256Vec ? LoongArch::PseudoXVREPLI_H : LoongArch::PseudoVREPLI_H; - break; - case 32: - Op = Is256Vec ? LoongArch::PseudoXVREPLI_W : LoongArch::PseudoVREPLI_W; - break; - case 64: - Op = Is256Vec ? LoongArch::PseudoXVREPLI_D : LoongArch::PseudoVREPLI_D; - break; - } - - SDNode *Res; // If we have a signed 10 bit integer, we can splat it directly. if (SplatValue.isSignedIntN(10)) { + switch (SplatBitSize) { + default: + break; + case 8: + Op = Is256Vec ? LoongArch::PseudoXVREPLI_B : LoongArch::PseudoVREPLI_B; + break; + case 16: + Op = Is256Vec ? LoongArch::PseudoXVREPLI_H : LoongArch::PseudoVREPLI_H; + break; + case 32: + Op = Is256Vec ? LoongArch::PseudoXVREPLI_W : LoongArch::PseudoVREPLI_W; + break; + case 64: + Op = Is256Vec ? LoongArch::PseudoXVREPLI_D : LoongArch::PseudoVREPLI_D; + break; + } + EVT EleType = ResTy.getVectorElementType(); APInt Val = SplatValue.sextOrTrunc(EleType.getSizeInBits()); SDValue Imm = CurDAG->getTargetConstant(Val, DL, EleType); @@ -151,6 +151,21 @@ void LoongArchDAGToDAGISel::Select(SDNode *Node) { ReplaceNode(Node, Res); return; } + + // Select appropriate [x]vldi instructions for some special constant splats, + // where the immediate value `imm[12] == 1` for used [x]vldi instructions. + const auto &TLI = + *static_cast(getTargetLowering()); + std::pair ConvertVLDI = + TLI.isImmVLDILegalForMode1(SplatValue, SplatBitSize); + if (ConvertVLDI.first) { + Op = Is256Vec ? 
LoongArch::XVLDI : LoongArch::VLDI; + SDValue Imm = CurDAG->getSignedTargetConstant( + SignExtend32<13>(ConvertVLDI.second), DL, MVT::i32); + Res = CurDAG->getMachineNode(Op, DL, ResTy, Imm); + ReplaceNode(Node, Res); + return; + } break; } } diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index 32baa2d111270..4cfbfca45d359 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -340,6 +340,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM, {ISD::SETNE, ISD::SETGE, ISD::SETGT, ISD::SETUGE, ISD::SETUGT}, VT, Expand); setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); + setOperationAction(ISD::ABS, VT, Legal); setOperationAction(ISD::ABDS, VT, Legal); setOperationAction(ISD::ABDU, VT, Legal); setOperationAction(ISD::SADDSAT, VT, Legal); @@ -419,6 +420,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM, {ISD::SETNE, ISD::SETGE, ISD::SETGT, ISD::SETUGE, ISD::SETUGT}, VT, Expand); setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); + setOperationAction(ISD::ABS, VT, Legal); setOperationAction(ISD::ABDS, VT, Legal); setOperationAction(ISD::ABDU, VT, Legal); setOperationAction(ISD::SADDSAT, VT, Legal); @@ -666,6 +668,7 @@ SDValue LoongArchTargetLowering::lowerVECREDUCE_ADD(SDValue Op, unsigned NumEles = Val.getSimpleValueType().getVectorNumElements(); unsigned EleBits = Val.getSimpleValueType().getScalarSizeInBits(); + unsigned ResBits = OpVT.getScalarSizeInBits(); unsigned LegalVecSize = 128; bool isLASX256Vector = @@ -691,10 +694,11 @@ SDValue LoongArchTargetLowering::lowerVECREDUCE_ADD(SDValue Op, if (isLASX256Vector) { SDValue Tmp = DAG.getNode(LoongArchISD::XVPERMI, DL, MVT::v4i64, Val, - DAG.getConstant(2, DL, MVT::i64)); + DAG.getConstant(2, DL, Subtarget.getGRLenVT())); Val = DAG.getNode(ISD::ADD, DL, MVT::v4i64, Tmp, Val); } + Val = DAG.getBitcast(MVT::getVectorVT(OpVT, LegalVecSize / ResBits), Val); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT, Val, DAG.getConstant(0, DL, Subtarget.getGRLenVT())); } @@ -727,15 +731,16 @@ SDValue LoongArchTargetLowering::lowerVECREDUCE(SDValue Op, unsigned Opcode = ISD::getVecReduceBaseOpcode(Op.getOpcode()); MVT VecTy = Val.getSimpleValueType(); + MVT GRLenVT = Subtarget.getGRLenVT(); for (int i = NumEles; i > 1; i /= 2) { - SDValue ShiftAmt = DAG.getConstant(i * EleBits / 16, DL, MVT::i64); + SDValue ShiftAmt = DAG.getConstant(i * EleBits / 16, DL, GRLenVT); SDValue Tmp = DAG.getNode(LoongArchISD::VBSRL, DL, VecTy, Val, ShiftAmt); Val = DAG.getNode(Opcode, DL, VecTy, Tmp, Val); } return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT, Val, - DAG.getConstant(0, DL, Subtarget.getGRLenVT())); + DAG.getConstant(0, DL, GRLenVT)); } SDValue LoongArchTargetLowering::lowerPREFETCH(SDValue Op, @@ -1119,6 +1124,10 @@ SDValue LoongArchTargetLowering::lowerBITREVERSE(SDValue Op, SDValue Src = Op->getOperand(0); SDLoc DL(Op); + // LoongArchISD::BITREV_8B is not supported on LA32. + if (!Subtarget.is64Bit() && (ResTy == MVT::v16i8 || ResTy == MVT::v32i8)) + return SDValue(); + EVT NewVT = ResTy.is128BitVector() ? 
MVT::v2i64 : MVT::v4i64; unsigned int OrigEltNum = ResTy.getVectorNumElements(); unsigned int NewEltNum = NewVT.getVectorNumElements(); @@ -1128,7 +1137,7 @@ SDValue LoongArchTargetLowering::lowerBITREVERSE(SDValue Op, SmallVector Ops; for (unsigned int i = 0; i < NewEltNum; i++) { SDValue Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, NewSrc, - DAG.getConstant(i, DL, MVT::i64)); + DAG.getConstant(i, DL, Subtarget.getGRLenVT())); unsigned RevOp = (ResTy == MVT::v16i8 || ResTy == MVT::v32i8) ? (unsigned)LoongArchISD::BITREV_8B : (unsigned)ISD::BITREVERSE; @@ -1596,7 +1605,7 @@ static SDValue lowerVECTOR_SHUFFLEAsZeroOrAnyExtend(const SDLoc &DL, /// value is necessary in order to fit the above form. static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef Mask, MVT VT, - SDValue V1, SDValue V2, SelectionDAG &DAG, + SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget) { int SplatIndex = -1; for (const auto &M : Mask) { @@ -1611,9 +1620,8 @@ lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef Mask, MVT VT, assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index"); if (fitsRegularPattern(Mask.begin(), 1, Mask.end(), SplatIndex, 0)) { - APInt Imm(64, SplatIndex); return DAG.getNode(LoongArchISD::VREPLVEI, DL, VT, V1, - DAG.getConstant(Imm, DL, Subtarget.getGRLenVT())); + DAG.getConstant(SplatIndex, DL, Subtarget.getGRLenVT())); } return SDValue(); @@ -1671,7 +1679,7 @@ lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef Mask, MVT VT, } // Calculate the immediate. Replace any remaining undefs with zero - APInt Imm(64, 0); + int Imm = 0; for (int i = SubVecSize - 1; i >= 0; --i) { int M = SubMask[i]; @@ -1946,11 +1954,12 @@ static SDValue lowerVECTOR_SHUFFLE_VPICKOD(const SDLoc &DL, ArrayRef Mask, /// adding it as an operand to the resulting VSHUF. static SDValue lowerVECTOR_SHUFFLE_VSHUF(const SDLoc &DL, ArrayRef Mask, MVT VT, SDValue V1, SDValue V2, - SelectionDAG &DAG) { + SelectionDAG &DAG, + const LoongArchSubtarget &Subtarget) { SmallVector Ops; for (auto M : Mask) - Ops.push_back(DAG.getConstant(M, DL, MVT::i64)); + Ops.push_back(DAG.getSignedConstant(M, DL, Subtarget.getGRLenVT())); EVT MaskVecTy = VT.changeVectorElementTypeToInteger(); SDValue MaskVec = DAG.getBuildVector(MaskVecTy, DL, Ops); @@ -1989,8 +1998,8 @@ static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef Mask, MVT VT, SDValue Result; // TODO: Add more comparison patterns. if (V2.isUndef()) { - if ((Result = lowerVECTOR_SHUFFLE_VREPLVEI(DL, Mask, VT, V1, V2, DAG, - Subtarget))) + if ((Result = + lowerVECTOR_SHUFFLE_VREPLVEI(DL, Mask, VT, V1, DAG, Subtarget))) return Result; if ((Result = lowerVECTOR_SHUFFLE_VSHUF4I(DL, Mask, VT, V1, V2, DAG, Subtarget))) @@ -2030,7 +2039,8 @@ static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef Mask, MVT VT, return Result; if (SDValue NewShuffle = widenShuffleMask(DL, Mask, VT, V1, V2, DAG)) return NewShuffle; - if ((Result = lowerVECTOR_SHUFFLE_VSHUF(DL, Mask, VT, V1, V2, DAG))) + if ((Result = + lowerVECTOR_SHUFFLE_VSHUF(DL, Mask, VT, V1, V2, DAG, Subtarget))) return Result; return SDValue(); } @@ -2045,7 +2055,7 @@ static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef Mask, MVT VT, /// value is necessary in order to fit the above form. 
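The rewritten immediate computation above packs a four-element sub-mask into the 8-bit [x]vshuf4i selector, two bits per destination lane, treating undef entries as zero; it now uses a plain int rather than a 64-bit APInt. A self-contained sketch of the same packing (the function name is illustrative):

#include <array>
#include <cstdio>

// Pack a 4-element shuffle sub-mask into the 8-bit vshuf4i immediate:
// destination lane i takes source element imm[2*i+1 : 2*i]; undef (-1)
// entries default to element 0. Mirrors the loop above, outside of LLVM.
static int packShuf4iImm(const std::array<int, 4> &SubMask) {
  int Imm = 0;
  for (int i = 3; i >= 0; --i) {
    int M = SubMask[i];
    if (M != -1)
      Imm |= M & 0x3; // two bits per lane
    if (i != 0)
      Imm <<= 2;      // shift before handling the next (lower) lane
  }
  return Imm;
}

int main() {
  printf("0x%x\n", packShuf4iImm({3, 2, 1, 0})); // 0x1b: reverse all four lanes
  printf("0x%x\n", packShuf4iImm({1, 0, 3, 2})); // 0xb1: swap adjacent pairs
}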
static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef Mask, MVT VT, - SDValue V1, SDValue V2, SelectionDAG &DAG, + SDValue V1, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget) { int SplatIndex = -1; for (const auto &M : Mask) { @@ -2060,7 +2070,10 @@ lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef Mask, MVT VT, const auto &Begin = Mask.begin(); const auto &End = Mask.end(); - unsigned HalfSize = Mask.size() / 2; + int HalfSize = Mask.size() / 2; + + if (SplatIndex >= HalfSize) + return SDValue(); assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index"); if (fitsRegularPattern(Begin, 1, End - HalfSize, SplatIndex, 0) && @@ -2085,10 +2098,30 @@ lowerVECTOR_SHUFFLE_XVSHUF4I(const SDLoc &DL, ArrayRef Mask, MVT VT, return lowerVECTOR_SHUFFLE_VSHUF4I(DL, Mask, VT, V1, V2, DAG, Subtarget); } +/// Lower VECTOR_SHUFFLE into XVPERMI (if possible). +static SDValue +lowerVECTOR_SHUFFLE_XVPERMI(const SDLoc &DL, ArrayRef Mask, MVT VT, + SDValue V1, SelectionDAG &DAG, + const LoongArchSubtarget &Subtarget) { + // Only consider XVPERMI_D. + if (Mask.size() != 4 || (VT != MVT::v4i64 && VT != MVT::v4f64)) + return SDValue(); + + unsigned MaskImm = 0; + for (unsigned i = 0; i < Mask.size(); ++i) { + if (Mask[i] == -1) + continue; + MaskImm |= Mask[i] << (i * 2); + } + + return DAG.getNode(LoongArchISD::XVPERMI, DL, VT, V1, + DAG.getConstant(MaskImm, DL, Subtarget.getGRLenVT())); +} + /// Lower VECTOR_SHUFFLE into XVPERM (if possible). static SDValue lowerVECTOR_SHUFFLE_XVPERM(const SDLoc &DL, ArrayRef Mask, - MVT VT, SDValue V1, SDValue V2, - SelectionDAG &DAG) { + MVT VT, SDValue V1, SelectionDAG &DAG, + const LoongArchSubtarget &Subtarget) { // LoongArch LASX only have XVPERM_W. if (Mask.size() != 8 || (VT != MVT::v8i32 && VT != MVT::v8f32)) return SDValue(); @@ -2119,9 +2152,10 @@ static SDValue lowerVECTOR_SHUFFLE_XVPERM(const SDLoc &DL, ArrayRef Mask, return SDValue(); SmallVector Masks; + MVT GRLenVT = Subtarget.getGRLenVT(); for (unsigned i = 0; i < NumElts; ++i) - Masks.push_back(Mask[i] == -1 ? DAG.getUNDEF(MVT::i64) - : DAG.getConstant(Mask[i], DL, MVT::i64)); + Masks.push_back(Mask[i] == -1 ? DAG.getUNDEF(GRLenVT) + : DAG.getConstant(Mask[i], DL, GRLenVT)); SDValue MaskVec = DAG.getBuildVector(MVT::v8i32, DL, Masks); return DAG.getNode(LoongArchISD::XVPERM, DL, VT, V1, MaskVec); @@ -2285,6 +2319,53 @@ static SDValue lowerVECTOR_SHUFFLE_XVPICKOD(const SDLoc &DL, ArrayRef Mask, return DAG.getNode(LoongArchISD::VPICKOD, DL, VT, V2, V1); } +/// Lower VECTOR_SHUFFLE into XVINSVE0 (if possible). +static SDValue +lowerVECTOR_SHUFFLE_XVINSVE0(const SDLoc &DL, ArrayRef Mask, MVT VT, + SDValue V1, SDValue V2, SelectionDAG &DAG, + const LoongArchSubtarget &Subtarget) { + // LoongArch LASX only supports xvinsve0.{w/d}. + if (VT != MVT::v8i32 && VT != MVT::v8f32 && VT != MVT::v4i64 && + VT != MVT::v4f64) + return SDValue(); + + MVT GRLenVT = Subtarget.getGRLenVT(); + int MaskSize = Mask.size(); + assert(MaskSize == (int)VT.getVectorNumElements() && "Unexpected mask size"); + + // Check if exactly one element of the Mask is replaced by 'Replaced', while + // all other elements are either 'Base + i' or undef (-1). On success, return + // the index of the replaced element. Otherwise, just return -1. 
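The new lowerVECTOR_SHUFFLE_XVPERMI above builds the xvpermi.d selector the same way, but without the reversed loop: destination lane i contributes two bits at position 2*i and undef lanes are left as zero. A standalone sketch, using the well-known 128-bit lane-swap mask as a check:

#include <array>
#include <cstdio>

// Build the 8-bit xvpermi.d immediate for a v4i64/v4f64 single-input
// shuffle: destination lane i reads source element imm[2*i+1 : 2*i].
// Standalone illustration of the loop above.
static unsigned xvpermiDImm(const std::array<int, 4> &Mask) {
  unsigned Imm = 0;
  for (unsigned i = 0; i < 4; ++i) {
    if (Mask[i] == -1)
      continue; // undef lanes stay 0
    Imm |= unsigned(Mask[i]) << (i * 2);
  }
  return Imm;
}

int main() {
  // <2,3,0,1> swaps the two 128-bit halves of a 256-bit register: 0x4e.
  printf("0x%x\n", xvpermiDImm({2, 3, 0, 1}));
  // <0,0,0,0> broadcasts element 0: 0x0.
  printf("0x%x\n", xvpermiDImm({0, 0, 0, 0}));
}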
+ auto checkReplaceOne = [&](int Base, int Replaced) -> int { + int Idx = -1; + for (int i = 0; i < MaskSize; ++i) { + if (Mask[i] == Base + i || Mask[i] == -1) + continue; + if (Mask[i] != Replaced) + return -1; + if (Idx == -1) + Idx = i; + else + return -1; + } + return Idx; + }; + + // Case 1: the lowest element of V2 replaces one element in V1. + int Idx = checkReplaceOne(0, MaskSize); + if (Idx != -1) + return DAG.getNode(LoongArchISD::XVINSVE0, DL, VT, V1, V2, + DAG.getConstant(Idx, DL, GRLenVT)); + + // Case 2: the lowest element of V1 replaces one element in V2. + Idx = checkReplaceOne(MaskSize, 0); + if (Idx != -1) + return DAG.getNode(LoongArchISD::XVINSVE0, DL, VT, V2, V1, + DAG.getConstant(Idx, DL, GRLenVT)); + + return SDValue(); +} + /// Lower VECTOR_SHUFFLE into XVSHUF (if possible). static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef Mask, MVT VT, SDValue V1, SDValue V2, @@ -2353,8 +2434,10 @@ static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef Mask, /// The first case is the closest to LoongArch instructions and the other /// cases need to be converted to it for processing. /// -/// This function may modify V1, V2 and Mask -static void canonicalizeShuffleVectorByLane( +/// This function will return true for the last three cases above and will +/// modify V1, V2 and Mask. Otherwise, return false for the first case and +/// cross-lane shuffle cases. +static bool canonicalizeShuffleVectorByLane( const SDLoc &DL, MutableArrayRef Mask, MVT VT, SDValue &V1, SDValue &V2, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget) { @@ -2378,15 +2461,15 @@ static void canonicalizeShuffleVectorByLane( preMask = LowLaneTy; if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) { - return M < 0 || (M >= 0 && M < HalfSize) || - (M >= MaskSize && M < MaskSize + HalfSize); + return M < 0 || (M >= HalfSize && M < MaskSize) || + (M >= MaskSize + HalfSize && M < MaskSize * 2); })) - postMask = HighLaneTy; + postMask = LowLaneTy; else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) { - return M < 0 || (M >= HalfSize && M < MaskSize) || - (M >= MaskSize + HalfSize && M < MaskSize * 2); + return M < 0 || (M >= 0 && M < HalfSize) || + (M >= MaskSize && M < MaskSize + HalfSize); })) - postMask = LowLaneTy; + postMask = HighLaneTy; // The pre-half of mask is high lane type, and the post-half of mask // is low lane type, which is closest to the LoongArch instructions. @@ -2395,7 +2478,7 @@ static void canonicalizeShuffleVectorByLane( // to the lower 128-bit of vector register, and the low lane of mask // corresponds the higher 128-bit of vector register. if (preMask == HighLaneTy && postMask == LowLaneTy) { - return; + return false; } if (preMask == LowLaneTy && postMask == HighLaneTy) { V1 = DAG.getBitcast(MVT::v4i64, V1); @@ -2449,8 +2532,10 @@ static void canonicalizeShuffleVectorByLane( *it = *it < 0 ? *it : *it + HalfSize; } } else { // cross-lane - return; + return false; } + + return true; } /// Lower VECTOR_SHUFFLE as lane permute and then shuffle (if possible). 
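The checkReplaceOne lambda above captures the shape xvinsve0.{w,d} can implement: the mask is the identity on one input except for a single element that reads lane 0 of the other input. A self-contained version of that check with a worked v8i32 example (names are illustrative, not the LLVM ones):

#include <cstdio>
#include <vector>

// Return the index of the single mask element equal to `Replaced` while every
// other element is either `Base + i` (identity on one input) or -1 (undef);
// return -1 otherwise.
static int checkReplaceOne(const std::vector<int> &Mask, int Base, int Replaced) {
  int Idx = -1;
  for (int i = 0, e = (int)Mask.size(); i != e; ++i) {
    if (Mask[i] == Base + i || Mask[i] == -1)
      continue;
    if (Mask[i] != Replaced || Idx != -1)
      return -1;
    Idx = i;
  }
  return Idx;
}

int main() {
  // v8i32 shuffle <0,1,2,8,4,5,6,7>: element 3 of V1 is replaced by lane 0 of
  // V2 (mask value 8 == MaskSize), so this maps to XVINSVE0_W with index 3.
  std::vector<int> M = {0, 1, 2, 8, 4, 5, 6, 7};
  printf("%d\n", checkReplaceOne(M, /*Base=*/0, /*Replaced=*/8)); // 3
  printf("%d\n", checkReplaceOne(M, /*Base=*/8, /*Replaced=*/0)); // -1
}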
@@ -2516,27 +2601,23 @@ static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef Mask, MVT VT, assert(Mask.size() % 2 == 0 && "Expected even mask size."); assert(Mask.size() >= 4 && "Mask size is less than 4."); - // canonicalize non cross-lane shuffle vector - SmallVector NewMask(Mask); - canonicalizeShuffleVectorByLane(DL, NewMask, VT, V1, V2, DAG, Subtarget); - APInt KnownUndef, KnownZero; - computeZeroableShuffleElements(NewMask, V1, V2, KnownUndef, KnownZero); + computeZeroableShuffleElements(Mask, V1, V2, KnownUndef, KnownZero); APInt Zeroable = KnownUndef | KnownZero; SDValue Result; // TODO: Add more comparison patterns. if (V2.isUndef()) { - if ((Result = lowerVECTOR_SHUFFLE_XVREPLVEI(DL, NewMask, VT, V1, V2, DAG, - Subtarget))) + if ((Result = + lowerVECTOR_SHUFFLE_XVREPLVEI(DL, Mask, VT, V1, DAG, Subtarget))) return Result; - if ((Result = lowerVECTOR_SHUFFLE_XVSHUF4I(DL, NewMask, VT, V1, V2, DAG, + if ((Result = lowerVECTOR_SHUFFLE_XVSHUF4I(DL, Mask, VT, V1, V2, DAG, Subtarget))) return Result; - if ((Result = lowerVECTOR_SHUFFLE_XVPERM(DL, NewMask, VT, V1, V2, DAG))) + if ((Result = + lowerVECTOR_SHUFFLE_XVPERMI(DL, Mask, VT, V1, DAG, Subtarget))) return Result; - if ((Result = lowerVECTOR_SHUFFLEAsLanePermuteAndShuffle(DL, NewMask, VT, - V1, V2, DAG))) + if ((Result = lowerVECTOR_SHUFFLE_XVPERM(DL, Mask, VT, V1, DAG, Subtarget))) return Result; // TODO: This comment may be enabled in the future to better match the @@ -2546,24 +2627,42 @@ static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef Mask, MVT VT, // It is recommended not to change the pattern comparison order for better // performance. - if ((Result = lowerVECTOR_SHUFFLE_XVPACKEV(DL, NewMask, VT, V1, V2, DAG))) + if ((Result = lowerVECTOR_SHUFFLE_XVPACKEV(DL, Mask, VT, V1, V2, DAG))) + return Result; + if ((Result = lowerVECTOR_SHUFFLE_XVPACKOD(DL, Mask, VT, V1, V2, DAG))) return Result; - if ((Result = lowerVECTOR_SHUFFLE_XVPACKOD(DL, NewMask, VT, V1, V2, DAG))) + if ((Result = lowerVECTOR_SHUFFLE_XVILVH(DL, Mask, VT, V1, V2, DAG))) return Result; - if ((Result = lowerVECTOR_SHUFFLE_XVILVH(DL, NewMask, VT, V1, V2, DAG))) + if ((Result = lowerVECTOR_SHUFFLE_XVILVL(DL, Mask, VT, V1, V2, DAG))) return Result; - if ((Result = lowerVECTOR_SHUFFLE_XVILVL(DL, NewMask, VT, V1, V2, DAG))) + if ((Result = lowerVECTOR_SHUFFLE_XVPICKEV(DL, Mask, VT, V1, V2, DAG))) return Result; - if ((Result = lowerVECTOR_SHUFFLE_XVPICKEV(DL, NewMask, VT, V1, V2, DAG))) + if ((Result = lowerVECTOR_SHUFFLE_XVPICKOD(DL, Mask, VT, V1, V2, DAG))) return Result; - if ((Result = lowerVECTOR_SHUFFLE_XVPICKOD(DL, NewMask, VT, V1, V2, DAG))) + if ((Result = lowerVECTOR_SHUFFLEAsShift(DL, Mask, VT, V1, V2, DAG, Subtarget, + Zeroable))) return Result; - if ((Result = lowerVECTOR_SHUFFLEAsShift(DL, NewMask, VT, V1, V2, DAG, - Subtarget, Zeroable))) + if ((Result = + lowerVECTOR_SHUFFLE_XVINSVE0(DL, Mask, VT, V1, V2, DAG, Subtarget))) return Result; - if ((Result = lowerVECTOR_SHUFFLEAsByteRotate(DL, NewMask, VT, V1, V2, DAG, + if ((Result = lowerVECTOR_SHUFFLEAsByteRotate(DL, Mask, VT, V1, V2, DAG, Subtarget))) return Result; + + // canonicalize non cross-lane shuffle vector + SmallVector NewMask(Mask); + if (canonicalizeShuffleVectorByLane(DL, NewMask, VT, V1, V2, DAG, Subtarget)) + return lower256BitShuffle(DL, NewMask, VT, V1, V2, DAG, Subtarget); + + // FIXME: Handling the remaining cases earlier can degrade performance + // in some situations. Further analysis is required to enable more + // effective optimizations. 
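The restructuring above also changes when lane canonicalization happens: the single-instruction patterns are now tried on the original mask, and only if all of them fail does the code canonicalize by lane and re-enter lower256BitShuffle once with the rewritten mask, which is why canonicalizeShuffleVectorByLane now reports whether it changed anything. A control-flow sketch under those assumptions, with placeholder types rather than the LLVM ones:

#include <functional>
#include <optional>
#include <string>
#include <vector>

// Try each cheap matcher on the original mask; if none fires, canonicalize by
// lane and retry the whole matcher once on the rewritten mask.
using Mask = std::vector<int>;
using Matcher = std::function<std::optional<std::string>(const Mask &)>;
using Canonicalizer = std::function<bool(Mask &)>; // true iff the mask changed

static std::optional<std::string>
lowerShuffle(Mask M, const std::vector<Matcher> &Patterns,
             const Canonicalizer &CanonicalizeByLane) {
  for (const Matcher &TryPattern : Patterns)
    if (std::optional<std::string> Inst = TryPattern(M))
      return Inst; // single-instruction match on the original mask
  if (CanonicalizeByLane(M)) // second pass runs on the canonicalized mask
    return lowerShuffle(M, Patterns, [](Mask &) { return false; });
  return std::nullopt; // fall through to the generic xvshuf lowering
}

int main() {
  // No matchers and a canonicalizer that never rewrites: generic fallback.
  auto R = lowerShuffle({0, 1, 2, 3}, {}, [](Mask &) { return false; });
  return R.has_value() ? 0 : 1;
}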
+ if (V2.isUndef()) { + if ((Result = lowerVECTOR_SHUFFLEAsLanePermuteAndShuffle(DL, NewMask, VT, + V1, V2, DAG))) + return Result; + } + if (SDValue NewShuffle = widenShuffleMask(DL, NewMask, VT, V1, V2, DAG)) return NewShuffle; if ((Result = lowerVECTOR_SHUFFLE_XVSHUF(DL, NewMask, VT, V1, V2, DAG))) @@ -2804,9 +2903,10 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op, if (SplatBitSize == 64 && !Subtarget.is64Bit()) { // We can only handle 64-bit elements that are within - // the signed 10-bit range on 32-bit targets. + // the signed 10-bit range or match vldi patterns on 32-bit targets. // See the BUILD_VECTOR case in LoongArchDAGToDAGISel::Select(). - if (!SplatValue.isSignedIntN(10)) + if (!SplatValue.isSignedIntN(10) && + !isImmVLDILegalForMode1(SplatValue, SplatBitSize).first) return SDValue(); if ((Is128Vec && ResTy == MVT::v4i32) || (Is256Vec && ResTy == MVT::v8i32)) @@ -3102,12 +3202,33 @@ LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, return SDValue(); SDValue SplatElt = DAG.getSplatBuildVector(VT, DL, Op1); - SDValue SplatIdx = DAG.getSplatBuildVector(IdxVTy, DL, Op2); - SmallVector RawIndices; - for (unsigned i = 0; i < NumElts; ++i) - RawIndices.push_back(DAG.getConstant(i, DL, Subtarget.getGRLenVT())); - SDValue Indices = DAG.getBuildVector(IdxVTy, DL, RawIndices); + SDValue SplatIdx; + SDValue Indices; + + if (!Subtarget.is64Bit() && IdxTy == MVT::i64) { + MVT PairVTy = MVT::getVectorVT(MVT::i32, NumElts * 2); + for (unsigned i = 0; i < NumElts; ++i) { + RawIndices.push_back(Op2); + RawIndices.push_back(DAG.getConstant(0, DL, MVT::i32)); + } + SplatIdx = DAG.getBuildVector(PairVTy, DL, RawIndices); + SplatIdx = DAG.getBitcast(IdxVTy, SplatIdx); + + RawIndices.clear(); + for (unsigned i = 0; i < NumElts; ++i) { + RawIndices.push_back(DAG.getConstant(i, DL, MVT::i32)); + RawIndices.push_back(DAG.getConstant(0, DL, MVT::i32)); + } + Indices = DAG.getBuildVector(PairVTy, DL, RawIndices); + Indices = DAG.getBitcast(IdxVTy, Indices); + } else { + SplatIdx = DAG.getSplatBuildVector(IdxVTy, DL, Op2); + + for (unsigned i = 0; i < NumElts; ++i) + RawIndices.push_back(DAG.getConstant(i, DL, Subtarget.getGRLenVT())); + Indices = DAG.getBuildVector(IdxVTy, DL, RawIndices); + } // insert vec, elt, idx // => @@ -5129,7 +5250,7 @@ performSETCC_BITCASTCombine(SDNode *N, SelectionDAG &DAG, if (Opc == ISD::DELETED_NODE) return SDValue(); - SDValue V = DAG.getNode(Opc, DL, MVT::i64, Src.getOperand(0)); + SDValue V = DAG.getNode(Opc, DL, Subtarget.getGRLenVT(), Src.getOperand(0)); EVT T = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements()); V = DAG.getZExtOrTrunc(V, DL, T); return DAG.getBitcast(VT, V); @@ -5142,6 +5263,7 @@ static SDValue performBITCASTCombine(SDNode *N, SelectionDAG &DAG, EVT VT = N->getValueType(0); SDValue Src = N->getOperand(0); EVT SrcVT = Src.getValueType(); + MVT GRLenVT = Subtarget.getGRLenVT(); if (!DCI.isBeforeLegalizeOps()) return SDValue(); @@ -5209,11 +5331,11 @@ static SDValue performBITCASTCombine(SDNode *N, SelectionDAG &DAG, if (Src.getSimpleValueType() == MVT::v32i8) { SDValue Lo, Hi; std::tie(Lo, Hi) = DAG.SplitVector(Src, DL); - Lo = DAG.getNode(LoongArchISD::VMSKLTZ, DL, MVT::i64, Lo); - Hi = DAG.getNode(LoongArchISD::VMSKLTZ, DL, MVT::i64, Hi); - Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi, + Lo = DAG.getNode(LoongArchISD::VMSKLTZ, DL, GRLenVT, Lo); + Hi = DAG.getNode(LoongArchISD::VMSKLTZ, DL, GRLenVT, Hi); + Hi = DAG.getNode(ISD::SHL, DL, GRLenVT, Hi, DAG.getConstant(16, DL, MVT::i8)); - V = 
DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi); + V = DAG.getNode(ISD::OR, DL, GRLenVT, Lo, Hi); } else if (UseLASX) { return SDValue(); } @@ -5221,7 +5343,7 @@ static SDValue performBITCASTCombine(SDNode *N, SelectionDAG &DAG, if (!V) { Opc = UseLASX ? LoongArchISD::XVMSKLTZ : LoongArchISD::VMSKLTZ; - V = DAG.getNode(Opc, DL, MVT::i64, Src); + V = DAG.getNode(Opc, DL, GRLenVT, Src); } EVT T = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements()); @@ -5878,6 +6000,22 @@ static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG) { return DAG.getNode(ISD::XOR, DL, ResTy, Node->getOperand(1), BitImm); } +template +static SDValue lowerVectorPickVE2GR(SDNode *N, SelectionDAG &DAG, + unsigned ResOp) { + unsigned Imm = N->getConstantOperandVal(2); + if (!isUInt(Imm)) { + const StringRef ErrorMsg = "argument out of range"; + DAG.getContext()->emitError(N->getOperationName(0) + ": " + ErrorMsg + "."); + return DAG.getUNDEF(N->getValueType(0)); + } + SDLoc DL(N); + SDValue Vec = N->getOperand(1); + SDValue Idx = DAG.getConstant(Imm, DL, MVT::i32); + SDValue EltVT = DAG.getValueType(Vec.getValueType().getVectorElementType()); + return DAG.getNode(ResOp, DL, N->getValueType(0), Vec, Idx, EltVT); +} + static SDValue performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, @@ -6367,6 +6505,68 @@ performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG, N->getOperand(1), DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getGRLenVT(), N->getOperand(2))); + case Intrinsic::loongarch_lsx_vpickve2gr_b: + if (!Subtarget.is64Bit()) + return lowerVectorPickVE2GR<4>(N, DAG, LoongArchISD::VPICK_SEXT_ELT); + break; + case Intrinsic::loongarch_lsx_vpickve2gr_h: + case Intrinsic::loongarch_lasx_xvpickve2gr_w: + if (!Subtarget.is64Bit()) + return lowerVectorPickVE2GR<3>(N, DAG, LoongArchISD::VPICK_SEXT_ELT); + break; + case Intrinsic::loongarch_lsx_vpickve2gr_w: + if (!Subtarget.is64Bit()) + return lowerVectorPickVE2GR<2>(N, DAG, LoongArchISD::VPICK_SEXT_ELT); + break; + case Intrinsic::loongarch_lsx_vpickve2gr_bu: + if (!Subtarget.is64Bit()) + return lowerVectorPickVE2GR<4>(N, DAG, LoongArchISD::VPICK_ZEXT_ELT); + break; + case Intrinsic::loongarch_lsx_vpickve2gr_hu: + case Intrinsic::loongarch_lasx_xvpickve2gr_wu: + if (!Subtarget.is64Bit()) + return lowerVectorPickVE2GR<3>(N, DAG, LoongArchISD::VPICK_ZEXT_ELT); + break; + case Intrinsic::loongarch_lsx_vpickve2gr_wu: + if (!Subtarget.is64Bit()) + return lowerVectorPickVE2GR<2>(N, DAG, LoongArchISD::VPICK_ZEXT_ELT); + break; + case Intrinsic::loongarch_lsx_bz_b: + case Intrinsic::loongarch_lsx_bz_h: + case Intrinsic::loongarch_lsx_bz_w: + case Intrinsic::loongarch_lsx_bz_d: + case Intrinsic::loongarch_lasx_xbz_b: + case Intrinsic::loongarch_lasx_xbz_h: + case Intrinsic::loongarch_lasx_xbz_w: + case Intrinsic::loongarch_lasx_xbz_d: + if (!Subtarget.is64Bit()) + return DAG.getNode(LoongArchISD::VALL_ZERO, DL, N->getValueType(0), + N->getOperand(1)); + break; + case Intrinsic::loongarch_lsx_bz_v: + case Intrinsic::loongarch_lasx_xbz_v: + if (!Subtarget.is64Bit()) + return DAG.getNode(LoongArchISD::VANY_ZERO, DL, N->getValueType(0), + N->getOperand(1)); + break; + case Intrinsic::loongarch_lsx_bnz_b: + case Intrinsic::loongarch_lsx_bnz_h: + case Intrinsic::loongarch_lsx_bnz_w: + case Intrinsic::loongarch_lsx_bnz_d: + case Intrinsic::loongarch_lasx_xbnz_b: + case Intrinsic::loongarch_lasx_xbnz_h: + case Intrinsic::loongarch_lasx_xbnz_w: + case Intrinsic::loongarch_lasx_xbnz_d: + if (!Subtarget.is64Bit()) + 
return DAG.getNode(LoongArchISD::VALL_NONZERO, DL, N->getValueType(0), + N->getOperand(1)); + break; + case Intrinsic::loongarch_lsx_bnz_v: + case Intrinsic::loongarch_lasx_xbnz_v: + if (!Subtarget.is64Bit()) + return DAG.getNode(LoongArchISD::VANY_NONZERO, DL, N->getValueType(0), + N->getOperand(1)); + break; } return SDValue(); } @@ -7303,6 +7503,7 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(XVPERM) NODE_NAME_CASE(XVREPLVE0) NODE_NAME_CASE(XVREPLVE0Q) + NODE_NAME_CASE(XVINSVE0) NODE_NAME_CASE(VPICK_SEXT_ELT) NODE_NAME_CASE(VPICK_ZEXT_ELT) NODE_NAME_CASE(VREPLVE) @@ -8396,6 +8597,87 @@ SDValue LoongArchTargetLowering::LowerReturn( return DAG.getNode(LoongArchISD::RET, DL, MVT::Other, RetOps); } +// Check if a constant splat can be generated using [x]vldi, where imm[12] == 1. +// Note: The following prefixes are excluded: +// imm[11:8] == 4'b0000, 4'b0100, 4'b1000 +// as they can be represented using [x]vrepli.[whb] +std::pair LoongArchTargetLowering::isImmVLDILegalForMode1( + const APInt &SplatValue, const unsigned SplatBitSize) const { + uint64_t RequiredImm = 0; + uint64_t V = SplatValue.getZExtValue(); + if (SplatBitSize == 16 && !(V & 0x00FF)) { + // 4'b0101 + RequiredImm = (0b10101 << 8) | (V >> 8); + return {true, RequiredImm}; + } else if (SplatBitSize == 32) { + // 4'b0001 + if (!(V & 0xFFFF00FF)) { + RequiredImm = (0b10001 << 8) | (V >> 8); + return {true, RequiredImm}; + } + // 4'b0010 + if (!(V & 0xFF00FFFF)) { + RequiredImm = (0b10010 << 8) | (V >> 16); + return {true, RequiredImm}; + } + // 4'b0011 + if (!(V & 0x00FFFFFF)) { + RequiredImm = (0b10011 << 8) | (V >> 24); + return {true, RequiredImm}; + } + // 4'b0110 + if ((V & 0xFFFF00FF) == 0xFF) { + RequiredImm = (0b10110 << 8) | (V >> 8); + return {true, RequiredImm}; + } + // 4'b0111 + if ((V & 0xFF00FFFF) == 0xFFFF) { + RequiredImm = (0b10111 << 8) | (V >> 16); + return {true, RequiredImm}; + } + // 4'b1010 + if ((V & 0x7E07FFFF) == 0x3E000000 || (V & 0x7E07FFFF) == 0x40000000) { + RequiredImm = + (0b11010 << 8) | (((V >> 24) & 0xC0) ^ 0x40) | ((V >> 19) & 0x3F); + return {true, RequiredImm}; + } + } else if (SplatBitSize == 64) { + // 4'b1011 + if ((V & 0xFFFFFFFF7E07FFFFULL) == 0x3E000000ULL || + (V & 0xFFFFFFFF7E07FFFFULL) == 0x40000000ULL) { + RequiredImm = + (0b11011 << 8) | (((V >> 24) & 0xC0) ^ 0x40) | ((V >> 19) & 0x3F); + return {true, RequiredImm}; + } + // 4'b1100 + if ((V & 0x7FC0FFFFFFFFFFFFULL) == 0x4000000000000000ULL || + (V & 0x7FC0FFFFFFFFFFFFULL) == 0x3FC0000000000000ULL) { + RequiredImm = + (0b11100 << 8) | (((V >> 56) & 0xC0) ^ 0x40) | ((V >> 48) & 0x3F); + return {true, RequiredImm}; + } + // 4'b1001 + auto sameBitsPreByte = [](uint64_t x) -> std::pair { + uint8_t res = 0; + for (int i = 0; i < 8; ++i) { + uint8_t byte = x & 0xFF; + if (byte == 0 || byte == 0xFF) + res |= ((byte & 1) << i); + else + return {false, 0}; + x >>= 8; + } + return {true, res}; + }; + auto [IsSame, Suffix] = sameBitsPreByte(V); + if (IsSame) { + RequiredImm = (0b11001 << 8) | Suffix; + return {true, RequiredImm}; + } + } + return {false, RequiredImm}; +} + bool LoongArchTargetLowering::isFPImmVLDILegal(const APFloat &Imm, EVT VT) const { if (!Subtarget.hasExtLSX()) @@ -8460,8 +8742,12 @@ EVT LoongArchTargetLowering::getSetCCResultType(const DataLayout &DL, } bool LoongArchTargetLowering::hasAndNot(SDValue Y) const { - // TODO: Support vectors. 
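isImmVLDILegalForMode1 above recognizes splat constants that a single [x]vldi with imm[12] == 1 can materialize; the returned 13-bit value carries the mode in bits [11:8] and the payload byte in bits [7:0]. A reduced standalone sketch covering just two of the modes (the real function handles several more):

#include <cstdint>
#include <cstdio>
#include <utility>

// Return {true, imm13} when the splat can be produced by one [x]vldi with
// imm[12] == 1. Reduced sketch: only the 16-bit mode 0101 (payload byte in
// the high half of each i16) and the 32-bit mode 0001 (payload byte shifted
// left by 8 in each i32) are shown.
static std::pair<bool, uint64_t> vldiMode1Imm(uint64_t V, unsigned SplatBitSize) {
  if (SplatBitSize == 16 && !(V & 0x00FF))
    return {true, (0b10101u << 8) | (V >> 8)};
  if (SplatBitSize == 32 && !(V & 0xFFFF00FF))
    return {true, (0b10001u << 8) | (V >> 8)};
  return {false, 0};
}

int main() {
  auto [Ok, Imm] = vldiMode1Imm(0x5600, 16);
  // An i16 splat of 0x5600 maps to vldi immediate 0x1556 (mode 0101, 0x56).
  printf("%d 0x%llx\n", Ok, (unsigned long long)Imm);
}

In the instruction selection earlier in this patch the returned value is then passed through SignExtend32<13>, since the vldi operand is a signed 13-bit immediate and imm[12] == 1 makes it negative.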
- return Y.getValueType().isScalarInteger() && !isa(Y); + EVT VT = Y.getValueType(); + + if (VT.isVector()) + return Subtarget.hasExtLSX() && VT.isInteger(); + + return VT.isScalarInteger() && !isa(Y); } bool LoongArchTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, @@ -9305,3 +9591,39 @@ bool LoongArchTargetLowering::SimplifyDemandedBitsForTargetNode( return TargetLowering::SimplifyDemandedBitsForTargetNode( Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth); } + +bool LoongArchTargetLowering::shouldScalarizeBinop(SDValue VecOp) const { + unsigned Opc = VecOp.getOpcode(); + + // Assume target opcodes can't be scalarized. + // TODO - do we have any exceptions? + if (Opc >= ISD::BUILTIN_OP_END || !isBinOp(Opc)) + return false; + + // If the vector op is not supported, try to convert to scalar. + EVT VecVT = VecOp.getValueType(); + if (!isOperationLegalOrCustomOrPromote(Opc, VecVT)) + return true; + + // If the vector op is supported, but the scalar op is not, the transform may + // not be worthwhile. + EVT ScalarVT = VecVT.getScalarType(); + return isOperationLegalOrCustomOrPromote(Opc, ScalarVT); +} + +bool LoongArchTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, + unsigned Index) const { + if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) + return false; + + // Extract a 128-bit subvector from index 0 of a 256-bit vector is free. + return Index == 0; +} + +bool LoongArchTargetLowering::isExtractVecEltCheap(EVT VT, + unsigned Index) const { + EVT EltVT = VT.getScalarType(); + + // Extract a scalar FP value from index 0 of a vector is free. + return (EltVT == MVT::f32 || EltVT == MVT::f64) && Index == 0; +} diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h index 19c85faa9f9cc..8a4d7748467c7 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h @@ -151,6 +151,7 @@ enum NodeType : unsigned { XVPERM, XVREPLVE0, XVREPLVE0Q, + XVINSVE0, // Extended vector element extraction VPICK_SEXT_ELT, @@ -337,6 +338,17 @@ class LoongArchTargetLowering : public TargetLowering { TargetLoweringOpt &TLO, unsigned Depth) const override; + bool shouldScalarizeBinop(SDValue VecOp) const override; + bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, + unsigned Index) const override; + bool isExtractVecEltCheap(EVT VT, unsigned Index) const override; + + /// Check if a constant splat can be generated using [x]vldi, where imm[12] + /// is 1. + std::pair + isImmVLDILegalForMode1(const APInt &SplatValue, + const unsigned SplatBitSize) const; + private: /// Target-specific function used to lower LoongArch calling conventions. typedef bool LoongArchCCAssignFn(const DataLayout &DL, LoongArchABI::ABI ABI, diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td index 20ccc622f58dc..9565a55e4c6c5 100644 --- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td @@ -943,7 +943,7 @@ let Predicates = [IsLA64] in { def ADD_D : ALU_3R<0x00108000>; def SUB_D : ALU_3R<0x00118000>; // ADDI_D isn't always rematerializable, but isReMaterializable will be used as -// a hint which is verified in isReallyTriviallyReMaterializable. +// a hint which is verified in isReMaterializableImpl. // See LoongArchInstrInfo::isAsCheapAsAMove for more details. 
let isReMaterializable = 1, isAsCheapAsAMove = 1 in { def ADDI_D : ALU_2RI12<0x02c00000, simm12_addlike>; diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td index adfe990ba1234..5143d53bad719 100644 --- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td @@ -20,6 +20,7 @@ def loongarch_xvpermi: SDNode<"LoongArchISD::XVPERMI", SDT_LoongArchV1RUimm>; def loongarch_xvperm: SDNode<"LoongArchISD::XVPERM", SDT_LoongArchXVPERM>; def loongarch_xvreplve0: SDNode<"LoongArchISD::XVREPLVE0", SDT_LoongArchXVREPLVE0>; def loongarch_xvreplve0q: SDNode<"LoongArchISD::XVREPLVE0Q", SDT_LoongArchXVREPLVE0>; +def loongarch_xvinsve0 : SDNode<"LoongArchISD::XVINSVE0", SDT_LoongArchV2RUimm>; def loongarch_xvmskltz: SDNode<"LoongArchISD::XVMSKLTZ", SDT_LoongArchVMSKCOND>; def loongarch_xvmskgez: SDNode<"LoongArchISD::XVMSKGEZ", SDT_LoongArchVMSKCOND>; def loongarch_xvmskeqz: SDNode<"LoongArchISD::XVMSKEQZ", SDT_LoongArchVMSKCOND>; @@ -1708,6 +1709,14 @@ def : Pat<(vector_insert v4f64:$xd, (f64(bitconvert i64:$rj)), uimm2:$imm), (XVINSGR2VR_D v4f64:$xd, GPR:$rj, uimm2:$imm)>; // XVINSVE0_{W/D} +def : Pat<(loongarch_xvinsve0 v8i32:$xd, v8i32:$xj, uimm3:$imm), + (XVINSVE0_W v8i32:$xd, v8i32:$xj, uimm3:$imm)>; +def : Pat<(loongarch_xvinsve0 v4i64:$xd, v4i64:$xj, uimm2:$imm), + (XVINSVE0_D v4i64:$xd, v4i64:$xj, uimm2:$imm)>; +def : Pat<(loongarch_xvinsve0 v8f32:$xd, v8f32:$xj, uimm3:$imm), + (XVINSVE0_W v8f32:$xd, v8f32:$xj, uimm3:$imm)>; +def : Pat<(loongarch_xvinsve0 v4f64:$xd, v4f64:$xj, uimm2:$imm), + (XVINSVE0_D v4f64:$xd, v4f64:$xj, uimm2:$imm)>; def : Pat<(vector_insert v8f32:$xd, FPR32:$fj, uimm3:$imm), (XVINSVE0_W v8f32:$xd, (SUBREG_TO_REG(i64 0), FPR32:$fj, sub_32), uimm3:$imm)>; @@ -2015,10 +2024,26 @@ def : Pat<(v4i32(fp_to_uint v4f64:$vj)), (XVFTINTRZ_LU_D v4f64:$vj)), sub_128)>; +// abs +def : Pat<(abs v32i8:$xj), (XVMAX_B v32i8:$xj, (XVNEG_B v32i8:$xj))>; +def : Pat<(abs v16i16:$xj), (XVMAX_H v16i16:$xj, (XVNEG_H v16i16:$xj))>; +def : Pat<(abs v8i32:$xj), (XVMAX_W v8i32:$xj, (XVNEG_W v8i32:$xj))>; +def : Pat<(abs v4i64:$xj), (XVMAX_D v4i64:$xj, (XVNEG_D v4i64:$xj))>; + // XVABSD_{B/H/W/D}[U] defm : PatXrXr; defm : PatXrXrU; +// XVADDA_{B/H/W/D} +def : Pat<(add (v32i8 (abs v32i8:$xj)), (v32i8 (abs v32i8:$xk))), + (XVADDA_B v32i8:$xj, v32i8:$xk)>; +def : Pat<(add (v16i16 (abs v16i16:$xj)), (v16i16 (abs v16i16:$xk))), + (XVADDA_H v16i16:$xj, v16i16:$xk)>; +def : Pat<(add (v8i32 (abs v8i32:$xj)), (v8i32 (abs v8i32:$xk))), + (XVADDA_W v8i32:$xj, v8i32:$xk)>; +def : Pat<(add (v4i64 (abs v4i64:$xj)), (v4i64 (abs v4i64:$xk))), + (XVADDA_D v4i64:$xj, v4i64:$xk)>; + // XVSADD_{B/H/W/D}[U], XVSSUB_{B/H/W/D}[U] defm : PatXrXr; defm : PatXrXr; diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td index d99a57e562528..8d1dc99e316c9 100644 --- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td @@ -26,7 +26,7 @@ def SDT_LoongArchV1RUimm: SDTypeProfile<1, 2, [SDTCisVec<0>, def SDT_LoongArchV2RUimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, - SDTCisVT<3, i64>]>; + SDTCisVT<3, GRLenVT>]>; def SDT_LoongArchVreplgr2vr : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<0>, SDTCisInt<1>]>; def SDT_LoongArchVFRECIPE : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVec<0>, SDTCisSameAs<0, 1>]>; def SDT_LoongArchVFRSQRTE : SDTypeProfile<1, 1, [SDTCisFP<0>, 
SDTCisVec<0>, SDTCisSameAs<0, 1>]>; @@ -158,6 +158,7 @@ def vsplatf32_fpimm_eq_1 N = N->getOperand(0).getNode(); return selectVSplat(N, Imm, EltTy.getSizeInBits()) && + Imm.getBitWidth() == 32 && Imm.getBitWidth() == EltTy.getSizeInBits() && Imm == APFloat(+1.0f).bitcastToAPInt(); }]>; @@ -1482,7 +1483,7 @@ multiclass VldreplPat { } multiclass VstelmPat { + Operand ImmOpnd, Operand IdxOpnd, ValueType elt = GRLenVT> { def : Pat<(StoreOp(elt(vector_extract vt:$vd, IdxOpnd:$idx)), BaseAddr:$rj), (Inst vt:$vd, BaseAddr:$rj, 0, IdxOpnd:$idx)>; @@ -2110,8 +2111,8 @@ def : Pat<(GRLenVT (vector_extract v4i32:$vj, GRLenVT:$rk)), (COPY_TO_REGCLASS (f32 (EXTRACT_SUBREG (VREPLVE_W v4i32:$vj, GRLenVT:$rk), sub_32)), GPR)>; -def : Pat<(i64 (vector_extract v2i64:$vj, i64:$rk)), - (COPY_TO_REGCLASS (f64 (EXTRACT_SUBREG (VREPLVE_D v2i64:$vj, i64:$rk), +def : Pat<(GRLenVT (vector_extract v2i64:$vj, GRLenVT:$rk)), + (COPY_TO_REGCLASS (f64 (EXTRACT_SUBREG (VREPLVE_D v2i64:$vj, GRLenVT:$rk), sub_64)), GPR)>; def : Pat<(f32 (vector_extract v4f32:$vj, GRLenVT:$rk)), @@ -2153,10 +2154,26 @@ def : Pat<(f32 f32imm_vldi:$in), def : Pat<(f64 f64imm_vldi:$in), (f64 (EXTRACT_SUBREG (VLDI (to_f64imm_vldi f64imm_vldi:$in)), sub_64))>; +// abs +def : Pat<(abs v16i8:$vj), (VMAX_B v16i8:$vj, (VNEG_B v16i8:$vj))>; +def : Pat<(abs v8i16:$vj), (VMAX_H v8i16:$vj, (VNEG_H v8i16:$vj))>; +def : Pat<(abs v4i32:$vj), (VMAX_W v4i32:$vj, (VNEG_W v4i32:$vj))>; +def : Pat<(abs v2i64:$vj), (VMAX_D v2i64:$vj, (VNEG_D v2i64:$vj))>; + // VABSD_{B/H/W/D}[U] defm : PatVrVr; defm : PatVrVrU; +// VADDA_{B/H/W/D} +def : Pat<(add (v16i8 (abs v16i8:$vj)), (v16i8 (abs v16i8:$vk))), + (VADDA_B v16i8:$vj, v16i8:$vk)>; +def : Pat<(add (v8i16 (abs v8i16:$vj)), (v8i16 (abs v8i16:$vk))), + (VADDA_H v8i16:$vj, v8i16:$vk)>; +def : Pat<(add (v4i32 (abs v4i32:$vj)), (v4i32 (abs v4i32:$vk))), + (VADDA_W v4i32:$vj, v4i32:$vk)>; +def : Pat<(add (v2i64 (abs v2i64:$vj)), (v2i64 (abs v2i64:$vk))), + (VADDA_D v2i64:$vj, v2i64:$vk)>; + // VSADD_{B/H/W/D}[U], VSSUB_{B/H/W/D}[U] defm : PatVrVr; defm : PatVrVr; diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp index d0a8ababe8e58..c5e26c106b5df 100644 --- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp @@ -57,6 +57,11 @@ static cl::opt cl::desc("Enable the loop data prefetch pass"), cl::init(false)); +static cl::opt + EnableMergeBaseOffset("loongarch-enable-merge-offset", + cl::desc("Enable the merge base offset pass"), + cl::init(true), cl::Hidden); + static Reloc::Model getEffectiveRelocModel(const Triple &TT, std::optional RM) { return RM.value_or(Reloc::Static); @@ -214,7 +219,7 @@ void LoongArchPassConfig::addMachineSSAOptimization() { void LoongArchPassConfig::addPreRegAlloc() { addPass(createLoongArchPreRAExpandPseudoPass()); - if (TM->getOptLevel() != CodeGenOptLevel::None) + if (TM->getOptLevel() != CodeGenOptLevel::None && EnableMergeBaseOffset) addPass(createLoongArchMergeBaseOffsetOptPass()); } diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.cpp index 0d7761777cb7d..8ecb62d0ea7bb 100644 --- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.cpp +++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.cpp @@ -32,6 +32,7 @@ static StringRef getLoongArchSpecifierName(uint16_t S) { return "b16"; case ELF::R_LARCH_B21: return "b21"; + case ELF::R_LARCH_MARK_LA: case 
ELF::R_LARCH_ABS_HI20: return "abs_hi20"; case ELF::R_LARCH_ABS_LO12: diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp index b7ead5e61ab81..f0e2bc4855187 100644 --- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp +++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp @@ -161,6 +161,13 @@ LoongArchMCCodeEmitter::getExprOpValue(const MCInst &MI, const MCOperand &MO, case ELF::R_LARCH_B26: FixupKind = LoongArch::fixup_loongarch_b26; break; + case ELF::R_LARCH_MARK_LA: + // Match gas behavior: generate `R_LARCH_MARK_LA` relocation when using + // `la.abs`. + Fixups.push_back( + MCFixup::create(0, MCConstantExpr::create(0, Ctx), + FirstLiteralRelocationKind + ELF::R_LARCH_MARK_LA)); + [[fallthrough]]; case ELF::R_LARCH_ABS_HI20: FixupKind = LoongArch::fixup_loongarch_abs_hi20; break; diff --git a/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp b/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp index 3e9666f586e0f..e37f3a66fe11f 100644 --- a/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp +++ b/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp @@ -39,10 +39,10 @@ class M68kAsmParser : public MCTargetAsmParser { #include "M68kGenAsmMatcher.inc" // Helpers for Match&Emit. - bool invalidOperand(const SMLoc &Loc, const OperandVector &Operands, + bool invalidOperand(SMLoc Loc, const OperandVector &Operands, const uint64_t &ErrorInfo); - bool missingFeature(const SMLoc &Loc, const uint64_t &ErrorInfo); - bool emit(MCInst &Inst, SMLoc const &Loc, MCStreamer &Out) const; + bool missingFeature(SMLoc Loc, const uint64_t &ErrorInfo); + bool emit(MCInst &Inst, SMLoc Loc, MCStreamer &Out) const; bool parseRegisterName(MCRegister &RegNo, SMLoc Loc, StringRef RegisterName); ParseStatus parseRegister(MCRegister &RegNo); @@ -991,8 +991,7 @@ bool M68kAsmParser::parseInstruction(ParseInstructionInfo &Info, StringRef Name, return false; } -bool M68kAsmParser::invalidOperand(SMLoc const &Loc, - OperandVector const &Operands, +bool M68kAsmParser::invalidOperand(SMLoc Loc, OperandVector const &Operands, uint64_t const &ErrorInfo) { SMLoc ErrorLoc = Loc; char const *Diag = 0; @@ -1015,13 +1014,11 @@ bool M68kAsmParser::invalidOperand(SMLoc const &Loc, return Error(ErrorLoc, Diag); } -bool M68kAsmParser::missingFeature(llvm::SMLoc const &Loc, - uint64_t const &ErrorInfo) { +bool M68kAsmParser::missingFeature(SMLoc Loc, uint64_t const &ErrorInfo) { return Error(Loc, "instruction requires a CPU feature not currently enabled"); } -bool M68kAsmParser::emit(MCInst &Inst, SMLoc const &Loc, - MCStreamer &Out) const { +bool M68kAsmParser::emit(MCInst &Inst, SMLoc Loc, MCStreamer &Out) const { Inst.setLoc(Loc); Out.emitInstruction(Inst, *STI); diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp index a8369f2b28fb7..bbfd0872cc4cd 100644 --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp @@ -28,7 +28,6 @@ #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/Support/Casting.h" #include "llvm/Support/EndianStream.h" -#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include #include diff --git a/llvm/lib/Target/Mips/MipsCallingConv.td b/llvm/lib/Target/Mips/MipsCallingConv.td index 3501f9fbfd2e7..748162525b091 100644 --- a/llvm/lib/Target/Mips/MipsCallingConv.td +++ b/llvm/lib/Target/Mips/MipsCallingConv.td @@ -186,7 +186,8 @@ 
def RetCC_MipsN : CallingConv<[ // // f128 should only occur for the N64 ABI where long double is 128-bit. On // N32, long double is equivalent to double. - CCIfType<[i64], CCIfOrigArgWasF128>>, + CCIfSubtargetNot<"isSingleFloat()", + CCIfType<[i64], CCIfOrigArgWasF128>>>, // Aggregate returns are positioned at the lowest address in the slot for // both little and big-endian targets. When passing in registers, this @@ -316,9 +317,10 @@ def CC_Mips_FixedArg : CallingConv<[ // // f128 should only occur for the N64 ABI where long double is 128-bit. On // N32, long double is equivalent to double. - CCIfType<[i64], - CCIfSubtargetNot<"useSoftFloat()", - CCIfOrigArgWasF128>>>, + CCIfType<[i64], + CCIfSubtargetNot<"isSingleFloat()", + CCIfSubtargetNot<"useSoftFloat()", + CCIfOrigArgWasF128>>>>, CCIfCC<"CallingConv::Fast", CCDelegateTo>, @@ -342,8 +344,8 @@ def CC_Mips : CallingConv<[ // Callee-saved register lists. //===----------------------------------------------------------------------===// -def CSR_SingleFloatOnly : CalleeSavedRegs<(add (sequence "F%u", 31, 20), RA, FP, - (sequence "S%u", 7, 0))>; +def CSR_O32_SingleFloat : CalleeSavedRegs<(add(sequence "F%u", 31, 20), RA, FP, + (sequence "S%u", 7, 0))>; def CSR_O32_FPXX : CalleeSavedRegs<(add (sequence "D%u", 15, 10), RA, FP, (sequence "S%u", 7, 0))> { @@ -357,13 +359,19 @@ def CSR_O32_FP64 : CalleeSavedRegs<(add (decimate (sequence "D%u_64", 30, 20), 2), RA, FP, (sequence "S%u", 7, 0))>; -def CSR_N32 : CalleeSavedRegs<(add D20_64, D22_64, D24_64, D26_64, D28_64, - D30_64, RA_64, FP_64, GP_64, - (sequence "S%u_64", 7, 0))>; +def CSR_N32 : CalleeSavedRegs<(add(decimate(sequence "D%u_64", 30, 20), 2), + RA_64, FP_64, GP_64, (sequence "S%u_64", 7, 0))>; + +def CSR_N32_SingleFloat + : CalleeSavedRegs<(add(decimate(sequence "F%u", 30, 20), 2), RA_64, FP_64, + GP_64, (sequence "S%u_64", 7, 0))>; def CSR_N64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 24), RA_64, FP_64, GP_64, (sequence "S%u_64", 7, 0))>; +def CSR_N64_SingleFloat : CalleeSavedRegs<(add(sequence "F%u", 31, 24), RA_64, + FP_64, GP_64, (sequence "S%u_64", 7, 0))>; + def CSR_Mips16RetHelper : CalleeSavedRegs<(add V0, V1, FP, (sequence "A%u", 3, 0), (sequence "S%u", 7, 0), diff --git a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp index 34ff41f6e02da..78f2e5db40f9d 100644 --- a/llvm/lib/Target/Mips/MipsExpandPseudo.cpp +++ b/llvm/lib/Target/Mips/MipsExpandPseudo.cpp @@ -432,13 +432,24 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword( Register OldVal = I->getOperand(6).getReg(); Register BinOpRes = I->getOperand(7).getReg(); Register StoreVal = I->getOperand(8).getReg(); + bool NoMovnInstr = (IsMin || IsMax) && !STI->hasMips4() && !STI->hasMips32(); const BasicBlock *LLVM_BB = BB.getBasicBlock(); MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *loop1MBB = nullptr; + MachineBasicBlock *loop2MBB = nullptr; + if (NoMovnInstr) { + loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); + loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); + } MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB); MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); MachineFunction::iterator It = ++BB.getIterator(); MF->insert(It, loopMBB); + if (NoMovnInstr) { + MF->insert(It, loop1MBB); + MF->insert(It, loop2MBB); + } MF->insert(It, sinkMBB); MF->insert(It, exitMBB); @@ -446,9 +457,19 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword( exitMBB->transferSuccessorsAndUpdatePHIs(&BB); BB.addSuccessor(loopMBB, 
BranchProbability::getOne()); - loopMBB->addSuccessor(sinkMBB); - loopMBB->addSuccessor(loopMBB); - loopMBB->normalizeSuccProbs(); + if (NoMovnInstr) { + loopMBB->addSuccessor(loop1MBB); + loopMBB->addSuccessor(loop2MBB); + } else { + loopMBB->addSuccessor(sinkMBB); + loopMBB->addSuccessor(loopMBB); + loopMBB->normalizeSuccProbs(); + } + if (NoMovnInstr) { + loop1MBB->addSuccessor(loop2MBB); + loop2MBB->addSuccessor(loopMBB); + loop2MBB->addSuccessor(sinkMBB); + } BuildMI(loopMBB, DL, TII->get(LL), OldVal).addReg(Ptr).addImm(0); if (IsNand) { @@ -525,7 +546,7 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword( BuildMI(loopMBB, DL, TII->get(OR), BinOpRes) .addReg(BinOpRes) .addReg(Scratch4); - } else { + } else if (STI->hasMips4() || STI->hasMips32()) { // max: move BinOpRes, StoreVal // movn BinOpRes, Incr, Scratch4, BinOpRes // min: move BinOpRes, StoreVal @@ -537,12 +558,59 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword( .addReg(Incr) .addReg(Scratch4) .addReg(BinOpRes); + } else { + // if min: + // loopMBB: move BinOpRes, StoreVal + // beq Scratch4, 0, loop1MBB + // j loop2MBB + // loop1MBB: move BinOpRes, Incr + // loop2MBB: and BinOpRes, BinOpRes, Mask + // and StoreVal, OlddVal, Mask2 + // or StoreVal, StoreVal, BinOpRes + // StoreVal = sc StoreVal, 0(Ptr) + // beq StoreVal, zero, loopMBB + // + // if max: + // loopMBB: move BinOpRes, Incr + // beq Scratch4, 0, loop1MBB + // j loop2MBB + // loop1MBB: move BinOpRes, StoreVal + // loop2MBB: and BinOpRes, BinOpRes, Mask + // and StoreVal, OlddVal, Mask2 + // or StoreVal, StoreVal, BinOpRes + // StoreVal = sc StoreVal, 0(Ptr) + // beq StoreVal, zero, loopMBB + if (IsMin) { + BuildMI(loopMBB, DL, TII->get(OR), BinOpRes) + .addReg(StoreVal) + .addReg(Mips::ZERO); + BuildMI(loop1MBB, DL, TII->get(OR), BinOpRes) + .addReg(Incr) + .addReg(Mips::ZERO); + } else { + BuildMI(loopMBB, DL, TII->get(OR), BinOpRes) + .addReg(Incr) + .addReg(Mips::ZERO); + BuildMI(loop1MBB, DL, TII->get(OR), BinOpRes) + .addReg(StoreVal) + .addReg(Mips::ZERO); + } + BuildMI(loopMBB, DL, TII->get(BEQ)) + .addReg(Scratch4) + .addReg(Mips::ZERO) + .addMBB(loop1MBB); + BuildMI(loopMBB, DL, TII->get(Mips::J)).addMBB(loop2MBB); } // and BinOpRes, BinOpRes, Mask - BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes) - .addReg(BinOpRes) - .addReg(Mask); + if (NoMovnInstr) + BuildMI(loop2MBB, DL, TII->get(Mips::AND), BinOpRes) + .addReg(BinOpRes) + .addReg(Mask); + else + BuildMI(loopMBB, DL, TII->get(Mips::AND), BinOpRes) + .addReg(BinOpRes) + .addReg(Mask); } else if (!IsSwap) { // binopres, oldval, incr2 @@ -564,14 +632,37 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword( // or StoreVal, StoreVal, BinOpRes // StoreVal = sc StoreVal, 0(Ptr) // beq StoreVal, zero, loopMBB - BuildMI(loopMBB, DL, TII->get(Mips::AND), StoreVal) - .addReg(OldVal).addReg(Mask2); - BuildMI(loopMBB, DL, TII->get(Mips::OR), StoreVal) - .addReg(StoreVal).addReg(BinOpRes); - BuildMI(loopMBB, DL, TII->get(SC), StoreVal) - .addReg(StoreVal).addReg(Ptr).addImm(0); - BuildMI(loopMBB, DL, TII->get(BEQ)) - .addReg(StoreVal).addReg(Mips::ZERO).addMBB(loopMBB); + if (NoMovnInstr) { + BuildMI(loop2MBB, DL, TII->get(Mips::AND), StoreVal) + .addReg(OldVal) + .addReg(Mask2); + BuildMI(loop2MBB, DL, TII->get(Mips::OR), StoreVal) + .addReg(StoreVal) + .addReg(BinOpRes); + BuildMI(loop2MBB, DL, TII->get(SC), StoreVal) + .addReg(StoreVal) + .addReg(Ptr) + .addImm(0); + BuildMI(loop2MBB, DL, TII->get(BEQ)) + .addReg(StoreVal) + .addReg(Mips::ZERO) + .addMBB(loopMBB); + } else { + BuildMI(loopMBB, DL, 
TII->get(Mips::AND), StoreVal) + .addReg(OldVal) + .addReg(Mask2); + BuildMI(loopMBB, DL, TII->get(Mips::OR), StoreVal) + .addReg(StoreVal) + .addReg(BinOpRes); + BuildMI(loopMBB, DL, TII->get(SC), StoreVal) + .addReg(StoreVal) + .addReg(Ptr) + .addImm(0); + BuildMI(loopMBB, DL, TII->get(BEQ)) + .addReg(StoreVal) + .addReg(Mips::ZERO) + .addMBB(loopMBB); + } // sinkMBB: // and maskedoldval1,oldval,mask @@ -600,6 +691,11 @@ bool MipsExpandPseudo::expandAtomicBinOpSubword( LivePhysRegs LiveRegs; computeAndAddLiveIns(LiveRegs, *loopMBB); + if (loop1MBB) { + assert(loop2MBB && "should have 2 loop blocks"); + computeAndAddLiveIns(LiveRegs, *loop1MBB); + computeAndAddLiveIns(LiveRegs, *loop2MBB); + } computeAndAddLiveIns(LiveRegs, *sinkMBB); computeAndAddLiveIns(LiveRegs, *exitMBB); @@ -746,20 +842,41 @@ bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB, llvm_unreachable("Unknown pseudo atomic!"); } + bool NoMovnInstr = (IsMin || IsMax) && !STI->hasMips4() && !STI->hasMips32(); const BasicBlock *LLVM_BB = BB.getBasicBlock(); MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *loop1MBB = nullptr; + MachineBasicBlock *loop2MBB = nullptr; + if (NoMovnInstr) { + loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); + loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); + } MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); MachineFunction::iterator It = ++BB.getIterator(); MF->insert(It, loopMBB); + if (NoMovnInstr) { + MF->insert(It, loop1MBB); + MF->insert(It, loop2MBB); + } MF->insert(It, exitMBB); exitMBB->splice(exitMBB->begin(), &BB, std::next(I), BB.end()); exitMBB->transferSuccessorsAndUpdatePHIs(&BB); BB.addSuccessor(loopMBB, BranchProbability::getOne()); - loopMBB->addSuccessor(exitMBB); - loopMBB->addSuccessor(loopMBB); + if (NoMovnInstr) { + loopMBB->addSuccessor(loop1MBB); + loopMBB->addSuccessor(loop2MBB); + } else { + loopMBB->addSuccessor(exitMBB); + loopMBB->addSuccessor(loopMBB); + } loopMBB->normalizeSuccProbs(); + if (NoMovnInstr) { + loop1MBB->addSuccessor(loop2MBB); + loop2MBB->addSuccessor(loopMBB); + loop2MBB->addSuccessor(exitMBB); + } BuildMI(loopMBB, DL, TII->get(LL), OldVal).addReg(Ptr).addImm(0); assert((OldVal != Ptr) && "Clobbered the wrong ptr reg!"); @@ -802,7 +919,7 @@ bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB, BuildMI(loopMBB, DL, TII->get(OR), Scratch) .addReg(Scratch) .addReg(Scratch2); - } else { + } else if (STI->hasMips4() || STI->hasMips32()) { // max: move Scratch, OldVal // movn Scratch, Incr, Scratch2, Scratch // min: move Scratch, OldVal @@ -814,6 +931,38 @@ bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB, .addReg(Incr) .addReg(Scratch2) .addReg(Scratch); + } else { + // if min: + // loopMBB: move Scratch, OldVal + // beq Scratch2_32, 0, loop1MBB + // j loop2MBB + // loop1MBB: move Scratch, Incr + // loop2MBB: sc $2, 0($4) + // beqz $2, $BB0_1 + // nop + // + // if max: + // loopMBB: move Scratch, Incr + // beq Scratch2_32, 0, loop1MBB + // j loop2MBB + // loop1MBB: move Scratch, OldVal + // loop2MBB: sc $2, 0($4) + // beqz $2, $BB0_1 + // nop + if (IsMin) { + BuildMI(loopMBB, DL, TII->get(OR), Scratch).addReg(OldVal).addReg(ZERO); + BuildMI(loop1MBB, DL, TII->get(OR), Scratch).addReg(Incr).addReg(ZERO); + } else { + BuildMI(loopMBB, DL, TII->get(OR), Scratch).addReg(Incr).addReg(ZERO); + BuildMI(loop1MBB, DL, TII->get(OR), Scratch) + .addReg(OldVal) + .addReg(ZERO); + } + BuildMI(loopMBB, DL, TII->get(BEQ)) + .addReg(Scratch2_32) + .addReg(ZERO) + 
.addMBB(loop1MBB); + BuildMI(loopMBB, DL, TII->get(Mips::J)).addMBB(loop2MBB); } } else if (Opcode) { @@ -829,20 +978,36 @@ bool MipsExpandPseudo::expandAtomicBinOp(MachineBasicBlock &BB, BuildMI(loopMBB, DL, TII->get(OR), Scratch).addReg(Incr).addReg(ZERO); } - BuildMI(loopMBB, DL, TII->get(SC), Scratch) - .addReg(Scratch) - .addReg(Ptr) - .addImm(0); - BuildMI(loopMBB, DL, TII->get(BEQ)) - .addReg(Scratch) - .addReg(ZERO) - .addMBB(loopMBB); + if (NoMovnInstr) { + BuildMI(loop2MBB, DL, TII->get(SC), Scratch) + .addReg(Scratch) + .addReg(Ptr) + .addImm(0); + BuildMI(loop2MBB, DL, TII->get(BEQ)) + .addReg(Scratch) + .addReg(ZERO) + .addMBB(loopMBB); + } else { + BuildMI(loopMBB, DL, TII->get(SC), Scratch) + .addReg(Scratch) + .addReg(Ptr) + .addImm(0); + BuildMI(loopMBB, DL, TII->get(BEQ)) + .addReg(Scratch) + .addReg(ZERO) + .addMBB(loopMBB); + } NMBBI = BB.end(); I->eraseFromParent(); LivePhysRegs LiveRegs; computeAndAddLiveIns(LiveRegs, *loopMBB); + if (loop1MBB) { + assert(loop2MBB && "should have 2 loop blocks"); + computeAndAddLiveIns(LiveRegs, *loop1MBB); + computeAndAddLiveIns(LiveRegs, *loop2MBB); + } computeAndAddLiveIns(LiveRegs, *exitMBB); return true; diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index 1491300e37d3e..b05de49d8332a 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -4265,10 +4265,16 @@ parseRegForInlineAsmConstraint(StringRef C, MVT VT) const { return std::make_pair(0U, nullptr); if (Prefix == "$f") { // Parse $f0-$f31. - // If the size of FP registers is 64-bit or Reg is an even number, select - // the 64-bit register class. Otherwise, select the 32-bit register class. - if (VT == MVT::Other) - VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32; + // If the targets is single float only, always select 32-bit registers, + // otherwise if the size of FP registers is 64-bit or Reg is an even number, + // select the 64-bit register class. Otherwise, select the 32-bit register + // class. + if (VT == MVT::Other) { + if (Subtarget.isSingleFloat()) + VT = MVT::f32; + else + VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? 
MVT::f64 : MVT::f32; + } RC = getRegClassFor(VT); @@ -4308,10 +4314,12 @@ MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, return std::make_pair(0U, &Mips::CPU16RegsRegClass); return std::make_pair(0U, &Mips::GPR32RegClass); } - if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) && + if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat()) || + (VT == MVT::f64 && Subtarget.isSingleFloat())) && !Subtarget.isGP64bit()) return std::make_pair(0U, &Mips::GPR32RegClass); - if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) && + if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat()) || + (VT == MVT::f64 && Subtarget.isSingleFloat())) && Subtarget.isGP64bit()) return std::make_pair(0U, &Mips::GPR64RegClass); // This will generate an error message diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp index 6f8d6764e77b8..6ca587b1ba4d5 100644 --- a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp +++ b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp @@ -89,14 +89,25 @@ MipsRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { : CSR_Interrupt_32_SaveList; } - if (Subtarget.isSingleFloat()) - return CSR_SingleFloatOnly_SaveList; + // N64 ABI + if (Subtarget.isABI_N64()) { + if (Subtarget.isSingleFloat()) + return CSR_N64_SingleFloat_SaveList; - if (Subtarget.isABI_N64()) return CSR_N64_SaveList; + } + + // N32 ABI + if (Subtarget.isABI_N32()) { + if (Subtarget.isSingleFloat()) + return CSR_N32_SingleFloat_SaveList; - if (Subtarget.isABI_N32()) return CSR_N32_SaveList; + } + + // O32 ABI + if (Subtarget.isSingleFloat()) + return CSR_O32_SingleFloat_SaveList; if (Subtarget.isFP64bit()) return CSR_O32_FP64_SaveList; @@ -111,14 +122,25 @@ const uint32_t * MipsRegisterInfo::getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const { const MipsSubtarget &Subtarget = MF.getSubtarget(); - if (Subtarget.isSingleFloat()) - return CSR_SingleFloatOnly_RegMask; + // N64 ABI + if (Subtarget.isABI_N64()) { + if (Subtarget.isSingleFloat()) + return CSR_N64_SingleFloat_RegMask; - if (Subtarget.isABI_N64()) return CSR_N64_RegMask; + } + + // N32 ABI + if (Subtarget.isABI_N32()) { + if (Subtarget.isSingleFloat()) + return CSR_N32_SingleFloat_RegMask; - if (Subtarget.isABI_N32()) return CSR_N32_RegMask; + } + + // O32 ABI + if (Subtarget.isSingleFloat()) + return CSR_O32_SingleFloat_RegMask; if (Subtarget.isFP64bit()) return CSR_O32_FP64_RegMask; diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp index 71a70d9c2dd46..19917f3650bb5 100644 --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -28,6 +28,7 @@ #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/CodeGenTypes/MachineValueType.h" @@ -211,6 +212,16 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM, } } + // Targets with 64bits integer registers, but no 64bit floating point register + // do not support conversion between them + if (Subtarget.isGP64bit() && Subtarget.isSingleFloat() && + !Subtarget.useSoftFloat()) { + setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); + setOperationAction(ISD::SINT_TO_FP, MVT::i64, Expand); + 
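Stepping back to the MipsExpandPseudo changes above: on cores without movn (pre-MIPS4/MIPS32) the atomic min/max LL/SC loop is split into extra blocks so the new value is chosen with an explicit branch instead of a conditional move. The observable behaviour is still the usual read-modify-retry loop; a plain C++ behavioural model, with std::atomic's compare-exchange standing in for the ll/sc pair:

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstdio>

// Behavioural model of the expanded atomic max: load, compute the new value
// (with a branch rather than movn), and retry until the conditional store
// succeeds.
static int32_t atomicMax(std::atomic<int32_t> &Mem, int32_t Incr) {
  int32_t Old = Mem.load();
  while (!Mem.compare_exchange_weak(Old, std::max(Old, Incr))) {
    // Old has been refreshed with the current value; recompute and retry.
  }
  return Old; // the value observed before the update, as atomicrmw returns
}

int main() {
  std::atomic<int32_t> V{7};
  printf("%d %d\n", atomicMax(V, 42), V.load()); // "7 42"
}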
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); + } + setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom); setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom); setOperationAction(ISD::MULHS, MVT::i32, Custom); diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index c70f48af33cf2..bef4868492d4e 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -38,6 +38,13 @@ static cl::opt EnableRsqrtOpt("nvptx-rsqrt-approx-opt", cl::init(true), cl::Hidden, cl::desc("Enable reciprocal sqrt optimization")); +// FIXME: This is a WAR to recover lost performance from #155024. +// We still need to investigate the regression and find a more permanent +// solution. +static cl::opt EnableMADWide("nvptx-mad-wide-opt", cl::init(false), + cl::Hidden, + cl::desc("Enable MAD wide optimization")); + /// createNVPTXISelDag - This pass converts a legalized DAG into a /// NVPTX-specific DAG, ready for instruction scheduling. FunctionPass *llvm::createNVPTXISelDag(NVPTXTargetMachine &TM, @@ -84,6 +91,8 @@ bool NVPTXDAGToDAGISel::allowFMA() const { bool NVPTXDAGToDAGISel::doRsqrtOpt() const { return EnableRsqrtOpt; } +bool NVPTXDAGToDAGISel::doMADWideOpt() const { return EnableMADWide; } + /// Select - Select instructions not customized! Used for /// expanded, promoted and normal instructions. void NVPTXDAGToDAGISel::Select(SDNode *N) { @@ -1018,6 +1027,7 @@ pickOpcodeForVT(MVT::SimpleValueType VT, std::optional Opcode_i16, case MVT::f32: return Opcode_i32; case MVT::v2f32: + case MVT::v2i32: case MVT::i64: case MVT::f64: return Opcode_i64; diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h index 8dcd5362c4512..c912e709d0aa0 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h @@ -45,6 +45,7 @@ class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel { bool useF32FTZ() const; bool allowFMA() const; bool doRsqrtOpt() const; + bool doMADWideOpt() const; NVPTXScopes Scopes{}; diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index ca8a3f69f991d..8c21746c4369e 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -226,21 +226,20 @@ getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, switch (VectorVT.SimpleTy) { default: return std::nullopt; + case MVT::v4i64: case MVT::v4f64: - case MVT::v8i32: - // This is a "native" vector type iff the address space is global - // and the target supports 256-bit loads/stores + // This is a "native" vector type iff the address space is global and the + // target supports 256-bit loads/stores if (!CanLowerTo256Bit) return std::nullopt; LLVM_FALLTHROUGH; case MVT::v2i8: - case MVT::v2i32: case MVT::v2i64: case MVT::v2f64: - case MVT::v4i32: // This is a "native" vector type return std::pair(NumElts, EltVT); + case MVT::v16f16: // <8 x f16x2> case MVT::v16bf16: // <8 x bf16x2> case MVT::v16i16: // <8 x i16x2> @@ -264,12 +263,18 @@ getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, case MVT::v16i8: // <4 x i8x4> PackRegSize = 32; break; + case MVT::v8f32: // <4 x f32x2> + case MVT::v8i32: // <4 x i32x2> + // This is a "native" vector type iff the address space is global and the + // target supports 256-bit loads/stores if (!CanLowerTo256Bit) return std::nullopt; LLVM_FALLTHROUGH; case MVT::v2f32: // <1 x f32x2> case MVT::v4f32: // 
<2 x f32x2> + case MVT::v2i32: // <1 x i32x2> + case MVT::v4i32: // <2 x i32x2> if (!STI.hasF32x2Instructions()) return std::pair(NumElts, EltVT); PackRegSize = 64; @@ -590,8 +595,10 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, addRegisterClass(MVT::bf16, &NVPTX::B16RegClass); addRegisterClass(MVT::v2bf16, &NVPTX::B32RegClass); - if (STI.hasF32x2Instructions()) + if (STI.hasF32x2Instructions()) { addRegisterClass(MVT::v2f32, &NVPTX::B64RegClass); + addRegisterClass(MVT::v2i32, &NVPTX::B64RegClass); + } // Conversion to/from FP16/FP16x2 is always legal. setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); @@ -628,12 +635,18 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom); setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom); - // No support for these operations with v2f32. - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Expand); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Expand); + // No support for these operations with v2f32/v2i32 + setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand); + setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand); + + setOperationAction(ISD::TRUNCATE, MVT::v2i16, Expand); + setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND}, + MVT::v2i32, Expand); + // Need custom lowering in case the index is dynamic. if (STI.hasF32x2Instructions()) - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, + Custom); // Custom conversions to/from v2i8. setOperationAction(ISD::BITCAST, MVT::v2i8, Custom); @@ -661,14 +674,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, // Operations not directly supported by NVPTX. for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32, MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, - MVT::v4i8, MVT::i32, MVT::i64}) { + MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) { setOperationAction(ISD::SELECT_CC, VT, Expand); setOperationAction(ISD::BR_CC, VT, Expand); } - // Not directly supported. TLI would attempt to expand operations like - // FMINIMUM(v2f32) using invalid SETCC and VSELECT nodes. - setOperationAction(ISD::VSELECT, MVT::v2f32, Expand); + // We don't want ops like FMINIMUM or UMAX to be lowered to SETCC+VSELECT. + setOperationAction(ISD::VSELECT, {MVT::v2f32, MVT::v2i32}, Expand); // Some SIGN_EXTEND_INREG can be done using cvt instruction. // For others we will expand to a SHL/SRA pair. 
@@ -815,7 +827,14 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SRA, ISD::SRL, ISD::MULHS, ISD::MULHU, ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::SETCC}, - MVT::v2i16, Expand); + {MVT::v2i16, MVT::v2i32}, Expand); + + // v2i32 is not supported for any arithmetic operations + setOperationAction({ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, + ISD::CTPOP, ISD::CTLZ, ISD::ADD, ISD::SUB, ISD::MUL, + ISD::SHL, ISD::SRA, ISD::SRL, ISD::OR, ISD::AND, ISD::XOR, + ISD::SREM, ISD::UREM}, + MVT::v2i32, Expand); setOperationAction(ISD::ADDC, MVT::i32, Legal); setOperationAction(ISD::ADDE, MVT::i32, Legal); @@ -829,7 +848,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, } setOperationAction(ISD::CTTZ, MVT::i16, Expand); - setOperationAction(ISD::CTTZ, MVT::v2i16, Expand); + setOperationAction(ISD::CTTZ, {MVT::v2i16, MVT::v2i32}, Expand); setOperationAction(ISD::CTTZ, MVT::i32, Expand); setOperationAction(ISD::CTTZ, MVT::i64, Expand); @@ -1071,7 +1090,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, // Custom lowering for tcgen05.st vector operands setOperationAction(ISD::INTRINSIC_VOID, {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32, - MVT::v32i32, MVT::v64i32, MVT::v128i32}, + MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other}, Custom); // Enable custom lowering for the following: @@ -1134,6 +1153,34 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { MAKE_CASE(NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X) MAKE_CASE(NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y) MAKE_CASE(NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT) + MAKE_CASE( + NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT) + MAKE_CASE( + NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1) + MAKE_CASE(NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2) + MAKE_CASE( + NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT) + MAKE_CASE( + 
NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT) } return nullptr; @@ -2576,7 +2623,7 @@ static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG) { return V; } -static SDValue LowerTcgen05St(SDValue Op, SelectionDAG &DAG) { +static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG) { SDNode *N = Op.getNode(); SDLoc DL(N); SmallVector Ops; @@ -2602,7 +2649,141 @@ static SDValue LowerTcgen05St(SDValue Op, SelectionDAG &DAG) { return Tcgen05StNode; } -static SDValue LowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG) { +static unsigned getTcgen05MMADisableOutputLane(unsigned IID) { + switch (IID) { + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift: + return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT; + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift: + return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT; + case Intrinsic:: + nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift: + return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT; + case Intrinsic:: + nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift: + return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT; + case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift: + return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT; + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift: + return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT; + case 
Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1: + return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1; + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2: + return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2; + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: + return NVPTXISD:: + TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT; + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: + return NVPTXISD:: + TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT; + }; + llvm_unreachable("unhandled tcgen05.mma.disable_output_lane intrinsic"); +} + +static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG) { + SDNode *N = Op.getNode(); + SDLoc DL(N); + unsigned IID = cast(N->getOperand(1))->getZExtValue(); + + SmallVector Ops; + // split the vector argument + for (size_t I = 0; I < N->getNumOperands(); I++) { + if (I == 1) + continue; // skip IID + SDValue Val = N->getOperand(I); + EVT ValVT = Val.getValueType(); + if (ValVT.isVector()) { + EVT EltVT = ValVT.getVectorElementType(); + for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++) + Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val, + DAG.getIntPtrConstant(J, DL))); + } else + Ops.push_back(Val); + } + + MemIntrinsicSDNode *MemSD = cast(N); + SDValue Tcgen05MMANode = DAG.getMemIntrinsicNode( + getTcgen05MMADisableOutputLane(IID), DL, N->getVTList(), Ops, + MemSD->getMemoryVT(), MemSD->getMemOperand()); + + return Tcgen05MMANode; +} + +// Lower vector return type of tcgen05.ld intrinsics +static std::optional> +lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset = false) { + SDLoc DL(N); + EVT ResVT = N->getValueType(0); + if (!ResVT.isVector()) + return {}; // already legalized. 
+ + const unsigned NumElts = ResVT.getVectorNumElements(); + + // Create the return type of the instructions + SmallVector ListVTs; + for (unsigned i = 0; i < NumElts; ++i) + ListVTs.push_back(MVT::i32); + + ListVTs.push_back(N->getValueType(1)); // Chain + + SDVTList ResVTs = DAG.getVTList(ListVTs); + + SmallVector Ops{N->getOperand(0), N->getOperand(1), + N->getOperand(2)}; + + if (HasOffset) { + Ops.push_back(N->getOperand(3)); // offset + Ops.push_back(N->getOperand(4)); // Pack flag + } else + Ops.push_back(N->getOperand(3)); // Pack flag + + MemIntrinsicSDNode *MemSD = cast(N); + SDValue NewNode = + DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, ResVTs, Ops, + MemSD->getMemoryVT(), MemSD->getMemOperand()); + + // split the vector result + SmallVector ScalarRes; + for (unsigned i = 0; i < NumElts; ++i) { + SDValue Res = NewNode.getValue(i); + ScalarRes.push_back(Res); + } + + SDValue Chain = NewNode.getValue(NumElts); + SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes); + return {{BuildVector, Chain}}; +} + +static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG) { SDNode *N = Op.getNode(); SDValue Intrin = N->getOperand(1); @@ -2648,7 +2829,36 @@ static SDValue LowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG) { case Intrinsic::nvvm_tcgen05_st_16x64b_x64: case Intrinsic::nvvm_tcgen05_st_32x32b_x64: case Intrinsic::nvvm_tcgen05_st_32x32b_x128: - return LowerTcgen05St(Op, DAG); + return lowerTcgen05St(Op, DAG); + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: + return LowerTcgen05MMADisableOutputLane(Op, DAG); } return Op; } @@ -2721,6 +2931,28 @@ static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG) { SDValue Selector = (Op->op_end() - 1)->get(); return getPRMT(A, B, Selector, DL, DAG, Mode); } + +static 
SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG) { + switch (Op->getConstantOperandVal(1)) { + default: + return Op; + + // These tcgen05 intrinsics return a v2i32, which is legal, so we have to + // lower them through LowerOperation() instead of ReplaceNodeResults(). + case Intrinsic::nvvm_tcgen05_ld_16x64b_x2: + case Intrinsic::nvvm_tcgen05_ld_16x128b_x1: + case Intrinsic::nvvm_tcgen05_ld_32x32b_x2: + if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG)) + return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op)); + return SDValue(); + + case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: + if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG, /*HasOffset=*/true)) + return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op)); + return SDValue(); + } +} + static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG) { switch (Op->getConstantOperandVal(0)) { default: @@ -2883,11 +3115,11 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::ADDRSPACECAST: return LowerADDRSPACECAST(Op, DAG); case ISD::INTRINSIC_W_CHAIN: - return Op; + return lowerIntrinsicWChain(Op, DAG); case ISD::INTRINSIC_WO_CHAIN: return lowerIntrinsicWOChain(Op, DAG); case ISD::INTRINSIC_VOID: - return LowerIntrinsicVoid(Op, DAG); + return lowerIntrinsicVoid(Op, DAG); case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); case ISD::BITCAST: @@ -4725,6 +4957,53 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic( Info.align.reset(); return true; } + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: { + // We are reading and writing back to TMem + Info.opc = ISD::INTRINSIC_VOID; + Info.memVT = MVT::v4i32; + Info.ptrVal = I.getArgOperand(0); + Info.offset = 0; + Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; + Info.align = Align(16); + return true; + } + + case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2: + case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift: + case Intrinsic:: + nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift: + case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift: + case Intrinsic:: + 
nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: { + // We are reading and writing back to TMem + Info.opc = ISD::INTRINSIC_VOID; + Info.memVT = MVT::v8i32; + Info.ptrVal = I.getArgOperand(0); + Info.offset = 0; + Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; + Info.align = Align(16); + return true; + } } return false; } @@ -5727,7 +6006,7 @@ static SDValue PerformEXTRACTCombine(SDNode *N, IsPTXVectorType(VectorVT.getSimpleVT())) return SDValue(); // Native vector loads already combine nicely w/ // extract_vector_elt. - // Don't mess with singletons or packed types (v2f32, v2*16, v4i8 and v8i8), + // Don't mess with singletons or packed types (v2*32, v2*16, v4i8 and v8i8), // we already handle them OK. if (VectorVT.getVectorNumElements() == 1 || NVPTX::isPackedVectorTy(VectorVT) || VectorVT == MVT::v8i8) @@ -6107,53 +6386,6 @@ static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i8, {Vec0, Vec1})); } -// Lower vector return type of tcgen05.ld intrinsics -static void ReplaceTcgen05Ld(SDNode *N, SelectionDAG &DAG, - SmallVectorImpl &Results, - bool hasOffset = false) { - SDLoc DL(N); - EVT ResVT = N->getValueType(0); - if (!ResVT.isVector()) - return; // already legalized. - - const unsigned NumElts = ResVT.getVectorNumElements(); - - // Create the return type of the instructions - SmallVector ListVTs; - for (unsigned i = 0; i < NumElts; ++i) - ListVTs.push_back(MVT::i32); - - ListVTs.push_back(N->getValueType(1)); // Chain - - SDVTList ResVTs = DAG.getVTList(ListVTs); - - SmallVector Ops{N->getOperand(0), N->getOperand(1), - N->getOperand(2)}; - - if (hasOffset) { - Ops.push_back(N->getOperand(3)); // offset - Ops.push_back(N->getOperand(4)); // Pack flag - } else - Ops.push_back(N->getOperand(3)); // Pack flag - - MemIntrinsicSDNode *MemSD = cast(N); - SDValue NewNode = - DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, ResVTs, Ops, - MemSD->getMemoryVT(), MemSD->getMemOperand()); - - // split the vector result - SmallVector ScalarRes; - for (unsigned i = 0; i < NumElts; ++i) { - SDValue Res = NewNode.getValue(i); - ScalarRes.push_back(Res); - } - - SDValue Chain = NewNode.getValue(NumElts); - SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes); - Results.push_back(BuildVector); // Build Vector - Results.push_back(Chain); // Chain -} - static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl &Results) { SDValue Chain = N->getOperand(0); @@ -6262,21 +6494,18 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, return; } - case Intrinsic::nvvm_tcgen05_ld_16x64b_x2: case Intrinsic::nvvm_tcgen05_ld_16x64b_x4: case Intrinsic::nvvm_tcgen05_ld_16x64b_x8: case Intrinsic::nvvm_tcgen05_ld_16x64b_x16: case Intrinsic::nvvm_tcgen05_ld_16x64b_x32: case Intrinsic::nvvm_tcgen05_ld_16x64b_x64: case Intrinsic::nvvm_tcgen05_ld_16x64b_x128: - case Intrinsic::nvvm_tcgen05_ld_32x32b_x2: case Intrinsic::nvvm_tcgen05_ld_32x32b_x4: case Intrinsic::nvvm_tcgen05_ld_32x32b_x8: case Intrinsic::nvvm_tcgen05_ld_32x32b_x16: case Intrinsic::nvvm_tcgen05_ld_32x32b_x32: case Intrinsic::nvvm_tcgen05_ld_32x32b_x64: case Intrinsic::nvvm_tcgen05_ld_32x32b_x128: - case Intrinsic::nvvm_tcgen05_ld_16x128b_x1: case Intrinsic::nvvm_tcgen05_ld_16x128b_x2: case Intrinsic::nvvm_tcgen05_ld_16x128b_x4: case Intrinsic::nvvm_tcgen05_ld_16x128b_x8: @@ -6289,16 +6518,23 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, case Intrinsic::nvvm_tcgen05_ld_16x256b_x8: 
case Intrinsic::nvvm_tcgen05_ld_16x256b_x16: case Intrinsic::nvvm_tcgen05_ld_16x256b_x32: - return ReplaceTcgen05Ld(N, DAG, Results); + if (auto Res = lowerTcgen05Ld(N, DAG)) { + Results.push_back(Res->first); + Results.push_back(Res->second); + } + return; - case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128: - return ReplaceTcgen05Ld(N, DAG, Results, /* Offset */ true); + if (auto Res = lowerTcgen05Ld(N, DAG, /*HasOffset=*/true)) { + Results.push_back(Res->first); + Results.push_back(Res->second); + } + return; } } diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h index 03b3edc902e54..769d2fe46f2c8 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h @@ -99,7 +99,32 @@ enum NodeType : unsigned { StoreV2, StoreV4, StoreV8, - LAST_MEMORY_OPCODE = StoreV8, + TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1, + TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2, + TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT, + TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT, + LAST_MEMORY_OPCODE = + TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT, }; } diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td index 4e38e026e6bda..4cacee2290763 100644 --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -114,11 +114,13 @@ def hasArchAccelFeatures : Predicate<"Subtarget->hasArchAccelFeatures()">; def doF32FTZ : Predicate<"useF32FTZ()">; def doNoF32FTZ : Predicate<"!useF32FTZ()">; def doRsqrtOpt : Predicate<"doRsqrtOpt()">; +def doMADWideOpt : Predicate<"doMADWideOpt()">; def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">; def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">; def hasDotInstructions : Predicate<"Subtarget->hasDotInstructions()">; def hasTcgen05Instructions : Predicate<"Subtarget->hasTcgen05Instructions()">; +def hasTcgen05MMAScaleInputDImm : Predicate<"Subtarget->hasTcgen05MMAScaleInputDImm()">; def hasTMACTAGroupSupport : Predicate<"Subtarget->hasCpAsyncBulkTensorCTAGroupSupport()">; def hasF32x2Instructions : Predicate<"Subtarget->hasF32x2Instructions()">; @@ -754,8 +756,10 
@@ def : Pat<(vt (select i1:$p, vt:$a, vt:$b)), (SELP_b32rr $a, $b, $p)>; } -def : Pat<(v2f32 (select i1:$p, v2f32:$a, v2f32:$b)), +foreach vt = [v2f32, v2i32] in { +def : Pat<(vt (select i1:$p, vt:$a, vt:$b)), (SELP_b64rr $a, $b, $p)>; +} //----------------------------------- // Test Instructions @@ -899,8 +903,15 @@ let Predicates = [hasOptEnabled] in { defm MAD_LO_S32 : MADInst<"lo.s32", mul, I32RT, I32RT>; defm MAD_LO_S64 : MADInst<"lo.s64", mul, I64RT, I64RT>; - // Generating mad.wide causes a regression: + // Generating mad.wide causes a regression in some cases: // https://github.com/llvm/llvm-project/pull/150477#issuecomment-3191367837 + // Only do so when the user requests it. + let Predicates = [doMADWideOpt] in { + defm MAD_WIDE_U16 : MADInst<"wide.u16", umul_wide, I32RT, I16RT>; + defm MAD_WIDE_S16 : MADInst<"wide.s16", smul_wide, I32RT, I16RT>; + defm MAD_WIDE_U32 : MADInst<"wide.u32", umul_wide, I64RT, I32RT>; + defm MAD_WIDE_S32 : MADInst<"wide.s32", smul_wide, I64RT, I32RT>; + } } //----------------------------------- @@ -2092,8 +2103,8 @@ foreach vt = [v2f16, v2bf16, v2i16] in { (V2I16toI32 $a, $b)>; } -// Same thing for the 64-bit type v2f32. -foreach vt = [v2f32] in { +// Handle extracting one element from the pair (64-bit types) +foreach vt = [v2f32, v2i32] in { def : Pat<(extractelt vt:$src, 0), (I64toI32L_Sink $src)>, Requires<[hasPTX<71>]>; def : Pat<(extractelt vt:$src, 1), (I64toI32H_Sink $src)>, Requires<[hasPTX<71>]>; diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index c544911bdf1e3..e91171c1ae38f 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -5282,3 +5282,420 @@ foreach dim = ["x", "y", "z"] in { def CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_ # dim: CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID; } + +// +// tcgen05.mma Instructions +// + +class Tcgen05MMAInst PTXPredicates> : + NVPTXInst<(outs), (ins), "?", []>, + Requires { + + Intrinsic Intrin = !cast( + NVVM_TCGEN05_MMA.record + ); + + dag ScaleInpIns = !if(!eq(ScaleInputD, 1), (ins i64imm:$scale_input_d), (ins)); + string ScaleInpStr = !if(!eq(ScaleInputD, 1), ", $scale_input_d", ""); + dag ScaleInpInput = !if(!eq(ScaleInputD, 1), (Intrin i64:$scale_input_d), (Intrin)); + + dag SparseMetadataIns = !if(!eq(Sp, 1), (ins B32:$spmetadata), (ins)); + dag SparseMetadataIntr = !if(!eq(Sp, 1), (Intrin B32:$spmetadata), (Intrin)); + string SparseMetadataStr = !if(!eq(Sp, 1), ", [$spmetadata]", ""); + + int KindVal = !cond( + !eq(KindStr, "f16"): 0, + !eq(KindStr, "tf32"): 1, + !eq(KindStr, "f8f6f4"): 2, + !eq(KindStr, "i8"): 3, + ); + + int CollectorUsageVal = !cond( + !eq(CollectorUsage, "discard"): 0, + !eq(CollectorUsage, "lastuse"): 1, + !eq(CollectorUsage, "fill"): 2, + !eq(CollectorUsage, "use"): 3 + ); + + string AOperandStr = !if(!eq(ASpace, "tensor"), "[$a]", "$a"); + + NVPTXRegClass ARegClass = !if(!eq(ASpace, "tensor"), B32, B64); + + dag input = !con((ins B32:$dtmem, + ARegClass:$a, ADDR:$b, + B32:$idesc, + B1:$enable_inp_d), + SparseMetadataIns, + ScaleInpIns); + + let InOperandList = input; + let OutOperandList = (outs); + let AsmString = "tcgen05.mma" + # !if(!eq(Sp, 1), ".sp", "") + # ".cta_group::" # CtaGroup + # ".kind::" # KindStr + # ".collector::a::" # CollectorUsage + # !if(!eq(AShift, 1), ".ashift", "") + # " [$dtmem], " # AOperandStr # ", $b" + # SparseMetadataStr + # ", $idesc, $enable_inp_d" + # ScaleInpStr + # ";"; + + dag IntrinsicPattern = !con((Intrin i32:$dtmem, + 
ARegClass:$a, addr:$b, + i32:$idesc, + i1:$enable_inp_d), + SparseMetadataIntr, + ScaleInpInput); + + dag FlagOperands = (Intrin (i32 KindVal), (i32 CtaGroup), + (i32 CollectorUsageVal)); + + let Pattern = [!con(IntrinsicPattern, FlagOperands)]; +} + +// tcgen05.mma +foreach sp = [0, 1] in { + foreach space = ["tensor", "shared"] in { + foreach kind = ["f16", "tf32", "f8f6f4", "i8"] in { + foreach cta_group = [1, 2] in { + foreach collector_usage = ["discard", "lastuse", "fill", "use"] in { + foreach scale_input_d = !if(!or(!eq(kind, "f16"), + !eq(kind, "tf32")), [0, 1], [0]) in { + foreach ashift = !if(!eq(space, "tensor"), [0, 1], [0]) in { + + def : Tcgen05MMAInst; + } + } + } + } + } + } +} + +class Tcgen05MMADisableOutputLaneTypeProfile: + SDTypeProfile<0, 0, []> { + int DisableOutputLaneVecSize = !mul(4, CtaGroup); + + list VTs = !listconcat( + [i32], // d + !if(!eq(ASpace, "tensor"), [i32], [i64]), // a + [i64, i32, i1], // b, idesc, enable_inp_d + !if(!eq(Sp, 1), [i32], []), // spmetadata + !if(!eq(ScaleInputD, 1), [i64], []), // scale_input_d + !listsplat(i32, DisableOutputLaneVecSize), // disable_output_lane + [i32, i32] // kind, collector_usage + ); + let Constraints = !foreach(x, !range(!size(VTs)), SDTCisVT); + let NumOperands = !size(Constraints); +} + +class Tcgen05MMADisableOutputLaneSDNode: + SDNode<"NVPTXISD::TCGEN05_MMA" + # !if(!eq(Sp, 1), "_SP", "") + # "_" # !toupper(ASpace) + # !if(!eq(ScaleInput, 1), "_SCALE_D", "") + # "_DISABLE_OUTPUT_LANE_CG" # CtaGroup + # !if(!eq(AShift, 1), "_ASHIFT", ""), + Tcgen05MMADisableOutputLaneTypeProfile, + [SDNPHasChain, SDNPSideEffect]>; + +class Tcgen05MMADisableOutputLaneInst PTXPredicates> : + NVPTXInst<(outs), (ins), "?", []>, + Requires { + + SDNode Opcode = Tcgen05MMADisableOutputLaneSDNode; + + + dag ScaleInpIns = !if(!eq(ScaleInputD, 1), (ins i64imm:$scale_input_d), (ins)); + string ScaleInpStr = !if(!eq(ScaleInputD, 1), ", $scale_input_d", ""); + dag ScaleInpInput = !if(!eq(ScaleInputD, 1), (Opcode i64:$scale_input_d), (Opcode)); + + // disable output lane + int DisableOutputLaneVecSize = !mul(4, CtaGroup); + + dag DisableOutputLaneIns = !dag(ins, + !listsplat(B32, DisableOutputLaneVecSize), + !foreach(x, + !range(DisableOutputLaneVecSize), + "disable_output_lane" # x)); + + dag DisableOutputLaneInput = !dag(Opcode, + !listsplat(i32, DisableOutputLaneVecSize), + !foreach(x, + !range(DisableOutputLaneVecSize), + "disable_output_lane" # x)); + + string DisableOutputLaneStr = "{{" # + !interleave( + !foreach(x, + !range(DisableOutputLaneVecSize), + "$disable_output_lane" # x), + ", ") + # "}}"; + + dag SparseMetadataIns = !if(!eq(Sp, 1), (ins B32:$spmetadata), (ins)); + dag SparseMetadataIntr = !if(!eq(Sp, 1), (Opcode i32:$spmetadata), (Opcode)); + string SparseMetadataStr = !if(!eq(Sp, 1), ", [$spmetadata]", ""); + + int KindVal = !cond( + !eq(Kind, "f16"): 0, + !eq(Kind, "tf32"): 1, + !eq(Kind, "f8f6f4"): 2, + !eq(Kind, "i8"): 3, + ); + + int CollectorUsage = !cond( + !eq(CollectorUsageStr, "discard"): 0, + !eq(CollectorUsageStr, "lastuse"): 1, + !eq(CollectorUsageStr, "fill"): 2, + !eq(CollectorUsageStr, "use"): 3, + ); + + string AOperandStr = !if(!eq(ASpace, "tensor"), "[$a]", "$a"); + + NVPTXRegClass ARegClass = !if(!eq(ASpace, "tensor"), B32, B64); + + dag InOperandList = !con((ins B32:$dtmem, + ARegClass:$a, B64:$b, + B32:$idesc, + B1:$enable_inp_d), + SparseMetadataIns, + ScaleInpIns, + DisableOutputLaneIns); + + let OutOperandList = (outs); + let AsmString = "tcgen05.mma" + # !if(!eq(Sp, 1), ".sp", "") + # 
".cta_group::" # CtaGroup + # ".kind::" # Kind + # !if(!eq(AShift, 1), ".ashift", "") + # ".collector::a::" # CollectorUsageStr + # " " # "[$dtmem], " # AOperandStr # ", $b" + # SparseMetadataStr + # ", " # "$idesc" + # ", " # DisableOutputLaneStr + # ", $enable_inp_d" + # ScaleInpStr + # ";"; + + dag IntrinsicPattern = !con((Opcode i32:$dtmem, + ARegClass:$a, i64:$b, + i32:$idesc, + i1:$enable_inp_d), + SparseMetadataIntr, + ScaleInpInput, + DisableOutputLaneInput); + + dag FlagOperands = (Opcode (i32 KindVal), (i32 CollectorUsage)); + + let Pattern = [!con(IntrinsicPattern, FlagOperands)]; +} + +// tcgen05.mma.disable_output_lane +foreach sp = [0, 1] in { + foreach space = ["tensor", "shared"] in { + foreach kind = ["f16", "tf32", "f8f6f4", "i8"] in { + foreach cta_group = [1, 2] in { + foreach collector_usage = ["fill", "use", "lastuse", "discard"] in { + foreach scale_input_d = !if(!or(!eq(kind, "f16"), + !eq(kind, "tf32")), [0, 1], [0]) in { + foreach ashift = !if(!eq(space, "tensor"), [0, 1], [0]) in { + def : + Tcgen05MMADisableOutputLaneInst; + } + } + } + } + } + } +} + +class Tcgen05MMABlockScaleInst: + NVPTXInst<(outs), (ins), "?", []>, + Requires<[hasTcgen05Instructions, PTXPredicate]> { + + Intrinsic Intrin = !cast( + NVVM_TCGEN05_MMA_BLOCKSCALE.record); + + dag SparseMetadataIns = !if(!eq(Sp, 1), (ins B32:$spmetadata), (ins)); + dag SparseMetadataIntr = !if(!eq(Sp, 1), (Intrin i32:$spmetadata), (Intrin)); + string SparseMetadataStr = !if(!eq(Sp, 1), ", [$spmetadata]", ""); + + int KindVal = !cond( + !eq(KindStr, "mxf8f6f4") : 0, + !eq(KindStr, "mxf4") : 1, + !eq(KindStr, "mxf4nvf4") : 2, + ); + + int CollectorUsage = !cond( + !eq(CollectorUsageStr, "discard") : 0, + !eq(CollectorUsageStr, "lastuse") : 1, + !eq(CollectorUsageStr, "fill") : 2, + !eq(CollectorUsageStr, "use") : 3, + ); + + string AOperandStr = !if(!eq(ASpace, "tensor"), "[$a]", "$a"); + NVPTXRegClass ARegClass = !if(!eq(ASpace, "tensor"), B32, B64); + + dag input = !con((ins B32:$dtmem, ARegClass:$a, B64:$b, + B32:$idesc, B1:$enable_inp_d), + SparseMetadataIns, + (ins B32:$scale_a, + B32:$scale_b)); + + let InOperandList = input; + let OutOperandList = (outs); + let AsmString = "tcgen05.mma" + # !if(!eq(Sp, 1), ".sp", "") + # ".cta_group::" # CtaGroup + # ".kind::" # KindStr + # ".block_scale" # ScaleVecSize + # ".collector::a::" # CollectorUsageStr + # " [$dtmem], " # AOperandStr # ", $b" + # SparseMetadataStr + # ", $idesc, [$scale_a], [$scale_b], $enable_inp_d;"; + + dag IntrinsicPattern = !con((Intrin i32:$dtmem, + ARegClass:$a, i64:$b, + i32:$idesc, + i1:$enable_inp_d), + SparseMetadataIntr, + (Intrin i32:$scale_a, + i32:$scale_b)); + + dag FlagOperands = (Intrin (i32 CtaGroup), (i32 CollectorUsage)); + + let Pattern = [!con(IntrinsicPattern, FlagOperands)]; +} + +// tcgen05.mma.block_scale +foreach sp = [0, 1] in { + foreach space = ["tensor", "shared"] in { + foreach kind = ["mxf8f6f4", "mxf4", "mxf4nvf4"] in { + foreach scale_vec_size = ["", ".block16", ".block32"] in { + foreach cta_group = [1, 2] in { + foreach collector_usage = ["fill", "use", "lastuse", "discard"] in { + if NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED.ret then { + def : Tcgen05MMABlockScaleInst, hasPTX<86>)>; + } + } + } + } + } + } +} + +// +// tcgen05.mma.ws Instructions +// + +class Tcgen05MMAWSInst : + NVPTXInst<(outs), (ins), "?", []>, + Requires<[hasTcgen05Instructions]> { + + Intrinsic Intrin = !cast( + NVVM_TCGEN05_MMA_WS.record); + + dag ZeroColMaskIns = !if(!eq(HasZeroColMask, 1), + (ins B64:$zero_col_mask), (ins)); + string 
ZeroColMaskStr = !if(!eq(HasZeroColMask, 1), ", $zero_col_mask", ""); + dag ZeroColMaskIntr = !if(!eq(HasZeroColMask, 1), + (Intrin i64:$zero_col_mask), (Intrin)); + + dag SparseMetadataIns = !if(!eq(Sp, 1), (ins B32:$spmetadata), (ins)); + dag SparseMetadataIntr = !if(!eq(Sp, 1), (Intrin B32:$spmetadata), (Intrin)); + string SparseMetadataStr = !if(!eq(Sp, 1), ", [$spmetadata]", ""); + + int KindVal = !cond( + !eq(KindStr, "f16") : 0, + !eq(KindStr, "tf32") : 1, + !eq(KindStr, "f8f6f4"): 2, + !eq(KindStr, "i8") : 3, + ); + + int CollectorUsageOp = !cond( + !eq(CollectorUsageOpStr, "discard"): 0, + !eq(CollectorUsageOpStr, "lastuse"): 1, + !eq(CollectorUsageOpStr, "fill") : 2, + !eq(CollectorUsageOpStr, "use") : 3, + ); + + string AOperandStr = !if(!eq(ASpace, "tensor"), "[$a]", "$a"); + NVPTXRegClass ARegClass = !if(!eq(ASpace, "tensor"), B32, B64); + + dag input = !con((ins B32:$dtmem, + ARegClass:$a, B64:$b, + B32:$idesc, + B1:$enable_inp_d), + SparseMetadataIns, + ZeroColMaskIns); + + let InOperandList = input; + let OutOperandList = (outs); + let AsmString = "tcgen05.mma.ws" + # !if(!eq(Sp, 1), ".sp", "") + # ".cta_group::1" + # ".kind::" # KindStr + # ".collector::b" # CollectorBufferB + # "::" # CollectorUsageOpStr + # " [$dtmem], " # AOperandStr # ", $b" + # SparseMetadataStr + # ", $idesc, $enable_inp_d" + # ZeroColMaskStr + # ";"; + + dag IntrinsicPattern = !con((Intrin i32:$dtmem, + ARegClass:$a, i64:$b, + i32:$idesc, + i1:$enable_inp_d), + SparseMetadataIntr, + ZeroColMaskIntr); + + dag FlagOperands = (Intrin (i32 KindVal), (i32 CollectorBufferB), + (i32 CollectorUsageOp)); + + let Pattern = [!con(IntrinsicPattern, FlagOperands)]; +} + +// tcgen05.mma.ws +foreach sp = [0, 1] in { + foreach space = ["shared", "tensor"] in { + foreach kind = ["f16", "tf32", "f8f6f4", "i8"] in { + foreach collector_buffer_b = [0, 1, 2, 3] in { + foreach collector_usage_op = ["discard", "fill", "use", "lastuse"] in { + foreach zero_col_mask = [0, 1] in { + def : Tcgen05MMAWSInst; + } + } + } + } + } +} diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td index 2e81ab122d1df..913487b64617a 100644 --- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td @@ -54,7 +54,8 @@ def B16 : NVPTXRegClass<[i16, f16, bf16], 16, (add (sequence "RS%u", 0, 4))>; def B32 : NVPTXRegClass<[i32, v2f16, v2bf16, v2i16, v4i8, f32], 32, (add (sequence "R%u", 0, 4), VRFrame32, VRFrameLocal32)>; -def B64 : NVPTXRegClass<[i64, v2f32, f64], 64, (add (sequence "RL%u", 0, 4), +def B64 : NVPTXRegClass<[i64, v2i32, v2f32, f64], 64, + (add (sequence "RL%u", 0, 4), VRFrame64, VRFrameLocal64)>; // 128-bit regs are not defined as general regs in NVPTX. They are used for inlineASM only. 
def B128 : NVPTXRegClass<[i128], 128, (add (sequence "RQ%u", 0, 4))>; diff --git a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h index 0a77a633cb255..e81c56bb4b562 100644 --- a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h +++ b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h @@ -114,6 +114,10 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo { case 1013: // sm_101a HasTcgen05 = true; break; + case 1103: // sm_110a + HasTcgen05 = true; + MinPTXVersion = 90; + break; case 1033: // sm_103a HasTcgen05 = true; MinPTXVersion = 88; @@ -122,6 +126,10 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo { return HasTcgen05 && PTXVersion >= MinPTXVersion; } + + bool hasTcgen05MMAScaleInputDImm() const { + return FullSmVersion == 1003 && PTXVersion >= 86; + } // f32x2 instructions in Blackwell family bool hasF32x2Instructions() const; diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp index f4f89613b358d..4029e143ae2a4 100644 --- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp @@ -281,21 +281,12 @@ static Instruction *convertNvvmIntrinsicToLlvm(InstCombiner &IC, return {Intrinsic::trunc, FTZ_MustBeOn}; // NVVM intrinsics that map to LLVM cast operations. - // - // Note that llvm's target-generic conversion operators correspond to the rz - // (round to zero) versions of the nvvm conversion intrinsics, even though - // most everything else here uses the rn (round to nearest even) nvvm ops. - case Intrinsic::nvvm_d2i_rz: - case Intrinsic::nvvm_f2i_rz: - case Intrinsic::nvvm_d2ll_rz: - case Intrinsic::nvvm_f2ll_rz: - return {Instruction::FPToSI}; - case Intrinsic::nvvm_d2ui_rz: - case Intrinsic::nvvm_f2ui_rz: - case Intrinsic::nvvm_d2ull_rz: - case Intrinsic::nvvm_f2ull_rz: - return {Instruction::FPToUI}; - // Integer to floating-point uses RN rounding, not RZ + // Note - we cannot map intrinsics like nvvm_d2ll_rz to LLVM's + // FPToSI, as NaN to int conversion with FPToSI is considered UB and is + // eliminated. NVVM conversion intrinsics are translated to PTX cvt + // instructions which define the outcome for NaN rather than leaving as UB. + // Therefore, translate NVVM intrinsics to sitofp/uitofp, but not to + // fptosi/fptoui. case Intrinsic::nvvm_i2d_rn: case Intrinsic::nvvm_i2f_rn: case Intrinsic::nvvm_ll2d_rn: @@ -590,8 +581,12 @@ Value *NVPTXTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, } case Intrinsic::nvvm_prefetch_tensormap: { IRBuilder<> Builder(II); - return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_prefetch_tensormap, - NewV); + const unsigned NewAS = NewV->getType()->getPointerAddressSpace(); + if (NewAS == NVPTXAS::ADDRESS_SPACE_CONST || + NewAS == NVPTXAS::ADDRESS_SPACE_PARAM) + return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_prefetch_tensormap, + NewV); + return nullptr; } } return nullptr; diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/llvm/lib/Target/NVPTX/NVPTXUtilities.h index a070789f85e0b..4b5cb30fd3036 100644 --- a/llvm/lib/Target/NVPTX/NVPTXUtilities.h +++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.h @@ -99,8 +99,8 @@ namespace NVPTX { // register. NOTE: This must be kept in sync with the register classes // defined in NVPTXRegisterInfo.td. 
inline auto packed_types() { - static const auto PackedTypes = {MVT::v4i8, MVT::v2f16, MVT::v2bf16, - MVT::v2i16, MVT::v2f32}; + static const auto PackedTypes = {MVT::v4i8, MVT::v2f16, MVT::v2bf16, + MVT::v2i16, MVT::v2f32, MVT::v2i32}; return PackedTypes; } diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp index 8ed7c68f54e7f..48c31c91e9338 100644 --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp @@ -22,7 +22,6 @@ #include "llvm/MC/MCRegisterInfo.h" #include "llvm/Support/Casting.h" #include "llvm/Support/EndianStream.h" -#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/TargetParser/Triple.h" #include diff --git a/llvm/lib/Target/PowerPC/PPC.td b/llvm/lib/Target/PowerPC/PPC.td index d89a9487c0da2..4ff2f8a54529f 100644 --- a/llvm/lib/Target/PowerPC/PPC.td +++ b/llvm/lib/Target/PowerPC/PPC.td @@ -360,8 +360,11 @@ def FeatureFastMFLR : SubtargetFeature<"fast-MFLR", "HasFastMFLR", "true", //===----------------------------------------------------------------------===// // PowerPC Instruction Predicate Definitions. -def In32BitMode : Predicate<"!Subtarget->isPPC64()">; -def In64BitMode : Predicate<"Subtarget->isPPC64()">; + +def IsLittleEndian : Predicate<"Subtarget->isLittleEndian()">; +def IsBigEndian : Predicate<"!Subtarget->isLittleEndian()">; +def IsPPC32 : Predicate<"!Subtarget->isPPC64()">; +def IsPPC64 : Predicate<"Subtarget->isPPC64()">; def IsBookE : Predicate<"Subtarget->isBookE()">; def IsNotBookE : Predicate<"!Subtarget->isBookE()">; def HasOnlyMSYNC : Predicate<"Subtarget->hasOnlyMSYNC()">; @@ -379,27 +382,50 @@ def NaNsFPMath : Predicate<"!Subtarget->getTargetMachine().Options.NoNaNsFPMath">; def HasBPERMD : Predicate<"Subtarget->hasBPERMD()">; def HasExtDiv : Predicate<"Subtarget->hasExtDiv()">; +def HasFPU : Predicate<"Subtarget->hasFPU()">; +def HasHTM : Predicate<"Subtarget->hasHTM()">; +def HasDirectMove : Predicate<"Subtarget->hasDirectMove()">; +def HasP8Crypto : Predicate<"Subtarget->hasP8Crypto()">; +def PCRelativeMemops : Predicate<"Subtarget->hasPCRelativeMemops()">; +def PrefixInstrs : Predicate<"Subtarget->hasPrefixInstrs()">; +def PairedVectorMemops : Predicate<"Subtarget->pairedVectorMemops()">; +def MMA : Predicate<"Subtarget->hasMMA()">; + +// Vector support predicates +def HasVSX : Predicate<"Subtarget->hasVSX()">; +def NoP8Vector : Predicate<"!Subtarget->hasP8Vector()">; +def HasP8Vector : Predicate<"Subtarget->hasP8Vector()">; +def HasAltivec : Predicate<"Subtarget->hasAltivec()">; +def HasP8Altivec : Predicate<"Subtarget->hasP8Altivec()">; +def NoP9Vector : Predicate<"!Subtarget->hasP9Vector()">; +def HasP9Vector : Predicate<"Subtarget->hasP9Vector()">; +def NoP9Altivec : Predicate<"!Subtarget->hasP9Altivec()">; +def HasP9Altivec : Predicate<"Subtarget->hasP9Altivec()">; +def HasOnlySwappingMemOps : Predicate<"!Subtarget->hasP9Vector()">; +def NoP10Vector : Predicate<"!Subtarget->hasP10Vector()">; +def HasP10Vector : Predicate<"Subtarget->hasP10Vector()">; + +// Predicates used to differentiate between different ISAs.
def IsISA2_06 : Predicate<"Subtarget->isISA2_06()">; def IsISA2_07 : Predicate<"Subtarget->isISA2_07()">; def IsISA3_0 : Predicate<"Subtarget->isISA3_0()">; -def HasFPU : Predicate<"Subtarget->hasFPU()">; -def PCRelativeMemops : Predicate<"Subtarget->hasPCRelativeMemops()">; +def IsISA3_1 : Predicate<"Subtarget->isISA3_1()">; def IsNotISA3_1 : Predicate<"!Subtarget->isISA3_1()">; +def IsISAFuture : Predicate<"Subtarget->isISAFuture()">; +def IsNotISAFuture : Predicate<"!Subtarget->isISAFuture()">; // AIX assembler may not be modern enough to support some extended mne. def ModernAs: Predicate<"!Subtarget->isAIXABI() || Subtarget->HasModernAIXAs">, AssemblerPredicate<(any_of (not AIXOS), FeatureModernAIXAs)>; def IsAIX : Predicate<"Subtarget->isAIXABI()">; def NotAIX : Predicate<"!Subtarget->isAIXABI()">; -def IsISAFuture : Predicate<"Subtarget->isISAFuture()">; -def IsNotISAFuture : Predicate<"!Subtarget->isISAFuture()">; //===----------------------------------------------------------------------===// // HwModes //===----------------------------------------------------------------------===// defvar PPC32 = DefaultMode; -def PPC64 : HwMode<[In64BitMode]>; +def PPC64 : HwMode<[IsPPC64]>; // Since new processors generally contain a superset of features of those that // came before them, the idea is to make implementations of new processors diff --git a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td index 269d30318bca8..60efa4c8f0a37 100644 --- a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td @@ -76,23 +76,23 @@ let Interpretation64Bit = 1, isCodeGenOnly = 1 in { let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, hasSideEffects = 0 in { let isReturn = 1, isPredicable = 1, Uses = [LR8, RM] in def BLR8 : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", IIC_BrB, - [(PPCretglue)]>, Requires<[In64BitMode]>; + [(PPCretglue)]>, Requires<[IsPPC64]>; let isBranch = 1, isIndirectBranch = 1, Uses = [CTR8] in { let isPredicable = 1 in def BCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCCTR8 : XLForm_2_br<19, 528, 0, (outs), (ins (pred $BIBO, $CR):$cond), "b${cond:cc}ctr${cond:pm} ${cond:reg}", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCTR8 : XLForm_2_br2<19, 528, 12, 0, (outs), (ins crbitrc:$BI), "bcctr 12, $BI, 0", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCTR8n : XLForm_2_br2<19, 528, 4, 0, (outs), (ins crbitrc:$BI), "bcctr 4, $BI, 0", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } } @@ -160,20 +160,20 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8], hasSideEffects = 0 in { let isPredicable = 1 in def BCTRL8 : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), "bctrl", IIC_BrB, [(PPCbctrl)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; let isCodeGenOnly = 1 in { def BCCCTRL8 : XLForm_2_br<19, 528, 1, (outs), (ins (pred $BIBO, $CR):$cond), "b${cond:cc}ctrl${cond:pm} ${cond:reg}", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCTRL8 : XLForm_2_br2<19, 528, 12, 1, (outs), (ins crbitrc:$BI), "bcctrl 12, $BI, 0", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCTRL8n : XLForm_2_br2<19, 528, 4, 1, (outs), (ins crbitrc:$BI), "bcctrl 4, $BI, 0", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } } } @@ -207,7 +207,7 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8, RM], hasSideEffects = 0, let isPredicable = 1 
in def BCTRL8_RM : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), "bctrl", IIC_BrB, [(PPCbctrl_rm)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } } @@ -218,7 +218,7 @@ let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, (ins (memrix $D, $RA):$src), "bctrl\n\tld 2, $src", IIC_BrB, [(PPCbctrl_load_toc iaddrX4:$src)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, @@ -228,7 +228,7 @@ let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, (ins (memrix $D, $RA):$src), "bctrl\n\tld 2, $src", IIC_BrB, [(PPCbctrl_load_toc_rm iaddrX4:$src)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } } // Interpretation64Bit @@ -449,7 +449,7 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1, isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR8, RM] in def TAILBCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7, isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in @@ -516,7 +516,7 @@ let hasSideEffects = 1 in { def EH_SjLj_SetJmp64 : PPCCustomInserterPseudo<(outs gprc:$dst), (ins memr:$buf), "#EH_SJLJ_SETJMP64", [(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } let hasSideEffects = 1, isBarrier = 1 in { @@ -524,7 +524,7 @@ let hasSideEffects = 1, isBarrier = 1 in { def EH_SjLj_LongJmp64 : PPCCustomInserterPseudo<(outs), (ins memr:$buf), "#EH_SJLJ_LONGJMP64", [(PPCeh_sjlj_longjmp addr:$buf)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } def MFSPR8 : XFXForm_1<31, 339, (outs g8rc:$RST), (ins i32imm:$SPR), @@ -1948,7 +1948,7 @@ def : Pat<(atomic_load_nonext_64 XForm:$src), (LDX memrr:$src)>; def : Pat<(atomic_store_64 i64:$val, DSForm:$ptr), (STD g8rc:$val, memrix:$ptr)>; def : Pat<(atomic_store_64 i64:$val, XForm:$ptr), (STDX g8rc:$val, memrr:$ptr)>; -let Predicates = [IsISA3_0, In64BitMode] in { +let Predicates = [IsISA3_0, IsPPC64] in { def : Pat<(i64 (int_ppc_cmpeqb g8rc:$a, g8rc:$b)), (i64 (SETB8 (CMPEQB $a, $b)))>; def : Pat<(i64 (int_ppc_setb g8rc:$a, g8rc:$b)), @@ -1961,7 +1961,7 @@ def : Pat<(i64 (int_ppc_maddld g8rc:$a, g8rc:$b, g8rc:$c)), (i64 (MADDLD8 $a, $b, $c))>; } -let Predicates = [In64BitMode] in { +let Predicates = [IsPPC64] in { def : Pat<(i64 (int_ppc_mulhd g8rc:$a, g8rc:$b)), (i64 (MULHD $a, $b))>; def : Pat<(i64 (int_ppc_mulhdu g8rc:$a, g8rc:$b)), diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td index 97d5e28963234..c616db4a1031c 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td +++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td @@ -343,7 +343,6 @@ class VXCR_Int_Ty xo, string opc, Intrinsic IntID, ValueType Ty> //===----------------------------------------------------------------------===// // Instruction Definitions. 
-def HasAltivec : Predicate<"Subtarget->hasAltivec()">; let Predicates = [HasAltivec] in { def DSS : DSS_Form<0, 822, (outs), (ins u5imm:$STRM), @@ -1193,8 +1192,6 @@ class VX_VT5_VA5_VB5_XO9_o xo, string opc, list pattern> let PS = 0; } -def HasP8Altivec : Predicate<"Subtarget->hasP8Altivec()">; -def HasP8Crypto : Predicate<"Subtarget->hasP8Crypto()">; let Predicates = [HasP8Altivec] in { let isCommutable = 1 in { @@ -1420,7 +1417,6 @@ def VSBOX : VXBX_Int_Ty<1480, "vsbox", int_ppc_altivec_crypto_vsbox, v2i64>; } // HasP8Crypto // The following altivec instructions were introduced in Power ISA 3.0 -def HasP9Altivec : Predicate<"Subtarget->hasP9Altivec()">; let Predicates = [HasP9Altivec] in { // Vector Multiply-Sum diff --git a/llvm/lib/Target/PowerPC/PPCInstrFuture.td b/llvm/lib/Target/PowerPC/PPCInstrFuture.td index 5751d7dc1628b..1aefea1a1c498 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrFuture.td +++ b/llvm/lib/Target/PowerPC/PPCInstrFuture.td @@ -45,81 +45,251 @@ multiclass XOForm_RTAB5_L1r opcode, bits<9> xo, dag OOL, dag IOL, } } -class VXForm_VRTB5 xo, bits<5> R, dag OOL, dag IOL, string asmstr, - list pattern> : I<4, OOL, IOL, asmstr, NoItinerary> { +class VXForm_VRTB5_Base xo, dag OOL, dag IOL, string asmstr, + list pattern> + : I<4, OOL, IOL, asmstr, NoItinerary> { bits<5> VRT; bits<5> VRB; let Pattern = pattern; let Inst{6...10} = VRT; - let Inst{11...15} = R; let Inst{16...20} = VRB; let Inst{21...31} = xo; } +class VXForm_VRTB5 xo, bits<5> R, dag OOL, dag IOL, string asmstr, + list pattern> + : VXForm_VRTB5_Base { + + let Inst{11...15} = R; +} + class VXForm_VRTB5_UIM2 xo, bits<3> R, dag OOL, dag IOL, string asmstr, list pattern> - : I<4, OOL, IOL, asmstr, NoItinerary> { - bits<5> VRT; - bits<5> VRB; + : VXForm_VRTB5_Base { bits<2> UIM; - let Pattern = pattern; - - let Inst{6...10} = VRT; let Inst{11...13} = R; let Inst{14...15} = UIM; - let Inst{16...20} = VRB; - let Inst{21...31} = xo; } class VXForm_VRTB5_UIM1 xo, bits<4> R, dag OOL, dag IOL, string asmstr, list pattern> - : I<4, OOL, IOL, asmstr, NoItinerary> { - bits<5> VRT; - bits<5> VRB; + : VXForm_VRTB5_Base { bits<1> UIM; - let Pattern = pattern; - - let Inst{6...10} = VRT; let Inst{11...14} = R; let Inst{15} = UIM; - let Inst{16...20} = VRB; - let Inst{21...31} = xo; } class VXForm_VRTB5_UIM3 xo, bits<2> R, dag OOL, dag IOL, string asmstr, list pattern> - : I<4, OOL, IOL, asmstr, NoItinerary> { - bits<5> VRT; - bits<5> VRB; + : VXForm_VRTB5_Base { bits<3> UIM; - let Pattern = pattern; - - let Inst{6...10} = VRT; let Inst{11...12} = R; let Inst{13...15} = UIM; - let Inst{16...20} = VRB; - let Inst{21...31} = xo; } class VXForm_VRTAB5 xo, dag OOL, dag IOL, string asmstr, - list pattern> : I<4, OOL, IOL, asmstr, NoItinerary> { - bits<5> VRT; + list pattern> + : VXForm_VRTB5_Base { bits<5> VRA; - bits<5> VRB; + + let Inst{11...15} = VRA; +} + +class XX3Form_XTBp5_M2 xo, dag OOL, dag IOL, string asmstr, + list pattern> + : I<60, OOL, IOL, asmstr, NoItinerary> { + + bits<5> XTp; + bits<5> XBp; + bits<2> M; let Pattern = pattern; - let Inst{6...10} = VRT; - let Inst{11...15} = VRA; - let Inst{16...20} = VRB; - let Inst{21...31} = xo; + let Inst{6...9} = XTp{3...0}; + let Inst {10} = XTp{4}; + let Inst{15} = M{0}; + let Inst{16...19} = XBp{3...0}; + let Inst{20} = M{1}; + let Inst{21...29} = xo; + let Inst{30} = XBp{4}; } +class XX3Form_XTABp5_M2 xo, dag OOL, dag IOL, string asmstr, + list pattern> + : I<60, OOL, IOL, asmstr, NoItinerary> { + + bits<5> XTp; + bits<5> XAp; + bits<5> XBp; + bits<2> M; + + let Pattern = 
pattern; + + let Inst{6...9} = XTp{3...0}; + let Inst{10} = XTp{4}; + let Inst{11...14} = XAp{3...0}; + let Inst{15} = M{0}; + let Inst{16...19} = XBp{3...0}; + let Inst{20} = M{1}; + let Inst{21...28} = xo; + let Inst{29} = XAp{4}; + let Inst{30} = XBp{4}; +} + +class XX3Form_XTAB6_P1 xo, dag OOL, dag IOL, string asmstr, + list pattern> + : I<60, OOL, IOL, asmstr, NoItinerary> { + + bits<6> XT; + bits<6> XA; + bits<6> XB; + bits<1> P; + + let Pattern = pattern; + + let Inst{6...10} = XT{4...0}; + let Inst{11...15} = XA{4...0}; + let Inst{16...20} = XB{4...0}; + let Inst{21...22} = 3; + let Inst{23} = P; + let Inst{24...28} = xo; + let Inst{29} = XA{5}; + let Inst{30} = XB{5}; + let Inst{31} = XT{5}; +} + +class XX3Form_XTAB6 opcode, bits<8> xo, dag OOL, dag IOL, string asmstr, + list pattern> + : I { + + bits<6> XT; + bits<6> XA; + bits<6> XB; + + let Pattern = pattern; + + let Inst{6...10} = XT{4...0}; + let Inst{11...15} = XA{4...0}; + let Inst{16...20} = XB{4...0}; + let Inst{21...28} = xo; + let Inst{29} = XA{5}; + let Inst{30} = XB{5}; + let Inst{31} = XT{5}; +} + +class XX3Form_XTAB6_S xo, dag OOL, dag IOL, string asmstr, + list pattern> + : I<59, OOL, IOL, asmstr, NoItinerary> { + bits<6> XT; + bits<6> XA; + bits<6> XB; + + let Pattern = pattern; + + let Inst{6...10} = XT{4...0}; + let Inst{11...15} = XA{4...0}; + let Inst{16...20} = XB{4...0}; + let Inst{24...28} = xo; + let Inst{29} = XA{5}; + let Inst{30} = XB{5}; + let Inst{31} = XT{5}; +} + +class XX3Form_XTAB6_S3 xo, dag OOL, dag IOL, string asmstr, + list pattern> + : XX3Form_XTAB6_S { + + bits<3> S; + let Inst{21...23} = S; +} + +class XX3Form_XTAB6_3S1 xo, dag OOL, dag IOL, string asmstr, + list pattern> + : XX3Form_XTAB6_S { + + bits<1> S0; + bits<1> S1; + bits<1> S2; + + let Inst{21} = S0; + let Inst{22} = S1; + let Inst{23} = S2; +} + +class XX3Form_XTAB6_2S1 xo, dag OOL, dag IOL, string asmstr, + list pattern> + : XX3Form_XTAB6_S { + + bits<1> S1; + bits<1> S2; + + let Inst{21} = 0; + let Inst{22} = S1; + let Inst{23} = S2; +} + +class XX3Form_XTAB6_P xo, dag OOL, dag IOL, string asmstr, + list pattern> + : I<59, OOL, IOL, asmstr, NoItinerary> { + + bits<6> XT; + bits<6> XA; + bits<6> XB; + bits<1> P; + + let Pattern = pattern; + + let Inst{6...10} = XT{4...0}; + let Inst{11...15} = XA{4...0}; + let Inst{16...20} = XB{4...0}; + let Inst{21} = P; + let Inst{22...28} = xo; + let Inst{29} = XA{5}; + let Inst{30} = XB{5}; + let Inst{31} = XT{5}; +} + +// Prefix instruction classes. + +class 8RR_XX4Form_XTABC6_P opcode, dag OOL, dag IOL, string asmstr, + InstrItinClass itin, list pattern> + : PI<1, opcode, OOL, IOL, asmstr, itin> { + bits<6> XT; + bits<6> XA; + bits<6> XB; + bits<6> XC; + bits<1> P; + + let Pattern = pattern; + + // The prefix. + let Inst{6...7} = 1; + let Inst{8...11} = 0; + + // The instruction. 
+ let Inst{38...42} = XT{4...0}; + let Inst{43...47} = XA{4...0}; + let Inst{48...52} = XB{4...0}; + let Inst{53...57} = XC{4...0}; + let Inst{58} = 1; + let Inst{59} = P; + let Inst{60} = XC{5}; + let Inst{61} = XA{5}; + let Inst{62} = XB{5}; + let Inst{63} = XT{5}; +} + +//-------------------------- Instruction definitions -------------------------// +// Predicate combinations available: +// [IsISAFuture] +// [HasVSX, IsISAFuture] +// [HasVSX, PrefixInstrs, IsISAFuture] + let Predicates = [IsISAFuture] in { defm SUBFUS : XOForm_RTAB5_L1r<31, 72, (outs g8rc:$RT), (ins g8rc:$RA, g8rc:$RB, u1imm:$L), "subfus", @@ -134,10 +304,10 @@ let Predicates = [HasVSX, IsISAFuture] in { def LXVRLL : XX1Form_memOp<31, 557, (outs vsrc:$XT), (ins (memr $RA):$addr, g8rc:$RB), "lxvrll $XT, $addr, $RB", IIC_LdStLoad, []>; - def LXVPRL : XForm_XTp5_XAB5<31, 589, (outs vsrprc:$XTp), + def LXVPRL : XForm_XTp5_RAB5<31, 589, (outs vsrprc:$XTp), (ins (memr $RA):$addr, g8rc:$RB), "lxvprl $XTp, $addr, $RB", IIC_LdStLFD, []>; - def LXVPRLL : XForm_XTp5_XAB5<31, 621, (outs vsrprc:$XTp), + def LXVPRLL : XForm_XTp5_RAB5<31, 621, (outs vsrprc:$XTp), (ins (memr $RA):$addr, g8rc:$RB), "lxvprll $XTp, $addr, $RB", IIC_LdStLFD, []>; } @@ -149,11 +319,11 @@ let Predicates = [HasVSX, IsISAFuture] in { def STXVRLL : XX1Form_memOp<31, 685, (outs), (ins vsrc:$XT, (memr $RA):$addr, g8rc:$RB), "stxvrll $XT, $addr, $RB", IIC_LdStLoad, []>; - def STXVPRL : XForm_XTp5_XAB5<31, 717, (outs), + def STXVPRL : XForm_XTp5_RAB5<31, 717, (outs), (ins vsrprc:$XTp, (memr $RA):$addr, g8rc:$RB), "stxvprl $XTp, $addr, $RB", IIC_LdStLFD, []>; def STXVPRLL - : XForm_XTp5_XAB5<31, 749, (outs), + : XForm_XTp5_RAB5<31, 749, (outs), (ins vsrprc:$XTp, (memr $RA):$addr, g8rc:$RB), "stxvprll $XTp, $addr, $RB", IIC_LdStLFD, []>; } @@ -191,9 +361,118 @@ let Predicates = [HasVSX, IsISAFuture] in { def VUCMPRLH : VXForm_VRTAB5<323, (outs vrrc:$VRT), (ins vrrc:$VRA, vrrc:$VRB), "vucmprlh $VRT, $VRA, $VRB", []>; + + // AES Acceleration Instructions + def XXAESENCP : XX3Form_XTABp5_M2<194, (outs vsrprc:$XTp), + (ins vsrprc:$XAp, vsrprc:$XBp, u2imm:$M), + "xxaesencp $XTp, $XAp, $XBp, $M", []>; + def XXAESDECP : XX3Form_XTABp5_M2<202, (outs vsrprc:$XTp), + (ins vsrprc:$XAp, vsrprc:$XBp, u2imm:$M), + "xxaesdecp $XTp, $XAp, $XBp, $M", []>; + def XXAESGENLKP : XX3Form_XTBp5_M2<420, (outs vsrprc:$XTp), + (ins vsrprc:$XBp, u2imm:$M), + "xxaesgenlkp $XTp, $XBp, $M", []>; + def XXGFMUL128 : XX3Form_XTAB6_P1<26, (outs vsrc:$XT), + (ins vsrc:$XA, vsrc:$XB, u1imm:$P), + "xxgfmul128 $XT, $XA, $XB, $P", []>; + + // VSX Vector Integer Arithmetic Instructions + def XVADDUWM : XX3Form_XTAB6<60, 131, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvadduwm $XT, $XA, $XB", []>; + def XVADDUHM : XX3Form_XTAB6<60, 139, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvadduhm $XT, $XA, $XB", []>; + def XVSUBUWM: XX3Form_XTAB6<60, 147, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvsubuwm $XT, $XA, $XB", []>; + def XVSUBUHM: XX3Form_XTAB6<60, 155, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvsubuhm $XT, $XA, $XB", []>; + def XVMULUWM: XX3Form_XTAB6<60, 163, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvmuluwm $XT, $XA, $XB", []>; + def XVMULUHM: XX3Form_XTAB6<60, 171, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvmuluhm $XT, $XA, $XB", []>; + def XVMULHSW: XX3Form_XTAB6<60, 179, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvmulhsw $XT, $XA, $XB", []>; + def XVMULHSH: XX3Form_XTAB6<60, 187, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvmulhsh $XT, $XA, $XB", []>; + def XVMULHUW: 
XX3Form_XTAB6<60, 114, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvmulhuw $XT, $XA, $XB", []>; + def XVMULHUH: XX3Form_XTAB6<60, 122, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xvmulhuh $XT, $XA, $XB", []>; + + // Elliptic Curve Cryptography Acceleration Instructions. + def XXMULMUL + : XX3Form_XTAB6_S3<1, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u3imm:$S), + "xxmulmul $XT, $XA, $XB, $S", []>; + def XXMULMULHIADD + : XX3Form_XTAB6_3S1<9, (outs vsrc:$XT), + (ins vsrc:$XA, vsrc:$XB, u1imm:$S0, u1imm:$S1, + u1imm:$S2), + "xxmulmulhiadd $XT, $XA, $XB, $S0, $S1, $S2", []>; + def XXMULMULLOADD + : XX3Form_XTAB6_2S1<17, (outs vsrc:$XT), + (ins vsrc:$XA, vsrc:$XB, u1imm:$S1, u1imm:$S2), + "xxmulmulloadd $XT, $XA, $XB, $S1, $S2", []>; + def XXSSUMUDM + : XX3Form_XTAB6_P<25, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u1imm:$P), + "xxssumudm $XT, $XA, $XB, $P", []>; + def XXSSUMUDMC + : XX3Form_XTAB6_P<57, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, u1imm:$P), + "xxssumudmc $XT, $XA, $XB, $P", []>; + def XSADDADDUQM + : XX3Form_XTAB6<59, 96, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsaddadduqm $XT, $XA, $XB", []>; + def XSADDADDSUQM + : XX3Form_XTAB6<59, 104, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsaddaddsuqm $XT, $XA, $XB", []>; + def XSADDSUBUQM + : XX3Form_XTAB6<59, 112, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsaddsubuqm $XT, $XA, $XB", []>; + def XSADDSUBSUQM + : XX3Form_XTAB6<59, 224, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsaddsubsuqm $XT, $XA, $XB", []>; + def XSMERGE2T1UQM + : XX3Form_XTAB6<59, 232, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsmerge2t1uqm $XT, $XA, $XB", []>; + def XSMERGE2T2UQM + : XX3Form_XTAB6<59, 240, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsmerge2t2uqm $XT, $XA, $XB", []>; + def XSMERGE2T3UQM + : XX3Form_XTAB6<59, 89, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsmerge2t3uqm $XT, $XA, $XB", []>; + def XSMERGE3T1UQM + : XX3Form_XTAB6<59, 121, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsmerge3t1uqm $XT, $XA, $XB", []>; + def XSREBASE2T1UQM + : XX3Form_XTAB6<59, 145, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsrebase2t1uqm $XT, $XA, $XB", []>; + def XSREBASE2T2UQM + : XX3Form_XTAB6<59, 177, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsrebase2t2uqm $XT, $XA, $XB", []>; + def XSREBASE2T3UQM + : XX3Form_XTAB6<59, 209, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsrebase2t3uqm $XT, $XA, $XB", []>; + def XSREBASE2T4UQM + : XX3Form_XTAB6<59, 217, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsrebase2t4uqm $XT, $XA, $XB", []>; + def XSREBASE3T1UQM + : XX3Form_XTAB6<59, 241, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsrebase3t1uqm $XT, $XA, $XB", []>; + def XSREBASE3T2UQM + : XX3Form_XTAB6<59, 249, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsrebase3t2uqm $XT, $XA, $XB", []>; + def XSREBASE3T3UQM + : XX3Form_XTAB6<59, 195, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB), + "xsrebase3t3uqm $XT, $XA, $XB", []>; +} + +let Predicates = [HasVSX, PrefixInstrs, IsISAFuture] in { + def XXSSUMUDMCEXT + : 8RR_XX4Form_XTABC6_P< + 34, (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB, vsrc:$XC, u1imm:$P), + "xxssumudmcext $XT, $XA, $XB, $XC, $P", IIC_VecGeneral, []>; } //---------------------------- Anonymous Patterns ----------------------------// +// Predicate combinations available: // Load/Store VSX Vector with Right Length (Left-justified). 
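
As a cross-check of the XX3Form_XTAB6 layout used by the xvadduwm/xvmulhuw family above, here is a minimal standalone sketch (plain C++, not part of this patch) of how those Inst{...} assignments pack into a 32-bit word. PPC numbers instruction bits 0..31 from the most significant bit, so Inst{a...b} lands in machine bits (31-b)..(31-a); the primary opcode sitting in Inst{0...5} is an assumption based on the usual PPC base instruction format, and the register numbers are made up for the demo.

#include <cstdint>
#include <cstdio>

// Place `val` into PPC instruction bit positions hi..lo (bit 0 is the MSB).
static uint32_t setField(uint32_t word, unsigned hi, unsigned lo, uint32_t val) {
  unsigned width = lo - hi + 1;
  uint32_t mask = width >= 32 ? ~0u : ((1u << width) - 1u);
  return word | ((val & mask) << (31 - lo));
}

int main() {
  // xvadduwm XT, XA, XB as encoded by XX3Form_XTAB6<60, 131, ...>.
  unsigned XT = 34, XA = 5, XB = 40; // hypothetical VSX register numbers (0..63)
  uint32_t w = 0;
  w = setField(w, 0, 5, 60);              // primary opcode (assumed Inst{0...5})
  w = setField(w, 6, 10, XT & 0x1f);      // XT{4...0}
  w = setField(w, 11, 15, XA & 0x1f);     // XA{4...0}
  w = setField(w, 16, 20, XB & 0x1f);     // XB{4...0}
  w = setField(w, 21, 28, 131);           // xo
  w = setField(w, 29, 29, (XA >> 5) & 1); // XA{5}
  w = setField(w, 30, 30, (XB >> 5) & 1); // XB{5}
  w = setField(w, 31, 31, (XT >> 5) & 1); // XT{5}
  std::printf("xvadduwm encoding: 0x%08x\n", w);
  return 0;
}
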
def : Pat<(v4i32 (int_ppc_vsx_lxvrl addr:$RA, i64:$RB)), (LXVRL $RA, $RB)>; @@ -210,3 +489,32 @@ def : Pat<(int_ppc_vsx_stxvprl v256i1:$XTp, addr:$RA, i64:$RB), (STXVPRL $XTp, $RA, $RB)>; def : Pat<(int_ppc_vsx_stxvprll v256i1:$XTp, addr:$RA, i64:$RB), (STXVPRLL $XTp, $RA, $RB)>; + +//---------------------------- Instruction aliases ---------------------------// +// Predicate combinations available: +// [HasVSX, IsISAFuture] + +let Predicates = [HasVSX, IsISAFuture] in { + def : InstAlias<"xxaes128encp $XTp, $XAp, $XBp", + (XXAESENCP vsrprc:$XTp, vsrprc:$XAp, vsrprc:$XBp, 0)>; + def : InstAlias<"xxaes192encp $XTp, $XAp, $XBp", + (XXAESENCP vsrprc:$XTp, vsrprc:$XAp, vsrprc:$XBp, 1)>; + def : InstAlias<"xxaes256encp $XTp, $XAp, $XBp", + (XXAESENCP vsrprc:$XTp, vsrprc:$XAp, vsrprc:$XBp, 2)>; + def : InstAlias<"xxaes128decp $XTp, $XAp, $XBp", + (XXAESDECP vsrprc:$XTp, vsrprc:$XAp, vsrprc:$XBp, 0)>; + def : InstAlias<"xxaes192decp $XTp, $XAp, $XBp", + (XXAESDECP vsrprc:$XTp, vsrprc:$XAp, vsrprc:$XBp, 1)>; + def : InstAlias<"xxaes256decp $XTp, $XAp, $XBp", + (XXAESDECP vsrprc:$XTp, vsrprc:$XAp, vsrprc:$XBp, 2)>; + def : InstAlias<"xxaes128genlkp $XTp, $XBp", (XXAESGENLKP vsrprc:$XTp, + vsrprc:$XBp, 0)>; + def : InstAlias<"xxaes192genlkp $XTp, $XBp", (XXAESGENLKP vsrprc:$XTp, + vsrprc:$XBp, 1)>; + def : InstAlias<"xxaes256genlkp $XTp, $XBp", (XXAESGENLKP vsrprc:$XTp, + vsrprc:$XBp, 2)>; + def : InstAlias<"xxgfmul128gcm $XT, $XA, $XB", (XXGFMUL128 vsrc:$XT, vsrc:$XA, + vsrc:$XB, 0)>; + def : InstAlias<"xxgfmul128xts $XT, $XA, $XB", (XXGFMUL128 vsrc:$XT, vsrc:$XA, + vsrc:$XB, 1)>; +} diff --git a/llvm/lib/Target/PowerPC/PPCInstrHTM.td b/llvm/lib/Target/PowerPC/PPCInstrHTM.td index 8d0ac512b290d..6b5da44c91c2b 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrHTM.td +++ b/llvm/lib/Target/PowerPC/PPCInstrHTM.td @@ -11,10 +11,6 @@ // //===----------------------------------------------------------------------===// - - -def HasHTM : Predicate<"Subtarget->hasHTM()">; - def HTM_get_imm : SDNodeXFormgetZExtValue(), SDLoc(N)); }]>; diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp index 55e38bcf4afc9..3014aa6bfe31e 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -1075,7 +1075,7 @@ Register PPCInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, // For opcodes with the ReMaterializable flag set, this function is called to // verify the instruction is really rematable. 
-bool PPCInstrInfo::isReallyTriviallyReMaterializable( +bool PPCInstrInfo::isReMaterializableImpl( const MachineInstr &MI) const { switch (MI.getOpcode()) { default: @@ -1112,7 +1112,7 @@ bool PPCInstrInfo::isReallyTriviallyReMaterializable( case PPC::DMXXSETACCZ: return true; } - return TargetInstrInfo::isReallyTriviallyReMaterializable(MI); + return TargetInstrInfo::isReMaterializableImpl(MI); } Register PPCInstrInfo::isStoreToStackSlot(const MachineInstr &MI, diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/llvm/lib/Target/PowerPC/PPCInstrInfo.h index 63ebd65910572..d67fc28935586 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.h +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.h @@ -530,7 +530,7 @@ class PPCInstrInfo : public PPCGenInstrInfo { unsigned &SubIdx) const override; Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override; - bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override; + bool isReMaterializableImpl(const MachineInstr &MI) const override; Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override; diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td index 1c45050cdf9ca..aca7abd5a45a7 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td @@ -1282,7 +1282,7 @@ def RESTORE_CRBIT : PPCEmitTimePseudo<(outs crbitrc:$cond), (ins memri:$F), let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, hasSideEffects = 0 in { let isPredicable = 1, isReturn = 1, Uses = [LR, RM] in def BLR : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", IIC_BrB, - [(PPCretglue)]>, Requires<[In32BitMode]>; + [(PPCretglue)]>, Requires<[IsPPC32]>; let isBranch = 1, isIndirectBranch = 1, Uses = [CTR] in { let isPredicable = 1 in def BCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB, @@ -1455,7 +1455,7 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in { let isPredicable = 1 in def BCTRL : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), "bctrl", IIC_BrB, [(PPCbctrl)]>, - Requires<[In32BitMode]>; + Requires<[IsPPC32]>; let isCodeGenOnly = 1 in { def BCCCTRL : XLForm_2_br<19, 528, 1, (outs), (ins (pred $BIBO, $CR):$cond), @@ -1541,7 +1541,7 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR, RM], isCodeGenOnly = 1 in { let isPredicable = 1 in def BCTRL_RM : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), "bctrl", IIC_BrB, [(PPCbctrl_rm)]>, - Requires<[In32BitMode]>; + Requires<[IsPPC32]>; } } @@ -1567,7 +1567,7 @@ let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, def BCTRL_LWZinto_toc: XLForm_2_ext_and_DForm_1<19, 528, 20, 0, 1, 32, (outs), (ins (memri $D, $RA):$addr), "bctrl\n\tlwz 2, $addr", IIC_BrB, - [(PPCbctrl_load_toc iaddr:$addr)]>, Requires<[In32BitMode]>; + [(PPCbctrl_load_toc iaddr:$addr)]>, Requires<[IsPPC32]>; } @@ -1576,7 +1576,7 @@ let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, def BCTRL_LWZinto_toc_RM: XLForm_2_ext_and_DForm_1<19, 528, 20, 0, 1, 32, (outs), (ins (memri $D, $RA):$addr), "bctrl\n\tlwz 2, $addr", IIC_BrB, - [(PPCbctrl_load_toc_rm iaddr:$addr)]>, Requires<[In32BitMode]>; + [(PPCbctrl_load_toc_rm iaddr:$addr)]>, Requires<[IsPPC32]>; } @@ -1585,7 +1585,7 @@ let isCodeGenOnly = 1, hasSideEffects = 0 in { let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1, isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR, RM] in def TAILBCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB, - []>, Requires<[In32BitMode]>; + []>, Requires<[IsPPC32]>; let isBranch = 1, 
isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7, isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in @@ -1608,7 +1608,7 @@ let hasSideEffects = 1 in { def EH_SjLj_SetJmp32 : PPCCustomInserterPseudo<(outs gprc:$dst), (ins memr:$buf), "#EH_SJLJ_SETJMP32", [(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>, - Requires<[In32BitMode]>; + Requires<[IsPPC32]>; } let hasSideEffects = 1, isBarrier = 1 in { @@ -1616,7 +1616,7 @@ let hasSideEffects = 1, isBarrier = 1 in { def EH_SjLj_LongJmp32 : PPCCustomInserterPseudo<(outs), (ins memr:$buf), "#EH_SJLJ_LONGJMP32", [(PPCeh_sjlj_longjmp addr:$buf)]>, - Requires<[In32BitMode]>; + Requires<[IsPPC32]>; } // This pseudo is never removed from the function, as it serves as @@ -3438,8 +3438,6 @@ def Msk2Imm : ImmLeaf(Imm); }]>; def Msk4Imm : ImmLeaf(Imm); }]>; def Msk8Imm : ImmLeaf(Imm); }]>; -def MMA : Predicate<"Subtarget->hasMMA()">; - // Prefixed instructions may require access to the above defs at a later // time so we include this after the def. include "PPCInstrP10.td" @@ -5144,9 +5142,9 @@ def RotateInsertByte1 { } // Clear the upper half of the register when in 64-bit mode -let Predicates = [In64BitMode] in +let Predicates = [IsPPC64] in def : Pat<(i32 (bitreverse i32:$A)), (RLDICL_32 RotateInsertByte1.Left, 0, 32)>; -let Predicates = [In32BitMode] in +let Predicates = [IsPPC32] in def : Pat<(i32 (bitreverse i32:$A)), RotateInsertByte1.Left>; // Fast 64-bit reverse bits algorithm: diff --git a/llvm/lib/Target/PowerPC/PPCInstrP10.td b/llvm/lib/Target/PowerPC/PPCInstrP10.td index 8ee9cc952dec6..2384959a60a43 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrP10.td +++ b/llvm/lib/Target/PowerPC/PPCInstrP10.td @@ -51,10 +51,6 @@ // Moreover, the order of operands reflects the order of operands // in the encoding. -//-------------------------- Predicate definitions ---------------------------// -def IsPPC32 : Predicate<"!Subtarget->isPPC64()">; - - //===----------------------------------------------------------------------===// // PowerPC ISA 3.1 specific type constraints. 
// @@ -634,9 +630,6 @@ multiclass 8LS_DForm_R_SI34_XT6_RA5_MEM_p opcode, dag OOL, dag IOL, } } -def PrefixInstrs : Predicate<"Subtarget->hasPrefixInstrs()">; -def IsISA3_1 : Predicate<"Subtarget->isISA3_1()">; -def PairedVectorMemops : Predicate<"Subtarget->pairedVectorMemops()">; def RCCp { dag AToVSRC = (COPY_TO_REGCLASS $XA, VSRC); dag BToVSRC = (COPY_TO_REGCLASS $XB, VSRC); @@ -870,7 +863,7 @@ class DQForm_XTp5_RA17_MEM opcode, bits<4> xo, dag OOL, dag IOL, let Inst{28...31} = xo; } -class XForm_XTp5_XAB5 opcode, bits<10> xo, dag OOL, dag IOL, +class XForm_XTp5_RAB5 opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> : I, XFormMemOp { bits<5> XTp; @@ -1159,7 +1152,7 @@ let Predicates = [PairedVectorMemops] in { def LXVP : DQForm_XTp5_RA17_MEM<6, 0, (outs vsrprc:$XTp), (ins (memrix16 $DQ, $RA):$addr), "lxvp $XTp, $addr", IIC_LdStLFD, []>; - def LXVPX : XForm_XTp5_XAB5<31, 333, (outs vsrprc:$XTp), (ins (memrr $RA, $RB):$addr), + def LXVPX : XForm_XTp5_RAB5<31, 333, (outs vsrprc:$XTp), (ins (memrr $RA, $RB):$addr), "lxvpx $XTp, $addr", IIC_LdStLFD, []>; } @@ -1168,7 +1161,7 @@ let Predicates = [PairedVectorMemops] in { def STXVP : DQForm_XTp5_RA17_MEM<6, 1, (outs), (ins vsrprc:$XTp, (memrix16 $DQ, $RA):$addr), "stxvp $XTp, $addr", IIC_LdStLFD, []>; - def STXVPX : XForm_XTp5_XAB5<31, 461, (outs), (ins vsrprc:$XTp, (memrr $RA, $RB):$addr), + def STXVPX : XForm_XTp5_RAB5<31, 461, (outs), (ins vsrprc:$XTp, (memrr $RA, $RB):$addr), "stxvpx $XTp, $addr", IIC_LdStLFD, []>; } diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td index 4e5165bfcda55..979ba31b0431b 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td +++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td @@ -116,20 +116,6 @@ def PPCSToV : SDNode<"PPCISD::SCALAR_TO_VECTOR_PERMUTED", SDTypeProfile<1, 1, []>, []>; def PPCxxperm : SDNode<"PPCISD::XXPERM", SDT_PPCxxperm, []>; -//-------------------------- Predicate definitions ---------------------------// -def HasVSX : Predicate<"Subtarget->hasVSX()">; -def IsLittleEndian : Predicate<"Subtarget->isLittleEndian()">; -def IsBigEndian : Predicate<"!Subtarget->isLittleEndian()">; -def IsPPC64 : Predicate<"Subtarget->isPPC64()">; -def HasOnlySwappingMemOps : Predicate<"!Subtarget->hasP9Vector()">; -def NoP8Vector : Predicate<"!Subtarget->hasP8Vector()">; -def HasP8Vector : Predicate<"Subtarget->hasP8Vector()">; -def HasDirectMove : Predicate<"Subtarget->hasDirectMove()">; -def NoP9Vector : Predicate<"!Subtarget->hasP9Vector()">; -def HasP9Vector : Predicate<"Subtarget->hasP9Vector()">; -def NoP9Altivec : Predicate<"!Subtarget->hasP9Altivec()">; -def NoP10Vector: Predicate<"!Subtarget->hasP10Vector()">; -def HasP10Vector: Predicate<"Subtarget->hasP10Vector()">; def PPCldsplatAlign16 : PatFrag<(ops node:$ptr), (PPCldsplat node:$ptr), [{ return cast(N)->getAlign() >= Align(16) && @@ -1293,13 +1279,13 @@ let Predicates = [HasVSX, HasP8Vector] in { def MFVSRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$RA), (ins vsfrc:$XT), "mfvsrd $RA, $XT", IIC_VecGeneral, [(set i64:$RA, (PPCmfvsr f64:$XT))]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; // FIXME: Setting the hasSideEffects flag here to match current behaviour. 
let isCodeGenOnly = 1, hasSideEffects = 1 in def MFVRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$RA), (ins vsrc:$XT), "mfvsrd $RA, $XT", IIC_VecGeneral, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def MFVSRWZ : XX1_RS6_RD5_XO<31, 115, (outs gprc:$RA), (ins vsfrc:$XT), "mfvsrwz $RA, $XT", IIC_VecGeneral, [(set i32:$RA, (PPCmfvsr f64:$XT))]>, ZExt32To64; @@ -1311,13 +1297,13 @@ let Predicates = [HasVSX, HasP8Vector] in { def MTVSRD : XX1_RS6_RD5_XO<31, 179, (outs vsfrc:$XT), (ins g8rc:$RA), "mtvsrd $XT, $RA", IIC_VecGeneral, [(set f64:$XT, (PPCmtvsra i64:$RA))]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; // FIXME: Setting the hasSideEffects flag here to match current behaviour. let isCodeGenOnly = 1, hasSideEffects = 1 in def MTVRD : XX1_RS6_RD5_XO<31, 179, (outs vsrc:$XT), (ins g8rc:$RA), "mtvsrd $XT, $RA", IIC_VecGeneral, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def MTVSRWA : XX1_RS6_RD5_XO<31, 211, (outs vsfrc:$XT), (ins gprc:$RA), "mtvsrwa $XT, $RA", IIC_VecGeneral, [(set f64:$XT, (PPCmtvsra i32:$RA))]>; @@ -1344,11 +1330,11 @@ def MTVSRWS: XX1_RS6_RD5_XO<31, 403, (outs vsrc:$XT), (ins gprc:$RA), def MTVSRDD: XX1Form<31, 435, (outs vsrc:$XT), (ins g8rc_nox0:$RA, g8rc:$RB), "mtvsrdd $XT, $RA, $RB", IIC_VecGeneral, - []>, Requires<[In64BitMode]>; + []>, Requires<[IsPPC64]>; def MFVSRLD: XX1_RS6_RD5_XO<31, 307, (outs g8rc:$RA), (ins vsrc:$XT), "mfvsrld $RA, $XT", IIC_VecGeneral, - []>, Requires<[In64BitMode]>; + []>, Requires<[IsPPC64]>; } // HasVSX, IsISA3_0, HasDirectMove diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp index 50891da333f01..21dbb7cbc9844 100644 --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -811,6 +811,7 @@ struct RISCVOperand final : public MCParsedAsmOperand { bool isSImm6() const { return isSImm<6>(); } bool isSImm10() const { return isSImm<10>(); } bool isSImm11() const { return isSImm<11>(); } + bool isSImm12() const { return isSImm<12>(); } bool isSImm16() const { return isSImm<16>(); } bool isSImm26() const { return isSImm<26>(); } @@ -859,7 +860,7 @@ struct RISCVOperand final : public MCParsedAsmOperand { return SignExtend64<32>(Imm); } - bool isSImm12() const { + bool isSImm12LO() const { if (!isExpr()) return false; @@ -1599,6 +1600,9 @@ bool RISCVAsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_InvalidUImm16NonZero: return generateImmOutOfRangeError(Operands, ErrorInfo, 1, (1 << 16) - 1); case Match_InvalidSImm12: + return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 11), + (1 << 11) - 1); + case Match_InvalidSImm12LO: return generateImmOutOfRangeError( Operands, ErrorInfo, -(1 << 11), (1 << 11) - 1, "operand must be a symbol with %lo/%pcrel_lo/%tprel_lo specifier or an " diff --git a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp index ae44306170758..50730c697989d 100644 --- a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp +++ b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp @@ -18,6 +18,7 @@ #include "llvm/MC/TargetRegistry.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/DebugLog.h" #define DEBUG_TYPE "llvm-mca-riscv-custombehaviour" @@ -86,7 +87,8 @@ uint8_t RISCVSEWInstrument::getSEW() const { bool RISCVInstrumentManager::supportsInstrumentType( llvm::StringRef Type) const { return Type == RISCVLMULInstrument::DESC_NAME || - Type == 
RISCVSEWInstrument::DESC_NAME; + Type == RISCVSEWInstrument::DESC_NAME || + InstrumentManager::supportsInstrumentType(Type); } UniqueInstrument @@ -94,8 +96,8 @@ RISCVInstrumentManager::createInstrument(llvm::StringRef Desc, llvm::StringRef Data) { if (Desc == RISCVLMULInstrument::DESC_NAME) { if (!RISCVLMULInstrument::isDataValid(Data)) { - LLVM_DEBUG(dbgs() << "RVCB: Bad data for instrument kind " << Desc << ": " - << Data << '\n'); + LDBG() << "RVCB: Bad data for instrument kind " << Desc << ": " << Data + << '\n'; return nullptr; } return std::make_unique(Data); @@ -103,23 +105,23 @@ RISCVInstrumentManager::createInstrument(llvm::StringRef Desc, if (Desc == RISCVSEWInstrument::DESC_NAME) { if (!RISCVSEWInstrument::isDataValid(Data)) { - LLVM_DEBUG(dbgs() << "RVCB: Bad data for instrument kind " << Desc << ": " - << Data << '\n'); + LDBG() << "RVCB: Bad data for instrument kind " << Desc << ": " << Data + << '\n'; return nullptr; } return std::make_unique(Data); } - LLVM_DEBUG(dbgs() << "RVCB: Unknown instrumentation Desc: " << Desc << '\n'); - return nullptr; + LDBG() << "RVCB: Creating default instrument for Desc: " << Desc << '\n'; + return InstrumentManager::createInstrument(Desc, Data); } SmallVector RISCVInstrumentManager::createInstruments(const MCInst &Inst) { if (Inst.getOpcode() == RISCV::VSETVLI || Inst.getOpcode() == RISCV::VSETIVLI) { - LLVM_DEBUG(dbgs() << "RVCB: Found VSETVLI and creating instrument for it: " - << Inst << "\n"); + LDBG() << "RVCB: Found VSETVLI and creating instrument for it: " << Inst + << "\n"; unsigned VTypeI = Inst.getOperand(2).getImm(); RISCVVType::VLMUL VLMUL = RISCVVType::getVLMUL(VTypeI); @@ -250,8 +252,7 @@ unsigned RISCVInstrumentManager::getSchedClassID( // Need LMUL or LMUL, SEW in order to override opcode. If no LMUL is provided, // then no option to override. if (!LI) { - LLVM_DEBUG( - dbgs() << "RVCB: Did not use instrumentation to override Opcode.\n"); + LDBG() << "RVCB: Did not use instrumentation to override Opcode.\n"; return SchedClassID; } uint8_t LMUL = LI->getLMUL(); @@ -313,22 +314,21 @@ unsigned RISCVInstrumentManager::getSchedClassID( // Not a RVV instr if (!VPOpcode) { - LLVM_DEBUG( - dbgs() << "RVCB: Could not find PseudoInstruction for Opcode " - << MCII.getName(Opcode) - << ", LMUL=" << (LI ? LI->getData() : "Unspecified") - << ", SEW=" << (SI ? SI->getData() : "Unspecified") - << ". Ignoring instrumentation and using original SchedClassID=" - << SchedClassID << '\n'); + LDBG() << "RVCB: Could not find PseudoInstruction for Opcode " + << MCII.getName(Opcode) + << ", LMUL=" << (LI ? LI->getData() : "Unspecified") + << ", SEW=" << (SI ? SI->getData() : "Unspecified") + << ". Ignoring instrumentation and using original SchedClassID=" + << SchedClassID << '\n'; return SchedClassID; } // Override using pseudo - LLVM_DEBUG(dbgs() << "RVCB: Found Pseudo Instruction for Opcode " - << MCII.getName(Opcode) << ", LMUL=" << LI->getData() - << ", SEW=" << (SI ? SI->getData() : "Unspecified") - << ". Overriding original SchedClassID=" << SchedClassID - << " with " << MCII.getName(*VPOpcode) << '\n'); + LDBG() << "RVCB: Found Pseudo Instruction for Opcode " << MCII.getName(Opcode) + << ", LMUL=" << LI->getData() + << ", SEW=" << (SI ? SI->getData() : "Unspecified") + << ". 
Overriding original SchedClassID=" << SchedClassID << " with " + << MCII.getName(*VPOpcode) << '\n'; return MCII.get(*VPOpcode).getSchedClass(); } diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp index cb57c4377779f..410561855e181 100644 --- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp +++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp @@ -46,6 +46,8 @@ class RISCVExpandPseudo : public MachineFunctionPass { MachineBasicBlock::iterator &NextMBBI); bool expandCCOp(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineBasicBlock::iterator &NextMBBI); + bool expandCCOpToCMov(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI); bool expandVMSET_VMCLR(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned Opcode); bool expandMV_FPR16INX(MachineBasicBlock &MBB, @@ -178,6 +180,9 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB, bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineBasicBlock::iterator &NextMBBI) { + // First try expanding to a Conditional Move rather than a branch+mv + if (expandCCOpToCMov(MBB, MBBI)) + return true; MachineFunction *MF = MBB.getParent(); MachineInstr &MI = *MBBI; @@ -193,7 +198,7 @@ bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB, // we need to invert the branch condition to jump over TrueBB when the // condition is false. auto CC = static_cast(MI.getOperand(3).getImm()); - CC = RISCVCC::getOppositeBranchCondition(CC); + CC = RISCVCC::getInverseBranchCondition(CC); // Insert branch instruction. BuildMI(MBB, MBBI, DL, TII->get(RISCVCC::getBrCond(CC))) @@ -277,6 +282,86 @@ bool RISCVExpandPseudo::expandCCOp(MachineBasicBlock &MBB, return true; } +bool RISCVExpandPseudo::expandCCOpToCMov(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) { + MachineInstr &MI = *MBBI; + DebugLoc DL = MI.getDebugLoc(); + + if (MI.getOpcode() != RISCV::PseudoCCMOVGPR && + MI.getOpcode() != RISCV::PseudoCCMOVGPRNoX0) + return false; + + if (!STI->hasVendorXqcicm()) + return false; + + // FIXME: Would be wonderful to support LHS=X0, but not very easy. 
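
For readers unfamiliar with the Xqcicm conditional moves this expansion targets, a minimal standalone sketch (plain C++, not LLVM code) of the equivalence it relies on, using the operand order from the expansion comments ($dst = QC_MVcc $falsev (=$dst), $lhs, $rhs, $truev); the concrete values are made up for the demo.

#include <cassert>
#include <cstdint>

// PseudoCCMOVGPR computes  dst = (lhs CC rhs) ? truev : falsev.
// QC_MVcc only writes truev when the condition holds, so the destination must
// already hold falsev -- hence the "$falsev (=$dst)" tied operand.
static int32_t selectEQ(int32_t lhs, int32_t rhs, int32_t fv, int32_t tv) {
  return lhs == rhs ? tv : fv;            // PseudoCCMOVGPR with COND_EQ
}
static int32_t qcMVEQ(int32_t dst, int32_t lhs, int32_t rhs, int32_t src) {
  return lhs == rhs ? src : dst;          // QC_MVEQ dst, lhs, rhs, src
}

int main() {
  for (int32_t l : {-1, 0, 5})
    for (int32_t r : {-1, 0, 5}) {
      // General case: dst is tied to falsev.
      assert(selectEQ(l, r, 100, 200) == qcMVEQ(/*dst=falsev*/ 100, l, r, 200));
      // rhs == X0 (always zero) maps to the immediate form QC_MVEQI with imm 0.
      assert(selectEQ(l, 0, 100, 200) == qcMVEQ(100, l, /*imm*/ 0, 200));
    }
  return 0;
}
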
+ if (MI.getOperand(1).getReg() == RISCV::X0 || + MI.getOperand(4).getReg() == RISCV::X0 || + MI.getOperand(5).getReg() == RISCV::X0) + return false; + + auto CC = static_cast(MI.getOperand(3).getImm()); + + unsigned CMovOpcode, CMovIOpcode; + switch (CC) { + default: + llvm_unreachable("Unhandled CC"); + case RISCVCC::COND_EQ: + CMovOpcode = RISCV::QC_MVEQ; + CMovIOpcode = RISCV::QC_MVEQI; + break; + case RISCVCC::COND_NE: + CMovOpcode = RISCV::QC_MVNE; + CMovIOpcode = RISCV::QC_MVNEI; + break; + case RISCVCC::COND_LT: + CMovOpcode = RISCV::QC_MVLT; + CMovIOpcode = RISCV::QC_MVLTI; + break; + case RISCVCC::COND_GE: + CMovOpcode = RISCV::QC_MVGE; + CMovIOpcode = RISCV::QC_MVGEI; + break; + case RISCVCC::COND_LTU: + CMovOpcode = RISCV::QC_MVLTU; + CMovIOpcode = RISCV::QC_MVLTUI; + break; + case RISCVCC::COND_GEU: + CMovOpcode = RISCV::QC_MVGEU; + CMovIOpcode = RISCV::QC_MVGEUI; + break; + } + + if (MI.getOperand(2).getReg() == RISCV::X0) { + // $dst = PseudoCCMOVGPR $lhs, X0, $cc, $falsev (=$dst), $truev + // $dst = PseudoCCMOVGPRNoX0 $lhs, X0, $cc, $falsev (=$dst), $truev + // => + // $dst = QC_MVccI $falsev (=$dst), $lhs, 0, $truev + BuildMI(MBB, MBBI, DL, TII->get(CMovIOpcode)) + .addDef(MI.getOperand(0).getReg()) + .addReg(MI.getOperand(4).getReg()) + .addReg(MI.getOperand(1).getReg()) + .addImm(0) + .addReg(MI.getOperand(5).getReg()); + + MI.eraseFromParent(); + return true; + } + + // $dst = PseudoCCMOVGPR $lhs, $rhs, $cc, $falsev (=$dst), $truev + // $dst = PseudoCCMOVGPRNoX0 $lhs, $rhs, $cc, $falsev (=$dst), $truev + // => + // $dst = QC_MVcc $falsev (=$dst), $lhs, $rhs, $truev + BuildMI(MBB, MBBI, DL, TII->get(CMovOpcode)) + .addDef(MI.getOperand(0).getReg()) + .addReg(MI.getOperand(4).getReg()) + .addReg(MI.getOperand(1).getReg()) + .addReg(MI.getOperand(2).getReg()) + .addReg(MI.getOperand(5).getReg()); + MI.eraseFromParent(); + return true; +} + bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned Opcode) { diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp index 06ce91771c9e7..7d4535ad46916 100644 --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp @@ -2395,6 +2395,7 @@ bool RISCVFrameLowering::isSupportedStackID(TargetStackID::Value ID) const { case TargetStackID::NoAlloc: case TargetStackID::SGPRSpill: case TargetStackID::WasmLocal: + case TargetStackID::ScalablePredicateVector: return false; } llvm_unreachable("Invalid TargetStackID::Value"); diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td index a9ecf44e8da1e..af1ceb6bcda4e 100644 --- a/llvm/lib/Target/RISCV/RISCVGISel.td +++ b/llvm/lib/Target/RISCV/RISCVGISel.td @@ -41,12 +41,12 @@ def GIImmPlus1 : def PtrVT : PtrValueTypeByHwMode; // Define pattern expansions for pointer ult/slt conditional codes -def : Pat<(XLenVT (setult (PtrVT GPR:$rs1), simm12:$imm12)), - (SLTIU GPR:$rs1, simm12:$imm12)>; +def : Pat<(XLenVT (setult (PtrVT GPR:$rs1), simm12_lo:$imm12)), + (SLTIU GPR:$rs1, simm12_lo:$imm12)>; def : Pat<(XLenVT (setult (PtrVT GPR:$rs1), (PtrVT GPR:$rs2))), (SLTU GPR:$rs1, GPR:$rs2)>; -def : Pat<(XLenVT (setlt (PtrVT GPR:$rs1), simm12:$imm12)), - (SLTI GPR:$rs1, simm12:$imm12)>; +def : Pat<(XLenVT (setlt (PtrVT GPR:$rs1), simm12_lo:$imm12)), + (SLTI GPR:$rs1, simm12_lo:$imm12)>; def : Pat<(XLenVT (setlt (PtrVT GPR:$rs1), (PtrVT GPR:$rs2))), (SLT GPR:$rs1, GPR:$rs2)>; @@ -72,12 +72,12 @@ def : Pat<(XLenVT (setgt (Ty GPR:$rs1), 
(Ty simm12Minus1Nonzero:$imm))), (XORI (SLTI GPR:$rs1, (ImmPlus1 simm12Minus1Nonzero:$imm)), 1)>; def : Pat<(XLenVT (setgt (Ty GPR:$rs1), (Ty GPR:$rs2))), (SLT GPR:$rs2, GPR:$rs1)>; -def : Pat<(XLenVT (setuge (XLenVT GPR:$rs1), (Ty simm12:$imm))), - (XORI (SLTIU GPR:$rs1, simm12:$imm), 1)>; +def : Pat<(XLenVT (setuge (XLenVT GPR:$rs1), (Ty simm12_lo:$imm))), + (XORI (SLTIU GPR:$rs1, simm12_lo:$imm), 1)>; def : Pat<(XLenVT (setuge (Ty GPR:$rs1), (Ty GPR:$rs2))), (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>; -def : Pat<(XLenVT (setge (Ty GPR:$rs1), (Ty simm12:$imm))), - (XORI (SLTI GPR:$rs1, simm12:$imm), 1)>; +def : Pat<(XLenVT (setge (Ty GPR:$rs1), (Ty simm12_lo:$imm))), + (XORI (SLTI GPR:$rs1, simm12_lo:$imm), 1)>; def : Pat<(XLenVT (setge (Ty GPR:$rs1), (Ty GPR:$rs2))), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>; def : Pat<(XLenVT (setule (Ty GPR:$rs1), (Ty simm12Minus1NonzeroNonNeg1:$imm))), @@ -109,15 +109,16 @@ def : LdPat; // Prefer unsigned due to no c.lb in Zcb. def : StPat; let Predicates = [HasAtomicLdSt] in { - def : LdPat; - def : LdPat; + // Prefer unsigned due to no c.lb in Zcb. + def : LdPat; + def : LdPat; def : StPat; def : StPat; } let Predicates = [HasAtomicLdSt, IsRV64] in { - def : LdPat; + // Load pattern is in RISCVInstrInfoA.td and shared with RV32. def : StPat; } @@ -143,8 +144,8 @@ def : Pat<(anyext (i32 GPR:$src)), (COPY GPR:$src)>; def : Pat<(sext (i32 GPR:$src)), (ADDIW GPR:$src, 0)>; def : Pat<(i32 (trunc GPR:$src)), (COPY GPR:$src)>; -def : Pat<(sext_inreg (i64 (add GPR:$rs1, simm12:$imm)), i32), - (ADDIW GPR:$rs1, simm12:$imm)>; +def : Pat<(sext_inreg (i64 (add GPR:$rs1, simm12_lo:$imm)), i32), + (ADDIW GPR:$rs1, simm12_lo:$imm)>; // Use sext if the sign bit of the input is 0. def : Pat<(zext_is_sext (i32 GPR:$src)), (ADDIW GPR:$src, 0)>; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index dda6023b37f7b..437022f5cde9f 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -677,95 +677,6 @@ bool RISCVDAGToDAGISel::trySignedBitfieldExtract(SDNode *Node) { return false; } -bool RISCVDAGToDAGISel::trySignedBitfieldInsertInMask(SDNode *Node) { - // Supported only in Xqcibm for now. - if (!Subtarget->hasVendorXqcibm()) - return false; - - using namespace SDPatternMatch; - - SDValue X; - APInt MaskImm; - if (!sd_match(Node, m_Or(m_OneUse(m_Value(X)), m_ConstInt(MaskImm)))) - return false; - - unsigned ShAmt, Width; - if (!MaskImm.isShiftedMask(ShAmt, Width) || MaskImm.isSignedIntN(12)) - return false; - - // If Zbs is enabled and it is a single bit set we can use BSETI which - // can be compressed to C_BSETI when Xqcibm in enabled. - if (Width == 1 && Subtarget->hasStdExtZbs()) - return false; - - // If C1 is a shifted mask (but can't be formed as an ORI), - // use a bitfield insert of -1. - // Transform (or x, C1) - // -> (qc.insbi x, -1, width, shift) - SDLoc DL(Node); - MVT VT = Node->getSimpleValueType(0); - - SDValue Ops[] = {X, CurDAG->getSignedTargetConstant(-1, DL, VT), - CurDAG->getTargetConstant(Width, DL, VT), - CurDAG->getTargetConstant(ShAmt, DL, VT)}; - SDNode *BitIns = CurDAG->getMachineNode(RISCV::QC_INSBI, DL, VT, Ops); - ReplaceNode(Node, BitIns); - return true; -} - -// Generate a QC_INSB/QC_INSBI from 'or (and X, MaskImm), OrImm' iff the value -// being inserted only sets known zero bits. -bool RISCVDAGToDAGISel::tryBitfieldInsertOpFromOrAndImm(SDNode *Node) { - // Supported only in Xqcibm for now. 
- if (!Subtarget->hasVendorXqcibm()) - return false; - - using namespace SDPatternMatch; - - SDValue And; - APInt MaskImm, OrImm; - if (!sd_match(Node, m_Or(m_OneUse(m_And(m_Value(And), m_ConstInt(MaskImm))), - m_ConstInt(OrImm)))) - return false; - - // Compute the Known Zero for the AND as this allows us to catch more general - // cases than just looking for AND with imm. - KnownBits Known = CurDAG->computeKnownBits(Node->getOperand(0)); - - // The bits being inserted must only set those bits that are known to be zero. - if (!OrImm.isSubsetOf(Known.Zero)) { - // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't - // currently handle this case. - return false; - } - - unsigned ShAmt, Width; - // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00). - if (!Known.Zero.isShiftedMask(ShAmt, Width)) - return false; - - // QC_INSB(I) dst, src, #width, #shamt. - SDLoc DL(Node); - MVT VT = Node->getSimpleValueType(0); - SDValue ImmNode; - auto Opc = RISCV::QC_INSB; - - int32_t LIImm = OrImm.getSExtValue() >> ShAmt; - - if (isInt<5>(LIImm)) { - Opc = RISCV::QC_INSBI; - ImmNode = CurDAG->getSignedTargetConstant(LIImm, DL, MVT::i32); - } else { - ImmNode = selectImm(CurDAG, DL, MVT::i32, LIImm, *Subtarget); - } - - SDValue Ops[] = {And, ImmNode, CurDAG->getTargetConstant(Width, DL, VT), - CurDAG->getTargetConstant(ShAmt, DL, VT)}; - SDNode *BitIns = CurDAG->getMachineNode(Opc, DL, VT, Ops); - ReplaceNode(Node, BitIns); - return true; -} - bool RISCVDAGToDAGISel::trySignedBitfieldInsertInSign(SDNode *Node) { // Only supported with XAndesPerf at the moment. if (!Subtarget->hasVendorXAndesPerf()) @@ -1384,12 +1295,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) { return; } case ISD::OR: { - if (trySignedBitfieldInsertInMask(Node)) - return; - - if (tryBitfieldInsertOpFromOrAndImm(Node)) - return; - if (tryShrinkShlLogicImm(Node)) return; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h index cf2f763abc063..f03b44c875cab 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -76,8 +76,6 @@ class RISCVDAGToDAGISel : public SelectionDAGISel { bool tryShrinkShlLogicImm(SDNode *Node); bool trySignedBitfieldExtract(SDNode *Node); bool trySignedBitfieldInsertInSign(SDNode *Node); - bool trySignedBitfieldInsertInMask(SDNode *Node); - bool tryBitfieldInsertOpFromOrAndImm(SDNode *Node); bool tryUnsignedBitfieldExtract(SDNode *Node, const SDLoc &DL, MVT VT, SDValue X, unsigned Msb, unsigned Lsb); bool tryUnsignedBitfieldInsertInZero(SDNode *Node, const SDLoc &DL, MVT VT, diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 542be1c20efc0..50649cf3caba4 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -79,7 +79,7 @@ static cl::opt FPImmCost(DEBUG_TYPE "-fpimm-cost", cl::Hidden, cl::desc("Give the maximum number of instructions that we will " "use for creating a floating-point immediate value"), - cl::init(2)); + cl::init(3)); static cl::opt ReassocShlAddiAdd("reassoc-shl-addi-add", cl::Hidden, @@ -4564,6 +4564,14 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, break; } + // Do not slideup if the element type of EVec is different. 
+ if (SlideUp) { + MVT EVecEltVT = EVec.getSimpleValueType().getVectorElementType(); + MVT ContainerEltVT = ContainerVT.getVectorElementType(); + if (EVecEltVT != ContainerEltVT) + SlideUp = false; + } + if (SlideUp) { MVT EVecContainerVT = EVec.getSimpleValueType(); // Make sure the original vector has scalable vector type. @@ -16203,7 +16211,6 @@ static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG, return SDValue(); using namespace SDPatternMatch; - SDValue Base, Inserted; APInt CMask; if (!sd_match(N, m_Xor(m_Value(Base), @@ -16214,7 +16221,6 @@ static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG, if (N->getValueType(0) != MVT::i32) return SDValue(); - unsigned Width, ShAmt; if (!CMask.isShiftedMask(ShAmt, Width)) return SDValue(); @@ -16235,10 +16241,96 @@ static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG, return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops); } +static SDValue combineOrToBitfieldInsert(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { + if (!Subtarget.hasVendorXqcibm()) + return SDValue(); + + using namespace SDPatternMatch; + + SDValue X; + APInt MaskImm; + if (!sd_match(N, m_Or(m_OneUse(m_Value(X)), m_ConstInt(MaskImm)))) + return SDValue(); + + unsigned ShAmt, Width; + if (!MaskImm.isShiftedMask(ShAmt, Width) || MaskImm.isSignedIntN(12)) + return SDValue(); + + if (N->getValueType(0) != MVT::i32) + return SDValue(); + + // If Zbs is enabled and it is a single bit set we can use BSETI which + // can be compressed to C_BSETI when Xqcibm in enabled. + if (Width == 1 && Subtarget.hasStdExtZbs()) + return SDValue(); + + // If C1 is a shifted mask (but can't be formed as an ORI), + // use a bitfield insert of -1. + // Transform (or x, C1) + // -> (qc.insbi x, -1, width, shift) + SDLoc DL(N); + + SDValue Ops[] = {X, DAG.getSignedConstant(-1, DL, MVT::i32), + DAG.getConstant(Width, DL, MVT::i32), + DAG.getConstant(ShAmt, DL, MVT::i32)}; + return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops); +} + +// Generate a QC_INSB/QC_INSBI from 'or (and X, MaskImm), OrImm' iff the value +// being inserted only sets known zero bits. +static SDValue combineOrAndToBitfieldInsert(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { + // Supported only in Xqcibm for now. + if (!Subtarget.hasVendorXqcibm()) + return SDValue(); + + using namespace SDPatternMatch; + + SDValue Inserted; + APInt MaskImm, OrImm; + if (!sd_match( + N, m_SpecificVT(MVT::i32, m_Or(m_OneUse(m_And(m_Value(Inserted), + m_ConstInt(MaskImm))), + m_ConstInt(OrImm))))) + return SDValue(); + + // Compute the Known Zero for the AND as this allows us to catch more general + // cases than just looking for AND with imm. + KnownBits Known = DAG.computeKnownBits(N->getOperand(0)); + + // The bits being inserted must only set those bits that are known to be + // zero. + if (!OrImm.isSubsetOf(Known.Zero)) { + // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't + // currently handle this case. + return SDValue(); + } + + unsigned ShAmt, Width; + // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00). + if (!Known.Zero.isShiftedMask(ShAmt, Width)) + return SDValue(); + + // QC_INSB(I) dst, src, #width, #shamt. 
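
To make the two bitfield-insert combines easier to follow, a standalone sketch (plain C++, not LLVM code) of the identities they rely on, assuming qc.insb/qc.insbi replaces the width-bit field starting at shamt with the low bits of the inserted value, as the "QC_INSB(I) dst, src, #width, #shamt" comment suggests; the masks and values below are made up for the demo.

#include <cassert>
#include <cstdint>

// Reference semantics assumed for QC_INSB(I): replace `width` bits of `dst`
// starting at bit `shamt` with the low `width` bits of `src`.
static uint32_t insb(uint32_t dst, uint32_t src, unsigned width, unsigned shamt) {
  uint32_t field = (width >= 32 ? ~0u : ((1u << width) - 1u)) << shamt;
  return (dst & ~field) | ((src << shamt) & field);
}

int main() {
  uint32_t x = 0x12345678;
  unsigned shamt = 12, width = 8;
  uint32_t c1 = ((1u << width) - 1u) << shamt; // shifted mask, 0x000FF000

  // combineOrToBitfieldInsert: (or x, C1) == inserting an all-ones field.
  assert((x | c1) == insb(x, (uint32_t)-1, width, shamt));

  // combineOrAndToBitfieldInsert: when OrImm only sets bits known to be zero
  // (here, the bits cleared by the AND), (or (and x, ~C1), OrImm) ==
  // inserting (OrImm >> shamt) into that field.
  uint32_t orImm = 0x000AB000;
  assert(((x & ~c1) | orImm) == insb(x & ~c1, orImm >> shamt, width, shamt));
  return 0;
}
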
+ SDLoc DL(N); + + SDValue ImmNode = + DAG.getSignedConstant(OrImm.getSExtValue() >> ShAmt, DL, MVT::i32); + + SDValue Ops[] = {Inserted, ImmNode, DAG.getConstant(Width, DL, MVT::i32), + DAG.getConstant(ShAmt, DL, MVT::i32)}; + return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops); +} + static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget) { SelectionDAG &DAG = DCI.DAG; + if (SDValue V = combineOrToBitfieldInsert(N, DAG, Subtarget)) + return V; + if (SDValue V = combineOrAndToBitfieldInsert(N, DAG, Subtarget)) + return V; if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget)) return V; if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget)) @@ -22190,6 +22282,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, // - They are debug instructions. Otherwise, // - They do not have side-effects, do not access memory and their inputs do // not depend on the results of the select pseudo-instructions. + // - They don't adjust stack. // The TrueV/FalseV operands of the selects cannot depend on the result of // previous selects in the sequence. // These conditions could be further relaxed. See the X86 target for a @@ -22218,6 +22311,8 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, SelectDests.insert(MI.getOperand(0).getReg()); MachineInstr *LastSelectPseudo = &MI; + const RISCVInstrInfo &TII = *Subtarget.getInstrInfo(); + for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); SequenceMBBI != E; ++SequenceMBBI) { if (SequenceMBBI->isDebugInstr()) @@ -22237,7 +22332,9 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, } if (SequenceMBBI->hasUnmodeledSideEffects() || SequenceMBBI->mayLoadOrStore() || - SequenceMBBI->usesCustomInsertionHook()) + SequenceMBBI->usesCustomInsertionHook() || + TII.isFrameInstr(*SequenceMBBI) || + SequenceMBBI->isStackAligningInlineAsm()) break; if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); @@ -22245,7 +22342,6 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, break; } - const RISCVInstrInfo &TII = *Subtarget.getInstrInfo(); const BasicBlock *LLVM_BB = BB->getBasicBlock(); DebugLoc DL = MI.getDebugLoc(); MachineFunction::iterator I = ++BB->getIterator(); diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp index 90e1c47a71c89..6a6ead2697591 100644 --- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp +++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp @@ -70,6 +70,10 @@ static unsigned getSEWOpNum(const MachineInstr &MI) { return RISCVII::getSEWOpNum(MI.getDesc()); } +static unsigned getVecPolicyOpNum(const MachineInstr &MI) { + return RISCVII::getVecPolicyOpNum(MI.getDesc()); +} + /// Get the EEW for a load or store instruction. Return std::nullopt if MI is /// not a load or store which ignores SEW. static std::optional getEEWForLoadStore(const MachineInstr &MI) { @@ -986,7 +990,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const { // If there is a policy operand, use it. 
if (RISCVII::hasVecPolicyOp(TSFlags)) { - const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1); + const MachineOperand &Op = MI.getOperand(getVecPolicyOpNum(MI)); uint64_t Policy = Op.getImm(); assert(Policy <= (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) && diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index 0ed97c61ec78a..1e6b04f8a4281 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -232,7 +232,7 @@ Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI, return 0; } -bool RISCVInstrInfo::isReallyTriviallyReMaterializable( +bool RISCVInstrInfo::isReMaterializableImpl( const MachineInstr &MI) const { switch (RISCV::getRVVMCOpcode(MI.getOpcode())) { case RISCV::VMV_V_X: @@ -243,7 +243,7 @@ bool RISCVInstrInfo::isReallyTriviallyReMaterializable( case RISCV::VID_V: return MI.getOperand(1).isUndef(); default: - return TargetInstrInfo::isReallyTriviallyReMaterializable(MI); + return TargetInstrInfo::isReMaterializableImpl(MI); } } @@ -1023,6 +1023,37 @@ static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target, Cond.push_back(LastInst.getOperand(1)); } +static unsigned getInverseXqcicmOpcode(unsigned Opcode) { + switch (Opcode) { + default: + llvm_unreachable("Unexpected Opcode"); + case RISCV::QC_MVEQ: + return RISCV::QC_MVNE; + case RISCV::QC_MVNE: + return RISCV::QC_MVEQ; + case RISCV::QC_MVLT: + return RISCV::QC_MVGE; + case RISCV::QC_MVGE: + return RISCV::QC_MVLT; + case RISCV::QC_MVLTU: + return RISCV::QC_MVGEU; + case RISCV::QC_MVGEU: + return RISCV::QC_MVLTU; + case RISCV::QC_MVEQI: + return RISCV::QC_MVNEI; + case RISCV::QC_MVNEI: + return RISCV::QC_MVEQI; + case RISCV::QC_MVLTI: + return RISCV::QC_MVGEI; + case RISCV::QC_MVGEI: + return RISCV::QC_MVLTI; + case RISCV::QC_MVLTUI: + return RISCV::QC_MVGEUI; + case RISCV::QC_MVGEUI: + return RISCV::QC_MVLTUI; + } +} + unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC, unsigned SelectOpc) { switch (SelectOpc) { default: @@ -1134,7 +1165,7 @@ unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC, unsigned SelectOpc) { } } -RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) { +RISCVCC::CondCode RISCVCC::getInverseBranchCondition(RISCVCC::CondCode CC) { switch (CC) { default: llvm_unreachable("Unrecognized conditional branch"); @@ -1554,7 +1585,7 @@ bool RISCVInstrInfo::optimizeCondBranch(MachineInstr &MI) const { return Register(); }; - unsigned NewOpc = RISCVCC::getBrCond(getOppositeBranchCondition(CC)); + unsigned NewOpc = RISCVCC::getBrCond(getInverseBranchCondition(CC)); // Might be case 1. // Don't change 0 to 1 since we can use x0. @@ -1801,7 +1832,7 @@ RISCVInstrInfo::optimizeSelect(MachineInstr &MI, // Add condition code, inverting if necessary. auto CC = static_cast(MI.getOperand(3).getImm()); if (Invert) - CC = RISCVCC::getOppositeBranchCondition(CC); + CC = RISCVCC::getInverseBranchCondition(CC); NewMI.addImm(CC); // Copy the false register. @@ -3762,6 +3793,24 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI, return false; // Operands 1 and 2 are commutable, if we switch the opcode. 
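
The QC_MV* cases added just below commute by swapping the tied false value with the true value while switching to the inverse opcode via getInverseXqcicmOpcode above; a standalone sketch (plain C++, not LLVM code) of why that is semantics-preserving, using the operand order documented in the Xqcicm expansion ($dst = QC_MVcc $falsev (=$dst), $lhs, $rhs, $truev) and made-up demo values.

#include <cassert>
#include <cstdint>

// QC_MVLT dst, lhs, rhs, src : dst = (lhs < rhs)  ? src : dst
// QC_MVGE dst, lhs, rhs, src : dst = (lhs >= rhs) ? src : dst
static int32_t qcMVLT(int32_t dst, int32_t lhs, int32_t rhs, int32_t src) {
  return lhs < rhs ? src : dst;
}
static int32_t qcMVGE(int32_t dst, int32_t lhs, int32_t rhs, int32_t src) {
  return lhs >= rhs ? src : dst;
}

int main() {
  // Swapping the tied destination (false value) with the source (true value)
  // and using the inverse condition produces the same result.
  for (int32_t lhs : {-3, 0, 7})
    for (int32_t rhs : {-3, 0, 7})
      assert(qcMVLT(10, lhs, rhs, 20) == qcMVGE(20, lhs, rhs, 10));
  return 0;
}
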
return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2); + case RISCV::QC_SELECTIEQ: + case RISCV::QC_SELECTINE: + case RISCV::QC_SELECTIIEQ: + case RISCV::QC_SELECTIINE: + return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2); + case RISCV::QC_MVEQ: + case RISCV::QC_MVNE: + case RISCV::QC_MVLT: + case RISCV::QC_MVGE: + case RISCV::QC_MVLTU: + case RISCV::QC_MVGEU: + case RISCV::QC_MVEQI: + case RISCV::QC_MVNEI: + case RISCV::QC_MVLTI: + case RISCV::QC_MVGEI: + case RISCV::QC_MVLTUI: + case RISCV::QC_MVGEUI: + return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4); case RISCV::TH_MULA: case RISCV::TH_MULAW: case RISCV::TH_MULAH: @@ -3974,11 +4023,33 @@ MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI, return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, OpIdx1, OpIdx2); } + case RISCV::QC_SELECTIEQ: + case RISCV::QC_SELECTINE: + case RISCV::QC_SELECTIIEQ: + case RISCV::QC_SELECTIINE: + return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); + case RISCV::QC_MVEQ: + case RISCV::QC_MVNE: + case RISCV::QC_MVLT: + case RISCV::QC_MVGE: + case RISCV::QC_MVLTU: + case RISCV::QC_MVGEU: + case RISCV::QC_MVEQI: + case RISCV::QC_MVNEI: + case RISCV::QC_MVLTI: + case RISCV::QC_MVGEI: + case RISCV::QC_MVLTUI: + case RISCV::QC_MVGEUI: { + auto &WorkingMI = cloneIfNew(MI); + WorkingMI.setDesc(get(getInverseXqcicmOpcode(MI.getOpcode()))); + return TargetInstrInfo::commuteInstructionImpl(WorkingMI, false, OpIdx1, + OpIdx2); + } case RISCV::PseudoCCMOVGPRNoX0: case RISCV::PseudoCCMOVGPR: { // CCMOV can be commuted by inverting the condition. auto CC = static_cast(MI.getOperand(3).getImm()); - CC = RISCVCC::getOppositeBranchCondition(CC); + CC = RISCVCC::getInverseBranchCondition(CC); auto &WorkingMI = cloneIfNew(MI); WorkingMI.getOperand(3).setImm(CC); return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI*/ false, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h index 57ec431749ebe..42a0c4c01b472 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h @@ -44,7 +44,7 @@ enum CondCode { COND_INVALID }; -CondCode getOppositeBranchCondition(CondCode); +CondCode getInverseBranchCondition(CondCode); unsigned getBrCond(CondCode CC, unsigned SelectOpc = 0); } // end of namespace RISCVCC @@ -75,7 +75,7 @@ class RISCVInstrInfo : public RISCVGenInstrInfo { Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex, TypeSize &MemBytes) const override; - bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override; + bool isReMaterializableImpl(const MachineInstr &MI) const override; bool shouldBreakCriticalEdgeToSink(MachineInstr &MI) const override { return MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() && diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td index 47900cffa370c..9855c47a63392 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -340,7 +340,9 @@ def uimm16 : RISCVUImmOp<16>; def uimm32 : RISCVUImmOp<32>; def uimm48 : RISCVUImmOp<48>; def uimm64 : RISCVUImmOp<64>; -def simm12 : RISCVSImmLeafOp<12> { + +def simm12_lo : RISCVSImmLeafOp<12> { + let ParserMatchClass = SImmAsmOperand<12, "LO">; let MCOperandPredicate = [{ int64_t Imm; if (MCOp.evaluateAsConstantImm(Imm)) @@ -642,7 +644,7 @@ class BranchCC_rri funct3, string opcodestr> let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in { class Load_ri funct3, string opcodestr, 
DAGOperand rty = GPR> - : RVInstI; class HLoad_r funct7, bits<5> funct5, string opcodestr> @@ -658,7 +660,7 @@ class HLoad_r funct7, bits<5> funct5, string opcodestr> let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in { class Store_rri funct3, string opcodestr, DAGOperand rty = GPR> : RVInstS; class HStore_rr funct7, string opcodestr> @@ -671,7 +673,7 @@ class HStore_rr funct7, string opcodestr> let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in class ALU_ri funct3, string opcodestr> - : RVInstI, Sched<[WriteIALU, ReadIALU]>; @@ -754,7 +756,7 @@ def JAL : RVInstJ, Sched<[WriteJal]>; def JALR : RVInstI<0b000, OPC_JALR, (outs GPR:$rd), - (ins GPR:$rs1, simm12:$imm12), + (ins GPR:$rs1, simm12_lo:$imm12), "jalr", "$rd, ${imm12}(${rs1})">, Sched<[WriteJalr, ReadJalr]>; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 @@ -779,7 +781,7 @@ def SH : Store_rri<0b001, "sh">, Sched<[WriteSTH, ReadStoreData, ReadMemBase]>; def SW : Store_rri<0b010, "sw">, Sched<[WriteSTW, ReadStoreData, ReadMemBase]>; // ADDI isn't always rematerializable, but isReMaterializable will be used as -// a hint which is verified in isReallyTriviallyReMaterializable. +// a hint which is verified in isReMaterializableImpl. let isReMaterializable = 1, isAsCheapAsAMove = 1 in def ADDI : ALU_ri<0b000, "addi">; @@ -894,7 +896,7 @@ def SD : Store_rri<0b011, "sd">, Sched<[WriteSTD, ReadStoreData, ReadMemBase] let IsSignExtendingOpW = 1 in { let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in def ADDIW : RVInstI<0b000, OPC_OP_IMM_32, (outs GPR:$rd), - (ins GPR:$rs1, simm12:$imm12), + (ins GPR:$rs1, simm12_lo:$imm12), "addiw", "$rd, $rs1, $imm12">, Sched<[WriteIALU32, ReadIALU32]>; @@ -1041,7 +1043,7 @@ def PseudoSD : PseudoStore<"sd">; } // Predicates = [IsRV64] def : InstAlias<"nop", (ADDI X0, X0, 0), 3>; -def : InstAlias<"li $rd, $imm", (ADDI GPR:$rd, X0, simm12:$imm), 2>; +def : InstAlias<"li $rd, $imm", (ADDI GPR:$rd, X0, simm12_lo:$imm), 2>; def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>; def : InstAlias<"not $rd, $rs", (XORI GPR:$rd, GPR:$rs, -1)>; @@ -1094,16 +1096,16 @@ def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0_jal:$offset)>; // Non-zero offset aliases of "jalr" are the lowest weight, followed by the // two-register form, then the one-register forms and finally "ret". def : InstAlias<"jr $rs", (JALR X0, GPR:$rs, 0), 3>; -def : InstAlias<"jr ${offset}(${rs})", (JALR X0, GPR:$rs, simm12:$offset)>; +def : InstAlias<"jr ${offset}(${rs})", (JALR X0, GPR:$rs, simm12_lo:$offset)>; def : InstAlias<"jalr $rs", (JALR X1, GPR:$rs, 0), 3>; -def : InstAlias<"jalr ${offset}(${rs})", (JALR X1, GPR:$rs, simm12:$offset)>; +def : InstAlias<"jalr ${offset}(${rs})", (JALR X1, GPR:$rs, simm12_lo:$offset)>; def : InstAlias<"jalr $rd, $rs", (JALR GPR:$rd, GPR:$rs, 0), 2>; def : InstAlias<"ret", (JALR X0, X1, 0), 4>; // Non-canonical forms for jump targets also accepted by the assembler. 
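
Since nearly every immediate operand in this file is being renamed from simm12 to simm12_lo, a standalone sketch (plain C++, not LLVM code) of the property the "lo" spelling refers to: under the usual RISC-V LUI+ADDI materialization, the low part is a signed 12-bit value in [-2048, 2047] (the same range the assembler diagnostic reports) and the %hi part absorbs the carry from its sign extension. The split formulas below are standard RISC-V practice rather than something stated in this patch.

#include <cassert>
#include <cstdint>

// Split a 32-bit constant into the %hi/%lo pair used by LUI+ADDI.
static int32_t lo12(int32_t val) {
  int32_t lo = val & 0xFFF;
  return lo >= 0x800 ? lo - 0x1000 : lo; // sign-extend the low 12 bits
}
static int32_t hi20(int32_t val) {
  return (int32_t)(((int64_t)val + 0x800) >> 12); // rounds so that lo12 fits
}

int main() {
  for (int32_t v : {0, 1, 2047, 2048, -2048, -2049, 0x12345678, -0x12345678}) {
    assert(lo12(v) >= -2048 && lo12(v) <= 2047);          // simm12 range
    int32_t rebuilt = (int32_t)((uint32_t)hi20(v) << 12) + lo12(v);
    assert(rebuilt == v);                                 // LUI %hi + ADDI %lo
  }
  return 0;
}
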
-def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12:$offset), 0>; -def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12:$offset), 0>; -def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12:$offset), 0>; +def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12_lo:$offset), 0>; +def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12_lo:$offset), 0>; +def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12_lo:$offset), 0>; def : InstAlias<"jr (${rs})", (JALR X0, GPR:$rs, 0), 0>; def : InstAlias<"jalr (${rs})", (JALR X1, GPR:$rs, 0), 0>; def : InstAlias<"jalr $rd, (${rs})", (JALR GPR:$rd, GPR:$rs, 0), 0>; @@ -1178,13 +1180,13 @@ def : InstAlias<"sw $rs2, (${rs1})", (SW GPR:$rs2, GPR:$rs1, 0)>; def : InstAlias<"add $rd, $rs1, $imm12", - (ADDI GPR:$rd, GPR:$rs1, simm12:$imm12)>; + (ADDI GPR:$rd, GPR:$rs1, simm12_lo:$imm12)>; def : InstAlias<"and $rd, $rs1, $imm12", - (ANDI GPR:$rd, GPR:$rs1, simm12:$imm12)>; + (ANDI GPR:$rd, GPR:$rs1, simm12_lo:$imm12)>; def : InstAlias<"xor $rd, $rs1, $imm12", - (XORI GPR:$rd, GPR:$rs1, simm12:$imm12)>; + (XORI GPR:$rd, GPR:$rs1, simm12_lo:$imm12)>; def : InstAlias<"or $rd, $rs1, $imm12", - (ORI GPR:$rd, GPR:$rs1, simm12:$imm12)>; + (ORI GPR:$rd, GPR:$rs1, simm12_lo:$imm12)>; def : InstAlias<"sll $rd, $rs1, $shamt", (SLLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>; def : InstAlias<"srl $rd, $rs1, $shamt", @@ -1200,7 +1202,7 @@ def : InstAlias<"sd $rs2, (${rs1})", (SD GPR:$rs2, GPR:$rs1, 0)>; def : InstAlias<"addw $rd, $rs1, $imm12", - (ADDIW GPR:$rd, GPR:$rs1, simm12:$imm12)>; + (ADDIW GPR:$rd, GPR:$rs1, simm12_lo:$imm12)>; def : InstAlias<"sllw $rd, $rs1, $shamt", (SLLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>; def : InstAlias<"srlw $rd, $rs1, $shamt", @@ -1209,9 +1211,9 @@ def : InstAlias<"sraw $rd, $rs1, $shamt", (SRAIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>; } // Predicates = [IsRV64] def : InstAlias<"slt $rd, $rs1, $imm12", - (SLTI GPR:$rd, GPR:$rs1, simm12:$imm12)>; + (SLTI GPR:$rd, GPR:$rs1, simm12_lo:$imm12)>; def : InstAlias<"sltu $rd, $rs1, $imm12", - (SLTIU GPR:$rd, GPR:$rs1, simm12:$imm12)>; + (SLTIU GPR:$rd, GPR:$rs1, simm12_lo:$imm12)>; } def : MnemonicAlias<"move", "mv">; @@ -1284,12 +1286,12 @@ def InsnR4 : DirectiveInsnR4<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, AnyReg:$rs3), "$opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3">; def InsnI : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3, - AnyReg:$rs1, simm12:$imm12), + AnyReg:$rs1, simm12_lo:$imm12), "$opcode, $funct3, $rd, $rs1, $imm12">; def InsnI_Mem : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, - simm12:$imm12), + simm12_lo:$imm12), "$opcode, $funct3, $rd, ${imm12}(${rs1})">; def InsnB : DirectiveInsnB<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, AnyReg:$rs2, @@ -1303,7 +1305,7 @@ def InsnJ : DirectiveInsnJ<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, "$opcode, $rd, $imm20">; def InsnS : DirectiveInsnS<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs2, AnyReg:$rs1, - simm12:$imm12), + simm12_lo:$imm12), "$opcode, $funct3, $rs2, ${imm12}(${rs1})">; } // isCodeGenOnly, hasSideEffects, mayLoad, mayStore, hasNoSchedulingInfo @@ -1324,10 +1326,10 @@ def : InstAlias<".insn_r4 $opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3", AnyReg:$rs1, AnyReg:$rs2, AnyReg:$rs3)>; def : InstAlias<".insn_i $opcode, $funct3, $rd, $rs1, $imm12", (InsnI AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, - simm12:$imm12)>; + 
simm12_lo:$imm12)>; def : InstAlias<".insn_i $opcode, $funct3, $rd, ${imm12}(${rs1})", (InsnI_Mem AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, - AnyReg:$rs1, simm12:$imm12)>; + AnyReg:$rs1, simm12_lo:$imm12)>; def : InstAlias<".insn_i $opcode, $funct3, $rd, (${rs1})", (InsnI_Mem AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, 0)>; @@ -1347,7 +1349,7 @@ def : InstAlias<".insn_uj $opcode, $rd, $imm20", (InsnJ AnyReg:$rd, uimm7_opcode:$opcode, simm21_lsb0_jal:$imm20)>; def : InstAlias<".insn_s $opcode, $funct3, $rs2, ${imm12}(${rs1})", (InsnS uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs2, - AnyReg:$rs1, simm12:$imm12)>; + AnyReg:$rs1, simm12_lo:$imm12)>; def : InstAlias<".insn_s $opcode, $funct3, $rs2, (${rs1})", (InsnS uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs2, AnyReg:$rs1, 0)>; @@ -1374,7 +1376,7 @@ class PatGprImm; class PatGprSimm12 - : PatGprImm; + : PatGprImm; class PatGprUimmLog2XLen : PatGprImm; @@ -1542,8 +1544,8 @@ def : GICustomOperandRenderer<"renderFrameIndex">, def : Pat<(frameindex:$fi), (ADDI (iPTR (to_tframeindex $fi)), 0)>; -def : Pat<(add_like frameindex:$fi, simm12:$offset), - (ADDI (iPTR (to_tframeindex $fi)), simm12:$offset)>; +def : Pat<(add_like frameindex:$fi, simm12_lo:$offset), + (ADDI (iPTR (to_tframeindex $fi)), simm12_lo:$offset)>; def GIAddrRegImm : GIComplexOperandMatcher, @@ -1576,7 +1578,7 @@ def PROBED_STACKALLOC_DYN : Pseudo<(outs), // It will be expanded after register allocation. // FIXME: The scheduling information does not reflect the multiple instructions. let Size = 8, isReMaterializable = 1 in -def PseudoMovAddr : Pseudo<(outs GPR:$dst), (ins uimm20_lui:$hi, simm12:$lo), []>, +def PseudoMovAddr : Pseudo<(outs GPR:$dst), (ins uimm20_lui:$hi, simm12_lo:$lo), []>, Sched<[WriteIALU]>; def riscv_hi_oneuse : unop_oneuse; @@ -1673,7 +1675,7 @@ defm Select_GPR : SelectCC_GPR_rrirr; class SelectCompressOpt : Pat<(riscv_selectcc_frag:$select (XLenVT GPR:$lhs), simm12_no6:$Constant, Cond, (XLenVT GPR:$truev), GPR:$falsev), - (Select_GPR_Using_CC_GPR (XLenVT (ADDI GPR:$lhs, (NegImm simm12:$Constant))), (XLenVT X0), + (Select_GPR_Using_CC_GPR (XLenVT (ADDI GPR:$lhs, (NegImm simm12_lo:$Constant))), (XLenVT X0), (IntCCtoRISCVCC $select), GPR:$truev, GPR:$falsev)>; def OptForMinSize : Predicate<"MF ? 
MF->getFunction().hasMinSize() : false">; @@ -1712,7 +1714,7 @@ multiclass BccPat { class BrccCompressOpt : Pat<(riscv_brcc GPR:$lhs, simm12_no6:$Constant, Cond, bb:$place), - (Inst (XLenVT (ADDI GPR:$lhs, (NegImm simm12:$Constant))), + (Inst (XLenVT (ADDI GPR:$lhs, (NegImm simm12_lo:$Constant))), (XLenVT X0), bb:$place)>; defm : BccPat; @@ -1753,33 +1755,33 @@ def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>, let Predicates = [NoStdExtZicfilp], isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in -def PseudoBRIND : Pseudo<(outs), (ins GPRJALR:$rs1, simm12:$imm12), []>, - PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>; +def PseudoBRIND : Pseudo<(outs), (ins GPRJALR:$rs1, simm12_lo:$imm12), []>, + PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12_lo:$imm12)>; let Predicates = [HasStdExtZicfilp], isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in { -def PseudoBRINDNonX7 : Pseudo<(outs), (ins GPRJALRNonX7:$rs1, simm12:$imm12), []>, - PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>; -def PseudoBRINDX7 : Pseudo<(outs), (ins GPRX7:$rs1, simm12:$imm12), []>, - PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>; +def PseudoBRINDNonX7 : Pseudo<(outs), (ins GPRJALRNonX7:$rs1, simm12_lo:$imm12), []>, + PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12_lo:$imm12)>; +def PseudoBRINDX7 : Pseudo<(outs), (ins GPRX7:$rs1, simm12_lo:$imm12), []>, + PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12_lo:$imm12)>; } // For Zicfilp, need to avoid using X7/T2 for indirect branches which need // landing pad. let Predicates = [HasStdExtZicfilp] in { def : Pat<(brind GPRJALRNonX7:$rs1), (PseudoBRINDNonX7 GPRJALRNonX7:$rs1, 0)>; -def : Pat<(brind (add GPRJALRNonX7:$rs1, simm12:$imm12)), - (PseudoBRINDNonX7 GPRJALRNonX7:$rs1, simm12:$imm12)>; +def : Pat<(brind (add GPRJALRNonX7:$rs1, simm12_lo:$imm12)), + (PseudoBRINDNonX7 GPRJALRNonX7:$rs1, simm12_lo:$imm12)>; def : Pat<(riscv_sw_guarded_brind GPRX7:$rs1), (PseudoBRINDX7 GPRX7:$rs1, 0)>; -def : Pat<(riscv_sw_guarded_brind (add GPRX7:$rs1, simm12:$imm12)), - (PseudoBRINDX7 GPRX7:$rs1, simm12:$imm12)>; +def : Pat<(riscv_sw_guarded_brind (add GPRX7:$rs1, simm12_lo:$imm12)), + (PseudoBRINDX7 GPRX7:$rs1, simm12_lo:$imm12)>; } let Predicates = [NoStdExtZicfilp] in { def : Pat<(brind GPRJALR:$rs1), (PseudoBRIND GPRJALR:$rs1, 0)>; -def : Pat<(brind (add GPRJALR:$rs1, simm12:$imm12)), - (PseudoBRIND GPRJALR:$rs1, simm12:$imm12)>; +def : Pat<(brind (add GPRJALR:$rs1, simm12_lo:$imm12)), + (PseudoBRIND GPRJALR:$rs1, simm12_lo:$imm12)>; } // PseudoCALLReg is a generic pseudo instruction for calls which will eventually @@ -1942,7 +1944,7 @@ def tlsdesc_call_symbol : Operand { let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, Size = 8, hasSideEffects = 0, mayStore = 0, mayLoad = 0 in def PseudoTLSDESCCall : Pseudo<(outs GPR:$rd), - (ins GPR:$rs1, simm12:$imm12, tlsdesc_call_symbol:$src), [], + (ins GPR:$rs1, simm12_lo:$imm12, tlsdesc_call_symbol:$src), [], "jalr", "$rd, ${imm12}(${rs1}), $src">, Sched<[WriteJalr, ReadJalr]> { let Defs = [X10]; @@ -1971,8 +1973,8 @@ def PseudoZEXT_W : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.w", "$rd, $rs /// Loads class LdPat - : Pat<(vt (LoadOp (AddrRegImm (XLenVT GPRMem:$rs1), simm12:$imm12))), - (Inst GPRMem:$rs1, simm12:$imm12)>; + : Pat<(vt (LoadOp (AddrRegImm (XLenVT GPRMem:$rs1), simm12_lo:$imm12))), + (Inst GPRMem:$rs1, simm12_lo:$imm12)>; def : LdPat; def : LdPat; // Prefer unsigned due to no c.lb in Zcb. 
@@ -1987,8 +1989,8 @@ def : LdPat; class StPat : Pat<(StoreOp (vt StTy:$rs2), (AddrRegImm (XLenVT GPRMem:$rs1), - simm12:$imm12)), - (Inst StTy:$rs2, GPRMem:$rs1, simm12:$imm12)>; + simm12_lo:$imm12)), + (Inst StTy:$rs2, GPRMem:$rs1, simm12_lo:$imm12)>; def : StPat; def : StPat; @@ -2228,8 +2230,8 @@ def : PatGprImm, XORI, u32simm12>; // Select 'or' as ADDIW if the immediate bits are known to be 0 in $rs1 and // $rs1 is sign extended. This can improve compressibility. Using ADDIW gives // more power to RISCVOptWInstrs. -def : Pat<(riscv_or_disjoint 33signbits_node:$rs1, simm12:$imm), - (ADDIW $rs1, simm12:$imm)>; +def : Pat<(riscv_or_disjoint 33signbits_node:$rs1, simm12_lo:$imm), + (ADDIW $rs1, simm12_lo:$imm)>; /// Loads diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td index 59f5aebf658d8..25accd93eaa03 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td @@ -165,23 +165,23 @@ class seq_cst_store // any ordering. This is necessary because AtomicExpandPass has added fences to // atomic load/stores and changed them to unordered ones. let Predicates = [HasAtomicLdSt] in { - def : LdPat, LB>; + // Use unsigned for aext due to no c.lb in Zcb. + def : LdPat, LB>; + def : LdPat, LBU>; def : LdPat, LH>; - def : LdPat, LBU>; - def : LdPat, LHU>; + def : LdPat, LHU>; def : StPat, SB, GPR, XLenVT>; def : StPat, SH, GPR, XLenVT>; def : StPat, SW, GPR, XLenVT>; -} -let Predicates = [HasAtomicLdSt, IsRV32] in { - def : LdPat, LW>; + // Used by GISel for RV32 and RV64. + def : LdPat, LW, i32>; } let Predicates = [HasAtomicLdSt, IsRV64] in { - def : LdPat, LW>; - def : LdPat, LWU>; + def : LdPat, LW, i64>; + def : LdPat, LWU, i64>; def : LdPat, LD, i64>; def : StPat, SD, GPR, i64>; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td index 3d9737e3645d5..b9510efc2fba1 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td @@ -529,11 +529,11 @@ def PseudoFROUND_D_IN32X : PseudoFROUND; /// Loads let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 1 in -def PseudoRV32ZdinxLD : Pseudo<(outs GPRPair:$dst), (ins GPR:$rs1, simm12:$imm12), []>; +def PseudoRV32ZdinxLD : Pseudo<(outs GPRPair:$dst), (ins GPR:$rs1, simm12_lo:$imm12), []>; /// Stores let hasSideEffects = 0, mayLoad = 0, mayStore = 1, Size = 8, isCodeGenOnly = 1 in -def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRPair:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>; +def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRPair:$rs2, GPRNoX0:$rs1, simm12_lo:$imm12), []>; } // Predicates = [HasStdExtZdinx, IsRV32] let Predicates = [HasStdExtZdinx, HasStdExtZilsd, IsRV32] in { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td index 2c1cf77acff56..fde030ecc3b89 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td @@ -196,7 +196,7 @@ let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in class FPLoad_r funct3, string opcodestr, DAGOperand rty, SchedWrite sw> : RVInstI, Sched<[sw, ReadFMemBase]>; @@ -204,7 +204,7 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in class FPStore_r funct3, string opcodestr, DAGOperand rty, SchedWrite sw> : RVInstS, Sched<[sw, ReadFStoreData, ReadFMemBase]>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td index f732ab13e5f88..0114fbdc56302 100644 --- 
a/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoSFB.td @@ -60,7 +60,7 @@ class SFBALU_rr class SFBALU_ri : Pseudo<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs, cond_code:$cc, GPR:$falsev, GPR:$rs1, - simm12:$imm), []>, + simm12_lo:$imm), []>, Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, ReadSFBALU]> { let hasSideEffects = 0; let mayLoad = 0; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index b4be9e0c09b3e..298d35a5b4efa 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -345,49 +345,60 @@ defset list AllVectors = { } } - defset list AllFloatVectors = { - defset list NoGroupFloatVectors = { - defset list FractionalGroupFloatVectors = { - def VF16MF4: VTypeInfo; - def VF16MF2: VTypeInfo; - def VF32MF2: VTypeInfo; - def VBF16MF4: VTypeInfo; - def VBF16MF2: VTypeInfo; + defset list AllFloatAndBF16Vectors = { + defset list AllFloatVectors = { + defset list NoGroupFloatVectors = { + defset list FractionalGroupFloatVectors = { + def VF16MF4: VTypeInfo; + def VF16MF2: VTypeInfo; + def VF32MF2: VTypeInfo; + } + def VF16M1: VTypeInfo; + def VF32M1: VTypeInfo; + def VF64M1: VTypeInfo; + } + + defset list GroupFloatVectors = { + def VF16M2: GroupVTypeInfo; + def VF16M4: GroupVTypeInfo; + def VF16M8: GroupVTypeInfo; + + def VF32M2: GroupVTypeInfo; + def VF32M4: GroupVTypeInfo; + def VF32M8: GroupVTypeInfo; + + def VF64M2: GroupVTypeInfo; + def VF64M4: GroupVTypeInfo; + def VF64M8: GroupVTypeInfo; } - def VF16M1: VTypeInfo; - def VF32M1: VTypeInfo; - def VF64M1: VTypeInfo; - def VBF16M1: VTypeInfo; } - defset list GroupFloatVectors = { - def VF16M2: GroupVTypeInfo; - def VF16M4: GroupVTypeInfo; - def VF16M8: GroupVTypeInfo; - - def VF32M2: GroupVTypeInfo; - def VF32M4: GroupVTypeInfo; - def VF32M8: GroupVTypeInfo; - - def VF64M2: GroupVTypeInfo; - def VF64M4: GroupVTypeInfo; - def VF64M8: GroupVTypeInfo; - - def VBF16M2: GroupVTypeInfo; - def VBF16M4: GroupVTypeInfo; - def VBF16M8: GroupVTypeInfo; + defset list AllBF16Vectors = { + defset list NoGroupBF16Vectors = { + defset list FractionalGroupBF16Vectors = { + def VBF16MF4: VTypeInfo; + def VBF16MF2: VTypeInfo; + } + def VBF16M1: VTypeInfo; + } + + defset list GroupBF16Vectors = { + def VBF16M2: GroupVTypeInfo; + def VBF16M4: GroupVTypeInfo; + def VBF16M8: GroupVTypeInfo; + } } } } @@ -531,7 +542,7 @@ defset list AllWidenableIntToFloatVectors = { def : VTypeInfoToWide; } -defset list AllWidenableBFloatToFloatVectors = { +defset list AllWidenableBF16ToFloatVectors = { def : VTypeInfoToWide; def : VTypeInfoToWide; def : VTypeInfoToWide; @@ -543,7 +554,8 @@ defset list AllWidenableBFloatToFloatVectors = { // This represents the information we need in codegen for each pseudo. // The definition should be consistent with `struct PseudoInfo` in // RISCVInstrInfo.h. -class RISCVVPseudo pattern = [], string opcodestr = "", string argstr = ""> +class RISCVVPseudo pattern = [], + string opcodestr = "", string argstr = ""> : Pseudo { Pseudo Pseudo = !cast(NAME); // Used as a key. 
Instruction BaseInstr = !cast(PseudoToVInst.VInst); @@ -999,8 +1011,7 @@ class VPseudoNullaryNoMask : class VPseudoNullaryMask : RISCVVPseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$passthru, - VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1179,8 +1190,7 @@ class VPseudoBinaryNoMask TargetConstraintType = 1, DAGOperand sewop = sew> : RISCVVPseudo<(outs RetClass:$rd), - (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, sewop:$sew), - []> { + (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, sewop:$sew)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1216,8 +1226,7 @@ class VPseudoBinaryNoMaskRoundingMode TargetConstraintType = 1> : RISCVVPseudo<(outs RetClass:$rd), (ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1, - vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1309,7 +1318,7 @@ class VPseudoIStoreNoMask LMUL, bit Ordered>: RISCVVPseudo<(outs), (ins StClass:$rd, GPRMemZeroOffset:$rs1, IdxClass:$rs2, - AVL:$vl, sew:$sew),[]>, + AVL:$vl, sew:$sew)>, RISCVVSX { let mayLoad = 0; let mayStore = 1; @@ -1322,7 +1331,7 @@ class VPseudoIStoreMask LMUL, bit Ordered>: RISCVVPseudo<(outs), (ins StClass:$rd, GPRMemZeroOffset:$rs1, IdxClass:$rs2, - VMaskOp:$vm, AVL:$vl, sew:$sew),[]>, + VMaskOp:$vm, AVL:$vl, sew:$sew)>, RISCVVSX { let mayLoad = 0; let mayStore = 1; @@ -1340,8 +1349,7 @@ class VPseudoBinaryMaskPolicy.R:$rd), (ins GetVRegNoV0.R:$passthru, Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1360,8 +1368,7 @@ class VPseudoTernaryMaskPolicy.R:$rd), (ins GetVRegNoV0.R:$passthru, Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1403,8 +1410,7 @@ class VPseudoBinaryMOutMask { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1427,8 +1433,7 @@ class VPseudoTiedBinaryMask.R:$rd), (ins GetVRegNoV0.R:$passthru, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1535,8 +1540,7 @@ class VPseudoTernaryNoMaskWithPolicyRoundingMode TargetConstraintType = 1> : RISCVVPseudo<(outs RetClass:$rd), (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, - vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1705,8 +1709,8 @@ class VPseudoUSSegStoreNoMask NF> : RISCVVPseudo<(outs), - (ins ValClass:$rd, GPRMemZeroOffset:$rs1, AVL:$vl, sew:$sew), - []>, + (ins ValClass:$rd, GPRMemZeroOffset:$rs1, AVL:$vl, + sew:$sew)>, RISCVVSSEG { let mayLoad = 0; let mayStore = 1; @@ -5859,7 +5863,7 @@ multiclass VPatConversionWF_VF { - foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in + foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; @@ -5966,7 +5970,7 @@ multiclass VPatConversionVF_WF_RTZ { - foreach fvtiToFWti = 
AllWidenableBFloatToFloatVectors in { + foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, @@ -6018,9 +6022,9 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in { PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVLENB.Encoding, X0)>, Sched<[WriteRdVLENB]>; let Defs = [VL, VTYPE] in { - def PseudoReadVLENBViaVSETVLIX0 : Pseudo<(outs GPRNoX0:$rd), (ins uimm5:$shamt), - []>, - Sched<[WriteVSETVLI, ReadVSETVLI]>; + def PseudoReadVLENBViaVSETVLIX0 : Pseudo<(outs GPRNoX0:$rd), + (ins uimm5:$shamt), []>, + Sched<[WriteVSETVLI, ReadVSETVLI]>; } } @@ -6683,14 +6687,14 @@ defm PseudoVID : VPseudoVID_V; let Predicates = [HasVInstructions] in { let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { let HasSEWOp = 1, BaseInstr = VMV_X_S in - def PseudoVMV_X_S: + def PseudoVMV_X_S : RISCVVPseudo<(outs GPR:$rd), (ins VR:$rs2, sew:$sew)>, Sched<[WriteVMovXS, ReadVMovXS]>; let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X, isReMaterializable = 1, Constraints = "$rd = $passthru" in - def PseudoVMV_S_X: RISCVVPseudo<(outs VR:$rd), - (ins VR:$passthru, GPR:$rs1, AVL:$vl, sew:$sew), - []>, + def PseudoVMV_S_X : + RISCVVPseudo<(outs VR:$rd), + (ins VR:$passthru, GPR:$rs1, AVL:$vl, sew:$sew)>, Sched<[WriteVMovSX, ReadVMovSX_V, ReadVMovSX_X]>; } } // Predicates = [HasVInstructions] @@ -6710,8 +6714,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { Constraints = "$rd = $passthru" in def "PseudoVFMV_S_" # f.FX : RISCVVPseudo<(outs VR:$rd), - (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew), - []>, + (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew)>, Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>; } } @@ -7143,31 +7146,32 @@ defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">; // We can use vmerge.vvm to support vector-vector vfmerge. // NOTE: Clang previously used int_riscv_vfmerge for vector-vector, but now uses // int_riscv_vmerge. Support both for compatibility. 
-foreach vti = AllFloatVectors in { +foreach vti = AllFloatAndBF16Vectors in { let Predicates = GetVTypeMinimalPredicates.Predicates in defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM", vti.Vector, vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass, vti.RegClass>; - let Predicates = GetVTypePredicates.Predicates in - defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE", - "V"#vti.ScalarSuffix#"M", - vti.Vector, - vti.Vector, vti.Scalar, vti.Mask, - vti.Log2SEW, vti.LMul, vti.RegClass, - vti.RegClass, vti.ScalarRegClass>; } foreach fvti = AllFloatVectors in { - defvar instr = !cast("PseudoVMERGE_VIM_"#fvti.LMul.MX); - let Predicates = GetVTypePredicates.Predicates in - def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$passthru), - (fvti.Vector fvti.RegClass:$rs2), - (fvti.Scalar (fpimm0)), - (fvti.Mask VMV0:$vm), VLOpFrag)), - (instr fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, - (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>; + let Predicates = GetVTypePredicates.Predicates in { + defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE", + "V"#fvti.ScalarSuffix#"M", + fvti.Vector, + fvti.Vector, fvti.Scalar, fvti.Mask, + fvti.Log2SEW, fvti.LMul, fvti.RegClass, + fvti.RegClass, fvti.ScalarRegClass>; + + defvar instr = !cast("PseudoVMERGE_VIM_"#fvti.LMul.MX); + def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$passthru), + (fvti.Vector fvti.RegClass:$rs2), + (fvti.Scalar (fpimm0)), + (fvti.Mask VMV0:$vm), VLOpFrag)), + (instr fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>; + } } //===----------------------------------------------------------------------===// @@ -7328,13 +7332,12 @@ foreach vti = NoGroupIntegerVectors in { //===----------------------------------------------------------------------===// // 16.3. Vector Slide Instructions //===----------------------------------------------------------------------===// -defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>; -defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>; +defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllVectors, uimm5>; +defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllVectors, uimm5>; + defm : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>; defm : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>; -defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>; -defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>; defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>; defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>; @@ -7342,19 +7345,14 @@ defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVe // 16.4. 
Vector Register Gather Instructions //===----------------------------------------------------------------------===// defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER", - AllIntegerVectors, uimm5>; + AllVectors, uimm5>; defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16", - eew=16, vtilist=AllIntegerVectors>; + eew=16, vtilist=AllVectors>; -defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER", - AllFloatVectors, uimm5>; -defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16", - eew=16, vtilist=AllFloatVectors>; //===----------------------------------------------------------------------===// // 16.5. Vector Compress Instruction //===----------------------------------------------------------------------===// -defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>; -defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>; +defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllVectors>; // Include the non-intrinsic ISel patterns include "RISCVInstrInfoVVLPatterns.td" diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td index d4c9215e1863a..139ff9277bb91 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -1388,7 +1388,7 @@ defm : VPatFPSetCCSDNode_VV_VF_FV; // Floating-point vselects: // 11.15. Vector Integer Merge Instructions // 13.15. Vector Floating-Point Merge Instruction -foreach fvti = AllFloatVectors in { +foreach fvti = AllFloatAndBF16Vectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = GetVTypePredicates.Predicates in { def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1, @@ -1397,7 +1397,12 @@ foreach fvti = AllFloatVectors in { (fvti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>; + } +} +foreach fvti = AllFloatVectors in { + defvar ivti = GetIntVTypeInfo.Vti; + let Predicates = GetVTypePredicates.Predicates in { def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))), fvti.RegClass:$rs2)), @@ -1412,9 +1417,7 @@ foreach fvti = AllFloatVectors in { (fvti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>; } -} -foreach fvti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), (SplatFPOp fvti.ScalarRegClass:$rs1), diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td index ff35c1bd558a4..cf904eab1dd39 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -2423,10 +2423,10 @@ foreach vti = AllFloatVectors in { } } -foreach fvti = AllFloatVectors in { - // Floating-point vselects: - // 11.15. Vector Integer Merge Instructions - // 13.15. Vector Floating-Point Merge Instruction +// Floating-point vselects: +// 11.15. Vector Integer Merge Instructions +// 13.15. 
Vector Floating-Point Merge Instruction +foreach fvti = AllFloatAndBF16Vectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = GetVTypePredicates.Predicates in { def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), @@ -2437,7 +2437,12 @@ foreach fvti = AllFloatVectors in { (!cast("PseudoVMERGE_VVM_"#fvti.LMul.MX) fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>; + } +} +foreach fvti = AllFloatVectors in { + defvar ivti = GetIntVTypeInfo.Vti; + let Predicates = GetVTypePredicates.Predicates in { def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))), fvti.RegClass:$rs2, @@ -2457,9 +2462,7 @@ foreach fvti = AllFloatVectors in { fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>; } -} -foreach fvti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), (SplatFPOp fvti.ScalarRegClass:$rs1), @@ -2767,7 +2770,7 @@ foreach vti = NoGroupFloatVectors in { } } -foreach vti = AllFloatVectors in { +foreach vti = AllFloatAndBF16Vectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td index 9835c033aea9c..b683e895c31c0 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td @@ -560,7 +560,7 @@ multiclass VPseudoVNCVT_BF16_S { } multiclass VPatConversionS_BF16 { - foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in { + foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; let Predicates = [HasVendorXAndesVBFHCvt] in @@ -572,7 +572,7 @@ multiclass VPatConversionS_BF16 { } multiclass VPatConversionBF16_S { - foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in { + foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; let Predicates = [HasVendorXAndesVBFHCvt] in diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td index 996e08bd0a27d..d8f5d3e09d374 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXCV.td @@ -271,7 +271,7 @@ class CVInstImmBranch funct3, dag outs, dag ins, let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in { class CVLoad_ri_inc funct3, string opcodestr> : RVInstI { let Constraints = "$rs1_wb = $rs1"; } @@ -292,7 +292,7 @@ class CVLoad_rr funct7, bits<3> funct3, string opcodestr> let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in { class CVStore_ri_inc funct3, string opcodestr> : RVInstS { let Constraints = "$rs1_wb = $rs1"; } @@ -332,7 +332,7 @@ class CVStore_rr funct3, bits<7> funct7, string opcodestr> class CVLoad_ri funct3, string opcodestr> : RVInstI; + (ins GPRMem:$rs1, simm12_lo:$imm12), opcodestr, "$rd, ${imm12}(${rs1})">; //===----------------------------------------------------------------------===// // Instructions @@ -673,8 +673,8 @@ class CVLdrrPat (Inst CVrr:$regreg)>; class CVStriPat - : Pat<(StoreOp (XLenVT GPR:$rs2), GPR:$rs1, simm12:$imm12), - (Inst GPR:$rs2, GPR:$rs1, simm12:$imm12)>; + : Pat<(StoreOp (XLenVT GPR:$rs2), GPR:$rs1, simm12_lo:$imm12), + (Inst GPR:$rs2, GPR:$rs1, simm12_lo:$imm12)>; class CVStrriPat : Pat<(StoreOp (XLenVT GPR:$rs2), GPR:$rs1, 
GPR:$rs3), diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td index b546339ce99e2..557d8736eede3 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td @@ -770,7 +770,7 @@ multiclass VPatVQMACCQOQ : VPatVMACC; multiclass VPatVFWMACC - : VPatVMACC; defset list VFNRCLIPInfoPairs = { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td index 808d9117a1746..efdbd1298aec6 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td @@ -524,7 +524,7 @@ class QCIRVInstRI funct1, DAGOperand InTyImm11, let Inst{30-20} = imm11; } -let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in +let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCommutable = 1 in class QCISELECTIICC funct3, string opcodestr> : RVInstR4<0b00, funct3, OPC_CUSTOM_2, (outs GPRNoX0:$rd_wb), (ins GPRNoX0:$rd, GPRNoX0:$rs1, simm5:$simm1, simm5:$simm2), @@ -537,7 +537,7 @@ class QCISELECTIICC funct3, string opcodestr> let rs2 = simm1; } -let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in +let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCommutable = 1 in class QCISELECTICC funct3, string opcodestr> : RVInstR4<0b01, funct3, OPC_CUSTOM_2, (outs GPRNoX0:$rd_wb), (ins GPRNoX0:$rd, GPRNoX0:$rs1, GPRNoX0:$rs2, simm5:$simm2), @@ -604,7 +604,7 @@ class QCILICC funct3, bits<2> funct2, DAGOperand InTyRs2, string opcodes let Inst{31-25} = {simm, funct2}; } -let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in +let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCommutable = 1 in class QCIMVCC funct3, string opcodestr> : RVInstR4<0b00, funct3, OPC_CUSTOM_2, (outs GPRNoX0:$rd_wb), (ins GPRNoX0:$rd, GPRNoX0:$rs1, GPRNoX0:$rs2, GPRNoX0:$rs3), @@ -612,7 +612,7 @@ class QCIMVCC funct3, string opcodestr> let Constraints = "$rd = $rd_wb"; } -let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in +let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCommutable = 1 in class QCIMVCCI funct3, string opcodestr, DAGOperand immType> : RVInstR4<0b10, funct3, OPC_CUSTOM_2, (outs GPRNoX0:$rd_wb), (ins GPRNoX0:$rd, GPRNoX0:$rs1, immType:$imm, GPRNoX0:$rs3), @@ -953,7 +953,7 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { } def QC_MULIADD : RVInstI<0b110, OPC_CUSTOM_0, (outs GPRNoX0:$rd_wb), - (ins GPRNoX0:$rd, GPRNoX0:$rs1, simm12:$imm12), + (ins GPRNoX0:$rd, GPRNoX0:$rs1, simm12_lo:$imm12), "qc.muliadd", "$rd, $rs1, $imm12"> { let Constraints = "$rd = $rd_wb"; } @@ -1350,6 +1350,10 @@ class QCIMVCCIPat : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), InTyImm:$imm, Cond, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))), (Inst GPRNoX0:$rd, GPRNoX0:$rs1, InTyImm:$imm, GPRNoX0:$rs3)>; +class QCIMVCCIZeroPat + : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), (i32 0), Cond, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))), + (Inst GPRNoX0:$rd, GPRNoX0:$rs1, 0, GPRNoX0:$rs3)>; + class QCISELECTCCIPat : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rd), simm5:$imm, Cond, (i32 GPRNoX0:$rs2), (i32 GPRNoX0:$rs3))), (Inst GPRNoX0:$rd, simm5:$imm, GPRNoX0:$rs2, GPRNoX0:$rs3)>; @@ -1411,8 +1415,8 @@ class SelectQCbi (IntCCtoRISCVCC $cc), GPRNoX0:$truev, GPRNoX0:$falsev)>; let Predicates = [HasVendorXqciac, IsRV32] in { -def : Pat<(i32 (add GPRNoX0:$rd, (mul GPRNoX0:$rs1, simm12:$imm12))), - (QC_MULIADD GPRNoX0:$rd, GPRNoX0:$rs1, simm12:$imm12)>; +def : Pat<(i32 (add GPRNoX0:$rd, (mul GPRNoX0:$rs1, simm12_lo:$imm12))), + (QC_MULIADD GPRNoX0:$rd, GPRNoX0:$rs1, simm12_lo:$imm12)>; def : Pat<(i32 
(add_like_non_imm12 (shl GPRNoX0:$rs1, uimm5gt3:$imm), GPRNoX0:$rs2)), (QC_SHLADD GPRNoX0:$rs1, GPRNoX0:$rs2, uimm5gt3:$imm)>; def : Pat<(i32 (riscv_shl_add GPRNoX0:$rs1, uimm5gt3:$imm, GPRNoX0:$rs2)), @@ -1538,14 +1542,7 @@ def: Pat<(i32 (ctlz (not (i32 GPR:$rs1)))), (QC_CLO GPR:$rs1)>; let Predicates = [HasVendorXqciint, IsRV32] in def : Pat<(riscv_mileaveret_glue), (QC_C_MILEAVERET)>; -let Predicates = [HasVendorXqcicm, IsRV32] in { -// (SELECT X, Y, Z) is canonicalised to `(riscv_selectcc x, 0, NE, y, z)`. -// This exists to prioritise over the `Select_GPR_Using_CC_GPR` pattern. -def : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), (i32 0), SETNE, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))), - (QC_MVNEI GPRNoX0:$rd, GPRNoX0:$rs1, 0, GPRNoX0:$rs3)>; -def : Pat<(i32 (riscv_selectcc (i32 GPRNoX0:$rs1), (i32 0), SETEQ, (i32 GPRNoX0:$rs3), (i32 GPRNoX0:$rd))), - (QC_MVEQI GPRNoX0:$rd, GPRNoX0:$rs1, 0, GPRNoX0:$rs3)>; - +let Predicates = [HasVendorXqcicm, NoShortForwardBranchOpt, IsRV32] in { def : QCIMVCCPat; def : QCIMVCCPat; def : QCIMVCCPat; @@ -1553,12 +1550,24 @@ def : QCIMVCCPat; def : QCIMVCCPat; def : QCIMVCCPat; -def : QCIMVCCIPat; -def : QCIMVCCIPat; -def : QCIMVCCIPat; -def : QCIMVCCIPat; -def : QCIMVCCIPat; -def : QCIMVCCIPat; +// These exist to prioritise over the `Select_GPR_Using_CC_GPR` pattern for X0. +def : QCIMVCCIZeroPat; +def : QCIMVCCIZeroPat; +def : QCIMVCCIZeroPat; +def : QCIMVCCIZeroPat; +def : QCIMVCCIZeroPat; +def : QCIMVCCIZeroPat; +} + +let Predicates = [HasVendorXqcicm, IsRV32] in { +// These all use *imm5nonzero because we want to use PseudoCCMOVGPR with X0 when SFB is enabled. +// When SFB is not enabled, the `QCIMVCCIZeroPat`s above will be used if RHS=0. +def : QCIMVCCIPat; +def : QCIMVCCIPat; +def : QCIMVCCIPat; +def : QCIMVCCIPat; +def : QCIMVCCIPat; +def : QCIMVCCIPat; } let Predicates = [HasVendorXqcicli, IsRV32] in { @@ -1667,27 +1676,27 @@ def : CompressPat<(QC_E_LW GPRC:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm), (C_LW GPRC:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm)>; def : CompressPat<(QC_E_LW GPRNoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm), (C_LWSP GPRNoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm)>; -def : CompressPat<(QC_E_LB GPR:$rd, GPRMem:$rs1, simm12:$imm12), - (LB GPR:$rd, GPRMem:$rs1, simm12:$imm12)>; -def : CompressPat<(QC_E_LBU GPR:$rd, GPRMem:$rs1, simm12:$imm12), - (LBU GPR:$rd, GPRMem:$rs1, simm12:$imm12)>; -def : CompressPat<(QC_E_LH GPR:$rd, GPRMem:$rs1, simm12:$imm12), - (LH GPR:$rd, GPRMem:$rs1, simm12:$imm12)>; -def : CompressPat<(QC_E_LHU GPR:$rd, GPRMem:$rs1, simm12:$imm12), - (LHU GPR:$rd, GPRMem:$rs1, simm12:$imm12)>; -def : CompressPat<(QC_E_LW GPR:$rd, GPRMem:$rs1, simm12:$imm12), - (LW GPR:$rd, GPRMem:$rs1, simm12:$imm12)>; +def : CompressPat<(QC_E_LB GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12), + (LB GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12)>; +def : CompressPat<(QC_E_LBU GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12), + (LBU GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12)>; +def : CompressPat<(QC_E_LH GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12), + (LH GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12)>; +def : CompressPat<(QC_E_LHU GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12), + (LHU GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12)>; +def : CompressPat<(QC_E_LW GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12), + (LW GPR:$rd, GPRMem:$rs1, simm12_lo:$imm12)>; def : CompressPat<(QC_E_SW GPRC:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm), (C_SW GPRC:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm)>; def : CompressPat<(QC_E_SW GPR:$rs2, SPMem:$rs1, uimm8_lsb00:$imm), (C_SWSP GPR:$rs2, SPMem:$rs1, uimm8_lsb00:$imm)>; -def : 
CompressPat<(QC_E_SB GPR:$rs2, GPRMem:$rs1, simm12:$imm12), - (SB GPR:$rs2, GPRMem:$rs1, simm12:$imm12)>; -def : CompressPat<(QC_E_SH GPR:$rs2, GPRMem:$rs1, simm12:$imm12), - (SH GPR:$rs2, GPRMem:$rs1, simm12:$imm12)>; -def : CompressPat<(QC_E_SW GPR:$rs2, GPRMem:$rs1, simm12:$imm12), - (SW GPR:$rs2, GPRMem:$rs1, simm12:$imm12)>; +def : CompressPat<(QC_E_SB GPR:$rs2, GPRMem:$rs1, simm12_lo:$imm12), + (SB GPR:$rs2, GPRMem:$rs1, simm12_lo:$imm12)>; +def : CompressPat<(QC_E_SH GPR:$rs2, GPRMem:$rs1, simm12_lo:$imm12), + (SH GPR:$rs2, GPRMem:$rs1, simm12_lo:$imm12)>; +def : CompressPat<(QC_E_SW GPR:$rs2, GPRMem:$rs1, simm12_lo:$imm12), + (SW GPR:$rs2, GPRMem:$rs1, simm12_lo:$imm12)>; } // isCompressOnly = true, Predicates = [HasVendorXqcilo, IsRV32] let Predicates = [HasVendorXqcicm, IsRV32] in { @@ -1752,23 +1761,23 @@ def : CompressPat<(QC_E_ADDAI X2, simm10_lsb0000nonzero:$imm), def : CompressPat<(QC_E_ADDI X2, X2, simm10_lsb0000nonzero:$imm), (C_ADDI16SP X2, simm10_lsb0000nonzero:$imm)>; -def : CompressPat<(QC_E_ADDI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12:$imm), - (ADDI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12:$imm)>; -def : CompressPat<(QC_E_ANDI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12:$imm), - (ANDI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12:$imm)>; -def : CompressPat<(QC_E_ORI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12:$imm), - (ORI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12:$imm)>; -def : CompressPat<(QC_E_XORI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12:$imm), - (XORI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12:$imm)>; - -def : CompressPat<(QC_E_ADDAI GPRNoX0:$rd, simm12:$imm), - (ADDI GPRNoX0:$rd, GPRNoX0:$rd, simm12:$imm)>; -def : CompressPat<(QC_E_ANDAI GPRNoX0:$rd, simm12:$imm), - (ANDI GPRNoX0:$rd, GPRNoX0:$rd, simm12:$imm)>; -def : CompressPat<(QC_E_ORAI GPRNoX0:$rd, simm12:$imm), - (ORI GPRNoX0:$rd, GPRNoX0:$rd, simm12:$imm)>; -def : CompressPat<(QC_E_XORAI GPRNoX0:$rd, simm12:$imm), - (XORI GPRNoX0:$rd, GPRNoX0:$rd, simm12:$imm)>; +def : CompressPat<(QC_E_ADDI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12_lo:$imm), + (ADDI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12_lo:$imm)>; +def : CompressPat<(QC_E_ANDI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12_lo:$imm), + (ANDI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12_lo:$imm)>; +def : CompressPat<(QC_E_ORI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12_lo:$imm), + (ORI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12_lo:$imm)>; +def : CompressPat<(QC_E_XORI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12_lo:$imm), + (XORI GPRNoX0:$rs1, GPRNoX0:$rs2, simm12_lo:$imm)>; + +def : CompressPat<(QC_E_ADDAI GPRNoX0:$rd, simm12_lo:$imm), + (ADDI GPRNoX0:$rd, GPRNoX0:$rd, simm12_lo:$imm)>; +def : CompressPat<(QC_E_ANDAI GPRNoX0:$rd, simm12_lo:$imm), + (ANDI GPRNoX0:$rd, GPRNoX0:$rd, simm12_lo:$imm)>; +def : CompressPat<(QC_E_ORAI GPRNoX0:$rd, simm12_lo:$imm), + (ORI GPRNoX0:$rd, GPRNoX0:$rd, simm12_lo:$imm)>; +def : CompressPat<(QC_E_XORAI GPRNoX0:$rd, simm12_lo:$imm), + (XORI GPRNoX0:$rd, GPRNoX0:$rd, simm12_lo:$imm)>; } // let isCompressOnly = true, Predicates = [HasVendorXqcilia, IsRV32] let isCompressOnly = true, Predicates = [HasVendorXqciac, IsRV32] in { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td index 5e013b496c6b1..1674c957b6579 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td @@ -63,13 +63,14 @@ defm SD : SRL_r_aq_rl<0b011, "sd">; //===----------------------------------------------------------------------===// class PatLAQ - : Pat<(vt (OpNode (vt GPRMemZeroOffset:$rs1))), (Inst GPRMemZeroOffset:$rs1)>; + : Pat<(vt (OpNode (XLenVT 
GPRMemZeroOffset:$rs1))), + (Inst GPRMemZeroOffset:$rs1)>; // n.b. this switches order of arguments // to deal with the fact that SRL has addr, data // while atomic_store has data, addr class PatSRL - : Pat<(OpNode (vt GPR:$rs2), (vt GPRMemZeroOffset:$rs1)), + : Pat<(OpNode (vt GPR:$rs2), (XLenVT GPRMemZeroOffset:$rs1)), (Inst GPRMemZeroOffset:$rs1, GPR:$rs2)>; @@ -97,16 +98,15 @@ let Predicates = [HasStdExtZalasr] in { let Predicates = [HasStdExtZalasr, IsRV32] in { def : PatLAQ, LW_AQ>; def : PatLAQ, LW_AQ>; - -} // Predicates = [HasStdExtZalasr, IsRV64] +} // Predicates = [HasStdExtZalasr, IsRV32] let Predicates = [HasStdExtZalasr, IsRV64] in { - def : PatLAQ, LW_AQ>; - def : PatLAQ, LW_AQ>; + def : PatLAQ, LW_AQ, i64>; + def : PatLAQ, LW_AQ, i64>; - def : PatLAQ, LD_AQ>; - def : PatLAQ, LD_AQ>; + def : PatLAQ, LD_AQ, i64>; + def : PatLAQ, LD_AQ, i64>; - def : PatSRL, SD_RL>; - def : PatSRL, SD_RL>; + def : PatSRL, SD_RL, i64>; + def : PatSRL, SD_RL, i64>; } // Predicates = [HasStdExtZalasr, IsRV64] diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td index 6d8672b72a12d..0be9eab6870ec 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td @@ -53,7 +53,7 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in { defm : VPatConversionVF_WF_BF_RM<"int_riscv_vfncvtbf16_f_f_w", "PseudoVFNCVTBF16_F_F", isSEWAware=1>; - foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in { + foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; let Predicates = [HasVInstructionsBF16Minimal] in @@ -91,9 +91,9 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in { let Predicates = [HasStdExtZvfbfwma] in { defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmaccbf16", "PseudoVFWMACCBF16", - AllWidenableBFloatToFloatVectors, isSEWAware=1>; + AllWidenableBF16ToFloatVectors, isSEWAware=1>; defm : VPatWidenFPMulAccVL_VV_VF_RM; + AllWidenableBF16ToFloatVectors>; defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACCBF16", - AllWidenableBFloatToFloatVectors>; + AllWidenableBF16ToFloatVectors>; } diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td index 95f8a8789fa6c..17a794867be9e 100644 --- a/llvm/lib/Target/RISCV/RISCVProcessors.td +++ b/llvm/lib/Target/RISCV/RISCVProcessors.td @@ -347,16 +347,58 @@ defvar SiFiveP400TuneFeatures = [TuneNoDefaultUnroll, TunePostRAScheduler]; def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model, - !listconcat(RVA22U64Features, - [FeatureStdExtZifencei, + [Feature64Bit, + FeatureStdExtI, + FeatureStdExtM, + FeatureStdExtA, + FeatureStdExtF, + FeatureStdExtD, + FeatureStdExtC, + FeatureStdExtZicsr, + FeatureStdExtZiccif, + FeatureStdExtZiccrse, + FeatureStdExtZiccamoa, + FeatureStdExtZicclsm, + FeatureStdExtZa64rs, + FeatureStdExtZihpm, + FeatureStdExtZihintpause, + FeatureStdExtB, + FeatureStdExtZic64b, + FeatureStdExtZicbom, + FeatureStdExtZicbop, + FeatureStdExtZicboz, + FeatureStdExtZfhmin, + FeatureStdExtZkt, + FeatureStdExtZifencei, FeatureStdExtZihintntl, FeatureUnalignedScalarMem, - FeatureUnalignedVectorMem]), + FeatureUnalignedVectorMem], SiFiveP400TuneFeatures>; def SIFIVE_P470 : RISCVProcessorModel<"sifive-p470", SiFiveP400Model, - !listconcat(RVA22U64Features, - [FeatureStdExtV, + [Feature64Bit, + FeatureStdExtI, + FeatureStdExtM, + FeatureStdExtA, + FeatureStdExtF, + FeatureStdExtD, + FeatureStdExtC, + FeatureStdExtZicsr, + FeatureStdExtZiccif, + 
FeatureStdExtZiccrse, + FeatureStdExtZiccamoa, + FeatureStdExtZicclsm, + FeatureStdExtZa64rs, + FeatureStdExtZihpm, + FeatureStdExtZihintpause, + FeatureStdExtB, + FeatureStdExtZic64b, + FeatureStdExtZicbom, + FeatureStdExtZicbop, + FeatureStdExtZicboz, + FeatureStdExtZfhmin, + FeatureStdExtZkt, + FeatureStdExtV, FeatureStdExtZifencei, FeatureStdExtZihintntl, FeatureStdExtZvl128b, @@ -368,7 +410,7 @@ def SIFIVE_P470 : RISCVProcessorModel<"sifive-p470", SiFiveP400Model, FeatureVendorXSiFivecdiscarddlone, FeatureVendorXSiFivecflushdlone, FeatureUnalignedScalarMem, - FeatureUnalignedVectorMem]), + FeatureUnalignedVectorMem], !listconcat(SiFiveP400TuneFeatures, [TuneNoSinkSplatOperands, TuneVXRMPipelineFlush])>; @@ -397,8 +439,29 @@ def SIFIVE_P550 : RISCVProcessorModel<"sifive-p550", SiFiveP500Model, } def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model, - !listconcat(RVA22U64Features, - [FeatureStdExtV, + [Feature64Bit, + FeatureStdExtI, + FeatureStdExtM, + FeatureStdExtA, + FeatureStdExtF, + FeatureStdExtD, + FeatureStdExtC, + FeatureStdExtZicsr, + FeatureStdExtZiccif, + FeatureStdExtZiccrse, + FeatureStdExtZiccamoa, + FeatureStdExtZicclsm, + FeatureStdExtZa64rs, + FeatureStdExtZihpm, + FeatureStdExtZihintpause, + FeatureStdExtB, + FeatureStdExtZic64b, + FeatureStdExtZicbom, + FeatureStdExtZicbop, + FeatureStdExtZicboz, + FeatureStdExtZfhmin, + FeatureStdExtZkt, + FeatureStdExtV, FeatureStdExtZifencei, FeatureStdExtZihintntl, FeatureStdExtZvl128b, @@ -408,7 +471,7 @@ def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", SiFiveP600Model, FeatureStdExtZvksc, FeatureStdExtZvksg, FeatureUnalignedScalarMem, - FeatureUnalignedVectorMem]), + FeatureUnalignedVectorMem], [TuneNoDefaultUnroll, TuneConditionalCompressedMoveFusion, TuneLUIADDIFusion, diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td index d81718c2361de..3f2e7dbd07a67 100644 --- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td +++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td @@ -169,6 +169,64 @@ class SiFive7GetOrderedReductionCycles { int c = !mul(6, VLUpperBound); } +class isSingleDLEN { + bit c = !or(!eq(mx, "MF2"), !or(!eq(mx, "MF4"), !eq(mx, "MF8"))); +} + +class SiFive7GetCyclesVRGatherVV { + // if (hasFastGather && isSingleDLEN(mx)) + // c = 1; + // else if (hasFastGather && (log2(SEW/8) + log2(LMUL) <= log2(DLEN / 32)) + // c = LMUL * 2 * ceil(vl * SEW / DLEN); + // else + // c = vl; + + defvar y = !logtwo(!div(sew, 8)); + defvar x = !cond( + !eq(mx, "M1") : y, + !eq(mx, "M2") : !add(y, 1), + !eq(mx, "M4") : !add(y, 2), + !eq(mx, "M8") : !add(y, 3), + // Give isSingleDLEN(mx) cases a garbage value to avoid build failures, + // even though x will go unused. + true : 1 + ); + // LMUL * 2 * ceil(vl * SEW / DLEN) = LMUL * 2 * ceil(2 * LMUL) + defvar z = !cond( + !eq(mx, "M1") : 4, + !eq(mx, "M2") : 16, + !eq(mx, "M4") : 64, + !eq(mx, "M8") : 256, + // Give isSingleDLEN(mx) cases a garbage value to avoid build failures, + // even though z will go unused. 
+ true : 1 + ); + defvar VLUpperBound = SiFive7GetCyclesOnePerElement.c; + bit IsSingleDLEN = isSingleDLEN.c; + + int c = !cond( + !and(hasFastGather, IsSingleDLEN) : 1, + !and(hasFastGather, !le(x, !logtwo(!div(VLEN, 64)))) : z, + true: VLUpperBound + ); +} + +class SiFive7GetCyclesVCompress { + + // if (hasFastGather && isSingleDLEN(mx)) + // c = 1 + // else + // c = vl + defvar VLUpperBound = SiFive7GetCyclesOnePerElement.c; + bit IsSingleDLEN = isSingleDLEN.c; + + int c = !if(!and(hasFastGather, IsSingleDLEN), + 1, + VLUpperBound); +} + class SiFive7GetSiFiveVFNRClipCycles { int latency = !cond( !eq(mx, "MF8"): 7, @@ -259,7 +317,8 @@ multiclass SiFive7WriteResBase { + bit isFP64Throttled = false, + bit hasFastGather = false> { // Branching let Latency = 3 in { @@ -929,16 +988,16 @@ multiclass SiFive7WriteResBase.c; defvar IsWorstCase = SiFive7IsWorstCaseMX.c; let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in { defm : LMULWriteResMX<"WriteVMALUV", [VCQ, VA1], mx, IsWorstCase>; - defm : LMULWriteResMX<"WriteVMPopV", [VCQ, VA1], mx, IsWorstCase>; - defm : LMULWriteResMX<"WriteVMFFSV", [VCQ, VA1], mx, IsWorstCase>; defm : LMULWriteResMX<"WriteVMSFSV", [VCQ, VA1], mx, IsWorstCase>; } } + // Simple mask logical used in series foreach mx = SchedMxList in { defvar Cycles = SiFive7GetCyclesDefault.c; defvar IsWorstCase = SiFive7IsWorstCaseMX.c; @@ -947,13 +1006,23 @@ multiclass SiFive7WriteResBase; } } + // Mask reduction + foreach mx = SchedMxList in { + defvar IsWorstCase = SiFive7IsWorstCaseMX.c; + let Latency = 11, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 3)] in { + defm "" : LMULWriteResMX<"WriteVMFFSV", [VCQ, VA1], mx, IsWorstCase>; + defm "" : LMULWriteResMX<"WriteVMPopV", [VCQ, VA1], mx, IsWorstCase>; + } + } // 16. Vector Permutation Instructions + let Latency = 11, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 3)] in { + def : WriteRes; + def : WriteRes; + } let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, 1)] in { def : WriteRes; - def : WriteRes; def : WriteRes; - def : WriteRes; } foreach mx = SchedMxList in { defvar Cycles = SiFive7GetCyclesDefault.c; @@ -966,13 +1035,33 @@ multiclass SiFive7WriteResBase.val in { - defvar Cycles = SiFive7GetCyclesOnePerElement.c; defvar IsWorstCase = SiFive7IsWorstCaseMXSEW.c; - let Latency = !add(Cycles, 3), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in { - defm : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [VCQ, VA1], mx, sew, IsWorstCase>; - defm : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [VCQ, VA1], mx, sew, IsWorstCase>; - defm : LMULSEWWriteResMXSEW<"WriteVCompressV", [VCQ, VA1], mx, sew, IsWorstCase>; - } + defvar IsSingleDLEN = isSingleDLEN.c; + + defvar GatherVVCycles = + SiFive7GetCyclesVRGatherVV.c; + // 7 + DLEN/ SEW + defvar SlowGatherLat = !add(7, !div(!div(VLEN, 2), sew)); + defvar GatherVVLat = !if(hasFastGather, + !add(3, GatherVVCycles), SlowGatherLat); + + let Latency = GatherVVLat, AcquireAtCycles = [0, 1], + ReleaseAtCycles = [1, !add(5, GatherVVCycles)] in + defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [VCQ, VA1], mx, sew, IsWorstCase>; + + // VRGatherEI16VV is not improved by fastGather. 
+ defvar GatherEI16VVCycles = SiFive7GetCyclesOnePerElement.c; + let Latency = SlowGatherLat, AcquireAtCycles = [0, 1], + ReleaseAtCycles = [1, !add(5, GatherEI16VVCycles)] in + defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [VCQ, VA1], mx, sew, IsWorstCase>; + + defvar CompressCycles = SiFive7GetCyclesVCompress.c; + defvar CompressLat = !if(!and(hasFastGather, IsSingleDLEN), + 4, + !add(7, CompressCycles)); // 7 + VL + let Latency = CompressLat, AcquireAtCycles = [0, 1], + ReleaseAtCycles = [1, !add(8, CompressCycles)] in + defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [VCQ, VA1], mx, sew, IsWorstCase>; } } @@ -1398,7 +1487,8 @@ multiclass SiFive7ReadAdvance { /// eventually be supplied by different SchedMachineModels. multiclass SiFive7SchedResources { + bit isFP64Throttled, + bit hasFastGather> { defm SiFive7 : SiFive7ProcResources; // Pull out defs from SiFive7ProcResources so we can refer to them by name. @@ -1425,7 +1515,8 @@ multiclass SiFive7SchedResources; + SiFive7VCQ, fpLatencies, isFP64Throttled, + hasFastGather>; //===----------------------------------------------------------------------===// // Bypass and advance @@ -1458,6 +1549,7 @@ class SiFive7SchedMachineModel : SchedMachineModel { SiFive7FPLatencies FPLatencies; bit IsFP64Throttled = false; + bit HasFastGather = false; string Name = !subst("Model", "", !subst("SiFive7", "", NAME)); } @@ -1484,6 +1576,7 @@ def SiFive7VLEN1024X300Model : SiFive7SchedMachineModel<1024> { let HasExtraVALU = true; let FPLatencies = SiFive7LowFPLatencies; let IsFP64Throttled = true; + let HasFastGather = true; } /// Binding models to their scheduling resources. @@ -1491,7 +1584,8 @@ foreach model = [SiFive7VLEN512Model, SiFive7VLEN1024X300Model] in { let SchedModel = model in defm model.Name : SiFive7SchedResources; + model.IsFP64Throttled, + model.HasFastGather>; } // Some model name aliases. diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index 72a35ee2bc309..ee25f6918de8b 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -167,6 +167,42 @@ static bool canUseShiftPair(Instruction *Inst, const APInt &Imm) { return false; } +// If this is i64 AND is part of (X & -(1 << C1) & 0xffffffff) == C2 << C1), +// DAGCombiner can convert this to (sraiw X, C1) == sext(C2) for RV64. On RV32, +// the type will be split so only the lower 32 bits need to be compared using +// (srai/srli X, C) == C2. +static bool canUseShiftCmp(Instruction *Inst, const APInt &Imm) { + if (!Inst->hasOneUse()) + return false; + + // Look for equality comparison. + auto *Cmp = dyn_cast(*Inst->user_begin()); + if (!Cmp || !Cmp->isEquality()) + return false; + + // Right hand side of comparison should be a constant. + auto *C = dyn_cast(Cmp->getOperand(1)); + if (!C) + return false; + + uint64_t Mask = Imm.getZExtValue(); + + // Mask should be of the form -(1 << C) in the lower 32 bits. + if (!isUInt<32>(Mask) || !isPowerOf2_32(-uint32_t(Mask))) + return false; + + // Comparison constant should be a subset of Mask. + uint64_t CmpC = C->getZExtValue(); + if ((CmpC & Mask) != CmpC) + return false; + + // We'll need to sign extend the comparison constant and shift it right. Make + // sure the new constant can use addi/xori+seqz/snez. 
+ unsigned ShiftBits = llvm::countr_zero(Mask); + int64_t NewCmpC = SignExtend64<32>(CmpC) >> ShiftBits; + return NewCmpC >= -2048 && NewCmpC <= 2048; +} + InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind, @@ -224,6 +260,9 @@ InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, if (Inst && Idx == 1 && Imm.getBitWidth() <= ST->getXLen() && canUseShiftPair(Inst, Imm)) return TTI::TCC_Free; + if (Inst && Idx == 1 && Imm.getBitWidth() == 64 && + canUseShiftCmp(Inst, Imm)) + return TTI::TCC_Free; Takes12BitImm = true; break; case Instruction::Add: @@ -2774,6 +2813,44 @@ bool RISCVTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst, Alignment, Mask, EVL, Stride); return true; } + case Intrinsic::riscv_vloxei_mask: + case Intrinsic::riscv_vluxei_mask: + case Intrinsic::riscv_vsoxei_mask: + case Intrinsic::riscv_vsuxei_mask: + HasMask = true; + [[fallthrough]]; + case Intrinsic::riscv_vloxei: + case Intrinsic::riscv_vluxei: + case Intrinsic::riscv_vsoxei: + case Intrinsic::riscv_vsuxei: { + // Intrinsic interface (only listed ordered version): + // riscv_vloxei(merge, ptr, index, vl) + // riscv_vloxei_mask(merge, ptr, index, mask, vl, policy) + // riscv_vsoxei(val, ptr, index, vl) + // riscv_vsoxei_mask(val, ptr, index, mask, vl, policy) + bool IsWrite = Inst->getType()->isVoidTy(); + Type *Ty = IsWrite ? Inst->getArgOperand(0)->getType() : Inst->getType(); + const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID); + unsigned VLIndex = RVVIInfo->VLOperand; + unsigned PtrOperandNo = VLIndex - 2 - HasMask; + Value *Mask; + if (HasMask) { + Mask = Inst->getArgOperand(VLIndex - 1); + } else { + // Mask cannot be nullptr here: vector GEP produces , + // and casting that to scalar i64 triggers a vector/scalar mismatch + // assertion in CreatePointerCast. Use an all-true mask so ASan lowers it + // via extractelement instead. + Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C)); + Mask = ConstantInt::getTrue(MaskType); + } + Value *EVL = Inst->getArgOperand(VLIndex); + Value *OffsetOp = Inst->getArgOperand(PtrOperandNo + 1); + Info.InterestingOperands.emplace_back(Inst, PtrOperandNo, IsWrite, Ty, + Align(1), Mask, EVL, + /* Stride */ nullptr, OffsetOp); + return true; + } } return false; } @@ -3062,8 +3139,8 @@ bool RISCVTTIImpl::isProfitableToSinkOperands( bool IsVPSplat = match(Op, m_Intrinsic( m_Value(), m_Value(), m_Value())); if (!IsVPSplat && - !match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), - m_Undef(), m_ZeroMask()))) + !match(Op, m_Shuffle(m_InsertElt(m_Value(), m_Value(), m_ZeroInt()), + m_Value(), m_ZeroMask()))) continue; // Don't sink i1 splats. 
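The hunk above adds canUseShiftCmp, which decides when a 64-bit AND immediate feeding an equality compare is effectively free because the pair can later be lowered to a shift-and-compare. The following is a minimal standalone sketch of that arithmetic, not LLVM code: it uses only the C++20 standard library in place of MathExtras, the helper name and the concrete mask/constant values are made up for illustration, and the -2048..2048 bound is copied verbatim from the patch.

// Sketch only (C++20): mirrors the checks in canUseShiftCmp above so the
// arithmetic can be exercised in isolation.
#include <bit>
#include <cstdint>
#include <cstdio>

static bool sketchCanUseShiftCmp(uint64_t Mask, uint64_t CmpC) {
  // Mask must fit in 32 bits and have the form -(1 << C) in the low 32 bits,
  // i.e. its 32-bit negation must be a power of two.
  if (Mask > 0xffffffffULL ||
      !std::has_single_bit(static_cast<uint32_t>(0u - static_cast<uint32_t>(Mask))))
    return false;
  // The comparison constant must be a subset of the mask bits.
  if ((CmpC & Mask) != CmpC)
    return false;
  // Sign-extend the constant from 32 bits and shift it right; the result must
  // be small enough for an addi/xori immediate followed by seqz/snez.
  int ShiftBits = std::countr_zero(Mask);
  int64_t NewCmpC =
      static_cast<int64_t>(static_cast<int32_t>(static_cast<uint32_t>(CmpC))) >> ShiftBits;
  return NewCmpC >= -2048 && NewCmpC <= 2048;
}

int main() {
  // (X & 0xfffff000) == 0xfffff000 can become (sraiw X, 12) == -1, so the AND
  // immediate is treated as free.
  std::printf("%d\n", sketchCanUseShiftCmp(0xfffff000ULL, 0xfffff000ULL)); // 1
  // 0x12345000 passes the mask checks but needs 0x12345 after the shift, which
  // does not fit an addi/xori immediate, so the immediate is not free.
  std::printf("%d\n", sketchCanUseShiftCmp(0xfffff000ULL, 0x12345000ULL)); // 0
}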
diff --git a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp index c2a6e51913a0a..b765fecbc8de3 100644 --- a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp @@ -81,6 +81,7 @@ class SPIRVAsmPrinter : public AsmPrinter { void outputExecutionMode(const Module &M); void outputAnnotations(const Module &M); void outputModuleSections(); + void outputFPFastMathDefaultInfo(); bool isHidden() { return MF->getFunction() .getFnAttribute(SPIRV_BACKEND_SERVICE_FUN_NAME) @@ -498,11 +499,27 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) { NamedMDNode *Node = M.getNamedMetadata("spirv.ExecutionMode"); if (Node) { for (unsigned i = 0; i < Node->getNumOperands(); i++) { + // If SPV_KHR_float_controls2 is enabled and we find any of + // FPFastMathDefault, ContractionOff or SignedZeroInfNanPreserve execution + // modes, skip it, it'll be done somewhere else. + if (ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) { + const auto EM = + cast( + cast((Node->getOperand(i))->getOperand(1)) + ->getValue()) + ->getZExtValue(); + if (EM == SPIRV::ExecutionMode::FPFastMathDefault || + EM == SPIRV::ExecutionMode::ContractionOff || + EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) + continue; + } + MCInst Inst; Inst.setOpcode(SPIRV::OpExecutionMode); addOpsFromMDNode(cast(Node->getOperand(i)), Inst, MAI); outputMCInst(Inst); } + outputFPFastMathDefaultInfo(); } for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) { const Function &F = *FI; @@ -552,12 +569,84 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) { } if (ST->isKernel() && !M.getNamedMetadata("spirv.ExecutionMode") && !M.getNamedMetadata("opencl.enable.FP_CONTRACT")) { - MCInst Inst; - Inst.setOpcode(SPIRV::OpExecutionMode); - Inst.addOperand(MCOperand::createReg(FReg)); - unsigned EM = static_cast(SPIRV::ExecutionMode::ContractionOff); - Inst.addOperand(MCOperand::createImm(EM)); - outputMCInst(Inst); + if (ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) { + // When SPV_KHR_float_controls2 is enabled, ContractionOff is + // deprecated. We need to use FPFastMathDefault with the appropriate + // flags instead. Since FPFastMathDefault takes a target type, we need + // to emit it for each floating-point type that exists in the module + // to match the effect of ContractionOff. As of now, there are 3 FP + // types: fp16, fp32 and fp64. + + // We only end up here because there is no "spirv.ExecutionMode" + // metadata, so that means no FPFastMathDefault. Therefore, we only + // need to make sure AllowContract is set to 0, as the rest of flags. + // We still need to emit the OpExecutionMode instruction, otherwise + // it's up to the client API to define the flags. Therefore, we need + // to find the constant with 0 value. + + // Collect the SPIRVTypes for fp16, fp32, and fp64 and the constant of + // type int32 with 0 value to represent the FP Fast Math Mode. + std::vector SPIRVFloatTypes; + const MachineInstr *ConstZero = nullptr; + for (const MachineInstr *MI : + MAI->getMSInstrs(SPIRV::MB_TypeConstVars)) { + // Skip if the instruction is not OpTypeFloat or OpConstant. + unsigned OpCode = MI->getOpcode(); + if (OpCode != SPIRV::OpTypeFloat && OpCode != SPIRV::OpConstantNull) + continue; + + // Collect the SPIRV type if it's a float. + if (OpCode == SPIRV::OpTypeFloat) { + // Skip if the target type is not fp16, fp32, fp64. 
+ const unsigned OpTypeFloatSize = MI->getOperand(1).getImm(); + if (OpTypeFloatSize != 16 && OpTypeFloatSize != 32 && + OpTypeFloatSize != 64) { + continue; + } + SPIRVFloatTypes.push_back(MI); + } else { + // Check if the constant is int32, if not skip it. + const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo(); + MachineInstr *TypeMI = MRI.getVRegDef(MI->getOperand(1).getReg()); + if (!TypeMI || TypeMI->getOperand(1).getImm() != 32) + continue; + + ConstZero = MI; + } + } + + // When SPV_KHR_float_controls2 is enabled, ContractionOff is + // deprecated. We need to use FPFastMathDefault with the appropriate + // flags instead. Since FPFastMathDefault takes a target type, we need + // to emit it for each floating-point type that exists in the module + // to match the effect of ContractionOff. As of now, there are 3 FP + // types: fp16, fp32 and fp64. + for (const MachineInstr *MI : SPIRVFloatTypes) { + MCInst Inst; + Inst.setOpcode(SPIRV::OpExecutionModeId); + Inst.addOperand(MCOperand::createReg(FReg)); + unsigned EM = + static_cast(SPIRV::ExecutionMode::FPFastMathDefault); + Inst.addOperand(MCOperand::createImm(EM)); + const MachineFunction *MF = MI->getMF(); + MCRegister TypeReg = + MAI->getRegisterAlias(MF, MI->getOperand(0).getReg()); + Inst.addOperand(MCOperand::createReg(TypeReg)); + assert(ConstZero && "There should be a constant zero."); + MCRegister ConstReg = MAI->getRegisterAlias( + ConstZero->getMF(), ConstZero->getOperand(0).getReg()); + Inst.addOperand(MCOperand::createReg(ConstReg)); + outputMCInst(Inst); + } + } else { + MCInst Inst; + Inst.setOpcode(SPIRV::OpExecutionMode); + Inst.addOperand(MCOperand::createReg(FReg)); + unsigned EM = + static_cast(SPIRV::ExecutionMode::ContractionOff); + Inst.addOperand(MCOperand::createImm(EM)); + outputMCInst(Inst); + } } } } @@ -606,6 +695,101 @@ void SPIRVAsmPrinter::outputAnnotations(const Module &M) { } } +void SPIRVAsmPrinter::outputFPFastMathDefaultInfo() { + // Collect the SPIRVTypes that are OpTypeFloat and the constants of type + // int32, that might be used as FP Fast Math Mode. + std::vector SPIRVFloatTypes; + // Hashtable to associate immediate values with the constant holding them. + std::unordered_map ConstMap; + for (const MachineInstr *MI : MAI->getMSInstrs(SPIRV::MB_TypeConstVars)) { + // Skip if the instruction is not OpTypeFloat or OpConstant. + unsigned OpCode = MI->getOpcode(); + if (OpCode != SPIRV::OpTypeFloat && OpCode != SPIRV::OpConstantI && + OpCode != SPIRV::OpConstantNull) + continue; + + // Collect the SPIRV type if it's a float. + if (OpCode == SPIRV::OpTypeFloat) { + SPIRVFloatTypes.push_back(MI); + } else { + // Check if the constant is int32, if not skip it. 
+ const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo(); + MachineInstr *TypeMI = MRI.getVRegDef(MI->getOperand(1).getReg()); + if (!TypeMI || TypeMI->getOpcode() != SPIRV::OpTypeInt || + TypeMI->getOperand(1).getImm() != 32) + continue; + + if (OpCode == SPIRV::OpConstantI) + ConstMap[MI->getOperand(2).getImm()] = MI; + else + ConstMap[0] = MI; + } + } + + for (const auto &[Func, FPFastMathDefaultInfoVec] : + MAI->FPFastMathDefaultInfoMap) { + if (FPFastMathDefaultInfoVec.empty()) + continue; + + for (const MachineInstr *MI : SPIRVFloatTypes) { + unsigned OpTypeFloatSize = MI->getOperand(1).getImm(); + unsigned Index = SPIRV::FPFastMathDefaultInfoVector:: + computeFPFastMathDefaultInfoVecIndex(OpTypeFloatSize); + assert(Index < FPFastMathDefaultInfoVec.size() && + "Index out of bounds for FPFastMathDefaultInfoVec"); + const auto &FPFastMathDefaultInfo = FPFastMathDefaultInfoVec[Index]; + assert(FPFastMathDefaultInfo.Ty && + "Expected target type for FPFastMathDefaultInfo"); + assert(FPFastMathDefaultInfo.Ty->getScalarSizeInBits() == + OpTypeFloatSize && + "Mismatched float type size"); + MCInst Inst; + Inst.setOpcode(SPIRV::OpExecutionModeId); + MCRegister FuncReg = MAI->getFuncReg(Func); + assert(FuncReg.isValid()); + Inst.addOperand(MCOperand::createReg(FuncReg)); + Inst.addOperand( + MCOperand::createImm(SPIRV::ExecutionMode::FPFastMathDefault)); + MCRegister TypeReg = + MAI->getRegisterAlias(MI->getMF(), MI->getOperand(0).getReg()); + Inst.addOperand(MCOperand::createReg(TypeReg)); + unsigned Flags = FPFastMathDefaultInfo.FastMathFlags; + if (FPFastMathDefaultInfo.ContractionOff && + (Flags & SPIRV::FPFastMathMode::AllowContract)) + report_fatal_error( + "Conflicting FPFastMathFlags: ContractionOff and AllowContract"); + + if (FPFastMathDefaultInfo.SignedZeroInfNanPreserve && + !(Flags & + (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf | + SPIRV::FPFastMathMode::NSZ))) { + if (FPFastMathDefaultInfo.FPFastMathDefault) + report_fatal_error("Conflicting FPFastMathFlags: " + "SignedZeroInfNanPreserve but at least one of " + "NotNaN/NotInf/NSZ is enabled."); + } + + // Don't emit if none of the execution modes was used. + if (Flags == SPIRV::FPFastMathMode::None && + !FPFastMathDefaultInfo.ContractionOff && + !FPFastMathDefaultInfo.SignedZeroInfNanPreserve && + !FPFastMathDefaultInfo.FPFastMathDefault) + continue; + + // Retrieve the constant instruction for the immediate value. + auto It = ConstMap.find(Flags); + if (It == ConstMap.end()) + report_fatal_error("Expected constant instruction for FP Fast Math " + "Mode operand of FPFastMathDefault execution mode."); + const MachineInstr *ConstMI = It->second; + MCRegister ConstReg = MAI->getRegisterAlias( + ConstMI->getMF(), ConstMI->getOperand(0).getReg()); + Inst.addOperand(MCOperand::createReg(ConstReg)); + outputMCInst(Inst); + } + } +} + void SPIRVAsmPrinter::outputModuleSections() { const Module *M = MMI->getModule(); // Get the global subtarget to output module-level info. @@ -614,7 +798,8 @@ void SPIRVAsmPrinter::outputModuleSections() { MAI = &SPIRVModuleAnalysis::MAI; assert(ST && TII && MAI && M && "Module analysis is required"); // Output instructions according to the Logical Layout of a Module: - // 1,2. All OpCapability instructions, then optional OpExtension instructions. + // 1,2. All OpCapability instructions, then optional OpExtension + // instructions. outputGlobalRequirements(); // 3. Optional OpExtInstImport instructions. 
outputOpExtInstImports(*M); @@ -622,7 +807,8 @@ void SPIRVAsmPrinter::outputModuleSections() { outputOpMemoryModel(); // 5. All entry point declarations, using OpEntryPoint. outputEntryPoints(); - // 6. Execution-mode declarations, using OpExecutionMode or OpExecutionModeId. + // 6. Execution-mode declarations, using OpExecutionMode or + // OpExecutionModeId. outputExecutionMode(*M); // 7a. Debug: all OpString, OpSourceExtension, OpSource, and // OpSourceContinued, without forward references. diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp index 86f445954400e..0e0c4547c751e 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp @@ -1096,6 +1096,41 @@ static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, return true; } +static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, + unsigned Scope, MachineIRBuilder &MIRBuilder, + SPIRVGlobalRegistry *GR) { + switch (Opcode) { + case SPIRV::OpCommitReadPipe: + case SPIRV::OpCommitWritePipe: + return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0)); + case SPIRV::OpGroupCommitReadPipe: + case SPIRV::OpGroupCommitWritePipe: + case SPIRV::OpGroupReserveReadPipePackets: + case SPIRV::OpGroupReserveWritePipePackets: { + Register ScopeConstReg = + MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0); + MachineRegisterInfo *MRI = MIRBuilder.getMRI(); + MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass); + MachineInstrBuilder MIB; + MIB = MIRBuilder.buildInstr(Opcode); + // Add Return register and type. + if (Opcode == SPIRV::OpGroupReserveReadPipePackets || + Opcode == SPIRV::OpGroupReserveWritePipePackets) + MIB.addDef(Call->ReturnRegister) + .addUse(GR->getSPIRVTypeID(Call->ReturnType)); + + MIB.addUse(ScopeConstReg); + for (unsigned int i = 0; i < Call->Arguments.size(); ++i) + MIB.addUse(Call->Arguments[i]); + + return true; + } + default: + return buildOpFromWrapper(MIRBuilder, Opcode, Call, + GR->getSPIRVTypeID(Call->ReturnType)); + } +} + static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) { switch (dim) { case SPIRV::Dim::DIM_1D: @@ -1127,11 +1162,24 @@ static unsigned getNumSizeComponents(SPIRVType *imgType) { static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalRegistry *GR, const CallBase &CB) { // Lookup the extended instruction number in the TableGen records. const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; uint32_t Number = SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number; + // fmin_common and fmax_common are now deprecated, and we should use fmin and + // fmax with NotInf and NotNaN flags instead. Keep original number to add + // later the NoNans and NoInfs flags. + uint32_t OrigNumber = Number; + const SPIRVSubtarget &ST = + cast(MIRBuilder.getMF().getSubtarget()); + if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) && + (Number == SPIRV::OpenCLExtInst::fmin_common || + Number == SPIRV::OpenCLExtInst::fmax_common)) { + Number = (Number == SPIRV::OpenCLExtInst::fmin_common) + ? SPIRV::OpenCLExtInst::fmin + : SPIRV::OpenCLExtInst::fmax; + } // Build extended instruction. 
auto MIB = @@ -1143,6 +1191,13 @@ static bool generateExtInst(const SPIRV::IncomingCall *Call, for (auto Argument : Call->Arguments) MIB.addUse(Argument); + MIB.getInstr()->copyIRFlags(CB); + if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common || + OrigNumber == SPIRV::OpenCLExtInst::fmax_common) { + // Add NoNans and NoInfs flags to fmin/fmax instruction. + MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans); + MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs); + } return true; } @@ -2350,6 +2405,20 @@ static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR); } +static bool generatePipeInst(const SPIRV::IncomingCall *Call, + MachineIRBuilder &MIRBuilder, + SPIRVGlobalRegistry *GR) { + const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; + unsigned Opcode = + SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode; + + unsigned Scope = SPIRV::Scope::Workgroup; + if (Builtin->Name.contains("sub_group")) + Scope = SPIRV::Scope::Subgroup; + + return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR); +} + static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) { @@ -2859,7 +2928,7 @@ std::optional lowerBuiltin(const StringRef DemangledCall, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl &Args, - SPIRVGlobalRegistry *GR) { + SPIRVGlobalRegistry *GR, const CallBase &CB) { LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n"); // Lookup the builtin in the TableGen records. @@ -2882,7 +2951,7 @@ std::optional lowerBuiltin(const StringRef DemangledCall, // Match the builtin with implementation based on the grouping. switch (Call->Builtin->Group) { case SPIRV::Extended: - return generateExtInst(Call.get(), MIRBuilder, GR); + return generateExtInst(Call.get(), MIRBuilder, GR, CB); case SPIRV::Relational: return generateRelationalInst(Call.get(), MIRBuilder, GR); case SPIRV::Group: @@ -2948,6 +3017,8 @@ std::optional lowerBuiltin(const StringRef DemangledCall, return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR); case SPIRV::Block2DLoadStore: return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR); + case SPIRV::Pipe: + return generatePipeInst(Call.get(), MIRBuilder, GR); } return false; } diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.h b/llvm/lib/Target/SPIRV/SPIRVBuiltins.h index 1a8641a8328dd..f6a5234cd3c73 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.h +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.h @@ -39,7 +39,7 @@ std::optional lowerBuiltin(const StringRef DemangledCall, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl &Args, - SPIRVGlobalRegistry *GR); + SPIRVGlobalRegistry *GR, const CallBase &CB); /// Helper function for finding a builtin function attributes /// by a demangled function name. Defined in SPIRVBuiltins.cpp. diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td index d08560bb6565a..2a8deb6bf498b 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td @@ -69,6 +69,7 @@ def ExtendedBitOps : BuiltinGroup; def BindlessINTEL : BuiltinGroup; def TernaryBitwiseINTEL : BuiltinGroup; def Block2DLoadStore : BuiltinGroup; +def Pipe : BuiltinGroup; //===----------------------------------------------------------------------===// // Class defining a demangled builtin record. 
The information in the record @@ -633,6 +634,29 @@ defm : DemangledNativeBuiltin<"__spirv_AtomicSMax", OpenCL_std, Atomic, 4, 4, Op defm : DemangledNativeBuiltin<"__spirv_AtomicUMin", OpenCL_std, Atomic, 4, 4, OpAtomicUMin>; defm : DemangledNativeBuiltin<"__spirv_AtomicUMax", OpenCL_std, Atomic, 4, 4, OpAtomicUMax>; +// Pipe Instruction +defm : DemangledNativeBuiltin<"__read_pipe_2", OpenCL_std, Pipe,2, 2, OpReadPipe>; +defm : DemangledNativeBuiltin<"__write_pipe_2", OpenCL_std, Pipe, 2, 2, OpWritePipe>; +defm : DemangledNativeBuiltin<"__read_pipe_4", OpenCL_std, Pipe,4, 4, OpReservedReadPipe>; +defm : DemangledNativeBuiltin<"__write_pipe_4", OpenCL_std, Pipe, 4, 4, OpReservedWritePipe>; +defm : DemangledNativeBuiltin<"__reserve_read_pipe", OpenCL_std, Pipe, 2, 2, OpReserveReadPipePackets>; +defm : DemangledNativeBuiltin<"__reserve_write_pipe", OpenCL_std, Pipe, 2, 2, OpReserveWritePipePackets>; +defm : DemangledNativeBuiltin<"__commit_read_pipe", OpenCL_std, Pipe, 2, 2, OpCommitReadPipe>; +defm : DemangledNativeBuiltin<"__commit_write_pipe", OpenCL_std, Pipe, 2, 2, OpCommitWritePipe>; +defm : DemangledNativeBuiltin<"is_valid_reserve_id", OpenCL_std, Pipe, 1, 1, OpIsValidReserveId>; +defm : DemangledNativeBuiltin<"__get_pipe_num_packets_ro", OpenCL_std, Pipe, 1, 1, OpGetNumPipePackets>; +defm : DemangledNativeBuiltin<"__get_pipe_max_packets_ro", OpenCL_std, Pipe, 1, 1, OpGetMaxPipePackets>; +defm : DemangledNativeBuiltin<"__get_pipe_num_packets_wo", OpenCL_std, Pipe, 1, 1, OpGetNumPipePackets>; +defm : DemangledNativeBuiltin<"__get_pipe_max_packets_wo", OpenCL_std, Pipe, 1, 1, OpGetMaxPipePackets>; +defm : DemangledNativeBuiltin<"__work_group_reserve_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveReadPipePackets>; +defm : DemangledNativeBuiltin<"__work_group_reserve_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveWritePipePackets>; +defm : DemangledNativeBuiltin<"__work_group_commit_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitReadPipe>; +defm : DemangledNativeBuiltin<"__work_group_commit_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitWritePipe>; +defm : DemangledNativeBuiltin<"__sub_group_reserve_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveReadPipePackets>; +defm : DemangledNativeBuiltin<"__sub_group_reserve_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveWritePipePackets>; +defm : DemangledNativeBuiltin<"__sub_group_commit_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitReadPipe>; +defm : DemangledNativeBuiltin<"__sub_group_commit_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitWritePipe>; + // Barrier builtin records: defm : DemangledNativeBuiltin<"barrier", OpenCL_std, Barrier, 1, 3, OpControlBarrier>; defm : DemangledNativeBuiltin<"work_group_barrier", OpenCL_std, Barrier, 1, 3, OpControlBarrier>; diff --git a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp index a412887e51adb..1a7c02c676465 100644 --- a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp @@ -641,9 +641,9 @@ bool SPIRVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, GR->getPointerSize())); } } - if (auto Res = - SPIRV::lowerBuiltin(DemangledName, ST->getPreferredInstructionSet(), - MIRBuilder, ResVReg, OrigRetTy, ArgVRegs, GR)) + if (auto Res = SPIRV::lowerBuiltin( + DemangledName, ST->getPreferredInstructionSet(), MIRBuilder, + ResVReg, OrigRetTy, ArgVRegs, GR, *Info.CB)) return *Res; } diff --git a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp index 
993de9e9f64ec..85ea9e156cb97 100644 --- a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp @@ -148,7 +148,10 @@ static const std::map> SPIRV::Extension::Extension::SPV_KHR_float_controls2}, {"SPV_INTEL_tensor_float32_conversion", SPIRV::Extension::Extension::SPV_INTEL_tensor_float32_conversion}, - {"SPV_KHR_bfloat16", SPIRV::Extension::Extension::SPV_KHR_bfloat16}}; + {"SPV_KHR_bfloat16", SPIRV::Extension::Extension::SPV_KHR_bfloat16}, + {"SPV_EXT_relaxed_printf_string_address_space", + SPIRV::Extension::Extension:: + SPV_EXT_relaxed_printf_string_address_space}}; bool SPIRVExtensionsParser::parse(cl::Option &O, StringRef ArgName, StringRef ArgValue, diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp index f5a49e2b47363..9f2e07508a36a 100644 --- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp @@ -25,6 +25,7 @@ #include "llvm/IR/TypedPointerType.h" #include "llvm/Transforms/Utils/Local.h" +#include #include #include @@ -152,6 +153,7 @@ class SPIRVEmitIntrinsics void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B); bool shouldTryToAddMemAliasingDecoration(Instruction *Inst); void insertSpirvDecorations(Instruction *I, IRBuilder<> &B); + void insertConstantsForFPFastMathDefault(Module &M); void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B); void processParamTypes(Function *F, IRBuilder<> &B); void processParamTypesByFunHeader(Function *F, IRBuilder<> &B); @@ -1909,11 +1911,12 @@ Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) { B.SetInsertPoint(&I); SmallVector Types = {I.getInsertedValueOperand()->getType()}; SmallVector Args; - for (auto &Op : I.operands()) - if (isa(Op)) - Args.push_back(UndefValue::get(B.getInt32Ty())); - else - Args.push_back(Op); + Value *AggregateOp = I.getAggregateOperand(); + if (isa(AggregateOp)) + Args.push_back(UndefValue::get(B.getInt32Ty())); + else + Args.push_back(AggregateOp); + Args.push_back(I.getInsertedValueOperand()); for (auto &Op : I.indices()) Args.push_back(B.getInt32(Op)); Instruction *NewI = @@ -2248,6 +2251,198 @@ void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I, } } +static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec( + const Module &M, + DenseMap + &FPFastMathDefaultInfoMap, + Function *F) { + auto it = FPFastMathDefaultInfoMap.find(F); + if (it != FPFastMathDefaultInfoMap.end()) + return it->second; + + // If the map does not contain the entry, create a new one. Initialize it to + // contain all 3 elements sorted by bit width of target type: {half, float, + // double}. 
+ SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec; + FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()), + SPIRV::FPFastMathMode::None); + FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()), + SPIRV::FPFastMathMode::None); + FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()), + SPIRV::FPFastMathMode::None); + return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec); +} + +static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo( + SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, + const Type *Ty) { + size_t BitWidth = Ty->getScalarSizeInBits(); + int Index = + SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex( + BitWidth); + assert(Index >= 0 && Index < 3 && + "Expected FPFastMathDefaultInfo for half, float, or double"); + assert(FPFastMathDefaultInfoVec.size() == 3 && + "Expected FPFastMathDefaultInfoVec to have exactly 3 elements"); + return FPFastMathDefaultInfoVec[Index]; +} + +void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) { + const SPIRVSubtarget *ST = TM->getSubtargetImpl(); + if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) + return; + + // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap. + // We need the entry point (function) as the key, and the target + // type and flags as the value. + // We also need to check ContractionOff and SignedZeroInfNanPreserve + // execution modes, as they are now deprecated and must be replaced + // with FPFastMathDefaultInfo. + auto Node = M.getNamedMetadata("spirv.ExecutionMode"); + if (!Node) { + if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) { + // This requires emitting ContractionOff. However, because + // ContractionOff is now deprecated, we need to replace it with + // FPFastMathDefaultInfo with FP Fast Math Mode bitmask set to all 0. + // We need to create the constant for that. + + // Create constant instruction with the bitmask flags. + Constant *InitValue = + ConstantInt::get(Type::getInt32Ty(M.getContext()), 0); + // TODO: Reuse constant if there is one already with the required + // value. + [[maybe_unused]] GlobalVariable *GV = + new GlobalVariable(M, // Module + Type::getInt32Ty(M.getContext()), // Type + true, // isConstant + GlobalValue::InternalLinkage, // Linkage + InitValue // Initializer + ); + } + return; + } + + // The table maps function pointers to their default FP fast math info. It + // can be assumed that the SmallVector is sorted by the bit width of the + // type. The first element is the smallest bit width, and the last element + // is the largest bit width, therefore, we will have {half, float, double} + // in the order of their bit widths. 
+ DenseMap + FPFastMathDefaultInfoMap; + + for (unsigned i = 0; i < Node->getNumOperands(); i++) { + MDNode *MDN = cast(Node->getOperand(i)); + assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands"); + Function *F = cast( + cast(MDN->getOperand(0))->getValue()); + const auto EM = + cast( + cast(MDN->getOperand(1))->getValue()) + ->getZExtValue(); + if (EM == SPIRV::ExecutionMode::FPFastMathDefault) { + assert(MDN->getNumOperands() == 4 && + "Expected 4 operands for FPFastMathDefault"); + const Type *T = cast(MDN->getOperand(2))->getType(); + unsigned Flags = + cast( + cast(MDN->getOperand(3))->getValue()) + ->getZExtValue(); + SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec = + getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F); + SPIRV::FPFastMathDefaultInfo &Info = + getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T); + Info.FastMathFlags = Flags; + Info.FPFastMathDefault = true; + } else if (EM == SPIRV::ExecutionMode::ContractionOff) { + assert(MDN->getNumOperands() == 2 && + "Expected no operands for ContractionOff"); + + // We need to save this info for every possible FP type, i.e. {half, + // float, double, fp128}. + SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec = + getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F); + for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) { + Info.ContractionOff = true; + } + } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) { + assert(MDN->getNumOperands() == 3 && + "Expected 1 operand for SignedZeroInfNanPreserve"); + unsigned TargetWidth = + cast( + cast(MDN->getOperand(2))->getValue()) + ->getZExtValue(); + // We need to save this info only for the FP type with TargetWidth. + SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec = + getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F); + int Index = SPIRV::FPFastMathDefaultInfoVector:: + computeFPFastMathDefaultInfoVecIndex(TargetWidth); + assert(Index >= 0 && Index < 3 && + "Expected FPFastMathDefaultInfo for half, float, or double"); + assert(FPFastMathDefaultInfoVec.size() == 3 && + "Expected FPFastMathDefaultInfoVec to have exactly 3 elements"); + FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true; + } + } + + std::unordered_map GlobalVars; + for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) { + if (FPFastMathDefaultInfoVec.empty()) + continue; + + for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) { + assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo"); + // Skip if none of the execution modes was used. + unsigned Flags = Info.FastMathFlags; + if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff && + !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault) + continue; + + // Check if flags are compatible. 
+ if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract)) + report_fatal_error("Conflicting FPFastMathFlags: ContractionOff " + "and AllowContract"); + + if (Info.SignedZeroInfNanPreserve && + !(Flags & + (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf | + SPIRV::FPFastMathMode::NSZ))) { + if (Info.FPFastMathDefault) + report_fatal_error("Conflicting FPFastMathFlags: " + "SignedZeroInfNanPreserve but at least one of " + "NotNaN/NotInf/NSZ is enabled."); + } + + if ((Flags & SPIRV::FPFastMathMode::AllowTransform) && + !((Flags & SPIRV::FPFastMathMode::AllowReassoc) && + (Flags & SPIRV::FPFastMathMode::AllowContract))) { + report_fatal_error("Conflicting FPFastMathFlags: " + "AllowTransform requires AllowReassoc and " + "AllowContract to be set."); + } + + auto it = GlobalVars.find(Flags); + GlobalVariable *GV = nullptr; + if (it != GlobalVars.end()) { + // Reuse existing global variable. + GV = it->second; + } else { + // Create constant instruction with the bitmask flags. + Constant *InitValue = + ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags); + // TODO: Reuse constant if there is one already with the required + // value. + GV = new GlobalVariable(M, // Module + Type::getInt32Ty(M.getContext()), // Type + true, // isConstant + GlobalValue::InternalLinkage, // Linkage + InitValue // Initializer + ); + GlobalVars[Flags] = GV; + } + } + } +} + void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I, IRBuilder<> &B) { auto *II = dyn_cast(I); @@ -2568,9 +2763,9 @@ GetElementPtrInst * SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) { // getelementptr [0 x T], P, 0 (zero), I -> getelementptr T, P, I. // If type is 0-length array and first index is 0 (zero), drop both the - // 0-length array type and the first index. This is a common pattern in the - // IR, e.g. when using a zero-length array as a placeholder for a flexible - // array such as unbound arrays. + // 0-length array type and the first index. This is a common pattern in + // the IR, e.g. when using a zero-length array as a placeholder for a + // flexible array such as unbound arrays. assert(GEP && "GEP is null"); Type *SrcTy = GEP->getSourceElementType(); SmallVector Indices(GEP->indices()); @@ -2632,8 +2827,9 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) { processParamTypesByFunHeader(CurrF, B); - // StoreInst's operand type can be changed during the next transformations, - // so we need to store it in the set. Also store already transformed types. + // StoreInst's operand type can be changed during the next + // transformations, so we need to store it in the set. Also store already + // transformed types. for (auto &I : instructions(Func)) { StoreInst *SI = dyn_cast(&I); if (!SI) @@ -2680,8 +2876,8 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) { for (auto &I : llvm::reverse(instructions(Func))) deduceOperandElementType(&I, &IncompleteRets); - // Pass forward for PHIs only, their operands are not preceed the instruction - // in meaning of `instructions(Func)`. + // Pass forward for PHIs only, their operands are not preceed the + // instruction in meaning of `instructions(Func)`. 
for (BasicBlock &BB : Func) for (PHINode &Phi : BB.phis()) if (isPointerTy(Phi.getType())) @@ -2691,8 +2887,8 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) { TrackConstants = true; if (!I->getType()->isVoidTy() || isa(I)) setInsertPointAfterDef(B, I); - // Visitors return either the original/newly created instruction for further - // processing, nullptr otherwise. + // Visitors return either the original/newly created instruction for + // further processing, nullptr otherwise. I = visit(*I); if (!I) continue; @@ -2815,6 +3011,7 @@ bool SPIRVEmitIntrinsics::runOnModule(Module &M) { bool Changed = false; parseFunDeclarations(M); + insertConstantsForFPFastMathDefault(M); TodoType.clear(); for (auto &F : M) diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp index 115766ce886c7..6fd1c7ed78c06 100644 --- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp @@ -806,7 +806,7 @@ Register SPIRVGlobalRegistry::buildGlobalVariable( // arguments. MDNode *GVarMD = nullptr; if (GVar && (GVarMD = GVar->getMetadata("spirv.Decorations")) != nullptr) - buildOpSpirvDecorations(Reg, MIRBuilder, GVarMD); + buildOpSpirvDecorations(Reg, MIRBuilder, GVarMD, ST); return Reg; } diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp index 45e88fc94144e..ba95ad822df75 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp @@ -132,7 +132,8 @@ bool SPIRVInstrInfo::isHeaderInstr(const MachineInstr &MI) const { } } -bool SPIRVInstrInfo::canUseFastMathFlags(const MachineInstr &MI) const { +bool SPIRVInstrInfo::canUseFastMathFlags(const MachineInstr &MI, + bool KHRFloatControls2) const { switch (MI.getOpcode()) { case SPIRV::OpFAddS: case SPIRV::OpFSubS: @@ -146,6 +147,24 @@ bool SPIRVInstrInfo::canUseFastMathFlags(const MachineInstr &MI) const { case SPIRV::OpFRemV: case SPIRV::OpFMod: return true; + case SPIRV::OpFNegateV: + case SPIRV::OpFNegate: + case SPIRV::OpOrdered: + case SPIRV::OpUnordered: + case SPIRV::OpFOrdEqual: + case SPIRV::OpFOrdNotEqual: + case SPIRV::OpFOrdLessThan: + case SPIRV::OpFOrdLessThanEqual: + case SPIRV::OpFOrdGreaterThan: + case SPIRV::OpFOrdGreaterThanEqual: + case SPIRV::OpFUnordEqual: + case SPIRV::OpFUnordNotEqual: + case SPIRV::OpFUnordLessThan: + case SPIRV::OpFUnordLessThanEqual: + case SPIRV::OpFUnordGreaterThan: + case SPIRV::OpFUnordGreaterThanEqual: + case SPIRV::OpExtInst: + return KHRFloatControls2 ? 
true : false; default: return false; } diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h index 72d2243fba62a..4de9d6a936abd 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h +++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.h @@ -36,7 +36,8 @@ class SPIRVInstrInfo : public SPIRVGenInstrInfo { bool isTypeDeclInstr(const MachineInstr &MI) const; bool isDecorationInstr(const MachineInstr &MI) const; bool isAliasingInstr(const MachineInstr &MI) const; - bool canUseFastMathFlags(const MachineInstr &MI) const; + bool canUseFastMathFlags(const MachineInstr &MI, + bool KHRFloatControls2) const; bool canUseNSW(const MachineInstr &MI) const; bool canUseNUW(const MachineInstr &MI) const; diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td index 496dcba17c10d..1723bfb639189 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td +++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td @@ -763,7 +763,38 @@ def OpGetDefaultQueue: Op<303, (outs ID:$res), (ins TYPE:$type), def OpBuildNDRange: Op<304, (outs ID:$res), (ins TYPE:$type, ID:$GWS, ID:$LWS, ID:$GWO), "$res = OpBuildNDRange $type $GWS $LWS $GWO">; -// TODO: 3.42.23. Pipe Instructions +// 3.42.23. Pipe Instructions + +def OpReadPipe: Op<274, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReadPipe $type $Pipe $Pointer $PcktSize $PcktAlign">; +def OpWritePipe: Op<275, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign), + "$res = OpWritePipe $type $Pipe $Pointer $PcktSize $PcktAlign">; +def OpReservedReadPipe : Op<276, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$ReserveId, ID:$Index, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReservedReadPipe $type $Pipe $ReserveId $Index $Pointer $PcktSize $PcktAlign">; +def OpReservedWritePipe : Op<277, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$ReserveId, ID:$Index, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReservedWritePipe $type $Pipe $ReserveId $Index $Pointer $PcktSize $PcktAlign">; +def OpReserveReadPipePackets : Op<278, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$NumPckts, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReserveReadPipePackets $type $Pipe $NumPckts $PcktSize $PcktAlign">; +def OpReserveWritePipePackets : Op<279, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$NumPckts, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReserveWritePipePackets $type $Pipe $NumPckts $PcktSize $PcktAlign">; +def OpCommitReadPipe : Op<280, (outs), (ins ID:$Pipe, ID:$ReserveId, ID:$PcktSize, ID:$PcktAlign), + "OpCommitReadPipe $Pipe $ReserveId $PcktSize $PcktAlign">; +def OpCommitWritePipe : Op<281, (outs), (ins ID:$Pipe, ID:$ReserveId, ID:$PcktSize, ID:$PcktAlign), + "OpCommitWritePipe $Pipe $ReserveId $PcktSize $PcktAlign">; +def OpIsValidReserveId : Op<282, (outs ID:$res), (ins TYPE:$type, ID:$ReserveId), + "$res = OpIsValidReserveId $type $ReserveId">; +def OpGetNumPipePackets : Op<283, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$PacketSize, ID:$PacketAlign), + "$res = OpGetNumPipePackets $type $Pipe $PacketSize $PacketAlign">; +def OpGetMaxPipePackets : Op<284, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$PacketSize, ID:$PacketAlign), + "$res = OpGetMaxPipePackets $type $Pipe $PacketSize $PacketAlign">; +def OpGroupReserveReadPipePackets : Op<285, (outs ID:$res), (ins TYPE:$type, ID:$Scope, ID:$Pipe, ID:$NumPckts, ID:$PacketSize, ID:$PacketAlign), + "$res = OpGroupReserveReadPipePackets $type $Scope $Pipe $NumPckts $PacketSize 
$PacketAlign">; +def OpGroupReserveWritePipePackets : Op<286, (outs ID:$res), (ins TYPE:$type, ID:$Scope, ID:$Pipe, ID:$NumPckts, ID:$PacketSize, ID:$PacketAlign), + "$res = OpGroupReserveWritePipePackets $type $Scope $Pipe $NumPckts $PacketSize $PacketAlign">; +def OpGroupCommitReadPipe : Op<287, (outs), (ins ID:$Scope, ID:$Pipe, ID:$ReserveId, ID:$PacketSize, ID:$PacketAlign), + "OpGroupCommitReadPipe $Scope $Pipe $ReserveId $PacketSize $PacketAlign">; +def OpGroupCommitWritePipe : Op<288, (outs), (ins ID:$Scope, ID:$Pipe, ID:$ReserveId, ID:$PacketSize, ID:$PacketAlign), + "OpGroupCommitWritePipe $Scope $Pipe $ReserveId $PacketSize $PacketAlign">; // 3.42.24. Non-Uniform Instructions diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp index 3ad5528fab061..0afec42135337 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -197,6 +197,8 @@ class SPIRVInstructionSelector : public InstructionSelector { bool selectOverflowArith(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, unsigned Opcode) const; + bool selectDebugTrap(Register ResVReg, const SPIRVType *ResType, + MachineInstr &I) const; bool selectIntegerDot(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, bool Signed) const; @@ -207,6 +209,9 @@ class SPIRVInstructionSelector : public InstructionSelector { bool selectOpIsInf(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const; + bool selectOpIsNan(Register ResVReg, const SPIRVType *ResType, + MachineInstr &I) const; + template bool selectDot4AddPacked(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const; @@ -278,6 +283,12 @@ class SPIRVInstructionSelector : public InstructionSelector { GL::GLSLExtInst GLInst) const; bool selectExtInst(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, const ExtInstList &ExtInsts) const; + bool selectExtInstForLRound(Register ResVReg, const SPIRVType *ResType, + MachineInstr &I, CL::OpenCLExtInst CLInst, + GL::GLSLExtInst GLInst) const; + bool selectExtInstForLRound(Register ResVReg, const SPIRVType *ResType, + MachineInstr &I, + const ExtInstList &ExtInsts) const; bool selectLog10(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const; @@ -303,7 +314,8 @@ class SPIRVInstructionSelector : public InstructionSelector { MachineInstr &I) const; bool selectModf(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const; - + bool selectFrexp(Register ResVReg, const SPIRVType *ResType, + MachineInstr &I) const; // Utilities std::pair buildI32Constant(uint32_t Val, MachineInstr &I, @@ -708,7 +720,22 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg, return selectSUCmp(ResVReg, ResType, I, true); case TargetOpcode::G_UCMP: return selectSUCmp(ResVReg, ResType, I, false); - + case TargetOpcode::G_LROUND: + case TargetOpcode::G_LLROUND: { + Register regForLround = + MRI->createVirtualRegister(MRI->getRegClass(ResVReg), "lround"); + MRI->setRegClass(regForLround, &SPIRV::iIDRegClass); + GR.assignSPIRVTypeToVReg(GR.getSPIRVTypeForVReg(I.getOperand(1).getReg()), + regForLround, *(I.getParent()->getParent())); + selectExtInstForLRound(regForLround, GR.getSPIRVTypeForVReg(regForLround), + I, CL::round, GL::Round); + MachineBasicBlock &BB = *I.getParent(); + auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConvertFToS)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(regForLround); + return MIB.constrainAllUses(TII, 
TRI, RBI); + } case TargetOpcode::G_STRICT_FMA: case TargetOpcode::G_FMA: return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma); @@ -725,6 +752,8 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg, return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp); case TargetOpcode::G_FEXP2: return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2); + case TargetOpcode::G_FMODF: + return selectModf(ResVReg, ResType, I); case TargetOpcode::G_FLOG: return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log); @@ -809,6 +838,9 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg, case TargetOpcode::G_USUBSAT: return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat); + case TargetOpcode::G_FFREXP: + return selectFrexp(ResVReg, ResType, I); + case TargetOpcode::G_UADDO: return selectOverflowArith(ResVReg, ResType, I, ResType->getOpcode() == SPIRV::OpTypeVector @@ -975,16 +1007,26 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg, // represent code after lowering or intrinsics which are not implemented but // should not crash when found in a customer's LLVM IR input. case TargetOpcode::G_TRAP: - case TargetOpcode::G_DEBUGTRAP: case TargetOpcode::G_UBSANTRAP: case TargetOpcode::DBG_LABEL: return true; + case TargetOpcode::G_DEBUGTRAP: + return selectDebugTrap(ResVReg, ResType, I); default: return false; } } +bool SPIRVInstructionSelector::selectDebugTrap(Register ResVReg, + const SPIRVType *ResType, + MachineInstr &I) const { + unsigned Opcode = SPIRV::OpNop; + MachineBasicBlock &BB = *I.getParent(); + return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) + .constrainAllUses(TII, TRI, RBI); +} + bool SPIRVInstructionSelector::selectExtInst(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, @@ -1033,7 +1075,8 @@ bool SPIRVInstructionSelector::selectExtInst(Register ResVReg, .addDef(ResVReg) .addUse(GR.getSPIRVTypeID(ResType)) .addImm(static_cast(Set)) - .addImm(Opcode); + .addImm(Opcode) + .setMIFlags(I.getFlags()); const unsigned NumOps = I.getNumOperands(); unsigned Index = 1; if (Index < NumOps && @@ -1047,6 +1090,88 @@ bool SPIRVInstructionSelector::selectExtInst(Register ResVReg, } return false; } +bool SPIRVInstructionSelector::selectExtInstForLRound( + Register ResVReg, const SPIRVType *ResType, MachineInstr &I, + CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst) const { + ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst}, + {SPIRV::InstructionSet::GLSL_std_450, GLInst}}; + return selectExtInstForLRound(ResVReg, ResType, I, ExtInsts); +} + +bool SPIRVInstructionSelector::selectExtInstForLRound( + Register ResVReg, const SPIRVType *ResType, MachineInstr &I, + const ExtInstList &Insts) const { + for (const auto &Ex : Insts) { + SPIRV::InstructionSet::InstructionSet Set = Ex.first; + uint32_t Opcode = Ex.second; + if (STI.canUseExtInstSet(Set)) { + MachineBasicBlock &BB = *I.getParent(); + auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addImm(static_cast(Set)) + .addImm(Opcode); + const unsigned NumOps = I.getNumOperands(); + unsigned Index = 1; + if (Index < NumOps && + I.getOperand(Index).getType() == + MachineOperand::MachineOperandType::MO_IntrinsicID) + Index = 2; + for (; Index < NumOps; ++Index) + MIB.add(I.getOperand(Index)); + MIB.constrainAllUses(TII, TRI, RBI); + return true; + } + } + return false; +} + +bool SPIRVInstructionSelector::selectFrexp(Register ResVReg, + const SPIRVType *ResType, + MachineInstr &I) const { + ExtInstList 
ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp}, + {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}}; + for (const auto &Ex : ExtInsts) { + SPIRV::InstructionSet::InstructionSet Set = Ex.first; + uint32_t Opcode = Ex.second; + if (!STI.canUseExtInstSet(Set)) + continue; + + MachineIRBuilder MIRBuilder(I); + SPIRVType *PointeeTy = GR.getSPIRVTypeForVReg(I.getOperand(1).getReg()); + const SPIRVType *PointerType = GR.getOrCreateSPIRVPointerType( + PointeeTy, MIRBuilder, SPIRV::StorageClass::Function); + Register PointerVReg = + createVirtualRegister(PointerType, &GR, MRI, MRI->getMF()); + + auto It = getOpVariableMBBIt(I); + auto MIB = BuildMI(*It->getParent(), It, It->getDebugLoc(), + TII.get(SPIRV::OpVariable)) + .addDef(PointerVReg) + .addUse(GR.getSPIRVTypeID(PointerType)) + .addImm(static_cast(SPIRV::StorageClass::Function)) + .constrainAllUses(TII, TRI, RBI); + + MIB = MIB & + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addImm(static_cast(Ex.first)) + .addImm(Opcode) + .add(I.getOperand(2)) + .addUse(PointerVReg) + .constrainAllUses(TII, TRI, RBI); + + MIB = MIB & + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad)) + .addDef(I.getOperand(1).getReg()) + .addUse(GR.getSPIRVTypeID(PointeeTy)) + .addUse(PointerVReg) + .constrainAllUses(TII, TRI, RBI); + return MIB; + } + return false; +} bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg, const SPIRVType *ResType, @@ -2056,6 +2181,17 @@ bool SPIRVInstructionSelector::selectOpIsInf(Register ResVReg, .constrainAllUses(TII, TRI, RBI); } +bool SPIRVInstructionSelector::selectOpIsNan(Register ResVReg, + const SPIRVType *ResType, + MachineInstr &I) const { + MachineBasicBlock &BB = *I.getParent(); + return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIsNan)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(I.getOperand(2).getReg()) + .constrainAllUses(TII, TRI, RBI); +} + template bool SPIRVInstructionSelector::selectDot4AddPacked(Register ResVReg, const SPIRVType *ResType, @@ -2496,6 +2632,7 @@ bool SPIRVInstructionSelector::selectCmp(Register ResVReg, .addUse(GR.getSPIRVTypeID(ResType)) .addUse(Cmp0) .addUse(Cmp1) + .setMIFlags(I.getFlags()) .constrainAllUses(TII, TRI, RBI); } @@ -3199,6 +3336,8 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract); case Intrinsic::spv_isinf: return selectOpIsInf(ResVReg, ResType, I); + case Intrinsic::spv_isnan: + return selectOpIsNan(ResVReg, ResType, I); case Intrinsic::spv_normalize: return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize); case Intrinsic::spv_refract: @@ -3316,9 +3455,6 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, case Intrinsic::spv_discard: { return selectDiscard(ResVReg, ResType, I); } - case Intrinsic::modf: { - return selectModf(ResVReg, ResType, I); - } default: { std::string DiagMsg; raw_string_ostream OS(DiagMsg); @@ -4131,6 +4267,7 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg, PtrTyReg, LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function), GR.getPointerSize())); + // Assign SPIR-V type of the pointer type of the alloca variable to the // new register. 
GR.assignSPIRVTypeToVReg(PtrType, PtrTyReg, MIRBuilder.getMF()); @@ -4143,10 +4280,7 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg, .addUse(GR.getSPIRVTypeID(PtrType)) .addImm(static_cast(SPIRV::StorageClass::Function)); Register Variable = AllocaMIB->getOperand(0).getReg(); - // Modf must have 4 operands, the first two are the 2 parts of the result, - // the third is the operand, and the last one is the floating point value. - assert(I.getNumOperands() == 4 && - "Expected 4 operands for modf instruction"); + MachineBasicBlock &BB = *I.getParent(); // Create the OpenCLLIB::modf instruction. auto MIB = @@ -4156,8 +4290,8 @@ bool SPIRVInstructionSelector::selectModf(Register ResVReg, .addImm(static_cast(SPIRV::InstructionSet::OpenCL_std)) .addImm(CL::modf) .setMIFlags(I.getFlags()) - .add(I.getOperand(3)) // Floating point value. - .addUse(Variable); // Pointer to integral part. + .add(I.getOperand(I.getNumExplicitDefs())) // Floating point value. + .addUse(Variable); // Pointer to integral part. // Assign the integral part stored in the ptr to the second element of the // result. Register IntegralPartReg = I.getOperand(1).getReg(); diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp index aea3397ad2fd6..205895e48a379 100644 --- a/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp @@ -39,6 +39,7 @@ class SPIRVLegalizeImplicitBinding : public ModulePass { void collectBindingInfo(Module &M); uint32_t getAndReserveFirstUnusedBinding(uint32_t DescSet); void replaceImplicitBindingCalls(Module &M); + void verifyUniqueOrderIdPerResource(SmallVectorImpl &Calls); // A map from descriptor set to a bit vector of used binding numbers. std::vector UsedBindings; @@ -94,6 +95,33 @@ void SPIRVLegalizeImplicitBinding::collectBindingInfo(Module &M) { }); } +void SPIRVLegalizeImplicitBinding::verifyUniqueOrderIdPerResource( + SmallVectorImpl &Calls) { + // Check that the order Id is unique per resource. + for (uint32_t i = 1; i < Calls.size(); ++i) { + const uint32_t OrderIdArgIdx = 0; + const uint32_t DescSetArgIdx = 1; + const uint32_t OrderA = + cast(Calls[i - 1]->getArgOperand(OrderIdArgIdx)) + ->getZExtValue(); + const uint32_t OrderB = + cast(Calls[i]->getArgOperand(OrderIdArgIdx)) + ->getZExtValue(); + if (OrderA == OrderB) { + const uint32_t DescSetA = + cast(Calls[i - 1]->getArgOperand(DescSetArgIdx)) + ->getZExtValue(); + const uint32_t DescSetB = + cast(Calls[i]->getArgOperand(DescSetArgIdx)) + ->getZExtValue(); + if (DescSetA != DescSetB) { + report_fatal_error("Implicit binding calls with the same order ID must " + "have the same descriptor set"); + } + } + } +} + uint32_t SPIRVLegalizeImplicitBinding::getAndReserveFirstUnusedBinding( uint32_t DescSet) { if (UsedBindings.size() <= DescSet) { @@ -112,11 +140,23 @@ uint32_t SPIRVLegalizeImplicitBinding::getAndReserveFirstUnusedBinding( } void SPIRVLegalizeImplicitBinding::replaceImplicitBindingCalls(Module &M) { + uint32_t lastOrderId = -1; + uint32_t lastBindingNumber = -1; + for (CallInst *OldCI : ImplicitBindingCalls) { IRBuilder<> Builder(OldCI); + const uint32_t OrderId = + cast(OldCI->getArgOperand(0))->getZExtValue(); const uint32_t DescSet = cast(OldCI->getArgOperand(1))->getZExtValue(); - const uint32_t NewBinding = getAndReserveFirstUnusedBinding(DescSet); + + // Reuse an existing binding for this order ID, if one was already assigned. + // Otherwise, assign a new binding. 
+ const uint32_t NewBinding = (lastOrderId == OrderId) + ? lastBindingNumber + : getAndReserveFirstUnusedBinding(DescSet); + lastOrderId = OrderId; + lastBindingNumber = NewBinding; SmallVector Args; Args.push_back(Builder.getInt32(DescSet)); @@ -142,6 +182,7 @@ bool SPIRVLegalizeImplicitBinding::runOnModule(Module &M) { if (ImplicitBindingCalls.empty()) { return false; } + verifyUniqueOrderIdPerResource(ImplicitBindingCalls); replaceImplicitBindingCalls(M); return true; diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp index 170bddd507e3b..53074ea3b2597 100644 --- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp @@ -276,6 +276,10 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) { {G_UADDO, G_SADDO, G_USUBO, G_SSUBO, G_UMULO, G_SMULO}) .alwaysLegal(); + getActionDefinitionsBuilder({G_LROUND, G_LLROUND}) + .legalForCartesianProduct(allFloatScalarsAndVectors, + allIntScalarsAndVectors); + // FP conversions. getActionDefinitionsBuilder({G_FPTRUNC, G_FPEXT}) .legalForCartesianProduct(allFloatScalarsAndVectors); @@ -286,6 +290,9 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) { // Control-flow. In some cases (e.g. constants) s1 may be promoted to s32. getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s32}); + getActionDefinitionsBuilder(G_FFREXP).legalForCartesianProduct( + allFloatScalarsAndVectors, {s32, v2s32, v3s32, v4s32, v8s32, v16s32}); + // TODO: Review the target OpenCL and GLSL Extended Instruction Set specs to // tighten these requirements. Many of these math functions are only legal on // specific bitwidths, so they are not selectable for @@ -293,6 +300,7 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) { getActionDefinitionsBuilder({G_STRICT_FSQRT, G_FPOW, G_FEXP, + G_FMODF, G_FEXP2, G_FLOG, G_FLOG2, @@ -580,7 +588,8 @@ bool SPIRVLegalizerInfo::legalizeIsFPClass( } if (FPClassTest PartialCheck = Mask & fcNan) { - auto InfWithQnanBitC = buildSPIRVConstant(IntTy, Inf | QNaNBitMask); + auto InfWithQnanBitC = + buildSPIRVConstant(IntTy, std::move(Inf) | QNaNBitMask); if (PartialCheck == fcNan) { // isnan(V) ==> abs(V) u> int(inf) appendToRes( @@ -606,7 +615,7 @@ bool SPIRVLegalizerInfo::legalizeIsFPClass( APInt ExpLSB = ExpMask & ~(ExpMask.shl(1)); auto ExpMinusOne = assignSPIRVTy( MIRBuilder.buildSub(IntTy, Abs, buildSPIRVConstant(IntTy, ExpLSB))); - APInt MaxExpMinusOne = ExpMask - ExpLSB; + APInt MaxExpMinusOne = std::move(ExpMask) - ExpLSB; auto NormalRes = assignSPIRVTy( MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, ExpMinusOne, buildSPIRVConstant(IntTy, MaxExpMinusOne))); diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp index a95f393b75605..dc717a6ca5870 100644 --- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp @@ -248,6 +248,22 @@ static InstrSignature instrToSignature(const MachineInstr &MI, Register DefReg; InstrSignature Signature{MI.getOpcode()}; for (unsigned i = 0; i < MI.getNumOperands(); ++i) { + // The only decorations that can be applied more than once to a given + // or structure member are UserSemantic(5635), CacheControlLoadINTEL (6442), + // and CacheControlStoreINTEL (6443). For all the rest of decorations, we + // will only add to the signature the Opcode, the id to which it applies, + // and the decoration id, disregarding any decoration flags. 
This will + // ensure that any subsequent decoration with the same id will be deemed as + // a duplicate. Then, at the call site, we will be able to handle duplicates + // in the best way. + unsigned Opcode = MI.getOpcode(); + if ((Opcode == SPIRV::OpDecorate) && i >= 2) { + unsigned DecorationID = MI.getOperand(1).getImm(); + if (DecorationID != SPIRV::Decoration::UserSemantic && + DecorationID != SPIRV::Decoration::CacheControlLoadINTEL && + DecorationID != SPIRV::Decoration::CacheControlStoreINTEL) + continue; + } const MachineOperand &MO = MI.getOperand(i); size_t h; if (MO.isReg()) { @@ -559,8 +575,54 @@ static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI, MAI.setSkipEmission(&MI); InstrSignature MISign = instrToSignature(MI, MAI, true); auto FoundMI = IS.insert(std::move(MISign)); - if (!FoundMI.second) + if (!FoundMI.second) { + if (MI.getOpcode() == SPIRV::OpDecorate) { + assert(MI.getNumOperands() >= 2 && + "Decoration instructions must have at least 2 operands"); + assert(MSType == SPIRV::MB_Annotations && + "Only OpDecorate instructions can be duplicates"); + // For FPFastMathMode decoration, we need to merge the flags of the + // duplicate decoration with the original one, so we need to find the + // original instruction that has the same signature. For the rest of + // instructions, we will simply skip the duplicate. + if (MI.getOperand(1).getImm() != SPIRV::Decoration::FPFastMathMode) + return; // Skip duplicates of other decorations. + + const SPIRV::InstrList &Decorations = MAI.MS[MSType]; + for (const MachineInstr *OrigMI : Decorations) { + if (instrToSignature(*OrigMI, MAI, true) == MISign) { + assert(OrigMI->getNumOperands() == MI.getNumOperands() && + "Original instruction must have the same number of operands"); + assert( + OrigMI->getNumOperands() == 3 && + "FPFastMathMode decoration must have 3 operands for OpDecorate"); + unsigned OrigFlags = OrigMI->getOperand(2).getImm(); + unsigned NewFlags = MI.getOperand(2).getImm(); + if (OrigFlags == NewFlags) + return; // No need to merge, the flags are the same. + + // Emit warning about possible conflict between flags. + unsigned FinalFlags = OrigFlags | NewFlags; + llvm::errs() + << "Warning: Conflicting FPFastMathMode decoration flags " + "in instruction: " + << *OrigMI << "Original flags: " << OrigFlags + << ", new flags: " << NewFlags + << ". They will be merged on a best effort basis, but not " + "validated. Final flags: " + << FinalFlags << "\n"; + MachineInstr *OrigMINonConst = const_cast(OrigMI); + MachineOperand &OrigFlagsOp = OrigMINonConst->getOperand(2); + OrigFlagsOp = + MachineOperand::CreateImm(static_cast(FinalFlags)); + return; // Merge done, so we found a duplicate; don't add it to MAI.MS + } + } + assert(false && "No original instruction found for the duplicate " + "OpDecorate, but we found one in IS."); + } return; // insert failed, so we found a duplicate; don't add it to MAI.MS + } // No duplicates, so add it. 
if (Append) MAI.MS[MSType].push_back(&MI); @@ -934,6 +996,11 @@ static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex, } else if (Dec == SPIRV::Decoration::FPMaxErrorDecorationINTEL) { Reqs.addRequirements(SPIRV::Capability::FPMaxErrorINTEL); Reqs.addExtension(SPIRV::Extension::SPV_INTEL_fp_max_error); + } else if (Dec == SPIRV::Decoration::FPFastMathMode) { + if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) { + Reqs.addRequirements(SPIRV::Capability::FloatControls2); + Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls2); + } } } @@ -1222,6 +1289,31 @@ static void AddDotProductRequirements(const MachineInstr &MI, } } +void addPrintfRequirements(const MachineInstr &MI, + SPIRV::RequirementHandler &Reqs, + const SPIRVSubtarget &ST) { + SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry(); + const SPIRVType *PtrType = GR->getSPIRVTypeForVReg(MI.getOperand(4).getReg()); + if (PtrType) { + MachineOperand ASOp = PtrType->getOperand(1); + if (ASOp.isImm()) { + unsigned AddrSpace = ASOp.getImm(); + if (AddrSpace != SPIRV::StorageClass::UniformConstant) { + if (!ST.canUseExtension( + SPIRV::Extension:: + SPV_EXT_relaxed_printf_string_address_space)) { + report_fatal_error("SPV_EXT_relaxed_printf_string_address_space is " + "required because printf uses a format string not " + "in constant address space.", + false); + } + Reqs.addExtension( + SPIRV::Extension::SPV_EXT_relaxed_printf_string_address_space); + } + } + } +} + static bool isBFloat16Type(const SPIRVType *TypeDef) { return TypeDef && TypeDef->getNumOperands() == 3 && TypeDef->getOpcode() == SPIRV::OpTypeFloat && @@ -1230,8 +1322,9 @@ static bool isBFloat16Type(const SPIRVType *TypeDef) { } void addInstrRequirements(const MachineInstr &MI, - SPIRV::RequirementHandler &Reqs, + SPIRV::ModuleAnalysisInfo &MAI, const SPIRVSubtarget &ST) { + SPIRV::RequirementHandler &Reqs = MAI.Reqs; switch (MI.getOpcode()) { case SPIRV::OpMemoryModel: { int64_t Addr = MI.getOperand(0).getImm(); @@ -1321,6 +1414,12 @@ void addInstrRequirements(const MachineInstr &MI, static_cast( SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) { Reqs.addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info); + break; + } + if (MI.getOperand(3).getImm() == + static_cast(SPIRV::OpenCLExtInst::printf)) { + addPrintfRequirements(MI, Reqs, ST); + break; } break; } @@ -1781,15 +1880,45 @@ void addInstrRequirements(const MachineInstr &MI, break; case SPIRV::OpConvertHandleToImageINTEL: case SPIRV::OpConvertHandleToSamplerINTEL: - case SPIRV::OpConvertHandleToSampledImageINTEL: + case SPIRV::OpConvertHandleToSampledImageINTEL: { if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bindless_images)) report_fatal_error("OpConvertHandleTo[Image/Sampler/SampledImage]INTEL " "instructions require the following SPIR-V extension: " "SPV_INTEL_bindless_images", false); + SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry(); + SPIRV::AddressingModel::AddressingModel AddrModel = MAI.Addr; + SPIRVType *TyDef = GR->getSPIRVTypeForVReg(MI.getOperand(1).getReg()); + if (MI.getOpcode() == SPIRV::OpConvertHandleToImageINTEL && + TyDef->getOpcode() != SPIRV::OpTypeImage) { + report_fatal_error("Incorrect return type for the instruction " + "OpConvertHandleToImageINTEL", + false); + } else if (MI.getOpcode() == SPIRV::OpConvertHandleToSamplerINTEL && + TyDef->getOpcode() != SPIRV::OpTypeSampler) { + report_fatal_error("Incorrect return type for the instruction " + "OpConvertHandleToSamplerINTEL", + false); + } else if (MI.getOpcode() == 
SPIRV::OpConvertHandleToSampledImageINTEL && + TyDef->getOpcode() != SPIRV::OpTypeSampledImage) { + report_fatal_error("Incorrect return type for the instruction " + "OpConvertHandleToSampledImageINTEL", + false); + } + SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(MI.getOperand(2).getReg()); + unsigned Bitwidth = GR->getScalarOrVectorBitWidth(SpvTy); + if (!(Bitwidth == 32 && AddrModel == SPIRV::AddressingModel::Physical32) && + !(Bitwidth == 64 && AddrModel == SPIRV::AddressingModel::Physical64)) { + report_fatal_error( + "Parameter value must be a 32-bit scalar in case of " + "Physical32 addressing model or a 64-bit scalar in case of " + "Physical64 addressing model", + false); + } Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bindless_images); Reqs.addCapability(SPIRV::Capability::BindlessImagesINTEL); break; + } case SPIRV::OpSubgroup2DBlockLoadINTEL: case SPIRV::OpSubgroup2DBlockLoadTransposeINTEL: case SPIRV::OpSubgroup2DBlockLoadTransformINTEL: @@ -1927,15 +2056,18 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI, continue; for (const MachineBasicBlock &MBB : *MF) for (const MachineInstr &MI : MBB) - addInstrRequirements(MI, MAI.Reqs, ST); + addInstrRequirements(MI, MAI, ST); } // Collect requirements for OpExecutionMode instructions. auto Node = M.getNamedMetadata("spirv.ExecutionMode"); if (Node) { - bool RequireFloatControls = false, RequireFloatControls2 = false, + bool RequireFloatControls = false, RequireIntelFloatControls2 = false, + RequireKHRFloatControls2 = false, VerLower14 = !ST.isAtLeastSPIRVVer(VersionTuple(1, 4)); - bool HasFloatControls2 = + bool HasIntelFloatControls2 = ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2); + bool HasKHRFloatControls2 = + ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2); for (unsigned i = 0; i < Node->getNumOperands(); i++) { MDNode *MDN = cast(Node->getOperand(i)); const MDOperand &MDOp = MDN->getOperand(1); @@ -1948,7 +2080,6 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI, switch (EM) { case SPIRV::ExecutionMode::DenormPreserve: case SPIRV::ExecutionMode::DenormFlushToZero: - case SPIRV::ExecutionMode::SignedZeroInfNanPreserve: case SPIRV::ExecutionMode::RoundingModeRTE: case SPIRV::ExecutionMode::RoundingModeRTZ: RequireFloatControls = VerLower14; @@ -1959,8 +2090,28 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI, case SPIRV::ExecutionMode::RoundingModeRTNINTEL: case SPIRV::ExecutionMode::FloatingPointModeALTINTEL: case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL: - if (HasFloatControls2) { - RequireFloatControls2 = true; + if (HasIntelFloatControls2) { + RequireIntelFloatControls2 = true; + MAI.Reqs.getAndAddRequirements( + SPIRV::OperandCategory::ExecutionModeOperand, EM, ST); + } + break; + case SPIRV::ExecutionMode::FPFastMathDefault: { + if (HasKHRFloatControls2) { + RequireKHRFloatControls2 = true; + MAI.Reqs.getAndAddRequirements( + SPIRV::OperandCategory::ExecutionModeOperand, EM, ST); + } + break; + } + case SPIRV::ExecutionMode::ContractionOff: + case SPIRV::ExecutionMode::SignedZeroInfNanPreserve: + if (HasKHRFloatControls2) { + RequireKHRFloatControls2 = true; + MAI.Reqs.getAndAddRequirements( + SPIRV::OperandCategory::ExecutionModeOperand, + SPIRV::ExecutionMode::FPFastMathDefault, ST); + } else { MAI.Reqs.getAndAddRequirements( SPIRV::OperandCategory::ExecutionModeOperand, EM, ST); } @@ -1975,8 +2126,10 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI, if (RequireFloatControls && 
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls)) MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls); - if (RequireFloatControls2) + if (RequireIntelFloatControls2) MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_float_controls2); + if (RequireKHRFloatControls2) + MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls2); } for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) { const Function &F = *FI; @@ -2016,8 +2169,11 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI, } } -static unsigned getFastMathFlags(const MachineInstr &I) { +static unsigned getFastMathFlags(const MachineInstr &I, + const SPIRVSubtarget &ST) { unsigned Flags = SPIRV::FPFastMathMode::None; + bool CanUseKHRFloatControls2 = + ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2); if (I.getFlag(MachineInstr::MIFlag::FmNoNans)) Flags |= SPIRV::FPFastMathMode::NotNaN; if (I.getFlag(MachineInstr::MIFlag::FmNoInfs)) @@ -2026,12 +2182,45 @@ static unsigned getFastMathFlags(const MachineInstr &I) { Flags |= SPIRV::FPFastMathMode::NSZ; if (I.getFlag(MachineInstr::MIFlag::FmArcp)) Flags |= SPIRV::FPFastMathMode::AllowRecip; - if (I.getFlag(MachineInstr::MIFlag::FmReassoc)) - Flags |= SPIRV::FPFastMathMode::Fast; + if (I.getFlag(MachineInstr::MIFlag::FmContract) && CanUseKHRFloatControls2) + Flags |= SPIRV::FPFastMathMode::AllowContract; + if (I.getFlag(MachineInstr::MIFlag::FmReassoc)) { + if (CanUseKHRFloatControls2) + // LLVM reassoc maps to SPIRV transform, see + // https://github.com/KhronosGroup/SPIRV-Registry/issues/326 for details. + // Because we are enabling AllowTransform, we must enable AllowReassoc and + // AllowContract too, as required by SPIRV spec. Also, we used to map + // MIFlag::FmReassoc to FPFastMathMode::Fast, which now should instead be + // replaced by turning on all the other bits. Therefore, we're + // enabling every bit here except None and Fast. + Flags |= SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf | + SPIRV::FPFastMathMode::NSZ | SPIRV::FPFastMathMode::AllowRecip | + SPIRV::FPFastMathMode::AllowTransform | + SPIRV::FPFastMathMode::AllowReassoc | + SPIRV::FPFastMathMode::AllowContract; + else + Flags |= SPIRV::FPFastMathMode::Fast; + } + + if (CanUseKHRFloatControls2) { + // Error out if SPIRV::FPFastMathMode::Fast is enabled. + assert(!(Flags & SPIRV::FPFastMathMode::Fast) && + "SPIRV::FPFastMathMode::Fast is deprecated and should not be used " + "anymore."); + + // Error out if AllowTransform is enabled without AllowReassoc and + // AllowContract.
+ assert((!(Flags & SPIRV::FPFastMathMode::AllowTransform) || + ((Flags & SPIRV::FPFastMathMode::AllowReassoc && + Flags & SPIRV::FPFastMathMode::AllowContract))) && + "SPIRV::FPFastMathMode::AllowTransform requires AllowReassoc and " + "AllowContract flags to be enabled as well."); + } + return Flags; } -static bool isFastMathMathModeAvailable(const SPIRVSubtarget &ST) { +static bool isFastMathModeAvailable(const SPIRVSubtarget &ST) { if (ST.isKernel()) return true; if (ST.getSPIRVVersion() < VersionTuple(1, 2)) @@ -2039,9 +2228,10 @@ static bool isFastMathMathModeAvailable(const SPIRVSubtarget &ST) { return ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2); } -static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST, - const SPIRVInstrInfo &TII, - SPIRV::RequirementHandler &Reqs) { +static void handleMIFlagDecoration( + MachineInstr &I, const SPIRVSubtarget &ST, const SPIRVInstrInfo &TII, + SPIRV::RequirementHandler &Reqs, const SPIRVGlobalRegistry *GR, + SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec) { if (I.getFlag(MachineInstr::MIFlag::NoSWrap) && TII.canUseNSW(I) && getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand, SPIRV::Decoration::NoSignedWrap, ST, Reqs) @@ -2057,13 +2247,53 @@ static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST, buildOpDecorate(I.getOperand(0).getReg(), I, TII, SPIRV::Decoration::NoUnsignedWrap, {}); } - if (!TII.canUseFastMathFlags(I)) - return; - unsigned FMFlags = getFastMathFlags(I); - if (FMFlags == SPIRV::FPFastMathMode::None) + if (!TII.canUseFastMathFlags( + I, ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))) return; - if (isFastMathMathModeAvailable(ST)) { + unsigned FMFlags = getFastMathFlags(I, ST); + if (FMFlags == SPIRV::FPFastMathMode::None) { + // We also need to check if any FPFastMathDefault info was set for the + // types used in this instruction. + if (FPFastMathDefaultInfoVec.empty()) + return; + + // There are three types of instructions that can use fast math flags: + // 1. Arithmetic instructions (FAdd, FMul, FSub, FDiv, FRem, etc.) + // 2. Relational instructions (FCmp, FOrd, FUnord, etc.) + // 3. Extended instructions (ExtInst) + // For arithmetic instructions, the floating point type can be in the + // result type or in the operands, but they all must be the same. + // For the relational and logical instructions, the floating point type + // can only be in the operands 1 and 2, not the result type. Also, the + // operands must have the same type. For the extended instructions, the + // floating point type can be in the result type or in the operands. It's + // unclear if the operands and the result type must be the same. Let's + // assume they must be. Therefore, for 1. and 2., we can check the first + // operand type, and for 3. we can check the result type. + assert(I.getNumOperands() >= 3 && "Expected at least 3 operands"); + Register ResReg = I.getOpcode() == SPIRV::OpExtInst + ? I.getOperand(1).getReg() + : I.getOperand(2).getReg(); + SPIRVType *ResType = GR->getSPIRVTypeForVReg(ResReg, I.getMF()); + const Type *Ty = GR->getTypeForSPIRVType(ResType); + Ty = Ty->isVectorTy() ? cast(Ty)->getElementType() : Ty; + + // Match instruction type with the FPFastMathDefaultInfoVec. 
+ bool Emit = false; + for (SPIRV::FPFastMathDefaultInfo &Elem : FPFastMathDefaultInfoVec) { + if (Ty == Elem.Ty) { + FMFlags = Elem.FastMathFlags; + Emit = Elem.ContractionOff || Elem.SignedZeroInfNanPreserve || + Elem.FPFastMathDefault; + break; + } + } + + if (FMFlags == SPIRV::FPFastMathMode::None && !Emit) + return; + } + if (isFastMathModeAvailable(ST)) { Register DstReg = I.getOperand(0).getReg(); buildOpDecorate(DstReg, I, TII, SPIRV::Decoration::FPFastMathMode, {FMFlags}); @@ -2073,14 +2303,17 @@ static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST, // Walk all functions and add decorations related to MI flags. static void addDecorations(const Module &M, const SPIRVInstrInfo &TII, MachineModuleInfo *MMI, const SPIRVSubtarget &ST, - SPIRV::ModuleAnalysisInfo &MAI) { + SPIRV::ModuleAnalysisInfo &MAI, + const SPIRVGlobalRegistry *GR) { for (auto F = M.begin(), E = M.end(); F != E; ++F) { MachineFunction *MF = MMI->getMachineFunction(*F); if (!MF) continue; + for (auto &MBB : *MF) for (auto &MI : MBB) - handleMIFlagDecoration(MI, ST, TII, MAI.Reqs); + handleMIFlagDecoration(MI, ST, TII, MAI.Reqs, GR, + MAI.FPFastMathDefaultInfoMap[&(*F)]); } } @@ -2126,6 +2359,111 @@ static void patchPhis(const Module &M, SPIRVGlobalRegistry *GR, } } +static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec( + const Module &M, SPIRV::ModuleAnalysisInfo &MAI, const Function *F) { + auto it = MAI.FPFastMathDefaultInfoMap.find(F); + if (it != MAI.FPFastMathDefaultInfoMap.end()) + return it->second; + + // If the map does not contain the entry, create a new one. Initialize it to + // contain all 3 elements sorted by bit width of target type: {half, float, + // double}. + SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec; + FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()), + SPIRV::FPFastMathMode::None); + FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()), + SPIRV::FPFastMathMode::None); + FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()), + SPIRV::FPFastMathMode::None); + return MAI.FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec); +} + +static SPIRV::FPFastMathDefaultInfo &getFPFastMathDefaultInfo( + SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, + const Type *Ty) { + size_t BitWidth = Ty->getScalarSizeInBits(); + int Index = + SPIRV::FPFastMathDefaultInfoVector::computeFPFastMathDefaultInfoVecIndex( + BitWidth); + assert(Index >= 0 && Index < 3 && + "Expected FPFastMathDefaultInfo for half, float, or double"); + assert(FPFastMathDefaultInfoVec.size() == 3 && + "Expected FPFastMathDefaultInfoVec to have exactly 3 elements"); + return FPFastMathDefaultInfoVec[Index]; +} + +static void collectFPFastMathDefaults(const Module &M, + SPIRV::ModuleAnalysisInfo &MAI, + const SPIRVSubtarget &ST) { + if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) + return; + + // Store the FPFastMathDefaultInfo in the FPFastMathDefaultInfoMap. + // We need the entry point (function) as the key, and the target + // type and flags as the value. + // We also need to check ContractionOff and SignedZeroInfNanPreserve + // execution modes, as they are now deprecated and must be replaced + // with FPFastMathDefaultInfo. 
+ auto Node = M.getNamedMetadata("spirv.ExecutionMode"); + if (!Node) + return; + + for (unsigned i = 0; i < Node->getNumOperands(); i++) { + MDNode *MDN = cast(Node->getOperand(i)); + assert(MDN->getNumOperands() >= 2 && "Expected at least 2 operands"); + const Function *F = cast( + cast(MDN->getOperand(0))->getValue()); + const auto EM = + cast( + cast(MDN->getOperand(1))->getValue()) + ->getZExtValue(); + if (EM == SPIRV::ExecutionMode::FPFastMathDefault) { + assert(MDN->getNumOperands() == 4 && + "Expected 4 operands for FPFastMathDefault"); + + const Type *T = cast(MDN->getOperand(2))->getType(); + unsigned Flags = + cast( + cast(MDN->getOperand(3))->getValue()) + ->getZExtValue(); + SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec = + getOrCreateFPFastMathDefaultInfoVec(M, MAI, F); + SPIRV::FPFastMathDefaultInfo &Info = + getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T); + Info.FastMathFlags = Flags; + Info.FPFastMathDefault = true; + } else if (EM == SPIRV::ExecutionMode::ContractionOff) { + assert(MDN->getNumOperands() == 2 && + "Expected no operands for ContractionOff"); + + // We need to save this info for every possible FP type, i.e. {half, + // float, double, fp128}. + SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec = + getOrCreateFPFastMathDefaultInfoVec(M, MAI, F); + for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) { + Info.ContractionOff = true; + } + } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) { + assert(MDN->getNumOperands() == 3 && + "Expected 1 operand for SignedZeroInfNanPreserve"); + unsigned TargetWidth = + cast( + cast(MDN->getOperand(2))->getValue()) + ->getZExtValue(); + // We need to save this info only for the FP type with TargetWidth. + SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec = + getOrCreateFPFastMathDefaultInfoVec(M, MAI, F); + int Index = SPIRV::FPFastMathDefaultInfoVector:: + computeFPFastMathDefaultInfoVecIndex(TargetWidth); + assert(Index >= 0 && Index < 3 && + "Expected FPFastMathDefaultInfo for half, float, or double"); + assert(FPFastMathDefaultInfoVec.size() == 3 && + "Expected FPFastMathDefaultInfoVec to have exactly 3 elements"); + FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true; + } + } +} + struct SPIRV::ModuleAnalysisInfo SPIRVModuleAnalysis::MAI; void SPIRVModuleAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { @@ -2147,7 +2485,8 @@ bool SPIRVModuleAnalysis::runOnModule(Module &M) { patchPhis(M, GR, *TII, MMI); addMBBNames(M, *TII, MMI, *ST, MAI); - addDecorations(M, *TII, MMI, *ST, MAI); + collectFPFastMathDefaults(M, MAI, *ST); + addDecorations(M, *TII, MMI, *ST, MAI, GR); collectReqs(M, MAI, MMI, *ST); diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h index 41c792a98534f..d8376cd1aeb5a 100644 --- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h +++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.h @@ -159,6 +159,13 @@ struct ModuleAnalysisInfo { InstrList MS[NUM_MODULE_SECTIONS]; // The table maps MBB number to SPIR-V unique ID register. DenseMap, MCRegister> BBNumToRegMap; + // The table maps function pointers to their default FP fast math info. It can + // be assumed that the SmallVector is sorted by the bit width of the type. The + // first element is the smallest bit width, and the last element is the + // largest bit width, therefore, we will have {half, float, double} in + // the order of their bit widths. 
+ DenseMap + FPFastMathDefaultInfoMap; MCRegister getFuncReg(const Function *F) { assert(F && "Function is null"); diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp index 1a08c6ac0dcaf..db6f2d61e8f29 100644 --- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp @@ -839,6 +839,7 @@ static uint32_t convertFloatToSPIRVWord(float F) { static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB) { + const SPIRVSubtarget &ST = cast(MIB.getMF().getSubtarget()); SmallVector ToErase; for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { @@ -849,7 +850,7 @@ static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MIB.setInsertPt(*MI.getParent(), MI.getNextNode()); if (isSpvIntrinsic(MI, Intrinsic::spv_assign_decoration)) { buildOpSpirvDecorations(MI.getOperand(1).getReg(), MIB, - MI.getOperand(2).getMetadata()); + MI.getOperand(2).getMetadata(), ST); } else if (isSpvIntrinsic(MI, Intrinsic::spv_assign_fpmaxerror_decoration)) { ConstantFP *OpV = mdconst::dyn_extract( diff --git a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp index 2b34f61fa2434..4e4e6fb4ab791 100644 --- a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp @@ -335,6 +335,21 @@ static void lowerFunnelShifts(IntrinsicInst *FSHIntrinsic) { FSHIntrinsic->setCalledFunction(FSHFunc); } +static void lowerConstrainedFPCmpIntrinsic( + ConstrainedFPCmpIntrinsic *ConstrainedCmpIntrinsic, + SmallVector &EraseFromParent) { + if (!ConstrainedCmpIntrinsic) + return; + // Extract the floating-point values being compared + Value *LHS = ConstrainedCmpIntrinsic->getArgOperand(0); + Value *RHS = ConstrainedCmpIntrinsic->getArgOperand(1); + FCmpInst::Predicate Pred = ConstrainedCmpIntrinsic->getPredicate(); + IRBuilder<> Builder(ConstrainedCmpIntrinsic); + Value *FCmp = Builder.CreateFCmp(Pred, LHS, RHS); + ConstrainedCmpIntrinsic->replaceAllUsesWith(FCmp); + EraseFromParent.push_back(dyn_cast(ConstrainedCmpIntrinsic)); +} + static void lowerExpectAssume(IntrinsicInst *II) { // If we cannot use the SPV_KHR_expect_assume extension, then we need to // ignore the intrinsic and move on. It should be removed later on by LLVM. 
@@ -376,6 +391,7 @@ static bool toSpvLifetimeIntrinsic(IntrinsicInst *II, Intrinsic::ID NewID) { bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) { bool Changed = false; const SPIRVSubtarget &STI = TM.getSubtarget(*F); + SmallVector EraseFromParent; for (BasicBlock &BB : *F) { for (Instruction &I : make_early_inc_range(BB)) { auto Call = dyn_cast(&I); @@ -423,9 +439,17 @@ bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) { lowerPtrAnnotation(II); Changed = true; break; + case Intrinsic::experimental_constrained_fcmp: + case Intrinsic::experimental_constrained_fcmps: + lowerConstrainedFPCmpIntrinsic(dyn_cast(II), + EraseFromParent); + Changed = true; + break; } } } + for (auto *I : EraseFromParent) + I->eraseFromParent(); return Changed; } diff --git a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td index 66ce5a2d67c3e..6a32dbabff3d3 100644 --- a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td +++ b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td @@ -802,6 +802,7 @@ defm RoundingModeRTPINTEL : ExecutionModeOperand<5620, [RoundToInfinityINTEL]>; defm RoundingModeRTNINTEL : ExecutionModeOperand<5621, [RoundToInfinityINTEL]>; defm FloatingPointModeALTINTEL : ExecutionModeOperand<5622, [FloatingPointModeINTEL]>; defm FloatingPointModeIEEEINTEL : ExecutionModeOperand<5623, [FloatingPointModeINTEL]>; +defm FPFastMathDefault : ExecutionModeOperand<6028, [FloatControls2]>; //===----------------------------------------------------------------------===// // Multiclass used to define StorageClass enum values and at the same time @@ -1153,6 +1154,9 @@ defm NotInf : FPFastMathModeOperand<0x2, [Kernel]>; defm NSZ : FPFastMathModeOperand<0x4, [Kernel]>; defm AllowRecip : FPFastMathModeOperand<0x8, [Kernel]>; defm Fast : FPFastMathModeOperand<0x10, [Kernel]>; +defm AllowContract : FPFastMathModeOperand<0x10000, [FloatControls2]>; +defm AllowReassoc : FPFastMathModeOperand<0x20000, [FloatControls2]>; +defm AllowTransform : FPFastMathModeOperand<0x40000, [FloatControls2]>; //===----------------------------------------------------------------------===// // Multiclass used to define FPRoundingMode enum values and at the same time diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp index 820e56b362edc..327c011ea178f 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp @@ -181,7 +181,7 @@ void buildOpMemberDecorate(Register Reg, MachineInstr &I, } void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder, - const MDNode *GVarMD) { + const MDNode *GVarMD, const SPIRVSubtarget &ST) { for (unsigned I = 0, E = GVarMD->getNumOperands(); I != E; ++I) { auto *OpMD = dyn_cast(GVarMD->getOperand(I)); if (!OpMD) @@ -193,6 +193,20 @@ void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder, if (!DecorationId) report_fatal_error("Expect SPIR-V operand to be the first " "element of the decoration"); + + // The goal of `spirv.Decorations` metadata is to provide a way to + // represent SPIR-V entities that do not map to LLVM in an obvious way. + // FP flags do have obvious matches between LLVM IR and SPIR-V. + // Additionally, we have no guarantee at this point that the flags passed + // through the decoration are not violated already in the optimizer passes. + // Therefore, we simply ignore FP flags, including NoContraction, and + // FPFastMathMode. 
+ if (DecorationId->getZExtValue() == + static_cast(SPIRV::Decoration::NoContraction) || + DecorationId->getZExtValue() == + static_cast(SPIRV::Decoration::FPFastMathMode)) { + continue; // Ignored. + } auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate) .addUse(Reg) .addImm(static_cast(DecorationId->getZExtValue())); diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h index 45c520a922d10..409a0fd758a32 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.h +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h @@ -113,6 +113,54 @@ class PartialOrderingVisitor { std::function Op); }; +namespace SPIRV { +struct FPFastMathDefaultInfo { + const Type *Ty = nullptr; + unsigned FastMathFlags = 0; + // With SPV_KHR_float_controls2, ContractionOff and SignedZeroInfNanPreserve are + // deprecated, and we replace them with FPFastMathDefault with the appropriate flags + // instead. However, we have no guarantee about the order in which we will + // process execution modes. Therefore it could happen that we first process + // ContractionOff, setting AllowContraction bit to 0, and then we process + // FPFastMathDefault enabling AllowContraction bit, effectively invalidating + // ContractionOff. Because of that, it's best to keep separate bits for the + // different execution modes, and we will try to combine them later when we + // emit OpExecutionMode instructions. + bool ContractionOff = false; + bool SignedZeroInfNanPreserve = false; + bool FPFastMathDefault = false; + + FPFastMathDefaultInfo() = default; + FPFastMathDefaultInfo(const Type *Ty, unsigned FastMathFlags) + : Ty(Ty), FastMathFlags(FastMathFlags) {} + bool operator==(const FPFastMathDefaultInfo &Other) const { + return Ty == Other.Ty && FastMathFlags == Other.FastMathFlags && + ContractionOff == Other.ContractionOff && + SignedZeroInfNanPreserve == Other.SignedZeroInfNanPreserve && + FPFastMathDefault == Other.FPFastMathDefault; + } +}; + +struct FPFastMathDefaultInfoVector + : public SmallVector { + static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth) { + switch (BitWidth) { + case 16: // half + return 0; + case 32: // float + return 1; + case 64: // double + return 2; + default: + report_fatal_error("Expected BitWidth to be 16, 32, 64", false); + } + llvm_unreachable( + "Unreachable code in computeFPFastMathDefaultInfoVecIndex"); + } +}; + +} // namespace SPIRV + // Add the given string as a series of integer operand, inserting null // terminators and padding to make sure the operands all have 32-bit // little-endian words. @@ -161,7 +209,7 @@ void buildOpMemberDecorate(Register Reg, MachineInstr &I, // Add an OpDecorate instruction by "spirv.Decorations" metadata node. void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder, - const MDNode *GVarMD); + const MDNode *GVarMD, const SPIRVSubtarget &ST); // Return a valid position for the OpVariable instruction inside a function, // i.e., at the beginning of the first block of the function.
@@ -508,6 +556,5 @@ unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType); MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB); - } // namespace llvm #endif // LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H diff --git a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp index cfa3511436b97..cb02e33b8e5dd 100644 --- a/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp +++ b/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp @@ -28,7 +28,6 @@ #include "llvm/MC/MCSymbol.h" #include "llvm/Support/Casting.h" #include "llvm/Support/EndianStream.h" -#include "llvm/Support/ErrorHandling.h" #include #include diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp index 2737cca62cd20..a1607097af1ef 100644 --- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp @@ -115,7 +115,8 @@ static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT, // Stack space is allocated for all arguments starting from [%fp+BIAS+128]. unsigned size = (LocVT == MVT::f128) ? 16 : 8; - Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8); + Align alignment = + (LocVT == MVT::f128 || ArgFlags.isSplit()) ? Align(16) : Align(8); unsigned Offset = State.AllocateStack(size, alignment); unsigned Reg = 0; diff --git a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp index 59d1db784c688..383c96e8cca73 100644 --- a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp +++ b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp @@ -21,7 +21,6 @@ #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCSubtargetInfo.h" -#include "llvm/Support/ErrorHandling.h" #include #include diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h index 707887c59bd65..f8706b748b355 100644 --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h @@ -528,7 +528,9 @@ class SystemZTargetLowering : public TargetLowering { bool shouldConsiderGEPOffsetSplit() const override { return true; } - bool shouldExpandCmpUsingSelects(EVT VT) const override { return true; } + bool preferSelectsOverBooleanArithmetic(EVT VT) const override { + return true; + } const char *getTargetNodeName(unsigned Opcode) const override; std::pair diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp index ad7e503cb1552..cf8569194d778 100644 --- a/llvm/lib/Target/TargetMachine.cpp +++ b/llvm/lib/Target/TargetMachine.cpp @@ -27,7 +27,7 @@ #include "llvm/Target/TargetLoweringObjectFile.h" using namespace llvm; -cl::opt NoKernelInfoEndLTO( +cl::opt llvm::NoKernelInfoEndLTO( "no-kernel-info-end-lto", cl::desc("remove the kernel-info pass at the end of the full LTO pipeline"), cl::init(false), cl::Hidden); diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEMCCodeEmitter.cpp b/llvm/lib/Target/VE/MCTargetDesc/VEMCCodeEmitter.cpp index 711937c488275..ab719390e3245 100644 --- a/llvm/lib/Target/VE/MCTargetDesc/VEMCCodeEmitter.cpp +++ b/llvm/lib/Target/VE/MCTargetDesc/VEMCCodeEmitter.cpp @@ -25,7 +25,6 @@ #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/EndianStream.h" -#include "llvm/Support/ErrorHandling.h" #include #include diff --git 
a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp index 2cfdc751a55e0..a068138791cb4 100644 --- a/llvm/lib/Target/VE/VEISelLowering.cpp +++ b/llvm/lib/Target/VE/VEISelLowering.cpp @@ -957,6 +957,8 @@ const char *VETargetLowering::getTargetNodeName(unsigned Opcode) const { EVT VETargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &, EVT VT) const { + if (VT.isVector()) + return VT.changeVectorElementType(MVT::i1); return MVT::i32; } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp index e6486e247209b..5c3127e2d3dc6 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp @@ -216,6 +216,18 @@ static MachineInstr *findStartOfTree(MachineOperand &MO, return Def; } +// FAKE_USEs are no-ops, so remove them here so that the values used by them +// will be correctly dropped later. +static void removeFakeUses(MachineFunction &MF) { + SmallVector ToDelete; + for (auto &MBB : MF) + for (auto &MI : MBB) + if (MI.isFakeUse()) + ToDelete.push_back(&MI); + for (auto *MI : ToDelete) + MI->eraseFromParent(); +} + bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { LLVM_DEBUG(dbgs() << "********** Make Locals Explicit **********\n" "********** Function: " @@ -226,6 +238,8 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { WebAssemblyFunctionInfo &MFI = *MF.getInfo(); const auto *TII = MF.getSubtarget().getInstrInfo(); + removeFakeUses(MF); + // Map non-stackified virtual registers to their local ids. DenseMap Reg2Local; diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp index 64b9dc31f75b7..163bf9ba5b089 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -186,7 +186,6 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( // SIMD-specific configuration if (Subtarget->hasSIMD128()) { - // Combine partial.reduce.add before legalization gets confused. setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); // Combine wide-vector muls, with extend inputs, to extmul_half. @@ -317,6 +316,12 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Custom); setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Custom); } + + // Partial MLA reductions. + for (auto Op : {ISD::PARTIAL_REDUCE_SMLA, ISD::PARTIAL_REDUCE_UMLA}) { + setPartialReduceMLAAction(Op, MVT::v4i32, MVT::v16i8, Legal); + setPartialReduceMLAAction(Op, MVT::v4i32, MVT::v8i16, Legal); + } } // As a special case, these operators use the type to mean the type to @@ -416,41 +421,6 @@ MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL, return TargetLowering::getPointerMemTy(DL, AS); } -bool WebAssemblyTargetLowering::shouldExpandPartialReductionIntrinsic( - const IntrinsicInst *I) const { - if (I->getIntrinsicID() != Intrinsic::vector_partial_reduce_add) - return true; - - EVT VT = EVT::getEVT(I->getType()); - if (VT.getSizeInBits() > 128) - return true; - - auto Op1 = I->getOperand(1); - - if (auto *InputInst = dyn_cast(Op1)) { - unsigned Opcode = InstructionOpcodeToISD(InputInst->getOpcode()); - if (Opcode == ISD::MUL) { - if (isa(InputInst->getOperand(0)) && - isa(InputInst->getOperand(1))) { - // dot only supports signed inputs but also support lowering unsigned. 
- if (cast(InputInst->getOperand(0))->getOpcode() != - cast(InputInst->getOperand(1))->getOpcode()) - return true; - - EVT Op1VT = EVT::getEVT(Op1->getType()); - if (Op1VT.getVectorElementType() == VT.getVectorElementType() && - ((VT.getVectorElementCount() * 2 == - Op1VT.getVectorElementCount()) || - (VT.getVectorElementCount() * 4 == Op1VT.getVectorElementCount()))) - return false; - } - } else if (ISD::isExtOpcode(Opcode)) { - return false; - } - } - return true; -} - TargetLowering::AtomicExpansionKind WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { // We have wasm instructions for these @@ -2113,106 +2083,6 @@ SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op, MachinePointerInfo(SV)); } -// Try to lower partial.reduce.add to a dot or fallback to a sequence with -// extmul and adds. -SDValue performLowerPartialReduction(SDNode *N, SelectionDAG &DAG) { - assert(N->getOpcode() == ISD::INTRINSIC_WO_CHAIN); - if (N->getConstantOperandVal(0) != Intrinsic::vector_partial_reduce_add) - return SDValue(); - - assert(N->getValueType(0) == MVT::v4i32 && "can only support v4i32"); - SDLoc DL(N); - - SDValue Input = N->getOperand(2); - if (Input->getOpcode() == ISD::MUL) { - SDValue ExtendLHS = Input->getOperand(0); - SDValue ExtendRHS = Input->getOperand(1); - assert((ISD::isExtOpcode(ExtendLHS.getOpcode()) && - ISD::isExtOpcode(ExtendRHS.getOpcode())) && - "expected widening mul or add"); - assert(ExtendLHS.getOpcode() == ExtendRHS.getOpcode() && - "expected binop to use the same extend for both operands"); - - SDValue ExtendInLHS = ExtendLHS->getOperand(0); - SDValue ExtendInRHS = ExtendRHS->getOperand(0); - bool IsSigned = ExtendLHS->getOpcode() == ISD::SIGN_EXTEND; - unsigned LowOpc = - IsSigned ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U; - unsigned HighOpc = IsSigned ? WebAssemblyISD::EXTEND_HIGH_S - : WebAssemblyISD::EXTEND_HIGH_U; - SDValue LowLHS; - SDValue LowRHS; - SDValue HighLHS; - SDValue HighRHS; - - auto AssignInputs = [&](MVT VT) { - LowLHS = DAG.getNode(LowOpc, DL, VT, ExtendInLHS); - LowRHS = DAG.getNode(LowOpc, DL, VT, ExtendInRHS); - HighLHS = DAG.getNode(HighOpc, DL, VT, ExtendInLHS); - HighRHS = DAG.getNode(HighOpc, DL, VT, ExtendInRHS); - }; - - if (ExtendInLHS->getValueType(0) == MVT::v8i16) { - if (IsSigned) { - // i32x4.dot_i16x8_s - SDValue Dot = DAG.getNode(WebAssemblyISD::DOT, DL, MVT::v4i32, - ExtendInLHS, ExtendInRHS); - return DAG.getNode(ISD::ADD, DL, MVT::v4i32, N->getOperand(1), Dot); - } - - // (add (add (extmul_low_sx lhs, rhs), (extmul_high_sx lhs, rhs))) - MVT VT = MVT::v4i32; - AssignInputs(VT); - SDValue MulLow = DAG.getNode(ISD::MUL, DL, VT, LowLHS, LowRHS); - SDValue MulHigh = DAG.getNode(ISD::MUL, DL, VT, HighLHS, HighRHS); - SDValue Add = DAG.getNode(ISD::ADD, DL, VT, MulLow, MulHigh); - return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(1), Add); - } else { - assert(ExtendInLHS->getValueType(0) == MVT::v16i8 && - "expected v16i8 input types"); - AssignInputs(MVT::v8i16); - // Lower to a wider tree, using twice the operations compared to above. 
- if (IsSigned) { - // Use two dots - SDValue DotLHS = - DAG.getNode(WebAssemblyISD::DOT, DL, MVT::v4i32, LowLHS, LowRHS); - SDValue DotRHS = - DAG.getNode(WebAssemblyISD::DOT, DL, MVT::v4i32, HighLHS, HighRHS); - SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::v4i32, DotLHS, DotRHS); - return DAG.getNode(ISD::ADD, DL, MVT::v4i32, N->getOperand(1), Add); - } - - SDValue MulLow = DAG.getNode(ISD::MUL, DL, MVT::v8i16, LowLHS, LowRHS); - SDValue MulHigh = DAG.getNode(ISD::MUL, DL, MVT::v8i16, HighLHS, HighRHS); - - SDValue AddLow = DAG.getNode(WebAssemblyISD::EXT_ADD_PAIRWISE_U, DL, - MVT::v4i32, MulLow); - SDValue AddHigh = DAG.getNode(WebAssemblyISD::EXT_ADD_PAIRWISE_U, DL, - MVT::v4i32, MulHigh); - SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::v4i32, AddLow, AddHigh); - return DAG.getNode(ISD::ADD, DL, MVT::v4i32, N->getOperand(1), Add); - } - } else { - // Accumulate the input using extadd_pairwise. - assert(ISD::isExtOpcode(Input.getOpcode()) && "expected extend"); - bool IsSigned = Input->getOpcode() == ISD::SIGN_EXTEND; - unsigned PairwiseOpc = IsSigned ? WebAssemblyISD::EXT_ADD_PAIRWISE_S - : WebAssemblyISD::EXT_ADD_PAIRWISE_U; - SDValue ExtendIn = Input->getOperand(0); - if (ExtendIn->getValueType(0) == MVT::v8i16) { - SDValue Add = DAG.getNode(PairwiseOpc, DL, MVT::v4i32, ExtendIn); - return DAG.getNode(ISD::ADD, DL, MVT::v4i32, N->getOperand(1), Add); - } - - assert(ExtendIn->getValueType(0) == MVT::v16i8 && - "expected v16i8 input types"); - SDValue Add = - DAG.getNode(PairwiseOpc, DL, MVT::v4i32, - DAG.getNode(PairwiseOpc, DL, MVT::v8i16, ExtendIn)); - return DAG.getNode(ISD::ADD, DL, MVT::v4i32, N->getOperand(1), Add); - } -} - SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op, SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); @@ -3683,11 +3553,8 @@ WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N, return performVectorTruncZeroCombine(N, DCI); case ISD::TRUNCATE: return performTruncateCombine(N, DCI); - case ISD::INTRINSIC_WO_CHAIN: { - if (auto AnyAllCombine = performAnyAllCombine(N, DCI.DAG)) - return AnyAllCombine; - return performLowerPartialReduction(N, DCI.DAG); - } + case ISD::INTRINSIC_WO_CHAIN: + return performAnyAllCombine(N, DCI.DAG); case ISD::MUL: return performMulCombine(N, DCI); } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h index 72401a7a259c0..b33a8530310be 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h @@ -45,8 +45,6 @@ class WebAssemblyTargetLowering final : public TargetLowering { /// right decision when generating code for different targets. 
const WebAssemblySubtarget *Subtarget; - bool - shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const override; AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override; bool shouldScalarizeBinop(SDValue VecOp) const override; FastISel *createFastISel(FunctionLoweringInfo &FuncInfo, @@ -89,8 +87,7 @@ class WebAssemblyTargetLowering final : public TargetLowering { bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl &Outs, - LLVMContext &Context, - const Type *RetTy) const override; + LLVMContext &Context, const Type *RetTy) const override; SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl &Outs, const SmallVectorImpl &OutVals, const SDLoc &dl, diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp index feac04a17068a..343d90e88950f 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp @@ -39,18 +39,18 @@ WebAssemblyInstrInfo::WebAssemblyInstrInfo(const WebAssemblySubtarget &STI) WebAssembly::CATCHRET), RI(STI.getTargetTriple()) {} -bool WebAssemblyInstrInfo::isReallyTriviallyReMaterializable( +bool WebAssemblyInstrInfo::isReMaterializableImpl( const MachineInstr &MI) const { switch (MI.getOpcode()) { case WebAssembly::CONST_I32: case WebAssembly::CONST_I64: case WebAssembly::CONST_F32: case WebAssembly::CONST_F64: - // TargetInstrInfo::isReallyTriviallyReMaterializable misses these + // TargetInstrInfo::isReMaterializableImpl misses these // because of the ARGUMENTS implicit def, so we manualy override it here. return true; default: - return TargetInstrInfo::isReallyTriviallyReMaterializable(MI); + return TargetInstrInfo::isReMaterializableImpl(MI); } } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h index ba00097034bf5..b92f62d7638c1 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h @@ -37,7 +37,7 @@ class WebAssemblyInstrInfo final : public WebAssemblyGenInstrInfo { const WebAssemblyRegisterInfo &getRegisterInfo() const { return RI; } - bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override; + bool isReMaterializableImpl(const MachineInstr &MI) const override; void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td index d8948ad2df037..130602650d34e 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -1504,6 +1504,51 @@ def : Pat<(v2f64 (extloadv2f32 (i64 I64:$addr))), defm Q15MULR_SAT_S : SIMDBinary; +//===----------------------------------------------------------------------===// +// Partial reductions, using: dot, extmul and extadd_pairwise +//===----------------------------------------------------------------------===// +// MLA: v8i16 -> v4i32 +def : Pat<(v4i32 (partial_reduce_smla (v4i32 V128:$acc), (v8i16 V128:$lhs), + (v8i16 V128:$rhs))), + (ADD_I32x4 (DOT $lhs, $rhs), $acc)>; +def : Pat<(v4i32 (partial_reduce_umla (v4i32 V128:$acc), (v8i16 V128:$lhs), + (v8i16 V128:$rhs))), + (ADD_I32x4 (ADD_I32x4 (EXTMUL_LOW_U_I32x4 $lhs, $rhs), + (EXTMUL_HIGH_U_I32x4 $lhs, $rhs)), + $acc)>; +// MLA: v16i8 -> 
v4i32 +def : Pat<(v4i32 (partial_reduce_smla (v4i32 V128:$acc), (v16i8 V128:$lhs), + (v16i8 V128:$rhs))), + (ADD_I32x4 (ADD_I32x4 (DOT (extend_low_s_I16x8 $lhs), + (extend_low_s_I16x8 $rhs)), + (DOT (extend_high_s_I16x8 $lhs), + (extend_high_s_I16x8 $rhs))), + $acc)>; +def : Pat<(v4i32 (partial_reduce_umla (v4i32 V128:$acc), (v16i8 V128:$lhs), + (v16i8 V128:$rhs))), + (ADD_I32x4 (ADD_I32x4 (extadd_pairwise_u_I32x4 (EXTMUL_LOW_U_I16x8 $lhs, $rhs)), + (extadd_pairwise_u_I32x4 (EXTMUL_HIGH_U_I16x8 $lhs, $rhs))), + $acc)>; + +// Accumulate: v8i16 -> v4i32 +def : Pat<(v4i32 (partial_reduce_smla (v4i32 V128:$acc), (v8i16 V128:$in), + (I16x8.splat (i32 1)))), + (ADD_I32x4 (extadd_pairwise_s_I32x4 $in), $acc)>; + +def : Pat<(v4i32 (partial_reduce_umla (v4i32 V128:$acc), (v8i16 V128:$in), + (I16x8.splat (i32 1)))), + (ADD_I32x4 (extadd_pairwise_u_I32x4 $in), $acc)>; + +// Accumulate: v16i8 -> v4i32 +def : Pat<(v4i32 (partial_reduce_smla (v4i32 V128:$acc), (v16i8 V128:$in), + (I8x16.splat (i32 1)))), + (ADD_I32x4 (extadd_pairwise_s_I32x4 (extadd_pairwise_s_I16x8 $in)), + $acc)>; +def : Pat<(v4i32 (partial_reduce_umla (v4i32 V128:$acc), (v16i8 V128:$in), + (I8x16.splat (i32 1)))), + (ADD_I32x4 (extadd_pairwise_u_I32x4 (extadd_pairwise_u_I16x8 $in)), + $acc)>; + //===----------------------------------------------------------------------===// // Relaxed swizzle //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp index 08ca20b5eef6e..97f2ed0a828ba 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp @@ -867,6 +867,10 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { if (Insert->isDebugValue()) continue; + // Ignore FAKE_USEs, which are no-ops and will be deleted later. + if (Insert->isFakeUse()) + continue; + // Iterate through the inputs in reverse order, since we'll be pulling // operands off the stack in LIFO order. CommutingState Commuting; diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp index ce5e92135f706..a8908d4b710e6 100644 --- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -1247,7 +1247,7 @@ class X86AsmParser : public MCTargetAsmParser { /// return false if no parsing errors occurred, true otherwise. bool HandleAVX512Operand(OperandVector &Operands); - bool ParseZ(std::unique_ptr &Z, const SMLoc &StartLoc); + bool ParseZ(std::unique_ptr &Z, SMLoc StartLoc); bool is64BitMode() const { // FIXME: Can tablegen auto-generate this? @@ -2907,8 +2907,7 @@ X86::CondCode X86AsmParser::ParseConditionCode(StringRef CC) { // true on failure, false otherwise // If no {z} mark was found - Parser doesn't advance -bool X86AsmParser::ParseZ(std::unique_ptr &Z, - const SMLoc &StartLoc) { +bool X86AsmParser::ParseZ(std::unique_ptr &Z, SMLoc StartLoc) { MCAsmParser &Parser = getParser(); // Assuming we are just pass the '{' mark, quering the next token // Searched for {z}, but none was found. Return false, as no parsing error was @@ -4018,9 +4017,14 @@ bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) { return Error(Ops[0]->getStartLoc(), "all tmm registers must be distinct"); } - // Check that we aren't mixing AH/BH/CH/DH with REX prefix. 
We only need to - // check this with the legacy encoding, VEX/EVEX/XOP don't use REX. - if ((TSFlags & X86II::EncodingMask) == 0) { + // High 8-bit regs (AH/BH/CH/DH) are incompatible with encodings that imply + // extended prefixes: + // * Legacy path that would emit a REX (e.g. uses r8..r15 or sil/dil/bpl/spl) + // * EVEX + // * REX2 + // VEX/XOP don't use REX; they are excluded from the legacy check. + const unsigned Enc = TSFlags & X86II::EncodingMask; + if (Enc != X86II::VEX && Enc != X86II::XOP) { MCRegister HReg; bool UsesRex = TSFlags & X86II::REX_W; unsigned NumOps = Inst.getNumOperands(); @@ -4036,11 +4040,13 @@ bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) { UsesRex = true; } - if (UsesRex && HReg) { + if (HReg && + (Enc == X86II::EVEX || ForcedOpcodePrefix == OpcodePrefix_REX2 || + ForcedOpcodePrefix == OpcodePrefix_REX || UsesRex)) { StringRef RegName = X86IntelInstPrinter::getRegisterName(HReg); return Error(Ops[0]->getStartLoc(), - "can't encode '" + RegName + "' in an instruction requiring " - "REX prefix"); + "can't encode '" + RegName.str() + + "' in an instruction requiring EVEX/REX2/REX prefix"); } } diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp index 2c752457d165e..e7709ef589502 100644 --- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp +++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp @@ -21,6 +21,7 @@ #include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/IntrinsicsX86.h" #include "llvm/IR/Type.h" using namespace llvm; @@ -110,7 +111,8 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI, .legalFor(HasSSE2 || UseX87, {s64}) .legalFor(UseX87, {s80}); - getActionDefinitionsBuilder(G_GET_ROUNDING).customFor({s32}); + getActionDefinitionsBuilder({G_GET_ROUNDING, G_SET_ROUNDING}) + .customFor({s32}); // merge/unmerge for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) { @@ -147,6 +149,10 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI, }); } + getActionDefinitionsBuilder({G_UMIN, G_UMAX, G_SMIN, G_SMAX}) + .widenScalarToNextPow2(0, /*Min=*/32) + .lower(); + // integer addition/subtraction getActionDefinitionsBuilder({G_ADD, G_SUB}) .legalFor({s8, s16, s32}) @@ -617,6 +623,8 @@ bool X86LegalizerInfo::legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI, return legalizeFPTOSI(MI, MRI, Helper); case TargetOpcode::G_GET_ROUNDING: return legalizeGETROUNDING(MI, MRI, Helper); + case TargetOpcode::G_SET_ROUNDING: + return legalizeSETROUNDING(MI, MRI, Helper); } llvm_unreachable("expected switch to return"); } @@ -859,6 +867,134 @@ bool X86LegalizerInfo::legalizeGETROUNDING(MachineInstr &MI, return true; } +bool X86LegalizerInfo::legalizeSETROUNDING(MachineInstr &MI, + MachineRegisterInfo &MRI, + LegalizerHelper &Helper) const { + MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; + MachineFunction &MF = MIRBuilder.getMF(); + Register Src = MI.getOperand(0).getReg(); + const LLT s8 = LLT::scalar(8); + const LLT s16 = LLT::scalar(16); + const LLT s32 = LLT::scalar(32); + + // Allocate stack slot for control word and MXCSR (4 bytes). 
+ int MemSize = 4; + Align Alignment = Align(4); + MachinePointerInfo PtrInfo; + auto StackTemp = Helper.createStackTemporary(TypeSize::getFixed(MemSize), + Alignment, PtrInfo); + Register StackPtr = StackTemp.getReg(0); + + auto StoreMMO = + MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, 2, Align(2)); + MIRBuilder.buildInstr(X86::G_FNSTCW16) + .addUse(StackPtr) + .addMemOperand(StoreMMO); + + auto LoadMMO = + MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad, 2, Align(2)); + auto CWD16 = MIRBuilder.buildLoad(s16, StackPtr, *LoadMMO); + + // Clear RM field (bits 11:10) + auto ClearedCWD = + MIRBuilder.buildAnd(s16, CWD16, MIRBuilder.buildConstant(s16, 0xf3ff)); + + // Check if Src is a constant + auto *SrcDef = MRI.getVRegDef(Src); + Register RMBits; + Register MXCSRRMBits; + + if (SrcDef && SrcDef->getOpcode() == TargetOpcode::G_CONSTANT) { + uint64_t RM = getIConstantFromReg(Src, MRI).getZExtValue(); + int FieldVal = X86::getRoundingModeX86(RM); + + if (FieldVal == X86::rmInvalid) { + FieldVal = X86::rmToNearest; + LLVMContext &C = MF.getFunction().getContext(); + C.diagnose(DiagnosticInfoUnsupported( + MF.getFunction(), "rounding mode is not supported by X86 hardware", + DiagnosticLocation(MI.getDebugLoc()), DS_Error)); + return false; + } + + FieldVal = FieldVal << 3; + RMBits = MIRBuilder.buildConstant(s16, FieldVal).getReg(0); + MXCSRRMBits = MIRBuilder.buildConstant(s32, FieldVal).getReg(0); + } else { + // Convert Src (rounding mode) to bits for control word + // (0xc9 << (2 * Src + 4)) & 0xc00 + auto Src32 = MIRBuilder.buildZExtOrTrunc(s32, Src); + auto ShiftAmt = MIRBuilder.buildAdd( + s32, MIRBuilder.buildShl(s32, Src32, MIRBuilder.buildConstant(s32, 1)), + MIRBuilder.buildConstant(s32, 4)); + auto ShiftAmt8 = MIRBuilder.buildTrunc(s8, ShiftAmt); + auto Shifted = MIRBuilder.buildShl(s16, MIRBuilder.buildConstant(s16, 0xc9), + ShiftAmt8); + RMBits = + MIRBuilder.buildAnd(s16, Shifted, MIRBuilder.buildConstant(s16, 0xc00)) + .getReg(0); + + // For non-constant case, we still need to compute MXCSR bits dynamically + auto RMBits32 = MIRBuilder.buildZExt(s32, RMBits); + MXCSRRMBits = + MIRBuilder.buildShl(s32, RMBits32, MIRBuilder.buildConstant(s32, 3)) + .getReg(0); + } + // Update rounding mode bits + auto NewCWD = + MIRBuilder.buildOr(s16, ClearedCWD, RMBits, MachineInstr::Disjoint); + + // Store new FP Control Word to stack + auto StoreNewMMO = + MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, 2, Align(2)); + MIRBuilder.buildStore(NewCWD, StackPtr, *StoreNewMMO); + + // Load FP control word from the slot using G_FLDCW16 + auto LoadNewMMO = + MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad, 2, Align(2)); + MIRBuilder.buildInstr(X86::G_FLDCW16) + .addUse(StackPtr) + .addMemOperand(LoadNewMMO); + + if (Subtarget.hasSSE1()) { + // Store MXCSR to stack (use STMXCSR) + auto StoreMXCSRMMO = MF.getMachineMemOperand( + PtrInfo, MachineMemOperand::MOStore, 4, Align(4)); + MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) + .addIntrinsicID(Intrinsic::x86_sse_stmxcsr) + .addUse(StackPtr) + .addMemOperand(StoreMXCSRMMO); + + // Load MXCSR from stack + auto LoadMXCSRMMO = MF.getMachineMemOperand( + PtrInfo, MachineMemOperand::MOLoad, 4, Align(4)); + auto MXCSR = MIRBuilder.buildLoad(s32, StackPtr, *LoadMXCSRMMO); + + // Clear RM field (bits 14:13) + auto ClearedMXCSR = MIRBuilder.buildAnd( + s32, MXCSR, MIRBuilder.buildConstant(s32, 0xffff9fff)); + + // Update rounding mode bits + auto NewMXCSR = MIRBuilder.buildOr(s32, ClearedMXCSR, 
MXCSRRMBits); + + // Store new MXCSR to stack + auto StoreNewMXCSRMMO = MF.getMachineMemOperand( + PtrInfo, MachineMemOperand::MOStore, 4, Align(4)); + MIRBuilder.buildStore(NewMXCSR, StackPtr, *StoreNewMXCSRMMO); + + // Load MXCSR from stack (use LDMXCSR) + auto LoadNewMXCSRMMO = MF.getMachineMemOperand( + PtrInfo, MachineMemOperand::MOLoad, 4, Align(4)); + MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) + .addIntrinsicID(Intrinsic::x86_sse_ldmxcsr) + .addUse(StackPtr) + .addMemOperand(LoadNewMXCSRMMO); + } + + MI.eraseFromParent(); + return true; +} + bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper, MachineInstr &MI) const { return true; diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h index 0003552d70ee0..09c727c8e8685 100644 --- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h +++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h @@ -57,6 +57,9 @@ class X86LegalizerInfo : public LegalizerInfo { bool legalizeGETROUNDING(MachineInstr &MI, MachineRegisterInfo &MRI, LegalizerHelper &Helper) const; + + bool legalizeSETROUNDING(MachineInstr &MI, MachineRegisterInfo &MRI, + LegalizerHelper &Helper) const; }; } // namespace llvm #endif diff --git a/llvm/lib/Target/X86/MCA/X86CustomBehaviour.cpp b/llvm/lib/Target/X86/MCA/X86CustomBehaviour.cpp index e2a1bbf383b3c..a69a781bf070b 100644 --- a/llvm/lib/Target/X86/MCA/X86CustomBehaviour.cpp +++ b/llvm/lib/Target/X86/MCA/X86CustomBehaviour.cpp @@ -20,24 +20,22 @@ namespace llvm { namespace mca { -void X86InstrPostProcess::setMemBarriers(std::unique_ptr &Inst, - const MCInst &MCI) { +void X86InstrPostProcess::setMemBarriers(Instruction &Inst, const MCInst &MCI) { switch (MCI.getOpcode()) { case X86::MFENCE: - Inst->setLoadBarrier(true); - Inst->setStoreBarrier(true); + Inst.setLoadBarrier(true); + Inst.setStoreBarrier(true); break; case X86::LFENCE: - Inst->setLoadBarrier(true); + Inst.setLoadBarrier(true); break; case X86::SFENCE: - Inst->setStoreBarrier(true); + Inst.setStoreBarrier(true); break; } } -void X86InstrPostProcess::useStackEngine(std::unique_ptr &Inst, - const MCInst &MCI) { +void X86InstrPostProcess::useStackEngine(Instruction &Inst, const MCInst &MCI) { // TODO(boomanaiden154): We currently do not handle PUSHF/POPF because we // have not done the necessary benchmarking to see if they are also // optimized by the stack engine. @@ -46,18 +44,18 @@ void X86InstrPostProcess::useStackEngine(std::unique_ptr &Inst, // delay subsequent rsp using non-stack instructions. if (X86::isPOP(MCI.getOpcode()) || X86::isPUSH(MCI.getOpcode())) { auto *StackRegisterDef = - llvm::find_if(Inst->getDefs(), [](const WriteState &State) { + llvm::find_if(Inst.getDefs(), [](const WriteState &State) { return State.getRegisterID() == X86::RSP; }); assert( - StackRegisterDef != Inst->getDefs().end() && + StackRegisterDef != Inst.getDefs().end() && "Expected push instruction to implicitly use stack pointer register."); - Inst->getDefs().erase(StackRegisterDef); + Inst.getDefs().erase(StackRegisterDef); } } -void X86InstrPostProcess::postProcessInstruction( - std::unique_ptr &Inst, const MCInst &MCI) { +void X86InstrPostProcess::postProcessInstruction(Instruction &Inst, + const MCInst &MCI) { // Set IsALoadBarrier and IsAStoreBarrier flags. 
setMemBarriers(Inst, MCI); useStackEngine(Inst, MCI); diff --git a/llvm/lib/Target/X86/MCA/X86CustomBehaviour.h b/llvm/lib/Target/X86/MCA/X86CustomBehaviour.h index c5459e42dfc9f..d6197f3344bbb 100644 --- a/llvm/lib/Target/X86/MCA/X86CustomBehaviour.h +++ b/llvm/lib/Target/X86/MCA/X86CustomBehaviour.h @@ -26,12 +26,12 @@ namespace mca { class X86InstrPostProcess : public InstrPostProcess { /// Called within X86InstrPostProcess to specify certain instructions /// as load and store barriers. - void setMemBarriers(std::unique_ptr &Inst, const MCInst &MCI); + void setMemBarriers(Instruction &Inst, const MCInst &MCI); /// Called within X86InstrPostPorcess to remove some rsp read operands /// on stack instructions to better simulate the stack engine. We currently /// do not model features of the stack engine like sync uops. - void useStackEngine(std::unique_ptr &Inst, const MCInst &MCI); + void useStackEngine(Instruction &Inst, const MCInst &MCI); public: X86InstrPostProcess(const MCSubtargetInfo &STI, const MCInstrInfo &MCII) @@ -39,8 +39,7 @@ class X86InstrPostProcess : public InstrPostProcess { ~X86InstrPostProcess() = default; - void postProcessInstruction(std::unique_ptr &Inst, - const MCInst &MCI) override; + void postProcessInstruction(Instruction &Inst, const MCInst &MCI) override; }; } // namespace mca diff --git a/llvm/lib/Target/X86/X86FixupInstTuning.cpp b/llvm/lib/Target/X86/X86FixupInstTuning.cpp index 33dc0a232815c..a1d4e0bc62310 100644 --- a/llvm/lib/Target/X86/X86FixupInstTuning.cpp +++ b/llvm/lib/Target/X86/X86FixupInstTuning.cpp @@ -277,6 +277,22 @@ bool X86FixupInstTuningPass::processInstruction( return true; }; + // Is ADD(X,X) more efficient than SHL(X,1)? + auto ProcessShiftLeftToAdd = [&](unsigned AddOpc) -> bool { + if (MI.getOperand(NumOperands - 1).getImm() != 1) + return false; + if (!NewOpcPreferable(AddOpc, /*ReplaceInTie*/ true)) + return false; + LLVM_DEBUG(dbgs() << "Replacing: " << MI); + { + MI.setDesc(TII->get(AddOpc)); + MI.removeOperand(NumOperands - 1); + MI.addOperand(MI.getOperand(NumOperands - 2)); + } + LLVM_DEBUG(dbgs() << " With: " << MI); + return false; + }; + switch (Opc) { case X86::BLENDPDrri: return ProcessBLENDToMOV(X86::MOVSDrr, 0x3, 0x1); @@ -563,6 +579,44 @@ bool X86FixupInstTuningPass::processInstruction( return ProcessUNPCKPS(X86::VPUNPCKHDQZ256rmkz); case X86::VUNPCKHPSZrmkz: return ProcessUNPCKPS(X86::VPUNPCKHDQZrmkz); + + case X86::PSLLWri: + return ProcessShiftLeftToAdd(X86::PADDWrr); + case X86::VPSLLWri: + return ProcessShiftLeftToAdd(X86::VPADDWrr); + case X86::VPSLLWYri: + return ProcessShiftLeftToAdd(X86::VPADDWYrr); + case X86::VPSLLWZ128ri: + return ProcessShiftLeftToAdd(X86::VPADDWZ128rr); + case X86::VPSLLWZ256ri: + return ProcessShiftLeftToAdd(X86::VPADDWZ256rr); + case X86::VPSLLWZri: + return ProcessShiftLeftToAdd(X86::VPADDWZrr); + case X86::PSLLDri: + return ProcessShiftLeftToAdd(X86::PADDDrr); + case X86::VPSLLDri: + return ProcessShiftLeftToAdd(X86::VPADDDrr); + case X86::VPSLLDYri: + return ProcessShiftLeftToAdd(X86::VPADDDYrr); + case X86::VPSLLDZ128ri: + return ProcessShiftLeftToAdd(X86::VPADDDZ128rr); + case X86::VPSLLDZ256ri: + return ProcessShiftLeftToAdd(X86::VPADDDZ256rr); + case X86::VPSLLDZri: + return ProcessShiftLeftToAdd(X86::VPADDDZrr); + case X86::PSLLQri: + return ProcessShiftLeftToAdd(X86::PADDQrr); + case X86::VPSLLQri: + return ProcessShiftLeftToAdd(X86::VPADDQrr); + case X86::VPSLLQYri: + return ProcessShiftLeftToAdd(X86::VPADDQYrr); + case X86::VPSLLQZ128ri: + return 
ProcessShiftLeftToAdd(X86::VPADDQZ128rr); + case X86::VPSLLQZ256ri: + return ProcessShiftLeftToAdd(X86::VPADDQZ256rr); + case X86::VPSLLQZri: + return ProcessShiftLeftToAdd(X86::VPADDQZrr); + default: return false; } diff --git a/llvm/lib/Target/X86/X86FixupSetCC.cpp b/llvm/lib/Target/X86/X86FixupSetCC.cpp index 2de89947c4519..ea93a575ec530 100644 --- a/llvm/lib/Target/X86/X86FixupSetCC.cpp +++ b/llvm/lib/Target/X86/X86FixupSetCC.cpp @@ -136,6 +136,12 @@ bool X86FixupSetCCPass::runOnMachineFunction(MachineFunction &MF) { .addReg(ZeroReg) .addReg(Reg0) .addImm(X86::sub_8bit); + + // Redirect the debug-instr-number to the setcc. + if (unsigned InstrNum = ZExt->peekDebugInstrNum()) + MF.makeDebugValueSubstitution({InstrNum, 0}, + {MI.getDebugInstrNum(), 0}); + ToErase.push_back(ZExt); } } diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp index 08c9d738baceb..a66a3213403b4 100644 --- a/llvm/lib/Target/X86/X86FrameLowering.cpp +++ b/llvm/lib/Target/X86/X86FrameLowering.cpp @@ -53,6 +53,7 @@ X86FrameLowering::X86FrameLowering(const X86Subtarget &STI, STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) { // Cache a bunch of frame-related predicates for this subtarget. SlotSize = TRI->getSlotSize(); + assert(SlotSize == 4 || SlotSize == 8); Is64Bit = STI.is64Bit(); IsLP64 = STI.isTarget64BitLP64(); // standard x86_64 uses 64-bit frame/stack pointers, x32 - 32-bit. @@ -224,7 +225,7 @@ flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) { return false; } -constexpr int64_t MaxSPChunk = (1LL << 31) - 1; +constexpr uint64_t MaxSPChunk = (1ULL << 31) - 1; /// emitSPUpdate - Emit a series of instructions to increment / decrement the /// stack pointer by a constant value. @@ -245,8 +246,6 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB, return; } - uint64_t Chunk = MaxSPChunk; - MachineFunction &MF = *MBB.getParent(); const X86Subtarget &STI = MF.getSubtarget(); const X86TargetLowering &TLI = *STI.getTargetLowering(); @@ -260,7 +259,7 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB, // loop, by inlineStackProbe(). BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset); return; - } else if (Offset > Chunk) { + } else if (Offset > MaxSPChunk) { // Rather than emit a long series of instructions for large offsets, // load the offset into a register and do one sub/add unsigned Reg = 0; @@ -284,7 +283,7 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB, .addReg(Reg); MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead. return; - } else if (Offset > 8 * Chunk) { + } else if (Offset > 8 * MaxSPChunk) { // If we would need more than 8 add or sub instructions (a >16GB stack // frame), it's worth spilling RAX to materialize this immediate. // pushq %rax @@ -322,8 +321,7 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB, } while (Offset) { - uint64_t ThisVal = std::min(Offset, Chunk); - if (ThisVal == SlotSize) { + if (Offset == SlotSize) { // Use push / pop for slot sized adjustments as a size optimization. We // need to find a dead register when using pop. unsigned Reg = isSub ? (unsigned)(Is64Bit ? 
X86::RAX : X86::EAX) @@ -334,11 +332,12 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB, BuildMI(MBB, MBBI, DL, TII.get(Opc)) .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub)) .setMIFlag(Flag); - Offset -= ThisVal; - continue; + return; } } + uint64_t ThisVal = std::min(Offset, MaxSPChunk); + BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue) .setMIFlag(Flag); @@ -445,7 +444,7 @@ int64_t X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB, return CalcNewOffset(0); FoundStackAdjust(PI, Offset); - if (std::abs((int64_t)CalcNewOffset(Offset)) < MaxSPChunk) + if ((uint64_t)std::abs((int64_t)CalcNewOffset(Offset)) < MaxSPChunk) break; if (doMergeWithPrevious ? (PI == MBB.begin()) : (PI == MBB.end())) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 2feb76e0eb7b4..cda5568a2cb59 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -4452,11 +4452,12 @@ static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG, template SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget, const SDLoc &DL, EVT VT, ArrayRef Ops, - F Builder, bool CheckBWI = true) { + F Builder, bool CheckBWI = true, + bool AllowAVX512 = true) { assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2"); unsigned NumSubs = 1; - if ((CheckBWI && Subtarget.useBWIRegs()) || - (!CheckBWI && Subtarget.useAVX512Regs())) { + if (AllowAVX512 && ((CheckBWI && Subtarget.useBWIRegs()) || + (!CheckBWI && Subtarget.useAVX512Regs()))) { if (VT.getSizeInBits() > 512) { NumSubs = VT.getSizeInBits() / 512; assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size"); @@ -5346,6 +5347,19 @@ bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) { return false; } + +int getRoundingModeX86(unsigned RM) { + switch (static_cast<::llvm::RoundingMode>(RM)) { + // clang-format off + case ::llvm::RoundingMode::NearestTiesToEven: return X86::rmToNearest; break; + case ::llvm::RoundingMode::TowardNegative: return X86::rmDownward; break; + case ::llvm::RoundingMode::TowardPositive: return X86::rmUpward; break; + case ::llvm::RoundingMode::TowardZero: return X86::rmTowardZero; break; + default: + return X86::rmInvalid; // Invalid rounding mode + } +} + } // namespace X86 } // namespace llvm @@ -11721,10 +11735,19 @@ static SDValue lowerShuffleAsDecomposedShuffleMerge( // we'll have to do 2x as many shuffles in order to achieve this, a 2-input // pre-shuffle first is a better strategy. if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) { + // If we don't have blends, see if we can create a cheap unpack. + if (!Subtarget.hasSSE41() && VT.is128BitVector() && + (is128BitUnpackShuffleMask(V1Mask, DAG) || + is128BitUnpackShuffleMask(V2Mask, DAG))) + if (SDValue PermUnpack = lowerShuffleAsPermuteAndUnpack( + DL, VT, V1, V2, Mask, Subtarget, DAG)) + return PermUnpack; + // Only prefer immediate blends to unpack/rotate. - if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, - DAG, true)) + if (SDValue BlendPerm = + lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG, true)) return BlendPerm; + // If either input vector provides only a single element which is repeated // multiple times, unpacking from both input vectors would generate worse // code. e.g. 
for @@ -11736,13 +11759,16 @@ static SDValue lowerShuffleAsDecomposedShuffleMerge( if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask, DAG)) return UnpackPerm; + if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute( DL, VT, V1, V2, Mask, Subtarget, DAG)) return RotatePerm; + // Unpack/rotate failed - try again with variable blends. if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG)) return BlendPerm; + if (VT.getScalarSizeInBits() >= 32) if (SDValue PermUnpack = lowerShuffleAsPermuteAndUnpack( DL, VT, V1, V2, Mask, Subtarget, DAG)) @@ -28686,16 +28712,14 @@ SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op, SDValue RMBits; if (auto *CVal = dyn_cast(NewRM)) { uint64_t RM = CVal->getZExtValue(); - int FieldVal; - switch (static_cast(RM)) { - // clang-format off - case RoundingMode::NearestTiesToEven: FieldVal = X86::rmToNearest; break; - case RoundingMode::TowardNegative: FieldVal = X86::rmDownward; break; - case RoundingMode::TowardPositive: FieldVal = X86::rmUpward; break; - case RoundingMode::TowardZero: FieldVal = X86::rmTowardZero; break; - default: - llvm_unreachable("rounding mode is not supported by X86 hardware"); - // clang-format on + int FieldVal = X86::getRoundingModeX86(RM); + + if (FieldVal == X86::rmInvalid) { + FieldVal = X86::rmToNearest; + LLVMContext &C = MF.getFunction().getContext(); + C.diagnose(DiagnosticInfoUnsupported( + MF.getFunction(), "rounding mode is not supported by X86 hardware", + DiagnosticLocation(DL.getDebugLoc()), DS_Error)); } RMBits = DAG.getConstant(FieldVal, DL, MVT::i16); } else { @@ -30289,22 +30313,8 @@ static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG, uint64_t ShiftAmt = APIntShiftAmt.getZExtValue(); - if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) { - // Hardware support for vector shifts is sparse which makes us scalarize the - // vector operations in many cases. Also, on sandybridge ADD is faster than - // shl: (shl V, 1) -> (add (freeze V), (freeze V)) - if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) { - // R may be undef at run-time, but (shl R, 1) must be an even number (LSB - // must be 0). (add undef, undef) however can be any value. To make this - // safe, we must freeze R to ensure that register allocation uses the same - // register for an undefined value. This ensures that the result will - // still be even and preserves the original semantics. - R = DAG.getFreeze(R); - return DAG.getNode(ISD::ADD, dl, VT, R, R); - } - + if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG); - } // i64 SRA needs to be performed as partial shifts. if (((!Subtarget.hasXOP() && VT == MVT::v2i64) || @@ -31205,16 +31215,16 @@ static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget, unsigned NumElts = VT.getVectorNumElements(); if (Subtarget.hasVBMI2() && EltSizeInBits > 8) { - if (IsFSHR) - std::swap(Op0, Op1); if (IsCstSplat) { + if (IsFSHR) + std::swap(Op0, Op1); uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits); SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8); return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT, {Op0, Op1, Imm}, DAG, Subtarget); } - return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT, + return getAVX512Node(IsFSHR ? 
ISD::FSHR : ISD::FSHL, DL, VT, {Op0, Op1, Amt}, DAG, Subtarget); } assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 || @@ -35129,8 +35139,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(VALIGN) NODE_NAME_CASE(VSHLD) NODE_NAME_CASE(VSHRD) - NODE_NAME_CASE(VSHLDV) - NODE_NAME_CASE(VSHRDV) NODE_NAME_CASE(PSHUFD) NODE_NAME_CASE(PSHUFHW) NODE_NAME_CASE(PSHUFLW) @@ -44607,8 +44615,11 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode( APInt DemandedMask = OriginalDemandedBits << ShAmt; - // If we just want the sign bit then we don't need to shift it. - if (OriginalDemandedBits.isSignMask()) + // If we only want bits that already match the signbit then we don't need + // to shift. + unsigned NumHiDemandedBits = BitWidth - OriginalDemandedBits.countr_zero(); + if (TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1) >= + NumHiDemandedBits) return TLO.CombineTo(Op, Op0); // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1 @@ -45161,11 +45172,27 @@ bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode( case X86ISD::Wrapper: case X86ISD::WrapperRIP: return true; + case X86ISD::PACKSS: + case X86ISD::PACKUS: { + APInt DemandedLHS, DemandedRHS; + getPackDemandedElts(Op.getSimpleValueType(), DemandedElts, DemandedLHS, + DemandedRHS); + return (!DemandedLHS || + DAG.isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), DemandedLHS, + PoisonOnly, Depth + 1)) && + (!DemandedRHS || + DAG.isGuaranteedNotToBeUndefOrPoison(Op.getOperand(1), DemandedRHS, + PoisonOnly, Depth + 1)); + } + case X86ISD::INSERTPS: case X86ISD::BLENDI: + case X86ISD::PSHUFB: case X86ISD::PSHUFD: case X86ISD::UNPCKL: case X86ISD::UNPCKH: + case X86ISD::VPERMILPV: case X86ISD::VPERMILPI: + case X86ISD::VPERMV: case X86ISD::VPERMV3: { SmallVector Mask; SmallVector Ops; @@ -45227,11 +45254,19 @@ bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode( case X86ISD::BLENDI: case X86ISD::BLENDV: return false; + // SSE packs. + case X86ISD::PACKSS: + case X86ISD::PACKUS: + return false; // SSE target shuffles. + case X86ISD::INSERTPS: + case X86ISD::PSHUFB: case X86ISD::PSHUFD: case X86ISD::UNPCKL: case X86ISD::UNPCKH: + case X86ISD::VPERMILPV: case X86ISD::VPERMILPI: + case X86ISD::VPERMV: case X86ISD::VPERMV3: return false; // SSE comparisons handle all icmp/fcmp cases. @@ -46181,7 +46216,7 @@ static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS, SDValue Zero = DAG.getConstant(0, DL, DpVT); return SplitOpsAndApply(DAG, Subtarget, DL, DpVT, {Zero, DpOp0, DpOp1}, - DpBuilder, false); + DpBuilder, /*CheckBWI=*/false, Subtarget.hasVNNI()); } // Create a PSADBW given two sources representable as zexts of vXi8. @@ -52353,16 +52388,41 @@ static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT, // Do not flip "e > c", where "c" is a constant, because Cmp instruction // cannot take an immediate as its first operand. // - if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() && - EFLAGS.getValueType().isInteger() && - !isa(EFLAGS.getOperand(1))) { - SDValue NewSub = - DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(), - EFLAGS.getOperand(1), EFLAGS.getOperand(0)); - SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo()); + // If EFLAGS is from a CMP that compares the same operands as the earlier + // SUB producing X (i.e. CMP X, Y), we can directly use the carry flag with + // SBB/ADC without creating a flipped SUB. 
+ if (EFLAGS.getOpcode() == X86ISD::CMP && + EFLAGS.getValueType().isInteger() && X == EFLAGS.getOperand(0)) { return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, DAG.getVTList(VT, MVT::i32), X, - DAG.getConstant(0, DL, VT), NewEFLAGS); + DAG.getConstant(0, DL, VT), EFLAGS); + } + + if (EFLAGS.getOpcode() == X86ISD::SUB && + EFLAGS.getValueType().isInteger() && + !isa(EFLAGS.getOperand(1))) { + // Only create NewSub if we know one of the folds will succeed to avoid + // introducing a temporary node that may persist and affect one-use checks + // below. + if (EFLAGS.getNode()->hasOneUse()) { + SDValue NewSub = DAG.getNode( + X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(), + EFLAGS.getOperand(1), EFLAGS.getOperand(0)); + SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo()); + return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, + DAG.getVTList(VT, MVT::i32), X, + DAG.getConstant(0, DL, VT), NewEFLAGS); + } + + if (IsSub && X == EFLAGS.getValue(0)) { + SDValue NewSub = DAG.getNode( + X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(), + EFLAGS.getOperand(1), EFLAGS.getOperand(0)); + SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo()); + return DAG.getNode(X86ISD::SBB, DL, DAG.getVTList(VT, MVT::i32), + EFLAGS.getOperand(0), EFLAGS.getOperand(1), + NewEFLAGS); + } } } @@ -58047,7 +58107,8 @@ static SDValue matchVPMADD52(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, }; return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Acc, X, Y}, VPMADD52Builder, - /*CheckBWI*/ false); + /*CheckBWI*/ false, + /*AllowAVX512*/ Subtarget.hasIFMA()); } static SDValue combineAdd(SDNode *N, SelectionDAG &DAG, diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index 0c9ba591b03eb..b55556aadd867 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -471,8 +471,7 @@ namespace llvm { // VBMI2 Concat & Shift. VSHLD, VSHRD, - VSHLDV, - VSHRDV, + // Shuffle Packed Values at 128-bit granularity. SHUF128, MOVDDUP, @@ -1004,13 +1003,14 @@ namespace llvm { /// Current rounding mode is represented in bits 11:10 of FPSR. These /// values are same as corresponding constants for rounding mode used /// in glibc. - enum RoundingMode { - rmToNearest = 0, // FE_TONEAREST - rmDownward = 1 << 10, // FE_DOWNWARD - rmUpward = 2 << 10, // FE_UPWARD - rmTowardZero = 3 << 10, // FE_TOWARDZERO - rmMask = 3 << 10 // Bit mask selecting rounding mode - }; + enum RoundingMode { + rmInvalid = -1, // For handle Invalid rounding mode + rmToNearest = 0, // FE_TONEAREST + rmDownward = 1 << 10, // FE_DOWNWARD + rmUpward = 2 << 10, // FE_UPWARD + rmTowardZero = 3 << 10, // FE_TOWARDZERO + rmMask = 3 << 10 // Bit mask selecting rounding mode + }; } /// Define some predicates that are used for node matching. @@ -1058,6 +1058,10 @@ namespace llvm { /// functions. bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget, const MachineFunction &MF); + + /// Convert LLVM rounding mode to X86 rounding mode. 
+ int getRoundingModeX86(unsigned RM); + } // end namespace X86 //===--------------------------------------------------------------------===// diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index b8f299965faa3..564810cb4b88e 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -3238,6 +3238,7 @@ multiclass avx512_load opc, string OpcodeStr, string Name, (_.VT _.RC:$src1), (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K, Sched<[Sched.RR]>; + let mayLoad = 1, canFoldAsLoad = 1 in def rmk : AVX512PI opc, string OpcodeStr, string Name, (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K, Sched<[Sched.RM]>; } + let mayLoad = 1, canFoldAsLoad = 1 in def rmkz : AVX512PI; // VBMI2 //===----------------------------------------------------------------------===// -multiclass VBMI2_shift_var_rm Op, string OpStr, SDNode OpNode, +multiclass VBMI2_shift_var_rm Op, string OpStr, SDNode OpNode, bit SwapLR, X86FoldableSchedWrite sched, X86VectorVTInfo VTI> { let Constraints = "$src1 = $dst", ExeDomain = VTI.ExeDomain in { defm r: AVX512_maskable_3src, + !if(SwapLR, + (VTI.VT (OpNode (VTI.VT VTI.RC:$src2), (VTI.VT VTI.RC:$src1), (VTI.VT VTI.RC:$src3))), + (VTI.VT (OpNode (VTI.VT VTI.RC:$src1), (VTI.VT VTI.RC:$src2), (VTI.VT VTI.RC:$src3))))>, T8, PD, EVEX, VVVV, Sched<[sched]>; defm m: AVX512_maskable_3src, + !if(SwapLR, + (VTI.VT (OpNode (VTI.VT VTI.RC:$src2), (VTI.VT VTI.RC:$src1), (VTI.VT (VTI.LdFrag addr:$src3)))), + (VTI.VT (OpNode (VTI.VT VTI.RC:$src1), (VTI.VT VTI.RC:$src2), (VTI.VT (VTI.LdFrag addr:$src3)))))>, T8, PD, EVEX, VVVV, Sched<[sched.Folded, sched.ReadAfterFold]>; } } -multiclass VBMI2_shift_var_rmb Op, string OpStr, SDNode OpNode, +multiclass VBMI2_shift_var_rmb Op, string OpStr, SDNode OpNode, bit SwapLR, X86FoldableSchedWrite sched, X86VectorVTInfo VTI> - : VBMI2_shift_var_rm { + : VBMI2_shift_var_rm { let Constraints = "$src1 = $dst", ExeDomain = VTI.ExeDomain in defm mb: AVX512_maskable_3src, + !if(SwapLR, + (OpNode (VTI.VT VTI.RC:$src2), (VTI.VT VTI.RC:$src1), (VTI.VT (VTI.BroadcastLdFrag addr:$src3))), + (OpNode (VTI.VT VTI.RC:$src1), (VTI.VT VTI.RC:$src2), (VTI.VT (VTI.BroadcastLdFrag addr:$src3))))>, T8, PD, EVEX, VVVV, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } -multiclass VBMI2_shift_var_rm_common Op, string OpStr, SDNode OpNode, +multiclass VBMI2_shift_var_rm_common Op, string OpStr, SDNode OpNode, bit SwapLR, X86SchedWriteWidths sched, AVX512VLVectorVTInfo VTI> { let Predicates = [HasVBMI2] in - defm Z : VBMI2_shift_var_rm, + defm Z : VBMI2_shift_var_rm, EVEX_V512; let Predicates = [HasVBMI2, HasVLX] in { - defm Z256 : VBMI2_shift_var_rm, + defm Z256 : VBMI2_shift_var_rm, EVEX_V256; - defm Z128 : VBMI2_shift_var_rm, + defm Z128 : VBMI2_shift_var_rm, EVEX_V128; } } -multiclass VBMI2_shift_var_rmb_common Op, string OpStr, SDNode OpNode, +multiclass VBMI2_shift_var_rmb_common Op, string OpStr, SDNode OpNode, bit SwapLR, X86SchedWriteWidths sched, AVX512VLVectorVTInfo VTI> { let Predicates = [HasVBMI2] in - defm Z : VBMI2_shift_var_rmb, + defm Z : VBMI2_shift_var_rmb, EVEX_V512; let Predicates = [HasVBMI2, HasVLX] in { - defm Z256 : VBMI2_shift_var_rmb, + defm Z256 : VBMI2_shift_var_rmb, EVEX_V256; - defm Z128 : VBMI2_shift_var_rmb, + defm Z128 : VBMI2_shift_var_rmb, EVEX_V128; } } multiclass VBMI2_shift_var wOp, bits<8> dqOp, string Prefix, - SDNode OpNode, X86SchedWriteWidths sched> { - defm W : VBMI2_shift_var_rm_common { + defm W : VBMI2_shift_var_rm_common, REX_W, EVEX_CD8<16, 
CD8VF>; - defm D : VBMI2_shift_var_rmb_common, EVEX_CD8<32, CD8VF>; - defm Q : VBMI2_shift_var_rmb_common, REX_W, EVEX_CD8<64, CD8VF>; } @@ -12379,8 +12385,8 @@ multiclass VBMI2_shift_imm wOp, bits<8> dqOp, string Prefix, } // Concat & Shift -defm VPSHLDV : VBMI2_shift_var<0x70, 0x71, "vpshldv", X86VShldv, SchedWriteVecIMul>; -defm VPSHRDV : VBMI2_shift_var<0x72, 0x73, "vpshrdv", X86VShrdv, SchedWriteVecIMul>; +defm VPSHLDV : VBMI2_shift_var<0x70, 0x71, "vpshldv", fshl, 0, SchedWriteVecIMul>; +defm VPSHRDV : VBMI2_shift_var<0x72, 0x73, "vpshrdv", fshr, 1, SchedWriteVecIMul>; defm VPSHLD : VBMI2_shift_imm<0x70, 0x71, "vpshld", X86VShld, SchedWriteVecIMul>; defm VPSHRD : VBMI2_shift_imm<0x72, 0x73, "vpshrd", X86VShrd, SchedWriteVecIMul>; diff --git a/llvm/lib/Target/X86/X86InstrArithmetic.td b/llvm/lib/Target/X86/X86InstrArithmetic.td index b476859069a57..031fdc1e7162c 100644 --- a/llvm/lib/Target/X86/X86InstrArithmetic.td +++ b/llvm/lib/Target/X86/X86InstrArithmetic.td @@ -25,18 +25,12 @@ let SchedRW = [WriteLEA] in { [(set GR32:$dst, lea32addr:$src)]>, OpSize32, Requires<[Not64BitMode]>; - let Predicates = [HasNDD], isCodeGenOnly = 1 in { - def LEA64_8r : I<0x8D, MRMSrcMem, (outs GR8:$dst), (ins lea64_8mem:$src), - "lea{b}\t{$src|$dst}, {$dst|$src}", - [(set GR8:$dst, lea64_iaddr:$src)]>, - OpSize16, - Requires<[In64BitMode]>; - - def LEA64_16r : I<0x8D, MRMSrcMem, (outs GR16:$dst), (ins lea64_16mem:$src), - "lea{w}\t{$src|$dst}, {$dst|$src}", - [(set GR16:$dst, lea64_iaddr:$src)]>, - OpSize16, - Requires<[In64BitMode]>; + let isCodeGenOnly = 1 in { + def LEA64_8r : I<0x8D, MRMSrcMem, (outs GR32:$dst), (ins lea64_8mem:$src), + "lea{l}\t{$src|$dst}, {$dst|$src}", []>, OpSize32; + + def LEA64_16r : I<0x8D, MRMSrcMem, (outs GR32:$dst), (ins lea64_16mem:$src), + "lea{l}\t{$src|$dst}, {$dst|$src}", []>, OpSize32; } def LEA64_32r : I<0x8D, MRMSrcMem, (outs GR32:$dst), (ins lea64_32mem:$src), @@ -51,6 +45,11 @@ let SchedRW = [WriteLEA] in { [(set GR64:$dst, lea64addr:$src)]>; } // SchedRW +let Predicates = [HasNDD] in { + def : Pat<(i8 lea64_iaddr:$src), (EXTRACT_SUBREG (LEA64_8r lea64_8mem:$src), sub_8bit)>; + def : Pat<(i16 lea64_iaddr:$src), (EXTRACT_SUBREG (LEA64_16r lea64_16mem:$src), sub_16bit)>; +} + // Pseudo instruction for lea that prevent optimizer from eliminating // the instruction. 
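With the X86VShldv/X86VShrdv nodes removed, the VPSHLDV/VPSHRDV patterns above are selected directly from the generic fshl/fshr nodes (the new SwapLR bit flips the pattern's operand order for the right-shift form). For reference, a scalar model of LLVM's funnel-shift semantics, which is what those patterns now match; this states the generic definition only, not the exact register-to-operand mapping of the instructions:

#include <cassert>
#include <cstdint>

// fshl(Hi, Lo, Amt): shift the concatenation Hi:Lo left by Amt % BW and keep
// the high half.  fshr shifts the concatenation right and keeps the low half.
static uint16_t fshl16(uint16_t Hi, uint16_t Lo, unsigned Amt) {
  Amt %= 16;
  return Amt ? (uint16_t)((Hi << Amt) | (Lo >> (16 - Amt))) : Hi;
}
static uint16_t fshr16(uint16_t Hi, uint16_t Lo, unsigned Amt) {
  Amt %= 16;
  return Amt ? (uint16_t)((Hi << (16 - Amt)) | (Lo >> Amt)) : Lo;
}

int main() {
  assert(fshl16(0x00ff, 0xff00, 4) == 0x0fff);
  assert(fshr16(0x00ff, 0xff00, 4) == 0xfff0);
}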
let SchedRW = [WriteLEA], isPseudo = true, hasSideEffects = 1 in { diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td index 0c20ffed77e77..5321ecf0c1b2c 100644 --- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -406,16 +406,6 @@ def X86VAlign : SDNode<"X86ISD::VALIGN", SDTShuff3OpI>; def X86VShld : SDNode<"X86ISD::VSHLD", SDTShuff3OpI>; def X86VShrd : SDNode<"X86ISD::VSHRD", SDTShuff3OpI>; -def X86VShldv : SDNode<"X86ISD::VSHLDV", - SDTypeProfile<1, 3, [SDTCisVec<0>, - SDTCisSameAs<0,1>, - SDTCisSameAs<0,2>, - SDTCisSameAs<0,3>]>>; -def X86VShrdv : SDNode<"X86ISD::VSHRDV", - SDTypeProfile<1, 3, [SDTCisVec<0>, - SDTCisSameAs<0,1>, - SDTCisSameAs<0,2>, - SDTCisSameAs<0,3>]>>; def X86Conflict : SDNode<"X86ISD::CONFLICT", SDTIntUnaryOp>; diff --git a/llvm/lib/Target/X86/X86InstrGISel.td b/llvm/lib/Target/X86/X86InstrGISel.td index 39198214037a3..b0c6bb6f61ad8 100644 --- a/llvm/lib/Target/X86/X86InstrGISel.td +++ b/llvm/lib/Target/X86/X86InstrGISel.td @@ -34,6 +34,14 @@ def G_FNSTCW16 : X86GenericInstruction { let mayStore = true; } +def G_FLDCW16 : X86GenericInstruction { + let OutOperandList = (outs); + let InOperandList = (ins ptype0:$src); + let hasSideEffects = true; + let mayLoad = true; +} + def : GINodeEquiv; def : GINodeEquiv; def : GINodeEquiv; +def : GINodeEquiv; diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 58d526269ff3c..1d2cd39951bf4 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -44,6 +44,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetOptions.h" +#include #include using namespace llvm; @@ -755,7 +756,7 @@ static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) { return isPICBase; } -bool X86InstrInfo::isReallyTriviallyReMaterializable( +bool X86InstrInfo::isReMaterializableImpl( const MachineInstr &MI) const { switch (MI.getOpcode()) { default: @@ -951,7 +952,7 @@ bool X86InstrInfo::isReallyTriviallyReMaterializable( break; } } - return TargetInstrInfo::isReallyTriviallyReMaterializable(MI); + return TargetInstrInfo::isReMaterializableImpl(MI); } void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB, @@ -2573,10 +2574,13 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, case X86::VCMPPSZ256rri: case X86::VCMPPDZrrik: case X86::VCMPPSZrrik: + case X86::VCMPPHZrrik: case X86::VCMPPDZ128rrik: case X86::VCMPPSZ128rrik: + case X86::VCMPPHZ128rrik: case X86::VCMPPDZ256rrik: case X86::VCMPPSZ256rrik: + case X86::VCMPPHZ256rrik: WorkingMI = CloneIfNew(MI); WorkingMI->getOperand(MI.getNumExplicitOperands() - 1) .setImm(X86::getSwappedVCMPImm( @@ -2830,10 +2834,13 @@ bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI, case X86::VCMPPSZ256rri: case X86::VCMPPDZrrik: case X86::VCMPPSZrrik: + case X86::VCMPPHZrrik: case X86::VCMPPDZ128rrik: case X86::VCMPPSZ128rrik: + case X86::VCMPPHZ128rrik: case X86::VCMPPDZ256rrik: - case X86::VCMPPSZ256rrik: { + case X86::VCMPPSZ256rrik: + case X86::VCMPPHZ256rrik: { unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0; // Float comparison can be safely commuted for @@ -8106,6 +8113,39 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl( MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, LiveIntervals *LIS) const { + // If LoadMI is a masked load, check MI having the same mask. 
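The new VCMPPH*rrik commute cases reuse the existing approach of swapping the compare-predicate immediate (via getSwappedVCMPImm) when the two source operands are exchanged. The underlying identity is an IEEE-754 fact rather than anything specific to this patch: a predicate and its swapped form agree once the operands are exchanged, including the unordered (NaN) case where both are false.

#include <cassert>
#include <cmath>

int main() {
  double Vals[] = {1.0, 2.0, NAN};
  for (double A : Vals)
    for (double B : Vals) {
      assert((A < B) == (B > A));   // OLT vs. swapped OGT
      assert((A <= B) == (B >= A)); // OLE vs. swapped OGE
    }
}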
+ const MCInstrDesc &MCID = get(LoadMI.getOpcode()); + unsigned NumOps = MCID.getNumOperands(); + if (NumOps >= 3) { + Register MaskReg; + const MachineOperand &Op1 = LoadMI.getOperand(1); + const MachineOperand &Op2 = LoadMI.getOperand(2); + + auto IsVKWMClass = [](const TargetRegisterClass *RC) { + return RC == &X86::VK2WMRegClass || RC == &X86::VK4WMRegClass || + RC == &X86::VK8WMRegClass || RC == &X86::VK16WMRegClass || + RC == &X86::VK32WMRegClass || RC == &X86::VK64WMRegClass; + }; + + if (Op1.isReg() && IsVKWMClass(getRegClass(MCID, 1, &RI))) + MaskReg = Op1.getReg(); + else if (Op2.isReg() && IsVKWMClass(getRegClass(MCID, 2, &RI))) + MaskReg = Op2.getReg(); + + if (MaskReg) { + bool HasSameMask = false; + for (unsigned I = 1, E = MI.getDesc().getNumOperands(); I < E; ++I) { + const MachineOperand &Op = MI.getOperand(I); + if (Op.isReg() && Op.getReg() == MaskReg) { + HasSameMask = true; + break; + } + } + if (!HasSameMask) + return nullptr; + } + } + // TODO: Support the case where LoadMI loads a wide register, but MI // only uses a subreg. for (auto Op : Ops) { @@ -8114,7 +8154,6 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl( } // If loading from a FrameIndex, fold directly from the FrameIndex. - unsigned NumOps = LoadMI.getDesc().getNumOperands(); int FrameIndex; if (isLoadFromStackSlot(LoadMI, FrameIndex)) { if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h index 86133b3d969b1..5f75559bd9598 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.h +++ b/llvm/lib/Target/X86/X86InstrInfo.h @@ -340,7 +340,7 @@ class X86InstrInfo final : public X86GenInstrInfo { Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override; - bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override; + bool isReMaterializableImpl(const MachineInstr &MI) const override; void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp index 278ae46b8a5f5..0ba71ada8638e 100644 --- a/llvm/lib/Target/X86/X86LowerAMXType.cpp +++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp @@ -854,6 +854,7 @@ class X86LowerAMXCast { : Func(F), SC(ShapeC), DT(nullptr) {} bool combineCastStore(IntrinsicInst *Cast, StoreInst *ST); bool combineLoadCast(IntrinsicInst *Cast, LoadInst *LD); + bool combineTilezero(IntrinsicInst *Cast); bool combineLdSt(SmallVectorImpl &Casts); bool combineAMXcast(TargetLibraryInfo *TLI); bool transformAMXCast(IntrinsicInst *AMXCast); @@ -1175,6 +1176,26 @@ bool X86LowerAMXCast::combineLoadCast(IntrinsicInst *Cast, LoadInst *LD) { return EraseLoad; } +// %19 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> zeroinitializer) +// --> +// %19 = tail call x86_amx @llvm.x86.tilezero.internal(i16 %row, i16 %col) +bool X86LowerAMXCast::combineTilezero(IntrinsicInst *Cast) { + Value *Row = nullptr, *Col = nullptr; + Use &U = *(Cast->use_begin()); + unsigned OpNo = U.getOperandNo(); + auto *II = cast(U.getUser()); + if (!isAMXIntrinsic(II)) + return false; + + std::tie(Row, Col) = SC->getShape(II, OpNo); + + IRBuilder<> Builder(Cast); + Value *NewInst = + Builder.CreateIntrinsic(Intrinsic::x86_tilezero_internal, {}, {Row, Col}); + Cast->replaceAllUsesWith(NewInst); + return true; +} + bool X86LowerAMXCast::combineLdSt(SmallVectorImpl &Casts) { bool Change = false; for (auto *Cast : Casts) { @@ 
-1198,6 +1219,14 @@ bool X86LowerAMXCast::combineLdSt(SmallVectorImpl &Casts) { for (auto *Store : DeadStores) Store->eraseFromParent(); } else { // x86_cast_vector_to_tile + // %19 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> zeroinitializer) + // --> + // %19 = tail call x86_amx @llvm.x86.tilezero.internal(i16 %row, i16 %col) + if (isa(Cast->getOperand(0))) { + Change |= combineTilezero(cast(Cast)); + continue; + } + auto *Load = dyn_cast(Cast->getOperand(0)); if (!Load || !Load->hasOneUse()) continue; @@ -1210,6 +1239,7 @@ bool X86LowerAMXCast::combineLdSt(SmallVectorImpl &Casts) { // Set the operand is null so that load instruction can be erased. Cast->setOperand(0, nullptr); Load->eraseFromParent(); + Change = true; } } } diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp index 40de36d81ddd2..805bdb41737c1 100644 --- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp +++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp @@ -29,6 +29,7 @@ #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/PatternMatch.h" +#include "llvm/IR/ProfDataUtils.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/BuildLibCalls.h" #include "llvm/Transforms/Utils/Local.h" @@ -1283,11 +1284,19 @@ void StrNCmpInliner::inlineCompare(Value *LHS, StringRef RHS, uint64_t N, Value *VR = ConstantInt::get(CI->getType(), static_cast(RHS[i])); Value *Sub = Swapped ? B.CreateSub(VR, VL) : B.CreateSub(VL, VR); - if (i < N - 1) - B.CreateCondBr(B.CreateICmpNE(Sub, ConstantInt::get(CI->getType(), 0)), - BBNE, BBSubs[i + 1]); - else + if (i < N - 1) { + BranchInst *CondBrInst = B.CreateCondBr( + B.CreateICmpNE(Sub, ConstantInt::get(CI->getType(), 0)), BBNE, + BBSubs[i + 1]); + + Function *F = CI->getFunction(); + assert(F && "Instruction does not belong to a function!"); + std::optional EC = F->getEntryCount(); + if (EC && EC->getCount() > 0) + setExplicitlyUnknownBranchWeights(*CondBrInst, DEBUG_TYPE); + } else { B.CreateBr(BBNE); + } Phi->addIncoming(Sub, BBSubs[i]); } @@ -1341,6 +1350,10 @@ static bool foldMemChr(CallInst *Call, DomTreeUpdater *DTU, BB->getTerminator()->eraseFromParent(); SwitchInst *SI = IRB.CreateSwitch( IRB.CreateTrunc(Call->getArgOperand(1), ByteTy), BBNext, N); + // We can't know the precise weights here, as they would depend on the value + // distribution of Call->getArgOperand(1). So we just mark it as "unknown". 
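The branch-weight changes above annotate control flow that StrNCmpInliner and foldMemChr synthesize. Roughly, the strncmp expansion compares one byte at a time against the constant operand and exits on the first non-zero difference; those early-exit conditional branches are the ones now tagged with explicitly "unknown" weights when the caller carries profile data. A rough scalar model of the expanded shape (the constant "ab" and length 2 are made up for illustration):

#include <cstdio>

// Hypothetical expansion of strncmp(LHS, "ab", 2): per-byte subtraction with
// an early exit; the early-exit branch is the one given "unknown" weights.
static int strncmp2Expanded(const char *LHS) {
  const char RHS[2] = {'a', 'b'};
  int Sub = (unsigned char)LHS[0] - (unsigned char)RHS[0];
  if (Sub != 0)   // data-dependent, so no meaningful weight can be assigned
    return Sub;
  return (unsigned char)LHS[1] - (unsigned char)RHS[1];
}

int main() { printf("%d\n", strncmp2Expanded("ac")); } // 1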
+ setExplicitlyUnknownBranchWeightsIfProfiled(*SI, *Call->getFunction(), + DEBUG_TYPE); Type *IndexTy = DL.getIndexType(Call->getType()); SmallVector Updates; diff --git a/llvm/lib/Transforms/Coroutines/CoroCleanup.cpp b/llvm/lib/Transforms/Coroutines/CoroCleanup.cpp index c00e9c7bbee06..81efca9dfd209 100644 --- a/llvm/lib/Transforms/Coroutines/CoroCleanup.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroCleanup.cpp @@ -75,8 +75,8 @@ bool Lowerer::lower(Function &F) { case Intrinsic::coro_subfn_addr: lowerSubFn(Builder, cast(II)); break; - case Intrinsic::coro_end: case Intrinsic::coro_suspend_retcon: + case Intrinsic::coro_is_in_ramp: if (IsPrivateAndUnprocessed) { II->replaceAllUsesWith(PoisonValue::get(II->getType())); } else diff --git a/llvm/lib/Transforms/Coroutines/CoroCloner.h b/llvm/lib/Transforms/Coroutines/CoroCloner.h index d1887980fb3bc..26ec4f3ed6a8c 100644 --- a/llvm/lib/Transforms/Coroutines/CoroCloner.h +++ b/llvm/lib/Transforms/Coroutines/CoroCloner.h @@ -120,6 +120,7 @@ class BaseCloner { void replaceRetconOrAsyncSuspendUses(); void replaceCoroSuspends(); void replaceCoroEnds(); + void replaceCoroIsInRamp(); void replaceSwiftErrorOps(); void salvageDebugInfo(); void handleFinalSuspend(); diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp index 02c38d02cff64..c2d7bcc346776 100644 --- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp @@ -213,7 +213,7 @@ static bool replaceCoroEndAsync(AnyCoroEndInst *End) { /// Replace a non-unwind call to llvm.coro.end. static void replaceFallthroughCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, - bool InResume, CallGraph *CG) { + bool InRamp, CallGraph *CG) { // Start inserting right before the coro.end. IRBuilder<> Builder(End); @@ -225,7 +225,7 @@ static void replaceFallthroughCoroEnd(AnyCoroEndInst *End, "switch coroutine should not return any values"); // coro.end doesn't immediately end the coroutine in the main function // in this lowering, because we need to deallocate the coroutine. - if (!InResume) + if (InRamp) return; Builder.CreateRetVoid(); break; @@ -345,8 +345,7 @@ static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape, /// Replace an unwind call to llvm.coro.end. static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, - Value *FramePtr, bool InResume, - CallGraph *CG) { + Value *FramePtr, bool InRamp, CallGraph *CG) { IRBuilder<> Builder(End); switch (Shape.ABI) { @@ -359,7 +358,7 @@ static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, // FIXME: We should refactor this once there is other language // which uses Switch-Resumed style other than C++. markCoroutineAsDone(Builder, Shape, FramePtr); - if (!InResume) + if (InRamp) return; break; } @@ -383,15 +382,11 @@ static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, } static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, - Value *FramePtr, bool InResume, CallGraph *CG) { + Value *FramePtr, bool InRamp, CallGraph *CG) { if (End->isUnwind()) - replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG); + replaceUnwindCoroEnd(End, Shape, FramePtr, InRamp, CG); else - replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG); - - auto &Context = End->getContext(); - End->replaceAllUsesWith(InResume ? 
ConstantInt::getTrue(Context) - : ConstantInt::getFalse(Context)); + replaceFallthroughCoroEnd(End, Shape, FramePtr, InRamp, CG); End->eraseFromParent(); } @@ -558,7 +553,16 @@ void coro::BaseCloner::replaceCoroEnds() { // We use a null call graph because there's no call graph node for // the cloned function yet. We'll just be rebuilding that later. auto *NewCE = cast(VMap[CE]); - replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr); + replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in ramp*/ false, nullptr); + } +} + +void coro::BaseCloner::replaceCoroIsInRamp() { + auto &Ctx = OrigF.getContext(); + for (auto *II : Shape.CoroIsInRampInsts) { + auto *NewII = cast(VMap[II]); + NewII->replaceAllUsesWith(ConstantInt::getFalse(Ctx)); + NewII->eraseFromParent(); } } @@ -1077,6 +1081,8 @@ void coro::BaseCloner::create() { // Remove coro.end intrinsics. replaceCoroEnds(); + replaceCoroIsInRamp(); + // Salvage debug info that points into the coroutine frame. salvageDebugInfo(); } @@ -1956,14 +1962,19 @@ class PrettyStackTraceFunction : public PrettyStackTraceEntry { static void removeCoroEndsFromRampFunction(const coro::Shape &Shape) { if (Shape.ABI != coro::ABI::Switch) { for (auto *End : Shape.CoroEnds) { - replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, nullptr); + replaceCoroEnd(End, Shape, Shape.FramePtr, /*in ramp*/ true, nullptr); } } else { - for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) { - auto &Context = End->getContext(); - End->replaceAllUsesWith(ConstantInt::getFalse(Context)); + for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) End->eraseFromParent(); - } + } +} + +static void removeCoroIsInRampFromRampFunction(const coro::Shape &Shape) { + for (auto *II : Shape.CoroIsInRampInsts) { + auto &Ctx = II->getContext(); + II->replaceAllUsesWith(ConstantInt::getTrue(Ctx)); + II->eraseFromParent(); } } @@ -2028,6 +2039,7 @@ static void doSplitCoroutine(Function &F, SmallVectorImpl &Clones, coro::salvageDebugInfo(ArgToAllocaMap, *DVR, false /*UseEntryValue*/); removeCoroEndsFromRampFunction(Shape); + removeCoroIsInRampFromRampFunction(Shape); if (shouldCreateNoAllocVariant) SwitchCoroutineSplitter::createNoAllocVariant(F, Shape, Clones); diff --git a/llvm/lib/Transforms/Coroutines/Coroutines.cpp b/llvm/lib/Transforms/Coroutines/Coroutines.cpp index 28a89a8f87dbd..47c2d0d462e00 100644 --- a/llvm/lib/Transforms/Coroutines/Coroutines.cpp +++ b/llvm/lib/Transforms/Coroutines/Coroutines.cpp @@ -93,6 +93,7 @@ static Intrinsic::ID NonOverloadedCoroIntrinsics[] = { Intrinsic::coro_save, Intrinsic::coro_subfn_addr, Intrinsic::coro_suspend, + Intrinsic::coro_is_in_ramp, }; bool coro::isSuspendBlock(BasicBlock *BB) { @@ -275,6 +276,9 @@ void coro::Shape::analyze(Function &F, } } break; + case Intrinsic::coro_is_in_ramp: + CoroIsInRampInsts.push_back(cast(II)); + break; case Intrinsic::coro_promise: assert(CoroPromise == nullptr && "CoroEarly must ensure coro.promise unique"); diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp index 83aa7de5400f5..28ee4449421bd 100644 --- a/llvm/lib/Transforms/IPO/FunctionImport.cpp +++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp @@ -72,6 +72,7 @@ STATISTIC(NumImportedModules, "Number of modules imported from"); STATISTIC(NumDeadSymbols, "Number of dead stripped symbols in index"); STATISTIC(NumLiveSymbols, "Number of live symbols in index"); +namespace llvm { cl::opt ForceImportAll("force-import-all", cl::init(false), cl::Hidden, cl::desc("Import functions with noinline attribute")); @@ 
-185,9 +186,8 @@ static cl::opt CtxprofMoveRootsToOwnModule( extern cl::list MoveSymbolGUID; -namespace llvm { extern cl::opt EnableMemProfContextDisambiguation; -} +} // end namespace llvm // Load lazily a module from \p FileName in \p Context. static std::unique_ptr loadFile(const std::string &FileName, diff --git a/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp b/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp index 4f5373846f43a..150a2dc5d48e2 100644 --- a/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp +++ b/llvm/lib/Transforms/IPO/FunctionSpecialization.cpp @@ -28,10 +28,13 @@ using namespace llvm; STATISTIC(NumSpecsCreated, "Number of specializations created"); +namespace llvm { + static cl::opt ForceSpecialization( - "force-specialization", cl::init(false), cl::Hidden, cl::desc( - "Force function specialization for every call site with a constant " - "argument")); + "force-specialization", cl::init(false), cl::Hidden, + cl::desc( + "Force function specialization for every call site with a constant " + "argument")); static cl::opt MaxClones( "funcspec-max-clones", cl::init(3), cl::Hidden, cl::desc( @@ -91,6 +94,8 @@ static cl::opt SpecializeLiteralConstant( extern cl::opt ProfcheckDisableMetadataFixes; +} // end namespace llvm + bool InstCostVisitor::canEliminateSuccessor(BasicBlock *BB, BasicBlock *Succ) const { unsigned I = 0; diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp index f88d51f443bcf..99c4982c58b47 100644 --- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp +++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp @@ -1680,7 +1680,9 @@ processGlobal(GlobalValue &GV, /// FastCC. static void ChangeCalleesToFastCall(Function *F) { for (User *U : F->users()) - cast(U)->setCallingConv(CallingConv::Fast); + if (auto *Call = dyn_cast(U)) + if (Call->getCalledOperand() == F) + Call->setCallingConv(CallingConv::Fast); } static AttributeList StripAttr(LLVMContext &C, AttributeList Attrs, @@ -1766,10 +1768,12 @@ isValidCandidateForColdCC(Function &F, return false; for (User *U : F.users()) { - CallBase &CB = cast(*U); - Function *CallerFunc = CB.getParent()->getParent(); + CallBase *CB = dyn_cast(U); + if (!CB || CB->getCalledOperand() != &F) + continue; + Function *CallerFunc = CB->getParent()->getParent(); BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc); - if (!isColdCallSite(CB, CallerBFI)) + if (!isColdCallSite(*CB, CallerBFI)) return false; if (!llvm::is_contained(AllCallsCold, CallerFunc)) return false; @@ -1779,7 +1783,9 @@ isValidCandidateForColdCC(Function &F, static void changeCallSitesToColdCC(Function *F) { for (User *U : F->users()) - cast(U)->setCallingConv(CallingConv::Cold); + if (auto *Call = dyn_cast(U)) + if (Call->getCalledOperand() == F) + Call->setCallingConv(CallingConv::Cold); } // This function iterates over all the call instructions in the input Function diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp index 7b67e60f7cc61..ddb95a4184756 100644 --- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp +++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp @@ -214,11 +214,12 @@ static cl::opt MemProfRequireDefinitionForPromotion( "memprof-require-definition-for-promotion", cl::init(false), cl::Hidden, cl::desc( "Require target function definition when promoting indirect calls")); -} // namespace llvm extern cl::opt MemProfReportHintedSizes; extern cl::opt MinClonedColdBytePercent; +} // namespace llvm + namespace { /// CRTP base for 
graphs built from either IR or ThinLTO summary index. /// @@ -3980,7 +3981,6 @@ void CallsiteContextGraph::identifyClones( void ModuleCallsiteContextGraph::updateAllocationCall( CallInfo &Call, AllocationType AllocType) { std::string AllocTypeString = getAllocTypeAttributeString(AllocType); - removeAnyExistingAmbiguousAttribute(cast(Call.call())); auto A = llvm::Attribute::get(Call.call()->getFunction()->getContext(), "memprof", AllocTypeString); cast(Call.call())->addFnAttr(A); @@ -4526,6 +4526,16 @@ void CallsiteContextGraph:: // If Clone not already assigned to a function clone: // Assign to first function clone without assignment // Assign caller to selected function clone +// For each call with graph Node having clones: +// If number func clones > number call's callsite Node clones: +// Record func CallInfo clones without Node clone in UnassignedCallClones +// For callsite Nodes in DFS order from allocations: +// If IsAllocation: +// Update allocation with alloc type +// Else: +// For Call, all MatchingCalls, and associated UnnassignedCallClones: +// Update call to call recorded callee clone +// template bool CallsiteContextGraph::assignFunctions() { bool Changed = false; @@ -4553,6 +4563,34 @@ bool CallsiteContextGraph::assignFunctions() { DenseMap CallMap; }; + // Map to keep track of information needed to update calls in function clones + // when their corresponding callsite node was not itself cloned for that + // function clone. Because of call context pruning (i.e. we only keep as much + // caller information as needed to distinguish hot vs cold), we may not have + // caller edges coming to each callsite node from all possible function + // callers. A function clone may get created for other callsites in the + // function for which there are caller edges that were not pruned. Any other + // callsites in that function clone, which were not themselved cloned for + // that function clone, should get updated the same way as the corresponding + // callsite in the original function (which may call a clone of its callee). + // + // We build this map after completing function cloning for each function, so + // that we can record the information from its call maps before they are + // destructed. The map will be used as we update calls to update any still + // unassigned call clones. Note that we may create new node clones as we clone + // other functions, so later on we check which node clones were still not + // created. To this end, the inner map is a map from function clone number to + // the list of calls cloned for that function (can be more than one due to the + // Node's MatchingCalls array). + // + // The alternative is creating new callsite clone nodes below as we clone the + // function, but that is tricker to get right and likely more overhead. + // + // Inner map is a std::map so sorted by key (clone number), in order to get + // ordered remarks in the full LTO case. + DenseMap>> + UnassignedCallClones; + // Walk all functions for which we saw calls with memprof metadata, and handle // cloning for each of its calls. for (auto &[Func, CallsWithMetadata] : FuncToCallsWithMetadata) { @@ -4996,6 +5034,63 @@ bool CallsiteContextGraph::assignFunctions() { } } } + + if (FuncCloneInfos.size() < 2) + continue; + + // In this case there is more than just the original function copy. + // Record call clones of any callsite nodes in the function that did not + // themselves get cloned for all of the function clones. 
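The comment above motivates the shape of UnassignedCallClones: the outer DenseMap is keyed by callsite node, while the inner map is an ordered std::map keyed by function-clone number so that the resulting remarks are emitted in a deterministic order in the full LTO case. A trivial sketch of that ordering property (container choice only; the element types here are placeholders):

#include <cstdio>
#include <map>
#include <vector>

int main() {
  // Clone number -> calls recorded for that clone (placeholder strings).
  std::map<unsigned, std::vector<const char *>> CallsByClone;
  CallsByClone[3].push_back("call in clone 3");
  CallsByClone[1].push_back("call in clone 1");
  for (const auto &[CloneNo, Calls] : CallsByClone) // visits 1, then 3
    for (const char *C : Calls)
      printf("clone %u: %s\n", CloneNo, C);
}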
+ for (auto &Call : CallsWithMetadata) { + ContextNode *Node = getNodeForInst(Call); + if (!Node || !Node->hasCall() || Node->emptyContextIds()) + continue; + // If Node has enough clones already to cover all function clones, we can + // skip it. Need to add one for the original copy. + // Use >= in case there were clones that were skipped due to having empty + // context ids + if (Node->Clones.size() + 1 >= FuncCloneInfos.size()) + continue; + // First collect all function clones we cloned this callsite node for. + // They may not be sequential due to empty clones e.g. + DenseSet NodeCallClones; + for (auto *C : Node->Clones) + NodeCallClones.insert(C->Call.cloneNo()); + unsigned I = 0; + // Now check all the function clones. + for (auto &FC : FuncCloneInfos) { + // Function clones should be sequential. + assert(FC.FuncClone.cloneNo() == I); + // Skip the first clone which got the original call. + // Also skip any other clones created for this Node. + if (++I == 1 || NodeCallClones.contains(I)) { + continue; + } + // Record the call clones created for this callsite in this function + // clone. + auto &CallVector = UnassignedCallClones[Node][I]; + DenseMap &CallMap = FC.CallMap; + if (auto It = CallMap.find(Call); It != CallMap.end()) { + CallInfo CallClone = It->second; + CallVector.push_back(CallClone); + } else { + // All but the original clone (skipped earlier) should have an entry + // for all calls. + assert(false && "Expected to find call in CallMap"); + } + // Need to do the same for all matching calls. + for (auto &MatchingCall : Node->MatchingCalls) { + if (auto It = CallMap.find(MatchingCall); It != CallMap.end()) { + CallInfo CallClone = It->second; + CallVector.push_back(CallClone); + } else { + // All but the original clone (skipped earlier) should have an entry + // for all calls. + assert(false && "Expected to find call in CallMap"); + } + } + } + } } uint8_t BothTypes = @@ -5057,6 +5152,26 @@ bool CallsiteContextGraph::assignFunctions() { // Update all the matching calls as well. for (auto &Call : Node->MatchingCalls) updateCall(Call, CalleeFunc); + + // Now update all calls recorded earlier that are still in function clones + // which don't have a clone of this callsite node. + if (!UnassignedCallClones.contains(Node)) + return; + DenseSet NodeCallClones; + for (auto *C : Node->Clones) + NodeCallClones.insert(C->Call.cloneNo()); + // Note that we already confirmed Node is in this map a few lines above. + auto &ClonedCalls = UnassignedCallClones[Node]; + for (auto &[CloneNo, CallVector] : ClonedCalls) { + // Should start at 1 as we never create an entry for original node. + assert(CloneNo > 0); + // If we subsequently created a clone, skip this one. + if (NodeCallClones.contains(CloneNo)) + continue; + // Use the original Node's CalleeFunc. + for (auto &Call : CallVector) + updateCall(Call, CalleeFunc); + } }; // Performs DFS traversal starting from allocation nodes to update calls to @@ -5527,7 +5642,6 @@ bool MemProfContextDisambiguation::applyImport(Module &M) { // clone J-1 (J==0 is the original clone and does not have a VMaps // entry). 
CBClone = cast((*VMaps[J - 1])[CB]); - removeAnyExistingAmbiguousAttribute(CBClone); CBClone->addFnAttr(A); ORE.emit(OptimizationRemark(DEBUG_TYPE, "MemprofAttribute", CBClone) << ore::NV("AllocationCall", CBClone) << " in clone " diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp index 5bc7e34938127..e39e311dd795f 100644 --- a/llvm/lib/Transforms/IPO/SampleProfile.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp @@ -116,6 +116,8 @@ STATISTIC( NumCSInlinedHitGrowthLimit, "Number of functions with FDO inline stopped due to growth size limit"); +namespace llvm { + // Command line option to specify the file to read samples from. This is // mainly used for debugging. static cl::opt SampleProfileFile( @@ -198,7 +200,6 @@ static cl::opt DisableSampleLoaderInlining( "pass, and merge (or scale) profiles (as configured by " "--sample-profile-merge-inlinee).")); -namespace llvm { cl::opt SortProfiledSCC("sort-profiled-scc-member", cl::init(true), cl::Hidden, cl::desc("Sort profiled recursion by edge weights.")); @@ -1664,8 +1665,9 @@ void SampleProfileLoader::generateMDProfMetadata(Function &F) { else if (OverwriteExistingWeights) I.setMetadata(LLVMContext::MD_prof, nullptr); } else if (!isa(&I)) { - setBranchWeights(I, {static_cast(BlockWeights[BB])}, - /*IsExpected=*/false); + setBranchWeights( + I, ArrayRef{static_cast(BlockWeights[BB])}, + /*IsExpected=*/false); } } } else if (OverwriteExistingWeights || ProfileSampleBlockAccurate) { @@ -1676,7 +1678,8 @@ void SampleProfileLoader::generateMDProfMetadata(Function &F) { if (cast(I).isIndirectCall()) { I.setMetadata(LLVMContext::MD_prof, nullptr); } else { - setBranchWeights(I, {uint32_t(0)}, /*IsExpected=*/false); + setBranchWeights(I, ArrayRef{uint32_t(0)}, + /*IsExpected=*/false); } } } diff --git a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp index 093a39eb4b5d7..70b8614826826 100644 --- a/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfileMatcher.cpp @@ -23,6 +23,8 @@ using namespace sampleprof; #define DEBUG_TYPE "sample-profile-matcher" +namespace llvm { + static cl::opt FuncProfileSimilarityThreshold( "func-profile-similarity-threshold", cl::Hidden, cl::init(80), cl::desc("Consider a profile matches a function if the similarity of their " @@ -55,6 +57,8 @@ static cl::opt SalvageStaleProfileMaxCallsites( cl::desc("The maximum number of callsites in a function, above which stale " "profile matching will be skipped.")); +} // end namespace llvm + void SampleProfileMatcher::findIRAnchors(const Function &F, AnchorMap &IRAnchors) const { // For inlined code, recover the original callsite and callee by finding the diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp index 09bffa7bf5846..ac41fdd988605 100644 --- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp +++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp @@ -120,6 +120,8 @@ STATISTIC(NumVirtConstProp1Bit, "Number of 1 bit virtual constant propagations"); STATISTIC(NumVirtConstProp, "Number of virtual constant propagations"); +namespace llvm { + static cl::opt ClSummaryAction( "wholeprogramdevirt-summary-action", cl::desc("What to do with the summary when running this pass"), @@ -175,6 +177,8 @@ static cl::list extern cl::opt ProfcheckDisableMetadataFixes; +} // end namespace llvm + /// With Clang, a pure virtual class's deleting destructor is emitted as a /// `llvm.trap` intrinsic followed 
by an unreachable IR instruction. In the /// context of whole program devirtualization, the deleting destructor of a pure diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp index d1ca0a6a393c5..59e103cda0230 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -880,11 +880,11 @@ Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) { // zext(bool) + C -> bool ? C + 1 : C if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->getScalarSizeInBits() == 1) - return SelectInst::Create(X, InstCombiner::AddOne(Op1C), Op1); + return createSelectInst(X, InstCombiner::AddOne(Op1C), Op1); // sext(bool) + C -> bool ? C - 1 : C if (match(Op0, m_SExt(m_Value(X))) && X->getType()->getScalarSizeInBits() == 1) - return SelectInst::Create(X, InstCombiner::SubOne(Op1C), Op1); + return createSelectInst(X, InstCombiner::SubOne(Op1C), Op1); // ~X + C --> (C-1) - X if (match(Op0, m_Not(m_Value(X)))) { diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index 4b7793f6e010b..9b272c4721cbd 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -3080,6 +3080,13 @@ InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) { assert(ZextLowShlAmt->uge(HighSize) && ZextLowShlAmt->ule(Width - LowSize) && "Invalid concat"); + // We cannot reuse the result if it may produce poison. + // Drop poison generating flags in the expression tree. + // Or + cast(U)->dropPoisonGeneratingFlags(); + // Shl + cast(X)->dropPoisonGeneratingFlags(); + FShiftArgs = {U, U, ConstantInt::get(Or0->getType(), *ZextHighShlAmt)}; break; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 6ad493772d170..cf6d0ecab4f69 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -64,6 +64,7 @@ #include "llvm/Support/KnownBits.h" #include "llvm/Support/KnownFPClass.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/TypeSize.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/InstCombine/InstCombiner.h" #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" @@ -2405,6 +2406,22 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ true, /*MatchBitReversals*/ true)) return BitOp; + + // R = fshl(X, X, C2) + // fshl(R, R, C1) --> fshl(X, X, (C1 + C2) % bitsize) + Value *InnerOp; + const APInt *ShAmtInnerC, *ShAmtOuterC; + if (match(Op0, m_FShl(m_Value(InnerOp), m_Deferred(InnerOp), + m_APInt(ShAmtInnerC))) && + match(ShAmtC, m_APInt(ShAmtOuterC)) && Op0 == Op1) { + APInt Sum = *ShAmtOuterC + *ShAmtInnerC; + APInt Modulo = Sum.urem(APInt(Sum.getBitWidth(), BitWidth)); + if (Modulo.isZero()) + return replaceInstUsesWith(*II, InnerOp); + Constant *ModuloC = ConstantInt::get(Ty, Modulo); + return CallInst::Create(cast(Op0)->getCalledFunction(), + {InnerOp, InnerOp, ModuloC}); + } } // fshl(X, X, Neg(Y)) --> fshr(X, X, Y) @@ -3412,6 +3429,10 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { !isPowerOf2_64(RK.ArgValue) || !isa(RK.IRArgValue)) continue; + // Remove align 1 bundles; they don't add any useful information. 
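The new fshl fold above composes two rotates by constant amounts into one. For the self-funnel (rotate) case the identity is plain modular arithmetic: rotating by C2 and then by C1 equals a single rotate by (C1 + C2) % bitwidth. An exhaustive check at 8 bits:

#include <cassert>
#include <cstdint>

static uint8_t rotl8(uint8_t X, unsigned Amt) { // models fshl(X, X, Amt) on i8
  Amt %= 8;
  return Amt ? (uint8_t)((X << Amt) | (X >> (8 - Amt))) : X;
}

int main() {
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned C1 = 0; C1 < 8; ++C1)
      for (unsigned C2 = 0; C2 < 8; ++C2)
        assert(rotl8(rotl8((uint8_t)X, C2), C1) ==
               rotl8((uint8_t)X, (C1 + C2) % 8));
}

When the combined amount is zero, the fold in the patch simply returns the inner operand instead of emitting a rotate by 0.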
+ if (RK.ArgValue == 1) + return CallBase::removeOperandBundle(II, OBU.getTagID()); + // Don't try to remove align assumptions for pointers derived from // arguments. We might lose information if the function gets inline and // the align argument attribute disappears. @@ -3761,6 +3782,17 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { return replaceInstUsesWith(CI, Res); } } + + // vector.reduce.add.vNiM(splat(%x)) -> mul(%x, N) + if (Value *Splat = getSplatValue(Arg)) { + ElementCount VecToReduceCount = + cast(Arg->getType())->getElementCount(); + if (VecToReduceCount.isFixed()) { + unsigned VectorSize = VecToReduceCount.getFixedValue(); + return BinaryOperator::CreateMul( + Splat, ConstantInt::get(Splat->getType(), VectorSize)); + } + } } [[fallthrough]]; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp index e4cb457499ef5..07ad65c8b7d42 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -5780,6 +5780,45 @@ Instruction *InstCombinerImpl::foldICmpWithMinMax(Instruction &I, return nullptr; } +/// Match and fold patterns like: +/// icmp eq/ne X, min(max(X, Lo), Hi) +/// which represents a range check and can be repsented as a ConstantRange. +/// +/// For icmp eq, build ConstantRange [Lo, Hi + 1) and convert to: +/// (X - Lo) u< (Hi + 1 - Lo) +/// For icmp ne, build ConstantRange [Hi + 1, Lo) and convert to: +/// (X - (Hi + 1)) u< (Lo - (Hi + 1)) +Instruction *InstCombinerImpl::foldICmpWithClamp(ICmpInst &I, Value *X, + MinMaxIntrinsic *Min) { + if (!I.isEquality() || !Min->hasOneUse() || !Min->isMin()) + return nullptr; + + const APInt *Lo = nullptr, *Hi = nullptr; + if (Min->isSigned()) { + if (!match(Min->getLHS(), m_OneUse(m_SMax(m_Specific(X), m_APInt(Lo)))) || + !match(Min->getRHS(), m_APInt(Hi)) || !Lo->slt(*Hi)) + return nullptr; + } else { + if (!match(Min->getLHS(), m_OneUse(m_UMax(m_Specific(X), m_APInt(Lo)))) || + !match(Min->getRHS(), m_APInt(Hi)) || !Lo->ult(*Hi)) + return nullptr; + } + + ConstantRange CR = ConstantRange::getNonEmpty(*Lo, *Hi + 1); + ICmpInst::Predicate Pred; + APInt C, Offset; + if (I.getPredicate() == ICmpInst::ICMP_EQ) + CR.getEquivalentICmp(Pred, C, Offset); + else + CR.inverse().getEquivalentICmp(Pred, C, Offset); + + if (!Offset.isZero()) + X = Builder.CreateAdd(X, ConstantInt::get(X->getType(), Offset)); + + return replaceInstUsesWith( + I, Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), C))); +} + // Canonicalize checking for a power-of-2-or-zero value: static Instruction *foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder) { @@ -7467,10 +7506,14 @@ Instruction *InstCombinerImpl::foldICmpCommutative(CmpPredicate Pred, if (Instruction *NI = foldSelectICmp(Pred, SI, Op1, CxtI)) return NI; - if (auto *MinMax = dyn_cast(Op0)) + if (auto *MinMax = dyn_cast(Op0)) { if (Instruction *Res = foldICmpWithMinMax(CxtI, MinMax, Op1, Pred)) return Res; + if (Instruction *Res = foldICmpWithClamp(CxtI, Op1, MinMax)) + return Res; + } + { Value *X; const APInt *C; @@ -8527,6 +8570,9 @@ static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, DenormalMode::getIEEE()) { CI.replaceOperand(I, 0, X); CI.replaceOperand(I, 1, Y); + I.setHasNoInfs(LHSI->hasNoInfs()); + if (LHSI->hasNoNaNs()) + I.setHasNoNaNs(true); return &I; } break; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h index 
7a979c16da501..e01c145bf5de3 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h +++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h @@ -23,6 +23,7 @@ #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/PatternMatch.h" +#include "llvm/IR/ProfDataUtils.h" #include "llvm/IR/Value.h" #include "llvm/Support/Debug.h" #include "llvm/Support/KnownBits.h" @@ -62,14 +63,14 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final public InstVisitor { public: InstCombinerImpl(InstructionWorklist &Worklist, BuilderTy &Builder, - bool MinimizeSize, AAResults *AA, AssumptionCache &AC, + Function &F, AAResults *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const DataLayout &DL, ReversePostOrderTraversal &RPOT) - : InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT, ORE, - BFI, BPI, PSI, DL, RPOT) {} + : InstCombiner(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI, BPI, + PSI, DL, RPOT) {} virtual ~InstCombinerImpl() = default; @@ -469,6 +470,17 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final Value *simplifyNonNullOperand(Value *V, bool HasDereferenceable, unsigned Depth = 0); + SelectInst *createSelectInst(Value *C, Value *S1, Value *S2, + const Twine &NameStr = "", + InsertPosition InsertBefore = nullptr, + Instruction *MDFrom = nullptr) { + SelectInst *SI = + SelectInst::Create(C, S1, S2, NameStr, InsertBefore, MDFrom); + if (!MDFrom) + setExplicitlyUnknownBranchWeightsIfProfiled(*SI, F, DEBUG_TYPE); + return SI; + } + public: /// Create and insert the idiom we use to indicate a block is unreachable /// without having to rewrite the CFG from within InstCombine. 
@@ -713,6 +725,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ); Instruction *foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred); + Instruction *foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min); Instruction *foldICmpEquality(ICmpInst &Cmp); Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I); Instruction *foldSignBitTest(ICmpInst &I); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp index 53e77e6cc5c31..9491610190c10 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -338,8 +338,18 @@ bool PointerReplacer::collectUsers() { if (!TryPushInstOperand(TrueInst) || !TryPushInstOperand(FalseInst)) return false; } else if (auto *GEP = dyn_cast(Inst)) { - UsersToReplace.insert(GEP); - PushUsersToWorklist(GEP); + auto *PtrOp = dyn_cast(GEP->getPointerOperand()); + if (!PtrOp) + return false; + if (isAvailable(PtrOp)) { + UsersToReplace.insert(GEP); + PushUsersToWorklist(GEP); + continue; + } + + Worklist.emplace_back(GEP); + if (!TryPushInstOperand(PtrOp)) + return false; } else if (auto *MI = dyn_cast(Inst)) { if (MI->isVolatile()) return false; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp index 4ea75409252bd..87000a1c36eef 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp @@ -2934,32 +2934,6 @@ static Instruction *foldSelectWithSRem(SelectInst &SI, InstCombinerImpl &IC, return nullptr; } -static Value *foldSelectWithFrozenICmp(SelectInst &Sel, InstCombiner::BuilderTy &Builder) { - FreezeInst *FI = dyn_cast(Sel.getCondition()); - if (!FI) - return nullptr; - - Value *Cond = FI->getOperand(0); - Value *TrueVal = Sel.getTrueValue(), *FalseVal = Sel.getFalseValue(); - - // select (freeze(x == y)), x, y --> y - // select (freeze(x != y)), x, y --> x - // The freeze should be only used by this select. Otherwise, remaining uses of - // the freeze can observe a contradictory value. - // c = freeze(x == y) ; Let's assume that y = poison & x = 42; c is 0 or 1 - // a = select c, x, y ; - // f(a, c) ; f(poison, 1) cannot happen, but if a is folded - // ; to y, this can happen. - CmpPredicate Pred; - if (FI->hasOneUse() && - match(Cond, m_c_ICmp(Pred, m_Specific(TrueVal), m_Specific(FalseVal))) && - (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)) { - return Pred == ICmpInst::ICMP_EQ ? FalseVal : TrueVal; - } - - return nullptr; -} - /// Given that \p CondVal is known to be \p CondIsTrue, try to simplify \p SI. 
static Value *simplifyNestedSelectsUsingImpliedCond(SelectInst &SI, Value *CondVal, @@ -4446,9 +4420,6 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) { if (Instruction *PN = foldSelectToPhi(SI, DT, Builder)) return replaceInstUsesWith(SI, PN); - if (Value *Fr = foldSelectWithFrozenICmp(SI, Builder)) - return replaceInstUsesWith(SI, Fr); - if (Value *V = foldRoundUpIntegerWithPow2Alignment(SI, Builder)) return replaceInstUsesWith(SI, V); @@ -4611,5 +4582,15 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) { return replaceOperand(SI, 2, ConstantInt::get(FalseVal->getType(), 0)); } + Value *MaskedLoadPtr; + const APInt *MaskedLoadAlignment; + if (match(TrueVal, m_OneUse(m_MaskedLoad(m_Value(MaskedLoadPtr), + m_APInt(MaskedLoadAlignment), + m_Specific(CondVal), m_Value())))) + return replaceInstUsesWith( + SI, Builder.CreateMaskedLoad(TrueVal->getType(), MaskedLoadPtr, + Align(MaskedLoadAlignment->getZExtValue()), + CondVal, FalseVal)); + return nullptr; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp index 550f095b26ba4..d457e0c7dd1c4 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp @@ -1253,7 +1253,7 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) { // shl (zext i1 X), C1 --> select (X, 1 << C1, 0) if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) { auto *NewC = Builder.CreateShl(ConstantInt::get(Ty, 1), C1); - return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty)); + return createSelectInst(X, NewC, ConstantInt::getNullValue(Ty)); } } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp index b17cf17db1580..18a45c6799bac 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -319,20 +319,20 @@ Instruction *InstCombinerImpl::foldBitcastExtElt(ExtractElementInst &Ext) { return nullptr; } -/// Find elements of V demanded by UserInstr. -static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) { +/// Find elements of V demanded by UserInstr. If it returns false, we were not able +/// to determine all elements. +static bool findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr, + APInt &UnionUsedElts) { unsigned VWidth = cast(V->getType())->getNumElements(); - // Conservatively assume that all elements are needed.
- APInt UsedElts(APInt::getAllOnes(VWidth)); - switch (UserInstr->getOpcode()) { case Instruction::ExtractElement: { ExtractElementInst *EEI = cast(UserInstr); assert(EEI->getVectorOperand() == V); ConstantInt *EEIIndexC = dyn_cast(EEI->getIndexOperand()); if (EEIIndexC && EEIIndexC->getValue().ult(VWidth)) { - UsedElts = APInt::getOneBitSet(VWidth, EEIIndexC->getZExtValue()); + UnionUsedElts.setBit(EEIIndexC->getZExtValue()); + return true; } break; } @@ -341,23 +341,23 @@ static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) { unsigned MaskNumElts = cast(UserInstr->getType())->getNumElements(); - UsedElts = APInt(VWidth, 0); - for (unsigned i = 0; i < MaskNumElts; i++) { - unsigned MaskVal = Shuffle->getMaskValue(i); + for (auto I : llvm::seq(MaskNumElts)) { + unsigned MaskVal = Shuffle->getMaskValue(I); if (MaskVal == -1u || MaskVal >= 2 * VWidth) continue; if (Shuffle->getOperand(0) == V && (MaskVal < VWidth)) - UsedElts.setBit(MaskVal); + UnionUsedElts.setBit(MaskVal); if (Shuffle->getOperand(1) == V && ((MaskVal >= VWidth) && (MaskVal < 2 * VWidth))) - UsedElts.setBit(MaskVal - VWidth); + UnionUsedElts.setBit(MaskVal - VWidth); } - break; + return true; } default: break; } - return UsedElts; + + return false; } /// Find union of elements of V demanded by all its users. @@ -370,7 +370,8 @@ static APInt findDemandedEltsByAllUsers(Value *V) { APInt UnionUsedElts(VWidth, 0); for (const Use &U : V->uses()) { if (Instruction *I = dyn_cast(U.getUser())) { - UnionUsedElts |= findDemandedEltsBySingleUser(V, I); + if (!findDemandedEltsBySingleUser(V, I, UnionUsedElts)) + return APInt::getAllOnes(VWidth); } else { UnionUsedElts = APInt::getAllOnes(VWidth); break; @@ -723,6 +724,11 @@ static bool replaceExtractElements(InsertElementInst *InsElt, NumExtElts >= NumInsElts) return false; + Value *ExtVecOp = ExtElt->getVectorOperand(); + // Bail out on constant vectors. + if (isa(ExtVecOp)) + return false; + // Create a shuffle mask to widen the extended-from vector using poison // values. The mask selects all of the values of the original vector followed // by as many poison values as needed to create a vector of the same length @@ -733,7 +739,6 @@ static bool replaceExtractElements(InsertElementInst *InsElt, for (unsigned i = NumExtElts; i < NumInsElts; ++i) ExtendMask.push_back(-1); - Value *ExtVecOp = ExtElt->getVectorOperand(); auto *ExtVecOpInst = dyn_cast(ExtVecOp); BasicBlock *InsertionBlock = (ExtVecOpInst && !isa(ExtVecOpInst)) ? 
ExtVecOpInst->getParent() diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp index f0ddd5ca94c5a..917004c4702b6 100644 --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -132,9 +132,11 @@ STATISTIC(NumReassoc , "Number of reassociations"); DEBUG_COUNTER(VisitCounter, "instcombine-visit", "Controls which instructions are visited"); -static cl::opt -EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), - cl::init(true)); +namespace llvm { + +static cl::opt EnableCodeSinking("instcombine-code-sinking", + cl::desc("Enable code sinking"), + cl::init(true)); static cl::opt MaxSinkNumUsers( "instcombine-max-sink-users", cl::init(32), @@ -156,6 +158,8 @@ extern cl::opt ProfcheckDisableMetadataFixes; static cl::opt ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true)); +} // end namespace llvm + std::optional InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) { // Handle target specific intrinsics @@ -1735,7 +1739,7 @@ Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) { Constant *Zero = ConstantInt::getNullValue(BO.getType()); Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C); Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C); - return SelectInst::Create(X, TVal, FVal); + return createSelectInst(X, TVal, FVal); } static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, @@ -5169,6 +5173,7 @@ Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) { // - or: pick -1 // - select's condition: if the true value is constant, choose it by making // the condition true. + // - phi: pick the common constant across operands // - default: pick 0 // // Note that this transform is intentionally done here rather than @@ -5179,17 +5184,43 @@ Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) { // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid // duplicating logic for binops at least. auto getUndefReplacement = [&](Type *Ty) { - Value *BestValue = nullptr; + auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * { + // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be + // removed. 
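+ // E.g., with %f = freeze i32 undef and %p = phi i32 [ %f, %bb0 ], [ 7, %bb1 ], [ 7, %bb2 ], choosing 7 as the replacement lets %p fold to the constant 7 (IR names illustrative).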
+ Constant *BestValue = nullptr; + for (Value *V : PN.incoming_values()) { + if (match(V, m_Freeze(m_Undef()))) + continue; + + Constant *C = dyn_cast(V); + if (!C) + return nullptr; + + if (!isGuaranteedNotToBeUndefOrPoison(C)) + return nullptr; + + if (BestValue && BestValue != C) + return nullptr; + + BestValue = C; + } + return BestValue; + }; + Value *NullValue = Constant::getNullValue(Ty); - for (const auto *U : I.users()) { + Value *BestValue = nullptr; + for (auto *U : I.users()) { Value *V = NullValue; if (match(U, m_Or(m_Value(), m_Value()))) V = ConstantInt::getAllOnesValue(Ty); else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value()))) V = ConstantInt::getTrue(Ty); else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) { - if (!isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT)) + if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT)) V = NullValue; + } else if (auto *PHI = dyn_cast(U)) { + if (Value *MaybeV = pickCommonConstantFromPHI(*PHI)) + V = MaybeV; } if (!BestValue) @@ -5198,6 +5229,7 @@ Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) { BestValue = NullValue; } assert(BestValue && "Must have at least one use"); + assert(BestValue != &I && "Cannot replace with itself"); return BestValue; }; @@ -5934,8 +5966,8 @@ static bool combineInstructionsOverFunction( LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " << F.getName() << "\n"); - InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT, - ORE, BFI, BPI, PSI, DL, RPOT); + InstCombinerImpl IC(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI, + BPI, PSI, DL, RPOT); IC.MaxArraySizeForCombine = MaxArraySize; bool MadeChangeInThisIteration = IC.prepareWorklist(F); MadeChangeInThisIteration |= IC.run(); diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index a20e0dec8841b..cdae9a7271915 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -1786,6 +1786,25 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, else NumInstrumentedReads++; + if (O.MaybeByteOffset) { + Type *Ty = Type::getInt8Ty(*C); + IRBuilder IB(O.getInsn()); + + Value *OffsetOp = O.MaybeByteOffset; + if (TargetTriple.isRISCV()) { + Type *OffsetTy = OffsetOp->getType(); + // RVV indexed loads/stores zero-extend offset operands which are narrower + // than XLEN to XLEN. 
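+ // E.g., on riscv64 (XLEN = 64) a <4 x i16> offset vector is zero-extended to <4 x i64> here before being folded into the GEP below (widths illustrative).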
+ if (OffsetTy->getScalarType()->getIntegerBitWidth() < + static_cast(LongSize)) { + VectorType *OrigType = cast(OffsetTy); + Type *ExtendTy = VectorType::get(IntptrTy, OrigType); + OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy); + } + } + Addr = IB.CreateGEP(Ty, Addr, {OffsetOp}); + } + unsigned Granularity = 1 << Mapping.Scale; if (O.MaybeMask) { instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL, diff --git a/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp b/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp index c14bbecf0d4e1..7c78eb35a865a 100644 --- a/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp +++ b/llvm/lib/Transforms/Instrumentation/ControlHeightReduction.cpp @@ -1591,7 +1591,16 @@ static void insertTrivialPHIs(CHRScope *Scope, } TrivialPHIs.insert(PN); CHR_DEBUG(dbgs() << "Insert phi " << *PN << "\n"); + bool FoundLifetimeAnnotation = false; for (Instruction *UI : Users) { + // If we found a lifetime annotation, remove it, but set a flag + // to ensure that we remove all other lifetime annotations attached + // to the alloca. + if (UI->isLifetimeStartOrEnd()) { + UI->eraseFromParent(); + FoundLifetimeAnnotation = true; + continue; + } for (unsigned J = 0, NumOps = UI->getNumOperands(); J < NumOps; ++J) { if (UI->getOperand(J) == &I) { UI->setOperand(J, PN); @@ -1599,6 +1608,14 @@ static void insertTrivialPHIs(CHRScope *Scope, } CHR_DEBUG(dbgs() << "Updated user " << *UI << "\n"); } + // Erase any leftover lifetime annotations for a dynamic alloca. + if (FoundLifetimeAnnotation) { + for (User *U : make_early_inc_range(I.users())) { + if (auto *UI = dyn_cast(U)) + if (UI->isLifetimeStartOrEnd()) + UI->eraseFromParent(); + } + } } } } @@ -1693,14 +1710,12 @@ void CHR::transformScopes(CHRScope *Scope, DenseSet &TrivialPHIs) { BasicBlock *ExitBlock = LastRegion->getExit(); std::optional ProfileCount = BFI.getBlockProfileCount(EntryBlock); - if (ExitBlock) { - // Insert a trivial phi at the exit block (where the CHR hot path and the - // cold path merges) for a value that's defined in the scope but used - // outside it (meaning it's alive at the exit block). We will add the - // incoming values for the CHR cold paths to it below. Without this, we'd - // miss updating phi's for such values unless there happens to already be a - // phi for that value there. - insertTrivialPHIs(Scope, EntryBlock, ExitBlock, TrivialPHIs); + SmallVector StaticAllocas; + for (Instruction &I : *EntryBlock) { + if (auto *AI = dyn_cast(&I)) { + if (AI->isStaticAlloca()) + StaticAllocas.push_back(AI); + } } // Split the entry block of the first region. The new block becomes the new @@ -1719,6 +1734,20 @@ void CHR::transformScopes(CHRScope *Scope, DenseSet &TrivialPHIs) { FirstRegion->replaceEntryRecursive(NewEntryBlock); BasicBlock *PreEntryBlock = EntryBlock; + // Move static allocas into the pre-entry block so they stay static. + for (AllocaInst *AI : StaticAllocas) + AI->moveBefore(EntryBlock->begin()); + + if (ExitBlock) { + // Insert a trivial phi at the exit block (where the CHR hot path and the + // cold path merges) for a value that's defined in the scope but used + // outside it (meaning it's alive at the exit block). We will add the + // incoming values for the CHR cold paths to it below. Without this, we'd + // miss updating phi's for such values unless there happens to already be a + // phi for that value there. 
+ insertTrivialPHIs(Scope, EntryBlock, ExitBlock, TrivialPHIs); + } + ValueToValueMapTy VMap; // Clone the blocks in the scope (excluding the PreEntryBlock) to split into a // hot path (originals) and a cold path (clones) and update the PHIs at the diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp index 61fef1387d82a..480ff4a8c3cb9 100644 --- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -572,7 +572,8 @@ class DataFlowSanitizer { const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes; public: - DataFlowSanitizer(const std::vector &ABIListFiles); + DataFlowSanitizer(const std::vector &ABIListFiles, + IntrusiveRefCntPtr FS); bool runImpl(Module &M, llvm::function_ref GetTLI); @@ -867,12 +868,11 @@ bool LibAtomicFunction(const Function &F) { } // end anonymous namespace DataFlowSanitizer::DataFlowSanitizer( - const std::vector &ABIListFiles) { + const std::vector &ABIListFiles, + IntrusiveRefCntPtr FS) { std::vector AllABIListFiles(std::move(ABIListFiles)); llvm::append_range(AllABIListFiles, ClABIListFiles); - // FIXME: should we propagate vfs::FileSystem to this constructor? - ABIList.set( - SpecialCaseList::createOrDie(AllABIListFiles, *vfs::getRealFileSystem())); + ABIList.set(SpecialCaseList::createOrDie(AllABIListFiles, *FS)); CombineTaintLookupTableNames.insert_range(ClCombineTaintLookupTables); } @@ -3471,7 +3471,7 @@ PreservedAnalyses DataFlowSanitizerPass::run(Module &M, AM.getResult(M).getManager(); return FAM.getResult(F); }; - if (!DataFlowSanitizer(ABIListFiles).runImpl(M, GetTLI)) + if (!DataFlowSanitizer(ABIListFiles, FS).runImpl(M, GetTLI)) return PreservedAnalyses::all(); PreservedAnalyses PA = PreservedAnalyses::none(); diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp index e5bf2d1187a89..d8422755c28b8 100644 --- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -35,6 +35,7 @@ #include "llvm/Support/FileSystem.h" #include "llvm/Support/Path.h" #include "llvm/Support/Regex.h" +#include "llvm/Support/VirtualFileSystem.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Instrumentation/CFGMST.h" #include "llvm/Transforms/Instrumentation/GCOVProfiler.h" @@ -92,8 +93,10 @@ class GCOVFunction; class GCOVProfiler { public: - GCOVProfiler() : GCOVProfiler(GCOVOptions::getDefault()) {} - GCOVProfiler(const GCOVOptions &Opts) : Options(Opts) {} + GCOVProfiler() + : GCOVProfiler(GCOVOptions::getDefault(), *vfs::getRealFileSystem()) {} + GCOVProfiler(const GCOVOptions &Opts, vfs::FileSystem &VFS) + : Options(Opts), VFS(VFS) {} bool runOnModule(Module &M, function_ref GetBFI, function_ref GetBPI, @@ -110,6 +113,7 @@ class GCOVProfiler { os->write_zeros(4 - s.size() % 4); } void writeBytes(const char *Bytes, int Size) { os->write(Bytes, Size); } + vfs::FileSystem &getVirtualFileSystem() const { return VFS; } private: // Create the .gcno files for the Module based on DebugInfo. @@ -166,6 +170,7 @@ class GCOVProfiler { std::vector ExcludeRe; DenseSet ExecBlocks; StringMap InstrumentedFiles; + vfs::FileSystem &VFS; }; struct BBInfo { @@ -214,10 +219,10 @@ static StringRef getFunctionName(const DISubprogram *SP) { /// Prefer relative paths in the coverage notes. Clang also may split /// up absolute paths into a directory and filename component. 
When /// the relative path doesn't exist, reconstruct the absolute path. -static SmallString<128> getFilename(const DIScope *SP) { +static SmallString<128> getFilename(const DIScope *SP, vfs::FileSystem &VFS) { SmallString<128> Path; StringRef RelPath = SP->getFilename(); - if (sys::fs::exists(RelPath)) + if (VFS.exists(RelPath)) Path = RelPath; else sys::path::append(Path, SP->getDirectory(), SP->getFilename()); @@ -357,7 +362,7 @@ namespace { void writeOut(uint32_t CfgChecksum) { write(GCOV_TAG_FUNCTION); - SmallString<128> Filename = getFilename(SP); + SmallString<128> Filename = getFilename(SP, P->getVirtualFileSystem()); uint32_t BlockLen = 3 + wordsOfString(getFunctionName(SP)); BlockLen += 1 + wordsOfString(Filename) + 4; @@ -455,7 +460,7 @@ bool GCOVProfiler::isFunctionInstrumented(const Function &F) { if (FilterRe.empty() && ExcludeRe.empty()) { return true; } - SmallString<128> Filename = getFilename(F.getSubprogram()); + SmallString<128> Filename = getFilename(F.getSubprogram(), VFS); auto It = InstrumentedFiles.find(Filename); if (It != InstrumentedFiles.end()) { return It->second; @@ -467,7 +472,7 @@ bool GCOVProfiler::isFunctionInstrumented(const Function &F) { // Path can be // /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/bits/*.h so for // such a case we must get the real_path. - if (sys::fs::real_path(Filename, RealPath)) { + if (VFS.getRealPath(Filename, RealPath)) { // real_path can fail with path like "foo.c". RealFilename = Filename; } else { @@ -524,9 +529,10 @@ std::string GCOVProfiler::mangleName(const DICompileUnit *CU, SmallString<128> Filename = CU->getFilename(); sys::path::replace_extension(Filename, Notes ? "gcno" : "gcda"); StringRef FName = sys::path::filename(Filename); - SmallString<128> CurPath; - if (sys::fs::current_path(CurPath)) + ErrorOr CWD = VFS.getCurrentWorkingDirectory(); + if (!CWD) return std::string(FName); + SmallString<128> CurPath{*CWD}; sys::path::append(CurPath, FName); return std::string(CurPath); } @@ -554,7 +560,7 @@ bool GCOVProfiler::runOnModule( PreservedAnalyses GCOVProfilerPass::run(Module &M, ModuleAnalysisManager &AM) { - GCOVProfiler Profiler(GCOVOpts); + GCOVProfiler Profiler(GCOVOpts, *VFS); FunctionAnalysisManager &FAM = AM.getResult(M).getManager(); @@ -789,7 +795,7 @@ bool GCOVProfiler::emitProfileNotes( // Add the function line number to the lines of the entry block // to have a counter for the function definition. 
uint32_t Line = SP->getLine(); - auto Filename = getFilename(SP); + auto Filename = getFilename(SP, VFS); BranchProbabilityInfo *BPI = GetBPI(F); BlockFrequencyInfo *BFI = GetBFI(F); @@ -881,7 +887,7 @@ bool GCOVProfiler::emitProfileNotes( if (SP != getDISubprogram(Scope)) continue; - GCOVLines &Lines = Block.getFile(getFilename(Loc->getScope())); + GCOVLines &Lines = Block.getFile(getFilename(Loc->getScope(), VFS)); Lines.addLine(Loc.getLine()); } Line = 0; diff --git a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp index f451c2b471aa6..cf87e354aef56 100644 --- a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp +++ b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp @@ -55,11 +55,11 @@ using namespace llvm; STATISTIC(NumOfPGOICallPromotion, "Number of indirect call promotions."); STATISTIC(NumOfPGOICallsites, "Number of indirect call candidate sites."); +namespace llvm { extern cl::opt MaxNumVTableAnnotations; -namespace llvm { extern cl::opt EnableVTableProfileUse; -} +} // namespace llvm // Command line option to disable indirect-call promotion with the default as // false. This is for debug purpose. @@ -672,8 +672,8 @@ CallBase &llvm::pgo::promoteIndirectCall(CallBase &CB, Function *DirectCallee, createBranchWeights(CB.getContext(), Count, TotalCount - Count)); if (AttachProfToDirectCall) - setBranchWeights(NewInst, {static_cast(Count)}, - /*IsExpected=*/false); + setFittedBranchWeights(NewInst, {Count}, + /*IsExpected=*/false); using namespace ore; diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp index 7933604b8ac25..eff6f0caf0c05 100644 --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -1923,20 +1923,17 @@ struct MemorySanitizerVisitor : public InstVisitor { /// /// Shadow = ParamTLS+ArgOffset. Value *getShadowPtrForArgument(IRBuilder<> &IRB, int ArgOffset) { - Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy); - if (ArgOffset) - Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); - return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg"); + return IRB.CreatePtrAdd(MS.ParamTLS, + ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg"); } /// Compute the origin address for a given function argument. Value *getOriginPtrForArgument(IRBuilder<> &IRB, int ArgOffset) { if (!MS.TrackOrigins) return nullptr; - Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy); - if (ArgOffset) - Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); - return IRB.CreateIntToPtr(Base, IRB.getPtrTy(0), "_msarg_o"); + return IRB.CreatePtrAdd(MS.ParamOriginTLS, + ConstantInt::get(MS.IntptrTy, ArgOffset), + "_msarg_o"); } /// Compute the shadow address for a retval. @@ -4926,36 +4923,56 @@ struct MemorySanitizerVisitor : public InstVisitor { // <2 x double> @llvm.x86.avx512.rcp14.pd.128 // (<2 x double>, <2 x double>, i8) // + // <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512 + // (<8 x double>, i32, <8 x double>, i8, i32) + // A Imm WriteThru Mask Rounding + // + // All operands other than A and WriteThru (e.g., Mask, Imm, Rounding) must + // be fully initialized. + // // Dst[i] = Mask[i] ? some_op(A[i]) : WriteThru[i] // Dst_shadow[i] = Mask[i] ? 
all_or_nothing(A_shadow[i]) : WriteThru_shadow[i] - void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I) { + void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I, unsigned AIndex, + unsigned WriteThruIndex, + unsigned MaskIndex) { IRBuilder<> IRB(&I); - assert(I.arg_size() == 3); - Value *A = I.getOperand(0); - Value *WriteThrough = I.getOperand(1); - Value *Mask = I.getOperand(2); + unsigned NumArgs = I.arg_size(); + assert(AIndex < NumArgs); + assert(WriteThruIndex < NumArgs); + assert(MaskIndex < NumArgs); + assert(AIndex != WriteThruIndex); + assert(AIndex != MaskIndex); + assert(WriteThruIndex != MaskIndex); + + Value *A = I.getOperand(AIndex); + Value *WriteThru = I.getOperand(WriteThruIndex); + Value *Mask = I.getOperand(MaskIndex); assert(isFixedFPVector(A)); - assert(isFixedFPVector(WriteThrough)); + assert(isFixedFPVector(WriteThru)); [[maybe_unused]] unsigned ANumElements = cast(A->getType())->getNumElements(); unsigned OutputNumElements = - cast(WriteThrough->getType())->getNumElements(); + cast(WriteThru->getType())->getNumElements(); assert(ANumElements == OutputNumElements); - assert(Mask->getType()->isIntegerTy()); - // Some bits of the mask might be unused, but check them all anyway - // (typically the mask is an integer constant). - insertCheckShadowOf(Mask, &I); + for (unsigned i = 0; i < NumArgs; ++i) { + if (i != AIndex && i != WriteThruIndex) { + // Imm, Mask, Rounding etc. are "control" data, hence we require that + // they be fully initialized. + assert(I.getOperand(i)->getType()->isIntegerTy()); + insertCheckShadowOf(I.getOperand(i), &I); + } + } // The mask has 1 bit per element of A, but a minimum of 8 bits. if (Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8) Mask = IRB.CreateTrunc(Mask, Type::getIntNTy(*MS.C, ANumElements)); assert(Mask->getType()->getScalarSizeInBits() == ANumElements); - assert(I.getType() == WriteThrough->getType()); + assert(I.getType() == WriteThru->getType()); Mask = IRB.CreateBitCast( Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements)); @@ -4966,9 +4983,9 @@ struct MemorySanitizerVisitor : public InstVisitor { AShadow = IRB.CreateSExt(IRB.CreateICmpNE(AShadow, getCleanShadow(AShadow)), AShadow->getType()); - Value *WriteThroughShadow = getShadow(WriteThrough); + Value *WriteThruShadow = getShadow(WriteThru); - Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThroughShadow); + Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThruShadow); setShadow(&I, Shadow); setOriginForNaryOp(I); @@ -5790,10 +5807,22 @@ struct MemorySanitizerVisitor : public InstVisitor { case Intrinsic::x86_avx512_vpdpbusds_512: case Intrinsic::x86_avx2_vpdpbssd_128: case Intrinsic::x86_avx2_vpdpbssd_256: + case Intrinsic::x86_avx10_vpdpbssd_512: case Intrinsic::x86_avx2_vpdpbssds_128: case Intrinsic::x86_avx2_vpdpbssds_256: - case Intrinsic::x86_avx10_vpdpbssd_512: case Intrinsic::x86_avx10_vpdpbssds_512: + case Intrinsic::x86_avx2_vpdpbsud_128: + case Intrinsic::x86_avx2_vpdpbsud_256: + case Intrinsic::x86_avx10_vpdpbsud_512: + case Intrinsic::x86_avx2_vpdpbsuds_128: + case Intrinsic::x86_avx2_vpdpbsuds_256: + case Intrinsic::x86_avx10_vpdpbsuds_512: + case Intrinsic::x86_avx2_vpdpbuud_128: + case Intrinsic::x86_avx2_vpdpbuud_256: + case Intrinsic::x86_avx10_vpdpbuud_512: + case Intrinsic::x86_avx2_vpdpbuuds_128: + case Intrinsic::x86_avx2_vpdpbuuds_256: + case Intrinsic::x86_avx10_vpdpbuuds_512: handleVectorPmaddIntrinsic(I, /*ReductionFactor=*/4, /*EltSize=*/8); break; @@ -6202,7 +6231,8 @@ struct MemorySanitizerVisitor : 
public InstVisitor { case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512: case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256: case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128: - handleAVX512VectorGenericMaskedFP(I); + handleAVX512VectorGenericMaskedFP(I, /*AIndex=*/0, /*WriteThruIndex=*/1, + /*MaskIndex=*/2); break; // AVX512/AVX10 Reciprocal Square Root @@ -6253,7 +6283,64 @@ struct MemorySanitizerVisitor : public InstVisitor { case Intrinsic::x86_avx512fp16_mask_rcp_ph_512: case Intrinsic::x86_avx512fp16_mask_rcp_ph_256: case Intrinsic::x86_avx512fp16_mask_rcp_ph_128: - handleAVX512VectorGenericMaskedFP(I); + handleAVX512VectorGenericMaskedFP(I, /*AIndex=*/0, /*WriteThruIndex=*/1, + /*MaskIndex=*/2); + break; + + // <32 x half> @llvm.x86.avx512fp16.mask.rndscale.ph.512 + // (<32 x half>, i32, <32 x half>, i32, i32) + // <16 x half> @llvm.x86.avx512fp16.mask.rndscale.ph.256 + // (<16 x half>, i32, <16 x half>, i32, i16) + // <8 x half> @llvm.x86.avx512fp16.mask.rndscale.ph.128 + // (<8 x half>, i32, <8 x half>, i32, i8) + // + // <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512 + // (<16 x float>, i32, <16 x float>, i16, i32) + // <8 x float> @llvm.x86.avx512.mask.rndscale.ps.256 + // (<8 x float>, i32, <8 x float>, i8) + // <4 x float> @llvm.x86.avx512.mask.rndscale.ps.128 + // (<4 x float>, i32, <4 x float>, i8) + // + // <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512 + // (<8 x double>, i32, <8 x double>, i8, i32) + // A Imm WriteThru Mask Rounding + // <4 x double> @llvm.x86.avx512.mask.rndscale.pd.256 + // (<4 x double>, i32, <4 x double>, i8) + // <2 x double> @llvm.x86.avx512.mask.rndscale.pd.128 + // (<2 x double>, i32, <2 x double>, i8) + // A Imm WriteThru Mask + // + // <32 x bfloat> @llvm.x86.avx10.mask.rndscale.bf16.512 + // (<32 x bfloat>, i32, <32 x bfloat>, i32) + // <16 x bfloat> @llvm.x86.avx10.mask.rndscale.bf16.256 + // (<16 x bfloat>, i32, <16 x bfloat>, i16) + // <8 x bfloat> @llvm.x86.avx10.mask.rndscale.bf16.128 + // (<8 x bfloat>, i32, <8 x bfloat>, i8) + // + // Not supported: three vectors + // - <8 x half> @llvm.x86.avx512fp16.mask.rndscale.sh + // (<8 x half>, <8 x half>,<8 x half>, i8, i32, i32) + // - <4 x float> @llvm.x86.avx512.mask.rndscale.ss + // (<4 x float>, <4 x float>, <4 x float>, i8, i32, i32) + // - <2 x double> @llvm.x86.avx512.mask.rndscale.sd + // (<2 x double>, <2 x double>, <2 x double>, i8, i32, + // i32) + // A B WriteThru Mask Imm + // Rounding + case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512: + case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256: + case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128: + case Intrinsic::x86_avx512_mask_rndscale_ps_512: + case Intrinsic::x86_avx512_mask_rndscale_ps_256: + case Intrinsic::x86_avx512_mask_rndscale_ps_128: + case Intrinsic::x86_avx512_mask_rndscale_pd_512: + case Intrinsic::x86_avx512_mask_rndscale_pd_256: + case Intrinsic::x86_avx512_mask_rndscale_pd_128: + case Intrinsic::x86_avx10_mask_rndscale_bf16_512: + case Intrinsic::x86_avx10_mask_rndscale_bf16_256: + case Intrinsic::x86_avx10_mask_rndscale_bf16_128: + handleAVX512VectorGenericMaskedFP(I, /*AIndex=*/0, /*WriteThruIndex=*/2, + /*MaskIndex=*/3); break; // AVX512 FP16 Arithmetic @@ -7129,9 +7216,8 @@ struct VarArgHelperBase : public VarArgHelper { /// Compute the shadow address for a given va_arg. 
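/// Shadow = VAArgTLS + ArgOffset.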
Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) { - Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); - Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); - return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_s"); + return IRB.CreatePtrAdd( + MS.VAArgTLS, ConstantInt::get(MS.IntptrTy, ArgOffset), "_msarg_va_s"); } /// Compute the shadow address for a given va_arg. @@ -7145,12 +7231,12 @@ struct VarArgHelperBase : public VarArgHelper { /// Compute the origin address for a given va_arg. Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) { - Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy); // getOriginPtrForVAArgument() is always called after // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never // overflow. - Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); - return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_o"); + return IRB.CreatePtrAdd(MS.VAArgOriginTLS, + ConstantInt::get(MS.IntptrTy, ArgOffset), + "_msarg_va_o"); } void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase, @@ -7377,10 +7463,8 @@ struct VarArgAMD64Helper : public VarArgHelperBase { NextNodeIRBuilder IRB(OrigInst); Value *VAListTag = OrigInst->getArgOperand(0); - Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr( - IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), - ConstantInt::get(MS.IntptrTy, 16)), - MS.PtrTy); + Value *RegSaveAreaPtrPtr = + IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 16)); Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr); Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr; const Align Alignment = Align(16); @@ -7392,10 +7476,8 @@ struct VarArgAMD64Helper : public VarArgHelperBase { if (MS.TrackOrigins) IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy, Alignment, AMD64FpEndOffset); - Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr( - IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), - ConstantInt::get(MS.IntptrTy, 8)), - MS.PtrTy); + Value *OverflowArgAreaPtrPtr = + IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, 8)); Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr); Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr; @@ -7525,19 +7607,15 @@ struct VarArgAArch64Helper : public VarArgHelperBase { // Retrieve a va_list field of 'void*' size. Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) { - Value *SaveAreaPtrPtr = IRB.CreateIntToPtr( - IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), - ConstantInt::get(MS.IntptrTy, offset)), - MS.PtrTy); + Value *SaveAreaPtrPtr = + IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset)); return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr); } // Retrieve a va_list field of 'int' size. 
Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) { - Value *SaveAreaPtr = IRB.CreateIntToPtr( - IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), - ConstantInt::get(MS.IntptrTy, offset)), - MS.PtrTy); + Value *SaveAreaPtr = + IRB.CreatePtrAdd(VAListTag, ConstantInt::get(MS.IntptrTy, offset)); Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr); return IRB.CreateSExt(SaveArea32, MS.IntptrTy); } diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp index d9e850e7a2bf3..120c4f65a7292 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp @@ -222,7 +222,6 @@ cl::opt NoPGOWarnMismatchComdatWeak( cl::desc("The option is used to turn on/off " "warnings about hash mismatch for comdat " "or weak functions.")); -} // namespace llvm // Command line option to enable/disable select instruction instrumentation. static cl::opt @@ -347,7 +346,6 @@ cl::list CtxPGOSkipCallsiteInstrument( extern cl::opt MaxNumVTableAnnotations; -namespace llvm { // Command line option to turn on CFG dot dump after profile annotation. // Defined in Analysis/BlockFrequencyInfo.cpp: -pgo-view-counts extern cl::opt PGOViewCounts; diff --git a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp index 343bec37018c5..a5f417a02a99a 100644 --- a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp +++ b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp @@ -54,6 +54,8 @@ using namespace llvm; STATISTIC(NumOfPGOMemOPOpt, "Number of memop intrinsics optimized."); STATISTIC(NumOfPGOMemOPAnnotate, "Number of memop intrinsics annotated."); +namespace llvm { + // The minimum call count to optimize memory intrinsic calls. 
static cl::opt MemOPCountThreshold("pgo-memop-count-threshold", cl::Hidden, cl::init(1000), @@ -93,6 +95,8 @@ static cl::opt MemOpMaxOptSize("memop-value-prof-max-opt-size", cl::Hidden, cl::init(128), cl::desc("Optimize the memop size <= this value")); +} // end namespace llvm + namespace { static const char *getMIName(const MemIntrinsic *MI) { diff --git a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc index a3d4e5367b9ab..0534fdd0c756c 100644 --- a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc +++ b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc @@ -21,7 +21,9 @@ using namespace llvm; using CandidateInfo = ValueProfileCollector::CandidateInfo; +namespace llvm { extern cl::opt MemOPOptMemcmpBcmp; +} // end namespace llvm ///--------------------------- MemIntrinsicPlugin ------------------------------ class MemIntrinsicPlugin : public InstVisitor { diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp index 8555ef5c22f82..e54a2e54f9943 100644 --- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp +++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp @@ -163,7 +163,7 @@ bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I, const SCEV *&OffSCEV) { Type *Int64Ty = Type::getInt64Ty(I->getContext()); OperandBundleUse AlignOB = I->getOperandBundleAt(Idx); - if (AlignOB.getTagName() != "align") + if (AlignOB.getTagID() != LLVMContext::OB_align) return false; assert(AlignOB.Inputs.size() >= 2); AAPtr = AlignOB.Inputs[0].get(); diff --git a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp index 944b253e0f5e7..e9a3e983bc1e2 100644 --- a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp +++ b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp @@ -190,12 +190,12 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold, std::vector *NewBBs) { SelectInst *SI = SIToUnfold.getInst(); PHINode *SIUse = SIToUnfold.getUse(); - BasicBlock *StartBlock = SI->getParent(); + assert(SI->hasOneUse()); + // The select may come indirectly, instead of from where it is defined. + BasicBlock *StartBlock = SIUse->getIncomingBlock(*SI->use_begin()); BranchInst *StartBlockTerm = dyn_cast(StartBlock->getTerminator()); - assert(StartBlockTerm); - assert(SI->hasOneUse()); if (StartBlockTerm->isUnconditional()) { BasicBlock *EndBlock = StartBlock->getUniqueSuccessor(); @@ -332,7 +332,7 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold, } // Preserve loop info - if (Loop *L = LI->getLoopFor(SI->getParent())) { + if (Loop *L = LI->getLoopFor(StartBlock)) { for (BasicBlock *NewBB : *NewBBs) L->addBasicBlockToLoop(NewBB, *LI); } @@ -533,6 +533,8 @@ struct MainSwitch { return false; // Only fold the select coming from directly where it is defined. + // TODO: We have dealt with the select coming indirectly now. This + // constraint can be relaxed. 
PHINode *PHIUser = dyn_cast(SIUse); if (PHIUser && PHIUser->getIncomingBlock(*SI->use_begin()) != SIBB) return false; diff --git a/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp b/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp index c2e58ba393553..89980d54ee897 100644 --- a/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp +++ b/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar/DropUnnecessaryAssumes.h" +#include "llvm/ADT/SetVector.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/IntrinsicInst.h" @@ -16,36 +17,119 @@ using namespace llvm; using namespace llvm::PatternMatch; +static bool affectedValuesAreEphemeral(ArrayRef Affected) { + // Check whether all the uses are ephemeral, i.e. recursively only used + // by assumes. In that case, the assume does not provide useful information. + // Note that additional users may appear as a result of inlining and CSE, + // so we should only make this assumption late in the optimization pipeline. + SmallSetVector Worklist; + auto AddUsers = [&](Value *V) { + for (User *U : V->users()) { + // Bail out if we need to inspect too many users. + if (Worklist.size() >= 32) + return false; + Worklist.insert(cast(U)); + } + return true; + }; + + for (Value *V : Affected) { + // Do not handle assumes on globals for now. The use list for them may + // contain uses in other functions. + if (!isa(V)) + return false; + + if (!AddUsers(V)) + return false; + } + + for (unsigned Idx = 0; Idx < Worklist.size(); ++Idx) { + Instruction *I = Worklist[Idx]; + + // Use in assume is ephemeral. + if (isa(I)) + continue; + + // Use in side-effecting instruction is non-ephemeral. + if (I->mayHaveSideEffects() || I->isTerminator()) + return false; + + // Otherwise, recursively look at the users. + if (!AddUsers(I)) + return false; + } + + return true; +} + PreservedAnalyses DropUnnecessaryAssumesPass::run(Function &F, FunctionAnalysisManager &FAM) { AssumptionCache &AC = FAM.getResult(F); bool Changed = false; - for (AssumptionCache::ResultElem &Elem : AC.assumptions()) { - auto *Assume = cast_or_null(Elem.Assume); + for (const WeakVH &Elem : AC.assumptions()) { + auto *Assume = cast_or_null(Elem); if (!Assume) continue; - // TODO: Handle assumes with operand bundles. - if (Assume->hasOperandBundles()) + if (Assume->hasOperandBundles()) { + // Handle operand bundle assumptions. + SmallVector DeadBundleArgs; + SmallVector KeptBundles; + unsigned NumBundles = Assume->getNumOperandBundles(); + for (unsigned I = 0; I != NumBundles; ++I) { + auto IsDead = [](OperandBundleUse Bundle) { + // "ignore" operand bundles are always dead. + if (Bundle.getTagName() == "ignore") + return true; + + // Bundles without arguments do not affect any specific values. + // Always keep them for now. + if (Bundle.Inputs.empty()) + return false; + + SmallVector Affected; + AssumptionCache::findValuesAffectedByOperandBundle( + Bundle, [&](Value *A) { Affected.push_back(A); }); + + return affectedValuesAreEphemeral(Affected); + }; + + OperandBundleUse Bundle = Assume->getOperandBundleAt(I); + if (IsDead(Bundle)) + append_range(DeadBundleArgs, Bundle.Inputs); + else + KeptBundles.emplace_back(Bundle); + } + + if (KeptBundles.size() != NumBundles) { + if (KeptBundles.empty()) { + // All operand bundles are dead, remove the whole assume. 
+ Assume->eraseFromParent(); + } else { + // Otherwise only drop the dead operand bundles. + CallBase *NewAssume = + CallBase::Create(Assume, KeptBundles, Assume->getIterator()); + AC.registerAssumption(cast(NewAssume)); + Assume->eraseFromParent(); + } + + RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadBundleArgs); + Changed = true; + } continue; + } Value *Cond = Assume->getArgOperand(0); // Don't drop type tests, which have special semantics. if (match(Cond, m_Intrinsic())) continue; - SmallPtrSet Affected; + SmallVector Affected; findValuesAffectedByCondition(Cond, /*IsAssume=*/true, - [&](Value *A) { Affected.insert(A); }); - - // If all the affected uses have only one use (part of the assume), then - // the assume does not provide useful information. Note that additional - // users may appear as a result of inlining and CSE, so we should only - // make this assumption late in the optimization pipeline. - // TODO: Handle dead cyclic usages. - // TODO: Handle multiple dead assumes on the same value. - if (!all_of(Affected, match_fn(m_OneUse(m_Value())))) + [&](Value *A) { Affected.push_back(A); }); + + if (!affectedValuesAreEphemeral(Affected)) continue; Assume->eraseFromParent(); diff --git a/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp b/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp index 2025fbbf05973..3c14036e509ef 100644 --- a/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp +++ b/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp @@ -26,6 +26,8 @@ using namespace llvm; +namespace llvm { + static cl::opt JumpTableSizeThreshold("jump-table-to-switch-size-threshold", cl::Hidden, cl::desc("Only split jump tables with size less or " @@ -43,6 +45,8 @@ static cl::opt FunctionSizeThreshold( extern cl::opt ProfcheckDisableMetadataFixes; +} // end namespace llvm + #define DEBUG_TYPE "jump-table-to-switch" namespace { @@ -201,14 +205,12 @@ PreservedAnalyses JumpTableToSwitchPass::run(Function &F, PostDominatorTree *PDT = AM.getCachedResult(F); DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy); bool Changed = false; - InstrProfSymtab Symtab; - if (auto E = Symtab.create(*F.getParent())) - F.getContext().emitError( - "Could not create indirect call table, likely corrupted IR" + - toString(std::move(E))); - DenseMap FToGuid; - for (const auto &[G, FPtr] : Symtab.getIDToNameMap()) - FToGuid.insert({FPtr, G}); + auto FuncToGuid = [&](const Function &Fct) { + if (Fct.getMetadata(AssignGUIDPass::GUIDMetadataName)) + return AssignGUIDPass::getGUID(Fct); + + return Function::getGUIDAssumingExternalLinkage(getIRPGOFuncName(F, InLTO)); + }; for (BasicBlock &BB : make_early_inc_range(F)) { BasicBlock *CurrentBB = &BB; @@ -230,12 +232,8 @@ PreservedAnalyses JumpTableToSwitchPass::run(Function &F, std::optional JumpTable = parseJumpTable(GEP, PtrTy); if (!JumpTable) continue; - SplittedOutTail = expandToSwitch( - Call, *JumpTable, DTU, ORE, [&](const Function &Fct) { - if (Fct.getMetadata(AssignGUIDPass::GUIDMetadataName)) - return AssignGUIDPass::getGUID(Fct); - return FToGuid.lookup_or(&Fct, 0U); - }); + SplittedOutTail = + expandToSwitch(Call, *JumpTable, DTU, ORE, FuncToGuid); Changed = true; break; } diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp index bab1f2a90a8fd..9655173de4422 100644 --- a/llvm/lib/Transforms/Scalar/LICM.cpp +++ b/llvm/lib/Transforms/Scalar/LICM.cpp @@ -116,6 +116,8 @@ STATISTIC(NumIntAssociationsHoisted, STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions " "reassociated and hoisted 
out of the loop"); +namespace llvm { + /// Memory promotion is enabled by default. static cl::opt DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), @@ -154,7 +156,7 @@ static cl::opt IntAssociationUpperLimit( // which may not be precise, since optimizeUses is capped. The result is // correct, but we may not get as "far up" as possible to get which access is // clobbering the one queried. -cl::opt llvm::SetLicmMssaOptCap( +cl::opt SetLicmMssaOptCap( "licm-mssa-optimization-cap", cl::init(100), cl::Hidden, cl::desc("Enable imprecision in LICM in pathological cases, in exchange " "for faster compile. Caps the MemorySSA clobbering calls.")); @@ -162,7 +164,7 @@ cl::opt llvm::SetLicmMssaOptCap( // Experimentally, memory promotion carries less importance than sinking and // hoisting. Limit when we do promotion when using MemorySSA, in order to save // compile time. -cl::opt llvm::SetLicmMssaNoAccForPromotionCap( +cl::opt SetLicmMssaNoAccForPromotionCap( "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden, cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no " "effect. When MSSA in LICM is enabled, then this is the maximum " @@ -171,6 +173,8 @@ cl::opt llvm::SetLicmMssaNoAccForPromotionCap( extern cl::opt ProfcheckDisableMetadataFixes; +} // end namespace llvm + static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI); static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp index b5eb647a042b9..2073303237f69 100644 --- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp +++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp @@ -100,6 +100,7 @@ STATISTIC(OnlySecondCandidateIsGuarded, "The second candidate is guarded while the first one is not"); STATISTIC(NumHoistedInsts, "Number of hoisted preheader instructions."); STATISTIC(NumSunkInsts, "Number of hoisted preheader instructions."); +STATISTIC(NumDA, "DA checks passed"); enum FusionDependenceAnalysisChoice { FUSION_DEPENDENCE_ANALYSIS_SCEV, @@ -1371,6 +1372,47 @@ struct LoopFuser { << "\n"); } #endif + unsigned Levels = DepResult->getLevels(); + unsigned SameSDLevels = DepResult->getSameSDLevels(); + unsigned CurLoopLevel = FC0.L->getLoopDepth(); + + // Check if DA is missing info regarding the current loop level + if (CurLoopLevel > Levels + SameSDLevels) + return false; + + // Iterating over the outer levels. + for (unsigned Level = 1; Level <= std::min(CurLoopLevel - 1, Levels); + ++Level) { + unsigned Direction = DepResult->getDirection(Level, false); + + // Check if the direction vector does not include equality. If an outer + // loop has a non-equal direction, outer indicies are different and it + // is safe to fuse. + if (!(Direction & Dependence::DVEntry::EQ)) { + LLVM_DEBUG(dbgs() << "Safe to fuse due to non-equal acceses in the " + "outer loops\n"); + NumDA++; + return true; + } + } + + assert(CurLoopLevel > Levels && "Fusion candidates are not separated"); + + unsigned CurDir = DepResult->getDirection(CurLoopLevel, true); + + // Check if the direction vector does not include greater direction. In + // that case, the dependency is not a backward loop-carried and is legal + // to fuse. For example here we have a forward dependency + // for (int i = 0; i < n; i++) + // A[i] = ...; + // for (int i = 0; i < n; i++) + // ... 
= A[i-1]; + if (!(CurDir & Dependence::DVEntry::GT)) { + LLVM_DEBUG(dbgs() << "Safe to fuse with no backward loop-carried " + "dependency\n"); + NumDA++; + return true; + } if (DepResult->getNextPredecessor() || DepResult->getNextSuccessor()) LLVM_DEBUG( diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp index 0874b29ab7d22..019536ca91ae0 100644 --- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -1598,11 +1598,8 @@ bool LoopIdiomRecognize::optimizeCRCLoop(const PolynomialInfo &Info) { // crc = (crc << 8) ^ tbl[(iv'th byte of data) ^ (top byte of crc)] { auto LoByte = [](IRBuilderBase &Builder, Value *Op, const Twine &Name) { - Type *OpTy = Op->getType(); - unsigned OpBW = OpTy->getIntegerBitWidth(); - return OpBW > 8 - ? Builder.CreateAnd(Op, ConstantInt::get(OpTy, 0XFF), Name) - : Op; + return Builder.CreateZExtOrTrunc( + Op, IntegerType::getInt8Ty(Op->getContext()), Name); }; auto HiIdx = [LoByte, CRCBW](IRBuilderBase &Builder, Value *Op, const Twine &Name) { diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp index 08446ccaa9fca..28ae4f0a0aad9 100644 --- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp +++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp @@ -260,6 +260,17 @@ static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level, Dep.push_back('I'); } + // If all the elements of any direction vector have only '*', legality + // can't be proven. Exit early to save compile time. + if (all_of(Dep, [](char C) { return C == '*'; })) { + ORE->emit([&]() { + return OptimizationRemarkMissed(DEBUG_TYPE, "Dependence", + L->getStartLoc(), L->getHeader()) + << "All loops have dependencies in all directions."; + }); + return false; + } + // Test whether the dependency is forward or not. bool IsKnownForward = true; if (Src->getParent() != Dst->getParent()) { diff --git a/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp b/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp index 1a9e16be6989e..d31154fcf085d 100644 --- a/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp +++ b/llvm/lib/Transforms/Utils/FunctionImportUtils.cpp @@ -17,6 +17,8 @@ using namespace llvm; +namespace llvm { + /// Uses the "source_filename" instead of a Module hash ID for the suffix of /// promoted locals during LTO. NOTE: This requires that the source filename /// has a unique name / path to avoid name collisions. @@ -35,6 +37,8 @@ cl::list MoveSymbolGUID( "used with the name of contextual profiling roots."), cl::Hidden); +} // end namespace llvm + FunctionImportGlobalProcessing::FunctionImportGlobalProcessing( Module &M, const ModuleSummaryIndex &Index, SetVector *GlobalsToImport, bool ClearDSOLocalOnDeclarations) diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp index 123881e276584..21b2652d04120 100644 --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -3025,6 +3025,12 @@ static void combineMetadata(Instruction *K, const Instruction *J, // Preserve !nosanitize if both K and J have it. K->setMetadata(Kind, JMD); break; + case LLVMContext::MD_captures: + K->setMetadata( + Kind, MDNode::fromCaptureComponents( + K->getContext(), MDNode::toCaptureComponents(JMD) | + MDNode::toCaptureComponents(KMD))); + break; } } // Set !invariant.group from J if J has it. 
If both instructions have it diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp index 735bad1cb1348..e1dcaa85a5780 100644 --- a/llvm/lib/Transforms/Utils/LoopPeel.cpp +++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp @@ -883,84 +883,6 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize, } } -struct WeightInfo { - // Weights for current iteration. - SmallVector Weights; - // Weights to subtract after each iteration. - const SmallVector SubWeights; -}; - -/// Update the branch weights of an exiting block of a peeled-off loop -/// iteration. -/// Let F is a weight of the edge to continue (fallthrough) into the loop. -/// Let E is a weight of the edge to an exit. -/// F/(F+E) is a probability to go to loop and E/(F+E) is a probability to -/// go to exit. -/// Then, Estimated ExitCount = F / E. -/// For I-th (counting from 0) peeled off iteration we set the weights for -/// the peeled exit as (EC - I, 1). It gives us reasonable distribution, -/// The probability to go to exit 1/(EC-I) increases. At the same time -/// the estimated exit count in the remainder loop reduces by I. -/// To avoid dealing with division rounding we can just multiple both part -/// of weights to E and use weight as (F - I * E, E). -static void updateBranchWeights(Instruction *Term, WeightInfo &Info) { - setBranchWeights(*Term, Info.Weights, /*IsExpected=*/false); - for (auto [Idx, SubWeight] : enumerate(Info.SubWeights)) - if (SubWeight != 0) - // Don't set the probability of taking the edge from latch to loop header - // to less than 1:1 ratio (meaning Weight should not be lower than - // SubWeight), as this could significantly reduce the loop's hotness, - // which would be incorrect in the case of underestimating the trip count. - Info.Weights[Idx] = - Info.Weights[Idx] > SubWeight - ? std::max(Info.Weights[Idx] - SubWeight, SubWeight) - : SubWeight; -} - -/// Initialize the weights for all exiting blocks. -static void initBranchWeights(DenseMap &WeightInfos, - Loop *L) { - SmallVector ExitingBlocks; - L->getExitingBlocks(ExitingBlocks); - for (BasicBlock *ExitingBlock : ExitingBlocks) { - Instruction *Term = ExitingBlock->getTerminator(); - SmallVector Weights; - if (!extractBranchWeights(*Term, Weights)) - continue; - - // See the comment on updateBranchWeights() for an explanation of what we - // do here. - uint32_t FallThroughWeights = 0; - uint32_t ExitWeights = 0; - for (auto [Succ, Weight] : zip(successors(Term), Weights)) { - if (L->contains(Succ)) - FallThroughWeights += Weight; - else - ExitWeights += Weight; - } - - // Don't try to update weights for degenerate case. - if (FallThroughWeights == 0) - continue; - - SmallVector SubWeights; - for (auto [Succ, Weight] : zip(successors(Term), Weights)) { - if (!L->contains(Succ)) { - // Exit weights stay the same. - SubWeights.push_back(0); - continue; - } - - // Subtract exit weights on each iteration, distributed across all - // fallthrough edges. - double W = (double)Weight / (double)FallThroughWeights; - SubWeights.push_back((uint32_t)(ExitWeights * W)); - } - - WeightInfos.insert({Term, {std::move(Weights), std::move(SubWeights)}}); - } -} - /// Clones the body of the loop L, putting it between \p InsertTop and \p /// InsertBot. 
/// \param IterNumber The serial number of the iteration currently being @@ -1332,11 +1254,6 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI, Instruction *LatchTerm = cast(cast(Latch)->getTerminator()); - // If we have branch weight information, we'll want to update it for the - // newly created branches. - DenseMap Weights; - initBranchWeights(Weights, L); - // Identify what noalias metadata is inside the loop: if it is inside the // loop, the associated metadata must be cloned for each iteration. SmallVector LoopLocalNoAliasDeclScopes; @@ -1382,11 +1299,6 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI, assert(DT.verify(DominatorTree::VerificationLevel::Fast)); #endif - for (auto &[Term, Info] : Weights) { - auto *TermCopy = cast(VMap[Term]); - updateBranchWeights(TermCopy, Info); - } - // Remove Loop metadata from the latch branch instruction // because it is not the Loop's latch branch anymore. auto *LatchTermCopy = cast(VMap[LatchTerm]); @@ -1426,15 +1338,38 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI, } } - for (const auto &[Term, Info] : Weights) { - setBranchWeights(*Term, Info.Weights, /*IsExpected=*/false); - } - // Update Metadata for count of peeled off iterations. unsigned AlreadyPeeled = 0; if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData)) AlreadyPeeled = *Peeled; - addStringMetadataToLoop(L, PeeledCountMetaData, AlreadyPeeled + PeelCount); + unsigned TotalPeeled = AlreadyPeeled + PeelCount; + addStringMetadataToLoop(L, PeeledCountMetaData, TotalPeeled); + + // Update metadata for the estimated trip count. The original branch weight + // metadata is already correct for both the remaining loop and the peeled loop + // iterations, so do not adjust it. + // + // For example, consider what happens when peeling 2 iterations from a loop + // with an estimated trip count of 10 and inserting them before the remaining + // loop. Each of the peeled iterations and each iteration in the remaining + // loop still has the same probability of exiting the *entire original* loop + // as it did when in the original loop, and thus it should still have the same + // branch weights. The peeled iterations' non-zero probabilities of exiting + // already appropriately reduce the probability of reaching the remaining + // iterations just as they did in the original loop. Trying to also adjust + // the remaining loop's branch weights to reflect its new trip count of 8 will + // erroneously further reduce its block frequencies. However, in case an + // analysis later needs to determine the trip count of the remaining loop + // while examining it in isolation without considering the probability of + // actually reaching it, we store the new trip count as separate metadata. 
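The comment above keeps the original branch weights untouched and records the reduced trip count in separate metadata instead. As a rough stand-alone illustration of the arithmetic that reasoning relies on (plain integers instead of branch-weight metadata; both helper names are hypothetical and not part of the patch): with back-edge weight F and exit weight E on the latch, the estimated trip count is roughly F/E + 1, and peeling only shifts that estimate, never the weights themselves.

  #include <cstdint>

  // Estimated trip count from latch branch weights: the latch is expected to
  // branch back roughly F / E times, so the body runs about F / E + 1 times.
  static uint64_t estimatedTripCount(uint64_t BackEdgeWeight, uint64_t ExitWeight) {
    if (ExitWeight == 0)
      return 0; // no usable profile in this simplified sketch
    return BackEdgeWeight / ExitWeight + 1;
  }

  // After peeling TotalPeeled iterations, only the separate trip-count
  // metadata is reduced; the remaining loop keeps its original weights.
  static uint64_t remainingTripCount(uint64_t Estimated, uint64_t TotalPeeled) {
    return Estimated >= TotalPeeled ? Estimated - TotalPeeled : 0;
  }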
+ if (auto EstimatedTripCount = getLoopEstimatedTripCount(L)) { + unsigned EstimatedTripCountNew = *EstimatedTripCount; + if (EstimatedTripCountNew < TotalPeeled) + EstimatedTripCountNew = 0; + else + EstimatedTripCountNew -= TotalPeeled; + setLoopEstimatedTripCount(L, EstimatedTripCountNew); + } if (Loop *ParentLoop = L->getParentLoop()) L = ParentLoop; diff --git a/llvm/lib/Transforms/Utils/ProfileVerify.cpp b/llvm/lib/Transforms/Utils/ProfileVerify.cpp index faacd422c009c..c578b4b839258 100644 --- a/llvm/lib/Transforms/Utils/ProfileVerify.cpp +++ b/llvm/lib/Transforms/Utils/ProfileVerify.cpp @@ -28,6 +28,10 @@ static cl::opt AnnotateSelect("profcheck-annotate-select", cl::init(true), cl::desc("Also inject (if missing) and verify MD_prof for " "`select` instructions")); +static cl::opt + WeightsForTest("profcheck-weights-for-test", cl::init(false), + cl::desc("Generate weights with small values for tests.")); + static cl::opt SelectTrueWeight( "profcheck-default-select-true-weight", cl::init(2U), cl::desc("When annotating `select` instructions, this value will be used " @@ -91,6 +95,10 @@ bool ProfileInjector::inject() { if (F.getEntryCount(/*AllowSynthetic=*/true)->getCount() == 0) return false; bool Changed = false; + // Cycle through the weights list. If we didn't, tests with more than (say) + // one conditional branch would have the same !prof metadata on all of them, + // and numerically that may make for a poor unit test. + uint32_t WeightsForTestOffset = 0; for (auto &BB : F) { if (AnnotateSelect) { for (auto &I : BB) { @@ -103,38 +111,48 @@ bool ProfileInjector::inject() { if (!Term || Term->getMetadata(LLVMContext::MD_prof)) continue; SmallVector Probs; - Probs.reserve(Term->getNumSuccessors()); - for (auto I = 0U, E = Term->getNumSuccessors(); I < E; ++I) - Probs.emplace_back(BPI.getEdgeProbability(&BB, Term->getSuccessor(I))); - assert(llvm::find_if(Probs, - [](const BranchProbability &P) { - return P.isUnknown(); - }) == Probs.end() && - "All branch probabilities should be valid"); - const auto *FirstZeroDenominator = - find_if(Probs, [](const BranchProbability &P) { - return P.getDenominator() == 0; - }); - (void)FirstZeroDenominator; - assert(FirstZeroDenominator == Probs.end()); - const auto *FirstNonZeroNumerator = - find_if(Probs, [](const BranchProbability &P) { return !P.isZero(); }); - assert(FirstNonZeroNumerator != Probs.end()); - DynamicAPInt LCM(Probs[0].getDenominator()); - DynamicAPInt GCD(FirstNonZeroNumerator->getNumerator()); - for (const auto &Prob : drop_begin(Probs)) { - if (!Prob.getNumerator()) - continue; - LCM = llvm::lcm(LCM, DynamicAPInt(Prob.getDenominator())); - GCD = llvm::gcd(GCD, DynamicAPInt(Prob.getNumerator())); - } SmallVector Weights; Weights.reserve(Term->getNumSuccessors()); - for (const auto &Prob : Probs) { - DynamicAPInt W = - (Prob.getNumerator() * LCM / GCD) / Prob.getDenominator(); - Weights.emplace_back(static_cast((int64_t)W)); + if (WeightsForTest) { + static const std::array Primes{3, 5, 7, 11, 13, 17, 19, 23, 29, 31, + 37, 41, 43, 47, 53, 59, 61, 67, 71}; + for (uint32_t I = 0, E = Term->getNumSuccessors(); I < E; ++I) + Weights.emplace_back( + Primes[(WeightsForTestOffset + I) % Primes.size()]); + ++WeightsForTestOffset; + } else { + Probs.reserve(Term->getNumSuccessors()); + for (auto I = 0U, E = Term->getNumSuccessors(); I < E; ++I) + Probs.emplace_back(BPI.getEdgeProbability(&BB, Term->getSuccessor(I))); + + assert(llvm::find_if(Probs, + [](const BranchProbability &P) { + return P.isUnknown(); + }) == Probs.end() && + "All 
branch probabilities should be valid"); + const auto *FirstZeroDenominator = + find_if(Probs, [](const BranchProbability &P) { + return P.getDenominator() == 0; + }); + (void)FirstZeroDenominator; + assert(FirstZeroDenominator == Probs.end()); + const auto *FirstNonZeroNumerator = find_if( + Probs, [](const BranchProbability &P) { return !P.isZero(); }); + assert(FirstNonZeroNumerator != Probs.end()); + DynamicAPInt LCM(Probs[0].getDenominator()); + DynamicAPInt GCD(FirstNonZeroNumerator->getNumerator()); + for (const auto &Prob : drop_begin(Probs)) { + if (!Prob.getNumerator()) + continue; + LCM = llvm::lcm(LCM, DynamicAPInt(Prob.getDenominator())); + GCD = llvm::gcd(GCD, DynamicAPInt(Prob.getNumerator())); + } + for (const auto &Prob : Probs) { + DynamicAPInt W = + (Prob.getNumerator() * LCM / GCD) / Prob.getDenominator(); + Weights.emplace_back(static_cast((int64_t)W)); + } } setBranchWeights(*Term, Weights, /*IsExpected=*/false); Changed = true; diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index a1f759dd1df83..8bba634521e3e 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -95,7 +95,9 @@ using namespace PatternMatch; #define DEBUG_TYPE "simplifycfg" -cl::opt llvm::RequireAndPreserveDomTree( +namespace llvm { + +cl::opt RequireAndPreserveDomTree( "simplifycfg-require-and-preserve-domtree", cl::Hidden, cl::desc( @@ -205,6 +207,8 @@ static cl::opt MaxJumpThreadingLiveBlocks( extern cl::opt ProfcheckDisableMetadataFixes; +} // end namespace llvm + STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps"); STATISTIC(NumLinearMaps, "Number of switch instructions turned into linear mapping"); @@ -525,28 +529,33 @@ static bool dominatesMergePoint( static ConstantInt *getConstantInt(Value *V, const DataLayout &DL) { // Normal constant int. ConstantInt *CI = dyn_cast(V); - if (CI || !isa(V) || !V->getType()->isPointerTy() || - DL.isNonIntegralPointerType(V->getType())) + if (CI || !isa(V) || !V->getType()->isPointerTy()) return CI; + // It is not safe to look through inttoptr or ptrtoint when using unstable + // pointer types. + if (DL.hasUnstableRepresentation(V->getType())) + return nullptr; + // This is some kind of pointer constant. Turn it into a pointer-sized // ConstantInt if possible. - IntegerType *PtrTy = cast(DL.getIntPtrType(V->getType())); + IntegerType *IntPtrTy = cast(DL.getIntPtrType(V->getType())); // Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*). if (isa(V)) - return ConstantInt::get(PtrTy, 0); + return ConstantInt::get(IntPtrTy, 0); - // IntToPtr const int. + // IntToPtr const int, we can look through this if the semantics of + // inttoptr for this address space are a simple (truncating) bitcast. if (ConstantExpr *CE = dyn_cast(V)) if (CE->getOpcode() == Instruction::IntToPtr) if (ConstantInt *CI = dyn_cast(CE->getOperand(0))) { // The constant is very likely to have the right type already. - if (CI->getType() == PtrTy) + if (CI->getType() == IntPtrTy) return CI; else return cast( - ConstantFoldIntegerCast(CI, PtrTy, /*isSigned=*/false, DL)); + ConstantFoldIntegerCast(CI, IntPtrTy, /*isSigned=*/false, DL)); } return nullptr; } @@ -866,10 +875,12 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(Instruction *TI) { } } - // Unwrap any lossless ptrtoint cast. + // Unwrap any lossless ptrtoint cast (except for unstable pointers). 
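Returning briefly to the ProfileVerify hunk above: it turns BranchProbability values into integer !prof weights by scaling every probability to the least common multiple of the denominators and then dividing out the greatest common divisor of the non-zero numerators, so the weights stay proportional but small. A minimal stand-alone sketch of that conversion, assuming plain (numerator, denominator) pairs rather than the pass's BranchProbability/DynamicAPInt types (the helper name is illustrative):

  #include <cstdint>
  #include <numeric>
  #include <utility>
  #include <vector>

  static std::vector<uint64_t>
  probabilitiesToWeights(const std::vector<std::pair<uint64_t, uint64_t>> &Probs) {
    uint64_t LCM = 1, GCD = 0;
    for (const auto &[Num, Den] : Probs) {
      LCM = std::lcm(LCM, Den);
      if (Num != 0)
        GCD = GCD == 0 ? Num : std::gcd(GCD, Num);
    }
    if (GCD == 0)
      GCD = 1; // all numerators zero; keep the division well defined
    std::vector<uint64_t> Weights;
    for (const auto &[Num, Den] : Probs)
      // Scale to the common denominator, then strip the shared factor.
      Weights.push_back(Num * (LCM / Den) / GCD);
    return Weights;
  }

For example, probabilities 1/4, 1/4 and 1/2 come out as weights 1, 1 and 2. The patch text continues below.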
if (CV) { if (PtrToIntInst *PTII = dyn_cast(CV)) { Value *Ptr = PTII->getPointerOperand(); + if (DL.hasUnstableRepresentation(Ptr->getType())) + return CV; if (PTII->getType() == DL.getIntPtrType(Ptr->getType())) CV = Ptr; } @@ -948,33 +959,6 @@ static bool valuesOverlap(std::vector &C1, return false; } -// Set branch weights on SwitchInst. This sets the metadata if there is at -// least one non-zero weight. -static void setBranchWeights(SwitchInst *SI, ArrayRef Weights, - bool IsExpected) { - // Check that there is at least one non-zero weight. Otherwise, pass - // nullptr to setMetadata which will erase the existing metadata. - MDNode *N = nullptr; - if (llvm::any_of(Weights, [](uint32_t W) { return W != 0; })) - N = MDBuilder(SI->getParent()->getContext()) - .createBranchWeights(Weights, IsExpected); - SI->setMetadata(LLVMContext::MD_prof, N); -} - -// Similar to the above, but for branch and select instructions that take -// exactly 2 weights. -static void setBranchWeights(Instruction *I, uint32_t TrueWeight, - uint32_t FalseWeight, bool IsExpected) { - assert(isa(I) || isa(I)); - // Check that there is at least one non-zero weight. Otherwise, pass - // nullptr to setMetadata which will erase the existing metadata. - MDNode *N = nullptr; - if (TrueWeight || FalseWeight) - N = MDBuilder(I->getParent()->getContext()) - .createBranchWeights(TrueWeight, FalseWeight, IsExpected); - I->setMetadata(LLVMContext::MD_prof, N); -} - /// If TI is known to be a terminator instruction and its block is known to /// only have a single predecessor block, check to see if that predecessor is /// also a value comparison with the same value, and if that comparison @@ -1174,16 +1158,6 @@ static void getBranchWeights(Instruction *TI, } } -/// Keep halving the weights until all can fit in uint32_t. -static void fitWeights(MutableArrayRef Weights) { - uint64_t Max = *llvm::max_element(Weights); - if (Max > UINT_MAX) { - unsigned Offset = 32 - llvm::countl_zero(Max); - for (uint64_t &I : Weights) - I >>= Offset; - } -} - static void cloneInstructionsIntoPredecessorBlockAndUpdateSSAUses( BasicBlock *BB, BasicBlock *PredBlock, ValueToValueMapTy &VMap) { Instruction *PTI = PredBlock->getTerminator(); @@ -1427,6 +1401,8 @@ bool SimplifyCFGOpt::performValueComparisonIntoPredecessorFolding( Builder.SetInsertPoint(PTI); // Convert pointer to int before we switch. if (CV->getType()->isPointerTy()) { + assert(!DL.hasUnstableRepresentation(CV->getType()) && + "Should not end up here with unstable pointers"); CV = Builder.CreatePtrToInt(CV, DL.getIntPtrType(CV->getType()), "magicptr"); } @@ -1437,14 +1413,9 @@ bool SimplifyCFGOpt::performValueComparisonIntoPredecessorFolding( for (ValueEqualityComparisonCase &V : PredCases) NewSI->addCase(V.Value, V.Dest); - if (PredHasWeights || SuccHasWeights) { - // Halve the weights if any of them cannot fit in an uint32_t - fitWeights(Weights); - - SmallVector MDWeights(Weights.begin(), Weights.end()); - - setBranchWeights(NewSI, MDWeights, /*IsExpected=*/false); - } + if (PredHasWeights || SuccHasWeights) + setFittedBranchWeights(*NewSI, Weights, /*IsExpected=*/false, + /*ElideAllZero=*/true); eraseTerminatorAndDCECond(PTI); @@ -4044,39 +4015,34 @@ static bool performBranchToCommonDestFolding(BranchInst *BI, BranchInst *PBI, // Try to update branch weights. 
uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight; - SmallVector MDWeights; + SmallVector MDWeights; if (extractPredSuccWeights(PBI, BI, PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight)) { - SmallVector NewWeights; if (PBI->getSuccessor(0) == BB) { // PBI: br i1 %x, BB, FalseDest // BI: br i1 %y, UniqueSucc, FalseDest // TrueWeight is TrueWeight for PBI * TrueWeight for BI. - NewWeights.push_back(PredTrueWeight * SuccTrueWeight); + MDWeights.push_back(PredTrueWeight * SuccTrueWeight); // FalseWeight is FalseWeight for PBI * TotalWeight for BI + // TrueWeight for PBI * FalseWeight for BI. // We assume that total weights of a BranchInst can fit into 32 bits. // Therefore, we will not have overflow using 64-bit arithmetic. - NewWeights.push_back(PredFalseWeight * - (SuccFalseWeight + SuccTrueWeight) + - PredTrueWeight * SuccFalseWeight); + MDWeights.push_back(PredFalseWeight * (SuccFalseWeight + SuccTrueWeight) + + PredTrueWeight * SuccFalseWeight); } else { // PBI: br i1 %x, TrueDest, BB // BI: br i1 %y, TrueDest, UniqueSucc // TrueWeight is TrueWeight for PBI * TotalWeight for BI + // FalseWeight for PBI * TrueWeight for BI. - NewWeights.push_back(PredTrueWeight * (SuccFalseWeight + SuccTrueWeight) + - PredFalseWeight * SuccTrueWeight); + MDWeights.push_back(PredTrueWeight * (SuccFalseWeight + SuccTrueWeight) + + PredFalseWeight * SuccTrueWeight); // FalseWeight is FalseWeight for PBI * FalseWeight for BI. - NewWeights.push_back(PredFalseWeight * SuccFalseWeight); + MDWeights.push_back(PredFalseWeight * SuccFalseWeight); } - // Halve the weights if any of them cannot fit in an uint32_t - fitWeights(NewWeights); - - append_range(MDWeights, NewWeights); - setBranchWeights(PBI, MDWeights[0], MDWeights[1], /*IsExpected=*/false); + setFittedBranchWeights(*PBI, MDWeights, /*IsExpected=*/false, + /*ElideAllZero=*/true); // TODO: If BB is reachable from all paths through PredBlock, then we // could replace PBI's branch probabilities with BI's. 
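The weight arithmetic above is ordinary probability bookkeeping: the folded branch reaches UniqueSucc only when both original conditions take the path through BB, and the shared false destination absorbs every other outcome. A small stand-alone sketch of the first case (PBI: br %x, BB, FalseDest; BI: br %y, UniqueSucc, FalseDest), using plain integers and a hypothetical helper name, not the pass's API:

  #include <cstdint>
  #include <utility>

  // Returns {weight to UniqueSucc, weight to FalseDest} for the folded branch.
  static std::pair<uint64_t, uint64_t>
  combineFoldedWeights(uint64_t PredTrue, uint64_t PredFalse,
                       uint64_t SuccTrue, uint64_t SuccFalse) {
    // UniqueSucc: both PBI and BI must be taken.
    uint64_t TrueWeight = PredTrue * SuccTrue;
    // FalseDest: PBI false (whatever BI would do), or PBI true and BI false.
    uint64_t FalseWeight =
        PredFalse * (SuccTrue + SuccFalse) + PredTrue * SuccFalse;
    return {TrueWeight, FalseWeight};
  }

With weights 3:1 on PBI and 1:1 on BI this yields 3:5, matching the intuition that UniqueSucc is the rarer outcome because it needs both branches to be taken.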
@@ -4116,8 +4082,8 @@ static bool performBranchToCommonDestFolding(BranchInst *BI, BranchInst *PBI, if (auto *SI = dyn_cast(PBI->getCondition())) if (!MDWeights.empty()) { assert(isSelectInRoleOfConjunctionOrDisjunction(SI)); - setBranchWeights(SI, MDWeights[0], MDWeights[1], - /*IsExpected=*/false); + setFittedBranchWeights(*SI, {MDWeights[0], MDWeights[1]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } ++NumFoldBranchToCommonDest; @@ -4469,9 +4435,9 @@ static bool mergeConditionalStoreToAddress( if (InvertQCond) std::swap(QWeights[0], QWeights[1]); auto CombinedWeights = getDisjunctionWeights(PWeights, QWeights); - setBranchWeights(PostBB->getTerminator(), CombinedWeights[0], - CombinedWeights[1], - /*IsExpected=*/false); + setFittedBranchWeights(*PostBB->getTerminator(), + {CombinedWeights[0], CombinedWeights[1]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } QB.SetInsertPoint(T); @@ -4827,10 +4793,9 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, uint64_t NewWeights[2] = {PredCommon * (SuccCommon + SuccOther) + PredOther * SuccCommon, PredOther * SuccOther}; - // Halve the weights if any of them cannot fit in an uint32_t - fitWeights(NewWeights); - setBranchWeights(PBI, NewWeights[0], NewWeights[1], /*IsExpected=*/false); + setFittedBranchWeights(*PBI, NewWeights, /*IsExpected=*/false, + /*ElideAllZero=*/true); // Cond may be a select instruction with the first operand set to "true", or // the second to "false" (see how createLogicalOp works for `and` and `or`) if (!ProfcheckDisableMetadataFixes) @@ -4840,8 +4805,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, assert(dyn_cast(SI)->getCondition() == PBICond); // The corresponding probabilities are what was referred to above as // PredCommon and PredOther. - setBranchWeights(SI, PredCommon, PredOther, - /*IsExpected=*/false); + setFittedBranchWeights(*SI, {PredCommon, PredOther}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } } @@ -4867,8 +4832,8 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI, if (HasWeights) { uint64_t TrueWeight = PBIOp ? PredFalseWeight : PredTrueWeight; uint64_t FalseWeight = PBIOp ? PredTrueWeight : PredFalseWeight; - setBranchWeights(NV, TrueWeight, FalseWeight, - /*IsExpected=*/false); + setFittedBranchWeights(*NV, {TrueWeight, FalseWeight}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } } } @@ -4931,7 +4896,8 @@ bool SimplifyCFGOpt::simplifyTerminatorOnSelect(Instruction *OldTerm, // Create a conditional branch sharing the condition of the select. BranchInst *NewBI = Builder.CreateCondBr(Cond, TrueBB, FalseBB); if (TrueWeight != FalseWeight) - setBranchWeights(NewBI, TrueWeight, FalseWeight, /*IsExpected=*/false); + setBranchWeights(*NewBI, {TrueWeight, FalseWeight}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } } else if (KeepEdge1 && (KeepEdge2 || TrueBB == FalseBB)) { // Neither of the selected blocks were successors, so this @@ -5246,6 +5212,8 @@ bool SimplifyCFGOpt::simplifyBranchOnICmpChain(BranchInst *BI, Builder.SetInsertPoint(BI); // Convert pointer to int before we switch. 
if (CompVal->getType()->isPointerTy()) { + assert(!DL.hasUnstableRepresentation(CompVal->getType()) && + "Should not end up here with unstable pointers"); CompVal = Builder.CreatePtrToInt( CompVal, DL.getIntPtrType(CompVal->getType()), "magicptr"); } @@ -5878,7 +5846,8 @@ bool SimplifyCFGOpt::turnSwitchRangeIntoICmp(SwitchInst *SI, TrueWeight /= 2; FalseWeight /= 2; } - setBranchWeights(NewBI, TrueWeight, FalseWeight, /*IsExpected=*/false); + setFittedBranchWeights(*NewBI, {TrueWeight, FalseWeight}, + /*IsExpected=*/false, /*ElideAllZero=*/true); } } @@ -6318,9 +6287,12 @@ static bool initializeUniqueCases(SwitchInst *SI, PHINode *&PHI, // Helper function that checks if it is possible to transform a switch with only // two cases (or two cases + default) that produces a result into a select. // TODO: Handle switches with more than 2 cases that map to the same result. +// The branch weights correspond to the provided Condition (i.e. if Condition is +// modified from the original SwitchInst, the caller must adjust the weights). static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Constant *DefaultResult, Value *Condition, - IRBuilder<> &Builder, const DataLayout &DL) { + IRBuilder<> &Builder, const DataLayout &DL, + ArrayRef BranchWeights) { // If we are selecting between only two cases transform into a simple // select or a two-way select if default is possible. // Example: @@ -6329,6 +6301,10 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, // case 20: return 2; ----> %2 = icmp eq i32 %a, 20 // default: return 4; %3 = select i1 %2, i32 2, i32 %1 // } + + const bool HasBranchWeights = + !BranchWeights.empty() && !ProfcheckDisableMetadataFixes; + if (ResultVector.size() == 2 && ResultVector[0].second.size() == 1 && ResultVector[1].second.size() == 1) { ConstantInt *FirstCase = ResultVector[0].second[0]; @@ -6339,11 +6315,36 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Builder.CreateICmpEQ(Condition, SecondCase, "switch.selectcmp"); SelectValue = Builder.CreateSelect(ValueCompare, ResultVector[1].first, DefaultResult, "switch.select"); + if (auto *SI = dyn_cast(SelectValue); + SI && HasBranchWeights) { + // We start with 3 probabilities, where the numerator is the + // corresponding BranchWeights[i], and the denominator is the sum over + // BranchWeights. We want the probability and negative probability of + // Condition == SecondCase. + assert(BranchWeights.size() == 3); + setBranchWeights( + *SI, {BranchWeights[2], BranchWeights[0] + BranchWeights[1]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); + } } Value *ValueCompare = Builder.CreateICmpEQ(Condition, FirstCase, "switch.selectcmp"); - return Builder.CreateSelect(ValueCompare, ResultVector[0].first, - SelectValue, "switch.select"); + Value *Ret = Builder.CreateSelect(ValueCompare, ResultVector[0].first, + SelectValue, "switch.select"); + if (auto *SI = dyn_cast(Ret); SI && HasBranchWeights) { + // We may have had a DefaultResult. Base the position of the first and + // second's branch weights accordingly. Also the probability that Condition + // != FirstCase needs to take that into account. + assert(BranchWeights.size() >= 2); + size_t FirstCasePos = (Condition != nullptr); + size_t SecondCasePos = FirstCasePos + 1; + uint32_t DefaultCase = (Condition != nullptr) ? 
BranchWeights[0] : 0; + setBranchWeights(*SI, + {BranchWeights[FirstCasePos], + DefaultCase + BranchWeights[SecondCasePos]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); + } + return Ret; } // Handle the degenerate case where two cases have the same result value. @@ -6379,8 +6380,18 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Value *And = Builder.CreateAnd(Condition, AndMask); Value *Cmp = Builder.CreateICmpEQ( And, Constant::getIntegerValue(And->getType(), AndMask)); - return Builder.CreateSelect(Cmp, ResultVector[0].first, - DefaultResult); + Value *Ret = + Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); + if (auto *SI = dyn_cast(Ret); SI && HasBranchWeights) { + // We know there's a Default case. We base the resulting branch + // weights off its probability. + assert(BranchWeights.size() >= 2); + setBranchWeights( + *SI, + {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); + } + return Ret; } } @@ -6397,7 +6408,16 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Value *And = Builder.CreateAnd(Condition, ~BitMask, "switch.and"); Value *Cmp = Builder.CreateICmpEQ( And, Constant::getNullValue(And->getType()), "switch.selectcmp"); - return Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); + Value *Ret = + Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); + if (auto *SI = dyn_cast(Ret); SI && HasBranchWeights) { + assert(BranchWeights.size() >= 2); + setBranchWeights( + *SI, + {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); + } + return Ret; } } @@ -6408,7 +6428,15 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Value *Cmp2 = Builder.CreateICmpEQ(Condition, CaseValues[1], "switch.selectcmp.case2"); Value *Cmp = Builder.CreateOr(Cmp1, Cmp2, "switch.selectcmp"); - return Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); + Value *Ret = + Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); + if (auto *SI = dyn_cast(Ret); SI && HasBranchWeights) { + assert(BranchWeights.size() >= 2); + setBranchWeights( + *SI, {accumulate(drop_begin(BranchWeights), 0U), BranchWeights[0]}, + /*IsExpected=*/false, /*ElideAllZero=*/true); + } + return Ret; } } @@ -6469,8 +6497,18 @@ static bool trySwitchToSelect(SwitchInst *SI, IRBuilder<> &Builder, assert(PHI != nullptr && "PHI for value select not found"); Builder.SetInsertPoint(SI); - Value *SelectValue = - foldSwitchToSelect(UniqueResults, DefaultResult, Cond, Builder, DL); + SmallVector BranchWeights; + if (!ProfcheckDisableMetadataFixes) { + [[maybe_unused]] auto HasWeights = + extractBranchWeights(getBranchWeightMDNode(*SI), BranchWeights); + assert(!HasWeights == (BranchWeights.empty())); + } + assert(BranchWeights.empty() || + (BranchWeights.size() >= + UniqueResults.size() + (DefaultResult != nullptr))); + + Value *SelectValue = foldSwitchToSelect(UniqueResults, DefaultResult, Cond, + Builder, DL, BranchWeights); if (!SelectValue) return false; @@ -8078,8 +8116,8 @@ static bool mergeNestedCondBranch(BranchInst *BI, DomTreeUpdater *DTU) { if (HasWeight) { uint64_t Weights[2] = {BBTWeight * BB1FWeight + BBFWeight * BB2TWeight, BBTWeight * BB1TWeight + BBFWeight * BB2FWeight}; - fitWeights(Weights); - setBranchWeights(BI, Weights[0], Weights[1], /*IsExpected=*/false); + setFittedBranchWeights(*BI, Weights, /*IsExpected=*/false, + /*ElideAllZero=*/true); } 
return true; } diff --git a/llvm/lib/Transforms/Vectorize/CMakeLists.txt b/llvm/lib/Transforms/Vectorize/CMakeLists.txt index 96670fe3ea195..9f4a242214471 100644 --- a/llvm/lib/Transforms/Vectorize/CMakeLists.txt +++ b/llvm/lib/Transforms/Vectorize/CMakeLists.txt @@ -1,5 +1,4 @@ add_llvm_component_library(LLVMVectorize - EVLIndVarSimplify.cpp LoadStoreVectorizer.cpp LoopIdiomVectorize.cpp LoopVectorizationLegality.cpp diff --git a/llvm/lib/Transforms/Vectorize/EVLIndVarSimplify.cpp b/llvm/lib/Transforms/Vectorize/EVLIndVarSimplify.cpp deleted file mode 100644 index 5dd689799b828..0000000000000 --- a/llvm/lib/Transforms/Vectorize/EVLIndVarSimplify.cpp +++ /dev/null @@ -1,300 +0,0 @@ -//===---- EVLIndVarSimplify.cpp - Optimize vectorized loops w/ EVL IV------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This pass optimizes a vectorized loop with canonical IV to using EVL-based -// IV if it was tail-folded by predicated EVL. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Transforms/Vectorize/EVLIndVarSimplify.h" -#include "llvm/ADT/Statistic.h" -#include "llvm/Analysis/IVDescriptors.h" -#include "llvm/Analysis/LoopInfo.h" -#include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/OptimizationRemarkEmitter.h" -#include "llvm/Analysis/ScalarEvolution.h" -#include "llvm/Analysis/ScalarEvolutionExpressions.h" -#include "llvm/Analysis/ValueTracking.h" -#include "llvm/IR/IRBuilder.h" -#include "llvm/IR/PatternMatch.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/MathExtras.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Scalar/LoopPassManager.h" -#include "llvm/Transforms/Utils/Local.h" - -#define DEBUG_TYPE "evl-iv-simplify" - -using namespace llvm; - -STATISTIC(NumEliminatedCanonicalIV, "Number of canonical IVs we eliminated"); - -static cl::opt EnableEVLIndVarSimplify( - "enable-evl-indvar-simplify", - cl::desc("Enable EVL-based induction variable simplify Pass"), cl::Hidden, - cl::init(true)); - -namespace { -struct EVLIndVarSimplifyImpl { - ScalarEvolution &SE; - OptimizationRemarkEmitter *ORE = nullptr; - - EVLIndVarSimplifyImpl(LoopStandardAnalysisResults &LAR, - OptimizationRemarkEmitter *ORE) - : SE(LAR.SE), ORE(ORE) {} - - /// Returns true if modify the loop. - bool run(Loop &L); -}; -} // anonymous namespace - -/// Returns the constant part of vectorization factor from the induction -/// variable's step value SCEV expression. -static uint32_t getVFFromIndVar(const SCEV *Step, const Function &F) { - if (!Step) - return 0U; - - // Looking for loops with IV step value in the form of `( x - // vscale)`. - if (const auto *Mul = dyn_cast(Step)) { - if (Mul->getNumOperands() == 2) { - const SCEV *LHS = Mul->getOperand(0); - const SCEV *RHS = Mul->getOperand(1); - if (const auto *Const = dyn_cast(LHS); - Const && isa(RHS)) { - uint64_t V = Const->getAPInt().getLimitedValue(); - if (llvm::isUInt<32>(V)) - return V; - } - } - } - - // If not, see if the vscale_range of the parent function is a fixed value, - // which makes the step value to be replaced by a constant. 
- if (F.hasFnAttribute(Attribute::VScaleRange)) - if (const auto *ConstStep = dyn_cast(Step)) { - APInt V = ConstStep->getAPInt().abs(); - ConstantRange CR = llvm::getVScaleRange(&F, 64); - if (const APInt *Fixed = CR.getSingleElement()) { - V = V.zextOrTrunc(Fixed->getBitWidth()); - uint64_t VF = V.udiv(*Fixed).getLimitedValue(); - if (VF && llvm::isUInt<32>(VF) && - // Make sure step is divisible by vscale. - V.urem(*Fixed).isZero()) - return VF; - } - } - - return 0U; -} - -bool EVLIndVarSimplifyImpl::run(Loop &L) { - if (!EnableEVLIndVarSimplify) - return false; - - if (!getBooleanLoopAttribute(&L, "llvm.loop.isvectorized")) - return false; - const MDOperand *EVLMD = - findStringMetadataForLoop(&L, "llvm.loop.isvectorized.tailfoldingstyle") - .value_or(nullptr); - if (!EVLMD || !EVLMD->equalsStr("evl")) - return false; - - BasicBlock *LatchBlock = L.getLoopLatch(); - ICmpInst *OrigLatchCmp = L.getLatchCmpInst(); - if (!LatchBlock || !OrigLatchCmp) - return false; - - InductionDescriptor IVD; - PHINode *IndVar = L.getInductionVariable(SE); - if (!IndVar || !L.getInductionDescriptor(SE, IVD)) { - const char *Reason = (IndVar ? "induction descriptor is not available" - : "cannot recognize induction variable"); - LLVM_DEBUG(dbgs() << "Cannot retrieve IV from loop " << L.getName() - << " because" << Reason << "\n"); - if (ORE) { - ORE->emit([&]() { - return OptimizationRemarkMissed(DEBUG_TYPE, "UnrecognizedIndVar", - L.getStartLoc(), L.getHeader()) - << "Cannot retrieve IV because " << ore::NV("Reason", Reason); - }); - } - return false; - } - - BasicBlock *InitBlock, *BackEdgeBlock; - if (!L.getIncomingAndBackEdge(InitBlock, BackEdgeBlock)) { - LLVM_DEBUG(dbgs() << "Expect unique incoming and backedge in " - << L.getName() << "\n"); - if (ORE) { - ORE->emit([&]() { - return OptimizationRemarkMissed(DEBUG_TYPE, "UnrecognizedLoopStructure", - L.getStartLoc(), L.getHeader()) - << "Does not have a unique incoming and backedge"; - }); - } - return false; - } - - // Retrieve the loop bounds. - std::optional Bounds = L.getBounds(SE); - if (!Bounds) { - LLVM_DEBUG(dbgs() << "Could not obtain the bounds for loop " << L.getName() - << "\n"); - if (ORE) { - ORE->emit([&]() { - return OptimizationRemarkMissed(DEBUG_TYPE, "UnrecognizedLoopStructure", - L.getStartLoc(), L.getHeader()) - << "Could not obtain the loop bounds"; - }); - } - return false; - } - Value *CanonicalIVInit = &Bounds->getInitialIVValue(); - Value *CanonicalIVFinal = &Bounds->getFinalIVValue(); - - const SCEV *StepV = IVD.getStep(); - uint32_t VF = getVFFromIndVar(StepV, *L.getHeader()->getParent()); - if (!VF) { - LLVM_DEBUG(dbgs() << "Could not infer VF from IndVar step '" << *StepV - << "'\n"); - if (ORE) { - ORE->emit([&]() { - return OptimizationRemarkMissed(DEBUG_TYPE, "UnrecognizedIndVar", - L.getStartLoc(), L.getHeader()) - << "Could not infer VF from IndVar step " - << ore::NV("Step", StepV); - }); - } - return false; - } - LLVM_DEBUG(dbgs() << "Using VF=" << VF << " for loop " << L.getName() - << "\n"); - - // Try to find the EVL-based induction variable. - using namespace PatternMatch; - BasicBlock *BB = IndVar->getParent(); - - Value *EVLIndVar = nullptr; - Value *RemTC = nullptr; - Value *TC = nullptr; - auto IntrinsicMatch = m_Intrinsic( - m_Value(RemTC), m_SpecificInt(VF), - /*Scalable=*/m_SpecificInt(1)); - for (PHINode &PN : BB->phis()) { - if (&PN == IndVar) - continue; - - // Check 1: it has to contain both incoming (init) & backedge blocks - // from IndVar. 
- if (PN.getBasicBlockIndex(InitBlock) < 0 || - PN.getBasicBlockIndex(BackEdgeBlock) < 0) - continue; - // Check 2: EVL index is always increasing, thus its inital value has to be - // equal to either the initial IV value (when the canonical IV is also - // increasing) or the last IV value (when canonical IV is decreasing). - Value *Init = PN.getIncomingValueForBlock(InitBlock); - using Direction = Loop::LoopBounds::Direction; - switch (Bounds->getDirection()) { - case Direction::Increasing: - if (Init != CanonicalIVInit) - continue; - break; - case Direction::Decreasing: - if (Init != CanonicalIVFinal) - continue; - break; - case Direction::Unknown: - // To be more permissive and see if either the initial or final IV value - // matches PN's init value. - if (Init != CanonicalIVInit && Init != CanonicalIVFinal) - continue; - break; - } - Value *RecValue = PN.getIncomingValueForBlock(BackEdgeBlock); - assert(RecValue && "expect recurrent IndVar value"); - - LLVM_DEBUG(dbgs() << "Found candidate PN of EVL-based IndVar: " << PN - << "\n"); - - // Check 3: Pattern match to find the EVL-based index and total trip count - // (TC). - if (match(RecValue, - m_c_Add(m_ZExtOrSelf(IntrinsicMatch), m_Specific(&PN))) && - match(RemTC, m_Sub(m_Value(TC), m_Specific(&PN)))) { - EVLIndVar = RecValue; - break; - } - } - - if (!EVLIndVar || !TC) - return false; - - LLVM_DEBUG(dbgs() << "Using " << *EVLIndVar << " for EVL-based IndVar\n"); - if (ORE) { - ORE->emit([&]() { - DebugLoc DL; - BasicBlock *Region = nullptr; - if (auto *I = dyn_cast(EVLIndVar)) { - DL = I->getDebugLoc(); - Region = I->getParent(); - } else { - DL = L.getStartLoc(); - Region = L.getHeader(); - } - return OptimizationRemark(DEBUG_TYPE, "UseEVLIndVar", DL, Region) - << "Using " << ore::NV("EVLIndVar", EVLIndVar) - << " for EVL-based IndVar"; - }); - } - - // Create an EVL-based comparison and replace the branch to use it as - // predicate. - - // Loop::getLatchCmpInst check at the beginning of this function has ensured - // that latch block ends in a conditional branch. - auto *LatchBranch = cast(LatchBlock->getTerminator()); - assert(LatchBranch->isConditional() && - "expect the loop latch to be ended with a conditional branch"); - ICmpInst::Predicate Pred; - if (LatchBranch->getSuccessor(0) == L.getHeader()) - Pred = ICmpInst::ICMP_NE; - else - Pred = ICmpInst::ICMP_EQ; - - IRBuilder<> Builder(OrigLatchCmp); - auto *NewLatchCmp = Builder.CreateICmp(Pred, EVLIndVar, TC); - OrigLatchCmp->replaceAllUsesWith(NewLatchCmp); - - // llvm::RecursivelyDeleteDeadPHINode only deletes cycles whose values are - // not used outside the cycles. However, in this case the now-RAUW-ed - // OrigLatchCmp will be considered a use outside the cycle while in reality - // it's practically dead. Thus we need to remove it before calling - // RecursivelyDeleteDeadPHINode. 
- (void)RecursivelyDeleteTriviallyDeadInstructions(OrigLatchCmp); - if (llvm::RecursivelyDeleteDeadPHINode(IndVar)) - LLVM_DEBUG(dbgs() << "Removed original IndVar\n"); - - ++NumEliminatedCanonicalIV; - - return true; -} - -PreservedAnalyses EVLIndVarSimplifyPass::run(Loop &L, LoopAnalysisManager &LAM, - LoopStandardAnalysisResults &AR, - LPMUpdater &U) { - Function &F = *L.getHeader()->getParent(); - auto &FAMProxy = LAM.getResult(L, AR); - OptimizationRemarkEmitter *ORE = - FAMProxy.getCachedResult(F); - - if (EVLIndVarSimplifyImpl(AR, ORE).run(L)) - return PreservedAnalyses::allInSet(); - return PreservedAnalyses::all(); -} diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp index ff35db14f7094..7d376c370bb1c 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp @@ -293,9 +293,8 @@ void LoopVectorizeHints::getHintsFromMetadata() { } void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) { - if (!Name.starts_with(Prefix())) + if (!Name.consume_front(Prefix())) return; - Name = Name.substr(Prefix().size(), StringRef::npos); const ConstantInt *C = mdconst::dyn_extract(Arg); if (!C) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index ca092dcfcb492..77506878e2029 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -692,11 +692,6 @@ class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer { // vectorization of *epilogue* loops in the process of vectorizing loops and // their epilogues. class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer { - /// The additional bypass block which conditionally skips over the epilogue - /// loop after executing the main loop. Needed to resume inductions and - /// reductions during epilogue vectorization. - BasicBlock *AdditionalBypassBlock = nullptr; - public: EpilogueVectorizerEpilogueLoop( Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI, @@ -706,28 +701,12 @@ class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer { GeneratedRTChecks &Checks, VPlan &Plan) : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TTI, AC, EPI, CM, BFI, PSI, Checks, Plan, EPI.EpilogueVF, - EPI.EpilogueVF, EPI.EpilogueUF) { - TripCount = EPI.TripCount; - } + EPI.EpilogueVF, EPI.EpilogueUF) {} /// Implements the interface for creating a vectorized skeleton using the /// *epilogue loop* strategy (i.e., the second pass of VPlan execution). BasicBlock *createVectorizedLoopSkeleton() final; - /// Return the additional bypass block which targets the scalar loop by - /// skipping the epilogue loop after completing the main loop. - BasicBlock *getAdditionalBypassBlock() const { - assert(AdditionalBypassBlock && - "Trying to access AdditionalBypassBlock but it has not been set"); - return AdditionalBypassBlock; - } - protected: - /// Emits an iteration count bypass check after the main vector loop has - /// finished to see if there are any iterations left to execute by either - /// the vector epilogue or the scalar epilogue. 
- BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *VectorPH, - BasicBlock *Bypass, - BasicBlock *Insert); void printDebugTracesAtStart() override; void printDebugTracesAtEnd() override; }; @@ -1783,9 +1762,10 @@ class GeneratedRTChecks { GeneratedRTChecks(PredicatedScalarEvolution &PSE, DominatorTree *DT, LoopInfo *LI, TargetTransformInfo *TTI, const DataLayout &DL, TTI::TargetCostKind CostKind) - : DT(DT), LI(LI), TTI(TTI), SCEVExp(*PSE.getSE(), DL, "scev.check"), - MemCheckExp(*PSE.getSE(), DL, "scev.check"), PSE(PSE), - CostKind(CostKind) {} + : DT(DT), LI(LI), TTI(TTI), + SCEVExp(*PSE.getSE(), DL, "scev.check", /*PreserveLCSSA=*/false), + MemCheckExp(*PSE.getSE(), DL, "scev.check", /*PreserveLCSSA=*/false), + PSE(PSE), CostKind(CostKind) {} /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can /// accurately estimate the cost of the runtime checks. The blocks are @@ -2459,8 +2439,9 @@ struct CSEDenseMapInfo { } // end anonymous namespace -///Perform cse of induction variable instructions. -static void cse(BasicBlock *BB) { +/// FIXME: This legacy common-subexpression-elimination routine is scheduled for +/// removal, in favor of the VPlan-based one. +static void legacyCSE(BasicBlock *BB) { // Perform simple cse. SmallDenseMap CSEMap; for (Instruction &In : llvm::make_early_inc_range(*BB)) { @@ -2564,7 +2545,7 @@ void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB]; // Remove redundant induction instructions. - cse(HeaderBB); + legacyCSE(HeaderBB); } void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { @@ -2907,15 +2888,12 @@ LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I, InstructionCost SafeDivisorCost = 0; auto *VecTy = toVectorTy(I->getType(), VF); - auto *DivisorI = dyn_cast(I->getOperand(1)); - if (DivisorI && !Legal->isInvariant(DivisorI)) { - // The cost of the select guard to ensure all lanes are well defined - // after we speculate above any internal control flow. - SafeDivisorCost += - TTI.getCmpSelInstrCost(Instruction::Select, VecTy, - toVectorTy(Type::getInt1Ty(I->getContext()), VF), - CmpInst::BAD_ICMP_PREDICATE, CostKind); - } + // The cost of the select guard to ensure all lanes are well defined + // after we speculate above any internal control flow. + SafeDivisorCost += + TTI.getCmpSelInstrCost(Instruction::Select, VecTy, + toVectorTy(Type::getInt1Ty(I->getContext()), VF), + CmpInst::BAD_ICMP_PREDICATE, CostKind); SmallVector Operands(I->operand_values()); SafeDivisorCost += TTI.getArithmeticInstrCost( @@ -3925,7 +3903,8 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks( if (VF.isScalar()) continue; - VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, + *CM.PSE.getSE()); precomputeCosts(*Plan, VF, CostCtx); auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry()); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly(Iter)) { @@ -4182,7 +4161,8 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() { // Add on other costs that are modelled in VPlan, but not in the legacy // cost model. 
- VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind, + *CM.PSE.getSE()); VPRegionBlock *VectorRegion = P->getVectorLoopRegion(); assert(VectorRegion && "Expected to have a vector region!"); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly( @@ -5721,6 +5701,20 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { Worklist.push_back(InstOp); } + auto UpdateMemOpUserCost = [this, VF](LoadInst *LI) { + // If there are direct memory op users of the newly scalarized load, + // their cost may have changed because there's no scalarization + // overhead for the operand. Update it. + for (User *U : LI->users()) { + if (!isa(U)) + continue; + if (getWideningDecision(cast(U), VF) != CM_Scalarize) + continue; + setWideningDecision( + cast(U), VF, CM_Scalarize, + getMemInstScalarizationCost(cast(U), VF)); + } + }; for (auto *I : AddrDefs) { if (isa(I)) { // Setting the desired widening decision should ideally be handled in @@ -5730,21 +5724,24 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { InstWidening Decision = getWideningDecision(I, VF); if (Decision == CM_Widen || Decision == CM_Widen_Reverse || (!isPredicatedInst(I) && !Legal->isUniformMemOp(*I, VF) && - Decision == CM_Scalarize)) + Decision == CM_Scalarize)) { // Scalarize a widened load of address or update the cost of a scalar // load of an address. setWideningDecision( I, VF, CM_Scalarize, (VF.getKnownMinValue() * getMemoryInstructionCost(I, ElementCount::getFixed(1)))); - else if (const auto *Group = getInterleavedAccessGroup(I)) { + UpdateMemOpUserCost(cast(I)); + } else if (const auto *Group = getInterleavedAccessGroup(I)) { // Scalarize an interleave group of address loads. for (unsigned I = 0; I < Group->getFactor(); ++I) { - if (Instruction *Member = Group->getMember(I)) + if (Instruction *Member = Group->getMember(I)) { setWideningDecision( Member, VF, CM_Scalarize, (VF.getKnownMinValue() * getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); + UpdateMemOpUserCost(cast(Member)); + } } } } else { @@ -6384,19 +6381,8 @@ void LoopVectorizationCostModel::collectValuesToIgnore() { LoopBlocksDFS DFS(TheLoop); DFS.perform(LI); - MapVector> DeadInvariantStoreOps; for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO()))) for (Instruction &I : reverse(*BB)) { - // Find all stores to invariant variables. Since they are going to sink - // outside the loop we do not need calculate cost for them. - StoreInst *SI; - if ((SI = dyn_cast(&I)) && - Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) { - ValuesToIgnore.insert(&I); - DeadInvariantStoreOps[SI->getPointerOperand()].push_back( - SI->getValueOperand()); - } - if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I)) continue; @@ -6443,9 +6429,6 @@ void LoopVectorizationCostModel::collectValuesToIgnore() { append_range(DeadInterleavePointerOps, Op->operands()); } - for (const auto &[_, Ops] : DeadInvariantStoreOps) - llvm::append_range(DeadOps, drop_end(Ops)); - // Mark ops that would be trivially dead and are only used by ignored // instructions as free. 
BasicBlock *Header = TheLoop->getHeader(); @@ -6871,7 +6854,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF, InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan, ElementCount VF) const { - VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, *PSE.getSE()); InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx); // Now compute and add the VPlan-based cost. @@ -6908,6 +6891,28 @@ static bool planContainsAdditionalSimplifications(VPlan &Plan, return nullptr; }; + // Check if a select for a safe divisor was hoisted to the pre-header. If so, + // the select doesn't need to be considered for the vector loop cost; go with + // the more accurate VPlan-based cost model. + for (VPRecipeBase &R : *Plan.getVectorPreheader()) { + auto *VPI = dyn_cast(&R); + if (!VPI || VPI->getOpcode() != Instruction::Select || + VPI->getNumUsers() != 1) + continue; + + if (auto *WR = dyn_cast(*VPI->user_begin())) { + switch (WR->getOpcode()) { + case Instruction::UDiv: + case Instruction::SDiv: + case Instruction::URem: + case Instruction::SRem: + return true; + default: + break; + } + } + } + DenseSet SeenInstrs; auto Iter = vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly(Iter)) { @@ -7082,7 +7087,8 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() { // simplifications not accounted for in the legacy cost model. If that's the // case, don't trigger the assertion, as the extra simplifications may cause a // different VF to be picked by the VPlan-based cost model. - VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind, + *CM.PSE.getSE()); precomputeCosts(BestPlan, BestFactor.Width, CostCtx); // Verify that the VPlan-based and legacy cost models agree, except for VPlans // with early exits and plans with additional VPlan simplifications. The @@ -7223,7 +7229,6 @@ DenseMap LoopVectorizationPlanner::executePlan( VPlanTransforms::narrowInterleaveGroups( BestVPlan, BestVF, TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)); - VPlanTransforms::cse(BestVPlan); VPlanTransforms::removeDeadRecipes(BestVPlan); VPlanTransforms::convertToConcreteRecipes(BestVPlan); @@ -7237,6 +7242,7 @@ DenseMap LoopVectorizationPlanner::executePlan( BestVPlan, VectorPH, CM.foldTailByMasking(), CM.requiresScalarEpilogue(BestVF.isVector())); VPlanTransforms::materializeVFAndVFxUF(BestVPlan, VectorPH, BestVF); + VPlanTransforms::cse(BestVPlan); VPlanTransforms::simplifyRecipes(BestVPlan); // 0. Generate SCEV-dependent code in the entry, including TripCount, before @@ -7419,124 +7425,28 @@ BasicBlock *EpilogueVectorizerMainLoop::emitIterationCountCheck( // EpilogueVectorizerEpilogueLoop //===--------------------------------------------------------------------===// -/// This function is partially responsible for generating the control flow -/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. +/// This function creates a new scalar preheader, using the previous one as +/// entry block to the epilogue VPlan. The minimum iteration check is being +/// represented in VPlan. 
BasicBlock *EpilogueVectorizerEpilogueLoop::createVectorizedLoopSkeleton() { - BasicBlock *ScalarPH = createScalarPreheader("vec.epilog."); - BasicBlock *VectorPH = ScalarPH->getSinglePredecessor(); - // Now, compare the remaining count and if there aren't enough iterations to - // execute the vectorized epilogue skip to the scalar part. - VectorPH->setName("vec.epilog.ph"); - BasicBlock *VecEpilogueIterationCountCheck = - SplitBlock(VectorPH, VectorPH->begin(), DT, LI, nullptr, - "vec.epilog.iter.check", true); - VectorPHVPBB = replaceVPBBWithIRVPBB(VectorPHVPBB, VectorPH); - - emitMinimumVectorEpilogueIterCountCheck(VectorPH, ScalarPH, - VecEpilogueIterationCountCheck); - AdditionalBypassBlock = VecEpilogueIterationCountCheck; - - // Adjust the control flow taking the state info from the main loop - // vectorization into account. - assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && - "expected this to be saved from the previous pass."); - EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( - VecEpilogueIterationCountCheck, VectorPH); - - EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( - VecEpilogueIterationCountCheck, ScalarPH); - - // Adjust the terminators of runtime check blocks and phis using them. - BasicBlock *SCEVCheckBlock = RTChecks.getSCEVChecks().second; - BasicBlock *MemCheckBlock = RTChecks.getMemRuntimeChecks().second; - if (SCEVCheckBlock) - SCEVCheckBlock->getTerminator()->replaceUsesOfWith( - VecEpilogueIterationCountCheck, ScalarPH); - if (MemCheckBlock) - MemCheckBlock->getTerminator()->replaceUsesOfWith( - VecEpilogueIterationCountCheck, ScalarPH); - - DT->changeImmediateDominator(ScalarPH, EPI.EpilogueIterationCountCheck); - - // The vec.epilog.iter.check block may contain Phi nodes from inductions or - // reductions which merge control-flow from the latch block and the middle - // block. Update the incoming values here and move the Phi into the preheader. - SmallVector PhisInBlock( - llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis())); - - for (PHINode *Phi : PhisInBlock) { - Phi->moveBefore(VectorPH->getFirstNonPHIIt()); - Phi->replaceIncomingBlockWith( - VecEpilogueIterationCountCheck->getSinglePredecessor(), - VecEpilogueIterationCountCheck); - - // If the phi doesn't have an incoming value from the - // EpilogueIterationCountCheck, we are done. Otherwise remove the incoming - // value and also those from other check blocks. This is needed for - // reduction phis only. - if (none_of(Phi->blocks(), [&](BasicBlock *IncB) { - return EPI.EpilogueIterationCountCheck == IncB; - })) + BasicBlock *NewScalarPH = createScalarPreheader("vec.epilog."); + BasicBlock *OriginalScalarPH = NewScalarPH->getSinglePredecessor(); + OriginalScalarPH->setName("vec.epilog.iter.check"); + VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(OriginalScalarPH); + VPBasicBlock *OldEntry = Plan.getEntry(); + for (auto &R : make_early_inc_range(*OldEntry)) { + // Skip moving VPIRInstructions (including VPIRPhis), which are unmovable by + // defining. 
+ if (isa(&R)) continue; - Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck); - if (SCEVCheckBlock) - Phi->removeIncomingValue(SCEVCheckBlock); - if (MemCheckBlock) - Phi->removeIncomingValue(MemCheckBlock); + R.moveBefore(*NewEntry, NewEntry->end()); } - return VectorPH; -} - -BasicBlock * -EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck( - BasicBlock *VectorPH, BasicBlock *Bypass, BasicBlock *Insert) { - - assert(EPI.TripCount && - "Expected trip count to have been saved in the first pass."); - Value *TC = EPI.TripCount; - IRBuilder<> Builder(Insert->getTerminator()); - Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining"); - - // Generate code to check if the loop's trip count is less than VF * UF of the - // vector epilogue loop. - auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF.isVector()) - ? ICmpInst::ICMP_ULE - : ICmpInst::ICMP_ULT; - - Value *CheckMinIters = - Builder.CreateICmp(P, Count, - createStepForVF(Builder, Count->getType(), - EPI.EpilogueVF, EPI.EpilogueUF), - "min.epilog.iters.check"); - - BranchInst &BI = *BranchInst::Create(Bypass, VectorPH, CheckMinIters); - auto VScale = Cost->getVScaleForTuning(); - unsigned MainLoopStep = - estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale); - unsigned EpilogueLoopStep = - estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale); - // We assume the remaining `Count` is equally distributed in - // [0, MainLoopStep) - // So the probability for `Count < EpilogueLoopStep` should be - // min(MainLoopStep, EpilogueLoopStep) / MainLoopStep - // TODO: Improve the estimate by taking the estimated trip count into - // consideration. - unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep); - const uint32_t Weights[] = {EstimatedSkipCount, - MainLoopStep - EstimatedSkipCount}; - setBranchWeights(BI, Weights, /*IsExpected=*/false); - ReplaceInstWithInst(Insert->getTerminator(), &BI); - - // A new entry block has been created for the epilogue VPlan. Hook it in, as - // otherwise we would try to modify the entry to the main vector loop. - VPIRBasicBlock *NewEntry = Plan.createVPIRBasicBlock(Insert); - VPBasicBlock *OldEntry = Plan.getEntry(); VPBlockUtils::reassociateBlocks(OldEntry, NewEntry); Plan.setEntry(NewEntry); // OldEntry is now dead and will be cleaned up when the plan gets destroyed. - return Insert; + return OriginalScalarPH; } void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { @@ -7594,12 +7504,13 @@ VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef Operands, VPSingleDefRecipe *VectorPtr; if (Reverse) { // When folding the tail, we may compute an address that we don't in the - // original scalar loop and it may not be inbounds. Drop Inbounds in that - // case. + // original scalar loop: drop the GEP no-wrap flags in this case. + // Otherwise preserve existing flags without no-unsigned-wrap, as we will + // emit negative indices. GEPNoWrapFlags Flags = - (CM.foldTailByMasking() || !GEP || !GEP->isInBounds()) + CM.foldTailByMasking() || !GEP ? 
GEPNoWrapFlags::none() - : GEPNoWrapFlags::inBounds(); + : GEP->getNoWrapFlags().withoutNoUnsignedWrap(); VectorPtr = new VPVectorEndPointerRecipe(Ptr, &Plan.getVF(), getLoadStoreType(I), /*Stride*/ -1, Flags, I->getDebugLoc()); @@ -8041,11 +7952,18 @@ bool VPRecipeBuilder::getScaledReductions( BinaryOperator *ExtendUser = dyn_cast(Op); std::optional BinOpc; Type *ExtOpTypes[2] = {nullptr}; - - auto CollectExtInfo = [this, &Exts, - &ExtOpTypes](SmallVectorImpl &Ops) -> bool { - unsigned I = 0; - for (Value *OpI : Ops) { + TTI::PartialReductionExtendKind ExtKinds[2] = {TTI::PR_None}; + + auto CollectExtInfo = [this, &Exts, &ExtOpTypes, + &ExtKinds](SmallVectorImpl &Ops) -> bool { + for (const auto &[I, OpI] : enumerate(Ops)) { + auto *CI = dyn_cast(OpI); + if (I > 0 && CI && + canConstantBeExtended(CI, ExtOpTypes[0], ExtKinds[0])) { + ExtOpTypes[I] = ExtOpTypes[0]; + ExtKinds[I] = ExtKinds[0]; + continue; + } Value *ExtOp; if (!match(OpI, m_ZExtOrSExt(m_Value(ExtOp)))) return false; @@ -8056,7 +7974,7 @@ bool VPRecipeBuilder::getScaledReductions( return false; ExtOpTypes[I] = ExtOp->getType(); - I++; + ExtKinds[I] = TTI::getPartialReductionExtendKind(Exts[I]); } return true; }; @@ -8085,10 +8003,6 @@ bool VPRecipeBuilder::getScaledReductions( } else return false; - TTI::PartialReductionExtendKind OpAExtend = - TTI::getPartialReductionExtendKind(Exts[0]); - TTI::PartialReductionExtendKind OpBExtend = - Exts[1] ? TTI::getPartialReductionExtendKind(Exts[1]) : TTI::PR_None; PartialReductionChain Chain(RdxExitInstr, Exts[0], Exts[1], ExtendUser); TypeSize PHISize = PHI->getType()->getPrimitiveSizeInBits(); @@ -8101,7 +8015,8 @@ bool VPRecipeBuilder::getScaledReductions( [&](ElementCount VF) { InstructionCost Cost = TTI->getPartialReductionCost( Update->getOpcode(), ExtOpTypes[0], ExtOpTypes[1], - PHI->getType(), VF, OpAExtend, OpBExtend, BinOpc, CM.CostKind); + PHI->getType(), VF, ExtKinds[0], ExtKinds[1], BinOpc, + CM.CostKind); return Cost.isValid(); }, Range)) { @@ -8178,8 +8093,11 @@ VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(VPSingleDefRecipe *R, if (isa(Instr) || isa(Instr)) return tryToWidenMemory(Instr, Operands, Range); - if (std::optional ScaleFactor = getScalingForReduction(Instr)) - return tryToCreatePartialReduction(Instr, Operands, ScaleFactor.value()); + if (std::optional ScaleFactor = getScalingForReduction(Instr)) { + if (auto PartialRed = + tryToCreatePartialReduction(Instr, Operands, ScaleFactor.value())) + return PartialRed; + } if (!shouldWiden(Instr, Range)) return nullptr; @@ -8213,6 +8131,10 @@ VPRecipeBuilder::tryToCreatePartialReduction(Instruction *Reduction, isa(BinOpRecipe)) std::swap(BinOp, Accumulator); + if (ScaleFactor != + vputils::getVFScaleFactor(Accumulator->getDefiningRecipe())) + return nullptr; + unsigned ReductionOpcode = Reduction->getOpcode(); if (ReductionOpcode == Instruction::Sub) { auto *const Zero = ConstantInt::get(Reduction->getType(), 0); @@ -8267,14 +8189,12 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, VFRange SubRange = {VF, MaxVFTimes2}; if (auto Plan = tryToBuildVPlanWithVPRecipes( std::unique_ptr(VPlan0->duplicate()), SubRange, &LVer)) { - bool HasScalarVF = Plan->hasScalarVFOnly(); // Now optimize the initial VPlan. 
- if (!HasScalarVF) - VPlanTransforms::runPass(VPlanTransforms::truncateToMinimalBitwidths, - *Plan, CM.getMinimalBitwidths()); + VPlanTransforms::runPass(VPlanTransforms::truncateToMinimalBitwidths, + *Plan, CM.getMinimalBitwidths()); VPlanTransforms::runPass(VPlanTransforms::optimize, *Plan); // TODO: try to put it close to addActiveLaneMask(). - if (CM.foldTailWithEVL() && !HasScalarVF) + if (CM.foldTailWithEVL()) VPlanTransforms::runPass(VPlanTransforms::addExplicitVectorLength, *Plan, CM.getMaxSafeElements()); assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid"); @@ -8704,7 +8624,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes( // TODO: Enable following transform when the EVL-version of extended-reduction // and mulacc-reduction are implemented. if (!CM.foldTailWithEVL()) { - VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, + *CM.PSE.getSE()); VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan, CostCtx, Range); } @@ -9615,63 +9536,65 @@ static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) { } /// Prepare \p Plan for vectorizing the epilogue loop. That is, re-use expanded -/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. -static void -preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, - const SCEV2ValueTy &ExpandedSCEVs, - EpilogueLoopVectorizationInfo &EPI) { +/// SCEVs from \p ExpandedSCEVs and set resume values for header recipes. Some +/// reductions require creating new instructions to compute the resume values. +/// They are collected in a vector and returned. They must be moved to the +/// preheader of the vector epilogue loop, after created by the execution of \p +/// Plan. +static SmallVector preparePlanForEpilogueVectorLoop( + VPlan &Plan, Loop *L, const SCEV2ValueTy &ExpandedSCEVs, + EpilogueLoopVectorizationInfo &EPI, LoopVectorizationCostModel &CM, + ScalarEvolution &SE) { VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion(); VPBasicBlock *Header = VectorLoop->getEntryBasicBlock(); Header->setName("vec.epilog.vector.body"); - DenseMap ToFrozen; // Ensure that the start values for all header phi recipes are updated before // vectorizing the epilogue loop. - for (VPRecipeBase &R : Header->phis()) { - if (auto *IV = dyn_cast(&R)) { - // When vectorizing the epilogue loop, the canonical induction start - // value needs to be changed from zero to the value after the main - // vector loop. Find the resume value created during execution of the main - // VPlan. It must be the first phi in the loop preheader. - // FIXME: Improve modeling for canonical IV start values in the epilogue - // loop. - using namespace llvm::PatternMatch; - PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin(); - for (Value *Inc : EPResumeVal->incoming_values()) { - if (match(Inc, m_SpecificInt(0))) - continue; - assert(!EPI.VectorTripCount && - "Must only have a single non-zero incoming value"); - EPI.VectorTripCount = Inc; - } - // If we didn't find a non-zero vector trip count, all incoming values - // must be zero, which also means the vector trip count is zero. Pick the - // first zero as vector trip count. - // TODO: We should not choose VF * UF so the main vector loop is known to - // be dead. 
- if (!EPI.VectorTripCount) { - assert( - EPResumeVal->getNumIncomingValues() > 0 && - all_of(EPResumeVal->incoming_values(), - [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) && - "all incoming values must be 0"); - EPI.VectorTripCount = EPResumeVal->getOperand(0); - } - VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal); - assert(all_of(IV->users(), - [](const VPUser *U) { - return isa(U) || - isa(U) || - cast(U)->isScalarCast() || - cast(U)->getOpcode() == - Instruction::Add; - }) && - "the canonical IV should only be used by its increment or " - "ScalarIVSteps when resetting the start value"); - IV->setOperand(0, VPV); + VPCanonicalIVPHIRecipe *IV = Plan.getCanonicalIV(); + // When vectorizing the epilogue loop, the canonical induction start + // value needs to be changed from zero to the value after the main + // vector loop. Find the resume value created during execution of the main + // VPlan. It must be the first phi in the loop preheader. + // FIXME: Improve modeling for canonical IV start values in the epilogue + // loop. + using namespace llvm::PatternMatch; + PHINode *EPResumeVal = &*L->getLoopPreheader()->phis().begin(); + for (Value *Inc : EPResumeVal->incoming_values()) { + if (match(Inc, m_SpecificInt(0))) continue; - } + assert(!EPI.VectorTripCount && + "Must only have a single non-zero incoming value"); + EPI.VectorTripCount = Inc; + } + // If we didn't find a non-zero vector trip count, all incoming values + // must be zero, which also means the vector trip count is zero. Pick the + // first zero as vector trip count. + // TODO: We should not choose VF * UF so the main vector loop is known to + // be dead. + if (!EPI.VectorTripCount) { + assert(EPResumeVal->getNumIncomingValues() > 0 && + all_of(EPResumeVal->incoming_values(), + [](Value *Inc) { return match(Inc, m_SpecificInt(0)); }) && + "all incoming values must be 0"); + EPI.VectorTripCount = EPResumeVal->getOperand(0); + } + VPValue *VPV = Plan.getOrAddLiveIn(EPResumeVal); + assert(all_of(IV->users(), + [](const VPUser *U) { + return isa(U) || + isa(U) || + cast(U)->isScalarCast() || + cast(U)->getOpcode() == + Instruction::Add; + }) && + "the canonical IV should only be used by its increment or " + "ScalarIVSteps when resetting the start value"); + IV->setOperand(0, VPV); + DenseMap ToFrozen; + SmallVector InstsToMove; + for (VPRecipeBase &R : drop_begin(Header->phis())) { Value *ResumeV = nullptr; // TODO: Move setting of resume values to prepareToExecute. 
if (auto *ReductionPhi = dyn_cast(&R)) { @@ -9694,6 +9617,8 @@ preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, BasicBlock *PBB = cast(ResumeV)->getParent(); IRBuilder<> Builder(PBB, PBB->getFirstNonPHIIt()); ResumeV = Builder.CreateICmpNE(ResumeV, StartV); + if (auto *I = dyn_cast(ResumeV)) + InstsToMove.push_back(I); } else if (RecurrenceDescriptor::isFindIVRecurrenceKind(RK)) { Value *StartV = getStartValueFromReductionResult(RdxResult); ToFrozen[StartV] = cast(ResumeV)->getIncomingValueForBlock( @@ -9708,8 +9633,12 @@ preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, BasicBlock *ResumeBB = cast(ResumeV)->getParent(); IRBuilder<> Builder(ResumeBB, ResumeBB->getFirstNonPHIIt()); Value *Cmp = Builder.CreateICmpEQ(ResumeV, ToFrozen[StartV]); + if (auto *I = dyn_cast(Cmp)) + InstsToMove.push_back(I); Value *Sentinel = RdxResult->getOperand(2)->getLiveInIRValue(); ResumeV = Builder.CreateSelect(Cmp, Sentinel, ResumeV); + if (auto *I = dyn_cast(ResumeV)) + InstsToMove.push_back(I); } else { VPValue *StartVal = Plan.getOrAddLiveIn(ResumeV); auto *PhiR = dyn_cast(&R); @@ -9761,6 +9690,18 @@ preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L, Plan.resetTripCount(ExpandedVal); ExpandR->eraseFromParent(); } + + auto VScale = CM.getVScaleForTuning(); + unsigned MainLoopStep = + estimateElementCount(EPI.MainLoopVF * EPI.MainLoopUF, VScale); + unsigned EpilogueLoopStep = + estimateElementCount(EPI.EpilogueVF * EPI.EpilogueUF, VScale); + VPlanTransforms::addMinimumVectorEpilogueIterationCheck( + Plan, EPI.TripCount, EPI.VectorTripCount, + CM.requiresScalarEpilogue(EPI.EpilogueVF.isVector()), EPI.EpilogueVF, + EPI.EpilogueUF, MainLoopStep, EpilogueLoopStep, SE); + + return InstsToMove; } // Generate bypass values from the additional bypass block. Note that when the @@ -9827,6 +9768,101 @@ static void fixScalarResumeValuesFromBypass(BasicBlock *BypassBlock, Loop *L, } } +/// Connect the epilogue vector loop generated for \p EpiPlan to the main vector +// loop, after both plans have executed, updating branches from the iteration +// and runtime checks of the main loop, as well as updating various phis. \p +// InstsToMove contains instructions that need to be moved to the preheader of +// the epilogue vector loop. +static void connectEpilogueVectorLoop( + VPlan &EpiPlan, Loop *L, EpilogueLoopVectorizationInfo &EPI, + DominatorTree *DT, LoopVectorizationLegality &LVL, + DenseMap &ExpandedSCEVs, GeneratedRTChecks &Checks, + ArrayRef InstsToMove) { + BasicBlock *VecEpilogueIterationCountCheck = + cast(EpiPlan.getEntry())->getIRBasicBlock(); + + BasicBlock *VecEpiloguePreHeader = + cast(VecEpilogueIterationCountCheck->getTerminator()) + ->getSuccessor(1); + // Adjust the control flow taking the state info from the main loop + // vectorization into account. 
+ assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && + "expected this to be saved from the previous pass."); + DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager); + EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( + VecEpilogueIterationCountCheck, VecEpiloguePreHeader); + + DTU.applyUpdates({{DominatorTree::Delete, EPI.MainLoopIterationCountCheck, + VecEpilogueIterationCountCheck}, + {DominatorTree::Insert, EPI.MainLoopIterationCountCheck, + VecEpiloguePreHeader}}); + + BasicBlock *ScalarPH = + cast(EpiPlan.getScalarPreheader())->getIRBasicBlock(); + EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( + VecEpilogueIterationCountCheck, ScalarPH); + DTU.applyUpdates( + {{DominatorTree::Delete, EPI.EpilogueIterationCountCheck, + VecEpilogueIterationCountCheck}, + {DominatorTree::Insert, EPI.EpilogueIterationCountCheck, ScalarPH}}); + + // Adjust the terminators of runtime check blocks and phis using them. + BasicBlock *SCEVCheckBlock = Checks.getSCEVChecks().second; + BasicBlock *MemCheckBlock = Checks.getMemRuntimeChecks().second; + if (SCEVCheckBlock) { + SCEVCheckBlock->getTerminator()->replaceUsesOfWith( + VecEpilogueIterationCountCheck, ScalarPH); + DTU.applyUpdates({{DominatorTree::Delete, SCEVCheckBlock, + VecEpilogueIterationCountCheck}, + {DominatorTree::Insert, SCEVCheckBlock, ScalarPH}}); + } + if (MemCheckBlock) { + MemCheckBlock->getTerminator()->replaceUsesOfWith( + VecEpilogueIterationCountCheck, ScalarPH); + DTU.applyUpdates( + {{DominatorTree::Delete, MemCheckBlock, VecEpilogueIterationCountCheck}, + {DominatorTree::Insert, MemCheckBlock, ScalarPH}}); + } + + // The vec.epilog.iter.check block may contain Phi nodes from inductions + // or reductions which merge control-flow from the latch block and the + // middle block. Update the incoming values here and move the Phi into the + // preheader. + SmallVector PhisInBlock( + llvm::make_pointer_range(VecEpilogueIterationCountCheck->phis())); + + for (PHINode *Phi : PhisInBlock) { + Phi->moveBefore(VecEpiloguePreHeader->getFirstNonPHIIt()); + Phi->replaceIncomingBlockWith( + VecEpilogueIterationCountCheck->getSinglePredecessor(), + VecEpilogueIterationCountCheck); + + // If the phi doesn't have an incoming value from the + // EpilogueIterationCountCheck, we are done. Otherwise remove the + // incoming value and also those from other check blocks. This is needed + // for reduction phis only. + if (none_of(Phi->blocks(), [&](BasicBlock *IncB) { + return EPI.EpilogueIterationCountCheck == IncB; + })) + continue; + Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck); + if (SCEVCheckBlock) + Phi->removeIncomingValue(SCEVCheckBlock); + if (MemCheckBlock) + Phi->removeIncomingValue(MemCheckBlock); + } + + auto IP = VecEpiloguePreHeader->getFirstNonPHIIt(); + for (auto *I : InstsToMove) + I->moveBefore(IP); + + // VecEpilogueIterationCountCheck conditionally skips over the epilogue loop + // after executing the main loop. We need to update the resume values of + // inductions and reductions during epilogue vectorization. + fixScalarResumeValuesFromBypass(VecEpilogueIterationCountCheck, L, EpiPlan, + LVL, ExpandedSCEVs, EPI.VectorTripCount); +} + bool LoopVectorizePass::processLoop(Loop *L) { assert((EnableVPlanNativePath || L->isInnermost()) && "VPlan-native path is not enabled. 
Only process inner loops."); @@ -10043,7 +10079,7 @@ bool LoopVectorizePass::processLoop(Loop *L) { bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled; VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM, - CM.CostKind); + CM.CostKind, *CM.PSE.getSE()); if (!ForceVectorization && !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx, LVP.getPlanFor(VF.Width), SEL, @@ -10188,6 +10224,7 @@ bool LoopVectorizePass::processLoop(Loop *L) { // factor) again shortly afterwards. VPlan &BestEpiPlan = LVP.getPlanFor(EpilogueVF.Width); BestEpiPlan.getMiddleBlock()->setName("vec.epilog.middle.block"); + BestEpiPlan.getVectorPreheader()->setName("vec.epilog.ph"); preparePlanForMainVectorLoop(*BestMainPlan, BestEpiPlan); EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1, BestEpiPlan); @@ -10201,15 +10238,12 @@ bool LoopVectorizePass::processLoop(Loop *L) { // edges from the first pass. EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TTI, AC, EPI, &CM, BFI, PSI, Checks, BestEpiPlan); - EpilogILV.setTripCount(MainILV.getTripCount()); - preparePlanForEpilogueVectorLoop(BestEpiPlan, L, ExpandedSCEVs, EPI); - + SmallVector InstsToMove = preparePlanForEpilogueVectorLoop( + BestEpiPlan, L, ExpandedSCEVs, EPI, CM, *PSE.getSE()); LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV, DT, true); - - fixScalarResumeValuesFromBypass(EpilogILV.getAdditionalBypassBlock(), L, - BestEpiPlan, LVL, ExpandedSCEVs, - EPI.VectorTripCount); + connectEpilogueVectorLoop(BestEpiPlan, L, EPI, DT, LVL, ExpandedSCEVs, + Checks, InstsToMove); ++LoopsEpilogueVectorized; } else { InnerLoopVectorizer LB(L, PSE, LI, DT, TTI, AC, VF.Width, IC, &CM, BFI, PSI, diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 6ac9018df641e..fedca65d241e8 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -1100,7 +1100,9 @@ class BinOpSameOpcodeHelper { // constant + x cannot be -constant - x // instead, it should be x - -constant if (Pos == 1 || - (FromOpcode == Instruction::Add && ToOpcode == Instruction::Sub)) + ((FromOpcode == Instruction::Add || FromOpcode == Instruction::Or || + FromOpcode == Instruction::Xor) && + ToOpcode == Instruction::Sub)) return SmallVector({LHS, RHS}); return SmallVector({RHS, LHS}); } @@ -1188,6 +1190,10 @@ class BinOpSameOpcodeHelper { if (CIValue.isAllOnes()) InterchangeableMask = CanBeAll; break; + case Instruction::Xor: + if (CIValue.isZero()) + InterchangeableMask = XorBIT | OrBIT | AndBIT | SubBIT | AddBIT; + break; default: if (CIValue.isZero()) InterchangeableMask = CanBeAll; @@ -2099,6 +2105,7 @@ class BoUpSLP { UserIgnoreList = nullptr; PostponedGathers.clear(); ValueToGatherNodes.clear(); + TreeEntryToStridedPtrInfoMap.clear(); } unsigned getTreeSize() const { return VectorizableTree.size(); } @@ -2234,11 +2241,9 @@ class BoUpSLP { /// TODO: If load combining is allowed in the IR optimizer, this analysis /// may not be necessary. 
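// --- Illustrative sketch, not part of the patch: the operand-order rule noted
// in BinOpSameOpcodeHelper above ("constant + x cannot be -constant - x;
// instead, it should be x - -constant") now also applies when rewriting or/xor
// with a suitable constant as a sub. A tiny standalone check of the underlying
// identities; the values are arbitrary.
#include <cassert>

int main() {
  int x = 7, c = 5;
  assert(c + x == x - (-c));            // keep x as the minuend
  assert(c + x != (-c) - x);            // the naive operand swap is wrong for x != 0
  assert((x | 0) == x && (x ^ 0) == x); // or/xor with 0 behave like add with 0
  return 0;
}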
bool isLoadCombineCandidate(ArrayRef Stores) const; - bool isStridedLoad(ArrayRef VL, ArrayRef PointerOps, - ArrayRef Order, const TargetTransformInfo &TTI, - const DataLayout &DL, ScalarEvolution &SE, - const bool IsAnyPointerUsedOutGraph, const int64_t Diff, - StridedPtrInfo &SPtrInfo) const; + bool isStridedLoad(ArrayRef PointerOps, Type *ScalarTy, + Align Alignment, const int64_t Diff, Value *Ptr0, + Value *PtrN, StridedPtrInfo &SPtrInfo) const; /// Checks if the given array of loads can be represented as a vectorized, /// scatter or just simple gather. @@ -6818,16 +6823,21 @@ isMaskedLoadCompress(ArrayRef VL, ArrayRef PointerOps, /// 4. Any pointer operand is an instruction with the users outside of the /// current graph (for masked gathers extra extractelement instructions /// might be required). -bool BoUpSLP::isStridedLoad(ArrayRef VL, ArrayRef PointerOps, - ArrayRef Order, - const TargetTransformInfo &TTI, - const DataLayout &DL, ScalarEvolution &SE, - const bool IsAnyPointerUsedOutGraph, - const int64_t Diff, - StridedPtrInfo &SPtrInfo) const { - const size_t Sz = VL.size(); +bool BoUpSLP::isStridedLoad(ArrayRef PointerOps, Type *ScalarTy, + Align Alignment, const int64_t Diff, Value *Ptr0, + Value *PtrN, StridedPtrInfo &SPtrInfo) const { + const size_t Sz = PointerOps.size(); + if (Diff % (Sz - 1) != 0) + return false; + + // Try to generate strided load node. + auto IsAnyPointerUsedOutGraph = any_of(PointerOps, [&](Value *V) { + return isa(V) && any_of(V->users(), [&](User *U) { + return !isVectorized(U) && !MustGather.contains(U); + }); + }); + const uint64_t AbsoluteDiff = std::abs(Diff); - Type *ScalarTy = VL.front()->getType(); auto *VecTy = getWidenedType(ScalarTy, Sz); if (IsAnyPointerUsedOutGraph || (AbsoluteDiff > Sz && @@ -6838,20 +6848,9 @@ bool BoUpSLP::isStridedLoad(ArrayRef VL, ArrayRef PointerOps, int64_t Stride = Diff / static_cast(Sz - 1); if (Diff != Stride * static_cast(Sz - 1)) return false; - Align Alignment = - cast(Order.empty() ? VL.front() : VL[Order.front()]) - ->getAlign(); - if (!TTI.isLegalStridedLoadStore(VecTy, Alignment)) + if (!TTI->isLegalStridedLoadStore(VecTy, Alignment)) return false; - Value *Ptr0; - Value *PtrN; - if (Order.empty()) { - Ptr0 = PointerOps.front(); - PtrN = PointerOps.back(); - } else { - Ptr0 = PointerOps[Order.front()]; - PtrN = PointerOps[Order.back()]; - } + // Iterate through all pointers and check if all distances are // unique multiple of Dist. SmallSet Dists; @@ -6860,14 +6859,14 @@ bool BoUpSLP::isStridedLoad(ArrayRef VL, ArrayRef PointerOps, if (Ptr == PtrN) Dist = Diff; else if (Ptr != Ptr0) - Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, DL, SE); + Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE); // If the strides are not the same or repeated, we can't // vectorize. if (((Dist / Stride) * Stride) != Dist || !Dists.insert(Dist).second) break; } if (Dists.size() == Sz) { - Type *StrideTy = DL.getIndexType(Ptr0->getType()); + Type *StrideTy = DL->getIndexType(Ptr0->getType()); SPtrInfo.StrideVal = ConstantInt::get(StrideTy, Stride); SPtrInfo.Ty = getWidenedType(ScalarTy, Sz); return true; @@ -6956,18 +6955,11 @@ BoUpSLP::LoadsState BoUpSLP::canVectorizeLoads( cast(V), UserIgnoreList); })) return LoadsState::CompressVectorize; - // Simple check if not a strided access - clear order. - bool IsPossibleStrided = *Diff % (Sz - 1) == 0; - // Try to generate strided load node. 
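// --- Illustrative sketch, not part of the patch: the refactored isStridedLoad
// above requires the total pointer distance Diff to split into Sz-1 equal
// steps and every per-pointer distance to be a distinct multiple of that
// stride. The simplified standalone version below applies the same checks to
// plain element offsets (assumed to be relative to the first pointer); the
// real code additionally checks TTI legality and out-of-graph pointer users.
#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

static bool isStridedOffsets(const std::vector<int64_t> &Offsets) {
  size_t Sz = Offsets.size();
  if (Sz < 2)
    return false;
  int64_t Diff = Offsets.back() - Offsets.front();
  if (Diff == 0 || Diff % static_cast<int64_t>(Sz - 1) != 0)
    return false;
  int64_t Stride = Diff / static_cast<int64_t>(Sz - 1);
  std::set<int64_t> Dists;
  for (int64_t O : Offsets) {
    int64_t Dist = O - Offsets.front();
    // Strides must be the same and distances must not repeat.
    if (Dist % Stride != 0 || !Dists.insert(Dist).second)
      return false;
  }
  return Dists.size() == Sz;
}

int main() {
  assert(isStridedOffsets({0, 3, 6, 9}));  // stride 3
  assert(!isStridedOffsets({0, 3, 5, 9})); // 5 is not a multiple of the stride
  assert(!isStridedOffsets({0, 3, 3, 9})); // repeated distance
  return 0;
}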
- auto IsAnyPointerUsedOutGraph = - IsPossibleStrided && any_of(PointerOps, [&](Value *V) { - return isa(V) && any_of(V->users(), [&](User *U) { - return !isVectorized(U) && !MustGather.contains(U); - }); - }); - if (IsPossibleStrided && - isStridedLoad(VL, PointerOps, Order, *TTI, *DL, *SE, - IsAnyPointerUsedOutGraph, *Diff, SPtrInfo)) + Align Alignment = + cast(Order.empty() ? VL.front() : VL[Order.front()]) + ->getAlign(); + if (isStridedLoad(PointerOps, ScalarTy, Alignment, *Diff, Ptr0, PtrN, + SPtrInfo)) return LoadsState::StridedVectorize; } if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) || @@ -8945,6 +8937,8 @@ BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const { void BoUpSLP::buildTree(ArrayRef Roots, const SmallDenseSet &UserIgnoreLst) { deleteTree(); + assert(TreeEntryToStridedPtrInfoMap.empty() && + "TreeEntryToStridedPtrInfoMap is not cleared"); UserIgnoreList = &UserIgnoreLst; if (!allSameType(Roots)) return; @@ -8953,6 +8947,8 @@ void BoUpSLP::buildTree(ArrayRef Roots, void BoUpSLP::buildTree(ArrayRef Roots) { deleteTree(); + assert(TreeEntryToStridedPtrInfoMap.empty() && + "TreeEntryToStridedPtrInfoMap is not cleared"); if (!allSameType(Roots)) return; buildTreeRec(Roots, 0, EdgeInfo()); @@ -17522,7 +17518,9 @@ Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) { return !isa(V) && isa(V); })) || all_of(E->Scalars, [&](Value *V) { - return isa(V) || E->isCopyableElement(V) || + return isa(V) || + (E->Idx == 0 && isa(V)) || + E->isCopyableElement(V) || (!isVectorLikeInstWithConstOps(V) && isUsedOutsideBlock(V)); })) Res = FindLastInst(); @@ -19122,7 +19120,12 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) { } case Instruction::InsertElement: { assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); - Builder.SetInsertPoint(cast(E->Scalars.back())); + if (const TreeEntry *OpE = getOperandEntry(E, 1); + OpE && !OpE->isGather() && OpE->hasState() && + !OpE->hasCopyableElements()) + Builder.SetInsertPoint(cast(E->Scalars.back())); + else + setInsertPointAfterBundle(E); Value *V = vectorizeOperand(E, 1); ArrayRef Op = E->getOperand(1); Type *ScalarTy = Op.front()->getType(); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp index a1c6f7977885f..2555ebe2ad897 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp @@ -845,19 +845,10 @@ InstructionCost VPRegionBlock::cost(ElementCount VF, VPCostContext &Ctx) { if (VF.isScalable()) return InstructionCost::getInvalid(); - // First compute the cost of the conditionally executed recipes, followed by - // account for the branching cost, except if the mask is a header mask or - // uniform condition. - using namespace llvm::VPlanPatternMatch; + // Compute and return the cost of the conditionally executed recipes. + assert(VF.isVector() && "Can only compute vector cost at the moment."); VPBasicBlock *Then = cast(getEntry()->getSuccessors()[0]); - InstructionCost ThenCost = Then->cost(VF, Ctx); - - // For the scalar case, we may not always execute the original predicated - // block, Thus, scale the block's cost by the probability of executing it. - if (VF.isScalar()) - return ThenCost / getPredBlockCostDivisor(Ctx.CostKind); - - return ThenCost; + return Then->cost(VF, Ctx); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -977,24 +968,36 @@ void VPlan::execute(VPTransformState *State) { // logic generic during VPlan execution. 
State->CFG.DTU.applyUpdates( {{DominatorTree::Delete, ScalarPh, ScalarPh->getSingleSuccessor()}}); - } else { + } + ReversePostOrderTraversal> RPOT( + Entry); + // Generate code for the VPlan, in parts of the vector skeleton, loop body and + // successor blocks including the middle, exit and scalar preheader blocks. + for (VPBlockBase *Block : RPOT) + Block->execute(State); + + // If the original loop is unreachable, delete it and all its blocks. + if (!ScalarPhVPBB->hasPredecessors()) { + // DeleteDeadBlocks will remove single-entry phis. Remove them from the exit + // VPIRBBs in VPlan as well, otherwise we would retain references to deleted + // IR instructions. + for (VPIRBasicBlock *EB : getExitBlocks()) { + for (VPRecipeBase &R : make_early_inc_range(EB->phis())) { + if (R.getNumOperands() == 1) + R.eraseFromParent(); + } + } + Loop *OrigLoop = State->LI->getLoopFor(getScalarHeader()->getIRBasicBlock()); - // If the original loop is unreachable, we need to delete it. auto Blocks = OrigLoop->getBlocksVector(); Blocks.push_back(cast(ScalarPhVPBB)->getIRBasicBlock()); for (auto *BB : Blocks) State->LI->removeBlock(BB); + DeleteDeadBlocks(Blocks, &State->CFG.DTU); State->LI->erase(OrigLoop); } - ReversePostOrderTraversal> RPOT( - Entry); - // Generate code for the VPlan, in parts of the vector skeleton, loop body and - // successor blocks including the middle, exit and scalar preheader blocks. - for (VPBlockBase *Block : RPOT) - Block->execute(State); - State->CFG.DTU.flush(); VPBasicBlock *Header = vputils::getFirstLoopHeader(*this, State->VPDT); @@ -1750,6 +1753,16 @@ void LoopVectorizationPlanner::printPlans(raw_ostream &O) { } #endif +bool llvm::canConstantBeExtended(const ConstantInt *CI, Type *NarrowType, + TTI::PartialReductionExtendKind ExtKind) { + APInt TruncatedVal = CI->getValue().trunc(NarrowType->getScalarSizeInBits()); + unsigned WideSize = CI->getType()->getScalarSizeInBits(); + APInt ExtendedVal = ExtKind == TTI::PR_SignExtend + ? 
TruncatedVal.sext(WideSize) + : TruncatedVal.zext(WideSize); + return ExtendedVal == CI->getValue(); +} + TargetTransformInfo::OperandValueInfo VPCostContext::getOperandInfo(VPValue *V) const { if (!V->isLiveIn()) @@ -1759,7 +1772,8 @@ VPCostContext::getOperandInfo(VPValue *V) const { } InstructionCost VPCostContext::getScalarizationOverhead( - Type *ResultTy, ArrayRef Operands, ElementCount VF) { + Type *ResultTy, ArrayRef Operands, ElementCount VF, + bool AlwaysIncludeReplicatingR) { if (VF.isScalar()) return 0; @@ -1779,7 +1793,11 @@ InstructionCost VPCostContext::getScalarizationOverhead( SmallPtrSet UniqueOperands; SmallVector Tys; for (auto *Op : Operands) { - if (Op->isLiveIn() || isa(Op) || + if (Op->isLiveIn() || + (!AlwaysIncludeReplicatingR && + isa(Op)) || + (isa(Op) && + cast(Op)->getOpcode() == Instruction::Load) || !UniqueOperands.insert(Op).second) continue; Tys.push_back(toVectorizedTy(Types.inferScalarType(Op), VF)); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index e64cefde81e31..c167dd7f65fac 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -29,6 +29,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Twine.h" #include "llvm/ADT/ilist.h" @@ -705,6 +706,9 @@ class VPIRFlags { VPIRFlags(WrapFlagsTy WrapFlags) : OpType(OperationType::OverflowingBinOp), WrapFlags(WrapFlags) {} + VPIRFlags(TruncFlagsTy TruncFlags) + : OpType(OperationType::Trunc), TruncFlags(TruncFlags) {} + VPIRFlags(FastMathFlags FMFs) : OpType(OperationType::FPMathOp), FMFs(FMFs) {} VPIRFlags(DisjointFlagsTy DisjointFlags) @@ -1494,9 +1498,10 @@ class VPWidenCastRecipe : public VPRecipeWithIRFlags, public VPIRMetadata { VPWidenCastRecipe(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy, const VPIRFlags &Flags = {}, + const VPIRMetadata &Metadata = {}, DebugLoc DL = DebugLoc::getUnknown()) : VPRecipeWithIRFlags(VPDef::VPWidenCastSC, Op, Flags, DL), - VPIRMetadata(), Opcode(Opcode), ResultTy(ResultTy) { + VPIRMetadata(Metadata), Opcode(Opcode), ResultTy(ResultTy) { assert(flagsValidForOpcode(Opcode) && "Set flags not supported for the provided opcode"); } @@ -1504,11 +1509,11 @@ class VPWidenCastRecipe : public VPRecipeWithIRFlags, public VPIRMetadata { ~VPWidenCastRecipe() override = default; VPWidenCastRecipe *clone() override { + auto *New = new VPWidenCastRecipe(Opcode, getOperand(0), ResultTy, *this, + *this, getDebugLoc()); if (auto *UV = getUnderlyingValue()) - return new VPWidenCastRecipe(Opcode, getOperand(0), ResultTy, - *cast(UV)); - - return new VPWidenCastRecipe(Opcode, getOperand(0), ResultTy); + New->setUnderlyingValue(UV); + return New; } VP_CLASSOF_IMPL(VPDef::VPWidenCastSC) @@ -2973,7 +2978,8 @@ class LLVM_ABI_FOR_TEST VPBranchOnMaskRecipe : public VPRecipeBase { /// the expression is elevated to connect the non-expression recipe with the /// VPExpressionRecipe itself. class VPExpressionRecipe : public VPSingleDefRecipe { - /// Recipes included in this VPExpressionRecipe. + /// Recipes included in this VPExpressionRecipe. This could contain + /// duplicates. SmallVector ExpressionRecipes; /// Temporary VPValues used for external operands of the expression, i.e. @@ -2993,6 +2999,10 @@ class VPExpressionRecipe : public VPSingleDefRecipe { /// vector operands, performing a reduction.add on the result, and adding /// the scalar result to a chain. 
MulAccReduction, + /// Represent an inloop multiply-accumulate reduction, multiplying the + /// extended vector operands, negating the multiplication, performing a + /// reduction.add on the result, and adding the scalar result to a chain. + ExtNegatedMulAccReduction, }; /// Type of the expression. @@ -3016,10 +3026,26 @@ class VPExpressionRecipe : public VPSingleDefRecipe { VPWidenRecipe *Mul, VPReductionRecipe *Red) : VPExpressionRecipe(ExpressionTypes::ExtMulAccReduction, {Ext0, Ext1, Mul, Red}) {} + VPExpressionRecipe(VPWidenCastRecipe *Ext0, VPWidenCastRecipe *Ext1, + VPWidenRecipe *Mul, VPWidenRecipe *Sub, + VPReductionRecipe *Red) + : VPExpressionRecipe(ExpressionTypes::ExtNegatedMulAccReduction, + {Ext0, Ext1, Mul, Sub, Red}) { + assert(Mul->getOpcode() == Instruction::Mul && "Expected a mul"); + assert(Red->getRecurrenceKind() == RecurKind::Add && + "Expected an add reduction"); + assert(getNumOperands() >= 3 && "Expected at least three operands"); + [[maybe_unused]] auto *SubConst = dyn_cast(getOperand(2)->getLiveInIRValue()); + assert(SubConst && SubConst->getValue() == 0 && + Sub->getOpcode() == Instruction::Sub && "Expected a negating sub"); + } ~VPExpressionRecipe() override { - for (auto *R : reverse(ExpressionRecipes)) - delete R; + SmallPtrSet ExpressionRecipesSeen; + for (auto *R : reverse(ExpressionRecipes)) { + if (ExpressionRecipesSeen.insert(R).second) + delete R; + } for (VPValue *T : LiveInPlaceholders) delete T; } diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 46ab7712e2671..07bfe7a896d86 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -395,20 +395,6 @@ bool VPDominatorTree::properlyDominates(const VPRecipeBase *A, return Base::properlyDominates(ParentA, ParentB); } -/// Get the VF scaling factor applied to the recipe's output, if the recipe has -/// one. -static unsigned getVFScaleFactor(VPValue *R) { - if (auto *RR = dyn_cast(R)) - return RR->getVFScaleFactor(); - if (auto *RR = dyn_cast(R)) - return RR->getVFScaleFactor(); - assert( - (!isa(R) || cast(R)->getOpcode() != - VPInstruction::ReductionStartVector) && - "getting scaling factor of reduction-start-vector not implemented yet"); - return 1; -} - bool VPRegisterUsage::exceedsMaxNumRegs(const TargetTransformInfo &TTI, unsigned OverrideMaxNumRegs) const { return any_of(MaxLocalUsers, [&TTI, &OverrideMaxNumRegs](auto &LU) { @@ -571,7 +557,8 @@ SmallVector llvm::calculateRegisterUsageForPlan( } else { // The output from scaled phis and scaled reductions actually has // fewer lanes than the VF. 
- unsigned ScaleFactor = getVFScaleFactor(VPV); + unsigned ScaleFactor = + vputils::getVFScaleFactor(VPV->getDefiningRecipe()); ElementCount VF = VFs[J].divideCoefficientBy(ScaleFactor); LLVM_DEBUG(if (VF != VFs[J]) { dbgs() << "LV(REG): Scaled down VF from " << VFs[J] << " to " << VF diff --git a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp index cef91c15dd873..c8212af9f8e00 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp @@ -756,6 +756,43 @@ void VPlanTransforms::addMinimumIterationCheck( } } +void VPlanTransforms::addMinimumVectorEpilogueIterationCheck( + VPlan &Plan, Value *TripCount, Value *VectorTripCount, + bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, + unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE) { + // Add the minimum iteration check for the epilogue vector loop. + VPValue *TC = Plan.getOrAddLiveIn(TripCount); + VPBuilder Builder(cast(Plan.getEntry())); + VPValue *Count = Builder.createNaryOp( + Instruction::Sub, {TC, Plan.getOrAddLiveIn(VectorTripCount)}, + DebugLoc::getUnknown(), "n.vec.remaining"); + + // Generate code to check if the loop's trip count is less than VF * UF of + // the vector epilogue loop. + auto P = RequiresScalarEpilogue ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; + VPValue *VFxUF = Builder.createExpandSCEV(SE.getElementCount( + TripCount->getType(), (EpilogueVF * EpilogueUF), SCEV::FlagNUW)); + + auto *CheckMinIters = Builder.createICmp( + P, Count, VFxUF, DebugLoc::getUnknown(), "min.epilog.iters.check"); + VPInstruction *Branch = + Builder.createNaryOp(VPInstruction::BranchOnCond, CheckMinIters); + + // We assume the remaining `Count` is equally distributed in + // [0, MainLoopStep) + // So the probability for `Count < EpilogueLoopStep` should be + // min(MainLoopStep, EpilogueLoopStep) / MainLoopStep + // TODO: Improve the estimate by taking the estimated trip count into + // consideration. + unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep); + const uint32_t Weights[] = {EstimatedSkipCount, + MainLoopStep - EstimatedSkipCount}; + MDBuilder MDB(Plan.getContext()); + MDNode *BranchWeights = + MDB.createBranchWeights(Weights, /*IsExpected=*/false); + Branch->addMetadata(LLVMContext::MD_prof, BranchWeights); +} + bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) { auto GetMinMaxCompareValue = [](VPReductionPHIRecipe *RedPhiR) -> VPValue * { auto *MinMaxR = dyn_cast( diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h index fe59774b7c838..1580a3be3180a 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h +++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h @@ -349,12 +349,14 @@ struct VPCostContext { LoopVectorizationCostModel &CM; SmallPtrSet SkipCostComputation; TargetTransformInfo::TargetCostKind CostKind; + ScalarEvolution &SE; VPCostContext(const TargetTransformInfo &TTI, const TargetLibraryInfo &TLI, const VPlan &Plan, LoopVectorizationCostModel &CM, - TargetTransformInfo::TargetCostKind CostKind) + TargetTransformInfo::TargetCostKind CostKind, + ScalarEvolution &SE) : TTI(TTI), TLI(TLI), Types(Plan), LLVMCtx(Plan.getContext()), CM(CM), - CostKind(CostKind) {} + CostKind(CostKind), SE(SE) {} /// Return the cost for \p UI with \p VF using the legacy cost model as /// fallback until computing the cost of all recipes migrates to VPlan. 
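// --- Illustrative sketch, not part of the patch: a worked example of the
// branch weights computed in addMinimumVectorEpilogueIterationCheck above.
// Assuming the remaining trip count is uniformly distributed in
// [0, MainLoopStep), the epilogue is skipped with probability
// min(MainLoopStep, EpilogueLoopStep) / MainLoopStep. The step values below
// are made up (main loop VF=8 x UF=2, epilogue VF=4 x UF=1).
#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  unsigned MainLoopStep = 16, EpilogueLoopStep = 4;
  unsigned EstimatedSkipCount = std::min(MainLoopStep, EpilogueLoopStep);
  uint32_t Weights[] = {EstimatedSkipCount, MainLoopStep - EstimatedSkipCount};
  std::cout << "skip:enter = " << Weights[0] << ":" << Weights[1] // 4:12
            << ", skip probability "
            << 100.0 * Weights[0] / MainLoopStep << "%\n";        // 25%
  return 0;
}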
@@ -374,10 +376,12 @@ struct VPCostContext { /// Estimate the overhead of scalarizing a recipe with result type \p ResultTy /// and \p Operands with \p VF. This is a convenience wrapper for the - /// type-based getScalarizationOverhead API. - InstructionCost getScalarizationOverhead(Type *ResultTy, - ArrayRef Operands, - ElementCount VF); + /// type-based getScalarizationOverhead API. If \p AlwaysIncludeReplicatingR + /// is true, always compute the cost of scalarizing replicating operands. + InstructionCost + getScalarizationOverhead(Type *ResultTy, ArrayRef Operands, + ElementCount VF, + bool AlwaysIncludeReplicatingR = false); }; /// This class can be used to assign names to VPValues. For VPValues without @@ -468,6 +472,10 @@ class VPlanPrinter { }; #endif +/// Check if a constant \p CI can be safely treated as having been extended +/// from a narrower type with the given extension kind. +bool canConstantBeExtended(const ConstantInt *CI, Type *NarrowType, + TTI::PartialReductionExtendKind ExtKind); } // end namespace llvm #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index aa3de3613b68e..43d61f2321a2c 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -40,6 +40,7 @@ #include using namespace llvm; +using namespace llvm::VPlanPatternMatch; using VectorParts = SmallVector; @@ -303,7 +304,6 @@ VPPartialReductionRecipe::computeCost(ElementCount VF, VPRecipeBase *OpR = Op->getDefiningRecipe(); // If the partial reduction is predicated, a select will be operand 0 - using namespace llvm::VPlanPatternMatch; if (match(getOperand(1), m_Select(m_VPValue(), m_VPValue(Op), m_VPValue()))) { OpR = Op->getDefiningRecipe(); } @@ -340,6 +340,14 @@ VPPartialReductionRecipe::computeCost(ElementCount VF, : Widen->getOperand(1)); ExtAType = GetExtendKind(ExtAR); ExtBType = GetExtendKind(ExtBR); + + if (!ExtBR && Widen->getOperand(1)->isLiveIn()) { + auto *CI = cast(Widen->getOperand(1)->getLiveInIRValue()); + if (canConstantBeExtended(CI, InputTypeA, ExtAType)) { + InputTypeB = InputTypeA; + ExtBType = ExtAType; + } + } }; if (isa(OpR)) { @@ -1214,6 +1222,7 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const { case Instruction::Select: case Instruction::PHI: case VPInstruction::AnyOf: + case VPInstruction::Broadcast: case VPInstruction::BuildStructVector: case VPInstruction::BuildVector: case VPInstruction::CalculateTripCountMinusVF: @@ -1954,7 +1963,6 @@ InstructionCost VPWidenSelectRecipe::computeCost(ElementCount VF, Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF); VPValue *Op0, *Op1; - using namespace llvm::VPlanPatternMatch; if (!ScalarCond && ScalarTy->getScalarSizeInBits() == 1 && (match(this, m_LogicalAnd(m_VPValue(Op0), m_VPValue(Op1))) || match(this, m_LogicalOr(m_VPValue(Op0), m_VPValue(Op1))))) { @@ -2016,13 +2024,13 @@ bool VPIRFlags::flagsValidForOpcode(unsigned Opcode) const { return Opcode == Instruction::FAdd || Opcode == Instruction::FMul || Opcode == Instruction::FSub || Opcode == Instruction::FNeg || Opcode == Instruction::FDiv || Opcode == Instruction::FRem || + Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc || Opcode == Instruction::FCmp || Opcode == Instruction::Select || Opcode == VPInstruction::WideIVStep || Opcode == VPInstruction::ReductionStartVector || Opcode == VPInstruction::ComputeReductionResult; case OperationType::NonNegOp: - return Opcode == Instruction::ZExt; - 
break; + return Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP; case OperationType::Cmp: return Opcode == Instruction::FCmp || Opcode == Instruction::ICmp; case OperationType::Other: @@ -2754,10 +2762,7 @@ VPExpressionRecipe::VPExpressionRecipe( ExpressionTypes ExpressionType, ArrayRef ExpressionRecipes) : VPSingleDefRecipe(VPDef::VPExpressionSC, {}, {}), - ExpressionRecipes(SetVector( - ExpressionRecipes.begin(), ExpressionRecipes.end()) - .takeVector()), - ExpressionType(ExpressionType) { + ExpressionRecipes(ExpressionRecipes), ExpressionType(ExpressionType) { assert(!ExpressionRecipes.empty() && "Nothing to combine?"); assert( none_of(ExpressionRecipes, @@ -2801,14 +2806,22 @@ VPExpressionRecipe::VPExpressionRecipe( continue; addOperand(Op); LiveInPlaceholders.push_back(new VPValue()); - R->setOperand(Idx, LiveInPlaceholders.back()); } } + + // Replace each external operand with the first one created for it in + // LiveInPlaceholders. + for (auto *R : ExpressionRecipes) + for (auto const &[LiveIn, Tmp] : zip(operands(), LiveInPlaceholders)) + R->replaceUsesOfWith(LiveIn, Tmp); } void VPExpressionRecipe::decompose() { for (auto *R : ExpressionRecipes) - R->insertBefore(this); + // Since the list could contain duplicates, make sure the recipe hasn't + // already been inserted. + if (!R->getParent()) + R->insertBefore(this); for (const auto &[Idx, Op] : enumerate(operands())) LiveInPlaceholders[Idx]->replaceAllUsesWith(Op); @@ -2838,12 +2851,17 @@ InstructionCost VPExpressionRecipe::computeCost(ElementCount VF, return Ctx.TTI.getMulAccReductionCost(false, Opcode, RedTy, SrcVecTy, Ctx.CostKind); - case ExpressionTypes::ExtMulAccReduction: + case ExpressionTypes::ExtNegatedMulAccReduction: + assert(Opcode == Instruction::Add && "Unexpected opcode"); + Opcode = Instruction::Sub; + LLVM_FALLTHROUGH; + case ExpressionTypes::ExtMulAccReduction: { return Ctx.TTI.getMulAccReductionCost( cast(ExpressionRecipes.front())->getOpcode() == Instruction::ZExt, Opcode, RedTy, SrcVecTy, Ctx.CostKind); } + } llvm_unreachable("Unknown VPExpressionRecipe::ExpressionTypes enum"); } @@ -2889,6 +2907,30 @@ void VPExpressionRecipe::print(raw_ostream &O, const Twine &Indent, O << ")"; break; } + case ExpressionTypes::ExtNegatedMulAccReduction: { + getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker); + O << " + reduce." + << Instruction::getOpcodeName( + RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind())) + << " (sub (0, mul"; + auto *Mul = cast(ExpressionRecipes[2]); + Mul->printFlags(O); + O << "("; + getOperand(0)->printAsOperand(O, SlotTracker); + auto *Ext0 = cast(ExpressionRecipes[0]); + O << " " << Instruction::getOpcodeName(Ext0->getOpcode()) << " to " + << *Ext0->getResultType() << "), ("; + getOperand(1)->printAsOperand(O, SlotTracker); + auto *Ext1 = cast(ExpressionRecipes[1]); + O << " " << Instruction::getOpcodeName(Ext1->getOpcode()) << " to " + << *Ext1->getResultType() << ")"; + if (Red->isConditional()) { + O << ", "; + Red->getCondOp()->printAsOperand(O, SlotTracker); + } + O << "))"; + break; + } case ExpressionTypes::MulAccReduction: case ExpressionTypes::ExtMulAccReduction: { getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker); @@ -3051,7 +3093,7 @@ void VPReplicateRecipe::execute(VPTransformState &State) { if (State.VF.isVector() && shouldPack()) { Value *WideValue = State.Lane->isFirstLane() - ? PoisonValue::get(VectorType::get(UI->getType(), State.VF)) + ? 
PoisonValue::get(toVectorizedTy(UI->getType(), State.VF)) : State.get(this); State.set(this, State.packScalarIntoVectorizedValue(this, WideValue, *State.Lane)); @@ -3068,6 +3110,62 @@ bool VPReplicateRecipe::shouldPack() const { }); } +/// Returns true if \p Ptr is a pointer computation for which the legacy cost +/// model computes a SCEV expression when computing the address cost. +static bool shouldUseAddressAccessSCEV(const VPValue *Ptr) { + auto *PtrR = Ptr->getDefiningRecipe(); + if (!PtrR || !((isa(PtrR) && + cast(PtrR)->getOpcode() == + Instruction::GetElementPtr) || + isa(PtrR) || + match(Ptr, m_GetElementPtr(m_VPValue(), m_VPValue())))) + return false; + + // We are looking for a GEP where all indices are either loop invariant or + // inductions. + for (VPValue *Opd : drop_begin(PtrR->operands())) { + if (!Opd->isDefinedOutsideLoopRegions() && + !isa(Opd)) + return false; + } + + return true; +} + +/// Returns true if \p V is used as part of the address of another load or +/// store. +static bool isUsedByLoadStoreAddress(const VPUser *V) { + SmallPtrSet Seen; + SmallVector WorkList = {V}; + + while (!WorkList.empty()) { + auto *Cur = dyn_cast(WorkList.pop_back_val()); + if (!Cur || !Seen.insert(Cur).second) + continue; + + for (VPUser *U : Cur->users()) { + if (auto *InterleaveR = dyn_cast(U)) + if (InterleaveR->getAddr() == Cur) + return true; + if (auto *RepR = dyn_cast(U)) { + if (RepR->getOpcode() == Instruction::Load && + RepR->getOperand(0) == Cur) + return true; + if (RepR->getOpcode() == Instruction::Store && + RepR->getOperand(1) == Cur) + return true; + } + if (auto *MemR = dyn_cast(U)) { + if (MemR->getAddr() == Cur && MemR->isConsecutive()) + return true; + } + } + + append_range(WorkList, cast(Cur)->users()); + } + return false; +} + InstructionCost VPReplicateRecipe::computeCost(ElementCount VF, VPCostContext &Ctx) const { Instruction *UI = cast(getUnderlyingValue()); @@ -3175,21 +3273,58 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF, } case Instruction::Load: case Instruction::Store: { - if (isSingleScalar()) { - bool IsLoad = UI->getOpcode() == Instruction::Load; - Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0)); - Type *ScalarPtrTy = Ctx.Types.inferScalarType(getOperand(IsLoad ? 0 : 1)); - const Align Alignment = getLoadStoreAlignment(UI); - unsigned AS = getLoadStoreAddressSpace(UI); - TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0)); - InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost( - UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo, UI); - return ScalarMemOpCost + Ctx.TTI.getAddressComputationCost( - ScalarPtrTy, nullptr, nullptr, Ctx.CostKind); - } + if (VF.isScalable() && !isSingleScalar()) + return InstructionCost::getInvalid(); + // TODO: See getMemInstScalarizationCost for how to handle replicating and // predicated cases. - break; + const VPRegionBlock *ParentRegion = getParent()->getParent(); + if (ParentRegion && ParentRegion->isReplicator()) + break; + + bool IsLoad = UI->getOpcode() == Instruction::Load; + const VPValue *PtrOp = getOperand(!IsLoad); + // TODO: Handle cases where we need to pass a SCEV to + // getAddressComputationCost. + if (shouldUseAddressAccessSCEV(PtrOp)) + break; + + Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? 
this : getOperand(0)); + Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp); + const Align Alignment = getLoadStoreAlignment(UI); + unsigned AS = getLoadStoreAddressSpace(UI); + TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0)); + InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost( + UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo); + + Type *PtrTy = isSingleScalar() ? ScalarPtrTy : toVectorTy(ScalarPtrTy, VF); + + InstructionCost ScalarCost = + ScalarMemOpCost + Ctx.TTI.getAddressComputationCost( + PtrTy, &Ctx.SE, nullptr, Ctx.CostKind); + if (isSingleScalar()) + return ScalarCost; + + SmallVector OpsToScalarize; + Type *ResultTy = Type::getVoidTy(PtrTy->getContext()); + // Set ResultTy and OpsToScalarize, if scalarization is needed. Currently we + // don't assign scalarization overhead in general, if the target prefers + // vectorized addressing or the loaded value is used as part of an address + // of another load or store. + bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing(); + if (PreferVectorizedAddressing || !isUsedByLoadStoreAddress(this)) { + bool EfficientVectorLoadStore = + Ctx.TTI.supportsEfficientVectorElementLoadStore(); + if (!(IsLoad && !PreferVectorizedAddressing) && + !(!IsLoad && EfficientVectorLoadStore)) + append_range(OpsToScalarize, operands()); + + if (!EfficientVectorLoadStore) + ResultTy = Ctx.Types.inferScalarType(this); + } + + return (ScalarCost * VF.getFixedValue()) + + Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, true); } } @@ -3267,11 +3402,22 @@ void VPPredInstPHIRecipe::execute(VPTransformState &State) { // also do that packing, thereby "hoisting" the insert-element sequence. // Otherwise, a phi node for the scalar value is needed. if (State.hasVectorValue(getOperand(0))) { - Value *VectorValue = State.get(getOperand(0)); - InsertElementInst *IEI = cast(VectorValue); - PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); - VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. - VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. + auto *VecI = cast(State.get(getOperand(0))); + assert((isa(VecI)) && + "Packed operands must generate an insertelement or insertvalue"); + + // If VectorI is a struct, it will be a sequence like: + // %1 = insertvalue %unmodified, %x, 0 + // %2 = insertvalue %1, %y, 1 + // %VectorI = insertvalue %2, %z, 2 + // To get the unmodified vector we need to look through the chain. + if (auto *StructTy = dyn_cast(VecI->getType())) + for (unsigned I = 0; I < StructTy->getNumContainedTypes() - 1; I++) + VecI = cast(VecI->getOperand(0)); + + PHINode *VPhi = State.Builder.CreatePHI(VecI->getType(), 2); + VPhi->addIncoming(VecI->getOperand(0), PredicatingBB); // Unmodified vector. + VPhi->addIncoming(VecI, PredicatedBB); // New vector with inserted element. 
if (State.hasVectorValue(this)) State.reset(this, VPhi); else diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 84f02059743c3..f76777bc6cf2e 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -40,7 +40,7 @@ using namespace llvm; using namespace VPlanPatternMatch; -cl::opt EnableWideActiveLaneMask( +static cl::opt EnableWideActiveLaneMask( "enable-wide-lane-mask", cl::init(false), cl::Hidden, cl::desc("Enable use of wide get active lane mask instructions")); @@ -1110,8 +1110,7 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) { // x && !x -> 0 if (match(&R, m_LogicalAnd(m_VPValue(X), m_Not(m_Deferred(X))))) - return Def->replaceAllUsesWith(Plan->getOrAddLiveIn( - ConstantInt::getFalse(VPTypeAnalysis(*Plan).inferScalarType(Def)))); + return Def->replaceAllUsesWith(Plan->getFalse()); if (match(Def, m_Select(m_VPValue(), m_VPValue(X), m_Deferred(X)))) return Def->replaceAllUsesWith(X); @@ -2124,6 +2123,8 @@ static void licm(VPlan &Plan) { void VPlanTransforms::truncateToMinimalBitwidths( VPlan &Plan, const MapVector &MinBWs) { + if (Plan.hasScalarVFOnly()) + return; // Keep track of created truncates, so they can be re-used. Note that we // cannot use RAUW after creating a new truncate, as this would could make // other uses have different types for their operands, making them invalidly @@ -2195,7 +2196,8 @@ void VPlanTransforms::truncateToMinimalBitwidths( auto [ProcessedIter, IterIsEmpty] = ProcessedTruncs.try_emplace(Op); VPWidenCastRecipe *NewOp = IterIsEmpty - ? new VPWidenCastRecipe(Instruction::Trunc, Op, NewResTy) + ? new VPWidenCastRecipe(Instruction::Trunc, Op, NewResTy, + VPIRFlags::TruncFlagsTy(false, false)) : ProcessedIter->second; R.setOperand(Idx, NewOp); if (!IterIsEmpty) @@ -2703,6 +2705,8 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) { /// void VPlanTransforms::addExplicitVectorLength( VPlan &Plan, const std::optional &MaxSafeElements) { + if (Plan.hasScalarVFOnly()) + return; VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock(); auto *CanonicalIVPHI = Plan.getCanonicalIV(); @@ -2852,6 +2856,7 @@ void VPlanTransforms::replaceSymbolicStrides( return R->getParent()->getParent() || R->getParent() == Plan.getVectorLoopRegion()->getSinglePredecessor(); }; + ValueToSCEVMapTy RewriteMap; for (const SCEV *Stride : StridesMap.values()) { using namespace SCEVPatternMatch; auto *StrideV = cast(Stride)->getValue(); @@ -2879,6 +2884,22 @@ void VPlanTransforms::replaceSymbolicStrides( VPValue *CI = Plan.getOrAddLiveIn(ConstantInt::get(U->getType(), C)); StrideVPV->replaceUsesWithIf(CI, CanUseVersionedStride); } + RewriteMap[StrideV] = PSE.getSCEV(StrideV); + } + + for (VPRecipeBase &R : *Plan.getEntry()) { + auto *ExpSCEV = dyn_cast(&R); + if (!ExpSCEV) + continue; + const SCEV *ScevExpr = ExpSCEV->getSCEV(); + auto *NewSCEV = + SCEVParameterRewriter::rewrite(ScevExpr, *PSE.getSE(), RewriteMap); + if (NewSCEV != ScevExpr) { + VPValue *NewExp = vputils::getOrCreateVPValueForSCEVExpr(Plan, NewSCEV); + ExpSCEV->replaceAllUsesWith(NewExp); + if (Plan.getTripCount() == ExpSCEV) + Plan.resetTripCount(NewExp); + } } } @@ -3324,12 +3345,7 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) { VectorStep = Builder.createWidenCast(CastOp, VectorStep, IVTy); } - [[maybe_unused]] auto *ConstStep = - ScalarStep->isLiveIn() - ? 
dyn_cast(ScalarStep->getLiveInIRValue()) - : nullptr; - assert(!ConstStep || ConstStep->getValue() != 1); - (void)ConstStep; + assert(!match(ScalarStep, m_One()) && "Expected non-unit scalar-step"); if (TypeInfo.inferScalarType(ScalarStep) != IVTy) { ScalarStep = Builder.createWidenCast(Instruction::Trunc, ScalarStep, IVTy); @@ -3525,7 +3541,15 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, }; VPValue *VecOp = Red->getVecOp(); + VPRecipeBase *Sub = nullptr; VPValue *A, *B; + VPValue *Tmp = nullptr; + // Sub reductions could have a sub between the add reduction and vec op. + if (match(VecOp, + m_Binary(m_SpecificInt(0), m_VPValue(Tmp)))) { + Sub = VecOp->getDefiningRecipe(); + VecOp = Tmp; + } // Try to match reduce.add(mul(...)). if (match(VecOp, m_Mul(m_VPValue(A), m_VPValue(B)))) { auto *RecipeA = @@ -3542,12 +3566,21 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, IsMulAccValidAndClampRange(RecipeA->getOpcode() == Instruction::CastOps::ZExt, Mul, RecipeA, RecipeB, nullptr)) { + if (Sub) + return new VPExpressionRecipe(RecipeA, RecipeB, Mul, + cast(Sub), Red); return new VPExpressionRecipe(RecipeA, RecipeB, Mul, Red); } // Match reduce.add(mul). - if (IsMulAccValidAndClampRange(true, Mul, nullptr, nullptr, nullptr)) + // TODO: Add an expression type for this variant with a negated mul + if (!Sub && + IsMulAccValidAndClampRange(true, Mul, nullptr, nullptr, nullptr)) return new VPExpressionRecipe(Mul, Red); } + // TODO: Add an expression type for negated versions of other expression + // variants. + if (Sub) + return nullptr; // Match reduce.add(ext(mul(ext(A), ext(B)))). // All extend recipes must have same opcode or A == B // which can be transform to reduce.add(zext(mul(sext(A), sext(B)))). @@ -3566,13 +3599,13 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, Mul, Ext0, Ext1, Ext)) { auto *NewExt0 = new VPWidenCastRecipe( Ext0->getOpcode(), Ext0->getOperand(0), Ext->getResultType(), *Ext0, - Ext0->getDebugLoc()); + *Ext0, Ext0->getDebugLoc()); NewExt0->insertBefore(Ext0); VPWidenCastRecipe *NewExt1 = NewExt0; if (Ext0 != Ext1) { NewExt1 = new VPWidenCastRecipe(Ext1->getOpcode(), Ext1->getOperand(0), - Ext->getResultType(), *Ext1, + Ext->getResultType(), *Ext1, *Ext1, Ext1->getDebugLoc()); NewExt1->insertBefore(Ext1); } diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index 69452a7e37572..4c65cb7d7a80d 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -117,6 +117,13 @@ struct VPlanTransforms { bool TailFolded, bool CheckNeededWithTailFolding, Loop *OrigLoop, const uint32_t *MinItersBypassWeights, DebugLoc DL, ScalarEvolution &SE); + /// Add a check to \p Plan to see if the epilogue vector loop should be + /// executed. + static void addMinimumVectorEpilogueIterationCheck( + VPlan &Plan, Value *TripCount, Value *VectorTripCount, + bool RequiresScalarEpilogue, ElementCount EpilogueVF, unsigned EpilogueUF, + unsigned MainLoopStep, unsigned EpilogueLoopStep, ScalarEvolution &SE); + /// Replace loops in \p Plan's flat CFG with VPRegionBlocks, turning \p Plan's /// flat CFG into a hierarchical CFG. 
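// --- Illustrative sketch, not part of the patch: a scalar source loop of the
// shape the new sub-reduction matching above is looking for, i.e. an add
// reduction whose vector operand is sub(0, mul(ext(a), ext(b))). Whether this
// actually becomes an ExtNegatedMulAccReduction expression depends on the
// target's mul-accumulate reduction support; the function name is
// hypothetical.
#include <cstddef>
#include <cstdint>

int64_t negatedDotProduct(const int8_t *A, const int8_t *B, size_t N) {
  int64_t Sum = 0;
  for (size_t I = 0; I < N; ++I)
    Sum += -(static_cast<int64_t>(A[I]) * static_cast<int64_t>(B[I])); // Sum -= a*b
  return Sum;
}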
LLVM_ABI_FOR_TEST static void createLoopRegions(VPlan &Plan); diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp index 917aa01f8a926..059993043dcda 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp @@ -13,6 +13,7 @@ #include "llvm/Analysis/ScalarEvolutionExpressions.h" using namespace llvm; +using namespace llvm::VPlanPatternMatch; bool vputils::onlyFirstLaneUsed(const VPValue *Def) { return all_of(Def->users(), @@ -63,7 +64,6 @@ bool vputils::isHeaderMask(const VPValue *V, VPlan &Plan) { }; VPValue *A, *B; - using namespace VPlanPatternMatch; if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B), m_One()))) return B == Plan.getTripCount() && @@ -90,7 +90,6 @@ const SCEV *vputils::getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE) { } bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) { - using namespace VPlanPatternMatch; // Live-ins are uniform. if (V->isLiveIn()) return true; @@ -141,11 +140,24 @@ VPBasicBlock *vputils::getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT) { return I == DepthFirst.end() ? nullptr : cast(*I); } +unsigned vputils::getVFScaleFactor(VPRecipeBase *R) { + if (!R) + return 1; + if (auto *RR = dyn_cast(R)) + return RR->getVFScaleFactor(); + if (auto *RR = dyn_cast(R)) + return RR->getVFScaleFactor(); + assert( + (!isa(R) || cast(R)->getOpcode() != + VPInstruction::ReductionStartVector) && + "getting scaling factor of reduction-start-vector not implemented yet"); + return 1; +} + std::optional vputils::getRecipesForUncountableExit(VPlan &Plan, SmallVectorImpl &Recipes, SmallVectorImpl &GEPs) { - using namespace llvm::VPlanPatternMatch; // Given a VPlan like the following (just including the recipes contributing // to loop control exiting here, not the actual work), we're looking to match // the recipes contributing to the uncountable exit condition comparison diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.h b/llvm/lib/Transforms/Vectorize/VPlanUtils.h index 33dd8efaec2db..0222b0aa81063 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUtils.h +++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.h @@ -102,6 +102,10 @@ bool isUniformAcrossVFsAndUFs(VPValue *V); /// exist. VPBasicBlock *getFirstLoopHeader(VPlan &Plan, VPDominatorTree &VPDT); +/// Get the VF scaling factor applied to the recipe's output, if the recipe has +/// one. +unsigned getVFScaleFactor(VPRecipeBase *R); + /// Returns the VPValue representing the uncountable exit comparison used by /// AnyOf if the recipes it depends on can be traced back to live-ins and /// the addresses (in GEP/PtrAdd form) of any (non-masked) load used in diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index 92caa0b4e51d5..013ea2e883534 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -199,7 +199,8 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const { // EVLIVIncrement is only used by EVLIV & BranchOnCount. // Having more than two users is unexpected. 
using namespace llvm::VPlanPatternMatch; - if ((I->getNumUsers() != 1) && + if (I->getOpcode() != VPInstruction::Broadcast && + I->getNumUsers() != 1 && (I->getNumUsers() != 2 || none_of(I->users(), match_fn(m_BranchOnCount(m_Specific(I), m_VPValue()))))) { diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index 0ef933f596604..d6eb00da11dc8 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -1031,6 +1031,16 @@ bool VectorCombine::foldBitOpOfCastConstant(Instruction &I) { // Create the cast operation directly to ensure we get a new instruction Instruction *NewCast = CastInst::Create(CastOpcode, NewOp, I.getType()); + // Preserve cast instruction flags + if (RHSFlags.NNeg) + NewCast->setNonNeg(); + if (RHSFlags.NUW) + NewCast->setHasNoUnsignedWrap(); + if (RHSFlags.NSW) + NewCast->setHasNoSignedWrap(); + + NewCast->andIRFlags(LHSCast); + // Insert the new instruction Value *Result = Builder.Insert(NewCast); @@ -2487,21 +2497,31 @@ bool VectorCombine::foldShuffleOfCastops(Instruction &I) { if (!match(&I, m_Shuffle(m_Value(V0), m_Value(V1), m_Mask(OldMask)))) return false; + // Check whether this is a binary shuffle. + bool IsBinaryShuffle = !isa(V1); + auto *C0 = dyn_cast(V0); auto *C1 = dyn_cast(V1); - if (!C0 || !C1) + if (!C0 || (IsBinaryShuffle && !C1)) return false; Instruction::CastOps Opcode = C0->getOpcode(); - if (C0->getSrcTy() != C1->getSrcTy()) + + // If this is allowed, foldShuffleOfCastops can get stuck in a loop + // with foldBitcastOfShuffle. Reject in favor of foldBitcastOfShuffle. + if (!IsBinaryShuffle && Opcode == Instruction::BitCast) return false; - // Handle shuffle(zext_nneg(x), sext(y)) -> sext(shuffle(x,y)) folds. - if (Opcode != C1->getOpcode()) { - if (match(C0, m_SExtLike(m_Value())) && match(C1, m_SExtLike(m_Value()))) - Opcode = Instruction::SExt; - else + if (IsBinaryShuffle) { + if (C0->getSrcTy() != C1->getSrcTy()) return false; + // Handle shuffle(zext_nneg(x), sext(y)) -> sext(shuffle(x,y)) folds. 
+ if (Opcode != C1->getOpcode()) { + if (match(C0, m_SExtLike(m_Value())) && match(C1, m_SExtLike(m_Value()))) + Opcode = Instruction::SExt; + else + return false; + } } auto *ShuffleDstTy = dyn_cast(I.getType()); @@ -2544,23 +2564,31 @@ bool VectorCombine::foldShuffleOfCastops(Instruction &I) { InstructionCost CostC0 = TTI.getCastInstrCost(C0->getOpcode(), CastDstTy, CastSrcTy, TTI::CastContextHint::None, CostKind); - InstructionCost CostC1 = - TTI.getCastInstrCost(C1->getOpcode(), CastDstTy, CastSrcTy, - TTI::CastContextHint::None, CostKind); - InstructionCost OldCost = CostC0 + CostC1; - OldCost += - TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, ShuffleDstTy, - CastDstTy, OldMask, CostKind, 0, nullptr, {}, &I); - InstructionCost NewCost = - TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, NewShuffleDstTy, - CastSrcTy, NewMask, CostKind); + TargetTransformInfo::ShuffleKind ShuffleKind; + if (IsBinaryShuffle) + ShuffleKind = TargetTransformInfo::SK_PermuteTwoSrc; + else + ShuffleKind = TargetTransformInfo::SK_PermuteSingleSrc; + + InstructionCost OldCost = CostC0; + OldCost += TTI.getShuffleCost(ShuffleKind, ShuffleDstTy, CastDstTy, OldMask, + CostKind, 0, nullptr, {}, &I); + + InstructionCost NewCost = TTI.getShuffleCost(ShuffleKind, NewShuffleDstTy, + CastSrcTy, NewMask, CostKind); NewCost += TTI.getCastInstrCost(Opcode, ShuffleDstTy, NewShuffleDstTy, TTI::CastContextHint::None, CostKind); if (!C0->hasOneUse()) NewCost += CostC0; - if (!C1->hasOneUse()) - NewCost += CostC1; + if (IsBinaryShuffle) { + InstructionCost CostC1 = + TTI.getCastInstrCost(C1->getOpcode(), CastDstTy, CastSrcTy, + TTI::CastContextHint::None, CostKind); + OldCost += CostC1; + if (!C1->hasOneUse()) + NewCost += CostC1; + } LLVM_DEBUG(dbgs() << "Found a shuffle feeding two casts: " << I << "\n OldCost: " << OldCost << " vs NewCost: " << NewCost @@ -2568,14 +2596,20 @@ bool VectorCombine::foldShuffleOfCastops(Instruction &I) { if (NewCost > OldCost) return false; - Value *Shuf = Builder.CreateShuffleVector(C0->getOperand(0), - C1->getOperand(0), NewMask); + Value *Shuf; + if (IsBinaryShuffle) + Shuf = Builder.CreateShuffleVector(C0->getOperand(0), C1->getOperand(0), + NewMask); + else + Shuf = Builder.CreateShuffleVector(C0->getOperand(0), NewMask); + Value *Cast = Builder.CreateCast(Opcode, Shuf, ShuffleDstTy); // Intersect flags from the old casts. if (auto *NewInst = dyn_cast(Cast)) { NewInst->copyIRFlags(C0); - NewInst->andIRFlags(C1); + if (IsBinaryShuffle) + NewInst->andIRFlags(C1); } Worklist.pushValue(Shuf); @@ -4433,7 +4467,7 @@ bool VectorCombine::shrinkPhiOfShuffles(Instruction &I) { // Create new mask using difference of the two incoming masks. int MaskOffset = NewMask[0u]; - unsigned Index = (InputNumElements - MaskOffset) % InputNumElements; + unsigned Index = (InputNumElements + MaskOffset) % InputNumElements; NewMask.clear(); for (unsigned I = 0u; I < InputNumElements; ++I) { diff --git a/llvm/runtimes/CMakeLists.txt b/llvm/runtimes/CMakeLists.txt index 839929204c064..6f98eaee241bc 100644 --- a/llvm/runtimes/CMakeLists.txt +++ b/llvm/runtimes/CMakeLists.txt @@ -507,10 +507,14 @@ if(build_runtimes) endif() # Forward user-provived system configuration to runtimes for requirement introspection. - # CMAKE_PREFIX_PATH is the search path for CMake packages. + # CMAKE_PREFIX_PATH is the search path for CMake packages. 
In order to pass through + # the command line interface, the CMake semicolon separator needs to be replaced + # with $ if(CMAKE_PREFIX_PATH) - list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH}") + string(JOIN "$" escaped_cmake_prefix_path ${CMAKE_PREFIX_PATH}) + list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${escaped_cmake_prefix_path}") endif() + # CMAKE_PROGRAM_PATH is the search path for executables such as python. if(CMAKE_PROGRAM_PATH) list(APPEND extra_cmake_args "-DCMAKE_PROGRAM_PATH=${CMAKE_PROGRAM_PATH}") diff --git a/llvm/test/Analysis/CostModel/AMDGPU/canonicalize.ll b/llvm/test/Analysis/CostModel/AMDGPU/canonicalize.ll index 7ac4db3119210..904db9064a369 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/canonicalize.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/canonicalize.ll @@ -3,11 +3,13 @@ ; RUN: opt < %s -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx803 -passes="print" -cost-kind=throughput 2>&1 -disable-output | FileCheck -check-prefixes=ALL,GFX8 %s ; RUN: opt < %s -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -passes="print" -cost-kind=throughput 2>&1 -disable-output | FileCheck -check-prefixes=ALL,GFX9 %s ; RUN: opt < %s -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes="print" -cost-kind=throughput 2>&1 -disable-output | FileCheck -check-prefixes=ALL,GFX10 %s +; RUN: opt < %s -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 -passes="print" -cost-kind=throughput 2>&1 -disable-output | FileCheck -check-prefixes=GFX1250 %s ; RUN: opt < %s -mtriple=amdgcn-unknown-amdhsa -passes="print" -cost-kind=code-size 2>&1 -disable-output | FileCheck -check-prefixes=ALL-SIZE,BASE-SIZE %s ; RUN: opt < %s -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx803 -passes="print" -cost-kind=code-size 2>&1 -disable-output | FileCheck -check-prefixes=ALL-SIZE,GFX8-SIZE %s ; RUN: opt < %s -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -passes="print" -cost-kind=code-size 2>&1 -disable-output | FileCheck -check-prefixes=ALL-SIZE,GFX9-SIZE %s ; RUN: opt < %s -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes="print" -cost-kind=code-size 2>&1 -disable-output | FileCheck -check-prefixes=ALL-SIZE,GFX10-SIZE %s +; RUN: opt < %s -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 -passes="print" -cost-kind=code-size 2>&1 -disable-output | FileCheck -check-prefixes=GFX1250-SIZE %s define void @canonicalize_f16() { ; BASE-LABEL: 'canonicalize_f16' @@ -141,6 +143,16 @@ define void @canonicalize_bf16() { ; GFX10-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v17bf16 = call <17 x bfloat> @llvm.canonicalize.v17bf16(<17 x bfloat> undef) ; GFX10-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; +; GFX1250-LABEL: 'canonicalize_bf16' +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = call bfloat @llvm.canonicalize.bf16(bfloat undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2bf16 = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3bf16 = call <3 x bfloat> @llvm.canonicalize.v3bf16(<3 x bfloat> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4bf16 = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5bf16 = call <5 x bfloat> @llvm.canonicalize.v5bf16(<5 x bfloat> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16bf16 = call <16 x bfloat> 
@llvm.canonicalize.v16bf16(<16 x bfloat> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17bf16 = call <17 x bfloat> @llvm.canonicalize.v17bf16(<17 x bfloat> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void +; ; BASE-SIZE-LABEL: 'canonicalize_bf16' ; BASE-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = call bfloat @llvm.canonicalize.bf16(bfloat undef) ; BASE-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2bf16 = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> undef) @@ -181,6 +193,15 @@ define void @canonicalize_bf16() { ; GFX10-SIZE-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v17bf16 = call <17 x bfloat> @llvm.canonicalize.v17bf16(<17 x bfloat> undef) ; GFX10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; +; GFX1250-SIZE-LABEL: 'canonicalize_bf16' +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = call bfloat @llvm.canonicalize.bf16(bfloat undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2bf16 = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3bf16 = call <3 x bfloat> @llvm.canonicalize.v3bf16(<3 x bfloat> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4bf16 = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5bf16 = call <5 x bfloat> @llvm.canonicalize.v5bf16(<5 x bfloat> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16bf16 = call <16 x bfloat> @llvm.canonicalize.v16bf16(<16 x bfloat> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17bf16 = call <17 x bfloat> @llvm.canonicalize.v17bf16(<17 x bfloat> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void %bf16 = call bfloat @llvm.canonicalize.bf16(bfloat undef) #1 %v2bf16 = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> undef) #1 %v3bf16 = call <3 x bfloat> @llvm.canonicalize.v3bf16(<3 x bfloat> undef) #1 @@ -203,6 +224,17 @@ define void @canonicalize_f32() { ; ALL-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v16f32 = call <16 x float> @llvm.canonicalize.v16f32(<16 x float> undef) ; ALL-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; +; GFX1250-LABEL: 'canonicalize_f32' +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = call float @llvm.canonicalize.f32(float undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = call <2 x float> @llvm.canonicalize.v2f32(<2 x float> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = call <3 x float> @llvm.canonicalize.v3f32(<3 x float> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = call <4 x float> @llvm.canonicalize.v4f32(<4 x float> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = call <5 x float> @llvm.canonicalize.v5f32(<5 x float> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = call <8 x float> @llvm.canonicalize.v8f32(<8 x float> undef) +; GFX1250-NEXT: Cost Model: Found an estimated 
cost of 15 for instruction: %v9f32 = call <9 x float> @llvm.canonicalize.v9f32(<9 x float> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32 = call <16 x float> @llvm.canonicalize.v16f32(<16 x float> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void +; ; ALL-SIZE-LABEL: 'canonicalize_f32' ; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = call float @llvm.canonicalize.f32(float undef) ; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = call <2 x float> @llvm.canonicalize.v2f32(<2 x float> undef) @@ -214,6 +246,16 @@ define void @canonicalize_f32() { ; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v16f32 = call <16 x float> @llvm.canonicalize.v16f32(<16 x float> undef) ; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; +; GFX1250-SIZE-LABEL: 'canonicalize_f32': +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = call float @llvm.canonicalize.f32(float undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = call <2 x float> @llvm.canonicalize.v2f32(<2 x float> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = call <3 x float> @llvm.canonicalize.v3f32(<3 x float> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = call <4 x float> @llvm.canonicalize.v4f32(<4 x float> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = call <5 x float> @llvm.canonicalize.v5f32(<5 x float> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = call <8 x float> @llvm.canonicalize.v8f32(<8 x float> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = call <9 x float> @llvm.canonicalize.v9f32(<9 x float> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32 = call <16 x float> @llvm.canonicalize.v16f32(<16 x float> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void %f32 = call float @llvm.canonicalize.f32(float undef) #1 %v2f32 = call <2 x float> @llvm.canonicalize.v2f32(<2 x float> undef) #1 %v3f32 = call <3 x float> @llvm.canonicalize.v3f32(<3 x float> undef) #1 @@ -236,6 +278,16 @@ define void @canonicalize_f64() { ; ALL-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %v16f64 = call <16 x double> @llvm.canonicalize.v16f64(<16 x double> undef) ; ALL-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; +; GFX1250-LABEL: 'canonicalize_f64' +; GFX1250-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f64 = call double @llvm.canonicalize.f64(double undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = call <2 x double> @llvm.canonicalize.v2f64(<2 x double> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f64 = call <3 x double> @llvm.canonicalize.v3f64(<3 x double> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f64 = call <4 x double> @llvm.canonicalize.v4f64(<4 x double> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v5f64 = call <5 x double> @llvm.canonicalize.v5f64(<5 x double> undef) +; GFX1250-NEXT: Cost Model: Found an estimated 
cost of 96 for instruction: %v8f64 = call <8 x double> @llvm.canonicalize.v8f64(<8 x double> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %v16f64 = call <16 x double> @llvm.canonicalize.v16f64(<16 x double> undef) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void +; ; ALL-SIZE-LABEL: 'canonicalize_f64' ; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = call double @llvm.canonicalize.f64(double undef) ; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = call <2 x double> @llvm.canonicalize.v2f64(<2 x double> undef) @@ -245,6 +297,16 @@ define void @canonicalize_f64() { ; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v8f64 = call <8 x double> @llvm.canonicalize.v8f64(<8 x double> undef) ; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %v16f64 = call <16 x double> @llvm.canonicalize.v16f64(<16 x double> undef) ; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void +; +; GFX1250-SIZE-LABEL: 'canonicalize_f64' +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = call double @llvm.canonicalize.f64(double undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = call <2 x double> @llvm.canonicalize.v2f64(<2 x double> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = call <3 x double> @llvm.canonicalize.v3f64(<3 x double> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = call <4 x double> @llvm.canonicalize.v4f64(<4 x double> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = call <5 x double> @llvm.canonicalize.v5f64(<5 x double> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v8f64 = call <8 x double> @llvm.canonicalize.v8f64(<8 x double> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %v16f64 = call <16 x double> @llvm.canonicalize.v16f64(<16 x double> undef) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; %f64 = call double @llvm.canonicalize.f64(double undef) #1 %v2f64 = call <2 x double> @llvm.canonicalize.v2f64(<2 x double> undef) #1 @@ -255,9 +317,3 @@ define void @canonicalize_f64() { %v16f64 = call <16 x double> @llvm.canonicalize.v16f64(<16 x double> undef) #1 ret void } - - - - - - diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll b/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll index 55994d865fa6c..9b1495b35a89d 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/fadd.ll @@ -2,159 +2,190 @@ ; RUN: opt -passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FASTF16,GFX90A-FASTF64 %s ; RUN: opt -passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=NOPACKEDF32,FASTF16,FASTF64 %s ; RUN: opt -passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=NOPACKEDF32,SLOWF64 %s +; RUN: opt -passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s ; RUN: opt -passes="print" -cost-kind=code-size 2>&1 
-disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FASTF16-SIZE,GFX90A-FASTF64-SIZE %s ; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=NOPACKEDF32-SIZE,FASTF16-SIZE %s ; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=NOPACKEDF32-SIZE,SLOWF64-SIZE %s +; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250-SIZE %s ; END. define amdgpu_kernel void @fadd_f32() #0 { ; GFX90A-FASTF64-LABEL: 'fadd_f32' -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fadd float undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fadd <2 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fadd <3 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fadd <4 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fadd <5 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fadd <8 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fadd <9 x float> undef, undef +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fadd float poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fadd <2 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fadd <3 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fadd <4 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fadd <5 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fadd <8 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fadd <9 x float> poison, poison ; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; NOPACKEDF32-LABEL: 'fadd_f32' -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fadd float undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fadd <2 x float> undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fadd <3 x float> undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fadd <4 x float> undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fadd <5 x float> undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fadd <8 x float> undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fadd <9 x float> undef, undef +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fadd float poison, poison +; 
NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fadd <2 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fadd <3 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fadd <4 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fadd <5 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fadd <8 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fadd <9 x float> poison, poison ; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; GFX90A-FASTF64-SIZE-LABEL: 'fadd_f32' -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fadd float undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fadd <2 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fadd <3 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fadd <4 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fadd <5 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fadd <8 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fadd <9 x float> undef, undef +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fadd float poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fadd <2 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fadd <3 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fadd <4 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fadd <5 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fadd <8 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fadd <9 x float> poison, poison ; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; NOPACKEDF32-SIZE-LABEL: 'fadd_f32' -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fadd float undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fadd <2 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fadd <3 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fadd <4 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fadd <5 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fadd <8 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 27 for 
instruction: %v9f32 = fadd <9 x float> undef, undef +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fadd float poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fadd <2 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fadd <3 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fadd <4 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fadd <5 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fadd <8 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fadd <9 x float> poison, poison ; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f32 = fadd float undef, undef - %v2f32 = fadd <2 x float> undef, undef - %v3f32 = fadd <3 x float> undef, undef - %v4f32 = fadd <4 x float> undef, undef - %v5f32 = fadd <5 x float> undef, undef - %v8f32 = fadd <8 x float> undef, undef - %v9f32 = fadd <9 x float> undef, undef + %f32 = fadd float poison, poison + %v2f32 = fadd <2 x float> poison, poison + %v3f32 = fadd <3 x float> poison, poison + %v4f32 = fadd <4 x float> poison, poison + %v5f32 = fadd <5 x float> poison, poison + %v8f32 = fadd <8 x float> poison, poison + %v9f32 = fadd <9 x float> poison, poison ret void } define amdgpu_kernel void @fadd_f64() #0 { ; GFX90A-FASTF64-LABEL: 'fadd_f64' -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fadd double undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fadd <2 x double> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fadd <3 x double> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fadd <4 x double> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fadd <5 x double> undef, undef +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fadd double poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fadd <2 x double> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fadd <3 x double> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fadd <4 x double> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fadd <5 x double> poison, poison ; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; FASTF64-LABEL: 'fadd_f64' -; FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fadd double undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fadd <2 x double> undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fadd <3 x double> undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fadd <4 x double> undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fadd 
<5 x double> undef, undef +; FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fadd double poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fadd <2 x double> poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fadd <3 x double> poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fadd <4 x double> poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fadd <5 x double> poison, poison ; FASTF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOWF64-LABEL: 'fadd_f64' -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f64 = fadd double undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = fadd <2 x double> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f64 = fadd <3 x double> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f64 = fadd <4 x double> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v5f64 = fadd <5 x double> undef, undef +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f64 = fadd double poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = fadd <2 x double> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f64 = fadd <3 x double> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f64 = fadd <4 x double> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v5f64 = fadd <5 x double> poison, poison ; SLOWF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; GFX90A-FASTF64-SIZE-LABEL: 'fadd_f64' -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fadd double undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fadd <2 x double> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fadd <3 x double> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fadd <4 x double> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fadd <5 x double> undef, undef +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fadd double poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fadd <2 x double> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fadd <3 x double> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fadd <4 x double> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fadd <5 x double> poison, poison ; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; NOPACKEDF32-SIZE-LABEL: 'fadd_f64' -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fadd double undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found 
an estimated cost of 4 for instruction: %v2f64 = fadd <2 x double> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fadd <3 x double> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fadd <4 x double> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fadd <5 x double> undef, undef +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fadd double poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fadd <2 x double> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fadd <3 x double> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fadd <4 x double> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fadd <5 x double> poison, poison ; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f64 = fadd double undef, undef - %v2f64 = fadd <2 x double> undef, undef - %v3f64 = fadd <3 x double> undef, undef - %v4f64 = fadd <4 x double> undef, undef - %v5f64 = fadd <5 x double> undef, undef + %f64 = fadd double poison, poison + %v2f64 = fadd <2 x double> poison, poison + %v3f64 = fadd <3 x double> poison, poison + %v4f64 = fadd <4 x double> poison, poison + %v5f64 = fadd <5 x double> poison, poison ret void } define amdgpu_kernel void @fadd_f16() #0 { ; FASTF16-LABEL: 'fadd_f16' -; FASTF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fadd half undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fadd <2 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fadd <3 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fadd <4 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fadd <5 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fadd <16 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fadd <17 x half> undef, undef +; FASTF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fadd half poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fadd <2 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fadd <3 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fadd <4 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fadd <5 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fadd <16 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fadd <17 x half> poison, poison ; FASTF16-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOWF64-LABEL: 'fadd_f16' -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fadd half undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: 
%v2f16 = fadd <2 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fadd <3 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fadd <4 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fadd <5 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fadd <16 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fadd <17 x half> undef, undef +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fadd half poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fadd <2 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fadd <3 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fadd <4 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fadd <5 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fadd <16 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fadd <17 x half> poison, poison ; SLOWF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; FASTF16-SIZE-LABEL: 'fadd_f16' -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fadd half undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fadd <2 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fadd <3 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fadd <4 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fadd <5 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fadd <16 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fadd <17 x half> undef, undef +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fadd half poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fadd <2 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fadd <3 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fadd <4 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fadd <5 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fadd <16 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fadd <17 x half> poison, poison ; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; SLOWF64-SIZE-LABEL: 'fadd_f16' -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fadd half undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fadd <2 x half> undef, undef -; 
SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fadd <3 x half> undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fadd <4 x half> undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fadd <5 x half> undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fadd <16 x half> undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fadd <17 x half> undef, undef +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fadd half poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fadd <2 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fadd <3 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fadd <4 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fadd <5 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fadd <16 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fadd <17 x half> poison, poison ; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f16 = fadd half undef, undef - %v2f16 = fadd <2 x half> undef, undef - %v3f16 = fadd <3 x half> undef, undef - %v4f16 = fadd <4 x half> undef, undef - %v5f16 = fadd <5 x half> undef, undef - %v16f16 = fadd <16 x half> undef, undef - %v17f16 = fadd <17 x half> undef, undef + %f16 = fadd half poison, poison + %v2f16 = fadd <2 x half> poison, poison + %v3f16 = fadd <3 x half> poison, poison + %v4f16 = fadd <4 x half> poison, poison + %v5f16 = fadd <5 x half> poison, poison + %v16f16 = fadd <16 x half> poison, poison + %v17f16 = fadd <17 x half> poison, poison + ret void +} + +define amdgpu_kernel void @fadd_bf16() #0 { +; GFX1250-LABEL: 'fadd_bf16' +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fadd bfloat poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2bf16 = fadd <2 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3bf16 = fadd <3 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4bf16 = fadd <4 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5bf16 = fadd <5 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16bf16 = fadd <16 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17bf16 = fadd <17 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void +; GFX1250-SIZE-LABEL: 'fadd_bf16' +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fadd bfloat poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2bf16 = fadd <2 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3bf16 = fadd <3 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for 
instruction: %v4bf16 = fadd <4 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5bf16 = fadd <5 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16bf16 = fadd <16 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17bf16 = fadd <17 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void + %bf16 = fadd bfloat poison, poison + %v2bf16 = fadd <2 x bfloat> poison, poison + %v3bf16 = fadd <3 x bfloat> poison, poison + %v4bf16 = fadd <4 x bfloat> poison, poison + %v5bf16 = fadd <5 x bfloat> poison, poison + %v16bf16 = fadd <16 x bfloat> poison, poison + %v17bf16 = fadd <17 x bfloat> poison, poison ret void } diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fma.ll b/llvm/test/Analysis/CostModel/AMDGPU/fma.ll index 2ff9d4f7f5e38..f34ee31bcf4ce 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fma.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/fma.ll @@ -2,166 +2,186 @@ ; RUN: opt -passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1010 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FAST %s ; RUN: opt -passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FAST %s ; RUN: opt -passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck -check-prefixes=SLOW %s +; RUN: opt -passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s ; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1010 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FAST-SIZE %s ; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FAST-SIZE %s ; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck -check-prefix=SLOW-SIZE %s +; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250-SIZE %s define void @fma_f16() { ; FAST-LABEL: 'fma_f16' -; FAST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = call half @llvm.fma.f16(half undef, half undef, half undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> undef, <3 x half> undef, <3 x half> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> undef, <5 x half> undef, <5 x half> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = call <17 x half> @llvm.fma.v17f16(<17 x half> undef, <17 x half> undef, <17 x half> undef) +; 
FAST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = call half @llvm.fma.f16(half poison, half poison, half poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> poison, <3 x half> poison, <3 x half> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> poison, <5 x half> poison, <5 x half> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = call <17 x half> @llvm.fma.v17f16(<17 x half> poison, <17 x half> poison, <17 x half> poison) ; FAST-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOW-LABEL: 'fma_f16' -; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f16 = call half @llvm.fma.f16(half undef, half undef, half undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> undef, <3 x half> undef, <3 x half> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> undef, <5 x half> undef, <5 x half> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v17f16 = call <17 x half> @llvm.fma.v17f16(<17 x half> undef, <17 x half> undef, <17 x half> undef) +; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f16 = call half @llvm.fma.f16(half poison, half poison, half poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> poison, <3 x half> poison, <3 x half> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> poison, <5 x half> poison, <5 x half> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v17f16 = call <17 x half> 
@llvm.fma.v17f16(<17 x half> poison, <17 x half> poison, <17 x half> poison) ; SLOW-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; FAST-SIZE-LABEL: 'fma_f16' -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = call half @llvm.fma.f16(half undef, half undef, half undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> undef, <3 x half> undef, <3 x half> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> undef, <5 x half> undef, <5 x half> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = call <17 x half> @llvm.fma.v17f16(<17 x half> undef, <17 x half> undef, <17 x half> undef) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = call half @llvm.fma.f16(half poison, half poison, half poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> poison, <3 x half> poison, <3 x half> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> poison, <5 x half> poison, <5 x half> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = call <17 x half> @llvm.fma.v17f16(<17 x half> poison, <17 x half> poison, <17 x half> poison) ; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; SLOW-SIZE-LABEL: 'fma_f16' -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f16 = call half @llvm.fma.f16(half undef, half undef, half undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> undef, <3 x half> undef, <3 x half> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> undef, <5 x half> undef, <5 x half> undef) -; 
SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v17f16 = call <17 x half> @llvm.fma.v17f16(<17 x half> undef, <17 x half> undef, <17 x half> undef) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f16 = call half @llvm.fma.f16(half poison, half poison, half poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> poison, <3 x half> poison, <3 x half> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> poison, <5 x half> poison, <5 x half> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v17f16 = call <17 x half> @llvm.fma.v17f16(<17 x half> poison, <17 x half> poison, <17 x half> poison) ; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f16 = call half @llvm.fma.f16(half undef, half undef, half undef) - %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef) - %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> undef, <3 x half> undef, <3 x half> undef) - %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef) - %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> undef, <5 x half> undef, <5 x half> undef) - %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef) - %v17f16 = call <17 x half> @llvm.fma.v17f16(<17 x half> undef, <17 x half> undef, <17 x half> undef) + %f16 = call half @llvm.fma.f16(half poison, half poison, half poison) + %v2f16 = call <2 x half> @llvm.fma.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison) + %v3f16 = call <3 x half> @llvm.fma.v3f16(<3 x half> poison, <3 x half> poison, <3 x half> poison) + %v4f16 = call <4 x half> @llvm.fma.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison) + %v5f16 = call <5 x half> @llvm.fma.v5f16(<5 x half> poison, <5 x half> poison, <5 x half> poison) + %v16f16 = call <16 x half> @llvm.fma.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison) + %v17f16 = call <17 x half> @llvm.fma.v17f16(<17 x half> poison, <17 x half> poison, <17 x half> poison) ret void } define void @fma_bf16() { ; FAST-LABEL: 'fma_bf16' -; FAST-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat undef, bfloat undef, bfloat undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> undef, <3 x bfloat> undef, <3 x 
bfloat> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> undef, <5 x bfloat> undef, <5 x bfloat> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef) -; FAST-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> undef, <17 x bfloat> undef, <17 x bfloat> undef) +; FAST-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> poison, <3 x bfloat> poison, <3 x bfloat> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> poison, <5 x bfloat> poison, <5 x bfloat> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison) +; FAST-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> poison, <17 x bfloat> poison, <17 x bfloat> poison) ; FAST-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOW-LABEL: 'fma_bf16' -; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat undef, bfloat undef, bfloat undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> undef, <3 x bfloat> undef, <3 x bfloat> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> undef, <5 x bfloat> undef, <5 x bfloat> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> undef, <17 x bfloat> undef, <17 x bfloat> undef) +; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: 
%v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> poison, <3 x bfloat> poison, <3 x bfloat> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> poison, <5 x bfloat> poison, <5 x bfloat> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 136 for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> poison, <17 x bfloat> poison, <17 x bfloat> poison) ; SLOW-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; FAST-SIZE-LABEL: 'fma_bf16' -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat undef, bfloat undef, bfloat undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> undef, <3 x bfloat> undef, <3 x bfloat> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> undef, <5 x bfloat> undef, <5 x bfloat> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef) -; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> undef, <17 x bfloat> undef, <17 x bfloat> undef) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> poison, <3 x bfloat> poison, <3 x bfloat> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> poison, <5 x bfloat> poison, <5 x bfloat> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison) +; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 192 
for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> poison, <17 x bfloat> poison, <17 x bfloat> poison) ; FAST-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; SLOW-SIZE-LABEL: 'fma_bf16' -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat undef, bfloat undef, bfloat undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> undef, <3 x bfloat> undef, <3 x bfloat> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> undef, <5 x bfloat> undef, <5 x bfloat> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> undef, <17 x bfloat> undef, <17 x bfloat> undef) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> poison, <3 x bfloat> poison, <3 x bfloat> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> poison, <5 x bfloat> poison, <5 x bfloat> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 68 for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> poison, <17 x bfloat> poison, <17 x bfloat> poison) ; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %bf16 = call bfloat @llvm.fma.bf16(bfloat undef, bfloat undef, bfloat undef) - %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef) - %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> undef, <3 x bfloat> undef, <3 x bfloat> undef) - %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef) - %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> undef, <5 x bfloat> undef, <5 x bfloat> undef) - %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef) - %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x 
bfloat> undef, <17 x bfloat> undef, <17 x bfloat> undef) +; GFX1250-LABEL: 'fma_bf16' +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> poison, <3 x bfloat> poison, <3 x bfloat> poison) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> poison, <5 x bfloat> poison, <5 x bfloat> poison) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> poison, <17 x bfloat> poison, <17 x bfloat> poison) +; GFX1250-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void +; GFX1250-SIZE-LABEL: 'fma_bf16' +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %bf16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> poison, <3 x bfloat> poison, <3 x bfloat> poison) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> poison, <5 x bfloat> poison, <5 x bfloat> poison) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 x bfloat> poison, <17 x bfloat> poison, <17 x bfloat> poison) +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void + %bf16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison) + %v2bf16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison) + %v3bf16 = call <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat> poison, <3 x bfloat> poison, <3 x bfloat> poison) + %v4bf16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison) + %v5bf16 = call <5 x bfloat> @llvm.fma.v5bf16(<5 x bfloat> poison, <5 x bfloat> poison, <5 x bfloat> poison) + %v16bf16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison) + %v17bf16 = call <17 x bfloat> @llvm.fma.v17bf16(<17 
x bfloat> poison, <17 x bfloat> poison, <17 x bfloat> poison) ret void } define void @fma_f32() { ; SLOW-LABEL: 'fma_f32' -; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f32 = call float @llvm.fma.f32(float undef, float undef, float undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f32 = call <2 x float> @llvm.fma.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f32 = call <3 x float> @llvm.fma.v3f32(<3 x float> undef, <3 x float> undef, <3 x float> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %v5f32 = call <5 x float> @llvm.fma.v5f32(<5 x float> undef, <5 x float> undef, <5 x float> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v8f32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 108 for instruction: %v9f32 = call <9 x float> @llvm.fma.v9f32(<9 x float> undef, <9 x float> undef, <9 x float> undef) +; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f32 = call float @llvm.fma.f32(float poison, float poison, float poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f32 = call <2 x float> @llvm.fma.v2f32(<2 x float> poison, <2 x float> poison, <2 x float> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f32 = call <3 x float> @llvm.fma.v3f32(<3 x float> poison, <3 x float> poison, <3 x float> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f32 = call <4 x float> @llvm.fma.v4f32(<4 x float> poison, <4 x float> poison, <4 x float> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %v5f32 = call <5 x float> @llvm.fma.v5f32(<5 x float> poison, <5 x float> poison, <5 x float> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v8f32 = call <8 x float> @llvm.fma.v8f32(<8 x float> poison, <8 x float> poison, <8 x float> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 108 for instruction: %v9f32 = call <9 x float> @llvm.fma.v9f32(<9 x float> poison, <9 x float> poison, <9 x float> poison) ; SLOW-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOW-SIZE-LABEL: 'fma_f32' -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f32 = call float @llvm.fma.f32(float undef, float undef, float undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f32 = call <2 x float> @llvm.fma.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f32 = call <3 x float> @llvm.fma.v3f32(<3 x float> undef, <3 x float> undef, <3 x float> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v5f32 = call <5 x float> @llvm.fma.v5f32(<5 x float> undef, <5 x float> undef, <5 x float> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8f32 = call <8 x 
float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %v9f32 = call <9 x float> @llvm.fma.v9f32(<9 x float> undef, <9 x float> undef, <9 x float> undef) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f32 = call float @llvm.fma.f32(float poison, float poison, float poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f32 = call <2 x float> @llvm.fma.v2f32(<2 x float> poison, <2 x float> poison, <2 x float> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f32 = call <3 x float> @llvm.fma.v3f32(<3 x float> poison, <3 x float> poison, <3 x float> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f32 = call <4 x float> @llvm.fma.v4f32(<4 x float> poison, <4 x float> poison, <4 x float> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v5f32 = call <5 x float> @llvm.fma.v5f32(<5 x float> poison, <5 x float> poison, <5 x float> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8f32 = call <8 x float> @llvm.fma.v8f32(<8 x float> poison, <8 x float> poison, <8 x float> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %v9f32 = call <9 x float> @llvm.fma.v9f32(<9 x float> poison, <9 x float> poison, <9 x float> poison) ; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f32 = call float @llvm.fma.f32(float undef, float undef, float undef) - %v2f32 = call <2 x float> @llvm.fma.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef) - %v3f32 = call <3 x float> @llvm.fma.v3f32(<3 x float> undef, <3 x float> undef, <3 x float> undef) - %v4f32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef) - %v5f32 = call <5 x float> @llvm.fma.v5f32(<5 x float> undef, <5 x float> undef, <5 x float> undef) - %v8f32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef) - %v9f32 = call <9 x float> @llvm.fma.v9f32(<9 x float> undef, <9 x float> undef, <9 x float> undef) + %f32 = call float @llvm.fma.f32(float poison, float poison, float poison) + %v2f32 = call <2 x float> @llvm.fma.v2f32(<2 x float> poison, <2 x float> poison, <2 x float> poison) + %v3f32 = call <3 x float> @llvm.fma.v3f32(<3 x float> poison, <3 x float> poison, <3 x float> poison) + %v4f32 = call <4 x float> @llvm.fma.v4f32(<4 x float> poison, <4 x float> poison, <4 x float> poison) + %v5f32 = call <5 x float> @llvm.fma.v5f32(<5 x float> poison, <5 x float> poison, <5 x float> poison) + %v8f32 = call <8 x float> @llvm.fma.v8f32(<8 x float> poison, <8 x float> poison, <8 x float> poison) + %v9f32 = call <9 x float> @llvm.fma.v9f32(<9 x float> poison, <9 x float> poison, <9 x float> poison) ret void } define void @fma_f64() { ; SLOW-LABEL: 'fma_f64' -; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f64 = call double @llvm.fma.f64(double undef, double undef, double undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f64 = call <3 x double> @llvm.fma.v3f64(<3 x double> undef, <3 x double> undef, <3 x double> undef) -; SLOW-NEXT: Cost Model: Found an estimated 
cost of 16 for instruction: %v4f64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef) -; SLOW-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v5f64 = call <5 x double> @llvm.fma.v5f64(<5 x double> undef, <5 x double> undef, <5 x double> undef) +; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f64 = call double @llvm.fma.f64(double poison, double poison, double poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = call <2 x double> @llvm.fma.v2f64(<2 x double> poison, <2 x double> poison, <2 x double> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f64 = call <3 x double> @llvm.fma.v3f64(<3 x double> poison, <3 x double> poison, <3 x double> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f64 = call <4 x double> @llvm.fma.v4f64(<4 x double> poison, <4 x double> poison, <4 x double> poison) +; SLOW-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v5f64 = call <5 x double> @llvm.fma.v5f64(<5 x double> poison, <5 x double> poison, <5 x double> poison) ; SLOW-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOW-SIZE-LABEL: 'fma_f64' -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = call double @llvm.fma.f64(double undef, double undef, double undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = call <3 x double> @llvm.fma.v3f64(<3 x double> undef, <3 x double> undef, <3 x double> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef) -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = call <5 x double> @llvm.fma.v5f64(<5 x double> undef, <5 x double> undef, <5 x double> undef) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = call double @llvm.fma.f64(double poison, double poison, double poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = call <2 x double> @llvm.fma.v2f64(<2 x double> poison, <2 x double> poison, <2 x double> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = call <3 x double> @llvm.fma.v3f64(<3 x double> poison, <3 x double> poison, <3 x double> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = call <4 x double> @llvm.fma.v4f64(<4 x double> poison, <4 x double> poison, <4 x double> poison) +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = call <5 x double> @llvm.fma.v5f64(<5 x double> poison, <5 x double> poison, <5 x double> poison) ; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f64 = call double @llvm.fma.f64(double undef, double undef, double undef) - %v2f64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef) - %v3f64 = call <3 x double> @llvm.fma.v3f64(<3 x double> undef, <3 x double> undef, <3 x double> undef) - %v4f64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef) - %v5f64 = call <5 x double> 
@llvm.fma.v5f64(<5 x double> undef, <5 x double> undef, <5 x double> undef) + %f64 = call double @llvm.fma.f64(double poison, double poison, double poison) + %v2f64 = call <2 x double> @llvm.fma.v2f64(<2 x double> poison, <2 x double> poison, <2 x double> poison) + %v3f64 = call <3 x double> @llvm.fma.v3f64(<3 x double> poison, <3 x double> poison, <3 x double> poison) + %v4f64 = call <4 x double> @llvm.fma.v4f64(<4 x double> poison, <4 x double> poison, <4 x double> poison) + %v5f64 = call <5 x double> @llvm.fma.v5f64(<5 x double> poison, <5 x double> poison, <5 x double> poison) ret void } diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll b/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll index adc4eea309a58..c0b9cda23ea04 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/fmul.ll @@ -2,210 +2,231 @@ ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=GFX9,GFX90A-FASTF64 %s ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=GFX9,F32,FASTF64 %s ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=F32,SLOW %s +; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s ; RUN: opt -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=GFX9-SIZE,GFX90A-SIZE %s ; RUN: opt -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=SIZE,GFX9-SIZE %s ; RUN: opt -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=SIZE,SLOW-SIZE %s +; RUN: opt -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250-SIZE %s ; END.
define amdgpu_kernel void @fmul_f32() #0 { ; GFX90A-FASTF64-LABEL: 'fmul_f32' -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fmul float undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fmul <2 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fmul <3 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fmul <4 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fmul <5 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fmul <8 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fmul <9 x float> undef, undef +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fmul float poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fmul <2 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fmul <3 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fmul <4 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fmul <5 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fmul <8 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fmul <9 x float> poison, poison ; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; F32-LABEL: 'fmul_f32' -; F32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fmul float undef, undef -; F32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fmul <2 x float> undef, undef -; F32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fmul <3 x float> undef, undef -; F32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fmul <4 x float> undef, undef -; F32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fmul <5 x float> undef, undef -; F32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fmul <8 x float> undef, undef -; F32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fmul <9 x float> undef, undef +; F32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fmul float poison, poison +; F32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fmul <2 x float> poison, poison +; F32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fmul <3 x float> poison, poison +; F32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fmul <4 x float> poison, poison +; F32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fmul <5 x float> poison, poison +; F32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fmul <8 x float> poison, poison +; F32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fmul <9 x float> poison, poison ; F32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; GFX90A-SIZE-LABEL: 'fmul_f32' -; 
GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fmul float undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fmul <2 x float> undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fmul <3 x float> undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fmul <4 x float> undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fmul <5 x float> undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fmul <8 x float> undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fmul <9 x float> undef, undef +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fmul float poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fmul <2 x float> poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fmul <3 x float> poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fmul <4 x float> poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fmul <5 x float> poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fmul <8 x float> poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fmul <9 x float> poison, poison ; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; SIZE-LABEL: 'fmul_f32' -; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fmul float undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fmul <2 x float> undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fmul <3 x float> undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fmul <4 x float> undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fmul <5 x float> undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fmul <8 x float> undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fmul <9 x float> undef, undef +; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fmul float poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fmul <2 x float> poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fmul <3 x float> poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fmul <4 x float> poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fmul <5 x float> poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fmul <8 x float> poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fmul <9 x float> poison, poison ; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f32 = fmul float undef, undef - %v2f32 = fmul <2 x float> undef, undef - %v3f32 = fmul <3 x float> undef, undef - %v4f32 = fmul <4 x float> undef, undef 
- %v5f32 = fmul <5 x float> undef, undef - %v8f32 = fmul <8 x float> undef, undef - %v9f32 = fmul <9 x float> undef, undef + %f32 = fmul float poison, poison + %v2f32 = fmul <2 x float> poison, poison + %v3f32 = fmul <3 x float> poison, poison + %v4f32 = fmul <4 x float> poison, poison + %v5f32 = fmul <5 x float> poison, poison + %v8f32 = fmul <8 x float> poison, poison + %v9f32 = fmul <9 x float> poison, poison ret void } define amdgpu_kernel void @fmul_f64() #0 { ; GFX90A-FASTF64-LABEL: 'fmul_f64' -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fmul double undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fmul <2 x double> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fmul <3 x double> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fmul <4 x double> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fmul <5 x double> undef, undef +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fmul double poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fmul <2 x double> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fmul <3 x double> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fmul <4 x double> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fmul <5 x double> poison, poison ; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; FASTF64-LABEL: 'fmul_f64' -; FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fmul double undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fmul <2 x double> undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fmul <3 x double> undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fmul <4 x double> undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fmul <5 x double> undef, undef +; FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fmul double poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fmul <2 x double> poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fmul <3 x double> poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fmul <4 x double> poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fmul <5 x double> poison, poison ; FASTF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOW-LABEL: 'fmul_f64' -; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f64 = fmul double undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = fmul <2 x double> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f64 = fmul <3 x double> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f64 = fmul <4 x double> undef, undef -; SLOW-NEXT: 
Cost Model: Found an estimated cost of 96 for instruction: %v5f64 = fmul <5 x double> undef, undef +; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f64 = fmul double poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = fmul <2 x double> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f64 = fmul <3 x double> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f64 = fmul <4 x double> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v5f64 = fmul <5 x double> poison, poison ; SLOW-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; GFX90A-SIZE-LABEL: 'fmul_f64' -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fmul double undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fmul <2 x double> undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fmul <3 x double> undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fmul <4 x double> undef, undef -; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fmul <5 x double> undef, undef +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fmul double poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fmul <2 x double> poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fmul <3 x double> poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fmul <4 x double> poison, poison +; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fmul <5 x double> poison, poison ; GFX90A-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; SIZE-LABEL: 'fmul_f64' -; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fmul double undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fmul <2 x double> undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fmul <3 x double> undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fmul <4 x double> undef, undef -; SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fmul <5 x double> undef, undef +; SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fmul double poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fmul <2 x double> poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fmul <3 x double> poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fmul <4 x double> poison, poison +; SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fmul <5 x double> poison, poison ; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f64 = fmul double undef, undef - %v2f64 = fmul <2 x double> undef, undef - %v3f64 = fmul <3 x double> undef, undef - %v4f64 = fmul <4 x double> undef, undef - %v5f64 = fmul <5 x double> undef, undef + %f64 = fmul double poison, poison + %v2f64 = fmul <2 x double> poison, 
poison + %v3f64 = fmul <3 x double> poison, poison + %v4f64 = fmul <4 x double> poison, poison + %v5f64 = fmul <5 x double> poison, poison ret void } define amdgpu_kernel void @fmul_f16() #0 { ; GFX9-LABEL: 'fmul_f16' -; GFX9-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fmul half undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fmul <2 x half> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fmul <3 x half> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fmul <4 x half> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fmul <5 x half> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fmul <16 x half> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fmul <17 x half> undef, undef +; GFX9-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fmul half poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fmul <2 x half> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fmul <3 x half> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fmul <4 x half> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fmul <5 x half> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fmul <16 x half> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fmul <17 x half> poison, poison ; GFX9-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOW-LABEL: 'fmul_f16' -; SLOW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fmul half undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fmul <2 x half> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fmul <3 x half> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fmul <4 x half> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fmul <5 x half> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fmul <16 x half> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fmul <17 x half> undef, undef +; SLOW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fmul half poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fmul <2 x half> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fmul <3 x half> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fmul <4 x half> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fmul <5 x half> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fmul <16 x half> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fmul <17 x half> poison, poison ; SLOW-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; GFX9-SIZE-LABEL: 'fmul_f16' -; 
GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fmul half undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fmul <2 x half> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fmul <3 x half> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fmul <4 x half> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fmul <5 x half> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fmul <16 x half> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fmul <17 x half> undef, undef +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fmul half poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fmul <2 x half> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fmul <3 x half> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fmul <4 x half> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fmul <5 x half> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fmul <16 x half> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fmul <17 x half> poison, poison ; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; SLOW-SIZE-LABEL: 'fmul_f16' -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fmul half undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fmul <2 x half> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fmul <3 x half> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fmul <4 x half> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fmul <5 x half> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fmul <16 x half> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fmul <17 x half> undef, undef +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fmul half poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fmul <2 x half> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fmul <3 x half> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fmul <4 x half> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fmul <5 x half> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fmul <16 x half> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fmul <17 x half> poison, poison ; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f16 = fmul half undef, undef - %v2f16 = fmul <2 x half> undef, undef - %v3f16 = fmul <3 x half> undef, undef - 
%v4f16 = fmul <4 x half> undef, undef - %v5f16 = fmul <5 x half> undef, undef - %v16f16 = fmul <16 x half> undef, undef - %v17f16 = fmul <17 x half> undef, undef + %f16 = fmul half poison, poison + %v2f16 = fmul <2 x half> poison, poison + %v3f16 = fmul <3 x half> poison, poison + %v4f16 = fmul <4 x half> poison, poison + %v5f16 = fmul <5 x half> poison, poison + %v16f16 = fmul <16 x half> poison, poison + %v17f16 = fmul <17 x half> poison, poison ret void } define amdgpu_kernel void @fmul_bf16() #0 { ; GFX9-LABEL: 'fmul_bf16' -; GFX9-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %bf16 = fmul bfloat undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v2bf16 = fmul <2 x bfloat> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v3bf16 = fmul <3 x bfloat> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %v4bf16 = fmul <4 x bfloat> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v5bf16 = fmul <5 x bfloat> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %v16bf16 = fmul <16 x bfloat> undef, undef -; GFX9-NEXT: Cost Model: Found an estimated cost of 50 for instruction: %v17bf16 = fmul <17 x bfloat> undef, undef +; GFX9-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fmul bfloat poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2bf16 = fmul <2 x bfloat> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3bf16 = fmul <3 x bfloat> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4bf16 = fmul <4 x bfloat> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5bf16 = fmul <5 x bfloat> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16bf16 = fmul <16 x bfloat> poison, poison +; GFX9-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v17bf16 = fmul <17 x bfloat> poison, poison ; GFX9-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOW-LABEL: 'fmul_bf16' -; SLOW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fmul bfloat undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2bf16 = fmul <2 x bfloat> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3bf16 = fmul <3 x bfloat> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4bf16 = fmul <4 x bfloat> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5bf16 = fmul <5 x bfloat> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16bf16 = fmul <16 x bfloat> undef, undef -; SLOW-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17bf16 = fmul <17 x bfloat> undef, undef +; SLOW-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fmul bfloat poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2bf16 = fmul <2 x bfloat> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3bf16 = fmul <3 x bfloat> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4bf16 = fmul <4 x bfloat> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 8 for instruction: 
%v5bf16 = fmul <5 x bfloat> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16bf16 = fmul <16 x bfloat> poison, poison +; SLOW-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17bf16 = fmul <17 x bfloat> poison, poison ; SLOW-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; +; GFX1250-LABEL: 'fmul_bf16' +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fmul bfloat poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2bf16 = fmul <2 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3bf16 = fmul <3 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4bf16 = fmul <4 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5bf16 = fmul <5 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16bf16 = fmul <16 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17bf16 = fmul <17 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void +; ; GFX9-SIZE-LABEL: 'fmul_bf16' -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fmul bfloat undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2bf16 = fmul <2 x bfloat> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v3bf16 = fmul <3 x bfloat> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4bf16 = fmul <4 x bfloat> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v5bf16 = fmul <5 x bfloat> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16bf16 = fmul <16 x bfloat> undef, undef -; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v17bf16 = fmul <17 x bfloat> undef, undef +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fmul bfloat poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2bf16 = fmul <2 x bfloat> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3bf16 = fmul <3 x bfloat> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4bf16 = fmul <4 x bfloat> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5bf16 = fmul <5 x bfloat> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16bf16 = fmul <16 x bfloat> poison, poison +; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v17bf16 = fmul <17 x bfloat> poison, poison ; GFX9-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; SLOW-SIZE-LABEL: 'fmul_bf16' -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fmul bfloat undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2bf16 = fmul <2 x bfloat> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3bf16 = fmul <3 x bfloat> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4bf16 = fmul <4 x 
bfloat> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5bf16 = fmul <5 x bfloat> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16bf16 = fmul <16 x bfloat> undef, undef -; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17bf16 = fmul <17 x bfloat> undef, undef +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fmul bfloat poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2bf16 = fmul <2 x bfloat> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3bf16 = fmul <3 x bfloat> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4bf16 = fmul <4 x bfloat> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5bf16 = fmul <5 x bfloat> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16bf16 = fmul <16 x bfloat> poison, poison +; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17bf16 = fmul <17 x bfloat> poison, poison ; SLOW-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %bf16 = fmul bfloat undef, undef - %v2bf16 = fmul <2 x bfloat> undef, undef - %v3bf16 = fmul <3 x bfloat> undef, undef - %v4bf16 = fmul <4 x bfloat> undef, undef - %v5bf16 = fmul <5 x bfloat> undef, undef - %v16bf16 = fmul <16 x bfloat> undef, undef - %v17bf16 = fmul <17 x bfloat> undef, undef +; GFX1250-SIZE-LABEL: 'fmul_bf16' +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fmul bfloat poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2bf16 = fmul <2 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3bf16 = fmul <3 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4bf16 = fmul <4 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5bf16 = fmul <5 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16bf16 = fmul <16 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17bf16 = fmul <17 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void + %bf16 = fmul bfloat poison, poison + %v2bf16 = fmul <2 x bfloat> poison, poison + %v3bf16 = fmul <3 x bfloat> poison, poison + %v4bf16 = fmul <4 x bfloat> poison, poison + %v5bf16 = fmul <5 x bfloat> poison, poison + %v16bf16 = fmul <16 x bfloat> poison, poison + %v17bf16 = fmul <17 x bfloat> poison, poison ret void } diff --git a/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll b/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll index 4e71a71326bad..6b71603f70f6b 100644 --- a/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll +++ b/llvm/test/Analysis/CostModel/AMDGPU/fsub.ll @@ -2,158 +2,191 @@ ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FASTF16,GFX90A-FASTF64 %s ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=NOPACKEDF32,FASTF16,FASTF64 %s ; RUN: opt
-passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=NOPACKEDF32,SLOWF64 %s +; RUN: opt -passes="print" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s ; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx90a -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FASTF16-SIZE,GFX90A-FASTF64-SIZE %s ; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=NOPACKEDF32-SIZE,FASTF16-SIZE %s ; RUN: opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=NOPACKEDF32-SIZE,SLOWF64-SIZE %s +; RUN opt -passes="print" -cost-kind=code-size 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250-SIZE %s ; END. define amdgpu_kernel void @fsub_f32() #0 { ; GFX90A-FASTF64-LABEL: 'fsub_f32' -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fsub float undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fsub <2 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fsub <3 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fsub <4 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fsub <5 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fsub <8 x float> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fsub <9 x float> undef, undef +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fsub float poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fsub <2 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fsub <3 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fsub <4 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fsub <5 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fsub <8 x float> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fsub <9 x float> poison, poison ; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; NOPACKEDF32-LABEL: 'fsub_f32' -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fsub float undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fsub <2 x float> undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fsub <3 x float> undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fsub <4 x float> undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fsub <5 x float> undef, undef -; 
NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fsub <8 x float> undef, undef -; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fsub <9 x float> undef, undef +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fsub float poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fsub <2 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fsub <3 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fsub <4 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fsub <5 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fsub <8 x float> poison, poison +; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fsub <9 x float> poison, poison ; NOPACKEDF32-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; GFX90A-FASTF64-SIZE-LABEL: 'fsub_f32' -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fsub float undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fsub <2 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fsub <3 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fsub <4 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fsub <5 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fsub <8 x float> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fsub <9 x float> undef, undef +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fsub float poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32 = fsub <2 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f32 = fsub <3 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = fsub <4 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v5f32 = fsub <5 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32 = fsub <8 x float> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v9f32 = fsub <9 x float> poison, poison ; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; NOPACKEDF32-SIZE-LABEL: 'fsub_f32' -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fsub float undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fsub <2 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fsub <3 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fsub 
<4 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fsub <5 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fsub <8 x float> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fsub <9 x float> undef, undef +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32 = fsub float poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = fsub <2 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f32 = fsub <3 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f32 = fsub <4 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v5f32 = fsub <5 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8f32 = fsub <8 x float> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %v9f32 = fsub <9 x float> poison, poison ; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f32 = fsub float undef, undef - %v2f32 = fsub <2 x float> undef, undef - %v3f32 = fsub <3 x float> undef, undef - %v4f32 = fsub <4 x float> undef, undef - %v5f32 = fsub <5 x float> undef, undef - %v8f32 = fsub <8 x float> undef, undef - %v9f32 = fsub <9 x float> undef, undef + %f32 = fsub float poison, poison + %v2f32 = fsub <2 x float> poison, poison + %v3f32 = fsub <3 x float> poison, poison + %v4f32 = fsub <4 x float> poison, poison + %v5f32 = fsub <5 x float> poison, poison + %v8f32 = fsub <8 x float> poison, poison + %v9f32 = fsub <9 x float> poison, poison ret void } define amdgpu_kernel void @fsub_f64() #0 { ; GFX90A-FASTF64-LABEL: 'fsub_f64' -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fsub double undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fsub <2 x double> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fsub <3 x double> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fsub <4 x double> undef, undef -; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fsub <5 x double> undef, undef +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fsub double poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fsub <2 x double> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fsub <3 x double> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fsub <4 x double> poison, poison +; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fsub <5 x double> poison, poison ; GFX90A-FASTF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; FASTF64-LABEL: 'fsub_f64' -; FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fsub double undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fsub <2 
x double> undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fsub <3 x double> undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fsub <4 x double> undef, undef -; FASTF64-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fsub <5 x double> undef, undef +; FASTF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fsub double poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fsub <2 x double> poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fsub <3 x double> poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fsub <4 x double> poison, poison +; FASTF64-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fsub <5 x double> poison, poison ; FASTF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOWF64-LABEL: 'fsub_f64' -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f64 = fsub double undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = fsub <2 x double> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f64 = fsub <3 x double> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f64 = fsub <4 x double> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v5f64 = fsub <5 x double> undef, undef +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %f64 = fsub double poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = fsub <2 x double> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v3f64 = fsub <3 x double> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4f64 = fsub <4 x double> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %v5f64 = fsub <5 x double> poison, poison ; SLOWF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; GFX90A-FASTF64-SIZE-LABEL: 'fsub_f64' -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fsub double undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fsub <2 x double> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fsub <3 x double> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fsub <4 x double> undef, undef -; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v5f64 = fsub <5 x double> undef, undef +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64 = fsub double poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f64 = fsub <2 x double> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v3f64 = fsub <3 x double> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f64 = fsub <4 x double> poison, poison +; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 24 for 
instruction: %v5f64 = fsub <5 x double> poison, poison ; GFX90A-FASTF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; NOPACKEDF32-SIZE-LABEL: 'fsub_f64' -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fsub double undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fsub <2 x double> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fsub <3 x double> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fsub <4 x double> undef, undef -; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fsub <5 x double> undef, undef +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %f64 = fsub double poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2f64 = fsub <2 x double> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v3f64 = fsub <3 x double> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4f64 = fsub <4 x double> poison, poison +; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v5f64 = fsub <5 x double> poison, poison ; NOPACKEDF32-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f64 = fsub double undef, undef - %v2f64 = fsub <2 x double> undef, undef - %v3f64 = fsub <3 x double> undef, undef - %v4f64 = fsub <4 x double> undef, undef - %v5f64 = fsub <5 x double> undef, undef + %f64 = fsub double poison, poison + %v2f64 = fsub <2 x double> poison, poison + %v3f64 = fsub <3 x double> poison, poison + %v4f64 = fsub <4 x double> poison, poison + %v5f64 = fsub <5 x double> poison, poison ret void } define amdgpu_kernel void @fsub_f16() #0 { ; FASTF16-LABEL: 'fsub_f16' -; FASTF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fsub half undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fsub <2 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fsub <3 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fsub <4 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fsub <5 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fsub <16 x half> undef, undef -; FASTF16-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fsub <17 x half> undef, undef +; FASTF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fsub half poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fsub <2 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fsub <3 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fsub <4 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fsub <5 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fsub <16 x half> poison, poison +; FASTF16-NEXT: Cost Model: Found an estimated cost of 48 for 
instruction: %v17f16 = fsub <17 x half> poison, poison ; FASTF16-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; SLOWF64-LABEL: 'fsub_f16' -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fsub half undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fsub <2 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fsub <3 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fsub <4 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fsub <5 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fsub <16 x half> undef, undef -; SLOWF64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fsub <17 x half> undef, undef +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fsub half poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fsub <2 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fsub <3 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fsub <4 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fsub <5 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fsub <16 x half> poison, poison +; SLOWF64-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fsub <17 x half> poison, poison ; SLOWF64-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void ; ; FASTF16-SIZE-LABEL: 'fsub_f16' -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fsub half undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fsub <2 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fsub <3 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fsub <4 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fsub <5 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fsub <16 x half> undef, undef -; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fsub <17 x half> undef, undef +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fsub half poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f16 = fsub <2 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3f16 = fsub <3 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = fsub <4 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5f16 = fsub <5 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f16 = fsub <16 x half> poison, poison +; FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17f16 = fsub <17 x half> poison, poison ; 
FASTF16-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; ; SLOWF64-SIZE-LABEL: 'fsub_f16' -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fsub half undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fsub <2 x half> undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fsub <3 x half> undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fsub <4 x half> undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fsub <5 x half> undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fsub <16 x half> undef, undef -; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fsub <17 x half> undef, undef +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16 = fsub half poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = fsub <2 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v3f16 = fsub <3 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4f16 = fsub <4 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v5f16 = fsub <5 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16f16 = fsub <16 x half> poison, poison +; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %v17f16 = fsub <17 x half> poison, poison ; SLOWF64-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %f16 = fsub half undef, undef - %v2f16 = fsub <2 x half> undef, undef - %v3f16 = fsub <3 x half> undef, undef - %v4f16 = fsub <4 x half> undef, undef - %v5f16 = fsub <5 x half> undef, undef - %v16f16 = fsub <16 x half> undef, undef - %v17f16 = fsub <17 x half> undef, undef + %f16 = fsub half poison, poison + %v2f16 = fsub <2 x half> poison, poison + %v3f16 = fsub <3 x half> poison, poison + %v4f16 = fsub <4 x half> poison, poison + %v5f16 = fsub <5 x half> poison, poison + %v16f16 = fsub <16 x half> poison, poison + %v17f16 = fsub <17 x half> poison, poison + ret void +} + +define amdgpu_kernel void @fsub_bf16() #0 { +; GFX1250-LABEL: 'fsub_bf16' +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %bf16 = fsub bfloat poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2bf16 = fsub <2 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3bf16 = fsub <3 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4bf16 = fsub <4 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5bf16 = fsub <5 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16bf16 = fsub <16 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17bf16 = fsub <17 x bfloat> poison, poison +; GFX1250-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void +; +; GFX1250-SIZE-LABEL: 'fsub_bf16' +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for 
instruction: %bf16 = fsub bfloat poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2bf16 = fsub <2 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v3bf16 = fsub <3 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4bf16 = fsub <4 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v5bf16 = fsub <5 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16bf16 = fsub <16 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v17bf16 = fsub <17 x bfloat> poison, poison +; GFX1250-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void +; + %bf16 = fsub bfloat poison, poison + %v2bf16 = fsub <2 x bfloat> poison, poison + %v3bf16 = fsub <3 x bfloat> poison, poison + %v4bf16 = fsub <4 x bfloat> poison, poison + %v5bf16 = fsub <5 x bfloat> poison, poison + %v16bf16 = fsub <16 x bfloat> poison, poison + %v17bf16 = fsub <17 x bfloat> poison, poison ret void } diff --git a/llvm/test/Analysis/GlobalsModRef/nonescaping-noalias.ll b/llvm/test/Analysis/GlobalsModRef/nonescaping-noalias.ll index eed93cf0df8ef..e2eb4f6e7b9e9 100644 --- a/llvm/test/Analysis/GlobalsModRef/nonescaping-noalias.ll +++ b/llvm/test/Analysis/GlobalsModRef/nonescaping-noalias.ll @@ -62,7 +62,7 @@ define ptr @test1_tls_noopt(ptr %coro, ptr %param) presplitcoroutine { ; CHECK-NEXT: store i32 [[V]], ptr [[PARAM]], align 4 ; CHECK-NEXT: ret ptr [[CORO]] ; CHECK: suspend: -; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.coro.end(ptr [[CORO]], i1 false, token none) +; CHECK-NEXT: call void @llvm.coro.end(ptr [[CORO]], i1 false, token none) ; CHECK-NEXT: ret ptr [[CORO]] ; entry: @@ -79,7 +79,7 @@ resume: ret ptr %coro suspend: - call i1 @llvm.coro.end(ptr %coro, i1 0, token none) + call void @llvm.coro.end(ptr %coro, i1 0, token none) ret ptr %coro } diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json index 07fde84c1541b..ae36ff54686c5 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json +++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_2D_vocab.json @@ -87,6 +87,32 @@ "Function": [1, 2], "Pointer": [3, 4], "Constant": [5, 6], - "Variable": [7, 8] + "Variable": [7, 8], + "FCMP_false": [9, 10], + "FCMP_oeq": [11, 12], + "FCMP_ogt": [13, 14], + "FCMP_oge": [15, 16], + "FCMP_olt": [17, 18], + "FCMP_ole": [19, 20], + "FCMP_one": [21, 22], + "FCMP_ord": [23, 24], + "FCMP_uno": [25, 26], + "FCMP_ueq": [27, 28], + "FCMP_ugt": [29, 30], + "FCMP_uge": [31, 32], + "FCMP_ult": [33, 34], + "FCMP_ule": [35, 36], + "FCMP_une": [37, 38], + "FCMP_true": [39, 40], + "ICMP_eq": [41, 42], + "ICMP_ne": [43, 44], + "ICMP_ugt": [45, 46], + "ICMP_uge": [47, 48], + "ICMP_ult": [49, 50], + "ICMP_ule": [51, 52], + "ICMP_sgt": [53, 54], + "ICMP_sge": [55, 56], + "ICMP_slt": [57, 58], + "ICMP_sle": [59, 60] } } diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json index 932b3a217b70c..9003dc73954aa 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json +++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_arg_vocab.json @@ -86,6 +86,32 @@ "Function": [1, 2, 3], "Pointer": [4, 5, 6], "Constant": [7, 8, 9], - "Variable": [10, 11, 12] + 
"Variable": [10, 11, 12], + "FCMP_false": [13, 14, 15], + "FCMP_oeq": [16, 17, 18], + "FCMP_ogt": [19, 20, 21], + "FCMP_oge": [22, 23, 24], + "FCMP_olt": [25, 26, 27], + "FCMP_ole": [28, 29, 30], + "FCMP_one": [31, 32, 33], + "FCMP_ord": [34, 35, 36], + "FCMP_uno": [37, 38, 39], + "FCMP_ueq": [40, 41, 42], + "FCMP_ugt": [43, 44, 45], + "FCMP_uge": [46, 47, 48], + "FCMP_ult": [49, 50, 51], + "FCMP_ule": [52, 53, 54], + "FCMP_une": [55, 56, 57], + "FCMP_true": [58, 59, 60], + "ICMP_eq": [61, 62, 63], + "ICMP_ne": [64, 65, 66], + "ICMP_ugt": [67, 68, 69], + "ICMP_uge": [70, 71, 72], + "ICMP_ult": [73, 74, 75], + "ICMP_ule": [76, 77, 78], + "ICMP_sgt": [79, 80, 81], + "ICMP_sge": [82, 83, 84], + "ICMP_slt": [85, 86, 87], + "ICMP_sle": [88, 89, 90] } } diff --git a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json index 19f3efee9f6a1..7ef85490b27df 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json +++ b/llvm/test/Analysis/IR2Vec/Inputs/dummy_3D_nonzero_opc_vocab.json @@ -47,6 +47,7 @@ "FPTrunc": [133, 134, 135], "FPExt": [136, 137, 138], "PtrToInt": [139, 140, 141], + "PtrToAddr": [202, 203, 204], "IntToPtr": [142, 143, 144], "BitCast": [145, 146, 147], "AddrSpaceCast": [148, 149, 150], @@ -86,6 +87,32 @@ "Function": [0, 0, 0], "Pointer": [0, 0, 0], "Constant": [0, 0, 0], - "Variable": [0, 0, 0] + "Variable": [0, 0, 0], + "FCMP_false": [0, 0, 0], + "FCMP_oeq": [0, 0, 0], + "FCMP_ogt": [0, 0, 0], + "FCMP_oge": [0, 0, 0], + "FCMP_olt": [0, 0, 0], + "FCMP_ole": [0, 0, 0], + "FCMP_one": [0, 0, 0], + "FCMP_ord": [0, 0, 0], + "FCMP_uno": [0, 0, 0], + "FCMP_ueq": [0, 0, 0], + "FCMP_ugt": [0, 0, 0], + "FCMP_uge": [0, 0, 0], + "FCMP_ult": [0, 0, 0], + "FCMP_ule": [0, 0, 0], + "FCMP_une": [0, 0, 0], + "FCMP_true": [0, 0, 0], + "ICMP_eq": [0, 0, 0], + "ICMP_ne": [0, 0, 0], + "ICMP_ugt": [0, 0, 0], + "ICMP_uge": [0, 0, 0], + "ICMP_ult": [0, 0, 0], + "ICMP_ule": [0, 0, 0], + "ICMP_sgt": [1, 1, 1], + "ICMP_sge": [0, 0, 0], + "ICMP_slt": [0, 0, 0], + "ICMP_sle": [0, 0, 0] } } diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt index df7769c9c6a65..d62b0dd157b0b 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt +++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_default_vocab_print.txt @@ -82,3 +82,29 @@ Key: Function: [ 0.20 0.40 ] Key: Pointer: [ 0.60 0.80 ] Key: Constant: [ 1.00 1.20 ] Key: Variable: [ 1.40 1.60 ] +Key: FCMP_false: [ 1.80 2.00 ] +Key: FCMP_oeq: [ 2.20 2.40 ] +Key: FCMP_ogt: [ 2.60 2.80 ] +Key: FCMP_oge: [ 3.00 3.20 ] +Key: FCMP_olt: [ 3.40 3.60 ] +Key: FCMP_ole: [ 3.80 4.00 ] +Key: FCMP_one: [ 4.20 4.40 ] +Key: FCMP_ord: [ 4.60 4.80 ] +Key: FCMP_uno: [ 5.00 5.20 ] +Key: FCMP_ueq: [ 5.40 5.60 ] +Key: FCMP_ugt: [ 5.80 6.00 ] +Key: FCMP_uge: [ 6.20 6.40 ] +Key: FCMP_ult: [ 6.60 6.80 ] +Key: FCMP_ule: [ 7.00 7.20 ] +Key: FCMP_une: [ 7.40 7.60 ] +Key: FCMP_true: [ 7.80 8.00 ] +Key: ICMP_eq: [ 8.20 8.40 ] +Key: ICMP_ne: [ 8.60 8.80 ] +Key: ICMP_ugt: [ 9.00 9.20 ] +Key: ICMP_uge: [ 9.40 9.60 ] +Key: ICMP_ult: [ 9.80 10.00 ] +Key: ICMP_ule: [ 10.20 10.40 ] +Key: ICMP_sgt: [ 10.60 10.80 ] +Key: ICMP_sge: [ 11.00 11.20 ] +Key: ICMP_slt: [ 11.40 11.60 ] +Key: ICMP_sle: [ 11.80 12.00 ] diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt index f3ce809fd2fd2..e443adb17ac78 100644 --- 
a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt +++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd1_vocab_print.txt @@ -82,3 +82,29 @@ Key: Function: [ 0.50 1.00 ] Key: Pointer: [ 1.50 2.00 ] Key: Constant: [ 2.50 3.00 ] Key: Variable: [ 3.50 4.00 ] +Key: FCMP_false: [ 4.50 5.00 ] +Key: FCMP_oeq: [ 5.50 6.00 ] +Key: FCMP_ogt: [ 6.50 7.00 ] +Key: FCMP_oge: [ 7.50 8.00 ] +Key: FCMP_olt: [ 8.50 9.00 ] +Key: FCMP_ole: [ 9.50 10.00 ] +Key: FCMP_one: [ 10.50 11.00 ] +Key: FCMP_ord: [ 11.50 12.00 ] +Key: FCMP_uno: [ 12.50 13.00 ] +Key: FCMP_ueq: [ 13.50 14.00 ] +Key: FCMP_ugt: [ 14.50 15.00 ] +Key: FCMP_uge: [ 15.50 16.00 ] +Key: FCMP_ult: [ 16.50 17.00 ] +Key: FCMP_ule: [ 17.50 18.00 ] +Key: FCMP_une: [ 18.50 19.00 ] +Key: FCMP_true: [ 19.50 20.00 ] +Key: ICMP_eq: [ 20.50 21.00 ] +Key: ICMP_ne: [ 21.50 22.00 ] +Key: ICMP_ugt: [ 22.50 23.00 ] +Key: ICMP_uge: [ 23.50 24.00 ] +Key: ICMP_ult: [ 24.50 25.00 ] +Key: ICMP_ule: [ 25.50 26.00 ] +Key: ICMP_sgt: [ 26.50 27.00 ] +Key: ICMP_sge: [ 27.50 28.00 ] +Key: ICMP_slt: [ 28.50 29.00 ] +Key: ICMP_sle: [ 29.50 30.00 ] diff --git a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt index 72b25b9bd3d9c..7fb6043552f7b 100644 --- a/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt +++ b/llvm/test/Analysis/IR2Vec/Inputs/reference_wtd2_vocab_print.txt @@ -82,3 +82,29 @@ Key: Function: [ 0.00 0.00 ] Key: Pointer: [ 0.00 0.00 ] Key: Constant: [ 0.00 0.00 ] Key: Variable: [ 0.00 0.00 ] +Key: FCMP_false: [ 0.00 0.00 ] +Key: FCMP_oeq: [ 0.00 0.00 ] +Key: FCMP_ogt: [ 0.00 0.00 ] +Key: FCMP_oge: [ 0.00 0.00 ] +Key: FCMP_olt: [ 0.00 0.00 ] +Key: FCMP_ole: [ 0.00 0.00 ] +Key: FCMP_one: [ 0.00 0.00 ] +Key: FCMP_ord: [ 0.00 0.00 ] +Key: FCMP_uno: [ 0.00 0.00 ] +Key: FCMP_ueq: [ 0.00 0.00 ] +Key: FCMP_ugt: [ 0.00 0.00 ] +Key: FCMP_uge: [ 0.00 0.00 ] +Key: FCMP_ult: [ 0.00 0.00 ] +Key: FCMP_ule: [ 0.00 0.00 ] +Key: FCMP_une: [ 0.00 0.00 ] +Key: FCMP_true: [ 0.00 0.00 ] +Key: ICMP_eq: [ 0.00 0.00 ] +Key: ICMP_ne: [ 0.00 0.00 ] +Key: ICMP_ugt: [ 0.00 0.00 ] +Key: ICMP_uge: [ 0.00 0.00 ] +Key: ICMP_ult: [ 0.00 0.00 ] +Key: ICMP_ule: [ 0.00 0.00 ] +Key: ICMP_sgt: [ 0.00 0.00 ] +Key: ICMP_sge: [ 0.00 0.00 ] +Key: ICMP_slt: [ 0.00 0.00 ] +Key: ICMP_sle: [ 0.00 0.00 ] diff --git a/llvm/test/Analysis/IR2Vec/if-else.ll b/llvm/test/Analysis/IR2Vec/if-else.ll index fe532479086d3..804c1ca5cb6f6 100644 --- a/llvm/test/Analysis/IR2Vec/if-else.ll +++ b/llvm/test/Analysis/IR2Vec/if-else.ll @@ -29,7 +29,7 @@ return: ; preds = %if.else, %if.then ; CHECK: Basic block vectors: ; CHECK-NEXT: Basic block: entry: -; CHECK-NEXT: [ 816.00 825.00 834.00 ] +; CHECK-NEXT: [ 816.20 825.20 834.20 ] ; CHECK-NEXT: Basic block: if.then: ; CHECK-NEXT: [ 195.00 198.00 201.00 ] ; CHECK-NEXT: Basic block: if.else: diff --git a/llvm/test/Analysis/IR2Vec/unreachable.ll b/llvm/test/Analysis/IR2Vec/unreachable.ll index b0e3e49978018..9be0ee1c2de7a 100644 --- a/llvm/test/Analysis/IR2Vec/unreachable.ll +++ b/llvm/test/Analysis/IR2Vec/unreachable.ll @@ -33,7 +33,7 @@ return: ; preds = %if.else, %if.then ; CHECK: Basic block vectors: ; CHECK-NEXT: Basic block: entry: -; CHECK-NEXT: [ 816.00 825.00 834.00 ] +; CHECK-NEXT: [ 816.20 825.20 834.20 ] ; CHECK-NEXT: Basic block: if.then: ; CHECK-NEXT: [ 195.00 198.00 201.00 ] ; CHECK-NEXT: Basic block: if.else: diff --git a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll index 
023a8c056968f..27a85c7a46084 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll @@ -560,3 +560,44 @@ loop: exit: ret void } + +; TODO: Relax HasSameSize check in isSafeDependenceDistance. +define void @different_type_sizes_safe_dep_dist(i16 %n, ptr %p) { +; CHECK-LABEL: 'different_type_sizes_safe_dep_dist' +; CHECK-NEXT: loop: +; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop +; CHECK-NEXT: Unknown data dependence. +; CHECK-NEXT: Dependences: +; CHECK-NEXT: Unknown: +; CHECK-NEXT: store i32 0, ptr %gep.iv, align 1 -> +; CHECK-NEXT: store i16 1, ptr %gep.off.iv, align 1 +; CHECK-EMPTY: +; CHECK-NEXT: Run-time memory checks: +; CHECK-NEXT: Grouped accesses: +; CHECK-EMPTY: +; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. +; CHECK-NEXT: SCEV assumptions: +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; +entry: + %n.pos = icmp sgt i16 %n, 0 + br i1 %n.pos, label %ph, label %exit + +ph: + %gep.off = getelementptr i32, ptr %p, i16 %n + br label %loop + +loop: + %iv = phi i16 [ 0, %ph ], [ %iv.next, %loop ] + %gep.iv = getelementptr inbounds i32, ptr %p, i16 %iv + store i32 0, ptr %gep.iv, align 1 + %gep.off.iv = getelementptr i32, ptr %gep.off, i16 %iv + store i16 1, ptr %gep.off.iv, align 1 + %iv.next = add i16 %iv, 1 + %exit.cond = icmp eq i16 %iv.next, %n + br i1 %exit.cond, label %exit, label %loop + +exit: + ret void +} diff --git a/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll index 207a44d5d08d4..6d9aa8d1ea32b 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll @@ -565,6 +565,138 @@ e.2: ret void } +define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_multiple_predecessors(ptr %A, ptr %B, i1 %c) nosync nofree { +; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_multiple_predecessors' +; CHECK-NEXT: loop.header: +; CHECK-NEXT: Memory dependences are safe with run-time checks +; CHECK-NEXT: Dependences: +; CHECK-NEXT: Run-time memory checks: +; CHECK-NEXT: Check 0: +; CHECK-NEXT: Comparing group GRP0: +; CHECK-NEXT: %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv +; CHECK-NEXT: Against group GRP1: +; CHECK-NEXT: %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv +; CHECK-NEXT: Grouped accesses: +; CHECK-NEXT: Group GRP0: +; CHECK-NEXT: (Low: %B High: (2000 + %B)) +; CHECK-NEXT: Member: {%B,+,4}<%loop.header> +; CHECK-NEXT: Group GRP1: +; CHECK-NEXT: (Low: %A High: (2000 + %A)) +; CHECK-NEXT: Member: {%A,+,4}<%loop.header> +; CHECK-EMPTY: +; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. 
+; CHECK-NEXT: SCEV assumptions: +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; +entry: + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %A, i64 2000) ] + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %B, i64 2000) ] + br i1 %c, label %then, label %else + +then: + br label %loop.header + +else: + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %then ], [ 0, %else ], [ %iv.next, %latch ] + %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv + %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv + %l = load i32, ptr %gep.A, align 4 + store i32 0, ptr %gep.B, align 4 + %cntable.c.1 = icmp ult i64 %iv, 1000 + %iv.next = add nuw nsw i64 %iv, 1 + br i1 %cntable.c.1, label %b2, label %e.1 + +b2: + %uncntable.c.0 = icmp eq i32 %l, 0 + br i1 %uncntable.c.0, label %e.2, label %b3 + +b3: + %cntable.c.2 = icmp eq i64 %iv.next, 500 + br i1 %cntable.c.2, label %cleanup4, label %latch + +latch: + br label %loop.header + +cleanup4: + ret void + +e.1: + ret void + +e.2: + ret void +} + +define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_multiple_predecessors_no_valid(ptr %A, ptr %B, i1 %c) nosync nofree { +; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_multiple_predecessors_no_valid' +; CHECK-NEXT: loop.header: +; CHECK-NEXT: Memory dependences are safe with run-time checks +; CHECK-NEXT: Dependences: +; CHECK-NEXT: Run-time memory checks: +; CHECK-NEXT: Check 0: +; CHECK-NEXT: Comparing group GRP0: +; CHECK-NEXT: %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv +; CHECK-NEXT: Against group GRP1: +; CHECK-NEXT: %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv +; CHECK-NEXT: Grouped accesses: +; CHECK-NEXT: Group GRP0: +; CHECK-NEXT: (Low: %B High: inttoptr (i64 -1 to ptr)) +; CHECK-NEXT: Member: {%B,+,4}<%loop.header> +; CHECK-NEXT: Group GRP1: +; CHECK-NEXT: (Low: %A High: (2000 + %A)) +; CHECK-NEXT: Member: {%A,+,4}<%loop.header> +; CHECK-EMPTY: +; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. 
+; CHECK-NEXT: SCEV assumptions: +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; +entry: + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %A, i64 2000) ] + br i1 %c, label %then, label %else + +then: + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %B, i64 2000) ] + br label %loop.header + +else: + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %then ], [ 0, %else ], [ %iv.next, %latch ] + %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv + %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv + %l = load i32, ptr %gep.A, align 4 + store i32 0, ptr %gep.B, align 4 + %cntable.c.1 = icmp ult i64 %iv, 1000 + %iv.next = add nuw nsw i64 %iv, 1 + br i1 %cntable.c.1, label %b2, label %e.1 + +b2: + %uncntable.c.0 = icmp eq i32 %l, 0 + br i1 %uncntable.c.0, label %e.2, label %b3 + +b3: + %cntable.c.2 = icmp eq i64 %iv.next, 500 + br i1 %cntable.c.2, label %cleanup4, label %latch + +latch: + br label %loop.header + +cleanup4: + ret void + +e.1: + ret void + +e.2: + ret void +} + define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_deref_via_assumption_too_small(ptr %A, ptr %B) nosync nofree { ; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations_deref_via_assumption_too_small' ; CHECK-NEXT: loop.header: @@ -624,3 +756,129 @@ e.1: e.2: ret void } + +define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_nofree_via_context(ptr %A, ptr %B) nosync { +; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_nofree_via_context' +; CHECK-NEXT: loop.header: +; CHECK-NEXT: Memory dependences are safe with run-time checks +; CHECK-NEXT: Dependences: +; CHECK-NEXT: Run-time memory checks: +; CHECK-NEXT: Check 0: +; CHECK-NEXT: Comparing group GRP0: +; CHECK-NEXT: %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv +; CHECK-NEXT: Against group GRP1: +; CHECK-NEXT: %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv +; CHECK-NEXT: Grouped accesses: +; CHECK-NEXT: Group GRP0: +; CHECK-NEXT: (Low: %B High: inttoptr (i64 -1 to ptr)) +; CHECK-NEXT: Member: {%B,+,4}<%loop.header> +; CHECK-NEXT: Group GRP1: +; CHECK-NEXT: (Low: %A High: inttoptr (i64 -1 to ptr)) +; CHECK-NEXT: Member: {%A,+,4}<%loop.header> +; CHECK-EMPTY: +; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. 
+; CHECK-NEXT: SCEV assumptions: +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; +entry: + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %A, i64 2000) ] + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %B, i64 2000) ] + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ] + %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv + %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv + %l = load i32, ptr %gep.A, align 4 + store i32 0, ptr %gep.B, align 4 + %cntable.c.1 = icmp ult i64 %iv, 1000 + %iv.next = add nuw nsw i64 %iv, 1 + br i1 %cntable.c.1, label %b2, label %e.1 + +b2: + %uncntable.c.0 = icmp eq i32 %l, 0 + br i1 %uncntable.c.0, label %e.2, label %b3 + +b3: + %cntable.c.2 = icmp eq i64 %iv.next, 500 + br i1 %cntable.c.2, label %cleanup4, label %latch + +latch: + br label %loop.header + +cleanup4: + ret void + +e.1: + ret void + +e.2: + ret void +} + +define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_missing_nofree_multiple_predecessors(ptr %A, ptr %B, i1 %c) nosync { +; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_missing_nofree_multiple_predecessors' +; CHECK-NEXT: loop.header: +; CHECK-NEXT: Memory dependences are safe with run-time checks +; CHECK-NEXT: Dependences: +; CHECK-NEXT: Run-time memory checks: +; CHECK-NEXT: Check 0: +; CHECK-NEXT: Comparing group GRP0: +; CHECK-NEXT: %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv +; CHECK-NEXT: Against group GRP1: +; CHECK-NEXT: %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv +; CHECK-NEXT: Grouped accesses: +; CHECK-NEXT: Group GRP0: +; CHECK-NEXT: (Low: %B High: inttoptr (i64 -1 to ptr)) +; CHECK-NEXT: Member: {%B,+,4}<%loop.header> +; CHECK-NEXT: Group GRP1: +; CHECK-NEXT: (Low: %A High: inttoptr (i64 -1 to ptr)) +; CHECK-NEXT: Member: {%A,+,4}<%loop.header> +; CHECK-EMPTY: +; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. 
+; CHECK-NEXT: SCEV assumptions: +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; +entry: + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %A, i64 2000) ] + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %B, i64 2000) ] + br i1 %c, label %then, label %else + +then: + br label %loop.header + +else: + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %then ], [ 0, %else ], [ %iv.next, %latch ] + %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv + %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv + %l = load i32, ptr %gep.A, align 4 + store i32 0, ptr %gep.B, align 4 + %cntable.c.1 = icmp ult i64 %iv, 1000 + %iv.next = add nuw nsw i64 %iv, 1 + br i1 %cntable.c.1, label %b2, label %e.1 + +b2: + %uncntable.c.0 = icmp eq i32 %l, 0 + br i1 %uncntable.c.0, label %e.2, label %b3 + +b3: + %cntable.c.2 = icmp eq i64 %iv.next, 500 + br i1 %cntable.c.2, label %cleanup4, label %latch + +latch: + br label %loop.header + +cleanup4: + ret void + +e.1: + ret void + +e.2: + ret void +} diff --git a/llvm/test/Analysis/LoopAccessAnalysis/inbounds-gep-in-predicated-blocks.ll b/llvm/test/Analysis/LoopAccessAnalysis/inbounds-gep-in-predicated-blocks.ll new file mode 100644 index 0000000000000..6eed0ec864820 --- /dev/null +++ b/llvm/test/Analysis/LoopAccessAnalysis/inbounds-gep-in-predicated-blocks.ll @@ -0,0 +1,98 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes='print' -disable-output %s 2>&1 | FileCheck %s + +; unsigned long long s0 = 0, s1 = 0; +; for (int i = 0; i < 100; i++) { +; if (i % 4 == 0) { +; A[s0] = 2; // A[0], A[4], A[8], A[12], ... +; A[s1] = 1; // A[0], A[8], A[16], A[24], ... +; } +; s0 += (1ULL << 62) + 1; +; s1 += (1ULL << 62) + 2; +; } +; FIXME: We cannot use inbounds on idx.0, idx.1 to infer no-wrap (and determine +; there are no dependences), as the pointers are not dereferenced in all loop iterations. +define void @test_inbounds_gep_used_in_predicated_block(ptr %A, i64 %n) { +; CHECK-LABEL: 'test_inbounds_gep_used_in_predicated_block' +; CHECK-NEXT: loop.header: +; CHECK-NEXT: Memory dependences are safe +; CHECK-NEXT: Dependences: +; CHECK-NEXT: Run-time memory checks: +; CHECK-NEXT: Grouped accesses: +; CHECK-EMPTY: +; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. 
+; CHECK-NEXT: SCEV assumptions: +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; +entry: + br label %loop.header + +loop.header: + %i = phi i64 [ 0, %entry ], [ %i.next, %loop.latch ] + %offset.0 = phi i64 [ 0, %entry ], [ %offset.0.next, %loop.latch ] + %offset.1 = phi i64 [ 0, %entry ], [ %offset.1.next, %loop.latch ] + %idx.0 = getelementptr inbounds i8, ptr %A, i64 %offset.0 + %idx.1 = getelementptr inbounds i8, ptr %A, i64 %offset.1 + %mask = and i64 %i, 3 + %cond = icmp eq i64 %mask, 0 + br i1 %cond, label %if.then, label %loop.latch + +if.then: + store i8 2, ptr %idx.0 + store i8 1, ptr %idx.1 + br label %loop.latch + +loop.latch: + %i.next = add nuw nsw i64 %i, 1 + %offset.0.next = add i64 %offset.0, 4611686018427387905 ; 2^62 + 1 + %offset.1.next = add i64 %offset.1, 4611686018427387906 ; 2^62 + 2 + %cond.exit = icmp eq i64 %i.next, 100 + br i1 %cond.exit, label %exit, label %loop.header + +exit: + ret void +} + +define void @test_header_existing(ptr %src, ptr %dst, i64 %start) { +; CHECK-LABEL: 'test_header_existing' +; CHECK-NEXT: loop.header: +; CHECK-NEXT: Memory dependences are safe with run-time checks +; CHECK-NEXT: Dependences: +; CHECK-NEXT: Run-time memory checks: +; CHECK-NEXT: Check 0: +; CHECK-NEXT: Comparing group GRP0: +; CHECK-NEXT: ptr %dst +; CHECK-NEXT: Against group GRP1: +; CHECK-NEXT: %gep.src = getelementptr nusw { i8, i8, i32 }, ptr %src, i64 %iv.next +; CHECK-NEXT: Grouped accesses: +; CHECK-NEXT: Group GRP0: +; CHECK-NEXT: (Low: %dst High: (1 + %dst)) +; CHECK-NEXT: Member: %dst +; CHECK-NEXT: Group GRP1: +; CHECK-NEXT: (Low: (8 + (8 * %start) + %src) High: (809 + %src)) +; CHECK-NEXT: Member: {(8 + (8 * %start) + %src),+,8}<%loop.header> +; CHECK-EMPTY: +; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. 
+; CHECK-NEXT: SCEV assumptions: +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; +entry: + br label %loop.header + +loop.header: + %iv = phi i64 [ %start, %entry ], [ %iv.next, %loop.latch ] + %ec = icmp eq i64 %iv, 100 + br i1 %ec, label %exit, label %loop.latch + +loop.latch: + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr nusw { i8, i8, i32 }, ptr %src, i64 %iv.next + %l = load i8, ptr %gep.src, align 1 + store i8 %l, ptr %dst, align 1 + br label %loop.header + +exit: + ret void +} diff --git a/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll b/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll index 1e21fbf08a92f..e1c62309142d0 100644 --- a/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll +++ b/llvm/test/Analysis/ScalarEvolution/mul-udiv-folds.ll @@ -188,3 +188,43 @@ loop: exit: ret void } + +define noundef i64 @udiv_mul_common_vscale_factor(i64 %a, i64 %b) { +; CHECK-LABEL: 'udiv_mul_common_vscale_factor' +; CHECK-NEXT: Classifying expressions for: @udiv_mul_common_vscale_factor +; CHECK-NEXT: %vs = call i64 @llvm.vscale.i64() +; CHECK-NEXT: --> vscale U: [1,0) S: [1,0) +; CHECK-NEXT: %a.vs = mul i64 %a, %vs +; CHECK-NEXT: --> (vscale * %a) U: full-set S: full-set +; CHECK-NEXT: %b.vs = mul i64 %b, %vs +; CHECK-NEXT: --> (vscale * %b) U: full-set S: full-set +; CHECK-NEXT: %div = udiv i64 %a.vs, %b.vs +; CHECK-NEXT: --> ((vscale * %a) /u (vscale * %b)) U: full-set S: full-set +; CHECK-NEXT: Determining loop execution counts for: @udiv_mul_common_vscale_factor +; + %vs = call i64 @llvm.vscale() + %a.vs = mul i64 %a, %vs + %b.vs = mul i64 %b, %vs + %div = udiv i64 %a.vs, %b.vs + ret i64 %div +} + +define noundef i64 @udiv_mul_nuw_common_vscale_factor(i64 %a, i64 %b) { +; CHECK-LABEL: 'udiv_mul_nuw_common_vscale_factor' +; CHECK-NEXT: Classifying expressions for: @udiv_mul_nuw_common_vscale_factor +; CHECK-NEXT: %vs = call i64 @llvm.vscale.i64() +; CHECK-NEXT: --> vscale U: [1,0) S: [1,0) +; CHECK-NEXT: %a.vs = mul nuw i64 %a, %vs +; CHECK-NEXT: --> (vscale * %a) U: full-set S: full-set +; CHECK-NEXT: %b.vs = mul nuw i64 %b, %vs +; CHECK-NEXT: --> (vscale * %b) U: full-set S: full-set +; CHECK-NEXT: %div = udiv i64 %a.vs, %b.vs +; CHECK-NEXT: --> (%a /u %b) U: full-set S: full-set +; CHECK-NEXT: Determining loop execution counts for: @udiv_mul_nuw_common_vscale_factor +; + %vs = call i64 @llvm.vscale() + %a.vs = mul nuw i64 %a, %vs + %b.vs = mul nuw i64 %b, %vs + %div = udiv i64 %a.vs, %b.vs + ret i64 %div +} diff --git a/llvm/test/Analysis/ScalarEvolution/trip-multiple-guard-info.ll b/llvm/test/Analysis/ScalarEvolution/trip-multiple-guard-info.ll index bf140c7fa216a..b1fe7b1b2b7ee 100644 --- a/llvm/test/Analysis/ScalarEvolution/trip-multiple-guard-info.ll +++ b/llvm/test/Analysis/ScalarEvolution/trip-multiple-guard-info.ll @@ -574,5 +574,164 @@ exit: ret void } +define void @test_ptr_aligned_by_2_and_4_via_assumption(ptr %start, ptr %end) { +; CHECK-LABEL: 'test_ptr_aligned_by_2_and_4_via_assumption' +; CHECK-NEXT: Classifying expressions for: @test_ptr_aligned_by_2_and_4_via_assumption +; CHECK-NEXT: %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {%start,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = getelementptr i8, ptr %iv, i64 4 +; CHECK-NEXT: --> {(4 + %start),+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_ptr_aligned_by_2_and_4_via_assumption +; CHECK-NEXT: Loop 
%loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. +; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4611686018427387903 +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %start, i64 2) ] + call void @llvm.assume(i1 true) [ "align"(ptr %end, i64 4) ] + br label %loop + +loop: + %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] + store ptr %iv, ptr %iv + %iv.next = getelementptr i8, ptr %iv, i64 4 + %ec = icmp ne ptr %iv.next, %end + br i1 %ec, label %loop, label %exit + +exit: + ret void +} + +define void @test_ptrs_aligned_by_4_via_assumption(ptr %start, ptr %end) { +; CHECK-LABEL: 'test_ptrs_aligned_by_4_via_assumption' +; CHECK-NEXT: Classifying expressions for: @test_ptrs_aligned_by_4_via_assumption +; CHECK-NEXT: %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {%start,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = getelementptr i8, ptr %iv, i64 4 +; CHECK-NEXT: --> {(4 + %start),+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_ptrs_aligned_by_4_via_assumption +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. 
+; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4611686018427387903 +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %start, i64 4) ] + call void @llvm.assume(i1 true) [ "align"(ptr %end, i64 4) ] + br label %loop + +loop: + %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] + store ptr %iv, ptr %iv + %iv.next = getelementptr i8, ptr %iv, i64 4 + %ec = icmp ne ptr %iv.next, %end + br i1 %ec, label %loop, label %exit + +exit: + ret void +} + +define void @test_ptrs_aligned_by_8_via_assumption(ptr %start, ptr %end) { +; CHECK-LABEL: 'test_ptrs_aligned_by_8_via_assumption' +; CHECK-NEXT: Classifying expressions for: @test_ptrs_aligned_by_8_via_assumption +; CHECK-NEXT: %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {%start,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = getelementptr i8, ptr %iv, i64 4 +; CHECK-NEXT: --> {(4 + %start),+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_ptrs_aligned_by_8_via_assumption +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. 
+; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4611686018427387903 +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %start, i64 8) ] + call void @llvm.assume(i1 true) [ "align"(ptr %end, i64 8) ] + br label %loop + +loop: + %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] + store ptr %iv, ptr %iv + %iv.next = getelementptr i8, ptr %iv, i64 4 + %ec = icmp ne ptr %iv.next, %end + br i1 %ec, label %loop, label %exit + +exit: + ret void +} + +declare i1 @cond() + +define void @test_ptr_aligned_by_4_via_assumption_multiple_loop_predecessors(ptr %start, ptr %end) { +; CHECK-LABEL: 'test_ptr_aligned_by_4_via_assumption_multiple_loop_predecessors' +; CHECK-NEXT: Classifying expressions for: @test_ptr_aligned_by_4_via_assumption_multiple_loop_predecessors +; CHECK-NEXT: %c = call i1 @cond() +; CHECK-NEXT: --> %c U: full-set S: full-set +; CHECK-NEXT: %iv = phi ptr [ %start, %then ], [ %start, %else ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {%start,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = getelementptr i8, ptr %iv, i64 4 +; CHECK-NEXT: --> {(4 + %start),+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_ptr_aligned_by_4_via_assumption_multiple_loop_predecessors +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. 
+; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4611686018427387903 +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %start, i64 2) ] + call void @llvm.assume(i1 true) [ "align"(ptr %end, i64 4) ] + %c = call i1 @cond() + br i1 %c, label %then, label %else + +then: + br label %loop + +else: + br label %loop + +loop: + %iv = phi ptr [ %start, %then] , [ %start, %else ], [ %iv.next, %loop ] + store ptr %iv, ptr %iv + %iv.next = getelementptr i8, ptr %iv, i64 4 + %ec = icmp ne ptr %iv.next, %end + br i1 %ec, label %loop, label %exit + +exit: + ret void +} + declare void @llvm.assume(i1) declare void @llvm.experimental.guard(i1, ...) diff --git a/llvm/test/Assembler/auto_upgrade_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_intrinsics.ll index 37cb49650f6bd..64d4a3ba7c802 100644 --- a/llvm/test/Assembler/auto_upgrade_intrinsics.ll +++ b/llvm/test/Assembler/auto_upgrade_intrinsics.ll @@ -47,11 +47,11 @@ entry: ret void } -declare i1 @llvm.coro.end(ptr, i1) +declare void @llvm.coro.end(ptr, i1) define void @test.coro.end(ptr %ptr) { ; CHECK-LABEL: @test.coro.end( -; CHECK: call i1 @llvm.coro.end(ptr %ptr, i1 false, token none) - call i1 @llvm.coro.end(ptr %ptr, i1 false) +; CHECK: call void @llvm.coro.end(ptr %ptr, i1 false, token none) + call void @llvm.coro.end(ptr %ptr, i1 false) ret void } diff --git a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll index d860104b9cb3d..5628e17b4936e 100644 --- a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll +++ b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll @@ -13,6 +13,7 @@ ; CHECK-NEXT: ) = COPY $d0 + ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>) + ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16) + ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32) + ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0) + ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0 + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.1) + ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[LOAD]](s32) + ; CHECK-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32) + ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16) + ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = 
G_FRAME_INDEX %stack.0 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $s0 = COPY [[FPEXT1]](s32) + ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX1]](p0) + ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0 + ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s32) from %stack.0) + ; CHECK-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[LOAD1]](s32) + ; CHECK-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32) + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC1]](s16), [[FPTRUNC3]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC2]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>) + ; CHECK-NEXT: $d1 = COPY [[BUILD_VECTOR1]](<4 x s16>) + ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1 + %1:_(<4 x s16>) = COPY $d0 + %0:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %1(<4 x s16>) + %3:_(<2 x s16>), %4:_(<2 x s16>) = G_FMODF %0 + %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %3(<2 x s16>) + %7:_(s16) = G_IMPLICIT_DEF + %8:_(<4 x s16>) = G_BUILD_VECTOR %5(s16), %6(s16), %7(s16), %7(s16) + %9:_(s16), %10:_(s16) = G_UNMERGE_VALUES %4(<2 x s16>) + %11:_(<4 x s16>) = G_BUILD_VECTOR %9(s16), %10(s16), %7(s16), %7(s16) + $d0 = COPY %8(<4 x s16>) + $d1 = COPY %11(<4 x s16>) + RET_ReallyLR implicit $d0, implicit $d1 +... +--- +name: test_modf_v3f32 +body: | + bb.0.entry: + ; CHECK-LABEL: name: test_modf_v3f32 + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0 + ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>) + ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST]](<4 x s32>) + ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.2 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $s0 = COPY [[UV]](s32) + ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0) + ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0 + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.2) + ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $s0 = COPY [[UV1]](s32) + ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX1]](p0) + ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0 + ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s32) from %stack.1) + ; CHECK-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $s0 = COPY [[UV2]](s32) + ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX2]](p0) + ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, 
implicit-def $s0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0 + ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (s32) from %stack.0) + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[DEF]](s32) + ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[DEF]](s32) + ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>) + ; CHECK-NEXT: $q1 = COPY [[BUILD_VECTOR1]](<4 x s32>) + ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1 + %1:_(<2 x s64>) = COPY $q0 + %2:_(<4 x s32>) = G_BITCAST %1(<2 x s64>) + %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %2(<4 x s32>) + %0:_(<3 x s32>) = G_BUILD_VECTOR %3(s32), %4(s32), %5(s32) + %7:_(<3 x s32>), %8:_(<3 x s32>) = G_FMODF %0 + %9:_(s32), %10:_(s32), %11:_(s32) = G_UNMERGE_VALUES %7(<3 x s32>) + %12:_(s32) = G_IMPLICIT_DEF + %13:_(<4 x s32>) = G_BUILD_VECTOR %9(s32), %10(s32), %11(s32), %12(s32) + %14:_(s32), %15:_(s32), %16:_(s32) = G_UNMERGE_VALUES %8(<3 x s32>) + %17:_(<4 x s32>) = G_BUILD_VECTOR %14(s32), %15(s32), %16(s32), %12(s32) + $q0 = COPY %13(<4 x s32>) + $q1 = COPY %17(<4 x s32>) + RET_ReallyLR implicit $q0, implicit $q1 +... +--- +name: test_modf_v2f64 +body: | + bb.0.entry: + ; CHECK-LABEL: name: test_modf_v2f64 + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0 + ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) + ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $d0 = COPY [[UV]](s64) + ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0) + ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $d0 + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.1) + ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $d0 = COPY [[UV1]](s64) + ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX1]](p0) + ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0 + ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s64) from %stack.0) + ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY1]](s64), [[COPY2]](s64) + ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64) + ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>) + ; CHECK-NEXT: $q1 = COPY [[BUILD_VECTOR1]](<2 x s64>) + ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1 + %0:_(<2 x s64>) = COPY $q0 + %1:_(<2 x s64>), %2:_(<2 x s64>) = G_FMODF %0 + $q0 = COPY %1(<2 x s64>) + $q1 = COPY %2(<2 x s64>) + RET_ReallyLR implicit $q0, implicit $q1 +... 
+--- +name: test_modf_fp128 +body: | + bb.0.entry: + ; CHECK-LABEL: name: test_modf_fp128 + ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0 + ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $q0 = COPY [[COPY]](s128) + ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0) + ; CHECK-NEXT: BL &modfl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $x0, implicit-def $q0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s128) = COPY $q0 + ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s128) from %stack.0) + ; CHECK-NEXT: $q0 = COPY [[COPY1]](s128) + ; CHECK-NEXT: $q1 = COPY [[LOAD]](s128) + ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1 + %0:_(s128) = COPY $q0 + %1:_(s128), %2:_(s128) = G_FMODF %0 + $q0 = COPY %1(s128) + $q1 = COPY %2(s128) + RET_ReallyLR implicit $q0, implicit $q1 +... diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir index 040f97f96ee21..d721b73c2b5ba 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir @@ -508,6 +508,10 @@ # DEBUG-NEXT: G_FREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT: .. the first uncovered type index: 1, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK +# DEBUG-NEXT: G_FMODF (opcode {{[0-9]+}}): 1 type index, 0 imm indices +# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} +# DEBUG-NEXT: .. the first uncovered type index: 1, OK +# DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FPOW (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} # DEBUG-NEXT: .. the first uncovered type index: 1, OK @@ -540,8 +544,9 @@ # DEBUG-NEXT: .. the first uncovered type index: 1, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FLDEXP (opcode {{[0-9]+}}): 2 type indices, 0 imm indices -# DEBUG-NEXT:.. type index coverage check SKIPPED: no rules defined -# DEBUG-NEXT:.. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} +# DEBUG-NEXT: .. the first uncovered type index: 2, OK +# DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FFREXP (opcode {{[0-9]+}}): 2 type indices, 0 imm indices # DEBUG-NEXT:.. type index coverage check SKIPPED: no rules defined # DEBUG-NEXT:.. imm index coverage check SKIPPED: no rules defined @@ -642,6 +647,9 @@ # DEBUG-NEXT: G_GET_ROUNDING (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT:.. type index coverage check SKIPPED: no rules defined # DEBUG-NEXT:.. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_SET_ROUNDING (opcode {{[0-9]+}}): 1 type index, 0 imm indices +# DEBUG-NEXT:.. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT:.. imm index coverage check SKIPPED: no rules defined # DEBUG-NEXT: G_PTR_ADD (opcode {{[0-9]+}}): 2 type indices, 0 imm indices # DEBUG-NEXT: .. the first uncovered type index: 2, OK # DEBUG-NEXT: .. 
the first uncovered imm index: 0, OK diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/ptrauth-elf-got.mir b/llvm/test/CodeGen/AArch64/GlobalISel/ptrauth-elf-got.mir new file mode 100644 index 0000000000000..faf2cb8221ec7 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/ptrauth-elf-got.mir @@ -0,0 +1,23 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4 +# RUN: llc -O0 -mtriple=aarch64-linux-gnu -relocation-model=pic -run-pass=instruction-select -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s + +--- | + @var_got = external global i8 + define ptr @loadgotauth_implicit_defs() { ret ptr null } + + !llvm.module.flags = !{!0} + !0 = !{i32 8, !"ptrauth-elf-got", i32 1} +... + +--- +name: loadgotauth_implicit_defs +legalized: true +regBankSelected: true +body: | + bb.0: + ; CHECK-LABEL: name: loadgotauth_implicit_defs + ; CHECK: [[LOADgotAUTH:%[0-9]+]]:gpr64common = LOADgotAUTH target-flags(aarch64-got) @var_got, implicit-def $x16, implicit-def $x17, implicit-def $nzcv + ; CHECK-NEXT: $x0 = COPY [[LOADgotAUTH]] + %0:gpr(p0) = G_GLOBAL_VALUE @var_got + $x0 = COPY %0(p0) +... diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir new file mode 100644 index 0000000000000..604cb96e38dc3 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir @@ -0,0 +1,136 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -run-pass=instruction-select %s -o - | FileCheck %s +--- +name: test_modf_fp128 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +liveins: + - { reg: '$q0' } +frameInfo: + maxAlignment: 16 +stack: + - { id: 0, size: 16, alignment: 16 } +body: | + bb.1: + liveins: $q0 + + ; CHECK-LABEL: name: test_modf_fp128 + ; CHECK: liveins: $q0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0 + ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $q0 = COPY [[COPY]] + ; CHECK-NEXT: $x0 = COPY [[ADDXri]] + ; CHECK-NEXT: BL &modfl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $x0, implicit-def $q0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0 + ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0) + ; CHECK-NEXT: $q0 = COPY [[COPY1]] + ; CHECK-NEXT: $q1 = COPY [[LDRQui]] + ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1 + %0:fpr(s128) = COPY $q0 + %3:gpr(p0) = G_FRAME_INDEX %stack.0 + ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + $q0 = COPY %0(s128) + $x0 = COPY %3(p0) + BL &modfl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $x0, implicit-def $q0 + ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + %1:fpr(s128) = COPY $q0 + %2:fpr(s128) = G_LOAD %3(p0) :: (load (s128) from %stack.0) + $q0 = COPY %1(s128) + $q1 = COPY %2(s128) + RET_ReallyLR implicit $q0, implicit $q1 +... 
+--- +name: test_modf_double +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +liveins: + - { reg: '$d0' } +frameInfo: + maxAlignment: 8 +stack: + - { id: 0, size: 8, alignment: 8 } +machineFunctionInfo: {} +body: | + bb.1: + liveins: $d0 + + ; CHECK-LABEL: name: test_modf_double + ; CHECK: liveins: $d0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $d0 = COPY [[COPY]] + ; CHECK-NEXT: $x0 = COPY [[ADDXri]] + ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui %stack.0, 0 :: (load (s64) from %stack.0) + ; CHECK-NEXT: $d0 = COPY [[COPY1]] + ; CHECK-NEXT: $d1 = COPY [[LDRDui]] + ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1 + %0:fpr(s64) = COPY $d0 + %3:gpr(p0) = G_FRAME_INDEX %stack.0 + ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + $d0 = COPY %0(s64) + $x0 = COPY %3(p0) + BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0 + ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + %1:fpr(s64) = COPY $d0 + %2:fpr(s64) = G_LOAD %3(p0) :: (load (s64) from %stack.0) + $d0 = COPY %1(s64) + $d1 = COPY %2(s64) + RET_ReallyLR implicit $d0, implicit $d1 +... +--- +name: test_modf_double_vec +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +liveins: + - { reg: '$d0' } +frameInfo: + maxAlignment: 8 +stack: + - { id: 0, size: 8, alignment: 8 } +machineFunctionInfo: {} +body: | + bb.1: + liveins: $d0 + + ; CHECK-LABEL: name: test_modf_double_vec + ; CHECK: liveins: $d0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0 + ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: $d0 = COPY [[COPY]] + ; CHECK-NEXT: $x0 = COPY [[ADDXri]] + ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0 + ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui %stack.0, 0 :: (load (s64) from %stack.0) + ; CHECK-NEXT: $d0 = COPY [[COPY1]] + ; CHECK-NEXT: $d1 = COPY [[LDRDui]] + ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1 + %0:fpr(s64) = COPY $d0 + %3:gpr(p0) = G_FRAME_INDEX %stack.0 + ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp + $d0 = COPY %0(s64) + $x0 = COPY %3(p0) + BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0 + ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp + %1:fpr(s64) = COPY $d0 + %2:fpr(s64) = G_LOAD %3(p0) :: (load (s64) from %stack.0) + $d0 = COPY %1(s64) + $d1 = COPY %2(s64) + RET_ReallyLR implicit $d0, implicit $d1 +... 
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll index 8655bb1292ef7..cdde11042462b 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll @@ -1365,7 +1365,72 @@ for.end12: ; preds = %vector.body ret void } -declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) +define noundef <8 x i16> @cmplx_mul_combined_re_im(<8 x i16> noundef %a, i64 %scale.coerce) { +; CHECK-SD-LABEL: cmplx_mul_combined_re_im: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: lsr x9, x0, #16 +; CHECK-SD-NEXT: adrp x8, .LCPI14_0 +; CHECK-SD-NEXT: dup v4.8h, w0 +; CHECK-SD-NEXT: dup v1.8h, w9 +; CHECK-SD-NEXT: fmov s3, w9 +; CHECK-SD-NEXT: sqneg v2.8h, v1.8h +; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] +; CHECK-SD-NEXT: tbl v1.16b, { v2.16b, v3.16b }, v1.16b +; CHECK-SD-NEXT: rev32 v2.8h, v0.8h +; CHECK-SD-NEXT: sqdmull v3.4s, v0.4h, v4.4h +; CHECK-SD-NEXT: sqdmull2 v0.4s, v0.8h, v4.8h +; CHECK-SD-NEXT: sqdmlal v3.4s, v2.4h, v1.4h +; CHECK-SD-NEXT: sqdmlal2 v0.4s, v2.8h, v1.8h +; CHECK-SD-NEXT: uzp2 v0.8h, v3.8h, v0.8h +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: cmplx_mul_combined_re_im: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: lsr w9, w0, #16 +; CHECK-GI-NEXT: adrp x8, .LCPI14_0 +; CHECK-GI-NEXT: rev32 v4.8h, v0.8h +; CHECK-GI-NEXT: dup v1.8h, w9 +; CHECK-GI-NEXT: fmov s3, w9 +; CHECK-GI-NEXT: sqneg v2.8h, v1.8h +; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] +; CHECK-GI-NEXT: tbl v1.16b, { v2.16b, v3.16b }, v1.16b +; CHECK-GI-NEXT: mov d2, v0.d[1] +; CHECK-GI-NEXT: dup v3.8h, w0 +; CHECK-GI-NEXT: sqdmull v2.4s, v2.4h, v3.4h +; CHECK-GI-NEXT: sqdmull v5.4s, v4.4h, v1.4h +; CHECK-GI-NEXT: sqdmlal v5.4s, v0.4h, v3.4h +; CHECK-GI-NEXT: sqdmlal2 v2.4s, v4.8h, v1.8h +; CHECK-GI-NEXT: uzp2 v0.8h, v5.8h, v2.8h +; CHECK-GI-NEXT: ret +entry: + %scale.sroa.0.0.extract.trunc = trunc i64 %scale.coerce to i16 + %scale.sroa.2.0.extract.shift23 = lshr i64 %scale.coerce, 16 + %scale.sroa.2.0.extract.trunc = trunc i64 %scale.sroa.2.0.extract.shift23 to i16 + %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> + %vecinit.i24 = insertelement <8 x i16> poison, i16 %scale.sroa.0.0.extract.trunc, i64 0 + %vecinit.i = insertelement <8 x i16> poison, i16 %scale.sroa.2.0.extract.trunc, i64 0 + %vecinit7.i = shufflevector <8 x i16> %vecinit.i, <8 x i16> poison, <8 x i32> zeroinitializer + %vqnegq_v1.i = tail call noundef <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16> %vecinit7.i) + %vbsl5.i = shufflevector <8 x i16> %vqnegq_v1.i, <8 x i16> %vecinit.i, <8 x i32> + %shuffle.i40 = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> + %shuffle.i39 = shufflevector <8 x i16> %vecinit.i24, <8 x i16> poison, <4 x i32> zeroinitializer + %vqdmull_v2.i36 = tail call noundef <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i40, <4 x i16> %shuffle.i39) + %shuffle.i44 = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> + %vqdmull_v2.i = tail call noundef <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i44, <4 x i16> %shuffle.i39) + %shuffle.i38 = shufflevector <8 x i16> %shuffle.i, <8 x i16> poison, <4 x i32> + %shuffle.i37 = shufflevector <8 x i16> %vbsl5.i, <8 x i16> poison, <4 x i32> + %vqdmlal2.i45 = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i38, <4 x i16> %shuffle.i37) + %vqdmlal_v3.i46 = tail call noundef <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %vqdmull_v2.i36, <4 x i32> %vqdmlal2.i45) + 
%shuffle.i42 = shufflevector <8 x i16> %shuffle.i, <8 x i16> poison, <4 x i32> + %shuffle.i41 = shufflevector <8 x i16> %vbsl5.i, <8 x i16> poison, <4 x i32> + %vqdmlal2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i42, <4 x i16> %shuffle.i41) + %vqdmlal_v3.i = tail call noundef <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %vqdmull_v2.i, <4 x i32> %vqdmlal2.i) + %0 = bitcast <4 x i32> %vqdmlal_v3.i46 to <8 x i16> + %1 = bitcast <4 x i32> %vqdmlal_v3.i to <8 x i16> + %shuffle.i35 = shufflevector <8 x i16> %0, <8 x i16> %1, <8 x i32> + ret <8 x i16> %shuffle.i35 +} + ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll index 79fc12ea76f63..269cbf03f32a0 100644 --- a/llvm/test/CodeGen/AArch64/abdu-neg.ll +++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll @@ -180,13 +180,11 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_ext_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 -; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: cset w10, lo -; CHECK-NEXT: sbfx x10, x10, #0, #1 -; CHECK-NEXT: eor x8, x8, x10 -; CHECK-NEXT: eor x9, x9, x10 -; CHECK-NEXT: subs x8, x8, x10 -; CHECK-NEXT: sbc x9, x9, x10 +; CHECK-NEXT: sbc x9, x1, x3 +; CHECK-NEXT: subs x10, x2, x0 +; CHECK-NEXT: sbcs x11, x3, x1 +; CHECK-NEXT: csel x8, x8, x10, lo +; CHECK-NEXT: csel x9, x9, x11, lo ; CHECK-NEXT: negs x0, x8 ; CHECK-NEXT: ngc x1, x9 ; CHECK-NEXT: ret @@ -203,13 +201,11 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_ext_i128_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 -; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: cset w10, lo -; CHECK-NEXT: sbfx x10, x10, #0, #1 -; CHECK-NEXT: eor x8, x8, x10 -; CHECK-NEXT: eor x9, x9, x10 -; CHECK-NEXT: subs x8, x8, x10 -; CHECK-NEXT: sbc x9, x9, x10 +; CHECK-NEXT: sbc x9, x1, x3 +; CHECK-NEXT: subs x10, x2, x0 +; CHECK-NEXT: sbcs x11, x3, x1 +; CHECK-NEXT: csel x8, x8, x10, lo +; CHECK-NEXT: csel x9, x9, x11, lo ; CHECK-NEXT: negs x0, x8 ; CHECK-NEXT: ngc x1, x9 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll index 6db7693fb3a1c..3cbe648788a84 100644 --- a/llvm/test/CodeGen/AArch64/abdu.ll +++ b/llvm/test/CodeGen/AArch64/abdu.ll @@ -169,13 +169,11 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_ext_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 -; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: cset w10, lo -; CHECK-NEXT: sbfx x10, x10, #0, #1 -; CHECK-NEXT: eor x8, x8, x10 -; CHECK-NEXT: eor x9, x9, x10 -; CHECK-NEXT: subs x0, x8, x10 -; CHECK-NEXT: sbc x1, x9, x10 +; CHECK-NEXT: sbc x9, x1, x3 +; CHECK-NEXT: subs x10, x2, x0 +; CHECK-NEXT: sbcs x11, x3, x1 +; CHECK-NEXT: csel x0, x8, x10, lo +; CHECK-NEXT: csel x1, x9, x11, lo ; CHECK-NEXT: ret %aext = zext i128 %a to i256 %bext = zext i128 %b to i256 @@ -189,13 +187,11 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_ext_i128_undef: ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 -; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: cset w10, lo -; CHECK-NEXT: sbfx x10, x10, #0, #1 -; CHECK-NEXT: eor x8, x8, x10 -; CHECK-NEXT: eor x9, x9, x10 -; CHECK-NEXT: subs x0, x8, x10 -; CHECK-NEXT: sbc x1, x9, x10 +; CHECK-NEXT: sbc x9, x1, x3 +; CHECK-NEXT: subs x10, x2, x0 +; CHECK-NEXT: sbcs x11, x3, x1 +; CHECK-NEXT: csel x0, x8, x10, lo +; CHECK-NEXT: csel x1, x9, x11, 
lo ; CHECK-NEXT: ret %aext = zext i128 %a to i256 %bext = zext i128 %b to i256 @@ -263,13 +259,11 @@ define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_minmax_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 -; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: cset w10, lo -; CHECK-NEXT: sbfx x10, x10, #0, #1 -; CHECK-NEXT: eor x8, x8, x10 -; CHECK-NEXT: eor x9, x9, x10 -; CHECK-NEXT: subs x0, x8, x10 -; CHECK-NEXT: sbc x1, x9, x10 +; CHECK-NEXT: sbc x9, x1, x3 +; CHECK-NEXT: subs x10, x2, x0 +; CHECK-NEXT: sbcs x11, x3, x1 +; CHECK-NEXT: csel x0, x8, x10, lo +; CHECK-NEXT: csel x1, x9, x11, lo ; CHECK-NEXT: ret %min = call i128 @llvm.umin.i128(i128 %a, i128 %b) %max = call i128 @llvm.umax.i128(i128 %a, i128 %b) @@ -339,13 +333,11 @@ define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_cmp_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 -; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: cset w10, lo -; CHECK-NEXT: sbfx x10, x10, #0, #1 -; CHECK-NEXT: eor x8, x8, x10 -; CHECK-NEXT: eor x9, x9, x10 -; CHECK-NEXT: subs x0, x8, x10 -; CHECK-NEXT: sbc x1, x9, x10 +; CHECK-NEXT: sbc x9, x1, x3 +; CHECK-NEXT: subs x10, x2, x0 +; CHECK-NEXT: sbcs x11, x3, x1 +; CHECK-NEXT: csel x0, x8, x10, lo +; CHECK-NEXT: csel x1, x9, x11, lo ; CHECK-NEXT: ret %cmp = icmp uge i128 %a, %b %ab = sub i128 %a, %b @@ -437,13 +429,11 @@ define i128 @abd_select_i128(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_select_i128: ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 -; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: cset w10, lo -; CHECK-NEXT: sbfx x10, x10, #0, #1 -; CHECK-NEXT: eor x8, x8, x10 -; CHECK-NEXT: eor x9, x9, x10 -; CHECK-NEXT: subs x0, x8, x10 -; CHECK-NEXT: sbc x1, x9, x10 +; CHECK-NEXT: sbc x9, x1, x3 +; CHECK-NEXT: subs x10, x2, x0 +; CHECK-NEXT: sbcs x11, x3, x1 +; CHECK-NEXT: csel x0, x8, x10, lo +; CHECK-NEXT: csel x1, x9, x11, lo ; CHECK-NEXT: ret %cmp = icmp ult i128 %a, %b %ab = select i1 %cmp, i128 %a, i128 %b diff --git a/llvm/test/CodeGen/AArch64/and-mask-variable.ll b/llvm/test/CodeGen/AArch64/and-mask-variable.ll new file mode 100644 index 0000000000000..f41cdc6dd241b --- /dev/null +++ b/llvm/test/CodeGen/AArch64/and-mask-variable.ll @@ -0,0 +1,80 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=aarch64-none-elf -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI + +define i32 @mask_pair(i32 %x, i32 %y) { +; CHECK-SD-LABEL: mask_pair: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsr w8, w0, w1 +; CHECK-SD-NEXT: lsl w0, w8, w1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mask_pair: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #-1 // =0xffffffff +; CHECK-GI-NEXT: lsl w8, w8, w1 +; CHECK-GI-NEXT: and w0, w8, w0 +; CHECK-GI-NEXT: ret + %shl = shl nsw i32 -1, %y + %and = and i32 %shl, %x + ret i32 %and +} + +define i64 @mask_pair_64(i64 %x, i64 %y) { +; CHECK-SD-LABEL: mask_pair_64: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: lsr x8, x0, x1 +; CHECK-SD-NEXT: lsl x0, x8, x1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mask_pair_64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov x8, #-1 // =0xffffffffffffffff +; CHECK-GI-NEXT: lsl x8, x8, x1 +; CHECK-GI-NEXT: and x0, x8, x0 +; CHECK-GI-NEXT: ret + %shl = shl nsw i64 -1, %y + %and = and i64 %shl, %x + ret i64 %and +} + +define i128 @mask_pair_128(i128 %x, i128 %y) { +; CHECK-SD-LABEL: mask_pair_128: +; 
CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: mov x8, #-1 // =0xffffffffffffffff +; CHECK-SD-NEXT: mvn w9, w2 +; CHECK-SD-NEXT: mov x10, #9223372036854775807 // =0x7fffffffffffffff +; CHECK-SD-NEXT: lsl x8, x8, x2 +; CHECK-SD-NEXT: lsr x9, x10, x9 +; CHECK-SD-NEXT: tst x2, #0x40 +; CHECK-SD-NEXT: orr x9, x8, x9 +; CHECK-SD-NEXT: csel x9, x8, x9, ne +; CHECK-SD-NEXT: csel x8, xzr, x8, ne +; CHECK-SD-NEXT: and x0, x8, x0 +; CHECK-SD-NEXT: and x1, x9, x1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: mask_pair_128: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: mov w8, #64 // =0x40 +; CHECK-GI-NEXT: mov x9, #-1 // =0xffffffffffffffff +; CHECK-GI-NEXT: sub x10, x2, #64 +; CHECK-GI-NEXT: sub x8, x8, x2 +; CHECK-GI-NEXT: lsl x11, x9, x2 +; CHECK-GI-NEXT: cmp x2, #64 +; CHECK-GI-NEXT: lsr x8, x9, x8 +; CHECK-GI-NEXT: lsl x9, x9, x10 +; CHECK-GI-NEXT: csel x10, x11, xzr, lo +; CHECK-GI-NEXT: orr x8, x8, x11 +; CHECK-GI-NEXT: and x0, x10, x0 +; CHECK-GI-NEXT: csel x8, x8, x9, lo +; CHECK-GI-NEXT: cmp x2, #0 +; CHECK-GI-NEXT: csinv x8, x8, xzr, ne +; CHECK-GI-NEXT: and x1, x8, x1 +; CHECK-GI-NEXT: ret + %shl = shl nsw i128 -1, %y + %and = and i128 %shl, %x + ret i128 %and +} +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/arm64-cvt-simd-intrinsics.ll b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-intrinsics.ll new file mode 100644 index 0000000000000..b1b9fcf8a8b3c --- /dev/null +++ b/llvm/test/CodeGen/AArch64/arm64-cvt-simd-intrinsics.ll @@ -0,0 +1,609 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fprcvt,+fullfp16 | FileCheck %s --check-prefixes=CHECK +; RUN: llc < %s -mtriple aarch64-unknown-unknown -global-isel -mattr=+fprcvt,+fullfp16 | FileCheck %s --check-prefixes=CHECK + + +; +; Intrinsics +; + +define float @fcvtas_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtas_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtas s0, d0 +; CHECK-NEXT: ret + %i = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtas_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtas_1d1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtas d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtas.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtas_1s1h_simd(half %a) { +; CHECK-LABEL: fcvtas_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtas s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + ret float %f +} + +define double @fcvtas_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtas_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtas d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtas.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtas_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtas_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtas d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtas_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtas_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtas s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %a) + %d = bitcast i32 
%vcvtah_s32_f32 to float + ret float %d +} + + +define float @fcvtau_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtau_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtau s0, d0 +; CHECK-NEXT: ret + %i = call i32 @llvm.aarch64.neon.fcvtau.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtau_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtau_1d1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtau d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtau.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtau_1s1h_simd(half %a) { +; CHECK-LABEL: fcvtau_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtau s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + ret float %f +} + +define double @fcvtau_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtau_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtau d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtau.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtau_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtau_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtau d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtau_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtau_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtau s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %a) + %d = bitcast i32 %vcvtah_s32_f32 to float + ret float %d +} + +define float @fcvtms_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtms_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtms s0, d0 +; CHECK-NEXT: ret + %i = call i32 @llvm.aarch64.neon.fcvtms.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtms_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtms_1d1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtms d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtms.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtms_1s1h_simd(half %a) { +; CHECK-LABEL: fcvtms_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtms s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + ret float %f +} + +define double @fcvtms_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtms_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtms d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtms.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtms_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtms_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtms d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtms_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtms_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtms s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtms.i32.f32(float %a) + %d = bitcast i32 %vcvtah_s32_f32 to float + ret float %d +} + +define float @fcvtmu_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtmu_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtmu s0, d0 +; CHECK-NEXT: ret + %i 
= call i32 @llvm.aarch64.neon.fcvtmu.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtmu_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtmu_1d1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtmu d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtmu.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtmu_1s1h_simd(half %a) { +; CHECK-LABEL: fcvtmu_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtmu s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + ret float %f +} + +define double @fcvtmu_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtmu_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtmu d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtmu.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtmu_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtmu_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtmu d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtmu_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtmu_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtmu s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtmu.i32.f32(float %a) + %d = bitcast i32 %vcvtah_s32_f32 to float + ret float %d +} + +define float @fcvtns_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtns_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtns s0, d0 +; CHECK-NEXT: ret + %i = call i32 @llvm.aarch64.neon.fcvtns.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtns_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtns_1d1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtns d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtns.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtns_1s1h_simd(half %a) { +; CHECK-LABEL: fcvtns_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtns s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + ret float %f +} + +define double @fcvtns_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtns_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtns d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtns.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtns_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtns_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtns d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtns.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtns_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtns_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtns s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtns.i32.f32(float %a) + %d = bitcast i32 %vcvtah_s32_f32 to float + ret float %d +} + +define float @fcvtnu_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtnu_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtnu s0, d0 +; CHECK-NEXT: ret + %i = call i32 @llvm.aarch64.neon.fcvtnu.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtnu_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtnu_1d1s_simd: +; CHECK: // 
%bb.0: +; CHECK-NEXT: fcvtnu d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtnu.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtnu_1s1h_simd(half %a) { +; CHECK-LABEL: fcvtnu_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtnu s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + ret float %f +} + +define double @fcvtnu_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtnu_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtnu d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtnu.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtnu_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtnu_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtnu d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtnu.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtnu_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtnu_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtnu s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtnu.i32.f32(float %a) + %d = bitcast i32 %vcvtah_s32_f32 to float + ret float %d +} + +define float @fcvtps_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtps_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtps s0, d0 +; CHECK-NEXT: ret + %i = call i32 @llvm.aarch64.neon.fcvtps.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtps_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtps_1d1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtps d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtps.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtps_1s1h_simd(half %a) { +; CHECK-LABEL: fcvtps_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtps s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + ret float %f +} + +define double @fcvtps_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtps_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtps d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtps.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtps_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtps_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtps d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtps.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtps_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtps_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtps s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtps.i32.f32(float %a) + %d = bitcast i32 %vcvtah_s32_f32 to float + ret float %d +} + +define float @fcvtpu_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtpu_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtpu s0, d0 +; CHECK-NEXT: ret + %i = call i32 @llvm.aarch64.neon.fcvtpu.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtpu_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtpu_1d1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtpu d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtpu.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtpu_1s1h_simd(half %a) 
{ +; CHECK-LABEL: fcvtpu_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtpu s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + ret float %f +} + +define double @fcvtpu_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtpu_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtpu d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtpu.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtpu_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtpu_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtpu d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtpu.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtpu_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtpu_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtpu s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtpu.i32.f32(float %a) + %d = bitcast i32 %vcvtah_s32_f32 to float + ret float %d +} + +define float @fcvtzs_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtzs_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzs s0, d0 +; CHECK-NEXT: ret + %i = call i32 @llvm.aarch64.neon.fcvtzs.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtzs_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtzs_1d1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzs d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtzs.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtzs_1s1h_simd(half %a) { +; CHECK-LABEL: fcvtzs_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzs s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + ret float %f +} + +define double @fcvtzs_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtzs_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzs d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtzs.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtzs_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtzs_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzs d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtzs.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtzs_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtzs_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzs s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtzs.i32.f32(float %a) + %d = bitcast i32 %vcvtah_s32_f32 to float + ret float %d +} + +define float @fcvtzu_1s1d_simd(double %A) nounwind { +; CHECK-LABEL: fcvtzu_1s1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzu s0, d0 +; CHECK-NEXT: ret + %i = call i32 @llvm.aarch64.neon.fcvtzu.i32.f64(double %A) + %f = bitcast i32 %i to float + ret float %f +} + +define double @fcvtzu_1d1s_simd(float %A) nounwind { +; CHECK-LABEL: fcvtzu_1d1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzu d0, s0 +; CHECK-NEXT: ret + %i = call i64 @llvm.aarch64.neon.fcvtzu.i64.f32(float %A) + %d = bitcast i64 %i to double + ret double %d +} + +define float @fcvtzu_1s1h_simd(half %a) { +; CHECK-LABEL: fcvtzu_1s1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzu s0, h0 +; CHECK-NEXT: ret + %fcvt = tail call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a) + %f = bitcast i32 %fcvt to float + 
ret float %f +} + +define double @fcvtzu_1d1h_simd(half %a) { +; CHECK-LABEL: fcvtzu_1d1h_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzu d0, h0 +; CHECK-NEXT: ret + %vcvtah_s64_f16 = tail call i64 @llvm.aarch64.neon.fcvtzu.i64.f16(half %a) + %d = bitcast i64 %vcvtah_s64_f16 to double + ret double %d +} + +define double @fcvtzu_1d1d_simd(double %a) { +; CHECK-LABEL: fcvtzu_1d1d_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzu d0, d0 +; CHECK-NEXT: ret + %vcvtah_s64_f64 = tail call i64 @llvm.aarch64.neon.fcvtzu.i64.f64(double %a) + %d = bitcast i64 %vcvtah_s64_f64 to double + ret double %d +} + +define float @fcvtzu_1s1s_simd(float %a) { +; CHECK-LABEL: fcvtzu_1s1s_simd: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzu s0, s0 +; CHECK-NEXT: ret + %vcvtah_s32_f32 = tail call i32 @llvm.aarch64.neon.fcvtzu.i32.f32(float %a) + %d = bitcast i32 %vcvtah_s32_f32 to float + ret float %d +} diff --git a/llvm/test/CodeGen/AArch64/arm64-vcvt.ll b/llvm/test/CodeGen/AArch64/arm64-vcvt.ll index 60fcb643fb9f4..627d31f9a64fc 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vcvt.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vcvt.ll @@ -1,15 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s --check-prefixes=CHECK,CHECK-SD -; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI - -; CHECK-GI: warning: Instruction selection used fallback path for fcvtas_1d -; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcvtau_1d -; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcvtms_1d -; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcvtmu_1d -; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcvtps_1d -; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcvtpu_1d -; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcvtns_1d -; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcvtnu_1d +; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI define <2 x i32> @fcvtas_2s(<2 x float> %A) nounwind { ; CHECK-LABEL: fcvtas_2s: @@ -405,16 +396,10 @@ define <2 x i64> @fcvtzs_2d_intrinsic(<2 x double> %A) nounwind { } define <1 x i64> @fcvtzs_1d_intrinsic(<1 x double> %A) nounwind { -; CHECK-SD-LABEL: fcvtzs_1d_intrinsic: -; CHECK-SD: // %bb.0: -; CHECK-SD-NEXT: fcvtzs d0, d0 -; CHECK-SD-NEXT: ret -; -; CHECK-GI-LABEL: fcvtzs_1d_intrinsic: -; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: fcvtzs x8, d0 -; CHECK-GI-NEXT: fmov d0, x8 -; CHECK-GI-NEXT: ret +; CHECK-LABEL: fcvtzs_1d_intrinsic: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzs d0, d0 +; CHECK-NEXT: ret %tmp3 = call <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double> %A) ret <1 x i64> %tmp3 } @@ -490,16 +475,10 @@ define <2 x i64> @fcvtzu_2d_intrinsic(<2 x double> %A) nounwind { } define <1 x i64> @fcvtzu_1d_intrinsic(<1 x double> %A) nounwind { -; CHECK-SD-LABEL: fcvtzu_1d_intrinsic: -; CHECK-SD: // %bb.0: -; CHECK-SD-NEXT: fcvtzu d0, d0 -; CHECK-SD-NEXT: ret -; -; CHECK-GI-LABEL: fcvtzu_1d_intrinsic: -; CHECK-GI: // %bb.0: -; CHECK-GI-NEXT: fcvtzu x8, d0 -; CHECK-GI-NEXT: fmov d0, x8 -; CHECK-GI-NEXT: ret +; CHECK-LABEL: fcvtzu_1d_intrinsic: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtzu d0, d0 +; CHECK-NEXT: ret %tmp3 = call <1 x i64> 
@llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double> %A) ret <1 x i64> %tmp3 } diff --git a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll index 1c216e7357215..e371748a43b29 100644 --- a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll +++ b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll @@ -11,6 +11,16 @@ entry: ret <4 x i16> %1 } +define <4 x half> @v4bf16_to_v4f16(float, <4 x bfloat> %a) nounwind { +; CHECK-LABEL: v4bf16_to_v4f16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret +entry: + %1 = bitcast <4 x bfloat> %a to <4 x half> + ret <4 x half> %1 +} + define <2 x i32> @v4bf16_to_v2i32(float, <4 x bfloat> %a) nounwind { ; CHECK-LABEL: v4bf16_to_v2i32: ; CHECK: // %bb.0: // %entry @@ -82,6 +92,16 @@ entry: ret <4 x bfloat> %1 } +define <4 x bfloat> @v4f16_to_v4bf16(float, <4 x half> %a) nounwind { +; CHECK-LABEL: v4f16_to_v4bf16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret +entry: + %1 = bitcast <4 x half> %a to <4 x bfloat> + ret <4 x bfloat> %1 +} + define <4 x bfloat> @v2i32_to_v4bf16(float, <2 x i32> %a) nounwind { ; CHECK-LABEL: v2i32_to_v4bf16: ; CHECK: // %bb.0: // %entry @@ -152,6 +172,16 @@ entry: ret <8 x i16> %1 } +define <8 x half> @v8bf16_to_v8f16(float, <8 x bfloat> %a) nounwind { +; CHECK-LABEL: v8bf16_to_v8f16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov v0.16b, v1.16b +; CHECK-NEXT: ret +entry: + %1 = bitcast <8 x bfloat> %a to <8 x half> + ret <8 x half> %1 +} + define <4 x i32> @v8bf16_to_v4i32(float, <8 x bfloat> %a) nounwind { ; CHECK-LABEL: v8bf16_to_v4i32: ; CHECK: // %bb.0: // %entry @@ -202,6 +232,16 @@ entry: ret <8 x bfloat> %1 } +define <8 x bfloat> @v8f16_to_v8bf16(float, <8 x half> %a) nounwind { +; CHECK-LABEL: v8f16_to_v8bf16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov v0.16b, v1.16b +; CHECK-NEXT: ret +entry: + %1 = bitcast <8 x half> %a to <8 x bfloat> + ret <8 x bfloat> %1 +} + define <8 x bfloat> @v4i32_to_v8bf16(float, <4 x i32> %a) nounwind { ; CHECK-LABEL: v4i32_to_v8bf16: ; CHECK: // %bb.0: // %entry diff --git a/llvm/test/CodeGen/AArch64/cbz_wzr.mir b/llvm/test/CodeGen/AArch64/cbz_wzr.mir new file mode 100644 index 0000000000000..7deea56ba23a1 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/cbz_wzr.mir @@ -0,0 +1,260 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -o - %s -mtriple=aarch64-none-eabi -run-pass=machine-cp -mcp-use-is-copy-instr | FileCheck %s + +--- +name: cbz_wzr +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: cbz_wzr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CBZW $wzr, %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: $w0 = MOVZWi 10, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $w0 = MOVZWi 20, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + bb.0: + liveins: $x0 + + $w8 = ORRWrs $wzr, $wzr, 0 + CBZW killed renamable $w8, %bb.2 + + bb.1: + $w0 = MOVZWi 10, 0 + RET undef $lr, implicit $w0 + + bb.2: + $w0 = MOVZWi 20, 0 + RET undef $lr, implicit $w0 +... 
+--- +name: cbnz_wzr +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: cbnz_wzr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CBNZW $wzr, %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: $w0 = MOVZWi 10, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $w0 = MOVZWi 20, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + bb.0: + liveins: $x0 + + $w8 = ORRWrs $wzr, $wzr, 0 + CBNZW killed renamable $w8, %bb.2 + + bb.1: + $w0 = MOVZWi 10, 0 + RET undef $lr, implicit $w0 + + bb.2: + $w0 = MOVZWi 20, 0 + RET undef $lr, implicit $w0 +... +--- +name: tbz_wzr +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: tbz_wzr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: TBZW $wzr, 0, %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: $w0 = MOVZWi 10, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $w0 = MOVZWi 20, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + bb.0: + liveins: $x0 + + $w8 = ORRWrs $wzr, $wzr, 0 + TBZW killed renamable $w8, 0, %bb.2 + + bb.1: + $w0 = MOVZWi 10, 0 + RET undef $lr, implicit $w0 + + bb.2: + $w0 = MOVZWi 20, 0 + RET undef $lr, implicit $w0 +... +--- +name: tbnz_wzr +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: tbnz_wzr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: TBNZW $wzr, 0, %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: $w0 = MOVZWi 10, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $w0 = MOVZWi 20, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + bb.0: + liveins: $x0 + + $w8 = ORRWrs $wzr, $wzr, 0 + TBNZW killed renamable $w8, 0, %bb.2 + + bb.1: + $w0 = MOVZWi 10, 0 + RET undef $lr, implicit $w0 + + bb.2: + $w0 = MOVZWi 20, 0 + RET undef $lr, implicit $w0 +... + +--- +name: cbz_xzr +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: cbz_xzr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CBZX $xzr, %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: $w0 = MOVZWi 10, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $w0 = MOVZWi 20, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + bb.0: + liveins: $x0 + + $x8 = ORRXrs $xzr, $xzr, 0 + CBZX killed renamable $x8, %bb.2 + + bb.1: + $w0 = MOVZWi 10, 0 + RET undef $lr, implicit $w0 + + bb.2: + $w0 = MOVZWi 20, 0 + RET undef $lr, implicit $w0 +... 
+--- +name: cbnz_xzr +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: cbnz_xzr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: CBNZX $xzr, %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: $w0 = MOVZWi 10, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $w0 = MOVZWi 20, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + bb.0: + liveins: $x0 + + $x8 = ORRXrs $xzr, $xzr, 0 + CBNZX killed renamable $x8, %bb.2 + + bb.1: + $w0 = MOVZWi 10, 0 + RET undef $lr, implicit $w0 + + bb.2: + $w0 = MOVZWi 20, 0 + RET undef $lr, implicit $w0 +... +--- +name: tbz_xzr +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: tbz_xzr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: TBZX $xzr, 0, %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: $w0 = MOVZWi 10, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $w0 = MOVZWi 20, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + bb.0: + liveins: $x0 + + $x8 = ORRXrs $xzr, $xzr, 0 + TBZX killed renamable $x8, 0, %bb.2 + + bb.1: + $w0 = MOVZWi 10, 0 + RET undef $lr, implicit $w0 + + bb.2: + $w0 = MOVZWi 20, 0 + RET undef $lr, implicit $w0 +... +--- +name: tbnz_xzr +tracksRegLiveness: true +body: | + ; CHECK-LABEL: name: tbnz_xzr + ; CHECK: bb.0: + ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; CHECK-NEXT: liveins: $x0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: TBNZX $xzr, 0, %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: $w0 = MOVZWi 10, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: $w0 = MOVZWi 20, 0 + ; CHECK-NEXT: RET undef $lr, implicit $w0 + bb.0: + liveins: $x0 + + $x8 = ORRXrs $xzr, $xzr, 0 + TBNZX killed renamable $x8, 0, %bb.2 + + bb.1: + $w0 = MOVZWi 10, 0 + RET undef $lr, implicit $w0 + + bb.2: + $w0 = MOVZWi 20, 0 + RET undef $lr, implicit $w0 +... 
diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll index 0960c4c2a3342..a56d5b1b49b38 100644 --- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll +++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll @@ -78,9 +78,8 @@ B: define i32 @g_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind { ; CHECK-LABEL: g_i8_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sxtb w8, w0 -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w8, w1, w2, mi +; CHECK-NEXT: tst w0, #0x80 +; CHECK-NEXT: csel w8, w1, w2, ne ; CHECK-NEXT: add w0, w8, w0, uxtb ; CHECK-NEXT: ret entry: @@ -100,9 +99,8 @@ B: define i32 @g_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind { ; CHECK-LABEL: g_i16_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: sxth w8, w0 -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w8, w1, w2, mi +; CHECK-NEXT: tst w0, #0x8000 +; CHECK-NEXT: csel w8, w1, w2, ne ; CHECK-NEXT: add w0, w8, w0, uxth ; CHECK-NEXT: ret entry: @@ -167,10 +165,8 @@ B: define i64 @g_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind { ; CHECK-LABEL: g_i32_sign_extend_i64: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtw x8, w0 -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: csel x8, x1, x2, mi +; CHECK-NEXT: tst w0, #0x80000000 +; CHECK-NEXT: csel x8, x1, x2, ne ; CHECK-NEXT: add x0, x8, w0, uxtw ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir index aca2816225e3e..7fd0cee068fd1 100644 --- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir +++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir @@ -164,10 +164,10 @@ stack: - { id: 1, name: z1.addr, size: 16, alignment: 16, stack-id: scalable-vector, debug-info-variable: '!31', debug-info-expression: '!DIExpression()', debug-info-location: '!32' } - - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-vector, + - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector, debug-info-variable: '!33', debug-info-expression: '!DIExpression()', debug-info-location: '!34' } - - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-vector, + - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector, debug-info-variable: '!35', debug-info-expression: '!DIExpression()', debug-info-location: '!36' } - { id: 4, name: w0.addr, size: 4, alignment: 4, local-offset: -4, debug-info-variable: '!37', @@ -181,10 +181,10 @@ stack: - { id: 7, name: localv1, size: 16, alignment: 16, stack-id: scalable-vector, debug-info-variable: '!45', debug-info-expression: '!DIExpression()', debug-info-location: '!46' } - - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-vector, + - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-predicate-vector, debug-info-variable: '!48', debug-info-expression: '!DIExpression()', debug-info-location: '!49' } - - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-vector, + - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-predicate-vector, debug-info-variable: '!51', debug-info-expression: '!DIExpression()', debug-info-location: '!52' } machineFunctionInfo: {} diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir index 0ea180b20730f..41ba5542150ab 100644 --- 
a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir +++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir @@ -96,8 +96,8 @@ stack: - { id: 1, size: 8, alignment: 8 } - { id: 2, size: 16, alignment: 16, stack-id: scalable-vector } - { id: 3, size: 16, alignment: 16, stack-id: scalable-vector } - - { id: 4, size: 2, alignment: 2, stack-id: scalable-vector } - - { id: 5, size: 2, alignment: 2, stack-id: scalable-vector } + - { id: 4, size: 2, alignment: 2, stack-id: scalable-predicate-vector } + - { id: 5, size: 2, alignment: 2, stack-id: scalable-predicate-vector } machineFunctionInfo: {} body: | bb.0.entry: diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-pair.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-pair.mir new file mode 100644 index 0000000000000..113f343bac73e --- /dev/null +++ b/llvm/test/CodeGen/AArch64/debug-info-sve-pair.mir @@ -0,0 +1,344 @@ +# RUN: llc -start-before=aarch64-asm-printer -o - %s | FileCheck %s + +# Check that z30_z31 debug info does not crash. + +# CHECK: .Ldebug_loc0: +# CHECK: .byte 4 // DW_LLE_offset_pair +# CHECK: .uleb128 .Ltmp2-.Lfunc_begin0 // starting offset +# CHECK: .uleb128 .Ltmp3-.Lfunc_begin0 // ending offset +# CHECK: .byte 2 // Loc expr size +# CHECK: .byte 144 // DW_OP_regx +# CHECK: .byte 126 // 126 +# CHECK: .byte 4 // DW_LLE_offset_pair +# CHECK: .uleb128 .Ltmp3-.Lfunc_begin0 // starting offset +# CHECK: .uleb128 .Lfunc_end0-.Lfunc_begin0 // ending offset +# CHECK: .byte 6 // Loc expr size +# CHECK: .byte 144 // sub-register DW_OP_regx +# CHECK: .byte 94 // 94 +# CHECK: .byte 147 // DW_OP_piece +# CHECK: .byte 16 // 16 +# CHECK: .byte 147 // DW_OP_piece +# CHECK: .byte 31 // 31 +# CHECK: .byte 0 // DW_LLE_end_of_list + + +--- | + target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32" + target triple = "aarch64" + + define void @_Z10Sort16RowsILi6EEv12SharedTraitsI10TraitsLaneEP22Trans_NS_hwy_float16_tiS4_(i8 %st.coerce, ptr noundef %keys, i32 noundef %0, ptr noundef %1) #2 !dbg !2 { + unreachable + } + + attributes #2 = { mustprogress uwtable vscale_range(1,16) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="neoverse-n1" "target-features"="+aes,+crc,+dotprod,+fp-armv8,+fullfp16,+lse,+neon,+perfmon,+ras,+rcpc,+rdm,+sha2,+spe,+ssbs,+sve,+sve-aes,+sve2,+sve2-aes,+v8.1a,+v8.2a,+v8a,-fmv" "tune-cpu"="generic" } + + !llvm.dbg.cu = !{!3} + !llvm.module.flags = !{!4, !5, !6, !7, !8, !9} + !llvm.ident = !{!10} + + !2 = distinct !DISubprogram(name: "Sort16Rows<6>", linkageName: "_Z10Sort16RowsILi6EEv12SharedTraitsI10TraitsLaneEP22Trans_NS_hwy_float16_tiS4_", scope: !12, file: !12, line: 369, type: !18, scopeLine: 370, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !19, retainedNodes: !20, keyInstructions: true) + !3 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !14, producer: "clang version 22.0.0git (https://github.com/llvm/llvm-project.git)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None) + !4 = !{i32 7, !"Dwarf Version", i32 5} + !5 = !{i32 2, !"Debug Info Version", i32 3} + !6 = !{i32 1, !"wchar_size", i32 4} + !7 = !{i32 7, !"uwtable", i32 2} + !8 = !{i32 7, !"frame-pointer", i32 1} + !9 = !{i32 7, !"debug-info-assignment-tracking", i1 true} + !10 = !{!"clang version 22.0.0git (https://github.com/llvm/llvm-project.git)"} + !12 = !DIFile(filename: "example.cpp", 
directory: "/app", checksumkind: CSK_MD5, checksum: "5fbaafea0ede06ddd1ffc371aeee276e") + !14 = !DIFile(filename: "/app/example.cpp", directory: "/app", checksumkind: CSK_MD5, checksum: "5fbaafea0ede06ddd1ffc371aeee276e") + !17 = !DIBasicType(name: "__fp16", size: 16, encoding: DW_ATE_float) + !18 = !DISubroutineType(types: !21) + !19 = !{!120} + !20 = !{!77, !78, !79, !80, !81, !82, !83, !84, !85, !86, !87, !88, !89, !90, !91, !92, !93, !94, !95, !96, !97, !98, !99, !100, !101, !102, !103, !104, !105} + !21 = !{null, !22, !23, !24, !23} + !22 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "SharedTraits", file: !12, line: 272, size: 8, flags: DIFlagTypePassByValue, elements: !25, templateParams: !26, identifier: "_ZTS12SharedTraitsI10TraitsLaneE") + !23 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !55, size: 64) + !24 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) + !25 = !{!27} + !26 = !{!76} + !27 = !DIDerivedType(tag: DW_TAG_inheritance, scope: !22, baseType: !28, extraData: i32 0) + !28 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "TraitsLane", file: !12, line: 325, size: 8, flags: DIFlagTypePassByValue, elements: !29, identifier: "_ZTS10TraitsLane") + !29 = !{!30, !31, !32, !33} + !30 = !DIDerivedType(tag: DW_TAG_inheritance, scope: !28, baseType: !34, extraData: i32 0) + !31 = !DISubprogram(name: "Sort2", linkageName: "_ZN10TraitsLane5Sort2E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EERu13__SVFloat16_tS4_", scope: !28, file: !12, line: 326, type: !70, scopeLine: 326, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized) + !32 = !DISubprogram(name: "SortPairsDistance1", linkageName: "_ZN10TraitsLane18SortPairsDistance1E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !28, file: !12, line: 344, type: !74, scopeLine: 344, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized) + !33 = !DISubprogram(name: "SortPairsDistance4", linkageName: "_ZN10TraitsLane18SortPairsDistance4E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !28, file: !12, line: 352, type: !74, scopeLine: 352, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized) + !34 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "KeyLane", file: !12, line: 307, size: 8, flags: DIFlagTypePassByValue, elements: !35, identifier: "_ZTS7KeyLane") + !35 = !{!36, !37, !38} + !36 = !DISubprogram(name: "SwapAdjacentPairs", linkageName: "_ZN7KeyLane17SwapAdjacentPairsE4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !34, file: !12, line: 309, type: !39, scopeLine: 309, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized) + !37 = !DISubprogram(name: "SwapAdjacentPairs", linkageName: "_ZN7KeyLane17SwapAdjacentPairsEu13__SVFloat32_t", scope: !34, file: !12, line: 314, type: !58, scopeLine: 314, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized) + !38 = !DISubprogram(name: "OddEvenPairs", linkageName: "_ZN7KeyLane12OddEvenPairsE4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_tS3_", scope: !34, file: !12, line: 318, type: !68, scopeLine: 318, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized) + !39 = !DISubroutineType(types: !40) + !40 = !{!41, !42, !43, !41} + !41 = !DIDerivedType(tag: DW_TAG_typedef, name: "Vec >", file: !12, line: 270, baseType: !44) + !42 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !34, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer) + !43 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, 
templateParams: !51, identifier: "_ZTS4SimdI22Trans_NS_hwy_float16_tLi1ELi0EE") + !44 = !DIDerivedType(tag: DW_TAG_typedef, name: "VFromD >", file: !12, line: 142, baseType: !45) + !45 = !DIDerivedType(tag: DW_TAG_typedef, name: "svfloat16_t", file: !12, line: 26, baseType: !46) + !46 = !DIDerivedType(tag: DW_TAG_typedef, name: "__SVFloat16_t", file: !12, baseType: !47) + !47 = !DICompositeType(tag: DW_TAG_array_type, baseType: !17, flags: DIFlagVector, elements: !48) + !48 = !{!49} + !49 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 4, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus)) + !50 = !{} + !51 = !{!52, !53, !54} + !52 = !DITemplateTypeParameter(name: "Lane", type: !55) + !53 = !DITemplateValueParameter(type: !24, value: i32 1) + !54 = !DITemplateValueParameter(name: "kPow2", type: !24, value: i32 0) + !55 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Trans_NS_hwy_float16_t", file: !12, line: 6, size: 16, flags: DIFlagTypePassByValue, elements: !56, identifier: "_ZTS22Trans_NS_hwy_float16_t") + !56 = !{!57} + !57 = !DIDerivedType(tag: DW_TAG_member, name: "native", scope: !55, file: !12, line: 7, baseType: !17, size: 16) + !58 = !DISubroutineType(types: !59) + !59 = !{!60, !42, !60} + !60 = !DIDerivedType(tag: DW_TAG_typedef, name: "Vec >", file: !12, line: 270, baseType: !61) + !61 = !DIDerivedType(tag: DW_TAG_typedef, name: "VFromD >", file: !12, line: 142, baseType: !62) + !62 = !DIDerivedType(tag: DW_TAG_typedef, name: "svfloat32_t", file: !12, line: 27, baseType: !63) + !63 = !DIDerivedType(tag: DW_TAG_typedef, name: "__SVFloat32_t", file: !12, baseType: !64) + !64 = !DICompositeType(tag: DW_TAG_array_type, baseType: !65, flags: DIFlagVector, elements: !66) + !65 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float) + !66 = !{!67} + !67 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 2, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus)) + !68 = !DISubroutineType(types: !69) + !69 = !{!41, !42, !43, !41, !41} + !70 = !DISubroutineType(types: !71) + !71 = !{null, !72, !43, !73, !73} + !72 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !28, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer) + !73 = !DIDerivedType(tag: DW_TAG_reference_type, baseType: !41, size: 64) + !74 = !DISubroutineType(types: !75) + !75 = !{!41, !72, !43, !41} + !76 = !DITemplateTypeParameter(name: "Base", type: !28) + !77 = !DILocalVariable(name: "st", arg: 1, scope: !2, file: !12, line: 369, type: !22) + !78 = !DILocalVariable(name: "keys", arg: 2, scope: !2, file: !12, line: 369, type: !23) + !79 = !DILocalVariable(arg: 3, scope: !2, file: !12, line: 369, type: !24) + !80 = !DILocalVariable(arg: 4, scope: !2, file: !12, line: 370, type: !23) + !81 = !DILocalVariable(name: "d", scope: !2, file: !12, line: 371, type: !106) + !82 = !DILocalVariable(name: "v8", scope: !2, file: !12, line: 373, type: !112) + !83 = !DILocalVariable(name: "v9", scope: !2, file: !12, line: 373, type: !112) + !84 = !DILocalVariable(name: "va", scope: !2, file: !12, line: 373, type: !112) + !85 = !DILocalVariable(name: "vb", scope: !2, file: !12, line: 373, type: !112) + !86 = !DILocalVariable(name: "vc", scope: !2, file: !12, line: 373, type: !112) + !87 = !DILocalVariable(name: "vd", scope: !2, file: !12, line: 373, type: !112) + !88 = !DILocalVariable(name: "ve", scope: !2, file: !12, line: 373, type: !112) + !89 = !DILocalVariable(name: "vf", scope: !2, file: !12, line: 373, type: !112) + !90 = !DILocalVariable(name: "v2", 
scope: !2, file: !12, line: 373, type: !112) + !91 = !DILocalVariable(name: "v4", scope: !2, file: !12, line: 373, type: !112) + !92 = !DILocalVariable(name: "v7", scope: !2, file: !12, line: 373, type: !112) + !93 = !DILocalVariable(name: "v0", scope: !2, file: !12, line: 374, type: !112) + !94 = !DILocalVariable(name: "v3", scope: !2, file: !12, line: 375, type: !112) + !95 = !DILocalVariable(name: "v5", scope: !2, file: !12, line: 376, type: !112) + !96 = !DILocalVariable(name: "v6", scope: !2, file: !12, line: 377, type: !112) + !97 = !DILocalVariable(name: "kIota", scope: !2, file: !12, line: 378, type: !112) + !98 = !DILocalVariable(name: "m8", scope: !2, file: !12, line: 379, type: !113) + !99 = !DILocalVariable(name: "m9", scope: !2, file: !12, line: 380, type: !113) + !100 = !DILocalVariable(name: "ma", scope: !2, file: !12, line: 381, type: !113) + !101 = !DILocalVariable(name: "mb", scope: !2, file: !12, line: 382, type: !113) + !102 = !DILocalVariable(name: "mc", scope: !2, file: !12, line: 383, type: !113) + !103 = !DILocalVariable(name: "md", scope: !2, file: !12, line: 384, type: !113) + !104 = !DILocalVariable(name: "me", scope: !2, file: !12, line: 385, type: !113) + !105 = !DILocalVariable(name: "mf", scope: !2, file: !12, line: 386, type: !113) + !106 = !DIDerivedType(tag: DW_TAG_typedef, name: "CappedTag", file: !12, line: 97, baseType: !107) + !107 = !DIDerivedType(tag: DW_TAG_typedef, name: "type", scope: !108, file: !12, line: 89, baseType: !43) + !108 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "ClampNAndPow2", file: !12, line: 88, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !109, identifier: "_ZTS13ClampNAndPow2I22Trans_NS_hwy_float16_tLi1EE") + !109 = !{!110, !111} + !110 = !DITemplateTypeParameter(name: "T", type: !55) + !111 = !DITemplateValueParameter(name: "N", type: !24, value: i32 1) + !112 = !DIDerivedType(tag: DW_TAG_typedef, name: "V", scope: !2, file: !12, line: 372, baseType: !41) + !113 = !DIDerivedType(tag: DW_TAG_typedef, name: "Mask >", file: !12, line: 271, baseType: !114) + !114 = !DIDerivedType(tag: DW_TAG_typedef, name: "svbool_t", file: !12, line: 28, baseType: !115) + !115 = !DIDerivedType(tag: DW_TAG_typedef, name: "__SVBool_t", file: !12, baseType: !116) + !116 = !DICompositeType(tag: DW_TAG_array_type, baseType: !117, flags: DIFlagVector, elements: !118) + !117 = !DIBasicType(name: "unsigned char", size: 8, encoding: DW_ATE_unsigned_char) + !118 = !{!119} + !119 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 1, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus)) + !120 = !DITemplateValueParameter(name: "kKeysPerRow", type: !24, value: i32 6) + !121 = !DILocalVariable(name: "this", arg: 1, scope: !122, type: !123, flags: DIFlagArtificial | DIFlagObjectPointer) + !122 = distinct !DISubprogram(name: "Sort2", linkageName: "_ZN10TraitsLane5Sort2E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EERu13__SVFloat16_tS4_", scope: !28, file: !12, line: 326, type: !70, scopeLine: 328, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, declaration: !31, retainedNodes: !124, keyInstructions: true) + !123 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !28, size: 64) + !124 = !{!121, !125, !126, !127, !128, !129, !130, !131, !132} + !125 = !DILocalVariable(name: "d", arg: 2, scope: !122, file: !12, line: 326, type: !43) + !126 = !DILocalVariable(name: "a", arg: 3, scope: !122, file: !12, line: 327, type: !73) + !127 = 
!DILocalVariable(name: "b", arg: 4, scope: !122, file: !12, line: 328, type: !73) + !128 = !DILocalVariable(name: "__trans_tmp_52", scope: !122, file: !12, line: 329, type: !41) + !129 = !DILocalVariable(name: "a_copy", scope: !122, file: !12, line: 329, type: !41) + !130 = !DILocalVariable(name: "__trans_tmp_45", scope: !122, file: !12, line: 330, type: !41) + !131 = !DILocalVariable(name: "__trans_tmp_53", scope: !133, file: !12, line: 334, type: !41) + !132 = !DILocalVariable(name: "__trans_tmp_29", scope: !134, file: !12, line: 336, type: !45) + !133 = distinct !DILexicalBlock(scope: !122, file: !12, line: 333, column: 5) + !134 = distinct !DILexicalBlock(scope: !133, file: !12, line: 335, column: 7) + !137 = distinct !DISubprogram(name: "SortPairsDistance1", linkageName: "_ZN10TraitsLane18SortPairsDistance1E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !28, file: !12, line: 344, type: !74, scopeLine: 345, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, declaration: !32, retainedNodes: !139, keyInstructions: true) + !139 = !{!140, !141, !142, !143} + !140 = !DILocalVariable(name: "this", arg: 1, scope: !137, type: !123, flags: DIFlagArtificial | DIFlagObjectPointer) + !141 = !DILocalVariable(name: "d", arg: 2, scope: !137, file: !12, line: 344, type: !43) + !142 = !DILocalVariable(name: "v", arg: 3, scope: !137, file: !12, line: 345, type: !41) + !143 = !DILocalVariable(name: "__trans_tmp_48", scope: !137, file: !12, line: 346, type: !41) + !144 = distinct !DISubprogram(name: "Merge16x16<6, SharedTraits, __SVFloat16_t>", linkageName: "_Z10Merge16x16ILi6E12SharedTraitsI10TraitsLaneEu13__SVFloat16_tEvT0_RT1_S6_S6_S6_S6_S6_S6_S6_S6_S6_S6_S6_", scope: !12, file: !12, line: 286, type: !146, scopeLine: 288, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !147, retainedNodes: !148, keyInstructions: true) + !145 = distinct !DILocation(line: 388, column: 3, scope: !2) + !146 = !DISubroutineType(types: !149) + !147 = !{!164, !165, !166} + !148 = !{!151, !152, !153, !154, !155, !156, !157, !158, !159, !160, !161, !162, !163} + !149 = !{null, !22, !150, !150, !150, !150, !150, !150, !150, !150, !150, !150, !150, !150} + !150 = !DIDerivedType(tag: DW_TAG_reference_type, baseType: !47, size: 64) + !151 = !DILocalVariable(name: "st", arg: 1, scope: !144, file: !12, line: 286, type: !22) + !152 = !DILocalVariable(name: "v0", arg: 2, scope: !144, file: !12, line: 286, type: !150) + !153 = !DILocalVariable(name: "v2", arg: 3, scope: !144, file: !12, line: 286, type: !150) + !154 = !DILocalVariable(name: "v5", arg: 4, scope: !144, file: !12, line: 286, type: !150) + !155 = !DILocalVariable(name: "v6", arg: 5, scope: !144, file: !12, line: 287, type: !150) + !156 = !DILocalVariable(name: "v7", arg: 6, scope: !144, file: !12, line: 287, type: !150) + !157 = !DILocalVariable(name: "v9", arg: 7, scope: !144, file: !12, line: 287, type: !150) + !158 = !DILocalVariable(name: "va", arg: 8, scope: !144, file: !12, line: 287, type: !150) + !159 = !DILocalVariable(name: "vb", arg: 9, scope: !144, file: !12, line: 287, type: !150) + !160 = !DILocalVariable(name: "vc", arg: 10, scope: !144, file: !12, line: 288, type: !150) + !161 = !DILocalVariable(name: "vd", arg: 11, scope: !144, file: !12, line: 288, type: !150) + !162 = !DILocalVariable(name: "ve", arg: 12, scope: !144, file: !12, line: 288, type: !150) + !163 = !DILocalVariable(name: "vf", arg: 13, 
scope: !144, file: !12, line: 288, type: !150) + !164 = !DITemplateValueParameter(type: !24, value: i32 6) + !165 = !DITemplateTypeParameter(name: "Traits", type: !22) + !166 = !DITemplateTypeParameter(name: "V", type: !47) + !184 = !DILocalVariable(name: "this", arg: 1, scope: !185, type: !186, flags: DIFlagArtificial | DIFlagObjectPointer) + !185 = distinct !DISubprogram(name: "SortPairsDistance2 >", linkageName: "_ZN12SharedTraitsI10TraitsLaneE18SortPairsDistance2I4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEEEDTcl4ZerocvT__EEES6_S7_", scope: !22, file: !12, line: 273, type: !187, scopeLine: 273, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !188, declaration: !189, retainedNodes: !190, keyInstructions: true) + !186 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !22, size: 64) + !187 = !DISubroutineType(types: !191) + !188 = !{!193} + !189 = !DISubprogram(name: "SortPairsDistance2 >", linkageName: "_ZN12SharedTraitsI10TraitsLaneE18SortPairsDistance2I4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEEEDTcl4ZerocvT__EEES6_S7_", scope: !22, file: !12, line: 273, type: !187, scopeLine: 273, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized, templateParams: !188) + !190 = !{!184, !194, !195, !196, !197} + !191 = !{!41, !192, !43, !41} + !192 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !22, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer) + !193 = !DITemplateTypeParameter(name: "D", type: !43) + !194 = !DILocalVariable(name: "d", arg: 2, scope: !185, file: !12, line: 273, type: !43) + !195 = !DILocalVariable(name: "v", arg: 3, scope: !185, file: !12, line: 273, type: !41) + !196 = !DILocalVariable(name: "base", scope: !185, file: !12, line: 274, type: !28) + !197 = !DILocalVariable(name: "swapped", scope: !185, file: !12, line: 275, type: !41) + !200 = !DILocation(line: 0, scope: !122, inlinedAt: !201) + !201 = distinct !DILocation(line: 358, column: 5, scope: !202, inlinedAt: !203) + !202 = distinct !DISubprogram(name: "SortPairsDistance4", linkageName: "_ZN10TraitsLane18SortPairsDistance4E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !28, file: !12, line: 352, type: !74, scopeLine: 353, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, declaration: !33, retainedNodes: !204, keyInstructions: true) + !203 = distinct !DILocation(line: 298, column: 11, scope: !144, inlinedAt: !145) + !204 = !{!205, !206, !207, !208, !209, !210, !211} + !205 = !DILocalVariable(name: "this", arg: 1, scope: !202, type: !123, flags: DIFlagArtificial | DIFlagObjectPointer) + !206 = !DILocalVariable(name: "d", arg: 2, scope: !202, file: !12, line: 352, type: !43) + !207 = !DILocalVariable(name: "v", arg: 3, scope: !202, file: !12, line: 353, type: !41) + !208 = !DILocalVariable(name: "__trans_tmp_42", scope: !202, file: !12, line: 354, type: !41) + !209 = !DILocalVariable(name: "__trans_tmp_39", scope: !202, file: !12, line: 354, type: !41) + !210 = !DILocalVariable(name: "dw", scope: !202, file: !12, line: 355, type: !212) + !211 = !DILocalVariable(name: "__trans_tmp_51", scope: !219, file: !12, line: 360, type: !44) + !212 = !DIDerivedType(tag: DW_TAG_typedef, name: "RepartitionToWide >", file: !12, line: 103, baseType: !213) + !213 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition >", file: !12, line: 101, baseType: !214) + !214 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition", scope: !43, file: !12, line: 86, baseType: !215) + !215 = 
distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !216, identifier: "_ZTS4SimdIfLi0ELi0EE") + !216 = !{!217, !218, !54} + !217 = !DITemplateTypeParameter(name: "Lane", type: !65) + !218 = !DITemplateValueParameter(type: !24, value: i32 0) + !219 = distinct !DILexicalBlock(scope: !202, file: !12, line: 359, column: 5) + !220 = !DILocalVariable(name: "this", arg: 1, scope: !221, type: !222, flags: DIFlagArtificial | DIFlagObjectPointer) + !221 = distinct !DISubprogram(name: "SwapAdjacentPairs", linkageName: "_ZN7KeyLane17SwapAdjacentPairsEu13__SVFloat32_t", scope: !34, file: !12, line: 314, type: !58, scopeLine: 314, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, declaration: !37, retainedNodes: !223, keyInstructions: true) + !222 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !34, size: 64) + !223 = !{!220, !224} + !224 = !DILocalVariable(name: "v", arg: 2, scope: !221, file: !12, line: 314, type: !60) + !225 = distinct !DILocation(line: 357, column: 38, scope: !202, inlinedAt: !203) + !226 = !DILocalVariable(name: "v", arg: 1, scope: !227, file: !12, line: 264, type: !64) + !227 = distinct !DISubprogram(name: "Shuffle1032<__SVFloat32_t>", linkageName: "_Z11Shuffle1032Iu13__SVFloat32_tET_S1_", scope: !12, file: !12, line: 264, type: !228, scopeLine: 264, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !229, retainedNodes: !230, keyInstructions: true) + !228 = !DISubroutineType(types: !231) + !229 = !{!262} + !230 = !{!226, !232, !233, !234} + !231 = !{!64, !64} + !232 = !DILocalVariable(name: "d", scope: !227, file: !12, line: 265, type: !235) + !233 = !DILocalVariable(name: "d8", scope: !227, file: !12, line: 266, type: !252) + !234 = !DILocalVariable(name: "v8", scope: !227, file: !12, line: 267, type: !257) + !235 = !DIDerivedType(tag: DW_TAG_typedef, name: "DFromV<__SVFloat32_t>", file: !12, line: 108, baseType: !236) + !236 = !DIDerivedType(tag: DW_TAG_typedef, name: "type", scope: !237, file: !12, line: 116, baseType: !238) + !237 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "DFromV_t<__SVFloat32_t>", file: !12, line: 115, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !239, identifier: "_ZTS8DFromV_tIu13__SVFloat32_tE") + !238 = !DIDerivedType(tag: DW_TAG_typedef, name: "ScalableTag", file: !12, line: 95, baseType: !241) + !239 = !{!240} + !240 = !DITemplateTypeParameter(type: !64) + !241 = !DIDerivedType(tag: DW_TAG_typedef, name: "type", scope: !242, file: !12, line: 92, baseType: !243) + !242 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "ScalableTagChecker", file: !12, line: 91, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !244, identifier: "_ZTS18ScalableTagCheckerIfE") + !243 = !DIDerivedType(tag: DW_TAG_typedef, name: "type", scope: !246, file: !12, line: 89, baseType: !247) + !244 = !{!245} + !245 = !DITemplateTypeParameter(name: "T", type: !65) + !246 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "ClampNAndPow2", file: !12, line: 88, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !248, identifier: "_ZTS13ClampNAndPow2IfLi64EE") + !247 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !250, 
identifier: "_ZTS4SimdIfLi64ELi0EE") + !248 = !{!245, !249} + !249 = !DITemplateValueParameter(name: "N", type: !24, value: i32 64) + !250 = !{!217, !251, !54} + !251 = !DITemplateValueParameter(type: !24, value: i32 64) + !252 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition >", file: !12, line: 101, baseType: !253) + !253 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition", scope: !247, file: !12, line: 86, baseType: !254) + !254 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !255, identifier: "_ZTS4SimdIhLi0ELi0EE") + !255 = !{!256, !218, !54} + !256 = !DITemplateTypeParameter(name: "Lane", type: !117) + !257 = !DIDerivedType(tag: DW_TAG_typedef, name: "svuint8_t", file: !12, line: 22, baseType: !258) + !258 = !DIDerivedType(tag: DW_TAG_typedef, name: "__SVUint8_t", file: !12, baseType: !259) + !259 = !DICompositeType(tag: DW_TAG_array_type, baseType: !117, flags: DIFlagVector, elements: !260) + !260 = !{!261} + !261 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 8, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus)) + !262 = !DITemplateTypeParameter(name: "V", type: !64) + !263 = !DILocalVariable(name: "hi", arg: 1, scope: !264, file: !12, line: 248, type: !259) + !264 = distinct !DISubprogram(name: "CombineShiftRightBytes<8, __SVUint8_t>", linkageName: "_Z22CombineShiftRightBytesILi8Eu11__SVUint8_tET0_S1_S1_", scope: !12, file: !12, line: 248, type: !265, scopeLine: 248, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !266, retainedNodes: !267, keyInstructions: true) + !265 = !DISubroutineType(types: !268) + !266 = !{!283, !284} + !267 = !{!263, !269, !270, !271, !272, !273, !274, !275, !276} + !268 = !{!259, !259, !259} + !269 = !DILocalVariable(name: "lo", arg: 2, scope: !264, file: !12, line: 248, type: !259) + !270 = !DILocalVariable(name: "__trans_tmp_33", scope: !264, file: !12, line: 249, type: !257) + !271 = !DILocalVariable(name: "__trans_tmp_15", scope: !264, file: !12, line: 249, type: !257) + !272 = !DILocalVariable(name: "__trans_tmp_32", scope: !264, file: !12, line: 250, type: !257) + !273 = !DILocalVariable(name: "d8", scope: !264, file: !12, line: 251, type: !277) + !274 = !DILocalVariable(name: "__trans_tmp_16", scope: !264, file: !12, line: 252, type: !114) + !275 = !DILocalVariable(name: "lo_down", scope: !264, file: !12, line: 254, type: !257) + !276 = !DILocalVariable(name: "__trans_tmp_34", scope: !264, file: !12, line: 255, type: !114) + !277 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition >", file: !12, line: 101, baseType: !278) + !278 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition", scope: !279, file: !12, line: 86, baseType: !254) + !279 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !280, identifier: "_ZTS4SimdIcLi0ELi0EE") + !280 = !{!281, !218, !54} + !281 = !DITemplateTypeParameter(name: "Lane", type: !282) + !282 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_unsigned_char) + !283 = !DITemplateValueParameter(name: "kBytes", type: !24, value: i32 8) + !284 = !DITemplateTypeParameter(name: "V", type: !259) + !285 = !DILocalVariable(name: "hi", arg: 1, scope: !286, file: !12, line: 216, type: !257) + !286 = distinct !DISubprogram(name: "Ext<8>", linkageName: 
"_Z3ExtILi8EEu11__SVUint8_tS0_S0_", scope: !12, file: !12, line: 216, type: !287, scopeLine: 216, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !288, retainedNodes: !289, keyInstructions: true) + !287 = !DISubroutineType(types: !290) + !288 = !{!292} + !289 = !{!285, !291} + !290 = !{!257, !257, !257} + !291 = !DILocalVariable(name: "lo", arg: 2, scope: !286, file: !12, line: 216, type: !257) + !292 = !DITemplateValueParameter(name: "kIndex", type: !24, value: i32 8) + !293 = !DILocalVariable(name: "a", arg: 1, scope: !294, file: !12, line: 180, type: !47) + !294 = distinct !DISubprogram(name: "Min<__SVFloat16_t>", linkageName: "_Z3MinIu13__SVFloat16_tET_S1_S1_", scope: !12, file: !12, line: 180, type: !295, scopeLine: 180, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !296, retainedNodes: !297, keyInstructions: true) + !295 = !DISubroutineType(types: !298) + !296 = !{!166} + !297 = !{!293, !299, !300, !301, !302, !303, !304} + !298 = !{!47, !47, !47} + !299 = !DILocalVariable(name: "b", arg: 2, scope: !294, file: !12, line: 180, type: !47) + !300 = !DILocalVariable(name: "__trans_tmp_36", scope: !294, file: !12, line: 181, type: !45) + !301 = !DILocalVariable(name: "__trans_tmp_25", scope: !294, file: !12, line: 181, type: !45) + !302 = !DILocalVariable(name: "__trans_tmp_27", scope: !294, file: !12, line: 182, type: !114) + !303 = !DILocalVariable(name: "__trans_tmp_24", scope: !294, file: !12, line: 183, type: !114) + !304 = !DILocalVariable(name: "__trans_tmp_19", scope: !294, file: !12, line: 184, type: !114) + !308 = distinct !DILocation(line: 315, column: 12, scope: !221, inlinedAt: !225) + !309 = distinct !DILocation(line: 268, column: 21, scope: !227, inlinedAt: !308) + !311 = distinct !DILocation(line: 254, column: 18, scope: !264, inlinedAt: !309) + !312 = !DILocation(line: 217, column: 10, scope: !286, inlinedAt: !311, atomGroup: 1, atomRank: 2) + !313 = !DILocation(line: 257, column: 20, scope: !264, inlinedAt: !309, atomGroup: 5, atomRank: 2) + !314 = !DILocation(line: 0, scope: !294, inlinedAt: !315) + !315 = distinct !DILocation(line: 331, column: 22, scope: !122, inlinedAt: !201) + !316 = !DILocation(line: 185, column: 20, scope: !294, inlinedAt: !315) + !317 = !DILocation(line: 403, column: 1, scope: !2, atomGroup: 19449, atomRank: 1) + +... +--- +name: _Z10Sort16RowsILi6EEv12SharedTraitsI10TraitsLaneEP22Trans_NS_hwy_float16_tiS4_ +body: | + bb.0: + liveins: $x1, $z0, $z1, $p0 + + $z30 = LDR_ZXI $x1, -14 + $z31 = LDR_ZXI $x1, -13 + $z23 = ORR_ZZZ $z30, $z30 + renamable $z2 = EXT_ZZI_B renamable $z30_z31, 8, debug-location !312 + renamable $z7 = SEL_ZPZZ_B renamable $p0, renamable $z0, killed renamable $z1, debug-location !313 + DBG_VALUE $z30, $noreg, !129, !DIExpression(), debug-location !200 + renamable $p3 = nofpexcept FCMGT_PPzZZ_H renamable $p0, renamable $z0, undef renamable $z1, debug-location !316 + DBG_VALUE $z30_z31, $noreg, !129, !DIExpression(), debug-location !200 + DBG_VALUE $z30_z31, $noreg, !293, !DIExpression(), debug-location !314 + RET undef $lr, debug-location !317 +... 
+ diff --git a/llvm/test/CodeGen/AArch64/dp-3source.ll b/llvm/test/CodeGen/AArch64/dp-3source.ll index 313f671c19c5e..26ee07627e3e5 100644 --- a/llvm/test/CodeGen/AArch64/dp-3source.ll +++ b/llvm/test/CodeGen/AArch64/dp-3source.ll @@ -1,164 +1,212 @@ -; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI define i32 @test_madd32(i32 %val0, i32 %val1, i32 %val2) { ; CHECK-LABEL: test_madd32: +; CHECK: ; %bb.0: +; CHECK-NEXT: madd w0, w1, w2, w0 +; CHECK-NEXT: ret %mid = mul i32 %val1, %val2 %res = add i32 %val0, %mid -; CHECK: madd {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ret i32 %res } define i64 @test_madd64(i64 %val0, i64 %val1, i64 %val2) { ; CHECK-LABEL: test_madd64: +; CHECK: ; %bb.0: +; CHECK-NEXT: madd x0, x1, x2, x0 +; CHECK-NEXT: ret %mid = mul i64 %val1, %val2 %res = add i64 %val0, %mid -; CHECK: madd {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} ret i64 %res } define i32 @test_msub32(i32 %val0, i32 %val1, i32 %val2) { ; CHECK-LABEL: test_msub32: +; CHECK: ; %bb.0: +; CHECK-NEXT: msub w0, w1, w2, w0 +; CHECK-NEXT: ret %mid = mul i32 %val1, %val2 %res = sub i32 %val0, %mid -; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ret i32 %res } define i64 @test_msub64(i64 %val0, i64 %val1, i64 %val2) { ; CHECK-LABEL: test_msub64: +; CHECK: ; %bb.0: +; CHECK-NEXT: msub x0, x1, x2, x0 +; CHECK-NEXT: ret %mid = mul i64 %val1, %val2 %res = sub i64 %val0, %mid -; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} ret i64 %res } define i64 @test_smaddl(i64 %acc, i32 %val1, i32 %val2) { ; CHECK-LABEL: test_smaddl: +; CHECK: ; %bb.0: +; CHECK-NEXT: smaddl x0, w1, w2, x0 +; CHECK-NEXT: ret %ext1 = sext i32 %val1 to i64 %ext2 = sext i32 %val2 to i64 %prod = mul i64 %ext1, %ext2 %res = add i64 %acc, %prod -; CHECK: smaddl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}} ret i64 %res } define i64 @test_smsubl(i64 %acc, i32 %val1, i32 %val2) { ; CHECK-LABEL: test_smsubl: +; CHECK: ; %bb.0: +; CHECK-NEXT: smsubl x0, w1, w2, x0 +; CHECK-NEXT: ret %ext1 = sext i32 %val1 to i64 %ext2 = sext i32 %val2 to i64 %prod = mul i64 %ext1, %ext2 %res = sub i64 %acc, %prod -; CHECK: smsubl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}} ret i64 %res } define i64 @test_umaddl(i64 %acc, i32 %val1, i32 %val2) { ; CHECK-LABEL: test_umaddl: +; CHECK: ; %bb.0: +; CHECK-NEXT: umaddl x0, w1, w2, x0 +; CHECK-NEXT: ret %ext1 = zext i32 %val1 to i64 %ext2 = zext i32 %val2 to i64 %prod = mul i64 %ext1, %ext2 %res = add i64 %acc, %prod -; CHECK: umaddl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}} ret i64 %res } define i64 @test_umsubl(i64 %acc, i32 %val1, i32 %val2) { ; CHECK-LABEL: test_umsubl: +; CHECK: ; %bb.0: +; CHECK-NEXT: umsubl x0, w1, w2, x0 +; CHECK-NEXT: ret %ext1 = zext i32 %val1 to i64 %ext2 = zext i32 %val2 to i64 %prod = mul i64 %ext1, %ext2 %res = sub i64 %acc, %prod -; CHECK: umsubl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{x[0-9]+}} ret i64 %res } define i64 @test_smulh(i64 %lhs, i64 %rhs) { -; CHECK-LABEL: test_smulh: +; CHECK-SD-LABEL: test_smulh: +; CHECK-SD: ; %bb.0: +; CHECK-SD-NEXT: smulh x0, x0, x1 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_smulh: +; CHECK-GI: ; %bb.0: +; 
CHECK-GI-NEXT: asr x8, x1, #63 +; CHECK-GI-NEXT: asr x9, x0, #63 +; CHECK-GI-NEXT: umulh x10, x0, x1 +; CHECK-GI-NEXT: mul x8, x0, x8 +; CHECK-GI-NEXT: madd x8, x9, x1, x8 +; CHECK-GI-NEXT: add x0, x8, x10 +; CHECK-GI-NEXT: ret %ext1 = sext i64 %lhs to i128 %ext2 = sext i64 %rhs to i128 %res = mul i128 %ext1, %ext2 %high = lshr i128 %res, 64 %val = trunc i128 %high to i64 -; CHECK: smulh {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} ret i64 %val } define i64 @test_umulh(i64 %lhs, i64 %rhs) { ; CHECK-LABEL: test_umulh: +; CHECK: ; %bb.0: +; CHECK-NEXT: umulh x0, x0, x1 +; CHECK-NEXT: ret %ext1 = zext i64 %lhs to i128 %ext2 = zext i64 %rhs to i128 %res = mul i128 %ext1, %ext2 %high = lshr i128 %res, 64 %val = trunc i128 %high to i64 -; CHECK: umulh {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} ret i64 %val } define i32 @test_mul32(i32 %lhs, i32 %rhs) { ; CHECK-LABEL: test_mul32: +; CHECK: ; %bb.0: +; CHECK-NEXT: mul w0, w0, w1 +; CHECK-NEXT: ret %res = mul i32 %lhs, %rhs -; CHECK: mul {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ret i32 %res } define i64 @test_mul64(i64 %lhs, i64 %rhs) { ; CHECK-LABEL: test_mul64: +; CHECK: ; %bb.0: +; CHECK-NEXT: mul x0, x0, x1 +; CHECK-NEXT: ret %res = mul i64 %lhs, %rhs -; CHECK: mul {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} ret i64 %res } define i32 @test_mneg32(i32 %lhs, i32 %rhs) { ; CHECK-LABEL: test_mneg32: +; CHECK: ; %bb.0: +; CHECK-NEXT: mneg w0, w0, w1 +; CHECK-NEXT: ret %prod = mul i32 %lhs, %rhs %res = sub i32 0, %prod -; CHECK: mneg {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ret i32 %res } define i64 @test_mneg64(i64 %lhs, i64 %rhs) { ; CHECK-LABEL: test_mneg64: +; CHECK: ; %bb.0: +; CHECK-NEXT: mneg x0, x0, x1 +; CHECK-NEXT: ret %prod = mul i64 %lhs, %rhs %res = sub i64 0, %prod -; CHECK: mneg {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} ret i64 %res } define i64 @test_smull(i32 %lhs, i32 %rhs) { ; CHECK-LABEL: test_smull: +; CHECK: ; %bb.0: +; CHECK-NEXT: smull x0, w0, w1 +; CHECK-NEXT: ret %ext1 = sext i32 %lhs to i64 %ext2 = sext i32 %rhs to i64 %res = mul i64 %ext1, %ext2 -; CHECK: smull {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ret i64 %res } define i64 @test_umull(i32 %lhs, i32 %rhs) { ; CHECK-LABEL: test_umull: +; CHECK: ; %bb.0: +; CHECK-NEXT: umull x0, w0, w1 +; CHECK-NEXT: ret %ext1 = zext i32 %lhs to i64 %ext2 = zext i32 %rhs to i64 %res = mul i64 %ext1, %ext2 -; CHECK: umull {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ret i64 %res } define i64 @test_smnegl(i32 %lhs, i32 %rhs) { ; CHECK-LABEL: test_smnegl: +; CHECK: ; %bb.0: +; CHECK-NEXT: smnegl x0, w0, w1 +; CHECK-NEXT: ret %ext1 = sext i32 %lhs to i64 %ext2 = sext i32 %rhs to i64 %prod = mul i64 %ext1, %ext2 %res = sub i64 0, %prod -; CHECK: smnegl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ret i64 %res } define i64 @test_umnegl(i32 %lhs, i32 %rhs) { ; CHECK-LABEL: test_umnegl: +; CHECK: ; %bb.0: +; CHECK-NEXT: umnegl x0, w0, w1 +; CHECK-NEXT: ret %ext1 = zext i32 %lhs to i64 %ext2 = zext i32 %rhs to i64 %prod = mul i64 %ext1, %ext2 %res = sub i64 0, %prod -; CHECK: umnegl {{x[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ret i64 %res } @@ -168,11 +216,34 @@ define i64 @test_umnegl(i32 %lhs, i32 %rhs) { define void @test_mneg(){ ; CHECK-LABEL: test_mneg: +; CHECK: ; %bb.0: +; CHECK-NEXT: Lloh0: +; CHECK-NEXT: adrp x8, _a@GOTPAGE +; CHECK-NEXT: Lloh1: +; CHECK-NEXT: adrp x9, _b@GOTPAGE +; CHECK-NEXT: Lloh2: +; CHECK-NEXT: ldr x8, [x8, _a@GOTPAGEOFF] +; CHECK-NEXT: Lloh3: +; CHECK-NEXT: ldr x9, [x9, _b@GOTPAGEOFF] +; CHECK-NEXT: Lloh4: +; CHECK-NEXT: ldr w8, [x8] +; CHECK-NEXT: Lloh5: +; CHECK-NEXT: ldr w9, [x9] +; CHECK-NEXT: mneg w8, w8, w9 +; 
CHECK-NEXT: Lloh6: +; CHECK-NEXT: adrp x9, _c@GOTPAGE +; CHECK-NEXT: Lloh7: +; CHECK-NEXT: ldr x9, [x9, _c@GOTPAGEOFF] +; CHECK-NEXT: Lloh8: +; CHECK-NEXT: str w8, [x9] +; CHECK-NEXT: ret +; CHECK-NEXT: .loh AdrpLdrGotStr Lloh6, Lloh7, Lloh8 +; CHECK-NEXT: .loh AdrpLdrGotLdr Lloh1, Lloh3, Lloh5 +; CHECK-NEXT: .loh AdrpLdrGotLdr Lloh0, Lloh2, Lloh4 %1 = load i32, ptr @a, align 4 %2 = load i32, ptr @b, align 4 %3 = sub i32 0, %1 %4 = mul i32 %2, %3 store i32 %4, ptr @c, align 4 -; CHECK: mneg {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ret void } diff --git a/llvm/test/CodeGen/AArch64/extract-bits.ll b/llvm/test/CodeGen/AArch64/extract-bits.ll index 8e822d19a19b9..5a96116142b51 100644 --- a/llvm/test/CodeGen/AArch64/extract-bits.ll +++ b/llvm/test/CodeGen/AArch64/extract-bits.ll @@ -532,11 +532,10 @@ define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { ; CHECK-LABEL: bextr32_c0: ; CHECK: // %bb.0: -; CHECK-NEXT: neg w8, w2 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: lsr w10, w0, w1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsr w8, w0, w1 +; CHECK-NEXT: neg w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %shifted = lshr i32 %val, %numskipbits %numhighbits = sub i32 32, %numlowbits @@ -548,12 +547,11 @@ define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind { ; CHECK-LABEL: bextr32_c1_indexzext: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #32 // =0x20 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: lsr w10, w0, w1 -; CHECK-NEXT: sub w8, w8, w2 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsr w8, w0, w1 +; CHECK-NEXT: mov w9, #32 // =0x20 +; CHECK-NEXT: sub w9, w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %skip = zext i8 %numskipbits to i32 %shifted = lshr i32 %val, %skip @@ -569,10 +567,9 @@ define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind ; CHECK: // %bb.0: ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: neg w9, w2 -; CHECK-NEXT: mov w10, #-1 // =0xffffffff -; CHECK-NEXT: lsr w9, w10, w9 ; CHECK-NEXT: lsr w8, w8, w1 -; CHECK-NEXT: and w0, w9, w8 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %val = load i32, ptr %w %shifted = lshr i32 %val, %numskipbits @@ -587,11 +584,10 @@ define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n ; CHECK: // %bb.0: ; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: mov w9, #32 // =0x20 -; CHECK-NEXT: mov w10, #-1 // =0xffffffff ; CHECK-NEXT: sub w9, w9, w2 ; CHECK-NEXT: lsr w8, w8, w1 -; CHECK-NEXT: lsr w9, w10, w9 -; CHECK-NEXT: and w0, w9, w8 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %val = load i32, ptr %w %skip = zext i8 %numskipbits to i32 @@ -606,11 +602,10 @@ define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { ; CHECK-LABEL: bextr32_c4_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: neg w8, w2 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: lsr w10, w0, w1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w10, w8 +; CHECK-NEXT: lsr w8, w0, w1 +; CHECK-NEXT: neg w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %shifted = 
lshr i32 %val, %numskipbits %numhighbits = sub i32 32, %numlowbits @@ -624,11 +619,10 @@ define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_c0: ; CHECK: // %bb.0: -; CHECK-NEXT: neg x8, x2 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x8, x10 +; CHECK-NEXT: lsr x8, x0, x1 +; CHECK-NEXT: neg x9, x2 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %shifted = lshr i64 %val, %numskipbits %numhighbits = sub i64 64, %numlowbits @@ -640,13 +634,12 @@ define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_c1_indexzext: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #64 // =0x40 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff ; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: sub w8, w8, w2 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x8, x10 +; CHECK-NEXT: lsr x8, x0, x1 +; CHECK-NEXT: mov w9, #64 // =0x40 +; CHECK-NEXT: sub w9, w9, w2 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %skip = zext i8 %numskipbits to i64 %shifted = lshr i64 %val, %skip @@ -662,10 +655,9 @@ define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind ; CHECK: // %bb.0: ; CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: neg x9, x2 -; CHECK-NEXT: mov x10, #-1 // =0xffffffffffffffff -; CHECK-NEXT: lsr x9, x10, x9 ; CHECK-NEXT: lsr x8, x8, x1 -; CHECK-NEXT: and x0, x9, x8 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %val = load i64, ptr %w %shifted = lshr i64 %val, %numskipbits @@ -679,13 +671,12 @@ define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n ; CHECK-LABEL: bextr64_c3_load_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: ldr x8, [x0] +; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 ; CHECK-NEXT: mov w9, #64 // =0x40 -; CHECK-NEXT: mov x10, #-1 // =0xffffffffffffffff ; CHECK-NEXT: sub w9, w9, w2 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 ; CHECK-NEXT: lsr x8, x8, x1 -; CHECK-NEXT: lsr x9, x10, x9 -; CHECK-NEXT: and x0, x9, x8 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %val = load i64, ptr %w %skip = zext i8 %numskipbits to i64 @@ -700,11 +691,10 @@ define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) n define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_c4_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: neg x8, x2 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x10, x8 +; CHECK-NEXT: lsr x8, x0, x1 +; CHECK-NEXT: neg x9, x2 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %shifted = lshr i64 %val, %numskipbits %numhighbits = sub i64 64, %numlowbits @@ -737,11 +727,10 @@ define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_32_c1: ; CHECK: // %bb.0: -; CHECK-NEXT: neg w8, w2 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsr 
x8, x0, x1 +; CHECK-NEXT: neg w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %shifted = lshr i64 %val, %numskipbits %truncshifted = trunc i64 %shifted to i32 @@ -756,11 +745,10 @@ define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { ; CHECK-LABEL: bextr64_32_c2: ; CHECK: // %bb.0: -; CHECK-NEXT: neg w8, w2 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: lsr x10, x0, x1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsr x8, x0, x1 +; CHECK-NEXT: neg w9, w2 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %shifted = lshr i64 %val, %numskipbits %numhighbits = sub i32 32, %numlowbits diff --git a/llvm/test/CodeGen/AArch64/extract-lowbits.ll b/llvm/test/CodeGen/AArch64/extract-lowbits.ll index 4b8f3e86b5fef..368440c65df84 100644 --- a/llvm/test/CodeGen/AArch64/extract-lowbits.ll +++ b/llvm/test/CodeGen/AArch64/extract-lowbits.ll @@ -347,10 +347,9 @@ define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind { define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c0: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: neg w9, w1 -; CHECK-NEXT: lsr w8, w8, w9 -; CHECK-NEXT: and w0, w8, w0 +; CHECK-NEXT: neg w8, w1 +; CHECK-NEXT: lsl w9, w0, w8 +; CHECK-NEXT: lsr w0, w9, w8 ; CHECK-NEXT: ret %numhighbits = sub i32 32, %numlowbits %mask = lshr i32 -1, %numhighbits @@ -362,10 +361,9 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c1_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #32 // =0x20 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff ; CHECK-NEXT: sub w8, w8, w1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w0 +; CHECK-NEXT: lsl w9, w0, w8 +; CHECK-NEXT: lsr w0, w9, w8 ; CHECK-NEXT: ret %numhighbits = sub i8 32, %numlowbits %sh_prom = zext i8 %numhighbits to i32 @@ -377,11 +375,10 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind { define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c2_load: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #-1 // =0xffffffff +; CHECK-NEXT: ldr w8, [x0] ; CHECK-NEXT: neg w9, w1 -; CHECK-NEXT: ldr w10, [x0] -; CHECK-NEXT: lsr w8, w8, w9 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsl w8, w8, w9 +; CHECK-NEXT: lsr w0, w8, w9 ; CHECK-NEXT: ret %val = load i32, ptr %w %numhighbits = sub i32 32, %numlowbits @@ -394,11 +391,10 @@ define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c3_load_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #32 // =0x20 -; CHECK-NEXT: mov w9, #-1 // =0xffffffff -; CHECK-NEXT: ldr w10, [x0] +; CHECK-NEXT: ldr w9, [x0] ; CHECK-NEXT: sub w8, w8, w1 -; CHECK-NEXT: lsr w8, w9, w8 -; CHECK-NEXT: and w0, w8, w10 +; CHECK-NEXT: lsl w9, w9, w8 +; CHECK-NEXT: lsr w0, w9, w8 ; CHECK-NEXT: ret %val = load i32, ptr %w %numhighbits = sub i8 32, %numlowbits @@ -411,10 +407,9 @@ define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind { ; CHECK-LABEL: bzhi32_c4_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, #-1 // =0xffffffff -; CHECK-NEXT: neg w9, w1 -; CHECK-NEXT: lsr w8, w8, w9 -; CHECK-NEXT: and w0, w0, w8 +; CHECK-NEXT: neg w8, w1 +; CHECK-NEXT: lsl w9, w0, w8 +; CHECK-NEXT: lsr w0, w9, w8 ; CHECK-NEXT: ret %numhighbits = sub i32 32, 
%numlowbits %mask = lshr i32 -1, %numhighbits @@ -427,10 +422,9 @@ define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind { define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c0: ; CHECK: // %bb.0: -; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff -; CHECK-NEXT: neg x9, x1 -; CHECK-NEXT: lsr x8, x8, x9 -; CHECK-NEXT: and x0, x8, x0 +; CHECK-NEXT: neg x8, x1 +; CHECK-NEXT: lsl x9, x0, x8 +; CHECK-NEXT: lsr x0, x9, x8 ; CHECK-NEXT: ret %numhighbits = sub i64 64, %numlowbits %mask = lshr i64 -1, %numhighbits @@ -442,10 +436,9 @@ define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c1_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #64 // =0x40 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff ; CHECK-NEXT: sub w8, w8, w1 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x8, x0 +; CHECK-NEXT: lsl x9, x0, x8 +; CHECK-NEXT: lsr x0, x9, x8 ; CHECK-NEXT: ret %numhighbits = sub i8 64, %numlowbits %sh_prom = zext i8 %numhighbits to i64 @@ -457,11 +450,10 @@ define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind { define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c2_load: ; CHECK: // %bb.0: -; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff +; CHECK-NEXT: ldr x8, [x0] ; CHECK-NEXT: neg x9, x1 -; CHECK-NEXT: ldr x10, [x0] -; CHECK-NEXT: lsr x8, x8, x9 -; CHECK-NEXT: and x0, x8, x10 +; CHECK-NEXT: lsl x8, x8, x9 +; CHECK-NEXT: lsr x0, x8, x9 ; CHECK-NEXT: ret %val = load i64, ptr %w %numhighbits = sub i64 64, %numlowbits @@ -474,11 +466,10 @@ define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c3_load_indexzext: ; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #64 // =0x40 -; CHECK-NEXT: mov x9, #-1 // =0xffffffffffffffff -; CHECK-NEXT: ldr x10, [x0] +; CHECK-NEXT: ldr x9, [x0] ; CHECK-NEXT: sub w8, w8, w1 -; CHECK-NEXT: lsr x8, x9, x8 -; CHECK-NEXT: and x0, x8, x10 +; CHECK-NEXT: lsl x9, x9, x8 +; CHECK-NEXT: lsr x0, x9, x8 ; CHECK-NEXT: ret %val = load i64, ptr %w %numhighbits = sub i8 64, %numlowbits @@ -491,10 +482,9 @@ define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind { ; CHECK-LABEL: bzhi64_c4_commutative: ; CHECK: // %bb.0: -; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff -; CHECK-NEXT: neg x9, x1 -; CHECK-NEXT: lsr x8, x8, x9 -; CHECK-NEXT: and x0, x0, x8 +; CHECK-NEXT: neg x8, x1 +; CHECK-NEXT: lsl x9, x0, x8 +; CHECK-NEXT: lsr x0, x9, x8 ; CHECK-NEXT: ret %numhighbits = sub i64 64, %numlowbits %mask = lshr i64 -1, %numhighbits diff --git a/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll b/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll index 832e34b664fbe..f5cf629b2a4a4 100644 --- a/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll +++ b/llvm/test/CodeGen/AArch64/extract-vector-cmp.ll @@ -75,10 +75,9 @@ define void @vector_loop_with_icmp(ptr nocapture noundef writeonly %dest) { ; CHECK-LABEL: vector_loop_with_icmp: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: index z0.d, #0, #1 -; CHECK-NEXT: mov w8, #2 // =0x2 -; CHECK-NEXT: mov w9, #16 // =0x10 -; CHECK-NEXT: dup v1.2d, x8 +; CHECK-NEXT: mov z1.d, #2 // =0x2 ; CHECK-NEXT: add x8, x0, #4 +; CHECK-NEXT: mov w9, #16 // =0x10 ; CHECK-NEXT: mov w10, #1 // =0x1 ; CHECK-NEXT: b .LBB5_2 ; CHECK-NEXT: .LBB5_1: // %pred.store.continue6 diff --git a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll index 8bc3497ad3c3c..6233ce743b706 100644 --- 
a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll +++ b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll @@ -1,20 +1,30 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-CVT -; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-FP16 +; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-SD +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-SD +; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-GI define <4 x half> @add_h(<4 x half> %a, <4 x half> %b) { -; CHECK-CVT-LABEL: add_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fadd v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: add_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fadd v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: add_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fadd v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: add_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fadd v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fadd <4 x half> %a, %b @@ -22,28 +32,54 @@ entry: } define <4 x half> @build_h4(<4 x half> %a) { -; CHECK-COMMON-LABEL: build_h4: -; CHECK-COMMON: // %bb.0: // %entry -; CHECK-COMMON-NEXT: mov w8, #15565 // =0x3ccd -; CHECK-COMMON-NEXT: dup v0.4h, w8 -; CHECK-COMMON-NEXT: ret +; CHECK-CVT-SD-LABEL: build_h4: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: mov w8, #15565 // =0x3ccd +; CHECK-CVT-SD-NEXT: dup v0.4h, w8 +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: build_h4: +; CHECK-FP16-SD: // %bb.0: // %entry +; CHECK-FP16-SD-NEXT: mov w8, #15565 // =0x3ccd +; CHECK-FP16-SD-NEXT: dup v0.4h, w8 +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: build_h4: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: adrp x8, .LCPI1_0 +; CHECK-CVT-GI-NEXT: ldr d0, [x8, :lo12:.LCPI1_0] +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: build_h4: +; CHECK-FP16-GI: // %bb.0: // %entry +; CHECK-FP16-GI-NEXT: adrp x8, .LCPI1_0 +; CHECK-FP16-GI-NEXT: ldr d0, [x8, :lo12:.LCPI1_0] +; CHECK-FP16-GI-NEXT: ret entry: ret <4 x half> } define <4 x half> @sub_h(<4 x half> %a, <4 x half> %b) { -; CHECK-CVT-LABEL: sub_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fsub v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sub_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fsub v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sub_h: ; CHECK-FP16: // 
%bb.0: // %entry ; CHECK-FP16-NEXT: fsub v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sub_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fsub v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fsub <4 x half> %a, %b @@ -51,18 +87,26 @@ entry: } define <4 x half> @mul_h(<4 x half> %a, <4 x half> %b) { -; CHECK-CVT-LABEL: mul_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fmul v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: mul_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fmul v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: mul_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fmul v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: mul_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fmul v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fmul <4 x half> %a, %b @@ -70,18 +114,26 @@ entry: } define <4 x half> @div_h(<4 x half> %a, <4 x half> %b) { -; CHECK-CVT-LABEL: div_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fdiv v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: div_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fdiv v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: div_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fdiv v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: div_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fdiv v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fdiv <4 x half> %a, %b @@ -89,92 +141,162 @@ entry: } define <4 x half> @load_h(ptr %a) { -; CHECK-COMMON-LABEL: load_h: -; CHECK-COMMON: // %bb.0: // %entry -; CHECK-COMMON-NEXT: ldr d0, [x0] -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: load_h: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ret entry: %0 = load <4 x half>, ptr %a, align 4 ret <4 x half> %0 } define void @store_h(ptr %a, <4 x half> %b) { -; CHECK-COMMON-LABEL: store_h: -; CHECK-COMMON: // %bb.0: // %entry -; CHECK-COMMON-NEXT: str d0, [x0] -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: store_h: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret entry: store <4 x half> %b, ptr %a, align 4 ret void } define <4 x half> @s_to_h(<4 x float> %a) { -; CHECK-COMMON-LABEL: s_to_h: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: s_to_h: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = fptrunc <4 x float> %a to <4 x half> ret <4 x half> %1 } define <4 x half> @d_to_h(<4 x double> %a) { -; CHECK-COMMON-LABEL: d_to_h: -; CHECK-COMMON: // 
%bb.0: -; CHECK-COMMON-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-COMMON-NEXT: fcvtxn2 v0.4s, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-CVT-SD-LABEL: d_to_h: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-CVT-SD-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: d_to_h: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-FP16-SD-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: d_to_h: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: mov d2, v0.d[1] +; CHECK-CVT-GI-NEXT: fcvt h0, d0 +; CHECK-CVT-GI-NEXT: mov d3, v1.d[1] +; CHECK-CVT-GI-NEXT: fcvt h1, d1 +; CHECK-CVT-GI-NEXT: fcvt h2, d2 +; CHECK-CVT-GI-NEXT: mov v0.h[1], v2.h[0] +; CHECK-CVT-GI-NEXT: fcvt h2, d3 +; CHECK-CVT-GI-NEXT: mov v0.h[2], v1.h[0] +; CHECK-CVT-GI-NEXT: mov v0.h[3], v2.h[0] +; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: d_to_h: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: mov d2, v0.d[1] +; CHECK-FP16-GI-NEXT: fcvt h0, d0 +; CHECK-FP16-GI-NEXT: mov d3, v1.d[1] +; CHECK-FP16-GI-NEXT: fcvt h1, d1 +; CHECK-FP16-GI-NEXT: fcvt h2, d2 +; CHECK-FP16-GI-NEXT: mov v0.h[1], v2.h[0] +; CHECK-FP16-GI-NEXT: fcvt h2, d3 +; CHECK-FP16-GI-NEXT: mov v0.h[2], v1.h[0] +; CHECK-FP16-GI-NEXT: mov v0.h[3], v2.h[0] +; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-FP16-GI-NEXT: ret %1 = fptrunc <4 x double> %a to <4 x half> ret <4 x half> %1 } define <4 x float> @h_to_s(<4 x half> %a) { -; CHECK-COMMON-LABEL: h_to_s: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fcvtl v0.4s, v0.4h -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: h_to_s: +; CHECK: // %bb.0: +; CHECK-NEXT: fcvtl v0.4s, v0.4h +; CHECK-NEXT: ret %1 = fpext <4 x half> %a to <4 x float> ret <4 x float> %1 } define <4 x double> @h_to_d(<4 x half> %a) { -; CHECK-COMMON-LABEL: h_to_d: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fcvtl v0.4s, v0.4h -; CHECK-COMMON-NEXT: fcvtl2 v1.2d, v0.4s -; CHECK-COMMON-NEXT: fcvtl v0.2d, v0.2s -; CHECK-COMMON-NEXT: ret +; CHECK-CVT-SD-LABEL: h_to_d: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.2d, v0.4s +; CHECK-CVT-SD-NEXT: fcvtl v0.2d, v0.2s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: h_to_d: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-FP16-SD-NEXT: fcvtl2 v1.2d, v0.4s +; CHECK-FP16-SD-NEXT: fcvtl v0.2d, v0.2s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: h_to_d: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-CVT-GI-NEXT: mov h1, v0.h[1] +; CHECK-CVT-GI-NEXT: mov h2, v0.h[2] +; CHECK-CVT-GI-NEXT: mov h3, v0.h[3] +; CHECK-CVT-GI-NEXT: fcvt d0, h0 +; CHECK-CVT-GI-NEXT: fcvt d4, h1 +; CHECK-CVT-GI-NEXT: fcvt d1, h2 +; CHECK-CVT-GI-NEXT: fcvt d2, h3 +; CHECK-CVT-GI-NEXT: mov v0.d[1], v4.d[0] +; CHECK-CVT-GI-NEXT: mov v1.d[1], v2.d[0] +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: h_to_d: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-FP16-GI-NEXT: mov h1, v0.h[1] +; CHECK-FP16-GI-NEXT: mov h2, v0.h[2] +; CHECK-FP16-GI-NEXT: mov h3, v0.h[3] +; CHECK-FP16-GI-NEXT: fcvt d0, h0 +; CHECK-FP16-GI-NEXT: fcvt d4, h1 +; CHECK-FP16-GI-NEXT: fcvt d1, h2 +; CHECK-FP16-GI-NEXT: fcvt d2, h3 +; CHECK-FP16-GI-NEXT: mov v0.d[1], v4.d[0] +; 
CHECK-FP16-GI-NEXT: mov v1.d[1], v2.d[0] +; CHECK-FP16-GI-NEXT: ret %1 = fpext <4 x half> %a to <4 x double> ret <4 x double> %1 } define <4 x half> @bitcast_i_to_h(float, <4 x i16> %a) { -; CHECK-COMMON-LABEL: bitcast_i_to_h: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fmov d0, d1 -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: bitcast_i_to_h: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret %2 = bitcast <4 x i16> %a to <4 x half> ret <4 x half> %2 } define <4 x i16> @bitcast_h_to_i(float, <4 x half> %a) { -; CHECK-COMMON-LABEL: bitcast_h_to_i: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: fmov d0, d1 -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: bitcast_h_to_i: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret %2 = bitcast <4 x half> %a to <4 x i16> ret <4 x i16> %2 } define <4 x half> @sitofp_i8(<4 x i8> %a) #0 { -; CHECK-CVT-LABEL: sitofp_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: shl v0.4h, v0.4h, #8 -; CHECK-CVT-NEXT: sshr v0.4h, v0.4h, #8 -; CHECK-CVT-NEXT: sshll v0.4s, v0.4h, #0 -; CHECK-CVT-NEXT: scvtf v0.4s, v0.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: shl v0.4h, v0.4h, #8 +; CHECK-CVT-SD-NEXT: sshr v0.4h, v0.4h, #8 +; CHECK-CVT-SD-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: scvtf v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sitofp_i8: ; CHECK-FP16: // %bb.0: @@ -182,6 +304,15 @@ define <4 x half> @sitofp_i8(<4 x i8> %a) #0 { ; CHECK-FP16-NEXT: sshr v0.4h, v0.4h, #8 ; CHECK-FP16-NEXT: scvtf v0.4h, v0.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: shl v0.4s, v0.4s, #24 +; CHECK-CVT-GI-NEXT: sshr v0.4s, v0.4s, #24 +; CHECK-CVT-GI-NEXT: scvtf v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = sitofp <4 x i8> %a to <4 x half> ret <4 x half> %1 } @@ -204,43 +335,59 @@ define <4 x half> @sitofp_i16(<4 x i16> %a) #0 { define <4 x half> @sitofp_i32(<4 x i32> %a) #0 { -; CHECK-COMMON-LABEL: sitofp_i32: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: scvtf v0.4s, v0.4s -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: sitofp_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: scvtf v0.4s, v0.4s +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = sitofp <4 x i32> %a to <4 x half> ret <4 x half> %1 } define <4 x half> @sitofp_i64(<4 x i64> %a) #0 { -; CHECK-COMMON-LABEL: sitofp_i64: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: scvtf v0.2d, v0.2d -; CHECK-COMMON-NEXT: scvtf v1.2d, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.2s, v0.2d -; CHECK-COMMON-NEXT: fcvtn2 v0.4s, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: sitofp_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: scvtf v0.2d, v0.2d +; CHECK-NEXT: scvtf v1.2d, v1.2d +; CHECK-NEXT: fcvtn v0.2s, v0.2d +; CHECK-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = sitofp <4 x i64> %a to <4 x half> ret <4 x half> %1 } define <4 x half> @uitofp_i8(<4 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: bic v0.4h, #255, lsl #8 -; CHECK-CVT-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ucvtf v0.4s, v0.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: bic v0.4h, #255, lsl #8 +; 
CHECK-CVT-SD-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; -; CHECK-FP16-LABEL: uitofp_i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: bic v0.4h, #255, lsl #8 -; CHECK-FP16-NEXT: ucvtf v0.4h, v0.4h -; CHECK-FP16-NEXT: ret +; CHECK-FP16-SD-LABEL: uitofp_i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: bic v0.4h, #255, lsl #8 +; CHECK-FP16-SD-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: movi v1.2d, #0x0000ff000000ff +; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-CVT-GI-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: movi d1, #0xff00ff00ff00ff +; CHECK-FP16-GI-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-FP16-GI-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <4 x i8> %a to <4 x half> ret <4 x half> %1 } @@ -264,35 +411,35 @@ define <4 x half> @uitofp_i16(<4 x i16> %a) #0 { define <4 x half> @uitofp_i32(<4 x i32> %a) #0 { -; CHECK-COMMON-LABEL: uitofp_i32: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: ucvtf v0.4s, v0.4s -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: uitofp_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ucvtf v0.4s, v0.4s +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = uitofp <4 x i32> %a to <4 x half> ret <4 x half> %1 } define <4 x half> @uitofp_i64(<4 x i64> %a) #0 { -; CHECK-COMMON-LABEL: uitofp_i64: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: ucvtf v0.2d, v0.2d -; CHECK-COMMON-NEXT: ucvtf v1.2d, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.2s, v0.2d -; CHECK-COMMON-NEXT: fcvtn2 v0.4s, v1.2d -; CHECK-COMMON-NEXT: fcvtn v0.4h, v0.4s -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: uitofp_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ucvtf v0.2d, v0.2d +; CHECK-NEXT: ucvtf v1.2d, v1.2d +; CHECK-NEXT: fcvtn v0.2s, v0.2d +; CHECK-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-NEXT: fcvtn v0.4h, v0.4s +; CHECK-NEXT: ret %1 = uitofp <4 x i64> %a to <4 x half> ret <4 x half> %1 } define void @test_insert_at_zero(half %a, ptr %b) #0 { -; CHECK-COMMON-LABEL: test_insert_at_zero: -; CHECK-COMMON: // %bb.0: -; CHECK-COMMON-NEXT: // kill: def $h0 killed $h0 def $d0 -; CHECK-COMMON-NEXT: str d0, [x0] -; CHECK-COMMON-NEXT: ret +; CHECK-LABEL: test_insert_at_zero: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0 +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret %1 = insertelement <4 x half> undef, half %a, i64 0 store <4 x half> %1, ptr %b, align 4 ret void @@ -331,17 +478,29 @@ define <4 x i16> @fptosi_i16(<4 x half> %a) #0 { } define <4 x i8> @fptoui_i8(<4 x half> %a) #0 { -; CHECK-CVT-LABEL: fptoui_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptoui_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; -; CHECK-FP16-LABEL: fptoui_i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: fcvtzs v0.4h, v0.4h -; CHECK-FP16-NEXT: ret +; CHECK-FP16-SD-LABEL: fptoui_i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtzs v0.4h, v0.4h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptoui_i8: +; CHECK-CVT-GI: 
// %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: fptoui_i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: fcvtzu v0.4h, v0.4h +; CHECK-FP16-GI-NEXT: ret ; NOTE: fcvtzs selected here because the xtn shaves the sign bit %1 = fptoui<4 x half> %a to <4 x i8> ret <4 x i8> %1 @@ -364,36 +523,45 @@ define <4 x i16> @fptoui_i16(<4 x half> %a) #0 { } define <4 x i1> @test_fcmp_une(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_une: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_une: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_une: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmeq v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_une: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp une <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ueq(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ueq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ueq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ueq: ; CHECK-FP16: // %bb.0: @@ -402,102 +570,149 @@ define <4 x i1> @test_fcmp_ueq(<4 x half> %a, <4 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ueq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ueq <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ugt(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ugt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: 
test_fcmp_ugt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ugt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.4h, v1.4h, v0.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ugt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ugt <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_uge(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uge: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uge: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uge <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ult(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ult: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ult: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ult: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ult: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ult <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ule(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ule: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ule: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, 
v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ule: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ule: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ule <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_uno(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uno: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: mvn v0.8b, v0.8b -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uno: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: mvn v0.8b, v0.8b +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uno: ; CHECK-FP16: // %bb.0: @@ -506,21 +721,32 @@ define <4 x i1> @test_fcmp_uno(<4 x half> %a, <4 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b ; CHECK-FP16-NEXT: mvn v0.8b, v0.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uno: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uno <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_one(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_one: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v2.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_one: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_one: ; CHECK-FP16: // %bb.0: @@ -528,60 +754,94 @@ define <4 x i1> @test_fcmp_one(<4 x half> %a, <4 x half> %b) #0 { ; CHECK-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h ; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_one: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, 
v2.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp one <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_oeq(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oeq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oeq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oeq: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmeq v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oeq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oeq <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_ogt(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ogt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ogt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ogt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ogt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ogt <4 x half> %a, %b ret <4 x i1> %1 } define <4 x i1> @test_fcmp_oge(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oge: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.4h, v0.4h, v1.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oge: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oge <4 x half> %a, %b ret <4 x i1> %1 @@ -624,15 +884,15 @@ define <4 x i1> @test_fcmp_ole(<4 x half> %a, <4 x half> %b) #0 { } define <4 x i1> @test_fcmp_ord(<4 x half> %a, <4 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ord: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v0.16b, 
v0.16b, v2.16b -; CHECK-CVT-NEXT: xtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ord: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-SD-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ord: ; CHECK-FP16: // %bb.0: @@ -640,6 +900,16 @@ define <4 x i1> @test_fcmp_ord(<4 x half> %a, <4 x half> %b) #0 { ; CHECK-FP16-NEXT: fcmgt v0.4h, v1.4h, v0.4h ; CHECK-FP16-NEXT: orr v0.8b, v0.8b, v2.8b ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ord: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v2.16b +; CHECK-CVT-GI-NEXT: xtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ord <4 x half> %a, %b ret <4 x i1> %1 diff --git a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll index fcb42a74ce697..86763eb5f9e3b 100644 --- a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll +++ b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll @@ -1,24 +1,38 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT -; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16 +; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-SD +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-SD +; RUN: llc < %s -mtriple=aarch64 -mattr=-fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-CVT,CHECK-CVT-GI +; RUN: llc < %s -mtriple=aarch64 -mattr=+fullfp16 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-FP16,CHECK-FP16-GI define <8 x half> @add_h(<8 x half> %a, <8 x half> %b) { -; CHECK-CVT-LABEL: add_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-CVT-NEXT: fadd v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fadd v1.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: add_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-SD-NEXT: fadd v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fadd v1.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: add_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fadd v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: add_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fadd v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fadd v1.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; 
CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fadd <8 x half> %a, %b ret <8 x half> %0 @@ -26,22 +40,34 @@ entry: define <8 x half> @sub_h(<8 x half> %a, <8 x half> %b) { -; CHECK-CVT-LABEL: sub_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-CVT-NEXT: fsub v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fsub v1.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sub_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-SD-NEXT: fsub v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fsub v1.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sub_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fsub v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sub_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fsub v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fsub v1.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fsub <8 x half> %a, %b ret <8 x half> %0 @@ -49,22 +75,34 @@ entry: define <8 x half> @mul_h(<8 x half> %a, <8 x half> %b) { -; CHECK-CVT-LABEL: mul_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-CVT-NEXT: fmul v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fmul v1.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: mul_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-SD-NEXT: fmul v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fmul v1.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: mul_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fmul v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: mul_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fmul v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fmul v1.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fmul <8 x half> %a, %b ret <8 x half> %0 @@ -72,22 +110,34 @@ entry: define <8 x half> @div_h(<8 x half> %a, <8 x half> %b) { -; CHECK-CVT-LABEL: div_h: -; CHECK-CVT: // %bb.0: // %entry -; CHECK-CVT-NEXT: fcvtl v2.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v3.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v0.4s, v0.8h -; CHECK-CVT-NEXT: fdiv v2.4s, v3.4s, v2.4s -; 
CHECK-CVT-NEXT: fdiv v1.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: div_h: +; CHECK-CVT-SD: // %bb.0: // %entry +; CHECK-CVT-SD-NEXT: fcvtl v2.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v3.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-SD-NEXT: fdiv v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fdiv v1.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: div_h: ; CHECK-FP16: // %bb.0: // %entry ; CHECK-FP16-NEXT: fdiv v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: div_h: +; CHECK-CVT-GI: // %bb.0: // %entry +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fdiv v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fdiv v1.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-GI-NEXT: ret entry: %0 = fdiv <8 x half> %a, %b ret <8 x half> %0 @@ -126,39 +176,171 @@ define <8 x half> @s_to_h(<8 x float> %a) { } define <8 x half> @d_to_h(<8 x double> %a) { -; CHECK-LABEL: d_to_h: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtxn v0.2s, v0.2d -; CHECK-NEXT: fcvtxn v2.2s, v2.2d -; CHECK-NEXT: fcvtxn2 v0.4s, v1.2d -; CHECK-NEXT: fcvtxn2 v2.4s, v3.2d -; CHECK-NEXT: fcvtn v0.4h, v0.4s -; CHECK-NEXT: fcvtn2 v0.8h, v2.4s -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: d_to_h: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-CVT-SD-NEXT: fcvtxn v2.2s, v2.2d +; CHECK-CVT-SD-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-CVT-SD-NEXT: fcvtxn2 v2.4s, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: d_to_h: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtxn v0.2s, v0.2d +; CHECK-FP16-SD-NEXT: fcvtxn v2.2s, v2.2d +; CHECK-FP16-SD-NEXT: fcvtxn2 v0.4s, v1.2d +; CHECK-FP16-SD-NEXT: fcvtxn2 v2.4s, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: d_to_h: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: mov d4, v0.d[1] +; CHECK-CVT-GI-NEXT: fcvt h0, d0 +; CHECK-CVT-GI-NEXT: mov d5, v1.d[1] +; CHECK-CVT-GI-NEXT: fcvt h1, d1 +; CHECK-CVT-GI-NEXT: fcvt h4, d4 +; CHECK-CVT-GI-NEXT: mov v0.h[1], v4.h[0] +; CHECK-CVT-GI-NEXT: fcvt h4, d5 +; CHECK-CVT-GI-NEXT: mov v0.h[2], v1.h[0] +; CHECK-CVT-GI-NEXT: mov d1, v2.d[1] +; CHECK-CVT-GI-NEXT: fcvt h2, d2 +; CHECK-CVT-GI-NEXT: mov v0.h[3], v4.h[0] +; CHECK-CVT-GI-NEXT: fcvt h1, d1 +; CHECK-CVT-GI-NEXT: mov v0.h[4], v2.h[0] +; CHECK-CVT-GI-NEXT: mov d2, v3.d[1] +; CHECK-CVT-GI-NEXT: fcvt h3, d3 +; CHECK-CVT-GI-NEXT: mov v0.h[5], v1.h[0] +; CHECK-CVT-GI-NEXT: fcvt h1, d2 +; CHECK-CVT-GI-NEXT: mov v0.h[6], v3.h[0] +; CHECK-CVT-GI-NEXT: mov v0.h[7], v1.h[0] +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: d_to_h: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: mov d4, v0.d[1] +; CHECK-FP16-GI-NEXT: fcvt h0, d0 +; CHECK-FP16-GI-NEXT: mov d5, v1.d[1] +; CHECK-FP16-GI-NEXT: fcvt h1, d1 +; CHECK-FP16-GI-NEXT: fcvt h4, d4 +; CHECK-FP16-GI-NEXT: mov v0.h[1], v4.h[0] +; CHECK-FP16-GI-NEXT: fcvt h4, d5 +; CHECK-FP16-GI-NEXT: mov v0.h[2], v1.h[0] +; CHECK-FP16-GI-NEXT: mov d1, v2.d[1] +; CHECK-FP16-GI-NEXT: fcvt h2, d2 +; CHECK-FP16-GI-NEXT: mov 
v0.h[3], v4.h[0] +; CHECK-FP16-GI-NEXT: fcvt h1, d1 +; CHECK-FP16-GI-NEXT: mov v0.h[4], v2.h[0] +; CHECK-FP16-GI-NEXT: mov d2, v3.d[1] +; CHECK-FP16-GI-NEXT: fcvt h3, d3 +; CHECK-FP16-GI-NEXT: mov v0.h[5], v1.h[0] +; CHECK-FP16-GI-NEXT: fcvt h1, d2 +; CHECK-FP16-GI-NEXT: mov v0.h[6], v3.h[0] +; CHECK-FP16-GI-NEXT: mov v0.h[7], v1.h[0] +; CHECK-FP16-GI-NEXT: ret %1 = fptrunc <8 x double> %a to <8 x half> ret <8 x half> %1 } define <8 x float> @h_to_s(<8 x half> %a) { -; CHECK-LABEL: h_to_s: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-NEXT: fcvtl v0.4s, v0.4h -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: h_to_s: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: h_to_s: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-FP16-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: h_to_s: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-GI-NEXT: mov v0.16b, v2.16b +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: h_to_s: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-FP16-GI-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-FP16-GI-NEXT: mov v0.16b, v2.16b +; CHECK-FP16-GI-NEXT: ret %1 = fpext <8 x half> %a to <8 x float> ret <8 x float> %1 } define <8 x double> @h_to_d(<8 x half> %a) { -; CHECK-LABEL: h_to_d: -; CHECK: // %bb.0: -; CHECK-NEXT: fcvtl v1.4s, v0.4h -; CHECK-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-NEXT: fcvtl v0.2d, v1.2s -; CHECK-NEXT: fcvtl2 v3.2d, v2.4s -; CHECK-NEXT: fcvtl2 v1.2d, v1.4s -; CHECK-NEXT: fcvtl v2.2d, v2.2s -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: h_to_d: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.2d, v1.2s +; CHECK-CVT-SD-NEXT: fcvtl2 v3.2d, v2.4s +; CHECK-CVT-SD-NEXT: fcvtl2 v1.2d, v1.4s +; CHECK-CVT-SD-NEXT: fcvtl v2.2d, v2.2s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: h_to_d: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: fcvtl v1.4s, v0.4h +; CHECK-FP16-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-FP16-SD-NEXT: fcvtl v0.2d, v1.2s +; CHECK-FP16-SD-NEXT: fcvtl2 v3.2d, v2.4s +; CHECK-FP16-SD-NEXT: fcvtl2 v1.2d, v1.4s +; CHECK-FP16-SD-NEXT: fcvtl v2.2d, v2.2s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: h_to_d: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: mov h1, v0.h[1] +; CHECK-CVT-GI-NEXT: mov h2, v0.h[2] +; CHECK-CVT-GI-NEXT: mov h3, v0.h[3] +; CHECK-CVT-GI-NEXT: mov h4, v0.h[4] +; CHECK-CVT-GI-NEXT: mov h5, v0.h[5] +; CHECK-CVT-GI-NEXT: mov h6, v0.h[6] +; CHECK-CVT-GI-NEXT: mov h7, v0.h[7] +; CHECK-CVT-GI-NEXT: fcvt d0, h0 +; CHECK-CVT-GI-NEXT: fcvt d16, h1 +; CHECK-CVT-GI-NEXT: fcvt d1, h2 +; CHECK-CVT-GI-NEXT: fcvt d17, h3 +; CHECK-CVT-GI-NEXT: fcvt d2, h4 +; CHECK-CVT-GI-NEXT: fcvt d4, h5 +; CHECK-CVT-GI-NEXT: fcvt d3, h6 +; CHECK-CVT-GI-NEXT: fcvt d5, h7 +; CHECK-CVT-GI-NEXT: mov v0.d[1], v16.d[0] +; CHECK-CVT-GI-NEXT: mov v1.d[1], v17.d[0] +; CHECK-CVT-GI-NEXT: mov v2.d[1], v4.d[0] +; CHECK-CVT-GI-NEXT: mov v3.d[1], v5.d[0] +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: h_to_d: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: mov h1, v0.h[1] +; CHECK-FP16-GI-NEXT: mov h2, v0.h[2] +; CHECK-FP16-GI-NEXT: mov h3, v0.h[3] +; CHECK-FP16-GI-NEXT: mov h4, v0.h[4] +; CHECK-FP16-GI-NEXT: mov h5, v0.h[5] +; CHECK-FP16-GI-NEXT: mov h6, v0.h[6] +; CHECK-FP16-GI-NEXT: mov 
h7, v0.h[7] +; CHECK-FP16-GI-NEXT: fcvt d0, h0 +; CHECK-FP16-GI-NEXT: fcvt d16, h1 +; CHECK-FP16-GI-NEXT: fcvt d1, h2 +; CHECK-FP16-GI-NEXT: fcvt d17, h3 +; CHECK-FP16-GI-NEXT: fcvt d2, h4 +; CHECK-FP16-GI-NEXT: fcvt d4, h5 +; CHECK-FP16-GI-NEXT: fcvt d3, h6 +; CHECK-FP16-GI-NEXT: fcvt d5, h7 +; CHECK-FP16-GI-NEXT: mov v0.d[1], v16.d[0] +; CHECK-FP16-GI-NEXT: mov v1.d[1], v17.d[0] +; CHECK-FP16-GI-NEXT: mov v2.d[1], v4.d[0] +; CHECK-FP16-GI-NEXT: mov v3.d[1], v5.d[0] +; CHECK-FP16-GI-NEXT: ret %1 = fpext <8 x half> %a to <8 x double> ret <8 x double> %1 } @@ -183,14 +365,14 @@ define <8 x i16> @bitcast_h_to_i(float, <8 x half> %a) { } define <4 x half> @sitofp_v4i8(<4 x i8> %a) #0 { -; CHECK-CVT-LABEL: sitofp_v4i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: shl v0.4h, v0.4h, #8 -; CHECK-CVT-NEXT: sshr v0.4h, v0.4h, #8 -; CHECK-CVT-NEXT: sshll v0.4s, v0.4h, #0 -; CHECK-CVT-NEXT: scvtf v0.4s, v0.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_v4i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: shl v0.4h, v0.4h, #8 +; CHECK-CVT-SD-NEXT: sshr v0.4h, v0.4h, #8 +; CHECK-CVT-SD-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: scvtf v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sitofp_v4i8: ; CHECK-FP16: // %bb.0: @@ -198,76 +380,132 @@ define <4 x half> @sitofp_v4i8(<4 x i8> %a) #0 { ; CHECK-FP16-NEXT: sshr v0.4h, v0.4h, #8 ; CHECK-FP16-NEXT: scvtf v0.4h, v0.4h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_v4i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: shl v0.4s, v0.4s, #24 +; CHECK-CVT-GI-NEXT: sshr v0.4s, v0.4s, #24 +; CHECK-CVT-GI-NEXT: scvtf v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret %1 = sitofp <4 x i8> %a to <4 x half> ret <4 x half> %1 } define <8 x half> @sitofp_v8i8(<8 x i8> %a) #0 { -; CHECK-CVT-LABEL: sitofp_v8i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: sshll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: sshll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: scvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: scvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_v8i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: sshll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: scvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: scvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sitofp_v8i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: sshll v0.8h, v0.8b, #0 ; CHECK-FP16-NEXT: scvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_v8i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: sshll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: scvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: scvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = sitofp <8 x i8> %a to <8 x half> ret <8 x half> %1 } define <16 x half> @sitofp_v16i8(<16 x i8> %a) #0 { -; CHECK-CVT-LABEL: sitofp_v16i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: sshll2 v1.8h, v0.16b, #0 -; CHECK-CVT-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: sshll v2.4s, v1.4h, #0 -; CHECK-CVT-NEXT: sshll v3.4s, 
v0.4h, #0 -; CHECK-CVT-NEXT: sshll2 v4.4s, v1.8h, #0 -; CHECK-CVT-NEXT: sshll2 v5.4s, v0.8h, #0 -; CHECK-CVT-NEXT: scvtf v2.4s, v2.4s -; CHECK-CVT-NEXT: scvtf v3.4s, v3.4s -; CHECK-CVT-NEXT: fcvtn v1.4h, v2.4s -; CHECK-CVT-NEXT: scvtf v2.4s, v4.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v3.4s -; CHECK-CVT-NEXT: scvtf v3.4s, v5.4s -; CHECK-CVT-NEXT: fcvtn2 v1.8h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v3.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: sitofp_v16i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: sshll2 v1.8h, v0.16b, #0 -; CHECK-FP16-NEXT: sshll v0.8h, v0.8b, #0 -; CHECK-FP16-NEXT: scvtf v1.8h, v1.8h -; CHECK-FP16-NEXT: scvtf v0.8h, v0.8h -; CHECK-FP16-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_v16i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: sshll2 v1.8h, v0.16b, #0 +; CHECK-CVT-SD-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: sshll v2.4s, v1.4h, #0 +; CHECK-CVT-SD-NEXT: sshll v3.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: sshll2 v4.4s, v1.8h, #0 +; CHECK-CVT-SD-NEXT: sshll2 v5.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: scvtf v2.4s, v2.4s +; CHECK-CVT-SD-NEXT: scvtf v3.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcvtn v1.4h, v2.4s +; CHECK-CVT-SD-NEXT: scvtf v2.4s, v4.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v3.4s +; CHECK-CVT-SD-NEXT: scvtf v3.4s, v5.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v1.8h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v3.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: sitofp_v16i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: sshll2 v1.8h, v0.16b, #0 +; CHECK-FP16-SD-NEXT: sshll v0.8h, v0.8b, #0 +; CHECK-FP16-SD-NEXT: scvtf v1.8h, v1.8h +; CHECK-FP16-SD-NEXT: scvtf v0.8h, v0.8h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_v16i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: sshll v1.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: sshll2 v0.8h, v0.16b, #0 +; CHECK-CVT-GI-NEXT: sshll v2.4s, v1.4h, #0 +; CHECK-CVT-GI-NEXT: sshll v3.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: sshll2 v1.4s, v1.8h, #0 +; CHECK-CVT-GI-NEXT: sshll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: scvtf v2.4s, v2.4s +; CHECK-CVT-GI-NEXT: scvtf v3.4s, v3.4s +; CHECK-CVT-GI-NEXT: scvtf v4.4s, v1.4s +; CHECK-CVT-GI-NEXT: scvtf v5.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn v1.4h, v3.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v4.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v1.8h, v5.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: sitofp_v16i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: sshll v1.8h, v0.8b, #0 +; CHECK-FP16-GI-NEXT: sshll2 v2.8h, v0.16b, #0 +; CHECK-FP16-GI-NEXT: scvtf v0.8h, v1.8h +; CHECK-FP16-GI-NEXT: scvtf v1.8h, v2.8h +; CHECK-FP16-GI-NEXT: ret %1 = sitofp <16 x i8> %a to <16 x half> ret <16 x half> %1 } define <8 x half> @sitofp_i16(<8 x i16> %a) #0 { -; CHECK-CVT-LABEL: sitofp_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: sshll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: sshll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: scvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: scvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: sshll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: scvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: scvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: sitofp_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: scvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_i16: +; 
CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: sshll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: sshll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: scvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: scvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = sitofp <8 x i16> %a to <8 x half> ret <8 x half> %1 } @@ -286,108 +524,213 @@ define <8 x half> @sitofp_i32(<8 x i32> %a) #0 { define <8 x half> @sitofp_i64(<8 x i64> %a) #0 { -; CHECK-LABEL: sitofp_i64: -; CHECK: // %bb.0: -; CHECK-NEXT: scvtf v0.2d, v0.2d -; CHECK-NEXT: scvtf v2.2d, v2.2d -; CHECK-NEXT: scvtf v1.2d, v1.2d -; CHECK-NEXT: scvtf v3.2d, v3.2d -; CHECK-NEXT: fcvtn v0.2s, v0.2d -; CHECK-NEXT: fcvtn v2.2s, v2.2d -; CHECK-NEXT: fcvtn2 v0.4s, v1.2d -; CHECK-NEXT: fcvtn2 v2.4s, v3.2d -; CHECK-NEXT: fcvtn v0.4h, v0.4s -; CHECK-NEXT: fcvtn2 v0.8h, v2.4s -; CHECK-NEXT: ret +; CHECK-CVT-SD-LABEL: sitofp_i64: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: scvtf v0.2d, v0.2d +; CHECK-CVT-SD-NEXT: scvtf v2.2d, v2.2d +; CHECK-CVT-SD-NEXT: scvtf v1.2d, v1.2d +; CHECK-CVT-SD-NEXT: scvtf v3.2d, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: sitofp_i64: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: scvtf v0.2d, v0.2d +; CHECK-FP16-SD-NEXT: scvtf v2.2d, v2.2d +; CHECK-FP16-SD-NEXT: scvtf v1.2d, v1.2d +; CHECK-FP16-SD-NEXT: scvtf v3.2d, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: sitofp_i64: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: scvtf v0.2d, v0.2d +; CHECK-CVT-GI-NEXT: scvtf v1.2d, v1.2d +; CHECK-CVT-GI-NEXT: scvtf v2.2d, v2.2d +; CHECK-CVT-GI-NEXT: scvtf v3.2d, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: sitofp_i64: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: scvtf v0.2d, v0.2d +; CHECK-FP16-GI-NEXT: scvtf v1.2d, v1.2d +; CHECK-FP16-GI-NEXT: scvtf v2.2d, v2.2d +; CHECK-FP16-GI-NEXT: scvtf v3.2d, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-GI-NEXT: ret %1 = sitofp <8 x i64> %a to <8 x half> ret <8 x half> %1 } define <4 x half> @uitofp_v4i8(<4 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_v4i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: bic v0.4h, #255, lsl #8 -; CHECK-CVT-NEXT: ushll v0.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ucvtf v0.4s, v0.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v0.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: uitofp_v4i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: bic v0.4h, #255, lsl #8 -; CHECK-FP16-NEXT: ucvtf v0.4h, v0.4h -; CHECK-FP16-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_v4i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: bic v0.4h, 
#255, lsl #8 +; CHECK-CVT-SD-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: uitofp_v4i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: bic v0.4h, #255, lsl #8 +; CHECK-FP16-SD-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_v4i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: movi v1.2d, #0x0000ff000000ff +; CHECK-CVT-GI-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-CVT-GI-NEXT: ucvtf v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_v4i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: movi d1, #0xff00ff00ff00ff +; CHECK-FP16-GI-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-FP16-GI-NEXT: ucvtf v0.4h, v0.4h +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <4 x i8> %a to <4 x half> ret <4 x half> %1 } define <8 x half> @uitofp_v8i8(<8 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_v8i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: ushll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ushll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: ucvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: ucvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_v8i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: uitofp_v8i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: ushll v0.8h, v0.8b, #0 ; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_v8i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = uitofp <8 x i8> %a to <8 x half> ret <8 x half> %1 } define <16 x half> @uitofp_v16i8(<16 x i8> %a) #0 { -; CHECK-CVT-LABEL: uitofp_v16i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ushll2 v1.8h, v0.16b, #0 -; CHECK-CVT-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-CVT-NEXT: ushll v2.4s, v1.4h, #0 -; CHECK-CVT-NEXT: ushll v3.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ushll2 v4.4s, v1.8h, #0 -; CHECK-CVT-NEXT: ushll2 v5.4s, v0.8h, #0 -; CHECK-CVT-NEXT: ucvtf v2.4s, v2.4s -; CHECK-CVT-NEXT: ucvtf v3.4s, v3.4s -; CHECK-CVT-NEXT: fcvtn v1.4h, v2.4s -; CHECK-CVT-NEXT: ucvtf v2.4s, v4.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v3.4s -; CHECK-CVT-NEXT: ucvtf v3.4s, v5.4s -; CHECK-CVT-NEXT: fcvtn2 v1.8h, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v3.4s -; CHECK-CVT-NEXT: ret -; -; CHECK-FP16-LABEL: uitofp_v16i8: -; CHECK-FP16: // %bb.0: -; CHECK-FP16-NEXT: ushll2 v1.8h, v0.16b, #0 -; CHECK-FP16-NEXT: ushll v0.8h, v0.8b, #0 -; CHECK-FP16-NEXT: ucvtf v1.8h, v1.8h -; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h -; CHECK-FP16-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_v16i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ushll2 v1.8h, v0.16b, #0 +; CHECK-CVT-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-CVT-SD-NEXT: ushll v2.4s, v1.4h, #0 +; CHECK-CVT-SD-NEXT: ushll 
v3.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v4.4s, v1.8h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v5.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v2.4s, v2.4s +; CHECK-CVT-SD-NEXT: ucvtf v3.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcvtn v1.4h, v2.4s +; CHECK-CVT-SD-NEXT: ucvtf v2.4s, v4.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v3.4s +; CHECK-CVT-SD-NEXT: ucvtf v3.4s, v5.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v1.8h, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v3.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: uitofp_v16i8: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: ushll2 v1.8h, v0.16b, #0 +; CHECK-FP16-SD-NEXT: ushll v0.8h, v0.8b, #0 +; CHECK-FP16-SD-NEXT: ucvtf v1.8h, v1.8h +; CHECK-FP16-SD-NEXT: ucvtf v0.8h, v0.8h +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_v16i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v1.8h, v0.8b, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.8h, v0.16b, #0 +; CHECK-CVT-GI-NEXT: ushll v2.4s, v1.4h, #0 +; CHECK-CVT-GI-NEXT: ushll v3.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v1.4s, v1.8h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v2.4s +; CHECK-CVT-GI-NEXT: ucvtf v3.4s, v3.4s +; CHECK-CVT-GI-NEXT: ucvtf v4.4s, v1.4s +; CHECK-CVT-GI-NEXT: ucvtf v5.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v2.4s +; CHECK-CVT-GI-NEXT: fcvtn v1.4h, v3.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v4.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v1.8h, v5.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_v16i8: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: ushll v1.8h, v0.8b, #0 +; CHECK-FP16-GI-NEXT: ushll2 v2.8h, v0.16b, #0 +; CHECK-FP16-GI-NEXT: ucvtf v0.8h, v1.8h +; CHECK-FP16-GI-NEXT: ucvtf v1.8h, v2.8h +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <16 x i8> %a to <16 x half> ret <16 x half> %1 } define <8 x half> @uitofp_i16(<8 x i16> %a) #0 { -; CHECK-CVT-LABEL: uitofp_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: ushll v1.4s, v0.4h, #0 -; CHECK-CVT-NEXT: ushll2 v2.4s, v0.8h, #0 -; CHECK-CVT-NEXT: ucvtf v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtn v0.4h, v1.4s -; CHECK-CVT-NEXT: ucvtf v1.4s, v2.4s -; CHECK-CVT-NEXT: fcvtn2 v0.8h, v1.4s -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: uitofp_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-SD-NEXT: ushll2 v2.4s, v0.8h, #0 +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-SD-NEXT: ucvtf v1.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v1.4s +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: uitofp_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: ucvtf v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ushll v1.4s, v0.4h, #0 +; CHECK-CVT-GI-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-CVT-GI-NEXT: ucvtf v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: ucvtf v2.4s, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v1.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret %1 = uitofp <8 x i16> %a to <8 x half> ret <8 x half> %1 } @@ -407,19 +750,61 @@ define <8 x half> @uitofp_i32(<8 x i32> %a) #0 { define <8 x half> @uitofp_i64(<8 x i64> %a) #0 { -; CHECK-LABEL: uitofp_i64: -; CHECK: // %bb.0: -; CHECK-NEXT: ucvtf v0.2d, v0.2d -; CHECK-NEXT: ucvtf v2.2d, v2.2d -; CHECK-NEXT: ucvtf v1.2d, v1.2d -; CHECK-NEXT: ucvtf v3.2d, v3.2d -; CHECK-NEXT: fcvtn v0.2s, v0.2d -; CHECK-NEXT: fcvtn v2.2s, v2.2d -; CHECK-NEXT: fcvtn2 v0.4s, v1.2d -; CHECK-NEXT: fcvtn2 v2.4s, v3.2d -; CHECK-NEXT: fcvtn v0.4h, v0.4s -; CHECK-NEXT: fcvtn2 v0.8h, v2.4s -; CHECK-NEXT: ret +; 
CHECK-CVT-SD-LABEL: uitofp_i64: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: ucvtf v0.2d, v0.2d +; CHECK-CVT-SD-NEXT: ucvtf v2.2d, v2.2d +; CHECK-CVT-SD-NEXT: ucvtf v1.2d, v1.2d +; CHECK-CVT-SD-NEXT: ucvtf v3.2d, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-SD-NEXT: ret +; +; CHECK-FP16-SD-LABEL: uitofp_i64: +; CHECK-FP16-SD: // %bb.0: +; CHECK-FP16-SD-NEXT: ucvtf v0.2d, v0.2d +; CHECK-FP16-SD-NEXT: ucvtf v2.2d, v2.2d +; CHECK-FP16-SD-NEXT: ucvtf v1.2d, v1.2d +; CHECK-FP16-SD-NEXT: ucvtf v3.2d, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-SD-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-SD-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-SD-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-SD-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-SD-NEXT: ret +; +; CHECK-CVT-GI-LABEL: uitofp_i64: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: ucvtf v0.2d, v0.2d +; CHECK-CVT-GI-NEXT: ucvtf v1.2d, v1.2d +; CHECK-CVT-GI-NEXT: ucvtf v2.2d, v2.2d +; CHECK-CVT-GI-NEXT: ucvtf v3.2d, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-CVT-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-CVT-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-CVT-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-CVT-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-CVT-GI-NEXT: ret +; +; CHECK-FP16-GI-LABEL: uitofp_i64: +; CHECK-FP16-GI: // %bb.0: +; CHECK-FP16-GI-NEXT: ucvtf v0.2d, v0.2d +; CHECK-FP16-GI-NEXT: ucvtf v1.2d, v1.2d +; CHECK-FP16-GI-NEXT: ucvtf v2.2d, v2.2d +; CHECK-FP16-GI-NEXT: ucvtf v3.2d, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.2s, v0.2d +; CHECK-FP16-GI-NEXT: fcvtn v2.2s, v2.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v0.4s, v1.2d +; CHECK-FP16-GI-NEXT: fcvtn2 v2.4s, v3.2d +; CHECK-FP16-GI-NEXT: fcvtn v0.4h, v0.4s +; CHECK-FP16-GI-NEXT: fcvtn2 v0.8h, v2.4s +; CHECK-FP16-GI-NEXT: ret %1 = uitofp <8 x i64> %a to <8 x half> ret <8 x half> %1 } @@ -436,94 +821,132 @@ define void @test_insert_at_zero(half %a, ptr %b) #0 { } define <8 x i8> @fptosi_i8(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptosi_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptosi_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptosi_i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptosi_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptosi<8 x half> %a to <8 x i8> ret <8 x i8> %1 } define <8 x i16> @fptosi_i16(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptosi_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: 
fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzs v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzs v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptosi_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptosi_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzs v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptosi_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzs v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzs v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptosi<8 x half> %a to <8 x i16> ret <8 x i16> %1 } define <8 x i8> @fptoui_i8(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptoui_i8: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptoui_i8: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptoui_i8: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptoui_i8: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptoui<8 x half> %a to <8 x i8> ret <8 x i8> %1 } define <8 x i16> @fptoui_i16(<8 x half> %a) #0 { -; CHECK-CVT-LABEL: fptoui_i16: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v1.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtzu v1.4s, v1.4s -; CHECK-CVT-NEXT: fcvtzu v0.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: fptoui_i16: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v1.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: fptoui_i16: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcvtzu v0.8h, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: fptoui_i16: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v1.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtzu v1.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcvtzu v0.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fptoui<8 x half> %a to <8 x i16> ret <8 x i16> %1 } define <8 x i1> @test_fcmp_une(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_une: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h 
-; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_une: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_une: ; CHECK-FP16: // %bb.0: @@ -531,27 +954,41 @@ define <8 x i1> @test_fcmp_une(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_une: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmeq v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp une <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ueq(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ueq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmgt v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ueq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ueq: ; CHECK-FP16: // %bb.0: @@ -561,23 +998,41 @@ define <8 x i1> @test_fcmp_ueq(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ueq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; 
CHECK-CVT-GI-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: mvn v1.16b, v1.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ueq <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ugt(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ugt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ugt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ugt: ; CHECK-FP16: // %bb.0: @@ -585,23 +1040,37 @@ define <8 x i1> @test_fcmp_ugt(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ugt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ugt <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_uge(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uge: ; CHECK-FP16: // %bb.0: @@ -609,23 +1078,37 @@ define <8 x i1> @test_fcmp_uge(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uge: +; CHECK-CVT-GI: // %bb.0: +; 
CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uge <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ult(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ult: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ult: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ult: ; CHECK-FP16: // %bb.0: @@ -633,23 +1116,37 @@ define <8 x i1> @test_fcmp_ult(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ult: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ult <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ule(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ule: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ule: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ule: ; CHECK-FP16: // %bb.0: @@ -657,27 +1154,41 @@ define <8 x i1> @test_fcmp_ule(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; 
+; CHECK-CVT-GI-LABEL: test_fcmp_ule: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: mvn v1.16b, v2.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ule <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_uno(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_uno: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmge v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: mvn v0.16b, v0.16b -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_uno: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_uno: ; CHECK-FP16: // %bb.0: @@ -687,26 +1198,44 @@ define <8 x i1> @test_fcmp_uno(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: mvn v0.16b, v0.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_uno: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: mvn v1.16b, v1.16b +; CHECK-CVT-GI-NEXT: mvn v0.16b, v0.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp uno <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_one(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_one: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmgt v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; 
CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_one: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_one: ; CHECK-FP16: // %bb.0: @@ -715,136 +1244,212 @@ define <8 x i1> @test_fcmp_one(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.16b, v0.16b, v2.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_one: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp one <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_oeq(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oeq: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmeq v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmeq v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oeq: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmeq v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oeq: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmeq v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oeq: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmeq v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmeq v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oeq <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ogt(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ogt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt 
v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ogt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ogt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ogt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ogt <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_oge(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_oge: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_oge: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_oge: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.8h, v0.8h, v1.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_oge: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp oge <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_olt(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_olt: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmgt v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_olt: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; 
CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_olt: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmgt v0.8h, v1.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_olt: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp olt <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ole(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ole: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcmge v2.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmge v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v2.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ole: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v2.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ole: ; CHECK-FP16: // %bb.0: ; CHECK-FP16-NEXT: fcmge v0.8h, v1.8h, v0.8h ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ole: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v2.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ole <8 x half> %a, %b ret <8 x i1> %1 } define <8 x i1> @test_fcmp_ord(<8 x half> %a, <8 x half> %b) #0 { -; CHECK-CVT-LABEL: test_fcmp_ord: -; CHECK-CVT: // %bb.0: -; CHECK-CVT-NEXT: fcvtl2 v2.4s, v1.8h -; CHECK-CVT-NEXT: fcvtl2 v3.4s, v0.8h -; CHECK-CVT-NEXT: fcvtl v1.4s, v1.4h -; CHECK-CVT-NEXT: fcvtl v0.4s, v0.4h -; CHECK-CVT-NEXT: fcmge v4.4s, v3.4s, v2.4s -; CHECK-CVT-NEXT: fcmgt v2.4s, v2.4s, v3.4s -; CHECK-CVT-NEXT: fcmge v3.4s, v0.4s, v1.4s -; CHECK-CVT-NEXT: fcmgt v0.4s, v1.4s, v0.4s -; CHECK-CVT-NEXT: orr v1.16b, v2.16b, v4.16b -; CHECK-CVT-NEXT: orr v0.16b, v0.16b, v3.16b -; CHECK-CVT-NEXT: uzp1 v0.8h, v0.8h, v1.8h -; CHECK-CVT-NEXT: xtn v0.8b, v0.8h -; CHECK-CVT-NEXT: ret +; CHECK-CVT-SD-LABEL: test_fcmp_ord: +; CHECK-CVT-SD: // %bb.0: +; CHECK-CVT-SD-NEXT: fcvtl2 v2.4s, v1.8h +; CHECK-CVT-SD-NEXT: fcvtl2 v3.4s, v0.8h +; CHECK-CVT-SD-NEXT: fcvtl v1.4s, v1.4h +; CHECK-CVT-SD-NEXT: fcvtl v0.4s, v0.4h +; CHECK-CVT-SD-NEXT: fcmge v4.4s, v3.4s, v2.4s +; CHECK-CVT-SD-NEXT: fcmgt v2.4s, v2.4s, v3.4s +; CHECK-CVT-SD-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-SD-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-SD-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-SD-NEXT: orr v0.16b, v0.16b, 
v3.16b +; CHECK-CVT-SD-NEXT: uzp1 v0.8h, v0.8h, v1.8h +; CHECK-CVT-SD-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-SD-NEXT: ret ; ; CHECK-FP16-LABEL: test_fcmp_ord: ; CHECK-FP16: // %bb.0: @@ -853,8 +1458,27 @@ define <8 x i1> @test_fcmp_ord(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-FP16-NEXT: orr v0.16b, v0.16b, v2.16b ; CHECK-FP16-NEXT: xtn v0.8b, v0.8h ; CHECK-FP16-NEXT: ret +; +; CHECK-CVT-GI-LABEL: test_fcmp_ord: +; CHECK-CVT-GI: // %bb.0: +; CHECK-CVT-GI-NEXT: fcvtl v2.4s, v0.4h +; CHECK-CVT-GI-NEXT: fcvtl v3.4s, v1.4h +; CHECK-CVT-GI-NEXT: fcvtl2 v0.4s, v0.8h +; CHECK-CVT-GI-NEXT: fcvtl2 v1.4s, v1.8h +; CHECK-CVT-GI-NEXT: fcmge v4.4s, v2.4s, v3.4s +; CHECK-CVT-GI-NEXT: fcmgt v2.4s, v3.4s, v2.4s +; CHECK-CVT-GI-NEXT: fcmge v3.4s, v0.4s, v1.4s +; CHECK-CVT-GI-NEXT: fcmgt v0.4s, v1.4s, v0.4s +; CHECK-CVT-GI-NEXT: orr v1.16b, v2.16b, v4.16b +; CHECK-CVT-GI-NEXT: orr v0.16b, v0.16b, v3.16b +; CHECK-CVT-GI-NEXT: uzp1 v0.8h, v1.8h, v0.8h +; CHECK-CVT-GI-NEXT: xtn v0.8b, v0.8h +; CHECK-CVT-GI-NEXT: ret %1 = fcmp ord <8 x half> %a, %b ret <8 x i1> %1 } attributes #0 = { nounwind } + +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK-CVT: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir new file mode 100644 index 0000000000000..35eafe8b7d99c --- /dev/null +++ b/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir @@ -0,0 +1,587 @@ +# RUN: llc -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -mtriple=aarch64-none-linux-gnu -run-pass=prologepilog %s -o - | FileCheck %s +# RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -start-before=prologepilog %s -o - | FileCheck %s --check-prefix=ASM +# RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -start-before=prologepilog %s -filetype=obj -o %t +# RUN: llvm-objdump --dwarf=frames %t | FileCheck %s --check-prefix=UNWINDINFO +# RUN: rm -rf %t +# +# Test allocation and deallocation of SVE objects on the stack with +# split-sve-objects (and hazard padding) enabled. This also tests using a +# combination of scalable and non-scalable offsets to access the SVE on the +# stack. +# +# With split-sve-objects (which implies hazard padding) the SVE area is split +# into PPR and ZPR areas with (fixed-size) hazard padding between them. The PPR +# area holds all scalable predicate callee saves and locals, and the ZPR area +# holds all scalable vector callee saves and locals. Additionally, any FPR +# callee save is promoted to a ZPR callee save (to avoid needing additional +# hazard padding in the callee save area). 
+# +# +-------------+ +# | stack arg | +# +-------------+ <- SP before call +# | Callee Saves| +# | Frame record| (if available) +# |-------------| <- FP (if available) +# | PPR area | +# |-------------| +# |/////////////| hazard padding +# |-------------| +# | ZPR area | +# +-------------+ +# | : | +# | Stack objs | +# | : | +# +-------------+ <- SP after call and frame-setup +# +--- | + + define void @test_allocate_split_sve() uwtable { entry: unreachable } + define void @test_allocate_split_sve_realigned() uwtable { entry: unreachable } + define void @test_address_split_sve() uwtable { entry: unreachable } + define void @test_address_split_sve_fp() uwtable { entry: unreachable } + define aarch64_sve_vector_pcs void @save_restore_ppr_zpr() uwtable { entry: unreachable } + +... +--- +# +----------+ +# |scratchreg| // x29 is used as scratch reg. +# |----------| +# | %stack.0 | // scalable predicate of n * 12 bytes, aligned to 16 bytes +# | | // to be materialized with 1*ADDVL (<=> n * 16 bytes) +# |----------| +# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area +# |//////////| // Note: This is currently not included in the "stackSize" +# +----------+ +# | %stack.0 | // scalable SVE object of n * 18 bytes, aligned to 16 bytes, +# | | // to be materialized with 2*ADDVL (<=> 2 * n * 16 bytes) +# +----------+ +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.1 | // not scalable +# +----------+ <- SP + +# CHECK-LABEL: name: test_allocate_split_sve +# CHECK: stackSize: 1056 + +# CHECK: bb.0.entry: +# CHECK: liveins: $z0, $p0, $fp +# CHECK: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.4) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 +# +# CHECK-NEXT: $x8 = ADDXri $sp, 1040, 0 +# CHECK-NEXT: $x8 = ADDPL_XXI $x8, 7, implicit $vg +# CHECK-NEXT: STR_ZXI $z0, killed $x8, 0 :: (store () into %stack.0) +# CHECK-NEXT: $x8 = ADDXri $sp, 2064, 0 +# CHECK-NEXT: STR_PXI $p0, killed $x8, 18 :: (store () into %stack.1) +# +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1056 +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.4) +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy 
CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: test_allocate_split_sve: +# ASM: str x29, [sp, #-16]! +# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM-NEXT: sub sp, sp, #1024 +# ASM-NEXT: .cfi_def_cfa_offset 1040 +# ASM-NEXT: addvl sp, sp, #-1 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG +# ASM-NEXT: sub sp, sp, #1040 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +# ASM-NEXT: addvl sp, sp, #-2 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 2080 + 24 * VG +# +# ASM: addvl sp, sp, #2 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +# ASM-NEXT: add sp, sp, #1024 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1056 + 8 * VG +# ASM-NEXT: addvl sp, sp, #1 +# ASM-NEXT: .cfi_def_cfa wsp, 1056 +# ASM-NEXT: add sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: ldr x29, [sp], #16 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +1040 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1056, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa: reg31 +1056 +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 +# UNWINDINFO-NEXT: DW_CFA_restore: reg29 + +name: test_allocate_split_sve +stack: + - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 } + - { id: 1, stack-id: scalable-vector, size: 12, alignment: 2 } + - { id: 2, stack-id: default, size: 16, alignment: 8 } +body: | + bb.0.entry: + liveins: $z0, $p0 + STR_ZXI $z0, %stack.0, 0 :: (store () into %stack.0) + STR_PXI $p0, %stack.1, 0 :: (store () into %stack.1) + RET_ReallyLR +... +--- + +# Stack realignment is not supported with split-sve-objects, so we fallback to +# the default hazard padding implementation. This does not prevent hazards +# between ZPRs and PPRs (TODO: support this case). 
+# +# +----------+ +# | lr, fp | // frame record +# |----------| +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.0 | // scalable predicate of n * 12 bytes, aligned to 16 bytes +# | | // to be materialized with 1*ADDVL (<=> n * 16 bytes) +# +----------+ +# | %stack.0 | // scalable SVE object of n * 18 bytes, aligned to 16 bytes, +# | | // to be materialized with 2*ADDVL (<=> 2 * n * 16 bytes) +# +----------+ +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.1 | // not scalable +# +----------+ <- SP + +name: test_allocate_split_sve_realigned +stack: + - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 } + - { id: 1, stack-id: scalable-vector, size: 12, alignment: 2 } + - { id: 2, stack-id: default, size: 16, alignment: 32 } +body: | + bb.0.entry: + liveins: $z0, $p0 + STR_ZXI $z0, %stack.0, 0 :: (store () into %stack.0) + STR_PXI $p0, %stack.1, 0 :: (store () into %stack.1) + RET_ReallyLR + +# CHECK-LABEL: name: test_allocate_split_sve_realigned +# CHECK: stackSize: 2080 + +# CHECK: bb.0.entry: +# CHECK: liveins: $z0, $p0, $lr +# CHECK: $sp = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040 +# CHECK-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.5) +# CHECK-NEXT: frame-setup STRXui killed $lr, $sp, 129 :: (store (s64) into %stack.4) +# CHECK-NEXT: $fp = frame-setup ADDXri $sp, 1024, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $w29, 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -8 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: $[[TMP]] = frame-setup ADDVL_XXI $[[TMP]], -2, implicit $vg +# CHECK-NEXT: $sp = frame-setup ANDXri killed $x9, 7930 +# +# CHECK-NEXT: $x8 = SUBXri $fp, 1024, 0 +# CHECK-NEXT: $x8 = ADDPL_XXI $x8, -1, implicit $vg +# CHECK-NEXT: STR_ZXI $z0, killed $x8, -1 :: (store () into %stack.0) +# CHECK-NEXT: $x8 = SUBXri $fp, 1024, 0 +# CHECK-NEXT: STR_PXI $p0, killed $x8, -15 :: (store () into %stack.1) +# +# CHECK-NEXT: $sp = frame-destroy SUBXri $fp, 1024, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1040 +# CHECK-NEXT: $lr = frame-destroy LDRXui $sp, 129 :: (load (s64) from %stack.4) +# CHECK-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.5) +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w30 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: test_allocate_split_sve_realigned +# ASM: sub sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa_offset 1040 +# ASM-NEXT: str x29, [sp, #1024] +# ASM-NEXT: str x30, [sp, #1032] +# ASM-NEXT: add x29, sp, #1024 +# ASM-NEXT: .cfi_def_cfa w29, 16 +# ASM-NEXT: .cfi_offset w30, -8 +# ASM-NEXT: .cfi_offset w29, -16 +# +# ASM: sub sp, x29, #1024 +# ASM-NEXT: .cfi_def_cfa wsp, 1040 +# ASM-NEXT: ldr x30, [sp, #1032] +# ASM-NEXT: ldr x29, [sp, #1024] +# ASM-NEXT: add sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w30 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +1040 +# UNWINDINFO: DW_CFA_def_cfa: reg29 +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# +# UNWINDINFO: DW_CFA_def_cfa: reg31 +1040 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 +# UNWINDINFO-NEXT: DW_CFA_restore: reg30 +# UNWINDINFO-NEXT: 
DW_CFA_restore: reg29 +... +--- + +# +----------+ +# |scratchreg| // x29 is used as scratch reg. +# +----------+ +# | %stack.2 | // scalable predicate @ SP + 2064b + 46 scalable bytes +# |----------| +# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area +# |//////////| // Note: This is currently not included in the "stackSize" +# |----------| +# | %stack.0 | // scalable vector @ SP + 1040b + 16 scalable bytes +# | %stack.1 | // scalable vector @ SP + 1040b +# +----------+ +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.3 | // not scalable +# +----------+ <- SP + +# CHECK-LABEL: name: test_address_split_sve +# CHECK: stackSize: 1056 + +# CHECK: bb.0.entry: +# CHECK-NEXT: liveins: +# CHECK-NEXT: {{ $}} +# CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.5) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 +# +# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 1040, 0 +# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 1 +# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 1040, 0 +# CHECK-NEXT: STR_ZXI $z1, killed $[[TMP]], 0 +# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 2064, 0 +# CHECK-NEXT: STR_PXI $p0, killed $[[TMP]], 23 +# +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1056 +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.5) +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: test_address_split_sve +# ASM: str x29, [sp, #-16]! 
+# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM-NEXT: sub sp, sp, #1024 +# ASM-NEXT: .cfi_def_cfa_offset 1040 +# ASM-NEXT: addvl sp, sp, #-1 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG +# ASM-NEXT: sub sp, sp, #1040 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +# ASM-NEXT: addvl sp, sp, #-2 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 2080 + 24 * VG +# +# ASM: addvl sp, sp, #2 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +# ASM-NEXT: add sp, sp, #1024 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1056 + 8 * VG +# ASM-NEXT: addvl sp, sp, #1 +# ASM-NEXT: .cfi_def_cfa wsp, 1056 +# ASM-NEXT: add sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: ldr x29, [sp], #16 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +1040 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1056, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa: reg31 +1056 +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 +# UNWINDINFO-NEXT: DW_CFA_restore: reg29 + +name: test_address_split_sve +frameInfo: + maxAlignment: 16 +stack: + - { id: 0, stack-id: scalable-vector, size: 16, alignment: 8 } + - { id: 1, stack-id: scalable-vector, size: 16, alignment: 8 } + - { id: 2, stack-id: scalable-vector, size: 2, alignment: 2 } + - { id: 3, stack-id: default, size: 16, alignment: 8 } +body: | + bb.0.entry: + liveins: $z0, $z1, $p0 + + STR_ZXI $z0, %stack.0, 0 :: (store () into %stack.0) + STR_ZXI $z1, %stack.1, 0 :: (store () into %stack.1) + STR_PXI $p0, %stack.2, 0 :: (store () into %stack.2) + + RET_ReallyLR +... 
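+# The .cfi_escape sequences checked above encode DWARF call-frame expressions.
+# As a worked reading aid (the decoding below simply mirrors the UNWINDINFO
+# expectations; it is not additional test input), the escape that appears most
+# often in these tests breaks down as:
+#
+#   .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+#
+#   0x0f             DW_CFA_def_cfa_expression
+#   0x09             expression length (9 bytes)
+#   0x8f 0x90 0x08   DW_OP_breg31 +1040   (SP base; SLEB128 0x90 0x08 = 16 + 8*128)
+#   0x92 0x2e 0x00   DW_OP_bregx 0x2e +0  (register 46 = VG, the vector granule)
+#   0x38             DW_OP_lit8
+#   0x1e             DW_OP_mul
+#   0x22             DW_OP_plus
+#
+# i.e. CFA = sp + 1040 + 8 * VG, matching the "// sp + 1040 + 8 * VG"
+# annotations on the ASM checks above.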
+--- +# +----------+ +# | lr, fp | // frame record +# +----------+ <- FP +# | %stack.2 | // scalable predicate @ FP - 2 scalable bytes +# |----------| +# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area +# |//////////| // Note: This is currently not included in the "stackSize" +# |----------| +# | %stack.0 | // scalable vector @ FP - 1024b - 32 scalable bytes +# | %stack.1 | // scalable vector @ FP - 1024b - 48 scalable bytes +# +----------+ +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.3 | // not scalable +# +----------+ <- SP + +# CHECK-LABEL: name: test_address_split_sve_fp +# CHECK: stackSize: 1056 +# +# CHECK: bb.0.entry: +# CHECK-NEXT: liveins: +# CHECK-NEXT: {{ $}} +# CHECK-NEXT: early-clobber $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2 :: (store (s64) into %stack.6), (store (s64) into %stack.5) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $w29, 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -8 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg +# +# CHECK-NEXT: $[[TMP:x[0-9]+]] = SUBXri $fp, 1024, 0 +# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], -2 +# CHECK-NEXT: $[[TMP:x[0-9]+]] = SUBXri $fp, 1024, 0 +# CHECK-NEXT: STR_ZXI $z1, killed $[[TMP]], -3 +# CHECK-NEXT: STR_PXI $p0, $fp, -1 +# +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 +# CHECK-NEXT: early-clobber $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.6), (load (s64) from %stack.5) +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w30 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: test_address_split_sve_fp +# ASM: stp x29, x30, [sp, #-16]! 
+# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: mov x29, sp +# ASM-NEXT: .cfi_def_cfa w29, 16 +# ASM-NEXT: .cfi_offset w30, -8 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM-NEXT: sub sp, sp, #1024 +# ASM-NEXT: addvl sp, sp, #-1 +# ASM-NEXT: sub sp, sp, #1040 +# ASM-NEXT: addvl sp, sp, #-2 +# +# ASM: addvl sp, sp, #2 +# ASM-NEXT: add sp, sp, #1024 +# ASM-NEXT: addvl sp, sp, #1 +# ASM-NEXT: add sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa wsp, 16 +# ASM-NEXT: ldp x29, x30, [sp], #16 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w30 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO: DW_CFA_def_cfa: reg29 +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# +# UNWINDINFO: DW_CFA_def_cfa: reg31 +16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 +# UNWINDINFO-NEXT: DW_CFA_restore: reg30 +# UNWINDINFO-NEXT: DW_CFA_restore: reg29 + +name: test_address_split_sve_fp +frameInfo: + maxAlignment: 16 + isFrameAddressTaken: true +stack: + - { id: 0, stack-id: scalable-vector, size: 16, alignment: 8 } + - { id: 1, stack-id: scalable-vector, size: 16, alignment: 8 } + - { id: 2, stack-id: scalable-vector, size: 2, alignment: 2 } + - { id: 3, stack-id: default, size: 16, alignment: 8 } +body: | + bb.0.entry: + liveins: $z0, $z1, $p0 + + STR_ZXI $z0, %stack.0, 0 :: (store () into %stack.0) + STR_ZXI $z1, %stack.1, 0 :: (store () into %stack.1) + STR_PXI $p0, %stack.2, 0 :: (store () into %stack.2) + + RET_ReallyLR +... +--- +# CHECK-LABEL: name: save_restore_ppr_zpr +# CHECK: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.8) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: frame-setup STR_PXI killed $p6, $sp, 5 :: (store (s16) into %stack.7) +# CHECK-NEXT: frame-setup STR_PXI killed $p5, $sp, 6 :: (store (s16) into %stack.6) +# CHECK-NEXT: frame-setup STR_PXI killed $p4, $sp, 7 :: (store (s16) into %stack.5) +# +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 +# CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0 :: (store (s128) into %stack.4) +# CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1 :: (store (s128) into %stack.3) +# CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2 :: (store (s128) into %stack.2) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1056, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 +# +# +# CHECK: $sp = frame-destroy ADDXri $sp, 1056, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 
0x11, 0x20, 0x1e, 0x22 +# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.4) +# CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.3) +# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.2) +# +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 +# +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z9 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z10 +# CHECK-NEXT: $p6 = frame-destroy LDR_PXI $sp, 5 :: (load (s16) from %stack.7) +# CHECK-NEXT: $p5 = frame-destroy LDR_PXI $sp, 6 :: (load (s16) from %stack.6) +# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7 :: (load (s16) from %stack.5) +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 +# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.8) +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: save_restore_ppr_zpr: +# ASM: str x29, [sp, #-16]! +# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM-NEXT: addvl sp, sp, #-1 +# ASM-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG +# ASM-NEXT: str p6, [sp, #5, mul vl] +# ASM-NEXT: str p5, [sp, #6, mul vl] +# ASM-NEXT: str p4, [sp, #7, mul vl] +# ASM-NEXT: sub sp, sp, #1024 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG +# ASM-NEXT: addvl sp, sp, #-3 +# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 1040 + 32 * VG +# ASM-NEXT: str z10, [sp] +# ASM-NEXT: str z9, [sp, #1, mul vl] +# ASM-NEXT: str z8, [sp, #2, mul vl] +# ASM-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040 +# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 24 * VG - 1040 +# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d10 @ cfa - 32 * VG - 1040 +# ASM-NEXT: sub sp, sp, #1056 +# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 2096 + 32 * VG +# +# ASM: add sp, sp, #1056 +# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 1040 + 32 * VG +# ASM-NEXT: ldr z10, [sp] +# ASM-NEXT: ldr z9, [sp, #1, mul vl] +# ASM-NEXT: ldr z8, [sp, #2, mul vl] +# ASM-NEXT: add sp, sp, #1024 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG +# ASM-NEXT: addvl sp, sp, #3 +# ASM-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG +# ASM-NEXT: .cfi_restore z8 +# ASM-NEXT: .cfi_restore z9 +# ASM-NEXT: .cfi_restore z10 +# ASM-NEXT: ldr p6, [sp, #5, mul vl] +# ASM-NEXT: ldr p5, [sp, #6, mul vl] +# ASM-NEXT: ldr p4, [sp, #7, mul vl] +# ASM-NEXT: addvl sp, sp, #1 +# ASM-NEXT: .cfi_def_cfa wsp, 16 +# 
ASM-NEXT: ldr x29, [sp], #16 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2096, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus +# +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 +# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 +# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg106 +# UNWINDINFO: DW_CFA_def_cfa: reg31 +16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 +# UNWINDINFO-NEXT: DW_CFA_restore: reg29 + +name: save_restore_ppr_zpr +stack: + - { id: 0, stack-id: default, size: 32, alignment: 16 } +body: | + bb.0.entry: + + $p4 = IMPLICIT_DEF + $p5 = IMPLICIT_DEF + $p6 = IMPLICIT_DEF + $z8 = IMPLICIT_DEF + $z9 = IMPLICIT_DEF + $z10 = IMPLICIT_DEF + + RET_ReallyLR diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir index 03a6aabffaaf1..110141609a59e 100644 --- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir +++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir @@ -1215,19 +1215,19 @@ body: | # CHECK: - { id: 2, name: '', type: default, offset: -112, size: 16, alignment: 16, # CHECK-NEXT: stack-id: scalable-vector, # CHECK: - { id: 3, name: '', type: default, offset: -114, size: 2, alignment: 2, -# CHECK-NEXT: stack-id: scalable-vector, +# CHECK-NEXT: stack-id: scalable-predicate-vector, # CHECK: - { id: 4, name: '', type: spill-slot, offset: -144, size: 16, alignment: 16, # CHECK-NEXT: stack-id: scalable-vector, # CHECK: - { id: 5, name: '', type: spill-slot, offset: -146, size: 2, alignment: 2, -# CHECK-NEXT: stack-id: scalable-vector, +# CHECK-NEXT: stack-id: scalable-predicate-vector, # CHECK: - { id: 6, name: '', type: spill-slot, offset: -16, size: 16, alignment: 16, # CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$z8', # CHECK: - { id: 7, name: '', type: spill-slot, offset: -32, size: 16, alignment: 16, # CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$z23', # CHECK: - { id: 8, name: '', type: spill-slot, offset: -34, size: 2, alignment: 2, -# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$p4', +# CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '$p4', # CHECK: - { id: 9, name: '', type: spill-slot, offset: -36, size: 2, alignment: 2, -# CHECK-NEXT: stack-id: scalable-vector, 
callee-saved-register: '$p15', +# CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '$p15', # CHECK: - { id: 10, name: '', type: spill-slot, offset: -16, size: 8, alignment: 16, # CHECK-NEXT: stack-id: default, callee-saved-register: '$fp', # @@ -1295,9 +1295,9 @@ stack: - { id: 0, type: default, size: 32, alignment: 16, stack-id: scalable-vector } - { id: 1, type: default, size: 4, alignment: 2, stack-id: scalable-vector } - { id: 2, type: default, size: 16, alignment: 16, stack-id: scalable-vector } - - { id: 3, type: default, size: 2, alignment: 2, stack-id: scalable-vector } + - { id: 3, type: default, size: 2, alignment: 2, stack-id: scalable-predicate-vector } - { id: 4, type: spill-slot, size: 16, alignment: 16, stack-id: scalable-vector } - - { id: 5, type: spill-slot, size: 2, alignment: 2, stack-id: scalable-vector } + - { id: 5, type: spill-slot, size: 2, alignment: 2, stack-id: scalable-predicate-vector } body: | bb.0.entry: diff --git a/llvm/test/CodeGen/AArch64/freeze.ll b/llvm/test/CodeGen/AArch64/freeze.ll index fae3bbe2dcfba..fb909fec90434 100644 --- a/llvm/test/CodeGen/AArch64/freeze.ll +++ b/llvm/test/CodeGen/AArch64/freeze.ll @@ -466,15 +466,12 @@ define <8 x i16> @freeze_urhadd(<8 x i16> %a0, <8 x i16> %a1) { ret <8 x i16> %masked } -; TODO: Unnecessary sext_inreg define <8 x i16> @freeze_shadd(<8 x i8> %a0, <8 x i16> %a1) { ; CHECK-LABEL: freeze_shadd: ; CHECK: // %bb.0: ; CHECK-NEXT: sshll v0.8h, v0.8b, #0 ; CHECK-NEXT: sshr v1.8h, v1.8h, #8 ; CHECK-NEXT: shadd v0.8h, v0.8h, v1.8h -; CHECK-NEXT: shl v0.8h, v0.8h, #8 -; CHECK-NEXT: sshr v0.8h, v0.8h, #8 ; CHECK-NEXT: ret %x0 = sext <8 x i8> %a0 to <8 x i16> %x1 = ashr <8 x i16> %a1, splat (i16 8) @@ -485,15 +482,12 @@ define <8 x i16> @freeze_shadd(<8 x i8> %a0, <8 x i16> %a1) { ret <8 x i16> %sext } -; TODO: Unnecessary sext_inreg define <8 x i16> @freeze_srhadd(<8 x i8> %a0, <8 x i16> %a1) { ; CHECK-LABEL: freeze_srhadd: ; CHECK: // %bb.0: ; CHECK-NEXT: sshll v0.8h, v0.8b, #0 ; CHECK-NEXT: sshr v1.8h, v1.8h, #8 ; CHECK-NEXT: srhadd v0.8h, v0.8h, v1.8h -; CHECK-NEXT: shl v0.8h, v0.8h, #8 -; CHECK-NEXT: sshr v0.8h, v0.8h, #8 ; CHECK-NEXT: ret %x0 = sext <8 x i8> %a0 to <8 x i16> %x1 = ashr <8 x i16> %a1, splat (i16 8) diff --git a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll index 5e01612e3881a..e2c861b40e706 100644 --- a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll +++ b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll @@ -310,6 +310,171 @@ define void @test_2x32bit_mask_with_32bit_index_and_trip_count(i32 %i, i32 %n) # ret void } +; Extra use of the get_active_lane_mask from an extractelement, which is replaced with ptest_first. 
+ +define void @test_2x8bit_mask_with_extracts_and_ptest(i64 %i, i64 %n) { +; CHECK-SVE-LABEL: test_2x8bit_mask_with_extracts_and_ptest: +; CHECK-SVE: // %bb.0: // %entry +; CHECK-SVE-NEXT: whilelo p1.b, x0, x1 +; CHECK-SVE-NEXT: b.pl .LBB11_2 +; CHECK-SVE-NEXT: // %bb.1: // %if.then +; CHECK-SVE-NEXT: punpklo p0.h, p1.b +; CHECK-SVE-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE-NEXT: b use +; CHECK-SVE-NEXT: .LBB11_2: // %if.end +; CHECK-SVE-NEXT: ret +; +; CHECK-SVE2p1-SME2-LABEL: test_2x8bit_mask_with_extracts_and_ptest: +; CHECK-SVE2p1-SME2: // %bb.0: // %entry +; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.h, p1.h }, x0, x1 +; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB11_2 +; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then +; CHECK-SVE2p1-SME2-NEXT: b use +; CHECK-SVE2p1-SME2-NEXT: .LBB11_2: // %if.end +; CHECK-SVE2p1-SME2-NEXT: ret +entry: + %r = call @llvm.get.active.lane.mask.nxv16i1.i32(i64 %i, i64 %n) + %v0 = call @llvm.vector.extract.nxv8i1.nxv16i1.i64( %r, i64 0) + %v1 = call @llvm.vector.extract.nxv8i1.nxv16i1.i64( %r, i64 8) + %elt0 = extractelement %r, i32 0 + br i1 %elt0, label %if.then, label %if.end + +if.then: + tail call void @use( %v0, %v1) + br label %if.end + +if.end: + ret void +} + +; Extra use of the get_active_lane_mask from an extractelement, which is +; replaced with ptest_first and reinterpret_casts because the extract is not nxv16i1. + +define void @test_2x8bit_mask_with_extracts_and_reinterpret_casts(i64 %i, i64 %n) { +; CHECK-SVE-LABEL: test_2x8bit_mask_with_extracts_and_reinterpret_casts: +; CHECK-SVE: // %bb.0: // %entry +; CHECK-SVE-NEXT: whilelo p1.h, x0, x1 +; CHECK-SVE-NEXT: b.pl .LBB12_2 +; CHECK-SVE-NEXT: // %bb.1: // %if.then +; CHECK-SVE-NEXT: punpklo p0.h, p1.b +; CHECK-SVE-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE-NEXT: b use +; CHECK-SVE-NEXT: .LBB12_2: // %if.end +; CHECK-SVE-NEXT: ret +; +; CHECK-SVE2p1-SME2-LABEL: test_2x8bit_mask_with_extracts_and_reinterpret_casts: +; CHECK-SVE2p1-SME2: // %bb.0: // %entry +; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.s, p1.s }, x0, x1 +; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB12_2 +; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then +; CHECK-SVE2p1-SME2-NEXT: b use +; CHECK-SVE2p1-SME2-NEXT: .LBB12_2: // %if.end +; CHECK-SVE2p1-SME2-NEXT: ret +entry: + %r = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 %i, i64 %n) + %v0 = tail call @llvm.vector.extract.nxv4i1.nxv8i1( %r, i64 0) + %v1 = tail call @llvm.vector.extract.nxv4i1.nxv8i1( %r, i64 4) + %elt0 = extractelement %r, i64 0 + br i1 %elt0, label %if.then, label %if.end + +if.then: + tail call void @use( %v0, %v1) + br label %if.end + +if.end: + ret void +} + +define void @test_4x4bit_mask_with_extracts_and_ptest(i64 %i, i64 %n) { +; CHECK-SVE-LABEL: test_4x4bit_mask_with_extracts_and_ptest: +; CHECK-SVE: // %bb.0: // %entry +; CHECK-SVE-NEXT: whilelo p0.b, x0, x1 +; CHECK-SVE-NEXT: b.pl .LBB13_2 +; CHECK-SVE-NEXT: // %bb.1: // %if.then +; CHECK-SVE-NEXT: punpklo p1.h, p0.b +; CHECK-SVE-NEXT: punpkhi p3.h, p0.b +; CHECK-SVE-NEXT: punpklo p0.h, p1.b +; CHECK-SVE-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE-NEXT: punpklo p2.h, p3.b +; CHECK-SVE-NEXT: punpkhi p3.h, p3.b +; CHECK-SVE-NEXT: b use +; CHECK-SVE-NEXT: .LBB13_2: // %if.end +; CHECK-SVE-NEXT: ret +; +; CHECK-SVE2p1-SME2-LABEL: test_4x4bit_mask_with_extracts_and_ptest: +; CHECK-SVE2p1-SME2: // %bb.0: // %entry +; CHECK-SVE2p1-SME2-NEXT: cnth x8 +; CHECK-SVE2p1-SME2-NEXT: adds x8, x0, x8 +; CHECK-SVE2p1-SME2-NEXT: csinv x8, x8, xzr, lo +; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.s, p1.s }, x0, x1 +; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB13_2 +; 
CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then +; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.s, p3.s }, x8, x1 +; CHECK-SVE2p1-SME2-NEXT: b use +; CHECK-SVE2p1-SME2-NEXT: .LBB13_2: // %if.end +; CHECK-SVE2p1-SME2-NEXT: ret +entry: + %r = call @llvm.get.active.lane.mask.nxv16i1.i32(i64 %i, i64 %n) + %v0 = call @llvm.vector.extract.nxv4i1.nxv16i1.i64( %r, i64 0) + %v1 = call @llvm.vector.extract.nxv4i1.nxv16i1.i64( %r, i64 4) + %v2 = call @llvm.vector.extract.nxv4i1.nxv16i1.i64( %r, i64 8) + %v3 = call @llvm.vector.extract.nxv4i1.nxv16i1.i64( %r, i64 12) + %elt0 = extractelement %r, i32 0 + br i1 %elt0, label %if.then, label %if.end + +if.then: + tail call void @use( %v0, %v1, %v2, %v3) + br label %if.end + +if.end: + ret void +} + +define void @test_4x2bit_mask_with_extracts_and_reinterpret_casts(i64 %i, i64 %n) { +; CHECK-SVE-LABEL: test_4x2bit_mask_with_extracts_and_reinterpret_casts: +; CHECK-SVE: // %bb.0: // %entry +; CHECK-SVE-NEXT: whilelo p0.h, x0, x1 +; CHECK-SVE-NEXT: b.pl .LBB14_2 +; CHECK-SVE-NEXT: // %bb.1: // %if.then +; CHECK-SVE-NEXT: punpklo p1.h, p0.b +; CHECK-SVE-NEXT: punpkhi p3.h, p0.b +; CHECK-SVE-NEXT: punpklo p0.h, p1.b +; CHECK-SVE-NEXT: punpkhi p1.h, p1.b +; CHECK-SVE-NEXT: punpklo p2.h, p3.b +; CHECK-SVE-NEXT: punpkhi p3.h, p3.b +; CHECK-SVE-NEXT: b use +; CHECK-SVE-NEXT: .LBB14_2: // %if.end +; CHECK-SVE-NEXT: ret +; +; CHECK-SVE2p1-SME2-LABEL: test_4x2bit_mask_with_extracts_and_reinterpret_casts: +; CHECK-SVE2p1-SME2: // %bb.0: // %entry +; CHECK-SVE2p1-SME2-NEXT: cntw x8 +; CHECK-SVE2p1-SME2-NEXT: adds x8, x0, x8 +; CHECK-SVE2p1-SME2-NEXT: csinv x8, x8, xzr, lo +; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.d, p1.d }, x0, x1 +; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB14_2 +; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then +; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.d, p3.d }, x8, x1 +; CHECK-SVE2p1-SME2-NEXT: b use +; CHECK-SVE2p1-SME2-NEXT: .LBB14_2: // %if.end +; CHECK-SVE2p1-SME2-NEXT: ret +entry: + %r = call @llvm.get.active.lane.mask.nxv8i1.i32(i64 %i, i64 %n) + %v0 = call @llvm.vector.extract.nxv2i1.nxv8i1.i64( %r, i64 0) + %v1 = call @llvm.vector.extract.nxv2i1.nxv8i1.i64( %r, i64 2) + %v2 = call @llvm.vector.extract.nxv2i1.nxv8i1.i64( %r, i64 4) + %v3 = call @llvm.vector.extract.nxv2i1.nxv8i1.i64( %r, i64 6) + %elt0 = extractelement %r, i32 0 + br i1 %elt0, label %if.then, label %if.end + +if.then: + tail call void @use( %v0, %v1, %v2, %v3) + br label %if.end + +if.end: + ret void +} + declare void @use(...) attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AArch64/icmp.ll b/llvm/test/CodeGen/AArch64/icmp.ll index 18665bcbeae83..7195e2b2f1255 100644 --- a/llvm/test/CodeGen/AArch64/icmp.ll +++ b/llvm/test/CodeGen/AArch64/icmp.ll @@ -2093,3 +2093,54 @@ define <2 x i1> @icmp_slt_v2i64_Zero_LHS(<2 x i64> %a) { %c = icmp slt <2 x i64> , %a ret <2 x i1> %c } + +; Test TST optimization for i8 sign bit testing with cross-type select +; This tests the pattern: icmp slt i8 %val, 0; select i1 %cmp, i32 %a, i32 %b +; The optimization should convert sxtb+cmp to tst for sign bit testing. 
+ +define i32 @i8_signbit_tst_constants(i8 %x, i8 %y) { +; CHECK-SD-LABEL: i8_signbit_tst_constants: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: add w9, w0, w1 +; CHECK-SD-NEXT: mov w8, #42 // =0x2a +; CHECK-SD-NEXT: tst w9, #0x80 +; CHECK-SD-NEXT: mov w9, #20894 // =0x519e +; CHECK-SD-NEXT: csel w0, w9, w8, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: i8_signbit_tst_constants: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: add w8, w0, w1 +; CHECK-GI-NEXT: mov w9, #42 // =0x2a +; CHECK-GI-NEXT: mov w10, #20894 // =0x519e +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csel w0, w10, w9, mi +; CHECK-GI-NEXT: ret + %add = add i8 %x, %y + %cmp = icmp slt i8 %add, 0 + %sel = select i1 %cmp, i32 20894, i32 42 + ret i32 %sel +} + +; Test i8 sign bit testing with variable select values (problematic case) +define i32 @i8_signbit_variables(i8 %x, i8 %y, i32 %a, i32 %b) { +; CHECK-SD-LABEL: i8_signbit_variables: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: add w8, w0, w1 +; CHECK-SD-NEXT: tst w8, #0x80 +; CHECK-SD-NEXT: csel w0, w2, w3, ne +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: i8_signbit_variables: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: add w8, w0, w1 +; CHECK-GI-NEXT: sxtb w8, w8 +; CHECK-GI-NEXT: cmp w8, #0 +; CHECK-GI-NEXT: csel w0, w2, w3, mi +; CHECK-GI-NEXT: ret + %add = add i8 %x, %y + %cmp = icmp slt i8 %add, 0 + %sel = select i1 %cmp, i32 %a, i32 %b + ret i32 %sel +} diff --git a/llvm/test/CodeGen/AArch64/isinf.ll b/llvm/test/CodeGen/AArch64/isinf.ll index e68539bcf07d9..e8bbaf96395f0 100644 --- a/llvm/test/CodeGen/AArch64/isinf.ll +++ b/llvm/test/CodeGen/AArch64/isinf.ll @@ -27,9 +27,8 @@ define i32 @replace_isinf_call_f32(float %x) { ; CHECK-LABEL: replace_isinf_call_f32: ; CHECK: // %bb.0: ; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 -; CHECK-NEXT: and w9, w9, #0x7fffffff -; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: mov w8, #-16777216 // =0xff000000 +; CHECK-NEXT: cmp w8, w9, lsl #1 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret %abs = tail call float @llvm.fabs.f32(float %x) @@ -43,9 +42,8 @@ define i32 @replace_isinf_call_f64(double %x) { ; CHECK-LABEL: replace_isinf_call_f64: ; CHECK: // %bb.0: ; CHECK-NEXT: fmov x9, d0 -; CHECK-NEXT: mov x8, #9218868437227405312 // =0x7ff0000000000000 -; CHECK-NEXT: and x9, x9, #0x7fffffffffffffff -; CHECK-NEXT: cmp x9, x8 +; CHECK-NEXT: mov x8, #-9007199254740992 // =0xffe0000000000000 +; CHECK-NEXT: cmp x8, x9, lsl #1 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret %abs = tail call double @llvm.fabs.f64(double %x) diff --git a/llvm/test/CodeGen/AArch64/ldexp.ll b/llvm/test/CodeGen/AArch64/ldexp.ll index 6019fa1490e3d..1e35bd627a199 100644 --- a/llvm/test/CodeGen/AArch64/ldexp.ll +++ b/llvm/test/CodeGen/AArch64/ldexp.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 ; RUN: llc -mtriple=aarch64 -mattr=+sve < %s -o - | FileCheck -check-prefixes=SVE,SVELINUX %s +; RUN: llc -mtriple=aarch64 -global-isel < %s -o - | FileCheck -check-prefixes=GISEL %s ; RUN: llc -mtriple=aarch64-windows-msvc -mattr=+sve < %s -o - | FileCheck -check-prefixes=SVE,SVEWINDOWS %s ; RUN: llc -mtriple=aarch64-windows-msvc < %s -o - | FileCheck -check-prefixes=WINDOWS %s @@ -15,6 +16,10 @@ define double @testExp(double %val, i32 %a) { ; SVE-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE-NEXT: ret ; +; GISEL-LABEL: testExp: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexp +; ; WINDOWS-LABEL: testExp: ; WINDOWS: // %bb.0: // %entry ; WINDOWS-NEXT: b ldexp @@ 
-37,6 +42,10 @@ define double @testExpIntrinsic(double %val, i32 %a) { ; SVE-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE-NEXT: ret ; +; GISEL-LABEL: testExpIntrinsic: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexp +; ; WINDOWS-LABEL: testExpIntrinsic: ; WINDOWS: // %bb.0: // %entry ; WINDOWS-NEXT: b ldexp @@ -55,6 +64,10 @@ define float @testExpf(float %val, i32 %a) { ; SVELINUX-NEXT: // kill: def $s0 killed $s0 killed $z0 ; SVELINUX-NEXT: ret ; +; GISEL-LABEL: testExpf: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexpf +; ; SVEWINDOWS-LABEL: testExpf: ; SVEWINDOWS: // %bb.0: // %entry ; SVEWINDOWS-NEXT: b ldexpf @@ -77,6 +90,10 @@ define float @testExpfIntrinsic(float %val, i32 %a) { ; SVE-NEXT: // kill: def $s0 killed $s0 killed $z0 ; SVE-NEXT: ret ; +; GISEL-LABEL: testExpfIntrinsic: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexpf +; ; WINDOWS-LABEL: testExpfIntrinsic: ; WINDOWS: .seh_proc testExpfIntrinsic ; WINDOWS-NEXT: // %bb.0: // %entry @@ -98,6 +115,90 @@ entry: ret float %call } +define <2 x float> @test_ldexp_v2f32_v2i32(<2 x float> %Val, <2 x i32> %Exp) { +; SVE-LABEL: test_ldexp_v2f32_v2i32: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $d1 killed $d1 def $z1 +; SVE-NEXT: mov w8, v1.s[1] +; SVE-NEXT: // kill: def $d0 killed $d0 def $z0 +; SVE-NEXT: mov s2, v0.s[1] +; SVE-NEXT: ptrue p0.s +; SVE-NEXT: fscale z0.s, p0/m, z0.s, z1.s +; SVE-NEXT: fmov s3, w8 +; SVE-NEXT: fscale z2.s, p0/m, z2.s, z3.s +; SVE-NEXT: mov v0.s[1], v2.s[0] +; SVE-NEXT: // kill: def $d0 killed $d0 killed $z0 +; SVE-NEXT: ret +; +; GISEL-LABEL: test_ldexp_v2f32_v2i32: +; GISEL: // %bb.0: +; GISEL-NEXT: sub sp, sp, #48 +; GISEL-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; GISEL-NEXT: str x30, [sp, #32] // 8-byte Folded Spill +; GISEL-NEXT: .cfi_def_cfa_offset 48 +; GISEL-NEXT: .cfi_offset w30, -16 +; GISEL-NEXT: .cfi_offset b8, -24 +; GISEL-NEXT: .cfi_offset b9, -32 +; GISEL-NEXT: // kill: def $d1 killed $d1 def $q1 +; GISEL-NEXT: fmov w0, s1 +; GISEL-NEXT: // kill: def $d0 killed $d0 def $q0 +; GISEL-NEXT: mov s8, v0.s[1] +; GISEL-NEXT: mov s9, v1.s[1] +; GISEL-NEXT: // kill: def $s0 killed $s0 killed $q0 +; GISEL-NEXT: bl ldexpf +; GISEL-NEXT: // kill: def $s0 killed $s0 def $q0 +; GISEL-NEXT: str q0, [sp] // 16-byte Folded Spill +; GISEL-NEXT: fmov w0, s9 +; GISEL-NEXT: fmov s0, s8 +; GISEL-NEXT: bl ldexpf +; GISEL-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; GISEL-NEXT: // kill: def $s0 killed $s0 def $q0 +; GISEL-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; GISEL-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; GISEL-NEXT: mov v1.s[1], v0.s[0] +; GISEL-NEXT: fmov d0, d1 +; GISEL-NEXT: add sp, sp, #48 +; GISEL-NEXT: ret +; +; WINDOWS-LABEL: test_ldexp_v2f32_v2i32: +; WINDOWS: .seh_proc test_ldexp_v2f32_v2i32 +; WINDOWS-NEXT: // %bb.0: +; WINDOWS-NEXT: sub sp, sp, #48 +; WINDOWS-NEXT: .seh_stackalloc 48 +; WINDOWS-NEXT: str x30, [sp, #32] // 8-byte Folded Spill +; WINDOWS-NEXT: .seh_save_reg x30, 32 +; WINDOWS-NEXT: .seh_endprologue +; WINDOWS-NEXT: // kill: def $d0 killed $d0 def $q0 +; WINDOWS-NEXT: mov s2, v0.s[1] +; WINDOWS-NEXT: // kill: def $d1 killed $d1 def $q1 +; WINDOWS-NEXT: stp q1, q0, [sp] // 32-byte Folded Spill +; WINDOWS-NEXT: mov w0, v1.s[1] +; WINDOWS-NEXT: fcvt d0, s2 +; WINDOWS-NEXT: bl ldexp +; WINDOWS-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; WINDOWS-NEXT: fcvt s0, d0 +; WINDOWS-NEXT: fcvt d1, s1 +; WINDOWS-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; WINDOWS-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; WINDOWS-NEXT: 
fmov w0, s0 +; WINDOWS-NEXT: fmov d0, d1 +; WINDOWS-NEXT: bl ldexp +; WINDOWS-NEXT: fcvt s0, d0 +; WINDOWS-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; WINDOWS-NEXT: mov v0.s[1], v1.s[0] +; WINDOWS-NEXT: // kill: def $d0 killed $d0 killed $q0 +; WINDOWS-NEXT: .seh_startepilogue +; WINDOWS-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; WINDOWS-NEXT: .seh_save_reg x30, 32 +; WINDOWS-NEXT: add sp, sp, #48 +; WINDOWS-NEXT: .seh_stackalloc 48 +; WINDOWS-NEXT: .seh_endepilogue +; WINDOWS-NEXT: ret +; WINDOWS-NEXT: .seh_endfunclet +; WINDOWS-NEXT: .seh_endproc + %result = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %Val, <2 x i32> %Exp) + ret <2 x float> %result +} declare float @ldexpf(float, i32) memory(none) @@ -106,6 +207,10 @@ define fp128 @testExpl(fp128 %val, i32 %a) { ; SVE: // %bb.0: // %entry ; SVE-NEXT: b ldexpl ; +; GISEL-LABEL: testExpl: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexpl +; ; WINDOWS-LABEL: testExpl: ; WINDOWS: // %bb.0: // %entry ; WINDOWS-NEXT: b ldexpl @@ -126,6 +231,17 @@ define half @testExpf16(half %val, i32 %a) { ; SVE-NEXT: fcvt h0, s0 ; SVE-NEXT: ret ; +; GISEL-LABEL: testExpf16: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill +; GISEL-NEXT: .cfi_def_cfa_offset 16 +; GISEL-NEXT: .cfi_offset w30, -16 +; GISEL-NEXT: fcvt s0, h0 +; GISEL-NEXT: bl ldexpf +; GISEL-NEXT: fcvt h0, s0 +; GISEL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; GISEL-NEXT: ret +; ; WINDOWS-LABEL: testExpf16: ; WINDOWS: .seh_proc testExpf16 ; WINDOWS-NEXT: // %bb.0: // %entry diff --git a/llvm/test/CodeGen/AArch64/llvm.modf.ll b/llvm/test/CodeGen/AArch64/llvm.modf.ll index 41fe796daca86..503742fa1c443 100644 --- a/llvm/test/CodeGen/AArch64/llvm.modf.ll +++ b/llvm/test/CodeGen/AArch64/llvm.modf.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s +; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK,CHECK-SD %s +; RUN: llc -mtriple=aarch64-gnu-linux -global-isel < %s | FileCheck -check-prefixes=CHECK,CHECK-GI %s define { half, half } @test_modf_f16(half %a) { ; CHECK-LABEL: test_modf_f16: @@ -55,61 +56,95 @@ define half @test_modf_f16_only_use_integral_part(half %a) { } define { <2 x half>, <2 x half> } @test_modf_v2f16(<2 x half> %a) { -; CHECK-LABEL: test_modf_v2f16: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: mov h1, v0.h[1] -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: add x0, sp, #44 -; CHECK-NEXT: fcvt s0, h1 -; CHECK-NEXT: bl modff -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: fcvt h0, s0 -; CHECK-NEXT: add x0, sp, #40 -; CHECK-NEXT: fcvt s1, h1 -; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: fmov s0, s1 -; CHECK-NEXT: bl modff -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: fcvt h2, s0 -; CHECK-NEXT: add x0, sp, #56 -; CHECK-NEXT: mov h1, v1.h[2] -; CHECK-NEXT: fcvt s0, h1 -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: mov v2.h[1], v1.h[0] -; CHECK-NEXT: str q2, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: bl modff -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: fcvt h2, s0 -; CHECK-NEXT: add x0, sp, #60 -; 
CHECK-NEXT: mov h1, v1.h[3] -; CHECK-NEXT: fcvt s0, h1 -; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: mov v1.h[2], v2.h[0] -; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: bl modff -; CHECK-NEXT: ldp s2, s1, [sp, #40] -; CHECK-NEXT: fcvt h4, s0 -; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload -; CHECK-NEXT: fcvt h3, s1 -; CHECK-NEXT: fcvt h1, s2 -; CHECK-NEXT: ldr s2, [sp, #56] -; CHECK-NEXT: mov v0.h[3], v4.h[0] -; CHECK-NEXT: fcvt h2, s2 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: mov v1.h[1], v3.h[0] -; CHECK-NEXT: ldr s3, [sp, #60] -; CHECK-NEXT: mov v1.h[2], v2.h[0] -; CHECK-NEXT: fcvt h2, s3 -; CHECK-NEXT: mov v1.h[3], v2.h[0] -; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1 -; CHECK-NEXT: add sp, sp, #64 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_modf_v2f16: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #64 +; CHECK-SD-NEXT: str x30, [sp, #48] // 8-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 64 +; CHECK-SD-NEXT: .cfi_offset w30, -16 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: mov h1, v0.h[1] +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: add x0, sp, #44 +; CHECK-SD-NEXT: fcvt s0, h1 +; CHECK-SD-NEXT: bl modff +; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: fcvt h0, s0 +; CHECK-SD-NEXT: add x0, sp, #40 +; CHECK-SD-NEXT: fcvt s1, h1 +; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: fmov s0, s1 +; CHECK-SD-NEXT: bl modff +; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: fcvt h2, s0 +; CHECK-SD-NEXT: add x0, sp, #56 +; CHECK-SD-NEXT: mov h1, v1.h[2] +; CHECK-SD-NEXT: fcvt s0, h1 +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov v2.h[1], v1.h[0] +; CHECK-SD-NEXT: str q2, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl modff +; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: fcvt h2, s0 +; CHECK-SD-NEXT: add x0, sp, #60 +; CHECK-SD-NEXT: mov h1, v1.h[3] +; CHECK-SD-NEXT: fcvt s0, h1 +; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov v1.h[2], v2.h[0] +; CHECK-SD-NEXT: str q1, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: bl modff +; CHECK-SD-NEXT: ldp s2, s1, [sp, #40] +; CHECK-SD-NEXT: fcvt h4, s0 +; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload +; CHECK-SD-NEXT: fcvt h3, s1 +; CHECK-SD-NEXT: fcvt h1, s2 +; CHECK-SD-NEXT: ldr s2, [sp, #56] +; CHECK-SD-NEXT: mov v0.h[3], v4.h[0] +; CHECK-SD-NEXT: fcvt h2, s2 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: mov v1.h[1], v3.h[0] +; CHECK-SD-NEXT: ldr s3, [sp, #60] +; CHECK-SD-NEXT: mov v1.h[2], v2.h[0] +; CHECK-SD-NEXT: fcvt h2, s3 +; CHECK-SD-NEXT: mov v1.h[3], v2.h[0] +; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1 +; CHECK-SD-NEXT: add sp, sp, #64 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_modf_v2f16: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #64 +; CHECK-GI-NEXT: str d8, [sp, #48] // 8-byte Folded Spill +; CHECK-GI-NEXT: str x30, [sp, #56] // 8-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: .cfi_offset w30, -8 +; CHECK-GI-NEXT: .cfi_offset b8, -16 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: mov h8, v0.h[1] +; CHECK-GI-NEXT: add x0, sp, #40 +; CHECK-GI-NEXT: fcvt s0, 
h0 +; CHECK-GI-NEXT: bl modff +; CHECK-GI-NEXT: fcvt h0, s0 +; CHECK-GI-NEXT: ldr s1, [sp, #40] +; CHECK-GI-NEXT: add x0, sp, #44 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: fcvt h0, s1 +; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: fcvt s0, h8 +; CHECK-GI-NEXT: bl modff +; CHECK-GI-NEXT: ldr s1, [sp, #44] +; CHECK-GI-NEXT: fcvt h3, s0 +; CHECK-GI-NEXT: ldr x30, [sp, #56] // 8-byte Folded Reload +; CHECK-GI-NEXT: ldr d8, [sp, #48] // 8-byte Folded Reload +; CHECK-GI-NEXT: fcvt h2, s1 +; CHECK-GI-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: mov v0.h[1], v3.h[0] +; CHECK-GI-NEXT: mov v1.h[1], v2.h[0] +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1 +; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ret %result = call { <2 x half>, <2 x half> } @llvm.modf.v2f16(<2 x half> %a) ret { <2 x half>, <2 x half> } %result } @@ -130,80 +165,156 @@ define { float, float } @test_modf_f32(float %a) { } define { <3 x float>, <3 x float> } @test_modf_v3f32(<3 x float> %a) { -; CHECK-LABEL: test_modf_v3f32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #80 -; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill -; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 80 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w20, -16 -; CHECK-NEXT: .cfi_offset w30, -32 -; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: mov s0, v0.s[1] -; CHECK-NEXT: add x0, sp, #56 -; CHECK-NEXT: add x19, sp, #56 -; CHECK-NEXT: bl modff -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: add x0, sp, #44 -; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl modff -; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: add x0, sp, #60 -; CHECK-NEXT: add x20, sp, #60 -; CHECK-NEXT: mov v0.s[1], v1.s[0] -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: mov s0, v0.s[2] -; CHECK-NEXT: bl modff -; CHECK-NEXT: ldr s1, [sp, #44] -; CHECK-NEXT: ldr q2, [sp] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload -; CHECK-NEXT: ld1 { v1.s }[1], [x19] -; CHECK-NEXT: mov v2.s[2], v0.s[0] -; CHECK-NEXT: ld1 { v1.s }[2], [x20] -; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload -; CHECK-NEXT: mov v0.16b, v2.16b -; CHECK-NEXT: add sp, sp, #80 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_modf_v3f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #80 +; CHECK-SD-NEXT: str x30, [sp, #48] // 8-byte Folded Spill +; CHECK-SD-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 80 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w20, -16 +; CHECK-SD-NEXT: .cfi_offset w30, -32 +; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: mov s0, v0.s[1] +; CHECK-SD-NEXT: add x0, sp, #56 +; CHECK-SD-NEXT: add x19, sp, #56 +; CHECK-SD-NEXT: bl modff +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: add x0, sp, #44 +; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 
killed $q0 +; CHECK-SD-NEXT: bl modff +; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: add x0, sp, #60 +; CHECK-SD-NEXT: add x20, sp, #60 +; CHECK-SD-NEXT: mov v0.s[1], v1.s[0] +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov s0, v0.s[2] +; CHECK-SD-NEXT: bl modff +; CHECK-SD-NEXT: ldr s1, [sp, #44] +; CHECK-SD-NEXT: ldr q2, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload +; CHECK-SD-NEXT: ld1 { v1.s }[1], [x19] +; CHECK-SD-NEXT: mov v2.s[2], v0.s[0] +; CHECK-SD-NEXT: ld1 { v1.s }[2], [x20] +; CHECK-SD-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov v0.16b, v2.16b +; CHECK-SD-NEXT: add sp, sp, #80 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_modf_v3f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #112 +; CHECK-GI-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x19, [sp, #96] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 112 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w30, -16 +; CHECK-GI-NEXT: .cfi_offset b8, -24 +; CHECK-GI-NEXT: .cfi_offset b9, -32 +; CHECK-GI-NEXT: add x0, sp, #68 +; CHECK-GI-NEXT: mov s8, v0.s[1] +; CHECK-GI-NEXT: mov s9, v0.s[2] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl modff +; CHECK-GI-NEXT: ldr s1, [sp, #68] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: add x0, sp, #72 +; CHECK-GI-NEXT: stp q0, q1, [sp, #32] // 32-byte Folded Spill +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: bl modff +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: add x0, sp, #76 +; CHECK-GI-NEXT: add x19, sp, #76 +; CHECK-GI-NEXT: ldr s0, [sp, #72] +; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: fmov s0, s9 +; CHECK-GI-NEXT: bl modff +; CHECK-GI-NEXT: ldp q3, q2, [sp, #16] // 32-byte Folded Reload +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; CHECK-GI-NEXT: ldp d9, d8, [sp, #80] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v2.s[1], v1.s[0] +; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v1.s[1], v3.s[0] +; CHECK-GI-NEXT: mov v2.s[2], v0.s[0] +; CHECK-GI-NEXT: ld1 { v1.s }[2], [x19] +; CHECK-GI-NEXT: ldp x30, x19, [sp, #96] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.16b, v2.16b +; CHECK-GI-NEXT: add sp, sp, #112 +; CHECK-GI-NEXT: ret %result = call { <3 x float>, <3 x float> } @llvm.modf.v3f32(<3 x float> %a) ret { <3 x float>, <3 x float> } %result } define { <2 x float>, <2 x float> } @test_modf_v2f32(<2 x float> %a) { -; CHECK-LABEL: test_modf_v2f32: -; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset w19, -8 -; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: add x0, sp, #40 -; CHECK-NEXT: add x19, sp, #40 -; CHECK-NEXT: mov s0, v0.s[1] -; CHECK-NEXT: bl modff -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: add x0, sp, #44 -; CHECK-NEXT: ldr q0, 
[sp] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0 -; CHECK-NEXT: bl modff -; CHECK-NEXT: ldr s1, [sp, #44] -; CHECK-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0 -; CHECK-NEXT: ld1 { v1.s }[1], [x19] -; CHECK-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload -; CHECK-NEXT: mov v0.s[1], v2.s[0] -; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1 -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: add sp, sp, #64 -; CHECK-NEXT: ret +; CHECK-SD-LABEL: test_modf_v2f32: +; CHECK-SD: // %bb.0: +; CHECK-SD-NEXT: sub sp, sp, #64 +; CHECK-SD-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 64 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w30, -16 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: add x0, sp, #40 +; CHECK-SD-NEXT: add x19, sp, #40 +; CHECK-SD-NEXT: mov s0, v0.s[1] +; CHECK-SD-NEXT: bl modff +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: add x0, sp, #44 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-SD-NEXT: bl modff +; CHECK-SD-NEXT: ldr s1, [sp, #44] +; CHECK-SD-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-SD-NEXT: ld1 { v1.s }[1], [x19] +; CHECK-SD-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov v0.s[1], v2.s[0] +; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1 +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: add sp, sp, #64 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_modf_v2f32: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #64 +; CHECK-GI-NEXT: str d8, [sp, #32] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 64 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w30, -16 +; CHECK-GI-NEXT: .cfi_offset b8, -32 +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: add x0, sp, #40 +; CHECK-GI-NEXT: mov s8, v0.s[1] +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0 +; CHECK-GI-NEXT: bl modff +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: add x0, sp, #44 +; CHECK-GI-NEXT: add x19, sp, #44 +; CHECK-GI-NEXT: ldr s0, [sp, #40] +; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: fmov s0, s8 +; CHECK-GI-NEXT: bl modff +; CHECK-GI-NEXT: ldp q2, q1, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0 +; CHECK-GI-NEXT: ldr d8, [sp, #32] // 8-byte Folded Reload +; CHECK-GI-NEXT: mov v2.s[1], v0.s[0] +; CHECK-GI-NEXT: ld1 { v1.s }[1], [x19] +; CHECK-GI-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1 +; CHECK-GI-NEXT: fmov d0, d2 +; CHECK-GI-NEXT: add sp, sp, #64 +; CHECK-GI-NEXT: ret %result = call { <2 x float>, <2 x float> } @llvm.modf.v2f32(<2 x float> %a) ret { <2 x float>, <2 x float> } %result } @@ -224,32 +335,80 @@ define { double, double } @test_modf_f64(double %a) { } define { <2 x double>, <2 x double> } @test_modf_v2f64(<2 x double> %a) { -; CHECK-LABEL: test_modf_v2f64: +; CHECK-SD-LABEL: test_modf_v2f64: +; CHECK-SD: // 
%bb.0: +; CHECK-SD-NEXT: sub sp, sp, #64 +; CHECK-SD-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-SD-NEXT: .cfi_def_cfa_offset 64 +; CHECK-SD-NEXT: .cfi_offset w19, -8 +; CHECK-SD-NEXT: .cfi_offset w30, -16 +; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-SD-NEXT: mov d0, v0.d[1] +; CHECK-SD-NEXT: add x0, sp, #32 +; CHECK-SD-NEXT: add x19, sp, #32 +; CHECK-SD-NEXT: bl modf +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-SD-NEXT: add x0, sp, #40 +; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-SD-NEXT: bl modf +; CHECK-SD-NEXT: ldr d1, [sp, #40] +; CHECK-SD-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload +; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-SD-NEXT: ld1 { v1.d }[1], [x19] +; CHECK-SD-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-SD-NEXT: mov v0.d[1], v2.d[0] +; CHECK-SD-NEXT: add sp, sp, #64 +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: test_modf_v2f64: +; CHECK-GI: // %bb.0: +; CHECK-GI-NEXT: sub sp, sp, #80 +; CHECK-GI-NEXT: str d8, [sp, #48] // 8-byte Folded Spill +; CHECK-GI-NEXT: stp x30, x19, [sp, #64] // 16-byte Folded Spill +; CHECK-GI-NEXT: .cfi_def_cfa_offset 80 +; CHECK-GI-NEXT: .cfi_offset w19, -8 +; CHECK-GI-NEXT: .cfi_offset w30, -16 +; CHECK-GI-NEXT: .cfi_offset b8, -32 +; CHECK-GI-NEXT: add x0, sp, #40 +; CHECK-GI-NEXT: mov d8, v0.d[1] +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-GI-NEXT: bl modf +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill +; CHECK-GI-NEXT: add x0, sp, #56 +; CHECK-GI-NEXT: add x19, sp, #56 +; CHECK-GI-NEXT: ldr d0, [sp, #40] +; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; CHECK-GI-NEXT: fmov d0, d8 +; CHECK-GI-NEXT: bl modf +; CHECK-GI-NEXT: ldp q2, q1, [sp] // 32-byte Folded Reload +; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0 +; CHECK-GI-NEXT: ldr d8, [sp, #48] // 8-byte Folded Reload +; CHECK-GI-NEXT: mov v2.d[1], v0.d[0] +; CHECK-GI-NEXT: ld1 { v1.d }[1], [x19] +; CHECK-GI-NEXT: ldp x30, x19, [sp, #64] // 16-byte Folded Reload +; CHECK-GI-NEXT: mov v0.16b, v2.16b +; CHECK-GI-NEXT: add sp, sp, #80 +; CHECK-GI-NEXT: ret + %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %a) + ret { <2 x double>, <2 x double> } %result +} + +define { fp128, fp128 } @test_modf_fp128(fp128 %a) { +; CHECK-LABEL: test_modf_fp128: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #64 -; CHECK-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 64 -; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: .cfi_offset w30, -16 -; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill -; CHECK-NEXT: mov d0, v0.d[1] -; CHECK-NEXT: add x0, sp, #32 -; CHECK-NEXT: add x19, sp, #32 -; CHECK-NEXT: bl modf -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill -; CHECK-NEXT: add x0, sp, #40 -; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 -; CHECK-NEXT: bl modf -; CHECK-NEXT: ldr d1, [sp, #40] -; CHECK-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: ld1 { v1.d }[1], [x19] -; CHECK-NEXT: ldp x30, x19, [sp, #48] // 
16-byte Folded Reload -; CHECK-NEXT: mov v0.d[1], v2.d[0] -; CHECK-NEXT: add sp, sp, #64 +; CHECK-NEXT: mov x0, sp +; CHECK-NEXT: bl modfl +; CHECK-NEXT: ldr q1, [sp] +; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: add sp, sp, #32 ; CHECK-NEXT: ret - %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %a) - ret { <2 x double>, <2 x double> } %result + %result = call { fp128, fp128 } @llvm.modf.fp128(fp128 %a) + ret { fp128, fp128 } %result } diff --git a/llvm/test/CodeGen/AArch64/madd-lohi.ll b/llvm/test/CodeGen/AArch64/madd-lohi.ll deleted file mode 100644 index e5d8fcdda326d..0000000000000 --- a/llvm/test/CodeGen/AArch64/madd-lohi.ll +++ /dev/null @@ -1,25 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=arm64-apple-ios7.0 %s -o - | FileCheck %s -; RUN: llc -mtriple=aarch64_be-linux-gnu %s -o - | FileCheck --check-prefix=CHECK-BE %s - -define i128 @test_128bitmul(i128 %lhs, i128 %rhs) { -; CHECK-LABEL: test_128bitmul: -; CHECK: ; %bb.0: -; CHECK-NEXT: umulh x8, x0, x2 -; CHECK-NEXT: madd x8, x0, x3, x8 -; CHECK-NEXT: mul x0, x0, x2 -; CHECK-NEXT: madd x1, x1, x2, x8 -; CHECK-NEXT: ret -; -; CHECK-BE-LABEL: test_128bitmul: -; CHECK-BE: // %bb.0: -; CHECK-BE-NEXT: umulh x8, x1, x3 -; CHECK-BE-NEXT: madd x8, x1, x2, x8 -; CHECK-BE-NEXT: mul x1, x1, x3 -; CHECK-BE-NEXT: madd x0, x0, x3, x8 -; CHECK-BE-NEXT: ret - - - %prod = mul i128 %lhs, %rhs - ret i128 %prod -} diff --git a/llvm/test/CodeGen/AArch64/masked-integer-compare.ll b/llvm/test/CodeGen/AArch64/masked-integer-compare.ll new file mode 100644 index 0000000000000..363cd10c78a94 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/masked-integer-compare.ll @@ -0,0 +1,178 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=aarch64-none-linux-gnu < %s -o -| FileCheck %s + +; Test code generation support for SUBS (shifted register) from masked integer +; compare sequences. These sequences appear in isinf tests, for example. 
+ +define i1 @combine_masked_i32(i32 %x) { +; CHECK-LABEL: combine_masked_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #-16777216 // =0xff000000 +; CHECK-NEXT: cmp w8, w0, lsl #1 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %sub = sub i32 %and, u0x7f800000 + %cmp = icmp eq i32 %sub, 0 + ret i1 %cmp +} + +define i1 @combine_masked_i64(i64 %x) { +; CHECK-LABEL: combine_masked_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov x8, #-9007199254740992 // =0xffe0000000000000 +; CHECK-NEXT: cmp x8, x0, lsl #1 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i64 %x, u0x7fffffffffffffff + %sub = sub i64 %and, u0x7ff0000000000000 + %cmp = icmp eq i64 %sub, 0 + ret i1 %cmp +} + +define i1 @combine_masked_ne(i32 %x) { +; CHECK-LABEL: combine_masked_ne: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #-16777216 // =0xff000000 +; CHECK-NEXT: cmp w8, w0, lsl #1 +; CHECK-NEXT: cset w0, ne +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %cmp = icmp ne i32 %and, u0x7f800000 + ret i1 %cmp +} + +define i1 @combine_masked_lsl4(i32 %x) { +; CHECK-LABEL: combine_masked_lsl4: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #-134217728 // =0xf8000000 +; CHECK-NEXT: cmp w8, w0, lsl #4 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x0fffffff + %cmp = icmp eq i32 %and, u0x0f800000 + ret i1 %cmp +} + +define i1 @dont_combine_not_mask(i32 %x) { +; CHECK-LABEL: dont_combine_not_mask: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 +; CHECK-NEXT: and w9, w0, #0x7ffffffe +; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7ffffffe + %cmp = icmp eq i32 %and, u0x7f800000 + ret i1 %cmp +} + +define i1 @dont_combine_cmp_not_masked(i32 %x) { +; CHECK-LABEL: dont_combine_cmp_not_masked: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 +; CHECK-NEXT: and w9, w0, #0x3fffffff +; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x3fffffff + %cmp = icmp eq i32 %and, u0x7f800000 + ret i1 %cmp +} + +define i1 @dont_combine_not_constant_mask(i32 %x, i32 %m) { +; CHECK-LABEL: dont_combine_not_constant_mask: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 +; CHECK-NEXT: and w9, w0, w1 +; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, %m + %cmp = icmp eq i32 %and, u0x7f800000 + ret i1 %cmp +} + +define i1 @dont_combine_not_constant_cmp(i32 %x, i32 %c) { +; CHECK-LABEL: dont_combine_not_constant_cmp: +; CHECK: // %bb.0: +; CHECK-NEXT: and w8, w0, #0xfffffff +; CHECK-NEXT: cmp w8, w1 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x0fffffff + %cmp = icmp eq i32 %and, %c + ret i1 %cmp +} + +define i1 @dont_combine_subs_imm(i32 %x) { +; CHECK-LABEL: dont_combine_subs_imm: +; CHECK: // %bb.0: +; CHECK-NEXT: and w8, w0, #0x7fffffff +; CHECK-NEXT: cmp w8, #291 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %cmp = icmp eq i32 %and, u0x123 + ret i1 %cmp +} + +define i1 @dont_combine_subs_imm_lsl12(i32 %x) { +; CHECK-LABEL: dont_combine_subs_imm_lsl12: +; CHECK: // %bb.0: +; CHECK-NEXT: and w8, w0, #0x7fffffff +; CHECK-NEXT: cmp w8, #291, lsl #12 // =1191936 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %cmp = icmp eq i32 %and, u0x123000 + ret i1 %cmp +} + +define { i1, i1 } @dont_combine_multi_use_cmp(i32 %x) { +; CHECK-LABEL: dont_combine_multi_use_cmp: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 
// =0x7f800000 +; CHECK-NEXT: and w9, w0, #0x7fffffff +; CHECK-NEXT: cmp w9, w8 +; CHECK-NEXT: cset w0, eq +; CHECK-NEXT: cset w1, lt +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %eq = icmp eq i32 %and, u0x7f800000 + %lt = icmp slt i32 %and, u0x7f800000 + %r1 = insertvalue { i1, i1 } poison, i1 %eq, 0 + %r2 = insertvalue { i1, i1 } %r1, i1 %lt, 1 + ret { i1, i1 } %r2 +} + +define { i32, i1 } @dont_combine_multi_use_sub(i32 %x) { +; CHECK-LABEL: dont_combine_multi_use_sub: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #-2139095040 // =0x80800000 +; CHECK-NEXT: and w9, w0, #0x7fffffff +; CHECK-NEXT: adds w0, w9, w8 +; CHECK-NEXT: cset w1, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %sub = sub i32 %and, u0x7f800000 + %cmp = icmp eq i32 %sub, 0 + %r1 = insertvalue { i32, i1 } poison, i32 %sub, 0 + %r2 = insertvalue { i32, i1 } %r1, i1 %cmp, 1 + ret { i32, i1 } %r2 +} + +define { i32, i1 } @dont_combine_multi_use_and(i32 %x) { +; CHECK-LABEL: dont_combine_multi_use_and: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000 +; CHECK-NEXT: and w0, w0, #0x7fffffff +; CHECK-NEXT: cmp w0, w8 +; CHECK-NEXT: cset w1, eq +; CHECK-NEXT: ret + %and = and i32 %x, u0x7fffffff + %cmp = icmp eq i32 %and, u0x7f800000 + %r1 = insertvalue { i32, i1 } poison, i32 %and, 0 + %r2 = insertvalue { i32, i1 } %r1, i1 %cmp, 1 + ret { i32, i1 } %r2 +} diff --git a/llvm/test/CodeGen/AArch64/movi64_sve.ll b/llvm/test/CodeGen/AArch64/movi64_sve.ll new file mode 100644 index 0000000000000..1d4e00d0c3d10 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/movi64_sve.ll @@ -0,0 +1,238 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=aarch64 -mattr=+neon < %s | FileCheck %s --check-prefixes=COMMON,NEON +; RUN: llc -mtriple=aarch64 -mattr=+neon,+sve < %s | FileCheck %s --check-prefixes=COMMON,SVE + +define <2 x i64> @movi_1_v2i64() { +; NEON-LABEL: movi_1_v2i64: +; NEON: // %bb.0: +; NEON-NEXT: mov w8, #1 // =0x1 +; NEON-NEXT: dup v0.2d, x8 +; NEON-NEXT: ret +; +; SVE-LABEL: movi_1_v2i64: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #1 // =0x1 +; SVE-NEXT: ret + ret <2 x i64> splat (i64 1) +} + +define <2 x i64> @movi_127_v2i64() { +; NEON-LABEL: movi_127_v2i64: +; NEON: // %bb.0: +; NEON-NEXT: mov w8, #127 // =0x7f +; NEON-NEXT: dup v0.2d, x8 +; NEON-NEXT: ret +; +; SVE-LABEL: movi_127_v2i64: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #127 // =0x7f +; SVE-NEXT: ret + ret <2 x i64> splat (i64 127) +} + +define <2 x i64> @movi_m128_v2i64() { +; NEON-LABEL: movi_m128_v2i64: +; NEON: // %bb.0: +; NEON-NEXT: mov x8, #-128 // =0xffffffffffffff80 +; NEON-NEXT: dup v0.2d, x8 +; NEON-NEXT: ret +; +; SVE-LABEL: movi_m128_v2i64: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #-128 // =0xffffffffffffff80 +; SVE-NEXT: ret + ret <2 x i64> splat (i64 -128) +} + +define <2 x i64> @movi_256_v2i64() { +; NEON-LABEL: movi_256_v2i64: +; NEON: // %bb.0: +; NEON-NEXT: mov w8, #256 // =0x100 +; NEON-NEXT: dup v0.2d, x8 +; NEON-NEXT: ret +; +; SVE-LABEL: movi_256_v2i64: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #256 // =0x100 +; SVE-NEXT: ret + ret <2 x i64> splat (i64 256) +} + +define <2 x i64> @movi_32512_v2i64() { +; NEON-LABEL: movi_32512_v2i64: +; NEON: // %bb.0: +; NEON-NEXT: mov w8, #32512 // =0x7f00 +; NEON-NEXT: dup v0.2d, x8 +; NEON-NEXT: ret +; +; SVE-LABEL: movi_32512_v2i64: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #32512 // =0x7f00 +; SVE-NEXT: ret + ret <2 x i64> splat (i64 32512) +} + +define <2 x i64> @movi_m32768_v2i64() { +; NEON-LABEL: 
movi_m32768_v2i64: +; NEON: // %bb.0: +; NEON-NEXT: mov x8, #-32768 // =0xffffffffffff8000 +; NEON-NEXT: dup v0.2d, x8 +; NEON-NEXT: ret +; +; SVE-LABEL: movi_m32768_v2i64: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #-32768 // =0xffffffffffff8000 +; SVE-NEXT: ret + ret <2 x i64> splat (i64 -32768) +} + +; Special cases where the destination vector does not have 64-bit elements + +define <4 x i32> @movi_v4i32_1() { +; NEON-LABEL: movi_v4i32_1: +; NEON: // %bb.0: +; NEON-NEXT: adrp x8, .LCPI6_0 +; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI6_0] +; NEON-NEXT: ret +; +; SVE-LABEL: movi_v4i32_1: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #127 // =0x7f +; SVE-NEXT: ret + ret <4 x i32> +} + +define <4 x i32> @movi_v4i32_2() { +; NEON-LABEL: movi_v4i32_2: +; NEON: // %bb.0: +; NEON-NEXT: adrp x8, .LCPI7_0 +; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI7_0] +; NEON-NEXT: ret +; +; SVE-LABEL: movi_v4i32_2: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #32512 // =0x7f00 +; SVE-NEXT: ret + ret <4 x i32> +} + +define <8 x i16> @movi_v8i16_1() { +; NEON-LABEL: movi_v8i16_1: +; NEON: // %bb.0: +; NEON-NEXT: adrp x8, .LCPI8_0 +; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI8_0] +; NEON-NEXT: ret +; +; SVE-LABEL: movi_v8i16_1: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #127 // =0x7f +; SVE-NEXT: ret + ret <8 x i16> +} + +define <8 x i16> @movi_v8i16_2() { +; NEON-LABEL: movi_v8i16_2: +; NEON: // %bb.0: +; NEON-NEXT: adrp x8, .LCPI9_0 +; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI9_0] +; NEON-NEXT: ret +; +; SVE-LABEL: movi_v8i16_2: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #32512 // =0x7f00 +; SVE-NEXT: ret + ret <8 x i16> +} + +define <16 x i8> @movi_v16i8_1() { +; NEON-LABEL: movi_v16i8_1: +; NEON: // %bb.0: +; NEON-NEXT: adrp x8, .LCPI10_0 +; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI10_0] +; NEON-NEXT: ret +; +; SVE-LABEL: movi_v16i8_1: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #127 // =0x7f +; SVE-NEXT: ret + ret <16 x i8> +} + +define <16 x i8> @movi_v16i8_2() { +; NEON-LABEL: movi_v16i8_2: +; NEON: // %bb.0: +; NEON-NEXT: adrp x8, .LCPI11_0 +; NEON-NEXT: ldr q0, [x8, :lo12:.LCPI11_0] +; NEON-NEXT: ret +; +; SVE-LABEL: movi_v16i8_2: +; SVE: // %bb.0: +; SVE-NEXT: mov z0.d, #32512 // =0x7f00 +; SVE-NEXT: ret + ret <16 x i8> +} + +; Negative cases + +define <2 x i64> @movi_128_v2i64() { +; COMMON-LABEL: movi_128_v2i64: +; COMMON: // %bb.0: +; COMMON-NEXT: mov w8, #128 // =0x80 +; COMMON-NEXT: dup v0.2d, x8 +; COMMON-NEXT: ret + ret <2 x i64> splat (i64 128) +} + +define <2 x i64> @movi_m127_v2i64() { +; COMMON-LABEL: movi_m127_v2i64: +; COMMON: // %bb.0: +; COMMON-NEXT: mov x8, #-129 // =0xffffffffffffff7f +; COMMON-NEXT: dup v0.2d, x8 +; COMMON-NEXT: ret + ret <2 x i64> splat (i64 -129) +} + +define <2 x i64> @movi_32513_v2i64() { +; COMMON-LABEL: movi_32513_v2i64: +; COMMON: // %bb.0: +; COMMON-NEXT: mov w8, #32513 // =0x7f01 +; COMMON-NEXT: dup v0.2d, x8 +; COMMON-NEXT: ret + ret <2 x i64> splat (i64 32513) +} + +define <2 x i64> @movi_m32769_v2i64() { +; COMMON-LABEL: movi_m32769_v2i64: +; COMMON: // %bb.0: +; COMMON-NEXT: mov x8, #-32769 // =0xffffffffffff7fff +; COMMON-NEXT: dup v0.2d, x8 +; COMMON-NEXT: ret + ret <2 x i64> splat (i64 -32769) +} + +define <2 x i64> @movi_257_v2i64() { +; COMMON-LABEL: movi_257_v2i64: +; COMMON: // %bb.0: +; COMMON-NEXT: mov w8, #257 // =0x101 +; COMMON-NEXT: dup v0.2d, x8 +; COMMON-NEXT: ret + ret <2 x i64> splat (i64 257) +} + +define <4 x i32> @movi_v4i32_3() { +; COMMON-LABEL: movi_v4i32_3: +; COMMON: // %bb.0: +; COMMON-NEXT: adrp x8, .LCPI17_0 +; COMMON-NEXT: ldr q0, [x8, :lo12:.LCPI17_0] +; COMMON-NEXT: ret + 
ret <4 x i32> +} + +define <16 x i8> @movi_v16i8_3() { +; COMMON-LABEL: movi_v16i8_3: +; COMMON: // %bb.0: +; COMMON-NEXT: adrp x8, .LCPI18_0 +; COMMON-NEXT: ldr q0, [x8, :lo12:.LCPI18_0] +; COMMON-NEXT: ret + ret <16 x i8> +} diff --git a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll index c38516fc57bbd..428750740fc56 100644 --- a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll +++ b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll @@ -1255,3 +1255,199 @@ entry: %partial.reduce = tail call <2 x i64> @llvm.vector.partial.reduce.add(<2 x i64> %acc, <8 x i64> %input.wide) ret <2 x i64> %partial.reduce } + +define <4 x i32> @partial_reduce_shl_sext_const_rhs6(<16 x i8> %l, <4 x i32> %part) { +; CHECK-NODOT-LABEL: partial_reduce_shl_sext_const_rhs6: +; CHECK-NODOT: // %bb.0: +; CHECK-NODOT-NEXT: sshll v2.8h, v0.8b, #0 +; CHECK-NODOT-NEXT: sshll2 v0.8h, v0.16b, #0 +; CHECK-NODOT-NEXT: sshll v3.4s, v0.4h, #6 +; CHECK-NODOT-NEXT: sshll2 v4.4s, v2.8h, #6 +; CHECK-NODOT-NEXT: sshll v2.4s, v2.4h, #6 +; CHECK-NODOT-NEXT: sshll2 v0.4s, v0.8h, #6 +; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-NODOT-NEXT: add v2.4s, v4.4s, v3.4s +; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-NODOT-NEXT: add v0.4s, v1.4s, v0.4s +; CHECK-NODOT-NEXT: ret +; +; CHECK-DOT-LABEL: partial_reduce_shl_sext_const_rhs6: +; CHECK-DOT: // %bb.0: +; CHECK-DOT-NEXT: movi v2.16b, #64 +; CHECK-DOT-NEXT: sdot v1.4s, v0.16b, v2.16b +; CHECK-DOT-NEXT: mov v0.16b, v1.16b +; CHECK-DOT-NEXT: ret +; +; CHECK-DOT-I8MM-LABEL: partial_reduce_shl_sext_const_rhs6: +; CHECK-DOT-I8MM: // %bb.0: +; CHECK-DOT-I8MM-NEXT: movi v2.16b, #64 +; CHECK-DOT-I8MM-NEXT: sdot v1.4s, v0.16b, v2.16b +; CHECK-DOT-I8MM-NEXT: mov v0.16b, v1.16b +; CHECK-DOT-I8MM-NEXT: ret + %ext = sext <16 x i8> %l to <16 x i32> + %shift = shl nsw <16 x i32> %ext, splat (i32 6) + %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift) + ret <4 x i32> %red +} + +define <4 x i32> @partial_reduce_shl_sext_const_rhs7(<16 x i8> %l, <4 x i32> %part) { +; CHECK-COMMON-LABEL: partial_reduce_shl_sext_const_rhs7: +; CHECK-COMMON: // %bb.0: +; CHECK-COMMON-NEXT: sshll v2.8h, v0.8b, #0 +; CHECK-COMMON-NEXT: sshll2 v0.8h, v0.16b, #0 +; CHECK-COMMON-NEXT: sshll v3.4s, v0.4h, #7 +; CHECK-COMMON-NEXT: sshll2 v4.4s, v2.8h, #7 +; CHECK-COMMON-NEXT: sshll v2.4s, v2.4h, #7 +; CHECK-COMMON-NEXT: sshll2 v0.4s, v0.8h, #7 +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-COMMON-NEXT: add v2.4s, v4.4s, v3.4s +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-COMMON-NEXT: add v0.4s, v1.4s, v0.4s +; CHECK-COMMON-NEXT: ret + %ext = sext <16 x i8> %l to <16 x i32> + %shift = shl nsw <16 x i32> %ext, splat (i32 7) + %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift) + ret <4 x i32> %red +} + +define <4 x i32> @partial_reduce_shl_sext_const_rhs8(<16 x i8> %l, <4 x i32> %part) { +; CHECK-COMMON-LABEL: partial_reduce_shl_sext_const_rhs8: +; CHECK-COMMON: // %bb.0: +; CHECK-COMMON-NEXT: sshll v2.8h, v0.8b, #0 +; CHECK-COMMON-NEXT: sshll2 v0.8h, v0.16b, #0 +; CHECK-COMMON-NEXT: sshll v3.4s, v0.4h, #8 +; CHECK-COMMON-NEXT: sshll2 v4.4s, v2.8h, #8 +; CHECK-COMMON-NEXT: sshll v2.4s, v2.4h, #8 +; CHECK-COMMON-NEXT: sshll2 v0.4s, v0.8h, #8 +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-COMMON-NEXT: add v2.4s, v4.4s, v3.4s +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s +; 
CHECK-COMMON-NEXT: add v0.4s, v1.4s, v0.4s +; CHECK-COMMON-NEXT: ret + %ext = sext <16 x i8> %l to <16 x i32> + %shift = shl nsw <16 x i32> %ext, splat (i32 8) + %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift) + ret <4 x i32> %red +} + +define <4 x i32> @partial_reduce_shl_sext_const_rhs_9(<16 x i8> %l, <4 x i32> %part) { +; CHECK-COMMON-LABEL: partial_reduce_shl_sext_const_rhs_9: +; CHECK-COMMON: // %bb.0: +; CHECK-COMMON-NEXT: ret + %ext = sext <16 x i8> %l to <16 x i32> + %shift = shl nsw <16 x i32> %ext, splat (i32 32) + %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift) + ret <4 x i32> %red +} + +define <4 x i32> @partial_reduce_shl_sext_non_const_rhs(<16 x i8> %l, <4 x i32> %part) { +; CHECK-COMMON-LABEL: partial_reduce_shl_sext_non_const_rhs: +; CHECK-COMMON: // %bb.0: +; CHECK-COMMON-NEXT: sshll v2.8h, v0.8b, #0 +; CHECK-COMMON-NEXT: sshll2 v0.8h, v0.16b, #0 +; CHECK-COMMON-NEXT: sshll v3.4s, v2.4h, #0 +; CHECK-COMMON-NEXT: sshll2 v2.4s, v2.8h, #0 +; CHECK-COMMON-NEXT: sshll v4.4s, v0.4h, #0 +; CHECK-COMMON-NEXT: sshll2 v0.4s, v0.8h, #0 +; CHECK-COMMON-NEXT: ushl v4.4s, v4.4s, v4.4s +; CHECK-COMMON-NEXT: ushl v2.4s, v2.4s, v2.4s +; CHECK-COMMON-NEXT: ushl v3.4s, v3.4s, v3.4s +; CHECK-COMMON-NEXT: ushl v0.4s, v0.4s, v0.4s +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v3.4s +; CHECK-COMMON-NEXT: add v2.4s, v2.4s, v4.4s +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-COMMON-NEXT: add v0.4s, v1.4s, v0.4s +; CHECK-COMMON-NEXT: ret + %ext = sext <16 x i8> %l to <16 x i32> + %shift = shl nsw <16 x i32> %ext, %ext + %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift) + ret <4 x i32> %red +} + +define <4 x i32> @partial_reduce_shl_zext_const_rhs6(<16 x i8> %l, <4 x i32> %part) { +; CHECK-NODOT-LABEL: partial_reduce_shl_zext_const_rhs6: +; CHECK-NODOT: // %bb.0: +; CHECK-NODOT-NEXT: ushll v2.8h, v0.8b, #0 +; CHECK-NODOT-NEXT: ushll2 v0.8h, v0.16b, #0 +; CHECK-NODOT-NEXT: ushll v3.4s, v0.4h, #6 +; CHECK-NODOT-NEXT: ushll2 v4.4s, v2.8h, #6 +; CHECK-NODOT-NEXT: ushll v2.4s, v2.4h, #6 +; CHECK-NODOT-NEXT: ushll2 v0.4s, v0.8h, #6 +; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-NODOT-NEXT: add v2.4s, v4.4s, v3.4s +; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-NODOT-NEXT: add v0.4s, v1.4s, v0.4s +; CHECK-NODOT-NEXT: ret +; +; CHECK-DOT-LABEL: partial_reduce_shl_zext_const_rhs6: +; CHECK-DOT: // %bb.0: +; CHECK-DOT-NEXT: movi v2.16b, #64 +; CHECK-DOT-NEXT: udot v1.4s, v0.16b, v2.16b +; CHECK-DOT-NEXT: mov v0.16b, v1.16b +; CHECK-DOT-NEXT: ret +; +; CHECK-DOT-I8MM-LABEL: partial_reduce_shl_zext_const_rhs6: +; CHECK-DOT-I8MM: // %bb.0: +; CHECK-DOT-I8MM-NEXT: movi v2.16b, #64 +; CHECK-DOT-I8MM-NEXT: udot v1.4s, v0.16b, v2.16b +; CHECK-DOT-I8MM-NEXT: mov v0.16b, v1.16b +; CHECK-DOT-I8MM-NEXT: ret + %ext = zext <16 x i8> %l to <16 x i32> + %shift = shl nsw <16 x i32> %ext, splat (i32 6) + %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift) + ret <4 x i32> %red +} + +define <4 x i32> @partial_reduce_shl_zext_const_rhs8(<16 x i8> %l, <4 x i32> %part) { +; CHECK-COMMON-LABEL: partial_reduce_shl_zext_const_rhs8: +; CHECK-COMMON: // %bb.0: +; CHECK-COMMON-NEXT: ushll v2.8h, v0.8b, #0 +; CHECK-COMMON-NEXT: ushll2 v0.8h, v0.16b, #0 +; CHECK-COMMON-NEXT: ushll v3.4s, v0.4h, #8 +; CHECK-COMMON-NEXT: ushll2 v4.4s, v2.8h, #8 +; CHECK-COMMON-NEXT: ushll v2.4s, v2.4h, #8 +; 
CHECK-COMMON-NEXT: ushll2 v0.4s, v0.8h, #8 +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-COMMON-NEXT: add v2.4s, v4.4s, v3.4s +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-COMMON-NEXT: add v0.4s, v1.4s, v0.4s +; CHECK-COMMON-NEXT: ret + %ext = zext <16 x i8> %l to <16 x i32> + %shift = shl nsw <16 x i32> %ext, splat (i32 8) + %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift) + ret <4 x i32> %red +} + +define <4 x i32> @partial_reduce_shl_zext_const_rhs_9(<16 x i8> %l, <4 x i32> %part) { +; CHECK-COMMON-LABEL: partial_reduce_shl_zext_const_rhs_9: +; CHECK-COMMON: // %bb.0: +; CHECK-COMMON-NEXT: ret + %ext = zext <16 x i8> %l to <16 x i32> + %shift = shl nsw <16 x i32> %ext, splat (i32 32) + %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift) + ret <4 x i32> %red +} + +define <4 x i32> @partial_reduce_shl_zext_non_const_rhs(<16 x i8> %l, <4 x i32> %part) { +; CHECK-COMMON-LABEL: partial_reduce_shl_zext_non_const_rhs: +; CHECK-COMMON: // %bb.0: +; CHECK-COMMON-NEXT: ushll v2.8h, v0.8b, #0 +; CHECK-COMMON-NEXT: ushll2 v0.8h, v0.16b, #0 +; CHECK-COMMON-NEXT: ushll v3.4s, v2.4h, #0 +; CHECK-COMMON-NEXT: ushll2 v2.4s, v2.8h, #0 +; CHECK-COMMON-NEXT: ushll v4.4s, v0.4h, #0 +; CHECK-COMMON-NEXT: ushll2 v0.4s, v0.8h, #0 +; CHECK-COMMON-NEXT: ushl v4.4s, v4.4s, v4.4s +; CHECK-COMMON-NEXT: ushl v2.4s, v2.4s, v2.4s +; CHECK-COMMON-NEXT: ushl v3.4s, v3.4s, v3.4s +; CHECK-COMMON-NEXT: ushl v0.4s, v0.4s, v0.4s +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v3.4s +; CHECK-COMMON-NEXT: add v2.4s, v2.4s, v4.4s +; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s +; CHECK-COMMON-NEXT: add v0.4s, v1.4s, v0.4s +; CHECK-COMMON-NEXT: ret + %ext = zext <16 x i8> %l to <16 x i32> + %shift = shl nsw <16 x i32> %ext, %ext + %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift) + ret <4 x i32> %red +} diff --git a/llvm/test/CodeGen/AArch64/pr161420.ll b/llvm/test/CodeGen/AArch64/pr161420.ll new file mode 100644 index 0000000000000..dcdf0ed1e7a35 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/pr161420.ll @@ -0,0 +1,54 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s | FileCheck %s + +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128-Fn32" +target triple = "arm64-apple-macosx15.0.0" + +; From: https://github.com/llvm/llvm-project/issues/161420. This test checks that +; two `luti4` instructions are emitted. 
+define void @pluto(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3) #0 { +; CHECK-LABEL: pluto: +; CHECK: ; %bb.0: ; %bb +; CHECK-NEXT: mov w8, #0 ; =0x0 +; CHECK-NEXT: ldr zt0, [x1] +; CHECK-NEXT: ldr z4, [x3] +; CHECK-NEXT: ptrue pn8.h +; CHECK-NEXT: ld1h { z0.h - z3.h }, pn8/z, [x0] +; CHECK-NEXT: luti4 { z16.h - z19.h }, zt0, z4[0] +; CHECK-NEXT: fmla za.h[w8, 0, vgx4], { z0.h - z3.h }, { z16.h - z19.h } +; CHECK-NEXT: ldr zt0, [x2] +; CHECK-NEXT: luti4 { z4.h - z7.h }, zt0, z4[0] +; CHECK-NEXT: fmla za.h[w8, 2, vgx4], { z0.h - z3.h }, { z4.h - z7.h } +; CHECK-NEXT: ret +bb: + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %arg1) + %load = load , ptr %arg3, align 16 + %call = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c16() + %call4 = tail call { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %call, ptr %arg) + %extractvalue = extractvalue { , , , } %call4, 0 + %extractvalue5 = extractvalue { , , , } %call4, 1 + %extractvalue6 = extractvalue { , , , } %call4, 2 + %extractvalue7 = extractvalue { , , , } %call4, 3 + %call8 = tail call { , , , } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 0, %load, i32 0) + %extractvalue9 = extractvalue { , , , } %call8, 0 + %extractvalue10 = extractvalue { , , , } %call8, 1 + %extractvalue11 = extractvalue { , , , } %call8, 2 + %extractvalue12 = extractvalue { , , , } %call8, 3 + tail call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 0, %extractvalue, %extractvalue5, %extractvalue6, %extractvalue7, %extractvalue9, %extractvalue10, %extractvalue11, %extractvalue12) + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %arg2) + %call13 = tail call { , , , } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 0, %load, i32 0) + %extractvalue14 = extractvalue { , , , } %call13, 0 + %extractvalue15 = extractvalue { , , , } %call13, 1 + %extractvalue16 = extractvalue { , , , } %call13, 2 + %extractvalue17 = extractvalue { , , , } %call13, 3 + tail call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 2, %extractvalue, %extractvalue5, %extractvalue6, %extractvalue7, %extractvalue14, %extractvalue15, %extractvalue16, %extractvalue17) + ret void +} + +declare void @llvm.aarch64.sme.ldr.zt(i32, ptr) +declare target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c16() +declare { , , , } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount"), ptr) +declare { , , , } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 immarg, , i32 immarg) +declare void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32, , , , , , , , ) + +attributes #0 = { mustprogress nofree noinline norecurse nosync nounwind ssp willreturn uwtable(sync) "aarch64_inout_za" "aarch64_inout_zt0" "aarch64_pstate_sm_enabled" "target-cpu"="apple-m1" "target-features"="+fp-armv8,+lse,+neon,+sme,+sme-f16f16,+sme2,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a" } diff --git a/llvm/test/CodeGen/AArch64/seh-minimal-prologue-epilogue.ll b/llvm/test/CodeGen/AArch64/seh-minimal-prologue-epilogue.ll new file mode 100644 index 0000000000000..cc71b8b3065ad --- /dev/null +++ b/llvm/test/CodeGen/AArch64/seh-minimal-prologue-epilogue.ll @@ -0,0 +1,53 @@ +; RUN: llc -mtriple=aarch64-windows %s -o - | FileCheck %s + +; This test verifies that functions requiring Windows CFI that have minimal +; or no prologue instructions still emit proper SEH directives, specifically +; ensuring .seh_endprologue is emitted before .seh_startepilogue. 
+; +; This reproduces the issue where Swift async functions with swifttailcc +; calling convention would fail with: +; "error: starting epilogue (.seh_startepilogue) before prologue has ended (.seh_endprologue)" + +; Test 1: Swift-style tail call function with minimal prologue +define swifttailcc void @test_swifttailcc_minimal(ptr %async_ctx, ptr %arg1, ptr %arg2) { +; CHECK-LABEL: test_swifttailcc_minimal: +; CHECK-NOT: .seh_proc test_swifttailcc_minimal +; CHECK-NOT: .seh_endprologue +; CHECK-NOT: .seh_startepilogue +; CHECK-NOT: .seh_endepilogue +; CHECK-NOT: .seh_endproc +entry: + %ptr1 = getelementptr inbounds i8, ptr %async_ctx, i64 16 + %ptr2 = getelementptr inbounds i8, ptr %async_ctx, i64 24 + store ptr %arg1, ptr %ptr1, align 8 + store ptr %arg2, ptr %ptr2, align 8 + musttail call swifttailcc void @external_swift_function(ptr %async_ctx, ptr %arg1) + ret void +} + +; Test 2: Function similar to the original failing case +define linkonce_odr hidden swifttailcc void @test_linkonce_swifttailcc(ptr swiftasync %async_ctx, ptr %arg1, ptr noalias dereferenceable(40) %arg2, ptr %arg3, i64 %value, ptr %arg4, ptr %arg5, ptr %arg6, i1 %flag, ptr %arg7, ptr noalias dereferenceable(40) %arg8) { +; CHECK-LABEL: test_linkonce_swifttailcc: +; CHECK-NEXT: .seh_proc +; CHECK: .seh_endprologue +; CHECK: .seh_startepilogue +; CHECK: .seh_endepilogue +; CHECK: .seh_endproc +entry: + %frame_ptr = getelementptr inbounds nuw i8, ptr %async_ctx, i64 16 + %ctx1 = getelementptr inbounds nuw i8, ptr %async_ctx, i64 400 + %ctx2 = getelementptr inbounds nuw i8, ptr %async_ctx, i64 1168 + %spill1 = getelementptr inbounds nuw i8, ptr %async_ctx, i64 2392 + store ptr %arg8, ptr %spill1, align 8 + %spill2 = getelementptr inbounds nuw i8, ptr %async_ctx, i64 2384 + store ptr %arg7, ptr %spill2, align 8 + %spill3 = getelementptr inbounds nuw i8, ptr %async_ctx, i64 2225 + store i1 %flag, ptr %spill3, align 1 + %spill4 = getelementptr inbounds nuw i8, ptr %async_ctx, i64 2376 + store ptr %arg6, ptr %spill4, align 8 + musttail call swifttailcc void @external_swift_continuation(ptr swiftasync %async_ctx, i64 0, i64 0) + ret void +} + +declare swifttailcc void @external_swift_function(ptr, ptr) +declare swifttailcc void @external_swift_continuation(ptr swiftasync, i64, i64) diff --git a/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll b/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll index a0a14f2ffae3f..e3007a3723484 100644 --- a/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll +++ b/llvm/test/CodeGen/AArch64/sme-agnostic-za.ll @@ -169,8 +169,6 @@ define i64 @streaming_agnostic_caller_nonstreaming_private_za_callee(i64 %v) nou ; CHECK-NEWLOWERING-NEXT: smstop sm ; CHECK-NEWLOWERING-NEXT: mov x0, x8 ; CHECK-NEWLOWERING-NEXT: bl private_za_decl -; CHECK-NEWLOWERING-NEXT: smstart sm -; CHECK-NEWLOWERING-NEXT: smstop sm ; CHECK-NEWLOWERING-NEXT: bl private_za_decl ; CHECK-NEWLOWERING-NEXT: smstart sm ; CHECK-NEWLOWERING-NEXT: mov x8, x0 @@ -268,19 +266,11 @@ define i64 @streaming_compatible_agnostic_caller_nonstreaming_private_za_callee( ; CHECK-NEWLOWERING-NEXT: .LBB5_2: ; CHECK-NEWLOWERING-NEXT: mov x0, x8 ; CHECK-NEWLOWERING-NEXT: bl private_za_decl +; CHECK-NEWLOWERING-NEXT: bl private_za_decl ; CHECK-NEWLOWERING-NEXT: tbz w20, #0, .LBB5_4 ; CHECK-NEWLOWERING-NEXT: // %bb.3: ; CHECK-NEWLOWERING-NEXT: smstart sm ; CHECK-NEWLOWERING-NEXT: .LBB5_4: -; CHECK-NEWLOWERING-NEXT: tbz w20, #0, .LBB5_6 -; CHECK-NEWLOWERING-NEXT: // %bb.5: -; CHECK-NEWLOWERING-NEXT: smstop sm -; CHECK-NEWLOWERING-NEXT: .LBB5_6: -; CHECK-NEWLOWERING-NEXT: 
bl private_za_decl -; CHECK-NEWLOWERING-NEXT: tbz w20, #0, .LBB5_8 -; CHECK-NEWLOWERING-NEXT: // %bb.7: -; CHECK-NEWLOWERING-NEXT: smstart sm -; CHECK-NEWLOWERING-NEXT: .LBB5_8: ; CHECK-NEWLOWERING-NEXT: mov x8, x0 ; CHECK-NEWLOWERING-NEXT: mov x0, x19 ; CHECK-NEWLOWERING-NEXT: bl __arm_sme_restore diff --git a/llvm/test/CodeGen/AArch64/sme-peephole-opts.ll b/llvm/test/CodeGen/AArch64/sme-peephole-opts.ll index cab094e638cdf..ced0d41c22dab 100644 --- a/llvm/test/CodeGen/AArch64/sme-peephole-opts.ll +++ b/llvm/test/CodeGen/AArch64/sme-peephole-opts.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-streaming-hazard-size=0 -mattr=+sve,+sme2 < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-new-sme-abi -aarch64-streaming-hazard-size=0 -mattr=+sve,+sme2 < %s | FileCheck %s declare void @callee() declare void @callee_sm() "aarch64_pstate_sm_enabled" @@ -563,3 +563,128 @@ define void @test13(ptr %ptr) nounwind "aarch64_pstate_sm_enabled" { store %res1, ptr %ptr ret void } + +; normal caller -> streaming callees (with ZA state) +define void @test14(ptr %callee) nounwind "aarch64_inout_za" { +; CHECK-LABEL: test14: +; CHECK: // %bb.0: +; CHECK-NEXT: stp d15, d14, [sp, #-96]! // 16-byte Folded Spill +; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: add x29, sp, #64 +; CHECK-NEXT: str x19, [sp, #80] // 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: rdsvl x8, #1 +; CHECK-NEXT: mov x9, sp +; CHECK-NEXT: msub x9, x8, x8, x9 +; CHECK-NEXT: mov sp, x9 +; CHECK-NEXT: sub x10, x29, #80 +; CHECK-NEXT: stp x9, x8, [x29, #-80] +; CHECK-NEXT: msr TPIDR2_EL0, x10 +; CHECK-NEXT: smstart sm +; CHECK-NEXT: bl callee_sm +; CHECK-NEXT: bl callee_sm +; CHECK-NEXT: smstop sm +; CHECK-NEXT: smstart za +; CHECK-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-NEXT: sub x0, x29, #80 +; CHECK-NEXT: cbnz x8, .LBB15_2 +; CHECK-NEXT: // %bb.1: +; CHECK-NEXT: bl __arm_tpidr2_restore +; CHECK-NEXT: .LBB15_2: +; CHECK-NEXT: msr TPIDR2_EL0, xzr +; CHECK-NEXT: sub sp, x29, #64 +; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: ldr x19, [sp, #80] // 8-byte Folded Reload +; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldp d15, d14, [sp], #96 // 16-byte Folded Reload +; CHECK-NEXT: ret + call void @callee_sm() + call void @callee_sm() + ret void +} + +; normal caller -> streaming callees (with ZA agnostic state) +define void @test15(ptr %callee) nounwind "aarch64_za_state_agnostic" { +; CHECK-LABEL: test15: +; CHECK: // %bb.0: +; CHECK-NEXT: stp d15, d14, [sp, #-96]! 
// 16-byte Folded Spill +; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: add x29, sp, #64 +; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-NEXT: bl __arm_sme_state_size +; CHECK-NEXT: sub sp, sp, x0 +; CHECK-NEXT: mov x20, sp +; CHECK-NEXT: mov x0, x20 +; CHECK-NEXT: bl __arm_sme_save +; CHECK-NEXT: smstart sm +; CHECK-NEXT: bl callee_sm +; CHECK-NEXT: bl callee_sm +; CHECK-NEXT: smstop sm +; CHECK-NEXT: mov x0, x20 +; CHECK-NEXT: bl __arm_sme_restore +; CHECK-NEXT: sub sp, x29, #64 +; CHECK-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldp d15, d14, [sp], #96 // 16-byte Folded Reload +; CHECK-NEXT: ret + call void @callee_sm() + call void @callee_sm() + ret void +} + +; locally streaming caller -> normal callees (with ZA state) +define void @test16(ptr %callee) nounwind "aarch64_pstate_sm_body" "aarch64_new_za" { +; CHECK-LABEL: test16: +; CHECK: // %bb.0: +; CHECK-NEXT: stp d15, d14, [sp, #-96]! // 16-byte Folded Spill +; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: add x29, sp, #64 +; CHECK-NEXT: str x19, [sp, #80] // 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: rdsvl x8, #1 +; CHECK-NEXT: mov x9, sp +; CHECK-NEXT: msub x9, x8, x8, x9 +; CHECK-NEXT: mov sp, x9 +; CHECK-NEXT: stp x9, x8, [x29, #-80] +; CHECK-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-NEXT: cbz x8, .LBB17_2 +; CHECK-NEXT: // %bb.1: +; CHECK-NEXT: bl __arm_tpidr2_save +; CHECK-NEXT: msr TPIDR2_EL0, xzr +; CHECK-NEXT: zero {za} +; CHECK-NEXT: .LBB17_2: +; CHECK-NEXT: smstart za +; CHECK-NEXT: smstart sm +; CHECK-NEXT: sub x8, x29, #80 +; CHECK-NEXT: msr TPIDR2_EL0, x8 +; CHECK-NEXT: smstop sm +; CHECK-NEXT: bl callee +; CHECK-NEXT: bl callee +; CHECK-NEXT: msr TPIDR2_EL0, xzr +; CHECK-NEXT: smstop za +; CHECK-NEXT: sub sp, x29, #64 +; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: ldr x19, [sp, #80] // 8-byte Folded Reload +; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldp d15, d14, [sp], #96 // 16-byte Folded Reload +; CHECK-NEXT: ret + call void @callee() + call void @callee() + ret void +} diff --git a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll index fc43c714d69b3..b6dee97ea2962 100644 --- a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll +++ b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s +; RUN: llc 
-mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-SDAG ; A simple EH test case that corresponds to the following C++ source: ; @@ -87,6 +88,90 @@ define void @za_with_raii(i1 %fail) "aarch64_inout_za" personality ptr @__gxx_pe ; CHECK-NEXT: mov x0, x19 ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl _Unwind_Resume +; +; CHECK-SDAG-LABEL: za_with_raii: +; CHECK-SDAG: .Lfunc_begin0: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception0 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-SDAG-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -8 +; CHECK-SDAG-NEXT: .cfi_offset w20, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: tbnz w0, #0, .LBB0_2 +; CHECK-SDAG-NEXT: // %bb.1: // %return_normally +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: b shared_za_call +; CHECK-SDAG-NEXT: .LBB0_2: // %throw_exception +; CHECK-SDAG-NEXT: sub x20, x29, #16 +; CHECK-SDAG-NEXT: mov w0, #8 // =0x8 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: bl __cxa_allocate_exception +; CHECK-SDAG-NEXT: mov x8, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x9, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x9, .LBB0_4 +; CHECK-SDAG-NEXT: // %bb.3: // %throw_exception +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_4: // %throw_exception +; CHECK-SDAG-NEXT: adrp x9, .L.str +; CHECK-SDAG-NEXT: add x9, x9, :lo12:.L.str +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: str x9, [x8] +; CHECK-SDAG-NEXT: .Ltmp0: // EH_LABEL +; CHECK-SDAG-NEXT: adrp x1, :got:typeinfo_for_char_const_ptr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: mov x0, x8 +; CHECK-SDAG-NEXT: ldr x1, [x1, :got_lo12:typeinfo_for_char_const_ptr] +; CHECK-SDAG-NEXT: mov x2, xzr +; CHECK-SDAG-NEXT: bl __cxa_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_6 +; CHECK-SDAG-NEXT: // %bb.5: // %throw_exception +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_6: // %throw_exception +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .Ltmp1: // EH_LABEL +; CHECK-SDAG-NEXT: // %bb.7: // %throw_fail +; CHECK-SDAG-NEXT: .LBB0_8: // %unwind_dtors +; CHECK-SDAG-NEXT: .Ltmp2: // EH_LABEL +; CHECK-SDAG-NEXT: mov x19, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_10 +; CHECK-SDAG-NEXT: // %bb.9: // %unwind_dtors +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_10: // %unwind_dtors +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: bl _Unwind_Resume +; CHECK-SDAG-NEXT: smstart za +; 
CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_12 +; CHECK-SDAG-NEXT: // %bb.11: // %unwind_dtors +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_12: // %unwind_dtors +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr br i1 %fail, label %throw_exception, label %return_normally throw_exception: @@ -124,7 +209,7 @@ throw_fail: ; } ; shared_za_call(); ; } -define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 { +define void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: try_catch: ; CHECK: .Lfunc_begin1: ; CHECK-NEXT: .cfi_startproc @@ -142,11 +227,11 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: msub x9, x8, x8, x9 ; CHECK-NEXT: mov sp, x9 ; CHECK-NEXT: stp x9, x8, [x29, #-16] -; CHECK-NEXT: .Ltmp3: +; CHECK-NEXT: .Ltmp3: // EH_LABEL ; CHECK-NEXT: sub x8, x29, #16 ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl may_throw -; CHECK-NEXT: .Ltmp4: +; CHECK-NEXT: .Ltmp4: // EH_LABEL ; CHECK-NEXT: .LBB1_1: // %after_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -160,7 +245,7 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: b shared_za_call ; CHECK-NEXT: .LBB1_4: // %catch -; CHECK-NEXT: .Ltmp5: +; CHECK-NEXT: .Ltmp5: // EH_LABEL ; CHECK-NEXT: bl __cxa_begin_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -175,6 +260,78 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl __cxa_end_catch ; CHECK-NEXT: b .LBB1_1 +; +; CHECK-SDAG-LABEL: try_catch: +; CHECK-SDAG: .Lfunc_begin1: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception1 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! 
// 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: .Ltmp3: // EH_LABEL +; CHECK-SDAG-NEXT: sub x19, x29, #16 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_2 +; CHECK-SDAG-NEXT: // %bb.1: +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_2: +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .Ltmp4: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB1_3: // %after_catch +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: b shared_za_call +; CHECK-SDAG-NEXT: .LBB1_4: // %catch +; CHECK-SDAG-NEXT: .Ltmp5: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_6 +; CHECK-SDAG-NEXT: // %bb.5: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_6: // %catch +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_8 +; CHECK-SDAG-NEXT: // %bb.7: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_8: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_10 +; CHECK-SDAG-NEXT: // %bb.9: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_10: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: b .LBB1_3 invoke void @may_throw() to label %after_catch unwind label %catch @@ -235,16 +392,16 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx ; CHECK-NEXT: zero {za} ; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: smstart za -; CHECK-NEXT: .Ltmp6: +; CHECK-NEXT: .Ltmp6: // EH_LABEL ; CHECK-NEXT: bl shared_za_call -; CHECK-NEXT: .Ltmp7: +; CHECK-NEXT: .Ltmp7: // EH_LABEL ; CHECK-NEXT: .LBB2_3: // %exit ; CHECK-NEXT: smstop za ; CHECK-NEXT: mov sp, x29 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB2_4: // %catch -; CHECK-NEXT: .Ltmp8: +; CHECK-NEXT: .Ltmp8: // EH_LABEL ; CHECK-NEXT: bl __cxa_begin_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -260,6 +417,78 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx ; CHECK-NEXT: bl __cxa_end_catch ; CHECK-NEXT: msr TPIDR2_EL0, xzr ; CHECK-NEXT: b .LBB2_3 +; +; CHECK-SDAG-LABEL: try_catch_shared_za_callee: +; CHECK-SDAG: .Lfunc_begin2: +; CHECK-SDAG-NEXT: 
.cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception2 +; CHECK-SDAG-NEXT: // %bb.0: // %prelude +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: cbz x8, .LBB2_2 +; CHECK-SDAG-NEXT: // %bb.1: // %save.za +; CHECK-SDAG-NEXT: bl __arm_tpidr2_save +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .LBB2_2: +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: zero {za} +; CHECK-SDAG-NEXT: .Ltmp6: // EH_LABEL +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: .Ltmp7: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB2_3: // %exit +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB2_4: // %catch +; CHECK-SDAG-NEXT: .Ltmp8: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: sub x19, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_6 +; CHECK-SDAG-NEXT: // %bb.5: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_6: // %catch +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_8 +; CHECK-SDAG-NEXT: // %bb.7: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_8: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl noexcept_shared_za_call +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_10 +; CHECK-SDAG-NEXT: // %bb.9: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_10: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: b .LBB2_3 invoke void @shared_za_call() #4 to label %exit unwind label %catch catch: @@ -275,6 +504,234 @@ exit: ret void } +; A simple ZT0 exception example that corresponds to: +; +; struct ZT0Resource { +; ~ZT0Resource() __arm_inout("zt0") { +; shared_zt0_call(); // simulate cleanup in destructor +; } +; }; +; +; void za_with_raii() __arm_inout("zt0") { +; ZT0Resource r; +; may_throw(); +; } +; +; This code may require reloading ZT0 in the cleanup for ~ZT0Resource(). +; +; FIXME: Codegen with `-aarch64-new-sme-abi` is broken with ZT0 (as it is not implemented). 
+define void @try_catch_shared_zt0_callee() "aarch64_inout_zt0" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_shared_zt0_callee: +; CHECK: .Lfunc_begin3: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception3 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w20, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: rdsvl x8, #1 +; CHECK-NEXT: mov x9, sp +; CHECK-NEXT: msub x9, x8, x8, x9 +; CHECK-NEXT: mov sp, x9 +; CHECK-NEXT: stp x9, x8, [x29, #-80] +; CHECK-NEXT: .Ltmp9: // EH_LABEL +; CHECK-NEXT: sub x19, x29, #64 +; CHECK-NEXT: str zt0, [x19] +; CHECK-NEXT: smstop za +; CHECK-NEXT: bl may_throw +; CHECK-NEXT: smstart za +; CHECK-NEXT: ldr zt0, [x19] +; CHECK-NEXT: .Ltmp10: // EH_LABEL +; CHECK-NEXT: // %bb.1: // %return_normally +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB3_2: // %unwind_dtors +; CHECK-NEXT: .Ltmp11: // EH_LABEL +; CHECK-NEXT: sub x20, x29, #64 +; CHECK-NEXT: mov x19, x0 +; CHECK-NEXT: smstart za +; CHECK-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-NEXT: sub x0, x29, #80 +; CHECK-NEXT: cbnz x8, .LBB3_4 +; CHECK-NEXT: // %bb.3: // %unwind_dtors +; CHECK-NEXT: bl __arm_tpidr2_restore +; CHECK-NEXT: .LBB3_4: // %unwind_dtors +; CHECK-NEXT: msr TPIDR2_EL0, xzr +; CHECK-NEXT: bl shared_zt0_call +; CHECK-NEXT: str zt0, [x20] +; CHECK-NEXT: smstop za +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl _Unwind_Resume +; CHECK-NEXT: smstart za +; CHECK-NEXT: ldr zt0, [x20] +; +; CHECK-SDAG-LABEL: try_catch_shared_zt0_callee: +; CHECK-SDAG: .Lfunc_begin3: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception3 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: sub sp, sp, #96 +; CHECK-SDAG-NEXT: str x30, [sp, #64] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-SDAG-NEXT: .cfi_def_cfa_offset 96 +; CHECK-SDAG-NEXT: .cfi_offset w19, -8 +; CHECK-SDAG-NEXT: .cfi_offset w20, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -32 +; CHECK-SDAG-NEXT: .Ltmp9: // EH_LABEL +; CHECK-SDAG-NEXT: mov x19, sp +; CHECK-SDAG-NEXT: str zt0, [x19] +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x19] +; CHECK-SDAG-NEXT: .Ltmp10: // EH_LABEL +; CHECK-SDAG-NEXT: // %bb.1: // %return_normally +; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: add sp, sp, #96 +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB3_2: // %unwind_dtors +; CHECK-SDAG-NEXT: .Ltmp11: // EH_LABEL +; CHECK-SDAG-NEXT: mov x20, sp +; CHECK-SDAG-NEXT: mov x19, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x20] +; CHECK-SDAG-NEXT: bl shared_zt0_call +; CHECK-SDAG-NEXT: str zt0, [x20] +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl _Unwind_Resume +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x20] + invoke void @may_throw() + to label 
%return_normally unwind label %unwind_dtors + +unwind_dtors: + %5 = landingpad { ptr, i32 } + cleanup + tail call void @shared_zt0_call() + resume { ptr, i32 } %5 + +return_normally: + ret void +} + +; This example corresponds to: +; +; __arm_agnostic("sme_za_state") void try_catch_agnostic_za() +; { +; try { +; may_throw(); +; } catch(...) { +; } +; } +; +; In this example we must execute __arm_sme_restore once we enter the catch block +; (before executing __arm_sme_save again, which would invalidate the prior save). +define void @try_catch_agnostic_za() "aarch64_za_state_agnostic" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_agnostic_za: +; CHECK: .Lfunc_begin4: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception4 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: bl __arm_sme_state_size +; CHECK-NEXT: sub sp, sp, x0 +; CHECK-NEXT: mov x19, sp +; CHECK-NEXT: .Ltmp12: // EH_LABEL +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_save +; CHECK-NEXT: bl may_throw +; CHECK-NEXT: .Ltmp13: // EH_LABEL +; CHECK-NEXT: .LBB4_1: // %exit +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_restore +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB4_2: // %catch +; CHECK-NEXT: .Ltmp14: // EH_LABEL +; CHECK-NEXT: bl __cxa_begin_catch +; CHECK-NEXT: bl __cxa_end_catch +; CHECK-NEXT: b .LBB4_1 +; +; CHECK-SDAG-LABEL: try_catch_agnostic_za: +; CHECK-SDAG: .Lfunc_begin4: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception4 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! 
// 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: bl __arm_sme_state_size +; CHECK-SDAG-NEXT: sub sp, sp, x0 +; CHECK-SDAG-NEXT: mov x19, sp +; CHECK-SDAG-NEXT: .Ltmp12: // EH_LABEL +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: .Ltmp13: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB4_1: // %exit +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB4_2: // %catch +; CHECK-SDAG-NEXT: .Ltmp14: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: b .LBB4_1 + invoke void @may_throw() + to label %exit unwind label %catch +catch: + %eh_info = landingpad { ptr, i32 } + catch ptr null + %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0 + tail call ptr @__cxa_begin_catch(ptr %exception_ptr) + tail call void @__cxa_end_catch() + br label %exit + +exit: + ret void +} + declare ptr @__cxa_allocate_exception(i64) declare void @__cxa_throw(ptr, ptr, ptr) declare ptr @__cxa_begin_catch(ptr) @@ -284,3 +741,4 @@ declare i32 @__gxx_personality_v0(...) declare void @may_throw() declare void @shared_za_call() "aarch64_inout_za" declare void @noexcept_shared_za_call() "aarch64_inout_za" +declare void @shared_zt0_call() "aarch64_inout_zt0" diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll index 92d3e1182bf34..d48e0cd4d9a92 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll @@ -48,6 +48,27 @@ define {, , , , , , } %res } +; Tests multiple identical luti4 intrinsics with ZT0 loads interspersed, are not CSD'd. +define void @test_multiple_luti4_zt_i8(ptr %ptrA, ptr %ptrB, %x) { +; CHECK-LABEL: test_multiple_luti4_zt_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr zt0, [x0] +; CHECK-NEXT: luti4 { z4.s - z7.s }, zt0, z0[1] +; CHECK-NEXT: // fake_use: $z4 $z4_z5_z6_z7 +; CHECK-NEXT: ldr zt0, [x1] +; CHECK-NEXT: luti4 { z0.s - z3.s }, zt0, z0[1] +; CHECK-NEXT: // fake_use: $z0 $z0_z1_z2_z3 +; CHECK-NEXT: ret + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrA) + %res1 = call {, , , } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, %x, i32 1) + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrB) + %res2 = call {, , , } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, %x, i32 1) + + call void (...) @llvm.fake.use({, , , } %res1) + call void (...) 
@llvm.fake.use({, , , } %res2) + ret void +} + declare {, , , } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8i16(i32, , i32) declare {, , , } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4i32(i32, , i32) declare {, , , } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8bf16(i32, , i32) diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll index 778f31194baf4..c1eff8dd1f8a8 100644 --- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll @@ -14,4 +14,27 @@ define {, , , , , , } %res } +; Tests multiple identical luti4 intrinsics with ZT0 loads interspersed, are not CSD'd. +define void @test_multiple_luti4_zt_i8(ptr %ptrA, ptr %ptrB, %v0, %v1) #0 { +; CHECK-LABEL: test_multiple_luti4_zt_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr zt0, [x0] +; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1 +; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1 +; CHECK-NEXT: luti4 { z4.b - z7.b }, zt0, { z0, z1 } +; CHECK-NEXT: // fake_use: $z4 $z4_z5_z6_z7 +; CHECK-NEXT: ldr zt0, [x1] +; CHECK-NEXT: luti4 { z0.b - z3.b }, zt0, { z0, z1 } +; CHECK-NEXT: // fake_use: $z0 $z0_z1_z2_z3 +; CHECK-NEXT: ret + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrA) + %res1 = call {, , , } @llvm.aarch64.sme.luti4.zt.x4.nxv16i8(i32 0, %v0, %v1) + tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrB) + %res2 = call {, , , } @llvm.aarch64.sme.luti4.zt.x4.nxv16i8(i32 0, %v0, %v1) + + call void (...) @llvm.fake.use({ , , , } %res1) + call void (...) @llvm.fake.use({ , , , } %res2) + ret void +} + attributes #0 = { "target-features"="+sme2,+sme-lutv2"} diff --git a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir index bff0cacfd5190..0298168bb47a7 100644 --- a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir +++ b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir @@ -983,26 +983,22 @@ body: | ; EXPAND-LABEL: name: zpr_predicate_spill_p4_saved ; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p8, $p4 ; EXPAND-NEXT: {{ $}} - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 - ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.3) + ; EXPAND-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.2) + ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.1) ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.1) - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 + ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.0) ; ; EXPAND-NEXT: $p8 = IMPLICIT_DEF ; - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.2) + ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.1) ; EXPAND-NEXT: $p4 = frame-destroy PTRUE_B 31, implicit $vg ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.1) + ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.0) ; EXPAND-NEXT: $p4 = 
frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg - ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.3) - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 + ; EXPAND-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3 ; If we spill a register above p8, p4 must also be saved, so we can guarantee diff --git a/llvm/test/CodeGen/AArch64/spillfill-sve.mir b/llvm/test/CodeGen/AArch64/spillfill-sve.mir index 2b16dd0f29ecc..5569175fb58fc 100644 --- a/llvm/test/CodeGen/AArch64/spillfill-sve.mir +++ b/llvm/test/CodeGen/AArch64/spillfill-sve.mir @@ -39,7 +39,7 @@ body: | ; CHECK-LABEL: name: spills_fills_stack_id_ppr ; CHECK: stack: ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2 - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '' + ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '' ; EXPAND-LABEL: name: spills_fills_stack_id_ppr ; EXPAND: STR_PXI $p0, $sp, 7 @@ -82,7 +82,7 @@ body: | ; CHECK-LABEL: name: spills_fills_stack_id_ppr2 ; CHECK: stack: ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2 - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '' + ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '' ; EXPAND-LABEL: name: spills_fills_stack_id_ppr2 ; EXPAND: STR_PXI $p0, $sp, 6 @@ -127,7 +127,7 @@ body: | ; CHECK-LABEL: name: spills_fills_stack_id_ppr2 ; CHECK: stack: ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2 - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '' + ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '' ; EXPAND-LABEL: name: spills_fills_stack_id_ppr2mul2 ; EXPAND: STR_PXI $p0, $sp, 6 @@ -172,7 +172,7 @@ body: | ; CHECK-LABEL: name: spills_fills_stack_id_pnr ; CHECK: stack: ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2 - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '' + ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '' ; EXPAND-LABEL: name: spills_fills_stack_id_pnr ; EXPAND: STR_PXI $pn0, $sp, 7 @@ -211,7 +211,7 @@ body: | ; CHECK-LABEL: name: spills_fills_stack_id_virtreg_pnr ; CHECK: stack: ; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2 - ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '' + ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '' ; EXPAND-LABEL: name: spills_fills_stack_id_virtreg_pnr ; EXPAND: renamable $pn8 = WHILEGE_CXX_B diff --git a/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll new file mode 100644 index 0000000000000..690a39d12e6f1 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll @@ -0,0 +1,824 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 | FileCheck %s +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 
-pass-remarks-analysis=stack-frame-layout 2>&1 >/dev/null | FileCheck %s --check-prefixes=CHECK-FRAMELAYOUT + +; CHECK-FRAMELAYOUT-LABEL: Function: zpr_and_ppr_local +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024 + +; +; %ppr_local sp+2048+30*vscale (= #15, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding sp+2048+16*vscale +; sp+1024+16*vscale +; %zpr_local sp+1024 +; +; -> sp +define void @zpr_and_ppr_local( %pred, %vector) "aarch64_pstate_sm_compatible" { +; CHECK-LABEL: zpr_and_ppr_local: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: add x8, sp, #2048 +; CHECK-NEXT: str p0, [x8, #15, mul vl] +; CHECK-NEXT: add x8, sp, #1024 +; CHECK-NEXT: str z0, [x8] +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca + %zpr_local = alloca + store volatile %pred, ptr %ppr_local + store volatile %vector, ptr %zpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: zpr_and_ppr_local_fp +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024 + +; +; -> fp +; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding fp-16*vscale +; fp-1024-16*vscale +; %zpr_local fp-1024-32*vscale (= #-2, mul vl for str/ldr ZPR) +; +; -> sp +define void @zpr_and_ppr_local_fp( %pred, %vector) "aarch64_pstate_sm_compatible" "frame-pointer"="all" { +; CHECK-LABEL: zpr_and_ppr_local_fp: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_def_cfa w29, 16 +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x8, x29, #1024 +; CHECK-NEXT: str p0, [x29, #-1, mul vl] +; CHECK-NEXT: str z0, [x8, #-2, mul vl] +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca + %zpr_local = alloca + store volatile %pred, ptr %ppr_local + store volatile %vector, ptr %zpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: fpr_and_ppr_local +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-16 x vscale], Type: Variable, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-16 x vscale], Type: Variable, Align: 16, Size: 1024 + +; +; %ppr_local sp+2064+14*vscale (= #7, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding sp+2064 +; sp+1040 +; %fpr_local sp+1032 +; 8 bytes of padding sp+1024 +; +; -> sp +define void @fpr_and_ppr_local( %pred, double %double) "aarch64_pstate_sm_compatible" { +; CHECK-LABEL: fpr_and_ppr_local: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: sub sp, sp, #1040 +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: add x8, sp, #2064 +; CHECK-NEXT: str p0, [x8, #7, mul vl] +; CHECK-NEXT: str d0, [sp, #1032] +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1040 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca + %fpr_local = alloca double + store volatile %pred, ptr %ppr_local + store volatile double %double, ptr %fpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: fpr_and_ppr_local_fp +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-16 x vscale], Type: Variable, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-16 x vscale], Type: Variable, Align: 16, Size: 1024 + +; +; -> fp +; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding +; +; %fpr_local sp+1032 +; 8 bytes of padding sp+1024 +; +; -> sp +define void @fpr_and_ppr_local_fp( %pred, double %double) "aarch64_pstate_sm_compatible" "frame-pointer"="all" { +; CHECK-LABEL: fpr_and_ppr_local_fp: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: sub sp, sp, #1040 +; CHECK-NEXT: .cfi_def_cfa w29, 16 +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: str p0, [x29, #-1, mul vl] +; CHECK-NEXT: str d0, [sp, #1032] +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1040 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca + %fpr_local = alloca double + store volatile %pred, ptr %ppr_local + store volatile double %double, ptr %fpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: gpr_and_ppr_local +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2072-32 x vscale], Type: Variable, Align: 8, Size: 8 + +; +; %ppr_local sp+2064+30*vscale (= #15, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding +; sp+1040+16*vscale +; sp+1040 +; sp+16 +; %gpr_local sp+8 +; 8 bytes of padding +; -> sp +define void @gpr_and_ppr_local( %pred, i64 %int) "aarch64_pstate_sm_compatible" { +; CHECK-LABEL: gpr_and_ppr_local: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1040 +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2080 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040 +; CHECK-NEXT: add x8, sp, #2064 +; CHECK-NEXT: //APP +; CHECK-NEXT: //NO_APP +; CHECK-NEXT: str p0, [x8, #15, mul vl] +; CHECK-NEXT: str x0, [sp, #8] +; CHECK-NEXT: add sp, sp, #1040 +; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + tail call void asm sideeffect "", "~{d8}"() #1 ; Spill an FPR so hazard padding is needed + %ppr_local = alloca + %gpr_local = alloca i64 + store volatile %pred, ptr %ppr_local + store volatile i64 %int, ptr %gpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: gpr_and_ppr_local_fp +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2072-32 x vscale], Type: Variable, Align: 8, Size: 8 + +; +; -> fp +; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding +; +; +; +; %gpr_local sp+8 +; 8 bytes of padding +; -> sp +define void @gpr_and_ppr_local_fp( %pred, i64 %int) "aarch64_pstate_sm_compatible" "frame-pointer"="all" { +; CHECK-LABEL: gpr_and_ppr_local_fp: +; CHECK: // 
%bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1040 +; CHECK-NEXT: .cfi_def_cfa w29, 16 +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040 +; CHECK-NEXT: //APP +; CHECK-NEXT: //NO_APP +; CHECK-NEXT: str p0, [x29, #-1, mul vl] +; CHECK-NEXT: str x0, [sp, #8] +; CHECK-NEXT: add sp, sp, #1040 +; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret + tail call void asm sideeffect "", "~{d8}"() #1 ; Spill an FPR so hazard padding is needed + %ppr_local = alloca + %gpr_local = alloca i64 + store volatile %pred, ptr %ppr_local + store volatile i64 %int, ptr %gpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: all_stack_areas +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-34 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; 
CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-304 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-320 x vscale], Type: Variable, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-320 x vscale], Type: Variable, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-320 x vscale], Type: Variable, Align: 16, Size: 1024 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2088-320 x vscale], Type: Variable, Align: 8, Size: 8 + +; +; +; %ppr_local sp+2080+286*vscale (addvl #17, addpl #7) +; 14 * vscale bytes of padding sp+2080+272*vscale +; sp+1056+272*vscale +; sp+1056+16*vscale +; %zpr_local sp+1056 +; %fpr_local sp+1048 +; 8 bytes of padding sp+1040 +; sp+16 +; %gpr_local sp+8 +; 8 bytes of padding sp +; -> sp +define void @all_stack_areas( %pred, double %fp) { +; CHECK-LABEL: all_stack_areas: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-17 +; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1056 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 2096 + 160 * VG +; 
CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 32 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 40 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d10 @ cfa - 48 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d11 @ cfa - 56 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d12 @ cfa - 64 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d13 @ cfa - 72 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d14 @ cfa - 80 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d15 @ cfa - 88 * VG - 1040 +; CHECK-NEXT: add x0, sp, #2080 +; CHECK-NEXT: add x8, sp, #2080 +; CHECK-NEXT: add x1, sp, #1056 +; CHECK-NEXT: addvl x0, x0, #17 +; CHECK-NEXT: add x2, sp, #1048 +; CHECK-NEXT: add x3, sp, #8 +; CHECK-NEXT: addpl x0, x0, #7 +; CHECK-NEXT: str d0, [sp, #1048] +; CHECK-NEXT: str p0, [x8, #143, mul vl] +; CHECK-NEXT: bl foo +; CHECK-NEXT: add sp, sp, #1056 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #17 +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldp x29, 
x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca + %zpr_local = alloca + %fpr_local = alloca double + ; // Needed to sort %fpr_local into the FPR region + store double %fp, ptr %fpr_local + ; // Needed to sort %ppr_local into the PPR region + store %pred, ptr %ppr_local + %gpr_local = alloca i64 + call void @foo(ptr %ppr_local, ptr %zpr_local, ptr %fpr_local, ptr %gpr_local) + ret void +} +declare void @foo(ptr, ptr, ptr, ptr) + +; CHECK-FRAMELAYOUT-LABEL: Function: all_stack_areas_fp +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-34 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-304 
x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-320 x vscale], Type: Variable, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1064-320 x vscale], Type: Variable, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2096-320 x vscale], Type: Variable, Align: 16, Size: 1024 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2104-320 x vscale], Type: Variable, Align: 8, Size: 8 + +; +; -> fp +; fp-32*vscale +; %ppr_local fp-34*vscale (addpl #-17) +; 14 * vscale bytes of padding fp-48*vscale +; fp-1024-48*vscale +; fp-1024-304*vscale +; %zpr_local sp-1024-320*vscale (addvl #-20) +; %fpr_local sp+1048 +; 8 bytes of padding sp+1040 +; sp+16 +; %gpr_local sp+8 +; 8 bytes of padding sp +; -> sp +define void @all_stack_areas_fp( %pred, double %fp) "frame-pointer"="all" { +; CHECK-LABEL: all_stack_areas_fp: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: str x28, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-17 +; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1056 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w28, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d8 @ cfa - 32 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d9 @ cfa - 40 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d10 @ cfa 
- 48 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d11 @ cfa - 56 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d12 @ cfa - 64 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d13 @ cfa - 72 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d14 @ cfa - 80 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d15 @ cfa - 88 * VG - 1056 +; CHECK-NEXT: sub x1, x29, #1024 +; CHECK-NEXT: addpl x0, x29, #-17 +; CHECK-NEXT: add x2, sp, #1048 +; CHECK-NEXT: addvl x1, x1, #-20 +; CHECK-NEXT: add x3, sp, #8 +; CHECK-NEXT: str d0, [sp, #1048] +; CHECK-NEXT: str p0, [x29, #-17, mul vl] +; CHECK-NEXT: bl foo +; CHECK-NEXT: add sp, sp, #1056 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #17 +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x28, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca + %zpr_local = alloca + %fpr_local = alloca double + ; // Needed to sort %fpr_local into the FPR region + store double %fp, ptr %fpr_local + ; // Needed to sort %ppr_local into the PPR region + store %pred, ptr %ppr_local + %gpr_local = alloca i64 + call void @foo(ptr %ppr_local, ptr %zpr_local, ptr %fpr_local, ptr %gpr_local) + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: svecc_call 
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-56], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-48 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2112-288 x vscale], Type: Variable, Align: 16, Size: 1024 + +define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, %P3, i16 %P4) "aarch64_pstate_sm_compatible" { +; CHECK-LABEL: svecc_call: +; CHECK: // %bb.0: // %entry +; 
CHECK-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: cntd x9 +; CHECK-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: str x9, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .cfi_def_cfa w29, 64 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w26, -16 +; CHECK-NEXT: .cfi_offset w27, -24 +; CHECK-NEXT: .cfi_offset w28, -32 +; CHECK-NEXT: .cfi_offset vg, -48 +; CHECK-NEXT: .cfi_offset w30, -56 +; CHECK-NEXT: .cfi_offset w29, -64 +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-16 +; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 
0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088 +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: mov x8, x0 +; CHECK-NEXT: bl __arm_sme_state +; CHECK-NEXT: mov x19, x0 +; CHECK-NEXT: //APP +; CHECK-NEXT: //NO_APP +; CHECK-NEXT: tbz w19, #0, .LBB8_2 +; CHECK-NEXT: // %bb.1: // %entry +; CHECK-NEXT: smstop sm +; CHECK-NEXT: .LBB8_2: // %entry +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: mov w1, #45 // =0x2d +; CHECK-NEXT: mov w2, #37 // =0x25 +; CHECK-NEXT: bl memset +; CHECK-NEXT: tbz w19, #0, .LBB8_4 +; CHECK-NEXT: // %bb.3: // %entry +; CHECK-NEXT: smstart sm +; CHECK-NEXT: .LBB8_4: // %entry +; CHECK-NEXT: mov w0, #22647 // =0x5877 +; CHECK-NEXT: movk w0, #59491, lsl #16 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #16 +; CHECK-NEXT: .cfi_restore z8 +; CHECK-NEXT: .cfi_restore z9 +; CHECK-NEXT: .cfi_restore z10 +; CHECK-NEXT: .cfi_restore z11 +; CHECK-NEXT: .cfi_restore z12 +; CHECK-NEXT: .cfi_restore z13 +; CHECK-NEXT: .cfi_restore z14 +; CHECK-NEXT: .cfi_restore z15 +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: .cfi_def_cfa wsp, 64 +; CHECK-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload +; CHECK-NEXT: .cfi_def_cfa_offset 0 +; CHECK-NEXT: .cfi_restore w19 +; CHECK-NEXT: .cfi_restore w26 +; CHECK-NEXT: .cfi_restore w27 +; CHECK-NEXT: .cfi_restore w28 +; CHECK-NEXT: .cfi_restore vg +; CHECK-NEXT: 
.cfi_restore w30 +; CHECK-NEXT: .cfi_restore w29 +; CHECK-NEXT: ret +entry: + tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2 + %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37) + ret i32 -396142473 +} +declare ptr @memset(ptr, i32, i32) + +; FIXME: aarch64-split-sve-objects is currently not supported in this function +; as it requires stack realignment (for the 32-byte aligned alloca). +; GPR CSRs +; +; FPR CSRs +; +; <--- hazard between PPRs and ZPRs here! +; +; -> sp +define void @zpr_and_ppr_local_realignment( %pred, %vector, i64 %gpr) "aarch64_pstate_sm_compatible" { +; CHECK-LABEL: zpr_and_ppr_local_realignment: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #1040 +; CHECK-NEXT: sub x9, sp, #1040 +; CHECK-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill +; CHECK-NEXT: add x29, sp, #1024 +; CHECK-NEXT: addvl x9, x9, #-2 +; CHECK-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill +; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0 +; CHECK-NEXT: .cfi_def_cfa w29, 16 +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x8, x29, #1024 +; CHECK-NEXT: str p0, [x8, #-1, mul vl] +; CHECK-NEXT: str z0, [x8, #-2, mul vl] +; CHECK-NEXT: str x0, [sp] +; CHECK-NEXT: sub sp, x29, #1024 +; CHECK-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload +; CHECK-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1040 +; CHECK-NEXT: ret + %ppr_local = alloca + %zpr_local = alloca + %gpr_local = alloca i64, align 32 + store volatile %pred, ptr %ppr_local + store volatile %vector, ptr %zpr_local + store volatile i64 %gpr, ptr %gpr_local + ret void +} + +define void @zpr_and_ppr_local_stack_probing( %pred, %vector, i64 %gpr) +; CHECK-LABEL: zpr_and_ppr_local_stack_probing: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]!
// 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: str xzr, [sp] +; CHECK-NEXT: sub sp, sp, #1824 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: str xzr, [sp] +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xb0, 0x16, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2864 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: add x8, sp, #2848 +; CHECK-NEXT: str p0, [x8, #15, mul vl] +; CHECK-NEXT: add x8, sp, #1824 +; CHECK-NEXT: str z0, [x8] +; CHECK-NEXT: str x0, [sp] +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1824 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + "probe-stack"="inline-asm" "stack-probe-size"="4096" "frame-pointer"="none" "aarch64_pstate_sm_compatible" +{ + %ppr_local = alloca + %zpr_local = alloca + %gpr_local = alloca i64, i64 100, align 8 + store volatile %pred, ptr %ppr_local + store volatile %vector, ptr %zpr_local + store volatile i64 %gpr, ptr %gpr_local + ret void +} diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll index 5f52280935c73..333a8be27f687 100644 --- a/llvm/test/CodeGen/AArch64/stack-hazard.ll +++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll @@ -1,7 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=0 | FileCheck %s --check-prefixes=CHECK,CHECK0 ; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=64 | FileCheck %s --check-prefixes=CHECK,CHECK64 -; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024 +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-NOSPLITSVE +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-split-sve-objects -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-SPLITSVE define i32 @basic(i32 noundef %num) { ; CHECK-LABEL: basic: @@ -1503,72 +1504,24 @@ define [2 x ] @sve_signature_pred_2xv4i1([2 x } define [2 x ] @sve_signature_pred_2xv4i1_caller([2 x ] %arg1, [2 x ] %arg2) nounwind "aarch64_pstate_sm_compatible" { -; CHECK0-LABEL: sve_signature_pred_2xv4i1_caller: -; CHECK0: // %bb.0: -; CHECK0-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; CHECK0-NEXT: addvl sp, sp, #-1 -; CHECK0-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK0-NEXT: mov p5.b, p0.b -; CHECK0-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK0-NEXT: mov p4.b, p1.b -; CHECK0-NEXT: mov p0.b, p2.b -; CHECK0-NEXT: mov p1.b, p3.b -; CHECK0-NEXT: mov p2.b, p5.b -; CHECK0-NEXT: mov p3.b, p4.b -; CHECK0-NEXT: bl sve_signature_pred_2xv4i1 -; CHECK0-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK0-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK0-NEXT: addvl sp, sp, #1 -; CHECK0-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK0-NEXT: ret -; -; CHECK64-LABEL: sve_signature_pred_2xv4i1_caller: -; CHECK64: // %bb.0: -; CHECK64-NEXT: sub sp, sp, #80 -; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill -; CHECK64-NEXT: addvl sp, sp, #-1 -; CHECK64-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK64-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: mov p4.b, p1.b -; CHECK64-NEXT: mov p5.b, p0.b -; CHECK64-NEXT: mov p0.b, p2.b -; CHECK64-NEXT: mov p1.b, p3.b -; CHECK64-NEXT: mov p2.b, p5.b -; CHECK64-NEXT: mov p3.b, p4.b -; CHECK64-NEXT: bl sve_signature_pred_2xv4i1 -; CHECK64-NEXT: add sp, sp, #64 -; CHECK64-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK64-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK64-NEXT: addvl sp, sp, #1 -; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload -; CHECK64-NEXT: add sp, sp, #80 -; CHECK64-NEXT: ret -; -; CHECK1024-LABEL: sve_signature_pred_2xv4i1_caller: -; CHECK1024: // %bb.0: -; CHECK1024-NEXT: sub sp, sp, #1040 -; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill -; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill -; CHECK1024-NEXT: addvl sp, sp, #-1 -; CHECK1024-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: mov p4.b, p1.b -; CHECK1024-NEXT: mov p5.b, p0.b -; CHECK1024-NEXT: mov p0.b, p2.b -; CHECK1024-NEXT: mov p1.b, p3.b -; CHECK1024-NEXT: mov p2.b, p5.b -; CHECK1024-NEXT: mov p3.b, p4.b -; CHECK1024-NEXT: bl sve_signature_pred_2xv4i1 -; CHECK1024-NEXT: add sp, sp, #1024 -; CHECK1024-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: addvl sp, sp, #1 -; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload -; CHECK1024-NEXT: add sp, sp, #1040 -; CHECK1024-NEXT: ret +; CHECK-LABEL: sve_signature_pred_2xv4i1_caller: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: mov p5.b, p0.b +; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: mov p4.b, p1.b +; CHECK-NEXT: mov p0.b, p2.b +; CHECK-NEXT: mov p1.b, p3.b +; CHECK-NEXT: mov p2.b, p5.b +; CHECK-NEXT: mov p3.b, p4.b +; CHECK-NEXT: bl sve_signature_pred_2xv4i1 +; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret %res = call [2 x ] @sve_signature_pred_2xv4i1([2 x ] %arg2, [2 x ] %arg1) ret [2 x ] %res } @@ -2113,139 +2066,269 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, %P3, ; CHECK64-NEXT: .cfi_restore w29 ; CHECK64-NEXT: ret ; -; CHECK1024-LABEL: svecc_call: -; CHECK1024: // %bb.0: // %entry -; CHECK1024-NEXT: sub sp, sp, #1088 -; CHECK1024-NEXT: .cfi_def_cfa_offset 1088 -; CHECK1024-NEXT: cntd x9 -; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill -; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill -; CHECK1024-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill -; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill -; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill -; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill -; CHECK1024-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill -; CHECK1024-NEXT: add x29, sp, #1024 -; CHECK1024-NEXT: .cfi_def_cfa w29, 64 -; CHECK1024-NEXT: .cfi_offset w19, -16 -; CHECK1024-NEXT: .cfi_offset w26, -24 -; CHECK1024-NEXT: .cfi_offset w27, -32 -; CHECK1024-NEXT: .cfi_offset w28, -40 -; CHECK1024-NEXT: .cfi_offset vg, -48 -; CHECK1024-NEXT: .cfi_offset w30, -56 -; CHECK1024-NEXT: .cfi_offset w29, -64 -; CHECK1024-NEXT: addvl sp, sp, #-18 -; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill -; 
CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088 -; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: mov x8, x0 -; CHECK1024-NEXT: bl __arm_sme_state -; CHECK1024-NEXT: mov x19, x0 -; CHECK1024-NEXT: //APP -; CHECK1024-NEXT: //NO_APP -; CHECK1024-NEXT: tbz w19, #0, .LBB28_2 -; CHECK1024-NEXT: // %bb.1: // %entry -; CHECK1024-NEXT: smstop sm -; CHECK1024-NEXT: .LBB28_2: // %entry -; CHECK1024-NEXT: mov x0, x8 -; CHECK1024-NEXT: mov w1, #45 // =0x2d -; CHECK1024-NEXT: mov w2, #37 // =0x25 -; CHECK1024-NEXT: bl memset -; CHECK1024-NEXT: tbz w19, #0, .LBB28_4 -; CHECK1024-NEXT: // %bb.3: // %entry -; CHECK1024-NEXT: smstart sm -; CHECK1024-NEXT: .LBB28_4: // %entry -; CHECK1024-NEXT: mov w0, #22647 // =0x5877 -; CHECK1024-NEXT: movk w0, #59491, lsl #16 -; CHECK1024-NEXT: add sp, sp, #1024 -; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; 
CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: addvl sp, sp, #18 -; CHECK1024-NEXT: .cfi_restore z8 -; CHECK1024-NEXT: .cfi_restore z9 -; CHECK1024-NEXT: .cfi_restore z10 -; CHECK1024-NEXT: .cfi_restore z11 -; CHECK1024-NEXT: .cfi_restore z12 -; CHECK1024-NEXT: .cfi_restore z13 -; CHECK1024-NEXT: .cfi_restore z14 -; CHECK1024-NEXT: .cfi_restore z15 -; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088 -; CHECK1024-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload -; CHECK1024-NEXT: add sp, sp, #1088 -; CHECK1024-NEXT: .cfi_def_cfa_offset 0 -; CHECK1024-NEXT: .cfi_restore w19 -; CHECK1024-NEXT: .cfi_restore w26 -; CHECK1024-NEXT: .cfi_restore w27 -; CHECK1024-NEXT: .cfi_restore w28 -; CHECK1024-NEXT: .cfi_restore vg -; CHECK1024-NEXT: .cfi_restore w30 -; CHECK1024-NEXT: .cfi_restore w29 -; CHECK1024-NEXT: ret +; CHECK1024-NOSPLITSVE-LABEL: svecc_call: +; CHECK1024-NOSPLITSVE: // %bb.0: // %entry +; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 1088 +; CHECK1024-NOSPLITSVE-NEXT: cntd x9 +; CHECK1024-NOSPLITSVE-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: add x29, sp, #1024 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa w29, 64 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w19, -16 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w26, -24 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w27, -32 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w28, -40 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset vg, -48 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w30, -56 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w29, -64 +; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #-18 +; CHECK1024-NOSPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p10, [sp, #9, mul 
vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1024 +; CHECK1024-NOSPLITSVE-NEXT: mov x8, x0 +; CHECK1024-NOSPLITSVE-NEXT: bl __arm_sme_state +; CHECK1024-NOSPLITSVE-NEXT: mov x19, x0 +; CHECK1024-NOSPLITSVE-NEXT: //APP +; CHECK1024-NOSPLITSVE-NEXT: //NO_APP +; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB28_2 +; CHECK1024-NOSPLITSVE-NEXT: // %bb.1: // %entry +; 
CHECK1024-NOSPLITSVE-NEXT: smstop sm +; CHECK1024-NOSPLITSVE-NEXT: .LBB28_2: // %entry +; CHECK1024-NOSPLITSVE-NEXT: mov x0, x8 +; CHECK1024-NOSPLITSVE-NEXT: mov w1, #45 // =0x2d +; CHECK1024-NOSPLITSVE-NEXT: mov w2, #37 // =0x25 +; CHECK1024-NOSPLITSVE-NEXT: bl memset +; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB28_4 +; CHECK1024-NOSPLITSVE-NEXT: // %bb.3: // %entry +; CHECK1024-NOSPLITSVE-NEXT: smstart sm +; CHECK1024-NOSPLITSVE-NEXT: .LBB28_4: // %entry +; CHECK1024-NOSPLITSVE-NEXT: mov w0, #22647 // =0x5877 +; CHECK1024-NOSPLITSVE-NEXT: movk w0, #59491, lsl #16 +; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1024 +; CHECK1024-NOSPLITSVE-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #18 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z8 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z9 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z10 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z11 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z12 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z13 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z14 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z15 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa wsp, 1088 +; CHECK1024-NOSPLITSVE-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x27, [sp, 
#1056] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 0 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w19 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w26 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w27 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w28 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore vg +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w30 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w29 +; CHECK1024-NOSPLITSVE-NEXT: ret +; +; CHECK1024-SPLITSVE-LABEL: svecc_call: +; CHECK1024-SPLITSVE: // %bb.0: // %entry +; CHECK1024-SPLITSVE-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 64 +; CHECK1024-SPLITSVE-NEXT: cntd x9 +; CHECK1024-SPLITSVE-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str x9, [sp, #16] // 8-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: mov x29, sp +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa w29, 64 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w19, -8 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w26, -16 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w27, -24 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w28, -32 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset vg, -48 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w30, -56 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w29, -64 +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-2 +; CHECK1024-SPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-16 +; CHECK1024-SPLITSVE-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; 
CHECK1024-SPLITSVE-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: mov x8, x0 +; CHECK1024-SPLITSVE-NEXT: bl __arm_sme_state +; CHECK1024-SPLITSVE-NEXT: mov x19, x0 +; CHECK1024-SPLITSVE-NEXT: //APP +; CHECK1024-SPLITSVE-NEXT: //NO_APP +; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB28_2 +; CHECK1024-SPLITSVE-NEXT: // %bb.1: // %entry +; CHECK1024-SPLITSVE-NEXT: smstop sm +; CHECK1024-SPLITSVE-NEXT: .LBB28_2: // %entry +; CHECK1024-SPLITSVE-NEXT: mov x0, x8 +; CHECK1024-SPLITSVE-NEXT: mov w1, #45 // =0x2d +; CHECK1024-SPLITSVE-NEXT: mov w2, #37 // =0x25 +; CHECK1024-SPLITSVE-NEXT: bl memset +; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB28_4 +; CHECK1024-SPLITSVE-NEXT: // %bb.3: // %entry +; CHECK1024-SPLITSVE-NEXT: smstart sm +; CHECK1024-SPLITSVE-NEXT: .LBB28_4: // %entry +; CHECK1024-SPLITSVE-NEXT: mov w0, #22647 // =0x5877 +; CHECK1024-SPLITSVE-NEXT: movk w0, #59491, lsl #16 +; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z13, [sp, 
#10, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #16 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z8 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z9 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z10 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z11 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z12 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z13 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z14 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z15 +; CHECK1024-SPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #2 +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa wsp, 64 +; CHECK1024-SPLITSVE-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 0 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w19 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w26 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w27 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w28 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore vg +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w30 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w29 +; CHECK1024-SPLITSVE-NEXT: ret entry: tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2 %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37) @@ -2505,138 +2588,267 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, &1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI + +; CHECK-GI: warning: Instruction selection used fallback path for unused_div_fpexcept_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for unused_div_round_dynamic +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_twice_fpexcept_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_twice_round_dynamic +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding_fpexcept_strict +; CHECK-GI-NEXT: warning: Instruction selection used fallback path for set_rounding_round_dynamic ; Div whose result is unused should be 
removed unless we have strict exceptions -; CHECK-LABEL: unused_div: -; CHECK-NOT: fdiv -; CHECK: ret define void @unused_div(float %x, float %y) { +; CHECK-LABEL: unused_div: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ret entry: %add = fdiv float %x, %y ret void } -; CHECK-LABEL: unused_div_fpexcept_strict: -; CHECK: fdiv s0, s0, s1 -; CHECK-NEXT: ret define void @unused_div_fpexcept_strict(float %x, float %y) #0 { +; CHECK-LABEL: unused_div_fpexcept_strict: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fdiv s0, s0, s1 +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 ret void } -; CHECK-LABEL: unused_div_round_dynamic: -; CHECK-NOT: fdiv -; CHECK: ret define void @unused_div_round_dynamic(float %x, float %y) #0 { +; CHECK-LABEL: unused_div_round_dynamic: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 ret void @@ -33,14 +42,14 @@ entry: ; Machine CSE should eliminate the second add unless we have strict exceptions - -; CHECK-LABEL: add_twice: -; CHECK: fadd [[ADD:s[0-9]+]], s0, s1 -; CHECK-NEXT: cmp w0, #0 -; CHECK-NEXT: fmul [[MUL:s[0-9]+]], [[ADD]], [[ADD]] -; CHECK-NEXT: fcsel s0, [[ADD]], [[MUL]], eq -; CHECK-NEXT: ret define float @add_twice(float %x, float %y, i32 %n) { +; CHECK-LABEL: add_twice: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: cmp w0, #0 +; CHECK-NEXT: fmul s1, s0, s0 +; CHECK-NEXT: fcsel s0, s0, s1, eq +; CHECK-NEXT: ret entry: %add = fadd float %x, %y %tobool.not = icmp eq i32 %n, 0 @@ -56,15 +65,17 @@ if.end: ret float %a.0 } -; CHECK-LABEL: add_twice_fpexcept_strict: -; CHECK: fmov [[X:s[0-9]+]], s0 -; CHECK-NEXT: fadd s0, s0, s1 -; CHECK-NEXT: cbz w0, [[LABEL:.LBB[0-9_]+]] -; CHECK: fadd [[ADD:s[0-9]+]], [[X]], s1 -; CHECK-NEXT: fmul s0, s0, [[ADD]] -; CHECK: [[LABEL]]: -; CHECK-NEXT: ret define float @add_twice_fpexcept_strict(float %x, float %y, i32 %n) #0 { +; CHECK-LABEL: add_twice_fpexcept_strict: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fmov s2, s0 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: cbz w0, .LBB4_2 +; CHECK-NEXT: // %bb.1: // %if.then +; CHECK-NEXT: fadd s1, s2, s1 +; CHECK-NEXT: fmul s0, s0, s1 +; CHECK-NEXT: .LBB4_2: // %if.end +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 %tobool.not = icmp eq i32 %n, 0 @@ -80,14 +91,15 @@ if.end: ret float %a.0 } -; CHECK-LABEL: add_twice_round_dynamic: -; CHECK: fadd s0, s0, s1 -; CHECK-NEXT: cbz w0, [[LABEL:.LBB[0-9_]+]] -; CHECK-NOT: fadd -; CHECK: fmul s0, s0, s0 -; CHECK: [[LABEL]]: -; CHECK-NEXT: ret define float @add_twice_round_dynamic(float %x, float %y, i32 %n) #0 { +; CHECK-LABEL: add_twice_round_dynamic: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: cbz w0, .LBB5_2 +; CHECK-NEXT: // %bb.1: // %if.then +; CHECK-NEXT: fmul s0, s0, s0 +; CHECK-NEXT: .LBB5_2: // %if.end +; CHECK-NEXT: ret entry: %add = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 %tobool.not = icmp eq i32 %n, 0 @@ -108,17 +120,18 @@ if.end: ; dynamic (as they may give different results) or when we have strict exceptions ; (the llvm.set.rounding is irrelevant, but both could trap). 
-; CHECK-LABEL: set_rounding: -; CHECK-DAG: fadd [[SREG:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR -; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000 -; CHECK: msr FPCR, [[XREG2]] -; CHECK-NEXT: mrs [[XREG3:x[0-9]+]], FPCR -; CHECK-NEXT: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff -; CHECK-NEXT: msr FPCR, [[XREG4]] -; CHECK-NEXT: fsub s0, [[SREG]], [[SREG]] -; CHECK-NEXT: ret define float @set_rounding(float %x, float %y) { +; CHECK-LABEL: set_rounding: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: orr x8, x8, #0xc00000 +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fsub s0, s0, s0 +; CHECK-NEXT: ret entry: %add1 = fadd float %x, %y call void @llvm.set.rounding(i32 0) @@ -128,18 +141,19 @@ entry: ret float %sub } -; CHECK-LABEL: set_rounding_fpexcept_strict: -; CHECK-DAG: fadd [[SREG1:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR -; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000 -; CHECK: msr FPCR, [[XREG2]] -; CHECK-DAG: fadd [[SREG2:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG3:x[0-9]+]], FPCR -; CHECK-DAG: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff -; CHECK-NEXT: msr FPCR, [[XREG4]] -; CHECK-NEXT: fsub s0, [[SREG1]], [[SREG2]] -; CHECK-NEXT: ret define float @set_rounding_fpexcept_strict(float %x, float %y) #0 { +; CHECK-LABEL: set_rounding_fpexcept_strict: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fadd s2, s0, s1 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: orr x8, x8, #0xc00000 +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fsub s0, s2, s0 +; CHECK-NEXT: ret entry: %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 call void @llvm.set.rounding(i32 0) #0 @@ -149,18 +163,19 @@ entry: ret float %sub } -; CHECK-LABEL: set_rounding_round_dynamic: -; CHECK-DAG: fadd [[SREG1:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG1:x[0-9]+]], FPCR -; CHECK-DAG: orr [[XREG2:x[0-9]+]], [[XREG1]], #0xc00000 -; CHECK: msr FPCR, [[XREG2]] -; CHECK-DAG: fadd [[SREG2:s[0-9]+]], s0, s1 -; CHECK-DAG: mrs [[XREG3:x[0-9]+]], FPCR -; CHECK-DAG: and [[XREG4:x[0-9]+]], [[XREG3]], #0xffffffffff3fffff -; CHECK-NEXT: msr FPCR, [[XREG4]] -; CHECK-NEXT: fsub s0, [[SREG1]], [[SREG2]] -; CHECK-NEXT: ret define float @set_rounding_round_dynamic(float %x, float %y) #0 { +; CHECK-LABEL: set_rounding_round_dynamic: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: fadd s2, s0, s1 +; CHECK-NEXT: orr x8, x8, #0xc00000 +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fadd s0, s0, s1 +; CHECK-NEXT: mrs x8, FPCR +; CHECK-NEXT: and x8, x8, #0xffffffffff3fffff +; CHECK-NEXT: msr FPCR, x8 +; CHECK-NEXT: fsub s0, s2, s0 +; CHECK-NEXT: ret entry: %add1 = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 call void @llvm.set.rounding(i32 0) #0 @@ -178,3 +193,6 @@ declare i32 @llvm.get.rounding() declare void @llvm.set.rounding(i32) attributes #0 = { strictfp } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CHECK-GI: {{.*}} +; CHECK-SD: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll index 7bddd1d70aaa8..cc63c7ffc0c1e 100644 --- a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll +++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll @@ -56,9 +56,9 @@ define aarch64_sve_vector_pcs @caller_with_many_svepred_arg(< ; CHECK: name: caller_with_many_svepred_arg ; CHECK: stack: ; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2, -; CHECK-NEXT: stack-id: scalable-vector +; CHECK-NEXT: stack-id: scalable-predicate-vector ; CHECK: - { id: 1, name: '', type: default, offset: 0, size: 2, alignment: 2, -; CHECK-NEXT: stack-id: scalable-vector +; CHECK-NEXT: stack-id: scalable-predicate-vector ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.0, 0 ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.1, 0 ; CHECK-DAG: [[BASE1:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0 @@ -90,7 +90,7 @@ define aarch64_sve_vector_pcs @caller_with_svepred_arg_1xv16i ; CHECK: name: caller_with_svepred_arg_1xv16i1_4xv16i1 ; CHECK: stack: ; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2, -; CHECK-NEXT: stack-id: scalable-vector, +; CHECK-NEXT: stack-id: scalable-predicate-vector, ; CHECK: [[PRED0:%[0-9]+]]:ppr = COPY $p0 ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp ; CHECK: STR_PXI [[PRED0]], %stack.0, 0 :: (store () into %stack.0) @@ -139,7 +139,7 @@ define [4 x ] @caller_with_svepred_arg_4xv16i1_4xv16i1([4 x ] @caller_with_svepred_arg_2xv32i1_1xv16i1([2 x , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv2i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv3i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv3i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv4i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv5i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv5i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv6i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv6i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: st1b { z1.s }, p1, [x1] +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uunpkhi z0.s, z0.h +; CHECK-NEXT: uunpklo 
z0.d, z0.s +; CHECK-NEXT: st1b { z0.d }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv7i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv7i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv8i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv9i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv9i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #9 // =0x9 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv10i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv10i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ptrue p1.h +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #4, mul vl] +; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b +; CHECK-NEXT: uunpkhi z1.h, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1b { z0.h }, p1, [x1] +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: st1b { z1.d }, p0, [x1, #4, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv11i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv11i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #11 // =0xb +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv12i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv12i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ptrue p1.h +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0] +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b +; CHECK-NEXT: uunpkhi z1.h, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1b { z0.h }, p1, [x1] +; CHECK-NEXT: st1b { z1.s }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv13i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv13i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #13 // =0xd +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv14i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv14i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue 
p0.d +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #6, mul vl] +; CHECK-NEXT: ptrue p2.h +; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0, #2, mul vl] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: ld1b { z1.h }, p2/z, [x0] +; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b +; CHECK-NEXT: uunpkhi z1.h, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpkhi z2.s, z1.h +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1b { z0.h }, p2, [x1] +; CHECK-NEXT: uunpklo z2.d, z2.s +; CHECK-NEXT: st1b { z1.s }, p1, [x1, #2, mul vl] +; CHECK-NEXT: st1b { z2.d }, p0, [x1, #6, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv15i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv15i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #15 // =0xf +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv16i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv17i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv17i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w10, #17 // =0x11 +; CHECK-NEXT: lsr x9, x8, #4 +; CHECK-NEXT: mul x9, x9, x10 +; CHECK-NEXT: whilelo p0.b, x8, x9 +; CHECK-NEXT: whilelo p1.b, xzr, x9 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl] +; CHECK-NEXT: st1b { z1.b }, p1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv18i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv18i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x8] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z1.s, z0.h +; CHECK-NEXT: uunpkhi z0.s, z0.h +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpkhi z1.s, z0.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uunpkhi z1.d, z1.s +; CHECK-NEXT: uzp1 z1.s, z0.s, z1.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpkhi z1.s, z0.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b +; CHECK-NEXT: uunpkhi z1.h, z1.b +; CHECK-NEXT: uunpklo z2.s, z1.h +; CHECK-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEXT: uunpkhi z2.d, z2.s +; CHECK-NEXT: uzp1 z2.s, z0.s, z2.s +; CHECK-NEXT: uzp1 z1.h, z2.h, z1.h +; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b +; CHECK-NEXT: uunpkhi z1.h, z1.b +; CHECK-NEXT: uunpklo z2.s, z1.h +; CHECK-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEXT: uunpklo z2.d, z2.s +; CHECK-NEXT: uzp1 z2.s, z2.s, z0.s +; CHECK-NEXT: uzp1 z1.h, z2.h, z1.h +; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b +; CHECK-NEXT: uunpkhi z1.h, z1.b +; 
CHECK-NEXT: uunpkhi z2.s, z1.h +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: uunpkhi z2.d, z2.s +; CHECK-NEXT: uzp1 z2.s, z0.s, z2.s +; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h +; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b +; CHECK-NEXT: uunpkhi z1.h, z1.b +; CHECK-NEXT: uunpkhi z2.s, z1.h +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: uunpklo z2.d, z2.s +; CHECK-NEXT: uzp1 z2.s, z2.s, z0.s +; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: ldr z1, [x0] +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: st1b { z0.d }, p0, [x1, x8] +; CHECK-NEXT: str z1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv19i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv19i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w10, #19 // =0x13 +; CHECK-NEXT: lsr x9, x8, #4 +; CHECK-NEXT: mul x9, x9, x10 +; CHECK-NEXT: whilelo p0.b, x8, x9 +; CHECK-NEXT: whilelo p1.b, xzr, x9 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl] +; CHECK-NEXT: st1b { z1.b }, p1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv20i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv20i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #4, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b +; CHECK-NEXT: uunpkhi z1.h, z1.b +; CHECK-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEXT: uzp1 z1.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b +; CHECK-NEXT: uunpkhi z1.h, z1.b +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: st1b { z0.s }, p0, [x1, #4, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv21i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv21i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w10, #21 // =0x15 +; CHECK-NEXT: lsr x9, x8, #4 +; CHECK-NEXT: mul x9, x9, x10 +; CHECK-NEXT: whilelo p0.b, x8, x9 +; CHECK-NEXT: whilelo p1.b, xzr, x9 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl] +; CHECK-NEXT: st1b { z1.b }, p1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv22i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv22i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: cntw x8, all, mul #5 +; CHECK-NEXT: ldr z2, [x0] +; CHECK-NEXT: ptrue p1.d +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #4, mul vl] +; CHECK-NEXT: ld1b { z1.d }, p1/z, [x0, x8] +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b +; CHECK-NEXT: uunpkhi z1.h, z1.b +; CHECK-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEXT: uzp1 z1.h, z0.h, z1.h +; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b +; 
CHECK-NEXT: uunpkhi z1.h, z1.b +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpkhi z1.s, z0.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: st1b { z1.d }, p1, [x1, x8] +; CHECK-NEXT: st1b { z0.s }, p0, [x1, #4, mul vl] +; CHECK-NEXT: str z2, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv23i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv23i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w10, #23 // =0x17 +; CHECK-NEXT: lsr x9, x8, #4 +; CHECK-NEXT: mul x9, x9, x10 +; CHECK-NEXT: whilelo p0.b, x8, x9 +; CHECK-NEXT: whilelo p1.b, xzr, x9 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl] +; CHECK-NEXT: st1b { z1.b }, p1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv24i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv24i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: st1b { z0.h }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv25i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv25i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w10, #25 // =0x19 +; CHECK-NEXT: lsr x9, x8, #4 +; CHECK-NEXT: mul x9, x9, x10 +; CHECK-NEXT: whilelo p0.b, x8, x9 +; CHECK-NEXT: whilelo p1.b, xzr, x9 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl] +; CHECK-NEXT: st1b { z1.b }, p1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv26i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv26i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: cnth x8, all, mul #3 +; CHECK-NEXT: ldr z2, [x0] +; CHECK-NEXT: ptrue p1.h +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x8] +; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0, #2, mul vl] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b +; CHECK-NEXT: uunpkhi z1.h, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: st1b { z1.d }, p0, [x1, x8] +; CHECK-NEXT: st1b { z0.h }, p1, [x1, #2, mul vl] +; CHECK-NEXT: str z2, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv27i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv27i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w10, #27 // =0x1b +; CHECK-NEXT: lsr x9, x8, #4 +; CHECK-NEXT: mul x9, x9, x10 +; CHECK-NEXT: whilelo p0.b, x8, x9 +; CHECK-NEXT: whilelo p1.b, xzr, x9 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl] +; CHECK-NEXT: st1b { z1.b }, p1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv28i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv28i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; 
CHECK-NEXT: ldr z2, [x0] +; CHECK-NEXT: ptrue p1.h +; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #6, mul vl] +; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0, #2, mul vl] +; CHECK-NEXT: str z2, [x1] +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b +; CHECK-NEXT: uunpkhi z1.h, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: st1b { z0.h }, p1, [x1, #2, mul vl] +; CHECK-NEXT: st1b { z1.s }, p0, [x1, #6, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv29i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv29i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w10, #29 // =0x1d +; CHECK-NEXT: lsr x9, x8, #4 +; CHECK-NEXT: mul x9, x9, x10 +; CHECK-NEXT: whilelo p0.b, x8, x9 +; CHECK-NEXT: whilelo p1.b, xzr, x9 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl] +; CHECK-NEXT: st1b { z1.b }, p1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv30i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv30i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: cntw x8, all, mul #7 +; CHECK-NEXT: ldr z3, [x0] +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x8] +; CHECK-NEXT: ptrue p2.h +; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0, #6, mul vl] +; CHECK-NEXT: ld1b { z2.h }, p2/z, [x0, #2, mul vl] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z2.b, z0.b +; CHECK-NEXT: uunpkhi z1.h, z0.b +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: uunpkhi z2.s, z1.h +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: uunpklo z2.d, z2.s +; CHECK-NEXT: st1b { z2.d }, p0, [x1, x8] +; CHECK-NEXT: st1b { z0.h }, p2, [x1, #2, mul vl] +; CHECK-NEXT: st1b { z1.s }, p1, [x1, #6, mul vl] +; CHECK-NEXT: str z3, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv31i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv31i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w10, #31 // =0x1f +; CHECK-NEXT: lsr x9, x8, #4 +; CHECK-NEXT: mul x9, x9, x10 +; CHECK-NEXT: whilelo p0.b, x8, x9 +; CHECK-NEXT: whilelo p1.b, xzr, x9 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0] +; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl] +; CHECK-NEXT: st1b { z1.b }, p1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv32i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0, #1, mul vl] +; CHECK-NEXT: ldr z1, [x0] +; CHECK-NEXT: str z0, [x1, #1, mul vl] +; CHECK-NEXT: str z1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv1i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv1i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv2i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c 
= load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv3i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv3i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv4i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv5i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv5i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv6i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv6i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uunpkhi z1.s, z0.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: st1h { z0.s }, p1, [x1] +; CHECK-NEXT: st1h { z1.d }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv7i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv7i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv8i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv9i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv9i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #9 // =0x9 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv10i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv10i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #4, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z1.h, z0.h, z0.h +; 
CHECK-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEXT: uunpkhi z1.d, z1.s +; CHECK-NEXT: uzp1 z1.s, z0.s, z1.s +; CHECK-NEXT: uzp1 z1.h, z0.h, z1.h +; CHECK-NEXT: uunpkhi z1.s, z1.h +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: st1h { z0.d }, p0, [x1, #4, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv11i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv11i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #11 // =0xb +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv12i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv12i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: st1h { z0.s }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv13i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv13i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #13 // =0xd +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv14i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv14i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ldr z2, [x0] +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #6, mul vl] +; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0, #2, mul vl] +; CHECK-NEXT: str z2, [x1] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uunpkhi z1.s, z0.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: st1h { z0.s }, p1, [x1, #2, mul vl] +; CHECK-NEXT: st1h { z1.d }, p0, [x1, #6, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv15i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv15i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #15 // =0xf +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv16i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr 
z0, [x0, #1, mul vl] +; CHECK-NEXT: ldr z1, [x0] +; CHECK-NEXT: str z0, [x1, #1, mul vl] +; CHECK-NEXT: str z1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv1i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv1i32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv2i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv3i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv3i32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv4i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv5i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv5i32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1w { z1.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv6i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv6i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: st1w { z0.d }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv7i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv7i32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1w { z1.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv8i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0, #1, mul vl] +; CHECK-NEXT: ldr z1, [x0] +; CHECK-NEXT: str z0, [x1, #1, mul vl] +; CHECK-NEXT: str z1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv1i64(ptr %a, ptr %b) { 
+; CHECK-LABEL: sve_load_store_nxv1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1d { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv2i64(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv3i64(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv3i64: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1d { z0.d }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0] +; CHECK-NEXT: st1d { z0.d }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1d { z1.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv4i64(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0, #1, mul vl] +; CHECK-NEXT: ldr z1, [x0] +; CHECK-NEXT: str z0, [x1, #1, mul vl] +; CHECK-NEXT: str z1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv1f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv1f16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv2f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv3f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv3f16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv4f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv5f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv5f16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv6f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv6f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0] 
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: st1h { z1.s }, p1, [x1] +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uunpkhi z0.s, z0.h +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: st1h { z0.d }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv7f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv7f16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv8f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv9f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv9f16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #9 // =0x9 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv10f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv10f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #4, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: st1h { z1.d }, p0, [x1, #4, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv11f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv11f16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #11 // =0xb +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv12f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv12f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: st1h { z1.s }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv13f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv13f16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #13 // =0xd +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define 
void @sve_load_store_nxv14f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv14f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ldr z2, [x0] +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #6, mul vl] +; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0, #2, mul vl] +; CHECK-NEXT: str z2, [x1] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: st1h { z1.s }, p1, [x1, #2, mul vl] +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uunpkhi z0.s, z0.h +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: st1h { z0.d }, p0, [x1, #6, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv15f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv15f16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #15 // =0xf +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv16f16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0, #1, mul vl] +; CHECK-NEXT: ldr z1, [x0] +; CHECK-NEXT: str z0, [x1, #1, mul vl] +; CHECK-NEXT: str z1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv1f32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv1f32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv2f32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv3f32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv3f32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv4f32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv5f32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv5f32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1w { z1.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv6f32(ptr %a, 
ptr %b) { +; CHECK-LABEL: sve_load_store_nxv6f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: st1w { z1.d }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv7f32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv7f32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1w { z1.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv8f32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0, #1, mul vl] +; CHECK-NEXT: ldr z1, [x0] +; CHECK-NEXT: str z0, [x1, #1, mul vl] +; CHECK-NEXT: str z1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv1f64(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv1f64: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1d { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv2f64(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv3f64(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv3f64: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1d { z0.d }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0] +; CHECK-NEXT: st1d { z0.d }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1d { z1.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv4f64(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0, #1, mul vl] +; CHECK-NEXT: ldr z1, [x0] +; CHECK-NEXT: str z0, [x1, #1, mul vl] +; CHECK-NEXT: str z1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv1bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv1bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv2bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv2bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv3bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv3bf16: 
+; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv4bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv4bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv5bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv5bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv6bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv6bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: st1h { z1.s }, p1, [x1] +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uunpkhi z0.s, z0.h +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: st1h { z0.d }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv7bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv7bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv8bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv8bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv9bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv9bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #9 // =0x9 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv10bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv10bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #4, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: st1h { z1.d }, p0, [x1, #4, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv11bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv11bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #11 // =0xb +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo 
p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv12bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv12bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ldr z0, [x0] +; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #2, mul vl] +; CHECK-NEXT: str z0, [x1] +; CHECK-NEXT: st1h { z1.s }, p0, [x1, #2, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv13bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv13bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #13 // =0xd +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv14bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv14bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ldr z2, [x0] +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #6, mul vl] +; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0, #2, mul vl] +; CHECK-NEXT: str z2, [x1] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: st1h { z1.s }, p1, [x1, #2, mul vl] +; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h +; CHECK-NEXT: uunpkhi z0.s, z0.h +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: st1h { z0.d }, p0, [x1, #6, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv15bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv15bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #15 // =0xf +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define void @sve_load_store_nxv16bf16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_load_store_nxv16bf16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr z0, [x0, #1, mul vl] +; CHECK-NEXT: ldr z1, [x0] +; CHECK-NEXT: str z0, [x1, #1, mul vl] +; CHECK-NEXT: str z1, [x1] +; CHECK-NEXT: ret + %c = load , ptr %a + store %c, ptr %b + ret void +} + +define @sve_sextload_nxv1i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv1i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv2i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv3i8(ptr %a, ptr %b) { +; CHECK-LABEL: 
sve_sextload_nxv3i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv4i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv5i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv5i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv6i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv6i8: +; CHECK: // %bb.0: +; CHECK-NEXT: cntd x8, all, mul #3 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv7i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv7i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv8i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv9i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv9i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #9 // =0x9 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv10i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv10i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: cntd x8, all, mul #5 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: str z1, [sp] +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: st1h { z0.d }, p0, [sp, #4, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv11i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv11i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #11 // =0xb +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv12i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv12i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: cntw x8, all, mul #3 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: str z1, [sp] +; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv13i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv13i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #13 // =0xd +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv14i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv14i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: cntd x8, all, mul #7 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1sb { z2.h }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: uunpkhi z1.s, z0.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: str z2, [sp] +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: st1h { z1.d }, p0, [sp, #6, mul vl] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv15i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv15i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #15 // =0xf +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv16i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv1i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv1i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv2i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv3i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv3i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv4i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv5i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv5i16: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1w { z1.s }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv6i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv6i16: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: cntd x8, all, mul #3 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ptrue p1.d +; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0] +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: str z1, [sp] +; CHECK-NEXT: st1w { z0.d }, p1, [sp, #2, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv7i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv7i16: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1w { z1.s }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv8i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv1i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv1i32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv2i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv3i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv3i32: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sw { z0.d }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0] +; CHECK-NEXT: st1d { z0.d }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1d { z1.d }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_sextload_nxv4i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_sextload_nxv4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.sext = sext %c to + ret %c.sext +} + +define @sve_zextload_nxv1i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv1i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv2i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv3i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv3i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv4i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv5i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv5i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv6i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv6i8: +; CHECK: // %bb.0: +; CHECK-NEXT: cntd x8, all, mul #3 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv7i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv7i8: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv8i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv8i8: +; CHECK: // %bb.0: 
+; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv9i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv9i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #9 // =0x9 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv10i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv10i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: cntd x8, all, mul #5 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: str z1, [sp] +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: st1h { z0.d }, p0, [sp, #4, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv11i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv11i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #11 // =0xb +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv12i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv12i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: cntw x8, all, mul #3 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: str z1, [sp] +; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv13i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv13i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #13 // =0xd +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv14i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv14i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: cntd x8, all, mul #7 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: ld1sb { z2.h }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: uunpkhi z1.s, z0.h +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: str z2, [sp] +; CHECK-NEXT: uunpklo z1.d, z1.s +; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: st1h { z1.d }, p0, [sp, #6, mul vl] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv15i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv15i8: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #15 // =0xf +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.b, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0] +; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1h { z1.h }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv16i8(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv1i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv1i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv2i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv3i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv3i16: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv4i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv5i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv5i16: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #5 // =0x5 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1w { z1.s }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv6i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv6i16: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: cntd x8, all, mul #3 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ptrue p1.d +; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0] +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: str z1, [sp] +; CHECK-NEXT: st1w { z0.d }, p1, [sp, #2, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv7i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv7i16: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #7 // =0x7 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.h, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1w { z1.s }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv8i16(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv1i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv1i32: +; CHECK: // %bb.0: +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: whilelo p0.d, xzr, x8 +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv2i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv3i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv3i32: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: rdvl x8, #1 +; CHECK-NEXT: mov w9, #3 // =0x3 +; CHECK-NEXT: lsr x8, x8, #4 +; CHECK-NEXT: mul x8, x8, x9 +; CHECK-NEXT: whilelo p0.s, xzr, x8 +; CHECK-NEXT: punpkhi p1.h, p0.b +; CHECK-NEXT: punpklo p0.h, p0.b +; CHECK-NEXT: ld1sw { z0.d }, p1/z, [x0, #1, mul vl] +; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0] +; CHECK-NEXT: st1d { z0.d }, p1, [sp, #1, mul vl] +; CHECK-NEXT: st1d { z1.d }, p0, [sp] +; CHECK-NEXT: ldr z1, [sp, #1, mul vl] +; CHECK-NEXT: ldr z0, [sp] +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} + +define @sve_zextload_nxv4i32(ptr %a, ptr %b) { +; CHECK-LABEL: sve_zextload_nxv4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %c = load , ptr %a + %c.zext = sext %c to + ret %c.zext +} diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll index 2cbb29ebe1a1f..d8de12c5f66b9 100644 --- a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll +++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll @@ -672,5 +672,3 @@ entry: ret i32 %x } declare void @other() -;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: -; CHECK-FRAMELAYOUT: {{.*}} diff --git a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll index 4a04934971711..6946cc23d867d 100644 --- a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll +++ b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD -; RUN: llc < %s -mtriple=aarch64 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI +; RUN: llc < %s -mtriple=aarch64 -O3 | FileCheck %s --check-prefixes=CHECK,CHECK-SD +; RUN: llc < %s -mtriple=aarch64 -O3 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI declare void @t() @@ -581,3 +581,323 @@ end: ret void } +define ptr @tbnz_wzr(i1 %cmp1.not.i, ptr %locflg) { +; CHECK-SD-LABEL: tbnz_wzr: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: tbz w0, #0, .LBB20_2 +; CHECK-SD-NEXT: // %bb.1: +; CHECK-SD-NEXT: tbnz wzr, #0, .LBB20_3 +; CHECK-SD-NEXT: b .LBB20_4 +; CHECK-SD-NEXT: .LBB20_2: // %opnfil.exit.thread +; CHECK-SD-NEXT: mov w8, #1 // =0x1 +; CHECK-SD-NEXT: str wzr, [x1] +; CHECK-SD-NEXT: tbz w8, #0, .LBB20_4 +; CHECK-SD-NEXT: .LBB20_3: // %if.else25 +; CHECK-SD-NEXT: str wzr, [x1] +; CHECK-SD-NEXT: .LBB20_4: // %common.ret +; CHECK-SD-NEXT: mov x0, xzr +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: tbnz_wzr: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov w8, #0 // =0x0 +; CHECK-GI-NEXT: tbz w0, #0, .LBB20_3 +; CHECK-GI-NEXT: // %bb.1: // %if.end10 +; CHECK-GI-NEXT: tbnz w8, #0, .LBB20_4 +; CHECK-GI-NEXT: .LBB20_2: // %common.ret +; CHECK-GI-NEXT: mov x0, xzr +; CHECK-GI-NEXT: ret +; CHECK-GI-NEXT: .LBB20_3: // %opnfil.exit.thread +; CHECK-GI-NEXT: mov w8, #1 // =0x1 +; CHECK-GI-NEXT: str wzr, [x1] +; CHECK-GI-NEXT: tbz w8, #0, .LBB20_2 +; CHECK-GI-NEXT: .LBB20_4: // %if.else25 +; CHECK-GI-NEXT: str wzr, [x1] +; CHECK-GI-NEXT: mov x0, xzr +; 
CHECK-GI-NEXT: ret +entry: + br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread + +opnfil.exit.thread: ; preds = %entry + store i32 0, ptr %locflg, align 4 + br label %if.end10 + +if.end10: ; preds = %opnfil.exit.thread, %entry + %cmp5 = phi i1 [ true, %opnfil.exit.thread ], [ false, %entry ] + br i1 %cmp5, label %if.else25, label %if.then12 + +if.then12: ; preds = %if.end10 + %call20 = load i32, ptr null, align 4 + br label %if.end26 + +if.else25: ; preds = %if.end10 + store i32 0, ptr %locflg, align 4 + br label %if.end26 + +if.end26: ; preds = %if.else25, %if.then12 + br i1 %cmp5, label %common.ret, label %if.then28 + +common.ret: ; preds = %if.then28, %if.end26 + %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ] + ret ptr %common.ret.op + +if.then28: ; preds = %if.end26 + %0 = load ptr, ptr null, align 8 + br label %common.ret +} + +define ptr @tbz_wzr(i1 %cmp1.not.i, ptr %locflg) { +; CHECK-SD-LABEL: tbz_wzr: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: tbz w0, #0, .LBB21_2 +; CHECK-SD-NEXT: // %bb.1: +; CHECK-SD-NEXT: mov w8, #1 // =0x1 +; CHECK-SD-NEXT: tbnz w8, #0, .LBB21_3 +; CHECK-SD-NEXT: b .LBB21_4 +; CHECK-SD-NEXT: .LBB21_2: // %opnfil.exit.thread +; CHECK-SD-NEXT: str wzr, [x1] +; CHECK-SD-NEXT: tbz wzr, #0, .LBB21_4 +; CHECK-SD-NEXT: .LBB21_3: // %if.else25 +; CHECK-SD-NEXT: str wzr, [x1] +; CHECK-SD-NEXT: .LBB21_4: // %common.ret +; CHECK-SD-NEXT: mov x0, xzr +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: tbz_wzr: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov w8, #1 // =0x1 +; CHECK-GI-NEXT: tbz w0, #0, .LBB21_3 +; CHECK-GI-NEXT: // %bb.1: // %if.end10 +; CHECK-GI-NEXT: tbnz w8, #0, .LBB21_4 +; CHECK-GI-NEXT: .LBB21_2: // %common.ret +; CHECK-GI-NEXT: mov x0, xzr +; CHECK-GI-NEXT: ret +; CHECK-GI-NEXT: .LBB21_3: // %opnfil.exit.thread +; CHECK-GI-NEXT: mov w8, #0 // =0x0 +; CHECK-GI-NEXT: str wzr, [x1] +; CHECK-GI-NEXT: tbz w8, #0, .LBB21_2 +; CHECK-GI-NEXT: .LBB21_4: // %if.else25 +; CHECK-GI-NEXT: str wzr, [x1] +; CHECK-GI-NEXT: mov x0, xzr +; CHECK-GI-NEXT: ret +entry: + br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread + +opnfil.exit.thread: ; preds = %entry + store i32 0, ptr %locflg, align 4 + br label %if.end10 + +if.end10: ; preds = %opnfil.exit.thread, %entry + %cmp5 = phi i1 [ false, %opnfil.exit.thread ], [ true, %entry ] + br i1 %cmp5, label %if.else25, label %if.then12 + +if.then12: ; preds = %if.end10 + %call20 = load i32, ptr null, align 4 + br label %if.end26 + +if.else25: ; preds = %if.end10 + store i32 0, ptr %locflg, align 4 + br label %if.end26 + +if.end26: ; preds = %if.else25, %if.then12 + br i1 %cmp5, label %common.ret, label %if.then28 + +common.ret: ; preds = %if.then28, %if.end26 + %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ] + ret ptr %common.ret.op + +if.then28: ; preds = %if.end26 + %0 = load ptr, ptr null, align 8 + br label %common.ret +} + +define ptr @cbnz_wzr(i1 %cmp1.not.i, ptr %locflg) { +; CHECK-SD-LABEL: cbnz_wzr: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: tbz w0, #0, .LBB22_2 +; CHECK-SD-NEXT: // %bb.1: +; CHECK-SD-NEXT: cbnz wzr, .LBB22_3 +; CHECK-SD-NEXT: b .LBB22_4 +; CHECK-SD-NEXT: .LBB22_2: // %opnfil.exit.thread +; CHECK-SD-NEXT: mov w8, #10 // =0xa +; CHECK-SD-NEXT: str wzr, [x1] +; CHECK-SD-NEXT: cbz w8, .LBB22_4 +; CHECK-SD-NEXT: .LBB22_3: // %if.else25 +; CHECK-SD-NEXT: str wzr, [x1] +; CHECK-SD-NEXT: .LBB22_4: // %common.ret +; CHECK-SD-NEXT: mov x0, xzr +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: cbnz_wzr: +; CHECK-GI: // %bb.0: // %entry +; 
CHECK-GI-NEXT: mov w8, wzr +; CHECK-GI-NEXT: tbnz w0, #0, .LBB22_2 +; CHECK-GI-NEXT: // %bb.1: // %opnfil.exit.thread +; CHECK-GI-NEXT: mov w8, #10 // =0xa +; CHECK-GI-NEXT: str wzr, [x1] +; CHECK-GI-NEXT: .LBB22_2: // %if.end10 +; CHECK-GI-NEXT: cbz w8, .LBB22_4 +; CHECK-GI-NEXT: // %bb.3: // %if.else25 +; CHECK-GI-NEXT: str wzr, [x1] +; CHECK-GI-NEXT: .LBB22_4: // %common.ret +; CHECK-GI-NEXT: mov x0, xzr +; CHECK-GI-NEXT: ret +entry: + br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread + +opnfil.exit.thread: ; preds = %entry + store i32 0, ptr %locflg, align 4 + br label %if.end10 + +if.end10: ; preds = %opnfil.exit.thread, %entry + %cmp5 = phi i32 [ 10, %opnfil.exit.thread ], [ 0, %entry ] + %cmp5b = icmp ne i32 %cmp5, 0 + br i1 %cmp5b, label %if.else25, label %if.then12 + +if.then12: ; preds = %if.end10 + %call20 = load i32, ptr null, align 4 + br label %if.end26 + +if.else25: ; preds = %if.end10 + store i32 0, ptr %locflg, align 4 + br label %if.end26 + +if.end26: ; preds = %if.else25, %if.then12 + br i1 %cmp5b, label %common.ret, label %if.then28 + +common.ret: ; preds = %if.then28, %if.end26 + %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ] + ret ptr %common.ret.op + +if.then28: ; preds = %if.end26 + %0 = load ptr, ptr null, align 8 + br label %common.ret +} + +define ptr @cbz_wzr(i1 %cmp1.not.i, ptr %locflg) { +; CHECK-SD-LABEL: cbz_wzr: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: tbz w0, #0, .LBB23_2 +; CHECK-SD-NEXT: // %bb.1: +; CHECK-SD-NEXT: mov w8, #10 // =0xa +; CHECK-SD-NEXT: cbnz w8, .LBB23_3 +; CHECK-SD-NEXT: b .LBB23_4 +; CHECK-SD-NEXT: .LBB23_2: // %opnfil.exit.thread +; CHECK-SD-NEXT: str wzr, [x1] +; CHECK-SD-NEXT: cbz wzr, .LBB23_4 +; CHECK-SD-NEXT: .LBB23_3: // %if.else25 +; CHECK-SD-NEXT: str wzr, [x1] +; CHECK-SD-NEXT: .LBB23_4: // %common.ret +; CHECK-SD-NEXT: mov x0, xzr +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: cbz_wzr: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov w8, #10 // =0xa +; CHECK-GI-NEXT: tbnz w0, #0, .LBB23_2 +; CHECK-GI-NEXT: // %bb.1: // %opnfil.exit.thread +; CHECK-GI-NEXT: mov w8, wzr +; CHECK-GI-NEXT: str wzr, [x1] +; CHECK-GI-NEXT: .LBB23_2: // %if.end10 +; CHECK-GI-NEXT: cbz w8, .LBB23_4 +; CHECK-GI-NEXT: // %bb.3: // %if.else25 +; CHECK-GI-NEXT: str wzr, [x1] +; CHECK-GI-NEXT: .LBB23_4: // %common.ret +; CHECK-GI-NEXT: mov x0, xzr +; CHECK-GI-NEXT: ret +entry: + br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread + +opnfil.exit.thread: ; preds = %entry + store i32 0, ptr %locflg, align 4 + br label %if.end10 + +if.end10: ; preds = %opnfil.exit.thread, %entry + %cmp5 = phi i32 [ 0, %opnfil.exit.thread ], [ 10, %entry ] + %cmp5b = icmp ne i32 %cmp5, 0 + br i1 %cmp5b, label %if.else25, label %if.then12 + +if.then12: ; preds = %if.end10 + %call20 = load i32, ptr null, align 4 + br label %if.end26 + +if.else25: ; preds = %if.end10 + store i32 0, ptr %locflg, align 4 + br label %if.end26 + +if.end26: ; preds = %if.else25, %if.then12 + br i1 %cmp5b, label %common.ret, label %if.then28 + +common.ret: ; preds = %if.then28, %if.end26 + %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ] + ret ptr %common.ret.op + +if.then28: ; preds = %if.end26 + %0 = load ptr, ptr null, align 8 + br label %common.ret +} + +define i1 @avifSequenceHeaderParse() { +; CHECK-SD-LABEL: avifSequenceHeaderParse: +; CHECK-SD: // %bb.0: // %entry +; CHECK-SD-NEXT: mov w8, #1 // =0x1 +; CHECK-SD-NEXT: cbz w8, .LBB24_2 +; CHECK-SD-NEXT: .LBB24_1: // %bb6 +; CHECK-SD-NEXT: mov w0, wzr +; CHECK-SD-NEXT: ret 
+; CHECK-SD-NEXT: .LBB24_2: // %bb1 +; CHECK-SD-NEXT: cbz w8, .LBB24_4 +; CHECK-SD-NEXT: // %bb.3: +; CHECK-SD-NEXT: tbz xzr, #63, .LBB24_1 +; CHECK-SD-NEXT: b .LBB24_5 +; CHECK-SD-NEXT: .LBB24_4: // %bb2 +; CHECK-SD-NEXT: mov w8, #1 // =0x1 +; CHECK-SD-NEXT: tbz x8, #63, .LBB24_1 +; CHECK-SD-NEXT: .LBB24_5: // %bb4 +; CHECK-SD-NEXT: mov w8, #1 // =0x1 +; CHECK-SD-NEXT: mov w0, wzr +; CHECK-SD-NEXT: ret +; +; CHECK-GI-LABEL: avifSequenceHeaderParse: +; CHECK-GI: // %bb.0: // %entry +; CHECK-GI-NEXT: mov w0, wzr +; CHECK-GI-NEXT: mov w8, #1 // =0x1 +; CHECK-GI-NEXT: ret +entry: + %a = icmp slt i64 0, 0 + br i1 %a, label %bb1, label %bb6 + +bb1: ; preds = %entry + %b = icmp eq i32 1, 0 + br i1 %b, label %bb2, label %bb3 + +bb2: ; preds = %bb1 + %c = load i8, ptr null, align 1 + %d = zext i8 1 to i64 + %e = shl i64 %d, 0 + br label %bb3 + +bb3: ; preds = %bb2, %bb1 + %f = phi i64 [ %e, %bb2 ], [ 0, %bb1 ] + %g = icmp slt i64 %f, 0 + br i1 %g, label %bb4, label %bb6 + +bb4: ; preds = %bb3 + %h = icmp eq i32 1, 0 + br i1 %h, label %bb5, label %bb7 + +bb5: ; preds = %bb4 + %i = load i8, ptr null, align 1 + %j = shl i64 0, 0 + br label %bb7 + +bb6: ; preds = %bb7, %bb3, %entry + %k = phi i1 [ false, %bb7 ], [ false, %bb3 ], [ false, %entry ] + ret i1 %k + +bb7: ; preds = %bb5, %bb4 + %l = phi ptr [ inttoptr (i64 1 to ptr), %bb5 ], [ null, %bb4 ] + %m = phi i64 [ %j, %bb5 ], [ 0, %bb4 ] + %n = icmp ult ptr %l, null + br label %bb6 +} diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll index 62d41fca10db3..19e1aa5d152ce 100644 --- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll @@ -26,9 +26,9 @@ define i32 @reduce_and_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_and_v1i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.b[0] +; CHECK-NEXT: tst w8, #0x80 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x) @@ -120,9 +120,9 @@ define i32 @reduce_and_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_and_v1i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.h[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.h[0] +; CHECK-NEXT: tst w8, #0x8000 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i16> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x) @@ -305,9 +305,9 @@ define i32 @reduce_or_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_or_v1i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.b[0] +; CHECK-NEXT: tst w8, #0x80 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x) @@ -399,9 +399,9 @@ define i32 @reduce_or_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_or_v1i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.h[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.h[0] +; CHECK-NEXT: tst w8, #0x8000 +; CHECK-NEXT: 
csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i16> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x) @@ -584,9 +584,9 @@ define i32 @reduce_xor_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_xor_v1i8: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.b[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.b[0] +; CHECK-NEXT: tst w8, #0x80 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i8> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x) @@ -679,9 +679,9 @@ define i32 @reduce_xor_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind { ; CHECK-LABEL: reduce_xor_v1i16: ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 -; CHECK-NEXT: smov w8, v0.h[0] -; CHECK-NEXT: cmp w8, #0 -; CHECK-NEXT: csel w0, w0, w1, mi +; CHECK-NEXT: umov w8, v0.h[0] +; CHECK-NEXT: tst w8, #0x8000 +; CHECK-NEXT: csel w0, w0, w1, ne ; CHECK-NEXT: ret %x = icmp slt <1 x i16> %a0, zeroinitializer %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x) diff --git a/llvm/test/CodeGen/AArch64/wincfi-seh-only-in-epilogue.ll b/llvm/test/CodeGen/AArch64/wincfi-minimal-seh-prologue.ll similarity index 78% rename from llvm/test/CodeGen/AArch64/wincfi-seh-only-in-epilogue.ll rename to llvm/test/CodeGen/AArch64/wincfi-minimal-seh-prologue.ll index 7daceae3dd4c0..8308108b84f08 100644 --- a/llvm/test/CodeGen/AArch64/wincfi-seh-only-in-epilogue.ll +++ b/llvm/test/CodeGen/AArch64/wincfi-minimal-seh-prologue.ll @@ -5,8 +5,9 @@ entry: ret void } -; Check that there is no .seh_endprologue but there is seh_startepilogue/seh_endepilogue. -; CHECK-NOT: .seh_endprologue +; Check that there is a minimal SEH prologue with seh_startepilogue/seh_endepilogue. 
+; CHECK: .seh_proc test +; CHECK: .seh_endprologue ; CHECK: .seh_startepilogue ; CHECK: add sp, sp, #48 ; CHECK: .seh_stackalloc 48 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll index 666523c88860c..ff618c05e2b80 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll @@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mov_b32_e32 v6, s16 -; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX12-NEXT: v_mov_b32_e32 v8, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] +; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen ; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v6, s16 -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX11-NEXT: v_mov_b32_e32 v8, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen ; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_max_f64 v[7:8], 
v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v6, s20 -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX908-NEXT: v_mov_b32_e32 v8, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB14_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v6, s20 -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX8-NEXT: v_mov_b32_e32 v8, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: 
s_mov_b64 s[4:5], 0 ; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB14_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll index 351502816ae6e..007417c83e324 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll @@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_mov_b32_e32 v6, s16 -; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX12-NEXT: v_mov_b32_e32 v8, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] +; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen ; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, 
s4 @@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_mov_b32_e32 v6, s16 -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1 +; GFX11-NEXT: v_mov_b32_e32 v8, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen ; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v6, s20 -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX908-NEXT: v_mov_b32_e32 v8, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 
v[0:3], v8, s[16:19], 0 offen glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB14_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v6, s20 -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX8-NEXT: v_mov_b32_e32 v8, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB14_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-divrem.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-divrem.mir new file mode 100644 index 0000000000000..f75fa857448d7 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-divrem.mir @@ -0,0 +1,125 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx803 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX8 %s + +--- +name: test_sdivrem_s16 +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + + ; GFX8-LABEL: name: test_sdivrem_s16 + ; GFX8: liveins: $vgpr0, $vgpr1 + ; GFX8-NEXT: {{ $}} + ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX8-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 16 + ; GFX8-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 16 + ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31 + ; GFX8-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT_INREG]], [[C]](s32) + ; GFX8-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SEXT_INREG1]], [[C]](s32) + ; GFX8-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT_INREG]], [[ASHR]] + ; GFX8-NEXT: [[ADD1:%[0-9]+]]:_(s32) 
= G_ADD [[SEXT_INREG1]], [[ASHR1]] + ; GFX8-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR]] + ; GFX8-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ADD1]], [[ASHR1]] + ; GFX8-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[XOR1]](s32) + ; GFX8-NEXT: [[AMDGPU_RCP_IFLAG:%[0-9]+]]:_(s32) = G_AMDGPU_RCP_IFLAG [[UITOFP]](s32) + ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x41EFFFFFC0000000 + ; GFX8-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[AMDGPU_RCP_IFLAG]], [[C1]] + ; GFX8-NEXT: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FMUL]](s32) + ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX8-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[XOR1]] + ; GFX8-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SUB]], [[FPTOUI]] + ; GFX8-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[FPTOUI]], [[MUL]] + ; GFX8-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[FPTOUI]], [[UMULH]] + ; GFX8-NEXT: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[XOR]], [[ADD2]] + ; GFX8-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]] + ; GFX8-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]] + ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[SUB1]](s32), [[XOR1]] + ; GFX8-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[UMULH1]], [[C3]] + ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[ADD3]], [[UMULH1]] + ; GFX8-NEXT: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[XOR1]] + ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB2]], [[SUB1]] + ; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[SELECT1]](s32), [[XOR1]] + ; GFX8-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C3]] + ; GFX8-NEXT: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[ADD4]], [[SELECT]] + ; GFX8-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[XOR1]] + ; GFX8-NEXT: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SUB3]], [[SELECT1]] + ; GFX8-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ASHR]], [[ASHR1]] + ; GFX8-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SELECT2]], [[XOR2]] + ; GFX8-NEXT: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[XOR3]], [[XOR2]] + ; GFX8-NEXT: [[XOR4:%[0-9]+]]:_(s32) = G_XOR [[SELECT3]], [[ASHR]] + ; GFX8-NEXT: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[XOR4]], [[ASHR]] + ; GFX8-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SUB4]](s32) + ; GFX8-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SUB5]](s32) + ; GFX8-NEXT: S_NOP 0, implicit [[TRUNC]](s16), implicit [[TRUNC1]](s16) + ; GFX8-NEXT: $vgpr0 = COPY [[SUB4]](s32) + ; GFX8-NEXT: $vgpr0 = COPY [[SUB5]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16), %5:_(s16) = G_SDIVREM %2, %3 + S_NOP 0, implicit %4, implicit %5 + %6:_(s32) = G_ANYEXT %4 + %7:_(s32) = G_ANYEXT %5 + $vgpr0 = COPY %6 + $vgpr0 = COPY %7 + +... 
+ +--- +name: test_udivrem_s16 +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + + ; GFX8-LABEL: name: test_udivrem_s16 + ; GFX8: liveins: $vgpr0, $vgpr1 + ; GFX8-NEXT: {{ $}} + ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]] + ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]] + ; GFX8-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[AND1]](s32) + ; GFX8-NEXT: [[AMDGPU_RCP_IFLAG:%[0-9]+]]:_(s32) = G_AMDGPU_RCP_IFLAG [[UITOFP]](s32) + ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x41EFFFFFC0000000 + ; GFX8-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[AMDGPU_RCP_IFLAG]], [[C1]] + ; GFX8-NEXT: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FMUL]](s32) + ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX8-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[AND1]] + ; GFX8-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SUB]], [[FPTOUI]] + ; GFX8-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[FPTOUI]], [[MUL]] + ; GFX8-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[FPTOUI]], [[UMULH]] + ; GFX8-NEXT: [[UMULH1:%[0-9]+]]:_(s32) = G_UMULH [[AND]], [[ADD]] + ; GFX8-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[AND1]] + ; GFX8-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[AND]], [[MUL1]] + ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX8-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[SUB1]](s32), [[AND1]] + ; GFX8-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UMULH1]], [[C3]] + ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[ADD1]], [[UMULH1]] + ; GFX8-NEXT: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[AND1]] + ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[SUB2]], [[SUB1]] + ; GFX8-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[SELECT1]](s32), [[AND1]] + ; GFX8-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[SELECT]], [[C3]] + ; GFX8-NEXT: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[ADD2]], [[SELECT]] + ; GFX8-NEXT: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SELECT1]], [[AND1]] + ; GFX8-NEXT: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SUB3]], [[SELECT1]] + ; GFX8-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SELECT2]](s32) + ; GFX8-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SELECT3]](s32) + ; GFX8-NEXT: S_NOP 0, implicit [[TRUNC]](s16), implicit [[TRUNC1]](s16) + ; GFX8-NEXT: $vgpr0 = COPY [[SELECT2]](s32) + ; GFX8-NEXT: $vgpr0 = COPY [[SELECT3]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s16) = G_TRUNC %0 + %3:_(s16) = G_TRUNC %1 + %4:_(s16), %5:_(s16) = G_UDIVREM %2, %3 + S_NOP 0, implicit %4, implicit %5 + %6:_(s32) = G_ANYEXT %4 + %7:_(s32) = G_ANYEXT %5 + $vgpr0 = COPY %6 + $vgpr0 = COPY %7 + +... 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll index dd5a9ae48e207..6e85e6fc7297d 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.make.buffer.rsrc.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2 ; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx900 -stop-after=instruction-select < %s | FileCheck %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1250 -stop-after=instruction-select < %s | FileCheck --check-prefix=CHECK45 %s define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr inreg %p) { ; CHECK-LABEL: name: basic_raw_buffer @@ -25,7 +26,39 @@ define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr inreg %p) { ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i32 1234, i32 5678) + ; + ; CHECK45-LABEL: name: basic_raw_buffer + ; CHECK45: bb.1 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -6629298651489370112 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], [[S_MOV_B]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 9 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -536870912 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE1]], [[S_MOV_B64_]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub0 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub1 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_1]].sub0 + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_1]].sub1 + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY2]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY3]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG 
implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i64 1234, i32 5678) ret ptr addrspace(8) %rsrc } @@ -43,7 +76,23 @@ define amdgpu_ps float @read_raw_buffer(ptr addrspace(1) inreg %p) { ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) + ; + ; CHECK45-LABEL: name: read_raw_buffer + ; CHECK45: bb.1 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, [[S_MOV_B32_1]], %subreg.sub3 + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE1]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %p, i16 0, i64 0, i32 0) %loaded = call float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 4, i32 0, i32 0) ret float %loaded } @@ -74,19 +123,54 @@ define amdgpu_ps ptr addrspace(8) @basic_struct_buffer(ptr inreg %p) { ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i32 1234, i32 5678) + ; + ; CHECK45-LABEL: name: basic_struct_buffer + ; CHECK45: bb.1 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -6629298651489370112 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], [[S_MOV_B]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 9 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16384 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -536870912 
+ ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_2]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE1]], [[S_MOV_B64_]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_OR_B64_2:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_OR_B64_1]], [[REG_SEQUENCE2]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub0 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub1 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub0 + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub1 + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY2]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY3]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i64 1234, i32 5678) ret ptr addrspace(8) %rsrc } -define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr inreg %p, i32 inreg %numVals, i32 inreg %flags) { +define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr inreg %p, i64 inreg %numVals, i32 inreg %flags) { ; CHECK-LABEL: name: variable_top_half ; CHECK: bb.1 (%ir-block.0): - ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]], implicit-def dead $scc ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 262144 @@ -104,20 +188,64 @@ define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr inreg %p, i32 inreg %nu ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i32 %numVals, i32 %flags) + ; + ; CHECK45-LABEL: name: variable_top_half + ; CHECK45: bb.1 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = 
REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 25 + ; CHECK45-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY2]], [[S_MOV_B32_1]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, [[S_LSHL_B32_]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], [[REG_SEQUENCE2]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 7 + ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], [[S_MOV_B32_3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 16384 + ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_4]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 28 + ; CHECK45-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY4]], [[S_MOV_B32_5]], implicit-def dead $scc + ; CHECK45-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_LSHL_B32_1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_LSHR_B64_]], [[REG_SEQUENCE3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_OR_B64_2:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_OR_B64_1]], [[REG_SEQUENCE4]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub0 + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub1 + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub0 + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub1 + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec + ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i64 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc } -define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg %p, i16 inreg %stride, i32 inreg %numVals, i32 inreg %flags) { +define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg %p, i16 inreg %stride, i64 inreg %numVals, i32 inreg %flags) { ; CHECK-LABEL: name: 
general_case ; CHECK: bb.1 (%ir-block.0): - ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4 + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]], implicit-def dead $scc ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16 @@ -136,20 +264,66 @@ define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg %p, i16 inreg %stride, ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + ; + ; CHECK45-LABEL: name: general_case + ; CHECK45: bb.1 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 25 + ; CHECK45-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY3]], [[S_MOV_B32_1]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, [[S_LSHL_B32_]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], [[REG_SEQUENCE2]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 7 + ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], [[S_MOV_B32_3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 12 + ; CHECK45-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY2]], [[S_MOV_B32_4]], implicit-def dead $scc + ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_LSHL_B32_1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 28 + ; CHECK45-NEXT: [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], [[S_MOV_B32_5]], implicit-def dead $scc + ; CHECK45-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_LSHL_B32_2]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_LSHR_B64_]], [[REG_SEQUENCE3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_OR_B64_2:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_OR_B64_1]], 
[[REG_SEQUENCE4]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub0 + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub1 + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub0 + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub1 + ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK45-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1, implicit $sgpr2, implicit $sgpr3 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i64 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc } -define amdgpu_ps float @general_case_load(ptr inreg %p, i16 inreg %stride, i32 inreg %numVals, i32 inreg %flags) { +define amdgpu_ps float @general_case_load(ptr inreg %p, i16 inreg %stride, i64 inreg %numVals, i32 inreg %flags) { ; CHECK-LABEL: name: general_case_load ; CHECK: bb.1 (%ir-block.0): - ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4 + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY1]], [[S_MOV_B32_]], implicit-def dead $scc ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16 @@ -161,23 +335,61 @@ define amdgpu_ps float @general_case_load(ptr inreg %p, i16 inreg %stride, i32 i ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN [[COPY5]], [[REG_SEQUENCE]], [[S_MOV_B32_2]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + ; + ; CHECK45-LABEL: name: general_case_load + ; CHECK45: bb.1 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; CHECK45-NEXT: 
[[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 25 + ; CHECK45-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY3]], [[S_MOV_B32_1]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, [[S_LSHL_B32_]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], [[REG_SEQUENCE2]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 7 + ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], [[S_MOV_B32_3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 12 + ; CHECK45-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY2]], [[S_MOV_B32_4]], implicit-def dead $scc + ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_LSHL_B32_1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 28 + ; CHECK45-NEXT: [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], [[S_MOV_B32_5]], implicit-def dead $scc + ; CHECK45-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_LSHL_B32_2]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_LSHR_B64_]], [[REG_SEQUENCE3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_OR_B64_2:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_OR_B64_1]], [[REG_SEQUENCE4]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub0 + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub1 + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub0 + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub1 + ; CHECK45-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1, [[COPY8]], %subreg.sub2, [[COPY9]], %subreg.sub3 + ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN [[COPY10]], [[REG_SEQUENCE5]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i64 %numVals, i32 %flags) %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0) ret float %value } ; None of the components are uniform due to the lack of an inreg -define amdgpu_ps float @general_case_load_with_waterfall(ptr %p, i16 %stride, i32 %numVals, i32 %flags) { +define amdgpu_ps float @general_case_load_with_waterfall(ptr %p, i16 %stride, i64 %numVals, i32 %flags) { ; CHECK-LABEL: name: general_case_load_with_waterfall ; CHECK: bb.1 (%ir-block.0): ; CHECK-NEXT: successors: %bb.2(0x80000000) - ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 ; CHECK-NEXT: {{ $}} ; 
CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr5 ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16 @@ -221,7 +433,75 @@ define amdgpu_ps float @general_case_load_with_waterfall(ptr %p, i16 %stride, i3 ; CHECK-NEXT: bb.5: ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + ; + ; CHECK45-LABEL: name: general_case_load_with_waterfall + ; CHECK45: bb.1 (%ir-block.0): + ; CHECK45-NEXT: successors: %bb.2(0x80000000) + ; CHECK45-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 25 + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub1 + ; CHECK45-NEXT: [[V_LSHL_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHL_OR_B32_e64 [[COPY3]], [[COPY6]], [[COPY8]], implicit $exec + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 7 + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]] + ; CHECK45-NEXT: [[V_LSHRREV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_LSHRREV_B64_e64 [[COPY9]], [[REG_SEQUENCE1]], implicit $exec + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 12 + ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] + ; CHECK45-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY10]], [[COPY2]], implicit $exec + ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 28 + ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]] + ; CHECK45-NEXT: [[V_LSHLREV_B32_e64_1:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY11]], [[COPY5]], implicit $exec + ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[V_LSHRREV_B64_e64_]].sub0 + ; CHECK45-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[V_LSHRREV_B64_e64_]].sub1 + ; CHECK45-NEXT: [[V_OR3_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR3_B32_e64 [[COPY13]], [[V_LSHLREV_B32_e64_]], [[V_LSHLREV_B32_e64_1]], implicit $exec + ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[V_LSHL_OR_B32_e64_]], %subreg.sub1, [[COPY12]], %subreg.sub2, [[V_OR3_B32_e64_]], %subreg.sub3 + ; CHECK45-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; CHECK45-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 
$exec_lo + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: bb.2: + ; CHECK45-NEXT: successors: %bb.3(0x80000000) + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[V_LSHL_OR_B32_e64_]], implicit $exec + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[V_OR3_B32_e64_]], implicit $exec + ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; CHECK45-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; CHECK45-NEXT: [[COPY16:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; CHECK45-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; CHECK45-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; CHECK45-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; CHECK45-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec + ; CHECK45-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: bb.3: + ; CHECK45-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN [[COPY14]], [[REG_SEQUENCE3]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) + ; CHECK45-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; CHECK45-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: bb.4: + ; CHECK45-NEXT: successors: %bb.5(0x80000000) + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_5]] + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: bb.5: + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i64 %numVals, i32 %flags) %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0) ret float %value } @@ -240,7 +520,23 @@ define amdgpu_ps float @read_buffer_fat_ptr_p0(ptr inreg %p) { ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 - %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 0, i32 0, i32 0) + ; + ; CHECK45-LABEL: name: read_buffer_fat_ptr_p0 + ; CHECK45: bb.1 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY 
$sgpr0 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, [[S_MOV_B32_1]], %subreg.sub3 + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE1]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 0, i64 0, i32 0) %loaded = load float, ptr addrspace(7) %ptr ret float %loaded } @@ -259,14 +555,30 @@ define amdgpu_ps float @read_buffer_fat_ptr_p1(ptr addrspace(1) inreg %p) { ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 - %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) + ; + ; CHECK45-LABEL: name: read_buffer_fat_ptr_p1 + ; CHECK45: bb.1 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, [[S_MOV_B32_1]], %subreg.sub3 + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET [[REG_SEQUENCE1]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %p, i16 0, i64 0, i32 0) %loaded = load float, ptr addrspace(7) %ptr ret float %loaded } -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr nocapture readnone, i16, i32, i32) -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) -declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr nocapture readnone, i16, i32, i32) -declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr nocapture readnone, i16, i64, i32) 
+declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) nocapture readnone, i16, i64, i32) +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr nocapture readnone, i16, i64, i32) +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) nocapture readnone, i16, i64, i32) declare float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) nocapture readonly, i32, i32, i32 immarg) declare float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) nocapture readonly, i32, i32, i32, i32 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.format.ll index feaf7ce42ecae..434f763044e45 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.format.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.format.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12 %s ; Natural mapping define amdgpu_ps float @raw_buffer_load_format_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.buffer.load.format.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.buffer.load.format.ll index 636ba9b320591..41d45530886ee 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.buffer.load.format.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.buffer.load.format.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck %s ; Natural mapping define amdgpu_ps float @raw_ptr_buffer_load_format_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) { diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.tbuffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.tbuffer.load.ll index 4d7d3ec5bdcb8..8ad5f50450155 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.tbuffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.ptr.tbuffer.load.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=instruction-select -o - %s | FileCheck %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -stop-after=instruction-select -o - %s | FileCheck %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=instruction-select -o - %s | FileCheck %s +; RUN: llc -global-isel 
-new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -stop-after=instruction-select -o - %s | FileCheck %s define amdgpu_ps float @raw_tbuffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) { ; CHECK-LABEL: name: raw_tbuffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.tbuffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.tbuffer.load.ll index 0ae28336dce8d..b7e2074ca9a63 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.tbuffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.tbuffer.load.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX10_GFX11 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX10_GFX11 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX10_GFX11 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX10_GFX11 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s define amdgpu_ps float @raw_tbuffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { ; GFX10_GFX11-LABEL: name: raw_tbuffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.tbuffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.tbuffer.load.ll index d644ef93d1850..23858b9c92947 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.tbuffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.ptr.tbuffer.load.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s define amdgpu_ps float @struct_tbuffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(ptr addrspace(8) inreg 
%rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { ; CHECK-LABEL: name: struct_tbuffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.tbuffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.tbuffer.load.ll index 7c811f489463a..dec015df58c1b 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.tbuffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.tbuffer.load.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK-GFX12 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -mattr=+wavefrontsize64 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=CHECK-GFX12 %s define amdgpu_ps float @struct_tbuffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { ; CHECK-LABEL: name: struct_tbuffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll index ba5a8e9c68a1f..9e412b6c7cd0a 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll @@ -209,48 +209,48 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2 ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v2 ; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s10, v3, v[1:2] -; GFX8-NEXT: v_mov_b32_e32 v6, s9 -; GFX8-NEXT: v_mov_b32_e32 v5, s11 +; GFX8-NEXT: v_sub_u32_e32 v6, vcc, s8, v0 ; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s11, v4, v[1:2] -; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s8, v0 -; GFX8-NEXT: v_subb_u32_e64 v6, s[0:1], v6, v1, vcc +; GFX8-NEXT: v_mov_b32_e32 v2, s9 +; GFX8-NEXT: v_mov_b32_e32 v5, s11 +; GFX8-NEXT: v_subb_u32_e64 v7, s[0:1], v2, v1, vcc ; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s9, v1 -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v6 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v7 ; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2 -; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v6 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v6 +; GFX8-NEXT: 
v_cndmask_b32_e64 v2, 0, -1, s[0:1] +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v7 ; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[0:1] -; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s10, v2 +; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1] +; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s10, v6 ; GFX8-NEXT: v_subbrev_u32_e64 v8, s[0:1], 0, v0, vcc ; GFX8-NEXT: v_add_u32_e64 v9, s[0:1], 1, v4 ; GFX8-NEXT: v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1] ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v7 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2 ; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc ; GFX8-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v8 -; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v7 +; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v2 ; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[0:1] ; GFX8-NEXT: v_add_u32_e64 v12, s[0:1], 1, v9 ; GFX8-NEXT: v_subbrev_u32_e32 v14, vcc, 0, v0, vcc ; GFX8-NEXT: v_addc_u32_e64 v13, s[0:1], 0, v10, s[0:1] ; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11 ; GFX8-NEXT: v_cndmask_b32_e32 v0, v9, v12, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc ; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc ; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, v0, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v14, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v4, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc ; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1] ; GFX8-NEXT: v_mov_b32_e32 v5, s5 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v14, vcc ; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1] ; GFX8-NEXT: v_mov_b32_e32 v0, s6 +; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[0:1] ; GFX8-NEXT: v_mov_b32_e32 v1, s7 ; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; GFX8-NEXT: s_endpgm @@ -299,7 +299,6 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v0 ; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v4, v1, vcc ; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s2, v3, 0 -; GFX9-NEXT: v_mov_b32_e32 v7, s19 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v4, v[1:2] ; GFX9-NEXT: v_mul_hi_u32 v6, v3, v0 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s3, v3, v[1:2] @@ -346,30 +345,30 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX9-NEXT: v_add_u32_e32 v3, v4, v3 ; GFX9-NEXT: v_add3_u32 v3, v3, v2, v6 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s18, v3, v[1:2] -; GFX9-NEXT: v_mov_b32_e32 v6, s17 -; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: v_sub_co_u32_e32 v7, vcc, s16, v0 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s19, v5, v[1:2] -; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s16, v0 -; GFX9-NEXT: v_subb_co_u32_e64 v6, s[0:1], v6, v1, vcc -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v6 +; GFX9-NEXT: v_mov_b32_e32 v2, s17 +; GFX9-NEXT: v_mov_b32_e32 v4, s19 +; GFX9-NEXT: v_subb_co_u32_e64 v8, s[0:1], v2, v1, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v8 ; GFX9-NEXT: v_sub_u32_e32 v0, s17, v1 ; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 
s[0:1], s19, v6 -; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[0:1] -; GFX9-NEXT: v_subrev_co_u32_e32 v8, vcc, s18, v2 +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1] +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v8 +; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1] +; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s18, v7 ; GFX9-NEXT: v_subbrev_co_u32_e64 v9, s[0:1], 0, v0, vcc ; GFX9-NEXT: v_add_co_u32_e64 v10, s[0:1], 1, v5 ; GFX9-NEXT: v_addc_co_u32_e64 v11, s[0:1], 0, v3, s[0:1] ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v9 ; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v8 -; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2 +; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc ; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1] ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v9 -; GFX9-NEXT: v_subrev_co_u32_e32 v7, vcc, s18, v8 +; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s18, v2 ; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[0:1] ; GFX9-NEXT: v_add_co_u32_e64 v13, s[0:1], 1, v10 ; GFX9-NEXT: v_subbrev_co_u32_e32 v15, vcc, 0, v0, vcc @@ -378,14 +377,15 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1) ; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v13, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v14, vcc ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1 +; GFX9-NEXT: v_mov_b32_e32 v6, 0 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v5, v0, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v10, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v7, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v15, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v5, s[0:1] -; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[12:13] -; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[14:15] +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v9, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v2, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v3, s[0:1] +; GFX9-NEXT: global_store_dwordx2 v6, v[0:1], s[12:13] +; GFX9-NEXT: global_store_dwordx2 v6, v[2:3], s[14:15] ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: udivrem_i64: @@ -1070,6 +1070,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX8-NEXT: v_mul_lo_u32 v3, s8, v1 ; GFX8-NEXT: v_mul_hi_u32 v4, s8, v0 ; GFX8-NEXT: v_mul_hi_u32 v0, s9, v0 +; GFX8-NEXT: v_mov_b32_e32 v5, s13 ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3 ; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4 @@ -1082,184 +1083,183 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3 ; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v4, v3 -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v0, v2 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v0, v2 ; GFX8-NEXT: v_mul_hi_u32 v4, s9, v1 -; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v6, 0 +; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v7, 0 ; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v4, v2 -; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v7, v[1:2] +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v4, v2 +; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v8, v[1:2] +; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s13, v7, v[1:2] ; 
GFX8-NEXT: v_mov_b32_e32 v3, s9 -; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s8, v0 -; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s13, v6, v[1:2] -; GFX8-NEXT: v_mov_b32_e32 v4, s13 -; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v1, vcc -; GFX8-NEXT: v_sub_u32_e64 v1, s[0:1], s9, v1 +; GFX8-NEXT: v_sub_u32_e32 v1, vcc, s8, v0 +; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v2, vcc +; GFX8-NEXT: v_sub_u32_e64 v2, s[0:1], s9, v2 ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1] +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v1 +; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v9, v2, v3, s[0:1] -; GFX8-NEXT: v_cvt_f32_u32_e32 v2, s15 -; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s14 -; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v1, v4, vcc -; GFX8-NEXT: v_mul_f32_e32 v1, 0x4f800000, v2 -; GFX8-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX8-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v8 -; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v5, vcc -; GFX8-NEXT: v_mul_f32_e32 v1, 0x5f7ffffc, v1 -; GFX8-NEXT: v_mul_f32_e32 v2, 0x2f800000, v1 -; GFX8-NEXT: v_trunc_f32_e32 v3, v2 -; GFX8-NEXT: v_mul_f32_e32 v2, 0xcf800000, v3 -; GFX8-NEXT: v_add_f32_e32 v1, v2, v1 -; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v1 -; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v6 -; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v7, s[0:1] -; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v12, 0 -; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v3 +; GFX8-NEXT: v_cndmask_b32_e64 v9, v3, v4, s[0:1] +; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s15 +; GFX8-NEXT: v_cvt_f32_u32_e32 v4, s14 +; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v2, v5, vcc +; GFX8-NEXT: v_mul_f32_e32 v2, 0x4f800000, v3 +; GFX8-NEXT: v_add_f32_e32 v2, v2, v4 +; GFX8-NEXT: v_rcp_iflag_f32_e32 v2, v2 +; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v1 +; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v6, vcc +; GFX8-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2 +; GFX8-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2 +; GFX8-NEXT: v_trunc_f32_e32 v4, v3 +; GFX8-NEXT: v_mul_f32_e32 v3, 0xcf800000, v4 +; GFX8-NEXT: v_add_f32_e32 v2, v3, v2 +; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v2 +; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v7 +; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v8, s[0:1] +; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v12, 0 +; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v4 ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v11 ; GFX8-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1] -; GFX8-NEXT: v_subb_u32_e32 v4, vcc, v5, v4, vcc -; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3] +; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v6, v5, vcc +; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4] ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1] -; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s3, v12, v[2:3] +; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s3, v12, v[3:4] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v11 ; GFX8-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1] -; GFX8-NEXT: v_mul_lo_u32 v3, v15, v1 -; GFX8-NEXT: v_mul_lo_u32 v17, v12, v2 -; GFX8-NEXT: v_mul_hi_u32 v5, v12, v1 -; GFX8-NEXT: v_mul_hi_u32 v1, v15, v1 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v17 +; GFX8-NEXT: v_mul_lo_u32 v4, v15, v2 +; GFX8-NEXT: v_mul_lo_u32 v17, v12, v3 +; GFX8-NEXT: v_mul_hi_u32 v6, v12, v2 +; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v17 ; GFX8-NEXT: 
v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5 -; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc -; GFX8-NEXT: v_mul_lo_u32 v5, v15, v2 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v17, v3 -; GFX8-NEXT: v_mul_hi_u32 v17, v12, v2 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v5, v1 -; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v17 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v6 +; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc +; GFX8-NEXT: v_mul_lo_u32 v6, v15, v3 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v17, v4 +; GFX8-NEXT: v_mul_hi_u32 v17, v12, v3 +; GFX8-NEXT: v_add_u32_e32 v2, vcc, v6, v2 +; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v17 ; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v17 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v17 ; GFX8-NEXT: v_add_u32_e32 v17, vcc, 1, v13 ; GFX8-NEXT: v_addc_u32_e32 v18, vcc, 0, v14, vcc ; GFX8-NEXT: v_subrev_u32_e32 v19, vcc, s12, v10 -; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2 -; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v4, vcc -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3 -; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3 -; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v1 -; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v12, 0 -; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v2, vcc +; GFX8-NEXT: v_mul_hi_u32 v3, v15, v3 +; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4 +; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v6, v4 +; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v4 +; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v2 +; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0 +; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v3, vcc ; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 -; GFX8-NEXT: v_cndmask_b32_e32 v2, v13, v17, vcc -; GFX8-NEXT: v_mov_b32_e32 v1, v4 -; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v15, v[1:2] +; GFX8-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc +; GFX8-NEXT: v_mov_b32_e32 v2, v5 +; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3] ; GFX8-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc ; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9 -; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s3, v12, v[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v1, v6, v2, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v2, v7, v13, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v19, vcc -; GFX8-NEXT: v_mul_lo_u32 v7, v15, v3 -; GFX8-NEXT: v_mul_lo_u32 v9, v12, v4 -; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[0:1] -; GFX8-NEXT: v_mul_hi_u32 v8, v12, v3 -; GFX8-NEXT: v_cndmask_b32_e32 v6, v11, v20, vcc -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9 -; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 -; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX8-NEXT: v_mul_lo_u32 v8, v15, v4 -; GFX8-NEXT: v_mul_hi_u32 v3, v15, v3 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v9, v7 +; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1] +; GFX8-NEXT: v_mul_lo_u32 v7, v15, v4 +; GFX8-NEXT: v_mul_lo_u32 v8, v12, v5 ; GFX8-NEXT: v_mul_hi_u32 v9, v12, v4 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v8, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v10, v11, v20, vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX8-NEXT: 
v_add_u32_e32 v3, vcc, v3, v9 -; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v9 -; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v7 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9 ; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc +; GFX8-NEXT: v_mul_lo_u32 v9, v15, v5 +; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4 ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7 +; GFX8-NEXT: v_mul_hi_u32 v8, v12, v5 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v9, v4 +; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8 +; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v9, v8 +; GFX8-NEXT: v_mul_hi_u32 v5, v15, v5 ; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v7 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v12, v3 -; GFX8-NEXT: v_addc_u32_e32 v4, vcc, v15, v4, vcc -; GFX8-NEXT: v_mul_lo_u32 v7, s11, v3 -; GFX8-NEXT: v_mul_lo_u32 v8, s10, v4 -; GFX8-NEXT: v_cndmask_b32_e64 v6, v0, v6, s[0:1] -; GFX8-NEXT: v_mul_hi_u32 v0, s10, v3 -; GFX8-NEXT: v_mul_hi_u32 v3, s11, v3 +; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7 +; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v7 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v12, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v15, v5, vcc +; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4 +; GFX8-NEXT: v_mul_lo_u32 v8, s10, v5 +; GFX8-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1] +; GFX8-NEXT: v_mul_hi_u32 v1, s10, v4 +; GFX8-NEXT: v_mul_hi_u32 v4, s11, v4 ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0 -; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc -; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4 -; GFX8-NEXT: v_add_u32_e32 v0, vcc, v8, v0 -; GFX8-NEXT: v_mul_hi_u32 v8, s10, v4 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v7, v3 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1 +; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc +; GFX8-NEXT: v_mul_lo_u32 v7, s11, v5 +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v8, v1 +; GFX8-NEXT: v_mul_hi_u32 v8, s10, v5 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v7, v4 ; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v8 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc ; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8 -; GFX8-NEXT: v_add_u32_e32 v9, vcc, v3, v0 -; GFX8-NEXT: v_mul_hi_u32 v8, s11, v4 -; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s14, v9, 0 -; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc -; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v8, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, v4 -; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s14, v10, v[0:1] -; GFX8-NEXT: v_mov_b32_e32 v4, s11 -; GFX8-NEXT: v_mov_b32_e32 v0, s15 -; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s15, v9, v[7:8] -; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v3 -; GFX8-NEXT: v_subb_u32_e64 v11, s[0:1], v4, v7, vcc -; GFX8-NEXT: v_sub_u32_e64 v3, s[0:1], s11, v7 -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v11 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, v4, v1 +; GFX8-NEXT: v_mul_hi_u32 v8, s11, v5 +; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s14, v11, 0 +; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc +; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1 +; GFX8-NEXT: v_add_u32_e32 v12, vcc, v8, v1 +; GFX8-NEXT: v_mov_b32_e32 v1, v5 +; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s14, v12, v[1:2] +; GFX8-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1] +; GFX8-NEXT: v_mov_b32_e32 v5, s15 +; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s15, v11, v[8:9] +; GFX8-NEXT: 
v_mov_b32_e32 v1, s11 +; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v4 +; GFX8-NEXT: v_subb_u32_e64 v1, s[0:1], v1, v0, vcc +; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s11, v0 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v1 ; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] ; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v8 -; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v11 -; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v3, v0, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v7, s[0:1] -; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s14, v8 -; GFX8-NEXT: v_subbrev_u32_e64 v12, s[0:1], 0, v3, vcc -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v12 +; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1] +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v1 +; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1] +; GFX8-NEXT: v_subrev_u32_e32 v9, vcc, s14, v8 +; GFX8-NEXT: v_subbrev_u32_e64 v10, s[0:1], 0, v0, vcc +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v7 +; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v9 ; GFX8-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1] -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v12 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1] -; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v9 -; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v3, v0, vcc -; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v10, s[0:1] -; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v14 +; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v11 +; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc +; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v12, s[0:1] +; GFX8-NEXT: v_add_u32_e32 v5, vcc, 1, v14 ; GFX8-NEXT: v_addc_u32_e32 v16, vcc, 0, v15, vcc ; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 -; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v7 +; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v9 ; GFX8-NEXT: v_subbrev_u32_e64 v0, s[0:1], 0, v0, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e32 v3, v14, v3, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc ; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 -; GFX8-NEXT: v_cndmask_b32_e64 v3, v9, v3, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v4, v10, v14, s[0:1] -; GFX8-NEXT: v_mov_b32_e32 v10, s5 -; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v13, vcc -; GFX8-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc -; GFX8-NEXT: v_mov_b32_e32 v9, s4 -; GFX8-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v8, v11, v0, s[0:1] -; GFX8-NEXT: flat_store_dwordx4 v[9:10], v[1:4] +; GFX8-NEXT: v_cndmask_b32_e32 v9, v9, v13, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1] +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1] +; GFX8-NEXT: v_mov_b32_e32 v1, s5 +; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[2:5] ; GFX8-NEXT: v_mov_b32_e32 v0, s6 ; GFX8-NEXT: v_mov_b32_e32 v1, s7 -; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[5:8] +; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[6:9] ; GFX8-NEXT: s_endpgm ; ; GFX9-LABEL: udivrem_v2i64: @@ -1355,11 +1355,11 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX9-NEXT: v_add_u32_e32 v3, v4, v3 ; GFX9-NEXT: v_add3_u32 v8, v3, v2, v5 ; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s4, v8, v[1:2] 
-; GFX9-NEXT: v_mov_b32_e32 v4, s17 ; GFX9-NEXT: v_mov_b32_e32 v5, s5 ; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s5, v7, v[1:2] +; GFX9-NEXT: v_mov_b32_e32 v3, s17 ; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s16, v0 -; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v4, v2, vcc +; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v3, v2, vcc ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v0 ; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1] ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v1 @@ -1387,7 +1387,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v15, v4 ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v11 ; GFX9-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1] -; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc +; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v6, v5, vcc ; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4] ; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v10 ; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1] @@ -1396,128 +1396,128 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1 ; GFX9-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1] ; GFX9-NEXT: v_mul_lo_u32 v4, v15, v2 ; GFX9-NEXT: v_mul_lo_u32 v17, v12, v3 -; GFX9-NEXT: v_mul_hi_u32 v6, v12, v2 +; GFX9-NEXT: v_mul_hi_u32 v5, v12, v2 ; GFX9-NEXT: v_mul_hi_u32 v2, v15, v2 ; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v17 ; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v6 +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v5 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc -; GFX9-NEXT: v_mul_lo_u32 v6, v15, v3 +; GFX9-NEXT: v_mul_lo_u32 v5, v15, v3 ; GFX9-NEXT: v_add_u32_e32 v4, v17, v4 ; GFX9-NEXT: v_mul_hi_u32 v17, v12, v3 ; GFX9-NEXT: v_mul_hi_u32 v3, v15, v3 -; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v5, v2 +; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v17 ; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc -; GFX9-NEXT: v_add_u32_e32 v6, v6, v17 +; GFX9-NEXT: v_add_u32_e32 v5, v5, v17 ; GFX9-NEXT: v_add_co_u32_e32 v17, vcc, 1, v13 ; GFX9-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v14, vcc -; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10 -; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v5, vcc ; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc ; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, v12, v2 -; GFX9-NEXT: v_add3_u32 v3, v6, v4, v3 +; GFX9-NEXT: v_add3_u32 v3, v5, v4, v3 ; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0 ; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, v15, v3, vcc -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc ; GFX9-NEXT: v_mov_b32_e32 v2, v5 -; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3] -; GFX9-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc +; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3] +; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10 +; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v6, vcc +; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s3, v12, v[2:3] +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 +; GFX9-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9 -; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6] -; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1] -; GFX9-NEXT: v_mul_lo_u32 v7, v15, v4 -; GFX9-NEXT: v_mul_lo_u32 v8, v12, v5 -; GFX9-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc -; GFX9-NEXT: 
v_mul_hi_u32 v10, v12, v4 -; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v20, vcc -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v10 -; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX9-NEXT: v_mul_lo_u32 v10, v15, v5 +; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v13, s[0:1] +; GFX9-NEXT: v_mul_lo_u32 v6, v15, v4 +; GFX9-NEXT: v_mul_lo_u32 v7, v12, v5 +; GFX9-NEXT: v_mul_hi_u32 v9, v12, v4 ; GFX9-NEXT: v_mul_hi_u32 v4, v15, v4 -; GFX9-NEXT: v_add_u32_e32 v7, v8, v7 -; GFX9-NEXT: v_mul_hi_u32 v8, v12, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v18, vcc +; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3] +; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v9 +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3] +; GFX9-NEXT: v_mul_lo_u32 v9, v15, v5 +; GFX9-NEXT: v_add_u32_e32 v6, v7, v6 +; GFX9-NEXT: v_mul_hi_u32 v7, v12, v5 ; GFX9-NEXT: v_mul_hi_u32 v5, v15, v5 -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7 -; GFX9-NEXT: v_add_u32_e32 v8, v10, v8 +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v9, v4 +; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[2:3] +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3] +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v6 +; GFX9-NEXT: v_add_u32_e32 v7, v9, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3] +; GFX9-NEXT: v_add3_u32 v5, v7, v6, v5 +; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v12, v4 +; GFX9-NEXT: v_addc_co_u32_e64 v5, s[2:3], v15, v5, s[2:3] +; GFX9-NEXT: v_mul_lo_u32 v6, s19, v4 +; GFX9-NEXT: v_mul_lo_u32 v7, s18, v5 +; GFX9-NEXT: v_mul_hi_u32 v9, s18, v4 +; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v14, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v19, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v20, vcc +; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v7 ; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc -; GFX9-NEXT: v_add3_u32 v5, v8, v7, v5 -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4 -; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc -; GFX9-NEXT: v_mul_lo_u32 v7, s19, v4 -; GFX9-NEXT: v_mul_lo_u32 v8, s18, v5 -; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1] -; GFX9-NEXT: v_mul_hi_u32 v1, s18, v4 +; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v9 +; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc +; GFX9-NEXT: v_mul_lo_u32 v9, s19, v5 ; GFX9-NEXT: v_mul_hi_u32 v4, s19, v4 -; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v7, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc -; GFX9-NEXT: v_mul_lo_u32 v7, s19, v5 -; GFX9-NEXT: v_add_u32_e32 v1, v8, v1 -; GFX9-NEXT: v_mul_hi_u32 v8, s18, v5 -; GFX9-NEXT: v_mul_hi_u32 v12, s19, v5 -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v7, v4 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8 -; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc -; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v1 +; GFX9-NEXT: v_add_u32_e32 v6, v7, v6 +; GFX9-NEXT: v_mul_hi_u32 v7, s18, v5 +; GFX9-NEXT: v_mul_hi_u32 v13, s19, v5 +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v9, v4 +; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc +; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7 +; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc +; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v6 ; GFX9-NEXT: v_mad_u64_u32 
v[4:5], s[2:3], s6, v11, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v9, s[0:1] -; GFX9-NEXT: v_add_u32_e32 v0, v10, v8 -; GFX9-NEXT: v_add3_u32 v8, v0, v1, v12 -; GFX9-NEXT: v_mov_b32_e32 v0, v5 -; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s6, v8, v[0:1] -; GFX9-NEXT: v_mov_b32_e32 v9, s19 +; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v8, s[0:1] +; GFX9-NEXT: v_add_u32_e32 v1, v9, v7 +; GFX9-NEXT: v_add3_u32 v12, v1, v12, v13 +; GFX9-NEXT: v_mov_b32_e32 v1, v5 +; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s6, v12, v[1:2] +; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1] ; GFX9-NEXT: v_mov_b32_e32 v5, s7 -; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[0:1] -; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s18, v4 -; GFX9-NEXT: v_subb_co_u32_e64 v9, s[0:1], v9, v0, vcc -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v9 +; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[8:9] +; GFX9-NEXT: v_mov_b32_e32 v1, s19 +; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, s18, v4 +; GFX9-NEXT: v_subb_co_u32_e64 v1, s[0:1], v1, v0, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v1 ; GFX9-NEXT: v_sub_u32_e32 v0, s19, v0 ; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v9 +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v8 +; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1] +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v1 ; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v5, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1] -; GFX9-NEXT: v_subrev_co_u32_e32 v10, vcc, s6, v1 -; GFX9-NEXT: v_subbrev_co_u32_e64 v12, s[0:1], 0, v0, vcc -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v12 +; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1] +; GFX9-NEXT: v_subrev_co_u32_e32 v9, vcc, s6, v8 +; GFX9-NEXT: v_subbrev_co_u32_e64 v10, s[0:1], 0, v0, vcc +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v10 ; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v10 +; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v9 ; GFX9-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1] -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v12 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v10 ; GFX9-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1] ; GFX9-NEXT: v_add_co_u32_e64 v14, s[0:1], 1, v11 ; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v5, vcc -; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v8, s[0:1] +; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v12, s[0:1] ; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, 1, v14 ; GFX9-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v15, vcc ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 ; GFX9-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc ; GFX9-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc -; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v10 +; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v9 ; GFX9-NEXT: v_subbrev_co_u32_e64 v0, s[0:1], 0, v0, s[0:1] ; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 ; GFX9-NEXT: v_mov_b32_e32 v13, 0 ; GFX9-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v14, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v8, v1, v8, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v0, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1] +; 
GFX9-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1] ; GFX9-NEXT: global_store_dwordx4 v13, v[2:5], s[12:13] ; GFX9-NEXT: global_store_dwordx4 v13, v[6:9], s[14:15] ; GFX9-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll index d053425afbb6d..7cc505171da82 100644 --- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll +++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll @@ -1483,7 +1483,6 @@ define void @flat_atomic_xchg_i64_noret_av(ptr %ptr) #0 { ; GFX90A-NEXT: buffer_invl2 ; GFX90A-NEXT: buffer_wbinvl1_vol ; GFX90A-NEXT: ; implicit-def: $vgpr0_vgpr1 -; GFX90A-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_cbranch_execz .LBB20_2 ; GFX90A-NEXT: .LBB20_4: ; %atomicrmw.private diff --git a/llvm/test/CodeGen/AMDGPU/add_u64.ll b/llvm/test/CodeGen/AMDGPU/add_u64.ll index 0373027201378..22acedc4d6e25 100644 --- a/llvm/test/CodeGen/AMDGPU/add_u64.ll +++ b/llvm/test/CodeGen/AMDGPU/add_u64.ll @@ -109,7 +109,7 @@ define amdgpu_ps <2 x float> @test_add_u64_v_64bit_imm(i64 %a) { ; ; GFX1250-LABEL: test_add_u64_v_64bit_imm: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], lit64(0x13b9ac9ff), v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], 0x13b9ac9ff, v[0:1] ; GFX1250-NEXT: ; return to shader part epilog %add = add i64 %a, 5294967295 %ret = bitcast i64 %add to <2 x float> diff --git a/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll b/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll index e65f401bcf68a..7fcb29d367006 100644 --- a/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll +++ b/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll @@ -1,11 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s -; GCN-LABEL: {{^}}alignbit_shr_pat: -; GCN-DAG: s_load_dword s[[SHR:[0-9]+]] -; GCN-DAG: load_dwordx2 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]] -; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], s[[SHR]] - define amdgpu_kernel void @alignbit_shr_pat(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, i32 %arg2) { +; GCN-LABEL: alignbit_shr_pat: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_load_dword s8, s[4:5], 0xd +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_and_b32 s0, s8, 31 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], s0 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp3 = and i32 %arg2, 31 @@ -16,12 +29,24 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_v: -; GCN-DAG: load_dword v[[SHR:[0-9]+]], -; GCN-DAG: load_dwordx2 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]] -; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], v[[SHR]] - define amdgpu_kernel void @alignbit_shr_pat_v(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) { +; GCN-LABEL: alignbit_shr_pat_v: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, 0 +; GCN-NEXT: v_lshlrev_b32_e32 v1, 3, v0 +; GCN-NEXT: v_mov_b32_e32 v2, 0 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b64 s[4:5], s[0:1] 
+; GCN-NEXT: buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64 +; GCN-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: buffer_load_dword v0, v[1:2], s[4:7], 0 addr64 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_alignbit_b32 v0, v4, v3, v0 +; GCN-NEXT: buffer_store_dword v0, v[1:2], s[4:7], 0 addr64 +; GCN-NEXT: s_endpgm bb: %tid = tail call i32 @llvm.amdgcn.workitem.id.x() %gep1 = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %tid @@ -36,12 +61,24 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and30: -; Negative test, wrong constant -; GCN: v_lshr_b64 -; GCN-NOT: v_alignbit_b32 - define amdgpu_kernel void @alignbit_shr_pat_wrong_and30(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, i32 %arg2) { +; GCN-LABEL: alignbit_shr_pat_wrong_and30: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_load_dword s8, s[4:5], 0xd +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_and_b32 s0, s8, 30 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], s0 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp3 = and i32 %arg2, 30 @@ -52,12 +89,23 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and63: -; Negative test, wrong constant -; GCN: v_lshr_b64 -; GCN-NOT: v_alignbit_b32 - define amdgpu_kernel void @alignbit_shr_pat_wrong_and63(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, i32 %arg2) { +; GCN-LABEL: alignbit_shr_pat_wrong_and63: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_load_dword s8, s[4:5], 0xd +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], s8 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp3 = and i32 %arg2, 63 @@ -68,11 +116,22 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_const30: -; GCN: load_dwordx2 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]] -; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], 30 - define amdgpu_kernel void @alignbit_shr_pat_const30(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) { +; GCN-LABEL: alignbit_shr_pat_const30: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], 30 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp5 = lshr i64 %tmp, 30 @@ -81,12 +140,22 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_const33: -; Negative test, shift amount more than 31 -; GCN: v_lshrrev_b32_e32 
v{{[0-9]+}}, 1, v{{[0-9]+}} -; GCN-NOT: v_alignbit_b32 - define amdgpu_kernel void @alignbit_shr_pat_wrong_const33(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) { +; GCN-LABEL: alignbit_shr_pat_wrong_const33: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_mov_b32 s2, s6 +; GCN-NEXT: s_mov_b32 s3, s7 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 offset:4 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshrrev_b32_e32 v0, 1, v0 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp5 = lshr i64 %tmp, 33 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll index be08c4e33f072..df9c97fa23722 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll @@ -7526,831 +7526,1167 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v32i32_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[4:5] -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v41, s30, 0 -; SI-NEXT: v_writelane_b32 v41, s31, 1 -; SI-NEXT: v_writelane_b32 v41, s34, 2 -; SI-NEXT: v_writelane_b32 v41, s35, 3 -; SI-NEXT: v_writelane_b32 v41, s36, 4 -; SI-NEXT: v_writelane_b32 v41, s37, 5 -; SI-NEXT: v_writelane_b32 v41, s38, 6 -; SI-NEXT: v_writelane_b32 v41, s39, 7 -; SI-NEXT: v_writelane_b32 v41, s48, 8 -; SI-NEXT: v_writelane_b32 v41, s49, 9 -; SI-NEXT: v_writelane_b32 v41, s50, 10 -; SI-NEXT: v_writelane_b32 v41, s51, 11 -; SI-NEXT: v_writelane_b32 v41, s52, 12 -; SI-NEXT: v_writelane_b32 v41, s53, 13 -; SI-NEXT: v_writelane_b32 v41, s54, 14 -; SI-NEXT: v_writelane_b32 v41, s55, 15 -; SI-NEXT: v_writelane_b32 v41, s64, 16 -; SI-NEXT: v_writelane_b32 v41, s65, 17 -; SI-NEXT: v_writelane_b32 v41, s66, 18 -; SI-NEXT: v_writelane_b32 v41, s67, 19 -; SI-NEXT: v_writelane_b32 v41, s68, 20 -; SI-NEXT: v_writelane_b32 v41, s69, 21 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: v_writelane_b32 v20, s51, 11 +; SI-NEXT: v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 +; SI-NEXT: v_writelane_b32 v20, s68, 20 
+; SI-NEXT: v_writelane_b32 v20, s69, 21 +; SI-NEXT: v_writelane_b32 v20, s70, 22 +; SI-NEXT: v_writelane_b32 v20, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s80, 24 +; SI-NEXT: v_writelane_b32 v20, s81, 25 +; SI-NEXT: v_writelane_b32 v20, s82, 26 +; SI-NEXT: v_writelane_b32 v20, s83, 27 +; SI-NEXT: v_writelane_b32 v20, s84, 28 +; SI-NEXT: v_writelane_b32 v20, s85, 29 +; SI-NEXT: v_writelane_b32 v20, s86, 30 +; SI-NEXT: v_writelane_b32 v20, s87, 31 +; SI-NEXT: v_writelane_b32 v20, s96, 32 +; SI-NEXT: v_writelane_b32 v20, s97, 33 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_writelane_b32 v41, s70, 22 -; SI-NEXT: v_readfirstlane_b32 s47, v1 -; SI-NEXT: v_readfirstlane_b32 s46, v2 -; SI-NEXT: v_readfirstlane_b32 s45, v3 -; SI-NEXT: v_readfirstlane_b32 s44, v4 -; SI-NEXT: v_readfirstlane_b32 s43, v5 -; SI-NEXT: v_readfirstlane_b32 s42, v6 -; SI-NEXT: v_readfirstlane_b32 s41, v7 -; SI-NEXT: v_readfirstlane_b32 s40, v8 -; SI-NEXT: v_readfirstlane_b32 s15, v9 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s12, v12 -; SI-NEXT: v_readfirstlane_b32 s11, v13 -; SI-NEXT: v_readfirstlane_b32 s10, v14 -; SI-NEXT: v_readfirstlane_b32 s9, v15 -; SI-NEXT: v_readfirstlane_b32 s8, v16 -; SI-NEXT: v_readfirstlane_b32 s7, v17 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v18 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: v_writelane_b32 v41, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s98, 34 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: v_writelane_b32 v20, s99, 35 +; SI-NEXT: ; implicit-def: $vgpr22 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v22, s45 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s9 -; SI-NEXT: v_mov_b32_e32 v9, s11 -; SI-NEXT: v_mov_b32_e32 v12, s13 -; SI-NEXT: v_mov_b32_e32 v15, s15 -; SI-NEXT: v_mov_b32_e32 v18, s41 -; SI-NEXT: v_mov_b32_e32 v21, s43 -; SI-NEXT: v_alignbit_b32 v24, s44, v22, 24 -; SI-NEXT: v_alignbit_b32 v25, s44, v22, 16 -; SI-NEXT: v_alignbit_b32 v26, s44, v22, 8 -; SI-NEXT: v_mov_b32_e32 v22, s47 -; SI-NEXT: v_mov_b32_e32 v23, s28 -; SI-NEXT: v_mov_b32_e32 v29, s26 -; SI-NEXT: v_mov_b32_e32 v35, s24 -; SI-NEXT: v_mov_b32_e32 v39, s22 -; SI-NEXT: v_mov_b32_e32 v50, s20 -; SI-NEXT: v_mov_b32_e32 v53, s18 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s8, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s8, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s8, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s10, v9, 24 -; 
SI-NEXT: v_alignbit_b32 v8, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s10, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s12, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s12, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s12, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s14, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s14, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s14, v15, 8 -; SI-NEXT: v_alignbit_b32 v16, s40, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s40, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s40, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8 -; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24 -; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16 -; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8 -; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24 -; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8 -; SI-NEXT: v_alignbit_b32 v27, s27, v29, 24 -; SI-NEXT: v_alignbit_b32 v28, s27, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, s27, v29, 8 -; SI-NEXT: v_alignbit_b32 v33, s25, v35, 24 -; SI-NEXT: v_alignbit_b32 v34, s25, v35, 16 -; SI-NEXT: v_alignbit_b32 v35, s25, v35, 8 -; SI-NEXT: v_alignbit_b32 v37, s23, v39, 24 -; SI-NEXT: v_alignbit_b32 v38, s23, v39, 16 -; SI-NEXT: v_alignbit_b32 v39, s23, v39, 8 -; SI-NEXT: v_alignbit_b32 v48, s21, v50, 24 -; SI-NEXT: v_alignbit_b32 v49, s21, v50, 16 -; SI-NEXT: v_alignbit_b32 v50, s21, v50, 8 -; SI-NEXT: v_alignbit_b32 v51, s19, v53, 24 -; SI-NEXT: v_alignbit_b32 v52, s19, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, s19, v53, 8 -; SI-NEXT: v_alignbit_b32 v54, s17, v40, 24 -; SI-NEXT: v_alignbit_b32 v55, s17, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, s17, v40, 8 -; SI-NEXT: s_lshr_b32 s56, s6, 24 -; SI-NEXT: s_lshr_b32 s57, s6, 16 -; SI-NEXT: s_lshr_b32 s58, s6, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s60, s8, 16 -; SI-NEXT: s_lshr_b32 s61, s8, 8 -; SI-NEXT: s_lshr_b32 s62, s10, 24 -; SI-NEXT: s_lshr_b32 s63, s10, 16 -; SI-NEXT: s_lshr_b32 s72, s10, 8 -; SI-NEXT: s_lshr_b32 s73, s12, 24 -; SI-NEXT: s_lshr_b32 s74, s12, 16 -; SI-NEXT: s_lshr_b32 s75, s12, 8 -; SI-NEXT: s_lshr_b32 s76, s14, 24 -; SI-NEXT: s_lshr_b32 s77, s14, 16 -; SI-NEXT: s_lshr_b32 s78, s14, 8 -; SI-NEXT: s_lshr_b32 s79, s40, 24 -; SI-NEXT: s_lshr_b32 s88, s40, 16 -; SI-NEXT: s_lshr_b32 s89, s40, 8 -; SI-NEXT: s_lshr_b32 s90, s42, 24 -; SI-NEXT: s_lshr_b32 s91, s42, 16 -; SI-NEXT: s_lshr_b32 s92, s42, 8 -; SI-NEXT: s_lshr_b32 s93, s44, 24 -; SI-NEXT: s_lshr_b32 s94, s44, 16 -; SI-NEXT: s_lshr_b32 s95, s44, 8 -; SI-NEXT: s_lshr_b32 s30, s46, 24 -; SI-NEXT: s_lshr_b32 s31, s46, 16 -; SI-NEXT: s_lshr_b32 s34, s46, 8 -; SI-NEXT: s_lshr_b32 s35, s29, 24 -; SI-NEXT: s_lshr_b32 s36, s29, 16 -; SI-NEXT: s_lshr_b32 s37, s29, 8 -; SI-NEXT: s_lshr_b32 s38, s27, 24 -; SI-NEXT: s_lshr_b32 s39, s27, 16 -; SI-NEXT: s_lshr_b32 s48, s27, 8 -; SI-NEXT: s_lshr_b32 s49, s25, 24 -; SI-NEXT: s_lshr_b32 s50, s25, 16 -; SI-NEXT: s_lshr_b32 s51, s25, 8 -; SI-NEXT: s_lshr_b32 s52, s23, 24 -; SI-NEXT: s_lshr_b32 s53, s23, 16 -; SI-NEXT: s_lshr_b32 s54, s23, 8 -; SI-NEXT: s_lshr_b32 s55, s21, 24 -; SI-NEXT: s_lshr_b32 s64, s21, 16 -; SI-NEXT: s_lshr_b32 s65, s21, 8 -; SI-NEXT: s_lshr_b32 s66, s19, 24 -; SI-NEXT: s_lshr_b32 s67, s19, 16 -; SI-NEXT: s_lshr_b32 s68, s19, 8 -; SI-NEXT: s_lshr_b32 s69, s17, 24 -; SI-NEXT: s_lshr_b32 s70, s17, 16 -; SI-NEXT: s_lshr_b32 s71, s17, 8 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v22, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: 
v_writelane_b32 v22, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 40 +; SI-NEXT: v_writelane_b32 v22, s47, 41 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 38 +; SI-NEXT: v_writelane_b32 v22, s47, 39 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 36 +; SI-NEXT: v_writelane_b32 v22, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 34 +; SI-NEXT: v_writelane_b32 v22, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 32 +; SI-NEXT: v_writelane_b32 v22, s47, 33 
+; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 30 +; SI-NEXT: v_writelane_b32 v22, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 28 +; SI-NEXT: v_writelane_b32 v22, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 26 +; SI-NEXT: v_writelane_b32 v22, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 24 +; SI-NEXT: v_writelane_b32 v22, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 22 +; SI-NEXT: v_writelane_b32 v22, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 20 +; SI-NEXT: v_writelane_b32 v22, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 18 +; SI-NEXT: v_writelane_b32 v22, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 16 +; SI-NEXT: v_writelane_b32 v22, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 14 +; SI-NEXT: v_writelane_b32 v22, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 12 +; SI-NEXT: v_writelane_b32 v22, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 10 +; SI-NEXT: v_writelane_b32 v22, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 6 +; SI-NEXT: v_writelane_b32 v22, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 4 +; SI-NEXT: v_writelane_b32 v22, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 2 +; SI-NEXT: v_writelane_b32 v22, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 0 +; SI-NEXT: s_lshr_b32 s50, s9, 24 +; SI-NEXT: s_lshr_b32 s51, s21, 8 +; SI-NEXT: s_lshr_b32 s48, s19, 24 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s19, 8 +; SI-NEXT: s_lshr_b32 s54, s17, 24 +; SI-NEXT: s_lshr_b32 s55, s17, 16 +; SI-NEXT: s_lshr_b32 s49, s17, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 1 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 ; SI-NEXT: 
s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 40 +; SI-NEXT: v_writelane_b32 v22, s47, 41 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 38 +; SI-NEXT: v_writelane_b32 v22, s47, 39 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: s_add_i32 s7, s7, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: s_add_i32 s9, s9, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: s_add_i32 s11, s11, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: s_add_i32 s13, s13, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: s_add_i32 s41, s41, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: s_add_i32 s43, s43, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s43, 16 ; SI-NEXT: s_add_i32 s45, s45, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: s_add_i32 s29, s29, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: s_add_i32 s27, s27, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s23, 
16 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: s_add_i32 s6, s6, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 36 +; SI-NEXT: v_writelane_b32 v22, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 34 +; SI-NEXT: v_writelane_b32 v22, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 32 +; SI-NEXT: v_writelane_b32 v22, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 30 +; SI-NEXT: v_writelane_b32 v22, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 28 +; SI-NEXT: v_writelane_b32 v22, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 26 +; SI-NEXT: v_writelane_b32 v22, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 24 +; SI-NEXT: v_writelane_b32 v22, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 22 +; SI-NEXT: v_writelane_b32 v22, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 20 +; SI-NEXT: v_writelane_b32 v22, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 18 +; SI-NEXT: v_writelane_b32 v22, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 16 +; SI-NEXT: v_writelane_b32 v22, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 14 +; SI-NEXT: v_writelane_b32 v22, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 12 +; SI-NEXT: v_writelane_b32 v22, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 10 +; SI-NEXT: v_writelane_b32 v22, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: s_add_i32 s40, s40, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 6 +; SI-NEXT: v_writelane_b32 v22, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 4 +; SI-NEXT: v_writelane_b32 v22, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 2 +; SI-NEXT: v_writelane_b32 v22, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s47, s47, 3 ; SI-NEXT: s_add_i32 s44, s44, 3 -; SI-NEXT: s_add_i32 s43, s43, 3 -; SI-NEXT: s_add_i32 s41, s41, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: v_mov_b32_e32 v22, s45 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 
s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s46, s46, 3 ; SI-NEXT: s_add_i32 s42, s42, 3 -; SI-NEXT: s_add_i32 s40, s40, 3 -; SI-NEXT: s_add_i32 s14, s14, 3 -; SI-NEXT: s_add_i32 s12, s12, 3 -; SI-NEXT: s_add_i32 s10, s10, 3 -; SI-NEXT: s_add_i32 s8, s8, 3 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s9 -; SI-NEXT: v_mov_b32_e32 v9, s11 -; SI-NEXT: v_mov_b32_e32 v12, s13 -; SI-NEXT: v_mov_b32_e32 v15, s15 -; SI-NEXT: v_mov_b32_e32 v18, s41 -; SI-NEXT: v_mov_b32_e32 v21, s43 -; SI-NEXT: v_alignbit_b32 v24, s44, v22, 24 -; SI-NEXT: v_alignbit_b32 v25, s44, v22, 16 -; SI-NEXT: v_alignbit_b32 v26, s44, v22, 8 -; SI-NEXT: v_mov_b32_e32 v22, s47 -; SI-NEXT: v_mov_b32_e32 v23, s28 -; SI-NEXT: v_mov_b32_e32 v29, s26 -; SI-NEXT: v_mov_b32_e32 v35, s24 -; SI-NEXT: v_mov_b32_e32 v39, s22 -; SI-NEXT: v_mov_b32_e32 v50, s20 -; SI-NEXT: v_mov_b32_e32 v53, s18 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s8, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s8, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s8, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s10, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s10, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s12, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s12, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s12, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s14, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s14, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s14, v15, 8 -; SI-NEXT: v_alignbit_b32 v16, s40, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s40, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s40, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8 -; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24 -; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16 -; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8 -; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24 -; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8 -; SI-NEXT: v_alignbit_b32 v27, s27, v29, 24 -; SI-NEXT: v_alignbit_b32 v28, s27, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, s27, v29, 8 -; SI-NEXT: v_alignbit_b32 v33, s25, v35, 24 -; SI-NEXT: v_alignbit_b32 v34, s25, v35, 16 -; SI-NEXT: v_alignbit_b32 v35, s25, v35, 8 -; SI-NEXT: v_alignbit_b32 v37, s23, v39, 24 -; SI-NEXT: v_alignbit_b32 v38, s23, v39, 16 -; SI-NEXT: v_alignbit_b32 v39, s23, v39, 8 -; SI-NEXT: v_alignbit_b32 v48, s21, v50, 24 -; SI-NEXT: v_alignbit_b32 v49, s21, v50, 16 -; SI-NEXT: v_alignbit_b32 v50, s21, v50, 8 -; SI-NEXT: v_alignbit_b32 v51, s19, v53, 24 -; SI-NEXT: v_alignbit_b32 v52, s19, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, s19, v53, 8 -; SI-NEXT: v_alignbit_b32 v54, s17, v40, 24 -; SI-NEXT: v_alignbit_b32 v55, s17, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, s17, v40, 8 -; SI-NEXT: s_lshr_b32 s56, s6, 24 -; SI-NEXT: s_lshr_b32 s57, s6, 16 -; SI-NEXT: s_lshr_b32 s58, s6, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s60, s8, 16 -; SI-NEXT: s_lshr_b32 s61, s8, 8 -; SI-NEXT: s_lshr_b32 s62, s10, 24 -; SI-NEXT: s_lshr_b32 s63, s10, 16 -; SI-NEXT: s_lshr_b32 s72, s10, 8 -; SI-NEXT: s_lshr_b32 s73, s12, 24 -; SI-NEXT: s_lshr_b32 s74, s12, 16 -; SI-NEXT: s_lshr_b32 s75, s12, 8 -; SI-NEXT: s_lshr_b32 s76, s14, 24 -; 
SI-NEXT: s_lshr_b32 s77, s14, 16 -; SI-NEXT: s_lshr_b32 s78, s14, 8 -; SI-NEXT: s_lshr_b32 s79, s40, 24 -; SI-NEXT: s_lshr_b32 s88, s40, 16 -; SI-NEXT: s_lshr_b32 s89, s40, 8 -; SI-NEXT: s_lshr_b32 s90, s42, 24 -; SI-NEXT: s_lshr_b32 s91, s42, 16 -; SI-NEXT: s_lshr_b32 s92, s42, 8 -; SI-NEXT: s_lshr_b32 s93, s44, 24 -; SI-NEXT: s_lshr_b32 s94, s44, 16 -; SI-NEXT: s_lshr_b32 s95, s44, 8 -; SI-NEXT: s_lshr_b32 s30, s46, 24 -; SI-NEXT: s_lshr_b32 s31, s46, 16 -; SI-NEXT: s_lshr_b32 s34, s46, 8 -; SI-NEXT: s_lshr_b32 s35, s29, 24 -; SI-NEXT: s_lshr_b32 s36, s29, 16 -; SI-NEXT: s_lshr_b32 s37, s29, 8 -; SI-NEXT: s_lshr_b32 s38, s27, 24 -; SI-NEXT: s_lshr_b32 s39, s27, 16 -; SI-NEXT: s_lshr_b32 s48, s27, 8 -; SI-NEXT: s_lshr_b32 s49, s25, 24 -; SI-NEXT: s_lshr_b32 s50, s25, 16 -; SI-NEXT: s_lshr_b32 s51, s25, 8 -; SI-NEXT: s_lshr_b32 s52, s23, 24 -; SI-NEXT: s_lshr_b32 s53, s23, 16 -; SI-NEXT: s_lshr_b32 s54, s23, 8 -; SI-NEXT: s_lshr_b32 s55, s21, 24 -; SI-NEXT: s_lshr_b32 s64, s21, 16 -; SI-NEXT: s_lshr_b32 s65, s21, 8 -; SI-NEXT: s_lshr_b32 s66, s19, 24 -; SI-NEXT: s_lshr_b32 s67, s19, 16 -; SI-NEXT: s_lshr_b32 s68, s19, 8 -; SI-NEXT: s_lshr_b32 s69, s17, 24 -; SI-NEXT: s_lshr_b32 s70, s17, 16 -; SI-NEXT: s_lshr_b32 s71, s17, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 0 +; SI-NEXT: s_lshr_b32 s50, s9, 24 +; SI-NEXT: s_lshr_b32 s51, s21, 8 +; SI-NEXT: s_lshr_b32 s48, s19, 24 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s19, 8 +; SI-NEXT: s_lshr_b32 s54, s17, 24 +; SI-NEXT: s_lshr_b32 s55, s17, 16 +; SI-NEXT: s_lshr_b32 s49, s17, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 1 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v40 -; SI-NEXT: v_or_b32_e32 v40, s4, v40 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s71, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s70, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s69, 24 -; SI-NEXT: v_and_b32_e32 v55, 0xff, v55 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v55 -; SI-NEXT: v_lshlrev_b32_e32 v54, 24, v54 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v54, v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, s4 -; SI-NEXT: s_and_b32 s4, 
s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v53, 8, v53 -; SI-NEXT: v_or_b32_e32 v53, s4, v53 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s68, 8 -; SI-NEXT: v_and_b32_e32 v52, 0xff, v52 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s67, 0xff -; SI-NEXT: v_and_b32_e32 v40, 0xffff, v40 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v52 -; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v51 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s66, 24 -; SI-NEXT: v_or_b32_e32 v54, v40, v54 -; SI-NEXT: v_and_b32_e32 v53, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v51, v51, v52 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: buffer_store_dword v54, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v54, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v51, v53, v51 -; SI-NEXT: v_add_i32_e32 v52, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v55, v54, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v51, v52, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v52, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v50, 8, v50 -; SI-NEXT: v_or_b32_e32 v50, s4, v50 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s65, 8 -; SI-NEXT: v_and_b32_e32 v49, 0xff, v49 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s64, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 -; SI-NEXT: v_lshlrev_b32_e32 v48, 24, v48 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s55, 24 -; SI-NEXT: v_and_b32_e32 v50, 0xffff, v50 -; SI-NEXT: v_or_b32_e32 v48, v48, v49 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: s_lshl_b32 s47, s38, 8 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: s_and_b32 s47, s36, 0xff +; SI-NEXT: s_lshl_b32 s57, s34, 24 +; SI-NEXT: s_lshl_b32 s47, s47, 16 +; SI-NEXT: s_or_b32 s47, s57, s47 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xff +; SI-NEXT: s_lshl_b32 s17, s49, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s55, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s47, s54, 24 +; SI-NEXT: s_or_b32 s17, s47, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s30, 8 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s94, 0xff +; SI-NEXT: s_lshl_b32 s18, s92, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xff +; SI-NEXT: s_lshl_b32 s17, s53, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s52, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s48, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s90, 8 +; SI-NEXT: s_and_b32 s17, s20, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s88, 0xff +; SI-NEXT: s_lshl_b32 s18, s78, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xff +; SI-NEXT: s_lshl_b32 s17, s51, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 
+; SI-NEXT: v_readlane_b32 s17, v21, 17 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 16 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v6, s16 +; SI-NEXT: s_lshl_b32 s16, s76, 8 +; SI-NEXT: s_and_b32 s17, s22, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s74, 0xff +; SI-NEXT: s_lshl_b32 s18, s72, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 15 +; SI-NEXT: v_mov_b32_e32 v7, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 14 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 13 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v8, s16 +; SI-NEXT: s_lshl_b32 s16, s62, 8 +; SI-NEXT: s_and_b32 s17, s24, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s60, 0xff +; SI-NEXT: s_lshl_b32 s18, s58, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 12 +; SI-NEXT: v_mov_b32_e32 v9, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 11 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 10 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: s_lshl_b32 s16, s56, 8 +; SI-NEXT: s_and_b32 s17, s26, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s46, 0xff +; SI-NEXT: s_lshl_b32 s18, s98, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 9 +; SI-NEXT: v_mov_b32_e32 v11, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 8 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 7 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v12, s16 +; SI-NEXT: s_lshl_b32 s16, s96, 8 +; SI-NEXT: s_and_b32 s17, s28, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s86, 0xff +; SI-NEXT: s_lshl_b32 s18, s84, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 6 +; SI-NEXT: v_mov_b32_e32 v13, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 5 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 4 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: 
s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v14, s16 +; SI-NEXT: s_lshl_b32 s16, s82, 8 +; SI-NEXT: s_and_b32 s17, s44, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s80, 0xff +; SI-NEXT: s_lshl_b32 s18, s70, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 3 +; SI-NEXT: v_mov_b32_e32 v15, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 2 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v51, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v48, v50, v48 -; SI-NEXT: v_add_i32_e32 v49, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v52, v51, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v48, v49, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v49, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v39, 8, v39 -; SI-NEXT: v_or_b32_e32 v39, s4, v39 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s54, 8 -; SI-NEXT: v_and_b32_e32 v38, 0xff, v38 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s53, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v38 -; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v37 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s52, 24 -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v37, v37, v38 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: v_mov_b32_e32 v16, s16 +; SI-NEXT: s_lshl_b32 s16, s68, 8 +; SI-NEXT: s_and_b32 s17, s42, 0xff +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s66, 0xff +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_lshl_b32 s18, s64, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: buffer_store_dword v7, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: buffer_store_dword v8, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v9, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: v_readlane_b32 s17, v21, 0 +; SI-NEXT: buffer_store_dword v10, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: buffer_store_dword v11, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 63 +; SI-NEXT: buffer_store_dword 
v12, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 62 +; SI-NEXT: buffer_store_dword v13, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v14, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v22, 0 +; SI-NEXT: buffer_store_dword v15, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: v_readlane_b32 s19, v22, 1 +; SI-NEXT: buffer_store_dword v16, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_lshl_b32 s17, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v22, 2 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v48, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v37, v39, v37 -; SI-NEXT: v_add_i32_e32 v38, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v49, v48, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v37, v38, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v38, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v35, 8, v35 -; SI-NEXT: v_or_b32_e32 v35, s4, v35 -; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s51, 8 -; SI-NEXT: v_and_b32_e32 v34, 0xff, v34 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s50, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34 -; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v33 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s49, 24 -; SI-NEXT: v_and_b32_e32 v35, 0xffff, v35 -; SI-NEXT: v_or_b32_e32 v33, v33, v34 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: v_readlane_b32 s19, v22, 3 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 4 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 61 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v37, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v33, v35, v33 -; SI-NEXT: v_add_i32_e32 v34, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v38, v37, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v33, v34, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v34, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v29, 8, v29 -; SI-NEXT: v_or_b32_e32 v29, s4, v29 -; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s5, s48, 8 -; SI-NEXT: v_and_b32_e32 v28, 0xff, v28 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s39, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s38, 24 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 60 +; SI-NEXT: s_and_b32 s17, 
s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 59 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v33, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v27, v29, v27 -; SI-NEXT: v_add_i32_e32 v28, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v34, v33, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v28, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; SI-NEXT: v_or_b32_e32 v23, s4, v23 -; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s37, 8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: v_readlane_b32 s16, v22, 6 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s17, v22, 7 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: v_readlane_b32 s19, v22, 5 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: v_readlane_b32 s16, v22, 8 +; SI-NEXT: v_readlane_b32 s17, v22, 9 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 10 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s18, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v27, vcc, 44, v0 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s36, 0xff -; SI-NEXT: buffer_store_dword v28, v27, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v36 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s35, 24 -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 -; SI-NEXT: v_or_b32_e32 v22, v27, v22 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_add_i32_e32 v23, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: v_readlane_b32 s15, v22, 58 +; SI-NEXT: s_lshl_b32 s15, s15, 8 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_readlane_b32 s15, v22, 57 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: v_readlane_b32 s16, v22, 56 +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_lshl_b32 s16, s16, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s47, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v32 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s46, 0xff -; SI-NEXT: s_lshl_b32 s5, s34, 8 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_readlane_b32 s14, v22, 12 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s15, v22, 13 +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_readlane_b32 s14, v22, 14 +; SI-NEXT: v_readlane_b32 s15, v22, 15 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: 
v_readlane_b32 s16, v22, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s16, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v31 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s31, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v30 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s30, 24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_or_b32_e32 v23, v27, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: v_readlane_b32 s13, v22, 55 +; SI-NEXT: s_lshl_b32 s13, s13, 8 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_readlane_b32 s13, v22, 54 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: v_readlane_b32 s14, v22, 53 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 60, v0 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v26 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s44, 0xff -; SI-NEXT: s_lshl_b32 s5, s95, 8 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_readlane_b32 s12, v22, 18 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s13, v22, 19 +; SI-NEXT: s_lshl_b32 s12, s12, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: v_readlane_b32 s12, v22, 20 +; SI-NEXT: v_readlane_b32 s13, v22, 21 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s14, v22, 22 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s14, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v25 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s94, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s93, 24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_or_b32_e32 v23, v24, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_or_b32_e32 v21, s4, v21 -; SI-NEXT: s_and_b32 s4, s42, 0xff -; SI-NEXT: s_lshl_b32 s5, s92, 8 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s91, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; 
SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s90, 24 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: v_readlane_b32 s11, v22, 52 +; SI-NEXT: s_lshl_b32 s11, s11, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_readlane_b32 s11, v22, 51 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: v_readlane_b32 s12, v22, 50 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s12, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v19, v21, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v20, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 -; SI-NEXT: v_or_b32_e32 v18, s4, v18 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s5, s89, 8 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s88, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s79, 24 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: v_readlane_b32 s10, v22, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s11, v22, 25 +; SI-NEXT: s_lshl_b32 s10, s10, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: v_readlane_b32 s10, v22, 26 +; SI-NEXT: v_readlane_b32 s11, v22, 27 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s12, v22, 28 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v19, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v16, v18, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v20, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s5, s78, 8 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s77, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s76, 24 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: v_readlane_b32 s9, v22, 49 +; SI-NEXT: s_lshl_b32 s9, s9, 8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_readlane_b32 s9, v22, 48 +; 
SI-NEXT: s_and_b32 s9, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s50, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: s_lshl_b32 s5, s75, 8 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s74, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s73, 24 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_readlane_b32 s8, v22, 30 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s9, v22, 31 +; SI-NEXT: s_lshl_b32 s8, s8, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: v_readlane_b32 s8, v22, 32 +; SI-NEXT: v_readlane_b32 s9, v22, 33 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s10, v22, 34 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s72, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s63, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s62, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s10, s5 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: v_readlane_b32 s7, v22, 47 +; SI-NEXT: s_lshl_b32 s7, s7, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_readlane_b32 s7, v22, 46 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: v_readlane_b32 s8, v22, 45 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s8, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; 
SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s61, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s60, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s8, s59, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_readlane_b32 s6, v22, 36 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: v_readlane_b32 s7, v22, 37 +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: v_readlane_b32 s6, v22, 38 +; SI-NEXT: v_readlane_b32 s7, v22, 39 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s8, v22, 40 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s8, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; SI-NEXT: s_lshl_b32 s5, s58, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: v_readlane_b32 s5, v22, 44 +; SI-NEXT: s_lshl_b32 s5, s5, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s57, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: v_readlane_b32 s5, v22, 43 +; SI-NEXT: s_and_b32 s5, s5, 0xff +; SI-NEXT: v_readlane_b32 s6, v22, 42 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s56, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s6, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x74, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_readlane_b32 s19, v22, 11 +; SI-NEXT: v_readlane_b32 s17, v22, 17 +; SI-NEXT: v_readlane_b32 s15, v22, 23 +; SI-NEXT: v_readlane_b32 s13, v22, 29 +; SI-NEXT: v_readlane_b32 s11, v22, 35 +; SI-NEXT: v_readlane_b32 s9, v22, 41 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s71, v41, 23 -; SI-NEXT: v_readlane_b32 s70, v41, 22 -; SI-NEXT: v_readlane_b32 s69, v41, 21 -; SI-NEXT: v_readlane_b32 s68, v41, 20 -; 
SI-NEXT: v_readlane_b32 s67, v41, 19 -; SI-NEXT: v_readlane_b32 s66, v41, 18 -; SI-NEXT: v_readlane_b32 s65, v41, 17 -; SI-NEXT: v_readlane_b32 s64, v41, 16 -; SI-NEXT: v_readlane_b32 s55, v41, 15 -; SI-NEXT: v_readlane_b32 s54, v41, 14 -; SI-NEXT: v_readlane_b32 s53, v41, 13 -; SI-NEXT: v_readlane_b32 s52, v41, 12 -; SI-NEXT: v_readlane_b32 s51, v41, 11 -; SI-NEXT: v_readlane_b32 s50, v41, 10 -; SI-NEXT: v_readlane_b32 s49, v41, 9 -; SI-NEXT: v_readlane_b32 s48, v41, 8 -; SI-NEXT: v_readlane_b32 s39, v41, 7 -; SI-NEXT: v_readlane_b32 s38, v41, 6 -; SI-NEXT: v_readlane_b32 s37, v41, 5 -; SI-NEXT: v_readlane_b32 s36, v41, 4 -; SI-NEXT: v_readlane_b32 s35, v41, 3 -; SI-NEXT: v_readlane_b32 s34, v41, 2 -; SI-NEXT: v_readlane_b32 s31, v41, 1 -; SI-NEXT: v_readlane_b32 s30, v41, 0 -; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s99, v20, 35 +; SI-NEXT: v_readlane_b32 s98, v20, 34 +; SI-NEXT: v_readlane_b32 s97, v20, 33 +; SI-NEXT: v_readlane_b32 s96, v20, 32 +; SI-NEXT: v_readlane_b32 s87, v20, 31 +; SI-NEXT: v_readlane_b32 s86, v20, 30 +; SI-NEXT: v_readlane_b32 s85, v20, 29 +; SI-NEXT: v_readlane_b32 s84, v20, 28 +; SI-NEXT: v_readlane_b32 s83, v20, 27 +; SI-NEXT: v_readlane_b32 s82, v20, 26 +; SI-NEXT: v_readlane_b32 s81, v20, 25 +; SI-NEXT: v_readlane_b32 s80, v20, 24 +; SI-NEXT: v_readlane_b32 s71, v20, 23 +; SI-NEXT: v_readlane_b32 s70, v20, 22 +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $sgpr71 -; SI-NEXT: ; implicit-def: $sgpr70 -; SI-NEXT: ; implicit-def: $sgpr69 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $sgpr68 -; SI-NEXT: ; implicit-def: $sgpr67 -; SI-NEXT: ; implicit-def: $sgpr66 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $sgpr65 -; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
v_writelane_b32 v22, s50, 0 +; SI-NEXT: v_writelane_b32 v22, s51, 1 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 2 +; SI-NEXT: v_writelane_b32 v22, s51, 3 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 4 +; SI-NEXT: v_writelane_b32 v22, s51, 5 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 6 +; SI-NEXT: v_writelane_b32 v22, s51, 7 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 8 +; SI-NEXT: v_writelane_b32 v22, s51, 9 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 10 +; SI-NEXT: v_writelane_b32 v22, s51, 11 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 12 +; SI-NEXT: v_writelane_b32 v22, s51, 13 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 14 +; SI-NEXT: v_writelane_b32 v22, s51, 15 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 16 +; SI-NEXT: v_writelane_b32 v22, s51, 17 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 18 +; SI-NEXT: v_writelane_b32 v22, s51, 19 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 20 +; SI-NEXT: v_writelane_b32 v22, s51, 21 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 22 +; SI-NEXT: v_writelane_b32 v22, s51, 23 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 24 +; SI-NEXT: v_writelane_b32 v22, s51, 25 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 26 +; SI-NEXT: v_writelane_b32 v22, s51, 27 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; 
SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 28 +; SI-NEXT: v_writelane_b32 v22, s51, 29 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 30 +; SI-NEXT: v_writelane_b32 v22, s51, 31 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 32 +; SI-NEXT: v_writelane_b32 v22, s51, 33 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 34 +; SI-NEXT: v_writelane_b32 v22, s51, 35 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 36 +; SI-NEXT: v_writelane_b32 v22, s51, 37 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 38 +; SI-NEXT: v_writelane_b32 v22, s51, 39 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr55 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr52 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $sgpr51 -; SI-NEXT: ; implicit-def: $sgpr50 -; SI-NEXT: ; implicit-def: $sgpr49 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $sgpr48 -; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; kill: killed $sgpr46 ; SI-NEXT: ; implicit-def: $sgpr38 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr36 -; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr34 -; SI-NEXT: ; implicit-def: $sgpr31 ; SI-NEXT: ; implicit-def: $sgpr30 -; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr94 -; SI-NEXT: ; implicit-def: $sgpr93 ; SI-NEXT: ; implicit-def: $sgpr92 -; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr90 -; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr88 -; SI-NEXT: ; implicit-def: $sgpr79 ; SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $sgpr63 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $sgpr57 
-; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 40 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: v_writelane_b32 v22, s51, 41 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v32i32_to_v128i8_scalar: @@ -17296,8 +17632,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 ; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 @@ -17305,133 +17648,93 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:16 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24 ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:80 
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:88 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:96 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:104 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:112 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:80 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:88 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:96 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:104 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:112 ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:152 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:160 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:168 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:176 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:128 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:136 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:144 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:152 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:160 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:168 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:176 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v9 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v11 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v13 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v17 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v21 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v3 -; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v62, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v30, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v15 +; 
SI-NEXT: v_lshlrev_b32_e32 v63, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v27 +; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v19 +; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v25 +; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v29 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v23 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v25 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v29 +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v45 +; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v44 +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v43 +; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v42 +; SI-NEXT: v_lshlrev_b32_e32 v41, 8, v41 +; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v55 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v53 +; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v40 +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v45 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v44 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v52 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v51 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v43 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v42 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v34 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v41 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v40 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_lshlrev_b32_e32 v0, 8, v55 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v53 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v52 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v39 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v51 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v48 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v33 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v35 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:200 @@ -17440,31 +17743,31 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:224 ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:232 ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v32 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword 
v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:248 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 @@ -17476,140 +17779,206 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:304 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; SI-NEXT: 
buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 ; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:52 ; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:84 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:92 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:100 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:108 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:116 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:124 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132 -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:164 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:172 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:180 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:196 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:212 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:220 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:228 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:84 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:92 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:100 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:108 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:116 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:124 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:132 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 
offset:140 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:148 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:156 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:164 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:172 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:180 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:196 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:220 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:228 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:244 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:252 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:260 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:268 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:244 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:252 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:268 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:276 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:284 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:292 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:300 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:308 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:316 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:648 ; 4-byte Folded 
Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:276 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:284 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; 
SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:308 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:324 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB15_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v57, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 -; SI-NEXT: v_or_b32_e32 v0, v0, v60 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 +; SI-NEXT: v_or_b32_e32 v0, v0, v20 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v4, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 +; SI-NEXT: v_or_b32_e32 v0, v0, v30 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v30, v1 +; 
SI-NEXT: v_or_b32_e32 v1, v47, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v6, v0, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_mov_b32_e32 v30, v5 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 +; SI-NEXT: v_or_b32_e32 v2, v2, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v0, v0, v61 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v26, v3 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v5, v2, v3 +; SI-NEXT: v_mov_b32_e32 v3, v7 +; SI-NEXT: v_mov_b32_e32 v2, v9 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 @@ -17618,306 +17987,277 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_lshl_b32 s7, s23, 24 ; SI-NEXT: s_lshl_b32 s8, s27, 24 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v4, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v5, v2, v3 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 -; SI-NEXT: v_mov_b32_e32 v3, v7 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v6, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_or_b32_e32 v0, v0, v15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v58, v1 ; SI-NEXT: v_or_b32_e32 v7, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v20 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v57 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: v_or_b32_e32 v8, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: v_mov_b32_e32 v2, v9 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v9, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v26 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v28 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v10, v1 +; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v10, v0, v1 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; 
SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v29, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v0, v15 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v11, v1 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v11, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v23 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v50 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 +; SI-NEXT: v_or_b32_e32 v0, v0, v17 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v12, v1 +; SI-NEXT: v_or_b32_e32 v1, v44, v1 ; SI-NEXT: v_or_b32_e32 v12, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v25 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v45 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, v0, v41 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v13, v1 +; SI-NEXT: v_or_b32_e32 v1, v25, v1 ; SI-NEXT: v_or_b32_e32 v13, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v58 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v58, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v36 +; SI-NEXT: v_or_b32_e32 v0, v0, v40 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v14, v1 +; SI-NEXT: v_or_b32_e32 v1, v21, v1 ; SI-NEXT: v_or_b32_e32 v14, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v27 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v60, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v62 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v18 +; SI-NEXT: v_or_b32_e32 v0, v0, v32 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_or_b32_e32 v1, v15, v1 ; SI-NEXT: v_or_b32_e32 v15, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v43 -; SI-NEXT: v_mov_b32_e32 v43, v16 -; 
SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_mov_b32_e32 v50, v16 +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v21 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v48 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v16, v1 ; SI-NEXT: v_or_b32_e32 v16, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 +; SI-NEXT: v_mov_b32_e32 v48, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_mov_b32_e32 v32, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v34 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v17, v1 ; SI-NEXT: v_or_b32_e32 v17, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 -; SI-NEXT: v_mov_b32_e32 v55, v22 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v51, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v33 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v40, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v44 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v18, v1 ; SI-NEXT: v_or_b32_e32 v18, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v50 -; SI-NEXT: v_mov_b32_e32 v44, v23 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v50, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v39 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v57, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v63 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v38 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v19, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v61 -; SI-NEXT: v_mov_b32_e32 v61, v45 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; 
SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v54 +; SI-NEXT: v_mov_b32_e32 v54, v23 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v40 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v37 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v20, v1 ; SI-NEXT: v_or_b32_e32 v20, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v52 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v21, v1 ; SI-NEXT: v_or_b32_e32 v21, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v59 -; SI-NEXT: v_mov_b32_e32 v59, v24 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v28 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v22, v1 ; SI-NEXT: v_or_b32_e32 v22, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v61 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v39, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_mov_b32_e32 v45, v24 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v34, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v23, v1 ; SI-NEXT: v_or_b32_e32 v23, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v59 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v58 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v24, v1 ; SI-NEXT: v_or_b32_e32 v24, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:568 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v42 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v43 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v1 +; SI-NEXT: v_mov_b32_e32 v43, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v25, v1 ; SI-NEXT: v_or_b32_e32 v25, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v45 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v56 +; SI-NEXT: v_mov_b32_e32 v55, v56 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v56 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v42 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v63, v1 +; SI-NEXT: v_or_b32_e32 v1, v33, v1 ; SI-NEXT: v_or_b32_e32 v26, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v48 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v46 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: v_mov_b32_e32 v46, v33 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v32, v1 +; SI-NEXT: v_mov_b32_e32 v37, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v46 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v27, v1 ; SI-NEXT: v_or_b32_e32 v27, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v28, v1 ; SI-NEXT: v_or_b32_e32 v28, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v37 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v62 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v62, v1 +; SI-NEXT: v_mov_b32_e32 v36, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v56 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v29, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v36 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 -; SI-NEXT: v_or_b32_e32 v0, v0, v30 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded 
Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v49 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v30, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 ; SI-NEXT: v_or_b32_e32 v0, v0, v3 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v57, v1 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v31, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v40 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v44 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: v_mov_b32_e32 v34, v35 -; SI-NEXT: v_mov_b32_e32 v35, v36 -; SI-NEXT: v_mov_b32_e32 v36, v54 -; SI-NEXT: v_mov_b32_e32 v54, v37 -; SI-NEXT: v_mov_b32_e32 v37, v41 -; SI-NEXT: v_mov_b32_e32 v41, v38 -; SI-NEXT: v_mov_b32_e32 v38, v63 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_or_b32_e32 v3, s4, v0 @@ -17944,108 +18284,112 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_or_b32 s6, s6, s7 -; SI-NEXT: v_mov_b32_e32 v57, v1 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_mov_b64 s[4:5], 0 ; SI-NEXT: s_branch .LBB15_3 ; SI-NEXT: .LBB15_2: -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; 
SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v55, v56 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v61, v45 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 s[4:5], -1 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mov_b32_e32 v45, v33 -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: v_mov_b32_e32 v34, v35 -; SI-NEXT: v_mov_b32_e32 v35, v36 -; SI-NEXT: v_mov_b32_e32 v36, v54 -; SI-NEXT: v_mov_b32_e32 v54, v37 -; SI-NEXT: v_mov_b32_e32 v37, v41 -; SI-NEXT: v_mov_b32_e32 v41, v38 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload ; SI-NEXT: .LBB15_3: ; %Flow -; SI-NEXT: v_mov_b32_e32 v63, v46 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_mov_b32_e32 v35, v57 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; SI-NEXT: s_cbranch_vccnz .LBB15_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v0, s4, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_and_b32 s4, s16, 0xff ; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s6, s18, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_and_b32 s8, s26, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, s4, v0 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s5, s20, 0xff ; SI-NEXT: s_lshl_b32 s6, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_and_b32 s7, s22, 0xff ; SI-NEXT: s_addk_i32 s5, 0x300 ; SI-NEXT: s_lshl_b32 s6, s23, 24 -; SI-NEXT: s_lshl_b32 s7, s7, 16 ; SI-NEXT: s_and_b32 s5, s5, 0xffff ; SI-NEXT: s_or_b32 s6, s6, s7 -; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_and_b32 s6, s24, 0xff ; SI-NEXT: s_lshl_b32 s7, s25, 8 -; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: s_and_b32 s8, s26, 0xff ; SI-NEXT: s_addk_i32 s6, 0x300 ; SI-NEXT: s_lshl_b32 s7, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_add_i32 s4, s4, 0x3000000 ; SI-NEXT: s_add_i32 s5, s5, 0x3000000 ; SI-NEXT: s_add_i32 s6, s6, 0x3000000 -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18054,17 +18398,17 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, 
s[0:3], s32 offset:668 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18074,15 +18418,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18092,15 +18436,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18110,15 +18454,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 -; SI-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18128,15 +18472,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18146,15 +18490,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18164,15 +18508,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 
0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18181,16 +18525,17 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18200,15 +18545,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18218,84 +18563,79 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; 
SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v63, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v43, v1 +; SI-NEXT: v_or_b32_e32 v1, v50, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword 
v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 @@ -18304,15 +18644,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v50, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 @@ -18321,15 +18661,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18339,9 +18679,9 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 @@ -18357,106 +18697,110 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v55, v1 +; SI-NEXT: v_or_b32_e32 v1, v48, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x3000000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 -; SI-NEXT: v_or_b32_e32 v0, v39, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_or_b32_e32 v1, v44, v1 +; SI-NEXT: v_or_b32_e32 v1, v54, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, 
off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_or_b32_e32 v1, v59, v1 +; SI-NEXT: v_or_b32_e32 v1, v45, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_or_b32_e32 v0, v42, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v38, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v46, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v41 +; SI-NEXT: v_or_b32_e32 v0, v37, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte 
Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -18464,14 +18808,14 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -18479,14 +18823,14 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -18516,7 +18860,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; 
SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v128i8_to_v32i32_scalar: @@ -18538,113 +18882,115 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24 -; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88 -; VI-NEXT: 
buffer_load_ushort v48, off, s[0:3], s32 offset:96 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104 -; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:112 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:48 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:80 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:88 +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:96 +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:104 +; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112 ; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 -; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:144 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:152 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:128 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:136 +; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:144 +; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:152 +; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:160 +; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:168 +; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:176 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v17 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v5 -; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v17 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23 -; 
VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v43 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v42 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v41 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v40 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v55 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v54 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v53 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v52 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v51 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v50 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v22 -; 
VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v30 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 @@ -18653,29 +18999,28 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 -; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v37 -; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v38 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v13 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v9 ; VI-NEXT: 
s_waitcnt vmcnt(5) -; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v13 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v3 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v7 ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 @@ -18685,130 +19030,141 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1 +; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v13 ; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 ; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328 ; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12 +; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20 +; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:28 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 +; VI-NEXT: s_waitcnt vmcnt(12) +; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v9 +; VI-NEXT: s_waitcnt vmcnt(10) +; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v7 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v37, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:44 -; 
VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84 -; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:92 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:100 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:108 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:116 -; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:124 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 +; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:76 +; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:84 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:92 +; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:100 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:108 +; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:116 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124 ; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140 -; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148 -; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156 -; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:172 -; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:180 -; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:188 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:196 -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:204 -; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212 -; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220 -; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228 -; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:236 -; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244 -; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:252 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:260 -; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:268 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:276 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:284 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:140 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:148 +; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:156 +; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:164 +; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:172 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:180 +; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:188 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:196 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:204 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:212 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:220 +; 
VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:228 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:244 +; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:252 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:260 +; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:268 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:284 ; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:292 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:308 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:316 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:324 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(12) -; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; VI-NEXT: 
buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:300 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:308 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:316 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:324 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:692 ; 4-byte 
Folded Spill +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill ; VI-NEXT: s_cbranch_scc0 .LBB15_2 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: v_or_b32_sdwa v0, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v4, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v4, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v3, off, 
s[0:3], s32 offset:480 ; 4-byte Folded Reload ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v2, v6, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -18817,208 +19173,197 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v3, v7 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; VI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v29, v9 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v50, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v59, v0 -; VI-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v47, v1 +; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v56, v0 -; VI-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 
src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v39, v0 -; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v46, v0 +; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v37, v0 -; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v62, v0 +; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v36, v0 -; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v35, v1 -; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v63, v1 +; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: 
v_mov_b32_e32 v33, v0 -; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_mov_b32_e32 v60, v0 +; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v21, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v51, v3 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_mov_b32_e32 v35, v0 +; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v34, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v22, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v34, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v23, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v43, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v43, v49 -; VI-NEXT: v_or_b32_sdwa v0, v30, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v32, v54 -; VI-NEXT: v_mov_b32_e32 v34, v26 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v49, v1 -; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v59, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v54, v0 -; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v32, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v32, v61 +; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v55, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v55, v43 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v46, v61 +; VI-NEXT: v_or_b32_sdwa v0, v42, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v54, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v53, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v45, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v41, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v44, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v41, v33 ; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v58, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v44, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v47, v45 +; VI-NEXT: v_mov_b32_e32 v44, v56 +; VI-NEXT: v_or_b32_sdwa v0, v56, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v56, 
off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v38, v39 +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v58, v44 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v48, v0 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v42, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v54, v53 +; VI-NEXT: v_mov_b32_e32 v52, v36 +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v0, v36, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v1, v33, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v50, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v40, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v63, v42 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v49, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v51, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v48, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v39, v45 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v50, v40 +; VI-NEXT: v_mov_b32_e32 v49, v51 +; VI-NEXT: v_mov_b32_e32 v40, v34 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v57, v0 -; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -19049,85 +19394,95 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: s_mov_b64 s[4:5], 0 ; VI-NEXT: s_branch .LBB15_3 ; VI-NEXT: .LBB15_2: -; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v32, v54 -; VI-NEXT: v_mov_b32_e32 v43, v49 -; VI-NEXT: v_mov_b32_e32 v46, v61 -; VI-NEXT: v_mov_b32_e32 v47, v45 -; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v34, v26 -; VI-NEXT: v_mov_b32_e32 v58, v44 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_mov_b32_e32 v63, v42 -; VI-NEXT: v_mov_b32_e32 v51, v7 -; VI-NEXT: v_mov_b32_e32 v48, v29 +; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v44, v56 +; VI-NEXT: v_mov_b32_e32 v41, v33 +; VI-NEXT: v_mov_b32_e32 v50, v40 +; VI-NEXT: v_mov_b32_e32 v38, v39 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; 
VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v54, v53 +; VI-NEXT: v_mov_b32_e32 v52, v36 +; VI-NEXT: v_mov_b32_e32 v49, v51 ; VI-NEXT: s_mov_b64 s[4:5], -1 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; VI-NEXT: .LBB15_3: ; %Flow +; VI-NEXT: v_mov_b32_e32 v51, v41 +; VI-NEXT: v_mov_b32_e32 v36, v44 +; VI-NEXT: v_mov_b32_e32 v53, v54 +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_mov_b32_e32 v54, v60 +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; VI-NEXT: v_mov_b32_e32 v44, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_mov_b32_e32 v46, v49 ; VI-NEXT: s_cbranch_vccnz .LBB15_5 ; VI-NEXT: ; %bb.4: ; %cmp.true -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s5, s4 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52 ; VI-NEXT: s_addk_i32 s4, 0x300 -; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_e32 v0, s4, v0 ; VI-NEXT: s_add_i32 s16, s16, 3 -; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_lshl_b32 s5, s17, 8 ; VI-NEXT: s_add_i32 s18, s18, 3 +; VI-NEXT: s_lshl_b32 s6, s19, 8 +; VI-NEXT: s_add_i32 s20, s20, 3 +; VI-NEXT: s_add_i32 s22, s22, 3 +; VI-NEXT: s_lshl_b32 s7, s23, 8 
+; VI-NEXT: s_add_i32 s24, s24, 3 +; VI-NEXT: s_add_i32 s26, s26, 3 +; VI-NEXT: s_lshl_b32 s8, s27, 8 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_e32 v0, s4, v0 +; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s18, 0xff -; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_addk_i32 s4, 0x300 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s4, s4, 0xffff ; VI-NEXT: s_lshl_b32 s5, s5, 16 -; VI-NEXT: s_add_i32 s20, s20, 3 ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s20, 0xff ; VI-NEXT: s_lshl_b32 s6, s21, 8 -; VI-NEXT: s_add_i32 s22, s22, 3 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s6, s22, 0xff -; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_addk_i32 s5, 0x300 ; VI-NEXT: s_or_b32 s6, s7, s6 ; VI-NEXT: s_and_b32 s5, s5, 0xffff ; VI-NEXT: s_lshl_b32 s6, s6, 16 -; VI-NEXT: s_add_i32 s24, s24, 3 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s6, s24, 0xff ; VI-NEXT: s_lshl_b32 s7, s25, 8 -; VI-NEXT: s_add_i32 s26, s26, 3 ; VI-NEXT: s_or_b32 s6, s7, s6 ; VI-NEXT: s_and_b32 s7, s26, 0xff -; VI-NEXT: s_lshl_b32 s8, s27, 8 ; VI-NEXT: s_addk_i32 s6, 0x300 ; VI-NEXT: s_or_b32 s7, s8, s7 ; VI-NEXT: s_and_b32 s6, s6, 0xffff @@ -19136,26 +19491,25 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 ; VI-NEXT: s_add_i32 s5, s5, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 -; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 ; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 @@ -19163,8 +19517,8 @@ define 
inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -19176,9 +19530,9 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -19190,14 +19544,14 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 @@ -19205,280 +19559,280 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], 
s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v44, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte 
Folded Reload +; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; 
VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword 
v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded 
Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: 
v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44 +; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v58 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v63 -; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52 +; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v62 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v49 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48 +; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v39 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v0 ; VI-NEXT: v_mov_b32_e32 v0, s4 @@ -19523,504 +19877,524 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; GFX9-NEXT: 
buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 ; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:56 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:64 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:80 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:88 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:96 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:104 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:112 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:80 +; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:88 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 
offset:96 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:104 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:112 ; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:144 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:152 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:128 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:136 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:144 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:152 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:160 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:168 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:176 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v11 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v17 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v21 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29 ; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; GFX9-NEXT: s_waitcnt vmcnt(35) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43 -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; 
GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184 +; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v23 +; GFX9-NEXT: 
v_lshlrev_b32_e32 v23, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v29 +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v27 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v45 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v43 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v34 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v42 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v55 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v49 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v32 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v39 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v37 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v24 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v40 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v54 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v36 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v31 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v38 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v48 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v50 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt 
vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v51 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v52 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v53 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216 +; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:216 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232 +; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 +; GFX9-NEXT: v_lshlrev_b32_e32 v41, 8, v41 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v11 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v13 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v5 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v9 ; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v4 -; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248 +; GFX9-NEXT: v_lshlrev_b32_e32 v40, 8, v7 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264 +; GFX9-NEXT: buffer_load_ushort v13, off, 
s[0:3], s32 offset:264 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280 +; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:280 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296 +; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312 -; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v13 +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v11 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328 +; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328 ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 ; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 -; GFX9-NEXT: s_waitcnt vmcnt(14) -; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v5 +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v9 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_lshlrev_b32_e32 v45, 8, v7 ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:60 -; GFX9-NEXT: 
buffer_load_ushort v15, off, s[0:3], s32 offset:68 -; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:84 -; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:92 -; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:100 -; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:108 -; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:116 -; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:124 -; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140 -; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:148 -; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156 -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:164 -; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:172 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:180 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:188 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:196 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:204 -; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:212 -; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:228 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:236 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:252 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:260 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:268 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:276 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:300 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:308 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:316 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:324 -; GFX9-NEXT: s_waitcnt vmcnt(42) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(28) -; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 
offset:668 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:68 +; GFX9-NEXT: buffer_load_ushort v16, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:92 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:100 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108 +; GFX9-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:116 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:124 +; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:132 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:140 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:148 +; GFX9-NEXT: 
buffer_load_ushort v32, off, s[0:3], s32 offset:156 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:164 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:172 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:180 +; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:196 +; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:204 +; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:212 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:220 +; GFX9-NEXT: s_waitcnt vmcnt(29) +; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:228 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:236 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:244 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:252 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:268 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:284 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:292 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:308 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:316 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:324 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; GFX9-NEXT: 
buffer_store_dword v27, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], 
s32 offset:804 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB15_2 ; GFX9-NEXT: ; %bb.1: ; %cmp.false -; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v38, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v2, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v4, v28 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v2, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v3, v8, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s28, 0xff -; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s4, s4, 0xffff +; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: s_lshl_b32 s6, s19, 8 ; GFX9-NEXT: s_lshl_b32 s7, s23, 8 ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_or_b32_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD 
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v13, v41 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v61, v38 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte 
Folded Reload +; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v61, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v63, v57 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v37, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v27, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v33, v43 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_mov_b32_e32 v47, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v22, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v58, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v53, off, 
s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v21, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v51, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v38, v51 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v35, v62 +; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, 
s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v37, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v60 -; GFX9-NEXT: v_mov_b32_e32 v52, v56 -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_mov_b32_e32 v34, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v44 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_or_b32_sdwa v0, v54, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: v_or_b32_sdwa v1, v32, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v51, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v53, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v39, v31 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v40, v30 ; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v36, v31 +; GFX9-NEXT: v_mov_b32_e32 v45, v62 +; GFX9-NEXT: v_mov_b32_e32 v46, v56 +; GFX9-NEXT: v_mov_b32_e32 v56, v58 +; GFX9-NEXT: v_mov_b32_e32 v58, v53 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; 
GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s4, v0 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_or_b32 s4, s4, s5 @@ -20051,32 +20425,48 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: s_mov_b64 s[4:5], 0 ; GFX9-NEXT: s_branch .LBB15_3 ; GFX9-NEXT: .LBB15_2: -; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v61, v0 -; GFX9-NEXT: v_mov_b32_e32 v63, v57 -; GFX9-NEXT: v_mov_b32_e32 v53, v3 +; GFX9-NEXT: v_mov_b32_e32 v38, v51 +; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v33, v43 +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword 
v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v35, v62 +; GFX9-NEXT: v_mov_b32_e32 v36, v31 +; GFX9-NEXT: v_mov_b32_e32 v40, v30 ; GFX9-NEXT: s_mov_b64 s[4:5], -1 -; GFX9-NEXT: v_mov_b32_e32 v57, v38 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; GFX9-NEXT: .LBB15_3: ; %Flow -; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v62, v35 +; GFX9-NEXT: v_mov_b32_e32 v35, v38 ; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; GFX9-NEXT: s_cbranch_vccnz .LBB15_5 ; GFX9-NEXT: ; %bb.4: ; %cmp.true -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v61 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s16, s16, 3 ; GFX9-NEXT: s_add_i32 s18, s18, 3 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff @@ -20123,348 +20513,352 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: s_movk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s8, 0x300 ; GFX9-NEXT: s_and_b32 s8, s8, 0xffff +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s8, v0 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v60 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v38 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 
; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v42 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 
src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: 
v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: 
v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v63 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v53 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v35 +; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v62 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v46 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v48 -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v45 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v63 +; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v39 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v54 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v45 -; GFX9-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v40 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v33 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v55 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v32 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v43 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v36 -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v44 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 +; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v42 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v48 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v41 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v55 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v32 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v35 -; 
GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v39 +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v54 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v33 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v0, s5 @@ -27132,24 +27526,23 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:28 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:40 +; SI-NEXT: v_mov_b32_e32 v52, v30 +; SI-NEXT: v_mov_b32_e32 v53, v28 +; SI-NEXT: v_mov_b32_e32 v40, v12 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:28 +; 
SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40 ; SI-NEXT: s_waitcnt expcnt(3) ; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48 ; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:44 ; SI-NEXT: s_waitcnt expcnt(0) @@ -27159,165 +27552,177 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:68 -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v4 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v14 +; SI-NEXT: v_mul_f32_e32 v14, 1.0, v0 ; SI-NEXT: v_mul_f32_e32 v0, 1.0, v6 -; SI-NEXT: v_mov_b32_e32 v39, v10 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mul_f32_e32 v0, 1.0, v8 -; SI-NEXT: v_mov_b32_e32 v38, v12 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v39 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v38 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v15 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v10 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v18 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v55 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v30 -; SI-NEXT: v_mov_b32_e32 v37, v14 -; SI-NEXT: v_mov_b32_e32 v14, v11 -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v11, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v10, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v12, 1.0, v9 -; SI-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v58, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v56, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v54, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v44, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v46, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v59, 1.0, v9 +; SI-NEXT: 
v_mul_f32_e32 v57, 1.0, v11 ; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v47, 1.0, v40 +; SI-NEXT: v_mul_f32_e32 v45, 1.0, v15 ; SI-NEXT: v_mul_f32_e32 v15, 1.0, v17 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v16 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: v_mul_f32_e32 v16, 1.0, v19 +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v18 ; SI-NEXT: v_mul_f32_e32 v17, 1.0, v21 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v41, 1.0, v20 ; SI-NEXT: v_mul_f32_e32 v18, 1.0, v23 -; SI-NEXT: v_mul_f32_e32 v41, 1.0, v22 +; SI-NEXT: v_mul_f32_e32 v40, 1.0, v22 ; SI-NEXT: v_mul_f32_e32 v19, 1.0, v25 -; SI-NEXT: v_mul_f32_e32 v40, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v24 ; SI-NEXT: v_mul_f32_e32 v20, 1.0, v27 -; SI-NEXT: v_mul_f32_e32 v55, 1.0, v26 ; SI-NEXT: v_mul_f32_e32 v21, 1.0, v29 -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v28 -; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16 +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v53 +; SI-NEXT: v_mul_f32_e32 v52, 1.0, v52 ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v3, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v3, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v4, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v2, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v4, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v10, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s22 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v11, 1.0, s26 ; SI-NEXT: v_mul_f32_e64 v6, 1.0, s29 ; SI-NEXT: v_mul_f32_e64 v7, 1.0, s28 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48 +; SI-NEXT: v_mul_f32_e32 v48, 1.0, v26 +; SI-NEXT: v_mul_f32_e32 v22, 1.0, v51 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v43 -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v44 -; SI-NEXT: v_mul_f32_e32 v24, 1.0, v45 -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v46 -; SI-NEXT: v_mul_f32_e32 v25, 1.0, v47 -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v56 -; SI-NEXT: v_mul_f32_e32 v26, 1.0, v57 -; SI-NEXT: v_mul_f32_e32 v49, 1.0, v58 -; SI-NEXT: v_mul_f32_e32 v27, 1.0, v59 -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v60 -; SI-NEXT: v_mul_f32_e32 v28, 1.0, v61 +; SI-NEXT: v_mul_f32_e32 v23, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v51, 1.0, v50 +; SI-NEXT: v_mul_f32_e32 v24, 1.0, v38 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v39 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v30 +; SI-NEXT: v_mul_f32_e32 v26, 1.0, v28 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v31 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_mul_f32_e32 v38, 1.0, v60 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_mul_f32_e32 v28, 1.0, v42 +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_mul_f32_e32 v37, 1.0, v62 +; SI-NEXT: s_waitcnt vmcnt(10) ; SI-NEXT: v_mul_f32_e32 v29, 1.0, v63 +; SI-NEXT: s_waitcnt vmcnt(9) ; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v35 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_mul_f32_e32 v31, 1.0, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v36 -; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_mul_f32_e64 v0, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v35, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v36, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v42, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v33, 1.0, s22 -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mul_f32_e32 v33, 1.0, v35 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_mul_f32_e32 v42, 1.0, v36 +; SI-NEXT: v_mul_f32_e64 v12, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v34, 1.0, s24 +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, 
s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB19_4 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB19_2 ; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v3, 16 +; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v10 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; 
SI-NEXT: v_alignbit_b32 v2, v2, v8, 16 +; SI-NEXT: v_alignbit_b32 v3, v3, v9, 16 ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_alignbit_b32 v6, v6, v7, 16 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v5, v5, v8, 16 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v1, v1, v35, 16 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_mov_b32_e32 v59, v2 -; SI-NEXT: v_alignbit_b32 v0, v0, v2, 16 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36 -; SI-NEXT: v_alignbit_b32 v2, v2, v42, 16 -; SI-NEXT: v_mov_b32_e32 v57, v11 -; SI-NEXT: v_mov_b32_e32 v47, v10 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_mov_b32_e32 v45, v12 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_alignbit_b32 v3, v3, v33, 16 -; SI-NEXT: v_mov_b32_e32 v33, v14 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v58 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v56 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v44 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_mov_b32_e32 v62, v38 +; SI-NEXT: v_alignbit_b32 v5, v5, v11, 16 +; SI-NEXT: v_alignbit_b32 v7, v7, v14, 16 +; SI-NEXT: v_alignbit_b32 v8, v8, v54, 16 +; SI-NEXT: v_alignbit_b32 v9, v9, v46, 16 +; SI-NEXT: v_mov_b32_e32 v62, v61 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v61 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v59 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_alignbit_b32 v13, v13, v47, 16 +; SI-NEXT: v_mov_b32_e32 v46, v45 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v45 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v12 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v57 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 @@ -27325,16 +27730,6 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 -; SI-NEXT: v_mov_b32_e32 v61, v53 -; SI-NEXT: v_alignbit_b32 v15, v15, v53, 16 -; SI-NEXT: v_alignbit_b32 v17, v17, v39, 16 -; SI-NEXT: v_alignbit_b32 v18, v18, v41, 16 -; SI-NEXT: v_alignbit_b32 v19, v19, v40, 16 -; SI-NEXT: v_alignbit_b32 v20, v20, v55, 16 -; SI-NEXT: v_alignbit_b32 v21, v21, v54, 16 -; SI-NEXT: v_alignbit_b32 v29, v29, v32, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 ; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 @@ -27342,212 +27737,238 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> 
inreg %a ; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v26 ; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v27 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 ; SI-NEXT: v_alignbit_b32 v30, v30, v31, 16 -; SI-NEXT: v_alignbit_b32 v23, v23, v52, 16 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 +; SI-NEXT: v_alignbit_b32 v4, v4, v34, 16 +; SI-NEXT: v_mov_b32_e32 v63, v44 +; SI-NEXT: v_mov_b32_e32 v58, v57 +; SI-NEXT: v_mov_b32_e32 v44, v43 +; SI-NEXT: v_alignbit_b32 v16, v16, v43, 16 +; SI-NEXT: v_mov_b32_e32 v43, v41 +; SI-NEXT: v_alignbit_b32 v17, v17, v41, 16 +; SI-NEXT: v_alignbit_b32 v18, v18, v40, 16 +; SI-NEXT: v_mov_b32_e32 v40, v55 +; SI-NEXT: v_alignbit_b32 v19, v19, v55, 16 +; SI-NEXT: v_alignbit_b32 v20, v20, v48, 16 +; SI-NEXT: v_mov_b32_e32 v48, v53 +; SI-NEXT: v_alignbit_b32 v21, v21, v53, 16 +; SI-NEXT: v_alignbit_b32 v22, v22, v52, 16 ; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_alignbit_b32 v24, v24, v51, 16 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_alignbit_b32 v25, v25, v50, 16 +; SI-NEXT: v_alignbit_b32 v23, v23, v51, 16 +; SI-NEXT: v_alignbit_b32 v24, v24, v50, 16 ; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_alignbit_b32 v26, v26, v49, 16 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: v_alignbit_b32 v27, v27, v48, 16 -; SI-NEXT: v_mov_b32_e32 v48, v37 +; SI-NEXT: v_alignbit_b32 v25, v25, v49, 16 +; SI-NEXT: v_mov_b32_e32 v36, v39 +; SI-NEXT: v_alignbit_b32 v26, v26, v39, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v34, v38 +; SI-NEXT: v_alignbit_b32 v27, v27, v38, 16 +; SI-NEXT: v_mov_b32_e32 v35, v37 ; SI-NEXT: v_alignbit_b32 v28, v28, v37, 16 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v37, v32 +; SI-NEXT: v_alignbit_b32 v29, v29, v32, 16 +; SI-NEXT: v_alignbit_b32 v31, v31, v42, 16 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v33, v42 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_alignbit_b32 v10, v10, v61, 16 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_mov_b32_e32 v35, v7 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_alignbit_b32 v12, v12, v54, 16 +; SI-NEXT: v_mov_b32_e32 v41, v61 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_mov_b32_e32 v43, v8 -; SI-NEXT: v_alignbit_b32 v7, v7, v8, 16 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v9 +; SI-NEXT: v_alignbit_b32 v11, v11, v59, 16 +; SI-NEXT: v_mov_b32_e32 v55, v59 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v32 -; SI-NEXT: v_alignbit_b32 v31, v31, v34, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v60, v8 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_alignbit_b32 v8, v8, v9, 16 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v58, v11 -; SI-NEXT: v_alignbit_b32 v9, v9, v11, 16 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v56, v11 -; SI-NEXT: v_alignbit_b32 v10, v10, v11, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v46, v12 -; SI-NEXT: v_alignbit_b32 v11, v11, v12, 16 -; SI-NEXT: 
v_lshrrev_b32_e32 v12, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v63, v14 -; SI-NEXT: v_alignbit_b32 v12, v12, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v44, v14 -; SI-NEXT: v_alignbit_b32 v13, v13, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v14, v14, v45, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v36, v14 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_alignbit_b32 v14, v14, v38, 16 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v53, v38 -; SI-NEXT: v_alignbit_b32 v16, v16, v38, 16 -; SI-NEXT: v_mov_b32_e32 v38, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v40 +; SI-NEXT: v_alignbit_b32 v15, v15, v47, 16 +; SI-NEXT: v_mov_b32_e32 v51, v47 +; SI-NEXT: v_mov_b32_e32 v53, v45 +; SI-NEXT: s_branch .LBB19_3 +; SI-NEXT: .LBB19_2: +; SI-NEXT: v_mov_b32_e32 v63, v44 +; SI-NEXT: v_mov_b32_e32 v44, v43 +; SI-NEXT: v_mov_b32_e32 v43, v41 ; SI-NEXT: v_mov_b32_e32 v40, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v22, v54, 16 -; SI-NEXT: s_cbranch_execnz .LBB19_3 -; SI-NEXT: .LBB19_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v59 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v35 +; SI-NEXT: v_mov_b32_e32 v48, v53 +; SI-NEXT: v_mov_b32_e32 v52, v51 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v62, v61 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v58, v57 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v46, v45 +; SI-NEXT: v_mov_b32_e32 v50, v49 +; SI-NEXT: v_mov_b32_e32 v36, v39 +; SI-NEXT: s_waitcnt 
expcnt(0) +; SI-NEXT: v_mov_b32_e32 v34, v38 +; SI-NEXT: v_mov_b32_e32 v35, v37 +; SI-NEXT: v_mov_b32_e32 v37, v32 +; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v33, v42 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: .LBB19_3: ; %Flow +; SI-NEXT: v_mov_b32_e32 v38, v50 +; SI-NEXT: v_mov_b32_e32 v39, v52 +; SI-NEXT: v_mov_b32_e32 v49, v40 +; SI-NEXT: v_mov_b32_e32 v50, v43 +; SI-NEXT: v_mov_b32_e32 v43, v44 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: s_cbranch_vccnz .LBB19_5 +; SI-NEXT: ; %bb.4: ; %cmp.true +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v44 ; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 ; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v60 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v40 ; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 ; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v57 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v63 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v47 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v62 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v45 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v60 ; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 ; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v58 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v36 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v46 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:196 ; 4-byte 
Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 ; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v32 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 ; SI-NEXT: v_alignbit_b32 v1, v3, v2, 16 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_lshrrev_b32_e32 
v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 ; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 ; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 ; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 ; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 +; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 ; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 ; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 +; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 -; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: s_waitcnt vmcnt(12) ; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 ; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 ; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(10) ; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 -; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 ; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v25 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 ; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 ; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v26 +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 ; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 ; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(5) ; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 ; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 +; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 ; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 +; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 ; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 +; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v31 -; 
SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 -; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -27561,105 +27982,107 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_alignbit_b32 v3, v4, v3, 16 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 ; SI-NEXT: v_alignbit_b32 v6, v7, v6, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v43 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v45 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 ; SI-NEXT: v_alignbit_b32 v7, v8, v7, 16 ; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v42 ; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 ; SI-NEXT: v_alignbit_b32 v8, v9, v8, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v58 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v52 ; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 ; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v56 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v41 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_alignbit_b32 v10, v11, v10, 16 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v46 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v55 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_alignbit_b32 v11, v12, v11, 16 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v63 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v54 ; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 ; SI-NEXT: v_alignbit_b32 v12, v13, v12, 16 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v56 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 ; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v62 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v53 ; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 ; SI-NEXT: v_alignbit_b32 v14, v15, v14, 16 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v61 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v51 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_alignbit_b32 v15, v16, v15, 16 -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v53 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v43 ; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 ; SI-NEXT: v_alignbit_b32 v16, v17, v16, 16 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v38 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v50 ; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 ; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v39 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 
offset:252 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; SI-NEXT: v_alignbit_b32 v18, v19, v18, 16 -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v41 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v49 ; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 ; SI-NEXT: v_alignbit_b32 v19, v20, v19, 16 -; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v40 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; SI-NEXT: v_alignbit_b32 v20, v21, v20, 16 -; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v55 +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v48 ; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 ; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; SI-NEXT: v_alignbit_b32 v22, v23, v22, 16 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v39 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 ; SI-NEXT: v_alignbit_b32 v23, v24, v23, 16 -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v52 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 ; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 ; SI-NEXT: v_alignbit_b32 v24, v25, v24, 16 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v51 +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v38 ; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 -; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v50 +; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v36 ; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 ; SI-NEXT: v_alignbit_b32 v26, v27, v26, 16 -; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v49 +; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v34 ; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 ; SI-NEXT: v_alignbit_b32 v27, v28, v27, 16 -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v48 +; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v35 ; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 ; SI-NEXT: v_alignbit_b32 v28, v29, v28, 16 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v37 ; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 ; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16 -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 ; SI-NEXT: v_alignbit_b32 v30, v31, v30, 16 -; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v37 +; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v33 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 ; SI-NEXT: v_alignbit_b32 v31, v32, v31, 16 -; SI-NEXT: .LBB19_3: ; %end +; SI-NEXT: .LBB19_5: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, 
s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -27678,41 +28101,6 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB19_4: -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v61, v53 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v59, v2 -; SI-NEXT: v_mov_b32_e32 v57, v11 -; SI-NEXT: v_mov_b32_e32 v47, v10 -; SI-NEXT: v_mov_b32_e32 v45, v12 -; SI-NEXT: v_mov_b32_e32 v33, v14 -; SI-NEXT: v_mov_b32_e32 v62, v38 -; SI-NEXT: v_mov_b32_e32 v38, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v40 -; SI-NEXT: v_mov_b32_e32 v40, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_branch .LBB19_2 ; ; VI-LABEL: bitcast_v64bf16_to_v32i32_scalar: ; VI: ; %bb.0: @@ -29901,15 +30289,13 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr55 ; SI-NEXT: ; implicit-def: $vgpr53 @@ -29919,13 +30305,15 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: 
$vgpr58 ; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr32 @@ -30008,87 +30396,82 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v63 ; SI-NEXT: v_cvt_f32_f16_e32 v34, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v54, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v56, v31 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v36, v63 ; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v29 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v28 -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v38, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v29 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v50, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v58, v27 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, 
v21 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v44, v5 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v42, v4 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v43, v3 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v44, v3 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v58, v2 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v46, v2 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; SI-NEXT: v_cvt_f32_f16_e32 v60, v1 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr19 @@ -30104,7 +30487,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v31, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v31 @@ -30125,7 +30508,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v41, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v43, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v45, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 @@ -30139,6 +30522,8 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v47, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v31 @@ -30147,17 +30532,19 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v61, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v62 -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 @@ -30179,133 +30566,122 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; SI-NEXT: s_cbranch_execz .LBB20_4 ; SI-NEXT: ; 
%bb.3: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5 -; SI-NEXT: v_add_i32_e32 v33, vcc, 3, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(5) ; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v9 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v28 ; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v9 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: v_add_i32_e32 v25, vcc, 3, v25 -; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26 ; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v26 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v9, v50 -; SI-NEXT: v_mov_b32_e32 v50, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v48 ; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24 +; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v6 +; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v7 ; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v38 ; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v23 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v23 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_add_i32_e32 v44, vcc, 3, v63 +; SI-NEXT: v_add_i32_e32 v46, vcc, 3, v62 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v44 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v46 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v56 -; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v22 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v6, v62 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v63 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v58 +; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20 ; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21 +; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v21 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 -; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v20 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v22 
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v34 +; SI-NEXT: v_mov_b32_e32 v34, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v32 +; SI-NEXT: v_mov_b32_e32 v32, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v56 ; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v19 ; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v19 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v61 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v61 +; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5 ; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18 +; SI-NEXT: v_add_i32_e32 v33, vcc, 3, v1 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 ; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v18 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v59 +; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 +; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v8 ; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v8 ; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v17 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v57 -; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v47 -; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v27 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_add_i32_e32 v42, vcc, 3, v63 -; SI-NEXT: v_add_i32_e32 v44, vcc, 3, v62 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v57 +; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26 ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 ; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v10 ; SI-NEXT: v_add_i32_e32 v11, vcc, 3, v11 ; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v12 ; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v13 ; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14 ; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v15 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v27 +; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v26 +; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v27 +; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28 ; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29 ; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30 -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v42 -; SI-NEXT: 
v_lshrrev_b32_e32 v62, 16, v44 ; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v10 ; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v11 ; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v12 ; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 ; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v29 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v42 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v44 ; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v44, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v44, v3 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v46, v2 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v6, v62 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v63 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v50 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 @@ -30321,14 +30697,16 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v38, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v60, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 ; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 ; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v52 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v6, v47 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v45 ; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 @@ -30338,37 +30716,37 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v53, v53 ; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 ; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v47, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v59, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v61, v2 -; SI-NEXT: v_mov_b32_e32 v52, v29 -; SI-NEXT: v_mov_b32_e32 v48, v30 -; SI-NEXT: v_mov_b32_e32 v56, v28 -; SI-NEXT: v_mov_b32_e32 v34, v7 -; 
SI-NEXT: v_mov_b32_e32 v32, v6 -; SI-NEXT: v_mov_b32_e32 v46, v8 -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cvt_f32_f16_e32 v45, v1 +; SI-NEXT: v_mov_b32_e32 v50, v28 +; SI-NEXT: v_mov_b32_e32 v48, v29 +; SI-NEXT: v_mov_b32_e32 v38, v30 +; SI-NEXT: v_mov_b32_e32 v58, v27 +; SI-NEXT: v_mov_b32_e32 v56, v8 +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:112 ; 4-byte 
Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: .LBB20_4: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] ; SI-NEXT: v_cvt_f16_f32_e32 v1, v61 @@ -30379,41 +30757,45 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v58 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v46 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v43 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v44 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v47 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v42 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v44 ; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v43 ; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v41 ; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 @@ -30422,7 +30804,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v55 ; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 @@ -30431,7 +30813,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v53 ; SI-NEXT: 
v_add_i32_e32 v3, vcc, 32, v0 @@ -30440,7 +30822,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v51 ; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 @@ -30449,7 +30831,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v49 ; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 @@ -30458,7 +30840,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v39 ; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 @@ -30467,7 +30849,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v37 ; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 @@ -30476,7 +30858,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v35 ; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 @@ -30485,7 +30867,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v33 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 @@ -30495,20 +30877,9 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; 
SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -30519,7 +30890,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -30530,7 +30901,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -30541,7 +30912,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -30552,7 +30923,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -30563,7 +30934,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -30572,9 +30943,9 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; 
SI-NEXT: s_waitcnt vmcnt(0) @@ -30584,8 +30955,8 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -30596,7 +30967,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -30607,7 +30978,7 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -30617,30 +30988,37 @@ define <64 x half> @bitcast_v32i32_to_v64f16(<32 x i32> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v46 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v56 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v56 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v58 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v54 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v52 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v50 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v48 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v42 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v38 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -34412,385 +34790,431 @@ define inreg <64 x i16> 
@bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v32i32_to_v64i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: v_writelane_b32 v20, s51, 11 +; SI-NEXT: v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_readfirstlane_b32 s47, v1 -; SI-NEXT: v_readfirstlane_b32 s46, v2 -; SI-NEXT: v_readfirstlane_b32 s45, v3 -; SI-NEXT: v_readfirstlane_b32 s44, v4 -; SI-NEXT: v_readfirstlane_b32 s43, v5 -; SI-NEXT: v_readfirstlane_b32 s42, v6 -; SI-NEXT: v_readfirstlane_b32 s41, v7 -; SI-NEXT: v_readfirstlane_b32 s40, v8 -; SI-NEXT: v_readfirstlane_b32 s15, v9 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s12, v12 -; SI-NEXT: v_readfirstlane_b32 s11, v13 -; SI-NEXT: v_readfirstlane_b32 s10, v14 -; SI-NEXT: v_readfirstlane_b32 s9, v15 -; SI-NEXT: v_readfirstlane_b32 s8, v16 -; SI-NEXT: v_readfirstlane_b32 s7, v17 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v18 +; SI-NEXT: v_writelane_b32 v20, s68, 20 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: v_writelane_b32 v20, s69, 21 ; SI-NEXT: s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s47 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v11, s26 -; SI-NEXT: v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v13, s22 -; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v15, s18 -; SI-NEXT: v_mov_b32_e32 v16, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 
16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s46, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s29, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s27, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s23, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s21, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s19, v15, 16 -; SI-NEXT: v_alignbit_b32 v16, s17, v16, 16 -; SI-NEXT: s_lshr_b32 s56, s6, 16 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s58, s10, 16 -; SI-NEXT: s_lshr_b32 s59, s12, 16 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: s_lshr_b32 s61, s40, 16 -; SI-NEXT: s_lshr_b32 s62, s42, 16 -; SI-NEXT: s_lshr_b32 s63, s44, 16 -; SI-NEXT: s_lshr_b32 s72, s46, 16 -; SI-NEXT: s_lshr_b32 s73, s29, 16 -; SI-NEXT: s_lshr_b32 s74, s27, 16 -; SI-NEXT: s_lshr_b32 s75, s25, 16 -; SI-NEXT: s_lshr_b32 s76, s23, 16 -; SI-NEXT: s_lshr_b32 s77, s21, 16 -; SI-NEXT: s_lshr_b32 s78, s19, 16 -; SI-NEXT: s_lshr_b32 s79, s17, 16 +; SI-NEXT: s_lshr_b32 s38, s5, 16 +; SI-NEXT: s_lshr_b32 s39, s7, 16 +; SI-NEXT: s_lshr_b32 s48, s9, 16 +; SI-NEXT: s_lshr_b32 s49, s11, 16 +; SI-NEXT: s_lshr_b32 s50, s13, 16 +; SI-NEXT: s_lshr_b32 s51, s15, 16 +; SI-NEXT: s_lshr_b32 s52, s41, 16 +; SI-NEXT: s_lshr_b32 s53, s43, 16 +; SI-NEXT: s_lshr_b32 s54, s45, 16 +; SI-NEXT: s_lshr_b32 s55, s29, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 16 +; SI-NEXT: s_lshr_b32 s67, s21, 16 +; SI-NEXT: s_lshr_b32 s68, s19, 16 +; SI-NEXT: s_lshr_b32 s69, s17, 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s47, s47, 3 ; SI-NEXT: s_add_i32 s45, s45, 3 -; SI-NEXT: s_add_i32 s43, s43, 3 -; SI-NEXT: s_add_i32 s41, s41, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 
s46, s46, 3 ; SI-NEXT: s_add_i32 s44, s44, 3 +; SI-NEXT: s_add_i32 s43, s43, 3 ; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: s_add_i32 s41, s41, 3 ; SI-NEXT: s_add_i32 s40, s40, 3 +; SI-NEXT: s_add_i32 s15, s15, 3 ; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s47 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v11, s26 -; SI-NEXT: v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v13, s22 -; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v15, s18 -; SI-NEXT: v_mov_b32_e32 v16, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s46, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s29, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s27, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s23, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s21, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s19, v15, 16 -; SI-NEXT: v_alignbit_b32 v16, s17, v16, 16 -; SI-NEXT: s_lshr_b32 s56, s6, 16 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s58, s10, 16 -; SI-NEXT: s_lshr_b32 s59, s12, 16 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: s_lshr_b32 s61, s40, 16 -; SI-NEXT: s_lshr_b32 s62, s42, 16 -; SI-NEXT: s_lshr_b32 s63, s44, 16 -; SI-NEXT: s_lshr_b32 s72, s46, 16 -; SI-NEXT: s_lshr_b32 s73, s29, 16 -; SI-NEXT: s_lshr_b32 s74, s27, 16 -; SI-NEXT: s_lshr_b32 s75, s25, 16 -; SI-NEXT: s_lshr_b32 s76, s23, 16 -; SI-NEXT: s_lshr_b32 s77, s21, 16 -; SI-NEXT: s_lshr_b32 s78, s19, 16 -; SI-NEXT: s_lshr_b32 s79, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[28:29], 16 +; SI-NEXT: s_lshr_b32 s38, s5, 16 +; SI-NEXT: s_lshr_b32 s39, s7, 16 +; SI-NEXT: s_lshr_b32 s48, s9, 16 +; SI-NEXT: s_lshr_b32 s49, s11, 16 +; SI-NEXT: s_lshr_b32 s50, s13, 16 +; SI-NEXT: s_lshr_b32 s51, s15, 16 +; SI-NEXT: s_lshr_b32 s52, s41, 16 +; SI-NEXT: s_lshr_b32 s53, s43, 16 +; SI-NEXT: s_lshr_b32 s54, s45, 16 +; SI-NEXT: s_lshr_b32 s55, s29, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 16 +; SI-NEXT: s_lshr_b32 s67, s21, 16 +; SI-NEXT: s_lshr_b32 s68, s19, 16 +; SI-NEXT: s_lshr_b32 s69, s17, 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[22:23], 
16 +; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 ; SI-NEXT: .LBB25_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_or_b32_e32 v16, s4, v16 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s79, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: buffer_store_dword v16, v0, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s47, s36, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s69, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s34, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s68, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s30, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s78, 16 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v16, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v16, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s67, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s94, 16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s77, 16 -; SI-NEXT: buffer_store_dword v16, v15, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s66, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: buffer_store_dword v15, v14, 
s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s75, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s65, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s47, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s64, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s46, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s45, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, 
s44, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s44, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xffff +; SI-NEXT: s_lshl_b32 s17, s54, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x64, v0 -; 
SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: 
s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr79 -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr55 ; SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr77 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: 
$sgpr75 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr52 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr46 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_v32i32_to_v64i16_scalar: @@ -35860,179 +36284,162 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v56, v10 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_mov_b32_e32 v57, v8 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 +; SI-NEXT: v_mov_b32_e32 v54, v12 +; SI-NEXT: v_mov_b32_e32 v34, v10 +; SI-NEXT: v_mov_b32_e32 v35, v8 +; SI-NEXT: v_mov_b32_e32 v38, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:56 ; SI-NEXT: 
buffer_load_dword v59, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v15 ; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v19 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v23 ; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v27 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v40 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v36 ; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v8 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v10 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v12 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v38 -; SI-NEXT: s_waitcnt vmcnt(5) expcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v33 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v50 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v32 -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(4) ; 
SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB27_4 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill 
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB27_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v7, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 +; SI-NEXT: v_or_b32_e32 v7, v0, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v9, v0, v50 +; SI-NEXT: v_or_b32_e32 v9, v0, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v10, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 -; SI-NEXT: v_or_b32_e32 v11, v0, v41 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 -; SI-NEXT: v_or_b32_e32 v12, v0, v40 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_mov_b32_e32 v52, v57 -; SI-NEXT: v_mov_b32_e32 v57, v40 -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v13 -; SI-NEXT: v_or_b32_e32 v13, v0, v13 +; SI-NEXT: v_or_b32_e32 v10, v0, v47 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_or_b32_e32 v11, v0, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v12, v0, v45 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v54 +; SI-NEXT: v_or_b32_e32 v13, v0, v44 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v14 -; SI-NEXT: v_mov_b32_e32 v36, v41 -; SI-NEXT: v_mov_b32_e32 v41, v14 -; SI-NEXT: v_or_b32_e32 v14, v0, v48 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v54 +; SI-NEXT: v_mov_b32_e32 v54, v14 +; SI-NEXT: v_or_b32_e32 v14, v0, v43 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v16 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v43 -; SI-NEXT: v_mov_b32_e32 v43, v48 -; SI-NEXT: v_mov_b32_e32 v48, v15 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v15, v0, v15 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v18 -; SI-NEXT: v_mov_b32_e32 v38, v61 -; SI-NEXT: v_mov_b32_e32 v61, v56 -; SI-NEXT: v_mov_b32_e32 v56, v16 -; SI-NEXT: v_or_b32_e32 v16, v0, v37 +; SI-NEXT: v_or_b32_e32 v16, v0, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v20 ; SI-NEXT: v_or_b32_e32 v17, v0, v17 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v18, v0, v35 +; SI-NEXT: v_or_b32_e32 v18, v0, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; SI-NEXT: v_or_b32_e32 v19, v0, v19 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 -; SI-NEXT: v_mov_b32_e32 v37, v20 -; SI-NEXT: v_or_b32_e32 v20, v0, v33 +; SI-NEXT: v_or_b32_e32 v20, v0, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: v_or_b32_e32 v21, v0, v21 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 -; SI-NEXT: v_or_b32_e32 v22, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_mov_b32_e32 v35, v24 -; SI-NEXT: v_mov_b32_e32 v39, v23 +; SI-NEXT: v_or_b32_e32 v22, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 ; SI-NEXT: v_or_b32_e32 v23, v0, v23 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 -; SI-NEXT: v_mov_b32_e32 v24, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v24, v0, v24 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v24, v0, v57 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v63 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 ; SI-NEXT: v_or_b32_e32 v25, v0, v25 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46 -; SI-NEXT: v_mov_b32_e32 v26, v27 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v26, v0, v26 -; 
SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 +; SI-NEXT: v_or_b32_e32 v26, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v27, v0, v54 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v27, v0, v27 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_mov_b32_e32 v33, v28 ; SI-NEXT: v_or_b32_e32 v28, v0, v5 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: v_or_b32_e32 v29, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v29, v0, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 @@ -36040,15 +36447,18 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v30, v0, v3 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_mov_b32_e32 v63, v2 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_or_b32_e32 v8, v1, v55 -; SI-NEXT: v_mov_b32_e32 v55, v4 -; SI-NEXT: v_mov_b32_e32 v53, v6 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v45, v44 -; SI-NEXT: v_mov_b32_e32 v59, v42 -; SI-NEXT: v_or_b32_e32 v31, v0, v34 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v38, v2 +; SI-NEXT: v_or_b32_e32 v8, v1, v56 +; SI-NEXT: v_mov_b32_e32 v42, v41 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v55, v61 +; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v62, v52 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v49, v51 +; SI-NEXT: v_or_b32_e32 v31, v0, v31 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -36056,14 +36466,40 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v4, s8 ; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: s_cbranch_execnz .LBB27_3 -; SI-NEXT: .LBB27_2: ; %cmp.true +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_branch .LBB27_3 +; SI-NEXT: .LBB27_2: +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v42, v41 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v55, v61 +; SI-NEXT: v_mov_b32_e32 v38, v2 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v54 +; SI-NEXT: v_mov_b32_e32 v54, v14 +; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v62, v52 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v49, v51 +; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: .LBB27_3: ; %Flow +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: v_mov_b32_e32 v58, v49 +; SI-NEXT: s_cbranch_vccnz .LBB27_5 +; SI-NEXT: ; %bb.4: ; %cmp.true +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v48, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, 
off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v32, v1 +; SI-NEXT: v_or_b32_e32 v1, v56, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v52, v53 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 @@ -36104,143 +36540,143 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v4, s8 ; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v38, v0 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_or_b32_e32 v0, v39, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v50, v0 +; SI-NEXT: v_or_b32_e32 v0, v47, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v36, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v49, v0 +; SI-NEXT: v_or_b32_e32 v0, v44, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v43, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v48, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte 
Folded Reload ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; 
SI-NEXT: v_or_b32_e32 v0, v39, v0 -; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 +; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v30, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, 
s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 @@ -36249,7 +36685,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: v_add_i32_e32 v31, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 -; SI-NEXT: .LBB27_3: ; %end +; SI-NEXT: .LBB27_5: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -36268,35 +36704,6 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB27_4: -; SI-NEXT: v_mov_b32_e32 v38, v61 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v63, v2 -; SI-NEXT: v_mov_b32_e32 v55, v4 -; SI-NEXT: v_mov_b32_e32 v53, v6 -; SI-NEXT: v_mov_b32_e32 v52, v57 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v61, v56 -; SI-NEXT: v_mov_b32_e32 v50, v43 -; SI-NEXT: v_mov_b32_e32 v36, v41 -; SI-NEXT: v_mov_b32_e32 v57, v40 -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v13 -; SI-NEXT: v_mov_b32_e32 v43, v48 -; SI-NEXT: v_mov_b32_e32 v48, v15 -; SI-NEXT: v_mov_b32_e32 v41, v14 -; SI-NEXT: v_mov_b32_e32 v56, v16 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v45, v44 -; SI-NEXT: v_mov_b32_e32 v59, v42 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v37, v20 -; SI-NEXT: v_mov_b32_e32 v39, v23 -; SI-NEXT: v_mov_b32_e32 v35, v24 -; SI-NEXT: v_mov_b32_e32 v33, v28 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_branch .LBB27_2 ; ; VI-LABEL: bitcast_v64i16_to_v32i32_scalar: ; VI: ; %bb.0: @@ -43243,1220 +43650,1742 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a, ; SI-LABEL: bitcast_v32f32_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v63, s30, 0 +; SI-NEXT: v_writelane_b32 v63, s31, 1 +; SI-NEXT: v_writelane_b32 v63, s34, 2 +; SI-NEXT: v_writelane_b32 v63, s35, 3 +; SI-NEXT: v_writelane_b32 v63, s36, 4 +; SI-NEXT: v_writelane_b32 v63, s37, 5 +; SI-NEXT: v_writelane_b32 v63, s38, 6 +; SI-NEXT: v_writelane_b32 v63, s39, 7 +; SI-NEXT: v_writelane_b32 v63, s48, 8 +; SI-NEXT: v_writelane_b32 v63, s49, 9 +; SI-NEXT: v_writelane_b32 v63, s50, 10 +; SI-NEXT: v_writelane_b32 v63, s51, 11 +; SI-NEXT: v_writelane_b32 v63, s52, 12 +; SI-NEXT: v_writelane_b32 v63, s53, 13 +; SI-NEXT: v_writelane_b32 
v63, s54, 14 +; SI-NEXT: v_writelane_b32 v63, s55, 15 +; SI-NEXT: v_writelane_b32 v63, s64, 16 +; SI-NEXT: v_writelane_b32 v63, s65, 17 +; SI-NEXT: v_writelane_b32 v63, s66, 18 +; SI-NEXT: v_writelane_b32 v63, s67, 19 +; SI-NEXT: v_writelane_b32 v63, s68, 20 +; SI-NEXT: v_writelane_b32 v63, s69, 21 +; SI-NEXT: v_writelane_b32 v63, s70, 22 +; SI-NEXT: v_writelane_b32 v63, s71, 23 +; SI-NEXT: v_writelane_b32 v63, s80, 24 +; SI-NEXT: v_writelane_b32 v63, s81, 25 +; SI-NEXT: v_writelane_b32 v63, s82, 26 +; SI-NEXT: v_writelane_b32 v63, s83, 27 +; SI-NEXT: v_writelane_b32 v63, s84, 28 +; SI-NEXT: v_writelane_b32 v63, s85, 29 +; SI-NEXT: v_writelane_b32 v63, s86, 30 +; SI-NEXT: v_writelane_b32 v63, s87, 31 +; SI-NEXT: v_writelane_b32 v63, s96, 32 +; SI-NEXT: v_writelane_b32 v63, s97, 33 +; SI-NEXT: v_writelane_b32 v63, s98, 34 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v56, s16 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v47, s17 -; SI-NEXT: v_mov_b32_e32 v44, s18 -; SI-NEXT: v_mov_b32_e32 v42, s19 -; SI-NEXT: v_mov_b32_e32 v40, s20 -; SI-NEXT: v_mov_b32_e32 v53, s21 -; SI-NEXT: v_mov_b32_e32 v51, s22 -; SI-NEXT: v_mov_b32_e32 v48, s23 -; SI-NEXT: v_mov_b32_e32 v38, s24 -; SI-NEXT: v_mov_b32_e32 v35, s25 -; SI-NEXT: v_mov_b32_e32 v33, s26 -; SI-NEXT: v_mov_b32_e32 v30, s27 -; SI-NEXT: v_mov_b32_e32 v28, s28 -; SI-NEXT: v_mov_b32_e32 v25, s29 -; SI-NEXT: s_cbranch_scc0 .LBB37_4 +; SI-NEXT: v_writelane_b32 v63, s99, 35 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec 
+; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr61 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane +; SI-NEXT: s_cbranch_scc0 .LBB37_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded 
Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:292 ; 4-byte Folded 
Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 17 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s9, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: 
v_writelane_b32 v61, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 41 +; SI-NEXT: s_lshr_b32 s46, s21, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 40 +; SI-NEXT: s_lshr_b32 s46, s19, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 39 +; SI-NEXT: s_lshr_b32 s46, s19, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 38 +; SI-NEXT: s_lshr_b32 s46, s19, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 37 +; SI-NEXT: s_lshr_b32 s46, s17, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 36 +; SI-NEXT: s_lshr_b32 s46, s17, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 35 +; SI-NEXT: s_lshr_b32 s46, s17, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 34 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 32 +; SI-NEXT: v_writelane_b32 v61, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 30 +; SI-NEXT: v_writelane_b32 v61, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 28 +; SI-NEXT: v_writelane_b32 v61, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 26 +; SI-NEXT: v_writelane_b32 v61, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 24 +; SI-NEXT: v_writelane_b32 v61, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 22 +; SI-NEXT: v_writelane_b32 v61, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 20 +; SI-NEXT: v_writelane_b32 v61, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 18 +; SI-NEXT: v_writelane_b32 v61, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 16 +; SI-NEXT: v_writelane_b32 v61, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 14 +; SI-NEXT: v_writelane_b32 v61, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 12 +; SI-NEXT: v_writelane_b32 v61, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 10 +; SI-NEXT: v_writelane_b32 v61, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 8 +; SI-NEXT: v_writelane_b32 v61, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 6 +; SI-NEXT: v_writelane_b32 v61, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 4 +; SI-NEXT: v_writelane_b32 v61, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 2 +; SI-NEXT: v_writelane_b32 v61, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 0 +; SI-NEXT: s_lshr_b64 s[48:49], s[4:5], 
16 +; SI-NEXT: v_writelane_b32 v61, s47, 1 +; SI-NEXT: s_lshr_b64 s[50:51], s[40:41], 24 +; SI-NEXT: s_lshr_b64 s[52:53], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[54:55], s[40:41], 8 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 +; SI-NEXT: s_cbranch_execnz .LBB37_4 +; SI-NEXT: .LBB37_2: ; %cmp.true +; SI-NEXT: v_add_f32_e64 v2, s5, 1.0 +; SI-NEXT: v_add_f32_e64 v1, s4, 1.0 +; SI-NEXT: v_lshr_b64 v[13:14], v[1:2], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[1:2], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[1:2], 8 +; SI-NEXT: v_add_f32_e64 v4, s7, 1.0 +; SI-NEXT: v_add_f32_e64 v3, s6, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[3:4], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[3:4], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[3:4], 8 +; SI-NEXT: v_add_f32_e64 v6, s9, 1.0 +; SI-NEXT: v_add_f32_e64 v5, s8, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[5:6], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[5:6], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:120 ; 4-byte 
Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[5:6], 8 +; SI-NEXT: v_add_f32_e64 v8, s11, 1.0 +; SI-NEXT: v_add_f32_e64 v7, s10, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[7:8], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[7:8], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[7:8], 8 +; SI-NEXT: v_add_f32_e64 v10, s13, 1.0 +; SI-NEXT: v_add_f32_e64 v9, s12, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[9:10], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[9:10], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[9:10], 8 +; SI-NEXT: v_add_f32_e64 v12, s15, 1.0 +; SI-NEXT: v_add_f32_e64 v11, s14, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[11:12], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[11:12], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[11:12], 8 +; SI-NEXT: 
v_add_f32_e64 v16, s41, 1.0 +; SI-NEXT: v_add_f32_e64 v15, s40, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[15:16], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[15:16], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[15:16], 8 +; SI-NEXT: v_add_f32_e64 v21, s43, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s42, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[20:21], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v2 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v2 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v2 -; SI-NEXT: v_alignbit_b32 v23, v53, v40, 24 -; SI-NEXT: v_alignbit_b32 v26, v53, v40, 16 -; SI-NEXT: v_alignbit_b32 v29, v53, v40, 8 -; SI-NEXT: v_alignbit_b32 v32, v42, v44, 24 -; SI-NEXT: v_alignbit_b32 v36, v42, 
v44, 16 -; SI-NEXT: v_alignbit_b32 v39, v42, v44, 8 -; SI-NEXT: v_alignbit_b32 v50, v47, v56, 24 -; SI-NEXT: v_alignbit_b32 v54, v47, v56, 16 -; SI-NEXT: v_alignbit_b32 v41, v47, v56, 8 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v25 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v25 -; SI-NEXT: v_lshrrev_b32_e32 v34, 24, v30 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v30 -; SI-NEXT: v_lshrrev_b32_e32 v52, 24, v35 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v35 -; SI-NEXT: v_lshrrev_b32_e32 v43, 8, v35 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v48 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v48 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v48 -; SI-NEXT: v_lshrrev_b32_e32 v58, 24, v53 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v53 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v53 -; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v42 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v42 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v42 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v47 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v47 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v47 -; SI-NEXT: s_cbranch_execnz .LBB37_3 -; SI-NEXT: .LBB37_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[20:21], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 16 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[20:21], 8 +; SI-NEXT: v_add_f32_e64 v26, s45, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s44, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[25:26], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[25:26], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 16 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[25:26], 8 +; SI-NEXT: v_add_f32_e64 v30, s29, 1.0 +; SI-NEXT: v_add_f32_e64 v29, s28, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:308 
; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[29:30], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[29:30], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[29:30], 8 +; SI-NEXT: v_add_f32_e64 v36, s27, 1.0 +; SI-NEXT: v_add_f32_e64 v35, s26, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[35:36], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[35:36], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[35:36], 8 +; SI-NEXT: v_add_f32_e64 v49, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v48, s24, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[17:18], v[48:49], 8 +; SI-NEXT: v_add_f32_e64 v53, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v52, s22, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 24 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 16 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 8 +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v2 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v2 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:228 ; 4-byte 
Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 16 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v2 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v4 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v4 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 16 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v4 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v6 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v6 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 16 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v6 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v8 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v8 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 16 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v8 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v10 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 
v19, v6, v5, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v10 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 16 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v10 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v12 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v12 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 16 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v12 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 16 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v21 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v21 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v21 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v26 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v26 +; SI-NEXT: v_add_f32_e64 v41, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v40, s20, 1.0 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v26 +; SI-NEXT: v_add_f32_e64 v58, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v57, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v46, s19, 
1.0 +; SI-NEXT: v_add_f32_e64 v45, s18, 1.0 +; SI-NEXT: v_lshr_b64 v[31:32], v[40:41], 16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v30 +; SI-NEXT: v_lshr_b64 v[32:33], v[40:41], 8 +; SI-NEXT: v_lshr_b64 v[37:38], v[45:46], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[57:58], 16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v30 +; SI-NEXT: v_lshr_b64 v[27:28], v[40:41], 24 +; SI-NEXT: v_lshr_b64 v[33:34], v[45:46], 24 +; SI-NEXT: v_lshr_b64 v[38:39], v[45:46], 8 +; SI-NEXT: v_lshr_b64 v[50:51], v[57:58], 24 +; SI-NEXT: v_lshr_b64 v[43:44], v[57:58], 8 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v30 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v55, 24, v36 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v36 +; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v49 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v49 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v53 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v17, 8, v53 +; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v41 +; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v41 +; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v41 +; SI-NEXT: v_lshrrev_b32_e32 v18, 24, v46 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v46 +; SI-NEXT: v_lshrrev_b32_e32 v23, 8, v46 +; SI-NEXT: v_lshrrev_b32_e32 v39, 24, v58 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v58 +; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v58 +; SI-NEXT: s_branch .LBB37_5 +; SI-NEXT: .LBB37_3: +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 0 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 1 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 2 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: 
v_writelane_b32 v61, s49, 3 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 4 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 5 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 6 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 7 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 8 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 9 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 10 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 11 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 12 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 13 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 14 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 15 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 16 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 17 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 18 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 19 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 20 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 21 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 22 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 23 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 24 +; 
SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 25 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 26 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 27 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 28 +; SI-NEXT: v_writelane_b32 v61, s49, 29 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 30 +; SI-NEXT: v_writelane_b32 v61, s49, 31 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 32 +; SI-NEXT: v_writelane_b32 v61, s49, 33 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: s_branch .LBB37_2 +; SI-NEXT: .LBB37_4: +; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 34 +; SI-NEXT: v_mov_b32_e32 v54, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 35 +; SI-NEXT: v_mov_b32_e32 v51, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 36 +; SI-NEXT: v_mov_b32_e32 v39, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 37 +; SI-NEXT: v_mov_b32_e32 v23, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 38 +; SI-NEXT: v_mov_b32_e32 v22, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 39 +; SI-NEXT: v_mov_b32_e32 v18, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 40 +; SI-NEXT: v_mov_b32_e32 v34, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 41 +; SI-NEXT: v_mov_b32_e32 v28, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 42 +; SI-NEXT: v_mov_b32_e32 v24, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 43 +; SI-NEXT: v_mov_b32_e32 v17, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 44 +; SI-NEXT: v_mov_b32_e32 v19, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 45 +; SI-NEXT: v_mov_b32_e32 v14, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 46 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v60, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 47 +; SI-NEXT: v_mov_b32_e32 v59, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 48 +; SI-NEXT: v_mov_b32_e32 v56, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 49 +; SI-NEXT: v_mov_b32_e32 v47, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 50 +; SI-NEXT: v_mov_b32_e32 v44, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 51 +; SI-NEXT: v_mov_b32_e32 v55, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 52 +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 53 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 54 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 55 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:464 ; 4-byte Folded 
Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 16 -; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 -; SI-NEXT: v_add_f32_e32 v33, 1.0, v33 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 56 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 57 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 58 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 16 -; SI-NEXT: v_add_f32_e32 v35, 1.0, v35 -; SI-NEXT: v_add_f32_e32 v38, 1.0, v38 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 59 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 60 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 61 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 16 -; SI-NEXT: v_add_f32_e32 v48, 1.0, v48 -; SI-NEXT: v_add_f32_e32 v51, 1.0, v51 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 62 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 63 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 1 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 8 -; SI-NEXT: 
buffer_store_dword v19, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 2 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 3 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 5 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 6 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 7 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 8 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 9 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 10 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 11 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 12 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 
4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 13 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 14 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 15 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 17 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 32 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v2 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v2 -; SI-NEXT: v_add_f32_e32 v47, 1.0, v47 -; SI-NEXT: v_add_f32_e32 v56, 1.0, v56 -; SI-NEXT: v_add_f32_e32 v42, 1.0, v42 -; SI-NEXT: v_add_f32_e32 v44, 1.0, v44 -; SI-NEXT: v_add_f32_e32 v53, 1.0, v53 -; SI-NEXT: v_add_f32_e32 v40, 1.0, v40 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v2 
-; SI-NEXT: v_alignbit_b32 v23, v53, v40, 24 -; SI-NEXT: v_alignbit_b32 v26, v53, v40, 16 -; SI-NEXT: v_alignbit_b32 v29, v53, v40, 8 -; SI-NEXT: v_alignbit_b32 v32, v42, v44, 24 -; SI-NEXT: v_alignbit_b32 v36, v42, v44, 16 -; SI-NEXT: v_alignbit_b32 v39, v42, v44, 8 -; SI-NEXT: v_alignbit_b32 v50, v47, v56, 24 -; SI-NEXT: v_alignbit_b32 v54, v47, v56, 16 -; SI-NEXT: v_alignbit_b32 v41, v47, v56, 8 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v25 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v25 -; SI-NEXT: v_lshrrev_b32_e32 v34, 24, v30 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v30 -; SI-NEXT: v_lshrrev_b32_e32 v52, 24, v35 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v35 -; SI-NEXT: v_lshrrev_b32_e32 v43, 8, v35 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v48 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v48 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v48 -; SI-NEXT: v_lshrrev_b32_e32 v58, 24, v53 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v53 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v53 -; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v42 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v42 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v42 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v47 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v47 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v47 -; SI-NEXT: .LBB37_3: ; %end -; SI-NEXT: v_and_b32_e32 v56, 0xff, v56 -; SI-NEXT: v_lshlrev_b32_e32 v41, 8, v41 -; SI-NEXT: v_and_b32_e32 v54, 0xff, v54 -; SI-NEXT: v_or_b32_e32 v41, v56, v41 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v54 +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s48 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 33 +; SI-NEXT: v_readlane_b32 s4, v61, 30 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 31 +; SI-NEXT: v_readlane_b32 s4, v61, 28 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 29 +; SI-NEXT: v_readlane_b32 s4, v61, 26 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 27 +; SI-NEXT: v_readlane_b32 s4, v61, 24 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 25 +; SI-NEXT: v_readlane_b32 s4, v61, 22 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: 
buffer_store_dword v13, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 23 +; SI-NEXT: v_readlane_b32 s4, v61, 20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 21 +; SI-NEXT: v_readlane_b32 s4, v61, 18 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 19 +; SI-NEXT: v_readlane_b32 s4, v61, 16 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 17 +; SI-NEXT: v_readlane_b32 s4, v61, 14 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 15 +; SI-NEXT: v_readlane_b32 s4, v61, 12 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 13 +; SI-NEXT: v_readlane_b32 s4, v61, 10 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 11 +; SI-NEXT: v_readlane_b32 s4, v61, 8 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 9 +; SI-NEXT: v_readlane_b32 s4, v61, 6 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 7 +; SI-NEXT: v_readlane_b32 s4, v61, 4 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 5 +; SI-NEXT: v_readlane_b32 s4, v61, 2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 3 +; SI-NEXT: v_readlane_b32 s4, v61, 0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v14, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s50 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s52 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s54 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s64 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s66 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s68 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s70 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s80 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s82 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s84 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s86 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s96 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s98 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s46 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s56 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:380 ; 
4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s58 +; SI-NEXT: v_mov_b32_e32 v27, s62 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v27, s72 +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v27, s74 +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v27, s76 +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v57, s16 +; SI-NEXT: v_mov_b32_e32 v58, s17 +; SI-NEXT: v_mov_b32_e32 v45, s18 +; SI-NEXT: v_mov_b32_e32 v46, s19 +; SI-NEXT: v_mov_b32_e32 v40, s20 +; SI-NEXT: v_mov_b32_e32 v41, s21 +; SI-NEXT: v_mov_b32_e32 v52, s22 +; SI-NEXT: v_mov_b32_e32 v53, s23 +; SI-NEXT: v_mov_b32_e32 v48, s24 +; SI-NEXT: v_mov_b32_e32 v49, s25 +; SI-NEXT: v_mov_b32_e32 v35, s26 +; SI-NEXT: v_mov_b32_e32 v36, s27 +; SI-NEXT: v_mov_b32_e32 v29, s28 +; SI-NEXT: v_mov_b32_e32 v30, s29 +; SI-NEXT: v_mov_b32_e32 v25, s44 +; SI-NEXT: v_mov_b32_e32 v26, s45 +; SI-NEXT: v_mov_b32_e32 v20, s42 +; SI-NEXT: v_mov_b32_e32 v21, s43 +; SI-NEXT: v_mov_b32_e32 v15, s40 +; SI-NEXT: v_mov_b32_e32 v16, s41 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v12, s15 +; SI-NEXT: v_mov_b32_e32 v9, s12 +; SI-NEXT: v_mov_b32_e32 v10, s13 +; SI-NEXT: v_mov_b32_e32 v7, s10 +; SI-NEXT: v_mov_b32_e32 v8, s11 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: v_mov_b32_e32 v6, s9 +; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v4, s7 +; SI-NEXT: v_readlane_b32 s5, v61, 1 +; SI-NEXT: v_mov_b32_e32 v13, s60 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v27, s78 +; SI-NEXT: v_mov_b32_e32 v31, s88 +; SI-NEXT: v_mov_b32_e32 v32, s90 +; SI-NEXT: v_mov_b32_e32 v33, s92 +; SI-NEXT: v_mov_b32_e32 v37, s94 +; SI-NEXT: v_mov_b32_e32 v38, s30 +; SI-NEXT: v_mov_b32_e32 v50, s34 +; SI-NEXT: v_mov_b32_e32 v42, s36 +; SI-NEXT: v_mov_b32_e32 v43, s38 +; SI-NEXT: .LBB37_5: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v43 +; SI-NEXT: v_and_b32_e32 v57, 0xff, v57 +; SI-NEXT: v_and_b32_e32 v42, 0xff, v42 +; SI-NEXT: v_or_b32_e32 v43, v57, v43 ; SI-NEXT: v_lshlrev_b32_e32 v50, 24, v50 -; SI-NEXT: v_or_b32_e32 v50, v50, v54 -; SI-NEXT: v_and_b32_e32 v54, 0xffff, v41 -; SI-NEXT: v_or_b32_e32 v50, v54, v50 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v42 +; SI-NEXT: v_or_b32_e32 v50, v50, v42 +; SI-NEXT: v_and_b32_e32 v42, 0xffff, v43 +; SI-NEXT: v_or_b32_e32 v50, v42, v50 ; SI-NEXT: buffer_store_dword v50, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v50, 0xff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_or_b32_e32 v21, v50, v21 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: 
v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v44 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v39 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v32 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v42 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v63 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v62 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v61 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v40 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v29 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v23 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v53 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v60 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v59 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v58 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v50, 0xff, v58 +; SI-NEXT: v_lshlrev_b32_e32 v54, 8, v54 +; SI-NEXT: v_and_b32_e32 v51, 0xff, v51 +; SI-NEXT: v_or_b32_e32 v50, v50, v54 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v39, 24, v39 +; SI-NEXT: v_or_b32_e32 v39, v39, v51 +; SI-NEXT: v_and_b32_e32 v50, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v39, v50, v39 +; SI-NEXT: v_add_i32_e32 v50, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v39, v50, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v38 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v39, 0xff, v45 +; SI-NEXT: v_and_b32_e32 v37, 0xff, v37 +; SI-NEXT: v_or_b32_e32 v38, v39, v38 +; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v33 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 +; SI-NEXT: v_or_b32_e32 v33, v33, v37 +; SI-NEXT: v_and_b32_e32 v37, 0xffff, v38 +; SI-NEXT: v_or_b32_e32 v33, v37, v33 +; SI-NEXT: v_add_i32_e32 v37, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v33, v37, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v51 +; SI-NEXT: v_and_b32_e32 v33, 0xff, v46 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 +; SI-NEXT: v_or_b32_e32 v23, v33, v23 +; 
SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v18 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v18, v22, v18 +; SI-NEXT: v_add_i32_e32 v22, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v18, v22, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v32 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v40 +; SI-NEXT: v_or_b32_e32 v18, v22, v18 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v31 +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v27 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_or_b32_e32 v22, v23, v22 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v18, v22, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v18, 0xff, v41 +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v34 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v24 +; SI-NEXT: v_or_b32_e32 v22, v23, v22 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v18, v22, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s99, v63, 35 +; SI-NEXT: v_readlane_b32 s98, v63, 34 +; SI-NEXT: v_readlane_b32 s97, v63, 33 +; SI-NEXT: v_readlane_b32 s96, v63, 32 +; SI-NEXT: v_readlane_b32 s87, v63, 31 +; SI-NEXT: v_readlane_b32 s86, v63, 30 +; SI-NEXT: v_readlane_b32 s85, v63, 29 +; SI-NEXT: v_readlane_b32 s84, v63, 28 +; SI-NEXT: v_readlane_b32 s83, v63, 27 +; SI-NEXT: v_readlane_b32 s82, v63, 26 +; SI-NEXT: v_readlane_b32 s81, v63, 25 +; SI-NEXT: v_readlane_b32 s80, v63, 24 +; SI-NEXT: v_readlane_b32 s71, v63, 23 +; SI-NEXT: v_readlane_b32 s70, v63, 22 +; SI-NEXT: v_readlane_b32 s69, v63, 21 +; SI-NEXT: v_readlane_b32 s68, v63, 20 +; SI-NEXT: v_readlane_b32 s67, v63, 19 +; SI-NEXT: v_readlane_b32 s66, v63, 18 +; SI-NEXT: v_readlane_b32 s65, v63, 17 +; SI-NEXT: v_readlane_b32 s64, v63, 16 +; SI-NEXT: v_readlane_b32 s55, v63, 15 +; SI-NEXT: v_readlane_b32 s54, v63, 14 +; SI-NEXT: v_readlane_b32 s53, v63, 13 +; SI-NEXT: v_readlane_b32 s52, v63, 12 +; SI-NEXT: v_readlane_b32 s51, v63, 11 +; SI-NEXT: v_readlane_b32 s50, v63, 10 +; SI-NEXT: v_readlane_b32 s49, v63, 9 +; SI-NEXT: v_readlane_b32 s48, v63, 8 +; SI-NEXT: v_readlane_b32 s39, v63, 7 +; SI-NEXT: v_readlane_b32 s38, v63, 6 +; SI-NEXT: v_readlane_b32 s37, v63, 5 +; SI-NEXT: v_readlane_b32 s36, v63, 4 +; SI-NEXT: v_readlane_b32 s35, v63, 3 +; SI-NEXT: v_readlane_b32 s34, v63, 2 +; SI-NEXT: v_readlane_b32 s31, v63, 1 +; SI-NEXT: v_readlane_b32 s30, v63, 0 +; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v22 +; SI-NEXT: v_and_b32_e32 v22, 
0xff, v52 +; SI-NEXT: v_or_b32_e32 v18, v22, v18 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_or_b32_e32 v22, v23, v22 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v18, v22, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v18, 0xff, v53 +; SI-NEXT: v_or_b32_e32 v17, v18, v17 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_or_b32_e32 v14, v14, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_add_i32_e32 v17, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v14, v17, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v17 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v48 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v13, v17, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: v_add_i32_e32 v14, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v57 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v46 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v45 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v49 +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v60 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v59 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v56 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, 
vcc, 36, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v38 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v35 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v43 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v55 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v52 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v36 +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v47 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v55 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v33 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v29 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, 
v14, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v49 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v34 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v28 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v30 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v31 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v24 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: 
v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v25 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 -; SI-NEXT: v_or_b32_e32 v1, v1, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xff, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v20 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_or_b32_e32 v1, v1, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v19, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v26 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v19, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, 
vcc, 60, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xff, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v21 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v5 -; SI-NEXT: 
buffer_load_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v15, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v6 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v16 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v15 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v15, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v7 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 
offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 +; SI-NEXT: v_add_i32_e32 v13, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v8 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v11, 0xff, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_or_b32_e32 v12, v13, v12 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: v_add_i32_e32 v12, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v9 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; 
SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v12 +; SI-NEXT: v_or_b32_e32 v11, v12, v11 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 +; SI-NEXT: v_add_i32_e32 v11, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v10 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xff, v10 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; SI-NEXT: v_or_b32_e32 v9, v9, v10 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: v_or_b32_e32 v9, v9, v10 +; SI-NEXT: v_add_i32_e32 v10, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: 
buffer_load_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v8 +; SI-NEXT: v_or_b32_e32 v7, v7, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_or_b32_e32 v8, v9, v8 +; SI-NEXT: v_or_b32_e32 v7, v7, v8 +; SI-NEXT: v_add_i32_e32 v8, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; 
SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v14 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v15 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 
v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 0x74, v0 +; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v17 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte 
Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v18 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword 
v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB37_4: -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: 
$vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: s_branch .LBB37_2 ; ; VI-LABEL: bitcast_v32f32_to_v128i8_scalar: ; VI: ; %bb.0: @@ -53997,8 +54926,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v30, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 ; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 @@ -54006,133 +54942,93 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:16 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24 ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:80 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:88 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:96 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:104 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:112 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:80 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:88 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:96 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:104 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:112 ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:152 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:160 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:168 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:176 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:128 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:136 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:144 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:152 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:160 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:168 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:176 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_lshlrev_b32_e32 v1, 8, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v9 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v11 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v13 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v17 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v21 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v3 -; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v62, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v30, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v63, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v27 +; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v19 +; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v25 +; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v29 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v23 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v25 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v29 +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v45 +; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v44 +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v43 +; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v42 +; SI-NEXT: v_lshlrev_b32_e32 v41, 8, v41 +; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v55 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v53 +; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v40 +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v45 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v44 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v52 +; SI-NEXT: buffer_store_dword 
v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v51 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v43 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v42 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v34 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v41 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v40 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v55 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v53 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v52 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v39 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v51 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v48 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v33 
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v35 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:200 @@ -54141,31 +55037,31 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:224 ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:232 ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v32 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:248 ; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:256 @@ -54177,140 +55073,206 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:304 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 ; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:52 ; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:84 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:92 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:100 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:108 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:116 -; SI-NEXT: buffer_load_dword v50, off, 
s[0:3], s32 offset:124 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132 -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:164 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:172 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:180 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:196 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:212 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:220 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:228 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:84 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:92 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:100 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:108 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:116 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:124 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:132 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:140 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:148 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:156 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:164 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:172 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:180 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:196 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:220 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:228 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:244 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:252 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:260 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:268 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded 
Spill -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:244 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:252 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:268 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:276 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:284 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:292 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:300 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:308 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:316 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v43, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:276 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:284 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:308 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:324 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; 
SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword 
v51, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB39_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v57, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 -; SI-NEXT: v_or_b32_e32 v0, v0, v60 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 +; SI-NEXT: v_or_b32_e32 v0, v0, v20 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v4, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 +; SI-NEXT: v_or_b32_e32 v0, v0, v30 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v30, v1 +; SI-NEXT: v_or_b32_e32 v1, v47, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v6, v0, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_mov_b32_e32 v30, v5 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 +; SI-NEXT: v_or_b32_e32 v2, v2, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v0, v0, v61 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v26, v3 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v5, v2, v3 +; SI-NEXT: v_mov_b32_e32 v3, v7 +; SI-NEXT: v_mov_b32_e32 v2, v9 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 @@ -54319,306 +55281,277 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_lshl_b32 s7, s23, 24 ; SI-NEXT: s_lshl_b32 s8, s27, 24 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v4, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v5, v2, v3 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 -; SI-NEXT: v_mov_b32_e32 v3, v7 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 
4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v6, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_or_b32_e32 v0, v0, v15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v58, v1 ; SI-NEXT: v_or_b32_e32 v7, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v20 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v57 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: v_or_b32_e32 v8, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: v_mov_b32_e32 v2, v9 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v9, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v26 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v28 +; 
SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v10, v1 +; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v10, v0, v1 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v29, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v0, v15 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v11, v1 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v11, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v23 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v50 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 +; SI-NEXT: v_or_b32_e32 v0, v0, v17 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v12, v1 +; SI-NEXT: v_or_b32_e32 v1, v44, v1 ; SI-NEXT: v_or_b32_e32 v12, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v25 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v45 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, v0, v41 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v13, v1 +; SI-NEXT: v_or_b32_e32 v1, v25, v1 ; SI-NEXT: v_or_b32_e32 v13, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v58 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v58, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: 
v_and_b32_e32 v1, 0xff, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v36 +; SI-NEXT: v_or_b32_e32 v0, v0, v40 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v14, v1 +; SI-NEXT: v_or_b32_e32 v1, v21, v1 ; SI-NEXT: v_or_b32_e32 v14, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v27 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v60, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v62 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v18 +; SI-NEXT: v_or_b32_e32 v0, v0, v32 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_or_b32_e32 v1, v15, v1 ; SI-NEXT: v_or_b32_e32 v15, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v43 -; SI-NEXT: v_mov_b32_e32 v43, v16 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_mov_b32_e32 v50, v16 +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v21 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v48 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v16, v1 ; SI-NEXT: v_or_b32_e32 v16, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 +; SI-NEXT: v_mov_b32_e32 v48, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_mov_b32_e32 v32, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v34 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v17, v1 ; SI-NEXT: v_or_b32_e32 v17, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 -; SI-NEXT: v_mov_b32_e32 v55, v22 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v51, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v33 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v40, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v44 +; SI-NEXT: 
v_and_b32_e32 v1, 0xff, v35 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v18, v1 ; SI-NEXT: v_or_b32_e32 v18, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v50 -; SI-NEXT: v_mov_b32_e32 v44, v23 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v50, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v39 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v57, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v63 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v38 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v19, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v61 -; SI-NEXT: v_mov_b32_e32 v61, v45 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v54 +; SI-NEXT: v_mov_b32_e32 v54, v23 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v40 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v37 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v20, v1 ; SI-NEXT: v_or_b32_e32 v20, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v52 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v21, v1 ; SI-NEXT: v_or_b32_e32 v21, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v59 -; SI-NEXT: v_mov_b32_e32 v59, v24 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v28 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 +; 
SI-NEXT: v_and_b32_e32 v1, 0xff, v49 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v22, v1 ; SI-NEXT: v_or_b32_e32 v22, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v61 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v39, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_mov_b32_e32 v45, v24 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v34, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v23, v1 ; SI-NEXT: v_or_b32_e32 v23, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v59 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v58 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v24, v1 ; SI-NEXT: v_or_b32_e32 v24, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v42 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v43 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v1 +; SI-NEXT: v_mov_b32_e32 v43, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v25, v1 ; SI-NEXT: v_or_b32_e32 v25, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v45 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v56 +; SI-NEXT: v_mov_b32_e32 v55, v56 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v56 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v42 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v63, v1 +; SI-NEXT: v_or_b32_e32 v1, v33, v1 ; SI-NEXT: v_or_b32_e32 v26, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v48 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v46 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: v_mov_b32_e32 v46, v33 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v32, v1 +; SI-NEXT: v_mov_b32_e32 v37, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v46 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v27, v1 ; SI-NEXT: v_or_b32_e32 v27, v0, v1 -; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v28, v1 ; SI-NEXT: v_or_b32_e32 v28, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v37 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v62 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v62, v1 +; SI-NEXT: v_mov_b32_e32 v36, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v56 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v29, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v36 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 -; SI-NEXT: v_or_b32_e32 v0, v0, v30 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v49 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v30, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 ; SI-NEXT: v_or_b32_e32 v0, v0, v3 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v57, v1 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v31, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v40 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v44 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: v_mov_b32_e32 v34, v35 -; SI-NEXT: v_mov_b32_e32 v35, v36 -; SI-NEXT: v_mov_b32_e32 v36, v54 -; SI-NEXT: v_mov_b32_e32 v54, v37 -; SI-NEXT: v_mov_b32_e32 v37, v41 -; SI-NEXT: v_mov_b32_e32 v41, v38 -; SI-NEXT: v_mov_b32_e32 v38, v63 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_or_b32_e32 v3, s4, v0 @@ -54645,108 +55578,112 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_or_b32 s6, s6, s7 -; SI-NEXT: v_mov_b32_e32 v57, v1 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; 
SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_mov_b64 s[4:5], 0 ; SI-NEXT: s_branch .LBB39_3 ; SI-NEXT: .LBB39_2: -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v55, v56 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v61, v45 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 
offset:540 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 s[4:5], -1 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mov_b32_e32 v45, v33 -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: v_mov_b32_e32 v34, v35 -; SI-NEXT: v_mov_b32_e32 v35, v36 -; SI-NEXT: v_mov_b32_e32 v36, v54 -; SI-NEXT: v_mov_b32_e32 v54, v37 -; SI-NEXT: v_mov_b32_e32 v37, v41 -; SI-NEXT: v_mov_b32_e32 v41, v38 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload ; SI-NEXT: .LBB39_3: ; %Flow -; SI-NEXT: v_mov_b32_e32 v63, v46 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_mov_b32_e32 v35, v57 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; SI-NEXT: s_cbranch_vccnz .LBB39_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v0, s4, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_and_b32 s4, s16, 0xff ; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s6, s18, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_and_b32 s8, s26, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, s4, v0 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s5, s20, 0xff ; SI-NEXT: s_lshl_b32 s6, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_and_b32 s7, s22, 0xff ; 
SI-NEXT: s_addk_i32 s5, 0x300 ; SI-NEXT: s_lshl_b32 s6, s23, 24 -; SI-NEXT: s_lshl_b32 s7, s7, 16 ; SI-NEXT: s_and_b32 s5, s5, 0xffff ; SI-NEXT: s_or_b32 s6, s6, s7 -; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_and_b32 s6, s24, 0xff ; SI-NEXT: s_lshl_b32 s7, s25, 8 -; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: s_and_b32 s8, s26, 0xff ; SI-NEXT: s_addk_i32 s6, 0x300 ; SI-NEXT: s_lshl_b32 s7, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_add_i32 s4, s4, 0x3000000 ; SI-NEXT: s_add_i32 s5, s5, 0x3000000 ; SI-NEXT: s_add_i32 s6, s6, 0x3000000 -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54755,17 +55692,17 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54775,15 +55712,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded 
Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54793,15 +55730,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54811,15 +55748,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54829,15 +55766,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:440 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54847,15 +55784,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54865,15 +55802,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54882,16 +55819,17 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:772 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54901,15 +55839,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -54919,84 +55857,79 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v63, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; 
SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v43, v1 +; SI-NEXT: v_or_b32_e32 v1, v50, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 @@ -55005,15 +55938,15 @@ define inreg <32 x float> 
@bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v50, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 @@ -55022,15 +55955,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -55040,9 +55973,9 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 @@ -55058,106 +55991,110 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 
offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v55, v1 +; SI-NEXT: v_or_b32_e32 v1, v48, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x3000000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 -; SI-NEXT: v_or_b32_e32 v0, v39, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_or_b32_e32 v1, v44, v1 +; SI-NEXT: v_or_b32_e32 v1, v54, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_or_b32_e32 v1, v59, v1 +; SI-NEXT: v_or_b32_e32 v1, v45, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_or_b32_e32 v0, v42, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 
offset:416 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v38, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v46, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v41 +; SI-NEXT: v_or_b32_e32 v0, v37, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -55165,14 +56102,14 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v0 
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -55180,14 +56117,14 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -55217,7 +56154,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v128i8_to_v32f32_scalar: @@ -55239,113 +56176,115 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; VI-NEXT: 
buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24 -; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104 -; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:112 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:48 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:80 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:88 +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:96 +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:104 +; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112 ; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 -; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:144 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:152 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:128 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:136 +; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:144 +; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:152 +; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:160 +; VI-NEXT: buffer_load_ushort v39, 
off, s[0:3], s32 offset:168 +; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:176 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v17 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v5 -; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v17 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v43 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v42 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v41 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v40 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v55 -; VI-NEXT: 
buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v54 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v53 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v52 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v51 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v50 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v22 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v30 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], 
s32 offset:184 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 @@ -55354,29 +56293,28 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 -; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v37 -; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v38 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v13 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v9 ; VI-NEXT: s_waitcnt vmcnt(5) -; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v13 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v3 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v7 ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 @@ -55386,130 +56324,141 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1 +; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v13 ; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; 
VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 ; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328 ; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12 +; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20 +; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:28 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 +; VI-NEXT: s_waitcnt vmcnt(12) +; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v9 +; VI-NEXT: s_waitcnt vmcnt(10) +; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v7 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v37, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84 -; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:92 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:100 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:108 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:116 -; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:124 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 +; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:76 +; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:84 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:92 +; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:100 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:108 +; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:116 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124 ; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140 -; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148 -; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156 -; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:172 -; VI-NEXT: buffer_load_ushort v22, 
off, s[0:3], s32 offset:180 -; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:188 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:196 -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:204 -; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212 -; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220 -; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228 -; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:236 -; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244 -; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:252 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:260 -; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:268 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:276 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:284 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:140 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:148 +; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:156 +; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:164 +; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:172 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:180 +; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:188 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:196 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:204 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:212 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:220 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:228 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:244 +; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:252 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:260 +; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:268 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:284 ; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:292 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:308 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:316 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:324 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(12) -; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded 
Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:300 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:308 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:316 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:324 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 
offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill 
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill ; VI-NEXT: s_cbranch_scc0 .LBB39_2 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: v_or_b32_sdwa v0, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v4, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v4, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v2, v6, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -55518,208 +56467,197 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v3, v7 
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v29, v9 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v50, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v59, v0 -; 
VI-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v47, v1 +; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v56, v0 -; VI-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v39, v0 -; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v46, v0 +; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v37, v0 -; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v62, v0 +; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v36, v0 -; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v35, v1 -; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v63, v1 +; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v33, v0 -; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_mov_b32_e32 v60, v0 +; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v21, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v51, v3 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_mov_b32_e32 v35, v0 
+; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v34, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v22, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v34, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v23, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v43, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v43, v49 -; VI-NEXT: v_or_b32_sdwa v0, v30, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v32, v54 -; VI-NEXT: v_mov_b32_e32 v34, v26 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v49, v1 -; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v59, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v54, v0 -; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v32, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v32, v61 +; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v55, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v55, v43 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v46, v61 +; VI-NEXT: v_or_b32_sdwa v0, v42, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v54, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 
src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v53, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v45, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v41, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v44, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v41, v33 ; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v58, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v44, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v47, v45 +; VI-NEXT: v_mov_b32_e32 v44, v56 +; VI-NEXT: v_or_b32_sdwa v0, v56, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v38, v39 +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v58, v44 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v48, v0 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v42, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v54, v53 +; VI-NEXT: v_mov_b32_e32 v52, v36 +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v0, v36, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v1, v33, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v50, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v40, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v63, v42 
-; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v49, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v51, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v48, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v39, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v50, v40 +; VI-NEXT: v_mov_b32_e32 v49, v51 +; VI-NEXT: v_mov_b32_e32 v40, v34 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v57, v0 -; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -55750,85 +56688,95 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: s_mov_b64 s[4:5], 0 ; VI-NEXT: s_branch .LBB39_3 ; VI-NEXT: .LBB39_2: -; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; 
VI-NEXT: v_mov_b32_e32 v32, v54 -; VI-NEXT: v_mov_b32_e32 v43, v49 -; VI-NEXT: v_mov_b32_e32 v46, v61 -; VI-NEXT: v_mov_b32_e32 v47, v45 -; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v34, v26 -; VI-NEXT: v_mov_b32_e32 v58, v44 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_mov_b32_e32 v63, v42 -; VI-NEXT: v_mov_b32_e32 v51, v7 -; VI-NEXT: v_mov_b32_e32 v48, v29 +; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v44, v56 +; VI-NEXT: v_mov_b32_e32 v41, v33 +; VI-NEXT: v_mov_b32_e32 v50, v40 +; VI-NEXT: v_mov_b32_e32 v38, v39 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v54, v53 +; VI-NEXT: v_mov_b32_e32 v52, v36 +; VI-NEXT: v_mov_b32_e32 v49, v51 ; VI-NEXT: s_mov_b64 s[4:5], -1 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; VI-NEXT: .LBB39_3: ; %Flow +; VI-NEXT: v_mov_b32_e32 v51, v41 +; VI-NEXT: v_mov_b32_e32 v36, v44 +; VI-NEXT: v_mov_b32_e32 v53, v54 +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_mov_b32_e32 v54, v60 +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:416 ; 4-byte Folded 
Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; VI-NEXT: v_mov_b32_e32 v44, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_mov_b32_e32 v46, v49 ; VI-NEXT: s_cbranch_vccnz .LBB39_5 ; VI-NEXT: ; %bb.4: ; %cmp.true -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s5, s4 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52 ; VI-NEXT: s_addk_i32 s4, 0x300 -; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_e32 v0, s4, v0 ; VI-NEXT: s_add_i32 s16, s16, 3 -; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_lshl_b32 s5, s17, 8 ; VI-NEXT: s_add_i32 s18, s18, 3 +; VI-NEXT: s_lshl_b32 s6, s19, 8 +; VI-NEXT: s_add_i32 s20, s20, 3 +; VI-NEXT: s_add_i32 s22, s22, 3 +; VI-NEXT: s_lshl_b32 s7, s23, 8 +; VI-NEXT: s_add_i32 s24, s24, 3 +; VI-NEXT: s_add_i32 s26, s26, 3 +; VI-NEXT: s_lshl_b32 s8, s27, 8 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_e32 v0, s4, v0 +; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s18, 0xff -; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_addk_i32 s4, 0x300 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s4, s4, 0xffff ; VI-NEXT: s_lshl_b32 s5, s5, 16 -; VI-NEXT: s_add_i32 s20, s20, 3 ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s20, 0xff ; VI-NEXT: s_lshl_b32 s6, s21, 8 -; VI-NEXT: s_add_i32 s22, s22, 3 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s6, s22, 0xff -; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_addk_i32 s5, 0x300 ; VI-NEXT: s_or_b32 s6, s7, s6 ; VI-NEXT: s_and_b32 s5, s5, 0xffff ; VI-NEXT: s_lshl_b32 s6, s6, 16 -; VI-NEXT: s_add_i32 s24, s24, 3 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s6, s24, 0xff ; VI-NEXT: s_lshl_b32 s7, s25, 8 -; VI-NEXT: s_add_i32 s26, s26, 3 ; VI-NEXT: s_or_b32 s6, s7, s6 ; VI-NEXT: s_and_b32 s7, s26, 0xff -; VI-NEXT: s_lshl_b32 s8, s27, 8 ; VI-NEXT: s_addk_i32 s6, 0x300 ; VI-NEXT: s_or_b32 s7, s8, s7 ; VI-NEXT: s_and_b32 s6, s6, 0xffff @@ -55837,26 +56785,25 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 ; VI-NEXT: s_add_i32 s5, s5, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 -; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; 
VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 ; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 @@ -55864,8 +56811,8 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -55877,9 +56824,9 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -55891,14 +56838,14 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 @@ -55906,280 +56853,280 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; 
VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v35, v1 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded 
Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44 +; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; VI-NEXT: 
s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v58 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v63 -; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52 +; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v62 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v49 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 
-; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48 +; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v39 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v0 ; VI-NEXT: v_mov_b32_e32 v0, s4 @@ -56224,504 +57171,524 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:444 ; 4-byte 
Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 ; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:56 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:64 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:80 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:88 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:96 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:104 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:112 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:80 +; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:88 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:96 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:104 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:112 ; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:144 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:152 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:128 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:136 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:144 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:152 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:160 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:168 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:176 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v11 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v17 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 
4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v21 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29 ; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; GFX9-NEXT: s_waitcnt vmcnt(35) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43 -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: 
v_lshlrev_b32_e32 v1, 8, v32 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184 +; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v23 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v29 +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v27 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v45 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v43 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v34 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v42 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v55 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v49 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v32 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v39 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v37 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 
offset:472 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v24 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v40 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v54 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v36 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v31 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v38 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v48 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v50 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v51 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v52 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v53 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216 +; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:216 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232 +; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 +; GFX9-NEXT: v_lshlrev_b32_e32 v41, 8, v41 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v11 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v13 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; 
GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v5 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v9 ; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v4 -; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248 +; GFX9-NEXT: v_lshlrev_b32_e32 v40, 8, v7 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280 +; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:280 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296 +; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312 -; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v13 +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v11 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; GFX9-NEXT: 
buffer_load_ushort v1, off, s[0:3], s32 offset:320 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328 +; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328 ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 ; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 -; GFX9-NEXT: s_waitcnt vmcnt(14) -; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v5 +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v9 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_lshlrev_b32_e32 v45, 8, v7 ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 -; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:84 -; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:92 -; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:100 -; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:108 -; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:116 -; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:124 -; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140 -; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:148 -; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156 -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:164 -; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:172 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:180 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:188 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:196 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:204 -; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:212 -; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:228 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:236 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:252 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:260 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:268 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:276 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:300 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 
offset:308 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:316 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:324 -; GFX9-NEXT: s_waitcnt vmcnt(42) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(28) -; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v9, off, 
s[0:3], s32 offset:760 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:68 +; GFX9-NEXT: buffer_load_ushort v16, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:92 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:100 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108 +; GFX9-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:116 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:124 +; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:132 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:140 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:148 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:156 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:164 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:172 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:180 +; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:196 +; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:204 +; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:212 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:220 +; GFX9-NEXT: s_waitcnt vmcnt(29) +; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:228 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:236 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:244 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:252 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:268 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, 
off, s[0:3], s32 offset:284 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:292 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:308 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:316 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:324 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; GFX9-NEXT: 
buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB39_2 ; GFX9-NEXT: ; %bb.1: ; %cmp.false -; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v38, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v2, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v4, v28 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v2, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v3, v8, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s28, 0xff -; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:788 ; 4-byte 
Folded Spill -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s4, s4, 0xffff +; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: s_lshl_b32 s6, s19, 8 ; GFX9-NEXT: s_lshl_b32 s7, s23, 8 ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_or_b32_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; 
GFX9-NEXT: v_or_b32_sdwa v0, v0, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v13, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v61, v38 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v61, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v63, v57 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v37, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; 
GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v27, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v33, v43 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_mov_b32_e32 v47, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v22, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v58, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v21, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v51, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v57 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v38, v51 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v35, v62 +; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v37, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v60 -; GFX9-NEXT: v_mov_b32_e32 v52, v56 -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_mov_b32_e32 v34, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 
src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v44 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_or_b32_sdwa v0, v54, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: v_or_b32_sdwa v1, v32, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v51, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v53, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v39, v31 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v40, v30 ; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v36, v31 +; GFX9-NEXT: v_mov_b32_e32 v45, v62 +; GFX9-NEXT: v_mov_b32_e32 v46, v56 +; GFX9-NEXT: v_mov_b32_e32 v56, v58 +; GFX9-NEXT: v_mov_b32_e32 v58, v53 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s4, v0 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_or_b32 s4, s4, s5 @@ -56752,32 +57719,48 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; GFX9-NEXT: s_mov_b64 s[4:5], 0 ; GFX9-NEXT: s_branch .LBB39_3 ; GFX9-NEXT: .LBB39_2: -; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; 
GFX9-NEXT: v_mov_b32_e32 v61, v0 -; GFX9-NEXT: v_mov_b32_e32 v63, v57 -; GFX9-NEXT: v_mov_b32_e32 v53, v3 +; GFX9-NEXT: v_mov_b32_e32 v38, v51 +; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v33, v43 +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v35, v62 +; GFX9-NEXT: v_mov_b32_e32 v36, v31 +; GFX9-NEXT: v_mov_b32_e32 v40, v30 ; GFX9-NEXT: s_mov_b64 s[4:5], -1 -; GFX9-NEXT: v_mov_b32_e32 v57, v38 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; GFX9-NEXT: .LBB39_3: ; %Flow -; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v62, v35 +; GFX9-NEXT: v_mov_b32_e32 v35, v38 ; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; GFX9-NEXT: s_cbranch_vccnz .LBB39_5 ; GFX9-NEXT: ; %bb.4: ; %cmp.true -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v61 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 
offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s16, s16, 3 ; GFX9-NEXT: s_add_i32 s18, s18, 3 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff @@ -56824,348 +57807,352 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a, ; GFX9-NEXT: s_movk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s8, 0x300 ; GFX9-NEXT: s_and_b32 s8, s8, 0xffff +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s8, v0 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v60 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v38 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], 
s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; 
GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v42 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD 
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded 
Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v63 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: 
v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v53 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; 
GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v35 +; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v62 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v46 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v48 -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 
v0, 3, v45 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v63 +; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v39 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v54 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v45 -; GFX9-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v40 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v33 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v55 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v32 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v43 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v36 -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v44 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 +; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 
offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v42 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v48 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v41 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v55 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v32 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v35 -; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v39 +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v54 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v33 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v0, s5 @@ -63879,24 +64866,23 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded 
Spill -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:28 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:40 +; SI-NEXT: v_mov_b32_e32 v52, v30 +; SI-NEXT: v_mov_b32_e32 v53, v28 +; SI-NEXT: v_mov_b32_e32 v40, v12 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40 ; SI-NEXT: s_waitcnt expcnt(3) ; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48 ; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:44 ; SI-NEXT: s_waitcnt expcnt(0) @@ -63906,165 +64892,177 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:68 -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v4 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v14 +; SI-NEXT: v_mul_f32_e32 v14, 1.0, v0 ; SI-NEXT: v_mul_f32_e32 v0, 1.0, v6 -; SI-NEXT: v_mov_b32_e32 v39, v10 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mul_f32_e32 v0, 1.0, v8 -; SI-NEXT: v_mov_b32_e32 v38, v12 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v39 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, 
v38 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v15 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v10 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v18 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v55 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v30 -; SI-NEXT: v_mov_b32_e32 v37, v14 -; SI-NEXT: v_mov_b32_e32 v14, v11 -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v11, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v10, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v12, 1.0, v9 -; SI-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v58, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v56, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v54, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v44, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v46, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v59, 1.0, v9 +; SI-NEXT: v_mul_f32_e32 v57, 1.0, v11 ; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v47, 1.0, v40 +; SI-NEXT: v_mul_f32_e32 v45, 1.0, v15 ; SI-NEXT: v_mul_f32_e32 v15, 1.0, v17 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v16 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: v_mul_f32_e32 v16, 1.0, v19 +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v18 ; SI-NEXT: v_mul_f32_e32 v17, 1.0, v21 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v41, 1.0, v20 ; SI-NEXT: v_mul_f32_e32 v18, 1.0, v23 -; SI-NEXT: v_mul_f32_e32 v41, 1.0, v22 +; SI-NEXT: v_mul_f32_e32 v40, 1.0, v22 ; SI-NEXT: v_mul_f32_e32 v19, 1.0, v25 -; SI-NEXT: v_mul_f32_e32 v40, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v24 ; SI-NEXT: v_mul_f32_e32 v20, 1.0, v27 -; SI-NEXT: v_mul_f32_e32 v55, 1.0, v26 ; SI-NEXT: v_mul_f32_e32 v21, 1.0, v29 -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v28 -; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16 +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v53 +; SI-NEXT: v_mul_f32_e32 v52, 1.0, v52 ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v3, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v3, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v4, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v2, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v4, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v10, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s22 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v11, 1.0, s26 ; SI-NEXT: v_mul_f32_e64 v6, 1.0, s29 ; SI-NEXT: v_mul_f32_e64 v7, 1.0, s28 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48 +; SI-NEXT: v_mul_f32_e32 v48, 1.0, v26 +; SI-NEXT: v_mul_f32_e32 v22, 1.0, v51 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v43 -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v44 -; 
SI-NEXT: v_mul_f32_e32 v24, 1.0, v45 -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v46 -; SI-NEXT: v_mul_f32_e32 v25, 1.0, v47 -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v56 -; SI-NEXT: v_mul_f32_e32 v26, 1.0, v57 -; SI-NEXT: v_mul_f32_e32 v49, 1.0, v58 -; SI-NEXT: v_mul_f32_e32 v27, 1.0, v59 -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v60 -; SI-NEXT: v_mul_f32_e32 v28, 1.0, v61 +; SI-NEXT: v_mul_f32_e32 v23, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v51, 1.0, v50 +; SI-NEXT: v_mul_f32_e32 v24, 1.0, v38 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v39 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v30 +; SI-NEXT: v_mul_f32_e32 v26, 1.0, v28 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v31 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_mul_f32_e32 v38, 1.0, v60 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_mul_f32_e32 v28, 1.0, v42 +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_mul_f32_e32 v37, 1.0, v62 +; SI-NEXT: s_waitcnt vmcnt(10) ; SI-NEXT: v_mul_f32_e32 v29, 1.0, v63 +; SI-NEXT: s_waitcnt vmcnt(9) ; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v35 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_mul_f32_e32 v31, 1.0, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v36 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v35, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v36, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v42, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v33, 1.0, s22 -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:280 ; 4-byte 
Folded Spill +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mul_f32_e32 v33, 1.0, v35 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_mul_f32_e32 v42, 1.0, v36 +; SI-NEXT: v_mul_f32_e64 v12, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v34, 1.0, s24 +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 
offset:316 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB43_4 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB43_2 ; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v3, 16 +; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v10 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_alignbit_b32 v2, v2, v8, 16 +; SI-NEXT: v_alignbit_b32 v3, v3, v9, 16 ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_alignbit_b32 v6, v6, v7, 16 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v5, v5, v8, 16 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v1, v1, v35, 16 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_mov_b32_e32 v59, v2 -; SI-NEXT: v_alignbit_b32 v0, v0, v2, 16 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36 -; SI-NEXT: v_alignbit_b32 v2, v2, v42, 16 -; SI-NEXT: v_mov_b32_e32 v57, v11 -; SI-NEXT: v_mov_b32_e32 v47, v10 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_mov_b32_e32 v45, v12 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_alignbit_b32 v3, v3, v33, 16 -; SI-NEXT: v_mov_b32_e32 v33, v14 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v58 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v56 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v44 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_mov_b32_e32 v62, v38 +; SI-NEXT: v_alignbit_b32 v5, v5, v11, 16 +; SI-NEXT: v_alignbit_b32 v7, v7, v14, 16 +; SI-NEXT: v_alignbit_b32 v8, v8, v54, 16 +; SI-NEXT: v_alignbit_b32 v9, v9, v46, 16 +; SI-NEXT: v_mov_b32_e32 v62, v61 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v61 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v59 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: 
v_mov_b32_e32 v56, v47 +; SI-NEXT: v_alignbit_b32 v13, v13, v47, 16 +; SI-NEXT: v_mov_b32_e32 v46, v45 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v45 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v12 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v57 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 @@ -64072,16 +65070,6 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 -; SI-NEXT: v_mov_b32_e32 v61, v53 -; SI-NEXT: v_alignbit_b32 v15, v15, v53, 16 -; SI-NEXT: v_alignbit_b32 v17, v17, v39, 16 -; SI-NEXT: v_alignbit_b32 v18, v18, v41, 16 -; SI-NEXT: v_alignbit_b32 v19, v19, v40, 16 -; SI-NEXT: v_alignbit_b32 v20, v20, v55, 16 -; SI-NEXT: v_alignbit_b32 v21, v21, v54, 16 -; SI-NEXT: v_alignbit_b32 v29, v29, v32, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 ; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 @@ -64089,212 +65077,238 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg ; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v26 ; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v27 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 ; SI-NEXT: v_alignbit_b32 v30, v30, v31, 16 -; SI-NEXT: v_alignbit_b32 v23, v23, v52, 16 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 +; SI-NEXT: v_alignbit_b32 v4, v4, v34, 16 +; SI-NEXT: v_mov_b32_e32 v63, v44 +; SI-NEXT: v_mov_b32_e32 v58, v57 +; SI-NEXT: v_mov_b32_e32 v44, v43 +; SI-NEXT: v_alignbit_b32 v16, v16, v43, 16 +; SI-NEXT: v_mov_b32_e32 v43, v41 +; SI-NEXT: v_alignbit_b32 v17, v17, v41, 16 +; SI-NEXT: v_alignbit_b32 v18, v18, v40, 16 +; SI-NEXT: v_mov_b32_e32 v40, v55 +; SI-NEXT: v_alignbit_b32 v19, v19, v55, 16 +; SI-NEXT: v_alignbit_b32 v20, v20, v48, 16 +; SI-NEXT: v_mov_b32_e32 v48, v53 +; SI-NEXT: v_alignbit_b32 v21, v21, v53, 16 +; SI-NEXT: v_alignbit_b32 v22, v22, v52, 16 ; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_alignbit_b32 v24, v24, v51, 16 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_alignbit_b32 v25, v25, v50, 16 +; SI-NEXT: v_alignbit_b32 v23, v23, v51, 16 +; SI-NEXT: v_alignbit_b32 v24, v24, v50, 16 ; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_alignbit_b32 v26, v26, v49, 16 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: v_alignbit_b32 v27, v27, v48, 16 -; SI-NEXT: v_mov_b32_e32 v48, v37 +; SI-NEXT: v_alignbit_b32 v25, v25, v49, 16 +; SI-NEXT: v_mov_b32_e32 v36, v39 +; SI-NEXT: v_alignbit_b32 v26, v26, v39, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v34, v38 +; SI-NEXT: v_alignbit_b32 v27, v27, v38, 16 +; SI-NEXT: v_mov_b32_e32 v35, v37 ; SI-NEXT: v_alignbit_b32 v28, v28, v37, 16 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v37, v32 +; SI-NEXT: v_alignbit_b32 v29, v29, v32, 16 +; SI-NEXT: v_alignbit_b32 v31, v31, v42, 16 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v33, v42 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_alignbit_b32 v10, v10, v61, 
16 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_mov_b32_e32 v35, v7 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_alignbit_b32 v12, v12, v54, 16 +; SI-NEXT: v_mov_b32_e32 v41, v61 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_mov_b32_e32 v43, v8 -; SI-NEXT: v_alignbit_b32 v7, v7, v8, 16 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v9 +; SI-NEXT: v_alignbit_b32 v11, v11, v59, 16 +; SI-NEXT: v_mov_b32_e32 v55, v59 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v32 -; SI-NEXT: v_alignbit_b32 v31, v31, v34, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v60, v8 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_alignbit_b32 v8, v8, v9, 16 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v58, v11 -; SI-NEXT: v_alignbit_b32 v9, v9, v11, 16 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v56, v11 -; SI-NEXT: v_alignbit_b32 v10, v10, v11, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v46, v12 -; SI-NEXT: v_alignbit_b32 v11, v11, v12, 16 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v63, v14 -; SI-NEXT: v_alignbit_b32 v12, v12, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v44, v14 -; SI-NEXT: v_alignbit_b32 v13, v13, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v14, v14, v45, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v36, v14 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_alignbit_b32 v14, v14, v38, 16 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v53, v38 -; SI-NEXT: v_alignbit_b32 v16, v16, v38, 16 -; SI-NEXT: v_mov_b32_e32 v38, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v40 +; SI-NEXT: v_alignbit_b32 v15, v15, v47, 16 +; SI-NEXT: v_mov_b32_e32 v51, v47 +; SI-NEXT: v_mov_b32_e32 v53, v45 +; SI-NEXT: s_branch .LBB43_3 +; SI-NEXT: .LBB43_2: +; SI-NEXT: v_mov_b32_e32 v63, v44 +; SI-NEXT: v_mov_b32_e32 v44, v43 +; SI-NEXT: v_mov_b32_e32 v43, v41 ; SI-NEXT: v_mov_b32_e32 v40, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v22, v54, 16 -; SI-NEXT: s_cbranch_execnz .LBB43_3 -; SI-NEXT: .LBB43_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v59 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 
offset:268 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v35 +; SI-NEXT: v_mov_b32_e32 v48, v53 +; SI-NEXT: v_mov_b32_e32 v52, v51 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v62, v61 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v58, v57 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v46, v45 +; SI-NEXT: v_mov_b32_e32 v50, v49 +; SI-NEXT: v_mov_b32_e32 v36, v39 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v34, v38 +; SI-NEXT: v_mov_b32_e32 v35, v37 +; SI-NEXT: v_mov_b32_e32 v37, v32 +; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v33, v42 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: .LBB43_3: ; %Flow +; SI-NEXT: v_mov_b32_e32 v38, v50 +; SI-NEXT: v_mov_b32_e32 v39, v52 +; SI-NEXT: v_mov_b32_e32 v49, v40 +; SI-NEXT: v_mov_b32_e32 v50, v43 +; SI-NEXT: v_mov_b32_e32 v43, v44 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: s_cbranch_vccnz .LBB43_5 +; SI-NEXT: ; %bb.4: ; %cmp.true +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v44 ; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 ; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v60 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v40 ; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 ; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9 -; SI-NEXT: 
v_and_b32_e32 v10, 0xffff0000, v57 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v63 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v47 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v62 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v45 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v60 ; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 ; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v58 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v36 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v46 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 ; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v32 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload 
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 ; SI-NEXT: v_alignbit_b32 v1, v3, v2, 16 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 ; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 ; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 ; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 ; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 +; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 ; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 ; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 +; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 -; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: s_waitcnt vmcnt(12) ; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 ; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 ; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(10) ; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 -; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 ; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v25 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 
-; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 ; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 ; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v26 +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 ; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 ; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(5) ; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 ; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 +; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 ; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 +; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 ; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 +; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v31 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 -; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -64308,105 +65322,107 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_alignbit_b32 v3, v4, v3, 16 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 ; SI-NEXT: v_alignbit_b32 v6, v7, v6, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v43 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v45 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 ; SI-NEXT: v_alignbit_b32 v7, v8, v7, 16 ; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v42 ; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 ; SI-NEXT: v_alignbit_b32 v8, v9, v8, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v58 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v52 ; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 ; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v56 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, 
v41 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_alignbit_b32 v10, v11, v10, 16 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v46 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v55 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_alignbit_b32 v11, v12, v11, 16 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v63 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v54 ; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 ; SI-NEXT: v_alignbit_b32 v12, v13, v12, 16 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v56 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 ; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v62 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v53 ; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 ; SI-NEXT: v_alignbit_b32 v14, v15, v14, 16 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v61 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v51 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_alignbit_b32 v15, v16, v15, 16 -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v53 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v43 ; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 ; SI-NEXT: v_alignbit_b32 v16, v17, v16, 16 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v38 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v50 ; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 ; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v39 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; SI-NEXT: v_alignbit_b32 v18, v19, v18, 16 -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v41 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v49 ; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 ; SI-NEXT: v_alignbit_b32 v19, v20, v19, 16 -; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v40 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; SI-NEXT: v_alignbit_b32 v20, v21, v20, 16 -; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v55 +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v48 ; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 ; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; SI-NEXT: v_alignbit_b32 v22, v23, v22, 16 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v39 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 ; SI-NEXT: v_alignbit_b32 v23, v24, v23, 16 -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v52 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 ; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 ; SI-NEXT: v_alignbit_b32 v24, v25, v24, 16 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v51 +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v38 ; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 -; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v50 +; SI-NEXT: v_and_b32_e32 v26, 
0xffff0000, v36 ; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 ; SI-NEXT: v_alignbit_b32 v26, v27, v26, 16 -; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v49 +; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v34 ; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 ; SI-NEXT: v_alignbit_b32 v27, v28, v27, 16 -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v48 +; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v35 ; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 ; SI-NEXT: v_alignbit_b32 v28, v29, v28, 16 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v37 ; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 ; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16 -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 ; SI-NEXT: v_alignbit_b32 v30, v31, v30, 16 -; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v37 +; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v33 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 ; SI-NEXT: v_alignbit_b32 v31, v32, v31, 16 -; SI-NEXT: .LBB43_3: ; %end +; SI-NEXT: .LBB43_5: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -64425,41 +65441,6 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB43_4: -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v61, v53 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v59, v2 -; SI-NEXT: v_mov_b32_e32 v57, v11 -; SI-NEXT: v_mov_b32_e32 v47, v10 -; SI-NEXT: v_mov_b32_e32 v45, v12 -; SI-NEXT: v_mov_b32_e32 v33, v14 -; SI-NEXT: v_mov_b32_e32 v62, v38 -; SI-NEXT: v_mov_b32_e32 v38, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v40 -; SI-NEXT: v_mov_b32_e32 v40, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: 
v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_branch .LBB43_2 ; ; VI-LABEL: bitcast_v64bf16_to_v32f32_scalar: ; VI: ; %bb.0: @@ -66648,15 +67629,13 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr55 ; SI-NEXT: ; implicit-def: $vgpr53 @@ -66666,13 +67645,15 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr58 ; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr32 @@ -66755,87 +67736,82 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v63 ; SI-NEXT: v_cvt_f32_f16_e32 v34, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v54, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v56, v31 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v36, v63 ; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v29 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v28 -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:92 
; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v38, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v29 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v50, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v58, v27 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v44, v5 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v42, v4 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v43, v3 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v44, v3 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v58, v2 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:144 
; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v46, v2 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; SI-NEXT: v_cvt_f32_f16_e32 v60, v1 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr19 @@ -66851,7 +67827,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v31 @@ -66872,7 +67848,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v41, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v43, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v45, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 @@ -66886,6 +67862,8 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v47, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v31 @@ -66894,17 +67872,19 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v61, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v62 -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill 
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 @@ -66926,133 +67906,122 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; SI-NEXT: s_cbranch_execz .LBB44_4 ; SI-NEXT: ; %bb.3: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v33, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(5) ; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v28 ; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v9 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 ; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v26 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v9, v50 -; SI-NEXT: v_mov_b32_e32 v50, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 ; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v9, v48 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 ; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v9, v38 ; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_add_f32_e32 v44, 1.0, v63 +; SI-NEXT: v_add_f32_e32 v46, 1.0, v62 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v23 +; SI-NEXT: 
v_lshrrev_b32_e32 v63, 16, v44 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v46 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v23 -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v22 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v6, v62 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v63 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v58 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 ; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v21 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v22 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v34 +; SI-NEXT: v_mov_b32_e32 v34, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v32 +; SI-NEXT: v_mov_b32_e32 v32, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v56 ; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 ; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v19 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v61 ; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_add_f32_e32 v33, 1.0, v1 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 ; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v18 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v17 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v61 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v59 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 ; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v59 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 ; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v57 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 
-; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_add_f32_e32 v42, 1.0, v63 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v47 -; SI-NEXT: v_add_f32_e32 v44, 1.0, v62 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v57 +; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 ; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 ; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 ; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 ; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 ; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 ; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 ; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v27 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v26 +; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 +; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 ; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 ; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v42 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v44 ; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v10 ; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v11 ; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v12 ; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 ; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v29 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v42 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v44 ; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v44, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v44, v3 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v46, v2 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v6, v62 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v63 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v50 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 @@ -67071,8 +68040,13 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> 
%a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v60, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 ; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 ; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v52 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v6, v47 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v45 ; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 @@ -67082,40 +68056,37 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v53, v53 ; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 ; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v47, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v59, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v61, v2 -; SI-NEXT: v_mov_b32_e32 v52, v29 -; SI-NEXT: v_mov_b32_e32 v48, v30 -; SI-NEXT: v_mov_b32_e32 v56, v28 -; SI-NEXT: v_mov_b32_e32 v34, v7 -; SI-NEXT: v_mov_b32_e32 v32, v6 -; SI-NEXT: v_mov_b32_e32 v46, v8 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v38, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cvt_f32_f16_e32 v45, v1 +; SI-NEXT: v_mov_b32_e32 v50, v28 +; SI-NEXT: v_mov_b32_e32 v48, v29 +; SI-NEXT: v_mov_b32_e32 v38, v30 +; SI-NEXT: v_mov_b32_e32 v58, v27 +; SI-NEXT: v_mov_b32_e32 v56, v8 +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:100 ; 4-byte 
Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: .LBB44_4: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] ; SI-NEXT: v_cvt_f16_f32_e32 v1, v61 @@ -67126,41 +68097,45 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v58 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v46 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v43 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v44 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v47 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v42 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v44 ; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v43 ; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: 
buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v41 ; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 @@ -67169,7 +68144,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v55 ; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 @@ -67178,7 +68153,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v53 ; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 @@ -67187,7 +68162,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v51 ; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 @@ -67196,7 +68171,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v49 ; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 @@ -67205,7 +68180,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v39 ; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 @@ -67214,7 +68189,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v37 ; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 @@ -67223,7 +68198,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v35 ; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 @@ -67232,7 +68207,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v33 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 @@ -67242,20 +68217,9 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67266,7 +68230,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67277,7 +68241,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67288,7 +68252,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67299,7 +68263,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 
4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67310,7 +68274,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67319,9 +68283,9 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67331,8 +68295,8 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67343,7 +68307,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67354,7 +68318,7 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -67364,30 +68328,37 @@ define <64 x half> @bitcast_v32f32_to_v64f16(<32 x float> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v46 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v56 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v56 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v58 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v54 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v52 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v50 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v48 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v42 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v38 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -67620,7 +68591,8 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: s_cbranch_scc0 .LBB45_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_lshr_b32 s4, s9, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v47, s4 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_cvt_f32_f16_e32 v58, s4 ; SI-NEXT: s_lshr_b32 s4, s6, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v43, s4 ; SI-NEXT: s_lshr_b32 s4, s7, 16 @@ -67638,10 +68610,10 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: s_lshr_b32 s4, s14, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v11, s4 ; SI-NEXT: s_lshr_b32 s4, s15, 16 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v63, s4 +; SI-NEXT: v_cvt_f32_f16_e32 v9, s4 ; SI-NEXT: s_lshr_b32 s4, s40, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v21, s4 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v61, s4 ; SI-NEXT: s_lshr_b32 s4, s41, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v23, s4 ; SI-NEXT: s_lshr_b32 s4, s42, 16 @@ -67661,7 +68633,6 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: s_lshr_b32 s4, s28, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v48, s4 ; SI-NEXT: s_lshr_b32 s4, s27, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v1, s12 ; SI-NEXT: v_cvt_f32_f16_e32 v50, s4 ; SI-NEXT: s_lshr_b32 s4, s26, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v52, s4 @@ -67670,9 +68641,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: s_lshr_b32 s4, s24, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v40, s4 ; SI-NEXT: s_lshr_b32 s4, s23, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, s13 +; SI-NEXT: v_cvt_f32_f16_e32 v1, s14 ; SI-NEXT: v_cvt_f32_f16_e32 v42, s4 ; SI-NEXT: s_lshr_b32 s4, s22, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v44, s4 @@ -67681,12 +68650,12 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: s_lshr_b32 s4, s20, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v56, s4 ; SI-NEXT: s_lshr_b32 s4, s19, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, s14 
-; SI-NEXT: v_cvt_f32_f16_e32 v57, s4 +; SI-NEXT: v_cvt_f32_f16_e32 v1, s15 +; SI-NEXT: v_cvt_f32_f16_e32 v3, s4 ; SI-NEXT: s_lshr_b32 s4, s18, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v58, s4 +; SI-NEXT: v_cvt_f32_f16_e32 v47, s4 ; SI-NEXT: s_lshr_b32 s4, s17, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v60, s4 ; SI-NEXT: s_lshr_b32 s4, s16, 16 @@ -67694,11 +68663,12 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: v_cvt_f32_f16_e32 v5, s9 ; SI-NEXT: v_cvt_f32_f16_e32 v6, s6 ; SI-NEXT: v_cvt_f32_f16_e32 v7, s7 -; SI-NEXT: v_cvt_f32_f16_e32 v9, s8 -; SI-NEXT: v_cvt_f32_f16_e32 v59, s10 -; SI-NEXT: v_cvt_f32_f16_e32 v61, s11 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v20, s15 +; SI-NEXT: v_cvt_f32_f16_e32 v41, s8 +; SI-NEXT: v_cvt_f32_f16_e32 v45, s10 +; SI-NEXT: v_cvt_f32_f16_e32 v59, s11 +; SI-NEXT: v_cvt_f32_f16_e32 v63, s12 +; SI-NEXT: v_cvt_f32_f16_e32 v57, s13 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: v_cvt_f32_f16_e32 v22, s40 ; SI-NEXT: v_cvt_f32_f16_e32 v24, s41 ; SI-NEXT: v_cvt_f32_f16_e32 v26, s42 @@ -67713,127 +68683,136 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: v_cvt_f32_f16_e32 v16, s26 ; SI-NEXT: v_cvt_f32_f16_e32 v53, s25 ; SI-NEXT: v_cvt_f32_f16_e32 v12, s24 -; SI-NEXT: v_cvt_f32_f16_e32 v41, s23 +; SI-NEXT: v_cvt_f32_f16_e32 v14, s23 ; SI-NEXT: v_cvt_f32_f16_e32 v8, s22 -; SI-NEXT: v_cvt_f32_f16_e32 v45, s21 +; SI-NEXT: v_cvt_f32_f16_e32 v20, s21 ; SI-NEXT: v_cvt_f32_f16_e32 v4, s20 ; SI-NEXT: v_cvt_f32_f16_e32 v10, s19 -; SI-NEXT: v_cvt_f32_f16_e32 v14, s18 -; SI-NEXT: v_cvt_f32_f16_e32 v18, s17 +; SI-NEXT: v_cvt_f32_f16_e32 v18, s18 +; SI-NEXT: v_cvt_f32_f16_e32 v21, s17 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v1, s16 ; SI-NEXT: s_cbranch_execnz .LBB45_3 ; SI-NEXT: .LBB45_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v15, s12, 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 ; SI-NEXT: v_add_f32_e64 v1, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v17, s13, 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v15, v17 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_f32_e64 v2, s19, 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: v_add_f32_e64 v22, s40, 1.0 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: v_add_f32_e64 v21, s28, 1.0 +; SI-NEXT: v_add_f32_e64 v4, s20, 1.0 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v22 -; SI-NEXT: v_add_f32_e64 v19, s14, 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v19 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v4 +; SI-NEXT: v_add_f32_e64 v24, s41, 1.0 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e64 v23, s29, 1.0 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v15, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v3 
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v24 +; SI-NEXT: v_add_f32_e64 v37, s10, 1.0 +; SI-NEXT: v_add_f32_e64 v45, s9, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v37, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e64 v41, s6, 1.0 +; SI-NEXT: v_add_f32_e64 v19, s14, 1.0 +; SI-NEXT: v_add_f32_e64 v15, s12, 1.0 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v41 -; SI-NEXT: v_add_f32_e64 v6, s21, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v63, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v19 +; SI-NEXT: v_add_f32_e64 v18, s27, 1.0 +; SI-NEXT: v_add_f32_e64 v49, s8, 1.0 ; SI-NEXT: v_add_f32_e64 v10, s23, 1.0 ; SI-NEXT: v_add_f32_e64 v14, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s27, 1.0 -; SI-NEXT: v_add_f32_e64 v23, s29, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 +; SI-NEXT: v_add_f32_e64 v21, s28, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v49 +; SI-NEXT: v_add_f32_e64 v53, s7, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v41, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v49, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v1 +; SI-NEXT: v_add_f32_e64 v1, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v6, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v8, s22, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v10 +; SI-NEXT: v_add_f32_e64 v12, s24, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v14 +; SI-NEXT: v_add_f32_e64 v16, s26, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v21 +; SI-NEXT: v_add_f32_e64 v25, s47, 1.0 ; SI-NEXT: v_add_f32_e64 v27, s46, 1.0 +; SI-NEXT: v_add_f32_e64 v29, s45, 1.0 +; SI-NEXT: v_add_f32_e64 v28, s43, 1.0 ; SI-NEXT: v_add_f32_e64 v26, s42, 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v23 -; SI-NEXT: v_add_f32_e64 v25, s47, 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v27 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v17 -; SI-NEXT: v_add_f32_e64 v53, s7, 1.0 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v57 -; SI-NEXT: v_add_f32_e64 v49, s8, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s15, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v53 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v7, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v53, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 -; SI-NEXT: v_add_f32_e64 v1, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v28, s43, 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v49 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v41 -; SI-NEXT: v_add_f32_e64 v45, s9, 1.0 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v18 -; 
SI-NEXT: v_cvt_f32_f16_e32 v41, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v2 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v1 ; SI-NEXT: v_add_f32_e64 v1, s16, 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v29 ; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v20 +; SI-NEXT: v_add_f32_e64 v17, s13, 1.0 ; SI-NEXT: v_add_f32_e64 v34, s11, 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v6 +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v15, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v6 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v17 ; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v61, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v59, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v57, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v34, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v58, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v27, v62 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v48 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v50, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v54, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v44, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v46, v56 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v56, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: v_cvt_f32_f16_e32 v62, v6 -; SI-NEXT: v_add_f32_e64 v4, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s24, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s26, 1.0 -; SI-NEXT: v_add_f32_e64 v29, s45, 1.0 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e64 v30, s44, 1.0 -; SI-NEXT: v_add_f32_e64 v24, s41, 1.0 -; SI-NEXT: v_add_f32_e64 v20, s15, 1.0 -; SI-NEXT: v_add_f32_e64 v37, s10, 1.0 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v29 +; SI-NEXT: v_add_f32_e64 v22, s40, 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v59, v37 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v57, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: 
v_lshrrev_b32_e32 v61, 16, v22 +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 @@ -67841,13 +68820,11 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v15, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v47, v47 ; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 ; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 ; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 @@ -67856,43 +68833,38 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 ; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v63, v63 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v61, v61 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v60, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v58, v3 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v47, v47 ; SI-NEXT: .LBB45_3: ; %end ; SI-NEXT: v_cvt_f16_f32_e32 v2, v62 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v60 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v21 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v18 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v10 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen @@ -67905,7 +68877,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v20 ; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -67919,7 +68891,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: buffer_store_dword 
v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 ; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -68016,20 +68988,22 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v61 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v22 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v63 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v9 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v11 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 @@ -68038,41 +69012,37 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v57 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v63 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v61 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v59 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v45 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v41 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -68092,7 +69062,7 @@ 
define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v58 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v5 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -68118,23 +69088,21 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB45_4: ; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr47 ; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr20 ; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr40 @@ -68163,27 +69131,28 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a, ; SI-NEXT: ; implicit-def: $vgpr24 ; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr61 ; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr57 ; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $vgpr63 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr45 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr55 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr58 ; SI-NEXT: s_branch .LBB45_2 ; ; VI-LABEL: bitcast_v32f32_to_v64f16_scalar: @@ -71117,21 +72086,21 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_mov_b32_e32 v36, s16 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v35, s17 -; SI-NEXT: v_mov_b32_e32 v33, s18 -; SI-NEXT: v_mov_b32_e32 v32, s19 -; SI-NEXT: v_mov_b32_e32 v31, s20 -; SI-NEXT: v_mov_b32_e32 v29, s21 -; SI-NEXT: v_mov_b32_e32 v28, s22 +; SI-NEXT: v_mov_b32_e32 v31, s16 +; SI-NEXT: v_mov_b32_e32 v32, s17 +; SI-NEXT: v_mov_b32_e32 v29, s18 +; SI-NEXT: v_mov_b32_e32 
v30, s19 +; SI-NEXT: v_mov_b32_e32 v27, s20 +; SI-NEXT: v_mov_b32_e32 v28, s21 +; SI-NEXT: v_mov_b32_e32 v25, s22 ; SI-NEXT: v_mov_b32_e32 v26, s23 -; SI-NEXT: v_mov_b32_e32 v25, s24 +; SI-NEXT: v_mov_b32_e32 v23, s24 ; SI-NEXT: v_mov_b32_e32 v24, s25 -; SI-NEXT: v_mov_b32_e32 v22, s26 -; SI-NEXT: v_mov_b32_e32 v21, s27 -; SI-NEXT: v_mov_b32_e32 v20, s28 -; SI-NEXT: v_mov_b32_e32 v19, s29 +; SI-NEXT: v_mov_b32_e32 v21, s26 +; SI-NEXT: v_mov_b32_e32 v22, s27 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v19, s28 +; SI-NEXT: v_mov_b32_e32 v20, s29 ; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill @@ -71150,234 +72119,242 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v23, v18, v17, 16 -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v27, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v30, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v34, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v38, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v48, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v50, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v52, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v41, v21, v22, 16 -; SI-NEXT: v_alignbit_b32 v43, v24, v25, 16 -; SI-NEXT: v_alignbit_b32 v45, v26, v28, 16 -; SI-NEXT: v_alignbit_b32 v47, v29, v31, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v32, v33, 16 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v60, v35, v36, 16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v29 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v32 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v35 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[5:6], 16 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[36:37], v[15:16], 16 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v18 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], 
s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[7:8], 16 +; SI-NEXT: v_mov_b32_e32 v53, v40 +; SI-NEXT: v_lshr_b64 v[39:40], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[40:41], v[25:26], 16 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshr_b64 v[54:55], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[41:42], v[27:28], 16 +; SI-NEXT: v_mov_b32_e32 v55, v48 +; SI-NEXT: v_lshr_b64 v[48:49], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[29:30], 16 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v32 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[43:44], v[31:32], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true ; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 ; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v35, 1.0, v35 -; SI-NEXT: v_add_f32_e32 v36, 1.0, v36 -; SI-NEXT: v_add_f32_e32 v32, 1.0, v32 -; SI-NEXT: v_add_f32_e32 v33, 1.0, v33 -; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 -; SI-NEXT: v_add_f32_e32 v31, 1.0, v31 -; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[33:34], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 ; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 +; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[7:8], 16 +; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 ; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_lshr_b64 v[39:40], v[23:24], 16 +; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[40:41], v[25:26], 16 +; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 +; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 ; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 ; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 ; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: 
v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_alignbit_b32 v23, v18, v17, 16 -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v27, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v30, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v34, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v38, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v48, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v50, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v52, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v41, v21, v22, 16 -; SI-NEXT: v_alignbit_b32 v43, v24, v25, 16 -; SI-NEXT: v_alignbit_b32 v45, v26, v28, 16 -; SI-NEXT: v_alignbit_b32 v47, v29, v31, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v32, v33, 16 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v60, v35, v36, 16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v29 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[33:34], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[41:42], v[27:28], 16 +; SI-NEXT: v_add_f32_e32 v32, 1.0, v32 +; SI-NEXT: v_add_f32_e32 v31, 1.0, v31 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[51:52], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[29:30], 16 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[54:55], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[43:44], v[31:32], 16 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v22 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v35 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v26 +; SI-NEXT: 
v_lshrrev_b32_e32 v45, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v32 ; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: v_and_b32_e32 v36, 0xffff, v36 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v60 -; SI-NEXT: v_and_b32_e32 v35, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_or_b32_e32 v36, v36, v60 -; SI-NEXT: v_or_b32_e32 v23, v35, v23 -; SI-NEXT: v_add_i32_e32 v35, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v36, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v23, v35, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v58 -; SI-NEXT: v_or_b32_e32 v23, v23, v33 -; SI-NEXT: v_add_i32_e32 v33, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v23, v33, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v63 -; SI-NEXT: v_or_b32_e32 v23, v23, v32 -; SI-NEXT: v_add_i32_e32 v32, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v23, v32, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v47 -; SI-NEXT: v_or_b32_e32 v23, v23, v31 -; SI-NEXT: v_add_i32_e32 v31, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v23, v31, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v62 -; SI-NEXT: v_or_b32_e32 v23, v23, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v23, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v28 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v43 +; SI-NEXT: v_and_b32_e32 v31, 0xffff, v31 +; SI-NEXT: v_or_b32_e32 v31, v31, v50 +; SI-NEXT: buffer_store_dword v31, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v31, 0xffff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v35 +; SI-NEXT: v_or_b32_e32 v31, v31, v32 +; SI-NEXT: v_add_i32_e32 v32, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v31, v32, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v42 +; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 +; SI-NEXT: v_or_b32_e32 v29, v29, v31 +; SI-NEXT: v_add_i32_e32 v31, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v29, v31, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v29, 0xffff, v30 +; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v46 +; SI-NEXT: v_or_b32_e32 v29, v29, v30 +; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v41 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 +; SI-NEXT: v_or_b32_e32 v27, v27, v29 +; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v27, v29, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 ; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v45 -; SI-NEXT: v_or_b32_e32 v23, v23, v28 -; SI-NEXT: v_add_i32_e32 v28, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v23, v28, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_add_i32_e32 v28, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v40 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v25, v25, v27 +; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v25, 
v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v61 -; SI-NEXT: v_or_b32_e32 v23, v23, v26 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v34 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 ; SI-NEXT: v_add_i32_e32 v26, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v23, v26, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v39 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v59 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v33 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v41 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 +; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v57 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v63 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v54 -; SI-NEXT: v_or_b32_e32 v20, v20, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v49 +; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v56 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v62 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 ; SI-NEXT: v_or_b32_e32 v1, v1, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v46 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v61 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: 
v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v60 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v59 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -71389,7 +72366,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v58 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -71401,57 +72378,64 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v57 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v56 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v47 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; 
SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v17 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen @@ -71474,39 +72458,44 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr58 ; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr47 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; kill: killed $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:76 ; 4-byte Folded 
Spill +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v32f32_to_v64i16_scalar: @@ -72560,179 +73549,162 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v56, v10 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_mov_b32_e32 v57, v8 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 +; SI-NEXT: v_mov_b32_e32 v54, v12 +; SI-NEXT: v_mov_b32_e32 v34, v10 +; SI-NEXT: v_mov_b32_e32 v35, v8 +; SI-NEXT: v_mov_b32_e32 v38, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:56 ; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v1 +; SI-NEXT: 
v_lshlrev_b32_e32 v56, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v15 ; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v19 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v23 ; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v27 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v40 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v36 ; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v8 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v10 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v12 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v38 -; SI-NEXT: s_waitcnt vmcnt(5) expcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v33 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v50 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v32 -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 
offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB51_4 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB51_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v7, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 +; SI-NEXT: v_or_b32_e32 v7, v0, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v9, v0, v50 +; SI-NEXT: v_or_b32_e32 v9, v0, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v10, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 -; SI-NEXT: v_or_b32_e32 v11, v0, v41 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 -; SI-NEXT: v_or_b32_e32 v12, v0, v40 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_mov_b32_e32 v52, v57 -; SI-NEXT: v_mov_b32_e32 v57, v40 -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v13 -; SI-NEXT: v_or_b32_e32 v13, v0, 
v13 +; SI-NEXT: v_or_b32_e32 v10, v0, v47 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_or_b32_e32 v11, v0, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v12, v0, v45 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v54 +; SI-NEXT: v_or_b32_e32 v13, v0, v44 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v14 -; SI-NEXT: v_mov_b32_e32 v36, v41 -; SI-NEXT: v_mov_b32_e32 v41, v14 -; SI-NEXT: v_or_b32_e32 v14, v0, v48 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v54 +; SI-NEXT: v_mov_b32_e32 v54, v14 +; SI-NEXT: v_or_b32_e32 v14, v0, v43 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v16 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v43 -; SI-NEXT: v_mov_b32_e32 v43, v48 -; SI-NEXT: v_mov_b32_e32 v48, v15 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v15, v0, v15 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v18 -; SI-NEXT: v_mov_b32_e32 v38, v61 -; SI-NEXT: v_mov_b32_e32 v61, v56 -; SI-NEXT: v_mov_b32_e32 v56, v16 -; SI-NEXT: v_or_b32_e32 v16, v0, v37 +; SI-NEXT: v_or_b32_e32 v16, v0, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v20 ; SI-NEXT: v_or_b32_e32 v17, v0, v17 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v18, v0, v35 +; SI-NEXT: v_or_b32_e32 v18, v0, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; SI-NEXT: v_or_b32_e32 v19, v0, v19 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 -; SI-NEXT: v_mov_b32_e32 v37, v20 -; SI-NEXT: v_or_b32_e32 v20, v0, v33 +; SI-NEXT: v_or_b32_e32 v20, v0, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: v_or_b32_e32 v21, v0, v21 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 -; SI-NEXT: v_or_b32_e32 v22, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_mov_b32_e32 v35, v24 -; SI-NEXT: v_mov_b32_e32 v39, v23 +; SI-NEXT: v_or_b32_e32 v22, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 ; SI-NEXT: v_or_b32_e32 v23, v0, v23 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 -; SI-NEXT: v_mov_b32_e32 v24, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v24, v0, v24 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v24, v0, v57 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v63 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 ; SI-NEXT: v_or_b32_e32 v25, v0, v25 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46 -; SI-NEXT: v_mov_b32_e32 v26, v27 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v26, v0, v26 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 +; SI-NEXT: v_or_b32_e32 v26, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v27, v0, v54 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v27, v0, v27 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_mov_b32_e32 v33, v28 ; SI-NEXT: v_or_b32_e32 v28, v0, v5 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: v_or_b32_e32 v29, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v29, v0, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 ; SI-NEXT: s_or_b32 
s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 @@ -72740,15 +73712,18 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a, ; SI-NEXT: v_or_b32_e32 v30, v0, v3 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_mov_b32_e32 v63, v2 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_or_b32_e32 v8, v1, v55 -; SI-NEXT: v_mov_b32_e32 v55, v4 -; SI-NEXT: v_mov_b32_e32 v53, v6 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v45, v44 -; SI-NEXT: v_mov_b32_e32 v59, v42 -; SI-NEXT: v_or_b32_e32 v31, v0, v34 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v38, v2 +; SI-NEXT: v_or_b32_e32 v8, v1, v56 +; SI-NEXT: v_mov_b32_e32 v42, v41 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v55, v61 +; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v62, v52 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v49, v51 +; SI-NEXT: v_or_b32_e32 v31, v0, v31 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -72756,14 +73731,40 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v4, s8 ; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: s_cbranch_execnz .LBB51_3 -; SI-NEXT: .LBB51_2: ; %cmp.true +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_branch .LBB51_3 +; SI-NEXT: .LBB51_2: +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v42, v41 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v55, v61 +; SI-NEXT: v_mov_b32_e32 v38, v2 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v54 +; SI-NEXT: v_mov_b32_e32 v54, v14 +; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v62, v52 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v49, v51 +; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: .LBB51_3: ; %Flow +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: v_mov_b32_e32 v58, v49 +; SI-NEXT: s_cbranch_vccnz .LBB51_5 +; SI-NEXT: ; %bb.4: ; %cmp.true +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v48, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v32, v1 +; SI-NEXT: v_or_b32_e32 v1, v56, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v52, v53 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 @@ -72804,143 +73805,143 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v4, s8 ; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, 
v0 -; SI-NEXT: v_or_b32_e32 v0, v38, v0 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_or_b32_e32 v0, v39, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v50, v0 +; SI-NEXT: v_or_b32_e32 v0, v47, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v36, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v49, v0 +; SI-NEXT: v_or_b32_e32 v0, v44, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v43, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v48, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: 
v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v39, v0 -; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; 
SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 +; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v30, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 @@ -72949,7 +73950,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a, ; SI-NEXT: v_add_i32_e32 v31, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 -; SI-NEXT: .LBB51_3: ; %end +; SI-NEXT: .LBB51_5: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -72968,35 +73969,6 @@ define inreg <32 x float> 
@bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a, ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB51_4: -; SI-NEXT: v_mov_b32_e32 v38, v61 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v63, v2 -; SI-NEXT: v_mov_b32_e32 v55, v4 -; SI-NEXT: v_mov_b32_e32 v53, v6 -; SI-NEXT: v_mov_b32_e32 v52, v57 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v61, v56 -; SI-NEXT: v_mov_b32_e32 v50, v43 -; SI-NEXT: v_mov_b32_e32 v36, v41 -; SI-NEXT: v_mov_b32_e32 v57, v40 -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v13 -; SI-NEXT: v_mov_b32_e32 v43, v48 -; SI-NEXT: v_mov_b32_e32 v48, v15 -; SI-NEXT: v_mov_b32_e32 v41, v14 -; SI-NEXT: v_mov_b32_e32 v56, v16 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v45, v44 -; SI-NEXT: v_mov_b32_e32 v59, v42 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v37, v20 -; SI-NEXT: v_mov_b32_e32 v39, v23 -; SI-NEXT: v_mov_b32_e32 v35, v24 -; SI-NEXT: v_mov_b32_e32 v33, v28 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_branch .LBB51_2 ; ; VI-LABEL: bitcast_v64i16_to_v32f32_scalar: ; VI: ; %bb.0: @@ -78979,894 +79951,1230 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v16i64_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[4:5] -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v41, s30, 0 -; SI-NEXT: v_writelane_b32 v41, s31, 1 -; SI-NEXT: v_writelane_b32 v41, s34, 2 -; SI-NEXT: v_writelane_b32 v41, s35, 3 -; SI-NEXT: v_writelane_b32 v41, s36, 4 -; SI-NEXT: v_writelane_b32 v41, s37, 5 -; SI-NEXT: v_writelane_b32 v41, s38, 6 -; SI-NEXT: v_writelane_b32 v41, s39, 7 -; SI-NEXT: v_writelane_b32 v41, s48, 8 -; SI-NEXT: v_writelane_b32 v41, s49, 9 -; SI-NEXT: v_writelane_b32 v41, s50, 10 -; SI-NEXT: v_writelane_b32 v41, s51, 11 -; SI-NEXT: v_writelane_b32 v41, s52, 12 -; SI-NEXT: v_writelane_b32 v41, s53, 13 -; SI-NEXT: v_writelane_b32 v41, s54, 14 -; SI-NEXT: v_writelane_b32 v41, s55, 15 -; SI-NEXT: v_writelane_b32 v41, s64, 16 -; SI-NEXT: v_writelane_b32 v41, s65, 17 -; SI-NEXT: v_writelane_b32 v41, s66, 18 -; SI-NEXT: v_writelane_b32 v41, s67, 19 -; SI-NEXT: v_writelane_b32 v41, s68, 20 -; SI-NEXT: v_writelane_b32 v41, s69, 21 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: 
v_writelane_b32 v20, s51, 11 +; SI-NEXT: v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 +; SI-NEXT: v_writelane_b32 v20, s68, 20 +; SI-NEXT: v_writelane_b32 v20, s69, 21 +; SI-NEXT: v_writelane_b32 v20, s70, 22 +; SI-NEXT: v_writelane_b32 v20, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s80, 24 +; SI-NEXT: v_writelane_b32 v20, s81, 25 +; SI-NEXT: v_writelane_b32 v20, s82, 26 +; SI-NEXT: v_writelane_b32 v20, s83, 27 +; SI-NEXT: v_writelane_b32 v20, s84, 28 +; SI-NEXT: v_writelane_b32 v20, s85, 29 +; SI-NEXT: v_writelane_b32 v20, s86, 30 +; SI-NEXT: v_writelane_b32 v20, s87, 31 +; SI-NEXT: v_writelane_b32 v20, s96, 32 +; SI-NEXT: v_writelane_b32 v20, s97, 33 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_writelane_b32 v41, s70, 22 -; SI-NEXT: v_readfirstlane_b32 s47, v1 -; SI-NEXT: v_readfirstlane_b32 s46, v2 -; SI-NEXT: v_readfirstlane_b32 s45, v3 -; SI-NEXT: v_readfirstlane_b32 s44, v4 -; SI-NEXT: v_readfirstlane_b32 s43, v5 -; SI-NEXT: v_readfirstlane_b32 s42, v6 -; SI-NEXT: v_readfirstlane_b32 s41, v7 -; SI-NEXT: v_readfirstlane_b32 s40, v8 -; SI-NEXT: v_readfirstlane_b32 s15, v9 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s12, v12 -; SI-NEXT: v_readfirstlane_b32 s11, v13 -; SI-NEXT: v_readfirstlane_b32 s10, v14 -; SI-NEXT: v_readfirstlane_b32 s9, v15 -; SI-NEXT: v_readfirstlane_b32 s8, v16 -; SI-NEXT: v_readfirstlane_b32 s7, v17 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v18 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: v_writelane_b32 v41, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s98, 34 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: v_writelane_b32 v20, s99, 35 +; SI-NEXT: ; implicit-def: $vgpr22 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane ; SI-NEXT: s_cbranch_scc0 .LBB57_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v22, s45 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s9 -; SI-NEXT: v_mov_b32_e32 v9, s11 -; SI-NEXT: v_mov_b32_e32 v12, s13 -; SI-NEXT: v_mov_b32_e32 v15, s15 -; SI-NEXT: v_mov_b32_e32 v18, s41 -; SI-NEXT: v_mov_b32_e32 v21, s43 -; SI-NEXT: v_alignbit_b32 v24, s44, v22, 24 -; SI-NEXT: v_alignbit_b32 v25, s44, v22, 16 -; SI-NEXT: v_alignbit_b32 v26, s44, v22, 8 -; SI-NEXT: v_mov_b32_e32 v22, s47 -; SI-NEXT: v_mov_b32_e32 v23, s28 -; SI-NEXT: v_mov_b32_e32 v29, s26 -; SI-NEXT: v_mov_b32_e32 v35, s24 -; SI-NEXT: v_mov_b32_e32 v39, s22 -; SI-NEXT: v_mov_b32_e32 v50, s20 -; 
SI-NEXT: v_mov_b32_e32 v53, s18 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s8, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s8, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s8, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s10, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s10, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s12, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s12, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s12, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s14, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s14, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s14, v15, 8 -; SI-NEXT: v_alignbit_b32 v16, s40, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s40, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s40, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8 -; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24 -; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16 -; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8 -; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24 -; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8 -; SI-NEXT: v_alignbit_b32 v27, s27, v29, 24 -; SI-NEXT: v_alignbit_b32 v28, s27, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, s27, v29, 8 -; SI-NEXT: v_alignbit_b32 v33, s25, v35, 24 -; SI-NEXT: v_alignbit_b32 v34, s25, v35, 16 -; SI-NEXT: v_alignbit_b32 v35, s25, v35, 8 -; SI-NEXT: v_alignbit_b32 v37, s23, v39, 24 -; SI-NEXT: v_alignbit_b32 v38, s23, v39, 16 -; SI-NEXT: v_alignbit_b32 v39, s23, v39, 8 -; SI-NEXT: v_alignbit_b32 v48, s21, v50, 24 -; SI-NEXT: v_alignbit_b32 v49, s21, v50, 16 -; SI-NEXT: v_alignbit_b32 v50, s21, v50, 8 -; SI-NEXT: v_alignbit_b32 v51, s19, v53, 24 -; SI-NEXT: v_alignbit_b32 v52, s19, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, s19, v53, 8 -; SI-NEXT: v_alignbit_b32 v54, s17, v40, 24 -; SI-NEXT: v_alignbit_b32 v55, s17, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, s17, v40, 8 -; SI-NEXT: s_lshr_b32 s56, s6, 24 -; SI-NEXT: s_lshr_b32 s57, s6, 16 -; SI-NEXT: s_lshr_b32 s58, s6, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s60, s8, 16 -; SI-NEXT: s_lshr_b32 s61, s8, 8 -; SI-NEXT: s_lshr_b32 s62, s10, 24 -; SI-NEXT: s_lshr_b32 s63, s10, 16 -; SI-NEXT: s_lshr_b32 s72, s10, 8 -; SI-NEXT: s_lshr_b32 s73, s12, 24 -; SI-NEXT: s_lshr_b32 s74, s12, 16 -; SI-NEXT: s_lshr_b32 s75, s12, 8 -; SI-NEXT: s_lshr_b32 s76, s14, 24 -; SI-NEXT: s_lshr_b32 s77, s14, 16 -; SI-NEXT: s_lshr_b32 s78, s14, 8 -; SI-NEXT: s_lshr_b32 s79, s40, 24 -; SI-NEXT: s_lshr_b32 s88, s40, 16 -; SI-NEXT: s_lshr_b32 s89, s40, 8 -; SI-NEXT: s_lshr_b32 s90, s42, 24 -; SI-NEXT: s_lshr_b32 s91, s42, 16 -; SI-NEXT: s_lshr_b32 s92, s42, 8 -; SI-NEXT: s_lshr_b32 s93, s44, 24 -; SI-NEXT: s_lshr_b32 s94, s44, 16 -; SI-NEXT: s_lshr_b32 s95, s44, 8 -; SI-NEXT: s_lshr_b32 s30, s46, 24 -; SI-NEXT: s_lshr_b32 s31, s46, 16 -; SI-NEXT: s_lshr_b32 s34, s46, 8 -; SI-NEXT: s_lshr_b32 s35, s29, 24 -; SI-NEXT: s_lshr_b32 s36, s29, 16 -; SI-NEXT: s_lshr_b32 s37, s29, 8 -; SI-NEXT: s_lshr_b32 s38, s27, 24 -; SI-NEXT: s_lshr_b32 s39, s27, 16 -; SI-NEXT: s_lshr_b32 s48, s27, 8 -; SI-NEXT: s_lshr_b32 s49, s25, 24 -; SI-NEXT: s_lshr_b32 s50, s25, 16 -; SI-NEXT: s_lshr_b32 s51, s25, 8 -; SI-NEXT: s_lshr_b32 s52, s23, 24 -; SI-NEXT: s_lshr_b32 s53, s23, 16 -; SI-NEXT: s_lshr_b32 s54, s23, 8 -; SI-NEXT: s_lshr_b32 s55, s21, 24 -; SI-NEXT: s_lshr_b32 s64, s21, 16 -; SI-NEXT: 
s_lshr_b32 s65, s21, 8 -; SI-NEXT: s_lshr_b32 s66, s19, 24 -; SI-NEXT: s_lshr_b32 s67, s19, 16 -; SI-NEXT: s_lshr_b32 s68, s19, 8 -; SI-NEXT: s_lshr_b32 s69, s17, 24 -; SI-NEXT: s_lshr_b32 s70, s17, 16 -; SI-NEXT: s_lshr_b32 s71, s17, 8 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v22, s46, 40 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 41 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s9, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 38 
+; SI-NEXT: v_writelane_b32 v22, s47, 39 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 36 +; SI-NEXT: v_writelane_b32 v22, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 34 +; SI-NEXT: v_writelane_b32 v22, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 32 +; SI-NEXT: v_writelane_b32 v22, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 30 +; SI-NEXT: v_writelane_b32 v22, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 28 +; SI-NEXT: v_writelane_b32 v22, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 26 +; SI-NEXT: v_writelane_b32 v22, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 24 +; SI-NEXT: v_writelane_b32 v22, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 22 +; SI-NEXT: v_writelane_b32 v22, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 20 +; SI-NEXT: v_writelane_b32 v22, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 18 +; SI-NEXT: v_writelane_b32 v22, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 16 +; SI-NEXT: v_writelane_b32 v22, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 14 +; SI-NEXT: v_writelane_b32 v22, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 12 +; SI-NEXT: v_writelane_b32 v22, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 10 +; SI-NEXT: v_writelane_b32 v22, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 6 +; SI-NEXT: v_writelane_b32 v22, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 4 +; SI-NEXT: v_writelane_b32 v22, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 2 +; SI-NEXT: v_writelane_b32 v22, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 0 +; SI-NEXT: s_lshr_b32 s49, s19, 24 +; SI-NEXT: s_lshr_b32 s48, s19, 16 +; SI-NEXT: s_lshr_b32 s50, s19, 8 +; SI-NEXT: s_lshr_b32 s51, s17, 24 +; SI-NEXT: s_lshr_b32 s52, s17, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 8 +; SI-NEXT: s_lshr_b64 s[54:55], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v22, s47, 1 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], 
s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 +; SI-NEXT: s_add_u32 s40, s40, 3 +; SI-NEXT: s_addc_u32 s41, s41, 0 +; SI-NEXT: s_add_u32 s42, s42, 3 +; SI-NEXT: s_addc_u32 s43, s43, 0 +; SI-NEXT: s_add_u32 s44, s44, 3 +; SI-NEXT: s_addc_u32 s45, s45, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s47, s47, 3 -; SI-NEXT: s_addc_u32 s46, s46, 0 -; SI-NEXT: s_add_u32 s45, s45, 3 -; SI-NEXT: s_addc_u32 s44, s44, 0 -; SI-NEXT: s_add_u32 s43, s43, 3 -; SI-NEXT: s_addc_u32 s42, s42, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: v_mov_b32_e32 v22, s45 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s9 -; SI-NEXT: v_mov_b32_e32 v9, s11 -; SI-NEXT: v_mov_b32_e32 v12, s13 -; SI-NEXT: v_mov_b32_e32 v15, s15 -; SI-NEXT: v_mov_b32_e32 v18, s41 -; SI-NEXT: v_mov_b32_e32 v21, s43 -; SI-NEXT: v_alignbit_b32 v24, s44, v22, 24 -; SI-NEXT: v_alignbit_b32 v25, s44, v22, 16 -; SI-NEXT: v_alignbit_b32 v26, s44, v22, 8 -; SI-NEXT: v_mov_b32_e32 v22, s47 -; SI-NEXT: v_mov_b32_e32 v23, s28 -; SI-NEXT: v_mov_b32_e32 v29, s26 -; SI-NEXT: v_mov_b32_e32 v35, s24 -; SI-NEXT: v_mov_b32_e32 v39, s22 -; SI-NEXT: v_mov_b32_e32 v50, s20 -; SI-NEXT: v_mov_b32_e32 v53, s18 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s8, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s8, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s8, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s10, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s10, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s12, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s12, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s12, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s14, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s14, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s14, v15, 8 -; SI-NEXT: v_alignbit_b32 v16, s40, 
v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s40, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s40, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8 -; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24 -; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16 -; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8 -; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24 -; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8 -; SI-NEXT: v_alignbit_b32 v27, s27, v29, 24 -; SI-NEXT: v_alignbit_b32 v28, s27, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, s27, v29, 8 -; SI-NEXT: v_alignbit_b32 v33, s25, v35, 24 -; SI-NEXT: v_alignbit_b32 v34, s25, v35, 16 -; SI-NEXT: v_alignbit_b32 v35, s25, v35, 8 -; SI-NEXT: v_alignbit_b32 v37, s23, v39, 24 -; SI-NEXT: v_alignbit_b32 v38, s23, v39, 16 -; SI-NEXT: v_alignbit_b32 v39, s23, v39, 8 -; SI-NEXT: v_alignbit_b32 v48, s21, v50, 24 -; SI-NEXT: v_alignbit_b32 v49, s21, v50, 16 -; SI-NEXT: v_alignbit_b32 v50, s21, v50, 8 -; SI-NEXT: v_alignbit_b32 v51, s19, v53, 24 -; SI-NEXT: v_alignbit_b32 v52, s19, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, s19, v53, 8 -; SI-NEXT: v_alignbit_b32 v54, s17, v40, 24 -; SI-NEXT: v_alignbit_b32 v55, s17, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, s17, v40, 8 -; SI-NEXT: s_lshr_b32 s56, s6, 24 -; SI-NEXT: s_lshr_b32 s57, s6, 16 -; SI-NEXT: s_lshr_b32 s58, s6, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s60, s8, 16 -; SI-NEXT: s_lshr_b32 s61, s8, 8 -; SI-NEXT: s_lshr_b32 s62, s10, 24 -; SI-NEXT: s_lshr_b32 s63, s10, 16 -; SI-NEXT: s_lshr_b32 s72, s10, 8 -; SI-NEXT: s_lshr_b32 s73, s12, 24 -; SI-NEXT: s_lshr_b32 s74, s12, 16 -; SI-NEXT: s_lshr_b32 s75, s12, 8 -; SI-NEXT: s_lshr_b32 s76, s14, 24 -; SI-NEXT: s_lshr_b32 s77, s14, 16 -; SI-NEXT: s_lshr_b32 s78, s14, 8 -; SI-NEXT: s_lshr_b32 s79, s40, 24 -; SI-NEXT: s_lshr_b32 s88, s40, 16 -; SI-NEXT: s_lshr_b32 s89, s40, 8 -; SI-NEXT: s_lshr_b32 s90, s42, 24 -; SI-NEXT: s_lshr_b32 s91, s42, 16 -; SI-NEXT: s_lshr_b32 s92, s42, 8 -; SI-NEXT: s_lshr_b32 s93, s44, 24 -; SI-NEXT: s_lshr_b32 s94, s44, 16 -; SI-NEXT: s_lshr_b32 s95, s44, 8 -; SI-NEXT: s_lshr_b32 s30, s46, 24 -; SI-NEXT: s_lshr_b32 s31, s46, 16 -; SI-NEXT: s_lshr_b32 s34, s46, 8 -; SI-NEXT: s_lshr_b32 s35, s29, 24 -; SI-NEXT: s_lshr_b32 s36, s29, 16 -; SI-NEXT: s_lshr_b32 s37, s29, 8 -; SI-NEXT: s_lshr_b32 s38, s27, 24 -; SI-NEXT: s_lshr_b32 s39, s27, 16 -; SI-NEXT: s_lshr_b32 s48, s27, 8 -; SI-NEXT: s_lshr_b32 s49, s25, 24 -; SI-NEXT: s_lshr_b32 s50, s25, 16 -; SI-NEXT: s_lshr_b32 s51, s25, 8 -; SI-NEXT: s_lshr_b32 s52, s23, 24 -; SI-NEXT: s_lshr_b32 s53, s23, 16 -; SI-NEXT: s_lshr_b32 s54, s23, 8 -; SI-NEXT: s_lshr_b32 s55, s21, 24 -; SI-NEXT: s_lshr_b32 s64, s21, 16 -; SI-NEXT: s_lshr_b32 s65, s21, 8 -; SI-NEXT: s_lshr_b32 s66, s19, 24 -; SI-NEXT: s_lshr_b32 s67, s19, 16 -; SI-NEXT: s_lshr_b32 s68, s19, 8 -; SI-NEXT: s_lshr_b32 s69, s17, 24 -; SI-NEXT: s_lshr_b32 s70, s17, 16 -; SI-NEXT: s_lshr_b32 s71, s17, 8 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 40 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: 
v_writelane_b32 v22, s46, 41 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s9, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 38 +; SI-NEXT: v_writelane_b32 v22, s47, 39 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 36 +; SI-NEXT: v_writelane_b32 v22, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 34 +; SI-NEXT: v_writelane_b32 v22, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 32 +; SI-NEXT: 
v_writelane_b32 v22, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 30 +; SI-NEXT: v_writelane_b32 v22, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 28 +; SI-NEXT: v_writelane_b32 v22, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 26 +; SI-NEXT: v_writelane_b32 v22, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 24 +; SI-NEXT: v_writelane_b32 v22, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 22 +; SI-NEXT: v_writelane_b32 v22, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 20 +; SI-NEXT: v_writelane_b32 v22, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 18 +; SI-NEXT: v_writelane_b32 v22, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 16 +; SI-NEXT: v_writelane_b32 v22, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 14 +; SI-NEXT: v_writelane_b32 v22, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 12 +; SI-NEXT: v_writelane_b32 v22, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 10 +; SI-NEXT: v_writelane_b32 v22, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 6 +; SI-NEXT: v_writelane_b32 v22, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 4 +; SI-NEXT: v_writelane_b32 v22, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 2 +; SI-NEXT: v_writelane_b32 v22, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 0 +; SI-NEXT: s_lshr_b32 s49, s19, 24 +; SI-NEXT: s_lshr_b32 s48, s19, 16 +; SI-NEXT: s_lshr_b32 s50, s19, 8 +; SI-NEXT: s_lshr_b32 s51, s17, 24 +; SI-NEXT: s_lshr_b32 s52, s17, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 8 +; SI-NEXT: s_lshr_b64 s[54:55], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v22, s47, 1 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 ; SI-NEXT: 
.LBB57_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v40 -; SI-NEXT: v_or_b32_e32 v40, s4, v40 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s71, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s70, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s69, 24 -; SI-NEXT: v_and_b32_e32 v55, 0xff, v55 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v55 -; SI-NEXT: v_lshlrev_b32_e32 v54, 24, v54 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v54, v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v53, 8, v53 -; SI-NEXT: v_or_b32_e32 v53, s4, v53 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s68, 8 -; SI-NEXT: v_and_b32_e32 v52, 0xff, v52 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s67, 0xff -; SI-NEXT: v_and_b32_e32 v40, 0xffff, v40 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v52 -; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v51 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s66, 24 -; SI-NEXT: v_or_b32_e32 v54, v40, v54 -; SI-NEXT: v_and_b32_e32 v53, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v51, v51, v52 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: buffer_store_dword v54, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v54, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v51, v53, v51 -; SI-NEXT: v_add_i32_e32 v52, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v55, v54, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v51, v52, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v52, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v50, 8, v50 -; SI-NEXT: v_or_b32_e32 v50, s4, v50 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s65, 8 -; SI-NEXT: v_and_b32_e32 v49, 0xff, v49 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s64, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 -; SI-NEXT: v_lshlrev_b32_e32 v48, 24, v48 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s55, 24 -; SI-NEXT: v_and_b32_e32 v50, 0xffff, v50 -; SI-NEXT: v_or_b32_e32 v48, v48, v49 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: s_lshl_b32 s47, s38, 8 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: s_and_b32 s47, s36, 0xff +; SI-NEXT: s_lshl_b32 s57, s34, 24 +; SI-NEXT: s_lshl_b32 s47, s47, 16 +; SI-NEXT: s_or_b32 s47, s57, s47 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xff +; SI-NEXT: s_lshl_b32 s17, s53, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s52, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s47, s51, 24 +; SI-NEXT: s_or_b32 s17, s47, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s30, 8 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s94, 0xff +; SI-NEXT: s_lshl_b32 s18, s92, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xff +; SI-NEXT: s_lshl_b32 s17, s50, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: 
s_and_b32 s17, s48, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s49, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s90, 8 +; SI-NEXT: s_and_b32 s17, s20, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s88, 0xff +; SI-NEXT: s_lshl_b32 s18, s78, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 17 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 16 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 15 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v6, s16 +; SI-NEXT: s_lshl_b32 s16, s76, 8 +; SI-NEXT: s_and_b32 s17, s22, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s74, 0xff +; SI-NEXT: s_lshl_b32 s18, s72, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 14 +; SI-NEXT: v_mov_b32_e32 v7, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 13 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 12 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v8, s16 +; SI-NEXT: s_lshl_b32 s16, s62, 8 +; SI-NEXT: s_and_b32 s17, s24, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s60, 0xff +; SI-NEXT: s_lshl_b32 s18, s58, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 11 +; SI-NEXT: v_mov_b32_e32 v9, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 10 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 9 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: s_lshl_b32 s16, s56, 8 +; SI-NEXT: s_and_b32 s17, s26, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s46, 0xff +; SI-NEXT: s_lshl_b32 s18, s98, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 8 +; SI-NEXT: v_mov_b32_e32 v11, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 7 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 6 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, 
s17 +; SI-NEXT: v_mov_b32_e32 v12, s16 +; SI-NEXT: s_lshl_b32 s16, s96, 8 +; SI-NEXT: s_and_b32 s17, s28, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s86, 0xff +; SI-NEXT: s_lshl_b32 s18, s84, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 5 +; SI-NEXT: v_mov_b32_e32 v13, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 4 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 3 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v14, s16 +; SI-NEXT: s_lshl_b32 s16, s82, 8 +; SI-NEXT: s_and_b32 s17, s44, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s80, 0xff +; SI-NEXT: s_lshl_b32 s18, s70, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 2 +; SI-NEXT: v_mov_b32_e32 v15, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 1 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 0 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v51, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v48, v50, v48 -; SI-NEXT: v_add_i32_e32 v49, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v52, v51, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v48, v49, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v49, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v39, 8, v39 -; SI-NEXT: v_or_b32_e32 v39, s4, v39 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s54, 8 -; SI-NEXT: v_and_b32_e32 v38, 0xff, v38 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s53, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v38 -; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v37 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s52, 24 -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v37, v37, v38 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: v_mov_b32_e32 v16, s16 +; SI-NEXT: s_lshl_b32 s16, s68, 8 +; SI-NEXT: s_and_b32 s17, s42, 0xff +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s66, 0xff +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_lshl_b32 s18, s64, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: buffer_store_dword v7, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; 
SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: buffer_store_dword v8, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v9, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: v_readlane_b32 s17, v22, 63 +; SI-NEXT: buffer_store_dword v10, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: buffer_store_dword v11, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 62 +; SI-NEXT: buffer_store_dword v12, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 61 +; SI-NEXT: buffer_store_dword v13, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v14, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v22, 0 +; SI-NEXT: buffer_store_dword v15, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: v_readlane_b32 s19, v22, 1 +; SI-NEXT: buffer_store_dword v16, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_lshl_b32 s17, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v22, 2 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v48, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v37, v39, v37 -; SI-NEXT: v_add_i32_e32 v38, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v49, v48, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v37, v38, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v38, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v35, 8, v35 -; SI-NEXT: v_or_b32_e32 v35, s4, v35 -; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s51, 8 -; SI-NEXT: v_and_b32_e32 v34, 0xff, v34 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s50, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34 -; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v33 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s49, 24 -; SI-NEXT: v_and_b32_e32 v35, 0xffff, v35 -; SI-NEXT: v_or_b32_e32 v33, v33, v34 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: v_readlane_b32 s19, v22, 3 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 4 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 60 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v37, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v33, v35, v33 -; SI-NEXT: v_add_i32_e32 v34, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v38, v37, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v33, v34, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v34, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 
v29, 8, v29 -; SI-NEXT: v_or_b32_e32 v29, s4, v29 -; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s5, s48, 8 -; SI-NEXT: v_and_b32_e32 v28, 0xff, v28 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s39, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s38, 24 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 59 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 58 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v33, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v27, v29, v27 -; SI-NEXT: v_add_i32_e32 v28, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v34, v33, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v28, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; SI-NEXT: v_or_b32_e32 v23, s4, v23 -; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s37, 8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: v_readlane_b32 s16, v22, 6 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s17, v22, 7 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: v_readlane_b32 s19, v22, 5 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: v_readlane_b32 s16, v22, 8 +; SI-NEXT: v_readlane_b32 s17, v22, 9 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 10 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s18, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v27, vcc, 44, v0 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s36, 0xff -; SI-NEXT: buffer_store_dword v28, v27, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v36 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s35, 24 -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 -; SI-NEXT: v_or_b32_e32 v22, v27, v22 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_add_i32_e32 v23, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: v_readlane_b32 s15, v22, 57 +; SI-NEXT: s_lshl_b32 s15, s15, 8 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_readlane_b32 s15, v22, 56 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: v_readlane_b32 s16, v22, 55 +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_lshl_b32 s16, s16, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: 
buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s47, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v32 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s46, 0xff -; SI-NEXT: s_lshl_b32 s5, s34, 8 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_readlane_b32 s14, v22, 12 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s15, v22, 13 +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_readlane_b32 s14, v22, 14 +; SI-NEXT: v_readlane_b32 s15, v22, 15 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s16, v22, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s16, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v31 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s31, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v30 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s30, 24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_or_b32_e32 v23, v27, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: v_readlane_b32 s13, v22, 54 +; SI-NEXT: s_lshl_b32 s13, s13, 8 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_readlane_b32 s13, v22, 53 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: v_readlane_b32 s14, v22, 52 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 60, v0 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v26 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s44, 0xff -; SI-NEXT: s_lshl_b32 s5, s95, 8 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_readlane_b32 s12, v22, 18 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s13, v22, 19 +; SI-NEXT: s_lshl_b32 s12, s12, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: v_readlane_b32 s12, v22, 20 +; SI-NEXT: v_readlane_b32 s13, v22, 21 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s14, v22, 22 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s14, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v25 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s94, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s93, 24 
-; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_or_b32_e32 v23, v24, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_or_b32_e32 v21, s4, v21 -; SI-NEXT: s_and_b32 s4, s42, 0xff -; SI-NEXT: s_lshl_b32 s5, s92, 8 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s91, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s90, 24 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: v_readlane_b32 s11, v22, 51 +; SI-NEXT: s_lshl_b32 s11, s11, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_readlane_b32 s11, v22, 50 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: v_readlane_b32 s12, v22, 49 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s12, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v19, v21, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v20, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 -; SI-NEXT: v_or_b32_e32 v18, s4, v18 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s5, s89, 8 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s88, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s79, 24 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: v_readlane_b32 s10, v22, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s11, v22, 25 +; SI-NEXT: s_lshl_b32 s10, s10, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: v_readlane_b32 s10, v22, 26 +; SI-NEXT: v_readlane_b32 s11, v22, 27 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s12, v22, 28 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v19, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v16, v18, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v20, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; 
SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s5, s78, 8 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s77, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s76, 24 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: v_readlane_b32 s9, v22, 48 +; SI-NEXT: s_lshl_b32 s9, s9, 8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_readlane_b32 s9, v22, 47 +; SI-NEXT: s_and_b32 s9, s9, 0xff +; SI-NEXT: v_readlane_b32 s10, v22, 46 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s10, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: s_lshl_b32 s5, s75, 8 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s74, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s73, 24 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_readlane_b32 s8, v22, 30 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s9, v22, 31 +; SI-NEXT: s_lshl_b32 s8, s8, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: v_readlane_b32 s8, v22, 32 +; SI-NEXT: v_readlane_b32 s9, v22, 33 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s10, v22, 34 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s72, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s63, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s62, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; 
SI-NEXT: s_or_b32 s5, s10, s5 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: v_readlane_b32 s7, v22, 45 +; SI-NEXT: s_lshl_b32 s7, s7, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_readlane_b32 s7, v22, 44 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: v_readlane_b32 s8, v22, 43 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s8, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s61, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s60, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s8, s59, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_readlane_b32 s6, v22, 36 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: v_readlane_b32 s7, v22, 37 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s54, 0xff +; SI-NEXT: v_readlane_b32 s8, v22, 38 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s8, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; SI-NEXT: s_lshl_b32 s5, s58, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: v_readlane_b32 s5, v22, 42 +; SI-NEXT: s_lshl_b32 s5, s5, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s57, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: v_readlane_b32 s5, v22, 41 +; SI-NEXT: s_and_b32 s5, s5, 0xff +; SI-NEXT: v_readlane_b32 s6, v22, 40 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s56, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s6, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x74, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen 
+; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_readlane_b32 s19, v22, 11 +; SI-NEXT: v_readlane_b32 s17, v22, 17 +; SI-NEXT: v_readlane_b32 s15, v22, 23 +; SI-NEXT: v_readlane_b32 s13, v22, 29 +; SI-NEXT: v_readlane_b32 s11, v22, 35 +; SI-NEXT: v_readlane_b32 s9, v22, 39 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s71, v41, 23 -; SI-NEXT: v_readlane_b32 s70, v41, 22 -; SI-NEXT: v_readlane_b32 s69, v41, 21 -; SI-NEXT: v_readlane_b32 s68, v41, 20 -; SI-NEXT: v_readlane_b32 s67, v41, 19 -; SI-NEXT: v_readlane_b32 s66, v41, 18 -; SI-NEXT: v_readlane_b32 s65, v41, 17 -; SI-NEXT: v_readlane_b32 s64, v41, 16 -; SI-NEXT: v_readlane_b32 s55, v41, 15 -; SI-NEXT: v_readlane_b32 s54, v41, 14 -; SI-NEXT: v_readlane_b32 s53, v41, 13 -; SI-NEXT: v_readlane_b32 s52, v41, 12 -; SI-NEXT: v_readlane_b32 s51, v41, 11 -; SI-NEXT: v_readlane_b32 s50, v41, 10 -; SI-NEXT: v_readlane_b32 s49, v41, 9 -; SI-NEXT: v_readlane_b32 s48, v41, 8 -; SI-NEXT: v_readlane_b32 s39, v41, 7 -; SI-NEXT: v_readlane_b32 s38, v41, 6 -; SI-NEXT: v_readlane_b32 s37, v41, 5 -; SI-NEXT: v_readlane_b32 s36, v41, 4 -; SI-NEXT: v_readlane_b32 s35, v41, 3 -; SI-NEXT: v_readlane_b32 s34, v41, 2 -; SI-NEXT: v_readlane_b32 s31, v41, 1 -; SI-NEXT: v_readlane_b32 s30, v41, 0 -; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s99, v20, 35 +; SI-NEXT: v_readlane_b32 s98, v20, 34 +; SI-NEXT: v_readlane_b32 s97, v20, 33 +; SI-NEXT: v_readlane_b32 s96, v20, 32 +; SI-NEXT: v_readlane_b32 s87, v20, 31 +; SI-NEXT: v_readlane_b32 s86, v20, 30 +; SI-NEXT: v_readlane_b32 s85, v20, 29 +; SI-NEXT: v_readlane_b32 s84, v20, 28 +; SI-NEXT: v_readlane_b32 s83, v20, 27 +; SI-NEXT: v_readlane_b32 s82, v20, 26 +; SI-NEXT: v_readlane_b32 s81, v20, 25 +; SI-NEXT: v_readlane_b32 s80, v20, 24 +; SI-NEXT: v_readlane_b32 s71, v20, 23 +; SI-NEXT: v_readlane_b32 s70, v20, 22 +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr54 
-; SI-NEXT: ; implicit-def: $sgpr71 -; SI-NEXT: ; implicit-def: $sgpr70 -; SI-NEXT: ; implicit-def: $sgpr69 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $sgpr68 -; SI-NEXT: ; implicit-def: $sgpr67 -; SI-NEXT: ; implicit-def: $sgpr66 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $sgpr65 -; SI-NEXT: ; implicit-def: $sgpr64 -; SI-NEXT: ; implicit-def: $sgpr55 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v22, s54, 0 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s55, 1 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 ; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr52 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr50 -; SI-NEXT: ; implicit-def: $sgpr49 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $sgpr48 -; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr38 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr36 -; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr34 -; SI-NEXT: ; implicit-def: $sgpr31 ; SI-NEXT: ; implicit-def: $sgpr30 -; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr94 -; SI-NEXT: ; implicit-def: $sgpr93 ; SI-NEXT: ; implicit-def: $sgpr92 -; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr90 -; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr88 -; SI-NEXT: ; implicit-def: $sgpr79 ; SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $sgpr63 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; 
implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: s_branch .LBB57_2 -; -; VI-LABEL: bitcast_v16i64_to_v128i8_scalar: -; VI: ; %bb.0: -; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; VI-NEXT: s_mov_b64 exec, s[4:5] -; VI-NEXT: v_writelane_b32 v20, s30, 0 -; VI-NEXT: v_writelane_b32 v20, s31, 1 -; VI-NEXT: v_writelane_b32 v20, s34, 2 -; VI-NEXT: v_writelane_b32 v20, s35, 3 -; VI-NEXT: v_writelane_b32 v20, s36, 4 -; VI-NEXT: v_writelane_b32 v20, s37, 5 -; VI-NEXT: v_writelane_b32 v20, s38, 6 -; VI-NEXT: v_writelane_b32 v20, s39, 7 -; VI-NEXT: v_writelane_b32 v20, s48, 8 -; VI-NEXT: v_writelane_b32 v20, s49, 9 -; VI-NEXT: v_writelane_b32 v20, s50, 10 -; VI-NEXT: v_writelane_b32 v20, s51, 11 -; VI-NEXT: v_writelane_b32 v20, s52, 12 -; VI-NEXT: v_writelane_b32 v20, s53, 13 -; VI-NEXT: v_writelane_b32 v20, s54, 14 -; VI-NEXT: v_writelane_b32 v20, s55, 15 -; VI-NEXT: v_writelane_b32 v20, s64, 16 -; VI-NEXT: v_writelane_b32 v20, s65, 17 -; VI-NEXT: v_writelane_b32 v20, s66, 18 -; VI-NEXT: v_writelane_b32 v20, s67, 19 -; VI-NEXT: v_writelane_b32 v20, s68, 20 -; VI-NEXT: v_writelane_b32 v20, s69, 21 -; VI-NEXT: v_writelane_b32 v20, s70, 22 -; VI-NEXT: v_writelane_b32 v20, s71, 23 -; VI-NEXT: v_writelane_b32 v20, s80, 24 -; VI-NEXT: v_writelane_b32 v20, s81, 25 -; VI-NEXT: v_writelane_b32 v20, s82, 26 -; VI-NEXT: v_writelane_b32 v20, s83, 27 -; VI-NEXT: v_writelane_b32 v20, s84, 28 -; VI-NEXT: v_writelane_b32 v20, s85, 29 -; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; VI-NEXT: v_writelane_b32 v20, s86, 30 -; VI-NEXT: v_readfirstlane_b32 s44, v1 -; VI-NEXT: v_readfirstlane_b32 s45, v2 -; VI-NEXT: v_readfirstlane_b32 s42, v3 -; VI-NEXT: v_readfirstlane_b32 s43, v4 -; VI-NEXT: v_readfirstlane_b32 s40, v5 -; VI-NEXT: v_readfirstlane_b32 s41, v6 -; VI-NEXT: v_readfirstlane_b32 s14, v7 -; VI-NEXT: v_readfirstlane_b32 s15, v8 -; VI-NEXT: v_readfirstlane_b32 s12, v9 -; VI-NEXT: v_readfirstlane_b32 s13, v10 -; VI-NEXT: v_readfirstlane_b32 s10, v11 -; VI-NEXT: v_readfirstlane_b32 s11, v12 -; VI-NEXT: v_readfirstlane_b32 s8, v13 -; VI-NEXT: v_readfirstlane_b32 s9, v14 -; VI-NEXT: v_readfirstlane_b32 s6, v15 -; VI-NEXT: v_readfirstlane_b32 s7, v16 -; VI-NEXT: v_readfirstlane_b32 s4, v17 -; VI-NEXT: s_and_b64 s[46:47], vcc, exec -; VI-NEXT: v_readfirstlane_b32 s5, v18 -; VI-NEXT: v_writelane_b32 v20, s87, 31 -; VI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane -; VI-NEXT: s_cbranch_scc0 .LBB57_4 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 2 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s55, 3 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; 
SI-NEXT: v_writelane_b32 v22, s54, 4 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s55, 5 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 6 +; SI-NEXT: v_writelane_b32 v22, s55, 7 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 8 +; SI-NEXT: v_writelane_b32 v22, s55, 9 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 10 +; SI-NEXT: v_writelane_b32 v22, s55, 11 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 12 +; SI-NEXT: v_writelane_b32 v22, s55, 13 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 14 +; SI-NEXT: v_writelane_b32 v22, s55, 15 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 16 +; SI-NEXT: v_writelane_b32 v22, s55, 17 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 18 +; SI-NEXT: v_writelane_b32 v22, s55, 19 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 20 +; SI-NEXT: v_writelane_b32 v22, s55, 21 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 22 +; SI-NEXT: v_writelane_b32 v22, s55, 23 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 24 +; SI-NEXT: v_writelane_b32 v22, s55, 25 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 26 +; SI-NEXT: v_writelane_b32 v22, s55, 27 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 28 +; SI-NEXT: v_writelane_b32 v22, s55, 29 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 30 +; SI-NEXT: v_writelane_b32 v22, s55, 31 +; SI-NEXT: ; implicit-def: 
$sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 32 +; SI-NEXT: v_writelane_b32 v22, s55, 33 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 34 +; SI-NEXT: v_writelane_b32 v22, s55, 35 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 36 +; SI-NEXT: v_writelane_b32 v22, s55, 37 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 38 +; SI-NEXT: v_writelane_b32 v22, s55, 39 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: s_branch .LBB57_2 +; +; VI-LABEL: bitcast_v16i64_to_v128i8_scalar: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; VI-NEXT: s_mov_b64 exec, s[4:5] +; VI-NEXT: v_writelane_b32 v20, s30, 0 +; VI-NEXT: v_writelane_b32 v20, s31, 1 +; VI-NEXT: v_writelane_b32 v20, s34, 2 +; VI-NEXT: v_writelane_b32 v20, s35, 3 +; VI-NEXT: v_writelane_b32 v20, s36, 4 +; VI-NEXT: v_writelane_b32 v20, s37, 5 +; VI-NEXT: v_writelane_b32 v20, s38, 6 +; VI-NEXT: v_writelane_b32 v20, s39, 7 +; VI-NEXT: v_writelane_b32 v20, s48, 8 +; VI-NEXT: v_writelane_b32 v20, s49, 9 +; VI-NEXT: v_writelane_b32 v20, s50, 10 +; VI-NEXT: v_writelane_b32 v20, s51, 11 +; VI-NEXT: v_writelane_b32 v20, s52, 12 +; VI-NEXT: v_writelane_b32 v20, s53, 13 +; VI-NEXT: v_writelane_b32 v20, s54, 14 +; VI-NEXT: v_writelane_b32 v20, s55, 15 +; VI-NEXT: v_writelane_b32 v20, s64, 16 +; VI-NEXT: v_writelane_b32 v20, s65, 17 +; VI-NEXT: v_writelane_b32 v20, s66, 18 +; VI-NEXT: v_writelane_b32 v20, s67, 19 +; VI-NEXT: v_writelane_b32 v20, s68, 20 +; VI-NEXT: v_writelane_b32 v20, s69, 21 +; VI-NEXT: v_writelane_b32 v20, s70, 22 +; VI-NEXT: v_writelane_b32 v20, s71, 23 +; VI-NEXT: v_writelane_b32 v20, s80, 24 +; VI-NEXT: v_writelane_b32 v20, s81, 25 +; VI-NEXT: v_writelane_b32 v20, s82, 26 +; VI-NEXT: v_writelane_b32 v20, s83, 27 +; VI-NEXT: v_writelane_b32 v20, s84, 28 +; VI-NEXT: v_writelane_b32 v20, s85, 29 +; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 +; VI-NEXT: v_writelane_b32 v20, s86, 30 +; VI-NEXT: v_readfirstlane_b32 s44, v1 +; VI-NEXT: v_readfirstlane_b32 s45, v2 +; VI-NEXT: v_readfirstlane_b32 s42, v3 +; VI-NEXT: v_readfirstlane_b32 s43, v4 +; VI-NEXT: v_readfirstlane_b32 s40, v5 +; VI-NEXT: v_readfirstlane_b32 s41, v6 +; VI-NEXT: v_readfirstlane_b32 s14, v7 +; VI-NEXT: v_readfirstlane_b32 s15, v8 +; VI-NEXT: v_readfirstlane_b32 s12, v9 +; VI-NEXT: v_readfirstlane_b32 s13, v10 +; VI-NEXT: v_readfirstlane_b32 s10, v11 +; VI-NEXT: v_readfirstlane_b32 s11, v12 +; VI-NEXT: v_readfirstlane_b32 s8, v13 +; VI-NEXT: v_readfirstlane_b32 s9, v14 +; VI-NEXT: v_readfirstlane_b32 s6, v15 +; VI-NEXT: v_readfirstlane_b32 s7, v16 +; VI-NEXT: v_readfirstlane_b32 s4, v17 +; VI-NEXT: s_and_b64 s[46:47], vcc, exec +; VI-NEXT: v_readfirstlane_b32 s5, v18 +; VI-NEXT: v_writelane_b32 v20, s87, 31 +; VI-NEXT: ; 
implicit-def: $vgpr21 : SGPR spill to VGPR lane +; VI-NEXT: s_cbranch_scc0 .LBB57_4 ; VI-NEXT: ; %bb.1: ; %cmp.false ; VI-NEXT: s_lshr_b32 s46, s5, 24 ; VI-NEXT: v_writelane_b32 v21, s46, 8 @@ -88751,8 +90059,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 ; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 @@ -88760,133 +90075,93 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:16 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24 ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:80 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:88 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:96 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:104 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:112 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:80 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:88 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:96 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:104 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:112 ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:152 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:160 -; 
SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:168 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:176 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:128 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:136 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:144 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:152 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:160 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:168 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:176 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v9 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v11 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v13 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v17 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v21 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v3 -; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v62, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v30, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v63, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v27 +; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v19 +; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v25 +; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v29 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v23 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v25 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v29 +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v45 +; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v44 +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v43 +; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v42 +; SI-NEXT: v_lshlrev_b32_e32 v41, 8, v41 +; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v55 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v53 +; SI-NEXT: v_lshlrev_b32_e32 
v40, 8, v40 +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v45 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v44 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v52 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v51 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v43 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v42 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v34 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v41 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v40 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v55 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v53 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v52 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v39 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v51 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 
4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v48 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v33 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v35 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:200 @@ -88895,31 +90170,31 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:224 ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:232 ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v32 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; SI-NEXT: buffer_store_dword 
v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:248 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 @@ -88931,140 +90206,206 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:304 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 ; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; 
SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:52 ; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:84 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:92 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:100 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:108 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:116 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:124 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132 -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:164 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:172 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:180 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:196 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:212 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:220 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:228 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:84 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:92 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:100 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:108 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:116 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:124 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:132 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:140 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:148 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:156 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:164 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:172 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:180 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:196 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 
4-byte Folded Spill +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:220 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:228 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:244 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:252 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:260 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:268 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:244 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:252 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:268 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:276 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:284 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:292 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:300 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:308 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:316 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], 
s32 offset:708 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:276 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:284 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:308 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:324 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, 
s[0:3], s32 offset:628 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:780 ; 4-byte 
Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB59_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v57, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 -; SI-NEXT: v_or_b32_e32 v0, v0, v60 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 +; SI-NEXT: v_or_b32_e32 v0, v0, v20 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v30, v1 +; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v4, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 +; SI-NEXT: v_or_b32_e32 v0, v0, v30 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v47, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v6, v0, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_mov_b32_e32 v30, v5 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 +; SI-NEXT: v_or_b32_e32 v2, v2, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v0, v0, v61 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v26, v3 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v5, v2, v3 +; SI-NEXT: v_mov_b32_e32 v3, v7 +; SI-NEXT: v_mov_b32_e32 v2, v9 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 @@ -89073,306 +90414,277 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_lshl_b32 s7, s23, 24 ; SI-NEXT: s_lshl_b32 s8, s27, 24 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 
v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v4, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v5, v2, v3 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 -; SI-NEXT: v_mov_b32_e32 v3, v7 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v6, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_or_b32_e32 v0, v0, v15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v58, v1 ; SI-NEXT: v_or_b32_e32 v7, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v20 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v57 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: v_or_b32_e32 v8, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt 
vmcnt(1) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: v_mov_b32_e32 v2, v9 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v9, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v26 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v28 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v10, v1 +; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v10, v0, v1 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v29, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v0, v15 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v11, v1 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v11, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v23 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v50 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 +; SI-NEXT: v_or_b32_e32 v0, v0, v17 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v12, v1 +; SI-NEXT: v_or_b32_e32 v1, v44, v1 ; SI-NEXT: v_or_b32_e32 v12, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v25 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload 
-; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v45 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, v0, v41 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v13, v1 +; SI-NEXT: v_or_b32_e32 v1, v25, v1 ; SI-NEXT: v_or_b32_e32 v13, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v58 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v58, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v36 +; SI-NEXT: v_or_b32_e32 v0, v0, v40 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v14, v1 +; SI-NEXT: v_or_b32_e32 v1, v21, v1 ; SI-NEXT: v_or_b32_e32 v14, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v27 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v60, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v62 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v18 +; SI-NEXT: v_or_b32_e32 v0, v0, v32 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_or_b32_e32 v1, v15, v1 ; SI-NEXT: v_or_b32_e32 v15, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v43 -; SI-NEXT: v_mov_b32_e32 v43, v16 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_mov_b32_e32 v50, v16 +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v21 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v48 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v16, v1 ; SI-NEXT: v_or_b32_e32 v16, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 +; SI-NEXT: v_mov_b32_e32 v48, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_mov_b32_e32 v32, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v34 ; SI-NEXT: 
v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v17, v1 ; SI-NEXT: v_or_b32_e32 v17, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 -; SI-NEXT: v_mov_b32_e32 v55, v22 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v51, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v33 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v40, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v44 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v18, v1 ; SI-NEXT: v_or_b32_e32 v18, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v50 -; SI-NEXT: v_mov_b32_e32 v44, v23 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v50, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v39 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v57, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v63 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v38 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v19, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v61 -; SI-NEXT: v_mov_b32_e32 v61, v45 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v54 +; SI-NEXT: v_mov_b32_e32 v54, v23 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v40 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v37 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v20, v1 ; SI-NEXT: v_or_b32_e32 v20, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v52 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v21, 
v1 ; SI-NEXT: v_or_b32_e32 v21, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v59 -; SI-NEXT: v_mov_b32_e32 v59, v24 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v28 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v22, v1 ; SI-NEXT: v_or_b32_e32 v22, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v61 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v39, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_mov_b32_e32 v45, v24 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v34, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v23, v1 ; SI-NEXT: v_or_b32_e32 v23, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v59 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v58 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v24, v1 ; SI-NEXT: v_or_b32_e32 v24, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v42 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v43 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v1 +; SI-NEXT: v_mov_b32_e32 v43, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v25, v1 ; SI-NEXT: v_or_b32_e32 v25, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v45 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v56 +; SI-NEXT: v_mov_b32_e32 v55, v56 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v56 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v42 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v63, v1 +; SI-NEXT: v_or_b32_e32 v1, v33, v1 ; SI-NEXT: 
v_or_b32_e32 v26, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v48 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v46 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: v_mov_b32_e32 v46, v33 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v32, v1 +; SI-NEXT: v_mov_b32_e32 v37, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v46 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v27, v1 ; SI-NEXT: v_or_b32_e32 v27, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v28, v1 ; SI-NEXT: v_or_b32_e32 v28, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v37 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v62 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v62, v1 +; SI-NEXT: v_mov_b32_e32 v36, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v56 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v29, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v36 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 -; SI-NEXT: v_or_b32_e32 v0, v0, v30 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v49 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v30, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 ; SI-NEXT: v_or_b32_e32 v0, v0, v3 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v57, v1 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v31, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v40 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v44 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 
-; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: v_mov_b32_e32 v34, v35 -; SI-NEXT: v_mov_b32_e32 v35, v36 -; SI-NEXT: v_mov_b32_e32 v36, v54 -; SI-NEXT: v_mov_b32_e32 v54, v37 -; SI-NEXT: v_mov_b32_e32 v37, v41 -; SI-NEXT: v_mov_b32_e32 v41, v38 -; SI-NEXT: v_mov_b32_e32 v38, v63 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_or_b32_e32 v3, s4, v0 @@ -89399,108 +90711,112 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_or_b32 s6, s6, s7 -; SI-NEXT: v_mov_b32_e32 v57, v1 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_mov_b64 s[4:5], 0 ; SI-NEXT: s_branch .LBB59_3 ; SI-NEXT: .LBB59_2: -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v55, v56 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 
4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v61, v45 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 s[4:5], -1 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mov_b32_e32 v45, v33 -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: v_mov_b32_e32 v34, v35 -; SI-NEXT: v_mov_b32_e32 v35, v36 -; SI-NEXT: v_mov_b32_e32 v36, v54 -; SI-NEXT: v_mov_b32_e32 v54, v37 -; SI-NEXT: v_mov_b32_e32 v37, v41 -; SI-NEXT: v_mov_b32_e32 v41, v38 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload ; SI-NEXT: .LBB59_3: ; %Flow -; SI-NEXT: v_mov_b32_e32 v63, v46 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_mov_b32_e32 v35, v57 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; SI-NEXT: s_cbranch_vccnz .LBB59_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v0, s4, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_and_b32 s4, s16, 0xff ; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s6, s18, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_and_b32 s8, s26, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: buffer_load_dword v2, off, 
s[0:3], s32 offset:836 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, s4, v0 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s5, s20, 0xff ; SI-NEXT: s_lshl_b32 s6, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_and_b32 s7, s22, 0xff ; SI-NEXT: s_addk_i32 s5, 0x300 ; SI-NEXT: s_lshl_b32 s6, s23, 24 -; SI-NEXT: s_lshl_b32 s7, s7, 16 ; SI-NEXT: s_and_b32 s5, s5, 0xffff ; SI-NEXT: s_or_b32 s6, s6, s7 -; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_and_b32 s6, s24, 0xff ; SI-NEXT: s_lshl_b32 s7, s25, 8 -; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: s_and_b32 s8, s26, 0xff ; SI-NEXT: s_addk_i32 s6, 0x300 ; SI-NEXT: s_lshl_b32 s7, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_add_i32 s4, s4, 0x3000000 ; SI-NEXT: s_add_i32 s5, s5, 0x3000000 ; SI-NEXT: s_add_i32 s6, s6, 0x3000000 -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89509,17 +90825,17 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89529,15 +90845,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:532 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89547,15 +90863,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89565,15 +90881,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89583,15 +90899,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword 
v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89601,15 +90917,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89619,15 +90935,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89636,16 +90952,17 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 -; SI-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89655,15 +90972,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89673,84 +90990,79 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v63, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 
4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v43, v1 +; SI-NEXT: v_or_b32_e32 v1, v50, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte 
Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 @@ -89759,15 +91071,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v50, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 @@ -89776,15 +91088,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89794,9 +91106,9 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, 
off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 @@ -89812,106 +91124,110 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v55, v1 +; SI-NEXT: v_or_b32_e32 v1, v48, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x3000000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 -; SI-NEXT: v_or_b32_e32 v0, v39, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_or_b32_e32 v1, v44, v1 +; SI-NEXT: v_or_b32_e32 v1, v54, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_or_b32_e32 v1, v59, v1 +; SI-NEXT: v_or_b32_e32 v1, v45, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt 
vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_or_b32_e32 v0, v42, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v38, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v46, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v41 +; SI-NEXT: v_or_b32_e32 v0, v37, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_and_b32_e32 v1, 
0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -89919,14 +91235,14 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -89934,14 +91250,14 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -89971,7 +91287,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v128i8_to_v16i64_scalar: @@ -89993,113 +91309,115 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; VI-NEXT: 
buffer_store_dword v26, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24 -; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104 -; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:112 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:48 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:80 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:88 +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:96 +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:104 +; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112 ; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 -; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 
offset:136 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:144 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:152 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:128 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:136 +; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:144 +; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:152 +; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:160 +; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:168 +; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:176 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v17 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v5 -; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v17 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v43 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; 
VI-NEXT: v_lshlrev_b32_e32 v0, 8, v42 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v41 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v40 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v55 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v54 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v53 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v52 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v51 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v50 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v22 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v30 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; VI-NEXT: buffer_store_dword v0, off, 
s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 @@ -90108,29 +91426,28 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 -; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v37 -; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v38 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v13 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v9 ; VI-NEXT: s_waitcnt vmcnt(5) -; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v13 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v3 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v7 ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 @@ -90140,130 +91457,141 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; VI-NEXT: 
buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1 +; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v13 ; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 ; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328 ; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12 +; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20 +; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:28 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 +; VI-NEXT: s_waitcnt vmcnt(12) +; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v9 +; VI-NEXT: s_waitcnt vmcnt(10) +; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v7 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v37, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84 -; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:92 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:100 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:108 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:116 -; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:124 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 +; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:76 +; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:84 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 
offset:92 +; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:100 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:108 +; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:116 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124 ; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140 -; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148 -; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156 -; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:172 -; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:180 -; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:188 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:196 -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:204 -; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212 -; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220 -; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228 -; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:236 -; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244 -; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:252 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:260 -; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:268 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:276 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:284 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:140 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:148 +; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:156 +; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:164 +; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:172 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:180 +; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:188 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:196 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:204 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:212 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:220 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:228 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:244 +; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:252 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:260 +; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:268 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:284 ; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:292 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:308 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:316 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:324 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword 
v27, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(12) -; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword 
v50, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:300 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:308 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:316 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:324 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; 
VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill ; VI-NEXT: s_cbranch_scc0 .LBB59_2 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: v_or_b32_sdwa v0, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v4, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v4, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v2, v6, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -90272,208 +91600,197 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v0, v16 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v3, v7 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v29, v9 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v50, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v59, v0 -; VI-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v47, v1 +; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v56, v0 -; VI-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v39, v0 -; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v46, v0 +; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded 
Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v37, v0 -; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v62, v0 +; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v36, v0 -; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v35, v1 -; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v63, v1 +; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v33, v0 -; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_mov_b32_e32 v60, v0 +; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v21, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v51, v3 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_mov_b32_e32 v35, v0 +; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v34, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v22, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v34, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v23, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v43, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v43, v49 -; VI-NEXT: v_or_b32_sdwa v0, v30, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v32, v54 -; VI-NEXT: v_mov_b32_e32 v34, v26 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v49, v1 -; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v59, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v54, v0 -; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v32, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v32, v61 +; VI-NEXT: 
v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v55, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v55, v43 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v46, v61 +; VI-NEXT: v_or_b32_sdwa v0, v42, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v54, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v53, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v45, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v41, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v44, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v41, v33 ; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v58, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v44, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v47, v45 +; VI-NEXT: v_mov_b32_e32 v44, v56 +; VI-NEXT: v_or_b32_sdwa v0, v56, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v38, v39 +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v58, v44 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v48, v0 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v42, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v54, v53 +; VI-NEXT: v_mov_b32_e32 v52, v36 +; 
VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v0, v36, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v1, v33, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v50, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v40, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v63, v42 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v49, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v51, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v48, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v39, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v50, v40 +; VI-NEXT: v_mov_b32_e32 v49, v51 +; VI-NEXT: v_mov_b32_e32 v40, v34 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v57, v0 -; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -90504,85 +91821,95 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: s_mov_b64 s[4:5], 0 ; VI-NEXT: s_branch .LBB59_3 ; VI-NEXT: .LBB59_2: -; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:580 ; 4-byte 
Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v32, v54 -; VI-NEXT: v_mov_b32_e32 v43, v49 -; VI-NEXT: v_mov_b32_e32 v46, v61 -; VI-NEXT: v_mov_b32_e32 v47, v45 -; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v34, v26 -; VI-NEXT: v_mov_b32_e32 v58, v44 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_mov_b32_e32 v63, v42 -; VI-NEXT: v_mov_b32_e32 v51, v7 -; VI-NEXT: v_mov_b32_e32 v48, v29 +; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v44, v56 +; VI-NEXT: v_mov_b32_e32 v41, v33 +; VI-NEXT: v_mov_b32_e32 v50, v40 +; VI-NEXT: v_mov_b32_e32 v38, v39 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v54, v53 +; VI-NEXT: v_mov_b32_e32 v52, v36 +; VI-NEXT: v_mov_b32_e32 v49, v51 ; VI-NEXT: s_mov_b64 s[4:5], -1 ; VI-NEXT: ; implicit-def: 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; VI-NEXT: .LBB59_3: ; %Flow +; VI-NEXT: v_mov_b32_e32 v51, v41 +; VI-NEXT: v_mov_b32_e32 v36, v44 +; VI-NEXT: v_mov_b32_e32 v53, v54 +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_mov_b32_e32 v54, v60 +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; VI-NEXT: v_mov_b32_e32 v44, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_mov_b32_e32 v46, v49 ; VI-NEXT: s_cbranch_vccnz .LBB59_5 ; VI-NEXT: ; %bb.4: ; %cmp.true -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s5, s4 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52 ; VI-NEXT: s_addk_i32 s4, 0x300 -; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_e32 v0, s4, v0 ; VI-NEXT: s_add_i32 s16, s16, 3 -; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_lshl_b32 s5, s17, 8 ; VI-NEXT: s_add_i32 s18, s18, 3 +; VI-NEXT: s_lshl_b32 s6, s19, 8 +; VI-NEXT: s_add_i32 s20, s20, 3 +; VI-NEXT: s_add_i32 s22, s22, 3 +; VI-NEXT: s_lshl_b32 s7, s23, 8 +; VI-NEXT: s_add_i32 s24, s24, 3 +; VI-NEXT: s_add_i32 s26, s26, 3 +; VI-NEXT: s_lshl_b32 s8, s27, 8 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_e32 v0, s4, v0 +; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s18, 0xff -; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_addk_i32 s4, 0x300 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s4, s4, 0xffff ; VI-NEXT: s_lshl_b32 s5, s5, 16 -; VI-NEXT: s_add_i32 s20, s20, 3 ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s20, 0xff ; VI-NEXT: s_lshl_b32 s6, s21, 8 -; VI-NEXT: s_add_i32 s22, s22, 3 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s6, s22, 0xff -; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_addk_i32 s5, 0x300 ; VI-NEXT: s_or_b32 s6, s7, s6 ; VI-NEXT: s_and_b32 s5, s5, 0xffff ; VI-NEXT: s_lshl_b32 s6, s6, 16 -; VI-NEXT: s_add_i32 s24, s24, 3 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: 
s_and_b32 s6, s24, 0xff ; VI-NEXT: s_lshl_b32 s7, s25, 8 -; VI-NEXT: s_add_i32 s26, s26, 3 ; VI-NEXT: s_or_b32 s6, s7, s6 ; VI-NEXT: s_and_b32 s7, s26, 0xff -; VI-NEXT: s_lshl_b32 s8, s27, 8 ; VI-NEXT: s_addk_i32 s6, 0x300 ; VI-NEXT: s_or_b32 s7, s8, s7 ; VI-NEXT: s_and_b32 s6, s6, 0xffff @@ -90591,26 +91918,25 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 ; VI-NEXT: s_add_i32 s5, s5, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 -; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 ; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 @@ -90618,8 +91944,8 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -90631,9 +91957,9 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded 
Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -90645,14 +91971,14 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 @@ -90660,280 +91986,280 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 
4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa 
v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:508 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte 
Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: 
v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa 
v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44 +; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v58 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v63 -; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52 +; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], 
s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v62 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v49 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48 +; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v39 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v0 ; VI-NEXT: v_mov_b32_e32 v0, s4 @@ -90978,504 +92304,524 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:412 ; 
4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 ; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:56 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:64 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:80 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:88 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:96 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:104 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:112 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:80 +; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:88 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:96 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:104 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:112 ; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:144 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:152 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:128 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:136 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:144 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:152 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:160 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:168 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:176 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], 
s32 offset:580 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v11 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v17 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v21 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29 ; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; GFX9-NEXT: s_waitcnt vmcnt(35) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43 -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50 -; 
GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184 +; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v23 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v29 +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v27 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v45 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v43 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v34 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v42 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; 
GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v55 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v49 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v32 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v39 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v37 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v24 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v40 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v54 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v36 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v31 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v38 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v48 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v50 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v51 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v52 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v53 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216 +; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:216 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232 +; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 +; GFX9-NEXT: v_lshlrev_b32_e32 
v41, 8, v41 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v11 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v13 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v5 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v9 ; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v4 -; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248 +; GFX9-NEXT: v_lshlrev_b32_e32 v40, 8, v7 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280 +; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:280 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296 +; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 
8, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312 -; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v13 +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v11 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328 +; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328 ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 ; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 -; GFX9-NEXT: s_waitcnt vmcnt(14) -; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v5 +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v9 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_lshlrev_b32_e32 v45, 8, v7 ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 -; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:84 -; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:92 -; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:100 -; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:108 -; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:116 -; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:124 -; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140 -; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:148 -; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156 -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:164 -; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:172 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:180 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:188 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:196 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:204 -; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:212 -; 
GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:228 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:236 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:252 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:260 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:268 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:276 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:300 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:308 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:316 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:324 -; GFX9-NEXT: s_waitcnt vmcnt(42) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(28) -; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 
offset:724 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:68 +; GFX9-NEXT: buffer_load_ushort v16, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:92 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:100 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108 +; GFX9-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:116 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:124 +; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:132 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:140 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:148 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:156 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:164 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:172 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:180 +; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:196 +; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:204 +; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:212 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:220 +; GFX9-NEXT: s_waitcnt vmcnt(29) +; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:228 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:236 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:244 +; GFX9-NEXT: 
s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:252 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:268 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:284 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:292 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:308 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:316 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:324 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; GFX9-NEXT: 
buffer_store_dword v52, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB59_2 ; GFX9-NEXT: ; %bb.1: ; %cmp.false -; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v38, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, 
v2, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v4, v28 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v2, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v3, v8, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s28, 0xff -; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s4, s4, 0xffff +; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: s_lshl_b32 s6, s19, 8 ; GFX9-NEXT: s_lshl_b32 s7, s23, 8 ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_or_b32_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: 
v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v13, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 
4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v61, v38 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v61, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v63, v57 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v37, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v27, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v33, v43 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_mov_b32_e32 v47, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v22, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v58, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v21, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v60 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v51, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v38, v51 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v35, v62 +; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v37, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v60 -; 
GFX9-NEXT: v_mov_b32_e32 v52, v56 -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_mov_b32_e32 v34, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v44 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_or_b32_sdwa v0, v54, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: v_or_b32_sdwa v1, v32, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v51, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v53, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v39, v31 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v40, v30 ; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v36, v31 +; GFX9-NEXT: v_mov_b32_e32 v45, v62 +; GFX9-NEXT: v_mov_b32_e32 v46, v56 +; GFX9-NEXT: v_mov_b32_e32 v56, v58 +; GFX9-NEXT: v_mov_b32_e32 v58, v53 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s4, v0 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_or_b32 s4, s4, s5 @@ -91506,32 +92852,48 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: s_mov_b64 s[4:5], 0 ; GFX9-NEXT: s_branch .LBB59_3 ; GFX9-NEXT: .LBB59_2: -; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 
offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v61, v0 -; GFX9-NEXT: v_mov_b32_e32 v63, v57 -; GFX9-NEXT: v_mov_b32_e32 v53, v3 +; GFX9-NEXT: v_mov_b32_e32 v38, v51 +; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v33, v43 +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v35, v62 +; GFX9-NEXT: v_mov_b32_e32 v36, v31 +; GFX9-NEXT: v_mov_b32_e32 v40, v30 ; GFX9-NEXT: s_mov_b64 s[4:5], -1 -; GFX9-NEXT: v_mov_b32_e32 v57, v38 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; GFX9-NEXT: .LBB59_3: ; %Flow -; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:408 
; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v62, v35 +; GFX9-NEXT: v_mov_b32_e32 v35, v38 ; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; GFX9-NEXT: s_cbranch_vccnz .LBB59_5 ; GFX9-NEXT: ; %bb.4: ; %cmp.true -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v61 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s16, s16, 3 ; GFX9-NEXT: s_add_i32 s18, s18, 3 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff @@ -91578,348 +92940,352 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: s_movk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s8, 0x300 ; GFX9-NEXT: s_and_b32 s8, s8, 0xffff +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s8, v0 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v60 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v38 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 
4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v42 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: 
v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v63 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: 
v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v53 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v35 +; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v62 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: 
v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v46 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v48 -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v45 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v63 +; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v39 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v54 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v45 -; GFX9-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v40 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v33 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v55 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v32 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: 
v_add_u32_e32 v0, 3, v43 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v36 -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v44 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 +; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v42 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v48 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v41 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v55 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v32 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v35 -; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v39 +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v54 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v33 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v0, s5 @@ -98575,24 +99941,23 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:28 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:40 +; SI-NEXT: v_mov_b32_e32 v52, v30 +; SI-NEXT: v_mov_b32_e32 v53, v28 +; SI-NEXT: v_mov_b32_e32 v40, v12 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40 ; SI-NEXT: s_waitcnt expcnt(3) ; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48 ; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:44 ; SI-NEXT: s_waitcnt expcnt(0) @@ -98602,165 +99967,177 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:68 -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v4 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v14 +; SI-NEXT: v_mul_f32_e32 v14, 1.0, v0 ; SI-NEXT: v_mul_f32_e32 v0, 1.0, v6 -; SI-NEXT: v_mov_b32_e32 v39, v10 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mul_f32_e32 v0, 1.0, v8 -; SI-NEXT: v_mov_b32_e32 v38, v12 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v39 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v38 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v15 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v10 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v18 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v55 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v30 -; SI-NEXT: v_mov_b32_e32 v37, v14 -; SI-NEXT: v_mov_b32_e32 v14, v11 -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v11, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v10, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v12, 1.0, v9 -; SI-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v58, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v56, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v54, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v44, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v46, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v59, 1.0, v9 +; SI-NEXT: v_mul_f32_e32 v57, 1.0, v11 ; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v47, 1.0, v40 +; SI-NEXT: v_mul_f32_e32 v45, 1.0, v15 ; SI-NEXT: v_mul_f32_e32 v15, 1.0, v17 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v16 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: v_mul_f32_e32 v16, 1.0, v19 +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v18 ; SI-NEXT: v_mul_f32_e32 v17, 1.0, v21 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v41, 1.0, v20 ; SI-NEXT: v_mul_f32_e32 v18, 1.0, v23 -; SI-NEXT: v_mul_f32_e32 v41, 1.0, v22 +; SI-NEXT: v_mul_f32_e32 v40, 1.0, v22 ; SI-NEXT: v_mul_f32_e32 v19, 1.0, v25 -; SI-NEXT: v_mul_f32_e32 v40, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v24 ; SI-NEXT: v_mul_f32_e32 v20, 1.0, v27 -; SI-NEXT: v_mul_f32_e32 v55, 1.0, v26 ; SI-NEXT: v_mul_f32_e32 v21, 1.0, v29 -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v28 -; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16 +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v53 +; SI-NEXT: v_mul_f32_e32 v52, 1.0, v52 ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v3, 1.0, s16 ; SI-NEXT: 
v_mul_f32_e64 v1, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v3, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v4, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v2, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v4, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v10, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s22 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v11, 1.0, s26 ; SI-NEXT: v_mul_f32_e64 v6, 1.0, s29 ; SI-NEXT: v_mul_f32_e64 v7, 1.0, s28 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48 +; SI-NEXT: v_mul_f32_e32 v48, 1.0, v26 +; SI-NEXT: v_mul_f32_e32 v22, 1.0, v51 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v43 -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v44 -; SI-NEXT: v_mul_f32_e32 v24, 1.0, v45 -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v46 -; SI-NEXT: v_mul_f32_e32 v25, 1.0, v47 -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v56 -; SI-NEXT: v_mul_f32_e32 v26, 1.0, v57 -; SI-NEXT: v_mul_f32_e32 v49, 1.0, v58 -; SI-NEXT: v_mul_f32_e32 v27, 1.0, v59 -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v60 -; SI-NEXT: v_mul_f32_e32 v28, 1.0, v61 +; SI-NEXT: v_mul_f32_e32 v23, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v51, 1.0, v50 +; SI-NEXT: v_mul_f32_e32 v24, 1.0, v38 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v39 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v30 +; SI-NEXT: v_mul_f32_e32 v26, 1.0, v28 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v31 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_mul_f32_e32 v38, 1.0, v60 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_mul_f32_e32 v28, 1.0, v42 +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_mul_f32_e32 v37, 1.0, v62 +; SI-NEXT: s_waitcnt vmcnt(10) ; SI-NEXT: v_mul_f32_e32 v29, 1.0, v63 +; SI-NEXT: s_waitcnt vmcnt(9) ; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v35 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_mul_f32_e32 v31, 1.0, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v36 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v35, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v36, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v42, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v33, 1.0, s22 -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], 
s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mul_f32_e32 v33, 1.0, v35 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_mul_f32_e32 v42, 1.0, v36 +; SI-NEXT: v_mul_f32_e64 v12, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v34, 1.0, s24 +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword 
v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB63_4 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB63_2 ; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v3, 16 +; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v10 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_alignbit_b32 v2, v2, v8, 16 +; SI-NEXT: v_alignbit_b32 v3, v3, v9, 16 ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_alignbit_b32 v6, v6, v7, 16 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v5, v5, v8, 16 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v1, v1, v35, 16 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_mov_b32_e32 v59, v2 -; SI-NEXT: v_alignbit_b32 v0, v0, v2, 16 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36 -; SI-NEXT: v_alignbit_b32 v2, v2, v42, 16 -; SI-NEXT: v_mov_b32_e32 v57, v11 -; SI-NEXT: v_mov_b32_e32 v47, v10 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_mov_b32_e32 v45, v12 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_alignbit_b32 v3, v3, v33, 16 -; SI-NEXT: v_mov_b32_e32 v33, v14 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v58 +; SI-NEXT: s_waitcnt 
expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v56 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v44 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_mov_b32_e32 v62, v38 +; SI-NEXT: v_alignbit_b32 v5, v5, v11, 16 +; SI-NEXT: v_alignbit_b32 v7, v7, v14, 16 +; SI-NEXT: v_alignbit_b32 v8, v8, v54, 16 +; SI-NEXT: v_alignbit_b32 v9, v9, v46, 16 +; SI-NEXT: v_mov_b32_e32 v62, v61 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v61 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v59 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_alignbit_b32 v13, v13, v47, 16 +; SI-NEXT: v_mov_b32_e32 v46, v45 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v45 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v12 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v57 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 @@ -98768,16 +100145,6 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 -; SI-NEXT: v_mov_b32_e32 v61, v53 -; SI-NEXT: v_alignbit_b32 v15, v15, v53, 16 -; SI-NEXT: v_alignbit_b32 v17, v17, v39, 16 -; SI-NEXT: v_alignbit_b32 v18, v18, v41, 16 -; SI-NEXT: v_alignbit_b32 v19, v19, v40, 16 -; SI-NEXT: v_alignbit_b32 v20, v20, v55, 16 -; SI-NEXT: v_alignbit_b32 v21, v21, v54, 16 -; SI-NEXT: v_alignbit_b32 v29, v29, v32, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 ; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 @@ -98785,212 +100152,238 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a ; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v26 ; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v27 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 ; SI-NEXT: v_alignbit_b32 v30, v30, v31, 16 -; SI-NEXT: v_alignbit_b32 v23, v23, v52, 16 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 +; SI-NEXT: v_alignbit_b32 v4, v4, v34, 16 +; SI-NEXT: v_mov_b32_e32 v63, v44 +; SI-NEXT: v_mov_b32_e32 v58, v57 +; SI-NEXT: v_mov_b32_e32 v44, v43 +; SI-NEXT: v_alignbit_b32 v16, v16, v43, 16 +; SI-NEXT: v_mov_b32_e32 v43, v41 +; SI-NEXT: v_alignbit_b32 v17, v17, v41, 16 +; SI-NEXT: v_alignbit_b32 v18, v18, v40, 16 +; SI-NEXT: v_mov_b32_e32 v40, v55 +; SI-NEXT: v_alignbit_b32 v19, v19, v55, 16 +; SI-NEXT: v_alignbit_b32 v20, v20, v48, 16 +; SI-NEXT: v_mov_b32_e32 v48, v53 +; SI-NEXT: v_alignbit_b32 v21, v21, v53, 16 +; SI-NEXT: v_alignbit_b32 v22, v22, v52, 16 ; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_alignbit_b32 v24, v24, v51, 16 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_alignbit_b32 v25, v25, v50, 16 +; SI-NEXT: v_alignbit_b32 v23, v23, v51, 16 +; SI-NEXT: v_alignbit_b32 v24, v24, v50, 16 ; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_alignbit_b32 
v26, v26, v49, 16 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: v_alignbit_b32 v27, v27, v48, 16 -; SI-NEXT: v_mov_b32_e32 v48, v37 +; SI-NEXT: v_alignbit_b32 v25, v25, v49, 16 +; SI-NEXT: v_mov_b32_e32 v36, v39 +; SI-NEXT: v_alignbit_b32 v26, v26, v39, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v34, v38 +; SI-NEXT: v_alignbit_b32 v27, v27, v38, 16 +; SI-NEXT: v_mov_b32_e32 v35, v37 ; SI-NEXT: v_alignbit_b32 v28, v28, v37, 16 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v37, v32 +; SI-NEXT: v_alignbit_b32 v29, v29, v32, 16 +; SI-NEXT: v_alignbit_b32 v31, v31, v42, 16 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v33, v42 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_alignbit_b32 v10, v10, v61, 16 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_mov_b32_e32 v35, v7 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_alignbit_b32 v12, v12, v54, 16 +; SI-NEXT: v_mov_b32_e32 v41, v61 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_mov_b32_e32 v43, v8 -; SI-NEXT: v_alignbit_b32 v7, v7, v8, 16 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v9 +; SI-NEXT: v_alignbit_b32 v11, v11, v59, 16 +; SI-NEXT: v_mov_b32_e32 v55, v59 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v32 -; SI-NEXT: v_alignbit_b32 v31, v31, v34, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v60, v8 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_alignbit_b32 v8, v8, v9, 16 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v14, v14, v45, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v58, v11 -; SI-NEXT: v_alignbit_b32 v9, v9, v11, 16 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v56, v11 -; SI-NEXT: v_alignbit_b32 v10, v10, v11, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v46, v12 -; SI-NEXT: v_alignbit_b32 v11, v11, v12, 16 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v63, v14 -; SI-NEXT: v_alignbit_b32 v12, v12, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v44, v14 -; SI-NEXT: v_alignbit_b32 v13, v13, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v36, v14 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_alignbit_b32 v14, v14, v38, 16 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v53, v38 -; SI-NEXT: v_alignbit_b32 v16, v16, v38, 16 -; SI-NEXT: v_mov_b32_e32 v38, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v40 +; SI-NEXT: v_alignbit_b32 v15, v15, v47, 16 +; SI-NEXT: v_mov_b32_e32 v51, v47 +; SI-NEXT: v_mov_b32_e32 v53, v45 +; SI-NEXT: s_branch .LBB63_3 +; SI-NEXT: .LBB63_2: +; SI-NEXT: 
v_mov_b32_e32 v63, v44 +; SI-NEXT: v_mov_b32_e32 v44, v43 +; SI-NEXT: v_mov_b32_e32 v43, v41 ; SI-NEXT: v_mov_b32_e32 v40, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v22, v54, 16 -; SI-NEXT: s_cbranch_execnz .LBB63_3 -; SI-NEXT: .LBB63_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v59 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v35 +; SI-NEXT: v_mov_b32_e32 v48, v53 +; SI-NEXT: v_mov_b32_e32 v52, v51 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v62, v61 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v58, v57 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v46, v45 +; SI-NEXT: v_mov_b32_e32 v50, v49 +; SI-NEXT: v_mov_b32_e32 v36, v39 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v34, v38 +; SI-NEXT: v_mov_b32_e32 v35, v37 +; SI-NEXT: v_mov_b32_e32 v37, v32 +; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v33, v42 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: .LBB63_3: ; %Flow +; SI-NEXT: v_mov_b32_e32 v38, v50 +; SI-NEXT: v_mov_b32_e32 v39, v52 +; SI-NEXT: v_mov_b32_e32 v49, v40 +; SI-NEXT: v_mov_b32_e32 v50, v43 +; SI-NEXT: v_mov_b32_e32 v43, v44 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: s_cbranch_vccnz .LBB63_5 +; SI-NEXT: ; 
%bb.4: ; %cmp.true +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v44 ; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 ; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v60 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v40 ; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 ; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v57 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v63 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v47 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v62 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v45 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v60 ; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 ; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v58 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v36 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v46 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 ; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v32 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:248 ; 
4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 ; SI-NEXT: v_alignbit_b32 v1, v3, v2, 16 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 ; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 ; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 ; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 ; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 +; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 ; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 ; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 +; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 -; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: s_waitcnt vmcnt(12) ; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 ; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 ; SI-NEXT: 
v_lshrrev_b32_e32 v21, 16, v21 +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(10) ; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 -; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 ; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v25 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 ; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 ; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v26 +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 ; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 ; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(5) ; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 ; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 +; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 ; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 +; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 ; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 +; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v31 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 -; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -99004,105 +100397,107 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_alignbit_b32 v3, v4, v3, 16 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: 
buffer_load_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 ; SI-NEXT: v_alignbit_b32 v6, v7, v6, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v43 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v45 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 ; SI-NEXT: v_alignbit_b32 v7, v8, v7, 16 ; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v42 ; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 ; SI-NEXT: v_alignbit_b32 v8, v9, v8, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v58 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v52 ; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 ; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v56 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v41 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_alignbit_b32 v10, v11, v10, 16 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v46 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v55 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_alignbit_b32 v11, v12, v11, 16 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v63 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v54 ; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 ; SI-NEXT: v_alignbit_b32 v12, v13, v12, 16 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v56 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 ; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v62 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v53 ; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 ; SI-NEXT: v_alignbit_b32 v14, v15, v14, 16 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v61 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v51 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_alignbit_b32 v15, v16, v15, 16 -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v53 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v43 ; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 ; SI-NEXT: v_alignbit_b32 v16, v17, v16, 16 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v38 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v50 ; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 ; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v39 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; SI-NEXT: v_alignbit_b32 v18, v19, v18, 16 -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v41 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v49 ; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 ; SI-NEXT: v_alignbit_b32 v19, v20, v19, 16 -; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v40 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; SI-NEXT: v_alignbit_b32 v20, v21, v20, 16 -; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v55 +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v48 ; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 ; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; SI-NEXT: 
v_alignbit_b32 v22, v23, v22, 16 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v39 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 ; SI-NEXT: v_alignbit_b32 v23, v24, v23, 16 -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v52 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 ; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 ; SI-NEXT: v_alignbit_b32 v24, v25, v24, 16 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v51 +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v38 ; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 -; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v50 +; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v36 ; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 ; SI-NEXT: v_alignbit_b32 v26, v27, v26, 16 -; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v49 +; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v34 ; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 ; SI-NEXT: v_alignbit_b32 v27, v28, v27, 16 -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v48 +; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v35 ; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 ; SI-NEXT: v_alignbit_b32 v28, v29, v28, 16 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v37 ; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 ; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16 -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 ; SI-NEXT: v_alignbit_b32 v30, v31, v30, 16 -; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v37 +; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v33 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 ; SI-NEXT: v_alignbit_b32 v31, v32, v31, 16 -; SI-NEXT: .LBB63_3: ; %end +; SI-NEXT: .LBB63_5: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -99121,41 +100516,6 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB63_4: -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: 
buffer_load_dword v44, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v61, v53 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v59, v2 -; SI-NEXT: v_mov_b32_e32 v57, v11 -; SI-NEXT: v_mov_b32_e32 v47, v10 -; SI-NEXT: v_mov_b32_e32 v45, v12 -; SI-NEXT: v_mov_b32_e32 v33, v14 -; SI-NEXT: v_mov_b32_e32 v62, v38 -; SI-NEXT: v_mov_b32_e32 v38, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v40 -; SI-NEXT: v_mov_b32_e32 v40, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_branch .LBB63_2 ; ; VI-LABEL: bitcast_v64bf16_to_v16i64_scalar: ; VI: ; %bb.0: @@ -101344,13 +102704,12 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr45 ; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr41 @@ -101360,15 +102719,16 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr58 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr56 ; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr32 @@ -101451,108 +102811,89 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v63 ; SI-NEXT: v_cvt_f32_f16_e32 v34, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v54, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v52, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v58, v31 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 
v27, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v63 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v29 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v28 -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v36, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v30 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v50, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v56, v28 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v44, v5 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v42, v4 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:124 ; 4-byte Folded 
Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v56, v3 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v58, v2 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v46, v4 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v60, v1 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v44, v3 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v35, v2 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v12 @@ -101582,6 +102923,7 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v47, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v31 @@ 
-101590,17 +102932,19 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v61, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v62 -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v60, v1 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 @@ -101617,7 +102961,22 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 ; SI-NEXT: ; implicit-def: $vgpr16 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr27 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr63 ; SI-NEXT: .LBB64_2: ; %Flow ; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; SI-NEXT: s_cbranch_execz .LBB64_4 @@ -101626,8 +102985,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_addc_u32_e32 v35, vcc, 0, v2, vcc ; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 ; SI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5 ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5 ; 
SI-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc ; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v7 ; SI-NEXT: v_addc_u32_e32 v8, vcc, 0, v8, vcc @@ -101646,108 +103005,95 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21 ; SI-NEXT: v_addc_u32_e32 v22, vcc, 0, v22, vcc ; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v23 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v9 ; SI-NEXT: v_addc_u32_e32 v24, vcc, 0, v24, vcc -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 ; SI-NEXT: v_add_i32_e32 v25, vcc, 3, v25 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v10 ; SI-NEXT: v_addc_u32_e32 v26, vcc, 0, v26, vcc -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v27 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v9, v50 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 ; SI-NEXT: v_addc_u32_e32 v28, vcc, 0, v28, vcc -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v25 +; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v9 +; SI-NEXT: v_addc_u32_e32 v30, vcc, 0, v30, vcc +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v9, v48 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v48 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v44, vcc, 3, v63 +; SI-NEXT: v_addc_u32_e32 v46, vcc, 0, v62, vcc ; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v22 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v44 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v46 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v9, v38 -; SI-NEXT: v_mov_b32_e32 v38, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v62 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v50 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v10, v38 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v19 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v23 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], 
s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v58 +; SI-NEXT: v_mov_b32_e32 v58, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v34 +; SI-NEXT: v_mov_b32_e32 v34, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v32 +; SI-NEXT: v_mov_b32_e32 v32, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v56 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 -; SI-NEXT: v_addc_u32_e32 v30, vcc, 0, v30, vcc -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_add_i32_e32 v42, vcc, 3, v63 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v2 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v14 -; SI-NEXT: v_addc_u32_e32 v44, vcc, 0, v62, vcc -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v61 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v42 -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v44 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v59 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v57 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v4 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v47 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v43 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v61 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v10 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v11 ; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v12 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v57 ; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v14 ; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v27 ; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v28 -; SI-NEXT: 
v_lshrrev_b32_e32 v40, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v44 ; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v46, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v44, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v63 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v62 ; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 @@ -101765,12 +103111,17 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 ; SI-NEXT: v_cvt_f32_f16_e32 v60, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 ; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 ; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 ; SI-NEXT: v_cvt_f32_f16_e32 v52, v52 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v47 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v43 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 ; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 ; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 @@ -101778,39 +103129,36 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v53, v53 ; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 ; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v43, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v47, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v59, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v61, v6 ; SI-NEXT: v_mov_b32_e32 v50, v29 ; SI-NEXT: v_mov_b32_e32 v48, v30 -; SI-NEXT: v_mov_b32_e32 v46, v28 -; SI-NEXT: v_mov_b32_e32 v34, v8 -; SI-NEXT: v_mov_b32_e32 v32, v7 -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:156 ; 
4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cvt_f32_f16_e32 v43, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v45, v1 +; SI-NEXT: v_mov_b32_e32 v56, v28 +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: .LBB64_4: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] ; SI-NEXT: v_cvt_f16_f32_e32 v1, v61 @@ -101821,32 +103169,34 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v58 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v35 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v56 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v44 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; 
SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v47 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v42 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v46 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v44 ; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v43 ; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 @@ -101855,7 +103205,7 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v41 ; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 @@ -101864,7 +103214,7 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v55 ; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 @@ -101873,7 +103223,7 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v53 ; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 @@ -101882,7 +103232,7 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v51 ; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 @@ -101891,7 +103241,7 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v49 ; SI-NEXT: 
v_add_i32_e32 v3, vcc, 40, v0 @@ -101900,7 +103250,7 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v39 ; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 @@ -101909,7 +103259,7 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v37 ; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 @@ -101918,16 +103268,16 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 ; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v33 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 @@ -101937,8 +103287,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -101948,8 +103298,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -101959,8 +103309,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 
offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -101970,8 +103320,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -101981,8 +103331,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -101992,8 +103342,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -102003,8 +103353,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -102014,8 +103364,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -102025,8 +103375,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -102036,8 +103386,8 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -102046,18 +103396,16 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v1, v58 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v52 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 @@ -102068,20 +103416,20 @@ define <64 x half> @bitcast_v16i64_to_v64f16(<16 x i64> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v54 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v56 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v50 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v40 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v42 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v48 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -105876,385 +107224,431 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v16i64_to_v64i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: v_writelane_b32 v20, s51, 11 +; SI-NEXT: v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_readfirstlane_b32 s47, v1 -; SI-NEXT: v_readfirstlane_b32 s46, v2 -; SI-NEXT: v_readfirstlane_b32 s45, v3 -; SI-NEXT: v_readfirstlane_b32 s44, v4 -; SI-NEXT: v_readfirstlane_b32 s43, v5 -; SI-NEXT: v_readfirstlane_b32 s42, v6 -; SI-NEXT: v_readfirstlane_b32 s41, v7 -; SI-NEXT: v_readfirstlane_b32 s40, v8 -; SI-NEXT: v_readfirstlane_b32 s15, v9 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s12, v12 -; SI-NEXT: v_readfirstlane_b32 s11, v13 -; SI-NEXT: v_readfirstlane_b32 s10, v14 -; SI-NEXT: v_readfirstlane_b32 s9, v15 -; SI-NEXT: v_readfirstlane_b32 s8, v16 -; SI-NEXT: v_readfirstlane_b32 s7, v17 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v18 +; SI-NEXT: v_writelane_b32 v20, s68, 20 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: v_writelane_b32 v20, s69, 21 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s47 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v11, s26 -; SI-NEXT: 
v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v13, s22 -; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v15, s18 -; SI-NEXT: v_mov_b32_e32 v16, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s46, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s29, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s27, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s23, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s21, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s19, v15, 16 -; SI-NEXT: v_alignbit_b32 v16, s17, v16, 16 -; SI-NEXT: s_lshr_b32 s56, s6, 16 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s58, s10, 16 -; SI-NEXT: s_lshr_b32 s59, s12, 16 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: s_lshr_b32 s61, s40, 16 -; SI-NEXT: s_lshr_b32 s62, s42, 16 -; SI-NEXT: s_lshr_b32 s63, s44, 16 -; SI-NEXT: s_lshr_b32 s72, s46, 16 -; SI-NEXT: s_lshr_b32 s73, s29, 16 -; SI-NEXT: s_lshr_b32 s74, s27, 16 -; SI-NEXT: s_lshr_b32 s75, s25, 16 -; SI-NEXT: s_lshr_b32 s76, s23, 16 -; SI-NEXT: s_lshr_b32 s77, s21, 16 -; SI-NEXT: s_lshr_b32 s78, s19, 16 -; SI-NEXT: s_lshr_b32 s79, s17, 16 +; SI-NEXT: s_lshr_b32 s38, s5, 16 +; SI-NEXT: s_lshr_b32 s39, s7, 16 +; SI-NEXT: s_lshr_b32 s48, s9, 16 +; SI-NEXT: s_lshr_b32 s49, s11, 16 +; SI-NEXT: s_lshr_b32 s50, s13, 16 +; SI-NEXT: s_lshr_b32 s51, s15, 16 +; SI-NEXT: s_lshr_b32 s52, s41, 16 +; SI-NEXT: s_lshr_b32 s53, s43, 16 +; SI-NEXT: s_lshr_b32 s54, s45, 16 +; SI-NEXT: s_lshr_b32 s55, s29, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 16 +; SI-NEXT: s_lshr_b32 s67, s21, 16 +; SI-NEXT: s_lshr_b32 s68, s19, 16 +; SI-NEXT: s_lshr_b32 s69, s17, 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 
0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 +; SI-NEXT: s_add_u32 s40, s40, 3 +; SI-NEXT: s_addc_u32 s41, s41, 0 +; SI-NEXT: s_add_u32 s42, s42, 3 +; SI-NEXT: s_addc_u32 s43, s43, 0 +; SI-NEXT: s_add_u32 s44, s44, 3 +; SI-NEXT: s_addc_u32 s45, s45, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s47, s47, 3 -; SI-NEXT: s_addc_u32 s46, s46, 0 -; SI-NEXT: s_add_u32 s45, s45, 3 -; SI-NEXT: s_addc_u32 s44, s44, 0 -; SI-NEXT: s_add_u32 s43, s43, 3 -; SI-NEXT: s_addc_u32 s42, s42, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s47 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v11, s26 -; SI-NEXT: v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v13, s22 -; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v15, s18 -; SI-NEXT: v_mov_b32_e32 v16, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s46, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s29, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s27, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s23, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s21, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s19, v15, 16 -; SI-NEXT: v_alignbit_b32 v16, s17, v16, 16 -; SI-NEXT: s_lshr_b32 s56, s6, 16 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s58, s10, 16 -; SI-NEXT: s_lshr_b32 s59, s12, 16 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: s_lshr_b32 s61, s40, 16 -; SI-NEXT: s_lshr_b32 s62, s42, 16 -; SI-NEXT: s_lshr_b32 s63, s44, 16 -; SI-NEXT: s_lshr_b32 s72, s46, 16 -; SI-NEXT: s_lshr_b32 s73, s29, 16 -; SI-NEXT: s_lshr_b32 s74, s27, 16 -; SI-NEXT: s_lshr_b32 s75, s25, 16 -; SI-NEXT: s_lshr_b32 s76, s23, 16 -; SI-NEXT: s_lshr_b32 s77, s21, 16 -; SI-NEXT: s_lshr_b32 s78, s19, 16 -; SI-NEXT: s_lshr_b32 s79, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s38, s5, 16 +; SI-NEXT: s_lshr_b32 s39, s7, 16 +; SI-NEXT: s_lshr_b32 s48, s9, 16 +; SI-NEXT: s_lshr_b32 s49, s11, 16 +; SI-NEXT: s_lshr_b32 s50, s13, 16 +; SI-NEXT: s_lshr_b32 s51, s15, 16 +; SI-NEXT: s_lshr_b32 s52, s41, 16 +; SI-NEXT: s_lshr_b32 s53, s43, 16 +; SI-NEXT: s_lshr_b32 s54, s45, 16 +; SI-NEXT: s_lshr_b32 s55, s29, 16 +; 
SI-NEXT: s_lshr_b32 s64, s27, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 16 +; SI-NEXT: s_lshr_b32 s67, s21, 16 +; SI-NEXT: s_lshr_b32 s68, s19, 16 +; SI-NEXT: s_lshr_b32 s69, s17, 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 ; SI-NEXT: .LBB69_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_or_b32_e32 v16, s4, v16 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s79, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: buffer_store_dword v16, v0, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s47, s36, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s69, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s34, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s68, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s30, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s78, 16 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v16, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v16, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s67, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s94, 16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; 
SI-NEXT: s_lshl_b32 s5, s77, 16 -; SI-NEXT: buffer_store_dword v16, v15, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s66, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s75, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s65, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s47, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s64, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 
v10, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s46, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s45, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s44, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s44, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xffff +; SI-NEXT: s_lshl_b32 s17, s54, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 
offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt 
expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr79 -; SI-NEXT: ; 
implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr55 ; SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr77 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr75 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr52 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr46 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v16i64_to_v64i16_scalar: @@ -107332,179 +108726,162 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v56, v10 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_mov_b32_e32 v57, v8 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 +; SI-NEXT: v_mov_b32_e32 v54, v12 +; SI-NEXT: v_mov_b32_e32 v34, v10 +; SI-NEXT: v_mov_b32_e32 v35, v8 +; SI-NEXT: v_mov_b32_e32 v38, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
buffer_load_dword v63, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:56 ; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v15 ; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v19 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v23 ; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v27 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v40 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v36 ; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v8 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v10 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v12 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v38 -; SI-NEXT: s_waitcnt vmcnt(5) expcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v33 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v50 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 ; SI-NEXT: s_waitcnt vmcnt(1) -; 
SI-NEXT: v_lshlrev_b32_e32 v34, 16, v32 -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; 
SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB71_4 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB71_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v7, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 +; SI-NEXT: v_or_b32_e32 v7, v0, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v9, v0, v50 +; SI-NEXT: v_or_b32_e32 v9, v0, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v10, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 -; SI-NEXT: v_or_b32_e32 v11, v0, v41 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 -; SI-NEXT: v_or_b32_e32 v12, v0, v40 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_mov_b32_e32 v52, v57 -; SI-NEXT: v_mov_b32_e32 v57, v40 -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v13 -; SI-NEXT: v_or_b32_e32 v13, v0, v13 +; SI-NEXT: v_or_b32_e32 v10, v0, v47 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_or_b32_e32 v11, v0, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v12, v0, v45 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v54 +; SI-NEXT: v_or_b32_e32 v13, v0, v44 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v14 -; SI-NEXT: v_mov_b32_e32 v36, v41 -; SI-NEXT: v_mov_b32_e32 v41, v14 -; SI-NEXT: v_or_b32_e32 v14, v0, v48 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v54 +; SI-NEXT: v_mov_b32_e32 v54, v14 +; SI-NEXT: v_or_b32_e32 v14, v0, v43 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v16 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v43 -; SI-NEXT: v_mov_b32_e32 v43, v48 -; SI-NEXT: v_mov_b32_e32 v48, v15 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v15, v0, v15 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v18 -; SI-NEXT: v_mov_b32_e32 v38, v61 -; SI-NEXT: v_mov_b32_e32 v61, v56 -; SI-NEXT: v_mov_b32_e32 v56, v16 -; SI-NEXT: v_or_b32_e32 v16, v0, v37 +; SI-NEXT: v_or_b32_e32 v16, v0, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v20 ; SI-NEXT: v_or_b32_e32 v17, v0, v17 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v18, v0, v35 +; SI-NEXT: v_or_b32_e32 v18, v0, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; SI-NEXT: v_or_b32_e32 v19, v0, v19 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 -; SI-NEXT: v_mov_b32_e32 v37, v20 -; SI-NEXT: v_or_b32_e32 v20, v0, v33 +; SI-NEXT: v_or_b32_e32 v20, v0, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: v_or_b32_e32 v21, v0, v21 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 -; SI-NEXT: v_or_b32_e32 v22, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_mov_b32_e32 v35, v24 -; SI-NEXT: v_mov_b32_e32 v39, v23 +; SI-NEXT: v_or_b32_e32 v22, v0, v61 +; SI-NEXT: v_and_b32_e32 
v0, 0xffff, v32 ; SI-NEXT: v_or_b32_e32 v23, v0, v23 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 -; SI-NEXT: v_mov_b32_e32 v24, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v24, v0, v24 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v24, v0, v57 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v63 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 ; SI-NEXT: v_or_b32_e32 v25, v0, v25 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46 -; SI-NEXT: v_mov_b32_e32 v26, v27 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v26, v0, v26 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 +; SI-NEXT: v_or_b32_e32 v26, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v27, v0, v54 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v27, v0, v27 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_mov_b32_e32 v33, v28 ; SI-NEXT: v_or_b32_e32 v28, v0, v5 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: v_or_b32_e32 v29, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v29, v0, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 @@ -107512,15 +108889,18 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: v_or_b32_e32 v30, v0, v3 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_mov_b32_e32 v63, v2 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_or_b32_e32 v8, v1, v55 -; SI-NEXT: v_mov_b32_e32 v55, v4 -; SI-NEXT: v_mov_b32_e32 v53, v6 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v45, v44 -; SI-NEXT: v_mov_b32_e32 v59, v42 -; SI-NEXT: v_or_b32_e32 v31, v0, v34 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v38, v2 +; SI-NEXT: v_or_b32_e32 v8, v1, v56 +; SI-NEXT: v_mov_b32_e32 v42, v41 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v55, v61 +; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v62, v52 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v49, v51 +; SI-NEXT: v_or_b32_e32 v31, v0, v31 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -107528,14 +108908,40 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v4, s8 ; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: s_cbranch_execnz .LBB71_3 -; SI-NEXT: .LBB71_2: ; %cmp.true +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_branch .LBB71_3 +; SI-NEXT: .LBB71_2: +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v42, v41 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v55, v61 +; SI-NEXT: v_mov_b32_e32 v38, v2 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v54 +; SI-NEXT: v_mov_b32_e32 v54, v14 +; SI-NEXT: 
v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v62, v52 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v49, v51 +; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: .LBB71_3: ; %Flow +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: v_mov_b32_e32 v58, v49 +; SI-NEXT: s_cbranch_vccnz .LBB71_5 +; SI-NEXT: ; %bb.4: ; %cmp.true +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v48, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v32, v1 +; SI-NEXT: v_or_b32_e32 v1, v56, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v52, v53 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 @@ -107576,143 +108982,143 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v4, s8 ; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v38, v0 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_or_b32_e32 v0, v39, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v50, v0 +; SI-NEXT: v_or_b32_e32 v0, v47, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v36, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v49, v0 +; SI-NEXT: v_or_b32_e32 v0, v44, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v43, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:216 ; 4-byte 
Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v48, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword 
v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v39, v0 -; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 +; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte 
Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v30, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 @@ -107721,7 +109127,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: v_add_i32_e32 v31, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 -; SI-NEXT: .LBB71_3: ; %end +; SI-NEXT: .LBB71_5: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -107740,35 +109146,6 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3 ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB71_4: -; SI-NEXT: v_mov_b32_e32 v38, v61 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v63, v2 -; SI-NEXT: v_mov_b32_e32 v55, v4 -; SI-NEXT: v_mov_b32_e32 v53, v6 -; SI-NEXT: v_mov_b32_e32 v52, v57 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v61, v56 -; SI-NEXT: v_mov_b32_e32 v50, v43 -; SI-NEXT: v_mov_b32_e32 v36, v41 -; SI-NEXT: v_mov_b32_e32 v57, v40 -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v13 -; SI-NEXT: v_mov_b32_e32 v43, v48 -; SI-NEXT: v_mov_b32_e32 v48, v15 -; SI-NEXT: v_mov_b32_e32 v41, v14 -; SI-NEXT: v_mov_b32_e32 v56, v16 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v45, v44 -; SI-NEXT: v_mov_b32_e32 v59, v42 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v37, v20 -; SI-NEXT: v_mov_b32_e32 v39, v23 -; SI-NEXT: v_mov_b32_e32 v35, v24 -; SI-NEXT: v_mov_b32_e32 v33, v28 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_branch .LBB71_2 ; ; VI-LABEL: bitcast_v64i16_to_v16i64_scalar: ; VI: ; %bb.0: @@ -112695,656 +114072,1312 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a, ; SI-LABEL: bitcast_v16f64_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v61, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v63, s30, 0 +; SI-NEXT: v_writelane_b32 v63, s31, 1 +; SI-NEXT: v_writelane_b32 v63, s34, 2 +; SI-NEXT: v_writelane_b32 v63, s35, 3 +; SI-NEXT: v_writelane_b32 v63, s36, 4 +; SI-NEXT: v_writelane_b32 v63, s37, 5 +; SI-NEXT: v_writelane_b32 v63, s38, 6 +; SI-NEXT: v_writelane_b32 v63, s39, 7 +; SI-NEXT: v_writelane_b32 v63, s48, 8 +; SI-NEXT: v_writelane_b32 v63, s49, 9 +; SI-NEXT: v_writelane_b32 v63, s50, 10 +; SI-NEXT: v_writelane_b32 v63, s51, 11 +; SI-NEXT: v_writelane_b32 v63, s52, 12 +; SI-NEXT: v_writelane_b32 v63, s53, 13 +; SI-NEXT: v_writelane_b32 v63, s54, 14 +; SI-NEXT: v_writelane_b32 v63, s55, 15 +; SI-NEXT: v_writelane_b32 v63, s64, 16 +; SI-NEXT: v_writelane_b32 v63, s65, 17 +; SI-NEXT: v_writelane_b32 v63, s66, 18 +; SI-NEXT: v_writelane_b32 v63, s67, 19 +; SI-NEXT: v_writelane_b32 v63, s68, 20 +; SI-NEXT: v_writelane_b32 v63, s69, 21 +; SI-NEXT: v_writelane_b32 v63, s70, 22 +; SI-NEXT: v_writelane_b32 v63, s71, 23 +; SI-NEXT: v_writelane_b32 v63, s80, 24 +; SI-NEXT: v_writelane_b32 v63, s81, 25 +; SI-NEXT: v_writelane_b32 v63, s82, 26 +; SI-NEXT: v_writelane_b32 v63, s83, 27 +; SI-NEXT: v_writelane_b32 v63, s84, 28 +; SI-NEXT: v_writelane_b32 v63, s85, 29 +; SI-NEXT: v_writelane_b32 v63, s86, 30 +; SI-NEXT: v_writelane_b32 v63, s87, 31 +; SI-NEXT: v_writelane_b32 v63, s96, 32 +; SI-NEXT: v_writelane_b32 v63, s97, 33 +; SI-NEXT: v_writelane_b32 v63, s98, 34 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_mov_b32_e32 v31, s16 -; SI-NEXT: v_mov_b32_e32 v32, s17 -; SI-NEXT: v_mov_b32_e32 v29, s18 -; SI-NEXT: v_mov_b32_e32 v30, s19 -; SI-NEXT: v_mov_b32_e32 v27, s20 -; SI-NEXT: v_mov_b32_e32 v28, s21 -; SI-NEXT: v_mov_b32_e32 v25, s22 -; SI-NEXT: v_mov_b32_e32 v26, s23 -; SI-NEXT: v_mov_b32_e32 v23, s24 -; SI-NEXT: v_mov_b32_e32 v24, s25 -; SI-NEXT: v_mov_b32_e32 v21, s26 -; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v19, s28 -; SI-NEXT: v_mov_b32_e32 v20, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 
4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB73_4 +; SI-NEXT: v_writelane_b32 v63, s99, 35 +; SI-NEXT: v_readfirstlane_b32 s4, v1 +; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_readfirstlane_b32 s6, v3 +; SI-NEXT: v_readfirstlane_b32 s7, v4 +; SI-NEXT: v_readfirstlane_b32 s8, v5 +; SI-NEXT: v_readfirstlane_b32 s9, v6 +; SI-NEXT: v_readfirstlane_b32 s10, v7 +; SI-NEXT: v_readfirstlane_b32 s11, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s14, v11 +; SI-NEXT: v_readfirstlane_b32 s15, v12 +; SI-NEXT: v_readfirstlane_b32 s40, v13 +; SI-NEXT: v_readfirstlane_b32 s41, v14 +; SI-NEXT: v_readfirstlane_b32 s42, v15 +; SI-NEXT: v_readfirstlane_b32 s43, v16 +; SI-NEXT: v_readfirstlane_b32 s44, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s45, v18 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr61 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane +; SI-NEXT: s_cbranch_scc0 .LBB73_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:92 ; 
4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 
4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 34 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 35 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 36 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 37 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 38 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 39 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 40 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 41 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s9, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; 
SI-NEXT: v_writelane_b32 v62, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s21, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s19, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s19, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s19, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s17, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s17, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s17, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 32 +; SI-NEXT: v_writelane_b32 v61, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 30 +; SI-NEXT: v_writelane_b32 v61, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 28 +; SI-NEXT: v_writelane_b32 v61, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 26 +; SI-NEXT: v_writelane_b32 v61, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 24 +; SI-NEXT: v_writelane_b32 v61, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 22 +; SI-NEXT: v_writelane_b32 v61, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 20 +; SI-NEXT: v_writelane_b32 v61, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 18 +; SI-NEXT: v_writelane_b32 v61, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 16 +; SI-NEXT: v_writelane_b32 v61, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 14 +; SI-NEXT: v_writelane_b32 v61, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 12 +; SI-NEXT: v_writelane_b32 v61, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 10 +; SI-NEXT: v_writelane_b32 v61, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 8 +; SI-NEXT: v_writelane_b32 v61, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 6 +; SI-NEXT: v_writelane_b32 v61, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 4 +; SI-NEXT: v_writelane_b32 v61, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 2 +; SI-NEXT: v_writelane_b32 v61, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 0 +; SI-NEXT: s_lshr_b64 s[48:49], s[44:45], 16 +; SI-NEXT: v_writelane_b32 v61, s47, 1 +; SI-NEXT: s_lshr_b64 s[50:51], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[52:53], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[54:55], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[64:65], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[6:7], 16 +; SI-NEXT: s_lshr_b64 
s[68:69], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 +; SI-NEXT: s_cbranch_execnz .LBB73_4 +; SI-NEXT: .LBB73_2: ; %cmp.true +; SI-NEXT: v_add_f64 v[5:6], s[40:41], 1.0 +; SI-NEXT: v_add_f64 v[7:8], s[14:15], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v6 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v8 +; SI-NEXT: v_add_f64 v[9:10], s[12:13], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v10 +; SI-NEXT: v_add_f64 v[11:12], s[10:11], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v10 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v10 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_lshrrev_b32_e32 v33, 24, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v12 +; SI-NEXT: v_add_f64 v[13:14], s[8:9], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v12 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v12 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v14 +; SI-NEXT: v_add_f64 v[15:16], s[6:7], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v14 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v14 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v16 +; SI-NEXT: v_add_f64 v[17:18], s[4:5], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v18 +; SI-NEXT: v_add_f64 v[19:20], s[28:29], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v18 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v18 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], 
s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v20 +; SI-NEXT: v_add_f64 v[21:22], s[26:27], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v20 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v20 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v22 +; SI-NEXT: v_add_f64 v[23:24], s[24:25], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v22 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v22 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v2 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v24 +; SI-NEXT: v_add_f64 v[38:39], s[22:23], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v2 -; SI-NEXT: v_alignbit_b32 v38, v28, v27, 24 -; SI-NEXT: v_alignbit_b32 v48, v28, v27, 16 -; SI-NEXT: v_alignbit_b32 v50, v28, v27, 8 -; SI-NEXT: v_alignbit_b32 v52, v30, v29, 24 -; SI-NEXT: v_alignbit_b32 v54, v30, v29, 16 -; SI-NEXT: v_alignbit_b32 v40, v30, v29, 8 -; SI-NEXT: v_alignbit_b32 v42, v32, v31, 24 -; SI-NEXT: v_alignbit_b32 v44, v32, v31, 16 -; SI-NEXT: v_alignbit_b32 v46, v32, v31, 8 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v20 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v22 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v55, 8, v22 -; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v24 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v24 -; SI-NEXT: v_lshrrev_b32_e32 v47, 24, v26 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v26 -; 
SI-NEXT: v_lshrrev_b32_e32 v57, 8, v26 -; SI-NEXT: v_lshrrev_b32_e32 v58, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v28 -; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v30 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v30 +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v32 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v32 -; SI-NEXT: v_lshrrev_b32_e32 v35, 8, v32 -; SI-NEXT: s_cbranch_execnz .LBB73_3 -; SI-NEXT: .LBB73_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v39 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v39 +; SI-NEXT: v_add_f64 v[52:53], s[20:21], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v39 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 24 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v53 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v53 +; SI-NEXT: v_add_f64 v[44:45], s[18:19], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v53 +; SI-NEXT: v_add_f64 v[1:2], s[44:45], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 24 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v45 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v45 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[1:2], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; 
SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 24 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[1:2], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[1:2], 8 +; SI-NEXT: v_add_f64 v[3:4], s[42:43], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 24 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[5:6], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 24 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[5:6], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[5:6], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[7:8], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill 
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 24 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[7:8], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[7:8], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 24 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 24 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[13:14], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, 
off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 24 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[13:14], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[13:14], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[15:16], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 24 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[15:16], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[15:16], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[17:18], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 24 -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[17:18], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[17:18], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[19:20], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[19:20], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], 
s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[19:20], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[21:22], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[47:48], v[23:24], 16 +; SI-NEXT: v_add_f64 v[58:59], s[16:17], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[35:36], v[21:22], 8 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 8 +; SI-NEXT: v_lshrrev_b32_e32 v27, 24, v2 +; SI-NEXT: v_lshr_b64 v[36:37], v[23:24], 24 +; SI-NEXT: v_lshr_b64 v[49:50], v[38:39], 24 +; SI-NEXT: v_lshr_b64 v[40:41], v[38:39], 8 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[44:45], 8 +; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v2 +; SI-NEXT: v_mov_b32_e32 v37, v27 +; SI-NEXT: v_lshr_b64 v[50:51], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[41:42], v[52:53], 24 +; SI-NEXT: v_lshr_b64 v[54:55], v[52:53], 8 +; SI-NEXT: v_lshr_b64 v[26:27], v[58:59], 24 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v2 +; SI-NEXT: v_mov_b32_e32 v51, v28 +; SI-NEXT: v_lshr_b64 v[42:43], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[55:56], v[44:45], 24 +; SI-NEXT: v_lshr_b64 v[27:28], v[58:59], 16 +; SI-NEXT: v_mov_b32_e32 v43, v29 +; SI-NEXT: v_lshr_b64 v[56:57], v[44:45], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[58:59], 8 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 8, v4 +; SI-NEXT: v_lshrrev_b32_e32 v34, 24, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v45 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v59 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v59 +; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v59 +; SI-NEXT: v_lshrrev_b32_e32 v57, 24, v4 +; SI-NEXT: s_branch .LBB73_5 +; SI-NEXT: .LBB73_3: +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 0 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 1 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 +; 
SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 2 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 3 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 4 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 5 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 6 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 7 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 8 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 9 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 10 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 11 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 12 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 13 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 14 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 15 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 16 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 17 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 18 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 19 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 20 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 21 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 22 
+; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 23 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 24 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 25 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 26 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 27 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 28 +; SI-NEXT: v_writelane_b32 v61, s49, 29 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 30 +; SI-NEXT: v_writelane_b32 v61, s49, 31 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 32 +; SI-NEXT: v_writelane_b32 v61, s49, 33 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: s_branch .LBB73_2 +; SI-NEXT: .LBB73_4: +; SI-NEXT: v_mov_b32_e32 v17, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 34 +; SI-NEXT: v_mov_b32_e32 v37, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 35 +; SI-NEXT: v_mov_b32_e32 v51, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 36 +; SI-NEXT: v_mov_b32_e32 v43, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 37 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v57, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 38 +; SI-NEXT: v_mov_b32_e32 v33, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 39 +; SI-NEXT: v_mov_b32_e32 v30, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 40 +; SI-NEXT: v_mov_b32_e32 v34, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 41 +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 42 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 43 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 44 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 45 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 
4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 46 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 47 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 48 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 49 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 50 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 51 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 52 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 53 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 54 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 55 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 56 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, 
s4 +; SI-NEXT: v_readlane_b32 s4, v61, 57 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 58 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 59 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 60 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 61 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 62 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 63 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 1 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 2 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 3 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 4 +; SI-NEXT: 
buffer_store_dword v25, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 5 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 6 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v2 -; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0 -; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 7 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v2 -; SI-NEXT: v_alignbit_b32 v38, v28, v27, 24 -; SI-NEXT: v_alignbit_b32 v48, v28, v27, 16 -; SI-NEXT: v_alignbit_b32 v50, v28, v27, 8 -; SI-NEXT: v_alignbit_b32 v52, v30, v29, 24 -; SI-NEXT: v_alignbit_b32 v54, v30, v29, 16 -; SI-NEXT: v_alignbit_b32 v40, v30, v29, 8 -; SI-NEXT: v_alignbit_b32 v42, v32, v31, 24 -; SI-NEXT: v_alignbit_b32 v44, v32, v31, 16 -; SI-NEXT: v_alignbit_b32 v46, v32, v31, 8 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v20 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v22 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v55, 8, v22 -; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v24 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v24 -; SI-NEXT: v_lshrrev_b32_e32 v47, 24, v26 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v26 -; SI-NEXT: v_lshrrev_b32_e32 v58, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v28 -; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v30 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v30 +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 9 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v32 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v32 -; SI-NEXT: v_lshrrev_b32_e32 v35, 8, v32 -; SI-NEXT: .LBB73_3: ; %end -; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v46 -; SI-NEXT: v_and_b32_e32 v31, 0xff, v31 -; SI-NEXT: v_and_b32_e32 v44, 0xff, v44 -; SI-NEXT: v_or_b32_e32 v31, v31, v46 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v44 -; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v42 -; SI-NEXT: v_or_b32_e32 v42, v42, v44 -; SI-NEXT: v_and_b32_e32 v31, 0xffff, v31 -; SI-NEXT: v_or_b32_e32 
v31, v31, v42 -; SI-NEXT: buffer_store_dword v31, v0, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 10 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v31, 0xff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v35 -; SI-NEXT: v_or_b32_e32 v31, v31, v32 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v33 -; SI-NEXT: v_or_b32_e32 v32, v33, v32 -; SI-NEXT: v_and_b32_e32 v31, 0xffff, v31 -; SI-NEXT: v_or_b32_e32 v31, v31, v32 -; SI-NEXT: v_add_i32_e32 v32, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v31, v32, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 11 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v31, 8, v40 -; SI-NEXT: v_and_b32_e32 v29, 0xff, v29 -; SI-NEXT: v_or_b32_e32 v29, v29, v31 -; SI-NEXT: v_and_b32_e32 v31, 0xff, v54 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v32, 24, v52 -; SI-NEXT: v_or_b32_e32 v31, v32, v31 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_or_b32_e32 v29, v29, v31 -; SI-NEXT: v_add_i32_e32 v31, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v29, v31, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 12 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v30, 8, v63 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_and_b32_e32 v30, 0xff, v62 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_lshlrev_b32_e32 v31, 24, v61 -; SI-NEXT: v_or_b32_e32 v30, v31, v30 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 13 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v29, 8, v50 +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_mov_b32_e32 v29, s46 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s98 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s96 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s86 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s84 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:408 ; 4-byte 
Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s82 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s80 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s70 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s68 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s66 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s64 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s54 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s52 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s50 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s4, v62, 14 +; SI-NEXT: v_mov_b32_e32 v60, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 15 +; SI-NEXT: v_mov_b32_e32 v31, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 16 +; SI-NEXT: v_mov_b32_e32 v32, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 17 +; SI-NEXT: v_mov_b32_e32 v18, s5 +; SI-NEXT: v_mov_b32_e32 v46, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 1 +; SI-NEXT: v_readlane_b32 s4, v61, 2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 3 +; SI-NEXT: v_readlane_b32 s4, v61, 4 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 5 +; SI-NEXT: v_readlane_b32 s4, v61, 6 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: 
buffer_store_dword v29, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 7 +; SI-NEXT: v_readlane_b32 s4, v61, 8 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 9 +; SI-NEXT: v_readlane_b32 s4, v61, 10 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 11 +; SI-NEXT: v_readlane_b32 s4, v61, 12 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 13 +; SI-NEXT: v_readlane_b32 s4, v61, 14 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 15 +; SI-NEXT: v_readlane_b32 s4, v61, 16 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 17 +; SI-NEXT: v_readlane_b32 s4, v61, 18 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 19 +; SI-NEXT: v_readlane_b32 s4, v61, 20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 21 +; SI-NEXT: v_readlane_b32 s4, v61, 22 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 23 +; SI-NEXT: v_readlane_b32 s4, v61, 24 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 25 +; SI-NEXT: v_readlane_b32 s4, v61, 26 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 27 +; SI-NEXT: v_readlane_b32 s4, v61, 28 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; 
SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 29 +; SI-NEXT: v_readlane_b32 s4, v61, 30 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s48 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 31 +; SI-NEXT: v_readlane_b32 s4, v61, 32 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: v_mov_b32_e32 v59, s17 +; SI-NEXT: v_mov_b32_e32 v58, s16 +; SI-NEXT: v_mov_b32_e32 v45, s19 +; SI-NEXT: v_mov_b32_e32 v44, s18 +; SI-NEXT: v_mov_b32_e32 v53, s21 +; SI-NEXT: v_mov_b32_e32 v52, s20 +; SI-NEXT: v_mov_b32_e32 v39, s23 +; SI-NEXT: v_mov_b32_e32 v38, s22 +; SI-NEXT: v_mov_b32_e32 v24, s25 +; SI-NEXT: v_mov_b32_e32 v23, s24 +; SI-NEXT: v_mov_b32_e32 v22, s27 +; SI-NEXT: v_mov_b32_e32 v21, s26 +; SI-NEXT: v_mov_b32_e32 v20, s29 +; SI-NEXT: v_mov_b32_e32 v19, s28 +; SI-NEXT: v_mov_b32_e32 v16, s7 +; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v14, s9 +; SI-NEXT: v_mov_b32_e32 v13, s8 +; SI-NEXT: v_mov_b32_e32 v12, s11 +; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v10, s13 +; SI-NEXT: v_mov_b32_e32 v9, s12 +; SI-NEXT: v_mov_b32_e32 v8, s15 +; SI-NEXT: v_mov_b32_e32 v7, s14 +; SI-NEXT: v_mov_b32_e32 v6, s41 +; SI-NEXT: v_mov_b32_e32 v5, s40 +; SI-NEXT: v_mov_b32_e32 v4, s43 +; SI-NEXT: v_mov_b32_e32 v3, s42 +; SI-NEXT: v_mov_b32_e32 v2, s45 +; SI-NEXT: v_mov_b32_e32 v1, s44 +; SI-NEXT: v_mov_b32_e32 v28, s38 +; SI-NEXT: v_mov_b32_e32 v27, s36 +; SI-NEXT: v_mov_b32_e32 v26, s34 +; SI-NEXT: v_mov_b32_e32 v25, s30 +; SI-NEXT: v_mov_b32_e32 v56, s94 +; SI-NEXT: v_mov_b32_e32 v55, s92 +; SI-NEXT: v_mov_b32_e32 v54, s90 +; SI-NEXT: v_mov_b32_e32 v42, s88 +; SI-NEXT: v_mov_b32_e32 v41, s78 +; SI-NEXT: v_mov_b32_e32 v40, s76 +; SI-NEXT: v_mov_b32_e32 v50, s74 +; SI-NEXT: v_mov_b32_e32 v49, s72 +; SI-NEXT: v_mov_b32_e32 v48, s62 +; SI-NEXT: v_mov_b32_e32 v47, s60 +; SI-NEXT: v_mov_b32_e32 v36, s58 +; SI-NEXT: v_mov_b32_e32 v35, s56 +; SI-NEXT: v_readlane_b32 s5, v61, 33 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: .LBB73_5: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v28 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_and_b32_e32 v29, 0xff, v58 ; SI-NEXT: v_and_b32_e32 v27, 0xff, v27 -; SI-NEXT: v_or_b32_e32 v27, v27, v29 -; SI-NEXT: v_and_b32_e32 v29, 0xff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 -; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v38 -; SI-NEXT: v_or_b32_e32 v29, v30, v29 -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_or_b32_e32 v27, v27, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v27, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v27, 0xff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v60 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: v_and_b32_e32 v28, 0xff, v59 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v58 ; SI-NEXT: v_or_b32_e32 v28, v29, v28 -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_or_b32_e32 v27, 
v27, v28 -; SI-NEXT: v_add_i32_e32 v28, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v26 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: buffer_store_dword v26, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v25, 0xff, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v59 +; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v46 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_and_b32_e32 v27, 0xff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v31 +; SI-NEXT: v_or_b32_e32 v27, v28, v27 +; SI-NEXT: v_and_b32_e32 v26, 0xffff, v26 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_add_i32_e32 v27, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v26, 0xff, v44 +; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v56 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v55 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v25, 0xff, v45 +; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v60 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: v_and_b32_e32 v23, 0xff, v23 ; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 ; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s99, v63, 35 +; SI-NEXT: v_readlane_b32 s98, v63, 34 +; SI-NEXT: v_readlane_b32 s97, v63, 33 +; SI-NEXT: v_readlane_b32 s96, v63, 32 +; SI-NEXT: v_readlane_b32 s87, v63, 31 +; SI-NEXT: v_readlane_b32 s86, v63, 30 +; SI-NEXT: v_readlane_b32 s85, v63, 29 +; SI-NEXT: v_readlane_b32 s84, v63, 28 +; SI-NEXT: v_readlane_b32 s83, v63, 27 +; SI-NEXT: v_readlane_b32 s82, v63, 26 +; SI-NEXT: v_readlane_b32 s81, v63, 25 +; SI-NEXT: v_readlane_b32 s80, v63, 24 +; SI-NEXT: v_readlane_b32 s71, v63, 23 +; SI-NEXT: v_readlane_b32 s70, v63, 22 +; SI-NEXT: v_readlane_b32 s69, v63, 21 +; SI-NEXT: v_readlane_b32 s68, v63, 20 +; SI-NEXT: v_readlane_b32 s67, v63, 19 +; SI-NEXT: v_readlane_b32 s66, v63, 18 +; SI-NEXT: v_readlane_b32 s65, v63, 17 +; SI-NEXT: v_readlane_b32 s64, v63, 16 +; SI-NEXT: v_readlane_b32 s55, v63, 15 +; SI-NEXT: v_readlane_b32 s54, v63, 14 +; SI-NEXT: v_readlane_b32 s53, v63, 13 +; SI-NEXT: v_readlane_b32 s52, v63, 12 +; SI-NEXT: v_readlane_b32 s51, v63, 11 +; SI-NEXT: v_readlane_b32 s50, v63, 10 +; SI-NEXT: v_readlane_b32 s49, v63, 9 +; SI-NEXT: v_readlane_b32 s48, v63, 8 +; 
SI-NEXT: v_readlane_b32 s39, v63, 7 +; SI-NEXT: v_readlane_b32 s38, v63, 6 +; SI-NEXT: v_readlane_b32 s37, v63, 5 +; SI-NEXT: v_readlane_b32 s36, v63, 4 +; SI-NEXT: v_readlane_b32 s35, v63, 3 +; SI-NEXT: v_readlane_b32 s34, v63, 2 +; SI-NEXT: v_readlane_b32 s31, v63, 1 +; SI-NEXT: v_readlane_b32 s30, v63, 0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v27 -; SI-NEXT: v_or_b32_e32 v25, v25, v27 -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v26, 0xff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v54 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v52 +; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v42 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v41 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v25, 0xff, v53 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v26 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v27, 0xff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; SI-NEXT: v_or_b32_e32 v27, v28, v27 -; SI-NEXT: v_or_b32_e32 v25, v25, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v25, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v57 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 -; SI-NEXT: v_and_b32_e32 v26, 0xff, v56 +; SI-NEXT: v_add_i32_e32 v26, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v40 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v38 +; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v50 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v49 ; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v47 ; SI-NEXT: v_or_b32_e32 v26, v27, v26 ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 -; SI-NEXT: v_add_i32_e32 v26, vcc, 28, v0 +; SI-NEXT: v_add_i32_e32 v26, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 
v25, 0xff, v39 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v26 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v26 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v25, 0xff, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v48 +; SI-NEXT: v_or_b32_e32 v23, v23, v25 +; SI-NEXT: v_and_b32_e32 v25, 0xff, v47 +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v36 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 ; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v45 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v24 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_and_b32_e32 v24, 0xff, v43 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v25 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v41 ; SI-NEXT: v_or_b32_e32 v24, v25, v24 -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v35 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v23, 0xff, v23 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v23 ; SI-NEXT: v_lshlrev_b32_e32 v23, 16, 
v23 ; SI-NEXT: v_or_b32_e32 v23, v24, v23 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 @@ -113352,28 +115385,37 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a, ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v55 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v53 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 ; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v51 ; SI-NEXT: v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v22 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 ; SI-NEXT: v_or_b32_e32 v21, v22, v21 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 @@ -113381,518 +115423,370 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a, ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v49 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v39 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 ; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v37 ; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; 
SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 -; SI-NEXT: v_or_b32_e32 v1, v1, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v17, v17, v19 +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v20 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 ; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 ; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_or_b32_e32 v1, v1, v19 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v19, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v17, 0xff, v18 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v19, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v18, 0xff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_or_b32_e32 v18, v19, v18 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 +; SI-NEXT: v_add_i32_e32 v18, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; SI-NEXT: v_or_b32_e32 
v15, v15, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v18 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_or_b32_e32 v17, v18, v17 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 +; SI-NEXT: v_add_i32_e32 v17, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v15, 0xff, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v16, 8, v16 +; SI-NEXT: v_or_b32_e32 v15, v15, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_or_b32_e32 v16, v17, v16 +; SI-NEXT: v_or_b32_e32 v15, v15, v16 +; SI-NEXT: v_add_i32_e32 v16, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: 
v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_or_b32_e32 v15, v16, v15 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 +; SI-NEXT: v_add_i32_e32 v15, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v5 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v15 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v15, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v6 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: 
v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 +; SI-NEXT: v_add_i32_e32 v13, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v7 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v11, 0xff, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_or_b32_e32 v12, v13, v12 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: v_add_i32_e32 v12, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v8 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, 
off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v12 +; SI-NEXT: v_or_b32_e32 v11, v12, v11 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 +; SI-NEXT: v_add_i32_e32 v11, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v9 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xff, v10 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; SI-NEXT: v_or_b32_e32 v9, v9, v10 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: v_or_b32_e32 v9, v9, v10 +; SI-NEXT: v_add_i32_e32 v10, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v10 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x60, v0 +; SI-NEXT: 
buffer_store_dword v7, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v8 +; SI-NEXT: v_or_b32_e32 v7, v7, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_or_b32_e32 v8, v9, v8 +; SI-NEXT: v_or_b32_e32 v7, v7, v8 +; SI-NEXT: v_add_i32_e32 v8, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: 
v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v34 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v14 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v15 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; 
SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v30 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v33 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v57 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 0x74, v0 +; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v17 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v18 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 +; SI-NEXT: buffer_store_dword 
v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v36 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v51 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v37 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB73_4: -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 
-; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: 
killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: s_branch .LBB73_2 ; ; VI-LABEL: bitcast_v16f64_to_v128i8_scalar: ; VI: ; %bb.0: @@ -123434,8 +125328,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:332 ; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 @@ -123443,133 +125344,93 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:16 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24 ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:80 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:88 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:96 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:104 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:112 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:80 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:88 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:96 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:104 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:112 ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:152 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:160 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:168 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:176 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:128 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:136 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:144 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:152 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:160 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:168 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:176 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v9 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v11 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v13 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v17 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_lshlrev_b32_e32 v1, 8, v19 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v21 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v3 -; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v62, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v30, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v63, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v27 +; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v19 +; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v25 +; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v29 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v23 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v25 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v29 +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v45 +; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v44 +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v43 +; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v42 +; SI-NEXT: v_lshlrev_b32_e32 v41, 8, v41 +; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v55 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v53 +; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v40 +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v45 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v44 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v52 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v51 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v43 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v42 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; 
SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v34 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v41 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v40 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v55 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v53 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v52 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v39 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v51 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v48 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v33 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v35 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:200 @@ -123578,31 +125439,31 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:224 ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:232 ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v32 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:248 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 @@ -123614,140 +125475,206 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:304 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, 
v13 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 ; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:52 ; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:84 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:92 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:100 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:108 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:116 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:124 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132 -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:164 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:172 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:180 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:196 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:212 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:220 -; 
SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:228 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:84 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:92 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:100 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:108 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:116 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:124 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:132 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:140 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:148 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:156 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:164 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:172 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:180 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:196 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:220 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:228 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:244 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:252 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:260 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:268 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:244 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:252 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:268 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:276 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:284 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:292 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:300 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:308 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:316 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:612 ; 4-byte Folded 
Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:276 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:284 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:308 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:324 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; 
SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB75_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: buffer_load_dword v3, off, s[0:3], 
s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v57, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 -; SI-NEXT: v_or_b32_e32 v0, v0, v60 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 +; SI-NEXT: v_or_b32_e32 v0, v0, v20 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v4, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 +; SI-NEXT: v_or_b32_e32 v0, v0, v30 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v30, v1 +; SI-NEXT: v_or_b32_e32 v1, v47, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v6, v0, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_mov_b32_e32 v30, v5 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 +; SI-NEXT: v_or_b32_e32 v2, v2, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v0, v0, v61 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v26, v3 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v5, v2, v3 +; SI-NEXT: v_mov_b32_e32 v3, v7 +; SI-NEXT: v_mov_b32_e32 v2, v9 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 @@ -123756,306 +125683,277 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_lshl_b32 s7, s23, 24 ; SI-NEXT: s_lshl_b32 s8, s27, 24 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v4, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v5, v2, v3 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 -; SI-NEXT: v_mov_b32_e32 v3, v7 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v6, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_or_b32_e32 v0, v0, v15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, 
s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v58, v1 ; SI-NEXT: v_or_b32_e32 v7, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v20 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v57 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: v_or_b32_e32 v8, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: v_mov_b32_e32 v2, v9 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v9, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v26 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v28 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v10, v1 +; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v10, v0, v1 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 
4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v29, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v0, v15 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v11, v1 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v11, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v23 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v50 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 +; SI-NEXT: v_or_b32_e32 v0, v0, v17 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v12, v1 +; SI-NEXT: v_or_b32_e32 v1, v44, v1 ; SI-NEXT: v_or_b32_e32 v12, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v25 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v45 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, v0, v41 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v13, v1 +; SI-NEXT: v_or_b32_e32 v1, v25, v1 ; SI-NEXT: v_or_b32_e32 v13, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v58 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v58, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v36 +; SI-NEXT: v_or_b32_e32 v0, v0, v40 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v14, v1 +; SI-NEXT: v_or_b32_e32 v1, v21, v1 ; SI-NEXT: v_or_b32_e32 v14, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v27 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v60, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v62 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v18 +; SI-NEXT: v_or_b32_e32 v0, v0, v32 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: 
buffer_load_dword v16, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_or_b32_e32 v1, v15, v1 ; SI-NEXT: v_or_b32_e32 v15, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v43 -; SI-NEXT: v_mov_b32_e32 v43, v16 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_mov_b32_e32 v50, v16 +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v21 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v48 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v16, v1 ; SI-NEXT: v_or_b32_e32 v16, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 +; SI-NEXT: v_mov_b32_e32 v48, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_mov_b32_e32 v32, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v34 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v17, v1 ; SI-NEXT: v_or_b32_e32 v17, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 -; SI-NEXT: v_mov_b32_e32 v55, v22 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v51, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v33 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v40, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v44 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v18, v1 ; SI-NEXT: v_or_b32_e32 v18, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v50 -; SI-NEXT: v_mov_b32_e32 v44, v23 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v50, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v39 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v57, v1 ; 
SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v63 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v38 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v19, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v61 -; SI-NEXT: v_mov_b32_e32 v61, v45 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v54 +; SI-NEXT: v_mov_b32_e32 v54, v23 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v40 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v37 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v20, v1 ; SI-NEXT: v_or_b32_e32 v20, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v52 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v21, v1 ; SI-NEXT: v_or_b32_e32 v21, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v59 -; SI-NEXT: v_mov_b32_e32 v59, v24 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v28 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v22, v1 ; SI-NEXT: v_or_b32_e32 v22, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xff, v61 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v39, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_mov_b32_e32 v45, v24 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v34, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 +; SI-NEXT: 
v_and_b32_e32 v1, 0xff, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v23, v1 ; SI-NEXT: v_or_b32_e32 v23, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v59 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v58 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v24, v1 ; SI-NEXT: v_or_b32_e32 v24, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v42 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v43 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v1 +; SI-NEXT: v_mov_b32_e32 v43, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v25, v1 ; SI-NEXT: v_or_b32_e32 v25, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v45 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v56 +; SI-NEXT: v_mov_b32_e32 v55, v56 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v56 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v42 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v63, v1 +; SI-NEXT: v_or_b32_e32 v1, v33, v1 ; SI-NEXT: v_or_b32_e32 v26, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v48 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v46 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: v_mov_b32_e32 v46, v33 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v32, v1 +; SI-NEXT: v_mov_b32_e32 v37, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v46 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v27, v1 ; SI-NEXT: v_or_b32_e32 v27, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v28, v1 ; SI-NEXT: v_or_b32_e32 v28, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v37 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 
0xff, v62 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v62, v1 +; SI-NEXT: v_mov_b32_e32 v36, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v56 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v29, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v36 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 -; SI-NEXT: v_or_b32_e32 v0, v0, v30 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v49 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v30, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 ; SI-NEXT: v_or_b32_e32 v0, v0, v3 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v57, v1 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v31, v0, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v40 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v0, 0xff, v44 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: v_mov_b32_e32 v34, v35 -; SI-NEXT: v_mov_b32_e32 v35, v36 -; SI-NEXT: v_mov_b32_e32 v36, v54 -; SI-NEXT: v_mov_b32_e32 v54, v37 -; SI-NEXT: v_mov_b32_e32 v37, v41 -; SI-NEXT: v_mov_b32_e32 v41, v38 -; SI-NEXT: v_mov_b32_e32 v38, v63 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_or_b32_e32 v3, s4, v0 @@ -124082,108 +125980,112 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_or_b32 s6, s6, s7 -; SI-NEXT: v_mov_b32_e32 v57, v1 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_mov_b64 s[4:5], 0 ; SI-NEXT: s_branch .LBB75_3 ; SI-NEXT: .LBB75_2: -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword 
v39, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v55, v56 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v61, v45 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 s[4:5], -1 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mov_b32_e32 v45, v33 -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: v_mov_b32_e32 v34, v35 -; SI-NEXT: v_mov_b32_e32 v35, v36 -; SI-NEXT: v_mov_b32_e32 v36, v54 -; SI-NEXT: v_mov_b32_e32 v54, v37 -; SI-NEXT: v_mov_b32_e32 v37, v41 -; SI-NEXT: v_mov_b32_e32 v41, v38 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:552 ; 4-byte Folded 
Reload ; SI-NEXT: .LBB75_3: ; %Flow -; SI-NEXT: v_mov_b32_e32 v63, v46 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_mov_b32_e32 v35, v57 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; SI-NEXT: s_cbranch_vccnz .LBB75_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v0, s4, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_and_b32 s4, s16, 0xff ; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s6, s18, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_and_b32 s8, s26, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, s4, v0 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s5, s20, 0xff ; SI-NEXT: s_lshl_b32 s6, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_and_b32 s7, s22, 0xff ; SI-NEXT: s_addk_i32 s5, 0x300 ; SI-NEXT: s_lshl_b32 s6, s23, 24 -; SI-NEXT: s_lshl_b32 s7, s7, 16 ; SI-NEXT: s_and_b32 s5, s5, 0xffff ; SI-NEXT: s_or_b32 s6, s6, s7 -; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_and_b32 s6, s24, 0xff ; SI-NEXT: s_lshl_b32 s7, s25, 8 -; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: s_and_b32 s8, s26, 0xff ; SI-NEXT: s_addk_i32 s6, 0x300 ; SI-NEXT: s_lshl_b32 s7, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_add_i32 s4, s4, 0x3000000 ; SI-NEXT: s_add_i32 s5, s5, 0x3000000 ; SI-NEXT: s_add_i32 s6, s6, 0x3000000 -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: 
v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124192,17 +126094,17 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124212,15 +126114,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124230,15 +126132,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; 
SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124248,15 +126150,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124266,15 +126168,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124284,15 +126186,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:732 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124302,15 +126204,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124319,16 +126221,17 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124338,15 +126241,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, 
off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124356,84 +126259,79 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v63, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 
offset:492 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v43, v1 +; SI-NEXT: v_or_b32_e32 v1, v50, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 @@ -124442,15 +126340,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: 
v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_or_b32_e32 v0, v50, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 @@ -124459,15 +126357,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124477,9 +126375,9 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 @@ -124495,106 +126393,110 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: 
v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v55, v1 +; SI-NEXT: v_or_b32_e32 v1, v48, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x3000000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 -; SI-NEXT: v_or_b32_e32 v0, v39, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_or_b32_e32 v1, v44, v1 +; SI-NEXT: v_or_b32_e32 v1, v54, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_or_b32_e32 v1, v59, v1 +; SI-NEXT: v_or_b32_e32 v1, v45, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x3000000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_or_b32_e32 v0, v42, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v38, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: 
v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v46, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v41 +; SI-NEXT: v_or_b32_e32 v0, v37, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -124602,14 +126504,14 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -124617,14 +126519,14 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 
4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -124654,7 +126556,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v128i8_to_v16f64_scalar: @@ -124676,113 +126578,115 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded 
Spill ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24 -; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104 -; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:112 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:48 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:80 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:88 +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:96 +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:104 +; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112 ; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 -; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:144 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:152 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:128 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:136 +; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:144 +; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:152 +; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:160 +; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:168 +; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:176 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v17 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19 ; VI-NEXT: buffer_store_dword v1, 
off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v5 -; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v17 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v43 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v42 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v41 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v40 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v55 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v54 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v53 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v52 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v51 -; VI-NEXT: 
buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v50 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v22 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v30 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 @@ -124791,29 +126695,28 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 -; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v37 -; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v38 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; 
VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v13 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v9 ; VI-NEXT: s_waitcnt vmcnt(5) -; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v13 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v3 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v7 ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 @@ -124823,130 +126726,141 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1 +; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v13 ; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 ; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328 ; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:12 +; 
VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20 +; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:28 ; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 +; VI-NEXT: s_waitcnt vmcnt(12) +; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v9 +; VI-NEXT: s_waitcnt vmcnt(10) +; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v7 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v37, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84 -; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:92 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:100 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:108 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:116 -; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:124 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 +; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:76 +; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:84 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:92 +; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:100 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:108 +; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:116 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124 ; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140 -; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148 -; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156 -; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:172 -; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:180 -; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:188 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:196 -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:204 -; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212 -; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220 -; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228 -; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:236 -; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244 -; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:252 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:260 -; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:268 -; VI-NEXT: 
buffer_load_ushort v42, off, s[0:3], s32 offset:276 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:284 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:140 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:148 +; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:156 +; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:164 +; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:172 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:180 +; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:188 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:196 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:204 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:212 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:220 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:228 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:244 +; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:252 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:260 +; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:268 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:284 ; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:292 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:308 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:316 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:324 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(12) -; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 
offset:684 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:300 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:308 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:316 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:324 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; VI-NEXT: 
buffer_store_dword v49, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v62, off, 
s[0:3], s32 offset:820 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill ; VI-NEXT: s_cbranch_scc0 .LBB75_2 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: v_or_b32_sdwa v0, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v4, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v4, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v2, v6, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -124955,208 +126869,197 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v3, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v3, v7 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: 
v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v29, v9 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte 
Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v50, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v59, v0 -; VI-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v47, v1 +; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v56, v0 -; VI-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v39, v0 -; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v46, v0 +; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v37, v0 -; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v62, v0 +; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v36, v0 -; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v35, v1 -; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v63, v1 +; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v33, v0 -; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_mov_b32_e32 v60, v0 +; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v21, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v51, v3 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_mov_b32_e32 v35, v0 +; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v34, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v22, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 
offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v34, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v23, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v43, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v43, v49 -; VI-NEXT: v_or_b32_sdwa v0, v30, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v32, v54 -; VI-NEXT: v_mov_b32_e32 v34, v26 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v49, v1 -; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v59, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v54, v0 -; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v32, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v32, v61 +; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v55, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v55, v43 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v46, v61 +; VI-NEXT: v_or_b32_sdwa v0, v42, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v54, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v53, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v45, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v41, v39 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v44, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v41, v33 ; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v58, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v44, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v47, v45 +; VI-NEXT: v_mov_b32_e32 v44, v56 +; VI-NEXT: v_or_b32_sdwa v0, v56, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v38, v39 +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v58, v44 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v48, v0 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v42, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v54, v53 +; VI-NEXT: v_mov_b32_e32 v52, v36 +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v0, v36, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v1, v33, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v50, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v40, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v63, v42 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v49, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v51, 
v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v48, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v39, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v50, v40 +; VI-NEXT: v_mov_b32_e32 v49, v51 +; VI-NEXT: v_mov_b32_e32 v40, v34 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v57, v0 -; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -125187,85 +127090,95 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: s_mov_b64 s[4:5], 0 ; VI-NEXT: s_branch .LBB75_3 ; VI-NEXT: .LBB75_2: -; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v32, v54 -; VI-NEXT: v_mov_b32_e32 v43, v49 -; VI-NEXT: v_mov_b32_e32 v46, v61 -; VI-NEXT: v_mov_b32_e32 v47, v45 -; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v34, v26 -; VI-NEXT: v_mov_b32_e32 v58, v44 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_mov_b32_e32 v63, v42 -; 
VI-NEXT: v_mov_b32_e32 v51, v7 -; VI-NEXT: v_mov_b32_e32 v48, v29 +; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v44, v56 +; VI-NEXT: v_mov_b32_e32 v41, v33 +; VI-NEXT: v_mov_b32_e32 v50, v40 +; VI-NEXT: v_mov_b32_e32 v38, v39 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v54, v53 +; VI-NEXT: v_mov_b32_e32 v52, v36 +; VI-NEXT: v_mov_b32_e32 v49, v51 ; VI-NEXT: s_mov_b64 s[4:5], -1 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; VI-NEXT: .LBB75_3: ; %Flow +; VI-NEXT: v_mov_b32_e32 v51, v41 +; VI-NEXT: v_mov_b32_e32 v36, v44 +; VI-NEXT: v_mov_b32_e32 v53, v54 +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_mov_b32_e32 v54, v60 +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; VI-NEXT: v_mov_b32_e32 v44, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_mov_b32_e32 v46, v49 ; VI-NEXT: s_cbranch_vccnz .LBB75_5 ; VI-NEXT: ; %bb.4: ; %cmp.true -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, 
off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s5, s4 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52 ; VI-NEXT: s_addk_i32 s4, 0x300 -; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_e32 v0, s4, v0 ; VI-NEXT: s_add_i32 s16, s16, 3 -; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_lshl_b32 s5, s17, 8 ; VI-NEXT: s_add_i32 s18, s18, 3 +; VI-NEXT: s_lshl_b32 s6, s19, 8 +; VI-NEXT: s_add_i32 s20, s20, 3 +; VI-NEXT: s_add_i32 s22, s22, 3 +; VI-NEXT: s_lshl_b32 s7, s23, 8 +; VI-NEXT: s_add_i32 s24, s24, 3 +; VI-NEXT: s_add_i32 s26, s26, 3 +; VI-NEXT: s_lshl_b32 s8, s27, 8 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_e32 v0, s4, v0 +; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s18, 0xff -; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_addk_i32 s4, 0x300 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s4, s4, 0xffff ; VI-NEXT: s_lshl_b32 s5, s5, 16 -; VI-NEXT: s_add_i32 s20, s20, 3 ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s20, 0xff ; VI-NEXT: s_lshl_b32 s6, s21, 8 -; VI-NEXT: s_add_i32 s22, s22, 3 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s6, s22, 0xff -; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_addk_i32 s5, 0x300 ; VI-NEXT: s_or_b32 s6, s7, s6 ; VI-NEXT: s_and_b32 s5, s5, 0xffff ; VI-NEXT: s_lshl_b32 s6, s6, 16 -; VI-NEXT: s_add_i32 s24, s24, 3 ; VI-NEXT: s_or_b32 s5, s6, s5 ; VI-NEXT: s_and_b32 s6, s24, 0xff ; VI-NEXT: s_lshl_b32 s7, s25, 8 -; VI-NEXT: s_add_i32 s26, s26, 3 ; VI-NEXT: s_or_b32 s6, s7, s6 ; VI-NEXT: s_and_b32 s7, s26, 0xff -; VI-NEXT: s_lshl_b32 s8, s27, 8 ; VI-NEXT: s_addk_i32 s6, 0x300 ; VI-NEXT: s_or_b32 s7, s8, s7 ; VI-NEXT: s_and_b32 s6, s6, 0xffff @@ -125274,26 +127187,25 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 ; VI-NEXT: s_add_i32 s5, s5, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 -; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 ; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], 
s32 offset:700 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 @@ -125301,8 +127213,8 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -125314,9 +127226,9 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -125328,14 +127240,14 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 @@ -125343,280 +127255,280 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; VI-NEXT: 
s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; 
VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; VI-NEXT: 
s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 
offset:692 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44 +; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v58 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v0 -; 
VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v63 -; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52 +; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v62 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v49 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, 
v48 +; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v39 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v0 ; VI-NEXT: v_mov_b32_e32 v0, s4 @@ -125661,504 +127573,524 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 ; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:56 -; GFX9-NEXT: buffer_load_ushort v53, off, 
s[0:3], s32 offset:64 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:80 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:88 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:96 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:104 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:112 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:80 +; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:88 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:96 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:104 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:112 ; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:144 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:152 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:128 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:136 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:144 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:152 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:160 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:168 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:176 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v11 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v17 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v21 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29 ; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v3 -; GFX9-NEXT: 
v_lshlrev_b32_e32 v8, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; GFX9-NEXT: s_waitcnt vmcnt(35) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43 -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 
offset:596 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(34) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184 +; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v23 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v29 +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v27 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v45 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v44 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v43 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v34 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v42 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v55 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v49 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v32 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v39 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v37 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v24 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v40 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v54 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v36 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: 
v_lshlrev_b32_e32 v0, 8, v31 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v38 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v48 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v50 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v51 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v52 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v53 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216 +; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:216 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232 +; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 +; GFX9-NEXT: v_lshlrev_b32_e32 v41, 8, v41 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v11 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v13 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v5 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v9 ; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; 
GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v4 -; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248 +; GFX9-NEXT: v_lshlrev_b32_e32 v40, 8, v7 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272 -; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280 +; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:280 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288 -; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296 +; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312 -; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v13 +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v11 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 -; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328 +; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328 ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 ; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 -; GFX9-NEXT: s_waitcnt vmcnt(14) -; 
GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v5 +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v9 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_lshlrev_b32_e32 v45, 8, v7 ; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 -; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:84 -; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:92 -; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:100 -; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:108 -; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:116 -; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:124 -; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140 -; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:148 -; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156 -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:164 -; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:172 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:180 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:188 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:196 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:204 -; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:212 -; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:228 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:236 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:252 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:260 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:268 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:276 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:300 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:308 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:316 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:324 -; GFX9-NEXT: s_waitcnt vmcnt(42) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2 -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded 
Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(28) -; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:52 
+; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:68 +; GFX9-NEXT: buffer_load_ushort v16, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:92 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:100 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108 +; GFX9-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:116 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:124 +; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:132 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:140 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:148 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:156 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:164 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:172 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:180 +; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:196 +; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:204 +; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:212 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:220 +; GFX9-NEXT: s_waitcnt vmcnt(29) +; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:228 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:236 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:244 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:252 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:268 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:284 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:292 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:308 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, 
off, s[0:3], s32 offset:316 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:324 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 
offset:768 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB75_2 ; GFX9-NEXT: ; %bb.1: ; %cmp.false -; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v38, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v2, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v4, v28 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v2, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v3, v8, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s28, 0xff -; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s4, s4, 0xffff +; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: s_lshl_b32 s6, s19, 8 ; GFX9-NEXT: s_lshl_b32 s7, s23, 8 ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: 
v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_or_b32_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v13, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v61, v38 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v61, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v63, v57 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v37, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v27, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v33, v43 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_mov_b32_e32 v47, v0 
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v22, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v58, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v53 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v21, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v51, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v38, v51 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v58 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v35, v62 +; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v37, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v60 -; GFX9-NEXT: v_mov_b32_e32 v52, v56 -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_mov_b32_e32 v34, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_or_b32_sdwa v0, v62, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v44 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_or_b32_sdwa v0, v54, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v43 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v26, v0, 
v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: v_or_b32_sdwa v1, v32, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v51, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v53, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v39, v31 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v40, v30 ; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v36, v31 +; GFX9-NEXT: v_mov_b32_e32 v45, v62 +; GFX9-NEXT: v_mov_b32_e32 v46, v56 +; GFX9-NEXT: v_mov_b32_e32 v56, v58 +; GFX9-NEXT: v_mov_b32_e32 v58, v53 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s4, v0 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_or_b32 s4, s4, s5 @@ -126189,32 +128121,48 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; GFX9-NEXT: s_mov_b64 s[4:5], 0 ; GFX9-NEXT: s_branch .LBB75_3 ; GFX9-NEXT: .LBB75_2: -; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v61, v0 -; GFX9-NEXT: v_mov_b32_e32 v63, v57 -; GFX9-NEXT: v_mov_b32_e32 v53, v3 +; GFX9-NEXT: v_mov_b32_e32 v38, v51 +; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v51, 
off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v33, v43 +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v35, v62 +; GFX9-NEXT: v_mov_b32_e32 v36, v31 +; GFX9-NEXT: v_mov_b32_e32 v40, v30 ; GFX9-NEXT: s_mov_b64 s[4:5], -1 -; GFX9-NEXT: v_mov_b32_e32 v57, v38 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; GFX9-NEXT: .LBB75_3: ; %Flow -; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v62, v35 +; GFX9-NEXT: v_mov_b32_e32 v35, v38 ; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; GFX9-NEXT: s_cbranch_vccnz .LBB75_5 ; GFX9-NEXT: ; %bb.4: ; %cmp.true -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v61 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s16, s16, 3 ; GFX9-NEXT: s_add_i32 s18, s18, 3 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff @@ -126261,348 +128209,352 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a, ; GFX9-NEXT: s_movk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s8, 0x300 ; GFX9-NEXT: s_and_b32 s8, s8, 0xffff +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s8, v0 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v60 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v38 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; 
GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, 
off, s[0:3], s32 offset:732 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v42 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, 
v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v63 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v53 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; 
GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v35 +; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v62 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v46 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v48 -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v45 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v63 +; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 
v0, 3, v39 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v54 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v45 -; GFX9-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v40 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v33 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v55 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v32 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v43 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v36 -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v44 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 +; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v42 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v48 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v41 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v55 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: 
v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v32 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v35 -; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v39 +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v54 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v33 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v0, s5 @@ -129496,92 +131448,92 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg ; SI-NEXT: v_writelane_b32 v62, s46, 3 ; SI-NEXT: s_cbranch_execnz .LBB77_4 ; SI-NEXT: .LBB77_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[19:20], s[12:13], 1.0 +; SI-NEXT: v_add_f64 v[35:36], s[44:45], 1.0 ; SI-NEXT: v_add_f64 v[3:4], s[6:7], 1.0 -; SI-NEXT: v_add_f64 v[1:2], s[22:23], 1.0 +; SI-NEXT: v_add_f64 v[49:50], s[28:29], 1.0 ; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v3 ; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v20 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v35 +; SI-NEXT: v_add_f64 v[1:2], s[22:23], 1.0 ; SI-NEXT: v_add_f64 v[41:42], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[27:28], s[40:41], 1.0 +; SI-NEXT: v_add_f64 v[15:16], s[10:11], 1.0 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v35 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], 
s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v19 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: v_add_f64 v[31:32], s[42:43], 1.0 +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v16 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v16 +; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v15 +; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v15 +; SI-NEXT: v_and_b32_e32 v43, 0xffff0000, v28 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v28 +; SI-NEXT: v_and_b32_e32 v45, 0xffff0000, v27 +; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v27 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: v_and_b32_e32 v54, 0xffff0000, v42 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v42 -; SI-NEXT: v_and_b32_e32 v40, 0xffff0000, v41 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v41 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v42 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v42 +; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v41 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v41 ; SI-NEXT: v_and_b32_e32 v42, 0xffff0000, v2 ; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v2 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_f64 v[2:3], s[20:21], 1.0 ; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; SI-NEXT: v_add_f64 v[11:12], s[8:9], 1.0 +; SI-NEXT: v_add_f64 v[7:8], s[4:5], 1.0 ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v4 -; SI-NEXT: v_and_b32_e32 v46, 0xffff0000, v3 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v3 +; SI-NEXT: v_and_b32_e32 v47, 0xffff0000, v32 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v32 +; SI-NEXT: v_and_b32_e32 v57, 0xffff0000, v31 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v31 +; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v3 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v3 ; SI-NEXT: v_add_f64 v[3:4], s[16:17], 1.0 -; SI-NEXT: v_and_b32_e32 v43, 0xffff0000, v1 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v1 -; SI-NEXT: v_and_b32_e32 v61, 0xffff0000, v4 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v7 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v7 +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v12 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v12 +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v11 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v11 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v1 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v4 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 ; SI-NEXT: v_mov_b32_e32 v4, v5 ; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: v_add_f64 v[51:52], s[26:27], 1.0 -; SI-NEXT: v_add_f64 v[49:50], s[28:29], 1.0 -; SI-NEXT: v_add_f64 v[35:36], s[44:45], 1.0 -; SI-NEXT: v_add_f64 v[31:32], s[42:43], 1.0 -; SI-NEXT: v_add_f64 v[27:28], s[40:41], 1.0 ; SI-NEXT: v_add_f64 v[23:24], s[14:15], 1.0 -; SI-NEXT: v_add_f64 v[15:16], s[10:11], 1.0 -; SI-NEXT: v_add_f64 v[11:12], s[8:9], 1.0 -; SI-NEXT: v_add_f64 v[7:8], s[4:5], 1.0 +; SI-NEXT: v_add_f64 v[19:20], s[12:13], 1.0 ; SI-NEXT: v_add_f64 v[59:60], s[18:19], 1.0 ; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v8 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v8 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v7 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v7 -; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v12 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v12 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v11 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v11 -; 
SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v16 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v16 -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v15 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v15 +; SI-NEXT: v_and_b32_e32 v37, 0xffff0000, v20 ; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v20 -; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v24 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v24 -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v28 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v28 -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v27 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v32 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v32 -; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v31 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v36 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 -; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v35 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v35 -; SI-NEXT: v_and_b32_e32 v38, 0xffff0000, v50 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v50 -; SI-NEXT: v_and_b32_e32 v48, 0xffff0000, v49 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v49 +; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v19 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v19 +; SI-NEXT: v_and_b32_e32 v53, 0xffff0000, v24 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v24 +; SI-NEXT: v_and_b32_e32 v55, 0xffff0000, v23 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v23 +; SI-NEXT: v_and_b32_e32 v61, 0xffff0000, v36 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v36 +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v50 +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v49 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v49 ; SI-NEXT: v_and_b32_e32 v50, 0xffff0000, v52 ; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v52 ; SI-NEXT: v_and_b32_e32 v52, 0xffff0000, v51 ; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v51 -; SI-NEXT: v_and_b32_e32 v47, 0xffff0000, v2 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v2 -; SI-NEXT: v_and_b32_e32 v58, 0xffff0000, v60 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v60 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v2 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v2 +; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v60 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v60 ; SI-NEXT: v_and_b32_e32 v60, 0xffff0000, v59 ; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v59 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 @@ -129658,17 +131610,17 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg ; SI-NEXT: ; kill: killed $sgpr46 ; SI-NEXT: s_branch .LBB77_2 ; SI-NEXT: .LBB77_4: -; SI-NEXT: v_mov_b32_e32 v1, s71 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v1, s37 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v1, s69 +; SI-NEXT: v_mov_b32_e32 v1, s36 ; SI-NEXT: v_readlane_b32 s4, v62, 0 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v1, s68 -; SI-NEXT: v_mov_b32_e32 v61, s4 +; SI-NEXT: v_mov_b32_e32 v1, s34 +; SI-NEXT: v_mov_b32_e32 v7, s4 ; SI-NEXT: v_readlane_b32 s4, v62, 1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: v_readlane_b32 s4, v62, 2 @@ -129690,45 +131642,45 
@@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg ; SI-NEXT: v_mov_b32_e32 v26, s82 ; SI-NEXT: v_mov_b32_e32 v33, s81 ; SI-NEXT: v_mov_b32_e32 v30, s80 +; SI-NEXT: v_mov_b32_e32 v37, s71 ; SI-NEXT: v_mov_b32_e32 v34, s70 -; SI-NEXT: v_mov_b32_e32 v8, s67 -; SI-NEXT: v_mov_b32_e32 v7, s66 -; SI-NEXT: v_mov_b32_e32 v24, s65 -; SI-NEXT: v_mov_b32_e32 v23, s64 -; SI-NEXT: v_mov_b32_e32 v16, s55 -; SI-NEXT: v_mov_b32_e32 v15, s54 -; SI-NEXT: v_mov_b32_e32 v28, s53 -; SI-NEXT: v_mov_b32_e32 v27, s52 -; SI-NEXT: v_mov_b32_e32 v12, s51 -; SI-NEXT: v_mov_b32_e32 v11, s50 -; SI-NEXT: v_mov_b32_e32 v32, s49 -; SI-NEXT: v_mov_b32_e32 v31, s48 -; SI-NEXT: v_mov_b32_e32 v20, s39 -; SI-NEXT: v_mov_b32_e32 v19, s38 -; SI-NEXT: v_mov_b32_e32 v36, s37 -; SI-NEXT: v_mov_b32_e32 v35, s36 -; SI-NEXT: v_mov_b32_e32 v38, s35 -; SI-NEXT: v_mov_b32_e32 v37, s34 -; SI-NEXT: v_mov_b32_e32 v48, s31 -; SI-NEXT: v_mov_b32_e32 v39, s30 +; SI-NEXT: v_mov_b32_e32 v39, s69 +; SI-NEXT: v_mov_b32_e32 v38, s68 +; SI-NEXT: v_mov_b32_e32 v53, s67 +; SI-NEXT: v_mov_b32_e32 v48, s66 +; SI-NEXT: v_mov_b32_e32 v55, s65 +; SI-NEXT: v_mov_b32_e32 v54, s64 +; SI-NEXT: v_mov_b32_e32 v43, s55 +; SI-NEXT: v_mov_b32_e32 v40, s54 +; SI-NEXT: v_mov_b32_e32 v45, s53 +; SI-NEXT: v_mov_b32_e32 v44, s52 +; SI-NEXT: v_mov_b32_e32 v47, s51 +; SI-NEXT: v_mov_b32_e32 v46, s50 +; SI-NEXT: v_mov_b32_e32 v57, s49 +; SI-NEXT: v_mov_b32_e32 v56, s48 +; SI-NEXT: v_mov_b32_e32 v61, s39 +; SI-NEXT: v_mov_b32_e32 v58, s38 +; SI-NEXT: v_mov_b32_e32 v8, s35 +; SI-NEXT: v_mov_b32_e32 v24, s31 +; SI-NEXT: v_mov_b32_e32 v23, s30 ; SI-NEXT: v_mov_b32_e32 v50, s95 ; SI-NEXT: v_mov_b32_e32 v49, s94 ; SI-NEXT: v_mov_b32_e32 v52, s93 ; SI-NEXT: v_mov_b32_e32 v51, s92 -; SI-NEXT: v_mov_b32_e32 v54, s91 -; SI-NEXT: v_mov_b32_e32 v53, s90 -; SI-NEXT: v_mov_b32_e32 v40, s89 -; SI-NEXT: v_mov_b32_e32 v55, s88 +; SI-NEXT: v_mov_b32_e32 v16, s91 +; SI-NEXT: v_mov_b32_e32 v15, s90 +; SI-NEXT: v_mov_b32_e32 v28, s89 +; SI-NEXT: v_mov_b32_e32 v27, s88 ; SI-NEXT: v_mov_b32_e32 v42, s79 ; SI-NEXT: v_mov_b32_e32 v41, s78 -; SI-NEXT: v_mov_b32_e32 v43, s77 -; SI-NEXT: v_mov_b32_e32 v44, s76 -; SI-NEXT: v_mov_b32_e32 v46, s75 -; SI-NEXT: v_mov_b32_e32 v45, s74 -; SI-NEXT: v_mov_b32_e32 v47, s73 -; SI-NEXT: v_mov_b32_e32 v56, s72 -; SI-NEXT: v_mov_b32_e32 v58, s63 -; SI-NEXT: v_mov_b32_e32 v57, s62 +; SI-NEXT: v_mov_b32_e32 v11, s77 +; SI-NEXT: v_mov_b32_e32 v12, s76 +; SI-NEXT: v_mov_b32_e32 v32, s75 +; SI-NEXT: v_mov_b32_e32 v31, s74 +; SI-NEXT: v_mov_b32_e32 v19, s73 +; SI-NEXT: v_mov_b32_e32 v20, s72 +; SI-NEXT: v_mov_b32_e32 v36, s63 +; SI-NEXT: v_mov_b32_e32 v35, s62 ; SI-NEXT: v_mov_b32_e32 v60, s61 ; SI-NEXT: v_mov_b32_e32 v59, s60 ; SI-NEXT: v_mov_b32_e32 v3, s4 @@ -129739,7 +131691,7 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg ; SI-NEXT: v_alignbit_b32 v2, v2, v3, 16 ; SI-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v61 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v7 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 @@ -129753,30 +131705,30 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg ; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v58 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v36 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: 
v_mul_f32_e32 v2, 1.0, v57 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v35 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v47 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v19 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v56 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v20 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v46 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v32 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v45 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v31 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v43 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v11 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v44 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v12 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -129788,16 +131740,16 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg ; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v40 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v28 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v55 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v27 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v54 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v16 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v53 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v15 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -129816,128 +131768,128 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg ; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v48 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v24 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v39 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v23 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v38 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v8 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v37 +; SI-NEXT: v_readlane_b32 s99, v63, 35 +; SI-NEXT: v_readlane_b32 s98, v63, 34 +; SI-NEXT: v_readlane_b32 s97, v63, 33 +; SI-NEXT: v_readlane_b32 s96, v63, 32 +; SI-NEXT: v_readlane_b32 s87, v63, 31 +; SI-NEXT: v_readlane_b32 s86, v63, 30 +; SI-NEXT: v_readlane_b32 s85, v63, 29 +; SI-NEXT: v_readlane_b32 s84, v63, 28 +; SI-NEXT: v_readlane_b32 s83, v63, 27 +; SI-NEXT: v_readlane_b32 s82, v63, 26 +; SI-NEXT: v_readlane_b32 s81, v63, 25 +; SI-NEXT: v_readlane_b32 s80, v63, 24 +; SI-NEXT: 
v_readlane_b32 s71, v63, 23 +; SI-NEXT: v_readlane_b32 s70, v63, 22 +; SI-NEXT: v_readlane_b32 s69, v63, 21 +; SI-NEXT: v_readlane_b32 s68, v63, 20 +; SI-NEXT: v_readlane_b32 s67, v63, 19 +; SI-NEXT: v_readlane_b32 s66, v63, 18 +; SI-NEXT: v_readlane_b32 s65, v63, 17 +; SI-NEXT: v_readlane_b32 s64, v63, 16 +; SI-NEXT: v_readlane_b32 s55, v63, 15 +; SI-NEXT: v_readlane_b32 s54, v63, 14 +; SI-NEXT: v_readlane_b32 s53, v63, 13 +; SI-NEXT: v_readlane_b32 s52, v63, 12 +; SI-NEXT: v_readlane_b32 s51, v63, 11 +; SI-NEXT: v_readlane_b32 s50, v63, 10 +; SI-NEXT: v_readlane_b32 s49, v63, 9 +; SI-NEXT: v_readlane_b32 s48, v63, 8 +; SI-NEXT: v_readlane_b32 s39, v63, 7 +; SI-NEXT: v_readlane_b32 s38, v63, 6 +; SI-NEXT: v_readlane_b32 s37, v63, 5 +; SI-NEXT: v_readlane_b32 s36, v63, 4 +; SI-NEXT: v_readlane_b32 s35, v63, 3 +; SI-NEXT: v_readlane_b32 s34, v63, 2 +; SI-NEXT: v_readlane_b32 s31, v63, 1 +; SI-NEXT: v_readlane_b32 s30, v63, 0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v36 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v35 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v61 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v19 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v58 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v32 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v57 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v31 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v56 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v47 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v11 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v46 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v28 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v45 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v27 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v44 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v43 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v15 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v40 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen 
; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v55 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v23 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v54 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v8 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v53 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v48 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s99, v63, 35 -; SI-NEXT: v_readlane_b32 s98, v63, 34 -; SI-NEXT: v_readlane_b32 s97, v63, 33 -; SI-NEXT: v_readlane_b32 s96, v63, 32 -; SI-NEXT: v_readlane_b32 s87, v63, 31 -; SI-NEXT: v_readlane_b32 s86, v63, 30 -; SI-NEXT: v_readlane_b32 s85, v63, 29 -; SI-NEXT: v_readlane_b32 s84, v63, 28 -; SI-NEXT: v_readlane_b32 s83, v63, 27 -; SI-NEXT: v_readlane_b32 s82, v63, 26 -; SI-NEXT: v_readlane_b32 s81, v63, 25 -; SI-NEXT: v_readlane_b32 s80, v63, 24 -; SI-NEXT: v_readlane_b32 s71, v63, 23 -; SI-NEXT: v_readlane_b32 s70, v63, 22 -; SI-NEXT: v_readlane_b32 s69, v63, 21 -; SI-NEXT: v_readlane_b32 s68, v63, 20 -; SI-NEXT: v_readlane_b32 s67, v63, 19 -; SI-NEXT: v_readlane_b32 s66, v63, 18 -; SI-NEXT: v_readlane_b32 s65, v63, 17 -; SI-NEXT: v_readlane_b32 s64, v63, 16 -; SI-NEXT: v_readlane_b32 s55, v63, 15 -; SI-NEXT: v_readlane_b32 s54, v63, 14 -; SI-NEXT: v_readlane_b32 s53, v63, 13 -; SI-NEXT: v_readlane_b32 s52, v63, 12 -; SI-NEXT: v_readlane_b32 s51, v63, 11 -; SI-NEXT: v_readlane_b32 s50, v63, 10 -; SI-NEXT: v_readlane_b32 s49, v63, 9 -; SI-NEXT: v_readlane_b32 s48, v63, 8 -; SI-NEXT: v_readlane_b32 s39, v63, 7 -; SI-NEXT: v_readlane_b32 s38, v63, 6 -; SI-NEXT: v_readlane_b32 s37, v63, 5 -; SI-NEXT: v_readlane_b32 s36, v63, 4 -; SI-NEXT: v_readlane_b32 s35, v63, 3 -; SI-NEXT: v_readlane_b32 s34, v63, 2 -; SI-NEXT: v_readlane_b32 s31, v63, 1 -; SI-NEXT: v_readlane_b32 s30, v63, 0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v39 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v38 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v34 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v37 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v34 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -133206,24 +135158,23 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], 
s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:28 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:40 +; SI-NEXT: v_mov_b32_e32 v52, v30 +; SI-NEXT: v_mov_b32_e32 v53, v28 +; SI-NEXT: v_mov_b32_e32 v40, v12 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40 ; SI-NEXT: s_waitcnt expcnt(3) ; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48 ; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:44 ; SI-NEXT: s_waitcnt expcnt(0) @@ -133233,165 +135184,177 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:68 -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v4 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v14 +; SI-NEXT: v_mul_f32_e32 v14, 1.0, v0 ; SI-NEXT: v_mul_f32_e32 v0, 1.0, v6 -; SI-NEXT: v_mov_b32_e32 v39, v10 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mul_f32_e32 v0, 1.0, v8 -; SI-NEXT: v_mov_b32_e32 v38, v12 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v39 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_mul_f32_e32 v0, 1.0, v38 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v15 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v10 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v18 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v55 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v30 -; SI-NEXT: v_mov_b32_e32 v37, v14 -; SI-NEXT: v_mov_b32_e32 v14, v11 -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v11, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v10, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v12, 1.0, v9 -; SI-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v58, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v56, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v54, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v44, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v46, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v59, 1.0, v9 +; SI-NEXT: v_mul_f32_e32 v57, 1.0, v11 ; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v47, 1.0, v40 +; SI-NEXT: v_mul_f32_e32 v45, 1.0, v15 ; SI-NEXT: v_mul_f32_e32 v15, 1.0, v17 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v16 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: v_mul_f32_e32 v16, 1.0, v19 +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v18 ; SI-NEXT: v_mul_f32_e32 v17, 1.0, v21 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v41, 1.0, v20 ; SI-NEXT: v_mul_f32_e32 v18, 1.0, v23 -; SI-NEXT: v_mul_f32_e32 v41, 1.0, v22 +; SI-NEXT: v_mul_f32_e32 v40, 1.0, v22 ; SI-NEXT: v_mul_f32_e32 v19, 1.0, v25 -; SI-NEXT: v_mul_f32_e32 v40, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v24 ; SI-NEXT: v_mul_f32_e32 v20, 1.0, v27 -; SI-NEXT: v_mul_f32_e32 v55, 1.0, v26 ; SI-NEXT: v_mul_f32_e32 v21, 1.0, v29 -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v28 -; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16 +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v53 +; SI-NEXT: v_mul_f32_e32 v52, 1.0, v52 ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v3, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v3, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v4, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v2, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v4, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v10, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s22 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v11, 1.0, s26 ; SI-NEXT: v_mul_f32_e64 v6, 1.0, s29 ; SI-NEXT: v_mul_f32_e64 v7, 1.0, s28 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48 +; SI-NEXT: v_mul_f32_e32 v48, 1.0, v26 +; SI-NEXT: v_mul_f32_e32 v22, 1.0, v51 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v43 -; SI-NEXT: 
v_mul_f32_e32 v52, 1.0, v44 -; SI-NEXT: v_mul_f32_e32 v24, 1.0, v45 -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v46 -; SI-NEXT: v_mul_f32_e32 v25, 1.0, v47 -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v56 -; SI-NEXT: v_mul_f32_e32 v26, 1.0, v57 -; SI-NEXT: v_mul_f32_e32 v49, 1.0, v58 -; SI-NEXT: v_mul_f32_e32 v27, 1.0, v59 -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v60 -; SI-NEXT: v_mul_f32_e32 v28, 1.0, v61 +; SI-NEXT: v_mul_f32_e32 v23, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v51, 1.0, v50 +; SI-NEXT: v_mul_f32_e32 v24, 1.0, v38 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v39 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v30 +; SI-NEXT: v_mul_f32_e32 v26, 1.0, v28 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v31 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_mul_f32_e32 v38, 1.0, v60 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_mul_f32_e32 v28, 1.0, v42 +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_mul_f32_e32 v37, 1.0, v62 +; SI-NEXT: s_waitcnt vmcnt(10) ; SI-NEXT: v_mul_f32_e32 v29, 1.0, v63 +; SI-NEXT: s_waitcnt vmcnt(9) ; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v0, 1.0, v35 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_mul_f32_e32 v31, 1.0, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v36 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v35, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v36, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v42, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v33, 1.0, s22 -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, 
s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mul_f32_e32 v33, 1.0, v35 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_mul_f32_e32 v42, 1.0, v36 +; SI-NEXT: v_mul_f32_e64 v12, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v34, 1.0, s24 +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB79_4 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB79_2 ; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v3, 16 +; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v10 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_alignbit_b32 v2, v2, v8, 16 +; SI-NEXT: v_alignbit_b32 v3, v3, v9, 16 ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_alignbit_b32 v6, v6, v7, 16 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v5, v5, v8, 16 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v1, v1, v35, 16 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_mov_b32_e32 v59, v2 -; SI-NEXT: v_alignbit_b32 v0, v0, v2, 16 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36 -; SI-NEXT: v_alignbit_b32 v2, v2, v42, 16 -; SI-NEXT: v_mov_b32_e32 v57, v11 -; SI-NEXT: v_mov_b32_e32 v47, v10 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_mov_b32_e32 v45, v12 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_alignbit_b32 v3, v3, v33, 16 -; SI-NEXT: v_mov_b32_e32 v33, v14 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v58 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v56 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v44 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_mov_b32_e32 v62, v38 +; SI-NEXT: v_alignbit_b32 v5, v5, v11, 16 +; SI-NEXT: v_alignbit_b32 v7, v7, v14, 16 +; SI-NEXT: v_alignbit_b32 v8, v8, v54, 16 +; SI-NEXT: v_alignbit_b32 v9, v9, v46, 16 +; SI-NEXT: v_mov_b32_e32 v62, v61 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v61 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v59 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:168 ; 
4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_alignbit_b32 v13, v13, v47, 16 +; SI-NEXT: v_mov_b32_e32 v46, v45 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v45 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v12 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v57 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 @@ -133399,16 +135362,6 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 -; SI-NEXT: v_mov_b32_e32 v61, v53 -; SI-NEXT: v_alignbit_b32 v15, v15, v53, 16 -; SI-NEXT: v_alignbit_b32 v17, v17, v39, 16 -; SI-NEXT: v_alignbit_b32 v18, v18, v41, 16 -; SI-NEXT: v_alignbit_b32 v19, v19, v40, 16 -; SI-NEXT: v_alignbit_b32 v20, v20, v55, 16 -; SI-NEXT: v_alignbit_b32 v21, v21, v54, 16 -; SI-NEXT: v_alignbit_b32 v29, v29, v32, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 ; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 @@ -133416,212 +135369,238 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg ; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v26 ; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v27 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 ; SI-NEXT: v_alignbit_b32 v30, v30, v31, 16 -; SI-NEXT: v_alignbit_b32 v23, v23, v52, 16 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 +; SI-NEXT: v_alignbit_b32 v4, v4, v34, 16 +; SI-NEXT: v_mov_b32_e32 v63, v44 +; SI-NEXT: v_mov_b32_e32 v58, v57 +; SI-NEXT: v_mov_b32_e32 v44, v43 +; SI-NEXT: v_alignbit_b32 v16, v16, v43, 16 +; SI-NEXT: v_mov_b32_e32 v43, v41 +; SI-NEXT: v_alignbit_b32 v17, v17, v41, 16 +; SI-NEXT: v_alignbit_b32 v18, v18, v40, 16 +; SI-NEXT: v_mov_b32_e32 v40, v55 +; SI-NEXT: v_alignbit_b32 v19, v19, v55, 16 +; SI-NEXT: v_alignbit_b32 v20, v20, v48, 16 +; SI-NEXT: v_mov_b32_e32 v48, v53 +; SI-NEXT: v_alignbit_b32 v21, v21, v53, 16 +; SI-NEXT: v_alignbit_b32 v22, v22, v52, 16 ; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_alignbit_b32 v24, v24, v51, 16 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_alignbit_b32 v25, v25, v50, 16 +; SI-NEXT: v_alignbit_b32 v23, v23, v51, 16 +; SI-NEXT: v_alignbit_b32 v24, v24, v50, 16 ; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_alignbit_b32 v26, v26, v49, 16 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: v_alignbit_b32 v27, v27, v48, 16 -; SI-NEXT: v_mov_b32_e32 v48, v37 +; SI-NEXT: v_alignbit_b32 v25, v25, v49, 16 +; SI-NEXT: v_mov_b32_e32 v36, v39 +; SI-NEXT: v_alignbit_b32 v26, v26, v39, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v34, v38 +; SI-NEXT: v_alignbit_b32 v27, v27, v38, 16 +; SI-NEXT: v_mov_b32_e32 v35, v37 ; SI-NEXT: v_alignbit_b32 v28, v28, v37, 16 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v37, v32 +; SI-NEXT: v_alignbit_b32 v29, v29, v32, 16 +; SI-NEXT: v_alignbit_b32 v31, v31, v42, 16 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v33, v42 +; SI-NEXT: s_waitcnt vmcnt(4) +; 
SI-NEXT: v_alignbit_b32 v10, v10, v61, 16 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_mov_b32_e32 v35, v7 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_alignbit_b32 v12, v12, v54, 16 +; SI-NEXT: v_mov_b32_e32 v41, v61 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_mov_b32_e32 v43, v8 -; SI-NEXT: v_alignbit_b32 v7, v7, v8, 16 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v9 +; SI-NEXT: v_alignbit_b32 v11, v11, v59, 16 +; SI-NEXT: v_mov_b32_e32 v55, v59 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v32 -; SI-NEXT: v_alignbit_b32 v31, v31, v34, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v60, v8 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_alignbit_b32 v8, v8, v9, 16 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v58, v11 -; SI-NEXT: v_alignbit_b32 v9, v9, v11, 16 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v14, v14, v45, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v56, v11 -; SI-NEXT: v_alignbit_b32 v10, v10, v11, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v46, v12 -; SI-NEXT: v_alignbit_b32 v11, v11, v12, 16 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v63, v14 -; SI-NEXT: v_alignbit_b32 v12, v12, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v44, v14 -; SI-NEXT: v_alignbit_b32 v13, v13, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v36, v14 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_alignbit_b32 v14, v14, v38, 16 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v53, v38 -; SI-NEXT: v_alignbit_b32 v16, v16, v38, 16 -; SI-NEXT: v_mov_b32_e32 v38, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v40 +; SI-NEXT: v_alignbit_b32 v15, v15, v47, 16 +; SI-NEXT: v_mov_b32_e32 v51, v47 +; SI-NEXT: v_mov_b32_e32 v53, v45 +; SI-NEXT: s_branch .LBB79_3 +; SI-NEXT: .LBB79_2: +; SI-NEXT: v_mov_b32_e32 v63, v44 +; SI-NEXT: v_mov_b32_e32 v44, v43 +; SI-NEXT: v_mov_b32_e32 v43, v41 ; SI-NEXT: v_mov_b32_e32 v40, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v22, v54, 16 -; SI-NEXT: s_cbranch_execnz .LBB79_3 -; SI-NEXT: .LBB79_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v59 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: 
buffer_load_dword v16, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v35 +; SI-NEXT: v_mov_b32_e32 v48, v53 +; SI-NEXT: v_mov_b32_e32 v52, v51 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v62, v61 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v58, v57 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v46, v45 +; SI-NEXT: v_mov_b32_e32 v50, v49 +; SI-NEXT: v_mov_b32_e32 v36, v39 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v34, v38 +; SI-NEXT: v_mov_b32_e32 v35, v37 +; SI-NEXT: v_mov_b32_e32 v37, v32 +; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v33, v42 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: .LBB79_3: ; %Flow +; SI-NEXT: v_mov_b32_e32 v38, v50 +; SI-NEXT: v_mov_b32_e32 v39, v52 +; SI-NEXT: v_mov_b32_e32 v49, v40 +; SI-NEXT: v_mov_b32_e32 v50, v43 +; SI-NEXT: v_mov_b32_e32 v43, v44 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: s_cbranch_vccnz .LBB79_5 +; SI-NEXT: ; %bb.4: ; %cmp.true +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v44 ; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 ; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v60 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v40 ; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 ; SI-NEXT: 
v_lshrrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v57 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v63 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v47 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v62 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v45 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v60 ; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 ; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v58 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v36 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v46 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 ; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v32 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v29, off, 
s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 ; SI-NEXT: v_alignbit_b32 v1, v3, v2, 16 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 ; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 ; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 ; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 ; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 +; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 ; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 ; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 +; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 -; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: s_waitcnt vmcnt(12) ; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 ; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 ; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(10) ; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 -; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 ; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v25 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; SI-NEXT: v_add_f32_e32 v5, 
0x40c00000, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 ; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 ; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v26 +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 ; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 ; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(5) ; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 ; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 +; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 ; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v29 +; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 ; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 +; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v31 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 -; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -133635,105 +135614,107 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_alignbit_b32 v3, v4, v3, 16 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 ; SI-NEXT: v_alignbit_b32 v6, v7, v6, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v43 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v45 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 ; SI-NEXT: v_alignbit_b32 v7, v8, v7, 16 ; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v42 ; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 ; SI-NEXT: v_alignbit_b32 v8, v9, v8, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v58 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v52 ; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 ; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_and_b32_e32 v10, 
0xffff0000, v56 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v41 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_alignbit_b32 v10, v11, v10, 16 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v46 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v55 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_alignbit_b32 v11, v12, v11, 16 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v63 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v54 ; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 ; SI-NEXT: v_alignbit_b32 v12, v13, v12, 16 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v56 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 ; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v62 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v53 ; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 ; SI-NEXT: v_alignbit_b32 v14, v15, v14, 16 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v61 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v51 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_alignbit_b32 v15, v16, v15, 16 -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v53 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v43 ; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 ; SI-NEXT: v_alignbit_b32 v16, v17, v16, 16 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v38 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v50 ; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 ; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v39 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; SI-NEXT: v_alignbit_b32 v18, v19, v18, 16 -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v41 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v49 ; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 ; SI-NEXT: v_alignbit_b32 v19, v20, v19, 16 -; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v40 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; SI-NEXT: v_alignbit_b32 v20, v21, v20, 16 -; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v55 +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v48 ; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 ; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; SI-NEXT: v_alignbit_b32 v22, v23, v22, 16 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v39 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 ; SI-NEXT: v_alignbit_b32 v23, v24, v23, 16 -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v52 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 ; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 ; SI-NEXT: v_alignbit_b32 v24, v25, v24, 16 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v51 +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v38 ; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 -; SI-NEXT: 
v_and_b32_e32 v26, 0xffff0000, v50 +; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v36 ; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 ; SI-NEXT: v_alignbit_b32 v26, v27, v26, 16 -; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v49 +; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v34 ; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 ; SI-NEXT: v_alignbit_b32 v27, v28, v27, 16 -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v48 +; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v35 ; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 ; SI-NEXT: v_alignbit_b32 v28, v29, v28, 16 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v37 ; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 ; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16 -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 ; SI-NEXT: v_alignbit_b32 v30, v31, v30, 16 -; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v37 +; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v33 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 ; SI-NEXT: v_alignbit_b32 v31, v32, v31, 16 -; SI-NEXT: .LBB79_3: ; %end +; SI-NEXT: .LBB79_5: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -133752,41 +135733,6 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB79_4: -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v61, v53 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v59, v2 -; SI-NEXT: v_mov_b32_e32 v57, v11 -; SI-NEXT: v_mov_b32_e32 v47, v10 -; SI-NEXT: v_mov_b32_e32 v45, v12 -; SI-NEXT: v_mov_b32_e32 v33, v14 -; SI-NEXT: v_mov_b32_e32 v62, v38 -; SI-NEXT: v_mov_b32_e32 v38, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v40 -; SI-NEXT: v_mov_b32_e32 v40, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: 
v_mov_b32_e32 v50, v49 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_branch .LBB79_2 ; ; VI-LABEL: bitcast_v64bf16_to_v16f64_scalar: ; VI: ; %bb.0: @@ -136060,12 +138006,12 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; kill: killed $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; kill: killed $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33 @@ -136074,38 +138020,41 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; SI-NEXT: s_cbranch_execz .LBB80_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v30 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v11 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v11 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v8, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v38 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v30 ; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v27 ; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v26 ; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v8, v37 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v48 -; SI-NEXT: v_mov_b32_e32 v48, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v49 +; SI-NEXT: v_mov_b32_e32 v48, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v49 ; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v25 ; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v24 ; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v7 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v8, v50 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v10 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v9, v50 +; SI-NEXT: v_mov_b32_e32 v50, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v51 -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, 
v10 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 ; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 @@ -136117,19 +138066,18 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v32 ; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v29 ; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v28 ; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22 ; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v21 ; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v12 ; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 @@ -136152,7 +138100,6 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) @@ -136166,54 +138113,53 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v57, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v33 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v38 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v39 ; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v8, v53 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, 
s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v50, v9 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: v_mov_b32_e32 v53, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v38, v54 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v43 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v59 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v61 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v62 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v63 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v35 -; SI-NEXT: v_mov_b32_e32 v35, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v39, v40 +; SI-NEXT: v_mov_b32_e32 v40, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v49, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v51, v43 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v54, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v55, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v41, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v43, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v46, v61 +; SI-NEXT: v_cvt_f32_f16_e32 v56, v62 +; 
SI-NEXT: v_cvt_f32_f16_e32 v58, v63 ; SI-NEXT: v_cvt_f32_f16_e32 v59, v36 ; SI-NEXT: v_mov_b32_e32 v36, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v60, v40 -; SI-NEXT: v_mov_b32_e32 v40, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v60, v35 +; SI-NEXT: v_mov_b32_e32 v35, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v61, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v62, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v63, v6 @@ -136240,71 +138186,59 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: s_cbranch_execz .LBB80_4 ; SI-NEXT: ; %bb.3: ; %cmp.true ; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 +; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 ; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v27 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v26 ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v44, v3 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v63 -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v26 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v1, v62 ; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v23 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 ; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v1, v61 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v22 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v60 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 ; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v59 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v60 ; SI-NEXT: v_add_f64 v[35:36], v[11:12], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v23 ; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill 
; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v13, v36 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v59 ; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 ; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 ; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 ; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 ; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 -; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 ; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 ; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v35 ; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v36 ; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v14 @@ -136315,17 +138249,19 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v29 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v32 ; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v42, v4 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 @@ -136351,11 +138287,15 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v47, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v35, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 ; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v53, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v63 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 ; SI-NEXT: v_cvt_f32_f16_e32 v38, v45 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 ; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 @@ -136367,41 +138307,43 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 ; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 ; SI-NEXT: v_cvt_f32_f16_e32 v45, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v46, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v56, v56 ; SI-NEXT: v_cvt_f32_f16_e32 v58, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v59, v34 ; SI-NEXT: v_cvt_f32_f16_e32 v60, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v61, v33 ; SI-NEXT: v_cvt_f32_f16_e32 v62, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v63, v5 -; SI-NEXT: s_waitcnt 
vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v61, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v34, v3 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: .LBB80_4: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v57 ; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 @@ -136518,7 +138460,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v54 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 @@ -136527,7 +138469,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v52 ; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 @@ -136536,7 +138478,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v51 ; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 @@ -136545,7 +138487,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v49 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 @@ -136554,7 +138496,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v39 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 @@ -136563,7 +138505,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v37 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 @@ -136572,7 +138514,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 @@ -136583,7 +138525,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -136594,7 +138536,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -136605,7 +138547,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -136616,7 +138558,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -136627,7 +138569,7 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -136636,18 +138578,16 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v1, v40 
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v53 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 @@ -136656,25 +138596,25 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v50 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v48 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v50 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v36 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 @@ -136684,10 +138624,12 @@ define <64 x half> @bitcast_v16f64_to_v64f16(<16 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v40 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload @@ -136874,69 +138816,67 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a ; SI-NEXT: s_lshr_b32 s46, s5, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v10, s46 ; SI-NEXT: s_lshr_b32 s46, s4, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v55, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v51, s46 ; SI-NEXT: s_lshr_b32 s46, s7, 16 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_cvt_f32_f16_e32 v59, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v43, s46 ; SI-NEXT: s_lshr_b32 s46, s6, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v8, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v40, s46 ; SI-NEXT: s_lshr_b32 s46, s9, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v9, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v42, s46 ; SI-NEXT: s_lshr_b32 s46, s8, 16 -; 
SI-NEXT: v_cvt_f32_f16_e32 v16, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v44, s46 ; SI-NEXT: s_lshr_b32 s46, s11, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v23, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v8, s46 ; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v27, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v9, s46 ; SI-NEXT: s_lshr_b32 s46, s13, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v31, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v16, s46 ; SI-NEXT: s_lshr_b32 s46, s12, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v38, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v23, s46 ; SI-NEXT: s_lshr_b32 s46, s15, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v50, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v27, s46 ; SI-NEXT: s_lshr_b32 s46, s14, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v54, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v31, s46 ; SI-NEXT: s_lshr_b32 s46, s41, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v40, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v38, s46 ; SI-NEXT: s_lshr_b32 s46, s40, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v42, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v50, s46 ; SI-NEXT: s_lshr_b32 s46, s43, 16 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v61, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v32, s46 ; SI-NEXT: s_lshr_b32 s46, s42, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v12, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v22, s46 ; SI-NEXT: s_lshr_b32 s46, s45, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v32, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v30, s46 ; SI-NEXT: s_lshr_b32 s46, s44, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v36, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v25, s46 ; SI-NEXT: s_lshr_b32 s46, s29, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v28, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v12, s46 ; SI-NEXT: s_lshr_b32 s46, s28, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v48, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v37, s46 ; SI-NEXT: s_lshr_b32 s46, s27, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v1, s42 -; SI-NEXT: v_cvt_f32_f16_e32 v24, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v1, s25 +; SI-NEXT: v_cvt_f32_f16_e32 v34, s46 ; SI-NEXT: s_lshr_b32 s46, s26, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v52, s46 -; SI-NEXT: s_lshr_b32 s46, s25, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v13, s46 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_cvt_f32_f16_e32 v35, s46 ; SI-NEXT: s_lshr_b32 s46, s24, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v20, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v24, s46 ; SI-NEXT: s_lshr_b32 s46, s23, 16 ; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, s45 -; SI-NEXT: v_cvt_f32_f16_e32 v17, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v1, s24 +; SI-NEXT: v_cvt_f32_f16_e32 v28, s46 ; SI-NEXT: s_lshr_b32 s46, s22, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v35, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v17, s46 ; SI-NEXT: s_lshr_b32 s46, s21, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v46, s46 ; SI-NEXT: s_lshr_b32 s46, s20, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v5, s46 +; SI-NEXT: v_cvt_f32_f16_e32 v56, s46 ; SI-NEXT: s_lshr_b32 s46, s19, 16 ; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, s44 +; SI-NEXT: v_cvt_f32_f16_e32 v1, s23 ; SI-NEXT: v_cvt_f32_f16_e32 v57, s46 ; SI-NEXT: s_lshr_b32 s46, s18, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v58, s46 @@ -136945,7 +138885,7 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a ; SI-NEXT: s_lshr_b32 s46, s16, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v62, s46 ; SI-NEXT: v_cvt_f32_f16_e32 v7, s5 -; SI-NEXT: v_cvt_f32_f16_e32 v56, s4 +; SI-NEXT: v_cvt_f32_f16_e32 v5, s4 ; SI-NEXT: v_cvt_f32_f16_e32 v14, s7 ; SI-NEXT: v_cvt_f32_f16_e32 v15, s6 ; SI-NEXT: v_cvt_f32_f16_e32 v6, s9 @@ 
-136953,25 +138893,25 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a ; SI-NEXT: v_cvt_f32_f16_e32 v11, s11 ; SI-NEXT: v_cvt_f32_f16_e32 v19, s10 ; SI-NEXT: v_cvt_f32_f16_e32 v39, s13 -; SI-NEXT: v_cvt_f32_f16_e32 v51, s12 +; SI-NEXT: v_cvt_f32_f16_e32 v55, s12 ; SI-NEXT: v_cvt_f32_f16_e32 v41, s15 -; SI-NEXT: v_cvt_f32_f16_e32 v43, s14 -; SI-NEXT: v_cvt_f32_f16_e32 v45, s41 -; SI-NEXT: v_cvt_f32_f16_e32 v44, s40 -; SI-NEXT: v_cvt_f32_f16_e32 v63, s43 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v37, s29 -; SI-NEXT: v_cvt_f32_f16_e32 v22, s28 -; SI-NEXT: v_cvt_f32_f16_e32 v49, s27 +; SI-NEXT: v_cvt_f32_f16_e32 v45, s14 +; SI-NEXT: v_cvt_f32_f16_e32 v59, s41 +; SI-NEXT: v_cvt_f32_f16_e32 v54, s40 +; SI-NEXT: v_cvt_f32_f16_e32 v36, s43 +; SI-NEXT: v_cvt_f32_f16_e32 v26, s42 +; SI-NEXT: v_cvt_f32_f16_e32 v21, s45 +; SI-NEXT: v_cvt_f32_f16_e32 v61, s44 +; SI-NEXT: v_cvt_f32_f16_e32 v63, s29 +; SI-NEXT: v_cvt_f32_f16_e32 v20, s28 +; SI-NEXT: v_cvt_f32_f16_e32 v33, s27 ; SI-NEXT: v_cvt_f32_f16_e32 v29, s26 -; SI-NEXT: v_cvt_f32_f16_e32 v53, s25 -; SI-NEXT: v_cvt_f32_f16_e32 v26, s24 -; SI-NEXT: v_cvt_f32_f16_e32 v30, s23 -; SI-NEXT: v_cvt_f32_f16_e32 v21, s22 -; SI-NEXT: v_cvt_f32_f16_e32 v33, s21 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v49, s22 +; SI-NEXT: v_cvt_f32_f16_e32 v53, s21 ; SI-NEXT: v_cvt_f32_f16_e32 v47, s20 -; SI-NEXT: v_cvt_f32_f16_e32 v34, s19 -; SI-NEXT: v_cvt_f32_f16_e32 v25, s18 +; SI-NEXT: v_cvt_f32_f16_e32 v52, s19 +; SI-NEXT: v_cvt_f32_f16_e32 v48, s18 ; SI-NEXT: v_cvt_f32_f16_e32 v2, s17 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v1, s16 @@ -136990,151 +138930,158 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v53 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v48 ; SI-NEXT: v_add_f64 v[36:37], s[28:29], 1.0 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v49 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v36 ; SI-NEXT: v_add_f64 v[14:15], s[10:11], 1.0 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_add_f64 v[10:11], s[8:9], 1.0 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v36 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: v_add_f64 v[6:7], s[6:7], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v44, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v13 +; 
SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: v_add_f64 v[4:5], s[4:5], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v59, v28 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v3, v10 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v35 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v14 ; SI-NEXT: v_add_f64 v[29:30], s[42:43], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v29 +; SI-NEXT: v_add_f64 v[6:7], s[6:7], 1.0 +; SI-NEXT: v_add_f64 v[21:22], s[14:15], 1.0 ; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v4, v30 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v41, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v21 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v4, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v24 -; SI-NEXT: v_add_f64 v[10:11], s[8:9], 1.0 -; SI-NEXT: v_add_f64 v[25:26], s[40:41], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v17 -; SI-NEXT: v_add_f64 v[21:22], s[14:15], 1.0 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v56, v57 +; SI-NEXT: v_add_f64 v[46:47], s[20:21], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v15 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v46 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v47 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v63, v37 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v4, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v53, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v47, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v46, v15 +; SI-NEXT: v_mov_b32_e32 v15, v12 +; SI-NEXT: v_mov_b32_e32 v12, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v56, v14 +; SI-NEXT: v_mov_b32_e32 v14, v5 +; SI-NEXT: v_mov_b32_e32 v5, v40 +; SI-NEXT: v_mov_b32_e32 v40, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_add_f64 v[33:34], s[44:45], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v21 -; SI-NEXT: v_add_f64 v[18:19], s[12:13], 1.0 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v63, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v1 +; SI-NEXT: v_add_f64 v[25:26], s[40:41], 1.0 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v59, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v34 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v33 +; SI-NEXT: 
v_cvt_f32_f16_e32 v61, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v49 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v49, v1 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_f64 v[1:2], s[18:19], 1.0 -; SI-NEXT: v_cvt_f32_f16_e32 v61, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v62, v62 +; SI-NEXT: v_add_f64 v[18:19], s[12:13], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 ; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v34 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v25 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v54, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v43, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v58, v7 +; SI-NEXT: v_mov_b32_e32 v7, v42 +; SI-NEXT: v_mov_b32_e32 v42, v20 +; SI-NEXT: v_mov_b32_e32 v20, v21 +; SI-NEXT: v_mov_b32_e32 v21, v26 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v55, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v48 ; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_cvt_f32_f16_e32 v37, v13 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_cvt_f32_f16_e32 v35, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v4, v34 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v52 ; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v1 ; SI-NEXT: v_add_f64 v[1:2], s[16:17], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v39, v19 ; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v1 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v7 -; SI-NEXT: v_mov_b32_e32 v7, v61 -; SI-NEXT: v_mov_b32_e32 v61, v62 +; SI-NEXT: v_cvt_f32_f16_e32 v51, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v62 ; SI-NEXT: v_cvt_f32_f16_e32 v62, v19 ; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v4, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v57, v57 -; SI-NEXT: v_add_f64 v[46:47], s[20:21], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v15 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v46 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v47 -; SI-NEXT: s_waitcnt expcnt(0) ; 
SI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v47 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f32_f16_e32 v28, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v47, v46 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 ; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v15 -; SI-NEXT: v_mov_b32_e32 v15, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v14 -; SI-NEXT: v_mov_b32_e32 v14, v12 -; SI-NEXT: v_mov_b32_e32 v12, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v30, v60 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v60, v4 ; SI-NEXT: v_mov_b32_e32 v18, v3 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v48, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v20, v17 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_cvt_f32_f16_e32 v24, v17 ; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_cvt_f32_f16_e32 v34, v13 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v24, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v28, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v52, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 ; SI-NEXT: .LBB81_3: ; %end ; SI-NEXT: v_cvt_f16_f32_e32 v3, v62 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -137151,19 +139098,19 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v48 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v52 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; 
SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v56 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v47 ; SI-NEXT: v_add_i32_e32 v4, vcc, 16, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 @@ -137171,188 +139118,188 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v53 ; SI-NEXT: v_add_i32_e32 v4, vcc, 20, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v49 ; SI-NEXT: v_add_i32_e32 v4, vcc, 24, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v28 ; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v24 ; SI-NEXT: v_add_i32_e32 v4, vcc, 32, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v35 ; SI-NEXT: v_add_i32_e32 v4, vcc, 36, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v6 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v29 ; SI-NEXT: v_add_i32_e32 v4, vcc, 40, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v33 ; SI-NEXT: v_add_i32_e32 v4, vcc, 44, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v20 ; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, 
s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v63 ; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v61 ; SI-NEXT: v_add_i32_e32 v4, vcc, 56, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v21 ; SI-NEXT: v_add_i32_e32 v4, vcc, 60, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v26 ; SI-NEXT: v_add_i32_e32 v4, vcc, 64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v6 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v61 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v63 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v36 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x44, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v44 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v50 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v54 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x48, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v59 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x4c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v54 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v43 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v45 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x50, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v50 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v41 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x54, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v55 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x58, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v39 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x5c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v19 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x60, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v11 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v44 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v18 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v42 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v15 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v43 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v56 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v5 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -137390,62 +139337,62 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a ; SI-NEXT: ; implicit-def: $vgpr62 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr57 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: 
$vgpr17 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; kill: killed $vgpr3 +; SI-NEXT: ; implicit-def: $vgpr24 ; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr24 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr59 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr45 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr55 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr44 +; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr3 @@ -140338,219 +142285,224 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB85_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v18 ; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v34, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v35, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v36, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v38, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v39, v6, v5, 16 -; 
SI-NEXT: v_alignbit_b32 v49, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v51, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v40, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v42, v24, v23, 16 -; SI-NEXT: v_alignbit_b32 v45, v26, v25, 16 -; SI-NEXT: v_alignbit_b32 v47, v28, v27, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v30, v29, 16 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v60, v32, v31, 16 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v28 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v16 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v10 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[33:34], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[39:40], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[40:41], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[41:42], v[27:28], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[29:30], 16 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v32 +; SI-NEXT: v_lshr_b64 v[54:55], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[43:44], v[31:32], 16 ; SI-NEXT: s_cbranch_execnz .LBB85_3 ; SI-NEXT: .LBB85_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0 -; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; 
SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 ; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 16 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v18 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v34, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v35, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v36, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v38, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v39, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v49, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v51, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v40, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v42, v24, v23, 16 -; SI-NEXT: v_alignbit_b32 v45, v26, v25, 16 -; SI-NEXT: v_alignbit_b32 v47, v28, v27, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v30, v29, 16 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v60, v32, v31, 16 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v28 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v16 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v10 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[33:34], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[13:14], 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 +; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 +; SI-NEXT: v_lshr_b64 v[38:39], v[7:8], 16 +; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 +; SI-NEXT: v_lshr_b64 v[39:40], v[23:24], 16 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 +; SI-NEXT: v_lshr_b64 v[40:41], v[25:26], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; 
SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0 +; SI-NEXT: v_lshr_b64 v[41:42], v[27:28], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[43:44], v[31:32], 16 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v32 ; SI-NEXT: .LBB85_3: ; %end -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v60 +; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v43 ; SI-NEXT: v_and_b32_e32 v31, 0xffff, v31 -; SI-NEXT: v_or_b32_e32 v31, v31, v60 +; SI-NEXT: v_or_b32_e32 v31, v31, v50 ; SI-NEXT: buffer_store_dword v31, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v31, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v57 ; SI-NEXT: v_or_b32_e32 v31, v31, v32 ; SI-NEXT: v_add_i32_e32 v32, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v31, v32, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v58 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v42 ; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 ; SI-NEXT: v_or_b32_e32 v29, v29, v31 ; SI-NEXT: v_add_i32_e32 v31, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v29, v31, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v29, 0xffff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v63 +; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v56 ; SI-NEXT: v_or_b32_e32 v29, v29, v30 ; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v47 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v41 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 ; SI-NEXT: v_or_b32_e32 v27, v27, v29 ; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v27, v29, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v62 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v47 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 ; SI-NEXT: v_add_i32_e32 v28, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v45 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v40 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: v_or_b32_e32 v25, v25, v27 ; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v61 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v46 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 ; SI-NEXT: v_add_i32_e32 v26, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v42 
+; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v39 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v59 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v45 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v57 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v63 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v49 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v56 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v62 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 ; SI-NEXT: v_or_b32_e32 v1, v1, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v46 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v61 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v60 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v59 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140562,7 +142514,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: buffer_store_dword 
v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v58 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140572,9 +142524,11 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140584,9 +142538,11 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140596,9 +142552,11 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140608,25 +142566,27 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v17 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 
0x7c, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload @@ -140647,39 +142607,43 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB85_4: -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr58 ; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr56 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr55 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; kill: killed $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: s_branch .LBB85_2 ; ; VI-LABEL: bitcast_v16f64_to_v64i16_scalar: @@ -141703,179 +143667,162 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v56, v10 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_mov_b32_e32 v57, v8 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], 
s32 offset:12 +; SI-NEXT: v_mov_b32_e32 v54, v12 +; SI-NEXT: v_mov_b32_e32 v34, v10 +; SI-NEXT: v_mov_b32_e32 v35, v8 +; SI-NEXT: v_mov_b32_e32 v38, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:56 ; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v15 ; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v19 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v23 ; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v27 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v40 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v36 ; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: 
v_lshlrev_b32_e32 v25, 16, v8 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v10 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v12 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v38 -; SI-NEXT: s_waitcnt vmcnt(5) expcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v33 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v50 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v32 -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v25, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB87_4 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB87_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v7, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 +; SI-NEXT: v_or_b32_e32 v7, v0, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v9, v0, v50 +; SI-NEXT: v_or_b32_e32 v9, v0, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v10, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 -; SI-NEXT: v_or_b32_e32 v11, v0, v41 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 -; SI-NEXT: v_or_b32_e32 v12, v0, v40 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_mov_b32_e32 v52, v57 -; SI-NEXT: v_mov_b32_e32 v57, v40 -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v13 -; SI-NEXT: v_or_b32_e32 v13, v0, v13 +; SI-NEXT: v_or_b32_e32 v10, v0, v47 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_or_b32_e32 v11, v0, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v12, v0, v45 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v54 +; SI-NEXT: v_or_b32_e32 v13, v0, v44 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v14 -; SI-NEXT: v_mov_b32_e32 v36, v41 -; SI-NEXT: v_mov_b32_e32 v41, v14 -; SI-NEXT: v_or_b32_e32 v14, v0, v48 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v54 +; SI-NEXT: v_mov_b32_e32 v54, v14 +; SI-NEXT: v_or_b32_e32 v14, v0, v43 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v16 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v43 -; SI-NEXT: v_mov_b32_e32 v43, v48 -; SI-NEXT: v_mov_b32_e32 v48, v15 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v15, v0, v15 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v18 -; SI-NEXT: v_mov_b32_e32 v38, v61 -; SI-NEXT: v_mov_b32_e32 v61, v56 -; SI-NEXT: v_mov_b32_e32 v56, v16 -; SI-NEXT: v_or_b32_e32 v16, v0, v37 +; SI-NEXT: v_or_b32_e32 v16, v0, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v20 ; SI-NEXT: v_or_b32_e32 v17, v0, v17 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, 
v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v18, v0, v35 +; SI-NEXT: v_or_b32_e32 v18, v0, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; SI-NEXT: v_or_b32_e32 v19, v0, v19 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 -; SI-NEXT: v_mov_b32_e32 v37, v20 -; SI-NEXT: v_or_b32_e32 v20, v0, v33 +; SI-NEXT: v_or_b32_e32 v20, v0, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: v_or_b32_e32 v21, v0, v21 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 -; SI-NEXT: v_or_b32_e32 v22, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_mov_b32_e32 v35, v24 -; SI-NEXT: v_mov_b32_e32 v39, v23 +; SI-NEXT: v_or_b32_e32 v22, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 ; SI-NEXT: v_or_b32_e32 v23, v0, v23 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 -; SI-NEXT: v_mov_b32_e32 v24, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v24, v0, v24 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v24, v0, v57 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v63 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 ; SI-NEXT: v_or_b32_e32 v25, v0, v25 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46 -; SI-NEXT: v_mov_b32_e32 v26, v27 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v26, v0, v26 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 +; SI-NEXT: v_or_b32_e32 v26, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v27, v0, v54 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v27, v0, v27 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_mov_b32_e32 v33, v28 ; SI-NEXT: v_or_b32_e32 v28, v0, v5 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: v_or_b32_e32 v29, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v29, v0, v29 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 @@ -141883,15 +143830,18 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a, ; SI-NEXT: v_or_b32_e32 v30, v0, v3 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_mov_b32_e32 v63, v2 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_or_b32_e32 v8, v1, v55 -; SI-NEXT: v_mov_b32_e32 v55, v4 -; SI-NEXT: v_mov_b32_e32 v53, v6 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v45, v44 -; SI-NEXT: v_mov_b32_e32 v59, v42 -; SI-NEXT: v_or_b32_e32 v31, v0, v34 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v38, v2 +; SI-NEXT: v_or_b32_e32 v8, v1, v56 +; SI-NEXT: v_mov_b32_e32 v42, v41 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v55, v61 +; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v62, v52 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v49, v51 +; SI-NEXT: v_or_b32_e32 v31, v0, v31 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -141899,14 
+143849,40 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v4, s8 ; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: s_cbranch_execnz .LBB87_3 -; SI-NEXT: .LBB87_2: ; %cmp.true +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_branch .LBB87_3 +; SI-NEXT: .LBB87_2: +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v42, v41 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v55, v61 +; SI-NEXT: v_mov_b32_e32 v38, v2 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v54 +; SI-NEXT: v_mov_b32_e32 v54, v14 +; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v62, v52 +; SI-NEXT: v_mov_b32_e32 v60, v59 +; SI-NEXT: v_mov_b32_e32 v49, v51 +; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: .LBB87_3: ; %Flow +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: v_mov_b32_e32 v58, v49 +; SI-NEXT: s_cbranch_vccnz .LBB87_5 +; SI-NEXT: ; %bb.4: ; %cmp.true +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v48, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v32, v1 +; SI-NEXT: v_or_b32_e32 v1, v56, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v52, v53 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 @@ -141947,143 +143923,143 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v4, s8 ; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v38, v0 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_or_b32_e32 v0, v39, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v50, v0 +; SI-NEXT: v_or_b32_e32 v0, v47, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v36, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: 
v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v49, v0 +; SI-NEXT: v_or_b32_e32 v0, v44, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v43, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v48, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; 
SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v39, v0 -; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 +; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v30, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 @@ -142092,7 +144068,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a, ; SI-NEXT: v_add_i32_e32 v31, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 -; SI-NEXT: .LBB87_3: ; %end +; SI-NEXT: .LBB87_5: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -142111,35 +144087,6 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a, ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB87_4: -; SI-NEXT: v_mov_b32_e32 v38, v61 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v63, v2 -; SI-NEXT: v_mov_b32_e32 v55, v4 -; SI-NEXT: v_mov_b32_e32 v53, v6 -; SI-NEXT: v_mov_b32_e32 v52, v57 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v61, v56 -; SI-NEXT: v_mov_b32_e32 v50, v43 -; SI-NEXT: v_mov_b32_e32 v36, v41 -; SI-NEXT: v_mov_b32_e32 v57, v40 -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v13 -; SI-NEXT: v_mov_b32_e32 v43, v48 -; SI-NEXT: v_mov_b32_e32 v48, v15 -; SI-NEXT: v_mov_b32_e32 v41, v14 -; SI-NEXT: v_mov_b32_e32 v56, v16 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v45, v44 -; SI-NEXT: v_mov_b32_e32 v59, v42 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v37, v20 -; SI-NEXT: v_mov_b32_e32 v39, v23 -; SI-NEXT: v_mov_b32_e32 v35, v24 -; SI-NEXT: v_mov_b32_e32 v33, v28 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: ; 
implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: s_branch .LBB87_2 ; ; VI-LABEL: bitcast_v64i16_to_v16f64_scalar: ; VI: ; %bb.0: @@ -150763,22 +152710,22 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 ; VI-NEXT: 
buffer_load_ushort v4, off, s[0:3], s32 offset:8 @@ -150804,13 +152751,11 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 ; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v43, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v55, 8, v25 ; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 ; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 ; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 @@ -150819,49 +152764,46 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 ; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 ; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v8 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v10 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v12 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v26 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v28 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill ; VI-NEXT: 
v_lshlrev_b32_e32 v0, 8, v30 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200 @@ -150870,34 +152812,35 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 -; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v22 ; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v24 +; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v26 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec ; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 ; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v16 ; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 ; VI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 +; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 
offset:576 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v2 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v36, 8, v2 ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264 @@ -150907,131 +152850,155 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0 -; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v2 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328 ; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:12 +; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 ; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:28 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:36 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:36 +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v4 +; VI-NEXT: s_waitcnt vmcnt(13) +; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v6 -; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5 -; VI-NEXT: s_waitcnt vmcnt(10) ; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v0 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 
offset:68 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:84 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:92 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:100 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:116 -; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:124 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:132 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:140 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:148 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:156 -; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:164 -; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:172 -; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:180 -; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:188 -; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:196 -; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:204 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:212 -; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:220 -; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:228 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:236 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:244 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:252 -; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0 +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:68 +; VI-NEXT: s_waitcnt vmcnt(10) +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:268 -; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:76 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:284 -; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:292 -; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300 -; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:308 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:316 -; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:324 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; 
VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword 
v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; VI-NEXT: s_cbranch_scc0 .LBB89_4 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:84 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:92 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:100 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:108 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:116 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:124 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:132 +; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:140 +; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:148 +; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:156 +; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:164 +; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:172 +; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:180 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:188 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:196 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:204 +; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:212 +; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220 +; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:228 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:236 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:244 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:252 +; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260 +; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:268 +; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:276 +; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:284 +; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:292 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:300 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:308 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:316 +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:324 +; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 
offset:660 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 
offset:804 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill +; VI-NEXT: s_cbranch_scc0 .LBB89_2 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -151040,225 +153007,205 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v0, v0, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v1, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: 
s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v2, v2, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v2, v8 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v3, v8 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v25 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v3, v10 ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v2, v50, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 
; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v36, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v37, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v38, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v48, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v38, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v39, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v49, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v48, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v45, v62 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v60, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v2, v35, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v0, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v3, v51, v3 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(9) +; VI-NEXT: v_or_b32_sdwa v3, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v32, v1 -; VI-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v54, v22 -; VI-NEXT: v_mov_b32_e32 v41, v24 +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v34, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v37, v1 -; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v55, v26 +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v0, v42, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v1, v41, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v39, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v42, v43 +; VI-NEXT: v_mov_b32_e32 v43, v37 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v39, v0 -; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 
src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v49, v1 -; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v43, v27 +; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v51, v0 -; VI-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v47, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v47, v54 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v35, v1 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v53, v28 +; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v47, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v33, v0 -; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v61, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v58, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v57, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v36, v0 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v24, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v0, v34, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: 
buffer_load_dword v34, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v56, v0 -; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v58, v1 -; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v61, v60 -; VI-NEXT: v_mov_b32_e32 v60, v59 +; VI-NEXT: v_or_b32_sdwa v1, v25, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v38, v0 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v48, v1 -; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v57, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v29, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v45, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v50, v0 -; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v28, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v40, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v62, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v51, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v40, v41 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v52, v0 -; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v46, v1 +; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v0, v29, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v31, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: 
v_mov_b32_e32 v54, v33 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v46, v1 +; VI-NEXT: v_mov_b32_e32 v56, v1 ; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v63, v0 -; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v47, v1 -; VI-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v63, v39 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v57, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v57, v0 +; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v58, v1 +; VI-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v52, v60 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: 
v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v53, v35 +; VI-NEXT: s_waitcnt vmcnt(3) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff @@ -151287,14 +153234,54 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 -; VI-NEXT: s_cbranch_execnz .LBB89_3 -; VI-NEXT: .LBB89_2: ; %cmp.true -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v59 -; VI-NEXT: v_or_b32_sdwa v29, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_branch .LBB89_3 +; VI-NEXT: .LBB89_2: +; VI-NEXT: v_mov_b32_e32 v47, v54 +; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: 
buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v58, v7 +; VI-NEXT: v_mov_b32_e32 v57, v5 +; VI-NEXT: v_mov_b32_e32 v56, v3 +; VI-NEXT: s_mov_b64 s[4:5], -1 +; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 +; VI-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; VI-NEXT: .LBB89_3: ; %Flow +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; VI-NEXT: s_cbranch_vccnz .LBB89_5 +; VI-NEXT: ; %bb.4: ; %cmp.true +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 @@ -151313,351 +153300,356 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: s_lshl_b32 s9, s19, 8 ; VI-NEXT: s_add_i32 s16, s16, 3 ; VI-NEXT: s_lshl_b32 s10, s17, 8 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 +; VI-NEXT: v_or_b32_sdwa v29, v41, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v31, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33 +; VI-NEXT: v_or_b32_sdwa v30, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 +; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v28, v56, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: 
v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v26, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v62 -; VI-NEXT: v_or_b32_sdwa v28, v43, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44 -; VI-NEXT: v_or_b32_sdwa v53, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v45 -; VI-NEXT: v_or_b32_sdwa v27, v55, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42 -; VI-NEXT: v_or_b32_sdwa v52, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v40 -; VI-NEXT: v_or_b32_sdwa v25, v48, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v60 -; VI-NEXT: v_or_b32_sdwa v59, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v61 -; VI-NEXT: v_or_b32_sdwa v24, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v44, v47, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v48, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48 -; VI-NEXT: v_or_b32_sdwa v24, v24, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24 +; VI-NEXT: v_or_b32_sdwa v27, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v23, v41, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v40, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v38, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38 -; VI-NEXT: v_or_b32_sdwa v23, v23, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23 +; VI-NEXT: v_or_b32_sdwa v26, v61, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v22, v54, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v34, v34, v3 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34 +; VI-NEXT: v_or_b32_sdwa v26, v26, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v50, v33, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v50 -; VI-NEXT: v_or_b32_sdwa v22, v22, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22 +; VI-NEXT: v_or_b32_sdwa v25, v48, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v21, v35, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v21, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v21 +; VI-NEXT: v_or_b32_sdwa v25, v25, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v54, v51, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v24, v36, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v20, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v32, v32, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v32 +; VI-NEXT: v_or_b32_sdwa v24, v24, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v49, v39, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v49 -; VI-NEXT: v_or_b32_sdwa v20, v20, v49 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20 -; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v23, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v19, v37, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v61, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v61 +; VI-NEXT: v_or_b32_sdwa v23, v23, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v22, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v60 +; VI-NEXT: v_or_b32_sdwa v36, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v36 +; VI-NEXT: v_or_b32_sdwa v22, v22, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v37, v34, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v37 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v63, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v31, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v19, v19, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19 -; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v18, v32, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; VI-NEXT: 
buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v38, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38 +; VI-NEXT: v_or_b32_sdwa v21, v63, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v57, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v57 -; VI-NEXT: v_or_b32_sdwa v18, v18, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18 +; VI-NEXT: v_or_b32_sdwa v20, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v39, v45, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v39 +; VI-NEXT: v_or_b32_sdwa v20, v20, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_or_b32_sdwa v19, v43, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_or_b32_sdwa v48, v42, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v18, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v55 +; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48 +; VI-NEXT: v_or_b32_sdwa v19, v19, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v62, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v62 +; VI-NEXT: v_or_b32_sdwa v18, v18, v37 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v53 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v10, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v17, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v51 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v11, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v50 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v15, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v56, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v49, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v49 +; VI-NEXT: v_or_b32_sdwa v15, v15, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v14, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, 
s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v34, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34 -; VI-NEXT: v_or_b32_sdwa v14, v14, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51 +; VI-NEXT: v_or_b32_sdwa v14, v14, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v2 +; VI-NEXT: v_or_b32_sdwa v29, v29, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v14 +; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_or_b32_sdwa v13, v59, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v13, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v36, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v36 -; VI-NEXT: v_or_b32_sdwa v13, v13, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v26 -; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v52 -; VI-NEXT: v_or_b32_sdwa v26, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v54 -; VI-NEXT: v_or_b32_sdwa v21, v21, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v52, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v52 +; VI-NEXT: v_or_b32_sdwa v13, v13, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v44 +; VI-NEXT: v_or_b32_sdwa v28, v28, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13 -; VI-NEXT: v_add_u32_e32 v21, vcc, 
0x3000000, v21 -; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26 +; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v12, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51 -; VI-NEXT: v_or_b32_sdwa v12, v12, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v59 -; VI-NEXT: v_or_b32_sdwa v25, v25, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v54, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v54, vcc, 0x300, v54 +; VI-NEXT: v_or_b32_sdwa v12, v12, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12 -; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v33, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v50, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v40, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 -; VI-NEXT: v_or_b32_sdwa v30, v47, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v39, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, 
s[0:3], s32 offset:476 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 -; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v2 +; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v53, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v55, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v9, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v41 -; VI-NEXT: v_or_b32_sdwa v9, v9, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v10 +; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42 +; VI-NEXT: v_or_b32_sdwa v9, v9, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v10 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x300, v55 -; VI-NEXT: v_or_b32_sdwa v10, v39, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v53 -; VI-NEXT: v_or_b32_sdwa v27, v28, v39 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v28, v29, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v29, v30, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v49, v16, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v10, v53, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v53, vcc, 0x300, v40 +; VI-NEXT: v_or_b32_sdwa v27, v27, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10 ; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v27 -; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28 -; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v8, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42 -; VI-NEXT: v_or_b32_sdwa v8, v8, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v11 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v40 -; VI-NEXT: v_or_b32_sdwa v11, v33, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v30, v31, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v17, v17, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v43, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v43 +; VI-NEXT: v_or_b32_sdwa v8, v8, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v11 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v41 +; VI-NEXT: v_or_b32_sdwa v17, v17, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v11, v50, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17 +; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v49 +; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v0 +; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v1 +; VI-NEXT: v_or_b32_sdwa v30, v30, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: 
v_or_b32_sdwa v31, v31, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8 ; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11 ; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v30 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v7, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v44, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v44, vcc, 0x300, v44 -; VI-NEXT: v_or_b32_sdwa v7, v7, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v45, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v45, vcc, 0x300, v45 +; VI-NEXT: v_or_b32_sdwa v7, v7, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v6, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v45, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v45, vcc, 0x300, v45 -; VI-NEXT: v_or_b32_sdwa v6, v6, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v46, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v46, vcc, 0x300, v46 +; VI-NEXT: v_or_b32_sdwa v6, v6, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v6 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt 
vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v5, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v46, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v46, vcc, 0x300, v46 -; VI-NEXT: v_or_b32_sdwa v5, v5, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v47, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v47, vcc, 0x300, v47 +; VI-NEXT: v_or_b32_sdwa v5, v5, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v4, vcc, 3, v4 -; VI-NEXT: v_or_b32_sdwa v4, v47, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v47, vcc, 3, v32 -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v4, v56, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x300, v4 ; VI-NEXT: v_or_b32_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v4 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v47, v32, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_e32 v47, s4, v47 +; VI-NEXT: v_add_u32_e32 v56, vcc, 3, v56 +; VI-NEXT: v_or_b32_sdwa v56, v57, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_e32 v56, s4, v56 ; VI-NEXT: s_and_b32 s4, s26, 0xff ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s24, 0xff @@ -151670,35 +153662,26 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: s_or_b32 s8, s9, s8 ; VI-NEXT: s_and_b32 s9, s16, 0xff ; VI-NEXT: s_or_b32 s9, s10, s9 -; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v56 ; VI-NEXT: s_addk_i32 s5, 0x300 ; VI-NEXT: s_addk_i32 s7, 0x300 ; VI-NEXT: s_addk_i32 s9, 0x300 -; VI-NEXT: v_or_b32_sdwa v15, v15, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v32, v16, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; 
VI-NEXT: s_lshl_b32 s4, s4, 16 ; VI-NEXT: s_lshl_b32 s6, s6, 16 ; VI-NEXT: s_lshl_b32 s8, s8, 16 ; VI-NEXT: s_and_b32 s9, s9, 0xffff ; VI-NEXT: s_and_b32 s7, s7, 0xffff ; VI-NEXT: s_and_b32 s5, s5, 0xffff -; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17 -; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v32 -; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v0 ; VI-NEXT: s_or_b32 s8, s8, s9 ; VI-NEXT: s_or_b32 s6, s6, s7 ; VI-NEXT: s_or_b32 s4, s4, s5 ; VI-NEXT: s_add_i32 s8, s8, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 -; VI-NEXT: v_or_b32_sdwa v31, v31, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v47 -; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15 +; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v56 ; VI-NEXT: v_mov_b32_e32 v0, s8 ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_mov_b32_e32 v2, s4 -; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31 -; VI-NEXT: .LBB89_3: ; %end +; VI-NEXT: .LBB89_5: ; %end ; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload @@ -151717,39 +153700,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] -; VI-NEXT: .LBB89_4: -; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v61, v60 -; VI-NEXT: v_mov_b32_e32 v60, v59 -; VI-NEXT: v_mov_b32_e32 v45, v62 -; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v57, v5 -; VI-NEXT: v_mov_b32_e32 v47, v4 -; VI-NEXT: v_mov_b32_e32 v63, v3 -; VI-NEXT: v_mov_b32_e32 v53, v28 -; VI-NEXT: v_mov_b32_e32 v43, v27 -; VI-NEXT: v_mov_b32_e32 v55, v26 -; VI-NEXT: v_mov_b32_e32 v41, v24 -; VI-NEXT: v_mov_b32_e32 v54, v22 -; VI-NEXT: ; implicit-def: 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 -; VI-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; VI-NEXT: s_branch .LBB89_2 ; ; GFX9-LABEL: bitcast_v128i8_to_v64bf16_scalar: ; GFX9: ; %bb.0: @@ -151770,31 +153720,36 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:332 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v46, off, 
s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:56 ; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64 ; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:80 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:88 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:96 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:104 ; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112 ; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 ; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 @@ -151804,133 +153759,129 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 ; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 ; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 -; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v17 -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25 -; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v19 -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; GFX9-NEXT: s_waitcnt vmcnt(24) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v44, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v23 +; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v43, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v29 ; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v47 +; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v56 +; GFX9-NEXT: s_waitcnt vmcnt(21) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v45 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v44 -; GFX9-NEXT: 
buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40 ; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v28 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v30 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, 
off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:200 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208 -; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:216 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 +; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v46 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(7) ; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: 
v_lshlrev_b32_e32 v1, 8, v13 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:264 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272 -; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:280 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288 ; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 @@ -151938,148 +153889,149 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: 
v_lshlrev_b32_e32 v1, 8, v4 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(5) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9 -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 -; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:328 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:328 ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:28 ; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 -; GFX9-NEXT: s_waitcnt vmcnt(15) -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; GFX9-NEXT: s_waitcnt vmcnt(14) +; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5 ; GFX9-NEXT: s_waitcnt vmcnt(7) ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v1 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:84 -; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:92 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:100 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:116 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:132 -; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:140 -; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:148 -; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:156 -; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:164 -; GFX9-NEXT: s_waitcnt vmcnt(21) -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v1 +; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:68 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:84 +; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:92 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:100 +; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:108 +; GFX9-NEXT: s_waitcnt vmcnt(14) +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v4 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: 
buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:172 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:180 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:188 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:196 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:204 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:212 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:220 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:228 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:244 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:116 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:124 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:132 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:140 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:148 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:156 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:164 +; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:172 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:180 +; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:196 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:204 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:220 +; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:236 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244 ; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252 -; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:268 -; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:276 -; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:284 -; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:292 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:260 +; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:268 +; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:276 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:292 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:300 ; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308 -; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:316 -; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:324 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:628 ; 4-byte 
Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:316 +; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:324 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(28) -; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(30) -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(33) -; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte 
Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(36) -; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(39) -; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(31) +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(34) +; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(34) +; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v30, 
off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(34) +; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(35) +; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(35) +; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB89_2 ; GFX9-NEXT: ; %bb.1: ; %cmp.false ; GFX9-NEXT: s_and_b32 s4, s28, 0xff @@ -152087,19 +154039,12 @@ define 
inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff ; GFX9-NEXT: v_and_b32_e32 v3, s4, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v2, v0, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; GFX9-NEXT: v_or_b32_sdwa v2, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v4, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v6, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v1 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v8, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 @@ -152121,272 +154066,291 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 ; GFX9-NEXT: s_or_b32 s7, s7, s8 ; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7 +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v4, v0, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v1 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v57, v5 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v2, v34, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v34, v35 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: 
s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v2, v39, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v39, v16 -; GFX9-NEXT: v_or_b32_sdwa v17, v34, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v36, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v46, v32 +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v42, v61 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v55, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 
src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v17, v45, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v45, v59 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v53, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v52, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v50, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v49, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v27, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v16, v2, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v48, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX9-NEXT: v_lshl_or_b32 v16, v2, 16, v0 ; GFX9-NEXT: v_lshl_or_b32 v17, v17, 16, v1 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword 
v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v55, v22 +; GFX9-NEXT: v_mov_b32_e32 v2, s6 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_mov_b32_e32 v33, v45 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v18, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v47, v32 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v19, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v20, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v21, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v51, v57 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v22, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v49, v39 +; GFX9-NEXT: v_mov_b32_e32 v59, v44 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v34, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v23, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_mov_b32_e32 v46, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v58, v50 +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v1, v35, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v24, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v35, v45 -; GFX9-NEXT: v_mov_b32_e32 v45, v61 -; GFX9-NEXT: v_mov_b32_e32 v61, v42 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v38, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v54, v63 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v25, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v54, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v54, v2 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v26, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v27, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v29, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshl_or_b32 v26, v1, 16, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v29, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v27, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v60, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; 
GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v59, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v36, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v42, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v29, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_mov_b32_e32 v57, v35 +; GFX9-NEXT: v_mov_b32_e32 v35, v38 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v30, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v31, v1, 16, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 -; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_mov_b64 s[4:5], 0 ; GFX9-NEXT: 
s_branch .LBB89_3 ; GFX9-NEXT: .LBB89_2: -; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v33, v45 -; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v58, v50 +; GFX9-NEXT: v_mov_b32_e32 v45, v59 +; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v34, v35 +; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v49, v39 +; GFX9-NEXT: v_mov_b32_e32 v55, v22 +; GFX9-NEXT: v_mov_b32_e32 v51, v5 ; GFX9-NEXT: s_mov_b64 s[4:5], -1 +; GFX9-NEXT: v_mov_b32_e32 v46, v32 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: ; implicit-def: 
$vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; GFX9-NEXT: .LBB89_3: ; %Flow -; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload ; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; GFX9-NEXT: s_cbranch_vccnz .LBB89_5 ; GFX9-NEXT: ; %bb.4: ; %cmp.true -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s5, s4 ; GFX9-NEXT: s_addk_i32 s4, 0x300 +; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s24, s24, 3 ; GFX9-NEXT: s_lshl_b32 s5, s25, 8 ; GFX9-NEXT: s_add_i32 s26, s26, 3 @@ -152399,61 +154363,55 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: s_lshl_b32 s9, s17, 8 ; GFX9-NEXT: s_add_i32 s18, s18, 3 ; GFX9-NEXT: s_lshl_b32 s10, s19, 8 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:496 ; 4-byte Folded 
Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(15) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(14) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v56 +; GFX9-NEXT: s_waitcnt vmcnt(12) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(12) ; GFX9-NEXT: v_add_u32_e32 v25, 3, v25 -; GFX9-NEXT: s_waitcnt vmcnt(11) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v25, v37, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v37, v51, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v25, v26, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v38, v38, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; GFX9-NEXT: 
v_add_u32_e32 v16, 3, v16 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_add_u32_e32 v23, 3, v23 +; GFX9-NEXT: s_waitcnt vmcnt(8) +; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16 +; GFX9-NEXT: v_or_b32_sdwa v23, v50, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(3) ; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 ; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2 ; GFX9-NEXT: v_and_b32_e32 v3, s4, v3 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_and_b32 s4, s24, 0xff ; GFX9-NEXT: s_or_b32 s4, s5, s4 ; GFX9-NEXT: s_and_b32 s5, s26, 0xff @@ -152465,8 +154423,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: s_and_b32 s8, s16, 0xff ; GFX9-NEXT: s_or_b32 s8, s9, s8 ; GFX9-NEXT: s_and_b32 s9, s18, 0xff -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_or_b32 s9, s10, s9 ; GFX9-NEXT: s_addk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s5, 0x300 @@ -152483,14 +154439,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) @@ -152498,9 +154454,9 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: 
v_lshl_or_b32 v6, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) @@ -152510,254 +154466,277 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; 
GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v42 +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 +; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 ; GFX9-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 +; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v37, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37 +; GFX9-NEXT: s_waitcnt vmcnt(4) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(3) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 +; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v38, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v39, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v39, v36, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v48, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v18, 3, v18 +; GFX9-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v33, 0x300, v18 +; GFX9-NEXT: s_waitcnt vmcnt(4) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v48, v46, v26 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v49, v35, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 -; GFX9-NEXT: v_or_b32_sdwa v2, v16, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v2, v47, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2 ; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v51, v34, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v16, 3, v16 -; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16 ; GFX9-NEXT: v_lshl_or_b32 v16, v16, 16, v2 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0 +; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31 +; GFX9-NEXT: v_mov_b32_e32 v0, s8 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, s6 +; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 +; GFX9-NEXT: v_or_b32_sdwa v2, v49, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v49, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, 
s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2 +; GFX9-NEXT: v_mov_b32_e32 v2, s4 +; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30 +; GFX9-NEXT: v_lshl_or_b32 v30, v33, 16, v30 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v19, 3, v19 +; GFX9-NEXT: v_add_u32_e32 v26, 3, v58 +; GFX9-NEXT: v_or_b32_sdwa v19, v51, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v19 +; GFX9-NEXT: v_and_b32_e32 v29, 0xffff, v29 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v51, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v45 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v52, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v53, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v24, 3, v24 -; GFX9-NEXT: v_add_u32_e32 v26, 3, v61 -; GFX9-NEXT: v_or_b32_sdwa v24, v54, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v24 -; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v48 -; GFX9-NEXT: v_add_u32_e32 v48, 0x300, v51 -; GFX9-NEXT: v_and_b32_e32 v24, 0xffff, v24 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v55 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v54, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v45 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v34 ; GFX9-NEXT: v_add_u32_e32 v20, 3, v20 -; GFX9-NEXT: v_or_b32_sdwa v20, v57, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v20, v35, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v34, 0x300, v20 +; GFX9-NEXT: v_lshl_or_b32 v29, v34, 16, v29 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v55, v27, 
v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v56 +; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v21, 3, v21 -; GFX9-NEXT: v_or_b32_sdwa v21, v32, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v26, 3, v60 +; GFX9-NEXT: v_or_b32_sdwa v21, v22, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40 ; GFX9-NEXT: v_add_u32_e32 v28, 0x300, v21 ; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v54 +; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20 ; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; GFX9-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v23, 3, v23 -; GFX9-NEXT: v_add_u32_e32 v26, 3, v47 -; GFX9-NEXT: v_or_b32_sdwa v23, v41, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40 -; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v41, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v43 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v46 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v22, 3, v22 -; GFX9-NEXT: v_or_b32_sdwa v22, v44, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v22, v36, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v35, 0x300, v22 ; GFX9-NEXT: v_add_u32_e32 v22, 0x300, v52 -; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41 ; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20 ; GFX9-NEXT: v_lshl_or_b32 v28, v35, 16, v28 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v42, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v43 +; GFX9-NEXT: v_add_u32_e32 v24, 3, v24 +; GFX9-NEXT: v_or_b32_sdwa v24, v57, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v42 +; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v24 +; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v48 +; GFX9-NEXT: v_add_u32_e32 v48, 0x300, v51 +; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41 +; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; GFX9-NEXT: v_and_b32_e32 v24, 0xffff, v24 +; 
GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v43, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v52, 0x300, v43 -; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31 -; GFX9-NEXT: v_mov_b32_e32 v0, s8 -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: v_lshl_or_b32 v19, v52, 16, v19 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v44, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v33 -; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1 -; GFX9-NEXT: v_mov_b32_e32 v1, s6 -; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v44 +; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v45, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v27, 0x300, v23 ; GFX9-NEXT: v_add_u32_e32 v26, 0x300, v25 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 -; GFX9-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2 ; GFX9-NEXT: v_add_u32_e32 v25, 0x300, v38 ; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v50 ; GFX9-NEXT: v_add_u32_e32 v38, 0x300, v39 @@ -152769,33 +154748,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a ; GFX9-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; GFX9-NEXT: v_and_b32_e32 v26, 0xffff, v26 ; GFX9-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30 -; GFX9-NEXT: v_mov_b32_e32 v2, s4 ; GFX9-NEXT: v_lshl_or_b32 v21, v50, 16, v21 ; GFX9-NEXT: v_lshl_or_b32 v22, v49, 16, v22 -; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v23 ; GFX9-NEXT: v_lshl_or_b32 v24, v39, 16, v24 +; GFX9-NEXT: v_lshl_or_b32 v18, v53, 16, v18 +; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v23 ; GFX9-NEXT: v_lshl_or_b32 v25, v38, 16, v25 ; GFX9-NEXT: v_lshl_or_b32 v26, v37, 16, v26 ; GFX9-NEXT: v_lshl_or_b32 v27, v36, 
16, v27 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v18, 3, v18 -; GFX9-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v33, 0x300, v18 -; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v44 -; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; GFX9-NEXT: v_lshl_or_b32 v18, v53, 16, v18 -; GFX9-NEXT: v_lshl_or_b32 v30, v33, 16, v30 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v19, 3, v19 -; GFX9-NEXT: v_or_b32_sdwa v19, v60, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v19 -; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v42 -; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; GFX9-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; GFX9-NEXT: v_lshl_or_b32 v19, v52, 16, v19 -; GFX9-NEXT: v_lshl_or_b32 v29, v34, 16, v29 ; GFX9-NEXT: .LBB89_5: ; %end ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload @@ -158082,16 +160042,15 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v46, v15 -; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 -; GFX9-NEXT: ; implicit-def: $vgpr33 -; GFX9-NEXT: ; kill: killed $vgpr33 -; GFX9-NEXT: ; implicit-def: $vgpr33 -; GFX9-NEXT: ; kill: killed $vgpr33 -; GFX9-NEXT: ; implicit-def: $vgpr33 -; GFX9-NEXT: ; kill: killed $vgpr33 +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:4 +; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 @@ -158124,50 +160083,69 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: v_mov_b32_e32 v46, v0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: ; kill: killed $vgpr0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: ; kill: killed $vgpr0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: ; kill: killed $vgpr0 +; 
GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: ; kill: killed $vgpr0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: ; kill: killed $vgpr0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr59 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: ; kill: killed $vgpr0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr59 ; GFX9-NEXT: ; implicit-def: $vgpr59 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: ; kill: killed $vgpr0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr59 ; GFX9-NEXT: ; implicit-def: $vgpr59 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: ; kill: killed $vgpr0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; kill: killed $vgpr59 ; GFX9-NEXT: ; implicit-def: $vgpr59 ; GFX9-NEXT: ; kill: killed $vgpr58 ; GFX9-NEXT: ; implicit-def: $vgpr58 -; GFX9-NEXT: v_mov_b32_e32 v47, v16 +; GFX9-NEXT: ; kill: killed $vgpr0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr36 @@ -158180,21 +160158,21 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: ; implicit-def: $vgpr51 ; GFX9-NEXT: ; implicit-def: $vgpr53 ; GFX9-NEXT: ; implicit-def: $vgpr48 -; GFX9-NEXT: ; implicit-def: $vgpr49 ; GFX9-NEXT: ; implicit-def: $vgpr52 -; GFX9-NEXT: ; implicit-def: $vgpr16 +; GFX9-NEXT: ; implicit-def: $vgpr32 ; GFX9-NEXT: ; implicit-def: $vgpr42 ; GFX9-NEXT: ; implicit-def: $vgpr39 -; GFX9-NEXT: ; implicit-def: $vgpr45 +; GFX9-NEXT: ; kill: killed $vgpr0 +; GFX9-NEXT: ; implicit-def: $vgpr0 ; GFX9-NEXT: ; implicit-def: $vgpr40 ; GFX9-NEXT: ; implicit-def: $vgpr54 -; GFX9-NEXT: ; implicit-def: $vgpr62 +; GFX9-NEXT: ; implicit-def: $vgpr43 ; GFX9-NEXT: ; implicit-def: $vgpr61 ; GFX9-NEXT: ; implicit-def: $vgpr60 ; GFX9-NEXT: ; implicit-def: $vgpr35 -; GFX9-NEXT: ; implicit-def: $vgpr44 +; GFX9-NEXT: ; implicit-def: $vgpr45 ; GFX9-NEXT: ; implicit-def: $vgpr34 -; GFX9-NEXT: ; implicit-def: $vgpr43 +; GFX9-NEXT: ; implicit-def: $vgpr47 ; GFX9-NEXT: ; kill: killed $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr33 ; GFX9-NEXT: ; implicit-def: $vgpr55 @@ -158204,11 +160182,15 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: ; implicit-def: $vgpr37 ; GFX9-NEXT: ; implicit-def: $vgpr57 ; GFX9-NEXT: ; implicit-def: $vgpr36 -; GFX9-NEXT: ; implicit-def: $vgpr63 +; GFX9-NEXT: ; implicit-def: $vgpr49 ; GFX9-NEXT: ; kill: killed $vgpr59 -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:216 ; 4-byte Folded 
Spill +; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; GFX9-NEXT: ; implicit-def: $vgpr58 ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 @@ -158218,6 +160200,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; GFX9-NEXT: ; implicit-def: $vgpr58 ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill @@ -158230,21 +160216,21 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; GFX9-NEXT: ; implicit-def: $vgpr58 -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; GFX9-NEXT: ; implicit-def: $vgpr58 -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; GFX9-NEXT: ; implicit-def: $vgpr58 ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; GFX9-NEXT: ; implicit-def: $vgpr58 -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; GFX9-NEXT: ; implicit-def: $vgpr58 +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; GFX9-NEXT: ; implicit-def: $vgpr58 ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 @@ -158261,555 +160247,551 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; GFX9-NEXT: ; implicit-def: $vgpr58 -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; GFX9-NEXT: ; implicit-def: $vgpr58 -; GFX9-NEXT: buffer_store_dword v58, off, 
s[0:3], s32 offset:76 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(33) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15 -; GFX9-NEXT: ; implicit-def: $vgpr15 -; GFX9-NEXT: ; kill: killed $vgpr15 -; GFX9-NEXT: ; implicit-def: $vgpr15 -; GFX9-NEXT: ; kill: killed $vgpr15 -; GFX9-NEXT: ; implicit-def: $vgpr15 -; GFX9-NEXT: ; kill: killed $vgpr15 -; GFX9-NEXT: ; implicit-def: $vgpr15 -; GFX9-NEXT: ; kill: killed $vgpr15 -; GFX9-NEXT: ; implicit-def: $vgpr15 -; GFX9-NEXT: ; kill: killed $vgpr15 -; GFX9-NEXT: ; implicit-def: $vgpr15 -; GFX9-NEXT: ; kill: killed $vgpr15 -; GFX9-NEXT: ; implicit-def: $vgpr15 +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; GFX9-NEXT: ; implicit-def: $vgpr31 ; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc ; GFX9-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; GFX9-NEXT: s_cbranch_execz .LBB90_2 ; GFX9-NEXT: ; %bb.1: ; %cmp.false +; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v4 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v31, 24, v8 +; GFX9-NEXT: v_lshrrev_b32_e32 v32, 24, v6 +; GFX9-NEXT: v_lshrrev_b32_e32 v58, 16, v18 +; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v20 +; GFX9-NEXT: v_lshrrev_b32_e32 v54, 8, v11 +; GFX9-NEXT: v_lshrrev_b32_e32 v40, 8, v10 +; GFX9-NEXT: v_lshrrev_b32_e32 v39, 8, v8 +; GFX9-NEXT: v_lshrrev_b32_e32 v42, 8, v6 +; GFX9-NEXT: v_lshrrev_b32_e32 v52, 8, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v48, 8, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v53, 24, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v51, 8, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v41, 8, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v49, 8, v24 +; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v23 +; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v23 +; GFX9-NEXT: v_lshrrev_b32_e32 v37, 24, v22 +; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v22 +; GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v21 +; GFX9-NEXT: v_lshrrev_b32_e32 v55, 8, v21 +; GFX9-NEXT: v_lshrrev_b32_e32 v47, 8, v20 +; GFX9-NEXT: v_lshrrev_b32_e32 v45, 8, v19 +; GFX9-NEXT: v_lshrrev_b32_e32 v35, 24, v18 +; GFX9-NEXT: v_lshrrev_b32_e32 v60, 8, v18 +; GFX9-NEXT: v_lshrrev_b32_e32 v61, 16, v17 +; GFX9-NEXT: v_lshrrev_b32_e32 v43, 8, v17 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v16 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v16 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v16 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v15 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v15 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v14 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v4 -; GFX9-NEXT: buffer_store_dword 
v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v32 ; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v32 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v32 -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v47 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(38) -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v31 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v47 +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v3 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v2 ; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v31 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v47 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v30 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v46 +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v63 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v63 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v63 ; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v62 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v62 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v30 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v30 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v46 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v30 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v14 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v29 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v29 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; GFX9-NEXT: 
v_lshrrev_b32_e32 v15, 8, v14 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v28 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v28 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v13 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v14 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v28 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v12 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v14 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v27 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v12 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v13 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v27 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v12 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v13 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v26 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v11 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v12 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v10 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v12 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v26 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 
offset:344 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v10 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v12 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v9 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v11 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v25 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v8 -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 24, v6 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v10 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v24 -; GFX9-NEXT: v_lshrrev_b32_e32 v58, 16, v18 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v8 -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v6 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v10 +; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v8 +; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v6 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24 -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[46:47] -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v7 -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v5 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[15:16] +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v9 +; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v7 +; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v5 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v22 -; GFX9-NEXT: v_lshrrev_b32_e32 v34, 
16, v20 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[13:14] -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[11:12] +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[9:10] +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[13:14] ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[7:8] +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[11:12] ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[5:6] +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[9:10] +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[7:8] ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[3:4] +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[5:6] ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[1:2] -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[31:32] -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[3:4] +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; 
GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[29:30] -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[1:2] +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[27:28] +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[62:63] ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[25:26] +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[29:30] ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[23:24] +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[27:28] ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[21:22] +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[25:26] ; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[23:24] +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[21:22] +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[19:20] -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b64 v[58:59], 24, v[17:18] -; GFX9-NEXT: v_lshrrev_b32_e32 v54, 8, v11 -; GFX9-NEXT: v_lshrrev_b32_e32 v40, 8, v10 -; GFX9-NEXT: v_lshrrev_b32_e32 v45, 8, v9 -; GFX9-NEXT: v_lshrrev_b32_e32 v39, 8, v8 -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v7 -; GFX9-NEXT: v_lshrrev_b32_e32 v42, 8, v6 -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 8, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v52, 8, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v48, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v53, 24, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v51, 8, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v41, 8, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v63, 8, v24 -; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v23 -; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v23 -; GFX9-NEXT: v_lshrrev_b32_e32 v37, 24, v22 -; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v22 -; 
GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v21 -; GFX9-NEXT: v_lshrrev_b32_e32 v55, 8, v21 +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v9 +; GFX9-NEXT: v_lshrrev_b32_e32 v31, 8, v7 +; GFX9-NEXT: v_lshrrev_b32_e32 v32, 8, v5 ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v20 -; GFX9-NEXT: v_lshrrev_b32_e32 v43, 8, v20 ; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v19 -; GFX9-NEXT: v_lshrrev_b32_e32 v44, 8, v19 -; GFX9-NEXT: v_lshrrev_b32_e32 v35, 24, v18 -; GFX9-NEXT: v_lshrrev_b32_e32 v60, 8, v18 -; GFX9-NEXT: v_lshrrev_b32_e32 v61, 16, v17 -; GFX9-NEXT: v_lshrrev_b32_e32 v62, 8, v17 -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; GFX9-NEXT: .LBB90_2: ; %Flow -; GFX9-NEXT: s_or_saveexec_b64 s[4:5], s[4:5] -; GFX9-NEXT: v_mov_b32_e32 v58, v57 -; GFX9-NEXT: s_xor_b64 exec, exec, s[4:5] +; GFX9-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; GFX9-NEXT: s_cbranch_execz .LBB90_4 ; GFX9-NEXT: ; %bb.3: ; %cmp.true -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v18 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v18 +; GFX9-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 +; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1 ; GFX9-NEXT: s_movk_i32 s6, 0x7fff -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v18 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v33, v16, v33, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v18, vcc +; GFX9-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; GFX9-NEXT: v_add3_u32 v32, v32, v31, s6 +; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v31 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v31, v31 +; GFX9-NEXT: v_bfe_u32 v31, v18, 16, 1 +; GFX9-NEXT: v_cndmask_b32_e32 v47, v32, v33, vcc +; GFX9-NEXT: v_add3_u32 v31, v31, v18, s6 +; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v18 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v18, v18 +; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v17 +; GFX9-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v56, v31, v32, vcc +; GFX9-NEXT: v_bfe_u32 v31, v18, 16, 1 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_add3_u32 v31, v31, v18, s6 +; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v18 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v18, v18 +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_cndmask_b32_e32 v57, v31, v32, vcc +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v31, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 ; GFX9-NEXT: s_mov_b32 s7, 0x7060302 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; GFX9-NEXT: v_perm_b32 v34, v15, v33, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v17 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, 
v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v17 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v18, v16, v18, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; GFX9-NEXT: v_perm_b32 v33, v15, v18, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v20 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v20 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; GFX9-NEXT: v_cndmask_b32_e32 v18, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v31, vcc +; GFX9-NEXT: v_perm_b32 v13, v17, v57, s7 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v20 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v31, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v20 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v31, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_perm_b32 v14, v56, v47, s7 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v20, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v19 +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; GFX9-NEXT: v_perm_b32 v18, v15, v18, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v19 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v19 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v20, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: v_perm_b32 v17, v15, v20, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v22 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; 
GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v13, v18, v20, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v20, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v19 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; GFX9-NEXT: v_perm_b32 v32, v13, v0, s7 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v20, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc +; GFX9-NEXT: v_perm_b32 v31, v17, v0, s7 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v22 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v22 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v21 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; GFX9-NEXT: v_cndmask_b32_e32 v13, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_mov_b32_e32 v59, v32 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v21 +; GFX9-NEXT: v_mov_b32_e32 v58, v31 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; GFX9-NEXT: v_perm_b32 v14, v13, v0, s7 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; GFX9-NEXT: v_perm_b32 v13, v17, v0, s7 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v24 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v24 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v23 +; GFX9-NEXT: buffer_store_dword v13, 
off, s[0:3], s32 offset:440 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v22 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v18, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; GFX9-NEXT: v_perm_b32 v18, v15, v18, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v21 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v21 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v61, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: v_perm_b32 v17, v15, v61, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v24 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v13, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v23 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; GFX9-NEXT: v_perm_b32 v14, v13, v0, s7 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; GFX9-NEXT: v_perm_b32 v13, v17, v0, s7 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v26 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v26 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v25 +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], 
s32 offset:448 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v24 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v18, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-NEXT: v_perm_b32 v59, v15, v18, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v23 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v23 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; GFX9-NEXT: v_cndmask_b32_e32 v18, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: v_perm_b32 v58, v15, v18, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v26 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v26 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; GFX9-NEXT: v_cndmask_b32_e32 v18, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; GFX9-NEXT: v_perm_b32 v63, v15, v18, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v25 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v25 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; GFX9-NEXT: v_cndmask_b32_e32 v18, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: v_perm_b32 v62, v15, v18, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v28 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v28 -; GFX9-NEXT: 
v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v60, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v27 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v28, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v27 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v26, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: v_perm_b32 v56, v15, v26, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v30 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v30 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v27, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v29 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v30, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v29 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v25, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: v_perm_b32 v33, v15, v25, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v32 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v32 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v29, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: s_waitcnt vmcnt(51) -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v31 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v32, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v31 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v24, v16, v17, vcc -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: 
v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc -; GFX9-NEXT: v_perm_b32 v35, v15, v24, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v2 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v13, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v25 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; GFX9-NEXT: v_perm_b32 v14, v13, v0, s7 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; GFX9-NEXT: v_perm_b32 v13, v17, v0, s7 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v28 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v28 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; GFX9-NEXT: v_cndmask_b32_e32 v0, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v27 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v28, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v27 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v25, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc +; GFX9-NEXT: v_perm_b32 v60, v17, v25, s7 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v30 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v30 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v27, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v29 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v30, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 
v17, 0xffff0000, v29 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v24, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc +; GFX9-NEXT: v_perm_b32 v33, v17, v24, s7 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v63 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v63 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v29, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: s_waitcnt vmcnt(52) +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v62 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v44, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v62 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v23, v18, v19, vcc +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_cndmask_b32_e32 v17, v18, v19, vcc +; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; GFX9-NEXT: v_perm_b32 v35, v17, v23, s7 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v2 +; GFX9-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 ; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX9-NEXT: v_bfe_u32 v18, v17, 16, 1 ; GFX9-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_bfe_u32 v15, v2, 16, 1 -; GFX9-NEXT: v_cndmask_b32_e32 v31, v16, v17, vcc -; GFX9-NEXT: v_add3_u32 v15, v15, v2, s6 -; GFX9-NEXT: v_or_b32_e32 v16, 0x400000, v2 +; GFX9-NEXT: v_add3_u32 v18, v18, v17, s6 +; GFX9-NEXT: v_or_b32_e32 v19, 0x400000, v17 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX9-NEXT: v_bfe_u32 v17, v2, 16, 1 +; GFX9-NEXT: v_cndmask_b32_e32 v43, v18, v19, vcc +; GFX9-NEXT: v_add3_u32 v17, v17, v2, s6 +; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v2 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX9-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v22, v15, v16, vcc -; GFX9-NEXT: v_bfe_u32 v15, v2, 16, 1 +; GFX9-NEXT: v_cndmask_b32_e32 v21, v17, v18, vcc +; GFX9-NEXT: v_bfe_u32 v17, v2, 16, 1 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX9-NEXT: v_add3_u32 v15, v15, v2, s6 -; GFX9-NEXT: 
v_or_b32_e32 v16, 0x400000, v2 +; GFX9-NEXT: v_add3_u32 v17, v17, v2, s6 +; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v2 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX9-NEXT: v_cndmask_b32_e32 v23, v15, v16, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v22, v17, v18, vcc ; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6 -; GFX9-NEXT: v_or_b32_e32 v15, 0x400000, v1 +; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v15, vcc -; GFX9-NEXT: v_perm_b32 v37, v1, v23, s7 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v17, vcc +; GFX9-NEXT: v_perm_b32 v37, v1, v22, s7 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v4 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 ; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6 -; GFX9-NEXT: v_or_b32_e32 v15, 0x400000, v1 +; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v21, v2, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v20, v2, v17, vcc ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 ; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6 ; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v3 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v19, v2, v4, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v18, v2, v4, vcc ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 ; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6 ; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v3 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v20, v2, v4, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v19, v2, v4, vcc ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 ; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6 ; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc -; GFX9-NEXT: v_perm_b32 v48, v1, v20, s7 +; GFX9-NEXT: v_perm_b32 v48, v1, v19, s7 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v6 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 @@ -158818,8 +160800,7 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v6 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; GFX9-NEXT: v_cndmask_b32_e32 v18, v2, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v17, v2, v3, vcc ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 ; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6 ; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v1 @@ -158833,13 +160814,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v5 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v17, v2, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v3, vcc ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 ; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6 ; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc -; GFX9-NEXT: v_perm_b32 v50, v1, v17, s7 +; GFX9-NEXT: 
v_perm_b32 v50, v1, v4, s7 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v8 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 @@ -158862,13 +160843,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v7 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v3, vcc ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 ; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6 ; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v7, vcc -; GFX9-NEXT: v_perm_b32 v52, v1, v4, s7 +; GFX9-NEXT: v_perm_b32 v52, v1, v3, s7 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v10 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 @@ -158887,312 +160868,319 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: v_cndmask_b32_e32 v10, v2, v10, vcc ; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1 ; GFX9-NEXT: v_add3_u32 v2, v2, v1, s6 -; GFX9-NEXT: v_or_b32_e32 v15, 0x400000, v1 +; GFX9-NEXT: v_or_b32_e32 v31, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v9 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX9-NEXT: v_bfe_u32 v9, v1, 16, 1 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v2, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v31, vcc ; GFX9-NEXT: v_add3_u32 v9, v9, v1, s6 -; GFX9-NEXT: v_or_b32_e32 v15, 0x400000, v1 +; GFX9-NEXT: v_or_b32_e32 v31, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v15, vcc -; GFX9-NEXT: v_perm_b32 v39, v1, v3, s7 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v31, vcc +; GFX9-NEXT: v_perm_b32 v39, v1, v2, s7 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v12 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX9-NEXT: v_bfe_u32 v9, v1, 16, 1 ; GFX9-NEXT: v_add3_u32 v9, v9, v1, s6 -; GFX9-NEXT: v_or_b32_e32 v15, 0x400000, v1 +; GFX9-NEXT: v_or_b32_e32 v31, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v12 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX9-NEXT: v_bfe_u32 v12, v1, 16, 1 -; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v31, vcc ; GFX9-NEXT: v_add3_u32 v12, v12, v1, s6 -; GFX9-NEXT: v_or_b32_e32 v15, 0x400000, v1 +; GFX9-NEXT: v_or_b32_e32 v31, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v11 ; GFX9-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v15, vcc -; GFX9-NEXT: v_bfe_u32 v15, v1, 16, 1 +; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v31, vcc +; GFX9-NEXT: v_bfe_u32 v31, v1, 16, 1 ; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 -; GFX9-NEXT: v_add3_u32 v15, v15, v1, s6 -; GFX9-NEXT: v_or_b32_e32 v16, 0x400000, v1 +; GFX9-NEXT: v_add3_u32 v31, v31, v1, s6 +; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v1 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX9-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 -; GFX9-NEXT: v_cndmask_b32_e32 v2, v15, v16, vcc -; GFX9-NEXT: v_bfe_u32 v15, v11, 16, 1 -; GFX9-NEXT: v_add3_u32 v15, v15, v11, s6 -; GFX9-NEXT: v_or_b32_e32 v16, 0x400000, v11 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v31, v32, vcc +; GFX9-NEXT: v_bfe_u32 v31, v11, 16, 1 +; GFX9-NEXT: v_add3_u32 v31, v31, v11, s6 +; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v11 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 -; GFX9-NEXT: v_cndmask_b32_e32 v11, v15, 
v16, vcc -; GFX9-NEXT: v_perm_b32 v54, v11, v2, s7 +; GFX9-NEXT: v_cndmask_b32_e32 v11, v31, v32, vcc +; GFX9-NEXT: v_perm_b32 v54, v11, v1, s7 +; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v14 ; GFX9-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 -; GFX9-NEXT: v_bfe_u32 v15, v11, 16, 1 +; GFX9-NEXT: v_bfe_u32 v31, v11, 16, 1 ; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; GFX9-NEXT: v_add3_u32 v15, v15, v11, s6 -; GFX9-NEXT: v_or_b32_e32 v16, 0x400000, v11 +; GFX9-NEXT: v_add3_u32 v31, v31, v11, s6 +; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v11 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 ; GFX9-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; GFX9-NEXT: v_cndmask_b32_e32 v11, v15, v16, vcc -; GFX9-NEXT: v_bfe_u32 v15, v14, 16, 1 -; GFX9-NEXT: v_add3_u32 v15, v15, v14, s6 -; GFX9-NEXT: v_or_b32_e32 v16, 0x400000, v14 +; GFX9-NEXT: v_cndmask_b32_e32 v11, v31, v32, vcc +; GFX9-NEXT: v_bfe_u32 v31, v14, 16, 1 +; GFX9-NEXT: v_add3_u32 v31, v31, v14, s6 +; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v14 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 -; GFX9-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v13 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v31, v32, vcc +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v13 +; GFX9-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 ; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1 ; GFX9-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v41, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_bfe_u32 v15, v13, 16, 1 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v16, v41, vcc -; GFX9-NEXT: v_add3_u32 v15, v15, v13, s6 -; GFX9-NEXT: v_or_b32_e32 v16, 0x400000, v13 +; GFX9-NEXT: v_add3_u32 v32, v32, v31, s6 +; GFX9-NEXT: v_or_b32_e32 v41, 0x400000, v31 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v31, v31 +; GFX9-NEXT: v_bfe_u32 v31, v13, 16, 1 +; GFX9-NEXT: v_perm_b32 v61, v28, v0, s7 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v32, v41, vcc +; GFX9-NEXT: v_add3_u32 v31, v31, v13, s6 +; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v13 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 -; GFX9-NEXT: v_cndmask_b32_e32 v13, v15, v16, vcc -; GFX9-NEXT: v_perm_b32 v41, v13, v1, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 16, v47 +; GFX9-NEXT: v_cndmask_b32_e32 v13, v31, v32, vcc +; GFX9-NEXT: v_perm_b32 v41, v13, v0, s7 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_lshlrev_b32_e32 v13, 16, v16 ; GFX9-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 -; GFX9-NEXT: v_bfe_u32 v15, v13, 16, 1 -; GFX9-NEXT: v_add3_u32 v15, v15, v13, s6 -; GFX9-NEXT: v_or_b32_e32 v16, 0x400000, v13 +; GFX9-NEXT: v_bfe_u32 v31, v13, 16, 1 +; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX9-NEXT: v_add3_u32 v31, v31, v13, s6 +; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v13 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 -; GFX9-NEXT: v_cndmask_b32_e32 v13, v15, v16, vcc -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v47 +; GFX9-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; GFX9-NEXT: v_cndmask_b32_e32 v13, v31, v32, vcc +; GFX9-NEXT: v_bfe_u32 v31, v16, 16, 1 +; GFX9-NEXT: v_add3_u32 v31, v31, v16, s6 +; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v16 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v16, v16 +; GFX9-NEXT: v_cndmask_b32_e32 v16, v31, v32, vcc +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v15 +; GFX9-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 +; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX9-NEXT: v_bfe_u32 v26, v31, 
16, 1 ; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v16, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v16, v16, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v44, 0x400000, v15 +; GFX9-NEXT: v_add3_u32 v26, v26, v31, s6 +; GFX9-NEXT: v_or_b32_e32 v45, 0x400000, v31 +; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v31, v31 +; GFX9-NEXT: v_bfe_u32 v31, v15, 16, 1 +; GFX9-NEXT: v_cndmask_b32_e32 v26, v26, v45, vcc +; GFX9-NEXT: v_add3_u32 v31, v31, v15, s6 +; GFX9-NEXT: v_or_b32_e32 v45, 0x400000, v15 ; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v44, v16, v44, vcc -; GFX9-NEXT: v_perm_b32 v16, v44, v13, s7 -; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v13 +; GFX9-NEXT: v_cndmask_b32_e32 v15, v31, v45, vcc +; GFX9-NEXT: v_perm_b32 v31, v15, v26, s7 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v16 +; GFX9-NEXT: v_perm_b32 v32, v16, v13, s7 +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v13 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v26 ; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v14 -; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v12 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v10 -; GFX9-NEXT: v_perm_b32 v53, v8, v5, s7 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v8 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v17 -; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v32 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v30 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v28 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; GFX9-NEXT: v_perm_b32 v34, v30, v27, s7 -; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v27 -; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; GFX9-NEXT: v_perm_b32 v36, v32, v29, s7 -; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v29 -; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v25 -; GFX9-NEXT: v_perm_b32 v38, v22, v31, s7 ; GFX9-NEXT: v_perm_b32 v42, v14, v11, s7 +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v11 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v31 -; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v24 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v6 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v19 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v0 +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v12 +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 
offset:88 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v10 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v8 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v6 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v18 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v21 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v44 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v30 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v28 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; GFX9-NEXT: v_perm_b32 v34, v30, v27, s7 +; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v27 +; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v25 +; GFX9-NEXT: v_perm_b32 v36, v44, v29, s7 +; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v29 +; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v24 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v23 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v22 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v23 -; GFX9-NEXT: v_perm_b32 v55, v12, v9, s7 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v9 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v20 -; GFX9-NEXT: v_perm_b32 v49, v19, v21, s7 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v46 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_bfe_u32 v45, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v45, v45, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v43, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v46 -; GFX9-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v43, v45, v43, vcc -; GFX9-NEXT: v_bfe_u32 v45, v15, 16, 1 -; GFX9-NEXT: v_add3_u32 v45, v45, v15, s6 -; GFX9-NEXT: v_or_b32_e32 v46, 0x400000, v15 -; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v15, v45, v46, vcc -; GFX9-NEXT: v_perm_b32 v15, v15, v43, s7 -; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v44 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v43 -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[15:16] -; GFX9-NEXT: v_perm_b32 v51, v6, v18, s7 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v18 +; GFX9-NEXT: v_perm_b32 v38, v21, v43, s7 +; GFX9-NEXT: v_perm_b32 v49, v18, v20, s7 +; GFX9-NEXT: v_perm_b32 v53, v8, v5, s7 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v20 +; GFX9-NEXT: v_perm_b32 v51, v6, v17, s7 ; GFX9-NEXT: v_perm_b32 v40, v10, v7, s7 -; GFX9-NEXT: v_perm_b32 v57, v28, v60, s7 ; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v7 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v21 -; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v60 
-; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v61 -; GFX9-NEXT: v_lshrrev_b32_e32 v45, 8, v39 -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v17 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v17 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v19 +; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v57 +; GFX9-NEXT: v_perm_b32 v55, v12, v9, s7 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v9 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v43 +; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v47 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v17 -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v17 -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v17 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v17 -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v17 -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v17 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v17 -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v17 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; GFX9-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v17 -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v17 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v17 -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[41:42] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[54:55] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[39:40] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[52:53] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v0 +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v56 +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[31:32] +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[50:51] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[41:42] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[48:49] -; GFX9-NEXT: buffer_store_dword v43, 
off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[54:55] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[37:38] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[39:40] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[35:36] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[52:53] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[33:34] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[50:51] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[56:57] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[48:49] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[62:63] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[37:38] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[58:59] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[35:36] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(28) -; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v17 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 
24, v[60:61] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[33:34] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[60:61] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v32 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[43:44] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[13:14] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b64 v[43:44], 24, v[43:44] -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[62:63] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v43, 24, v16 -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 8, v16 -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v15 -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v15 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v42 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v42 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v41 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v41 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v55 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v55 -; GFX9-NEXT: 
buffer_store_dword v15, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v54 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v40 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v39 -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v53 +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[43:44] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[58:59] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_lshrrev_b64 v[56:57], 24, v[58:59] +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v32 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v31 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v31 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v42 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v42 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v41 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v41 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v55 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v55 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v54 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v40 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v39 +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, 
v39 +; GFX9-NEXT: v_lshrrev_b32_e32 v31, 24, v53 ; GFX9-NEXT: v_lshrrev_b32_e32 v39, 8, v53 -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 24, v51 +; GFX9-NEXT: v_lshrrev_b32_e32 v32, 24, v51 ; GFX9-NEXT: v_lshrrev_b32_e32 v42, 8, v51 ; GFX9-NEXT: v_lshrrev_b32_e32 v53, 24, v38 ; GFX9-NEXT: v_lshrrev_b32_e32 v51, 8, v38 @@ -159200,439 +161188,449 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX9-NEXT: v_lshrrev_b32_e32 v41, 8, v37 ; GFX9-NEXT: v_lshrrev_b32_e32 v37, 24, v36 ; GFX9-NEXT: v_lshrrev_b32_e32 v36, 8, v36 -; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v35 ; GFX9-NEXT: v_lshrrev_b32_e32 v35, 8, v35 -; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v35, 24, v34 ; GFX9-NEXT: v_lshrrev_b32_e32 v34, 8, v34 -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v33 ; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v33 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v57 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v57 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v56 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v56 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v61 ; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v63 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v63 +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v61 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v60 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v60 ; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v62 -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v50 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v62 -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v52 -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 8, v50 -; GFX9-NEXT: v_lshrrev_b32_e32 v50, 24, v49 -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v59 -; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 
offset:380 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v14 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v14 +; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v13 +; GFX9-NEXT: v_mov_b32_e32 v34, v62 +; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v52 +; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v50 ; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshrrev_b32_e32 v63, 8, v59 -; GFX9-NEXT: v_lshrrev_b32_e32 v37, 24, v61 -; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v61 -; GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v60 -; GFX9-NEXT: v_lshrrev_b32_e32 v55, 8, v60 -; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshrrev_b32_e32 v54, 8, v54 -; GFX9-NEXT: v_lshrrev_b32_e32 v40, 8, v40 -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v52 +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 8, v13 +; GFX9-NEXT: v_mov_b32_e32 v35, v63 +; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v31, 8, v52 +; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v32, 8, v50 +; GFX9-NEXT: v_lshrrev_b32_e32 v50, 24, v49 ; GFX9-NEXT: v_lshrrev_b32_e32 v52, 8, v49 ; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v48 +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v35 +; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v63, v16 +; GFX9-NEXT: v_lshrrev_b32_e32 v54, 8, v54 +; GFX9-NEXT: v_lshrrev_b32_e32 v40, 8, v40 ; GFX9-NEXT: v_lshrrev_b32_e32 v48, 8, v48 -; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v58 -; GFX9-NEXT: v_lshrrev_b32_e32 v58, 8, v58 -; GFX9-NEXT: 
s_waitcnt vmcnt(3) -; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v61 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v62 -; GFX9-NEXT: v_lshrrev_b32_e32 v43, 8, v62 -; GFX9-NEXT: v_lshrrev_b32_e32 v44, 8, v61 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v35, 24, v60 -; GFX9-NEXT: v_lshrrev_b32_e32 v60, 8, v60 -; GFX9-NEXT: v_lshrrev_b32_e32 v61, 16, v59 -; GFX9-NEXT: v_lshrrev_b32_e32 v62, 8, v59 +; GFX9-NEXT: v_mov_b32_e32 v62, v15 +; GFX9-NEXT: v_lshrrev_b32_e32 v49, 8, v35 +; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v34 +; GFX9-NEXT: v_lshrrev_b32_e32 v57, 8, v34 +; GFX9-NEXT: v_lshrrev_b32_e32 v37, 24, v44 +; GFX9-NEXT: v_lshrrev_b32_e32 v50, 8, v44 +; GFX9-NEXT: v_lshrrev_b32_e32 v56, 16, v43 +; GFX9-NEXT: v_lshrrev_b32_e32 v55, 8, v43 +; GFX9-NEXT: v_lshrrev_b32_e32 v35, 24, v59 +; GFX9-NEXT: v_lshrrev_b32_e32 v43, 8, v58 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v60 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v33, 24, v61 +; GFX9-NEXT: v_lshrrev_b32_e32 v47, 8, v61 +; GFX9-NEXT: v_lshrrev_b32_e32 v45, 8, v60 +; GFX9-NEXT: v_lshrrev_b32_e32 v60, 8, v59 +; GFX9-NEXT: v_lshrrev_b32_e32 v61, 16, v58 ; GFX9-NEXT: .LBB90_4: ; %end ; GFX9-NEXT: s_or_b64 exec, exec, s[4:5] -; GFX9-NEXT: v_lshlrev_b16_e32 v15, 8, v15 -; GFX9-NEXT: v_or_b32_sdwa v7, v7, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b16_e32 v15, 8, v39 -; GFX9-NEXT: v_or_b32_sdwa v8, v8, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b16_e32 v15, 8, v45 -; GFX9-NEXT: v_or_b32_sdwa v9, v9, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b16_e32 v15, 8, v40 -; GFX9-NEXT: v_or_b32_sdwa v10, v10, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b16_e32 v15, 8, v54 -; GFX9-NEXT: v_or_b32_sdwa v11, v11, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshlrev_b16_e32 v16, 8, v16 +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v9, 8, v40 +; GFX9-NEXT: v_or_b32_sdwa v9, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v10, 8, v54 +; GFX9-NEXT: v_or_b32_sdwa v10, v11, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshlrev_b16_e32 v32, 8, v32 +; GFX9-NEXT: v_lshlrev_b16_e32 v31, 8, v31 +; GFX9-NEXT: v_or_b32_sdwa v5, v5, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v32, 8, v42 +; GFX9-NEXT: v_or_b32_sdwa v7, v7, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v31, 8, v39 +; GFX9-NEXT: v_or_b32_sdwa v6, v6, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v8, v8, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v41, 8, v41 +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshlrev_b16_e32 v51, 8, v51 -; GFX9-NEXT: v_lshlrev_b16_e32 v48, 8, v48 
-; GFX9-NEXT: v_or_b32_sdwa v5, v5, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b16_e32 v16, 8, v42 ; GFX9-NEXT: v_or_b32_sdwa v2, v2, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v48, 8, v48 ; GFX9-NEXT: v_or_b32_sdwa v3, v3, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshlrev_b16_e32 v48, 8, v52 -; GFX9-NEXT: v_or_b32_sdwa v6, v6, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b16_e32 v39, 8, v62 -; GFX9-NEXT: v_or_b32_sdwa v17, v17, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b16_e32 v39, 8, v60 -; GFX9-NEXT: v_lshlrev_b16_e32 v41, 8, v41 -; GFX9-NEXT: v_or_b32_sdwa v18, v18, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v4, v4, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v15, 8, v15 -; GFX9-NEXT: v_or_b32_sdwa v12, v12, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_lshlrev_b16_e32 v11, 8, v11 +; GFX9-NEXT: v_or_b32_sdwa v11, v12, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_lshlrev_b16_e32 v12, 8, v12 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v16, 8, v16 -; GFX9-NEXT: v_or_b32_sdwa v16, v47, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v12, v13, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshlrev_b16_e32 v15, 8, v15 -; GFX9-NEXT: v_or_b32_sdwa v13, v13, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v13, 8, v13 +; GFX9-NEXT: v_or_b32_sdwa v13, v14, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_lshlrev_b16_e32 v14, 8, v14 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v14, v15, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: 
v_lshlrev_b16_e32 v15, 8, v15 -; GFX9-NEXT: v_or_b32_sdwa v14, v14, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v39, 8, v51 -; GFX9-NEXT: v_or_b32_sdwa v38, v38, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v15, v16, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v16, 8, v43 +; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v17, 8, v60 +; GFX9-NEXT: v_or_b32_sdwa v17, v18, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_lshlrev_b16_e32 v18, 8, v31 +; GFX9-NEXT: v_or_b32_sdwa v18, v38, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v1, v46, s[0:3], 0 offen +; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v53 -; GFX9-NEXT: v_lshlrev_b16_e32 v15, 8, v15 -; GFX9-NEXT: v_or_b32_sdwa v15, v46, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v18, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:4 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword v1, v46, s[0:3], 0 offen offset:4 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:8 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword v1, v46, s[0:3], 0 offen offset:8 +; GFX9-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:420 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:12 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword v1, v46, s[0:3], 0 offen offset:12 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:16 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword v1, v46, s[0:3], 0 offen offset:16 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:20 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword v1, v46, s[0:3], 0 offen offset:20 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:24 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword 
v1, v46, s[0:3], 0 offen offset:24 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:28 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_store_dword v1, v46, s[0:3], 0 offen offset:28 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:32 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:32 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v10, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:36 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:36 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 
v1, 8, v1 +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:40 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v10, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:40 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:44 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v11, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:44 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:48 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:48 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:52 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v13, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:52 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:56 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:56 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:60 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v15, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:60 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 
offen offset:64 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v35 +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:64 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v35 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v18, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:68 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:68 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v44 -; GFX9-NEXT: v_or_b32_sdwa v2, v19, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:72 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v43 -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v33 -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v45 +; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:72 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v47 +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v33 +; GFX9-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:76 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], 
s32 offset:120 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v55 -; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:76 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v55 +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; GFX9-NEXT: v_or_b32_sdwa v2, v56, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:80 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v50 -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v37 -; GFX9-NEXT: v_or_b32_sdwa v1, v22, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:80 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v50 +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v37 +; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:84 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v58 -; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:84 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v57 +; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; GFX9-NEXT: v_or_b32_sdwa v2, v36, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 
src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:88 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v63 -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:88 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v49 +; GFX9-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:92 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:92 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:96 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:96 +; GFX9-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v26, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:100 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:100 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v27, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:104 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:104 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; GFX9-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:108 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:108 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v29, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:112 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:112 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 +; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:116 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:116 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:120 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:120 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload ; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v2 +; GFX9-NEXT: 
v_lshlrev_b16_e32 v1, 8, v1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:124 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_store_dword v0, v46, s[0:3], 0 offen offset:124 ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload @@ -159656,177 +161654,175 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:244 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:240 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:236 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:232 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:224 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:200 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:192 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:184 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:180 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:176 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:172 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:168 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:164 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:160 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:156 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:152 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:148 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:144 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:140 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:136 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:132 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:128 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:124 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:120 -; GFX11-TRUE16-NEXT: s_clause 0x1a -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:116 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:112 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:108 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:104 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:100 -; 
GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:92 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:84 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:76 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:68 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:60 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:52 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:44 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:36 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:28 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:20 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:12 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:236 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:232 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:228 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:220 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:212 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:208 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:204 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:200 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:196 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:192 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:188 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:184 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:180 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:176 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:172 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:168 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:164 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:160 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:156 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:152 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:148 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:144 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:140 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:136 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:132 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:128 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:124 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:120 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:116 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:112 +; GFX11-TRUE16-NEXT: s_clause 0x18 +; 
GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:108 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:100 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:92 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:84 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:76 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:68 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:64 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:60 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:52 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:44 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:40 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:36 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:28 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:20 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:12 ; GFX11-TRUE16-NEXT: s_clause 0x2 ; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:8 -; GFX11-TRUE16-NEXT: scratch_load_b32 v85, off, s32 offset:4 -; GFX11-TRUE16-NEXT: scratch_load_b32 v84, off, s32 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_lo16 +; GFX11-TRUE16-NEXT: scratch_load_b32 v99, off, s32 offset:4 +; GFX11-TRUE16-NEXT: scratch_load_b32 v98, off, s32 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr140_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr139_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr123_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr107_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr95_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr93_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr75_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr61_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr154_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_lo16 +; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr122_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr92_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr164_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr163_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr166_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr165_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16 -; 
GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr41_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr182_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr181_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16 @@ -159840,135 +161836,135 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_2 ; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[15:16] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[11:12] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[7:8] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[5:6] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[27:28] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[13:14] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[11:12] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[9:10] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[7:8] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[3:4] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v13 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v7 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v6 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v5 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, 
v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v3 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v2 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v1 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v85 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v85 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v99 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v99 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v84 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v17 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[13:14] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[9:10] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[3:4] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[1:2] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[84:85] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v98 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v30 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v17 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[5:6] +; 
GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[1:2] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[98:99] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[29:30] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[23:24] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[21:22] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[19:20] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v1.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v1.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v2.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v2.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.h, v2.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v43.h, v3.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v40.h, v3.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v3.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v4.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v4.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v4.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v62.h, v5.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v56.h, v5.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v5.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v47.h, v6.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v42.h, v6.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v6.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v91.h, v7.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v79.h, v7.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v7.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v74.h, v8.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v60.h, v8.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v8.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v111.h, v9.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.h, v9.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v89.h, v10.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v10.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v138.h, v11.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v108.h, v12.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.h, v12.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v153.h, v13.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v137.h, v14.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.h, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v154.h, v15.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.h, v15.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v152.h, v16.l -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v16.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v17.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v106.h, v9.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v9.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v76.h, v10.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v10.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v127.h, v11.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v11.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v104.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v12.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v142.h, v13.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.h, v13.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v14.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v143.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v15.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v141.h, v16.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v16.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v17.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v17.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v18.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v18.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 
v150.h, v19.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v19.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v19.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v20.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v147.h, v20.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v20.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v21.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v21.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v21.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v22.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v22.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v22.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v23.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v23.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v23.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v24.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v24.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v24.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v25.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v25.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v25.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v26.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v26.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v26.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v27.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v27.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v27.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v28.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v28.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v28.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v29.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v29.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v29.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v30.l +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v30.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v30.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v84.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v84.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v85.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v85.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v98.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v98.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v99.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v99.h ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5 @@ -159984,148 +161980,153 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99 ; GFX11-TRUE16-NEXT: .LBB90_2: ; %Flow ; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_4 ; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true ; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v17 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_lshlrev_b32 v31, 16, v18 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v18 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_add_f32 v31, 0x40c00000, v31 ; GFX11-TRUE16-NEXT: v_bfe_u32 
v38, v34, 16, 1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v31, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v31 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v32, v32, v31, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v31, v38, v34, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v32, v37, vcc_lo -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_lshlrev_b32 v17, 16, v17 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_add3_u32 v35, v35, v18, 0x7fff -; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_cndmask_b32 v32, v35, v36 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v35, v36, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v17, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v17 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v17, 0x7fff -; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v32 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v32 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v32 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v37, v49, vcc_lo -; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v135, v37, v49, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff0000, v19 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v31, v33, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v33, v20, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20 -; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff ; 
GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff +; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v18 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v135.h ; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v36, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v148.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v149, v33, v35, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v147, v33, v35, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36 -; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v19 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v22 +; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff +; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v31 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v17, v34, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v19, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v149.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v31 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v147.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v19, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v33, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v34 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v34 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v17, v33, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v20, 0xffff0000, v21 +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v18, 0xffff0000, v22 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v21 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v34 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v34 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18 +; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v22, 0x40c00000, v22 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 -; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_lshlrev_b32 v22, 16, v22 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v22 ; 
GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v151, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24 -; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 -; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff -; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18 +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v149, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 -; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v24 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v21 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v17, v36, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v151.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v149.h ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v24 :: v_dual_lshlrev_b32 v21, 16, v23 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v23 +; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v24, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 -; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 +; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v23 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v36 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v19, v35, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 -; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff -; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v20, 0x40c00000, v20 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v148.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v36 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v36 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v36 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v161, v19, v23, vcc_lo -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 
vcc_lo, v18, v18 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v33 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v151, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff +; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff +; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v17, v24, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 -; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v151.h ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v25 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v150.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v161.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v23 :: v_dual_lshlrev_b32 v21, 16, v25 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 @@ -160138,10 +162139,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v33 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v161, v19, v23 :: v_dual_lshlrev_b32 v22, 16, v28 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 @@ -160154,10 +162153,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v27 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v160.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v163.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v161.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v150.h ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: 
v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 @@ -160170,10 +162169,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v49 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v49 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v35 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v49 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v49 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v35 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 @@ -160186,10 +162185,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v29 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v165.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v38 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v38 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v163.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v38 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v38 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 @@ -160202,14 +162201,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v85 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v51 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v51 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v99 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v51 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v51 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v85 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v99 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_cndmask_b32 v53, v17, v24 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 @@ -160218,14 +162217,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v84 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v167.h -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v98 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v160.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 
v53.l, v165.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v84 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v98 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_cndmask_b32 v52, v19, v24 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 @@ -160234,10 +162233,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v162.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v53 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v53 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v23, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v53 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v53 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v37 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v2 @@ -160250,11 +162249,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v177.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v37 +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v167.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v2 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v22, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v22, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v1 @@ -160267,11 +162265,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_add_f32 v20, 0x40c00000, v20 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v2, 0x7fff -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v55 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v55 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v55 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v55 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v20, 16, 1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v19, v21, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v21, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v4 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 @@ -160284,10 +162282,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 ; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v1, 0x7fff -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v179.h -; 
GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v162.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v177.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v18, 16, 1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v180, v17, v19, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v19, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v3 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 @@ -160302,9 +162301,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v6 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v164.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v65 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v183, v2, v19, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v65 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v65 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v2, v19, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v17, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v3 @@ -160314,13 +162313,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v17, 0x7fff ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v183.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v179.h ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v166.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[48:49] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v43, v1, v18, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v164.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v67 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v40, v1, v18, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6 @@ -160331,13 +162330,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v43.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[50:51] ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[50:51] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v67 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, v67 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v47, v2, v17, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[48:49] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[37:38] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v67 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v42, v2, v17, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v8 @@ -160351,23 +162350,23 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: 
v_or_b32_e32 v17, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v47.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v62, v2, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v50 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v56, v2, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v5, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v48 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v1, v17, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v2, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v7 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v56.h ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v74, v3, v5, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v60, v3, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v10 @@ -160381,8 +162380,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v74.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v176.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v60.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v166.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v1, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 @@ -160390,21 +162389,20 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v91, v4, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v79, v4, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v12 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v11 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v91.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v89, v1, v4, vcc_lo -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v9 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v79.h +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v76, v1, v4 :: v_dual_lshlrev_b32 v1, 16, v9 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v99, v2, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v2, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 @@ -160412,10 +162410,10 
@@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v178.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.l, v89.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[82:83] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v98, v2, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.l, v76.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v176.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[82:83] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v2, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 @@ -160423,29 +162421,29 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[54:55] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[52:53] ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v111, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v106, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v7, v4, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v14 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.l, v111.h -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v108, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.l, v106.h +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v104, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[98:99] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[96:97] ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v115, v3, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v3, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.l, v108.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v99 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.l, v104.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v112, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v4 :: v_dual_add_f32 v3, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1 @@ -160454,8 +162452,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v180.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v138, v4, v5, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v178.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v127, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff ; 
GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 @@ -160463,19 +162461,19 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v131, v4, v5, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v129, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v137, v6, v7, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v125, v6, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v114.l, v138.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v130, v3, v4, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v40.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v128, v3, v4, vcc_lo ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2 @@ -160483,11 +162481,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v62.h -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v137.h +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v125.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.l, v127.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v153, v4, v6, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v142, v4, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 @@ -160496,83 +162494,82 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v153.h -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v152, v2, v9, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v142.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v141, v2, v9, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[114:115] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[68:69] -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v154, v7, v11, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v42.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[112:113] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v143, v7, v11, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[33:34] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[130:131] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[66:67] -; 
GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v145, v4, v8, vcc_lo +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[66:67] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[68:69] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[64:65] +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v134, v4, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.l, v152.h -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[64:65] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v131 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v144, v2, v3, vcc_lo -; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.l, v154.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v145 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v145 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v131 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v130 -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[144:145] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[37:38] +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v141.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[33:34] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v129 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v129 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v133, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v143.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v134 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v134 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v128 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v113 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[133:134] +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[128:129] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[35:36] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v144 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v115 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v115 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v114 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v99 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v98 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v83 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v83 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v82 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v68 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v66 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v65 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v64 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v54 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v52 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v50 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v48 -; GFX11-TRUE16-NEXT: .LBB90_4: ; %end +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v133 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v113 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v112 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v97 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v97 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v96 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v83 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v83 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v82 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v68 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v66 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v64 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v54 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v52 +; 
GFX11-TRUE16-NEXT: .LBB90_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v180.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v143.l +; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v178.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v152.l ; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v64.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v146.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v145.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0 ; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v65.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.l, v1.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v141.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v139.l ; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v2.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v179.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v142.l +; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v177.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v140.l ; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v66.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v144.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v1 ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v67.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v2.l, v2.h ; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v3.l, v3.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v43.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v140.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v127.l +; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v40.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v138.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v136.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v5, v2 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v131.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v3.l, v3.h ; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.l, v4.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v183.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v139.l +; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v179.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v137.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v69.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v121.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v5, v3 @@ -160580,89 +162577,89 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v4.l, v4.h ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.l, v6.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v62.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v125.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v132.l +; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v56.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v126.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v130.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v83.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v5, v4 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v105.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v107.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v6.l, v6.h ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v47.h +; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v42.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v123.l -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v98.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_and_b16 
v11.l, 0xff, v96.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v117.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v5, v6 -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v99.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v97.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v7.l, v7.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.l, v8.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v91.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v110.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v90.l -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v114.h +; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v79.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v111.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v91.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v112.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v7 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v8.l, v8.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v74.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v107.l -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v115.h +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v60.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v109.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v113.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v75.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v5, v8 -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v130.h +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v128.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v111.h +; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v106.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v95.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v131.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v101.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v129.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v5, v9 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v59.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v61.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v10.l, v10.h ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v89.h +; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v76.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v93.l -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v144.h +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v133.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v5, v10 -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v145.h +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v134.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v138.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v79.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v44.l +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v127.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v89.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v45.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v31.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v5, v11 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v118.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.l, v12.h ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v13.l, v13.h ; 
GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v108.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v77.l +; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v104.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v78.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v32.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v124.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v120.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v5, v12 ; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v13.l, v13.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v153.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v72.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v142.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v73.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v115.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v34.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v5, v13 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v109.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v105.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v137.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v61.l +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v125.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v63.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v35.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v102.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v5, v14 @@ -160670,71 +162667,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v154.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v57.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v94.l +; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v143.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v58.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v90.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v37.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v5, v15 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v101.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v86.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v16.l, v16.h ; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v17.l, v17.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v152.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v46.l +; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v141.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v47.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v38.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v78.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v74.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v5, v16 ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v48.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v17.l, v17.h ; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v18.l, v18.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v148.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v136.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v135.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v124.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v85.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v49.h ; 
GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v5, v17 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v63.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v59.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v18.l, v18.h ; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.l, v19.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v5.h ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v126.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v122.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v50.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v84.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v5, v18 ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v51.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v19.l, v19.h ; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v20.l, v20.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v150.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v122.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v56.l +; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v148.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v110.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v44.l ; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v52.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v5, v19 ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v80.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v20.l, v20.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v149.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v120.l +; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v147.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v108.l ; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v53.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v41.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v183.l ; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v5, v20 ; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v54.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v21.l, v21.h ; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v22.l, v22.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v160.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v106.l +; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v150.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v94.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l ; GFX11-TRUE16-NEXT: v_and_b16 v34.l, 0xff, v55.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v5, v21 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v181.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v180.l ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v22.l, v22.h ; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v23.l, v23.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v151.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v104.l +; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v149.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v92.l ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[6:9], off offset:16 @@ -160742,71 +162739,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v23.l, v23.h ; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.l, v24.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v162.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v92.l +; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v160.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v88.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | 
instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v5, v23 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v24.l, v24.h ; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v25.l, v25.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v161.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v88.l +; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v151.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v77.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v5, v24 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v25.l, v25.h ; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v26.l, v26.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v164.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v76.l +; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v162.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v72.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v5, v25 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v26.l, v26.h ; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v27.l, v27.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v163.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v73.l +; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v161.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v62.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v5, v26 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v27.l, v27.h ; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v28.l, v28.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v166.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v60.l +; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v164.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v57.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v5, v27 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v28.l, v28.h ; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v29.l, v29.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v165.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v58.l +; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v163.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v46.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v5, v28 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.l, v29.h ; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v30.l, v30.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v176.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v45.l +; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v166.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v43.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v5, v29 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v30.l, v30.h ; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v31.l, v31.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v167.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v42.l +; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v165.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v41.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v5, v30 ; 
GFX11-TRUE16-NEXT: v_or_b16 v5.l, v31.l, v31.h ; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v32.l, v32.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v178.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v40.l +; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v176.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v182.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v5, v31 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v32.l, v32.h ; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v33.l, v33.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h -; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v177.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v182.l +; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v167.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v181.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v5, v32 ; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v33.l, v33.h @@ -160822,66 +162819,64 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[26:29], off offset:96 ; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[30:33], off offset:112 ; GFX11-TRUE16-NEXT: s_clause 0x1f -; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:12 -; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:16 -; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:20 -; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:24 -; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:28 -; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:32 -; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:36 -; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:40 -; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:44 -; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:48 -; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:52 -; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:56 -; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:60 -; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:64 -; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:68 -; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:72 -; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:76 -; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:80 -; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:84 -; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:88 -; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:92 -; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:96 -; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:100 -; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:104 -; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:108 -; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:112 -; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:116 -; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:120 -; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:124 -; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:128 -; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:132 -; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:136 -; GFX11-TRUE16-NEXT: s_clause 0x1a -; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:140 -; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:144 -; GFX11-TRUE16-NEXT: 
scratch_load_b32 v88, off, s32 offset:148 -; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:152 -; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:156 -; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:160 -; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:164 -; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:168 -; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:172 -; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:176 -; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:180 -; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:184 -; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:188 -; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:192 -; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:196 -; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:200 -; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:204 -; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:208 -; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:212 -; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:216 -; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:220 -; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:224 -; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:228 -; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:232 -; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:236 -; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:240 -; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:244 +; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:12 +; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:16 +; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:20 +; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:24 +; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:28 +; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:32 +; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:36 +; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:40 +; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:44 +; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:48 +; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:52 +; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:56 +; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:60 +; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:64 +; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:68 +; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:72 +; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:76 +; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:80 +; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:84 +; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:88 +; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:92 +; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:96 +; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:100 +; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:104 +; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:108 +; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:112 +; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:116 +; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:120 +; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:124 +; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:128 +; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 
offset:132 +; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:136 +; GFX11-TRUE16-NEXT: s_clause 0x18 +; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:140 +; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:144 +; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:148 +; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:152 +; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:156 +; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:160 +; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:164 +; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:168 +; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:172 +; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:176 +; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:180 +; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:184 +; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:188 +; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:192 +; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:196 +; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:200 +; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:204 +; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:208 +; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:212 +; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:216 +; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:220 +; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:224 +; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:228 +; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:232 +; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:236 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -162079,613 +164074,652 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a ; SI-LABEL: bitcast_v64bf16_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, 
off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:72 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_mul_f32_e32 v59, 1.0, v1 -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 
offset:52 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:72 +; SI-NEXT: v_writelane_b32 v63, s30, 0 +; SI-NEXT: v_writelane_b32 v63, s31, 1 +; SI-NEXT: v_writelane_b32 v63, s34, 2 +; SI-NEXT: v_writelane_b32 v63, s35, 3 +; SI-NEXT: v_writelane_b32 v63, s36, 4 +; SI-NEXT: v_writelane_b32 v63, s37, 5 +; SI-NEXT: v_writelane_b32 v63, s38, 6 +; SI-NEXT: v_writelane_b32 v63, s39, 7 +; SI-NEXT: v_writelane_b32 v63, s48, 8 +; SI-NEXT: v_writelane_b32 v63, s49, 9 +; SI-NEXT: v_writelane_b32 v63, s50, 10 +; SI-NEXT: v_writelane_b32 v63, s51, 11 +; SI-NEXT: v_writelane_b32 v63, s52, 12 +; SI-NEXT: v_writelane_b32 v63, s53, 13 +; SI-NEXT: v_writelane_b32 v63, s54, 14 +; SI-NEXT: v_writelane_b32 v63, s55, 15 +; SI-NEXT: v_writelane_b32 v63, s64, 16 +; SI-NEXT: v_writelane_b32 v63, s65, 17 +; SI-NEXT: v_writelane_b32 v63, s66, 18 +; SI-NEXT: v_writelane_b32 v63, s67, 19 +; SI-NEXT: v_writelane_b32 v63, s68, 20 +; SI-NEXT: v_writelane_b32 v63, s69, 21 +; SI-NEXT: v_writelane_b32 v63, s70, 22 +; SI-NEXT: v_writelane_b32 v63, s71, 23 +; SI-NEXT: v_writelane_b32 v63, s80, 24 +; SI-NEXT: v_writelane_b32 v63, s81, 25 +; SI-NEXT: v_writelane_b32 v63, s82, 26 +; SI-NEXT: v_writelane_b32 v63, s83, 27 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mul_f32_e32 v56, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v10 +; SI-NEXT: v_writelane_b32 v63, s84, 28 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v14 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v9 +; SI-NEXT: v_writelane_b32 v63, s85, 29 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v18 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v14 +; SI-NEXT: v_writelane_b32 v63, s86, 30 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v22 -; SI-NEXT: v_mul_f32_e32 v46, 1.0, v2 -; SI-NEXT: v_mul_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_mul_f32_e32 v61, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v44, 1.0, v8 -; SI-NEXT: v_mul_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v56, 1.0, v10 -; SI-NEXT: v_mul_f32_e32 v63, 1.0, v9 -; SI-NEXT: v_mul_f32_e32 v47, 1.0, v12 -; SI-NEXT: v_mul_f32_e32 v57, 1.0, v11 -; SI-NEXT: v_mul_f32_e32 v45, 1.0, v13 -; SI-NEXT: v_mul_f32_e32 v58, 1.0, v15 -; SI-NEXT: v_mul_f32_e32 v18, 1.0, v17 -; SI-NEXT: v_mul_f32_e32 v62, 1.0, v20 -; SI-NEXT: v_mul_f32_e32 v60, 1.0, v19 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v21, 1.0, v21 -; SI-NEXT: v_mul_f32_e32 v19, 1.0, v24 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v23 -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v26 -; SI-NEXT: v_mul_f32_e32 v24, 1.0, v25 -; SI-NEXT: v_mul_f32_e32 v25, 1.0, v28 -; SI-NEXT: v_mul_f32_e32 v27, 1.0, v27 -; SI-NEXT: v_mul_f32_e32 v20, 1.0, v30 -; SI-NEXT: v_mul_f32_e32 v26, 
1.0, v29 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v15 +; SI-NEXT: v_writelane_b32 v63, s87, 31 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e64 v1, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v11, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v5, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v6, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v10, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v13, 1.0, s24 -; SI-NEXT: v_mul_f32_e64 v12, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v17, 1.0, s28 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v18 +; SI-NEXT: v_writelane_b32 v63, s96, 32 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v26 +; SI-NEXT: v_writelane_b32 v63, s97, 33 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: v_writelane_b32 v63, s98, 34 +; SI-NEXT: v_mov_b32_e32 v46, v21 +; SI-NEXT: v_writelane_b32 v63, s99, 35 +; SI-NEXT: v_mul_f32_e32 v47, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v32, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v58, 1.0, v6 +; SI-NEXT: v_mul_f32_e32 v41, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v59, 1.0, v8 +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v5, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v60, 1.0, v11 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_mul_f32_e32 v31, 1.0, v16 -; SI-NEXT: v_mul_f32_e32 v29, 1.0, v32 -; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33 +; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_mul_f32_e32 v21, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_mul_f32_e32 v9, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v12, 1.0, v19 +; SI-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_mul_f32_e32 v20, 1.0, v46 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v48 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v46, 1.0, v23 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v52 +; SI-NEXT: v_mul_f32_e32 v26, 1.0, v25 +; SI-NEXT: v_mul_f32_e32 v57, 1.0, v28 +; SI-NEXT: v_mul_f32_e32 v16, 1.0, v27 +; SI-NEXT: v_mul_f32_e32 v28, 1.0, v30 +; SI-NEXT: v_mul_f32_e32 v30, 1.0, v29 +; SI-NEXT: v_mul_f32_e32 v31, 1.0, v33 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v34 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e32 v32, 1.0, v34 -; SI-NEXT: v_mul_f32_e32 v14, 1.0, v35 -; SI-NEXT: v_mul_f32_e32 v35, 1.0, v36 -; SI-NEXT: v_mul_f32_e32 v37, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v29, 1.0, v35 +; SI-NEXT: v_mul_f32_e32 v36, 1.0, v36 +; SI-NEXT: v_mul_f32_e32 v35, 1.0, v37 ; SI-NEXT: v_mul_f32_e32 v34, 1.0, v38 -; SI-NEXT: v_mul_f32_e32 v15, 1.0, v39 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v48 +; SI-NEXT: v_mul_f32_e32 v37, 1.0, v39 ; SI-NEXT: v_mul_f32_e32 v48, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v50 +; SI-NEXT: v_mul_f32_e32 v33, 1.0, v51 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_mul_f32_e32 v51, 1.0, v53 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v54 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v28, 1.0, v50 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v55 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v51 +; SI-NEXT: v_mul_f32_e32 v38, 1.0, v40 ; SI-NEXT: 
s_waitcnt vmcnt(11) -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v52 +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v42 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v53 +; SI-NEXT: v_mul_f32_e32 v54, 1.0, v43 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_mul_f32_e32 v33, 1.0, v54 +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v44 ; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_mul_f32_e32 v36, 1.0, v55 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_mul_f32_e32 v55, 1.0, v40 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_mul_f32_e32 v41, 1.0, v41 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_mul_f32_e32 v42, 1.0, v42 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v43 -; SI-NEXT: v_mul_f32_e64 v39, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v49, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v16, 1.0, s29 -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB91_2 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mov_b32_e32 v43, v36 -; SI-NEXT: v_alignbit_b32 v36, v1, v2, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 -; SI-NEXT: v_alignbit_b32 v6, v1, v6, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v10 -; SI-NEXT: v_alignbit_b32 v2, v1, v13, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v16 -; SI-NEXT: v_alignbit_b32 v5, v1, v17, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4 -; SI-NEXT: v_alignbit_b32 v4, v1, v3, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v44 -; SI-NEXT: v_alignbit_b32 v3, v1, v7, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v47 -; SI-NEXT: v_alignbit_b32 v16, v1, v57, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v31 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v13, v1, v58, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v62 -; SI-NEXT: v_alignbit_b32 v10, v1, v60, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v11 -; SI-NEXT: v_alignbit_b32 v44, v19, v8, 16 -; SI-NEXT: v_alignbit_b32 v7, v1, v22, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v25 -; SI-NEXT: v_alignbit_b32 v8, v44, v36, 24 -; SI-NEXT: v_alignbit_b32 v60, v1, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v29 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v44, v36, 16 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v39 -; SI-NEXT: v_alignbit_b32 v57, v1, v30, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v35 
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v44, v36, 8 -; SI-NEXT: v_alignbit_b32 v58, v22, v9, 16 -; SI-NEXT: v_alignbit_b32 v40, v1, v37, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v58, v6, 24 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v58, v6, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v49 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v58, v6, 8 -; SI-NEXT: v_alignbit_b32 v47, v25, v12, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v47, v2, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v47, v2, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v47, v2, 8 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v38 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v46 -; SI-NEXT: v_alignbit_b32 v53, v1, v48, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v50 -; SI-NEXT: v_alignbit_b32 v50, v8, v59, 16 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v45 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v5, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v5, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v5, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v52, v1, v52, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v55 -; SI-NEXT: v_mov_b32_e32 v17, v63 -; SI-NEXT: v_alignbit_b32 v1, v1, v41, 16 -; SI-NEXT: s_mov_b64 s[4:5], 0 -; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v31 -; SI-NEXT: v_alignbit_b32 v62, v8, v61, 16 +; SI-NEXT: v_mul_f32_e64 v2, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v11, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v3, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v4, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v14, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v15, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v7, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v6, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v18, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v19, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v10, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v23, 1.0, s29 +; SI-NEXT: v_mul_f32_e64 v25, 1.0, s28 +; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, 
s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:524 ; 4-byte 
Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB91_2 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v3 +; SI-NEXT: v_mov_b32_e32 v42, v37 +; SI-NEXT: v_alignbit_b32 v37, v2, v11, 16 +; SI-NEXT: v_alignbit_b32 v11, v44, v4, 16 +; SI-NEXT: v_readfirstlane_b32 s4, v37 +; SI-NEXT: v_readfirstlane_b32 s5, v11 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v14 +; SI-NEXT: v_writelane_b32 v62, s6, 0 +; SI-NEXT: v_alignbit_b32 v2, v2, v15, 16 +; SI-NEXT: v_writelane_b32 v62, s7, 1 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v7 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v62, v4, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v62, v4, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v56 -; SI-NEXT: v_alignbit_b32 v55, v8, v63, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v48, v62, v4, 8 -; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v31 -; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v37 -; SI-NEXT: v_alignbit_b32 v38, v8, v45, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v38, v16, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v9 -; SI-NEXT: v_alignbit_b32 v35, v8, v18, 16 -; SI-NEXT: v_mov_b32_e32 v45, v8 -; SI-NEXT: v_alignbit_b32 v8, v35, v13, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v35, v13, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v29, v35, v13, 8 -; SI-NEXT: 
v_alignbit_b32 v61, v38, v16, 24 -; SI-NEXT: v_alignbit_b32 v41, v38, v16, 16 -; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v59 -; SI-NEXT: v_alignbit_b32 v30, v8, v21, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v30, v10, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v30, v10, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v30, v10, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v23 -; SI-NEXT: v_alignbit_b32 v27, v8, v24, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v27, v7, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v27, v7, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v27, v7, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v20 -; SI-NEXT: v_alignbit_b32 v24, v8, v26, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v24, v60, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v24, v60, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v32 -; SI-NEXT: v_alignbit_b32 v21, v8, v14, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v21, v57, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v21, v57, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v21, v57, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v34 -; SI-NEXT: v_alignbit_b32 v18, v8, v15, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v18 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v14, v52, v6, 16 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: v_alignbit_b32 v2, v2, v19, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[16:17], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v18, v40, 24 -; SI-NEXT: 
buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v10 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v19, v2, v8, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v18, v40, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v8, v18, v40, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v23 +; SI-NEXT: v_readfirstlane_b32 s5, v19 +; SI-NEXT: v_alignbit_b32 v2, v2, v25, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v56 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[18:19], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[22:23], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: v_alignbit_b32 v47, v45, v47, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v28 -; SI-NEXT: v_alignbit_b32 v63, v8, v51, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_readfirstlane_b32 s5, v47 +; SI-NEXT: v_alignbit_b32 v2, v2, v1, 16 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v58 +; SI-NEXT: s_lshr_b64 s[20:21], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[24:25], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: v_mov_b32_e32 v4, v58 +; SI-NEXT: v_alignbit_b32 v58, v8, v41, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v63, v53, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v59 +; SI-NEXT: v_readfirstlane_b32 s5, v58 +; SI-NEXT: v_alignbit_b32 v2, v2, v61, 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v63, v53, 16 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v33 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v2, v2, v60, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v23, v22 +; SI-NEXT: v_mov_b32_e32 v40, v36 +; SI-NEXT: s_mov_b64 vcc, 0 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v56 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v18 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_alignbit_b32 v41, v15, v6, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v41 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[4:5], 8 +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v63, v53, 8 -; SI-NEXT: v_alignbit_b32 v12, v40, v43, 16 -; SI-NEXT: buffer_store_dword 
v8, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v21 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_alignbit_b32 v59, v1, v13, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s5, v59 +; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 +; SI-NEXT: s_lshr_b64 s[56:57], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[4:5], 8 +; SI-NEXT: v_alignbit_b32 v61, v1, v17, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v61 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v58 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_alignbit_b32 v2, v2, v21, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v12, v52, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v9 +; SI-NEXT: v_alignbit_b32 v2, v2, v12, 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v12, v52, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v22 +; SI-NEXT: v_alignbit_b32 v60, v2, v20, 16 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v12, v52, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v24 +; SI-NEXT: v_alignbit_b32 v1, v2, v46, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v60 +; SI-NEXT: s_lshr_b64 s[76:77], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[4:5], 8 +; SI-NEXT: v_readfirstlane_b32 s4, v1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v42 -; SI-NEXT: v_mov_b32_e32 v15, v9 -; SI-NEXT: v_alignbit_b32 v9, v8, v54, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v1, v5 +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v9, v1, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v23 +; SI-NEXT: v_mov_b32_e32 v5, v28 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v10 +; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v47 +; SI-NEXT: v_lshrrev_b32_e32 v12, 8, v41 +; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v61 +; SI-NEXT: v_lshrrev_b32_e32 v23, 8, v60 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v20 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 
offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v25, v2, v26, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v9, v1, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v57 +; SI-NEXT: v_readfirstlane_b32 s5, v25 +; SI-NEXT: v_alignbit_b32 v2, v2, v16, 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v9, v1, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v28 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v22, v2, v30, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v31 +; SI-NEXT: v_readfirstlane_b32 s5, v22 +; SI-NEXT: v_alignbit_b32 v2, v2, v27, 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v8, v37 -; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v49 -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v29 +; SI-NEXT: v_alignbit_b32 v17, v2, v36, 16 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v46 -; SI-NEXT: v_lshrrev_b32_e32 v46, 24, v56 -; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v32 -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v8 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_readfirstlane_b32 s5, v17 +; SI-NEXT: v_alignbit_b32 v2, v2, v34, 16 +; SI-NEXT: s_lshr_b64 s[48:49], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[50:51], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v34 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v42 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v20 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v38 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v28 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v22 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v15 -; SI-NEXT: 
buffer_store_dword v56, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v29 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v29, v37 +; SI-NEXT: v_mov_b32_e32 v37, v42 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v33 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v17 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v59 -; SI-NEXT: v_lshrrev_b32_e32 v20, 24, v20 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v37 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v12 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v33 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v34, 24, v4 +; SI-NEXT: v_lshrrev_b32_e32 v16, 24, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 8, v11 +; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v7, 8, v14 +; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v19 +; SI-NEXT: v_lshrrev_b32_e32 v4, 8, v59 +; SI-NEXT: v_lshrrev_b32_e32 v35, 24, v43 +; SI-NEXT: v_mov_b32_e32 v31, v20 +; SI-NEXT: v_mov_b32_e32 v20, v34 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_alignbit_b32 v30, v2, v36, 16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_alignbit_b32 v2, v2, v39, 16 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s5, v30 +; SI-NEXT: s_lshr_b64 s[54:55], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[64:65], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v30 -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v28, v36 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_alignbit_b32 v57, v2, v39, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v20, 8, v24 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v51 +; SI-NEXT: v_readfirstlane_b32 s5, v57 +; SI-NEXT: v_alignbit_b32 v2, v2, v50, 16 +; SI-NEXT: s_lshr_b64 s[66:67], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[70:71], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v42 -; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v11 -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v39 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v49 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v46, v2, v38, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v23 -; SI-NEXT: 
buffer_store_dword v20, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v55 +; SI-NEXT: v_readfirstlane_b32 s5, v46 +; SI-NEXT: v_alignbit_b32 v2, v2, v54, 16 +; SI-NEXT: s_lshr_b64 s[80:81], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[84:85], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v20, v29 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v21 -; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v18 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v43 +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v57 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v38, v2, v53, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v9 -; SI-NEXT: v_alignbit_b32 v26, v24, v60, 16 -; SI-NEXT: v_lshrrev_b32_e32 v51, 8, v44 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 24, v18 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v58 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v47 -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v50 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v49 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v62 -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v2, v32 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v55 -; SI-NEXT: v_lshrrev_b32_e32 v15, 8, v35 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v46 +; SI-NEXT: v_readfirstlane_b32 s5, v38 +; SI-NEXT: v_lshrrev_b32_e32 v2, 24, v2 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v27 -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v29, v28 -; SI-NEXT: v_mov_b32_e32 v23, v48 +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v38 +; SI-NEXT: s_lshr_b64 s[86:87], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[98:99], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[4:5], 8 +; SI-NEXT: v_mov_b32_e32 v32, v8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v18, 8, v25 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v63 -; SI-NEXT: v_mov_b32_e32 v48, v33 -; SI-NEXT: v_mov_b32_e32 v34, v53 -; SI-NEXT: v_mov_b32_e32 v53, v42 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 8, v30 +; SI-NEXT: v_mov_b32_e32 v55, v49 +; SI-NEXT: v_mov_b32_e32 v49, v15 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 
offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v8, v6 ; SI-NEXT: s_branch .LBB91_3 ; SI-NEXT: .LBB91_2: -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; 
SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mov_b32_e32 v55, v49 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: v_writelane_b32 v62, s4, 0 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: v_mov_b32_e32 v40, v36 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: s_mov_b64 vcc, -1 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: v_writelane_b32 v62, s5, 1 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $vgpr44 +; SI-NEXT: ; implicit-def: $vgpr16 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr27 +; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $vgpr60 +; SI-NEXT: ; implicit-def: $vgpr23 +; 
SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; kill: killed $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 @@ -162703,1130 +164737,975 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: v_mov_b32_e32 v53, v42 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_mov_b32_e32 v48, v33 -; SI-NEXT: v_mov_b32_e32 v29, v28 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_mov_b32_e32 v17, v63 -; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; kill: killed $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; kill: killed $vgpr2 ; 
SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; kill: killed $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; kill: killed $vgpr56 +; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: .LBB91_3: ; %Flow +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v56, v17 -; SI-NEXT: v_mov_b32_e32 v54, v61 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v42, v32 -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: s_andn2_b64 vcc, 
exec, vcc ; SI-NEXT: s_cbranch_vccnz .LBB91_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v43 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_alignbit_b32 v7, v3, v2, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v1 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v37 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v8 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_alignbit_b32 v10, v6, v4, 16 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v33 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s52, v10 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v51 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; 
SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v15 -; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 -; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_alignbit_b32 v9, v3, v2, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s86, v9 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v50 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v44, 0xffff0000, v44 -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_alignbit_b32 v52, v3, v2, 16 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_alignbit_b32 v12, v3, v2, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s80, v12 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_alignbit_b32 v13, v3, v2, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s66, v13 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16 -; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v33 +; SI-NEXT: 
v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v38, v4, v3, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v55 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v15 ; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_alignbit_b32 v51, v5, v4, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v53 -; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v15, v7, v6, 16 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v42 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v46, v6, v3, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v48 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v39 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_alignbit_b32 v57, v7, v5, 16 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v26 -; SI-NEXT: v_alignbit_b32 v9, v7, v6, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v32 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v23, v7, v6, 16 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v53 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v57, v6, v3, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v48 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v11 -; SI-NEXT: v_alignbit_b32 v12, v8, v7, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v28 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v54 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v28 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_alignbit_b32 v60, v10, v6, 16 -; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v29 -; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v16, v7, v6, 16 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, 
v1 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v30, v6, v3, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v21 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 -; SI-NEXT: v_alignbit_b32 v7, v13, v7, 16 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v31 -; SI-NEXT: v_alignbit_b32 v63, v13, v10, 16 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v40 +; SI-NEXT: v_alignbit_b32 v18, v9, v7, 16 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v34 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_alignbit_b32 v20, v10, v9, 16 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s38, v15 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s90, v16 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s30, v23 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s76, v18 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s62, v20 +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v10 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v37 -; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v4 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s5, v38 +; SI-NEXT: v_readfirstlane_b32 s87, v46 +; SI-NEXT: v_readfirstlane_b32 s81, v57 +; SI-NEXT: v_readfirstlane_b32 s67, v30 +; SI-NEXT: s_lshr_b64 s[54:55], s[66:67], 24 +; SI-NEXT: s_lshr_b64 s[64:65], s[66:67], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[66:67], 8 +; SI-NEXT: s_lshr_b64 s[66:67], s[80:81], 24 +; SI-NEXT: s_lshr_b64 s[70:71], s[80:81], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[80:81], 8 +; SI-NEXT: s_lshr_b64 s[80:81], s[86:87], 24 +; SI-NEXT: s_lshr_b64 s[84:85], s[86:87], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[86:87], 8 +; SI-NEXT: s_lshr_b64 s[86:87], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[98:99], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[4:5], 8 +; SI-NEXT: v_lshrrev_b32_e32 v35, 24, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 8, v30 +; SI-NEXT: buffer_load_dword v18, 
off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v17, v7, v3, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s53, v17 +; SI-NEXT: s_lshr_b64 s[48:49], s[52:53], 24 +; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 -; SI-NEXT: v_alignbit_b32 v10, v14, v10, 16 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v32 -; SI-NEXT: v_alignbit_b32 v18, v14, v13, 16 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_alignbit_b32 v21, v12, v10, 16 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_readfirstlane_b32 s56, v21 +; SI-NEXT: s_lshr_b64 s[50:51], s[52:53], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[52:53], 8 +; SI-NEXT: v_lshrrev_b32_e32 v6, 24, v6 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_alignbit_b32 v22, v9, v3, 16 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_alignbit_b32 v23, v13, v12, 16 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 
v13, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v49, 0x40c00000, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v21, v15, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v31 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v9 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s42, v23 +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v22 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v17 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v57 +; SI-NEXT: v_readfirstlane_b32 s39, v22 +; SI-NEXT: v_lshrrev_b32_e32 v9, 24, v9 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v46 +; SI-NEXT: s_lshr_b64 s[36:37], s[38:39], 16 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v38 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v21 +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 -; SI-NEXT: v_alignbit_b32 v13, v16, v13, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v15 -; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v16 -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v23 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v24, v15, v13, 16 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v25, v10, v3, 16 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s31, v25 +; SI-NEXT: v_readfirstlane_b32 s26, v24 +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: s_lshr_b64 s[94:95], s[30:31], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[30:31], 8 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, 
v23 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v23 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: v_alignbit_b32 v26, v16, v15, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_readfirstlane_b32 s20, v26 +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v21 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 ; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 -; SI-NEXT: v_alignbit_b32 v16, v19, v16, 16 -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v19 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_alignbit_b32 v24, v15, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v20 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v24 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_alignbit_b32 v27, v15, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v3, v22, v19, 16 -; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v59 -; SI-NEXT: v_add_f32_e32 v54, 0x40c00000, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v54 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: v_add_f32_e32 v59, 0x40c00000, v44 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v59 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_alignbit_b32 v30, v15, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v25 -; SI-NEXT: v_alignbit_b32 v4, v25, v22, 16 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; 
SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v40, 0x40c00000, v25 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v40 -; SI-NEXT: v_alignbit_b32 v35, v45, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 +; SI-NEXT: v_alignbit_b32 v27, v18, v16, 16 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v10 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v60, v12, v3, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s91, v60 +; SI-NEXT: v_readfirstlane_b32 s14, v27 +; SI-NEXT: v_lshrrev_b32_e32 v10, 24, v10 +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: s_lshr_b64 s[88:89], s[90:91], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[90:91], 8 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; SI-NEXT: v_alignbit_b32 v29, v20, v18, 16 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s8, v29 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12 +; SI-NEXT: v_alignbit_b32 v61, v11, v3, 16 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s77, v61 +; SI-NEXT: v_lshrrev_b32_e32 v12, 24, v12 +; SI-NEXT: s_lshr_b64 s[74:75], s[76:77], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[76:77], 8 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v61 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v13 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v13, 24, v13 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v18 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v59, v36, v3, 16 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v8 +; 
SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s63, v59 +; SI-NEXT: s_lshr_b64 s[60:61], s[62:63], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[62:63], 8 +; SI-NEXT: v_lshrrev_b32_e32 v4, 8, v59 +; SI-NEXT: v_lshrrev_b32_e32 v18, 8, v25 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v15 -; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_alignbit_b32 v5, v28, v25, 16 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v41, 0x40c00000, v28 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v41 -; SI-NEXT: v_alignbit_b32 v38, v15, v14, 16 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v56 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v20 +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v33 -; SI-NEXT: v_alignbit_b32 v2, v33, v28, 16 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v3 +; SI-NEXT: v_alignbit_b32 v41, v49, v15, 16 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s57, v41 +; SI-NEXT: v_lshrrev_b32_e32 v3, 24, v3 +; SI-NEXT: s_lshr_b64 s[46:47], s[56:57], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[56:57], 8 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v12, 8, v41 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v43, 0x40c00000, v33 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v15 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33 -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v43 -; SI-NEXT: v_alignbit_b32 v55, v61, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v36 -; 
SI-NEXT: v_lshrrev_b32_e32 v36, 16, v36 -; SI-NEXT: v_alignbit_b32 v6, v36, v33, 16 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v15 +; SI-NEXT: v_alignbit_b32 v58, v32, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s43, v58 +; SI-NEXT: s_lshr_b64 s[40:41], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[42:43], 8 +; SI-NEXT: v_lshrrev_b32_e32 v20, 24, v15 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v58 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 -; SI-NEXT: v_add_f32_e32 v46, 0x40c00000, v36 -; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v46 -; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v36 -; SI-NEXT: v_alignbit_b32 v62, v15, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v39, 0x40c00000, v39 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v39 -; SI-NEXT: v_alignbit_b32 v36, v39, v36, 16 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_alignbit_b32 v47, v45, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s27, v47 +; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v47 +; SI-NEXT: s_lshr_b64 s[24:25], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[26:27], 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v39 -; SI-NEXT: v_add_f32_e32 v42, 0x40c00000, v39 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v42 -; SI-NEXT: v_alignbit_b32 v50, v17, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_alignbit_b32 v19, v11, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s21, v19 +; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v19 +; SI-NEXT: s_lshr_b64 s[18:19], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[22:23], s[20:21], 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v39 -; SI-NEXT: v_add_f32_e32 v56, 0x40c00000, v39 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v56 -; SI-NEXT: v_alignbit_b32 v47, v25, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) 
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_alignbit_b32 v14, v52, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s15, v14 +; SI-NEXT: v_lshrrev_b32_e32 v7, 8, v14 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v39 -; SI-NEXT: v_add_f32_e32 v39, 0x40c00000, v39 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v39 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_alignbit_b32 v11, v44, v16, 16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 24, v23 +; SI-NEXT: v_lshrrev_b32_e32 v23, 8, v60 +; SI-NEXT: v_readfirstlane_b32 s9, v11 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v3, 8, v11 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v62, s6, 0 +; SI-NEXT: v_writelane_b32 v62, s7, 1 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[20:21], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[56:57], 24 +; SI-NEXT: s_lshr_b64 s[56:57], s[62:63], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[76:77], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[90:91], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[30:31], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[38:39], 24 +; SI-NEXT: s_lshr_b64 s[38:39], s[38:39], 8 +; SI-NEXT: .LBB91_5: ; %end +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v29 +; SI-NEXT: s_lshl_b32 s5, s10, 8 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s6, 0xff +; SI-NEXT: v_readlane_b32 s6, v62, 0 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v39 -; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v50 -; SI-NEXT: v_alignbit_b32 v58, v22, v14, 16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v16 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v56 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s16, 8 +; SI-NEXT: s_lshl_b32 s6, s8, 24 +; SI-NEXT: v_add_i32_e32 v5, vcc, 8, v0 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v26 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: s_lshl_b32 s4, s4, 8 +; SI-NEXT: v_readlane_b32 s7, v62, 1 +; SI-NEXT: 
v_readlane_b32 s99, v63, 35 +; SI-NEXT: v_readlane_b32 s97, v63, 33 +; SI-NEXT: v_readlane_b32 s87, v63, 31 +; SI-NEXT: v_readlane_b32 s85, v63, 29 +; SI-NEXT: v_readlane_b32 s83, v63, 27 +; SI-NEXT: v_readlane_b32 s81, v63, 25 +; SI-NEXT: v_readlane_b32 s71, v63, 23 +; SI-NEXT: v_readlane_b32 s69, v63, 21 +; SI-NEXT: v_readlane_b32 s67, v63, 19 +; SI-NEXT: v_readlane_b32 s65, v63, 17 +; SI-NEXT: v_readlane_b32 s55, v63, 15 +; SI-NEXT: v_readlane_b32 s53, v63, 13 +; SI-NEXT: v_readlane_b32 s51, v63, 11 +; SI-NEXT: v_readlane_b32 s49, v63, 9 +; SI-NEXT: v_readlane_b32 s39, v63, 7 +; SI-NEXT: v_readlane_b32 s37, v63, 5 +; SI-NEXT: v_readlane_b32 s35, v63, 3 +; SI-NEXT: v_readlane_b32 s31, v63, 1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s12, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v42 -; SI-NEXT: v_lshrrev_b32_e32 v42, 8, v63 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v1, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v52 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v46 -; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v55 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s22, 8 +; SI-NEXT: s_lshl_b32 s6, s14, 24 +; SI-NEXT: v_add_i32_e32 v5, vcc, 16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v24 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s18, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v43 -; SI-NEXT: v_alignbit_b32 v43, v38, v16, 8 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v1, 0xff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s28, 8 +; SI-NEXT: s_lshl_b32 s6, s20, 24 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v41 -; SI-NEXT: v_alignbit_b32 v41, v38, v16, 16 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v40 -; SI-NEXT: v_mov_b32_e32 v40, v8 -; SI-NEXT: 
v_lshrrev_b32_e32 v8, 24, v54 -; SI-NEXT: v_alignbit_b32 v54, v38, v16, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v20 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v20, v35, v13, 8 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v15, 8, v35 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v23 -; SI-NEXT: v_alignbit_b32 v23, v62, v4, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v49 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v47 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v32 -; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v18 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v31 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v62 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v11 -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v58 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v26 -; SI-NEXT: v_alignbit_b32 v26, v24, v60, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_alignbit_b32 v44, v19, v14, 16 -; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v59 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v44, v36, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v44, v36, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v44, v36, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v58, v6, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v58, v6, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v58, v6, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v47, v2, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v47, v2, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v47, v2, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v5, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v5, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v5, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v62, v4, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v62, v4, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v35, v13, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v35, v13, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v30, v10, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v30, v10, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v30, v10, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v27, v7, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v27, v7, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v27, v7, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v24, v60, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v24, v60, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v21, v57, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v21, v57, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v21, v57, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v18, v51, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v18, v51, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword 
v51, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v8, v18, v51, 8 -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v51, 8, v44 -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v21 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v63, v34, 24 -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v9 -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v63, v34, 16 -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v63, v34, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v12, v52, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v12, v52, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v12, v52, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v9, v1, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v9, v1, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v9, v1, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v38 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v30 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v27 -; SI-NEXT: .LBB91_5: ; %end -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v36, 0xff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v28 -; SI-NEXT: v_or_b32_e32 v32, v36, v32 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v36, 0xff, v29 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36 -; SI-NEXT: 
v_and_b32_e32 v32, 0xffff, v32 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v29 -; SI-NEXT: v_or_b32_e32 v36, v56, v36 -; SI-NEXT: v_or_b32_e32 v32, v32, v36 -; SI-NEXT: buffer_store_dword v32, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v32, 0xff, v44 -; SI-NEXT: v_lshlrev_b32_e32 v36, 8, v51 -; SI-NEXT: v_or_b32_e32 v32, v32, v36 -; SI-NEXT: v_and_b32_e32 v36, 0xff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36 -; SI-NEXT: v_or_b32_e32 v14, v14, v36 -; SI-NEXT: v_and_b32_e32 v32, 0xffff, v32 -; SI-NEXT: v_or_b32_e32 v14, v32, v14 -; SI-NEXT: v_add_i32_e32 v32, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v14, v32, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xff, v6 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v14, v14, v32 -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v32, 0xff, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v19 -; SI-NEXT: v_or_b32_e32 v32, v33, v32 -; SI-NEXT: v_or_b32_e32 v14, v14, v32 -; SI-NEXT: v_add_i32_e32 v32, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v14, v32, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xff, v58 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v32, 24, v6 -; SI-NEXT: v_or_b32_e32 v14, v32, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v2 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v60 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v19 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v28, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v49 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v2 -; SI-NEXT: v_or_b32_e32 v14, v28, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: 
v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v57 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v19 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v56 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v25, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s24, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v39 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v2 -; SI-NEXT: v_or_b32_e32 v14, v25, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v9 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v45 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v23 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s44, 8 +; SI-NEXT: s_lshl_b32 s6, s26, 24 +; SI-NEXT: v_add_i32_e32 v5, vcc, 32, v0 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v20 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v17 -; SI-NEXT: v_or_b32_e32 v14, v22, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: 
buffer_load_dword v17, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s40, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v62 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v31 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v14, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v17 -; SI-NEXT: v_or_b32_e32 v14, v22, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v58 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v13 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v34 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v17 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s58, 8 +; SI-NEXT: s_lshl_b32 s6, s42, 24 +; SI-NEXT: v_add_i32_e32 v5, vcc, 40, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v19, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s46, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v55 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v46 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v61 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v12 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v49 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s72, 8 +; SI-NEXT: s_lshl_b32 s6, s56, 24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: 
v_lshlrev_b32_e32 v19, 24, v17 -; SI-NEXT: v_or_b32_e32 v14, v19, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v43 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v41 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v54 -; SI-NEXT: v_or_b32_e32 v14, v16, v14 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v38 -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v2 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v16, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s60, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v20 -; SI-NEXT: v_or_b32_e32 v11, v11, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v36 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s78, 8 +; SI-NEXT: s_lshl_b32 s6, s62, 24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 -; SI-NEXT: v_or_b32_e32 v13, v14, v13 -; SI-NEXT: v_or_b32_e32 v11, v11, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: 
v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v15 -; SI-NEXT: v_or_b32_e32 v11, v11, v13 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v45 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 -; SI-NEXT: v_or_b32_e32 v13, v14, v13 -; SI-NEXT: v_or_b32_e32 v11, v11, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s74, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v61 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v10 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s92, 8 +; SI-NEXT: s_lshl_b32 s6, s76, 24 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v13, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v30 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v13, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 
v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s88, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 -; SI-NEXT: v_or_b32_e32 v7, v7, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v60 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v23 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s34, 8 +; SI-NEXT: s_lshl_b32 s6, s90, 24 +; SI-NEXT: v_readlane_b32 s34, v63, 2 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, v11, v10 -; SI-NEXT: v_or_b32_e32 v7, v7, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v27 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: v_or_b32_e32 v8, v10, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s94, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v25 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v18 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s38, 8 +; SI-NEXT: s_lshl_b32 s6, s30, 24 +; SI-NEXT: v_readlane_b32 
s38, v63, 6 +; SI-NEXT: v_readlane_b32 s30, v63, 0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v24 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v2 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s36, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v22 +; SI-NEXT: s_lshl_b32 s5, s52, 8 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s6, s48, 24 +; SI-NEXT: v_readlane_b32 s52, v63, 12 +; SI-NEXT: v_readlane_b32 s48, v63, 8 +; SI-NEXT: v_readlane_b32 s36, v63, 4 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 
offset:360 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v21 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s50, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v17 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s68, 8 +; SI-NEXT: s_lshl_b32 s6, s54, 24 +; SI-NEXT: v_readlane_b32 s68, v63, 20 +; SI-NEXT: v_readlane_b32 s54, v63, 14 +; SI-NEXT: v_readlane_b32 s50, v63, 10 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v52 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v18 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 
offset:196 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s64, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s82, 8 +; SI-NEXT: s_lshl_b32 s6, s66, 24 +; SI-NEXT: v_readlane_b32 s82, v63, 26 +; SI-NEXT: v_readlane_b32 s66, v63, 18 +; SI-NEXT: v_readlane_b32 s64, v63, 16 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v63 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v42 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, 
s70, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v57 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s96, 8 +; SI-NEXT: s_lshl_b32 s6, s80, 24 +; SI-NEXT: v_readlane_b32 s96, v63, 32 +; SI-NEXT: v_readlane_b32 s80, v63, 24 +; SI-NEXT: v_readlane_b32 s70, v63, 22 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v12 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v40 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s84, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v46 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s86, 24 +; SI-NEXT: v_readlane_b32 s86, v63, 30 +; SI-NEXT: v_readlane_b32 s84, 
v63, 28 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 @@ -163835,19 +165714,30 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v9 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v35 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 +; SI-NEXT: s_and_b32 s4, s98, 0xff +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v38 +; SI-NEXT: v_readlane_b32 s98, v63, 34 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 @@ -163855,22 +165745,24 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:132 
; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; @@ -168941,39 +170833,38 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v44, v19 -; SI-NEXT: v_mov_b32_e32 v43, v17 -; SI-NEXT: v_mov_b32_e32 v32, v14 -; SI-NEXT: v_mov_b32_e32 v14, v12 -; SI-NEXT: v_mov_b32_e32 v12, v10 -; SI-NEXT: v_mov_b32_e32 v41, v7 -; SI-NEXT: v_mov_b32_e32 v55, v5 -; SI-NEXT: v_mov_b32_e32 v54, v3 -; SI-NEXT: v_mov_b32_e32 v51, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:392 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 
offset:76 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:84 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:92 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:100 +; SI-NEXT: v_mov_b32_e32 v42, v29 +; SI-NEXT: v_mov_b32_e32 v43, v27 +; SI-NEXT: v_mov_b32_e32 v44, v25 +; SI-NEXT: v_mov_b32_e32 v45, v23 +; SI-NEXT: v_mov_b32_e32 v46, v21 +; SI-NEXT: v_mov_b32_e32 v47, v19 +; SI-NEXT: v_mov_b32_e32 v56, v17 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_mov_b32_e32 v57, v15 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mov_b32_e32 v58, v13 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_mov_b32_e32 v59, v11 +; SI-NEXT: v_mov_b32_e32 v55, v9 +; SI-NEXT: v_mov_b32_e32 v54, v7 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:392 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:84 +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:92 +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:100 ; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:108 ; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:116 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:124 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124 ; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:132 ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:140 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:148 @@ -168982,335 +170873,359 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:172 ; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:180 ; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:188 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:128 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:136 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v4 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v18 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:128 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:144 +; SI-NEXT: v_lshlrev_b32_e32 v53, 8, v2 ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v20 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded 
Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v22 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v24 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v26 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v28 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v30 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v41, 8, v4 +; SI-NEXT: v_lshlrev_b32_e32 v63, 8, v6 +; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v16 +; SI-NEXT: v_lshlrev_b32_e32 v62, 8, v18 ; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v8 +; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 ; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 ; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v32 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v25 -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; SI-NEXT: v_lshlrev_b32_e32 v49, 8, v49 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v50 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v49 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v51 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v50 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v52 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v11 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v53 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v13 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 -; SI-NEXT: buffer_store_dword v0, off, 
s[0:3], s32 offset:648 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v15 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v40 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v17 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v19 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v42 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v21 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v23 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:144 -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v17 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v25 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v27 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v23 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v29 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v32 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v31 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v33 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v34 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v37 -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: 
$vgpr37 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:160 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:168 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:176 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v35 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v36 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v37 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:160 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:168 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:176 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v39 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v38 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v39 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v48 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:196 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v48 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:212 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:212 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:220 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:192 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:192 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:200 +; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:200 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:208 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:852 ; 4-byte 
Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr27 +; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(4) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v6 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:208 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v4 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:228 +; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:236 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:244 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:252 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:224 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:232 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:240 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:224 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v6 +; SI-NEXT: s_waitcnt vmcnt(1) +; 
SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:232 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v4 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:240 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:260 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:284 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:256 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:264 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:272 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:256 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v6 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:264 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v4 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; 
SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:292 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:272 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:292 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:308 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:316 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:288 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v6 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v45, 8, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:296 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:304 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:296 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v4 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:304 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:324 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:332 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:340 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:340 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:348 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:320 +; SI-NEXT: 
buffer_load_dword v11, off, s[0:3], s32 offset:320 ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v58, 8, v1 +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v59, 8, v2 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v6 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:328 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v4 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:336 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:336 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:356 +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:356 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:364 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:372 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:372 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:380 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:352 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:352 ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v62, 8, v1 +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v2 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v63, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v6 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:360 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:360 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 
offset:368 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v4 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:368 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:388 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:384 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:384 +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:48 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:40 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:40 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:32 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:112 ; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:120 ; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:184 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:216 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:248 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:280 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:312 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:344 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:376 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:216 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:344 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:376 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:104 ; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:96 @@ -169319,118 +171234,129 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:64 ; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:56 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; 
SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; 
SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc ; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; SI-NEXT: s_cbranch_execz .LBB92_2 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v51 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v2, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v53 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v3 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v41 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xff, v5 +; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_or_b32_e32 v9, v2, v63 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v54 +; SI-NEXT: v_or_b32_e32 v11, v2, v8 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v55 +; SI-NEXT: v_or_b32_e32 v13, v2, v10 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v59 +; SI-NEXT: v_or_b32_e32 v15, v2, v12 +; SI-NEXT: 
v_and_b32_e32 v2, 0xff, v58 +; SI-NEXT: v_or_b32_e32 v17, v2, v14 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v57 +; SI-NEXT: v_or_b32_e32 v19, v2, v61 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v56 +; SI-NEXT: v_or_b32_e32 v21, v2, v62 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v47 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v6, 0xff, v42 +; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_and_b32_e32 v18, 0xff, v18 ; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 ; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 @@ -169440,1780 +171366,1688 @@ define <64 x half> @bitcast_v128i8_to_v64f16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_and_b32_e32 v34, 0xff, v34 ; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 ; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_and_b32_e32 v39, 0xff, v50 -; SI-NEXT: v_and_b32_e32 v48, 0xff, v40 -; SI-NEXT: v_and_b32_e32 v49, 0xff, v49 -; SI-NEXT: v_and_b32_e32 v52, 0xff, v52 -; SI-NEXT: v_and_b32_e32 v42, 0xff, v42 -; SI-NEXT: v_and_b32_e32 v46, 0xff, v46 -; SI-NEXT: v_or_b32_e32 v45, v46, v45 -; SI-NEXT: v_and_b32_e32 v56, 0xff, v56 -; SI-NEXT: v_or_b32_e32 v56, v56, v61 -; SI-NEXT: v_and_b32_e32 v57, 0xff, v57 -; SI-NEXT: v_and_b32_e32 v47, 0xff, v47 -; SI-NEXT: v_or_b32_e32 v1, v57, v1 -; SI-NEXT: v_or_b32_e32 v3, v47, v3 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: v_and_b32_e32 v35, 0xff, v52 +; SI-NEXT: v_and_b32_e32 v36, 0xff, v40 +; SI-NEXT: v_and_b32_e32 v37, 0xff, v60 +; SI-NEXT: v_and_b32_e32 v39, 0xff, v31 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: v_or_b32_e32 v4, v4, v49 +; SI-NEXT: v_and_b32_e32 v49, 0xff, v51 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:976 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v23, v2, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xff, v46 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v2, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v54 +; SI-NEXT: v_or_b32_e32 v25, v2, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xff, v45 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v2, v0 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v55 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v2, v6 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v41 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v2, v8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v9 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v2, v12 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v11 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v2, v14 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v13 -; SI-NEXT: v_mov_b32_e32 v8, v7 -; SI-NEXT: v_mov_b32_e32 v7, v19 -; SI-NEXT: v_or_b32_e32 
v19, v2, v32 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v15 -; SI-NEXT: v_and_b32_e32 v35, 0xff, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v17, v2, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v43 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v23, v2, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v27, v2, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v2, 0xff, v44 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v56 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v31, v2, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v21 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v31 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:976 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_or_b32_e32 v51, v2, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v51 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v27, v2, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v29, v2, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v29 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v29, v2, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xff, v43 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v2, v2, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v2, v1 +; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v5, v5, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v32, v6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v4, v4, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v2, v32 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v4 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v33, v6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v33, v6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v45 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v6, v6, v8 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v6, v6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v6 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v8, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v8, v8, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload +; SI-NEXT: 
v_or_b32_e32 v8, v8, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v8 ; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v12, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v10, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v10, v10, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v2, v10 +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v12, v12, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v12, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v12 ; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v14, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v14, v14, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v14, v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v14 ; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v32, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v32, v32, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v32 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v18, v18, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v18, v18, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v18 ; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded 
Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v22, v22, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v22, v22, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v22 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v24, v24, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v24, v24, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v24 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v26, v26, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v26, v26, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v26 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v28, v28, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v28, v28, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v28 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v30, v30, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v30, v30, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v30 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v34, v34, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v34, v34, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v34 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v16, v16, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v16, v16, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 
4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v16 ; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v20, v20, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v20, v20, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v20 ; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v35, v35, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v35, v35, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v35 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v36, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v36, v36, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v36, v36, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v36 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v37, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v37, v37, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v37, v37, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v37 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v38, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v38, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v38, v38, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v38, v38, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v38 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v39, v39, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded 
Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v39, v39, v1 +; SI-NEXT: v_mov_b32_e32 v1, v48 +; SI-NEXT: v_and_b32_e32 v48, 0xff, v50 +; SI-NEXT: v_or_b32_e32 v48, v48, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v51, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v39 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v7, v7, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v7 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v48, v48, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v48 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v49, v49, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v51, v51, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v49, v49, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v49 ; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v52, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v50, 0xff, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v50, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v50, v50, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v52, v52, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v50, v50, v3 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v50 ; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v9, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v9, v9, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_cvt_f32_f16_e32 v2, v9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v54, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v54, v54, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v54 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v52, v52, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v2, v51 +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v52 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v53, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v11, v11, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v11 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v53, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v53, v53, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v53, v53, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v53 -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: v_and_b32_e32 v54, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v55, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v54, v54, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v39, v54 +; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v55, v55, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v55, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v55, v55, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; SI-NEXT: 
s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v13, v13, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v40, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v41, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v40, v40, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v37, v40 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v41, v41, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v41, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v40, 0xff, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v41, v41, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v40, v40, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v48, v40 -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: v_and_b32_e32 v42, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_or_b32_e32 v0, v0, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v42, v42, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v38, v42 +; SI-NEXT: v_or_b32_e32 v42, v42, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v33, v42 ; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v43, 0xff, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v43, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v43, v43, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v43 -; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: v_or_b32_e32 v43, v43, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 -; SI-NEXT: v_or_b32_e32 v15, v15, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v36, v15 -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: v_and_b32_e32 v44, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v44, 0xff, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:980 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v44, v44, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 
; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v36, v44 +; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v44, v44, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v45, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v46, 0xff, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v46, v46, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v46 -; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: v_or_b32_e32 v45, v45, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v38, v45 +; SI-NEXT: ; implicit-def: $vgpr45 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v58, 0xff, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v58, v58, v59 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v58 -; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: v_and_b32_e32 v46, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v59, 0xff, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v59, v59, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v59 -; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: v_or_b32_e32 v46, v46, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v60, 0xff, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v60, v60, v62 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v60 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: v_and_b32_e32 v47, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v61, 0xff, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v25, v61, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v25 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: v_or_b32_e32 v47, v47, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v35, v47 +; SI-NEXT: ; implicit-def: $vgpr47 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v61, 0xff, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v61, v61, v63 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v61 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: v_and_b32_e32 v56, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v56, v56, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) 
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v57, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v57, v57, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v32, v57 +; SI-NEXT: ; implicit-def: $vgpr57 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v44 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: 
; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v55 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: 
$vgpr55 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; kill: killed $vgpr0 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v13 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v41 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: .LBB92_2: ; %Flow -; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] -; SI-NEXT: s_cbranch_execz .LBB92_4 -; SI-NEXT: ; %bb.3: ; %cmp.true -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v47 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v5, v3, v2 -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v57 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v19 -; SI-NEXT: v_and_b32_e32 v29, 0xff, v29 -; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 -; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_add_i32_e32 v34, vcc, 3, v34 -; SI-NEXT: v_and_b32_e32 v34, 0xff, v34 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30 -; SI-NEXT: v_and_b32_e32 v30, 0xff, v30 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28 -; SI-NEXT: v_and_b32_e32 v28, 0xff, v28 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26 -; SI-NEXT: v_and_b32_e32 v26, 0xff, v26 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24 -; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v18 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_mov_b32_e32 v17, v43 -; SI-NEXT: v_mov_b32_e32 v19, v44 -; SI-NEXT: v_add_i32_e32 v47, vcc, 3, v19 -; SI-NEXT: v_and_b32_e32 v47, 0xff, v47 -; SI-NEXT: v_add_i32_e32 v57, vcc, 3, v15 -; SI-NEXT: v_and_b32_e32 v57, 0xff, v57 -; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v51 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: s_movk_i32 s6, 0x300 +; SI-NEXT: v_and_b32_e32 v58, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v23, v63, v2 -; SI-NEXT: v_add_i32_e32 v63, vcc, 3, v54 -; SI-NEXT: v_and_b32_e32 v63, 0xff, v63 +; SI-NEXT: v_or_b32_e32 v58, v58, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; 
SI-NEXT: v_or_b32_e32 v25, v25, v2 +; SI-NEXT: v_and_b32_e32 v59, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v31, v62, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v56 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v33, v61, v2 -; SI-NEXT: v_add_i32_e32 v56, vcc, 3, v17 -; SI-NEXT: v_and_b32_e32 v56, 0xff, v56 -; SI-NEXT: v_add_i32_e32 v61, vcc, 3, v41 -; SI-NEXT: v_and_b32_e32 v61, 0xff, v61 -; SI-NEXT: v_add_i32_e32 v62, vcc, 3, v55 -; SI-NEXT: v_and_b32_e32 v62, 0xff, v62 +; SI-NEXT: v_or_b32_e32 v59, v59, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v35, v60, v2 -; SI-NEXT: v_add_i32_e32 v60, vcc, 3, v9 -; SI-NEXT: v_and_b32_e32 v60, 0xff, v60 -; SI-NEXT: v_or_b32_e32 v12, v12, v60 -; SI-NEXT: v_add_i32_e32 v60, vcc, s6, v25 +; SI-NEXT: v_and_b32_e32 v60, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v37, v59, v2 -; SI-NEXT: v_add_i32_e32 v59, vcc, 3, v11 -; SI-NEXT: v_and_b32_e32 v59, 0xff, v59 -; SI-NEXT: v_or_b32_e32 v14, v14, v59 -; SI-NEXT: v_add_i32_e32 v59, vcc, s6, v31 +; SI-NEXT: v_or_b32_e32 v60, v60, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v38, v58, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v46 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v39, v45, v2 -; SI-NEXT: v_add_i32_e32 v46, vcc, 3, v21 -; SI-NEXT: v_and_b32_e32 v46, 0xff, v46 -; SI-NEXT: v_add_i32_e32 v58, vcc, 3, v13 -; SI-NEXT: v_and_b32_e32 v58, 0xff, v58 -; SI-NEXT: v_or_b32_e32 v32, v32, v58 -; SI-NEXT: v_add_i32_e32 v58, vcc, s6, v33 -; SI-NEXT: v_add_i32_e32 v9, vcc, s6, v32 +; SI-NEXT: v_and_b32_e32 v61, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:980 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_or_b32_e32 v61, v61, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v48, v0, v2 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v62, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:976 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_or_b32_e32 v62, v62, v1 +; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v31, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_or_b32_e32 v31, v31, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v42 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_and_b32_e32 v63, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v63, v63, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v6, v3 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v2, v53 +; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt 
expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v2, v55 +; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v2, v41 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v2, v43 +; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 
+; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v9 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v11 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v13 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v15 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v17 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: 
buffer_store_dword v7, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v19 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v62 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v5 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v56 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr60 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v61 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v46 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v59 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v63 +; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: .LBB92_2: ; %Flow +; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] +; SI-NEXT: s_cbranch_execz .LBB92_4 +; SI-NEXT: ; %bb.3: ; %cmp.true ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:976 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v40 +; SI-NEXT: v_and_b32_e32 v27, 0xff, v27 +; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v52 +; SI-NEXT: v_and_b32_e32 v29, 0xff, v29 +; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20 +; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 +; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_add_i32_e32 v34, vcc, 3, v34 +; SI-NEXT: v_and_b32_e32 v34, 0xff, v34 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30 +; SI-NEXT: v_and_b32_e32 v30, 0xff, v30 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28 +; SI-NEXT: v_and_b32_e32 v28, 0xff, v28 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v26 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24 +; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 +; SI-NEXT: s_waitcnt vmcnt(2) 
+; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v18 +; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 +; SI-NEXT: v_mov_b32_e32 v7, v49 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: v_add_i32_e32 v42, vcc, 3, v42 +; SI-NEXT: v_and_b32_e32 v42, 0xff, v42 +; SI-NEXT: v_add_i32_e32 v43, vcc, 3, v43 +; SI-NEXT: v_and_b32_e32 v43, 0xff, v43 +; SI-NEXT: v_add_i32_e32 v44, vcc, 3, v44 +; SI-NEXT: v_and_b32_e32 v44, 0xff, v44 +; SI-NEXT: v_add_i32_e32 v45, vcc, 3, v45 +; SI-NEXT: v_and_b32_e32 v45, 0xff, v45 +; SI-NEXT: v_add_i32_e32 v56, vcc, 3, v56 +; SI-NEXT: v_and_b32_e32 v56, 0xff, v56 +; SI-NEXT: v_or_b32_e32 v56, v62, v56 +; SI-NEXT: v_add_i32_e32 v62, vcc, 3, v5 +; SI-NEXT: v_add_i32_e32 v46, vcc, 3, v46 +; SI-NEXT: v_and_b32_e32 v62, 0xff, v62 +; SI-NEXT: v_and_b32_e32 v46, 0xff, v46 +; SI-NEXT: v_or_b32_e32 v62, v63, v62 +; SI-NEXT: v_add_i32_e32 v63, vcc, 3, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v1 +; SI-NEXT: s_movk_i32 s6, 0x300 +; SI-NEXT: v_add_i32_e32 v57, vcc, 3, v57 +; SI-NEXT: v_and_b32_e32 v57, 0xff, v57 +; SI-NEXT: v_or_b32_e32 v57, v61, v57 +; SI-NEXT: v_add_i32_e32 v61, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v58, vcc, 3, v58 +; SI-NEXT: v_and_b32_e32 v58, 0xff, v58 +; SI-NEXT: v_or_b32_e32 v58, v14, v58 +; SI-NEXT: v_add_i32_e32 v59, vcc, 3, v59 +; SI-NEXT: v_and_b32_e32 v59, 0xff, v59 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v11, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v11 +; SI-NEXT: v_or_b32_e32 v59, v12, v59 +; SI-NEXT: v_and_b32_e32 v61, 0xff, v61 +; SI-NEXT: v_or_b32_e32 v61, v8, v61 +; SI-NEXT: v_and_b32_e32 v63, 0xff, v63 +; SI-NEXT: v_add_i32_e32 v47, vcc, 3, v47 +; SI-NEXT: v_and_b32_e32 v47, 0xff, v47 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_add_i32_e32 v62, vcc, s6, v62 +; SI-NEXT: v_add_i32_e32 v61, vcc, s6, v61 +; SI-NEXT: v_add_i32_e32 v59, vcc, s6, v59 +; SI-NEXT: v_add_i32_e32 v58, vcc, s6, v58 +; SI-NEXT: v_add_i32_e32 v57, vcc, s6, v57 +; SI-NEXT: v_add_i32_e32 v56, vcc, s6, v56 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v15, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v17, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v54, vcc, s6, v17 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: 
v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v19, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v21, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v23, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v25, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v39, vcc, s6, v25 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v32, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v38, vcc, s6, v32 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v33, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v37, vcc, s6, v33 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v35, v6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload +; 
SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v35, vcc, s6, v35 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v52 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 
offset:872 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v49 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v40 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v53 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v50 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: 
v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v48 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v48, vcc, s6, v23 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v2, v0, v2 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v51 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:716 ; 4-byte Folded 
Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v50 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v31 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v27, 0xff, v27 +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v60 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v60, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v55, vcc, s6, v15 +; SI-NEXT: v_and_b32_e32 v60, 0xff, v60 +; SI-NEXT: v_or_b32_e32 v60, v10, v60 +; SI-NEXT: v_add_i32_e32 v60, vcc, s6, v60 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v6, v41 +; SI-NEXT: v_or_b32_e32 v63, v6, v63 +; SI-NEXT: v_add_i32_e32 v63, vcc, s6, v63 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v27 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v27 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v27, vcc, s6, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v29 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v29 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v20, v0, v20 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v2, v20 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; 
SI-NEXT: v_or_b32_e32 v16, v0, v16 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v2, v16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v34 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v30 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v30 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v28 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v28 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v26 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v26 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v24 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v24 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v22 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v22 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v22, vcc, s6, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, s6, v16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v18 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v18 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:504 ; 
4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v36, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v36, vcc, 3, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v36, 0xff, v36 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v36 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v2, v2, v36 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v36, v7, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v36, vcc, s6, v36 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v49, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v42, v4, v42 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v49, vcc, 3, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v49, 0xff, v49 +; SI-NEXT: v_add_i32_e32 v42, vcc, s6, v42 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v43, v4, v43 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v43, vcc, s6, v43 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v41, v2, v49 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v49, vcc, s6, v21 +; SI-NEXT: v_add_i32_e32 v41, vcc, s6, v41 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v44, v4, v44 +; SI-NEXT: v_add_i32_e32 v44, vcc, s6, v44 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v49 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v50, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v50, vcc, 3, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v50, 0xff, v50 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v50 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v52, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v51, v9, v50 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v50, vcc, s6, v19 +; SI-NEXT: v_add_i32_e32 v51, vcc, s6, v51 +; 
SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_add_i32_e32 v52, vcc, 3, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v52, 0xff, v52 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v52 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v53, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v45, v4, v45 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v45, vcc, s6, v45 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v52, v2, v52 +; SI-NEXT: v_mov_b32_e32 v2, v53 +; SI-NEXT: v_add_i32_e32 v53, vcc, 3, v9 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v53, 0xff, v53 -; SI-NEXT: v_or_b32_e32 v7, v7, v53 -; SI-NEXT: v_add_i32_e32 v54, vcc, s6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, s6, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v40, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v40, 0xff, v40 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v40 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v46, v4, v46 +; SI-NEXT: v_or_b32_e32 v13, v2, v3 +; SI-NEXT: v_add_i32_e32 v13, vcc, s6, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_add_i32_e32 v46, vcc, s6, v46 +; SI-NEXT: v_add_i32_e32 v52, vcc, s6, v52 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v4 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v63 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v42, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v42, 0xff, v42 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v42 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v43, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v43, 0xff, v43 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v43 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v62 +; 
SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v44, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v44, 0xff, v44 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v44 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v61 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v45, vcc, 3, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v45, 0xff, v45 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v45 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v60 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v45, vcc, s6, v48 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v46 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v59 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v46, vcc, s6, v39 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v47 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v58 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v47, vcc, s6, v38 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v56 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v57 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v56, vcc, s6, v37 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v57 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v56 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_or_b32_e32 v53, v9, v53 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v53, vcc, s6, v53 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_or_b32_e32 v47, v4, v47 +; SI-NEXT: v_add_i32_e32 v47, vcc, s6, v47 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_add_i32_e32 v34, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v0, 
v8, v61 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v47 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v6, v62 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v46 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v57, vcc, s6, v35 -; SI-NEXT: v_add_i32_e32 v61, vcc, s6, v23 -; SI-NEXT: v_add_i32_e32 v62, vcc, s6, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v63, v0, v63 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v0, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v45 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v44 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v13, v43 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v13, v42 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v13, v36 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_add_i32_e32 v40, vcc, 3, v9 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v40, 0xff, v40 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_add_i32_e32 v33, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v40, v9, v40 +; SI-NEXT: v_add_i32_e32 v40, vcc, s6, v40 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v13, v40 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v13, v53 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v13, v52 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v13, v51 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_add_i32_e32 v32, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v13, v41 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(2) -; 
SI-NEXT: v_add_i32_e32 v41, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v55, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v51, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v25, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v31, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v31, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v44, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v23, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v43, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v30, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v42, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v29, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v36, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v37 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v40, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v28, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v53, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v27, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v52, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v26, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v50, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v25, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v49, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v24, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v48, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v23, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload ; SI-NEXT: 
s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v39, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v22, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v38, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v21, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v37, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v20, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v36, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v19, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v35, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v18, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v34, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v33, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v16, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v30, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v15, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v29, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v14, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v28, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v12, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v26, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v11, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v24, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v10, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v18, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 
offset:632 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v9, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v8, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v7, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v6, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v5, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v4, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v1, vcc, s6, v0 -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, s6, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v1 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v2 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v3 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v4 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v6 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v8 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v16 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; SI-NEXT: v_add_i32_e32 v1, vcc, s6, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 
offset:664 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v18 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v28 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v20 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v25 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v22 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v2, v32 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v24 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v39 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v26 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v33 +; SI-NEXT: v_mov_b32_e32 v37, v3 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v27 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v55 +; SI-NEXT: v_mov_b32_e32 v39, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v28 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v50 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v56 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v8 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v30 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v9 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v46 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v10 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v34 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v48 +; SI-NEXT: buffer_store_dword v1, 
off, s[0:3], s32 offset:612 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v59 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v12 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v44 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v14 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v57 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v38 +; SI-NEXT: v_mov_b32_e32 v38, v4 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v42 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v47 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v54 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v53 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v18 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v49 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v35 +; SI-NEXT: v_mov_b32_e32 v35, v2 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v50 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v20 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v52 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v49 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v40 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v22 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v43 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v24 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill 
; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v0, v45 -; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v26 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v21, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v27, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v19, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, s6, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v1, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v31 +; SI-NEXT: v_mov_b32_e32 v33, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, s6, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, s6, v12 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v7 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v9 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v11 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v13 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v15 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v60 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v19 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v21 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v58 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v61 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v25 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:764 ; 4-byte 
Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v51 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v54 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v55 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v41 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_add_i32_e32 v32, vcc, s6, v12 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v62 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v14, vcc, s6, v12 -; SI-NEXT: v_add_i32_e32 v12, vcc, s6, v63 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v14 -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v32 -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_add_i32_e32 v63, vcc, s6, v63 -; SI-NEXT: v_cvt_f32_f16_e32 v63, v63 -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v13, v1 ; SI-NEXT: .LBB92_4: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v10 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v10, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v10 -; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 
offset:748 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v10 -; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 16, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 20, v10 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 24, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v10 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, 
v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 36, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 40, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; 
SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 48, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v10 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: 
v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v10 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v10 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; 
SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v10 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v10 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v10 -; SI-NEXT: s_waitcnt 
vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v1, v48 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v2, v39 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v2, v37 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v1, v36 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v2, v33 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v33 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v10 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v36 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: 
v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v27 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v23 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v37 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v29 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v10 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v2, v19 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v23 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v10 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v1, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v15 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v31 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v17 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v10 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v2, v7 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v0, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v5 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x7c, v10 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_store_dword v0, v2, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v1, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v6 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload @@ -177051,22 +178885,22 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; VI-NEXT: 
buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 ; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 @@ -177092,13 +178926,11 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 ; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; VI-NEXT: buffer_store_dword v1, off, 
s[0:3], s32 offset:468 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v43, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v55, 8, v25 ; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 ; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 ; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 @@ -177107,49 +178939,46 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 ; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 ; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v8 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v10 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v12 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v26 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v28 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v30 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 ; VI-NEXT: buffer_store_dword v0, off, 
s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200 @@ -177158,34 +178987,35 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 -; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v22 ; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v24 +; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v26 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec ; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 ; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v16 ; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 ; VI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 +; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; VI-NEXT: buffer_store_dword v0, off, 
s[0:3], s32 offset:604 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v2 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v36, 8, v2 ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264 @@ -177195,131 +179025,155 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0 -; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v2 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328 ; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:12 +; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 ; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:28 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:36 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:36 +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v4 +; VI-NEXT: s_waitcnt vmcnt(13) +; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v6 -; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5 -; VI-NEXT: s_waitcnt vmcnt(10) ; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v0 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:84 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:92 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:100 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:116 -; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:124 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:132 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:140 -; VI-NEXT: buffer_load_ushort v43, off, 
s[0:3], s32 offset:148 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:156 -; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:164 -; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:172 -; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:180 -; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:188 -; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:196 -; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:204 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:212 -; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:220 -; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:228 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:236 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:244 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:252 -; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0 +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:68 +; VI-NEXT: s_waitcnt vmcnt(10) +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:268 -; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:76 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:284 -; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:292 -; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300 -; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:308 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:316 -; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:324 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; VI-NEXT: 
buffer_store_dword v16, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; VI-NEXT: s_cbranch_scc0 .LBB93_4 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:84 +; VI-NEXT: s_waitcnt vmcnt(0) +; 
VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:92 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:100 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:108 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:116 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:124 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:132 +; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:140 +; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:148 +; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:156 +; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:164 +; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:172 +; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:180 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:188 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:196 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:204 +; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:212 +; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220 +; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:228 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:236 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:244 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:252 +; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260 +; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:268 +; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:276 +; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:284 +; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:292 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:300 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:308 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:316 +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:324 +; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 
offset:688 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 
offset:828 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill +; VI-NEXT: s_cbranch_scc0 .LBB93_2 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -177328,225 +179182,205 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v0, v0, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v1, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v2, v2, v45 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v2, v8 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v3, v8 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded 
Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v3, v10 ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v2, v50, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: 
v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v36, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v37, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v38, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v48, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v38, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v39, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v49, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v48, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v45, v62 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v60, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v2, v35, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v0, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v3, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: 
buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(9) +; VI-NEXT: v_or_b32_sdwa v3, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v32, v1 -; VI-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v54, v22 -; VI-NEXT: v_mov_b32_e32 v41, v24 +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v34, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v37, v1 -; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v55, v26 +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v0, v42, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v1, v41, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v39, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v42, v43 +; VI-NEXT: v_mov_b32_e32 v43, v37 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v39, v0 -; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v49, v1 -; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v43, v27 +; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload 
-; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v51, v0 -; VI-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v47, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v47, v54 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v35, v1 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v53, v28 +; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v47, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v33, v0 -; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v61, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v58, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v57, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v36, v0 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v24, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v0, v34, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v56, v0 -; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v58, v1 -; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v61, v60 -; VI-NEXT: v_mov_b32_e32 v60, v59 +; VI-NEXT: v_or_b32_sdwa v1, v25, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: 
v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v38, v0 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v48, v1 -; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v57, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v29, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v45, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v50, v0 -; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v28, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v40, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v62, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v51, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v40, v41 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v52, v0 -; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v46, v1 +; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v0, v29, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v31, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v54, v33 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v46, v1 +; VI-NEXT: v_mov_b32_e32 v56, v1 ; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v63, v0 -; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v47, v1 -; VI-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v63, v39 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v57, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v57, v0 +; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v58, v1 +; VI-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v52, v60 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword 
v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v53, v35 +; VI-NEXT: s_waitcnt vmcnt(3) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff @@ -177575,14 +179409,54 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 -; VI-NEXT: s_cbranch_execnz .LBB93_3 -; VI-NEXT: .LBB93_2: ; %cmp.true -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v59 -; VI-NEXT: v_or_b32_sdwa v29, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_branch .LBB93_3 +; VI-NEXT: .LBB93_2: +; VI-NEXT: v_mov_b32_e32 v47, v54 +; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v58, v7 +; VI-NEXT: v_mov_b32_e32 v57, v5 +; VI-NEXT: v_mov_b32_e32 v56, v3 +; VI-NEXT: s_mov_b64 s[4:5], -1 +; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 +; VI-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; VI-NEXT: .LBB93_3: ; %Flow +; VI-NEXT: 
buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; VI-NEXT: s_cbranch_vccnz .LBB93_5 +; VI-NEXT: ; %bb.4: ; %cmp.true +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 @@ -177601,351 +179475,356 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: s_lshl_b32 s9, s19, 8 ; VI-NEXT: s_add_i32 s16, s16, 3 ; VI-NEXT: s_lshl_b32 s10, s17, 8 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 +; VI-NEXT: v_or_b32_sdwa v29, v41, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v31, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33 +; VI-NEXT: v_or_b32_sdwa v30, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 +; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v28, v56, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v26, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v62 -; VI-NEXT: v_or_b32_sdwa v28, v43, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44 -; VI-NEXT: v_or_b32_sdwa v53, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v45 -; VI-NEXT: v_or_b32_sdwa v27, v55, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 
v3, vcc, 3, v42 -; VI-NEXT: v_or_b32_sdwa v52, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v40 -; VI-NEXT: v_or_b32_sdwa v25, v48, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v60 -; VI-NEXT: v_or_b32_sdwa v59, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v61 -; VI-NEXT: v_or_b32_sdwa v24, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v44, v47, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v48, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48 -; VI-NEXT: v_or_b32_sdwa v24, v24, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24 +; VI-NEXT: v_or_b32_sdwa v27, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v23, v41, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v40, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v38, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38 -; VI-NEXT: v_or_b32_sdwa v23, v23, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23 +; VI-NEXT: v_or_b32_sdwa v26, v61, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v22, v54, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v34, v34, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34 +; VI-NEXT: v_or_b32_sdwa v26, v26, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v50, v33, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; VI-NEXT: 
v_add_u32_e32 v50, vcc, 0x300, v50 -; VI-NEXT: v_or_b32_sdwa v22, v22, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22 +; VI-NEXT: v_or_b32_sdwa v25, v48, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v21, v35, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v21, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v21 +; VI-NEXT: v_or_b32_sdwa v25, v25, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v54, v51, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v24, v36, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v20, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v32, v32, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v32 +; VI-NEXT: v_or_b32_sdwa v24, v24, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v49, v39, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v49 -; VI-NEXT: v_or_b32_sdwa v20, v20, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20 -; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v23, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v19, v37, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], 
s32 offset:744 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v61, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v61 +; VI-NEXT: v_or_b32_sdwa v23, v23, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v22, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v60 +; VI-NEXT: v_or_b32_sdwa v36, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v36 +; VI-NEXT: v_or_b32_sdwa v22, v22, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v37, v34, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v37 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v63, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v31, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v19, v19, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19 -; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v18, v32, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v38, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38 +; VI-NEXT: 
v_or_b32_sdwa v21, v63, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v57, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v57 -; VI-NEXT: v_or_b32_sdwa v18, v18, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18 +; VI-NEXT: v_or_b32_sdwa v20, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v39, v45, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v39 +; VI-NEXT: v_or_b32_sdwa v20, v20, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_or_b32_sdwa v19, v43, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_or_b32_sdwa v48, v42, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v18, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v55 +; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48 +; VI-NEXT: v_or_b32_sdwa v19, v19, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v62, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v62 +; VI-NEXT: v_or_b32_sdwa v18, v18, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v53 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v10, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; VI-NEXT: 
buffer_load_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v17, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v51 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v11, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v50 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v15, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v56, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v49, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v49 +; VI-NEXT: v_or_b32_sdwa v15, v15, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v14, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v34, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 
v34, vcc, 0x300, v34 -; VI-NEXT: v_or_b32_sdwa v14, v14, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51 +; VI-NEXT: v_or_b32_sdwa v14, v14, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v2 +; VI-NEXT: v_or_b32_sdwa v29, v29, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v14 +; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_or_b32_sdwa v13, v59, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v13, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v36, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v36 -; VI-NEXT: v_or_b32_sdwa v13, v13, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v26 -; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v52 -; VI-NEXT: v_or_b32_sdwa v26, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v54 -; VI-NEXT: v_or_b32_sdwa v21, v21, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v52, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v52 +; VI-NEXT: v_or_b32_sdwa v13, v13, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v44 +; VI-NEXT: v_or_b32_sdwa v28, v28, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13 -; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21 -; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26 +; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v12, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, 
s[0:3], s32 offset:536 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51 -; VI-NEXT: v_or_b32_sdwa v12, v12, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v59 -; VI-NEXT: v_or_b32_sdwa v25, v25, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v54, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v54, vcc, 0x300, v54 +; VI-NEXT: v_or_b32_sdwa v12, v12, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12 -; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v33, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v50, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v40, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 -; VI-NEXT: v_or_b32_sdwa v30, v47, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v39, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 -; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v2 +; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; 
VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v53, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v55, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v9, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v41 -; VI-NEXT: v_or_b32_sdwa v9, v9, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v10 +; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42 +; VI-NEXT: v_or_b32_sdwa v9, v9, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v10 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x300, v55 -; VI-NEXT: v_or_b32_sdwa v10, v39, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v53 -; VI-NEXT: v_or_b32_sdwa v27, v28, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v28, v29, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v29, v30, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v49, v16, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v10, v53, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v53, vcc, 0x300, v40 +; VI-NEXT: v_or_b32_sdwa v27, v27, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10 ; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v27 -; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28 -; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v8, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42 -; VI-NEXT: v_or_b32_sdwa v8, v8, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v11 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v40 -; VI-NEXT: v_or_b32_sdwa v11, v33, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v30, v31, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v17, v17, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v43, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v43 +; VI-NEXT: v_or_b32_sdwa v8, v8, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v11 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v41 +; VI-NEXT: v_or_b32_sdwa v17, v17, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v11, v50, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17 +; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v49 +; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v0 +; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v1 +; VI-NEXT: v_or_b32_sdwa v30, v30, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v31, v31, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8 ; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11 ; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v30 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v7, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 
offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v44, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v44, vcc, 0x300, v44 -; VI-NEXT: v_or_b32_sdwa v7, v7, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v45, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v45, vcc, 0x300, v45 +; VI-NEXT: v_or_b32_sdwa v7, v7, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v6, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v45, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v45, vcc, 0x300, v45 -; VI-NEXT: v_or_b32_sdwa v6, v6, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v46, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v46, vcc, 0x300, v46 +; VI-NEXT: v_or_b32_sdwa v6, v6, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v6 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v5, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v46, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v46, vcc, 0x300, v46 -; VI-NEXT: v_or_b32_sdwa v5, v5, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v47, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v47, vcc, 0x300, v47 +; VI-NEXT: v_or_b32_sdwa v5, v5, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v4, vcc, 3, v4 -; VI-NEXT: v_or_b32_sdwa v4, v47, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v47, vcc, 3, v32 -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v4, v56, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x300, v4 ; VI-NEXT: v_or_b32_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v4 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v47, v32, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_e32 v47, s4, v47 +; VI-NEXT: v_add_u32_e32 v56, vcc, 3, v56 +; VI-NEXT: v_or_b32_sdwa v56, v57, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_e32 v56, s4, v56 ; VI-NEXT: s_and_b32 s4, s26, 0xff ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s24, 0xff @@ -177958,35 +179837,26 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: s_or_b32 s8, s9, s8 ; VI-NEXT: s_and_b32 s9, s16, 0xff ; VI-NEXT: s_or_b32 s9, s10, s9 -; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v56 ; VI-NEXT: s_addk_i32 s5, 0x300 ; VI-NEXT: s_addk_i32 s7, 0x300 ; VI-NEXT: s_addk_i32 s9, 0x300 -; VI-NEXT: v_or_b32_sdwa v15, v15, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v32, v16, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: s_lshl_b32 s4, s4, 16 ; VI-NEXT: s_lshl_b32 s6, s6, 16 ; VI-NEXT: s_lshl_b32 s8, s8, 16 ; VI-NEXT: s_and_b32 s9, s9, 0xffff ; VI-NEXT: s_and_b32 s7, s7, 0xffff ; VI-NEXT: s_and_b32 s5, s5, 0xffff -; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17 -; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v32 -; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v0 ; VI-NEXT: s_or_b32 s8, s8, s9 ; VI-NEXT: s_or_b32 s6, s6, s7 ; VI-NEXT: s_or_b32 s4, s4, s5 ; VI-NEXT: s_add_i32 s8, s8, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 -; VI-NEXT: v_or_b32_sdwa v31, v31, v32 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v47 -; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15 +; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v56 ; VI-NEXT: v_mov_b32_e32 v0, s8 ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_mov_b32_e32 v2, s4 -; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31 -; VI-NEXT: .LBB93_3: ; %end +; VI-NEXT: .LBB93_5: ; %end ; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload @@ -178005,39 +179875,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] -; VI-NEXT: .LBB93_4: -; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v61, v60 -; VI-NEXT: v_mov_b32_e32 v60, v59 -; VI-NEXT: v_mov_b32_e32 v45, v62 -; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v57, v5 -; VI-NEXT: v_mov_b32_e32 v47, v4 -; VI-NEXT: v_mov_b32_e32 v63, v3 -; VI-NEXT: v_mov_b32_e32 v53, v28 -; VI-NEXT: v_mov_b32_e32 v43, v27 -; VI-NEXT: v_mov_b32_e32 v55, v26 -; VI-NEXT: v_mov_b32_e32 v41, v24 -; VI-NEXT: v_mov_b32_e32 v54, v22 -; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 -; VI-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; VI-NEXT: s_branch .LBB93_2 ; ; GFX9-LABEL: bitcast_v128i8_to_v64f16_scalar: ; GFX9: ; %bb.0: @@ -178058,31 +179895,36 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword 
v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:332 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:56 ; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64 ; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], 
s32 offset:96 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:80 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:88 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:96 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:104 ; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112 ; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 ; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 @@ -178092,133 +179934,129 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 ; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 ; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 -; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v17 -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25 -; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v19 -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; GFX9-NEXT: s_waitcnt vmcnt(24) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v44, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v23 +; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v43, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v29 ; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v47 +; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v56 +; GFX9-NEXT: s_waitcnt vmcnt(21) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v45 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v44 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt 
vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40 ; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v28 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v30 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: 
v_lshlrev_b32_e32 v1, 8, v33 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:200 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208 -; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:216 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 +; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v46 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(7) ; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt 
vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:264 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272 -; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:280 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288 ; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 @@ -178226,148 +180064,149 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(5) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9 -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 
offset:320 -; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:328 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:328 ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:28 ; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 -; GFX9-NEXT: s_waitcnt vmcnt(15) -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; GFX9-NEXT: s_waitcnt vmcnt(14) +; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5 ; GFX9-NEXT: s_waitcnt vmcnt(7) ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v1 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:84 -; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:92 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:100 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:116 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:132 -; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:140 -; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:148 -; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:156 -; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:164 -; GFX9-NEXT: s_waitcnt vmcnt(21) -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v1 +; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:68 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:84 +; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:92 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:100 +; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:108 +; GFX9-NEXT: s_waitcnt vmcnt(14) +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v4 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:172 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:180 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:188 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:196 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:204 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:212 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:220 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:228 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236 -; GFX9-NEXT: 
buffer_load_ushort v54, off, s[0:3], s32 offset:244 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:116 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:124 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:132 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:140 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:148 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:156 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:164 +; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:172 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:180 +; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:196 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:204 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:220 +; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:236 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244 ; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252 -; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:268 -; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:276 -; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:284 -; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:292 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:260 +; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:268 +; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:276 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:292 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:300 ; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308 -; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:316 -; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:324 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 
offset:316 +; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:324 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(28) -; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(30) -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(33) -; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(36) -; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(39) -; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte 
Folded Spill -; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(31) +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(34) +; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(34) +; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(34) +; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(35) +; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(35) +; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; 
GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB93_2 ; GFX9-NEXT: ; %bb.1: ; %cmp.false ; GFX9-NEXT: s_and_b32 s4, s28, 0xff @@ -178375,19 +180214,12 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff ; GFX9-NEXT: v_and_b32_e32 v3, s4, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v2, v0, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; GFX9-NEXT: v_or_b32_sdwa v2, v0, v16 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v4, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v6, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v1 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v8, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 @@ -178409,272 +180241,291 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 ; GFX9-NEXT: s_or_b32 s7, s7, s8 ; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7 +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v4, v0, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v1 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v57, v5 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v2, v34, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v34, v35 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: 
v_or_b32_sdwa v1, v1, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, 
v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v2, v39, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v39, v16 -; GFX9-NEXT: v_or_b32_sdwa v17, v34, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v36, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v46, v32 +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v42, v61 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v55, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v17, v45, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v45, v59 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v53, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v52, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v50, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v49, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v27, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v16, v2, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v48, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX9-NEXT: v_lshl_or_b32 v16, v2, 16, v0 ; GFX9-NEXT: v_lshl_or_b32 v17, v17, 16, v1 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v55, v22 +; GFX9-NEXT: v_mov_b32_e32 v2, s6 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_mov_b32_e32 v33, v45 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: 
v_lshl_or_b32 v18, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v47, v32 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v19, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v20, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v21, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword 
v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v51, v57 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v22, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v49, v39 +; GFX9-NEXT: v_mov_b32_e32 v59, v44 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v34, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v23, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_mov_b32_e32 v46, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v58, v50 +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v1, v35, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v24, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v35, v45 -; GFX9-NEXT: v_mov_b32_e32 v45, v61 -; GFX9-NEXT: v_mov_b32_e32 v61, v42 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v38, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v54, v63 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v25, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v54, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v54, v2 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v26, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v27, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v29, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshl_or_b32 v26, v1, 16, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v29, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v27, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v60, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v59, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v36, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; 
GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v42, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v29, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_mov_b32_e32 v57, v35 +; GFX9-NEXT: v_mov_b32_e32 v35, v38 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v30, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v31, v1, 16, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 -; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_mov_b64 s[4:5], 0 ; GFX9-NEXT: s_branch .LBB93_3 ; GFX9-NEXT: .LBB93_2: -; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: 
buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v33, v45 -; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v58, v50 +; GFX9-NEXT: v_mov_b32_e32 v45, v59 +; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v34, v35 +; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v49, v39 +; GFX9-NEXT: v_mov_b32_e32 v55, v22 +; GFX9-NEXT: v_mov_b32_e32 v51, v5 ; GFX9-NEXT: s_mov_b64 s[4:5], -1 +; GFX9-NEXT: v_mov_b32_e32 v46, v32 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; GFX9-NEXT: .LBB93_3: ; %Flow -; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: 
buffer_load_dword v63, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload ; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; GFX9-NEXT: s_cbranch_vccnz .LBB93_5 ; GFX9-NEXT: ; %bb.4: ; %cmp.true -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s5, s4 ; GFX9-NEXT: s_addk_i32 s4, 0x300 +; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s24, s24, 3 ; GFX9-NEXT: s_lshl_b32 s5, s25, 8 ; GFX9-NEXT: s_add_i32 s26, s26, 3 @@ -178687,61 +180538,55 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: s_lshl_b32 s9, s17, 8 ; GFX9-NEXT: s_add_i32 s18, s18, 3 ; GFX9-NEXT: s_lshl_b32 s10, s19, 8 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 
offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(15) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(14) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v56 +; GFX9-NEXT: s_waitcnt vmcnt(12) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(12) ; GFX9-NEXT: v_add_u32_e32 v25, 3, v25 -; GFX9-NEXT: s_waitcnt vmcnt(11) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v25, v37, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v37, v51, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v25, v26, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v38, v38, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v16, 3, v16 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_add_u32_e32 v23, 3, v23 +; GFX9-NEXT: s_waitcnt vmcnt(8) +; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16 +; GFX9-NEXT: v_or_b32_sdwa v23, v50, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(3) ; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 ; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; 
GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2 ; GFX9-NEXT: v_and_b32_e32 v3, s4, v3 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_and_b32 s4, s24, 0xff ; GFX9-NEXT: s_or_b32 s4, s5, s4 ; GFX9-NEXT: s_and_b32 s5, s26, 0xff @@ -178753,8 +180598,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: s_and_b32 s8, s16, 0xff ; GFX9-NEXT: s_or_b32 s8, s9, s8 ; GFX9-NEXT: s_and_b32 s9, s18, 0xff -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_or_b32 s9, s10, s9 ; GFX9-NEXT: s_addk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s5, 0x300 @@ -178771,14 +180614,14 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) @@ -178786,9 +180629,9 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) @@ -178798,254 +180641,277 
@@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v42 +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 +; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; 
GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 +; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v37, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37 +; GFX9-NEXT: s_waitcnt vmcnt(4) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(3) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 +; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v38, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; 
GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v39, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v39, v36, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v48, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v18, 3, v18 +; GFX9-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v33, 0x300, v18 +; GFX9-NEXT: s_waitcnt vmcnt(4) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v48, v46, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v49, v35, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:672 ; 4-byte Folded 
Reload +; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 -; GFX9-NEXT: v_or_b32_sdwa v2, v16, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v2, v47, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2 ; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v51, v34, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v16, 3, v16 -; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16 ; GFX9-NEXT: v_lshl_or_b32 v16, v16, 16, v2 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0 +; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31 +; GFX9-NEXT: v_mov_b32_e32 v0, s8 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, s6 +; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 +; GFX9-NEXT: v_or_b32_sdwa v2, v49, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v49, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2 +; GFX9-NEXT: v_mov_b32_e32 v2, s4 +; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30 +; GFX9-NEXT: v_lshl_or_b32 v30, v33, 16, v30 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v19, 3, v19 +; GFX9-NEXT: v_add_u32_e32 v26, 3, v58 +; GFX9-NEXT: v_or_b32_sdwa v19, v51, v19 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v19 +; GFX9-NEXT: v_and_b32_e32 v29, 0xffff, v29 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v51, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v45 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v52, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v53, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v24, 3, v24 -; GFX9-NEXT: v_add_u32_e32 v26, 3, v61 -; GFX9-NEXT: v_or_b32_sdwa v24, v54, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v24 -; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v48 -; GFX9-NEXT: v_add_u32_e32 v48, 0x300, v51 -; GFX9-NEXT: v_and_b32_e32 v24, 0xffff, v24 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v55 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v54, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v45 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v34 ; GFX9-NEXT: v_add_u32_e32 v20, 3, v20 -; GFX9-NEXT: v_or_b32_sdwa v20, v57, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v20, v35, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v34, 0x300, v20 +; GFX9-NEXT: v_lshl_or_b32 v29, v34, 16, v29 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v56 +; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v21, 3, v21 -; GFX9-NEXT: v_or_b32_sdwa v21, v32, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v26, 3, v60 +; GFX9-NEXT: v_or_b32_sdwa v21, v22, v21 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40 ; GFX9-NEXT: v_add_u32_e32 v28, 0x300, v21 ; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v54 +; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20 ; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; GFX9-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v23, 3, v23 -; GFX9-NEXT: v_add_u32_e32 v26, 3, v47 -; GFX9-NEXT: v_or_b32_sdwa v23, v41, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40 -; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v41, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v43 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v46 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v22, 3, v22 -; GFX9-NEXT: v_or_b32_sdwa v22, v44, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v22, v36, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v35, 0x300, v22 ; GFX9-NEXT: v_add_u32_e32 v22, 0x300, v52 -; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41 ; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20 ; GFX9-NEXT: v_lshl_or_b32 v28, v35, 16, v28 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v42, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v43 +; GFX9-NEXT: v_add_u32_e32 v24, 3, v24 +; GFX9-NEXT: v_or_b32_sdwa v24, v57, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v42 +; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v24 +; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v48 +; GFX9-NEXT: v_add_u32_e32 v48, 0x300, v51 +; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41 +; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; GFX9-NEXT: v_and_b32_e32 v24, 0xffff, v24 +; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v43, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: 
v_add_u32_e32 v31, 0x300, v0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v52, 0x300, v43 -; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31 -; GFX9-NEXT: v_mov_b32_e32 v0, s8 -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: v_lshl_or_b32 v19, v52, 16, v19 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v44, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v33 -; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1 -; GFX9-NEXT: v_mov_b32_e32 v1, s6 -; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v44 +; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v45, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v27, 0x300, v23 ; GFX9-NEXT: v_add_u32_e32 v26, 0x300, v25 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 -; GFX9-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2 ; GFX9-NEXT: v_add_u32_e32 v25, 0x300, v38 ; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v50 ; GFX9-NEXT: v_add_u32_e32 v38, 0x300, v39 @@ -179057,33 +180923,14 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i ; GFX9-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; GFX9-NEXT: v_and_b32_e32 v26, 0xffff, v26 ; GFX9-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30 -; GFX9-NEXT: v_mov_b32_e32 v2, s4 ; GFX9-NEXT: v_lshl_or_b32 v21, v50, 16, v21 ; GFX9-NEXT: v_lshl_or_b32 v22, v49, 16, v22 -; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v23 ; GFX9-NEXT: v_lshl_or_b32 v24, v39, 16, v24 +; GFX9-NEXT: v_lshl_or_b32 v18, v53, 16, v18 +; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v23 ; GFX9-NEXT: v_lshl_or_b32 v25, v38, 16, v25 ; GFX9-NEXT: v_lshl_or_b32 v26, v37, 16, v26 ; GFX9-NEXT: v_lshl_or_b32 v27, v36, 16, v27 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v18, 3, v18 -; GFX9-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v33, 0x300, v18 -; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v44 -; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; GFX9-NEXT: v_lshl_or_b32 v18, v53, 16, v18 -; GFX9-NEXT: v_lshl_or_b32 v30, v33, 16, v30 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v19, 3, v19 -; GFX9-NEXT: v_or_b32_sdwa v19, v60, v19 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v19 -; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v42 -; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; GFX9-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; GFX9-NEXT: v_lshl_or_b32 v19, v52, 16, v19 -; GFX9-NEXT: v_lshl_or_b32 v29, v34, 16, v29 ; GFX9-NEXT: .LBB93_5: ; %end ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload @@ -182779,1037 +184626,1093 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:8 -; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v13 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v7 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v5 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v3 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v1 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v30 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v29 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v27 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v25 -; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 -; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v15 -; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 -; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v12 -; VI-NEXT: v_lshrrev_b32_e32 v62, 16, v11 -; VI-NEXT: v_lshrrev_b32_e32 v63, 16, v10 -; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v8 -; VI-NEXT: v_lshrrev_b32_e32 v47, 16, v6 -; VI-NEXT: v_lshrrev_b32_e32 v44, 16, v4 -; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v2 -; VI-NEXT: v_lshrrev_b32_e32 v56, 16, v28 -; VI-NEXT: v_lshrrev_b32_e32 v58, 16, v26 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v57, 16, v24 -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v23 -; VI-NEXT: v_lshrrev_b32_e32 v59, 16, v22 -; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v21 -; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v20 -; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v19 -; VI-NEXT: v_lshrrev_b32_e32 v41, 16, v18 -; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v17 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr52 -; VI-NEXT: ; implicit-def: $vgpr45 +; VI-NEXT: v_mov_b32_e32 v44, v12 +; VI-NEXT: v_mov_b32_e32 v12, v0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:4 +; VI-NEXT: buffer_load_dword 
v33, off, s[0:3], s32 +; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v16 +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v43, v11 +; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v14 +; VI-NEXT: v_lshrrev_b32_e32 v40, 16, v10 +; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; VI-NEXT: v_lshrrev_b32_e32 v62, 16, v4 +; VI-NEXT: v_mov_b32_e32 v32, v20 +; VI-NEXT: v_mov_b32_e32 v55, v22 +; VI-NEXT: v_mov_b32_e32 v54, v21 +; VI-NEXT: v_mov_b32_e32 v31, v19 +; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v44 +; VI-NEXT: ; implicit-def: $vgpr20 +; VI-NEXT: ; implicit-def: $vgpr57 +; VI-NEXT: ; implicit-def: $vgpr51 +; VI-NEXT: ; implicit-def: $vgpr41 +; VI-NEXT: ; implicit-def: $vgpr56 +; VI-NEXT: ; implicit-def: $vgpr63 +; VI-NEXT: ; implicit-def: $vgpr58 +; VI-NEXT: ; implicit-def: $vgpr21 +; VI-NEXT: ; implicit-def: $vgpr47 +; VI-NEXT: ; implicit-def: $vgpr22 +; VI-NEXT: ; implicit-def: $vgpr46 ; VI-NEXT: ; implicit-def: $vgpr42 -; VI-NEXT: s_waitcnt vmcnt(13) -; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; 
implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: s_waitcnt vmcnt(12) -; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v61 -; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v60 -; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr45 +; VI-NEXT: ; implicit-def: $vgpr52 +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v32 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v15 +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v43 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v31 +; VI-NEXT: ; implicit-def: $vgpr8 +; VI-NEXT: ; implicit-def: $vgpr15 +; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v13 +; VI-NEXT: v_lshrrev_b32_e32 v19, 16, v9 +; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v7 +; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr13 +; VI-NEXT: ; implicit-def: 
$vgpr9 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr5 +; VI-NEXT: ; implicit-def: $vgpr4 +; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v30 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr30 +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v29 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v28 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr29 +; VI-NEXT: ; implicit-def: $vgpr28 +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v27 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v26 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v17 +; VI-NEXT: ; implicit-def: $vgpr27 +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v25 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v24 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v18 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v34 +; VI-NEXT: ; implicit-def: $vgpr25 +; VI-NEXT: ; implicit-def: $vgpr24 +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v23 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v55 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v54 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, 
s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr23 +; VI-NEXT: ; implicit-def: $vgpr34 +; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v33 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: ; kill: killed $vgpr0 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr10 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; VI-NEXT: ; implicit-def: $vgpr50 -; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr33 +; VI-NEXT: ; implicit-def: $vgpr61 ; VI-NEXT: ; implicit-def: $vgpr49 ; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr32 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr32 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; VI-NEXT: 
buffer_store_dword v33, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr32 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr37 +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc ; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; VI-NEXT: s_cbranch_execz .LBB94_2 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v16 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v16 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v15 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v14 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v14 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v13 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v12 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v12 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v11 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v10 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v10 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v9 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v8 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v8 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v7 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v6 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v6 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v5 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v4 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v4 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v3 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:312 ; 4-byte 
Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v2 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v2 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v1 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v61 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v61 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v60 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v30 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v30 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v29 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v28 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v28 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v27 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v26 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v26 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v25 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v24 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v24 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v23 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v22 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v22 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v21 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v20 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v20 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v19 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v18 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v18 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v17 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[15:16] -; VI-NEXT: buffer_store_dword 
v31, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[13:14] -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[11:12] -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[9:10] -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[7:8] -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v31, v33 -; VI-NEXT: v_mov_b32_e32 v33, v43 -; VI-NEXT: v_lshrrev_b64 v[42:43], 24, v[5:6] -; VI-NEXT: v_mov_b32_e32 v43, v33 -; VI-NEXT: v_mov_b32_e32 v33, v46 -; VI-NEXT: v_lshrrev_b64 v[45:46], 24, v[3:4] -; VI-NEXT: v_mov_b32_e32 v46, v33 -; VI-NEXT: v_mov_b32_e32 v33, v53 -; VI-NEXT: v_lshrrev_b64 v[52:53], 24, v[1:2] -; VI-NEXT: v_mov_b32_e32 v53, v33 -; VI-NEXT: v_lshrrev_b64 v[32:33], 24, v[60:61] -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[32:33], 24, v[29:30] -; VI-NEXT: v_lshrrev_b64 v[34:35], 24, v[27:28] -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v33, v36 -; VI-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26] -; VI-NEXT: v_lshrrev_b64 v[49:50], 24, v[23:24] -; VI-NEXT: v_mov_b32_e32 v36, v33 -; VI-NEXT: v_mov_b32_e32 v33, v41 -; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[21:22] -; VI-NEXT: v_mov_b32_e32 v34, v51 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v56, v38 +; VI-NEXT: v_mov_b32_e32 v45, v7 +; VI-NEXT: v_mov_b32_e32 v63, v53 +; VI-NEXT: v_mov_b32_e32 v15, v3 +; VI-NEXT: v_mov_b32_e32 v28, v48 +; VI-NEXT: v_mov_b32_e32 v48, v16 +; VI-NEXT: v_mov_b32_e32 v16, v40 +; VI-NEXT: v_mov_b32_e32 v47, v39 +; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v32 +; VI-NEXT: v_lshrrev_b32_e32 v29, 24, v44 +; VI-NEXT: v_lshrrev_b32_e32 v5, 24, v32 +; VI-NEXT: v_lshrrev_b32_e32 v13, 24, v18 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 24, v1 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v1 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v0 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; 
VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[0:1] +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v38 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v37 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v44 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v43 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v62, v36 +; VI-NEXT: v_lshrrev_b32_e32 v41, 24, v38 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v11 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v10 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; VI-NEXT: v_lshrrev_b32_e32 v8, 24, v11 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshrrev_b32_e32 v23, 8, v6 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 24, v7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_lshrrev_b32_e32 v24, 8, v52 +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_lshrrev_b32_e32 v57, 24, v53 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v4, 24, v3 +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; VI-NEXT: v_lshrrev_b32_e32 v20, 8, v53 +; VI-NEXT: v_lshrrev_b32_e32 v19, 8, v2 +; VI-NEXT: v_lshrrev_b32_e32 v25, 8, v3 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v4, 24, v59 +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v59 +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v58 +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v26 +; VI-NEXT: 
buffer_store_dword v4, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_lshrrev_b32_e32 v14, 24, v27 +; VI-NEXT: v_lshrrev_b32_e32 v60, 8, v27 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshrrev_b32_e32 v9, 8, v33 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v34 +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: v_lshrrev_b32_e32 v42, 24, v34 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshrrev_b32_e32 v22, 8, v35 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v9, 24, v36 +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v31 +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v18 +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v61, 8, v17 +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[37:38] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[43:44] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[10:11] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[6:7] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[2:3] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[26:27] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[33:34] +; VI-NEXT: v_lshrrev_b32_e32 v46, 8, v36 +; VI-NEXT: v_lshrrev_b64 v[37:38], 24, v[35:36] +; VI-NEXT: 
v_lshrrev_b64 v[10:11], 24, v[52:53] +; VI-NEXT: v_lshrrev_b64 v[52:53], 24, v[58:59] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[31:32] +; VI-NEXT: v_mov_b32_e32 v53, v63 +; VI-NEXT: v_mov_b32_e32 v27, v19 +; VI-NEXT: v_mov_b32_e32 v34, v14 +; VI-NEXT: v_lshrrev_b32_e32 v9, 24, v55 +; VI-NEXT: v_mov_b32_e32 v7, v45 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v31 +; VI-NEXT: v_mov_b32_e32 v3, v15 +; VI-NEXT: v_mov_b32_e32 v15, v29 +; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v17 +; VI-NEXT: v_mov_b32_e32 v38, v56 +; VI-NEXT: v_mov_b32_e32 v29, v41 +; VI-NEXT: v_mov_b32_e32 v45, v60 +; VI-NEXT: v_lshrrev_b32_e32 v41, 8, v55 +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_lshrrev_b32_e32 v21, 8, v49 +; VI-NEXT: v_lshrrev_b32_e32 v4, 24, v50 +; VI-NEXT: v_lshrrev_b32_e32 v30, 8, v50 +; VI-NEXT: v_lshrrev_b32_e32 v51, 24, v40 +; VI-NEXT: v_lshrrev_b64 v[35:36], 24, v[49:50] +; VI-NEXT: v_lshrrev_b64 v[49:50], 24, v[39:40] +; VI-NEXT: v_mov_b32_e32 v58, v51 +; VI-NEXT: v_mov_b32_e32 v36, v62 +; VI-NEXT: v_lshrrev_b64 v[61:62], 24, v[54:55] ; VI-NEXT: v_lshrrev_b64 v[50:51], 24, v[17:18] -; VI-NEXT: v_mov_b32_e32 v41, v33 -; VI-NEXT: v_mov_b32_e32 v33, v31 -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[19:20] -; VI-NEXT: v_mov_b32_e32 v51, v34 +; VI-NEXT: v_lshrrev_b32_e32 v63, 8, v40 +; VI-NEXT: v_mov_b32_e32 v40, v16 +; VI-NEXT: v_mov_b32_e32 v16, v48 +; VI-NEXT: v_mov_b32_e32 v48, v28 +; VI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; VI-NEXT: v_lshrrev_b32_e32 v56, 8, v39 +; VI-NEXT: v_mov_b32_e32 v39, v47 +; VI-NEXT: v_mov_b32_e32 v47, v4 +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v54 ; VI-NEXT: .LBB94_2: ; %Flow ; VI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; VI-NEXT: s_cbranch_execz .LBB94_4 ; VI-NEXT: ; %bb.3: ; %cmp.true ; VI-NEXT: v_mov_b32_e32 v63, 0x200 -; VI-NEXT: v_add_f16_sdwa v31, v18, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; VI-NEXT: v_add_f16_sdwa v21, v18, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v21 ; VI-NEXT: v_add_f16_e32 v18, 0x200, v18 -; VI-NEXT: v_or_b32_e32 v32, v18, v31 -; VI-NEXT: v_add_f16_sdwa v31, v17, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; VI-NEXT: v_add_f16_sdwa v20, v17, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_or_b32_e32 v58, v18, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v20 ; VI-NEXT: v_add_f16_e32 v17, 0x200, v17 -; VI-NEXT: v_or_b32_e32 v31, v17, v31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; VI-NEXT: v_add_f16_sdwa v31, v20, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 
src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v20, 0x200, v20 -; VI-NEXT: v_or_b32_e32 v32, v20, v31 -; VI-NEXT: v_add_f16_sdwa v31, v19, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v19, 0x200, v19 -; VI-NEXT: v_or_b32_e32 v31, v19, v31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; VI-NEXT: v_add_f16_sdwa v34, v22, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v34 -; VI-NEXT: v_add_f16_e32 v22, 0x200, v22 -; VI-NEXT: v_or_b32_e32 v32, v22, v31 -; VI-NEXT: v_add_f16_sdwa v31, v21, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v21, 0x200, v21 -; VI-NEXT: v_or_b32_e32 v31, v21, v31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; VI-NEXT: v_add_f16_sdwa v31, v24, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v24, 0x200, v24 -; VI-NEXT: v_or_b32_e32 v32, v24, v31 -; VI-NEXT: v_add_f16_sdwa v31, v23, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v23, 0x200, v23 -; VI-NEXT: v_or_b32_e32 v31, v23, v31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; VI-NEXT: v_add_f16_sdwa v31, v26, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v26, 0x200, v26 -; VI-NEXT: v_or_b32_e32 v36, v26, v31 -; VI-NEXT: v_add_f16_sdwa v31, v25, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; VI-NEXT: v_add_f16_sdwa v22, v32, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_or_b32_e32 v57, v17, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v22 +; VI-NEXT: v_add_f16_e32 v32, 0x200, v32 +; VI-NEXT: v_or_b32_e32 v15, v32, v0 +; VI-NEXT: v_add_f16_sdwa v0, v31, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; VI-NEXT: v_add_f16_e32 v31, 0x200, v31 +; VI-NEXT: v_add_f16_sdwa v23, v55, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_e32 v14, v31, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 
16, v23 +; VI-NEXT: v_add_f16_e32 v55, 0x200, v55 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_e32 v62, v55, v0 +; VI-NEXT: v_add_f16_sdwa v0, v54, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; VI-NEXT: v_add_f16_e32 v54, 0x200, v54 +; VI-NEXT: v_or_b32_e32 v61, v54, v0 +; VI-NEXT: v_mov_b32_e32 v26, v54 +; VI-NEXT: v_mov_b32_e32 v27, v55 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_f16_sdwa v60, v25, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v60 ; VI-NEXT: v_add_f16_e32 v25, 0x200, v25 -; VI-NEXT: v_or_b32_e32 v35, v25, v31 -; VI-NEXT: v_add_f16_sdwa v31, v28, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v28, 0x200, v28 -; VI-NEXT: v_or_b32_e32 v38, v28, v31 -; VI-NEXT: v_add_f16_sdwa v31, v27, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v27, 0x200, v27 -; VI-NEXT: v_or_b32_e32 v37, v27, v31 -; VI-NEXT: v_add_f16_sdwa v31, v30, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v30, 0x200, v30 -; VI-NEXT: v_add_f16_sdwa v32, v29, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_add_f16_e32 v29, 0x200, v29 -; VI-NEXT: v_or_b32_e32 v49, v30, v31 -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v32 -; VI-NEXT: v_or_b32_e32 v48, v29, v31 -; VI-NEXT: v_add_f16_sdwa v31, v61, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; VI-NEXT: v_add_f16_e32 v61, 0x200, v61 -; VI-NEXT: v_add_f16_sdwa v32, v60, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v60, 0x200, v60 -; VI-NEXT: v_or_b32_e32 v51, v61, v31 -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v32 -; VI-NEXT: v_or_b32_e32 v50, v60, v31 -; VI-NEXT: v_add_f16_sdwa v31, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: v_or_b32_e32 v34, v25, v0 +; VI-NEXT: v_add_f16_sdwa v0, v24, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v24, 0x200, v24 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte 
Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; VI-NEXT: v_or_b32_e32 v33, v24, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_f16_sdwa v0, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; VI-NEXT: v_add_f16_e32 v2, 0x200, v2 -; VI-NEXT: v_add_f16_sdwa v32, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; VI-NEXT: v_or_b32_e32 v36, v2, v0 +; VI-NEXT: v_add_f16_sdwa v0, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_add_f16_e32 v1, 0x200, v1 -; VI-NEXT: v_or_b32_e32 v53, v2, v31 -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v32 -; VI-NEXT: v_or_b32_e32 v52, v1, v31 -; VI-NEXT: v_add_f16_sdwa v31, v4, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; VI-NEXT: v_add_f16_e32 v4, 0x200, v4 -; VI-NEXT: v_add_f16_sdwa v32, v3, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; VI-NEXT: v_or_b32_e32 v35, v1, v0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_f16_sdwa v0, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; VI-NEXT: v_add_f16_e32 v2, 0x200, v2 +; VI-NEXT: v_or_b32_e32 v38, v2, v0 +; VI-NEXT: v_add_f16_sdwa v0, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v1, 0x200, v1 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; VI-NEXT: v_or_b32_e32 v37, v1, v0 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_f16_sdwa v1, v8, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_f16_sdwa v0, v9, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v9, 0x200, v9 +; VI-NEXT: v_add_f16_e32 v8, 0x200, v8 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 
offset:80 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; VI-NEXT: v_or_b32_e32 v49, v9, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v1 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_f16_sdwa v47, v3, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_add_f16_e32 v3, 0x200, v3 -; VI-NEXT: v_or_b32_e32 v46, v4, v31 -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v32 -; VI-NEXT: v_or_b32_e32 v45, v3, v31 -; VI-NEXT: v_add_f16_sdwa v31, v6, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: v_add_f16_sdwa v1, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v2, 0x200, v2 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: v_or_b32_e32 v48, v8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v47 +; VI-NEXT: v_mov_b32_e32 v9, v31 +; VI-NEXT: v_add_f16_sdwa v8, v43, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v10, v32 +; VI-NEXT: v_add_f16_e32 v43, 0x200, v43 +; VI-NEXT: v_or_b32_e32 v51, v3, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v1 +; VI-NEXT: v_or_b32_e32 v50, v2, v0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_f16_sdwa v3, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_f16_sdwa v0, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; VI-NEXT: v_add_f16_e32 v2, 0x200, v2 +; VI-NEXT: v_add_f16_e32 v1, 0x200, v1 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; VI-NEXT: v_or_b32_e32 v53, v2, v0 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; VI-NEXT: v_add_f16_sdwa v3, v44, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v44, 0x200, v44 +; VI-NEXT: v_or_b32_e32 v52, v1, v0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_f16_sdwa v59, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_f16_sdwa v0, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; VI-NEXT: v_add_f16_e32 v2, 0x200, v2 +; VI-NEXT: v_add_f16_e32 v1, 0x200, v1 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; VI-NEXT: v_or_b32_e32 
v46, v2, v0 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v59 +; VI-NEXT: v_or_b32_e32 v45, v1, v0 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_f16_sdwa v1, v6, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_f16_sdwa v11, v7, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v7, 0x200, v7 ; VI-NEXT: v_add_f16_e32 v6, 0x200, v6 -; VI-NEXT: v_add_f16_sdwa v32, v5, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v5, 0x200, v5 -; VI-NEXT: v_or_b32_e32 v43, v6, v31 -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v32 -; VI-NEXT: v_add_f16_sdwa v44, v8, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; VI-NEXT: v_or_b32_e32 v42, v5, v31 -; VI-NEXT: v_add_f16_e32 v8, 0x200, v8 -; VI-NEXT: v_add_f16_sdwa v32, v7, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v44 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v11 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; VI-NEXT: v_or_b32_e32 v5, v7, v0 +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v1 +; VI-NEXT: v_or_b32_e32 v4, v6, v0 +; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_f16_sdwa v39, v6, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_add_f16_sdwa v56, v7, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_add_f16_e32 v7, 0x200, v7 -; VI-NEXT: v_or_b32_e32 v41, v8, v31 -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v32 -; VI-NEXT: v_or_b32_e32 v40, v7, v31 -; VI-NEXT: v_add_f16_sdwa v31, v10, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; VI-NEXT: v_add_f16_e32 v10, 0x200, v10 -; VI-NEXT: v_add_f16_sdwa v32, v9, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; VI-NEXT: v_add_f16_e32 v9, 0x200, v9 -; VI-NEXT: v_or_b32_e32 v55, v10, v31 -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v32 -; VI-NEXT: v_add_f16_sdwa v39, v12, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_or_b32_e32 v54, v9, v31 -; VI-NEXT: v_add_f16_e32 v12, 0x200, v12 -; VI-NEXT: v_add_f16_sdwa v33, v11, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v39 -; VI-NEXT: v_add_f16_sdwa v47, v14, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: 
buffer_store_dword v32, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; VI-NEXT: v_or_b32_e32 v32, v12, v31 -; VI-NEXT: v_lshlrev_b32_e32 v31, 16, v33 -; VI-NEXT: v_add_f16_e32 v14, 0x200, v14 -; VI-NEXT: v_add_f16_sdwa v33, v13, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v56, 16, v47 -; VI-NEXT: v_or_b32_e32 v57, v14, v56 -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v56, 16, v33 -; VI-NEXT: v_add_f16_sdwa v33, v16, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_add_f16_e32 v16, 0x200, v16 -; VI-NEXT: v_add_f16_sdwa v63, v15, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v58, 16, v33 -; VI-NEXT: v_add_f16_e32 v15, 0x200, v15 -; VI-NEXT: v_or_b32_e32 v59, v16, v58 -; VI-NEXT: v_lshlrev_b32_e32 v58, 16, v63 -; VI-NEXT: v_or_b32_e32 v58, v15, v58 -; VI-NEXT: v_lshrrev_b32_e32 v62, 8, v59 -; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v62, 8, v58 -; VI-NEXT: v_lshrrev_b64 v[58:59], 24, v[58:59] -; VI-NEXT: v_add_f16_e32 v13, 0x200, v13 -; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; VI-NEXT: v_or_b32_e32 v56, v13, v56 -; VI-NEXT: v_lshrrev_b32_e32 v58, 8, v57 -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v58, 8, v56 -; VI-NEXT: v_lshrrev_b64 v[56:57], 24, v[56:57] -; VI-NEXT: v_add_f16_e32 v11, 0x200, v11 -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; VI-NEXT: v_or_b32_e32 v31, v11, v31 -; VI-NEXT: v_lshrrev_b32_e32 v56, 8, v32 -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v56, 8, v31 -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[31:32] -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v55 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v54 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[54:55] -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v41 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 
v31, 8, v40 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[40:41] -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v43 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v42 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v46 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v45 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v53 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v52 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v51 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v50 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[50:51] -; VI-NEXT: v_lshrrev_b64 v[42:43], 24, v[42:43] -; VI-NEXT: v_lshrrev_b64 v[45:46], 24, v[45:46] -; VI-NEXT: v_lshrrev_b64 v[52:53], 24, v[52:53] -; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v49 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v48 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[48:49] -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v38 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v37 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[37:38] -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v36 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v35 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; VI-NEXT: v_add_f16_e32 v6, 0x200, v6 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v56 +; VI-NEXT: v_or_b32_e32 v41, v7, v0 +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; 
VI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v39 +; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_e32 v40, v6, v0 +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_add_f16_sdwa v19, v24, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_add_f16_sdwa v42, v25, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v25, 0x200, v25 +; VI-NEXT: v_add_f16_e32 v24, 0x200, v24 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v42 +; VI-NEXT: v_or_b32_e32 v7, v25, v0 +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_add_f16_sdwa v28, v2, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v2, 0x200, v2 +; VI-NEXT: v_add_f16_sdwa v16, v1, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v1, 0x200, v1 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v19 +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_add_f16_sdwa v13, v54, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v54, 0x200, v54 +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v25, 8, v46 +; VI-NEXT: v_or_b32_e32 v6, v24, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; VI-NEXT: v_or_b32_e32 v32, v44, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v8 +; VI-NEXT: v_or_b32_e32 v31, v43, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v28 +; VI-NEXT: v_or_b32_e32 v30, v2, v0 +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_add_f16_sdwa v2, v55, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_f16_e32 v55, 0x200, v55 +; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v16 +; VI-NEXT: v_or_b32_e32 v29, v1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v24, 8, v4 +; VI-NEXT: v_lshrrev_b32_e32 v63, 8, v34 +; VI-NEXT: v_or_b32_e32 v1, v55, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v13 +; VI-NEXT: v_or_b32_e32 v0, v54, v0 +; VI-NEXT: v_lshrrev_b32_e32 v13, 8, v1 +; VI-NEXT: v_lshrrev_b32_e32 v54, 8, v0 +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[0:1] +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v30 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v29 +; VI-NEXT: 
buffer_store_dword v0, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[29:30] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v32 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v31 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[31:32] +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v7 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v6 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[0:1], 24, v[6:7] +; VI-NEXT: v_mov_b32_e32 v32, v10 +; VI-NEXT: v_mov_b32_e32 v31, v9 +; VI-NEXT: v_lshrrev_b32_e32 v10, 8, v41 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v7, v11 +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[10:11], 24, v[40:41] +; VI-NEXT: v_mov_b32_e32 v55, v27 +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v54, v26 +; VI-NEXT: v_mov_b32_e32 v26, v20 +; VI-NEXT: v_lshrrev_b32_e32 v20, 8, v5 +; VI-NEXT: v_lshrrev_b64 v[10:11], 24, v[4:5] +; VI-NEXT: v_mov_b32_e32 v5, v22 +; VI-NEXT: v_mov_b32_e32 v13, v21 +; VI-NEXT: v_lshrrev_b64 v[21:22], 24, v[45:46] +; VI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v53 +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v52 +; VI-NEXT: v_lshrrev_b64 v[21:22], 24, v[50:51] +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v50 +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v21, 8, v48 +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[21:22], 24, v[48:49] +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v49 +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v30, 8, v36 +; VI-NEXT: v_lshrrev_b32_e32 v21, 8, v35 ; VI-NEXT: v_lshrrev_b64 v[35:36], 24, v[35:36] -; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; VI-NEXT: 
buffer_load_dword v36, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v54, v39 -; VI-NEXT: v_mov_b32_e32 v37, v44 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_mov_b32_e32 v56, v58 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v49 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v48 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[49:50], 24, v[48:49] -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v48, v33 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v51 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v50 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[50:51] -; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v51 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v50 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[50:51] -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v51 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v50 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v33, 8, 8 -; VI-NEXT: v_mov_b32_e32 v33, v47 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v33, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v39, 8, 8 -; VI-NEXT: v_mov_b32_e32 v39, v63 -; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b64 v[50:51], 24, v[50:51] -; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v36, v2 +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v15 +; VI-NEXT: v_lshrrev_b32_e32 v41, 8, v62 +; VI-NEXT: v_lshrrev_b32_e32 v4, 8, v61 +; VI-NEXT: v_lshrrev_b64 v[61:62], 24, v[61:62] +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v2, 8, v14 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:372 ; 4-byte 
Folded Spill +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v48, v56 +; VI-NEXT: v_lshrrev_b32_e32 v56, 8, v33 +; VI-NEXT: v_lshrrev_b64 v[49:50], 24, v[33:34] +; VI-NEXT: v_lshrrev_b64 v[33:34], 24, v[14:15] +; VI-NEXT: v_lshrrev_b32_e32 v14, 8, v58 +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v14, 8, v57 +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v9, v23 +; VI-NEXT: v_lshrrev_b32_e32 v23, 8, v40 +; VI-NEXT: v_mov_b32_e32 v14, v8 +; VI-NEXT: v_mov_b32_e32 v40, v42 +; VI-NEXT: v_bfe_u32 v8, v42, 8, 8 +; VI-NEXT: v_lshrrev_b32_e32 v46, 8, v38 +; VI-NEXT: v_lshrrev_b32_e32 v22, 8, v37 +; VI-NEXT: v_lshrrev_b64 v[37:38], 24, v[37:38] +; VI-NEXT: v_lshrrev_b32_e32 v27, 8, v45 +; VI-NEXT: v_lshrrev_b64 v[52:53], 24, v[52:53] +; VI-NEXT: v_lshrrev_b32_e32 v45, 8, v51 +; VI-NEXT: v_mov_b32_e32 v38, v28 +; VI-NEXT: v_lshrrev_b64 v[50:51], 24, v[57:58] +; VI-NEXT: v_bfe_u32 v28, v36, 8, 8 +; VI-NEXT: v_bfe_u32 v29, v38, 8, 8 +; VI-NEXT: v_mov_b32_e32 v53, v3 +; VI-NEXT: v_bfe_u32 v15, v3, 8, 8 +; VI-NEXT: v_mov_b32_e32 v3, v59 +; VI-NEXT: v_bfe_u32 v51, v48, 8, 8 +; VI-NEXT: v_bfe_u32 v57, v7, 8, 8 +; VI-NEXT: v_bfe_u32 v58, v60, 8, 8 ; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_bfe_u32 v32, v63, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v44, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; VI-NEXT: v_bfe_u32 v34, v62, 8, 8 +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_bfe_u32 v2, v2, 8, 8 +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; VI-NEXT: v_bfe_u32 v34, v47, 8, 8 +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; VI-NEXT: v_bfe_u32 v9, v9, 8, 8 +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; VI-NEXT: v_bfe_u32 v5, v5, 8, 8 +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; VI-NEXT: v_bfe_u32 v13, v13, 8, 8 +; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_bfe_u32 v32, v47, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: v_bfe_u32 v42, v0, 8, 8 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v44, v32 -; VI-NEXT: v_bfe_u32 v32, v32, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: v_bfe_u32 v0, v0, 8, 8 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v55, v32 -; 
VI-NEXT: v_bfe_u32 v32, v32, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v36, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_bfe_u32 v32, v32, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v58, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v57, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v59, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v34, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v53, 8, 8 -; VI-NEXT: v_mov_b32_e32 v58, v57 -; VI-NEXT: v_mov_b32_e32 v57, v59 -; VI-NEXT: v_mov_b32_e32 v59, v34 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v32, v41, 8, 8 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; VI-NEXT: v_bfe_u32 v47, v0, 8, 8 ; VI-NEXT: .LBB94_4: ; %end ; VI-NEXT: s_or_b64 exec, exec, s[4:5] -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v34, 8, v32 -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v34, 8, v32 -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v2, v2, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b16_e32 v34, 8, v52 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v34, v32, v34 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 4, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded 
Reload -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v45 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 8, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v52 +; VI-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v0, v12, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v44, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 12, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v42 +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 4, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 16, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v27 +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v47, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 20, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 8, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v25 ; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 24, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 12, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v10 +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v24 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 16, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v20 +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v57 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: 
v_or_b32_sdwa v1, v7, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 20, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v23 +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 24, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v51 +; VI-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v37, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 28, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 28, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; 
VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 32, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 32, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v8 +; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v10, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 36, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 36, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v11, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 40, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 40, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v15 +; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v54, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 44, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 44, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v16, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v13, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 48, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v1, v14, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v33, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 52, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; 
VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 48, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v29 +; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v15, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 52, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v15, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 56, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v48, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 60, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v50 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v17, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 64, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; 
VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 56, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v28 +; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v18, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v41, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x44, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v31 -; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 60, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v50 +; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v19, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x48, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 64, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v13 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 
src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v53, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x4c, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v40 -; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x44, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v33 +; VI-NEXT: v_or_b32_sdwa v0, v11, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x50, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x48, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v22, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v59, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x54, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v49 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x4c, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 
offen +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v4 +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v61 +; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x58, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x50, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v41 +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v9 +; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v57, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x5c, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v35 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x54, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v56 +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v49 +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x60, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, 
s[0:3], s32 offset:332 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x58, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v63 +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v58 +; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v26, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v58, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x64, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x5c, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v21 +; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v35 ; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x68, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x60, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v30 +; VI-NEXT: 
v_lshlrev_b16_e32 v1, 8, v47 +; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v56, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x6c, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x64, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v22 +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v37 ; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x70, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x68, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v46 +; VI-NEXT: v_or_b32_sdwa v0, v29, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x74, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x6c, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v51, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_add_u32_e32 v2, vcc, 0x78, v0 -; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v3 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x70, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v42 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x74, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; VI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x7c, v0 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v0 +; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v2, v36, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x78, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v0, 8, v45 +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v34 +; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x7c, v12 +; VI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload @@ -184818,55 +186721,55 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; 
implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 @@ -184892,24 +186795,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -184921,24 +186824,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] @@ -185000,24 +186903,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -185029,24 +186932,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: 
v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 ; GFX11-TRUE16-NEXT: .LBB94_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 @@ -185084,7 +186987,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h @@ -185092,15 +186995,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h @@ -185108,15 +187011,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h @@ -185124,15 +187027,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l -; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h @@ -185140,15 +187043,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h @@ -185156,15 +187059,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h @@ -185172,8 +187075,8 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 @@ -185188,15 +187091,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h ; GFX11-TRUE16-NEXT: 
v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h @@ -185204,15 +187107,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h @@ -185220,15 +187123,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h @@ -185236,15 +187139,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 
v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h @@ -185252,15 +187155,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h @@ -185268,15 +187171,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h @@ -186028,23 +187931,24 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; SI-LABEL: bitcast_v64f16_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:80 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 @@ -186053,1830 +187957,1684 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:16 ; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:24 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:36 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:32 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:44 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v63, v5 -; 
SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:72 +; SI-NEXT: v_writelane_b32 v63, s30, 0 +; SI-NEXT: v_writelane_b32 v63, s31, 1 +; SI-NEXT: v_writelane_b32 v63, s34, 2 +; SI-NEXT: v_writelane_b32 v63, s35, 3 +; SI-NEXT: v_writelane_b32 v63, s36, 4 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_writelane_b32 v63, s37, 5 +; SI-NEXT: v_writelane_b32 v63, s38, 6 +; SI-NEXT: v_writelane_b32 v63, s39, 7 +; SI-NEXT: v_writelane_b32 v63, s48, 8 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v29, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v60, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 -; SI-NEXT: v_mov_b32_e32 v59, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v43, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v7 +; SI-NEXT: v_writelane_b32 v63, s49, 9 +; SI-NEXT: v_writelane_b32 v63, s50, 10 +; SI-NEXT: v_writelane_b32 v63, s51, 11 +; SI-NEXT: v_writelane_b32 v63, s52, 12 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v9 +; SI-NEXT: v_writelane_b32 v63, s53, 13 +; SI-NEXT: v_writelane_b32 v63, s54, 14 +; SI-NEXT: v_writelane_b32 v63, s55, 15 +; SI-NEXT: v_writelane_b32 v63, s64, 16 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v26 +; SI-NEXT: v_writelane_b32 v63, s65, 17 +; SI-NEXT: v_writelane_b32 v63, s66, 18 +; SI-NEXT: v_writelane_b32 v63, s67, 19 +; SI-NEXT: v_writelane_b32 v63, s68, 20 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v25 +; SI-NEXT: v_writelane_b32 v63, s69, 21 +; SI-NEXT: v_writelane_b32 v63, s70, 22 +; SI-NEXT: v_writelane_b32 v63, s71, 23 +; SI-NEXT: v_writelane_b32 v63, s80, 24 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v30 +; SI-NEXT: v_writelane_b32 v63, s81, 25 +; SI-NEXT: v_writelane_b32 v63, s82, 26 +; SI-NEXT: v_writelane_b32 v63, s83, 27 +; SI-NEXT: v_writelane_b32 v63, s84, 28 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_writelane_b32 v63, s85, 29 +; SI-NEXT: v_writelane_b32 v63, s86, 30 +; SI-NEXT: v_writelane_b32 v63, s87, 31 +; SI-NEXT: v_mov_b32_e32 v46, v29 +; SI-NEXT: v_writelane_b32 v63, s96, 32 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v33, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v61, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v7 +; SI-NEXT: v_cvt_f16_f32_e32 
v43, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v32, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v31, v14 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v6, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v15 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v59 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v59, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v62, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v15, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v36 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v39 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v56 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v60 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v50 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_cvt_f16_f32_e32 v48, v52 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v11, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v51 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v52 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f16_f32_e32 v51, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v53 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v53, v40 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], 
s32 offset:492 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v49, v54 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f16_f32_e32 v40, v44 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v11, s17 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v52, v47 -; SI-NEXT: v_cvt_f16_f32_e32 v44, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s21 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v28, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v45, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v46, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v55 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_cvt_f16_f32_e32 v53, v40 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_cvt_f16_f32_e32 v55, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v17, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v37, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v48, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v22, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v30, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v39, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s28 +; SI-NEXT: v_writelane_b32 v63, s97, 33 +; SI-NEXT: v_writelane_b32 v63, s98, 34 +; SI-NEXT: v_writelane_b32 v63, s99, 35 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:572 ; 
4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, 
off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB95_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v13, v13, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v44 -; SI-NEXT: v_or_b32_e32 v55, v12, v11 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v17 -; SI-NEXT: v_or_b32_e32 v57, v16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v20 -; SI-NEXT: v_or_b32_e32 v17, v14, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v22 -; SI-NEXT: v_or_b32_e32 v21, v21, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v15 -; SI-NEXT: v_or_b32_e32 v16, v19, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v24 -; SI-NEXT: v_or_b32_e32 v19, v23, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v29 -; SI-NEXT: v_or_b32_e32 v47, v60, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_or_b32_e32 v43, v42, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v33 -; SI-NEXT: v_or_b32_e32 v14, v63, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v61 -; SI-NEXT: v_or_b32_e32 v42, v58, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v32 -; SI-NEXT: s_mov_b64 s[4:5], 0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v60, v12, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v3 -; SI-NEXT: v_or_b32_e32 v22, v2, v11 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v52 -; SI-NEXT: v_or_b32_e32 v12, v46, v12 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v24, v2, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 -; SI-NEXT: v_or_b32_e32 v4, v4, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_or_b32_e32 v34, v34, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v6 -; SI-NEXT: v_or_b32_e32 v3, v59, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_or_b32_e32 v59, v56, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v18 -; SI-NEXT: v_or_b32_e32 v6, v62, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v7 -; SI-NEXT: v_or_b32_e32 v62, v25, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v26 -; SI-NEXT: v_or_b32_e32 v2, v27, v11 -; SI-NEXT: 
v_lshlrev_b32_e32 v11, 16, v8 -; SI-NEXT: v_or_b32_e32 v25, v28, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v30 -; SI-NEXT: v_or_b32_e32 v1, v36, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v5 -; SI-NEXT: v_or_b32_e32 v23, v35, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v38 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v18, v39, v11 -; SI-NEXT: v_mov_b32_e32 v36, v2 -; SI-NEXT: v_mov_b32_e32 v35, v1 -; SI-NEXT: v_alignbit_b32 v1, v55, v13, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v55, v13, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v55, v13, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v17, v57, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v17, v57, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v17, v57, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v21, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v21, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v21, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v47, v19, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v47, v19, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v47, v19, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v14, v43, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v14, v43, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v14, v43, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v60, v42, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v60, v42, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v60, v42, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v24, v22, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, 
v24, v22, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v24, v22, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v34, v4, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v34, v4, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v34, v4, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v59, v3, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v59, v3, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v59, v3, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v26, 8, v34 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v38 -; SI-NEXT: v_or_b32_e32 v61, v50, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v49 -; SI-NEXT: v_or_b32_e32 v2, v48, v11 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v49, v6 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v62, v49, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v62, v49, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v62, v49, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v25, v36, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v25, v36, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v25, v36, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v23, v35, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v23, v35, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v23, v35, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v61, v18, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v61, v18, 16 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v48 -; SI-NEXT: v_or_b32_e32 v58, v54, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v51 -; SI-NEXT: v_or_b32_e32 v6, v53, v11 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v1, v61, v18, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v58, v2, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v1, v58, v2, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v55 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v17 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v47 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v14 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v60 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v59 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v62 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v25 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v23 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v37 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v61 -; SI-NEXT: v_or_b32_e32 v54, v40, v11 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v58 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v54 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v12 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v44, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v20, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v15, 8, 8 -; 
SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v29, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v33, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v32, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v31, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v10, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v9, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v8, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v5, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v38, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v48, 8, 8 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v41 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v37, 8, 8 -; SI-NEXT: v_or_b32_e32 v11, v45, v11 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v19 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: s_or_b32 s44, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v12 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v17 +; SI-NEXT: s_or_b32 s45, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[44:45], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 4 +; SI-NEXT: v_writelane_b32 v62, s5, 5 +; SI-NEXT: s_lshr_b64 s[4:5], s[44:45], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 2 +; SI-NEXT: v_writelane_b32 v62, s5, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[44:45], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 0 +; SI-NEXT: v_writelane_b32 v62, s5, 1 +; SI-NEXT: v_readfirstlane_b32 s4, v38 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v37 +; SI-NEXT: s_or_b32 s42, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v48 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v22 +; SI-NEXT: s_or_b32 s43, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[42:43], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 10 +; SI-NEXT: v_writelane_b32 v62, s5, 11 +; SI-NEXT: s_lshr_b64 s[4:5], s[42:43], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 8 +; SI-NEXT: v_writelane_b32 v62, s5, 9 +; SI-NEXT: s_lshr_b64 s[4:5], s[42:43], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 6 +; SI-NEXT: v_writelane_b32 v62, s5, 7 +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s4, v15 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_or_b32 s40, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v30 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v39 +; SI-NEXT: s_or_b32 s41, s5, s4 +; 
SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 16 +; SI-NEXT: v_writelane_b32 v62, s5, 17 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 14 +; SI-NEXT: v_writelane_b32 v62, s5, 15 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 12 +; SI-NEXT: v_writelane_b32 v62, s5, 13 +; SI-NEXT: v_readfirstlane_b32 s4, v35 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v20 +; SI-NEXT: s_or_b32 s28, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v47 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_mov_b32_e32 v22, v2 +; SI-NEXT: v_mov_b32_e32 v39, v5 +; SI-NEXT: v_mov_b32_e32 v60, v16 +; SI-NEXT: v_readfirstlane_b32 s46, v55 +; SI-NEXT: v_mov_b32_e32 v17, v43 +; SI-NEXT: v_mov_b32_e32 v40, v34 +; SI-NEXT: v_mov_b32_e32 v41, v21 +; SI-NEXT: v_mov_b32_e32 v51, v42 +; SI-NEXT: s_lshr_b32 s71, s45, 8 +; SI-NEXT: s_lshr_b32 s70, s43, 8 +; SI-NEXT: s_lshr_b32 s69, s41, 8 +; SI-NEXT: v_bfe_u32 v38, v47, 8, 8 +; SI-NEXT: v_bfe_u32 v37, v33, 8, 8 +; SI-NEXT: v_bfe_u32 v35, v32, 8, 8 +; SI-NEXT: v_bfe_u32 v20, v10, 8, 8 +; SI-NEXT: v_bfe_u32 v19, v9, 8, 8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v52, 8, 8 -; SI-NEXT: v_alignbit_b32 v28, v58, v2, 24 -; SI-NEXT: v_alignbit_b32 v2, v54, v6, 24 -; SI-NEXT: v_alignbit_b32 v39, v54, v6, 16 -; SI-NEXT: v_alignbit_b32 v40, v54, v6, 8 -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 24 -; SI-NEXT: v_alignbit_b32 v56, v12, v11, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v30, v12, v11, 8 -; SI-NEXT: v_mov_b32_e32 v20, v29 -; SI-NEXT: v_mov_b32_e32 v15, v33 -; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v55, v4 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: s_or_b32 s29, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 22 +; SI-NEXT: v_writelane_b32 v62, s5, 23 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 20 +; SI-NEXT: v_writelane_b32 v62, s5, 21 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 18 +; SI-NEXT: v_writelane_b32 v62, s5, 19 +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: v_mov_b32_e32 v2, v1 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_or_b32 s26, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v33 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v43 +; SI-NEXT: s_or_b32 s27, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 28 +; SI-NEXT: v_writelane_b32 v62, s5, 29 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 26 +; SI-NEXT: v_writelane_b32 v62, s5, 27 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 24 +; SI-NEXT: v_writelane_b32 v62, s5, 25 +; SI-NEXT: v_readfirstlane_b32 s4, v5 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshr_b32 s68, s29, 8 +; SI-NEXT: s_lshr_b32 s66, s27, 8 +; SI-NEXT: v_bfe_u32 v43, v31, 8, 8 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s24, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v32 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; 
SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_bfe_u32 v15, v5, 8, 8 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_or_b32 s25, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 34 +; SI-NEXT: v_writelane_b32 v62, s5, 35 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 32 +; SI-NEXT: v_writelane_b32 v62, s5, 33 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 30 +; SI-NEXT: v_writelane_b32 v62, s5, 31 +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v29 +; SI-NEXT: s_or_b32 s22, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v6 +; SI-NEXT: s_or_b32 s23, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 40 +; SI-NEXT: v_writelane_b32 v62, s5, 41 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 38 +; SI-NEXT: v_writelane_b32 v62, s5, 39 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 36 +; SI-NEXT: v_writelane_b32 v62, s5, 37 +; SI-NEXT: v_readfirstlane_b32 s4, v58 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v13 +; SI-NEXT: s_or_b32 s20, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v10 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v11 +; SI-NEXT: s_or_b32 s21, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 44 +; SI-NEXT: v_writelane_b32 v62, s5, 45 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: v_mov_b32_e32 v58, v11 +; SI-NEXT: v_writelane_b32 v62, s4, 42 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_writelane_b32 v62, s5, 43 +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v7, v29 +; SI-NEXT: v_mov_b32_e32 v29, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s4, v34 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_or_b32 s18, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v9 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v61 +; SI-NEXT: s_or_b32 s19, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v21 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v24 +; SI-NEXT: s_or_b32 s16, s5, s4 +; SI-NEXT: v_mov_b32_e32 v1, v53 +; SI-NEXT: v_mov_b32_e32 v34, v61 +; SI-NEXT: v_mov_b32_e32 v21, v24 +; SI-NEXT: s_lshr_b32 s64, s25, 8 +; SI-NEXT: s_lshr_b32 s54, s23, 8 +; SI-NEXT: s_lshr_b32 s52, s21, 8 +; SI-NEXT: s_lshr_b32 s50, s19, 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[96:97], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[98:99], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 8 +; SI-NEXT: v_mov_b32_e32 v13, v12 +; SI-NEXT: v_bfe_u32 v24, v12, 8, 8 +; SI-NEXT: v_mov_b32_e32 v12, v48 +; SI-NEXT: v_bfe_u32 v48, v48, 8, 8 +; SI-NEXT: v_bfe_u32 v61, v59, 8, 8 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_readfirstlane_b32 s4, v11 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_bfe_u32 v18, v11, 8, 8 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: s_or_b32 s17, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v44 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v42 +; SI-NEXT: s_or_b32 s14, s5, s4 +; SI-NEXT: 
s_waitcnt vmcnt(0) +; SI-NEXT: v_readfirstlane_b32 s4, v6 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v46 +; SI-NEXT: s_or_b32 s15, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v8 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v23 +; SI-NEXT: s_or_b32 s12, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v5 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v3 +; SI-NEXT: s_or_b32 s13, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v27 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v28 +; SI-NEXT: s_or_b32 s10, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v4 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v45 +; SI-NEXT: s_or_b32 s11, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v36 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v57 +; SI-NEXT: s_or_b32 s8, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v59 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v56 +; SI-NEXT: s_or_b32 s9, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v26 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v25 +; SI-NEXT: s_or_b32 s6, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v52 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v50 +; SI-NEXT: s_or_b32 s7, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v49 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v54 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v53 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_or_b32 s5, s46, s5 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 24 +; SI-NEXT: v_writelane_b32 v62, vcc_lo, 48 +; SI-NEXT: v_writelane_b32 v62, vcc_hi, 49 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 8 +; SI-NEXT: v_writelane_b32 v62, vcc_lo, 46 +; SI-NEXT: s_lshr_b32 s48, s17, 8 +; SI-NEXT: s_lshr_b32 s67, s15, 8 +; SI-NEXT: s_lshr_b32 s65, s13, 8 +; SI-NEXT: s_lshr_b32 s55, s11, 8 +; SI-NEXT: s_lshr_b32 s53, s9, 8 +; SI-NEXT: s_lshr_b32 s51, s7, 8 +; SI-NEXT: s_lshr_b32 s49, s5, 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[82:83], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[84:85], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[86:87], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[80:81], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v62, vcc_hi, 47 +; SI-NEXT: s_mov_b64 vcc, 0 +; SI-NEXT: v_mov_b32_e32 v57, v30 +; SI-NEXT: v_bfe_u32 v50, v30, 8, 8 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v46, v33 +; SI-NEXT: v_mov_b32_e32 v30, v32 ; SI-NEXT: v_mov_b32_e32 v32, v31 ; SI-NEXT: v_mov_b32_e32 v31, v10 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v9, v7 -; SI-NEXT: v_bfe_u32 v29, v7, 8, 8 -; SI-NEXT: v_mov_b32_e32 v7, v8 -; SI-NEXT: v_mov_b32_e32 v8, v5 -; SI-NEXT: v_mov_b32_e32 v44, v37 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v54, v9 +; SI-NEXT: v_bfe_u32 
v42, v6, 8, 8 +; SI-NEXT: v_bfe_u32 v45, v4, 8, 8 +; SI-NEXT: v_bfe_u32 v47, v52, 8, 8 +; SI-NEXT: v_bfe_u32 v33, v1, 8, 8 +; SI-NEXT: v_mov_b32_e32 v3, v14 +; SI-NEXT: v_mov_b32_e32 v25, v59 +; SI-NEXT: v_mov_b32_e32 v1, v52 +; SI-NEXT: v_mov_b32_e32 v44, v11 ; SI-NEXT: s_branch .LBB95_3 ; SI-NEXT: .LBB95_2: -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; 
kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: v_mov_b32_e32 v20, v29 -; SI-NEXT: v_mov_b32_e32 v15, v33 -; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v60, v16 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_mov_b32_e32 v7, v29 +; SI-NEXT: v_mov_b32_e32 v29, v6 +; SI-NEXT: v_mov_b32_e32 v39, v5 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: v_mov_b32_e32 v51, v42 +; SI-NEXT: v_writelane_b32 v62, s4, 0 +; SI-NEXT: v_writelane_b32 v62, s5, 1 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v41, v21 +; SI-NEXT: v_writelane_b32 v62, s4, 2 +; SI-NEXT: v_writelane_b32 v62, s5, 3 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v21, v24 +; SI-NEXT: v_writelane_b32 v62, s4, 4 +; SI-NEXT: v_writelane_b32 v62, s5, 5 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v40, v34 +; SI-NEXT: v_writelane_b32 v62, s4, 6 +; SI-NEXT: v_writelane_b32 v62, s5, 7 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v34, v61 +; SI-NEXT: v_writelane_b32 v62, s4, 8 +; SI-NEXT: v_writelane_b32 v62, s5, 9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v13, v12 +; SI-NEXT: v_writelane_b32 v62, s4, 10 +; SI-NEXT: v_writelane_b32 v62, s5, 11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v12, v48 +; 
SI-NEXT: v_writelane_b32 v62, s4, 12 +; SI-NEXT: v_writelane_b32 v62, s5, 13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v57, v30 +; SI-NEXT: v_writelane_b32 v62, s4, 14 +; SI-NEXT: v_writelane_b32 v62, s5, 15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v58, v11 +; SI-NEXT: v_writelane_b32 v62, s4, 16 +; SI-NEXT: v_writelane_b32 v62, s5, 17 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_writelane_b32 v62, s4, 18 +; SI-NEXT: v_writelane_b32 v62, s5, 19 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v46, v33 +; SI-NEXT: v_writelane_b32 v62, s4, 20 +; SI-NEXT: v_writelane_b32 v62, s5, 21 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v30, v32 +; SI-NEXT: v_writelane_b32 v62, s4, 22 +; SI-NEXT: v_writelane_b32 v62, s5, 23 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: v_mov_b32_e32 v32, v31 +; SI-NEXT: v_writelane_b32 v62, s4, 24 +; SI-NEXT: v_writelane_b32 v62, s5, 25 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: v_mov_b32_e32 v31, v10 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v9, v7 -; SI-NEXT: v_mov_b32_e32 v7, v8 -; SI-NEXT: v_mov_b32_e32 v8, v5 -; SI-NEXT: v_mov_b32_e32 v44, v37 -; SI-NEXT: s_mov_b64 s[4:5], -1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: v_writelane_b32 v62, s4, 26 +; SI-NEXT: v_writelane_b32 v62, s5, 27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v54, v9 +; SI-NEXT: v_writelane_b32 v62, s4, 28 +; SI-NEXT: v_writelane_b32 v62, s5, 29 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v4 +; SI-NEXT: v_writelane_b32 v62, s4, 30 +; SI-NEXT: v_writelane_b32 v62, s5, 31 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v22, v2 +; SI-NEXT: v_writelane_b32 v62, s4, 32 +; SI-NEXT: v_writelane_b32 v62, s5, 33 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v2, v1 +; SI-NEXT: v_writelane_b32 v62, s4, 34 +; SI-NEXT: v_writelane_b32 v62, s5, 35 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v17, v43 +; SI-NEXT: v_writelane_b32 v62, s4, 36 +; SI-NEXT: v_writelane_b32 v62, s5, 37 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_mov_b64 vcc, -1 +; SI-NEXT: v_writelane_b32 v62, s4, 38 +; SI-NEXT: v_writelane_b32 v62, s5, 39 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v25, v59 +; SI-NEXT: v_writelane_b32 v62, s4, 40 +; SI-NEXT: v_writelane_b32 v62, s5, 41 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v1, v52 +; SI-NEXT: v_writelane_b32 v62, s4, 42 +; SI-NEXT: v_writelane_b32 v62, s5, 43 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr71 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr29 -; 
SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr64 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: v_writelane_b32 v62, s4, 44 +; SI-NEXT: v_writelane_b32 v62, s5, 45 +; SI-NEXT: v_writelane_b32 v62, s80, 46 +; SI-NEXT: v_writelane_b32 v62, s81, 47 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v62, s80, 48 +; SI-NEXT: v_writelane_b32 v62, s81, 49 +; SI-NEXT: ; implicit-def: $sgpr80 ; SI-NEXT: .LBB95_3: ; %Flow -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload 
-; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v1, v44 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v5, v8 -; SI-NEXT: v_mov_b32_e32 v6, v7 -; SI-NEXT: v_mov_b32_e32 v7, v9 -; SI-NEXT: v_mov_b32_e32 v8, v10 -; SI-NEXT: v_mov_b32_e32 v9, v31 -; SI-NEXT: v_mov_b32_e32 v31, v33 -; SI-NEXT: v_mov_b32_e32 v44, v15 -; SI-NEXT: v_mov_b32_e32 v33, v20 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v50, v2 -; SI-NEXT: v_mov_b32_e32 v53, v40 -; SI-NEXT: v_mov_b32_e32 v40, v28 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_mov_b32_e32 v2, v48 -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; SI-NEXT: v_mov_b32_e32 v11, v27 -; SI-NEXT: v_mov_b32_e32 v38, v30 -; SI-NEXT: v_mov_b32_e32 v27, v52 -; SI-NEXT: v_mov_b32_e32 v30, v29 -; SI-NEXT: v_mov_b32_e32 v29, v26 +; SI-NEXT: v_mov_b32_e32 v14, v17 +; SI-NEXT: v_mov_b32_e32 v17, v39 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: s_andn2_b64 vcc, exec, vcc +; SI-NEXT: v_mov_b32_e32 v23, v2 +; SI-NEXT: v_mov_b32_e32 v59, v34 +; SI-NEXT: v_mov_b32_e32 v2, v25 ; SI-NEXT: s_cbranch_vccnz .LBB95_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v52, v29 +; SI-NEXT: v_mov_b32_e32 v29, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v18, v14 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mov_b32_e32 v15, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v53 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v55 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v4 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v31 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v2 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: 
v_cvt_f32_f16_e32 v35, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v8 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f32_f16_e32 v11, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v31, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v10 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v27 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v34 -; SI-NEXT: v_or_b32_e32 v56, v12, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v11 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v13, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_bfe_u32 v50, v57, 8, 8 +; SI-NEXT: v_bfe_u32 v48, v12, 8, 8 +; SI-NEXT: v_bfe_u32 v43, v32, 8, 8 +; SI-NEXT: v_bfe_u32 v24, v13, 8, 8 +; SI-NEXT: v_bfe_u32 v20, v31, 8, 8 +; SI-NEXT: v_bfe_u32 v19, v54, 8, 8 +; SI-NEXT: 
v_bfe_u32 v42, v6, 8, 8 +; SI-NEXT: v_bfe_u32 v45, v55, 8, 8 +; SI-NEXT: v_bfe_u32 v61, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v33, v53, 8, 8 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v14, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s4, v14 +; SI-NEXT: s_lshl_b32 s4, s4, 16 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v14, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v44 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v7 +; SI-NEXT: v_readfirstlane_b32 s5, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v53 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_or_b32_e32 v36, v14, v13 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_readfirstlane_b32 s6, v14 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[4:5], 16 +; SI-NEXT: s_lshr_b32 s49, s5, 8 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v14, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_or_b32_e32 v54, v14, v16 +; SI-NEXT: v_readfirstlane_b32 s6, v14 +; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v16, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s7, v14 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readfirstlane_b32 s7, v3 +; SI-NEXT: s_lshl_b32 s7, s7, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_or_b32_e32 v52, v17, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s8, v14 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_lshr_b64 s[82:83], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[84:85], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[86:87], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s51, s7, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: 
v_or_b32_e32 v58, v17, v19 -; SI-NEXT: v_alignbit_b32 v40, v58, v52, 24 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s8, v14 +; SI-NEXT: s_lshl_b32 s8, s8, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v19, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readfirstlane_b32 s9, v2 +; SI-NEXT: s_lshl_b32 s9, s9, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_or_b32_e32 v11, v21, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s10, v14 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[8:9], 8 +; SI-NEXT: s_lshr_b32 s53, s9, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v4, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v13 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v27, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v61, v21, v22 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v22, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s10, v14 +; SI-NEXT: s_lshl_b32 s10, s10, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v23, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v16, v23, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte 
Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s11, v14 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_readfirstlane_b32 s11, v55 +; SI-NEXT: s_lshl_b32 s11, s11, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v23, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s12, v14 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[10:11], 8 +; SI-NEXT: s_lshr_b32 s55, s11, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v24, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s12, v14 +; SI-NEXT: s_lshl_b32 s12, s12, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v26, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_or_b32_e32 v48, v25, v24 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s13, v14 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readfirstlane_b32 s13, v5 +; SI-NEXT: s_lshl_b32 s13, s13, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v26, v3 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_or_b32_e32 v25, v26, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_or_b32_e32 v53, v26, v27 -; SI-NEXT: v_mov_b32_e32 v26, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v28, v4 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_or_b32_e32 v62, v28, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s14, v14 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: s_lshr_b64 s[78:79], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[12:13], 8 +; SI-NEXT: s_lshr_b32 s65, s13, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 
v29, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v27, v34 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v8 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v63 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_or_b32_e32 v59, v29, v34 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v35 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v29, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v51 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v30, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_or_b32_e32 v3, v30, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v44 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_or_b32_e32 v4, v34, v30 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v32 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_mov_b32_e32 v30, v10 -; SI-NEXT: v_mov_b32_e32 v32, v30 -; SI-NEXT: v_or_b32_e32 v34, v35, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v46 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v51 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_or_b32_e32 v22, v35, v36 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v10 -; SI-NEXT: v_mov_b32_e32 v35, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v51 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s15, v14 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_readfirstlane_b32 s15, v6 +; SI-NEXT: s_lshl_b32 s15, s15, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v24, v37, v36 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v38 -; SI-NEXT: v_or_b32_e32 v42, v39, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v45 -; SI-NEXT: v_mov_b32_e32 v36, v48 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s16, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v41 +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: s_lshr_b64 s[92:93], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[14:15], 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshr_b64 s[30:31], s[14:15], 8 +; SI-NEXT: s_lshr_b32 s67, s15, 8 +; SI-NEXT: v_readfirstlane_b32 s16, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v21 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s17, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readfirstlane_b32 s17, v44 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s18, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v40 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s48, s17, 8 +; SI-NEXT: v_readfirstlane_b32 s18, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v60 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s19, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v59 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: v_readfirstlane_b32 s19, v54 +; SI-NEXT: s_lshl_b32 s19, s19, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s20, v14 +; SI-NEXT: s_or_b32 s19, s20, s19 +; SI-NEXT: s_lshr_b64 s[96:97], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[98:99], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 8 +; SI-NEXT: s_lshr_b32 s50, s19, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v13 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v60, v37, v39 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v49 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_alignbit_b32 v39, v54, v29, 16 -; SI-NEXT: v_or_b32_e32 v43, v48, v37 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v44 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s20, v14 +; SI-NEXT: s_lshl_b32 s20, s20, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v14, v49, v48 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: 
v_alignbit_b32 v28, v14, v43, 8 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s21, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v58 +; SI-NEXT: s_or_b32 s20, s21, s20 +; SI-NEXT: v_readfirstlane_b32 s21, v31 +; SI-NEXT: s_lshl_b32 s21, s21, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s22, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v39 +; SI-NEXT: s_or_b32 s21, s22, s21 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 8 +; SI-NEXT: s_lshr_b32 s52, s21, 8 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s22, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v29 +; SI-NEXT: s_lshl_b32 s22, s22, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s23, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v52 +; SI-NEXT: s_or_b32 s22, s23, s22 +; SI-NEXT: v_readfirstlane_b32 s23, v32 +; SI-NEXT: s_lshl_b32 s23, s23, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s24, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v17 +; SI-NEXT: s_or_b32 s23, s24, s23 +; SI-NEXT: s_lshr_b32 s54, s23, 8 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s24, v14 +; SI-NEXT: s_lshl_b32 s24, s24, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_or_b32_e32 v19, v48, v37 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v33 -; SI-NEXT: v_or_b32_e32 v47, v49, v37 -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s25, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v30 +; SI-NEXT: s_or_b32 s24, s25, s24 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v14 +; SI-NEXT: v_readfirstlane_b32 s25, v30 +; SI-NEXT: s_lshl_b32 s25, s25, 16 +; SI-NEXT: v_bfe_u32 v35, v30, 8, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v48 -; SI-NEXT: v_or_b32_e32 v21, v50, v37 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_or_b32_e32 
v16, v37, v49 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_alignbit_b32 v50, v54, v29, 24 -; SI-NEXT: v_or_b32_e32 v57, v48, v37 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v18 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_or_b32_e32 v17, v49, v48 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_mov_b32_e32 v49, v53 -; SI-NEXT: v_alignbit_b32 v53, v54, v29, 8 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_or_b32_e32 v13, v48, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v20 -; SI-NEXT: v_or_b32_e32 v55, v51, v37 -; SI-NEXT: v_alignbit_b32 v10, v55, v13, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v55, v13, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v17, v57, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v17, v57, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v17, v57, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v16, v21, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v16, v21, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v16, v21, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v47, v19, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v47, v19, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v47, v19, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v14, v43, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v14, v43, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v60, v42, 24 -; SI-NEXT: buffer_store_dword v10, off, 
s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v60, v42, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v60, v42, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v24, v22, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v24, v22, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v24, v22, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v34, v4, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v34, v4, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v34, v4, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v59, v3, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v59, v3, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v59, v3, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v62, v49, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v62, v49, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v62, v49, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v25, v36, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v25, v36, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v25, v36, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v23, v35, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v23, v35, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v23, v35, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v61, v11, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v61, v11, 16 -; SI-NEXT: 
buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v10, v61, v11, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v58, v52, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v10, v58, v52, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v10, v56 -; SI-NEXT: v_alignbit_b32 v11, v12, v10, 24 -; SI-NEXT: v_alignbit_b32 v56, v12, v10, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v38, v12, v10, 8 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v55 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v17 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v47 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v14 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v60 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v59 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v62 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v25 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v23 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v61 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v58 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v54 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v12 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v20, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:440 ; 
4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v18, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v15, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v33, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v44, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v31, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v30, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v9, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v8, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v6, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v5, 8, 8 -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_bfe_u32 v10, v26, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v2, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v1, 8, 8 -; SI-NEXT: v_alignbit_b32 v48, v55, v13, 24 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 -; SI-NEXT: v_bfe_u32 v30, v7, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v27, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: .LBB95_5: ; %end -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_and_b32_e32 v37, 0xff, v13 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v52, 24, v48 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: 
v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v37, v37, v51 -; SI-NEXT: v_and_b32_e32 v37, 0xffff, v37 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v51, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v51 -; SI-NEXT: v_or_b32_e32 v51, v52, v51 -; SI-NEXT: v_or_b32_e32 v37, v37, v51 -; SI-NEXT: buffer_store_dword v37, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v37, 0xff, v55 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v37, v37, v51 -; SI-NEXT: v_and_b32_e32 v37, 0xffff, v37 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v48, 24, v10 -; SI-NEXT: v_or_b32_e32 v20, v48, v20 -; SI-NEXT: v_or_b32_e32 v20, v37, v20 -; SI-NEXT: v_add_i32_e32 v37, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v20, v37, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v57 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v37, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v20, v20, v37 -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v37, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v48, 24, v10 -; SI-NEXT: v_or_b32_e32 v37, v48, v37 -; SI-NEXT: v_or_b32_e32 v20, v20, v37 -; SI-NEXT: v_add_i32_e32 v37, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v20, v37, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v17 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v37, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v20, v20, v37 -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v37, v18 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v18, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v21 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v18, v18, v20 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v10 -; SI-NEXT: v_or_b32_e32 v20, v37, v20 -; SI-NEXT: v_or_b32_e32 v18, v18, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v18, v20, s[0:3], 0 offen -; SI-NEXT: 
buffer_load_dword v10, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v18, v18, v20 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s26, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v22 +; SI-NEXT: s_or_b32 s25, s26, s25 +; SI-NEXT: s_lshr_b32 s64, s25, 8 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s26, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v23 +; SI-NEXT: s_lshl_b32 s26, s26, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s27, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v46 +; SI-NEXT: s_or_b32 s26, s27, s26 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v18 +; SI-NEXT: v_bfe_u32 v18, v44, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s27, v46 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshl_b32 s27, s27, 16 +; SI-NEXT: v_bfe_u32 v37, v46, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s28, v14 +; SI-NEXT: s_or_b32 s27, s28, s27 +; SI-NEXT: s_lshr_b32 s66, s27, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v15, v20, v15 -; SI-NEXT: v_or_b32_e32 v15, v18, v15 -; SI-NEXT: v_add_i32_e32 v18, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s28, v14 +; SI-NEXT: s_lshl_b32 s28, s28, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s29, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v56 +; SI-NEXT: s_or_b32 s28, s29, s28 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v15 +; SI-NEXT: v_bfe_u32 v15, v5, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s29, v56 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshl_b32 s29, s29, 16 +; SI-NEXT: v_bfe_u32 v38, v56, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s40, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s29, s40, s29 +; SI-NEXT: s_lshr_b32 s68, s29, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, 
s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s40, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s40, s40, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s41, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s40, s41, s40 +; SI-NEXT: v_readfirstlane_b32 s41, v57 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshl_b32 s41, s41, 16 +; SI-NEXT: v_readfirstlane_b32 s42, v14 +; SI-NEXT: s_or_b32 s41, s42, s41 +; SI-NEXT: s_lshr_b32 s69, s41, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s42, v14 +; SI-NEXT: s_lshl_b32 s42, s42, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v43 -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v28 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s43, v14 +; SI-NEXT: s_or_b32 s42, s43, s42 +; SI-NEXT: v_readfirstlane_b32 s43, v12 +; SI-NEXT: s_lshl_b32 s43, s43, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s44, v14 +; SI-NEXT: s_or_b32 s43, s44, s43 +; SI-NEXT: s_lshr_b32 s70, s43, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, 
v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s44, v14 +; SI-NEXT: s_lshl_b32 s44, s44, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v44 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_bfe_u32 v47, v1, 8, 8 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s45, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s44, s45, s44 +; SI-NEXT: v_readfirstlane_b32 s45, v13 +; SI-NEXT: s_lshl_b32 s45, s45, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s46, v14 +; SI-NEXT: s_or_b32 s45, s46, s45 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 4 +; SI-NEXT: v_writelane_b32 v62, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 2 +; SI-NEXT: v_writelane_b32 v62, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 0 +; SI-NEXT: v_writelane_b32 v62, s47, 1 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 10 +; SI-NEXT: v_writelane_b32 v62, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 8 +; SI-NEXT: v_writelane_b32 v62, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 6 +; SI-NEXT: v_writelane_b32 v62, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 16 +; SI-NEXT: v_writelane_b32 v62, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 14 +; SI-NEXT: v_writelane_b32 v62, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 12 +; SI-NEXT: v_writelane_b32 v62, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[28:29], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 22 +; SI-NEXT: v_writelane_b32 v62, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[28:29], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 20 +; SI-NEXT: v_writelane_b32 v62, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[28:29], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 18 +; SI-NEXT: v_writelane_b32 v62, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 28 +; SI-NEXT: v_writelane_b32 v62, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; 
SI-NEXT: v_writelane_b32 v62, s46, 26 +; SI-NEXT: v_writelane_b32 v62, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 24 +; SI-NEXT: v_writelane_b32 v62, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 34 +; SI-NEXT: v_writelane_b32 v62, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 32 +; SI-NEXT: v_writelane_b32 v62, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 30 +; SI-NEXT: v_writelane_b32 v62, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 40 +; SI-NEXT: v_writelane_b32 v62, s47, 41 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 38 +; SI-NEXT: v_writelane_b32 v62, s47, 39 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 36 +; SI-NEXT: v_writelane_b32 v62, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[20:21], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 44 +; SI-NEXT: v_writelane_b32 v62, s47, 45 +; SI-NEXT: s_lshr_b64 s[46:47], s[20:21], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 42 +; SI-NEXT: v_writelane_b32 v62, s47, 43 +; SI-NEXT: v_writelane_b32 v62, vcc_lo, 48 +; SI-NEXT: v_writelane_b32 v62, vcc_hi, 49 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 8 +; SI-NEXT: v_writelane_b32 v62, vcc_lo, 46 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v62, vcc_hi, 47 +; SI-NEXT: s_lshr_b32 s71, s45, 8 +; SI-NEXT: .LBB95_5: ; %end +; SI-NEXT: v_readlane_b32 vcc_lo, v62, 0 +; SI-NEXT: v_readlane_b32 vcc_hi, v62, 1 +; SI-NEXT: s_lshl_b32 s47, vcc_lo, 8 +; SI-NEXT: v_readlane_b32 vcc_lo, v62, 2 +; SI-NEXT: s_and_b32 s44, s44, 0xff +; SI-NEXT: v_readlane_b32 vcc_hi, v62, 3 +; SI-NEXT: s_or_b32 s44, s44, s47 +; SI-NEXT: s_and_b32 s47, vcc_lo, 0xff +; SI-NEXT: v_readlane_b32 vcc_lo, v62, 4 +; SI-NEXT: s_lshl_b32 s57, vcc_lo, 24 +; SI-NEXT: s_lshl_b32 s47, s47, 16 +; SI-NEXT: s_or_b32 s47, s57, s47 +; SI-NEXT: s_and_b32 s44, s44, 0xffff +; SI-NEXT: s_or_b32 s44, s44, s47 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v16, s44 +; SI-NEXT: s_and_b32 s44, s45, 0xff +; SI-NEXT: s_lshl_b32 s45, s71, 8 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: s_or_b32 s44, s44, s45 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v24 +; SI-NEXT: s_and_b32 s44, s44, 0xffff +; SI-NEXT: v_or_b32_e32 v13, v21, v13 +; SI-NEXT: v_or_b32_e32 v13, s44, v13 +; SI-NEXT: v_readlane_b32 s44, v62, 6 +; SI-NEXT: v_readlane_b32 s45, v62, 7 +; SI-NEXT: s_lshl_b32 s44, s44, 8 +; SI-NEXT: s_and_b32 s42, s42, 0xff +; SI-NEXT: v_readlane_b32 vcc_hi, v62, 5 +; SI-NEXT: s_or_b32 s42, s42, s44 +; SI-NEXT: v_readlane_b32 s44, v62, 8 +; SI-NEXT: v_readlane_b32 s45, v62, 9 +; SI-NEXT: s_and_b32 s44, s44, 0xff +; SI-NEXT: v_readlane_b32 vcc_lo, v62, 10 +; SI-NEXT: s_lshl_b32 s45, vcc_lo, 24 +; SI-NEXT: s_lshl_b32 s44, s44, 16 +; SI-NEXT: s_or_b32 s44, s45, s44 +; SI-NEXT: s_and_b32 s42, s42, 0xffff +; SI-NEXT: s_or_b32 s42, s42, s44 +; SI-NEXT: v_mov_b32_e32 v21, s42 +; SI-NEXT: s_and_b32 s42, s43, 0xff +; SI-NEXT: s_lshl_b32 s43, s70, 8 +; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 +; SI-NEXT: s_or_b32 s42, s42, s43 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v48 +; SI-NEXT: s_and_b32 s42, s42, 0xffff +; SI-NEXT: v_or_b32_e32 v12, v23, v12 +; SI-NEXT: v_or_b32_e32 v12, s42, v12 +; SI-NEXT: v_readlane_b32 s42, v62, 12 +; SI-NEXT: v_readlane_b32 s43, v62, 13 +; 
SI-NEXT: s_lshl_b32 s42, s42, 8 +; SI-NEXT: s_and_b32 s40, s40, 0xff +; SI-NEXT: s_or_b32 s40, s40, s42 +; SI-NEXT: v_readlane_b32 s42, v62, 14 +; SI-NEXT: v_readlane_b32 s43, v62, 15 +; SI-NEXT: s_and_b32 s42, s42, 0xff +; SI-NEXT: v_readlane_b32 s44, v62, 16 +; SI-NEXT: s_lshl_b32 s43, s44, 24 +; SI-NEXT: s_lshl_b32 s42, s42, 16 +; SI-NEXT: s_or_b32 s42, s43, s42 +; SI-NEXT: s_and_b32 s40, s40, 0xffff +; SI-NEXT: s_or_b32 s40, s40, s42 +; SI-NEXT: v_mov_b32_e32 v23, s40 +; SI-NEXT: s_and_b32 s40, s41, 0xff +; SI-NEXT: s_lshl_b32 s41, s69, 8 +; SI-NEXT: v_and_b32_e32 v11, 0xff, v57 +; SI-NEXT: s_or_b32 s40, s40, s41 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v50 +; SI-NEXT: s_and_b32 s40, s40, 0xffff +; SI-NEXT: v_or_b32_e32 v11, v24, v11 +; SI-NEXT: v_or_b32_e32 v11, s40, v11 +; SI-NEXT: v_readlane_b32 s40, v62, 18 +; SI-NEXT: v_readlane_b32 s41, v62, 19 +; SI-NEXT: s_lshl_b32 s40, s40, 8 +; SI-NEXT: s_and_b32 s28, s28, 0xff +; SI-NEXT: s_or_b32 s28, s28, s40 +; SI-NEXT: v_readlane_b32 s40, v62, 20 +; SI-NEXT: v_readlane_b32 s41, v62, 21 +; SI-NEXT: s_and_b32 s40, s40, 0xff +; SI-NEXT: v_readlane_b32 s42, v62, 22 +; SI-NEXT: s_lshl_b32 s41, s42, 24 +; SI-NEXT: s_lshl_b32 s40, s40, 16 +; SI-NEXT: s_or_b32 s40, s41, s40 +; SI-NEXT: s_and_b32 s28, s28, 0xffff +; SI-NEXT: s_or_b32 s28, s28, s40 +; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: s_and_b32 s28, s29, 0xff +; SI-NEXT: s_lshl_b32 s29, s68, 8 +; SI-NEXT: v_and_b32_e32 v25, 0xff, v56 +; SI-NEXT: s_or_b32 s28, s28, s29 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v38 +; SI-NEXT: s_and_b32 s28, s28, 0xffff +; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_or_b32_e32 v25, s28, v25 +; SI-NEXT: v_readlane_b32 s28, v62, 24 +; SI-NEXT: v_readlane_b32 s29, v62, 25 +; SI-NEXT: s_lshl_b32 s28, s28, 8 +; SI-NEXT: s_and_b32 s26, s26, 0xff +; SI-NEXT: s_or_b32 s26, s26, s28 +; SI-NEXT: v_readlane_b32 s28, v62, 26 +; SI-NEXT: v_readlane_b32 s29, v62, 27 +; SI-NEXT: s_and_b32 s28, s28, 0xff +; SI-NEXT: v_readlane_b32 s40, v62, 28 +; SI-NEXT: s_lshl_b32 s29, s40, 24 +; SI-NEXT: s_lshl_b32 s28, s28, 16 +; SI-NEXT: s_or_b32 s28, s29, s28 +; SI-NEXT: s_and_b32 s26, s26, 0xffff +; SI-NEXT: s_or_b32 s26, s26, s28 +; SI-NEXT: v_mov_b32_e32 v26, s26 +; SI-NEXT: s_and_b32 s26, s27, 0xff +; SI-NEXT: s_lshl_b32 s27, s66, 8 +; SI-NEXT: v_and_b32_e32 v27, 0xff, v46 +; SI-NEXT: s_or_b32 s26, s26, s27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v37 +; SI-NEXT: s_and_b32 s26, s26, 0xffff +; SI-NEXT: v_or_b32_e32 v27, v28, v27 +; SI-NEXT: v_or_b32_e32 v27, s26, v27 +; SI-NEXT: v_readlane_b32 s26, v62, 30 +; SI-NEXT: v_readlane_b32 s27, v62, 31 +; SI-NEXT: s_lshl_b32 s26, s26, 8 +; SI-NEXT: s_and_b32 s24, s24, 0xff +; SI-NEXT: s_or_b32 s24, s24, s26 +; SI-NEXT: v_readlane_b32 s26, v62, 32 +; SI-NEXT: v_readlane_b32 s27, v62, 33 +; SI-NEXT: s_and_b32 s26, s26, 0xff +; SI-NEXT: v_readlane_b32 s28, v62, 34 +; SI-NEXT: s_lshl_b32 s27, s28, 24 +; SI-NEXT: s_lshl_b32 s26, s26, 16 +; SI-NEXT: v_readlane_b32 vcc_hi, v62, 11 +; SI-NEXT: s_or_b32 s26, s27, s26 +; SI-NEXT: s_and_b32 s24, s24, 0xffff +; SI-NEXT: s_or_b32 s24, s24, s26 +; SI-NEXT: buffer_store_dword v16, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v42 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; 
SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v16, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v28, s24 +; SI-NEXT: s_and_b32 s24, s25, 0xff +; SI-NEXT: s_lshl_b32 s25, s64, 8 +; SI-NEXT: v_and_b32_e32 v29, 0xff, v30 +; SI-NEXT: buffer_store_dword v13, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v60 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v13, vcc, 8, v0 +; SI-NEXT: s_or_b32 s24, s24, s25 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v35 +; SI-NEXT: buffer_store_dword v21, v13, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v13, vcc, 12, v0 +; SI-NEXT: s_and_b32 s24, s24, 0xffff +; SI-NEXT: v_or_b32_e32 v29, v30, v29 +; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v22 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v12, vcc, 16, v0 +; SI-NEXT: v_or_b32_e32 v29, s24, v29 +; SI-NEXT: buffer_store_dword v23, v12, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v12, vcc, 20, v0 +; SI-NEXT: v_readlane_b32 s24, v62, 36 +; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v24 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 +; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0 +; SI-NEXT: v_readlane_b32 s25, v62, 37 +; SI-NEXT: s_lshl_b32 s24, s24, 8 +; SI-NEXT: s_and_b32 s22, s22, 0xff +; SI-NEXT: buffer_store_dword v24, v11, s[0:3], 0 offen +; SI-NEXT: 
v_add_i32_e32 v11, vcc, 28, v0 +; SI-NEXT: s_or_b32 s22, s22, s24 +; SI-NEXT: v_readlane_b32 s24, v62, 38 +; SI-NEXT: buffer_store_dword v25, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 +; SI-NEXT: v_readlane_b32 s25, v62, 39 +; SI-NEXT: s_and_b32 s24, s24, 0xff +; SI-NEXT: v_readlane_b32 s26, v62, 40 +; SI-NEXT: buffer_store_dword v26, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 +; SI-NEXT: s_lshl_b32 s24, s24, 16 +; SI-NEXT: s_lshl_b32 s25, s26, 24 +; SI-NEXT: buffer_store_dword v27, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 +; SI-NEXT: s_and_b32 s22, s22, 0xffff +; SI-NEXT: s_or_b32 s24, s25, s24 +; SI-NEXT: buffer_store_dword v28, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 44, v0 +; SI-NEXT: s_or_b32 s22, s22, s24 +; SI-NEXT: buffer_store_dword v29, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 +; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: s_and_b32 s22, s23, 0xff +; SI-NEXT: s_lshl_b32 s23, s54, 8 ; SI-NEXT: v_and_b32_e32 v10, 0xff, v32 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 +; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s22, s22, s23 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v13 -; SI-NEXT: v_or_b32_e32 v10, v18, v10 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v10, v15, v10 -; SI-NEXT: v_add_i32_e32 v15, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v10, v15, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v10, v10, v15 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v13 -; SI-NEXT: v_or_b32_e32 v15, v18, v15 -; SI-NEXT: v_or_b32_e32 v10, v10, v15 -; SI-NEXT: v_add_i32_e32 v15, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v10, v15, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v43 +; SI-NEXT: s_and_b32 s22, s22, 0xffff +; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: v_or_b32_e32 v10, s22, v10 +; SI-NEXT: s_and_b32 s20, s20, 0xff +; SI-NEXT: s_lshl_b32 s22, s62, 8 +; SI-NEXT: s_or_b32 s20, s20, s22 +; SI-NEXT: v_readlane_b32 s22, v62, 42 +; SI-NEXT: v_readlane_b32 s23, v62, 43 +; SI-NEXT: s_and_b32 s22, s22, 0xff +; SI-NEXT: v_readlane_b32 s24, v62, 44 +; SI-NEXT: s_lshl_b32 s22, s22, 16 +; SI-NEXT: s_lshl_b32 s23, s24, 24 +; SI-NEXT: s_and_b32 s20, s20, 0xffff +; SI-NEXT: s_or_b32 s22, s23, s22 +; SI-NEXT: v_add_i32_e32 v11, vcc, 52, v0 +; SI-NEXT: s_or_b32 s20, s20, s22 +; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v29 -; SI-NEXT: v_or_b32_e32 v10, v10, v15 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v13 -; SI-NEXT: v_or_b32_e32 v9, v15, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 56, v0 +; SI-NEXT: v_mov_b32_e32 v11, s20 +; 
SI-NEXT: s_and_b32 s20, s21, 0xff +; SI-NEXT: s_lshl_b32 s21, s52, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v31 +; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s20, s20, s21 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v20 +; SI-NEXT: s_and_b32 s20, s20, 0xffff ; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_or_b32_e32 v9, s20, v9 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_lshl_b32 s20, s58, 8 +; SI-NEXT: s_or_b32 s18, s18, s20 +; SI-NEXT: s_and_b32 s20, s98, 0xff +; SI-NEXT: s_lshl_b32 s20, s20, 16 +; SI-NEXT: s_lshl_b32 s21, s96, 24 +; SI-NEXT: s_and_b32 s18, s18, 0xffff +; SI-NEXT: s_or_b32 s20, s21, s20 ; SI-NEXT: v_add_i32_e32 v10, vcc, 60, v0 +; SI-NEXT: s_or_b32 s18, s18, s20 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v9, 0xff, v3 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v13 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, v15, v10 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v9, 0xff, v59 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v3 -; SI-NEXT: v_or_b32_e32 v8, v10, v8 +; SI-NEXT: v_add_i32_e32 v9, vcc, 64, v0 +; SI-NEXT: v_mov_b32_e32 v10, s18 +; SI-NEXT: s_and_b32 s18, s19, 0xff +; SI-NEXT: s_lshl_b32 s19, s50, 8 +; SI-NEXT: v_and_b32_e32 v8, 0xff, v54 +; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s18, s18, s19 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v19 +; SI-NEXT: s_and_b32 s18, s18, 0xffff ; SI-NEXT: v_or_b32_e32 v8, v9, v8 +; SI-NEXT: v_or_b32_e32 v8, s18, v8 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s18, s38, 8 +; SI-NEXT: s_or_b32 s16, s16, s18 +; SI-NEXT: s_and_b32 s18, s36, 0xff +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s34, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s18, s19, s18 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s18 ; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v49 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v9, 0xff, v4 -; SI-NEXT: buffer_load_dword v4, 
off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v4 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v62 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v4 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v30 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 +; SI-NEXT: v_add_i32_e32 v8, vcc, 0x48, v0 +; SI-NEXT: v_mov_b32_e32 v9, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xff +; SI-NEXT: s_lshl_b32 s17, s48, 8 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v44 +; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v18 +; SI-NEXT: s_and_b32 s16, s16, 0xffff ; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_or_b32_e32 v7, s16, v7 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: s_lshl_b32 s16, s30, 8 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: s_and_b32 s16, s94, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s92, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 ; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v36 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v4 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v8, v9, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v25 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v4 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v3 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v6, v8, v6 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 +; SI-NEXT: v_mov_b32_e32 v8, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: s_lshl_b32 s15, s67, 8 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v42 +; SI-NEXT: s_and_b32 s14, s14, 0xffff ; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: v_or_b32_e32 v6, s14, v6 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s14, s90, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: s_and_b32 s14, s88, 
0xff +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s78, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v35 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v23 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v4 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v3 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 +; SI-NEXT: v_mov_b32_e32 v7, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: s_lshl_b32 s13, s65, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v15 +; SI-NEXT: s_and_b32 s12, s12, 0xffff ; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v5, s12, v5 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s12, s76, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: s_and_b32 s12, s74, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s72, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 ; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, 
s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v61 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v4 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v3 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 +; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s55, 8 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v55 +; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s10, s10, s11 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v45 +; SI-NEXT: s_and_b32 s10, s10, 0xffff ; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: v_or_b32_e32 v4, s10, v4 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s60, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: s_and_b32 s10, s56, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s46, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 ; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v40 -; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v58 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; SI-NEXT: v_or_b32_e32 v2, v5, v2 +; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s53, 8 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v61 +; SI-NEXT: s_and_b32 s8, s8, 0xffff ; SI-NEXT: v_or_b32_e32 v2, v4, v2 +; SI-NEXT: v_or_b32_e32 v2, s8, v2 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s86, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s84, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s82, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 ; SI-NEXT: 
buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v53 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v50 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v39 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v54 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: v_or_b32_e32 v1, v4, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 +; SI-NEXT: v_mov_b32_e32 v4, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s51, 8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v47 +; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, s6, v1 +; SI-NEXT: v_readlane_b32 s6, v62, 46 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: v_readlane_b32 s7, v62, 47 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s80, 0xff +; SI-NEXT: v_readlane_b32 s8, v62, 48 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s8, 24 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v38 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v56 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v4, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s49, 8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v53 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v33 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: 
v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s45, v62, 17 +; SI-NEXT: v_readlane_b32 s43, v62, 23 +; SI-NEXT: v_readlane_b32 s41, v62, 29 +; SI-NEXT: v_readlane_b32 s29, v62, 35 +; SI-NEXT: v_readlane_b32 s27, v62, 41 +; SI-NEXT: v_readlane_b32 s25, v62, 45 +; SI-NEXT: v_readlane_b32 s9, v62, 49 +; SI-NEXT: v_readlane_b32 s99, v63, 35 +; SI-NEXT: v_readlane_b32 s98, v63, 34 +; SI-NEXT: v_readlane_b32 s97, v63, 33 +; SI-NEXT: v_readlane_b32 s96, v63, 32 +; SI-NEXT: v_readlane_b32 s87, v63, 31 +; SI-NEXT: v_readlane_b32 s86, v63, 30 +; SI-NEXT: v_readlane_b32 s85, v63, 29 +; SI-NEXT: v_readlane_b32 s84, v63, 28 +; SI-NEXT: v_readlane_b32 s83, v63, 27 
+; SI-NEXT: v_readlane_b32 s82, v63, 26 +; SI-NEXT: v_readlane_b32 s81, v63, 25 +; SI-NEXT: v_readlane_b32 s80, v63, 24 +; SI-NEXT: v_readlane_b32 s71, v63, 23 +; SI-NEXT: v_readlane_b32 s70, v63, 22 +; SI-NEXT: v_readlane_b32 s69, v63, 21 +; SI-NEXT: v_readlane_b32 s68, v63, 20 +; SI-NEXT: v_readlane_b32 s67, v63, 19 +; SI-NEXT: v_readlane_b32 s66, v63, 18 +; SI-NEXT: v_readlane_b32 s65, v63, 17 +; SI-NEXT: v_readlane_b32 s64, v63, 16 +; SI-NEXT: v_readlane_b32 s55, v63, 15 +; SI-NEXT: v_readlane_b32 s54, v63, 14 +; SI-NEXT: v_readlane_b32 s53, v63, 13 +; SI-NEXT: v_readlane_b32 s52, v63, 12 +; SI-NEXT: v_readlane_b32 s51, v63, 11 +; SI-NEXT: v_readlane_b32 s50, v63, 10 +; SI-NEXT: v_readlane_b32 s49, v63, 9 +; SI-NEXT: v_readlane_b32 s48, v63, 8 +; SI-NEXT: v_readlane_b32 s39, v63, 7 +; SI-NEXT: v_readlane_b32 s38, v63, 6 +; SI-NEXT: v_readlane_b32 s37, v63, 5 +; SI-NEXT: v_readlane_b32 s36, v63, 4 +; SI-NEXT: v_readlane_b32 s35, v63, 3 +; SI-NEXT: v_readlane_b32 s34, v63, 2 +; SI-NEXT: v_readlane_b32 s31, v63, 1 +; SI-NEXT: v_readlane_b32 s30, v63, 0 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; @@ -188114,113 +189872,105 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: .LBB95_2: ; %cmp.true ; VI-NEXT: s_lshr_b32 s46, s45, 16 ; VI-NEXT: v_mov_b32_e32 v7, 0x200 -; VI-NEXT: v_add_f16_e32 v1, s46, v7 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; VI-NEXT: v_add_f16_e32 v11, s46, v7 +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v11 ; VI-NEXT: v_add_f16_e32 v2, s45, v7 ; VI-NEXT: s_lshr_b32 s45, s44, 16 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v23, v2, v1 -; VI-NEXT: v_add_f16_e32 v1, s45, v7 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; VI-NEXT: v_add_f16_e32 v40, s45, v7 ; VI-NEXT: v_add_f16_e32 v2, s44, v7 ; VI-NEXT: s_lshr_b32 s44, s43, 16 +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v40 +; VI-NEXT: v_add_f16_e32 v43, s44, v7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v22, v2, v1 -; VI-NEXT: v_add_f16_e32 v1, s44, v7 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v43 ; VI-NEXT: v_add_f16_e32 v2, s43, v7 ; VI-NEXT: s_lshr_b32 s43, s42, 16 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v25, v2, v1 -; VI-NEXT: v_add_f16_e32 v1, s43, v7 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; VI-NEXT: v_add_f16_e32 v54, s43, v7 ; VI-NEXT: v_add_f16_e32 v2, s42, v7 ; VI-NEXT: s_lshr_b32 s42, s41, 16 +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v54 +; VI-NEXT: v_add_f16_e32 v37, s42, v7 +; VI-NEXT: buffer_store_dword 
v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v24, v2, v1 -; VI-NEXT: v_add_f16_e32 v1, s42, v7 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v37 ; VI-NEXT: v_add_f16_e32 v2, s41, v7 ; VI-NEXT: s_lshr_b32 s41, s40, 16 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v27, v2, v1 -; VI-NEXT: v_add_f16_e32 v1, s41, v7 +; VI-NEXT: v_add_f16_e32 v49, s41, v7 ; VI-NEXT: v_add_f16_e32 v2, s40, v7 ; VI-NEXT: s_lshr_b32 s40, s15, 16 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; VI-NEXT: v_add_f16_e32 v53, s40, v7 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v49 +; VI-NEXT: v_add_f16_e32 v52, s40, v7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v26, v2, v1 -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v53 +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v52 ; VI-NEXT: v_add_f16_e32 v2, s15, v7 ; VI-NEXT: s_lshr_b32 s15, s14, 16 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v29, v2, v1 -; VI-NEXT: v_add_f16_e32 v1, s15, v7 +; VI-NEXT: v_add_f16_e32 v55, s15, v7 ; VI-NEXT: v_add_f16_e32 v2, s14, v7 ; VI-NEXT: s_lshr_b32 s14, s13, 16 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; VI-NEXT: v_add_f16_e32 v43, s14, v7 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v55 +; VI-NEXT: v_add_f16_e32 v53, s14, v7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v28, v2, v1 -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v43 +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v53 ; VI-NEXT: v_add_f16_e32 v2, s13, v7 ; VI-NEXT: s_lshr_b32 s13, s12, 16 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v6, v2, v1 ; VI-NEXT: v_add_f16_e32 v1, s13, v7 ; VI-NEXT: v_add_f16_e32 v2, s12, v7 ; VI-NEXT: s_lshr_b32 s12, s11, 16 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; VI-NEXT: v_add_f16_e32 v37, s12, v7 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; VI-NEXT: v_add_f16_e32 v39, s12, v7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v5, v2, v1 -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v37 +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v39 ; VI-NEXT: v_add_f16_e32 v2, s11, v7 ; VI-NEXT: s_lshr_b32 s11, s10, 16 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v31, v2, v1 -; VI-NEXT: 
v_add_f16_e32 v1, s11, v7 +; VI-NEXT: v_add_f16_e32 v60, s11, v7 ; VI-NEXT: v_add_f16_e32 v2, s10, v7 ; VI-NEXT: s_lshr_b32 s10, s9, 16 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; VI-NEXT: v_add_f16_e32 v52, s10, v7 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v60 +; VI-NEXT: v_add_f16_e32 v48, s10, v7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v30, v2, v1 -; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v52 +; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v48 ; VI-NEXT: v_add_f16_e32 v2, s9, v7 ; VI-NEXT: s_lshr_b32 s9, s8, 16 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v4, v2, v1 ; VI-NEXT: v_add_f16_e32 v1, s9, v7 ; VI-NEXT: v_add_f16_e32 v2, s8, v7 ; VI-NEXT: s_lshr_b32 s8, s7, 16 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; VI-NEXT: v_add_f16_e32 v50, s8, v7 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v3, v2, v1 ; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v50 ; VI-NEXT: v_add_f16_e32 v2, s7, v7 ; VI-NEXT: s_lshr_b32 s7, s6, 16 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v2, v2, v1 ; VI-NEXT: v_add_f16_e32 v1, s7, v7 ; VI-NEXT: v_add_f16_e32 v8, s6, v7 ; VI-NEXT: s_lshr_b32 s6, s17, 16 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; VI-NEXT: v_add_f16_e32 v36, s6, v7 -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v1, v8, v1 ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v36 ; VI-NEXT: v_add_f16_e32 v9, s17, v7 @@ -188228,12 +189978,12 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_e32 v33, v9, v8 ; VI-NEXT: v_add_f16_e32 v8, s6, v7 ; VI-NEXT: s_lshr_b32 s6, s19, 16 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; VI-NEXT: v_add_f16_e32 v9, s16, v7 ; VI-NEXT: v_add_f16_e32 v38, s6, v7 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v32, v9, v8 ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v38 ; VI-NEXT: v_add_f16_e32 v9, s19, v7 @@ -188241,12 +189991,12 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_e32 v21, v9, v8 ; 
VI-NEXT: v_add_f16_e32 v8, s6, v7 ; VI-NEXT: s_lshr_b32 s6, s21, 16 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; VI-NEXT: v_add_f16_e32 v9, s18, v7 ; VI-NEXT: v_add_f16_e32 v61, s6, v7 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v20, v9, v8 ; VI-NEXT: s_lshr_b32 s7, s20, 16 ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v61 @@ -188254,12 +190004,12 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_e32 v35, v9, v8 ; VI-NEXT: v_add_f16_e32 v8, s7, v7 ; VI-NEXT: s_lshr_b32 s6, s23, 16 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; VI-NEXT: v_add_f16_e32 v9, s20, v7 ; VI-NEXT: v_add_f16_e32 v45, s6, v7 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v34, v9, v8 ; VI-NEXT: s_lshr_b32 s7, s22, 16 ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v45 @@ -188267,12 +190017,12 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_e32 v19, v9, v8 ; VI-NEXT: v_add_f16_e32 v8, s7, v7 ; VI-NEXT: s_lshr_b32 s6, s25, 16 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; VI-NEXT: v_add_f16_e32 v9, s22, v7 ; VI-NEXT: v_add_f16_e32 v47, s6, v7 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v18, v9, v8 ; VI-NEXT: s_lshr_b32 s7, s24, 16 ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v47 @@ -188280,12 +190030,12 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_e32 v16, v9, v8 ; VI-NEXT: v_add_f16_e32 v8, s7, v7 ; VI-NEXT: s_lshr_b32 s6, s27, 16 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; VI-NEXT: v_add_f16_e32 v9, s24, v7 ; VI-NEXT: v_add_f16_e32 v57, s6, v7 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; VI-NEXT: 
v_or_b32_e32 v15, v9, v8 ; VI-NEXT: s_lshr_b32 s7, s26, 16 ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v57 @@ -188293,112 +190043,116 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_e32 v13, v9, v8 ; VI-NEXT: v_add_f16_e32 v8, s7, v7 ; VI-NEXT: s_lshr_b32 s6, s29, 16 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; VI-NEXT: v_add_f16_e32 v9, s26, v7 ; VI-NEXT: v_add_f16_e32 v59, s6, v7 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v12, v9, v8 ; VI-NEXT: s_lshr_b32 s7, s28, 16 ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v59 ; VI-NEXT: v_add_f16_e32 v9, s29, v7 -; VI-NEXT: s_lshr_b32 s6, s5, 16 ; VI-NEXT: v_or_b32_e32 v10, v9, v8 ; VI-NEXT: v_add_f16_e32 v8, s7, v7 -; VI-NEXT: s_lshr_b32 s7, s4, 16 -; VI-NEXT: v_add_f16_e32 v51, s6, v7 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; VI-NEXT: s_lshr_b32 s6, s5, 16 +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; VI-NEXT: v_add_f16_e32 v9, s28, v7 -; VI-NEXT: v_add_f16_e32 v54, s5, v7 -; VI-NEXT: v_add_f16_e32 v11, s7, v7 -; VI-NEXT: v_add_f16_e32 v55, s4, v7 -; VI-NEXT: v_lshlrev_b32_e32 v7, 16, v51 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; VI-NEXT: s_lshr_b32 s7, s4, 16 +; VI-NEXT: v_add_f16_e32 v51, s6, v7 +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v9, v9, v8 -; VI-NEXT: v_or_b32_e32 v8, v54, v7 -; VI-NEXT: v_lshlrev_b32_e32 v7, 16, v11 -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; VI-NEXT: v_or_b32_e32 v7, v55, v7 -; VI-NEXT: v_lshrrev_b32_e32 v11, 8, v8 -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v11, 8, v7 +; VI-NEXT: v_add_f16_e32 v8, s5, v7 +; VI-NEXT: v_add_f16_e32 v14, s7, v7 +; VI-NEXT: v_add_f16_e32 v17, s4, v7 +; VI-NEXT: v_lshlrev_b32_e32 v7, 16, v51 +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; VI-NEXT: v_or_b32_e32 v8, v8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v7, 16, v14 +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; VI-NEXT: v_or_b32_e32 v7, v17, v7 +; VI-NEXT: v_lshrrev_b32_e32 v14, 8, v8 +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v14, 8, v7 ; VI-NEXT: v_lshrrev_b64 v[7:8], 24, v[7:8] -; VI-NEXT: v_lshrrev_b32_e32 v8, 8, v10 -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v8, 8, v9 -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v8, 8, v13 -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; 
VI-NEXT: v_lshrrev_b32_e32 v8, 8, v2 -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v8, 8, v1 +; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v10 +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v9 +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v13 +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v12 +; VI-NEXT: v_lshrrev_b64 v[13:14], 24, v[12:13] +; VI-NEXT: v_lshrrev_b32_e32 v12, 8, v2 +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v12, 8, v1 ; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[1:2] -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v16 +; VI-NEXT: v_lshrrev_b32_e32 v8, 8, v19 +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v4 -; VI-NEXT: v_lshrrev_b32_e32 v39, 8, v16 -; VI-NEXT: v_lshrrev_b64 v[16:17], 24, v[15:16] -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v3 -; VI-NEXT: v_lshrrev_b64 v[13:14], 24, v[12:13] -; VI-NEXT: v_lshrrev_b32_e32 v44, 8, v18 -; VI-NEXT: v_lshrrev_b64 v[17:18], 24, v[18:19] -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[3:4] -; VI-NEXT: v_lshrrev_b32_e32 v49, 8, v15 -; VI-NEXT: v_lshrrev_b32_e32 v40, 8, v19 -; VI-NEXT: v_lshrrev_b32_e32 v60, 8, v35 +; VI-NEXT: v_lshrrev_b64 v[9:10], 24, v[9:10] +; VI-NEXT: v_lshrrev_b64 v[16:17], 24, v[15:16] +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v10, 8, v18 +; VI-NEXT: v_lshrrev_b64 v[17:18], 24, v[18:19] +; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v31 +; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v15 +; VI-NEXT: v_lshrrev_b32_e32 v8, 8, v35 ; VI-NEXT: v_lshrrev_b64 v[18:19], 24, v[34:35] +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; 
VI-NEXT: v_lshrrev_b32_e32 v1, 8, v30 ; VI-NEXT: v_lshrrev_b32_e32 v35, 8, v23 ; VI-NEXT: v_lshrrev_b64 v[14:15], 24, v[22:23] +; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v23, v50, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v23, v52, 8, 8 -; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v31 -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v23, v37, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v30 -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v23, v43, 8, 8 -; VI-NEXT: v_lshrrev_b64 v[10:11], 24, v[9:10] -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; VI-NEXT: v_bfe_u32 v23, v53, 8, 8 -; VI-NEXT: v_lshrrev_b32_e32 v48, 8, v12 -; VI-NEXT: v_lshrrev_b32_e32 v42, 8, v34 -; VI-NEXT: v_lshrrev_b32_e32 v34, 8, v24 -; VI-NEXT: v_lshrrev_b64 v[11:12], 24, v[24:25] -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v50, v11 +; VI-NEXT: v_bfe_u32 v11, v48, 8, 8 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[3:4], 24, v[30:31] +; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v6 +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; VI-NEXT: v_bfe_u32 v11, v39, 8, 8 ; VI-NEXT: v_lshrrev_b32_e32 v56, 8, v20 ; VI-NEXT: v_lshrrev_b64 v[19:20], 24, v[20:21] -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v5 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v5 ; VI-NEXT: v_lshrrev_b64 v[4:5], 24, v[5:6] +; VI-NEXT: v_lshrrev_b64 v[30:31], 24, v[26:27] +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; VI-NEXT: v_bfe_u32 v11, v53, 8, 8 +; VI-NEXT: v_lshrrev_b32_e32 v42, 8, v34 ; VI-NEXT: v_lshrrev_b32_e32 v41, 8, v21 ; VI-NEXT: v_lshrrev_b64 v[20:21], 24, v[32:33] -; VI-NEXT: v_lshrrev_b32_e32 v30, 8, v6 ; VI-NEXT: v_lshrrev_b64 v[5:6], 24, v[28:29] -; VI-NEXT: v_lshrrev_b64 v[8:9], 24, v[26:27] +; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v25 +; VI-NEXT: v_lshrrev_b32_e32 v34, 8, v24 +; VI-NEXT: v_lshrrev_b64 v[24:25], 24, v[24:25] +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; VI-NEXT: v_bfe_u32 v11, v52, 8, 8 ; VI-NEXT: v_lshrrev_b32_e32 v46, 8, v33 ; VI-NEXT: v_lshrrev_b32_e32 v58, 8, v32 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v29 ; VI-NEXT: v_lshrrev_b32_e32 v33, 8, v28 ; VI-NEXT: v_lshrrev_b32_e32 v28, 8, v27 ; 
VI-NEXT: v_lshrrev_b32_e32 v29, 8, v26 -; VI-NEXT: v_lshrrev_b32_e32 v9, 8, v25 ; VI-NEXT: v_lshrrev_b32_e32 v21, 8, v22 ; VI-NEXT: v_bfe_u32 v25, v51, 8, 8 ; VI-NEXT: v_bfe_u32 v27, v59, 8, 8 @@ -188408,12 +190162,11 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_bfe_u32 v1, v61, 8, 8 ; VI-NEXT: v_bfe_u32 v22, v38, 8, 8 ; VI-NEXT: v_bfe_u32 v2, v36, 8, 8 -; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; VI-NEXT: v_bfe_u32 v44, v37, 8, 8 +; VI-NEXT: v_bfe_u32 v11, v43, 8, 8 ; VI-NEXT: v_bfe_u32 v26, v50, 8, 8 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_bfe_u32 v23, v23, 8, 8 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_bfe_u32 v24, v24, 8, 8 +; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; VI-NEXT: s_branch .LBB95_5 ; VI-NEXT: .LBB95_3: ; VI-NEXT: ; implicit-def: $sgpr46 @@ -188573,133 +190326,120 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: s_branch .LBB95_2 ; VI-NEXT: .LBB95_4: ; VI-NEXT: v_mov_b32_e32 v1, s44 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s45 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s42 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s43 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s40 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s41 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s14 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s15 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s12 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s13 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s10 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s11 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s8 
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s9 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s6 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s7 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s16 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s17 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s18 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s19 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s20 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s21 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s22 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s23 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s24 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s25 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s26 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s27 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s28 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s29 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 
v1, s71 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v1, s69 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v1, s68 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v1, s67 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v1, s66 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v1, s65 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v1, s64 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s55 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v1, s87 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s85 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s53 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s52 ; VI-NEXT: v_readlane_b32 s6, v62, 0 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 1 ; VI-NEXT: v_mov_b32_e32 v36, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 2 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 3 ; VI-NEXT: v_mov_b32_e32 v38, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 4 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 5 ; VI-NEXT: v_mov_b32_e32 v61, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 6 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 7 ; VI-NEXT: v_mov_b32_e32 v45, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 9 ; VI-NEXT: v_mov_b32_e32 v47, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 10 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 11 ; VI-NEXT: v_mov_b32_e32 v57, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 12 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; 
VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 13 -; VI-NEXT: v_mov_b32_e32 v55, s4 -; VI-NEXT: v_readlane_b32 s4, v62, 16 ; VI-NEXT: v_mov_b32_e32 v59, s6 ; VI-NEXT: v_readlane_b32 s6, v62, 14 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v1, s6 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v1, s4 +; VI-NEXT: v_readlane_b32 s4, v62, 16 ; VI-NEXT: v_mov_b32_e32 v2, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 17 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v1, s6 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v22, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 18 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 19 ; VI-NEXT: v_mov_b32_e32 v15, s4 @@ -188714,45 +190454,48 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_readlane_b32 s4, v62, 24 ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 25 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 26 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s80 ; VI-NEXT: v_mov_b32_e32 v46, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 27 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s81 ; VI-NEXT: v_mov_b32_e32 v41, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 28 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v3, s80 -; VI-NEXT: v_mov_b32_e32 v60, s4 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s82 +; VI-NEXT: v_mov_b32_e32 v8, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 29 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v3, s81 -; VI-NEXT: v_mov_b32_e32 v40, s4 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 30 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v3, s82 -; VI-NEXT: v_mov_b32_e32 v39, s4 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 31 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 32 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 33 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 
offset:324 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 34 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 35 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 36 -; VI-NEXT: v_mov_b32_e32 v48, s4 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 37 -; VI-NEXT: v_mov_b32_e32 v49, s4 +; VI-NEXT: v_mov_b32_e32 v7, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 38 -; VI-NEXT: v_mov_b32_e32 v44, s4 +; VI-NEXT: v_mov_b32_e32 v10, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 39 ; VI-NEXT: v_mov_b32_e32 v42, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 40 @@ -188760,31 +190503,36 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_readlane_b32 s4, v62, 41 ; VI-NEXT: v_mov_b32_e32 v58, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 42 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 43 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 44 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 45 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 46 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 47 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, s4 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v3, s78 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; VI-NEXT: v_readlane_b32 s4, v62, 48 -; VI-NEXT: v_mov_b32_e32 v31, s4 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 49 -; VI-NEXT: v_mov_b32_e32 v30, s4 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s4 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s46 +; 
VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v3, s78 +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; VI-NEXT: v_readlane_b32 s4, v62, 50 ; VI-NEXT: v_mov_b32_e32 v33, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 51 @@ -188796,22 +190544,29 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_readlane_b32 s4, v62, 54 ; VI-NEXT: v_mov_b32_e32 v34, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 55 -; VI-NEXT: v_mov_b32_e32 v9, s4 +; VI-NEXT: v_mov_b32_e32 v31, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 56 ; VI-NEXT: v_mov_b32_e32 v3, s88 ; VI-NEXT: v_readlane_b32 s6, v62, 15 ; VI-NEXT: v_mov_b32_e32 v21, s4 ; VI-NEXT: v_readlane_b32 s4, v62, 57 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v40, s71 ; VI-NEXT: v_mov_b32_e32 v50, s70 -; VI-NEXT: v_mov_b32_e32 v43, s54 -; VI-NEXT: v_mov_b32_e32 v37, s86 -; VI-NEXT: v_mov_b32_e32 v52, s84 +; VI-NEXT: v_mov_b32_e32 v54, s69 +; VI-NEXT: v_mov_b32_e32 v43, s68 +; VI-NEXT: v_mov_b32_e32 v49, s67 +; VI-NEXT: v_mov_b32_e32 v37, s66 +; VI-NEXT: v_mov_b32_e32 v55, s65 +; VI-NEXT: v_mov_b32_e32 v52, s64 +; VI-NEXT: v_mov_b32_e32 v53, s54 +; VI-NEXT: v_mov_b32_e32 v60, s87 +; VI-NEXT: v_mov_b32_e32 v39, s86 +; VI-NEXT: v_mov_b32_e32 v48, s84 ; VI-NEXT: v_mov_b32_e32 v51, s6 -; VI-NEXT: v_mov_b32_e32 v54, s5 -; VI-NEXT: v_mov_b32_e32 v23, s83 -; VI-NEXT: v_mov_b32_e32 v24, s50 +; VI-NEXT: v_mov_b32_e32 v44, s83 +; VI-NEXT: v_mov_b32_e32 v11, s50 ; VI-NEXT: v_mov_b32_e32 v26, s51 ; VI-NEXT: v_mov_b32_e32 v35, s4 ; VI-NEXT: v_mov_b32_e32 v20, s76 @@ -188820,16 +190575,15 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_mov_b32_e32 v17, s62 ; VI-NEXT: v_mov_b32_e32 v16, s60 ; VI-NEXT: v_mov_b32_e32 v13, s58 -; VI-NEXT: v_mov_b32_e32 v10, s56 -; VI-NEXT: v_mov_b32_e32 v7, s46 +; VI-NEXT: v_mov_b32_e32 v9, s56 ; VI-NEXT: v_mov_b32_e32 v3, s90 ; VI-NEXT: v_mov_b32_e32 v4, s30 ; VI-NEXT: v_mov_b32_e32 v5, s34 -; VI-NEXT: v_mov_b32_e32 v8, s36 -; VI-NEXT: v_mov_b32_e32 v11, s38 +; VI-NEXT: v_mov_b32_e32 v30, s36 +; VI-NEXT: v_mov_b32_e32 v24, s38 ; VI-NEXT: v_mov_b32_e32 v14, s48 ; VI-NEXT: .LBB95_5: ; %end -; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v58 ; VI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 @@ -188871,30 +190625,30 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_readlane_b32 s31, v63, 1 ; VI-NEXT: v_readlane_b32 s30, v63, 0 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v58, v53, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v58, v23, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: 
buffer_load_dword v23, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v20, v53, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v20, v23, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v20, v58, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_store_dword v20, v0, s[0:3], 0 offen ; VI-NEXT: v_lshlrev_b32_e32 v20, 8, v46 -; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v20, v46, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v2, v20, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v20, vcc, 4, v0 ; VI-NEXT: buffer_store_dword v2, v20, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v56 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v2, v20, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v19, v20, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v2, v2, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v19, vcc, 8, v0 ; VI-NEXT: buffer_store_dword v2, v19, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v41 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v2, v19, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD @@ -188903,36 +190657,38 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_sdwa v2, v2, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v19, vcc, 12, v0 ; VI-NEXT: buffer_store_dword v2, v19, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v42 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v2, v19, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v2, v2, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v18, vcc, 16, v0 ; VI-NEXT: buffer_store_dword v2, v18, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v60 +; VI-NEXT: v_lshlrev_b32_e32 
v2, 8, v8 +; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 20, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v44 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v10 +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v17 -; VI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v17, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v8, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 24, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v40 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v15 @@ -188940,19 +190696,21 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 28, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v49 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 +; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v16 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v15, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v7, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 32, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte 
Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v39 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v12 @@ -188960,19 +190718,21 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 36, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v48 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v13 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v12, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v7, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 40, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -188982,21 +190742,21 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 44, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v10 +; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v9 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 48, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -189006,38 +190766,43 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 52, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v7 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v6 +; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 56, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v25 ; VI-NEXT: v_or_b32_sdwa v2, v51, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 60, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword 
v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v14 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v40, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 64, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v35 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD @@ -189046,130 +190811,121 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x44, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v34 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v11 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v24 +; VI-NEXT: v_or_b32_sdwa v2, v54, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x48, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v9 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v24 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v31 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v11 +; VI-NEXT: v_or_b32_sdwa v2, v43, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x4c, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v29 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v8 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v30 +; VI-NEXT: v_or_b32_sdwa v2, v49, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x50, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v28 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v23 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v6, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v44 +; VI-NEXT: v_or_b32_sdwa v2, v37, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x54, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v33 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v5 -; VI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v55, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x58, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v32 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; 
VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v52, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x5c, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v31 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v4 -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v2, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x60, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v30 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v43, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v53, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x64, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v3 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v60, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x68, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v37, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v39, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x6c, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -189177,28 +190933,28 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x70, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; 
VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v52, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v48, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x74, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -189206,15 +190962,15 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x78, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x7c, v0 ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD @@ -191614,24 +193370,22 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:396 ; 4-byte Folded 
Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:392 -; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v54, v15 +; SI-NEXT: v_mov_b32_e32 v57, v5 +; SI-NEXT: v_mov_b32_e32 v41, v3 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:392 ; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:4 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:36 ; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20 @@ -191648,160 +193402,164 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:164 ; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148 ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:188 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v14 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; 
SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v12 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v22 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v20 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v30 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v8 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v28 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v8 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24 +; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v20 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr20 ; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr59 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; kill: killed $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:360 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; kill: killed $vgpr42 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v3 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v36 +; SI-NEXT: v_lshlrev_b32_e32 v36, 24, v3 ; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v7 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:548 ; 4-byte 
Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v9 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v13 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v15 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v11 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v19 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v21 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v27 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:96 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:96 ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v17 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v29 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v23 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v27 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v31 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill ; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v32 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: v_lshlrev_b32_e32 v31, 8, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v18 +; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: s_waitcnt vmcnt(2) 
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:88 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:80 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:112 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:128 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:88 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v29 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:184 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v31 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:108 -; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:80 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:184 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:112 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:140 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 
offset:548 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:108 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:180 ; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:176 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v33 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v34 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v34, 8, v10 +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:172 ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:196 @@ -191810,21 +193568,21 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:216 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:212 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:208 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:204 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:228 @@ -191833,23 +193591,23 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill ; 
SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:248 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:244 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v9 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:236 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:260 @@ -191858,23 +193616,23 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:276 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:272 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v11 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:268 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:292 @@ -191882,25 +193640,26 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: 
v_lshlrev_b32_e32 v35, 8, v8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:312 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:308 ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:304 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v9 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v11 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:300 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:324 @@ -191911,66 +193670,65 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v63, 8, v8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:344 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:924 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:340 ; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:336 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v60, 24, v9 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:332 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:356 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:380 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:352 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:376 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:372 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:368 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:364 -; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:352 +; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v4 -; SI-NEXT: s_waitcnt vmcnt(5) 
+; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:388 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:384 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:936 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v10 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:376 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:952 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v18 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:372 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:368 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:956 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:364 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:388 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:384 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v26 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v11 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v31, 8, v15 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_lshlrev_b32_e32 v32, 24, v8 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v11 +; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:8 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:120 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:44 ; SI-NEXT: s_waitcnt expcnt(0) @@ -191978,658 +193736,661 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 
offset:616 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v35 +; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:56 -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v3 +; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v5 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v5 ; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v6 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v6 ; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; kill: killed $vgpr6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; kill: killed $vgpr6 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v7 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:960 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v12 -; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v12 ; SI-NEXT: ; implicit-def: 
$vgpr12 -; SI-NEXT: ; kill: killed $vgpr6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill +; SI-NEXT: ; kill: killed $vgpr12 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:24 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v13 +; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v14 +; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v15 +; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:24 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:48 -; SI-NEXT: v_lshlrev_b32_e32 v53, 8, v13 -; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:16 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr8 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:104 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v14 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; kill: killed $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:776 
; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:948 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:328 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:328 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:944 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:360 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:964 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: 
; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc ; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; SI-NEXT: s_cbranch_execz .LBB96_2 ; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v57 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v41 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v54 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v8, v16, v7 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v12, v1, v2 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v13, v3, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v5, v2, v5 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v6, 0xff, v2 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v2, v6, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v42, v2, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: 
v_or_b32_e32 v11, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v5, v2, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v7, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v10, v22, v7 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v15, v24, v7 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v17, v7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v18, v36, v7 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v7, v16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v11, v2, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v19, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v40, v1, v2 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v10, v24, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt 
vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v15, v2, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v20, v7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v26, v1, v2 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v18, v25, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v19, v2, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v30, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v30, v2, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v21, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v21, v2, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v29, v1, v2 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v23, v7, v2 +; 
SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v33, v2, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v33, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v27, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v49, v7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v27, v2, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v38, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v50, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v38, v1, v2 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v52, v7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v52, v1, v2 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:612 ; 4-byte 
Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v14, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v2, v1, v2 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v53, v2, v7 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v55, v1, v6 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v55, v7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v17, v6, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v2, v7 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v43, v3, v7 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v14, v1, v6 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v44, v7, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v44, v1, v6 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:680 ; 4-byte 
Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v16, v3, v7 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v45, v6, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v45, v3, v7 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v25, v1, v6 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v47, v7, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v47, v1, v6 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v24, v3, v7 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v58, v6, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v58, v3, v7 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v16, v1, v6 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v59, v7, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v59, v1, v6 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: 
v_and_b32_e32 v7, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v3, v34 +; SI-NEXT: v_or_b32_e32 v34, v6, v7 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v62, v6, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v36, v6, v7 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v24, v1, v6 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v48, v7, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v13, v1, v6 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v35, v6, v7 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v6, v6, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: v_or_b32_e32 v54, v6, v7 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v23, v1, v8 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v41, v7, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 
offset:880 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v37, v37, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v36, v1, v8 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v57, v51, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v8, v8, v35 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v63, v7, v63 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_or_b32_e32 v35, v37, v22 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v51, v56, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_or_b32_e32 v37, v51, v22 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v56, v60, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v51, v22, v63 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_or_b32_e32 v34, v56, v22 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v60, v7, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v9, v9, v7 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v7, v7, v31 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:976 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v26, v12, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_or_b32_e32 v56, v60, v22 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v54 -; SI-NEXT: v_or_b32_e32 v54, v22, v4 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v41 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_or_b32_e32 v9, v9, v22 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v57 +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: 
v_or_b32_e32 v4, v32, v4 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v28, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v25 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v28, v22, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v29, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v32, v22, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v32, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v39, v22, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v62, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v41, v22, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v46, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v57, v22, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v61, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v60, v22, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v3, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:756 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v63, v22, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v39, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v1, v22, v1 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v1, v55 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v48, v22, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v40, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v46, v22, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v31, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v49, v22, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v12, v12, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v50, v22, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v42, v22, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v22, 0xff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v20, v22, v20 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v22, v22, v1 +; SI-NEXT: buffer_load_dword v1, 
off, s[0:3], s32 offset:964 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 -; SI-NEXT: v_or_b32_e32 v53, v22, v53 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 -; SI-NEXT: v_or_b32_e32 v3, v22, v3 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v61 -; SI-NEXT: v_mov_b32_e32 v61, v42 -; SI-NEXT: v_or_b32_e32 v31, v22, v31 -; SI-NEXT: v_or_b32_e32 v22, v12, v61 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v28 -; SI-NEXT: v_or_b32_e32 v43, v12, v5 -; SI-NEXT: v_alignbit_b32 v5, v22, v5, 16 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v12, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v32 -; SI-NEXT: v_or_b32_e32 v7, v7, v11 -; SI-NEXT: v_and_b32_e32 v32, 0xffff, v49 -; SI-NEXT: v_or_b32_e32 v32, v32, v59 -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: v_or_b32_e32 v25, v6, v13 +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v7 +; SI-NEXT: v_or_b32_e32 v6, v6, v5 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v7, v25, v5, 16 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v26 +; SI-NEXT: v_or_b32_e32 v6, v6, v11 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v5, v5, v12 -; SI-NEXT: v_alignbit_b32 v11, v5, v11, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v40 -; SI-NEXT: v_or_b32_e32 v42, v11, v10 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v40, v11, v15 -; SI-NEXT: v_alignbit_b32 v11, v42, v15, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v5, v5, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v26 -; SI-NEXT: v_or_b32_e32 v26, v11, v18 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v41 -; SI-NEXT: v_or_b32_e32 v39, v11, v19 -; SI-NEXT: v_alignbit_b32 v11, v26, v19, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v6, v5, v11, 16 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 -; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: v_or_b32_e32 v28, v11, v30 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v57 -; SI-NEXT: v_or_b32_e32 v11, v11, v21 +; SI-NEXT: 
v_and_b32_e32 v11, 0xffff, v6 +; SI-NEXT: v_or_b32_e32 v6, v11, v10 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v28 +; SI-NEXT: v_or_b32_e32 v11, v11, v15 +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_alignbit_b32 v11, v6, v15, 16 +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v17 +; SI-NEXT: v_or_b32_e32 v26, v11, v18 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v29 +; SI-NEXT: v_or_b32_e32 v11, v11, v19 ; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_alignbit_b32 v11, v26, v19, 16 +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v20 +; SI-NEXT: v_or_b32_e32 v28, v11, v30 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v20, v11, v21 ; SI-NEXT: v_alignbit_b32 v11, v28, v21, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v29 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v29, v11, v33 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v60 -; SI-NEXT: v_or_b32_e32 v21, v11, v27 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v62 +; SI-NEXT: v_or_b32_e32 v23, v11, v27 ; SI-NEXT: v_alignbit_b32 v11, v29, v27, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v49 ; SI-NEXT: v_or_b32_e32 v19, v11, v38 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v63 -; SI-NEXT: v_or_b32_e32 v27, v11, v52 -; SI-NEXT: v_alignbit_b32 v11, v19, v52, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v46 +; SI-NEXT: v_or_b32_e32 v27, v11, v50 +; SI-NEXT: v_alignbit_b32 v11, v19, v50, 16 +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v23 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: v_or_b32_e32 v11, v11, v2 -; SI-NEXT: v_alignbit_b32 v1, v11, v55, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v52 +; SI-NEXT: v_or_b32_e32 v11, v11, v14 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v21, v15, v53 +; SI-NEXT: v_alignbit_b32 v15, v11, v53, 16 +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v55 +; SI-NEXT: v_or_b32_e32 v15, v15, v2 +; SI-NEXT: v_or_b32_e32 v46, v3, v43 +; SI-NEXT: v_alignbit_b32 v3, v15, v43, 16 +; SI-NEXT: 
buffer_store_dword v3, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v17, v3, v16 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v39 +; SI-NEXT: v_or_b32_e32 v39, v3, v45 +; SI-NEXT: v_alignbit_b32 v3, v17, v45, 16 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v17 -; SI-NEXT: v_or_b32_e32 v15, v1, v14 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v48 -; SI-NEXT: v_or_b32_e32 v1, v1, v44 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v61, v3, v24 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v3, v3, v58 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v15, v44, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v3, v61, v58, 16 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v45 -; SI-NEXT: v_or_b32_e32 v17, v1, v25 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v46 -; SI-NEXT: v_or_b32_e32 v1, v1, v47 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v17, v47, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v59 +; SI-NEXT: v_or_b32_e32 v62, v3, v34 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v31 +; SI-NEXT: v_or_b32_e32 v40, v3, v36 +; SI-NEXT: v_alignbit_b32 v3, v62, v36, 16 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v48 +; SI-NEXT: v_or_b32_e32 v59, v3, v35 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v12 +; SI-NEXT: v_or_b32_e32 v31, v3, v54 +; SI-NEXT: v_alignbit_b32 v3, v59, v54, 16 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v47, v3, v37 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v25, v3, v57 +; SI-NEXT: v_alignbit_b32 v3, v47, v57, 16 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v58 -; SI-NEXT: v_or_b32_e32 v1, v1, v16 -; SI-NEXT: v_alignbit_b32 v32, v1, v59, 16 -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v32, 0xffff, v62 -; SI-NEXT: v_or_b32_e32 v59, v6, v23 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v20 -; SI-NEXT: v_or_b32_e32 v62, v32, v24 -; SI-NEXT: v_and_b32_e32 v32, 0xffff, v50 -; SI-NEXT: v_or_b32_e32 v50, v6, v36 -; SI-NEXT: v_alignbit_b32 v6, v59, v36, 16 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v8 -; SI-NEXT: v_or_b32_e32 v47, v6, v35 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v49, v6, v37 -; SI-NEXT: v_alignbit_b32 v6, v47, v37, 16 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v51 -; SI-NEXT: v_or_b32_e32 v45, v6, v34 -; SI-NEXT: v_or_b32_e32 v48, v3, v56 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v63 +; SI-NEXT: v_or_b32_e32 v45, v3, v51 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v22 +; SI-NEXT: v_or_b32_e32 v22, v3, v56 ; SI-NEXT: v_alignbit_b32 v3, v45, v56, 16 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v54 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v60 ; SI-NEXT: v_or_b32_e32 v44, v3, v4 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v31 -; SI-NEXT: v_or_b32_e32 v3, v3, v9 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v14 -; SI-NEXT: v_mov_b32_e32 v14, v3 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v4 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; kill: killed $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr3 @@ -192834,169 +194595,191 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: ; kill: killed $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: v_or_b32_e32 v46, v32, v13 -; SI-NEXT: v_alignbit_b32 v13, v62, v13, 16 -; SI-NEXT: v_alignbit_b32 v6, v44, v9, 16 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v61 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v38 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v24 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; kill: killed $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: v_mov_b32_e32 v3, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:980 ; 4-byte Folded Reload +; SI-NEXT: ; kill: killed $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; kill: killed $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: v_or_b32_e32 v12, v1, v9 +; SI-NEXT: v_alignbit_b32 v1, v44, v9, 16 +; SI-NEXT: ; kill: killed $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v35 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v34 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4 +; SI-NEXT: ; kill: killed $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v13 +; SI-NEXT: v_mov_b32_e32 v13, v25 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v38 +; SI-NEXT: 
v_lshrrev_b32_e32 v38, 16, v14 +; SI-NEXT: v_mov_b32_e32 v14, v31 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v34 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v35 +; SI-NEXT: v_mov_b32_e32 v35, v22 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v51 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr57 ; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr16 +; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr63 ; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; kill: killed $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; kill: killed $vgpr42 ; SI-NEXT: .LBB96_2: ; %Flow ; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; SI-NEXT: s_cbranch_execz .LBB96_4 ; SI-NEXT: ; %bb.3: ; %cmp.true ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v61 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v41 -; SI-NEXT: v_or_b32_e32 v1, v31, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:976 ; 4-byte Folded Reload +; SI-NEXT: s_movk_i32 s6, 0x300 +; SI-NEXT: v_mov_b32_e32 v30, v16 +; SI-NEXT: v_mov_b32_e32 v33, v31 +; SI-NEXT: v_mov_b32_e32 v31, v22 +; SI-NEXT: s_mov_b32 s7, 0x3000000 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_add_i32_e32 v2, 
vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_or_b32_e32 v3, v3, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x300, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v56, v5 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 ; SI-NEXT: v_or_b32_e32 v2, v9, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v23, v2, v3 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: v_or_b32_e32 v2, v4, v2 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v57 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: s_movk_i32 s6, 0x300 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_or_b32_e32 v4, v32, v4 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: v_or_b32_e32 v2, v4, v2 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:968 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v34 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v5, v56, v5 -; SI-NEXT: v_mov_b32_e32 v30, v16 -; SI-NEXT: s_mov_b32 s7, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v31, v24 +; SI-NEXT: v_mov_b32_e32 v32, v24 ; SI-NEXT: v_add_i32_e32 v44, vcc, s7, v2 -; SI-NEXT: v_add_i32_e32 v27, vcc, s7, v1 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:916 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:952 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:936 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], 
s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:972 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:964 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, s6, v3 ; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v51, v6 -; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v37, v7 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 -; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 ; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: v_or_b32_e32 v4, v60, v4 ; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:956 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v32, vcc, s7, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v48, v32 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:924 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v35, vcc, s7, v3 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 ; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 ; SI-NEXT: v_or_b32_e32 v4, v63, v4 ; SI-NEXT: v_add_i32_e32 v4, vcc, s6, v4 ; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 ; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:948 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v45, vcc, s7, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v45 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v45 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5 ; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_or_b32_e32 v5, v53, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v5, vcc, s6, v5 ; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v6 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v6, v51, v6 ; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v33, vcc, s7, v5 -; SI-NEXT: v_mov_b32_e32 v49, v33 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v2, vcc, s7, v5 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v6 
; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_or_b32_e32 v6, v35, v6 +; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v6, vcc, s6, v6 ; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v7 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_or_b32_e32 v7, v37, v7 ; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v47, vcc, s7, v6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v7 ; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 ; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v7, vcc, s6, v7 ; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193005,16 +194788,15 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; SI-NEXT: v_or_b32_e32 v8, v9, v8 ; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:932 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v6, vcc, s7, v7 -; SI-NEXT: v_mov_b32_e32 v50, v6 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, s7, v7 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v8 ; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v8, v9, v8 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v8, vcc, s6, v8 ; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193023,15 +194805,15 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_or_b32_e32 v9, v10, v9 ; SI-NEXT: v_or_b32_e32 v8, v9, v8 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:960 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v59, vcc, s7, v8 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v9 ; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, s6, v9 ; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193040,16 +194822,16 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 ; SI-NEXT: v_or_b32_e32 v10, v11, v10 ; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: buffer_load_dword v10, 
off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v8, vcc, s7, v9 -; SI-NEXT: v_mov_b32_e32 v46, v8 +; SI-NEXT: v_mov_b32_e32 v40, v8 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v10 ; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v10, v11, v10 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v10, vcc, s6, v10 ; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193058,15 +194840,15 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 ; SI-NEXT: v_or_b32_e32 v11, v12, v11 ; SI-NEXT: v_or_b32_e32 v10, v11, v10 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:928 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v62, vcc, s7, v10 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v11, vcc, 3, v11 ; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, s6, v11 ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193075,15 +194857,15 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 ; SI-NEXT: v_or_b32_e32 v12, v13, v12 ; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, s7, v11 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v12 ; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v12, v13, v12 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v12, vcc, s6, v12 ; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193092,15 +194874,15 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 ; SI-NEXT: v_or_b32_e32 v13, v14, v13 ; SI-NEXT: v_or_b32_e32 v12, v13, v12 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v1, vcc, s7, v12 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_load_dword v14, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v61, vcc, s7, v12 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v13 ; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v13, v14, v13 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v13, vcc, s6, v13 ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193109,45 +194891,51 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 ; SI-NEXT: v_or_b32_e32 v14, v15, v14 ; SI-NEXT: v_or_b32_e32 v13, v14, v13 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v10, vcc, s7, v13 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v39, vcc, s7, v13 +; SI-NEXT: v_mov_b32_e32 v13, v2 +; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14 ; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v14, vcc, s6, v14 ; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v15 ; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 ; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 ; SI-NEXT: v_or_b32_e32 v15, v17, v15 ; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v15 ; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v15, v17, v15 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v15, vcc, s6, v15 ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v16, v17, v16 ; SI-NEXT: v_or_b32_e32 v15, v16, v15 -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v13, vcc, s7, v15 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v46, vcc, 
s7, v15 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 ; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v16, v17, v16 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v16, vcc, s6, v16 ; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193156,15 +194944,15 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 ; SI-NEXT: v_or_b32_e32 v17, v18, v17 ; SI-NEXT: v_or_b32_e32 v16, v17, v16 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v15, vcc, s7, v16 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v17, v18, v17 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193173,32 +194961,32 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 ; SI-NEXT: v_or_b32_e32 v18, v19, v18 ; SI-NEXT: v_or_b32_e32 v18, v18, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v16, vcc, s7, v18 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v17, v19, v17 -; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v23 +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v17 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v19 ; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 ; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v17 ; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: v_or_b32_e32 v19, v19, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, s7, v19 ; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v11 -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_waitcnt vmcnt(1) +; 
SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v17, v20, v17 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193207,83 +194995,95 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 ; SI-NEXT: v_or_b32_e32 v20, v21, v20 ; SI-NEXT: v_or_b32_e32 v20, v20, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v27, vcc, s7, v20 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v17, v21, v17 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21 ; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v21, v22, v21 -; SI-NEXT: v_or_b32_e32 v21, v21, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v19, vcc, s7, v21 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v21, v26, v21 +; SI-NEXT: v_or_b32_e32 v22, v21, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v19, vcc, s7, v22 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v19 +; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v17, v22, v17 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v17, v21, v17 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_or_b32_e32 v22, v22, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v21, vcc, s7, v22 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21 +; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_or_b32_e32 v21, v26, v21 +; SI-NEXT: v_or_b32_e32 v24, v21, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:728 
; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v17, v23, v17 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v17, v21, v17 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v17, vcc, s6, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v23 -; SI-NEXT: v_and_b32_e32 v23, 0xff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_or_b32_e32 v23, v26, v23 -; SI-NEXT: v_or_b32_e32 v23, v23, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v26, v25 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v29, vcc, s7, v23 +; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21 +; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_or_b32_e32 v21, v26, v21 +; SI-NEXT: v_or_b32_e32 v29, v21, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v29, vcc, s7, v29 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: v_or_b32_e32 v24, v24, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v24 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v26, v21, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v26 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17 ; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v25, v25, v17 -; SI-NEXT: v_or_b32_e32 v2, v25, v2 -; SI-NEXT: v_add_i32_e32 v18, vcc, s7, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v17, vcc, s7, v14 -; SI-NEXT: v_mov_b32_e32 v14, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, s7, v20 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v28, v21, v17 +; SI-NEXT: v_or_b32_e32 v2, v28, v2 +; SI-NEXT: v_add_i32_e32 v20, vcc, s7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v17, vcc, s7, v23 +; SI-NEXT: v_mov_b32_e32 v12, v17 +; SI-NEXT: v_add_i32_e32 v17, vcc, s7, v14 +; SI-NEXT: v_mov_b32_e32 v14, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v23, vcc, s7, v24 +; SI-NEXT: v_add_i32_e32 v21, vcc, s7, v18 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v15 +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 
v2, v3, v2 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193292,17 +195092,15 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_or_b32_e32 v3, v4, v3 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v28, vcc, s7, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v28 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v25 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v28 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193311,52 +195109,51 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_or_b32_e32 v3, v4, v3 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_add_i32_e32 v39, vcc, s7, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v10, vcc, s7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v26, v3 +; SI-NEXT: v_or_b32_e32 v3, v36, v3 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_add_i32_e32 v26, vcc, s7, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v26 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v26 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: 
v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v3, v32, v3 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_add_i32_e32 v40, vcc, s7, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_add_i32_e32 v16, vcc, s7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) @@ -193365,310 +195162,295 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_or_b32_e32 v3, v31, v3 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_add_i32_e32 v42, vcc, s7, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v42 +; SI-NEXT: v_add_i32_e32 v6, vcc, s7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v6 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 +; SI-NEXT: v_or_b32_e32 v2, v34, v2 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: v_or_b32_e32 v3, v4, v3 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_add_i32_e32 v7, vcc, s7, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v4, vcc, s7, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:504 
; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(2) ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v54 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: v_or_b32_e32 v3, v30, v3 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_add_i32_e32 v5, vcc, s7, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v59 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v41 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v2, v33, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v62 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v59 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v3, v1, v3 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_add_i32_e32 v43, vcc, s7, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: v_add_i32_e32 v7, vcc, s7, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:944 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v2, v1, v2 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:920 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v3, v1, v3 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_add_i32_e32 v22, vcc, s7, v2 -; SI-NEXT: v_alignbit_b32 v2, 
v22, v43, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill +; SI-NEXT: v_add_i32_e32 v1, vcc, s7, v2 +; SI-NEXT: v_alignbit_b32 v2, v5, v4, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_alignbit_b32 v2, v6, v16, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_alignbit_b32 v2, v26, v10, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v5, v7, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v28, v20, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v42, v40, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v29, v23, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v26, v39, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v2, v28, v18, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v19, v27, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v29, v21, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v11, v21, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v19, v27, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v2, v11, v16, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v2, v15, v13, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:884 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v2, v17, v10, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:888 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v15, v46, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_alignbit_b32 v2, v17, v39, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(1) 
-; SI-NEXT: v_alignbit_b32 v2, v1, v9, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:892 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v61, v9, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:856 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_alignbit_b32 v2, v62, v8, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:896 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:860 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v59, v6, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:900 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v59, v14, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:864 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v47, v33, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v47, v13, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v45, v32, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:908 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v45, v35, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:872 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v44, v14, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:912 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v29 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v15 +; SI-NEXT: v_alignbit_b32 v2, v44, v12, 16 +; SI-NEXT: v_alignbit_b32 v3, v1, v7, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:876 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v1 ; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v17 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v61 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v47 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v62 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v47 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v44 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:940 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v44 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:904 ; 4-byte Folded Spill ; SI-NEXT: .LBB96_4: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v43 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v58 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 
v4, 16, v58 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v7 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v43 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v40 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v42 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v55 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v39 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, 
v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v20 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v53 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v52 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v21 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v23 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v13 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v29 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v27 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v27 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_and_b32_e32 v3, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:880 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v21 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v38 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v38 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:884 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v46 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v18 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v18 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:852 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:888 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: 
v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v10 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v17 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v10 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:892 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:856 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:896 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:860 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v46 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v40 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -193680,9 +195462,9 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:900 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:864 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -193694,9 +195476,9 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:868 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v49 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -193704,13 +195486,13 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v47 
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v8 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:908 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:872 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v48 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -193718,19 +195500,19 @@ define <64 x i16> @bitcast_v128i8_to_v64i16(<128 x i8> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v45 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:912 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:876 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:940 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:904 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 @@ -197895,1598 +199677,1736 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[4:5] -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v60, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:332 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:328 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:324 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:320 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:308 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:304 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:300 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:296 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:292 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:288 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:276 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:272 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:268 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:264 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:256 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:244 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:240 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:236 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:232 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:228 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:224 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:212 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:208 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:200 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:196 -; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane -; SI-NEXT: v_writelane_b32 v63, s30, 0 -; SI-NEXT: v_writelane_b32 v62, s28, 0 -; SI-NEXT: v_writelane_b32 v62, s25, 1 -; SI-NEXT: v_writelane_b32 v62, s24, 2 -; SI-NEXT: v_writelane_b32 v62, s23, 3 -; SI-NEXT: v_writelane_b32 v62, s22, 4 -; SI-NEXT: v_writelane_b32 v62, s21, 5 -; SI-NEXT: v_writelane_b32 v62, s18, 6 -; SI-NEXT: v_writelane_b32 v62, s16, 7 -; SI-NEXT: v_writelane_b32 v63, s31, 1 -; SI-NEXT: v_writelane_b32 v63, s34, 2 -; SI-NEXT: v_writelane_b32 v63, s35, 3 -; SI-NEXT: v_writelane_b32 v63, s36, 4 -; SI-NEXT: v_writelane_b32 v63, s37, 5 -; SI-NEXT: v_writelane_b32 v63, s38, 6 -; SI-NEXT: v_writelane_b32 v63, s39, 7 -; SI-NEXT: v_writelane_b32 v63, s48, 8 -; SI-NEXT: v_writelane_b32 v63, s49, 9 -; SI-NEXT: v_writelane_b32 v63, s50, 10 -; SI-NEXT: v_writelane_b32 v63, s51, 11 -; SI-NEXT: v_writelane_b32 v63, s52, 12 -; SI-NEXT: v_writelane_b32 v63, s53, 13 -; SI-NEXT: v_writelane_b32 v63, s54, 14 -; SI-NEXT: v_writelane_b32 v63, s55, 15 -; SI-NEXT: v_writelane_b32 v63, s64, 16 -; SI-NEXT: v_writelane_b32 v63, s65, 17 -; SI-NEXT: v_writelane_b32 v63, s66, 18 -; SI-NEXT: v_writelane_b32 v63, s67, 19 -; SI-NEXT: v_writelane_b32 v63, s68, 20 -; SI-NEXT: v_writelane_b32 v63, s69, 21 -; SI-NEXT: v_writelane_b32 v63, s70, 22 -; SI-NEXT: v_writelane_b32 v63, s71, 23 -; SI-NEXT: v_writelane_b32 v63, s80, 24 -; SI-NEXT: v_writelane_b32 
v63, s81, 25 -; SI-NEXT: v_writelane_b32 v63, s82, 26 -; SI-NEXT: v_writelane_b32 v63, s83, 27 -; SI-NEXT: v_writelane_b32 v63, s84, 28 -; SI-NEXT: v_writelane_b32 v63, s85, 29 -; SI-NEXT: v_writelane_b32 v63, s86, 30 -; SI-NEXT: v_writelane_b32 v63, s87, 31 -; SI-NEXT: v_writelane_b32 v63, s96, 32 -; SI-NEXT: v_writelane_b32 v63, s97, 33 -; SI-NEXT: v_writelane_b32 v63, s98, 34 -; SI-NEXT: v_writelane_b32 v63, s99, 35 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:332 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:328 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:320 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:316 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:312 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:308 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:304 +; SI-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_writelane_b32 v41, s30, 0 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v29, v26 -; SI-NEXT: v_readfirstlane_b32 s15, v16 -; SI-NEXT: v_readfirstlane_b32 s18, v25 -; SI-NEXT: v_readfirstlane_b32 s43, v15 -; SI-NEXT: v_readfirstlane_b32 s42, v24 -; SI-NEXT: v_readfirstlane_b32 s44, v23 -; SI-NEXT: v_readfirstlane_b32 s49, v12 -; SI-NEXT: v_readfirstlane_b32 s8, v11 -; SI-NEXT: v_readfirstlane_b32 s53, v20 -; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_writelane_b32 v43, s29, 0 +; SI-NEXT: v_writelane_b32 v43, s28, 1 +; SI-NEXT: v_writelane_b32 v43, s27, 2 +; SI-NEXT: v_writelane_b32 v43, s26, 3 +; SI-NEXT: v_writelane_b32 v43, s25, 4 +; SI-NEXT: v_writelane_b32 v43, s24, 5 +; SI-NEXT: v_writelane_b32 v43, s23, 6 +; SI-NEXT: v_writelane_b32 v43, s22, 7 +; SI-NEXT: v_writelane_b32 v43, s21, 8 +; SI-NEXT: v_writelane_b32 v43, s20, 9 +; SI-NEXT: v_writelane_b32 v43, s19, 10 +; SI-NEXT: v_writelane_b32 v43, s18, 11 +; SI-NEXT: v_writelane_b32 v43, s17, 12 +; SI-NEXT: v_writelane_b32 v43, s16, 13 +; SI-NEXT: v_writelane_b32 v41, s31, 1 +; SI-NEXT: v_writelane_b32 v41, s34, 2 +; SI-NEXT: v_writelane_b32 v41, s35, 3 +; SI-NEXT: v_writelane_b32 v41, s36, 4 +; SI-NEXT: v_writelane_b32 v41, s37, 5 +; SI-NEXT: v_writelane_b32 v41, s38, 6 +; SI-NEXT: v_writelane_b32 v41, s39, 7 +; SI-NEXT: v_writelane_b32 v41, s48, 8 +; SI-NEXT: v_writelane_b32 v41, s49, 9 +; SI-NEXT: v_writelane_b32 v41, s50, 10 +; SI-NEXT: v_writelane_b32 v41, s51, 11 +; SI-NEXT: v_writelane_b32 v41, s52, 12 +; SI-NEXT: v_writelane_b32 v41, s53, 13 +; SI-NEXT: v_writelane_b32 v41, s54, 14 +; SI-NEXT: v_writelane_b32 v41, s55, 15 +; SI-NEXT: v_writelane_b32 v41, s64, 16 +; SI-NEXT: v_writelane_b32 v41, s65, 17 +; SI-NEXT: v_writelane_b32 v41, s66, 18 +; SI-NEXT: v_writelane_b32 v41, s67, 19 +; SI-NEXT: v_writelane_b32 v41, s68, 20 +; SI-NEXT: v_writelane_b32 v41, s69, 21 +; SI-NEXT: v_writelane_b32 v41, s70, 22 +; SI-NEXT: v_writelane_b32 v41, s71, 23 +; SI-NEXT: v_writelane_b32 v41, s80, 24 +; SI-NEXT: v_writelane_b32 v41, s81, 25 +; SI-NEXT: v_writelane_b32 v41, s82, 26 +; SI-NEXT: v_writelane_b32 v41, s83, 27 +; SI-NEXT: v_writelane_b32 v41, s84, 28 +; SI-NEXT: v_writelane_b32 v41, s85, 29 +; SI-NEXT: v_writelane_b32 v41, s86, 30 +; SI-NEXT: v_writelane_b32 v41, s87, 31 +; SI-NEXT: v_writelane_b32 v41, s96, 32 +; SI-NEXT: v_writelane_b32 v41, s97, 33 +; SI-NEXT: v_writelane_b32 v41, s98, 34 +; SI-NEXT: v_readfirstlane_b32 
s39, v26 +; SI-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane +; SI-NEXT: v_readfirstlane_b32 s47, v12 +; SI-NEXT: v_writelane_b32 v42, s39, 0 +; SI-NEXT: v_readfirstlane_b32 s56, v11 +; SI-NEXT: v_writelane_b32 v42, s47, 1 +; SI-NEXT: v_readfirstlane_b32 s48, v24 +; SI-NEXT: v_writelane_b32 v42, s56, 2 +; SI-NEXT: v_readfirstlane_b32 s49, v23 +; SI-NEXT: v_writelane_b32 v42, s48, 3 +; SI-NEXT: v_readfirstlane_b32 s50, v21 +; SI-NEXT: v_writelane_b32 v42, s49, 4 +; SI-NEXT: v_readfirstlane_b32 s51, v22 +; SI-NEXT: v_writelane_b32 v42, s50, 5 +; SI-NEXT: v_writelane_b32 v42, s51, 6 +; SI-NEXT: v_readfirstlane_b32 s57, v20 +; SI-NEXT: v_readfirstlane_b32 s58, v19 +; SI-NEXT: v_readfirstlane_b32 s64, v29 +; SI-NEXT: v_readfirstlane_b32 s65, v30 +; SI-NEXT: v_readfirstlane_b32 s59, v28 +; SI-NEXT: v_readfirstlane_b32 s60, v27 +; SI-NEXT: v_readfirstlane_b32 s11, v1 +; SI-NEXT: v_readfirstlane_b32 s12, v2 +; SI-NEXT: v_readfirstlane_b32 s13, v9 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: v_writelane_b32 v43, s4, 14 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:300 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:296 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:292 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:288 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:284 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:280 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s4, v32 +; SI-NEXT: v_writelane_b32 v43, s4, 15 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s4, v33 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:276 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:272 +; SI-NEXT: v_writelane_b32 v43, s4, 16 +; SI-NEXT: s_waitcnt vmcnt(12) ; SI-NEXT: v_readfirstlane_b32 s4, v34 -; SI-NEXT: v_writelane_b32 v62, s4, 8 -; SI-NEXT: v_readfirstlane_b32 s4, v38 -; SI-NEXT: v_writelane_b32 v62, s4, 9 +; SI-NEXT: v_writelane_b32 v43, s4, 17 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s4, v35 +; SI-NEXT: v_writelane_b32 v43, s4, 18 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s44, v36 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s90, v37 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:268 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:264 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:260 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:256 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s6, v38 +; SI-NEXT: v_readfirstlane_b32 s14, v10 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s18, v7 +; SI-NEXT: v_readfirstlane_b32 s21, v5 +; SI-NEXT: v_readfirstlane_b32 s22, v6 +; SI-NEXT: v_readfirstlane_b32 s40, v17 +; SI-NEXT: v_readfirstlane_b32 s41, v18 +; SI-NEXT: v_readfirstlane_b32 s42, v4 +; SI-NEXT: v_readfirstlane_b32 s43, v3 +; SI-NEXT: v_readfirstlane_b32 s76, v16 +; SI-NEXT: v_readfirstlane_b32 s77, v15 +; SI-NEXT: v_readfirstlane_b32 s38, v25 +; SI-NEXT: v_writelane_b32 v41, s99, 35 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: v_writelane_b32 v43, s4, 19 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s4, v39 +; SI-NEXT: v_writelane_b32 v43, s4, 20 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s4, v48 +; SI-NEXT: v_writelane_b32 v43, s4, 21 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_readfirstlane_b32 s4, v49 -; 
SI-NEXT: v_writelane_b32 v62, s4, 10 +; SI-NEXT: v_writelane_b32 v43, s4, 22 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_readfirstlane_b32 s4, v50 -; SI-NEXT: v_writelane_b32 v62, s4, 11 -; SI-NEXT: v_readfirstlane_b32 s79, v52 -; SI-NEXT: v_readfirstlane_b32 s88, v54 -; SI-NEXT: v_readfirstlane_b32 s4, v55 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:192 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:180 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:176 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:172 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:168 -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:164 -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:160 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:148 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:144 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:140 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:136 -; SI-NEXT: v_writelane_b32 v62, s4, 12 -; SI-NEXT: v_readfirstlane_b32 s77, v41 -; SI-NEXT: v_readfirstlane_b32 s4, v42 -; SI-NEXT: v_readfirstlane_b32 s94, v31 -; SI-NEXT: v_readfirstlane_b32 s70, v32 -; SI-NEXT: v_readfirstlane_b32 s51, v33 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s37, v45 -; SI-NEXT: v_readfirstlane_b32 s24, v56 -; SI-NEXT: v_readfirstlane_b32 s7, v57 -; SI-NEXT: v_readfirstlane_b32 s92, v58 -; SI-NEXT: v_readfirstlane_b32 s28, v59 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:132 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:116 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:112 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:108 -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:96 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:84 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:80 -; SI-NEXT: v_readfirstlane_b32 s35, v43 -; SI-NEXT: v_readfirstlane_b32 s55, v46 -; SI-NEXT: v_readfirstlane_b32 s68, v35 -; SI-NEXT: v_readfirstlane_b32 s87, v37 -; SI-NEXT: v_readfirstlane_b32 s67, v39 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:76 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s74, v53 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:64 -; SI-NEXT: v_readfirstlane_b32 s85, v48 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:40 -; SI-NEXT: v_writelane_b32 v62, s4, 13 -; SI-NEXT: v_readfirstlane_b32 s98, v40 -; SI-NEXT: v_readfirstlane_b32 s69, v51 -; SI-NEXT: v_readfirstlane_b32 s21, v36 -; SI-NEXT: v_readfirstlane_b32 s40, v19 -; SI-NEXT: v_readfirstlane_b32 s23, v28 -; SI-NEXT: v_readfirstlane_b32 s34, v27 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v21, v13 -; SI-NEXT: v_mov_b32_e32 v13, v5 -; SI-NEXT: v_readfirstlane_b32 s97, v29 -; SI-NEXT: v_readfirstlane_b32 s80, v18 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v22 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v30 -; SI-NEXT: v_readfirstlane_b32 s96, v17 -; SI-NEXT: 
v_readfirstlane_b32 s64, v9 -; SI-NEXT: v_readfirstlane_b32 s25, v8 -; SI-NEXT: v_readfirstlane_b32 s83, v7 -; SI-NEXT: v_readfirstlane_b32 s84, v4 -; SI-NEXT: v_readfirstlane_b32 s93, v3 -; SI-NEXT: v_readfirstlane_b32 s76, v1 -; SI-NEXT: v_readfirstlane_b32 s58, v38 -; SI-NEXT: v_readfirstlane_b32 s65, v49 -; SI-NEXT: v_readfirstlane_b32 s62, v54 -; SI-NEXT: v_readfirstlane_b32 s81, v44 -; SI-NEXT: v_readfirstlane_b32 s71, v47 -; SI-NEXT: v_readfirstlane_b32 s38, v60 -; SI-NEXT: v_readfirstlane_b32 s86, v61 -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:220 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s90, v50 -; SI-NEXT: v_readfirstlane_b32 s31, v52 -; SI-NEXT: v_readfirstlane_b32 s4, v55 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 -; SI-NEXT: v_readfirstlane_b32 s72, v31 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:316 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:72 -; SI-NEXT: v_readfirstlane_b32 s82, v56 -; SI-NEXT: v_readfirstlane_b32 s95, v57 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s39, v58 -; SI-NEXT: v_readfirstlane_b32 s56, v59 -; SI-NEXT: v_readfirstlane_b32 s57, v41 -; SI-NEXT: v_readfirstlane_b32 s36, v42 -; SI-NEXT: v_readfirstlane_b32 s73, v45 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:284 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:252 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:124 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:92 -; SI-NEXT: v_readfirstlane_b32 s16, v34 -; SI-NEXT: v_readfirstlane_b32 s48, v32 -; SI-NEXT: v_readfirstlane_b32 s52, v33 -; SI-NEXT: v_writelane_b32 v62, s4, 14 -; SI-NEXT: v_readfirstlane_b32 s47, v35 -; SI-NEXT: v_readfirstlane_b32 s60, v37 -; SI-NEXT: v_readfirstlane_b32 s61, v39 -; SI-NEXT: v_readfirstlane_b32 s89, v43 +; SI-NEXT: v_writelane_b32 v43, s4, 23 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s4, v51 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:252 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:248 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:244 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:240 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:232 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:228 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s91, v32 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s8, v33 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:224 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220 +; SI-NEXT: v_writelane_b32 v43, s4, 24 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s4, v34 +; SI-NEXT: v_writelane_b32 
v43, s4, 25 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s4, v35 +; SI-NEXT: v_writelane_b32 v43, s4, 26 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s4, v36 +; SI-NEXT: v_writelane_b32 v43, s4, 27 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s4, v37 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:216 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:212 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:208 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:204 +; SI-NEXT: v_writelane_b32 v43, s4, 28 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: v_writelane_b32 v43, s4, 29 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s89, v38 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s78, v39 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s7, v48 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s82, v49 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s4, v50 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s96, v51 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:200 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:196 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:192 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:188 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:184 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:180 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:176 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s70, v33 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:168 +; SI-NEXT: v_writelane_b32 v43, s4, 30 +; SI-NEXT: v_readfirstlane_b32 s4, v32 +; SI-NEXT: v_writelane_b32 v43, s4, 31 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:164 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:160 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:156 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:152 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s99, v46 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:312 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:280 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:216 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:184 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:152 -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:120 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:88 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:24 -; SI-NEXT: v_readfirstlane_b32 s54, v48 -; SI-NEXT: v_readfirstlane_b32 s50, v53 -; SI-NEXT: v_readfirstlane_b32 s78, v49 -; SI-NEXT: v_readfirstlane_b32 s30, v51 -; SI-NEXT: v_readfirstlane_b32 s66, v54 -; SI-NEXT: v_readfirstlane_b32 s91, v40 +; SI-NEXT: v_readfirstlane_b32 s4, v34 +; SI-NEXT: v_writelane_b32 v43, s4, 32 +; SI-NEXT: v_readfirstlane_b32 s9, v35 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_readfirstlane_b32 s4, v37 +; SI-NEXT: v_writelane_b32 v43, s4, 33 +; SI-NEXT: v_readfirstlane_b32 s10, v36 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: v_writelane_b32 v43, s4, 34 +; SI-NEXT: s_waitcnt vmcnt(11) +; 
SI-NEXT: v_readfirstlane_b32 s4, v38 +; SI-NEXT: v_writelane_b32 v43, s4, 35 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s4, v39 +; SI-NEXT: v_writelane_b32 v43, s4, 36 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s69, v48 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s30, v49 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s16, v50 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s36, v51 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:148 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:144 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s4, v33 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:136 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:132 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:128 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:124 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:120 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:116 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:336 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:112 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:108 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:104 +; SI-NEXT: v_writelane_b32 v43, s4, 37 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s6, v44 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v50 +; SI-NEXT: v_readfirstlane_b32 s4, v52 +; SI-NEXT: v_writelane_b32 v43, s4, 38 +; SI-NEXT: v_readfirstlane_b32 s4, v53 +; SI-NEXT: v_writelane_b32 v43, s4, 39 +; SI-NEXT: v_readfirstlane_b32 s4, v54 +; SI-NEXT: v_writelane_b32 v43, s4, 40 +; SI-NEXT: v_writelane_b32 v43, s44, 41 +; SI-NEXT: v_writelane_b32 v43, s6, 42 +; SI-NEXT: v_writelane_b32 v43, s7, 43 +; SI-NEXT: v_writelane_b32 v43, s8, 44 +; SI-NEXT: v_writelane_b32 v43, s9, 45 +; SI-NEXT: v_writelane_b32 v43, s10, 46 +; SI-NEXT: v_writelane_b32 v43, s11, 47 +; SI-NEXT: v_writelane_b32 v43, s12, 48 +; SI-NEXT: v_writelane_b32 v43, s13, 49 +; SI-NEXT: v_writelane_b32 v43, s14, 50 +; SI-NEXT: v_writelane_b32 v43, s15, 51 +; SI-NEXT: v_writelane_b32 v43, s18, 52 +; SI-NEXT: v_writelane_b32 v43, s21, 53 +; SI-NEXT: v_writelane_b32 v43, s22, 54 +; SI-NEXT: v_writelane_b32 v43, s40, 55 +; SI-NEXT: v_writelane_b32 v43, s41, 56 +; SI-NEXT: v_writelane_b32 v43, s42, 57 +; SI-NEXT: v_writelane_b32 v43, s43, 58 +; SI-NEXT: v_writelane_b32 v43, s76, 59 +; SI-NEXT: v_writelane_b32 v43, s77, 60 +; SI-NEXT: v_readfirstlane_b32 s93, v55 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_readfirstlane_b32 s95, v40 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s17, v33 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s98, v34 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s23, v35 +; SI-NEXT: v_readfirstlane_b32 s25, v31 +; SI-NEXT: v_readfirstlane_b32 s28, v32 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s26, v36 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s88, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_readfirstlane_b32 s79, v38 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_readfirstlane_b32 s75, v39 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:100 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:88 +; SI-NEXT: buffer_load_dword 
v35, off, s[0:3], s32 offset:84 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:80 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:68 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s24, v49 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s85, v50 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s66, v51 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:52 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s4, v10 -; SI-NEXT: v_writelane_b32 v62, s4, 15 -; SI-NEXT: v_readfirstlane_b32 s4, v2 -; SI-NEXT: v_writelane_b32 v62, s4, 16 -; SI-NEXT: v_writelane_b32 v62, s17, 17 -; SI-NEXT: v_writelane_b32 v62, s15, 18 -; SI-NEXT: v_writelane_b32 v62, s18, 19 -; SI-NEXT: v_writelane_b32 v62, s43, 20 -; SI-NEXT: v_writelane_b32 v62, s42, 21 -; SI-NEXT: v_writelane_b32 v62, s44, 22 -; SI-NEXT: v_writelane_b32 v62, s16, 23 -; SI-NEXT: v_writelane_b32 v62, s49, 24 -; SI-NEXT: v_writelane_b32 v62, s8, 25 -; SI-NEXT: v_writelane_b32 v62, s6, 26 -; SI-NEXT: v_readfirstlane_b32 s45, v52 -; SI-NEXT: v_writelane_b32 v62, s56, 27 -; SI-NEXT: v_writelane_b32 v62, s45, 28 -; SI-NEXT: v_writelane_b32 v62, s53, 29 -; SI-NEXT: v_writelane_b32 v62, s94, 30 -; SI-NEXT: v_writelane_b32 v62, s57, 31 -; SI-NEXT: v_writelane_b32 v62, s58, 32 -; SI-NEXT: v_writelane_b32 v62, s47, 33 -; SI-NEXT: v_readfirstlane_b32 s46, v55 -; SI-NEXT: v_writelane_b32 v62, s40, 34 -; SI-NEXT: v_readfirstlane_b32 s59, v47 -; SI-NEXT: v_writelane_b32 v62, s46, 35 -; SI-NEXT: v_writelane_b32 v62, s59, 36 -; SI-NEXT: v_writelane_b32 v62, s60, 37 -; SI-NEXT: v_writelane_b32 v62, s36, 38 -; SI-NEXT: v_writelane_b32 v62, s65, 39 -; SI-NEXT: v_writelane_b32 v62, s61, 40 -; SI-NEXT: v_writelane_b32 v62, s73, 41 -; SI-NEXT: v_writelane_b32 v62, s62, 42 -; SI-NEXT: v_writelane_b32 v62, s72, 43 -; SI-NEXT: v_writelane_b32 v62, s23, 44 -; SI-NEXT: v_writelane_b32 v62, s48, 45 -; SI-NEXT: v_writelane_b32 v62, s34, 46 -; SI-NEXT: v_writelane_b32 v62, s78, 47 -; SI-NEXT: v_writelane_b32 v62, s30, 48 -; SI-NEXT: v_writelane_b32 v62, s54, 49 -; SI-NEXT: v_writelane_b32 v62, s50, 50 -; SI-NEXT: v_writelane_b32 v62, s52, 51 -; SI-NEXT: v_writelane_b32 v62, s82, 52 -; SI-NEXT: v_writelane_b32 v62, s66, 53 -; SI-NEXT: v_readfirstlane_b32 s22, v36 +; SI-NEXT: v_readfirstlane_b32 vcc_lo, v13 +; SI-NEXT: v_readfirstlane_b32 vcc_hi, v14 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 61 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 62 +; SI-NEXT: v_writelane_b32 v43, s38, 63 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v57 +; SI-NEXT: v_readfirstlane_b32 s20, v31 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v58 +; SI-NEXT: v_readfirstlane_b32 s19, v32 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v59 -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v56 -; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v60 -; SI-NEXT: v_lshlrev_b32_e32 v45, 24, v45 -; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v61 -; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v42 -; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v41 -; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v31 -; SI-NEXT: v_writelane_b32 v62, 
s91, 54 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s27, v33 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s94, v34 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s72, v35 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s73, v36 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s67, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_readfirstlane_b32 s71, v38 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_readfirstlane_b32 s97, v39 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:20 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s35, v48 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s83, v49 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s87, v50 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s63, v51 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s74, v31 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s81, v32 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s80, v33 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s86, v34 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s34, v35 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s84, v36 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s31, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_readfirstlane_b32 s61, v38 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_readfirstlane_b32 s62, v39 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_readfirstlane_b32 s53, v48 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s52, v49 +; SI-NEXT: v_writelane_b32 v42, s52, 7 +; SI-NEXT: v_writelane_b32 v42, s53, 8 +; SI-NEXT: v_writelane_b32 v42, s57, 9 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_readfirstlane_b32 s54, v50 +; SI-NEXT: v_writelane_b32 v42, s58, 10 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_readfirstlane_b32 s55, v51 +; SI-NEXT: v_writelane_b32 v42, s54, 11 +; SI-NEXT: v_writelane_b32 v42, s55, 12 +; SI-NEXT: v_writelane_b32 v42, s64, 13 +; SI-NEXT: v_writelane_b32 v42, s65, 14 +; SI-NEXT: v_writelane_b32 v42, s67, 15 +; SI-NEXT: v_writelane_b32 v42, s71, 16 +; SI-NEXT: v_writelane_b32 v42, s80, 17 +; SI-NEXT: v_writelane_b32 v42, s81, 18 +; SI-NEXT: v_writelane_b32 v42, s59, 19 +; 
SI-NEXT: v_writelane_b32 v42, s60, 20 +; SI-NEXT: v_writelane_b32 v42, s86, 21 +; SI-NEXT: v_writelane_b32 v42, s97, 22 +; SI-NEXT: v_writelane_b32 v42, s34, 23 +; SI-NEXT: v_writelane_b32 v42, s66, 24 +; SI-NEXT: v_writelane_b32 v42, s85, 25 +; SI-NEXT: v_writelane_b32 v42, s31, 26 +; SI-NEXT: v_writelane_b32 v42, s84, 27 +; SI-NEXT: v_writelane_b32 v42, s35, 28 +; SI-NEXT: v_writelane_b32 v42, s98, 29 +; SI-NEXT: v_writelane_b32 v42, s17, 30 +; SI-NEXT: v_writelane_b32 v42, s20, 31 +; SI-NEXT: v_writelane_b32 v42, s61, 32 +; SI-NEXT: v_writelane_b32 v42, s19, 33 +; SI-NEXT: v_writelane_b32 v42, s62, 34 +; SI-NEXT: v_writelane_b32 v42, s23, 35 +; SI-NEXT: v_writelane_b32 v42, s83, 36 +; SI-NEXT: v_writelane_b32 v42, s87, 37 +; SI-NEXT: v_writelane_b32 v42, s26, 38 +; SI-NEXT: v_writelane_b32 v42, s94, 39 +; SI-NEXT: v_writelane_b32 v42, s27, 40 +; SI-NEXT: v_writelane_b32 v42, s63, 41 +; SI-NEXT: v_writelane_b32 v42, s79, 42 +; SI-NEXT: v_writelane_b32 v42, s88, 43 +; SI-NEXT: v_writelane_b32 v42, s72, 44 +; SI-NEXT: v_writelane_b32 v42, s73, 45 +; SI-NEXT: v_writelane_b32 v42, s74, 46 +; SI-NEXT: v_writelane_b32 v42, s75, 47 +; SI-NEXT: v_writelane_b32 v42, s24, 48 +; SI-NEXT: v_writelane_b32 v42, s25, 49 +; SI-NEXT: v_writelane_b32 v42, s28, 50 ; SI-NEXT: s_cbranch_scc0 .LBB97_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v5, v13 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s5, v62, 5 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_writelane_b32 v62, s4, 55 -; SI-NEXT: v_readlane_b32 s4, v62, 4 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: v_readlane_b32 s5, v62, 3 -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_or_b32 s63, s5, s4 -; SI-NEXT: v_readlane_b32 s4, v62, 6 -; SI-NEXT: s_and_b32 s5, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s19, 24 -; SI-NEXT: v_readlane_b32 s4, v62, 0 -; SI-NEXT: s_or_b32 s9, s9, s5 -; SI-NEXT: s_and_b32 s5, s4, 0xff -; SI-NEXT: s_lshl_b32 s10, s29, 8 -; SI-NEXT: s_or_b32 s4, s5, s10 -; SI-NEXT: v_writelane_b32 v62, s4, 56 -; SI-NEXT: s_and_b32 s5, s76, 0xff -; SI-NEXT: v_readlane_b32 s10, v62, 16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s10, 24 -; SI-NEXT: s_or_b32 s5, s11, s5 -; SI-NEXT: s_and_b32 s11, s26, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s12, s27, 24 -; SI-NEXT: s_or_b32 s14, s12, s11 -; SI-NEXT: s_and_b32 s11, s83, 0xff -; SI-NEXT: s_lshl_b32 s12, s25, 8 -; SI-NEXT: s_or_b32 s10, s11, s12 -; SI-NEXT: v_writelane_b32 v62, s10, 57 -; SI-NEXT: s_and_b32 s11, s64, 0xff -; SI-NEXT: v_readlane_b32 s10, v62, 15 -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s13, s10, 24 -; SI-NEXT: s_or_b32 s41, s13, s11 -; SI-NEXT: s_and_b32 s11, s43, 0xff -; SI-NEXT: s_lshl_b32 s13, s15, 8 -; SI-NEXT: s_or_b32 s10, s11, s13 -; SI-NEXT: s_and_b32 s11, s96, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s80, 24 -; SI-NEXT: s_or_b32 s43, s15, s11 -; SI-NEXT: s_and_b32 s11, s44, 0xff -; SI-NEXT: s_lshl_b32 s15, s42, 8 -; SI-NEXT: s_or_b32 s13, s11, s15 -; SI-NEXT: s_and_b32 s11, s18, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s97, 24 -; SI-NEXT: s_or_b32 s44, s15, s11 -; SI-NEXT: s_and_b32 s11, s59, 0xff -; SI-NEXT: s_lshl_b32 s15, s46, 
8 -; SI-NEXT: s_or_b32 s12, s11, s15 -; SI-NEXT: s_and_b32 s11, s45, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s6, 24 -; SI-NEXT: s_or_b32 s45, s15, s11 -; SI-NEXT: s_and_b32 s11, s30, 0xff -; SI-NEXT: s_lshl_b32 s15, s78, 8 -; SI-NEXT: v_writelane_b32 v62, s10, 58 -; SI-NEXT: s_or_b32 s10, s11, s15 -; SI-NEXT: s_and_b32 s11, s99, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s89, 24 -; SI-NEXT: s_or_b32 s46, s15, s11 -; SI-NEXT: s_and_b32 s11, s61, 0xff -; SI-NEXT: s_lshl_b32 s15, s60, 8 -; SI-NEXT: s_or_b32 s6, s11, s15 -; SI-NEXT: s_and_b32 s11, s22, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s47, 24 -; SI-NEXT: s_or_b32 s47, s15, s11 -; SI-NEXT: s_and_b32 s11, s57, 0xff -; SI-NEXT: s_lshl_b32 s15, s56, 8 -; SI-NEXT: v_writelane_b32 v62, s6, 59 -; SI-NEXT: s_or_b32 s6, s11, s15 -; SI-NEXT: s_and_b32 s11, s39, 0xff -; SI-NEXT: v_writelane_b32 v62, s6, 60 -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s95, 24 -; SI-NEXT: s_or_b32 s56, s15, s11 -; SI-NEXT: s_and_b32 s11, s48, 0xff -; SI-NEXT: s_lshl_b32 s15, s72, 8 -; SI-NEXT: v_readlane_b32 s6, v62, 14 -; SI-NEXT: s_or_b32 s48, s11, s15 -; SI-NEXT: s_and_b32 s11, s6, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s31, 24 -; SI-NEXT: s_or_b32 vcc_lo, s15, s11 -; SI-NEXT: s_and_b32 s11, s86, 0xff -; SI-NEXT: s_lshl_b32 s15, s38, 8 -; SI-NEXT: s_or_b32 s72, s11, s15 -; SI-NEXT: s_and_b32 s11, s71, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s81, 24 -; SI-NEXT: s_or_b32 vcc_hi, s15, s11 -; SI-NEXT: s_and_b32 s11, s58, 0xff -; SI-NEXT: s_lshl_b32 s15, s85, 8 -; SI-NEXT: s_or_b32 s57, s11, s15 -; SI-NEXT: s_and_b32 s11, s69, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s74, 24 -; SI-NEXT: v_writelane_b32 v62, s74, 61 -; SI-NEXT: s_or_b32 s74, s15, s11 -; SI-NEXT: s_and_b32 s11, s87, 0xff -; SI-NEXT: s_lshl_b32 s15, s21, 8 -; SI-NEXT: s_or_b32 s58, s11, s15 -; SI-NEXT: s_and_b32 s11, s68, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s28, 24 -; SI-NEXT: s_or_b32 s75, s15, s11 -; SI-NEXT: s_and_b32 s11, s24, 0xff -; SI-NEXT: s_lshl_b32 s15, s55, 8 -; SI-NEXT: v_writelane_b32 v62, s25, 62 -; SI-NEXT: s_or_b32 s59, s11, s15 -; SI-NEXT: s_and_b32 s11, s37, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s51, 24 -; SI-NEXT: v_readlane_b32 s4, v62, 13 -; SI-NEXT: s_mov_b32 s18, s21 -; SI-NEXT: s_mov_b32 s21, s97 -; SI-NEXT: s_mov_b32 s97, s37 -; SI-NEXT: s_mov_b32 s37, s76 -; SI-NEXT: s_or_b32 s76, s15, s11 -; SI-NEXT: s_and_b32 s11, s35, 0xff -; SI-NEXT: s_lshl_b32 s15, s4, 8 -; SI-NEXT: s_or_b32 s60, s11, s15 -; SI-NEXT: s_and_b32 s11, s77, 0xff -; SI-NEXT: v_readlane_b32 s4, v62, 12 -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s4, 24 -; SI-NEXT: v_readlane_b32 s4, v62, 11 -; SI-NEXT: s_mov_b32 s6, s95 -; SI-NEXT: s_mov_b32 s95, s39 -; SI-NEXT: s_mov_b32 s39, s89 -; SI-NEXT: s_mov_b32 s89, s99 -; SI-NEXT: s_mov_b32 s99, s83 -; SI-NEXT: s_mov_b32 s83, s55 -; SI-NEXT: s_mov_b32 s55, s64 -; SI-NEXT: s_mov_b32 s64, s35 -; SI-NEXT: s_mov_b32 s35, s77 -; SI-NEXT: s_or_b32 s77, s15, s11 -; SI-NEXT: s_and_b32 s11, s4, 0xff -; SI-NEXT: v_readlane_b32 s4, v62, 10 -; SI-NEXT: s_lshl_b32 s15, s4, 8 -; SI-NEXT: v_readlane_b32 s4, v62, 9 -; SI-NEXT: s_or_b32 s61, s11, s15 -; SI-NEXT: s_and_b32 s11, s4, 0xff -; SI-NEXT: v_readlane_b32 s4, v62, 8 -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s4, 24 -; SI-NEXT: s_or_b32 s78, 
s15, s11 -; SI-NEXT: v_readlane_b32 s11, v62, 7 -; SI-NEXT: s_and_b32 s11, s11, 0xff -; SI-NEXT: s_lshl_b32 s15, s17, 8 -; SI-NEXT: s_or_b32 s11, s11, s15 -; SI-NEXT: s_and_b32 s11, s11, 0xffff -; SI-NEXT: v_mov_b32_e32 v51, s9 -; SI-NEXT: s_or_b32 s17, s11, s9 -; SI-NEXT: v_readlane_b32 s9, v62, 2 -; SI-NEXT: v_readlane_b32 s11, v62, 1 -; SI-NEXT: s_and_b32 s9, s9, 0xff -; SI-NEXT: s_lshl_b32 s15, s11, 8 -; SI-NEXT: s_or_b32 s9, s9, s15 -; SI-NEXT: s_and_b32 s9, s9, 0xffff -; SI-NEXT: s_mov_b32 s4, s96 -; SI-NEXT: s_mov_b32 s96, s24 -; SI-NEXT: v_mov_b32_e32 v52, s14 -; SI-NEXT: s_or_b32 s24, s9, s14 -; SI-NEXT: s_and_b32 s14, s93, 0xff -; SI-NEXT: s_lshl_b32 s15, s84, 8 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v53, v6, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v50, s14, v53 -; SI-NEXT: s_and_b32 s14, s8, 0xff -; SI-NEXT: s_lshl_b32 s15, s49, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v21 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v54, v14, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v17, s14, v54 -; SI-NEXT: s_and_b32 s14, s40, 0xff -; SI-NEXT: s_lshl_b32 s15, s53, 8 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v29 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v55, v18, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v16, s14, v55 -; SI-NEXT: s_and_b32 s14, s34, 0xff -; SI-NEXT: s_lshl_b32 s15, s23, 8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v40, v19, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v15, s14, v40 -; SI-NEXT: s_and_b32 s14, s91, 0xff -; SI-NEXT: s_lshl_b32 s15, s66, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v43 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v41, v22, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v12, s14, v41 -; SI-NEXT: s_and_b32 s14, s50, 0xff -; SI-NEXT: s_lshl_b32 s15, s54, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v32 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v42, v23, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v11, s14, v42 -; SI-NEXT: s_and_b32 s14, s73, 0xff -; SI-NEXT: s_lshl_b32 s15, s36, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v46 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v28, v59, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v10, s14, v28 -; SI-NEXT: s_and_b32 s14, s82, 0xff -; SI-NEXT: s_lshl_b32 s15, s52, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v26 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v60, v24, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v9, s14, v60 -; SI-NEXT: s_and_b32 s14, s90, 0xff -; SI-NEXT: s_lshl_b32 s15, s16, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v31, v44, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v8, s14, v31 -; SI-NEXT: s_and_b32 s14, s62, 0xff -; SI-NEXT: s_lshl_b32 s15, s65, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v37 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; 
SI-NEXT: v_or_b32_e32 v61, v45, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v7, s14, v61 -; SI-NEXT: s_and_b32 s14, s98, 0xff -; SI-NEXT: s_lshl_b32 s15, s67, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v38 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_or_b32_e32 v6, v47, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v4, s14, v6 -; SI-NEXT: s_and_b32 s14, s92, 0xff -; SI-NEXT: s_lshl_b32 s15, s7, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_readlane_b32 s8, v62, 55 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_mov_b32_e32 v22, v14 -; SI-NEXT: v_or_b32_e32 v14, v56, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: v_or_b32_e32 v2, s14, v14 -; SI-NEXT: s_and_b32 s14, s70, 0xff -; SI-NEXT: s_lshl_b32 s15, s94, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 -; SI-NEXT: s_or_b32 s42, s8, s63 -; SI-NEXT: v_readlane_b32 s8, v62, 56 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_mov_b32_e32 v32, v23 -; SI-NEXT: v_mov_b32_e32 v23, v18 -; SI-NEXT: v_or_b32_e32 v18, v57, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: s_or_b32 s40, s8, s5 -; SI-NEXT: v_readlane_b32 s8, v62, 57 -; SI-NEXT: v_or_b32_e32 v1, s14, v18 -; SI-NEXT: s_and_b32 s14, s88, 0xff -; SI-NEXT: s_lshl_b32 s15, s79, 8 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v34 -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: v_readlane_b32 s9, v62, 60 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_or_b32 s15, s8, s41 -; SI-NEXT: v_readlane_b32 s8, v62, 58 -; SI-NEXT: s_and_b32 s16, s9, 0xffff -; SI-NEXT: v_mov_b32_e32 v27, v26 -; SI-NEXT: v_mov_b32_e32 v26, v24 -; SI-NEXT: v_mov_b32_e32 v24, v19 -; SI-NEXT: v_or_b32_e32 v19, v58, v3 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: s_or_b32 s36, s16, s56 -; SI-NEXT: s_and_b32 s16, s48, 0xffff -; SI-NEXT: v_or_b32_e32 v3, s14, v19 -; SI-NEXT: s_or_b32 s14, s8, s43 -; SI-NEXT: s_and_b32 s8, s13, 0xffff -; SI-NEXT: s_or_b32 s53, s16, vcc_lo -; SI-NEXT: s_and_b32 s16, s72, 0xffff -; SI-NEXT: s_or_b32 s13, s8, s44 -; SI-NEXT: s_and_b32 s8, s12, 0xffff -; SI-NEXT: s_or_b32 s94, s16, vcc_hi -; SI-NEXT: s_and_b32 s16, s57, 0xffff -; SI-NEXT: s_or_b32 s12, s8, s45 -; SI-NEXT: s_and_b32 s8, s10, 0xffff -; SI-NEXT: s_or_b32 s49, s16, s74 -; SI-NEXT: s_and_b32 s16, s58, 0xffff -; SI-NEXT: s_or_b32 s10, s8, s46 -; SI-NEXT: v_readlane_b32 s8, v62, 59 -; SI-NEXT: s_or_b32 s48, s16, s75 -; SI-NEXT: s_and_b32 s16, s59, 0xffff -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: s_or_b32 s11, s16, s76 -; SI-NEXT: s_and_b32 s16, s60, 0xffff -; SI-NEXT: s_and_b32 s23, s61, 0xffff -; SI-NEXT: s_mov_b32 s30, s87 -; SI-NEXT: s_mov_b32 s87, s85 -; SI-NEXT: s_or_b32 s8, s8, s47 -; SI-NEXT: s_or_b32 s9, s16, s77 -; SI-NEXT: s_or_b32 s16, s23, s78 -; SI-NEXT: v_mov_b32_e32 v36, v35 -; SI-NEXT: v_mov_b32_e32 v30, v37 -; SI-NEXT: v_mov_b32_e32 v35, v45 -; SI-NEXT: v_mov_b32_e32 v20, v47 -; SI-NEXT: v_mov_b32_e32 v49, v56 -; SI-NEXT: v_mov_b32_e32 v48, v39 -; SI-NEXT: v_mov_b32_e32 v39, v57 -; SI-NEXT: v_mov_b32_e32 v25, v58 -; SI-NEXT: v_alignbit_b32 v57, s42, v51, 16 -; SI-NEXT: v_alignbit_b32 v58, s40, v52, 16 -; SI-NEXT: v_alignbit_b32 v56, s15, v53, 16 -; SI-NEXT: v_alignbit_b32 v47, s14, v54, 16 -; 
SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v46, s13, v55, 16 -; SI-NEXT: v_alignbit_b32 v45, s12, v40, 16 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v44, s10, v41, 16 -; SI-NEXT: v_alignbit_b32 v43, s8, v42, 16 -; SI-NEXT: v_alignbit_b32 v42, s36, v28, 16 -; SI-NEXT: v_alignbit_b32 v41, s53, v60, 16 -; SI-NEXT: v_alignbit_b32 v40, s94, v31, 16 -; SI-NEXT: v_alignbit_b32 v55, s49, v61, 16 -; SI-NEXT: v_alignbit_b32 v54, s48, v6, 16 -; SI-NEXT: v_alignbit_b32 v53, s11, v14, 16 -; SI-NEXT: v_mov_b32_e32 v14, v22 -; SI-NEXT: v_alignbit_b32 v52, s9, v18, 16 -; SI-NEXT: v_mov_b32_e32 v18, v23 -; SI-NEXT: v_alignbit_b32 v51, s16, v19, 16 -; SI-NEXT: v_mov_b32_e32 v19, v24 -; SI-NEXT: v_mov_b32_e32 v24, v26 -; SI-NEXT: s_lshr_b32 s73, s63, 16 -; SI-NEXT: s_lshr_b32 s72, s5, 16 -; SI-NEXT: s_lshr_b32 s63, s41, 16 -; SI-NEXT: s_lshr_b32 s62, s43, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s60, s45, 16 -; SI-NEXT: s_lshr_b32 s59, s46, 16 -; SI-NEXT: s_lshr_b32 s58, s47, 16 -; SI-NEXT: s_lshr_b32 s57, s56, 16 -; SI-NEXT: s_lshr_b32 s56, vcc_lo, 16 -; SI-NEXT: s_lshr_b32 s47, vcc_hi, 16 -; SI-NEXT: s_lshr_b32 s46, s74, 16 -; SI-NEXT: v_readlane_b32 s25, v62, 62 -; SI-NEXT: v_readlane_b32 s74, v62, 61 -; SI-NEXT: s_lshr_b32 s45, s75, 16 -; SI-NEXT: s_lshr_b32 s44, s76, 16 -; SI-NEXT: s_mov_b32 s76, s37 -; SI-NEXT: s_mov_b32 s37, s97 -; SI-NEXT: s_mov_b32 s97, s21 -; SI-NEXT: s_mov_b32 s21, s18 -; SI-NEXT: s_mov_b32 s18, s17 -; SI-NEXT: s_mov_b32 s85, s87 -; SI-NEXT: s_mov_b32 s87, s30 -; SI-NEXT: s_mov_b32 s17, s24 -; SI-NEXT: s_lshr_b32 s43, s77, 16 -; SI-NEXT: s_mov_b32 s77, s35 -; SI-NEXT: s_mov_b32 s35, s64 -; SI-NEXT: s_mov_b32 s64, s55 -; SI-NEXT: s_mov_b32 s55, s83 -; SI-NEXT: s_mov_b32 s83, s99 -; SI-NEXT: s_mov_b32 s99, s89 -; SI-NEXT: s_mov_b32 s89, s39 -; SI-NEXT: s_mov_b32 s39, s95 -; SI-NEXT: s_mov_b32 s95, s6 -; SI-NEXT: s_lshr_b32 s41, s78, 16 -; SI-NEXT: s_mov_b32 s24, s96 -; SI-NEXT: s_mov_b32 s96, s4 -; SI-NEXT: s_cbranch_execnz .LBB97_3 -; SI-NEXT: .LBB97_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v36 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_mov_b32_e32 v6, v5 -; SI-NEXT: v_mov_b32_e32 v5, v27 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: s_add_i32 s4, s88, 3 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s79, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v25, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_readlane_b32 s4, v62, 11 -; SI-NEXT: s_add_i32 s4, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 10 -; SI-NEXT: v_readlane_b32 s6, v62, 9 +; SI-NEXT: v_readlane_b32 s4, v43, 13 +; SI-NEXT: v_readlane_b32 s5, v43, 12 ; SI-NEXT: s_and_b32 s4, s4, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s8, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 8 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_add_i32 s70, s70, 3 -; SI-NEXT: v_readlane_b32 s6, v62, 30 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s5, s70, 0xff -; SI-NEXT: s_lshl_b32 s8, s6, 8 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v48 -; 
SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_addk_i32 s5, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_and_b32 s5, s5, 0xffff -; SI-NEXT: v_or_b32_e32 v2, v39, v2 -; SI-NEXT: v_or_b32_e32 v2, s5, v2 -; SI-NEXT: s_add_i32 s5, s35, 3 -; SI-NEXT: v_readlane_b32 s6, v62, 13 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: s_lshl_b32 s8, s6, 8 -; SI-NEXT: s_add_i32 s9, s77, 3 -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: v_readlane_b32 s6, v62, 12 -; SI-NEXT: s_and_b32 s9, s9, 0xff -; SI-NEXT: s_lshl_b32 s8, s6, 24 -; SI-NEXT: s_lshl_b32 s9, s9, 16 -; SI-NEXT: s_addk_i32 s5, 0x300 -; SI-NEXT: s_or_b32 s8, s8, s9 -; SI-NEXT: s_and_b32 s5, s5, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: s_add_i32 s79, s92, 3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v1 -; SI-NEXT: s_add_i32 s16, s4, 0x3000000 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x3000000, v2 -; SI-NEXT: s_add_i32 s9, s5, 0x3000000 -; SI-NEXT: s_and_b32 s4, s79, 0xff -; SI-NEXT: s_lshl_b32 s5, s7, 8 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v2, v49, v2 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v8, v9, v8 -; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: s_add_i32 s4, s24, 3 +; SI-NEXT: s_or_b32 s29, s4, s5 +; SI-NEXT: v_readlane_b32 s4, v43, 5 +; SI-NEXT: v_readlane_b32 s5, v43, 4 ; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s55, 8 -; SI-NEXT: s_add_i32 s8, s37, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s51, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s52, s98, 3 -; SI-NEXT: s_add_i32 s11, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s52, 0xff -; SI-NEXT: s_lshl_b32 s5, s67, 8 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v38 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v4, v20, v4 -; SI-NEXT: s_add_i32 s30, s87, 3 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s30, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 -; SI-NEXT: s_add_i32 s8, s68, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s28, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s48, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 42 -; SI-NEXT: v_mov_b32_e32 v22, v30 -; SI-NEXT: s_add_i32 s87, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 39 -; SI-NEXT: s_and_b32 s4, s87, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v22 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v7, v35, v7 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: v_readlane_b32 s4, v62, 32 -; SI-NEXT: s_add_i32 s67, s4, 3 -; SI-NEXT: s_and_b32 s4, s67, 0xff -; SI-NEXT: s_lshl_b32 s5, 
s85, 8 -; SI-NEXT: s_add_i32 s8, s69, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s74, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s50, s90, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 23 -; SI-NEXT: s_add_i32 s49, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s50, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_add_i32 s94, s86, 3 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s94, 0xff -; SI-NEXT: s_lshl_b32 s5, s38, 8 -; SI-NEXT: s_add_i32 s8, s71, 3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s81, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s94, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 52 -; SI-NEXT: s_add_i32 s18, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 51 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v9, v24, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: v_readlane_b32 s4, v62, 45 -; SI-NEXT: s_add_i32 s98, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 43 -; SI-NEXT: v_readlane_b32 s6, v62, 14 -; SI-NEXT: s_and_b32 s4, s98, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s8, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s31, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s53, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 41 -; SI-NEXT: s_add_i32 s86, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 38 -; SI-NEXT: s_and_b32 s4, s86, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v10, v59, v10 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: v_readlane_b32 s4, v62, 31 -; SI-NEXT: s_add_i32 s66, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 27 -; SI-NEXT: s_and_b32 s4, s66, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s37, s39, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s37, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s95, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s36, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 50 -; SI-NEXT: s_add_i32 s21, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 49 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_readlane_b32 s5, v62, 37 -; SI-NEXT: s_lshl_b32 s5, s5, 
8 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: s_add_i32 s71, s22, 3 -; SI-NEXT: s_and_b32 s8, s71, 0xff -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_add_i32 s35, s99, 3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v11, vcc, 3, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v32, v11 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: v_readlane_b32 s4, v62, 40 -; SI-NEXT: s_add_i32 s85, s4, 3 -; SI-NEXT: s_and_b32 s4, s85, 0xff -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 33 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s8, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 54 -; SI-NEXT: s_add_i32 s17, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 53 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_readlane_b32 s5, v62, 47 -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_and_b32 s6, s35, 0xff -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: v_mov_b32_e32 v30, s16 -; SI-NEXT: v_mov_b32_e32 v39, s9 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x3000000, v2 -; SI-NEXT: v_mov_b32_e32 v28, s11 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v4 -; SI-NEXT: v_mov_b32_e32 v27, s48 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v7 -; SI-NEXT: v_mov_b32_e32 v26, s49 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v8 -; SI-NEXT: v_mov_b32_e32 v25, s94 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v9 -; SI-NEXT: v_mov_b32_e32 v24, s53 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v10 -; SI-NEXT: v_mov_b32_e32 v23, s36 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v11 -; SI-NEXT: v_mov_b32_e32 v22, s8 -; SI-NEXT: v_alignbit_b32 v43, v22, v11, 16 -; SI-NEXT: v_alignbit_b32 v42, v23, v10, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v9, 16 -; SI-NEXT: v_alignbit_b32 v40, v25, v8, 16 -; SI-NEXT: v_alignbit_b32 v55, v26, v7, 16 -; SI-NEXT: v_alignbit_b32 v54, v27, v4, 16 -; SI-NEXT: v_alignbit_b32 v53, v28, v2, 16 -; SI-NEXT: v_alignbit_b32 v52, v39, v1, 16 -; SI-NEXT: v_alignbit_b32 v51, v30, v3, 16 -; SI-NEXT: s_lshr_b32 s58, s8, 16 -; SI-NEXT: s_lshr_b32 s57, s36, 16 -; SI-NEXT: s_lshr_b32 s56, s53, 16 -; SI-NEXT: s_lshr_b32 s47, s94, 16 -; SI-NEXT: s_lshr_b32 s46, s49, 16 -; SI-NEXT: s_lshr_b32 s45, s48, 16 -; SI-NEXT: s_lshr_b32 s44, s11, 16 -; SI-NEXT: s_lshr_b32 s43, s9, 16 -; SI-NEXT: s_lshr_b32 s41, s16, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v5, v12, v5 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s4, v62, 48 -; SI-NEXT: s_add_i32 s7, s4, 3 +; SI-NEXT: s_or_b32 s45, s4, s5 +; SI-NEXT: s_and_b32 s4, s43, 0xff +; SI-NEXT: s_lshl_b32 s5, s42, 8 +; SI-NEXT: s_or_b32 s46, s4, s5 +; SI-NEXT: s_and_b32 s4, s56, 0xff +; SI-NEXT: s_lshl_b32 s5, s47, 8 +; SI-NEXT: s_or_b32 s47, s4, s5 +; SI-NEXT: s_and_b32 s4, s58, 0xff +; SI-NEXT: s_lshl_b32 s5, 
s57, 8 +; SI-NEXT: s_or_b32 s56, s4, s5 +; SI-NEXT: s_and_b32 s4, s60, 0xff +; SI-NEXT: s_lshl_b32 s5, s59, 8 +; SI-NEXT: s_or_b32 s57, s4, s5 +; SI-NEXT: s_and_b32 s4, s62, 0xff +; SI-NEXT: s_lshl_b32 s5, s61, 8 +; SI-NEXT: s_or_b32 s58, s4, s5 +; SI-NEXT: s_and_b32 s4, s74, 0xff +; SI-NEXT: s_lshl_b32 s5, s63, 8 +; SI-NEXT: s_or_b32 s59, s4, s5 +; SI-NEXT: s_and_b32 s4, s73, 0xff +; SI-NEXT: s_lshl_b32 s5, s72, 8 +; SI-NEXT: s_or_b32 s60, s4, s5 +; SI-NEXT: s_and_b32 s4, s24, 0xff +; SI-NEXT: s_lshl_b32 s5, s75, 8 +; SI-NEXT: s_or_b32 s61, s4, s5 +; SI-NEXT: s_and_b32 s4, s28, 0xff +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s62, s4, s5 +; SI-NEXT: s_and_b32 s4, s36, 0xff +; SI-NEXT: s_lshl_b32 s5, s16, 8 +; SI-NEXT: s_or_b32 s63, s4, s5 +; SI-NEXT: s_and_b32 s4, s10, 0xff +; SI-NEXT: s_lshl_b32 s5, s9, 8 +; SI-NEXT: s_or_b32 s72, s4, s5 ; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s89, 24 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s10, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 46 -; SI-NEXT: s_add_i32 s99, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 44 -; SI-NEXT: s_and_b32 s4, s99, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v5, v19, v5 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: v_readlane_b32 s4, v62, 36 -; SI-NEXT: s_add_i32 s81, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 35 -; SI-NEXT: v_readlane_b32 s6, v62, 28 -; SI-NEXT: s_and_b32 s4, s81, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s55, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 26 -; SI-NEXT: s_and_b32 s6, s55, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s12, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 34 -; SI-NEXT: s_add_i32 s69, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 29 -; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v5 -; SI-NEXT: s_and_b32 s4, s69, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v29 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v5, v18, v5 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: v_readlane_b32 s4, v62, 22 -; SI-NEXT: s_add_i32 s34, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 21 -; SI-NEXT: v_readlane_b32 s6, v62, 19 -; SI-NEXT: s_and_b32 s4, s34, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s92, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s92, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s97, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s13, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 25 -; SI-NEXT: s_add_i32 s51, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 24 -; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v5 -; SI-NEXT: s_and_b32 s4, s51, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v21 -; SI-NEXT: 
s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v5, v14, v5 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: v_readlane_b32 s4, v62, 20 -; SI-NEXT: s_add_i32 s95, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 18 -; SI-NEXT: s_and_b32 s4, s95, 0xff +; SI-NEXT: s_lshl_b32 s5, s78, 8 +; SI-NEXT: s_or_b32 s73, s4, s5 +; SI-NEXT: s_and_b32 s4, s8, 0xff +; SI-NEXT: s_lshl_b32 s5, s91, 8 +; SI-NEXT: s_or_b32 s74, s4, s5 +; SI-NEXT: s_and_b32 s4, s6, 0xff +; SI-NEXT: s_lshl_b32 s5, s90, 8 +; SI-NEXT: s_or_b32 s75, s4, s5 +; SI-NEXT: v_readlane_b32 s4, v43, 9 +; SI-NEXT: v_readlane_b32 s5, v43, 8 +; SI-NEXT: s_and_b32 s4, s4, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s6, s96, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s80, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s14, s4, 0x3000000 -; SI-NEXT: s_add_i32 s4, s93, 3 +; SI-NEXT: s_or_b32 s5, s4, s5 +; SI-NEXT: v_readlane_b32 s4, v43, 7 ; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s84, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v6, v6, v13 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_add_i32 s4, s83, 3 +; SI-NEXT: v_readlane_b32 s6, v43, 6 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: s_or_b32 s7, s6, s4 +; SI-NEXT: v_readlane_b32 s4, v43, 11 ; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s25, 8 -; SI-NEXT: s_add_i32 s6, s64, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 15 +; SI-NEXT: v_readlane_b32 s6, v43, 10 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: v_writelane_b32 v42, s7, 51 +; SI-NEXT: s_or_b32 s4, s6, s4 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_readlane_b32 s6, v43, 1 +; SI-NEXT: v_readlane_b32 s7, v43, 0 ; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s5, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 8 +; SI-NEXT: s_or_b32 s7, s6, s7 +; SI-NEXT: s_and_b32 s6, s11, 0xff ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s15, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 2 -; SI-NEXT: s_add_i32 s4, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 1 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s6, s26, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_lshl_b32 s8, s12, 24 +; SI-NEXT: s_or_b32 s37, s8, s6 +; SI-NEXT: v_readlane_b32 s6, v43, 3 ; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s27, 24 +; SI-NEXT: v_readlane_b32 s8, v43, 2 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s17, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 0 +; SI-NEXT: s_lshl_b32 s8, s8, 24 +; SI-NEXT: s_or_b32 s6, s8, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: s_lshl_b32 s9, s15, 8 +; SI-NEXT: s_or_b32 s9, 
s8, s9 +; SI-NEXT: s_and_b32 s8, s13, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s10, s14, 24 +; SI-NEXT: s_or_b32 s68, s10, s8 +; SI-NEXT: s_and_b32 s8, s21, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s10, s22, 24 +; SI-NEXT: s_or_b32 s8, s10, s8 +; SI-NEXT: s_and_b32 s10, s77, 0xff +; SI-NEXT: s_lshl_b32 s11, s76, 8 +; SI-NEXT: s_or_b32 s11, s10, s11 +; SI-NEXT: s_and_b32 s10, s40, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s12, s41, 24 +; SI-NEXT: s_or_b32 s99, s12, s10 +; SI-NEXT: s_and_b32 s10, vcc_lo, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s12, vcc_hi, 24 +; SI-NEXT: s_or_b32 s10, s12, s10 +; SI-NEXT: s_and_b32 s12, s49, 0xff +; SI-NEXT: s_lshl_b32 s13, s48, 8 +; SI-NEXT: s_or_b32 s13, s12, s13 +; SI-NEXT: s_and_b32 s12, s38, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s14, s39, 24 +; SI-NEXT: s_or_b32 s92, s14, s12 +; SI-NEXT: s_and_b32 s12, s50, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s14, s51, 24 +; SI-NEXT: s_or_b32 s12, s14, s12 +; SI-NEXT: s_and_b32 s14, s55, 0xff +; SI-NEXT: s_lshl_b32 s15, s54, 8 +; SI-NEXT: s_or_b32 s15, s14, s15 +; SI-NEXT: s_and_b32 s14, s52, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s24, s53, 24 +; SI-NEXT: s_mov_b32 s28, s90 +; SI-NEXT: s_or_b32 s90, s24, s14 +; SI-NEXT: s_and_b32 s14, s64, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s25, s65, 24 +; SI-NEXT: s_or_b32 s14, s25, s14 +; SI-NEXT: s_and_b32 s25, s34, 0xff +; SI-NEXT: s_lshl_b32 s40, s86, 8 +; SI-NEXT: s_or_b32 s41, s25, s40 +; SI-NEXT: s_and_b32 s25, s80, 0xff +; SI-NEXT: s_lshl_b32 s25, s25, 16 +; SI-NEXT: s_lshl_b32 s40, s81, 24 +; SI-NEXT: s_or_b32 s18, s40, s25 +; SI-NEXT: s_and_b32 s40, s31, 0xff +; SI-NEXT: s_lshl_b32 s40, s40, 16 +; SI-NEXT: s_lshl_b32 s42, s84, 24 +; SI-NEXT: s_or_b32 s40, s42, s40 +; SI-NEXT: s_and_b32 s42, s35, 0xff +; SI-NEXT: s_lshl_b32 s43, s97, 8 +; SI-NEXT: s_or_b32 s43, s42, s43 +; SI-NEXT: s_and_b32 s42, s71, 0xff +; SI-NEXT: s_lshl_b32 s42, s42, 16 +; SI-NEXT: s_lshl_b32 s76, s67, 24 +; SI-NEXT: s_or_b32 s35, s76, s42 +; SI-NEXT: s_and_b32 s42, s87, 0xff +; SI-NEXT: s_lshl_b32 s42, s42, 16 +; SI-NEXT: s_lshl_b32 s76, s83, 24 +; SI-NEXT: s_or_b32 s42, s76, s42 +; SI-NEXT: s_and_b32 s76, s19, 0xff +; SI-NEXT: s_lshl_b32 s77, s20, 8 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s66, 0xff +; SI-NEXT: v_writelane_b32 v42, s78, 52 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s85, 24 +; SI-NEXT: s_or_b32 s19, s78, s77 +; SI-NEXT: s_and_b32 s77, s94, 0xff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s27, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: s_or_b32 vcc_lo, s78, s77 +; SI-NEXT: s_or_b32 vcc_hi, s76, s19 +; SI-NEXT: s_and_b32 s76, s26, 0xff +; SI-NEXT: s_lshl_b32 s77, s23, 8 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s98, 0xff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; SI-NEXT: s_or_b32 s71, s78, s77 +; SI-NEXT: s_and_b32 s77, s79, 0xff +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 40 +; SI-NEXT: s_and_b32 s41, s41, 0xffff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s88, 24 +; SI-NEXT: s_or_b32 s39, s76, s71 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 39 +; SI-NEXT: s_or_b32 s41, s41, s18 +; SI-NEXT: s_mov_b32 s31, s18 +; SI-NEXT: s_or_b32 s38, s78, s77 +; SI-NEXT: s_lshl_b32 s77, s17, 8 +; SI-NEXT: 
v_readlane_b32 s18, v43, 38 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 37 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s18, 24 +; SI-NEXT: s_or_b32 s80, s78, s77 +; SI-NEXT: s_and_b32 s77, s95, 0xff +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 36 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s93, 24 +; SI-NEXT: s_or_b32 s49, s76, s80 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 35 +; SI-NEXT: s_or_b32 s48, s78, s77 +; SI-NEXT: s_lshl_b32 s77, s17, 8 +; SI-NEXT: v_readlane_b32 s17, v43, 34 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 33 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; SI-NEXT: s_or_b32 s81, s78, s77 +; SI-NEXT: s_and_b32 s77, s30, 0xff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s69, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 31 +; SI-NEXT: s_or_b32 s50, s78, s77 +; SI-NEXT: s_or_b32 s51, s76, s81 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: s_lshl_b32 s77, s96, 8 +; SI-NEXT: v_readlane_b32 s17, v43, 30 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s17, 0xff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s82, 24 +; SI-NEXT: v_writelane_b32 v42, s96, 53 +; SI-NEXT: v_readlane_b32 s18, v43, 32 +; SI-NEXT: v_writelane_b32 v42, s82, 54 +; SI-NEXT: s_or_b32 s82, s78, s77 +; SI-NEXT: s_and_b32 s77, s18, 0xff +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 28 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s70, 24 +; SI-NEXT: s_or_b32 s53, s76, s82 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 27 +; SI-NEXT: s_or_b32 s52, s78, s77 +; SI-NEXT: s_lshl_b32 s77, s17, 8 +; SI-NEXT: v_readlane_b32 s18, v43, 26 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s18, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 25 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; SI-NEXT: v_writelane_b32 v42, s16, 55 +; SI-NEXT: s_or_b32 s16, s78, s77 +; SI-NEXT: s_and_b32 s77, s89, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 29 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s18, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 22 +; SI-NEXT: v_readlane_b32 s18, v43, 21 +; SI-NEXT: s_or_b32 s54, s78, s77 +; SI-NEXT: s_or_b32 s55, s76, s16 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: s_lshl_b32 s77, s18, 8 +; SI-NEXT: v_readlane_b32 s17, v43, 20 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 19 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; SI-NEXT: v_readlane_b32 s17, v43, 24 +; SI-NEXT: s_or_b32 s83, s78, s77 +; SI-NEXT: s_and_b32 s77, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 23 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 17 +; SI-NEXT: v_readlane_b32 s18, v43, 16 +; SI-NEXT: s_or_b32 s64, s78, s77 +; SI-NEXT: s_or_b32 s65, s76, s83 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: s_lshl_b32 s77, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v43, 15 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 14 +; SI-NEXT: v_writelane_b32 v42, s89, 56 +; SI-NEXT: s_lshl_b32 
s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s18, 24 +; SI-NEXT: v_writelane_b32 v42, s70, 57 +; SI-NEXT: s_or_b32 s85, s78, s77 +; SI-NEXT: s_and_b32 s77, s44, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 18 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_and_b32 s9, s9, 0xffff +; SI-NEXT: s_and_b32 s11, s11, 0xffff +; SI-NEXT: s_and_b32 s13, s13, 0xffff +; SI-NEXT: s_and_b32 s15, s15, 0xffff +; SI-NEXT: s_and_b32 s43, s43, 0xffff +; SI-NEXT: v_writelane_b32 v42, s69, 58 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s18, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: s_and_b32 s44, s29, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s37 +; SI-NEXT: s_or_b32 s9, s9, s68 +; SI-NEXT: s_or_b32 s11, s11, s99 +; SI-NEXT: s_or_b32 s13, s13, s92 +; SI-NEXT: s_or_b32 s15, s15, s90 +; SI-NEXT: s_or_b32 s43, s43, s35 +; SI-NEXT: v_writelane_b32 v42, s30, 59 +; SI-NEXT: s_mov_b32 s23, s91 +; SI-NEXT: s_mov_b32 s91, s36 +; SI-NEXT: s_or_b32 s66, s78, s77 +; SI-NEXT: s_or_b32 s67, s76, s85 +; SI-NEXT: s_and_b32 s45, s45, 0xffff +; SI-NEXT: s_and_b32 s46, s46, 0xffff +; SI-NEXT: s_and_b32 s47, s47, 0xffff +; SI-NEXT: s_and_b32 s56, s56, 0xffff +; SI-NEXT: s_and_b32 s57, s57, 0xffff +; SI-NEXT: s_and_b32 s30, s58, 0xffff +; SI-NEXT: s_and_b32 s34, s59, 0xffff +; SI-NEXT: s_and_b32 s36, s60, 0xffff +; SI-NEXT: s_and_b32 s97, s61, 0xffff +; SI-NEXT: s_and_b32 s86, s62, 0xffff +; SI-NEXT: s_and_b32 s98, s63, 0xffff +; SI-NEXT: s_and_b32 s17, s72, 0xffff +; SI-NEXT: s_and_b32 s87, s73, 0xffff +; SI-NEXT: s_and_b32 s96, s74, 0xffff +; SI-NEXT: s_and_b32 s22, s75, 0xffff +; SI-NEXT: s_or_b32 s74, s44, s4 +; SI-NEXT: s_mov_b32 s75, s5 +; SI-NEXT: s_lshr_b64 s[76:77], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 16 +; SI-NEXT: s_mov_b32 s70, s93 +; SI-NEXT: s_mov_b32 s69, s95 +; SI-NEXT: s_mov_b32 s93, s28 +; SI-NEXT: s_or_b32 s72, s45, s6 +; SI-NEXT: s_mov_b32 s73, s7 +; SI-NEXT: s_lshr_b64 s[26:27], s[6:7], 16 +; SI-NEXT: s_or_b32 s62, s46, s8 +; SI-NEXT: s_mov_b32 s63, s9 +; SI-NEXT: s_lshr_b64 s[28:29], s[8:9], 16 +; SI-NEXT: s_or_b32 s60, s47, s10 +; SI-NEXT: s_mov_b32 s61, s11 +; SI-NEXT: s_lshr_b64 s[88:89], s[10:11], 16 +; SI-NEXT: s_or_b32 s58, s56, s12 +; SI-NEXT: s_mov_b32 s59, s13 +; SI-NEXT: s_lshr_b64 s[20:21], s[12:13], 16 +; SI-NEXT: s_or_b32 s56, s57, s14 +; SI-NEXT: s_mov_b32 s57, s15 +; SI-NEXT: s_lshr_b64 s[24:25], s[14:15], 16 +; SI-NEXT: s_or_b32 s46, s30, s40 +; SI-NEXT: s_mov_b32 s47, s41 +; SI-NEXT: s_or_b32 s44, s34, s42 +; SI-NEXT: s_mov_b32 s34, s4 +; SI-NEXT: s_mov_b32 s45, s43 +; SI-NEXT: s_lshr_b64 s[94:95], s[42:43], 16 +; SI-NEXT: s_or_b32 s42, s36, vcc_lo +; SI-NEXT: s_mov_b32 s43, vcc_hi +; SI-NEXT: s_lshr_b64 vcc, vcc, 16 +; SI-NEXT: s_or_b32 s40, s97, s38 +; SI-NEXT: s_mov_b32 s41, s39 +; SI-NEXT: s_lshr_b64 s[38:39], s[38:39], 16 +; SI-NEXT: s_or_b32 s14, s86, s48 +; SI-NEXT: s_mov_b32 s15, s49 +; SI-NEXT: s_lshr_b64 s[48:49], s[48:49], 16 +; SI-NEXT: s_or_b32 s12, s98, s50 +; SI-NEXT: s_mov_b32 s13, s51 +; SI-NEXT: s_lshr_b64 s[50:51], s[50:51], 16 +; SI-NEXT: s_or_b32 s10, s17, s52 +; SI-NEXT: s_mov_b32 s11, s53 +; SI-NEXT: s_lshr_b64 s[52:53], s[52:53], 16 +; SI-NEXT: s_or_b32 s8, s87, s54 +; SI-NEXT: s_mov_b32 s9, s55 +; SI-NEXT: s_lshr_b64 s[54:55], s[54:55], 16 +; SI-NEXT: s_or_b32 s6, s96, s64 +; SI-NEXT: s_mov_b32 s7, s65 +; SI-NEXT: s_lshr_b64 s[64:65], s[64:65], 16 +; SI-NEXT: s_or_b32 s4, s22, s66 +; SI-NEXT: s_mov_b32 s5, s67 +; SI-NEXT: s_lshr_b64 s[66:67], s[66:67], 16 +; SI-NEXT: v_readlane_b32 s17, v42, 51 +; SI-NEXT: s_lshr_b32 s55, s17, 16 
+; SI-NEXT: s_lshr_b32 s53, s37, 16 +; SI-NEXT: s_lshr_b32 s51, s68, 16 +; SI-NEXT: s_lshr_b32 s49, s99, 16 +; SI-NEXT: s_lshr_b32 s86, s92, 16 +; SI-NEXT: s_lshr_b32 s39, s90, 16 +; SI-NEXT: s_lshr_b32 s18, s31, 16 +; SI-NEXT: s_lshr_b32 s22, s35, 16 +; SI-NEXT: s_lshr_b32 s97, s19, 16 +; SI-NEXT: s_lshr_b32 s65, s71, 16 +; SI-NEXT: s_lshr_b32 s19, s80, 16 +; SI-NEXT: s_lshr_b32 s71, s81, 16 +; SI-NEXT: s_lshr_b32 s67, s82, 16 +; SI-NEXT: v_readlane_b32 s82, v42, 54 +; SI-NEXT: v_readlane_b32 s96, v42, 53 +; SI-NEXT: s_lshr_b32 s80, s16, 16 +; SI-NEXT: v_readlane_b32 s16, v42, 55 +; SI-NEXT: s_lshr_b32 s81, s83, 16 +; SI-NEXT: s_mov_b32 s90, s93 +; SI-NEXT: v_readlane_b32 s78, v42, 52 +; SI-NEXT: s_mov_b32 s95, s69 +; SI-NEXT: s_mov_b32 s93, s70 +; SI-NEXT: v_readlane_b32 s30, v42, 59 +; SI-NEXT: v_readlane_b32 s69, v42, 58 +; SI-NEXT: v_readlane_b32 s70, v42, 57 +; SI-NEXT: v_readlane_b32 s89, v42, 56 +; SI-NEXT: s_lshr_b32 s77, s85, 16 +; SI-NEXT: s_mov_b32 s84, vcc_lo +; SI-NEXT: s_mov_b32 s36, s91 +; SI-NEXT: s_mov_b32 s91, s23 +; SI-NEXT: s_cbranch_execnz .LBB97_3 +; SI-NEXT: .LBB97_2: ; %cmp.true +; SI-NEXT: v_readlane_b32 s4, v43, 42 ; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: v_readlane_b32 s6, v43, 41 ; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s6, s76, 3 +; SI-NEXT: s_lshl_b32 s5, s90, 8 +; SI-NEXT: s_add_i32 s6, s6, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 16 +; SI-NEXT: v_readlane_b32 s5, v43, 18 ; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s5, 24 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s40, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 7 -; SI-NEXT: s_add_i32 s4, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 17 -; SI-NEXT: v_readlane_b32 s6, v62, 6 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xff ; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s18, s4, 0x3000000 -; SI-NEXT: s_add_i32 s4, s20, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 5 -; SI-NEXT: v_readlane_b32 s6, v62, 4 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 +; SI-NEXT: v_readlane_b32 s5, v43, 17 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: v_readlane_b32 s6, v43, 16 +; SI-NEXT: v_readlane_b32 s7, v43, 15 +; SI-NEXT: s_and_b32 s5, s5, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: s_add_i32 s7, s7, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_readlane_b32 s6, v43, 14 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_readlane_b32 s6, v43, 44 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 3 +; SI-NEXT: v_readlane_b32 s8, v43, 24 ; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s42, s4, 
0x3000000 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v20, s10 -; SI-NEXT: v_mov_b32_e32 v19, s12 -; SI-NEXT: v_mov_b32_e32 v18, s13 -; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v5 -; SI-NEXT: v_mov_b32_e32 v5, s14 -; SI-NEXT: v_add_i32_e32 v50, vcc, 0x3000000, v6 -; SI-NEXT: v_mov_b32_e32 v6, s15 -; SI-NEXT: v_alignbit_b32 v57, s42, v13, 16 -; SI-NEXT: v_mov_b32_e32 v13, s17 -; SI-NEXT: v_alignbit_b32 v58, s40, v13, 16 -; SI-NEXT: v_alignbit_b32 v56, v6, v50, 16 -; SI-NEXT: v_alignbit_b32 v47, v5, v17, 16 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v46, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v45, v19, v15, 16 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v44, v20, v12, 16 -; SI-NEXT: s_lshr_b32 s73, s42, 16 -; SI-NEXT: s_lshr_b32 s72, s40, 16 -; SI-NEXT: s_lshr_b32 s63, s15, 16 -; SI-NEXT: s_lshr_b32 s62, s14, 16 -; SI-NEXT: s_lshr_b32 s61, s13, 16 -; SI-NEXT: s_lshr_b32 s60, s12, 16 -; SI-NEXT: s_lshr_b32 s59, s10, 16 +; SI-NEXT: s_lshl_b32 s7, s91, 8 +; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readlane_b32 s7, v43, 23 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_or_b32 s7, s7, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readlane_b32 s7, v43, 22 +; SI-NEXT: s_add_i32 s7, s7, 3 +; SI-NEXT: v_readlane_b32 s8, v43, 21 +; SI-NEXT: v_readlane_b32 s9, v43, 20 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 8 +; SI-NEXT: s_add_i32 s9, s9, 3 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_readlane_b32 s8, v43, 19 +; SI-NEXT: s_and_b32 s9, s9, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_readlane_b32 s8, v43, 43 +; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s9, s78, 8 +; SI-NEXT: s_add_i32 s10, s89, 3 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readlane_b32 s9, v43, 29 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 24 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_addk_i32 s8, 0x300 +; SI-NEXT: s_or_b32 s9, s9, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readlane_b32 s9, v43, 28 +; SI-NEXT: s_add_i32 s9, s9, 3 +; SI-NEXT: v_readlane_b32 s10, v43, 27 +; SI-NEXT: v_readlane_b32 s11, v43, 26 +; SI-NEXT: s_and_b32 s9, s9, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 8 +; SI-NEXT: s_add_i32 s11, s11, 3 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_readlane_b32 s10, v43, 25 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 24 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_addk_i32 s9, 0x300 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s9, s9, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_readlane_b32 s10, v43, 46 +; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: v_readlane_b32 s11, v43, 45 +; SI-NEXT: v_readlane_b32 s12, v43, 32 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 8 +; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s11, s70, 24 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_addk_i32 s10, 0x300 +; SI-NEXT: s_or_b32 s11, s11, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_readlane_b32 s11, 
v43, 31 +; SI-NEXT: s_add_i32 s11, s11, 3 +; SI-NEXT: v_readlane_b32 s13, v43, 30 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: s_lshl_b32 s12, s96, 8 +; SI-NEXT: s_add_i32 s13, s13, 3 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: s_lshl_b32 s12, s82, 24 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_addk_i32 s11, 0x300 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: s_and_b32 s11, s11, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: s_add_i32 s12, s36, 3 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s13, s16, 8 +; SI-NEXT: s_add_i32 s14, s30, 3 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: s_lshl_b32 s13, s69, 24 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_addk_i32 s12, 0x300 +; SI-NEXT: s_or_b32 s13, s13, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readlane_b32 s13, v43, 36 +; SI-NEXT: s_add_i32 s13, s13, 3 +; SI-NEXT: v_readlane_b32 s14, v43, 35 +; SI-NEXT: v_readlane_b32 s15, v43, 34 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_readlane_b32 s14, v43, 33 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_addk_i32 s13, 0x300 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: s_and_b32 s13, s13, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_readlane_b32 s14, v42, 50 +; SI-NEXT: s_add_i32 s17, s14, 3 +; SI-NEXT: v_readlane_b32 s15, v42, 49 +; SI-NEXT: s_and_b32 s14, s17, 0xff +; SI-NEXT: s_lshl_b32 s15, s15, 8 +; SI-NEXT: s_add_i32 s16, s95, 3 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s15, s93, 24 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_addk_i32 s14, 0x300 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_readlane_b32 s15, v43, 40 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: v_readlane_b32 s16, v43, 39 +; SI-NEXT: v_readlane_b32 s17, v43, 38 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_readlane_b32 s16, v43, 37 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_addk_i32 s15, 0x300 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s15, s15, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_readlane_b32 s16, v42, 48 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 47 +; SI-NEXT: v_readlane_b32 s18, v42, 42 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s99, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 43 +; SI-NEXT: s_and_b32 s18, s99, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 38 +; SI-NEXT: s_add_i32 s87, s17, 3 +; SI-NEXT: v_readlane_b32 s18, v42, 35 +; SI-NEXT: v_readlane_b32 s19, v42, 29 +; SI-NEXT: s_and_b32 s17, s87, 0xff +; SI-NEXT: s_lshl_b32 s18, s18, 8 +; SI-NEXT: s_add_i32 s23, s19, 3 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v42, 30 +; SI-NEXT: s_and_b32 s23, s23, 0xff +; SI-NEXT: 
s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_lshl_b32 s23, s23, 16 +; SI-NEXT: s_addk_i32 s17, 0x300 +; SI-NEXT: s_or_b32 s18, s18, s23 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_add_i32 s40, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 45 +; SI-NEXT: s_add_i32 s41, s17, 0x3000000 +; SI-NEXT: s_add_i32 s68, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 44 +; SI-NEXT: v_readlane_b32 s18, v42, 39 +; SI-NEXT: s_and_b32 s16, s68, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s96, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 40 +; SI-NEXT: s_and_b32 s18, s96, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 33 +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: v_readlane_b32 s18, v42, 31 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: s_lshl_b32 s18, s18, 8 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v42, 24 +; SI-NEXT: s_addk_i32 s17, 0x300 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s42, s16, 0x3000000 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: v_readlane_b32 s17, v42, 25 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s43, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 46 +; SI-NEXT: s_add_i32 s23, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 41 +; SI-NEXT: v_readlane_b32 s18, v42, 37 +; SI-NEXT: s_and_b32 s16, s23, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s86, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 36 +; SI-NEXT: s_and_b32 s18, s86, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s44, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 28 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 22 +; SI-NEXT: v_readlane_b32 s18, v42, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 15 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s45, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 34 +; SI-NEXT: s_add_i32 s83, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 32 +; SI-NEXT: v_readlane_b32 s18, v42, 26 +; SI-NEXT: s_and_b32 s16, s83, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 27 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s46, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 23 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 21 +; 
SI-NEXT: v_readlane_b32 s18, v42, 17 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 18 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s47, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 20 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 19 +; SI-NEXT: v_readlane_b32 s18, v42, 13 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 14 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s56, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 12 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 11 +; SI-NEXT: v_readlane_b32 s18, v42, 7 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 8 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s57, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 10 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 9 +; SI-NEXT: v_readlane_b32 s18, v42, 5 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 6 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s58, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 4 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 3 +; SI-NEXT: v_readlane_b32 s18, v43, 63 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 0 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s59, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 2 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 1 +; SI-NEXT: v_readlane_b32 s18, v43, 61 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 62 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: 
s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s60, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 60 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 59 +; SI-NEXT: v_readlane_b32 s18, v43, 55 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 56 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s61, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 58 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 57 +; SI-NEXT: v_readlane_b32 s18, v43, 53 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 54 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s62, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 52 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 51 +; SI-NEXT: v_readlane_b32 s18, v43, 49 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 50 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s63, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 5 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 4 +; SI-NEXT: v_readlane_b32 s18, v43, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 2 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s72, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 1 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 0 +; SI-NEXT: v_readlane_b32 s18, v43, 47 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 48 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s73, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 13 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 12 +; SI-NEXT: v_readlane_b32 s18, v43, 11 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: 
v_readlane_b32 s17, v43, 10 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s74, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 9 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 8 +; SI-NEXT: v_readlane_b32 s18, v43, 7 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 6 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_add_i32 s8, s8, 0x3000000 +; SI-NEXT: s_add_i32 s9, s9, 0x3000000 +; SI-NEXT: s_add_i32 s10, s10, 0x3000000 +; SI-NEXT: s_add_i32 s11, s11, 0x3000000 +; SI-NEXT: s_add_i32 s12, s12, 0x3000000 +; SI-NEXT: s_add_i32 s13, s13, 0x3000000 +; SI-NEXT: s_add_i32 s14, s14, 0x3000000 +; SI-NEXT: s_add_i32 s15, s15, 0x3000000 +; SI-NEXT: s_add_i32 s75, s16, 0x3000000 +; SI-NEXT: s_lshr_b64 s[76:77], s[74:75], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[48:49], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[50:51], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[54:55], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[64:65], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[66:67], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[72:73], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[62:63], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[60:61], 16 +; SI-NEXT: s_lshr_b64 s[20:21], s[58:59], 16 +; SI-NEXT: s_lshr_b64 s[24:25], s[56:57], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[46:47], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[84:85], s[42:43], 16 +; SI-NEXT: s_lshr_b32 s55, s75, 16 +; SI-NEXT: s_lshr_b32 s53, s73, 16 +; SI-NEXT: s_lshr_b32 s51, s63, 16 +; SI-NEXT: s_lshr_b32 s49, s61, 16 +; SI-NEXT: s_lshr_b32 s86, s59, 16 +; SI-NEXT: s_lshr_b32 s39, s57, 16 +; SI-NEXT: s_lshr_b32 s18, s47, 16 +; SI-NEXT: s_lshr_b32 s22, s45, 16 +; SI-NEXT: s_lshr_b32 s97, s43, 16 +; SI-NEXT: s_lshr_b32 s65, s41, 16 +; SI-NEXT: s_lshr_b32 s19, s15, 16 +; SI-NEXT: s_lshr_b32 s71, s13, 16 +; SI-NEXT: s_lshr_b32 s67, s11, 16 +; SI-NEXT: s_lshr_b32 s80, s9, 16 +; SI-NEXT: s_lshr_b32 s81, s7, 16 +; SI-NEXT: s_lshr_b32 s77, s5, 16 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v57 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s16, s74, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s75, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s72, 0xffff +; SI-NEXT: s_lshl_b32 s17, s26, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 4, v0 -; SI-NEXT: 
v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v58 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s73, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v50 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s62, 0xffff +; SI-NEXT: s_lshl_b32 s17, s28, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v56 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s63, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s60, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v47 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s61, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v16 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s58, 0xffff +; SI-NEXT: s_lshl_b32 s17, s20, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v46 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: 
v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s59, 0xffff +; SI-NEXT: s_lshl_b32 s17, s86, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v15 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s56, 0xffff +; SI-NEXT: s_lshl_b32 s17, s24, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v45 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s57, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 44, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v12 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s46, 0xffff +; SI-NEXT: s_lshl_b32 s17, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v44 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s47, 0xffff +; SI-NEXT: s_lshl_b32 s17, s18, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v11 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s44, 0xffff +; SI-NEXT: s_lshl_b32 s17, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v43 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xffff +; SI-NEXT: s_lshl_b32 s17, s22, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 60, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s84, 16 +; SI-NEXT: v_add_i32_e32 
v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v42 -; SI-NEXT: s_and_b32 s4, s36, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s97, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x44, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v41 -; SI-NEXT: s_and_b32 s4, s53, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s65, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x4c, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v40 -; SI-NEXT: s_and_b32 s4, s94, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s19, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x54, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v7 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v55 -; SI-NEXT: s_and_b32 s4, s49, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s71, 16 +; SI-NEXT: 
v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x5c, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v54 -; SI-NEXT: s_and_b32 s4, s48, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x64, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v53 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s67, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s54, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s80, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s64, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s81, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s66, 16 ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 -; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_or_b32 s4, s4, s6 ; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 -; SI-NEXT: s_and_b32 s4, s16, 
0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s99, v63, 35 -; SI-NEXT: v_readlane_b32 s98, v63, 34 -; SI-NEXT: v_readlane_b32 s97, v63, 33 -; SI-NEXT: v_readlane_b32 s96, v63, 32 -; SI-NEXT: v_readlane_b32 s87, v63, 31 -; SI-NEXT: v_readlane_b32 s86, v63, 30 -; SI-NEXT: v_readlane_b32 s85, v63, 29 -; SI-NEXT: v_readlane_b32 s84, v63, 28 -; SI-NEXT: v_readlane_b32 s83, v63, 27 -; SI-NEXT: v_readlane_b32 s82, v63, 26 -; SI-NEXT: v_readlane_b32 s81, v63, 25 -; SI-NEXT: v_readlane_b32 s80, v63, 24 -; SI-NEXT: v_readlane_b32 s71, v63, 23 -; SI-NEXT: v_readlane_b32 s70, v63, 22 -; SI-NEXT: v_readlane_b32 s69, v63, 21 -; SI-NEXT: v_readlane_b32 s68, v63, 20 -; SI-NEXT: v_readlane_b32 s67, v63, 19 -; SI-NEXT: v_readlane_b32 s66, v63, 18 -; SI-NEXT: v_readlane_b32 s65, v63, 17 -; SI-NEXT: v_readlane_b32 s64, v63, 16 -; SI-NEXT: v_readlane_b32 s55, v63, 15 -; SI-NEXT: v_readlane_b32 s54, v63, 14 -; SI-NEXT: v_readlane_b32 s53, v63, 13 -; SI-NEXT: v_readlane_b32 s52, v63, 12 -; SI-NEXT: v_readlane_b32 s51, v63, 11 -; SI-NEXT: v_readlane_b32 s50, v63, 10 -; SI-NEXT: v_readlane_b32 s49, v63, 9 -; SI-NEXT: v_readlane_b32 s48, v63, 8 -; SI-NEXT: v_readlane_b32 s39, v63, 7 -; SI-NEXT: v_readlane_b32 s38, v63, 6 -; SI-NEXT: v_readlane_b32 s37, v63, 5 -; SI-NEXT: v_readlane_b32 s36, v63, 4 -; SI-NEXT: v_readlane_b32 s35, v63, 3 -; SI-NEXT: v_readlane_b32 s34, v63, 2 -; SI-NEXT: v_readlane_b32 s31, v63, 1 -; SI-NEXT: v_readlane_b32 s30, v63, 0 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s99, v41, 35 +; SI-NEXT: v_readlane_b32 s98, v41, 34 +; SI-NEXT: v_readlane_b32 s97, v41, 33 +; SI-NEXT: v_readlane_b32 s96, v41, 32 +; SI-NEXT: v_readlane_b32 s87, v41, 31 +; SI-NEXT: v_readlane_b32 s86, v41, 30 +; SI-NEXT: 
v_readlane_b32 s85, v41, 29 +; SI-NEXT: v_readlane_b32 s84, v41, 28 +; SI-NEXT: v_readlane_b32 s83, v41, 27 +; SI-NEXT: v_readlane_b32 s82, v41, 26 +; SI-NEXT: v_readlane_b32 s81, v41, 25 +; SI-NEXT: v_readlane_b32 s80, v41, 24 +; SI-NEXT: v_readlane_b32 s71, v41, 23 +; SI-NEXT: v_readlane_b32 s70, v41, 22 +; SI-NEXT: v_readlane_b32 s69, v41, 21 +; SI-NEXT: v_readlane_b32 s68, v41, 20 +; SI-NEXT: v_readlane_b32 s67, v41, 19 +; SI-NEXT: v_readlane_b32 s66, v41, 18 +; SI-NEXT: v_readlane_b32 s65, v41, 17 +; SI-NEXT: v_readlane_b32 s64, v41, 16 +; SI-NEXT: v_readlane_b32 s55, v41, 15 +; SI-NEXT: v_readlane_b32 s54, v41, 14 +; SI-NEXT: v_readlane_b32 s53, v41, 13 +; SI-NEXT: v_readlane_b32 s52, v41, 12 +; SI-NEXT: v_readlane_b32 s51, v41, 11 +; SI-NEXT: v_readlane_b32 s50, v41, 10 +; SI-NEXT: v_readlane_b32 s49, v41, 9 +; SI-NEXT: v_readlane_b32 s48, v41, 8 +; SI-NEXT: v_readlane_b32 s39, v41, 7 +; SI-NEXT: v_readlane_b32 s38, v41, 6 +; SI-NEXT: v_readlane_b32 s37, v41, 5 +; SI-NEXT: v_readlane_b32 s36, v41, 4 +; SI-NEXT: v_readlane_b32 s35, v41, 3 +; SI-NEXT: v_readlane_b32 s34, v41, 2 +; SI-NEXT: v_readlane_b32 s31, v41, 1 +; SI-NEXT: v_readlane_b32 s30, v41, 0 ; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: -; SI-NEXT: v_mov_b32_e32 v5, v13 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v25, v58 -; SI-NEXT: v_mov_b32_e32 v48, v39 -; SI-NEXT: v_mov_b32_e32 v39, v57 -; SI-NEXT: v_mov_b32_e32 v49, v56 -; SI-NEXT: v_mov_b32_e32 v20, v47 -; SI-NEXT: v_mov_b32_e32 v30, v37 -; SI-NEXT: v_mov_b32_e32 v36, v35 -; SI-NEXT: v_mov_b32_e32 v35, v45 -; SI-NEXT: v_mov_b32_e32 v27, v26 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_mov_b32_e32 v32, v23 -; SI-NEXT: ; implicit-def: $sgpr18 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $sgpr17 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr55 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr44 -; 
SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $sgpr36 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $sgpr86 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $sgpr94 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $sgpr48 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr18 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $sgpr16 -; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr97 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr19 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v128i8_to_v64i16_scalar: @@ -199508,22 +201428,22 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; VI-NEXT: 
buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 ; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 @@ -199549,13 +201469,11 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 ; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v43, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v55, 8, v25 ; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 ; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 ; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 @@ -199564,49 +201482,46 @@ define inreg <64 x i16> 
@bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 ; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 ; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v8 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v10 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v12 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v26 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v28 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v30 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 
offset:508 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200 @@ -199615,34 +201530,35 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232 ; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 -; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v22 ; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v24 +; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v26 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec ; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 ; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v16 ; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 ; VI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 +; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v0 +; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v0 ; VI-NEXT: s_waitcnt vmcnt(6) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v2 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v36, 8, v2 ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264 @@ -199652,131 +201568,155 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296 ; VI-NEXT: 
buffer_load_ushort v7, off, s[0:3], s32 offset:304 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0 -; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) ; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v2 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill ; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312 ; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 ; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328 ; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:12 +; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 ; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:28 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:36 +; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:36 +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v4 +; VI-NEXT: s_waitcnt vmcnt(13) +; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v5 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v6 -; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5 -; VI-NEXT: s_waitcnt vmcnt(10) ; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v0 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:84 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:92 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:100 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108 -; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:116 -; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:124 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:132 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:140 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:148 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:156 -; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:164 -; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:172 -; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:180 -; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:188 -; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:196 -; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:204 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:212 -; VI-NEXT: buffer_load_ushort v59, off, 
s[0:3], s32 offset:220 -; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:228 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:236 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:244 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:252 -; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260 -; VI-NEXT: s_waitcnt vmcnt(14) -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0 +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:68 +; VI-NEXT: s_waitcnt vmcnt(10) +; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:268 -; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:76 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:284 -; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:292 -; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300 -; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:308 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:316 -; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:324 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; 
VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; VI-NEXT: s_cbranch_scc0 .LBB97_4 +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:84 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:92 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:100 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:108 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 
offset:608 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:116 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:124 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:132 +; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:140 +; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:148 +; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:156 +; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:164 +; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:172 +; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:180 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:188 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:196 +; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:204 +; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:212 +; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220 +; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:228 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:236 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:244 +; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:252 +; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260 +; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:268 +; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:276 +; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:284 +; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:292 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:300 +; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:308 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:316 +; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:324 +; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v12, off, 
s[0:3], s32 offset:716 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; VI-NEXT: s_waitcnt vmcnt(14) +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill +; VI-NEXT: s_cbranch_scc0 .LBB97_2 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 @@ -199785,225 +201725,205 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v0, v0, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v1, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(6) +; VI-NEXT: v_or_b32_sdwa v2, v2, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v2, v8 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 
src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v3, v8 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v3, v10 ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v2, v50, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v36, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v37, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v38, v14 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v48, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v38, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v39, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v49, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v48, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v45, v62 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v60, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v2, v35, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v0, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v3, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(9) +; VI-NEXT: v_or_b32_sdwa v3, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v32, v1 -; VI-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v54, v22 -; VI-NEXT: v_mov_b32_e32 v41, v24 +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v34, v0 -; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v37, v1 -; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v55, v26 +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v0, v42, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v1, v41, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v0, v39, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v42, v43 +; VI-NEXT: v_mov_b32_e32 v43, v37 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v39, v0 -; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v49, v1 -; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v43, v27 +; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v51, v0 -; VI-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v47, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v47, v54 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v35, v1 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; 
VI-NEXT: v_mov_b32_e32 v53, v28 +; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v47, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v33, v0 -; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v61, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v58, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v57, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v36, v0 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v1, v24, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v0, v34, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v56, v0 -; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v58, v1 -; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v61, v60 -; VI-NEXT: v_mov_b32_e32 v60, v59 +; VI-NEXT: v_or_b32_sdwa v1, v25, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v38, v0 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v48, v1 -; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v57, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v29, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v45, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v50, v0 -; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v28, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v0, v40, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v1, v62, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v51, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v40, v41 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v52, v0 -; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v46, v1 +; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v0, v29, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v0, v31, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v54, v33 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v46, v1 +; VI-NEXT: v_mov_b32_e32 v56, v1 ; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v63, v0 -; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_mov_b32_e32 v47, v1 -; VI-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v63, v39 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_mov_b32_e32 v57, v1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v57, v0 +; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v58, v1 +; VI-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v52, v60 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v53, v35 +; VI-NEXT: s_waitcnt vmcnt(3) ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff @@ -200032,14 +201952,54 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, 
i3 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 -; VI-NEXT: s_cbranch_execnz .LBB97_3 -; VI-NEXT: .LBB97_2: ; %cmp.true -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v59 -; VI-NEXT: v_or_b32_sdwa v29, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_branch .LBB97_3 +; VI-NEXT: .LBB97_2: +; VI-NEXT: v_mov_b32_e32 v47, v54 +; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; VI-NEXT: v_mov_b32_e32 v58, v7 +; VI-NEXT: v_mov_b32_e32 v57, v5 +; VI-NEXT: v_mov_b32_e32 v56, v3 +; VI-NEXT: s_mov_b64 s[4:5], -1 +; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 +; VI-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; VI-NEXT: .LBB97_3: ; %Flow +; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:632 ; 
4-byte Folded Reload +; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; VI-NEXT: s_cbranch_vccnz .LBB97_5 +; VI-NEXT: ; %bb.4: ; %cmp.true +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(4) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 @@ -200058,351 +202018,356 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: s_lshl_b32 s9, s19, 8 ; VI-NEXT: s_add_i32 s16, s16, 3 ; VI-NEXT: s_lshl_b32 s10, s17, 8 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 +; VI-NEXT: v_or_b32_sdwa v29, v41, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v31, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33 +; VI-NEXT: v_or_b32_sdwa v30, v58, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 +; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v28, v56, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v26, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v62 -; VI-NEXT: v_or_b32_sdwa v28, v43, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44 -; VI-NEXT: v_or_b32_sdwa v53, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v45 -; VI-NEXT: v_or_b32_sdwa v27, v55, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42 -; VI-NEXT: v_or_b32_sdwa v52, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v40 -; VI-NEXT: v_or_b32_sdwa v25, v48, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v60 -; VI-NEXT: v_or_b32_sdwa v59, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v61 -; VI-NEXT: v_or_b32_sdwa v24, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; 
VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v44, v47, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v48, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48 -; VI-NEXT: v_or_b32_sdwa v24, v24, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24 +; VI-NEXT: v_or_b32_sdwa v27, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v23, v41, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v40, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v38, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38 -; VI-NEXT: v_or_b32_sdwa v23, v23, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23 +; VI-NEXT: v_or_b32_sdwa v26, v61, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v22, v54, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v34, v34, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34 +; VI-NEXT: v_or_b32_sdwa v26, v26, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v50, v33, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v50 -; VI-NEXT: v_or_b32_sdwa v22, v22, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22 +; VI-NEXT: v_or_b32_sdwa v25, v48, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v21, v35, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: 
buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v21, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v21 +; VI-NEXT: v_or_b32_sdwa v25, v25, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v54, v51, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v24, v36, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v20, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v32, v32, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v32 +; VI-NEXT: v_or_b32_sdwa v24, v24, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v49, v39, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v49 -; VI-NEXT: v_or_b32_sdwa v20, v20, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20 -; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_or_b32_sdwa v23, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v19, v37, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v61, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v61 +; VI-NEXT: v_or_b32_sdwa v23, v23, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; 
VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v22, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v60 +; VI-NEXT: v_or_b32_sdwa v36, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v36 +; VI-NEXT: v_or_b32_sdwa v22, v22, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v37, v34, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v37 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v63, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v31, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v19, v19, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19 -; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: v_or_b32_sdwa v18, v32, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: v_or_b32_sdwa v38, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38 +; VI-NEXT: v_or_b32_sdwa v21, v63, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v57, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v57 -; 
VI-NEXT: v_or_b32_sdwa v18, v18, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18 +; VI-NEXT: v_or_b32_sdwa v20, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v39, v45, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v39 +; VI-NEXT: v_or_b32_sdwa v20, v20, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_or_b32_sdwa v19, v43, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_or_b32_sdwa v48, v42, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v18, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v55 +; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48 +; VI-NEXT: v_or_b32_sdwa v19, v19, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v62, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v62 +; VI-NEXT: v_or_b32_sdwa v18, v18, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v53 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v10, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v17, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; 
VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v51 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v11, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v50 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v15, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v56, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v49, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v49 +; VI-NEXT: v_or_b32_sdwa v15, v15, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v14, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v34, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34 -; VI-NEXT: v_or_b32_sdwa v14, v14, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51 +; VI-NEXT: v_or_b32_sdwa v14, v14, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: 
v_add_u32_e32 v51, vcc, 0x300, v2 +; VI-NEXT: v_or_b32_sdwa v29, v29, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v14 +; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_or_b32_sdwa v13, v59, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v13, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v36, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v36 -; VI-NEXT: v_or_b32_sdwa v13, v13, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v26 -; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v52 -; VI-NEXT: v_or_b32_sdwa v26, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v54 -; VI-NEXT: v_or_b32_sdwa v21, v21, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v52, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v52 +; VI-NEXT: v_or_b32_sdwa v13, v13, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v44 +; VI-NEXT: v_or_b32_sdwa v28, v28, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13 -; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21 -; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26 +; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v12, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51 -; VI-NEXT: v_or_b32_sdwa v12, v12, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v59 -; VI-NEXT: v_or_b32_sdwa v25, v25, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v54, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v54, vcc, 0x300, v54 +; VI-NEXT: v_or_b32_sdwa v12, v12, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12 -; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v33, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v50, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v40, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 -; VI-NEXT: v_or_b32_sdwa v30, v47, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_or_b32_sdwa v39, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2 -; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v2 +; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v53, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 +; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt 
vmcnt(2) +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v55, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v9, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v41 -; VI-NEXT: v_or_b32_sdwa v9, v9, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v10 +; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42 +; VI-NEXT: v_or_b32_sdwa v9, v9, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v10 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x300, v55 -; VI-NEXT: v_or_b32_sdwa v10, v39, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v53 -; VI-NEXT: v_or_b32_sdwa v27, v28, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v28, v29, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v29, v30, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v49, v16, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v10, v53, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v53, vcc, 0x300, v40 +; VI-NEXT: v_or_b32_sdwa v27, v27, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10 ; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v27 -; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28 -; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v8, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; 
VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42 -; VI-NEXT: v_or_b32_sdwa v8, v8, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v11 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v40 -; VI-NEXT: v_or_b32_sdwa v11, v33, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v30, v31, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v17, v17, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v43, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v43 +; VI-NEXT: v_or_b32_sdwa v8, v8, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v11 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v41 +; VI-NEXT: v_or_b32_sdwa v17, v17, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v11, v50, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17 +; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v49 +; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v0 +; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v1 +; VI-NEXT: v_or_b32_sdwa v30, v30, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v31, v31, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8 ; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11 ; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v30 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v7, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v44, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:420 ; 
4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v44, vcc, 0x300, v44 -; VI-NEXT: v_or_b32_sdwa v7, v7, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v45, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v45, vcc, 0x300, v45 +; VI-NEXT: v_or_b32_sdwa v7, v7, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v6, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v45, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v45, vcc, 0x300, v45 -; VI-NEXT: v_or_b32_sdwa v6, v6, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v46, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v46, vcc, 0x300, v46 +; VI-NEXT: v_or_b32_sdwa v6, v6, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v6 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v5, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v46, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v46, vcc, 0x300, v46 -; VI-NEXT: v_or_b32_sdwa v5, v5, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v47, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; VI-NEXT: 
buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v47, vcc, 0x300, v47 +; VI-NEXT: v_or_b32_sdwa v5, v5, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_add_u32_e32 v4, vcc, 3, v4 -; VI-NEXT: v_or_b32_sdwa v4, v47, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v47, vcc, 3, v32 -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v4, v56, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x300, v4 ; VI-NEXT: v_or_b32_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v4 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v47, v32, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_e32 v47, s4, v47 +; VI-NEXT: v_add_u32_e32 v56, vcc, 3, v56 +; VI-NEXT: v_or_b32_sdwa v56, v57, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_e32 v56, s4, v56 ; VI-NEXT: s_and_b32 s4, s26, 0xff ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_and_b32 s5, s24, 0xff @@ -200415,35 +202380,26 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: s_or_b32 s8, s9, s8 ; VI-NEXT: s_and_b32 s9, s16, 0xff ; VI-NEXT: s_or_b32 s9, s10, s9 -; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v56 ; VI-NEXT: s_addk_i32 s5, 0x300 ; VI-NEXT: s_addk_i32 s7, 0x300 ; VI-NEXT: s_addk_i32 s9, 0x300 -; VI-NEXT: v_or_b32_sdwa v15, v15, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v32, v16, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: s_lshl_b32 s4, s4, 16 ; VI-NEXT: s_lshl_b32 s6, s6, 16 ; VI-NEXT: s_lshl_b32 s8, s8, 16 ; VI-NEXT: s_and_b32 s9, s9, 0xffff ; VI-NEXT: s_and_b32 s7, s7, 0xffff ; VI-NEXT: s_and_b32 s5, s5, 0xffff -; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17 -; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v32 -; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v0 ; VI-NEXT: s_or_b32 s8, s8, s9 ; VI-NEXT: s_or_b32 s6, s6, s7 ; VI-NEXT: s_or_b32 s4, s4, s5 ; VI-NEXT: s_add_i32 s8, s8, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 -; VI-NEXT: v_or_b32_sdwa v31, v31, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v47 -; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15 +; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v56 ; VI-NEXT: v_mov_b32_e32 v0, s8 ; VI-NEXT: v_mov_b32_e32 v1, s6 ; VI-NEXT: v_mov_b32_e32 v2, s4 -; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31 -; VI-NEXT: .LBB97_3: ; %end +; VI-NEXT: .LBB97_5: ; %end ; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte 
Folded Reload ; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload @@ -200462,39 +202418,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] -; VI-NEXT: .LBB97_4: -; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v61, v60 -; VI-NEXT: v_mov_b32_e32 v60, v59 -; VI-NEXT: v_mov_b32_e32 v45, v62 -; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v57, v5 -; VI-NEXT: v_mov_b32_e32 v47, v4 -; VI-NEXT: v_mov_b32_e32 v63, v3 -; VI-NEXT: v_mov_b32_e32 v53, v28 -; VI-NEXT: v_mov_b32_e32 v43, v27 -; VI-NEXT: v_mov_b32_e32 v55, v26 -; VI-NEXT: v_mov_b32_e32 v41, v24 -; VI-NEXT: v_mov_b32_e32 v54, v22 -; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 -; VI-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; VI-NEXT: s_branch .LBB97_2 ; ; GFX9-LABEL: bitcast_v128i8_to_v64i16_scalar: ; GFX9: ; %bb.0: @@ -200515,31 +202438,36 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:440 ; 
4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:332 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:56 ; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64 ; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:80 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:88 +; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:96 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:104 ; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112 ; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120 ; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128 @@ -200549,133 +202477,129 
@@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160 ; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168 ; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176 -; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v17 -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25 -; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v19 -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; GFX9-NEXT: s_waitcnt vmcnt(24) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v44, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v23 +; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v43, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v29 ; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v47 +; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v56 +; GFX9-NEXT: s_waitcnt vmcnt(21) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v45 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v44 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40 ; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54 -; GFX9-NEXT: buffer_store_dword 
v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v28 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v30 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: 
v_lshlrev_b32_e32 v1, 8, v35 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(20) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:200 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208 -; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:216 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224 ; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240 +; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v46 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(7) ; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7 -; GFX9-NEXT: 
buffer_store_dword v11, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:264 +; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264 ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272 -; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:280 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280 ; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288 ; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296 ; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304 @@ -200683,148 +202607,149 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(5) ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9 -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312 ; GFX9-NEXT: s_nop 0 ; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320 -; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:328 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:328 ; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort 
v17, off, s[0:3], s32 offset:28 ; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36 -; GFX9-NEXT: s_waitcnt vmcnt(15) -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; GFX9-NEXT: s_waitcnt vmcnt(14) +; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5 ; GFX9-NEXT: s_waitcnt vmcnt(7) ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3 ; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v1 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:84 -; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:92 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:100 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108 -; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:116 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:132 -; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:140 -; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:148 -; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:156 -; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:164 -; GFX9-NEXT: s_waitcnt vmcnt(21) -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v1 +; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:68 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:84 +; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:92 +; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:100 +; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:108 +; GFX9-NEXT: s_waitcnt vmcnt(14) +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v4 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:172 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:180 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:188 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:196 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:204 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:212 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:220 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:228 -; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:244 +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:116 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:124 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:132 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:140 +; GFX9-NEXT: 
s_nop 0 +; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:148 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:156 +; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:164 +; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:172 +; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:180 +; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:188 +; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:196 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:204 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:220 +; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:236 +; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244 ; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252 -; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:268 -; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:276 -; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:284 -; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:292 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:260 +; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:268 +; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:276 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284 +; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:292 +; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:300 ; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308 -; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:316 -; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:324 -; GFX9-NEXT: s_nop 0 -; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:316 +; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:324 +; GFX9-NEXT: s_nop 0 +; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: buffer_store_dword v49, off, 
s[0:3], s32 offset:664 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(23) +; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(24) +; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill ; GFX9-NEXT: s_waitcnt vmcnt(28) -; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(30) -; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(33) -; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(36) -; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(39) -; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v57, 
off, s[0:3], s32 offset:744 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(41) -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(31) +; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(34) +; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(34) +; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(34) +; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(35) +; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(35) +; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword 
v25, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB97_2 ; GFX9-NEXT: ; %bb.1: ; %cmp.false ; GFX9-NEXT: s_and_b32 s4, s28, 0xff @@ -200832,19 +202757,12 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff ; GFX9-NEXT: v_and_b32_e32 v3, s4, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v2, v0, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; GFX9-NEXT: v_or_b32_sdwa v2, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v4, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v6, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v1 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v8, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:484 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 @@ -200866,272 +202784,291 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 ; GFX9-NEXT: s_or_b32 s7, s7, s8 ; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7 +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v4, v0, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v1 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v57, v5 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v2, v34, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v34, v35 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:448 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v2, v39, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; 
GFX9-NEXT: v_mov_b32_e32 v39, v16 -; GFX9-NEXT: v_or_b32_sdwa v17, v34, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v36, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v46, v32 +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v42, v61 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v55, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v17, v45, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v45, v59 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v53, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v52, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], 
s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v50, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v49, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v27, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v16, v2, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v48, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX9-NEXT: v_lshl_or_b32 v16, v2, 16, v0 ; GFX9-NEXT: v_lshl_or_b32 v17, v17, 16, v1 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v55, v22 +; GFX9-NEXT: v_mov_b32_e32 v2, s6 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_mov_b32_e32 v33, v45 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v18, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v47, v32 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v19, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v20, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v21, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v51, v57 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v22, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v49, v39 +; GFX9-NEXT: v_mov_b32_e32 v59, v44 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v34, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v23, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_mov_b32_e32 v46, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v58, v50 +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v1, v35, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v24, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v35, v45 -; GFX9-NEXT: v_mov_b32_e32 v45, v61 -; GFX9-NEXT: v_mov_b32_e32 v61, v42 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_mov_b32_e32 v38, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v54, v63 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; 
GFX9-NEXT: v_or_b32_sdwa v1, v37, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v25, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v54, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v54, v2 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v26, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v27, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v29, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshl_or_b32 v26, v1, 16, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v29, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v27, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v60, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v59, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v36, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v42, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v29, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; 
GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_mov_b32_e32 v57, v35 +; GFX9-NEXT: v_mov_b32_e32 v35, v38 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v30, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v31, v1, 16, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 -; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_mov_b64 s[4:5], 0 ; GFX9-NEXT: s_branch .LBB97_3 ; GFX9-NEXT: .LBB97_2: -; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v33, v45 -; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v51, 
off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v58, v50 +; GFX9-NEXT: v_mov_b32_e32 v45, v59 +; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v34, v35 +; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: v_mov_b32_e32 v49, v39 +; GFX9-NEXT: v_mov_b32_e32 v55, v22 +; GFX9-NEXT: v_mov_b32_e32 v51, v5 ; GFX9-NEXT: s_mov_b64 s[4:5], -1 +; GFX9-NEXT: v_mov_b32_e32 v46, v32 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 ; GFX9-NEXT: .LBB97_3: ; %Flow -; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload +; 
GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload ; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; GFX9-NEXT: s_cbranch_vccnz .LBB97_5 ; GFX9-NEXT: ; %bb.4: ; %cmp.true -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s5, s4 ; GFX9-NEXT: s_addk_i32 s4, 0x300 +; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s24, s24, 3 ; GFX9-NEXT: s_lshl_b32 s5, s25, 8 ; GFX9-NEXT: s_add_i32 s26, s26, 3 @@ -201144,61 +203081,55 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: s_lshl_b32 s9, s17, 8 ; GFX9-NEXT: s_add_i32 s18, s18, 3 ; GFX9-NEXT: s_lshl_b32 s10, s19, 8 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(15) ; GFX9-NEXT: 
v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(14) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v56 +; GFX9-NEXT: s_waitcnt vmcnt(12) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(12) ; GFX9-NEXT: v_add_u32_e32 v25, 3, v25 -; GFX9-NEXT: s_waitcnt vmcnt(11) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v25, v37, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v37, v51, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v25, v26, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v38, v38, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v16, 3, v16 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_add_u32_e32 v23, 3, v23 +; GFX9-NEXT: s_waitcnt vmcnt(8) +; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16 +; GFX9-NEXT: v_or_b32_sdwa v23, v50, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(3) ; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 ; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2 ; GFX9-NEXT: v_and_b32_e32 v3, s4, v3 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload ; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 
offset:716 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_and_b32 s4, s24, 0xff ; GFX9-NEXT: s_or_b32 s4, s5, s4 ; GFX9-NEXT: s_and_b32 s5, s26, 0xff @@ -201210,8 +203141,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: s_and_b32 s8, s16, 0xff ; GFX9-NEXT: s_or_b32 s8, s9, s8 ; GFX9-NEXT: s_and_b32 s9, s18, 0xff -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_or_b32 s9, s10, s9 ; GFX9-NEXT: s_addk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s5, 0x300 @@ -201228,14 +203157,14 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) @@ -201243,9 +203172,9 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) @@ -201255,254 +203184,277 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword 
v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte 
Folded Reload +; GFX9-NEXT: v_add_u32_e32 v1, 3, v42 +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 +; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 +; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt 
vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v37, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37 +; GFX9-NEXT: s_waitcnt vmcnt(4) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(3) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 +; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v38, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_or_b32_sdwa v39, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(2) ; GFX9-NEXT: 
v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v39, v36, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v48, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v18, 3, v18 +; GFX9-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v33, 0x300, v18 +; GFX9-NEXT: s_waitcnt vmcnt(4) ; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v48, v46, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v49, v35, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 -; GFX9-NEXT: v_or_b32_sdwa v2, v16, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v2, v47, v2 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2 ; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v51, v34, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v16, 3, v16 -; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16 ; GFX9-NEXT: v_lshl_or_b32 v16, v16, 16, v2 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_waitcnt vmcnt(2) +; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0 +; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31 +; GFX9-NEXT: v_mov_b32_e32 v0, s8 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, s6 +; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 +; GFX9-NEXT: v_or_b32_sdwa v2, v49, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v49, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2 +; GFX9-NEXT: v_mov_b32_e32 v2, s4 +; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30 +; GFX9-NEXT: v_lshl_or_b32 v30, v33, 16, v30 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v19, 3, v19 +; GFX9-NEXT: v_add_u32_e32 v26, 3, v58 +; GFX9-NEXT: v_or_b32_sdwa v19, v51, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v19 +; GFX9-NEXT: v_and_b32_e32 v29, 0xffff, v29 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v51, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v45 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v52, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; 
GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload ; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v53, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v24, 3, v24 -; GFX9-NEXT: v_add_u32_e32 v26, 3, v61 -; GFX9-NEXT: v_or_b32_sdwa v24, v54, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v24 -; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v48 -; GFX9-NEXT: v_add_u32_e32 v48, 0x300, v51 -; GFX9-NEXT: v_and_b32_e32 v24, 0xffff, v24 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v55 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v54, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v45 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v34 ; GFX9-NEXT: v_add_u32_e32 v20, 3, v20 -; GFX9-NEXT: v_or_b32_sdwa v20, v57, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v20, v35, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v34, 0x300, v20 +; GFX9-NEXT: v_lshl_or_b32 v29, v34, 16, v29 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v56 +; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v21, 3, v21 -; GFX9-NEXT: v_or_b32_sdwa v21, v32, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v26, 3, v60 +; GFX9-NEXT: v_or_b32_sdwa v21, v22, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40 ; GFX9-NEXT: v_add_u32_e32 v28, 0x300, v21 ; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v54 +; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20 ; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; GFX9-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; GFX9-NEXT: 
v_add_u32_e32 v23, 3, v23 -; GFX9-NEXT: v_add_u32_e32 v26, 3, v47 -; GFX9-NEXT: v_or_b32_sdwa v23, v41, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40 -; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_or_b32_sdwa v41, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v43 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v46 +; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v22, 3, v22 -; GFX9-NEXT: v_or_b32_sdwa v22, v44, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v22, v36, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v35, 0x300, v22 ; GFX9-NEXT: v_add_u32_e32 v22, 0x300, v52 -; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41 ; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20 ; GFX9-NEXT: v_lshl_or_b32 v28, v35, 16, v28 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v42, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v26, 3, v43 +; GFX9-NEXT: v_add_u32_e32 v24, 3, v24 +; GFX9-NEXT: v_or_b32_sdwa v24, v57, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v42 +; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v24 +; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v48 +; GFX9-NEXT: v_add_u32_e32 v48, 0x300, v51 +; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41 +; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; GFX9-NEXT: v_and_b32_e32 v24, 0xffff, v24 +; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v43, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload ; GFX9-NEXT: v_add_u32_e32 v52, 0x300, v43 -; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31 -; GFX9-NEXT: v_mov_b32_e32 v0, s8 -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: v_lshl_or_b32 v19, v52, 16, v19 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v44, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v26, 3, v33 -; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1 -; GFX9-NEXT: v_mov_b32_e32 v1, s6 -; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31 +; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v44 +; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18 ; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_add_u32_e32 v26, 3, v26 +; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v45, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v27, 0x300, v23 ; GFX9-NEXT: v_add_u32_e32 v26, 0x300, v25 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v2, 3, v2 -; GFX9-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2 ; GFX9-NEXT: v_add_u32_e32 v25, 0x300, v38 ; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v50 ; GFX9-NEXT: v_add_u32_e32 v38, 0x300, v39 @@ -201514,33 +203466,14 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; GFX9-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; GFX9-NEXT: v_and_b32_e32 v26, 0xffff, v26 ; GFX9-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30 -; GFX9-NEXT: v_mov_b32_e32 v2, s4 ; GFX9-NEXT: v_lshl_or_b32 v21, v50, 16, v21 ; GFX9-NEXT: v_lshl_or_b32 v22, v49, 16, v22 -; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v23 ; GFX9-NEXT: v_lshl_or_b32 v24, v39, 16, v24 +; GFX9-NEXT: v_lshl_or_b32 v18, v53, 16, v18 +; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v23 ; GFX9-NEXT: v_lshl_or_b32 v25, v38, 16, v25 ; GFX9-NEXT: v_lshl_or_b32 v26, v37, 16, v26 ; GFX9-NEXT: v_lshl_or_b32 v27, v36, 16, v27 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v18, 3, v18 -; GFX9-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v33, 0x300, v18 -; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v44 -; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; GFX9-NEXT: v_lshl_or_b32 v18, v53, 16, v18 -; GFX9-NEXT: v_lshl_or_b32 v30, v33, 16, v30 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v19, 3, v19 -; GFX9-NEXT: v_or_b32_sdwa v19, v60, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v19 -; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v42 -; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; GFX9-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; GFX9-NEXT: v_lshl_or_b32 v19, v52, 16, v19 -; GFX9-NEXT: v_lshl_or_b32 v29, v34, 16, v29 ; GFX9-NEXT: .LBB97_5: ; %end ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload @@ -205289,36 +207222,88 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: buffer_load_dword 
v31, off, s[0:3], s32 offset:8 ; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:4 ; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v15 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v13 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v11 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v7 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v5 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v3 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v1 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v29 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v28 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v27 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; 
implicit-def: $vgpr48 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v26 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr48 +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v25 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr48 +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v24 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v23 +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr48 +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 ; VI-NEXT: v_lshrrev_b32_e32 v60, 16, v16 +; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v15 ; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 ; VI-NEXT: v_lshrrev_b32_e32 v63, 16, v12 ; VI-NEXT: v_lshrrev_b32_e32 v59, 16, v10 @@ -205327,20 +207312,68 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_lshrrev_b32_e32 v56, 16, v4 ; VI-NEXT: v_lshrrev_b32_e32 v57, 16, v2 ; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v30 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v23 ; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v22 ; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v21 ; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v20 -; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v19 -; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v62, 16, v19 +; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v18 +; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v17 +; VI-NEXT: ; implicit-def: $vgpr42 +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr48 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr32 +; VI-NEXT: ; implicit-def: $vgpr61 +; VI-NEXT: ; implicit-def: $vgpr58 +; VI-NEXT: ; implicit-def: $vgpr39 +; VI-NEXT: ; kill: killed $vgpr35 +; VI-NEXT: ; implicit-def: $vgpr35 +; VI-NEXT: ; kill: killed $vgpr40 +; VI-NEXT: ; implicit-def: $vgpr43 +; VI-NEXT: ; implicit-def: $vgpr40 +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; VI-NEXT: ; 
implicit-def: $vgpr41 +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr41 +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr41 +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr41 +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr45 +; VI-NEXT: ; implicit-def: $vgpr44 +; VI-NEXT: ; implicit-def: $vgpr41 +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; VI-NEXT: ; implicit-def: $vgpr48 +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v37 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v36 -; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v17 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; VI-NEXT: ; implicit-def: $vgpr31 ; VI-NEXT: ; kill: killed $vgpr31 ; VI-NEXT: ; implicit-def: $vgpr31 @@ -205411,7 +207444,6 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: ; kill: killed $vgpr31 ; VI-NEXT: ; implicit-def: $vgpr31 ; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr32 ; VI-NEXT: ; implicit-def: $vgpr31 ; VI-NEXT: ; kill: killed $vgpr31 ; VI-NEXT: ; implicit-def: $vgpr31 @@ -205422,151 +207454,53 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: ; kill: killed $vgpr31 ; VI-NEXT: ; implicit-def: $vgpr31 ; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr55 +; VI-NEXT: ; implicit-def: $vgpr31 +; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v37 +; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v36 +; VI-NEXT: ; kill: killed $vgpr31 ; VI-NEXT: ; implicit-def: $vgpr31 ; VI-NEXT: ; kill: killed $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr62 ; VI-NEXT: ; implicit-def: $vgpr31 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr38 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr42 -; VI-NEXT: ; implicit-def: 
$vgpr61 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr58 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr39 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; kill: killed $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr35 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: ; kill: killed $vgpr48 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: ; kill: killed $vgpr48 -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: ; kill: killed $vgpr48 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: ; kill: killed $vgpr48 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; kill: killed $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr43 -; VI-NEXT: ; implicit-def: $vgpr40 -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr45 -; VI-NEXT: ; implicit-def: $vgpr44 -; VI-NEXT: ; implicit-def: $vgpr41 -; VI-NEXT: ; implicit-def: $vgpr54 -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: 
buffer_store_dword v48, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; VI-NEXT: ; implicit-def: $vgpr48 -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; VI-NEXT: s_and_saveexec_b64 s[4:5], vcc ; VI-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; VI-NEXT: s_cbranch_execz .LBB98_2 ; VI-NEXT: ; %bb.1: ; %cmp.false ; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v16 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v16 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v15 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v14 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v14 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v13 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v12 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v12 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v11 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v10 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v10 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v9 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v8 -; VI-NEXT: 
buffer_store_dword v31, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v8 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v7 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v6 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v6 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v5 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[31:32], 24, v[15:16] ; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill @@ -205580,143 +207514,148 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v31, v7 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v9, v10 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v9, v11 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v9, v12 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v9, v13 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v9, v14 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v9, v15 +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v9, v16 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v9, v8 ; VI-NEXT: v_lshrrev_b64 v[7:8], 24, v[7:8] -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 
offset:396 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v7, v5 -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v7, v6 ; VI-NEXT: v_lshrrev_b64 v[5:6], 24, v[5:6] -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v5, 24, v4 -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v3 -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v5, v3 ; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4] ; VI-NEXT: v_lshrrev_b32_e32 v3, 24, v2 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v3, 8, v2 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, v1 ; VI-NEXT: v_lshrrev_b64 v[43:44], 24, v[1:2] ; VI-NEXT: v_lshrrev_b32_e32 v1, 24, v37 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v37 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v36 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v36 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v37 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v3, v2 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; 
VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[36:37] -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v5, v4 -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 24, v30 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v30 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v29 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v29 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v30 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[29:30] ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 24, v28 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v28 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v27 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v27 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v28 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[27:28] ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v26 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v25 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], 
s32 offset:472 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v25 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v26 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[25:26] ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v24 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v23 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v23 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v24 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[23:24] +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v22 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v21 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v21 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v22 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v20 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v19 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v19 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v20 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v17 ; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[19:20] -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v18 
-; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[23:24] ; VI-NEXT: v_lshrrev_b64 v[41:42], 24, v[21:22] -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill ; VI-NEXT: v_mov_b32_e32 v1, v46 ; VI-NEXT: v_lshrrev_b64 v[45:46], 24, v[17:18] -; VI-NEXT: v_mov_b32_e32 v32, v15 ; VI-NEXT: v_lshrrev_b32_e32 v35, 24, v26 ; VI-NEXT: v_lshrrev_b32_e32 v39, 24, v24 ; VI-NEXT: v_lshrrev_b32_e32 v58, 24, v22 ; VI-NEXT: v_lshrrev_b32_e32 v61, 24, v20 -; VI-NEXT: v_lshrrev_b32_e32 v42, 8, v20 -; VI-NEXT: v_lshrrev_b32_e32 v38, 8, v19 ; VI-NEXT: v_lshrrev_b32_e32 v31, 24, v18 -; VI-NEXT: v_lshrrev_b32_e32 v62, 8, v18 -; VI-NEXT: v_lshrrev_b32_e32 v55, 8, v17 +; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v18 +; VI-NEXT: v_lshrrev_b32_e32 v42, 8, v17 ; VI-NEXT: v_mov_b32_e32 v46, v1 ; VI-NEXT: ; implicit-def: $vgpr1 ; VI-NEXT: ; implicit-def: $vgpr3 @@ -205739,93 +207678,89 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: s_cbranch_execz .LBB98_4 ; VI-NEXT: ; %bb.3: ; %cmp.true ; VI-NEXT: v_mov_b32_e32 v31, 3 -; VI-NEXT: v_add_u16_sdwa v51, v18, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_add_u16_sdwa v55, v18, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_add_u16_e32 v32, 3, v18 -; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v51 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v55 +; VI-NEXT: v_add_u16_sdwa v54, v17, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v18, v32, v18 ; VI-NEXT: v_add_u16_e32 v32, 3, v17 -; VI-NEXT: v_add_u16_sdwa v17, v17, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v54 +; VI-NEXT: v_add_u16_sdwa v38, v20, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v17, v32, v17 ; VI-NEXT: v_add_u16_e32 v32, 3, v20 -; VI-NEXT: v_add_u16_sdwa v20, v20, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v38 +; VI-NEXT: v_add_u16_sdwa v62, v19, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v20, v32, v20 ; VI-NEXT: v_add_u16_e32 v32, 3, v19 -; VI-NEXT: v_add_u16_sdwa v19, v19, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v62 ; VI-NEXT: v_add_u16_sdwa v48, v22, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 
offset:364 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v19, v32, v19 ; VI-NEXT: v_add_u16_e32 v32, 3, v22 ; VI-NEXT: v_lshlrev_b32_e32 v22, 16, v48 ; VI-NEXT: v_add_u16_sdwa v53, v21, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v22, v32, v22 ; VI-NEXT: v_add_u16_e32 v32, 3, v21 ; VI-NEXT: v_lshlrev_b32_e32 v21, 16, v53 ; VI-NEXT: v_add_u16_sdwa v61, v24, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v21, v32, v21 ; VI-NEXT: v_add_u16_e32 v32, 3, v24 ; VI-NEXT: v_lshlrev_b32_e32 v24, 16, v61 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; VI-NEXT: v_add_u16_sdwa v49, v23, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v24, v32, v24 ; VI-NEXT: v_add_u16_e32 v32, 3, v23 -; VI-NEXT: v_add_u16_sdwa v23, v23, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; VI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; VI-NEXT: v_lshlrev_b32_e32 v23, 16, v49 ; VI-NEXT: v_add_u16_sdwa v58, v26, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v23, v32, v23 ; VI-NEXT: v_add_u16_e32 v32, 3, v26 ; VI-NEXT: v_lshlrev_b32_e32 v26, 16, v58 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v26, v32, v26 ; VI-NEXT: v_add_u16_e32 v32, 3, v25 ; VI-NEXT: v_add_u16_sdwa v25, v25, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 ; VI-NEXT: v_add_u16_sdwa v39, v28, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v25, v32, v25 ; VI-NEXT: v_add_u16_e32 v32, 3, v28 ; VI-NEXT: v_lshlrev_b32_e32 v28, 16, v39 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v28, v32, v28 ; VI-NEXT: v_add_u16_e32 v32, 3, v27 ; VI-NEXT: v_add_u16_sdwa v27, v27, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; 
VI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 ; VI-NEXT: v_add_u16_sdwa v35, v30, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v27, v32, v27 ; VI-NEXT: v_add_u16_e32 v33, 3, v30 ; VI-NEXT: v_add_u16_e32 v34, 3, v29 ; VI-NEXT: v_add_u16_sdwa v32, v29, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v29, 16, v35 ; VI-NEXT: v_add_u16_sdwa v52, v37, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v30, v33, v29 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v29, 16, v32 ; VI-NEXT: v_add_u16_e32 v33, 3, v37 ; VI-NEXT: v_add_u16_sdwa v50, v36, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v32, 16, v52 -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v29, v34, v29 ; VI-NEXT: v_add_u16_e32 v34, 3, v36 ; VI-NEXT: v_or_b32_e32 v37, v33, v32 ; VI-NEXT: v_lshlrev_b32_e32 v32, 16, v50 ; VI-NEXT: v_add_u16_sdwa v57, v2, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v36, v34, v32 ; VI-NEXT: v_add_u16_e32 v33, 3, v2 ; VI-NEXT: v_add_u16_e32 v34, 3, v1 @@ -205834,9 +207769,9 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_e32 v2, v33, v1 ; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v32 ; VI-NEXT: v_add_u16_sdwa v56, v4, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v1, v34, v1 ; VI-NEXT: v_add_u16_e32 v33, 3, v4 ; VI-NEXT: v_add_u16_e32 v34, 3, v3 @@ -205845,9 +207780,9 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_e32 v4, v33, v3 ; VI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 ; VI-NEXT: v_add_u16_sdwa v47, v6, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; VI-NEXT: 
buffer_store_dword v34, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v3, v34, v3 ; VI-NEXT: v_add_u16_e32 v33, 3, v6 ; VI-NEXT: v_add_u16_e32 v34, 3, v5 @@ -205855,203 +207790,206 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_lshlrev_b32_e32 v5, 16, v47 ; VI-NEXT: v_or_b32_e32 v6, v33, v5 ; VI-NEXT: v_lshlrev_b32_e32 v5, 16, v32 -; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v5, v34, v5 ; VI-NEXT: v_add_u16_sdwa v34, v8, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; VI-NEXT: v_add_u16_e32 v38, 3, v8 +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; VI-NEXT: v_add_u16_e32 v40, 3, v8 ; VI-NEXT: v_add_u16_e32 v33, 3, v7 ; VI-NEXT: v_add_u16_sdwa v32, v7, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v7, 16, v34 -; VI-NEXT: v_or_b32_e32 v8, v38, v7 +; VI-NEXT: v_or_b32_e32 v8, v40, v7 ; VI-NEXT: v_lshlrev_b32_e32 v7, 16, v32 ; VI-NEXT: v_add_u16_sdwa v59, v10, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v7, v33, v7 ; VI-NEXT: v_add_u16_e32 v33, 3, v10 -; VI-NEXT: v_add_u16_e32 v38, 3, v9 +; VI-NEXT: v_add_u16_e32 v40, 3, v9 ; VI-NEXT: v_add_u16_sdwa v32, v9, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v9, 16, v59 ; VI-NEXT: v_or_b32_e32 v10, v33, v9 ; VI-NEXT: v_lshlrev_b32_e32 v9, 16, v32 ; VI-NEXT: v_add_u16_sdwa v63, v12, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; VI-NEXT: v_or_b32_e32 v9, v38, v9 +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; VI-NEXT: v_or_b32_e32 v9, v40, v9 ; VI-NEXT: v_add_u16_e32 v33, 3, v12 -; VI-NEXT: v_add_u16_e32 v38, 3, v11 +; VI-NEXT: v_add_u16_e32 v40, 3, v11 ; VI-NEXT: v_add_u16_sdwa v32, v11, v31 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v63 -; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v12, v33, v11 ; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v32 ; VI-NEXT: v_add_u16_sdwa v33, v14, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; VI-NEXT: v_or_b32_e32 v11, v38, v11 -; VI-NEXT: v_add_u16_e32 v38, 3, v14 -; VI-NEXT: v_add_u16_e32 v49, 3, v13 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; VI-NEXT: v_or_b32_e32 v11, v40, v11 +; VI-NEXT: v_add_u16_e32 v40, 3, v14 +; VI-NEXT: v_add_u16_e32 v41, 3, v13 ; VI-NEXT: v_add_u16_sdwa v32, v13, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v33 ; VI-NEXT: v_add_u16_sdwa v60, v16, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_or_b32_e32 v14, v38, v13 -; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; VI-NEXT: v_or_b32_e32 v14, v40, v13 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v32 ; VI-NEXT: v_add_u16_sdwa v31, v15, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_add_u16_e32 v16, 3, v16 ; VI-NEXT: v_add_u16_e32 v32, 3, v15 ; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v60 -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v16, v16, v15 ; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v31 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; VI-NEXT: v_or_b32_e32 v15, v32, v15 -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v16 -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; VI-NEXT: v_lshrrev_b32_e32 v31, 8, v15 +; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v16 +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v15 ; VI-NEXT: v_lshrrev_b64 v[15:16], 24, v[15:16] -; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: v_or_b32_e32 v13, v49, v13 +; VI-NEXT: v_or_b32_e32 v13, v41, v13 ; VI-NEXT: v_lshrrev_b32_e32 v15, 8, v14 -; VI-NEXT: 
buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v15, 8, v13 ; VI-NEXT: v_lshrrev_b64 v[13:14], 24, v[13:14] -; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v13, 8, v11 ; VI-NEXT: v_lshrrev_b64 v[11:12], 24, v[11:12] -; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v11, 8, v10 -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v11, 8, v9 ; VI-NEXT: v_lshrrev_b64 v[9:10], 24, v[9:10] -; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v9, 8, v8 -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v9, 8, v7 ; VI-NEXT: v_lshrrev_b64 v[7:8], 24, v[7:8] -; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v6 -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v7, 8, v5 ; VI-NEXT: v_lshrrev_b64 v[5:6], 24, v[5:6] -; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v5, 8, v3 ; VI-NEXT: v_lshrrev_b64 v[40:41], 24, v[3:4] ; VI-NEXT: v_lshrrev_b32_e32 v3, 8, v2 -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; 
VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v3, 8, v1 ; VI-NEXT: v_lshrrev_b64 v[43:44], 24, v[1:2] ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v37 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v36 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[36:37] -; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v30 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v29 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[29:30] ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v28 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v27 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[27:28] ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v26 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v25 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[25:26] ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v24 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v23 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[1:2], 24, v[23:24] +; VI-NEXT: buffer_store_dword v1, off, 
s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v22 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill ; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v21 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v20 ; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v19 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v60, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v33, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v63, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v59, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v34, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v47, 8, 8 ; VI-NEXT: v_lshrrev_b64 v[44:45], 24, v[19:20] -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v56, 8, 8 ; VI-NEXT: v_lshrrev_b64 v[45:46], 24, v[17:18] -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v57, 8, 8 ; VI-NEXT: v_mov_b32_e32 v46, v35 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v52, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v46, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: v_lshrrev_b64 v[41:42], 24, v[21:22] +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v1, v39, 8, 8 -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: v_mov_b32_e32 v51, v49 ; VI-NEXT: v_mov_b32_e32 v49, v53 -; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v52, v51 -; VI-NEXT: v_bfe_u32 v31, v51, 8, 8 -; 
VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; VI-NEXT: v_lshrrev_b64 v[54:55], 24, v[23:24] -; VI-NEXT: v_lshrrev_b64 v[41:42], 24, v[21:22] -; VI-NEXT: v_lshrrev_b32_e32 v42, 8, v20 -; VI-NEXT: v_lshrrev_b32_e32 v38, 8, v19 -; VI-NEXT: v_lshrrev_b32_e32 v62, 8, v18 -; VI-NEXT: v_lshrrev_b32_e32 v55, 8, v17 +; VI-NEXT: v_mov_b32_e32 v53, v38 +; VI-NEXT: v_mov_b32_e32 v38, v55 +; VI-NEXT: v_lshrrev_b32_e32 v32, 8, v18 +; VI-NEXT: v_lshrrev_b32_e32 v42, 8, v17 +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v35, v58, 8, 8 -; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; VI-NEXT: v_bfe_u32 v39, v61, 8, 8 ; VI-NEXT: v_bfe_u32 v58, v48, 8, 8 -; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_mov_b32_e32 v55, v31 ; VI-NEXT: v_bfe_u32 v61, v53, 8, 8 +; VI-NEXT: v_bfe_u32 v31, v38, 8, 8 +; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; VI-NEXT: .LBB98_4: ; %end ; VI-NEXT: s_or_b64 exec, exec, s[4:5] -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -206061,22 +207999,22 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v57, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 4, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; 
VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -206087,28 +208025,28 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 8, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v56, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 12, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206116,28 +208054,28 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 16, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa 
v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v47, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 20, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206145,28 +208083,28 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 24, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v34, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 28, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v2, 
off, s[0:3], s32 offset:100 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206174,28 +208112,28 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 32, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v59, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 36, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206203,28 +208141,28 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 40, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 44, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206232,76 +208170,79 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 48, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v33, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 52, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:80 
; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 -; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v55, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 56, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v60, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 60, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v55 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v42 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v45 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v54, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 64, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v62 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v32 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v31 -; VI-NEXT: v_or_b32_sdwa v2, v52, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x44, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v38 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v44 -; VI-NEXT: v_or_b32_sdwa v2, v51, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v62, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x48, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v42 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v61 @@ -206309,8 +208250,8 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x4c, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206320,9 +208261,9 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x50, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -206333,22 +208274,23 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x54, v0 ; VI-NEXT: buffer_store_dword 
v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v54 +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 +; VI-NEXT: v_or_b32_sdwa v2, v51, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x58, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -206359,15 +208301,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x5c, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206375,9 +208317,9 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x60, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) @@ -206388,15 +208330,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x64, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206404,29 +208346,29 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x68, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x6c, v0 ; VI-NEXT: 
buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(2) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206434,21 +208376,21 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x70, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 ; VI-NEXT: v_or_b32_sdwa v2, v46, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x74, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(0) @@ -206461,18 +208403,17 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_add_u32_e32 v2, vcc, 0x78, v0 ; VI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte 
Folded Reload ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x7c, v0 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 ; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b16_e32 v1, 8, v1 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_lshlrev_b16_e32 v2, 8, v2 -; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v2, v52, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload @@ -207482,55 +209423,55 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: 
$vgpr130_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 @@ -207556,24 +209497,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -207585,24 +209526,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; 
GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] @@ -207664,24 +209605,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18] -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 
v101, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3 @@ -207693,24 +209634,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17 ; GFX11-TRUE16-NEXT: .LBB98_4: ; %end ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0 @@ -207748,7 +209689,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, 
v160.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l ; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h @@ -207756,15 +209697,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l ; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l ; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h @@ -207772,15 +209713,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l ; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l ; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h @@ -207788,15 +209729,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l ; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h @@ -207804,15 +209745,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l +; 
GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l ; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l ; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h @@ -207820,15 +209761,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l ; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h @@ -207836,8 +209777,8 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l ; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15 @@ -207852,15 +209793,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l ; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l ; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h @@ -207868,15 +209809,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 
v34.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l ; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l ; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h @@ -207884,15 +209825,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l ; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l ; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h @@ -207900,15 +209841,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l ; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l ; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h @@ -207916,15 +209857,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l ; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l 
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l ; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h @@ -207932,15 +209873,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l ; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27 ; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h ; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h @@ -208639,1444 +210580,1726 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) { ; GFX11-FAKE16-NEXT: v_or_b32_e32 v20, v20, v21 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v21, v22, v23 ; GFX11-FAKE16-NEXT: v_or_b32_e32 v22, v24, v25 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, v26, v27 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, v28, v29 -; GFX11-FAKE16-NEXT: s_clause 0x5 -; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:32 -; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:48 -; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:64 -; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80 -; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96 -; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[21:24], off offset:112 -; GFX11-FAKE16-NEXT: s_clause 0x13 -; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:12 -; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:16 -; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:20 -; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:24 -; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:28 -; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:32 -; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:36 -; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:40 -; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:44 -; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:48 -; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:52 -; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:56 -; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:60 -; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:64 -; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:68 -; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:72 -; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:76 -; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:80 -; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:84 -; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:88 -; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] - %cmp = icmp eq i32 %b, 0 - br i1 %cmp, label %cmp.true, label %cmp.false - -cmp.true: - %a1 = add <64 x i16> %a, splat (i16 3) - %a2 = bitcast <64 x i16> %a1 to <128 x i8> - br label %end - -cmp.false: - %a3 = bitcast <64 x i16> 
%a to <128 x i8> - br label %end - -end: - %phi = phi <128 x i8> [ %a2, %cmp.true ], [ %a3, %cmp.false ] - ret <128 x i8> %phi -} - -define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i32 inreg %b) { -; SI-LABEL: bitcast_v64i16_to_v128i8_scalar: -; SI: ; %bb.0: -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_mov_b64 exec, s[4:5] -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:80 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 -; SI-NEXT: v_writelane_b32 v63, s30, 0 -; SI-NEXT: v_writelane_b32 v63, s31, 1 -; SI-NEXT: v_writelane_b32 v63, s34, 2 -; SI-NEXT: v_writelane_b32 v63, s35, 3 -; SI-NEXT: v_writelane_b32 v63, s36, 4 -; SI-NEXT: v_writelane_b32 v63, s37, 5 -; SI-NEXT: v_writelane_b32 v63, s38, 6 -; SI-NEXT: v_writelane_b32 v63, s39, 7 -; SI-NEXT: v_writelane_b32 v63, s48, 8 -; SI-NEXT: v_writelane_b32 v63, s49, 9 -; SI-NEXT: v_writelane_b32 v63, s50, 10 -; SI-NEXT: v_writelane_b32 v63, 
s51, 11 -; SI-NEXT: v_writelane_b32 v63, s52, 12 -; SI-NEXT: v_writelane_b32 v63, s53, 13 -; SI-NEXT: v_writelane_b32 v63, s54, 14 -; SI-NEXT: v_writelane_b32 v63, s55, 15 -; SI-NEXT: v_writelane_b32 v63, s64, 16 -; SI-NEXT: v_writelane_b32 v63, s65, 17 -; SI-NEXT: v_writelane_b32 v63, s66, 18 -; SI-NEXT: v_writelane_b32 v63, s67, 19 -; SI-NEXT: v_writelane_b32 v63, s68, 20 -; SI-NEXT: v_writelane_b32 v63, s69, 21 -; SI-NEXT: v_writelane_b32 v63, s70, 22 -; SI-NEXT: v_writelane_b32 v63, s71, 23 -; SI-NEXT: v_writelane_b32 v63, s80, 24 -; SI-NEXT: v_writelane_b32 v63, s81, 25 -; SI-NEXT: v_writelane_b32 v63, s82, 26 -; SI-NEXT: v_writelane_b32 v63, s83, 27 -; SI-NEXT: v_writelane_b32 v63, s84, 28 -; SI-NEXT: v_writelane_b32 v63, s85, 29 -; SI-NEXT: v_writelane_b32 v63, s86, 30 -; SI-NEXT: v_writelane_b32 v63, s87, 31 -; SI-NEXT: v_writelane_b32 v63, s96, 32 -; SI-NEXT: v_writelane_b32 v63, s97, 33 -; SI-NEXT: v_writelane_b32 v63, s98, 34 -; SI-NEXT: s_mov_b32 s6, s18 -; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane -; SI-NEXT: v_writelane_b32 v63, s99, 35 -; SI-NEXT: v_readfirstlane_b32 s62, v30 -; SI-NEXT: v_readfirstlane_b32 s63, v29 -; SI-NEXT: v_readfirstlane_b32 s59, v26 -; SI-NEXT: v_readfirstlane_b32 s60, v25 -; SI-NEXT: v_readfirstlane_b32 s98, v22 -; SI-NEXT: v_readfirstlane_b32 s61, v21 -; SI-NEXT: v_readfirstlane_b32 s99, v18 -; SI-NEXT: v_readfirstlane_b32 s58, v17 -; SI-NEXT: v_readfirstlane_b32 s96, v14 -; SI-NEXT: v_readfirstlane_b32 s97, v13 -; SI-NEXT: v_readfirstlane_b32 s86, v10 -; SI-NEXT: v_readfirstlane_b32 s87, v9 -; SI-NEXT: v_readfirstlane_b32 s84, v6 -; SI-NEXT: v_readfirstlane_b32 s85, v5 -; SI-NEXT: v_readfirstlane_b32 s81, v2 -; SI-NEXT: v_readfirstlane_b32 s82, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v24 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s88, v36 -; SI-NEXT: v_readfirstlane_b32 s18, v37 -; SI-NEXT: v_readfirstlane_b32 s78, v38 -; SI-NEXT: v_readfirstlane_b32 s79, v39 -; SI-NEXT: v_readfirstlane_b32 s76, v48 -; SI-NEXT: v_readfirstlane_b32 s77, v49 -; SI-NEXT: v_readfirstlane_b32 s74, v50 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_readfirstlane_b32 s75, v51 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_readfirstlane_b32 s72, v52 -; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_readfirstlane_b32 s73, v53 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v54 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v28 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v40 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v55 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v41 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v42 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v43 -; SI-NEXT: v_writelane_b32 v62, s6, 0 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB99_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s56, s4, s5 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, 
s19, 16 -; SI-NEXT: s_or_b32 s57, s4, s5 -; SI-NEXT: v_mov_b32_e32 v1, s56 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: v_alignbit_b32 v8, s57, v1, 24 -; SI-NEXT: v_alignbit_b32 v50, s57, v1, 16 -; SI-NEXT: v_alignbit_b32 v1, s57, v1, 8 -; SI-NEXT: s_or_b32 s46, s4, s5 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_or_b32 s47, s4, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v1, s46 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, s47, v1, 24 -; SI-NEXT: s_or_b32 s44, s4, s5 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, s47, v1, 16 -; SI-NEXT: v_alignbit_b32 v51, s47, v1, 8 -; SI-NEXT: s_or_b32 s45, s4, s5 -; SI-NEXT: v_mov_b32_e32 v1, s44 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, s45, v1, 24 -; SI-NEXT: s_or_b32 s42, s4, s5 -; SI-NEXT: s_and_b32 s4, s82, 0xffff -; SI-NEXT: s_lshl_b32 s5, s81, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, s45, v1, 16 -; SI-NEXT: v_alignbit_b32 v49, s45, v1, 8 -; SI-NEXT: s_or_b32 s43, s4, s5 -; SI-NEXT: v_mov_b32_e32 v1, s42 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, s43, v1, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, s43, v1, 16 -; SI-NEXT: v_alignbit_b32 v48, s43, v1, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: s_and_b32 s4, s85, 0xffff -; SI-NEXT: s_lshl_b32 s5, s84, 16 -; SI-NEXT: v_or_b32_e32 v16, v1, v2 -; SI-NEXT: s_or_b32 s41, s4, s5 -; SI-NEXT: v_alignbit_b32 v1, s41, v16, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s41, v16, 16 -; SI-NEXT: s_and_b32 s4, s87, 0xffff -; SI-NEXT: s_lshl_b32 s5, s86, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s41, v16, 8 -; SI-NEXT: s_or_b32 s40, s4, s5 -; SI-NEXT: s_and_b32 s4, s97, 0xffff -; SI-NEXT: s_lshl_b32 s5, s96, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 -; SI-NEXT: s_or_b32 s15, s4, s5 -; SI-NEXT: s_and_b32 s4, s58, 0xffff -; SI-NEXT: s_lshl_b32 s5, s99, 16 -; SI-NEXT: v_or_b32_e32 v14, v1, v4 -; SI-NEXT: s_or_b32 s14, s4, s5 -; SI-NEXT: s_and_b32 s4, s61, 0xffff -; SI-NEXT: s_lshl_b32 s5, s98, 16 -; SI-NEXT: v_alignbit_b32 v1, s40, v14, 24 -; SI-NEXT: s_or_b32 s13, s4, s5 -; SI-NEXT: s_and_b32 s4, s60, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_alignbit_b32 v1, s40, v14, 16 -; SI-NEXT: s_or_b32 s12, s4, s5 -; SI-NEXT: s_and_b32 s4, s63, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s40, v14, 8 -; SI-NEXT: s_or_b32 s11, s4, s5 -; SI-NEXT: s_and_b32 s4, s73, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: s_or_b32 s10, s4, s5 -; SI-NEXT: s_and_b32 s4, s75, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: v_or_b32_e32 v12, v1, v5 -; SI-NEXT: s_or_b32 s9, s4, s5 -; SI-NEXT: s_and_b32 s4, s77, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: v_alignbit_b32 v1, s15, v12, 24 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: s_and_b32 s4, s79, 0xffff -; SI-NEXT: s_lshl_b32 s5, s78, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s15, v12, 16 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s88, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s15, v12, 8 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_lshr_b32 s4, s11, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 -; SI-NEXT: v_writelane_b32 v62, s4, 1 -; SI-NEXT: s_lshr_b32 s4, s10, 8 -; SI-NEXT: v_or_b32_e32 v10, v1, v6 -; SI-NEXT: v_writelane_b32 v62, s4, 3 -; SI-NEXT: s_lshr_b32 s4, s9, 8 -; SI-NEXT: v_alignbit_b32 v1, s14, v10, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 6 -; SI-NEXT: s_lshr_b32 s4, s8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s14, v10, 16 -; SI-NEXT: v_writelane_b32 v62, s4, 9 -; SI-NEXT: s_lshr_b32 s4, s7, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s14, v10, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 12 -; SI-NEXT: s_lshr_b32 s4, s6, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v19 -; SI-NEXT: v_writelane_b32 v62, s4, 15 -; SI-NEXT: s_and_b32 s4, s72, 0xffff -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v8, v1, v9 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v23 -; SI-NEXT: v_writelane_b32 v62, s4, 2 -; SI-NEXT: s_and_b32 s4, s74, 0xffff -; SI-NEXT: v_or_b32_e32 v5, v1, v13 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v27 -; SI-NEXT: v_writelane_b32 v62, s4, 5 -; SI-NEXT: s_and_b32 s4, s76, 0xffff -; SI-NEXT: v_mov_b32_e32 v28, v13 -; SI-NEXT: v_or_b32_e32 v13, v1, v17 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v31 -; SI-NEXT: v_writelane_b32 v62, s4, 8 -; SI-NEXT: s_and_b32 s4, s78, 0xffff -; SI-NEXT: v_mov_b32_e32 v26, v9 -; SI-NEXT: v_or_b32_e32 v9, v1, v18 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v32 -; SI-NEXT: v_writelane_b32 v62, s4, 11 -; SI-NEXT: s_and_b32 s4, s88, 0xffff -; SI-NEXT: v_mov_b32_e32 v25, v6 -; SI-NEXT: v_or_b32_e32 v6, v1, v20 -; SI-NEXT: 
v_and_b32_e32 v1, 0xffff, v33 -; SI-NEXT: v_writelane_b32 v62, s4, 14 -; SI-NEXT: s_bfe_u32 s4, s74, 0x80008 -; SI-NEXT: v_or_b32_e32 v4, v1, v21 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 -; SI-NEXT: v_writelane_b32 v62, s4, 4 -; SI-NEXT: s_bfe_u32 s4, s76, 0x80008 -; SI-NEXT: v_or_b32_e32 v2, v1, v22 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 -; SI-NEXT: v_writelane_b32 v62, s4, 7 -; SI-NEXT: s_bfe_u32 s4, s78, 0x80008 -; SI-NEXT: v_or_b32_e32 v1, v1, v24 -; SI-NEXT: v_writelane_b32 v62, s4, 10 -; SI-NEXT: s_bfe_u32 s4, s88, 0x80008 -; SI-NEXT: v_mov_b32_e32 v29, v17 -; SI-NEXT: v_mov_b32_e32 v30, v18 -; SI-NEXT: v_mov_b32_e32 v36, v20 -; SI-NEXT: v_mov_b32_e32 v37, v21 -; SI-NEXT: v_mov_b32_e32 v38, v22 -; SI-NEXT: v_mov_b32_e32 v39, v24 -; SI-NEXT: s_lshr_b32 s68, s57, 8 -; SI-NEXT: s_lshr_b32 s65, s47, 8 -; SI-NEXT: s_lshr_b32 s54, s45, 8 -; SI-NEXT: s_lshr_b32 s51, s43, 8 -; SI-NEXT: s_lshr_b32 s48, s41, 8 -; SI-NEXT: s_lshr_b32 s37, s40, 8 -; SI-NEXT: s_lshr_b32 s34, s15, 8 -; SI-NEXT: s_lshr_b32 s95, s14, 8 -; SI-NEXT: s_lshr_b32 s92, s13, 8 -; SI-NEXT: s_lshr_b32 s89, s12, 8 -; SI-NEXT: s_and_b32 s71, s19, 0xffff -; SI-NEXT: s_and_b32 s69, s23, 0xffff -; SI-NEXT: s_and_b32 s66, s27, 0xffff -; SI-NEXT: s_and_b32 s55, s81, 0xffff -; SI-NEXT: s_and_b32 s52, s84, 0xffff -; SI-NEXT: s_and_b32 s49, s86, 0xffff -; SI-NEXT: s_and_b32 s38, s96, 0xffff -; SI-NEXT: s_and_b32 s35, s99, 0xffff -; SI-NEXT: s_and_b32 s30, s98, 0xffff -; SI-NEXT: s_and_b32 s93, s59, 0xffff -; SI-NEXT: s_and_b32 s90, s62, 0xffff -; SI-NEXT: s_bfe_u32 s83, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s80, s23, 0x80008 -; SI-NEXT: s_bfe_u32 s70, s27, 0x80008 -; SI-NEXT: s_bfe_u32 s67, s81, 0x80008 -; SI-NEXT: s_bfe_u32 s64, s84, 0x80008 -; SI-NEXT: s_bfe_u32 s53, s86, 0x80008 -; SI-NEXT: s_bfe_u32 s50, s96, 0x80008 -; SI-NEXT: s_bfe_u32 s39, s99, 0x80008 -; SI-NEXT: s_bfe_u32 s36, s98, 0x80008 -; SI-NEXT: s_bfe_u32 s31, s59, 0x80008 -; SI-NEXT: s_bfe_u32 s94, s62, 0x80008 -; SI-NEXT: s_bfe_u32 s91, s72, 0x80008 -; SI-NEXT: v_writelane_b32 v62, s4, 13 -; SI-NEXT: v_alignbit_b32 v45, s13, v8, 24 -; SI-NEXT: v_alignbit_b32 v47, s13, v8, 16 -; SI-NEXT: v_alignbit_b32 v57, s13, v8, 8 -; SI-NEXT: v_alignbit_b32 v41, s12, v5, 24 -; SI-NEXT: v_alignbit_b32 v43, s12, v5, 16 -; SI-NEXT: v_alignbit_b32 v44, s12, v5, 8 -; SI-NEXT: v_alignbit_b32 v21, s11, v13, 24 -; SI-NEXT: v_alignbit_b32 v22, s11, v13, 16 -; SI-NEXT: v_alignbit_b32 v24, s11, v13, 8 -; SI-NEXT: v_alignbit_b32 v17, s10, v9, 24 -; SI-NEXT: v_alignbit_b32 v18, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v20, s10, v9, 8 -; SI-NEXT: v_alignbit_b32 v59, s9, v6, 24 -; SI-NEXT: v_alignbit_b32 v60, s9, v6, 16 -; SI-NEXT: v_alignbit_b32 v61, s9, v6, 8 -; SI-NEXT: v_alignbit_b32 v46, s8, v4, 24 -; SI-NEXT: v_alignbit_b32 v56, s8, v4, 16 -; SI-NEXT: v_alignbit_b32 v58, s8, v4, 8 -; SI-NEXT: v_alignbit_b32 v55, s7, v2, 24 -; SI-NEXT: v_alignbit_b32 v40, s7, v2, 16 -; SI-NEXT: v_alignbit_b32 v42, s7, v2, 8 -; SI-NEXT: v_alignbit_b32 v52, s6, v1, 24 -; SI-NEXT: v_alignbit_b32 v53, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, s6, v1, 8 -; SI-NEXT: s_cbranch_execnz .LBB99_3 -; SI-NEXT: .LBB99_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s88, 16 -; SI-NEXT: s_or_b32 s4, s5, 
s4 -; SI-NEXT: s_add_i32 s79, s79, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s79, 0xffff -; SI-NEXT: s_lshl_b32 s5, s78, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s77, s77, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s77, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s75, s75, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s75, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s73, s73, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s73, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s63, s63, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s63, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s60, s60, 3 -; SI-NEXT: s_add_i32 s11, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s60, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s61, s61, 3 -; SI-NEXT: s_add_i32 s12, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s61, 0xffff -; SI-NEXT: s_lshl_b32 s5, s98, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s58, s58, 3 -; SI-NEXT: s_add_i32 s13, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s58, 0xffff -; SI-NEXT: s_lshl_b32 s5, s99, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s97, s97, 3 -; SI-NEXT: s_add_i32 s14, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s97, 0xffff -; SI-NEXT: s_lshl_b32 s5, s96, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s87, s87, 3 -; SI-NEXT: s_add_i32 s15, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s87, 0xffff -; SI-NEXT: s_lshl_b32 s5, s86, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s85, s85, 3 -; SI-NEXT: s_add_i32 s40, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s85, 0xffff -; SI-NEXT: s_lshl_b32 s5, s84, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s41, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s82, s82, 3 -; SI-NEXT: s_add_i32 s42, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s82, 0xffff -; SI-NEXT: s_lshl_b32 s5, s81, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s43, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s44, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s45, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s46, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s47, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s56, s4, 0x30000 -; SI-NEXT: v_readlane_b32 s4, v62, 0 -; SI-NEXT: s_add_i32 s4, s4, 3 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s57, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v21, s56 -; SI-NEXT: v_alignbit_b32 v22, s57, v21, 24 -; SI-NEXT: 
v_alignbit_b32 v50, s57, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s57, v21, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v21, s46 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s47, v21, 24 -; SI-NEXT: s_lshr_b32 s4, s11, 8 -; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s47, v21, 16 -; SI-NEXT: v_alignbit_b32 v51, s47, v21, 8 -; SI-NEXT: v_mov_b32_e32 v21, s44 -; SI-NEXT: v_writelane_b32 v62, s4, 1 -; SI-NEXT: s_lshr_b32 s4, s10, 16 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v32 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s45, v21, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 2 -; SI-NEXT: s_lshr_b32 s4, s10, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v7 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_or_b32_e32 v3, v16, v3 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s45, v21, 16 -; SI-NEXT: v_alignbit_b32 v49, s45, v21, 8 -; SI-NEXT: v_mov_b32_e32 v21, s42 -; SI-NEXT: v_writelane_b32 v62, s4, 3 -; SI-NEXT: s_lshr_b32 s4, s9, 24 -; SI-NEXT: v_or_b32_e32 v5, v36, v5 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v3 -; SI-NEXT: v_mov_b32_e32 v3, s41 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s43, v21, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 4 -; SI-NEXT: s_lshr_b32 s4, s9, 16 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x30000, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v31 -; SI-NEXT: v_or_b32_e32 v7, v14, v7 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s43, v21, 16 -; SI-NEXT: v_alignbit_b32 v48, s43, v21, 8 -; SI-NEXT: v_alignbit_b32 v21, v3, v16, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 5 -; SI-NEXT: s_lshr_b32 s4, s9, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_add_i32_e32 v11, vcc, 3, v11 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v7 -; SI-NEXT: v_mov_b32_e32 v7, s40 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v3, v16, 16 -; SI-NEXT: v_alignbit_b32 v3, v3, v16, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 6 -; SI-NEXT: s_lshr_b32 s4, s8, 24 -; SI-NEXT: v_or_b32_e32 v5, v30, v5 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v7, v14, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 7 -; SI-NEXT: s_lshr_b32 s4, s8, 16 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v27 -; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v7, v14, 16 -; SI-NEXT: v_writelane_b32 v62, s4, 8 -; SI-NEXT: s_lshr_b32 s4, 
s8, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v15 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v11 -; SI-NEXT: v_mov_b32_e32 v11, s15 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v7, v14, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 9 -; SI-NEXT: s_lshr_b32 s4, s7, 24 -; SI-NEXT: v_or_b32_e32 v5, v29, v5 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v11, v12, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 10 -; SI-NEXT: s_lshr_b32 s4, s7, 16 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v35 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v34 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v33 -; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v23 -; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v19 -; SI-NEXT: v_or_b32_e32 v10, v25, v10 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v11, v12, 16 -; SI-NEXT: v_writelane_b32 v62, s4, 11 -; SI-NEXT: s_lshr_b32 s4, s7, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v10 -; SI-NEXT: v_mov_b32_e32 v15, s14 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v11, v12, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 12 -; SI-NEXT: s_lshr_b32 s4, s6, 24 -; SI-NEXT: v_or_b32_e32 v1, v39, v1 -; SI-NEXT: v_or_b32_e32 v2, v38, v2 -; SI-NEXT: v_or_b32_e32 v4, v37, v4 -; SI-NEXT: v_or_b32_e32 v5, v28, v5 -; SI-NEXT: v_or_b32_e32 v8, v26, v8 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v15, v10, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 13 -; SI-NEXT: s_lshr_b32 s4, s6, 16 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x30000, v1 -; SI-NEXT: v_mov_b32_e32 v35, s6 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x30000, v2 -; SI-NEXT: v_mov_b32_e32 v34, s7 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x30000, v4 -; SI-NEXT: v_mov_b32_e32 v33, s8 -; SI-NEXT: v_mov_b32_e32 v32, s9 -; SI-NEXT: v_mov_b32_e32 v20, s10 -; SI-NEXT: v_mov_b32_e32 v17, s11 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x30000, v5 -; SI-NEXT: v_mov_b32_e32 v18, s12 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v8 -; SI-NEXT: v_mov_b32_e32 v19, s13 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v15, v10, 16 -; SI-NEXT: v_writelane_b32 v62, s4, 14 -; SI-NEXT: s_lshr_b32 s4, s6, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v15, v10, 8 -; SI-NEXT: v_alignbit_b32 v45, v19, v8, 24 -; SI-NEXT: v_alignbit_b32 v47, v19, v8, 16 -; SI-NEXT: v_alignbit_b32 v57, v19, v8, 8 -; SI-NEXT: v_alignbit_b32 v41, v18, v5, 24 -; SI-NEXT: v_alignbit_b32 v43, v18, v5, 16 -; SI-NEXT: v_alignbit_b32 v44, v18, v5, 8 -; SI-NEXT: 
v_alignbit_b32 v21, v17, v13, 24 -; SI-NEXT: v_alignbit_b32 v22, v17, v13, 16 -; SI-NEXT: v_alignbit_b32 v24, v17, v13, 8 -; SI-NEXT: v_alignbit_b32 v17, v20, v9, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v9, 16 -; SI-NEXT: v_alignbit_b32 v20, v20, v9, 8 -; SI-NEXT: v_alignbit_b32 v59, v32, v6, 24 -; SI-NEXT: v_alignbit_b32 v60, v32, v6, 16 -; SI-NEXT: v_alignbit_b32 v61, v32, v6, 8 -; SI-NEXT: v_alignbit_b32 v46, v33, v4, 24 -; SI-NEXT: v_alignbit_b32 v56, v33, v4, 16 -; SI-NEXT: v_alignbit_b32 v58, v33, v4, 8 -; SI-NEXT: v_alignbit_b32 v55, v34, v2, 24 -; SI-NEXT: v_alignbit_b32 v40, v34, v2, 16 -; SI-NEXT: v_alignbit_b32 v42, v34, v2, 8 -; SI-NEXT: v_alignbit_b32 v52, v35, v1, 24 -; SI-NEXT: v_alignbit_b32 v53, v35, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v35, v1, 8 -; SI-NEXT: s_lshr_b32 s83, s57, 24 -; SI-NEXT: s_lshr_b32 s71, s57, 16 -; SI-NEXT: s_lshr_b32 s68, s57, 8 -; SI-NEXT: s_lshr_b32 s80, s47, 24 -; SI-NEXT: s_lshr_b32 s69, s47, 16 -; SI-NEXT: s_lshr_b32 s65, s47, 8 -; SI-NEXT: s_lshr_b32 s70, s45, 24 -; SI-NEXT: s_lshr_b32 s66, s45, 16 -; SI-NEXT: s_lshr_b32 s54, s45, 8 -; SI-NEXT: s_lshr_b32 s67, s43, 24 -; SI-NEXT: s_lshr_b32 s55, s43, 16 -; SI-NEXT: s_lshr_b32 s51, s43, 8 -; SI-NEXT: s_lshr_b32 s64, s41, 24 -; SI-NEXT: s_lshr_b32 s52, s41, 16 -; SI-NEXT: s_lshr_b32 s48, s41, 8 -; SI-NEXT: s_lshr_b32 s53, s40, 24 -; SI-NEXT: s_lshr_b32 s49, s40, 16 -; SI-NEXT: s_lshr_b32 s37, s40, 8 -; SI-NEXT: s_lshr_b32 s50, s15, 24 -; SI-NEXT: s_lshr_b32 s38, s15, 16 -; SI-NEXT: s_lshr_b32 s34, s15, 8 -; SI-NEXT: s_lshr_b32 s39, s14, 24 -; SI-NEXT: s_lshr_b32 s35, s14, 16 -; SI-NEXT: s_lshr_b32 s95, s14, 8 -; SI-NEXT: s_lshr_b32 s36, s13, 24 -; SI-NEXT: s_lshr_b32 s30, s13, 16 -; SI-NEXT: s_lshr_b32 s92, s13, 8 -; SI-NEXT: s_lshr_b32 s31, s12, 24 -; SI-NEXT: s_lshr_b32 s93, s12, 16 -; SI-NEXT: s_lshr_b32 s89, s12, 8 -; SI-NEXT: s_lshr_b32 s94, s11, 24 -; SI-NEXT: s_lshr_b32 s90, s11, 16 -; SI-NEXT: s_lshr_b32 s91, s10, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 15 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: .LBB99_3: ; %end -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_and_b32 s4, s56, 0xff -; SI-NEXT: s_lshl_b32 s5, s68, 8 -; SI-NEXT: s_lshl_b32 s16, s83, 24 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_readlane_b32 s99, v63, 35 -; SI-NEXT: v_readlane_b32 s98, v63, 34 -; SI-NEXT: v_readlane_b32 s97, v63, 33 -; SI-NEXT: v_readlane_b32 s96, v63, 32 -; SI-NEXT: v_readlane_b32 s87, v63, 31 -; SI-NEXT: v_readlane_b32 s86, v63, 30 -; SI-NEXT: v_readlane_b32 s85, v63, 29 -; SI-NEXT: v_readlane_b32 s84, v63, 28 -; SI-NEXT: v_readlane_b32 s83, v63, 27 -; SI-NEXT: v_readlane_b32 s82, v63, 26 -; SI-NEXT: v_readlane_b32 s81, v63, 25 -; SI-NEXT: v_readlane_b32 s68, v63, 20 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_or_b32_e32 v3, v7, v3 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s57, 0xff -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s71, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v3, v7, 
v3 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: s_and_b32 s4, s46, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v51 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s47, 0xff -; SI-NEXT: s_lshl_b32 s5, s65, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s69, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s80, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s54, 8 -; SI-NEXT: s_lshl_b32 s16, s70, 24 -; SI-NEXT: v_readlane_b32 s80, v63, 24 -; SI-NEXT: v_readlane_b32 s71, v63, 23 -; SI-NEXT: v_readlane_b32 s70, v63, 22 -; SI-NEXT: v_readlane_b32 s69, v63, 21 -; SI-NEXT: v_readlane_b32 s65, v63, 17 -; SI-NEXT: v_readlane_b32 s54, v63, 14 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_and_b32 s4, s44, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v49 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s66, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s51, 8 -; SI-NEXT: s_lshl_b32 s16, s67, 24 -; SI-NEXT: v_readlane_b32 s67, v63, 19 -; SI-NEXT: v_readlane_b32 s66, v63, 18 -; SI-NEXT: v_readlane_b32 s51, v63, 11 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_and_b32 s4, s42, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v48 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s55, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: 
s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s48, 8 -; SI-NEXT: s_lshl_b32 s16, s64, 24 -; SI-NEXT: v_readlane_b32 s64, v63, 16 -; SI-NEXT: v_readlane_b32 s55, v63, 15 -; SI-NEXT: v_readlane_b32 s48, v63, 8 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xff, v16 -; SI-NEXT: s_and_b32 s4, s41, 0xff -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s52, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s37, 8 -; SI-NEXT: s_lshl_b32 s16, s53, 24 -; SI-NEXT: v_readlane_b32 s53, v63, 13 -; SI-NEXT: v_readlane_b32 s52, v63, 12 -; SI-NEXT: v_readlane_b32 s37, v63, 5 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xff, v14 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s49, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s34, 8 -; SI-NEXT: v_readlane_b32 s49, v63, 9 -; SI-NEXT: v_readlane_b32 s34, v63, 2 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v23, v26, v27 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v24, v28, v29 +; GFX11-FAKE16-NEXT: s_clause 0x5 +; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[1:4], off offset:32 +; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[5:8], off offset:48 +; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[9:12], off offset:64 +; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[13:16], off offset:80 +; GFX11-FAKE16-NEXT: scratch_store_b128 v0, v[17:20], off offset:96 +; GFX11-FAKE16-NEXT: 
scratch_store_b128 v0, v[21:24], off offset:112 +; GFX11-FAKE16-NEXT: s_clause 0x13 +; GFX11-FAKE16-NEXT: scratch_load_b32 v75, off, s32 offset:12 +; GFX11-FAKE16-NEXT: scratch_load_b32 v74, off, s32 offset:16 +; GFX11-FAKE16-NEXT: scratch_load_b32 v73, off, s32 offset:20 +; GFX11-FAKE16-NEXT: scratch_load_b32 v72, off, s32 offset:24 +; GFX11-FAKE16-NEXT: scratch_load_b32 v63, off, s32 offset:28 +; GFX11-FAKE16-NEXT: scratch_load_b32 v62, off, s32 offset:32 +; GFX11-FAKE16-NEXT: scratch_load_b32 v61, off, s32 offset:36 +; GFX11-FAKE16-NEXT: scratch_load_b32 v60, off, s32 offset:40 +; GFX11-FAKE16-NEXT: scratch_load_b32 v59, off, s32 offset:44 +; GFX11-FAKE16-NEXT: scratch_load_b32 v58, off, s32 offset:48 +; GFX11-FAKE16-NEXT: scratch_load_b32 v57, off, s32 offset:52 +; GFX11-FAKE16-NEXT: scratch_load_b32 v56, off, s32 offset:56 +; GFX11-FAKE16-NEXT: scratch_load_b32 v47, off, s32 offset:60 +; GFX11-FAKE16-NEXT: scratch_load_b32 v46, off, s32 offset:64 +; GFX11-FAKE16-NEXT: scratch_load_b32 v45, off, s32 offset:68 +; GFX11-FAKE16-NEXT: scratch_load_b32 v44, off, s32 offset:72 +; GFX11-FAKE16-NEXT: scratch_load_b32 v43, off, s32 offset:76 +; GFX11-FAKE16-NEXT: scratch_load_b32 v42, off, s32 offset:80 +; GFX11-FAKE16-NEXT: scratch_load_b32 v41, off, s32 offset:84 +; GFX11-FAKE16-NEXT: scratch_load_b32 v40, off, s32 offset:88 +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = icmp eq i32 %b, 0 + br i1 %cmp, label %cmp.true, label %cmp.false + +cmp.true: + %a1 = add <64 x i16> %a, splat (i16 3) + %a2 = bitcast <64 x i16> %a1 to <128 x i8> + br label %end + +cmp.false: + %a3 = bitcast <64 x i16> %a to <128 x i8> + br label %end + +end: + %phi = phi <128 x i8> [ %a2, %cmp.true ], [ %a3, %cmp.false ] + ret <128 x i8> %phi +} + +define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i32 inreg %b) { +; SI-LABEL: bitcast_v64i16_to_v128i8_scalar: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_writelane_b32 v40, s30, 0 +; SI-NEXT: v_writelane_b32 v40, s31, 1 +; SI-NEXT: v_writelane_b32 v40, s34, 2 +; SI-NEXT: v_writelane_b32 v40, s35, 3 +; SI-NEXT: v_writelane_b32 v40, s36, 4 +; SI-NEXT: v_writelane_b32 v40, s37, 5 +; SI-NEXT: v_writelane_b32 v40, s38, 6 +; SI-NEXT: v_writelane_b32 v40, s39, 7 +; SI-NEXT: v_writelane_b32 v40, s48, 8 +; SI-NEXT: v_writelane_b32 v40, s49, 9 +; SI-NEXT: v_writelane_b32 v40, s50, 10 +; SI-NEXT: v_writelane_b32 v40, s51, 11 +; SI-NEXT: v_writelane_b32 v40, s52, 12 +; SI-NEXT: v_writelane_b32 v40, s53, 13 +; SI-NEXT: v_writelane_b32 v40, s54, 14 +; 
SI-NEXT: v_writelane_b32 v40, s55, 15 +; SI-NEXT: v_writelane_b32 v40, s64, 16 +; SI-NEXT: v_writelane_b32 v40, s65, 17 +; SI-NEXT: v_writelane_b32 v40, s66, 18 +; SI-NEXT: v_writelane_b32 v40, s67, 19 +; SI-NEXT: v_writelane_b32 v40, s68, 20 +; SI-NEXT: v_writelane_b32 v40, s69, 21 +; SI-NEXT: v_writelane_b32 v40, s70, 22 +; SI-NEXT: s_mov_b32 s88, s17 +; SI-NEXT: v_writelane_b32 v40, s71, 23 +; SI-NEXT: v_writelane_b32 v40, s80, 24 +; SI-NEXT: v_writelane_b32 v40, s81, 25 +; SI-NEXT: v_writelane_b32 v40, s82, 26 +; SI-NEXT: v_writelane_b32 v40, s83, 27 +; SI-NEXT: v_readfirstlane_b32 s6, v16 +; SI-NEXT: ; implicit-def: $vgpr41 : SGPR spill to VGPR lane +; SI-NEXT: v_readfirstlane_b32 s7, v15 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v41, s6, 0 +; SI-NEXT: v_readfirstlane_b32 s8, v21 +; SI-NEXT: v_writelane_b32 v41, s7, 1 +; SI-NEXT: v_readfirstlane_b32 s9, v20 +; SI-NEXT: v_writelane_b32 v41, s8, 2 +; SI-NEXT: v_readfirstlane_b32 s10, v19 +; SI-NEXT: v_writelane_b32 v41, s9, 3 +; SI-NEXT: v_readfirstlane_b32 s11, v25 +; SI-NEXT: v_writelane_b32 v41, s10, 4 +; SI-NEXT: v_readfirstlane_b32 s12, v24 +; SI-NEXT: v_writelane_b32 v41, s11, 5 +; SI-NEXT: v_readfirstlane_b32 s13, v23 +; SI-NEXT: v_writelane_b32 v41, s12, 6 +; SI-NEXT: v_readfirstlane_b32 s15, v29 +; SI-NEXT: v_writelane_b32 v41, s13, 7 +; SI-NEXT: v_readfirstlane_b32 s14, v28 +; SI-NEXT: v_writelane_b32 v41, s15, 8 +; SI-NEXT: s_mov_b32 s79, s16 +; SI-NEXT: v_readfirstlane_b32 s16, v27 +; SI-NEXT: v_writelane_b32 v41, s14, 9 +; SI-NEXT: v_writelane_b32 v41, s16, 10 +; SI-NEXT: v_writelane_b32 v40, s84, 28 +; SI-NEXT: v_writelane_b32 v40, s85, 29 +; SI-NEXT: v_writelane_b32 v40, s86, 30 +; SI-NEXT: v_writelane_b32 v40, s87, 31 +; SI-NEXT: v_writelane_b32 v40, s96, 32 +; SI-NEXT: v_writelane_b32 v40, s97, 33 +; SI-NEXT: v_writelane_b32 v40, s98, 34 +; SI-NEXT: v_writelane_b32 v40, s99, 35 +; SI-NEXT: v_readfirstlane_b32 s98, v30 +; SI-NEXT: v_readfirstlane_b32 s97, v26 +; SI-NEXT: v_readfirstlane_b32 s96, v22 +; SI-NEXT: v_readfirstlane_b32 s87, v18 +; SI-NEXT: v_readfirstlane_b32 s81, v17 +; SI-NEXT: v_readfirstlane_b32 s86, v14 +; SI-NEXT: v_readfirstlane_b32 s67, v13 +; SI-NEXT: v_readfirstlane_b32 s69, v12 +; SI-NEXT: v_readfirstlane_b32 s71, v11 +; SI-NEXT: v_readfirstlane_b32 s85, v10 +; SI-NEXT: v_readfirstlane_b32 s51, v9 +; SI-NEXT: v_readfirstlane_b32 s53, v8 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s89, v31 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s91, v32 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_readfirstlane_b32 s93, v33 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s55, v34 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s17, v35 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s95, v36 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s35, v37 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v36, off, 
s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s83, v38 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:80 +; SI-NEXT: v_readfirstlane_b32 s65, v7 +; SI-NEXT: v_readfirstlane_b32 s84, v6 +; SI-NEXT: v_readfirstlane_b32 s31, v5 +; SI-NEXT: v_readfirstlane_b32 s37, v4 +; SI-NEXT: v_readfirstlane_b32 s49, v3 +; SI-NEXT: v_readfirstlane_b32 s78, v2 +; SI-NEXT: v_readfirstlane_b32 s39, v1 +; SI-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s77, v31 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s38, v32 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s48, v33 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s50, v39 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s76, v48 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s30, v49 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s34, v50 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_readfirstlane_b32 s36, v51 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_readfirstlane_b32 s99, v34 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_readfirstlane_b32 s90, v35 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_readfirstlane_b32 s92, v36 +; SI-NEXT: v_writelane_b32 v41, s90, 11 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_readfirstlane_b32 s94, v37 +; SI-NEXT: v_writelane_b32 v41, s92, 12 +; SI-NEXT: v_writelane_b32 v41, s94, 13 +; SI-NEXT: v_writelane_b32 v41, s30, 14 +; SI-NEXT: v_writelane_b32 v41, s34, 15 +; SI-NEXT: v_writelane_b32 v41, s36, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xff, v12 -; SI-NEXT: s_and_b32 s4, s15, 0xff +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v38 +; SI-NEXT: v_writelane_b32 v41, s38, 17 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_writelane_b32 v41, s48, 18 +; SI-NEXT: v_writelane_b32 v41, s50, 19 +; SI-NEXT: s_cbranch_scc0 .LBB99_4 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: s_and_b32 s4, s79, 0xffff +; SI-NEXT: s_lshl_b32 s5, s88, 16 +; SI-NEXT: s_or_b32 s60, s4, s5 +; SI-NEXT: s_and_b32 s4, s18, 0xffff +; SI-NEXT: s_lshl_b32 s5, s19, 16 +; SI-NEXT: s_or_b32 s61, s4, s5 +; SI-NEXT: s_and_b32 s4, s20, 0xffff +; SI-NEXT: s_lshl_b32 s5, s21, 16 +; SI-NEXT: s_or_b32 s56, s4, s5 +; SI-NEXT: s_and_b32 s4, s22, 0xffff +; SI-NEXT: s_lshl_b32 s5, s23, 16 +; SI-NEXT: s_or_b32 s57, s4, s5 +; SI-NEXT: s_and_b32 s4, s24, 0xffff +; SI-NEXT: s_lshl_b32 s5, s25, 16 +; SI-NEXT: s_or_b32 s44, s4, s5 +; SI-NEXT: s_and_b32 s4, s26, 0xffff +; SI-NEXT: s_lshl_b32 s5, s27, 16 +; SI-NEXT: s_or_b32 s45, s4, s5 +; SI-NEXT: s_and_b32 s4, s28, 0xffff +; SI-NEXT: s_lshl_b32 s5, s29, 16 +; SI-NEXT: s_or_b32 s74, s4, s5 +; SI-NEXT: s_and_b32 s4, s39, 0xffff +; SI-NEXT: s_lshl_b32 s5, s78, 16 +; SI-NEXT: s_or_b32 s75, s4, s5 +; SI-NEXT: s_and_b32 s4, s49, 
0xffff +; SI-NEXT: s_lshl_b32 s5, s37, 16 +; SI-NEXT: s_or_b32 s72, s4, s5 +; SI-NEXT: s_and_b32 s4, s31, 0xffff +; SI-NEXT: s_lshl_b32 s5, s84, 16 +; SI-NEXT: s_or_b32 s73, s4, s5 +; SI-NEXT: s_and_b32 s4, s65, 0xffff +; SI-NEXT: s_lshl_b32 s5, s53, 16 +; SI-NEXT: s_or_b32 s62, s4, s5 +; SI-NEXT: s_and_b32 s4, s51, 0xffff +; SI-NEXT: s_lshl_b32 s5, s85, 16 +; SI-NEXT: s_or_b32 s63, s4, s5 +; SI-NEXT: s_and_b32 s4, s71, 0xffff +; SI-NEXT: s_lshl_b32 s5, s69, 16 +; SI-NEXT: s_or_b32 s58, s4, s5 +; SI-NEXT: s_and_b32 s4, s67, 0xffff +; SI-NEXT: s_lshl_b32 s5, s86, 16 +; SI-NEXT: s_or_b32 s59, s4, s5 +; SI-NEXT: s_and_b32 s4, s7, 0xffff +; SI-NEXT: s_lshl_b32 s5, s6, 16 +; SI-NEXT: s_or_b32 s46, s4, s5 +; SI-NEXT: s_and_b32 s4, s81, 0xffff +; SI-NEXT: s_lshl_b32 s5, s87, 16 +; SI-NEXT: s_or_b32 s47, s4, s5 +; SI-NEXT: s_and_b32 s4, s10, 0xffff +; SI-NEXT: s_lshl_b32 s5, s9, 16 +; SI-NEXT: s_or_b32 s42, s4, s5 +; SI-NEXT: s_and_b32 s4, s8, 0xffff +; SI-NEXT: s_lshl_b32 s5, s96, 16 +; SI-NEXT: s_or_b32 s43, s4, s5 +; SI-NEXT: s_and_b32 s4, s13, 0xffff +; SI-NEXT: s_lshl_b32 s5, s12, 16 +; SI-NEXT: s_or_b32 s40, s4, s5 +; SI-NEXT: s_and_b32 s4, s11, 0xffff +; SI-NEXT: s_lshl_b32 s5, s97, 16 +; SI-NEXT: s_or_b32 s41, s4, s5 +; SI-NEXT: s_and_b32 s4, s16, 0xffff +; SI-NEXT: s_lshl_b32 s5, s14, 16 +; SI-NEXT: s_or_b32 s14, s4, s5 +; SI-NEXT: s_and_b32 s4, s15, 0xffff +; SI-NEXT: s_lshl_b32 s5, s98, 16 +; SI-NEXT: s_or_b32 s15, s4, s5 +; SI-NEXT: s_and_b32 s4, s94, 0xffff +; SI-NEXT: s_lshl_b32 s5, s92, 16 +; SI-NEXT: s_or_b32 s12, s4, s5 +; SI-NEXT: s_and_b32 s4, s90, 0xffff +; SI-NEXT: s_lshl_b32 s5, s99, 16 +; SI-NEXT: s_or_b32 s13, s4, s5 +; SI-NEXT: s_and_b32 s4, s36, 0xffff +; SI-NEXT: s_lshl_b32 s5, s34, 16 +; SI-NEXT: s_or_b32 s10, s4, s5 +; SI-NEXT: s_and_b32 s4, s30, 0xffff +; SI-NEXT: s_lshl_b32 s5, s76, 16 +; SI-NEXT: s_or_b32 s11, s4, s5 +; SI-NEXT: s_and_b32 s4, s50, 0xffff +; SI-NEXT: s_lshl_b32 s5, s48, 16 +; SI-NEXT: s_or_b32 s8, s4, s5 +; SI-NEXT: s_and_b32 s4, s38, 0xffff +; SI-NEXT: s_lshl_b32 s5, s77, 16 +; SI-NEXT: s_or_b32 s9, s4, s5 +; SI-NEXT: s_and_b32 s4, s83, 0xffff +; SI-NEXT: s_lshl_b32 s5, s35, 16 +; SI-NEXT: s_or_b32 s6, s4, s5 +; SI-NEXT: s_and_b32 s4, s95, 0xffff +; SI-NEXT: s_lshl_b32 s5, s17, 16 +; SI-NEXT: s_or_b32 s7, s4, s5 +; SI-NEXT: s_and_b32 s4, s55, 0xffff +; SI-NEXT: s_lshl_b32 s5, s93, 16 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s38, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s15, s50, 24 +; SI-NEXT: s_and_b32 s5, s91, 0xffff +; SI-NEXT: s_lshl_b32 s16, s89, 16 +; SI-NEXT: s_or_b32 s5, s5, s16 +; SI-NEXT: s_lshr_b32 s16, s61, 8 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v43, s16, 20 +; SI-NEXT: s_lshr_b32 s16, s57, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 23 +; SI-NEXT: s_lshr_b32 s16, s45, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 26 +; SI-NEXT: s_lshr_b32 s16, s75, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 29 +; SI-NEXT: s_lshr_b32 s16, s73, 8 +; SI-NEXT: s_lshr_b64 vcc, s[60:61], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 32 +; SI-NEXT: s_lshr_b32 s16, s63, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 22 +; SI-NEXT: v_writelane_b32 v43, s16, 35 +; SI-NEXT: s_lshr_b32 s16, s59, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 23 +; SI-NEXT: s_lshr_b64 vcc, s[60:61], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 38 +; SI-NEXT: s_lshr_b32 s16, s47, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 20 +; SI-NEXT: v_writelane_b32 v43, s16, 41 +; SI-NEXT: s_lshr_b32 s16, s43, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 21 +; SI-NEXT: 
s_lshr_b64 vcc, s[56:57], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 44 +; SI-NEXT: s_lshr_b32 s16, s41, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 28 +; SI-NEXT: v_writelane_b32 v43, s16, 47 +; SI-NEXT: s_lshr_b32 s16, s15, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 29 +; SI-NEXT: s_lshr_b64 vcc, s[56:57], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 50 +; SI-NEXT: s_lshr_b32 s16, s13, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 26 +; SI-NEXT: v_writelane_b32 v43, s16, 53 +; SI-NEXT: s_lshr_b32 s16, s11, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 27 +; SI-NEXT: s_lshr_b64 vcc, s[56:57], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 56 +; SI-NEXT: s_lshr_b32 s16, s9, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 59 +; SI-NEXT: s_lshr_b32 s16, s7, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 25 +; SI-NEXT: s_lshr_b64 vcc, s[46:47], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 62 +; SI-NEXT: s_lshr_b32 s16, s5, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 32 +; SI-NEXT: v_writelane_b32 v42, s16, 1 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 33 +; SI-NEXT: s_lshr_b64 vcc, s[46:47], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 19 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 30 +; SI-NEXT: v_writelane_b32 v43, s16, 22 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 31 +; SI-NEXT: s_lshr_b64 vcc, s[42:43], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 25 +; SI-NEXT: s_and_b32 s16, s78, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 38 +; SI-NEXT: v_writelane_b32 v43, s16, 28 +; SI-NEXT: s_and_b32 s16, s84, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 39 +; SI-NEXT: s_lshr_b64 vcc, s[42:43], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 31 +; SI-NEXT: s_and_b32 s16, s85, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 36 +; SI-NEXT: v_writelane_b32 v43, s16, 34 +; SI-NEXT: s_and_b32 s16, s86, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 37 +; SI-NEXT: s_lshr_b64 vcc, s[42:43], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 37 +; SI-NEXT: s_and_b32 s16, s87, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 34 +; SI-NEXT: v_writelane_b32 v43, s16, 40 +; SI-NEXT: s_and_b32 s16, s96, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 35 +; SI-NEXT: s_lshr_b64 vcc, s[40:41], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 43 +; SI-NEXT: s_and_b32 s16, s97, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 44 +; SI-NEXT: v_writelane_b32 v43, s16, 46 +; SI-NEXT: s_and_b32 s16, s98, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 45 +; SI-NEXT: s_lshr_b64 vcc, s[40:41], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 49 +; SI-NEXT: s_and_b32 s16, s99, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 42 +; SI-NEXT: v_writelane_b32 v43, s16, 52 +; SI-NEXT: s_and_b32 s16, s76, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 43 +; SI-NEXT: s_lshr_b64 vcc, s[40:41], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 55 +; SI-NEXT: s_and_b32 s16, s77, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 40 +; SI-NEXT: v_writelane_b32 v43, s16, 58 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 41 +; SI-NEXT: s_lshr_b64 vcc, s[14:15], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 61 +; SI-NEXT: s_and_b32 s16, s89, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 50 +; SI-NEXT: v_writelane_b32 v42, s16, 0 +; SI-NEXT: s_bfe_u32 s16, s19, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 51 +; SI-NEXT: s_lshr_b64 vcc, s[14:15], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 18 +; SI-NEXT: s_bfe_u32 s16, s23, 0x80008 +; SI-NEXT: 
v_writelane_b32 v41, vcc_lo, 48 +; SI-NEXT: v_writelane_b32 v43, s16, 21 +; SI-NEXT: s_bfe_u32 s16, s27, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 49 +; SI-NEXT: s_lshr_b64 vcc, s[14:15], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 24 +; SI-NEXT: s_bfe_u32 s16, s78, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 46 +; SI-NEXT: v_writelane_b32 v43, s16, 27 +; SI-NEXT: s_bfe_u32 s16, s84, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 47 +; SI-NEXT: s_lshr_b64 vcc, s[12:13], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 30 +; SI-NEXT: s_bfe_u32 s16, s85, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 56 +; SI-NEXT: v_writelane_b32 v43, s16, 33 +; SI-NEXT: s_bfe_u32 s16, s86, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 57 +; SI-NEXT: s_lshr_b64 vcc, s[12:13], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 36 +; SI-NEXT: s_bfe_u32 s16, s87, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 54 +; SI-NEXT: v_writelane_b32 v43, s16, 39 +; SI-NEXT: s_bfe_u32 s16, s96, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 55 +; SI-NEXT: s_lshr_b64 vcc, s[12:13], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 42 +; SI-NEXT: s_bfe_u32 s16, s97, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 52 +; SI-NEXT: v_writelane_b32 v43, s16, 45 +; SI-NEXT: s_bfe_u32 s16, s98, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 53 +; SI-NEXT: s_lshr_b64 vcc, s[10:11], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 48 +; SI-NEXT: s_bfe_u32 s16, s99, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 62 +; SI-NEXT: v_writelane_b32 v43, s16, 51 +; SI-NEXT: s_bfe_u32 s16, s76, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 63 +; SI-NEXT: s_lshr_b64 vcc, s[10:11], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 54 +; SI-NEXT: s_bfe_u32 s16, s77, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 60 +; SI-NEXT: v_writelane_b32 v43, s16, 57 +; SI-NEXT: s_bfe_u32 s16, s17, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 61 +; SI-NEXT: s_lshr_b64 vcc, s[10:11], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 60 +; SI-NEXT: s_bfe_u32 s16, s89, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 58 +; SI-NEXT: v_writelane_b32 v43, s16, 63 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 59 +; SI-NEXT: s_lshr_b64 vcc, s[8:9], 24 +; SI-NEXT: s_mov_b32 s16, s93 +; SI-NEXT: s_lshr_b64 s[92:93], s[60:61], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 4 +; SI-NEXT: s_mov_b32 s93, s16 +; SI-NEXT: s_mov_b32 s16, s71 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 5 +; SI-NEXT: s_lshr_b64 vcc, s[8:9], 16 +; SI-NEXT: s_mov_b32 s71, s16 +; SI-NEXT: s_mov_b32 s16, s81 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 2 +; SI-NEXT: s_mov_b32 s81, s16 +; SI-NEXT: s_mov_b32 s16, s83 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 3 +; SI-NEXT: s_lshr_b64 vcc, s[8:9], 8 +; SI-NEXT: s_mov_b32 s83, s16 +; SI-NEXT: s_mov_b32 s16, s65 +; SI-NEXT: s_lshr_b64 s[64:65], s[74:75], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 0 +; SI-NEXT: s_mov_b32 s65, s16 +; SI-NEXT: s_mov_b32 s16, s67 +; SI-NEXT: s_lshr_b64 s[66:67], s[74:75], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 1 +; SI-NEXT: s_lshr_b64 vcc, s[6:7], 24 +; SI-NEXT: s_mov_b32 s67, s16 +; SI-NEXT: s_mov_b32 s16, s69 +; SI-NEXT: s_lshr_b64 s[68:69], s[74:75], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 10 +; SI-NEXT: s_mov_b32 s69, s16 +; SI-NEXT: s_mov_b32 s16, s51 +; SI-NEXT: s_lshr_b64 s[50:51], s[72:73], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 11 +; SI-NEXT: s_lshr_b64 vcc, s[6:7], 16 +; SI-NEXT: s_mov_b32 s51, s16 +; 
SI-NEXT: s_mov_b32 s16, s53 +; SI-NEXT: s_lshr_b64 s[52:53], s[72:73], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 8 +; SI-NEXT: s_mov_b32 s53, s16 +; SI-NEXT: s_mov_b32 s16, s55 +; SI-NEXT: s_lshr_b64 s[54:55], s[72:73], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 9 +; SI-NEXT: s_lshr_b64 vcc, s[6:7], 8 +; SI-NEXT: s_mov_b32 s55, s16 +; SI-NEXT: s_mov_b32 s16, s37 +; SI-NEXT: s_lshr_b64 s[36:37], s[62:63], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 6 +; SI-NEXT: s_mov_b32 s37, s16 +; SI-NEXT: s_mov_b32 s16, s39 +; SI-NEXT: s_lshr_b64 s[38:39], s[62:63], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 7 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 24 +; SI-NEXT: s_mov_b32 s39, s16 +; SI-NEXT: s_mov_b32 s16, s49 +; SI-NEXT: s_lshr_b64 s[48:49], s[62:63], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 16 +; SI-NEXT: s_mov_b32 s49, s16 +; SI-NEXT: s_mov_b32 s16, s95 +; SI-NEXT: s_lshr_b64 s[94:95], s[58:59], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 17 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 16 +; SI-NEXT: s_mov_b32 s95, s16 +; SI-NEXT: s_mov_b32 s16, s31 +; SI-NEXT: s_lshr_b64 s[30:31], s[58:59], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 14 +; SI-NEXT: s_mov_b32 s31, s16 +; SI-NEXT: s_mov_b32 s16, s35 +; SI-NEXT: s_lshr_b64 s[34:35], s[58:59], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 15 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 8 +; SI-NEXT: s_mov_b32 s35, s16 +; SI-NEXT: s_mov_b32 s16, s91 +; SI-NEXT: s_lshr_b64 s[90:91], s[46:47], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 12 +; SI-NEXT: s_mov_b32 s91, s16 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 13 +; SI-NEXT: s_cbranch_execnz .LBB99_3 +; SI-NEXT: .LBB99_2: ; %cmp.true +; SI-NEXT: s_add_i32 s4, s55, 3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s15, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s95, 8 -; SI-NEXT: v_readlane_b32 s50, v63, 10 -; SI-NEXT: v_readlane_b32 s38, v63, 6 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s5, s93, 16 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_add_i32 s5, s91, 3 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_lshl_b32 s6, s89, 16 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_add_i32 s6, s83, 3 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s7, s35, 16 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_add_i32 s7, s95, 3 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_lshl_b32 s8, s17, 16 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_readlane_b32 s8, v41, 19 +; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: v_readlane_b32 s9, v41, 18 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readlane_b32 s9, v41, 17 +; SI-NEXT: s_add_i32 s9, s9, 3 +; SI-NEXT: s_and_b32 s9, s9, 0xffff +; SI-NEXT: s_lshl_b32 s10, s77, 16 +; 
SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_readlane_b32 s10, v41, 16 +; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: v_readlane_b32 s11, v41, 15 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_readlane_b32 s11, v41, 14 +; SI-NEXT: s_add_i32 s11, s11, 3 +; SI-NEXT: s_and_b32 s11, s11, 0xffff +; SI-NEXT: s_lshl_b32 s12, s76, 16 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_readlane_b32 s12, v41, 13 +; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: v_readlane_b32 s13, v41, 12 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readlane_b32 s13, v41, 11 +; SI-NEXT: s_add_i32 s13, s13, 3 +; SI-NEXT: s_and_b32 s13, s13, 0xffff +; SI-NEXT: s_lshl_b32 s14, s99, 16 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_readlane_b32 s14, v41, 10 +; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: v_readlane_b32 s15, v41, 9 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_readlane_b32 s15, v41, 8 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: s_and_b32 s15, s15, 0xffff +; SI-NEXT: s_lshl_b32 s16, s98, 16 +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_readlane_b32 s16, v41, 7 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v41, 6 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s40, s16, 0x30000 +; SI-NEXT: v_readlane_b32 s16, v41, 5 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s97, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s41, s16, 0x30000 +; SI-NEXT: v_readlane_b32 s16, v41, 4 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v41, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s42, s16, 0x30000 +; SI-NEXT: v_readlane_b32 s16, v41, 2 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s96, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s43, s16, 0x30000 +; SI-NEXT: v_readlane_b32 s16, v41, 1 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v41, 0 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s46, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s81, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s87, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s47, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s71, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s69, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s58, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s67, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s86, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s59, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s65, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s62, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s51, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s85, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s63, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s49, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s37, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; 
SI-NEXT: s_add_i32 s72, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s31, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s84, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s73, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s28, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s29, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s74, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s39, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s75, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s24, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s25, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s44, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s26, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s27, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s45, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s20, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s21, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s56, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s22, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s23, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s57, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s79, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s60, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s18, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s19, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s61, s16, 0x30000 +; SI-NEXT: s_lshr_b64 s[16:17], s[60:61], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 22 +; SI-NEXT: v_writelane_b32 v41, s17, 23 +; SI-NEXT: s_lshr_b64 s[16:17], s[60:61], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 20 +; SI-NEXT: v_writelane_b32 v41, s17, 21 +; SI-NEXT: s_lshr_b32 s16, s61, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 18 +; SI-NEXT: s_lshr_b32 s16, s61, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 19 +; SI-NEXT: s_lshr_b32 s16, s61, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 20 +; SI-NEXT: s_lshr_b32 s16, s57, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 21 +; SI-NEXT: s_lshr_b32 s16, s57, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 22 +; SI-NEXT: s_lshr_b32 s16, s57, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 23 +; SI-NEXT: s_lshr_b32 s16, s45, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 24 +; SI-NEXT: s_lshr_b32 s16, s45, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 25 +; SI-NEXT: s_lshr_b32 s16, s45, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 26 +; SI-NEXT: s_lshr_b32 s16, s75, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 27 +; SI-NEXT: s_lshr_b32 s16, s75, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 28 +; SI-NEXT: s_lshr_b32 s16, s75, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 29 +; SI-NEXT: s_lshr_b32 s16, s73, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 30 +; SI-NEXT: s_lshr_b32 s16, s73, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 31 +; SI-NEXT: s_lshr_b32 s16, s73, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 32 +; SI-NEXT: s_lshr_b32 s16, s63, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 33 +; SI-NEXT: s_lshr_b32 s16, s63, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 34 +; SI-NEXT: s_lshr_b32 s16, s63, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 35 +; SI-NEXT: s_lshr_b32 s16, s59, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 36 +; SI-NEXT: s_lshr_b32 s16, s59, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 37 +; SI-NEXT: s_lshr_b32 s16, s59, 8 +; SI-NEXT: v_writelane_b32 v43, 
s16, 38 +; SI-NEXT: s_lshr_b32 s16, s47, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 39 +; SI-NEXT: s_lshr_b32 s16, s47, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 40 +; SI-NEXT: s_lshr_b32 s16, s47, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 41 +; SI-NEXT: s_lshr_b32 s16, s43, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 42 +; SI-NEXT: s_lshr_b32 s16, s43, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 43 +; SI-NEXT: s_lshr_b32 s16, s43, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 44 +; SI-NEXT: s_lshr_b32 s16, s41, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 45 +; SI-NEXT: s_lshr_b32 s16, s41, 16 +; SI-NEXT: s_add_i32 s15, s15, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 46 +; SI-NEXT: s_lshr_b32 s16, s41, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 47 +; SI-NEXT: s_lshr_b32 s16, s15, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 48 +; SI-NEXT: s_lshr_b32 s16, s15, 16 +; SI-NEXT: s_add_i32 s13, s13, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 49 +; SI-NEXT: s_lshr_b32 s16, s15, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 50 +; SI-NEXT: s_lshr_b32 s16, s13, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 51 +; SI-NEXT: s_lshr_b32 s16, s13, 16 +; SI-NEXT: s_add_i32 s11, s11, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 52 +; SI-NEXT: s_lshr_b32 s16, s13, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 53 +; SI-NEXT: s_lshr_b32 s16, s11, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 54 +; SI-NEXT: s_lshr_b32 s16, s11, 16 +; SI-NEXT: s_add_i32 s9, s9, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 55 +; SI-NEXT: s_lshr_b32 s16, s11, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 56 +; SI-NEXT: s_lshr_b32 s16, s9, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 57 +; SI-NEXT: s_lshr_b32 s16, s9, 16 +; SI-NEXT: s_add_i32 s7, s7, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 58 +; SI-NEXT: s_lshr_b32 s16, s9, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 59 +; SI-NEXT: s_lshr_b32 s16, s7, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 60 +; SI-NEXT: s_lshr_b32 s16, s7, 16 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 61 +; SI-NEXT: s_lshr_b32 s16, s7, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 62 +; SI-NEXT: s_lshr_b32 s16, s5, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 63 +; SI-NEXT: s_lshr_b32 s16, s5, 16 +; SI-NEXT: v_writelane_b32 v42, s16, 0 +; SI-NEXT: s_lshr_b32 s16, s5, 8 +; SI-NEXT: v_writelane_b32 v42, s16, 1 +; SI-NEXT: s_lshr_b64 s[16:17], s[56:57], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 28 +; SI-NEXT: v_writelane_b32 v41, s17, 29 +; SI-NEXT: s_lshr_b64 s[16:17], s[56:57], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 26 +; SI-NEXT: v_writelane_b32 v41, s17, 27 +; SI-NEXT: s_lshr_b64 s[16:17], s[56:57], 8 +; SI-NEXT: v_writelane_b32 v41, s16, 24 +; SI-NEXT: v_writelane_b32 v41, s17, 25 +; SI-NEXT: s_lshr_b64 s[16:17], s[46:47], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 32 +; SI-NEXT: v_writelane_b32 v41, s17, 33 +; SI-NEXT: s_lshr_b64 s[16:17], s[46:47], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 30 +; SI-NEXT: v_writelane_b32 v41, s17, 31 +; SI-NEXT: s_lshr_b64 s[16:17], s[42:43], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 38 +; SI-NEXT: v_writelane_b32 v41, s17, 39 +; SI-NEXT: s_lshr_b64 s[16:17], s[42:43], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 36 +; SI-NEXT: v_writelane_b32 v41, s17, 37 +; SI-NEXT: s_lshr_b64 s[16:17], s[42:43], 8 +; SI-NEXT: v_writelane_b32 v41, s16, 34 +; SI-NEXT: v_writelane_b32 v41, s17, 35 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 44 +; SI-NEXT: v_writelane_b32 v41, s17, 45 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 42 +; SI-NEXT: 
v_writelane_b32 v41, s17, 43 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 8 +; SI-NEXT: s_add_i32 s14, s14, 0x30000 +; SI-NEXT: v_writelane_b32 v41, s16, 40 +; SI-NEXT: v_writelane_b32 v41, s17, 41 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 50 +; SI-NEXT: v_writelane_b32 v41, s17, 51 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 48 +; SI-NEXT: v_writelane_b32 v41, s17, 49 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 +; SI-NEXT: s_add_i32 s12, s12, 0x30000 +; SI-NEXT: v_writelane_b32 v41, s16, 46 +; SI-NEXT: v_writelane_b32 v41, s17, 47 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 56 +; SI-NEXT: v_writelane_b32 v41, s17, 57 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 54 +; SI-NEXT: v_writelane_b32 v41, s17, 55 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 8 +; SI-NEXT: s_add_i32 s10, s10, 0x30000 +; SI-NEXT: v_writelane_b32 v41, s16, 52 +; SI-NEXT: v_writelane_b32 v41, s17, 53 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 62 +; SI-NEXT: v_writelane_b32 v41, s17, 63 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 60 +; SI-NEXT: v_writelane_b32 v41, s17, 61 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 8 +; SI-NEXT: s_add_i32 s8, s8, 0x30000 +; SI-NEXT: v_writelane_b32 v41, s16, 58 +; SI-NEXT: v_writelane_b32 v41, s17, 59 +; SI-NEXT: s_lshr_b64 s[16:17], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 4 +; SI-NEXT: v_writelane_b32 v43, s17, 5 +; SI-NEXT: s_lshr_b64 s[16:17], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 2 +; SI-NEXT: v_writelane_b32 v43, s17, 3 +; SI-NEXT: s_lshr_b64 s[16:17], s[8:9], 8 +; SI-NEXT: s_add_i32 s6, s6, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 0 +; SI-NEXT: v_writelane_b32 v43, s17, 1 +; SI-NEXT: s_lshr_b64 s[16:17], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 10 +; SI-NEXT: v_writelane_b32 v43, s17, 11 +; SI-NEXT: s_lshr_b64 s[16:17], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 8 +; SI-NEXT: v_writelane_b32 v43, s17, 9 +; SI-NEXT: s_lshr_b64 s[16:17], s[6:7], 8 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 6 +; SI-NEXT: v_writelane_b32 v43, s17, 7 +; SI-NEXT: s_lshr_b64 s[16:17], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 16 +; SI-NEXT: v_writelane_b32 v43, s17, 17 +; SI-NEXT: s_lshr_b64 s[16:17], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 14 +; SI-NEXT: v_writelane_b32 v43, s17, 15 +; SI-NEXT: s_lshr_b64 s[16:17], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[60:61], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[64:65], s[74:75], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[74:75], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[74:75], 8 +; SI-NEXT: s_lshr_b64 s[50:51], s[72:73], 24 +; SI-NEXT: s_lshr_b64 s[52:53], s[72:73], 16 +; SI-NEXT: s_lshr_b64 s[54:55], s[72:73], 8 +; SI-NEXT: s_lshr_b64 s[36:37], s[62:63], 24 +; SI-NEXT: s_lshr_b64 s[38:39], s[62:63], 16 +; SI-NEXT: s_lshr_b64 s[48:49], s[62:63], 8 +; SI-NEXT: s_lshr_b64 s[94:95], s[58:59], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[58:59], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[58:59], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[46:47], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 12 +; SI-NEXT: v_writelane_b32 v43, s17, 13 +; SI-NEXT: .LBB99_3: ; %end +; SI-NEXT: s_lshl_b32 s17, s92, 8 +; SI-NEXT: s_and_b32 s18, s60, 0xff +; SI-NEXT: 
s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v41, 20 +; SI-NEXT: v_readlane_b32 s19, v41, 21 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: v_readlane_b32 s20, v41, 22 +; SI-NEXT: s_lshl_b32 s19, s20, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 20 +; SI-NEXT: v_mov_b32_e32 v1, s17 +; SI-NEXT: s_and_b32 s17, s61, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 19 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 18 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v2, s17 +; SI-NEXT: v_readlane_b32 s16, v41, 24 +; SI-NEXT: v_readlane_b32 s17, v41, 25 +; SI-NEXT: s_lshl_b32 s17, s16, 8 +; SI-NEXT: s_and_b32 s18, s56, 0xff +; SI-NEXT: v_readlane_b32 s21, v41, 23 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v41, 26 +; SI-NEXT: v_readlane_b32 s19, v41, 27 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: v_readlane_b32 s20, v41, 28 +; SI-NEXT: s_lshl_b32 s19, s20, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 23 +; SI-NEXT: v_mov_b32_e32 v3, s17 +; SI-NEXT: s_and_b32 s17, s57, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 22 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 21 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v4, s17 +; SI-NEXT: s_lshl_b32 s17, s82, 8 +; SI-NEXT: s_and_b32 s18, s44, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s80, 0xff +; SI-NEXT: s_lshl_b32 s19, s70, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 26 +; SI-NEXT: v_mov_b32_e32 v5, s17 +; SI-NEXT: s_and_b32 s17, s45, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 25 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v6, s17 +; SI-NEXT: s_lshl_b32 s17, s68, 8 +; SI-NEXT: s_and_b32 s18, s74, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s66, 0xff +; SI-NEXT: s_lshl_b32 s19, s64, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 29 +; SI-NEXT: v_mov_b32_e32 v7, s17 +; SI-NEXT: s_and_b32 s17, s75, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 28 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 27 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 
0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v8, s17 +; SI-NEXT: s_lshl_b32 s17, s54, 8 +; SI-NEXT: s_and_b32 s18, s72, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s52, 0xff +; SI-NEXT: s_lshl_b32 s19, s50, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 32 +; SI-NEXT: v_mov_b32_e32 v9, s17 +; SI-NEXT: s_and_b32 s17, s73, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 31 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 30 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v10, s17 +; SI-NEXT: s_lshl_b32 s17, s48, 8 +; SI-NEXT: s_and_b32 s18, s62, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s38, 0xff +; SI-NEXT: s_lshl_b32 s19, s36, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 35 +; SI-NEXT: v_mov_b32_e32 v11, s17 +; SI-NEXT: s_and_b32 s17, s63, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 34 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 33 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v12, s17 +; SI-NEXT: s_lshl_b32 s17, s34, 8 +; SI-NEXT: s_and_b32 s18, s58, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s30, 0xff +; SI-NEXT: s_lshl_b32 s19, s94, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 38 +; SI-NEXT: v_mov_b32_e32 v13, s17 +; SI-NEXT: s_and_b32 s17, s59, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 37 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 36 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v14, s17 +; SI-NEXT: s_lshl_b32 s17, s90, 8 +; SI-NEXT: s_and_b32 s18, s46, 0xff +; SI-NEXT: v_readlane_b32 s21, v41, 29 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v41, 30 +; SI-NEXT: v_readlane_b32 s19, v41, 31 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: v_readlane_b32 s20, v41, 32 +; SI-NEXT: s_lshl_b32 s19, s20, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 41 +; SI-NEXT: v_mov_b32_e32 v15, s17 +; SI-NEXT: s_and_b32 s17, s47, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 40 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 39 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: 
buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xff, v10 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s35, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s39, 24 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s92, 8 -; SI-NEXT: v_readlane_b32 s39, v63, 7 -; SI-NEXT: v_readlane_b32 s35, v63, 3 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v10, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v16, s17 +; SI-NEXT: v_readlane_b32 s16, v41, 34 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: v_readlane_b32 s17, v41, 35 +; SI-NEXT: v_readlane_b32 s18, v41, 36 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: s_and_b32 s17, s42, 0xff +; SI-NEXT: v_readlane_b32 s19, v41, 37 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v41, 38 +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: buffer_store_dword v7, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: buffer_store_dword v8, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v9, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: v_readlane_b32 s17, v43, 44 +; SI-NEXT: buffer_store_dword v10, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: buffer_store_dword v11, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v43, 43 +; SI-NEXT: buffer_store_dword v12, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 42 +; SI-NEXT: v_readlane_b32 s19, v41, 39 +; SI-NEXT: buffer_store_dword v13, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v14, v1, s[0:3], 0 offen +; SI-NEXT: 
v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v41, 40 +; SI-NEXT: buffer_store_dword v15, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: v_readlane_b32 s19, v41, 41 +; SI-NEXT: buffer_store_dword v16, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_lshl_b32 s17, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v41, 42 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: v_readlane_b32 s19, v41, 43 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v41, 44 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v43, 47 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v57 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v47 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s30, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v45 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s13, s36, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s13, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v43, 46 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 45 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v44 -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: s_lshl_b32 s5, s89, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v43 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s93, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: v_readlane_b32 s16, v41, 46 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s17, v41, 47 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: v_readlane_b32 s19, v41, 45 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: v_readlane_b32 s16, v41, 48 +; SI-NEXT: v_readlane_b32 s17, v41, 49 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: v_readlane_b32 s18, v41, 50 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s18, 24 +; SI-NEXT: s_and_b32 
s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v41 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s31, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: v_readlane_b32 s15, v43, 50 +; SI-NEXT: s_lshl_b32 s15, s15, 8 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_readlane_b32 s15, v43, 49 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 48 +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_lshl_b32 s16, s16, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 1 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v13 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_readlane_b32 s14, v41, 52 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s15, v41, 53 +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_readlane_b32 s14, v41, 54 +; SI-NEXT: v_readlane_b32 s15, v41, 55 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s16, v41, 56 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s16, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v24 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s90, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v21 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s94, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s11, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 3 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: v_readlane_b32 s13, v43, 53 +; SI-NEXT: s_lshl_b32 s13, s13, 8 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_readlane_b32 s13, v43, 52 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: v_readlane_b32 s14, v43, 51 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 
0x54, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v9 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_readlane_b32 s12, v41, 58 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s13, v41, 59 +; SI-NEXT: s_lshl_b32 s12, s12, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: v_readlane_b32 s12, v41, 60 +; SI-NEXT: v_readlane_b32 s13, v41, 61 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s14, v41, 62 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s14, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 2 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v18 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s91, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s10, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 6 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: v_readlane_b32 s11, v43, 56 +; SI-NEXT: s_lshl_b32 s11, s11, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_readlane_b32 s11, v43, 55 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: v_readlane_b32 s12, v43, 54 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s12, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v6 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: v_readlane_b32 s10, v43, 0 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s11, v43, 1 +; SI-NEXT: s_lshl_b32 s10, s10, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: v_readlane_b32 s10, v43, 2 +; SI-NEXT: v_readlane_b32 s11, v43, 3 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s12, v43, 4 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v61 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v60 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_readlane_b32 s9, v62, 4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v59 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s9, 
24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 9 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: v_readlane_b32 s9, v43, 59 +; SI-NEXT: s_lshl_b32 s9, s9, 8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_readlane_b32 s9, v43, 58 +; SI-NEXT: s_and_b32 s9, s9, 0xff +; SI-NEXT: v_readlane_b32 s10, v43, 57 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s10, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v58 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v56 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_readlane_b32 s8, v62, 7 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_readlane_b32 s8, v43, 6 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s9, v43, 7 +; SI-NEXT: s_lshl_b32 s8, s8, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: v_readlane_b32 s8, v43, 8 +; SI-NEXT: v_readlane_b32 s9, v43, 9 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s10, v43, 10 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v46 -; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: v_readlane_b32 s7, v43, 62 +; SI-NEXT: s_lshl_b32 s7, s7, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_readlane_b32 s7, v43, 61 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: v_readlane_b32 s8, v43, 60 +; SI-NEXT: s_lshl_b32 s7, s7, 16 ; SI-NEXT: s_lshl_b32 s8, s8, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 12 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v42 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 11 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v40 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_readlane_b32 s7, v62, 10 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 
v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v55 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s7, s7, 24 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_readlane_b32 s6, v43, 12 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: v_readlane_b32 s7, v43, 13 +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: v_readlane_b32 s6, v43, 14 +; SI-NEXT: v_readlane_b32 s7, v43, 15 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s8, v43, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s8, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s7, s5 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 15 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s6, 0xff +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: v_readlane_b32 s5, v42, 1 ; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v54 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 14 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v53 +; SI-NEXT: v_readlane_b32 s5, v42, 0 ; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_readlane_b32 s6, v62, 13 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v52 +; SI-NEXT: v_readlane_b32 s6, v43, 63 ; SI-NEXT: s_lshl_b32 s5, s5, 16 ; SI-NEXT: s_lshl_b32 s6, s6, 24 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_readlane_b32 s21, v41, 33 +; SI-NEXT: v_readlane_b32 s19, v41, 51 +; SI-NEXT: v_readlane_b32 s17, v41, 57 +; SI-NEXT: v_readlane_b32 s15, v41, 63 +; SI-NEXT: v_readlane_b32 s13, v43, 5 +; SI-NEXT: v_readlane_b32 s11, v43, 11 +; SI-NEXT: v_readlane_b32 s9, v43, 17 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 
; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s36, v63, 4 -; SI-NEXT: v_readlane_b32 s31, v63, 1 -; SI-NEXT: v_readlane_b32 s30, v63, 0 +; SI-NEXT: v_readlane_b32 s99, v40, 35 +; SI-NEXT: v_readlane_b32 s98, v40, 34 +; SI-NEXT: v_readlane_b32 s97, v40, 33 +; SI-NEXT: v_readlane_b32 s96, v40, 32 +; SI-NEXT: v_readlane_b32 s87, v40, 31 +; SI-NEXT: v_readlane_b32 s86, v40, 30 +; SI-NEXT: v_readlane_b32 s85, v40, 29 +; SI-NEXT: v_readlane_b32 s84, v40, 28 +; SI-NEXT: v_readlane_b32 s83, v40, 27 +; SI-NEXT: v_readlane_b32 s82, v40, 26 +; SI-NEXT: v_readlane_b32 s81, v40, 25 +; SI-NEXT: v_readlane_b32 s80, v40, 24 +; SI-NEXT: v_readlane_b32 s71, v40, 23 +; SI-NEXT: v_readlane_b32 s70, v40, 22 +; SI-NEXT: v_readlane_b32 s69, v40, 21 +; SI-NEXT: v_readlane_b32 s68, v40, 20 +; SI-NEXT: v_readlane_b32 s67, v40, 19 +; SI-NEXT: v_readlane_b32 s66, v40, 18 +; SI-NEXT: v_readlane_b32 s65, v40, 17 +; SI-NEXT: v_readlane_b32 s64, v40, 16 +; SI-NEXT: v_readlane_b32 s55, v40, 15 +; SI-NEXT: v_readlane_b32 s54, v40, 14 +; SI-NEXT: v_readlane_b32 s53, v40, 13 +; SI-NEXT: v_readlane_b32 s52, v40, 12 +; SI-NEXT: v_readlane_b32 s51, v40, 11 +; SI-NEXT: v_readlane_b32 s50, v40, 10 +; SI-NEXT: v_readlane_b32 s49, v40, 9 +; SI-NEXT: v_readlane_b32 s48, v40, 8 +; SI-NEXT: v_readlane_b32 s39, v40, 7 +; SI-NEXT: v_readlane_b32 s38, v40, 6 +; SI-NEXT: v_readlane_b32 s37, v40, 5 +; SI-NEXT: v_readlane_b32 s36, v40, 4 +; SI-NEXT: v_readlane_b32 s35, v40, 3 +; SI-NEXT: v_readlane_b32 s34, v40, 2 +; SI-NEXT: v_readlane_b32 s31, v40, 1 +; SI-NEXT: v_readlane_b32 s30, v40, 0 ; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB99_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vcc_lo -; SI-NEXT: v_mov_b32_e32 v39, v24 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: v_mov_b32_e32 v38, v22 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: v_mov_b32_e32 v37, v21 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: v_mov_b32_e32 v36, v20 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed 
$vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: v_mov_b32_e32 v30, v18 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: v_mov_b32_e32 v29, v17 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: v_mov_b32_e32 v28, v13 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: v_mov_b32_e32 v26, v9 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: v_mov_b32_e32 v25, v6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; kill: killed $vcc_lo -; SI-NEXT: ; implicit-def: $vcc_lo +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: v_writelane_b32 v41, s4, 20 +; SI-NEXT: v_writelane_b32 v41, s5, 21 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $sgpr68 -; SI-NEXT: ; implicit-def: $sgpr71 -; SI-NEXT: ; implicit-def: $sgpr83 -; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $sgpr65 -; SI-NEXT: ; implicit-def: $sgpr69 -; SI-NEXT: ; implicit-def: $sgpr80 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $sgpr54 -; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 ; SI-NEXT: ; implicit-def: $sgpr70 -; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $sgpr51 -; SI-NEXT: ; implicit-def: $sgpr55 -; SI-NEXT: ; implicit-def: $sgpr67 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $sgpr48 -; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 ; SI-NEXT: ; implicit-def: $sgpr64 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr37 -; SI-NEXT: ; implicit-def: $sgpr49 -; SI-NEXT: ; implicit-def: $sgpr53 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr34 -; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr52 ; SI-NEXT: ; implicit-def: $sgpr50 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr95 -; SI-NEXT: ; implicit-def: $sgpr35 -; SI-NEXT: ; implicit-def: $sgpr39 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr92 -; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: 
$sgpr62 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr36 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr89 -; SI-NEXT: ; implicit-def: $sgpr93 -; SI-NEXT: ; implicit-def: $sgpr31 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr91 -; SI-NEXT: ; implicit-def: $sgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; kill: killed $vcc_lo -; SI-NEXT: ; implicit-def: $vcc_lo -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; kill: killed $vcc_lo -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: v_writelane_b32 v41, s4, 22 +; SI-NEXT: v_writelane_b32 v41, s5, 23 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 24 +; SI-NEXT: v_writelane_b32 v41, s5, 25 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 26 +; SI-NEXT: v_writelane_b32 v41, s5, 27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 28 +; SI-NEXT: v_writelane_b32 v41, s5, 29 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; 
implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 30 +; SI-NEXT: v_writelane_b32 v41, s5, 31 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 32 +; SI-NEXT: v_writelane_b32 v41, s5, 33 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 34 +; SI-NEXT: v_writelane_b32 v41, s5, 35 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 36 +; SI-NEXT: v_writelane_b32 v41, s5, 37 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 38 +; SI-NEXT: v_writelane_b32 v41, s5, 39 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 40 +; SI-NEXT: 
v_writelane_b32 v41, s5, 41 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 42 +; SI-NEXT: v_writelane_b32 v41, s5, 43 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 44 +; SI-NEXT: v_writelane_b32 v41, s5, 45 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 46 +; SI-NEXT: v_writelane_b32 v41, s5, 47 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 48 +; SI-NEXT: v_writelane_b32 v41, s5, 49 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 50 +; SI-NEXT: v_writelane_b32 v41, s5, 51 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 52 +; SI-NEXT: v_writelane_b32 v41, s5, 53 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 54 +; SI-NEXT: v_writelane_b32 v41, s5, 55 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 56 +; SI-NEXT: v_writelane_b32 v41, s5, 57 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 58 +; SI-NEXT: v_writelane_b32 v41, s5, 59 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 60 +; SI-NEXT: v_writelane_b32 v41, s5, 61 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 62 +; SI-NEXT: v_writelane_b32 v41, s5, 63 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v43, s4, 0 +; SI-NEXT: v_writelane_b32 v43, s5, 1 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 2 +; SI-NEXT: v_writelane_b32 v43, s5, 3 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 4 +; SI-NEXT: v_writelane_b32 v43, s5, 5 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 6 +; SI-NEXT: v_writelane_b32 v43, s5, 7 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 8 +; SI-NEXT: v_writelane_b32 v43, s5, 9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 10 +; SI-NEXT: v_writelane_b32 v43, s5, 11 +; SI-NEXT: v_writelane_b32 v43, s16, 12 +; SI-NEXT: v_writelane_b32 v43, s17, 13 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s16, 14 +; SI-NEXT: v_writelane_b32 v43, s17, 15 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: v_writelane_b32 v43, s16, 16 +; SI-NEXT: v_writelane_b32 v43, s17, 17 ; SI-NEXT: s_branch .LBB99_2 ; ; VI-LABEL: bitcast_v64i16_to_v128i8_scalar: @@ -217279,1071 +219502,408 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg % ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80 ; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:52 -; 
SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76 -; SI-NEXT: v_mul_f32_e32 v45, 1.0, v1 -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v30 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_mul_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_mul_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_mul_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_mul_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_mul_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_mul_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_mul_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:40 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:44 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_mul_f32_e32 v59, 1.0, v14 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:48 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_mul_f32_e32 v60, 1.0, v15 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_mul_f32_e32 v61, 1.0, v16 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:56 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_mul_f32_e32 v62, 1.0, v17 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:60 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mul_f32_e32 v63, 1.0, v18 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:64 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:76 +; SI-NEXT: v_mul_f32_e32 v41, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v12, 1.0, v30 +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v36, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v54, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v40, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v48, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v6 +; SI-NEXT: v_mul_f32_e32 v52, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v35, 1.0, v8 +; SI-NEXT: v_mul_f32_e32 v37, 1.0, v9 +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v10 +; SI-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v13 +; SI-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_mul_f32_e32 v18, 1.0, v18 ; SI-NEXT: v_mul_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_mul_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_mul_f32_e32 v44, 1.0, v21 -; SI-NEXT: v_mul_f32_e32 v46, 1.0, v22 -; SI-NEXT: v_mul_f32_e32 v47, 1.0, v23 -; SI-NEXT: v_mul_f32_e32 v56, 1.0, v24 -; SI-NEXT: v_mul_f32_e32 v57, 1.0, v25 -; SI-NEXT: 
v_mul_f32_e32 v58, 1.0, v26 -; SI-NEXT: v_mul_f32_e32 v14, 1.0, v27 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v3, 1.0, v21 +; SI-NEXT: v_mul_f32_e32 v4, 1.0, v22 +; SI-NEXT: v_mul_f32_e32 v5, 1.0, v23 +; SI-NEXT: v_mul_f32_e32 v6, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v7, 1.0, v25 +; SI-NEXT: v_mul_f32_e32 v8, 1.0, v26 +; SI-NEXT: v_mul_f32_e32 v9, 1.0, v27 +; SI-NEXT: v_mul_f32_e32 v10, 1.0, v28 ; SI-NEXT: v_mul_f32_e32 v29, 1.0, v29 -; SI-NEXT: v_mul_f32_e64 v15, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v16, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v17, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v18, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v21, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v22, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v23, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v24, 1.0, s24 -; SI-NEXT: v_mul_f32_e64 v25, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v26, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v27, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v30, 1.0, s29 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e64 v20, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v21, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v22, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v23, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v24, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v25, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v26, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v27, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v28, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v30, 1.0, s28 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_mul_f32_e32 v31, 1.0, v28 -; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32 -; SI-NEXT: v_mul_f32_e32 v33, 1.0, v33 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v34 -; SI-NEXT: v_mul_f32_e32 v35, 1.0, v35 -; SI-NEXT: v_mul_f32_e32 v36, 1.0, v36 -; SI-NEXT: v_mul_f32_e32 v37, 1.0, v37 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v38 -; SI-NEXT: s_waitcnt vmcnt(11) expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v49 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(10) expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v51 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v39 -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v48 -; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v55 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(6) expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v41 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v43 -; SI-NEXT: v_mul_f32_e32 v49, 1.0, v50 -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v52 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v53 -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v54 -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v40 -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v42 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e64 v1, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v28, 1.0, s28 -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB101_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_mov_b32_e32 v42, v51 -; SI-NEXT: v_mov_b32_e32 v55, 
v50 -; SI-NEXT: v_mov_b32_e32 v40, v52 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_mov_b32_e32 v24, v47 -; SI-NEXT: v_mov_b32_e32 v23, v46 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_mov_b32_e32 v25, v56 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_mov_b32_e32 v26, v57 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v29 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v34 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v36, v8 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v38 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v10 -; SI-NEXT: 
v_cvt_f32_f16_e32 v9, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v51 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v33 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v59 -; SI-NEXT: v_cvt_f32_f16_e32 v59, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v35 -; SI-NEXT: v_mov_b32_e32 v35, v43 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v61 -; SI-NEXT: v_cvt_f32_f16_e32 v60, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v62 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_mov_b32_e32 v38, v10 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v39 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v63 -; SI-NEXT: v_cvt_f32_f16_e32 v61, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_mov_b32_e32 v19, v28 -; SI-NEXT: v_mov_b32_e32 v28, v14 -; SI-NEXT: v_mov_b32_e32 v39, v22 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v62, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v63, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v47 -; SI-NEXT: v_mov_b32_e32 v47, v3 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v49 -; SI-NEXT: v_mov_b32_e32 v49, v15 -; SI-NEXT: v_mov_b32_e32 v15, v41 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v33, v11 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v53 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v50 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v52 -; SI-NEXT: v_mov_b32_e32 v51, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v54 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 -; SI-NEXT: v_mov_b32_e32 v20, v2 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v57 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v31, 1.0, v15 +; SI-NEXT: v_mul_f32_e32 v15, 1.0, v16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, 
v37 -; SI-NEXT: v_mov_b32_e32 v37, v9 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v2 -; SI-NEXT: v_mov_b32_e32 v27, v58 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v57, v3 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v9 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v34, v13 -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_mul_f32_e32 v12, 1.0, v32 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v39 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v13, 1.0, v51 +; SI-NEXT: v_mul_f32_e32 v42, 1.0, v42 +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v43 +; SI-NEXT: v_mul_f32_e32 v44, 1.0, v44 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_mul_f32_e32 v45, 1.0, v45 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_mul_f32_e32 v46, 1.0, v46 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_mul_f32_e32 v47, 1.0, v47 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_mul_f32_e32 v56, 1.0, v56 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v14 +; SI-NEXT: v_mul_f32_e32 v51, 1.0, v57 ; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: s_waitcnt vmcnt(7) expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: s_waitcnt vmcnt(6) expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v54 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 -; SI-NEXT: s_cbranch_execnz .LBB101_3 -; SI-NEXT: .LBB101_2: ; %cmp.true -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v54 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v53 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v52 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v40 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v50 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v55 -; SI-NEXT: 
v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v51 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v42 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v41 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v22 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v48 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: 
v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v29 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v31 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v28 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v27 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v26 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v25 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v24 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v14 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v23 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v63, v63 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v62, v62 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v61, v61 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v60, v60 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 
offset:304 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v59, v59 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v38, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v43 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v37, v57 -; SI-NEXT: v_cvt_f32_f16_e32 v57, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v12 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v6, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v50 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v36, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v9 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v22, v23 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v22, v25 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v22, v26 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v22, v27 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v58, 1.0, v58 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_mul_f32_e32 v59, 1.0, v59 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mul_f32_e32 v60, 1.0, v60 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v47, v24 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v45, v45 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v30, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v61 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_mul_f32_e32 v62, 1.0, v62 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v35, v55 -; SI-NEXT: 
s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v39, v54 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v49, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v42 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v53 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v48 -; SI-NEXT: 
buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v31 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v7 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v33 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v40 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v7, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v13 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v52 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v43, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v33, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v51 -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v63, 1.0, v63 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v43, v14 -; SI-NEXT: .LBB101_3: ; %end -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v53, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v53 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_or_b32_e32 v14, v14, v53 -; SI-NEXT: buffer_store_dword v14, v0, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v57, 1.0, v33 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v38, 1.0, v34 +; SI-NEXT: v_mul_f32_e64 v16, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v32, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v33, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v34, 1.0, s29 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 
offset:304 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB101_2 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:232 ; 4-byte 
Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: v_add_i32_e32 v15, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v49 -; SI-NEXT: v_add_i32_e32 v16, vcc, 12, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: v_add_i32_e32 v15, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_mov_b32_e32 v32, v53 +; SI-NEXT: v_mov_b32_e32 v53, v54 +; SI-NEXT: v_mov_b32_e32 v54, v40 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v18 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_mov_b32_e32 v40, v35 +; SI-NEXT: v_mov_b32_e32 v35, v37 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v16, vcc, 16, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v39 -; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v35 -; SI-NEXT: v_add_i32_e32 v16, vcc, 24, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v19 -; SI-NEXT: v_add_i32_e32 v16, vcc, 28, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v20 -; SI-NEXT: v_add_i32_e32 v16, vcc, 32, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v21 -; SI-NEXT: v_add_i32_e32 v16, vcc, 36, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v36 -; SI-NEXT: v_add_i32_e32 v16, vcc, 40, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v37 -; SI-NEXT: v_add_i32_e32 v16, vcc, 44, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v47 -; SI-NEXT: v_add_i32_e32 v16, vcc, 48, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v59 -; SI-NEXT: v_add_i32_e32 v16, vcc, 52, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:168 
; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v60 -; SI-NEXT: v_add_i32_e32 v16, vcc, 56, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v61 -; SI-NEXT: v_add_i32_e32 v16, vcc, 60, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v62 -; SI-NEXT: v_add_i32_e32 v16, vcc, 64, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v63 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v44 -; SI-NEXT: v_add_i32_e32 v16, vcc, 0x44, v0 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v46 -; SI-NEXT: v_add_i32_e32 v15, vcc, 0x48, v0 -; SI-NEXT: v_or_b32_e32 v1, v14, v1 -; SI-NEXT: buffer_store_dword v1, v15, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v48 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v56 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x4c, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v14, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v57 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 
4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v58 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v5 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v6 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v55 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v7 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v8 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v32 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v31 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v10 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: 
s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v41, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v17 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v33 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v18 +; SI-NEXT: v_mov_b32_e32 v31, v15 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v12 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v19 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v34 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v3 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v43 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:136 ; 
4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) -; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB101_4: -; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v40, v52 -; SI-NEXT: v_mov_b32_e32 v55, v50 -; SI-NEXT: v_mov_b32_e32 v42, v51 -; SI-NEXT: v_mov_b32_e32 v28, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v55, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v29 +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v36, v16 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v30, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v39 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v39, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v43 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v49, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v51 +; SI-NEXT: v_cvt_f32_f16_e32 v50, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v61 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v62 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v16 +; 
SI-NEXT: v_lshrrev_b32_e32 v16, 16, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v16 +; SI-NEXT: v_mov_b32_e32 v46, v54 +; SI-NEXT: v_mov_b32_e32 v16, v20 +; SI-NEXT: v_mov_b32_e32 v20, v32 +; SI-NEXT: v_mov_b32_e32 v32, v53 +; SI-NEXT: v_mov_b32_e32 v45, v40 +; SI-NEXT: v_mov_b32_e32 v56, v37 +; SI-NEXT: v_mov_b32_e32 v47, v11 +; SI-NEXT: s_branch .LBB101_3 +; SI-NEXT: .LBB101_2: +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 @@ -218369,72 +219929,775 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg % ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: v_mov_b32_e32 v27, v58 -; SI-NEXT: v_mov_b32_e32 v26, v57 -; SI-NEXT: v_mov_b32_e32 v25, v56 -; SI-NEXT: v_mov_b32_e32 v24, v47 -; SI-NEXT: v_mov_b32_e32 v23, v46 +; SI-NEXT: s_mov_b64 s[4:5], -1 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr16 +; SI-NEXT: v_mov_b32_e32 v20, v53 +; SI-NEXT: v_mov_b32_e32 v32, v54 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v46, v40 +; SI-NEXT: v_mov_b32_e32 v45, v35 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v56, v37 +; SI-NEXT: v_mov_b32_e32 v47, v11 +; SI-NEXT: v_mov_b32_e32 v31, v15 +; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr36 
-; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr61 ; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr56 ; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr6 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; kill: killed $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; kill: killed $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; kill: killed $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; kill: killed $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; kill: killed $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: s_branch .LBB101_2 +; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr16 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: .LBB101_3: ; %Flow +; SI-NEXT: v_mov_b32_e32 v5, v59 +; SI-NEXT: v_mov_b32_e32 v10, v63 +; SI-NEXT: v_mov_b32_e32 v59, v28 +; SI-NEXT: v_mov_b32_e32 v63, v26 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v7, v60 +; SI-NEXT: v_mov_b32_e32 v11, v57 +; SI-NEXT: v_mov_b32_e32 v12, v38 +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: v_mov_b32_e32 v35, v1 +; SI-NEXT: v_mov_b32_e32 v37, v55 +; SI-NEXT: v_mov_b32_e32 v15, v36 +; SI-NEXT: v_mov_b32_e32 v38, v4 +; SI-NEXT: v_mov_b32_e32 v53, v6 +; SI-NEXT: v_mov_b32_e32 v54, v8 +; SI-NEXT: v_mov_b32_e32 v40, v9 +; SI-NEXT: v_mov_b32_e32 v57, v13 +; SI-NEXT: v_mov_b32_e32 v60, v41 +; SI-NEXT: v_mov_b32_e32 v36, v2 +; SI-NEXT: s_cbranch_vccnz .LBB101_5 +; SI-NEXT: ; %bb.4: ; %cmp.true +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v43 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v16 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v51 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill 
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v11 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v62 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v5 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v61 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v7 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v58 +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v22 +; SI-NEXT: s_waitcnt 
vmcnt(1) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v29 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v22 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v16 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v22 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 
v22, 0xffff0000, v22 +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v22 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v44 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 +; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v21, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v51 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v21, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v12 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v21, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v39 +; SI-NEXT: v_cvt_f32_f16_e32 v39, v41 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v31 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v60, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v21, v29 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v21, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v30, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v53, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v1 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v21, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v49, v2 +; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v21, v43 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v22, v27 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v19 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v46 +; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v19 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v32 +; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v19 +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v22, v23 +; SI-NEXT: v_and_b32_e32 v44, 0xffff0000, v44 +; SI-NEXT: v_add_f32_e32 v44, 0x40c00000, v44 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v44 +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v22, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v44, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v55 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v8 +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v24, v10 +; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v15 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v19, v32 +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v19, v46 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_store_dword v19, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v19, v31 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v19, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v15, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v9 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v47 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v22, v47 +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v22, v4 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v14, v62 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v56 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v45 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v17, v45 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v14, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v38 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt 
expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v17, v56 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v14, v48 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v50 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v17, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v54, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v57, v3 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v17, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v50, v6 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v17, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v63, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v59, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v11 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v17, v61 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v17, v7 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v16, v13 +; SI-NEXT: .LBB101_5: ; %end +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: 
v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v28 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v26 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v63 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v60 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v18 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v35 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v37 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 
16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v15 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v38 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v39 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v53 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v54 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v40 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v57 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v50 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v42 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v17 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v52 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v34 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v16 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v63, off, s[0:3], 
s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v64bf16_to_v64f16_scalar: ; VI: ; %bb.0: @@ -220488,6 +222751,7 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v47, v0 ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:136 ; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4 @@ -220511,26 +222775,149 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76 ; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:80 ; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:84 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:92 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:92 ; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:88 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: 
buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:96 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:100 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:108 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:112 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:116 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:120 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; 
implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 @@ -220546,23 +222933,8 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:96 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:100 -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:108 -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:112 -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:116 -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:120 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 ; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 ; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 @@ -220576,21 +222948,34 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 ; SI-NEXT: v_cvt_f16_f32_e32 v51, v51 ; SI-NEXT: v_cvt_f16_f32_e32 v52, v52 -; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 +; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 ; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 ; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 ; SI-NEXT: v_cvt_f16_f32_e32 v42, v42 ; SI-NEXT: v_cvt_f16_f32_e32 v43, v43 -; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_cvt_f16_f32_e32 v44, v44 -; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_cvt_f16_f32_e32 v45, v45 -; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_cvt_f16_f32_e32 v46, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v47, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; kill: killed $vgpr4 +; 
SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr3 +; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; kill: killed $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; kill: killed $vgpr4 ; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_cvt_f16_f32_e32 v56, v1 ; SI-NEXT: s_waitcnt vmcnt(5) @@ -220615,333 +223000,222 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) { ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v31, v2 ; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: 
; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc ; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; SI-NEXT: s_cbranch_execz .LBB102_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v63 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v31 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v56 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v6 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v57 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v7 -; 
SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v58 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v8 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v9 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v60 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v10 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v61 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v11 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v62 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v1 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v13 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v63 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v14 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v31 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr60 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v15 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_lshlrev_b32_e32 v2, 16, v16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v17 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v18 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v8 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v19 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v10 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v20 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v11 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v21 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v13 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v22 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v14 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v16 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v24 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v18 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v19 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v21 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v22 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v28 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: 
v_lshlrev_b32_e32 v4, 16, v24 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v29 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v26 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v9 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v27 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v12 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v29 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v15 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v30 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v17 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v33 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v20 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v35 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v23 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:328 ; 
4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v36 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v25 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v38 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v28 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v39 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v32 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v49 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v34 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v51 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v52 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v45 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v48 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v46 -; SI-NEXT: buffer_store_dword v2, off, 
s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v54 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v47 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v50 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v56 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v55 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v57 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v53 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v58 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v41 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v59 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v40 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v60 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v43 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v61 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v42 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v44 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v45 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v62 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v46 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 @@ -220991,110 +223265,99 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr45 ; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: 
$vgpr47 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr62 ; SI-NEXT: .LBB102_2: ; %Flow ; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; SI-NEXT: s_cbranch_execz .LBB102_4 ; SI-NEXT: ; %bb.3: ; %cmp.true -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v2, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v63 ; SI-NEXT: v_cvt_f32_f16_e32 v63, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v62 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v60, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v58, v58 ; SI-NEXT: v_add_f32_e32 v62, 0x38000000, v63 ; SI-NEXT: v_add_f32_e32 v63, 0x38000000, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v61 -; SI-NEXT: v_cvt_f32_f16_e32 v60, v60 ; SI-NEXT: v_cvt_f32_f16_e32 v61, v59 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v58 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v57, v57 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 ; SI-NEXT: v_add_f32_e32 v59, 0x38000000, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v57, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v46 ; SI-NEXT: v_add_f32_e32 v60, 0x38000000, v61 ; SI-NEXT: v_add_f32_e32 v61, 0x38000000, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v58, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload ; SI-NEXT: v_cvt_f32_f16_e32 v56, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v47 ; SI-NEXT: v_add_f32_e32 v46, 0x38000000, v57 -; SI-NEXT: v_add_f32_e32 v57, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v45 -; SI-NEXT: v_add_f32_e32 v47, 0x38000000, v56 +; SI-NEXT: v_add_f32_e32 v57, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v45 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v31 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v56 ; SI-NEXT: v_add_f32_e32 v56, 0x38000000, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v44, v44 ; SI-NEXT: v_cvt_f32_f16_e32 v45, v43 ; SI-NEXT: v_cvt_f32_f16_e32 v58, v42 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v41 +; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v41 ; SI-NEXT: v_add_f32_e32 v43, 0x38000000, v44 ; SI-NEXT: v_add_f32_e32 v44, 0x38000000, v45 ; SI-NEXT: v_add_f32_e32 v45, 0x38000000, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 ; SI-NEXT: v_cvt_f32_f16_e32 v41, v55 ; SI-NEXT: v_cvt_f32_f16_e32 v58, v54 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v53 +; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v53 ; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v40 ; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v41 ; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v52, v52 ; SI-NEXT: v_cvt_f32_f16_e32 v53, v51 ; SI-NEXT: v_cvt_f32_f16_e32 v58, v50 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v49 +; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v49 ; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v52 ; SI-NEXT: v_add_f32_e32 v52, 0x38000000, v53 ; 
SI-NEXT: v_add_f32_e32 v53, 0x38000000, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 ; SI-NEXT: v_cvt_f32_f16_e32 v49, v39 ; SI-NEXT: v_cvt_f32_f16_e32 v58, v38 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v37 +; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v37 ; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v48 ; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v49 ; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v35 ; SI-NEXT: v_cvt_f32_f16_e32 v58, v34 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v33 +; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v33 ; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v36 ; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v37 ; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v58, v29 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v28 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v28 ; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v32 ; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v33 ; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v58, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v24 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v27 ; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v58 -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 @@ -221104,8 +223367,13 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v4, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 @@ -221127,624 +223395,626 @@ define <64 x bfloat> @bitcast_v64f16_to_v64bf16(<64 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 ; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 ; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_cvt_f16_f32_e32 
v1, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v28 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v1, v60 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v24, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v7 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v58, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v58 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v58 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v8 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v6 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v10 ; SI-NEXT: 
v_cvt_f16_f32_e32 v5, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v12 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v6 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v16 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v6 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v20 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_lshlrev_b32_e32 v3, 16, v6 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v22 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v23 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v28 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v25 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v33 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v32 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v30 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v29 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, 
s[0:3], s32 offset:308 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v37 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v36 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v35 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v34 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v49 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v48 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v39 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v38 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v53 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v52 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v51 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v50 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, 
s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v41 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v55 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v54 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v45 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v44 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v43 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v42 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v57 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v56 -; SI-NEXT: v_cvt_f16_f32_e32 v4, 
v47 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v46 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v4, v46 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v0, v61 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v59 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v61 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v60 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v59 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v1 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cvt_f32_f16_e32 v58, v58 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v0, v63 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v62 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v63 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v62 -; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v58 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v1 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: s_waitcnt expcnt(0) 
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v2, v31 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v58 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 ; SI-NEXT: v_mov_b32_e32 v3, v24 -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill ; SI-NEXT: .LBB102_4: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: buffer_store_dword v0, v47, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v3 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded 
Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; 
SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword 
v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, 
s[0:3], s32 offset:360 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, 
s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, 
s[0:3], s32 offset:432 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v47 +; SI-NEXT: buffer_store_dword v0, v1, 
s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x7c, v47 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload @@ -222018,13 +224288,14 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v37, v0 ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80 ; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:8 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:24 ; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:32 @@ -222042,66 +224313,64 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:72 ; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:76 ; SI-NEXT: v_cvt_f16_f32_e32 v40, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v8 -; SI-NEXT: v_mov_b32_e32 v46, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v9 +; SI-NEXT: v_mov_b32_e32 v46, v28 ; SI-NEXT: v_cvt_f16_f32_e32 v43, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 
v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v45, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v46, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v47, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v57, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v8, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v30 ; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v25, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v29, s20 ; SI-NEXT: v_cvt_f16_f32_e32 v30, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v27, s24 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v23 ; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v59, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v60, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v39 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cvt_f16_f32_e32 v61, v48 -; SI-NEXT: s_waitcnt vmcnt(11) expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v49 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v52, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v48 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_cvt_f16_f32_e32 v61, v49 +; SI-NEXT: s_waitcnt vmcnt(10) expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v52 ; SI-NEXT: s_waitcnt vmcnt(9) ; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 ; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v54 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v55 ; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 ; SI-NEXT: s_waitcnt vmcnt(5) @@ -222111,17 +224380,19 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_cvt_f16_f32_e32 v63, v63 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v50 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v50 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v44, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v23, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v38, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v37, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v48, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v49, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v35, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v50, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v51, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v36, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v39, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v48, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v49, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v33, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v50, s27 +; 
SI-NEXT: v_cvt_f16_f32_e32 v51, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v52, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill @@ -222129,737 +224400,717 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:324 ; 
4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB103_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v23 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v23 +; SI-NEXT: v_mov_b32_e32 v23, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v24 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v25 -; SI-NEXT: buffer_store_dword v8, off, 
s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v25 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v29 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v29 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v30 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v30 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v38 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v36 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v37 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v39 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v27 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v48 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v48 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v49 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v49 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v33 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v35 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v50 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v50 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v51 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v51 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v52 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v40 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v40 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v43 -; SI-NEXT: v_mov_b32_e32 v43, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: 
buffer_store_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v43 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v7 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v20 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v23 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v21 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v7 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v8 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v28 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v46 ; SI-NEXT: s_mov_b64 s[4:5], 0 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_mov_b32_e32 v50, v19 -; SI-NEXT: v_mov_b32_e32 v51, v22 -; SI-NEXT: v_mov_b32_e32 v38, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_mov_b32_e32 v37, v45 -; SI-NEXT: v_mov_b32_e32 v27, v26 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v26 -; SI-NEXT: v_mov_b32_e32 v49, v47 -; SI-NEXT: v_mov_b32_e32 v35, v28 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v58 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v59 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v60 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v39 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v52 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v53 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v54 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_mov_b32_e32 v51, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_mov_b32_e32 v36, v22 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v44 +; SI-NEXT: v_mov_b32_e32 v50, v26 +; SI-NEXT: v_mov_b32_e32 v33, v28 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v38 +; SI-NEXT: v_mov_b32_e32 v38, v7 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v59 +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v60 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v61 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v54 ; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v41 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v62 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v6 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v62 +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v9 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v10 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v10 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v11 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v11 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v12 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v12 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v13 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v13 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v14 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v14 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v31 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v31 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v16 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v17 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v18 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v18 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v19 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v19 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v20 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v46 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v47 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v45 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v56 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v35 ; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_lshlrev_b32_e32 v5, 16, v47 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57 -; SI-NEXT: v_mov_b32_e32 v57, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v32 -; SI-NEXT: v_mov_b32_e32 v32, v7 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v33 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v34 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v22 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v45 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56 -; SI-NEXT: v_mov_b32_e32 v33, v12 -; SI-NEXT: v_mov_b32_e32 v34, v5 -; SI-NEXT: v_mov_b32_e32 v58, v7 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v36 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v61 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v42 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v63 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v44 -; SI-NEXT: v_mov_b32_e32 v44, v18 -; SI-NEXT: v_mov_b32_e32 v5, v43 -; SI-NEXT: v_mov_b32_e32 v18, v6 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v57 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v58 +; SI-NEXT: v_mov_b32_e32 v58, v5 +; SI-NEXT: v_mov_b32_e32 v59, v11 +; SI-NEXT: v_mov_b32_e32 v60, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v63 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v9 +; SI-NEXT: v_mov_b32_e32 v5, v23 +; SI-NEXT: v_mov_b32_e32 v7, v6 ; SI-NEXT: s_branch .LBB103_3 ; SI-NEXT: .LBB103_2: -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: v_mov_b32_e32 v35, v28 -; SI-NEXT: 
v_mov_b32_e32 v49, v47 -; SI-NEXT: v_mov_b32_e32 v27, v26 -; SI-NEXT: v_mov_b32_e32 v37, v45 -; SI-NEXT: v_mov_b32_e32 v38, v16 -; SI-NEXT: v_mov_b32_e32 v51, v22 -; SI-NEXT: v_mov_b32_e32 v50, v19 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: v_mov_b32_e32 v33, v28 +; SI-NEXT: v_mov_b32_e32 v50, v26 +; SI-NEXT: v_mov_b32_e32 v36, v22 +; SI-NEXT: v_mov_b32_e32 v51, v21 ; SI-NEXT: s_mov_b64 s[4:5], -1 -; SI-NEXT: ; kill: killed $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; kill: killed $vgpr5 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr0 ; SI-NEXT: v_mov_b32_e32 v5, v6 -; SI-NEXT: ; kill: killed $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; kill: killed $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $vgpr21 +; 
SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; kill: killed $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr60 +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: ; implicit-def: $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: .LBB103_3: ; %Flow -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v36, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v43, v9 -; SI-NEXT: v_mov_b32_e32 v12, v31 +; SI-NEXT: v_mov_b32_e32 v19, v20 +; SI-NEXT: v_mov_b32_e32 v6, v27 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v61, v2 ; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; SI-NEXT: v_mov_b32_e32 v31, v11 -; SI-NEXT: v_mov_b32_e32 v9, v17 ; SI-NEXT: s_cbranch_vccnz .LBB103_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v55 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v63 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v62 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v8 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v10 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v2 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v55 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v15, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v42 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v8 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v0, 
0x38000000, v10 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v15 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v61 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v42 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v34 ; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v15 -; SI-NEXT: v_mov_b32_e32 v6, v37 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v39 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v52 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v8 +; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v53 +; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v51 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v60 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v59 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v50 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v35 +; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v10 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_cvt_f32_f16_e32 v10, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v7 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v13 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v43 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v18 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_add_f32_e32 v45, 0x38000000, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v32 ; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42 -; SI-NEXT: v_add_f32_e32 v43, 0x38000000, v43 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v49 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v15, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v15 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v10 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v4, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v14, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v52, 0x38000000, v14 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v14, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v33 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v14 ; SI-NEXT: 
v_add_f32_e32 v14, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v46 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v5 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v45, 0x38000000, v45 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v3, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v15, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v57 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v15 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v8, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v52, 0x38000000, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v35 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v38 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v10, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v3, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v47 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v50 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v11, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v56 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v44 +; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v36 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v21, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v31, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v28, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v31, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v32, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload 
+; SI-NEXT: v_cvt_f32_f16_e32 v32, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v34, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v33, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v34, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v36, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v36, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v51, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v50, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v51, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v40, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v40, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v41, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v44, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v43, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v43, 0x38000000, v43 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v44, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v44, 0x38000000, v44 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v46, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v46, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v46, 0x38000000, v46 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v47, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v47, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v47, 0x38000000, v47 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v56, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v56, v0 +; SI-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v56, 0x38000000, v56 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v57, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v57, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v57, 0x38000000, v57 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v58, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v58, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v26, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v26, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v22, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v22, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v19, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v19, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v35, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v35, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v13, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v13, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v12, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v7, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v7, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v5, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v5, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v59, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded 
Reload +; SI-NEXT: v_cvt_f32_f16_e32 v59, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v59, 0x38000000, v59 ; SI-NEXT: v_cvt_f16_f32_e32 v59, v59 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v60, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v60, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v60, 0x38000000, v60 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v61, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v61, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v61, 0x38000000, v61 ; SI-NEXT: v_cvt_f16_f32_e32 v61, v61 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v62, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v62, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v62, 0x38000000, v62 ; SI-NEXT: v_cvt_f16_f32_e32 v62, v62 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v63, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v63, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v63, 0x38000000, v63 ; SI-NEXT: v_cvt_f16_f32_e32 v63, v63 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v63 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v63 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v62 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v62 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v61 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v61 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v60 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v0, v60 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v59 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59 +; 
SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v7 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v35 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v19 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v7 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v12 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v22 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v58 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v57 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v7 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v12 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v56 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v56 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v47 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v45 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v46 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v12 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v44 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v44 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v43 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v42 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v41 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v7 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v12 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v40 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v51 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v50 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v36 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v7 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v12 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v32 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v31 -; SI-NEXT: 
v_lshlrev_b32_e32 v31, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v7, v32 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v12 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v28 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v21 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v11 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v7 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v6 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v16 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v17 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v20 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v10 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v39 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v52 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v17 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v27 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v18 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v2 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v23 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v29 ; 
SI-NEXT: v_cvt_f16_f32_e32 v4, v15 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v48 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v24 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v24 +; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v23 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v14 -; SI-NEXT: v_mov_b32_e32 v16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v37 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v48 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v39 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v25 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v55 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v30 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v49 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v5 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v29 +; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v54 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v5 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v4 -; SI-NEXT: v_mov_b32_e32 v4, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v55 +; SI-NEXT: v_mov_b32_e32 v17, v11 +; SI-NEXT: v_mov_b32_e32 v16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v3, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v3 -; SI-NEXT: v_mov_b32_e32 v3, v13 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v3 +; SI-NEXT: v_mov_b32_e32 v3, v19 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v4 +; SI-NEXT: v_mov_b32_e32 v4, v22 +; SI-NEXT: v_mov_b32_e32 v22, v6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v2 ; SI-NEXT: .LBB103_5: ; %end ; SI-NEXT: buffer_load_dword v1, 
off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v37, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload @@ -222869,7 +225120,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload @@ -222880,7 +225131,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload @@ -222891,7 +225142,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload @@ -222902,7 +225153,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 16, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 16, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload @@ -222913,10 +225164,10 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 20, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 20, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -222924,10 +225175,10 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 24, v0 +; SI-NEXT: 
v_add_i32_e32 v2, vcc, 24, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -222935,47 +225186,47 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v4 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 36, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 36, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 40, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 40, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload @@ -222986,7 +225237,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 48, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 48, v37 ; SI-NEXT: 
buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload @@ -222997,148 +225248,145 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg % ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v31 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v16 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v17 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v9 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v21 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v19 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v40 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v13 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v8 +; SI-NEXT: 
buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v22 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v32 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v59 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v57 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v38 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v33 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v60 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v10 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v58 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v44 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v18 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v34 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v10 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v31 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v58 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v15 ; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v37 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v23 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v14 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v22 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v37 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v24 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v40 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v24 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v8 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v37 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v48 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v45 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v48 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v39 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, 
vcc, 0x68, v37 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v21 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v25 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v49 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v25 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v37 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v28 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v29 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v52 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v29 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v37 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v47 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v30 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v28 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v30 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v37 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v15 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v26 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v12 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v43 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v37 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v14 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v7 -; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v11 +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v26 +; SI-NEXT: v_alignbit_b32 v0, v0, v1, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x7c, v37 +; SI-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload @@ -226977,999 +229225,1089 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:16 ; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 
offset:32 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76 -; SI-NEXT: v_mul_f32_e32 v44, 1.0, v1 -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v2 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v3 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:68 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:76 +; SI-NEXT: v_mul_f32_e32 v38, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v4 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v6 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v7 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v8 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v12 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v61, 1.0, v4 -; SI-NEXT: v_mul_f32_e32 v45, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v5, 1.0, v8 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v12 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v59, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v4, 1.0, v6 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v7 ; SI-NEXT: v_mul_f32_e32 v8, 1.0, v9 ; SI-NEXT: v_mul_f32_e32 v6, 1.0, v10 -; SI-NEXT: v_mul_f32_e32 v62, 1.0, v11 -; SI-NEXT: v_mul_f32_e32 v46, 1.0, v13 -; SI-NEXT: v_mul_f32_e32 v13, 1.0, v14 -; SI-NEXT: v_mul_f32_e32 v60, 1.0, v16 -; 
SI-NEXT: v_mul_f32_e32 v57, 1.0, v17 -; SI-NEXT: v_mul_f32_e32 v16, 1.0, v18 -; SI-NEXT: v_mul_f32_e32 v56, 1.0, v19 -; SI-NEXT: v_mul_f32_e32 v47, 1.0, v20 -; SI-NEXT: v_mul_f32_e32 v17, 1.0, v21 -; SI-NEXT: v_mul_f32_e32 v19, 1.0, v22 -; SI-NEXT: v_mul_f32_e32 v18, 1.0, v23 -; SI-NEXT: v_mul_f32_e32 v20, 1.0, v24 -; SI-NEXT: v_mul_f32_e32 v21, 1.0, v25 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v26 -; SI-NEXT: v_mul_f32_e32 v63, 1.0, v27 -; SI-NEXT: v_mul_f32_e32 v58, 1.0, v28 -; SI-NEXT: v_mul_f32_e32 v26, 1.0, v29 -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v30 -; SI-NEXT: v_mul_f32_e64 v7, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v3, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v11, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v29, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v14, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v25, 1.0, s28 -; SI-NEXT: v_mul_f32_e64 v24, 1.0, s29 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_mul_f32_e32 v31, 1.0, v15 -; SI-NEXT: v_mul_f32_e32 v10, 1.0, v32 -; SI-NEXT: v_mul_f32_e32 v12, 1.0, v33 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v36 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v28, 1.0, v34 -; SI-NEXT: v_mul_f32_e32 v27, 1.0, v35 -; SI-NEXT: v_mul_f32_e32 v59, 1.0, v37 -; SI-NEXT: v_mul_f32_e32 v35, 1.0, v38 -; SI-NEXT: v_mul_f32_e32 v30, 1.0, v39 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v48 -; SI-NEXT: v_mul_f32_e32 v4, 1.0, v49 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v50 -; SI-NEXT: s_waitcnt vmcnt(10) expcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v54 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v36, 1.0, v51 -; SI-NEXT: v_mul_f32_e32 v37, 1.0, v52 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v53 -; SI-NEXT: s_waitcnt vmcnt(6) expcnt(0) -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v43 -; SI-NEXT: v_mul_f32_e32 v49, 1.0, v55 -; SI-NEXT: v_mul_f32_e32 v33, 1.0, v40 -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v41 -; SI-NEXT: v_mul_f32_e32 v32, 1.0, v42 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: v_mul_f32_e64 v53, 1.0, s17 +; SI-NEXT: v_mul_f32_e32 v45, 1.0, v11 +; SI-NEXT: v_mul_f32_e32 v12, 1.0, v13 +; SI-NEXT: v_mul_f32_e32 v10, 1.0, v14 +; SI-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_mul_f32_e32 v14, 1.0, v18 +; SI-NEXT: v_mul_f32_e32 v13, 1.0, v19 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v18, 1.0, v21 +; SI-NEXT: v_mul_f32_e32 v16, 1.0, v22 +; SI-NEXT: v_mul_f32_e32 v19, 1.0, v23 +; SI-NEXT: v_mul_f32_e32 v23, 1.0, v25 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; SI-NEXT: v_mul_f32_e32 v63, 1.0, v28 +; SI-NEXT: v_mul_f32_e32 v7, 1.0, v29 +; SI-NEXT: v_mul_f32_e32 v11, 1.0, v30 +; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v30, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v22, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v20, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v29, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v28, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v21, 1.0, s28 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; SI-NEXT: v_mul_f32_e32 v31, 1.0, v17 +; SI-NEXT: v_mul_f32_e32 v17, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v24, 1.0, v26 +; SI-NEXT: v_mul_f32_e32 v62, 1.0, v32 +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v33 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: 
v_mul_f32_e32 v53, 1.0, v34 +; SI-NEXT: v_mul_f32_e32 v35, 1.0, v35 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v36 +; SI-NEXT: v_mul_f32_e32 v60, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v37, 1.0, v39 +; SI-NEXT: v_mul_f32_e32 v36, 1.0, v48 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e64 v2, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v52, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v51, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v50, 1.0, s24 -; SI-NEXT: v_mul_f32_e64 v15, 1.0, s26 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; 
SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v56, 1.0, v51 +; SI-NEXT: v_mul_f32_e32 v5, 1.0, v52 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v54 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_mul_f32_e32 v51, 1.0, v55 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v40 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_mul_f32_e32 v42, 1.0, v41 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_mul_f32_e32 v48, 1.0, v43 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v44 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_mul_f32_e32 v47, 1.0, v46 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v57 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mul_f32_e32 v58, 1.0, v58 +; SI-NEXT: v_mul_f32_e64 v54, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v26, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v52, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v34, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v33, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v32, 1.0, s29 +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v48, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB105_2 ; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v20 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v54 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v26 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v52 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v51 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v21 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v50 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v3 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v50 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v45 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v44 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 
offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v53 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v14 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v14 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v13 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v24 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v7, v5 -; SI-NEXT: v_mov_b32_e32 v42, v62 -; SI-NEXT: v_mov_b32_e32 v43, v63 -; SI-NEXT: v_mov_b32_e32 v55, v12 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v25 -; SI-NEXT: v_mov_b32_e32 v25, v60 -; SI-NEXT: v_mov_b32_e32 v54, v47 -; SI-NEXT: v_mov_b32_e32 v40, v20 -; SI-NEXT: v_mov_b32_e32 v51, v61 -; SI-NEXT: s_mov_b64 s[4:5], 0 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v46 -; SI-NEXT: v_mov_b32_e32 v29, v31 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v19 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v31 -; SI-NEXT: v_mov_b32_e32 v24, v56 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_mov_b32_e32 v52, v10 -; SI-NEXT: v_mov_b32_e32 v53, v59 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v49 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v24 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v61 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v27 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v45 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v11 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v57 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v50, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v62 +; SI-NEXT: 
buffer_store_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v35 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v25 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v62 -; SI-NEXT: v_mov_b32_e32 v62, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v63 -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v12 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v10 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v27 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v59 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v36 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v32 -; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v12 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v39 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v39 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_mov_b32_e32 v41, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v60 -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v47 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v20 -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v33 -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v34 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v51 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v56 -; SI-NEXT: v_mov_b32_e32 v39, v4 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v37 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v48 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 
offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v58 -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v35 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v48 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v37, v38 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v38 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_mov_b32_e32 v25, v1 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v58 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v30 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v34 +; SI-NEXT: v_mov_b32_e32 v57, v13 +; SI-NEXT: v_mov_b32_e32 v40, v3 +; SI-NEXT: v_mov_b32_e32 v54, v50 +; SI-NEXT: v_mov_b32_e32 v46, v19 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v9 +; SI-NEXT: v_mov_b32_e32 v44, v15 +; SI-NEXT: v_mov_b32_e32 v9, v11 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v6 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v59 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v10 +; SI-NEXT: v_mov_b32_e32 v41, v27 +; SI-NEXT: v_mov_b32_e32 v52, v62 +; SI-NEXT: v_mov_b32_e32 v21, v58 +; SI-NEXT: v_mov_b32_e32 v58, v20 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v38 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v60 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v56 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v55 +; SI-NEXT: v_mov_b32_e32 v55, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v42 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v53, v5 +; SI-NEXT: v_mov_b32_e32 v42, v43 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v13 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:328 ; 4-byte 
Folded Reload +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v3 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v34 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v50 +; SI-NEXT: s_waitcnt vmcnt(2) expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v17 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v11 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v15 +; SI-NEXT: v_mov_b32_e32 v5, v19 +; SI-NEXT: v_mov_b32_e32 v7, v15 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v63 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v61 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v47 +; SI-NEXT: v_mov_b32_e32 v47, v3 +; SI-NEXT: v_mov_b32_e32 v3, v17 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v43 +; SI-NEXT: v_mov_b32_e32 v1, v13 ; SI-NEXT: s_branch .LBB105_3 ; SI-NEXT: .LBB105_2: -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v55, v12 -; SI-NEXT: v_mov_b32_e32 v33, v34 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mov_b32_e32 v25, v1 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v21, v58 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v52, v62 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; 
kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr24 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr16 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v7, v5 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; 
implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: v_mov_b32_e32 v51, v61 -; SI-NEXT: v_mov_b32_e32 v42, v62 -; SI-NEXT: v_mov_b32_e32 v29, v31 -; SI-NEXT: v_mov_b32_e32 v25, v60 -; SI-NEXT: v_mov_b32_e32 v24, v56 -; SI-NEXT: v_mov_b32_e32 v54, v47 -; SI-NEXT: v_mov_b32_e32 v40, v20 -; SI-NEXT: v_mov_b32_e32 v43, v63 -; SI-NEXT: v_mov_b32_e32 v52, v10 -; SI-NEXT: v_mov_b32_e32 v53, v59 -; SI-NEXT: v_mov_b32_e32 v39, v4 -; SI-NEXT: v_mov_b32_e32 v37, v38 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v54, v50 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v9, v11 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v53, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v40, v3 +; SI-NEXT: v_mov_b32_e32 v44, v15 +; SI-NEXT: v_mov_b32_e32 v57, v13 +; SI-NEXT: v_mov_b32_e32 v46, v19 +; SI-NEXT: v_mov_b32_e32 v41, v27 ; SI-NEXT: s_mov_b64 s[4:5], -1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: v_mov_b32_e32 v42, v43 +; SI-NEXT: v_mov_b32_e32 v3, v17 ; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; kill: killed $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; kill: killed $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr26 -; 
SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; kill: killed $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: .LBB105_3: ; %Flow -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload ; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; SI-NEXT: s_cbranch_vccnz .LBB105_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v40 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v19 -; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v55 -; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v39 -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v37 -; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v30 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v32 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v34 -; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v33 -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v30 +; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v31 +; SI-NEXT: v_mov_b32_e32 v38, v9 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v38 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v38, 0xffff0000, v31 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: 
v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v29 +; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v30 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v28 +; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v34 +; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v29 +; SI-NEXT: v_and_b32_e32 v34, 0xffff0000, v30 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: 
s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v6 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v32 +; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: v_alignbit_b32 v1, v3, v1, 16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v51 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v1 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v31 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v3 -; SI-NEXT: v_alignbit_b32 v1, v9, v1, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v40 +; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v4 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v26 +; SI-NEXT: v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v50 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v11 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v54 +; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_alignbit_b32 v1, v11, v1, 16 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v41 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v19 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v42 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v7 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v15 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; 
SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v11 -; SI-NEXT: v_alignbit_b32 v1, v14, v1, 16 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v25 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v50 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v29 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v14 -; SI-NEXT: v_alignbit_b32 v51, v16, v1, 16 -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v54 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v24 -; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v44 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v16 -; SI-NEXT: v_alignbit_b32 v1, v18, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v16 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v23 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v24 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v53 -; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v25 -; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v27 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v29 -; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v28 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v31 -; SI-NEXT: v_and_b32_e32 v38, 0xffff0000, v2 -; SI-NEXT: v_and_b32_e32 v34, 0xffff0000, v9 -; SI-NEXT: v_and_b32_e32 v37, 0xffff0000, v3 -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v57 +; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v18, v20, v1, 16 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v7 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v43 -; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v17 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v46 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v20 -; SI-NEXT: v_alignbit_b32 v1, v22, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v13 +; 
SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v63 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v41 +; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v1, v23, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v11 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v61 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v12 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v52 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v1, v26, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v9 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v32 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v29 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33 -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v33 -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v60 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v7 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 ; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v1, v27, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v12 -; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v25 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 -; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v24 -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v15 +; SI-NEXT: 
v_and_b32_e32 v1, 0xffff0000, v25 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 ; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v1, v28, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v52, v30, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v3 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v56 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v36, v35, v1, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v8 -; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v32 -; SI-NEXT: v_alignbit_b32 v48, v49, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v31 -; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v10 -; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v25 -; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v20 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v21 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
v_and_b32_e32 v1, 0xffff0000, v42 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v28, v59, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v26, v28, v26, 16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_alignbit_b32 v2, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 -; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 -; SI-NEXT: v_alignbit_b32 v46, v61, v31, 16 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; SI-NEXT: v_alignbit_b32 v4, v6, v4, 16 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v53 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_alignbit_b32 v6, v8, v6, 16 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v21, v30, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 +; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 -; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v31 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v23, v10, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v63, v23, v27, 16 -; SI-NEXT: v_alignbit_b32 v27, v21, v12, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v57, v58, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 +; SI-NEXT: v_alignbit_b32 v8, v10, v8, 16 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v17, v1, v20, 16 -; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 -; SI-NEXT: v_alignbit_b32 v19, v17, v19, 16 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; 
SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_alignbit_b32 v10, v12, v10, 16 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v24 -; SI-NEXT: v_alignbit_b32 v56, v47, v20, 16 -; SI-NEXT: v_alignbit_b32 v20, v62, v11, 16 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v16, v56, v16, 16 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_alignbit_b32 v12, v14, v12, 16 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v23 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_alignbit_b32 v14, v18, v14, 16 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; SI-NEXT: v_alignbit_b32 v18, v20, v18, 16 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v23 +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v47 +; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 +; SI-NEXT: v_alignbit_b32 v24, v24, v23, 16 +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v26 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 ; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v22, v45, v9, 16 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v15 -; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v29 -; SI-NEXT: v_alignbit_b32 v13, v60, v25, 16 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_alignbit_b32 v22, v21, v20, 16 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[48:49], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[7:8], 16 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v9 -; SI-NEXT: v_alignbit_b32 v24, v44, v3, 16 -; 
SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v26 +; SI-NEXT: v_alignbit_b32 v26, v59, v25, 16 +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v27 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[51:52], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[1:2], 16 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v15 -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v39, 0x40c00000, v39 +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_alignbit_b32 v16, v45, v16, 16 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_alignbit_b32 v9, v11, v9, 16 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 +; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 +; SI-NEXT: v_alignbit_b32 v28, v58, v27, 16 +; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v29 +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v50, 0xffff0000, v15 -; SI-NEXT: v_mov_b32_e32 v15, v24 -; SI-NEXT: v_add_f32_e32 v50, 0x40c00000, v50 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v50 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v3, v39, 16 -; SI-NEXT: v_alignbit_b32 v4, v3, v4, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_alignbit_b32 v20, v55, v20, 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[19:20], 16 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v29 +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v29 +; SI-NEXT: v_alignbit_b32 v35, v43, v32, 16 +; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v30 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[62:63], v[34:35], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v30 +; SI-NEXT: v_alignbit_b32 v39, v29, v32, 16 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:316 ; 4-byte 
Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[5:6], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v9, v5, 16 -; SI-NEXT: v_alignbit_b32 v5, v36, v7, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[27:28], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v2, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, v46, v33, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[23:24], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v24, v38, 16 -; SI-NEXT: v_alignbit_b32 v38, v48, v8, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v4, v22, v37, 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[17:18], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v57, v32, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v4, v20, v34, 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[13:14], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v20, v52 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[11:12], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v13, v14, 16 -; SI-NEXT: v_mov_b32_e32 v14, v51 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 ; SI-NEXT: .LBB105_5: ; %end +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: 
v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v52 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v4 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v3 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v4 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_or_b32_e32 v4, v7, v4 -; SI-NEXT: v_add_i32_e32 v7, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v11 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v62 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v3 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v43 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) +; SI-NEXT: 
v_lshlrev_b32_e32 v1, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v58 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v51 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v44 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v25 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v45 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: 
s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v36 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v62 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v33 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v46 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v45 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v14 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword 
v15, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v15 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v56 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v15 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v17 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v22 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v57 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v58 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v13 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: 
v_lshlrev_b32_e32 v2, 16, v63 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v10 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 
v2, 16, v59 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v31 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -233273,632 +235611,740 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v56, 
off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:52 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:56 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:72 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v3 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:28 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v30 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:36 +; SI-NEXT: v_cvt_f16_f32_e32 v61, s28 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:76 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v2, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v9 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v2, v10 
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v62, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v49, s21 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v13 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v25 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v4, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v29, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v15 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v17 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s23 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v13 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s27 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v9 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v27, s19 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v47 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v2, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v33 +; SI-NEXT: buffer_store_dword v2, off, 
s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v33, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v35 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v56 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v59 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v9, v60 -; SI-NEXT: v_cvt_f16_f32_e32 v43, v61 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v62 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v63 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v37 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_cvt_f16_f32_e32 v21, v48 -; SI-NEXT: s_waitcnt vmcnt(12) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v51 -; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v54 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v51, v55 -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f16_f32_e32 v37, v40 -; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v41 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f16_f32_e32 v25, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v60, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v57, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v58, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v59, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v63, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v46, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v47, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v56, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v62, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v45, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v61, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v39 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v50 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v52, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v51, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v56 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cvt_f16_f32_e32 v4, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v58 +; SI-NEXT: v_cvt_f16_f32_e32 v39, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v36, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v58, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v56, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: s_cbranch_scc0 .LBB109_4 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v53 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v43 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, 
s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v59 +; SI-NEXT: s_cbranch_scc0 .LBB109_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_cbranch_execnz .LBB109_3 -; SI-NEXT: .LBB109_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_branch .LBB109_3 +; SI-NEXT: .LBB109_2: +; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: .LBB109_3: ; %Flow +; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: v_mov_b32_e32 v8, v3 +; SI-NEXT: s_cbranch_vccnz .LBB109_5 +; SI-NEXT: ; %bb.4: ; %cmp.true +; SI-NEXT: v_cvt_f32_f16_e32 v5, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v39 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v62 +; SI-NEXT: v_mov_b32_e32 v47, v38 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v39, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v36 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v49 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v36, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v34 +; SI-NEXT: v_mov_b32_e32 v45, v35 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v58 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v23 +; SI-NEXT: v_mov_b32_e32 v57, v33 +; SI-NEXT: v_or_b32_e32 v34, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v32 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v56 +; SI-NEXT: v_or_b32_e32 v32, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v30 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v9, v31 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_lshr_b64 v[58:59], v[33:34], 16 +; SI-NEXT: 
v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v44 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_or_b32_e32 v1, v1, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_or_b32_e32 v2, v2, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_or_b32_e32 v5, v5, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_or_b32_e32 v8, v8, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_or_b32_e32 v11, v11, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_or_b32_e32 v15, v15, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_or_b32_e32 v18, v18, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_or_b32_e32 v31, v31, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v33 -; SI-NEXT: v_or_b32_e32 v32, v32, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v36 -; SI-NEXT: v_or_b32_e32 v35, v35, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v39 -; SI-NEXT: v_or_b32_e32 v38, v38, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: buffer_store_dword v1, off, 
s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v1, v51 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v52 -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_or_b32_e32 v49, v48, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v53 -; SI-NEXT: v_or_b32_e32 v52, v48, v51 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v30 -; SI-NEXT: v_or_b32_e32 v29, v29, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v59 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v47, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v30, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v26 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v26, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v22 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v22, v3, v5 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v49 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v54 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: 
v_or_b32_e32 v18, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v54, v15 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v12 +; SI-NEXT: v_mov_b32_e32 v12, v42 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v16, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v14, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v51 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v5, v60 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v60 +; SI-NEXT: v_or_b32_e32 v43, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v10 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v55 +; SI-NEXT: v_or_b32_e32 v10, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v48 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v41 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v48 +; SI-NEXT: v_or_b32_e32 v41, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; SI-NEXT: v_or_b32_e32 v6, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v63 +; SI-NEXT: v_lshr_b64 v[62:63], v[38:39], 16 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v47 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_or_b32_e32 v46, v48, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v57 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v61 -; SI-NEXT: v_or_b32_e32 v57, v48, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v63 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v49 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v60, v48, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v62 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v51 -; SI-NEXT: v_or_b32_e32 v59, v54, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v56 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v48 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_or_b32_e32 v56, v54, v48 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v45, v40, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v7 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v55 -; SI-NEXT: v_or_b32_e32 v7, v41, v55 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v51, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v51 +; SI-NEXT: v_or_b32_e32 v4, v3, v4 +; SI-NEXT: v_mov_b32_e32 v63, v51 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v45 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: 
v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v47 +; SI-NEXT: v_or_b32_e32 v44, v28, v33 +; SI-NEXT: v_lshr_b64 v[46:47], v[29:30], 16 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v49, v24, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v11 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; SI-NEXT: v_or_b32_e32 v2, v2, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v52 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v52, v20, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v61 +; SI-NEXT: v_or_b32_e32 v61, v24, v29 +; SI-NEXT: v_mov_b32_e32 v38, v49 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v37, v20, v31 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v28, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v20, v12 +; SI-NEXT: v_or_b32_e32 v12, v28, v25 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v24, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_add_f32_e32 v34, 
0x38000000, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v14, v14, v13 -; SI-NEXT: v_or_b32_e32 v23, v23, v17 -; SI-NEXT: v_or_b32_e32 v34, v34, v21 -; SI-NEXT: v_alignbit_b32 v4, v57, v4, 16 -; SI-NEXT: v_alignbit_b32 v63, v46, v51, 16 -; SI-NEXT: v_alignbit_b32 v62, v29, v48, 16 -; SI-NEXT: v_alignbit_b32 v61, v52, v54, 16 -; SI-NEXT: v_alignbit_b32 v44, v49, v55, 16 -; SI-NEXT: v_alignbit_b32 v13, v32, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v2, v21, 16 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v7, v7, v40 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v7, v41, v10 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v28, v12 +; SI-NEXT: v_or_b32_e32 v12, v20, v21 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v10, v35, v10, 16 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v7 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v7, v41, v20 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v20, v12 +; SI-NEXT: v_or_b32_e32 v12, v24, v17 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v20, v31, v20, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v7 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v7, v41, v28 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v12, v28, v15 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v28, v15, v28, 16 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v12 +; SI-NEXT: buffer_load_dword v12, 
off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v42, v7 -; SI-NEXT: v_or_b32_e32 v7, v41, v27 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v27, v11, v27, 16 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v43 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v12 +; SI-NEXT: v_or_b32_e32 v12, v20, v13 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v7, v41, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v42 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: v_or_b32_e32 v43, v42, v24 -; SI-NEXT: v_alignbit_b32 v26, v8, v26, 16 +; SI-NEXT: v_or_b32_e32 v12, v24, v42 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v20, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v54 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v7, v41, v37 -; SI-NEXT: v_mov_b32_e32 v51, v7 -; SI-NEXT: v_alignbit_b32 v7, v38, v40, 16 -; SI-NEXT: v_alignbit_b32 v24, v5, v24, 16 +; SI-NEXT: v_or_b32_e32 v12, v28, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[56:57], v[31:32], 16 +; SI-NEXT: v_or_b32_e32 v54, v20, v40 +; SI-NEXT: v_or_b32_e32 v20, v24, v5 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v31, v55 +; SI-NEXT: v_lshr_b64 v[54:55], v[15:16], 16 +; SI-NEXT: v_mov_b32_e32 v15, v20 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_alignbit_b32 v37, v1, v37, 16 -; SI-NEXT: .LBB109_3: ; %end -; SI-NEXT: v_and_b32_e32 v48, 0xffff, v60 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v4, v48, v4 -; SI-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v57 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v58 -; SI-NEXT: v_or_b32_e32 v4, v4, v48 -; SI-NEXT: v_add_i32_e32 v48, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v4, v48, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v59 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v63 -; SI-NEXT: v_or_b32_e32 v4, v4, v48 -; SI-NEXT: v_add_i32_e32 v48, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v4, v48, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v46 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v47 -; SI-NEXT: v_or_b32_e32 v4, v4, v48 -; SI-NEXT: v_add_i32_e32 v48, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v4, v48, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 
0xffff, v56 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v62 -; SI-NEXT: v_or_b32_e32 v4, v4, v48 -; SI-NEXT: v_add_i32_e32 v48, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v4, v48, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v30 -; SI-NEXT: v_or_b32_e32 v4, v4, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v45 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v61 -; SI-NEXT: v_or_b32_e32 v4, v4, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v52 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v53 -; SI-NEXT: v_or_b32_e32 v4, v4, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v44 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen +; SI-NEXT: v_cvt_f32_f16_e32 v50, v8 +; SI-NEXT: v_or_b32_e32 v8, v28, v3 +; SI-NEXT: v_lshr_b64 v[28:29], v[5:6], 16 +; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v49 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v50 -; SI-NEXT: v_or_b32_e32 v4, v4, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v12, v50, v1 +; SI-NEXT: v_lshr_b64 v[49:50], v[35:36], 16 +; SI-NEXT: v_mov_b32_e32 v35, v44 +; SI-NEXT: v_lshr_b64 v[44:45], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[20:21], v[42:43], 16 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshr_b64 v[20:21], v[9:10], 16 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v38 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v39 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: 
v_lshr_b64 v[20:21], v[40:41], 16 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[12:13], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[3:4], 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[20:21], v[1:2], 16 +; SI-NEXT: v_mov_b32_e32 v42, v61 +; SI-NEXT: v_mov_b32_e32 v61, v37 +; SI-NEXT: v_mov_b32_e32 v37, v53 +; SI-NEXT: v_mov_b32_e32 v51, v43 +; SI-NEXT: .LBB109_5: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v62 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v52 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v36 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v49 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v38 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v13 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v36 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v33 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v58 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v35 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v20 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v56 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 
0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v46 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v30 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v22 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v44 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v17 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v19 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v50 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v28 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v16 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_load_dword v21, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v21 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v27 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v12 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v54 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v26 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v9 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v12 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v43 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v24 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 
0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v12 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v51 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v12 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v31 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v33 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v9 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v41 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v28 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v24 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword 
v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v20 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v11 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen @@ -233920,8 +236366,6 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB109_4: -; SI-NEXT: s_branch .LBB109_2 ; ; VI-LABEL: bitcast_v64f16_to_v64i16_scalar: ; VI: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll index 9b28fd9e7b6fd..64b5ecc8f6b8e 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll @@ -1209,37 +1209,35 @@ define inreg <8 x i16> @bitcast_v4i32_to_v8i16_scalar(<4 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_lshr_b32 s10, s19, 16 +; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s10, s19, 16 +; SI-NEXT: s_lshr_b32 s11, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s6 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: v_mov_b32_e32 v3, s11 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s4 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v7, s10 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch 
.LBB13_2 ; ; VI-LABEL: bitcast_v4i32_to_v8i16_scalar: @@ -3544,65 +3542,67 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s19, 24 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 8 -; SI-NEXT: s_lshr_b32 s9, s17, 24 -; SI-NEXT: s_lshr_b32 s10, s17, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 8 +; SI-NEXT: s_lshr_b32 s22, s19, 24 +; SI-NEXT: s_lshr_b32 s23, s19, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 8 +; SI-NEXT: s_lshr_b32 s25, s17, 24 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true -; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s19, 24 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 8 -; SI-NEXT: s_lshr_b32 s9, s17, 24 -; SI-NEXT: s_lshr_b32 s10, s17, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s22, s19, 24 +; SI-NEXT: s_lshr_b32 s23, s19, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 8 +; SI-NEXT: s_lshr_b32 s25, s17, 24 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s17, 8 ; SI-NEXT: .LBB25_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_mov_b32_e32 v3, s10 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s11 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v5, s27 +; SI-NEXT: v_mov_b32_e32 v6, s26 +; SI-NEXT: v_mov_b32_e32 v7, s25 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v10, s6 +; SI-NEXT: v_mov_b32_e32 v11, s4 ; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v13, s8 -; SI-NEXT: v_mov_b32_e32 v14, s7 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v13, s24 +; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v15, s22 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; 
implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_v4i32_to_v16i8_scalar: @@ -5664,36 +5664,41 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB37_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s11, s19, 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB37_4 ; SI-NEXT: .LBB37_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v11, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v10, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v9, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v8, s18, 1.0 +; SI-NEXT: v_lshr_b64 v[5:6], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[10:11], 16 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v11 +; SI-NEXT: s_branch .LBB37_5 ; SI-NEXT: .LBB37_3: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: s_branch .LBB37_2 ; SI-NEXT: .LBB37_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: v_mov_b32_e32 v11, s17 +; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s19 +; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v5, s4 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: .LBB37_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v10 +; SI-NEXT: v_mov_b32_e32 v2, v11 +; SI-NEXT: v_mov_b32_e32 v4, v8 +; SI-NEXT: v_mov_b32_e32 v6, v9 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v4f32_to_v8i16_scalar: @@ -7997,64 +8002,75 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; 
SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s9, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s24, s19, 24 +; SI-NEXT: s_lshr_b32 s26, s19, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 8 +; SI-NEXT: s_lshr_b32 s22, s17, 24 +; SI-NEXT: s_lshr_b32 s23, s17, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v4, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s18, 1.0 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v12 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v4 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v21, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v19, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v18, s18, 1.0 +; SI-NEXT: v_lshr_b64 v[0:1], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[11:12], v[18:19], 24 +; SI-NEXT: v_lshr_b64 v[16:17], v[18:19], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[18:19], 8 +; SI-NEXT: v_lshr_b64 v[3:4], v[20:21], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[20:21], 8 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v19 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v19 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v21 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v21 +; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr24 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 -; SI-NEXT: v_mov_b32_e32 v13, s11 -; SI-NEXT: v_mov_b32_e32 v14, s10 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v20, s16 +; SI-NEXT: v_mov_b32_e32 v21, s17 +; SI-NEXT: v_mov_b32_e32 v18, s18 +; SI-NEXT: v_mov_b32_e32 v19, s19 +; SI-NEXT: v_mov_b32_e32 v5, s25 +; SI-NEXT: 
v_mov_b32_e32 v6, s23 +; SI-NEXT: v_mov_b32_e32 v7, s22 +; SI-NEXT: v_mov_b32_e32 v13, s27 +; SI-NEXT: v_mov_b32_e32 v14, s26 +; SI-NEXT: v_mov_b32_e32 v15, s24 +; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v16, s12 +; SI-NEXT: v_mov_b32_e32 v9, s14 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: .LBB49_5: ; %end +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v20 +; SI-NEXT: v_mov_b32_e32 v4, v21 +; SI-NEXT: v_mov_b32_e32 v8, v18 +; SI-NEXT: v_mov_b32_e32 v10, v16 +; SI-NEXT: v_mov_b32_e32 v12, v19 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v4f32_to_v16i8_scalar: @@ -9769,37 +9785,35 @@ define inreg <8 x i16> @bitcast_v2i64_to_v8i16_scalar(<2 x i64> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB57_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_lshr_b32 s10, s19, 16 +; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 ; SI-NEXT: s_add_u32 s18, s18, 3 ; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s10, s19, 16 +; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: .LBB57_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s6 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: v_mov_b32_e32 v3, s11 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s4 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v7, s10 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch .LBB57_2 ; ; VI-LABEL: bitcast_v2i64_to_v8i16_scalar: @@ -12106,65 +12120,67 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s19, 24 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 8 -; SI-NEXT: s_lshr_b32 s9, s17, 24 -; SI-NEXT: s_lshr_b32 s10, s17, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 8 +; SI-NEXT: s_lshr_b32 s22, s19, 24 +; SI-NEXT: s_lshr_b32 s23, s19, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 8 +; SI-NEXT: s_lshr_b32 s25, s17, 24 +; SI-NEXT: 
s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 ; SI-NEXT: s_add_u32 s18, s18, 3 ; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s19, 24 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 8 -; SI-NEXT: s_lshr_b32 s9, s17, 24 -; SI-NEXT: s_lshr_b32 s10, s17, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 8 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s22, s19, 24 +; SI-NEXT: s_lshr_b32 s23, s19, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 8 +; SI-NEXT: s_lshr_b32 s25, s17, 24 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 ; SI-NEXT: .LBB69_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_mov_b32_e32 v3, s10 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s11 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v5, s27 +; SI-NEXT: v_mov_b32_e32 v6, s26 +; SI-NEXT: v_mov_b32_e32 v7, s25 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v10, s6 +; SI-NEXT: v_mov_b32_e32 v11, s4 ; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v13, s8 -; SI-NEXT: v_mov_b32_e32 v14, s7 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v13, s24 +; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v15, s22 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v2i64_to_v16i8_scalar: @@ -13498,34 +13514,34 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB73_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; 
SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s11, s19, 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB73_4 ; SI-NEXT: .LBB73_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[8:9], s[18:19], 1.0 ; SI-NEXT: v_add_f64 v[10:11], s[16:17], 1.0 -; SI-NEXT: v_alignbit_b32 v5, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v1, v11, v10, 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[10:11], 16 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v9 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v11 ; SI-NEXT: s_branch .LBB73_5 ; SI-NEXT: .LBB73_3: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: s_branch .LBB73_2 ; SI-NEXT: .LBB73_4: -; SI-NEXT: v_mov_b32_e32 v11, s17 ; SI-NEXT: v_mov_b32_e32 v9, s19 -; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v11, s17 ; SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: v_mov_b32_e32 v5, s4 ; SI-NEXT: .LBB73_5: ; %end ; SI-NEXT: v_mov_b32_e32 v0, v10 ; SI-NEXT: v_mov_b32_e32 v2, v11 @@ -15789,67 +15805,73 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB85_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 8 -; SI-NEXT: s_lshr_b32 s8, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 8 +; SI-NEXT: s_lshr_b32 s27, s19, 24 +; SI-NEXT: s_lshr_b32 s26, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s19, 8 +; SI-NEXT: s_lshr_b32 s24, s17, 24 +; SI-NEXT: s_lshr_b32 s23, s17, 16 +; SI-NEXT: s_lshr_b32 s22, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB85_4 ; SI-NEXT: .LBB85_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[16:17], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[18:19], s[16:17], 1.0 -; SI-NEXT: v_alignbit_b32 v11, v17, v16, 24 -; SI-NEXT: v_alignbit_b32 v10, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v9, v17, v16, 8 -; SI-NEXT: v_alignbit_b32 v3, v19, v18, 24 -; SI-NEXT: v_alignbit_b32 v2, v19, v18, 16 -; SI-NEXT: v_alignbit_b32 v1, v19, v18, 8 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v17 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v17 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v19 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v19 +; SI-NEXT: v_add_f64 v[20:21], s[16:17], 1.0 +; SI-NEXT: v_add_f64 v[18:19], s[18:19], 1.0 
+; SI-NEXT: v_lshr_b64 v[0:1], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[11:12], v[18:19], 24 +; SI-NEXT: v_lshr_b64 v[16:17], v[18:19], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[18:19], 8 +; SI-NEXT: v_lshr_b64 v[3:4], v[20:21], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[20:21], 8 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v19 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v19 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v21 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v21 ; SI-NEXT: s_branch .LBB85_5 ; SI-NEXT: .LBB85_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: s_branch .LBB85_2 ; SI-NEXT: .LBB85_4: -; SI-NEXT: v_mov_b32_e32 v19, s17 -; SI-NEXT: v_mov_b32_e32 v17, s19 -; SI-NEXT: v_mov_b32_e32 v16, s18 -; SI-NEXT: v_mov_b32_e32 v18, s16 -; SI-NEXT: v_mov_b32_e32 v7, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v5, s6 -; SI-NEXT: v_mov_b32_e32 v15, s11 -; SI-NEXT: v_mov_b32_e32 v14, s10 -; SI-NEXT: v_mov_b32_e32 v13, s9 +; SI-NEXT: v_mov_b32_e32 v19, s19 +; SI-NEXT: v_mov_b32_e32 v21, s17 +; SI-NEXT: v_mov_b32_e32 v20, s16 +; SI-NEXT: v_mov_b32_e32 v18, s18 +; SI-NEXT: v_mov_b32_e32 v15, s27 +; SI-NEXT: v_mov_b32_e32 v14, s26 +; SI-NEXT: v_mov_b32_e32 v13, s25 +; SI-NEXT: v_mov_b32_e32 v7, s24 +; SI-NEXT: v_mov_b32_e32 v6, s23 +; SI-NEXT: v_mov_b32_e32 v5, s22 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v0, s12 +; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v16, s6 +; SI-NEXT: v_mov_b32_e32 v11, s4 ; SI-NEXT: .LBB85_5: ; %end -; SI-NEXT: v_mov_b32_e32 v0, v18 -; SI-NEXT: v_mov_b32_e32 v4, v19 -; SI-NEXT: v_mov_b32_e32 v8, v16 -; SI-NEXT: v_mov_b32_e32 v12, v17 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v20 +; SI-NEXT: v_mov_b32_e32 v4, v21 +; SI-NEXT: v_mov_b32_e32 v8, v18 +; SI-NEXT: v_mov_b32_e32 v10, v16 +; SI-NEXT: v_mov_b32_e32 v12, v19 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v2f64_to_v16i8_scalar: @@ -17515,11 +17537,11 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: s_cmp_lg_u32 s24, 0 @@ -17530,8 +17552,8 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: 
v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 @@ -17545,10 +17567,10 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v7 @@ -17557,11 +17579,13 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_or_b32_e32 v2, v2, v8 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[8:9], v[5:6], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 ; SI-NEXT: .LBB91_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v10 +; SI-NEXT: v_mov_b32_e32 v5, v8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB91_4: ; SI-NEXT: s_branch .LBB91_2 @@ -18405,60 +18429,62 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3 ; SI-NEXT: v_mul_f32_e64 v15, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v14, 1.0, s17 ; SI-NEXT: v_mul_f32_e64 v9, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 ; SI-NEXT: v_mul_f32_e64 v13, 1.0, s20 ; SI-NEXT: v_mul_f32_e64 v12, 1.0, s21 ; SI-NEXT: v_mul_f32_e64 v11, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v10, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v5, 1.0, s23 ; SI-NEXT: s_cbranch_scc0 .LBB95_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v14 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v12 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 ; SI-NEXT: s_cbranch_execnz .LBB95_3 ; SI-NEXT: .LBB95_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v14 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v14 ; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v15 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v12 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 ; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v12 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v10 -; SI-NEXT: v_alignbit_b32 v4, v4, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v11 +; SI-NEXT: v_and_b32_e32 
v3, 0xffff0000, v11 ; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v2 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v8 -; SI-NEXT: v_alignbit_b32 v6, v7, v2, 16 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_alignbit_b32 v6, v7, v3, 16 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[8:9], v[5:6], 16 +; SI-NEXT: v_alignbit_b32 v4, v12, v13, 16 ; SI-NEXT: .LBB95_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v10 +; SI-NEXT: v_mov_b32_e32 v5, v8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB95_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: s_branch .LBB95_2 @@ -19152,30 +19178,28 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: v_alignbit_b32 v3, s8, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s8, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s8, v0, 8 -; SI-NEXT: s_or_b32 s9, s4, s5 -; SI-NEXT: v_mov_b32_e32 v0, s7 -; SI-NEXT: v_alignbit_b32 v11, s9, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s9, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s9, v0, 8 -; SI-NEXT: s_lshr_b32 s10, s8, 8 -; SI-NEXT: s_lshr_b32 s13, s9, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 24 +; SI-NEXT: s_and_b32 s6, s20, 0xffff +; SI-NEXT: s_lshl_b32 s7, s21, 16 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xffff +; SI-NEXT: s_lshl_b32 s9, s23, 16 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[24:25], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s9, s5, 8 +; SI-NEXT: s_lshr_b32 s15, s7, 8 ; SI-NEXT: s_and_b32 s11, s19, 0xffff -; SI-NEXT: s_and_b32 s14, s23, 0xffff -; SI-NEXT: s_bfe_u32 s12, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s15, s23, 0x80008 +; SI-NEXT: s_and_b32 s25, s23, 0xffff +; SI-NEXT: s_bfe_u32 s13, s19, 0x80008 +; SI-NEXT: s_bfe_u32 s27, s23, 0x80008 ; SI-NEXT: s_cbranch_execnz 
.LBB97_3 ; SI-NEXT: .LBB97_2: ; %cmp.true ; SI-NEXT: s_add_i32 s20, s20, 3 @@ -19183,64 +19207,66 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in ; SI-NEXT: s_lshl_b32 s5, s21, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 +; SI-NEXT: s_add_i32 s6, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s22, 0xffff ; SI-NEXT: s_lshl_b32 s5, s23, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x30000 +; SI-NEXT: s_add_i32 s7, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v3, s8, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s8, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s8, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s7 -; SI-NEXT: v_alignbit_b32 v11, s9, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s9, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s9, v0, 8 -; SI-NEXT: s_lshr_b32 s12, s8, 24 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: s_lshr_b32 s10, s8, 8 -; SI-NEXT: s_lshr_b32 s15, s9, 24 -; SI-NEXT: s_lshr_b32 s14, s9, 16 -; SI-NEXT: s_lshr_b32 s13, s9, 8 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s8, s19, 16 +; SI-NEXT: s_or_b32 s5, s8, s5 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[24:25], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s13, s5, 24 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: s_lshr_b32 s9, s5, 8 +; SI-NEXT: s_lshr_b32 s27, s7, 24 +; SI-NEXT: s_lshr_b32 s25, s7, 16 +; SI-NEXT: s_lshr_b32 s15, s7, 8 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v4, s8 -; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v4, s5 +; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s11 -; SI-NEXT: v_mov_b32_e32 v7, s12 -; SI-NEXT: v_mov_b32_e32 v8, s7 -; SI-NEXT: v_mov_b32_e32 v12, s9 -; SI-NEXT: v_mov_b32_e32 v13, s13 -; SI-NEXT: v_mov_b32_e32 v14, s14 -; SI-NEXT: v_mov_b32_e32 v15, s15 +; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v8, s6 +; SI-NEXT: v_mov_b32_e32 v9, s26 +; SI-NEXT: v_mov_b32_e32 v10, s24 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v12, s7 +; SI-NEXT: v_mov_b32_e32 v13, s15 +; SI-NEXT: v_mov_b32_e32 v14, s25 +; SI-NEXT: v_mov_b32_e32 v15, s27 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; implicit-def: $sgpr9 +; 
SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr24 ; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v8i16_to_v16i8_scalar: @@ -20067,53 +20093,53 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 -; SI-NEXT: v_readfirstlane_b32 s6, v1 +; SI-NEXT: v_readfirstlane_b32 s14, v1 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s8, v0 +; SI-NEXT: v_readfirstlane_b32 s15, v0 ; SI-NEXT: s_cbranch_scc0 .LBB99_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s7, s23, 24 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s7, s5 -; SI-NEXT: s_or_b32 s10, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s7, s19, 24 -; SI-NEXT: s_or_b32 s4, s7, s4 -; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_or_b32 s40, s6, s5 +; SI-NEXT: s_or_b32 s6, s4, s40 +; SI-NEXT: s_and_b32 s4, s24, 0xff +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s26, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_or_b32 s42, s7, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_or_b32 s41, s5, s7 +; SI-NEXT: s_lshr_b64 s[8:9], s[40:41], 16 +; SI-NEXT: s_and_b32 s5, s28, 0xff ; SI-NEXT: s_lshl_b32 s9, s29, 8 -; SI-NEXT: s_or_b32 s7, s7, s9 -; SI-NEXT: s_and_b32 s9, s8, 0xff -; SI-NEXT: s_lshl_b32 s9, s9, 16 -; SI-NEXT: s_lshl_b32 s11, s6, 24 -; SI-NEXT: s_or_b32 s13, s11, s9 -; SI-NEXT: s_and_b32 s9, s26, 0xff +; SI-NEXT: s_or_b32 s5, s5, s9 +; SI-NEXT: s_and_b32 s9, s15, 0xff ; SI-NEXT: s_lshl_b32 s9, s9, 16 -; SI-NEXT: s_lshl_b32 s11, s27, 24 -; SI-NEXT: s_or_b32 s9, s11, s9 -; SI-NEXT: s_and_b32 s11, s16, 0xff -; SI-NEXT: s_lshl_b32 s12, s17, 8 -; SI-NEXT: s_or_b32 s11, s11, s12 -; SI-NEXT: s_and_b32 s11, s11, 0xffff -; SI-NEXT: v_mov_b32_e32 v0, s4 -; SI-NEXT: s_or_b32 s11, s11, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s12, s25, 8 -; SI-NEXT: s_and_b32 s7, s7, 0xffff -; SI-NEXT: s_or_b32 s4, s4, s12 -; SI-NEXT: v_alignbit_b32 v1, s10, v0, 16 -; SI-NEXT: s_or_b32 s7, s7, s13 -; SI-NEXT: v_mov_b32_e32 v0, s9 +; SI-NEXT: s_lshl_b32 s10, s14, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s12, s10, s9 +; SI-NEXT: s_or_b32 s43, s5, s12 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v5, s7, v0, 16 -; SI-NEXT: s_or_b32 s9, s4, s9 -; SI-NEXT: s_lshr_b32 s12, s5, 16 -; SI-NEXT: s_lshr_b32 s13, s13, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[42:43], 16 +; SI-NEXT: s_or_b32 s4, s4, s42 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s12, 16 +; SI-NEXT: s_mov_b32 s7, 
s41 +; SI-NEXT: s_mov_b32 s5, s43 ; SI-NEXT: s_cbranch_execnz .LBB99_3 ; SI-NEXT: .LBB99_2: ; %cmp.true ; SI-NEXT: s_add_i32 s24, s24, 3 @@ -20121,76 +20147,74 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in ; SI-NEXT: s_lshl_b32 s5, s25, 8 ; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s7, s26, 0xff +; SI-NEXT: s_and_b32 s6, s26, 0xff ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s27, 24 -; SI-NEXT: s_lshl_b32 s7, s7, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s7 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s8, s8, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_lshl_b32 s5, s6, 24 -; SI-NEXT: s_and_b32 s6, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_add_i32 s28, s28, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s6, s29, 8 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s7, s15, 0xff +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s14, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_lshl_b32 s5, s17, 8 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s16, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s18, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s19, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s11, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s8, s21, 8 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s22, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s10, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s11 -; SI-NEXT: v_alignbit_b32 v1, s10, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s9 -; SI-NEXT: v_alignbit_b32 v5, s7, v0, 16 -; SI-NEXT: s_lshr_b32 s12, s10, 16 -; SI-NEXT: s_lshr_b32 s13, s7, 16 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s9, s22, 0xff +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_lshr_b64 s[8:9], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; 
SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s5, 16 ; SI-NEXT: .LBB99_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s11 -; SI-NEXT: v_mov_b32_e32 v2, s10 -; SI-NEXT: v_mov_b32_e32 v3, s12 -; SI-NEXT: v_mov_b32_e32 v4, s9 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v6, s5 +; SI-NEXT: v_mov_b32_e32 v7, s11 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB99_4: -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: s_branch .LBB99_2 ; ; VI-LABEL: bitcast_v16i8_to_v8i16_scalar: @@ -22076,41 +22100,41 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i ; SI-LABEL: bitcast_v8f16_to_v16i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v18, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v26, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s20 ; SI-NEXT: v_cvt_f16_f32_e32 v14, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s22 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB105_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v18 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v8, v20, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v14 -; SI-NEXT: v_or_b32_e32 v0, v17, v0 -; SI-NEXT: v_or_b32_e32 v4, v16, v1 -; SI-NEXT: v_or_b32_e32 v12, v19, v5 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v23 +; SI-NEXT: v_or_b32_e32 v19, v16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: v_or_b32_e32 v20, v8, v0 +; SI-NEXT: v_lshr_b64 v[0:1], v[19:20], 16 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v26 +; SI-NEXT: v_or_b32_e32 v17, v25, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v14 +; SI-NEXT: v_or_b32_e32 v18, v24, v1 +; SI-NEXT: v_lshr_b64 v[3:4], v[19:20], 24 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v20 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v18 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v15, v14, 8, 8 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[17:18], 24 +; SI-NEXT: v_lshr_b64 v[21:22], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[17:18], 8 ; SI-NEXT: s_cbranch_execnz .LBB105_3 ; SI-NEXT: .LBB105_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v0, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v20 +; 
SI-NEXT: v_cvt_f32_f16_e32 v0, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v24 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 @@ -22120,13 +22144,13 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v8, v1, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v18 +; SI-NEXT: v_or_b32_e32 v17, v1, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v14 -; SI-NEXT: v_or_b32_e32 v12, v2, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v17 +; SI-NEXT: v_or_b32_e32 v18, v2, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 @@ -22136,34 +22160,38 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v2, v1 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 +; SI-NEXT: v_or_b32_e32 v19, v1, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: v_or_b32_e32 v20, v2, v0 +; SI-NEXT: v_lshr_b64 v[0:1], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[19:20], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[17:18], 24 +; SI-NEXT: v_lshr_b64 v[21:22], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[17:18], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v20 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v18 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v15, v14, 8, 8 ; SI-NEXT: .LBB105_3: ; %end +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v19 +; SI-NEXT: v_mov_b32_e32 v4, v20 +; SI-NEXT: v_mov_b32_e32 v8, v17 +; SI-NEXT: v_mov_b32_e32 v10, v21 +; SI-NEXT: v_mov_b32_e32 v12, v18 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB105_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr0 ; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr15 ; SI-NEXT: s_branch .LBB105_2 @@ -24073,89 +24101,94 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_cmp_lg_u32 s24, 0 -; SI-NEXT: v_mul_f32_e64 v18, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v19, 1.0, 
s16 -; SI-NEXT: v_mul_f32_e64 v16, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v17, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v22, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v23, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v20, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v21, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v16, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v25, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v26, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v23, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v24, 1.0, s22 ; SI-NEXT: s_cbranch_scc0 .LBB109_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v20 -; SI-NEXT: v_alignbit_b32 v0, v0, v19, 16 -; SI-NEXT: v_alignbit_b32 v4, v6, v17, 16 -; SI-NEXT: v_alignbit_b32 v8, v5, v23, 16 -; SI-NEXT: v_alignbit_b32 v12, v14, v21, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v16 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v20 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; SI-NEXT: v_alignbit_b32 v19, v1, v16, 16 +; SI-NEXT: v_alignbit_b32 v20, v6, v8, 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 8 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v23 +; SI-NEXT: v_lshr_b64 v[3:4], v[19:20], 24 +; SI-NEXT: v_alignbit_b32 v21, v2, v26, 16 +; SI-NEXT: v_alignbit_b32 v22, v14, v24, 16 +; SI-NEXT: v_lshr_b64 v[4:5], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[10:11], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v0 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v20 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v23 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v22 +; SI-NEXT: v_lshr_b64 v[17:18], v[21:22], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[21:22], 8 ; SI-NEXT: s_cbranch_execnz .LBB109_3 ; SI-NEXT: .LBB109_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v22 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v23 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v25 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v26 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_alignbit_b32 v21, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v23 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v24 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v8, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v21 -; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v15 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v18 -; SI-NEXT: v_alignbit_b32 v12, v14, v0, 16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v19 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 +; SI-NEXT: v_alignbit_b32 v22, v14, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: 
v_and_b32_e32 v0, 0xffff0000, v0 +; SI-NEXT: v_alignbit_b32 v19, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v16 -; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_alignbit_b32 v4, v6, v1, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; SI-NEXT: v_alignbit_b32 v20, v6, v1, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[19:20], 24 +; SI-NEXT: v_lshr_b64 v[10:11], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[4:5], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 8 +; SI-NEXT: v_lshr_b64 v[17:18], v[21:22], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[21:22], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v20 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v22 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v0 ; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v15 ; SI-NEXT: .LBB109_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v19 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v20 +; SI-NEXT: v_mov_b32_e32 v8, v21 +; SI-NEXT: v_mov_b32_e32 v9, v11 +; SI-NEXT: v_mov_b32_e32 v11, v17 +; SI-NEXT: v_mov_b32_e32 v12, v22 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB109_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: s_branch .LBB109_2 ; ; VI-LABEL: bitcast_v8bf16_to_v16i8_scalar: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll index c87d52c1e6907..ee209f84efe7c 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll @@ -520,44 +520,41 @@ define inreg <10 x i16> @bitcast_v5i32_to_v10i16_scalar(<5 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s21, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_lshr_b32 s12, s19, 16 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; 
SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_mov_b32_e32 v0, s18 ; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s12, s19, 16 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: v_mov_b32_e32 v3, s13 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s4 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v7, s12 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v5i32_to_v10i16_scalar: @@ -1731,42 +1728,47 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s21, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_4 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v8, 16 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v14, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v13, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v12, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v11, s18, 1.0 +; SI-NEXT: v_lshr_b64 v[5:6], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[8:9], 16 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v14 +; SI-NEXT: s_branch .LBB13_5 ; SI-NEXT: .LBB13_3: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: 
$vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: s_branch .LBB13_2 ; SI-NEXT: .LBB13_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 +; SI-NEXT: v_mov_b32_e32 v13, s16 +; SI-NEXT: v_mov_b32_e32 v14, s17 +; SI-NEXT: v_mov_b32_e32 v11, s18 +; SI-NEXT: v_mov_b32_e32 v12, s19 ; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v9, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v5, s4 +; SI-NEXT: .LBB13_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v13 +; SI-NEXT: v_mov_b32_e32 v2, v14 +; SI-NEXT: v_mov_b32_e32 v4, v11 +; SI-NEXT: v_mov_b32_e32 v6, v12 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v5f32_to_v10i16_scalar: @@ -3319,11 +3321,11 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 @@ -3333,49 +3335,51 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB23_3 ; SI-NEXT: .LBB23_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v9 -; SI-NEXT: v_or_b32_e32 v8, v8, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_or_b32_e32 v6, 
v6, v10 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_or_b32_e32 v2, v2, v10 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 16 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v9 +; SI-NEXT: v_lshr_b64 v[11:12], v[5:6], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 +; SI-NEXT: v_or_b32_e32 v8, v8, v13 ; SI-NEXT: .LBB23_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v10 +; SI-NEXT: v_mov_b32_e32 v5, v11 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB23_4: ; SI-NEXT: s_branch .LBB23_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll index c3ace0ac5af71..57eae8600dc4a 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll @@ -1360,50 +1360,47 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b32 s12, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s12, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v3, s14 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s6 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v7, s13 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s4 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s6 +; SI-NEXT: v_mov_b32_e32 v11, s12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; 
implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v6i32_to_v12i16_scalar: @@ -3505,48 +3502,55 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB29_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_4 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v17, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v16, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v15, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v14, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v13, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v12, s20, 1.0 +; SI-NEXT: v_lshr_b64 v[9:10], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[16:17], 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v17 +; SI-NEXT: s_branch .LBB29_5 ; SI-NEXT: .LBB29_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB29_2 ; SI-NEXT: .LBB29_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v16, s16 +; SI-NEXT: v_mov_b32_e32 v17, s17 +; SI-NEXT: v_mov_b32_e32 v14, s18 +; SI-NEXT: v_mov_b32_e32 v15, s19 +; SI-NEXT: v_mov_b32_e32 v12, s20 +; SI-NEXT: v_mov_b32_e32 v13, s21 +; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v5, s6 +; SI-NEXT: v_mov_b32_e32 v9, s4 +; SI-NEXT: .LBB29_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v16 +; SI-NEXT: v_mov_b32_e32 v2, v17 +; SI-NEXT: v_mov_b32_e32 v4, v14 +; SI-NEXT: v_mov_b32_e32 v6, v15 +; SI-NEXT: v_mov_b32_e32 v8, v12 +; SI-NEXT: v_mov_b32_e32 v10, v13 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; 
VI-LABEL: bitcast_v6f32_to_v12i16_scalar: @@ -5249,50 +5253,47 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b32 s12, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 ; SI-NEXT: s_add_u32 s20, s20, 3 ; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s12, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v3, s14 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s6 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v7, s13 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s4 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s6 +; SI-NEXT: v_mov_b32_e32 v11, s12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v3i64_to_v12i16_scalar: @@ -6578,45 +6579,45 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 
+; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[16:17], s[16:17], 1.0 ; SI-NEXT: v_add_f64 v[12:13], s[20:21], 1.0 ; SI-NEXT: v_add_f64 v[14:15], s[18:19], 1.0 -; SI-NEXT: v_alignbit_b32 v9, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v5, v15, v14, 16 -; SI-NEXT: v_alignbit_b32 v1, v17, v16, 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[16:17], 16 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v15 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v17 ; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v17, s17 ; SI-NEXT: v_mov_b32_e32 v16, s16 -; SI-NEXT: v_mov_b32_e32 v15, s19 ; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: v_mov_b32_e32 v13, s21 ; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v17, s17 +; SI-NEXT: v_mov_b32_e32 v15, s19 +; SI-NEXT: v_mov_b32_e32 v13, s21 +; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v5, s6 +; SI-NEXT: v_mov_b32_e32 v9, s4 ; SI-NEXT: .LBB49_5: ; %end ; SI-NEXT: v_mov_b32_e32 v0, v16 ; SI-NEXT: v_mov_b32_e32 v2, v17 @@ -8296,15 +8297,15 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: s_cmp_lg_u32 s28, 0 @@ -8317,53 +8318,56 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v12 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; 
SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v10, v10, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v7 ; SI-NEXT: v_or_b32_e32 v6, v6, v12 ; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v2, v2, v12 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v2, v2, v12 +; SI-NEXT: v_lshr_b64 v[14:15], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[15:16], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[12:13], v[9:10], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 ; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v14 +; SI-NEXT: v_mov_b32_e32 v5, v15 +; SI-NEXT: v_mov_b32_e32 v9, v12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: ; SI-NEXT: s_branch .LBB59_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll index c830d6b344b6f..7d0897bb2151b 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll @@ -585,57 +585,53 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s23, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: 
v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_lshr_b32 s23, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s14, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s10 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v3, s23 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s8 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v7, s15 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s4 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s6 +; SI-NEXT: v_mov_b32_e32 v11, s14 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr23 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v7i32_to_v14i16_scalar: @@ -2048,54 +2044,61 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s23, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_4 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v13, s4, v12, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; 
SI-NEXT: v_add_f32_e64 v20, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v19, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v18, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v17, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v16, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v15, s20, 1.0 +; SI-NEXT: v_lshr_b64 v[9:10], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[12:13], 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v20 +; SI-NEXT: s_branch .LBB13_5 ; SI-NEXT: .LBB13_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr6 ; SI-NEXT: s_branch .LBB13_2 ; SI-NEXT: .LBB13_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 +; SI-NEXT: v_mov_b32_e32 v19, s16 +; SI-NEXT: v_mov_b32_e32 v20, s17 +; SI-NEXT: v_mov_b32_e32 v17, s18 +; SI-NEXT: v_mov_b32_e32 v18, s19 +; SI-NEXT: v_mov_b32_e32 v15, s20 +; SI-NEXT: v_mov_b32_e32 v16, s21 ; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v3, s14 +; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v11, s23 +; SI-NEXT: v_mov_b32_e32 v13, s6 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: v_mov_b32_e32 v9, s4 +; SI-NEXT: .LBB13_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v19 +; SI-NEXT: v_mov_b32_e32 v2, v20 +; SI-NEXT: v_mov_b32_e32 v4, v17 +; SI-NEXT: v_mov_b32_e32 v6, v18 +; SI-NEXT: v_mov_b32_e32 v8, v15 +; SI-NEXT: v_mov_b32_e32 v10, v16 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v7f32_to_v14i16_scalar: @@ -3965,22 +3968,21 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i ; SI-LABEL: bitcast_v14f16_to_v14i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v14, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 ; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB23_4 ; SI-NEXT: ; %bb.1: ; %cmp.false @@ -3993,17 +3995,14 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: 
v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v14 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 @@ -4012,40 +4011,46 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v14 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_or_b32_e32 v10, v10, v14 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v7 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_or_b32_e32 v6, v6, v14 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_or_b32_e32 v2, v2, v14 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshr_b64 v[14:15], v[1:2], 16 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v2, v2, v14 +; SI-NEXT: v_lshr_b64 v[15:16], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[16:17], v[9:10], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 ; SI-NEXT: .LBB23_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v14 +; SI-NEXT: v_mov_b32_e32 v5, v15 +; SI-NEXT: v_mov_b32_e32 v9, v16 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB23_4: ; SI-NEXT: s_branch .LBB23_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll index 52e125d0d658f..cb4b3bd4382a4 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll @@ -1514,63 +1514,59 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: 
v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s23, 16 +; SI-NEXT: s_lshr_b32 s15, s21, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s14, s23, 16 +; SI-NEXT: s_lshr_b32 s15, s21, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s10 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v3, s25 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s8 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: v_mov_b32_e32 v7, s24 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s7 +; SI-NEXT: v_mov_b32_e32 v11, s15 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s4 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v15, s14 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr24 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v8i32_to_v16i16_scalar: @@ -5255,119 +5251,123 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: 
s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s23, 24 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 8 -; SI-NEXT: s_lshr_b32 s8, s21, 24 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s13, s19, 16 -; SI-NEXT: s_lshr_b32 s14, s19, 8 -; SI-NEXT: s_lshr_b32 s15, s17, 24 -; SI-NEXT: s_lshr_b32 s24, s17, 16 -; SI-NEXT: s_lshr_b32 s25, s17, 8 +; SI-NEXT: s_lshr_b32 s56, s23, 24 +; SI-NEXT: s_lshr_b32 s57, s23, 16 +; SI-NEXT: s_lshr_b32 s58, s23, 8 +; SI-NEXT: s_lshr_b32 s59, s21, 24 +; SI-NEXT: s_lshr_b32 s60, s21, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 8 +; SI-NEXT: s_lshr_b32 s62, s19, 24 +; SI-NEXT: s_lshr_b32 s63, s19, 16 +; SI-NEXT: s_lshr_b32 s72, s19, 8 +; SI-NEXT: s_lshr_b32 s73, s17, 24 +; SI-NEXT: s_lshr_b32 s74, s17, 16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s23, 24 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 8 -; SI-NEXT: s_lshr_b32 s8, s21, 24 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s13, s19, 16 -; SI-NEXT: s_lshr_b32 s14, s19, 8 -; SI-NEXT: s_lshr_b32 s15, s17, 24 -; SI-NEXT: s_lshr_b32 s24, s17, 16 -; SI-NEXT: s_lshr_b32 s25, 
s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s56, s23, 24 +; SI-NEXT: s_lshr_b32 s57, s23, 16 +; SI-NEXT: s_lshr_b32 s58, s23, 8 +; SI-NEXT: s_lshr_b32 s59, s21, 24 +; SI-NEXT: s_lshr_b32 s60, s21, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 8 +; SI-NEXT: s_lshr_b32 s62, s19, 24 +; SI-NEXT: s_lshr_b32 s63, s19, 16 +; SI-NEXT: s_lshr_b32 s72, s19, 8 +; SI-NEXT: s_lshr_b32 s73, s17, 24 +; SI-NEXT: s_lshr_b32 s74, s17, 16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 ; SI-NEXT: .LBB25_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s44 +; SI-NEXT: v_mov_b32_e32 v2, s42 +; SI-NEXT: v_mov_b32_e32 v3, s40 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s25 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v5, s75 +; SI-NEXT: v_mov_b32_e32 v6, s74 +; SI-NEXT: v_mov_b32_e32 v7, s73 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s28 +; SI-NEXT: v_mov_b32_e32 v10, s26 +; SI-NEXT: v_mov_b32_e32 v11, s24 ; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v13, s14 -; SI-NEXT: v_mov_b32_e32 v14, s13 -; SI-NEXT: v_mov_b32_e32 v15, s11 +; SI-NEXT: v_mov_b32_e32 v13, s72 +; SI-NEXT: v_mov_b32_e32 v14, s63 +; SI-NEXT: v_mov_b32_e32 v15, s62 ; SI-NEXT: v_mov_b32_e32 v16, s20 +; SI-NEXT: v_mov_b32_e32 v17, s14 +; SI-NEXT: v_mov_b32_e32 v18, s12 +; SI-NEXT: v_mov_b32_e32 v19, s10 ; SI-NEXT: v_mov_b32_e32 v20, s21 -; SI-NEXT: v_mov_b32_e32 v21, s12 -; SI-NEXT: v_mov_b32_e32 v22, s10 -; SI-NEXT: v_mov_b32_e32 v23, s8 +; SI-NEXT: v_mov_b32_e32 v21, s61 +; SI-NEXT: v_mov_b32_e32 v22, s60 +; SI-NEXT: v_mov_b32_e32 v23, s59 ; SI-NEXT: v_mov_b32_e32 v24, s22 +; SI-NEXT: v_mov_b32_e32 v25, s8 +; SI-NEXT: v_mov_b32_e32 v26, s6 +; SI-NEXT: v_mov_b32_e32 v27, s4 ; SI-NEXT: v_mov_b32_e32 v28, s23 -; SI-NEXT: v_mov_b32_e32 v29, s9 -; SI-NEXT: v_mov_b32_e32 v30, s7 -; SI-NEXT: v_mov_b32_e32 v31, s6 +; SI-NEXT: v_mov_b32_e32 v29, s58 +; SI-NEXT: v_mov_b32_e32 v30, s57 +; SI-NEXT: v_mov_b32_e32 v31, s56 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr24 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: 
$sgpr11 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_v8i32_to_v32i8_scalar: @@ -8503,60 +8503,69 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB37_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s25, s23, 16 +; SI-NEXT: s_lshr_b32 s24, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB37_4 ; SI-NEXT: .LBB37_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v23, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v22, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v21, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v19, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v18, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v17, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v16, s22, 1.0 +; SI-NEXT: v_lshr_b64 v[13:14], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[18:19], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[22:23], 16 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v23 +; SI-NEXT: s_branch .LBB37_5 ; SI-NEXT: .LBB37_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: 
$sgpr9 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: s_branch .LBB37_2 ; SI-NEXT: .LBB37_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v22, s16 +; SI-NEXT: v_mov_b32_e32 v23, s17 +; SI-NEXT: v_mov_b32_e32 v20, s18 +; SI-NEXT: v_mov_b32_e32 v21, s19 +; SI-NEXT: v_mov_b32_e32 v18, s20 +; SI-NEXT: v_mov_b32_e32 v19, s21 +; SI-NEXT: v_mov_b32_e32 v16, s22 +; SI-NEXT: v_mov_b32_e32 v17, s23 +; SI-NEXT: v_mov_b32_e32 v3, s14 +; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v11, s24 +; SI-NEXT: v_mov_b32_e32 v15, s25 +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_mov_b32_e32 v9, s6 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: .LBB37_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v22 +; SI-NEXT: v_mov_b32_e32 v2, v23 +; SI-NEXT: v_mov_b32_e32 v4, v20 +; SI-NEXT: v_mov_b32_e32 v6, v21 +; SI-NEXT: v_mov_b32_e32 v8, v18 +; SI-NEXT: v_mov_b32_e32 v10, v19 +; SI-NEXT: v_mov_b32_e32 v12, v16 +; SI-NEXT: v_mov_b32_e32 v14, v17 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v8f32_to_v16i16_scalar: @@ -12246,116 +12255,137 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s15, s23, 24 -; SI-NEXT: s_lshr_b32 s24, s23, 16 -; SI-NEXT: s_lshr_b32 s25, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s9, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s72, s23, 24 +; SI-NEXT: s_lshr_b32 s74, s23, 16 +; SI-NEXT: s_lshr_b32 s75, s23, 8 +; SI-NEXT: s_lshr_b32 s61, s21, 24 +; SI-NEXT: s_lshr_b32 s63, s21, 16 +; SI-NEXT: s_lshr_b32 s73, s21, 8 +; SI-NEXT: s_lshr_b32 s58, s19, 24 +; SI-NEXT: s_lshr_b32 s60, s19, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 8 +; SI-NEXT: s_lshr_b32 s56, s17, 24 +; SI-NEXT: s_lshr_b32 s57, s17, 16 +; SI-NEXT: s_lshr_b32 s59, s17, 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[28:29], 
s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v4, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v20, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v28, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v24, s22, 1.0 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 -; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v20 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v12 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v4 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v39, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v38, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v49, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v48, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v35, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v34, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v37, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v36, s20, 1.0 +; SI-NEXT: v_lshr_b64 v[27:28], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[24:25], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[0:1], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[48:49], 8 +; SI-NEXT: v_lshr_b64 v[19:20], v[36:37], 24 +; SI-NEXT: v_lshr_b64 v[32:33], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[36:37], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[34:35], 24 +; SI-NEXT: v_lshr_b64 v[28:29], v[34:35], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[34:35], 8 +; SI-NEXT: v_lshr_b64 v[3:4], v[38:39], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[38:39], 8 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v49 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v37 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v37 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v35 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v35 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v35 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v39 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v39 +; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; 
implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr24 -; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v16, s20 -; SI-NEXT: v_mov_b32_e32 v20, s21 -; SI-NEXT: v_mov_b32_e32 v24, s22 -; SI-NEXT: v_mov_b32_e32 v28, s23 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 -; SI-NEXT: v_mov_b32_e32 v13, s11 -; SI-NEXT: v_mov_b32_e32 v14, s10 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v21, s14 -; SI-NEXT: v_mov_b32_e32 v22, s13 -; SI-NEXT: v_mov_b32_e32 v23, s12 -; SI-NEXT: v_mov_b32_e32 v29, s25 -; SI-NEXT: v_mov_b32_e32 v30, s24 -; SI-NEXT: v_mov_b32_e32 v31, s15 +; SI-NEXT: v_mov_b32_e32 v38, s16 +; SI-NEXT: v_mov_b32_e32 v39, s17 +; SI-NEXT: v_mov_b32_e32 v34, s18 +; SI-NEXT: v_mov_b32_e32 v35, s19 +; SI-NEXT: v_mov_b32_e32 v36, s20 +; SI-NEXT: v_mov_b32_e32 v37, s21 +; SI-NEXT: v_mov_b32_e32 v48, s22 +; SI-NEXT: v_mov_b32_e32 v49, s23 +; SI-NEXT: v_mov_b32_e32 v5, s59 +; SI-NEXT: v_mov_b32_e32 v6, s57 +; SI-NEXT: v_mov_b32_e32 v7, s56 +; SI-NEXT: v_mov_b32_e32 v13, s62 +; SI-NEXT: v_mov_b32_e32 v14, s60 +; SI-NEXT: v_mov_b32_e32 v15, s58 +; SI-NEXT: v_mov_b32_e32 v21, s73 +; SI-NEXT: v_mov_b32_e32 v22, s63 +; SI-NEXT: v_mov_b32_e32 v23, s61 +; SI-NEXT: v_mov_b32_e32 v29, s75 +; SI-NEXT: v_mov_b32_e32 v30, s74 +; SI-NEXT: v_mov_b32_e32 v31, s72 +; SI-NEXT: v_mov_b32_e32 v27, s40 +; SI-NEXT: v_mov_b32_e32 v24, s42 +; SI-NEXT: v_mov_b32_e32 v25, s44 +; SI-NEXT: v_mov_b32_e32 v19, s24 +; SI-NEXT: v_mov_b32_e32 v32, s26 +; SI-NEXT: v_mov_b32_e32 v17, s28 +; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v28, s12 +; SI-NEXT: v_mov_b32_e32 v9, s14 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: .LBB49_5: ; %end +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v38 +; SI-NEXT: v_mov_b32_e32 v4, v39 +; SI-NEXT: v_mov_b32_e32 v10, v28 +; SI-NEXT: v_mov_b32_e32 v8, v34 +; SI-NEXT: v_mov_b32_e32 v12, v35 +; SI-NEXT: v_mov_b32_e32 v18, v32 +; SI-NEXT: v_mov_b32_e32 v16, v36 +; SI-NEXT: v_mov_b32_e32 v20, v37 +; SI-NEXT: v_mov_b32_e32 v26, v24 +; SI-NEXT: v_mov_b32_e32 v24, v48 +; SI-NEXT: v_mov_b32_e32 v28, v49 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v8f32_to_v32i8_scalar: @@ -15064,63 +15094,59 @@ define inreg <16 x i16> 
@bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB57_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s23, 16 +; SI-NEXT: s_lshr_b32 s15, s21, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 ; SI-NEXT: s_add_u32 s22, s22, 3 ; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s14, s23, 16 +; SI-NEXT: s_lshr_b32 s15, s21, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: .LBB57_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s10 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v3, s25 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s8 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: v_mov_b32_e32 v7, s24 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s7 +; SI-NEXT: v_mov_b32_e32 v11, s15 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s4 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v15, s14 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr24 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB57_2 ; ; VI-LABEL: 
bitcast_v4i64_to_v16i16_scalar: @@ -18815,119 +18841,123 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s23, 24 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 8 -; SI-NEXT: s_lshr_b32 s8, s21, 24 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s13, s19, 16 -; SI-NEXT: s_lshr_b32 s14, s19, 8 -; SI-NEXT: s_lshr_b32 s15, s17, 24 -; SI-NEXT: s_lshr_b32 s24, s17, 16 -; SI-NEXT: s_lshr_b32 s25, s17, 8 +; SI-NEXT: s_lshr_b32 s56, s23, 24 +; SI-NEXT: s_lshr_b32 s57, s23, 16 +; SI-NEXT: s_lshr_b32 s58, s23, 8 +; SI-NEXT: s_lshr_b32 s59, s21, 24 +; SI-NEXT: s_lshr_b32 s60, s21, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 8 +; SI-NEXT: s_lshr_b32 s62, s19, 24 +; SI-NEXT: s_lshr_b32 s63, s19, 16 +; SI-NEXT: s_lshr_b32 s72, s19, 8 +; SI-NEXT: s_lshr_b32 s73, s17, 24 +; SI-NEXT: s_lshr_b32 s74, s17, 16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 ; SI-NEXT: s_add_u32 s22, s22, 3 ; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s23, 24 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 8 -; SI-NEXT: s_lshr_b32 s8, s21, 24 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: 
s_lshr_b32 s13, s19, 16 -; SI-NEXT: s_lshr_b32 s14, s19, 8 -; SI-NEXT: s_lshr_b32 s15, s17, 24 -; SI-NEXT: s_lshr_b32 s24, s17, 16 -; SI-NEXT: s_lshr_b32 s25, s17, 8 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s56, s23, 24 +; SI-NEXT: s_lshr_b32 s57, s23, 16 +; SI-NEXT: s_lshr_b32 s58, s23, 8 +; SI-NEXT: s_lshr_b32 s59, s21, 24 +; SI-NEXT: s_lshr_b32 s60, s21, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 8 +; SI-NEXT: s_lshr_b32 s62, s19, 24 +; SI-NEXT: s_lshr_b32 s63, s19, 16 +; SI-NEXT: s_lshr_b32 s72, s19, 8 +; SI-NEXT: s_lshr_b32 s73, s17, 24 +; SI-NEXT: s_lshr_b32 s74, s17, 16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 ; SI-NEXT: .LBB69_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s44 +; SI-NEXT: v_mov_b32_e32 v2, s42 +; SI-NEXT: v_mov_b32_e32 v3, s40 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s25 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v5, s75 +; SI-NEXT: v_mov_b32_e32 v6, s74 +; SI-NEXT: v_mov_b32_e32 v7, s73 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s28 +; SI-NEXT: v_mov_b32_e32 v10, s26 +; SI-NEXT: v_mov_b32_e32 v11, s24 ; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v13, s14 -; SI-NEXT: v_mov_b32_e32 v14, s13 -; SI-NEXT: v_mov_b32_e32 v15, s11 +; SI-NEXT: v_mov_b32_e32 v13, s72 +; SI-NEXT: v_mov_b32_e32 v14, s63 +; SI-NEXT: v_mov_b32_e32 v15, s62 ; SI-NEXT: v_mov_b32_e32 v16, s20 +; SI-NEXT: v_mov_b32_e32 v17, s14 +; SI-NEXT: v_mov_b32_e32 v18, s12 +; SI-NEXT: v_mov_b32_e32 v19, s10 ; SI-NEXT: v_mov_b32_e32 v20, s21 -; SI-NEXT: v_mov_b32_e32 v21, s12 -; SI-NEXT: v_mov_b32_e32 v22, s10 -; SI-NEXT: v_mov_b32_e32 v23, s8 +; SI-NEXT: v_mov_b32_e32 v21, s61 +; SI-NEXT: v_mov_b32_e32 v22, s60 +; SI-NEXT: v_mov_b32_e32 v23, s59 ; SI-NEXT: v_mov_b32_e32 v24, s22 +; SI-NEXT: v_mov_b32_e32 v25, s8 +; SI-NEXT: v_mov_b32_e32 v26, s6 +; SI-NEXT: v_mov_b32_e32 v27, s4 ; SI-NEXT: v_mov_b32_e32 v28, s23 -; SI-NEXT: v_mov_b32_e32 v29, s9 -; SI-NEXT: v_mov_b32_e32 v30, s7 -; SI-NEXT: v_mov_b32_e32 v31, s6 +; SI-NEXT: v_mov_b32_e32 v29, s58 +; SI-NEXT: v_mov_b32_e32 v30, s57 +; SI-NEXT: v_mov_b32_e32 v31, s56 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr24 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: 
; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v4i64_to_v32i8_scalar: @@ -21155,56 +21185,56 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB73_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s25, s23, 16 +; SI-NEXT: s_lshr_b32 s24, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB73_4 ; SI-NEXT: .LBB73_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[22:23], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[20:21], s[18:19], 1.0 ; SI-NEXT: v_add_f64 v[16:17], s[22:23], 1.0 ; SI-NEXT: v_add_f64 v[18:19], s[20:21], 1.0 -; SI-NEXT: v_alignbit_b32 v13, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v9, v19, v18, 16 -; SI-NEXT: v_alignbit_b32 v5, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v1, v23, v22, 16 +; SI-NEXT: v_add_f64 v[20:21], s[18:19], 1.0 +; SI-NEXT: v_add_f64 v[22:23], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[13:14], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[18:19], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[22:23], 16 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v17 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v21 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v23 ; SI-NEXT: s_branch .LBB73_5 ; SI-NEXT: .LBB73_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: s_branch .LBB73_2 ; SI-NEXT: .LBB73_4: -; SI-NEXT: v_mov_b32_e32 v23, s17 -; SI-NEXT: v_mov_b32_e32 v21, s19 -; SI-NEXT: v_mov_b32_e32 v19, s21 ; SI-NEXT: v_mov_b32_e32 
v17, s23 -; SI-NEXT: v_mov_b32_e32 v16, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v20, s18 +; SI-NEXT: v_mov_b32_e32 v19, s21 +; SI-NEXT: v_mov_b32_e32 v21, s19 +; SI-NEXT: v_mov_b32_e32 v23, s17 ; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v20, s18 +; SI-NEXT: v_mov_b32_e32 v18, s20 +; SI-NEXT: v_mov_b32_e32 v16, s22 +; SI-NEXT: v_mov_b32_e32 v15, s25 +; SI-NEXT: v_mov_b32_e32 v11, s24 +; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v3, s14 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: v_mov_b32_e32 v9, s6 +; SI-NEXT: v_mov_b32_e32 v13, s4 ; SI-NEXT: .LBB73_5: ; %end ; SI-NEXT: v_mov_b32_e32 v0, v22 ; SI-NEXT: v_mov_b32_e32 v2, v23 @@ -24819,121 +24849,133 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB85_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v32, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v33, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v34, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v35, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s25, s23, 24 -; SI-NEXT: s_lshr_b32 s24, s23, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 8 -; SI-NEXT: s_lshr_b32 s14, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 8 -; SI-NEXT: s_lshr_b32 s8, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s74, s23, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 8 +; SI-NEXT: s_lshr_b32 s72, s21, 24 +; SI-NEXT: s_lshr_b32 s63, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s21, 8 +; SI-NEXT: s_lshr_b32 s61, s19, 24 +; SI-NEXT: s_lshr_b32 s60, s19, 16 +; SI-NEXT: s_lshr_b32 s59, s19, 8 +; SI-NEXT: s_lshr_b32 s58, s17, 24 +; SI-NEXT: s_lshr_b32 s57, s17, 16 +; SI-NEXT: s_lshr_b32 s56, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB85_4 ; SI-NEXT: .LBB85_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[8:9], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[24:25], s[22:23], 1.0 -; SI-NEXT: v_add_f64 v[16:17], s[20:21], 1.0 -; SI-NEXT: v_alignbit_b32 v27, v25, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v25, v24, 16 -; SI-NEXT: v_alignbit_b32 v32, v25, v24, 8 -; SI-NEXT: v_alignbit_b32 v19, v17, v16, 24 -; 
SI-NEXT: v_alignbit_b32 v18, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v33, v17, v16, 8 -; SI-NEXT: v_alignbit_b32 v11, v9, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v34, v9, v8, 8 -; SI-NEXT: v_alignbit_b32 v3, v1, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v1, v0, 16 -; SI-NEXT: v_alignbit_b32 v35, v1, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v25 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v25 -; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v17 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v9 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v1 +; SI-NEXT: v_add_f64 v[50:51], s[22:23], 1.0 +; SI-NEXT: v_add_f64 v[37:38], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[48:49], s[16:17], 1.0 +; SI-NEXT: v_add_f64 v[35:36], s[18:19], 1.0 +; SI-NEXT: v_lshr_b64 v[24:25], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[0:1], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[50:51], 24 +; SI-NEXT: v_lshr_b64 v[25:26], v[50:51], 8 +; SI-NEXT: v_lshr_b64 v[19:20], v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[17:18], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[35:36], 24 +; SI-NEXT: v_lshr_b64 v[33:34], v[35:36], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[35:36], 8 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v51 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v51 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v38 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v38 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v36 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v36 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v49 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 ; SI-NEXT: s_branch .LBB85_5 ; SI-NEXT: .LBB85_3: -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr24 -; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: 
$sgpr63 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: s_branch .LBB85_2 ; SI-NEXT: .LBB85_4: -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v9, s19 -; SI-NEXT: v_mov_b32_e32 v17, s21 -; SI-NEXT: v_mov_b32_e32 v25, s23 -; SI-NEXT: v_mov_b32_e32 v24, s22 -; SI-NEXT: v_mov_b32_e32 v16, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v7, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v5, s6 -; SI-NEXT: v_mov_b32_e32 v15, s11 -; SI-NEXT: v_mov_b32_e32 v14, s10 -; SI-NEXT: v_mov_b32_e32 v13, s9 -; SI-NEXT: v_mov_b32_e32 v23, s14 -; SI-NEXT: v_mov_b32_e32 v22, s13 -; SI-NEXT: v_mov_b32_e32 v21, s12 -; SI-NEXT: v_mov_b32_e32 v31, s25 -; SI-NEXT: v_mov_b32_e32 v30, s24 -; SI-NEXT: v_mov_b32_e32 v29, s15 +; SI-NEXT: v_mov_b32_e32 v51, s23 +; SI-NEXT: v_mov_b32_e32 v38, s21 +; SI-NEXT: v_mov_b32_e32 v36, s19 +; SI-NEXT: v_mov_b32_e32 v49, s17 +; SI-NEXT: v_mov_b32_e32 v48, s16 +; SI-NEXT: v_mov_b32_e32 v35, s18 +; SI-NEXT: v_mov_b32_e32 v37, s20 +; SI-NEXT: v_mov_b32_e32 v50, s22 +; SI-NEXT: v_mov_b32_e32 v31, s75 +; SI-NEXT: v_mov_b32_e32 v30, s74 +; SI-NEXT: v_mov_b32_e32 v29, s73 +; SI-NEXT: v_mov_b32_e32 v23, s72 +; SI-NEXT: v_mov_b32_e32 v22, s63 +; SI-NEXT: v_mov_b32_e32 v21, s62 +; SI-NEXT: v_mov_b32_e32 v15, s61 +; SI-NEXT: v_mov_b32_e32 v14, s60 +; SI-NEXT: v_mov_b32_e32 v13, s59 +; SI-NEXT: v_mov_b32_e32 v7, s58 +; SI-NEXT: v_mov_b32_e32 v6, s57 +; SI-NEXT: v_mov_b32_e32 v5, s56 +; SI-NEXT: v_mov_b32_e32 v1, s44 +; SI-NEXT: v_mov_b32_e32 v0, s42 +; SI-NEXT: v_mov_b32_e32 v3, s40 +; SI-NEXT: v_mov_b32_e32 v9, s28 +; SI-NEXT: v_mov_b32_e32 v33, s26 +; SI-NEXT: v_mov_b32_e32 v11, s24 +; SI-NEXT: v_mov_b32_e32 v17, s14 +; SI-NEXT: v_mov_b32_e32 v32, s12 +; SI-NEXT: v_mov_b32_e32 v19, s10 +; SI-NEXT: v_mov_b32_e32 v25, s8 +; SI-NEXT: v_mov_b32_e32 v24, s6 +; SI-NEXT: v_mov_b32_e32 v27, s4 ; SI-NEXT: .LBB85_5: ; %end -; SI-NEXT: v_mov_b32_e32 v4, v1 -; SI-NEXT: v_mov_b32_e32 v12, v9 -; SI-NEXT: v_mov_b32_e32 v20, v17 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v1, v35 -; SI-NEXT: v_mov_b32_e32 v9, v34 -; SI-NEXT: v_mov_b32_e32 v17, v33 -; SI-NEXT: v_mov_b32_e32 v25, v32 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v4, v49 +; SI-NEXT: v_mov_b32_e32 v10, v33 +; SI-NEXT: v_mov_b32_e32 v8, v35 +; SI-NEXT: v_mov_b32_e32 v12, v36 +; SI-NEXT: v_mov_b32_e32 v18, v32 +; SI-NEXT: v_mov_b32_e32 v16, v37 +; SI-NEXT: v_mov_b32_e32 v20, v38 +; SI-NEXT: v_mov_b32_e32 v26, v24 +; SI-NEXT: v_mov_b32_e32 v24, v50 +; SI-NEXT: v_mov_b32_e32 v28, v51 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v4f64_to_v32i8_scalar: @@ -27681,26 +27723,24 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i ; SI-LABEL: bitcast_v16f16_to_v16i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v15, v1 -; SI-NEXT: v_mov_b32_e32 v14, v0 -; SI-NEXT: v_mov_b32_e32 v16, v2 +; SI-NEXT: v_mov_b32_e32 v5, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, 
s21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB91_4 ; SI-NEXT: ; %bb.1: ; %cmp.false @@ -27713,16 +27753,12 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v18 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 @@ -27730,51 +27766,59 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_or_b32_e32 v14, v14, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v11 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v10, v10, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, 
v12 -; SI-NEXT: v_or_b32_e32 v14, v14, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v7 ; SI-NEXT: v_or_b32_e32 v6, v6, v16 ; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v2, v2, v16 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v2, v2, v16 +; SI-NEXT: v_lshr_b64 v[18:19], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[19:20], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[16:17], v[13:14], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 ; SI-NEXT: .LBB91_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v18 +; SI-NEXT: v_mov_b32_e32 v5, v21 +; SI-NEXT: v_mov_b32_e32 v9, v19 +; SI-NEXT: v_mov_b32_e32 v13, v16 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB91_4: ; SI-NEXT: s_branch .LBB91_2 @@ -29151,115 +29195,119 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mul_f32_e64 v31, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v30, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v17, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v20, 1.0, s18 ; SI-NEXT: v_mul_f32_e64 v16, 1.0, s19 ; SI-NEXT: v_mul_f32_e64 v29, 1.0, s20 ; SI-NEXT: v_mul_f32_e64 v28, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v19, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v18, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v22, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v5, 1.0, s23 ; SI-NEXT: v_mul_f32_e64 v27, 1.0, s24 ; SI-NEXT: v_mul_f32_e64 v26, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v21, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v20, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v23, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s27 ; SI-NEXT: v_mul_f32_e64 v25, 1.0, s28 ; SI-NEXT: v_mul_f32_e64 v24, 1.0, s29 -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v0 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v13, 1.0, v0 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: s_cbranch_scc0 .LBB95_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v29 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 ; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v27 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 ; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v13 ; SI-NEXT: s_cbranch_execnz .LBB95_3 ; SI-NEXT: .LBB95_2: ; %cmp.true -; 
SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v30 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v31 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v12 ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v28 ; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v29 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v26 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v26 ; SI-NEXT: v_alignbit_b32 v4, v4, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v27 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5 -; SI-NEXT: v_alignbit_b32 v8, v6, v2, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v24 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v25 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_alignbit_b32 v12, v7, v2, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v22 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v23 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_alignbit_b32 v8, v7, v2, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v25 +; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v24 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v7 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v20 -; SI-NEXT: v_alignbit_b32 v14, v15, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v21 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v2 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v13 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v23 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v6 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v18 -; SI-NEXT: v_alignbit_b32 v10, v11, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v5 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_alignbit_b32 v10, v11, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v16 -; SI-NEXT: v_alignbit_b32 v6, v7, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v14, v15, v7, 16 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v16 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 +; SI-NEXT: v_alignbit_b32 v6, v7, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: 
v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; SI-NEXT: v_alignbit_b32 v2, v3, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v12 +; SI-NEXT: v_lshr_b64 v[17:18], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[18:19], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[19:20], v[13:14], 16 +; SI-NEXT: v_alignbit_b32 v12, v24, v25, 16 ; SI-NEXT: .LBB95_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v17 +; SI-NEXT: v_mov_b32_e32 v5, v18 +; SI-NEXT: v_mov_b32_e32 v9, v21 +; SI-NEXT: v_mov_b32_e32 v13, v19 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB95_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: s_branch .LBB95_2 ; ; VI-LABEL: bitcast_v16bf16_to_v16i16_scalar: @@ -30446,80 +30494,83 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32 ; SI-LABEL: bitcast_v16i16_to_v32i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v4, v1 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: v_readfirstlane_b32 s78, v1 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_readfirstlane_b32 s79, v0 ; SI-NEXT: s_cbranch_scc0 .LBB97_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s11, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s12, s4, s5 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: s_or_b32 s9, s4, s5 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: v_mov_b32_e32 v6, s6 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: v_alignbit_b32 v11, s9, v6, 24 -; SI-NEXT: v_alignbit_b32 v10, s9, v6, 16 -; SI-NEXT: v_alignbit_b32 v9, s9, v6, 8 -; SI-NEXT: s_or_b32 s10, s4, s5 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_alignbit_b32 v19, s10, v6, 24 -; SI-NEXT: v_alignbit_b32 v18, s10, v6, 16 -; SI-NEXT: v_alignbit_b32 v17, s10, v6, 8 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v0 -; SI-NEXT: v_mov_b32_e32 v1, s11 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: v_or_b32_e32 v28, v6, v5 -; SI-NEXT: v_alignbit_b32 v3, s12, v1, 24 -; SI-NEXT: v_alignbit_b32 v2, s12, v1, 16 -; SI-NEXT: v_alignbit_b32 v1, s12, v1, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, s8, 24 -; SI-NEXT: 
v_alignbit_b32 v26, v28, s8, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, s8, 8 -; SI-NEXT: s_lshr_b32 s44, s12, 8 -; SI-NEXT: s_lshr_b32 s14, s9, 8 -; SI-NEXT: s_lshr_b32 s41, s10, 8 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 -; SI-NEXT: s_and_b32 s45, s19, 0xffff -; SI-NEXT: s_and_b32 s15, s23, 0xffff -; SI-NEXT: s_and_b32 s42, s27, 0xffff -; SI-NEXT: v_and_b32_e32 v30, 0xffff, v4 -; SI-NEXT: s_bfe_u32 s13, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s40, s23, 0x80008 -; SI-NEXT: s_bfe_u32 s43, s27, 0x80008 -; SI-NEXT: v_bfe_u32 v31, v4, 8, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s6, s20, 0xffff +; SI-NEXT: s_lshl_b32 s7, s21, 16 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xffff +; SI-NEXT: s_lshl_b32 s8, s23, 16 +; SI-NEXT: s_or_b32 s7, s7, s8 +; SI-NEXT: s_and_b32 s8, s24, 0xffff +; SI-NEXT: s_lshl_b32 s9, s25, 16 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_and_b32 s9, s26, 0xffff +; SI-NEXT: s_lshl_b32 s10, s27, 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 24 +; SI-NEXT: s_or_b32 s9, s9, s10 +; SI-NEXT: s_and_b32 s10, s28, 0xffff +; SI-NEXT: s_lshl_b32 s11, s29, 16 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s79, 0xffff +; SI-NEXT: s_lshl_b32 s13, s78, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: s_lshr_b32 s13, s5, 8 +; SI-NEXT: s_lshr_b32 s41, s7, 8 +; SI-NEXT: s_lshr_b32 s47, s9, 8 +; SI-NEXT: s_lshr_b32 s88, s11, 8 +; SI-NEXT: s_and_b32 s15, s19, 0xffff +; SI-NEXT: s_and_b32 s45, s23, 0xffff +; SI-NEXT: s_and_b32 s59, s27, 0xffff +; SI-NEXT: s_and_b32 s90, s78, 0xffff +; SI-NEXT: s_bfe_u32 s43, s19, 0x80008 +; SI-NEXT: s_bfe_u32 s57, s23, 0x80008 +; SI-NEXT: s_bfe_u32 s89, s27, 0x80008 +; SI-NEXT: s_bfe_u32 s91, s78, 0x80008 +; SI-NEXT: s_lshr_b64 s[60:61], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[10:11], 8 ; SI-NEXT: s_cbranch_execnz .LBB97_3 ; SI-NEXT: .LBB97_2: ; %cmp.true ; SI-NEXT: s_add_i32 s28, s28, 3 ; SI-NEXT: s_and_b32 s4, s28, 0xffff ; SI-NEXT: s_lshl_b32 s5, s29, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_add_i32 s79, s79, 3 +; SI-NEXT: s_add_i32 s10, s4, 0x30000 +; SI-NEXT: s_and_b32 s4, s79, 0xffff +; SI-NEXT: s_lshl_b32 s5, s78, 16 +; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 +; SI-NEXT: s_add_i32 s11, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s24, 0xffff ; SI-NEXT: s_lshl_b32 s5, s25, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 +; SI-NEXT: s_add_i32 s8, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s26, 0xffff ; SI-NEXT: s_lshl_b32 s5, s27, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x30000 +; SI-NEXT: s_add_i32 s9, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s20, 0xffff ; SI-NEXT: s_lshl_b32 s5, s21, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 @@ -30529,99 +30580,103 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32 ; SI-NEXT: s_lshl_b32 s5, s23, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s9, s4, 
0x30000 +; SI-NEXT: s_add_i32 s7, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_add_i32 s11, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: v_or_b32_e32 v0, v5, v0 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_mov_b32_e32 v4, s6 -; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: v_mov_b32_e32 v0, s8 -; SI-NEXT: s_add_i32 s12, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v1, s11 -; SI-NEXT: v_alignbit_b32 v11, s9, v4, 24 -; SI-NEXT: v_alignbit_b32 v10, s9, v4, 16 -; SI-NEXT: v_alignbit_b32 v9, s9, v4, 8 -; SI-NEXT: v_mov_b32_e32 v4, s7 -; SI-NEXT: v_alignbit_b32 v3, s12, v1, 24 -; SI-NEXT: v_alignbit_b32 v2, s12, v1, 16 -; SI-NEXT: v_alignbit_b32 v1, s12, v1, 8 -; SI-NEXT: v_alignbit_b32 v19, s10, v4, 24 -; SI-NEXT: v_alignbit_b32 v18, s10, v4, 16 -; SI-NEXT: v_alignbit_b32 v17, s10, v4, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v0, 8 -; SI-NEXT: s_lshr_b32 s13, s12, 24 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s12, 8 -; SI-NEXT: s_lshr_b32 s40, s9, 24 -; SI-NEXT: s_lshr_b32 s15, s9, 16 -; SI-NEXT: s_lshr_b32 s14, s9, 8 -; SI-NEXT: s_lshr_b32 s43, s10, 24 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: s_lshr_b32 s41, s10, 8 -; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s12, s19, 16 +; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[10:11], 8 +; SI-NEXT: s_lshr_b32 s43, s5, 24 +; SI-NEXT: s_lshr_b32 s15, s5, 16 +; SI-NEXT: s_lshr_b32 s13, s5, 8 +; SI-NEXT: s_lshr_b32 s57, s7, 24 +; SI-NEXT: s_lshr_b32 s45, s7, 16 +; SI-NEXT: s_lshr_b32 s41, s7, 8 +; SI-NEXT: s_lshr_b32 s89, s9, 24 +; SI-NEXT: s_lshr_b32 s59, s9, 16 +; SI-NEXT: s_lshr_b32 s47, s9, 8 +; SI-NEXT: s_lshr_b32 s91, s11, 24 +; SI-NEXT: s_lshr_b32 s90, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s11, 8 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s11 -; SI-NEXT: v_mov_b32_e32 v4, s12 -; SI-NEXT: v_mov_b32_e32 v5, s44 -; SI-NEXT: v_mov_b32_e32 v6, s45 -; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s40 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v4, s5 +; SI-NEXT: v_mov_b32_e32 v5, s13 +; SI-NEXT: v_mov_b32_e32 v6, s15 +; SI-NEXT: v_mov_b32_e32 v7, s43 ; SI-NEXT: v_mov_b32_e32 v8, s6 -; SI-NEXT: v_mov_b32_e32 v12, s9 -; SI-NEXT: v_mov_b32_e32 v13, s14 -; SI-NEXT: v_mov_b32_e32 v14, s15 -; SI-NEXT: v_mov_b32_e32 v15, s40 -; SI-NEXT: v_mov_b32_e32 v16, s7 -; SI-NEXT: v_mov_b32_e32 v20, s10 -; SI-NEXT: v_mov_b32_e32 v21, s41 -; SI-NEXT: v_mov_b32_e32 v22, s42 -; SI-NEXT: 
v_mov_b32_e32 v23, s43 -; SI-NEXT: v_mov_b32_e32 v24, s8 +; SI-NEXT: v_mov_b32_e32 v9, s46 +; SI-NEXT: v_mov_b32_e32 v10, s44 +; SI-NEXT: v_mov_b32_e32 v11, s42 +; SI-NEXT: v_mov_b32_e32 v12, s7 +; SI-NEXT: v_mov_b32_e32 v13, s41 +; SI-NEXT: v_mov_b32_e32 v14, s45 +; SI-NEXT: v_mov_b32_e32 v15, s57 +; SI-NEXT: v_mov_b32_e32 v16, s8 +; SI-NEXT: v_mov_b32_e32 v17, s60 +; SI-NEXT: v_mov_b32_e32 v18, s58 +; SI-NEXT: v_mov_b32_e32 v19, s56 +; SI-NEXT: v_mov_b32_e32 v20, s9 +; SI-NEXT: v_mov_b32_e32 v21, s47 +; SI-NEXT: v_mov_b32_e32 v22, s59 +; SI-NEXT: v_mov_b32_e32 v23, s89 +; SI-NEXT: v_mov_b32_e32 v24, s10 +; SI-NEXT: v_mov_b32_e32 v25, s74 +; SI-NEXT: v_mov_b32_e32 v26, s62 +; SI-NEXT: v_mov_b32_e32 v27, s72 +; SI-NEXT: v_mov_b32_e32 v28, s11 +; SI-NEXT: v_mov_b32_e32 v29, s88 +; SI-NEXT: v_mov_b32_e32 v30, s90 +; SI-NEXT: v_mov_b32_e32 v31, s91 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $sgpr42 ; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr47 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v16i16_to_v32i8_scalar: @@ -32019,234 +32074,229 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18 -; SI-NEXT: v_mov_b32_e32 v19, v14 -; SI-NEXT: v_mov_b32_e32 v20, v12 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s9, v3 -; SI-NEXT: v_readfirstlane_b32 s10, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: v_readfirstlane_b32 s6, v0 +; SI-NEXT: v_mov_b32_e32 v22, v14 +; SI-NEXT: v_mov_b32_e32 v21, v10 +; 
SI-NEXT: v_readfirstlane_b32 s43, v1 +; SI-NEXT: v_readfirstlane_b32 s42, v0 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v7 -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v3 ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v17 ; SI-NEXT: s_cbranch_scc0 .LBB99_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: s_or_b32 s11, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s8, s19, 24 -; SI-NEXT: s_or_b32 s4, s8, s4 -; SI-NEXT: s_and_b32 s8, s28, 0xff -; SI-NEXT: s_lshl_b32 s12, s29, 8 -; SI-NEXT: s_or_b32 s8, s8, s12 -; SI-NEXT: s_and_b32 s12, s6, 0xff -; SI-NEXT: s_lshl_b32 s12, s12, 16 -; SI-NEXT: s_lshl_b32 s15, s7, 24 -; SI-NEXT: s_or_b32 s41, s15, s12 -; SI-NEXT: s_and_b32 s12, s26, 0xff -; SI-NEXT: s_lshl_b32 s12, s12, 16 -; SI-NEXT: s_lshl_b32 s15, s27, 24 -; SI-NEXT: s_or_b32 s12, s15, s12 -; SI-NEXT: s_and_b32 s15, s16, 0xff -; SI-NEXT: s_lshl_b32 s40, s17, 8 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v6 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v8 -; SI-NEXT: s_or_b32 s15, s15, s40 -; SI-NEXT: v_or_b32_e32 v9, v9, v2 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_and_b32 s15, s15, 0xffff -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v11, v0, v10 -; SI-NEXT: s_or_b32 s15, s15, s4 +; SI-NEXT: s_or_b32 s12, s6, s5 +; SI-NEXT: s_or_b32 s6, s4, s12 ; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s40, s25, 8 -; SI-NEXT: v_or_b32_e32 v10, v9, v11 +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s26, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_or_b32 s14, s7, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_or_b32 s13, s5, s7 +; SI-NEXT: s_lshr_b64 s[8:9], s[12:13], 16 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v12 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v8 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s9, s29, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v16 +; SI-NEXT: s_or_b32 s5, s5, s9 +; SI-NEXT: s_and_b32 s9, s42, 0xff +; SI-NEXT: v_or_b32_e32 v9, v9, v23 +; SI-NEXT: v_or_b32_e32 v13, v24, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v15, v0, v14 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: 
s_lshl_b32 s10, s43, 24 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v9 ; SI-NEXT: v_and_b32_e32 v9, 0xff, v4 -; SI-NEXT: s_or_b32 s4, s4, s40 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v5, s12 -; SI-NEXT: v_or_b32_e32 v12, v3, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v19 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v16 -; SI-NEXT: s_or_b32 s12, s4, s12 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s40, s9, 8 -; SI-NEXT: v_or_b32_e32 v9, v9, v21 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: s_or_b32 s4, s4, s40 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v15, v7, v13 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v14, v9, v15 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v20 -; SI-NEXT: v_or_b32_e32 v18, s4, v12 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s40, s13, 8 -; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: v_or_b32_e32 v10, v10, v1 +; SI-NEXT: v_or_b32_e32 v14, v14, v7 +; SI-NEXT: v_or_b32_e32 v26, v5, v17 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v21 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s12, s10, s9 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_or_b32 s4, s4, s40 -; SI-NEXT: s_or_b32 s8, s8, s41 -; SI-NEXT: v_or_b32_e32 v22, v17, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_or_b32_e32 v17, v17, v25 +; SI-NEXT: s_or_b32 s15, s5, s12 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v1, s11, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, s8, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v22, 16 -; SI-NEXT: v_or_b32_e32 v12, s4, v22 -; SI-NEXT: s_lshr_b32 s40, s5, 16 -; SI-NEXT: s_lshr_b32 s41, s41, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_or_b32_e32 v9, v3, v9 +; SI-NEXT: v_or_b32_e32 v10, v10, v15 +; SI-NEXT: v_or_b32_e32 v14, v14, v26 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 16 +; SI-NEXT: s_or_b32 s4, s4, s14 +; SI-NEXT: v_or_b32_e32 v19, v11, v9 +; SI-NEXT: v_mov_b32_e32 v20, v10 +; SI-NEXT: v_lshr_b64 v[9:10], v[9:10], 16 +; SI-NEXT: v_or_b32_e32 v17, v17, v13 +; SI-NEXT: v_mov_b32_e32 v18, v14 +; SI-NEXT: v_lshr_b64 v[13:14], v[13:14], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s12, 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v26 +; SI-NEXT: s_mov_b32 s7, s13 +; SI-NEXT: s_mov_b32 s5, s15 ; SI-NEXT: s_cbranch_execnz .LBB99_3 ; SI-NEXT: .LBB99_2: ; %cmp.true -; SI-NEXT: s_add_i32 s14, s14, 3 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s5, s13, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v17, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v19 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v16 -; SI-NEXT: v_or_b32_e32 v1, v21, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s10, s10, 3 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v1 -; 
SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s9, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v4 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 ; SI-NEXT: s_and_b32 s4, s24, 0xff ; SI-NEXT: s_lshl_b32 s5, s25, 8 ; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s26, 0xff +; SI-NEXT: s_and_b32 s6, s26, 0xff ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s12, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s7, 24 ; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v21 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s6, s29, 8 +; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v12 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s7, s42, 0xff +; SI-NEXT: v_or_b32_e32 v9, v25, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_or_b32_e32 v2, v23, v2 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s43, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x300, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x300, v2 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_lshl_b32 s5, s17, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: v_or_b32_e32 v10, v24, v10 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s16, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s18, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v6 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v22 +; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v6 +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s19, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_and_b32 
s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v16 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v8 -; SI-NEXT: s_add_i32 s15, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s8, s21, 8 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s22, 0xff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s9, s22, 0xff +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x300, v7 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v9 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: s_add_i32 s11, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s15 -; SI-NEXT: v_alignbit_b32 v1, s11, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s12 -; SI-NEXT: v_alignbit_b32 v5, s8, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v18, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: s_lshr_b32 s40, s11, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v5 +; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_lshr_b64 s[8:9], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[17:18], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v18 ; SI-NEXT: .LBB99_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s15 -; SI-NEXT: v_mov_b32_e32 v2, s11 -; SI-NEXT: v_mov_b32_e32 v3, s40 -; SI-NEXT: v_mov_b32_e32 v4, s12 -; SI-NEXT: v_mov_b32_e32 v6, s8 -; SI-NEXT: v_mov_b32_e32 v7, s41 -; SI-NEXT: v_mov_b32_e32 v8, v18 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v6, s5 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v8, v19 +; SI-NEXT: v_mov_b32_e32 v10, v20 +; SI-NEXT: v_mov_b32_e32 v12, v17 +; SI-NEXT: v_mov_b32_e32 v14, v18 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB99_4: -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr5 +; 
SI-NEXT: ; implicit-def: $sgpr6 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 ; SI-NEXT: s_branch .LBB99_2 ; @@ -35384,116 +35434,121 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32 ; SI-LABEL: bitcast_v16f16_to_v32i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v34, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v33, s16 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v32, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v32, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v37, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v36, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v53, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v52, s20 ; SI-NEXT: v_cvt_f16_f32_e32 v14, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v35, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v48, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v39, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v39, s22 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_cvt_f16_f32_e32 v40, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v55, s24 ; SI-NEXT: v_cvt_f16_f32_e32 v22, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v38, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v51, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v50, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v54, s26 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v42, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v41, s28 ; SI-NEXT: v_cvt_f16_f32_e32 v30, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB105_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37 -; SI-NEXT: v_or_b32_e32 v8, v36, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v14 -; SI-NEXT: v_or_b32_e32 v12, v35, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v48 -; SI-NEXT: v_or_b32_e32 v16, v39, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v22 -; SI-NEXT: v_or_b32_e32 v20, v38, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v51 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_or_b32_e32 v48, v16, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v24, v50, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v30 -; SI-NEXT: v_or_b32_e32 v0, v33, v0 -; SI-NEXT: v_or_b32_e32 v4, v32, v1 -; SI-NEXT: v_or_b32_e32 v28, v49, v5 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: 
v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: v_or_b32_e32 v49, v8, v1 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: v_or_b32_e32 v35, v52, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v14 +; SI-NEXT: v_or_b32_e32 v36, v39, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 +; SI-NEXT: v_or_b32_e32 v37, v55, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v22 +; SI-NEXT: v_or_b32_e32 v38, v54, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[35:36], 24 +; SI-NEXT: v_or_b32_e32 v33, v41, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_lshr_b64 v[4:5], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[12:13], v[35:36], 16 +; SI-NEXT: v_or_b32_e32 v34, v0, v2 +; SI-NEXT: v_lshr_b64 v[24:25], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[35:36], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v15, v14, 8, 8 ; SI-NEXT: v_bfe_u32 v23, v22, 8, 8 ; SI-NEXT: v_bfe_u32 v31, v30, 8, 8 +; SI-NEXT: v_lshr_b64 v[19:20], v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[17:18], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[27:28], v[33:34], 24 +; SI-NEXT: v_lshr_b64 v[50:51], v[33:34], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[33:34], 8 ; SI-NEXT: s_cbranch_execnz .LBB105_3 ; SI-NEXT: .LBB105_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v0, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v39 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v2 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v49 -; SI-NEXT: v_or_b32_e32 v24, v1, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v33, v2, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v55 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v30 -; SI-NEXT: v_or_b32_e32 v28, v2, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v30 +; SI-NEXT: v_or_b32_e32 v34, v0, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v22 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v16, v3, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v37 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; SI-NEXT: v_or_b32_e32 v37, v3, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v53 ; SI-NEXT: v_add_f32_e32 v0, 
0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v22, v0 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v1 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v52 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v22 -; SI-NEXT: v_or_b32_e32 v20, v0, v2 +; SI-NEXT: v_or_b32_e32 v38, v0, v2 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v14 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v39 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v1 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_or_b32_e32 v8, v2, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v34 +; SI-NEXT: v_or_b32_e32 v35, v2, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v32 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v14 -; SI-NEXT: v_or_b32_e32 v12, v1, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v33 +; SI-NEXT: v_or_b32_e32 v36, v1, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 @@ -35503,60 +35558,72 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v2, v1 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: v_or_b32_e32 v48, v1, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: v_or_b32_e32 v49, v2, v0 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[35:36], 24 +; SI-NEXT: v_lshr_b64 v[24:25], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[4:5], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshr_b64 v[12:13], v[35:36], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[35:36], 8 +; SI-NEXT: v_lshr_b64 v[19:20], v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[17:18], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[27:28], v[33:34], 24 +; SI-NEXT: v_lshr_b64 v[50:51], v[33:34], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[33:34], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v15, v14, 8, 8 ; SI-NEXT: v_bfe_u32 v23, v22, 8, 8 ; SI-NEXT: v_bfe_u32 v31, v30, 8, 8 ; SI-NEXT: .LBB105_3: ; %end +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload 
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v49 +; SI-NEXT: v_mov_b32_e32 v8, v35 +; SI-NEXT: v_mov_b32_e32 v10, v12 +; SI-NEXT: v_mov_b32_e32 v12, v36 +; SI-NEXT: v_mov_b32_e32 v16, v37 +; SI-NEXT: v_mov_b32_e32 v18, v24 +; SI-NEXT: v_mov_b32_e32 v20, v38 +; SI-NEXT: v_mov_b32_e32 v24, v33 +; SI-NEXT: v_mov_b32_e32 v26, v50 +; SI-NEXT: v_mov_b32_e32 v28, v34 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB105_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr20 ; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: s_branch .LBB105_2 ; ; VI-LABEL: bitcast_v16f16_to_v32i8_scalar: @@ -38793,166 +38860,186 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e64 v34, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v35, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v32, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v33, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v38, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v39, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v36, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v37, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v50, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v51, 1.0, s24 -; SI-NEXT: v_mul_f32_e64 v48, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v49, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v54, 1.0, s29 -; SI-NEXT: v_mul_f32_e64 v55, 1.0, s28 -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v1 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v0 +; SI-NEXT: v_mul_f32_e64 v24, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v32, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v16, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v51, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v52, 1.0, s20 +; SI-NEXT: 
v_mul_f32_e64 v39, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v50, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v55, 1.0, s25 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mul_f32_e64 v40, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v53, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v54, 1.0, s26 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mul_f32_e64 v42, 1.0, s29 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e64 v43, 1.0, s28 +; SI-NEXT: v_mul_f32_e32 v41, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; SI-NEXT: s_cbranch_scc0 .LBB109_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v38 -; SI-NEXT: v_alignbit_b32 v8, v5, v39, 16 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v50 -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v34 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v32 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v36 -; SI-NEXT: v_alignbit_b32 v16, v5, v51, 16 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v48 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v54 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v52 -; SI-NEXT: v_alignbit_b32 v0, v0, v35, 16 -; SI-NEXT: v_alignbit_b32 v4, v6, v33, 16 -; SI-NEXT: v_alignbit_b32 v12, v14, v37, 16 -; SI-NEXT: v_alignbit_b32 v20, v22, v49, 16 -; SI-NEXT: v_alignbit_b32 v24, v5, v55, 16 -; SI-NEXT: v_alignbit_b32 v28, v30, v53, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v32 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v36 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v48 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v52 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v8 +; SI-NEXT: v_alignbit_b32 v48, v1, v32, 16 +; SI-NEXT: v_alignbit_b32 v49, v6, v16, 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v51 +; SI-NEXT: v_alignbit_b32 v37, v2, v52, 16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v39 +; SI-NEXT: v_alignbit_b32 v35, v2, v40, 16 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v41 +; SI-NEXT: v_alignbit_b32 v38, v14, v50, 16 +; SI-NEXT: v_alignbit_b32 v36, v22, v54, 16 +; SI-NEXT: v_alignbit_b32 v33, v2, v43, 16 +; SI-NEXT: v_alignbit_b32 v34, v30, v0, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[19:20], v[35:36], 24 +; SI-NEXT: v_lshr_b64 v[27:28], v[33:34], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[12:13], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[20:21], v[35:36], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[33:34], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[17:18], v[35:36], 8 +; SI-NEXT: v_lshr_b64 v[25:26], v[33:34], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v39 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v53 +; 
SI-NEXT: v_lshrrev_b32_e32 v21, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v41 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 ; SI-NEXT: s_cbranch_execnz .LBB109_3 ; SI-NEXT: .LBB109_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v54 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v55 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v42 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v43 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v24, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v52 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v53 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_alignbit_b32 v33, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v31 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v50 -; SI-NEXT: v_alignbit_b32 v28, v30, v0, 16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v51 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v55 +; SI-NEXT: v_alignbit_b32 v34, v30, v0, 16 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v40 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v16, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v48 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v49 +; SI-NEXT: v_alignbit_b32 v35, v1, v0, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v54 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v23 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v38 -; SI-NEXT: v_alignbit_b32 v20, v22, v0, 16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v39 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v51 +; SI-NEXT: v_alignbit_b32 v36, v22, v0, 16 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v52 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v8, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v36 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v37 +; SI-NEXT: v_alignbit_b32 v37, v1, v0, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v50 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v15 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v34 -; SI-NEXT: v_alignbit_b32 v12, v14, v0, 16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v35 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v24 +; SI-NEXT: v_alignbit_b32 v38, v14, v0, 16 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v32 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v32 -; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_alignbit_b32 v48, v1, v0, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_alignbit_b32 v4, v6, v1, 16 -; SI-NEXT: v_alignbit_b32 
v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: v_alignbit_b32 v49, v6, v0, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[19:20], v[35:36], 24 +; SI-NEXT: v_lshr_b64 v[27:28], v[33:34], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshr_b64 v[12:13], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[20:21], v[35:36], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[35:36], 8 +; SI-NEXT: v_lshr_b64 v[28:29], v[33:34], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[33:34], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 ; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 ; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v15 ; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v23 ; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v31 ; SI-NEXT: .LBB109_3: ; %end +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v49 +; SI-NEXT: v_mov_b32_e32 v8, v37 +; SI-NEXT: v_mov_b32_e32 v10, v12 +; SI-NEXT: v_mov_b32_e32 v12, v38 +; SI-NEXT: v_mov_b32_e32 v16, v35 +; SI-NEXT: v_mov_b32_e32 v18, v20 +; SI-NEXT: v_mov_b32_e32 v20, v36 +; SI-NEXT: v_mov_b32_e32 v24, v33 +; SI-NEXT: v_mov_b32_e32 v26, v28 +; SI-NEXT: v_mov_b32_e32 v28, v34 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB109_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr20 ; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; 
implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: s_branch .LBB109_2 ; ; VI-LABEL: bitcast_v16bf16_to_v32i8_scalar: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll index 6cf53d187fcab..57de868ad37b3 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll @@ -647,70 +647,65 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s25, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_lshr_b32 s25, s23, 16 +; SI-NEXT: s_lshr_b32 s26, s21, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 16 +; SI-NEXT: s_lshr_b32 s28, s17, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_lshr_b32 s25, s23, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s26, s21, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 16 +; SI-NEXT: s_lshr_b32 s28, s17, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s12 ; SI-NEXT: v_mov_b32_e32 v2, 
s17 -; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v3, s28 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s10 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: v_mov_b32_e32 v7, s27 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s7 +; SI-NEXT: v_mov_b32_e32 v11, s26 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s4 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v15, s25 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v9i32_to_v18i16_scalar: @@ -2361,66 +2356,75 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s25, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 +; SI-NEXT: s_lshr_b32 s28, s23, 16 +; SI-NEXT: s_lshr_b32 s27, s21, 16 +; SI-NEXT: s_lshr_b32 s26, s19, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_4 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v16, s24, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s4, v16, 16 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v26, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v24, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v23, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v22, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v21, s20, 1.0 
+; SI-NEXT: v_add_f32_e64 v20, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v19, s22, 1.0 +; SI-NEXT: v_lshr_b64 v[13:14], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[16:17], 16 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v26 +; SI-NEXT: s_branch .LBB13_5 ; SI-NEXT: .LBB13_3: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr28 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: s_branch .LBB13_2 ; SI-NEXT: .LBB13_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v25, s16 +; SI-NEXT: v_mov_b32_e32 v26, s17 +; SI-NEXT: v_mov_b32_e32 v23, s18 +; SI-NEXT: v_mov_b32_e32 v24, s19 +; SI-NEXT: v_mov_b32_e32 v21, s20 +; SI-NEXT: v_mov_b32_e32 v22, s21 +; SI-NEXT: v_mov_b32_e32 v19, s22 +; SI-NEXT: v_mov_b32_e32 v20, s23 ; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v3, s25 +; SI-NEXT: v_mov_b32_e32 v7, s26 +; SI-NEXT: v_mov_b32_e32 v11, s27 +; SI-NEXT: v_mov_b32_e32 v15, s28 +; SI-NEXT: v_mov_b32_e32 v17, s8 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v9, s6 +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: .LBB13_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v25 +; SI-NEXT: v_mov_b32_e32 v2, v26 +; SI-NEXT: v_mov_b32_e32 v4, v23 +; SI-NEXT: v_mov_b32_e32 v6, v24 +; SI-NEXT: v_mov_b32_e32 v8, v21 +; SI-NEXT: v_mov_b32_e32 v10, v22 +; SI-NEXT: v_mov_b32_e32 v12, v19 +; SI-NEXT: v_mov_b32_e32 v14, v20 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v9f32_to_v18i16_scalar: @@ -4659,112 +4663,114 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i ; SI-LABEL: bitcast_v18f16_to_v18i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 -; SI-NEXT: v_mov_b32_e32 v14, v0 -; SI-NEXT: v_mov_b32_e32 v18, v4 +; SI-NEXT: v_mov_b32_e32 v5, v3 +; SI-NEXT: v_mov_b32_e32 v9, v2 +; SI-NEXT: v_mov_b32_e32 v10, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 -; SI-NEXT: 
v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB23_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB23_3 ; SI-NEXT: .LBB23_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v18 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; 
SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_or_b32_e32 v16, v16, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v15 +; SI-NEXT: v_or_b32_e32 v14, v14, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_or_b32_e32 v10, v10, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v14, v14, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v7 ; SI-NEXT: v_or_b32_e32 v6, v6, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v16, v16, v18 ; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 ; SI-NEXT: v_or_b32_e32 v2, v2, v18 +; SI-NEXT: v_lshr_b64 v[18:19], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[19:20], v[13:14], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 ; SI-NEXT: .LBB23_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v23 +; SI-NEXT: v_mov_b32_e32 v5, v18 +; SI-NEXT: v_mov_b32_e32 v9, v21 +; SI-NEXT: v_mov_b32_e32 v13, v19 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB23_4: ; SI-NEXT: s_branch .LBB23_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll index 2abb2f3b9de52..3aaf25423a184 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll @@ -681,76 +681,71 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b32 s26, s25, 16 +; SI-NEXT: s_lshr_b32 s27, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s29, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 
s25, s25, 3 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s26, s25, 16 +; SI-NEXT: s_lshr_b32 s27, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s29, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s12 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v3, s40 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s10 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v7, s29 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s8 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v11, s28 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s6 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s7 +; SI-NEXT: v_mov_b32_e32 v15, s27 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s4 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s6 +; SI-NEXT: v_mov_b32_e32 v19, s26 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr28 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v10i32_to_v20i16_scalar: @@ -3371,241 +3366,239 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v3, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; 
SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: v_mov_b32_e32 v3, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: 
v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s28, 24 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s27, 8 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s26, 0xff -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s15, s15, 24 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s15, s5 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s5, s60, 8 +; SI-NEXT: s_and_b32 s7, s16, 0xff +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: s_and_b32 s7, s58, 0xff +; SI-NEXT: s_lshl_b32 s9, s56, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: 
s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: s_and_b32 s5, s17, 0xff +; SI-NEXT: s_lshl_b32 s7, s94, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s93, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s92, 24 +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_lshl_b32 s5, s46, 8 +; SI-NEXT: s_and_b32 s7, s18, 0xff +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: s_and_b32 s7, s44, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s42, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s14, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s12, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s11, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s10, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s9, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s19, 0xff +; SI-NEXT: s_lshl_b32 s7, s91, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s90, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s89, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 
v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s40, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s26, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s21, 0xff +; SI-NEXT: s_lshl_b32 s7, s88, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s79, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s78, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s14, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s12, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s23, 0xff +; SI-NEXT: s_lshl_b32 s7, s77, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s76, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s75, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s24, 0xff +; SI-NEXT: s_lshl_b32 s7, s8, 8 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 ; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_lshl_b32 s5, s74, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_and_b32 s5, s73, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s6, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s72, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 36, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; 
SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr28 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: ; implicit-def: $sgpr26 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v10i32_to_v40i8_scalar: @@ -7591,72 +7584,83 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a, ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB25_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s40, s25, 16 +; SI-NEXT: s_lshr_b32 s29, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 16 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB25_4 ; SI-NEXT: .LBB25_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: 
v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s24, 1.0 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v29, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v28, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v27, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v26, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v24, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v23, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v22, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v21, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s24, 1.0 +; SI-NEXT: v_lshr_b64 v[17:18], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[22:23], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[24:25], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[26:27], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[28:29], 16 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v29 +; SI-NEXT: s_branch .LBB25_5 ; SI-NEXT: .LBB25_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr29 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB25_2 ; SI-NEXT: .LBB25_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 +; SI-NEXT: v_mov_b32_e32 v28, s16 +; SI-NEXT: v_mov_b32_e32 v29, s17 +; SI-NEXT: v_mov_b32_e32 v26, s18 +; SI-NEXT: v_mov_b32_e32 v27, s19 +; SI-NEXT: v_mov_b32_e32 v24, s20 +; SI-NEXT: v_mov_b32_e32 v25, s21 +; SI-NEXT: v_mov_b32_e32 v22, s22 +; SI-NEXT: v_mov_b32_e32 v23, s23 +; SI-NEXT: v_mov_b32_e32 v20, s24 +; SI-NEXT: v_mov_b32_e32 v21, s25 +; SI-NEXT: v_mov_b32_e32 v3, s26 +; SI-NEXT: v_mov_b32_e32 v7, s27 +; SI-NEXT: v_mov_b32_e32 v11, s28 +; SI-NEXT: v_mov_b32_e32 v15, s29 +; SI-NEXT: v_mov_b32_e32 v19, s40 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v13, s6 +; SI-NEXT: v_mov_b32_e32 v17, s4 +; SI-NEXT: .LBB25_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v28 +; SI-NEXT: v_mov_b32_e32 v2, v29 +; SI-NEXT: v_mov_b32_e32 v4, v26 +; SI-NEXT: v_mov_b32_e32 v6, v27 +; 
SI-NEXT: v_mov_b32_e32 v8, v24 +; SI-NEXT: v_mov_b32_e32 v10, v25 +; SI-NEXT: v_mov_b32_e32 v12, v22 +; SI-NEXT: v_mov_b32_e32 v14, v23 +; SI-NEXT: v_mov_b32_e32 v16, v20 +; SI-NEXT: v_mov_b32_e32 v18, v21 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v10f32_to_v20i16_scalar: @@ -10305,256 +10309,261 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB33_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v3, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s28, s25, 24 -; SI-NEXT: s_lshr_b32 s29, s25, 16 -; SI-NEXT: s_lshr_b32 s40, s25, 8 -; SI-NEXT: s_lshr_b32 s15, s23, 24 -; SI-NEXT: s_lshr_b32 s26, s23, 16 -; SI-NEXT: s_lshr_b32 s27, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s9, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s91, s25, 24 +; SI-NEXT: s_lshr_b32 s93, s25, 16 +; SI-NEXT: s_lshr_b32 s94, s25, 8 +; SI-NEXT: s_lshr_b32 s88, s23, 24 +; SI-NEXT: s_lshr_b32 s90, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s23, 8 +; SI-NEXT: s_lshr_b32 s77, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s89, s21, 8 +; SI-NEXT: s_lshr_b32 s74, s19, 24 +; SI-NEXT: s_lshr_b32 s76, s19, 16 +; SI-NEXT: s_lshr_b32 s78, s19, 8 +; SI-NEXT: s_lshr_b32 s72, s17, 24 +; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB33_4 ; SI-NEXT: .LBB33_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v31, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v34, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v28, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v29, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v23, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v24, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v21, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v17, s24, 1.0 -; SI-NEXT: 
v_alignbit_b32 v1, v16, v17, 24 -; SI-NEXT: v_alignbit_b32 v2, v16, v17, 16 -; SI-NEXT: v_alignbit_b32 v3, v16, v17, 8 -; SI-NEXT: v_alignbit_b32 v4, v18, v21, 24 -; SI-NEXT: v_alignbit_b32 v5, v18, v21, 16 -; SI-NEXT: v_alignbit_b32 v6, v18, v21, 8 -; SI-NEXT: v_alignbit_b32 v7, v23, v24, 24 -; SI-NEXT: v_alignbit_b32 v8, v23, v24, 16 -; SI-NEXT: v_alignbit_b32 v9, v23, v24, 8 -; SI-NEXT: v_alignbit_b32 v10, v28, v29, 24 -; SI-NEXT: v_alignbit_b32 v11, v28, v29, 16 -; SI-NEXT: v_alignbit_b32 v12, v28, v29, 8 -; SI-NEXT: v_alignbit_b32 v13, v31, v34, 24 -; SI-NEXT: v_alignbit_b32 v14, v31, v34, 16 -; SI-NEXT: v_alignbit_b32 v15, v31, v34, 8 -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v16 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v22, 8, v16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v18 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v18 -; SI-NEXT: v_lshrrev_b32_e32 v30, 24, v23 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v23 -; SI-NEXT: v_lshrrev_b32_e32 v35, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v37, 8, v28 -; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v31 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v31 +; SI-NEXT: v_add_f32_e64 v5, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v6, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v12, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v13, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v7, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v10, s18, 1.0 +; SI-NEXT: v_readfirstlane_b32 s16, v6 +; SI-NEXT: v_readfirstlane_b32 s17, v5 +; SI-NEXT: v_add_f32_e64 v3, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v4, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v1, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v2, s24, 1.0 +; SI-NEXT: s_lshr_b64 s[26:27], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 8 +; SI-NEXT: v_readfirstlane_b32 s16, v10 +; SI-NEXT: v_readfirstlane_b32 s17, v7 +; SI-NEXT: v_readfirstlane_b32 s8, v2 +; SI-NEXT: v_readfirstlane_b32 s9, v1 +; SI-NEXT: v_readfirstlane_b32 s14, v4 +; SI-NEXT: v_readfirstlane_b32 s15, v3 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[16:17], 8 +; SI-NEXT: v_readfirstlane_b32 s16, v13 +; SI-NEXT: v_readfirstlane_b32 s17, v12 +; SI-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 +; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v1 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v1 +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v3 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v16, 8, v3 +; SI-NEXT: v_lshrrev_b32_e32 v17, 24, v5 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v5 +; SI-NEXT: v_lshrrev_b32_e32 v20, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v22, 8, v7 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v12 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v12 ; SI-NEXT: s_branch .LBB33_5 ; SI-NEXT: .LBB33_3: -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; 
implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr27 -; SI-NEXT: ; implicit-def: $sgpr26 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB33_2 ; SI-NEXT: .LBB33_4: -; SI-NEXT: v_mov_b32_e32 v34, s16 -; SI-NEXT: v_mov_b32_e32 v31, s17 -; SI-NEXT: v_mov_b32_e32 v29, s18 -; SI-NEXT: v_mov_b32_e32 v28, s19 -; SI-NEXT: v_mov_b32_e32 v24, s20 -; SI-NEXT: v_mov_b32_e32 v23, s21 -; SI-NEXT: v_mov_b32_e32 v21, s22 -; SI-NEXT: v_mov_b32_e32 v18, s23 -; SI-NEXT: v_mov_b32_e32 v17, s24 -; SI-NEXT: v_mov_b32_e32 v16, s25 -; SI-NEXT: v_mov_b32_e32 v48, s8 -; SI-NEXT: v_mov_b32_e32 v39, s7 -; SI-NEXT: v_mov_b32_e32 v38, s6 -; SI-NEXT: v_mov_b32_e32 v37, s11 -; SI-NEXT: v_mov_b32_e32 v36, s10 -; SI-NEXT: v_mov_b32_e32 v35, s9 -; SI-NEXT: v_mov_b32_e32 v33, s14 -; SI-NEXT: v_mov_b32_e32 v32, s13 -; SI-NEXT: v_mov_b32_e32 v30, s12 -; SI-NEXT: v_mov_b32_e32 v27, s27 -; SI-NEXT: v_mov_b32_e32 v26, s26 -; SI-NEXT: v_mov_b32_e32 v25, s15 -; SI-NEXT: v_mov_b32_e32 v22, s40 -; SI-NEXT: v_mov_b32_e32 v20, s29 -; SI-NEXT: v_mov_b32_e32 v19, s28 +; SI-NEXT: v_mov_b32_e32 v13, s16 +; SI-NEXT: v_mov_b32_e32 v12, s17 +; SI-NEXT: v_mov_b32_e32 v10, s18 +; SI-NEXT: v_mov_b32_e32 v7, s19 +; SI-NEXT: v_mov_b32_e32 v6, s20 +; SI-NEXT: v_mov_b32_e32 v5, s21 +; SI-NEXT: v_mov_b32_e32 v4, s22 +; SI-NEXT: v_mov_b32_e32 v3, s23 +; SI-NEXT: v_mov_b32_e32 v2, s24 +; SI-NEXT: v_mov_b32_e32 v1, s25 +; SI-NEXT: v_mov_b32_e32 v25, s75 +; SI-NEXT: v_mov_b32_e32 v24, s73 +; SI-NEXT: v_mov_b32_e32 v23, s72 +; SI-NEXT: v_mov_b32_e32 v22, s78 +; SI-NEXT: v_mov_b32_e32 v21, s76 +; SI-NEXT: v_mov_b32_e32 v20, s74 +; SI-NEXT: v_mov_b32_e32 v19, s89 +; SI-NEXT: v_mov_b32_e32 v18, s79 +; SI-NEXT: v_mov_b32_e32 v17, s77 +; SI-NEXT: v_mov_b32_e32 
v16, s92 +; SI-NEXT: v_mov_b32_e32 v15, s90 +; SI-NEXT: v_mov_b32_e32 v14, s88 +; SI-NEXT: v_mov_b32_e32 v11, s94 +; SI-NEXT: v_mov_b32_e32 v9, s93 +; SI-NEXT: v_mov_b32_e32 v8, s91 ; SI-NEXT: .LBB33_5: ; %end -; SI-NEXT: v_and_b32_e32 v34, 0xff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: v_or_b32_e32 v15, v34, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v48 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v39 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v38 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: s_lshl_b32 s5, s60, 8 +; SI-NEXT: v_or_b32_e32 v13, s5, v13 +; SI-NEXT: s_and_b32 s5, s58, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s56, 24 ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: v_or_b32_e32 v13, s5, v13 +; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_or_b32_e32 v12, v13, v12 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v25 +; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v24 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: s_lshl_b32 s5, s46, 8 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 +; SI-NEXT: v_or_b32_e32 v10, s5, v10 +; SI-NEXT: s_and_b32 s5, s44, 0xff ; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v37 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v35 +; SI-NEXT: v_or_b32_e32 v13, v23, v13 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s42, 24 +; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 ; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v10, s5, v10 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: 
v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v12, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v33 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v30 +; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v22 +; SI-NEXT: v_or_b32_e32 v7, v7, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v21 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: s_lshl_b32 s5, s40, 8 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v20 +; SI-NEXT: v_or_b32_e32 v6, s5, v6 +; SI-NEXT: s_and_b32 s5, s28, 0xff ; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v8, v9, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v10, v12, v10 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s26, 24 +; SI-NEXT: v_or_b32_e32 v7, v7, v10 +; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v6, s5, v6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v27 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v25 +; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v19 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v18 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_lshl_b32 s5, s14, 8 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 +; SI-NEXT: v_or_b32_e32 v4, s5, v4 +; SI-NEXT: s_and_b32 s5, s12, 0xff +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s10, 24 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_add_i32_e32 v6, vcc, 20, v0 ; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 28, v0 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v4, s5, v4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v16 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v15 ; SI-NEXT: v_and_b32_e32 
v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_lshl_b32 s5, s8, 8 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v14 +; SI-NEXT: v_or_b32_e32 v2, s5, v2 +; SI-NEXT: s_and_b32 s5, s6, 0xff ; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v2, s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v22 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v11 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v20 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v9 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v19 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v8 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -15184,124 +15193,127 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i ; SI-LABEL: bitcast_v20f16_to_v20i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v19, v5 -; SI-NEXT: v_mov_b32_e32 v18, v4 -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 +; SI-NEXT: v_mov_b32_e32 v9, v4 +; SI-NEXT: v_mov_b32_e32 v10, v3 +; SI-NEXT: v_mov_b32_e32 v13, v2 ; SI-NEXT: v_mov_b32_e32 v14, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB47_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB47_3 ; SI-NEXT: .LBB47_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; 
SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v23 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v24 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v25 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v21 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_or_b32_e32 v18, v18, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 ; 
SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_or_b32_e32 v14, v14, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_or_b32_e32 v10, v10, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v20 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v19 +; SI-NEXT: v_lshr_b64 v[24:25], v[9:10], 16 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v18, v18, v20 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_lshr_b64 v[25:26], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[17:18], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 ; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 ; SI-NEXT: .LBB47_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v23 +; SI-NEXT: v_mov_b32_e32 v5, v21 +; SI-NEXT: v_mov_b32_e32 v9, v24 +; SI-NEXT: v_mov_b32_e32 v13, v25 +; SI-NEXT: v_mov_b32_e32 v17, v26 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB47_4: ; SI-NEXT: s_branch .LBB47_2 @@ -16674,330 +16686,353 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32 ; SI-LABEL: bitcast_v20i16_to_v40i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v8, s30, 0 +; SI-NEXT: v_writelane_b32 v8, s31, 1 +; SI-NEXT: v_writelane_b32 v8, s34, 2 +; SI-NEXT: v_writelane_b32 v8, s35, 3 +; SI-NEXT: v_writelane_b32 v8, s36, 4 +; SI-NEXT: v_writelane_b32 v8, s37, 5 +; SI-NEXT: v_writelane_b32 v8, s38, 6 +; SI-NEXT: v_writelane_b32 v8, s39, 7 +; SI-NEXT: v_writelane_b32 v8, s48, 8 +; SI-NEXT: v_writelane_b32 v8, s49, 9 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; SI-NEXT: v_readfirstlane_b32 s72, v6 -; SI-NEXT: v_readfirstlane_b32 s73, v5 -; SI-NEXT: v_readfirstlane_b32 s62, v2 -; SI-NEXT: v_readfirstlane_b32 s63, v1 +; SI-NEXT: v_writelane_b32 v8, s50, 10 +; SI-NEXT: v_readfirstlane_b32 s39, v6 +; SI-NEXT: v_readfirstlane_b32 s48, v5 +; SI-NEXT: v_readfirstlane_b32 s49, v4 +; SI-NEXT: v_readfirstlane_b32 s50, v3 +; SI-NEXT: v_readfirstlane_b32 s35, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v4 +; SI-NEXT: 
v_readfirstlane_b32 s38, v1 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s13, s4, s5 +; SI-NEXT: s_or_b32 s12, s4, s5 ; SI-NEXT: s_and_b32 s4, s18, 0xffff ; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s14, s4, s5 +; SI-NEXT: s_or_b32 s13, s4, s5 ; SI-NEXT: s_and_b32 s4, s20, 0xffff ; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s11, s4, s5 +; SI-NEXT: s_or_b32 s10, s4, s5 ; SI-NEXT: s_and_b32 s4, s22, 0xffff ; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: s_or_b32 s12, s4, s5 +; SI-NEXT: s_or_b32 s11, s4, s5 ; SI-NEXT: s_and_b32 s4, s24, 0xffff ; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: v_mov_b32_e32 v1, s13 -; SI-NEXT: s_or_b32 s9, s4, s5 +; SI-NEXT: s_or_b32 s8, s4, s5 ; SI-NEXT: s_and_b32 s4, s26, 0xffff ; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: v_alignbit_b32 v7, s14, v1, 24 -; SI-NEXT: v_alignbit_b32 v12, s14, v1, 16 -; SI-NEXT: v_alignbit_b32 v16, s14, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s11 -; SI-NEXT: s_or_b32 s10, s4, s5 +; SI-NEXT: s_or_b32 s9, s4, s5 ; SI-NEXT: s_and_b32 s4, s28, 0xffff ; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: v_alignbit_b32 v8, s12, v1, 24 -; SI-NEXT: v_alignbit_b32 v13, s12, v1, 16 -; SI-NEXT: v_alignbit_b32 v17, s12, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s9 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s63, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: v_alignbit_b32 v6, s10, v1, 24 -; SI-NEXT: v_alignbit_b32 v11, s10, v1, 16 -; SI-NEXT: v_alignbit_b32 v15, s10, v1, 8 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_alignbit_b32 v5, s8, v1, 24 -; SI-NEXT: v_alignbit_b32 v9, s8, v1, 16 -; SI-NEXT: v_alignbit_b32 v14, s8, v1, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: s_and_b32 s4, s73, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: v_or_b32_e32 v1, v1, v18 ; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: v_alignbit_b32 v2, s6, v1, 24 -; SI-NEXT: v_alignbit_b32 v4, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v10, s6, v1, 8 -; SI-NEXT: s_lshr_b32 s59, s14, 8 -; SI-NEXT: s_lshr_b32 s56, s12, 8 -; SI-NEXT: s_lshr_b32 s45, s10, 8 -; SI-NEXT: s_lshr_b32 s42, s8, 8 -; SI-NEXT: s_lshr_b32 s15, s6, 8 -; SI-NEXT: s_and_b32 s60, s19, 0xffff -; SI-NEXT: s_and_b32 s57, s23, 0xffff -; SI-NEXT: s_and_b32 s46, s27, 0xffff -; SI-NEXT: s_and_b32 s43, s62, 0xffff -; SI-NEXT: s_and_b32 s40, s72, 0xffff -; SI-NEXT: s_bfe_u32 s61, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s58, s23, 0x80008 -; SI-NEXT: s_bfe_u32 s47, s27, 0x80008 -; SI-NEXT: s_bfe_u32 s44, s62, 0x80008 -; SI-NEXT: s_bfe_u32 s41, s72, 0x80008 +; SI-NEXT: s_and_b32 s4, s38, 0xffff +; SI-NEXT: s_lshl_b32 s5, s35, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[12:13], 24 +; SI-NEXT: s_or_b32 s7, s4, s5 +; SI-NEXT: s_and_b32 s4, s50, 0xffff +; SI-NEXT: s_lshl_b32 s5, s49, 16 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s48, 0xffff +; SI-NEXT: s_lshl_b32 s15, s39, 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_or_b32 s5, s5, s15 +; SI-NEXT: s_lshr_b32 s34, s13, 8 +; SI-NEXT: s_lshr_b32 s95, s11, 8 +; SI-NEXT: s_lshr_b32 s59, s9, 8 +; SI-NEXT: s_lshr_b32 s45, s7, 8 +; SI-NEXT: s_lshr_b32 s15, s5, 8 +; SI-NEXT: s_and_b32 s36, s19, 0xffff +; SI-NEXT: 
s_and_b32 s30, s23, 0xffff +; SI-NEXT: s_and_b32 s61, s27, 0xffff +; SI-NEXT: s_and_b32 s47, s35, 0xffff +; SI-NEXT: s_and_b32 s41, s39, 0xffff +; SI-NEXT: s_bfe_u32 s37, s19, 0x80008 +; SI-NEXT: s_bfe_u32 s31, s23, 0x80008 +; SI-NEXT: s_bfe_u32 s94, s27, 0x80008 +; SI-NEXT: s_bfe_u32 s57, s35, 0x80008 +; SI-NEXT: s_bfe_u32 s43, s39, 0x80008 +; SI-NEXT: s_lshr_b64 s[88:89], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[4:5], 8 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: s_add_i32 s73, s73, 3 -; SI-NEXT: s_and_b32 s4, s73, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: s_add_i32 s50, s50, 3 +; SI-NEXT: s_and_b32 s4, s50, 0xffff +; SI-NEXT: s_lshl_b32 s5, s49, 16 +; SI-NEXT: s_add_i32 s48, s48, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s48, 0xffff +; SI-NEXT: s_lshl_b32 s6, s39, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s63, s63, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s63, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s28, 0xffff +; SI-NEXT: s_lshl_b32 s7, s29, 16 +; SI-NEXT: s_add_i32 s38, s38, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s38, 0xffff +; SI-NEXT: s_lshl_b32 s8, s35, 16 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s8, s24, 0xffff +; SI-NEXT: s_lshl_b32 s9, s25, 16 ; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: s_and_b32 s9, s26, 0xffff +; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: s_and_b32 s10, s20, 0xffff +; SI-NEXT: s_lshl_b32 s11, s21, 16 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s11, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: s_and_b32 s11, s22, 0xffff +; SI-NEXT: s_lshl_b32 s12, s23, 16 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s12, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: s_and_b32 s12, s16, 0xffff +; SI-NEXT: s_lshl_b32 s13, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s13, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v3 -; SI-NEXT: s_add_i32 s14, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v2, s13 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_alignbit_b32 v7, s14, v2, 24 -; SI-NEXT: v_alignbit_b32 v12, s14, v2, 16 -; SI-NEXT: v_alignbit_b32 v16, s14, v2, 8 -; 
SI-NEXT: v_mov_b32_e32 v2, s11 -; SI-NEXT: v_or_b32_e32 v1, v18, v1 -; SI-NEXT: v_alignbit_b32 v8, s12, v2, 24 -; SI-NEXT: v_alignbit_b32 v13, s12, v2, 16 -; SI-NEXT: v_alignbit_b32 v17, s12, v2, 8 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x30000, v1 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_alignbit_b32 v6, s10, v2, 24 -; SI-NEXT: v_alignbit_b32 v11, s10, v2, 16 -; SI-NEXT: v_alignbit_b32 v15, s10, v2, 8 -; SI-NEXT: v_mov_b32_e32 v2, s7 -; SI-NEXT: v_alignbit_b32 v5, s8, v2, 24 -; SI-NEXT: v_alignbit_b32 v9, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v14, s8, v2, 8 -; SI-NEXT: v_alignbit_b32 v2, v3, v1, 24 -; SI-NEXT: v_alignbit_b32 v4, v3, v1, 16 -; SI-NEXT: v_alignbit_b32 v10, v3, v1, 8 -; SI-NEXT: s_lshr_b32 s61, s14, 24 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s14, 8 -; SI-NEXT: s_lshr_b32 s58, s12, 24 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s56, s12, 8 -; SI-NEXT: s_lshr_b32 s47, s10, 24 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s10, 8 -; SI-NEXT: s_lshr_b32 s44, s8, 24 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s8, 8 -; SI-NEXT: s_lshr_b32 s41, s6, 24 -; SI-NEXT: s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s6, 8 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: s_and_b32 s13, s18, 0xffff +; SI-NEXT: s_lshl_b32 s14, s19, 16 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: s_add_i32 s8, s8, 0x30000 +; SI-NEXT: s_add_i32 s9, s9, 0x30000 +; SI-NEXT: s_add_i32 s10, s10, 0x30000 +; SI-NEXT: s_add_i32 s11, s11, 0x30000 +; SI-NEXT: s_add_i32 s12, s12, 0x30000 +; SI-NEXT: s_add_i32 s13, s13, 0x30000 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_add_i32 s6, s6, 0x30000 +; SI-NEXT: s_add_i32 s7, s7, 0x30000 +; SI-NEXT: s_lshr_b64 s[14:15], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[40:41], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s37, s13, 24 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; SI-NEXT: s_lshr_b32 s34, s13, 8 +; SI-NEXT: s_lshr_b32 s31, s11, 24 +; SI-NEXT: s_lshr_b32 s30, s11, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 8 +; SI-NEXT: s_lshr_b32 s94, s9, 24 +; SI-NEXT: s_lshr_b32 s61, s9, 16 +; SI-NEXT: s_lshr_b32 s59, s9, 8 +; SI-NEXT: s_lshr_b32 s57, s7, 24 +; SI-NEXT: s_lshr_b32 s47, s7, 16 +; SI-NEXT: s_lshr_b32 s45, s7, 8 +; SI-NEXT: s_lshr_b32 s43, s5, 24 +; SI-NEXT: s_lshr_b32 s41, s5, 16 +; SI-NEXT: s_lshr_b32 s15, s5, 8 ; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v16 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s5, s59, 8 -; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s60, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s13, s61, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v7, v12 -; SI-NEXT: s_and_b32 
s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s13, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s16, s56, 8 +; SI-NEXT: s_or_b32 s12, s12, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: s_lshl_b32 s13, s34, 8 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: s_and_b32 s13, s36, 0xff +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_lshl_b32 s14, s37, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s12, s60, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: s_and_b32 s12, s46, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s42, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v17 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: s_lshl_b32 s5, s56, 8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v13 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s57, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s58, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s11, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s95, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s30, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s31, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v15 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s45, 8 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s88, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: s_and_b32 s10, s58, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s44, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 
offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s46, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s47, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s59, 8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_and_b32 s9, s61, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s94, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v14 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s42, 8 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s74, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s90, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s78, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v9 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s43, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s7, s44, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s45, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s7, s47, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s57, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s76, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s72, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s62, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s7, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v10 -; SI-NEXT: 
s_and_b32 s4, s6, 0xff +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff ; SI-NEXT: s_lshl_b32 s5, s15, 8 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s40, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; SI-NEXT: s_and_b32 s5, s41, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s41, 24 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: s_lshl_b32 s6, s43, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 36, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s50, v8, 10 +; SI-NEXT: v_readlane_b32 s49, v8, 9 +; SI-NEXT: v_readlane_b32 s48, v8, 8 +; SI-NEXT: v_readlane_b32 s39, v8, 7 +; SI-NEXT: v_readlane_b32 s38, v8, 6 +; SI-NEXT: v_readlane_b32 s37, v8, 5 +; SI-NEXT: v_readlane_b32 s36, v8, 4 +; SI-NEXT: v_readlane_b32 s35, v8, 3 +; SI-NEXT: v_readlane_b32 s34, v8, 2 +; SI-NEXT: v_readlane_b32 s31, v8, 1 +; SI-NEXT: v_readlane_b32 s30, v8, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr8 ; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr60 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr95 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr58 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr94 ; SI-NEXT: ; 
implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr47 +; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v20i16_to_v40i8_scalar: @@ -19045,286 +19080,280 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26 -; SI-NEXT: v_readfirstlane_b32 s14, v19 -; SI-NEXT: v_readfirstlane_b32 s40, v18 -; SI-NEXT: v_readfirstlane_b32 s12, v11 -; SI-NEXT: v_readfirstlane_b32 s13, v10 -; SI-NEXT: v_readfirstlane_b32 s8, v3 -; SI-NEXT: v_readfirstlane_b32 s9, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: v_readfirstlane_b32 s6, v0 +; SI-NEXT: v_mov_b32_e32 v31, v18 +; SI-NEXT: v_mov_b32_e32 v32, v14 +; SI-NEXT: v_mov_b32_e32 v33, v10 +; SI-NEXT: v_readfirstlane_b32 s43, v1 +; SI-NEXT: v_readfirstlane_b32 s42, v0 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v34, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v36, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v35, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v19 +; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v7 ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 -; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v13 -; SI-NEXT: v_lshlrev_b32_e32 v30, 8, v23 -; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v25 -; SI-NEXT: v_lshlrev_b32_e32 v31, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v25 ; SI-NEXT: s_cbranch_scc0 .LBB51_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s23, 24 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s10, s5 -; SI-NEXT: s_or_b32 s11, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s10, s19, 24 -; SI-NEXT: s_or_b32 s4, s10, s4 -; SI-NEXT: s_and_b32 s10, s28, 0xff -; SI-NEXT: s_lshl_b32 s15, s29, 8 -; SI-NEXT: s_or_b32 s10, s10, s15 -; SI-NEXT: s_and_b32 s15, s6, 0xff -; SI-NEXT: s_lshl_b32 s15, s15, 16 -; SI-NEXT: s_lshl_b32 s41, s7, 24 -; SI-NEXT: s_or_b32 s43, s41, s15 -; SI-NEXT: s_and_b32 s15, s26, 0xff -; SI-NEXT: s_lshl_b32 s15, s15, 16 -; SI-NEXT: s_lshl_b32 s41, s27, 24 -; SI-NEXT: s_or_b32 s15, s41, s15 -; SI-NEXT: s_and_b32 s41, s16, 0xff -; SI-NEXT: s_lshl_b32 s42, s17, 8 -; SI-NEXT: s_or_b32 s41, s41, s42 -; SI-NEXT: s_and_b32 s41, s41, 0xffff -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v6 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v8 -; SI-NEXT: s_or_b32 s41, s41, s4 +; SI-NEXT: s_or_b32 
s12, s6, s5 +; SI-NEXT: s_or_b32 s6, s4, s12 ; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s42, s25, 8 -; SI-NEXT: v_or_b32_e32 v9, v9, v2 +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s26, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_or_b32 s14, s7, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: v_and_b32_e32 v10, 0xff, v33 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: v_or_b32_e32 v10, v10, v36 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v12 +; SI-NEXT: s_or_b32 s13, s5, s7 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_or_b32 s4, s4, s42 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v11, v0, v10 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_or_b32_e32 v10, v9, v11 +; SI-NEXT: s_lshr_b64 s[8:9], s[12:13], 16 +; SI-NEXT: v_or_b32_e32 v13, v35, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v20 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v8 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v16 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s9, s29, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_and_b32_e32 v25, 0xff, v24 +; SI-NEXT: s_or_b32 s5, s5, s9 +; SI-NEXT: s_and_b32 s9, s42, 0xff +; SI-NEXT: v_or_b32_e32 v9, v9, v34 +; SI-NEXT: v_or_b32_e32 v17, v37, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v19, v0, v14 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v32 +; SI-NEXT: v_or_b32_e32 v39, v5, v18 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s43, 24 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v9 ; SI-NEXT: v_and_b32_e32 v9, 0xff, v4 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v14 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v16 -; SI-NEXT: s_or_b32 s15, s4, s15 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s42, s8, 8 +; SI-NEXT: v_or_b32_e32 v10, v10, v3 +; SI-NEXT: v_or_b32_e32 v14, v14, v7 +; SI-NEXT: v_or_b32_e32 v18, v18, v23 +; SI-NEXT: v_or_b32_e32 v48, v21, v25 +; SI-NEXT: v_and_b32_e32 v25, 0xff, v31 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s12, s10, s9 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v13, v13, v27 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: s_or_b32 s4, s4, s42 -; SI-NEXT: v_or_b32_e32 v15, v3, v9 -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: v_or_b32_e32 v19, v7, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v25, v13, v19 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v12 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v22 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v24 -; SI-NEXT: v_or_b32_e32 v23, s4, v15 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s42, s12, 8 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v17, v17, v30 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: s_or_b32 s4, s4, s42 -; SI-NEXT: v_or_b32_e32 v21, v28, v13 -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 -; SI-NEXT: v_or_b32_e32 v32, v29, v18 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v18, v17, v32 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v20 -; SI-NEXT: 
v_or_b32_e32 v26, s4, v21 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s42, s14, 8 -; SI-NEXT: s_and_b32 s10, s10, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: s_or_b32 s4, s4, s42 -; SI-NEXT: s_or_b32 s10, s10, s43 -; SI-NEXT: v_or_b32_e32 v33, v31, v17 +; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_or_b32_e32 v25, v25, v38 +; SI-NEXT: s_or_b32 s15, s5, s12 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v1, s11, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, s10, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v15, 16 -; SI-NEXT: v_alignbit_b32 v13, v25, v21, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v33, 16 -; SI-NEXT: v_or_b32_e32 v21, s4, v33 -; SI-NEXT: s_lshr_b32 s42, s5, 16 -; SI-NEXT: s_lshr_b32 s43, s43, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v32 +; SI-NEXT: v_or_b32_e32 v9, v1, v9 +; SI-NEXT: v_or_b32_e32 v10, v10, v19 +; SI-NEXT: v_or_b32_e32 v14, v14, v39 +; SI-NEXT: v_or_b32_e32 v18, v18, v48 +; SI-NEXT: v_and_b32_e32 v29, 0xffff, v25 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 16 +; SI-NEXT: s_or_b32 s4, s4, s14 +; SI-NEXT: v_or_b32_e32 v25, v11, v9 +; SI-NEXT: v_mov_b32_e32 v26, v10 +; SI-NEXT: v_lshr_b64 v[9:10], v[9:10], 16 +; SI-NEXT: v_or_b32_e32 v27, v15, v13 +; SI-NEXT: v_mov_b32_e32 v28, v14 +; SI-NEXT: v_lshr_b64 v[13:14], v[13:14], 16 +; SI-NEXT: v_or_b32_e32 v29, v29, v17 +; SI-NEXT: v_mov_b32_e32 v30, v18 +; SI-NEXT: v_lshr_b64 v[17:18], v[17:18], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s12, 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v48 +; SI-NEXT: s_mov_b32 s7, s13 +; SI-NEXT: s_mov_b32 s5, s15 ; SI-NEXT: s_cbranch_execnz .LBB51_3 ; SI-NEXT: .LBB51_2: ; %cmp.true -; SI-NEXT: s_add_i32 s40, s40, 3 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s5, s14, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v31, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v24 -; SI-NEXT: v_or_b32_e32 v1, v30, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v29, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s5, s12, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v12 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v28, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v14 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v16 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: 
v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v4 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v31 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v20 +; SI-NEXT: v_or_b32_e32 v9, v38, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x300, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: v_or_b32_e32 v10, v37, v10 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v22 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v24 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 +; SI-NEXT: v_or_b32_e32 v9, v23, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 ; SI-NEXT: s_and_b32 s4, s24, 0xff ; SI-NEXT: s_lshl_b32 s5, s25, 8 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x300, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s26, 0xff +; SI-NEXT: s_and_b32 s6, s26, 0xff +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: v_or_b32_e32 v10, v21, v10 ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s15, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s7, 24 ; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v33 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s6, s29, 8 +; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v12 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s7, s42, 0xff +; SI-NEXT: v_or_b32_e32 v9, v36, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s43, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x300, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_or_b32_e32 v2, v34, v2 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_lshl_b32 s5, s17, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: v_or_b32_e32 v10, v35, v10 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x300, v2 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 
+; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s16, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s18, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v1 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v4 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s19, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v1 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v6 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v16 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v8 -; SI-NEXT: s_add_i32 s41, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s8, s21, 8 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s22, 0xff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s9, s22, 0xff +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x300, v7 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v9 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: s_add_i32 s11, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s41 -; SI-NEXT: v_alignbit_b32 v1, s11, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s15 -; SI-NEXT: v_alignbit_b32 v5, s10, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v23, 16 -; SI-NEXT: v_alignbit_b32 v13, v25, v26, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v21, 16 -; SI-NEXT: s_lshr_b32 s42, s11, 16 -; SI-NEXT: s_lshr_b32 s43, s10, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v5 +; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_lshr_b64 s[8:9], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; 
SI-NEXT: v_lshr_b64 v[9:10], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[27:28], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[29:30], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v30 ; SI-NEXT: .LBB51_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s41 -; SI-NEXT: v_mov_b32_e32 v2, s11 -; SI-NEXT: v_mov_b32_e32 v3, s42 -; SI-NEXT: v_mov_b32_e32 v4, s15 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, v23 -; SI-NEXT: v_mov_b32_e32 v12, v26 -; SI-NEXT: v_mov_b32_e32 v14, v25 -; SI-NEXT: v_mov_b32_e32 v16, v21 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v6, s5 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v8, v25 +; SI-NEXT: v_mov_b32_e32 v10, v26 +; SI-NEXT: v_mov_b32_e32 v12, v27 +; SI-NEXT: v_mov_b32_e32 v14, v28 +; SI-NEXT: v_mov_b32_e32 v16, v29 +; SI-NEXT: v_mov_b32_e32 v18, v30 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB51_4: -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: s_branch .LBB51_2 ; @@ -20574,78 +20603,78 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB55_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v20, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v21, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v22, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v23, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v24, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s40, s25, 16 +; SI-NEXT: s_lshr_b32 s29, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 16 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB55_4 ; SI-NEXT: .LBB55_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0 -; 
SI-NEXT: v_add_f64 v[4:5], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[8:9], s[20:21], 1.0 -; SI-NEXT: v_add_f64 v[16:17], s[24:25], 1.0 -; SI-NEXT: v_add_f64 v[12:13], s[22:23], 1.0 -; SI-NEXT: v_alignbit_b32 v20, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v21, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v22, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v23, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v24, v1, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_add_f64 v[20:21], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[22:23], s[22:23], 1.0 +; SI-NEXT: v_add_f64 v[24:25], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[26:27], s[18:19], 1.0 +; SI-NEXT: v_add_f64 v[28:29], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[17:18], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[22:23], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[24:25], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[26:27], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[28:29], 16 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v29 ; SI-NEXT: s_branch .LBB55_5 ; SI-NEXT: .LBB55_3: -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr29 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB55_2 ; SI-NEXT: .LBB55_4: -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v5, s19 -; SI-NEXT: v_mov_b32_e32 v9, s21 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v17, s25 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 +; SI-NEXT: v_mov_b32_e32 v21, s25 +; SI-NEXT: v_mov_b32_e32 v23, s23 +; SI-NEXT: v_mov_b32_e32 v25, s21 +; SI-NEXT: v_mov_b32_e32 v27, s19 +; SI-NEXT: v_mov_b32_e32 v29, s17 +; SI-NEXT: v_mov_b32_e32 v28, s16 +; SI-NEXT: v_mov_b32_e32 v26, s18 +; SI-NEXT: v_mov_b32_e32 v24, s20 +; SI-NEXT: v_mov_b32_e32 v22, s22 +; SI-NEXT: v_mov_b32_e32 v20, s24 +; SI-NEXT: v_mov_b32_e32 v19, s40 +; SI-NEXT: v_mov_b32_e32 v15, s29 +; SI-NEXT: v_mov_b32_e32 v11, s28 +; SI-NEXT: v_mov_b32_e32 v7, s27 +; SI-NEXT: v_mov_b32_e32 v3, s26 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v13, s6 +; SI-NEXT: v_mov_b32_e32 v17, s4 ; SI-NEXT: .LBB55_5: ; %end -; SI-NEXT: v_mov_b32_e32 v2, v1 -; SI-NEXT: v_mov_b32_e32 v6, v5 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v14, v13 -; SI-NEXT: v_mov_b32_e32 v18, v17 -; SI-NEXT: v_mov_b32_e32 v1, v24 -; SI-NEXT: v_mov_b32_e32 v5, v23 -; SI-NEXT: v_mov_b32_e32 v9, v22 
-; SI-NEXT: v_mov_b32_e32 v13, v21 -; SI-NEXT: v_mov_b32_e32 v17, v20 +; SI-NEXT: v_mov_b32_e32 v0, v28 +; SI-NEXT: v_mov_b32_e32 v2, v29 +; SI-NEXT: v_mov_b32_e32 v4, v26 +; SI-NEXT: v_mov_b32_e32 v6, v27 +; SI-NEXT: v_mov_b32_e32 v8, v24 +; SI-NEXT: v_mov_b32_e32 v10, v25 +; SI-NEXT: v_mov_b32_e32 v12, v22 +; SI-NEXT: v_mov_b32_e32 v14, v23 +; SI-NEXT: v_mov_b32_e32 v16, v20 +; SI-NEXT: v_mov_b32_e32 v18, v21 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v5f64_to_v20i16_scalar: @@ -21435,76 +21464,71 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b32 s26, s25, 16 +; SI-NEXT: s_lshr_b32 s27, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s29, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 ; SI-NEXT: s_add_u32 s24, s24, 3 ; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s26, s25, 16 +; SI-NEXT: s_lshr_b32 s27, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s29, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: .LBB59_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s12 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v3, s40 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; 
SI-NEXT: v_mov_b32_e32 v5, s10 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v7, s29 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s8 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v11, s28 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s6 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s7 +; SI-NEXT: v_mov_b32_e32 v15, s27 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s4 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s6 +; SI-NEXT: v_mov_b32_e32 v19, s26 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr28 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: s_branch .LBB59_2 ; ; VI-LABEL: bitcast_v5i64_to_v20i16_scalar: @@ -22779,357 +22803,375 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32 ; SI-LABEL: bitcast_v20f16_to_v40i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v16, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v15, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v13, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v39, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v33, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s20 ; SI-NEXT: v_cvt_f16_f32_e32 v9, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v54, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v53, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s24 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v50, s26 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v43, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v17, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s29 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v1 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v47, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v46, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v45, v5 -; 
SI-NEXT: v_cvt_f16_f32_e32 v44, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, s28 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB61_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v16 -; SI-NEXT: v_or_b32_e32 v28, v15, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v10 -; SI-NEXT: v_or_b32_e32 v24, v12, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v39 -; SI-NEXT: v_or_b32_e32 v14, v33, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v9 -; SI-NEXT: v_or_b32_e32 v13, v20, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v54 -; SI-NEXT: v_or_b32_e32 v7, v53, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v8 -; SI-NEXT: v_or_b32_e32 v11, v50, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v43 -; SI-NEXT: v_or_b32_e32 v5, v44, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; SI-NEXT: v_or_b32_e32 v6, v41, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 -; SI-NEXT: v_or_b32_e32 v4, v46, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; SI-NEXT: v_or_b32_e32 v3, v45, v3 -; SI-NEXT: v_alignbit_b32 v30, v24, v28, 24 -; SI-NEXT: v_alignbit_b32 v35, v24, v28, 16 -; SI-NEXT: v_alignbit_b32 v37, v24, v28, 8 -; SI-NEXT: v_alignbit_b32 v29, v13, v14, 24 -; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v36, v13, v14, 8 -; SI-NEXT: v_alignbit_b32 v23, v11, v7, 24 -; SI-NEXT: v_alignbit_b32 v26, v11, v7, 16 -; SI-NEXT: v_alignbit_b32 v32, v11, v7, 8 -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 24 -; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 8 -; SI-NEXT: v_alignbit_b32 v17, v3, v4, 24 -; SI-NEXT: v_alignbit_b32 v18, v3, v4, 16 -; SI-NEXT: v_alignbit_b32 v22, v3, v4, 8 -; SI-NEXT: v_lshrrev_b32_e32 v40, 8, v24 -; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v13 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v38, 8, v6 -; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v3 -; SI-NEXT: v_bfe_u32 v42, v10, 8, 8 -; SI-NEXT: v_bfe_u32 v55, v9, 8, 8 -; SI-NEXT: v_bfe_u32 v51, v8, 8, 8 -; SI-NEXT: v_bfe_u32 v48, v2, 8, 8 -; SI-NEXT: v_bfe_u32 v34, v1, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s4, v13 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v12 +; SI-NEXT: s_or_b32 s12, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v10 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v11 +; SI-NEXT: s_or_b32 s13, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v16 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v15 +; SI-NEXT: s_or_b32 s10, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v9 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: s_or_b32 s11, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v19 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: s_or_b32 s8, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v8 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v17 +; SI-NEXT: s_or_b32 s9, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v21 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v5 +; SI-NEXT: s_or_b32 s6, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v20 +; SI-NEXT: s_or_b32 s7, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v24 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v23 +; SI-NEXT: s_lshr_b64 s[14:15], s[12:13], 24 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: 
v_readfirstlane_b32 s15, v22 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[20:21], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[18:19], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[22:23], s[10:11], 16 +; SI-NEXT: s_or_b32 s5, s15, s5 +; SI-NEXT: s_lshr_b64 s[26:27], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s23, s13, 8 +; SI-NEXT: s_lshr_b32 s21, s11, 8 +; SI-NEXT: s_lshr_b32 s19, s9, 8 +; SI-NEXT: s_lshr_b32 s17, s7, 8 +; SI-NEXT: s_lshr_b32 s15, s5, 8 +; SI-NEXT: v_bfe_u32 v25, v10, 8, 8 +; SI-NEXT: v_bfe_u32 v7, v9, 8, 8 +; SI-NEXT: v_bfe_u32 v6, v8, 8, 8 +; SI-NEXT: v_bfe_u32 v4, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v3, v1, 8, 8 ; SI-NEXT: s_cbranch_execnz .LBB61_3 ; SI-NEXT: .LBB61_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v3, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_readfirstlane_b32 s4, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v22 +; SI-NEXT: v_readfirstlane_b32 s5, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v4, v4, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v43 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; SI-NEXT: v_or_b32_e32 v3, v5, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v54 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s6, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v5 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_readfirstlane_b32 s6, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v19 +; SI-NEXT: v_readfirstlane_b32 s7, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v20 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v53 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 
16, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v50 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v33 -; SI-NEXT: v_or_b32_e32 v7, v13, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_or_b32_e32 v14, v14, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v9 -; SI-NEXT: v_or_b32_e32 v28, v15, v16 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v10 -; SI-NEXT: v_or_b32_e32 v13, v17, v13 -; SI-NEXT: v_or_b32_e32 v24, v12, v15 -; SI-NEXT: v_alignbit_b32 v30, v24, v28, 24 -; SI-NEXT: v_alignbit_b32 v35, v24, v28, 16 -; SI-NEXT: v_alignbit_b32 v37, v24, v28, 8 -; SI-NEXT: v_alignbit_b32 v29, v13, v14, 24 -; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v36, v13, v14, 8 -; SI-NEXT: v_alignbit_b32 v23, v11, v7, 24 -; SI-NEXT: v_alignbit_b32 v26, v11, v7, 16 -; SI-NEXT: v_alignbit_b32 v32, v11, v7, 8 -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 24 -; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 8 -; SI-NEXT: v_alignbit_b32 v17, v3, v4, 24 -; SI-NEXT: v_alignbit_b32 v18, v3, v4, 16 -; SI-NEXT: v_alignbit_b32 v22, v3, v4, 8 -; SI-NEXT: v_lshrrev_b32_e32 v40, 8, v24 -; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v13 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v38, 8, v6 -; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v3 -; SI-NEXT: v_bfe_u32 v42, v10, 8, 8 -; SI-NEXT: v_bfe_u32 v55, v9, 8, 8 -; SI-NEXT: v_bfe_u32 v51, v8, 8, 8 -; SI-NEXT: v_bfe_u32 v48, v2, 8, 8 -; SI-NEXT: v_bfe_u32 v34, v1, 8, 8 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readfirstlane_b32 s8, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_readfirstlane_b32 s7, v2 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_readfirstlane_b32 s8, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v8 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: v_readfirstlane_b32 s9, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v17 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v4 +; 
SI-NEXT: v_cvt_f32_f16_e32 v4, v16 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_readfirstlane_b32 s10, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v15 +; SI-NEXT: v_readfirstlane_b32 s9, v8 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_readfirstlane_b32 s10, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v12 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_readfirstlane_b32 s11, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v13 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_readfirstlane_b32 s11, v9 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: v_readfirstlane_b32 s12, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v5 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_readfirstlane_b32 s12, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v11 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: v_readfirstlane_b32 s13, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: v_readfirstlane_b32 s14, v3 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: s_lshr_b64 s[14:15], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[20:21], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[18:19], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[22:23], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s23, s13, 8 +; SI-NEXT: s_lshr_b32 s21, s11, 8 +; SI-NEXT: s_lshr_b32 s19, s9, 8 +; SI-NEXT: s_lshr_b32 s17, s7, 8 +; SI-NEXT: s_lshr_b32 s15, s5, 8 +; SI-NEXT: v_bfe_u32 v25, v10, 8, 8 +; SI-NEXT: v_bfe_u32 v7, v9, 8, 8 +; SI-NEXT: v_bfe_u32 v6, v8, 8, 8 +; SI-NEXT: v_bfe_u32 v4, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v3, v1, 8, 8 ; SI-NEXT: .LBB61_3: ; %end -; SI-NEXT: v_and_b32_e32 v12, 0xff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v37 -; SI-NEXT: v_or_b32_e32 v12, v12, v15 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v30 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v15, v16, v15 -; SI-NEXT: v_or_b32_e32 v12, v12, v15 -; SI-NEXT: buffer_store_dword v12, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v40 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_or_b32_e32 v12, v12, v15 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: 
v_lshlrev_b32_e32 v15, 24, v42 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v15, v10 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v12, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v36 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_and_b32_e32 v12, 0xff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v29 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v12, v14, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_add_i32_e32 v12, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v52 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v55 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v9, v12, v9 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v32 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v23 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s20, s20, 8 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_or_b32 s12, s12, s20 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: s_lshl_b32 s13, s23, 8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v49 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v51 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v8, v9, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v10 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v25 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: v_or_b32_e32 v5, v10, v5 +; SI-NEXT: v_or_b32_e32 v5, s12, v5 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s12, s26, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: s_and_b32 s12, s22, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s18, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_add_i32_e32 v10, vcc, 4, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v5, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v27 -; SI-NEXT: v_or_b32_e32 v5, v5, v7 -; SI-NEXT: 
v_and_b32_e32 v7, 0xff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v19 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v5, v5, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v5, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v10, s10 +; SI-NEXT: buffer_store_dword v10, v5, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s21, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v9 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: v_or_b32_e32 v5, v7, v5 +; SI-NEXT: v_or_b32_e32 v5, s10, v5 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s40, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: s_and_b32 s10, s28, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s24, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v7, vcc, 12, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 ; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v38 +; SI-NEXT: v_add_i32_e32 v5, vcc, 16, v0 +; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: buffer_store_dword v7, v5, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s19, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v5, s8, v5 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s44, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s42, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s46, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v6, vcc, 20, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v6, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v48 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v2, v6, v2 -; SI-NEXT: v_or_b32_e32 v2, v5, v2 -; SI-NEXT: v_add_i32_e32 v5, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v2, v5, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: v_or_b32_e32 v2, v4, v2 +; SI-NEXT: v_or_b32_e32 v2, s6, v2 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s60, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s58, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s56, 24 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v22 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v17 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v4, vcc, 
28, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v25 +; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s15, 8 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v34 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB61_4: -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr21 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr19 ; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr17 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr18 
-; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB61_2 ; ; VI-LABEL: bitcast_v20f16_to_v40i8_scalar: @@ -31153,232 +31195,246 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB75_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s24 -; SI-NEXT: v_alignbit_b32 v2, s25, v1, 24 -; SI-NEXT: v_alignbit_b32 v11, s25, v1, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s22 -; SI-NEXT: v_alignbit_b32 v4, s23, v1, 24 -; SI-NEXT: v_alignbit_b32 v13, s23, v1, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s20 -; SI-NEXT: v_alignbit_b32 v6, s21, v1, 24 -; SI-NEXT: v_alignbit_b32 v15, s21, v1, 16 -; SI-NEXT: v_alignbit_b32 v16, s21, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s18 -; SI-NEXT: v_alignbit_b32 v8, s19, v1, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v1, 16 -; SI-NEXT: v_alignbit_b32 v17, s19, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s16 -; SI-NEXT: v_alignbit_b32 v18, s17, v1, 24 -; SI-NEXT: v_alignbit_b32 v19, s17, v1, 16 -; SI-NEXT: v_alignbit_b32 v20, s17, v1, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB75_4 ; SI-NEXT: .LBB75_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[9:10], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[7:8], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[5:6], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[8:9], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[15:16], s[18:19], 1.0 +; SI-NEXT: v_lshr_b64 v[22:23], v[8:9], 8 ; SI-NEXT: 
v_add_f64 v[1:2], s[24:25], 1.0 +; SI-NEXT: v_lshr_b64 v[23:24], v[15:16], 24 ; SI-NEXT: v_add_f64 v[3:4], s[22:23], 1.0 -; SI-NEXT: v_readfirstlane_b32 s25, v2 +; SI-NEXT: v_add_f64 v[20:21], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[24:25], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 8 +; SI-NEXT: v_lshr_b64 v[25:26], v[15:16], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[3:4], 24 +; SI-NEXT: v_lshr_b64 v[26:27], v[20:21], 24 +; SI-NEXT: v_lshr_b64 v[5:6], v[1:2], 24 +; SI-NEXT: v_lshr_b64 v[12:13], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[8:9], 24 +; SI-NEXT: v_lshr_b64 v[27:28], v[20:21], 16 +; SI-NEXT: v_readfirstlane_b32 s17, v21 +; SI-NEXT: v_readfirstlane_b32 s19, v16 +; SI-NEXT: v_readfirstlane_b32 s21, v9 ; SI-NEXT: v_readfirstlane_b32 s23, v4 -; SI-NEXT: v_readfirstlane_b32 s21, v6 -; SI-NEXT: v_readfirstlane_b32 s19, v8 -; SI-NEXT: v_readfirstlane_b32 s17, v10 -; SI-NEXT: v_alignbit_b32 v2, s25, v1, 24 -; SI-NEXT: v_alignbit_b32 v11, s25, v1, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v1, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v3, 24 -; SI-NEXT: v_alignbit_b32 v13, s23, v3, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v3, 8 -; SI-NEXT: v_alignbit_b32 v6, s21, v5, 24 -; SI-NEXT: v_alignbit_b32 v15, s21, v5, 16 -; SI-NEXT: v_alignbit_b32 v16, s21, v5, 8 -; SI-NEXT: v_alignbit_b32 v8, s19, v7, 24 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v7, 16 -; SI-NEXT: v_alignbit_b32 v17, s19, v7, 8 -; SI-NEXT: v_alignbit_b32 v18, s17, v9, 24 -; SI-NEXT: v_alignbit_b32 v19, s17, v9, 16 -; SI-NEXT: v_alignbit_b32 v20, s17, v9, 8 +; SI-NEXT: v_readfirstlane_b32 s25, v2 +; SI-NEXT: v_lshr_b64 v[6:7], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[3:4], 8 +; SI-NEXT: v_lshr_b64 v[18:19], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[20:21], 8 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 ; SI-NEXT: s_branch .LBB75_5 ; SI-NEXT: .LBB75_3: -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr28 -; SI-NEXT: ; implicit-def: $vgpr17 -; 
SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: ; implicit-def: $sgpr26 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch .LBB75_2 ; SI-NEXT: .LBB75_4: -; SI-NEXT: v_mov_b32_e32 v1, s24 +; SI-NEXT: v_mov_b32_e32 v20, s16 +; SI-NEXT: v_mov_b32_e32 v15, s18 +; SI-NEXT: v_mov_b32_e32 v8, s20 ; SI-NEXT: v_mov_b32_e32 v3, s22 -; SI-NEXT: v_mov_b32_e32 v5, s20 -; SI-NEXT: v_mov_b32_e32 v7, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 +; SI-NEXT: v_mov_b32_e32 v1, s24 +; SI-NEXT: v_mov_b32_e32 v28, s60 +; SI-NEXT: v_mov_b32_e32 v27, s58 +; SI-NEXT: v_mov_b32_e32 v26, s56 +; SI-NEXT: v_mov_b32_e32 v25, s46 +; SI-NEXT: v_mov_b32_e32 v24, s44 +; SI-NEXT: v_mov_b32_e32 v23, s42 +; SI-NEXT: v_mov_b32_e32 v22, s40 +; SI-NEXT: v_mov_b32_e32 v18, s28 +; SI-NEXT: v_mov_b32_e32 v17, s26 +; SI-NEXT: v_mov_b32_e32 v13, s14 +; SI-NEXT: v_mov_b32_e32 v12, s12 +; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v10, s6 +; SI-NEXT: v_mov_b32_e32 v6, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 ; SI-NEXT: .LBB75_5: ; %end +; SI-NEXT: v_and_b32_e32 v2, 0xff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v28 ; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 +; SI-NEXT: s_lshl_b32 s5, s94, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v27 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s29, 0xff -; SI-NEXT: v_or_b32_e32 v9, v9, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v18 +; SI-NEXT: s_and_b32 s5, s93, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v26 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s28, 24 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: s_lshl_b32 s6, s92, 24 +; SI-NEXT: v_or_b32_e32 v4, v7, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v9, v9, v18 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v18, s4 -; SI-NEXT: buffer_store_dword v18, v9, s[0:3], 0 offen -; SI-NEXT: 
v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v17 +; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v15 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v25 ; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s27, 8 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v10 +; SI-NEXT: s_lshl_b32 s5, s91, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v24 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; SI-NEXT: s_and_b32 s5, s90, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v23 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s15, s15, 24 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 +; SI-NEXT: s_lshl_b32 s6, s89, 24 +; SI-NEXT: v_or_b32_e32 v4, v7, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s15, s5 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 8, v0 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 8, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v16 +; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v8 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v22 ; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s14, 8 -; SI-NEXT: v_or_b32_e32 v5, v5, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v15 +; SI-NEXT: s_lshl_b32 s5, s88, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v18 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: s_and_b32 s5, s79, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s12, 24 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 +; SI-NEXT: s_lshl_b32 s6, s78, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v4, v7, v4 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 16, v0 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 16, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v14 +; SI-NEXT: v_add_i32_e32 v2, vcc, 20, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 
0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v13 ; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s11, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v13 +; SI-NEXT: s_lshl_b32 s5, s77, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v12 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s10, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: s_and_b32 s5, s76, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v11 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s9, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: s_lshl_b32 s6, s75, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 24, v0 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v0 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v12 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v10 ; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v11 +; SI-NEXT: s_lshl_b32 s5, s74, 8 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; SI-NEXT: s_and_b32 s5, s73, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: s_lshl_b32 s6, s72, 24 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -35291,241 +35347,239 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB79_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v3, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 
v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB79_3 ; SI-NEXT: .LBB79_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 ; SI-NEXT: s_add_u32 s24, s24, 3 ; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: v_mov_b32_e32 v3, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_add_u32 
s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: .LBB79_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s28, 24 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s27, 8 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s26, 0xff -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s15, s15, 24 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s15, s5 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s5, s60, 8 +; SI-NEXT: s_and_b32 s7, s16, 0xff +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: s_and_b32 s7, s58, 0xff +; SI-NEXT: s_lshl_b32 s9, s56, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: s_and_b32 s5, s17, 0xff +; SI-NEXT: s_lshl_b32 s7, s94, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s93, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s92, 24 +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; 
SI-NEXT: s_lshl_b32 s5, s46, 8 +; SI-NEXT: s_and_b32 s7, s18, 0xff +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: s_and_b32 s7, s44, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s42, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s14, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s12, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s11, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s10, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s9, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s19, 0xff +; SI-NEXT: s_lshl_b32 s7, s91, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s90, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s89, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s40, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s26, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; 
SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s21, 0xff +; SI-NEXT: s_lshl_b32 s7, s88, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s79, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s78, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s14, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s12, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s23, 0xff +; SI-NEXT: s_lshl_b32 s7, s77, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s76, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s75, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s24, 0xff +; SI-NEXT: s_lshl_b32 s7, s8, 8 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 ; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_lshl_b32 s5, s74, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_and_b32 s5, s73, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s6, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s72, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 36, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB79_4: -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; 
implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr28 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: ; implicit-def: $sgpr26 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB79_2 ; ; VI-LABEL: bitcast_v5i64_to_v40i8_scalar: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll index 6fc9a35cd9ee6..f335b48ba4ae1 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll @@ -716,83 +716,77 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s27, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s25, 16 +; SI-NEXT: s_lshr_b32 s40, s23, 16 +; SI-NEXT: s_lshr_b32 s41, s21, 16 +; SI-NEXT: s_lshr_b32 s42, s19, 16 +; SI-NEXT: s_lshr_b32 s43, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 
s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 ; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_lshr_b32 s27, s25, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s40, s23, 16 +; SI-NEXT: s_lshr_b32 s41, s21, 16 +; SI-NEXT: s_lshr_b32 s42, s19, 16 +; SI-NEXT: s_lshr_b32 s43, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v3, s43 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s12 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v7, s42 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s8 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v11, s41 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s6 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s7 +; SI-NEXT: v_mov_b32_e32 v15, s40 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s4 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s6 +; SI-NEXT: v_mov_b32_e32 v19, s27 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s10 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr42 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v11i32_to_v22i16_scalar: @@ -2676,78 +2670,89 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a, ; SI-NEXT: s_cmp_lg_u32 s27, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: 
v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s17, 16 +; SI-NEXT: s_lshr_b32 s43, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: s_lshr_b32 s41, s21, 16 +; SI-NEXT: s_lshr_b32 s40, s19, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_4 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v20, s26, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s24, 1.0 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v21, s4, v20, 16 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v32, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v31, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v30, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v29, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v28, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v27, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v26, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v24, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v23, s24, 1.0 +; SI-NEXT: v_lshr_b64 v[17:18], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[27:28], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[31:32], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[20:21], 16 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v32 +; SI-NEXT: s_branch .LBB13_5 ; SI-NEXT: .LBB13_3: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; 
implicit-def: $vgpr21 ; SI-NEXT: s_branch .LBB13_2 ; SI-NEXT: .LBB13_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 +; SI-NEXT: v_mov_b32_e32 v31, s16 +; SI-NEXT: v_mov_b32_e32 v32, s17 +; SI-NEXT: v_mov_b32_e32 v29, s18 +; SI-NEXT: v_mov_b32_e32 v30, s19 +; SI-NEXT: v_mov_b32_e32 v27, s20 +; SI-NEXT: v_mov_b32_e32 v28, s21 +; SI-NEXT: v_mov_b32_e32 v25, s22 +; SI-NEXT: v_mov_b32_e32 v26, s23 +; SI-NEXT: v_mov_b32_e32 v23, s24 +; SI-NEXT: v_mov_b32_e32 v24, s25 ; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 +; SI-NEXT: v_mov_b32_e32 v3, s27 +; SI-NEXT: v_mov_b32_e32 v7, s40 +; SI-NEXT: v_mov_b32_e32 v11, s41 +; SI-NEXT: v_mov_b32_e32 v15, s42 +; SI-NEXT: v_mov_b32_e32 v19, s43 +; SI-NEXT: v_mov_b32_e32 v21, s8 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: v_mov_b32_e32 v9, s10 +; SI-NEXT: v_mov_b32_e32 v13, s6 +; SI-NEXT: v_mov_b32_e32 v17, s4 +; SI-NEXT: .LBB13_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v31 +; SI-NEXT: v_mov_b32_e32 v2, v32 +; SI-NEXT: v_mov_b32_e32 v4, v29 +; SI-NEXT: v_mov_b32_e32 v6, v30 +; SI-NEXT: v_mov_b32_e32 v8, v27 +; SI-NEXT: v_mov_b32_e32 v10, v28 +; SI-NEXT: v_mov_b32_e32 v12, v25 +; SI-NEXT: v_mov_b32_e32 v14, v26 +; SI-NEXT: v_mov_b32_e32 v16, v23 +; SI-NEXT: v_mov_b32_e32 v18, v24 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v11f32_to_v22i16_scalar: @@ -5293,136 +5298,137 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i ; SI-LABEL: bitcast_v22f16_to_v22i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v12, v7 -; SI-NEXT: v_mov_b32_e32 v13, v6 -; SI-NEXT: v_mov_b32_e32 v19, v5 -; SI-NEXT: v_mov_b32_e32 v18, v4 -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 +; SI-NEXT: v_mov_b32_e32 v9, v7 +; SI-NEXT: v_mov_b32_e32 v10, v4 +; SI-NEXT: v_mov_b32_e32 v13, v3 ; SI-NEXT: v_mov_b32_e32 v14, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 +; SI-NEXT: 
v_cvt_f16_f32_e32 v20, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v26, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB23_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB23_3 ; SI-NEXT: .LBB23_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v23 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v24 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v25 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v26 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; 
SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_or_b32_e32 v20, v20, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_or_b32_e32 v18, v18, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_or_b32_e32 v14, v14, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v11 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_or_b32_e32 v10, v10, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v22 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v19 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_lshr_b64 v[26:27], v[13:14], 16 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v21 +; SI-NEXT: v_lshr_b64 v[27:28], v[17:18], 16 +; SI-NEXT: v_or_b32_e32 v20, v20, v22 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 ; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 ; SI-NEXT: .LBB23_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v23 +; SI-NEXT: v_mov_b32_e32 v5, v24 +; SI-NEXT: v_mov_b32_e32 v9, v25 +; SI-NEXT: v_mov_b32_e32 v13, v26 +; SI-NEXT: v_mov_b32_e32 v17, v27 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB23_4: ; 
SI-NEXT: s_branch .LBB23_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll index c9860dbb7d72c..2cde373ec130c 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll @@ -1824,89 +1824,83 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s27, 16 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s21, 16 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b32 s40, s27, 16 +; SI-NEXT: s_lshr_b32 s41, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: s_lshr_b32 s43, s21, 16 +; SI-NEXT: s_lshr_b32 s44, s19, 16 +; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s27, 16 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s21, 16 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s40, s27, 16 +; SI-NEXT: s_lshr_b32 s41, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: 
s_lshr_b32 s43, s21, 16 +; SI-NEXT: s_lshr_b32 s44, s19, 16 +; SI-NEXT: s_lshr_b32 s45, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s11 +; SI-NEXT: v_mov_b32_e32 v3, s45 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s12 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s10 +; SI-NEXT: v_mov_b32_e32 v7, s44 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s10 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s9 +; SI-NEXT: v_mov_b32_e32 v11, s43 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s8 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s8 +; SI-NEXT: v_mov_b32_e32 v15, s42 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s6 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s7 +; SI-NEXT: v_mov_b32_e32 v19, s41 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s4 ; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v23, s6 +; SI-NEXT: v_mov_b32_e32 v23, s40 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $sgpr42 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v12i32_to_v24i16_scalar: @@ -5008,84 +5002,97 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a, ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB29_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s11, s27, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s44, s25, 16 +; SI-NEXT: s_lshr_b32 s43, s23, 16 +; SI-NEXT: s_lshr_b32 s42, s21, 16 +; SI-NEXT: s_lshr_b32 s41, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_4 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: 
v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s24, 1.0 -; SI-NEXT: v_add_f32_e64 v22, s27, 1.0 -; SI-NEXT: v_add_f32_e64 v20, s26, 1.0 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v35, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v34, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v33, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v32, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v31, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v30, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v29, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v28, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v27, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v26, s24, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s27, 1.0 +; SI-NEXT: v_add_f32_e64 v24, s26, 1.0 +; SI-NEXT: v_lshr_b64 v[21:22], v[24:25], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[26:27], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[30:31], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 +; SI-NEXT: s_branch .LBB29_5 ; SI-NEXT: .LBB29_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: s_branch .LBB29_2 ; SI-NEXT: .LBB29_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 -; SI-NEXT: v_mov_b32_e32 v23, s11 +; SI-NEXT: v_mov_b32_e32 v34, s16 +; SI-NEXT: v_mov_b32_e32 v35, s17 +; 
SI-NEXT: v_mov_b32_e32 v32, s18 +; SI-NEXT: v_mov_b32_e32 v33, s19 +; SI-NEXT: v_mov_b32_e32 v30, s20 +; SI-NEXT: v_mov_b32_e32 v31, s21 +; SI-NEXT: v_mov_b32_e32 v28, s22 +; SI-NEXT: v_mov_b32_e32 v29, s23 +; SI-NEXT: v_mov_b32_e32 v26, s24 +; SI-NEXT: v_mov_b32_e32 v27, s25 +; SI-NEXT: v_mov_b32_e32 v24, s26 +; SI-NEXT: v_mov_b32_e32 v25, s27 +; SI-NEXT: v_mov_b32_e32 v3, s40 +; SI-NEXT: v_mov_b32_e32 v7, s41 +; SI-NEXT: v_mov_b32_e32 v11, s42 +; SI-NEXT: v_mov_b32_e32 v15, s43 +; SI-NEXT: v_mov_b32_e32 v19, s44 +; SI-NEXT: v_mov_b32_e32 v23, s45 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: v_mov_b32_e32 v9, s10 +; SI-NEXT: v_mov_b32_e32 v13, s8 +; SI-NEXT: v_mov_b32_e32 v17, s6 +; SI-NEXT: v_mov_b32_e32 v21, s4 +; SI-NEXT: .LBB29_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v34 +; SI-NEXT: v_mov_b32_e32 v2, v35 +; SI-NEXT: v_mov_b32_e32 v4, v32 +; SI-NEXT: v_mov_b32_e32 v6, v33 +; SI-NEXT: v_mov_b32_e32 v8, v30 +; SI-NEXT: v_mov_b32_e32 v10, v31 +; SI-NEXT: v_mov_b32_e32 v12, v28 +; SI-NEXT: v_mov_b32_e32 v14, v29 +; SI-NEXT: v_mov_b32_e32 v16, v26 +; SI-NEXT: v_mov_b32_e32 v18, v27 +; SI-NEXT: v_mov_b32_e32 v20, v24 +; SI-NEXT: v_mov_b32_e32 v22, v25 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v12f32_to_v24i16_scalar: @@ -7630,91 +7637,91 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB41_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v24, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v25, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v27, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v28, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v29, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s11, s27, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s44, s25, 16 +; SI-NEXT: s_lshr_b32 s43, s23, 16 +; SI-NEXT: s_lshr_b32 s42, s21, 16 +; SI-NEXT: s_lshr_b32 s41, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_4 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[4:5], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[8:9], s[20:21], 1.0 -; SI-NEXT: v_add_f64 v[12:13], s[22:23], 1.0 -; SI-NEXT: v_add_f64 v[20:21], s[26:27], 1.0 -; SI-NEXT: v_add_f64 v[16:17], s[24:25], 1.0 -; SI-NEXT: v_alignbit_b32 v24, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v25, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v26, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v27, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v28, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v29, v1, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_add_f64 v[24:25], s[26:27], 1.0 
+; SI-NEXT: v_add_f64 v[26:27], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[28:29], s[22:23], 1.0 +; SI-NEXT: v_add_f64 v[30:31], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[32:33], s[18:19], 1.0 +; SI-NEXT: v_add_f64 v[34:35], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[21:22], v[24:25], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[26:27], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[30:31], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 ; SI-NEXT: s_branch .LBB41_5 ; SI-NEXT: .LBB41_3: -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: s_branch .LBB41_2 ; SI-NEXT: .LBB41_4: -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v5, s19 -; SI-NEXT: v_mov_b32_e32 v9, s21 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v17, s25 -; SI-NEXT: v_mov_b32_e32 v21, s27 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 -; SI-NEXT: v_mov_b32_e32 v23, s11 +; SI-NEXT: v_mov_b32_e32 v25, s27 +; SI-NEXT: v_mov_b32_e32 v27, s25 +; SI-NEXT: v_mov_b32_e32 v29, s23 +; SI-NEXT: v_mov_b32_e32 v31, s21 +; SI-NEXT: v_mov_b32_e32 v33, s19 +; SI-NEXT: v_mov_b32_e32 v35, s17 +; SI-NEXT: v_mov_b32_e32 v34, s16 +; SI-NEXT: v_mov_b32_e32 v32, s18 +; SI-NEXT: v_mov_b32_e32 v30, s20 +; SI-NEXT: v_mov_b32_e32 v28, s22 +; SI-NEXT: v_mov_b32_e32 v26, s24 +; SI-NEXT: v_mov_b32_e32 v24, s26 +; SI-NEXT: v_mov_b32_e32 v23, s45 +; SI-NEXT: v_mov_b32_e32 v19, s44 +; SI-NEXT: v_mov_b32_e32 v15, s43 +; SI-NEXT: v_mov_b32_e32 v11, s42 +; SI-NEXT: v_mov_b32_e32 v7, s41 +; SI-NEXT: v_mov_b32_e32 v3, s40 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: v_mov_b32_e32 v9, s10 +; SI-NEXT: v_mov_b32_e32 v13, s8 +; SI-NEXT: v_mov_b32_e32 v17, s6 +; SI-NEXT: v_mov_b32_e32 v21, s4 ; SI-NEXT: .LBB41_5: ; %end -; SI-NEXT: v_mov_b32_e32 v2, v1 -; SI-NEXT: v_mov_b32_e32 v6, v5 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v14, v13 -; SI-NEXT: v_mov_b32_e32 v18, v17 -; SI-NEXT: v_mov_b32_e32 v22, v21 -; SI-NEXT: v_mov_b32_e32 v1, v29 -; SI-NEXT: v_mov_b32_e32 v5, v28 -; SI-NEXT: v_mov_b32_e32 v9, v27 -; SI-NEXT: v_mov_b32_e32 v13, v26 -; SI-NEXT: v_mov_b32_e32 v17, v25 -; SI-NEXT: 
v_mov_b32_e32 v21, v24 +; SI-NEXT: v_mov_b32_e32 v0, v34 +; SI-NEXT: v_mov_b32_e32 v2, v35 +; SI-NEXT: v_mov_b32_e32 v4, v32 +; SI-NEXT: v_mov_b32_e32 v6, v33 +; SI-NEXT: v_mov_b32_e32 v8, v30 +; SI-NEXT: v_mov_b32_e32 v10, v31 +; SI-NEXT: v_mov_b32_e32 v12, v28 +; SI-NEXT: v_mov_b32_e32 v14, v29 +; SI-NEXT: v_mov_b32_e32 v16, v26 +; SI-NEXT: v_mov_b32_e32 v18, v27 +; SI-NEXT: v_mov_b32_e32 v20, v24 +; SI-NEXT: v_mov_b32_e32 v22, v25 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v6f64_to_v24i16_scalar: @@ -9690,89 +9697,83 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s27, 16 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s21, 16 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b32 s40, s27, 16 +; SI-NEXT: s_lshr_b32 s41, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: s_lshr_b32 s43, s21, 16 +; SI-NEXT: s_lshr_b32 s44, s19, 16 +; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 ; SI-NEXT: s_add_u32 s26, s26, 3 ; SI-NEXT: s_addc_u32 s27, s27, 0 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s27, 16 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s21, 16 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s40, s27, 16 +; SI-NEXT: s_lshr_b32 s41, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: s_lshr_b32 s43, s21, 16 +; SI-NEXT: s_lshr_b32 
s44, s19, 16 +; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: .LBB49_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s11 +; SI-NEXT: v_mov_b32_e32 v3, s45 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s12 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s10 +; SI-NEXT: v_mov_b32_e32 v7, s44 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s10 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s9 +; SI-NEXT: v_mov_b32_e32 v11, s43 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s8 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s8 +; SI-NEXT: v_mov_b32_e32 v15, s42 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s6 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s7 +; SI-NEXT: v_mov_b32_e32 v19, s41 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s4 ; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v23, s6 +; SI-NEXT: v_mov_b32_e32 v23, s40 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $sgpr42 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v6i64_to_v24i16_scalar: @@ -12464,149 +12465,150 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i ; SI-LABEL: bitcast_v24f16_to_v24i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v9 -; SI-NEXT: v_mov_b32_e32 v12, v8 -; SI-NEXT: v_mov_b32_e32 v13, v7 -; SI-NEXT: v_mov_b32_e32 v20, v6 -; SI-NEXT: v_mov_b32_e32 v19, v5 +; SI-NEXT: v_mov_b32_e32 v13, v8 +; SI-NEXT: v_mov_b32_e32 v17, v7 ; SI-NEXT: v_mov_b32_e32 v18, v4 -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 +; SI-NEXT: v_mov_b32_e32 v19, v3 ; SI-NEXT: v_mov_b32_e32 v14, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; 
SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v27, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v28, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v29, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_or_b32_e32 v22, v22, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v19 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: 
v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v28 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v29 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v30 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v25 +; SI-NEXT: v_or_b32_e32 v18, v18, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_or_b32_e32 v18, v18, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_or_b32_e32 v14, v14, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v11 +; SI-NEXT: v_lshr_b64 v[27:28], v[1:2], 16 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_or_b32_e32 v10, v10, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v24 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshr_b64 v[28:29], 
v[9:10], 16 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v22, v22, v24 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[21:22], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 ; SI-NEXT: v_or_b32_e32 v16, v16, v17 ; SI-NEXT: v_or_b32_e32 v20, v20, v21 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 ; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v27 +; SI-NEXT: v_mov_b32_e32 v5, v25 +; SI-NEXT: v_mov_b32_e32 v9, v28 +; SI-NEXT: v_mov_b32_e32 v13, v29 +; SI-NEXT: v_mov_b32_e32 v17, v30 +; SI-NEXT: v_mov_b32_e32 v21, v31 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: ; SI-NEXT: s_branch .LBB59_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll index eaf314d4b65dc..718851f97bade 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll @@ -2004,102 +2004,95 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s29, 16 -; SI-NEXT: s_lshr_b32 s7, s27, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 16 -; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b32 s44, s29, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s56, s21, 16 +; SI-NEXT: s_lshr_b32 s57, s19, 16 +; SI-NEXT: s_lshr_b32 s58, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: 
s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s29, 16 -; SI-NEXT: s_lshr_b32 s7, s27, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 16 -; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s44, s29, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s56, s21, 16 +; SI-NEXT: s_lshr_b32 s57, s19, 16 +; SI-NEXT: s_lshr_b32 s58, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s40 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v3, s58 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s14 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v7, s57 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s12 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v11, s56 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s10 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v15, s47 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s8 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s8 +; SI-NEXT: v_mov_b32_e32 v19, s46 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s6 ; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v23, s7 +; SI-NEXT: v_mov_b32_e32 v23, s45 ; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: v_mov_b32_e32 v25, s4 ; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: v_mov_b32_e32 v27, s6 +; SI-NEXT: v_mov_b32_e32 v27, s44 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; 
implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr47 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr46 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v14i32_to_v28i16_scalar: @@ -5543,96 +5536,111 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB29_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s11, s27, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s58, s29, 16 +; SI-NEXT: s_lshr_b32 s57, s27, 16 +; SI-NEXT: s_lshr_b32 s56, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: s_lshr_b32 s45, s19, 16 +; SI-NEXT: s_lshr_b32 s44, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_4 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s24, 1.0 -; SI-NEXT: v_add_f32_e64 v22, s27, 1.0 -; SI-NEXT: v_add_f32_e64 v20, s26, 1.0 -; SI-NEXT: v_add_f32_e64 v26, s29, 1.0 -; SI-NEXT: v_add_f32_e64 v24, s28, 1.0 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v49, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v48, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v39, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v38, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v37, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v36, s20, 1.0 +; SI-NEXT: v_add_f32_e64 
v35, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v34, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v33, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v32, s24, 1.0 +; SI-NEXT: v_add_f32_e64 v31, s27, 1.0 +; SI-NEXT: v_add_f32_e64 v30, s26, 1.0 +; SI-NEXT: v_add_f32_e64 v29, s29, 1.0 +; SI-NEXT: v_add_f32_e64 v28, s28, 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[30:31], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[34:35], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 16 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v35 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v49 +; SI-NEXT: s_branch .LBB29_5 ; SI-NEXT: .LBB29_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr47 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr58 ; SI-NEXT: s_branch .LBB29_2 ; SI-NEXT: .LBB29_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v24, s28 -; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 -; SI-NEXT: v_mov_b32_e32 v23, s11 -; SI-NEXT: v_mov_b32_e32 v27, s12 +; SI-NEXT: v_mov_b32_e32 v48, s16 +; SI-NEXT: v_mov_b32_e32 v49, s17 +; SI-NEXT: v_mov_b32_e32 v38, s18 +; SI-NEXT: v_mov_b32_e32 v39, s19 +; SI-NEXT: v_mov_b32_e32 v36, s20 +; SI-NEXT: v_mov_b32_e32 v37, s21 +; SI-NEXT: v_mov_b32_e32 v34, s22 +; SI-NEXT: v_mov_b32_e32 v35, s23 +; SI-NEXT: v_mov_b32_e32 v32, s24 +; SI-NEXT: v_mov_b32_e32 v33, s25 +; SI-NEXT: v_mov_b32_e32 v30, s26 +; SI-NEXT: v_mov_b32_e32 v31, s27 +; SI-NEXT: v_mov_b32_e32 v28, s28 +; SI-NEXT: v_mov_b32_e32 v29, s29 +; SI-NEXT: v_mov_b32_e32 v3, s44 +; SI-NEXT: v_mov_b32_e32 v7, s45 +; SI-NEXT: v_mov_b32_e32 v11, s46 +; SI-NEXT: v_mov_b32_e32 v15, s47 +; SI-NEXT: v_mov_b32_e32 v19, s56 +; SI-NEXT: v_mov_b32_e32 v23, s57 +; SI-NEXT: v_mov_b32_e32 v27, s58 +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_mov_b32_e32 v21, s6 +; SI-NEXT: v_mov_b32_e32 v17, s8 +; SI-NEXT: 
v_mov_b32_e32 v13, s10 +; SI-NEXT: v_mov_b32_e32 v9, s12 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: v_mov_b32_e32 v1, s40 +; SI-NEXT: .LBB29_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v2, v49 +; SI-NEXT: v_mov_b32_e32 v4, v38 +; SI-NEXT: v_mov_b32_e32 v6, v39 +; SI-NEXT: v_mov_b32_e32 v8, v36 +; SI-NEXT: v_mov_b32_e32 v10, v37 +; SI-NEXT: v_mov_b32_e32 v12, v34 +; SI-NEXT: v_mov_b32_e32 v14, v35 +; SI-NEXT: v_mov_b32_e32 v16, v32 +; SI-NEXT: v_mov_b32_e32 v18, v33 +; SI-NEXT: v_mov_b32_e32 v20, v30 +; SI-NEXT: v_mov_b32_e32 v22, v31 +; SI-NEXT: v_mov_b32_e32 v24, v28 +; SI-NEXT: v_mov_b32_e32 v26, v29 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v14f32_to_v28i16_scalar: @@ -8475,102 +8483,95 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s29, 16 -; SI-NEXT: s_lshr_b32 s7, s27, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 16 -; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b32 s44, s29, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s56, s21, 16 +; SI-NEXT: s_lshr_b32 s57, s19, 16 +; SI-NEXT: s_lshr_b32 s58, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s29, 16 -; SI-NEXT: s_lshr_b32 s7, s27, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s10, 
s21, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 16 -; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s44, s29, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s56, s21, 16 +; SI-NEXT: s_lshr_b32 s57, s19, 16 +; SI-NEXT: s_lshr_b32 s58, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s40 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v3, s58 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s14 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v7, s57 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s12 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v11, s56 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s10 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v15, s47 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s8 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s8 +; SI-NEXT: v_mov_b32_e32 v19, s46 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s6 ; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v23, s7 +; SI-NEXT: v_mov_b32_e32 v23, s45 ; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: v_mov_b32_e32 v25, s4 ; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: v_mov_b32_e32 v27, s6 +; SI-NEXT: v_mov_b32_e32 v27, s44 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr47 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr46 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v7i64_to_v28i16_scalar: @@ -10809,104 +10810,104 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v28, s29, v0, 
16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v29, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v30, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v31, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v32, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v33, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v34, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s11, s27, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s58, s29, 16 +; SI-NEXT: s_lshr_b32 s57, s27, 16 +; SI-NEXT: s_lshr_b32 s56, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: s_lshr_b32 s45, s19, 16 +; SI-NEXT: s_lshr_b32 s44, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[4:5], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[8:9], s[20:21], 1.0 -; SI-NEXT: v_add_f64 v[12:13], s[22:23], 1.0 -; SI-NEXT: v_add_f64 v[16:17], s[24:25], 1.0 -; SI-NEXT: v_add_f64 v[24:25], s[28:29], 1.0 -; SI-NEXT: v_add_f64 v[20:21], s[26:27], 1.0 -; SI-NEXT: v_alignbit_b32 v28, v25, v24, 16 -; SI-NEXT: v_alignbit_b32 v29, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v30, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v31, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v32, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v33, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v34, v1, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_add_f64 v[28:29], s[28:29], 1.0 +; SI-NEXT: v_add_f64 v[30:31], s[26:27], 1.0 +; SI-NEXT: v_add_f64 v[32:33], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[34:35], s[22:23], 1.0 +; SI-NEXT: v_add_f64 v[36:37], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[38:39], s[18:19], 1.0 +; SI-NEXT: v_add_f64 v[48:49], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[30:31], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[34:35], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 16 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v35 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v49 ; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $sgpr9 -; 
SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr47 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr58 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v5, s19 -; SI-NEXT: v_mov_b32_e32 v9, s21 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v17, s25 -; SI-NEXT: v_mov_b32_e32 v21, s27 -; SI-NEXT: v_mov_b32_e32 v25, s29 -; SI-NEXT: v_mov_b32_e32 v24, s28 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 -; SI-NEXT: v_mov_b32_e32 v23, s11 -; SI-NEXT: v_mov_b32_e32 v27, s12 +; SI-NEXT: v_mov_b32_e32 v29, s29 +; SI-NEXT: v_mov_b32_e32 v31, s27 +; SI-NEXT: v_mov_b32_e32 v33, s25 +; SI-NEXT: v_mov_b32_e32 v35, s23 +; SI-NEXT: v_mov_b32_e32 v37, s21 +; SI-NEXT: v_mov_b32_e32 v39, s19 +; SI-NEXT: v_mov_b32_e32 v49, s17 +; SI-NEXT: v_mov_b32_e32 v48, s16 +; SI-NEXT: v_mov_b32_e32 v38, s18 +; SI-NEXT: v_mov_b32_e32 v36, s20 +; SI-NEXT: v_mov_b32_e32 v34, s22 +; SI-NEXT: v_mov_b32_e32 v32, s24 +; SI-NEXT: v_mov_b32_e32 v30, s26 +; SI-NEXT: v_mov_b32_e32 v28, s28 +; SI-NEXT: v_mov_b32_e32 v27, s58 +; SI-NEXT: v_mov_b32_e32 v23, s57 +; SI-NEXT: v_mov_b32_e32 v19, s56 +; SI-NEXT: v_mov_b32_e32 v15, s47 +; SI-NEXT: v_mov_b32_e32 v11, s46 +; SI-NEXT: v_mov_b32_e32 v7, s45 +; SI-NEXT: v_mov_b32_e32 v3, s44 +; SI-NEXT: v_mov_b32_e32 v1, s40 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: v_mov_b32_e32 v9, s12 +; SI-NEXT: v_mov_b32_e32 v13, s10 +; SI-NEXT: v_mov_b32_e32 v17, s8 +; SI-NEXT: v_mov_b32_e32 v21, s6 +; SI-NEXT: v_mov_b32_e32 v25, s4 ; SI-NEXT: .LBB49_5: ; %end -; SI-NEXT: v_mov_b32_e32 v2, v1 -; SI-NEXT: v_mov_b32_e32 v6, v5 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v14, v13 -; SI-NEXT: v_mov_b32_e32 v18, v17 -; SI-NEXT: v_mov_b32_e32 v22, v21 -; SI-NEXT: v_mov_b32_e32 v26, v25 -; SI-NEXT: v_mov_b32_e32 v1, v34 -; SI-NEXT: v_mov_b32_e32 v5, v33 -; SI-NEXT: v_mov_b32_e32 v9, v32 -; SI-NEXT: v_mov_b32_e32 v13, v31 -; SI-NEXT: v_mov_b32_e32 v17, v30 -; SI-NEXT: v_mov_b32_e32 v21, v29 -; SI-NEXT: v_mov_b32_e32 v25, v28 +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v2, v49 +; SI-NEXT: v_mov_b32_e32 v4, v38 +; SI-NEXT: v_mov_b32_e32 v6, v39 +; SI-NEXT: v_mov_b32_e32 v8, v36 +; SI-NEXT: v_mov_b32_e32 v10, v37 +; SI-NEXT: v_mov_b32_e32 v12, v34 +; SI-NEXT: v_mov_b32_e32 v14, v35 +; SI-NEXT: v_mov_b32_e32 v16, v32 +; SI-NEXT: v_mov_b32_e32 v18, v33 +; SI-NEXT: v_mov_b32_e32 v20, v30 +; SI-NEXT: v_mov_b32_e32 v22, v31 +; SI-NEXT: v_mov_b32_e32 v24, v28 +; SI-NEXT: v_mov_b32_e32 v26, v29 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v7f64_to_v28i16_scalar: @@ -13866,83 +13867,107 @@ define inreg <28 x i16> 
@bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i ; SI-LABEL: bitcast_v28f16_to_v28i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v19, v5 +; SI-NEXT: v_mov_b32_e32 v17, v12 +; SI-NEXT: v_mov_b32_e32 v21, v11 +; SI-NEXT: v_mov_b32_e32 v22, v8 +; SI-NEXT: v_mov_b32_e32 v25, v7 ; SI-NEXT: v_mov_b32_e32 v18, v4 -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 -; SI-NEXT: v_mov_b32_e32 v20, v0 +; SI-NEXT: v_mov_b32_e32 v26, v3 +; SI-NEXT: v_mov_b32_e32 v28, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v23, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v27, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v32, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v29, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; 
SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v35 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v29 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_or_b32_e32 v18, v18, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v36 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v32 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v30 +; SI-NEXT: v_or_b32_e32 v22, v22, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 @@ -13950,48 +13975,30 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_or_b32_e32 v26, v26, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_or_b32_e32 v22, v22, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; 
SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_or_b32_e32 v18, v18, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_or_b32_e32 v14, v14, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 @@ -13999,18 +14006,20 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_or_b32_e32 v10, v10, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v28 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshr_b64 v[34:35], v[1:2], 16 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v27 +; SI-NEXT: v_lshr_b64 v[35:36], v[13:14], 16 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_or_b32_e32 v26, v26, v28 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: v_lshr_b64 v[29:30], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[25:26], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 @@ -14018,14 +14027,14 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i ; SI-NEXT: v_or_b32_e32 v16, v16, v17 ; SI-NEXT: v_or_b32_e32 v20, v20, v21 ; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, 
v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 ; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v34 +; SI-NEXT: v_mov_b32_e32 v5, v32 +; SI-NEXT: v_mov_b32_e32 v9, v29 +; SI-NEXT: v_mov_b32_e32 v13, v35 +; SI-NEXT: v_mov_b32_e32 v17, v30 +; SI-NEXT: v_mov_b32_e32 v21, v36 +; SI-NEXT: v_mov_b32_e32 v25, v37 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: ; SI-NEXT: s_branch .LBB59_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll index fd190b23dd8ca..acc02472c7161 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll @@ -903,31 +903,32 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v4, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v3, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v4, 1.0, s18 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v5 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 +; SI-NEXT: v_lshr_b64 v[3:4], v[1:2], 16 +; SI-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; SI-NEXT: .LBB5_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v3 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: s_branch .LBB5_2 ; @@ -1391,26 +1392,27 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v3, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB9_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB9_3 ; SI-NEXT: .LBB9_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: 
v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshr_b64 v[3:4], v[1:2], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 ; SI-NEXT: .LBB9_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v3 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB9_4: ; SI-NEXT: s_branch .LBB9_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll index 352b2cb7123b1..d3fbba3cf4dd7 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll @@ -2143,96 +2143,113 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v16i32_to_v32i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v3, v2 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_mov_b32_e32 v30, v1 -; SI-NEXT: v_mov_b32_e32 v28, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: v_readfirstlane_b32 s4, v0 +; SI-NEXT: s_and_b64 s[6:7], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_cbranch_scc0 .LBB13_4 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: s_lshr_b32 s56, s5, 16 +; SI-NEXT: s_lshr_b32 s57, s29, 16 +; SI-NEXT: s_lshr_b32 s58, s27, 16 +; SI-NEXT: s_lshr_b32 s59, s25, 16 +; SI-NEXT: s_lshr_b32 s60, s23, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 16 +; SI-NEXT: s_lshr_b32 s63, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: s_cbranch_execnz .LBB13_3 +; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s56, s5, 16 +; SI-NEXT: s_lshr_b32 s57, s29, 16 +; SI-NEXT: s_lshr_b32 s58, s27, 16 +; SI-NEXT: s_lshr_b32 s59, s25, 16 +; SI-NEXT: s_lshr_b32 s60, s23, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 16 +; SI-NEXT: s_lshr_b32 s63, s17, 16 +; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s44 ; SI-NEXT: v_mov_b32_e32 v2, s17 +; SI-NEXT: v_mov_b32_e32 v3, s63 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s42 ; SI-NEXT: v_mov_b32_e32 v6, s19 +; SI-NEXT: v_mov_b32_e32 v7, s62 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s40 ; SI-NEXT: v_mov_b32_e32 v10, s21 +; SI-NEXT: v_mov_b32_e32 v11, s61 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s14 
; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v15, s60 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s12 ; SI-NEXT: v_mov_b32_e32 v18, s25 +; SI-NEXT: v_mov_b32_e32 v19, s59 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s10 ; SI-NEXT: v_mov_b32_e32 v22, s27 +; SI-NEXT: v_mov_b32_e32 v23, s58 ; SI-NEXT: v_mov_b32_e32 v24, s28 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v25, s8 ; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: s_cbranch_scc0 .LBB13_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_cbranch_execnz .LBB13_3 -; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v6 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 -; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v10 -; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v8 -; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14 -; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v12 -; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18 -; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 -; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22 -; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20 -; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26 -; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24 -; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30 -; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28 -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: .LBB13_3: ; %end +; SI-NEXT: v_mov_b32_e32 v27, s57 +; SI-NEXT: v_mov_b32_e32 v28, s4 +; SI-NEXT: v_mov_b32_e32 v29, s6 +; SI-NEXT: v_mov_b32_e32 v30, s5 +; SI-NEXT: v_mov_b32_e32 v31, s56 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $sgpr44 +; 
SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v16i32_to_v32i16_scalar: @@ -9385,386 +9402,449 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32 ; SI-LABEL: bitcast_v16i32_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v4, s30, 0 +; SI-NEXT: v_writelane_b32 v4, s31, 1 +; SI-NEXT: v_writelane_b32 v4, s34, 2 +; SI-NEXT: v_writelane_b32 v4, s35, 3 +; SI-NEXT: v_writelane_b32 v4, s36, 4 +; SI-NEXT: v_writelane_b32 v4, s37, 5 +; SI-NEXT: v_writelane_b32 v4, s38, 6 +; SI-NEXT: v_writelane_b32 v4, s39, 7 +; SI-NEXT: v_writelane_b32 v4, s48, 8 +; SI-NEXT: v_writelane_b32 v4, s49, 9 +; SI-NEXT: v_writelane_b32 v4, s50, 10 +; SI-NEXT: v_writelane_b32 v4, s51, 11 +; SI-NEXT: v_writelane_b32 v4, s52, 12 +; SI-NEXT: v_writelane_b32 v4, s53, 13 +; SI-NEXT: v_writelane_b32 v4, s54, 14 +; SI-NEXT: v_writelane_b32 v4, s55, 15 +; SI-NEXT: v_writelane_b32 v4, s64, 16 +; SI-NEXT: v_writelane_b32 v4, s65, 17 +; SI-NEXT: v_writelane_b32 v4, s66, 18 +; SI-NEXT: v_writelane_b32 v4, s67, 19 +; SI-NEXT: v_writelane_b32 v4, s68, 20 +; SI-NEXT: v_writelane_b32 v4, s69, 21 +; SI-NEXT: v_writelane_b32 v4, s70, 22 +; SI-NEXT: v_writelane_b32 v4, s71, 23 +; SI-NEXT: v_writelane_b32 v4, s80, 24 +; SI-NEXT: v_writelane_b32 v4, s81, 25 +; SI-NEXT: v_writelane_b32 v4, s82, 26 +; SI-NEXT: v_writelane_b32 v4, s83, 27 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v2 +; SI-NEXT: v_writelane_b32 v4, s84, 28 +; SI-NEXT: v_readfirstlane_b32 s4, v1 +; SI-NEXT: s_and_b64 s[6:7], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_writelane_b32 v4, s85, 29 ; SI-NEXT: s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_alignbit_b32 v7, s27, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v9, 8 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s29, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 8 -; SI-NEXT: v_alignbit_b32 v13, s25, v9, 24 -; SI-NEXT: v_alignbit_b32 v15, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 8 -; SI-NEXT: v_alignbit_b32 v11, s23, v14, 24 -; SI-NEXT: v_alignbit_b32 v12, s23, v14, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v14, 8 -; SI-NEXT: v_alignbit_b32 v16, s21, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s21, v18, 16 
-; SI-NEXT: v_alignbit_b32 v18, s21, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s19, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s19, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s19, v21, 8 -; SI-NEXT: v_alignbit_b32 v23, s17, v22, 24 -; SI-NEXT: v_alignbit_b32 v24, s17, v22, 16 -; SI-NEXT: v_alignbit_b32 v22, s17, v22, 8 -; SI-NEXT: s_lshr_b32 s8, s6, 24 -; SI-NEXT: s_lshr_b32 s9, s6, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_lshr_b32 s38, s5, 24 +; SI-NEXT: s_lshr_b32 s39, s5, 16 +; SI-NEXT: s_lshr_b32 s48, s5, 8 +; SI-NEXT: s_lshr_b32 s49, s29, 24 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s29, 8 +; SI-NEXT: s_lshr_b32 s52, s27, 24 +; SI-NEXT: s_lshr_b32 s53, s27, 16 +; SI-NEXT: s_lshr_b32 s54, s27, 8 +; SI-NEXT: s_lshr_b32 s55, s25, 24 +; SI-NEXT: s_lshr_b32 s64, s25, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 8 +; SI-NEXT: s_lshr_b32 s66, s23, 24 +; SI-NEXT: s_lshr_b32 s67, s23, 16 +; SI-NEXT: s_lshr_b32 s68, s23, 8 +; SI-NEXT: s_lshr_b32 s69, s21, 24 +; SI-NEXT: s_lshr_b32 s70, s21, 16 +; SI-NEXT: s_lshr_b32 s71, s21, 8 +; SI-NEXT: s_lshr_b32 s80, s19, 24 +; SI-NEXT: s_lshr_b32 s81, s19, 16 +; SI-NEXT: s_lshr_b32 s82, s19, 8 +; SI-NEXT: s_lshr_b32 s83, s17, 24 +; SI-NEXT: s_lshr_b32 s84, s17, 16 +; SI-NEXT: s_lshr_b32 s85, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true -; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: 
s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_alignbit_b32 v7, s27, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v9, 8 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s29, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 8 -; SI-NEXT: v_alignbit_b32 v13, s25, v9, 24 -; SI-NEXT: v_alignbit_b32 v15, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 8 -; SI-NEXT: v_alignbit_b32 v11, s23, v14, 24 -; SI-NEXT: v_alignbit_b32 v12, s23, v14, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v14, 8 -; SI-NEXT: v_alignbit_b32 v16, s21, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s21, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s21, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s19, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s19, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s19, v21, 8 -; SI-NEXT: v_alignbit_b32 v23, s17, v22, 24 -; SI-NEXT: v_alignbit_b32 v24, s17, v22, 16 -; SI-NEXT: v_alignbit_b32 v22, s17, v22, 8 -; SI-NEXT: s_lshr_b32 s8, s6, 24 -; SI-NEXT: s_lshr_b32 s9, s6, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 16 +; SI-NEXT: s_lshr_b32 s38, s5, 24 +; SI-NEXT: s_lshr_b32 s39, s5, 16 +; SI-NEXT: s_lshr_b32 s48, s5, 8 +; SI-NEXT: s_lshr_b32 s49, s29, 24 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s29, 8 +; SI-NEXT: s_lshr_b32 s52, s27, 24 +; SI-NEXT: s_lshr_b32 s53, s27, 16 +; SI-NEXT: 
s_lshr_b32 s54, s27, 8 +; SI-NEXT: s_lshr_b32 s55, s25, 24 +; SI-NEXT: s_lshr_b32 s64, s25, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 8 +; SI-NEXT: s_lshr_b32 s66, s23, 24 +; SI-NEXT: s_lshr_b32 s67, s23, 16 +; SI-NEXT: s_lshr_b32 s68, s23, 8 +; SI-NEXT: s_lshr_b32 s69, s21, 24 +; SI-NEXT: s_lshr_b32 s70, s21, 16 +; SI-NEXT: s_lshr_b32 s71, s21, 8 +; SI-NEXT: s_lshr_b32 s80, s19, 24 +; SI-NEXT: s_lshr_b32 s81, s19, 16 +; SI-NEXT: s_lshr_b32 s82, s19, 8 +; SI-NEXT: s_lshr_b32 s83, s17, 24 +; SI-NEXT: s_lshr_b32 s84, s17, 16 +; SI-NEXT: s_lshr_b32 s85, s17, 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 8 ; SI-NEXT: .LBB25_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s63, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s62, 0xff -; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s61, 24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_or_b32_e32 v21, s4, v21 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s60, 8 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s59, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s58, 24 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v19, v21, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v20, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 -; SI-NEXT: v_or_b32_e32 v18, s4, v18 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s57, 8 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s56, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s47, 24 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v16, v18, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: 
buffer_store_dword v20, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s46, 8 -; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s44, 24 -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v12 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: s_lshl_b32 s7, s36, 8 +; SI-NEXT: s_and_b32 s9, s16, 0xff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s9, s34, 0xff +; SI-NEXT: s_lshl_b32 s11, s30, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v1, s7 +; SI-NEXT: s_and_b32 s7, s17, 0xff +; SI-NEXT: s_lshl_b32 s9, s85, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s84, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s83, 24 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_lshl_b32 s7, s94, 8 +; SI-NEXT: s_and_b32 s9, s18, 0xff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s9, s92, 0xff +; SI-NEXT: s_lshl_b32 s11, s90, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: s_and_b32 s7, s19, 0xff +; SI-NEXT: s_lshl_b32 s9, s82, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s81, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s80, 24 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_add_i32_e32 v12, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s43, 8 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xff, v15 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s42, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s9, s78, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s76, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s74, 24 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, 
s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s21, 0xff +; SI-NEXT: s_lshl_b32 s9, s71, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s70, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s69, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s41, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v9, v9, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s9, s88, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s72, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s62, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s23, 0xff +; SI-NEXT: s_lshl_b32 s9, s68, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s67, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s66, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s24, 0xff +; SI-NEXT: s_lshl_b32 s9, s60, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s58, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s56, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s25, 0xff +; SI-NEXT: s_lshl_b32 s9, s65, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s64, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s55, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 36, v0 -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: buffer_store_dword v11, v9, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v10 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s14, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, 
s5 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s13, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s12, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s11, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s26, 0xff +; SI-NEXT: s_lshl_b32 s9, s46, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s44, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s42, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s27, 0xff +; SI-NEXT: s_lshl_b32 s9, s54, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s53, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s52, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 s9, s40, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s14, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s29, 0xff +; SI-NEXT: s_lshl_b32 s9, s51, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s50, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s49, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s7, s10, 8 +; SI-NEXT: s_or_b32 s4, s4, s7 +; SI-NEXT: s_and_b32 s7, s8, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s11, s5 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; 
SI-NEXT: s_lshl_b32 s5, s10, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s48, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_and_b32 s5, s39, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s8, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s38, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s85, v4, 29 +; SI-NEXT: v_readlane_b32 s84, v4, 28 +; SI-NEXT: v_readlane_b32 s83, v4, 27 +; SI-NEXT: v_readlane_b32 s82, v4, 26 +; SI-NEXT: v_readlane_b32 s81, v4, 25 +; SI-NEXT: v_readlane_b32 s80, v4, 24 +; SI-NEXT: v_readlane_b32 s71, v4, 23 +; SI-NEXT: v_readlane_b32 s70, v4, 22 +; SI-NEXT: v_readlane_b32 s69, v4, 21 +; SI-NEXT: v_readlane_b32 s68, v4, 20 +; SI-NEXT: v_readlane_b32 s67, v4, 19 +; SI-NEXT: v_readlane_b32 s66, v4, 18 +; SI-NEXT: v_readlane_b32 s65, v4, 17 +; SI-NEXT: v_readlane_b32 s64, v4, 16 +; SI-NEXT: v_readlane_b32 s55, v4, 15 +; SI-NEXT: v_readlane_b32 s54, v4, 14 +; SI-NEXT: v_readlane_b32 s53, v4, 13 +; SI-NEXT: v_readlane_b32 s52, v4, 12 +; SI-NEXT: v_readlane_b32 s51, v4, 11 +; SI-NEXT: v_readlane_b32 s50, v4, 10 +; SI-NEXT: v_readlane_b32 s49, v4, 9 +; SI-NEXT: v_readlane_b32 s48, v4, 8 +; SI-NEXT: v_readlane_b32 s39, v4, 7 +; SI-NEXT: v_readlane_b32 s38, v4, 6 +; SI-NEXT: v_readlane_b32 s37, v4, 5 +; SI-NEXT: v_readlane_b32 s36, v4, 4 +; SI-NEXT: v_readlane_b32 s35, v4, 3 +; SI-NEXT: v_readlane_b32 s34, v4, 2 +; SI-NEXT: v_readlane_b32 s31, v4, 1 +; SI-NEXT: v_readlane_b32 s30, v4, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr85 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr83 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: 
$sgpr55 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr15 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr6 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_v16i32_to_v64i8_scalar: @@ -13390,42 +13470,46 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v48, v30 +; SI-NEXT: v_mov_b32_e32 v48, v28 +; SI-NEXT: v_mov_b32_e32 v38, v26 +; SI-NEXT: v_mov_b32_e32 v49, v24 +; SI-NEXT: v_mov_b32_e32 v51, v14 +; SI-NEXT: v_mov_b32_e32 v54, v12 +; SI-NEXT: v_mov_b32_e32 v34, v10 +; SI-NEXT: v_mov_b32_e32 v44, v6 ; SI-NEXT: v_mov_b32_e32 v33, v4 ; SI-NEXT: v_mov_b32_e32 v32, v2 ; SI-NEXT: v_mov_b32_e32 v31, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], 
s32 offset:36 -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; SI-NEXT: v_lshlrev_b32_e32 v40, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v7 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v9 -; SI-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v13 -; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:68 +; SI-NEXT: v_lshlrev_b32_e32 v50, 24, v1 +; SI-NEXT: v_lshlrev_b32_e32 v39, 8, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v61, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v17 ; SI-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v21 ; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v23 @@ -13437,49 +13521,48 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v2 ; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v51 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v39 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v30 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v42 -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v44 +; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v6 
+; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v52 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v10 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v12 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_lshlrev_b32_e32 v29, 8, v14 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v28 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v26 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v24 +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB27_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_and_b32_e32 v0, 0xff, v32 -; SI-NEXT: v_mov_b32_e32 v38, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 +; SI-NEXT: v_or_b32_e32 v0, v0, v39 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v40, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: v_or_b32_e32 v4, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: v_or_b32_e32 v0, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_or_b32_e32 v0, v0, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_mov_b32_e32 v43, v6 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v6, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_or_b32_e32 v0, v0, v57 +; SI-NEXT: v_or_b32_e32 v0, v0, v58 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v7, v1 -; SI-NEXT: v_mov_b32_e32 v61, v57 -; SI-NEXT: v_mov_b32_e32 v57, v7 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: v_or_b32_e32 v7, v0, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v18 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v20 @@ -13487,113 +13570,80 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v47, v1 -; SI-NEXT: v_mov_b32_e32 v41, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_mov_b32_e32 v29, v8 +; SI-NEXT: v_mov_b32_e32 v26, v8 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 ; SI-NEXT: v_or_b32_e32 v8, v0, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 ; SI-NEXT: v_or_b32_e32 v0, v0, v46 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v9, v1 -; SI-NEXT: v_mov_b32_e32 v63, v59 -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v56, v9 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v9, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v26 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v28 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v48 ; SI-NEXT: v_or_b32_e32 v0, v0, v45 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 
0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v25, v1 -; SI-NEXT: v_mov_b32_e32 v44, v10 ; SI-NEXT: v_or_b32_e32 v10, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v48 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v30 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v42 ; SI-NEXT: v_or_b32_e32 v0, v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v11, v1 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v16, v18 -; SI-NEXT: v_mov_b32_e32 v18, v20 -; SI-NEXT: v_mov_b32_e32 v20, v22 -; SI-NEXT: v_mov_b32_e32 v22, v24 -; SI-NEXT: v_mov_b32_e32 v24, v26 -; SI-NEXT: v_mov_b32_e32 v26, v28 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v25, v11 ; SI-NEXT: v_or_b32_e32 v11, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v60 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v40 ; SI-NEXT: v_or_b32_e32 v0, v0, v21 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v19, v1 -; SI-NEXT: v_mov_b32_e32 v36, v12 ; SI-NEXT: v_or_b32_e32 v12, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v35 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 ; SI-NEXT: v_or_b32_e32 v0, v0, v17 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v13, v1 -; SI-NEXT: v_mov_b32_e32 v62, v58 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v46, v45 -; SI-NEXT: v_mov_b32_e32 v45, v23 -; SI-NEXT: v_mov_b32_e32 v23, v21 -; SI-NEXT: v_mov_b32_e32 v21, v19 -; SI-NEXT: v_mov_b32_e32 v19, v17 -; SI-NEXT: v_mov_b32_e32 v17, v13 ; SI-NEXT: v_or_b32_e32 v13, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v55 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 -; SI-NEXT: v_or_b32_e32 v0, v0, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v36 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v53 +; SI-NEXT: v_or_b32_e32 v0, v0, v29 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 -; SI-NEXT: v_mov_b32_e32 v52, v14 +; SI-NEXT: v_or_b32_e32 v1, v14, v1 ; SI-NEXT: v_or_b32_e32 v14, v0, v1 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v43 +; SI-NEXT: v_or_b32_e32 v0, v0, v28 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mov_b32_e32 v27, v42 -; SI-NEXT: v_or_b32_e32 v1, v42, v1 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 -; SI-NEXT: v_or_b32_e32 v0, v0, v15 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_mov_b32_e32 v30, v48 -; SI-NEXT: v_mov_b32_e32 v48, v51 -; SI-NEXT: v_mov_b32_e32 v51, v15 +; SI-NEXT: v_or_b32_e32 v1, v15, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v44 ; SI-NEXT: v_or_b32_e32 v15, v0, v1 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v62 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: v_or_b32_e32 v3, v61, v3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v39, v40 -; SI-NEXT: v_mov_b32_e32 v40, v5 +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; 
SI-NEXT: v_or_b32_e32 v5, v2, v3 -; SI-NEXT: s_lshl_b32 s5, s17, 8 -; SI-NEXT: s_lshl_b32 s6, s19, 24 -; SI-NEXT: s_lshl_b32 s7, s23, 24 -; SI-NEXT: s_lshl_b32 s8, s27, 24 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_or_b32_e32 v3, s4, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_or_b32 s4, s4, s5 @@ -13602,6 +13652,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s22, 0xff ; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s23, 24 ; SI-NEXT: s_and_b32 s5, s5, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_or_b32 s5, s5, s6 @@ -13610,65 +13661,116 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s26, 0xff ; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s27, 24 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v52, v42 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_cbranch_execnz .LBB27_3 ; SI-NEXT: .LBB27_2: ; %cmp.true +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33 +; SI-NEXT: v_or_b32_e32 v1, v39, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v2, v63, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_add_i32 s28, s28, 3 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33 ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v1, v38, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v42, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_or_b32_e32 v0, s4, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v39, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v27 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v29 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v40, v1 +; SI-NEXT: v_or_b32_e32 v1, v61, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, 
vcc, 3, v44 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v16 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 +; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v47, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v22 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v48 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v25, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v23, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 @@ -13710,100 +13812,58 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: s_add_i32 s4, s4, 0x3000000 ; SI-NEXT: s_add_i32 s5, s5, 0x3000000 ; SI-NEXT: s_add_i32 s6, s6, 0x3000000 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v57, v1 -; SI-NEXT: v_or_b32_e32 
v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v16 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v18 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v56, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24 +; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v40 +; SI-NEXT: v_or_b32_e32 v0, v21, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v28, v1 +; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v50 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v25, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 +; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 -; SI-NEXT: v_or_b32_e32 v0, v23, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v17, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v21, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v35 -; SI-NEXT: v_or_b32_e32 v0, v19, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v17, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 ; 
SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 -; SI-NEXT: v_or_b32_e32 v0, v48, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v43 +; SI-NEXT: v_or_b32_e32 v0, v28, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -13826,47 +13886,12 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB27_4: -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v27, v42 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v38, v1 -; SI-NEXT: v_mov_b32_e32 v43, v6 -; SI-NEXT: v_mov_b32_e32 v29, v8 -; SI-NEXT: v_mov_b32_e32 v44, v10 -; SI-NEXT: v_mov_b32_e32 v36, v12 -; SI-NEXT: v_mov_b32_e32 v52, v14 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v16, v18 -; SI-NEXT: v_mov_b32_e32 v18, v20 -; SI-NEXT: v_mov_b32_e32 v20, v22 -; SI-NEXT: v_mov_b32_e32 v22, v24 -; SI-NEXT: v_mov_b32_e32 v24, v26 -; SI-NEXT: v_mov_b32_e32 v26, v28 -; SI-NEXT: v_mov_b32_e32 v30, v48 -; SI-NEXT: v_mov_b32_e32 v39, v40 -; SI-NEXT: v_mov_b32_e32 v41, v3 -; SI-NEXT: v_mov_b32_e32 v40, v5 -; SI-NEXT: v_mov_b32_e32 v63, v59 -; SI-NEXT: v_mov_b32_e32 v62, v58 -; SI-NEXT: v_mov_b32_e32 v61, v57 -; SI-NEXT: v_mov_b32_e32 v57, v7 -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v56, v9 -; SI-NEXT: v_mov_b32_e32 v46, v45 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v45, v23 -; SI-NEXT: v_mov_b32_e32 v25, v11 -; SI-NEXT: v_mov_b32_e32 v23, v21 -; SI-NEXT: v_mov_b32_e32 v21, v19 -; SI-NEXT: v_mov_b32_e32 v19, v17 -; SI-NEXT: v_mov_b32_e32 v17, v13 -; SI-NEXT: v_mov_b32_e32 v48, v51 -; SI-NEXT: v_mov_b32_e32 v51, v15 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v26, v8 +; SI-NEXT: v_mov_b32_e32 v52, v42 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; SI-NEXT: s_branch .LBB27_2 ; @@ -13889,142 +13914,121 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword 
v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v37, v30 -; VI-NEXT: v_mov_b32_e32 v61, v28 -; VI-NEXT: v_mov_b32_e32 v31, v0 +; VI-NEXT: v_mov_b32_e32 v36, v28 +; VI-NEXT: v_mov_b32_e32 v35, v26 +; VI-NEXT: v_mov_b32_e32 v34, v24 +; VI-NEXT: v_mov_b32_e32 v39, v14 +; VI-NEXT: v_mov_b32_e32 v48, v12 +; VI-NEXT: v_mov_b32_e32 v49, v10 +; VI-NEXT: v_mov_b32_e32 v50, v8 +; VI-NEXT: v_mov_b32_e32 v51, v6 +; VI-NEXT: v_mov_b32_e32 v44, v2 +; VI-NEXT: v_mov_b32_e32 v45, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:36 -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:48 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:56 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:64 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68 -; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1 -; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v13 -; VI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v17 +; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:4 +; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:12 +; VI-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:20 +; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:36 +; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:68 +; VI-NEXT: v_mov_b32_e32 v37, v30 +; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v5 +; VI-NEXT: 
v_lshlrev_b32_e32 v62, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; VI-NEXT: v_lshlrev_b32_e32 v57, 8, v17 ; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v21 ; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v23 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v25 -; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v27 -; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v29 -; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v25 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v27 +; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v29 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v48 -; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v28 +; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v2 +; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v33 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec -; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v38 -; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v36 -; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v35 -; VI-NEXT: s_waitcnt vmcnt(12) -; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v34 -; VI-NEXT: s_waitcnt vmcnt(10) -; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v30 -; VI-NEXT: s_waitcnt vmcnt(8) -; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v42 -; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v42, 8, v44 +; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v6 +; VI-NEXT: s_waitcnt vmcnt(13) +; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v8 +; VI-NEXT: s_waitcnt vmcnt(11) +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v10 +; VI-NEXT: s_waitcnt vmcnt(9) +; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v12 +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v28 +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v26 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v24 +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; VI-NEXT: s_cbranch_scc0 .LBB27_4 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v35, v4 +; VI-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v4, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v26, v4 ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: 
v_mov_b32_e32 v44, v2 -; VI-NEXT: v_mov_b32_e32 v49, v6 -; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v49, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v48, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v36, v58 -; VI-NEXT: v_mov_b32_e32 v58, v57 -; VI-NEXT: v_mov_b32_e32 v57, v7 +; VI-NEXT: v_or_b32_sdwa v0, v39, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v16, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v40, v3 -; VI-NEXT: v_mov_b32_e32 v48, v8 -; VI-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v63, v59 -; VI-NEXT: v_mov_b32_e32 v59, v56 -; VI-NEXT: v_mov_b32_e32 v56, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: v_mov_b32_e32 v46, v9 +; VI-NEXT: v_or_b32_sdwa v1, v34, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v34, v39 -; VI-NEXT: v_mov_b32_e32 v39, v10 +; VI-NEXT: v_or_b32_sdwa v0, v35, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v36, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v45, v25 -; VI-NEXT: v_mov_b32_e32 v25, v11 +; VI-NEXT: v_or_b32_sdwa v0, v37, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v40, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v43, v12 +; VI-NEXT: v_or_b32_sdwa v0, v55, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v54, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v54, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v16, v18 -; VI-NEXT: v_mov_b32_e32 v18, v20 -; VI-NEXT: v_mov_b32_e32 v20, v22 -; VI-NEXT: v_mov_b32_e32 v22, v24 -; VI-NEXT: v_mov_b32_e32 v24, v26 -; VI-NEXT: v_mov_b32_e32 v26, v61 -; VI-NEXT: v_mov_b32_e32 v61, v23 -; VI-NEXT: v_mov_b32_e32 v23, v21 -; VI-NEXT: v_mov_b32_e32 v21, v19 -; VI-NEXT: v_mov_b32_e32 v19, v17 -; VI-NEXT: v_mov_b32_e32 v17, v13 +; VI-NEXT: v_or_b32_sdwa v0, v53, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v52, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v53, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v42, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v41, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v32, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_or_b32 s4, s4, s5 -; VI-NEXT: v_mov_b32_e32 v29, v33 -; VI-NEXT: v_mov_b32_e32 v33, v28 -; VI-NEXT: v_mov_b32_e32 v28, v15 +; VI-NEXT: v_or_b32_sdwa v2, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v50, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v41, v5 +; VI-NEXT: v_or_b32_sdwa v0, v45, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff @@ -14054,47 +14058,91 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: s_lshl_b32 s7, s7, 16 ; VI-NEXT: s_or_b32 s6, s6, s7 -; VI-NEXT: v_mov_b32_e32 v30, 
v37 -; VI-NEXT: v_mov_b32_e32 v37, v27 -; VI-NEXT: v_mov_b32_e32 v27, v42 +; VI-NEXT: v_mov_b32_e32 v28, v44 +; VI-NEXT: v_mov_b32_e32 v33, v42 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: s_cbranch_execnz .LBB27_3 ; VI-NEXT: .LBB27_2: ; %cmp.true +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v28 +; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v26 +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 +; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_addk_i32 s4, 0x300 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v31 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v45 ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_e32 v0, s4, v0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v51 ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1 -; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v48 +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v48 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte 
Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v16 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v18 +; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v20 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v22 +; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v34 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v36 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 +; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 +; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53 ; VI-NEXT: s_add_i32 s16, s16, 3 ; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_lshl_b32 s5, s17, 8 @@ -14134,76 +14182,35 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 ; 
VI-NEXT: s_add_i32 s5, s5, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 -; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v16 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v18 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v20 -; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v22 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v24 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v26 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30 -; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v62 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v60 -; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 -; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v17, 
v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53 -; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v41 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v51 -; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32 +; VI-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 +; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: .LBB27_3: ; %end ; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload @@ -14224,43 +14231,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB27_4: -; VI-NEXT: v_mov_b32_e32 v44, v2 -; VI-NEXT: v_mov_b32_e32 v34, v39 -; VI-NEXT: v_mov_b32_e32 v35, v4 -; VI-NEXT: v_mov_b32_e32 v29, v33 -; VI-NEXT: v_mov_b32_e32 v49, v6 -; VI-NEXT: v_mov_b32_e32 v48, v8 -; VI-NEXT: v_mov_b32_e32 v39, v10 -; VI-NEXT: v_mov_b32_e32 v43, v12 -; VI-NEXT: v_mov_b32_e32 v16, v18 -; VI-NEXT: v_mov_b32_e32 v18, v20 -; VI-NEXT: v_mov_b32_e32 v20, v22 -; VI-NEXT: v_mov_b32_e32 v22, v24 -; VI-NEXT: v_mov_b32_e32 v24, v26 -; VI-NEXT: v_mov_b32_e32 v26, v61 -; VI-NEXT: v_mov_b32_e32 v30, v37 -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_mov_b32_e32 v41, v5 -; VI-NEXT: v_mov_b32_e32 v40, v3 -; VI-NEXT: v_mov_b32_e32 v63, v59 -; VI-NEXT: v_mov_b32_e32 v36, v58 -; VI-NEXT: v_mov_b32_e32 v58, v57 -; VI-NEXT: v_mov_b32_e32 v57, v7 -; VI-NEXT: v_mov_b32_e32 v59, v56 -; VI-NEXT: v_mov_b32_e32 v56, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: v_mov_b32_e32 v46, v9 -; VI-NEXT: 
v_mov_b32_e32 v45, v25 -; VI-NEXT: v_mov_b32_e32 v61, v23 -; VI-NEXT: v_mov_b32_e32 v25, v11 -; VI-NEXT: v_mov_b32_e32 v23, v21 -; VI-NEXT: v_mov_b32_e32 v21, v19 -; VI-NEXT: v_mov_b32_e32 v19, v17 -; VI-NEXT: v_mov_b32_e32 v17, v13 -; VI-NEXT: v_mov_b32_e32 v37, v27 -; VI-NEXT: v_mov_b32_e32 v27, v42 -; VI-NEXT: v_mov_b32_e32 v33, v28 -; VI-NEXT: v_mov_b32_e32 v28, v15 +; VI-NEXT: v_mov_b32_e32 v28, v44 +; VI-NEXT: v_mov_b32_e32 v26, v4 +; VI-NEXT: v_mov_b32_e32 v33, v42 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; VI-NEXT: s_branch .LBB27_2 ; @@ -14283,147 +14256,124 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v37, v30 -; GFX9-NEXT: v_mov_b32_e32 v61, v28 -; GFX9-NEXT: v_mov_b32_e32 v31, v0 +; GFX9-NEXT: v_mov_b32_e32 v36, v28 +; GFX9-NEXT: v_mov_b32_e32 v35, v26 +; GFX9-NEXT: v_mov_b32_e32 v34, v24 +; GFX9-NEXT: v_mov_b32_e32 v39, v14 +; GFX9-NEXT: v_mov_b32_e32 v48, v12 +; GFX9-NEXT: v_mov_b32_e32 v49, v10 +; GFX9-NEXT: v_mov_b32_e32 v50, v8 +; GFX9-NEXT: v_mov_b32_e32 v51, v6 +; GFX9-NEXT: v_mov_b32_e32 v44, v2 +; GFX9-NEXT: v_mov_b32_e32 v45, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:36 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:56 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:64 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68 -; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v17 +; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:4 +; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 +; GFX9-NEXT: 
buffer_load_ushort v54, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:36 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:68 +; GFX9-NEXT: v_mov_b32_e32 v37, v30 +; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v17 ; GFX9-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v21 ; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v23 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v25 -; GFX9-NEXT: v_lshlrev_b32_e32 v45, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29 -; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v29 +; GFX9-NEXT: s_waitcnt vmcnt(19) ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; GFX9-NEXT: s_waitcnt vmcnt(21) -; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v48 -; GFX9-NEXT: s_waitcnt vmcnt(20) -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v28 -; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(18) -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v38 -; GFX9-NEXT: s_waitcnt vmcnt(16) -; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v36 -; GFX9-NEXT: s_waitcnt vmcnt(14) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v35 -; GFX9-NEXT: s_waitcnt vmcnt(12) -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v34 -; GFX9-NEXT: s_waitcnt vmcnt(10) -; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v30 -; GFX9-NEXT: s_waitcnt vmcnt(8) -; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v42 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v44 +; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v2 +; GFX9-NEXT: s_waitcnt vmcnt(17) +; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v33 +; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec +; GFX9-NEXT: s_waitcnt vmcnt(15) +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(13) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v8 +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_lshlrev_b32_e32 v33, 8, v10 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v12 +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v28 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v26 +; 
GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v24 +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB27_4 ; GFX9-NEXT: ; %bb.1: ; %cmp.false -; GFX9-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v38, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v35, v4 +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v4, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v26, v4 ; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v44, v2 -; GFX9-NEXT: v_mov_b32_e32 v49, v6 -; GFX9-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v36, v58 -; GFX9-NEXT: v_mov_b32_e32 v58, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v7 +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v16, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v40, v3 -; GFX9-NEXT: v_mov_b32_e32 v48, v8 -; GFX9-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v63, v59 -; GFX9-NEXT: v_mov_b32_e32 v59, v56 -; GFX9-NEXT: v_mov_b32_e32 v56, v47 -; GFX9-NEXT: v_mov_b32_e32 v47, v46 -; GFX9-NEXT: v_mov_b32_e32 v46, v9 +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v17 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v34, v39 -; GFX9-NEXT: v_mov_b32_e32 v39, v10 +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v45, v25 -; GFX9-NEXT: v_mov_b32_e32 v25, v11 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v43, v12 +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v54, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v16, v18 -; GFX9-NEXT: v_mov_b32_e32 v18, v20 -; GFX9-NEXT: v_mov_b32_e32 v20, v22 -; GFX9-NEXT: v_mov_b32_e32 v22, v24 -; GFX9-NEXT: v_mov_b32_e32 v24, v26 -; GFX9-NEXT: v_mov_b32_e32 v26, v61 -; GFX9-NEXT: v_mov_b32_e32 v61, v23 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v21, v19 -; GFX9-NEXT: v_mov_b32_e32 v19, v17 -; GFX9-NEXT: v_mov_b32_e32 v17, v13 +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v42, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v41, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, 
v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s4, s4, s5 -; GFX9-NEXT: v_mov_b32_e32 v29, v33 -; GFX9-NEXT: v_mov_b32_e32 v33, v28 -; GFX9-NEXT: v_mov_b32_e32 v28, v15 +; GFX9-NEXT: v_or_b32_sdwa v2, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v3, v50, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s4, 0xffff -; GFX9-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v41, v5 +; GFX9-NEXT: v_or_b32_sdwa v0, v45, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s4, v0 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff @@ -14453,48 +14403,78 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_and_b32 s6, s6, 0xffff ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_or_b32 s6, s6, s7 -; GFX9-NEXT: v_mov_b32_e32 v30, v37 -; GFX9-NEXT: v_mov_b32_e32 v37, v27 -; GFX9-NEXT: v_mov_b32_e32 v27, v42 +; GFX9-NEXT: v_mov_b32_e32 v28, v44 +; GFX9-NEXT: v_mov_b32_e32 v33, v42 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 ; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_cbranch_execnz .LBB27_3 ; GFX9-NEXT: .LBB27_2: ; %cmp.true +; GFX9-NEXT: v_add_u32_e32 v1, 3, v28 +; GFX9-NEXT: v_add_u32_e32 v2, 3, v26 +; GFX9-NEXT: s_movk_i32 s4, 0x300 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s5, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s29, 8 ; GFX9-NEXT: s_or_b32 s5, s6, s5 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v31 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v44 -; GFX9-NEXT: v_add_u32_e32 v2, 3, v35 -; GFX9-NEXT: s_movk_i32 s4, 0x300 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v45 ; GFX9-NEXT: s_addk_i32 s5, 0x300 -; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_and_b32 s5, s5, 0xffff ; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s5, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v51 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 +; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 ; GFX9-NEXT: v_add_u32_e32 v1, 3, v48 -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_add_u32_e32 v0, 3, v39 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v43 -; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v16 +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v18 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v20 +; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v22 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v34 +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: 
v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v35 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v36 +; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v37 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v40 +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_add_i32 s16, s16, 3 ; GFX9-NEXT: s_and_b32 s5, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s17, 8 @@ -14518,6 +14498,20 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_or_b32 s7, s8, s7 ; GFX9-NEXT: s_addk_i32 s6, 0x300 ; GFX9-NEXT: s_addk_i32 s7, 0x300 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v55 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v54 +; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v53 ; GFX9-NEXT: s_and_b32 s6, s6, 0xffff ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_add_i32 s24, s24, 3 @@ -14534,76 +14528,35 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_and_b32 s7, s7, 0xffff ; GFX9-NEXT: s_lshl_b32 s8, s8, 16 ; GFX9-NEXT: s_or_b32 s7, s7, s8 -; GFX9-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v16 
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v18 -; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v20 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v22 -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v24 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v30 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v62 -; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v60 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v29 -; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v55 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v54 -; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: 
v_add_u32_e32 v0, 3, v53 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 -; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v33 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v41 +; GFX9-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v51 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 -; GFX9-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v32 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v43 +; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v0, s5 ; GFX9-NEXT: v_mov_b32_e32 v1, s6 +; GFX9-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-NEXT: .LBB27_3: ; %end ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload @@ -14624,43 +14577,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] ; GFX9-NEXT: .LBB27_4: -; GFX9-NEXT: v_mov_b32_e32 v44, v2 -; GFX9-NEXT: v_mov_b32_e32 v34, v39 -; GFX9-NEXT: v_mov_b32_e32 v35, v4 -; GFX9-NEXT: v_mov_b32_e32 v29, v33 -; GFX9-NEXT: v_mov_b32_e32 v49, v6 -; GFX9-NEXT: v_mov_b32_e32 v48, v8 -; GFX9-NEXT: v_mov_b32_e32 v39, v10 -; GFX9-NEXT: v_mov_b32_e32 v43, v12 -; GFX9-NEXT: v_mov_b32_e32 v16, v18 -; GFX9-NEXT: v_mov_b32_e32 v18, v20 -; GFX9-NEXT: v_mov_b32_e32 v20, v22 -; GFX9-NEXT: v_mov_b32_e32 v22, v24 -; GFX9-NEXT: v_mov_b32_e32 v24, v26 -; GFX9-NEXT: v_mov_b32_e32 v26, v61 -; GFX9-NEXT: v_mov_b32_e32 v30, v37 -; GFX9-NEXT: v_mov_b32_e32 v38, v1 -; GFX9-NEXT: v_mov_b32_e32 v41, v5 -; GFX9-NEXT: v_mov_b32_e32 v40, v3 -; GFX9-NEXT: v_mov_b32_e32 v63, v59 -; GFX9-NEXT: v_mov_b32_e32 v36, v58 -; GFX9-NEXT: v_mov_b32_e32 v58, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v7 -; GFX9-NEXT: v_mov_b32_e32 v59, v56 -; GFX9-NEXT: v_mov_b32_e32 v56, v47 -; GFX9-NEXT: v_mov_b32_e32 v47, v46 -; GFX9-NEXT: v_mov_b32_e32 v46, v9 -; GFX9-NEXT: v_mov_b32_e32 v45, v25 -; GFX9-NEXT: v_mov_b32_e32 v61, v23 -; GFX9-NEXT: v_mov_b32_e32 v25, v11 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v21, v19 -; GFX9-NEXT: 
v_mov_b32_e32 v19, v17 -; GFX9-NEXT: v_mov_b32_e32 v17, v13 -; GFX9-NEXT: v_mov_b32_e32 v37, v27 -; GFX9-NEXT: v_mov_b32_e32 v27, v42 -; GFX9-NEXT: v_mov_b32_e32 v33, v28 -; GFX9-NEXT: v_mov_b32_e32 v28, v15 +; GFX9-NEXT: v_mov_b32_e32 v28, v44 +; GFX9-NEXT: v_mov_b32_e32 v26, v4 +; GFX9-NEXT: v_mov_b32_e32 v33, v42 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: s_branch .LBB27_2 ; @@ -16911,78 +16830,93 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a, ; SI-LABEL: bitcast_v16f32_to_v32i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v3, v2 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_mov_b32_e32 v30, v1 -; SI-NEXT: v_mov_b32_e32 v28, v0 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: v_mov_b32_e32 v33, v1 +; SI-NEXT: v_mov_b32_e32 v32, v0 +; SI-NEXT: v_mov_b32_e32 v34, s16 +; SI-NEXT: v_mov_b32_e32 v35, s17 +; SI-NEXT: v_mov_b32_e32 v36, s18 +; SI-NEXT: v_mov_b32_e32 v37, s19 +; SI-NEXT: v_mov_b32_e32 v38, s20 +; SI-NEXT: v_mov_b32_e32 v39, s21 +; SI-NEXT: v_mov_b32_e32 v48, s22 +; SI-NEXT: v_mov_b32_e32 v49, s23 +; SI-NEXT: v_mov_b32_e32 v50, s24 +; SI-NEXT: v_mov_b32_e32 v51, s25 +; SI-NEXT: v_mov_b32_e32 v52, s26 +; SI-NEXT: v_mov_b32_e32 v53, s27 +; SI-NEXT: v_mov_b32_e32 v54, s28 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v26, s29 +; SI-NEXT: v_mov_b32_e32 v55, s29 ; SI-NEXT: s_cbranch_scc0 .LBB37_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 +; SI-NEXT: v_lshr_b64 v[29:30], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[54:55], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 ; SI-NEXT: s_cbranch_execnz .LBB37_3 ; SI-NEXT: .LBB37_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: 
v_add_f32_e32 v0, 1.0, v0 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 -; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; SI-NEXT: v_add_f32_e32 v35, 1.0, v35 +; SI-NEXT: v_add_f32_e32 v34, 1.0, v34 +; SI-NEXT: v_add_f32_e32 v37, 1.0, v37 +; SI-NEXT: v_add_f32_e32 v36, 1.0, v36 +; SI-NEXT: v_add_f32_e32 v39, 1.0, v39 +; SI-NEXT: v_add_f32_e32 v38, 1.0, v38 +; SI-NEXT: v_add_f32_e32 v49, 1.0, v49 +; SI-NEXT: v_add_f32_e32 v48, 1.0, v48 +; SI-NEXT: v_add_f32_e32 v51, 1.0, v51 +; SI-NEXT: v_add_f32_e32 v50, 1.0, v50 +; SI-NEXT: v_add_f32_e32 v53, 1.0, v53 +; SI-NEXT: v_add_f32_e32 v52, 1.0, v52 +; SI-NEXT: v_add_f32_e32 v55, 1.0, v55 +; SI-NEXT: v_add_f32_e32 v54, 1.0, v54 +; SI-NEXT: v_add_f32_e32 v33, 1.0, v33 +; SI-NEXT: v_add_f32_e32 v32, 1.0, v32 +; SI-NEXT: v_lshr_b64 v[29:30], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[54:55], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 ; SI-NEXT: .LBB37_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v34 +; SI-NEXT: v_mov_b32_e32 v2, v35 +; SI-NEXT: v_mov_b32_e32 v4, v36 +; SI-NEXT: v_mov_b32_e32 v6, v37 +; SI-NEXT: v_mov_b32_e32 v8, v38 +; SI-NEXT: v_mov_b32_e32 v10, v39 +; SI-NEXT: v_mov_b32_e32 v12, v48 +; SI-NEXT: v_mov_b32_e32 v14, v49 +; SI-NEXT: v_mov_b32_e32 v16, v50 +; SI-NEXT: v_mov_b32_e32 v18, v51 +; SI-NEXT: v_mov_b32_e32 v20, v52 +; SI-NEXT: v_mov_b32_e32 v22, v53 +; SI-NEXT: v_mov_b32_e32 v24, v54 +; SI-NEXT: v_mov_b32_e32 v26, v55 +; SI-NEXT: v_mov_b32_e32 v28, v32 +; SI-NEXT: v_mov_b32_e32 v30, v33 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB37_4: ; SI-NEXT: ; implicit-def: $vgpr1 @@ -16997,10 +16931,10 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a, ; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; 
implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: s_branch .LBB37_2 ; ; VI-LABEL: bitcast_v16f32_to_v32i16_scalar: @@ -24115,433 +24049,494 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3 ; SI-LABEL: bitcast_v16f32_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v40, s30, 0 +; SI-NEXT: v_writelane_b32 v40, s31, 1 +; SI-NEXT: v_writelane_b32 v40, s34, 2 +; SI-NEXT: v_writelane_b32 v40, s35, 3 +; SI-NEXT: v_writelane_b32 v40, s36, 4 +; SI-NEXT: v_writelane_b32 v40, s37, 5 +; SI-NEXT: v_writelane_b32 v40, s38, 6 +; SI-NEXT: v_writelane_b32 v40, s39, 7 +; SI-NEXT: v_writelane_b32 v40, s48, 8 +; SI-NEXT: v_writelane_b32 v40, s49, 9 +; SI-NEXT: v_writelane_b32 v40, s50, 10 +; SI-NEXT: v_writelane_b32 v40, s51, 11 +; SI-NEXT: v_writelane_b32 v40, s52, 12 +; SI-NEXT: v_writelane_b32 v40, s53, 13 +; SI-NEXT: v_writelane_b32 v40, s54, 14 +; SI-NEXT: v_writelane_b32 v40, s55, 15 +; SI-NEXT: v_writelane_b32 v40, s64, 16 +; SI-NEXT: v_writelane_b32 v40, s65, 17 +; SI-NEXT: v_writelane_b32 v40, s66, 18 +; SI-NEXT: v_writelane_b32 v40, s67, 19 +; SI-NEXT: v_writelane_b32 v40, s68, 20 +; SI-NEXT: v_writelane_b32 v40, s69, 21 +; SI-NEXT: v_writelane_b32 v40, s70, 22 +; SI-NEXT: v_writelane_b32 v40, s71, 23 +; SI-NEXT: v_writelane_b32 v40, s80, 24 +; SI-NEXT: v_writelane_b32 v40, s81, 25 +; SI-NEXT: v_writelane_b32 v40, s82, 26 +; SI-NEXT: v_writelane_b32 v40, s83, 27 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_mov_b32_e32 v28, s16 -; SI-NEXT: v_mov_b32_e32 v25, s17 -; SI-NEXT: v_mov_b32_e32 v20, s18 -; SI-NEXT: v_mov_b32_e32 v19, s19 +; SI-NEXT: v_writelane_b32 v40, s84, 28 +; SI-NEXT: v_readfirstlane_b32 s36, v1 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s37, v2 +; SI-NEXT: v_writelane_b32 v40, s85, 29 +; SI-NEXT: s_cbranch_scc0 .LBB49_3 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: s_lshr_b32 s82, s37, 24 +; SI-NEXT: s_lshr_b32 s84, s37, 16 +; SI-NEXT: s_lshr_b32 s85, s37, 8 +; SI-NEXT: s_lshr_b32 s71, s29, 24 +; SI-NEXT: s_lshr_b32 s81, s29, 16 +; SI-NEXT: s_lshr_b32 s83, s29, 8 +; SI-NEXT: s_lshr_b32 s68, s27, 24 +; SI-NEXT: s_lshr_b32 s70, s27, 16 +; SI-NEXT: s_lshr_b32 s80, s27, 8 +; SI-NEXT: s_lshr_b32 s65, s25, 24 +; SI-NEXT: s_lshr_b32 s67, s25, 16 +; SI-NEXT: s_lshr_b32 s69, s25, 8 +; SI-NEXT: s_lshr_b32 s54, s23, 24 +; SI-NEXT: s_lshr_b32 s64, s23, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 8 +; SI-NEXT: s_lshr_b32 s51, s21, 24 +; SI-NEXT: s_lshr_b32 s53, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s21, 8 +; SI-NEXT: s_lshr_b32 s48, s19, 24 +; SI-NEXT: s_lshr_b32 s50, s19, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 8 +; SI-NEXT: s_lshr_b32 s38, s17, 24 +; SI-NEXT: s_lshr_b32 s39, s17, 16 +; SI-NEXT: s_lshr_b32 s49, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[36:37], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[36:37], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[36:37], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 24 +; SI-NEXT: 
s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[94:95], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 8 +; SI-NEXT: s_cbranch_execnz .LBB49_4 +; SI-NEXT: .LBB49_2: ; %cmp.true +; SI-NEXT: v_add_f32_e64 v20, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v22, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v16, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v18, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v11, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v15, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v9, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v10, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v7, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v8, s24, 1.0 +; SI-NEXT: v_add_f32_e64 v5, s27, 1.0 +; SI-NEXT: v_add_f32_e64 v6, s26, 1.0 +; SI-NEXT: v_add_f32_e64 v3, s29, 1.0 +; SI-NEXT: v_add_f32_e64 v4, s28, 1.0 +; SI-NEXT: v_add_f32_e64 v1, s37, 1.0 +; SI-NEXT: v_add_f32_e64 v2, s36, 1.0 +; SI-NEXT: v_readfirstlane_b32 s16, v22 +; SI-NEXT: v_readfirstlane_b32 s17, v20 +; SI-NEXT: v_readfirstlane_b32 s18, v18 +; SI-NEXT: v_readfirstlane_b32 s19, v16 +; SI-NEXT: v_readfirstlane_b32 s20, v15 +; SI-NEXT: v_readfirstlane_b32 s21, v11 +; SI-NEXT: v_readfirstlane_b32 s22, v10 +; SI-NEXT: v_readfirstlane_b32 s23, v9 +; SI-NEXT: v_readfirstlane_b32 s24, v8 +; SI-NEXT: v_readfirstlane_b32 s25, v7 +; SI-NEXT: v_readfirstlane_b32 s26, v6 +; SI-NEXT: v_readfirstlane_b32 s27, v5 +; SI-NEXT: v_readfirstlane_b32 s14, v4 +; SI-NEXT: v_readfirstlane_b32 s15, v3 +; SI-NEXT: v_readfirstlane_b32 s8, v2 +; SI-NEXT: v_readfirstlane_b32 s9, v1 +; SI-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[94:95], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 8 +; SI-NEXT: v_lshrrev_b32_e32 v12, 24, v1 +; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v1 +; SI-NEXT: v_lshrrev_b32_e32 v17, 24, v3 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v3 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v5 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v5 +; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v28, 
8, v7 +; SI-NEXT: v_lshrrev_b32_e32 v29, 24, v9 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v9 +; SI-NEXT: v_lshrrev_b32_e32 v32, 24, v11 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v11 +; SI-NEXT: v_lshrrev_b32_e32 v35, 24, v16 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v37, 8, v16 +; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v20 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v20 +; SI-NEXT: s_branch .LBB49_5 +; SI-NEXT: .LBB49_3: +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr83 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr85 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_branch .LBB49_2 +; SI-NEXT: .LBB49_4: +; SI-NEXT: v_mov_b32_e32 v22, s16 +; SI-NEXT: v_mov_b32_e32 v20, s17 +; SI-NEXT: v_mov_b32_e32 v18, s18 +; SI-NEXT: v_mov_b32_e32 v16, s19 ; SI-NEXT: v_mov_b32_e32 v15, s20 -; SI-NEXT: v_mov_b32_e32 v14, s21 -; SI-NEXT: v_mov_b32_e32 v11, s22 +; SI-NEXT: v_mov_b32_e32 v11, s21 +; SI-NEXT: v_mov_b32_e32 v10, s22 ; SI-NEXT: v_mov_b32_e32 v9, s23 ; SI-NEXT: v_mov_b32_e32 v8, s24 ; SI-NEXT: v_mov_b32_e32 v7, s25 ; SI-NEXT: v_mov_b32_e32 v6, s26 ; SI-NEXT: v_mov_b32_e32 v5, s27 ; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v3, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword 
v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB49_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v10, v2, v1, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v12, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v13, v2, v1, 8 -; SI-NEXT: v_alignbit_b32 v16, v3, v4, 24 -; SI-NEXT: v_alignbit_b32 v17, v3, v4, 16 -; SI-NEXT: v_alignbit_b32 v18, v3, v4, 8 -; SI-NEXT: v_alignbit_b32 v21, v5, v6, 24 -; SI-NEXT: v_alignbit_b32 v22, v5, v6, 16 -; SI-NEXT: v_alignbit_b32 v23, v5, v6, 8 -; SI-NEXT: v_alignbit_b32 v29, v7, v8, 24 -; SI-NEXT: v_alignbit_b32 v30, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v31, v7, v8, 8 -; SI-NEXT: v_alignbit_b32 v35, v9, v11, 24 -; SI-NEXT: v_alignbit_b32 v36, v9, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v9, v11, 8 -; SI-NEXT: v_alignbit_b32 v49, v14, v15, 24 -; SI-NEXT: v_alignbit_b32 v50, v14, v15, 16 -; SI-NEXT: v_alignbit_b32 v52, v14, v15, 8 -; SI-NEXT: v_alignbit_b32 v55, v19, v20, 24 -; SI-NEXT: v_alignbit_b32 v41, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v43, v19, v20, 8 -; SI-NEXT: v_alignbit_b32 v46, v25, v28, 24 -; SI-NEXT: v_alignbit_b32 v56, v25, v28, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v25, v28, 8 -; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v2 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v2 -; SI-NEXT: v_lshrrev_b32_e32 v32, 24, v3 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v3 -; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v3 -; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v5 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v5 -; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v7 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v7 -; SI-NEXT: v_lshrrev_b32_e32 v40, 24, v9 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v9 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v14 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v14 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v19 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v19 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 8, v19 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 24, v25 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v25 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v25 -; SI-NEXT: s_cbranch_execnz .LBB49_3 -; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: 
v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_alignbit_b32 v10, v2, v1, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v12, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v13, v2, v1, 8 -; SI-NEXT: v_alignbit_b32 v16, v3, v4, 24 -; SI-NEXT: v_alignbit_b32 v17, v3, v4, 16 -; SI-NEXT: v_alignbit_b32 v18, v3, v4, 8 -; SI-NEXT: v_alignbit_b32 v21, v5, v6, 24 -; SI-NEXT: v_alignbit_b32 v22, v5, v6, 16 -; SI-NEXT: v_alignbit_b32 v23, v5, v6, 8 -; SI-NEXT: v_alignbit_b32 v29, v7, v8, 24 -; SI-NEXT: v_alignbit_b32 v30, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v31, v7, v8, 8 -; SI-NEXT: v_alignbit_b32 v35, v9, v11, 24 -; SI-NEXT: v_alignbit_b32 v36, v9, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v9, v11, 8 -; SI-NEXT: v_alignbit_b32 v49, v14, v15, 24 -; SI-NEXT: v_alignbit_b32 v50, v14, v15, 16 -; SI-NEXT: v_alignbit_b32 v52, v14, v15, 8 -; SI-NEXT: v_alignbit_b32 v55, v19, v20, 24 -; SI-NEXT: v_alignbit_b32 v41, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v43, v19, v20, 8 -; SI-NEXT: v_alignbit_b32 v46, v25, v28, 24 -; SI-NEXT: v_alignbit_b32 v56, v25, v28, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v25, v28, 8 -; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v2 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v2 -; SI-NEXT: v_lshrrev_b32_e32 v32, 24, v3 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v3 -; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v3 -; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v5 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v5 -; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v7 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v7 -; SI-NEXT: v_lshrrev_b32_e32 v40, 24, v9 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v9 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v14 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v14 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v19 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v19 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 8, v19 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 24, v25 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v25 +; SI-NEXT: v_mov_b32_e32 v2, s36 +; SI-NEXT: v_mov_b32_e32 v1, s37 +; SI-NEXT: v_mov_b32_e32 v48, s49 +; SI-NEXT: v_mov_b32_e32 v39, s39 +; SI-NEXT: v_mov_b32_e32 v38, s38 +; SI-NEXT: v_mov_b32_e32 v37, s52 +; SI-NEXT: v_mov_b32_e32 v36, s50 +; SI-NEXT: v_mov_b32_e32 v35, s48 +; SI-NEXT: v_mov_b32_e32 v34, s55 +; SI-NEXT: v_mov_b32_e32 v33, s53 +; SI-NEXT: v_mov_b32_e32 v32, s51 +; SI-NEXT: v_mov_b32_e32 v31, s66 +; SI-NEXT: v_mov_b32_e32 v30, s64 +; SI-NEXT: v_mov_b32_e32 v29, s54 +; SI-NEXT: v_mov_b32_e32 v28, s69 +; SI-NEXT: v_mov_b32_e32 v27, s67 +; SI-NEXT: v_mov_b32_e32 v26, s65 +; SI-NEXT: v_mov_b32_e32 v25, s80 +; SI-NEXT: v_mov_b32_e32 v24, s70 +; SI-NEXT: v_mov_b32_e32 v23, s68 +; SI-NEXT: v_mov_b32_e32 v21, s83 +; SI-NEXT: v_mov_b32_e32 v19, s81 +; SI-NEXT: v_mov_b32_e32 v17, s71 +; SI-NEXT: v_mov_b32_e32 v14, s85 +; SI-NEXT: v_mov_b32_e32 v13, s84 +; SI-NEXT: v_mov_b32_e32 v12, s82 +; SI-NEXT: .LBB49_5: ; %end +; SI-NEXT: v_and_b32_e32 v22, 
0xff, v22 +; SI-NEXT: s_lshl_b32 s5, s34, 8 +; SI-NEXT: v_or_b32_e32 v22, s5, v22 +; SI-NEXT: s_and_b32 s5, s30, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s94, 24 +; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: v_or_b32_e32 v22, s5, v22 +; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v25 -; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: v_and_b32_e32 v28, 0xff, v28 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v58, 8, v58 -; SI-NEXT: v_and_b32_e32 v56, 0xff, v56 -; SI-NEXT: v_or_b32_e32 v28, v28, v58 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v56 -; SI-NEXT: v_lshlrev_b32_e32 v46, 24, v46 -; SI-NEXT: v_or_b32_e32 v46, v46, v56 -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; SI-NEXT: v_and_b32_e32 v25, 0xff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 -; SI-NEXT: v_or_b32_e32 v28, v28, v46 -; SI-NEXT: v_or_b32_e32 v10, v25, v10 -; SI-NEXT: v_and_b32_e32 v25, 0xff, v63 -; SI-NEXT: buffer_store_dword v28, v0, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v48 +; SI-NEXT: v_or_b32_e32 v20, v20, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v39 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v18 +; SI-NEXT: s_lshl_b32 s5, s92, 8 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v38, 24, v38 +; SI-NEXT: v_or_b32_e32 v18, s5, v18 +; SI-NEXT: s_and_b32 s5, s90, 0xff +; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_or_b32_e32 v22, v38, v22 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s88, 24 +; SI-NEXT: v_or_b32_e32 v20, v20, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v18, s5, v18 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v62 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v25, v28, v25 -; SI-NEXT: v_or_b32_e32 v10, v10, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v10, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v43 -; SI-NEXT: v_or_b32_e32 v10, v10, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v41 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v55 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v20, v25, v20 -; SI-NEXT: v_or_b32_e32 v10, v10, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v10, v20, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v18, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v61 -; SI-NEXT: v_or_b32_e32 v10, v10, v19 -; SI-NEXT: v_and_b32_e32 v19, 0xff, v60 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v59 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_or_b32_e32 v10, v10, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v10, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v52 -; SI-NEXT: v_or_b32_e32 v10, v10, v15 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v50 +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v37 +; 
SI-NEXT: v_or_b32_e32 v16, v16, v18 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v36 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 +; SI-NEXT: s_lshl_b32 s5, s74, 8 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v35 +; SI-NEXT: v_or_b32_e32 v15, s5, v15 +; SI-NEXT: s_and_b32 s5, s62, 0xff +; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_or_b32_e32 v18, v20, v18 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s60, 24 +; SI-NEXT: v_or_b32_e32 v16, v16, v18 +; SI-NEXT: v_add_i32_e32 v18, vcc, 12, v0 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v16, v18, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v15, s5, v15 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v16, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v34 +; SI-NEXT: v_or_b32_e32 v11, v11, v15 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v33 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: s_lshl_b32 s5, s78, 8 ; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v49 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v15, v19, v15 -; SI-NEXT: v_or_b32_e32 v10, v10, v15 -; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v10, v15, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v57 -; SI-NEXT: v_or_b32_e32 v10, v10, v14 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v45 +; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v32 +; SI-NEXT: v_or_b32_e32 v10, s5, v10 +; SI-NEXT: s_and_b32 s5, s76, 0xff +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: v_or_b32_e32 v15, v16, v15 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s72, 24 +; SI-NEXT: v_or_b32_e32 v11, v11, v15 +; SI-NEXT: v_add_i32_e32 v15, vcc, 20, v0 ; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: v_or_b32_e32 v10, v10, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v10, v14, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v11, v15, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v10, s5, v10 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v37 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v35 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v44 +; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v31 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v42 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v30 +; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: s_lshl_b32 s5, s58, 8 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v40 +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v29 +; SI-NEXT: v_or_b32_e32 v8, s5, v8 +; SI-NEXT: s_and_b32 s5, s56, 0xff ; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: s_lshl_b32 s5, s5, 
16 +; SI-NEXT: s_lshl_b32 s7, s46, 24 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0 +; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: s_or_b32 s5, s7, s5 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: v_or_b32_e32 v8, s5, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v31 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v29 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_add_i32_e32 v9, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v54 +; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v28 ; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v53 +; SI-NEXT: v_and_b32_e32 v8, 0xff, v27 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: s_lshl_b32 s5, s44, 8 ; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v51 +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v26 +; SI-NEXT: v_or_b32_e32 v6, s5, v6 +; SI-NEXT: s_and_b32 s5, s42, 0xff ; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: v_or_b32_e32 v8, v9, v8 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s40, 24 ; SI-NEXT: v_or_b32_e32 v7, v7, v8 ; SI-NEXT: v_add_i32_e32 v8, vcc, 36, v0 +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: s_or_b32 s5, s7, s5 ; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v6, s5, v6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v23 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v21 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 ; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v48 +; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v25 ; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v39 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v24 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_lshl_b32 s5, s14, 8 ; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v38 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v23 +; SI-NEXT: v_or_b32_e32 v4, s5, v4 +; SI-NEXT: s_and_b32 s5, s12, 0xff ; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 ; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s10, 24 ; SI-NEXT: v_or_b32_e32 v5, v5, v6 ; SI-NEXT: v_add_i32_e32 v6, vcc, 44, v0 +; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: s_or_b32 s5, s7, s5 ; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: v_or_b32_e32 v4, s5, v4 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v18 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v16 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen ; 
SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v34 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v21 ; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v33 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v19 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_lshl_b32 s5, s8, 8 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v32 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v17 +; SI-NEXT: v_or_b32_e32 v2, s5, v2 +; SI-NEXT: s_and_b32 s5, s6, 0xff ; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 ; SI-NEXT: v_or_b32_e32 v3, v3, v4 ; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v2, s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v13 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v14 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v26 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v13 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v12 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; 
SI-NEXT: v_readlane_b32 s85, v40, 29 +; SI-NEXT: v_readlane_b32 s84, v40, 28 +; SI-NEXT: v_readlane_b32 s83, v40, 27 +; SI-NEXT: v_readlane_b32 s82, v40, 26 +; SI-NEXT: v_readlane_b32 s81, v40, 25 +; SI-NEXT: v_readlane_b32 s80, v40, 24 +; SI-NEXT: v_readlane_b32 s71, v40, 23 +; SI-NEXT: v_readlane_b32 s70, v40, 22 +; SI-NEXT: v_readlane_b32 s69, v40, 21 +; SI-NEXT: v_readlane_b32 s68, v40, 20 +; SI-NEXT: v_readlane_b32 s67, v40, 19 +; SI-NEXT: v_readlane_b32 s66, v40, 18 +; SI-NEXT: v_readlane_b32 s65, v40, 17 +; SI-NEXT: v_readlane_b32 s64, v40, 16 +; SI-NEXT: v_readlane_b32 s55, v40, 15 +; SI-NEXT: v_readlane_b32 s54, v40, 14 +; SI-NEXT: v_readlane_b32 s53, v40, 13 +; SI-NEXT: v_readlane_b32 s52, v40, 12 +; SI-NEXT: v_readlane_b32 s51, v40, 11 +; SI-NEXT: v_readlane_b32 s50, v40, 10 +; SI-NEXT: v_readlane_b32 s49, v40, 9 +; SI-NEXT: v_readlane_b32 s48, v40, 8 +; SI-NEXT: v_readlane_b32 s39, v40, 7 +; SI-NEXT: v_readlane_b32 s38, v40, 6 +; SI-NEXT: v_readlane_b32 s37, v40, 5 +; SI-NEXT: v_readlane_b32 s36, v40, 4 +; SI-NEXT: v_readlane_b32 s35, v40, 3 +; SI-NEXT: v_readlane_b32 s34, v40, 2 +; SI-NEXT: v_readlane_b32 s31, v40, 1 +; SI-NEXT: v_readlane_b32 s30, v40, 0 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; kill: killed $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v16f32_to_v64i8_scalar: ; VI: ; %bb.0: @@ -28259,42 +28254,46 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded 
Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v48, v30 +; SI-NEXT: v_mov_b32_e32 v48, v28 +; SI-NEXT: v_mov_b32_e32 v38, v26 +; SI-NEXT: v_mov_b32_e32 v49, v24 +; SI-NEXT: v_mov_b32_e32 v51, v14 +; SI-NEXT: v_mov_b32_e32 v54, v12 +; SI-NEXT: v_mov_b32_e32 v34, v10 +; SI-NEXT: v_mov_b32_e32 v44, v6 ; SI-NEXT: v_mov_b32_e32 v33, v4 ; SI-NEXT: v_mov_b32_e32 v32, v2 ; SI-NEXT: v_mov_b32_e32 v31, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; SI-NEXT: v_lshlrev_b32_e32 v40, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v7 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v9 -; SI-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v13 -; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:68 +; SI-NEXT: v_lshlrev_b32_e32 v50, 24, v1 +; SI-NEXT: v_lshlrev_b32_e32 v39, 8, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v61, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; 
SI-NEXT: v_lshlrev_b32_e32 v59, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v17 ; SI-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v21 ; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v23 @@ -28306,49 +28305,48 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v2 ; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v51 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v39 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v30 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v42 -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v44 +; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v6 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v52 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v10 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v12 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_lshlrev_b32_e32 v29, 8, v14 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v28 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v26 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v24 +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB51_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_and_b32_e32 v0, 0xff, v32 -; SI-NEXT: v_mov_b32_e32 v38, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 +; SI-NEXT: v_or_b32_e32 v0, v0, v39 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v40, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: v_or_b32_e32 v4, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: v_or_b32_e32 v0, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_or_b32_e32 v0, v0, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_mov_b32_e32 v43, v6 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v6, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_or_b32_e32 v0, v0, v57 +; SI-NEXT: v_or_b32_e32 v0, v0, v58 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v7, v1 -; SI-NEXT: v_mov_b32_e32 v61, v57 -; SI-NEXT: v_mov_b32_e32 v57, v7 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: v_or_b32_e32 v7, v0, v1 ; 
SI-NEXT: v_and_b32_e32 v0, 0xff, v18 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v20 @@ -28356,113 +28354,80 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v47, v1 -; SI-NEXT: v_mov_b32_e32 v41, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_mov_b32_e32 v29, v8 +; SI-NEXT: v_mov_b32_e32 v26, v8 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 ; SI-NEXT: v_or_b32_e32 v8, v0, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 ; SI-NEXT: v_or_b32_e32 v0, v0, v46 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v9, v1 -; SI-NEXT: v_mov_b32_e32 v63, v59 -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v56, v9 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v9, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v26 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v28 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v48 ; SI-NEXT: v_or_b32_e32 v0, v0, v45 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v25, v1 -; SI-NEXT: v_mov_b32_e32 v44, v10 ; SI-NEXT: v_or_b32_e32 v10, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v48 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v30 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v42 ; SI-NEXT: v_or_b32_e32 v0, v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v11, v1 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v16, v18 -; SI-NEXT: v_mov_b32_e32 v18, v20 -; SI-NEXT: v_mov_b32_e32 v20, v22 -; SI-NEXT: v_mov_b32_e32 v22, v24 -; SI-NEXT: v_mov_b32_e32 v24, v26 -; SI-NEXT: v_mov_b32_e32 v26, v28 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v25, v11 ; SI-NEXT: v_or_b32_e32 v11, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v60 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v40 ; SI-NEXT: v_or_b32_e32 v0, v0, v21 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v19, v1 -; SI-NEXT: v_mov_b32_e32 v36, v12 ; SI-NEXT: v_or_b32_e32 v12, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v35 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 ; SI-NEXT: v_or_b32_e32 v0, v0, v17 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v13, v1 -; SI-NEXT: v_mov_b32_e32 v62, v58 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v46, v45 -; SI-NEXT: v_mov_b32_e32 v45, v23 -; SI-NEXT: v_mov_b32_e32 v23, v21 -; SI-NEXT: v_mov_b32_e32 v21, v19 -; SI-NEXT: v_mov_b32_e32 v19, v17 -; SI-NEXT: v_mov_b32_e32 v17, v13 ; SI-NEXT: v_or_b32_e32 v13, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v55 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 -; SI-NEXT: v_or_b32_e32 v0, v0, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v36 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v53 +; SI-NEXT: v_or_b32_e32 v0, v0, v29 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 -; SI-NEXT: v_mov_b32_e32 v52, v14 +; SI-NEXT: v_or_b32_e32 v1, v14, v1 ; SI-NEXT: v_or_b32_e32 v14, v0, v1 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: 
v_and_b32_e32 v1, 0xff, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v43 +; SI-NEXT: v_or_b32_e32 v0, v0, v28 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mov_b32_e32 v27, v42 -; SI-NEXT: v_or_b32_e32 v1, v42, v1 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 -; SI-NEXT: v_or_b32_e32 v0, v0, v15 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_mov_b32_e32 v30, v48 -; SI-NEXT: v_mov_b32_e32 v48, v51 -; SI-NEXT: v_mov_b32_e32 v51, v15 +; SI-NEXT: v_or_b32_e32 v1, v15, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v44 ; SI-NEXT: v_or_b32_e32 v15, v0, v1 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v62 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: v_or_b32_e32 v3, v61, v3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v39, v40 -; SI-NEXT: v_mov_b32_e32 v40, v5 +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_or_b32_e32 v5, v2, v3 -; SI-NEXT: s_lshl_b32 s5, s17, 8 -; SI-NEXT: s_lshl_b32 s6, s19, 24 -; SI-NEXT: s_lshl_b32 s7, s23, 24 -; SI-NEXT: s_lshl_b32 s8, s27, 24 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_or_b32_e32 v3, s4, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_or_b32 s4, s4, s5 @@ -28471,6 +28436,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s22, 0xff ; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s23, 24 ; SI-NEXT: s_and_b32 s5, s5, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_or_b32 s5, s5, s6 @@ -28479,65 +28445,116 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s26, 0xff ; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s27, 24 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v52, v42 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_cbranch_execnz .LBB51_3 ; SI-NEXT: .LBB51_2: ; %cmp.true +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33 +; SI-NEXT: v_or_b32_e32 v1, v39, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v2, v63, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_add_i32 s28, s28, 3 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33 ; SI-NEXT: s_addk_i32 s4, 
0x300 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v1, v38, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v42, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_or_b32_e32 v0, s4, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v39, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v27 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v29 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v40, v1 +; SI-NEXT: v_or_b32_e32 v1, v61, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v16 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 +; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v47, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v22 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 +; SI-NEXT: 
v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v48 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v25, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v23, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 @@ -28579,100 +28596,58 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; SI-NEXT: s_add_i32 s4, s4, 0x3000000 ; SI-NEXT: s_add_i32 s5, s5, 0x3000000 ; SI-NEXT: s_add_i32 s6, s6, 0x3000000 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v57, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v16 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v18 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v56, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24 +; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v40 +; SI-NEXT: v_or_b32_e32 v0, v21, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v28, v1 +; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v50 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v25, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 +; SI-NEXT: v_add_i32_e32 v12, 
vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 -; SI-NEXT: v_or_b32_e32 v0, v23, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v17, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v21, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v35 -; SI-NEXT: v_or_b32_e32 v0, v19, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v17, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 -; SI-NEXT: v_or_b32_e32 v0, v48, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v43 +; SI-NEXT: v_or_b32_e32 v0, v28, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -28695,47 +28670,12 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB51_4: -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v27, v42 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v38, v1 -; SI-NEXT: v_mov_b32_e32 v43, v6 -; SI-NEXT: v_mov_b32_e32 v29, v8 -; SI-NEXT: v_mov_b32_e32 v44, v10 -; SI-NEXT: v_mov_b32_e32 v36, v12 -; SI-NEXT: v_mov_b32_e32 v52, v14 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v16, v18 -; SI-NEXT: v_mov_b32_e32 v18, v20 -; SI-NEXT: v_mov_b32_e32 v20, v22 -; SI-NEXT: v_mov_b32_e32 v22, v24 -; SI-NEXT: v_mov_b32_e32 v24, v26 -; SI-NEXT: v_mov_b32_e32 v26, v28 -; SI-NEXT: v_mov_b32_e32 
v30, v48 -; SI-NEXT: v_mov_b32_e32 v39, v40 -; SI-NEXT: v_mov_b32_e32 v41, v3 -; SI-NEXT: v_mov_b32_e32 v40, v5 -; SI-NEXT: v_mov_b32_e32 v63, v59 -; SI-NEXT: v_mov_b32_e32 v62, v58 -; SI-NEXT: v_mov_b32_e32 v61, v57 -; SI-NEXT: v_mov_b32_e32 v57, v7 -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v56, v9 -; SI-NEXT: v_mov_b32_e32 v46, v45 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v45, v23 -; SI-NEXT: v_mov_b32_e32 v25, v11 -; SI-NEXT: v_mov_b32_e32 v23, v21 -; SI-NEXT: v_mov_b32_e32 v21, v19 -; SI-NEXT: v_mov_b32_e32 v19, v17 -; SI-NEXT: v_mov_b32_e32 v17, v13 -; SI-NEXT: v_mov_b32_e32 v48, v51 -; SI-NEXT: v_mov_b32_e32 v51, v15 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v26, v8 +; SI-NEXT: v_mov_b32_e32 v52, v42 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; SI-NEXT: s_branch .LBB51_2 ; @@ -28758,142 +28698,121 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v37, v30 -; VI-NEXT: v_mov_b32_e32 v61, v28 -; VI-NEXT: v_mov_b32_e32 v31, v0 +; VI-NEXT: v_mov_b32_e32 v36, v28 +; VI-NEXT: v_mov_b32_e32 v35, v26 +; VI-NEXT: v_mov_b32_e32 v34, v24 +; VI-NEXT: v_mov_b32_e32 v39, v14 +; VI-NEXT: v_mov_b32_e32 v48, v12 +; VI-NEXT: v_mov_b32_e32 v49, v10 +; VI-NEXT: v_mov_b32_e32 v50, v8 +; VI-NEXT: v_mov_b32_e32 v51, v6 +; VI-NEXT: v_mov_b32_e32 v44, v2 +; VI-NEXT: v_mov_b32_e32 v45, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:36 -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:48 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:56 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:64 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68 -; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1 -; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v13 -; VI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v17 +; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; 
VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:4 +; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:12 +; VI-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:20 +; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:36 +; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:68 +; VI-NEXT: v_mov_b32_e32 v37, v30 +; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; VI-NEXT: v_lshlrev_b32_e32 v57, 8, v17 ; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v21 ; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v23 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v25 -; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v27 -; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v29 -; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v25 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v27 +; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v29 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v48 -; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v28 +; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v2 +; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v33 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec -; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v38 -; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v36 -; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v35 -; VI-NEXT: s_waitcnt vmcnt(12) -; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v34 -; VI-NEXT: s_waitcnt vmcnt(10) -; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v30 -; VI-NEXT: s_waitcnt vmcnt(8) -; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v42 -; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v42, 8, v44 +; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v6 +; VI-NEXT: s_waitcnt vmcnt(13) +; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v8 +; VI-NEXT: s_waitcnt vmcnt(11) +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v10 +; VI-NEXT: s_waitcnt vmcnt(9) +; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v12 +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v28 +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v26 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v24 +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, 
s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; VI-NEXT: s_cbranch_scc0 .LBB51_4 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v35, v4 +; VI-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v4, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v26, v4 ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v44, v2 -; VI-NEXT: v_mov_b32_e32 v49, v6 -; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v49, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v48, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v36, v58 -; VI-NEXT: v_mov_b32_e32 v58, v57 -; VI-NEXT: v_mov_b32_e32 v57, v7 +; VI-NEXT: v_or_b32_sdwa v0, v39, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v16, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v40, v3 -; VI-NEXT: v_mov_b32_e32 v48, v8 -; VI-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v63, v59 -; VI-NEXT: v_mov_b32_e32 v59, v56 -; VI-NEXT: v_mov_b32_e32 v56, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: v_mov_b32_e32 v46, v9 +; VI-NEXT: v_or_b32_sdwa v1, v34, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v61, 
v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v34, v39 -; VI-NEXT: v_mov_b32_e32 v39, v10 +; VI-NEXT: v_or_b32_sdwa v0, v35, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v36, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v45, v25 -; VI-NEXT: v_mov_b32_e32 v25, v11 +; VI-NEXT: v_or_b32_sdwa v0, v37, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v40, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v43, v12 +; VI-NEXT: v_or_b32_sdwa v0, v55, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v54, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v54, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v16, v18 -; VI-NEXT: v_mov_b32_e32 v18, v20 -; VI-NEXT: v_mov_b32_e32 v20, v22 -; VI-NEXT: v_mov_b32_e32 v22, v24 -; VI-NEXT: v_mov_b32_e32 v24, v26 -; VI-NEXT: v_mov_b32_e32 v26, v61 -; VI-NEXT: v_mov_b32_e32 v61, v23 -; VI-NEXT: v_mov_b32_e32 v23, v21 -; VI-NEXT: v_mov_b32_e32 v21, v19 -; VI-NEXT: v_mov_b32_e32 v19, v17 -; VI-NEXT: v_mov_b32_e32 v17, v13 +; VI-NEXT: v_or_b32_sdwa v0, v53, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v52, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v53, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v42, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v41, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v32, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v43, 
v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_or_b32 s4, s4, s5 -; VI-NEXT: v_mov_b32_e32 v29, v33 -; VI-NEXT: v_mov_b32_e32 v33, v28 -; VI-NEXT: v_mov_b32_e32 v28, v15 +; VI-NEXT: v_or_b32_sdwa v2, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v50, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v41, v5 +; VI-NEXT: v_or_b32_sdwa v0, v45, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff @@ -28923,47 +28842,91 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: s_lshl_b32 s7, s7, 16 ; VI-NEXT: s_or_b32 s6, s6, s7 -; VI-NEXT: v_mov_b32_e32 v30, v37 -; VI-NEXT: v_mov_b32_e32 v37, v27 -; VI-NEXT: v_mov_b32_e32 v27, v42 +; VI-NEXT: v_mov_b32_e32 v28, v44 +; VI-NEXT: v_mov_b32_e32 v33, v42 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: s_cbranch_execnz .LBB51_3 ; VI-NEXT: .LBB51_2: ; %cmp.true +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v28 +; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v26 +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 +; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_addk_i32 s4, 0x300 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v31 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v45 ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_e32 v0, s4, v0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v51 ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1 -; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v48 +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; 
VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v48 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v16 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v18 +; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v20 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v22 +; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v34 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v36 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 +; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) 
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 +; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53 ; VI-NEXT: s_add_i32 s16, s16, 3 ; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_lshl_b32 s5, s17, 8 @@ -29003,76 +28966,35 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 ; VI-NEXT: s_add_i32 s5, s5, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 -; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v16 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v18 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v20 -; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v22 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v24 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v26 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30 -; VI-NEXT: v_or_b32_sdwa v0, v61, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v62 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v60 -; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 -; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53 -; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v41 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v51 -; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32 +; VI-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 +; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: .LBB51_3: ; %end ; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 
offset:80 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload @@ -29093,43 +29015,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB51_4: -; VI-NEXT: v_mov_b32_e32 v44, v2 -; VI-NEXT: v_mov_b32_e32 v34, v39 -; VI-NEXT: v_mov_b32_e32 v35, v4 -; VI-NEXT: v_mov_b32_e32 v29, v33 -; VI-NEXT: v_mov_b32_e32 v49, v6 -; VI-NEXT: v_mov_b32_e32 v48, v8 -; VI-NEXT: v_mov_b32_e32 v39, v10 -; VI-NEXT: v_mov_b32_e32 v43, v12 -; VI-NEXT: v_mov_b32_e32 v16, v18 -; VI-NEXT: v_mov_b32_e32 v18, v20 -; VI-NEXT: v_mov_b32_e32 v20, v22 -; VI-NEXT: v_mov_b32_e32 v22, v24 -; VI-NEXT: v_mov_b32_e32 v24, v26 -; VI-NEXT: v_mov_b32_e32 v26, v61 -; VI-NEXT: v_mov_b32_e32 v30, v37 -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_mov_b32_e32 v41, v5 -; VI-NEXT: v_mov_b32_e32 v40, v3 -; VI-NEXT: v_mov_b32_e32 v63, v59 -; VI-NEXT: v_mov_b32_e32 v36, v58 -; VI-NEXT: v_mov_b32_e32 v58, v57 -; VI-NEXT: v_mov_b32_e32 v57, v7 -; VI-NEXT: v_mov_b32_e32 v59, v56 -; VI-NEXT: v_mov_b32_e32 v56, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: v_mov_b32_e32 v46, v9 -; VI-NEXT: v_mov_b32_e32 v45, v25 -; VI-NEXT: v_mov_b32_e32 v61, v23 -; VI-NEXT: v_mov_b32_e32 v25, v11 -; VI-NEXT: v_mov_b32_e32 v23, v21 -; VI-NEXT: v_mov_b32_e32 v21, v19 -; VI-NEXT: v_mov_b32_e32 v19, v17 -; VI-NEXT: v_mov_b32_e32 v17, v13 -; VI-NEXT: v_mov_b32_e32 v37, v27 -; VI-NEXT: v_mov_b32_e32 v27, v42 -; VI-NEXT: v_mov_b32_e32 v33, v28 -; VI-NEXT: v_mov_b32_e32 v28, v15 +; VI-NEXT: v_mov_b32_e32 v28, v44 +; VI-NEXT: v_mov_b32_e32 v26, v4 +; VI-NEXT: v_mov_b32_e32 v33, v42 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; VI-NEXT: s_branch .LBB51_2 ; @@ -29152,147 +29040,124 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v37, v30 -; GFX9-NEXT: v_mov_b32_e32 v61, v28 -; GFX9-NEXT: v_mov_b32_e32 v31, v0 +; GFX9-NEXT: v_mov_b32_e32 v36, v28 +; GFX9-NEXT: v_mov_b32_e32 v35, v26 +; GFX9-NEXT: v_mov_b32_e32 v34, v24 +; GFX9-NEXT: v_mov_b32_e32 v39, v14 +; GFX9-NEXT: v_mov_b32_e32 v48, v12 +; GFX9-NEXT: v_mov_b32_e32 v49, v10 +; GFX9-NEXT: v_mov_b32_e32 v50, v8 +; GFX9-NEXT: v_mov_b32_e32 v51, v6 +; GFX9-NEXT: v_mov_b32_e32 v44, v2 +; GFX9-NEXT: v_mov_b32_e32 v45, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:36 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:48 -; GFX9-NEXT: 
buffer_load_ushort v53, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:56 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:64 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68 -; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v17 +; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:4 +; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:36 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:68 +; GFX9-NEXT: v_mov_b32_e32 v37, v30 +; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v17 ; GFX9-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v21 ; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v23 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v25 -; GFX9-NEXT: v_lshlrev_b32_e32 v45, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29 -; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v29 +; GFX9-NEXT: s_waitcnt vmcnt(19) ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; GFX9-NEXT: s_waitcnt vmcnt(21) -; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v48 -; GFX9-NEXT: s_waitcnt vmcnt(20) -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v28 -; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(18) -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v38 -; GFX9-NEXT: s_waitcnt vmcnt(16) -; 
GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v36 -; GFX9-NEXT: s_waitcnt vmcnt(14) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v35 -; GFX9-NEXT: s_waitcnt vmcnt(12) -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v34 -; GFX9-NEXT: s_waitcnt vmcnt(10) -; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v30 -; GFX9-NEXT: s_waitcnt vmcnt(8) -; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v42 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v44 +; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v2 +; GFX9-NEXT: s_waitcnt vmcnt(17) +; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v33 +; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec +; GFX9-NEXT: s_waitcnt vmcnt(15) +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(13) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v8 +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_lshlrev_b32_e32 v33, 8, v10 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v12 +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v28 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v26 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v24 +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB51_4 ; GFX9-NEXT: ; %bb.1: ; %cmp.false -; GFX9-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v38, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v35, v4 +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v4, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v26, v4 ; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v44, v2 -; GFX9-NEXT: v_mov_b32_e32 v49, v6 -; GFX9-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v36, v58 -; GFX9-NEXT: v_mov_b32_e32 v58, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v7 +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v58 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v16, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v40, v3 -; GFX9-NEXT: v_mov_b32_e32 v48, v8 -; GFX9-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v63, v59 -; GFX9-NEXT: v_mov_b32_e32 v59, v56 -; GFX9-NEXT: v_mov_b32_e32 v56, v47 -; GFX9-NEXT: v_mov_b32_e32 v47, v46 -; GFX9-NEXT: v_mov_b32_e32 v46, v9 +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v34, v39 -; GFX9-NEXT: v_mov_b32_e32 v39, v10 +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v45, v25 -; GFX9-NEXT: v_mov_b32_e32 v25, v11 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v43, v12 +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v54, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v16, v18 -; GFX9-NEXT: v_mov_b32_e32 v18, v20 -; GFX9-NEXT: v_mov_b32_e32 v20, v22 -; GFX9-NEXT: v_mov_b32_e32 v22, v24 -; 
GFX9-NEXT: v_mov_b32_e32 v24, v26 -; GFX9-NEXT: v_mov_b32_e32 v26, v61 -; GFX9-NEXT: v_mov_b32_e32 v61, v23 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v21, v19 -; GFX9-NEXT: v_mov_b32_e32 v19, v17 -; GFX9-NEXT: v_mov_b32_e32 v17, v13 +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v42, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v41, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s4, s4, s5 -; GFX9-NEXT: v_mov_b32_e32 v29, v33 -; GFX9-NEXT: v_mov_b32_e32 v33, v28 -; GFX9-NEXT: v_mov_b32_e32 v28, v15 +; GFX9-NEXT: v_or_b32_sdwa v2, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v3, v50, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s4, 0xffff -; GFX9-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v41, v5 +; GFX9-NEXT: v_or_b32_sdwa v0, v45, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s4, v0 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff @@ -29322,48 +29187,78 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; GFX9-NEXT: s_and_b32 s6, s6, 0xffff ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_or_b32 s6, s6, s7 -; GFX9-NEXT: v_mov_b32_e32 v30, v37 -; GFX9-NEXT: v_mov_b32_e32 v37, v27 -; GFX9-NEXT: v_mov_b32_e32 v27, v42 +; GFX9-NEXT: v_mov_b32_e32 v28, v44 +; GFX9-NEXT: v_mov_b32_e32 v33, v42 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 ; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_cbranch_execnz .LBB51_3 ; GFX9-NEXT: .LBB51_2: ; %cmp.true +; GFX9-NEXT: v_add_u32_e32 v1, 3, v28 +; GFX9-NEXT: v_add_u32_e32 v2, 3, v26 +; GFX9-NEXT: s_movk_i32 s4, 0x300 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: 
v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s5, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s29, 8 ; GFX9-NEXT: s_or_b32 s5, s6, s5 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v31 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v44 -; GFX9-NEXT: v_add_u32_e32 v2, 3, v35 -; GFX9-NEXT: s_movk_i32 s4, 0x300 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v45 ; GFX9-NEXT: s_addk_i32 s5, 0x300 -; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_and_b32 s5, s5, 0xffff ; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s5, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v51 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 +; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 ; GFX9-NEXT: v_add_u32_e32 v1, 3, v48 -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_add_u32_e32 v0, 3, v39 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v43 -; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v16 +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; 
GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v18 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v20 +; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v22 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v34 +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v35 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v36 +; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v37 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v40 +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_add_i32 s16, s16, 3 ; GFX9-NEXT: s_and_b32 s5, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s17, 8 @@ -29387,6 +29282,20 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; GFX9-NEXT: s_or_b32 s7, s8, s7 ; GFX9-NEXT: s_addk_i32 s6, 0x300 ; GFX9-NEXT: s_addk_i32 s7, 0x300 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v55 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v54 +; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v53 ; GFX9-NEXT: s_and_b32 s6, s6, 0xffff ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_add_i32 s24, s24, 3 @@ -29403,76 +29312,35 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3 ; GFX9-NEXT: s_and_b32 s7, s7, 0xffff ; GFX9-NEXT: s_lshl_b32 s8, s8, 16 ; GFX9-NEXT: s_or_b32 s7, s7, s8 -; GFX9-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v16 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v18 -; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v20 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v22 -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v24 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v30 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v62 -; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, 
v60 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v29 -; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v55 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v54 -; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v53 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 -; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v33 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v41 +; GFX9-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v51 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 -; GFX9-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v32 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v43 +; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v0, s5 ; GFX9-NEXT: v_mov_b32_e32 v1, s6 +; GFX9-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-NEXT: .LBB51_3: ; %end ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload @@ -29493,43 +29361,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg 
%a, i3 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] ; GFX9-NEXT: .LBB51_4: -; GFX9-NEXT: v_mov_b32_e32 v44, v2 -; GFX9-NEXT: v_mov_b32_e32 v34, v39 -; GFX9-NEXT: v_mov_b32_e32 v35, v4 -; GFX9-NEXT: v_mov_b32_e32 v29, v33 -; GFX9-NEXT: v_mov_b32_e32 v49, v6 -; GFX9-NEXT: v_mov_b32_e32 v48, v8 -; GFX9-NEXT: v_mov_b32_e32 v39, v10 -; GFX9-NEXT: v_mov_b32_e32 v43, v12 -; GFX9-NEXT: v_mov_b32_e32 v16, v18 -; GFX9-NEXT: v_mov_b32_e32 v18, v20 -; GFX9-NEXT: v_mov_b32_e32 v20, v22 -; GFX9-NEXT: v_mov_b32_e32 v22, v24 -; GFX9-NEXT: v_mov_b32_e32 v24, v26 -; GFX9-NEXT: v_mov_b32_e32 v26, v61 -; GFX9-NEXT: v_mov_b32_e32 v30, v37 -; GFX9-NEXT: v_mov_b32_e32 v38, v1 -; GFX9-NEXT: v_mov_b32_e32 v41, v5 -; GFX9-NEXT: v_mov_b32_e32 v40, v3 -; GFX9-NEXT: v_mov_b32_e32 v63, v59 -; GFX9-NEXT: v_mov_b32_e32 v36, v58 -; GFX9-NEXT: v_mov_b32_e32 v58, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v7 -; GFX9-NEXT: v_mov_b32_e32 v59, v56 -; GFX9-NEXT: v_mov_b32_e32 v56, v47 -; GFX9-NEXT: v_mov_b32_e32 v47, v46 -; GFX9-NEXT: v_mov_b32_e32 v46, v9 -; GFX9-NEXT: v_mov_b32_e32 v45, v25 -; GFX9-NEXT: v_mov_b32_e32 v61, v23 -; GFX9-NEXT: v_mov_b32_e32 v25, v11 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v21, v19 -; GFX9-NEXT: v_mov_b32_e32 v19, v17 -; GFX9-NEXT: v_mov_b32_e32 v17, v13 -; GFX9-NEXT: v_mov_b32_e32 v37, v27 -; GFX9-NEXT: v_mov_b32_e32 v27, v42 -; GFX9-NEXT: v_mov_b32_e32 v33, v28 -; GFX9-NEXT: v_mov_b32_e32 v28, v15 +; GFX9-NEXT: v_mov_b32_e32 v28, v44 +; GFX9-NEXT: v_mov_b32_e32 v26, v4 +; GFX9-NEXT: v_mov_b32_e32 v33, v42 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: s_branch .LBB51_2 ; @@ -31135,96 +30969,113 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32 ; SI-LABEL: bitcast_v8i64_to_v32i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v3, v2 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_mov_b32_e32 v30, v1 -; SI-NEXT: v_mov_b32_e32 v28, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: v_readfirstlane_b32 s4, v0 +; SI-NEXT: s_and_b64 s[6:7], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_cbranch_scc0 .LBB57_4 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: s_lshr_b32 s56, s5, 16 +; SI-NEXT: s_lshr_b32 s57, s29, 16 +; SI-NEXT: s_lshr_b32 s58, s27, 16 +; SI-NEXT: s_lshr_b32 s59, s25, 16 +; SI-NEXT: s_lshr_b32 s60, s23, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 16 +; SI-NEXT: s_lshr_b32 s63, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: s_cbranch_execnz .LBB57_3 +; SI-NEXT: .LBB57_2: ; %cmp.true +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s28, s28, 3 +; SI-NEXT: s_addc_u32 s29, s29, 0 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; 
SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s56, s5, 16 +; SI-NEXT: s_lshr_b32 s57, s29, 16 +; SI-NEXT: s_lshr_b32 s58, s27, 16 +; SI-NEXT: s_lshr_b32 s59, s25, 16 +; SI-NEXT: s_lshr_b32 s60, s23, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 16 +; SI-NEXT: s_lshr_b32 s63, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: .LBB57_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s44 ; SI-NEXT: v_mov_b32_e32 v2, s17 +; SI-NEXT: v_mov_b32_e32 v3, s63 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s42 ; SI-NEXT: v_mov_b32_e32 v6, s19 +; SI-NEXT: v_mov_b32_e32 v7, s62 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s40 ; SI-NEXT: v_mov_b32_e32 v10, s21 +; SI-NEXT: v_mov_b32_e32 v11, s61 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s14 ; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v15, s60 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s12 ; SI-NEXT: v_mov_b32_e32 v18, s25 +; SI-NEXT: v_mov_b32_e32 v19, s59 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s10 ; SI-NEXT: v_mov_b32_e32 v22, s27 +; SI-NEXT: v_mov_b32_e32 v23, s58 ; SI-NEXT: v_mov_b32_e32 v24, s28 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v25, s8 ; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: s_cbranch_scc0 .LBB57_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_cbranch_execnz .LBB57_3 -; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 -; SI-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc -; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v8 -; SI-NEXT: v_addc_u32_e32 v10, vcc, 0, v10, vcc -; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v12 -; SI-NEXT: v_addc_u32_e32 v14, vcc, 0, v14, vcc -; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 -; SI-NEXT: v_addc_u32_e32 v18, vcc, 0, v18, vcc -; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20 -; SI-NEXT: v_addc_u32_e32 v22, vcc, 0, v22, vcc -; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24 -; SI-NEXT: v_addc_u32_e32 v26, vcc, 0, v26, vcc -; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28 -; SI-NEXT: v_addc_u32_e32 v30, vcc, 0, v30, vcc -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: 
v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: .LBB57_3: ; %end +; SI-NEXT: v_mov_b32_e32 v27, s57 +; SI-NEXT: v_mov_b32_e32 v28, s4 +; SI-NEXT: v_mov_b32_e32 v29, s6 +; SI-NEXT: v_mov_b32_e32 v30, s5 +; SI-NEXT: v_mov_b32_e32 v31, s56 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB57_2 ; ; VI-LABEL: bitcast_v8i64_to_v32i16_scalar: @@ -38395,386 +38246,449 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in ; SI-LABEL: bitcast_v8i64_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v4, s30, 0 +; SI-NEXT: v_writelane_b32 v4, s31, 1 +; SI-NEXT: v_writelane_b32 v4, s34, 2 +; SI-NEXT: v_writelane_b32 v4, s35, 3 +; SI-NEXT: v_writelane_b32 v4, s36, 4 +; SI-NEXT: v_writelane_b32 v4, s37, 5 +; SI-NEXT: v_writelane_b32 v4, s38, 6 +; SI-NEXT: v_writelane_b32 v4, s39, 7 +; SI-NEXT: v_writelane_b32 v4, s48, 8 +; SI-NEXT: v_writelane_b32 v4, s49, 9 +; SI-NEXT: v_writelane_b32 v4, s50, 10 +; SI-NEXT: v_writelane_b32 v4, s51, 11 +; SI-NEXT: v_writelane_b32 v4, s52, 12 +; SI-NEXT: v_writelane_b32 v4, s53, 13 +; SI-NEXT: v_writelane_b32 v4, s54, 14 +; SI-NEXT: v_writelane_b32 v4, s55, 15 +; SI-NEXT: v_writelane_b32 v4, s64, 16 +; SI-NEXT: v_writelane_b32 v4, s65, 17 +; SI-NEXT: v_writelane_b32 v4, s66, 18 +; SI-NEXT: v_writelane_b32 v4, s67, 19 +; SI-NEXT: v_writelane_b32 v4, s68, 20 +; SI-NEXT: v_writelane_b32 v4, s69, 21 +; SI-NEXT: v_writelane_b32 v4, s70, 22 +; SI-NEXT: v_writelane_b32 v4, s71, 23 +; SI-NEXT: v_writelane_b32 v4, s80, 24 +; SI-NEXT: v_writelane_b32 v4, s81, 25 +; SI-NEXT: v_writelane_b32 v4, s82, 26 +; SI-NEXT: v_writelane_b32 v4, s83, 27 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v2 +; SI-NEXT: v_writelane_b32 v4, s84, 28 +; 
SI-NEXT: v_readfirstlane_b32 s4, v1 +; SI-NEXT: s_and_b64 s[6:7], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_writelane_b32 v4, s85, 29 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s29, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s27, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 8 -; SI-NEXT: v_alignbit_b32 v13, s25, v10, 24 -; SI-NEXT: v_alignbit_b32 v15, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 8 -; SI-NEXT: v_alignbit_b32 v11, s23, v14, 24 -; SI-NEXT: v_alignbit_b32 v12, s23, v14, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v14, 8 -; SI-NEXT: v_alignbit_b32 v16, s21, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s21, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s21, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s19, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s19, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s19, v21, 8 -; SI-NEXT: v_alignbit_b32 v23, s17, v22, 24 -; SI-NEXT: v_alignbit_b32 v24, s17, v22, 16 -; SI-NEXT: v_alignbit_b32 v22, s17, v22, 8 -; SI-NEXT: s_lshr_b32 s8, s6, 24 -; SI-NEXT: s_lshr_b32 s9, s6, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_lshr_b32 s38, s5, 24 +; SI-NEXT: s_lshr_b32 s39, s5, 16 +; SI-NEXT: s_lshr_b32 s48, s5, 8 +; SI-NEXT: s_lshr_b32 s49, s29, 24 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s29, 8 +; SI-NEXT: s_lshr_b32 s52, s27, 24 +; SI-NEXT: s_lshr_b32 s53, s27, 16 +; SI-NEXT: s_lshr_b32 s54, s27, 8 +; SI-NEXT: s_lshr_b32 s55, s25, 24 +; SI-NEXT: s_lshr_b32 s64, s25, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 8 +; SI-NEXT: s_lshr_b32 s66, s23, 24 +; SI-NEXT: s_lshr_b32 s67, s23, 16 +; SI-NEXT: s_lshr_b32 s68, s23, 8 +; SI-NEXT: s_lshr_b32 s69, s21, 24 +; SI-NEXT: s_lshr_b32 s70, s21, 16 +; SI-NEXT: s_lshr_b32 s71, s21, 8 +; SI-NEXT: s_lshr_b32 s80, s19, 24 +; SI-NEXT: s_lshr_b32 s81, s19, 16 +; SI-NEXT: s_lshr_b32 s82, s19, 8 +; SI-NEXT: s_lshr_b32 s83, s17, 24 +; SI-NEXT: s_lshr_b32 s84, s17, 16 +; SI-NEXT: s_lshr_b32 s85, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[42:43], 
s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s29, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s27, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 8 -; SI-NEXT: v_alignbit_b32 v13, s25, v10, 24 -; SI-NEXT: v_alignbit_b32 v15, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 8 -; SI-NEXT: v_alignbit_b32 v11, s23, v14, 24 -; SI-NEXT: v_alignbit_b32 v12, s23, v14, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v14, 8 -; SI-NEXT: v_alignbit_b32 v16, s21, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s21, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s21, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s19, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s19, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s19, v21, 8 -; SI-NEXT: v_alignbit_b32 v23, s17, v22, 24 -; SI-NEXT: v_alignbit_b32 v24, s17, v22, 16 -; SI-NEXT: v_alignbit_b32 v22, s17, v22, 8 -; SI-NEXT: s_lshr_b32 s8, s6, 24 -; SI-NEXT: s_lshr_b32 s9, s6, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: 
s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s38, s5, 24 +; SI-NEXT: s_lshr_b32 s39, s5, 16 +; SI-NEXT: s_lshr_b32 s48, s5, 8 +; SI-NEXT: s_lshr_b32 s49, s29, 24 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s29, 8 +; SI-NEXT: s_lshr_b32 s52, s27, 24 +; SI-NEXT: s_lshr_b32 s53, s27, 16 +; SI-NEXT: s_lshr_b32 s54, s27, 8 +; SI-NEXT: s_lshr_b32 s55, s25, 24 +; SI-NEXT: s_lshr_b32 s64, s25, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 8 +; SI-NEXT: s_lshr_b32 s66, s23, 24 +; SI-NEXT: s_lshr_b32 s67, s23, 16 +; SI-NEXT: s_lshr_b32 s68, s23, 8 +; SI-NEXT: s_lshr_b32 s69, s21, 24 +; SI-NEXT: s_lshr_b32 s70, s21, 16 +; SI-NEXT: s_lshr_b32 s71, s21, 8 +; SI-NEXT: s_lshr_b32 s80, s19, 24 +; SI-NEXT: s_lshr_b32 s81, s19, 16 +; SI-NEXT: s_lshr_b32 s82, s19, 8 +; SI-NEXT: s_lshr_b32 s83, s17, 24 +; SI-NEXT: s_lshr_b32 s84, s17, 16 +; SI-NEXT: s_lshr_b32 s85, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 8 ; SI-NEXT: .LBB69_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s63, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s62, 0xff -; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s61, 24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_or_b32_e32 v21, s4, v21 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s60, 8 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s59, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 
v19, 24, v19 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s58, 24 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v19, v21, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v20, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 -; SI-NEXT: v_or_b32_e32 v18, s4, v18 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s57, 8 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s56, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s47, 24 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v16, v18, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v20, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s46, 8 -; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s44, 24 -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v12 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: s_lshl_b32 s7, s36, 8 +; SI-NEXT: s_and_b32 s9, s16, 0xff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s9, s34, 0xff +; SI-NEXT: s_lshl_b32 s11, s30, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v1, s7 +; SI-NEXT: s_and_b32 s7, s17, 0xff +; SI-NEXT: s_lshl_b32 s9, s85, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s84, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s83, 24 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_lshl_b32 s7, s94, 8 +; SI-NEXT: s_and_b32 s9, s18, 0xff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s9, s92, 0xff +; SI-NEXT: s_lshl_b32 s11, s90, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: s_and_b32 s7, s19, 0xff +; SI-NEXT: s_lshl_b32 s9, s82, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s81, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s80, 24 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_add_i32_e32 v12, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s43, 8 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xff, v15 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s42, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s9, s76, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s72, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s62, 24 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s21, 0xff +; SI-NEXT: s_lshl_b32 s9, s71, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s70, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s69, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s41, 24 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s14, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s9, s88, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s78, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s74, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s23, 0xff +; 
SI-NEXT: s_lshl_b32 s9, s68, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s67, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s66, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s24, 0xff +; SI-NEXT: s_lshl_b32 s9, s60, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s58, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s56, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s25, 0xff +; SI-NEXT: s_lshl_b32 s9, s65, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s64, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s55, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s13, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s12, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s11, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s26, 0xff +; SI-NEXT: s_lshl_b32 s9, s46, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s44, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s42, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s27, 0xff +; SI-NEXT: s_lshl_b32 s9, s54, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s53, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s52, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 s9, s40, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s14, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: 
s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s29, 0xff +; SI-NEXT: s_lshl_b32 s9, s51, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s50, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s49, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s7, s10, 8 +; SI-NEXT: s_or_b32 s4, s4, s7 +; SI-NEXT: s_and_b32 s7, s8, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s11, s5 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; SI-NEXT: s_lshl_b32 s5, s10, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s48, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_and_b32 s5, s39, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s8, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s38, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s85, v4, 29 +; SI-NEXT: v_readlane_b32 s84, v4, 28 +; SI-NEXT: v_readlane_b32 s83, v4, 27 +; SI-NEXT: v_readlane_b32 s82, v4, 26 +; SI-NEXT: v_readlane_b32 s81, v4, 25 +; SI-NEXT: v_readlane_b32 s80, v4, 24 +; SI-NEXT: v_readlane_b32 s71, v4, 23 +; SI-NEXT: v_readlane_b32 s70, v4, 22 +; SI-NEXT: v_readlane_b32 s69, v4, 21 +; SI-NEXT: v_readlane_b32 s68, v4, 20 +; SI-NEXT: v_readlane_b32 s67, v4, 19 +; SI-NEXT: v_readlane_b32 s66, v4, 18 +; SI-NEXT: v_readlane_b32 s65, v4, 17 +; SI-NEXT: v_readlane_b32 s64, v4, 16 +; SI-NEXT: v_readlane_b32 s55, v4, 15 +; SI-NEXT: v_readlane_b32 s54, v4, 14 +; SI-NEXT: v_readlane_b32 s53, v4, 13 +; SI-NEXT: v_readlane_b32 s52, v4, 12 +; SI-NEXT: v_readlane_b32 s51, v4, 11 +; SI-NEXT: v_readlane_b32 s50, v4, 10 +; SI-NEXT: v_readlane_b32 s49, v4, 9 +; SI-NEXT: v_readlane_b32 s48, v4, 8 +; SI-NEXT: v_readlane_b32 s39, v4, 7 +; SI-NEXT: v_readlane_b32 s38, v4, 6 +; SI-NEXT: v_readlane_b32 s37, v4, 5 
+; SI-NEXT: v_readlane_b32 s36, v4, 4 +; SI-NEXT: v_readlane_b32 s35, v4, 3 +; SI-NEXT: v_readlane_b32 s34, v4, 2 +; SI-NEXT: v_readlane_b32 s31, v4, 1 +; SI-NEXT: v_readlane_b32 s30, v4, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr85 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr83 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr15 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr6 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v8i64_to_v64i8_scalar: @@ -42400,42 +42314,46 @@ define inreg <8 x 
i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v48, v30 +; SI-NEXT: v_mov_b32_e32 v48, v28 +; SI-NEXT: v_mov_b32_e32 v38, v26 +; SI-NEXT: v_mov_b32_e32 v49, v24 +; SI-NEXT: v_mov_b32_e32 v51, v14 +; SI-NEXT: v_mov_b32_e32 v54, v12 +; SI-NEXT: v_mov_b32_e32 v34, v10 +; SI-NEXT: v_mov_b32_e32 v44, v6 ; SI-NEXT: v_mov_b32_e32 v33, v4 ; SI-NEXT: v_mov_b32_e32 v32, v2 ; SI-NEXT: v_mov_b32_e32 v31, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; SI-NEXT: v_lshlrev_b32_e32 v40, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v7 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v9 -; SI-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v13 -; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:68 +; SI-NEXT: v_lshlrev_b32_e32 v50, 24, v1 +; SI-NEXT: 
v_lshlrev_b32_e32 v39, 8, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v61, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v17 ; SI-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v21 ; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v23 @@ -42447,49 +42365,48 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v2 ; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v51 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v39 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v30 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v42 -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v44 +; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v6 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v52 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v10 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v12 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_lshlrev_b32_e32 v29, 8, v14 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v28 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v26 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v24 +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB71_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_and_b32_e32 v0, 0xff, v32 -; SI-NEXT: v_mov_b32_e32 v38, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 +; SI-NEXT: v_or_b32_e32 v0, v0, v39 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v40, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: v_or_b32_e32 v4, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: v_or_b32_e32 v0, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_or_b32_e32 v0, v0, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_mov_b32_e32 v43, v6 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v6, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_or_b32_e32 v0, v0, v57 +; SI-NEXT: v_or_b32_e32 v0, v0, v58 ; SI-NEXT: v_lshlrev_b32_e32 
v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v7, v1 -; SI-NEXT: v_mov_b32_e32 v61, v57 -; SI-NEXT: v_mov_b32_e32 v57, v7 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: v_or_b32_e32 v7, v0, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v18 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v20 @@ -42497,113 +42414,80 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v47, v1 -; SI-NEXT: v_mov_b32_e32 v41, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_mov_b32_e32 v29, v8 +; SI-NEXT: v_mov_b32_e32 v26, v8 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 ; SI-NEXT: v_or_b32_e32 v8, v0, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 ; SI-NEXT: v_or_b32_e32 v0, v0, v46 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v9, v1 -; SI-NEXT: v_mov_b32_e32 v63, v59 -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v56, v9 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v9, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v26 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v28 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v48 ; SI-NEXT: v_or_b32_e32 v0, v0, v45 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v25, v1 -; SI-NEXT: v_mov_b32_e32 v44, v10 ; SI-NEXT: v_or_b32_e32 v10, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v48 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v30 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v42 ; SI-NEXT: v_or_b32_e32 v0, v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v11, v1 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v16, v18 -; SI-NEXT: v_mov_b32_e32 v18, v20 -; SI-NEXT: v_mov_b32_e32 v20, v22 -; SI-NEXT: v_mov_b32_e32 v22, v24 -; SI-NEXT: v_mov_b32_e32 v24, v26 -; SI-NEXT: v_mov_b32_e32 v26, v28 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v25, v11 ; SI-NEXT: v_or_b32_e32 v11, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v60 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v40 ; SI-NEXT: v_or_b32_e32 v0, v0, v21 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v19, v1 -; SI-NEXT: v_mov_b32_e32 v36, v12 ; SI-NEXT: v_or_b32_e32 v12, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v35 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 ; SI-NEXT: v_or_b32_e32 v0, v0, v17 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v13, v1 -; SI-NEXT: v_mov_b32_e32 v62, v58 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v46, v45 -; SI-NEXT: v_mov_b32_e32 v45, v23 -; SI-NEXT: v_mov_b32_e32 v23, v21 -; SI-NEXT: v_mov_b32_e32 v21, v19 -; SI-NEXT: v_mov_b32_e32 v19, v17 -; SI-NEXT: v_mov_b32_e32 v17, v13 ; SI-NEXT: v_or_b32_e32 v13, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v55 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 -; SI-NEXT: v_or_b32_e32 v0, v0, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v36 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v53 +; SI-NEXT: v_or_b32_e32 v0, v0, v29 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; 
SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 -; SI-NEXT: v_mov_b32_e32 v52, v14 +; SI-NEXT: v_or_b32_e32 v1, v14, v1 ; SI-NEXT: v_or_b32_e32 v14, v0, v1 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v43 +; SI-NEXT: v_or_b32_e32 v0, v0, v28 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mov_b32_e32 v27, v42 -; SI-NEXT: v_or_b32_e32 v1, v42, v1 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 -; SI-NEXT: v_or_b32_e32 v0, v0, v15 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_mov_b32_e32 v30, v48 -; SI-NEXT: v_mov_b32_e32 v48, v51 -; SI-NEXT: v_mov_b32_e32 v51, v15 +; SI-NEXT: v_or_b32_e32 v1, v15, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v44 ; SI-NEXT: v_or_b32_e32 v15, v0, v1 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v62 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: v_or_b32_e32 v3, v61, v3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v39, v40 -; SI-NEXT: v_mov_b32_e32 v40, v5 +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_or_b32_e32 v5, v2, v3 -; SI-NEXT: s_lshl_b32 s5, s17, 8 -; SI-NEXT: s_lshl_b32 s6, s19, 24 -; SI-NEXT: s_lshl_b32 s7, s23, 24 -; SI-NEXT: s_lshl_b32 s8, s27, 24 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_or_b32_e32 v3, s4, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_or_b32 s4, s4, s5 @@ -42612,6 +42496,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s22, 0xff ; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s23, 24 ; SI-NEXT: s_and_b32 s5, s5, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_or_b32 s5, s5, s6 @@ -42620,65 +42505,116 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s26, 0xff ; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s27, 24 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v52, v42 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_cbranch_execnz .LBB71_3 ; SI-NEXT: .LBB71_2: ; %cmp.true +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33 +; SI-NEXT: v_or_b32_e32 v1, v39, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v2, v63, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_add_i32 s28, s28, 3 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: v_add_i32_e32 v0, vcc, 
3, v31 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33 ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v1, v38, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v42, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_or_b32_e32 v0, s4, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v39, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v27 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v29 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v40, v1 +; SI-NEXT: v_or_b32_e32 v1, v61, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v16 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 +; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v47, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v22 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v48 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v25, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v23, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 @@ -42720,100 +42656,58 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; SI-NEXT: s_add_i32 s4, s4, 0x3000000 ; SI-NEXT: s_add_i32 s5, s5, 0x3000000 ; SI-NEXT: s_add_i32 s6, s6, 0x3000000 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v57, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v16 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v18 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v56, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24 +; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v40 +; SI-NEXT: v_or_b32_e32 v0, v21, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v28, v1 +; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v50 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: 
v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v25, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 +; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 -; SI-NEXT: v_or_b32_e32 v0, v23, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v17, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v21, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v35 -; SI-NEXT: v_or_b32_e32 v0, v19, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v17, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 -; SI-NEXT: v_or_b32_e32 v0, v48, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v43 +; SI-NEXT: v_or_b32_e32 v0, v28, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -42836,47 +42730,12 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB71_4: -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v27, v42 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v38, v1 -; SI-NEXT: v_mov_b32_e32 v43, v6 -; SI-NEXT: v_mov_b32_e32 v29, v8 -; SI-NEXT: v_mov_b32_e32 v44, v10 -; SI-NEXT: v_mov_b32_e32 v36, v12 -; SI-NEXT: v_mov_b32_e32 v52, v14 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_mov_b32_e32 v16, v18 -; SI-NEXT: v_mov_b32_e32 v18, v20 -; SI-NEXT: v_mov_b32_e32 v20, v22 -; SI-NEXT: v_mov_b32_e32 v22, v24 -; SI-NEXT: v_mov_b32_e32 v24, v26 -; SI-NEXT: v_mov_b32_e32 v26, v28 -; SI-NEXT: v_mov_b32_e32 v30, v48 -; SI-NEXT: v_mov_b32_e32 v39, v40 -; SI-NEXT: v_mov_b32_e32 v41, v3 -; SI-NEXT: v_mov_b32_e32 v40, v5 -; SI-NEXT: v_mov_b32_e32 v63, v59 -; SI-NEXT: v_mov_b32_e32 v62, v58 -; SI-NEXT: v_mov_b32_e32 v61, v57 -; SI-NEXT: v_mov_b32_e32 v57, v7 -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v56, v9 -; SI-NEXT: v_mov_b32_e32 v46, v45 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v45, v23 -; SI-NEXT: v_mov_b32_e32 v25, v11 -; SI-NEXT: v_mov_b32_e32 v23, v21 -; SI-NEXT: v_mov_b32_e32 v21, v19 -; SI-NEXT: v_mov_b32_e32 v19, v17 -; SI-NEXT: v_mov_b32_e32 v17, v13 -; SI-NEXT: v_mov_b32_e32 v48, v51 -; SI-NEXT: v_mov_b32_e32 v51, v15 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v26, v8 +; SI-NEXT: v_mov_b32_e32 v52, v42 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; SI-NEXT: s_branch .LBB71_2 ; @@ -42899,142 +42758,121 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v37, v30 -; VI-NEXT: v_mov_b32_e32 v61, v28 -; VI-NEXT: v_mov_b32_e32 v31, v0 +; VI-NEXT: v_mov_b32_e32 v36, v28 +; VI-NEXT: v_mov_b32_e32 v35, v26 +; VI-NEXT: v_mov_b32_e32 v34, v24 +; VI-NEXT: v_mov_b32_e32 v39, v14 +; VI-NEXT: v_mov_b32_e32 v48, v12 +; VI-NEXT: v_mov_b32_e32 v49, v10 +; VI-NEXT: v_mov_b32_e32 v50, v8 +; VI-NEXT: v_mov_b32_e32 v51, v6 +; VI-NEXT: v_mov_b32_e32 v44, v2 +; VI-NEXT: v_mov_b32_e32 v45, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:36 -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:48 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:56 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:64 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68 -; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1 -; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; VI-NEXT: 
v_lshlrev_b32_e32 v58, 8, v13 -; VI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v17 +; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:4 +; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:12 +; VI-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:20 +; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:36 +; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:68 +; VI-NEXT: v_mov_b32_e32 v37, v30 +; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; VI-NEXT: v_lshlrev_b32_e32 v57, 8, v17 ; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v21 ; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v23 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v25 -; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v27 -; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v29 -; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v25 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v27 +; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v29 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v48 -; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v28 +; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v2 +; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v33 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec -; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v38 -; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v36 -; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v35 -; VI-NEXT: s_waitcnt vmcnt(12) -; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v34 -; VI-NEXT: s_waitcnt vmcnt(10) -; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v30 -; VI-NEXT: s_waitcnt vmcnt(8) -; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v42 -; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v42, 8, v44 +; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v6 +; VI-NEXT: s_waitcnt vmcnt(13) +; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v8 +; VI-NEXT: s_waitcnt vmcnt(11) +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v10 +; VI-NEXT: s_waitcnt vmcnt(9) +; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v12 +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v28 +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: 
v_lshlrev_b32_e32 v31, 8, v26 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v24 +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; VI-NEXT: s_cbranch_scc0 .LBB71_4 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v35, v4 +; VI-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v4, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v26, v4 ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v44, v2 -; VI-NEXT: v_mov_b32_e32 v49, v6 -; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v49, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v48, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v36, v58 -; VI-NEXT: v_mov_b32_e32 v58, v57 -; VI-NEXT: v_mov_b32_e32 v57, v7 +; VI-NEXT: v_or_b32_sdwa v0, v39, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v16, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v40, v3 -; VI-NEXT: v_mov_b32_e32 v48, v8 -; VI-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v63, v59 -; VI-NEXT: v_mov_b32_e32 v59, v56 -; VI-NEXT: v_mov_b32_e32 v56, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: v_mov_b32_e32 v46, v9 +; VI-NEXT: v_or_b32_sdwa v1, v34, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: 
v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v34, v39 -; VI-NEXT: v_mov_b32_e32 v39, v10 +; VI-NEXT: v_or_b32_sdwa v0, v35, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v36, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v45, v25 -; VI-NEXT: v_mov_b32_e32 v25, v11 +; VI-NEXT: v_or_b32_sdwa v0, v37, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v40, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v43, v12 +; VI-NEXT: v_or_b32_sdwa v0, v55, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v54, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v54, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v16, v18 -; VI-NEXT: v_mov_b32_e32 v18, v20 -; VI-NEXT: v_mov_b32_e32 v20, v22 -; VI-NEXT: v_mov_b32_e32 v22, v24 -; VI-NEXT: v_mov_b32_e32 v24, v26 -; VI-NEXT: v_mov_b32_e32 v26, v61 -; VI-NEXT: v_mov_b32_e32 v61, v23 -; VI-NEXT: v_mov_b32_e32 v23, v21 -; VI-NEXT: v_mov_b32_e32 v21, v19 -; VI-NEXT: v_mov_b32_e32 v19, v17 -; VI-NEXT: v_mov_b32_e32 v17, v13 +; VI-NEXT: v_or_b32_sdwa v0, v53, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v52, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v53, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v42, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v41, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v1, v50, v42 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v32, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_or_b32 s4, s4, s5 -; VI-NEXT: v_mov_b32_e32 v29, v33 -; VI-NEXT: v_mov_b32_e32 v33, v28 -; VI-NEXT: v_mov_b32_e32 v28, v15 +; VI-NEXT: v_or_b32_sdwa v2, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v50, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v41, v5 +; VI-NEXT: v_or_b32_sdwa v0, v45, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff @@ -43064,47 +42902,91 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: s_lshl_b32 s7, s7, 16 ; VI-NEXT: s_or_b32 s6, s6, s7 -; VI-NEXT: v_mov_b32_e32 v30, v37 -; VI-NEXT: v_mov_b32_e32 v37, v27 -; VI-NEXT: v_mov_b32_e32 v27, v42 +; VI-NEXT: v_mov_b32_e32 v28, v44 +; VI-NEXT: v_mov_b32_e32 v33, v42 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: s_cbranch_execnz .LBB71_3 ; VI-NEXT: .LBB71_2: ; %cmp.true +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v28 +; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v26 +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 +; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_addk_i32 s4, 0x300 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v31 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v45 ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_e32 v0, s4, v0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v51 ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1 -; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v48 +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v48 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v16 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v18 +; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v20 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v22 +; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v34 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v36 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 +; VI-NEXT: 
v_add_u32_e32 v0, vcc, 3, v37 +; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 +; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53 ; VI-NEXT: s_add_i32 s16, s16, 3 ; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_lshl_b32 s5, s17, 8 @@ -43144,76 +43026,35 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 ; VI-NEXT: s_add_i32 s5, s5, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 -; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v16 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v18 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v20 -; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v22 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v24 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v26 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30 -; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v62 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v60 -; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 -; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53 -; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v41 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v51 -; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32 +; VI-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 +; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: .LBB71_3: ; %end ; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload @@ -43234,43 +43075,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB71_4: -; VI-NEXT: v_mov_b32_e32 v44, v2 -; VI-NEXT: v_mov_b32_e32 v34, v39 -; VI-NEXT: v_mov_b32_e32 v35, v4 -; VI-NEXT: v_mov_b32_e32 v29, v33 -; VI-NEXT: v_mov_b32_e32 v49, v6 -; VI-NEXT: v_mov_b32_e32 v48, v8 -; VI-NEXT: v_mov_b32_e32 v39, v10 -; VI-NEXT: v_mov_b32_e32 v43, v12 -; VI-NEXT: v_mov_b32_e32 v16, v18 -; VI-NEXT: v_mov_b32_e32 v18, v20 -; VI-NEXT: v_mov_b32_e32 v20, v22 -; VI-NEXT: v_mov_b32_e32 v22, v24 -; VI-NEXT: v_mov_b32_e32 v24, v26 -; VI-NEXT: v_mov_b32_e32 v26, v61 -; VI-NEXT: v_mov_b32_e32 v30, v37 -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_mov_b32_e32 v41, v5 -; VI-NEXT: v_mov_b32_e32 v40, v3 -; VI-NEXT: v_mov_b32_e32 v63, v59 -; VI-NEXT: v_mov_b32_e32 v36, v58 -; VI-NEXT: v_mov_b32_e32 v58, v57 -; VI-NEXT: v_mov_b32_e32 v57, v7 -; VI-NEXT: v_mov_b32_e32 v59, v56 -; VI-NEXT: v_mov_b32_e32 v56, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: v_mov_b32_e32 v46, v9 -; VI-NEXT: v_mov_b32_e32 v45, v25 -; VI-NEXT: v_mov_b32_e32 v61, v23 -; VI-NEXT: v_mov_b32_e32 v25, v11 -; VI-NEXT: v_mov_b32_e32 v23, v21 -; VI-NEXT: v_mov_b32_e32 v21, v19 -; VI-NEXT: v_mov_b32_e32 v19, v17 -; VI-NEXT: v_mov_b32_e32 v17, v13 -; VI-NEXT: v_mov_b32_e32 v37, v27 -; VI-NEXT: v_mov_b32_e32 v27, v42 -; VI-NEXT: v_mov_b32_e32 v33, v28 -; VI-NEXT: v_mov_b32_e32 v28, v15 +; VI-NEXT: v_mov_b32_e32 v28, v44 +; VI-NEXT: v_mov_b32_e32 v26, v4 +; VI-NEXT: v_mov_b32_e32 v33, v42 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; VI-NEXT: s_branch .LBB71_2 ; @@ -43293,147 +43100,124 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v37, v30 -; GFX9-NEXT: v_mov_b32_e32 v61, v28 -; GFX9-NEXT: v_mov_b32_e32 v31, v0 +; GFX9-NEXT: v_mov_b32_e32 v36, v28 +; GFX9-NEXT: v_mov_b32_e32 v35, v26 +; GFX9-NEXT: v_mov_b32_e32 v34, v24 +; GFX9-NEXT: v_mov_b32_e32 v39, v14 +; GFX9-NEXT: v_mov_b32_e32 v48, v12 +; GFX9-NEXT: v_mov_b32_e32 v49, v10 +; GFX9-NEXT: v_mov_b32_e32 v50, v8 +; GFX9-NEXT: v_mov_b32_e32 v51, v6 +; GFX9-NEXT: v_mov_b32_e32 v44, v2 +; GFX9-NEXT: v_mov_b32_e32 v45, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32 -; GFX9-NEXT: 
buffer_load_ushort v55, off, s[0:3], s32 offset:28 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:36 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:56 -; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:64 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68 -; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v17 +; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:4 +; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:36 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:68 +; GFX9-NEXT: v_mov_b32_e32 v37, v30 +; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v17 ; GFX9-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v21 ; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v23 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v25 -; GFX9-NEXT: v_lshlrev_b32_e32 v45, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29 -; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v29 +; GFX9-NEXT: s_waitcnt vmcnt(19) ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; GFX9-NEXT: s_waitcnt vmcnt(21) -; GFX9-NEXT: 
v_lshlrev_b32_e32 v23, 8, v48 -; GFX9-NEXT: s_waitcnt vmcnt(20) -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v28 -; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(18) -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v38 -; GFX9-NEXT: s_waitcnt vmcnt(16) -; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v36 -; GFX9-NEXT: s_waitcnt vmcnt(14) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v35 -; GFX9-NEXT: s_waitcnt vmcnt(12) -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v34 -; GFX9-NEXT: s_waitcnt vmcnt(10) -; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v30 -; GFX9-NEXT: s_waitcnt vmcnt(8) -; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v42 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v44 +; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v2 +; GFX9-NEXT: s_waitcnt vmcnt(17) +; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v33 +; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec +; GFX9-NEXT: s_waitcnt vmcnt(15) +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(13) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v8 +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_lshlrev_b32_e32 v33, 8, v10 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v12 +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v28 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v26 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v24 +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB71_4 ; GFX9-NEXT: ; %bb.1: ; %cmp.false -; GFX9-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v38, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v35, v4 +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v4, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v26, v4 ; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v44, v2 -; GFX9-NEXT: v_mov_b32_e32 v49, v6 -; GFX9-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: 
v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v36, v58 -; GFX9-NEXT: v_mov_b32_e32 v58, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v7 +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v16, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v40, v3 -; GFX9-NEXT: v_mov_b32_e32 v48, v8 -; GFX9-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v63, v59 -; GFX9-NEXT: v_mov_b32_e32 v59, v56 -; GFX9-NEXT: v_mov_b32_e32 v56, v47 -; GFX9-NEXT: v_mov_b32_e32 v47, v46 -; GFX9-NEXT: v_mov_b32_e32 v46, v9 +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v34, v39 -; GFX9-NEXT: v_mov_b32_e32 v39, v10 +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v45, v25 -; GFX9-NEXT: v_mov_b32_e32 v25, v11 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v43, v12 +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; 
GFX9-NEXT: v_or_b32_sdwa v1, v54, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v16, v18 -; GFX9-NEXT: v_mov_b32_e32 v18, v20 -; GFX9-NEXT: v_mov_b32_e32 v20, v22 -; GFX9-NEXT: v_mov_b32_e32 v22, v24 -; GFX9-NEXT: v_mov_b32_e32 v24, v26 -; GFX9-NEXT: v_mov_b32_e32 v26, v61 -; GFX9-NEXT: v_mov_b32_e32 v61, v23 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v21, v19 -; GFX9-NEXT: v_mov_b32_e32 v19, v17 -; GFX9-NEXT: v_mov_b32_e32 v17, v13 +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v42, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v41, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s4, s4, s5 -; GFX9-NEXT: v_mov_b32_e32 v29, v33 -; GFX9-NEXT: v_mov_b32_e32 v33, v28 -; GFX9-NEXT: v_mov_b32_e32 v28, v15 +; GFX9-NEXT: v_or_b32_sdwa v2, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v3, v50, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s4, 0xffff -; GFX9-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v41, v5 +; GFX9-NEXT: v_or_b32_sdwa v0, v45, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s4, v0 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff @@ -43463,48 +43247,78 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; GFX9-NEXT: s_and_b32 s6, s6, 0xffff ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_or_b32 s6, s6, s7 -; GFX9-NEXT: v_mov_b32_e32 v30, v37 -; GFX9-NEXT: v_mov_b32_e32 v37, v27 -; GFX9-NEXT: v_mov_b32_e32 v27, v42 +; GFX9-NEXT: v_mov_b32_e32 v28, v44 +; GFX9-NEXT: v_mov_b32_e32 v33, v42 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 ; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_cbranch_execnz .LBB71_3 ; GFX9-NEXT: .LBB71_2: ; %cmp.true +; GFX9-NEXT: v_add_u32_e32 v1, 3, v28 +; GFX9-NEXT: v_add_u32_e32 v2, 3, v26 +; GFX9-NEXT: 
s_movk_i32 s4, 0x300 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s5, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s29, 8 ; GFX9-NEXT: s_or_b32 s5, s6, s5 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v31 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v44 -; GFX9-NEXT: v_add_u32_e32 v2, 3, v35 -; GFX9-NEXT: s_movk_i32 s4, 0x300 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v45 ; GFX9-NEXT: s_addk_i32 s5, 0x300 -; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_and_b32 s5, s5, 0xffff ; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s5, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v51 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 +; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 ; GFX9-NEXT: v_add_u32_e32 v1, 3, v48 -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_add_u32_e32 v0, 3, v39 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v43 -; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v16 +; 
GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v18 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v20 +; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v22 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v34 +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v35 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v36 +; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v37 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v40 +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_add_i32 s16, s16, 3 ; GFX9-NEXT: s_and_b32 s5, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s17, 8 @@ -43528,6 +43342,20 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; GFX9-NEXT: s_or_b32 s7, s8, s7 ; GFX9-NEXT: s_addk_i32 s6, 0x300 ; GFX9-NEXT: s_addk_i32 s7, 0x300 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v55 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v54 +; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v53 ; GFX9-NEXT: s_and_b32 s6, s6, 0xffff ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_add_i32 s24, s24, 3 @@ -43544,76 +43372,35 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; GFX9-NEXT: s_and_b32 s7, s7, 0xffff ; GFX9-NEXT: s_lshl_b32 s8, s8, 16 ; GFX9-NEXT: s_or_b32 s7, s7, s8 -; GFX9-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v16 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v18 -; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v20 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v22 -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v24 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v30 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v62 -; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 
0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v60 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v29 -; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v55 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v54 -; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v53 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 -; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v33 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v41 +; GFX9-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v51 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 -; GFX9-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v32 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v43 +; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v0, s5 ; GFX9-NEXT: v_mov_b32_e32 v1, s6 +; GFX9-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-NEXT: .LBB71_3: ; %end ; GFX9-NEXT: 
buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload @@ -43634,43 +43421,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] ; GFX9-NEXT: .LBB71_4: -; GFX9-NEXT: v_mov_b32_e32 v44, v2 -; GFX9-NEXT: v_mov_b32_e32 v34, v39 -; GFX9-NEXT: v_mov_b32_e32 v35, v4 -; GFX9-NEXT: v_mov_b32_e32 v29, v33 -; GFX9-NEXT: v_mov_b32_e32 v49, v6 -; GFX9-NEXT: v_mov_b32_e32 v48, v8 -; GFX9-NEXT: v_mov_b32_e32 v39, v10 -; GFX9-NEXT: v_mov_b32_e32 v43, v12 -; GFX9-NEXT: v_mov_b32_e32 v16, v18 -; GFX9-NEXT: v_mov_b32_e32 v18, v20 -; GFX9-NEXT: v_mov_b32_e32 v20, v22 -; GFX9-NEXT: v_mov_b32_e32 v22, v24 -; GFX9-NEXT: v_mov_b32_e32 v24, v26 -; GFX9-NEXT: v_mov_b32_e32 v26, v61 -; GFX9-NEXT: v_mov_b32_e32 v30, v37 -; GFX9-NEXT: v_mov_b32_e32 v38, v1 -; GFX9-NEXT: v_mov_b32_e32 v41, v5 -; GFX9-NEXT: v_mov_b32_e32 v40, v3 -; GFX9-NEXT: v_mov_b32_e32 v63, v59 -; GFX9-NEXT: v_mov_b32_e32 v36, v58 -; GFX9-NEXT: v_mov_b32_e32 v58, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v7 -; GFX9-NEXT: v_mov_b32_e32 v59, v56 -; GFX9-NEXT: v_mov_b32_e32 v56, v47 -; GFX9-NEXT: v_mov_b32_e32 v47, v46 -; GFX9-NEXT: v_mov_b32_e32 v46, v9 -; GFX9-NEXT: v_mov_b32_e32 v45, v25 -; GFX9-NEXT: v_mov_b32_e32 v61, v23 -; GFX9-NEXT: v_mov_b32_e32 v25, v11 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v21, v19 -; GFX9-NEXT: v_mov_b32_e32 v19, v17 -; GFX9-NEXT: v_mov_b32_e32 v17, v13 -; GFX9-NEXT: v_mov_b32_e32 v37, v27 -; GFX9-NEXT: v_mov_b32_e32 v27, v42 -; GFX9-NEXT: v_mov_b32_e32 v33, v28 -; GFX9-NEXT: v_mov_b32_e32 v28, v15 +; GFX9-NEXT: v_mov_b32_e32 v28, v44 +; GFX9-NEXT: v_mov_b32_e32 v26, v4 +; GFX9-NEXT: v_mov_b32_e32 v33, v42 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: s_branch .LBB71_2 ; @@ -44650,100 +44403,100 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 ; SI-NEXT: v_mov_b32_e32 v33, v1 ; SI-NEXT: v_mov_b32_e32 v32, v0 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v5, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v17, s25 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v21, s27 -; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: v_mov_b32_e32 v34, s16 +; SI-NEXT: v_mov_b32_e32 v35, s17 +; SI-NEXT: v_mov_b32_e32 v36, s18 +; SI-NEXT: v_mov_b32_e32 v37, s19 +; SI-NEXT: v_mov_b32_e32 v38, s20 +; SI-NEXT: v_mov_b32_e32 v39, s21 +; SI-NEXT: v_mov_b32_e32 v48, s22 +; SI-NEXT: v_mov_b32_e32 v49, s23 +; SI-NEXT: v_mov_b32_e32 v50, s24 +; SI-NEXT: v_mov_b32_e32 v51, s25 +; SI-NEXT: v_mov_b32_e32 v52, s26 +; SI-NEXT: v_mov_b32_e32 v53, s27 +; SI-NEXT: v_mov_b32_e32 v54, s28 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v25, s29 +; SI-NEXT: v_mov_b32_e32 v55, s29 ; SI-NEXT: s_cbranch_scc0 .LBB73_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v33, v32, 16 -; SI-NEXT: v_alignbit_b32 v48, v25, v24, 16 -; SI-NEXT: v_alignbit_b32 v39, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v38, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v37, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v36, v9, v8, 16 -; 
SI-NEXT: v_alignbit_b32 v35, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v34, v1, v0, 16 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 +; SI-NEXT: v_lshr_b64 v[29:30], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[54:55], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 ; SI-NEXT: s_cbranch_execnz .LBB73_3 ; SI-NEXT: .LBB73_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0 -; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0 -; SI-NEXT: v_add_f64 v[8:9], v[8:9], 1.0 -; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0 -; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0 -; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0 ; SI-NEXT: v_add_f64 v[32:33], v[32:33], 1.0 -; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0 -; SI-NEXT: v_alignbit_b32 v29, v33, v32, 16 -; SI-NEXT: v_alignbit_b32 v48, v25, v24, 16 -; SI-NEXT: v_alignbit_b32 v39, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v38, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v37, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v36, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v35, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v34, v1, v0, 16 +; SI-NEXT: v_add_f64 v[54:55], v[54:55], 1.0 +; SI-NEXT: v_add_f64 v[52:53], v[52:53], 1.0 +; SI-NEXT: v_add_f64 v[50:51], v[50:51], 1.0 +; SI-NEXT: v_add_f64 v[48:49], v[48:49], 1.0 +; SI-NEXT: v_add_f64 v[38:39], v[38:39], 1.0 +; SI-NEXT: v_add_f64 v[36:37], v[36:37], 1.0 +; SI-NEXT: v_add_f64 v[34:35], v[34:35], 1.0 +; SI-NEXT: v_lshr_b64 v[29:30], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[54:55], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 ; SI-NEXT: .LBB73_3: ; %end -; SI-NEXT: v_mov_b32_e32 v2, v1 -; SI-NEXT: v_mov_b32_e32 v6, v5 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v14, v13 -; SI-NEXT: v_mov_b32_e32 v18, v17 -; SI-NEXT: v_mov_b32_e32 v22, v21 -; SI-NEXT: v_mov_b32_e32 v26, v25 +; SI-NEXT: v_mov_b32_e32 v0, v34 +; SI-NEXT: v_mov_b32_e32 v2, v35 +; SI-NEXT: v_mov_b32_e32 v4, v36 
+; SI-NEXT: v_mov_b32_e32 v6, v37 +; SI-NEXT: v_mov_b32_e32 v8, v38 +; SI-NEXT: v_mov_b32_e32 v10, v39 +; SI-NEXT: v_mov_b32_e32 v12, v48 +; SI-NEXT: v_mov_b32_e32 v14, v49 +; SI-NEXT: v_mov_b32_e32 v16, v50 +; SI-NEXT: v_mov_b32_e32 v18, v51 +; SI-NEXT: v_mov_b32_e32 v20, v52 +; SI-NEXT: v_mov_b32_e32 v22, v53 +; SI-NEXT: v_mov_b32_e32 v24, v54 +; SI-NEXT: v_mov_b32_e32 v26, v55 ; SI-NEXT: v_mov_b32_e32 v28, v32 ; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v1, v34 -; SI-NEXT: v_mov_b32_e32 v5, v35 -; SI-NEXT: v_mov_b32_e32 v9, v36 -; SI-NEXT: v_mov_b32_e32 v13, v37 -; SI-NEXT: v_mov_b32_e32 v17, v38 -; SI-NEXT: v_mov_b32_e32 v21, v39 -; SI-NEXT: v_mov_b32_e32 v25, v48 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB73_4: -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: s_branch .LBB73_2 ; ; VI-LABEL: bitcast_v8f64_to_v32i16_scalar: @@ -51674,376 +51427,435 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32 ; SI-LABEL: bitcast_v8f64_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v40, s30, 0 +; SI-NEXT: v_writelane_b32 v40, s31, 1 +; SI-NEXT: v_writelane_b32 v40, s34, 2 +; SI-NEXT: v_writelane_b32 v40, s35, 3 +; SI-NEXT: v_writelane_b32 v40, s36, 4 +; SI-NEXT: v_writelane_b32 v40, s37, 5 +; SI-NEXT: v_writelane_b32 v40, s38, 6 +; SI-NEXT: v_writelane_b32 v40, s39, 7 +; SI-NEXT: v_writelane_b32 v40, s48, 8 +; SI-NEXT: v_writelane_b32 v40, s49, 9 +; SI-NEXT: v_writelane_b32 v40, s50, 10 +; SI-NEXT: v_writelane_b32 v40, s51, 11 +; SI-NEXT: v_writelane_b32 v40, s52, 12 +; SI-NEXT: v_writelane_b32 v40, s53, 13 +; SI-NEXT: v_writelane_b32 v40, s54, 14 +; SI-NEXT: v_writelane_b32 v40, s55, 15 +; SI-NEXT: v_writelane_b32 v40, s64, 16 +; SI-NEXT: v_writelane_b32 v40, s65, 17 +; SI-NEXT: v_writelane_b32 v40, s66, 18 +; SI-NEXT: v_writelane_b32 v40, s67, 19 +; SI-NEXT: v_writelane_b32 v40, s68, 20 +; SI-NEXT: v_writelane_b32 v40, s69, 21 +; SI-NEXT: v_writelane_b32 v40, s70, 22 +; SI-NEXT: v_writelane_b32 v40, s71, 23 +; SI-NEXT: v_writelane_b32 v40, s80, 24 +; SI-NEXT: v_writelane_b32 v40, s81, 25 +; SI-NEXT: v_writelane_b32 v40, s82, 26 +; SI-NEXT: v_writelane_b32 v40, s83, 27 +; SI-NEXT: v_writelane_b32 v40, s84, 28 +; SI-NEXT: v_writelane_b32 v40, s85, 29 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 +; SI-NEXT: v_writelane_b32 v40, s86, 30 ; SI-NEXT: v_readfirstlane_b32 s4, v1 ; SI-NEXT: s_and_b64 s[6:7], vcc, exec ; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_writelane_b32 v40, s87, 31 ; SI-NEXT: s_cbranch_scc0 
.LBB85_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_alignbit_b32 v2, s5, v1, 24 -; SI-NEXT: v_alignbit_b32 v17, s5, v1, 16 -; SI-NEXT: v_alignbit_b32 v18, s5, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s28 -; SI-NEXT: v_alignbit_b32 v20, s29, v1, 24 -; SI-NEXT: v_alignbit_b32 v4, s29, v1, 16 -; SI-NEXT: v_alignbit_b32 v19, s29, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s26 -; SI-NEXT: v_alignbit_b32 v6, s27, v1, 24 -; SI-NEXT: v_alignbit_b32 v21, s27, v1, 16 -; SI-NEXT: v_alignbit_b32 v22, s27, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s24 -; SI-NEXT: v_alignbit_b32 v8, s25, v1, 24 -; SI-NEXT: v_alignbit_b32 v23, s25, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, s25, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s22 -; SI-NEXT: v_alignbit_b32 v10, s23, v1, 24 -; SI-NEXT: v_alignbit_b32 v25, s23, v1, 16 -; SI-NEXT: v_alignbit_b32 v26, s23, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s20 -; SI-NEXT: v_alignbit_b32 v12, s21, v1, 24 -; SI-NEXT: v_alignbit_b32 v14, s21, v1, 16 -; SI-NEXT: v_alignbit_b32 v16, s21, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s18 -; SI-NEXT: v_alignbit_b32 v27, s19, v1, 24 -; SI-NEXT: v_alignbit_b32 v28, s19, v1, 16 -; SI-NEXT: v_alignbit_b32 v29, s19, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s16 -; SI-NEXT: v_alignbit_b32 v30, s17, v1, 24 -; SI-NEXT: v_alignbit_b32 v31, s17, v1, 16 -; SI-NEXT: v_alignbit_b32 v32, s17, v1, 8 -; SI-NEXT: s_lshr_b32 s8, s5, 24 -; SI-NEXT: s_lshr_b32 s9, s5, 16 -; SI-NEXT: s_lshr_b32 s10, s5, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_lshr_b32 s48, s5, 24 +; SI-NEXT: s_lshr_b32 s49, s5, 16 +; SI-NEXT: s_lshr_b32 s50, s5, 8 +; SI-NEXT: s_lshr_b32 s51, s29, 24 +; SI-NEXT: s_lshr_b32 s52, s29, 16 +; SI-NEXT: s_lshr_b32 s53, s29, 8 +; SI-NEXT: s_lshr_b32 s54, s27, 24 +; SI-NEXT: s_lshr_b32 s55, s27, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 8 +; SI-NEXT: s_lshr_b32 s65, s25, 24 +; SI-NEXT: s_lshr_b32 s66, s25, 16 +; SI-NEXT: s_lshr_b32 s67, s25, 8 +; SI-NEXT: s_lshr_b32 s68, s23, 24 +; SI-NEXT: s_lshr_b32 s69, s23, 16 +; SI-NEXT: s_lshr_b32 s70, s23, 8 +; SI-NEXT: s_lshr_b32 s71, s21, 24 +; SI-NEXT: s_lshr_b32 s80, s21, 16 +; SI-NEXT: s_lshr_b32 s81, s21, 8 +; SI-NEXT: s_lshr_b32 s82, s19, 24 +; SI-NEXT: s_lshr_b32 s83, s19, 16 +; SI-NEXT: s_lshr_b32 s84, s19, 8 +; SI-NEXT: s_lshr_b32 s85, s17, 24 +; SI-NEXT: s_lshr_b32 s86, s17, 16 +; SI-NEXT: s_lshr_b32 s87, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[76:77], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[24:25], 16 
+; SI-NEXT: s_lshr_b64 s[30:31], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB85_4 ; SI-NEXT: .LBB85_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[15:16], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[13:14], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[11:12], s[20:21], 1.0 -; SI-NEXT: v_add_f64 v[9:10], s[22:23], 1.0 -; SI-NEXT: v_add_f64 v[7:8], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[28:29], s[18:19], 1.0 ; SI-NEXT: v_add_f64 v[5:6], s[26:27], 1.0 -; SI-NEXT: v_add_f64 v[1:2], s[4:5], 1.0 +; SI-NEXT: v_add_f64 v[13:14], s[22:23], 1.0 ; SI-NEXT: v_add_f64 v[3:4], s[28:29], 1.0 -; SI-NEXT: v_readfirstlane_b32 s5, v2 -; SI-NEXT: v_readfirstlane_b32 s29, v4 -; SI-NEXT: v_readfirstlane_b32 s27, v6 +; SI-NEXT: v_lshr_b64 v[48:49], v[28:29], 24 +; SI-NEXT: v_add_f64 v[1:2], s[4:5], 1.0 +; SI-NEXT: v_add_f64 v[7:8], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[20:21], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[32:33], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[22:23], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[15:16], v[3:4], 24 +; SI-NEXT: v_lshr_b64 v[23:24], v[5:6], 8 +; SI-NEXT: v_lshr_b64 v[35:36], v[13:14], 8 +; SI-NEXT: v_lshr_b64 v[50:51], v[28:29], 8 +; SI-NEXT: v_lshr_b64 v[9:10], v[1:2], 24 +; SI-NEXT: v_lshr_b64 v[16:17], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 24 +; SI-NEXT: v_lshr_b64 v[36:37], v[20:21], 24 +; SI-NEXT: v_lshr_b64 v[51:52], v[32:33], 24 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[3:4], 8 +; SI-NEXT: v_lshr_b64 v[25:26], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[32:33], 16 +; SI-NEXT: v_readfirstlane_b32 s17, v33 +; SI-NEXT: v_readfirstlane_b32 s19, v29 +; SI-NEXT: v_readfirstlane_b32 s21, v21 +; SI-NEXT: v_readfirstlane_b32 s23, v14 ; SI-NEXT: v_readfirstlane_b32 s25, v8 -; SI-NEXT: v_readfirstlane_b32 s23, v10 -; SI-NEXT: v_readfirstlane_b32 s21, v12 -; SI-NEXT: v_readfirstlane_b32 s19, v14 -; SI-NEXT: v_readfirstlane_b32 s17, v16 -; SI-NEXT: v_alignbit_b32 v2, s5, v1, 24 -; SI-NEXT: v_alignbit_b32 v17, s5, v1, 16 -; SI-NEXT: v_alignbit_b32 v18, s5, v1, 8 -; SI-NEXT: v_alignbit_b32 v20, s29, v3, 24 -; SI-NEXT: v_alignbit_b32 v4, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v19, s29, v3, 8 -; SI-NEXT: v_alignbit_b32 v6, s27, v5, 24 -; SI-NEXT: v_alignbit_b32 v21, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, s27, v5, 8 -; SI-NEXT: v_alignbit_b32 v8, s25, v7, 24 -; SI-NEXT: v_alignbit_b32 v23, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, s25, v7, 8 -; SI-NEXT: v_alignbit_b32 v10, s23, v9, 24 -; SI-NEXT: v_alignbit_b32 v25, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v26, s23, v9, 8 -; SI-NEXT: v_alignbit_b32 v12, s21, v11, 24 -; SI-NEXT: s_lshr_b32 s8, s5, 24 -; SI-NEXT: s_lshr_b32 s9, s5, 16 -; SI-NEXT: s_lshr_b32 s10, s5, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; 
SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 -; SI-NEXT: v_alignbit_b32 v14, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v16, s21, v11, 8 -; SI-NEXT: v_alignbit_b32 v27, s19, v13, 24 -; SI-NEXT: v_alignbit_b32 v28, s19, v13, 16 -; SI-NEXT: v_alignbit_b32 v29, s19, v13, 8 -; SI-NEXT: v_alignbit_b32 v30, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v31, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v32, s17, v15, 8 +; SI-NEXT: v_readfirstlane_b32 s27, v6 +; SI-NEXT: v_readfirstlane_b32 s29, v4 +; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_lshr_b64 v[11:12], v[1:2], 8 +; SI-NEXT: v_lshr_b64 v[18:19], v[5:6], 24 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 8 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 24 +; SI-NEXT: v_lshr_b64 v[38:39], v[20:21], 8 +; SI-NEXT: v_lshr_b64 v[53:54], v[32:33], 8 +; SI-NEXT: s_lshr_b32 s48, s5, 24 +; SI-NEXT: s_lshr_b32 s49, s5, 16 +; SI-NEXT: s_lshr_b32 s50, s5, 8 +; SI-NEXT: s_lshr_b32 s51, s29, 24 +; SI-NEXT: s_lshr_b32 s52, s29, 16 +; SI-NEXT: s_lshr_b32 s53, s29, 8 +; SI-NEXT: s_lshr_b32 s54, s27, 24 +; SI-NEXT: s_lshr_b32 s55, s27, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 8 +; SI-NEXT: s_lshr_b32 s65, s25, 24 +; SI-NEXT: s_lshr_b32 s66, s25, 16 +; SI-NEXT: s_lshr_b32 s67, s25, 8 +; SI-NEXT: s_lshr_b32 s68, s23, 24 +; SI-NEXT: s_lshr_b32 s69, s23, 16 +; SI-NEXT: s_lshr_b32 s70, s23, 8 +; SI-NEXT: s_lshr_b32 s71, s21, 24 +; SI-NEXT: s_lshr_b32 s80, s21, 16 +; SI-NEXT: s_lshr_b32 s81, s21, 8 +; SI-NEXT: s_lshr_b32 s82, s19, 24 +; SI-NEXT: s_lshr_b32 s83, s19, 16 +; SI-NEXT: s_lshr_b32 s84, s19, 8 +; SI-NEXT: s_lshr_b32 s85, s17, 24 +; SI-NEXT: s_lshr_b32 s86, s17, 16 +; SI-NEXT: s_lshr_b32 s87, s17, 8 ; SI-NEXT: s_branch .LBB85_5 ; SI-NEXT: .LBB85_3: -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr87 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr85 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; 
implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr83 +; SI-NEXT: ; implicit-def: $sgpr82 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr6 ; SI-NEXT: s_branch .LBB85_2 ; SI-NEXT: .LBB85_4: -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 +; SI-NEXT: v_mov_b32_e32 v32, s16 +; SI-NEXT: v_mov_b32_e32 v28, s18 +; SI-NEXT: v_mov_b32_e32 v20, s20 +; SI-NEXT: v_mov_b32_e32 v13, s22 ; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 +; SI-NEXT: v_mov_b32_e32 v5, s26 +; SI-NEXT: v_mov_b32_e32 v3, s28 +; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_mov_b32_e32 v53, s74 +; SI-NEXT: v_mov_b32_e32 v52, s62 +; SI-NEXT: v_mov_b32_e32 v51, s58 +; SI-NEXT: v_mov_b32_e32 v50, s56 +; SI-NEXT: v_mov_b32_e32 v49, s44 +; SI-NEXT: v_mov_b32_e32 v48, s42 +; SI-NEXT: v_mov_b32_e32 v38, s40 +; SI-NEXT: v_mov_b32_e32 v37, s12 +; SI-NEXT: v_mov_b32_e32 v36, s8 +; SI-NEXT: v_mov_b32_e32 v35, s38 +; SI-NEXT: v_mov_b32_e32 v34, s36 +; SI-NEXT: v_mov_b32_e32 v30, s34 +; SI-NEXT: v_mov_b32_e32 v26, s30 +; SI-NEXT: v_mov_b32_e32 v25, s94 +; SI-NEXT: v_mov_b32_e32 v24, s92 +; SI-NEXT: v_mov_b32_e32 v23, s88 +; SI-NEXT: v_mov_b32_e32 v22, s78 +; SI-NEXT: v_mov_b32_e32 v18, s76 +; SI-NEXT: v_mov_b32_e32 v17, s72 +; SI-NEXT: v_mov_b32_e32 v16, s60 +; SI-NEXT: v_mov_b32_e32 v15, s46 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v10, s10 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: .LBB85_5: ; %end +; SI-NEXT: v_and_b32_e32 v2, 0xff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v53 ; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s6, s63, 8 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v32 -; SI-NEXT: v_and_b32_e32 v31, 0xff, v31 +; SI-NEXT: s_lshl_b32 s6, s87, 8 +; 
SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v52 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s62, 0xff -; SI-NEXT: v_or_b32_e32 v15, v15, v32 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v30 +; SI-NEXT: s_and_b32 s6, s86, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v51 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s61, 24 -; SI-NEXT: v_or_b32_e32 v30, v30, v31 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: s_lshl_b32 s7, s85, 24 +; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v15, v15, v30 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v15, v0, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v30, s4 -; SI-NEXT: buffer_store_dword v30, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v29 +; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v28 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v50 ; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s6, s60, 8 -; SI-NEXT: v_or_b32_e32 v13, v13, v15 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v28 +; SI-NEXT: s_lshl_b32 s6, s84, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v49 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s59, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 +; SI-NEXT: s_and_b32 s6, s83, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v48 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s58, 24 -; SI-NEXT: v_or_b32_e32 v15, v27, v15 -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: s_lshl_b32 s7, s82, 24 +; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v13, v13, v15 -; SI-NEXT: v_add_i32_e32 v15, vcc, 8, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 8, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: buffer_store_dword v15, v13, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v16 +; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v20 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v38 ; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s6, s57, 8 -; SI-NEXT: v_or_b32_e32 v11, v11, v13 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v14 +; SI-NEXT: s_lshl_b32 s6, s81, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v37 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s56, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v12 +; SI-NEXT: s_and_b32 s6, s80, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v6, 
24, v36 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s47, 24 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: s_lshl_b32 s7, s71, 24 +; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v11, v11, v12 -; SI-NEXT: v_add_i32_e32 v12, vcc, 16, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 16, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v26 +; SI-NEXT: v_add_i32_e32 v2, vcc, 20, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v13 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v35 ; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s6, s46, 8 -; SI-NEXT: v_or_b32_e32 v9, v9, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v25 +; SI-NEXT: s_lshl_b32 s6, s70, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v34 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: s_and_b32 s6, s69, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v30 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s44, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 +; SI-NEXT: s_lshl_b32 s7, s68, 24 +; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 24, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 24, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v24 +; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v7 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v26 ; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s6, s43, 8 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v23 +; SI-NEXT: s_lshl_b32 s6, s67, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v25 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s42, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; SI-NEXT: s_and_b32 s6, s66, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v24 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s41, 24 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 +; SI-NEXT: s_lshl_b32 s7, s65, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 
+; SI-NEXT: v_or_b32_e32 v4, v6, v4 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 32, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 32, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 36, v0 -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v22 +; SI-NEXT: v_add_i32_e32 v2, vcc, 36, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v5 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v23 ; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s6, s40, 8 -; SI-NEXT: v_or_b32_e32 v5, v5, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v21 +; SI-NEXT: s_lshl_b32 s6, s64, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v22 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: s_and_b32 s6, s55, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v18 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s14, 24 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 +; SI-NEXT: s_lshl_b32 s7, s54, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v4, v5, v4 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 40, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 40, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 44, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 +; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v17 ; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s6, s13, 8 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v19 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_lshl_b32 s6, s53, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v16 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s12, 0xff -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v20 +; SI-NEXT: s_and_b32 s6, s52, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v15 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s11, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: s_lshl_b32 s7, s51, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; 
SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v18 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v11 ; SI-NEXT: s_and_b32 s4, s5, 0xff -; SI-NEXT: s_lshl_b32 s5, s10, 8 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v17 +; SI-NEXT: s_lshl_b32 s5, s50, 8 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v10 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; SI-NEXT: s_and_b32 s5, s49, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v9 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s8, 24 +; SI-NEXT: s_lshl_b32 s6, s48, 24 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -52054,6 +51866,41 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s87, v40, 31 +; SI-NEXT: v_readlane_b32 s86, v40, 30 +; SI-NEXT: v_readlane_b32 s85, v40, 29 +; SI-NEXT: v_readlane_b32 s84, v40, 28 +; SI-NEXT: v_readlane_b32 s83, v40, 27 +; SI-NEXT: v_readlane_b32 s82, v40, 26 +; SI-NEXT: v_readlane_b32 s81, v40, 25 +; SI-NEXT: v_readlane_b32 s80, v40, 24 +; SI-NEXT: v_readlane_b32 s71, v40, 23 +; SI-NEXT: v_readlane_b32 s70, v40, 22 +; SI-NEXT: v_readlane_b32 s69, v40, 21 +; SI-NEXT: v_readlane_b32 s68, v40, 20 +; SI-NEXT: v_readlane_b32 s67, v40, 19 +; SI-NEXT: v_readlane_b32 s66, v40, 18 +; SI-NEXT: v_readlane_b32 s65, v40, 17 +; SI-NEXT: v_readlane_b32 s64, v40, 16 +; SI-NEXT: v_readlane_b32 s55, v40, 15 +; SI-NEXT: v_readlane_b32 s54, v40, 14 +; SI-NEXT: v_readlane_b32 s53, v40, 13 +; SI-NEXT: v_readlane_b32 s52, v40, 12 +; SI-NEXT: v_readlane_b32 s51, v40, 11 +; SI-NEXT: v_readlane_b32 s50, v40, 10 +; SI-NEXT: v_readlane_b32 s49, v40, 9 +; SI-NEXT: v_readlane_b32 s48, v40, 8 +; SI-NEXT: v_readlane_b32 s39, v40, 7 +; SI-NEXT: v_readlane_b32 s38, v40, 6 +; SI-NEXT: v_readlane_b32 s37, v40, 5 +; SI-NEXT: v_readlane_b32 s36, v40, 4 +; SI-NEXT: v_readlane_b32 s35, v40, 3 +; SI-NEXT: v_readlane_b32 s34, v40, 2 +; SI-NEXT: v_readlane_b32 s31, v40, 1 +; SI-NEXT: v_readlane_b32 s30, v40, 0 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; @@ -55687,42 +55534,46 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v48, v30 +; SI-NEXT: v_mov_b32_e32 v48, v28 +; SI-NEXT: 
v_mov_b32_e32 v38, v26 +; SI-NEXT: v_mov_b32_e32 v49, v24 +; SI-NEXT: v_mov_b32_e32 v51, v14 +; SI-NEXT: v_mov_b32_e32 v54, v12 +; SI-NEXT: v_mov_b32_e32 v34, v10 +; SI-NEXT: v_mov_b32_e32 v44, v6 ; SI-NEXT: v_mov_b32_e32 v33, v4 ; SI-NEXT: v_mov_b32_e32 v32, v2 ; SI-NEXT: v_mov_b32_e32 v31, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 -; SI-NEXT: v_lshlrev_b32_e32 v40, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v7 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v9 -; SI-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v13 -; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:68 +; SI-NEXT: v_lshlrev_b32_e32 v50, 24, v1 +; SI-NEXT: v_lshlrev_b32_e32 v39, 8, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v61, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v17 ; SI-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; SI-NEXT: 
v_lshlrev_b32_e32 v47, 24, v21 ; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v23 @@ -55734,49 +55585,48 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v2 ; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v51 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v39 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v30 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v42 -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v44 +; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v6 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v52 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v10 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v12 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_lshlrev_b32_e32 v29, 8, v14 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v28 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v26 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v24 +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB87_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_and_b32_e32 v0, 0xff, v32 -; SI-NEXT: v_mov_b32_e32 v38, v1 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 +; SI-NEXT: v_or_b32_e32 v0, v0, v39 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v40, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: v_or_b32_e32 v4, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v10 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: v_or_b32_e32 v0, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_or_b32_e32 v0, v0, v60 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_mov_b32_e32 v43, v6 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v6, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v51 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_or_b32_e32 v0, v0, v57 +; SI-NEXT: v_or_b32_e32 v0, v0, v58 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v7, v1 -; SI-NEXT: v_mov_b32_e32 v61, v57 -; SI-NEXT: v_mov_b32_e32 v57, v7 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: v_or_b32_e32 v7, v0, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v18 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v20 @@ -55784,113 +55634,80 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg 
%a, i32 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v47, v1 -; SI-NEXT: v_mov_b32_e32 v41, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_mov_b32_e32 v29, v8 +; SI-NEXT: v_mov_b32_e32 v26, v8 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 ; SI-NEXT: v_or_b32_e32 v8, v0, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v24 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 ; SI-NEXT: v_or_b32_e32 v0, v0, v46 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v9, v1 -; SI-NEXT: v_mov_b32_e32 v63, v59 -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v56, v9 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_or_b32_e32 v9, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v26 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v28 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v38 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v48 ; SI-NEXT: v_or_b32_e32 v0, v0, v45 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v25, v1 -; SI-NEXT: v_mov_b32_e32 v44, v10 ; SI-NEXT: v_or_b32_e32 v10, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v48 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v30 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v42 ; SI-NEXT: v_or_b32_e32 v0, v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v11, v1 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v16, v18 -; SI-NEXT: v_mov_b32_e32 v18, v20 -; SI-NEXT: v_mov_b32_e32 v20, v22 -; SI-NEXT: v_mov_b32_e32 v22, v24 -; SI-NEXT: v_mov_b32_e32 v24, v26 -; SI-NEXT: v_mov_b32_e32 v26, v28 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v25, v11 ; SI-NEXT: v_or_b32_e32 v11, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v60 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v40 ; SI-NEXT: v_or_b32_e32 v0, v0, v21 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v19, v1 -; SI-NEXT: v_mov_b32_e32 v36, v12 ; SI-NEXT: v_or_b32_e32 v12, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v34 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v35 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v55 ; SI-NEXT: v_or_b32_e32 v0, v0, v17 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v1, v13, v1 -; SI-NEXT: v_mov_b32_e32 v62, v58 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v46, v45 -; SI-NEXT: v_mov_b32_e32 v45, v23 -; SI-NEXT: v_mov_b32_e32 v23, v21 -; SI-NEXT: v_mov_b32_e32 v21, v19 -; SI-NEXT: v_mov_b32_e32 v19, v17 -; SI-NEXT: v_mov_b32_e32 v17, v13 ; SI-NEXT: v_or_b32_e32 v13, v0, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v55 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 -; SI-NEXT: v_or_b32_e32 v0, v0, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v36 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v53 +; SI-NEXT: v_or_b32_e32 v0, v0, v29 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 -; SI-NEXT: v_mov_b32_e32 v52, v14 +; SI-NEXT: v_or_b32_e32 v1, v14, v1 ; SI-NEXT: v_or_b32_e32 v14, v0, v1 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v43 +; SI-NEXT: v_or_b32_e32 v0, v0, v28 ; SI-NEXT: 
v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_mov_b32_e32 v27, v42 -; SI-NEXT: v_or_b32_e32 v1, v42, v1 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v0, 0xff, v53 -; SI-NEXT: v_or_b32_e32 v0, v0, v15 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_mov_b32_e32 v30, v48 -; SI-NEXT: v_mov_b32_e32 v48, v51 -; SI-NEXT: v_mov_b32_e32 v51, v15 +; SI-NEXT: v_or_b32_e32 v1, v15, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v44 ; SI-NEXT: v_or_b32_e32 v15, v0, v1 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v62 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: v_or_b32_e32 v3, v61, v3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v39, v40 -; SI-NEXT: v_mov_b32_e32 v40, v5 +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_or_b32_e32 v5, v2, v3 -; SI-NEXT: s_lshl_b32 s5, s17, 8 -; SI-NEXT: s_lshl_b32 s6, s19, 24 -; SI-NEXT: s_lshl_b32 s7, s23, 24 -; SI-NEXT: s_lshl_b32 s8, s27, 24 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_or_b32_e32 v3, s4, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_or_b32 s4, s4, s5 @@ -55899,6 +55716,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s22, 0xff ; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s23, 24 ; SI-NEXT: s_and_b32 s5, s5, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_or_b32 s5, s5, s6 @@ -55907,65 +55725,116 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s26, 0xff ; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s27, 24 ; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v52, v42 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_cbranch_execnz .LBB87_3 ; SI-NEXT: .LBB87_2: ; %cmp.true +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33 +; SI-NEXT: v_or_b32_e32 v1, v39, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v2, v63, v2 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_add_i32 s28, s28, 3 ; SI-NEXT: s_and_b32 s4, s28, 0xff ; SI-NEXT: s_lshl_b32 s5, s29, 8 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33 ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v1, v38, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_waitcnt 
vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v42, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v0, v50, v0 ; SI-NEXT: v_or_b32_e32 v0, s4, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v39, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v27 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v29 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v40, v1 +; SI-NEXT: v_or_b32_e32 v1, v61, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v1, v59, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v16 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 +; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v47, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v22 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v48 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; 
SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v25, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v23, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 @@ -56007,100 +55876,58 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: s_add_i32 s4, s4, 0x3000000 ; SI-NEXT: s_add_i32 s5, s5, 0x3000000 ; SI-NEXT: s_add_i32 s6, s6, 0x3000000 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v57, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v16 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v18 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v56, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24 +; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v40 +; SI-NEXT: v_or_b32_e32 v0, v21, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v28, v1 +; SI-NEXT: v_or_b32_e32 v1, v19, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v50 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v25, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 +; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49 -; SI-NEXT: v_or_b32_e32 v0, v23, v0 -; SI-NEXT: 
v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v17, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v21, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v35 -; SI-NEXT: v_or_b32_e32 v0, v19, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v17, v1 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 -; SI-NEXT: v_or_b32_e32 v0, v48, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37 -; SI-NEXT: v_or_b32_e32 v0, v51, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v43 +; SI-NEXT: v_or_b32_e32 v0, v28, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -56123,47 +55950,12 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB87_4: -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v27, v42 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v38, v1 -; SI-NEXT: v_mov_b32_e32 v43, v6 -; SI-NEXT: v_mov_b32_e32 v29, v8 -; SI-NEXT: v_mov_b32_e32 v44, v10 -; SI-NEXT: v_mov_b32_e32 v36, v12 -; SI-NEXT: v_mov_b32_e32 v52, v14 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v16, v18 -; SI-NEXT: v_mov_b32_e32 v18, v20 -; SI-NEXT: v_mov_b32_e32 v20, v22 -; SI-NEXT: v_mov_b32_e32 v22, v24 -; SI-NEXT: v_mov_b32_e32 v24, v26 -; SI-NEXT: v_mov_b32_e32 v26, v28 -; SI-NEXT: v_mov_b32_e32 v30, v48 -; SI-NEXT: v_mov_b32_e32 v39, v40 -; SI-NEXT: v_mov_b32_e32 v41, v3 -; SI-NEXT: v_mov_b32_e32 v40, v5 -; SI-NEXT: v_mov_b32_e32 v63, v59 -; SI-NEXT: v_mov_b32_e32 v62, v58 -; 
SI-NEXT: v_mov_b32_e32 v61, v57 -; SI-NEXT: v_mov_b32_e32 v57, v7 -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v46 -; SI-NEXT: v_mov_b32_e32 v56, v9 -; SI-NEXT: v_mov_b32_e32 v46, v45 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v45, v23 -; SI-NEXT: v_mov_b32_e32 v25, v11 -; SI-NEXT: v_mov_b32_e32 v23, v21 -; SI-NEXT: v_mov_b32_e32 v21, v19 -; SI-NEXT: v_mov_b32_e32 v19, v17 -; SI-NEXT: v_mov_b32_e32 v17, v13 -; SI-NEXT: v_mov_b32_e32 v48, v51 -; SI-NEXT: v_mov_b32_e32 v51, v15 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v26, v8 +; SI-NEXT: v_mov_b32_e32 v52, v42 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; SI-NEXT: s_branch .LBB87_2 ; @@ -56186,142 +55978,121 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v37, v30 -; VI-NEXT: v_mov_b32_e32 v61, v28 -; VI-NEXT: v_mov_b32_e32 v31, v0 +; VI-NEXT: v_mov_b32_e32 v36, v28 +; VI-NEXT: v_mov_b32_e32 v35, v26 +; VI-NEXT: v_mov_b32_e32 v34, v24 +; VI-NEXT: v_mov_b32_e32 v39, v14 +; VI-NEXT: v_mov_b32_e32 v48, v12 +; VI-NEXT: v_mov_b32_e32 v49, v10 +; VI-NEXT: v_mov_b32_e32 v50, v8 +; VI-NEXT: v_mov_b32_e32 v51, v6 +; VI-NEXT: v_mov_b32_e32 v44, v2 +; VI-NEXT: v_mov_b32_e32 v45, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8 -; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16 -; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32 -; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 -; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40 -; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:36 -; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:48 -; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:56 -; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:64 -; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72 -; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68 -; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1 -; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v3 -; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v13 -; VI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v17 +; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:4 +; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:12 +; 
VI-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:20 +; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 +; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:36 +; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 +; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:44 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:52 +; VI-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:60 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:68 +; VI-NEXT: v_mov_b32_e32 v37, v30 +; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v1 +; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v3 +; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v5 +; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; VI-NEXT: v_lshlrev_b32_e32 v57, 8, v17 ; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v21 ; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v23 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v25 -; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v27 -; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v29 -; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v25 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v27 +; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v29 ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v48 -; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v28 +; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v2 +; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v33 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec -; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v38 -; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v36 -; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v35 -; VI-NEXT: s_waitcnt vmcnt(12) -; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v34 -; VI-NEXT: s_waitcnt vmcnt(10) -; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v30 -; VI-NEXT: s_waitcnt vmcnt(8) -; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v42 -; VI-NEXT: s_waitcnt vmcnt(6) -; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; VI-NEXT: s_waitcnt vmcnt(4) -; VI-NEXT: v_lshlrev_b32_e32 v42, 8, v44 +; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v6 +; VI-NEXT: s_waitcnt vmcnt(13) +; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v8 +; VI-NEXT: s_waitcnt vmcnt(11) +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v10 +; VI-NEXT: s_waitcnt vmcnt(9) +; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v12 +; VI-NEXT: s_waitcnt vmcnt(7) +; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v28 +; VI-NEXT: s_waitcnt vmcnt(3) +; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v26 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v24 +; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 
offset:156 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; VI-NEXT: s_cbranch_scc0 .LBB87_4 ; VI-NEXT: ; %bb.1: ; %cmp.false -; VI-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v35, v4 +; VI-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v4, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_mov_b32_e32 v26, v4 ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v44, v2 -; VI-NEXT: v_mov_b32_e32 v49, v6 -; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v49, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v48, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v36, v58 -; VI-NEXT: v_mov_b32_e32 v58, v57 -; VI-NEXT: v_mov_b32_e32 v57, v7 +; VI-NEXT: v_or_b32_sdwa v0, v39, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v16, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v40, v3 -; VI-NEXT: v_mov_b32_e32 v48, v8 -; VI-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v63, v59 -; VI-NEXT: v_mov_b32_e32 v59, v56 -; VI-NEXT: v_mov_b32_e32 v56, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: v_mov_b32_e32 v46, v9 +; VI-NEXT: v_or_b32_sdwa v1, v34, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v34, v39 -; VI-NEXT: v_mov_b32_e32 v39, v10 +; VI-NEXT: v_or_b32_sdwa v0, v35, v25 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v36, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v45, v25 -; VI-NEXT: v_mov_b32_e32 v25, v11 +; VI-NEXT: v_or_b32_sdwa v0, v37, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v40, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v43, v12 +; VI-NEXT: v_or_b32_sdwa v0, v55, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v54, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v54, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v16, v18 -; VI-NEXT: v_mov_b32_e32 v18, v20 -; VI-NEXT: v_mov_b32_e32 v20, v22 -; VI-NEXT: v_mov_b32_e32 v22, v24 -; VI-NEXT: v_mov_b32_e32 v24, v26 -; VI-NEXT: v_mov_b32_e32 v26, v61 -; VI-NEXT: v_mov_b32_e32 v61, v23 -; VI-NEXT: v_mov_b32_e32 v23, v21 -; VI-NEXT: v_mov_b32_e32 v21, v19 -; VI-NEXT: v_mov_b32_e32 v19, v17 -; VI-NEXT: v_mov_b32_e32 v17, v13 +; VI-NEXT: v_or_b32_sdwa v0, v53, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v52, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v53, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v42, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v41, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: s_waitcnt vmcnt(3) -; VI-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v32, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(5) +; VI-NEXT: v_or_b32_sdwa v1, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: s_or_b32 s4, s4, s5 -; VI-NEXT: v_mov_b32_e32 v29, v33 -; VI-NEXT: v_mov_b32_e32 v33, v28 -; 
VI-NEXT: v_mov_b32_e32 v28, v15 +; VI-NEXT: v_or_b32_sdwa v2, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v50, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v41, v5 +; VI-NEXT: v_or_b32_sdwa v0, v45, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff @@ -56351,47 +56122,91 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: s_lshl_b32 s7, s7, 16 ; VI-NEXT: s_or_b32 s6, s6, s7 -; VI-NEXT: v_mov_b32_e32 v30, v37 -; VI-NEXT: v_mov_b32_e32 v37, v27 -; VI-NEXT: v_mov_b32_e32 v27, v42 +; VI-NEXT: v_mov_b32_e32 v28, v44 +; VI-NEXT: v_mov_b32_e32 v33, v42 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: s_cbranch_execnz .LBB87_3 ; VI-NEXT: .LBB87_2: ; %cmp.true +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v28 +; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v26 +; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 +; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; VI-NEXT: s_add_i32 s28, s28, 3 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_or_b32 s4, s5, s4 ; VI-NEXT: s_addk_i32 s4, 0x300 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v31 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v45 ; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_e32 v0, s4, v0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v51 ; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1 -; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v48 +; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39 -; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v48 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0 -; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v16 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v18 +; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v20 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v22 +; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v34 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35 +; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v36 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37 +; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 +; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 +; VI-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53 ; VI-NEXT: s_add_i32 s16, s16, 3 ; VI-NEXT: s_and_b32 s4, s16, 0xff ; VI-NEXT: s_lshl_b32 s5, s17, 8 @@ -56431,76 +56246,35 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_add_i32 s4, s4, 0x3000000 ; VI-NEXT: s_add_i32 s5, s5, 0x3000000 ; VI-NEXT: s_add_i32 s6, s6, 0x3000000 -; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0 -; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v16 -; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v18 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v20 -; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v22 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v24 -; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v26 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30 -; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v62 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; 
VI-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v60 -; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29 -; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55 -; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v54 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53 -; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33 +; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v41 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v51 -; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v50 +; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32 +; VI-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43 ; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0 -; VI-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 +; VI-NEXT: v_mov_b32_e32 v2, s6 ; VI-NEXT: .LBB87_3: ; %end ; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload @@ -56521,43 +56295,9 @@ define inreg 
<8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB87_4: -; VI-NEXT: v_mov_b32_e32 v44, v2 -; VI-NEXT: v_mov_b32_e32 v34, v39 -; VI-NEXT: v_mov_b32_e32 v35, v4 -; VI-NEXT: v_mov_b32_e32 v29, v33 -; VI-NEXT: v_mov_b32_e32 v49, v6 -; VI-NEXT: v_mov_b32_e32 v48, v8 -; VI-NEXT: v_mov_b32_e32 v39, v10 -; VI-NEXT: v_mov_b32_e32 v43, v12 -; VI-NEXT: v_mov_b32_e32 v16, v18 -; VI-NEXT: v_mov_b32_e32 v18, v20 -; VI-NEXT: v_mov_b32_e32 v20, v22 -; VI-NEXT: v_mov_b32_e32 v22, v24 -; VI-NEXT: v_mov_b32_e32 v24, v26 -; VI-NEXT: v_mov_b32_e32 v26, v61 -; VI-NEXT: v_mov_b32_e32 v30, v37 -; VI-NEXT: v_mov_b32_e32 v38, v1 -; VI-NEXT: v_mov_b32_e32 v41, v5 -; VI-NEXT: v_mov_b32_e32 v40, v3 -; VI-NEXT: v_mov_b32_e32 v63, v59 -; VI-NEXT: v_mov_b32_e32 v36, v58 -; VI-NEXT: v_mov_b32_e32 v58, v57 -; VI-NEXT: v_mov_b32_e32 v57, v7 -; VI-NEXT: v_mov_b32_e32 v59, v56 -; VI-NEXT: v_mov_b32_e32 v56, v47 -; VI-NEXT: v_mov_b32_e32 v47, v46 -; VI-NEXT: v_mov_b32_e32 v46, v9 -; VI-NEXT: v_mov_b32_e32 v45, v25 -; VI-NEXT: v_mov_b32_e32 v61, v23 -; VI-NEXT: v_mov_b32_e32 v25, v11 -; VI-NEXT: v_mov_b32_e32 v23, v21 -; VI-NEXT: v_mov_b32_e32 v21, v19 -; VI-NEXT: v_mov_b32_e32 v19, v17 -; VI-NEXT: v_mov_b32_e32 v17, v13 -; VI-NEXT: v_mov_b32_e32 v37, v27 -; VI-NEXT: v_mov_b32_e32 v27, v42 -; VI-NEXT: v_mov_b32_e32 v33, v28 -; VI-NEXT: v_mov_b32_e32 v28, v15 +; VI-NEXT: v_mov_b32_e32 v28, v44 +; VI-NEXT: v_mov_b32_e32 v26, v4 +; VI-NEXT: v_mov_b32_e32 v33, v42 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; VI-NEXT: s_branch .LBB87_2 ; @@ -56580,147 +56320,124 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v37, v30 -; GFX9-NEXT: v_mov_b32_e32 v61, v28 -; GFX9-NEXT: v_mov_b32_e32 v31, v0 +; GFX9-NEXT: v_mov_b32_e32 v36, v28 +; GFX9-NEXT: v_mov_b32_e32 v35, v26 +; GFX9-NEXT: v_mov_b32_e32 v34, v24 +; GFX9-NEXT: v_mov_b32_e32 v39, v14 +; GFX9-NEXT: v_mov_b32_e32 v48, v12 +; GFX9-NEXT: v_mov_b32_e32 v49, v10 +; GFX9-NEXT: v_mov_b32_e32 v50, v8 +; GFX9-NEXT: v_mov_b32_e32 v51, v6 +; GFX9-NEXT: v_mov_b32_e32 v44, v2 +; GFX9-NEXT: v_mov_b32_e32 v45, v0 ; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8 -; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16 -; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32 -; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28 -; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40 -; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:36 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:48 -; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:56 -; GFX9-NEXT: buffer_load_ushort v52, off, 
s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:64 -; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72 -; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68 -; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v17 +; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:4 +; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:12 +; GFX9-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:20 +; GFX9-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:36 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:44 +; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:52 +; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:60 +; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:68 +; GFX9-NEXT: v_mov_b32_e32 v37, v30 +; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v57, 8, v17 ; GFX9-NEXT: v_lshlrev_b32_e32 v56, 8, v19 ; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v21 ; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v23 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v25 -; GFX9-NEXT: v_lshlrev_b32_e32 v45, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29 -; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v29 +; GFX9-NEXT: s_waitcnt vmcnt(19) ; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; GFX9-NEXT: s_waitcnt vmcnt(21) -; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v48 -; GFX9-NEXT: s_waitcnt vmcnt(20) -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v28 -; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec ; GFX9-NEXT: s_waitcnt vmcnt(18) -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v38 -; GFX9-NEXT: s_waitcnt vmcnt(16) -; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v36 -; GFX9-NEXT: s_waitcnt vmcnt(14) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v35 -; GFX9-NEXT: s_waitcnt vmcnt(12) -; 
GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v34 -; GFX9-NEXT: s_waitcnt vmcnt(10) -; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v30 -; GFX9-NEXT: s_waitcnt vmcnt(8) -; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v42 -; GFX9-NEXT: s_waitcnt vmcnt(6) -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v43 -; GFX9-NEXT: s_waitcnt vmcnt(4) -; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v44 +; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v2 +; GFX9-NEXT: s_waitcnt vmcnt(17) +; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v33 +; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec +; GFX9-NEXT: s_waitcnt vmcnt(15) +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(13) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v8 +; GFX9-NEXT: s_waitcnt vmcnt(11) +; GFX9-NEXT: v_lshlrev_b32_e32 v33, 8, v10 +; GFX9-NEXT: s_waitcnt vmcnt(9) +; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v12 +; GFX9-NEXT: s_waitcnt vmcnt(7) +; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v28 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v26 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v24 +; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; GFX9-NEXT: s_cbranch_scc0 .LBB87_4 ; GFX9-NEXT: ; %bb.1: ; %cmp.false -; GFX9-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v38, v1 -; GFX9-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v35, v4 +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v4, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v26, v4 ; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v44, v2 -; GFX9-NEXT: v_mov_b32_e32 v49, v6 -; GFX9-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v48, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v36, v58 -; GFX9-NEXT: v_mov_b32_e32 v58, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v7 +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v16, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; 
GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v40, v3 -; GFX9-NEXT: v_mov_b32_e32 v48, v8 -; GFX9-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v63, v59 -; GFX9-NEXT: v_mov_b32_e32 v59, v56 -; GFX9-NEXT: v_mov_b32_e32 v56, v47 -; GFX9-NEXT: v_mov_b32_e32 v47, v46 -; GFX9-NEXT: v_mov_b32_e32 v46, v9 +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v34, v39 -; GFX9-NEXT: v_mov_b32_e32 v39, v10 +; GFX9-NEXT: v_or_b32_sdwa v0, v35, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v23 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v45, v25 -; GFX9-NEXT: v_mov_b32_e32 v25, v11 +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v43, v12 +; GFX9-NEXT: v_or_b32_sdwa v0, v55, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v54, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v54, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v16, v18 -; GFX9-NEXT: v_mov_b32_e32 v18, v20 -; GFX9-NEXT: v_mov_b32_e32 v20, v22 -; GFX9-NEXT: v_mov_b32_e32 v22, v24 -; GFX9-NEXT: v_mov_b32_e32 v24, v26 -; GFX9-NEXT: v_mov_b32_e32 v26, v61 -; GFX9-NEXT: v_mov_b32_e32 v61, v23 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: 
v_mov_b32_e32 v21, v19 -; GFX9-NEXT: v_mov_b32_e32 v19, v17 -; GFX9-NEXT: v_mov_b32_e32 v17, v13 +; GFX9-NEXT: v_or_b32_sdwa v0, v53, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v53, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v42, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v41, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: s_waitcnt vmcnt(5) +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s4, s4, s5 -; GFX9-NEXT: v_mov_b32_e32 v29, v33 -; GFX9-NEXT: v_mov_b32_e32 v33, v28 -; GFX9-NEXT: v_mov_b32_e32 v28, v15 +; GFX9-NEXT: v_or_b32_sdwa v2, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v3, v50, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s4, 0xffff -; GFX9-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v41, v5 +; GFX9-NEXT: v_or_b32_sdwa v0, v45, v30 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s4, v0 ; GFX9-NEXT: s_and_b32 s4, s16, 0xff @@ -56750,48 +56467,78 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_and_b32 s6, s6, 0xffff ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_or_b32 s6, s6, s7 -; GFX9-NEXT: v_mov_b32_e32 v30, v37 -; GFX9-NEXT: v_mov_b32_e32 v37, v27 -; GFX9-NEXT: v_mov_b32_e32 v27, v42 +; GFX9-NEXT: v_mov_b32_e32 v28, v44 +; GFX9-NEXT: v_mov_b32_e32 v33, v42 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 ; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_cbranch_execnz .LBB87_3 ; GFX9-NEXT: .LBB87_2: ; %cmp.true +; GFX9-NEXT: v_add_u32_e32 v1, 3, v28 +; GFX9-NEXT: v_add_u32_e32 v2, 3, v26 +; GFX9-NEXT: s_movk_i32 s4, 0x300 +; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 +; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, 
v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s5, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s29, 8 ; GFX9-NEXT: s_or_b32 s5, s6, s5 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v31 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v44 -; GFX9-NEXT: v_add_u32_e32 v2, 3, v35 -; GFX9-NEXT: s_movk_i32 s4, 0x300 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v45 ; GFX9-NEXT: s_addk_i32 s5, 0x300 -; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_and_b32 s5, s5, 0xffff ; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 -; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_or_b32_e32 v3, s5, v0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v51 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 +; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_add_u32_e32 v0, 3, v49 ; GFX9-NEXT: v_add_u32_e32 v1, 3, v48 -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_add_u32_e32 v0, 3, v39 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v43 -; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v16 +; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v18 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v20 +; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v22 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v34 +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v35 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v36 +; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v37 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v40 +; GFX9-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: s_add_i32 s16, s16, 3 ; GFX9-NEXT: s_and_b32 s5, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s17, 8 @@ -56815,6 +56562,20 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_or_b32 s7, s8, s7 ; GFX9-NEXT: s_addk_i32 s6, 0x300 ; GFX9-NEXT: s_addk_i32 s7, 0x300 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: v_add_u32_e32 v0, 3, v55 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v54 +; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], 
s32 offset:152 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v53 ; GFX9-NEXT: s_and_b32 s6, s6, 0xffff ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_add_i32 s24, s24, 3 @@ -56831,76 +56592,35 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_and_b32 s7, s7, 0xffff ; GFX9-NEXT: s_lshl_b32 s8, s8, 16 ; GFX9-NEXT: s_or_b32 s7, s7, s8 -; GFX9-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v0, 3, v0 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v16 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v18 -; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v20 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v22 -; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: v_add_u32_e32 v0, 3, v24 -; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v26 -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v30 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v62 -; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v60 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v29 -; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: 
v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 -; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v55 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v54 -; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v53 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v52 -; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; GFX9-NEXT: v_add_u32_e32 v0, 3, v33 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v41 +; GFX9-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; GFX9-NEXT: v_add_u32_e32 v0, 3, v51 -; GFX9-NEXT: v_add_u32_e32 v1, 3, v50 -; GFX9-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v1, v27, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v32 +; GFX9-NEXT: v_add_u32_e32 v1, 3, v43 +; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 ; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v0, s5 ; GFX9-NEXT: v_mov_b32_e32 v1, s6 +; GFX9-NEXT: v_mov_b32_e32 v2, s7 ; GFX9-NEXT: .LBB87_3: ; %end ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload @@ -56921,43 +56641,9 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] ; GFX9-NEXT: .LBB87_4: -; GFX9-NEXT: v_mov_b32_e32 v44, v2 -; GFX9-NEXT: v_mov_b32_e32 v34, 
v39 -; GFX9-NEXT: v_mov_b32_e32 v35, v4 -; GFX9-NEXT: v_mov_b32_e32 v29, v33 -; GFX9-NEXT: v_mov_b32_e32 v49, v6 -; GFX9-NEXT: v_mov_b32_e32 v48, v8 -; GFX9-NEXT: v_mov_b32_e32 v39, v10 -; GFX9-NEXT: v_mov_b32_e32 v43, v12 -; GFX9-NEXT: v_mov_b32_e32 v16, v18 -; GFX9-NEXT: v_mov_b32_e32 v18, v20 -; GFX9-NEXT: v_mov_b32_e32 v20, v22 -; GFX9-NEXT: v_mov_b32_e32 v22, v24 -; GFX9-NEXT: v_mov_b32_e32 v24, v26 -; GFX9-NEXT: v_mov_b32_e32 v26, v61 -; GFX9-NEXT: v_mov_b32_e32 v30, v37 -; GFX9-NEXT: v_mov_b32_e32 v38, v1 -; GFX9-NEXT: v_mov_b32_e32 v41, v5 -; GFX9-NEXT: v_mov_b32_e32 v40, v3 -; GFX9-NEXT: v_mov_b32_e32 v63, v59 -; GFX9-NEXT: v_mov_b32_e32 v36, v58 -; GFX9-NEXT: v_mov_b32_e32 v58, v57 -; GFX9-NEXT: v_mov_b32_e32 v57, v7 -; GFX9-NEXT: v_mov_b32_e32 v59, v56 -; GFX9-NEXT: v_mov_b32_e32 v56, v47 -; GFX9-NEXT: v_mov_b32_e32 v47, v46 -; GFX9-NEXT: v_mov_b32_e32 v46, v9 -; GFX9-NEXT: v_mov_b32_e32 v45, v25 -; GFX9-NEXT: v_mov_b32_e32 v61, v23 -; GFX9-NEXT: v_mov_b32_e32 v25, v11 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v21, v19 -; GFX9-NEXT: v_mov_b32_e32 v19, v17 -; GFX9-NEXT: v_mov_b32_e32 v17, v13 -; GFX9-NEXT: v_mov_b32_e32 v37, v27 -; GFX9-NEXT: v_mov_b32_e32 v27, v42 -; GFX9-NEXT: v_mov_b32_e32 v33, v28 -; GFX9-NEXT: v_mov_b32_e32 v28, v15 +; GFX9-NEXT: v_mov_b32_e32 v28, v44 +; GFX9-NEXT: v_mov_b32_e32 v26, v4 +; GFX9-NEXT: v_mov_b32_e32 v33, v42 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: s_branch .LBB87_2 ; @@ -58845,95 +58531,120 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i ; SI-LABEL: bitcast_v32f16_to_v32i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v31, v17 -; SI-NEXT: v_mov_b32_e32 v30, v16 -; SI-NEXT: v_mov_b32_e32 v29, v15 -; SI-NEXT: v_mov_b32_e32 v28, v14 -; SI-NEXT: v_mov_b32_e32 v15, v1 -; SI-NEXT: v_mov_b32_e32 v14, v0 +; SI-NEXT: v_mov_b32_e32 v21, v16 +; SI-NEXT: v_mov_b32_e32 v25, v15 +; SI-NEXT: v_mov_b32_e32 v26, v12 +; SI-NEXT: v_mov_b32_e32 v29, v11 +; SI-NEXT: v_mov_b32_e32 v22, v8 +; SI-NEXT: v_mov_b32_e32 v30, v7 +; SI-NEXT: v_mov_b32_e32 v32, v4 +; SI-NEXT: v_mov_b32_e32 v33, v3 +; SI-NEXT: v_mov_b32_e32 v34, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v23, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v27, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 
v9, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v37, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v33, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB91_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB91_3 ; SI-NEXT: .LBB91_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: 
v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v48 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v37 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_or_b32_e32 v18, v18, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v49 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v35 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v33 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v38 +; SI-NEXT: v_or_b32_e32 v22, v22, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v52 +; SI-NEXT: v_or_b32_e32 v26, v26, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 +; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 @@ -58942,52 +58653,34 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_or_b32_e32 v30, v30, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_or_b32_e32 v26, v26, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_or_b32_e32 v22, v22, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 ; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_or_b32_e32 v18, v18, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 ; SI-NEXT: 
v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_or_b32_e32 v14, v14, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 @@ -58996,19 +58689,23 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_or_b32_e32 v10, v10, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v32 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v31 +; SI-NEXT: v_lshr_b64 v[48:49], v[17:18], 16 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: v_or_b32_e32 v30, v30, v32 ; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; SI-NEXT: v_lshr_b64 v[37:38], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[29:30], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 @@ -59017,15 +58714,15 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i ; SI-NEXT: v_or_b32_e32 v20, v20, v21 ; SI-NEXT: v_or_b32_e32 v24, v24, v25 ; SI-NEXT: v_or_b32_e32 v28, v28, v29 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 -; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16 ; SI-NEXT: .LBB91_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v37 +; SI-NEXT: v_mov_b32_e32 v5, v35 +; SI-NEXT: v_mov_b32_e32 v9, v33 +; SI-NEXT: v_mov_b32_e32 v13, v38 +; SI-NEXT: v_mov_b32_e32 v17, v48 +; SI-NEXT: v_mov_b32_e32 v21, v49 +; SI-NEXT: v_mov_b32_e32 v25, v52 +; SI-NEXT: v_mov_b32_e32 v29, v50 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB91_4: ; SI-NEXT: s_branch .LBB91_2 @@ -61430,185 +61127,185 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a ; SI-NEXT: s_waitcnt expcnt(6) ; SI-NEXT: v_mul_f32_e64 v57, 1.0, s16 ; SI-NEXT: 
v_mul_f32_e64 v56, 1.0, s17 -; SI-NEXT: v_mul_f32_e32 v35, 1.0, v0 -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v0 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v1 ; SI-NEXT: v_mul_f32_e32 v47, 1.0, v2 ; SI-NEXT: v_mul_f32_e32 v46, 1.0, v3 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v4 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v52, 1.0, v5 ; SI-NEXT: v_mul_f32_e32 v45, 1.0, v6 ; SI-NEXT: v_mul_f32_e32 v44, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v8 -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v9 +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v8 +; SI-NEXT: v_mul_f32_e32 v21, 1.0, v9 ; SI-NEXT: v_mul_f32_e32 v43, 1.0, v10 ; SI-NEXT: v_mul_f32_e32 v42, 1.0, v11 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v12 -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v13 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13 ; SI-NEXT: v_mul_f32_e32 v41, 1.0, v14 ; SI-NEXT: v_mul_f32_e32 v40, 1.0, v15 -; SI-NEXT: v_mul_f32_e32 v55, 1.0, v16 -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v17 -; SI-NEXT: v_mul_f32_e64 v33, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v32, 1.0, s19 +; SI-NEXT: v_mul_f32_e32 v29, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_mul_f32_e64 v32, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mul_f32_e64 v63, 1.0, s20 ; SI-NEXT: v_mul_f32_e64 v62, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v37, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v36, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v51, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v5, 1.0, s23 ; SI-NEXT: v_mul_f32_e64 v61, 1.0, s24 ; SI-NEXT: v_mul_f32_e64 v60, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v49, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v48, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v54, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s27 ; SI-NEXT: v_mul_f32_e64 v59, 1.0, s28 ; SI-NEXT: v_mul_f32_e64 v58, 1.0, s29 ; SI-NEXT: s_cbranch_scc0 .LBB95_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v57 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v56 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v56 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v63 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v62 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v37 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v36 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v62 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 ; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v61 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v60 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v49 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v48 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v60 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v54 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 ; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v59 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v58 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v35 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v34 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v39 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v47 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v46 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v39 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v38 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v52 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v45 -; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v44 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v51 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v50 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 ; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v43 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v42 -; 
SI-NEXT: v_lshrrev_b32_e32 v26, 16, v53 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v52 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v41 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v40 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v55 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v54 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v58 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v46 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v44 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v42 +; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v40 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v29 ; SI-NEXT: s_cbranch_execnz .LBB95_3 ; SI-NEXT: .LBB95_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v56 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v57 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v28 ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v62 ; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v63 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v60 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v60 ; SI-NEXT: v_alignbit_b32 v4, v4, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v61 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5 -; SI-NEXT: v_alignbit_b32 v8, v6, v2, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v58 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v59 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_alignbit_b32 v12, v7, v2, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v46 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v47 +; SI-NEXT: v_alignbit_b32 v8, v7, v2, 16 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v58 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v59 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v7 -; SI-NEXT: v_alignbit_b32 v16, v9, v2, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v44 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v7 +; SI-NEXT: v_alignbit_b32 v12, v10, v2, 16 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v46 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v47 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 +; SI-NEXT: v_alignbit_b32 v16, v11, v2, 16 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v44 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v45 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v9 -; SI-NEXT: v_alignbit_b32 v20, v10, v2, 16 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v42 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v11 +; SI-NEXT: v_alignbit_b32 v20, v14, v2, 16 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v42 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v43 -; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; 
SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_alignbit_b32 v24, v11, v2, 16 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v40 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 +; SI-NEXT: v_alignbit_b32 v24, v15, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v41 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 +; SI-NEXT: v_add_f32_e32 v41, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v40 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v11 -; SI-NEXT: v_alignbit_b32 v28, v13, v2, 16 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v54 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v29 +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v25 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v13 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v21 +; SI-NEXT: v_alignbit_b32 v26, v27, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v55 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v13 -; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v11 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v13 +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v11 ; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v52 -; SI-NEXT: v_alignbit_b32 v30, v31, v2, 16 +; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; SI-NEXT: v_alignbit_b32 v22, v23, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v53 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v17 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v11 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v10 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v50 -; SI-NEXT: v_alignbit_b32 v26, v27, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v51 -; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v10 -; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v38 -; SI-NEXT: v_alignbit_b32 v22, v23, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v39 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v9 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v34 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v11 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v39 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_alignbit_b32 v18, v19, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v35 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v49 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_alignbit_b32 v30, v31, v15, 16 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v7 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v48 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v10 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v14 ; SI-NEXT: v_alignbit_b32 v14, v15, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v49 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v54 +; SI-NEXT: v_add_f32_e32 v7, 
0x40c00000, v7 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v6 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v36 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v7 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; SI-NEXT: v_alignbit_b32 v10, v11, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v37 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v51 ; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 ; SI-NEXT: v_alignbit_b32 v6, v7, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v32 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 -; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v28 +; SI-NEXT: v_lshr_b64 v[33:34], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[29:30], 16 +; SI-NEXT: v_alignbit_b32 v28, v40, v41, 16 ; SI-NEXT: .LBB95_3: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload @@ -61626,41 +61323,49 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v1, v33 +; SI-NEXT: v_mov_b32_e32 v5, v34 +; SI-NEXT: v_mov_b32_e32 v9, v35 +; SI-NEXT: v_mov_b32_e32 v13, v36 +; SI-NEXT: v_mov_b32_e32 v17, v37 +; SI-NEXT: v_mov_b32_e32 v21, v38 +; SI-NEXT: v_mov_b32_e32 v25, v50 +; SI-NEXT: v_mov_b32_e32 v29, v48 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB95_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; 
implicit-def: $vgpr15 ; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: s_branch .LBB95_2 ; ; VI-LABEL: bitcast_v32bf16_to_v32i16_scalar: @@ -64884,534 +64589,686 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[4:5] -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v37, s30, 0 -; SI-NEXT: v_writelane_b32 v37, s31, 1 -; SI-NEXT: v_writelane_b32 v37, s34, 2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: v_writelane_b32 v20, s51, 11 +; SI-NEXT: v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 +; SI-NEXT: v_writelane_b32 v20, s68, 20 +; SI-NEXT: v_writelane_b32 v20, s69, 21 +; SI-NEXT: v_writelane_b32 v20, s70, 22 +; SI-NEXT: v_writelane_b32 v20, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s80, 24 +; SI-NEXT: v_writelane_b32 v20, s81, 25 +; SI-NEXT: v_writelane_b32 v20, s82, 26 +; SI-NEXT: v_writelane_b32 v20, s83, 27 +; SI-NEXT: v_writelane_b32 v20, s84, 28 +; SI-NEXT: v_writelane_b32 v20, s85, 29 +; SI-NEXT: v_writelane_b32 v20, s86, 30 +; SI-NEXT: v_writelane_b32 v20, s87, 31 +; SI-NEXT: v_writelane_b32 v20, s96, 32 +; SI-NEXT: v_writelane_b32 v20, s97, 33 +; SI-NEXT: v_writelane_b32 v20, s98, 34 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_writelane_b32 v37, s35, 3 -; SI-NEXT: v_readfirstlane_b32 s34, v18 -; SI-NEXT: v_readfirstlane_b32 s35, v17 -; SI-NEXT: v_readfirstlane_b32 s30, v14 -; SI-NEXT: v_readfirstlane_b32 s31, v13 -; SI-NEXT: v_readfirstlane_b32 s94, v10 -; SI-NEXT: v_readfirstlane_b32 s95, v9 -; SI-NEXT: v_readfirstlane_b32 s92, v6 -; SI-NEXT: v_readfirstlane_b32 s93, v5 -; SI-NEXT: 
v_readfirstlane_b32 s90, v2 -; SI-NEXT: v_readfirstlane_b32 s91, v1 +; SI-NEXT: v_writelane_b32 v20, s99, 35 +; SI-NEXT: s_mov_b32 s93, s18 +; SI-NEXT: s_mov_b32 s31, s17 +; SI-NEXT: v_readfirstlane_b32 s59, v18 +; SI-NEXT: v_readfirstlane_b32 s18, v17 +; SI-NEXT: v_readfirstlane_b32 s63, v16 +; SI-NEXT: v_readfirstlane_b32 s17, v15 +; SI-NEXT: v_readfirstlane_b32 s72, v14 +; SI-NEXT: v_readfirstlane_b32 s76, v13 +; SI-NEXT: v_readfirstlane_b32 s57, v12 +; SI-NEXT: v_readfirstlane_b32 s61, v11 +; SI-NEXT: v_readfirstlane_b32 s44, v10 +; SI-NEXT: v_readfirstlane_b32 s58, v9 +; SI-NEXT: v_readfirstlane_b32 s62, v8 +; SI-NEXT: v_readfirstlane_b32 s45, v7 +; SI-NEXT: v_readfirstlane_b32 s96, v6 +; SI-NEXT: v_readfirstlane_b32 s97, v5 +; SI-NEXT: v_readfirstlane_b32 s99, v4 +; SI-NEXT: v_readfirstlane_b32 s46, v3 +; SI-NEXT: v_readfirstlane_b32 s83, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v16 +; SI-NEXT: v_readfirstlane_b32 s85, v1 +; SI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane ; SI-NEXT: s_cbranch_scc0 .LBB97_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 +; SI-NEXT: s_lshl_b32 s5, s31, 16 ; SI-NEXT: s_or_b32 s40, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff +; SI-NEXT: s_and_b32 s4, s93, 0xffff ; SI-NEXT: s_lshl_b32 s5, s19, 16 ; SI-NEXT: s_or_b32 s41, s4, s5 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 24 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v21, s4, 4 +; SI-NEXT: v_writelane_b32 v21, s5, 5 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v21, s4, 2 +; SI-NEXT: v_writelane_b32 v21, s5, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v21, s4, 0 +; SI-NEXT: v_writelane_b32 v21, s5, 1 ; SI-NEXT: s_and_b32 s4, s20, 0xffff ; SI-NEXT: s_lshl_b32 s5, s21, 16 ; SI-NEXT: s_or_b32 s14, s4, s5 ; SI-NEXT: s_and_b32 s4, s22, 0xffff ; SI-NEXT: s_lshl_b32 s5, s23, 16 ; SI-NEXT: s_or_b32 s15, s4, s5 +; SI-NEXT: s_lshr_b64 s[4:5], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v21, s4, 10 +; SI-NEXT: v_writelane_b32 v21, s5, 11 +; SI-NEXT: s_lshr_b64 s[4:5], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v21, s4, 8 +; SI-NEXT: v_writelane_b32 v21, s5, 9 +; SI-NEXT: s_lshr_b64 s[4:5], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v21, s4, 6 +; SI-NEXT: v_writelane_b32 v21, s5, 7 ; SI-NEXT: s_and_b32 s4, s24, 0xffff ; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: v_mov_b32_e32 v1, s40 -; SI-NEXT: s_or_b32 s12, s4, s5 +; SI-NEXT: s_or_b32 s10, s4, s5 ; SI-NEXT: s_and_b32 s4, s26, 0xffff ; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: v_alignbit_b32 v18, s41, v1, 24 -; SI-NEXT: v_alignbit_b32 v25, s41, v1, 16 -; SI-NEXT: v_alignbit_b32 v30, s41, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s14 -; SI-NEXT: s_or_b32 s13, s4, s5 +; SI-NEXT: s_or_b32 s11, s4, s5 +; SI-NEXT: s_lshr_b64 s[4:5], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v21, s4, 16 +; SI-NEXT: v_writelane_b32 v21, s5, 17 +; SI-NEXT: s_lshr_b64 s[4:5], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v21, s4, 14 +; SI-NEXT: v_writelane_b32 v21, s5, 15 +; SI-NEXT: s_lshr_b64 s[4:5], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v21, s4, 12 +; SI-NEXT: v_writelane_b32 v21, s5, 13 ; SI-NEXT: s_and_b32 s4, s28, 0xffff ; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: v_alignbit_b32 v19, s15, v1, 24 -; SI-NEXT: v_alignbit_b32 v26, s15, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, s15, v1, 8 -; SI-NEXT: 
v_mov_b32_e32 v1, s12 -; SI-NEXT: s_or_b32 s10, s4, s5 -; SI-NEXT: s_and_b32 s4, s91, 0xffff -; SI-NEXT: s_lshl_b32 s5, s90, 16 -; SI-NEXT: v_alignbit_b32 v17, s13, v1, 24 -; SI-NEXT: v_alignbit_b32 v23, s13, v1, 16 -; SI-NEXT: v_alignbit_b32 v29, s13, v1, 8 -; SI-NEXT: s_or_b32 s11, s4, s5 -; SI-NEXT: v_mov_b32_e32 v1, s10 -; SI-NEXT: v_alignbit_b32 v16, s11, v1, 24 -; SI-NEXT: v_alignbit_b32 v20, s11, v1, 16 -; SI-NEXT: v_alignbit_b32 v27, s11, v1, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: s_and_b32 s4, s93, 0xffff -; SI-NEXT: s_lshl_b32 s5, s92, 16 -; SI-NEXT: v_or_b32_e32 v5, v1, v33 -; SI-NEXT: s_or_b32 s9, s4, s5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 -; SI-NEXT: s_and_b32 s4, s95, 0xffff -; SI-NEXT: s_lshl_b32 s5, s94, 16 -; SI-NEXT: v_or_b32_e32 v4, v1, v34 +; SI-NEXT: s_or_b32 s42, s4, s5 +; SI-NEXT: s_and_b32 s4, s85, 0xffff +; SI-NEXT: s_lshl_b32 s5, s83, 16 +; SI-NEXT: s_or_b32 s43, s4, s5 +; SI-NEXT: s_and_b32 s4, s46, 0xffff +; SI-NEXT: s_lshl_b32 s5, s99, 16 +; SI-NEXT: s_or_b32 s12, s4, s5 +; SI-NEXT: s_and_b32 s4, s97, 0xffff +; SI-NEXT: s_lshl_b32 s5, s96, 16 +; SI-NEXT: s_or_b32 s13, s4, s5 +; SI-NEXT: s_and_b32 s4, s45, 0xffff +; SI-NEXT: s_lshl_b32 s5, s62, 16 ; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: s_and_b32 s4, s31, 0xffff -; SI-NEXT: s_lshl_b32 s5, s30, 16 -; SI-NEXT: v_or_b32_e32 v2, v1, v35 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 -; SI-NEXT: s_and_b32 s4, s35, 0xffff -; SI-NEXT: s_lshl_b32 s5, s34, 16 -; SI-NEXT: v_or_b32_e32 v1, v1, v36 +; SI-NEXT: s_and_b32 s4, s58, 0xffff +; SI-NEXT: s_lshl_b32 s5, s44, 16 +; SI-NEXT: s_or_b32 s9, s4, s5 +; SI-NEXT: s_and_b32 s4, s61, 0xffff +; SI-NEXT: s_lshl_b32 s5, s57, 16 ; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: v_alignbit_b32 v9, s9, v5, 24 -; SI-NEXT: v_alignbit_b32 v12, s9, v5, 16 -; SI-NEXT: v_alignbit_b32 v21, s9, v5, 8 -; SI-NEXT: v_alignbit_b32 v6, s8, v4, 24 -; SI-NEXT: v_alignbit_b32 v8, s8, v4, 16 -; SI-NEXT: v_alignbit_b32 v13, s8, v4, 8 -; SI-NEXT: v_alignbit_b32 v24, s7, v2, 24 -; SI-NEXT: v_alignbit_b32 v28, s7, v2, 16 -; SI-NEXT: v_alignbit_b32 v32, s7, v2, 8 -; SI-NEXT: v_alignbit_b32 v10, s6, v1, 24 -; SI-NEXT: v_alignbit_b32 v14, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v22, s6, v1, 8 -; SI-NEXT: s_lshr_b32 s78, s41, 8 -; SI-NEXT: s_lshr_b32 s75, s15, 8 -; SI-NEXT: s_lshr_b32 s72, s13, 8 -; SI-NEXT: s_lshr_b32 s61, s11, 8 -; SI-NEXT: s_lshr_b32 s58, s9, 8 -; SI-NEXT: s_lshr_b32 s47, s8, 8 -; SI-NEXT: s_lshr_b32 s45, s7, 8 -; SI-NEXT: s_lshr_b32 s42, s6, 8 -; SI-NEXT: s_and_b32 s88, s19, 0xffff -; SI-NEXT: s_and_b32 s77, s23, 0xffff -; SI-NEXT: s_and_b32 s74, s27, 0xffff -; SI-NEXT: s_and_b32 s63, s90, 0xffff -; SI-NEXT: s_and_b32 s60, s92, 0xffff -; SI-NEXT: s_and_b32 s57, s94, 0xffff -; SI-NEXT: s_and_b32 s46, s30, 0xffff -; SI-NEXT: s_and_b32 s43, s34, 0xffff -; SI-NEXT: s_bfe_u32 s89, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s79, s23, 0x80008 -; SI-NEXT: s_bfe_u32 s76, s27, 0x80008 -; SI-NEXT: s_bfe_u32 s73, s90, 0x80008 -; SI-NEXT: s_bfe_u32 s62, s92, 0x80008 -; SI-NEXT: s_bfe_u32 s59, s94, 0x80008 -; SI-NEXT: s_bfe_u32 s56, s30, 0x80008 -; SI-NEXT: s_bfe_u32 s44, s34, 0x80008 +; SI-NEXT: s_and_b32 s4, s76, 0xffff +; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: s_or_b32 s7, s4, s5 +; SI-NEXT: s_and_b32 s4, s17, 0xffff +; SI-NEXT: s_lshl_b32 s5, s63, 16 +; SI-NEXT: s_and_b32 s78, s72, 0xffff +; SI-NEXT: s_lshr_b64 s[34:35], s[8:9], 24 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: 
s_lshl_b32 s47, s59, 16 +; SI-NEXT: s_mov_b32 s35, s78 +; SI-NEXT: s_mov_b32 s78, s93 +; SI-NEXT: s_lshr_b64 s[92:93], s[6:7], 24 +; SI-NEXT: s_or_b32 s5, s5, s47 +; SI-NEXT: s_lshr_b32 s79, s7, 8 +; SI-NEXT: s_mov_b32 s93, s78 +; SI-NEXT: s_lshr_b64 s[94:95], s[6:7], 16 +; SI-NEXT: s_mov_b32 s78, s31 +; SI-NEXT: s_lshr_b64 s[30:31], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s88, s5, 8 +; SI-NEXT: s_bfe_u32 s89, s72, 0x80008 +; SI-NEXT: s_lshr_b64 s[36:37], s[8:9], 16 +; SI-NEXT: s_mov_b32 s95, s79 +; SI-NEXT: s_mov_b32 s31, s78 +; SI-NEXT: s_lshr_b64 s[78:79], s[4:5], 24 +; SI-NEXT: s_and_b32 s90, s59, 0xffff +; SI-NEXT: s_mov_b32 s37, s89 +; SI-NEXT: s_mov_b32 s79, s88 +; SI-NEXT: s_lshr_b64 s[88:89], s[4:5], 16 +; SI-NEXT: s_bfe_u32 vcc_lo, s59, 0x80008 +; SI-NEXT: s_mov_b32 s89, s90 +; SI-NEXT: s_lshr_b64 s[90:91], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s60, s41, 8 +; SI-NEXT: s_lshr_b32 s87, s15, 8 +; SI-NEXT: s_lshr_b32 s82, s11, 8 +; SI-NEXT: s_lshr_b32 s71, s43, 8 +; SI-NEXT: s_lshr_b32 s68, s13, 8 +; SI-NEXT: s_lshr_b32 s73, s9, 8 +; SI-NEXT: s_and_b32 s74, s19, 0xffff +; SI-NEXT: s_and_b32 s98, s23, 0xffff +; SI-NEXT: s_and_b32 s84, s27, 0xffff +; SI-NEXT: s_and_b32 s80, s83, 0xffff +; SI-NEXT: s_and_b32 s69, s96, 0xffff +; SI-NEXT: s_and_b32 s75, s44, 0xffff +; SI-NEXT: s_bfe_u32 s47, s19, 0x80008 +; SI-NEXT: s_bfe_u32 s56, s23, 0x80008 +; SI-NEXT: s_bfe_u32 s86, s27, 0x80008 +; SI-NEXT: s_bfe_u32 s81, s83, 0x80008 +; SI-NEXT: s_bfe_u32 s70, s96, 0x80008 +; SI-NEXT: s_bfe_u32 s77, s44, 0x80008 +; SI-NEXT: s_lshr_b64 s[54:55], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[48:49], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[50:51], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[38:39], s[8:9], 8 +; SI-NEXT: s_mov_b32 s91, vcc_lo ; SI-NEXT: s_cbranch_execnz .LBB97_3 ; SI-NEXT: .LBB97_2: ; %cmp.true -; SI-NEXT: s_add_i32 s35, s35, 3 -; SI-NEXT: s_and_b32 s4, s35, 0xffff -; SI-NEXT: s_lshl_b32 s5, s34, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s31, s31, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s31, 0xffff -; SI-NEXT: s_lshl_b32 s5, s30, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s95, s95, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s95, 0xffff -; SI-NEXT: s_lshl_b32 s5, s94, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s93, s93, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s93, 0xffff -; SI-NEXT: s_lshl_b32 s5, s92, 16 +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_and_b32 s4, s17, 0xffff +; SI-NEXT: s_lshl_b32 s5, s63, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s59, 16 +; SI-NEXT: s_add_i32 s61, s61, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s61, 0xffff +; SI-NEXT: s_lshl_b32 s7, s57, 16 +; SI-NEXT: s_add_i32 s76, s76, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s76, 0xffff +; SI-NEXT: s_lshl_b32 s8, s72, 16 +; SI-NEXT: s_add_i32 s45, s45, 3 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s8, s45, 0xffff +; SI-NEXT: s_lshl_b32 s9, s62, 16 +; SI-NEXT: s_add_i32 s58, s58, 3 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: s_and_b32 s9, s58, 0xffff +; SI-NEXT: s_lshl_b32 s10, s44, 16 +; SI-NEXT: s_add_i32 s46, s46, 3 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: s_and_b32 s10, s46, 0xffff +; SI-NEXT: s_lshl_b32 s11, s99, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 
+; SI-NEXT: s_add_i32 s97, s97, 3 +; SI-NEXT: s_add_i32 s12, s10, 0x30000 +; SI-NEXT: s_and_b32 s10, s97, 0xffff +; SI-NEXT: s_lshl_b32 s11, s96, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s91, s91, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s91, 0xffff -; SI-NEXT: s_lshl_b32 s5, s90, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_add_i32 s13, s10, 0x30000 +; SI-NEXT: s_and_b32 s10, s28, 0xffff +; SI-NEXT: s_lshl_b32 s11, s29, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: s_add_i32 s85, s85, 3 +; SI-NEXT: s_add_i32 s42, s10, 0x30000 +; SI-NEXT: s_and_b32 s10, s85, 0xffff +; SI-NEXT: s_lshl_b32 s11, s83, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s11, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_add_i32 s43, s10, 0x30000 +; SI-NEXT: s_and_b32 s10, s24, 0xffff +; SI-NEXT: s_lshl_b32 s11, s25, 16 ; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s12, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: s_and_b32 s11, s26, 0xffff +; SI-NEXT: s_lshl_b32 s14, s27, 16 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s13, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s11, s14, s11 +; SI-NEXT: s_and_b32 s14, s20, 0xffff +; SI-NEXT: s_lshl_b32 s15, s21, 16 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s14, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: s_and_b32 s15, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s23, 16 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s15, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s40, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v15 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v11 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v7 -; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 -; SI-NEXT: s_add_i32 s41, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v6, s40 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_alignbit_b32 v18, s41, v6, 24 -; SI-NEXT: v_alignbit_b32 v25, s41, v6, 16 -; SI-NEXT: v_alignbit_b32 v30, s41, v6, 8 -; SI-NEXT: v_mov_b32_e32 v6, s14 -; SI-NEXT: v_or_b32_e32 v1, v36, v1 -; SI-NEXT: v_or_b32_e32 v2, v35, v2 -; SI-NEXT: v_or_b32_e32 v4, v34, v4 -; SI-NEXT: v_or_b32_e32 v3, v33, v3 -; SI-NEXT: v_alignbit_b32 v19, s15, v6, 24 -; SI-NEXT: v_alignbit_b32 v26, s15, v6, 16 -; SI-NEXT: v_alignbit_b32 v31, s15, v6, 8 -; SI-NEXT: v_mov_b32_e32 v6, s12 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x30000, v1 -; SI-NEXT: v_mov_b32_e32 v15, s6 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x30000, v2 -; SI-NEXT: v_mov_b32_e32 v10, s7 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x30000, v4 -; SI-NEXT: v_mov_b32_e32 v7, s8 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x30000, v3 -; 
SI-NEXT: v_mov_b32_e32 v3, s9 -; SI-NEXT: v_alignbit_b32 v17, s13, v6, 24 -; SI-NEXT: v_alignbit_b32 v23, s13, v6, 16 -; SI-NEXT: v_alignbit_b32 v29, s13, v6, 8 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_alignbit_b32 v16, s11, v6, 24 -; SI-NEXT: v_alignbit_b32 v20, s11, v6, 16 -; SI-NEXT: v_alignbit_b32 v27, s11, v6, 8 -; SI-NEXT: v_alignbit_b32 v9, v3, v5, 24 -; SI-NEXT: v_alignbit_b32 v12, v3, v5, 16 -; SI-NEXT: v_alignbit_b32 v21, v3, v5, 8 -; SI-NEXT: v_alignbit_b32 v6, v7, v4, 24 -; SI-NEXT: v_alignbit_b32 v8, v7, v4, 16 -; SI-NEXT: v_alignbit_b32 v13, v7, v4, 8 -; SI-NEXT: v_alignbit_b32 v24, v10, v2, 24 -; SI-NEXT: v_alignbit_b32 v28, v10, v2, 16 -; SI-NEXT: v_alignbit_b32 v32, v10, v2, 8 -; SI-NEXT: v_alignbit_b32 v10, v15, v1, 24 -; SI-NEXT: v_alignbit_b32 v14, v15, v1, 16 -; SI-NEXT: v_alignbit_b32 v22, v15, v1, 8 -; SI-NEXT: s_lshr_b32 s89, s41, 24 -; SI-NEXT: s_lshr_b32 s88, s41, 16 -; SI-NEXT: s_lshr_b32 s78, s41, 8 -; SI-NEXT: s_lshr_b32 s79, s15, 24 -; SI-NEXT: s_lshr_b32 s77, s15, 16 -; SI-NEXT: s_lshr_b32 s75, s15, 8 -; SI-NEXT: s_lshr_b32 s76, s13, 24 -; SI-NEXT: s_lshr_b32 s74, s13, 16 -; SI-NEXT: s_lshr_b32 s72, s13, 8 -; SI-NEXT: s_lshr_b32 s73, s11, 24 -; SI-NEXT: s_lshr_b32 s63, s11, 16 -; SI-NEXT: s_lshr_b32 s61, s11, 8 -; SI-NEXT: s_lshr_b32 s62, s9, 24 -; SI-NEXT: s_lshr_b32 s60, s9, 16 -; SI-NEXT: s_lshr_b32 s58, s9, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 8 -; SI-NEXT: s_lshr_b32 s56, s7, 24 -; SI-NEXT: s_lshr_b32 s46, s7, 16 -; SI-NEXT: s_lshr_b32 s45, s7, 8 -; SI-NEXT: s_lshr_b32 s44, s6, 24 -; SI-NEXT: s_lshr_b32 s43, s6, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 8 +; SI-NEXT: s_or_b32 s15, s17, s15 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s31, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s40, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s93, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s19, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s41, s16, 0x30000 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v21, s16, 4 +; SI-NEXT: v_writelane_b32 v21, s17, 5 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v21, s16, 2 +; SI-NEXT: v_writelane_b32 v21, s17, 3 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 8 +; SI-NEXT: s_add_i32 s14, s14, 0x30000 +; SI-NEXT: s_add_i32 s15, s15, 0x30000 +; SI-NEXT: v_writelane_b32 v21, s16, 0 +; SI-NEXT: v_writelane_b32 v21, s17, 1 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v21, s16, 10 +; SI-NEXT: v_writelane_b32 v21, s17, 11 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v21, s16, 8 +; SI-NEXT: v_writelane_b32 v21, s17, 9 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 +; SI-NEXT: s_add_i32 s10, s10, 0x30000 +; SI-NEXT: s_add_i32 s11, s11, 0x30000 +; SI-NEXT: v_writelane_b32 v21, s16, 6 +; SI-NEXT: v_writelane_b32 v21, s17, 7 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v21, s16, 16 +; SI-NEXT: v_writelane_b32 v21, s17, 17 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 16 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_add_i32 s6, s6, 0x30000 +; SI-NEXT: s_add_i32 s7, s7, 0x30000 +; SI-NEXT: s_add_i32 s8, s8, 0x30000 +; SI-NEXT: s_add_i32 s9, s9, 0x30000 +; SI-NEXT: v_writelane_b32 v21, s16, 14 +; SI-NEXT: v_writelane_b32 v21, s17, 15 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[8:9], 24 +; SI-NEXT: 
s_lshr_b64 s[36:37], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v21, s16, 12 +; SI-NEXT: s_lshr_b64 s[54:55], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[48:49], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[50:51], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[38:39], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s47, s41, 24 +; SI-NEXT: s_lshr_b32 s74, s41, 16 +; SI-NEXT: s_lshr_b32 s60, s41, 8 +; SI-NEXT: s_lshr_b32 s56, s15, 24 +; SI-NEXT: s_lshr_b32 s98, s15, 16 +; SI-NEXT: s_lshr_b32 s87, s15, 8 +; SI-NEXT: s_lshr_b32 s86, s11, 24 +; SI-NEXT: s_lshr_b32 s84, s11, 16 +; SI-NEXT: s_lshr_b32 s82, s11, 8 +; SI-NEXT: s_lshr_b32 s81, s43, 24 +; SI-NEXT: s_lshr_b32 s80, s43, 16 +; SI-NEXT: s_lshr_b32 s71, s43, 8 +; SI-NEXT: s_lshr_b32 s70, s13, 24 +; SI-NEXT: s_lshr_b32 s69, s13, 16 +; SI-NEXT: s_lshr_b32 s68, s13, 8 +; SI-NEXT: s_lshr_b32 s77, s9, 24 +; SI-NEXT: s_lshr_b32 s75, s9, 16 +; SI-NEXT: s_lshr_b32 s73, s9, 8 +; SI-NEXT: s_lshr_b32 s37, s7, 24 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s95, s7, 8 +; SI-NEXT: s_lshr_b32 s91, s5, 24 +; SI-NEXT: s_lshr_b32 s89, s5, 16 +; SI-NEXT: s_lshr_b32 s79, s5, 8 +; SI-NEXT: v_writelane_b32 v21, s17, 13 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v30 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s41, 0xff -; SI-NEXT: s_lshl_b32 s5, s78, 8 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v25 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s88, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v18 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s89, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v31 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s15, 0xff -; SI-NEXT: s_lshl_b32 s5, s75, 8 +; SI-NEXT: v_readlane_b32 s18, v21, 0 +; SI-NEXT: v_readlane_b32 s19, v21, 1 +; SI-NEXT: s_lshl_b32 s17, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v21, 2 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: v_readlane_b32 s19, v21, 3 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 4 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xff +; SI-NEXT: s_lshl_b32 s17, s60, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s74, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s47, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: 
v_readlane_b32 s16, v21, 6 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s17, v21, 7 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: v_readlane_b32 s19, v21, 5 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: v_readlane_b32 s16, v21, 8 +; SI-NEXT: v_readlane_b32 s17, v21, 9 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 10 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s18, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v26 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s77, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v19 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s79, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v29 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s5, s72, 8 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: s_lshl_b32 s15, s87, 8 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: s_and_b32 s15, s98, 0xff +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_lshl_b32 s16, s56, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_readlane_b32 s14, v21, 12 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s15, v21, 13 +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_or_b32 s10, s10, s14 +; SI-NEXT: v_readlane_b32 s14, v21, 14 +; SI-NEXT: v_readlane_b32 s15, v21, 15 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s16, v21, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s16, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s10, s10, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v23 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s74, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v17 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s76, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s82, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: 
s_and_b32 s11, s84, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s14, s86, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s14, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v27 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: s_lshl_b32 s5, s61, 8 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s42, 0xff +; SI-NEXT: s_lshl_b32 s11, s66, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s64, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s14, s54, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s14, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s63, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s73, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s10, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s43, 0xff +; SI-NEXT: s_lshl_b32 s11, s71, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s80, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s14, s81, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s14, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v21 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s5, s58, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v12 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s60, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s12, 0xff +; SI-NEXT: s_lshl_b32 s11, s52, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s50, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s48, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v9 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s62, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v5, 
s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s13, 0xff +; SI-NEXT: s_lshl_b32 s11, s68, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s69, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s70, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s38, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: s_and_b32 s10, s36, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s34, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v13 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s47, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s57, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s73, 8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_and_b32 s9, s75, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s77, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v6 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s8, s59, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s30, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s94, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s92, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v32 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: s_lshl_b32 s5, s45, 8 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v28 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s46, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s95, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s7, s35, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s37, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; 
SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s7, s56, 24 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s90, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s88, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s78, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s7, s5 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v22 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; SI-NEXT: s_lshl_b32 s5, s42, 8 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v14 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s79, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s43, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v10 +; SI-NEXT: s_and_b32 s5, s89, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s44, 24 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: s_lshl_b32 s6, s91, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_readlane_b32 s19, v21, 11 +; SI-NEXT: v_readlane_b32 s17, v21, 17 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: v_readlane_b32 s35, v37, 3 -; SI-NEXT: v_readlane_b32 s34, v37, 2 -; SI-NEXT: v_readlane_b32 s31, v37, 1 -; SI-NEXT: v_readlane_b32 s30, v37, 0 +; SI-NEXT: v_readlane_b32 s99, v20, 35 +; SI-NEXT: v_readlane_b32 s98, v20, 34 +; SI-NEXT: v_readlane_b32 s97, v20, 33 +; SI-NEXT: v_readlane_b32 s96, v20, 32 +; SI-NEXT: v_readlane_b32 s87, v20, 31 +; SI-NEXT: v_readlane_b32 s86, v20, 30 +; SI-NEXT: v_readlane_b32 s85, v20, 29 +; SI-NEXT: v_readlane_b32 s84, v20, 28 +; SI-NEXT: v_readlane_b32 s83, v20, 27 +; SI-NEXT: v_readlane_b32 s82, v20, 26 +; SI-NEXT: v_readlane_b32 s81, v20, 25 +; SI-NEXT: v_readlane_b32 s80, v20, 24 +; SI-NEXT: v_readlane_b32 s71, v20, 23 +; SI-NEXT: v_readlane_b32 s70, v20, 22 +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: 
v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 ; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v21, s4, 0 +; SI-NEXT: v_writelane_b32 v21, s5, 1 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $sgpr88 -; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr47 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr87 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr95 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr8 
-; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: v_writelane_b32 v21, s4, 2 +; SI-NEXT: v_writelane_b32 v21, s5, 3 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 4 +; SI-NEXT: v_writelane_b32 v21, s5, 5 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 6 +; SI-NEXT: v_writelane_b32 v21, s5, 7 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 8 +; SI-NEXT: v_writelane_b32 v21, s5, 9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 10 +; SI-NEXT: v_writelane_b32 v21, s5, 11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 12 +; SI-NEXT: v_writelane_b32 v21, s5, 13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 14 +; SI-NEXT: v_writelane_b32 v21, s5, 15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 16 +; SI-NEXT: v_writelane_b32 v21, s5, 17 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v32i16_to_v64i8_scalar: @@ -69207,433 +69064,541 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v46, v30 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: 
buffer_load_dword v59, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:60 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:24 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:68 -; SI-NEXT: v_readfirstlane_b32 s15, v27 -; SI-NEXT: v_readfirstlane_b32 s40, v26 -; SI-NEXT: v_readfirstlane_b32 s12, v19 -; SI-NEXT: v_readfirstlane_b32 s13, v18 -; SI-NEXT: v_readfirstlane_b32 s10, v11 -; SI-NEXT: v_readfirstlane_b32 s11, v10 -; SI-NEXT: v_readfirstlane_b32 s8, v3 -; SI-NEXT: v_readfirstlane_b32 s9, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: v_readfirstlane_b32 s6, v0 -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v9 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v49, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 -; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v13 -; SI-NEXT: v_lshlrev_b32_e32 v53, 8, v23 -; SI-NEXT: v_lshlrev_b32_e32 v52, 24, v25 -; SI-NEXT: v_lshlrev_b32_e32 v54, 24, v21 -; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v29 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:68 +; SI-NEXT: v_readfirstlane_b32 s43, v1 +; SI-NEXT: v_readfirstlane_b32 s42, v0 +; SI-NEXT: v_lshlrev_b32_e32 v42, 8, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v53, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v19 +; SI-NEXT: v_lshlrev_b32_e32 v52, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v59, 8, v27 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v29 +; SI-NEXT: v_lshlrev_b32_e32 v47, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v45, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v41, 24, v25 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s44, v31 -; SI-NEXT: v_readfirstlane_b32 s45, v32 -; SI-NEXT: v_readfirstlane_b32 s42, v33 -; SI-NEXT: v_readfirstlane_b32 s43, v34 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35 -; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v36 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v37 +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; 
SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v56, 8, v38 +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36 +; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v48 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v39 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v37 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v39 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v49 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v48 +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v30 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v59 +; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v31 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v61, 24, v61 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v62 +; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v33 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v34 +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB99_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v2 +; SI-NEXT: v_or_b32_e32 v0, v0, v42 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_or_b32_e32 v0, v18, v0 +; SI-NEXT: v_or_b32_e32 v37, v1, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v12 +; SI-NEXT: v_or_b32_e32 v1, v1, v40 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(10) expcnt(0) +; SI-NEXT: v_mov_b32_e32 v60, v44 +; SI-NEXT: v_or_b32_e32 v44, v53, v9 +; SI-NEXT: v_or_b32_e32 v33, v1, v44 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v15, v46 +; SI-NEXT: v_or_b32_e32 v46, v52, v9 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_or_b32_e32 v55, v3, v9 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v63 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v27, v13 +; SI-NEXT: v_mov_b32_e32 v58, v8 +; SI-NEXT: v_mov_b32_e32 v49, v45 +; SI-NEXT: v_mov_b32_e32 v36, v24 +; SI-NEXT: v_mov_b32_e32 v34, v26 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s23, 24 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; 
SI-NEXT: s_or_b32 s5, s14, s5 -; SI-NEXT: s_or_b32 s41, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s14, s19, 24 -; SI-NEXT: s_or_b32 s4, s14, s4 -; SI-NEXT: s_and_b32 s14, s28, 0xff -; SI-NEXT: s_lshl_b32 s46, s29, 8 -; SI-NEXT: s_or_b32 s14, s14, s46 -; SI-NEXT: s_and_b32 s46, s6, 0xff -; SI-NEXT: s_lshl_b32 s46, s46, 16 -; SI-NEXT: s_lshl_b32 s47, s7, 24 -; SI-NEXT: s_or_b32 s57, s47, s46 -; SI-NEXT: s_and_b32 s46, s26, 0xff -; SI-NEXT: s_lshl_b32 s46, s46, 16 -; SI-NEXT: s_lshl_b32 s47, s27, 24 -; SI-NEXT: s_or_b32 s46, s47, s46 -; SI-NEXT: s_and_b32 s47, s16, 0xff -; SI-NEXT: s_lshl_b32 s56, s17, 8 -; SI-NEXT: s_or_b32 s47, s47, s56 -; SI-NEXT: s_and_b32 s47, s47, 0xffff -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v6 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v8 -; SI-NEXT: s_or_b32 s47, s47, s4 +; SI-NEXT: s_or_b32 s12, s6, s5 +; SI-NEXT: s_or_b32 s6, s4, s12 ; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s56, s25, 8 -; SI-NEXT: v_or_b32_e32 v9, v9, v0 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v11, v2, v10 +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s26, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_or_b32 s14, s7, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s5, 0xffff +; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_or_b32 s5, s8, s5 +; SI-NEXT: s_or_b32 s13, s7, s5 +; SI-NEXT: s_lshr_b64 s[8:9], s[12:13], 16 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 s9, s29, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s7, 0xffff +; SI-NEXT: s_and_b32 s7, s42, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s10, s43, 24 +; SI-NEXT: s_or_b32 s7, s10, s7 +; SI-NEXT: s_or_b32 s15, s9, s7 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v5, s46 -; SI-NEXT: v_or_b32_e32 v10, v9, v11 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v4 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v14 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v16 -; SI-NEXT: s_or_b32 s46, s4, s46 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s56, s8, 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 16 +; SI-NEXT: s_or_b32 s4, s4, s14 +; SI-NEXT: v_mov_b32_e32 v39, v32 +; SI-NEXT: s_lshr_b32 s9, s5, 16 +; SI-NEXT: s_lshr_b32 s11, s7, 16 +; SI-NEXT: s_mov_b32 s7, s13 +; SI-NEXT: s_mov_b32 s5, s15 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v43 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v48, v1, v46 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v26 +; SI-NEXT: v_or_b32_e32 v1, v1, v59 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v35, v1, v55 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v50 +; SI-NEXT: v_or_b32_e32 v1, v1, v17 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v43, v13, v9 +; SI-NEXT: v_or_b32_e32 v50, v1, v43 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v51 +; SI-NEXT: v_or_b32_e32 v1, v1, v19 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v13, v13, v49 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v15, v3, v9 -; SI-NEXT: 
v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: v_or_b32_e32 v19, v7, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v36, v13, v19 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v12 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v22 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v24 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v55 -; SI-NEXT: v_or_b32_e32 v35, s4, v15 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: s_lshl_b32 s56, s10, 8 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v17, v17, v53 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v23, v51, v13 -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 -; SI-NEXT: v_or_b32_e32 v27, v52, v18 -; SI-NEXT: v_or_b32_e32 v62, v47, v32 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v41 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v18, v17, v27 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v20 -; SI-NEXT: v_and_b32_e32 v21, 0xff, v30 -; SI-NEXT: v_and_b32_e32 v26, 0xff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: v_or_b32_e32 v37, s4, v23 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s56, s12, 8 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v21, v21, v43 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 -; SI-NEXT: v_or_b32_e32 v33, v58, v32 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v45 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v34, 0xff, v46 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v25, v54, v17 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v31, v42, v26 -; SI-NEXT: v_or_b32_e32 v32, v32, v60 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v38, v21, v31 -; SI-NEXT: v_and_b32_e32 v21, 0xff, v28 -; SI-NEXT: v_and_b32_e32 v32, 0xffff, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_mov_b32_e32 v13, v4 +; SI-NEXT: v_mov_b32_e32 v4, v2 +; SI-NEXT: v_mov_b32_e32 v2, v59 +; SI-NEXT: v_mov_b32_e32 v59, v3 +; SI-NEXT: v_mov_b32_e32 v3, v63 +; SI-NEXT: v_mov_b32_e32 v63, v40 +; SI-NEXT: v_mov_b32_e32 v40, v42 +; SI-NEXT: v_or_b32_e32 v42, v11, v9 +; SI-NEXT: v_or_b32_e32 v54, v1, v42 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v6 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v8 +; SI-NEXT: v_or_b32_e32 v1, v1, v47 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v11, v45, v9 +; SI-NEXT: v_or_b32_e32 v1, v1, v11 +; SI-NEXT: v_mov_b32_e32 v19, v10 +; SI-NEXT: v_lshr_b64 v[9:10], v[0:1], 16 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v63, v59, v34 -; SI-NEXT: v_or_b32_e32 v39, s4, v25 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s56, s15, 8 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v48, v32, v63 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v57 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v29, v44, v21 -; SI-NEXT: v_and_b32_e32 v26, 0xff, v40 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v26, v26, v56 -; SI-NEXT: v_or_b32_e32 v34, v61, v32 -; SI-NEXT: v_or_b32_e32 v32, s4, v29 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: s_lshl_b32 s56, s42, 8 -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v26 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v26, v26, 
v62 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v17, v18, v25, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v33, 16 -; SI-NEXT: v_or_b32_e32 v33, s4, v33 -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: s_lshl_b32 s56, s44, 8 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: s_or_b32 s14, s14, s57 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v1, s41, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v15, 16 -; SI-NEXT: v_alignbit_b32 v13, v36, v23, 16 -; SI-NEXT: v_alignbit_b32 v21, v38, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, v48, v34, 16 -; SI-NEXT: v_or_b32_e32 v34, s4, v34 -; SI-NEXT: s_lshr_b32 s56, s5, 16 -; SI-NEXT: s_lshr_b32 s57, s57, 16 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v16 +; SI-NEXT: v_or_b32_e32 v0, v0, v57 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v23, v56, v10 +; SI-NEXT: v_mov_b32_e32 v8, v6 +; SI-NEXT: v_mov_b32_e32 v6, v14 +; SI-NEXT: v_or_b32_e32 v45, v0, v23 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v24 +; SI-NEXT: v_lshr_b64 v[9:10], v[44:45], 16 +; SI-NEXT: v_or_b32_e32 v0, v0, v5 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v14, v41, v14 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v24, v17 +; SI-NEXT: v_mov_b32_e32 v17, v47 +; SI-NEXT: v_or_b32_e32 v47, v0, v14 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[9:10], v[46:47], 16 +; SI-NEXT: v_mov_b32_e32 v46, v15 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v46 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v62 +; SI-NEXT: v_or_b32_e32 v0, v0, v61 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v31, v7, v15 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v9, v61 +; SI-NEXT: v_mov_b32_e32 v61, v7 +; SI-NEXT: v_mov_b32_e32 v7, v5 +; SI-NEXT: v_mov_b32_e32 v5, v52 +; SI-NEXT: v_mov_b32_e32 v52, v41 +; SI-NEXT: v_mov_b32_e32 v41, v62 +; SI-NEXT: v_mov_b32_e32 v62, v57 +; SI-NEXT: v_mov_b32_e32 v57, v53 +; SI-NEXT: v_mov_b32_e32 v53, v56 +; SI-NEXT: v_or_b32_e32 v56, v0, v31 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v51, v22 +; SI-NEXT: v_lshr_b64 v[21:22], v[55:56], 16 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v15, 0xff, v10 +; SI-NEXT: v_or_b32_e32 v0, v0, v25 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v22, v30, v15 +; SI-NEXT: v_or_b32_e32 v44, v0, v22 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v32 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v60 +; SI-NEXT: v_or_b32_e32 v0, v0, v38 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v38, v29, v15 +; SI-NEXT: v_lshr_b64 
v[25:26], v[43:44], 16 +; SI-NEXT: v_or_b32_e32 v43, v0, v38 +; SI-NEXT: v_mov_b32_e32 v0, v30 +; SI-NEXT: v_lshr_b64 v[29:30], v[42:43], 16 +; SI-NEXT: v_mov_b32_e32 v42, v40 +; SI-NEXT: v_mov_b32_e32 v40, v63 +; SI-NEXT: v_mov_b32_e32 v63, v3 +; SI-NEXT: v_mov_b32_e32 v3, v59 +; SI-NEXT: v_mov_b32_e32 v59, v2 +; SI-NEXT: v_mov_b32_e32 v10, v19 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v13 +; SI-NEXT: v_mov_b32_e32 v13, v27 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v22 +; SI-NEXT: v_mov_b32_e32 v14, v6 +; SI-NEXT: v_mov_b32_e32 v6, v8 +; SI-NEXT: v_mov_b32_e32 v8, v58 +; SI-NEXT: v_mov_b32_e32 v22, v51 +; SI-NEXT: v_mov_b32_e32 v51, v44 +; SI-NEXT: v_mov_b32_e32 v44, v60 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v55, v43 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v0 +; SI-NEXT: v_mov_b32_e32 v26, v34 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v23 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v62 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v63 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v38 +; SI-NEXT: v_mov_b32_e32 v38, v1 +; SI-NEXT: v_mov_b32_e32 v34, v45 +; SI-NEXT: v_mov_b32_e32 v45, v49 +; SI-NEXT: v_mov_b32_e32 v49, v47 +; SI-NEXT: v_mov_b32_e32 v47, v17 +; SI-NEXT: v_mov_b32_e32 v17, v24 +; SI-NEXT: v_mov_b32_e32 v24, v36 +; SI-NEXT: v_mov_b32_e32 v36, v56 +; SI-NEXT: v_mov_b32_e32 v56, v53 +; SI-NEXT: v_mov_b32_e32 v53, v57 +; SI-NEXT: v_mov_b32_e32 v57, v62 +; SI-NEXT: v_mov_b32_e32 v62, v41 +; SI-NEXT: v_mov_b32_e32 v41, v52 +; SI-NEXT: v_mov_b32_e32 v52, v5 +; SI-NEXT: v_mov_b32_e32 v5, v7 +; SI-NEXT: v_mov_b32_e32 v7, v61 +; SI-NEXT: v_mov_b32_e32 v61, v9 ; SI-NEXT: s_cbranch_execnz .LBB99_3 ; SI-NEXT: .LBB99_2: ; %cmp.true -; SI-NEXT: s_add_i32 s45, s45, 3 -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: s_lshl_b32 s5, s44, 8 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v57 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_and_b32 s4, s24, 0xff +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: s_and_b32 s6, s26, 0xff ; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_lshl_b32 s5, s27, 24 +; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v61, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v34, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v45 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s6, s29, 8 +; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s7, s42, 0xff +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s43, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s16, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 +; 
SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s19, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s8, s21, 8 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s9, s22, 0xff +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_lshr_b64 s[8:9], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v9, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v54, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v44 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v46 -; SI-NEXT: v_or_b32_e32 v1, v60, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v59, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s43, s43, 3 -; SI-NEXT: v_add_i32_e32 v48, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: s_lshl_b32 s5, s42, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v41 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v1, v9, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v55, vcc, 0x3000000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v33, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v40 +; SI-NEXT: v_or_b32_e32 v1, v13, v1 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v55 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; 
SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v17, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v50, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v55 -; SI-NEXT: v_or_b32_e32 v1, v56, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v47, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s40, s40, 3 -; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s5, s15, 8 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v30, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v51, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v28 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v44, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v32, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v30 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v35, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v62 +; SI-NEXT: v_or_b32_e32 v0, v61, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v50 -; SI-NEXT: v_or_b32_e32 v1, v43, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v42, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: v_add_i32_e32 v38, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s5, s12, 8 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v7, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v36, vcc, 0x3000000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v54, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v39, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22 +; SI-NEXT: v_or_b32_e32 v1, v52, v1 +; SI-NEXT: v_lshr_b64 v[25:26], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[54:55], 16 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v36 
+; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v51 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v48, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v22 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v24 +; SI-NEXT: v_or_b32_e32 v0, v5, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v24 -; SI-NEXT: v_or_b32_e32 v1, v53, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v52, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: s_lshl_b32 s5, s10, 8 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v41, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v49, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v10 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v12 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v51, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v37, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v53, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v33, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v16 +; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v16 -; SI-NEXT: v_or_b32_e32 v1, v49, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: v_add_i32_e32 v36, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v56, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v34, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v4 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s5, s25, 8 -; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: 
s_and_b32 s8, s26, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s46, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s7, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s14, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_lshl_b32 s5, s17, 8 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s18, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: v_add_i32_e32 v35, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v6 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v18, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v37, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v6 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v8 -; SI-NEXT: s_add_i32 s47, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: v_or_b32_e32 v0, v47, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s22, 0xff ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: v_or_b32_e32 v1, v45, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: s_add_i32 s41, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s47 -; SI-NEXT: v_alignbit_b32 v1, s41, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s46 -; SI-NEXT: v_alignbit_b32 v5, s14, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v35, 16 -; SI-NEXT: v_alignbit_b32 v13, v36, v37, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v39, 16 -; SI-NEXT: v_alignbit_b32 v21, v38, v32, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v33, 16 -; SI-NEXT: v_alignbit_b32 v29, v48, v34, 16 -; SI-NEXT: s_lshr_b32 s56, s41, 16 -; SI-NEXT: s_lshr_b32 s57, s14, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v36 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v38 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v48 -; SI-NEXT: .LBB99_3: ; %end +; SI-NEXT: v_add_i32_e32 v38, vcc, 0x3000000, v0 +; SI-NEXT: v_lshr_b64 v[0:1], v[37:38], 16 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[0:1], v[33:34], 16 +; SI-NEXT: 
buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[0:1], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[35:36], 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v38 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v34 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v49 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: .LBB99_3: ; %end +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -69650,55 +69615,62 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v0, s47 -; SI-NEXT: v_mov_b32_e32 v2, s41 -; SI-NEXT: v_mov_b32_e32 v3, s56 -; SI-NEXT: v_mov_b32_e32 v4, s46 -; SI-NEXT: v_mov_b32_e32 v6, s14 -; SI-NEXT: v_mov_b32_e32 v7, s57 -; SI-NEXT: v_mov_b32_e32 v8, v35 -; SI-NEXT: v_mov_b32_e32 v12, v37 -; SI-NEXT: v_mov_b32_e32 v14, v36 -; SI-NEXT: v_mov_b32_e32 v16, v39 -; SI-NEXT: v_mov_b32_e32 v20, v32 -; SI-NEXT: v_mov_b32_e32 v22, v38 -; SI-NEXT: v_mov_b32_e32 v24, v33 -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v30, v48 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v6, s5 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v8, v37 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_mov_b32_e32 v10, v38 +; SI-NEXT: v_mov_b32_e32 v12, v33 +; SI-NEXT: v_mov_b32_e32 v14, v34 +; SI-NEXT: v_mov_b32_e32 v16, v48 +; SI-NEXT: v_mov_b32_e32 v18, v49 +; SI-NEXT: v_mov_b32_e32 v20, v35 +; SI-NEXT: v_mov_b32_e32 v22, v36 +; SI-NEXT: v_mov_b32_e32 v24, v50 +; SI-NEXT: v_mov_b32_e32 v26, v51 +; SI-NEXT: v_mov_b32_e32 v28, v54 +; SI-NEXT: v_mov_b32_e32 v30, v55 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB99_4: -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; 
SI-NEXT: v_mov_b32_e32 v39, v32 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: s_branch .LBB99_2 ; @@ -69721,139 +69693,126 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v55, v20 -; VI-NEXT: v_mov_b32_e32 v53, v14 -; VI-NEXT: v_mov_b32_e32 v34, v12 -; VI-NEXT: v_mov_b32_e32 v32, v0 +; VI-NEXT: v_mov_b32_e32 v31, v30 +; VI-NEXT: v_mov_b32_e32 v38, v28 +; VI-NEXT: v_mov_b32_e32 v32, v26 +; VI-NEXT: v_mov_b32_e32 v30, v24 +; VI-NEXT: v_mov_b32_e32 v26, v22 +; VI-NEXT: v_mov_b32_e32 v49, v20 +; VI-NEXT: v_mov_b32_e32 v48, v14 +; VI-NEXT: v_mov_b32_e32 v39, v12 +; VI-NEXT: v_mov_b32_e32 v20, v10 +; VI-NEXT: v_mov_b32_e32 v35, v8 +; VI-NEXT: v_mov_b32_e32 v34, v6 +; VI-NEXT: v_mov_b32_e32 v36, v4 +; VI-NEXT: v_mov_b32_e32 v37, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 ; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 ; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 ; VI-NEXT: buffer_load_ushort v41, off, 
s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 ; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:28 ; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 ; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:36 ; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 ; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:56 ; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:64 ; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 ; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:68 -; VI-NEXT: v_mov_b32_e32 v51, v23 -; VI-NEXT: v_mov_b32_e32 v30, v26 -; VI-NEXT: v_mov_b32_e32 v26, v22 ; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 ; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5 ; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v13 -; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v15 -; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v17 -; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v19 -; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v21 -; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v51 -; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v25 +; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v53, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v55, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v40, 8, v15 +; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 +; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 +; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 ; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v27 ; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v29 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v31 -; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v33 +; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v28 +; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v4 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec -; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v35 -; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v37 -; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v20 +; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v6 +; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v8 +; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v10 ; VI-NEXT: s_waitcnt vmcnt(13) -; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v12 +; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v12 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v14 
; VI-NEXT: s_waitcnt vmcnt(9) -; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v39 +; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v22 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v48 +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v33 ; VI-NEXT: s_waitcnt vmcnt(5) -; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v49 +; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v24 ; VI-NEXT: s_cbranch_scc0 .LBB99_4 ; VI-NEXT: ; %bb.1: ; %cmp.false ; VI-NEXT: v_or_b32_sdwa v0, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v36, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v36, v34 -; VI-NEXT: v_or_b32_sdwa v1, v34, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v0, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v35, v6 -; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v20, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v39, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v53, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v16, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v49, v7 -; VI-NEXT: v_or_b32_sdwa v3, v8, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v48, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v18, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v55, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v37, v8 +; VI-NEXT: v_or_b32_sdwa v0, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v49, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v26, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v24, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v40, v9 +; VI-NEXT: v_or_b32_sdwa v0, v26, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v30, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v30, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v28, v29 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v31, v10 +; VI-NEXT: v_or_b32_sdwa v0, v32, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v38, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v52, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v17, v11 -; VI-NEXT: v_mov_b32_e32 v19, v13 -; VI-NEXT: s_and_b32 s4, s28, 0xff -; VI-NEXT: s_lshl_b32 s5, s29, 8 -; VI-NEXT: v_mov_b32_e32 v39, v14 -; VI-NEXT: s_or_b32 s4, s4, s5 -; VI-NEXT: v_mov_b32_e32 v21, v15 -; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_mov_b32_e32 v20, v5 -; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: s_lshl_b32 s5, s17, 8 -; VI-NEXT: s_lshl_b32 s6, s19, 8 -; VI-NEXT: s_lshl_b32 s7, s23, 8 -; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: v_mov_b32_e32 v25, v23 -; VI-NEXT: v_mov_b32_e32 v48, v51 -; VI-NEXT: v_mov_b32_e32 v23, v26 -; VI-NEXT: v_mov_b32_e32 v26, v30 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v34, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v31, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v52, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v54, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v41, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v54, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v41, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v42, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v43, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v42, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v43, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v45, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v44, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v45, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_and_b32 s4, s28, 0xff +; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v47, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v47, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; 
VI-NEXT: s_waitcnt vmcnt(4) ; VI-NEXT: v_or_b32_sdwa v1, v57, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_or_b32 s4, s4, s5 +; VI-NEXT: v_or_b32_sdwa v2, v34, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v35, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v32, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_and_b32 s4, s4, 0xffff +; VI-NEXT: v_or_b32_sdwa v0, v37, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff +; VI-NEXT: s_lshl_b32 s5, s17, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 ; VI-NEXT: s_and_b32 s5, s18, 0xff +; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_or_b32 s5, s5, s6 ; VI-NEXT: s_and_b32 s4, s4, 0xffff ; VI-NEXT: s_lshl_b32 s5, s5, 16 @@ -69862,6 +69821,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_lshl_b32 s6, s21, 8 ; VI-NEXT: s_or_b32 s5, s5, s6 ; VI-NEXT: s_and_b32 s6, s22, 0xff +; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_or_b32 s6, s6, s7 ; VI-NEXT: s_and_b32 s5, s5, 0xffff ; VI-NEXT: s_lshl_b32 s6, s6, 16 @@ -69870,70 +69830,75 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_lshl_b32 s7, s25, 8 ; VI-NEXT: s_or_b32 s6, s6, s7 ; VI-NEXT: s_and_b32 s7, s26, 0xff +; VI-NEXT: s_lshl_b32 s8, s27, 8 ; VI-NEXT: s_or_b32 s7, s7, s8 ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: s_lshl_b32 s7, s7, 16 ; VI-NEXT: s_or_b32 s6, s6, s7 +; VI-NEXT: v_mov_b32_e32 v24, v36 +; VI-NEXT: v_mov_b32_e32 v28, v26 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 -; VI-NEXT: v_mov_b32_e32 v30, v34 ; VI-NEXT: s_cbranch_execnz .LBB99_3 ; VI-NEXT: .LBB99_2: ; %cmp.true +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44 -; VI-NEXT: v_or_b32_sdwa v3, v39, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x300, v3 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v43 -; VI-NEXT: v_or_b32_sdwa v13, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v13, v61, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42 -; VI-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v15, vcc, 0x300, v3 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v41 -; VI-NEXT: v_or_b32_sdwa v12, v62, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v12, v59, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v54 -; VI-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52 -; VI-NEXT: 
v_or_b32_sdwa v11, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v30 -; VI-NEXT: v_or_b32_sdwa v3, v59, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v30, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v28 -; VI-NEXT: v_or_b32_sdwa v10, v29, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v26 -; VI-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v24 -; VI-NEXT: v_or_b32_sdwa v9, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v23 +; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57 -; VI-NEXT: v_or_b32_sdwa v3, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v58, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v22, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v55 -; VI-NEXT: v_or_b32_sdwa v8, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52 +; VI-NEXT: v_or_b32_sdwa v11, v56, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v31 +; VI-NEXT: v_or_b32_sdwa v3, v46, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_mov_b32_e32 v26, v24 +; VI-NEXT: v_add_u32_e32 v24, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v38 +; VI-NEXT: v_or_b32_sdwa v10, v29, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v32 +; VI-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v27, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v30 +; VI-NEXT: v_or_b32_sdwa v9, v25, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v28 +; VI-NEXT: v_or_b32_sdwa v3, v23, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v23, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v8, v21, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v18 -; VI-NEXT: v_or_b32_sdwa v3, v25, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v18, vcc, 0x300, v3 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v16 -; VI-NEXT: v_or_b32_sdwa v7, v21, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v53 -; VI-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v16, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v36 -; VI-NEXT: v_or_b32_sdwa v6, v17, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v31 +; VI-NEXT: v_or_b32_sdwa v7, v17, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v48 ; VI-NEXT: v_or_b32_sdwa v3, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v16, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v6, v55, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v20 +; VI-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v17, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v37 -; VI-NEXT: v_or_b32_sdwa v5, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v35 -; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v5, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v34 ; VI-NEXT: s_add_i32 s28, s28, 3 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v26 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_add_i32 s26, s26, 3 @@ -69961,12 +69926,11 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_and_b32 s10, s16, 0xff ; VI-NEXT: s_lshl_b32 s11, s17, 8 ; VI-NEXT: s_or_b32 s10, s11, s10 -; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47 ; VI-NEXT: s_addk_i32 s6, 0x300 ; VI-NEXT: s_addk_i32 s8, 0x300 ; VI-NEXT: s_addk_i32 s10, 0x300 -; VI-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v45 ; VI-NEXT: s_addk_i32 s4, 0x300 ; VI-NEXT: s_lshl_b32 s5, s5, 16 @@ -69976,7 +69940,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_and_b32 s8, s8, 0xffff ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_or_b32 s9, s9, s10 ; VI-NEXT: s_or_b32 s7, s7, s8 ; VI-NEXT: s_or_b32 s5, s5, s6 @@ -69988,10 +69952,10 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: v_or_b32_sdwa v6, v6, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v7, v7, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v8, v8, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v9, v9, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v10, v10, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 
-; VI-NEXT: v_or_b32_sdwa v11, v11, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v12, v12, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v9, v9, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v10, v10, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v11, v11, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v12, v12, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v13, v13, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 @@ -70001,12 +69965,11 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(1) -; VI-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12 ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v2 @@ -70014,11 +69977,12 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: v_mov_b32_e32 v0, s9 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s5 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v20, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v32 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v37 ; VI-NEXT: v_or_b32_sdwa v3, v50, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_e32 v3, s4, v3 ; VI-NEXT: v_or_b32_sdwa v4, v4, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 @@ -70044,22 +70008,8 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB99_4: -; VI-NEXT: v_mov_b32_e32 v25, v23 -; VI-NEXT: v_mov_b32_e32 v23, v26 -; VI-NEXT: v_mov_b32_e32 v26, v30 -; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v48, v51 -; VI-NEXT: v_mov_b32_e32 v31, v10 -; VI-NEXT: v_mov_b32_e32 v36, v34 -; VI-NEXT: v_mov_b32_e32 v35, v6 -; VI-NEXT: v_mov_b32_e32 v37, v8 -; VI-NEXT: v_mov_b32_e32 v39, v14 -; VI-NEXT: v_mov_b32_e32 v21, v15 -; VI-NEXT: v_mov_b32_e32 v19, v13 -; VI-NEXT: v_mov_b32_e32 v17, v11 -; VI-NEXT: v_mov_b32_e32 v40, v9 -; VI-NEXT: 
v_mov_b32_e32 v49, v7 -; VI-NEXT: v_mov_b32_e32 v20, v5 +; VI-NEXT: v_mov_b32_e32 v24, v36 +; VI-NEXT: v_mov_b32_e32 v28, v26 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; VI-NEXT: s_branch .LBB99_2 ; @@ -70082,244 +70032,228 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v34, v30 -; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:8 +; GFX9-NEXT: v_mov_b32_e32 v48, v30 +; GFX9-NEXT: v_mov_b32_e32 v33, v28 +; GFX9-NEXT: v_mov_b32_e32 v37, v26 +; GFX9-NEXT: v_mov_b32_e32 v34, v24 +; GFX9-NEXT: v_mov_b32_e32 v32, v22 +; GFX9-NEXT: v_mov_b32_e32 v30, v20 +; GFX9-NEXT: v_mov_b32_e32 v49, v14 +; GFX9-NEXT: v_mov_b32_e32 v22, v12 +; GFX9-NEXT: v_mov_b32_e32 v39, v10 +; GFX9-NEXT: v_mov_b32_e32 v35, v8 +; GFX9-NEXT: v_mov_b32_e32 v20, v6 +; GFX9-NEXT: v_mov_b32_e32 v28, v4 +; GFX9-NEXT: v_mov_b32_e32 v26, v2 +; GFX9-NEXT: v_mov_b32_e32 v24, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 ; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 ; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 ; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 ; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:28 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 ; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:36 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 ; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:56 ; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:64 ; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:72 ; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:68 -; GFX9-NEXT: v_mov_b32_e32 v51, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; GFX9-NEXT: v_lshlrev_b32_e32 v50, 8, v3 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v7 ; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v17 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 
-; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v19 -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v23 -; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v51 -; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v53, 8, v29 -; GFX9-NEXT: s_waitcnt vmcnt(19) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v36 -; GFX9-NEXT: s_waitcnt vmcnt(18) -; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v32 -; GFX9-NEXT: s_waitcnt vmcnt(17) -; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v38 +; GFX9-NEXT: v_lshlrev_b32_e32 v54, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v53, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v55, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29 +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; GFX9-NEXT: s_waitcnt vmcnt(21) +; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v2 +; GFX9-NEXT: s_waitcnt vmcnt(20) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v4 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec -; GFX9-NEXT: s_waitcnt vmcnt(15) -; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v30 -; GFX9-NEXT: s_waitcnt vmcnt(13) -; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v31 -; GFX9-NEXT: s_waitcnt vmcnt(11) -; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v33 -; GFX9-NEXT: s_waitcnt vmcnt(9) -; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v37 -; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v35 -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v39 -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v48 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v49 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(18) +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(16) +; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v8 +; GFX9-NEXT: s_waitcnt vmcnt(14) +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v10 +; GFX9-NEXT: s_waitcnt vmcnt(12) +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v12 +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_lshlrev_b32_e32 
v63, 8, v14 +; GFX9-NEXT: s_waitcnt vmcnt(8) +; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v36 +; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v38 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v31 ; GFX9-NEXT: s_cbranch_scc0 .LBB99_4 ; GFX9-NEXT: ; %bb.1: ; %cmp.false ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v24, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff ; GFX9-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX9-NEXT: v_or_b32_sdwa v2, v2, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v50, v3 -; GFX9-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v28, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v3, v0, 16, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v20, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v35, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v31, v5 ; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v22, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v55 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v18, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v32, v16 +; GFX9-NEXT: v_or_b32_sdwa v1, v30, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0 -; GFX9-NEXT: v_mov_b32_e32 v16, v22 -; GFX9-NEXT: v_or_b32_sdwa v0, v22, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v37, v24 -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v47 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_mov_b32_e32 v17, v9 +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v28, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v48, v10 +; GFX9-NEXT: v_or_b32_sdwa v1, v33, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s5, s18, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s19, 8 -; GFX9-NEXT: v_mov_b32_e32 v55, v11 ; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s5, s5, s6 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v40, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s5, s20, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s21, 8 -; GFX9-NEXT: v_mov_b32_e32 v33, v12 ; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s5, s5, s6 ; GFX9-NEXT: s_and_b32 s6, s22, 0xff ; GFX9-NEXT: s_lshl_b32 s7, s23, 8 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v43, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s6, s6, s7 -; GFX9-NEXT: v_mov_b32_e32 v19, v13 ; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v46, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6 ; GFX9-NEXT: s_and_b32 s6, s24, 0xff ; GFX9-NEXT: s_lshl_b32 s7, s25, 8 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: 
v_or_b32_sdwa v1, v45, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v45, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s6, s6, s7 ; GFX9-NEXT: s_and_b32 s7, s26, 0xff ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 -; GFX9-NEXT: v_mov_b32_e32 v29, v14 +; GFX9-NEXT: v_or_b32_sdwa v2, v26, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v57, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s7, s7, s8 ; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7 ; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v2 -; GFX9-NEXT: v_mov_b32_e32 v42, v15 -; GFX9-NEXT: v_mov_b32_e32 v27, v25 -; GFX9-NEXT: v_mov_b32_e32 v30, v18 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v49, v20 -; GFX9-NEXT: v_mov_b32_e32 v39, v26 -; GFX9-NEXT: v_mov_b32_e32 v35, v28 -; GFX9-NEXT: v_mov_b32_e32 v54, v31 -; GFX9-NEXT: v_mov_b32_e32 v31, v51 -; GFX9-NEXT: v_mov_b32_e32 v2, s6 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v57, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_mov_b32_e32 v18, v22 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 -; GFX9-NEXT: v_mov_b32_e32 v20, v24 +; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_cbranch_execnz .LBB99_3 ; GFX9-NEXT: .LBB99_2: ; %cmp.true ; GFX9-NEXT: v_add_u32_e32 v3, 3, v45 -; GFX9-NEXT: v_or_b32_sdwa v3, v31, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v14, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v44 -; GFX9-NEXT: v_or_b32_sdwa v3, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v13, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v43 -; GFX9-NEXT: v_or_b32_sdwa v3, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v15, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v41 -; GFX9-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v59, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v12, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v40 -; GFX9-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v34 -; GFX9-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v58, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; 
GFX9-NEXT: v_add_u32_e32 v24, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v48 +; GFX9-NEXT: v_or_b32_sdwa v3, v47, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v11, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v52 -; GFX9-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v39 -; GFX9-NEXT: v_or_b32_sdwa v3, v59, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v29, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v37 +; GFX9-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v10, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v35 -; GFX9-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v33 +; GFX9-NEXT: v_or_b32_sdwa v3, v25, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v25, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v16 -; GFX9-NEXT: v_or_b32_sdwa v3, v58, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v32 +; GFX9-NEXT: v_or_b32_sdwa v3, v23, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v9, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v37 -; GFX9-NEXT: v_or_b32_sdwa v3, v47, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v34 +; GFX9-NEXT: v_or_b32_sdwa v3, v21, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v30 -; GFX9-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v8, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v49 -; GFX9-NEXT: v_or_b32_sdwa v3, v23, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v29 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v18 ; GFX9-NEXT: v_or_b32_sdwa v3, v42, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v7, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v32 +; GFX9-NEXT: v_add_u32_e32 v8, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v30 ; GFX9-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v48 +; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v49 ; GFX9-NEXT: v_or_b32_sdwa v3, v55, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v6, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v33 +; GFX9-NEXT: v_add_u32_e32 v7, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v16 ; GFX9-NEXT: v_or_b32_sdwa v3, v17, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v39 +; GFX9-NEXT: v_or_b32_sdwa v3, v54, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v6, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v22 +; GFX9-NEXT: 
v_or_b32_sdwa v3, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v17, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(15) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v56 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v57 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v20 +; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v3, v51, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v5, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v35 +; GFX9-NEXT: v_or_b32_sdwa v3, v31, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v26 +; GFX9-NEXT: v_or_b32_sdwa v3, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v4, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v28 ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 @@ -70344,13 +70278,18 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_and_b32 s9, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s10, s17, 8 ; GFX9-NEXT: s_add_i32 s18, s18, 3 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v57 ; GFX9-NEXT: v_add_u32_e32 v2, 3, v46 ; GFX9-NEXT: s_or_b32 s9, s10, s9 ; GFX9-NEXT: s_and_b32 s10, s18, 0xff ; GFX9-NEXT: s_lshl_b32 s11, s19, 8 -; GFX9-NEXT: v_or_b32_sdwa v2, v54, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v56 +; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_or_b32 s10, s11, s10 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2 ; GFX9-NEXT: s_addk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s5, 0x300 @@ -70359,69 +70298,48 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_addk_i32 s8, 0x300 ; GFX9-NEXT: s_addk_i32 s9, 0x300 ; GFX9-NEXT: s_addk_i32 s10, 0x300 -; GFX9-NEXT: v_mov_b32_e32 v22, 0xffff ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10 ; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s8 ; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; GFX9-NEXT: v_and_b32_e32 v22, s4, v22 +; GFX9-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX9-NEXT: v_and_b32_e32 v5, 0xffff, v5 ; GFX9-NEXT: v_and_b32_e32 v6, 0xffff, v6 ; GFX9-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; GFX9-NEXT: v_and_b32_e32 v8, 0xffff, v8 ; GFX9-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v10 ; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12 ; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; GFX9-NEXT: v_and_b32_e32 
v2, 0xffff, v2 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_lshl_or_b32 v5, v19, 16, v5 ; GFX9-NEXT: v_lshl_or_b32 v6, v17, 16, v6 -; GFX9-NEXT: v_lshl_or_b32 v7, v23, 16, v7 -; GFX9-NEXT: v_lshl_or_b32 v8, v16, 16, v8 +; GFX9-NEXT: v_lshl_or_b32 v7, v16, 16, v7 +; GFX9-NEXT: v_lshl_or_b32 v8, v18, 16, v8 ; GFX9-NEXT: v_lshl_or_b32 v9, v21, 16, v9 ; GFX9-NEXT: v_lshl_or_b32 v10, v25, 16, v10 -; GFX9-NEXT: v_lshl_or_b32 v11, v24, 16, v11 -; GFX9-NEXT: v_lshl_or_b32 v12, v36, 16, v12 +; GFX9-NEXT: v_lshl_or_b32 v11, v29, 16, v11 +; GFX9-NEXT: v_lshl_or_b32 v12, v24, 16, v12 ; GFX9-NEXT: v_lshl_or_b32 v13, v15, 16, v13 ; GFX9-NEXT: v_lshl_or_b32 v14, v14, 16, v2 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v3 +; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v4, v20, 16, v4 ; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, s9 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: v_mov_b32_e32 v2, s5 -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v5, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; GFX9-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshl_or_b32 v5, v19, 16, v5 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v4, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; GFX9-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: v_or_b32_sdwa v3, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshl_or_b32 v4, v20, 16, v4 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: v_or_b32_sdwa v3, v18, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v22, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_mov_b32_e32 v22, 0xffff ; GFX9-NEXT: v_add_u32_e32 v3, 0x300, v3 +; GFX9-NEXT: v_and_b32_e32 v22, s4, v22 ; GFX9-NEXT: v_lshl_or_b32 v3, v3, 16, v22 ; GFX9-NEXT: .LBB99_3: ; %end ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload @@ -70443,27 +70361,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x 
i8> inreg %a, i32 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] ; GFX9-NEXT: .LBB99_4: -; GFX9-NEXT: v_mov_b32_e32 v30, v18 -; GFX9-NEXT: v_mov_b32_e32 v49, v20 -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v54, v31 -; GFX9-NEXT: v_mov_b32_e32 v29, v14 -; GFX9-NEXT: v_mov_b32_e32 v48, v10 -; GFX9-NEXT: v_mov_b32_e32 v39, v26 -; GFX9-NEXT: v_mov_b32_e32 v32, v16 -; GFX9-NEXT: v_mov_b32_e32 v16, v22 -; GFX9-NEXT: v_mov_b32_e32 v33, v12 -; GFX9-NEXT: v_mov_b32_e32 v35, v28 -; GFX9-NEXT: v_mov_b32_e32 v37, v24 -; GFX9-NEXT: v_mov_b32_e32 v31, v51 -; GFX9-NEXT: v_mov_b32_e32 v27, v25 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v42, v15 -; GFX9-NEXT: v_mov_b32_e32 v19, v13 -; GFX9-NEXT: v_mov_b32_e32 v55, v11 -; GFX9-NEXT: v_mov_b32_e32 v17, v9 -; GFX9-NEXT: v_mov_b32_e32 v50, v3 +; GFX9-NEXT: v_mov_b32_e32 v31, v5 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: s_branch .LBB99_2 ; @@ -76984,716 +76882,611 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32 ; SI-LABEL: bitcast_v32f16_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v22, s17 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v40, s30, 0 +; SI-NEXT: v_writelane_b32 v40, s31, 1 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v6 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v62, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v7 +; SI-NEXT: 
v_cvt_f16_f32_e32 v23, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v9, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s20 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v11, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v46, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v42, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s24 ; SI-NEXT: v_cvt_f16_f32_e32 v5, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v13, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v17, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s28 +; SI-NEXT: v_writelane_b32 v40, s34, 2 +; SI-NEXT: v_writelane_b32 v40, s35, 3 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 +; SI-NEXT: v_writelane_b32 v40, s36, 4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_writelane_b32 v40, s37, 5 ; SI-NEXT: s_cbranch_scc0 .LBB105_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v22 -; SI-NEXT: v_or_b32_e32 v37, v10, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v7 -; SI-NEXT: v_or_b32_e32 v32, v9, v8 -; SI-NEXT: v_alignbit_b32 v8, v32, v37, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v32, v37, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v32, v37, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v13 -; SI-NEXT: v_or_b32_e32 v24, v12, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v6 -; SI-NEXT: v_or_b32_e32 v23, v11, v8 -; SI-NEXT: v_alignbit_b32 v8, v23, v24, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:80 ; 4-byte 
Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v23, v24, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v23, v24, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v46 -; SI-NEXT: v_or_b32_e32 v18, v42, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v5 -; SI-NEXT: v_or_b32_e32 v19, v14, v8 -; SI-NEXT: v_alignbit_b32 v8, v19, v18, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v18, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v18, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v26 -; SI-NEXT: v_or_b32_e32 v16, v25, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v20 -; SI-NEXT: v_or_b32_e32 v17, v28, v8 -; SI-NEXT: v_alignbit_b32 v8, v17, v16, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v17, v16, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v17, v16, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v27 -; SI-NEXT: v_or_b32_e32 v15, v21, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v4 -; SI-NEXT: v_or_b32_e32 v14, v62, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v35 -; SI-NEXT: v_or_b32_e32 v12, v34, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v3 -; SI-NEXT: v_or_b32_e32 v13, v30, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v53 -; SI-NEXT: v_or_b32_e32 v10, v50, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v2 -; SI-NEXT: v_or_b32_e32 v11, v48, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41 -; SI-NEXT: v_or_b32_e32 v9, v40, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v1 -; SI-NEXT: v_alignbit_b32 v22, v11, v10, 24 -; SI-NEXT: v_or_b32_e32 v8, v55, v8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v11, v10, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v8, v9, 24 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v8, v9, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v8, v9, 8 -; SI-NEXT: v_alignbit_b32 v57, v14, v15, 24 -; SI-NEXT: v_alignbit_b32 v58, v14, v15, 16 -; SI-NEXT: v_alignbit_b32 v61, v14, v15, 8 -; SI-NEXT: v_alignbit_b32 v44, v13, v12, 24 -; SI-NEXT: v_alignbit_b32 v47, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v56, v13, v12, 8 -; SI-NEXT: v_alignbit_b32 v43, v11, v10, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v32 -; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v23 -; SI-NEXT: v_lshrrev_b32_e32 v36, 8, v19 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v17 -; SI-NEXT: 
v_lshrrev_b32_e32 v28, 8, v14 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v13 -; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v8 -; SI-NEXT: v_bfe_u32 v54, v7, 8, 8 -; SI-NEXT: v_bfe_u32 v51, v6, 8, 8 -; SI-NEXT: v_bfe_u32 v49, v5, 8, 8 -; SI-NEXT: v_bfe_u32 v38, v20, 8, 8 -; SI-NEXT: v_bfe_u32 v33, v4, 8, 8 -; SI-NEXT: v_bfe_u32 v29, v3, 8, 8 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v22, v2, 8, 8 -; SI-NEXT: v_bfe_u32 v60, v1, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s4, v21 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v9 +; SI-NEXT: s_or_b32 s18, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v8 +; SI-NEXT: s_or_b32 s19, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v12 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v11 +; SI-NEXT: s_or_b32 s16, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v6 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v10 +; SI-NEXT: s_or_b32 s17, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v15 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: s_or_b32 s14, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v5 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v13 +; SI-NEXT: s_or_b32 s15, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: s_or_b32 s12, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v20 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v22 +; SI-NEXT: s_or_b32 s13, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v25 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v24 +; SI-NEXT: s_or_b32 s10, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v4 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v23 +; SI-NEXT: s_or_b32 s11, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v28 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v27 +; SI-NEXT: s_or_b32 s8, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v3 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v26 +; SI-NEXT: s_or_b32 s9, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v33 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v31 +; SI-NEXT: s_or_b32 s6, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v29 +; SI-NEXT: s_or_b32 s7, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v38 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v37 +; SI-NEXT: s_lshr_b64 s[20:21], s[18:19], 24 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_readfirstlane_b32 s21, v35 +; SI-NEXT: s_lshr_b64 s[22:23], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[14:15], 16 +; SI-NEXT: s_or_b32 s5, s21, s5 +; SI-NEXT: s_lshr_b64 s[56:57], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[8:9], 24 +; 
SI-NEXT: s_lshr_b64 s[78:79], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s45, s19, 8 +; SI-NEXT: s_lshr_b32 s43, s17, 8 +; SI-NEXT: s_lshr_b32 s41, s15, 8 +; SI-NEXT: s_lshr_b32 s29, s13, 8 +; SI-NEXT: s_lshr_b32 s27, s11, 8 +; SI-NEXT: s_lshr_b32 s25, s9, 8 +; SI-NEXT: s_lshr_b32 s23, s7, 8 +; SI-NEXT: s_lshr_b32 s21, s5, 8 +; SI-NEXT: v_bfe_u32 v48, v7, 8, 8 +; SI-NEXT: v_bfe_u32 v39, v6, 8, 8 +; SI-NEXT: v_bfe_u32 v36, v5, 8, 8 +; SI-NEXT: v_bfe_u32 v34, v20, 8, 8 +; SI-NEXT: v_bfe_u32 v32, v4, 8, 8 +; SI-NEXT: v_bfe_u32 v30, v3, 8, 8 +; SI-NEXT: v_bfe_u32 v19, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v18, v1, 8, 8 ; SI-NEXT: s_cbranch_execnz .LBB105_3 ; SI-NEXT: .LBB105_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v18, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v41 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_cvt_f32_f16_e32 v9, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v37 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_cvt_f32_f16_e32 v10, v55 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_readfirstlane_b32 s4, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v35 +; SI-NEXT: v_readfirstlane_b32 s5, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v9, v9, v8 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_cvt_f32_f16_e32 v11, v53 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v1 -; SI-NEXT: v_or_b32_e32 v8, v10, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v48 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v13, v35 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s6, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v31 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_readfirstlane_b32 s6, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v28 +; SI-NEXT: v_readfirstlane_b32 s7, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v29 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; 
SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v34 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v2 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readfirstlane_b32 s8, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v27 +; SI-NEXT: v_readfirstlane_b32 s7, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v30 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_readfirstlane_b32 s8, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v25 +; SI-NEXT: v_readfirstlane_b32 s9, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v26 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v21 -; SI-NEXT: v_or_b32_e32 v12, v14, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v27 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_or_b32_e32 v13, v13, v15 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v62 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_readfirstlane_b32 s10, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: s_lshl_b32 s8, s8, 16 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readfirstlane_b32 s9, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v15, v15, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v26 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v4 -; SI-NEXT: v_or_b32_e32 v14, v16, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v46 +; SI-NEXT: v_readfirstlane_b32 s11, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v23 +; SI-NEXT: s_lshl_b32 s9, s9, 16 ; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_readfirstlane_b32 s10, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: s_lshl_b32 s10, s10, 16 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: 
v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v42 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_readfirstlane_b32 s11, v4 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: v_readfirstlane_b32 s12, v18 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_readfirstlane_b32 s12, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v20 +; SI-NEXT: v_readfirstlane_b32 s13, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v17 +; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 +; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readfirstlane_b32 s13, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v5 +; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: v_readfirstlane_b32 s14, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_readfirstlane_b32 s17, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v21 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_readfirstlane_b32 s14, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: v_readfirstlane_b32 s15, v14 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_or_b32_e32 v17, v18, v17 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v18, v21, v18 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_readfirstlane_b32 s15, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: v_readfirstlane_b32 s16, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_readfirstlane_b32 s16, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readfirstlane_b32 s17, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_alignbit_b32 v57, v14, v15, 24 -; SI-NEXT: v_alignbit_b32 v58, v14, v15, 16 -; SI-NEXT: 
v_alignbit_b32 v61, v14, v15, 8 -; SI-NEXT: v_alignbit_b32 v44, v13, v12, 24 -; SI-NEXT: v_alignbit_b32 v47, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v56, v13, v12, 8 -; SI-NEXT: v_alignbit_b32 v43, v11, v10, 8 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v17 -; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v14 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v13 -; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v8 -; SI-NEXT: v_bfe_u32 v54, v7, 8, 8 -; SI-NEXT: v_bfe_u32 v51, v6, 8, 8 -; SI-NEXT: v_bfe_u32 v49, v5, 8, 8 -; SI-NEXT: v_bfe_u32 v38, v20, 8, 8 -; SI-NEXT: v_bfe_u32 v33, v4, 8, 8 -; SI-NEXT: v_bfe_u32 v29, v3, 8, 8 -; SI-NEXT: v_bfe_u32 v60, v1, 8, 8 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v22 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v23 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_lshrrev_b32_e32 v36, 8, v19 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v24, v22, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v6 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v23, v23, v22 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v23 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_or_b32_e32 v37, v22, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v7 -; SI-NEXT: v_or_b32_e32 v32, v25, v21 -; SI-NEXT: v_alignbit_b32 v21, v32, v37, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v32, v37, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v32, v37, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v23, v24, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v23, v24, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v23, v24, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v19, v18, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], 
s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v19, v18, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v19, v18, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v17, v16, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v17, v16, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v17, v16, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v11, v10, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v11, v10, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v8, v9, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v8, v9, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v8, v9, 8 -; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v32 -; SI-NEXT: v_bfe_u32 v22, v2, 8, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: v_readfirstlane_b32 s18, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readfirstlane_b32 s18, v11 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: v_readfirstlane_b32 s19, v9 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: v_readfirstlane_b32 s19, v7 +; SI-NEXT: s_lshl_b32 s19, s19, 16 +; SI-NEXT: v_readfirstlane_b32 s20, v8 +; SI-NEXT: s_or_b32 s19, s20, s19 +; SI-NEXT: s_lshr_b64 s[20:21], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[22:23], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[78:79], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s45, s19, 8 +; SI-NEXT: s_lshr_b32 s43, s17, 8 +; SI-NEXT: s_lshr_b32 s41, s15, 8 +; SI-NEXT: s_lshr_b32 s29, s13, 8 +; SI-NEXT: s_lshr_b32 s27, s11, 8 +; SI-NEXT: s_lshr_b32 s25, s9, 8 +; SI-NEXT: s_lshr_b32 s23, s7, 8 +; 
SI-NEXT: s_lshr_b32 s21, s5, 8 +; SI-NEXT: v_bfe_u32 v48, v7, 8, 8 +; SI-NEXT: v_bfe_u32 v39, v6, 8, 8 +; SI-NEXT: v_bfe_u32 v36, v5, 8, 8 +; SI-NEXT: v_bfe_u32 v34, v20, 8, 8 +; SI-NEXT: v_bfe_u32 v32, v4, 8, 8 +; SI-NEXT: v_bfe_u32 v30, v3, 8, 8 +; SI-NEXT: v_bfe_u32 v19, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v18, v1, 8, 8 ; SI-NEXT: .LBB105_3: ; %end -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xff, v37 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_lshl_b32 s26, s26, 8 +; SI-NEXT: s_and_b32 s22, s22, 0xff +; SI-NEXT: s_or_b32 s18, s18, s26 +; SI-NEXT: s_lshl_b32 s22, s22, 16 +; SI-NEXT: s_lshl_b32 s20, s20, 24 +; SI-NEXT: s_and_b32 s18, s18, 0xffff +; SI-NEXT: s_or_b32 s20, s20, s22 +; SI-NEXT: s_or_b32 s18, s18, s20 +; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: s_and_b32 s18, s19, 0xff +; SI-NEXT: s_lshl_b32 s19, s45, 8 ; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s18, s18, s19 ; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 -; SI-NEXT: v_or_b32_e32 v21, v21, v25 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v26 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v25, 0xff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 -; SI-NEXT: v_or_b32_e32 v25, v26, v25 -; SI-NEXT: v_or_b32_e32 v21, v21, v25 -; SI-NEXT: buffer_store_dword v21, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v52 -; SI-NEXT: v_or_b32_e32 v21, v21, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v54 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v7, v25, v7 -; SI-NEXT: v_or_b32_e32 v7, v21, v7 -; SI-NEXT: v_add_i32_e32 v21, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v7, v21, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v48 +; SI-NEXT: s_and_b32 s18, s18, 0xffff +; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_or_b32_e32 v7, s18, v7 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s18, s42, 8 +; SI-NEXT: s_or_b32 s16, s16, s18 +; SI-NEXT: s_and_b32 s18, s28, 0xff +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s24, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: v_add_i32_e32 v8, vcc, 4, v0 +; SI-NEXT: s_or_b32 s16, s16, s18 +; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v24 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: 
v_or_b32_e32 v7, v7, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v21, v24, v21 -; SI-NEXT: v_or_b32_e32 v7, v7, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v7, v21, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v39 -; SI-NEXT: v_or_b32_e32 v7, v7, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v51 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v6, v21, v6 +; SI-NEXT: v_add_i32_e32 v7, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v8, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xff +; SI-NEXT: s_lshl_b32 s17, s43, 8 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v39 +; SI-NEXT: s_and_b32 s16, s16, 0xffff ; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: v_or_b32_e32 v6, s16, v6 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: s_lshl_b32 s16, s56, 8 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: s_and_b32 s16, s44, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s40, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 ; SI-NEXT: v_add_i32_e32 v7, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v18, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v36 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v49 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 +; SI-NEXT: v_add_i32_e32 v6, vcc, 16, v0 +; SI-NEXT: v_mov_b32_e32 v7, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: s_lshl_b32 s15, s41, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v36 +; SI-NEXT: s_and_b32 s14, s14, 0xffff ; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v5, s14, v5 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s14, s62, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: s_and_b32 s14, s58, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s46, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 ; SI-NEXT: 
v_add_i32_e32 v6, vcc, 20, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 ; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v16 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 +; SI-NEXT: v_mov_b32_e32 v6, s12 +; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: s_lshl_b32 s13, s29, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v20 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v31 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v38 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v34 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v5, s12, v5 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s12, s76, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: s_and_b32 s12, s72, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s60, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 ; SI-NEXT: v_add_i32_e32 v6, vcc, 28, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 ; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v61 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v58 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v57 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v28 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v33 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_add_i32_e32 v5, vcc, 32, v0 +; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s27, 8 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v32 +; SI-NEXT: s_and_b32 s10, s10, 0xffff ; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: v_or_b32_e32 v4, s10, v4 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s90, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: 
s_and_b32 s10, s78, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s74, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 ; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v56 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v44 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v63 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v29 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: v_add_i32_e32 v4, vcc, 40, v0 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s25, 8 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v30 +; SI-NEXT: s_and_b32 s8, s8, 0xffff ; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v3, s8, v3 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s94, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s92, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s88, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 ; SI-NEXT: v_add_i32_e32 v4, vcc, 44, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 ; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v43 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v59 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v22 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v2, v4, v2 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: v_mov_b32_e32 v4, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s23, 8 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v19 +; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: v_or_b32_e32 v2, s6, v2 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s36, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s34, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s30, 24 
+; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v9 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 ; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v45 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v60 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v18 +; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s37, v40, 5 +; SI-NEXT: v_readlane_b32 s36, v40, 4 +; SI-NEXT: v_readlane_b32 s35, v40, 3 +; SI-NEXT: v_readlane_b32 s34, v40, 2 +; SI-NEXT: v_readlane_b32 s31, v40, 1 +; SI-NEXT: v_readlane_b32 s30, v40, 0 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: 
s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB105_4: -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr29 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr76 
+; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr21 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: s_branch .LBB105_2 ; ; VI-LABEL: bitcast_v32f16_to_v64i8_scalar: @@ -81898,139 +81691,126 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v55, v20 -; VI-NEXT: v_mov_b32_e32 v53, v14 -; VI-NEXT: v_mov_b32_e32 v34, v12 -; VI-NEXT: v_mov_b32_e32 v32, v0 +; VI-NEXT: v_mov_b32_e32 v31, v30 +; VI-NEXT: v_mov_b32_e32 v38, v28 +; VI-NEXT: v_mov_b32_e32 v32, v26 +; VI-NEXT: v_mov_b32_e32 v30, v24 +; VI-NEXT: v_mov_b32_e32 v26, v22 +; VI-NEXT: v_mov_b32_e32 v49, v20 +; VI-NEXT: v_mov_b32_e32 v48, v14 +; VI-NEXT: v_mov_b32_e32 v39, v12 +; VI-NEXT: v_mov_b32_e32 v20, v10 +; VI-NEXT: v_mov_b32_e32 v35, v8 +; VI-NEXT: v_mov_b32_e32 v34, v6 +; VI-NEXT: v_mov_b32_e32 v36, v4 +; VI-NEXT: v_mov_b32_e32 v37, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 ; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 ; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 ; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 ; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:28 ; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 ; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:36 ; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 ; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:56 ; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:64 ; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 
offset:72 ; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:68 -; VI-NEXT: v_mov_b32_e32 v51, v23 -; VI-NEXT: v_mov_b32_e32 v30, v26 -; VI-NEXT: v_mov_b32_e32 v26, v22 ; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 ; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5 ; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v13 -; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v15 -; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v17 -; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v19 -; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v21 -; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v51 -; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v25 +; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v53, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v55, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v40, 8, v15 +; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 +; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 +; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 ; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v27 ; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v29 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v31 -; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v33 +; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v28 +; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v4 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec -; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v35 -; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v37 -; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v20 +; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v6 +; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v8 +; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v10 ; VI-NEXT: s_waitcnt vmcnt(13) -; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v12 +; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v12 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v14 ; VI-NEXT: s_waitcnt vmcnt(9) -; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v39 +; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v22 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v48 +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v33 ; VI-NEXT: s_waitcnt vmcnt(5) -; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v49 +; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v24 ; VI-NEXT: s_cbranch_scc0 .LBB107_4 ; VI-NEXT: ; %bb.1: ; %cmp.false ; VI-NEXT: v_or_b32_sdwa v0, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v36, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v36, v34 -; VI-NEXT: v_or_b32_sdwa v1, v34, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v0, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v35, v6 -; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v20, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v39, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v53, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v16, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v49, v7 -; VI-NEXT: v_or_b32_sdwa v3, v8, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v48, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v18, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v55, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v37, v8 +; VI-NEXT: v_or_b32_sdwa v0, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v49, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v26, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v24, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v40, v9 +; VI-NEXT: v_or_b32_sdwa v0, v26, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v30, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v30, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v28, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v31, v10 +; VI-NEXT: v_or_b32_sdwa v0, v32, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v38, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v52, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v17, v11 -; VI-NEXT: v_mov_b32_e32 v19, v13 -; VI-NEXT: s_and_b32 s4, s28, 0xff -; VI-NEXT: s_lshl_b32 s5, s29, 8 -; VI-NEXT: v_mov_b32_e32 v39, v14 -; VI-NEXT: s_or_b32 s4, s4, s5 -; VI-NEXT: v_mov_b32_e32 v21, v15 -; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_mov_b32_e32 v20, v5 -; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 
src1_sel:DWORD -; VI-NEXT: s_lshl_b32 s5, s17, 8 -; VI-NEXT: s_lshl_b32 s6, s19, 8 -; VI-NEXT: s_lshl_b32 s7, s23, 8 -; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: v_mov_b32_e32 v25, v23 -; VI-NEXT: v_mov_b32_e32 v48, v51 -; VI-NEXT: v_mov_b32_e32 v23, v26 -; VI-NEXT: v_mov_b32_e32 v26, v30 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v34, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v31, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v52, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v54, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v41, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v54, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v41, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v42, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v43, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v42, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v43, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v45, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v44, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v45, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_and_b32 s4, s28, 0xff +; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v47, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v47, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) ; VI-NEXT: v_or_b32_sdwa v1, v57, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_or_b32 s4, s4, s5 +; VI-NEXT: v_or_b32_sdwa v2, v34, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v35, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v32, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_and_b32 s4, s4, 0xffff +; VI-NEXT: v_or_b32_sdwa v0, v37, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff +; VI-NEXT: s_lshl_b32 s5, 
s17, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 ; VI-NEXT: s_and_b32 s5, s18, 0xff +; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_or_b32 s5, s5, s6 ; VI-NEXT: s_and_b32 s4, s4, 0xffff ; VI-NEXT: s_lshl_b32 s5, s5, 16 @@ -82039,6 +81819,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_lshl_b32 s6, s21, 8 ; VI-NEXT: s_or_b32 s5, s5, s6 ; VI-NEXT: s_and_b32 s6, s22, 0xff +; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_or_b32 s6, s6, s7 ; VI-NEXT: s_and_b32 s5, s5, 0xffff ; VI-NEXT: s_lshl_b32 s6, s6, 16 @@ -82047,70 +81828,75 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_lshl_b32 s7, s25, 8 ; VI-NEXT: s_or_b32 s6, s6, s7 ; VI-NEXT: s_and_b32 s7, s26, 0xff +; VI-NEXT: s_lshl_b32 s8, s27, 8 ; VI-NEXT: s_or_b32 s7, s7, s8 ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: s_lshl_b32 s7, s7, 16 ; VI-NEXT: s_or_b32 s6, s6, s7 +; VI-NEXT: v_mov_b32_e32 v24, v36 +; VI-NEXT: v_mov_b32_e32 v28, v26 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 -; VI-NEXT: v_mov_b32_e32 v30, v34 ; VI-NEXT: s_cbranch_execnz .LBB107_3 ; VI-NEXT: .LBB107_2: ; %cmp.true +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44 -; VI-NEXT: v_or_b32_sdwa v3, v39, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x300, v3 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v43 -; VI-NEXT: v_or_b32_sdwa v13, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v13, v61, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42 -; VI-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v15, vcc, 0x300, v3 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v41 -; VI-NEXT: v_or_b32_sdwa v12, v62, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v12, v59, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v54 -; VI-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52 -; VI-NEXT: v_or_b32_sdwa v11, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v30 -; VI-NEXT: v_or_b32_sdwa v3, v59, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v30, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v28 -; VI-NEXT: v_or_b32_sdwa v10, v29, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v26 -; VI-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v24 -; VI-NEXT: v_or_b32_sdwa v9, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v23 +; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57 -; 
VI-NEXT: v_or_b32_sdwa v3, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v58, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v22, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v55 -; VI-NEXT: v_or_b32_sdwa v8, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52 +; VI-NEXT: v_or_b32_sdwa v11, v56, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v31 +; VI-NEXT: v_or_b32_sdwa v3, v46, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_mov_b32_e32 v26, v24 +; VI-NEXT: v_add_u32_e32 v24, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v38 +; VI-NEXT: v_or_b32_sdwa v10, v29, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v32 +; VI-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v27, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v30 +; VI-NEXT: v_or_b32_sdwa v9, v25, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v28 +; VI-NEXT: v_or_b32_sdwa v3, v23, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v23, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v8, v21, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v18 -; VI-NEXT: v_or_b32_sdwa v3, v25, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v18, vcc, 0x300, v3 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v16 -; VI-NEXT: v_or_b32_sdwa v7, v21, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v53 -; VI-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v16, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v36 -; VI-NEXT: v_or_b32_sdwa v6, v17, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v31 +; VI-NEXT: v_or_b32_sdwa v7, v17, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v48 ; VI-NEXT: v_or_b32_sdwa v3, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v16, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v6, v55, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v20 +; VI-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v17, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v37 -; VI-NEXT: v_or_b32_sdwa v5, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v35 -; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3 -; VI-NEXT: buffer_load_dword v3, 
off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v5, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v34 ; VI-NEXT: s_add_i32 s28, s28, 3 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v26 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_add_i32 s26, s26, 3 @@ -82138,12 +81924,11 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_and_b32 s10, s16, 0xff ; VI-NEXT: s_lshl_b32 s11, s17, 8 ; VI-NEXT: s_or_b32 s10, s11, s10 -; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47 ; VI-NEXT: s_addk_i32 s6, 0x300 ; VI-NEXT: s_addk_i32 s8, 0x300 ; VI-NEXT: s_addk_i32 s10, 0x300 -; VI-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v45 ; VI-NEXT: s_addk_i32 s4, 0x300 ; VI-NEXT: s_lshl_b32 s5, s5, 16 @@ -82153,7 +81938,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_and_b32 s8, s8, 0xffff ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_or_b32 s9, s9, s10 ; VI-NEXT: s_or_b32 s7, s7, s8 ; VI-NEXT: s_or_b32 s5, s5, s6 @@ -82165,10 +81950,10 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: v_or_b32_sdwa v6, v6, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v7, v7, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v8, v8, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v9, v9, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v10, v10, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v11, v11, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v12, v12, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v9, v9, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v10, v10, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v11, v11, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v12, v12, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v13, v13, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 @@ -82178,12 +81963,11 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11 ; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12 ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v2 @@ -82191,11 +81975,12 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: v_mov_b32_e32 v0, s9 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s5 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v20, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v32 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v37 ; VI-NEXT: v_or_b32_sdwa v3, v50, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_e32 v3, s4, v3 ; VI-NEXT: v_or_b32_sdwa v4, v4, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 @@ -82221,22 +82006,8 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB107_4: -; VI-NEXT: v_mov_b32_e32 v25, v23 -; VI-NEXT: v_mov_b32_e32 v23, v26 -; VI-NEXT: v_mov_b32_e32 v26, v30 -; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v48, v51 -; VI-NEXT: v_mov_b32_e32 v31, v10 -; VI-NEXT: v_mov_b32_e32 v36, v34 -; VI-NEXT: v_mov_b32_e32 v35, v6 -; VI-NEXT: v_mov_b32_e32 v37, v8 -; VI-NEXT: v_mov_b32_e32 v39, v14 -; VI-NEXT: v_mov_b32_e32 v21, v15 -; VI-NEXT: v_mov_b32_e32 v19, v13 -; VI-NEXT: v_mov_b32_e32 v17, v11 -; VI-NEXT: v_mov_b32_e32 v40, v9 -; VI-NEXT: v_mov_b32_e32 v49, v7 -; VI-NEXT: v_mov_b32_e32 v20, v5 +; VI-NEXT: v_mov_b32_e32 v24, v36 +; VI-NEXT: v_mov_b32_e32 v28, v26 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; VI-NEXT: s_branch .LBB107_2 ; @@ -82259,244 +82030,228 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v34, v30 -; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:8 +; GFX9-NEXT: v_mov_b32_e32 v48, v30 +; GFX9-NEXT: v_mov_b32_e32 v33, v28 
+; GFX9-NEXT: v_mov_b32_e32 v37, v26 +; GFX9-NEXT: v_mov_b32_e32 v34, v24 +; GFX9-NEXT: v_mov_b32_e32 v32, v22 +; GFX9-NEXT: v_mov_b32_e32 v30, v20 +; GFX9-NEXT: v_mov_b32_e32 v49, v14 +; GFX9-NEXT: v_mov_b32_e32 v22, v12 +; GFX9-NEXT: v_mov_b32_e32 v39, v10 +; GFX9-NEXT: v_mov_b32_e32 v35, v8 +; GFX9-NEXT: v_mov_b32_e32 v20, v6 +; GFX9-NEXT: v_mov_b32_e32 v28, v4 +; GFX9-NEXT: v_mov_b32_e32 v26, v2 +; GFX9-NEXT: v_mov_b32_e32 v24, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 ; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 ; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 ; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 ; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:28 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 ; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:36 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 ; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:56 ; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:64 ; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:72 ; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:68 -; GFX9-NEXT: v_mov_b32_e32 v51, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; GFX9-NEXT: v_lshlrev_b32_e32 v50, 8, v3 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v7 ; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v17 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v19 -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v23 -; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v51 -; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v53, 8, v29 -; GFX9-NEXT: s_waitcnt vmcnt(19) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v36 -; GFX9-NEXT: s_waitcnt vmcnt(18) -; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v32 -; GFX9-NEXT: s_waitcnt vmcnt(17) -; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v38 +; GFX9-NEXT: v_lshlrev_b32_e32 v54, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v53, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v55, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; 
GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29 +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; GFX9-NEXT: s_waitcnt vmcnt(21) +; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v2 +; GFX9-NEXT: s_waitcnt vmcnt(20) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v4 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec -; GFX9-NEXT: s_waitcnt vmcnt(15) -; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v30 -; GFX9-NEXT: s_waitcnt vmcnt(13) -; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v31 -; GFX9-NEXT: s_waitcnt vmcnt(11) -; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v33 -; GFX9-NEXT: s_waitcnt vmcnt(9) -; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v37 -; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v35 -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v39 -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v48 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v49 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(18) +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(16) +; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v8 +; GFX9-NEXT: s_waitcnt vmcnt(14) +; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v10 +; GFX9-NEXT: s_waitcnt vmcnt(12) +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v12 +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v14 +; GFX9-NEXT: s_waitcnt vmcnt(8) +; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v36 +; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v38 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v31 ; GFX9-NEXT: s_cbranch_scc0 .LBB107_4 ; GFX9-NEXT: ; %bb.1: ; %cmp.false ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v24, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff ; GFX9-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX9-NEXT: v_or_b32_sdwa v2, v2, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v50, v3 -; GFX9-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v28, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v3, v0, 16, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v20, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v35, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v31, v5 ; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v22, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v55 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v18, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v32, v16 +; GFX9-NEXT: v_or_b32_sdwa v1, v30, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0 -; GFX9-NEXT: v_mov_b32_e32 v16, v22 -; GFX9-NEXT: v_or_b32_sdwa v0, v22, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v37, v24 -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_mov_b32_e32 v17, v9 +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v28, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v48, v10 +; GFX9-NEXT: v_or_b32_sdwa v1, v33, 
v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s5, s18, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s19, 8 -; GFX9-NEXT: v_mov_b32_e32 v55, v11 ; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s5, s5, s6 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v40, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s5, s20, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s21, 8 -; GFX9-NEXT: v_mov_b32_e32 v33, v12 ; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s5, s5, s6 ; GFX9-NEXT: s_and_b32 s6, s22, 0xff ; GFX9-NEXT: s_lshl_b32 s7, s23, 8 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v43, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s6, s6, s7 -; GFX9-NEXT: v_mov_b32_e32 v19, v13 ; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v46, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6 ; GFX9-NEXT: s_and_b32 s6, s24, 0xff ; GFX9-NEXT: s_lshl_b32 s7, s25, 8 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v45, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s6, s6, s7 ; GFX9-NEXT: s_and_b32 s7, s26, 0xff ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 -; GFX9-NEXT: v_mov_b32_e32 v29, v14 +; GFX9-NEXT: v_or_b32_sdwa v2, v26, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v57, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s7, s7, s8 ; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7 ; GFX9-NEXT: v_lshl_or_b32 
v4, v4, 16, v2 -; GFX9-NEXT: v_mov_b32_e32 v42, v15 -; GFX9-NEXT: v_mov_b32_e32 v27, v25 -; GFX9-NEXT: v_mov_b32_e32 v30, v18 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v49, v20 -; GFX9-NEXT: v_mov_b32_e32 v39, v26 -; GFX9-NEXT: v_mov_b32_e32 v35, v28 -; GFX9-NEXT: v_mov_b32_e32 v54, v31 -; GFX9-NEXT: v_mov_b32_e32 v31, v51 -; GFX9-NEXT: v_mov_b32_e32 v2, s6 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v57, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_mov_b32_e32 v18, v22 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 -; GFX9-NEXT: v_mov_b32_e32 v20, v24 +; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_cbranch_execnz .LBB107_3 ; GFX9-NEXT: .LBB107_2: ; %cmp.true ; GFX9-NEXT: v_add_u32_e32 v3, 3, v45 -; GFX9-NEXT: v_or_b32_sdwa v3, v31, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v14, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v44 -; GFX9-NEXT: v_or_b32_sdwa v3, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v13, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v43 -; GFX9-NEXT: v_or_b32_sdwa v3, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v15, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v41 -; GFX9-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v59, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v12, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v40 -; GFX9-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v34 -; GFX9-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v58, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v48 +; GFX9-NEXT: v_or_b32_sdwa v3, v47, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v11, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v52 -; GFX9-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v39 -; GFX9-NEXT: v_or_b32_sdwa v3, v59, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v29, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v37 +; GFX9-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v10, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 
v3, 3, v35 -; GFX9-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v33 +; GFX9-NEXT: v_or_b32_sdwa v3, v25, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v25, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v16 -; GFX9-NEXT: v_or_b32_sdwa v3, v58, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v32 +; GFX9-NEXT: v_or_b32_sdwa v3, v23, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v9, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v37 -; GFX9-NEXT: v_or_b32_sdwa v3, v47, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v34 +; GFX9-NEXT: v_or_b32_sdwa v3, v21, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v30 -; GFX9-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v8, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v49 -; GFX9-NEXT: v_or_b32_sdwa v3, v23, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v29 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v18 ; GFX9-NEXT: v_or_b32_sdwa v3, v42, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v7, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v32 +; GFX9-NEXT: v_add_u32_e32 v8, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v30 ; GFX9-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v48 +; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v49 ; GFX9-NEXT: v_or_b32_sdwa v3, v55, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v6, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v33 +; GFX9-NEXT: v_add_u32_e32 v7, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v16 ; GFX9-NEXT: v_or_b32_sdwa v3, v17, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v39 +; GFX9-NEXT: v_or_b32_sdwa v3, v54, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v6, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v22 +; GFX9-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v17, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(15) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v56 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v57 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v20 +; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v3, v51, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 
v5, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v35 +; GFX9-NEXT: v_or_b32_sdwa v3, v31, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v26 +; GFX9-NEXT: v_or_b32_sdwa v3, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v4, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v28 ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 @@ -82521,13 +82276,18 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_and_b32 s9, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s10, s17, 8 ; GFX9-NEXT: s_add_i32 s18, s18, 3 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v57 ; GFX9-NEXT: v_add_u32_e32 v2, 3, v46 ; GFX9-NEXT: s_or_b32 s9, s10, s9 ; GFX9-NEXT: s_and_b32 s10, s18, 0xff ; GFX9-NEXT: s_lshl_b32 s11, s19, 8 -; GFX9-NEXT: v_or_b32_sdwa v2, v54, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v56 +; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_or_b32 s10, s11, s10 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2 ; GFX9-NEXT: s_addk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s5, 0x300 @@ -82536,69 +82296,48 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_addk_i32 s8, 0x300 ; GFX9-NEXT: s_addk_i32 s9, 0x300 ; GFX9-NEXT: s_addk_i32 s10, 0x300 -; GFX9-NEXT: v_mov_b32_e32 v22, 0xffff ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10 ; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s8 ; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; GFX9-NEXT: v_and_b32_e32 v22, s4, v22 +; GFX9-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX9-NEXT: v_and_b32_e32 v5, 0xffff, v5 ; GFX9-NEXT: v_and_b32_e32 v6, 0xffff, v6 ; GFX9-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; GFX9-NEXT: v_and_b32_e32 v8, 0xffff, v8 ; GFX9-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v10 ; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12 ; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_lshl_or_b32 v5, v19, 16, v5 ; GFX9-NEXT: v_lshl_or_b32 v6, v17, 16, v6 -; GFX9-NEXT: v_lshl_or_b32 v7, v23, 16, v7 -; GFX9-NEXT: v_lshl_or_b32 v8, v16, 16, v8 +; GFX9-NEXT: v_lshl_or_b32 v7, v16, 16, v7 +; GFX9-NEXT: v_lshl_or_b32 v8, v18, 16, v8 ; GFX9-NEXT: v_lshl_or_b32 v9, v21, 16, v9 ; GFX9-NEXT: v_lshl_or_b32 v10, v25, 16, v10 -; GFX9-NEXT: v_lshl_or_b32 v11, v24, 16, v11 -; GFX9-NEXT: v_lshl_or_b32 v12, v36, 16, v12 +; GFX9-NEXT: v_lshl_or_b32 v11, v29, 16, v11 +; GFX9-NEXT: v_lshl_or_b32 v12, v24, 16, v12 ; GFX9-NEXT: v_lshl_or_b32 v13, v15, 16, v13 ; GFX9-NEXT: v_lshl_or_b32 v14, v14, 16, v2 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v3 +; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 
; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v4, v20, 16, v4 ; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, s9 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: v_mov_b32_e32 v2, s5 -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v5, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; GFX9-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshl_or_b32 v5, v19, 16, v5 -; GFX9-NEXT: s_waitcnt vmcnt(1) ; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v4, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; GFX9-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: v_or_b32_sdwa v3, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshl_or_b32 v4, v20, 16, v4 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: v_or_b32_sdwa v3, v18, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v22, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_mov_b32_e32 v22, 0xffff ; GFX9-NEXT: v_add_u32_e32 v3, 0x300, v3 +; GFX9-NEXT: v_and_b32_e32 v22, s4, v22 ; GFX9-NEXT: v_lshl_or_b32 v3, v3, 16, v22 ; GFX9-NEXT: .LBB107_3: ; %end ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload @@ -82620,27 +82359,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] ; GFX9-NEXT: .LBB107_4: -; GFX9-NEXT: v_mov_b32_e32 v30, v18 -; GFX9-NEXT: v_mov_b32_e32 v49, v20 -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v54, v31 -; GFX9-NEXT: v_mov_b32_e32 v29, v14 -; GFX9-NEXT: v_mov_b32_e32 v48, v10 -; GFX9-NEXT: v_mov_b32_e32 v39, v26 -; GFX9-NEXT: v_mov_b32_e32 v32, v16 -; GFX9-NEXT: v_mov_b32_e32 v16, v22 -; GFX9-NEXT: v_mov_b32_e32 v33, v12 -; GFX9-NEXT: v_mov_b32_e32 v35, v28 -; GFX9-NEXT: v_mov_b32_e32 v37, v24 -; GFX9-NEXT: v_mov_b32_e32 v31, v51 -; GFX9-NEXT: v_mov_b32_e32 v27, v25 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v42, v15 -; GFX9-NEXT: v_mov_b32_e32 v19, v13 -; GFX9-NEXT: v_mov_b32_e32 v55, v11 -; GFX9-NEXT: 
v_mov_b32_e32 v17, v9 -; GFX9-NEXT: v_mov_b32_e32 v50, v3 +; GFX9-NEXT: v_mov_b32_e32 v31, v5 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: s_branch .LBB107_2 ; @@ -85353,13 +85072,13 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16 @@ -85367,20 +85086,20 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16 +; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16 ; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16 @@ -85400,18 +85119,18 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v16 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v16 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 8, v15 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v14 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v14 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v14 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v13 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v12 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v12 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v11 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v11 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v10 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v9 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v8 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v8 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v7 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, 
v7 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v6 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v6 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v5 @@ -85440,19 +85159,19 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v7.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v8.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v8.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v9.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v9.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v9.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v10.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v10.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v11.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v11.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v12.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v12.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v12.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v13.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v13.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v14.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v87.h, v14.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v14.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v15.l +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v15.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v15.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v103.h, v16.l ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v16.h @@ -85626,29 +85345,29 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v68.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v2, v6, vcc_lo ; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22] ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[19:20] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18] ; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v2, v3, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v2, v3, vcc_lo ; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v14 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v83.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24 -; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v82, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v82.h +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18] +; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v80, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 @@ -85665,82 +85384,81 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; 
GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v23 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v80.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24 ; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v27, v2, v3 :: v_dual_add_f32 v2, 0x40c00000, v4 ; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v82.h +; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v4, v5, vcc_lo -; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff +; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11-TRUE16-NEXT: v_add3_u32 v6, v8, v3, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h +; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v4, v5, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13 -; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff ; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v6, v7, vcc_lo -; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v6, v7, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24 ; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[27:28] -; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26] -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v96.h -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v87.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v3, v4, vcc_lo ; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16 ; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v33 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v33 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26] +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v33 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v33 ; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28 -; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v27 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v23 +; 
GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v112, v4, v6 :: v_dual_add_f32 v1, 0x40c00000, v5 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8 -; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v15 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v112.h +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo -; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 -; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15 -; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v32 -; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v27 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff +; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1 ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 ; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v7, v11, vcc_lo +; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff +; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v7, v11, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v4, v8, vcc_lo ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v103.h ; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v37, v2, v3, vcc_lo -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v114.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v113.h ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v38 ; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v38 @@ -85805,7 +85523,7 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h ; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v113.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v114.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v98.l ; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v27.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v6 @@ -85822,12 +85540,12 @@ define <64 x i8> 
@bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v8.l, v8.h ; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v83.h +; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v82.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v100.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v30.l ; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v33.h ; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v8 -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v80.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v81.l ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v9.l, v9.h ; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h @@ -85841,14 +85559,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h ; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v97.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v87.l +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v96.l ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v69.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v10 ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v11.l, v11.h ; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v82.h +; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v80.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v86.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v11 @@ -85862,14 +85580,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) { ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v13.l, v13.h ; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v96.h -; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v81.l +; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v87.h +; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v83.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v13 ; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v14.l, v14.h ; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h -; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v114.h +; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v113.h ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v71.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v14 @@ -86513,598 +86231,499 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a, ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mul_f32_e64 v19, 1.0, s17 -; SI-NEXT: v_mul_f32_e32 v42, 1.0, v2 -; SI-NEXT: v_mul_f32_e32 v20, 1.0, v1 -; SI-NEXT: v_mul_f32_e32 v25, 1.0, v4 -; SI-NEXT: v_mul_f32_e32 v28, 1.0, v3 -; SI-NEXT: v_mul_f32_e32 v43, 1.0, v6 -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v31, 1.0, v8 -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v44, 1.0, v10 -; SI-NEXT: v_mul_f32_e32 v29, 1.0, v9 -; SI-NEXT: v_mul_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_mul_f32_e32 v35, 1.0, v11 -; SI-NEXT: v_mul_f32_e32 v56, 1.0, v14 -; SI-NEXT: v_mul_f32_e32 
v33, 1.0, v13 +; SI-NEXT: v_mul_f32_e32 v33, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v52, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v6 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v44, 1.0, v8 +; SI-NEXT: v_mul_f32_e32 v46, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v40, 1.0, v10 +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v9 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v11 +; SI-NEXT: v_mul_f32_e32 v57, 1.0, v14 +; SI-NEXT: v_mul_f32_e32 v60, 1.0, v13 ; SI-NEXT: v_mul_f32_e32 v36, 1.0, v16 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v15 -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v18 -; SI-NEXT: v_mul_f32_e32 v32, 1.0, v17 +; SI-NEXT: v_mul_f32_e32 v37, 1.0, v15 +; SI-NEXT: v_mul_f32_e32 v32, 1.0, v18 +; SI-NEXT: v_mul_f32_e32 v34, 1.0, v17 ; SI-NEXT: v_mul_f32_e64 v3, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v22, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v26, 1.0, s19 ; SI-NEXT: v_mul_f32_e64 v2, 1.0, s18 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s21 ; SI-NEXT: v_mul_f32_e64 v6, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v41, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v29, 1.0, s23 ; SI-NEXT: v_mul_f32_e64 v4, 1.0, s22 ; SI-NEXT: v_mul_f32_e64 v8, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s24 -; SI-NEXT: v_mul_f32_e64 v26, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v38, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v35, 1.0, s27 ; SI-NEXT: v_mul_f32_e64 v7, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v10, 1.0, s29 -; SI-NEXT: v_mul_f32_e64 v11, 1.0, s28 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e64 v53, 1.0, s29 +; SI-NEXT: v_mul_f32_e64 v55, 1.0, s28 +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 
offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB109_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v22 -; SI-NEXT: v_alignbit_b32 v27, v1, v3, 16 -; SI-NEXT: v_alignbit_b32 v30, v24, v2, 16 -; SI-NEXT: v_alignbit_b32 v1, v30, v27, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v30, v27, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v30, v27, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_alignbit_b32 v23, v1, v3, 16 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v41 -; SI-NEXT: v_alignbit_b32 v21, v1, v6, 16 -; SI-NEXT: v_alignbit_b32 v19, v17, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v19, v21, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v19, v21, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v19, v21, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_alignbit_b32 v20, v1, v6, 16 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v26 -; SI-NEXT: v_alignbit_b32 v15, v1, v9, 16 -; SI-NEXT: v_alignbit_b32 v16, v13, v7, 16 -; SI-NEXT: v_alignbit_b32 v1, v16, v15, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v15, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v15, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v42 -; SI-NEXT: v_alignbit_b32 v10, v1, v11, 16 -; SI-NEXT: v_alignbit_b32 v11, v9, v20, 16 -; SI-NEXT: v_alignbit_b32 v1, v11, v10, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v11, v10, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v11, v10, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v17, v1, v38, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v53 +; SI-NEXT: v_alignbit_b32 v14, v1, v55, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v50 +; SI-NEXT: v_alignbit_b32 v11, v1, v52, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v44 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v29 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v25 -; 
SI-NEXT: v_alignbit_b32 v6, v1, v28, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v31 -; SI-NEXT: v_alignbit_b32 v3, v1, v34, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v56 -; SI-NEXT: v_alignbit_b32 v2, v1, v35, 16 -; SI-NEXT: v_alignbit_b32 v8, v7, v33, 16 -; SI-NEXT: v_alignbit_b32 v4, v8, v2, 24 +; SI-NEXT: v_alignbit_b32 v8, v1, v46, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v61 +; SI-NEXT: v_alignbit_b32 v21, v19, v4, 16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v35 +; SI-NEXT: v_alignbit_b32 v4, v1, v25, 16 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v36 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v48 -; SI-NEXT: v_alignbit_b32 v1, v1, v39, 16 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v43 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v44 -; SI-NEXT: v_alignbit_b32 v5, v4, v32, 16 -; SI-NEXT: v_mov_b32_e32 v31, v23 -; SI-NEXT: v_alignbit_b32 v20, v18, v23, 16 -; SI-NEXT: v_alignbit_b32 v14, v12, v29, 16 -; SI-NEXT: v_alignbit_b32 v23, v5, v1, 24 -; SI-NEXT: v_mov_b32_e32 v38, v36 -; SI-NEXT: v_alignbit_b32 v36, v20, v6, 24 -; SI-NEXT: v_alignbit_b32 v25, v14, v3, 24 -; SI-NEXT: v_alignbit_b32 v50, v8, v2, 16 -; SI-NEXT: v_mov_b32_e32 v53, v32 -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v23, v5, v1, 16 -; SI-NEXT: v_alignbit_b32 v32, v5, v1, 8 -; SI-NEXT: v_alignbit_b32 v55, v20, v6, 16 -; SI-NEXT: v_alignbit_b32 v40, v20, v6, 8 -; SI-NEXT: v_mov_b32_e32 v35, v29 -; SI-NEXT: v_alignbit_b32 v52, v14, v3, 16 -; SI-NEXT: v_alignbit_b32 v54, v14, v3, 8 -; SI-NEXT: v_mov_b32_e32 v37, v33 -; SI-NEXT: v_alignbit_b32 v51, v8, v2, 8 -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v22, 24, v22 -; SI-NEXT: v_lshrrev_b32_e32 v62, 8, v30 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v23, v41 -; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v41 -; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v19 -; SI-NEXT: v_mov_b32_e32 v28, v26 -; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v26 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v16 -; SI-NEXT: v_mov_b32_e32 v26, v42 -; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v42 -; SI-NEXT: v_lshrrev_b32_e32 v58, 8, v11 -; SI-NEXT: v_mov_b32_e32 v29, v43 -; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v43 -; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v20 -; SI-NEXT: v_mov_b32_e32 v34, v44 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v44 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v14 -; SI-NEXT: v_mov_b32_e32 v33, v56 -; SI-NEXT: v_lshrrev_b32_e32 v43, 24, v56 -; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v8 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: v_lshrrev_b32_e32 v42, 24, v48 -; SI-NEXT: v_mov_b32_e32 v48, v32 -; SI-NEXT: v_mov_b32_e32 v32, v50 -; SI-NEXT: v_mov_b32_e32 v50, v25 -; SI-NEXT: v_mov_b32_e32 v25, v36 -; SI-NEXT: v_mov_b32_e32 v36, v38 -; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v5 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v26 +; SI-NEXT: v_alignbit_b32 v18, v16, v7, 16 +; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v40 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v57 +; SI-NEXT: v_alignbit_b32 v3, v1, v37, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_alignbit_b32 v24, v22, v2, 16 +; SI-NEXT: v_alignbit_b32 v15, v13, v27, 16 +; SI-NEXT: v_alignbit_b32 v12, v10, v49, 16 +; SI-NEXT: v_alignbit_b32 v9, v7, v43, 16 +; SI-NEXT: v_alignbit_b32 v5, v6, 
v60, 16 +; SI-NEXT: v_alignbit_b32 v2, v1, v34, 16 +; SI-NEXT: v_readfirstlane_b32 s8, v23 +; SI-NEXT: v_readfirstlane_b32 s9, v24 +; SI-NEXT: v_readfirstlane_b32 s14, v20 +; SI-NEXT: v_readfirstlane_b32 s15, v21 +; SI-NEXT: v_readfirstlane_b32 s20, v17 +; SI-NEXT: v_readfirstlane_b32 s21, v18 +; SI-NEXT: v_readfirstlane_b32 s26, v14 +; SI-NEXT: v_readfirstlane_b32 s27, v15 +; SI-NEXT: v_readfirstlane_b32 s42, v11 +; SI-NEXT: v_readfirstlane_b32 s43, v12 +; SI-NEXT: v_readfirstlane_b32 s56, v8 +; SI-NEXT: v_readfirstlane_b32 s57, v9 +; SI-NEXT: v_readfirstlane_b32 s62, v4 +; SI-NEXT: v_readfirstlane_b32 s63, v5 +; SI-NEXT: v_readfirstlane_b32 s76, v3 +; SI-NEXT: v_readfirstlane_b32 s77, v2 +; SI-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[8:9], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[18:19], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[22:23], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[20:21], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[24:25], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[40:41], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[56:57], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[56:57], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[56:57], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[62:63], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[62:63], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[62:63], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[76:77], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[76:77], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[76:77], 8 +; SI-NEXT: v_lshrrev_b32_e32 v30, 24, v26 +; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v24 +; SI-NEXT: v_lshrrev_b32_e32 v62, 24, v29 +; SI-NEXT: v_lshrrev_b32_e32 v58, 8, v21 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v35 +; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v18 +; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v33 +; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v15 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v39 +; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v12 +; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v40 +; SI-NEXT: v_lshrrev_b32_e32 v41, 8, v9 +; SI-NEXT: v_lshrrev_b32_e32 v42, 24, v57 +; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v5 +; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v32 +; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v2 ; SI-NEXT: s_cbranch_execnz .LBB109_3 ; SI-NEXT: .LBB109_2: ; %cmp.true +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v53 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v55 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_alignbit_b32 v14, v14, v13, 16 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v38 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v36 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v39 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v44 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v50 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v37 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v46 +; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v52 +; SI-NEXT: v_add_f32_e32 v11, 
0x40c00000, v11 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v49 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v53 -; SI-NEXT: v_add_f32_e32 v42, 0x40c00000, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v42 -; SI-NEXT: v_alignbit_b32 v5, v4, v2, 16 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v43, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v43 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v34 -; SI-NEXT: v_add_f32_e32 v44, 0x40c00000, v9 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v44 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v29 -; SI-NEXT: v_add_f32_e32 v45, 0x40c00000, v10 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v45 -; SI-NEXT: v_alignbit_b32 v48, v5, v1, 8 -; SI-NEXT: v_lshrrev_b32_e32 v43, 24, v43 -; SI-NEXT: v_lshrrev_b32_e32 v42, 24, v42 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v37 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: v_alignbit_b32 v8, v7, v3, 16 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v32, v8, v2, 16 -; SI-NEXT: v_alignbit_b32 v51, v8, v2, 8 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v61 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_alignbit_b32 v3, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v34 +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v25 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_alignbit_b32 v8, v8, v7, 16 +; SI-NEXT: 
v_and_b32_e32 v7, 0xffff0000, v43 +; SI-NEXT: v_alignbit_b32 v11, v11, v10, 16 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v49 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v32 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v57 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v40 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v39 +; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v1 +; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v60 +; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v6 +; SI-NEXT: v_add_f32_e32 v34, 0x40c00000, v7 +; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v10 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v34 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v36 +; SI-NEXT: v_alignbit_b32 v2, v1, v2, 16 +; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 +; SI-NEXT: v_alignbit_b32 v9, v7, v9, 16 +; SI-NEXT: v_alignbit_b32 v12, v10, v12, 16 +; SI-NEXT: v_readfirstlane_b32 s76, v3 +; SI-NEXT: v_readfirstlane_b32 s77, v2 +; SI-NEXT: v_readfirstlane_b32 s62, v4 +; SI-NEXT: v_readfirstlane_b32 s63, v5 +; SI-NEXT: v_readfirstlane_b32 s56, v8 +; SI-NEXT: v_readfirstlane_b32 s57, v9 +; SI-NEXT: v_readfirstlane_b32 s42, v11 +; SI-NEXT: v_readfirstlane_b32 s43, v12 +; SI-NEXT: v_readfirstlane_b32 s26, v14 +; SI-NEXT: s_lshr_b64 s[40:41], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[56:57], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[56:57], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[62:63], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[62:63], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[76:77], 16 +; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v12 +; SI-NEXT: v_lshrrev_b32_e32 v41, 8, v9 +; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v5 +; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v2 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v36 +; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v34 +; SI-NEXT: v_lshrrev_b32_e32 v42, 24, v25 +; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v32 +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 ; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_alignbit_b32 v15, v15, v13, 16 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v21, v19, v17, 16 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: v_alignbit_b32 v3, v6, v3, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v35 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_alignbit_b32 v14, v12, v6, 16 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v50, v14, v3, 24 -; SI-NEXT: 
v_alignbit_b32 v52, v14, v3, 16 -; SI-NEXT: v_alignbit_b32 v54, v14, v3, 8 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v14 +; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_alignbit_b32 v17, v17, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s20, v17 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v13 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v17 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v23 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v28 -; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 -; SI-NEXT: v_add_f32_e32 v41, 0x40c00000, v17 -; SI-NEXT: v_add_f32_e32 v56, 0x40c00000, v13 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v41 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v56 -; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v41 -; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v56 -; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v8 -; SI-NEXT: v_alignbit_b32 v19, v17, v19, 16 -; SI-NEXT: v_alignbit_b32 v16, v13, v16, 16 -; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v19 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v16 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_alignbit_b32 v6, v9, v6, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v31 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: v_alignbit_b32 v20, v18, v9, 16 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v25, v20, v6, 24 -; SI-NEXT: v_alignbit_b32 v55, v20, v6, 16 -; SI-NEXT: v_alignbit_b32 v40, v20, v6, 8 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 +; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_alignbit_b32 v27, v23, v22, 16 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: v_alignbit_b32 v10, v10, v9, 16 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v20, v20, v19, 16 +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v23, v23, v22, 16 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v13 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v35 +; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v13 +; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v16 +; SI-NEXT: 
v_lshrrev_b32_e32 v13, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v31 +; SI-NEXT: v_alignbit_b32 v15, v13, v15, 16 +; SI-NEXT: v_alignbit_b32 v18, v16, v18, 16 +; SI-NEXT: v_readfirstlane_b32 s27, v15 +; SI-NEXT: v_readfirstlane_b32 s21, v18 +; SI-NEXT: v_readfirstlane_b32 s14, v20 +; SI-NEXT: v_readfirstlane_b32 s8, v23 +; SI-NEXT: s_lshr_b64 s[18:19], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[22:23], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[26:27], 8 +; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v18 +; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v15 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v31 +; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v33 ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v19 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 -; SI-NEXT: v_add_f32_e32 v59, 0x40c00000, v23 -; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v59 -; SI-NEXT: v_alignbit_b32 v30, v24, v22, 16 -; SI-NEXT: v_alignbit_b32 v22, v30, v27, 24 -; SI-NEXT: v_lshrrev_b32_e32 v62, 8, v30 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v30, v27, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v30, v27, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v19, v21, 24 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v19, v21, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v19, v21, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v16, v15, 24 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v16, v15, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v16, v15, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v26 -; SI-NEXT: v_add_f32_e32 v47, 0x40c00000, v9 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v47 -; SI-NEXT: v_alignbit_b32 v11, v9, v11, 16 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v11, v10, 24 -; SI-NEXT: v_lshrrev_b32_e32 v58, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v47 -; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v20 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v11, v10, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v11, v10, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v8, v2, 24 -; SI-NEXT: buffer_store_dword v22, 
off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v5, v1, 24 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v5, v1, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v22, 24, v59 -; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v45 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v44 -; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v5 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v29 +; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v26 +; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v19 +; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v26 +; SI-NEXT: v_alignbit_b32 v21, v19, v21, 16 +; SI-NEXT: v_alignbit_b32 v24, v22, v24, 16 +; SI-NEXT: v_readfirstlane_b32 s15, v21 +; SI-NEXT: v_readfirstlane_b32 s9, v24 +; SI-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[8:9], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[20:21], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[56:57], 24 +; SI-NEXT: s_lshr_b64 s[56:57], s[62:63], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[76:77], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[76:77], 8 +; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v24 +; SI-NEXT: v_lshrrev_b32_e32 v58, 8, v21 +; SI-NEXT: v_lshrrev_b32_e32 v30, 24, v26 +; SI-NEXT: v_lshrrev_b32_e32 v62, 24, v29 ; SI-NEXT: .LBB109_3: ; %end -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v27, 0xff, v27 -; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v22 -; SI-NEXT: v_or_b32_e32 v22, v22, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 4, v0 -; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 -; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v36, 8, v23 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v27, v27, v36 -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v35, 0xff, v23 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v35 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v23 -; SI-NEXT: v_or_b32_e32 v33, v33, v35 -; SI-NEXT: v_or_b32_e32 v27, v27, v33 -; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v27, 0xff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v30, 8, v62 -; SI-NEXT: v_or_b32_e32 v27, v27, v30 
-; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_or_b32_e32 v22, v27, v22 -; SI-NEXT: buffer_store_dword v22, v24, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v23 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v23, 0xff, v23 +; SI-NEXT: s_lshl_b32 s5, s10, 8 +; SI-NEXT: v_or_b32_e32 v23, s5, v23 +; SI-NEXT: s_and_b32 s5, s6, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: v_or_b32_e32 v23, s4, v23 +; SI-NEXT: buffer_store_dword v23, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v23, 0xff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v27 ; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 +; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 +; SI-NEXT: s_lshl_b32 s4, s16, 8 +; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v30 +; SI-NEXT: v_or_b32_e32 v20, s4, v20 +; SI-NEXT: s_and_b32 s4, s12, 0xff +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v22, v24, v22 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s8, 24 +; SI-NEXT: v_or_b32_e32 v22, v23, v22 +; SI-NEXT: v_add_i32_e32 v23, vcc, 4, v0 +; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v20, s4, v20 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v22, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v46 -; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v41 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v17, v21, v17 -; SI-NEXT: v_or_b32_e32 v17, v19, v17 -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 +; SI-NEXT: v_and_b32_e32 v20, 0xff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v58 +; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 +; SI-NEXT: s_lshl_b32 s4, s22, 8 +; SI-NEXT: v_or_b32_e32 v20, v20, v21 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v62 +; SI-NEXT: v_or_b32_e32 v17, s4, v17 +; SI-NEXT: s_and_b32 s4, s18, 0xff +; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_or_b32_e32 v19, v21, v19 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s14, 24 +; SI-NEXT: v_or_b32_e32 v19, v20, v19 +; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v17, s4, v17 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v19, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:68 
; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v17 -; SI-NEXT: v_or_b32_e32 v15, v15, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v17, v19, v17 -; SI-NEXT: v_or_b32_e32 v15, v15, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v17, 0xff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v28 +; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: s_lshl_b32 s4, s28, 8 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v31 +; SI-NEXT: v_or_b32_e32 v14, s4, v14 +; SI-NEXT: s_and_b32 s4, s24, 0xff +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: v_or_b32_e32 v16, v18, v16 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s20, 24 +; SI-NEXT: v_or_b32_e32 v16, v17, v16 +; SI-NEXT: v_add_i32_e32 v17, vcc, 20, v0 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v14, s4, v14 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 8, v57 -; SI-NEXT: v_or_b32_e32 v15, v15, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v61 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v13, v16, v13 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_add_i32_e32 v15, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v16, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 -; SI-NEXT: v_or_b32_e32 v10, v10, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v15 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v59 ; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: s_lshl_b32 s4, s44, 8 +; SI-NEXT: v_or_b32_e32 v14, v14, v15 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v63 +; SI-NEXT: v_or_b32_e32 v11, s4, v11 +; SI-NEXT: s_and_b32 s4, s40, 0xff +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 ; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_or_b32_e32 v10, v10, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v10, v13, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s26, 24 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v11, s4, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v58 -; 
SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v63 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v9, v11, v9 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v40 -; SI-NEXT: v_or_b32_e32 v6, v6, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v55 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v25 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v6, v6, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v6, v9, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v11, 0xff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v47 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: s_lshl_b32 s4, s58, 8 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v56 +; SI-NEXT: v_or_b32_e32 v8, s4, v8 +; SI-NEXT: s_and_b32 s4, s46, 0xff +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: v_or_b32_e32 v10, v12, v10 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s42, 24 +; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 +; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v8, s4, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v47 -; SI-NEXT: v_or_b32_e32 v6, v6, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v59 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v6, v6, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v6, v9, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v8, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v54 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v52 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v50 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v60 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_and_b32_e32 v8, 0xff, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v41 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_lshl_b32 s4, s72, 8 +; SI-NEXT: v_or_b32_e32 v8, v8, v9 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v45 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v4, s4, v4 +; SI-NEXT: s_and_b32 s4, 
s60, 0xff +; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_or_b32_e32 v7, v9, v7 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s56, 24 +; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_add_i32_e32 v8, vcc, 44, v0 +; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v4, s4, v4 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v51 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: v_or_b32_e32 v3, v6, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v56 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v43 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v6, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v4, 0xff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v54 +; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v6 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: s_lshl_b32 s4, s76, 8 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v42 +; SI-NEXT: v_or_b32_e32 v3, s4, v3 +; SI-NEXT: s_and_b32 s4, s74, 0xff +; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s62, 24 +; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 52, v0 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v3, s4, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v48 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_add_i32_e32 v4, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v44 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v42 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v48 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 
v3, 24, v51 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload @@ -87126,97 +86745,70 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB109_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: v_mov_b32_e32 v53, v32 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: v_mov_b32_e32 v49, v48 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: v_mov_b32_e32 v37, v33 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: v_mov_b32_e32 v33, v56 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v35, v29 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: v_mov_b32_e32 v34, v44 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: v_mov_b32_e32 v31, v23 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: v_mov_b32_e32 v29, v43 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: v_mov_b32_e32 v28, v26 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: v_mov_b32_e32 v26, v42 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: v_mov_b32_e32 v23, v41 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr58 ; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr62 ; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr59 ; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr63 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: 
$vgpr12 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr42 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr45 ; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; kill: killed $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: s_branch .LBB109_2 ; ; VI-LABEL: bitcast_v32bf16_to_v64i8_scalar: @@ -92395,139 +91987,126 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; VI-NEXT: v_mov_b32_e32 v55, v20 -; VI-NEXT: v_mov_b32_e32 v53, v14 -; VI-NEXT: v_mov_b32_e32 v34, v12 -; VI-NEXT: v_mov_b32_e32 v32, v0 +; VI-NEXT: v_mov_b32_e32 v31, v30 +; VI-NEXT: v_mov_b32_e32 v38, v28 +; VI-NEXT: v_mov_b32_e32 v32, v26 +; VI-NEXT: v_mov_b32_e32 v30, v24 +; VI-NEXT: v_mov_b32_e32 v26, v22 +; VI-NEXT: v_mov_b32_e32 v49, v20 +; VI-NEXT: v_mov_b32_e32 v48, v14 +; VI-NEXT: v_mov_b32_e32 v39, v12 +; VI-NEXT: v_mov_b32_e32 v20, v10 +; VI-NEXT: v_mov_b32_e32 v35, v8 +; VI-NEXT: v_mov_b32_e32 v34, v6 +; VI-NEXT: v_mov_b32_e32 v36, v4 +; VI-NEXT: v_mov_b32_e32 v37, v0 ; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 -; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 -; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8 +; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 +; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 ; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4 -; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:16 +; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 ; VI-NEXT: buffer_load_ushort v54, 
off, s[0:3], s32 offset:12 -; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:24 +; VI-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 ; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:20 -; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:32 +; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 ; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:28 ; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 ; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:36 ; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 ; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:44 -; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:56 +; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:56 ; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:52 -; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:64 +; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:64 ; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:60 -; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:72 +; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:72 ; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:68 -; VI-NEXT: v_mov_b32_e32 v51, v23 -; VI-NEXT: v_mov_b32_e32 v30, v26 -; VI-NEXT: v_mov_b32_e32 v26, v22 ; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1 ; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3 ; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5 ; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 -; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v9 -; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v11 -; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v13 -; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v15 -; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v17 -; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v19 -; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v21 -; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v51 -; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v25 +; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v9 +; VI-NEXT: v_lshlrev_b32_e32 v53, 8, v11 +; VI-NEXT: v_lshlrev_b32_e32 v55, 8, v13 +; VI-NEXT: v_lshlrev_b32_e32 v40, 8, v15 +; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 +; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 +; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 ; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v27 ; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v29 -; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; VI-NEXT: s_waitcnt vmcnt(14) ; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v31 -; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v33 +; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v28 +; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v4 ; VI-NEXT: s_and_b64 s[4:5], vcc, exec -; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v35 -; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v37 -; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v20 +; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v6 +; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v8 +; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v10 ; VI-NEXT: s_waitcnt vmcnt(13) -; 
VI-NEXT: v_lshlrev_b32_e32 v33, 8, v12 +; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v12 ; VI-NEXT: s_waitcnt vmcnt(11) -; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v14 ; VI-NEXT: s_waitcnt vmcnt(9) -; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v39 +; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v22 ; VI-NEXT: s_waitcnt vmcnt(7) -; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v48 +; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v33 ; VI-NEXT: s_waitcnt vmcnt(5) -; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v49 +; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v24 ; VI-NEXT: s_cbranch_scc0 .LBB111_4 ; VI-NEXT: ; %bb.1: ; %cmp.false ; VI-NEXT: v_or_b32_sdwa v0, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v36, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v36, v34 -; VI-NEXT: v_or_b32_sdwa v1, v34, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_or_b32_sdwa v0, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v35, v6 -; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v20, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v39, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v53, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v16, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v49, v7 -; VI-NEXT: v_or_b32_sdwa v3, v8, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v48, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v18, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v55, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v37, v8 +; VI-NEXT: v_or_b32_sdwa v0, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v49, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v26, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v24, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v40, v9 +; VI-NEXT: v_or_b32_sdwa v0, v26, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v30, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v30, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v28, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v31, v10 +; VI-NEXT: v_or_b32_sdwa v0, v32, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v38, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v52, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_mov_b32_e32 v17, v11 -; VI-NEXT: v_mov_b32_e32 v19, v13 -; VI-NEXT: s_and_b32 s4, s28, 0xff -; VI-NEXT: s_lshl_b32 s5, s29, 8 -; VI-NEXT: v_mov_b32_e32 v39, v14 -; VI-NEXT: s_or_b32 s4, s4, s5 -; VI-NEXT: v_mov_b32_e32 v21, v15 -; VI-NEXT: s_and_b32 s4, s4, 0xffff -; VI-NEXT: v_mov_b32_e32 v20, v5 -; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: s_lshl_b32 s5, s17, 8 -; VI-NEXT: s_lshl_b32 s6, s19, 8 -; VI-NEXT: s_lshl_b32 s7, s23, 8 -; VI-NEXT: s_lshl_b32 s8, s27, 8 -; VI-NEXT: v_mov_b32_e32 v25, v23 -; VI-NEXT: v_mov_b32_e32 v48, v51 -; VI-NEXT: v_mov_b32_e32 v23, v26 -; VI-NEXT: v_mov_b32_e32 v26, v30 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_or_b32_sdwa v0, v34, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v31, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v52, v56 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v54, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v41, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v54, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v41, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v42, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v43, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v42, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v43, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v1, v45, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v44, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v1, v45, v63 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_and_b32 s4, s28, 0xff +; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: 
v_or_b32_sdwa v0, v47, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v47, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_waitcnt vmcnt(4) ; VI-NEXT: v_or_b32_sdwa v1, v57, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_or_b32 s4, s4, s5 +; VI-NEXT: v_or_b32_sdwa v2, v34, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v3, v35, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-NEXT: v_or_b32_sdwa v0, v32, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: s_and_b32 s4, s4, 0xffff +; VI-NEXT: v_or_b32_sdwa v0, v37, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD ; VI-NEXT: v_or_b32_e32 v3, s4, v0 ; VI-NEXT: s_and_b32 s4, s16, 0xff +; VI-NEXT: s_lshl_b32 s5, s17, 8 ; VI-NEXT: s_or_b32 s4, s4, s5 ; VI-NEXT: s_and_b32 s5, s18, 0xff +; VI-NEXT: s_lshl_b32 s6, s19, 8 ; VI-NEXT: s_or_b32 s5, s5, s6 ; VI-NEXT: s_and_b32 s4, s4, 0xffff ; VI-NEXT: s_lshl_b32 s5, s5, 16 @@ -92536,6 +92115,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; VI-NEXT: s_lshl_b32 s6, s21, 8 ; VI-NEXT: s_or_b32 s5, s5, s6 ; VI-NEXT: s_and_b32 s6, s22, 0xff +; VI-NEXT: s_lshl_b32 s7, s23, 8 ; VI-NEXT: s_or_b32 s6, s6, s7 ; VI-NEXT: s_and_b32 s5, s5, 0xffff ; VI-NEXT: s_lshl_b32 s6, s6, 16 @@ -92544,70 +92124,75 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; VI-NEXT: s_lshl_b32 s7, s25, 8 ; VI-NEXT: s_or_b32 s6, s6, s7 ; VI-NEXT: s_and_b32 s7, s26, 0xff +; VI-NEXT: s_lshl_b32 s8, s27, 8 ; VI-NEXT: s_or_b32 s7, s7, s8 ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: s_lshl_b32 s7, s7, 16 ; VI-NEXT: s_or_b32 s6, s6, s7 +; VI-NEXT: v_mov_b32_e32 v24, v36 +; VI-NEXT: v_mov_b32_e32 v28, v26 ; VI-NEXT: v_mov_b32_e32 v0, s4 ; VI-NEXT: v_mov_b32_e32 v1, s5 ; VI-NEXT: v_mov_b32_e32 v2, s6 -; VI-NEXT: v_mov_b32_e32 v30, v34 ; VI-NEXT: s_cbranch_execnz .LBB111_3 ; VI-NEXT: .LBB111_2: ; %cmp.true +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44 -; VI-NEXT: v_or_b32_sdwa v3, v39, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x300, v3 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v43 -; VI-NEXT: v_or_b32_sdwa v13, v33, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v13, v61, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42 -; VI-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v15, vcc, 0x300, v3 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v41 -; VI-NEXT: v_or_b32_sdwa v12, v62, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v12, v59, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v54 -; 
VI-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52 -; VI-NEXT: v_or_b32_sdwa v11, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v30 -; VI-NEXT: v_or_b32_sdwa v3, v59, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v30, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v28 -; VI-NEXT: v_or_b32_sdwa v10, v29, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v26 -; VI-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v24 -; VI-NEXT: v_or_b32_sdwa v9, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v23 +; VI-NEXT: s_waitcnt vmcnt(5) ; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57 -; VI-NEXT: v_or_b32_sdwa v3, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v58, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v22, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v55 -; VI-NEXT: v_or_b32_sdwa v8, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52 +; VI-NEXT: v_or_b32_sdwa v11, v56, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v31 +; VI-NEXT: v_or_b32_sdwa v3, v46, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_mov_b32_e32 v26, v24 +; VI-NEXT: v_add_u32_e32 v24, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v38 +; VI-NEXT: v_or_b32_sdwa v10, v29, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v32 +; VI-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v27, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v30 +; VI-NEXT: v_or_b32_sdwa v9, v25, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v28 +; VI-NEXT: v_or_b32_sdwa v3, v23, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v23, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v49 +; VI-NEXT: v_or_b32_sdwa v8, v21, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v18 -; VI-NEXT: v_or_b32_sdwa v3, v25, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v18, vcc, 0x300, v3 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v16 -; VI-NEXT: v_or_b32_sdwa v7, v21, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v53 -; VI-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v16, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v36 -; VI-NEXT: v_or_b32_sdwa v6, v17, 
v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v31 +; VI-NEXT: v_or_b32_sdwa v7, v17, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v48 ; VI-NEXT: v_or_b32_sdwa v3, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v16, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v39 +; VI-NEXT: v_or_b32_sdwa v6, v55, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v20 +; VI-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v17, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v37 -; VI-NEXT: v_or_b32_sdwa v5, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v35 -; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; VI-NEXT: v_or_b32_sdwa v5, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v34 ; VI-NEXT: s_add_i32 s28, s28, 3 +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v26 ; VI-NEXT: s_and_b32 s4, s28, 0xff ; VI-NEXT: s_lshl_b32 s5, s29, 8 ; VI-NEXT: s_add_i32 s26, s26, 3 @@ -92635,12 +92220,11 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; VI-NEXT: s_and_b32 s10, s16, 0xff ; VI-NEXT: s_lshl_b32 s11, s17, 8 ; VI-NEXT: s_or_b32 s10, s11, s10 -; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47 ; VI-NEXT: s_addk_i32 s6, 0x300 ; VI-NEXT: s_addk_i32 s8, 0x300 ; VI-NEXT: s_addk_i32 s10, 0x300 -; VI-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v45 ; VI-NEXT: s_addk_i32 s4, 0x300 ; VI-NEXT: s_lshl_b32 s5, s5, 16 @@ -92650,7 +92234,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; VI-NEXT: s_and_b32 s8, s8, 0xffff ; VI-NEXT: s_and_b32 s6, s6, 0xffff ; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1 -; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: s_or_b32 s9, s9, s10 ; VI-NEXT: s_or_b32 s7, s7, s8 ; VI-NEXT: s_or_b32 s5, s5, s6 @@ -92662,10 +92246,10 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; VI-NEXT: v_or_b32_sdwa v6, v6, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v7, v7, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v8, v8, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; 
VI-NEXT: v_or_b32_sdwa v9, v9, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v10, v10, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v11, v11, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 -; VI-NEXT: v_or_b32_sdwa v12, v12, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v9, v9, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v10, v10, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v11, v11, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 +; VI-NEXT: v_or_b32_sdwa v12, v12, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v13, v13, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 @@ -92675,12 +92259,11 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8 ; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9 ; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10 -; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11 -; VI-NEXT: s_waitcnt vmcnt(2) -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 -; VI-NEXT: s_waitcnt vmcnt(1) +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11 ; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12 ; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13 ; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v2 @@ -92688,11 +92271,12 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; VI-NEXT: v_mov_b32_e32 v0, s9 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s5 -; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: s_waitcnt vmcnt(1) ; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_add_u32_e32 v20, vcc, 0x300, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v32 +; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v37 ; VI-NEXT: v_or_b32_sdwa v3, v50, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; VI-NEXT: v_or_b32_e32 v3, s4, v3 ; VI-NEXT: v_or_b32_sdwa v4, v4, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0 @@ -92718,22 +92302,8 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB111_4: -; VI-NEXT: v_mov_b32_e32 v25, v23 -; VI-NEXT: v_mov_b32_e32 v23, v26 -; VI-NEXT: v_mov_b32_e32 v26, v30 -; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; VI-NEXT: v_mov_b32_e32 v48, v51 -; VI-NEXT: v_mov_b32_e32 v31, v10 -; VI-NEXT: v_mov_b32_e32 v36, v34 -; VI-NEXT: v_mov_b32_e32 v35, v6 -; VI-NEXT: v_mov_b32_e32 v37, v8 -; VI-NEXT: v_mov_b32_e32 v39, 
v14 -; VI-NEXT: v_mov_b32_e32 v21, v15 -; VI-NEXT: v_mov_b32_e32 v19, v13 -; VI-NEXT: v_mov_b32_e32 v17, v11 -; VI-NEXT: v_mov_b32_e32 v40, v9 -; VI-NEXT: v_mov_b32_e32 v49, v7 -; VI-NEXT: v_mov_b32_e32 v20, v5 +; VI-NEXT: v_mov_b32_e32 v24, v36 +; VI-NEXT: v_mov_b32_e32 v28, v26 ; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; VI-NEXT: s_branch .LBB111_2 ; @@ -92756,244 +92326,228 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; GFX9-NEXT: v_mov_b32_e32 v34, v30 -; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:76 -; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 -; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:8 +; GFX9-NEXT: v_mov_b32_e32 v48, v30 +; GFX9-NEXT: v_mov_b32_e32 v33, v28 +; GFX9-NEXT: v_mov_b32_e32 v37, v26 +; GFX9-NEXT: v_mov_b32_e32 v34, v24 +; GFX9-NEXT: v_mov_b32_e32 v32, v22 +; GFX9-NEXT: v_mov_b32_e32 v30, v20 +; GFX9-NEXT: v_mov_b32_e32 v49, v14 +; GFX9-NEXT: v_mov_b32_e32 v22, v12 +; GFX9-NEXT: v_mov_b32_e32 v39, v10 +; GFX9-NEXT: v_mov_b32_e32 v35, v8 +; GFX9-NEXT: v_mov_b32_e32 v20, v6 +; GFX9-NEXT: v_mov_b32_e32 v28, v4 +; GFX9-NEXT: v_mov_b32_e32 v26, v2 +; GFX9-NEXT: v_mov_b32_e32 v24, v0 +; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76 +; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 +; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8 ; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4 -; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:16 +; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16 ; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:12 -; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:24 +; GFX9-NEXT: buffer_load_ushort v8, off, s[0:3], s32 offset:24 ; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:20 -; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:32 +; GFX9-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:32 ; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:28 -; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:40 +; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40 ; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:36 -; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:48 +; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:48 ; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:44 -; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:56 +; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:56 ; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:52 -; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:64 +; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:64 ; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:60 -; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:72 +; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:72 ; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:68 -; GFX9-NEXT: v_mov_b32_e32 v51, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 ; GFX9-NEXT: v_lshlrev_b32_e32 v50, 8, v3 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v5 +; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v7 ; GFX9-NEXT: 
v_lshlrev_b32_e32 v5, 8, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v13 -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v17 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v19 -; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v23 -; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v51 -; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v27 -; GFX9-NEXT: v_lshlrev_b32_e32 v53, 8, v29 -; GFX9-NEXT: s_waitcnt vmcnt(19) -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v36 -; GFX9-NEXT: s_waitcnt vmcnt(18) -; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v32 -; GFX9-NEXT: s_waitcnt vmcnt(17) -; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v38 +; GFX9-NEXT: v_lshlrev_b32_e32 v54, 8, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v53, 8, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v55, 8, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v19 +; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v21 +; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v25 +; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v27 +; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29 +; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(22) +; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 +; GFX9-NEXT: s_waitcnt vmcnt(21) +; GFX9-NEXT: v_lshlrev_b32_e32 v47, 8, v2 +; GFX9-NEXT: s_waitcnt vmcnt(20) +; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v4 ; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec -; GFX9-NEXT: s_waitcnt vmcnt(15) -; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v30 -; GFX9-NEXT: s_waitcnt vmcnt(13) -; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v31 -; GFX9-NEXT: s_waitcnt vmcnt(11) -; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v33 -; GFX9-NEXT: s_waitcnt vmcnt(9) -; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v37 -; GFX9-NEXT: s_waitcnt vmcnt(7) -; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v35 -; GFX9-NEXT: s_waitcnt vmcnt(5) -; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v39 -; GFX9-NEXT: s_waitcnt vmcnt(3) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v48 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v49 -; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; GFX9-NEXT: s_waitcnt vmcnt(18) +; GFX9-NEXT: v_lshlrev_b32_e32 v59, 8, v6 +; GFX9-NEXT: s_waitcnt vmcnt(16) +; GFX9-NEXT: v_lshlrev_b32_e32 v58, 8, v8 +; GFX9-NEXT: s_waitcnt vmcnt(14) +; GFX9-NEXT: 
v_lshlrev_b32_e32 v61, 8, v10 +; GFX9-NEXT: s_waitcnt vmcnt(12) +; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v12 +; GFX9-NEXT: s_waitcnt vmcnt(10) +; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v14 +; GFX9-NEXT: s_waitcnt vmcnt(8) +; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v36 +; GFX9-NEXT: s_waitcnt vmcnt(6) +; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v38 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v31 ; GFX9-NEXT: s_cbranch_scc0 .LBB111_4 ; GFX9-NEXT: ; %bb.1: ; %cmp.false ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 ; GFX9-NEXT: s_or_b32 s4, s4, s5 -; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v24, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff ; GFX9-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX9-NEXT: v_or_b32_sdwa v2, v2, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v50, v3 -; GFX9-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v4, v28, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v3, v0, 16, v1 -; GFX9-NEXT: v_or_b32_sdwa v0, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v20, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v35, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_mov_b32_e32 v31, v5 ; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v39, v54 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v12, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v22, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v14, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v49, v55 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v18, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v32, v16 +; GFX9-NEXT: v_or_b32_sdwa v1, v30, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0 -; GFX9-NEXT: v_mov_b32_e32 v16, v22 -; GFX9-NEXT: v_or_b32_sdwa v0, v22, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 
src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v37, v24 -; GFX9-NEXT: v_or_b32_sdwa v1, v24, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v0, v32, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_mov_b32_e32 v17, v9 +; GFX9-NEXT: v_or_b32_sdwa v1, v34, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v26, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v37, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v28, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_mov_b32_e32 v48, v10 +; GFX9-NEXT: v_or_b32_sdwa v1, v33, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v34, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v48, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_and_b32 s4, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s17, 8 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v52, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v52, v29 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s5, s18, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s19, 8 -; GFX9-NEXT: v_mov_b32_e32 v55, v11 ; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v41, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v41, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s5, s5, s6 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v40, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v40, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5 ; GFX9-NEXT: s_and_b32 s5, s20, 0xff ; GFX9-NEXT: s_lshl_b32 s6, s21, 8 -; GFX9-NEXT: v_mov_b32_e32 v33, v12 ; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v44, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v44, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s5, s5, s6 ; GFX9-NEXT: s_and_b32 s6, s22, 0xff ; GFX9-NEXT: s_lshl_b32 s7, s23, 8 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v43, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v43, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s6, s6, s7 -; GFX9-NEXT: v_mov_b32_e32 v19, v13 ; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0 -; GFX9-NEXT: v_or_b32_sdwa v0, v46, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v0, v46, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 
src1_sel:DWORD ; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6 ; GFX9-NEXT: s_and_b32 s6, s24, 0xff ; GFX9-NEXT: s_lshl_b32 s7, s25, 8 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_or_b32_sdwa v1, v45, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX9-NEXT: v_or_b32_sdwa v1, v45, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s6, s6, s7 ; GFX9-NEXT: s_and_b32 s7, s26, 0xff ; GFX9-NEXT: s_lshl_b32 s8, s27, 8 -; GFX9-NEXT: v_mov_b32_e32 v29, v14 +; GFX9-NEXT: v_or_b32_sdwa v2, v26, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0 +; GFX9-NEXT: v_or_b32_sdwa v0, v57, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_or_b32 s7, s7, s8 ; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: s_waitcnt vmcnt(3) +; GFX9-NEXT: v_or_b32_sdwa v1, v56, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7 ; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v2 -; GFX9-NEXT: v_mov_b32_e32 v42, v15 -; GFX9-NEXT: v_mov_b32_e32 v27, v25 -; GFX9-NEXT: v_mov_b32_e32 v30, v18 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v49, v20 -; GFX9-NEXT: v_mov_b32_e32 v39, v26 -; GFX9-NEXT: v_mov_b32_e32 v35, v28 -; GFX9-NEXT: v_mov_b32_e32 v54, v31 -; GFX9-NEXT: v_mov_b32_e32 v31, v51 -; GFX9-NEXT: v_mov_b32_e32 v2, s6 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v0, v57, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GFX9-NEXT: v_mov_b32_e32 v18, v22 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v1, v56, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: v_mov_b32_e32 v1, s5 -; GFX9-NEXT: v_mov_b32_e32 v20, v24 +; GFX9-NEXT: v_mov_b32_e32 v2, s6 ; GFX9-NEXT: s_cbranch_execnz .LBB111_3 ; GFX9-NEXT: .LBB111_2: ; %cmp.true ; GFX9-NEXT: v_add_u32_e32 v3, 3, v45 -; GFX9-NEXT: v_or_b32_sdwa v3, v31, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v14, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v44 -; GFX9-NEXT: v_or_b32_sdwa v3, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v13, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v43 -; GFX9-NEXT: v_or_b32_sdwa v3, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v15, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v41 -; GFX9-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v59, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v12, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v40 -; GFX9-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v34 -; GFX9-NEXT: v_or_b32_sdwa v3, 
v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v58, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v48 +; GFX9-NEXT: v_or_b32_sdwa v3, v47, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v11, 0x300, v3 ; GFX9-NEXT: v_add_u32_e32 v3, 3, v52 -; GFX9-NEXT: v_or_b32_sdwa v3, v60, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v39 -; GFX9-NEXT: v_or_b32_sdwa v3, v59, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v29, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v37 +; GFX9-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v10, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v35 -; GFX9-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v33 +; GFX9-NEXT: v_or_b32_sdwa v3, v25, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v25, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v16 -; GFX9-NEXT: v_or_b32_sdwa v3, v58, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v32 +; GFX9-NEXT: v_or_b32_sdwa v3, v23, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v9, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v37 -; GFX9-NEXT: v_or_b32_sdwa v3, v47, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v34 +; GFX9-NEXT: v_or_b32_sdwa v3, v21, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v30 -; GFX9-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v8, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v49 -; GFX9-NEXT: v_or_b32_sdwa v3, v23, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v29 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v18 ; GFX9-NEXT: v_or_b32_sdwa v3, v42, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v7, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v32 +; GFX9-NEXT: v_add_u32_e32 v8, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v30 ; GFX9-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v48 +; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v49 ; GFX9-NEXT: v_or_b32_sdwa v3, v55, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v6, 0x300, v3 -; GFX9-NEXT: v_add_u32_e32 v3, 3, v33 +; GFX9-NEXT: v_add_u32_e32 v7, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v16 ; GFX9-NEXT: v_or_b32_sdwa v3, v17, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v39 +; GFX9-NEXT: 
v_or_b32_sdwa v3, v54, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v6, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v22 +; GFX9-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v17, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(15) -; GFX9-NEXT: v_add_u32_e32 v1, 3, v56 -; GFX9-NEXT: v_add_u32_e32 v0, 3, v57 -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v20 +; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; GFX9-NEXT: v_or_b32_sdwa v3, v51, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v5, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v35 +; GFX9-NEXT: v_or_b32_sdwa v3, v31, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v26 +; GFX9-NEXT: v_or_b32_sdwa v3, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v4, 0x300, v3 +; GFX9-NEXT: v_add_u32_e32 v3, 3, v28 ; GFX9-NEXT: s_add_i32 s28, s28, 3 ; GFX9-NEXT: s_and_b32 s4, s28, 0xff ; GFX9-NEXT: s_lshl_b32 s5, s29, 8 @@ -93018,13 +92572,18 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; GFX9-NEXT: s_and_b32 s9, s16, 0xff ; GFX9-NEXT: s_lshl_b32 s10, s17, 8 ; GFX9-NEXT: s_add_i32 s18, s18, 3 +; GFX9-NEXT: v_add_u32_e32 v0, 3, v57 ; GFX9-NEXT: v_add_u32_e32 v2, 3, v46 ; GFX9-NEXT: s_or_b32 s9, s10, s9 ; GFX9-NEXT: s_and_b32 s10, s18, 0xff ; GFX9-NEXT: s_lshl_b32 s11, s19, 8 -; GFX9-NEXT: v_or_b32_sdwa v2, v54, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_waitcnt vmcnt(4) +; GFX9-NEXT: v_add_u32_e32 v1, 3, v56 +; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_or_b32 s10, s11, s10 ; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0 +; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2 ; GFX9-NEXT: s_addk_i32 s4, 0x300 ; GFX9-NEXT: s_addk_i32 s5, 0x300 @@ -93033,69 +92592,48 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; GFX9-NEXT: s_addk_i32 s8, 0x300 ; GFX9-NEXT: s_addk_i32 s9, 0x300 ; GFX9-NEXT: s_addk_i32 s10, 0x300 -; GFX9-NEXT: v_mov_b32_e32 v22, 0xffff ; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1 ; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10 ; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s8 ; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; GFX9-NEXT: v_and_b32_e32 v22, s4, v22 +; GFX9-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX9-NEXT: v_and_b32_e32 v5, 0xffff, v5 ; GFX9-NEXT: v_and_b32_e32 v6, 0xffff, v6 ; GFX9-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; GFX9-NEXT: v_and_b32_e32 v8, 0xffff, v8 ; GFX9-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; GFX9-NEXT: 
v_and_b32_e32 v10, 0xffff, v10 ; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12 ; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX9-NEXT: v_lshl_or_b32 v5, v19, 16, v5 ; GFX9-NEXT: v_lshl_or_b32 v6, v17, 16, v6 -; GFX9-NEXT: v_lshl_or_b32 v7, v23, 16, v7 -; GFX9-NEXT: v_lshl_or_b32 v8, v16, 16, v8 +; GFX9-NEXT: v_lshl_or_b32 v7, v16, 16, v7 +; GFX9-NEXT: v_lshl_or_b32 v8, v18, 16, v8 ; GFX9-NEXT: v_lshl_or_b32 v9, v21, 16, v9 ; GFX9-NEXT: v_lshl_or_b32 v10, v25, 16, v10 -; GFX9-NEXT: v_lshl_or_b32 v11, v24, 16, v11 -; GFX9-NEXT: v_lshl_or_b32 v12, v36, 16, v12 +; GFX9-NEXT: v_lshl_or_b32 v11, v29, 16, v11 +; GFX9-NEXT: v_lshl_or_b32 v12, v24, 16, v12 ; GFX9-NEXT: v_lshl_or_b32 v13, v15, 16, v13 ; GFX9-NEXT: v_lshl_or_b32 v14, v14, 16, v2 +; GFX9-NEXT: s_waitcnt vmcnt(1) +; GFX9-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v3 +; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; GFX9-NEXT: v_lshl_or_b32 v4, v20, 16, v4 ; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, s9 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: v_mov_b32_e32 v2, s5 -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; GFX9-NEXT: s_waitcnt vmcnt(2) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v5, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; GFX9-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshl_or_b32 v5, v19, 16, v5 -; GFX9-NEXT: s_waitcnt vmcnt(1) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v4, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; GFX9-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: v_or_b32_sdwa v3, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v3 -; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; GFX9-NEXT: v_lshl_or_b32 v4, v20, 16, v4 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_add_u32_e32 v3, 3, v3 -; GFX9-NEXT: v_or_b32_sdwa v3, v18, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_or_b32_sdwa v3, v22, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_mov_b32_e32 v22, 0xffff ; GFX9-NEXT: v_add_u32_e32 v3, 0x300, v3 +; GFX9-NEXT: v_and_b32_e32 v22, s4, v22 ; GFX9-NEXT: v_lshl_or_b32 v3, v3, 16, v22 ; GFX9-NEXT: 
.LBB111_3: ; %end ; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload @@ -93117,27 +92655,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a, ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: s_setpc_b64 s[30:31] ; GFX9-NEXT: .LBB111_4: -; GFX9-NEXT: v_mov_b32_e32 v30, v18 -; GFX9-NEXT: v_mov_b32_e32 v49, v20 -; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; GFX9-NEXT: v_mov_b32_e32 v54, v31 -; GFX9-NEXT: v_mov_b32_e32 v29, v14 -; GFX9-NEXT: v_mov_b32_e32 v48, v10 -; GFX9-NEXT: v_mov_b32_e32 v39, v26 -; GFX9-NEXT: v_mov_b32_e32 v32, v16 -; GFX9-NEXT: v_mov_b32_e32 v16, v22 -; GFX9-NEXT: v_mov_b32_e32 v33, v12 -; GFX9-NEXT: v_mov_b32_e32 v35, v28 -; GFX9-NEXT: v_mov_b32_e32 v37, v24 -; GFX9-NEXT: v_mov_b32_e32 v31, v51 -; GFX9-NEXT: v_mov_b32_e32 v27, v25 -; GFX9-NEXT: v_mov_b32_e32 v23, v21 -; GFX9-NEXT: v_mov_b32_e32 v42, v15 -; GFX9-NEXT: v_mov_b32_e32 v19, v13 -; GFX9-NEXT: v_mov_b32_e32 v55, v11 -; GFX9-NEXT: v_mov_b32_e32 v17, v9 -; GFX9-NEXT: v_mov_b32_e32 v50, v3 +; GFX9-NEXT: v_mov_b32_e32 v31, v5 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; GFX9-NEXT: s_branch .LBB111_2 ; diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll index e66762f1e02c2..a1c0a87b65e02 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll @@ -2760,216 +2760,214 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5 -; SI-NEXT: v_readfirstlane_b32 s9, v1 -; SI-NEXT: v_readfirstlane_b32 s8, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v3 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v4 +; SI-NEXT: v_readfirstlane_b32 s6, v1 +; SI-NEXT: v_readfirstlane_b32 s7, v2 +; SI-NEXT: v_readfirstlane_b32 s4, v3 +; SI-NEXT: s_and_b64 s[8:9], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v4 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v4, s26 -; SI-NEXT: v_mov_b32_e32 v5, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v7, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s27, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s25, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s21, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s19, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s17, v9, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 16 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s27, 16 -; SI-NEXT: s_lshr_b32 s14, s25, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 16 -; SI-NEXT: s_lshr_b32 s40, s21, 16 -; SI-NEXT: s_lshr_b32 s41, s19, 16 -; SI-NEXT: s_lshr_b32 s42, s17, 16 +; SI-NEXT: s_lshr_b32 s60, s5, 16 +; SI-NEXT: s_lshr_b32 s61, s7, 16 +; SI-NEXT: s_lshr_b32 s62, s29, 16 +; SI-NEXT: s_lshr_b32 s63, s27, 16 +; SI-NEXT: s_lshr_b32 s72, s25, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 16 +; 
SI-NEXT: s_lshr_b32 s74, s21, 16 +; SI-NEXT: s_lshr_b32 s75, s19, 16 +; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s8, s8, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v4, s26 -; SI-NEXT: v_mov_b32_e32 v5, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v7, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s27, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s25, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s21, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s19, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s17, v9, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 16 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s27, 16 -; SI-NEXT: s_lshr_b32 s14, s25, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 16 -; SI-NEXT: s_lshr_b32 s40, s21, 16 -; SI-NEXT: s_lshr_b32 s41, s19, 16 -; SI-NEXT: s_lshr_b32 s42, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s60, s5, 16 +; SI-NEXT: s_lshr_b32 s61, s7, 16 +; SI-NEXT: s_lshr_b32 s62, s29, 16 +; SI-NEXT: s_lshr_b32 s63, s27, 16 +; SI-NEXT: s_lshr_b32 s72, s25, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 16 +; SI-NEXT: s_lshr_b32 s74, s21, 16 +; SI-NEXT: s_lshr_b32 s75, s19, 16 +; SI-NEXT: s_lshr_b32 s76, s17, 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: 
v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: s_lshl_b32 s9, s56, 16 +; SI-NEXT: s_and_b32 s11, s16, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_mov_b32_e32 v1, s9 +; SI-NEXT: s_and_b32 s9, s17, 0xffff +; SI-NEXT: s_lshl_b32 s11, s76, 16 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s18, 0xffff +; SI-NEXT: s_lshl_b32 s11, s46, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s19, 0xffff +; SI-NEXT: s_lshl_b32 s11, s75, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s13, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s20, 0xffff +; SI-NEXT: s_lshl_b32 s11, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 
44, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s12, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s21, 0xffff +; SI-NEXT: s_lshl_b32 s11, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s11, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s22, 0xffff +; SI-NEXT: s_lshl_b32 s11, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s10, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s23, 0xffff +; SI-NEXT: s_lshl_b32 s11, s73, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s24, 0xffff +; SI-NEXT: s_lshl_b32 s11, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s25, 0xffff +; SI-NEXT: s_lshl_b32 s11, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s26, 0xffff +; SI-NEXT: s_lshl_b32 s11, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s27, 0xffff +; SI-NEXT: s_lshl_b32 s11, s63, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s28, 0xffff +; SI-NEXT: s_lshl_b32 s11, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s29, 0xffff +; SI-NEXT: s_lshl_b32 s11, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 
+; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s9, s10, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s6, s6, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s61, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s8, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr63 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr60 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v18i32_to_v36i16_scalar: @@ -9746,207 +9744,207 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5 -; SI-NEXT: v_mov_b32_e32 v19, s16 -; SI-NEXT: v_mov_b32_e32 v18, s17 +; SI-NEXT: v_mov_b32_e32 v18, s16 +; SI-NEXT: v_mov_b32_e32 v19, s17 ; SI-NEXT: v_mov_b32_e32 v16, s18 -; SI-NEXT: v_mov_b32_e32 v15, s19 +; SI-NEXT: v_mov_b32_e32 v17, s19 ; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v13, s21 +; SI-NEXT: v_mov_b32_e32 v15, s21 ; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v11, s23 +; SI-NEXT: v_mov_b32_e32 v13, s23 ; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v9, s25 +; SI-NEXT: v_mov_b32_e32 v11, s25 ; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v7, s27 +; SI-NEXT: v_mov_b32_e32 v9, s27 ; SI-NEXT: v_mov_b32_e32 v6, s28 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v5, s29 +; SI-NEXT: v_mov_b32_e32 v7, s29 ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; 
%cmp.false -; SI-NEXT: v_alignbit_b32 v17, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v21, v5, v6, 16 -; SI-NEXT: v_alignbit_b32 v22, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v25, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v27, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v29, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v31, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v33, v18, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v11 +; SI-NEXT: v_lshr_b64 v[20:21], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[22:23], v[6:7], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[10:11], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[16:17], 16 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 ; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v19 +; SI-NEXT: v_lshr_b64 v[28:29], v[18:19], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 ; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_alignbit_b32 v17, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v21, v5, v6, 16 -; SI-NEXT: v_alignbit_b32 v22, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v25, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v27, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v29, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v31, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v33, v18, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v11 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[20:21], v[3:4], 16 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_lshr_b64 v[22:23], v[6:7], 16 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_lshr_b64 v[23:24], v[8:9], 16 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_lshr_b64 v[24:25], v[10:11], 16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, 
v15 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_lshr_b64 v[25:26], v[12:13], 16 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_lshr_b64 v[26:27], v[14:15], 16 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_lshr_b64 v[27:28], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[18:19], 16 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 ; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v19 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v33 -; SI-NEXT: v_or_b32_e32 v19, v19, v33 -; SI-NEXT: buffer_store_dword v19, v0, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 ; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_or_b32_e32 v18, v18, v28 +; SI-NEXT: buffer_store_dword v18, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v37 ; SI-NEXT: v_or_b32_e32 v18, v18, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v27 +; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: v_or_b32_e32 v16, v16, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v16, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v35 -; SI-NEXT: v_or_b32_e32 v15, v15, v16 -; SI-NEXT: v_add_i32_e32 v16, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v16, 0xffff, v17 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36 +; SI-NEXT: v_or_b32_e32 v16, v16, v17 +; SI-NEXT: v_add_i32_e32 v17, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v26 ; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_or_b32_e32 v14, v14, v16 +; SI-NEXT: v_add_i32_e32 v16, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v29 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v35 ; SI-NEXT: v_or_b32_e32 v14, v14, v15 -; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0 +; SI-NEXT: v_add_i32_e32 v15, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v34 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v25 +; SI-NEXT: v_or_b32_e32 v12, v12, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v12, v14, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v12, 0xffff, v13 +; SI-NEXT: 
v_lshlrev_b32_e32 v13, 16, v34 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v13, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v32 -; SI-NEXT: v_or_b32_e32 v11, v11, v12 -; SI-NEXT: v_add_i32_e32 v12, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v24 +; SI-NEXT: v_or_b32_e32 v10, v10, v12 +; SI-NEXT: v_add_i32_e32 v12, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v10, 0xffff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v33 ; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v30 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v23 +; SI-NEXT: v_or_b32_e32 v8, v8, v10 +; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v8, v10, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v8, 0xffff, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v32 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 40, v0 +; SI-NEXT: v_add_i32_e32 v9, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v28 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v22 +; SI-NEXT: v_or_b32_e32 v6, v6, v8 +; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v6, v8, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v31 ; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v26 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v20 -; SI-NEXT: v_or_b32_e32 v1, v1, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; SI-NEXT: v_or_b32_e32 v1, v1, v6 +; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v24 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 ; SI-NEXT: 
v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v20 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr27 +; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v18f32_to_v36i16_scalar: @@ -15972,216 +15970,214 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5 -; SI-NEXT: v_readfirstlane_b32 s9, v1 -; SI-NEXT: v_readfirstlane_b32 s8, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v3 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v4 +; SI-NEXT: v_readfirstlane_b32 s6, v1 +; SI-NEXT: v_readfirstlane_b32 s7, v2 +; SI-NEXT: v_readfirstlane_b32 s4, v3 +; SI-NEXT: s_and_b64 s[8:9], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v4 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v4, s26 -; SI-NEXT: v_mov_b32_e32 v5, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v7, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s27, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s25, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s21, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s19, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s17, v9, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 16 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s27, 16 -; SI-NEXT: s_lshr_b32 s14, s25, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 16 -; SI-NEXT: s_lshr_b32 s40, s21, 16 -; SI-NEXT: s_lshr_b32 s41, s19, 16 -; SI-NEXT: s_lshr_b32 s42, s17, 16 +; SI-NEXT: 
s_lshr_b32 s60, s5, 16 +; SI-NEXT: s_lshr_b32 s61, s7, 16 +; SI-NEXT: s_lshr_b32 s62, s29, 16 +; SI-NEXT: s_lshr_b32 s63, s27, 16 +; SI-NEXT: s_lshr_b32 s72, s25, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 16 +; SI-NEXT: s_lshr_b32 s74, s21, 16 +; SI-NEXT: s_lshr_b32 s75, s19, 16 +; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v4, s26 -; SI-NEXT: v_mov_b32_e32 v5, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v7, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s27, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s25, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s21, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s19, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s17, v9, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 16 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s27, 16 -; SI-NEXT: s_lshr_b32 s14, s25, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 16 -; SI-NEXT: s_lshr_b32 s40, s21, 16 -; SI-NEXT: s_lshr_b32 s41, s19, 16 -; SI-NEXT: s_lshr_b32 s42, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s60, s5, 16 +; SI-NEXT: s_lshr_b32 s61, s7, 16 +; SI-NEXT: s_lshr_b32 s62, s29, 16 +; SI-NEXT: s_lshr_b32 s63, s27, 16 +; SI-NEXT: s_lshr_b32 s72, s25, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 16 +; SI-NEXT: s_lshr_b32 s74, s21, 16 +; SI-NEXT: s_lshr_b32 s75, s19, 16 +; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; 
SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: s_lshl_b32 s9, s56, 16 +; SI-NEXT: s_and_b32 s11, s16, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_mov_b32_e32 v1, s9 +; SI-NEXT: s_and_b32 s9, s17, 0xffff +; SI-NEXT: s_lshl_b32 s11, s76, 16 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s18, 0xffff +; SI-NEXT: s_lshl_b32 s11, s46, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s19, 0xffff +; SI-NEXT: s_lshl_b32 s11, s75, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s13, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; 
SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s20, 0xffff +; SI-NEXT: s_lshl_b32 s11, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s12, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s21, 0xffff +; SI-NEXT: s_lshl_b32 s11, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s11, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s22, 0xffff +; SI-NEXT: s_lshl_b32 s11, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s10, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s23, 0xffff +; SI-NEXT: s_lshl_b32 s11, s73, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s24, 0xffff +; SI-NEXT: s_lshl_b32 s11, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s25, 0xffff +; SI-NEXT: s_lshl_b32 s11, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s26, 0xffff +; SI-NEXT: s_lshl_b32 s11, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s27, 0xffff +; SI-NEXT: s_lshl_b32 s11, s63, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s28, 0xffff +; SI-NEXT: s_lshl_b32 s11, s12, 
16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s29, 0xffff +; SI-NEXT: s_lshl_b32 s11, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s9, s10, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s6, s6, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s61, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s8, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr63 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr60 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v9i64_to_v36i16_scalar: @@ -21460,97 +21456,97 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i ; SI-NEXT: v_mov_b32_e32 v7, s29 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v5, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v21, v7, v6, 16 -; SI-NEXT: v_alignbit_b32 v22, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v24, v11, v10, 16 -; SI-NEXT: v_alignbit_b32 v26, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v29, v15, v14, 16 -; SI-NEXT: v_alignbit_b32 v31, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v34, v19, v18, 16 
-; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v19 +; SI-NEXT: v_lshr_b64 v[20:21], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[22:23], v[6:7], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[10:11], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[16:17], 16 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v19 +; SI-NEXT: v_lshr_b64 v[28:29], v[18:19], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0 -; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0 -; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0 -; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0 -; SI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0 -; SI-NEXT: v_add_f64 v[8:9], v[8:9], 1.0 -; SI-NEXT: v_add_f64 v[6:7], v[6:7], 1.0 ; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 ; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_alignbit_b32 v5, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v21, v7, v6, 16 -; SI-NEXT: v_alignbit_b32 v22, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v24, v11, v10, 16 -; SI-NEXT: v_alignbit_b32 v26, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v29, v15, v14, 16 -; SI-NEXT: v_alignbit_b32 v31, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v34, v19, v18, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v19 +; SI-NEXT: v_add_f64 v[6:7], v[6:7], 1.0 +; SI-NEXT: v_lshr_b64 v[20:21], v[3:4], 16 +; SI-NEXT: v_add_f64 v[8:9], v[8:9], 1.0 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 +; SI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0 +; SI-NEXT: v_lshr_b64 v[22:23], v[6:7], 16 +; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0 +; SI-NEXT: v_lshr_b64 v[23:24], v[8:9], 16 +; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0 +; SI-NEXT: v_lshr_b64 v[24:25], v[10:11], 16 +; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[12:13], 16 +; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0 +; SI-NEXT: v_lshr_b64 v[26:27], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[18:19], 16 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v19 ; SI-NEXT: .LBB49_3: ; %end -; 
SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 ; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v18, v18, v34 +; SI-NEXT: v_or_b32_e32 v18, v18, v28 ; SI-NEXT: buffer_store_dword v18, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v18, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v37 ; SI-NEXT: v_or_b32_e32 v18, v18, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v27 +; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: v_or_b32_e32 v16, v16, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v16, v18, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v16, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36 ; SI-NEXT: v_or_b32_e32 v16, v16, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v26 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 ; SI-NEXT: v_or_b32_e32 v14, v14, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v14, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v35 ; SI-NEXT: v_or_b32_e32 v14, v14, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v25 ; SI-NEXT: v_or_b32_e32 v12, v12, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v12, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v12, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v34 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen @@ -21562,79 +21558,79 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i ; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v10, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v33 ; SI-NEXT: v_or_b32_e32 v10, v10, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v23 ; SI-NEXT: v_or_b32_e32 v8, v8, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v8, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v8, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v28 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v32 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_add_i32_e32 v9, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v21 
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v22 ; SI-NEXT: v_or_b32_e32 v6, v6, v8 ; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v6, v8, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v6, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v31 ; SI-NEXT: v_or_b32_e32 v6, v6, v7 ; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v20 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v21 ; SI-NEXT: v_or_b32_e32 v1, v1, v6 ; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v20 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr24 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v9f64_to_v36i16_scalar: @@ -28520,302 +28516,321 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i ; SI-LABEL: bitcast_v36f16_to_v36i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v12 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:20 ; 4-byte 
Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v52, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v51, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v45, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v44, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v43, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v41, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v42, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v55, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v40, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v36, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v35, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v18, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v34, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v15, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v33, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v32, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v30, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v28, s29 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v23 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true +; SI-NEXT: v_cvt_f32_f16_e32 v5, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v5 +; SI-NEXT: 
v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v44 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_or_b32_e32 v21, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v5 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_or_b32_e32 v1, v1, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_or_b32_e32 v3, v3, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_or_b32_e32 v6, v6, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_or_b32_e32 v23, v23, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v30, 
0x38000000, v30 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v18, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v55 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v30 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v16, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v28 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v14, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v10 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v26 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v50 +; SI-NEXT: v_or_b32_e32 v10, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v24 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v49 +; SI-NEXT: v_or_b32_e32 v8, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v39 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v22 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v39 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_or_b32_e32 v6, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v37 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v19 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v4, v3, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v43 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v38 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v11 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 
-; SI-NEXT: v_or_b32_e32 v24, v24, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v36 +; SI-NEXT: v_or_b32_e32 v2, v2, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v45 +; SI-NEXT: v_or_b32_e32 v43, v12, v17 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v54 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 +; SI-NEXT: v_or_b32_e32 v45, v11, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v40 +; SI-NEXT: v_or_b32_e32 v42, v12, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v53 +; SI-NEXT: v_or_b32_e32 v54, v19, v9 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_or_b32_e32 v40, v11, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v51 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_or_b32_e32 v27, v27, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_or_b32_e32 v13, v13, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v17 -; SI-NEXT: v_or_b32_e32 v16, v16, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v37 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v35 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v33 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v22, v22, v36 -; SI-NEXT: v_or_b32_e32 v21, v21, v35 -; SI-NEXT: v_or_b32_e32 v18, v18, v34 -; SI-NEXT: v_or_b32_e32 v15, v15, v33 -; SI-NEXT: v_or_b32_e32 v29, v29, v32 -; SI-NEXT: v_or_b32_e32 v26, v26, v31 -; SI-NEXT: v_or_b32_e32 v11, v11, v30 -; SI-NEXT: v_or_b32_e32 v8, v8, v12 -; SI-NEXT: v_or_b32_e32 v5, v5, v9 -; SI-NEXT: v_alignbit_b32 v36, v19, v36, 16 -; SI-NEXT: v_alignbit_b32 v35, v16, v35, 16 -; SI-NEXT: v_alignbit_b32 v34, v13, v34, 16 -; SI-NEXT: v_alignbit_b32 v33, v27, v33, 16 -; SI-NEXT: v_alignbit_b32 v32, v24, v32, 16 -; 
SI-NEXT: v_alignbit_b32 v31, v23, v31, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v30, 16 -; SI-NEXT: v_alignbit_b32 v12, v3, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v1, v9, 16 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v53, v12, v7 +; SI-NEXT: v_or_b32_e32 v51, v11, v5 +; SI-NEXT: v_or_b32_e32 v48, v19, v3 +; SI-NEXT: v_or_b32_e32 v38, v22, v1 +; SI-NEXT: v_lshr_b64 v[34:35], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[22:23], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[19:20], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[11:12], v[1:2], 16 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v22, v22, v36 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v35 -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v34 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v45 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v34 -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v44 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v33 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v43 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v28 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v41 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword 
v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v32 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v30 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v25 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v31 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v28 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, v13, v10 -; SI-NEXT: v_add_i32_e32 v13, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v10, v13, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v30 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v26 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v12 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v9 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 64, 
v0 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v24 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v22 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v39 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v48 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v38 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll index b8091d8256457..47cb6bd3b3bb6 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll @@ -2838,240 +2838,238 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; SI-NEXT: v_readfirstlane_b32 s11, v1 -; SI-NEXT: v_readfirstlane_b32 s10, v2 -; SI-NEXT: v_readfirstlane_b32 s9, v3 -; SI-NEXT: v_readfirstlane_b32 s8, v4 -; SI-NEXT: v_readfirstlane_b32 s7, v5 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v6 +; SI-NEXT: v_readfirstlane_b32 s8, v1 +; SI-NEXT: v_readfirstlane_b32 s9, v2 +; SI-NEXT: v_readfirstlane_b32 s6, v3 +; SI-NEXT: v_readfirstlane_b32 s7, v4 +; SI-NEXT: v_readfirstlane_b32 s4, 
v5 +; SI-NEXT: s_and_b64 s[10:11], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v6 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s18 -; SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s29, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s25, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s23, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s21, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s17, v10, 16 -; SI-NEXT: s_lshr_b32 s12, s6, 16 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; SI-NEXT: s_lshr_b32 s14, s10, 16 -; SI-NEXT: s_lshr_b32 s15, s29, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 16 -; SI-NEXT: s_lshr_b32 s41, s25, 16 -; SI-NEXT: s_lshr_b32 s42, s23, 16 -; SI-NEXT: s_lshr_b32 s43, s21, 16 -; SI-NEXT: s_lshr_b32 s44, s19, 16 -; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b32 s72, s5, 16 +; SI-NEXT: s_lshr_b32 s73, s7, 16 +; SI-NEXT: s_lshr_b32 s74, s9, 16 +; SI-NEXT: s_lshr_b32 s75, s29, 16 +; SI-NEXT: s_lshr_b32 s76, s27, 16 +; SI-NEXT: s_lshr_b32 s77, s25, 16 +; SI-NEXT: s_lshr_b32 s78, s23, 16 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s19, 16 +; SI-NEXT: s_lshr_b32 s89, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s10, s10, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s18 -; SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 
16 -; SI-NEXT: v_alignbit_b32 v4, s29, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s25, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s23, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s21, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s17, v10, 16 -; SI-NEXT: s_lshr_b32 s12, s6, 16 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; SI-NEXT: s_lshr_b32 s14, s10, 16 -; SI-NEXT: s_lshr_b32 s15, s29, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 16 -; SI-NEXT: s_lshr_b32 s41, s25, 16 -; SI-NEXT: s_lshr_b32 s42, s23, 16 -; SI-NEXT: s_lshr_b32 s43, s21, 16 -; SI-NEXT: s_lshr_b32 s44, s19, 16 -; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: s_lshr_b32 s72, s5, 16 +; SI-NEXT: s_lshr_b32 s73, s7, 16 +; SI-NEXT: s_lshr_b32 s74, s9, 16 +; SI-NEXT: s_lshr_b32 s75, s29, 16 +; SI-NEXT: s_lshr_b32 s76, s27, 16 +; SI-NEXT: s_lshr_b32 s77, s25, 16 +; SI-NEXT: s_lshr_b32 s78, s23, 16 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s19, 16 +; SI-NEXT: s_lshr_b32 s89, s17, 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: s_lshl_b32 s11, s60, 16 +; SI-NEXT: s_and_b32 s13, s16, 0xffff +; SI-NEXT: s_or_b32 s11, s13, s11 +; SI-NEXT: v_mov_b32_e32 v1, s11 +; SI-NEXT: s_and_b32 s11, s17, 0xffff +; SI-NEXT: s_lshl_b32 s13, s89, 16 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_lshl_b32 s11, s58, 16 +; SI-NEXT: s_and_b32 s13, s18, 0xffff +; SI-NEXT: s_or_b32 s11, s13, s11 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 20, v0 -; 
SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s19, 0xffff +; SI-NEXT: s_lshl_b32 s13, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s20, 0xffff +; SI-NEXT: s_lshl_b32 s13, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s21, 0xffff +; SI-NEXT: s_lshl_b32 s13, s79, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s22, 0xffff +; SI-NEXT: s_lshl_b32 s13, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s23, 0xffff +; SI-NEXT: s_lshl_b32 s13, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s13, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s24, 0xffff +; SI-NEXT: s_lshl_b32 s13, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s12, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s25, 0xffff +; SI-NEXT: s_lshl_b32 s13, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s26, 0xffff +; SI-NEXT: s_lshl_b32 s13, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s27, 0xffff +; SI-NEXT: s_lshl_b32 s13, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s28, 0xffff +; SI-NEXT: s_lshl_b32 s13, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s29, 0xffff +; SI-NEXT: s_lshl_b32 s13, s75, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s11, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s8, s8, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s73, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s10, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; 
SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr76 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v20i32_to_v40i16_scalar: @@ -10571,165 +10569,165 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; SI-NEXT: v_mov_b32_e32 v21, s16 +; SI-NEXT: v_mov_b32_e32 v19, s16 ; SI-NEXT: v_mov_b32_e32 v20, s17 -; SI-NEXT: v_mov_b32_e32 v19, s18 -; SI-NEXT: v_mov_b32_e32 v17, s19 -; SI-NEXT: v_mov_b32_e32 v16, s20 -; SI-NEXT: v_mov_b32_e32 v15, s21 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v11, s25 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v9, s27 -; SI-NEXT: v_mov_b32_e32 v8, s28 +; SI-NEXT: v_mov_b32_e32 v17, s18 +; SI-NEXT: v_mov_b32_e32 v18, s19 +; SI-NEXT: v_mov_b32_e32 v15, s20 +; SI-NEXT: v_mov_b32_e32 v16, s21 +; SI-NEXT: v_mov_b32_e32 v13, s22 +; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v11, s24 +; SI-NEXT: v_mov_b32_e32 v12, s25 +; SI-NEXT: v_mov_b32_e32 v9, s26 +; SI-NEXT: v_mov_b32_e32 v10, s27 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v7, s29 +; SI-NEXT: v_mov_b32_e32 v7, s28 +; SI-NEXT: v_mov_b32_e32 v8, s29 ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v18, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v23, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v26, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v28, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16 -; 
SI-NEXT: v_alignbit_b32 v33, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v35, v17, v19, 16 -; SI-NEXT: v_alignbit_b32 v37, v20, v21, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v20 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[22:23], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v20 +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[19:20], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 ; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 ; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 ; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_alignbit_b32 v18, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v23, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v26, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v28, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v33, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v35, v17, v19, 16 -; SI-NEXT: v_alignbit_b32 v37, v20, v21, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v20 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; 
SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[22:23], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[19:20], 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v20 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: v_or_b32_e32 v21, v21, v37 -; SI-NEXT: buffer_store_dword v21, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 -; SI-NEXT: v_or_b32_e32 v20, v20, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v31 ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_or_b32_e32 v19, v19, v26 +; SI-NEXT: buffer_store_dword v19, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v35 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v50 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 +; SI-NEXT: v_add_i32_e32 v20, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v30 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 +; SI-NEXT: v_add_i32_e32 v19, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v49 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 +; SI-NEXT: v_add_i32_e32 v18, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v29 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 +; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v48 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: 
v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v31 -; SI-NEXT: v_or_b32_e32 v14, v14, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v28 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v36 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v39 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v28 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v25 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v34 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v38 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v26 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v27 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v32 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v37 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v24 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 ; SI-NEXT: v_add_i32_e32 v9, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v30 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v36 ; SI-NEXT: v_or_b32_e32 v7, v7, v8 ; SI-NEXT: v_add_i32_e32 v8, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen @@ -10741,7 +10739,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -10753,45 +10751,45 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg 
%a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v18 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v21 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v20f32_to_v40i16_scalar: @@ -17582,240 +17580,238 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; SI-NEXT: v_readfirstlane_b32 s11, v1 -; SI-NEXT: v_readfirstlane_b32 s10, v2 -; SI-NEXT: v_readfirstlane_b32 s9, v3 -; SI-NEXT: v_readfirstlane_b32 s8, v4 -; SI-NEXT: v_readfirstlane_b32 s7, v5 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v6 +; SI-NEXT: v_readfirstlane_b32 s8, v1 +; SI-NEXT: v_readfirstlane_b32 s9, v2 +; SI-NEXT: v_readfirstlane_b32 s6, v3 +; SI-NEXT: v_readfirstlane_b32 s7, v4 +; SI-NEXT: v_readfirstlane_b32 s4, v5 +; SI-NEXT: s_and_b64 s[10:11], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v6 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s18 -; SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: 
v_alignbit_b32 v4, s29, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s25, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s23, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s21, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s17, v10, 16 -; SI-NEXT: s_lshr_b32 s12, s6, 16 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; SI-NEXT: s_lshr_b32 s14, s10, 16 -; SI-NEXT: s_lshr_b32 s15, s29, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 16 -; SI-NEXT: s_lshr_b32 s41, s25, 16 -; SI-NEXT: s_lshr_b32 s42, s23, 16 -; SI-NEXT: s_lshr_b32 s43, s21, 16 -; SI-NEXT: s_lshr_b32 s44, s19, 16 -; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b32 s72, s5, 16 +; SI-NEXT: s_lshr_b32 s73, s7, 16 +; SI-NEXT: s_lshr_b32 s74, s9, 16 +; SI-NEXT: s_lshr_b32 s75, s29, 16 +; SI-NEXT: s_lshr_b32 s76, s27, 16 +; SI-NEXT: s_lshr_b32 s77, s25, 16 +; SI-NEXT: s_lshr_b32 s78, s23, 16 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s19, 16 +; SI-NEXT: s_lshr_b32 s89, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 -; SI-NEXT: s_add_u32 s28, s28, 3 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s18 -; SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s29, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s25, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s23, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s21, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s17, v10, 16 -; SI-NEXT: s_lshr_b32 s12, s6, 16 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; SI-NEXT: s_lshr_b32 s14, s10, 16 -; SI-NEXT: s_lshr_b32 s15, s29, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 16 -; SI-NEXT: s_lshr_b32 s41, s25, 16 -; SI-NEXT: s_lshr_b32 s42, s23, 16 -; SI-NEXT: s_lshr_b32 s43, s21, 16 -; SI-NEXT: s_lshr_b32 s44, s19, 16 -; SI-NEXT: s_lshr_b32 
s45, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s72, s5, 16 +; SI-NEXT: s_lshr_b32 s73, s7, 16 +; SI-NEXT: s_lshr_b32 s74, s9, 16 +; SI-NEXT: s_lshr_b32 s75, s29, 16 +; SI-NEXT: s_lshr_b32 s76, s27, 16 +; SI-NEXT: s_lshr_b32 s77, s25, 16 +; SI-NEXT: s_lshr_b32 s78, s23, 16 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s19, 16 +; SI-NEXT: s_lshr_b32 s89, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: s_lshl_b32 s11, s60, 16 +; SI-NEXT: s_and_b32 s13, s16, 0xffff +; SI-NEXT: s_or_b32 s11, s13, s11 +; SI-NEXT: v_mov_b32_e32 v1, s11 +; SI-NEXT: s_and_b32 s11, s17, 0xffff +; SI-NEXT: s_lshl_b32 s13, s89, 16 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_lshl_b32 s11, s58, 16 +; SI-NEXT: s_and_b32 s13, s18, 0xffff +; SI-NEXT: s_or_b32 s11, s13, s11 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: 
v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s19, 0xffff +; SI-NEXT: s_lshl_b32 s13, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s20, 0xffff +; SI-NEXT: s_lshl_b32 s13, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s21, 0xffff +; SI-NEXT: s_lshl_b32 s13, s79, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s22, 0xffff +; SI-NEXT: s_lshl_b32 s13, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s23, 0xffff +; SI-NEXT: s_lshl_b32 s13, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s13, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: 
buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s24, 0xffff +; SI-NEXT: s_lshl_b32 s13, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s12, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s25, 0xffff +; SI-NEXT: s_lshl_b32 s13, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s26, 0xffff +; SI-NEXT: s_lshl_b32 s13, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s27, 0xffff +; SI-NEXT: s_lshl_b32 s13, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s28, 0xffff +; SI-NEXT: s_lshl_b32 s13, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s29, 0xffff +; SI-NEXT: s_lshl_b32 s13, s75, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s11, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s8, s8, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s73, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s10, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 ; SI-NEXT: s_or_b32 s4, s4, 
s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr76 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v10i64_to_v40i16_scalar: @@ -23902,126 +23898,126 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: v_mov_b32_e32 v8, s29 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v23, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v26, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v28, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v30, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v35, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v37, v20, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v20 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[22:23], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v20 +; 
SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[19:20], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 ; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 ; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v23, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v26, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v28, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v30, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v35, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v37, v20, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v20 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_lshr_b64 v[22:23], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[19:20], 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v20 ; SI-NEXT: .LBB49_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v31 ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: v_or_b32_e32 v19, v19, v37 +; SI-NEXT: v_or_b32_e32 v19, v19, v26 ; SI-NEXT: buffer_store_dword v19, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v50 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v30 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, 
vcc, 8, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v49 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v29 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v48 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v28 ; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v39 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v28 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v25 ; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v38 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v27 ; SI-NEXT: v_or_b32_e32 v9, v9, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v9, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v37 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen @@ -24033,7 +24029,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v7, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v36 ; SI-NEXT: v_or_b32_e32 v7, v7, v8 ; SI-NEXT: v_add_i32_e32 v8, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen @@ -24045,7 +24041,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; 
SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -24057,7 +24053,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -24069,33 +24065,33 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v10f64_to_v40i16_scalar: @@ -32185,338 +32181,367 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i ; SI-LABEL: bitcast_v40f16_to_v40i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v35, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v49, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s18 
-; SI-NEXT: v_cvt_f16_f32_e32 v18, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v15, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v42, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v51, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v26 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v58, s16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v59, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v57, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v56, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v47, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v46, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v45, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v36, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s29 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v27 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v27, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v26 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v25 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v48 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v39 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v25 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v34, v25, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v38 -; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v23 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v45, v22, v17 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v54 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, 
v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v57 +; SI-NEXT: v_or_b32_e32 v54, v22, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v51 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v58 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v57, v3, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v43 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_or_b32_e32 v58, v1, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v47 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v43, v3, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v40 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_or_b32_e32 v47, v1, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v41 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v40, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_or_b32_e32 v41, v1, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v5, v5, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2 -; SI-NEXT: v_or_b32_e32 v3, v3, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v8 -; SI-NEXT: v_or_b32_e32 v9, v9, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v7 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_or_b32_e32 v51, v22, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v48 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_or_b32_e32 v12, v12, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v10 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v22 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 +; SI-NEXT: 
v_or_b32_e32 v2, v2, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v53 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v49 +; SI-NEXT: v_or_b32_e32 v4, v4, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v52 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_or_b32_e32 v28, v28, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v33 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v53 +; SI-NEXT: v_or_b32_e32 v6, v6, v21 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v22 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v52 +; SI-NEXT: v_or_b32_e32 v8, v8, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v44 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v55 +; SI-NEXT: v_or_b32_e32 v10, v10, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v42 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v21 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v22 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v21 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v44 +; SI-NEXT: v_or_b32_e32 v14, v14, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v42 +; SI-NEXT: v_or_b32_e32 v18, v18, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_or_b32_e32 v33, v25, v33 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v22 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v15, 
v15 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_or_b32_e32 v35, v35, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v13 -; SI-NEXT: v_or_b32_e32 v14, v14, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v17 -; SI-NEXT: v_or_b32_e32 v20, v20, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v18 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v39 -; SI-NEXT: v_or_b32_e32 v19, v19, v25 -; SI-NEXT: v_or_b32_e32 v22, v22, v26 -; SI-NEXT: v_or_b32_e32 v21, v21, v27 -; SI-NEXT: v_or_b32_e32 v16, v16, v24 -; SI-NEXT: v_or_b32_e32 v15, v15, v48 -; SI-NEXT: v_or_b32_e32 v30, v30, v38 -; SI-NEXT: v_or_b32_e32 v29, v29, v37 -; SI-NEXT: v_or_b32_e32 v11, v11, v51 -; SI-NEXT: v_or_b32_e32 v6, v6, v23 -; SI-NEXT: v_or_b32_e32 v4, v4, v52 -; SI-NEXT: v_alignbit_b32 v49, v19, v26, 16 -; SI-NEXT: v_alignbit_b32 v26, v20, v27, 16 -; SI-NEXT: v_alignbit_b32 v25, v14, v24, 16 -; SI-NEXT: v_alignbit_b32 v24, v35, v48, 16 -; SI-NEXT: v_alignbit_b32 v48, v33, v50, 16 -; SI-NEXT: v_alignbit_b32 v39, v28, v38, 16 -; SI-NEXT: v_alignbit_b32 v38, v12, v37, 16 -; SI-NEXT: v_alignbit_b32 v37, v9, v51, 16 -; SI-NEXT: v_alignbit_b32 v36, v3, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, v5, v52, 16 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v21 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v56 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v46 +; SI-NEXT: v_or_b32_e32 v16, v16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v59 +; SI-NEXT: v_or_b32_e32 v12, v12, v22 +; SI-NEXT: v_or_b32_e32 v20, v20, v21 +; SI-NEXT: v_lshr_b64 v[25:26], v[17:18], 16 +; SI-NEXT: v_or_b32_e32 v50, v23, v1 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v49 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_or_b32_e32 v22, v22, v27 -; SI-NEXT: v_or_b32_e32 v18, v19, v18 -; SI-NEXT: v_add_i32_e32 v19, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v38 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v58 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v26 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v17, v18, v17 -; SI-NEXT: v_add_i32_e32 v18, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; 
SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v36 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v25 -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v13, v14, v13 -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v56 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v24 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v34 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v31 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v46 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v48 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v45 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v25 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v32 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v42 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v39 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v43 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, v13, v10 -; SI-NEXT: v_add_i32_e32 v13, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v10, v13, s[0:3], 0 offen +; SI-NEXT: 
v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v44 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v38 -; SI-NEXT: v_or_b32_e32 v10, v10, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v10, v13, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v41 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v10, v7 -; SI-NEXT: v_add_i32_e32 v10, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v37 -; SI-NEXT: v_or_b32_e32 v7, v7, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v28 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v36 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v40 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v26 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v4 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v53 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: 
buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v50 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v21 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll index ab1f8606cffd7..ecc715cfb52f3 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll @@ -1065,24 +1065,23 @@ define inreg <4 x i16> @bitcast_i64_to_v4i16_scalar(i64 inreg %a, i32 inreg %b) ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: s_add_u32 s16, s16, 3 ; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_i64_to_v4i16_scalar: @@ -2708,38 +2707,39 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) { ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: 
v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true ; SI-NEXT: s_add_u32 s16, s16, 3 ; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: .LBB25_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_mov_b32_e32 v3, s4 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: v_mov_b32_e32 v6, s13 +; SI-NEXT: v_mov_b32_e32 v7, s12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_i64_to_v8i8_scalar: @@ -4222,23 +4222,23 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg % ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB37_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB37_4 ; SI-NEXT: .LBB37_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[4:5], s[16:17], 1.0 -; SI-NEXT: v_alignbit_b32 v1, v5, v4, 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[4:5], 16 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v5 ; SI-NEXT: s_branch .LBB37_5 ; SI-NEXT: .LBB37_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB37_2 ; SI-NEXT: .LBB37_4: ; SI-NEXT: v_mov_b32_e32 v5, s17 ; SI-NEXT: v_mov_b32_e32 v4, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: .LBB37_5: ; %end ; SI-NEXT: v_mov_b32_e32 v0, v4 ; SI-NEXT: v_mov_b32_e32 v2, v5 @@ -5836,40 +5836,43 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b) ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s8, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 8 +; SI-NEXT: s_lshr_b32 s14, 
s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s12, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[8:9], s[16:17], 1.0 -; SI-NEXT: v_alignbit_b32 v3, v9, v8, 24 -; SI-NEXT: v_alignbit_b32 v2, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v1, v9, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v9 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v9 +; SI-NEXT: v_add_f64 v[10:11], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[3:4], v[10:11], 24 +; SI-NEXT: v_lshr_b64 v[8:9], v[10:11], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[10:11], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v11 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v11 ; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v9, s17 -; SI-NEXT: v_mov_b32_e32 v8, s16 -; SI-NEXT: v_mov_b32_e32 v7, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v5, s6 +; SI-NEXT: v_mov_b32_e32 v11, s17 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: v_mov_b32_e32 v7, s14 +; SI-NEXT: v_mov_b32_e32 v6, s13 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v8, s6 +; SI-NEXT: v_mov_b32_e32 v3, s4 ; SI-NEXT: .LBB49_5: ; %end -; SI-NEXT: v_mov_b32_e32 v0, v8 -; SI-NEXT: v_mov_b32_e32 v4, v9 +; SI-NEXT: v_mov_b32_e32 v0, v10 +; SI-NEXT: v_mov_b32_e32 v2, v8 +; SI-NEXT: v_mov_b32_e32 v4, v11 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_f64_to_v8i8_scalar: @@ -7049,24 +7052,23 @@ define inreg <4 x i16> @bitcast_v2i32_to_v4i16_scalar(<2 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB57_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 ; SI-NEXT: .LBB57_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB57_2 ; ; VI-LABEL: bitcast_v2i32_to_v4i16_scalar: @@ -8688,38 +8690,39 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, 
s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 ; SI-NEXT: .LBB69_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_mov_b32_e32 v3, s4 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: v_mov_b32_e32 v6, s13 +; SI-NEXT: v_mov_b32_e32 v7, s12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v2i32_to_v8i8_scalar: @@ -9564,24 +9567,27 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB73_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB73_4 ; SI-NEXT: .LBB73_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v5, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v4, s16, 1.0 +; SI-NEXT: v_lshr_b64 v[1:2], v[4:5], 16 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v5 +; SI-NEXT: s_branch .LBB73_5 ; SI-NEXT: .LBB73_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB73_2 ; SI-NEXT: .LBB73_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: v_mov_b32_e32 v5, s17 +; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: .LBB73_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v4 +; SI-NEXT: v_mov_b32_e32 v2, v5 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v2f32_to_v4i16_scalar: @@ -11206,38 +11212,44 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, 
i32 in ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB85_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB85_4 ; SI-NEXT: .LBB85_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v4, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v4 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v11, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v10, s16, 1.0 +; SI-NEXT: v_lshr_b64 v[3:4], v[10:11], 24 +; SI-NEXT: v_lshr_b64 v[8:9], v[10:11], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[10:11], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v11 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v11 +; SI-NEXT: s_branch .LBB85_5 ; SI-NEXT: .LBB85_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB85_2 ; SI-NEXT: .LBB85_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: v_mov_b32_e32 v11, s17 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: v_mov_b32_e32 v6, s13 +; SI-NEXT: v_mov_b32_e32 v7, s12 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: v_mov_b32_e32 v8, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: .LBB85_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v10 +; SI-NEXT: v_mov_b32_e32 v2, v8 +; SI-NEXT: v_mov_b32_e32 v4, v11 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v2f32_to_v8i8_scalar: @@ -11249,8 +11261,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; VI-NEXT: s_lshr_b32 s5, s17, 24 ; VI-NEXT: s_lshr_b32 s8, s17, 16 -; VI-NEXT: s_lshr_b32 s9, s17, 8 -; VI-NEXT: s_lshr_b32 s10, s16, 16 +; VI-NEXT: s_lshr_b32 s10, s17, 8 +; VI-NEXT: s_lshr_b32 s9, s16, 16 ; VI-NEXT: s_lshr_b32 s11, s16, 8 ; VI-NEXT: s_cbranch_execnz .LBB85_4 ; VI-NEXT: .LBB85_2: ; %cmp.true @@ -11265,9 +11277,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; VI-NEXT: s_branch .LBB85_5 ; VI-NEXT: .LBB85_3: ; VI-NEXT: ; implicit-def: $sgpr11 -; VI-NEXT: ; implicit-def: $sgpr10 -; VI-NEXT: ; implicit-def: $sgpr4 ; VI-NEXT: ; implicit-def: $sgpr9 +; VI-NEXT: ; implicit-def: $sgpr4 +; VI-NEXT: ; implicit-def: $sgpr10 ; VI-NEXT: ; implicit-def: $sgpr8 ; VI-NEXT: ; implicit-def: $sgpr5 ; VI-NEXT: s_branch .LBB85_2 @@ -11275,8 +11287,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; VI-NEXT: v_mov_b32_e32 v8, s16 ; VI-NEXT: 
v_mov_b32_e32 v9, s17 ; VI-NEXT: v_mov_b32_e32 v1, s11 -; VI-NEXT: v_mov_b32_e32 v2, s10 -; VI-NEXT: v_mov_b32_e32 v5, s9 +; VI-NEXT: v_mov_b32_e32 v2, s9 +; VI-NEXT: v_mov_b32_e32 v5, s10 ; VI-NEXT: v_mov_b32_e32 v6, s8 ; VI-NEXT: v_mov_b32_e32 v7, s5 ; VI-NEXT: v_mov_b32_e32 v3, s4 @@ -11294,8 +11306,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; GFX9-NEXT: s_lshr_b32 s5, s17, 24 ; GFX9-NEXT: s_lshr_b32 s8, s17, 16 -; GFX9-NEXT: s_lshr_b32 s9, s17, 8 -; GFX9-NEXT: s_lshr_b32 s10, s16, 16 +; GFX9-NEXT: s_lshr_b32 s10, s17, 8 +; GFX9-NEXT: s_lshr_b32 s9, s16, 16 ; GFX9-NEXT: s_lshr_b32 s11, s16, 8 ; GFX9-NEXT: s_cbranch_execnz .LBB85_4 ; GFX9-NEXT: .LBB85_2: ; %cmp.true @@ -11310,9 +11322,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX9-NEXT: s_branch .LBB85_5 ; GFX9-NEXT: .LBB85_3: ; GFX9-NEXT: ; implicit-def: $sgpr11 -; GFX9-NEXT: ; implicit-def: $sgpr10 -; GFX9-NEXT: ; implicit-def: $sgpr4 ; GFX9-NEXT: ; implicit-def: $sgpr9 +; GFX9-NEXT: ; implicit-def: $sgpr4 +; GFX9-NEXT: ; implicit-def: $sgpr10 ; GFX9-NEXT: ; implicit-def: $sgpr8 ; GFX9-NEXT: ; implicit-def: $sgpr5 ; GFX9-NEXT: s_branch .LBB85_2 @@ -11320,8 +11332,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX9-NEXT: v_mov_b32_e32 v8, s16 ; GFX9-NEXT: v_mov_b32_e32 v9, s17 ; GFX9-NEXT: v_mov_b32_e32 v1, s11 -; GFX9-NEXT: v_mov_b32_e32 v2, s10 -; GFX9-NEXT: v_mov_b32_e32 v5, s9 +; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: v_mov_b32_e32 v5, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s8 ; GFX9-NEXT: v_mov_b32_e32 v7, s5 ; GFX9-NEXT: v_mov_b32_e32 v3, s4 @@ -11340,8 +11352,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24 ; GFX11-NEXT: s_lshr_b32 s3, s1, 24 ; GFX11-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11-NEXT: s_lshr_b32 s6, s1, 8 -; GFX11-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11-NEXT: s_lshr_b32 s7, s1, 8 +; GFX11-NEXT: s_lshr_b32 s6, s0, 16 ; GFX11-NEXT: s_lshr_b32 s8, s0, 8 ; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 ; GFX11-NEXT: s_cbranch_vccnz .LBB85_4 @@ -11358,16 +11370,16 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; GFX11-NEXT: s_branch .LBB85_5 ; GFX11-NEXT: .LBB85_3: ; GFX11-NEXT: ; implicit-def: $sgpr8 -; GFX11-NEXT: ; implicit-def: $sgpr7 -; GFX11-NEXT: ; implicit-def: $sgpr2 ; GFX11-NEXT: ; implicit-def: $sgpr6 +; GFX11-NEXT: ; implicit-def: $sgpr2 +; GFX11-NEXT: ; implicit-def: $sgpr7 ; GFX11-NEXT: ; implicit-def: $sgpr5 ; GFX11-NEXT: ; implicit-def: $sgpr3 ; GFX11-NEXT: s_branch .LBB85_2 ; GFX11-NEXT: .LBB85_4: ; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1 -; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7 -; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5 +; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5 ; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: .LBB85_5: ; %end @@ -12327,7 +12339,7 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v4, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: s_cmp_lg_u32 s20, 0 @@ -12336,23 +12348,24 @@ define inreg <4 
x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i ; SI-NEXT: s_cbranch_execnz .LBB91_3 ; SI-NEXT: .LBB91_2: ; %cmp.true ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_lshr_b64 v[4:5], v[1:2], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 ; SI-NEXT: .LBB91_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v4 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB91_4: ; SI-NEXT: s_branch .LBB91_2 @@ -12938,34 +12951,35 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3 ; SI-NEXT: v_mul_f32_e64 v7, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v6, 1.0, s17 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v4, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 ; SI-NEXT: s_cbranch_scc0 .LBB95_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v6 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: s_cbranch_execnz .LBB95_3 ; SI-NEXT: .LBB95_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v6 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v4 -; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v5 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v7 ; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 +; SI-NEXT: v_lshr_b64 v[4:5], v[1:2], 16 +; SI-NEXT: v_alignbit_b32 v0, v6, v0, 16 ; SI-NEXT: .LBB95_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v4 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB95_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: s_branch .LBB95_2 @@ -13395,52 +13409,52 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: 
v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v3, s7, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s7, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 8 -; SI-NEXT: s_lshr_b32 s9, s7, 8 -; SI-NEXT: s_and_b32 s10, s19, 0xffff -; SI-NEXT: s_bfe_u32 s8, s19, 0x80008 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s9, s5, 8 +; SI-NEXT: s_and_b32 s11, s19, 0xffff +; SI-NEXT: s_bfe_u32 s7, s19, 0x80008 ; SI-NEXT: s_cbranch_execnz .LBB97_3 ; SI-NEXT: .LBB97_2: ; %cmp.true ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v3, s7, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s7, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 8 -; SI-NEXT: s_lshr_b32 s8, s7, 24 -; SI-NEXT: s_lshr_b32 s10, s7, 16 -; SI-NEXT: s_lshr_b32 s9, s7, 8 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s7, s5, 24 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: s_lshr_b32 s9, s5, 8 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v4, s7 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v4, s5 ; SI-NEXT: v_mov_b32_e32 v5, s9 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: v_mov_b32_e32 v6, s11 +; SI-NEXT: v_mov_b32_e32 v7, s7 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v4i16_to_v8i8_scalar: @@ -13503,8 +13517,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; GFX9-NEXT: s_lshr_b32 s5, s17, 24 ; GFX9-NEXT: s_lshr_b32 s8, s17, 16 -; GFX9-NEXT: s_lshr_b32 s9, s17, 8 -; GFX9-NEXT: s_lshr_b32 s10, s16, 16 +; GFX9-NEXT: s_lshr_b32 s10, s17, 8 +; GFX9-NEXT: s_lshr_b32 s9, s16, 16 ; GFX9-NEXT: s_lshr_b32 s11, s16, 8 ; GFX9-NEXT: s_cbranch_execnz .LBB97_4 ; GFX9-NEXT: .LBB97_2: ; %cmp.true @@ -13519,9 +13533,9 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX9-NEXT: s_branch .LBB97_5 ; GFX9-NEXT: .LBB97_3: ; GFX9-NEXT: ; implicit-def: $sgpr11 -; GFX9-NEXT: ; implicit-def: $sgpr10 -; GFX9-NEXT: ; implicit-def: $sgpr4 ; GFX9-NEXT: ; implicit-def: $sgpr9 +; GFX9-NEXT: ; implicit-def: $sgpr4 +; 
GFX9-NEXT: ; implicit-def: $sgpr10 ; GFX9-NEXT: ; implicit-def: $sgpr8 ; GFX9-NEXT: ; implicit-def: $sgpr5 ; GFX9-NEXT: s_branch .LBB97_2 @@ -13529,8 +13543,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX9-NEXT: v_mov_b32_e32 v8, s16 ; GFX9-NEXT: v_mov_b32_e32 v9, s17 ; GFX9-NEXT: v_mov_b32_e32 v1, s11 -; GFX9-NEXT: v_mov_b32_e32 v2, s10 -; GFX9-NEXT: v_mov_b32_e32 v5, s9 +; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: v_mov_b32_e32 v5, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s8 ; GFX9-NEXT: v_mov_b32_e32 v7, s5 ; GFX9-NEXT: v_mov_b32_e32 v3, s4 @@ -13549,8 +13563,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24 ; GFX11-NEXT: s_lshr_b32 s3, s1, 24 ; GFX11-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11-NEXT: s_lshr_b32 s6, s1, 8 -; GFX11-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11-NEXT: s_lshr_b32 s7, s1, 8 +; GFX11-NEXT: s_lshr_b32 s6, s0, 16 ; GFX11-NEXT: s_lshr_b32 s8, s0, 8 ; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 ; GFX11-NEXT: s_cbranch_vccnz .LBB97_4 @@ -13567,16 +13581,16 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; GFX11-NEXT: s_branch .LBB97_5 ; GFX11-NEXT: .LBB97_3: ; GFX11-NEXT: ; implicit-def: $sgpr8 -; GFX11-NEXT: ; implicit-def: $sgpr7 -; GFX11-NEXT: ; implicit-def: $sgpr2 ; GFX11-NEXT: ; implicit-def: $sgpr6 +; GFX11-NEXT: ; implicit-def: $sgpr2 +; GFX11-NEXT: ; implicit-def: $sgpr7 ; GFX11-NEXT: ; implicit-def: $sgpr5 ; GFX11-NEXT: ; implicit-def: $sgpr3 ; GFX11-NEXT: s_branch .LBB97_2 ; GFX11-NEXT: .LBB97_4: ; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1 -; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7 -; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5 +; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5 ; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: .LBB97_5: ; %end @@ -13970,27 +13984,27 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB99_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s23, 24 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 ; SI-NEXT: s_lshl_b32 s6, s19, 24 -; SI-NEXT: s_or_b32 s4, s6, s4 -; SI-NEXT: s_and_b32 s6, s16, 0xff -; SI-NEXT: s_lshl_b32 s8, s17, 8 -; SI-NEXT: s_or_b32 s6, s6, s8 -; SI-NEXT: v_mov_b32_e32 v0, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xffff -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 16 -; SI-NEXT: s_or_b32 s6, s6, s4 -; SI-NEXT: s_lshr_b32 s8, s5, 16 +; SI-NEXT: s_or_b32 s10, s6, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s6, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s6, s22, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s23, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s8, s7, s6 +; SI-NEXT: s_or_b32 s11, s5, s8 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshr_b64 s[6:7], s[10:11], 16 +; SI-NEXT: s_or_b32 s4, s4, s10 +; SI-NEXT: s_lshr_b32 s7, s8, 16 +; SI-NEXT: 
s_mov_b32 s5, s11 ; SI-NEXT: s_cbranch_execnz .LBB99_3 ; SI-NEXT: .LBB99_2: ; %cmp.true ; SI-NEXT: s_add_i32 s16, s16, 3 @@ -14004,34 +14018,33 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre ; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s6, s21, 8 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_and_b32 s7, s22, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s23, 24 ; SI-NEXT: s_lshl_b32 s7, s7, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s7 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s7, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 16 -; SI-NEXT: s_lshr_b32 s8, s7, 16 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b32 s7, s5, 16 ; SI-NEXT: .LBB99_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v2, s7 -; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: v_mov_b32_e32 v3, s7 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB99_4: +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB99_2 ; ; VI-LABEL: bitcast_v8i8_to_v4i16_scalar: @@ -15220,53 +15233,55 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; SI-LABEL: bitcast_v4f16_to_v8i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v10, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v8, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v0, s18 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB105_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v11 +; SI-NEXT: v_or_b32_e32 v9, v8, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v0, v9, v0 -; SI-NEXT: v_or_b32_e32 v4, v8, v1 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_or_b32_e32 v10, v0, v1 +; SI-NEXT: v_lshr_b64 v[3:4], v[9:10], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[9:10], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v10 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: s_cbranch_execnz .LBB105_3 ; SI-NEXT: .LBB105_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v0, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v8 +; SI-NEXT: 
v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v2 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v3 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v9, v2, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v2, v1 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_or_b32_e32 v10, v0, v1 +; SI-NEXT: v_lshr_b64 v[3:4], v[9:10], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[9:10], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v10 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: .LBB105_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v9 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v10 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB105_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: s_branch .LBB105_2 @@ -15330,8 +15345,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; GFX9-NEXT: s_lshr_b32 s5, s17, 24 ; GFX9-NEXT: s_lshr_b32 s8, s17, 16 -; GFX9-NEXT: s_lshr_b32 s9, s17, 8 -; GFX9-NEXT: s_lshr_b32 s10, s16, 16 +; GFX9-NEXT: s_lshr_b32 s10, s17, 8 +; GFX9-NEXT: s_lshr_b32 s9, s16, 16 ; GFX9-NEXT: s_lshr_b32 s11, s16, 8 ; GFX9-NEXT: s_cbranch_execnz .LBB105_4 ; GFX9-NEXT: .LBB105_2: ; %cmp.true @@ -15347,9 +15362,9 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX9-NEXT: s_branch .LBB105_5 ; GFX9-NEXT: .LBB105_3: ; GFX9-NEXT: ; implicit-def: $sgpr11 -; GFX9-NEXT: ; implicit-def: $sgpr10 -; GFX9-NEXT: ; implicit-def: $sgpr4 ; GFX9-NEXT: ; implicit-def: $sgpr9 +; GFX9-NEXT: ; implicit-def: $sgpr4 +; GFX9-NEXT: ; implicit-def: $sgpr10 ; GFX9-NEXT: ; implicit-def: $sgpr8 ; GFX9-NEXT: ; implicit-def: $sgpr5 ; GFX9-NEXT: s_branch .LBB105_2 @@ -15357,8 +15372,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX9-NEXT: v_mov_b32_e32 v8, s16 ; GFX9-NEXT: v_mov_b32_e32 v9, s17 ; GFX9-NEXT: v_mov_b32_e32 v1, s11 -; GFX9-NEXT: v_mov_b32_e32 v2, s10 -; GFX9-NEXT: v_mov_b32_e32 v5, s9 +; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: v_mov_b32_e32 v5, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s8 ; GFX9-NEXT: v_mov_b32_e32 v7, s5 ; GFX9-NEXT: v_mov_b32_e32 v3, s4 @@ -15377,8 +15392,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24 ; GFX11-NEXT: s_lshr_b32 s3, s1, 24 ; GFX11-NEXT: s_lshr_b32 s5, s1, 16 -; GFX11-NEXT: s_lshr_b32 s6, s1, 8 -; GFX11-NEXT: s_lshr_b32 s7, s0, 16 +; GFX11-NEXT: s_lshr_b32 s7, s1, 8 +; GFX11-NEXT: s_lshr_b32 s6, s0, 16 ; GFX11-NEXT: s_lshr_b32 s8, s0, 8 ; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 ; GFX11-NEXT: 
s_cbranch_vccnz .LBB105_4 @@ -15395,16 +15410,16 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; GFX11-NEXT: s_branch .LBB105_5 ; GFX11-NEXT: .LBB105_3: ; GFX11-NEXT: ; implicit-def: $sgpr8 -; GFX11-NEXT: ; implicit-def: $sgpr7 -; GFX11-NEXT: ; implicit-def: $sgpr2 ; GFX11-NEXT: ; implicit-def: $sgpr6 +; GFX11-NEXT: ; implicit-def: $sgpr2 +; GFX11-NEXT: ; implicit-def: $sgpr7 ; GFX11-NEXT: ; implicit-def: $sgpr5 ; GFX11-NEXT: ; implicit-def: $sgpr3 ; GFX11-NEXT: s_branch .LBB105_2 ; GFX11-NEXT: .LBB105_4: ; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1 -; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7 -; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5 +; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6 +; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5 ; GFX11-NEXT: v_mov_b32_e32 v7, s3 ; GFX11-NEXT: v_mov_b32_e32 v3, s2 ; GFX11-NEXT: .LBB105_5: ; %end @@ -16420,48 +16435,50 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_cmp_lg_u32 s20, 0 -; SI-NEXT: v_mul_f32_e64 v10, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v11, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v11, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v12, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s18 ; SI-NEXT: s_cbranch_scc0 .LBB109_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v8 -; SI-NEXT: v_alignbit_b32 v0, v0, v11, 16 -; SI-NEXT: v_alignbit_b32 v4, v6, v9, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; SI-NEXT: v_alignbit_b32 v9, v1, v12, 16 +; SI-NEXT: v_alignbit_b32 v10, v6, v8, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[9:10], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[9:10], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v0 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v10 ; SI-NEXT: s_cbranch_execnz .LBB109_3 ; SI-NEXT: .LBB109_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v10 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v11 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v11 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; SI-NEXT: v_alignbit_b32 v9, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v8 -; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_alignbit_b32 v4, v6, v1, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; SI-NEXT: v_alignbit_b32 v10, v6, v1, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[9:10], 24 +; SI-NEXT: 
v_lshr_b64 v[4:5], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[9:10], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v10 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v0 ; SI-NEXT: .LBB109_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v9 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v10 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB109_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 @@ -16476,8 +16493,8 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32 ; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 ; VI-NEXT: s_lshr_b32 s8, s17, 24 ; VI-NEXT: s_lshr_b32 s5, s17, 16 -; VI-NEXT: s_lshr_b32 s9, s17, 8 -; VI-NEXT: s_lshr_b32 s10, s16, 16 +; VI-NEXT: s_lshr_b32 s10, s17, 8 +; VI-NEXT: s_lshr_b32 s9, s16, 16 ; VI-NEXT: s_lshr_b32 s11, s16, 8 ; VI-NEXT: s_cbranch_execnz .LBB109_4 ; VI-NEXT: .LBB109_2: ; %cmp.true @@ -16529,16 +16546,16 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32 ; VI-NEXT: s_setpc_b64 s[30:31] ; VI-NEXT: .LBB109_3: ; VI-NEXT: ; implicit-def: $sgpr11 -; VI-NEXT: ; implicit-def: $sgpr10 -; VI-NEXT: ; implicit-def: $sgpr4 ; VI-NEXT: ; implicit-def: $sgpr9 +; VI-NEXT: ; implicit-def: $sgpr4 +; VI-NEXT: ; implicit-def: $sgpr10 ; VI-NEXT: ; implicit-def: $sgpr5 ; VI-NEXT: ; implicit-def: $sgpr8 ; VI-NEXT: s_branch .LBB109_2 ; VI-NEXT: .LBB109_4: ; VI-NEXT: v_mov_b32_e32 v1, s11 -; VI-NEXT: v_mov_b32_e32 v2, s10 -; VI-NEXT: v_mov_b32_e32 v5, s9 +; VI-NEXT: v_mov_b32_e32 v2, s9 +; VI-NEXT: v_mov_b32_e32 v5, s10 ; VI-NEXT: v_mov_b32_e32 v7, s8 ; VI-NEXT: v_mov_b32_e32 v3, s4 ; VI-NEXT: v_mov_b32_e32 v0, s16 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll index 9f5c9c4c509ed..2cc7c448b2e11 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll @@ -3022,264 +3022,260 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9 -; SI-NEXT: v_readfirstlane_b32 s13, v1 -; SI-NEXT: v_readfirstlane_b32 s12, v2 -; SI-NEXT: v_readfirstlane_b32 s11, v3 -; SI-NEXT: v_readfirstlane_b32 s10, v4 -; SI-NEXT: v_readfirstlane_b32 s9, v5 -; SI-NEXT: v_readfirstlane_b32 s8, v6 -; SI-NEXT: v_readfirstlane_b32 s7, v7 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v8 +; SI-NEXT: v_readfirstlane_b32 s10, v1 +; SI-NEXT: v_readfirstlane_b32 s11, v2 +; SI-NEXT: v_readfirstlane_b32 s8, v3 +; SI-NEXT: v_readfirstlane_b32 s9, v4 +; SI-NEXT: v_readfirstlane_b32 s6, v5 +; SI-NEXT: v_readfirstlane_b32 s7, v6 +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_and_b64 s[12:13], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v8 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s28 -; SI-NEXT: v_mov_b32_e32 v6, s26 -; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v8, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v10, s18 -; SI-NEXT: v_mov_b32_e32 v11, s16 -; SI-NEXT: 
v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s29, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s27, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s23, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s19, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s17, v11, 16 -; SI-NEXT: s_lshr_b32 s14, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s8, 16 -; SI-NEXT: s_lshr_b32 s40, s10, 16 -; SI-NEXT: s_lshr_b32 s41, s12, 16 -; SI-NEXT: s_lshr_b32 s42, s29, 16 -; SI-NEXT: s_lshr_b32 s43, s27, 16 -; SI-NEXT: s_lshr_b32 s44, s25, 16 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s21, 16 -; SI-NEXT: s_lshr_b32 s47, s19, 16 -; SI-NEXT: s_lshr_b32 s56, s17, 16 +; SI-NEXT: s_lshr_b32 s76, s5, 16 +; SI-NEXT: s_lshr_b32 s77, s7, 16 +; SI-NEXT: s_lshr_b32 s78, s9, 16 +; SI-NEXT: s_lshr_b32 s79, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s29, 16 +; SI-NEXT: s_lshr_b32 s89, s27, 16 +; SI-NEXT: s_lshr_b32 s90, s25, 16 +; SI-NEXT: s_lshr_b32 s91, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s21, 16 +; SI-NEXT: s_lshr_b32 s93, s19, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s12, s12, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s28 -; SI-NEXT: v_mov_b32_e32 v6, s26 -; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v8, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v10, s18 -; SI-NEXT: v_mov_b32_e32 v11, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s29, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s27, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, 
s23, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s19, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s17, v11, 16 -; SI-NEXT: s_lshr_b32 s14, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s8, 16 -; SI-NEXT: s_lshr_b32 s40, s10, 16 -; SI-NEXT: s_lshr_b32 s41, s12, 16 -; SI-NEXT: s_lshr_b32 s42, s29, 16 -; SI-NEXT: s_lshr_b32 s43, s27, 16 -; SI-NEXT: s_lshr_b32 s44, s25, 16 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s21, 16 -; SI-NEXT: s_lshr_b32 s47, s19, 16 -; SI-NEXT: s_lshr_b32 s56, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b32 s76, s5, 16 +; SI-NEXT: s_lshr_b32 s77, s7, 16 +; SI-NEXT: s_lshr_b32 s78, s9, 16 +; SI-NEXT: s_lshr_b32 s79, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s29, 16 +; SI-NEXT: s_lshr_b32 s89, s27, 16 +; SI-NEXT: s_lshr_b32 s90, s25, 16 +; SI-NEXT: s_lshr_b32 s91, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s21, 16 +; SI-NEXT: s_lshr_b32 s93, s19, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: buffer_store_dword v11, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: s_lshl_b32 s13, s72, 16 +; SI-NEXT: s_and_b32 s15, s16, 0xffff +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: v_mov_b32_e32 v1, s13 +; SI-NEXT: s_and_b32 s13, s17, 0xffff +; SI-NEXT: s_lshl_b32 s15, s94, 16 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_lshl_b32 s13, s62, 16 +; SI-NEXT: s_and_b32 s15, s18, 0xffff +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: v_mov_b32_e32 v3, s13 +; SI-NEXT: s_and_b32 s13, s19, 0xffff +; SI-NEXT: s_lshl_b32 s15, s93, 16 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: v_mov_b32_e32 v4, s13 +; SI-NEXT: s_lshl_b32 s13, s60, 16 +; SI-NEXT: s_and_b32 s15, s20, 0xffff +; SI-NEXT: 
buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s21, 0xffff +; SI-NEXT: s_lshl_b32 s15, s92, 16 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s22, 0xffff +; SI-NEXT: s_lshl_b32 s15, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s23, 0xffff +; SI-NEXT: s_lshl_b32 s15, s91, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s24, 0xffff +; SI-NEXT: s_lshl_b32 s15, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 56, v0 
-; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s25, 0xffff +; SI-NEXT: s_lshl_b32 s15, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s26, 0xffff +; SI-NEXT: s_lshl_b32 s15, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s27, 0xffff +; SI-NEXT: s_lshl_b32 s15, s89, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s28, 0xffff +; SI-NEXT: s_lshl_b32 s15, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s29, 0xffff +; SI-NEXT: s_lshl_b32 s15, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s13, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s10, s10, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s79, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 
+; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr90 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr88 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr79 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr76 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v22i32_to_v44i16_scalar: @@ -11524,171 +11520,171 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9 -; SI-NEXT: v_mov_b32_e32 v23, s16 +; SI-NEXT: v_mov_b32_e32 v21, s16 ; SI-NEXT: v_mov_b32_e32 v22, s17 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v19, s19 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v17, s21 -; SI-NEXT: v_mov_b32_e32 v16, s22 -; SI-NEXT: v_mov_b32_e32 v15, s23 -; SI-NEXT: 
v_mov_b32_e32 v14, s24 -; SI-NEXT: v_mov_b32_e32 v12, s25 -; SI-NEXT: v_mov_b32_e32 v13, s26 +; SI-NEXT: v_mov_b32_e32 v19, s18 +; SI-NEXT: v_mov_b32_e32 v20, s19 +; SI-NEXT: v_mov_b32_e32 v17, s20 +; SI-NEXT: v_mov_b32_e32 v18, s21 +; SI-NEXT: v_mov_b32_e32 v15, s22 +; SI-NEXT: v_mov_b32_e32 v16, s23 +; SI-NEXT: v_mov_b32_e32 v13, s24 +; SI-NEXT: v_mov_b32_e32 v14, s25 +; SI-NEXT: v_mov_b32_e32 v11, s26 +; SI-NEXT: v_mov_b32_e32 v12, s27 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v11, s27 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v9, s29 +; SI-NEXT: v_mov_b32_e32 v9, s28 +; SI-NEXT: v_mov_b32_e32 v10, s29 ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v20, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v25, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v28, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v30, v11, v13, 16 -; SI-NEXT: v_alignbit_b32 v32, v12, v14, 16 -; SI-NEXT: v_alignbit_b32 v34, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v37, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v39, v19, v21, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v23, 16 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v22 +; SI-NEXT: v_lshr_b64 v[23:24], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[19:20], 16 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22 +; SI-NEXT: v_lshr_b64 v[33:34], v[21:22], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 +; SI-NEXT: v_lshr_b64 v[23:24], v[7:8], 16 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; 
SI-NEXT: v_lshr_b64 v[26:27], v[1:2], 16 ; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 ; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 ; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_alignbit_b32 v20, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v25, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v28, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v30, v11, v13, 16 -; SI-NEXT: v_alignbit_b32 v32, v12, v14, 16 -; SI-NEXT: v_alignbit_b32 v34, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v37, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v39, v19, v21, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v23, 16 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v22 +; SI-NEXT: v_lshr_b64 v[28:29], v[11:12], 16 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_lshr_b64 v[30:31], v[15:16], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_lshr_b64 v[31:32], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_lshr_b64 v[32:33], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 -; SI-NEXT: v_or_b32_e32 v23, v23, v49 -; SI-NEXT: buffer_store_dword v23, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v33 ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: v_or_b32_e32 v21, v21, v33 +; SI-NEXT: buffer_store_dword v21, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v39 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v53 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_add_i32_e32 
v22, vcc, 8, v0 +; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v32 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 12, v0 +; SI-NEXT: v_add_i32_e32 v21, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v37 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v52 +; SI-NEXT: v_or_b32_e32 v19, v19, v20 +; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v31 ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 +; SI-NEXT: v_add_i32_e32 v19, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v50 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v51 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v34 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v30 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v48 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v50 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v32 -; SI-NEXT: v_or_b32_e32 v14, v14, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v29 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v38 -; SI-NEXT: v_or_b32_e32 v12, v12, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v49 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v12, v14, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v30 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v28 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 
offen -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v36 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v48 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v28 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v27 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v35 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v39 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen @@ -11700,7 +11696,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -11712,7 +11708,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -11724,47 +11720,47 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v20 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: 
$vgpr50 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr27 +; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v22f32_to_v44i16_scalar: @@ -19266,264 +19262,260 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9 -; SI-NEXT: v_readfirstlane_b32 s13, v1 -; SI-NEXT: v_readfirstlane_b32 s12, v2 -; SI-NEXT: v_readfirstlane_b32 s11, v3 -; SI-NEXT: v_readfirstlane_b32 s10, v4 -; SI-NEXT: v_readfirstlane_b32 s9, v5 -; SI-NEXT: v_readfirstlane_b32 s8, v6 -; SI-NEXT: v_readfirstlane_b32 s7, v7 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v8 +; SI-NEXT: v_readfirstlane_b32 s10, v1 +; SI-NEXT: v_readfirstlane_b32 s11, v2 +; SI-NEXT: v_readfirstlane_b32 s8, v3 +; SI-NEXT: v_readfirstlane_b32 s9, v4 +; SI-NEXT: v_readfirstlane_b32 s6, v5 +; SI-NEXT: v_readfirstlane_b32 s7, v6 +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_and_b64 s[12:13], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v8 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s28 -; SI-NEXT: v_mov_b32_e32 v6, s26 -; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v8, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v10, s18 -; SI-NEXT: v_mov_b32_e32 v11, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s29, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s27, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s23, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s19, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s17, v11, 16 -; SI-NEXT: s_lshr_b32 s14, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s8, 16 -; SI-NEXT: s_lshr_b32 s40, s10, 16 -; SI-NEXT: s_lshr_b32 s41, s12, 16 -; SI-NEXT: s_lshr_b32 s42, s29, 16 -; SI-NEXT: s_lshr_b32 s43, s27, 16 -; SI-NEXT: s_lshr_b32 s44, s25, 16 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s21, 16 -; SI-NEXT: s_lshr_b32 s47, s19, 16 -; SI-NEXT: s_lshr_b32 s56, s17, 16 +; SI-NEXT: s_lshr_b32 s76, s5, 16 +; SI-NEXT: s_lshr_b32 s77, s7, 16 +; SI-NEXT: s_lshr_b32 s78, s9, 16 +; SI-NEXT: s_lshr_b32 s79, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s29, 16 +; SI-NEXT: s_lshr_b32 s89, s27, 16 +; SI-NEXT: s_lshr_b32 s90, s25, 16 +; SI-NEXT: s_lshr_b32 s91, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s21, 16 +; SI-NEXT: s_lshr_b32 s93, s19, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 16 +; SI-NEXT: 
s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s28 -; SI-NEXT: v_mov_b32_e32 v6, s26 -; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v8, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v10, s18 -; SI-NEXT: v_mov_b32_e32 v11, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s29, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s27, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s23, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s19, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s17, v11, 16 -; SI-NEXT: s_lshr_b32 s14, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s8, 16 -; SI-NEXT: s_lshr_b32 s40, s10, 16 -; SI-NEXT: s_lshr_b32 s41, s12, 16 -; SI-NEXT: s_lshr_b32 s42, s29, 16 -; SI-NEXT: s_lshr_b32 s43, s27, 16 -; SI-NEXT: s_lshr_b32 s44, s25, 16 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s21, 16 -; SI-NEXT: s_lshr_b32 s47, s19, 16 -; SI-NEXT: s_lshr_b32 s56, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s76, s5, 16 +; SI-NEXT: s_lshr_b32 s77, s7, 16 +; SI-NEXT: s_lshr_b32 s78, s9, 16 +; SI-NEXT: s_lshr_b32 s79, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s29, 16 +; SI-NEXT: s_lshr_b32 s89, s27, 16 +; SI-NEXT: s_lshr_b32 s90, s25, 16 +; SI-NEXT: s_lshr_b32 s91, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s21, 16 +; 
SI-NEXT: s_lshr_b32 s93, s19, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: buffer_store_dword v11, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: s_lshl_b32 s13, s72, 16 +; SI-NEXT: s_and_b32 s15, s16, 0xffff +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: v_mov_b32_e32 v1, s13 +; SI-NEXT: s_and_b32 s13, s17, 0xffff +; SI-NEXT: s_lshl_b32 s15, s94, 16 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_lshl_b32 s13, s62, 16 +; SI-NEXT: s_and_b32 s15, s18, 0xffff +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: v_mov_b32_e32 v3, s13 +; SI-NEXT: s_and_b32 s13, s19, 0xffff +; SI-NEXT: s_lshl_b32 s15, s93, 16 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: v_mov_b32_e32 v4, s13 +; SI-NEXT: s_lshl_b32 s13, s60, 16 +; SI-NEXT: s_and_b32 s15, s20, 0xffff +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s21, 0xffff +; SI-NEXT: s_lshl_b32 s15, s92, 
16 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s22, 0xffff +; SI-NEXT: s_lshl_b32 s15, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s23, 0xffff +; SI-NEXT: s_lshl_b32 s15, s91, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s24, 0xffff +; SI-NEXT: s_lshl_b32 s15, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s25, 0xffff +; SI-NEXT: s_lshl_b32 s15, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: 
v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s26, 0xffff +; SI-NEXT: s_lshl_b32 s15, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s27, 0xffff +; SI-NEXT: s_lshl_b32 s15, s89, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s28, 0xffff +; SI-NEXT: s_lshl_b32 s15, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s29, 0xffff +; SI-NEXT: s_lshl_b32 s15, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s13, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s10, s10, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s79, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; 
SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr90 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr88 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr79 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr76 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v11i64_to_v44i16_scalar: @@ -26272,131 +26264,131 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: v_mov_b32_e32 v10, s29 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v23, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v25, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v27, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v32, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v34, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v36, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v39, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v21, 16 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v22 +; SI-NEXT: v_lshr_b64 v[23:24], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], 
v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[19:20], 16 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v22 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[21:22], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 ; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 ; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_alignbit_b32 v23, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v25, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v27, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v32, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v34, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v36, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v39, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v21, 16 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v22 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_lshr_b64 v[23:24], v[7:8], 16 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_lshr_b64 v[26:27], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[11:12], 16 +; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v22 ; 
SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v34 ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v21, v21, v49 +; SI-NEXT: v_or_b32_e32 v21, v21, v30 ; SI-NEXT: buffer_store_dword v21, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v54 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v53 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v32 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v52 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v29 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v51 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v28 ; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v50 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v31 ; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v49 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 
44, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen @@ -26408,7 +26400,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v9, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v48 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen @@ -26420,7 +26412,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -26432,7 +26424,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -26444,7 +26436,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -26456,35 +26448,35 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v28 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; 
implicit-def: $vgpr28 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v11f64_to_v44i16_scalar: @@ -35515,368 +35507,413 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i ; SI-LABEL: bitcast_v44f16_to_v44i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v20 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_cvt_f16_f32_e32 v57, v2 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_cvt_f16_f32_e32 v58, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v40, s16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v63, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v54, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v61, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v62, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v59, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v60, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v1 +; SI-NEXT: 
v_cvt_f16_f32_e32 v36, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v27, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v52, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v29, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v18, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v28, s29 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v50, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v26, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v48, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v29, s29 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true +; SI-NEXT: v_cvt_f32_f16_e32 v5, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v63 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_or_b32_e32 v26, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v61 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v24 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v5 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; 
SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_or_b32_e32 v1, v1, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_or_b32_e32 v3, v3, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v6, v6, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v61 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v24, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v16, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v22 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v29 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v22, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v36 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v18, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, 
v44 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v44 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v14, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v10 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v42 +; SI-NEXT: v_or_b32_e32 v10, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v30 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v41 +; SI-NEXT: v_or_b32_e32 v8, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v55 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v27 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v55 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_or_b32_e32 v6, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v53 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v19 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v4, v3, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v33 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v11 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_or_b32_e32 v9, v9, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v52 +; SI-NEXT: v_or_b32_e32 v2, v2, v11 +; SI-NEXT: 
v_cvt_f32_f16_e32 v11, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_or_b32_e32 v13, v13, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_or_b32_e32 v31, v31, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v52, 0x38000000, v52 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_or_b32_e32 v54, v12, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v58 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_or_b32_e32 v60, v19, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v46 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_or_b32_e32 v34, v34, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v52 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v51 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_or_b32_e32 v40, v11, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v62 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_or_b32_e32 v58, v12, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v45 +; SI-NEXT: v_or_b32_e32 v46, v19, v9 +; SI-NEXT: v_or_b32_e32 v62, v11, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v35 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_or_b32_e32 v56, v11, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v43 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_or_b32_e32 v37, v37, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: 
v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_or_b32_e32 v18, v18, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v22 -; SI-NEXT: v_or_b32_e32 v21, v21, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v25 -; SI-NEXT: v_or_b32_e32 v24, v24, v53 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v52 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v51 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v50 -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v48 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_or_b32_e32 v27, v27, v52 -; SI-NEXT: v_or_b32_e32 v26, v26, v30 -; SI-NEXT: v_or_b32_e32 v23, v23, v29 -; SI-NEXT: v_or_b32_e32 v20, v20, v28 -; SI-NEXT: v_or_b32_e32 v39, v39, v51 -; SI-NEXT: v_or_b32_e32 v36, v36, v50 -; SI-NEXT: v_or_b32_e32 v33, v33, v49 -; SI-NEXT: v_or_b32_e32 v15, v15, v48 -; SI-NEXT: v_or_b32_e32 v11, v11, v17 -; SI-NEXT: v_or_b32_e32 v8, v8, v16 -; SI-NEXT: v_or_b32_e32 v5, v5, v12 -; SI-NEXT: v_alignbit_b32 v52, v24, v52, 16 -; SI-NEXT: v_alignbit_b32 v30, v21, v30, 16 -; SI-NEXT: v_alignbit_b32 v29, v18, v29, 16 -; SI-NEXT: v_alignbit_b32 v28, v37, v28, 16 -; SI-NEXT: v_alignbit_b32 v51, v34, v51, 16 -; SI-NEXT: v_alignbit_b32 v50, v31, v50, 16 -; SI-NEXT: v_alignbit_b32 v49, v13, v49, 16 -; SI-NEXT: v_alignbit_b32 v48, v9, v48, 16 -; SI-NEXT: v_alignbit_b32 v17, v6, v17, 16 -; SI-NEXT: v_alignbit_b32 v16, v3, v16, 16 -; SI-NEXT: v_alignbit_b32 v12, v1, v12, 16 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v45, v12, v7 +; SI-NEXT: v_or_b32_e32 v12, v19, v3 +; SI-NEXT: v_or_b32_e32 v43, v11, v5 +; SI-NEXT: v_or_b32_e32 v11, v20, v1 +; SI-NEXT: v_lshr_b64 v[29:30], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_mov_b32_e32 v35, v12 +; SI-NEXT: v_mov_b32_e32 v33, v11 +; SI-NEXT: v_lshr_b64 v[30:31], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[19:20], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[11:12], v[1:2], 16 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v52 -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 -; SI-NEXT: v_or_b32_e32 v27, v27, v52 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v30 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 8, v0 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; 
SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v50 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v29 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v28 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v39 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v51 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v35 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v50 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v32 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v49 -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 48, v0 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v48 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v54 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v48 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 56, v0 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 60, v0 -; SI-NEXT: 
buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v17 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 64, v0 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v38 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v62 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x48, v0 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x50, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v29 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v60 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v57 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v58 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v36 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v56 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v44 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v46 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: 
v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v42 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v45 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v41 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v43 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v53 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v33 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword 
v42, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll index 152a48bec2636..c35e183fa787f 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll @@ -3189,289 +3189,301 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v24i32_to_v48i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v12, s30, 0 +; SI-NEXT: v_writelane_b32 v12, s31, 1 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11 -; SI-NEXT: v_readfirstlane_b32 s15, v1 -; SI-NEXT: v_readfirstlane_b32 s14, v2 -; SI-NEXT: v_readfirstlane_b32 s13, v3 -; SI-NEXT: v_readfirstlane_b32 s12, v4 -; SI-NEXT: v_readfirstlane_b32 s11, v5 -; SI-NEXT: v_readfirstlane_b32 s10, v6 -; SI-NEXT: v_readfirstlane_b32 s9, v7 -; SI-NEXT: v_readfirstlane_b32 s8, v8 -; SI-NEXT: v_readfirstlane_b32 s7, v9 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v10 +; SI-NEXT: v_writelane_b32 v12, s34, 2 +; SI-NEXT: v_readfirstlane_b32 s12, v1 +; SI-NEXT: v_readfirstlane_b32 s13, v2 +; SI-NEXT: v_readfirstlane_b32 s10, v3 +; SI-NEXT: v_readfirstlane_b32 s11, v4 +; SI-NEXT: v_readfirstlane_b32 s8, v5 +; SI-NEXT: v_readfirstlane_b32 s9, v6 +; SI-NEXT: v_readfirstlane_b32 s6, v7 +; SI-NEXT: v_readfirstlane_b32 s7, v8 +; SI-NEXT: v_readfirstlane_b32 s4, v9 +; SI-NEXT: s_and_b64 s[14:15], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v10 +; SI-NEXT: v_writelane_b32 v12, s35, 3 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v7, s26 -; SI-NEXT: v_mov_b32_e32 v8, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v10, s20 -; SI-NEXT: v_mov_b32_e32 v11, s18 -; SI-NEXT: v_mov_b32_e32 v12, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s27, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s25, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s21, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s19, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s17, v12, 16 -; SI-NEXT: s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: s_lshr_b32 s43, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s14, 16 -; SI-NEXT: s_lshr_b32 s45, s29, 16 -; SI-NEXT: s_lshr_b32 s46, s27, 16 -; SI-NEXT: s_lshr_b32 s47, s25, 16 -; SI-NEXT: s_lshr_b32 s56, s23, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 16 -; SI-NEXT: s_lshr_b32 s58, s19, 16 -; SI-NEXT: s_lshr_b32 s59, s17, 16 +; SI-NEXT: s_lshr_b32 s88, s5, 16 +; SI-NEXT: s_lshr_b32 s89, 
s7, 16 +; SI-NEXT: s_lshr_b32 s90, s9, 16 +; SI-NEXT: s_lshr_b32 s91, s11, 16 +; SI-NEXT: s_lshr_b32 s92, s13, 16 +; SI-NEXT: s_lshr_b32 s93, s29, 16 +; SI-NEXT: s_lshr_b32 s94, s27, 16 +; SI-NEXT: s_lshr_b32 s95, s25, 16 +; SI-NEXT: s_lshr_b32 s30, s23, 16 +; SI-NEXT: s_lshr_b32 s31, s21, 16 +; SI-NEXT: s_lshr_b32 s34, s19, 16 +; SI-NEXT: s_lshr_b32 s35, s17, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 ; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s14, s14, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v7, s26 -; SI-NEXT: v_mov_b32_e32 v8, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v10, s20 -; SI-NEXT: v_mov_b32_e32 v11, s18 -; SI-NEXT: v_mov_b32_e32 v12, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s27, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s25, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s21, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s19, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s17, v12, 16 -; SI-NEXT: s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: s_lshr_b32 s43, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s14, 16 -; SI-NEXT: s_lshr_b32 s45, s29, 16 -; SI-NEXT: s_lshr_b32 s46, s27, 16 -; SI-NEXT: s_lshr_b32 s47, s25, 16 -; SI-NEXT: s_lshr_b32 s56, s23, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 16 -; SI-NEXT: s_lshr_b32 s58, s19, 16 -; SI-NEXT: s_lshr_b32 s59, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: 
s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b32 s88, s5, 16 +; SI-NEXT: s_lshr_b32 s89, s7, 16 +; SI-NEXT: s_lshr_b32 s90, s9, 16 +; SI-NEXT: s_lshr_b32 s91, s11, 16 +; SI-NEXT: s_lshr_b32 s92, s13, 16 +; SI-NEXT: s_lshr_b32 s93, s29, 16 +; SI-NEXT: s_lshr_b32 s94, s27, 16 +; SI-NEXT: s_lshr_b32 s95, s25, 16 +; SI-NEXT: s_lshr_b32 s30, s23, 16 +; SI-NEXT: s_lshr_b32 s31, s21, 16 +; SI-NEXT: s_lshr_b32 s34, s19, 16 +; SI-NEXT: s_lshr_b32 s35, s17, 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: buffer_store_dword v12, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: s_lshl_b32 s15, s76, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_mov_b32_e32 v1, s15 +; SI-NEXT: s_and_b32 s15, s17, 0xffff +; SI-NEXT: s_lshl_b32 s16, s35, 16 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_lshl_b32 s15, s74, 16 +; SI-NEXT: s_and_b32 s16, s18, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_mov_b32_e32 v3, s15 +; SI-NEXT: s_and_b32 s15, s19, 0xffff +; SI-NEXT: s_lshl_b32 s16, s34, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s20, 0xffff +; SI-NEXT: s_lshl_b32 s16, s72, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 
s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s21, 0xffff +; SI-NEXT: s_lshl_b32 s16, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s22, 0xffff +; SI-NEXT: s_lshl_b32 s16, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s23, 0xffff +; SI-NEXT: s_lshl_b32 s16, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s24, 0xffff +; SI-NEXT: s_lshl_b32 s16, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s25, 0xffff +; SI-NEXT: s_lshl_b32 s16, s95, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 60, v0 -; SI-NEXT: 
v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s26, 0xffff +; SI-NEXT: s_lshl_b32 s16, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s27, 0xffff +; SI-NEXT: s_lshl_b32 s16, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s28, 0xffff +; SI-NEXT: s_lshl_b32 s16, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s29, 0xffff +; SI-NEXT: s_lshl_b32 s16, s93, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s15, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s12, s12, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, 
s91, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s89, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s35, v12, 3 +; SI-NEXT: v_readlane_b32 s34, v12, 2 +; SI-NEXT: v_readlane_b32 s31, v12, 1 +; SI-NEXT: v_readlane_b32 s30, v12, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr94 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr93 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: 
; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr90 ; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr88 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v24i32_to_v48i16_scalar: @@ -5100,88 +5112,88 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v61, v4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v63, v2 +; SI-NEXT: v_mov_b32_e32 v56, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: v_mov_b32_e32 v35, v22 -; SI-NEXT: v_mov_b32_e32 v36, v20 -; SI-NEXT: v_mov_b32_e32 v37, v18 -; SI-NEXT: v_mov_b32_e32 v38, v16 -; SI-NEXT: v_mov_b32_e32 v39, v14 -; SI-NEXT: v_mov_b32_e32 v48, v12 -; SI-NEXT: v_mov_b32_e32 v49, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v7 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v13 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:4 +; SI-NEXT: v_mov_b32_e32 v31, v22 +; SI-NEXT: v_mov_b32_e32 v34, v20 +; SI-NEXT: v_mov_b32_e32 v35, v18 +; SI-NEXT: v_mov_b32_e32 v36, v16 +; SI-NEXT: v_mov_b32_e32 v37, v14 +; SI-NEXT: v_mov_b32_e32 v38, v12 +; SI-NEXT: v_mov_b32_e32 v39, v10 +; SI-NEXT: v_mov_b32_e32 v48, v8 +; SI-NEXT: v_mov_b32_e32 v50, v6 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v13 ; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v27 ; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v4 ; SI-NEXT: s_cbranch_scc0 .LBB15_4 
; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v40 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v41 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 -; SI-NEXT: v_or_b32_e32 v10, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 -; SI-NEXT: v_or_b32_e32 v11, v0, v63 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_or_b32_e32 v12, v0, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 +; SI-NEXT: v_or_b32_e32 v7, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v9, v0, v32 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v10, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 -; SI-NEXT: v_or_b32_e32 v13, v0, v61 +; SI-NEXT: v_or_b32_e32 v11, v0, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v14, v0, v60 +; SI-NEXT: v_or_b32_e32 v12, v0, v46 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v15, v0, v44 +; SI-NEXT: v_or_b32_e32 v13, v0, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_or_b32_e32 v14, v0, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v15, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v16, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v16, v0, v58 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v17, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_or_b32_e32 v17, v0, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v31 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v18, v0, v32 +; SI-NEXT: v_or_b32_e32 v18, v0, v40 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v19, v0, v33 +; SI-NEXT: v_or_b32_e32 v19, v0, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v20, v0, v27 +; SI-NEXT: v_or_b32_e32 v20, v0, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff @@ -5191,13 +5203,13 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v22, v0, v25 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v63 +; SI-NEXT: v_or_b32_e32 v22, v0, v27 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v8, v1, v58 -; SI-NEXT: v_or_b32_e32 v23, v0, v34 +; SI-NEXT: v_or_b32_e32 v8, v1, v57 +; SI-NEXT: v_or_b32_e32 v23, v0, v25 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -5207,60 +5219,60 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB15_3 ; SI-NEXT: .LBB15_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: 
s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v40, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v41, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v33, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: v_or_b32_e32 v0, v27, v0 +; SI-NEXT: v_or_b32_e32 v0, v51, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 @@ -5291,17 +5303,17 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: v_or_b32_e32 v0, v25, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v27, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 ; SI-NEXT: s_or_b32 s9, s10, s9 ; SI-NEXT: 
s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: s_add_i32 s4, s4, 0x30000 ; SI-NEXT: s_add_i32 s5, s5, 0x30000 ; SI-NEXT: s_add_i32 s6, s6, 0x30000 @@ -5309,7 +5321,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: s_add_i32 s8, s8, 0x30000 ; SI-NEXT: s_add_i32 s9, s9, 0x30000 ; SI-NEXT: s_add_i32 s10, s10, 0x30000 -; SI-NEXT: v_or_b32_e32 v0, v34, v0 +; SI-NEXT: v_or_b32_e32 v0, v25, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -5339,66 +5351,64 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB15_4: -; SI-NEXT: v_mov_b32_e32 v43, v34 -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v44, v25 -; SI-NEXT: v_mov_b32_e32 v45, v29 -; SI-NEXT: v_mov_b32_e32 v46, v27 -; SI-NEXT: v_mov_b32_e32 v47, v33 -; SI-NEXT: v_mov_b32_e32 v56, v32 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v39 -; SI-NEXT: v_mov_b32_e32 v39, v37 -; SI-NEXT: v_mov_b32_e32 v37, v35 -; SI-NEXT: v_mov_b32_e32 v35, v26 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v48 -; SI-NEXT: v_mov_b32_e32 v48, v38 -; SI-NEXT: v_mov_b32_e32 v38, v36 -; SI-NEXT: v_mov_b32_e32 v36, v24 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: v_mov_b32_e32 v42, v41 -; SI-NEXT: v_mov_b32_e32 v41, v57 -; SI-NEXT: v_mov_b32_e32 v57, v31 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v26, v35 -; SI-NEXT: v_mov_b32_e32 v35, v37 -; SI-NEXT: v_mov_b32_e32 v37, v39 -; SI-NEXT: v_mov_b32_e32 v39, v49 -; SI-NEXT: v_mov_b32_e32 v49, v40 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v25, v44 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v31, v57 +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v46, v51 +; SI-NEXT: v_mov_b32_e32 v51, v39 +; SI-NEXT: v_mov_b32_e32 v39, v34 +; SI-NEXT: v_mov_b32_e32 v34, v30 +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v47, v52 +; SI-NEXT: v_mov_b32_e32 v52, v48 +; SI-NEXT: v_mov_b32_e32 v48, v35 +; SI-NEXT: v_mov_b32_e32 v35, v28 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v57 ; SI-NEXT: v_mov_b32_e32 v57, v41 -; SI-NEXT: v_mov_b32_e32 v41, v42 -; 
SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v24, v36 -; SI-NEXT: v_mov_b32_e32 v36, v38 -; SI-NEXT: v_mov_b32_e32 v38, v48 -; SI-NEXT: v_mov_b32_e32 v48, v50 -; SI-NEXT: v_mov_b32_e32 v50, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, v32 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v41, v49 +; SI-NEXT: v_mov_b32_e32 v49, v36 +; SI-NEXT: v_mov_b32_e32 v36, v26 +; SI-NEXT: v_mov_b32_e32 v42, v50 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v37, v24 +; SI-NEXT: v_mov_b32_e32 v33, v32 ; SI-NEXT: v_mov_b32_e32 v32, v56 -; SI-NEXT: v_mov_b32_e32 v33, v47 -; SI-NEXT: v_mov_b32_e32 v27, v46 +; SI-NEXT: v_mov_b32_e32 v56, v40 +; SI-NEXT: v_mov_b32_e32 v40, v38 +; SI-NEXT: v_mov_b32_e32 v38, v31 +; SI-NEXT: v_mov_b32_e32 v43, v25 +; SI-NEXT: v_mov_b32_e32 v44, v27 +; SI-NEXT: v_mov_b32_e32 v53, v45 +; SI-NEXT: v_mov_b32_e32 v45, v29 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: v_mov_b32_e32 v26, v36 +; SI-NEXT: v_mov_b32_e32 v36, v49 +; SI-NEXT: v_mov_b32_e32 v49, v41 +; SI-NEXT: v_mov_b32_e32 v41, v57 +; SI-NEXT: v_mov_b32_e32 v57, v55 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v34 +; SI-NEXT: v_mov_b32_e32 v34, v39 +; SI-NEXT: v_mov_b32_e32 v39, v51 +; SI-NEXT: v_mov_b32_e32 v51, v46 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: v_mov_b32_e32 v29, v45 -; SI-NEXT: v_mov_b32_e32 v34, v43 +; SI-NEXT: v_mov_b32_e32 v45, v53 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v25, v43 +; SI-NEXT: v_mov_b32_e32 v31, v38 +; SI-NEXT: v_mov_b32_e32 v38, v40 +; SI-NEXT: v_mov_b32_e32 v40, v56 +; SI-NEXT: v_mov_b32_e32 v56, v32 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v24, v37 +; SI-NEXT: v_mov_b32_e32 v37, v50 +; SI-NEXT: v_mov_b32_e32 v50, v42 +; SI-NEXT: v_mov_b32_e32 v28, v35 +; SI-NEXT: v_mov_b32_e32 v35, v48 +; SI-NEXT: v_mov_b32_e32 v48, v52 +; SI-NEXT: v_mov_b32_e32 v52, v47 ; SI-NEXT: s_branch .LBB15_2 ; ; VI-LABEL: bitcast_v48i16_to_v24i32_scalar: @@ -12563,180 +12573,186 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11 -; SI-NEXT: v_mov_b32_e32 v26, s16 +; SI-NEXT: v_mov_b32_e32 v23, s16 ; SI-NEXT: v_mov_b32_e32 v24, s17 -; SI-NEXT: v_mov_b32_e32 v23, s18 +; SI-NEXT: v_mov_b32_e32 v21, s18 ; SI-NEXT: v_mov_b32_e32 v22, s19 -; SI-NEXT: v_mov_b32_e32 v20, s20 -; SI-NEXT: v_mov_b32_e32 v19, s21 -; SI-NEXT: v_mov_b32_e32 v18, s22 -; SI-NEXT: v_mov_b32_e32 v15, s23 -; SI-NEXT: v_mov_b32_e32 v17, s24 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v19, s20 +; SI-NEXT: v_mov_b32_e32 v20, s21 +; SI-NEXT: v_mov_b32_e32 v17, s22 +; SI-NEXT: v_mov_b32_e32 v18, s23 +; SI-NEXT: v_mov_b32_e32 v15, s24 ; SI-NEXT: v_mov_b32_e32 v16, s25 -; SI-NEXT: v_mov_b32_e32 v14, s26 -; SI-NEXT: v_mov_b32_e32 v13, s27 -; SI-NEXT: v_mov_b32_e32 v12, s28 -; SI-NEXT: v_mov_b32_e32 v11, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s26 +; SI-NEXT: 
v_mov_b32_e32 v14, s27 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v11, s28 +; SI-NEXT: v_mov_b32_e32 v12, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v21, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v25, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v28, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v29, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v34, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v36, v16, v17, 16 -; SI-NEXT: v_alignbit_b32 v38, v15, v18, 16 -; SI-NEXT: v_alignbit_b32 v48, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v51, v22, v23, 16 -; SI-NEXT: v_alignbit_b32 v53, v24, v26, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v24 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v24 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[23:24], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 ; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[11:12], 16 ; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 ; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: 
v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 ; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 ; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_alignbit_b32 v21, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v25, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v28, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v29, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v34, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v36, v16, v17, 16 -; SI-NEXT: v_alignbit_b32 v38, v15, v18, 16 -; SI-NEXT: v_alignbit_b32 v48, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v51, v22, v23, 16 -; SI-NEXT: v_alignbit_b32 v53, v24, v26, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v24 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[28:29], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[19:20], 16 +; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 +; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_lshr_b64 v[29:30], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v24 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v53 -; SI-NEXT: v_or_b32_e32 v26, v26, v53 -; SI-NEXT: buffer_store_dword v26, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v40 -; SI-NEXT: v_or_b32_e32 v24, v24, v26 -; SI-NEXT: v_add_i32_e32 v26, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v24, v26, s[0:3], 0 
offen +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v37 ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v23, v23, v31 +; SI-NEXT: buffer_store_dword v23, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v51 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v42 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 8, v0 +; SI-NEXT: v_add_i32_e32 v24, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v55 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v36 +; SI-NEXT: v_or_b32_e32 v21, v21, v23 +; SI-NEXT: v_add_i32_e32 v23, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v48 -; SI-NEXT: v_or_b32_e32 v20, v20, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v41 +; SI-NEXT: v_or_b32_e32 v21, v21, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v35 +; SI-NEXT: v_or_b32_e32 v19, v19, v21 +; SI-NEXT: v_add_i32_e32 v21, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v40 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v34 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v52 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v55 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v54 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; 
SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v34 -; SI-NEXT: v_or_b32_e32 v14, v14, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v30 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v49 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v53 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v31 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v39 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v52 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen @@ -12748,7 +12764,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -12760,7 +12776,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -12772,62 +12788,64 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v9 -; SI-NEXT: 
v_lshlrev_b32_e32 v2, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v24f32_to_v48i16_scalar: @@ -14442,88 +14460,88 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v61, v4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v63, v2 +; SI-NEXT: v_mov_b32_e32 v56, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: v_mov_b32_e32 v35, v22 -; SI-NEXT: v_mov_b32_e32 v36, v20 -; SI-NEXT: v_mov_b32_e32 v37, v18 -; SI-NEXT: v_mov_b32_e32 v38, v16 -; SI-NEXT: v_mov_b32_e32 v39, v14 -; SI-NEXT: v_mov_b32_e32 v48, v12 -; SI-NEXT: v_mov_b32_e32 v49, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: 
v_lshlrev_b32_e32 v58, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v7 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v13 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:4 +; SI-NEXT: v_mov_b32_e32 v31, v22 +; SI-NEXT: v_mov_b32_e32 v34, v20 +; SI-NEXT: v_mov_b32_e32 v35, v18 +; SI-NEXT: v_mov_b32_e32 v36, v16 +; SI-NEXT: v_mov_b32_e32 v37, v14 +; SI-NEXT: v_mov_b32_e32 v38, v12 +; SI-NEXT: v_mov_b32_e32 v39, v10 +; SI-NEXT: v_mov_b32_e32 v48, v8 +; SI-NEXT: v_mov_b32_e32 v50, v6 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v13 ; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v27 ; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v4 ; SI-NEXT: s_cbranch_scc0 .LBB31_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v40 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v41 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 -; SI-NEXT: v_or_b32_e32 v10, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 -; SI-NEXT: v_or_b32_e32 v11, v0, v63 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_or_b32_e32 v12, v0, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 +; SI-NEXT: v_or_b32_e32 v7, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v9, v0, v32 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v10, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 -; SI-NEXT: v_or_b32_e32 v13, v0, v61 +; SI-NEXT: v_or_b32_e32 v11, v0, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v14, v0, v60 +; SI-NEXT: v_or_b32_e32 v12, v0, v46 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v15, v0, v44 +; SI-NEXT: v_or_b32_e32 v13, v0, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_or_b32_e32 v14, v0, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v15, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v16, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v16, v0, v58 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v17, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; 
SI-NEXT: v_or_b32_e32 v17, v0, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v31 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v18, v0, v32 +; SI-NEXT: v_or_b32_e32 v18, v0, v40 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v19, v0, v33 +; SI-NEXT: v_or_b32_e32 v19, v0, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v20, v0, v27 +; SI-NEXT: v_or_b32_e32 v20, v0, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff @@ -14533,13 +14551,13 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a, ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v22, v0, v25 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v63 +; SI-NEXT: v_or_b32_e32 v22, v0, v27 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v8, v1, v58 -; SI-NEXT: v_or_b32_e32 v23, v0, v34 +; SI-NEXT: v_or_b32_e32 v8, v1, v57 +; SI-NEXT: v_or_b32_e32 v23, v0, v25 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -14549,60 +14567,60 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB31_3 ; SI-NEXT: .LBB31_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v40, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; 
SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v41, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v33, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: v_or_b32_e32 v0, v27, v0 +; SI-NEXT: v_or_b32_e32 v0, v51, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 @@ -14633,17 +14651,17 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a, ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: v_or_b32_e32 v0, v25, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v27, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 ; SI-NEXT: s_or_b32 s9, s10, s9 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: s_add_i32 s4, s4, 0x30000 ; SI-NEXT: s_add_i32 s5, s5, 0x30000 ; SI-NEXT: s_add_i32 s6, s6, 0x30000 @@ -14651,7 +14669,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a, ; SI-NEXT: s_add_i32 s8, s8, 0x30000 ; SI-NEXT: s_add_i32 s9, s9, 0x30000 ; SI-NEXT: s_add_i32 s10, s10, 0x30000 -; SI-NEXT: v_or_b32_e32 v0, v34, v0 +; SI-NEXT: v_or_b32_e32 v0, v25, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -14681,66 +14699,64 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB31_4: -; SI-NEXT: v_mov_b32_e32 v43, v34 -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v44, v25 -; SI-NEXT: v_mov_b32_e32 v45, v29 -; SI-NEXT: v_mov_b32_e32 v46, v27 -; SI-NEXT: v_mov_b32_e32 v47, v33 -; SI-NEXT: v_mov_b32_e32 v56, v32 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v39 -; SI-NEXT: v_mov_b32_e32 
v39, v37 -; SI-NEXT: v_mov_b32_e32 v37, v35 -; SI-NEXT: v_mov_b32_e32 v35, v26 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v48 -; SI-NEXT: v_mov_b32_e32 v48, v38 -; SI-NEXT: v_mov_b32_e32 v38, v36 -; SI-NEXT: v_mov_b32_e32 v36, v24 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: v_mov_b32_e32 v42, v41 -; SI-NEXT: v_mov_b32_e32 v41, v57 -; SI-NEXT: v_mov_b32_e32 v57, v31 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v26, v35 -; SI-NEXT: v_mov_b32_e32 v35, v37 -; SI-NEXT: v_mov_b32_e32 v37, v39 -; SI-NEXT: v_mov_b32_e32 v39, v49 -; SI-NEXT: v_mov_b32_e32 v49, v40 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v25, v44 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v31, v57 +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v46, v51 +; SI-NEXT: v_mov_b32_e32 v51, v39 +; SI-NEXT: v_mov_b32_e32 v39, v34 +; SI-NEXT: v_mov_b32_e32 v34, v30 +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v47, v52 +; SI-NEXT: v_mov_b32_e32 v52, v48 +; SI-NEXT: v_mov_b32_e32 v48, v35 +; SI-NEXT: v_mov_b32_e32 v35, v28 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v57 ; SI-NEXT: v_mov_b32_e32 v57, v41 -; SI-NEXT: v_mov_b32_e32 v41, v42 -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v24, v36 -; SI-NEXT: v_mov_b32_e32 v36, v38 -; SI-NEXT: v_mov_b32_e32 v38, v48 -; SI-NEXT: v_mov_b32_e32 v48, v50 -; SI-NEXT: v_mov_b32_e32 v50, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, v32 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v41, v49 +; SI-NEXT: v_mov_b32_e32 v49, v36 +; SI-NEXT: v_mov_b32_e32 v36, v26 +; SI-NEXT: v_mov_b32_e32 v42, v50 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v37, v24 +; SI-NEXT: v_mov_b32_e32 v33, v32 ; SI-NEXT: v_mov_b32_e32 v32, v56 -; SI-NEXT: v_mov_b32_e32 v33, v47 -; SI-NEXT: v_mov_b32_e32 v27, v46 +; SI-NEXT: v_mov_b32_e32 v56, v40 +; SI-NEXT: v_mov_b32_e32 v40, v38 +; SI-NEXT: v_mov_b32_e32 v38, v31 +; SI-NEXT: v_mov_b32_e32 v43, v25 +; SI-NEXT: v_mov_b32_e32 v44, v27 +; SI-NEXT: v_mov_b32_e32 v53, v45 +; SI-NEXT: v_mov_b32_e32 v45, v29 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: v_mov_b32_e32 v26, v36 +; SI-NEXT: v_mov_b32_e32 v36, v49 +; SI-NEXT: v_mov_b32_e32 v49, v41 +; SI-NEXT: v_mov_b32_e32 v41, v57 +; SI-NEXT: v_mov_b32_e32 v57, v55 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v34 +; SI-NEXT: v_mov_b32_e32 v34, v39 +; SI-NEXT: v_mov_b32_e32 
v39, v51 +; SI-NEXT: v_mov_b32_e32 v51, v46 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: v_mov_b32_e32 v29, v45 -; SI-NEXT: v_mov_b32_e32 v34, v43 +; SI-NEXT: v_mov_b32_e32 v45, v53 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v25, v43 +; SI-NEXT: v_mov_b32_e32 v31, v38 +; SI-NEXT: v_mov_b32_e32 v38, v40 +; SI-NEXT: v_mov_b32_e32 v40, v56 +; SI-NEXT: v_mov_b32_e32 v56, v32 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v24, v37 +; SI-NEXT: v_mov_b32_e32 v37, v50 +; SI-NEXT: v_mov_b32_e32 v50, v42 +; SI-NEXT: v_mov_b32_e32 v28, v35 +; SI-NEXT: v_mov_b32_e32 v35, v48 +; SI-NEXT: v_mov_b32_e32 v48, v52 +; SI-NEXT: v_mov_b32_e32 v52, v47 ; SI-NEXT: s_branch .LBB31_2 ; ; VI-LABEL: bitcast_v48i16_to_v24f32_scalar: @@ -21132,289 +21148,301 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v12i64_to_v48i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v12, s30, 0 +; SI-NEXT: v_writelane_b32 v12, s31, 1 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11 -; SI-NEXT: v_readfirstlane_b32 s15, v1 -; SI-NEXT: v_readfirstlane_b32 s14, v2 -; SI-NEXT: v_readfirstlane_b32 s13, v3 -; SI-NEXT: v_readfirstlane_b32 s12, v4 -; SI-NEXT: v_readfirstlane_b32 s11, v5 -; SI-NEXT: v_readfirstlane_b32 s10, v6 -; SI-NEXT: v_readfirstlane_b32 s9, v7 -; SI-NEXT: v_readfirstlane_b32 s8, v8 -; SI-NEXT: v_readfirstlane_b32 s7, v9 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v10 +; SI-NEXT: v_writelane_b32 v12, s34, 2 +; SI-NEXT: v_readfirstlane_b32 s12, v1 +; SI-NEXT: v_readfirstlane_b32 s13, v2 +; SI-NEXT: v_readfirstlane_b32 s10, v3 +; SI-NEXT: v_readfirstlane_b32 s11, v4 +; SI-NEXT: v_readfirstlane_b32 s8, v5 +; SI-NEXT: v_readfirstlane_b32 s9, v6 +; SI-NEXT: v_readfirstlane_b32 s6, v7 +; SI-NEXT: v_readfirstlane_b32 s7, v8 +; SI-NEXT: v_readfirstlane_b32 s4, v9 +; SI-NEXT: s_and_b64 s[14:15], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v10 +; SI-NEXT: v_writelane_b32 v12, s35, 3 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v7, s26 -; SI-NEXT: v_mov_b32_e32 v8, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v10, s20 -; SI-NEXT: v_mov_b32_e32 v11, s18 -; SI-NEXT: v_mov_b32_e32 v12, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s27, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s25, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s21, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s19, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s17, v12, 16 -; SI-NEXT: s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: s_lshr_b32 s43, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s14, 16 -; SI-NEXT: s_lshr_b32 s45, s29, 16 -; SI-NEXT: s_lshr_b32 s46, s27, 16 -; SI-NEXT: s_lshr_b32 s47, 
s25, 16 -; SI-NEXT: s_lshr_b32 s56, s23, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 16 -; SI-NEXT: s_lshr_b32 s58, s19, 16 -; SI-NEXT: s_lshr_b32 s59, s17, 16 +; SI-NEXT: s_lshr_b32 s88, s5, 16 +; SI-NEXT: s_lshr_b32 s89, s7, 16 +; SI-NEXT: s_lshr_b32 s90, s9, 16 +; SI-NEXT: s_lshr_b32 s91, s11, 16 +; SI-NEXT: s_lshr_b32 s92, s13, 16 +; SI-NEXT: s_lshr_b32 s93, s29, 16 +; SI-NEXT: s_lshr_b32 s94, s27, 16 +; SI-NEXT: s_lshr_b32 s95, s25, 16 +; SI-NEXT: s_lshr_b32 s30, s23, 16 +; SI-NEXT: s_lshr_b32 s31, s21, 16 +; SI-NEXT: s_lshr_b32 s34, s19, 16 +; SI-NEXT: s_lshr_b32 s35, s17, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v7, s26 -; SI-NEXT: v_mov_b32_e32 v8, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v10, s20 -; SI-NEXT: v_mov_b32_e32 v11, s18 -; SI-NEXT: v_mov_b32_e32 v12, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s27, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s25, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s21, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s19, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s17, v12, 16 -; SI-NEXT: s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: s_lshr_b32 s43, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s14, 16 -; SI-NEXT: s_lshr_b32 s45, s29, 16 -; SI-NEXT: s_lshr_b32 s46, s27, 16 -; SI-NEXT: s_lshr_b32 s47, 
s25, 16 -; SI-NEXT: s_lshr_b32 s56, s23, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 16 -; SI-NEXT: s_lshr_b32 s58, s19, 16 -; SI-NEXT: s_lshr_b32 s59, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s88, s5, 16 +; SI-NEXT: s_lshr_b32 s89, s7, 16 +; SI-NEXT: s_lshr_b32 s90, s9, 16 +; SI-NEXT: s_lshr_b32 s91, s11, 16 +; SI-NEXT: s_lshr_b32 s92, s13, 16 +; SI-NEXT: s_lshr_b32 s93, s29, 16 +; SI-NEXT: s_lshr_b32 s94, s27, 16 +; SI-NEXT: s_lshr_b32 s95, s25, 16 +; SI-NEXT: s_lshr_b32 s30, s23, 16 +; SI-NEXT: s_lshr_b32 s31, s21, 16 +; SI-NEXT: s_lshr_b32 s34, s19, 16 +; SI-NEXT: s_lshr_b32 s35, s17, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: buffer_store_dword v12, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: s_lshl_b32 s15, s76, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_mov_b32_e32 v1, s15 +; SI-NEXT: s_and_b32 s15, s17, 0xffff +; SI-NEXT: s_lshl_b32 s16, s35, 16 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_lshl_b32 s15, s74, 16 +; SI-NEXT: s_and_b32 s16, s18, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_mov_b32_e32 v3, s15 +; SI-NEXT: s_and_b32 s15, s19, 0xffff +; SI-NEXT: s_lshl_b32 s16, s34, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: 
s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s20, 0xffff +; SI-NEXT: s_lshl_b32 s16, s72, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s21, 0xffff +; SI-NEXT: s_lshl_b32 s16, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s22, 0xffff +; SI-NEXT: s_lshl_b32 s16, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s23, 0xffff +; SI-NEXT: s_lshl_b32 s16, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s24, 0xffff +; SI-NEXT: s_lshl_b32 s16, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v7, v6, 
s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s25, 0xffff +; SI-NEXT: s_lshl_b32 s16, s95, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s26, 0xffff +; SI-NEXT: s_lshl_b32 s16, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s27, 0xffff +; SI-NEXT: s_lshl_b32 s16, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s28, 0xffff +; SI-NEXT: s_lshl_b32 s16, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s29, 0xffff +; SI-NEXT: s_lshl_b32 s16, s93, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s15, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s12, s12, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 
0xffff +; SI-NEXT: s_lshl_b32 s13, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s91, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s89, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s35, v12, 3 +; SI-NEXT: v_readlane_b32 s34, v12, 2 +; SI-NEXT: v_readlane_b32 s31, v12, 1 +; SI-NEXT: v_readlane_b32 s30, v12, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: 
$vgpr9 +; SI-NEXT: ; implicit-def: $sgpr94 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr93 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr90 ; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr88 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v12i64_to_v48i16_scalar: @@ -23043,88 +23071,88 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v61, v4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v63, v2 +; SI-NEXT: v_mov_b32_e32 v56, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: v_mov_b32_e32 v35, v22 -; SI-NEXT: v_mov_b32_e32 v36, v20 -; SI-NEXT: v_mov_b32_e32 v37, v18 -; SI-NEXT: v_mov_b32_e32 v38, v16 -; SI-NEXT: v_mov_b32_e32 v39, v14 -; SI-NEXT: v_mov_b32_e32 v48, v12 -; SI-NEXT: v_mov_b32_e32 v49, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v7 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v13 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:4 +; SI-NEXT: v_mov_b32_e32 v31, v22 +; SI-NEXT: v_mov_b32_e32 v34, v20 +; SI-NEXT: v_mov_b32_e32 v35, v18 +; SI-NEXT: v_mov_b32_e32 v36, v16 +; SI-NEXT: v_mov_b32_e32 v37, v14 +; SI-NEXT: v_mov_b32_e32 v38, v12 +; SI-NEXT: v_mov_b32_e32 v39, v10 +; SI-NEXT: v_mov_b32_e32 v48, v8 +; SI-NEXT: v_mov_b32_e32 v50, v6 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v13 ; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, 
v19 +; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v27 ; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v4 ; SI-NEXT: s_cbranch_scc0 .LBB43_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v40 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v41 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 -; SI-NEXT: v_or_b32_e32 v10, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 -; SI-NEXT: v_or_b32_e32 v11, v0, v63 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_or_b32_e32 v12, v0, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 +; SI-NEXT: v_or_b32_e32 v7, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v9, v0, v32 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v10, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 -; SI-NEXT: v_or_b32_e32 v13, v0, v61 +; SI-NEXT: v_or_b32_e32 v11, v0, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v14, v0, v60 +; SI-NEXT: v_or_b32_e32 v12, v0, v46 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v15, v0, v44 +; SI-NEXT: v_or_b32_e32 v13, v0, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_or_b32_e32 v14, v0, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v15, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v16, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v16, v0, v58 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v17, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_or_b32_e32 v17, v0, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v31 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v18, v0, v32 +; SI-NEXT: v_or_b32_e32 v18, v0, v40 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v19, v0, v33 +; SI-NEXT: v_or_b32_e32 v19, v0, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v20, v0, v27 +; SI-NEXT: v_or_b32_e32 v20, v0, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff @@ -23134,13 +23162,13 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v22, v0, v25 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v63 +; SI-NEXT: v_or_b32_e32 v22, v0, v27 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: s_or_b32 s10, s10, s11 -; 
SI-NEXT: v_or_b32_e32 v8, v1, v58 -; SI-NEXT: v_or_b32_e32 v23, v0, v34 +; SI-NEXT: v_or_b32_e32 v8, v1, v57 +; SI-NEXT: v_or_b32_e32 v23, v0, v25 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -23150,60 +23178,60 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB43_3 ; SI-NEXT: .LBB43_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v40, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v41, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v33, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; 
SI-NEXT: v_or_b32_e32 v0, v27, v0 +; SI-NEXT: v_or_b32_e32 v0, v51, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 @@ -23234,17 +23262,17 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: v_or_b32_e32 v0, v25, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v27, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 ; SI-NEXT: s_or_b32 s9, s10, s9 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: s_add_i32 s4, s4, 0x30000 ; SI-NEXT: s_add_i32 s5, s5, 0x30000 ; SI-NEXT: s_add_i32 s6, s6, 0x30000 @@ -23252,7 +23280,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: s_add_i32 s8, s8, 0x30000 ; SI-NEXT: s_add_i32 s9, s9, 0x30000 ; SI-NEXT: s_add_i32 s10, s10, 0x30000 -; SI-NEXT: v_or_b32_e32 v0, v34, v0 +; SI-NEXT: v_or_b32_e32 v0, v25, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -23282,66 +23310,64 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB43_4: -; SI-NEXT: v_mov_b32_e32 v43, v34 -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v44, v25 -; SI-NEXT: v_mov_b32_e32 v45, v29 -; SI-NEXT: v_mov_b32_e32 v46, v27 -; SI-NEXT: v_mov_b32_e32 v47, v33 -; SI-NEXT: v_mov_b32_e32 v56, v32 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v39 -; SI-NEXT: v_mov_b32_e32 v39, v37 -; SI-NEXT: v_mov_b32_e32 v37, v35 -; SI-NEXT: v_mov_b32_e32 v35, v26 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v48 -; SI-NEXT: v_mov_b32_e32 v48, v38 -; SI-NEXT: v_mov_b32_e32 v38, v36 -; SI-NEXT: v_mov_b32_e32 v36, v24 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: v_mov_b32_e32 v42, v41 -; SI-NEXT: v_mov_b32_e32 v41, v57 -; SI-NEXT: v_mov_b32_e32 v57, v31 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v26, v35 -; SI-NEXT: v_mov_b32_e32 v35, v37 -; SI-NEXT: v_mov_b32_e32 v37, v39 -; SI-NEXT: v_mov_b32_e32 v39, v49 -; SI-NEXT: v_mov_b32_e32 v49, v40 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v25, v44 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v31, v57 +; SI-NEXT: buffer_store_dword 
v46, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v46, v51 +; SI-NEXT: v_mov_b32_e32 v51, v39 +; SI-NEXT: v_mov_b32_e32 v39, v34 +; SI-NEXT: v_mov_b32_e32 v34, v30 +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v47, v52 +; SI-NEXT: v_mov_b32_e32 v52, v48 +; SI-NEXT: v_mov_b32_e32 v48, v35 +; SI-NEXT: v_mov_b32_e32 v35, v28 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v57 ; SI-NEXT: v_mov_b32_e32 v57, v41 -; SI-NEXT: v_mov_b32_e32 v41, v42 -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v24, v36 -; SI-NEXT: v_mov_b32_e32 v36, v38 -; SI-NEXT: v_mov_b32_e32 v38, v48 -; SI-NEXT: v_mov_b32_e32 v48, v50 -; SI-NEXT: v_mov_b32_e32 v50, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, v32 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v41, v49 +; SI-NEXT: v_mov_b32_e32 v49, v36 +; SI-NEXT: v_mov_b32_e32 v36, v26 +; SI-NEXT: v_mov_b32_e32 v42, v50 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v37, v24 +; SI-NEXT: v_mov_b32_e32 v33, v32 ; SI-NEXT: v_mov_b32_e32 v32, v56 -; SI-NEXT: v_mov_b32_e32 v33, v47 -; SI-NEXT: v_mov_b32_e32 v27, v46 +; SI-NEXT: v_mov_b32_e32 v56, v40 +; SI-NEXT: v_mov_b32_e32 v40, v38 +; SI-NEXT: v_mov_b32_e32 v38, v31 +; SI-NEXT: v_mov_b32_e32 v43, v25 +; SI-NEXT: v_mov_b32_e32 v44, v27 +; SI-NEXT: v_mov_b32_e32 v53, v45 +; SI-NEXT: v_mov_b32_e32 v45, v29 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: v_mov_b32_e32 v26, v36 +; SI-NEXT: v_mov_b32_e32 v36, v49 +; SI-NEXT: v_mov_b32_e32 v49, v41 +; SI-NEXT: v_mov_b32_e32 v41, v57 +; SI-NEXT: v_mov_b32_e32 v57, v55 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v34 +; SI-NEXT: v_mov_b32_e32 v34, v39 +; SI-NEXT: v_mov_b32_e32 v39, v51 +; SI-NEXT: v_mov_b32_e32 v51, v46 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: v_mov_b32_e32 v29, v45 -; SI-NEXT: v_mov_b32_e32 v34, v43 +; SI-NEXT: v_mov_b32_e32 v45, v53 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v25, v43 +; SI-NEXT: v_mov_b32_e32 v31, v38 +; SI-NEXT: v_mov_b32_e32 v38, v40 +; SI-NEXT: v_mov_b32_e32 v40, v56 +; SI-NEXT: v_mov_b32_e32 v56, v32 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v24, v37 +; SI-NEXT: v_mov_b32_e32 v37, v50 +; SI-NEXT: v_mov_b32_e32 v50, v42 +; SI-NEXT: v_mov_b32_e32 v28, v35 +; SI-NEXT: v_mov_b32_e32 v35, v48 +; SI-NEXT: v_mov_b32_e32 v48, v52 +; SI-NEXT: v_mov_b32_e32 v52, v47 ; SI-NEXT: s_branch .LBB43_2 ; ; VI-LABEL: bitcast_v48i16_to_v12i64_scalar: @@ -28937,153 +28963,159 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v11, s28 ; SI-NEXT: v_mov_b32_e32 v12, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 ; 4-byte 
Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v25, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v26, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v28, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v29, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v35, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v38, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v48, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v50, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v53, v24, v23, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v24 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v24 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[23:24], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 ; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_alignbit_b32 v25, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v26, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v28, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v29, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v35, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v38, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v48, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v50, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v53, v24, v23, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10 -; SI-NEXT: 
v_lshrrev_b32_e32 v32, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v24 +; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[11:12], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[28:29], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v24 ; SI-NEXT: .LBB49_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v37 ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v53 -; SI-NEXT: v_or_b32_e32 v23, v23, v53 +; SI-NEXT: v_or_b32_e32 v23, v23, v31 ; SI-NEXT: buffer_store_dword v23, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v42 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v36 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v41 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v35 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: 
v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v40 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v34 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v55 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v54 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v30 ; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v53 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32 ; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v52 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen @@ -29095,7 +29127,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -29107,7 +29139,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -29119,7 +29151,7 @@ define inreg <48 x i16> 
@bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -29131,7 +29163,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -29143,38 +29175,40 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v12f64_to_v48i16_scalar: @@ -30765,88 +30799,88 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt 
expcnt(2) +; SI-NEXT: v_mov_b32_e32 v61, v4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v63, v2 +; SI-NEXT: v_mov_b32_e32 v56, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: v_mov_b32_e32 v35, v22 -; SI-NEXT: v_mov_b32_e32 v36, v20 -; SI-NEXT: v_mov_b32_e32 v37, v18 -; SI-NEXT: v_mov_b32_e32 v38, v16 -; SI-NEXT: v_mov_b32_e32 v39, v14 -; SI-NEXT: v_mov_b32_e32 v48, v12 -; SI-NEXT: v_mov_b32_e32 v49, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v7 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v13 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:4 +; SI-NEXT: v_mov_b32_e32 v31, v22 +; SI-NEXT: v_mov_b32_e32 v34, v20 +; SI-NEXT: v_mov_b32_e32 v35, v18 +; SI-NEXT: v_mov_b32_e32 v36, v16 +; SI-NEXT: v_mov_b32_e32 v37, v14 +; SI-NEXT: v_mov_b32_e32 v38, v12 +; SI-NEXT: v_mov_b32_e32 v39, v10 +; SI-NEXT: v_mov_b32_e32 v48, v8 +; SI-NEXT: v_mov_b32_e32 v50, v6 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v13 ; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v27 ; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v4 ; SI-NEXT: s_cbranch_scc0 .LBB51_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v40 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v41 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 -; SI-NEXT: v_or_b32_e32 v10, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 -; SI-NEXT: v_or_b32_e32 v11, v0, v63 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_or_b32_e32 v12, v0, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 +; SI-NEXT: v_or_b32_e32 v7, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v9, v0, v32 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v10, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 -; SI-NEXT: v_or_b32_e32 v13, v0, v61 +; SI-NEXT: v_or_b32_e32 v11, v0, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v14, v0, v60 
+; SI-NEXT: v_or_b32_e32 v12, v0, v46 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v15, v0, v44 +; SI-NEXT: v_or_b32_e32 v13, v0, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_or_b32_e32 v14, v0, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v15, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v16, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v16, v0, v58 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v17, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_or_b32_e32 v17, v0, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v31 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v18, v0, v32 +; SI-NEXT: v_or_b32_e32 v18, v0, v40 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v19, v0, v33 +; SI-NEXT: v_or_b32_e32 v19, v0, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v20, v0, v27 +; SI-NEXT: v_or_b32_e32 v20, v0, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff @@ -30856,13 +30890,13 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a, ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v22, v0, v25 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v63 +; SI-NEXT: v_or_b32_e32 v22, v0, v27 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v8, v1, v58 -; SI-NEXT: v_or_b32_e32 v23, v0, v34 +; SI-NEXT: v_or_b32_e32 v8, v1, v57 +; SI-NEXT: v_or_b32_e32 v23, v0, v25 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -30872,60 +30906,60 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB51_3 ; SI-NEXT: .LBB51_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v0, v40, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: 
v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v41, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v33, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: v_or_b32_e32 v0, v27, v0 +; SI-NEXT: v_or_b32_e32 v0, v51, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 @@ -30956,17 +30990,17 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a, ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: v_or_b32_e32 v0, v25, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v27, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 ; SI-NEXT: s_or_b32 s9, s10, s9 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v58, v1 +; SI-NEXT: v_or_b32_e32 v1, v57, v1 ; SI-NEXT: s_add_i32 s4, s4, 0x30000 ; SI-NEXT: s_add_i32 s5, s5, 0x30000 ; SI-NEXT: s_add_i32 s6, s6, 0x30000 @@ -30974,7 +31008,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a, ; SI-NEXT: s_add_i32 s8, s8, 0x30000 ; SI-NEXT: s_add_i32 s9, s9, 0x30000 ; SI-NEXT: s_add_i32 s10, s10, 0x30000 -; SI-NEXT: v_or_b32_e32 v0, v34, v0 +; SI-NEXT: v_or_b32_e32 v0, v25, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: v_add_i32_e32 v23, vcc, 
0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -31004,66 +31038,64 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB51_4: -; SI-NEXT: v_mov_b32_e32 v43, v34 -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v44, v25 -; SI-NEXT: v_mov_b32_e32 v45, v29 -; SI-NEXT: v_mov_b32_e32 v46, v27 -; SI-NEXT: v_mov_b32_e32 v47, v33 -; SI-NEXT: v_mov_b32_e32 v56, v32 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, v49 -; SI-NEXT: v_mov_b32_e32 v49, v39 -; SI-NEXT: v_mov_b32_e32 v39, v37 -; SI-NEXT: v_mov_b32_e32 v37, v35 -; SI-NEXT: v_mov_b32_e32 v35, v26 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v48 -; SI-NEXT: v_mov_b32_e32 v48, v38 -; SI-NEXT: v_mov_b32_e32 v38, v36 -; SI-NEXT: v_mov_b32_e32 v36, v24 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: v_mov_b32_e32 v42, v41 -; SI-NEXT: v_mov_b32_e32 v41, v57 -; SI-NEXT: v_mov_b32_e32 v57, v31 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v26, v35 -; SI-NEXT: v_mov_b32_e32 v35, v37 -; SI-NEXT: v_mov_b32_e32 v37, v39 -; SI-NEXT: v_mov_b32_e32 v39, v49 -; SI-NEXT: v_mov_b32_e32 v49, v40 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v25, v44 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v31, v57 +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v46, v51 +; SI-NEXT: v_mov_b32_e32 v51, v39 +; SI-NEXT: v_mov_b32_e32 v39, v34 +; SI-NEXT: v_mov_b32_e32 v34, v30 +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v47, v52 +; SI-NEXT: v_mov_b32_e32 v52, v48 +; SI-NEXT: v_mov_b32_e32 v48, v35 +; SI-NEXT: v_mov_b32_e32 v35, v28 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v57 ; SI-NEXT: v_mov_b32_e32 v57, v41 -; SI-NEXT: v_mov_b32_e32 v41, v42 -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v24, v36 -; SI-NEXT: v_mov_b32_e32 v36, v38 -; SI-NEXT: v_mov_b32_e32 v38, v48 -; SI-NEXT: v_mov_b32_e32 v48, v50 -; SI-NEXT: v_mov_b32_e32 v50, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, v32 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v41, v49 +; SI-NEXT: v_mov_b32_e32 v49, v36 +; SI-NEXT: v_mov_b32_e32 v36, v26 +; SI-NEXT: v_mov_b32_e32 v42, v50 +; SI-NEXT: v_mov_b32_e32 v50, v37 +; SI-NEXT: v_mov_b32_e32 v37, v24 +; SI-NEXT: v_mov_b32_e32 v33, v32 ; SI-NEXT: v_mov_b32_e32 v32, v56 -; SI-NEXT: v_mov_b32_e32 v33, v47 -; SI-NEXT: v_mov_b32_e32 v27, v46 +; SI-NEXT: v_mov_b32_e32 v56, v40 +; SI-NEXT: v_mov_b32_e32 v40, v38 
+; SI-NEXT: v_mov_b32_e32 v38, v31 +; SI-NEXT: v_mov_b32_e32 v43, v25 +; SI-NEXT: v_mov_b32_e32 v44, v27 +; SI-NEXT: v_mov_b32_e32 v53, v45 +; SI-NEXT: v_mov_b32_e32 v45, v29 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: v_mov_b32_e32 v26, v36 +; SI-NEXT: v_mov_b32_e32 v36, v49 +; SI-NEXT: v_mov_b32_e32 v49, v41 +; SI-NEXT: v_mov_b32_e32 v41, v57 +; SI-NEXT: v_mov_b32_e32 v57, v55 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v34 +; SI-NEXT: v_mov_b32_e32 v34, v39 +; SI-NEXT: v_mov_b32_e32 v39, v51 +; SI-NEXT: v_mov_b32_e32 v51, v46 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: v_mov_b32_e32 v29, v45 -; SI-NEXT: v_mov_b32_e32 v34, v43 +; SI-NEXT: v_mov_b32_e32 v45, v53 +; SI-NEXT: v_mov_b32_e32 v27, v44 +; SI-NEXT: v_mov_b32_e32 v25, v43 +; SI-NEXT: v_mov_b32_e32 v31, v38 +; SI-NEXT: v_mov_b32_e32 v38, v40 +; SI-NEXT: v_mov_b32_e32 v40, v56 +; SI-NEXT: v_mov_b32_e32 v56, v32 +; SI-NEXT: v_mov_b32_e32 v32, v33 +; SI-NEXT: v_mov_b32_e32 v24, v37 +; SI-NEXT: v_mov_b32_e32 v37, v50 +; SI-NEXT: v_mov_b32_e32 v50, v42 +; SI-NEXT: v_mov_b32_e32 v28, v35 +; SI-NEXT: v_mov_b32_e32 v35, v48 +; SI-NEXT: v_mov_b32_e32 v48, v52 +; SI-NEXT: v_mov_b32_e32 v52, v47 ; SI-NEXT: s_branch .LBB51_2 ; ; VI-LABEL: bitcast_v48i16_to_v12f64_scalar: @@ -38404,13 +38436,13 @@ define <48 x i16> @bitcast_v48f16_to_v48i16(<48 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v54, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 ; SI-NEXT: v_cvt_f16_f32_e32 v26, v30 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cvt_f16_f32_e32 v14, v45 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v47 ; SI-NEXT: v_cvt_f16_f32_e32 v25, v56 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v57 ; SI-NEXT: s_waitcnt vmcnt(13) @@ -38455,74 +38487,83 @@ define <48 x i16> @bitcast_v48f16_to_v48i16(<48 x half> %a, i32 %b) { ; SI-NEXT: s_xor_b64 exec, exec, s[4:5] ; SI-NEXT: s_cbranch_execz .LBB58_2 ; SI-NEXT: ; %bb.1: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v29, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v43 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v60 +; SI-NEXT: v_mov_b32_e32 v35, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v43 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 ; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 ; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v29 -; SI-NEXT: v_or_b32_e32 v60, v30, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v26 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v30 -; SI-NEXT: v_or_b32_e32 v58, v33, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v40 -; SI-NEXT: 
v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_or_b32_e32 v60, v28, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v55 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v52 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v28 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29 +; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v40 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v54 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v52 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v54 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v33 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_or_b32_e32 v52, v35, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v25 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v57 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v23 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v53 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v59 -; SI-NEXT: v_or_b32_e32 v57, v33, v23 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v34 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v31 +; SI-NEXT: v_or_b32_e32 v52, v32, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v35 +; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v25 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v46 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v33 +; SI-NEXT: v_or_b32_e32 v49, v31, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v57 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v61 +; SI-NEXT: v_or_b32_e32 v35, v31, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v23 +; 
SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v56 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_or_b32_e32 v57, v32, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v34 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 ; SI-NEXT: v_or_b32_e32 v56, v24, v22 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v33 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v31 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 @@ -38544,7 +38585,7 @@ define <48 x i16> @bitcast_v48f16_to_v48i16(<48 x half> %a, i32 %b) { ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v32 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 @@ -38557,7 +38598,7 @@ define <48 x i16> @bitcast_v48f16_to_v48i16(<48 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v38 ; SI-NEXT: v_or_b32_e32 v59, v2, v24 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 @@ -38580,121 +38621,115 @@ define <48 x i16> @bitcast_v48f16_to_v48i16(<48 x half> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v36 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v36 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 ; SI-NEXT: v_or_b32_e32 v14, v14, v24 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v30, v58 ; SI-NEXT: v_or_b32_e32 v20, v20, v24 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v18 ; SI-NEXT: v_or_b32_e32 v19, v19, v24 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v16 ; SI-NEXT: v_or_b32_e32 v17, v17, v24 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v15 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v35 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v37 -; SI-NEXT: v_or_b32_e32 v38, v33, v24 +; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v32 +; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v33 +; SI-NEXT: v_or_b32_e32 v38, v31, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v42 +; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v36 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_or_b32_e32 v37, v35, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v51 +; SI-NEXT: v_or_b32_e32 v37, v32, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v51 ; SI-NEXT: v_cvt_f16_f32_e32 v39, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v48 +; SI-NEXT: v_or_b32_e32 v58, v30, v28 ; 
SI-NEXT: v_cvt_f32_f16_e32 v30, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v50 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 ; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 ; SI-NEXT: v_cvt_f16_f32_e32 v48, v24 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v32 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v39 -; SI-NEXT: v_or_b32_e32 v51, v33, v35 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v48 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v39 +; SI-NEXT: v_or_b32_e32 v51, v31, v32 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v48 ; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_or_b32_e32 v50, v24, v33 +; SI-NEXT: v_or_b32_e32 v50, v24, v31 ; SI-NEXT: v_or_b32_e32 v8, v8, v29 ; SI-NEXT: v_or_b32_e32 v7, v7, v30 ; SI-NEXT: v_or_b32_e32 v6, v6, v55 ; SI-NEXT: v_or_b32_e32 v21, v21, v45 -; SI-NEXT: v_or_b32_e32 v28, v28, v25 -; SI-NEXT: v_or_b32_e32 v27, v27, v46 -; SI-NEXT: v_alignbit_b32 v44, v50, v31, 16 -; SI-NEXT: v_alignbit_b32 v43, v51, v32, 16 +; SI-NEXT: v_alignbit_b32 v44, v50, v27, 16 +; SI-NEXT: v_alignbit_b32 v43, v51, v28, 16 ; SI-NEXT: v_alignbit_b32 v42, v37, v29, 16 +; SI-NEXT: v_mov_b32_e32 v29, v49 ; SI-NEXT: v_alignbit_b32 v41, v38, v30, 16 ; SI-NEXT: v_alignbit_b32 v40, v17, v55, 16 ; SI-NEXT: v_alignbit_b32 v55, v19, v45, 16 ; SI-NEXT: v_alignbit_b32 v54, v20, v26, 16 ; SI-NEXT: v_alignbit_b32 v26, v14, v25, 16 ; SI-NEXT: v_alignbit_b32 v25, v10, v46, 16 +; SI-NEXT: v_mov_b32_e32 v46, v35 ; SI-NEXT: v_alignbit_b32 v24, v11, v23, 16 ; SI-NEXT: v_alignbit_b32 v23, v5, v22, 16 ; SI-NEXT: v_alignbit_b32 v22, v59, v47, 16 ; SI-NEXT: .LBB58_2: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v60 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v44 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: buffer_store_dword v29, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v48 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v58 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v43 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v51 
-; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v39 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v60 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v44 +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v50 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v48 +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v58 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v43 +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_add_i32_e32 v28, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v51 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v39 +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_add_i32_e32 v28, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v42 -; SI-NEXT: v_or_b32_e32 v8, v8, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v8, v29, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v42 +; SI-NEXT: v_or_b32_e32 v8, v8, v27 +; SI-NEXT: v_add_i32_e32 v27, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v8, v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v8, 0xffff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v36 -; SI-NEXT: v_or_b32_e32 v8, v8, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v8, v29, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v36 +; SI-NEXT: v_or_b32_e32 v8, v8, v27 +; SI-NEXT: v_add_i32_e32 v27, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v8, v27, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41 @@ -38744,7 +38779,7 @@ define <48 x i16> @bitcast_v48f16_to_v48i16(<48 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v28 +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v29 ; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v26 ; SI-NEXT: v_or_b32_e32 v6, v6, v7 ; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 @@ -38756,7 +38791,7 @@ define <48 x i16> @bitcast_v48f16_to_v48i16(<48 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v7, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v27 +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v46 ; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v25 ; SI-NEXT: v_or_b32_e32 v6, v6, v7 ; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 @@ -39282,428 +39317,464 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i ; SI-LABEL: bitcast_v48f16_to_v48i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], 
s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:8 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:12 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:16 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v18 +; SI-NEXT: 
v_cvt_f16_f32_e32 v51, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v62, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v42, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v59, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v58, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v57, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v46, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v26, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v27, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v29, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v18, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v28, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v54, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v52, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v50, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v29, s29 ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v40 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f16_f32_e32 v26, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v32 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v42 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v20 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v43 -; SI-NEXT: v_cvt_f16_f32_e32 v40, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v35 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v30 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v53 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v55 -; SI-NEXT: v_cvt_f16_f32_e32 
v53, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v53 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v51 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v29 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v50 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v57 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v59 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v62 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v59, v3, v19 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v62, v1, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v55 +; SI-NEXT: v_or_b32_e32 v57, v3, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v44 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 +; SI-NEXT: v_or_b32_e32 v1, v1, v21 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], 
s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v35, v5, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v11 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v3, v3, v13 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v3, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v53 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_or_b32_e32 v33, v5, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v32 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_or_b32_e32 v32, v1, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v30 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_or_b32_e32 v29, v3, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v39 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v28, v26, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v31 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_or_b32_e32 v27, v1, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v43 ; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v25 +; SI-NEXT: v_or_b32_e32 v44, v26, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v43 +; SI-NEXT: v_or_b32_e32 v2, v2, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v40 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v41 +; SI-NEXT: v_or_b32_e32 v4, v4, v25 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v26 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v40 +; SI-NEXT: v_or_b32_e32 v6, v6, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v56 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v45 +; SI-NEXT: v_or_b32_e32 v8, v8, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v47 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v25, 
0x38000000, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v60 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v25 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_or_b32_e32 v2, v2, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v26 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v56 +; SI-NEXT: v_or_b32_e32 v10, v10, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v47 +; SI-NEXT: v_or_b32_e32 v12, v12, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v63 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v60 +; SI-NEXT: v_or_b32_e32 v14, v14, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_or_b32_e32 v5, v5, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_or_b32_e32 v11, v11, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v61 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_or_b32_e32 v10, v10, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_or_b32_e32 v14, v14, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v25 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_or_b32_e32 v35, v35, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v33 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_or_b32_e32 v34, 
v34, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v25 ; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v63 +; SI-NEXT: v_or_b32_e32 v18, v18, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v61 +; SI-NEXT: v_or_b32_e32 v22, v22, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v42 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v26 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_or_b32_e32 v38, v38, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_or_b32_e32 v49, v49, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v17 -; SI-NEXT: v_or_b32_e32 v18, v18, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v19 -; SI-NEXT: v_or_b32_e32 v23, v23, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v41 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v40 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v55 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v53 -; SI-NEXT: v_or_b32_e32 v22, v22, v50 -; SI-NEXT: v_or_b32_e32 v25, v25, v30 -; SI-NEXT: v_or_b32_e32 v24, v24, v29 -; SI-NEXT: v_or_b32_e32 v21, v21, v41 -; SI-NEXT: v_or_b32_e32 v16, v16, v28 -; SI-NEXT: v_or_b32_e32 v48, v48, v54 -; SI-NEXT: v_or_b32_e32 v39, v39, v42 -; SI-NEXT: v_or_b32_e32 v32, v32, v52 -; SI-NEXT: v_or_b32_e32 v31, v31, v51 -; SI-NEXT: v_or_b32_e32 v15, v15, v43 -; SI-NEXT: v_or_b32_e32 v8, v8, v27 -; SI-NEXT: v_or_b32_e32 v7, v7, v26 -; SI-NEXT: v_or_b32_e32 v6, v6, v44 -; SI-NEXT: v_alignbit_b32 v40, v22, v30, 16 -; SI-NEXT: v_alignbit_b32 v30, v23, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, v18, v41, 16 -; SI-NEXT: v_alignbit_b32 v28, v49, v28, 16 -; SI-NEXT: v_alignbit_b32 v55, v38, v54, 16 -; SI-NEXT: v_alignbit_b32 v54, v34, v42, 16 -; SI-NEXT: v_alignbit_b32 v53, v35, v52, 16 -; SI-NEXT: v_alignbit_b32 v52, v14, v51, 16 -; SI-NEXT: v_alignbit_b32 v51, v10, v43, 16 -; SI-NEXT: v_alignbit_b32 v50, v11, v27, 16 -; SI-NEXT: v_alignbit_b32 v27, v5, v26, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v44, 16 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; 
SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v25 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_or_b32_e32 v16, v16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v58 +; SI-NEXT: v_lshr_b64 v[50:51], v[15:16], 16 +; SI-NEXT: v_or_b32_e32 v20, v20, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v42 +; SI-NEXT: v_mov_b32_e32 v51, v29 +; SI-NEXT: v_lshr_b64 v[29:30], v[21:22], 16 +; SI-NEXT: v_or_b32_e32 v24, v24, v25 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[1:2], 16 +; SI-NEXT: v_mov_b32_e32 v31, v44 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[54:55], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[13:14], 16 +; SI-NEXT: v_mov_b32_e32 v55, v35 +; SI-NEXT: v_mov_b32_e32 v53, v32 +; SI-NEXT: v_mov_b32_e32 v49, v28 +; SI-NEXT: v_mov_b32_e32 v39, v27 +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_mov_b32_e32 v11, v33 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[3:4], 16 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v25, v25, v40 -; SI-NEXT: v_or_b32_e32 v20, v22, v20 -; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v25, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v62 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v30 -; SI-NEXT: v_or_b32_e32 v20, v20, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v42 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v52 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v59 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v29 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v17, v18, v17 -; SI-NEXT: v_add_i32_e32 v18, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v58 +; SI-NEXT: v_or_b32_e32 v1, 
v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v28 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v50 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v49 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v46 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(6) expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v29 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v55 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v38 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v37 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v55 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v39 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v54 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v38 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v53 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: 
v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_or_b32_e32 v12, v16, v12 -; SI-NEXT: v_add_i32_e32 v16, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v12, v16, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v36 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v52 -; SI-NEXT: v_or_b32_e32 v12, v12, v16 -; SI-NEXT: v_add_i32_e32 v16, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v12, v16, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v51 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 64, v0 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v56 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v50 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v8, v3 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v3, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v27 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v45 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: 
v_add_i32_e32 v4, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v49 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v26 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x58, v0 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v40 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v41 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v31 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v25 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], 
s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll index 97d040b545c09..29005a42d8860 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll @@ -3408,313 +3408,333 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v26i32_to_v52i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v14, s30, 0 +; SI-NEXT: v_writelane_b32 v14, s31, 1 +; SI-NEXT: v_writelane_b32 v14, s34, 2 +; SI-NEXT: v_writelane_b32 v14, s35, 3 +; SI-NEXT: v_writelane_b32 v14, s36, 4 +; SI-NEXT: v_writelane_b32 v14, s37, 5 +; SI-NEXT: v_writelane_b32 v14, s38, 6 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 -; SI-NEXT: v_readfirstlane_b32 s41, v1 -; SI-NEXT: v_readfirstlane_b32 s40, v2 -; SI-NEXT: v_readfirstlane_b32 s15, v3 -; SI-NEXT: v_readfirstlane_b32 s14, v4 -; SI-NEXT: v_readfirstlane_b32 s13, v5 -; SI-NEXT: v_readfirstlane_b32 s12, v6 -; SI-NEXT: v_readfirstlane_b32 s11, v7 -; SI-NEXT: v_readfirstlane_b32 s10, v8 -; SI-NEXT: v_readfirstlane_b32 s9, v9 -; SI-NEXT: v_readfirstlane_b32 s8, v10 -; SI-NEXT: v_readfirstlane_b32 s7, v11 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v12 +; SI-NEXT: v_writelane_b32 v14, s39, 7 +; SI-NEXT: v_readfirstlane_b32 s14, v1 +; SI-NEXT: v_readfirstlane_b32 s15, v2 +; SI-NEXT: v_readfirstlane_b32 s12, v3 +; SI-NEXT: v_readfirstlane_b32 s13, v4 +; SI-NEXT: v_readfirstlane_b32 s10, v5 +; SI-NEXT: v_readfirstlane_b32 s11, v6 +; SI-NEXT: v_readfirstlane_b32 s8, v7 +; SI-NEXT: v_readfirstlane_b32 s9, v8 +; SI-NEXT: v_readfirstlane_b32 s6, v9 +; SI-NEXT: v_readfirstlane_b32 s7, v10 +; SI-NEXT: v_readfirstlane_b32 s4, v11 +; SI-NEXT: s_and_b64 s[40:41], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v12 +; SI-NEXT: v_writelane_b32 v14, s48, 8 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s28 -; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v10, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v13, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s29, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s27, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s23, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s17, v13, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 16 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s44, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: 
s_lshr_b32 s46, s14, 16 -; SI-NEXT: s_lshr_b32 s47, s40, 16 -; SI-NEXT: s_lshr_b32 s56, s29, 16 -; SI-NEXT: s_lshr_b32 s57, s27, 16 -; SI-NEXT: s_lshr_b32 s58, s25, 16 -; SI-NEXT: s_lshr_b32 s59, s23, 16 -; SI-NEXT: s_lshr_b32 s60, s21, 16 -; SI-NEXT: s_lshr_b32 s61, s19, 16 -; SI-NEXT: s_lshr_b32 s62, s17, 16 +; SI-NEXT: s_lshr_b32 s92, s5, 16 +; SI-NEXT: s_lshr_b32 s93, s7, 16 +; SI-NEXT: s_lshr_b32 s94, s9, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 16 +; SI-NEXT: s_lshr_b32 s30, s13, 16 +; SI-NEXT: s_lshr_b32 s31, s15, 16 +; SI-NEXT: s_lshr_b32 s34, s29, 16 +; SI-NEXT: s_lshr_b32 s35, s27, 16 +; SI-NEXT: s_lshr_b32 s36, s25, 16 +; SI-NEXT: s_lshr_b32 s37, s23, 16 +; SI-NEXT: s_lshr_b32 s38, s21, 16 +; SI-NEXT: s_lshr_b32 s39, s19, 16 +; SI-NEXT: s_lshr_b32 s48, s17, 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s41, s41, 3 ; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s40, s40, 3 ; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s28 -; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v10, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v13, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s29, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s27, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s23, v10, 16 -; 
SI-NEXT: v_alignbit_b32 v11, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s17, v13, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 16 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s44, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: s_lshr_b32 s46, s14, 16 -; SI-NEXT: s_lshr_b32 s47, s40, 16 -; SI-NEXT: s_lshr_b32 s56, s29, 16 -; SI-NEXT: s_lshr_b32 s57, s27, 16 -; SI-NEXT: s_lshr_b32 s58, s25, 16 -; SI-NEXT: s_lshr_b32 s59, s23, 16 -; SI-NEXT: s_lshr_b32 s60, s21, 16 -; SI-NEXT: s_lshr_b32 s61, s19, 16 -; SI-NEXT: s_lshr_b32 s62, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b32 s92, s5, 16 +; SI-NEXT: s_lshr_b32 s93, s7, 16 +; SI-NEXT: s_lshr_b32 s94, s9, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 16 +; SI-NEXT: s_lshr_b32 s30, s13, 16 +; SI-NEXT: s_lshr_b32 s31, s15, 16 +; SI-NEXT: s_lshr_b32 s34, s29, 16 +; SI-NEXT: s_lshr_b32 s35, s27, 16 +; SI-NEXT: s_lshr_b32 s36, s25, 16 +; SI-NEXT: s_lshr_b32 s37, s23, 16 +; SI-NEXT: s_lshr_b32 s38, s21, 16 +; SI-NEXT: s_lshr_b32 s39, s19, 16 +; SI-NEXT: s_lshr_b32 s48, s17, 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: s_lshl_b32 s41, s88, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s41 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s78, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s76, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 
16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 
48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v4, v3, 
s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s95, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s93, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 
-; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s48, v14, 8 +; SI-NEXT: v_readlane_b32 s39, v14, 7 +; SI-NEXT: v_readlane_b32 s38, v14, 6 +; SI-NEXT: v_readlane_b32 s37, v14, 5 +; SI-NEXT: v_readlane_b32 s36, v14, 4 +; SI-NEXT: v_readlane_b32 s35, v14, 3 +; SI-NEXT: v_readlane_b32 s34, v14, 2 +; SI-NEXT: v_readlane_b32 s31, v14, 1 +; SI-NEXT: v_readlane_b32 s30, v14, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr37 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr31 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v26i32_to_v52i16_scalar: @@ -5490,116 +5510,119 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: v_mov_b32_e32 v47, v8 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_mov_b32_e32 v57, v6 +; SI-NEXT: v_mov_b32_e32 v32, v4 +; SI-NEXT: v_mov_b32_e32 v34, v2 +; SI-NEXT: v_mov_b32_e32 v37, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: 
buffer_load_dword v43, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20 -; SI-NEXT: v_mov_b32_e32 v33, v24 -; SI-NEXT: v_mov_b32_e32 v34, v22 -; SI-NEXT: v_mov_b32_e32 v35, v20 -; SI-NEXT: v_mov_b32_e32 v36, v18 -; SI-NEXT: v_mov_b32_e32 v37, v16 -; SI-NEXT: v_mov_b32_e32 v38, v14 -; SI-NEXT: v_mov_b32_e32 v39, v12 -; SI-NEXT: v_mov_b32_e32 v48, v10 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v29 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v62, v30 +; SI-NEXT: v_mov_b32_e32 v30, v24 +; SI-NEXT: v_mov_b32_e32 v38, v22 +; SI-NEXT: v_mov_b32_e32 v39, v20 +; SI-NEXT: v_mov_b32_e32 v48, v18 +; SI-NEXT: v_mov_b32_e32 v49, v16 +; SI-NEXT: v_mov_b32_e32 v50, v14 +; SI-NEXT: v_mov_b32_e32 v40, v12 +; SI-NEXT: v_mov_b32_e32 v41, v10 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v2 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v6 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v8 ; SI-NEXT: s_cbranch_scc0 .LBB15_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v63 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 -; SI-NEXT: v_or_b32_e32 v10, v0, v42 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 -; SI-NEXT: v_or_b32_e32 v11, v0, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_or_b32_e32 v7, v0, v31 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v9, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v10, v0, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v11, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v12, v0, v36 
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v13, v0, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v14, v0, v33 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_or_b32_e32 v15, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 -; SI-NEXT: v_or_b32_e32 v12, v0, v56 +; SI-NEXT: v_or_b32_e32 v16, v0, v54 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v13, v0, v47 +; SI-NEXT: v_or_b32_e32 v17, v0, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v14, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 -; SI-NEXT: v_or_b32_e32 v15, v0, v45 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 -; SI-NEXT: v_or_b32_e32 v16, v0, v44 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 -; SI-NEXT: v_or_b32_e32 v17, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v18, v0, v58 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 +; SI-NEXT: v_or_b32_e32 v18, v0, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v19, v0, v57 +; SI-NEXT: v_or_b32_e32 v19, v0, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v20, v0, v31 +; SI-NEXT: v_or_b32_e32 v20, v0, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v21, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 +; SI-NEXT: v_or_b32_e32 v21, v0, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v22, v0, v29 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v22, v0, v45 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_or_b32_e32 v23, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v23, v0, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v24, v0, v27 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v24, v0, v29 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v8, v1, v62 -; SI-NEXT: v_or_b32_e32 v25, v0, v32 +; SI-NEXT: v_or_b32_e32 v8, v1, v63 +; SI-NEXT: v_or_b32_e32 v25, v0, v27 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -5609,72 +5632,74 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB15_3 ; SI-NEXT: .LBB15_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_or_b32_e32 v0, v31, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 ; 
SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v0, v61, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v42, v0 +; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v53, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v56, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 ; SI-NEXT: 
s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 @@ -5684,13 +5709,13 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v51, v0 ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 @@ -5700,17 +5725,17 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: v_or_b32_e32 v0, v27, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34 ; SI-NEXT: s_or_b32 s9, s10, s9 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: s_add_i32 s4, s4, 0x30000 ; SI-NEXT: s_add_i32 s5, s5, 0x30000 ; SI-NEXT: s_add_i32 s6, s6, 0x30000 @@ -5718,7 +5743,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: s_add_i32 s8, s8, 0x30000 ; SI-NEXT: s_add_i32 s9, s9, 0x30000 ; SI-NEXT: s_add_i32 s10, s10, 0x30000 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_or_b32_e32 v0, v27, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -5748,85 +5773,87 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB15_4: -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v47, v43 +; SI-NEXT: v_mov_b32_e32 v43, v50 +; SI-NEXT: v_mov_b32_e32 v50, v38 +; SI-NEXT: v_mov_b32_e32 v38, v62 +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v62, v56 +; SI-NEXT: v_mov_b32_e32 v56, v44 +; SI-NEXT: v_mov_b32_e32 v44, v40 +; SI-NEXT: v_mov_b32_e32 v40, v39 +; SI-NEXT: v_mov_b32_e32 v39, v28 +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v59, v45 +; SI-NEXT: v_mov_b32_e32 v45, v41 +; SI-NEXT: v_mov_b32_e32 v41, v48 +; SI-NEXT: v_mov_b32_e32 v48, v26 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v60, v52 +; SI-NEXT: v_mov_b32_e32 v52, v46 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mov_b32_e32 v46, v42 +; SI-NEXT: 
v_mov_b32_e32 v42, v49 +; SI-NEXT: v_mov_b32_e32 v49, v30 +; SI-NEXT: v_mov_b32_e32 v61, v63 ; SI-NEXT: v_mov_b32_e32 v63, v57 -; SI-NEXT: v_mov_b32_e32 v57, v32 -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v50 -; SI-NEXT: v_mov_b32_e32 v50, v39 -; SI-NEXT: v_mov_b32_e32 v39, v36 -; SI-NEXT: v_mov_b32_e32 v36, v33 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v40 -; SI-NEXT: v_mov_b32_e32 v40, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) -; SI-NEXT: v_mov_b32_e32 v41, v49 -; SI-NEXT: v_mov_b32_e32 v49, v38 -; SI-NEXT: v_mov_b32_e32 v38, v35 -; SI-NEXT: v_mov_b32_e32 v35, v26 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_mov_b32_e32 v56, v45 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v47, v44 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v46, v43 -; SI-NEXT: v_mov_b32_e32 v45, v58 -; SI-NEXT: v_mov_b32_e32 v58, v27 -; SI-NEXT: v_mov_b32_e32 v44, v60 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: v_mov_b32_e32 v43, v62 -; SI-NEXT: v_mov_b32_e32 v62, v31 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v31, v62 -; SI-NEXT: v_mov_b32_e32 v62, v43 -; SI-NEXT: v_mov_b32_e32 v29, v60 -; SI-NEXT: v_mov_b32_e32 v60, v44 -; SI-NEXT: v_mov_b32_e32 v27, v58 -; SI-NEXT: v_mov_b32_e32 v58, v45 -; SI-NEXT: v_mov_b32_e32 v43, v46 -; SI-NEXT: v_mov_b32_e32 v44, v47 -; SI-NEXT: v_mov_b32_e32 v45, v56 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v26, v35 -; SI-NEXT: v_mov_b32_e32 v35, v38 -; SI-NEXT: v_mov_b32_e32 v38, v49 -; SI-NEXT: v_mov_b32_e32 v49, v41 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v34, v37 -; SI-NEXT: v_mov_b32_e32 v37, v48 -; SI-NEXT: v_mov_b32_e32 v48, v40 -; SI-NEXT: v_mov_b32_e32 v40, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 +; SI-NEXT: v_mov_b32_e32 v57, v27 +; SI-NEXT: v_mov_b32_e32 v53, v37 +; SI-NEXT: v_mov_b32_e32 v37, v36 +; SI-NEXT: v_mov_b32_e32 v36, v35 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v33 +; SI-NEXT: v_mov_b32_e32 v33, v55 ; SI-NEXT: v_mov_b32_e32 v55, v32 -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v33, v36 -; SI-NEXT: v_mov_b32_e32 v36, v39 -; SI-NEXT: v_mov_b32_e32 v39, v50 -; SI-NEXT: v_mov_b32_e32 v50, v42 -; 
SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v32, v57 +; SI-NEXT: v_mov_b32_e32 v32, v54 +; SI-NEXT: v_mov_b32_e32 v54, v58 +; SI-NEXT: v_mov_b32_e32 v58, v51 +; SI-NEXT: v_mov_b32_e32 v51, v29 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: v_mov_b32_e32 v29, v51 +; SI-NEXT: v_mov_b32_e32 v51, v58 +; SI-NEXT: v_mov_b32_e32 v58, v54 +; SI-NEXT: v_mov_b32_e32 v54, v32 +; SI-NEXT: v_mov_b32_e32 v32, v55 +; SI-NEXT: v_mov_b32_e32 v55, v33 +; SI-NEXT: v_mov_b32_e32 v33, v34 +; SI-NEXT: v_mov_b32_e32 v34, v35 +; SI-NEXT: v_mov_b32_e32 v35, v36 +; SI-NEXT: v_mov_b32_e32 v36, v37 +; SI-NEXT: v_mov_b32_e32 v37, v53 +; SI-NEXT: v_mov_b32_e32 v27, v57 ; SI-NEXT: v_mov_b32_e32 v57, v63 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v63, v61 +; SI-NEXT: v_mov_b32_e32 v30, v49 +; SI-NEXT: v_mov_b32_e32 v49, v42 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: v_mov_b32_e32 v46, v52 +; SI-NEXT: v_mov_b32_e32 v52, v60 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v26, v48 +; SI-NEXT: v_mov_b32_e32 v48, v41 +; SI-NEXT: v_mov_b32_e32 v41, v45 +; SI-NEXT: v_mov_b32_e32 v45, v59 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v28, v39 +; SI-NEXT: v_mov_b32_e32 v39, v40 +; SI-NEXT: v_mov_b32_e32 v40, v44 +; SI-NEXT: v_mov_b32_e32 v44, v56 +; SI-NEXT: v_mov_b32_e32 v56, v62 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v62, v38 +; SI-NEXT: v_mov_b32_e32 v38, v50 +; SI-NEXT: v_mov_b32_e32 v50, v43 +; SI-NEXT: v_mov_b32_e32 v43, v47 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB15_2 ; ; VI-LABEL: bitcast_v52i16_to_v26i32_scalar: @@ -13639,211 +13666,217 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 -; SI-NEXT: v_mov_b32_e32 v28, s16 -; SI-NEXT: v_mov_b32_e32 v27, s17 -; SI-NEXT: v_mov_b32_e32 v25, s18 +; SI-NEXT: v_mov_b32_e32 v25, s16 +; SI-NEXT: v_mov_b32_e32 v26, s17 +; SI-NEXT: v_mov_b32_e32 v23, s18 ; SI-NEXT: v_mov_b32_e32 v24, s19 -; SI-NEXT: v_mov_b32_e32 v21, s20 -; SI-NEXT: v_mov_b32_e32 v19, s21 -; SI-NEXT: v_mov_b32_e32 v22, s22 +; SI-NEXT: v_mov_b32_e32 v19, s20 +; SI-NEXT: v_mov_b32_e32 v20, s21 +; SI-NEXT: v_mov_b32_e32 v21, s22 +; SI-NEXT: v_mov_b32_e32 v22, s23 +; SI-NEXT: v_mov_b32_e32 v17, s24 +; SI-NEXT: v_mov_b32_e32 v18, s25 +; SI-NEXT: v_mov_b32_e32 v15, s26 +; SI-NEXT: v_mov_b32_e32 v16, s27 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v20, s23 -; SI-NEXT: v_mov_b32_e32 v18, s24 -; SI-NEXT: v_mov_b32_e32 v17, s25 -; SI-NEXT: v_mov_b32_e32 v16, s26 -; SI-NEXT: v_mov_b32_e32 v15, s27 -; SI-NEXT: v_mov_b32_e32 v14, s28 -; SI-NEXT: v_mov_b32_e32 v13, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, 
s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s28 +; SI-NEXT: v_mov_b32_e32 v14, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v23, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v26, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v31, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v35, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v37, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v48, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v50, v20, v22, 16 -; SI-NEXT: v_alignbit_b32 v52, v19, v21, 16 -; SI-NEXT: v_alignbit_b32 v54, v24, v25, 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v40, v27, v28, 16 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v18 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v26 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: 
v_lshr_b64 v[49:50], v[25:26], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_lshr_b64 v[27:28], v[11:12], 16 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_lshr_b64 v[28:29], v[9:10], 16 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 +; SI-NEXT: v_lshr_b64 v[29:30], v[7:8], 16 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 ; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 ; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_lshr_b64 v[32:33], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 +; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 ; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 ; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_alignbit_b32 v23, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v26, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v31, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v35, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v37, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v48, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v50, v20, v22, 16 -; SI-NEXT: v_alignbit_b32 v52, v19, v21, 16 -; SI-NEXT: v_alignbit_b32 v54, v24, v25, 16 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[25:26], 16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: 
v_alignbit_b32 v40, v27, v28, 16 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v18 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v26 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_or_b32_e32 v28, v28, v40 -; SI-NEXT: buffer_store_dword v28, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v44 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v49 ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v25, v25, v35 +; SI-NEXT: buffer_store_dword v25, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v54 -; SI-NEXT: v_or_b32_e32 v25, v25, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v47 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v43 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v48 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v23, v23, v25 +; SI-NEXT: v_add_i32_e32 v25, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v52 -; SI-NEXT: v_or_b32_e32 v21, v21, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v21, v24, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v46 +; SI-NEXT: v_or_b32_e32 v23, v23, v24 +; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v42 -; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v50 -; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, 
v38 +; SI-NEXT: v_or_b32_e32 v19, v19, v23 +; SI-NEXT: v_add_i32_e32 v23, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v19, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 +; SI-NEXT: v_or_b32_e32 v19, v19, v20 +; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v37 +; SI-NEXT: v_or_b32_e32 v19, v19, v20 +; SI-NEXT: v_add_i32_e32 v20, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v44 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v34 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v55 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v43 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v37 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v53 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v42 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v35 -; SI-NEXT: v_or_b32_e32 v14, v14, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v36 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v51 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v41 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32 ; SI-NEXT: v_or_b32_e32 v1, v1, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v13, 
s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -13855,7 +13888,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -13867,7 +13900,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -13879,68 +13912,71 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v28 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_load_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v26f32_to_v52i16_scalar: @@ -15715,116 +15751,119 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: v_mov_b32_e32 v47, v8 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_mov_b32_e32 v57, v6 +; SI-NEXT: v_mov_b32_e32 v32, v4 +; SI-NEXT: v_mov_b32_e32 v34, v2 +; SI-NEXT: v_mov_b32_e32 v37, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20 -; SI-NEXT: v_mov_b32_e32 v33, v24 -; SI-NEXT: v_mov_b32_e32 v34, v22 -; SI-NEXT: v_mov_b32_e32 v35, v20 -; SI-NEXT: v_mov_b32_e32 v36, v18 -; SI-NEXT: v_mov_b32_e32 v37, v16 -; SI-NEXT: v_mov_b32_e32 v38, v14 -; SI-NEXT: 
v_mov_b32_e32 v39, v12 -; SI-NEXT: v_mov_b32_e32 v48, v10 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v29 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v62, v30 +; SI-NEXT: v_mov_b32_e32 v30, v24 +; SI-NEXT: v_mov_b32_e32 v38, v22 +; SI-NEXT: v_mov_b32_e32 v39, v20 +; SI-NEXT: v_mov_b32_e32 v48, v18 +; SI-NEXT: v_mov_b32_e32 v49, v16 +; SI-NEXT: v_mov_b32_e32 v50, v14 +; SI-NEXT: v_mov_b32_e32 v40, v12 +; SI-NEXT: v_mov_b32_e32 v41, v10 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v2 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v6 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v8 ; SI-NEXT: s_cbranch_scc0 .LBB31_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v63 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 -; SI-NEXT: v_or_b32_e32 v10, v0, v42 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 -; SI-NEXT: v_or_b32_e32 v11, v0, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_or_b32_e32 v7, v0, v31 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v9, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v10, v0, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v11, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v12, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v13, v0, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v14, v0, v33 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_or_b32_e32 v15, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 -; SI-NEXT: v_or_b32_e32 v12, v0, v56 +; SI-NEXT: v_or_b32_e32 v16, v0, v54 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, 
v39 -; SI-NEXT: v_or_b32_e32 v13, v0, v47 +; SI-NEXT: v_or_b32_e32 v17, v0, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v14, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 -; SI-NEXT: v_or_b32_e32 v15, v0, v45 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 -; SI-NEXT: v_or_b32_e32 v16, v0, v44 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 -; SI-NEXT: v_or_b32_e32 v17, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v18, v0, v58 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 +; SI-NEXT: v_or_b32_e32 v18, v0, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v19, v0, v57 +; SI-NEXT: v_or_b32_e32 v19, v0, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v20, v0, v31 +; SI-NEXT: v_or_b32_e32 v20, v0, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v21, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 +; SI-NEXT: v_or_b32_e32 v21, v0, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v22, v0, v29 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v22, v0, v45 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_or_b32_e32 v23, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v23, v0, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v24, v0, v27 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v24, v0, v29 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v8, v1, v62 -; SI-NEXT: v_or_b32_e32 v25, v0, v32 +; SI-NEXT: v_or_b32_e32 v8, v1, v63 +; SI-NEXT: v_or_b32_e32 v25, v0, v27 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -15834,72 +15873,74 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB31_3 ; SI-NEXT: .LBB31_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_or_b32_e32 v0, v31, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v0, v61, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v42, v0 +; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: 
v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v53, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v56, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 @@ -15909,13 +15950,13 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a, ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v51, v0 ; SI-NEXT: s_or_b32 s6, 
s7, s6 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 @@ -15925,17 +15966,17 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a, ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: v_or_b32_e32 v0, v27, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34 ; SI-NEXT: s_or_b32 s9, s10, s9 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: s_add_i32 s4, s4, 0x30000 ; SI-NEXT: s_add_i32 s5, s5, 0x30000 ; SI-NEXT: s_add_i32 s6, s6, 0x30000 @@ -15943,7 +15984,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a, ; SI-NEXT: s_add_i32 s8, s8, 0x30000 ; SI-NEXT: s_add_i32 s9, s9, 0x30000 ; SI-NEXT: s_add_i32 s10, s10, 0x30000 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_or_b32_e32 v0, v27, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -15973,85 +16014,87 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB31_4: -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v47, v43 +; SI-NEXT: v_mov_b32_e32 v43, v50 +; SI-NEXT: v_mov_b32_e32 v50, v38 +; SI-NEXT: v_mov_b32_e32 v38, v62 +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v62, v56 +; SI-NEXT: v_mov_b32_e32 v56, v44 +; SI-NEXT: v_mov_b32_e32 v44, v40 +; SI-NEXT: v_mov_b32_e32 v40, v39 +; SI-NEXT: v_mov_b32_e32 v39, v28 +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v59, v45 +; SI-NEXT: v_mov_b32_e32 v45, v41 +; SI-NEXT: v_mov_b32_e32 v41, v48 +; SI-NEXT: v_mov_b32_e32 v48, v26 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v60, v52 +; SI-NEXT: v_mov_b32_e32 v52, v46 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mov_b32_e32 v46, v42 +; SI-NEXT: v_mov_b32_e32 v42, v49 +; SI-NEXT: v_mov_b32_e32 v49, v30 +; SI-NEXT: v_mov_b32_e32 v61, v63 ; SI-NEXT: v_mov_b32_e32 v63, v57 -; SI-NEXT: v_mov_b32_e32 v57, v32 -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v50 -; SI-NEXT: v_mov_b32_e32 v50, v39 -; SI-NEXT: v_mov_b32_e32 v39, v36 -; SI-NEXT: 
v_mov_b32_e32 v36, v33 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v40 -; SI-NEXT: v_mov_b32_e32 v40, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) -; SI-NEXT: v_mov_b32_e32 v41, v49 -; SI-NEXT: v_mov_b32_e32 v49, v38 -; SI-NEXT: v_mov_b32_e32 v38, v35 -; SI-NEXT: v_mov_b32_e32 v35, v26 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_mov_b32_e32 v56, v45 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v47, v44 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v46, v43 -; SI-NEXT: v_mov_b32_e32 v45, v58 -; SI-NEXT: v_mov_b32_e32 v58, v27 -; SI-NEXT: v_mov_b32_e32 v44, v60 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: v_mov_b32_e32 v43, v62 -; SI-NEXT: v_mov_b32_e32 v62, v31 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v31, v62 -; SI-NEXT: v_mov_b32_e32 v62, v43 -; SI-NEXT: v_mov_b32_e32 v29, v60 -; SI-NEXT: v_mov_b32_e32 v60, v44 -; SI-NEXT: v_mov_b32_e32 v27, v58 -; SI-NEXT: v_mov_b32_e32 v58, v45 -; SI-NEXT: v_mov_b32_e32 v43, v46 -; SI-NEXT: v_mov_b32_e32 v44, v47 -; SI-NEXT: v_mov_b32_e32 v45, v56 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v26, v35 -; SI-NEXT: v_mov_b32_e32 v35, v38 -; SI-NEXT: v_mov_b32_e32 v38, v49 -; SI-NEXT: v_mov_b32_e32 v49, v41 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v34, v37 -; SI-NEXT: v_mov_b32_e32 v37, v48 -; SI-NEXT: v_mov_b32_e32 v48, v40 -; SI-NEXT: v_mov_b32_e32 v40, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 +; SI-NEXT: v_mov_b32_e32 v57, v27 +; SI-NEXT: v_mov_b32_e32 v53, v37 +; SI-NEXT: v_mov_b32_e32 v37, v36 +; SI-NEXT: v_mov_b32_e32 v36, v35 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v33 +; SI-NEXT: v_mov_b32_e32 v33, v55 ; SI-NEXT: v_mov_b32_e32 v55, v32 -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v33, v36 -; SI-NEXT: v_mov_b32_e32 v36, v39 -; SI-NEXT: v_mov_b32_e32 v39, v50 -; SI-NEXT: v_mov_b32_e32 v50, v42 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v32, v57 +; SI-NEXT: v_mov_b32_e32 v32, v54 +; SI-NEXT: v_mov_b32_e32 v54, v58 +; SI-NEXT: v_mov_b32_e32 v58, v51 +; SI-NEXT: v_mov_b32_e32 v51, v29 +; SI-NEXT: ; implicit-def: 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: v_mov_b32_e32 v29, v51 +; SI-NEXT: v_mov_b32_e32 v51, v58 +; SI-NEXT: v_mov_b32_e32 v58, v54 +; SI-NEXT: v_mov_b32_e32 v54, v32 +; SI-NEXT: v_mov_b32_e32 v32, v55 +; SI-NEXT: v_mov_b32_e32 v55, v33 +; SI-NEXT: v_mov_b32_e32 v33, v34 +; SI-NEXT: v_mov_b32_e32 v34, v35 +; SI-NEXT: v_mov_b32_e32 v35, v36 +; SI-NEXT: v_mov_b32_e32 v36, v37 +; SI-NEXT: v_mov_b32_e32 v37, v53 +; SI-NEXT: v_mov_b32_e32 v27, v57 ; SI-NEXT: v_mov_b32_e32 v57, v63 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v63, v61 +; SI-NEXT: v_mov_b32_e32 v30, v49 +; SI-NEXT: v_mov_b32_e32 v49, v42 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: v_mov_b32_e32 v46, v52 +; SI-NEXT: v_mov_b32_e32 v52, v60 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v26, v48 +; SI-NEXT: v_mov_b32_e32 v48, v41 +; SI-NEXT: v_mov_b32_e32 v41, v45 +; SI-NEXT: v_mov_b32_e32 v45, v59 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v28, v39 +; SI-NEXT: v_mov_b32_e32 v39, v40 +; SI-NEXT: v_mov_b32_e32 v40, v44 +; SI-NEXT: v_mov_b32_e32 v44, v56 +; SI-NEXT: v_mov_b32_e32 v56, v62 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v62, v38 +; SI-NEXT: v_mov_b32_e32 v38, v50 +; SI-NEXT: v_mov_b32_e32 v50, v43 +; SI-NEXT: v_mov_b32_e32 v43, v47 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB31_2 ; ; VI-LABEL: bitcast_v52i16_to_v26f32_scalar: @@ -23059,313 +23102,333 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v13i64_to_v52i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v14, s30, 0 +; SI-NEXT: v_writelane_b32 v14, s31, 1 +; SI-NEXT: v_writelane_b32 v14, s34, 2 +; SI-NEXT: v_writelane_b32 v14, s35, 3 +; SI-NEXT: v_writelane_b32 v14, s36, 4 +; SI-NEXT: v_writelane_b32 v14, s37, 5 +; SI-NEXT: v_writelane_b32 v14, s38, 6 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 -; SI-NEXT: v_readfirstlane_b32 s41, v1 -; SI-NEXT: v_readfirstlane_b32 s40, v2 -; SI-NEXT: v_readfirstlane_b32 s15, v3 -; SI-NEXT: v_readfirstlane_b32 s14, v4 -; SI-NEXT: v_readfirstlane_b32 s13, v5 -; SI-NEXT: v_readfirstlane_b32 s12, v6 -; SI-NEXT: v_readfirstlane_b32 s11, v7 -; SI-NEXT: v_readfirstlane_b32 s10, v8 -; SI-NEXT: v_readfirstlane_b32 s9, v9 -; SI-NEXT: v_readfirstlane_b32 s8, v10 -; SI-NEXT: v_readfirstlane_b32 s7, v11 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v12 +; SI-NEXT: v_writelane_b32 v14, s39, 7 +; SI-NEXT: v_readfirstlane_b32 s14, v1 +; SI-NEXT: v_readfirstlane_b32 s15, v2 +; SI-NEXT: v_readfirstlane_b32 s12, v3 +; SI-NEXT: v_readfirstlane_b32 s13, v4 +; SI-NEXT: v_readfirstlane_b32 s10, v5 +; SI-NEXT: v_readfirstlane_b32 s11, v6 +; SI-NEXT: 
v_readfirstlane_b32 s8, v7 +; SI-NEXT: v_readfirstlane_b32 s9, v8 +; SI-NEXT: v_readfirstlane_b32 s6, v9 +; SI-NEXT: v_readfirstlane_b32 s7, v10 +; SI-NEXT: v_readfirstlane_b32 s4, v11 +; SI-NEXT: s_and_b64 s[40:41], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v12 +; SI-NEXT: v_writelane_b32 v14, s48, 8 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s28 -; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v10, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v13, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s29, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s27, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s23, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s17, v13, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 16 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s44, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: s_lshr_b32 s46, s14, 16 -; SI-NEXT: s_lshr_b32 s47, s40, 16 -; SI-NEXT: s_lshr_b32 s56, s29, 16 -; SI-NEXT: s_lshr_b32 s57, s27, 16 -; SI-NEXT: s_lshr_b32 s58, s25, 16 -; SI-NEXT: s_lshr_b32 s59, s23, 16 -; SI-NEXT: s_lshr_b32 s60, s21, 16 -; SI-NEXT: s_lshr_b32 s61, s19, 16 -; SI-NEXT: s_lshr_b32 s62, s17, 16 +; SI-NEXT: s_lshr_b32 s92, s5, 16 +; SI-NEXT: s_lshr_b32 s93, s7, 16 +; SI-NEXT: s_lshr_b32 s94, s9, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 16 +; SI-NEXT: s_lshr_b32 s30, s13, 16 +; SI-NEXT: s_lshr_b32 s31, s15, 16 +; SI-NEXT: s_lshr_b32 s34, s29, 16 +; SI-NEXT: s_lshr_b32 s35, s27, 16 +; SI-NEXT: s_lshr_b32 s36, s25, 16 +; SI-NEXT: s_lshr_b32 s37, s23, 16 +; SI-NEXT: s_lshr_b32 s38, s21, 16 +; SI-NEXT: s_lshr_b32 s39, s19, 16 +; SI-NEXT: s_lshr_b32 s48, s17, 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 
+; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s28 -; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v10, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v13, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s29, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s27, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s23, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s17, v13, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 16 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s44, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: s_lshr_b32 s46, s14, 16 -; SI-NEXT: s_lshr_b32 s47, s40, 16 -; SI-NEXT: s_lshr_b32 s56, s29, 16 -; SI-NEXT: s_lshr_b32 s57, s27, 16 -; SI-NEXT: s_lshr_b32 s58, s25, 16 -; SI-NEXT: s_lshr_b32 s59, s23, 16 -; SI-NEXT: s_lshr_b32 s60, s21, 16 -; SI-NEXT: s_lshr_b32 s61, s19, 16 -; SI-NEXT: s_lshr_b32 s62, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s92, s5, 16 +; SI-NEXT: s_lshr_b32 s93, s7, 16 +; SI-NEXT: s_lshr_b32 s94, s9, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 16 +; SI-NEXT: s_lshr_b32 s30, s13, 16 +; SI-NEXT: s_lshr_b32 s31, s15, 16 +; SI-NEXT: s_lshr_b32 s34, s29, 16 +; SI-NEXT: s_lshr_b32 s35, s27, 16 +; SI-NEXT: s_lshr_b32 s36, s25, 16 +; SI-NEXT: s_lshr_b32 s37, s23, 16 +; SI-NEXT: s_lshr_b32 s38, s21, 16 +; SI-NEXT: s_lshr_b32 s39, s19, 16 +; SI-NEXT: s_lshr_b32 s48, s17, 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], 
s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: s_lshl_b32 s41, s88, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s41 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s78, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s76, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: 
v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; 
SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; 
SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s95, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s93, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s48, v14, 8 +; SI-NEXT: v_readlane_b32 s39, v14, 7 +; SI-NEXT: v_readlane_b32 s38, v14, 6 +; SI-NEXT: v_readlane_b32 s37, v14, 5 +; SI-NEXT: v_readlane_b32 s36, v14, 4 +; SI-NEXT: v_readlane_b32 s35, v14, 3 +; SI-NEXT: v_readlane_b32 s34, v14, 2 +; SI-NEXT: v_readlane_b32 s31, v14, 1 +; SI-NEXT: v_readlane_b32 s30, v14, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr37 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: 
$vgpr7 +; SI-NEXT: ; implicit-def: $sgpr31 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v13i64_to_v52i16_scalar: @@ -25141,116 +25204,119 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: v_mov_b32_e32 v47, v8 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_mov_b32_e32 v57, v6 +; SI-NEXT: v_mov_b32_e32 v32, v4 +; SI-NEXT: v_mov_b32_e32 v34, v2 +; SI-NEXT: v_mov_b32_e32 v37, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20 -; SI-NEXT: v_mov_b32_e32 v33, v24 -; SI-NEXT: v_mov_b32_e32 v34, v22 -; SI-NEXT: v_mov_b32_e32 v35, v20 -; SI-NEXT: v_mov_b32_e32 v36, v18 -; SI-NEXT: v_mov_b32_e32 v37, v16 -; SI-NEXT: v_mov_b32_e32 v38, v14 -; SI-NEXT: v_mov_b32_e32 v39, v12 -; SI-NEXT: v_mov_b32_e32 v48, v10 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v29 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v62, v30 +; SI-NEXT: v_mov_b32_e32 v30, v24 +; SI-NEXT: v_mov_b32_e32 v38, v22 +; SI-NEXT: v_mov_b32_e32 v39, v20 +; SI-NEXT: v_mov_b32_e32 v48, v18 +; SI-NEXT: v_mov_b32_e32 v49, v16 +; SI-NEXT: v_mov_b32_e32 v50, v14 +; SI-NEXT: v_mov_b32_e32 v40, v12 +; SI-NEXT: v_mov_b32_e32 v41, v10 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: s_waitcnt 
expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v2 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v6 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v8 ; SI-NEXT: s_cbranch_scc0 .LBB43_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v63 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 -; SI-NEXT: v_or_b32_e32 v10, v0, v42 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 -; SI-NEXT: v_or_b32_e32 v11, v0, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_or_b32_e32 v7, v0, v31 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v9, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v10, v0, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v11, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v12, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v13, v0, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v14, v0, v33 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_or_b32_e32 v15, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 -; SI-NEXT: v_or_b32_e32 v12, v0, v56 +; SI-NEXT: v_or_b32_e32 v16, v0, v54 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v13, v0, v47 +; SI-NEXT: v_or_b32_e32 v17, v0, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v14, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 -; SI-NEXT: v_or_b32_e32 v15, v0, v45 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 -; SI-NEXT: v_or_b32_e32 v16, v0, v44 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 -; SI-NEXT: v_or_b32_e32 v17, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v18, v0, v58 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 +; SI-NEXT: v_or_b32_e32 v18, v0, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v19, v0, v57 +; SI-NEXT: v_or_b32_e32 v19, v0, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v20, v0, v31 +; SI-NEXT: v_or_b32_e32 v20, v0, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, 
s23, 16 -; SI-NEXT: v_or_b32_e32 v21, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 +; SI-NEXT: v_or_b32_e32 v21, v0, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v22, v0, v29 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v22, v0, v45 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_or_b32_e32 v23, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v23, v0, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v24, v0, v27 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v24, v0, v29 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v8, v1, v62 -; SI-NEXT: v_or_b32_e32 v25, v0, v32 +; SI-NEXT: v_or_b32_e32 v8, v1, v63 +; SI-NEXT: v_or_b32_e32 v25, v0, v27 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -25260,72 +25326,74 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB43_3 ; SI-NEXT: .LBB43_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_or_b32_e32 v0, v31, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v0, v61, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v42, v0 +; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, 
v45, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v53, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v56, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 @@ -25335,13 +25403,13 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v51, v0 ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 @@ -25351,17 +25419,17 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: v_or_b32_e32 v0, v27, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34 ; SI-NEXT: s_or_b32 s9, s10, s9 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: s_add_i32 s4, s4, 0x30000 ; SI-NEXT: s_add_i32 s5, 
s5, 0x30000 ; SI-NEXT: s_add_i32 s6, s6, 0x30000 @@ -25369,7 +25437,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: s_add_i32 s8, s8, 0x30000 ; SI-NEXT: s_add_i32 s9, s9, 0x30000 ; SI-NEXT: s_add_i32 s10, s10, 0x30000 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_or_b32_e32 v0, v27, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -25399,85 +25467,87 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB43_4: -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v47, v43 +; SI-NEXT: v_mov_b32_e32 v43, v50 +; SI-NEXT: v_mov_b32_e32 v50, v38 +; SI-NEXT: v_mov_b32_e32 v38, v62 +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v62, v56 +; SI-NEXT: v_mov_b32_e32 v56, v44 +; SI-NEXT: v_mov_b32_e32 v44, v40 +; SI-NEXT: v_mov_b32_e32 v40, v39 +; SI-NEXT: v_mov_b32_e32 v39, v28 +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v59, v45 +; SI-NEXT: v_mov_b32_e32 v45, v41 +; SI-NEXT: v_mov_b32_e32 v41, v48 +; SI-NEXT: v_mov_b32_e32 v48, v26 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v60, v52 +; SI-NEXT: v_mov_b32_e32 v52, v46 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mov_b32_e32 v46, v42 +; SI-NEXT: v_mov_b32_e32 v42, v49 +; SI-NEXT: v_mov_b32_e32 v49, v30 +; SI-NEXT: v_mov_b32_e32 v61, v63 ; SI-NEXT: v_mov_b32_e32 v63, v57 -; SI-NEXT: v_mov_b32_e32 v57, v32 -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v50 -; SI-NEXT: v_mov_b32_e32 v50, v39 -; SI-NEXT: v_mov_b32_e32 v39, v36 -; SI-NEXT: v_mov_b32_e32 v36, v33 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v40 -; SI-NEXT: v_mov_b32_e32 v40, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) -; SI-NEXT: v_mov_b32_e32 v41, v49 -; SI-NEXT: v_mov_b32_e32 v49, v38 -; SI-NEXT: v_mov_b32_e32 v38, v35 -; SI-NEXT: v_mov_b32_e32 v35, v26 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_mov_b32_e32 v56, v45 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v47, v44 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v46, v43 -; SI-NEXT: v_mov_b32_e32 v45, v58 -; SI-NEXT: v_mov_b32_e32 v58, v27 
-; SI-NEXT: v_mov_b32_e32 v44, v60 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: v_mov_b32_e32 v43, v62 -; SI-NEXT: v_mov_b32_e32 v62, v31 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v31, v62 -; SI-NEXT: v_mov_b32_e32 v62, v43 -; SI-NEXT: v_mov_b32_e32 v29, v60 -; SI-NEXT: v_mov_b32_e32 v60, v44 -; SI-NEXT: v_mov_b32_e32 v27, v58 -; SI-NEXT: v_mov_b32_e32 v58, v45 -; SI-NEXT: v_mov_b32_e32 v43, v46 -; SI-NEXT: v_mov_b32_e32 v44, v47 -; SI-NEXT: v_mov_b32_e32 v45, v56 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v26, v35 -; SI-NEXT: v_mov_b32_e32 v35, v38 -; SI-NEXT: v_mov_b32_e32 v38, v49 -; SI-NEXT: v_mov_b32_e32 v49, v41 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v34, v37 -; SI-NEXT: v_mov_b32_e32 v37, v48 -; SI-NEXT: v_mov_b32_e32 v48, v40 -; SI-NEXT: v_mov_b32_e32 v40, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 +; SI-NEXT: v_mov_b32_e32 v57, v27 +; SI-NEXT: v_mov_b32_e32 v53, v37 +; SI-NEXT: v_mov_b32_e32 v37, v36 +; SI-NEXT: v_mov_b32_e32 v36, v35 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v33 +; SI-NEXT: v_mov_b32_e32 v33, v55 ; SI-NEXT: v_mov_b32_e32 v55, v32 -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v33, v36 -; SI-NEXT: v_mov_b32_e32 v36, v39 -; SI-NEXT: v_mov_b32_e32 v39, v50 -; SI-NEXT: v_mov_b32_e32 v50, v42 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v32, v57 +; SI-NEXT: v_mov_b32_e32 v32, v54 +; SI-NEXT: v_mov_b32_e32 v54, v58 +; SI-NEXT: v_mov_b32_e32 v58, v51 +; SI-NEXT: v_mov_b32_e32 v51, v29 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: v_mov_b32_e32 v29, v51 +; SI-NEXT: v_mov_b32_e32 v51, v58 +; SI-NEXT: v_mov_b32_e32 v58, v54 +; SI-NEXT: v_mov_b32_e32 v54, v32 +; SI-NEXT: v_mov_b32_e32 v32, v55 +; SI-NEXT: v_mov_b32_e32 v55, v33 +; SI-NEXT: v_mov_b32_e32 v33, v34 +; SI-NEXT: v_mov_b32_e32 v34, v35 +; SI-NEXT: v_mov_b32_e32 v35, v36 +; SI-NEXT: v_mov_b32_e32 v36, v37 +; SI-NEXT: v_mov_b32_e32 v37, v53 +; SI-NEXT: v_mov_b32_e32 v27, v57 ; SI-NEXT: v_mov_b32_e32 v57, v63 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v63, v61 +; SI-NEXT: v_mov_b32_e32 v30, v49 +; SI-NEXT: v_mov_b32_e32 v49, v42 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: v_mov_b32_e32 v46, v52 +; SI-NEXT: v_mov_b32_e32 v52, v60 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v26, v48 +; SI-NEXT: v_mov_b32_e32 v48, v41 +; SI-NEXT: v_mov_b32_e32 v41, v45 +; SI-NEXT: v_mov_b32_e32 v45, v59 +; SI-NEXT: buffer_load_dword v59, 
off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v28, v39 +; SI-NEXT: v_mov_b32_e32 v39, v40 +; SI-NEXT: v_mov_b32_e32 v40, v44 +; SI-NEXT: v_mov_b32_e32 v44, v56 +; SI-NEXT: v_mov_b32_e32 v56, v62 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v62, v38 +; SI-NEXT: v_mov_b32_e32 v38, v50 +; SI-NEXT: v_mov_b32_e32 v50, v43 +; SI-NEXT: v_mov_b32_e32 v43, v47 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB43_2 ; ; VI-LABEL: bitcast_v52i16_to_v13i64_scalar: @@ -31639,171 +31709,177 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v13, s28 ; SI-NEXT: v_mov_b32_e32 v14, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v28, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v31, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v32, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v34, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v37, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v39, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v52, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v54, v24, v23, 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v40, v26, v25, 16 -; SI-NEXT: 
v_lshrrev_b32_e32 v33, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v18 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v26 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[25:26], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 ; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v28, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v31, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v32, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v34, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v37, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v39, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v52, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v54, v24, v23, 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: v_lshr_b64 v[27:28], v[11:12], 16 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_lshr_b64 v[28:29], v[9:10], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_lshr_b64 v[29:30], v[7:8], 16 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 +; SI-NEXT: v_lshr_b64 v[32:33], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[25:26], 16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: s_waitcnt 
expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v40, v26, v25, 16 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v18 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v26 ; SI-NEXT: .LBB49_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v49 ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_or_b32_e32 v25, v25, v40 +; SI-NEXT: v_or_b32_e32 v25, v25, v35 ; SI-NEXT: buffer_store_dword v25, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v44 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v47 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 ; SI-NEXT: v_add_i32_e32 v26, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v48 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v46 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v38 ; SI-NEXT: v_or_b32_e32 v19, v19, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v19, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v37 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v44 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 
v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v34 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v43 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v42 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v36 ; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v41 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen @@ -31815,7 +31891,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -31827,7 +31903,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -31839,7 +31915,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -31851,7 +31927,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -31863,7 +31939,7 @@ 
define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -31875,44 +31951,47 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v13f64_to_v52i16_scalar: @@ -33661,116 +33740,119 @@ define inreg 
<13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: v_mov_b32_e32 v47, v8 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_mov_b32_e32 v57, v6 +; SI-NEXT: v_mov_b32_e32 v32, v4 +; SI-NEXT: v_mov_b32_e32 v34, v2 +; SI-NEXT: v_mov_b32_e32 v37, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20 -; SI-NEXT: v_mov_b32_e32 v33, v24 -; SI-NEXT: v_mov_b32_e32 v34, v22 -; SI-NEXT: v_mov_b32_e32 v35, v20 -; SI-NEXT: v_mov_b32_e32 v36, v18 -; SI-NEXT: v_mov_b32_e32 v37, v16 -; SI-NEXT: v_mov_b32_e32 v38, v14 -; SI-NEXT: v_mov_b32_e32 v39, v12 -; SI-NEXT: v_mov_b32_e32 v48, v10 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v29 +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v62, v30 +; SI-NEXT: v_mov_b32_e32 v30, v24 +; SI-NEXT: v_mov_b32_e32 v38, v22 +; SI-NEXT: v_mov_b32_e32 v39, v20 +; SI-NEXT: v_mov_b32_e32 v48, v18 +; SI-NEXT: v_mov_b32_e32 v49, v16 +; SI-NEXT: v_mov_b32_e32 v50, v14 +; SI-NEXT: v_mov_b32_e32 v40, v12 +; SI-NEXT: v_mov_b32_e32 v41, v10 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v2 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: 
v_lshlrev_b32_e32 v59, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v6 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v8 ; SI-NEXT: s_cbranch_scc0 .LBB51_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v63 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 -; SI-NEXT: v_or_b32_e32 v10, v0, v42 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 -; SI-NEXT: v_or_b32_e32 v11, v0, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_or_b32_e32 v7, v0, v31 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v9, v0, v61 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v10, v0, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v11, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v12, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v13, v0, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v14, v0, v33 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_or_b32_e32 v15, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 -; SI-NEXT: v_or_b32_e32 v12, v0, v56 +; SI-NEXT: v_or_b32_e32 v16, v0, v54 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v13, v0, v47 +; SI-NEXT: v_or_b32_e32 v17, v0, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v14, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 -; SI-NEXT: v_or_b32_e32 v15, v0, v45 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 -; SI-NEXT: v_or_b32_e32 v16, v0, v44 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 -; SI-NEXT: v_or_b32_e32 v17, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_or_b32_e32 v18, v0, v58 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 +; SI-NEXT: v_or_b32_e32 v18, v0, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v19, v0, v57 +; SI-NEXT: v_or_b32_e32 v19, v0, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v20, v0, v31 +; SI-NEXT: v_or_b32_e32 v20, v0, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v21, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 +; SI-NEXT: v_or_b32_e32 v21, v0, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v22, v0, v29 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v22, v0, v45 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_or_b32_e32 v23, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v23, v0, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_and_b32_e32 v1, 
0xffff, v54 -; SI-NEXT: v_or_b32_e32 v24, v0, v27 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v24, v0, v29 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v8, v1, v62 -; SI-NEXT: v_or_b32_e32 v25, v0, v32 +; SI-NEXT: v_or_b32_e32 v8, v1, v63 +; SI-NEXT: v_or_b32_e32 v25, v0, v27 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -33780,72 +33862,74 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB51_3 ; SI-NEXT: .LBB51_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_or_b32_e32 v0, v31, v0 ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_or_b32_e32 v0, v61, v0 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v42, v0 +; SI-NEXT: v_or_b32_e32 v0, v60, v0 ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v41, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_or_b32_e32 v0, v58, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v53, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; 
SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v56, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 @@ -33855,13 +33939,13 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a, ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v51, v0 ; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 ; SI-NEXT: s_or_b32 s7, s8, s7 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 @@ -33871,17 +33955,17 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a, ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: v_or_b32_e32 v0, v27, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34 ; SI-NEXT: s_or_b32 s9, s10, s9 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v62, v1 +; SI-NEXT: v_or_b32_e32 v1, v63, v1 ; SI-NEXT: s_add_i32 s4, s4, 0x30000 ; SI-NEXT: s_add_i32 s5, s5, 0x30000 ; SI-NEXT: s_add_i32 s6, s6, 0x30000 @@ -33889,7 +33973,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a, ; SI-NEXT: s_add_i32 s8, s8, 0x30000 ; SI-NEXT: s_add_i32 s9, s9, 0x30000 ; SI-NEXT: s_add_i32 s10, s10, 0x30000 -; SI-NEXT: v_or_b32_e32 v0, v32, v0 +; SI-NEXT: v_or_b32_e32 v0, v27, v0 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 @@ -33919,85 +34003,87 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB51_4: -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; 
SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v47, v43 +; SI-NEXT: v_mov_b32_e32 v43, v50 +; SI-NEXT: v_mov_b32_e32 v50, v38 +; SI-NEXT: v_mov_b32_e32 v38, v62 +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v62, v56 +; SI-NEXT: v_mov_b32_e32 v56, v44 +; SI-NEXT: v_mov_b32_e32 v44, v40 +; SI-NEXT: v_mov_b32_e32 v40, v39 +; SI-NEXT: v_mov_b32_e32 v39, v28 +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v59, v45 +; SI-NEXT: v_mov_b32_e32 v45, v41 +; SI-NEXT: v_mov_b32_e32 v41, v48 +; SI-NEXT: v_mov_b32_e32 v48, v26 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v60, v52 +; SI-NEXT: v_mov_b32_e32 v52, v46 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mov_b32_e32 v46, v42 +; SI-NEXT: v_mov_b32_e32 v42, v49 +; SI-NEXT: v_mov_b32_e32 v49, v30 +; SI-NEXT: v_mov_b32_e32 v61, v63 ; SI-NEXT: v_mov_b32_e32 v63, v57 -; SI-NEXT: v_mov_b32_e32 v57, v32 -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v42, v50 -; SI-NEXT: v_mov_b32_e32 v50, v39 -; SI-NEXT: v_mov_b32_e32 v39, v36 -; SI-NEXT: v_mov_b32_e32 v36, v33 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: v_mov_b32_e32 v32, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v40 -; SI-NEXT: v_mov_b32_e32 v40, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) -; SI-NEXT: v_mov_b32_e32 v41, v49 -; SI-NEXT: v_mov_b32_e32 v49, v38 -; SI-NEXT: v_mov_b32_e32 v38, v35 -; SI-NEXT: v_mov_b32_e32 v35, v26 -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_mov_b32_e32 v56, v45 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v47, v44 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v46, v43 -; SI-NEXT: v_mov_b32_e32 v45, v58 -; SI-NEXT: v_mov_b32_e32 v58, v27 -; SI-NEXT: v_mov_b32_e32 v44, v60 -; SI-NEXT: v_mov_b32_e32 v60, v29 -; SI-NEXT: v_mov_b32_e32 v43, v62 -; SI-NEXT: v_mov_b32_e32 v62, v31 -; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v31, v62 -; SI-NEXT: v_mov_b32_e32 v62, v43 -; SI-NEXT: v_mov_b32_e32 v29, v60 -; SI-NEXT: v_mov_b32_e32 v60, v44 -; SI-NEXT: v_mov_b32_e32 v27, v58 -; SI-NEXT: v_mov_b32_e32 v58, v45 -; SI-NEXT: v_mov_b32_e32 v43, v46 -; SI-NEXT: v_mov_b32_e32 v44, v47 -; SI-NEXT: v_mov_b32_e32 v45, v56 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, 
s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v26, v35 -; SI-NEXT: v_mov_b32_e32 v35, v38 -; SI-NEXT: v_mov_b32_e32 v38, v49 -; SI-NEXT: v_mov_b32_e32 v49, v41 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v34, v37 -; SI-NEXT: v_mov_b32_e32 v37, v48 -; SI-NEXT: v_mov_b32_e32 v48, v40 -; SI-NEXT: v_mov_b32_e32 v40, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 +; SI-NEXT: v_mov_b32_e32 v57, v27 +; SI-NEXT: v_mov_b32_e32 v53, v37 +; SI-NEXT: v_mov_b32_e32 v37, v36 +; SI-NEXT: v_mov_b32_e32 v36, v35 +; SI-NEXT: v_mov_b32_e32 v35, v34 +; SI-NEXT: v_mov_b32_e32 v34, v33 +; SI-NEXT: v_mov_b32_e32 v33, v55 ; SI-NEXT: v_mov_b32_e32 v55, v32 -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v33, v36 -; SI-NEXT: v_mov_b32_e32 v36, v39 -; SI-NEXT: v_mov_b32_e32 v39, v50 -; SI-NEXT: v_mov_b32_e32 v50, v42 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v32, v57 +; SI-NEXT: v_mov_b32_e32 v32, v54 +; SI-NEXT: v_mov_b32_e32 v54, v58 +; SI-NEXT: v_mov_b32_e32 v58, v51 +; SI-NEXT: v_mov_b32_e32 v51, v29 +; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 +; SI-NEXT: v_mov_b32_e32 v29, v51 +; SI-NEXT: v_mov_b32_e32 v51, v58 +; SI-NEXT: v_mov_b32_e32 v58, v54 +; SI-NEXT: v_mov_b32_e32 v54, v32 +; SI-NEXT: v_mov_b32_e32 v32, v55 +; SI-NEXT: v_mov_b32_e32 v55, v33 +; SI-NEXT: v_mov_b32_e32 v33, v34 +; SI-NEXT: v_mov_b32_e32 v34, v35 +; SI-NEXT: v_mov_b32_e32 v35, v36 +; SI-NEXT: v_mov_b32_e32 v36, v37 +; SI-NEXT: v_mov_b32_e32 v37, v53 +; SI-NEXT: v_mov_b32_e32 v27, v57 ; SI-NEXT: v_mov_b32_e32 v57, v63 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v63, v61 +; SI-NEXT: v_mov_b32_e32 v30, v49 +; SI-NEXT: v_mov_b32_e32 v49, v42 +; SI-NEXT: v_mov_b32_e32 v42, v46 +; SI-NEXT: v_mov_b32_e32 v46, v52 +; SI-NEXT: v_mov_b32_e32 v52, v60 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v26, v48 +; SI-NEXT: v_mov_b32_e32 v48, v41 +; SI-NEXT: v_mov_b32_e32 v41, v45 +; SI-NEXT: v_mov_b32_e32 v45, v59 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v28, v39 +; SI-NEXT: v_mov_b32_e32 v39, v40 +; SI-NEXT: v_mov_b32_e32 v40, v44 +; SI-NEXT: v_mov_b32_e32 v44, v56 +; SI-NEXT: v_mov_b32_e32 v56, v62 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v62, v38 +; SI-NEXT: v_mov_b32_e32 v38, v50 +; SI-NEXT: v_mov_b32_e32 v50, v43 +; SI-NEXT: v_mov_b32_e32 v43, v47 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB51_2 ; ; VI-LABEL: bitcast_v52i16_to_v13f64_scalar: @@ -35773,11 +35859,12 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: s_lshr_b32 
s40, s6, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v5, s40 ; SI-NEXT: s_lshr_b32 s40, s9, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v7, s40 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v63, s40 ; SI-NEXT: s_lshr_b32 s40, s8, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v9, s40 ; SI-NEXT: s_lshr_b32 s40, s11, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v11, s40 +; SI-NEXT: v_cvt_f32_f16_e32 v62, s40 ; SI-NEXT: s_lshr_b32 s40, s10, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v14, s40 ; SI-NEXT: s_lshr_b32 s40, s13, 16 @@ -35816,20 +35903,20 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: v_cvt_f32_f16_e32 v42, s40 ; SI-NEXT: s_lshr_b32 s40, s16, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v44, s40 -; SI-NEXT: v_cvt_f32_f16_e32 v46, s5 -; SI-NEXT: v_cvt_f32_f16_e32 v3, s4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, s7 -; SI-NEXT: v_cvt_f32_f16_e32 v4, s6 +; SI-NEXT: v_cvt_f32_f16_e32 v45, s5 +; SI-NEXT: v_cvt_f32_f16_e32 v17, s4 +; SI-NEXT: v_cvt_f32_f16_e32 v21, s7 +; SI-NEXT: v_cvt_f32_f16_e32 v25, s6 ; SI-NEXT: v_cvt_f32_f16_e32 v12, s9 -; SI-NEXT: v_cvt_f32_f16_e32 v45, s8 +; SI-NEXT: v_cvt_f32_f16_e32 v3, s8 ; SI-NEXT: v_cvt_f32_f16_e32 v15, s11 -; SI-NEXT: v_cvt_f32_f16_e32 v17, s10 +; SI-NEXT: v_cvt_f32_f16_e32 v7, s10 ; SI-NEXT: v_cvt_f32_f16_e32 v19, s13 -; SI-NEXT: v_cvt_f32_f16_e32 v21, s12 +; SI-NEXT: v_cvt_f32_f16_e32 v8, s12 ; SI-NEXT: v_cvt_f32_f16_e32 v23, s15 -; SI-NEXT: v_cvt_f32_f16_e32 v25, s14 +; SI-NEXT: v_cvt_f32_f16_e32 v4, s14 ; SI-NEXT: v_cvt_f32_f16_e32 v27, s29 -; SI-NEXT: v_cvt_f32_f16_e32 v29, s28 +; SI-NEXT: v_cvt_f32_f16_e32 v11, s28 ; SI-NEXT: v_cvt_f32_f16_e32 v30, s27 ; SI-NEXT: v_cvt_f32_f16_e32 v32, s26 ; SI-NEXT: v_cvt_f32_f16_e32 v34, s25 @@ -35846,22 +35933,6 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: .LBB53_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[1:2], s[16:17], 1.0 ; SI-NEXT: v_add_f64 v[54:55], s[18:19], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f64 v[3:4], s[4:5], 1.0 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v2 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v55 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v42 ; SI-NEXT: v_add_f64 v[49:50], s[20:21], 1.0 ; SI-NEXT: v_add_f64 v[37:38], s[22:23], 1.0 ; SI-NEXT: v_add_f64 v[33:34], s[24:25], 1.0 @@ -35872,41 +35943,54 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: v_add_f64 v[14:15], s[10:11], 1.0 ; SI-NEXT: v_add_f64 v[11:12], s[8:9], 1.0 ; SI-NEXT: v_add_f64 v[7:8], s[6:7], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v49 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v50 +; SI-NEXT: v_add_f64 v[3:4], s[4:5], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v54 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v50 ; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v37 ; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v38 ; SI-NEXT: v_lshrrev_b32_e32 v35, 
16, v33 ; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v34 +; SI-NEXT: s_waitcnt expcnt(6) ; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v31 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v32 +; SI-NEXT: s_waitcnt expcnt(5) ; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v26 ; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v27 +; SI-NEXT: s_waitcnt expcnt(4) ; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v22 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v23 +; SI-NEXT: s_waitcnt expcnt(3) ; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v18 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v19 +; SI-NEXT: s_waitcnt expcnt(2) ; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v14 +; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v15 ; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v11 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v12 ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v7 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v3 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v30, v32 ; SI-NEXT: v_cvt_f32_f16_e32 v32, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 @@ -35915,13 +35999,17 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: v_cvt_f32_f16_e32 v48, v37 ; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 ; SI-NEXT: v_cvt_f32_f16_e32 v52, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v53, v55 +; SI-NEXT: v_cvt_f32_f16_e32 v55, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v41, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v43, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v63, v63 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v62 +; SI-NEXT: v_cvt_f32_f16_e32 v62, v62 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v61 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v60 @@ -35935,14 +36023,12 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v47 ; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v51 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v42, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v51, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v44 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v44, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v49, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 +; SI-NEXT: v_cvt_f32_f16_e32 v54, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v44, v44 
; SI-NEXT: .LBB53_3: ; %end ; SI-NEXT: v_cvt_f16_f32_e32 v10, v44 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v43 @@ -36028,7 +36114,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: buffer_store_dword v10, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v10, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v11 ; SI-NEXT: v_add_i32_e32 v26, vcc, 48, v0 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 ; SI-NEXT: v_or_b32_e32 v10, v13, v10 @@ -36042,7 +36128,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: buffer_store_dword v10, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v10, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v4 ; SI-NEXT: v_add_i32_e32 v22, vcc, 56, v0 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 ; SI-NEXT: v_or_b32_e32 v10, v13, v10 @@ -36056,7 +36142,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: buffer_store_dword v10, v20, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v10, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v8 ; SI-NEXT: v_add_i32_e32 v18, vcc, 64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 ; SI-NEXT: v_or_b32_e32 v10, v13, v10 @@ -36070,56 +36156,56 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: buffer_store_dword v10, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v10, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v7 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x48, v0 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 ; SI-NEXT: v_or_b32_e32 v10, v13, v10 ; SI-NEXT: buffer_store_dword v10, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v10, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v62 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v15 -; SI-NEXT: v_add_i32_e32 v13, vcc, 0x4c, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_add_i32_e32 v13, vcc, 0x4c, v0 ; SI-NEXT: v_or_b32_e32 v10, v11, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: buffer_store_dword v10, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v10, v45 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x50, v0 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v10, vcc, 0x50, v0 +; SI-NEXT: v_or_b32_e32 v3, v3, v9 +; SI-NEXT: buffer_store_dword v3, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v9, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v3, v63 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v12 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x54, v0 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v3, v7, v3 +; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x58, v0 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 
-; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v3, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v25 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x58, v0 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v4, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v21 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x5c, v0 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: buffer_store_dword v4, v6, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v17 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x60, v0 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v45 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -36167,33 +36253,33 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a ; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr45 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: s_branch .LBB53_2 ; @@ -42201,23 +42287,22 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:88 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v45, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v51, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v4 ; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_cvt_f16_f32_e32 v1, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v44, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v52, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v43, v10 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v12 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: v_cvt_f16_f32_e32 v7, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v42, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v41, v18 @@ -42227,6 +42312,7 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v40, v22 ; SI-NEXT: v_cvt_f16_f32_e32 v21, v23 ; SI-NEXT: v_cvt_f16_f32_e32 v19, v24 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v55, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v23, v27 @@ -42236,35 +42322,35 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cvt_f16_f32_e32 v18, v46 ; SI-NEXT: v_cvt_f16_f32_e32 v17, v47 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v56 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v56 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v57 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v58 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v59 ; SI-NEXT: v_cvt_f16_f32_e32 v59, v60 ; SI-NEXT: v_cvt_f16_f32_e32 v27, v61 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v62 -; SI-NEXT: v_cvt_f16_f32_e32 v61, v63 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v63 ; SI-NEXT: v_cvt_f16_f32_e32 v58, v31 ; SI-NEXT: s_waitcnt vmcnt(13) ; SI-NEXT: v_cvt_f16_f32_e32 v26, v36 ; SI-NEXT: s_waitcnt vmcnt(12) ; SI-NEXT: v_cvt_f16_f32_e32 v9, v38 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f16_f32_e32 v63, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v62, v48 ; SI-NEXT: s_waitcnt vmcnt(10) ; SI-NEXT: v_cvt_f16_f32_e32 v36, v53 ; SI-NEXT: s_waitcnt vmcnt(9) ; SI-NEXT: v_cvt_f16_f32_e32 v25, v54 ; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v53, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v32 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f16_f32_e32 v48, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v33 ; SI-NEXT: s_waitcnt vmcnt(6) ; SI-NEXT: v_cvt_f16_f32_e32 v31, v34 ; SI-NEXT: s_waitcnt vmcnt(5) ; SI-NEXT: v_cvt_f16_f32_e32 v24, v35 ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v54, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v37 ; SI-NEXT: s_waitcnt vmcnt(3) ; SI-NEXT: v_cvt_f16_f32_e32 v32, v39 ; SI-NEXT: s_waitcnt vmcnt(2) @@ -42276,38 +42362,46 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_mov_b32_e32 v22, v2 ; SI-NEXT: v_mov_b32_e32 v39, v3 ; SI-NEXT: v_mov_b32_e32 v49, v5 -; SI-NEXT: v_mov_b32_e32 v60, v7 -; SI-NEXT: v_mov_b32_e32 v62, v8 +; SI-NEXT: v_mov_b32_e32 v54, v7 +; SI-NEXT: v_mov_b32_e32 v61, v8 +; SI-NEXT: v_mov_b32_e32 v63, v4 ; SI-NEXT: s_xor_b64 exec, exec, s[4:5] ; SI-NEXT: s_cbranch_execz .LBB58_2 ; SI-NEXT: ; %bb.1: ; %cmp.true ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v33, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v30, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v35, v63 ; SI-NEXT: 
v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 +; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 +; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 ; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v34 +; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v35, v61 ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v62 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 +; SI-NEXT: v_or_b32_e32 v63, v34, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v42 ; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 ; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 +; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v48 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v62 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v61 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v60 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 @@ -42319,13 +42413,13 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v63, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v62, v8 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v61, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v7 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 @@ -42340,8 +42434,7 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 @@ -42355,94 +42448,82 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v33, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_add_f32_e32 v33, 
0x38000000, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v34, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 +; SI-NEXT: v_or_b32_e32 v2, v33, v30 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v2, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v43 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v33, v2 -; SI-NEXT: v_or_b32_e32 v2, v34, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v43 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v2, v33, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v42 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v34 -; SI-NEXT: v_or_b32_e32 v62, v35, v42 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v54 +; SI-NEXT: v_or_b32_e32 v61, v35, v42 ; SI-NEXT: v_cvt_f32_f16_e32 v35, v41 +; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v34 ; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v49 ; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 ; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v49 +; SI-NEXT: v_or_b32_e32 v54, v33, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v40 ; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 ; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v35 ; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_or_b32_e32 v60, v34, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v40 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: v_or_b32_e32 v49, v33, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v55 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v2, v48 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 +; SI-NEXT: v_or_b32_e32 v49, v34, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v55 +; SI-NEXT: v_cvt_f32_f16_e32 v35, v39 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v34, v29 -; SI-NEXT: v_add_f32_e32 v2, 
0x38000000, v2 +; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 ; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v33 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v37 ; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v56 +; SI-NEXT: v_or_b32_e32 v39, v35, v55 ; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v39 +; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 ; SI-NEXT: v_or_b32_e32 v22, v22, v29 ; SI-NEXT: v_or_b32_e32 v37, v33, v47 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v27 ; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v59 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 +; SI-NEXT: v_or_b32_e32 v35, v34, v27 ; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v26 ; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v33 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v58 ; SI-NEXT: v_or_b32_e32 v59, v28, v26 -; SI-NEXT: v_or_b32_e32 v39, v35, v55 -; SI-NEXT: v_or_b32_e32 v30, v30, v27 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v34 ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v36 @@ -42461,18 +42542,22 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 ; SI-NEXT: v_or_b32_e32 v31, v25, v57 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v32 -; SI-NEXT: v_or_b32_e32 v54, v5, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v48 -; SI-NEXT: v_or_b32_e32 v53, v3, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v63 +; SI-NEXT: v_or_b32_e32 v53, v5, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v38 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v11, v2 +; SI-NEXT: v_or_b32_e32 v48, v3, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v62 ; SI-NEXT: v_or_b32_e32 v9, v9, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v61 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v60 ; SI-NEXT: v_or_b32_e32 v14, v14, v25 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v10 ; SI-NEXT: v_or_b32_e32 v12, v12, v25 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v17 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_or_b32_e32 v18, v18, v25 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 @@ -42480,8 +42565,8 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v20 ; SI-NEXT: v_or_b32_e32 v15, v15, v25 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v13 -; SI-NEXT: v_or_b32_e32 v4, v4, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v11 +; SI-NEXT: v_or_b32_e32 v11, v4, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2 ; SI-NEXT: v_or_b32_e32 v6, v6, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v25, v52 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v50 @@ -42497,10 +42582,10 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v52, v25, v33 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v50 ; SI-NEXT: v_or_b32_e32 v51, v28, v25 -; SI-NEXT: v_alignbit_b32 v45, v51, v38, 16 +; SI-NEXT: v_alignbit_b32 v45, v51, v30, 16 ; SI-NEXT: v_alignbit_b32 v44, v52, v44, 16 ; SI-NEXT: 
v_alignbit_b32 v43, v6, v42, 16 -; SI-NEXT: v_alignbit_b32 v42, v4, v41, 16 +; SI-NEXT: v_alignbit_b32 v42, v11, v41, 16 ; SI-NEXT: v_alignbit_b32 v41, v15, v46, 16 ; SI-NEXT: v_alignbit_b32 v40, v21, v55, 16 ; SI-NEXT: v_alignbit_b32 v55, v23, v29, 16 @@ -42508,56 +42593,60 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_alignbit_b32 v28, v12, v27, 16 ; SI-NEXT: v_alignbit_b32 v27, v14, v26, 16 ; SI-NEXT: v_alignbit_b32 v26, v9, v56, 16 -; SI-NEXT: v_alignbit_b32 v25, v53, v24, 16 -; SI-NEXT: v_alignbit_b32 v24, v54, v57, 16 +; SI-NEXT: v_mov_b32_e32 v56, v35 +; SI-NEXT: v_alignbit_b32 v25, v48, v24, 16 +; SI-NEXT: v_alignbit_b32 v24, v53, v57, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: .LBB58_2: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v45 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v45 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v33, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v33, v33, v34 -; SI-NEXT: buffer_store_dword v33, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v33, 0xffff, v51 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v50 -; SI-NEXT: v_or_b32_e32 v33, v33, v34 -; SI-NEXT: v_add_i32_e32 v34, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v33, v34, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v30, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v30, v30, v33 +; SI-NEXT: buffer_store_dword v30, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v30, 0xffff, v51 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v50 +; SI-NEXT: v_or_b32_e32 v30, v30, v33 +; SI-NEXT: v_add_i32_e32 v33, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v30, v33, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v30, 0xffff, v63 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v44 +; SI-NEXT: v_or_b32_e32 v30, v30, v33 +; SI-NEXT: v_add_i32_e32 v33, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v30, v33, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v30, 0xffff, v52 +; SI-NEXT: v_or_b32_e32 v1, v30, v1 +; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v30, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v61 +; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v43 +; SI-NEXT: v_or_b32_e32 v1, v1, v30 +; SI-NEXT: v_add_i32_e32 v30, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v30, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v44 -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) -; SI-NEXT: v_and_b32_e32 v33, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v33, v33, v34 -; SI-NEXT: v_add_i32_e32 v34, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v33, v34, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v33, 0xffff, v52 -; SI-NEXT: v_or_b32_e32 v1, v33, v1 -; SI-NEXT: v_add_i32_e32 v33, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v1, v33, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v62 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v43 -; SI-NEXT: v_or_b32_e32 v1, v1, v33 -; SI-NEXT: v_add_i32_e32 v33, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v1, v33, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; 
SI-NEXT: v_lshlrev_b32_e32 v6, 16, v11 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v6 ; SI-NEXT: v_add_i32_e32 v6, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v60 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 ; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v42 ; SI-NEXT: v_or_b32_e32 v1, v1, v6 ; SI-NEXT: v_add_i32_e32 v6, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v13 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 ; SI-NEXT: v_or_b32_e32 v1, v1, v4 ; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen @@ -42610,7 +42699,7 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v4, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v30 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v56 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v28 ; SI-NEXT: v_or_b32_e32 v1, v1, v4 ; SI-NEXT: v_add_i32_e32 v4, vcc, 64, v0 @@ -42629,7 +42718,7 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v61 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v60 ; SI-NEXT: v_or_b32_e32 v1, v1, v4 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen @@ -42641,7 +42730,7 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v63 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v62 ; SI-NEXT: v_or_b32_e32 v1, v1, v4 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen @@ -42652,8 +42741,8 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x58, v0 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v48 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -42664,7 +42753,7 @@ define <52 x i16> @bitcast_v52f16_to_v52i16(<52 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 @@ -43203,482 +43292,533 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i ; SI-LABEL: bitcast_v52f16_to_v52i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v43, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:24 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:12 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:24 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v3 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:28 +; SI-NEXT: v_cvt_f16_f32_e32 v62, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v10 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:32 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v1 -; SI-NEXT: 
v_cvt_f16_f32_e32 v38, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v27, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v18, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v14 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v18 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v44, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v51, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v3, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v46, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v45, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v61, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v43, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v41, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v54, s29 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_cvt_f16_f32_e32 v53, v32 ; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v42 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v33 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f16_f32_e32 v29, v43 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v44 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v35, 
v20 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v28 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v36 ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v4, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v38 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f16_f32_e32 v28, v47 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v5, v56 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 -; SI-NEXT: v_cvt_f16_f32_e32 v44, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v43, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v42, s25 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v58 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v39 +; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v39, s25 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v44, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 -; SI-NEXT: v_add_f32_e32 v44, 0x38000000, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v44 -; SI-NEXT: v_add_f32_e32 v43, 0x38000000, v43 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v45, v43 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v42 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v42 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v53 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v53 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v51 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v43 +; SI-NEXT: v_mov_b32_e32 v38, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v41 +; SI-NEXT: v_mov_b32_e32 v28, v7 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v54 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v36, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v39 +; SI-NEXT: v_mov_b32_e32 v9, v15 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v1 +; SI-NEXT: 
v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v39, v7, v19 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v38 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v38, v7, v15 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v1, v52 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v50 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_mov_b32_e32 v29, v11 +; SI-NEXT: v_or_b32_e32 v5, v5, v23 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v29 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v37 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v5, v5, v25 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v3, v33 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v35 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: 
v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_or_b32_e32 v5, v5, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v3, v3, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v8 -; SI-NEXT: v_or_b32_e32 v9, v9, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v7 -; SI-NEXT: v_or_b32_e32 v14, v14, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v10 -; SI-NEXT: v_or_b32_e32 v12, v12, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v16 -; SI-NEXT: v_or_b32_e32 v17, v17, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v31 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_or_b32_e32 v36, v36, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v32 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v34, v34, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v29 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v48 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 ; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v39 -; 
SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_or_b32_e32 v48, v29, v48 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_or_b32_e32 v50, v50, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v18 -; SI-NEXT: v_or_b32_e32 v19, v19, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v22 -; SI-NEXT: v_or_b32_e32 v25, v25, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v45 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v44 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v42 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v40 -; SI-NEXT: v_or_b32_e32 v24, v24, v29 -; SI-NEXT: v_or_b32_e32 v27, v27, v43 -; SI-NEXT: v_or_b32_e32 v26, v26, v45 -; SI-NEXT: v_or_b32_e32 v21, v21, v30 -; SI-NEXT: v_or_b32_e32 v20, v20, v41 -; SI-NEXT: v_or_b32_e32 v49, v49, v46 -; SI-NEXT: v_or_b32_e32 v37, v37, v55 -; SI-NEXT: v_or_b32_e32 v35, v35, v54 -; SI-NEXT: v_or_b32_e32 v33, v33, v47 -; SI-NEXT: v_or_b32_e32 v15, v15, v52 -; SI-NEXT: v_or_b32_e32 v13, v13, v51 -; SI-NEXT: v_or_b32_e32 v11, v11, v56 -; SI-NEXT: v_or_b32_e32 v6, v6, v28 -; SI-NEXT: v_or_b32_e32 v4, v4, v57 -; SI-NEXT: v_alignbit_b32 v44, v24, v43, 16 -; SI-NEXT: v_alignbit_b32 v43, v25, v45, 16 -; SI-NEXT: v_alignbit_b32 v42, v19, v30, 16 -; SI-NEXT: v_alignbit_b32 v30, v50, v41, 16 -; SI-NEXT: v_alignbit_b32 v41, v48, v46, 16 -; SI-NEXT: v_alignbit_b32 v40, v34, v55, 16 -; SI-NEXT: v_alignbit_b32 v55, v36, v54, 16 -; SI-NEXT: v_alignbit_b32 v54, v17, v47, 16 -; SI-NEXT: v_alignbit_b32 v53, v12, v52, 16 -; SI-NEXT: v_alignbit_b32 v52, v14, v51, 16 -; SI-NEXT: v_alignbit_b32 v51, v9, v56, 16 -; SI-NEXT: v_alignbit_b32 v29, v3, v28, 16 -; SI-NEXT: v_alignbit_b32 v28, v5, v57, 16 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v28 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 
v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_or_b32_e32 v7, v7, v17 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v44 +; SI-NEXT: v_or_b32_e32 v5, v5, v21 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v56 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v42 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v5, v5, v11 +; SI-NEXT: v_or_b32_e32 v56, v7, v13 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v31 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v40 +; SI-NEXT: v_or_b32_e32 v36, v1, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v55 +; SI-NEXT: v_or_b32_e32 v37, v28, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v53 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v35, v3, v5 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 +; SI-NEXT: v_or_b32_e32 v33, v28, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v30 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v27 +; SI-NEXT: v_or_b32_e32 v31, v29, v1 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v30 +; SI-NEXT: v_or_b32_e32 v2, v2, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v57 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29 +; SI-NEXT: v_or_b32_e32 v4, v4, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v51 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v60 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v57 +; SI-NEXT: v_or_b32_e32 v6, v6, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v32 +; SI-NEXT: v_or_b32_e32 v8, v8, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v47 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v60 +; SI-NEXT: v_or_b32_e32 v10, v10, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v63 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v47 +; SI-NEXT: v_or_b32_e32 v12, v12, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v63 +; SI-NEXT: v_or_b32_e32 v14, v14, v27 +; 
SI-NEXT: v_cvt_f32_f16_e32 v27, v62 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v59 +; SI-NEXT: v_or_b32_e32 v18, v18, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v58 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v62, v27 +; SI-NEXT: v_lshr_b64 v[50:51], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v62 +; SI-NEXT: v_or_b32_e32 v22, v22, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v61 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v58 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v45 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v28 +; SI-NEXT: v_lshr_b64 v[54:55], v[25:26], 16 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v46 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v61 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: v_or_b32_e32 v16, v16, v28 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v45 +; SI-NEXT: v_or_b32_e32 v20, v20, v27 +; SI-NEXT: v_mov_b32_e32 v53, v33 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v46 +; SI-NEXT: v_or_b32_e32 v24, v24, v27 +; SI-NEXT: v_lshr_b64 v[43:44], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[7:8], 16 +; SI-NEXT: v_mov_b32_e32 v7, v56 +; SI-NEXT: v_lshr_b64 v[55:56], v[3:4], 16 +; SI-NEXT: v_mov_b32_e32 v44, v37 +; SI-NEXT: v_lshr_b64 v[41:42], v[19:20], 16 +; SI-NEXT: v_mov_b32_e32 v19, v39 +; SI-NEXT: v_lshr_b64 v[39:40], v[15:16], 16 +; SI-NEXT: v_mov_b32_e32 v15, v38 +; SI-NEXT: v_lshr_b64 v[37:38], v[11:12], 16 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v42, v36 +; SI-NEXT: v_mov_b32_e32 v40, v35 +; SI-NEXT: v_mov_b32_e32 v51, v32 +; SI-NEXT: v_lshr_b64 v[48:49], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[9:10], 16 +; SI-NEXT: v_mov_b32_e32 v34, v31 +; SI-NEXT: v_lshr_b64 v[31:32], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[1:2], 16 +; SI-NEXT: v_mov_b32_e32 v32, v29 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v44 -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_or_b32_e32 v27, v27, v44 -; SI-NEXT: v_or_b32_e32 v23, v24, v23 -; SI-NEXT: v_add_i32_e32 v24, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v43 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v43 -; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v46 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: 
v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_add_i32_e32 v23, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v41 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v19 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v42 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v18, v19, v18 -; SI-NEXT: v_add_i32_e32 v19, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v30 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v49 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v41 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v45 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v39 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v39 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v15 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v40 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v32 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v55 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v31 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v54 -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 -; SI-NEXT: v_or_b32_e32 v16, v17, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v11 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v53 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v15, v15, v16 -; SI-NEXT: v_add_i32_e32 v16, vcc, 64, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v58 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v52 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v10, v7 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v62 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v51 -; SI-NEXT: v_or_b32_e32 v7, v7, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v29 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x58, v0 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v56 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v35 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v42 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v33 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v51 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v40 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v31 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v57 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v28 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, 
v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll index a43ce77b20631..8ee5b966f40b8 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll @@ -3637,337 +3637,368 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v28i32_to_v56i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill +; 
SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v16, s30, 0 +; SI-NEXT: v_writelane_b32 v16, s31, 1 +; SI-NEXT: v_writelane_b32 v16, s34, 2 +; SI-NEXT: v_writelane_b32 v16, s35, 3 +; SI-NEXT: v_writelane_b32 v16, s36, 4 +; SI-NEXT: v_writelane_b32 v16, s37, 5 +; SI-NEXT: v_writelane_b32 v16, s38, 6 +; SI-NEXT: v_writelane_b32 v16, s39, 7 +; SI-NEXT: v_writelane_b32 v16, s48, 8 +; SI-NEXT: v_writelane_b32 v16, s49, 9 +; SI-NEXT: v_writelane_b32 v16, s50, 10 +; SI-NEXT: v_writelane_b32 v16, s51, 11 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15 -; SI-NEXT: v_readfirstlane_b32 s43, v1 -; SI-NEXT: v_readfirstlane_b32 s42, v2 -; SI-NEXT: v_readfirstlane_b32 s41, v3 -; SI-NEXT: v_readfirstlane_b32 s40, v4 -; SI-NEXT: v_readfirstlane_b32 s15, v5 -; SI-NEXT: v_readfirstlane_b32 s14, v6 -; SI-NEXT: v_readfirstlane_b32 s13, v7 -; SI-NEXT: v_readfirstlane_b32 s12, v8 -; SI-NEXT: v_readfirstlane_b32 s11, v9 -; SI-NEXT: v_readfirstlane_b32 s10, v10 -; SI-NEXT: v_readfirstlane_b32 s9, v11 -; SI-NEXT: v_readfirstlane_b32 s8, v12 -; SI-NEXT: v_readfirstlane_b32 s7, v13 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v14 +; SI-NEXT: v_writelane_b32 v16, s52, 12 +; SI-NEXT: v_readfirstlane_b32 s40, v1 +; SI-NEXT: v_readfirstlane_b32 s41, v2 +; SI-NEXT: v_readfirstlane_b32 s14, v3 +; SI-NEXT: v_readfirstlane_b32 s15, v4 +; SI-NEXT: v_readfirstlane_b32 s12, v5 +; SI-NEXT: v_readfirstlane_b32 s13, v6 +; SI-NEXT: v_readfirstlane_b32 s10, v7 +; SI-NEXT: v_readfirstlane_b32 s11, v8 +; SI-NEXT: v_readfirstlane_b32 s8, v9 +; SI-NEXT: v_readfirstlane_b32 s9, v10 +; SI-NEXT: v_readfirstlane_b32 s6, v11 +; SI-NEXT: v_readfirstlane_b32 s7, v12 +; SI-NEXT: v_readfirstlane_b32 s4, v13 +; SI-NEXT: s_and_b64 s[42:43], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: v_writelane_b32 v16, s53, 13 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v11, s22 -; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v14, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s29, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s23, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s21, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s19, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s17, v14, 16 -; SI-NEXT: s_lshr_b32 s44, s6, 16 -; SI-NEXT: s_lshr_b32 s45, s8, 16 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: s_lshr_b32 s47, s12, 16 -; SI-NEXT: s_lshr_b32 s56, s14, 16 -; SI-NEXT: s_lshr_b32 s57, s40, 16 -; SI-NEXT: s_lshr_b32 s58, s42, 16 -; SI-NEXT: s_lshr_b32 s59, s29, 16 -; SI-NEXT: s_lshr_b32 s60, s27, 16 -; SI-NEXT: s_lshr_b32 s61, s25, 16 -; SI-NEXT: s_lshr_b32 s62, s23, 16 -; SI-NEXT: s_lshr_b32 s63, s21, 16 -; SI-NEXT: s_lshr_b32 s72, s19, 16 -; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_lshr_b32 
s30, s5, 16 +; SI-NEXT: s_lshr_b32 s31, s7, 16 +; SI-NEXT: s_lshr_b32 s34, s9, 16 +; SI-NEXT: s_lshr_b32 s35, s11, 16 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; SI-NEXT: s_lshr_b32 s37, s15, 16 +; SI-NEXT: s_lshr_b32 s38, s41, 16 +; SI-NEXT: s_lshr_b32 s39, s29, 16 +; SI-NEXT: s_lshr_b32 s48, s27, 16 +; SI-NEXT: s_lshr_b32 s49, s25, 16 +; SI-NEXT: s_lshr_b32 s50, s23, 16 +; SI-NEXT: s_lshr_b32 s51, s21, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s43, s43, 3 ; SI-NEXT: s_add_i32 s41, s41, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s42, s42, 3 ; SI-NEXT: s_add_i32 s40, s40, 3 +; SI-NEXT: s_add_i32 s15, s15, 3 ; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v11, s22 -; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v14, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s29, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s23, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s21, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, 
s19, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s17, v14, 16 -; SI-NEXT: s_lshr_b32 s44, s6, 16 -; SI-NEXT: s_lshr_b32 s45, s8, 16 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: s_lshr_b32 s47, s12, 16 -; SI-NEXT: s_lshr_b32 s56, s14, 16 -; SI-NEXT: s_lshr_b32 s57, s40, 16 -; SI-NEXT: s_lshr_b32 s58, s42, 16 -; SI-NEXT: s_lshr_b32 s59, s29, 16 -; SI-NEXT: s_lshr_b32 s60, s27, 16 -; SI-NEXT: s_lshr_b32 s61, s25, 16 -; SI-NEXT: s_lshr_b32 s62, s23, 16 -; SI-NEXT: s_lshr_b32 s63, s21, 16 -; SI-NEXT: s_lshr_b32 s72, s19, 16 -; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[26:27], 16 +; SI-NEXT: s_lshr_b32 s30, s5, 16 +; SI-NEXT: s_lshr_b32 s31, s7, 16 +; SI-NEXT: s_lshr_b32 s34, s9, 16 +; SI-NEXT: s_lshr_b32 s35, s11, 16 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; SI-NEXT: s_lshr_b32 s37, s15, 16 +; SI-NEXT: s_lshr_b32 s38, s41, 16 +; SI-NEXT: s_lshr_b32 s39, s29, 16 +; SI-NEXT: s_lshr_b32 s48, s27, 16 +; SI-NEXT: s_lshr_b32 s49, s25, 16 +; SI-NEXT: s_lshr_b32 s50, s23, 16 +; SI-NEXT: s_lshr_b32 s51, s21, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: buffer_store_dword v14, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: s_lshl_b32 s43, s92, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s43 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s90, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; 
SI-NEXT: v_add_i32_e32 v13, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_lshl_b32 s16, s88, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: 
v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, 
s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: 
s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s53, v16, 13 +; SI-NEXT: v_readlane_b32 s52, v16, 12 +; SI-NEXT: v_readlane_b32 s51, v16, 11 +; SI-NEXT: v_readlane_b32 s50, v16, 10 +; SI-NEXT: v_readlane_b32 s49, v16, 9 +; SI-NEXT: v_readlane_b32 s48, v16, 8 +; SI-NEXT: v_readlane_b32 s39, v16, 7 +; SI-NEXT: v_readlane_b32 s38, v16, 6 +; SI-NEXT: v_readlane_b32 s37, v16, 5 +; SI-NEXT: v_readlane_b32 s36, v16, 4 +; SI-NEXT: v_readlane_b32 s35, v16, 3 +; SI-NEXT: v_readlane_b32 s34, v16, 2 +; SI-NEXT: v_readlane_b32 s31, v16, 1 +; SI-NEXT: v_readlane_b32 s30, v16, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; 
implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v28i32_to_v56i16_scalar: @@ -5900,48 +5931,52 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v50, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_mov_b32_e32 v57, v12 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mov_b32_e32 v58, v10 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v60, v8 +; SI-NEXT: v_mov_b32_e32 v33, v6 +; SI-NEXT: v_mov_b32_e32 v35, v4 +; SI-NEXT: v_mov_b32_e32 v39, v2 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v61, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:36 -; SI-NEXT: v_mov_b32_e32 v32, v26 -; SI-NEXT: v_mov_b32_e32 v33, v24 -; SI-NEXT: v_mov_b32_e32 v34, v22 -; SI-NEXT: v_mov_b32_e32 v35, v20 -; SI-NEXT: v_mov_b32_e32 v36, v18 -; SI-NEXT: v_mov_b32_e32 v37, v16 -; SI-NEXT: v_mov_b32_e32 v38, v14 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:36 +; SI-NEXT: v_mov_b32_e32 v31, v26 +; SI-NEXT: v_mov_b32_e32 v41, v24 +; SI-NEXT: v_mov_b32_e32 v42, v22 +; SI-NEXT: v_mov_b32_e32 v43, v20 +; SI-NEXT: v_mov_b32_e32 v49, v18 +; SI-NEXT: v_mov_b32_e32 v44, v16 +; SI-NEXT: v_mov_b32_e32 v45, v14 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v7 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v29 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
v_lshlrev_b32_e32 v63, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v29 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill @@ -5950,87 +5985,87 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v6 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v8 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v10 -; SI-NEXT: s_waitcnt vmcnt(7) expcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v10 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v12 ; SI-NEXT: s_cbranch_scc0 .LBB15_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v7, v0, v18 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 ; SI-NEXT: v_or_b32_e32 v9, v0, v16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 ; SI-NEXT: v_or_b32_e32 v10, v0, v14 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: v_or_b32_e32 v11, v0, v5 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: v_or_b32_e32 v12, v0, v3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_or_b32_e32 v13, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v13, v0, v63 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_or_b32_e32 v14, v0, v44 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 -; SI-NEXT: v_or_b32_e32 v15, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v14, v0, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v15, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_or_b32_e32 v16, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v17, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 -; SI-NEXT: v_or_b32_e32 v8, v1, v18 +; SI-NEXT: v_or_b32_e32 v16, v0, v48 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 +; SI-NEXT: v_or_b32_e32 v17, v0, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_or_b32_e32 v18, v0, v58 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 -; SI-NEXT: v_or_b32_e32 v19, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v18, 
v0, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 +; SI-NEXT: v_or_b32_e32 v19, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v31 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 +; SI-NEXT: v_or_b32_e32 v8, v1, v20 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v20, v0, v56 +; SI-NEXT: v_or_b32_e32 v20, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v21, v0, v47 +; SI-NEXT: v_or_b32_e32 v21, v0, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v22, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v22, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v23, v0, v45 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v23, v0, v32 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v24, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v24, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_or_b32_e32 v25, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 +; SI-NEXT: v_or_b32_e32 v25, v0, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_or_b32_e32 v26, v0, v29 +; SI-NEXT: v_or_b32_e32 v26, v0, v52 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v27, v0, v63 +; SI-NEXT: v_or_b32_e32 v27, v0, v29 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -6040,9 +6075,10 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB15_3 ; SI-NEXT: .LBB15_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff @@ -6085,96 +6121,96 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: 
v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_or_b32_e32 v0, v63, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v48, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v38, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v37, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: 
v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v53, v0 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v46 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 @@ -6198,74 +6234,83 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB15_4: -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v56, v45 -; SI-NEXT: v_mov_b32_e32 v45, v29 -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v62, v58 +; SI-NEXT: v_mov_b32_e32 v58, v51 +; SI-NEXT: v_mov_b32_e32 v51, v47 +; SI-NEXT: v_mov_b32_e32 v47, v44 ; SI-NEXT: v_mov_b32_e32 v44, v41 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_mov_b32_e32 v41, v39 -; SI-NEXT: v_mov_b32_e32 v39, v36 -; SI-NEXT: v_mov_b32_e32 v36, v33 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v43, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_mov_b32_e32 v49, v42 -; SI-NEXT: v_mov_b32_e32 v42, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v62, v60 -; SI-NEXT: v_mov_b32_e32 v60, v57 +; SI-NEXT: v_mov_b32_e32 v41, v30 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v63, v59 +; SI-NEXT: v_mov_b32_e32 v59, v56 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_mov_b32_e32 v56, v50 +; SI-NEXT: v_mov_b32_e32 v50, v45 +; SI-NEXT: v_mov_b32_e32 v45, v42 +; SI-NEXT: v_mov_b32_e32 v42, v28 +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:140 
; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v61, v52 +; SI-NEXT: v_mov_b32_e32 v52, v57 ; SI-NEXT: v_mov_b32_e32 v57, v46 -; SI-NEXT: v_mov_b32_e32 v46, v61 -; SI-NEXT: v_mov_b32_e32 v61, v58 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v31 +; SI-NEXT: v_mov_b32_e32 v46, v49 +; SI-NEXT: v_mov_b32_e32 v49, v43 +; SI-NEXT: v_mov_b32_e32 v43, v31 +; SI-NEXT: v_mov_b32_e32 v53, v40 +; SI-NEXT: v_mov_b32_e32 v40, v48 +; SI-NEXT: v_mov_b32_e32 v48, v39 +; SI-NEXT: v_mov_b32_e32 v39, v38 +; SI-NEXT: v_mov_b32_e32 v38, v37 +; SI-NEXT: v_mov_b32_e32 v37, v36 +; SI-NEXT: v_mov_b32_e32 v36, v35 +; SI-NEXT: v_mov_b32_e32 v35, v55 +; SI-NEXT: v_mov_b32_e32 v55, v34 +; SI-NEXT: v_mov_b32_e32 v34, v33 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v54, v32 +; SI-NEXT: v_mov_b32_e32 v32, v60 +; SI-NEXT: v_mov_b32_e32 v60, v29 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v31, v47 -; SI-NEXT: v_mov_b32_e32 v47, v58 -; SI-NEXT: v_mov_b32_e32 v58, v61 -; SI-NEXT: v_mov_b32_e32 v61, v46 +; SI-NEXT: v_mov_b32_e32 v29, v60 +; SI-NEXT: v_mov_b32_e32 v60, v32 +; SI-NEXT: v_mov_b32_e32 v32, v54 +; SI-NEXT: v_mov_b32_e32 v54, v33 +; SI-NEXT: v_mov_b32_e32 v33, v34 +; SI-NEXT: v_mov_b32_e32 v34, v55 +; SI-NEXT: v_mov_b32_e32 v55, v35 +; SI-NEXT: v_mov_b32_e32 v35, v36 +; SI-NEXT: v_mov_b32_e32 v36, v37 +; SI-NEXT: v_mov_b32_e32 v37, v38 +; SI-NEXT: v_mov_b32_e32 v38, v39 +; SI-NEXT: v_mov_b32_e32 v39, v48 +; SI-NEXT: v_mov_b32_e32 v48, v40 +; SI-NEXT: v_mov_b32_e32 v40, v53 +; SI-NEXT: v_mov_b32_e32 v31, v43 +; SI-NEXT: v_mov_b32_e32 v43, v49 +; SI-NEXT: v_mov_b32_e32 v49, v46 ; SI-NEXT: v_mov_b32_e32 v46, v57 -; SI-NEXT: v_mov_b32_e32 v57, v60 -; SI-NEXT: v_mov_b32_e32 v60, v62 -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v34, v37 -; SI-NEXT: v_mov_b32_e32 v37, v48 -; SI-NEXT: v_mov_b32_e32 v48, v42 -; SI-NEXT: v_mov_b32_e32 v42, v49 -; SI-NEXT: v_mov_b32_e32 v49, v50 -; SI-NEXT: v_mov_b32_e32 v50, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, v43 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v33, v36 -; SI-NEXT: v_mov_b32_e32 v36, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v44 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v29, v45 -; SI-NEXT: v_mov_b32_e32 v45, v56 +; SI-NEXT: v_mov_b32_e32 v57, v52 +; SI-NEXT: v_mov_b32_e32 v52, v61 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v28, v42 +; SI-NEXT: v_mov_b32_e32 v42, v45 +; SI-NEXT: v_mov_b32_e32 v45, v50 +; SI-NEXT: v_mov_b32_e32 v50, v56 ; SI-NEXT: v_mov_b32_e32 v56, v59 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v59, v63 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v41 +; SI-NEXT: v_mov_b32_e32 v41, v44 +; SI-NEXT: v_mov_b32_e32 v44, v47 +; SI-NEXT: 
v_mov_b32_e32 v47, v51 +; SI-NEXT: v_mov_b32_e32 v51, v58 +; SI-NEXT: v_mov_b32_e32 v58, v62 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB15_2 ; ; VI-LABEL: bitcast_v56i16_to_v28i32_scalar: @@ -14755,223 +14800,227 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15 -; SI-NEXT: v_mov_b32_e32 v30, s16 -; SI-NEXT: v_mov_b32_e32 v29, s17 -; SI-NEXT: v_mov_b32_e32 v25, s18 -; SI-NEXT: v_mov_b32_e32 v23, s19 -; SI-NEXT: v_mov_b32_e32 v28, s20 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v27, s16 +; SI-NEXT: v_mov_b32_e32 v28, s17 +; SI-NEXT: v_mov_b32_e32 v23, s18 +; SI-NEXT: v_mov_b32_e32 v24, s19 +; SI-NEXT: v_mov_b32_e32 v25, s20 ; SI-NEXT: v_mov_b32_e32 v26, s21 -; SI-NEXT: v_mov_b32_e32 v24, s22 +; SI-NEXT: v_mov_b32_e32 v21, s22 ; SI-NEXT: v_mov_b32_e32 v22, s23 -; SI-NEXT: v_mov_b32_e32 v20, s24 -; SI-NEXT: v_mov_b32_e32 v19, s25 -; SI-NEXT: v_mov_b32_e32 v18, s26 -; SI-NEXT: v_mov_b32_e32 v17, s27 -; SI-NEXT: v_mov_b32_e32 v16, s28 -; SI-NEXT: v_mov_b32_e32 v15, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v19, s24 +; SI-NEXT: v_mov_b32_e32 v20, s25 +; SI-NEXT: v_mov_b32_e32 v17, s26 +; SI-NEXT: v_mov_b32_e32 v18, s27 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v15, s28 +; SI-NEXT: v_mov_b32_e32 v16, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v21, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v31, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v32, v8, v7, 16 -; 
SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v37, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v39, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v49, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v51, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v54, v22, v24, 16 -; SI-NEXT: v_alignbit_b32 v40, v26, v28, 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v42, v23, v25, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v18 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v44, v29, v30, 16 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v20 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[27:28], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 -; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 -; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_lshr_b64 v[30:31], v[11:12], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16 ; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 
v[48:49], v[19:20], 16 +; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 +; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 ; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 ; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_lshr_b64 v[34:35], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 ; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_alignbit_b32 v21, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v31, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v32, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v37, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v39, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v49, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v51, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v54, v22, v24, 16 -; SI-NEXT: v_alignbit_b32 v40, v26, v28, 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[27:28], 16 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v42, v23, v25, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v18 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v44, v29, v30, 16 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v20 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v58, 
16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v30, 0xffff, v30 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v44 -; SI-NEXT: v_or_b32_e32 v30, v30, v44 -; SI-NEXT: buffer_store_dword v30, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v56 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v52 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 +; SI-NEXT: v_or_b32_e32 v27, v27, v37 +; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v59 +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v42 -; SI-NEXT: v_or_b32_e32 v25, v25, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v25, v29, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v51 ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v23, v23, v27 +; SI-NEXT: v_add_i32_e32 v27, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v23, v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v47 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v58 +; SI-NEXT: v_or_b32_e32 v23, v23, v24 +; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v40 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v50 +; SI-NEXT: v_and_b32_e32 v24, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v23, v24, v23 +; SI-NEXT: v_add_i32_e32 v24, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v46 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v57 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v24, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v45 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v49 +; SI-NEXT: v_or_b32_e32 v21, v21, v23 +; SI-NEXT: v_add_i32_e32 v23, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v21, 
v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v51 -; SI-NEXT: v_or_b32_e32 v20, v20, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v56 +; SI-NEXT: v_or_b32_e32 v21, v21, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 +; SI-NEXT: v_or_b32_e32 v19, v19, v21 +; SI-NEXT: v_add_i32_e32 v21, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v47 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v49 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v41 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v46 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v39 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v35 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v55 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v45 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v38 ; SI-NEXT: v_or_b32_e32 v1, v1, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -14983,7 +15032,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: 
buffer_store_dword v1, v2, s[0:3], 0 offen @@ -14995,7 +15044,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -15007,7 +15056,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -15019,74 +15068,77 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v29 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_load_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr44 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v28f32_to_v56i16_scalar: @@ -17015,48 +17067,52 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v50, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_mov_b32_e32 v57, v12 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mov_b32_e32 v58, v10 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v60, v8 +; SI-NEXT: v_mov_b32_e32 v33, v6 +; SI-NEXT: v_mov_b32_e32 v35, v4 +; SI-NEXT: v_mov_b32_e32 v39, v2 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v61, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 
offset:44 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:36 -; SI-NEXT: v_mov_b32_e32 v32, v26 -; SI-NEXT: v_mov_b32_e32 v33, v24 -; SI-NEXT: v_mov_b32_e32 v34, v22 -; SI-NEXT: v_mov_b32_e32 v35, v20 -; SI-NEXT: v_mov_b32_e32 v36, v18 -; SI-NEXT: v_mov_b32_e32 v37, v16 -; SI-NEXT: v_mov_b32_e32 v38, v14 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:36 +; SI-NEXT: v_mov_b32_e32 v31, v26 +; SI-NEXT: v_mov_b32_e32 v41, v24 +; SI-NEXT: v_mov_b32_e32 v42, v22 +; SI-NEXT: v_mov_b32_e32 v43, v20 +; SI-NEXT: v_mov_b32_e32 v49, v18 +; SI-NEXT: v_mov_b32_e32 v44, v16 +; SI-NEXT: v_mov_b32_e32 v45, v14 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v7 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v29 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v29 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill @@ -17065,87 +17121,87 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v6 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v8 +; 
SI-NEXT: v_lshlrev_b32_e32 v53, 16, v8 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v10 -; SI-NEXT: s_waitcnt vmcnt(7) expcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v10 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v12 ; SI-NEXT: s_cbranch_scc0 .LBB31_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v7, v0, v18 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 ; SI-NEXT: v_or_b32_e32 v9, v0, v16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 ; SI-NEXT: v_or_b32_e32 v10, v0, v14 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: v_or_b32_e32 v11, v0, v5 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: v_or_b32_e32 v12, v0, v3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_or_b32_e32 v13, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v13, v0, v63 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_or_b32_e32 v14, v0, v44 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 -; SI-NEXT: v_or_b32_e32 v15, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v14, v0, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v15, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_or_b32_e32 v16, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v17, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 -; SI-NEXT: v_or_b32_e32 v8, v1, v18 +; SI-NEXT: v_or_b32_e32 v16, v0, v48 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 +; SI-NEXT: v_or_b32_e32 v17, v0, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_or_b32_e32 v18, v0, v58 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 -; SI-NEXT: v_or_b32_e32 v19, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v18, v0, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 +; SI-NEXT: v_or_b32_e32 v19, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v31 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 +; SI-NEXT: v_or_b32_e32 v8, v1, v20 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v20, v0, v56 +; SI-NEXT: v_or_b32_e32 v20, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v21, v0, v47 +; SI-NEXT: v_or_b32_e32 v21, v0, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v22, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v22, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v23, v0, v45 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v23, v0, v32 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: 
v_or_b32_e32 v24, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v24, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_or_b32_e32 v25, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 +; SI-NEXT: v_or_b32_e32 v25, v0, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_or_b32_e32 v26, v0, v29 +; SI-NEXT: v_or_b32_e32 v26, v0, v52 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v27, v0, v63 +; SI-NEXT: v_or_b32_e32 v27, v0, v29 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -17155,9 +17211,10 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB31_3 ; SI-NEXT: .LBB31_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff @@ -17200,96 +17257,96 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, 
vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_or_b32_e32 v0, v63, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v48, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v38, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v37, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v53, v0 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v46 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; 
SI-NEXT: v_mov_b32_e32 v1, s5 @@ -17313,74 +17370,83 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB31_4: -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v56, v45 -; SI-NEXT: v_mov_b32_e32 v45, v29 -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v62, v58 +; SI-NEXT: v_mov_b32_e32 v58, v51 +; SI-NEXT: v_mov_b32_e32 v51, v47 +; SI-NEXT: v_mov_b32_e32 v47, v44 ; SI-NEXT: v_mov_b32_e32 v44, v41 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_mov_b32_e32 v41, v39 -; SI-NEXT: v_mov_b32_e32 v39, v36 -; SI-NEXT: v_mov_b32_e32 v36, v33 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v43, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_mov_b32_e32 v49, v42 -; SI-NEXT: v_mov_b32_e32 v42, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v62, v60 -; SI-NEXT: v_mov_b32_e32 v60, v57 +; SI-NEXT: v_mov_b32_e32 v41, v30 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v63, v59 +; SI-NEXT: v_mov_b32_e32 v59, v56 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_mov_b32_e32 v56, v50 +; SI-NEXT: v_mov_b32_e32 v50, v45 +; SI-NEXT: v_mov_b32_e32 v45, v42 +; SI-NEXT: v_mov_b32_e32 v42, v28 +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v61, v52 +; SI-NEXT: v_mov_b32_e32 v52, v57 ; SI-NEXT: v_mov_b32_e32 v57, v46 -; SI-NEXT: v_mov_b32_e32 v46, v61 -; SI-NEXT: v_mov_b32_e32 v61, v58 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v31 +; SI-NEXT: v_mov_b32_e32 v46, v49 +; SI-NEXT: v_mov_b32_e32 v49, v43 +; SI-NEXT: v_mov_b32_e32 v43, v31 +; SI-NEXT: v_mov_b32_e32 v53, v40 +; SI-NEXT: v_mov_b32_e32 v40, v48 +; SI-NEXT: v_mov_b32_e32 v48, v39 +; SI-NEXT: v_mov_b32_e32 v39, v38 +; SI-NEXT: v_mov_b32_e32 v38, v37 +; SI-NEXT: v_mov_b32_e32 v37, v36 +; SI-NEXT: v_mov_b32_e32 v36, v35 +; SI-NEXT: v_mov_b32_e32 v35, v55 +; SI-NEXT: v_mov_b32_e32 v55, v34 +; SI-NEXT: v_mov_b32_e32 v34, v33 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v54, v32 +; SI-NEXT: v_mov_b32_e32 v32, v60 +; SI-NEXT: v_mov_b32_e32 v60, v29 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v31, v47 -; SI-NEXT: v_mov_b32_e32 v47, v58 -; SI-NEXT: v_mov_b32_e32 v58, v61 -; SI-NEXT: v_mov_b32_e32 v61, v46 +; SI-NEXT: 
v_mov_b32_e32 v29, v60 +; SI-NEXT: v_mov_b32_e32 v60, v32 +; SI-NEXT: v_mov_b32_e32 v32, v54 +; SI-NEXT: v_mov_b32_e32 v54, v33 +; SI-NEXT: v_mov_b32_e32 v33, v34 +; SI-NEXT: v_mov_b32_e32 v34, v55 +; SI-NEXT: v_mov_b32_e32 v55, v35 +; SI-NEXT: v_mov_b32_e32 v35, v36 +; SI-NEXT: v_mov_b32_e32 v36, v37 +; SI-NEXT: v_mov_b32_e32 v37, v38 +; SI-NEXT: v_mov_b32_e32 v38, v39 +; SI-NEXT: v_mov_b32_e32 v39, v48 +; SI-NEXT: v_mov_b32_e32 v48, v40 +; SI-NEXT: v_mov_b32_e32 v40, v53 +; SI-NEXT: v_mov_b32_e32 v31, v43 +; SI-NEXT: v_mov_b32_e32 v43, v49 +; SI-NEXT: v_mov_b32_e32 v49, v46 ; SI-NEXT: v_mov_b32_e32 v46, v57 -; SI-NEXT: v_mov_b32_e32 v57, v60 -; SI-NEXT: v_mov_b32_e32 v60, v62 -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v34, v37 -; SI-NEXT: v_mov_b32_e32 v37, v48 -; SI-NEXT: v_mov_b32_e32 v48, v42 -; SI-NEXT: v_mov_b32_e32 v42, v49 -; SI-NEXT: v_mov_b32_e32 v49, v50 -; SI-NEXT: v_mov_b32_e32 v50, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, v43 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v33, v36 -; SI-NEXT: v_mov_b32_e32 v36, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v44 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v29, v45 -; SI-NEXT: v_mov_b32_e32 v45, v56 +; SI-NEXT: v_mov_b32_e32 v57, v52 +; SI-NEXT: v_mov_b32_e32 v52, v61 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v28, v42 +; SI-NEXT: v_mov_b32_e32 v42, v45 +; SI-NEXT: v_mov_b32_e32 v45, v50 +; SI-NEXT: v_mov_b32_e32 v50, v56 ; SI-NEXT: v_mov_b32_e32 v56, v59 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v59, v63 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v41 +; SI-NEXT: v_mov_b32_e32 v41, v44 +; SI-NEXT: v_mov_b32_e32 v44, v47 +; SI-NEXT: v_mov_b32_e32 v47, v51 +; SI-NEXT: v_mov_b32_e32 v51, v58 +; SI-NEXT: v_mov_b32_e32 v58, v62 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB31_2 ; ; VI-LABEL: bitcast_v56i16_to_v28f32_scalar: @@ -19525,8 +19591,8 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a, ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v24 ; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v29 ; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v27 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v23 ; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v21 ; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v17 @@ -19565,8 +19631,8 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a, ; SI-NEXT: v_cvt_f32_f16_e32 v4, v63 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v62 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v61 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v47 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v45 ; 
SI-NEXT: v_mov_b32_e32 v45, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v43 @@ -25008,337 +25074,368 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v14i64_to_v56i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v16, s30, 0 +; SI-NEXT: v_writelane_b32 v16, s31, 1 +; SI-NEXT: v_writelane_b32 v16, s34, 2 +; SI-NEXT: v_writelane_b32 v16, s35, 3 +; SI-NEXT: v_writelane_b32 v16, s36, 4 +; SI-NEXT: v_writelane_b32 v16, s37, 5 +; SI-NEXT: v_writelane_b32 v16, s38, 6 +; SI-NEXT: v_writelane_b32 v16, s39, 7 +; SI-NEXT: v_writelane_b32 v16, s48, 8 +; SI-NEXT: v_writelane_b32 v16, s49, 9 +; SI-NEXT: v_writelane_b32 v16, s50, 10 +; SI-NEXT: v_writelane_b32 v16, s51, 11 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15 -; SI-NEXT: v_readfirstlane_b32 s43, v1 -; SI-NEXT: v_readfirstlane_b32 s42, v2 -; SI-NEXT: v_readfirstlane_b32 s41, v3 -; SI-NEXT: v_readfirstlane_b32 s40, v4 -; SI-NEXT: v_readfirstlane_b32 s15, v5 -; SI-NEXT: v_readfirstlane_b32 s14, v6 -; SI-NEXT: v_readfirstlane_b32 s13, v7 -; SI-NEXT: v_readfirstlane_b32 s12, v8 -; SI-NEXT: v_readfirstlane_b32 s11, v9 -; SI-NEXT: v_readfirstlane_b32 s10, v10 -; SI-NEXT: v_readfirstlane_b32 s9, v11 -; SI-NEXT: v_readfirstlane_b32 s8, v12 -; SI-NEXT: v_readfirstlane_b32 s7, v13 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v14 +; SI-NEXT: v_writelane_b32 v16, s52, 12 +; SI-NEXT: v_readfirstlane_b32 s40, v1 +; SI-NEXT: v_readfirstlane_b32 s41, v2 +; SI-NEXT: v_readfirstlane_b32 s14, v3 +; SI-NEXT: v_readfirstlane_b32 s15, v4 +; SI-NEXT: v_readfirstlane_b32 s12, v5 +; SI-NEXT: v_readfirstlane_b32 s13, v6 +; SI-NEXT: v_readfirstlane_b32 s10, v7 +; SI-NEXT: v_readfirstlane_b32 s11, v8 +; SI-NEXT: v_readfirstlane_b32 s8, v9 +; SI-NEXT: v_readfirstlane_b32 s9, v10 +; SI-NEXT: v_readfirstlane_b32 s6, v11 +; SI-NEXT: v_readfirstlane_b32 s7, v12 +; SI-NEXT: v_readfirstlane_b32 s4, v13 +; SI-NEXT: s_and_b64 s[42:43], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: v_writelane_b32 v16, s53, 13 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v11, s22 -; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v14, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s29, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s23, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s21, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s19, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s17, v14, 16 -; SI-NEXT: s_lshr_b32 s44, s6, 16 -; SI-NEXT: s_lshr_b32 s45, s8, 16 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: 
s_lshr_b32 s47, s12, 16 -; SI-NEXT: s_lshr_b32 s56, s14, 16 -; SI-NEXT: s_lshr_b32 s57, s40, 16 -; SI-NEXT: s_lshr_b32 s58, s42, 16 -; SI-NEXT: s_lshr_b32 s59, s29, 16 -; SI-NEXT: s_lshr_b32 s60, s27, 16 -; SI-NEXT: s_lshr_b32 s61, s25, 16 -; SI-NEXT: s_lshr_b32 s62, s23, 16 -; SI-NEXT: s_lshr_b32 s63, s21, 16 -; SI-NEXT: s_lshr_b32 s72, s19, 16 -; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_lshr_b32 s30, s5, 16 +; SI-NEXT: s_lshr_b32 s31, s7, 16 +; SI-NEXT: s_lshr_b32 s34, s9, 16 +; SI-NEXT: s_lshr_b32 s35, s11, 16 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; SI-NEXT: s_lshr_b32 s37, s15, 16 +; SI-NEXT: s_lshr_b32 s38, s41, 16 +; SI-NEXT: s_lshr_b32 s39, s29, 16 +; SI-NEXT: s_lshr_b32 s48, s27, 16 +; SI-NEXT: s_lshr_b32 s49, s25, 16 +; SI-NEXT: s_lshr_b32 s50, s23, 16 +; SI-NEXT: s_lshr_b32 s51, s21, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 +; SI-NEXT: s_add_u32 s40, s40, 3 +; SI-NEXT: s_addc_u32 s41, s41, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s43, s43, 3 -; SI-NEXT: s_addc_u32 s42, s42, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v11, s22 -; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v14, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: 
v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s29, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s23, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s21, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s19, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s17, v14, 16 -; SI-NEXT: s_lshr_b32 s44, s6, 16 -; SI-NEXT: s_lshr_b32 s45, s8, 16 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: s_lshr_b32 s47, s12, 16 -; SI-NEXT: s_lshr_b32 s56, s14, 16 -; SI-NEXT: s_lshr_b32 s57, s40, 16 -; SI-NEXT: s_lshr_b32 s58, s42, 16 -; SI-NEXT: s_lshr_b32 s59, s29, 16 -; SI-NEXT: s_lshr_b32 s60, s27, 16 -; SI-NEXT: s_lshr_b32 s61, s25, 16 -; SI-NEXT: s_lshr_b32 s62, s23, 16 -; SI-NEXT: s_lshr_b32 s63, s21, 16 -; SI-NEXT: s_lshr_b32 s72, s19, 16 -; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s30, s5, 16 +; SI-NEXT: s_lshr_b32 s31, s7, 16 +; SI-NEXT: s_lshr_b32 s34, s9, 16 +; SI-NEXT: s_lshr_b32 s35, s11, 16 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; SI-NEXT: s_lshr_b32 s37, s15, 16 +; SI-NEXT: s_lshr_b32 s38, s41, 16 +; SI-NEXT: s_lshr_b32 s39, s29, 16 +; SI-NEXT: s_lshr_b32 s48, s27, 16 +; SI-NEXT: s_lshr_b32 s49, s25, 16 +; SI-NEXT: s_lshr_b32 s50, s23, 16 +; SI-NEXT: s_lshr_b32 s51, s21, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: buffer_store_dword v14, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: s_lshl_b32 s43, s92, 
16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s43 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s90, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_lshl_b32 s16, s88, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: 
buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 
0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; 
SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s53, v16, 13 +; SI-NEXT: v_readlane_b32 s52, v16, 12 +; SI-NEXT: v_readlane_b32 s51, v16, 11 +; SI-NEXT: v_readlane_b32 s50, v16, 10 +; SI-NEXT: v_readlane_b32 s49, v16, 9 +; SI-NEXT: v_readlane_b32 s48, v16, 8 +; SI-NEXT: v_readlane_b32 s39, v16, 7 +; SI-NEXT: v_readlane_b32 s38, v16, 6 +; SI-NEXT: v_readlane_b32 s37, v16, 5 +; SI-NEXT: v_readlane_b32 s36, v16, 4 +; SI-NEXT: v_readlane_b32 s35, v16, 3 +; SI-NEXT: v_readlane_b32 s34, v16, 2 +; SI-NEXT: v_readlane_b32 s31, v16, 1 +; SI-NEXT: v_readlane_b32 s30, v16, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr11 +; 
SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v14i64_to_v56i16_scalar: @@ -27271,48 +27368,52 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v50, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_mov_b32_e32 v57, v12 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mov_b32_e32 v58, v10 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v60, v8 +; SI-NEXT: v_mov_b32_e32 v33, v6 +; SI-NEXT: v_mov_b32_e32 v35, v4 +; SI-NEXT: v_mov_b32_e32 v39, v2 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v61, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:36 -; SI-NEXT: v_mov_b32_e32 v32, v26 -; SI-NEXT: v_mov_b32_e32 v33, v24 -; SI-NEXT: v_mov_b32_e32 v34, v22 -; SI-NEXT: v_mov_b32_e32 v35, v20 -; SI-NEXT: v_mov_b32_e32 v36, v18 -; SI-NEXT: v_mov_b32_e32 v37, v16 -; SI-NEXT: v_mov_b32_e32 v38, v14 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:36 +; SI-NEXT: v_mov_b32_e32 v31, v26 +; SI-NEXT: v_mov_b32_e32 v41, v24 +; SI-NEXT: 
v_mov_b32_e32 v42, v22 +; SI-NEXT: v_mov_b32_e32 v43, v20 +; SI-NEXT: v_mov_b32_e32 v49, v18 +; SI-NEXT: v_mov_b32_e32 v44, v16 +; SI-NEXT: v_mov_b32_e32 v45, v14 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v7 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v29 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v29 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill @@ -27321,87 +27422,87 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v6 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v8 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v10 -; SI-NEXT: s_waitcnt vmcnt(7) expcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v10 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v12 ; SI-NEXT: s_cbranch_scc0 .LBB43_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v7, v0, v18 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 ; SI-NEXT: v_or_b32_e32 v9, v0, v16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 ; SI-NEXT: v_or_b32_e32 v10, v0, v14 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: v_or_b32_e32 v11, v0, v5 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: v_or_b32_e32 v12, v0, v3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_or_b32_e32 v13, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v13, v0, v63 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_or_b32_e32 v14, v0, v44 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 -; SI-NEXT: 
v_or_b32_e32 v15, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v14, v0, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v15, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_or_b32_e32 v16, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v17, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 -; SI-NEXT: v_or_b32_e32 v8, v1, v18 +; SI-NEXT: v_or_b32_e32 v16, v0, v48 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 +; SI-NEXT: v_or_b32_e32 v17, v0, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_or_b32_e32 v18, v0, v58 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 -; SI-NEXT: v_or_b32_e32 v19, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v18, v0, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 +; SI-NEXT: v_or_b32_e32 v19, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v31 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 +; SI-NEXT: v_or_b32_e32 v8, v1, v20 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v20, v0, v56 +; SI-NEXT: v_or_b32_e32 v20, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v21, v0, v47 +; SI-NEXT: v_or_b32_e32 v21, v0, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v22, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v22, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v23, v0, v45 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v23, v0, v32 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v24, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v24, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_or_b32_e32 v25, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 +; SI-NEXT: v_or_b32_e32 v25, v0, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_or_b32_e32 v26, v0, v29 +; SI-NEXT: v_or_b32_e32 v26, v0, v52 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v27, v0, v63 +; SI-NEXT: v_or_b32_e32 v27, v0, v29 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -27411,9 +27512,10 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB43_3 ; SI-NEXT: .LBB43_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 
offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff @@ -27456,96 +27558,96 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_or_b32_e32 v0, v63, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v48, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v38, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v37, v0 ; SI-NEXT: v_add_i32_e32 v18, 
vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v53, v0 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v46 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 @@ -27569,74 +27671,83 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB43_4: -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v56, v45 -; SI-NEXT: v_mov_b32_e32 v45, v29 -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v62, v58 +; SI-NEXT: v_mov_b32_e32 v58, v51 +; SI-NEXT: v_mov_b32_e32 v51, v47 +; SI-NEXT: v_mov_b32_e32 v47, v44 ; SI-NEXT: v_mov_b32_e32 v44, v41 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_mov_b32_e32 v41, v39 -; SI-NEXT: v_mov_b32_e32 v39, v36 -; SI-NEXT: v_mov_b32_e32 v36, v33 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v43, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: 
v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_mov_b32_e32 v49, v42 -; SI-NEXT: v_mov_b32_e32 v42, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v62, v60 -; SI-NEXT: v_mov_b32_e32 v60, v57 +; SI-NEXT: v_mov_b32_e32 v41, v30 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v63, v59 +; SI-NEXT: v_mov_b32_e32 v59, v56 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_mov_b32_e32 v56, v50 +; SI-NEXT: v_mov_b32_e32 v50, v45 +; SI-NEXT: v_mov_b32_e32 v45, v42 +; SI-NEXT: v_mov_b32_e32 v42, v28 +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v61, v52 +; SI-NEXT: v_mov_b32_e32 v52, v57 ; SI-NEXT: v_mov_b32_e32 v57, v46 -; SI-NEXT: v_mov_b32_e32 v46, v61 -; SI-NEXT: v_mov_b32_e32 v61, v58 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v31 +; SI-NEXT: v_mov_b32_e32 v46, v49 +; SI-NEXT: v_mov_b32_e32 v49, v43 +; SI-NEXT: v_mov_b32_e32 v43, v31 +; SI-NEXT: v_mov_b32_e32 v53, v40 +; SI-NEXT: v_mov_b32_e32 v40, v48 +; SI-NEXT: v_mov_b32_e32 v48, v39 +; SI-NEXT: v_mov_b32_e32 v39, v38 +; SI-NEXT: v_mov_b32_e32 v38, v37 +; SI-NEXT: v_mov_b32_e32 v37, v36 +; SI-NEXT: v_mov_b32_e32 v36, v35 +; SI-NEXT: v_mov_b32_e32 v35, v55 +; SI-NEXT: v_mov_b32_e32 v55, v34 +; SI-NEXT: v_mov_b32_e32 v34, v33 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v54, v32 +; SI-NEXT: v_mov_b32_e32 v32, v60 +; SI-NEXT: v_mov_b32_e32 v60, v29 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v31, v47 -; SI-NEXT: v_mov_b32_e32 v47, v58 -; SI-NEXT: v_mov_b32_e32 v58, v61 -; SI-NEXT: v_mov_b32_e32 v61, v46 +; SI-NEXT: v_mov_b32_e32 v29, v60 +; SI-NEXT: v_mov_b32_e32 v60, v32 +; SI-NEXT: v_mov_b32_e32 v32, v54 +; SI-NEXT: v_mov_b32_e32 v54, v33 +; SI-NEXT: v_mov_b32_e32 v33, v34 +; SI-NEXT: v_mov_b32_e32 v34, v55 +; SI-NEXT: v_mov_b32_e32 v55, v35 +; SI-NEXT: v_mov_b32_e32 v35, v36 +; SI-NEXT: v_mov_b32_e32 v36, v37 +; SI-NEXT: v_mov_b32_e32 v37, v38 +; SI-NEXT: v_mov_b32_e32 v38, v39 +; SI-NEXT: v_mov_b32_e32 v39, v48 +; SI-NEXT: v_mov_b32_e32 v48, v40 +; SI-NEXT: v_mov_b32_e32 v40, v53 +; SI-NEXT: v_mov_b32_e32 v31, v43 +; SI-NEXT: v_mov_b32_e32 v43, v49 +; SI-NEXT: v_mov_b32_e32 v49, v46 ; SI-NEXT: v_mov_b32_e32 v46, v57 -; SI-NEXT: v_mov_b32_e32 v57, v60 -; SI-NEXT: v_mov_b32_e32 v60, v62 -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v34, v37 -; SI-NEXT: v_mov_b32_e32 v37, v48 -; SI-NEXT: v_mov_b32_e32 v48, v42 -; SI-NEXT: v_mov_b32_e32 v42, v49 -; SI-NEXT: v_mov_b32_e32 v49, v50 -; SI-NEXT: v_mov_b32_e32 v50, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, v43 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v33, v36 -; SI-NEXT: v_mov_b32_e32 v36, v39 -; 
SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v44 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v29, v45 -; SI-NEXT: v_mov_b32_e32 v45, v56 +; SI-NEXT: v_mov_b32_e32 v57, v52 +; SI-NEXT: v_mov_b32_e32 v52, v61 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v28, v42 +; SI-NEXT: v_mov_b32_e32 v42, v45 +; SI-NEXT: v_mov_b32_e32 v45, v50 +; SI-NEXT: v_mov_b32_e32 v50, v56 ; SI-NEXT: v_mov_b32_e32 v56, v59 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v59, v63 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v41 +; SI-NEXT: v_mov_b32_e32 v41, v44 +; SI-NEXT: v_mov_b32_e32 v44, v47 +; SI-NEXT: v_mov_b32_e32 v47, v51 +; SI-NEXT: v_mov_b32_e32 v51, v58 +; SI-NEXT: v_mov_b32_e32 v58, v62 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB43_2 ; ; VI-LABEL: bitcast_v56i16_to_v14i64_scalar: @@ -34376,194 +34487,198 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v15, s28 ; SI-NEXT: v_mov_b32_e32 v16, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v30, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v31, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v32, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 
v36, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v38, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v48, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v51, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v53, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v40, v26, v25, 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v42, v24, v23, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v18 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v44, v28, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v20 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[27:28], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 ; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_lshr_b64 v[30:31], v[11:12], 16 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 ; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 ; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 
v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_alignbit_b32 v29, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v30, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v31, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v32, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v36, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v38, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v48, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v51, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v53, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v40, v26, v25, 16 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 +; SI-NEXT: v_lshr_b64 v[34:35], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[27:28], 16 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v42, v24, v23, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v18 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v44, v28, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v20 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 ; SI-NEXT: .LBB49_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v52 ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v44 -; SI-NEXT: v_or_b32_e32 v27, v27, v44 +; SI-NEXT: v_or_b32_e32 v27, v27, v37 ; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v56 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v59 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 ; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v51 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v27 ; SI-NEXT: 
v_add_i32_e32 v27, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v23, v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v47 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v58 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v40 -; SI-NEXT: v_or_b32_e32 v23, v23, v24 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v50 +; SI-NEXT: v_and_b32_e32 v24, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v23, v24, v23 ; SI-NEXT: v_add_i32_e32 v24, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v46 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v57 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v49 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v45 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v56 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v47 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v46 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v35 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v45 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v36 +; SI-NEXT: 
v_lshlrev_b32_e32 v15, 16, v38 ; SI-NEXT: v_or_b32_e32 v1, v1, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34575,7 +34690,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34587,7 +34702,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34599,7 +34714,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34611,7 +34726,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34623,7 +34738,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34635,50 +34750,53 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: 
buffer_load_dword v43, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr44 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v14f64_to_v56i16_scalar: @@ -36579,48 +36697,52 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v50, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: 
v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_mov_b32_e32 v57, v12 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mov_b32_e32 v58, v10 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v60, v8 +; SI-NEXT: v_mov_b32_e32 v33, v6 +; SI-NEXT: v_mov_b32_e32 v35, v4 +; SI-NEXT: v_mov_b32_e32 v39, v2 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v61, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:44 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:36 -; SI-NEXT: v_mov_b32_e32 v32, v26 -; SI-NEXT: v_mov_b32_e32 v33, v24 -; SI-NEXT: v_mov_b32_e32 v34, v22 -; SI-NEXT: v_mov_b32_e32 v35, v20 -; SI-NEXT: v_mov_b32_e32 v36, v18 -; SI-NEXT: v_mov_b32_e32 v37, v16 -; SI-NEXT: v_mov_b32_e32 v38, v14 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:36 +; SI-NEXT: v_mov_b32_e32 v31, v26 +; SI-NEXT: v_mov_b32_e32 v41, v24 +; SI-NEXT: v_mov_b32_e32 v42, v22 +; SI-NEXT: v_mov_b32_e32 v43, v20 +; SI-NEXT: v_mov_b32_e32 v49, v18 +; SI-NEXT: v_mov_b32_e32 v44, v16 +; SI-NEXT: v_mov_b32_e32 v45, v14 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v7 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v29 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v29 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill @@ -36629,87 +36751,87 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:132 ; 4-byte Folded 
Spill ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v6 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v8 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v10 -; SI-NEXT: s_waitcnt vmcnt(7) expcnt(6) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v10 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v12 ; SI-NEXT: s_cbranch_scc0 .LBB51_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v7, v0, v18 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 ; SI-NEXT: v_or_b32_e32 v9, v0, v16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 ; SI-NEXT: v_or_b32_e32 v10, v0, v14 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: v_or_b32_e32 v11, v0, v5 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: v_or_b32_e32 v12, v0, v3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 -; SI-NEXT: v_or_b32_e32 v13, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v13, v0, v63 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_or_b32_e32 v14, v0, v44 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 -; SI-NEXT: v_or_b32_e32 v15, v0, v43 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v14, v0, v62 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v15, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_or_b32_e32 v16, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 -; SI-NEXT: v_or_b32_e32 v17, v0, v59 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 -; SI-NEXT: v_or_b32_e32 v8, v1, v18 +; SI-NEXT: v_or_b32_e32 v16, v0, v48 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 +; SI-NEXT: v_or_b32_e32 v17, v0, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_or_b32_e32 v18, v0, v58 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 -; SI-NEXT: v_or_b32_e32 v19, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v18, v0, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 +; SI-NEXT: v_or_b32_e32 v19, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v31 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 +; SI-NEXT: v_or_b32_e32 v8, v1, v20 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v20, v0, v56 +; SI-NEXT: v_or_b32_e32 v20, v0, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28 ; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: s_and_b32 s5, s18, 0xffff ; SI-NEXT: s_lshl_b32 s6, s19, 16 -; SI-NEXT: v_or_b32_e32 v21, v0, v47 +; SI-NEXT: v_or_b32_e32 v21, v0, v34 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 ; SI-NEXT: s_or_b32 s5, s5, s6 ; SI-NEXT: s_and_b32 s6, s20, 0xffff ; SI-NEXT: s_lshl_b32 s7, s21, 16 -; SI-NEXT: v_or_b32_e32 v22, v0, v46 -; SI-NEXT: v_and_b32_e32 
v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v22, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 -; SI-NEXT: v_or_b32_e32 v23, v0, v45 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v23, v0, v32 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 -; SI-NEXT: v_or_b32_e32 v24, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v24, v0, v59 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_or_b32_e32 v25, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 +; SI-NEXT: v_or_b32_e32 v25, v0, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_or_b32_e32 v26, v0, v29 +; SI-NEXT: v_or_b32_e32 v26, v0, v52 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 ; SI-NEXT: s_or_b32 s10, s10, s11 -; SI-NEXT: v_or_b32_e32 v27, v0, v63 +; SI-NEXT: v_or_b32_e32 v27, v0, v29 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -36719,9 +36841,10 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_cbranch_execnz .LBB51_3 ; SI-NEXT: .LBB51_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff @@ -36764,96 +36887,96 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; 
SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_or_b32_e32 v0, v63, v0 ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v44, v0 +; SI-NEXT: v_or_b32_e32 v0, v62, v0 ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v48, v0 ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v38, v0 ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v37, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v31 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v55, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v46, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v45, v0 +; SI-NEXT: v_or_b32_e32 v0, v32, v0 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v53, v0 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: 
v_add_i32_e32 v0, vcc, 3, v46 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v29, v0 +; SI-NEXT: v_or_b32_e32 v0, v52, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v29, v0 ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: v_mov_b32_e32 v1, s5 @@ -36877,74 +37000,83 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB51_4: -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v59, v56 -; SI-NEXT: v_mov_b32_e32 v56, v45 -; SI-NEXT: v_mov_b32_e32 v45, v29 -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v62, v58 +; SI-NEXT: v_mov_b32_e32 v58, v51 +; SI-NEXT: v_mov_b32_e32 v51, v47 +; SI-NEXT: v_mov_b32_e32 v47, v44 ; SI-NEXT: v_mov_b32_e32 v44, v41 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_mov_b32_e32 v41, v39 -; SI-NEXT: v_mov_b32_e32 v39, v36 -; SI-NEXT: v_mov_b32_e32 v36, v33 -; SI-NEXT: v_mov_b32_e32 v33, v30 -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v43, v55 -; SI-NEXT: v_mov_b32_e32 v55, v54 -; SI-NEXT: v_mov_b32_e32 v54, v53 -; SI-NEXT: v_mov_b32_e32 v53, v52 -; SI-NEXT: v_mov_b32_e32 v52, v51 -; SI-NEXT: v_mov_b32_e32 v51, v50 -; SI-NEXT: v_mov_b32_e32 v50, v49 -; SI-NEXT: v_mov_b32_e32 v49, v42 -; SI-NEXT: v_mov_b32_e32 v42, v48 -; SI-NEXT: v_mov_b32_e32 v48, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_mov_b32_e32 v34, v28 -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v62, v60 -; SI-NEXT: v_mov_b32_e32 v60, v57 +; SI-NEXT: v_mov_b32_e32 v41, v30 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v63, v59 +; SI-NEXT: v_mov_b32_e32 v59, v56 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_mov_b32_e32 v56, v50 +; SI-NEXT: v_mov_b32_e32 v50, v45 +; SI-NEXT: v_mov_b32_e32 v45, v42 +; SI-NEXT: v_mov_b32_e32 v42, v28 +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v61, v52 +; SI-NEXT: v_mov_b32_e32 v52, v57 ; SI-NEXT: v_mov_b32_e32 v57, v46 -; SI-NEXT: v_mov_b32_e32 v46, v61 -; SI-NEXT: v_mov_b32_e32 v61, v58 -; SI-NEXT: v_mov_b32_e32 v58, v47 -; SI-NEXT: v_mov_b32_e32 v47, v31 +; SI-NEXT: v_mov_b32_e32 v46, v49 +; SI-NEXT: v_mov_b32_e32 v49, v43 +; SI-NEXT: v_mov_b32_e32 v43, v31 +; SI-NEXT: v_mov_b32_e32 v53, v40 +; SI-NEXT: v_mov_b32_e32 v40, v48 +; SI-NEXT: v_mov_b32_e32 v48, v39 +; SI-NEXT: v_mov_b32_e32 v39, v38 +; SI-NEXT: v_mov_b32_e32 v38, v37 +; SI-NEXT: v_mov_b32_e32 v37, v36 +; SI-NEXT: v_mov_b32_e32 v36, v35 +; SI-NEXT: v_mov_b32_e32 v35, v55 +; SI-NEXT: v_mov_b32_e32 v55, v34 +; SI-NEXT: v_mov_b32_e32 v34, v33 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v54, v32 
+; SI-NEXT: v_mov_b32_e32 v32, v60 +; SI-NEXT: v_mov_b32_e32 v60, v29 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v31, v47 -; SI-NEXT: v_mov_b32_e32 v47, v58 -; SI-NEXT: v_mov_b32_e32 v58, v61 -; SI-NEXT: v_mov_b32_e32 v61, v46 +; SI-NEXT: v_mov_b32_e32 v29, v60 +; SI-NEXT: v_mov_b32_e32 v60, v32 +; SI-NEXT: v_mov_b32_e32 v32, v54 +; SI-NEXT: v_mov_b32_e32 v54, v33 +; SI-NEXT: v_mov_b32_e32 v33, v34 +; SI-NEXT: v_mov_b32_e32 v34, v55 +; SI-NEXT: v_mov_b32_e32 v55, v35 +; SI-NEXT: v_mov_b32_e32 v35, v36 +; SI-NEXT: v_mov_b32_e32 v36, v37 +; SI-NEXT: v_mov_b32_e32 v37, v38 +; SI-NEXT: v_mov_b32_e32 v38, v39 +; SI-NEXT: v_mov_b32_e32 v39, v48 +; SI-NEXT: v_mov_b32_e32 v48, v40 +; SI-NEXT: v_mov_b32_e32 v40, v53 +; SI-NEXT: v_mov_b32_e32 v31, v43 +; SI-NEXT: v_mov_b32_e32 v43, v49 +; SI-NEXT: v_mov_b32_e32 v49, v46 ; SI-NEXT: v_mov_b32_e32 v46, v57 -; SI-NEXT: v_mov_b32_e32 v57, v60 -; SI-NEXT: v_mov_b32_e32 v60, v62 -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v34, v37 -; SI-NEXT: v_mov_b32_e32 v37, v48 -; SI-NEXT: v_mov_b32_e32 v48, v42 -; SI-NEXT: v_mov_b32_e32 v42, v49 -; SI-NEXT: v_mov_b32_e32 v49, v50 -; SI-NEXT: v_mov_b32_e32 v50, v51 -; SI-NEXT: v_mov_b32_e32 v51, v52 -; SI-NEXT: v_mov_b32_e32 v52, v53 -; SI-NEXT: v_mov_b32_e32 v53, v54 -; SI-NEXT: v_mov_b32_e32 v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, v43 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v33, v36 -; SI-NEXT: v_mov_b32_e32 v36, v39 -; SI-NEXT: v_mov_b32_e32 v39, v41 -; SI-NEXT: v_mov_b32_e32 v41, v44 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v29, v45 -; SI-NEXT: v_mov_b32_e32 v45, v56 +; SI-NEXT: v_mov_b32_e32 v57, v52 +; SI-NEXT: v_mov_b32_e32 v52, v61 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v28, v42 +; SI-NEXT: v_mov_b32_e32 v42, v45 +; SI-NEXT: v_mov_b32_e32 v45, v50 +; SI-NEXT: v_mov_b32_e32 v50, v56 ; SI-NEXT: v_mov_b32_e32 v56, v59 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v59, v63 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v41 +; SI-NEXT: v_mov_b32_e32 v41, v44 +; SI-NEXT: v_mov_b32_e32 v44, v47 +; SI-NEXT: v_mov_b32_e32 v47, v51 +; SI-NEXT: v_mov_b32_e32 v51, v58 +; SI-NEXT: v_mov_b32_e32 v58, v62 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB51_2 ; ; VI-LABEL: bitcast_v56i16_to_v14f64_scalar: @@ -37772,7 +37904,6 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr63 @@ -37784,7 +37915,6 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr57 ; SI-NEXT: 
; implicit-def: $vgpr59 ; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr62 ; SI-NEXT: ; kill: killed $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr29 @@ -37794,23 +37924,25 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc ; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; SI-NEXT: s_cbranch_execz .LBB52_2 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v29 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v23 ; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: v_cvt_f32_f16_e32 v62, v29 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v60, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v21 @@ -37819,31 +37951,31 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v57, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v42, v28 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v27 -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v43, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v26 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v44, v29 @@ -37875,6 +38007,7 @@ define <56 x half> 
@bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v49, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v51, v29 @@ -37882,26 +38015,25 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v54, v29 ; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v55, v29 -; SI-NEXT: v_mov_b32_e32 v29, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v47, v26 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v36, v8 +; SI-NEXT: v_mov_b32_e32 v29, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v47, v25 +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; 
SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: v_cvt_f32_f16_e32 v38, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v48, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v50, v5 @@ -37936,34 +38068,24 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 ; SI-NEXT: v_add_f64 v[54:55], v[1:2], 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v18 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v3, v45 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v54 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v17 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v3, v43 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 ; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v3, v42 -; SI-NEXT: v_mov_b32_e32 v42, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v3, v43 ; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 ; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 ; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 ; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 ; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v54 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v4 ; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v5 @@ -37977,14 +38099,16 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v14 ; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v21 ; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v27 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 @@ -38005,13 +38129,14 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: 
v_cvt_f32_f16_e32 v38, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v48, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v50, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v52, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v40, v55 ; SI-NEXT: v_cvt_f32_f16_e32 v41, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 ; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 @@ -38020,6 +38145,9 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v60, v60 ; SI-NEXT: v_cvt_f32_f16_e32 v59, v59 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v57 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v3, v42 ; SI-NEXT: v_cvt_f32_f16_e32 v44, v44 ; SI-NEXT: v_cvt_f32_f16_e32 v46, v46 ; SI-NEXT: v_cvt_f32_f16_e32 v56, v56 @@ -38034,29 +38162,29 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 ; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 ; SI-NEXT: v_cvt_f32_f16_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v47, v26 -; SI-NEXT: v_mov_b32_e32 v45, v27 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v55, v1 -; SI-NEXT: v_mov_b32_e32 v43, v28 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v47, v25 +; SI-NEXT: v_mov_b32_e32 v45, v26 +; SI-NEXT: v_mov_b32_e32 v43, v27 +; SI-NEXT: v_mov_b32_e32 v42, v28 +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, 
s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: .LBB52_4: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] ; SI-NEXT: v_cvt_f16_f32_e32 v1, v55 @@ -38107,14 +38235,16 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v36 ; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v30 ; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 @@ -38123,7 +38253,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v63 ; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 @@ -38132,7 +38262,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v61 ; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 @@ -38141,7 +38271,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v58 ; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 @@ -38150,7 +38280,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: 
v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v56 ; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 @@ -38159,7 +38289,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v46 ; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 @@ -38168,7 +38298,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v44 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 @@ -38178,8 +38308,8 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -38189,8 +38319,8 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -38200,8 +38330,8 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -38211,8 +38341,8 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, 
s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -38221,7 +38351,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 @@ -38230,7 +38360,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 @@ -38239,7 +38369,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v60 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 @@ -38248,19 +38378,10 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v42 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v62 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 @@ -38269,7 +38390,7 @@ define <56 x half> @bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v29 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 @@ -38278,20 +38399,27 @@ define <56 x half> 
@bitcast_v14f64_to_v56f16(<14 x double> %a, i32 %b) { ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v31 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v47 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v45 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v43 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v42 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -38889,17 +39017,17 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: s_cbranch_scc0 .LBB53_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_lshr_b32 s42, s5, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v29, s42 +; SI-NEXT: v_cvt_f32_f16_e32 v41, s42 ; SI-NEXT: s_lshr_b32 s42, s4, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v25, s42 +; SI-NEXT: v_cvt_f32_f16_e32 v45, s42 ; SI-NEXT: s_lshr_b32 s42, s7, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v21, s42 +; SI-NEXT: v_cvt_f32_f16_e32 v47, s42 ; SI-NEXT: s_lshr_b32 s42, s6, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v1, s42 +; SI-NEXT: v_cvt_f32_f16_e32 v25, s42 ; SI-NEXT: s_lshr_b32 s42, s9, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v17, s42 ; SI-NEXT: s_lshr_b32 s42, s8, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v2, s42 +; SI-NEXT: v_cvt_f32_f16_e32 v29, s42 ; SI-NEXT: s_lshr_b32 s42, s11, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v10, s42 ; SI-NEXT: s_lshr_b32 s42, s10, 16 @@ -38907,7 +39035,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: s_lshr_b32 s42, s13, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v48, s42 ; SI-NEXT: s_lshr_b32 s42, s12, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v18, s42 +; SI-NEXT: v_cvt_f32_f16_e32 v33, s42 ; SI-NEXT: s_lshr_b32 s42, s15, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v20, s42 ; SI-NEXT: s_lshr_b32 s42, s14, 16 @@ -38944,18 +39072,18 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: v_cvt_f32_f16_e32 v46, s42 ; SI-NEXT: s_lshr_b32 s42, s16, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v56, s42 +; SI-NEXT: v_cvt_f32_f16_e32 v21, s5 +; SI-NEXT: v_cvt_f32_f16_e32 v34, s4 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_cvt_f32_f16_e32 v57, s5 -; SI-NEXT: v_cvt_f32_f16_e32 v33, s4 -; SI-NEXT: v_cvt_f32_f16_e32 v34, s7 +; SI-NEXT: v_cvt_f32_f16_e32 v57, s7 +; SI-NEXT: v_cvt_f32_f16_e32 v52, s6 +; SI-NEXT: v_cvt_f32_f16_e32 v40, s9 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_cvt_f32_f16_e32 v58, s6 +; SI-NEXT: v_cvt_f32_f16_e32 v58, s8 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_cvt_f32_f16_e32 v59, s9 +; SI-NEXT: v_cvt_f32_f16_e32 v59, s11 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v60, s8 -; SI-NEXT: v_cvt_f32_f16_e32 v16, s11 -; SI-NEXT: v_cvt_f32_f16_e32 v7, s10 +; SI-NEXT: v_cvt_f32_f16_e32 v60, s10 ; SI-NEXT: v_cvt_f32_f16_e32 v19, s13 ; SI-NEXT: v_cvt_f32_f16_e32 v8, s12 ; 
SI-NEXT: v_cvt_f32_f16_e32 v23, s15 @@ -38969,13 +39097,13 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: v_cvt_f32_f16_e32 v38, s25 ; SI-NEXT: v_cvt_f32_f16_e32 v15, s24 ; SI-NEXT: v_cvt_f32_f16_e32 v50, s23 -; SI-NEXT: v_cvt_f32_f16_e32 v52, s22 +; SI-NEXT: v_cvt_f32_f16_e32 v16, s22 ; SI-NEXT: v_cvt_f32_f16_e32 v54, s21 -; SI-NEXT: v_cvt_f32_f16_e32 v40, s20 -; SI-NEXT: v_cvt_f32_f16_e32 v41, s19 +; SI-NEXT: v_cvt_f32_f16_e32 v7, s20 +; SI-NEXT: v_cvt_f32_f16_e32 v18, s19 ; SI-NEXT: v_cvt_f32_f16_e32 v43, s18 -; SI-NEXT: v_cvt_f32_f16_e32 v45, s17 -; SI-NEXT: v_cvt_f32_f16_e32 v47, s16 +; SI-NEXT: v_cvt_f32_f16_e32 v2, s17 +; SI-NEXT: v_cvt_f32_f16_e32 v1, s16 ; SI-NEXT: s_cbranch_execnz .LBB53_3 ; SI-NEXT: .LBB53_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[1:2], s[16:17], 1.0 @@ -38995,37 +39123,41 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_f64 v[3:4], s[4:5], 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v57, v4 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v22 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v5 ; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v41, v43 +; SI-NEXT: v_add_f64 v[18:19], s[12:13], 1.0 +; SI-NEXT: v_add_f64 v[7:8], s[6:7], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v57, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v43 ; SI-NEXT: v_cvt_f32_f16_e32 v43, v42 ; SI-NEXT: v_add_f64 v[49:50], s[22:23], 1.0 ; SI-NEXT: v_add_f64 v[37:38], s[24:25], 1.0 ; SI-NEXT: v_add_f64 v[15:16], s[10:11], 1.0 -; SI-NEXT: v_add_f64 v[7:8], s[6:7], 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v49 ; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v37 ; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v38 ; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v60, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v37 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v39 ; SI-NEXT: v_cvt_f32_f16_e32 v39, v51 ; SI-NEXT: v_cvt_f32_f16_e32 v51, v44 ; SI-NEXT: v_add_f64 v[53:54], s[20:21], 1.0 -; SI-NEXT: v_add_f64 v[35:36], s[26:27], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v7 ; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v54 ; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v53 ; SI-NEXT: v_cvt_f32_f16_e32 v53, v46 +; SI-NEXT: v_add_f64 v[35:36], s[26:27], 1.0 ; SI-NEXT: v_add_f64 v[30:31], s[28:29], 1.0 ; SI-NEXT: v_add_f64 v[26:27], s[40:41], 1.0 -; SI-NEXT: v_add_f64 v[18:19], s[12:13], 1.0 ; SI-NEXT: v_add_f64 v[11:12], s[8:9], 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v50 ; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v35 @@ -39035,20 +39167,16 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v26 ; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v27 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v18 ; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v8 -; 
SI-NEXT: v_lshrrev_b32_e32 v25, 16, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v59, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v60, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v58, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v59, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v26 @@ -39058,20 +39186,20 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: v_cvt_f32_f16_e32 v36, v35 ; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 ; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v49 ; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v47, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v41, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v47, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v48 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v48, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v63 @@ -39094,9 +39222,9 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: v_cvt_f32_f16_e32 v56, v5 ; SI-NEXT: .LBB53_3: ; %end ; SI-NEXT: v_cvt_f16_f32_e32 v5, v56 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v2 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_or_b32_e32 v5, v6, v5 ; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v9 @@ -39114,14 +39242,14 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: buffer_store_dword v5, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v5, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v18 ; SI-NEXT: v_add_i32_e32 v9, vcc, 12, v0 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_or_b32_e32 v5, v6, v5 ; SI-NEXT: buffer_store_dword v5, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v5, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v40 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v7 ; SI-NEXT: v_add_i32_e32 v9, vcc, 16, v0 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_or_b32_e32 v5, v6, v5 @@ -39135,7 +39263,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: buffer_store_dword v5, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v5, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v16 ; SI-NEXT: v_add_i32_e32 v9, vcc, 24, v0 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; 
SI-NEXT: v_or_b32_e32 v5, v6, v5 @@ -39218,7 +39346,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: v_or_b32_e32 v4, v5, v4 ; SI-NEXT: buffer_store_dword v4, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v4, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v8 ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x48, v0 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 @@ -39233,16 +39361,16 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: buffer_store_dword v4, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v4, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v60 ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x50, v0 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: v_or_b32_e32 v4, v5, v4 ; SI-NEXT: buffer_store_dword v4, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v4, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v60 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v58 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x54, v0 ; SI-NEXT: v_or_b32_e32 v4, v5, v4 @@ -39254,35 +39382,35 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v40 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x5c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v25 ; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v58 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v52 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v57 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v34 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v21 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -39306,19 +39434,19 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB53_4: -; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: 
$vgpr2 ; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr55 ; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr16 ; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr49 @@ -39343,25 +39471,25 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a ; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: ; implicit-def: $vgpr20 ; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: s_branch .LBB53_2 ; ; VI-LABEL: bitcast_v14f64_to_v56f16_scalar: @@ -45958,30 +46086,30 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:100 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:104 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v47, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v56, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v46, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v11 ; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v45, v14 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v15 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:172 ; 4-byte 
Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v1, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v44, v18 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v17, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v21 @@ -45992,9 +46120,9 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v21, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v28 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v7, v29 ; SI-NEXT: v_cvt_f16_f32_e32 v25, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cvt_f16_f32_e32 v22, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v57 @@ -46008,69 +46136,76 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v63, v35 ; SI-NEXT: v_cvt_f16_f32_e32 v61, v49 ; SI-NEXT: v_cvt_f16_f32_e32 v29, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v59, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v55 ; SI-NEXT: v_cvt_f16_f32_e32 v49, v31 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v34 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_cvt_f16_f32_e32 v50, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cvt_f16_f32_e32 v35, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v37 ; SI-NEXT: s_waitcnt vmcnt(11) ; SI-NEXT: v_cvt_f16_f32_e32 v27, v38 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v55, v39 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v39 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f16_f32_e32 v32, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v48 ; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v33, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v51 ; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_cvt_f16_f32_e32 v26, v52 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v36, v54 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v54 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_cvt_f16_f32_e32 v34, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v41 ; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v42 ; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc ; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; SI-NEXT: s_or_saveexec_b64 s[4:5], s[4:5] -; SI-NEXT: v_mov_b32_e32 v48, v7 -; SI-NEXT: v_mov_b32_e32 v51, v9 -; SI-NEXT: v_mov_b32_e32 v52, v11 -; SI-NEXT: v_mov_b32_e32 v54, v13 -; SI-NEXT: v_mov_b32_e32 v41, v12 +; SI-NEXT: v_mov_b32_e32 v51, v57 +; SI-NEXT: v_mov_b32_e32 v52, v7 +; SI-NEXT: v_mov_b32_e32 v54, v9 +; SI-NEXT: v_mov_b32_e32 v55, v11 +; SI-NEXT: v_mov_b32_e32 v41, v13 +; SI-NEXT: v_mov_b32_e32 v48, v5 ; SI-NEXT: s_xor_b64 exec, exec, s[4:5] ; SI-NEXT: s_cbranch_execz .LBB58_2 ; SI-NEXT: ; %bb.1: ; %cmp.true -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v31, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v47 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v37, v56 +; SI-NEXT: v_mov_b32_e32 v7, v39 +; SI-NEXT: v_cvt_f32_f16_e32 v39, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 +; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 +; SI-NEXT: 
v_cvt_f16_f32_e32 v37, v37 +; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 +; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v39 +; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v38 ; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v59 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v33 +; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v25 ; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v35 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v11 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v63, v9 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 @@ -46085,6 +46220,7 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 @@ -46092,7 +46228,6 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 ; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 ; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 @@ -46103,108 +46238,100 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v38, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded 
Reload -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v39, v7 -; SI-NEXT: v_or_b32_e32 v7, v37, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v46 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v31 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v39 +; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 ; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v7, v38, v46 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v38, v45 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v54 +; SI-NEXT: v_or_b32_e32 v5, v38, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v46 +; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v37 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v37, v9 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 ; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 +; SI-NEXT: v_or_b32_e32 v48, v39, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v39, v45 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v38 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 +; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 ; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 +; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v39 +; SI-NEXT: v_cvt_f32_f16_e32 v39, v41 ; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 ; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v31, v7 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_or_b32_e32 v7, v31, v45 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v44 -; SI-NEXT: v_or_b32_e32 v41, v37, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v43 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v38, v9 +; SI-NEXT: v_or_b32_e32 v9, v37, v45 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v37, v44 ; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 ; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 ; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 ; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v7, v55 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v52 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v51 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_or_b32_e32 v9, v38, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v43 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v37, v55 ; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 ; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_or_b32_e32 v54, v39, v43 
-; SI-NEXT: v_or_b32_e32 v52, v38, v42 -; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v37 +; SI-NEXT: v_or_b32_e32 v41, v39, v43 +; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v54 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v9, v63 +; SI-NEXT: v_or_b32_e32 v55, v37, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v37, v52 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v38 ; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v48 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_or_b32_e32 v51, v25, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v40 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 +; SI-NEXT: v_or_b32_e32 v54, v25, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v40 ; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v51 +; SI-NEXT: v_or_b32_e32 v52, v37, v40 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 ; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_or_b32_e32 v48, v37, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v37, v30 ; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v25, v62 -; SI-NEXT: v_or_b32_e32 v57, v21, v30 +; SI-NEXT: v_or_b32_e32 v51, v21, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v29 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v37 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v61 -; SI-NEXT: v_or_b32_e32 v62, v25, v37 +; SI-NEXT: v_or_b32_e32 v62, v25, v59 ; SI-NEXT: v_cvt_f32_f16_e32 v25, v28 ; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v49 @@ -46217,56 +46344,61 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v61, v29, v28 ; SI-NEXT: v_cvt_f16_f32_e32 v29, v27 ; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v60 ; SI-NEXT: v_or_b32_e32 v49, v21, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v7 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 ; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v29 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v29, v26 -; SI-NEXT: v_or_b32_e32 v35, v25, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v34 +; SI-NEXT: v_or_b32_e32 v37, v25, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v32 ; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v21 -; SI-NEXT: v_or_b32_e32 v33, v29, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v34 +; SI-NEXT: v_or_b32_e32 v39, v29, v26 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v25 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: 
v_add_f32_e32 v25, 0x38000000, v29 ; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v53 -; SI-NEXT: v_or_b32_e32 v36, v21, v25 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v32 -; SI-NEXT: v_or_b32_e32 v55, v7, v21 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f32_f16_e32 v25, v50 -; SI-NEXT: v_alignbit_b32 v26, v36, v26, 16 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v53 +; SI-NEXT: v_or_b32_e32 v34, v21, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v50 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v25 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v29 ; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v50 -; SI-NEXT: v_or_b32_e32 v53, v25, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v58 -; SI-NEXT: v_or_b32_e32 v59, v13, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v9 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v31 +; SI-NEXT: v_or_b32_e32 v53, v7, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v36 +; SI-NEXT: v_or_b32_e32 v50, v25, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; SI-NEXT: v_or_b32_e32 v35, v13, v21 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v63 ; SI-NEXT: v_or_b32_e32 v16, v16, v21 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v14 ; SI-NEXT: v_or_b32_e32 v15, v15, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v21, v22 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v20 -; SI-NEXT: v_alignbit_b32 v29, v59, v28, 16 -; SI-NEXT: v_alignbit_b32 v28, v53, v27, 16 +; SI-NEXT: v_alignbit_b32 v29, v35, v28, 16 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_alignbit_b32 v27, v55, v60, 16 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_alignbit_b32 v28, v50, v27, 16 ; SI-NEXT: v_or_b32_e32 v22, v21, v22 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v18 ; SI-NEXT: v_or_b32_e32 v24, v24, v21 @@ -46274,16 +46406,7 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v17 ; SI-NEXT: v_or_b32_e32 v10, v10, v21 -; SI-NEXT: v_alignbit_b32 v44, v10, v43, 16 -; SI-NEXT: v_alignbit_b32 v43, v19, v42, 16 -; SI-NEXT: v_alignbit_b32 v25, v22, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, v15, v30, 16 -; SI-NEXT: v_alignbit_b32 v30, v16, v37, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v7 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v12 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v12 ; SI-NEXT: v_or_b32_e32 v1, v1, v21 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v8 ; SI-NEXT: v_or_b32_e32 v5, v5, v21 @@ -46294,14 +46417,22 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_alignbit_b32 v56, v3, v47, 16 ; SI-NEXT: v_alignbit_b32 v47, v6, v46, 16 ; SI-NEXT: v_alignbit_b32 v46, v5, v45, 16 -; SI-NEXT: v_alignbit_b32 v45, v1, v31, 16 -; SI-NEXT: v_alignbit_b32 v21, v24, v38, 16 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v45, v1, v57, 16 
+; SI-NEXT: v_alignbit_b32 v44, v10, v43, 16 +; SI-NEXT: v_alignbit_b32 v43, v19, v42, 16 +; SI-NEXT: v_alignbit_b32 v21, v24, v58, 16 +; SI-NEXT: v_alignbit_b32 v25, v22, v40, 16 +; SI-NEXT: v_alignbit_b32 v40, v15, v30, 16 +; SI-NEXT: v_alignbit_b32 v30, v16, v59, 16 +; SI-NEXT: v_alignbit_b32 v27, v53, v60, 16 +; SI-NEXT: v_mov_b32_e32 v60, v37 +; SI-NEXT: v_alignbit_b32 v26, v34, v26, 16 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: .LBB58_2: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v56 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v56 ; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: v_or_b32_e32 v3, v3, v4 @@ -46309,15 +46440,13 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v31, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v31, v31, v37 -; SI-NEXT: buffer_store_dword v31, v0, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v37, 0xffff, v5 +; SI-NEXT: v_or_b32_e32 v37, v37, v38 +; SI-NEXT: buffer_store_dword v37, v0, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v48 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v47 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; SI-NEXT: v_or_b32_e32 v3, v3, v4 ; SI-NEXT: v_add_i32_e32 v4, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen @@ -46335,26 +46464,28 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v5 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v8 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: v_or_b32_e32 v2, v2, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v41 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v45 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: v_or_b32_e32 v2, v2, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v41 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 @@ -46366,7 +46497,7 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 
%b) { ; SI-NEXT: v_add_i32_e32 v2, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v52 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v55 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 40, v0 @@ -46378,7 +46509,7 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v21 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 48, v0 @@ -46390,7 +46521,7 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v48 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v52 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 @@ -46402,7 +46533,7 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v57 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 @@ -46432,8 +46563,8 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v59 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v58 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -46444,32 +46575,32 @@ define <56 x i16> @bitcast_v56f16_to_v56i16(<56 x half> %a, i32 %b) { ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v60 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v55 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v33 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen @@ -47071,501 +47202,582 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:28 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:40 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v43, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v24 -; SI-NEXT: 
v_cvt_f16_f32_e32 v16, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v28, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v27, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v26 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v3, s16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v61, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v58, s23 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v4 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v5 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v12 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v17 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v25 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v8, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v47, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v45, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v43, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v41, s29 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v36, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v28 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_cvt_f16_f32_e32 v33, v38 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cvt_f16_f32_e32 v10, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v49 ; SI-NEXT: 
s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f16_f32_e32 v53, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v62, v51 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v53 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f16_f32_e32 v5, v47 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v8, v56 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f16_f32_e32 v30, v57 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v7, v58 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v57, v60 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f16_f32_e32 v29, v61 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v4, v62 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v63 -; SI-NEXT: v_cvt_f16_f32_e32 v56, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v47, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v46, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v45, s29 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v55 +; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v40 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v33, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v47, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v44 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v56, 0x38000000, v47 -; SI-NEXT: v_add_f32_e32 v45, 0x38000000, v45 -; SI-NEXT: v_add_f32_e32 v44, 0x38000000, v44 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v33 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v56 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v43 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v44 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_add_f32_e32 v44, 0x38000000, v56 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v40 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v41 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v54 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v47 +; SI-NEXT: v_mov_b32_e32 v28, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v45 +; 
SI-NEXT: v_mov_b32_e32 v51, v23 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v43 +; SI-NEXT: v_mov_b32_e32 v49, v19 +; SI-NEXT: v_mov_b32_e32 v53, v36 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v36, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v41 +; SI-NEXT: v_mov_b32_e32 v38, v15 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v28 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v39 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v54 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v5, v5, v23 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v52 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v50 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v37 +; SI-NEXT: v_mov_b32_e32 v37, v11 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v35 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v62 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v33 +; SI-NEXT: v_mov_b32_e32 v33, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v53 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v57 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; 
SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_or_b32_e32 v57, v54, v29 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_or_b32_e32 v4, v4, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_or_b32_e32 v7, v7, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_or_b32_e32 v6, v6, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_or_b32_e32 v13, v13, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_or_b32_e32 v16, v16, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; 
SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_or_b32_e32 v15, v15, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v52, 0x38000000, v52 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_or_b32_e32 v34, v34, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v52 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v5, v28, v19 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v31 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v49 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_or_b32_e32 v37, v37, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v35 +; SI-NEXT: v_or_b32_e32 v28, v28, v15 +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_or_b32_e32 v36, v36, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_or_b32_e32 v39, v5, v29 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_or_b32_e32 v5, v31, v25 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v38 ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 
-; SI-NEXT: v_or_b32_e32 v51, v51, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_or_b32_e32 v52, v52, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v19 -; SI-NEXT: v_or_b32_e32 v20, v20, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v21 -; SI-NEXT: v_or_b32_e32 v27, v27, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v56 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v44 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v41 -; SI-NEXT: v_or_b32_e32 v25, v25, v54 -; SI-NEXT: v_or_b32_e32 v28, v28, v47 -; SI-NEXT: v_or_b32_e32 v26, v26, v46 -; SI-NEXT: v_or_b32_e32 v23, v23, v45 -; SI-NEXT: v_or_b32_e32 v22, v22, v33 -; SI-NEXT: v_or_b32_e32 v50, v50, v43 -; SI-NEXT: v_or_b32_e32 v48, v48, v42 -; SI-NEXT: v_or_b32_e32 v38, v38, v58 -; SI-NEXT: v_or_b32_e32 v3, v3, v40 -; SI-NEXT: v_or_b32_e32 v18, v18, v55 -; SI-NEXT: v_or_b32_e32 v17, v17, v59 -; SI-NEXT: v_or_b32_e32 v12, v12, v53 -; SI-NEXT: v_or_b32_e32 v10, v10, v30 -; SI-NEXT: v_or_b32_e32 v8, v8, v60 -; SI-NEXT: v_alignbit_b32 v56, v25, v47, 16 -; SI-NEXT: v_alignbit_b32 v47, v27, v46, 16 -; SI-NEXT: v_alignbit_b32 v46, v20, v45, 16 -; SI-NEXT: v_alignbit_b32 v45, v52, v33, 16 -; SI-NEXT: v_alignbit_b32 v44, v51, v43, 16 -; SI-NEXT: v_alignbit_b32 v43, v36, v42, 16 -; SI-NEXT: v_alignbit_b32 v42, v37, v58, 16 -; SI-NEXT: v_alignbit_b32 v41, v34, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, v15, v55, 16 -; SI-NEXT: v_alignbit_b32 v55, v16, v59, 16 -; SI-NEXT: v_alignbit_b32 v54, v13, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, v6, v30, 16 -; SI-NEXT: v_alignbit_b32 v30, v7, v60, 16 -; SI-NEXT: v_alignbit_b32 v29, v4, v29, 16 -; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v56 -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_or_b32_e32 v28, v28, v33 -; SI-NEXT: v_or_b32_e32 v24, v25, v24 -; SI-NEXT: v_add_i32_e32 v25, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v28, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v47 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v21, v24, v21 -; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v21, v24, s[0:3], 0 offen +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_or_b32_e32 v28, v28, v21 +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v46 -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_or_b32_e32 
v21, v21, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_cvt_f32_f16_e32 v28, v37 +; SI-NEXT: v_or_b32_e32 v38, v31, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v56 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_or_b32_e32 v37, v28, v11 +; SI-NEXT: v_or_b32_e32 v62, v31, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v33 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_or_b32_e32 v5, v5, v17 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_cvt_f32_f16_e32 v5, v63 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_or_b32_e32 v5, v5, v9 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v52 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v39 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_cvt_f32_f16_e32 v5, v46 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v44 +; SI-NEXT: v_or_b32_e32 v35, v28, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v34 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_or_b32_e32 v34, v1, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v31 +; SI-NEXT: v_or_b32_e32 v56, v28, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v42 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v32 +; SI-NEXT: v_or_b32_e32 v4, v4, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v60 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v33 +; SI-NEXT: v_or_b32_e32 v6, v6, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v57 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v60 +; SI-NEXT: v_or_b32_e32 v8, v8, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v57 +; SI-NEXT: v_or_b32_e32 v10, v10, v27 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v63 +; SI-NEXT: v_or_b32_e32 v12, v12, v28 +; SI-NEXT: 
buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v36 +; SI-NEXT: v_or_b32_e32 v14, v14, v27 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v28 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v44 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v18, v18, v27 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[52:53], v[17:18], 16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v27 +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v51 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v49 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 +; SI-NEXT: v_or_b32_e32 v22, v22, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v59 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[54:55], v[21:22], 16 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v30, v51 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v36 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v59 +; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v43 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v36, v30 +; SI-NEXT: v_or_b32_e32 v30, v28, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v58 +; SI-NEXT: v_lshr_b64 v[41:42], v[29:30], 16 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v36 +; SI-NEXT: v_or_b32_e32 v16, v16, v28 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v61 +; SI-NEXT: v_lshr_b64 v[43:44], v[15:16], 16 +; SI-NEXT: v_mov_b32_e32 v44, v34 +; SI-NEXT: v_mov_b32_e32 v42, v33 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v58 +; SI-NEXT: v_or_b32_e32 v20, v20, v27 +; SI-NEXT: v_lshr_b64 v[45:46], v[19:20], 16 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v61 +; SI-NEXT: v_or_b32_e32 v24, 
v24, v27 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[47:48], v[23:24], 16 +; SI-NEXT: v_mov_b32_e32 v23, v36 +; SI-NEXT: v_mov_b32_e32 v46, v35 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_mov_b32_e32 v7, v63 +; SI-NEXT: v_mov_b32_e32 v34, v56 +; SI-NEXT: v_mov_b32_e32 v56, v62 +; SI-NEXT: v_lshr_b64 v[62:63], v[3:4], 16 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v19, v39 +; SI-NEXT: v_mov_b32_e32 v15, v38 +; SI-NEXT: v_lshr_b64 v[39:40], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[11:12], 16 +; SI-NEXT: v_mov_b32_e32 v11, v37 +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[1:2], 16 +; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v47 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v35 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v38 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v42 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v45 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v31 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v58 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v43 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v41 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v19 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v30 +; SI-NEXT: 
v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v39 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v41 -; SI-NEXT: v_or_b32_e32 v3, v3, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v3, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v32 -; SI-NEXT: v_or_b32_e32 v3, v3, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v3, v19, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v54 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v40 -; SI-NEXT: v_or_b32_e32 v3, v3, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v3, v18, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v3, v3, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v3, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v55 -; SI-NEXT: v_or_b32_e32 v3, v3, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v3, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v3, v3, 
v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v54 -; SI-NEXT: v_or_b32_e32 v3, v3, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v11 -; SI-NEXT: v_or_b32_e32 v3, v3, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v53 -; SI-NEXT: v_or_b32_e32 v3, v3, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v30 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v63 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v57 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v56 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v35 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 
0xffff, v46 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v33 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v42 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v62 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v57 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v29 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v27 ; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll index 4f46875076809..967f1a9b442b0 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll @@ -3847,361 +3847,396 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v30i32_to_v60i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v18, s30, 0 +; SI-NEXT: v_writelane_b32 v18, s31, 1 +; SI-NEXT: v_writelane_b32 v18, s34, 2 +; SI-NEXT: v_writelane_b32 v18, s35, 3 +; SI-NEXT: v_writelane_b32 v18, s36, 4 +; SI-NEXT: v_writelane_b32 v18, s37, 5 +; SI-NEXT: v_writelane_b32 v18, s38, 6 +; SI-NEXT: v_writelane_b32 v18, s39, 7 +; SI-NEXT: v_writelane_b32 v18, s48, 8 +; SI-NEXT: v_writelane_b32 v18, s49, 9 +; SI-NEXT: v_writelane_b32 v18, s50, 10 +; SI-NEXT: v_writelane_b32 v18, s51, 11 +; SI-NEXT: v_writelane_b32 v18, s52, 12 +; SI-NEXT: v_writelane_b32 v18, s53, 13 +; SI-NEXT: v_writelane_b32 v18, s54, 14 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17 -; SI-NEXT: v_readfirstlane_b32 s45, v1 -; SI-NEXT: v_readfirstlane_b32 s44, v2 -; SI-NEXT: v_readfirstlane_b32 s43, v3 -; SI-NEXT: v_readfirstlane_b32 s42, v4 -; SI-NEXT: v_readfirstlane_b32 s41, v5 -; SI-NEXT: v_readfirstlane_b32 s40, v6 -; SI-NEXT: v_readfirstlane_b32 s15, v7 -; SI-NEXT: v_readfirstlane_b32 s14, v8 -; SI-NEXT: v_readfirstlane_b32 s13, v9 -; SI-NEXT: v_readfirstlane_b32 s12, v10 -; SI-NEXT: v_readfirstlane_b32 s11, v11 -; SI-NEXT: v_readfirstlane_b32 s10, v12 -; SI-NEXT: v_readfirstlane_b32 s9, v13 -; SI-NEXT: v_readfirstlane_b32 s8, v14 -; SI-NEXT: v_readfirstlane_b32 s7, v15 -; SI-NEXT: s_and_b64 s[4:5], vcc, 
exec -; SI-NEXT: v_readfirstlane_b32 s6, v16 +; SI-NEXT: v_writelane_b32 v18, s55, 15 +; SI-NEXT: v_readfirstlane_b32 s42, v1 +; SI-NEXT: v_readfirstlane_b32 s43, v2 +; SI-NEXT: v_readfirstlane_b32 s40, v3 +; SI-NEXT: v_readfirstlane_b32 s41, v4 +; SI-NEXT: v_readfirstlane_b32 s14, v5 +; SI-NEXT: v_readfirstlane_b32 s15, v6 +; SI-NEXT: v_readfirstlane_b32 s12, v7 +; SI-NEXT: v_readfirstlane_b32 s13, v8 +; SI-NEXT: v_readfirstlane_b32 s10, v9 +; SI-NEXT: v_readfirstlane_b32 s11, v10 +; SI-NEXT: v_readfirstlane_b32 s8, v11 +; SI-NEXT: v_readfirstlane_b32 s9, v12 +; SI-NEXT: v_readfirstlane_b32 s6, v13 +; SI-NEXT: v_readfirstlane_b32 s7, v14 +; SI-NEXT: v_readfirstlane_b32 s4, v15 +; SI-NEXT: s_and_b64 s[44:45], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: v_writelane_b32 v18, s64, 16 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s28 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v11, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s20 -; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s29, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s25, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s23, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s21, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s19, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 16 -; SI-NEXT: s_lshr_b32 s46, s6, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 16 -; SI-NEXT: s_lshr_b32 s56, s10, 16 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s58, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s40, 16 -; SI-NEXT: s_lshr_b32 s60, s42, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s62, s29, 16 -; SI-NEXT: s_lshr_b32 s63, s27, 16 -; SI-NEXT: s_lshr_b32 s72, s25, 16 -; SI-NEXT: s_lshr_b32 s73, s23, 16 -; SI-NEXT: s_lshr_b32 s74, s21, 16 -; SI-NEXT: s_lshr_b32 s75, s19, 16 -; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_lshr_b32 s34, s5, 16 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s36, s9, 16 +; SI-NEXT: s_lshr_b32 s37, s11, 16 +; SI-NEXT: s_lshr_b32 s38, s13, 16 +; SI-NEXT: s_lshr_b32 s39, s15, 16 +; SI-NEXT: s_lshr_b32 s48, s41, 16 +; SI-NEXT: s_lshr_b32 s49, s43, 16 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s27, 16 +; SI-NEXT: s_lshr_b32 s52, s25, 16 +; SI-NEXT: s_lshr_b32 s53, s23, 16 +; SI-NEXT: s_lshr_b32 s54, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s19, 16 +; SI-NEXT: s_lshr_b32 s64, s17, 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[28:29], 16 +; SI-NEXT: s_lshr_b64 
s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s45, s45, 3 ; SI-NEXT: s_add_i32 s43, s43, 3 -; SI-NEXT: s_add_i32 s41, s41, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s44, s44, 3 ; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: s_add_i32 s41, s41, 3 ; SI-NEXT: s_add_i32 s40, s40, 3 +; SI-NEXT: s_add_i32 s15, s15, 3 ; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s28 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v11, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s20 -; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s29, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s25, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s23, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s21, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s19, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 16 -; SI-NEXT: s_lshr_b32 s46, s6, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 16 -; SI-NEXT: s_lshr_b32 s56, s10, 16 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s58, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s40, 16 -; SI-NEXT: s_lshr_b32 s60, s42, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s62, s29, 16 -; SI-NEXT: s_lshr_b32 s63, s27, 16 -; SI-NEXT: s_lshr_b32 s72, s25, 16 -; SI-NEXT: s_lshr_b32 s73, s23, 16 -; SI-NEXT: s_lshr_b32 s74, s21, 16 -; SI-NEXT: s_lshr_b32 s75, s19, 16 -; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 
s[44:45], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[42:43], 16 +; SI-NEXT: s_lshr_b32 s34, s5, 16 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s36, s9, 16 +; SI-NEXT: s_lshr_b32 s37, s11, 16 +; SI-NEXT: s_lshr_b32 s38, s13, 16 +; SI-NEXT: s_lshr_b32 s39, s15, 16 +; SI-NEXT: s_lshr_b32 s48, s41, 16 +; SI-NEXT: s_lshr_b32 s49, s43, 16 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s27, 16 +; SI-NEXT: s_lshr_b32 s52, s25, 16 +; SI-NEXT: s_lshr_b32 s53, s23, 16 +; SI-NEXT: s_lshr_b32 s54, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s19, 16 +; SI-NEXT: s_lshr_b32 s64, s17, 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v16, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: buffer_store_dword v15, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s75, 16 -; SI-NEXT: buffer_store_dword v16, v15, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v15, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: s_lshl_b32 s45, s30, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s45 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s64, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s94, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s92, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s54, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword 
v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v6, s16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_lshl_b32 s16, s90, 16 +; SI-NEXT: s_and_b32 s17, s22, 0xffff +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s45, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: 
s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s44, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 
16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: 
buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s64, v18, 16 +; SI-NEXT: v_readlane_b32 s55, v18, 15 +; SI-NEXT: v_readlane_b32 s54, v18, 14 +; SI-NEXT: v_readlane_b32 s53, v18, 13 +; SI-NEXT: v_readlane_b32 s52, v18, 12 +; SI-NEXT: v_readlane_b32 s51, v18, 11 +; SI-NEXT: v_readlane_b32 s50, v18, 10 +; SI-NEXT: v_readlane_b32 s49, v18, 9 +; SI-NEXT: v_readlane_b32 s48, v18, 8 +; SI-NEXT: v_readlane_b32 s39, v18, 7 +; SI-NEXT: v_readlane_b32 s38, v18, 6 +; SI-NEXT: v_readlane_b32 s37, v18, 5 +; SI-NEXT: v_readlane_b32 s36, v18, 4 +; SI-NEXT: v_readlane_b32 s35, v18, 3 +; SI-NEXT: v_readlane_b32 s34, v18, 2 +; SI-NEXT: v_readlane_b32 s31, v18, 1 +; SI-NEXT: v_readlane_b32 s30, v18, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr75 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr62 -; 
SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v30i32_to_v60i16_scalar: @@ -6300,41 +6335,44 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v39, v16 -; SI-NEXT: v_mov_b32_e32 v48, v14 -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v50, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_mov_b32_e32 v60, v16 +; SI-NEXT: v_mov_b32_e32 v53, v14 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v62, v12 +; SI-NEXT: v_mov_b32_e32 v32, v10 +; SI-NEXT: v_mov_b32_e32 v55, v8 +; SI-NEXT: v_mov_b32_e32 v37, v6 +; SI-NEXT: v_mov_b32_e32 v41, v4 +; SI-NEXT: v_mov_b32_e32 v44, v2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v63, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:60 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:36 ; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:44 ; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:52 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v30, v28 -; SI-NEXT: v_mov_b32_e32 v33, v26 -; SI-NEXT: v_mov_b32_e32 v34, v24 -; SI-NEXT: v_mov_b32_e32 v35, v22 -; SI-NEXT: 
v_mov_b32_e32 v36, v20 -; SI-NEXT: v_mov_b32_e32 v37, v18 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v5 +; SI-NEXT: v_mov_b32_e32 v39, v26 +; SI-NEXT: v_mov_b32_e32 v48, v24 +; SI-NEXT: v_mov_b32_e32 v49, v22 +; SI-NEXT: v_mov_b32_e32 v47, v20 +; SI-NEXT: v_mov_b32_e32 v50, v18 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v7 ; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v11 @@ -6345,8 +6383,8 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 ; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v29 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill @@ -6358,51 +6396,51 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v10 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v14 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v14 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v16 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v16 ; SI-NEXT: s_cbranch_scc0 .LBB15_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v32 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v26 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v63 +; SI-NEXT: v_or_b32_e32 v7, v0, v31 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v9, v0, v28 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 ; SI-NEXT: v_or_b32_e32 v10, v0, v24 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 ; SI-NEXT: v_or_b32_e32 v11, v0, v22 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 ; SI-NEXT: v_or_b32_e32 v12, v0, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_waitcnt expcnt(6) ; SI-NEXT: v_or_b32_e32 v13, v0, v13 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 ; SI-NEXT: v_or_b32_e32 v14, v0, v18 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: v_or_b32_e32 v15, v0, v15 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 ; SI-NEXT: v_or_b32_e32 v16, v0, v17 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 ; SI-NEXT: v_or_b32_e32 v17, v0, v5 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: v_or_b32_e32 v18, v0, v3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 ; SI-NEXT: v_or_b32_e32 v19, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_or_b32_e32 v20, v0, v59 +; SI-NEXT: v_or_b32_e32 v20, v0, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 -; SI-NEXT: v_or_b32_e32 v21, v0, v58 +; SI-NEXT: v_or_b32_e32 v21, v0, v43 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 @@ -6415,17 +6453,17 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 +; SI-NEXT: v_or_b32_e32 v8, v1, v26 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_or_b32_e32 v8, v1, v28 ; SI-NEXT: s_or_b32 s10, s10, s11 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -6436,30 +6474,30 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_or_b32_e32 v22, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 -; SI-NEXT: v_or_b32_e32 v23, v0, v56 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v22, v0, v42 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v23, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_or_b32_e32 v24, v0, v47 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 -; SI-NEXT: v_or_b32_e32 v25, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v24, v0, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59 +; SI-NEXT: v_or_b32_e32 v25, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_or_b32_e32 v26, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 -; SI-NEXT: v_or_b32_e32 v27, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v26, v0, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_or_b32_e32 v27, v0, v34 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_or_b32_e32 v28, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v29, v0, v63 +; SI-NEXT: v_or_b32_e32 v28, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 +; SI-NEXT: v_or_b32_e32 v29, v0, v33 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: s_cbranch_execnz .LBB15_3 ; SI-NEXT: .LBB15_2: ; %cmp.true ; SI-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v63 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff @@ -6502,119 +6540,119 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v44 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword 
v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v38, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: .LBB15_3: ; %end @@ -6637,35 +6675,67 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB15_4: -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: v_mov_b32_e32 v46, v44 -; SI-NEXT: v_mov_b32_e32 v32, v30 -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v59, v58 -; SI-NEXT: v_mov_b32_e32 v58, v57 -; SI-NEXT: v_mov_b32_e32 v57, v56 -; SI-NEXT: v_mov_b32_e32 v56, v47 -; SI-NEXT: v_mov_b32_e32 v47, v62 +; SI-NEXT: v_mov_b32_e32 v45, v43 +; SI-NEXT: v_mov_b32_e32 v44, v42 +; SI-NEXT: v_mov_b32_e32 v43, v41 +; SI-NEXT: v_mov_b32_e32 v42, v40 +; SI-NEXT: v_mov_b32_e32 v41, v38 +; SI-NEXT: v_mov_b32_e32 v40, v37 +; SI-NEXT: v_mov_b32_e32 v38, v36 +; SI-NEXT: v_mov_b32_e32 v37, v35 +; SI-NEXT: v_mov_b32_e32 v36, v55 +; SI-NEXT: v_mov_b32_e32 v55, v34 +; SI-NEXT: v_mov_b32_e32 v35, v54 +; SI-NEXT: v_mov_b32_e32 v54, v33 +; SI-NEXT: v_mov_b32_e32 v34, v32 +; SI-NEXT: v_mov_b32_e32 v33, v62 ; SI-NEXT: v_mov_b32_e32 v62, v60 -; SI-NEXT: v_mov_b32_e32 v60, v63 -; SI-NEXT: v_mov_b32_e32 v63, v61 -; SI-NEXT: v_mov_b32_e32 v61, v31 +; SI-NEXT: v_mov_b32_e32 v32, v63 +; SI-NEXT: v_mov_b32_e32 v63, v53 +; SI-NEXT: v_mov_b32_e32 v53, v61 +; SI-NEXT: v_mov_b32_e32 v61, v52 +; SI-NEXT: v_mov_b32_e32 v52, v59 +; SI-NEXT: v_mov_b32_e32 v59, v51 +; SI-NEXT: v_mov_b32_e32 v51, v57 +; SI-NEXT: v_mov_b32_e32 v57, v50 +; SI-NEXT: v_mov_b32_e32 v50, v47 +; SI-NEXT: v_mov_b32_e32 v47, v48 +; SI-NEXT: v_mov_b32_e32 v48, v30 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v44, v46 -; SI-NEXT: v_mov_b32_e32 v31, v61 -; SI-NEXT: v_mov_b32_e32 v61, v63 -; SI-NEXT: v_mov_b32_e32 v63, v60 ; SI-NEXT: v_mov_b32_e32 v60, v62 -; SI-NEXT: v_mov_b32_e32 v62, v47 -; SI-NEXT: v_mov_b32_e32 v47, v56 -; SI-NEXT: v_mov_b32_e32 v56, v57 -; SI-NEXT: v_mov_b32_e32 v57, v58 -; SI-NEXT: v_mov_b32_e32 v58, v59 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v30, v32 +; SI-NEXT: v_mov_b32_e32 v30, v48 +; SI-NEXT: v_mov_b32_e32 v48, v47 +; SI-NEXT: v_mov_b32_e32 v47, v50 +; SI-NEXT: v_mov_b32_e32 v50, v57 +; SI-NEXT: v_mov_b32_e32 v57, v51 +; SI-NEXT: v_mov_b32_e32 v51, v59 +; SI-NEXT: v_mov_b32_e32 v59, v52 +; SI-NEXT: v_mov_b32_e32 v52, v61 +; SI-NEXT: v_mov_b32_e32 v61, v53 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v63, v32 +; SI-NEXT: v_mov_b32_e32 v62, v33 +; SI-NEXT: v_mov_b32_e32 v32, v34 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v54, v35 +; SI-NEXT: v_mov_b32_e32 v34, v55 +; SI-NEXT: v_mov_b32_e32 v55, v36 +; SI-NEXT: v_mov_b32_e32 v35, v37 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v37, v40 +; SI-NEXT: v_mov_b32_e32 v38, v41 +; SI-NEXT: v_mov_b32_e32 v40, v42 +; SI-NEXT: v_mov_b32_e32 v41, v43 +; SI-NEXT: v_mov_b32_e32 v42, v44 +; SI-NEXT: v_mov_b32_e32 v43, v45 +; SI-NEXT: v_mov_b32_e32 v44, v46 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:184 ; 
4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB15_2 ; ; VI-LABEL: bitcast_v60i16_to_v30i32_scalar: @@ -15867,245 +15937,248 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17 -; SI-NEXT: v_mov_b32_e32 v30, s16 +; SI-NEXT: v_mov_b32_e32 v27, s16 ; SI-NEXT: v_mov_b32_e32 v28, s17 -; SI-NEXT: v_mov_b32_e32 v33, s18 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v32, s19 -; SI-NEXT: v_mov_b32_e32 v29, s20 -; SI-NEXT: v_mov_b32_e32 v27, s21 -; SI-NEXT: v_mov_b32_e32 v25, s22 +; SI-NEXT: v_mov_b32_e32 v29, s18 +; SI-NEXT: v_mov_b32_e32 v30, s19 +; SI-NEXT: v_mov_b32_e32 v25, s20 +; SI-NEXT: v_mov_b32_e32 v26, s21 +; SI-NEXT: v_mov_b32_e32 v23, s22 ; SI-NEXT: v_mov_b32_e32 v24, s23 -; SI-NEXT: v_mov_b32_e32 v23, s24 -; SI-NEXT: v_mov_b32_e32 v21, s25 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v19, s27 -; SI-NEXT: v_mov_b32_e32 v18, s28 -; SI-NEXT: v_mov_b32_e32 v17, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v21, s24 +; SI-NEXT: v_mov_b32_e32 v22, s25 +; SI-NEXT: v_mov_b32_e32 v19, s26 +; SI-NEXT: v_mov_b32_e32 v20, s27 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v17, s28 +; SI-NEXT: v_mov_b32_e32 v18, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 
offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v22, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v26, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v31, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v34, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v35, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v36, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v38, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v48, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v51, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v53, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v55, v21, v23, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v25, 16 -; SI-NEXT: v_alignbit_b32 v44, v27, v29, 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[53:54], v[29:30], 16 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v46, v32, v33, 16 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v18 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v20 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v56, v28, v30, 16 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v22 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v28 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[27:28], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 -; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 -; SI-NEXT: v_add_f32_e32 v32, 1.0, v32 -; SI-NEXT: v_add_f32_e32 v33, 1.0, v33 -; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 -; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 -; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: 
v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_lshr_b64 v[31:32], v[15:16], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 ; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_lshr_b64 v[33:34], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 ; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 ; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 ; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_alignbit_b32 v22, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v26, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v31, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v34, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v35, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v36, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v38, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v48, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v51, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v53, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v55, v21, v23, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v25, 16 -; SI-NEXT: v_alignbit_b32 v44, v27, v29, 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[21:22], 16 +; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 +; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 +; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[36:37], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[25:26], 16 +; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[37:38], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[53:54], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[27:28], 16 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v46, v32, v33, 16 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v18 +; 
SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v20 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v56, v28, v30, 16 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v22 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v28 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v30, 0xffff, v30 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v56 -; SI-NEXT: v_or_b32_e32 v30, v30, v56 -; SI-NEXT: buffer_store_dword v30, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v60 -; SI-NEXT: v_or_b32_e32 v28, v28, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v28, v30, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v46 -; SI-NEXT: v_or_b32_e32 v28, v28, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v28, v30, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v54 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 +; SI-NEXT: v_or_b32_e32 v27, v27, v39 +; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v59 -; SI-NEXT: v_or_b32_e32 v28, v28, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v28, v30, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v62 +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v53 ; SI-NEXT: v_and_b32_e32 v28, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v44 -; SI-NEXT: v_or_b32_e32 v28, v28, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v28, v29, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 +; SI-NEXT: v_or_b32_e32 v27, v28, v27 +; SI-NEXT: v_add_i32_e32 v28, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v58 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v30 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v61 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: v_add_i32_e32 v28, vcc, 20, v0 +; SI-NEXT: v_add_i32_e32 v28, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v52 +; SI-NEXT: v_and_b32_e32 v25, 
0xffff, v25 ; SI-NEXT: v_or_b32_e32 v25, v25, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v27, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v57 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v60 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v51 ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v23, v23, v25 +; SI-NEXT: v_add_i32_e32 v25, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v55 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v59 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v24, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v47 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v50 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 36, v0 +; SI-NEXT: v_add_i32_e32 v23, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v53 -; SI-NEXT: v_or_b32_e32 v20, v20, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v58 +; SI-NEXT: v_or_b32_e32 v21, v21, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v49 +; SI-NEXT: v_or_b32_e32 v19, v19, v21 +; SI-NEXT: v_add_i32_e32 v21, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v57 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v51 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v43 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v56 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v48 +; SI-NEXT: 
v_lshlrev_b32_e32 v17, 16, v38 ; SI-NEXT: v_or_b32_e32 v1, v1, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v47 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v46 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -16117,7 +16190,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v45 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -16129,7 +16202,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -16141,92 +16214,94 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: 
v_and_b32_e32 v1, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr44 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr57 
-; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr56 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v30f32_to_v60i16_scalar: @@ -18310,41 +18385,44 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v39, v16 -; SI-NEXT: v_mov_b32_e32 v48, v14 -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v50, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_mov_b32_e32 v60, v16 +; SI-NEXT: v_mov_b32_e32 v53, v14 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v62, v12 +; SI-NEXT: v_mov_b32_e32 v32, v10 +; SI-NEXT: v_mov_b32_e32 v55, v8 +; SI-NEXT: v_mov_b32_e32 v37, v6 +; SI-NEXT: v_mov_b32_e32 v41, v4 +; SI-NEXT: v_mov_b32_e32 v44, v2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v63, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:60 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:36 ; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:44 ; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:56 -; SI-NEXT: 
buffer_load_dword v38, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:52 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v30, v28 -; SI-NEXT: v_mov_b32_e32 v33, v26 -; SI-NEXT: v_mov_b32_e32 v34, v24 -; SI-NEXT: v_mov_b32_e32 v35, v22 -; SI-NEXT: v_mov_b32_e32 v36, v20 -; SI-NEXT: v_mov_b32_e32 v37, v18 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v5 +; SI-NEXT: v_mov_b32_e32 v39, v26 +; SI-NEXT: v_mov_b32_e32 v48, v24 +; SI-NEXT: v_mov_b32_e32 v49, v22 +; SI-NEXT: v_mov_b32_e32 v47, v20 +; SI-NEXT: v_mov_b32_e32 v50, v18 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v7 ; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v11 @@ -18355,8 +18433,8 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a, ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 ; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v29 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill @@ -18368,51 +18446,51 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v10 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v14 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v14 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v16 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v16 ; SI-NEXT: s_cbranch_scc0 .LBB31_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v32 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v26 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v63 +; SI-NEXT: v_or_b32_e32 v7, v0, v31 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v9, v0, v28 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 ; SI-NEXT: v_or_b32_e32 v10, v0, v24 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 ; SI-NEXT: v_or_b32_e32 v11, v0, v22 -; SI-NEXT: 
v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 ; SI-NEXT: v_or_b32_e32 v12, v0, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_waitcnt expcnt(6) ; SI-NEXT: v_or_b32_e32 v13, v0, v13 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 ; SI-NEXT: v_or_b32_e32 v14, v0, v18 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: v_or_b32_e32 v15, v0, v15 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 ; SI-NEXT: v_or_b32_e32 v16, v0, v17 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 ; SI-NEXT: v_or_b32_e32 v17, v0, v5 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: v_or_b32_e32 v18, v0, v3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 ; SI-NEXT: v_or_b32_e32 v19, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_or_b32_e32 v20, v0, v59 +; SI-NEXT: v_or_b32_e32 v20, v0, v45 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30 -; SI-NEXT: v_or_b32_e32 v21, v0, v58 +; SI-NEXT: v_or_b32_e32 v21, v0, v43 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 @@ -18425,17 +18503,17 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a, ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 +; SI-NEXT: v_or_b32_e32 v8, v1, v26 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_or_b32_e32 v8, v1, v28 ; SI-NEXT: s_or_b32 s10, s10, s11 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -18446,30 +18524,30 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_or_b32_e32 v22, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 -; SI-NEXT: v_or_b32_e32 v23, v0, v56 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v22, v0, v42 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v23, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_or_b32_e32 v24, v0, v47 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 -; SI-NEXT: v_or_b32_e32 v25, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v24, v0, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59 +; SI-NEXT: v_or_b32_e32 v25, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_or_b32_e32 v26, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 -; SI-NEXT: v_or_b32_e32 v27, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v26, v0, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_or_b32_e32 v27, v0, v34 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_or_b32_e32 v28, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 
0xffff, v38 -; SI-NEXT: v_or_b32_e32 v29, v0, v63 +; SI-NEXT: v_or_b32_e32 v28, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 +; SI-NEXT: v_or_b32_e32 v29, v0, v33 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: s_cbranch_execnz .LBB31_3 ; SI-NEXT: .LBB31_2: ; %cmp.true ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v63 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff @@ -18512,119 +18590,119 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v44 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v38, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 
0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: .LBB31_3: ; %end @@ -18647,35 +18725,67 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB31_4: -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: v_mov_b32_e32 v46, v44 -; SI-NEXT: v_mov_b32_e32 v32, v30 -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v59, v58 -; SI-NEXT: v_mov_b32_e32 v58, v57 -; SI-NEXT: v_mov_b32_e32 v57, v56 -; SI-NEXT: v_mov_b32_e32 v56, v47 -; SI-NEXT: v_mov_b32_e32 v47, v62 +; SI-NEXT: v_mov_b32_e32 v45, v43 +; SI-NEXT: v_mov_b32_e32 v44, v42 +; SI-NEXT: v_mov_b32_e32 v43, v41 +; SI-NEXT: v_mov_b32_e32 v42, v40 +; SI-NEXT: v_mov_b32_e32 v41, v38 +; SI-NEXT: v_mov_b32_e32 v40, v37 +; SI-NEXT: v_mov_b32_e32 v38, v36 +; SI-NEXT: v_mov_b32_e32 v37, v35 +; SI-NEXT: v_mov_b32_e32 v36, v55 +; SI-NEXT: v_mov_b32_e32 v55, v34 +; SI-NEXT: v_mov_b32_e32 v35, v54 +; SI-NEXT: v_mov_b32_e32 v54, v33 +; SI-NEXT: v_mov_b32_e32 v34, v32 +; SI-NEXT: v_mov_b32_e32 v33, v62 ; SI-NEXT: v_mov_b32_e32 v62, v60 -; SI-NEXT: v_mov_b32_e32 v60, v63 -; SI-NEXT: v_mov_b32_e32 v63, v61 -; SI-NEXT: v_mov_b32_e32 v61, v31 +; SI-NEXT: v_mov_b32_e32 v32, v63 +; SI-NEXT: v_mov_b32_e32 v63, v53 +; SI-NEXT: v_mov_b32_e32 v53, v61 +; SI-NEXT: v_mov_b32_e32 v61, v52 +; SI-NEXT: v_mov_b32_e32 v52, v59 +; SI-NEXT: v_mov_b32_e32 v59, v51 +; SI-NEXT: v_mov_b32_e32 v51, v57 +; SI-NEXT: v_mov_b32_e32 v57, v50 +; SI-NEXT: v_mov_b32_e32 v50, v47 +; SI-NEXT: v_mov_b32_e32 v47, v48 +; SI-NEXT: v_mov_b32_e32 v48, v30 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v44, v46 -; SI-NEXT: v_mov_b32_e32 v31, v61 -; SI-NEXT: v_mov_b32_e32 v61, v63 -; SI-NEXT: v_mov_b32_e32 v63, v60 ; SI-NEXT: v_mov_b32_e32 v60, v62 -; SI-NEXT: v_mov_b32_e32 v62, v47 -; SI-NEXT: v_mov_b32_e32 v47, v56 -; SI-NEXT: v_mov_b32_e32 v56, v57 -; SI-NEXT: v_mov_b32_e32 v57, v58 -; SI-NEXT: v_mov_b32_e32 v58, v59 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v30, v32 +; SI-NEXT: v_mov_b32_e32 v30, v48 +; SI-NEXT: v_mov_b32_e32 v48, v47 +; SI-NEXT: v_mov_b32_e32 v47, v50 +; SI-NEXT: v_mov_b32_e32 v50, v57 +; SI-NEXT: v_mov_b32_e32 v57, v51 +; SI-NEXT: v_mov_b32_e32 v51, v59 +; SI-NEXT: v_mov_b32_e32 v59, v52 +; SI-NEXT: v_mov_b32_e32 v52, v61 +; SI-NEXT: v_mov_b32_e32 v61, v53 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v63, v32 +; SI-NEXT: v_mov_b32_e32 v62, v33 +; SI-NEXT: v_mov_b32_e32 v32, v34 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v54, v35 +; SI-NEXT: v_mov_b32_e32 v34, v55 +; SI-NEXT: v_mov_b32_e32 v55, v36 +; SI-NEXT: v_mov_b32_e32 v35, v37 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; 
SI-NEXT: v_mov_b32_e32 v37, v40 +; SI-NEXT: v_mov_b32_e32 v38, v41 +; SI-NEXT: v_mov_b32_e32 v40, v42 +; SI-NEXT: v_mov_b32_e32 v41, v43 +; SI-NEXT: v_mov_b32_e32 v42, v44 +; SI-NEXT: v_mov_b32_e32 v43, v45 +; SI-NEXT: v_mov_b32_e32 v44, v46 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB31_2 ; ; VI-LABEL: bitcast_v60i16_to_v30f32_scalar: @@ -26969,361 +27079,396 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v15i64_to_v60i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v18, s30, 0 +; SI-NEXT: v_writelane_b32 v18, s31, 1 +; SI-NEXT: v_writelane_b32 v18, s34, 2 +; SI-NEXT: v_writelane_b32 v18, s35, 3 +; SI-NEXT: v_writelane_b32 v18, s36, 4 +; SI-NEXT: v_writelane_b32 v18, s37, 5 +; SI-NEXT: v_writelane_b32 v18, s38, 6 +; SI-NEXT: v_writelane_b32 v18, s39, 7 +; SI-NEXT: v_writelane_b32 v18, s48, 8 +; SI-NEXT: v_writelane_b32 v18, s49, 9 +; SI-NEXT: v_writelane_b32 v18, s50, 10 +; SI-NEXT: v_writelane_b32 v18, s51, 11 +; SI-NEXT: v_writelane_b32 v18, s52, 12 +; SI-NEXT: v_writelane_b32 v18, s53, 13 +; SI-NEXT: v_writelane_b32 v18, s54, 14 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17 -; SI-NEXT: v_readfirstlane_b32 s45, v1 -; SI-NEXT: v_readfirstlane_b32 s44, v2 -; SI-NEXT: v_readfirstlane_b32 s43, v3 -; SI-NEXT: v_readfirstlane_b32 s42, v4 -; SI-NEXT: v_readfirstlane_b32 s41, v5 -; SI-NEXT: v_readfirstlane_b32 s40, v6 -; SI-NEXT: v_readfirstlane_b32 s15, v7 -; SI-NEXT: v_readfirstlane_b32 s14, v8 -; SI-NEXT: v_readfirstlane_b32 s13, v9 -; SI-NEXT: v_readfirstlane_b32 s12, v10 -; SI-NEXT: v_readfirstlane_b32 s11, v11 -; SI-NEXT: v_readfirstlane_b32 s10, v12 -; SI-NEXT: v_readfirstlane_b32 s9, v13 -; SI-NEXT: v_readfirstlane_b32 s8, v14 -; SI-NEXT: v_readfirstlane_b32 s7, v15 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v16 +; SI-NEXT: v_writelane_b32 v18, s55, 15 +; SI-NEXT: v_readfirstlane_b32 s42, v1 +; SI-NEXT: v_readfirstlane_b32 s43, v2 +; SI-NEXT: v_readfirstlane_b32 s40, v3 +; SI-NEXT: v_readfirstlane_b32 s41, v4 +; SI-NEXT: v_readfirstlane_b32 s14, v5 +; SI-NEXT: v_readfirstlane_b32 s15, v6 +; SI-NEXT: v_readfirstlane_b32 s12, v7 +; SI-NEXT: v_readfirstlane_b32 s13, v8 +; SI-NEXT: v_readfirstlane_b32 s10, v9 +; SI-NEXT: v_readfirstlane_b32 s11, v10 +; SI-NEXT: v_readfirstlane_b32 s8, v11 +; SI-NEXT: v_readfirstlane_b32 s9, v12 +; SI-NEXT: v_readfirstlane_b32 s6, v13 +; SI-NEXT: v_readfirstlane_b32 s7, v14 +; SI-NEXT: v_readfirstlane_b32 s4, v15 +; SI-NEXT: s_and_b64 s[44:45], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: v_writelane_b32 v18, s64, 16 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s28 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v11, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s20 -; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: 
v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s29, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s25, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s23, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s21, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s19, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 16 -; SI-NEXT: s_lshr_b32 s46, s6, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 16 -; SI-NEXT: s_lshr_b32 s56, s10, 16 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s58, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s40, 16 -; SI-NEXT: s_lshr_b32 s60, s42, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s62, s29, 16 -; SI-NEXT: s_lshr_b32 s63, s27, 16 -; SI-NEXT: s_lshr_b32 s72, s25, 16 -; SI-NEXT: s_lshr_b32 s73, s23, 16 -; SI-NEXT: s_lshr_b32 s74, s21, 16 -; SI-NEXT: s_lshr_b32 s75, s19, 16 -; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_lshr_b32 s34, s5, 16 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s36, s9, 16 +; SI-NEXT: s_lshr_b32 s37, s11, 16 +; SI-NEXT: s_lshr_b32 s38, s13, 16 +; SI-NEXT: s_lshr_b32 s39, s15, 16 +; SI-NEXT: s_lshr_b32 s48, s41, 16 +; SI-NEXT: s_lshr_b32 s49, s43, 16 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s27, 16 +; SI-NEXT: s_lshr_b32 s52, s25, 16 +; SI-NEXT: s_lshr_b32 s53, s23, 16 +; SI-NEXT: s_lshr_b32 s54, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s19, 16 +; SI-NEXT: s_lshr_b32 s64, s17, 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 +; SI-NEXT: s_add_u32 s40, s40, 3 +; SI-NEXT: s_addc_u32 s41, s41, 0 +; SI-NEXT: s_add_u32 s42, s42, 3 +; SI-NEXT: s_addc_u32 s43, s43, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s45, 
s45, 3 -; SI-NEXT: s_addc_u32 s44, s44, 0 -; SI-NEXT: s_add_u32 s43, s43, 3 -; SI-NEXT: s_addc_u32 s42, s42, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s28 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v11, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s20 -; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s29, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s25, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s23, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s21, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s19, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 16 -; SI-NEXT: s_lshr_b32 s46, s6, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 16 -; SI-NEXT: s_lshr_b32 s56, s10, 16 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s58, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s40, 16 -; SI-NEXT: s_lshr_b32 s60, s42, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s62, s29, 16 -; SI-NEXT: s_lshr_b32 s63, s27, 16 -; SI-NEXT: s_lshr_b32 s72, s25, 16 -; SI-NEXT: s_lshr_b32 s73, s23, 16 -; SI-NEXT: s_lshr_b32 s74, s21, 16 -; SI-NEXT: s_lshr_b32 s75, s19, 16 -; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s34, s5, 16 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s36, s9, 16 +; SI-NEXT: s_lshr_b32 s37, s11, 16 +; SI-NEXT: s_lshr_b32 s38, s13, 16 +; SI-NEXT: s_lshr_b32 s39, s15, 16 +; SI-NEXT: s_lshr_b32 s48, s41, 16 +; SI-NEXT: s_lshr_b32 s49, s43, 16 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s27, 16 +; SI-NEXT: s_lshr_b32 s52, s25, 16 +; SI-NEXT: s_lshr_b32 s53, s23, 16 +; SI-NEXT: s_lshr_b32 s54, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s19, 16 +; SI-NEXT: s_lshr_b32 s64, s17, 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[42:43], 16 +; SI-NEXT: 
s_lshr_b64 s[76:77], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v16, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: buffer_store_dword v15, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s75, 16 -; SI-NEXT: buffer_store_dword v16, v15, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v15, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: s_lshl_b32 s45, s30, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s45 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s64, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s94, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s92, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s54, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v6, s16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_lshl_b32 s16, s90, 16 +; SI-NEXT: s_and_b32 s17, s22, 0xffff +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 
v13, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s45, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s44, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword 
v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; 
SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s64, v18, 16 +; SI-NEXT: v_readlane_b32 s55, v18, 15 +; SI-NEXT: v_readlane_b32 s54, v18, 14 +; SI-NEXT: v_readlane_b32 s53, v18, 13 +; SI-NEXT: v_readlane_b32 s52, v18, 12 +; SI-NEXT: v_readlane_b32 s51, v18, 11 +; SI-NEXT: v_readlane_b32 s50, v18, 10 +; SI-NEXT: v_readlane_b32 s49, v18, 9 +; SI-NEXT: v_readlane_b32 s48, v18, 8 +; SI-NEXT: v_readlane_b32 s39, v18, 7 +; SI-NEXT: v_readlane_b32 s38, v18, 6 +; SI-NEXT: v_readlane_b32 s37, v18, 5 +; SI-NEXT: v_readlane_b32 s36, v18, 4 +; SI-NEXT: v_readlane_b32 s35, v18, 3 +; SI-NEXT: v_readlane_b32 s34, v18, 2 +; SI-NEXT: v_readlane_b32 s31, v18, 1 +; SI-NEXT: v_readlane_b32 s30, v18, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr75 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v15i64_to_v60i16_scalar: 
@@ -29422,41 +29567,44 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v39, v16 -; SI-NEXT: v_mov_b32_e32 v48, v14 -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v50, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_mov_b32_e32 v60, v16 +; SI-NEXT: v_mov_b32_e32 v53, v14 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v62, v12 +; SI-NEXT: v_mov_b32_e32 v32, v10 +; SI-NEXT: v_mov_b32_e32 v55, v8 +; SI-NEXT: v_mov_b32_e32 v37, v6 +; SI-NEXT: v_mov_b32_e32 v41, v4 +; SI-NEXT: v_mov_b32_e32 v44, v2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v63, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:60 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:36 ; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:44 ; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:52 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v30, v28 -; SI-NEXT: v_mov_b32_e32 v33, v26 -; SI-NEXT: v_mov_b32_e32 v34, v24 -; SI-NEXT: v_mov_b32_e32 v35, v22 -; SI-NEXT: v_mov_b32_e32 v36, v20 -; SI-NEXT: v_mov_b32_e32 v37, v18 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v5 +; SI-NEXT: v_mov_b32_e32 v39, v26 +; SI-NEXT: v_mov_b32_e32 v48, v24 +; SI-NEXT: v_mov_b32_e32 v49, v22 +; SI-NEXT: v_mov_b32_e32 v47, v20 +; SI-NEXT: v_mov_b32_e32 v50, v18 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v7 ; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v11 @@ -29467,8 +29615,8 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 ; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v27 -; SI-NEXT: 
v_lshlrev_b32_e32 v58, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v29 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill @@ -29480,51 +29628,51 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v10 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v14 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v14 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v16 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v16 ; SI-NEXT: s_cbranch_scc0 .LBB43_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v32 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v26 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v63 +; SI-NEXT: v_or_b32_e32 v7, v0, v31 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v9, v0, v28 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 ; SI-NEXT: v_or_b32_e32 v10, v0, v24 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 ; SI-NEXT: v_or_b32_e32 v11, v0, v22 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 ; SI-NEXT: v_or_b32_e32 v12, v0, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_waitcnt expcnt(6) ; SI-NEXT: v_or_b32_e32 v13, v0, v13 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 ; SI-NEXT: v_or_b32_e32 v14, v0, v18 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: v_or_b32_e32 v15, v0, v15 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 ; SI-NEXT: v_or_b32_e32 v16, v0, v17 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 ; SI-NEXT: v_or_b32_e32 v17, v0, v5 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: v_or_b32_e32 v18, v0, v3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 ; SI-NEXT: v_or_b32_e32 v19, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_or_b32_e32 v20, v0, v59 +; SI-NEXT: v_or_b32_e32 v20, v0, v45 ; SI-NEXT: 
v_and_b32_e32 v0, 0xffff, v30 -; SI-NEXT: v_or_b32_e32 v21, v0, v58 +; SI-NEXT: v_or_b32_e32 v21, v0, v43 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 @@ -29537,17 +29685,17 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 +; SI-NEXT: v_or_b32_e32 v8, v1, v26 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_or_b32_e32 v8, v1, v28 ; SI-NEXT: s_or_b32 s10, s10, s11 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -29558,30 +29706,30 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_or_b32_e32 v22, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 -; SI-NEXT: v_or_b32_e32 v23, v0, v56 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v22, v0, v42 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v23, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_or_b32_e32 v24, v0, v47 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 -; SI-NEXT: v_or_b32_e32 v25, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v24, v0, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59 +; SI-NEXT: v_or_b32_e32 v25, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_or_b32_e32 v26, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 -; SI-NEXT: v_or_b32_e32 v27, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v26, v0, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_or_b32_e32 v27, v0, v34 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_or_b32_e32 v28, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v29, v0, v63 +; SI-NEXT: v_or_b32_e32 v28, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 +; SI-NEXT: v_or_b32_e32 v29, v0, v33 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: s_cbranch_execnz .LBB43_3 ; SI-NEXT: .LBB43_2: ; %cmp.true ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v63 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff @@ -29624,119 +29772,119 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v44 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 -; SI-NEXT: buffer_load_dword 
v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: 
v_add_i32_e32 v0, vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v38, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: .LBB43_3: ; %end @@ -29759,35 +29907,67 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB43_4: -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: v_mov_b32_e32 v46, v44 -; SI-NEXT: v_mov_b32_e32 v32, v30 -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v59, v58 -; SI-NEXT: v_mov_b32_e32 v58, v57 -; SI-NEXT: v_mov_b32_e32 v57, v56 -; SI-NEXT: v_mov_b32_e32 v56, v47 -; SI-NEXT: v_mov_b32_e32 v47, v62 +; SI-NEXT: v_mov_b32_e32 v45, v43 +; SI-NEXT: v_mov_b32_e32 v44, v42 +; SI-NEXT: v_mov_b32_e32 v43, v41 +; 
SI-NEXT: v_mov_b32_e32 v42, v40 +; SI-NEXT: v_mov_b32_e32 v41, v38 +; SI-NEXT: v_mov_b32_e32 v40, v37 +; SI-NEXT: v_mov_b32_e32 v38, v36 +; SI-NEXT: v_mov_b32_e32 v37, v35 +; SI-NEXT: v_mov_b32_e32 v36, v55 +; SI-NEXT: v_mov_b32_e32 v55, v34 +; SI-NEXT: v_mov_b32_e32 v35, v54 +; SI-NEXT: v_mov_b32_e32 v54, v33 +; SI-NEXT: v_mov_b32_e32 v34, v32 +; SI-NEXT: v_mov_b32_e32 v33, v62 ; SI-NEXT: v_mov_b32_e32 v62, v60 -; SI-NEXT: v_mov_b32_e32 v60, v63 -; SI-NEXT: v_mov_b32_e32 v63, v61 -; SI-NEXT: v_mov_b32_e32 v61, v31 +; SI-NEXT: v_mov_b32_e32 v32, v63 +; SI-NEXT: v_mov_b32_e32 v63, v53 +; SI-NEXT: v_mov_b32_e32 v53, v61 +; SI-NEXT: v_mov_b32_e32 v61, v52 +; SI-NEXT: v_mov_b32_e32 v52, v59 +; SI-NEXT: v_mov_b32_e32 v59, v51 +; SI-NEXT: v_mov_b32_e32 v51, v57 +; SI-NEXT: v_mov_b32_e32 v57, v50 +; SI-NEXT: v_mov_b32_e32 v50, v47 +; SI-NEXT: v_mov_b32_e32 v47, v48 +; SI-NEXT: v_mov_b32_e32 v48, v30 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v44, v46 -; SI-NEXT: v_mov_b32_e32 v31, v61 -; SI-NEXT: v_mov_b32_e32 v61, v63 -; SI-NEXT: v_mov_b32_e32 v63, v60 ; SI-NEXT: v_mov_b32_e32 v60, v62 -; SI-NEXT: v_mov_b32_e32 v62, v47 -; SI-NEXT: v_mov_b32_e32 v47, v56 -; SI-NEXT: v_mov_b32_e32 v56, v57 -; SI-NEXT: v_mov_b32_e32 v57, v58 -; SI-NEXT: v_mov_b32_e32 v58, v59 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v30, v32 +; SI-NEXT: v_mov_b32_e32 v30, v48 +; SI-NEXT: v_mov_b32_e32 v48, v47 +; SI-NEXT: v_mov_b32_e32 v47, v50 +; SI-NEXT: v_mov_b32_e32 v50, v57 +; SI-NEXT: v_mov_b32_e32 v57, v51 +; SI-NEXT: v_mov_b32_e32 v51, v59 +; SI-NEXT: v_mov_b32_e32 v59, v52 +; SI-NEXT: v_mov_b32_e32 v52, v61 +; SI-NEXT: v_mov_b32_e32 v61, v53 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v63, v32 +; SI-NEXT: v_mov_b32_e32 v62, v33 +; SI-NEXT: v_mov_b32_e32 v32, v34 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v54, v35 +; SI-NEXT: v_mov_b32_e32 v34, v55 +; SI-NEXT: v_mov_b32_e32 v55, v36 +; SI-NEXT: v_mov_b32_e32 v35, v37 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v37, v40 +; SI-NEXT: v_mov_b32_e32 v38, v41 +; SI-NEXT: v_mov_b32_e32 v40, v42 +; SI-NEXT: v_mov_b32_e32 v41, v43 +; SI-NEXT: v_mov_b32_e32 v42, v44 +; SI-NEXT: v_mov_b32_e32 v43, v45 +; SI-NEXT: v_mov_b32_e32 v44, v46 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB43_2 ; ; VI-LABEL: bitcast_v60i16_to_v15i64_scalar: @@ -37157,203 +37337,207 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v17, s28 ; SI-NEXT: v_mov_b32_e32 v18, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], 
s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v31, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v32, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v34, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v35, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v36, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v37, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v39, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v50, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v52, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v55, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v23, 16 -; SI-NEXT: v_alignbit_b32 v43, v26, v25, 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[53:54], v[25:26], 16 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 
v46, v30, v29, 16 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v18 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v20 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v56, v28, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v22 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v24 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v28 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[39:40], v[27:28], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 -; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 ; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_alignbit_b32 v31, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v32, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v34, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v35, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v36, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v37, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v39, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v50, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v52, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v55, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v23, 16 -; SI-NEXT: v_alignbit_b32 v43, v26, v25, 16 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 +; SI-NEXT: v_lshr_b64 v[31:32], v[15:16], 16 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: v_lshr_b64 v[33:34], v[11:12], 16 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 +; SI-NEXT: v_lshr_b64 v[36:37], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[1:2], 16 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 +; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 +; SI-NEXT: 
v_lshr_b64 v[37:38], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[53:54], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[39:40], v[27:28], 16 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v46, v30, v29, 16 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v18 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v20 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v56, v28, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v22 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v24 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v28 ; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v56 +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v39 ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_or_b32_e32 v27, v27, v56 +; SI-NEXT: v_or_b32_e32 v27, v27, v39 ; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v60 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v63 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 ; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v46 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v54 +; SI-NEXT: v_and_b32_e32 v28, 0xffff, v29 +; SI-NEXT: v_or_b32_e32 v27, v28, v27 ; SI-NEXT: v_add_i32_e32 v28, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v59 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v62 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 ; SI-NEXT: v_add_i32_e32 v28, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v53 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: v_or_b32_e32 v25, v25, v27 ; SI-NEXT: v_add_i32_e32 v27, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v25, v27, 
s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v58 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v61 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 ; SI-NEXT: v_add_i32_e32 v26, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v52 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v57 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v60 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v49 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v47 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v59 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v58 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v44 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v57 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v56 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37365,7 +37549,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 
16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v47 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37377,7 +37561,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v46 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37389,7 +37573,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v45 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37401,7 +37585,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37413,7 +37597,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37425,7 +37609,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37437,56 +37621,59 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: 
buffer_load_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $vgpr56 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v15f64_to_v60i16_scalar: @@ -39540,41 
+39727,44 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v39, v16 -; SI-NEXT: v_mov_b32_e32 v48, v14 -; SI-NEXT: v_mov_b32_e32 v49, v12 -; SI-NEXT: v_mov_b32_e32 v50, v10 -; SI-NEXT: v_mov_b32_e32 v51, v8 -; SI-NEXT: v_mov_b32_e32 v52, v6 -; SI-NEXT: v_mov_b32_e32 v53, v4 -; SI-NEXT: v_mov_b32_e32 v54, v2 -; SI-NEXT: v_mov_b32_e32 v55, v0 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_mov_b32_e32 v60, v16 +; SI-NEXT: v_mov_b32_e32 v53, v14 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v62, v12 +; SI-NEXT: v_mov_b32_e32 v32, v10 +; SI-NEXT: v_mov_b32_e32 v55, v8 +; SI-NEXT: v_mov_b32_e32 v37, v6 +; SI-NEXT: v_mov_b32_e32 v41, v4 +; SI-NEXT: v_mov_b32_e32 v44, v2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v63, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:60 ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:12 ; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:28 ; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:36 ; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:44 ; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:52 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v30, v28 -; SI-NEXT: v_mov_b32_e32 v33, v26 -; SI-NEXT: v_mov_b32_e32 v34, v24 -; SI-NEXT: v_mov_b32_e32 v35, v22 -; SI-NEXT: v_mov_b32_e32 v36, v20 -; SI-NEXT: v_mov_b32_e32 v37, v18 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v5 +; SI-NEXT: v_mov_b32_e32 v39, v26 +; SI-NEXT: v_mov_b32_e32 v48, v24 +; SI-NEXT: v_mov_b32_e32 v49, v22 +; SI-NEXT: v_mov_b32_e32 v47, v20 +; SI-NEXT: v_mov_b32_e32 v50, v18 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v7 ; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v11 @@ -39585,8 +39775,8 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a, ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 ; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 
v58, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v29 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill @@ -39598,51 +39788,51 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a, ; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v10 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v14 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v14 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v16 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v16 ; SI-NEXT: s_cbranch_scc0 .LBB51_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 -; SI-NEXT: v_or_b32_e32 v7, v0, v32 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v9, v0, v26 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v63 +; SI-NEXT: v_or_b32_e32 v7, v0, v31 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 +; SI-NEXT: v_or_b32_e32 v9, v0, v28 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 ; SI-NEXT: v_or_b32_e32 v10, v0, v24 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v55 ; SI-NEXT: v_or_b32_e32 v11, v0, v22 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32 ; SI-NEXT: v_or_b32_e32 v12, v0, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v62 ; SI-NEXT: s_waitcnt expcnt(6) ; SI-NEXT: v_or_b32_e32 v13, v0, v13 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v53 ; SI-NEXT: v_or_b32_e32 v14, v0, v18 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60 ; SI-NEXT: v_or_b32_e32 v15, v0, v15 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50 ; SI-NEXT: v_or_b32_e32 v16, v0, v17 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47 ; SI-NEXT: v_or_b32_e32 v17, v0, v5 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49 ; SI-NEXT: v_or_b32_e32 v18, v0, v3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48 ; SI-NEXT: v_or_b32_e32 v19, v0, v46 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_or_b32_e32 v20, v0, v59 +; SI-NEXT: v_or_b32_e32 v20, v0, v45 ; SI-NEXT: v_and_b32_e32 v0, 
0xffff, v30 -; SI-NEXT: v_or_b32_e32 v21, v0, v58 +; SI-NEXT: v_or_b32_e32 v21, v0, v43 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 @@ -39655,17 +39845,17 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a, ; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_and_b32 s7, s22, 0xffff ; SI-NEXT: s_lshl_b32 s8, s23, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 ; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_and_b32 s8, s24, 0xffff ; SI-NEXT: s_lshl_b32 s9, s25, 16 +; SI-NEXT: v_or_b32_e32 v8, v1, v26 ; SI-NEXT: s_or_b32 s8, s8, s9 ; SI-NEXT: s_and_b32 s9, s26, 0xffff ; SI-NEXT: s_lshl_b32 s10, s27, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 ; SI-NEXT: s_or_b32 s9, s9, s10 ; SI-NEXT: s_and_b32 s10, s28, 0xffff ; SI-NEXT: s_lshl_b32 s11, s29, 16 -; SI-NEXT: v_or_b32_e32 v8, v1, v28 ; SI-NEXT: s_or_b32 s10, s10, s11 ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: v_mov_b32_e32 v2, s6 @@ -39676,30 +39866,30 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_or_b32_e32 v22, v0, v57 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v45 -; SI-NEXT: v_or_b32_e32 v23, v0, v56 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v44 +; SI-NEXT: v_or_b32_e32 v22, v0, v42 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v23, v0, v40 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_or_b32_e32 v24, v0, v47 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v43 -; SI-NEXT: v_or_b32_e32 v25, v0, v62 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v24, v0, v38 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59 +; SI-NEXT: v_or_b32_e32 v25, v0, v36 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_or_b32_e32 v26, v0, v61 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41 -; SI-NEXT: v_or_b32_e32 v27, v0, v60 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v26, v0, v35 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51 +; SI-NEXT: v_or_b32_e32 v27, v0, v34 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_or_b32_e32 v28, v0, v31 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38 -; SI-NEXT: v_or_b32_e32 v29, v0, v63 +; SI-NEXT: v_or_b32_e32 v28, v0, v54 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56 +; SI-NEXT: v_or_b32_e32 v29, v0, v33 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: s_cbranch_execnz .LBB51_3 ; SI-NEXT: .LBB51_2: ; %cmp.true ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v63 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff @@ -39742,119 +39932,119 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a, ; SI-NEXT: v_mov_b32_e32 v6, s10 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v44 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], 
s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_mov_b32_e32 v2, s6 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v62 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_or_b32_e32 v0, v1, v0 ; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v34 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: v_or_b32_e32 v0, v46, v0 ; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, 
vcc, 3, v33 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v59, v0 +; SI-NEXT: v_or_b32_e32 v0, v45, v0 ; SI-NEXT: v_add_i32_e32 v20, vcc, 0x30000, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v58, v0 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 ; SI-NEXT: v_add_i32_e32 v21, vcc, 0x30000, v0 ; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v57, v0 +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v56, v0 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v44 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v47, v0 +; SI-NEXT: v_or_b32_e32 v0, v38, v0 ; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v62, v0 +; SI-NEXT: v_or_b32_e32 v0, v36, v0 ; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v42 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v61, v0 +; SI-NEXT: v_or_b32_e32 v0, v35, v0 ; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v60, v0 +; SI-NEXT: v_or_b32_e32 v0, v34, v0 ; SI-NEXT: v_add_i32_e32 v27, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v31, v0 +; SI-NEXT: v_or_b32_e32 v0, v54, v0 ; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v0, v63, v0 +; SI-NEXT: v_or_b32_e32 v0, v33, v0 ; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0 ; SI-NEXT: v_mov_b32_e32 v0, s4 ; SI-NEXT: .LBB51_3: ; %end @@ -39877,35 +40067,67 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB51_4: -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: v_mov_b32_e32 v46, v44 -; SI-NEXT: v_mov_b32_e32 v32, v30 -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v59, v58 -; SI-NEXT: v_mov_b32_e32 v58, v57 -; SI-NEXT: v_mov_b32_e32 v57, v56 -; SI-NEXT: v_mov_b32_e32 v56, v47 -; SI-NEXT: v_mov_b32_e32 v47, v62 +; SI-NEXT: v_mov_b32_e32 v45, v43 +; SI-NEXT: v_mov_b32_e32 v44, v42 +; SI-NEXT: v_mov_b32_e32 v43, v41 +; SI-NEXT: v_mov_b32_e32 
v42, v40 +; SI-NEXT: v_mov_b32_e32 v41, v38 +; SI-NEXT: v_mov_b32_e32 v40, v37 +; SI-NEXT: v_mov_b32_e32 v38, v36 +; SI-NEXT: v_mov_b32_e32 v37, v35 +; SI-NEXT: v_mov_b32_e32 v36, v55 +; SI-NEXT: v_mov_b32_e32 v55, v34 +; SI-NEXT: v_mov_b32_e32 v35, v54 +; SI-NEXT: v_mov_b32_e32 v54, v33 +; SI-NEXT: v_mov_b32_e32 v34, v32 +; SI-NEXT: v_mov_b32_e32 v33, v62 ; SI-NEXT: v_mov_b32_e32 v62, v60 -; SI-NEXT: v_mov_b32_e32 v60, v63 -; SI-NEXT: v_mov_b32_e32 v63, v61 -; SI-NEXT: v_mov_b32_e32 v61, v31 +; SI-NEXT: v_mov_b32_e32 v32, v63 +; SI-NEXT: v_mov_b32_e32 v63, v53 +; SI-NEXT: v_mov_b32_e32 v53, v61 +; SI-NEXT: v_mov_b32_e32 v61, v52 +; SI-NEXT: v_mov_b32_e32 v52, v59 +; SI-NEXT: v_mov_b32_e32 v59, v51 +; SI-NEXT: v_mov_b32_e32 v51, v57 +; SI-NEXT: v_mov_b32_e32 v57, v50 +; SI-NEXT: v_mov_b32_e32 v50, v47 +; SI-NEXT: v_mov_b32_e32 v47, v48 +; SI-NEXT: v_mov_b32_e32 v48, v30 ; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 -; SI-NEXT: v_mov_b32_e32 v44, v46 -; SI-NEXT: v_mov_b32_e32 v31, v61 -; SI-NEXT: v_mov_b32_e32 v61, v63 -; SI-NEXT: v_mov_b32_e32 v63, v60 ; SI-NEXT: v_mov_b32_e32 v60, v62 -; SI-NEXT: v_mov_b32_e32 v62, v47 -; SI-NEXT: v_mov_b32_e32 v47, v56 -; SI-NEXT: v_mov_b32_e32 v56, v57 -; SI-NEXT: v_mov_b32_e32 v57, v58 -; SI-NEXT: v_mov_b32_e32 v58, v59 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v30, v32 +; SI-NEXT: v_mov_b32_e32 v30, v48 +; SI-NEXT: v_mov_b32_e32 v48, v47 +; SI-NEXT: v_mov_b32_e32 v47, v50 +; SI-NEXT: v_mov_b32_e32 v50, v57 +; SI-NEXT: v_mov_b32_e32 v57, v51 +; SI-NEXT: v_mov_b32_e32 v51, v59 +; SI-NEXT: v_mov_b32_e32 v59, v52 +; SI-NEXT: v_mov_b32_e32 v52, v61 +; SI-NEXT: v_mov_b32_e32 v61, v53 +; SI-NEXT: v_mov_b32_e32 v53, v63 +; SI-NEXT: v_mov_b32_e32 v63, v32 +; SI-NEXT: v_mov_b32_e32 v62, v33 +; SI-NEXT: v_mov_b32_e32 v32, v34 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v54, v35 +; SI-NEXT: v_mov_b32_e32 v34, v55 +; SI-NEXT: v_mov_b32_e32 v55, v36 +; SI-NEXT: v_mov_b32_e32 v35, v37 +; SI-NEXT: v_mov_b32_e32 v36, v38 +; SI-NEXT: v_mov_b32_e32 v37, v40 +; SI-NEXT: v_mov_b32_e32 v38, v41 +; SI-NEXT: v_mov_b32_e32 v40, v42 +; SI-NEXT: v_mov_b32_e32 v41, v43 +; SI-NEXT: v_mov_b32_e32 v42, v44 +; SI-NEXT: v_mov_b32_e32 v43, v45 +; SI-NEXT: v_mov_b32_e32 v44, v46 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_branch .LBB51_2 ; ; VI-LABEL: bitcast_v60i16_to_v15f64_scalar: @@ -40737,6 +40959,10 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; kill: killed $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; kill: killed $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; kill: killed $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill @@ -40777,16 +41003,14 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: ; 
implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr59 ; SI-NEXT: ; implicit-def: $vgpr61 ; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr58 ; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; kill: killed $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; kill: killed $vgpr58 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 ; SI-NEXT: ; implicit-def: $vgpr31 @@ -40824,55 +41048,53 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; kill: killed $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; kill: killed $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc ; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5] ; SI-NEXT: s_cbranch_execz .LBB52_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v3 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v31 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v55, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v32 ; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v27 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v33 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v52, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v41, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v31 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v37 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v34 ; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v21 ; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v8 -; SI-NEXT: 
v_lshrrev_b32_e32 v62, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 @@ -40899,30 +41121,30 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v55, v2 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v50, v4 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v35 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v4, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v51 -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v4, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v38 +; SI-NEXT: v_mov_b32_e32 v38, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v39 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v20, 
off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill @@ -40931,33 +41153,32 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v31, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v43 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v57 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v60 -; SI-NEXT: v_mov_b32_e32 v60, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v62 -; SI-NEXT: v_mov_b32_e32 v62, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v59 -; SI-NEXT: v_mov_b32_e32 v59, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v61 -; SI-NEXT: v_mov_b32_e32 v61, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v63 -; SI-NEXT: v_mov_b32_e32 v63, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v47, v38 -; SI-NEXT: v_mov_b32_e32 v38, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v34, v51 +; SI-NEXT: v_cvt_f32_f16_e32 v36, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v32, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v33, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v35, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v37, v43 +; SI-NEXT: v_cvt_f32_f16_e32 v39, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v49, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v51, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v53, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v54, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v42, v58 +; SI-NEXT: v_mov_b32_e32 v58, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v43, v60 +; SI-NEXT: v_mov_b32_e32 v60, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v44, v62 +; SI-NEXT: v_mov_b32_e32 v62, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v46, v61 +; SI-NEXT: v_mov_b32_e32 v61, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v47, v63 +; SI-NEXT: v_mov_b32_e32 v63, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v56, v48 -; SI-NEXT: v_mov_b32_e32 v48, v29 +; SI-NEXT: v_mov_b32_e32 v48, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v3 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr3 @@ -40978,32 +41199,24 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: s_andn2_saveexec_b64 s[4:5], 
s[4:5] ; SI-NEXT: s_cbranch_execz .LBB52_4 ; SI-NEXT: ; %bb.3: ; %cmp.true -; SI-NEXT: v_add_f64 v[32:33], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v32 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 ; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v29 +; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 ; SI-NEXT: v_add_f64 v[49:50], v[3:4], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v5 ; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v6, v36 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v6, v34 +; SI-NEXT: v_add_f64 v[32:33], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 ; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 ; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 @@ -41011,8 +41224,9 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 ; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 ; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 ; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v32 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v49 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v50 ; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v7 @@ -41032,15 +41246,16 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v21 ; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v22 ; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v27 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v30 ; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 @@ -41067,15 +41282,16 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v52, v49 ; SI-NEXT: v_cvt_f32_f16_e32 v55, v33 ; 
SI-NEXT: v_cvt_f32_f16_e32 v41, v32 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v6, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v62, v62 ; SI-NEXT: v_cvt_f32_f16_e32 v60, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v58, v58 ; SI-NEXT: v_cvt_f32_f16_e32 v63, v63 ; SI-NEXT: v_cvt_f32_f16_e32 v61, v61 -; SI-NEXT: v_cvt_f32_f16_e32 v59, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v59 ; SI-NEXT: v_cvt_f32_f16_e32 v34, v57 ; SI-NEXT: v_cvt_f32_f16_e32 v36, v56 ; SI-NEXT: v_cvt_f32_f16_e32 v32, v47 @@ -41095,22 +41311,22 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f32_f16_e32 v46, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v47, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v56, v5 -; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_cvt_f32_f16_e32 v57, v1 -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill @@ -41119,7 +41335,7 @@ define <60 x half> 
@bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; SI-NEXT: .LBB52_4: ; %end ; SI-NEXT: s_or_b64 exec, exec, s[4:5] ; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 @@ -41238,7 +41454,7 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v37 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 @@ -41247,7 +41463,7 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v35 ; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 @@ -41256,7 +41472,7 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v33 ; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 @@ -41292,16 +41508,16 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v31 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v61 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 @@ -41310,7 +41526,7 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, 
v63 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 @@ -41319,47 +41535,47 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v1, v58 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v60 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v60 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v62 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v62 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -41370,19 +41586,19 @@ define <60 x half> @bitcast_v15f64_to_v60f16(<15 x double> %a, i32 %b) { ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v2, v48 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: 
s_waitcnt vmcnt(1) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v2, v38 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v48 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen @@ -42027,29 +42243,28 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a ; SI-NEXT: s_cbranch_scc0 .LBB53_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_lshr_b32 s44, s5, 16 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_cvt_f32_f16_e32 v59, s44 +; SI-NEXT: v_cvt_f32_f16_e32 v40, s44 ; SI-NEXT: s_lshr_b32 s44, s4, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v36, s44 +; SI-NEXT: v_cvt_f32_f16_e32 v52, s44 ; SI-NEXT: s_lshr_b32 s44, s7, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v13, s44 +; SI-NEXT: v_cvt_f32_f16_e32 v48, s44 ; SI-NEXT: s_lshr_b32 s44, s6, 16 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v61, s44 +; SI-NEXT: v_cvt_f32_f16_e32 v36, s44 ; SI-NEXT: s_lshr_b32 s44, s9, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v33, s44 ; SI-NEXT: s_lshr_b32 s44, s8, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v48, s44 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_cvt_f32_f16_e32 v59, s44 ; SI-NEXT: s_lshr_b32 s44, s11, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v25, s44 ; SI-NEXT: s_lshr_b32 s44, s10, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v1, s44 ; SI-NEXT: s_lshr_b32 s44, s13, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v5, s44 +; SI-NEXT: v_cvt_f32_f16_e32 v18, s44 ; SI-NEXT: s_lshr_b32 s44, s12, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v17, s44 ; SI-NEXT: s_lshr_b32 s44, s15, 16 -; SI-NEXT: v_cvt_f32_f16_e32 v43, s44 +; SI-NEXT: v_cvt_f32_f16_e32 v14, s44 ; SI-NEXT: s_lshr_b32 s44, s14, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v22, s44 ; SI-NEXT: s_lshr_b32 s44, s41, 16 @@ -42087,15 +42302,17 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a ; SI-NEXT: s_lshr_b32 s44, s17, 16 ; SI-NEXT: v_cvt_f32_f16_e32 v58, s44 ; SI-NEXT: s_lshr_b32 s44, s16, 16 +; SI-NEXT: s_waitcnt expcnt(3) ; SI-NEXT: v_cvt_f32_f16_e32 v60, s44 -; SI-NEXT: v_cvt_f32_f16_e32 v14, s5 -; SI-NEXT: v_cvt_f32_f16_e32 v18, s4 +; SI-NEXT: v_cvt_f32_f16_e32 v10, s5 +; SI-NEXT: v_cvt_f32_f16_e32 v13, s4 ; SI-NEXT: v_cvt_f32_f16_e32 v19, s7 ; SI-NEXT: v_cvt_f32_f16_e32 v29, s6 -; SI-NEXT: v_cvt_f32_f16_e32 v52, s9 -; SI-NEXT: v_cvt_f32_f16_e32 v40, s8 -; SI-NEXT: v_cvt_f32_f16_e32 v45, s11 -; SI-NEXT: v_cvt_f32_f16_e32 v47, s10 +; SI-NEXT: v_cvt_f32_f16_e32 v43, s9 +; SI-NEXT: v_cvt_f32_f16_e32 v45, s8 +; SI-NEXT: v_cvt_f32_f16_e32 v47, s11 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v61, s10 ; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: v_cvt_f32_f16_e32 v62, s13 ; SI-NEXT: s_waitcnt expcnt(0) @@ -42146,9 +42363,9 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v53 ; SI-NEXT: v_add_f64 v[20:21], s[12:13], 1.0 ; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v15 ; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v21 +; SI-NEXT: 
v_lshrrev_b32_e32 v18, 16, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v62, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v63, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v58 @@ -42156,17 +42373,17 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a ; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v57, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v1, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: v_add_f64 v[37:38], s[26:27], 1.0 -; SI-NEXT: v_cvt_f32_f16_e32 v47, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v61, v15 ; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v38 ; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v37 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v37 ; SI-NEXT: v_cvt_f32_f16_e32 v37, v55 ; SI-NEXT: v_add_f64 v[7:8], s[6:7], 1.0 ; SI-NEXT: v_add_f64 v[34:35], s[28:29], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v41 ; SI-NEXT: v_add_f64 v[30:31], s[42:43], 1.0 @@ -42174,7 +42391,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a ; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v35 ; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v30 ; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v43, v12 ; SI-NEXT: v_cvt_f32_f16_e32 v12, v30 ; SI-NEXT: v_cvt_f32_f16_e32 v30, v32 ; SI-NEXT: v_cvt_f32_f16_e32 v32, v39 @@ -42183,37 +42400,35 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a ; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v50 ; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v47, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v49 ; SI-NEXT: v_cvt_f32_f16_e32 v49, v46 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_f64 v[3:4], s[4:5], 1.0 ; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v34 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v3 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v34 ; SI-NEXT: v_cvt_f32_f16_e32 v34, v51 ; SI-NEXT: v_cvt_f32_f16_e32 v51, v56 ; SI-NEXT: v_add_f64 v[26:27], s[40:41], 1.0 ; SI-NEXT: v_add_f64 v[22:23], s[14:15], 1.0 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v5 ; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v54 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v26 ; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v27 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v29, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v45, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v22 ; SI-NEXT: v_cvt_f32_f16_e32 
v27, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 ; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 ; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 @@ -42221,244 +42436,244 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a ; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v53 ; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v52, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 ; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v61, v61 ; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v43 +; SI-NEXT: v_cvt_f32_f16_e32 v59, v59 ; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v59 -; SI-NEXT: v_mov_b32_e32 v59, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v13 -; SI-NEXT: v_mov_b32_e32 v13, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v9 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v53, v60 ; SI-NEXT: v_cvt_f32_f16_e32 v58, v58 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v55, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v55, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v41, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v44, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v44, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v46, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v46, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v56, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v56, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v60, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v60, v5 ; SI-NEXT: .LBB53_3: ; %end -; SI-NEXT: v_cvt_f16_f32_e32 v6, v60 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v60 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v57 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v58 -; SI-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 4, v0 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: buffer_store_dword v9, v6, s[0:3], 0 offen -; SI-NEXT: v_cvt_f16_f32_e32 v6, v56 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, 
v5 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v58 +; SI-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v9, v21 -; SI-NEXT: v_add_i32_e32 v10, vcc, 8, v0 +; SI-NEXT: v_add_i32_e32 v5, vcc, 4, v0 ; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 ; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v5, v56 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v20 -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v6, v21 +; SI-NEXT: v_add_i32_e32 v9, vcc, 8, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v8 -; SI-NEXT: v_add_i32_e32 v10, vcc, 16, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v5, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v20 +; SI-NEXT: v_add_i32_e32 v9, vcc, 12, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v42 -; SI-NEXT: v_add_i32_e32 v10, vcc, 20, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v5, v44 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v8 +; SI-NEXT: v_add_i32_e32 v9, vcc, 16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v7 -; SI-NEXT: v_add_i32_e32 v10, vcc, 24, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v5, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v42 +; SI-NEXT: v_add_i32_e32 v9, vcc, 20, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v54 -; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v5, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v7 +; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v16 -; SI-NEXT: v_add_i32_e32 v10, vcc, 32, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v5, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v54 +; SI-NEXT: v_add_i32_e32 v7, vcc, 28, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: 
v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v50 -; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v5, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v16 +; SI-NEXT: v_add_i32_e32 v7, vcc, 32, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v15 -; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v5, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v50 +; SI-NEXT: v_add_i32_e32 v7, vcc, 36, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v38 -; SI-NEXT: v_add_i32_e32 v10, vcc, 44, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: buffer_store_dword v6, v10, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v5, v39 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v15 +; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v34 -; SI-NEXT: v_add_i32_e32 v9, vcc, 48, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v38 +; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v5, v34 +; SI-NEXT: v_add_i32_e32 v6, vcc, 48, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v3, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v35 -; SI-NEXT: v_add_i32_e32 v9, vcc, 52, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v35 +; SI-NEXT: v_add_i32_e32 v6, vcc, 52, v0 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v6, v3 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v3, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v12 -; SI-NEXT: v_add_i32_e32 v9, vcc, 56, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v12 +; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v6, v3 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v3, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v31 -; SI-NEXT: v_add_i32_e32 v9, vcc, 60, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v31 +; SI-NEXT: 
v_add_i32_e32 v6, vcc, 60, v0 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v6, v3 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v3, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v11 -; SI-NEXT: v_add_i32_e32 v9, vcc, 64, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v11 +; SI-NEXT: v_add_i32_e32 v6, vcc, 64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v6, v3 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v3, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v27 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x44, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v27 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0x44, v0 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v6, v3 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v3, v22 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x48, v0 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x48, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v43 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v23 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x4c, v0 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x4c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v3, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v63 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x50, v0 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x50, v0 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v62 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x54, v0 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: v_or_b32_e32 v3, v4, v3 ; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v61 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x58, v0 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v47 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x5c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: buffer_store_dword v1, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v40 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v45 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: 
buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v1, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v43 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v61 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v36 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v29 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v48 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v19 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v13 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v40 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v10 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v1, v2, v1 @@ -42521,27 +42736,27 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a ; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr63 ; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr61 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr47 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: s_branch .LBB53_2 ; ; VI-LABEL: bitcast_v15f64_to_v60f16_scalar: @@ -50964,566 +51179,651 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64 -; SI-NEXT: 
buffer_load_dword v43, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:24 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:56 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:40 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v24 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v22 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v62, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s23 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v13 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v63, off, 
s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v6 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v42, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v21 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v29, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v27, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v28, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v29 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v58, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v28, s26 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v43 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v45 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v38 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v39 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v48, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v50 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v49, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v32, s28 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v53 +; SI-NEXT: 
s_waitcnt vmcnt(14) +; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v43 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v44 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_cvt_f16_f32_e32 v10, v46 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v47 -; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f16_f32_e32 v23, v57 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v58 -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f16_f32_e32 v7, v59 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v60 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f16_f32_e32 v19, v61 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v62 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v63 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f16_f32_e32 v36, v39 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v62, v50 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v47, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v60, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v44, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v45, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v46, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v59, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v43, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v58, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v57, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v33, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v44, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: s_cbranch_scc0 .LBB59_2 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v46, s25 +; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_mov_b64 s[4:5], 0 -; SI-NEXT: s_branch .LBB59_3 -; SI-NEXT: .LBB59_2: -; SI-NEXT: s_mov_b64 s[4:5], -1 -; SI-NEXT: .LBB59_3: ; %Flow -; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; SI-NEXT: v_mov_b32_e32 v61, v14 -; SI-NEXT: v_mov_b32_e32 v63, v15 -; SI-NEXT: v_mov_b32_e32 v15, v18 -; SI-NEXT: v_mov_b32_e32 v18, v22 -; SI-NEXT: v_mov_b32_e32 v22, v33 -; SI-NEXT: v_mov_b32_e32 v33, v11 -; SI-NEXT: v_mov_b32_e32 v11, v8 -; SI-NEXT: v_mov_b32_e32 v8, v5 -; SI-NEXT: v_mov_b32_e32 v5, v42 -; SI-NEXT: v_mov_b32_e32 v42, v1 -; SI-NEXT: s_cbranch_vccnz .LBB59_5 -; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: s_cbranch_execnz .LBB59_3 +; SI-NEXT: .LBB59_2: ; %cmp.true +; SI-NEXT: v_cvt_f32_f16_e32 v5, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v58 +; SI-NEXT: v_mov_b32_e32 v43, v34 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v35, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v33 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 +; SI-NEXT: v_add_f32_e32 v5, 
0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v33, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v28 +; SI-NEXT: v_mov_b32_e32 v41, v32 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v49 +; SI-NEXT: v_mov_b32_e32 v53, v27 +; SI-NEXT: v_or_b32_e32 v28, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v30 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v38, v55 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v44 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v62 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v42 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v40 +; SI-NEXT: v_mov_b32_e32 v40, v54 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v47 +; SI-NEXT: v_lshr_b64 v[46:47], v[27:28], 16 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v52 +; SI-NEXT: v_mov_b32_e32 v52, v15 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v30, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v26 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; 
SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_or_b32_e32 v62, v1, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v3, v3, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v26, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v22 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v22, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v18, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v51, v11 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v55 +; SI-NEXT: v_or_b32_e32 v16, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: 
v_cvt_f16_f32_e32 v37, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37 +; SI-NEXT: v_or_b32_e32 v14, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v61 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v12 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v63 +; SI-NEXT: v_or_b32_e32 v12, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v10 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v61 +; SI-NEXT: v_or_b32_e32 v10, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v57 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; SI-NEXT: v_or_b32_e32 v57, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_lshr_b64 v[58:59], v[34:35], 16 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v8 +; SI-NEXT: v_mov_b32_e32 v8, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v48 +; SI-NEXT: v_or_b32_e32 v6, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v4 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v31 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v4 +; SI-NEXT: v_mov_b32_e32 v59, v48 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v60 +; SI-NEXT: v_or_b32_e32 v4, v3, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v43 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshr_b64 v[47:48], v[17:18], 16 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshr_b64 v[44:45], v[29:30], 16 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v39 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v62 +; SI-NEXT: v_or_b32_e32 v62, v24, v32 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v38 
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_or_b32_e32 v6, v6, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_or_b32_e32 v9, v9, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v42, v24, v27 +; SI-NEXT: v_mov_b32_e32 v48, v62 +; SI-NEXT: v_or_b32_e32 v50, v20, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v41 +; SI-NEXT: v_mov_b32_e32 v34, v42 +; SI-NEXT: v_lshr_b64 v[42:43], v[25:26], 16 +; SI-NEXT: v_mov_b32_e32 v62, v50 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v41, v20, v29 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v38, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_or_b32_e32 v12, v12, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_or_b32_e32 v16, v16, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v20, v20, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v32 -; SI-NEXT: v_or_b32_e32 v31, v31, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v35 -; SI-NEXT: v_or_b32_e32 v34, v34, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v38 -; SI-NEXT: v_or_b32_e32 v37, v37, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v39 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_or_b32_e32 v48, v39, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v39 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v52 -; SI-NEXT: v_or_b32_e32 v51, v39, v50 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v27 -; SI-NEXT: v_or_b32_e32 v26, v26, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v30 -; SI-NEXT: v_or_b32_e32 v29, v29, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v59 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v56 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v45, v39 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v53 -; SI-NEXT: 
v_lshlrev_b32_e32 v50, 16, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 -; SI-NEXT: v_or_b32_e32 v44, v39, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v47 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v53 -; SI-NEXT: v_add_f32_e32 v56, 0x38000000, v56 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v24, v8 +; SI-NEXT: v_or_b32_e32 v8, v38, v25 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v20, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v38, v8 +; SI-NEXT: v_or_b32_e32 v8, v24, v21 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v8, v20, v17 +; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v24, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v36 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v8, v38, v15 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v38, v53 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 +; SI-NEXT: v_or_b32_e32 v54, v24, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v52, v38, v9 +; SI-NEXT: v_mov_b32_e32 v27, v52 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_or_b32_e32 v36, v20, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_lshr_b64 v[52:53], v[15:16], 16 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v51, v24, v56 +; SI-NEXT: v_mov_b32_e32 v15, v51 +; SI-NEXT: v_lshr_b64 v[50:51], v[13:14], 16 +; SI-NEXT: v_or_b32_e32 v24, v20, v5 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v38, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v39, v8 +; SI-NEXT: v_or_b32_e32 v8, v38, v3 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 ; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v56 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v47, v50, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v58 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v56 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_add_f32_e32 
v50, 0x38000000, v50 -; SI-NEXT: v_add_f32_e32 v46, 0x38000000, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v57, v57 -; SI-NEXT: v_or_b32_e32 v46, v50, v53 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v58 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v8, v39, v1 +; SI-NEXT: v_lshr_b64 v[38:39], v[32:33], 16 +; SI-NEXT: v_mov_b32_e32 v32, v41 +; SI-NEXT: v_lshr_b64 v[40:41], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[20:21], v[11:12], 16 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[20:21], v[56:57], 16 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v11, v24 +; SI-NEXT: v_lshr_b64 v[8:9], v[9:10], 16 +; SI-NEXT: v_mov_b32_e32 v39, v31 +; SI-NEXT: v_mov_b32_e32 v31, v60 +; SI-NEXT: v_mov_b32_e32 v60, v61 +; SI-NEXT: v_mov_b32_e32 v61, v63 +; SI-NEXT: v_mov_b32_e32 v63, v37 +; SI-NEXT: v_mov_b32_e32 v37, v55 +; SI-NEXT: v_lshr_b64 v[55:56], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[3:4], 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[20:21], v[1:2], 16 +; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v58 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v62 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v38 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v48 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v33 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v46 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v44 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v30 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, 
s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v42 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v40 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, 
v36 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v58, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_or_b32_e32 v14, v58, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v63 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v58 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v13 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v63, v58, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v61 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v27 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v8 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v36, v1 -; SI-NEXT: v_add_f32_e32 v57, 0x38000000, v57 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v43, 0x38000000, v43 -; SI-NEXT: v_cvt_f16_f32_e32 v57, v57 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v42 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: 
v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v43, v43 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v57 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v42 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v41 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v55 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v54 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v43, v43, v50 -; SI-NEXT: v_or_b32_e32 v28, v28, v57 -; SI-NEXT: v_or_b32_e32 v5, v5, v42 -; SI-NEXT: v_or_b32_e32 v8, v8, v41 -; SI-NEXT: v_or_b32_e32 v11, v11, v40 -; SI-NEXT: v_or_b32_e32 v33, v33, v55 -; SI-NEXT: v_or_b32_e32 v22, v22, v54 -; SI-NEXT: v_or_b32_e32 v18, v18, v25 -; SI-NEXT: v_or_b32_e32 v15, v15, v24 -; SI-NEXT: v_or_b32_e32 v61, v58, v19 -; SI-NEXT: v_or_b32_e32 v1, v36, v14 -; SI-NEXT: v_alignbit_b32 v60, v44, v39, 16 -; SI-NEXT: v_alignbit_b32 v59, v29, v53, 16 -; SI-NEXT: v_alignbit_b32 v58, v26, v50, 16 -; SI-NEXT: v_alignbit_b32 v57, v51, v57, 16 -; SI-NEXT: v_alignbit_b32 v56, v48, v56, 16 -; SI-NEXT: v_alignbit_b32 v42, v37, v42, 16 -; SI-NEXT: v_alignbit_b32 v41, v34, v41, 16 -; SI-NEXT: v_alignbit_b32 v40, v31, v40, 16 -; SI-NEXT: v_alignbit_b32 v55, v20, v55, 16 -; SI-NEXT: v_alignbit_b32 v54, v16, v54, 16 -; SI-NEXT: v_alignbit_b32 v25, v12, v25, 16 -; SI-NEXT: v_alignbit_b32 v24, v9, v24, 16 -; SI-NEXT: v_alignbit_b32 v23, v6, v23, 16 -; SI-NEXT: v_alignbit_b32 v19, v3, v19, 16 -; SI-NEXT: v_alignbit_b32 v36, v62, v14, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: .LBB59_5: ; %end -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v60 -; SI-NEXT: v_or_b32_e32 v39, v39, v50 -; SI-NEXT: buffer_store_dword v39, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v44 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v45 -; SI-NEXT: v_or_b32_e32 v39, v39, v50 -; SI-NEXT: v_add_i32_e32 v50, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v39, v50, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v46 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v59 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_or_b32_e32 v39, v39, v50 -; SI-NEXT: v_add_i32_e32 v50, vcc, 8, v0 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v39, v50, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v43 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v58 -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 
16, v0 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v57 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v51 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v52 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v56 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x6c, v0 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v14 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v49 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v42 -; SI-NEXT: v_or_b32_e32 v5, v5, v26 -; SI-NEXT: v_add_i32_e32 v26, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v5, v26, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v38 -; SI-NEXT: v_or_b32_e32 v5, v5, v26 -; SI-NEXT: v_add_i32_e32 v26, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v5, v26, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v35 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v40 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v32 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v55 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v21 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v54 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v17 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v25 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v13 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v24 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v10 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v63 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v23 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v8 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v61 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v19 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v36 -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v62 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x70, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v57 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; 
SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v24 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v31 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v20 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload @@ -51543,6 +51843,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: .LBB59_4: +; SI-NEXT: s_branch .LBB59_2 ; ; VI-LABEL: bitcast_v60f16_to_v60i16_scalar: ; VI: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll index 4aded5da3668a..685e2fbdecfad 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll @@ -632,51 +632,53 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s4, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s4, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s19, s17, 24 +; SI-NEXT: s_lshr_b32 s22, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s4, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s4, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 8 
-; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_lshr_b32 s19, s17, 24 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s22, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_mov_b32_e32 v3, s4 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v5, s23 +; SI-NEXT: v_mov_b32_e32 v6, s22 +; SI-NEXT: v_mov_b32_e32 v7, s19 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s14 +; SI-NEXT: v_mov_b32_e32 v10, s10 +; SI-NEXT: v_mov_b32_e32 v11, s6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr19 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v3i32_to_v12i8_scalar: @@ -3133,31 +3135,29 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB17_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB17_3 ; SI-NEXT: .LBB17_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 ; SI-NEXT: .LBB17_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s6 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s10 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s4 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB17_4: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB17_2 ; ; VI-LABEL: bitcast_v3i32_to_v6i16_scalar: @@ -3762,50 +3762,59 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB21_3 ; SI-NEXT: ; %bb.1: ; 
%cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s4, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s4, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s19, s17, 24 +; SI-NEXT: s_lshr_b32 s22, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s17, 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB21_4 ; SI-NEXT: .LBB21_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v8, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, s4, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, s4, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v4 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v17, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v16, s16, 1.0 +; SI-NEXT: v_lshr_b64 v[11:12], v[8:9], 24 +; SI-NEXT: v_lshr_b64 v[3:4], v[16:17], 24 +; SI-NEXT: v_lshr_b64 v[14:15], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[16:17], 8 +; SI-NEXT: v_lshr_b64 v[12:13], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[8:9], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v17 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v17 +; SI-NEXT: s_branch .LBB21_5 ; SI-NEXT: .LBB21_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr19 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch .LBB21_2 ; SI-NEXT: .LBB21_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v4, s17 +; SI-NEXT: v_mov_b32_e32 v16, s16 +; SI-NEXT: v_mov_b32_e32 v17, s17 ; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v5, s23 +; SI-NEXT: v_mov_b32_e32 v6, s22 +; SI-NEXT: v_mov_b32_e32 v7, s19 +; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v12, s12 +; SI-NEXT: v_mov_b32_e32 v9, s14 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: v_mov_b32_e32 v14, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: .LBB21_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v16 +; SI-NEXT: v_mov_b32_e32 v2, v14 +; SI-NEXT: v_mov_b32_e32 v4, v17 +; SI-NEXT: v_mov_b32_e32 v10, v12 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v3f32_to_v12i8_scalar: @@ -6282,30 +6291,33 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB33_3 ; SI-NEXT: ; %bb.1: ; 
%cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB33_4 ; SI-NEXT: .LBB33_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v5, s4, v4, 16 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v8, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v7, s16, 1.0 +; SI-NEXT: v_lshr_b64 v[1:2], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[4:5], 16 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v8 +; SI-NEXT: s_branch .LBB33_5 ; SI-NEXT: .LBB33_3: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB33_2 ; SI-NEXT: .LBB33_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 +; SI-NEXT: v_mov_b32_e32 v7, s16 +; SI-NEXT: v_mov_b32_e32 v8, s17 ; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v5, s4 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: .LBB33_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v7 +; SI-NEXT: v_mov_b32_e32 v2, v8 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v3f32_to_v6i16_scalar: @@ -7981,62 +7993,64 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_cmp_lg_u32 s22, 0 -; SI-NEXT: v_mul_f32_e64 v16, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v17, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v14, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v15, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v12, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v13, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v17, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v18, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v15, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v16, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v14, 1.0, s20 ; SI-NEXT: s_cbranch_scc0 .LBB39_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v12 -; SI-NEXT: v_alignbit_b32 v0, v0, v17, 16 -; SI-NEXT: v_alignbit_b32 v4, v6, v15, 16 -; SI-NEXT: v_alignbit_b32 v8, v10, v13, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v12 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v15 +; SI-NEXT: v_alignbit_b32 v12, v1, v18, 16 +; SI-NEXT: v_alignbit_b32 v13, v6, v16, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[12:13], 24 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v0 +; SI-NEXT: v_lshr_b64 v[4:5], v[12:13], 16 +; SI-NEXT: v_alignbit_b32 v8, v10, v14, 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[12:13], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v15 +; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v0 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v13 ; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v8 ; SI-NEXT: s_cbranch_execnz .LBB39_3 ; SI-NEXT: .LBB39_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 -; 
SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v17 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v17 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v18 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v14 -; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v15 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_alignbit_b32 v12, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v15 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v12 -; SI-NEXT: v_alignbit_b32 v4, v6, v1, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; SI-NEXT: v_alignbit_b32 v13, v6, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v0 +; SI-NEXT: v_lshr_b64 v[3:4], v[12:13], 24 ; SI-NEXT: v_alignbit_b32 v8, v10, v1, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_lshr_b64 v[4:5], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[12:13], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v13 ; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v8 ; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v0 ; SI-NEXT: .LBB39_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v12 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v13 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB39_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 @@ -9524,69 +9538,71 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i ; SI-LABEL: bitcast_v6f16_to_v12i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v0, s20 ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB43_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v16 +; SI-NEXT: v_or_b32_e32 v12, v15, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v10 -; SI-NEXT: v_or_b32_e32 v0, v14, v0 -; SI-NEXT: v_or_b32_e32 v4, v13, v1 -; SI-NEXT: v_or_b32_e32 v8, v12, v7 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_or_b32_e32 v13, v14, v1 +; SI-NEXT: v_lshr_b64 v[1:2], 
v[12:13], 8 +; SI-NEXT: v_lshr_b64 v[3:4], v[12:13], 24 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v10 +; SI-NEXT: v_lshr_b64 v[4:5], v[12:13], 16 +; SI-NEXT: v_or_b32_e32 v8, v0, v2 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v13 ; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v8 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v11, v10, 8, 8 ; SI-NEXT: s_cbranch_execnz .LBB43_3 ; SI-NEXT: .LBB43_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v0, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v13 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v14 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v2 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v12, v2, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v10 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v2, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v10 -; SI-NEXT: v_or_b32_e32 v8, v1, v2 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 +; SI-NEXT: v_or_b32_e32 v13, v2, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v10 +; SI-NEXT: v_lshr_b64 v[3:4], v[12:13], 24 +; SI-NEXT: v_or_b32_e32 v8, v0, v1 +; SI-NEXT: v_lshr_b64 v[4:5], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[12:13], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v13 ; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v8 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v11, v10, 8, 8 ; SI-NEXT: .LBB43_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v12 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v13 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB43_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr8 @@ -10274,37 +10290,37 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB45_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s23, 24 -; SI-NEXT: 
s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 ; SI-NEXT: s_lshl_b32 s6, s19, 24 -; SI-NEXT: s_or_b32 s4, s6, s4 -; SI-NEXT: s_and_b32 s6, s16, 0xff -; SI-NEXT: s_lshl_b32 s8, s17, 8 -; SI-NEXT: s_or_b32 s6, s6, s8 -; SI-NEXT: s_and_b32 s6, s6, 0xffff -; SI-NEXT: v_mov_b32_e32 v0, s4 -; SI-NEXT: s_or_b32 s6, s6, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s8, s25, 8 -; SI-NEXT: s_or_b32 s4, s4, s8 -; SI-NEXT: s_and_b32 s8, s26, 0xff -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_lshl_b32 s9, s27, 24 +; SI-NEXT: s_or_b32 s12, s6, s5 +; SI-NEXT: s_and_b32 s5, s24, 0xff +; SI-NEXT: s_lshl_b32 s6, s25, 8 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s6, s26, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s8, s7, s6 +; SI-NEXT: s_or_b32 s10, s5, s8 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s6, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s6, s22, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s23, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s9, s7, s6 +; SI-NEXT: s_or_b32 s13, s5, s9 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s10, s9, s8 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 16 -; SI-NEXT: s_or_b32 s8, s4, s10 -; SI-NEXT: s_lshr_b32 s9, s5, 16 -; SI-NEXT: s_lshr_b32 s10, s10, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[12:13], 16 +; SI-NEXT: s_or_b32 s4, s4, s12 +; SI-NEXT: s_lshr_b32 s7, s9, 16 +; SI-NEXT: s_lshr_b32 s11, s8, 16 +; SI-NEXT: s_mov_b32 s5, s13 ; SI-NEXT: s_cbranch_execnz .LBB45_3 ; SI-NEXT: .LBB45_2: ; %cmp.true ; SI-NEXT: s_add_i32 s16, s16, 3 @@ -10318,52 +10334,51 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in ; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s6, s21, 8 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_and_b32 s7, s22, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s23, 24 ; SI-NEXT: s_lshl_b32 s7, s7, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s7 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s24, 0xff +; SI-NEXT: s_lshl_b32 s7, s25, 8 ; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_and_b32 s8, s26, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s27, 24 +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s27, 24 ; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s8, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 16 -; SI-NEXT: s_lshr_b32 s9, s7, 16 -; SI-NEXT: 
s_lshr_b32 s10, s8, 16 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_add_i32 s10, s6, 0x3000000 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b32 s7, s5, 16 +; SI-NEXT: s_lshr_b32 s11, s10, 16 ; SI-NEXT: .LBB45_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v2, s7 -; SI-NEXT: v_mov_b32_e32 v3, s9 -; SI-NEXT: v_mov_b32_e32 v4, s8 -; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: v_mov_b32_e32 v4, s10 +; SI-NEXT: v_mov_b32_e32 v5, s11 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB45_4: +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: s_branch .LBB45_2 ; ; VI-LABEL: bitcast_v12i8_to_v6i16_scalar: @@ -10954,74 +10969,74 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: v_alignbit_b32 v3, s7, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s7, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 8 -; SI-NEXT: s_lshr_b32 s9, s7, 8 -; SI-NEXT: s_lshr_b32 s12, s8, 8 -; SI-NEXT: s_and_b32 s10, s19, 0xffff -; SI-NEXT: s_and_b32 s13, s21, 0xffff +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_and_b32 s9, s20, 0xffff +; SI-NEXT: s_lshl_b32 s11, s21, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_or_b32 s14, s9, s11 +; SI-NEXT: s_lshr_b32 s7, s5, 8 +; SI-NEXT: s_lshr_b32 s15, s14, 8 +; SI-NEXT: s_and_b32 s9, s19, 0xffff +; SI-NEXT: s_and_b32 s22, s21, 0xffff ; SI-NEXT: s_bfe_u32 s11, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s14, s21, 0x80008 +; SI-NEXT: s_bfe_u32 s23, s21, 0x80008 ; SI-NEXT: s_cbranch_execnz .LBB47_3 ; SI-NEXT: .LBB47_2: ; %cmp.true ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v3, s7, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s7, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 8 -; SI-NEXT: s_lshr_b32 s11, s7, 24 -; SI-NEXT: s_lshr_b32 s10, s7, 16 -; SI-NEXT: s_lshr_b32 s9, s7, 8 -; SI-NEXT: s_lshr_b32 s14, s8, 24 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; 
SI-NEXT: s_lshr_b32 s12, s8, 8 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s20, 0xffff +; SI-NEXT: s_lshl_b32 s7, s21, 16 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_add_i32 s14, s6, 0x30000 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s11, s5, 24 +; SI-NEXT: s_lshr_b32 s9, s5, 16 +; SI-NEXT: s_lshr_b32 s7, s5, 8 +; SI-NEXT: s_lshr_b32 s23, s14, 24 +; SI-NEXT: s_lshr_b32 s22, s14, 16 +; SI-NEXT: s_lshr_b32 s15, s14, 8 ; SI-NEXT: .LBB47_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v4, s7 -; SI-NEXT: v_mov_b32_e32 v5, s9 -; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v4, s5 +; SI-NEXT: v_mov_b32_e32 v5, s7 +; SI-NEXT: v_mov_b32_e32 v6, s9 ; SI-NEXT: v_mov_b32_e32 v7, s11 -; SI-NEXT: v_mov_b32_e32 v8, s8 -; SI-NEXT: v_mov_b32_e32 v9, s12 -; SI-NEXT: v_mov_b32_e32 v10, s13 -; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v8, s14 +; SI-NEXT: v_mov_b32_e32 v9, s15 +; SI-NEXT: v_mov_b32_e32 v10, s22 +; SI-NEXT: v_mov_b32_e32 v11, s23 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB47_4: +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr23 ; SI-NEXT: s_branch .LBB47_2 ; ; VI-LABEL: bitcast_v6i16_to_v12i8_scalar: @@ -12541,44 +12556,45 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3 ; SI-NEXT: v_mul_f32_e64 v11, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v10, 1.0, s17 ; SI-NEXT: v_mul_f32_e64 v7, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v6, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 ; SI-NEXT: v_mul_f32_e64 v9, 1.0, s20 ; SI-NEXT: v_mul_f32_e64 v8, 1.0, s21 ; SI-NEXT: s_cbranch_scc0 .LBB53_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v10 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v9 ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v8 ; SI-NEXT: s_cbranch_execnz .LBB53_3 ; SI-NEXT: .LBB53_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v10 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v11 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v8 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 ; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v8 ; SI-NEXT: 
v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v6 -; SI-NEXT: v_alignbit_b32 v4, v5, v2, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; SI-NEXT: v_lshr_b64 v[6:7], v[1:2], 16 +; SI-NEXT: v_alignbit_b32 v4, v5, v9, 16 ; SI-NEXT: .LBB53_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB53_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 @@ -13210,7 +13226,7 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 @@ -13220,32 +13236,33 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v2, v2, v6 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v5 +; SI-NEXT: v_lshr_b64 v[6:7], v[1:2], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_or_b32_e32 v4, v4, v8 ; SI-NEXT: .LBB57_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: ; SI-NEXT: s_branch .LBB57_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll index b2dcd77274989..e27164c2d6d69 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll @@ -4610,50 +4610,48 
@@ define amdgpu_kernel void @udiv_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) +; GFX6-NEXT: s_lshr_b64 s[6:7], s[10:11], 30 +; GFX6-NEXT: s_mov_b32 s0, s8 +; GFX6-NEXT: s_and_b32 s8, s6, 0x7fff +; GFX6-NEXT: s_and_b32 s6, s4, 0x7fff +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6 ; GFX6-NEXT: s_and_b32 s6, s10, 0x7fff -; GFX6-NEXT: s_and_b32 s7, s4, 0x7fff -; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s7 -; GFX6-NEXT: v_mov_b32_e32 v2, s4 +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s6 +; GFX6-NEXT: s_lshr_b64 s[6:7], s[4:5], 30 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v0 ; GFX6-NEXT: s_bfe_u32 s4, s4, 0xf000f -; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s6 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GFX6-NEXT: v_cvt_f32_u32_e32 v5, s4 -; GFX6-NEXT: s_bfe_u32 s7, s10, 0xf000f -; GFX6-NEXT: v_alignbit_b32 v2, s5, v2, 30 -; GFX6-NEXT: v_mul_f32_e32 v4, v3, v4 -; GFX6-NEXT: v_cvt_f32_u32_e32 v6, s7 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v7, v5 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 -; GFX6-NEXT: v_trunc_f32_e32 v4, v4 -; GFX6-NEXT: v_mad_f32 v3, -v4, v1, v3 -; GFX6-NEXT: v_cvt_u32_f32_e32 v4, v4 -; GFX6-NEXT: v_cvt_f32_u32_e32 v2, v2 -; GFX6-NEXT: v_mov_b32_e32 v0, s10 -; GFX6-NEXT: v_alignbit_b32 v0, s11, v0, 30 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v1 -; GFX6-NEXT: v_mul_f32_e32 v1, v6, v7 -; GFX6-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX6-NEXT: v_trunc_f32_e32 v1, v1 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_mad_f32 v4, -v1, v5, v6 -; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, v0 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v2 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v5 -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GFX6-NEXT: v_mul_f32_e32 v1, v0, v6 -; GFX6-NEXT: v_trunc_f32_e32 v1, v1 -; GFX6-NEXT: v_cvt_u32_f32_e32 v5, v1 -; GFX6-NEXT: v_mad_f32 v0, -v1, v2, v0 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v2 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v3 -; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v5, vcc -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v4 +; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s4 +; GFX6-NEXT: s_mov_b32 s1, s9 +; GFX6-NEXT: s_bfe_u32 s9, s10, 0xf000f +; GFX6-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX6-NEXT: v_trunc_f32_e32 v2, v2 +; GFX6-NEXT: v_cvt_f32_u32_e32 v4, s9 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v5, v3 +; GFX6-NEXT: s_and_b32 s5, s6, 0x7fff +; GFX6-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s5 +; GFX6-NEXT: v_cvt_u32_f32_e32 v2, v2 +; GFX6-NEXT: v_mul_f32_e32 v0, v4, v5 +; GFX6-NEXT: v_trunc_f32_e32 v0, v0 +; GFX6-NEXT: v_mad_f32 v4, -v0, v3, v4 +; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX6-NEXT: v_cvt_f32_u32_e32 v5, s8 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v1 +; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v3 +; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v0, vcc +; GFX6-NEXT: v_mul_f32_e32 v0, v5, v6 +; GFX6-NEXT: v_trunc_f32_e32 v0, v0 +; GFX6-NEXT: v_cvt_u32_f32_e32 v4, v0 +; GFX6-NEXT: v_mad_f32 v0, -v0, v1, v5 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1 +; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v3 +; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v4, vcc ; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 +; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 ; GFX6-NEXT: v_lshlrev_b32_e32 v3, 15, v3 ; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX6-NEXT: s_mov_b32 s0, s8 -; GFX6-NEXT: s_mov_b32 s1, s9 ; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; 
GFX6-NEXT: s_waitcnt expcnt(0) @@ -4667,46 +4665,44 @@ define amdgpu_kernel void @udiv_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: s_and_b32 s5, s6, 0x7fff +; GFX9-NEXT: s_lshr_b64 s[4:5], s[2:3], 30 +; GFX9-NEXT: s_and_b32 s3, s6, 0x7fff +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s3 +; GFX9-NEXT: s_and_b32 s5, s2, 0x7fff +; GFX9-NEXT: s_bfe_u32 s8, s2, 0xf000f +; GFX9-NEXT: s_lshr_b64 s[2:3], s[6:7], 30 ; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s5 -; GFX9-NEXT: s_and_b32 s4, s2, 0x7fff -; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v0 ; GFX9-NEXT: s_bfe_u32 s3, s6, 0xf000f -; GFX9-NEXT: v_cvt_f32_u32_e32 v4, s4 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v1 -; GFX9-NEXT: v_cvt_f32_u32_e32 v6, s3 -; GFX9-NEXT: s_bfe_u32 s2, s2, 0xf000f -; GFX9-NEXT: v_mov_b32_e32 v3, s6 -; GFX9-NEXT: v_alignbit_b32 v3, s7, v3, 30 -; GFX9-NEXT: v_mul_f32_e32 v5, v4, v5 -; GFX9-NEXT: v_cvt_f32_u32_e32 v7, s2 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v8, v6 -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 -; GFX9-NEXT: v_trunc_f32_e32 v5, v5 -; GFX9-NEXT: v_mad_f32 v4, -v5, v1, v4 -; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v5 -; GFX9-NEXT: v_cvt_f32_u32_e32 v3, v3 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v1 -; GFX9-NEXT: v_mul_f32_e32 v1, v7, v8 -; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX9-NEXT: v_trunc_f32_e32 v1, v1 -; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v5, vcc -; GFX9-NEXT: v_mad_f32 v5, -v1, v6, v7 -; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v3 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v5|, v6 -; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v1, vcc -; GFX9-NEXT: v_mul_f32_e32 v1, v0, v7 -; GFX9-NEXT: v_trunc_f32_e32 v1, v1 -; GFX9-NEXT: v_cvt_u32_f32_e32 v6, v1 -; GFX9-NEXT: v_mad_f32 v0, -v1, v3, v0 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v3 -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v4 -; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, 0, v6, vcc -; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v5 +; GFX9-NEXT: v_cvt_f32_u32_e32 v4, s3 +; GFX9-NEXT: v_cvt_f32_u32_e32 v5, s8 +; GFX9-NEXT: v_mul_f32_e32 v3, v1, v3 +; GFX9-NEXT: v_trunc_f32_e32 v3, v3 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v4 +; GFX9-NEXT: s_and_b32 s2, s2, 0x7fff +; GFX9-NEXT: v_mad_f32 v1, -v3, v0, v1 +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 +; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s2 +; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3 +; GFX9-NEXT: v_mul_f32_e32 v0, v5, v6 +; GFX9-NEXT: s_and_b32 s4, s4, 0x7fff +; GFX9-NEXT: v_trunc_f32_e32 v0, v0 +; GFX9-NEXT: v_mad_f32 v5, -v0, v4, v5 +; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX9-NEXT: v_cvt_f32_u32_e32 v6, s4 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v1 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v5|, v4 +; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v0, vcc +; GFX9-NEXT: v_mul_f32_e32 v0, v6, v7 +; GFX9-NEXT: v_trunc_f32_e32 v0, v0 +; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v0 +; GFX9-NEXT: v_mad_f32 v0, -v0, v1, v6 +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1 +; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v4 +; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, 0, v5, vcc ; GFX9-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1] +; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 15, v4 ; GFX9-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX9-NEXT: v_or_b32_e32 v0, v3, v0 @@ -4797,58 +4793,56 @@ define amdgpu_kernel void @urem_v3i15(ptr addrspace(1) %out, <3 x i15> %x, 
<3 x ; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) +; GFX6-NEXT: s_lshr_b64 s[6:7], s[10:11], 30 ; GFX6-NEXT: s_mov_b32 s0, s8 -; GFX6-NEXT: s_and_b32 s8, s4, 0x7fff -; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s8 -; GFX6-NEXT: s_and_b32 s7, s10, 0x7fff -; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s7 -; GFX6-NEXT: v_mov_b32_e32 v2, s4 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GFX6-NEXT: v_alignbit_b32 v2, s5, v2, 30 -; GFX6-NEXT: s_bfe_u32 s5, s4, 0xf000f -; GFX6-NEXT: v_cvt_f32_u32_e32 v5, s5 -; GFX6-NEXT: v_mul_f32_e32 v4, v3, v4 -; GFX6-NEXT: v_trunc_f32_e32 v4, v4 -; GFX6-NEXT: v_mad_f32 v3, -v4, v1, v3 -; GFX6-NEXT: v_cvt_u32_f32_e32 v4, v4 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v1 -; GFX6-NEXT: s_bfe_u32 s8, s10, 0xf000f -; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s8 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v4, vcc -; GFX6-NEXT: v_mul_lo_u32 v1, v1, s4 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v5 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 -; GFX6-NEXT: v_mov_b32_e32 v0, s10 -; GFX6-NEXT: v_sub_i32_e32 v6, vcc, s10, v1 -; GFX6-NEXT: v_mul_f32_e32 v1, v3, v4 -; GFX6-NEXT: v_cvt_f32_u32_e32 v4, v2 -; GFX6-NEXT: v_alignbit_b32 v0, s11, v0, 30 -; GFX6-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX6-NEXT: v_cvt_f32_u32_e32 v7, v0 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v8, v4 -; GFX6-NEXT: v_trunc_f32_e32 v1, v1 -; GFX6-NEXT: v_mad_f32 v3, -v1, v5, v3 -; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v5 -; GFX6-NEXT: v_mul_f32_e32 v3, v7, v8 -; GFX6-NEXT: v_trunc_f32_e32 v3, v3 -; GFX6-NEXT: v_cvt_u32_f32_e32 v5, v3 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX6-NEXT: v_mad_f32 v3, -v3, v4, v7 +; GFX6-NEXT: s_and_b32 s8, s6, 0x7fff +; GFX6-NEXT: s_and_b32 s6, s4, 0x7fff +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX6-NEXT: s_and_b32 s6, s10, 0x7fff +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s6 +; GFX6-NEXT: s_lshr_b64 s[6:7], s[4:5], 30 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GFX6-NEXT: s_and_b32 s5, s6, 0x7fff +; GFX6-NEXT: s_bfe_u32 s6, s4, 0xf000f +; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s6 +; GFX6-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX6-NEXT: v_trunc_f32_e32 v2, v2 +; GFX6-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX6-NEXT: v_cvt_u32_f32_e32 v2, v2 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 +; GFX6-NEXT: s_bfe_u32 s11, s10, 0xf000f +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s11 +; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc +; GFX6-NEXT: v_mul_lo_u32 v0, v0, s4 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v3 +; GFX6-NEXT: v_cvt_f32_u32_e32 v5, s8 ; GFX6-NEXT: s_lshr_b32 s4, s4, 15 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v4 -; GFX6-NEXT: v_mul_lo_u32 v1, v1, s4 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, v3, v2 -; GFX6-NEXT: s_lshr_b32 s6, s10, 15 -; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s6, v1 -; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v3 -; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v6 -; GFX6-NEXT: v_lshlrev_b32_e32 v3, 15, v3 -; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 +; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s10, v0 +; GFX6-NEXT: v_mul_f32_e32 v0, v1, v2 +; GFX6-NEXT: v_cvt_f32_u32_e32 v2, s5 +; GFX6-NEXT: v_trunc_f32_e32 v0, v0 +; GFX6-NEXT: v_mad_f32 v1, -v0, v3, v1 +; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v2 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v3 ; GFX6-NEXT: s_mov_b32 s1, s9 +; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v0, vcc +; GFX6-NEXT: v_mul_f32_e32 v1, v5, v6 +; GFX6-NEXT: v_trunc_f32_e32 v1, v1 +; 
GFX6-NEXT: v_cvt_u32_f32_e32 v3, v1 +; GFX6-NEXT: v_mad_f32 v1, -v1, v2, v5 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v2 +; GFX6-NEXT: v_mul_lo_u32 v0, v0, s4 +; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc +; GFX6-NEXT: v_mul_lo_u32 v1, v1, s5 +; GFX6-NEXT: s_lshr_b32 s9, s10, 15 +; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s9, v0 +; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s8, v1 +; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 +; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 +; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v4 +; GFX6-NEXT: v_lshlrev_b32_e32 v2, 15, v2 +; GFX6-NEXT: v_or_b32_e32 v2, v2, v3 ; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt expcnt(0) @@ -4862,54 +4856,52 @@ define amdgpu_kernel void @urem_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30 -; GFX9-NEXT: s_and_b32 s3, s6, 0x7fff +; GFX9-NEXT: s_lshr_b64 s[4:5], s[2:3], 30 +; GFX9-NEXT: s_and_b32 s5, s6, 0x7fff +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s5 +; GFX9-NEXT: s_and_b32 s3, s2, 0x7fff ; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GFX9-NEXT: s_and_b32 s4, s2, 0x7fff -; GFX9-NEXT: v_cvt_f32_u32_e32 v4, s4 -; GFX9-NEXT: s_bfe_u32 s4, s6, 0xf000f -; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v1 -; GFX9-NEXT: v_cvt_f32_u32_e32 v6, s4 -; GFX9-NEXT: v_mov_b32_e32 v3, s6 -; GFX9-NEXT: v_alignbit_b32 v3, s7, v3, 30 -; GFX9-NEXT: v_mul_f32_e32 v5, v4, v5 -; GFX9-NEXT: v_trunc_f32_e32 v5, v5 -; GFX9-NEXT: v_mad_f32 v4, -v5, v1, v4 -; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v5 -; GFX9-NEXT: s_bfe_u32 s5, s2, 0xf000f -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v1 -; GFX9-NEXT: v_cvt_f32_u32_e32 v7, s5 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v8, v6 -; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v5, vcc -; GFX9-NEXT: v_cvt_f32_u32_e32 v5, v3 -; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX9-NEXT: v_mul_f32_e32 v4, v7, v8 -; GFX9-NEXT: v_cvt_f32_u32_e32 v8, v0 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v9, v5 +; GFX9-NEXT: s_and_b32 s8, s4, 0x7fff +; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v0 +; GFX9-NEXT: s_lshr_b64 s[4:5], s[6:7], 30 +; GFX9-NEXT: s_bfe_u32 s5, s6, 0xf000f +; GFX9-NEXT: v_cvt_f32_u32_e32 v4, s5 +; GFX9-NEXT: v_mul_f32_e32 v3, v1, v3 +; GFX9-NEXT: v_trunc_f32_e32 v3, v3 +; GFX9-NEXT: v_mad_f32 v1, -v3, v0, v1 +; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3 +; GFX9-NEXT: s_bfe_u32 s9, s2, 0xf000f +; GFX9-NEXT: s_and_b32 s3, s4, 0x7fff +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 +; GFX9-NEXT: v_cvt_f32_u32_e32 v5, s9 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v4 +; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, 0, v3, vcc +; GFX9-NEXT: v_cvt_f32_u32_e32 v3, s3 +; GFX9-NEXT: v_mul_f32_e32 v1, v5, v6 +; GFX9-NEXT: v_cvt_f32_u32_e32 v6, s8 +; GFX9-NEXT: v_trunc_f32_e32 v1, v1 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v3 +; GFX9-NEXT: v_mad_f32 v5, -v1, v4, v5 +; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v5|, v4 +; GFX9-NEXT: v_mul_f32_e32 v4, v6, v7 ; GFX9-NEXT: v_trunc_f32_e32 v4, v4 -; GFX9-NEXT: v_mad_f32 v7, -v4, v6, v7 -; GFX9-NEXT: v_cvt_u32_f32_e32 v4, v4 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, v6 -; GFX9-NEXT: v_mul_f32_e32 v6, v8, v9 -; GFX9-NEXT: v_trunc_f32_e32 v6, v6 -; GFX9-NEXT: v_cvt_u32_f32_e32 v7, v6 -; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v4, vcc -; GFX9-NEXT: v_mad_f32 v6, -v6, v5, v8 -; GFX9-NEXT: s_lshr_b32 s3, s6, 15 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v6|, v5 
-; GFX9-NEXT: v_mul_lo_u32 v4, v4, s3 -; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v7, vcc -; GFX9-NEXT: v_mul_lo_u32 v1, v1, s6 -; GFX9-NEXT: v_mul_lo_u32 v3, v5, v3 +; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v4 +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc +; GFX9-NEXT: v_mad_f32 v4, -v4, v3, v6 +; GFX9-NEXT: s_lshr_b32 s4, s6, 15 +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v3 +; GFX9-NEXT: v_mul_lo_u32 v0, v0, s6 +; GFX9-NEXT: v_mul_lo_u32 v1, v1, s4 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v5, vcc +; GFX9-NEXT: v_mul_lo_u32 v3, v3, s3 ; GFX9-NEXT: s_lshr_b32 s3, s2, 15 -; GFX9-NEXT: v_sub_u32_e32 v4, s3, v4 -; GFX9-NEXT: v_sub_u32_e32 v5, s2, v1 -; GFX9-NEXT: v_sub_u32_e32 v0, v0, v3 -; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v4 +; GFX9-NEXT: v_sub_u32_e32 v4, s2, v0 +; GFX9-NEXT: v_sub_u32_e32 v5, s3, v1 +; GFX9-NEXT: v_sub_u32_e32 v0, s8, v3 +; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v4 +; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v5 ; GFX9-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1] -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v5 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 15, v4 ; GFX9-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX9-NEXT: v_or_b32_e32 v0, v3, v0 @@ -5006,64 +4998,63 @@ define amdgpu_kernel void @sdiv_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, s10 -; GFX6-NEXT: s_bfe_i32 s6, s4, 0xf0000 -; GFX6-NEXT: v_cvt_f32_i32_e32 v2, s6 -; GFX6-NEXT: v_mov_b32_e32 v1, s4 -; GFX6-NEXT: v_alignbit_b32 v1, s5, v1, 30 +; GFX6-NEXT: s_lshr_b64 s[6:7], s[10:11], 30 +; GFX6-NEXT: s_bfe_i32 s7, s4, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s7 +; GFX6-NEXT: s_mov_b32 s0, s8 +; GFX6-NEXT: s_mov_b32 s1, s9 +; GFX6-NEXT: s_lshr_b64 s[8:9], s[4:5], 30 ; GFX6-NEXT: s_bfe_i32 s5, s10, 0xf0000 -; GFX6-NEXT: v_cvt_f32_i32_e32 v3, s5 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v2 -; GFX6-NEXT: s_xor_b32 s5, s5, s6 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s5 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GFX6-NEXT: s_xor_b32 s5, s5, s7 ; GFX6-NEXT: s_ashr_i32 s5, s5, 30 ; GFX6-NEXT: s_or_b32 s5, s5, 1 -; GFX6-NEXT: v_mul_f32_e32 v4, v3, v4 -; GFX6-NEXT: v_trunc_f32_e32 v4, v4 -; GFX6-NEXT: v_mad_f32 v3, -v4, v2, v3 -; GFX6-NEXT: v_cmp_ge_f32_e64 s[6:7], |v3|, |v2| -; GFX6-NEXT: s_and_b64 s[6:7], s[6:7], exec -; GFX6-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX6-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX6-NEXT: v_trunc_f32_e32 v2, v2 +; GFX6-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[12:13], |v1|, |v0| +; GFX6-NEXT: s_and_b64 s[12:13], s[12:13], exec +; GFX6-NEXT: v_cvt_i32_f32_e32 v2, v2 ; GFX6-NEXT: s_cselect_b32 s5, s5, 0 ; GFX6-NEXT: s_bfe_i32 s4, s4, 0xf000f -; GFX6-NEXT: v_cvt_f32_i32_e32 v2, s4 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, s5, v4 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s4 +; GFX6-NEXT: v_add_i32_e32 v2, vcc, s5, v2 ; GFX6-NEXT: s_bfe_i32 s5, s10, 0xf000f -; GFX6-NEXT: v_cvt_f32_i32_e32 v4, s5 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v5, v2 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s5 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v3, v0 ; GFX6-NEXT: s_xor_b32 s4, s5, s4 ; GFX6-NEXT: s_ashr_i32 s4, s4, 30 -; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 15 -; GFX6-NEXT: v_mul_f32_e32 v5, v4, v5 -; GFX6-NEXT: v_trunc_f32_e32 v5, v5 -; GFX6-NEXT: v_mad_f32 v4, -v5, v2, v4 +; GFX6-NEXT: s_or_b32 s7, s4, 1 +; GFX6-NEXT: v_mul_f32_e32 v3, v1, v3 +; GFX6-NEXT: v_trunc_f32_e32 v3, v3 +; GFX6-NEXT: v_mad_f32 v1, -v3, v0, v1 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v1|, |v0| +; GFX6-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX6-NEXT: 
v_cvt_i32_f32_e32 v3, v3 +; GFX6-NEXT: s_cselect_b32 s4, s7, 0 +; GFX6-NEXT: s_bfe_i32 s5, s8, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s5 +; GFX6-NEXT: v_add_i32_e32 v3, vcc, s4, v3 +; GFX6-NEXT: s_bfe_i32 s4, s6, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s4 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v0 +; GFX6-NEXT: s_xor_b32 s4, s4, s5 +; GFX6-NEXT: s_ashr_i32 s4, s4, 30 ; GFX6-NEXT: s_or_b32 s6, s4, 1 -; GFX6-NEXT: v_cvt_i32_f32_e32 v5, v5 -; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v4|, |v2| -; GFX6-NEXT: v_cvt_f32_i32_e32 v2, v1 -; GFX6-NEXT: v_alignbit_b32 v0, s11, v0, 30 +; GFX6-NEXT: v_mul_f32_e32 v4, v1, v4 +; GFX6-NEXT: v_trunc_f32_e32 v4, v4 +; GFX6-NEXT: v_mad_f32 v1, -v4, v0, v1 +; GFX6-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v1|, |v0| ; GFX6-NEXT: s_and_b64 s[4:5], s[4:5], exec ; GFX6-NEXT: s_cselect_b32 s4, s6, 0 -; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 15 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, s4, v5 -; GFX6-NEXT: v_cvt_f32_i32_e32 v5, v0 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v2 -; GFX6-NEXT: v_xor_b32_e32 v0, v0, v1 -; GFX6-NEXT: v_ashrrev_i32_e32 v0, 30, v0 -; GFX6-NEXT: v_or_b32_e32 v0, 1, v0 -; GFX6-NEXT: v_mul_f32_e32 v1, v5, v6 -; GFX6-NEXT: v_trunc_f32_e32 v1, v1 -; GFX6-NEXT: v_mad_f32 v5, -v1, v2, v5 -; GFX6-NEXT: v_cvt_i32_f32_e32 v1, v1 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v5|, |v2| -; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v3 -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v4 +; GFX6-NEXT: v_add_i32_e32 v0, vcc, s4, v4 +; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v3 ; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 +; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 ; GFX6-NEXT: v_lshlrev_b32_e32 v3, 15, v3 ; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX6-NEXT: s_mov_b32 s0, s8 -; GFX6-NEXT: s_mov_b32 s1, s9 ; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt expcnt(0) @@ -5077,60 +5068,59 @@ define amdgpu_kernel void @sdiv_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: s_bfe_i32 s4, s6, 0xf0000 -; GFX9-NEXT: v_cvt_f32_i32_e32 v3, s4 -; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30 -; GFX9-NEXT: s_bfe_i32 s3, s2, 0xf0000 -; GFX9-NEXT: v_cvt_f32_i32_e32 v4, s3 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v3 -; GFX9-NEXT: s_xor_b32 s3, s3, s4 +; GFX9-NEXT: s_lshr_b64 s[4:5], s[2:3], 30 +; GFX9-NEXT: s_bfe_i32 s3, s6, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v0, s3 +; GFX9-NEXT: s_bfe_i32 s5, s2, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s5 +; GFX9-NEXT: s_xor_b32 s3, s5, s3 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v0 ; GFX9-NEXT: s_ashr_i32 s3, s3, 30 +; GFX9-NEXT: s_lshr_b64 s[8:9], s[6:7], 30 ; GFX9-NEXT: s_or_b32 s3, s3, 1 -; GFX9-NEXT: v_mul_f32_e32 v5, v4, v5 -; GFX9-NEXT: v_trunc_f32_e32 v5, v5 -; GFX9-NEXT: v_mad_f32 v4, -v5, v3, v4 -; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], |v4|, |v3| -; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX9-NEXT: v_mul_f32_e32 v3, v1, v3 +; GFX9-NEXT: v_trunc_f32_e32 v3, v3 +; GFX9-NEXT: v_mad_f32 v1, -v3, v0, v1 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[10:11], |v1|, |v0| +; GFX9-NEXT: s_and_b64 s[10:11], s[10:11], exec ; GFX9-NEXT: s_cselect_b32 s3, s3, 0 -; GFX9-NEXT: s_bfe_i32 s4, s6, 0xf000f -; GFX9-NEXT: v_cvt_i32_f32_e32 v5, v5 -; GFX9-NEXT: v_cvt_f32_i32_e32 v3, s4 +; GFX9-NEXT: s_bfe_i32 s5, s6, 0xf000f +; GFX9-NEXT: v_cvt_f32_i32_e32 v0, s5 ; 
GFX9-NEXT: s_bfe_i32 s2, s2, 0xf000f -; GFX9-NEXT: v_mov_b32_e32 v1, s6 -; GFX9-NEXT: v_add_u32_e32 v4, s3, v5 -; GFX9-NEXT: v_cvt_f32_i32_e32 v5, s2 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v3 -; GFX9-NEXT: v_alignbit_b32 v1, s7, v1, 30 -; GFX9-NEXT: s_xor_b32 s2, s2, s4 +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GFX9-NEXT: v_cvt_i32_f32_e32 v3, v3 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v0 +; GFX9-NEXT: s_xor_b32 s2, s2, s5 +; GFX9-NEXT: s_ashr_i32 s2, s2, 30 +; GFX9-NEXT: v_add_u32_e32 v3, s3, v3 +; GFX9-NEXT: v_mul_f32_e32 v4, v1, v4 +; GFX9-NEXT: v_trunc_f32_e32 v4, v4 +; GFX9-NEXT: v_mad_f32 v1, -v4, v0, v1 +; GFX9-NEXT: s_or_b32 s5, s2, 1 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[2:3], |v1|, |v0| +; GFX9-NEXT: s_and_b64 s[2:3], s[2:3], exec +; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX9-NEXT: s_cselect_b32 s2, s5, 0 +; GFX9-NEXT: s_bfe_i32 s3, s8, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v0, s3 +; GFX9-NEXT: v_add_u32_e32 v4, s2, v4 +; GFX9-NEXT: s_bfe_i32 s2, s4, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v0 +; GFX9-NEXT: s_xor_b32 s2, s2, s3 ; GFX9-NEXT: s_ashr_i32 s2, s2, 30 -; GFX9-NEXT: v_mul_f32_e32 v6, v5, v6 -; GFX9-NEXT: v_trunc_f32_e32 v6, v6 -; GFX9-NEXT: v_mad_f32 v5, -v6, v3, v5 -; GFX9-NEXT: v_bfe_i32 v1, v1, 0, 15 ; GFX9-NEXT: s_or_b32 s4, s2, 1 -; GFX9-NEXT: v_cvt_i32_f32_e32 v6, v6 -; GFX9-NEXT: v_cmp_ge_f32_e64 s[2:3], |v5|, |v3| -; GFX9-NEXT: v_cvt_f32_i32_e32 v3, v1 +; GFX9-NEXT: v_mul_f32_e32 v5, v1, v5 +; GFX9-NEXT: v_trunc_f32_e32 v5, v5 +; GFX9-NEXT: v_mad_f32 v1, -v5, v0, v1 +; GFX9-NEXT: v_cvt_i32_f32_e32 v5, v5 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[2:3], |v1|, |v0| ; GFX9-NEXT: s_and_b64 s[2:3], s[2:3], exec ; GFX9-NEXT: s_cselect_b32 s2, s4, 0 -; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 15 -; GFX9-NEXT: v_add_u32_e32 v5, s2, v6 -; GFX9-NEXT: v_cvt_f32_i32_e32 v6, v0 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v3 -; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1 -; GFX9-NEXT: v_ashrrev_i32_e32 v0, 30, v0 -; GFX9-NEXT: v_or_b32_e32 v0, 1, v0 -; GFX9-NEXT: v_mul_f32_e32 v1, v6, v7 -; GFX9-NEXT: v_trunc_f32_e32 v1, v1 -; GFX9-NEXT: v_cvt_i32_f32_e32 v7, v1 -; GFX9-NEXT: v_mad_f32 v1, -v1, v3, v6 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, |v3| -; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GFX9-NEXT: v_add_u32_e32 v0, v7, v0 -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v4 -; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v5 +; GFX9-NEXT: v_add_u32_e32 v0, s2, v5 +; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v4 ; GFX9-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1] +; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 15, v4 ; GFX9-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX9-NEXT: v_or_b32_e32 v0, v3, v0 @@ -5233,74 +5223,73 @@ define amdgpu_kernel void @srem_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_bfe_i32 s6, s10, 0xf0000 -; GFX6-NEXT: v_mov_b32_e32 v2, s4 -; GFX6-NEXT: v_alignbit_b32 v2, s5, v2, 30 -; GFX6-NEXT: s_bfe_i32 s5, s4, 0xf0000 -; GFX6-NEXT: v_cvt_f32_i32_e32 v4, s5 -; GFX6-NEXT: v_cvt_f32_i32_e32 v5, s6 -; GFX6-NEXT: s_xor_b32 s5, s6, s5 -; GFX6-NEXT: s_ashr_i32 s5, s5, 30 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v4 ; GFX6-NEXT: s_mov_b32 s0, s8 ; GFX6-NEXT: s_mov_b32 s1, s9 -; GFX6-NEXT: s_lshr_b32 s8, s10, 15 -; GFX6-NEXT: v_mul_f32_e32 v6, v5, v6 -; GFX6-NEXT: v_trunc_f32_e32 v6, v6 -; GFX6-NEXT: v_mad_f32 v5, -v6, v4, v5 -; GFX6-NEXT: v_cvt_i32_f32_e32 v6, v6 -; GFX6-NEXT: s_lshr_b32 s9, s4, 15 -; GFX6-NEXT: s_or_b32 s5, s5, 1 -; GFX6-NEXT: 
v_cmp_ge_f32_e64 s[6:7], |v5|, |v4| -; GFX6-NEXT: s_and_b64 s[6:7], s[6:7], exec -; GFX6-NEXT: s_cselect_b32 s5, s5, 0 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, s5, v6 -; GFX6-NEXT: v_mul_lo_u32 v4, v4, s4 -; GFX6-NEXT: s_bfe_i32 s4, s4, 0xf000f -; GFX6-NEXT: v_cvt_f32_i32_e32 v5, s4 -; GFX6-NEXT: s_bfe_i32 s5, s10, 0xf000f -; GFX6-NEXT: v_cvt_f32_i32_e32 v6, s5 -; GFX6-NEXT: s_xor_b32 s4, s5, s4 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v7, v5 -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v2 +; GFX6-NEXT: s_lshr_b64 s[8:9], s[4:5], 30 +; GFX6-NEXT: s_bfe_i32 s5, s4, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s5 +; GFX6-NEXT: s_bfe_i32 s12, s10, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s12 +; GFX6-NEXT: s_xor_b32 s5, s12, s5 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GFX6-NEXT: s_lshr_b64 s[6:7], s[10:11], 30 +; GFX6-NEXT: s_ashr_i32 s5, s5, 30 +; GFX6-NEXT: s_and_b32 s7, s6, 0x7fff +; GFX6-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX6-NEXT: v_trunc_f32_e32 v2, v2 +; GFX6-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX6-NEXT: v_cvt_i32_f32_e32 v2, v2 +; GFX6-NEXT: s_lshr_b32 s11, s10, 15 +; GFX6-NEXT: s_and_b32 s9, s8, 0x7fff +; GFX6-NEXT: s_lshr_b32 s14, s4, 15 +; GFX6-NEXT: s_or_b32 s5, s5, 1 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[12:13], |v1|, |v0| +; GFX6-NEXT: s_and_b64 s[12:13], s[12:13], exec +; GFX6-NEXT: s_cselect_b32 s5, s5, 0 +; GFX6-NEXT: v_add_i32_e32 v0, vcc, s5, v2 +; GFX6-NEXT: v_mul_lo_u32 v0, v0, s4 +; GFX6-NEXT: s_bfe_i32 s4, s4, 0xf000f +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s4 +; GFX6-NEXT: s_bfe_i32 s5, s10, 0xf000f +; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s10, v0 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s5 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v3, v1 +; GFX6-NEXT: s_xor_b32 s4, s5, s4 +; GFX6-NEXT: s_ashr_i32 s4, s4, 30 +; GFX6-NEXT: s_or_b32 s10, s4, 1 +; GFX6-NEXT: v_mul_f32_e32 v3, v0, v3 +; GFX6-NEXT: v_trunc_f32_e32 v3, v3 +; GFX6-NEXT: v_mad_f32 v0, -v3, v1, v0 +; GFX6-NEXT: v_cvt_i32_f32_e32 v3, v3 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v0|, |v1| +; GFX6-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX6-NEXT: s_cselect_b32 s4, s10, 0 +; GFX6-NEXT: v_add_i32_e32 v0, vcc, s4, v3 +; GFX6-NEXT: s_bfe_i32 s4, s8, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s4 +; GFX6-NEXT: s_bfe_i32 s5, s6, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v3, s5 +; GFX6-NEXT: s_xor_b32 s4, s5, s4 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v1 ; GFX6-NEXT: s_ashr_i32 s4, s4, 30 -; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 15 -; GFX6-NEXT: v_mul_f32_e32 v7, v6, v7 -; GFX6-NEXT: v_trunc_f32_e32 v7, v7 -; GFX6-NEXT: v_mad_f32 v6, -v7, v5, v6 ; GFX6-NEXT: s_or_b32 s6, s4, 1 -; GFX6-NEXT: v_cvt_i32_f32_e32 v7, v7 -; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v6|, |v5| -; GFX6-NEXT: v_cvt_f32_i32_e32 v6, v2 -; GFX6-NEXT: v_mov_b32_e32 v0, s10 -; GFX6-NEXT: v_alignbit_b32 v0, s11, v0, 30 +; GFX6-NEXT: v_mul_lo_u32 v0, v0, s14 +; GFX6-NEXT: v_mul_f32_e32 v4, v3, v4 +; GFX6-NEXT: v_trunc_f32_e32 v4, v4 +; GFX6-NEXT: v_mad_f32 v3, -v4, v1, v3 +; GFX6-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v3|, |v1| ; GFX6-NEXT: s_and_b64 s[4:5], s[4:5], exec -; GFX6-NEXT: v_and_b32_e32 v1, 0x7fff, v0 ; GFX6-NEXT: s_cselect_b32 s4, s6, 0 -; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 15 -; GFX6-NEXT: v_add_i32_e32 v5, vcc, s4, v7 -; GFX6-NEXT: v_cvt_f32_i32_e32 v7, v0 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v8, v6 -; GFX6-NEXT: v_xor_b32_e32 v0, v0, v2 -; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s10, v4 -; GFX6-NEXT: v_mul_f32_e32 v2, v7, v8 -; GFX6-NEXT: v_trunc_f32_e32 v2, v2 -; GFX6-NEXT: v_mad_f32 v7, -v2, v6, v7 -; GFX6-NEXT: v_cvt_i32_f32_e32 v2, v2 -; GFX6-NEXT: v_ashrrev_i32_e32 
v0, 30, v0 -; GFX6-NEXT: v_or_b32_e32 v0, 1, v0 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, |v6| -; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GFX6-NEXT: v_mul_lo_u32 v5, v5, s9 -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_mul_lo_u32 v0, v0, v3 -; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s8, v5 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 -; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v1, v0 +; GFX6-NEXT: v_add_i32_e32 v1, vcc, s4, v4 +; GFX6-NEXT: v_mul_lo_u32 v1, v1, s9 +; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s11, v0 +; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v3 +; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s7, v1 ; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v2, 15, v2 -; GFX6-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, 15, v3 +; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 ; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt expcnt(0) @@ -5312,78 +5301,77 @@ define amdgpu_kernel void @srem_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 -; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30 -; GFX9-NEXT: s_bfe_i32 s3, s6, 0xf0000 -; GFX9-NEXT: v_cvt_f32_i32_e32 v4, s3 -; GFX9-NEXT: s_bfe_i32 s4, s2, 0xf0000 -; GFX9-NEXT: v_cvt_f32_i32_e32 v5, s4 -; GFX9-NEXT: s_xor_b32 s3, s4, s3 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v4 -; GFX9-NEXT: v_mov_b32_e32 v1, s6 -; GFX9-NEXT: s_ashr_i32 s3, s3, 30 -; GFX9-NEXT: s_lshr_b32 s8, s2, 15 -; GFX9-NEXT: v_mul_f32_e32 v6, v5, v6 -; GFX9-NEXT: v_trunc_f32_e32 v6, v6 -; GFX9-NEXT: v_mad_f32 v5, -v6, v4, v5 -; GFX9-NEXT: v_cvt_i32_f32_e32 v6, v6 -; GFX9-NEXT: v_alignbit_b32 v1, s7, v1, 30 -; GFX9-NEXT: s_lshr_b32 s7, s6, 15 -; GFX9-NEXT: s_or_b32 s3, s3, 1 -; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], |v5|, |v4| -; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], exec -; GFX9-NEXT: s_cselect_b32 s3, s3, 0 -; GFX9-NEXT: v_add_u32_e32 v4, s3, v6 -; GFX9-NEXT: s_bfe_i32 s3, s6, 0xf000f -; GFX9-NEXT: v_cvt_f32_i32_e32 v5, s3 -; GFX9-NEXT: s_bfe_i32 s4, s2, 0xf000f -; GFX9-NEXT: v_cvt_f32_i32_e32 v6, s4 -; GFX9-NEXT: s_xor_b32 s3, s4, s3 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v5 -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v1 -; GFX9-NEXT: s_ashr_i32 s3, s3, 30 -; GFX9-NEXT: v_bfe_i32 v1, v1, 0, 15 -; GFX9-NEXT: v_mul_f32_e32 v7, v6, v7 -; GFX9-NEXT: v_trunc_f32_e32 v7, v7 -; GFX9-NEXT: v_mad_f32 v6, -v7, v5, v6 -; GFX9-NEXT: v_cvt_i32_f32_e32 v7, v7 -; GFX9-NEXT: s_or_b32 s3, s3, 1 -; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], |v6|, |v5| -; GFX9-NEXT: v_cvt_f32_i32_e32 v6, v1 +; GFX9-NEXT: s_lshr_b64 s[4:5], s[2:3], 30 +; GFX9-NEXT: s_bfe_i32 s5, s6, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v0, s5 +; GFX9-NEXT: s_lshr_b64 s[8:9], s[6:7], 30 +; GFX9-NEXT: s_bfe_i32 s7, s2, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s7 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GFX9-NEXT: s_xor_b32 s5, s7, s5 +; GFX9-NEXT: s_ashr_i32 s5, s5, 30 +; GFX9-NEXT: s_lshr_b32 s3, s2, 15 +; GFX9-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX9-NEXT: v_trunc_f32_e32 v2, v2 +; GFX9-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX9-NEXT: v_cvt_i32_f32_e32 v2, v2 +; GFX9-NEXT: s_and_b32 s9, s4, 0x7fff +; GFX9-NEXT: s_and_b32 s12, s8, 0x7fff +; GFX9-NEXT: s_lshr_b32 s13, s6, 15 +; GFX9-NEXT: s_or_b32 s5, s5, 1 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[10:11], |v1|, |v0| +; 
GFX9-NEXT: s_and_b64 s[10:11], s[10:11], exec +; GFX9-NEXT: s_cselect_b32 s5, s5, 0 +; GFX9-NEXT: v_add_u32_e32 v0, s5, v2 +; GFX9-NEXT: s_bfe_i32 s5, s6, 0xf000f +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s5 +; GFX9-NEXT: v_mul_lo_u32 v0, v0, s6 +; GFX9-NEXT: s_bfe_i32 s6, s2, 0xf000f +; GFX9-NEXT: v_cvt_f32_i32_e32 v2, s6 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v1 +; GFX9-NEXT: s_xor_b32 s5, s6, s5 +; GFX9-NEXT: s_ashr_i32 s5, s5, 30 +; GFX9-NEXT: s_or_b32 s5, s5, 1 +; GFX9-NEXT: v_mul_f32_e32 v3, v2, v3 +; GFX9-NEXT: v_trunc_f32_e32 v3, v3 +; GFX9-NEXT: v_mad_f32 v2, -v3, v1, v2 +; GFX9-NEXT: v_cvt_i32_f32_e32 v3, v3 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[6:7], |v2|, |v1| +; GFX9-NEXT: s_and_b64 s[6:7], s[6:7], exec +; GFX9-NEXT: s_cselect_b32 s5, s5, 0 +; GFX9-NEXT: v_add_u32_e32 v1, s5, v3 +; GFX9-NEXT: s_bfe_i32 s5, s8, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v2, s5 +; GFX9-NEXT: s_bfe_i32 s4, s4, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v3, s4 +; GFX9-NEXT: s_xor_b32 s4, s4, s5 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v2 +; GFX9-NEXT: s_ashr_i32 s4, s4, 30 +; GFX9-NEXT: s_or_b32 s6, s4, 1 +; GFX9-NEXT: v_mul_lo_u32 v1, v1, s13 +; GFX9-NEXT: v_mul_f32_e32 v4, v3, v4 +; GFX9-NEXT: v_trunc_f32_e32 v4, v4 +; GFX9-NEXT: v_mad_f32 v3, -v4, v2, v3 +; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], |v3|, |v2| ; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], exec -; GFX9-NEXT: s_cselect_b32 s3, s3, 0 -; GFX9-NEXT: v_add_u32_e32 v5, s3, v7 -; GFX9-NEXT: v_bfe_i32 v7, v0, 0, 15 -; GFX9-NEXT: v_cvt_f32_i32_e32 v8, v7 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v9, v6 -; GFX9-NEXT: v_xor_b32_e32 v1, v7, v1 -; GFX9-NEXT: v_ashrrev_i32_e32 v1, 30, v1 -; GFX9-NEXT: v_or_b32_e32 v1, 1, v1 -; GFX9-NEXT: v_mul_f32_e32 v7, v8, v9 -; GFX9-NEXT: v_trunc_f32_e32 v7, v7 -; GFX9-NEXT: v_cvt_i32_f32_e32 v9, v7 -; GFX9-NEXT: v_mad_f32 v7, -v7, v6, v8 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, |v6| -; GFX9-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; GFX9-NEXT: v_mul_lo_u32 v4, v4, s6 -; GFX9-NEXT: v_mul_lo_u32 v5, v5, s7 -; GFX9-NEXT: v_add_u32_e32 v1, v9, v1 -; GFX9-NEXT: v_mul_lo_u32 v1, v1, v3 -; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX9-NEXT: v_sub_u32_e32 v3, s2, v4 -; GFX9-NEXT: v_sub_u32_e32 v4, s8, v5 -; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1 -; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v4 +; GFX9-NEXT: s_cselect_b32 s4, s6, 0 +; GFX9-NEXT: v_add_u32_e32 v2, s4, v4 +; GFX9-NEXT: v_mul_lo_u32 v2, v2, s12 +; GFX9-NEXT: v_sub_u32_e32 v4, s2, v0 +; GFX9-NEXT: v_sub_u32_e32 v5, s3, v1 +; GFX9-NEXT: v_mov_b32_e32 v3, 0 +; GFX9-NEXT: v_sub_u32_e32 v0, s9, v2 +; GFX9-NEXT: v_and_b32_e32 v2, 0x7fff, v4 +; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v5 ; GFX9-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1] -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 15, v4 -; GFX9-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX9-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX9-NEXT: global_store_dword v2, v0, s[0:1] +; GFX9-NEXT: v_or_b32_e32 v2, v2, v4 +; GFX9-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX9-NEXT: global_store_dword v3, v0, s[0:1] ; GFX9-NEXT: v_and_b32_e32 v0, 0x1fff, v1 -; GFX9-NEXT: global_store_short v2, v0, s[0:1] offset:4 +; GFX9-NEXT: global_store_short v3, v0, s[0:1] offset:4 ; GFX9-NEXT: s_endpgm %r = srem <3 x i15> %x, %y store <3 x i15> %r, ptr addrspace(1) %out @@ -7792,8 +7780,9 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x ; GFX6-LABEL: sdiv_i64_pow2_shl_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 
+; GFX6-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) ; GFX6-NEXT: s_lshl_b64 s[0:1], 0x1000, s0 ; GFX6-NEXT: s_ashr_i32 s8, s1, 31 @@ -7803,143 +7792,175 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x ; GFX6-NEXT: s_xor_b64 s[10:11], s[0:1], s[8:9] ; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s10 ; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s11 -; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_sub_u32 s4, 0, s10 -; GFX6-NEXT: s_subb_u32 s5, 0, s11 +; GFX6-NEXT: s_sub_u32 s12, 0, s10 +; GFX6-NEXT: s_subb_u32 s13, 0, s11 ; GFX6-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GFX6-NEXT: v_rcp_f32_e32 v0, v0 -; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_ashr_i32 s12, s3, 31 -; GFX6-NEXT: s_add_u32 s2, s2, s12 -; GFX6-NEXT: s_mov_b32 s13, s12 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GFX6-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GFX6-NEXT: v_trunc_f32_e32 v1, v1 ; GFX6-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: s_addc_u32 s3, s3, s12 -; GFX6-NEXT: s_xor_b64 s[2:3], s[2:3], s[12:13] -; GFX6-NEXT: v_mul_lo_u32 v2, s4, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s4, v0 -; GFX6-NEXT: v_mul_lo_u32 v5, s5, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s4, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GFX6-NEXT: v_mul_hi_u32 v3, v0, v4 -; GFX6-NEXT: v_mul_lo_u32 v5, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, v0, v2 -; GFX6-NEXT: v_mul_lo_u32 v6, v1, v4 -; GFX6-NEXT: v_mul_hi_u32 v4, v1, v4 -; GFX6-NEXT: v_mul_hi_u32 v8, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v6 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v5, v4, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, s4, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s4, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s5, v0 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, s4, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: v_mul_lo_u32 v6, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, v0, v3 -; GFX6-NEXT: v_mul_hi_u32 v8, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v5, v1, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, v1, v3 -; GFX6-NEXT: v_mul_hi_u32 v4, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GFX6-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, s2, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s2, v0 -; GFX6-NEXT: v_mul_hi_u32 v4, s2, v1 -; GFX6-NEXT: v_mul_hi_u32 v5, s3, v1 -; GFX6-NEXT: v_mul_lo_u32 v1, s3, v1 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_mul_lo_u32 v4, s3, v0 -; GFX6-NEXT: v_mul_hi_u32 v0, s3, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: 
v_addc_u32_e32 v0, vcc, v3, v0, vcc -; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, s10, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s10, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s11, v0 -; GFX6-NEXT: v_mov_b32_e32 v5, s11 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, s10, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v4, v2 -; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s3, v2 -; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s2, v3 -; GFX6-NEXT: v_subb_u32_e64 v4, s[0:1], v4, v5, vcc -; GFX6-NEXT: v_subrev_i32_e64 v5, s[0:1], s10, v3 -; GFX6-NEXT: v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1] -; GFX6-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1] -; GFX6-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v5 -; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1] -; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v4, v6, v5, s[0:1] -; GFX6-NEXT: v_add_i32_e64 v5, s[0:1], 1, v0 -; GFX6-NEXT: v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1] -; GFX6-NEXT: v_add_i32_e64 v7, s[0:1], 2, v0 -; GFX6-NEXT: v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1] -; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v4, v5, v7, s[0:1] -; GFX6-NEXT: v_cndmask_b32_e64 v5, v6, v8, s[0:1] -; GFX6-NEXT: v_mov_b32_e32 v6, s3 -; GFX6-NEXT: v_subb_u32_e32 v2, vcc, v6, v2, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s11, v2 -; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s10, v3 -; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, s11, v2 -; GFX6-NEXT: v_cndmask_b32_e32 v2, v6, v3, vcc -; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 -; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc -; GFX6-NEXT: s_xor_b64 s[0:1], s[12:13], s[8:9] -; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc -; GFX6-NEXT: v_xor_b32_e32 v0, s0, v0 -; GFX6-NEXT: v_xor_b32_e32 v1, s1, v1 +; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX6-NEXT: v_mul_hi_u32 v2, s12, v0 +; GFX6-NEXT: v_readfirstlane_b32 s14, v1 +; GFX6-NEXT: v_readfirstlane_b32 s0, v0 +; GFX6-NEXT: s_mul_i32 s1, s12, s14 +; GFX6-NEXT: v_readfirstlane_b32 s17, v2 +; GFX6-NEXT: s_mul_i32 s15, s13, s0 +; GFX6-NEXT: s_mul_i32 s16, s12, s0 +; GFX6-NEXT: s_add_i32 s1, s17, s1 +; GFX6-NEXT: v_mul_hi_u32 v3, v0, s16 +; GFX6-NEXT: s_add_i32 s1, s1, s15 +; GFX6-NEXT: v_mul_hi_u32 v0, v0, s1 +; GFX6-NEXT: v_mul_hi_u32 v4, v1, s16 +; GFX6-NEXT: v_readfirstlane_b32 s15, v3 +; GFX6-NEXT: s_mul_i32 s17, s0, s1 +; GFX6-NEXT: v_mul_hi_u32 v1, v1, s1 +; GFX6-NEXT: s_add_u32 s15, s15, s17 +; GFX6-NEXT: v_readfirstlane_b32 s17, v0 +; GFX6-NEXT: s_addc_u32 s17, 0, s17 +; GFX6-NEXT: s_mul_i32 s16, s14, s16 +; GFX6-NEXT: v_readfirstlane_b32 s18, v4 +; GFX6-NEXT: s_add_u32 s15, s15, s16 +; GFX6-NEXT: s_addc_u32 s15, s17, s18 +; GFX6-NEXT: v_readfirstlane_b32 s16, v1 +; GFX6-NEXT: s_addc_u32 s16, s16, 0 +; GFX6-NEXT: s_mul_i32 s1, s14, s1 +; GFX6-NEXT: s_add_u32 s1, s15, s1 +; GFX6-NEXT: s_addc_u32 s15, 0, s16 +; GFX6-NEXT: s_add_u32 s16, s0, s1 +; GFX6-NEXT: v_mov_b32_e32 v0, s16 +; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX6-NEXT: v_mul_hi_u32 v0, s12, v0 +; GFX6-NEXT: s_or_b32 s0, s0, s1 +; GFX6-NEXT: s_cmp_lg_u32 s0, 0 +; GFX6-NEXT: s_addc_u32 s14, s14, s15 +; GFX6-NEXT: s_mul_i32 s0, s12, s14 +; GFX6-NEXT: v_readfirstlane_b32 s1, v0 +; GFX6-NEXT: s_add_i32 s0, s1, s0 +; GFX6-NEXT: s_mul_i32 s13, s13, s16 +; GFX6-NEXT: s_mul_i32 s1, s12, s16 +; GFX6-NEXT: s_add_i32 s0, s0, s13 ; GFX6-NEXT: 
v_mov_b32_e32 v2, s1 -; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0 -; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GFX6-NEXT: v_mov_b32_e32 v0, s0 +; GFX6-NEXT: v_mul_hi_u32 v3, s14, v2 +; GFX6-NEXT: v_mul_hi_u32 v2, s16, v2 +; GFX6-NEXT: v_mul_hi_u32 v1, s14, v0 +; GFX6-NEXT: v_mul_hi_u32 v0, s16, v0 +; GFX6-NEXT: s_mul_i32 s13, s16, s0 +; GFX6-NEXT: v_readfirstlane_b32 s17, v2 +; GFX6-NEXT: s_add_u32 s13, s17, s13 +; GFX6-NEXT: v_readfirstlane_b32 s15, v0 +; GFX6-NEXT: s_mul_i32 s1, s14, s1 +; GFX6-NEXT: s_addc_u32 s15, 0, s15 +; GFX6-NEXT: v_readfirstlane_b32 s12, v3 +; GFX6-NEXT: s_add_u32 s1, s13, s1 +; GFX6-NEXT: s_addc_u32 s1, s15, s12 +; GFX6-NEXT: v_readfirstlane_b32 s12, v1 +; GFX6-NEXT: s_addc_u32 s12, s12, 0 +; GFX6-NEXT: s_mul_i32 s0, s14, s0 +; GFX6-NEXT: s_add_u32 s0, s1, s0 +; GFX6-NEXT: s_addc_u32 s12, 0, s12 +; GFX6-NEXT: s_add_u32 s15, s16, s0 +; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX6-NEXT: s_or_b32 s0, s0, s1 +; GFX6-NEXT: s_cmp_lg_u32 s0, 0 +; GFX6-NEXT: s_addc_u32 s14, s14, s12 +; GFX6-NEXT: s_ashr_i32 s12, s7, 31 +; GFX6-NEXT: s_add_u32 s0, s6, s12 +; GFX6-NEXT: s_mov_b32 s13, s12 +; GFX6-NEXT: s_addc_u32 s1, s7, s12 +; GFX6-NEXT: s_xor_b64 s[6:7], s[0:1], s[12:13] +; GFX6-NEXT: v_mov_b32_e32 v0, s14 +; GFX6-NEXT: v_mul_hi_u32 v1, s6, v0 +; GFX6-NEXT: v_mov_b32_e32 v2, s15 +; GFX6-NEXT: v_mul_hi_u32 v3, s6, v2 +; GFX6-NEXT: s_mov_b32 s0, s4 +; GFX6-NEXT: v_readfirstlane_b32 s4, v1 +; GFX6-NEXT: v_mul_hi_u32 v1, s7, v2 +; GFX6-NEXT: s_mul_i32 s1, s6, s14 +; GFX6-NEXT: v_readfirstlane_b32 s16, v3 +; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0 +; GFX6-NEXT: s_add_u32 s1, s16, s1 +; GFX6-NEXT: s_addc_u32 s4, 0, s4 +; GFX6-NEXT: s_mul_i32 s15, s7, s15 +; GFX6-NEXT: v_readfirstlane_b32 s16, v1 +; GFX6-NEXT: s_add_u32 s1, s1, s15 +; GFX6-NEXT: s_addc_u32 s1, s4, s16 +; GFX6-NEXT: v_readfirstlane_b32 s4, v0 +; GFX6-NEXT: s_addc_u32 s4, s4, 0 +; GFX6-NEXT: s_mul_i32 s14, s7, s14 +; GFX6-NEXT: s_add_u32 s14, s1, s14 +; GFX6-NEXT: v_mov_b32_e32 v0, s14 +; GFX6-NEXT: v_mul_hi_u32 v0, s10, v0 +; GFX6-NEXT: s_addc_u32 s15, 0, s4 +; GFX6-NEXT: s_mov_b32 s1, s5 +; GFX6-NEXT: s_mul_i32 s4, s10, s15 +; GFX6-NEXT: v_readfirstlane_b32 s5, v0 +; GFX6-NEXT: s_add_i32 s4, s5, s4 +; GFX6-NEXT: s_mul_i32 s5, s11, s14 +; GFX6-NEXT: s_add_i32 s16, s4, s5 +; GFX6-NEXT: s_sub_i32 s17, s7, s16 +; GFX6-NEXT: s_mul_i32 s4, s10, s14 +; GFX6-NEXT: s_sub_u32 s6, s6, s4 +; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX6-NEXT: s_or_b32 s18, s4, s5 +; GFX6-NEXT: s_cmp_lg_u32 s18, 0 +; GFX6-NEXT: s_subb_u32 s17, s17, s11 +; GFX6-NEXT: s_sub_u32 s19, s6, s10 +; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX6-NEXT: s_or_b32 s4, s4, s5 +; GFX6-NEXT: s_cmp_lg_u32 s4, 0 +; GFX6-NEXT: s_subb_u32 s4, s17, 0 +; GFX6-NEXT: s_cmp_ge_u32 s4, s11 +; GFX6-NEXT: s_cselect_b32 s5, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s19, s10 +; GFX6-NEXT: s_cselect_b32 s17, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s4, s11 +; GFX6-NEXT: s_cselect_b32 s4, s17, s5 +; GFX6-NEXT: s_add_u32 s5, s14, 1 +; GFX6-NEXT: s_addc_u32 s17, s15, 0 +; GFX6-NEXT: s_add_u32 s19, s14, 2 +; GFX6-NEXT: s_addc_u32 s20, s15, 0 +; GFX6-NEXT: s_cmp_lg_u32 s4, 0 +; GFX6-NEXT: s_cselect_b32 s4, s19, s5 +; GFX6-NEXT: s_cselect_b32 s5, s20, s17 +; GFX6-NEXT: s_cmp_lg_u32 s18, 0 +; GFX6-NEXT: s_subb_u32 s7, s7, s16 +; GFX6-NEXT: s_cmp_ge_u32 s7, s11 +; GFX6-NEXT: s_cselect_b32 s16, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s6, s10 +; GFX6-NEXT: s_cselect_b32 s6, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s7, s11 +; GFX6-NEXT: 
s_cselect_b32 s6, s6, s16 +; GFX6-NEXT: s_cmp_lg_u32 s6, 0 +; GFX6-NEXT: s_cselect_b32 s5, s5, s15 +; GFX6-NEXT: s_cselect_b32 s4, s4, s14 +; GFX6-NEXT: s_xor_b64 s[6:7], s[12:13], s[8:9] +; GFX6-NEXT: s_xor_b64 s[4:5], s[4:5], s[6:7] +; GFX6-NEXT: s_sub_u32 s4, s4, s6 +; GFX6-NEXT: s_subb_u32 s5, s5, s7 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: v_mov_b32_e32 v1, s5 +; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: sdiv_i64_pow2_shl_denom: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34 -; GFX9-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: s_lshl_b64 s[0:1], 0x1000, s0 -; GFX9-NEXT: s_ashr_i32 s2, s1, 31 -; GFX9-NEXT: s_add_u32 s0, s0, s2 -; GFX9-NEXT: s_mov_b32 s3, s2 -; GFX9-NEXT: s_addc_u32 s1, s1, s2 -; GFX9-NEXT: s_xor_b64 s[6:7], s[0:1], s[2:3] -; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6 -; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7 -; GFX9-NEXT: s_sub_u32 s0, 0, s6 -; GFX9-NEXT: s_subb_u32 s1, 0, s7 +; GFX9-NEXT: s_ashr_i32 s6, s1, 31 +; GFX9-NEXT: s_add_u32 s0, s0, s6 +; GFX9-NEXT: s_mov_b32 s7, s6 +; GFX9-NEXT: s_addc_u32 s1, s1, s6 +; GFX9-NEXT: s_xor_b64 s[8:9], s[0:1], s[6:7] +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s8 +; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s9 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-NEXT: s_sub_u32 s10, 0, s8 +; GFX9-NEXT: s_subb_u32 s11, 0, s9 ; GFX9-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GFX9-NEXT: v_rcp_f32_e32 v1, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 @@ -7949,130 +7970,122 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x ; GFX9-NEXT: v_madmk_f32 v1, v2, 0xcf800000, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2 ; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GFX9-NEXT: v_readfirstlane_b32 s4, v2 -; GFX9-NEXT: v_readfirstlane_b32 s5, v1 -; GFX9-NEXT: s_mul_i32 s12, s0, s4 -; GFX9-NEXT: s_mul_hi_u32 s14, s0, s5 -; GFX9-NEXT: s_mul_i32 s13, s1, s5 -; GFX9-NEXT: s_add_i32 s12, s14, s12 -; GFX9-NEXT: s_mul_i32 s15, s0, s5 -; GFX9-NEXT: s_add_i32 s12, s12, s13 -; GFX9-NEXT: s_mul_hi_u32 s14, s5, s15 -; GFX9-NEXT: s_mul_hi_u32 s13, s5, s12 -; GFX9-NEXT: s_mul_i32 s5, s5, s12 -; GFX9-NEXT: s_add_u32 s5, s14, s5 +; GFX9-NEXT: v_readfirstlane_b32 s12, v2 +; GFX9-NEXT: v_readfirstlane_b32 s4, v1 +; GFX9-NEXT: s_mul_i32 s5, s10, s12 +; GFX9-NEXT: s_mul_hi_u32 s14, s10, s4 +; GFX9-NEXT: s_mul_i32 s13, s11, s4 +; GFX9-NEXT: s_add_i32 s5, s14, s5 +; GFX9-NEXT: s_mul_i32 s15, s10, s4 +; GFX9-NEXT: s_add_i32 s5, s5, s13 +; GFX9-NEXT: s_mul_hi_u32 s14, s4, s15 +; GFX9-NEXT: s_mul_i32 s16, s4, s5 +; GFX9-NEXT: s_mul_hi_u32 s13, s4, s5 +; GFX9-NEXT: s_add_u32 s14, s14, s16 ; GFX9-NEXT: s_addc_u32 s13, 0, s13 -; GFX9-NEXT: s_mul_hi_u32 s16, s4, s15 -; GFX9-NEXT: s_mul_i32 s15, s4, s15 -; GFX9-NEXT: s_add_u32 s5, s5, s15 -; GFX9-NEXT: s_mul_hi_u32 s14, s4, s12 -; GFX9-NEXT: s_addc_u32 s5, s13, s16 -; GFX9-NEXT: s_addc_u32 s13, s14, 0 -; GFX9-NEXT: s_mul_i32 s12, s4, s12 -; GFX9-NEXT: s_add_u32 s5, s5, s12 -; GFX9-NEXT: s_addc_u32 s12, 0, s13 -; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s5, v1 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s4, s4, s12 -; GFX9-NEXT: v_readfirstlane_b32 s12, v1 -; GFX9-NEXT: s_mul_i32 s5, s0, s4 -; GFX9-NEXT: s_mul_hi_u32 s13, s0, s12 -; GFX9-NEXT: s_add_i32 s5, s13, s5 -; GFX9-NEXT: s_mul_i32 s1, s1, s12 -; GFX9-NEXT: s_add_i32 s5, s5, s1 -; GFX9-NEXT: s_mul_i32 s0, s0, s12 -; GFX9-NEXT: s_mul_hi_u32 s13, s4, s0 -; GFX9-NEXT: s_mul_i32 s14, s4, s0 -; GFX9-NEXT: s_mul_i32 s16, s12, s5 -; GFX9-NEXT: 
s_mul_hi_u32 s0, s12, s0 -; GFX9-NEXT: s_mul_hi_u32 s15, s12, s5 -; GFX9-NEXT: s_add_u32 s0, s0, s16 -; GFX9-NEXT: s_addc_u32 s12, 0, s15 -; GFX9-NEXT: s_add_u32 s0, s0, s14 -; GFX9-NEXT: s_mul_hi_u32 s1, s4, s5 -; GFX9-NEXT: s_addc_u32 s0, s12, s13 -; GFX9-NEXT: s_addc_u32 s1, s1, 0 -; GFX9-NEXT: s_mul_i32 s5, s4, s5 -; GFX9-NEXT: s_add_u32 s0, s0, s5 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s0, v1 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s12, s4, s1 -; GFX9-NEXT: s_ashr_i32 s4, s11, 31 -; GFX9-NEXT: s_add_u32 s0, s10, s4 +; GFX9-NEXT: s_mul_hi_u32 s17, s12, s15 +; GFX9-NEXT: s_mul_i32 s15, s12, s15 +; GFX9-NEXT: s_add_u32 s14, s14, s15 +; GFX9-NEXT: s_mul_hi_u32 s16, s12, s5 +; GFX9-NEXT: s_addc_u32 s13, s13, s17 +; GFX9-NEXT: s_addc_u32 s14, s16, 0 +; GFX9-NEXT: s_mul_i32 s5, s12, s5 +; GFX9-NEXT: s_add_u32 s5, s13, s5 +; GFX9-NEXT: s_addc_u32 s13, 0, s14 +; GFX9-NEXT: s_add_u32 s14, s4, s5 +; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9-NEXT: s_addc_u32 s12, s12, s13 +; GFX9-NEXT: s_mul_i32 s4, s10, s12 +; GFX9-NEXT: s_mul_hi_u32 s5, s10, s14 +; GFX9-NEXT: s_add_i32 s4, s5, s4 +; GFX9-NEXT: s_mul_i32 s11, s11, s14 +; GFX9-NEXT: s_add_i32 s4, s4, s11 +; GFX9-NEXT: s_mul_i32 s10, s10, s14 +; GFX9-NEXT: s_mul_hi_u32 s11, s12, s10 +; GFX9-NEXT: s_mul_i32 s13, s12, s10 +; GFX9-NEXT: s_mul_i32 s16, s14, s4 +; GFX9-NEXT: s_mul_hi_u32 s10, s14, s10 +; GFX9-NEXT: s_mul_hi_u32 s15, s14, s4 +; GFX9-NEXT: s_add_u32 s10, s10, s16 +; GFX9-NEXT: s_addc_u32 s15, 0, s15 +; GFX9-NEXT: s_add_u32 s10, s10, s13 +; GFX9-NEXT: s_mul_hi_u32 s5, s12, s4 +; GFX9-NEXT: s_addc_u32 s10, s15, s11 +; GFX9-NEXT: s_addc_u32 s5, s5, 0 +; GFX9-NEXT: s_mul_i32 s4, s12, s4 +; GFX9-NEXT: s_add_u32 s4, s10, s4 +; GFX9-NEXT: s_addc_u32 s10, 0, s5 +; GFX9-NEXT: s_add_u32 s11, s14, s4 +; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9-NEXT: s_addc_u32 s10, s12, s10 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_ashr_i32 s4, s3, 31 +; GFX9-NEXT: s_add_u32 s2, s2, s4 ; GFX9-NEXT: s_mov_b32 s5, s4 -; GFX9-NEXT: s_addc_u32 s1, s11, s4 -; GFX9-NEXT: s_xor_b64 s[10:11], s[0:1], s[4:5] -; GFX9-NEXT: v_readfirstlane_b32 s13, v1 -; GFX9-NEXT: s_mul_i32 s1, s10, s12 -; GFX9-NEXT: s_mul_hi_u32 s14, s10, s13 -; GFX9-NEXT: s_mul_hi_u32 s0, s10, s12 -; GFX9-NEXT: s_add_u32 s1, s14, s1 -; GFX9-NEXT: s_addc_u32 s0, 0, s0 -; GFX9-NEXT: s_mul_hi_u32 s15, s11, s13 -; GFX9-NEXT: s_mul_i32 s13, s11, s13 -; GFX9-NEXT: s_add_u32 s1, s1, s13 -; GFX9-NEXT: s_mul_hi_u32 s14, s11, s12 -; GFX9-NEXT: s_addc_u32 s0, s0, s15 -; GFX9-NEXT: s_addc_u32 s1, s14, 0 -; GFX9-NEXT: s_mul_i32 s12, s11, s12 -; GFX9-NEXT: s_add_u32 s12, s0, s12 -; GFX9-NEXT: s_addc_u32 s13, 0, s1 -; GFX9-NEXT: s_mul_i32 s0, s6, s13 -; GFX9-NEXT: s_mul_hi_u32 s1, s6, s12 -; GFX9-NEXT: s_add_i32 s0, s1, s0 -; GFX9-NEXT: s_mul_i32 s1, s7, s12 -; GFX9-NEXT: s_add_i32 s14, s0, s1 -; GFX9-NEXT: s_mul_i32 s1, s6, s12 -; GFX9-NEXT: v_mov_b32_e32 v1, s1 -; GFX9-NEXT: s_sub_i32 s0, s11, s14 -; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s10, v1 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_subb_u32 s10, s0, s7 -; GFX9-NEXT: v_subrev_co_u32_e64 v2, s[0:1], s6, v1 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s10, s10, 0 -; GFX9-NEXT: s_cmp_ge_u32 s10, s7 -; GFX9-NEXT: s_cselect_b32 s15, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v2 -; GFX9-NEXT: s_cmp_eq_u32 s10, s7 -; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v3, 
s15 -; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v2, v3, v2, s[0:1] -; GFX9-NEXT: s_add_u32 s0, s12, 1 -; GFX9-NEXT: s_addc_u32 s10, s13, 0 -; GFX9-NEXT: s_add_u32 s1, s12, 2 -; GFX9-NEXT: s_addc_u32 s15, s13, 0 -; GFX9-NEXT: v_mov_b32_e32 v3, s0 -; GFX9-NEXT: v_mov_b32_e32 v4, s1 -; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v2, v3, v4, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v3, s10 -; GFX9-NEXT: v_mov_b32_e32 v4, s15 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[0:1] -; GFX9-NEXT: s_subb_u32 s0, s11, s14 -; GFX9-NEXT: s_cmp_ge_u32 s0, s7 -; GFX9-NEXT: s_cselect_b32 s1, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s6, v1 -; GFX9-NEXT: s_cmp_eq_u32 s0, s7 -; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc -; GFX9-NEXT: v_mov_b32_e32 v4, s1 -; GFX9-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc -; GFX9-NEXT: v_mov_b32_e32 v4, s13 -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc -; GFX9-NEXT: v_mov_b32_e32 v3, s12 -; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc -; GFX9-NEXT: s_xor_b64 s[0:1], s[4:5], s[2:3] -; GFX9-NEXT: v_xor_b32_e32 v2, s0, v2 -; GFX9-NEXT: v_xor_b32_e32 v3, s1, v1 -; GFX9-NEXT: v_mov_b32_e32 v4, s1 -; GFX9-NEXT: v_subrev_co_u32_e32 v1, vcc, s0, v2 -; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v3, v4, vcc -; GFX9-NEXT: global_store_dwordx2 v0, v[1:2], s[8:9] +; GFX9-NEXT: s_addc_u32 s3, s3, s4 +; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5] +; GFX9-NEXT: s_mul_i32 s13, s2, s10 +; GFX9-NEXT: s_mul_hi_u32 s14, s2, s11 +; GFX9-NEXT: s_mul_hi_u32 s12, s2, s10 +; GFX9-NEXT: s_add_u32 s13, s14, s13 +; GFX9-NEXT: s_addc_u32 s12, 0, s12 +; GFX9-NEXT: s_mul_hi_u32 s15, s3, s11 +; GFX9-NEXT: s_mul_i32 s11, s3, s11 +; GFX9-NEXT: s_add_u32 s11, s13, s11 +; GFX9-NEXT: s_mul_hi_u32 s14, s3, s10 +; GFX9-NEXT: s_addc_u32 s11, s12, s15 +; GFX9-NEXT: s_addc_u32 s12, s14, 0 +; GFX9-NEXT: s_mul_i32 s10, s3, s10 +; GFX9-NEXT: s_add_u32 s14, s11, s10 +; GFX9-NEXT: s_addc_u32 s15, 0, s12 +; GFX9-NEXT: s_mul_i32 s10, s8, s15 +; GFX9-NEXT: s_mul_hi_u32 s11, s8, s14 +; GFX9-NEXT: s_add_i32 s10, s11, s10 +; GFX9-NEXT: s_mul_i32 s11, s9, s14 +; GFX9-NEXT: s_add_i32 s16, s10, s11 +; GFX9-NEXT: s_sub_i32 s12, s3, s16 +; GFX9-NEXT: s_mul_i32 s10, s8, s14 +; GFX9-NEXT: s_sub_u32 s2, s2, s10 +; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s17, s12, s9 +; GFX9-NEXT: s_sub_u32 s18, s2, s8 +; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GFX9-NEXT: s_subb_u32 s12, s17, 0 +; GFX9-NEXT: s_cmp_ge_u32 s12, s9 +; GFX9-NEXT: s_cselect_b32 s13, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s18, s8 +; GFX9-NEXT: s_cselect_b32 s17, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s12, s9 +; GFX9-NEXT: s_cselect_b32 s12, s17, s13 +; GFX9-NEXT: s_add_u32 s13, s14, 1 +; GFX9-NEXT: s_addc_u32 s17, s15, 0 +; GFX9-NEXT: s_add_u32 s18, s14, 2 +; GFX9-NEXT: s_addc_u32 s19, s15, 0 +; GFX9-NEXT: s_cmp_lg_u32 s12, 0 +; GFX9-NEXT: s_cselect_b32 s12, s18, s13 +; GFX9-NEXT: s_cselect_b32 s13, s19, s17 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s3, s3, s16 +; GFX9-NEXT: s_cmp_ge_u32 s3, s9 +; GFX9-NEXT: s_cselect_b32 s10, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s2, s8 +; GFX9-NEXT: s_cselect_b32 s2, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s3, s9 +; GFX9-NEXT: s_cselect_b32 s2, s2, s10 +; GFX9-NEXT: s_cmp_lg_u32 s2, 0 +; GFX9-NEXT: s_cselect_b32 s3, s13, s15 +; GFX9-NEXT: s_cselect_b32 s2, s12, s14 +; GFX9-NEXT: 
s_xor_b64 s[4:5], s[4:5], s[6:7] +; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5] +; GFX9-NEXT: s_sub_u32 s2, s2, s4 +; GFX9-NEXT: s_subb_u32 s3, s3, s5 +; GFX9-NEXT: v_mov_b32_e32 v1, s2 +; GFX9-NEXT: v_mov_b32_e32 v2, s3 +; GFX9-NEXT: global_store_dwordx2 v0, v[1:2], s[0:1] ; GFX9-NEXT: s_endpgm %shl.y = shl i64 4096, %y %r = sdiv i64 %x, %shl.y @@ -8276,276 +8289,343 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x ; GFX6-LABEL: sdiv_v2i64_pow2_shl_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0xd -; GFX6-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_lshl_b64 s[0:1], 0x1000, s12 -; GFX6-NEXT: s_lshl_b64 s[14:15], 0x1000, s14 -; GFX6-NEXT: s_ashr_i32 s12, s1, 31 -; GFX6-NEXT: s_add_u32 s0, s0, s12 -; GFX6-NEXT: s_mov_b32 s13, s12 -; GFX6-NEXT: s_addc_u32 s1, s1, s12 -; GFX6-NEXT: s_xor_b64 s[2:3], s[0:1], s[12:13] -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GFX6-NEXT: s_sub_u32 s0, 0, s2 -; GFX6-NEXT: s_subb_u32 s1, 0, s3 -; GFX6-NEXT: s_ashr_i32 s16, s9, 31 +; GFX6-NEXT: s_lshl_b64 s[6:7], 0x1000, s12 +; GFX6-NEXT: s_lshl_b64 s[0:1], 0x1000, s14 +; GFX6-NEXT: s_ashr_i32 s2, s7, 31 +; GFX6-NEXT: s_add_u32 s6, s6, s2 +; GFX6-NEXT: s_mov_b32 s3, s2 +; GFX6-NEXT: s_addc_u32 s7, s7, s2 +; GFX6-NEXT: s_xor_b64 s[6:7], s[6:7], s[2:3] +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s7 +; GFX6-NEXT: s_sub_u32 s14, 0, s6 +; GFX6-NEXT: s_subb_u32 s15, 0, s7 ; GFX6-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1 ; GFX6-NEXT: v_rcp_f32_e32 v0, v0 -; GFX6-NEXT: s_mov_b32 s17, s16 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GFX6-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GFX6-NEXT: v_trunc_f32_e32 v1, v1 ; GFX6-NEXT: v_mac_f32_e32 v0, 0xcf800000, v1 -; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: v_mul_lo_u32 v2, s0, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s0, v0 -; GFX6-NEXT: v_mul_lo_u32 v5, s1, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s0, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GFX6-NEXT: v_mul_hi_u32 v3, v0, v4 -; GFX6-NEXT: v_mul_lo_u32 v5, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v6, v1, v4 -; GFX6-NEXT: v_mul_lo_u32 v4, v1, v4 -; GFX6-NEXT: v_mul_hi_u32 v8, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v5, v6, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, s0, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s0, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s1, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, s0, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: v_mul_lo_u32 v6, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, v0, v3 -; GFX6-NEXT: v_mul_hi_u32 v8, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v5, v1, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, v1, v3 -; GFX6-NEXT: v_mul_hi_u32 v4, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GFX6-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GFX6-NEXT: 
v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: s_add_u32 s0, s8, s16 -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: s_addc_u32 s1, s9, s16 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GFX6-NEXT: s_xor_b64 s[8:9], s[0:1], s[16:17] -; GFX6-NEXT: v_mul_lo_u32 v2, s8, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s8, v0 -; GFX6-NEXT: v_mul_hi_u32 v4, s8, v1 -; GFX6-NEXT: v_mul_hi_u32 v5, s9, v1 -; GFX6-NEXT: v_mul_lo_u32 v1, s9, v1 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_mul_lo_u32 v4, s9, v0 +; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX6-NEXT: v_mul_hi_u32 v2, s14, v0 +; GFX6-NEXT: v_readfirstlane_b32 s16, v1 +; GFX6-NEXT: v_readfirstlane_b32 s12, v0 +; GFX6-NEXT: s_mul_i32 s13, s14, s16 +; GFX6-NEXT: v_readfirstlane_b32 s19, v2 +; GFX6-NEXT: s_mul_i32 s17, s15, s12 +; GFX6-NEXT: s_mul_i32 s18, s14, s12 +; GFX6-NEXT: s_add_i32 s13, s19, s13 +; GFX6-NEXT: v_mul_hi_u32 v3, v0, s18 +; GFX6-NEXT: s_add_i32 s13, s13, s17 +; GFX6-NEXT: v_mul_hi_u32 v0, v0, s13 +; GFX6-NEXT: v_mul_hi_u32 v4, v1, s18 +; GFX6-NEXT: v_readfirstlane_b32 s17, v3 +; GFX6-NEXT: s_mul_i32 s20, s12, s13 +; GFX6-NEXT: v_mul_hi_u32 v1, v1, s13 +; GFX6-NEXT: s_add_u32 s17, s17, s20 +; GFX6-NEXT: v_readfirstlane_b32 s20, v0 +; GFX6-NEXT: s_mul_i32 s18, s16, s18 +; GFX6-NEXT: s_addc_u32 s20, 0, s20 +; GFX6-NEXT: v_readfirstlane_b32 s19, v4 +; GFX6-NEXT: s_add_u32 s17, s17, s18 +; GFX6-NEXT: s_addc_u32 s17, s20, s19 +; GFX6-NEXT: v_readfirstlane_b32 s18, v1 +; GFX6-NEXT: s_addc_u32 s18, s18, 0 +; GFX6-NEXT: s_mul_i32 s13, s16, s13 +; GFX6-NEXT: s_add_u32 s13, s17, s13 +; GFX6-NEXT: s_addc_u32 s17, 0, s18 +; GFX6-NEXT: s_add_u32 s18, s12, s13 +; GFX6-NEXT: v_mov_b32_e32 v0, s18 +; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX6-NEXT: v_mul_hi_u32 v0, s14, v0 +; GFX6-NEXT: s_or_b32 s12, s12, s13 +; GFX6-NEXT: s_cmp_lg_u32 s12, 0 +; GFX6-NEXT: s_addc_u32 s16, s16, s17 +; GFX6-NEXT: s_mul_i32 s12, s14, s16 +; GFX6-NEXT: v_readfirstlane_b32 s13, v0 +; GFX6-NEXT: s_add_i32 s12, s13, s12 +; GFX6-NEXT: s_mul_i32 s15, s15, s18 +; GFX6-NEXT: s_mul_i32 s13, s14, s18 +; GFX6-NEXT: s_add_i32 s12, s12, s15 +; GFX6-NEXT: v_mov_b32_e32 v2, s13 +; GFX6-NEXT: v_mov_b32_e32 v0, s12 +; GFX6-NEXT: v_mul_hi_u32 v3, s16, v2 +; GFX6-NEXT: v_mul_hi_u32 v2, s18, v2 +; GFX6-NEXT: v_mul_hi_u32 v1, s16, v0 +; GFX6-NEXT: v_mul_hi_u32 v0, s18, v0 +; GFX6-NEXT: s_mul_i32 s15, s18, s12 +; GFX6-NEXT: v_readfirstlane_b32 s19, v2 +; GFX6-NEXT: s_add_u32 s15, s19, s15 +; GFX6-NEXT: v_readfirstlane_b32 s17, v0 +; GFX6-NEXT: s_mul_i32 s13, s16, s13 +; GFX6-NEXT: s_addc_u32 s17, 0, s17 +; GFX6-NEXT: v_readfirstlane_b32 s14, v3 +; GFX6-NEXT: s_add_u32 s13, s15, s13 +; GFX6-NEXT: s_addc_u32 s13, s17, s14 +; GFX6-NEXT: v_readfirstlane_b32 s14, v1 +; GFX6-NEXT: s_addc_u32 s14, s14, 0 +; GFX6-NEXT: s_mul_i32 s12, s16, s12 +; GFX6-NEXT: s_add_u32 s12, s13, s12 +; GFX6-NEXT: s_addc_u32 s14, 0, s14 +; GFX6-NEXT: s_add_u32 s15, s18, s12 +; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX6-NEXT: s_or_b32 s12, s12, s13 +; GFX6-NEXT: s_cmp_lg_u32 s12, 0 +; GFX6-NEXT: s_addc_u32 s14, s16, s14 +; GFX6-NEXT: s_ashr_i32 s12, s9, 31 +; GFX6-NEXT: s_add_u32 s8, s8, s12 +; GFX6-NEXT: s_mov_b32 s13, s12 +; GFX6-NEXT: s_addc_u32 s9, s9, s12 +; GFX6-NEXT: s_xor_b64 s[8:9], s[8:9], s[12:13] +; GFX6-NEXT: v_mov_b32_e32 v0, s14 +; GFX6-NEXT: v_mul_hi_u32 v1, s8, v0 +; 
GFX6-NEXT: v_mov_b32_e32 v2, s15 +; GFX6-NEXT: v_mul_hi_u32 v3, s8, v2 +; GFX6-NEXT: s_mul_i32 s16, s8, s14 +; GFX6-NEXT: v_readfirstlane_b32 s17, v1 +; GFX6-NEXT: v_mul_hi_u32 v1, s9, v2 +; GFX6-NEXT: v_readfirstlane_b32 s18, v3 ; GFX6-NEXT: v_mul_hi_u32 v0, s9, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc -; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, s2, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s2, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s3, v0 -; GFX6-NEXT: v_mov_b32_e32 v5, s3 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, s2, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v4, v2 -; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s9, v2 -; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s8, v3 -; GFX6-NEXT: v_subb_u32_e64 v4, s[0:1], v4, v5, vcc -; GFX6-NEXT: v_subrev_i32_e64 v5, s[0:1], s2, v3 -; GFX6-NEXT: v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1] -; GFX6-NEXT: v_cmp_le_u32_e64 s[0:1], s3, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1] -; GFX6-NEXT: v_cmp_le_u32_e64 s[0:1], s2, v5 -; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1] -; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], s3, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v4, v6, v5, s[0:1] -; GFX6-NEXT: v_add_i32_e64 v5, s[0:1], 1, v0 -; GFX6-NEXT: v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1] -; GFX6-NEXT: v_add_i32_e64 v7, s[0:1], 2, v0 -; GFX6-NEXT: v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1] -; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v4, v5, v7, s[0:1] -; GFX6-NEXT: v_cndmask_b32_e64 v5, v6, v8, s[0:1] -; GFX6-NEXT: s_xor_b64 s[0:1], s[16:17], s[12:13] -; GFX6-NEXT: s_ashr_i32 s8, s15, 31 -; GFX6-NEXT: s_add_u32 s12, s14, s8 -; GFX6-NEXT: v_mov_b32_e32 v6, s9 -; GFX6-NEXT: s_mov_b32 s9, s8 -; GFX6-NEXT: s_addc_u32 s13, s15, s8 -; GFX6-NEXT: s_xor_b64 s[12:13], s[12:13], s[8:9] -; GFX6-NEXT: v_subb_u32_e32 v2, vcc, v6, v2, vcc -; GFX6-NEXT: v_cvt_f32_u32_e32 v6, s12 -; GFX6-NEXT: v_cvt_f32_u32_e32 v7, s13 -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s3, v2 -; GFX6-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s2, v3 -; GFX6-NEXT: v_mac_f32_e32 v6, 0x4f800000, v7 -; GFX6-NEXT: v_rcp_f32_e32 v6, v6 -; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, s3, v2 -; GFX6-NEXT: v_cndmask_b32_e32 v2, v8, v3, vcc -; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 -; GFX6-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v6 -; GFX6-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2 -; GFX6-NEXT: v_trunc_f32_e32 v3, v3 -; GFX6-NEXT: v_mac_f32_e32 v2, 0xcf800000, v3 -; GFX6-NEXT: v_cvt_u32_f32_e32 v2, v2 -; GFX6-NEXT: v_cvt_u32_f32_e32 v3, v3 -; GFX6-NEXT: s_sub_u32 s2, 0, s12 -; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc -; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc -; GFX6-NEXT: v_mul_hi_u32 v4, s2, v2 -; GFX6-NEXT: v_mul_lo_u32 v5, s2, v3 -; GFX6-NEXT: s_subb_u32 s3, 0, s13 -; GFX6-NEXT: v_mul_lo_u32 v6, s3, v2 -; GFX6-NEXT: v_xor_b32_e32 v0, s0, v0 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v5, v4 -; GFX6-NEXT: v_mul_lo_u32 v5, s2, v2 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v6 -; GFX6-NEXT: v_mul_lo_u32 v6, v2, v4 -; GFX6-NEXT: v_mul_hi_u32 v7, v2, v5 -; GFX6-NEXT: v_mul_hi_u32 v8, v2, v4 -; GFX6-NEXT: v_mul_hi_u32 v9, v3, v4 -; GFX6-NEXT: v_mul_lo_u32 v4, v3, v4 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GFX6-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GFX6-NEXT: v_mul_lo_u32 v8, v3, v5 -; GFX6-NEXT: v_mul_hi_u32 v5, v3, v5 -; GFX6-NEXT: 
v_xor_b32_e32 v1, s1, v1 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, v6, v8 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v7, v5, vcc -; GFX6-NEXT: v_addc_u32_e32 v6, vcc, 0, v9, vcc -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v5, v4 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc -; GFX6-NEXT: v_mul_lo_u32 v4, s2, v3 -; GFX6-NEXT: v_mul_hi_u32 v5, s2, v2 -; GFX6-NEXT: v_mul_lo_u32 v6, s3, v2 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v5 -; GFX6-NEXT: v_mul_lo_u32 v5, s2, v2 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v6 -; GFX6-NEXT: v_mul_lo_u32 v8, v2, v4 -; GFX6-NEXT: v_mul_hi_u32 v9, v2, v5 -; GFX6-NEXT: v_mul_hi_u32 v10, v2, v4 -; GFX6-NEXT: v_mul_hi_u32 v7, v3, v5 -; GFX6-NEXT: v_mul_lo_u32 v5, v3, v5 -; GFX6-NEXT: v_mul_hi_u32 v6, v3, v4 -; GFX6-NEXT: v_add_i32_e32 v8, vcc, v9, v8 -; GFX6-NEXT: v_addc_u32_e32 v9, vcc, 0, v10, vcc -; GFX6-NEXT: v_mul_lo_u32 v4, v3, v4 -; GFX6-NEXT: v_add_i32_e32 v5, vcc, v8, v5 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v9, v7, vcc -; GFX6-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v5, v4 -; GFX6-NEXT: s_ashr_i32 s2, s11, 31 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc -; GFX6-NEXT: s_add_u32 s10, s10, s2 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: s_mov_b32 s3, s2 -; GFX6-NEXT: s_addc_u32 s11, s11, s2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc -; GFX6-NEXT: s_xor_b64 s[10:11], s[10:11], s[2:3] -; GFX6-NEXT: v_mul_lo_u32 v4, s10, v3 -; GFX6-NEXT: v_mul_hi_u32 v5, s10, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, s10, v3 -; GFX6-NEXT: v_mul_hi_u32 v8, s11, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, s11, v3 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v5, v4 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GFX6-NEXT: v_mul_lo_u32 v7, s11, v2 -; GFX6-NEXT: v_mul_hi_u32 v2, s11, v2 -; GFX6-NEXT: v_mov_b32_e32 v6, s1 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v7 -; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v5, v2, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_mul_lo_u32 v4, s12, v3 -; GFX6-NEXT: v_mul_hi_u32 v5, s12, v2 -; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0 -; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v1, v6, vcc -; GFX6-NEXT: v_mul_lo_u32 v6, s13, v2 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v5 -; GFX6-NEXT: v_mul_lo_u32 v5, s12, v2 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v6, v4 -; GFX6-NEXT: v_sub_i32_e32 v6, vcc, s11, v4 -; GFX6-NEXT: v_mov_b32_e32 v7, s13 -; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s10, v5 -; GFX6-NEXT: v_subb_u32_e64 v6, s[0:1], v6, v7, vcc -; GFX6-NEXT: v_subrev_i32_e64 v7, s[0:1], s12, v5 -; GFX6-NEXT: v_subbrev_u32_e64 v6, s[0:1], 0, v6, s[0:1] -; GFX6-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v6 -; GFX6-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[0:1] -; GFX6-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v7 -; GFX6-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1] -; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v6 -; GFX6-NEXT: v_cndmask_b32_e64 v6, v8, v7, s[0:1] -; GFX6-NEXT: v_add_i32_e64 v7, s[0:1], 1, v2 -; GFX6-NEXT: v_addc_u32_e64 v8, s[0:1], 0, v3, s[0:1] -; GFX6-NEXT: v_add_i32_e64 v9, s[0:1], 2, v2 -; GFX6-NEXT: v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1] -; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6 -; GFX6-NEXT: v_cndmask_b32_e64 v6, v7, v9, s[0:1] -; GFX6-NEXT: v_cndmask_b32_e64 v7, v8, v10, s[0:1] -; GFX6-NEXT: v_mov_b32_e32 v8, s11 -; GFX6-NEXT: v_subb_u32_e32 v4, vcc, v8, v4, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s13, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v8, 
0, -1, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s12, v5 -; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, s13, v4 -; GFX6-NEXT: v_cndmask_b32_e32 v4, v8, v5, vcc -; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v6, vcc -; GFX6-NEXT: s_xor_b64 s[0:1], s[2:3], s[8:9] -; GFX6-NEXT: v_cndmask_b32_e32 v3, v3, v7, vcc -; GFX6-NEXT: v_xor_b32_e32 v2, s0, v2 -; GFX6-NEXT: v_xor_b32_e32 v3, s1, v3 -; GFX6-NEXT: v_mov_b32_e32 v4, s1 -; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, s0, v2 -; GFX6-NEXT: v_subb_u32_e32 v3, vcc, v3, v4, vcc -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; GFX6-NEXT: s_add_u32 s16, s18, s16 +; GFX6-NEXT: s_addc_u32 s17, 0, s17 +; GFX6-NEXT: s_mul_i32 s15, s9, s15 +; GFX6-NEXT: v_readfirstlane_b32 s18, v1 +; GFX6-NEXT: s_add_u32 s15, s16, s15 +; GFX6-NEXT: s_addc_u32 s15, s17, s18 +; GFX6-NEXT: v_readfirstlane_b32 s16, v0 +; GFX6-NEXT: s_addc_u32 s16, s16, 0 +; GFX6-NEXT: s_mul_i32 s14, s9, s14 +; GFX6-NEXT: s_add_u32 s17, s15, s14 +; GFX6-NEXT: v_mov_b32_e32 v0, s17 +; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0 +; GFX6-NEXT: s_addc_u32 s16, 0, s16 +; GFX6-NEXT: s_mul_i32 s14, s6, s16 +; GFX6-NEXT: v_readfirstlane_b32 s15, v0 +; GFX6-NEXT: s_add_i32 s14, s15, s14 +; GFX6-NEXT: s_mul_i32 s15, s7, s17 +; GFX6-NEXT: s_add_i32 s18, s14, s15 +; GFX6-NEXT: s_sub_i32 s19, s9, s18 +; GFX6-NEXT: s_mul_i32 s14, s6, s17 +; GFX6-NEXT: s_sub_u32 s8, s8, s14 +; GFX6-NEXT: s_cselect_b64 s[14:15], -1, 0 +; GFX6-NEXT: s_or_b32 s20, s14, s15 +; GFX6-NEXT: s_cmp_lg_u32 s20, 0 +; GFX6-NEXT: s_subb_u32 s19, s19, s7 +; GFX6-NEXT: s_sub_u32 s21, s8, s6 +; GFX6-NEXT: s_cselect_b64 s[14:15], -1, 0 +; GFX6-NEXT: s_or_b32 s14, s14, s15 +; GFX6-NEXT: s_cmp_lg_u32 s14, 0 +; GFX6-NEXT: s_subb_u32 s14, s19, 0 +; GFX6-NEXT: s_cmp_ge_u32 s14, s7 +; GFX6-NEXT: s_cselect_b32 s15, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s21, s6 +; GFX6-NEXT: s_cselect_b32 s19, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s14, s7 +; GFX6-NEXT: s_cselect_b32 s14, s19, s15 +; GFX6-NEXT: s_add_u32 s15, s17, 1 +; GFX6-NEXT: s_addc_u32 s19, s16, 0 +; GFX6-NEXT: s_add_u32 s21, s17, 2 +; GFX6-NEXT: s_addc_u32 s22, s16, 0 +; GFX6-NEXT: s_cmp_lg_u32 s14, 0 +; GFX6-NEXT: s_cselect_b32 s14, s21, s15 +; GFX6-NEXT: s_cselect_b32 s15, s22, s19 +; GFX6-NEXT: s_cmp_lg_u32 s20, 0 +; GFX6-NEXT: s_subb_u32 s9, s9, s18 +; GFX6-NEXT: s_cmp_ge_u32 s9, s7 +; GFX6-NEXT: s_cselect_b32 s18, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s8, s6 +; GFX6-NEXT: s_cselect_b32 s6, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s9, s7 +; GFX6-NEXT: s_cselect_b32 s6, s6, s18 +; GFX6-NEXT: s_cmp_lg_u32 s6, 0 +; GFX6-NEXT: s_cselect_b32 s7, s15, s16 +; GFX6-NEXT: s_cselect_b32 s6, s14, s17 +; GFX6-NEXT: s_xor_b64 s[2:3], s[12:13], s[2:3] +; GFX6-NEXT: s_xor_b64 s[6:7], s[6:7], s[2:3] +; GFX6-NEXT: s_sub_u32 s14, s6, s2 +; GFX6-NEXT: s_subb_u32 s15, s7, s3 +; GFX6-NEXT: s_ashr_i32 s6, s1, 31 +; GFX6-NEXT: s_add_u32 s0, s0, s6 +; GFX6-NEXT: s_mov_b32 s7, s6 +; GFX6-NEXT: s_addc_u32 s1, s1, s6 +; GFX6-NEXT: s_xor_b64 s[8:9], s[0:1], s[6:7] +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8 +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s9 +; GFX6-NEXT: s_sub_u32 s12, 0, s8 +; GFX6-NEXT: s_subb_u32 s13, 0, s9 +; GFX6-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1 +; GFX6-NEXT: v_rcp_f32_e32 v0, v0 +; GFX6-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 +; GFX6-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 +; GFX6-NEXT: v_trunc_f32_e32 v1, v1 +; GFX6-NEXT: v_mac_f32_e32 v0, 0xcf800000, v1 +; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX6-NEXT: v_mul_hi_u32 v2, 
s12, v0 +; GFX6-NEXT: v_readfirstlane_b32 s16, v1 +; GFX6-NEXT: v_readfirstlane_b32 s2, v0 +; GFX6-NEXT: s_mul_i32 s1, s12, s16 +; GFX6-NEXT: v_readfirstlane_b32 s3, v2 +; GFX6-NEXT: s_mul_i32 s0, s13, s2 +; GFX6-NEXT: s_add_i32 s1, s3, s1 +; GFX6-NEXT: s_add_i32 s3, s1, s0 +; GFX6-NEXT: s_mul_i32 s17, s12, s2 +; GFX6-NEXT: v_mul_hi_u32 v2, v0, s3 +; GFX6-NEXT: v_mul_hi_u32 v0, v0, s17 +; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; GFX6-NEXT: s_mul_i32 s4, s2, s3 +; GFX6-NEXT: v_readfirstlane_b32 s5, v2 +; GFX6-NEXT: v_readfirstlane_b32 s18, v0 +; GFX6-NEXT: v_mul_hi_u32 v0, v1, s17 +; GFX6-NEXT: v_mul_hi_u32 v1, v1, s3 +; GFX6-NEXT: s_add_u32 s4, s18, s4 +; GFX6-NEXT: s_addc_u32 s5, 0, s5 +; GFX6-NEXT: s_mul_i32 s17, s16, s17 +; GFX6-NEXT: v_readfirstlane_b32 s18, v0 +; GFX6-NEXT: s_add_u32 s4, s4, s17 +; GFX6-NEXT: s_addc_u32 s4, s5, s18 +; GFX6-NEXT: v_readfirstlane_b32 s5, v1 +; GFX6-NEXT: s_addc_u32 s5, s5, 0 +; GFX6-NEXT: s_mul_i32 s3, s16, s3 +; GFX6-NEXT: s_add_u32 s3, s4, s3 +; GFX6-NEXT: s_addc_u32 s4, 0, s5 +; GFX6-NEXT: s_add_u32 s5, s2, s3 +; GFX6-NEXT: v_mov_b32_e32 v0, s5 +; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0 +; GFX6-NEXT: v_mul_hi_u32 v0, s12, v0 +; GFX6-NEXT: s_or_b32 s2, s2, s3 +; GFX6-NEXT: s_cmp_lg_u32 s2, 0 +; GFX6-NEXT: s_addc_u32 s4, s16, s4 +; GFX6-NEXT: s_mul_i32 s2, s12, s4 +; GFX6-NEXT: v_readfirstlane_b32 s3, v0 +; GFX6-NEXT: s_add_i32 s2, s3, s2 +; GFX6-NEXT: s_mul_i32 s13, s13, s5 +; GFX6-NEXT: s_mul_i32 s3, s12, s5 +; GFX6-NEXT: s_add_i32 s2, s2, s13 +; GFX6-NEXT: v_mov_b32_e32 v2, s3 +; GFX6-NEXT: v_mov_b32_e32 v0, s2 +; GFX6-NEXT: v_mul_hi_u32 v3, s4, v2 +; GFX6-NEXT: v_mul_hi_u32 v2, s5, v2 +; GFX6-NEXT: v_mul_hi_u32 v1, s4, v0 +; GFX6-NEXT: v_mul_hi_u32 v0, s5, v0 +; GFX6-NEXT: s_mul_i32 s13, s5, s2 +; GFX6-NEXT: v_readfirstlane_b32 s17, v2 +; GFX6-NEXT: s_add_u32 s13, s17, s13 +; GFX6-NEXT: v_readfirstlane_b32 s16, v0 +; GFX6-NEXT: s_mul_i32 s3, s4, s3 +; GFX6-NEXT: s_addc_u32 s16, 0, s16 +; GFX6-NEXT: v_readfirstlane_b32 s12, v3 +; GFX6-NEXT: s_add_u32 s3, s13, s3 +; GFX6-NEXT: s_addc_u32 s3, s16, s12 +; GFX6-NEXT: v_readfirstlane_b32 s12, v1 +; GFX6-NEXT: s_addc_u32 s12, s12, 0 +; GFX6-NEXT: s_mul_i32 s2, s4, s2 +; GFX6-NEXT: s_add_u32 s2, s3, s2 +; GFX6-NEXT: s_addc_u32 s12, 0, s12 +; GFX6-NEXT: s_add_u32 s13, s5, s2 +; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0 +; GFX6-NEXT: s_or_b32 s2, s2, s3 +; GFX6-NEXT: s_cmp_lg_u32 s2, 0 +; GFX6-NEXT: s_addc_u32 s12, s4, s12 +; GFX6-NEXT: s_ashr_i32 s4, s11, 31 +; GFX6-NEXT: s_add_u32 s2, s10, s4 +; GFX6-NEXT: s_mov_b32 s5, s4 +; GFX6-NEXT: s_addc_u32 s3, s11, s4 +; GFX6-NEXT: s_xor_b64 s[10:11], s[2:3], s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v0, s12 +; GFX6-NEXT: v_mul_hi_u32 v1, s10, v0 +; GFX6-NEXT: v_mov_b32_e32 v2, s13 +; GFX6-NEXT: v_mul_hi_u32 v3, s10, v2 +; GFX6-NEXT: s_mul_i32 s2, s10, s12 +; GFX6-NEXT: v_readfirstlane_b32 s16, v1 +; GFX6-NEXT: v_mul_hi_u32 v1, s11, v2 +; GFX6-NEXT: v_readfirstlane_b32 s17, v3 +; GFX6-NEXT: v_mul_hi_u32 v0, s11, v0 +; GFX6-NEXT: s_add_u32 s2, s17, s2 +; GFX6-NEXT: s_addc_u32 s16, 0, s16 +; GFX6-NEXT: s_mul_i32 s13, s11, s13 +; GFX6-NEXT: v_readfirstlane_b32 s17, v1 +; GFX6-NEXT: s_add_u32 s2, s2, s13 +; GFX6-NEXT: s_addc_u32 s2, s16, s17 +; GFX6-NEXT: v_readfirstlane_b32 s13, v0 +; GFX6-NEXT: s_addc_u32 s13, s13, 0 +; GFX6-NEXT: s_mul_i32 s12, s11, s12 +; GFX6-NEXT: s_add_u32 s16, s2, s12 +; GFX6-NEXT: v_mov_b32_e32 v0, s16 +; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0 +; GFX6-NEXT: s_addc_u32 s17, 0, s13 +; GFX6-NEXT: s_mul_i32 s12, s8, s17 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 
+; GFX6-NEXT: v_readfirstlane_b32 s13, v0 +; GFX6-NEXT: s_add_i32 s12, s13, s12 +; GFX6-NEXT: s_mul_i32 s13, s9, s16 +; GFX6-NEXT: s_add_i32 s18, s12, s13 +; GFX6-NEXT: s_sub_i32 s19, s11, s18 +; GFX6-NEXT: s_mul_i32 s12, s8, s16 +; GFX6-NEXT: s_sub_u32 s10, s10, s12 +; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX6-NEXT: s_or_b32 s20, s12, s13 +; GFX6-NEXT: s_cmp_lg_u32 s20, 0 +; GFX6-NEXT: s_subb_u32 s19, s19, s9 +; GFX6-NEXT: s_sub_u32 s21, s10, s8 +; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX6-NEXT: s_or_b32 s12, s12, s13 +; GFX6-NEXT: s_cmp_lg_u32 s12, 0 +; GFX6-NEXT: s_subb_u32 s12, s19, 0 +; GFX6-NEXT: s_cmp_ge_u32 s12, s9 +; GFX6-NEXT: s_cselect_b32 s13, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s21, s8 +; GFX6-NEXT: s_cselect_b32 s19, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s12, s9 +; GFX6-NEXT: s_cselect_b32 s12, s19, s13 +; GFX6-NEXT: s_add_u32 s13, s16, 1 +; GFX6-NEXT: s_addc_u32 s19, s17, 0 +; GFX6-NEXT: s_add_u32 s21, s16, 2 +; GFX6-NEXT: s_addc_u32 s22, s17, 0 +; GFX6-NEXT: s_cmp_lg_u32 s12, 0 +; GFX6-NEXT: s_cselect_b32 s12, s21, s13 +; GFX6-NEXT: s_cselect_b32 s13, s22, s19 +; GFX6-NEXT: s_cmp_lg_u32 s20, 0 +; GFX6-NEXT: s_subb_u32 s11, s11, s18 +; GFX6-NEXT: s_cmp_ge_u32 s11, s9 +; GFX6-NEXT: s_cselect_b32 s18, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s10, s8 +; GFX6-NEXT: s_cselect_b32 s8, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s11, s9 +; GFX6-NEXT: s_cselect_b32 s8, s8, s18 +; GFX6-NEXT: s_cmp_lg_u32 s8, 0 +; GFX6-NEXT: s_cselect_b32 s9, s13, s17 +; GFX6-NEXT: s_cselect_b32 s8, s12, s16 +; GFX6-NEXT: s_xor_b64 s[4:5], s[4:5], s[6:7] +; GFX6-NEXT: s_xor_b64 s[6:7], s[8:9], s[4:5] +; GFX6-NEXT: s_sub_u32 s4, s6, s4 +; GFX6-NEXT: s_subb_u32 s5, s7, s5 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s14 +; GFX6-NEXT: v_mov_b32_e32 v1, s15 +; GFX6-NEXT: v_mov_b32_e32 v2, s4 +; GFX6-NEXT: v_mov_b32_e32 v3, s5 +; GFX6-NEXT: s_waitcnt lgkmcnt(0) +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: sdiv_v2i64_pow2_shl_denom: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34 -; GFX9-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x24 -; GFX9-NEXT: v_mov_b32_e32 v4, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshl_b64 s[0:1], 0x1000, s12 -; GFX9-NEXT: s_lshl_b64 s[6:7], 0x1000, s14 -; GFX9-NEXT: s_ashr_i32 s12, s1, 31 -; GFX9-NEXT: s_add_u32 s0, s0, s12 -; GFX9-NEXT: s_mov_b32 s13, s12 -; GFX9-NEXT: s_addc_u32 s1, s1, s12 -; GFX9-NEXT: s_xor_b64 s[14:15], s[0:1], s[12:13] -; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s14 -; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s15 -; GFX9-NEXT: s_sub_u32 s0, 0, s14 -; GFX9-NEXT: s_subb_u32 s1, 0, s15 +; GFX9-NEXT: s_lshl_b64 s[6:7], 0x1000, s12 +; GFX9-NEXT: s_lshl_b64 s[0:1], 0x1000, s14 +; GFX9-NEXT: s_ashr_i32 s2, s7, 31 +; GFX9-NEXT: s_add_u32 s6, s6, s2 +; GFX9-NEXT: s_mov_b32 s3, s2 +; GFX9-NEXT: s_addc_u32 s7, s7, s2 +; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], s[2:3] +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7 +; GFX9-NEXT: s_sub_u32 s14, 0, s6 +; GFX9-NEXT: s_subb_u32 s15, 0, s7 ; GFX9-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1 ; GFX9-NEXT: v_rcp_f32_e32 v0, v0 ; GFX9-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -8554,270 +8634,255 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x ; GFX9-NEXT: v_mac_f32_e32 v0, 0xcf800000, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX9-NEXT: v_readfirstlane_b32 s16, v1 +; GFX9-NEXT: v_readfirstlane_b32 s12, v0 +; GFX9-NEXT: s_mul_i32 s13, s14, s16 +; GFX9-NEXT: s_mul_hi_u32 
s18, s14, s12 +; GFX9-NEXT: s_mul_i32 s17, s15, s12 +; GFX9-NEXT: s_add_i32 s13, s18, s13 +; GFX9-NEXT: s_mul_i32 s19, s14, s12 +; GFX9-NEXT: s_add_i32 s13, s13, s17 +; GFX9-NEXT: s_mul_hi_u32 s18, s12, s19 +; GFX9-NEXT: s_mul_i32 s20, s12, s13 +; GFX9-NEXT: s_mul_hi_u32 s17, s12, s13 +; GFX9-NEXT: s_add_u32 s18, s18, s20 +; GFX9-NEXT: s_addc_u32 s17, 0, s17 +; GFX9-NEXT: s_mul_hi_u32 s20, s16, s19 +; GFX9-NEXT: s_mul_i32 s19, s16, s19 +; GFX9-NEXT: s_add_u32 s18, s18, s19 +; GFX9-NEXT: s_mul_hi_u32 s21, s16, s13 +; GFX9-NEXT: s_addc_u32 s17, s17, s20 +; GFX9-NEXT: s_addc_u32 s18, s21, 0 +; GFX9-NEXT: s_mul_i32 s13, s16, s13 +; GFX9-NEXT: s_add_u32 s13, s17, s13 +; GFX9-NEXT: s_addc_u32 s17, 0, s18 +; GFX9-NEXT: s_add_u32 s18, s12, s13 +; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GFX9-NEXT: s_addc_u32 s16, s16, s17 +; GFX9-NEXT: s_mul_i32 s12, s14, s16 +; GFX9-NEXT: s_mul_hi_u32 s13, s14, s18 +; GFX9-NEXT: s_add_i32 s12, s13, s12 +; GFX9-NEXT: s_mul_i32 s15, s15, s18 +; GFX9-NEXT: s_add_i32 s12, s12, s15 +; GFX9-NEXT: s_mul_i32 s14, s14, s18 +; GFX9-NEXT: s_mul_hi_u32 s15, s16, s14 +; GFX9-NEXT: s_mul_i32 s17, s16, s14 +; GFX9-NEXT: s_mul_i32 s20, s18, s12 +; GFX9-NEXT: s_mul_hi_u32 s14, s18, s14 +; GFX9-NEXT: s_mul_hi_u32 s19, s18, s12 +; GFX9-NEXT: s_add_u32 s14, s14, s20 +; GFX9-NEXT: s_addc_u32 s19, 0, s19 +; GFX9-NEXT: s_add_u32 s14, s14, s17 +; GFX9-NEXT: s_mul_hi_u32 s13, s16, s12 +; GFX9-NEXT: s_addc_u32 s14, s19, s15 +; GFX9-NEXT: s_addc_u32 s13, s13, 0 +; GFX9-NEXT: s_mul_i32 s12, s16, s12 +; GFX9-NEXT: s_add_u32 s12, s14, s12 +; GFX9-NEXT: s_addc_u32 s14, 0, s13 +; GFX9-NEXT: s_add_u32 s15, s18, s12 +; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GFX9-NEXT: s_addc_u32 s14, s16, s14 +; GFX9-NEXT: s_ashr_i32 s12, s9, 31 +; GFX9-NEXT: s_add_u32 s8, s8, s12 +; GFX9-NEXT: s_mov_b32 s13, s12 +; GFX9-NEXT: s_addc_u32 s9, s9, s12 +; GFX9-NEXT: s_xor_b64 s[8:9], s[8:9], s[12:13] +; GFX9-NEXT: s_mul_i32 s17, s8, s14 +; GFX9-NEXT: s_mul_hi_u32 s18, s8, s15 +; GFX9-NEXT: s_mul_hi_u32 s16, s8, s14 +; GFX9-NEXT: s_add_u32 s17, s18, s17 +; GFX9-NEXT: s_addc_u32 s16, 0, s16 +; GFX9-NEXT: s_mul_hi_u32 s19, s9, s15 +; GFX9-NEXT: s_mul_i32 s15, s9, s15 +; GFX9-NEXT: s_add_u32 s15, s17, s15 +; GFX9-NEXT: s_mul_hi_u32 s18, s9, s14 +; GFX9-NEXT: s_addc_u32 s15, s16, s19 +; GFX9-NEXT: s_addc_u32 s16, s18, 0 +; GFX9-NEXT: s_mul_i32 s14, s9, s14 +; GFX9-NEXT: s_add_u32 s18, s15, s14 +; GFX9-NEXT: s_addc_u32 s19, 0, s16 +; GFX9-NEXT: s_mul_i32 s14, s6, s19 +; GFX9-NEXT: s_mul_hi_u32 s15, s6, s18 +; GFX9-NEXT: s_add_i32 s14, s15, s14 +; GFX9-NEXT: s_mul_i32 s15, s7, s18 +; GFX9-NEXT: s_add_i32 s20, s14, s15 +; GFX9-NEXT: s_sub_i32 s16, s9, s20 +; GFX9-NEXT: s_mul_i32 s14, s6, s18 +; GFX9-NEXT: s_sub_u32 s8, s8, s14 +; GFX9-NEXT: s_cselect_b64 s[14:15], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0 +; GFX9-NEXT: s_subb_u32 s21, s16, s7 +; GFX9-NEXT: s_sub_u32 s22, s8, s6 +; GFX9-NEXT: s_cselect_b64 s[16:17], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GFX9-NEXT: s_subb_u32 s16, s21, 0 +; GFX9-NEXT: s_cmp_ge_u32 s16, s7 +; GFX9-NEXT: s_cselect_b32 s17, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s22, s6 +; GFX9-NEXT: s_cselect_b32 s21, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s16, s7 +; GFX9-NEXT: s_cselect_b32 s16, s21, s17 +; GFX9-NEXT: s_add_u32 s17, s18, 1 +; GFX9-NEXT: s_addc_u32 s21, s19, 0 +; GFX9-NEXT: s_add_u32 s22, s18, 2 +; GFX9-NEXT: s_addc_u32 s23, s19, 0 +; GFX9-NEXT: s_cmp_lg_u32 s16, 0 +; GFX9-NEXT: s_cselect_b32 s16, s22, s17 +; 
GFX9-NEXT: s_cselect_b32 s17, s23, s21 +; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0 +; GFX9-NEXT: s_subb_u32 s9, s9, s20 +; GFX9-NEXT: s_cmp_ge_u32 s9, s7 +; GFX9-NEXT: s_cselect_b32 s14, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s8, s6 +; GFX9-NEXT: s_cselect_b32 s6, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s9, s7 +; GFX9-NEXT: s_cselect_b32 s6, s6, s14 +; GFX9-NEXT: s_cmp_lg_u32 s6, 0 +; GFX9-NEXT: s_cselect_b32 s7, s17, s19 +; GFX9-NEXT: s_cselect_b32 s6, s16, s18 +; GFX9-NEXT: s_xor_b64 s[2:3], s[12:13], s[2:3] +; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], s[2:3] +; GFX9-NEXT: s_sub_u32 s14, s6, s2 +; GFX9-NEXT: s_subb_u32 s15, s7, s3 +; GFX9-NEXT: s_ashr_i32 s2, s1, 31 +; GFX9-NEXT: s_add_u32 s0, s0, s2 +; GFX9-NEXT: s_mov_b32 s3, s2 +; GFX9-NEXT: s_addc_u32 s1, s1, s2 +; GFX9-NEXT: s_xor_b64 s[6:7], s[0:1], s[2:3] +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7 +; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-NEXT: s_sub_u32 s8, 0, s6 +; GFX9-NEXT: s_subb_u32 s9, 0, s7 +; GFX9-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1 +; GFX9-NEXT: v_rcp_f32_e32 v1, v0 +; GFX9-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-NEXT: v_mul_f32_e32 v1, 0x5f7ffffc, v1 +; GFX9-NEXT: v_mul_f32_e32 v2, 0x2f800000, v1 +; GFX9-NEXT: v_trunc_f32_e32 v2, v2 +; GFX9-NEXT: v_mac_f32_e32 v1, 0xcf800000, v2 +; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2 ; GFX9-NEXT: v_readfirstlane_b32 s4, v1 -; GFX9-NEXT: v_readfirstlane_b32 s5, v0 -; GFX9-NEXT: s_mul_i32 s16, s0, s4 -; GFX9-NEXT: s_mul_hi_u32 s18, s0, s5 -; GFX9-NEXT: s_mul_i32 s17, s1, s5 -; GFX9-NEXT: s_add_i32 s16, s18, s16 -; GFX9-NEXT: s_mul_i32 s19, s0, s5 -; GFX9-NEXT: s_add_i32 s16, s16, s17 -; GFX9-NEXT: s_mul_hi_u32 s17, s5, s16 -; GFX9-NEXT: s_mul_i32 s18, s5, s16 -; GFX9-NEXT: s_mul_hi_u32 s5, s5, s19 -; GFX9-NEXT: s_add_u32 s5, s5, s18 +; GFX9-NEXT: v_readfirstlane_b32 s13, v2 +; GFX9-NEXT: s_mul_hi_u32 s12, s8, s4 +; GFX9-NEXT: s_mul_i32 s16, s8, s13 +; GFX9-NEXT: s_mul_i32 s5, s9, s4 +; GFX9-NEXT: s_add_i32 s12, s12, s16 +; GFX9-NEXT: s_add_i32 s12, s12, s5 +; GFX9-NEXT: s_mul_i32 s17, s8, s4 +; GFX9-NEXT: s_mul_i32 s16, s4, s12 +; GFX9-NEXT: s_mul_hi_u32 s18, s4, s17 +; GFX9-NEXT: s_mul_hi_u32 s5, s4, s12 +; GFX9-NEXT: s_add_u32 s16, s18, s16 +; GFX9-NEXT: s_addc_u32 s5, 0, s5 +; GFX9-NEXT: s_mul_hi_u32 s19, s13, s17 +; GFX9-NEXT: s_mul_i32 s17, s13, s17 +; GFX9-NEXT: s_add_u32 s16, s16, s17 +; GFX9-NEXT: s_mul_hi_u32 s18, s13, s12 +; GFX9-NEXT: s_addc_u32 s5, s5, s19 +; GFX9-NEXT: s_addc_u32 s16, s18, 0 +; GFX9-NEXT: s_mul_i32 s12, s13, s12 +; GFX9-NEXT: s_add_u32 s5, s5, s12 +; GFX9-NEXT: s_addc_u32 s12, 0, s16 +; GFX9-NEXT: s_add_u32 s16, s4, s5 +; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9-NEXT: s_addc_u32 s12, s13, s12 +; GFX9-NEXT: s_mul_i32 s4, s8, s12 +; GFX9-NEXT: s_mul_hi_u32 s5, s8, s16 +; GFX9-NEXT: s_add_i32 s4, s5, s4 +; GFX9-NEXT: s_mul_i32 s9, s9, s16 +; GFX9-NEXT: s_add_i32 s4, s4, s9 +; GFX9-NEXT: s_mul_i32 s8, s8, s16 +; GFX9-NEXT: s_mul_hi_u32 s9, s12, s8 +; GFX9-NEXT: s_mul_i32 s13, s12, s8 +; GFX9-NEXT: s_mul_i32 s18, s16, s4 +; GFX9-NEXT: s_mul_hi_u32 s8, s16, s8 +; GFX9-NEXT: s_mul_hi_u32 s17, s16, s4 +; GFX9-NEXT: s_add_u32 s8, s8, s18 ; GFX9-NEXT: s_addc_u32 s17, 0, s17 -; GFX9-NEXT: s_mul_hi_u32 s20, s4, s19 -; GFX9-NEXT: s_mul_i32 s19, s4, s19 -; GFX9-NEXT: s_add_u32 s5, s5, s19 -; GFX9-NEXT: s_mul_hi_u32 s18, s4, s16 -; GFX9-NEXT: s_addc_u32 s5, s17, s20 -; GFX9-NEXT: s_addc_u32 s17, s18, 0 -; GFX9-NEXT: s_mul_i32 s16, s4, s16 -; GFX9-NEXT: s_add_u32 s5, 
s5, s16 -; GFX9-NEXT: s_addc_u32 s16, 0, s17 -; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s5, v0 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s4, s4, s16 -; GFX9-NEXT: v_readfirstlane_b32 s16, v0 -; GFX9-NEXT: s_mul_i32 s5, s0, s4 -; GFX9-NEXT: s_mul_hi_u32 s17, s0, s16 -; GFX9-NEXT: s_add_i32 s5, s17, s5 -; GFX9-NEXT: s_mul_i32 s1, s1, s16 -; GFX9-NEXT: s_add_i32 s5, s5, s1 -; GFX9-NEXT: s_mul_i32 s0, s0, s16 -; GFX9-NEXT: s_mul_hi_u32 s17, s4, s0 -; GFX9-NEXT: s_mul_i32 s18, s4, s0 -; GFX9-NEXT: s_mul_i32 s20, s16, s5 -; GFX9-NEXT: s_mul_hi_u32 s0, s16, s0 -; GFX9-NEXT: s_mul_hi_u32 s19, s16, s5 -; GFX9-NEXT: s_add_u32 s0, s0, s20 -; GFX9-NEXT: s_addc_u32 s16, 0, s19 -; GFX9-NEXT: s_add_u32 s0, s0, s18 -; GFX9-NEXT: s_mul_hi_u32 s1, s4, s5 -; GFX9-NEXT: s_addc_u32 s0, s16, s17 -; GFX9-NEXT: s_addc_u32 s1, s1, 0 -; GFX9-NEXT: s_mul_i32 s5, s4, s5 -; GFX9-NEXT: s_add_u32 s0, s0, s5 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s16, s4, s1 -; GFX9-NEXT: s_ashr_i32 s4, s9, 31 -; GFX9-NEXT: s_add_u32 s0, s8, s4 +; GFX9-NEXT: s_add_u32 s8, s8, s13 +; GFX9-NEXT: s_mul_hi_u32 s5, s12, s4 +; GFX9-NEXT: s_addc_u32 s8, s17, s9 +; GFX9-NEXT: s_addc_u32 s5, s5, 0 +; GFX9-NEXT: s_mul_i32 s4, s12, s4 +; GFX9-NEXT: s_add_u32 s4, s8, s4 +; GFX9-NEXT: s_addc_u32 s8, 0, s5 +; GFX9-NEXT: s_add_u32 s13, s16, s4 +; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9-NEXT: s_addc_u32 s12, s12, s8 +; GFX9-NEXT: s_ashr_i32 s4, s11, 31 +; GFX9-NEXT: s_add_u32 s8, s10, s4 ; GFX9-NEXT: s_mov_b32 s5, s4 -; GFX9-NEXT: s_addc_u32 s1, s9, s4 -; GFX9-NEXT: s_xor_b64 s[8:9], s[0:1], s[4:5] -; GFX9-NEXT: v_readfirstlane_b32 s17, v0 -; GFX9-NEXT: s_mul_i32 s1, s8, s16 -; GFX9-NEXT: s_mul_hi_u32 s18, s8, s17 -; GFX9-NEXT: s_mul_hi_u32 s0, s8, s16 -; GFX9-NEXT: s_add_u32 s1, s18, s1 -; GFX9-NEXT: s_addc_u32 s0, 0, s0 -; GFX9-NEXT: s_mul_hi_u32 s19, s9, s17 -; GFX9-NEXT: s_mul_i32 s17, s9, s17 -; GFX9-NEXT: s_add_u32 s1, s1, s17 -; GFX9-NEXT: s_mul_hi_u32 s18, s9, s16 -; GFX9-NEXT: s_addc_u32 s0, s0, s19 -; GFX9-NEXT: s_addc_u32 s1, s18, 0 -; GFX9-NEXT: s_mul_i32 s16, s9, s16 -; GFX9-NEXT: s_add_u32 s16, s0, s16 -; GFX9-NEXT: s_addc_u32 s17, 0, s1 -; GFX9-NEXT: s_mul_i32 s0, s14, s17 -; GFX9-NEXT: s_mul_hi_u32 s1, s14, s16 -; GFX9-NEXT: s_add_i32 s0, s1, s0 -; GFX9-NEXT: s_mul_i32 s1, s15, s16 -; GFX9-NEXT: s_add_i32 s18, s0, s1 -; GFX9-NEXT: s_mul_i32 s1, s14, s16 -; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: s_sub_i32 s0, s9, s18 -; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s8, v0 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_subb_u32 s8, s0, s15 -; GFX9-NEXT: v_subrev_co_u32_e64 v1, s[0:1], s14, v0 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s8, s8, 0 -; GFX9-NEXT: s_cmp_ge_u32 s8, s15 +; GFX9-NEXT: s_addc_u32 s9, s11, s4 +; GFX9-NEXT: s_xor_b64 s[8:9], s[8:9], s[4:5] +; GFX9-NEXT: s_mul_i32 s11, s8, s12 +; GFX9-NEXT: s_mul_hi_u32 s16, s8, s13 +; GFX9-NEXT: s_mul_hi_u32 s10, s8, s12 +; GFX9-NEXT: s_add_u32 s11, s16, s11 +; GFX9-NEXT: s_addc_u32 s10, 0, s10 +; GFX9-NEXT: s_mul_hi_u32 s17, s9, s13 +; GFX9-NEXT: s_mul_i32 s13, s9, s13 +; GFX9-NEXT: s_add_u32 s11, s11, s13 +; GFX9-NEXT: s_mul_hi_u32 s16, s9, s12 +; GFX9-NEXT: s_addc_u32 s10, s10, s17 +; GFX9-NEXT: s_addc_u32 s11, s16, 0 +; GFX9-NEXT: s_mul_i32 s12, s9, s12 +; GFX9-NEXT: s_add_u32 s16, s10, s12 +; GFX9-NEXT: s_addc_u32 s17, 0, s11 +; GFX9-NEXT: s_mul_i32 s10, s6, s17 +; GFX9-NEXT: s_mul_hi_u32 s11, s6, s16 
+; GFX9-NEXT: s_add_i32 s10, s11, s10 +; GFX9-NEXT: s_mul_i32 s11, s7, s16 +; GFX9-NEXT: s_add_i32 s18, s10, s11 +; GFX9-NEXT: s_sub_i32 s12, s9, s18 +; GFX9-NEXT: s_mul_i32 s10, s6, s16 +; GFX9-NEXT: s_sub_u32 s8, s8, s10 +; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s19, s12, s7 +; GFX9-NEXT: s_sub_u32 s20, s8, s6 +; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GFX9-NEXT: s_subb_u32 s12, s19, 0 +; GFX9-NEXT: s_cmp_ge_u32 s12, s7 +; GFX9-NEXT: s_cselect_b32 s13, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s20, s6 ; GFX9-NEXT: s_cselect_b32 s19, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v1 -; GFX9-NEXT: s_cmp_eq_u32 s8, s15 -; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v2, s19 -; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, v1, s[0:1] -; GFX9-NEXT: s_add_u32 s0, s16, 1 -; GFX9-NEXT: s_addc_u32 s8, s17, 0 -; GFX9-NEXT: s_add_u32 s1, s16, 2 +; GFX9-NEXT: s_cmp_eq_u32 s12, s7 +; GFX9-NEXT: s_cselect_b32 s12, s19, s13 +; GFX9-NEXT: s_add_u32 s13, s16, 1 ; GFX9-NEXT: s_addc_u32 s19, s17, 0 -; GFX9-NEXT: v_mov_b32_e32 v2, s0 -; GFX9-NEXT: v_mov_b32_e32 v3, s1 -; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, v3, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v2, s8 -; GFX9-NEXT: v_mov_b32_e32 v3, s19 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GFX9-NEXT: s_subb_u32 s0, s9, s18 -; GFX9-NEXT: s_cmp_ge_u32 s0, s15 -; GFX9-NEXT: s_cselect_b32 s1, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s14, v0 -; GFX9-NEXT: s_cmp_eq_u32 s0, s15 -; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc -; GFX9-NEXT: v_mov_b32_e32 v3, s1 -; GFX9-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX9-NEXT: s_xor_b64 s[0:1], s[4:5], s[12:13] -; GFX9-NEXT: s_ashr_i32 s4, s7, 31 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc -; GFX9-NEXT: s_add_u32 s6, s6, s4 -; GFX9-NEXT: v_mov_b32_e32 v3, s17 -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; GFX9-NEXT: s_mov_b32 s5, s4 -; GFX9-NEXT: s_addc_u32 s7, s7, s4 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc -; GFX9-NEXT: v_mov_b32_e32 v2, s16 -; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], s[4:5] -; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc -; GFX9-NEXT: v_cvt_f32_u32_e32 v2, s6 -; GFX9-NEXT: v_cvt_f32_u32_e32 v3, s7 -; GFX9-NEXT: v_xor_b32_e32 v1, s0, v1 -; GFX9-NEXT: v_xor_b32_e32 v5, s1, v0 -; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, s0, v1 -; GFX9-NEXT: v_mac_f32_e32 v2, 0x4f800000, v3 -; GFX9-NEXT: v_rcp_f32_e32 v2, v2 -; GFX9-NEXT: s_sub_u32 s0, 0, s6 -; GFX9-NEXT: v_mov_b32_e32 v6, s1 -; GFX9-NEXT: s_subb_u32 s1, 0, s7 -; GFX9-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2 -; GFX9-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2 -; GFX9-NEXT: v_trunc_f32_e32 v3, v3 -; GFX9-NEXT: v_mac_f32_e32 v2, 0xcf800000, v3 -; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2 -; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3 -; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v5, v6, vcc -; GFX9-NEXT: v_readfirstlane_b32 s8, v2 -; GFX9-NEXT: v_readfirstlane_b32 s13, v3 -; GFX9-NEXT: s_mul_hi_u32 s12, s0, s8 -; GFX9-NEXT: s_mul_i32 s14, s0, s13 -; GFX9-NEXT: s_mul_i32 s9, s1, s8 -; GFX9-NEXT: s_add_i32 s12, s12, s14 -; GFX9-NEXT: s_add_i32 s12, s12, s9 -; GFX9-NEXT: s_mul_i32 s15, s0, s8 -; GFX9-NEXT: s_mul_hi_u32 s9, s8, s12 -; GFX9-NEXT: s_mul_i32 s14, s8, s12 -; GFX9-NEXT: s_mul_hi_u32 s8, s8, s15 -; GFX9-NEXT: s_add_u32 s8, s8, s14 -; GFX9-NEXT: s_addc_u32 s9, 0, s9 -; GFX9-NEXT: s_mul_hi_u32 s16, s13, s15 -; GFX9-NEXT: s_mul_i32 s15, s13, 
s15 -; GFX9-NEXT: s_add_u32 s8, s8, s15 -; GFX9-NEXT: s_mul_hi_u32 s14, s13, s12 -; GFX9-NEXT: s_addc_u32 s8, s9, s16 -; GFX9-NEXT: s_addc_u32 s9, s14, 0 -; GFX9-NEXT: s_mul_i32 s12, s13, s12 -; GFX9-NEXT: s_add_u32 s8, s8, s12 -; GFX9-NEXT: s_addc_u32 s9, 0, s9 -; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s8, v2 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s8, s13, s9 -; GFX9-NEXT: v_readfirstlane_b32 s12, v2 -; GFX9-NEXT: s_mul_i32 s9, s0, s8 -; GFX9-NEXT: s_mul_hi_u32 s13, s0, s12 -; GFX9-NEXT: s_add_i32 s9, s13, s9 -; GFX9-NEXT: s_mul_i32 s1, s1, s12 -; GFX9-NEXT: s_add_i32 s9, s9, s1 -; GFX9-NEXT: s_mul_i32 s0, s0, s12 -; GFX9-NEXT: s_mul_hi_u32 s13, s8, s0 -; GFX9-NEXT: s_mul_i32 s14, s8, s0 -; GFX9-NEXT: s_mul_i32 s16, s12, s9 -; GFX9-NEXT: s_mul_hi_u32 s0, s12, s0 -; GFX9-NEXT: s_mul_hi_u32 s15, s12, s9 -; GFX9-NEXT: s_add_u32 s0, s0, s16 -; GFX9-NEXT: s_addc_u32 s12, 0, s15 -; GFX9-NEXT: s_add_u32 s0, s0, s14 -; GFX9-NEXT: s_mul_hi_u32 s1, s8, s9 -; GFX9-NEXT: s_addc_u32 s0, s12, s13 -; GFX9-NEXT: s_addc_u32 s1, s1, 0 -; GFX9-NEXT: s_mul_i32 s9, s8, s9 -; GFX9-NEXT: s_add_u32 s0, s0, s9 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s12, s8, s1 -; GFX9-NEXT: s_ashr_i32 s8, s11, 31 -; GFX9-NEXT: s_add_u32 s0, s10, s8 -; GFX9-NEXT: s_mov_b32 s9, s8 -; GFX9-NEXT: s_addc_u32 s1, s11, s8 -; GFX9-NEXT: s_xor_b64 s[10:11], s[0:1], s[8:9] -; GFX9-NEXT: v_readfirstlane_b32 s13, v2 -; GFX9-NEXT: s_mul_i32 s1, s10, s12 -; GFX9-NEXT: s_mul_hi_u32 s14, s10, s13 -; GFX9-NEXT: s_mul_hi_u32 s0, s10, s12 -; GFX9-NEXT: s_add_u32 s1, s14, s1 -; GFX9-NEXT: s_addc_u32 s0, 0, s0 -; GFX9-NEXT: s_mul_hi_u32 s15, s11, s13 -; GFX9-NEXT: s_mul_i32 s13, s11, s13 -; GFX9-NEXT: s_add_u32 s1, s1, s13 -; GFX9-NEXT: s_mul_hi_u32 s14, s11, s12 -; GFX9-NEXT: s_addc_u32 s0, s0, s15 -; GFX9-NEXT: s_addc_u32 s1, s14, 0 -; GFX9-NEXT: s_mul_i32 s12, s11, s12 -; GFX9-NEXT: s_add_u32 s12, s0, s12 -; GFX9-NEXT: s_addc_u32 s13, 0, s1 -; GFX9-NEXT: s_mul_i32 s0, s6, s13 -; GFX9-NEXT: s_mul_hi_u32 s1, s6, s12 -; GFX9-NEXT: s_add_i32 s0, s1, s0 -; GFX9-NEXT: s_mul_i32 s1, s7, s12 -; GFX9-NEXT: s_add_i32 s14, s0, s1 -; GFX9-NEXT: s_mul_i32 s1, s6, s12 -; GFX9-NEXT: v_mov_b32_e32 v2, s1 -; GFX9-NEXT: s_sub_i32 s0, s11, s14 -; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s10, v2 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_subb_u32 s10, s0, s7 -; GFX9-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s6, v2 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s10, s10, 0 -; GFX9-NEXT: s_cmp_ge_u32 s10, s7 -; GFX9-NEXT: s_cselect_b32 s15, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v3 -; GFX9-NEXT: s_cmp_eq_u32 s10, s7 -; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v5, s15 -; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v3, v5, v3, s[0:1] -; GFX9-NEXT: s_add_u32 s0, s12, 1 -; GFX9-NEXT: s_addc_u32 s10, s13, 0 -; GFX9-NEXT: s_add_u32 s1, s12, 2 -; GFX9-NEXT: s_addc_u32 s15, s13, 0 -; GFX9-NEXT: v_mov_b32_e32 v5, s0 -; GFX9-NEXT: v_mov_b32_e32 v6, s1 -; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v3 -; GFX9-NEXT: v_cndmask_b32_e64 v3, v5, v6, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v5, s10 -; GFX9-NEXT: v_mov_b32_e32 v6, s15 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[0:1] -; GFX9-NEXT: s_subb_u32 s0, s11, s14 -; GFX9-NEXT: s_cmp_ge_u32 s0, s7 -; GFX9-NEXT: s_cselect_b32 s1, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s6, v2 -; GFX9-NEXT: s_cmp_eq_u32 
s0, s7 -; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc -; GFX9-NEXT: v_mov_b32_e32 v6, s1 -; GFX9-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; GFX9-NEXT: v_mov_b32_e32 v6, s13 -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 -; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v5, vcc -; GFX9-NEXT: v_mov_b32_e32 v5, s12 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc -; GFX9-NEXT: s_xor_b64 s[0:1], s[8:9], s[4:5] -; GFX9-NEXT: v_xor_b32_e32 v3, s0, v3 -; GFX9-NEXT: v_xor_b32_e32 v5, s1, v2 -; GFX9-NEXT: v_mov_b32_e32 v6, s1 -; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s0, v3 -; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v5, v6, vcc -; GFX9-NEXT: global_store_dwordx4 v4, v[0:3], s[2:3] +; GFX9-NEXT: s_add_u32 s20, s16, 2 +; GFX9-NEXT: s_addc_u32 s21, s17, 0 +; GFX9-NEXT: s_cmp_lg_u32 s12, 0 +; GFX9-NEXT: s_cselect_b32 s12, s20, s13 +; GFX9-NEXT: s_cselect_b32 s13, s21, s19 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s9, s9, s18 +; GFX9-NEXT: s_cmp_ge_u32 s9, s7 +; GFX9-NEXT: s_cselect_b32 s10, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s8, s6 +; GFX9-NEXT: s_cselect_b32 s6, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s9, s7 +; GFX9-NEXT: s_cselect_b32 s6, s6, s10 +; GFX9-NEXT: s_cmp_lg_u32 s6, 0 +; GFX9-NEXT: s_cselect_b32 s7, s13, s17 +; GFX9-NEXT: s_cselect_b32 s6, s12, s16 +; GFX9-NEXT: s_xor_b64 s[2:3], s[4:5], s[2:3] +; GFX9-NEXT: s_xor_b64 s[4:5], s[6:7], s[2:3] +; GFX9-NEXT: s_sub_u32 s2, s4, s2 +; GFX9-NEXT: s_subb_u32 s3, s5, s3 +; GFX9-NEXT: v_mov_b32_e32 v1, s14 +; GFX9-NEXT: v_mov_b32_e32 v2, s15 +; GFX9-NEXT: v_mov_b32_e32 v3, s2 +; GFX9-NEXT: v_mov_b32_e32 v4, s3 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: global_store_dwordx4 v0, v[1:4], s[0:1] ; GFX9-NEXT: s_endpgm %shl.y = shl <2 x i64> , %y %r = sdiv <2 x i64> %x, %shl.y @@ -8983,8 +9048,7 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x ; GFX6-LABEL: srem_i64_pow2_shl_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 +; GFX6-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) ; GFX6-NEXT: s_lshl_b64 s[0:1], 0x1000, s0 ; GFX6-NEXT: s_ashr_i32 s2, s1, 31 @@ -8994,130 +9058,167 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x ; GFX6-NEXT: s_xor_b64 s[8:9], s[0:1], s[2:3] ; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8 ; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s9 -; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX6-NEXT: s_sub_u32 s4, 0, s8 -; GFX6-NEXT: s_subb_u32 s5, 0, s9 +; GFX6-NEXT: s_sub_u32 s10, 0, s8 +; GFX6-NEXT: s_subb_u32 s11, 0, s9 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GFX6-NEXT: v_rcp_f32_e32 v0, v0 -; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_ashr_i32 s10, s3, 31 -; GFX6-NEXT: s_add_u32 s2, s2, s10 -; GFX6-NEXT: s_mov_b32 s11, s10 +; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GFX6-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GFX6-NEXT: v_trunc_f32_e32 v1, v1 ; GFX6-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: s_addc_u32 s3, s3, s10 -; GFX6-NEXT: s_xor_b64 s[12:13], s[2:3], s[10:11] -; GFX6-NEXT: v_mul_lo_u32 v2, s4, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s4, v0 -; GFX6-NEXT: v_mul_lo_u32 v5, s5, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s4, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GFX6-NEXT: v_mul_hi_u32 v3, v0, v4 -; GFX6-NEXT: 
v_mul_lo_u32 v5, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, v0, v2 -; GFX6-NEXT: v_mul_lo_u32 v6, v1, v4 -; GFX6-NEXT: v_mul_hi_u32 v4, v1, v4 -; GFX6-NEXT: v_mul_hi_u32 v8, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v6 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v5, v4, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, s4, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s4, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s5, v0 -; GFX6-NEXT: s_mov_b32 s5, s1 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, s4, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: v_mul_lo_u32 v6, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, v0, v3 -; GFX6-NEXT: v_mul_hi_u32 v8, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v5, v1, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, v1, v3 -; GFX6-NEXT: v_mul_hi_u32 v4, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GFX6-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, s12, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s12, v0 -; GFX6-NEXT: v_mul_hi_u32 v4, s12, v1 -; GFX6-NEXT: v_mul_hi_u32 v5, s13, v1 -; GFX6-NEXT: v_mul_lo_u32 v1, s13, v1 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_mul_lo_u32 v4, s13, v0 -; GFX6-NEXT: v_mul_hi_u32 v0, s13, v0 -; GFX6-NEXT: s_mov_b32 s4, s0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc -; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc -; GFX6-NEXT: v_mul_lo_u32 v1, s8, v1 -; GFX6-NEXT: v_mul_hi_u32 v2, s8, v0 -; GFX6-NEXT: v_mul_lo_u32 v3, s9, v0 -; GFX6-NEXT: v_mul_lo_u32 v0, s8, v0 -; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v1, vcc, v3, v1 -; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s13, v1 -; GFX6-NEXT: v_mov_b32_e32 v3, s9 -; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s12, v0 -; GFX6-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, vcc -; GFX6-NEXT: v_subrev_i32_e64 v4, s[0:1], s8, v0 -; GFX6-NEXT: v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1] -; GFX6-NEXT: v_cmp_le_u32_e64 s[2:3], s9, v5 -; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3] -; GFX6-NEXT: v_cmp_le_u32_e64 s[2:3], s8, v4 -; GFX6-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1] -; GFX6-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[2:3] -; GFX6-NEXT: v_cmp_eq_u32_e64 s[2:3], s9, v5 -; GFX6-NEXT: v_subrev_i32_e64 v3, s[0:1], s8, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[2:3] -; GFX6-NEXT: v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1] -; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6 -; GFX6-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1] -; GFX6-NEXT: v_mov_b32_e32 v4, s13 -; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s9, v1 -; GFX6-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s8, v0 -; GFX6-NEXT: 
v_cndmask_b32_e64 v2, v5, v2, s[0:1] -; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, s9, v1 -; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX6-NEXT: v_xor_b32_e32 v0, s10, v0 -; GFX6-NEXT: v_xor_b32_e32 v1, s10, v1 -; GFX6-NEXT: v_mov_b32_e32 v2, s10 -; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s10, v0 -; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX6-NEXT: v_mul_hi_u32 v2, s10, v0 +; GFX6-NEXT: v_readfirstlane_b32 s12, v1 +; GFX6-NEXT: v_readfirstlane_b32 s0, v0 +; GFX6-NEXT: s_mul_i32 s1, s10, s12 +; GFX6-NEXT: v_readfirstlane_b32 s15, v2 +; GFX6-NEXT: s_mul_i32 s13, s11, s0 +; GFX6-NEXT: s_mul_i32 s14, s10, s0 +; GFX6-NEXT: s_add_i32 s1, s15, s1 +; GFX6-NEXT: v_mul_hi_u32 v3, v0, s14 +; GFX6-NEXT: s_add_i32 s1, s1, s13 +; GFX6-NEXT: v_mul_hi_u32 v0, v0, s1 +; GFX6-NEXT: v_mul_hi_u32 v4, v1, s14 +; GFX6-NEXT: v_readfirstlane_b32 s13, v3 +; GFX6-NEXT: s_mul_i32 s15, s0, s1 +; GFX6-NEXT: v_mul_hi_u32 v1, v1, s1 +; GFX6-NEXT: s_add_u32 s13, s13, s15 +; GFX6-NEXT: v_readfirstlane_b32 s15, v0 +; GFX6-NEXT: s_addc_u32 s15, 0, s15 +; GFX6-NEXT: s_mul_i32 s14, s12, s14 +; GFX6-NEXT: v_readfirstlane_b32 s16, v4 +; GFX6-NEXT: s_add_u32 s13, s13, s14 +; GFX6-NEXT: s_addc_u32 s13, s15, s16 +; GFX6-NEXT: v_readfirstlane_b32 s14, v1 +; GFX6-NEXT: s_addc_u32 s14, s14, 0 +; GFX6-NEXT: s_mul_i32 s1, s12, s1 +; GFX6-NEXT: s_add_u32 s1, s13, s1 +; GFX6-NEXT: s_addc_u32 s13, 0, s14 +; GFX6-NEXT: s_add_u32 s14, s0, s1 +; GFX6-NEXT: v_mov_b32_e32 v0, s14 +; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX6-NEXT: v_mul_hi_u32 v0, s10, v0 +; GFX6-NEXT: s_or_b32 s0, s0, s1 +; GFX6-NEXT: s_cmp_lg_u32 s0, 0 +; GFX6-NEXT: s_addc_u32 s12, s12, s13 +; GFX6-NEXT: s_mul_i32 s0, s10, s12 +; GFX6-NEXT: v_readfirstlane_b32 s1, v0 +; GFX6-NEXT: s_add_i32 s0, s1, s0 +; GFX6-NEXT: s_mul_i32 s11, s11, s14 +; GFX6-NEXT: s_mul_i32 s1, s10, s14 +; GFX6-NEXT: s_add_i32 s0, s0, s11 +; GFX6-NEXT: v_mov_b32_e32 v2, s1 +; GFX6-NEXT: v_mov_b32_e32 v0, s0 +; GFX6-NEXT: v_mul_hi_u32 v3, s12, v2 +; GFX6-NEXT: v_mul_hi_u32 v2, s14, v2 +; GFX6-NEXT: v_mul_hi_u32 v1, s12, v0 +; GFX6-NEXT: v_mul_hi_u32 v0, s14, v0 +; GFX6-NEXT: s_mul_i32 s11, s14, s0 +; GFX6-NEXT: v_readfirstlane_b32 s15, v2 +; GFX6-NEXT: s_add_u32 s11, s15, s11 +; GFX6-NEXT: v_readfirstlane_b32 s13, v0 +; GFX6-NEXT: s_mul_i32 s1, s12, s1 +; GFX6-NEXT: s_addc_u32 s13, 0, s13 +; GFX6-NEXT: v_readfirstlane_b32 s10, v3 +; GFX6-NEXT: s_add_u32 s1, s11, s1 +; GFX6-NEXT: s_addc_u32 s1, s13, s10 +; GFX6-NEXT: v_readfirstlane_b32 s10, v1 +; GFX6-NEXT: s_addc_u32 s10, s10, 0 +; GFX6-NEXT: s_mul_i32 s0, s12, s0 +; GFX6-NEXT: s_add_u32 s0, s1, s0 +; GFX6-NEXT: s_addc_u32 s10, 0, s10 +; GFX6-NEXT: s_add_u32 s13, s14, s0 +; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX6-NEXT: s_or_b32 s0, s0, s1 +; GFX6-NEXT: s_cmp_lg_u32 s0, 0 +; GFX6-NEXT: s_addc_u32 s12, s12, s10 +; GFX6-NEXT: s_ashr_i32 s10, s7, 31 +; GFX6-NEXT: s_add_u32 s0, s6, s10 +; GFX6-NEXT: s_mov_b32 s11, s10 +; GFX6-NEXT: s_addc_u32 s1, s7, s10 +; GFX6-NEXT: s_xor_b64 s[6:7], s[0:1], s[10:11] +; GFX6-NEXT: v_mov_b32_e32 v0, s12 +; GFX6-NEXT: v_mul_hi_u32 v1, s6, v0 +; GFX6-NEXT: v_mov_b32_e32 v2, s13 +; GFX6-NEXT: v_mul_hi_u32 v3, s6, v2 +; GFX6-NEXT: s_mov_b32 s0, s4 +; GFX6-NEXT: v_readfirstlane_b32 s4, v1 +; GFX6-NEXT: v_mul_hi_u32 v1, s7, v2 +; 
GFX6-NEXT: s_mul_i32 s1, s6, s12 +; GFX6-NEXT: v_readfirstlane_b32 s14, v3 +; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0 +; GFX6-NEXT: s_add_u32 s1, s14, s1 +; GFX6-NEXT: s_addc_u32 s4, 0, s4 +; GFX6-NEXT: s_mul_i32 s13, s7, s13 +; GFX6-NEXT: v_readfirstlane_b32 s14, v1 +; GFX6-NEXT: s_add_u32 s1, s1, s13 +; GFX6-NEXT: s_addc_u32 s1, s4, s14 +; GFX6-NEXT: v_readfirstlane_b32 s4, v0 +; GFX6-NEXT: s_addc_u32 s4, s4, 0 +; GFX6-NEXT: s_mul_i32 s12, s7, s12 +; GFX6-NEXT: s_add_u32 s12, s1, s12 +; GFX6-NEXT: v_mov_b32_e32 v0, s12 +; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0 +; GFX6-NEXT: s_addc_u32 s4, 0, s4 +; GFX6-NEXT: s_mov_b32 s1, s5 +; GFX6-NEXT: s_mul_i32 s4, s8, s4 +; GFX6-NEXT: v_readfirstlane_b32 s5, v0 +; GFX6-NEXT: s_add_i32 s4, s5, s4 +; GFX6-NEXT: s_mul_i32 s5, s9, s12 +; GFX6-NEXT: s_add_i32 s13, s4, s5 +; GFX6-NEXT: s_sub_i32 s14, s7, s13 +; GFX6-NEXT: s_mul_i32 s4, s8, s12 +; GFX6-NEXT: s_sub_u32 s6, s6, s4 +; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX6-NEXT: s_or_b32 s12, s4, s5 +; GFX6-NEXT: s_cmp_lg_u32 s12, 0 +; GFX6-NEXT: s_subb_u32 s14, s14, s9 +; GFX6-NEXT: s_sub_u32 s15, s6, s8 +; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX6-NEXT: s_or_b32 s4, s4, s5 +; GFX6-NEXT: s_cmp_lg_u32 s4, 0 +; GFX6-NEXT: s_subb_u32 s16, s14, 0 +; GFX6-NEXT: s_cmp_ge_u32 s16, s9 +; GFX6-NEXT: s_cselect_b32 s5, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s15, s8 +; GFX6-NEXT: s_cselect_b32 s17, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s16, s9 +; GFX6-NEXT: s_cselect_b32 s17, s17, s5 +; GFX6-NEXT: s_cmp_lg_u32 s4, 0 +; GFX6-NEXT: s_subb_u32 s14, s14, s9 +; GFX6-NEXT: s_sub_u32 s18, s15, s8 +; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX6-NEXT: s_or_b32 s4, s4, s5 +; GFX6-NEXT: s_cmp_lg_u32 s4, 0 +; GFX6-NEXT: s_subb_u32 s4, s14, 0 +; GFX6-NEXT: s_cmp_lg_u32 s17, 0 +; GFX6-NEXT: s_cselect_b32 s14, s18, s15 +; GFX6-NEXT: s_cselect_b32 s4, s4, s16 +; GFX6-NEXT: s_cmp_lg_u32 s12, 0 +; GFX6-NEXT: s_subb_u32 s5, s7, s13 +; GFX6-NEXT: s_cmp_ge_u32 s5, s9 +; GFX6-NEXT: s_cselect_b32 s7, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s6, s8 +; GFX6-NEXT: s_cselect_b32 s8, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s5, s9 +; GFX6-NEXT: s_cselect_b32 s7, s8, s7 +; GFX6-NEXT: s_cmp_lg_u32 s7, 0 +; GFX6-NEXT: s_cselect_b32 s5, s4, s5 +; GFX6-NEXT: s_cselect_b32 s4, s14, s6 +; GFX6-NEXT: s_xor_b64 s[4:5], s[4:5], s[10:11] +; GFX6-NEXT: s_sub_u32 s4, s4, s10 +; GFX6-NEXT: s_subb_u32 s5, s5, s10 +; GFX6-NEXT: v_mov_b32_e32 v0, s4 +; GFX6-NEXT: v_mov_b32_e32 v1, s5 +; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: srem_i64_pow2_shl_denom: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34 -; GFX9-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: s_lshl_b64 s[0:1], 0x1000, s0 ; GFX9-NEXT: s_ashr_i32 s2, s1, 31 @@ -9127,8 +9228,9 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x ; GFX9-NEXT: s_xor_b64 s[6:7], s[0:1], s[2:3] ; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6 ; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7 -; GFX9-NEXT: s_sub_u32 s0, 0, s6 -; GFX9-NEXT: s_subb_u32 s1, 0, s7 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-NEXT: s_sub_u32 s8, 0, s6 +; GFX9-NEXT: s_subb_u32 s9, 0, s7 ; GFX9-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GFX9-NEXT: v_rcp_f32_e32 v1, v0 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 @@ -9138,127 +9240,123 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x ; GFX9-NEXT: v_madmk_f32 v1, v2, 0xcf800000, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2 ; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GFX9-NEXT: 
v_readfirstlane_b32 s2, v2 -; GFX9-NEXT: v_readfirstlane_b32 s3, v1 -; GFX9-NEXT: s_mul_i32 s4, s0, s2 -; GFX9-NEXT: s_mul_hi_u32 s12, s0, s3 -; GFX9-NEXT: s_mul_i32 s5, s1, s3 -; GFX9-NEXT: s_add_i32 s4, s12, s4 -; GFX9-NEXT: s_mul_i32 s13, s0, s3 -; GFX9-NEXT: s_add_i32 s4, s4, s5 -; GFX9-NEXT: s_mul_hi_u32 s12, s3, s13 -; GFX9-NEXT: s_mul_hi_u32 s5, s3, s4 -; GFX9-NEXT: s_mul_i32 s3, s3, s4 -; GFX9-NEXT: s_add_u32 s3, s12, s3 -; GFX9-NEXT: s_addc_u32 s5, 0, s5 -; GFX9-NEXT: s_mul_hi_u32 s14, s2, s13 -; GFX9-NEXT: s_mul_i32 s13, s2, s13 -; GFX9-NEXT: s_add_u32 s3, s3, s13 -; GFX9-NEXT: s_mul_hi_u32 s12, s2, s4 -; GFX9-NEXT: s_addc_u32 s3, s5, s14 -; GFX9-NEXT: s_addc_u32 s5, s12, 0 -; GFX9-NEXT: s_mul_i32 s4, s2, s4 -; GFX9-NEXT: s_add_u32 s3, s3, s4 -; GFX9-NEXT: s_addc_u32 s4, 0, s5 -; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s3, v1 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s2, s2, s4 +; GFX9-NEXT: v_readfirstlane_b32 s10, v2 ; GFX9-NEXT: v_readfirstlane_b32 s4, v1 -; GFX9-NEXT: s_mul_i32 s3, s0, s2 -; GFX9-NEXT: s_mul_hi_u32 s5, s0, s4 -; GFX9-NEXT: s_add_i32 s3, s5, s3 -; GFX9-NEXT: s_mul_i32 s1, s1, s4 -; GFX9-NEXT: s_add_i32 s3, s3, s1 -; GFX9-NEXT: s_mul_i32 s0, s0, s4 -; GFX9-NEXT: s_mul_hi_u32 s5, s2, s0 -; GFX9-NEXT: s_mul_i32 s12, s2, s0 -; GFX9-NEXT: s_mul_i32 s14, s4, s3 -; GFX9-NEXT: s_mul_hi_u32 s0, s4, s0 -; GFX9-NEXT: s_mul_hi_u32 s13, s4, s3 -; GFX9-NEXT: s_add_u32 s0, s0, s14 -; GFX9-NEXT: s_addc_u32 s4, 0, s13 -; GFX9-NEXT: s_add_u32 s0, s0, s12 -; GFX9-NEXT: s_mul_hi_u32 s1, s2, s3 -; GFX9-NEXT: s_addc_u32 s0, s4, s5 -; GFX9-NEXT: s_addc_u32 s1, s1, 0 -; GFX9-NEXT: s_mul_i32 s3, s2, s3 -; GFX9-NEXT: s_add_u32 s0, s0, s3 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s0, v1 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s2, s2, s1 -; GFX9-NEXT: s_ashr_i32 s4, s11, 31 -; GFX9-NEXT: s_add_u32 s0, s10, s4 +; GFX9-NEXT: s_mul_i32 s5, s8, s10 +; GFX9-NEXT: s_mul_hi_u32 s12, s8, s4 +; GFX9-NEXT: s_mul_i32 s11, s9, s4 +; GFX9-NEXT: s_add_i32 s5, s12, s5 +; GFX9-NEXT: s_mul_i32 s13, s8, s4 +; GFX9-NEXT: s_add_i32 s5, s5, s11 +; GFX9-NEXT: s_mul_hi_u32 s12, s4, s13 +; GFX9-NEXT: s_mul_i32 s14, s4, s5 +; GFX9-NEXT: s_mul_hi_u32 s11, s4, s5 +; GFX9-NEXT: s_add_u32 s12, s12, s14 +; GFX9-NEXT: s_addc_u32 s11, 0, s11 +; GFX9-NEXT: s_mul_hi_u32 s15, s10, s13 +; GFX9-NEXT: s_mul_i32 s13, s10, s13 +; GFX9-NEXT: s_add_u32 s12, s12, s13 +; GFX9-NEXT: s_mul_hi_u32 s14, s10, s5 +; GFX9-NEXT: s_addc_u32 s11, s11, s15 +; GFX9-NEXT: s_addc_u32 s12, s14, 0 +; GFX9-NEXT: s_mul_i32 s5, s10, s5 +; GFX9-NEXT: s_add_u32 s5, s11, s5 +; GFX9-NEXT: s_addc_u32 s11, 0, s12 +; GFX9-NEXT: s_add_u32 s12, s4, s5 +; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9-NEXT: s_addc_u32 s10, s10, s11 +; GFX9-NEXT: s_mul_i32 s4, s8, s10 +; GFX9-NEXT: s_mul_hi_u32 s5, s8, s12 +; GFX9-NEXT: s_add_i32 s4, s5, s4 +; GFX9-NEXT: s_mul_i32 s9, s9, s12 +; GFX9-NEXT: s_add_i32 s4, s4, s9 +; GFX9-NEXT: s_mul_i32 s8, s8, s12 +; GFX9-NEXT: s_mul_hi_u32 s9, s10, s8 +; GFX9-NEXT: s_mul_i32 s11, s10, s8 +; GFX9-NEXT: s_mul_i32 s14, s12, s4 +; GFX9-NEXT: s_mul_hi_u32 s8, s12, s8 +; GFX9-NEXT: s_mul_hi_u32 s13, s12, s4 +; GFX9-NEXT: s_add_u32 s8, s8, s14 +; GFX9-NEXT: s_addc_u32 s13, 0, s13 +; GFX9-NEXT: s_add_u32 s8, s8, s11 +; GFX9-NEXT: s_mul_hi_u32 s5, s10, s4 +; GFX9-NEXT: s_addc_u32 s8, s13, s9 +; GFX9-NEXT: s_addc_u32 s5, s5, 0 +; GFX9-NEXT: s_mul_i32 s4, s10, s4 +; GFX9-NEXT: s_add_u32 s4, s8, s4 +; GFX9-NEXT: s_addc_u32 s8, 0, s5 +; 
GFX9-NEXT: s_add_u32 s9, s12, s4 +; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9-NEXT: s_addc_u32 s8, s10, s8 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_ashr_i32 s4, s3, 31 +; GFX9-NEXT: s_add_u32 s2, s2, s4 ; GFX9-NEXT: s_mov_b32 s5, s4 -; GFX9-NEXT: s_addc_u32 s1, s11, s4 -; GFX9-NEXT: s_xor_b64 s[10:11], s[0:1], s[4:5] -; GFX9-NEXT: v_readfirstlane_b32 s3, v1 -; GFX9-NEXT: s_mul_i32 s1, s10, s2 -; GFX9-NEXT: s_mul_hi_u32 s5, s10, s3 -; GFX9-NEXT: s_mul_hi_u32 s0, s10, s2 -; GFX9-NEXT: s_add_u32 s1, s5, s1 -; GFX9-NEXT: s_addc_u32 s0, 0, s0 -; GFX9-NEXT: s_mul_hi_u32 s12, s11, s3 -; GFX9-NEXT: s_mul_i32 s3, s11, s3 -; GFX9-NEXT: s_add_u32 s1, s1, s3 -; GFX9-NEXT: s_mul_hi_u32 s5, s11, s2 -; GFX9-NEXT: s_addc_u32 s0, s0, s12 -; GFX9-NEXT: s_addc_u32 s1, s5, 0 -; GFX9-NEXT: s_mul_i32 s2, s11, s2 -; GFX9-NEXT: s_add_u32 s0, s0, s2 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: s_mul_i32 s1, s6, s1 -; GFX9-NEXT: s_mul_hi_u32 s2, s6, s0 -; GFX9-NEXT: s_add_i32 s1, s2, s1 -; GFX9-NEXT: s_mul_i32 s2, s7, s0 -; GFX9-NEXT: s_mul_i32 s0, s6, s0 -; GFX9-NEXT: s_add_i32 s5, s1, s2 -; GFX9-NEXT: v_mov_b32_e32 v1, s0 -; GFX9-NEXT: s_sub_i32 s1, s11, s5 -; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s10, v1 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_subb_u32 s10, s1, s7 -; GFX9-NEXT: v_subrev_co_u32_e64 v2, s[0:1], s6, v1 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s12, s10, 0 -; GFX9-NEXT: s_cmp_ge_u32 s12, s7 -; GFX9-NEXT: s_cselect_b32 s13, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e64 s[2:3], s6, v2 -; GFX9-NEXT: s_cmp_eq_u32 s12, s7 -; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[2:3] -; GFX9-NEXT: v_mov_b32_e32 v4, s13 -; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[2:3] -; GFX9-NEXT: s_subb_u32 s2, s10, s7 -; GFX9-NEXT: v_subrev_co_u32_e64 v4, s[0:1], s6, v2 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s2, s2, 0 -; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v3 -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v4, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v3, s12 -; GFX9-NEXT: v_mov_b32_e32 v4, s2 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[0:1] -; GFX9-NEXT: s_subb_u32 s0, s11, s5 -; GFX9-NEXT: s_cmp_ge_u32 s0, s7 -; GFX9-NEXT: s_cselect_b32 s1, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s6, v1 -; GFX9-NEXT: s_cmp_eq_u32 s0, s7 -; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc -; GFX9-NEXT: v_mov_b32_e32 v5, s1 -; GFX9-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; GFX9-NEXT: v_mov_b32_e32 v5, s0 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc -; GFX9-NEXT: v_xor_b32_e32 v1, s4, v1 -; GFX9-NEXT: v_xor_b32_e32 v2, s4, v3 -; GFX9-NEXT: v_mov_b32_e32 v3, s4 -; GFX9-NEXT: v_subrev_co_u32_e32 v1, vcc, s4, v1 -; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v3, vcc -; GFX9-NEXT: global_store_dwordx2 v0, v[1:2], s[8:9] +; GFX9-NEXT: s_addc_u32 s3, s3, s4 +; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5] +; GFX9-NEXT: s_mul_i32 s11, s2, s8 +; GFX9-NEXT: s_mul_hi_u32 s12, s2, s9 +; GFX9-NEXT: s_mul_hi_u32 s10, s2, s8 +; GFX9-NEXT: s_add_u32 s11, s12, s11 +; GFX9-NEXT: s_addc_u32 s10, 0, s10 +; GFX9-NEXT: s_mul_hi_u32 s13, s3, s9 +; GFX9-NEXT: s_mul_i32 s9, s3, s9 +; GFX9-NEXT: s_add_u32 s9, s11, s9 +; GFX9-NEXT: s_mul_hi_u32 s12, s3, s8 +; GFX9-NEXT: s_addc_u32 s9, s10, s13 +; GFX9-NEXT: s_addc_u32 s10, s12, 0 +; GFX9-NEXT: 
s_mul_i32 s8, s3, s8 +; GFX9-NEXT: s_add_u32 s8, s9, s8 +; GFX9-NEXT: s_addc_u32 s9, 0, s10 +; GFX9-NEXT: s_mul_i32 s9, s6, s9 +; GFX9-NEXT: s_mul_hi_u32 s10, s6, s8 +; GFX9-NEXT: s_add_i32 s9, s10, s9 +; GFX9-NEXT: s_mul_i32 s10, s7, s8 +; GFX9-NEXT: s_add_i32 s12, s9, s10 +; GFX9-NEXT: s_sub_i32 s10, s3, s12 +; GFX9-NEXT: s_mul_i32 s8, s6, s8 +; GFX9-NEXT: s_sub_u32 s2, s2, s8 +; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX9-NEXT: s_subb_u32 s13, s10, s7 +; GFX9-NEXT: s_sub_u32 s14, s2, s6 +; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s15, s13, 0 +; GFX9-NEXT: s_cmp_ge_u32 s15, s7 +; GFX9-NEXT: s_cselect_b32 s16, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s14, s6 +; GFX9-NEXT: s_cselect_b32 s17, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s15, s7 +; GFX9-NEXT: s_cselect_b32 s16, s17, s16 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s13, s13, s7 +; GFX9-NEXT: s_sub_u32 s17, s14, s6 +; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s10, s13, 0 +; GFX9-NEXT: s_cmp_lg_u32 s16, 0 +; GFX9-NEXT: s_cselect_b32 s11, s17, s14 +; GFX9-NEXT: s_cselect_b32 s10, s10, s15 +; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX9-NEXT: s_subb_u32 s3, s3, s12 +; GFX9-NEXT: s_cmp_ge_u32 s3, s7 +; GFX9-NEXT: s_cselect_b32 s8, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s2, s6 +; GFX9-NEXT: s_cselect_b32 s6, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s3, s7 +; GFX9-NEXT: s_cselect_b32 s6, s6, s8 +; GFX9-NEXT: s_cmp_lg_u32 s6, 0 +; GFX9-NEXT: s_cselect_b32 s3, s10, s3 +; GFX9-NEXT: s_cselect_b32 s2, s11, s2 +; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5] +; GFX9-NEXT: s_sub_u32 s2, s2, s4 +; GFX9-NEXT: s_subb_u32 s3, s3, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s2 +; GFX9-NEXT: v_mov_b32_e32 v2, s3 +; GFX9-NEXT: global_store_dwordx2 v0, v[1:2], s[0:1] ; GFX9-NEXT: s_endpgm %shl.y = shl i64 4096, %y %r = srem i64 %x, %shl.y @@ -9353,272 +9451,347 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x ; GFX6-LABEL: srem_v2i64_pow2_shl_denom: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0xd -; GFX6-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9 -; GFX6-NEXT: s_mov_b32 s7, 0xf000 -; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_lshl_b64 s[0:1], 0x1000, s12 -; GFX6-NEXT: s_lshl_b64 s[16:17], 0x1000, s14 +; GFX6-NEXT: s_lshl_b64 s[2:3], 0x1000, s12 +; GFX6-NEXT: s_lshl_b64 s[0:1], 0x1000, s14 +; GFX6-NEXT: s_ashr_i32 s6, s3, 31 +; GFX6-NEXT: s_add_u32 s2, s2, s6 +; GFX6-NEXT: s_mov_b32 s7, s6 +; GFX6-NEXT: s_addc_u32 s3, s3, s6 +; GFX6-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7] +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s2 +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s3 +; GFX6-NEXT: s_sub_u32 s12, 0, s2 +; GFX6-NEXT: s_subb_u32 s13, 0, s3 +; GFX6-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1 +; GFX6-NEXT: v_rcp_f32_e32 v0, v0 +; GFX6-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 +; GFX6-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 +; GFX6-NEXT: v_trunc_f32_e32 v1, v1 +; GFX6-NEXT: v_mac_f32_e32 v0, 0xcf800000, v1 +; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX6-NEXT: v_mul_hi_u32 v2, s12, v0 +; GFX6-NEXT: v_readfirstlane_b32 s14, v1 +; GFX6-NEXT: v_readfirstlane_b32 s6, v0 +; GFX6-NEXT: s_mul_i32 s7, s12, s14 +; GFX6-NEXT: v_readfirstlane_b32 s17, v2 +; GFX6-NEXT: s_mul_i32 s15, s13, s6 +; GFX6-NEXT: s_mul_i32 s16, s12, s6 +; GFX6-NEXT: s_add_i32 s7, s17, s7 +; GFX6-NEXT: v_mul_hi_u32 v3, v0, s16 +; GFX6-NEXT: s_add_i32 s7, s7, s15 
+; GFX6-NEXT: v_mul_hi_u32 v0, v0, s7 +; GFX6-NEXT: v_mul_hi_u32 v4, v1, s16 +; GFX6-NEXT: v_readfirstlane_b32 s15, v3 +; GFX6-NEXT: s_mul_i32 s18, s6, s7 +; GFX6-NEXT: v_mul_hi_u32 v1, v1, s7 +; GFX6-NEXT: s_add_u32 s15, s15, s18 +; GFX6-NEXT: v_readfirstlane_b32 s18, v0 +; GFX6-NEXT: s_mul_i32 s16, s14, s16 +; GFX6-NEXT: s_addc_u32 s18, 0, s18 +; GFX6-NEXT: v_readfirstlane_b32 s17, v4 +; GFX6-NEXT: s_add_u32 s15, s15, s16 +; GFX6-NEXT: s_addc_u32 s15, s18, s17 +; GFX6-NEXT: v_readfirstlane_b32 s16, v1 +; GFX6-NEXT: s_addc_u32 s16, s16, 0 +; GFX6-NEXT: s_mul_i32 s7, s14, s7 +; GFX6-NEXT: s_add_u32 s7, s15, s7 +; GFX6-NEXT: s_addc_u32 s15, 0, s16 +; GFX6-NEXT: s_add_u32 s16, s6, s7 +; GFX6-NEXT: v_mov_b32_e32 v0, s16 +; GFX6-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GFX6-NEXT: v_mul_hi_u32 v0, s12, v0 +; GFX6-NEXT: s_or_b32 s6, s6, s7 +; GFX6-NEXT: s_cmp_lg_u32 s6, 0 +; GFX6-NEXT: s_addc_u32 s14, s14, s15 +; GFX6-NEXT: s_mul_i32 s6, s12, s14 +; GFX6-NEXT: v_readfirstlane_b32 s7, v0 +; GFX6-NEXT: s_add_i32 s6, s7, s6 +; GFX6-NEXT: s_mul_i32 s13, s13, s16 +; GFX6-NEXT: s_mul_i32 s7, s12, s16 +; GFX6-NEXT: s_add_i32 s6, s6, s13 +; GFX6-NEXT: v_mov_b32_e32 v2, s7 +; GFX6-NEXT: v_mov_b32_e32 v0, s6 +; GFX6-NEXT: v_mul_hi_u32 v3, s14, v2 +; GFX6-NEXT: v_mul_hi_u32 v2, s16, v2 +; GFX6-NEXT: v_mul_hi_u32 v1, s14, v0 +; GFX6-NEXT: v_mul_hi_u32 v0, s16, v0 +; GFX6-NEXT: s_mul_i32 s13, s16, s6 +; GFX6-NEXT: v_readfirstlane_b32 s17, v2 +; GFX6-NEXT: s_add_u32 s13, s17, s13 +; GFX6-NEXT: v_readfirstlane_b32 s15, v0 +; GFX6-NEXT: s_mul_i32 s7, s14, s7 +; GFX6-NEXT: s_addc_u32 s15, 0, s15 +; GFX6-NEXT: v_readfirstlane_b32 s12, v3 +; GFX6-NEXT: s_add_u32 s7, s13, s7 +; GFX6-NEXT: s_addc_u32 s7, s15, s12 +; GFX6-NEXT: v_readfirstlane_b32 s12, v1 +; GFX6-NEXT: s_addc_u32 s12, s12, 0 +; GFX6-NEXT: s_mul_i32 s6, s14, s6 +; GFX6-NEXT: s_add_u32 s6, s7, s6 +; GFX6-NEXT: s_addc_u32 s12, 0, s12 +; GFX6-NEXT: s_add_u32 s13, s16, s6 +; GFX6-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GFX6-NEXT: s_or_b32 s6, s6, s7 +; GFX6-NEXT: s_cmp_lg_u32 s6, 0 +; GFX6-NEXT: s_addc_u32 s12, s14, s12 +; GFX6-NEXT: s_ashr_i32 s6, s9, 31 +; GFX6-NEXT: s_add_u32 s8, s8, s6 +; GFX6-NEXT: s_mov_b32 s7, s6 +; GFX6-NEXT: s_addc_u32 s9, s9, s6 +; GFX6-NEXT: s_xor_b64 s[8:9], s[8:9], s[6:7] +; GFX6-NEXT: v_mov_b32_e32 v0, s12 +; GFX6-NEXT: v_mul_hi_u32 v1, s8, v0 +; GFX6-NEXT: v_mov_b32_e32 v2, s13 +; GFX6-NEXT: v_mul_hi_u32 v3, s8, v2 +; GFX6-NEXT: s_mul_i32 s14, s8, s12 +; GFX6-NEXT: v_readfirstlane_b32 s15, v1 +; GFX6-NEXT: v_mul_hi_u32 v1, s9, v2 +; GFX6-NEXT: v_readfirstlane_b32 s16, v3 +; GFX6-NEXT: v_mul_hi_u32 v0, s9, v0 +; GFX6-NEXT: s_add_u32 s14, s16, s14 +; GFX6-NEXT: s_addc_u32 s15, 0, s15 +; GFX6-NEXT: s_mul_i32 s13, s9, s13 +; GFX6-NEXT: v_readfirstlane_b32 s16, v1 +; GFX6-NEXT: s_add_u32 s13, s14, s13 +; GFX6-NEXT: s_addc_u32 s13, s15, s16 +; GFX6-NEXT: v_readfirstlane_b32 s14, v0 +; GFX6-NEXT: s_addc_u32 s14, s14, 0 +; GFX6-NEXT: s_mul_i32 s12, s9, s12 +; GFX6-NEXT: s_add_u32 s12, s13, s12 +; GFX6-NEXT: v_mov_b32_e32 v0, s12 +; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0 +; GFX6-NEXT: s_addc_u32 s13, 0, s14 +; GFX6-NEXT: s_mul_i32 s13, s2, s13 +; GFX6-NEXT: v_readfirstlane_b32 s14, v0 +; GFX6-NEXT: s_add_i32 s13, s14, s13 +; GFX6-NEXT: s_mul_i32 s14, s3, s12 +; GFX6-NEXT: s_add_i32 s14, s13, s14 +; GFX6-NEXT: s_sub_i32 s15, s9, s14 +; GFX6-NEXT: s_mul_i32 s12, s2, s12 +; GFX6-NEXT: s_sub_u32 s8, s8, s12 +; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX6-NEXT: s_or_b32 s16, s12, s13 +; GFX6-NEXT: s_cmp_lg_u32 s16, 0 +; GFX6-NEXT: s_subb_u32 
s15, s15, s3 +; GFX6-NEXT: s_sub_u32 s17, s8, s2 +; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX6-NEXT: s_or_b32 s12, s12, s13 +; GFX6-NEXT: s_cmp_lg_u32 s12, 0 +; GFX6-NEXT: s_subb_u32 s18, s15, 0 +; GFX6-NEXT: s_cmp_ge_u32 s18, s3 +; GFX6-NEXT: s_cselect_b32 s13, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s17, s2 +; GFX6-NEXT: s_cselect_b32 s19, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s18, s3 +; GFX6-NEXT: s_cselect_b32 s19, s19, s13 +; GFX6-NEXT: s_cmp_lg_u32 s12, 0 +; GFX6-NEXT: s_subb_u32 s15, s15, s3 +; GFX6-NEXT: s_sub_u32 s20, s17, s2 +; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX6-NEXT: s_or_b32 s12, s12, s13 +; GFX6-NEXT: s_cmp_lg_u32 s12, 0 +; GFX6-NEXT: s_subb_u32 s12, s15, 0 +; GFX6-NEXT: s_cmp_lg_u32 s19, 0 +; GFX6-NEXT: s_cselect_b32 s13, s20, s17 +; GFX6-NEXT: s_cselect_b32 s12, s12, s18 +; GFX6-NEXT: s_cmp_lg_u32 s16, 0 +; GFX6-NEXT: s_subb_u32 s9, s9, s14 +; GFX6-NEXT: s_cmp_ge_u32 s9, s3 +; GFX6-NEXT: s_cselect_b32 s14, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s8, s2 +; GFX6-NEXT: s_cselect_b32 s2, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s9, s3 +; GFX6-NEXT: s_cselect_b32 s2, s2, s14 +; GFX6-NEXT: s_cmp_lg_u32 s2, 0 +; GFX6-NEXT: s_cselect_b32 s3, s12, s9 +; GFX6-NEXT: s_cselect_b32 s2, s13, s8 +; GFX6-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7] +; GFX6-NEXT: s_sub_u32 s12, s2, s6 +; GFX6-NEXT: s_subb_u32 s13, s3, s6 ; GFX6-NEXT: s_ashr_i32 s2, s1, 31 ; GFX6-NEXT: s_add_u32 s0, s0, s2 ; GFX6-NEXT: s_mov_b32 s3, s2 ; GFX6-NEXT: s_addc_u32 s1, s1, s2 -; GFX6-NEXT: s_xor_b64 s[14:15], s[0:1], s[2:3] -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s14 -; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s15 -; GFX6-NEXT: s_sub_u32 s0, 0, s14 -; GFX6-NEXT: s_subb_u32 s1, 0, s15 -; GFX6-NEXT: s_ashr_i32 s12, s9, 31 +; GFX6-NEXT: s_xor_b64 s[6:7], s[0:1], s[2:3] +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s7 +; GFX6-NEXT: s_sub_u32 s8, 0, s6 +; GFX6-NEXT: s_subb_u32 s9, 0, s7 ; GFX6-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1 ; GFX6-NEXT: v_rcp_f32_e32 v0, v0 -; GFX6-NEXT: s_mov_b32 s13, s12 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GFX6-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GFX6-NEXT: v_trunc_f32_e32 v1, v1 ; GFX6-NEXT: v_mac_f32_e32 v0, 0xcf800000, v1 -; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: v_mul_lo_u32 v2, s0, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s0, v0 -; GFX6-NEXT: v_mul_lo_u32 v5, s1, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s0, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GFX6-NEXT: v_mul_hi_u32 v3, v0, v4 -; GFX6-NEXT: v_mul_lo_u32 v5, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v6, v1, v4 -; GFX6-NEXT: v_mul_lo_u32 v4, v1, v4 -; GFX6-NEXT: v_mul_hi_u32 v8, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v5, v6, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, s0, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s0, v0 -; GFX6-NEXT: v_mul_lo_u32 v4, s1, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, s0, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: v_mul_lo_u32 v6, v0, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, v0, v3 -; GFX6-NEXT: v_mul_hi_u32 v8, v0, v2 -; GFX6-NEXT: 
v_mul_hi_u32 v5, v1, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, v1, v3 -; GFX6-NEXT: v_mul_hi_u32 v4, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GFX6-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: s_add_u32 s0, s8, s12 -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: s_addc_u32 s1, s9, s12 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GFX6-NEXT: s_xor_b64 s[8:9], s[0:1], s[12:13] -; GFX6-NEXT: v_mul_lo_u32 v2, s8, v1 -; GFX6-NEXT: v_mul_hi_u32 v3, s8, v0 -; GFX6-NEXT: v_mul_hi_u32 v4, s8, v1 -; GFX6-NEXT: v_mul_hi_u32 v5, s9, v1 -; GFX6-NEXT: v_mul_lo_u32 v1, s9, v1 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_mul_lo_u32 v4, s9, v0 +; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX6-NEXT: v_mul_hi_u32 v2, s8, v0 +; GFX6-NEXT: v_readfirstlane_b32 s14, v1 +; GFX6-NEXT: v_readfirstlane_b32 s2, v0 +; GFX6-NEXT: s_mul_i32 s1, s8, s14 +; GFX6-NEXT: v_readfirstlane_b32 s3, v2 +; GFX6-NEXT: s_mul_i32 s0, s9, s2 +; GFX6-NEXT: s_add_i32 s1, s3, s1 +; GFX6-NEXT: s_add_i32 s3, s1, s0 +; GFX6-NEXT: s_mul_i32 s15, s8, s2 +; GFX6-NEXT: v_mul_hi_u32 v2, v0, s3 +; GFX6-NEXT: v_mul_hi_u32 v0, v0, s15 +; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; GFX6-NEXT: s_mul_i32 s4, s2, s3 +; GFX6-NEXT: v_readfirstlane_b32 s5, v2 +; GFX6-NEXT: v_readfirstlane_b32 s16, v0 +; GFX6-NEXT: v_mul_hi_u32 v0, v1, s15 +; GFX6-NEXT: v_mul_hi_u32 v1, v1, s3 +; GFX6-NEXT: s_add_u32 s4, s16, s4 +; GFX6-NEXT: s_addc_u32 s5, 0, s5 +; GFX6-NEXT: s_mul_i32 s15, s14, s15 +; GFX6-NEXT: v_readfirstlane_b32 s16, v0 +; GFX6-NEXT: s_add_u32 s4, s4, s15 +; GFX6-NEXT: s_addc_u32 s4, s5, s16 +; GFX6-NEXT: v_readfirstlane_b32 s5, v1 +; GFX6-NEXT: s_addc_u32 s5, s5, 0 +; GFX6-NEXT: s_mul_i32 s3, s14, s3 +; GFX6-NEXT: s_add_u32 s3, s4, s3 +; GFX6-NEXT: s_addc_u32 s4, 0, s5 +; GFX6-NEXT: s_add_u32 s5, s2, s3 +; GFX6-NEXT: v_mov_b32_e32 v0, s5 +; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0 +; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0 +; GFX6-NEXT: s_or_b32 s2, s2, s3 +; GFX6-NEXT: s_cmp_lg_u32 s2, 0 +; GFX6-NEXT: s_addc_u32 s4, s14, s4 +; GFX6-NEXT: s_mul_i32 s2, s8, s4 +; GFX6-NEXT: v_readfirstlane_b32 s3, v0 +; GFX6-NEXT: s_add_i32 s2, s3, s2 +; GFX6-NEXT: s_mul_i32 s9, s9, s5 +; GFX6-NEXT: s_mul_i32 s3, s8, s5 +; GFX6-NEXT: s_add_i32 s2, s2, s9 +; GFX6-NEXT: v_mov_b32_e32 v2, s3 +; GFX6-NEXT: v_mov_b32_e32 v0, s2 +; GFX6-NEXT: v_mul_hi_u32 v3, s4, v2 +; GFX6-NEXT: v_mul_hi_u32 v2, s5, v2 +; GFX6-NEXT: v_mul_hi_u32 v1, s4, v0 +; GFX6-NEXT: v_mul_hi_u32 v0, s5, v0 +; GFX6-NEXT: s_mul_i32 s9, s5, s2 +; GFX6-NEXT: v_readfirstlane_b32 s15, v2 +; GFX6-NEXT: s_add_u32 s9, s15, s9 +; GFX6-NEXT: v_readfirstlane_b32 s14, v0 +; GFX6-NEXT: s_mul_i32 s3, s4, s3 +; GFX6-NEXT: s_addc_u32 s14, 0, s14 +; GFX6-NEXT: v_readfirstlane_b32 s8, v3 +; GFX6-NEXT: s_add_u32 s3, s9, s3 +; GFX6-NEXT: s_addc_u32 s3, s14, s8 +; GFX6-NEXT: v_readfirstlane_b32 s8, v1 +; GFX6-NEXT: s_addc_u32 s8, s8, 0 +; GFX6-NEXT: s_mul_i32 s2, s4, s2 +; GFX6-NEXT: s_add_u32 s2, s3, s2 +; GFX6-NEXT: s_addc_u32 s8, 0, s8 +; GFX6-NEXT: s_add_u32 s14, s5, s2 +; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0 +; GFX6-NEXT: s_or_b32 s2, s2, s3 +; GFX6-NEXT: s_cmp_lg_u32 s2, 0 +; GFX6-NEXT: s_addc_u32 s15, s4, s8 +; GFX6-NEXT: s_ashr_i32 s4, s11, 31 +; GFX6-NEXT: 
s_add_u32 s2, s10, s4 +; GFX6-NEXT: s_mov_b32 s5, s4 +; GFX6-NEXT: s_addc_u32 s3, s11, s4 +; GFX6-NEXT: s_xor_b64 s[8:9], s[2:3], s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v0, s15 +; GFX6-NEXT: v_mul_hi_u32 v1, s8, v0 +; GFX6-NEXT: v_mov_b32_e32 v2, s14 +; GFX6-NEXT: v_mul_hi_u32 v3, s8, v2 +; GFX6-NEXT: s_mul_i32 s2, s8, s15 +; GFX6-NEXT: v_readfirstlane_b32 s10, v1 +; GFX6-NEXT: v_mul_hi_u32 v1, s9, v2 +; GFX6-NEXT: v_readfirstlane_b32 s11, v3 ; GFX6-NEXT: v_mul_hi_u32 v0, s9, v0 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc -; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc -; GFX6-NEXT: v_mul_lo_u32 v1, s14, v1 -; GFX6-NEXT: v_mul_hi_u32 v2, s14, v0 -; GFX6-NEXT: v_mul_lo_u32 v3, s15, v0 -; GFX6-NEXT: v_mul_lo_u32 v0, s14, v0 -; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2 -; GFX6-NEXT: v_add_i32_e32 v1, vcc, v3, v1 -; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s9, v1 -; GFX6-NEXT: v_mov_b32_e32 v3, s15 -; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s8, v0 -; GFX6-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, vcc -; GFX6-NEXT: v_subrev_i32_e64 v4, s[0:1], s14, v0 -; GFX6-NEXT: v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1] -; GFX6-NEXT: v_cmp_le_u32_e64 s[2:3], s15, v5 -; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3] -; GFX6-NEXT: v_cmp_le_u32_e64 s[2:3], s14, v4 -; GFX6-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1] -; GFX6-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[2:3] -; GFX6-NEXT: v_cmp_eq_u32_e64 s[2:3], s15, v5 -; GFX6-NEXT: v_subrev_i32_e64 v3, s[0:1], s14, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[2:3] -; GFX6-NEXT: v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1] -; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6 -; GFX6-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1] -; GFX6-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[0:1] -; GFX6-NEXT: s_ashr_i32 s0, s17, 31 -; GFX6-NEXT: s_add_u32 s2, s16, s0 -; GFX6-NEXT: s_mov_b32 s1, s0 -; GFX6-NEXT: s_addc_u32 s3, s17, s0 -; GFX6-NEXT: v_mov_b32_e32 v4, s9 -; GFX6-NEXT: s_xor_b64 s[8:9], s[2:3], s[0:1] -; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc -; GFX6-NEXT: v_cvt_f32_u32_e32 v4, s8 -; GFX6-NEXT: v_cvt_f32_u32_e32 v5, s9 -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s15, v1 -; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s14, v0 -; GFX6-NEXT: v_mac_f32_e32 v4, 0x4f800000, v5 -; GFX6-NEXT: v_rcp_f32_e32 v4, v4 -; GFX6-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, s15, v1 -; GFX6-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc -; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5 -; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX6-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v4 -; GFX6-NEXT: v_mul_f32_e32 v4, 0x2f800000, v2 -; GFX6-NEXT: v_trunc_f32_e32 v4, v4 -; GFX6-NEXT: v_mac_f32_e32 v2, 0xcf800000, v4 -; GFX6-NEXT: v_cvt_u32_f32_e32 v2, v2 -; GFX6-NEXT: v_cvt_u32_f32_e32 v4, v4 -; GFX6-NEXT: s_sub_u32 s0, 0, s8 -; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; GFX6-NEXT: v_mul_hi_u32 v3, s0, v2 -; GFX6-NEXT: v_mul_lo_u32 v5, s0, v4 -; GFX6-NEXT: s_subb_u32 s1, 0, s9 -; GFX6-NEXT: v_mul_lo_u32 v6, s1, v2 -; GFX6-NEXT: s_ashr_i32 s14, s11, 31 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v5, v3 -; GFX6-NEXT: v_mul_lo_u32 v5, s0, v2 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v6 -; GFX6-NEXT: v_mul_lo_u32 v6, v2, v3 -; GFX6-NEXT: v_mul_hi_u32 v7, v2, v5 -; GFX6-NEXT: v_mul_hi_u32 v8, v2, v3 -; GFX6-NEXT: v_mul_hi_u32 v9, v4, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, v4, v3 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GFX6-NEXT: 
v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GFX6-NEXT: v_mul_lo_u32 v8, v4, v5 -; GFX6-NEXT: v_mul_hi_u32 v5, v4, v5 -; GFX6-NEXT: s_mov_b32 s15, s14 -; GFX6-NEXT: v_xor_b32_e32 v0, s12, v0 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, v6, v8 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v7, v5, vcc -; GFX6-NEXT: v_addc_u32_e32 v6, vcc, 0, v9, vcc -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v5, v3 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v4, v5, vcc -; GFX6-NEXT: v_mul_lo_u32 v4, s0, v3 -; GFX6-NEXT: v_mul_hi_u32 v5, s0, v2 -; GFX6-NEXT: v_mul_lo_u32 v6, s1, v2 -; GFX6-NEXT: v_xor_b32_e32 v1, s12, v1 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v5 -; GFX6-NEXT: v_mul_lo_u32 v5, s0, v2 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v6 -; GFX6-NEXT: v_mul_lo_u32 v8, v2, v4 -; GFX6-NEXT: v_mul_hi_u32 v9, v2, v5 -; GFX6-NEXT: v_mul_hi_u32 v10, v2, v4 -; GFX6-NEXT: v_mul_hi_u32 v7, v3, v5 -; GFX6-NEXT: v_mul_lo_u32 v5, v3, v5 -; GFX6-NEXT: v_mul_hi_u32 v6, v3, v4 -; GFX6-NEXT: v_add_i32_e32 v8, vcc, v9, v8 -; GFX6-NEXT: v_addc_u32_e32 v9, vcc, 0, v10, vcc -; GFX6-NEXT: v_mul_lo_u32 v4, v3, v4 -; GFX6-NEXT: v_add_i32_e32 v5, vcc, v8, v5 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v9, v7, vcc -; GFX6-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v5, v4 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v6, vcc -; GFX6-NEXT: s_add_u32 s0, s10, s14 -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GFX6-NEXT: s_addc_u32 s1, s11, s14 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc -; GFX6-NEXT: s_xor_b64 s[10:11], s[0:1], s[14:15] -; GFX6-NEXT: v_mul_lo_u32 v4, s10, v3 -; GFX6-NEXT: v_mul_hi_u32 v5, s10, v2 -; GFX6-NEXT: v_mul_hi_u32 v7, s10, v3 -; GFX6-NEXT: v_mul_hi_u32 v8, s11, v3 -; GFX6-NEXT: v_mul_lo_u32 v3, s11, v3 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v5, v4 -; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GFX6-NEXT: v_mul_lo_u32 v7, s11, v2 -; GFX6-NEXT: v_mul_hi_u32 v2, s11, v2 -; GFX6-NEXT: v_mov_b32_e32 v6, s12 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v7 -; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v5, v2, vcc -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_mul_lo_u32 v3, s8, v3 -; GFX6-NEXT: v_mul_hi_u32 v4, s8, v2 -; GFX6-NEXT: v_mul_lo_u32 v5, s9, v2 -; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s12, v0 -; GFX6-NEXT: v_mul_lo_u32 v2, s8, v2 -; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v1, v6, vcc -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, v5, v3 -; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s11, v3 -; GFX6-NEXT: v_mov_b32_e32 v5, s9 -; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s10, v2 -; GFX6-NEXT: v_subb_u32_e64 v4, s[0:1], v4, v5, vcc -; GFX6-NEXT: v_subrev_i32_e64 v6, s[0:1], s8, v2 -; GFX6-NEXT: v_subbrev_u32_e64 v7, s[2:3], 0, v4, s[0:1] -; GFX6-NEXT: v_cmp_le_u32_e64 s[2:3], s9, v7 -; GFX6-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[2:3] -; GFX6-NEXT: v_cmp_le_u32_e64 s[2:3], s8, v6 -; GFX6-NEXT: v_subb_u32_e64 v4, s[0:1], v4, v5, s[0:1] -; GFX6-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[2:3] -; GFX6-NEXT: v_cmp_eq_u32_e64 s[2:3], s9, v7 -; GFX6-NEXT: v_subrev_i32_e64 v5, s[0:1], s8, v6 -; GFX6-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[2:3] -; GFX6-NEXT: v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1] -; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v8 -; GFX6-NEXT: v_cndmask_b32_e64 v5, v6, v5, s[0:1] -; GFX6-NEXT: v_mov_b32_e32 v6, s11 -; GFX6-NEXT: v_subb_u32_e32 v3, vcc, v6, v3, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s9, v3 -; 
GFX6-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc -; GFX6-NEXT: v_cmp_le_u32_e32 vcc, s8, v2 -; GFX6-NEXT: v_cndmask_b32_e64 v4, v7, v4, s[0:1] -; GFX6-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc -; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, s9, v3 -; GFX6-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6 -; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc -; GFX6-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc -; GFX6-NEXT: v_xor_b32_e32 v2, s14, v2 -; GFX6-NEXT: v_xor_b32_e32 v3, s14, v3 -; GFX6-NEXT: v_mov_b32_e32 v4, s14 -; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, s14, v2 -; GFX6-NEXT: v_subb_u32_e32 v3, vcc, v3, v4, vcc -; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; GFX6-NEXT: s_add_u32 s2, s11, s2 +; GFX6-NEXT: s_addc_u32 s10, 0, s10 +; GFX6-NEXT: s_mul_i32 s11, s9, s14 +; GFX6-NEXT: v_readfirstlane_b32 s14, v1 +; GFX6-NEXT: s_add_u32 s2, s2, s11 +; GFX6-NEXT: s_addc_u32 s2, s10, s14 +; GFX6-NEXT: v_readfirstlane_b32 s10, v0 +; GFX6-NEXT: s_addc_u32 s10, s10, 0 +; GFX6-NEXT: s_mul_i32 s11, s9, s15 +; GFX6-NEXT: s_add_u32 s11, s2, s11 +; GFX6-NEXT: v_mov_b32_e32 v0, s11 +; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0 +; GFX6-NEXT: s_addc_u32 s10, 0, s10 +; GFX6-NEXT: s_mul_i32 s10, s6, s10 +; GFX6-NEXT: s_mov_b32 s3, 0xf000 +; GFX6-NEXT: v_readfirstlane_b32 s14, v0 +; GFX6-NEXT: s_add_i32 s10, s14, s10 +; GFX6-NEXT: s_mul_i32 s14, s7, s11 +; GFX6-NEXT: s_add_i32 s14, s10, s14 +; GFX6-NEXT: s_sub_i32 s15, s9, s14 +; GFX6-NEXT: s_mul_i32 s10, s6, s11 +; GFX6-NEXT: s_sub_u32 s8, s8, s10 +; GFX6-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX6-NEXT: s_or_b32 s16, s10, s11 +; GFX6-NEXT: s_cmp_lg_u32 s16, 0 +; GFX6-NEXT: s_subb_u32 s15, s15, s7 +; GFX6-NEXT: s_sub_u32 s17, s8, s6 +; GFX6-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX6-NEXT: s_or_b32 s10, s10, s11 +; GFX6-NEXT: s_cmp_lg_u32 s10, 0 +; GFX6-NEXT: s_subb_u32 s18, s15, 0 +; GFX6-NEXT: s_cmp_ge_u32 s18, s7 +; GFX6-NEXT: s_cselect_b32 s11, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s17, s6 +; GFX6-NEXT: s_cselect_b32 s19, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s18, s7 +; GFX6-NEXT: s_cselect_b32 s19, s19, s11 +; GFX6-NEXT: s_cmp_lg_u32 s10, 0 +; GFX6-NEXT: s_subb_u32 s15, s15, s7 +; GFX6-NEXT: s_sub_u32 s20, s17, s6 +; GFX6-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX6-NEXT: s_or_b32 s10, s10, s11 +; GFX6-NEXT: s_cmp_lg_u32 s10, 0 +; GFX6-NEXT: s_subb_u32 s10, s15, 0 +; GFX6-NEXT: s_cmp_lg_u32 s19, 0 +; GFX6-NEXT: s_cselect_b32 s11, s20, s17 +; GFX6-NEXT: s_cselect_b32 s10, s10, s18 +; GFX6-NEXT: s_cmp_lg_u32 s16, 0 +; GFX6-NEXT: s_subb_u32 s9, s9, s14 +; GFX6-NEXT: s_cmp_ge_u32 s9, s7 +; GFX6-NEXT: s_cselect_b32 s14, -1, 0 +; GFX6-NEXT: s_cmp_ge_u32 s8, s6 +; GFX6-NEXT: s_cselect_b32 s6, -1, 0 +; GFX6-NEXT: s_cmp_eq_u32 s9, s7 +; GFX6-NEXT: s_cselect_b32 s6, s6, s14 +; GFX6-NEXT: s_cmp_lg_u32 s6, 0 +; GFX6-NEXT: s_cselect_b32 s7, s10, s9 +; GFX6-NEXT: s_cselect_b32 s6, s11, s8 +; GFX6-NEXT: s_xor_b64 s[6:7], s[6:7], s[4:5] +; GFX6-NEXT: s_sub_u32 s5, s6, s4 +; GFX6-NEXT: s_subb_u32 s4, s7, s4 +; GFX6-NEXT: s_mov_b32 s2, -1 +; GFX6-NEXT: v_mov_b32_e32 v0, s12 +; GFX6-NEXT: v_mov_b32_e32 v1, s13 +; GFX6-NEXT: v_mov_b32_e32 v2, s5 +; GFX6-NEXT: v_mov_b32_e32 v3, s4 +; GFX6-NEXT: s_waitcnt lgkmcnt(0) +; GFX6-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0 ; GFX6-NEXT: s_endpgm ; ; GFX9-LABEL: srem_v2i64_pow2_shl_denom: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34 -; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24 -; GFX9-NEXT: v_mov_b32_e32 v4, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshl_b64 s[0:1], 0x1000, s12 -; GFX9-NEXT: 
s_lshl_b64 s[14:15], 0x1000, s14 -; GFX9-NEXT: s_ashr_i32 s2, s1, 31 -; GFX9-NEXT: s_add_u32 s0, s0, s2 -; GFX9-NEXT: s_mov_b32 s3, s2 -; GFX9-NEXT: s_addc_u32 s1, s1, s2 -; GFX9-NEXT: s_xor_b64 s[12:13], s[0:1], s[2:3] -; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s12 -; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s13 -; GFX9-NEXT: s_sub_u32 s0, 0, s12 -; GFX9-NEXT: s_subb_u32 s1, 0, s13 +; GFX9-NEXT: s_lshl_b64 s[2:3], 0x1000, s12 +; GFX9-NEXT: s_lshl_b64 s[0:1], 0x1000, s14 +; GFX9-NEXT: s_ashr_i32 s6, s3, 31 +; GFX9-NEXT: s_add_u32 s2, s2, s6 +; GFX9-NEXT: s_mov_b32 s7, s6 +; GFX9-NEXT: s_addc_u32 s3, s3, s6 +; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7] +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s2 +; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s3 +; GFX9-NEXT: s_sub_u32 s12, 0, s2 +; GFX9-NEXT: s_subb_u32 s13, 0, s3 ; GFX9-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1 ; GFX9-NEXT: v_rcp_f32_e32 v0, v0 ; GFX9-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -9627,264 +9800,257 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x ; GFX9-NEXT: v_mac_f32_e32 v0, 0xcf800000, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX9-NEXT: v_readfirstlane_b32 s2, v1 -; GFX9-NEXT: v_readfirstlane_b32 s3, v0 -; GFX9-NEXT: s_mul_i32 s4, s0, s2 -; GFX9-NEXT: s_mul_hi_u32 s16, s0, s3 -; GFX9-NEXT: s_mul_i32 s5, s1, s3 -; GFX9-NEXT: s_add_i32 s4, s16, s4 -; GFX9-NEXT: s_mul_i32 s17, s0, s3 -; GFX9-NEXT: s_add_i32 s4, s4, s5 -; GFX9-NEXT: s_mul_hi_u32 s5, s3, s4 -; GFX9-NEXT: s_mul_i32 s16, s3, s4 -; GFX9-NEXT: s_mul_hi_u32 s3, s3, s17 -; GFX9-NEXT: s_add_u32 s3, s3, s16 -; GFX9-NEXT: s_addc_u32 s5, 0, s5 -; GFX9-NEXT: s_mul_hi_u32 s18, s2, s17 -; GFX9-NEXT: s_mul_i32 s17, s2, s17 -; GFX9-NEXT: s_add_u32 s3, s3, s17 -; GFX9-NEXT: s_mul_hi_u32 s16, s2, s4 -; GFX9-NEXT: s_addc_u32 s3, s5, s18 -; GFX9-NEXT: s_addc_u32 s5, s16, 0 -; GFX9-NEXT: s_mul_i32 s4, s2, s4 -; GFX9-NEXT: s_add_u32 s3, s3, s4 -; GFX9-NEXT: s_addc_u32 s4, 0, s5 -; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s3, v0 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s2, s2, s4 -; GFX9-NEXT: v_readfirstlane_b32 s4, v0 -; GFX9-NEXT: s_mul_i32 s3, s0, s2 -; GFX9-NEXT: s_mul_hi_u32 s5, s0, s4 -; GFX9-NEXT: s_add_i32 s3, s5, s3 -; GFX9-NEXT: s_mul_i32 s1, s1, s4 -; GFX9-NEXT: s_add_i32 s3, s3, s1 -; GFX9-NEXT: s_mul_i32 s0, s0, s4 -; GFX9-NEXT: s_mul_hi_u32 s5, s2, s0 -; GFX9-NEXT: s_mul_i32 s16, s2, s0 -; GFX9-NEXT: s_mul_i32 s18, s4, s3 -; GFX9-NEXT: s_mul_hi_u32 s0, s4, s0 -; GFX9-NEXT: s_mul_hi_u32 s17, s4, s3 -; GFX9-NEXT: s_add_u32 s0, s0, s18 -; GFX9-NEXT: s_addc_u32 s4, 0, s17 -; GFX9-NEXT: s_add_u32 s0, s0, s16 -; GFX9-NEXT: s_mul_hi_u32 s1, s2, s3 -; GFX9-NEXT: s_addc_u32 s0, s4, s5 -; GFX9-NEXT: s_addc_u32 s1, s1, 0 -; GFX9-NEXT: s_mul_i32 s3, s2, s3 -; GFX9-NEXT: s_add_u32 s0, s0, s3 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s2, s2, s1 -; GFX9-NEXT: s_ashr_i32 s16, s9, 31 -; GFX9-NEXT: s_add_u32 s0, s8, s16 -; GFX9-NEXT: s_mov_b32 s17, s16 -; GFX9-NEXT: s_addc_u32 s1, s9, s16 -; GFX9-NEXT: s_xor_b64 s[4:5], s[0:1], s[16:17] -; GFX9-NEXT: v_readfirstlane_b32 s3, v0 -; GFX9-NEXT: s_mul_i32 s1, s4, s2 -; GFX9-NEXT: s_mul_hi_u32 s8, s4, s3 -; GFX9-NEXT: s_mul_hi_u32 s0, s4, s2 -; GFX9-NEXT: s_add_u32 s1, s8, s1 -; GFX9-NEXT: s_addc_u32 s0, 0, s0 -; GFX9-NEXT: s_mul_hi_u32 s9, s5, s3 -; GFX9-NEXT: s_mul_i32 s3, s5, s3 -; GFX9-NEXT: s_add_u32 s1, s1, s3 -; GFX9-NEXT: s_mul_hi_u32 s8, s5, s2 -; GFX9-NEXT: s_addc_u32 s0, s0, s9 
-; GFX9-NEXT: s_addc_u32 s1, s8, 0 -; GFX9-NEXT: s_mul_i32 s2, s5, s2 +; GFX9-NEXT: v_readfirstlane_b32 s14, v1 +; GFX9-NEXT: v_readfirstlane_b32 s6, v0 +; GFX9-NEXT: s_mul_i32 s7, s12, s14 +; GFX9-NEXT: s_mul_hi_u32 s16, s12, s6 +; GFX9-NEXT: s_mul_i32 s15, s13, s6 +; GFX9-NEXT: s_add_i32 s7, s16, s7 +; GFX9-NEXT: s_mul_i32 s17, s12, s6 +; GFX9-NEXT: s_add_i32 s7, s7, s15 +; GFX9-NEXT: s_mul_hi_u32 s16, s6, s17 +; GFX9-NEXT: s_mul_i32 s18, s6, s7 +; GFX9-NEXT: s_mul_hi_u32 s15, s6, s7 +; GFX9-NEXT: s_add_u32 s16, s16, s18 +; GFX9-NEXT: s_addc_u32 s15, 0, s15 +; GFX9-NEXT: s_mul_hi_u32 s18, s14, s17 +; GFX9-NEXT: s_mul_i32 s17, s14, s17 +; GFX9-NEXT: s_add_u32 s16, s16, s17 +; GFX9-NEXT: s_mul_hi_u32 s19, s14, s7 +; GFX9-NEXT: s_addc_u32 s15, s15, s18 +; GFX9-NEXT: s_addc_u32 s16, s19, 0 +; GFX9-NEXT: s_mul_i32 s7, s14, s7 +; GFX9-NEXT: s_add_u32 s7, s15, s7 +; GFX9-NEXT: s_addc_u32 s15, 0, s16 +; GFX9-NEXT: s_add_u32 s16, s6, s7 +; GFX9-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[6:7], 0 +; GFX9-NEXT: s_addc_u32 s14, s14, s15 +; GFX9-NEXT: s_mul_i32 s6, s12, s14 +; GFX9-NEXT: s_mul_hi_u32 s7, s12, s16 +; GFX9-NEXT: s_add_i32 s6, s7, s6 +; GFX9-NEXT: s_mul_i32 s13, s13, s16 +; GFX9-NEXT: s_add_i32 s6, s6, s13 +; GFX9-NEXT: s_mul_i32 s12, s12, s16 +; GFX9-NEXT: s_mul_hi_u32 s13, s14, s12 +; GFX9-NEXT: s_mul_i32 s15, s14, s12 +; GFX9-NEXT: s_mul_i32 s18, s16, s6 +; GFX9-NEXT: s_mul_hi_u32 s12, s16, s12 +; GFX9-NEXT: s_mul_hi_u32 s17, s16, s6 +; GFX9-NEXT: s_add_u32 s12, s12, s18 +; GFX9-NEXT: s_addc_u32 s17, 0, s17 +; GFX9-NEXT: s_add_u32 s12, s12, s15 +; GFX9-NEXT: s_mul_hi_u32 s7, s14, s6 +; GFX9-NEXT: s_addc_u32 s12, s17, s13 +; GFX9-NEXT: s_addc_u32 s7, s7, 0 +; GFX9-NEXT: s_mul_i32 s6, s14, s6 +; GFX9-NEXT: s_add_u32 s6, s12, s6 +; GFX9-NEXT: s_addc_u32 s12, 0, s7 +; GFX9-NEXT: s_add_u32 s13, s16, s6 +; GFX9-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[6:7], 0 +; GFX9-NEXT: s_addc_u32 s12, s14, s12 +; GFX9-NEXT: s_ashr_i32 s6, s9, 31 +; GFX9-NEXT: s_add_u32 s8, s8, s6 +; GFX9-NEXT: s_mov_b32 s7, s6 +; GFX9-NEXT: s_addc_u32 s9, s9, s6 +; GFX9-NEXT: s_xor_b64 s[8:9], s[8:9], s[6:7] +; GFX9-NEXT: s_mul_i32 s15, s8, s12 +; GFX9-NEXT: s_mul_hi_u32 s16, s8, s13 +; GFX9-NEXT: s_mul_hi_u32 s14, s8, s12 +; GFX9-NEXT: s_add_u32 s15, s16, s15 +; GFX9-NEXT: s_addc_u32 s14, 0, s14 +; GFX9-NEXT: s_mul_hi_u32 s17, s9, s13 +; GFX9-NEXT: s_mul_i32 s13, s9, s13 +; GFX9-NEXT: s_add_u32 s13, s15, s13 +; GFX9-NEXT: s_mul_hi_u32 s16, s9, s12 +; GFX9-NEXT: s_addc_u32 s13, s14, s17 +; GFX9-NEXT: s_addc_u32 s14, s16, 0 +; GFX9-NEXT: s_mul_i32 s12, s9, s12 +; GFX9-NEXT: s_add_u32 s12, s13, s12 +; GFX9-NEXT: s_addc_u32 s13, 0, s14 +; GFX9-NEXT: s_mul_i32 s13, s2, s13 +; GFX9-NEXT: s_mul_hi_u32 s14, s2, s12 +; GFX9-NEXT: s_add_i32 s13, s14, s13 +; GFX9-NEXT: s_mul_i32 s14, s3, s12 +; GFX9-NEXT: s_add_i32 s16, s13, s14 +; GFX9-NEXT: s_sub_i32 s14, s9, s16 +; GFX9-NEXT: s_mul_i32 s12, s2, s12 +; GFX9-NEXT: s_sub_u32 s8, s8, s12 +; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GFX9-NEXT: s_subb_u32 s17, s14, s3 +; GFX9-NEXT: s_sub_u32 s18, s8, s2 +; GFX9-NEXT: s_cselect_b64 s[14:15], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0 +; GFX9-NEXT: s_subb_u32 s19, s17, 0 +; GFX9-NEXT: s_cmp_ge_u32 s19, s3 +; GFX9-NEXT: s_cselect_b32 s20, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s18, s2 +; GFX9-NEXT: s_cselect_b32 s21, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s19, s3 +; GFX9-NEXT: s_cselect_b32 s20, s21, s20 +; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0 +; GFX9-NEXT: 
s_subb_u32 s17, s17, s3 +; GFX9-NEXT: s_sub_u32 s21, s18, s2 +; GFX9-NEXT: s_cselect_b64 s[14:15], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0 +; GFX9-NEXT: s_subb_u32 s14, s17, 0 +; GFX9-NEXT: s_cmp_lg_u32 s20, 0 +; GFX9-NEXT: s_cselect_b32 s15, s21, s18 +; GFX9-NEXT: s_cselect_b32 s14, s14, s19 +; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GFX9-NEXT: s_subb_u32 s9, s9, s16 +; GFX9-NEXT: s_cmp_ge_u32 s9, s3 +; GFX9-NEXT: s_cselect_b32 s12, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s8, s2 +; GFX9-NEXT: s_cselect_b32 s2, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s9, s3 +; GFX9-NEXT: s_cselect_b32 s2, s2, s12 +; GFX9-NEXT: s_cmp_lg_u32 s2, 0 +; GFX9-NEXT: s_cselect_b32 s3, s14, s9 +; GFX9-NEXT: s_cselect_b32 s2, s15, s8 +; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7] +; GFX9-NEXT: s_sub_u32 s12, s2, s6 +; GFX9-NEXT: s_subb_u32 s13, s3, s6 +; GFX9-NEXT: s_ashr_i32 s2, s1, 31 ; GFX9-NEXT: s_add_u32 s0, s0, s2 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: s_mul_i32 s1, s12, s1 -; GFX9-NEXT: s_mul_hi_u32 s2, s12, s0 -; GFX9-NEXT: s_add_i32 s1, s2, s1 -; GFX9-NEXT: s_mul_i32 s2, s13, s0 -; GFX9-NEXT: s_mul_i32 s0, s12, s0 -; GFX9-NEXT: s_add_i32 s8, s1, s2 -; GFX9-NEXT: v_mov_b32_e32 v0, s0 -; GFX9-NEXT: s_sub_i32 s1, s5, s8 -; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s4, v0 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_subb_u32 s4, s1, s13 -; GFX9-NEXT: v_subrev_co_u32_e64 v1, s[0:1], s12, v0 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s9, s4, 0 -; GFX9-NEXT: s_cmp_ge_u32 s9, s13 -; GFX9-NEXT: s_cselect_b32 s17, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v1 -; GFX9-NEXT: s_cmp_eq_u32 s9, s13 -; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[2:3] -; GFX9-NEXT: v_mov_b32_e32 v3, s17 -; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: v_cndmask_b32_e64 v2, v3, v2, s[2:3] -; GFX9-NEXT: s_subb_u32 s2, s4, s13 -; GFX9-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s12, v1 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s2, s2, 0 -; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v2 -; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v2, s9 -; GFX9-NEXT: v_mov_b32_e32 v3, s2 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GFX9-NEXT: s_subb_u32 s0, s5, s8 -; GFX9-NEXT: s_cmp_ge_u32 s0, s13 -; GFX9-NEXT: s_cselect_b32 s1, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s12, v0 -; GFX9-NEXT: s_cmp_eq_u32 s0, s13 -; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc -; GFX9-NEXT: v_mov_b32_e32 v5, s1 -; GFX9-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc -; GFX9-NEXT: v_mov_b32_e32 v5, s0 -; GFX9-NEXT: s_ashr_i32 s0, s15, 31 -; GFX9-NEXT: s_add_u32 s2, s14, s0 -; GFX9-NEXT: s_mov_b32 s1, s0 -; GFX9-NEXT: s_addc_u32 s3, s15, s0 -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; GFX9-NEXT: s_xor_b64 s[4:5], s[2:3], s[0:1] -; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s4 -; GFX9-NEXT: v_cvt_f32_u32_e32 v3, s5 -; GFX9-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc -; GFX9-NEXT: v_xor_b32_e32 v0, s16, v0 -; GFX9-NEXT: v_xor_b32_e32 v2, s16, v2 -; GFX9-NEXT: v_mac_f32_e32 v1, 0x4f800000, v3 -; GFX9-NEXT: v_rcp_f32_e32 v3, v1 -; GFX9-NEXT: v_mov_b32_e32 v5, s16 -; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, s16, v0 -; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v2, v5, vcc -; GFX9-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v3 -; GFX9-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2 -; GFX9-NEXT: v_trunc_f32_e32 v3, v3 -; GFX9-NEXT: v_mac_f32_e32 v2, 0xcf800000, v3 +; GFX9-NEXT: s_mov_b32 s3, 
s2 +; GFX9-NEXT: s_addc_u32 s1, s1, s2 +; GFX9-NEXT: s_xor_b64 s[2:3], s[0:1], s[2:3] +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s2 +; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s3 +; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX9-NEXT: s_sub_u32 s6, 0, s2 +; GFX9-NEXT: s_subb_u32 s7, 0, s3 +; GFX9-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1 +; GFX9-NEXT: v_rcp_f32_e32 v1, v0 +; GFX9-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-NEXT: v_mul_f32_e32 v1, 0x5f7ffffc, v1 +; GFX9-NEXT: v_mul_f32_e32 v2, 0x2f800000, v1 +; GFX9-NEXT: v_trunc_f32_e32 v2, v2 +; GFX9-NEXT: v_mac_f32_e32 v1, 0xcf800000, v2 +; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2 -; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3 -; GFX9-NEXT: s_sub_u32 s0, 0, s4 -; GFX9-NEXT: s_subb_u32 s1, 0, s5 -; GFX9-NEXT: v_readfirstlane_b32 s2, v2 -; GFX9-NEXT: v_readfirstlane_b32 s9, v3 -; GFX9-NEXT: s_mul_hi_u32 s8, s0, s2 -; GFX9-NEXT: s_mul_i32 s12, s0, s9 -; GFX9-NEXT: s_mul_i32 s3, s1, s2 -; GFX9-NEXT: s_add_i32 s8, s8, s12 -; GFX9-NEXT: s_add_i32 s8, s8, s3 -; GFX9-NEXT: s_mul_i32 s13, s0, s2 -; GFX9-NEXT: s_mul_hi_u32 s3, s2, s8 -; GFX9-NEXT: s_mul_i32 s12, s2, s8 -; GFX9-NEXT: s_mul_hi_u32 s2, s2, s13 -; GFX9-NEXT: s_add_u32 s2, s2, s12 -; GFX9-NEXT: s_addc_u32 s3, 0, s3 -; GFX9-NEXT: s_mul_hi_u32 s14, s9, s13 -; GFX9-NEXT: s_mul_i32 s13, s9, s13 -; GFX9-NEXT: s_add_u32 s2, s2, s13 -; GFX9-NEXT: s_mul_hi_u32 s12, s9, s8 -; GFX9-NEXT: s_addc_u32 s2, s3, s14 -; GFX9-NEXT: s_addc_u32 s3, s12, 0 +; GFX9-NEXT: v_readfirstlane_b32 s4, v1 +; GFX9-NEXT: v_readfirstlane_b32 s9, v2 +; GFX9-NEXT: s_mul_hi_u32 s8, s6, s4 +; GFX9-NEXT: s_mul_i32 s14, s6, s9 +; GFX9-NEXT: s_mul_i32 s5, s7, s4 +; GFX9-NEXT: s_add_i32 s8, s8, s14 +; GFX9-NEXT: s_add_i32 s8, s8, s5 +; GFX9-NEXT: s_mul_i32 s15, s6, s4 +; GFX9-NEXT: s_mul_i32 s14, s4, s8 +; GFX9-NEXT: s_mul_hi_u32 s16, s4, s15 +; GFX9-NEXT: s_mul_hi_u32 s5, s4, s8 +; GFX9-NEXT: s_add_u32 s14, s16, s14 +; GFX9-NEXT: s_addc_u32 s5, 0, s5 +; GFX9-NEXT: s_mul_hi_u32 s17, s9, s15 +; GFX9-NEXT: s_mul_i32 s15, s9, s15 +; GFX9-NEXT: s_add_u32 s14, s14, s15 +; GFX9-NEXT: s_mul_hi_u32 s16, s9, s8 +; GFX9-NEXT: s_addc_u32 s5, s5, s17 +; GFX9-NEXT: s_addc_u32 s14, s16, 0 ; GFX9-NEXT: s_mul_i32 s8, s9, s8 -; GFX9-NEXT: s_add_u32 s2, s2, s8 -; GFX9-NEXT: s_addc_u32 s3, 0, s3 -; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s2, s9, s3 -; GFX9-NEXT: v_readfirstlane_b32 s8, v2 -; GFX9-NEXT: s_mul_i32 s3, s0, s2 -; GFX9-NEXT: s_mul_hi_u32 s9, s0, s8 -; GFX9-NEXT: s_add_i32 s3, s9, s3 -; GFX9-NEXT: s_mul_i32 s1, s1, s8 -; GFX9-NEXT: s_add_i32 s3, s3, s1 -; GFX9-NEXT: s_mul_i32 s0, s0, s8 -; GFX9-NEXT: s_mul_hi_u32 s9, s2, s0 -; GFX9-NEXT: s_mul_i32 s12, s2, s0 -; GFX9-NEXT: s_mul_i32 s14, s8, s3 -; GFX9-NEXT: s_mul_hi_u32 s0, s8, s0 -; GFX9-NEXT: s_mul_hi_u32 s13, s8, s3 -; GFX9-NEXT: s_add_u32 s0, s0, s14 -; GFX9-NEXT: s_addc_u32 s8, 0, s13 -; GFX9-NEXT: s_add_u32 s0, s0, s12 -; GFX9-NEXT: s_mul_hi_u32 s1, s2, s3 -; GFX9-NEXT: s_addc_u32 s0, s8, s9 -; GFX9-NEXT: s_addc_u32 s1, s1, 0 -; GFX9-NEXT: s_mul_i32 s3, s2, s3 -; GFX9-NEXT: s_add_u32 s0, s0, s3 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s2, s2, s1 -; GFX9-NEXT: s_ashr_i32 s8, s11, 31 -; GFX9-NEXT: s_add_u32 s0, s10, s8 -; GFX9-NEXT: s_mov_b32 s9, s8 -; GFX9-NEXT: s_addc_u32 s1, s11, s8 -; GFX9-NEXT: s_xor_b64 s[10:11], s[0:1], s[8:9] -; GFX9-NEXT: v_readfirstlane_b32 s3, v2 -; GFX9-NEXT: s_mul_i32 s1, s10, s2 -; 
GFX9-NEXT: s_mul_hi_u32 s9, s10, s3 -; GFX9-NEXT: s_mul_hi_u32 s0, s10, s2 -; GFX9-NEXT: s_add_u32 s1, s9, s1 -; GFX9-NEXT: s_addc_u32 s0, 0, s0 -; GFX9-NEXT: s_mul_hi_u32 s12, s11, s3 -; GFX9-NEXT: s_mul_i32 s3, s11, s3 -; GFX9-NEXT: s_add_u32 s1, s1, s3 -; GFX9-NEXT: s_mul_hi_u32 s9, s11, s2 -; GFX9-NEXT: s_addc_u32 s0, s0, s12 -; GFX9-NEXT: s_addc_u32 s1, s9, 0 -; GFX9-NEXT: s_mul_i32 s2, s11, s2 -; GFX9-NEXT: s_add_u32 s0, s0, s2 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: s_mul_i32 s1, s4, s1 -; GFX9-NEXT: s_mul_hi_u32 s2, s4, s0 -; GFX9-NEXT: s_add_i32 s1, s2, s1 -; GFX9-NEXT: s_mul_i32 s2, s5, s0 -; GFX9-NEXT: s_mul_i32 s0, s4, s0 -; GFX9-NEXT: s_add_i32 s9, s1, s2 -; GFX9-NEXT: v_mov_b32_e32 v2, s0 -; GFX9-NEXT: s_sub_i32 s1, s11, s9 -; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s10, v2 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_subb_u32 s10, s1, s5 -; GFX9-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s4, v2 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s12, s10, 0 -; GFX9-NEXT: s_cmp_ge_u32 s12, s5 -; GFX9-NEXT: s_cselect_b32 s13, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e64 s[2:3], s4, v3 -; GFX9-NEXT: s_cmp_eq_u32 s12, s5 -; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[2:3] -; GFX9-NEXT: v_mov_b32_e32 v6, s13 -; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: v_cndmask_b32_e64 v5, v6, v5, s[2:3] -; GFX9-NEXT: s_subb_u32 s2, s10, s5 -; GFX9-NEXT: v_subrev_co_u32_e64 v6, s[0:1], s4, v3 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s2, s2, 0 -; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v5 -; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v6, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v5, s12 -; GFX9-NEXT: v_mov_b32_e32 v6, s2 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[0:1] -; GFX9-NEXT: s_subb_u32 s0, s11, s9 -; GFX9-NEXT: s_cmp_ge_u32 s0, s5 -; GFX9-NEXT: s_cselect_b32 s1, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s4, v2 -; GFX9-NEXT: s_cmp_eq_u32 s0, s5 -; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc -; GFX9-NEXT: v_mov_b32_e32 v7, s1 -; GFX9-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v6, vcc -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6 -; GFX9-NEXT: v_mov_b32_e32 v7, s0 -; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v5, v7, v5, vcc -; GFX9-NEXT: v_xor_b32_e32 v2, s8, v2 -; GFX9-NEXT: v_xor_b32_e32 v3, s8, v5 -; GFX9-NEXT: v_mov_b32_e32 v5, s8 -; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s8, v2 -; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v5, vcc -; GFX9-NEXT: global_store_dwordx4 v4, v[0:3], s[6:7] +; GFX9-NEXT: s_add_u32 s5, s5, s8 +; GFX9-NEXT: s_addc_u32 s8, 0, s14 +; GFX9-NEXT: s_add_u32 s14, s4, s5 +; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9-NEXT: s_addc_u32 s8, s9, s8 +; GFX9-NEXT: s_mul_i32 s4, s6, s8 +; GFX9-NEXT: s_mul_hi_u32 s5, s6, s14 +; GFX9-NEXT: s_add_i32 s4, s5, s4 +; GFX9-NEXT: s_mul_i32 s7, s7, s14 +; GFX9-NEXT: s_add_i32 s4, s4, s7 +; GFX9-NEXT: s_mul_i32 s6, s6, s14 +; GFX9-NEXT: s_mul_hi_u32 s7, s8, s6 +; GFX9-NEXT: s_mul_i32 s9, s8, s6 +; GFX9-NEXT: s_mul_i32 s16, s14, s4 +; GFX9-NEXT: s_mul_hi_u32 s6, s14, s6 +; GFX9-NEXT: s_mul_hi_u32 s15, s14, s4 +; GFX9-NEXT: s_add_u32 s6, s6, s16 +; GFX9-NEXT: s_addc_u32 s15, 0, s15 +; GFX9-NEXT: s_add_u32 s6, s6, s9 +; GFX9-NEXT: s_mul_hi_u32 s5, s8, s4 +; GFX9-NEXT: s_addc_u32 s6, s15, s7 +; GFX9-NEXT: s_addc_u32 s5, s5, 0 +; GFX9-NEXT: s_mul_i32 s4, s8, s4 +; GFX9-NEXT: s_add_u32 s4, s6, s4 +; GFX9-NEXT: s_addc_u32 s6, 0, s5 +; GFX9-NEXT: 
s_add_u32 s9, s14, s4 +; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9-NEXT: s_addc_u32 s8, s8, s6 +; GFX9-NEXT: s_ashr_i32 s4, s11, 31 +; GFX9-NEXT: s_add_u32 s6, s10, s4 +; GFX9-NEXT: s_mov_b32 s5, s4 +; GFX9-NEXT: s_addc_u32 s7, s11, s4 +; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], s[4:5] +; GFX9-NEXT: s_mul_i32 s11, s6, s8 +; GFX9-NEXT: s_mul_hi_u32 s14, s6, s9 +; GFX9-NEXT: s_mul_hi_u32 s10, s6, s8 +; GFX9-NEXT: s_add_u32 s11, s14, s11 +; GFX9-NEXT: s_addc_u32 s10, 0, s10 +; GFX9-NEXT: s_mul_hi_u32 s15, s7, s9 +; GFX9-NEXT: s_mul_i32 s9, s7, s9 +; GFX9-NEXT: s_add_u32 s9, s11, s9 +; GFX9-NEXT: s_mul_hi_u32 s14, s7, s8 +; GFX9-NEXT: s_addc_u32 s9, s10, s15 +; GFX9-NEXT: s_addc_u32 s10, s14, 0 +; GFX9-NEXT: s_mul_i32 s8, s7, s8 +; GFX9-NEXT: s_add_u32 s8, s9, s8 +; GFX9-NEXT: s_addc_u32 s9, 0, s10 +; GFX9-NEXT: s_mul_i32 s9, s2, s9 +; GFX9-NEXT: s_mul_hi_u32 s10, s2, s8 +; GFX9-NEXT: s_add_i32 s9, s10, s9 +; GFX9-NEXT: s_mul_i32 s10, s3, s8 +; GFX9-NEXT: s_add_i32 s14, s9, s10 +; GFX9-NEXT: s_sub_i32 s10, s7, s14 +; GFX9-NEXT: s_mul_i32 s8, s2, s8 +; GFX9-NEXT: s_sub_u32 s6, s6, s8 +; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX9-NEXT: s_subb_u32 s15, s10, s3 +; GFX9-NEXT: s_sub_u32 s16, s6, s2 +; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s17, s15, 0 +; GFX9-NEXT: s_cmp_ge_u32 s17, s3 +; GFX9-NEXT: s_cselect_b32 s18, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s16, s2 +; GFX9-NEXT: s_cselect_b32 s19, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s17, s3 +; GFX9-NEXT: s_cselect_b32 s18, s19, s18 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s15, s15, s3 +; GFX9-NEXT: s_sub_u32 s19, s16, s2 +; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s10, s15, 0 +; GFX9-NEXT: s_cmp_lg_u32 s18, 0 +; GFX9-NEXT: s_cselect_b32 s11, s19, s16 +; GFX9-NEXT: s_cselect_b32 s10, s10, s17 +; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX9-NEXT: s_subb_u32 s7, s7, s14 +; GFX9-NEXT: s_cmp_ge_u32 s7, s3 +; GFX9-NEXT: s_cselect_b32 s8, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s6, s2 +; GFX9-NEXT: s_cselect_b32 s2, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s7, s3 +; GFX9-NEXT: s_cselect_b32 s2, s2, s8 +; GFX9-NEXT: s_cmp_lg_u32 s2, 0 +; GFX9-NEXT: s_cselect_b32 s3, s10, s7 +; GFX9-NEXT: s_cselect_b32 s2, s11, s6 +; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5] +; GFX9-NEXT: s_sub_u32 s2, s2, s4 +; GFX9-NEXT: s_subb_u32 s3, s3, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s12 +; GFX9-NEXT: v_mov_b32_e32 v2, s13 +; GFX9-NEXT: v_mov_b32_e32 v3, s2 +; GFX9-NEXT: v_mov_b32_e32 v4, s3 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: global_store_dwordx4 v0, v[1:4], s[0:1] ; GFX9-NEXT: s_endpgm %shl.y = shl <2 x i64> , %y %r = srem <2 x i64> %x, %shl.y diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll index 97df2a0dbd44b..258bc2959f391 100644 --- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll +++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll @@ -5548,7 +5548,6 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX7LESS: ; %bb.0: ; %entry ; GFX7LESS-NEXT: s_mov_b64 s[6:7], exec ; GFX7LESS-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GFX7LESS-NEXT: s_mov_b32 s4, 0 ; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0 ; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v4, s7, v0 ; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 @@ 
-5557,33 +5556,32 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX7LESS-NEXT: s_cbranch_execz .LBB9_4 ; GFX7LESS-NEXT: ; %bb.1: ; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0) -; GFX7LESS-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0 -; GFX7LESS-NEXT: s_bcnt1_i32_b64 s5, s[6:7] +; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 +; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX7LESS-NEXT: s_mov_b64 s[10:11], 0 -; GFX7LESS-NEXT: v_mov_b32_e32 v5, s4 ; GFX7LESS-NEXT: s_mov_b32 s7, 0xf000 -; GFX7LESS-NEXT: s_mul_i32 s12, s5, 5 +; GFX7LESS-NEXT: s_mul_i32 s12, s6, 5 ; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0) -; GFX7LESS-NEXT: v_mov_b32_e32 v0, s14 -; GFX7LESS-NEXT: v_mov_b32_e32 v1, s15 +; GFX7LESS-NEXT: v_mov_b32_e32 v0, s4 +; GFX7LESS-NEXT: v_mov_b32_e32 v1, s5 ; GFX7LESS-NEXT: s_mov_b32 s6, -1 ; GFX7LESS-NEXT: s_mov_b32 s4, s2 ; GFX7LESS-NEXT: s_mov_b32 s5, s3 ; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start ; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX7LESS-NEXT: v_mov_b32_e32 v9, v1 -; GFX7LESS-NEXT: v_mov_b32_e32 v8, v0 -; GFX7LESS-NEXT: v_subrev_i32_e32 v6, vcc, s12, v8 -; GFX7LESS-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc +; GFX7LESS-NEXT: v_mov_b32_e32 v8, v1 +; GFX7LESS-NEXT: v_mov_b32_e32 v7, v0 +; GFX7LESS-NEXT: v_subrev_i32_e32 v5, vcc, s12, v7 +; GFX7LESS-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc ; GFX7LESS-NEXT: s_waitcnt expcnt(0) -; GFX7LESS-NEXT: v_mov_b32_e32 v0, v6 -; GFX7LESS-NEXT: v_mov_b32_e32 v1, v7 -; GFX7LESS-NEXT: v_mov_b32_e32 v2, v8 -; GFX7LESS-NEXT: v_mov_b32_e32 v3, v9 +; GFX7LESS-NEXT: v_mov_b32_e32 v0, v5 +; GFX7LESS-NEXT: v_mov_b32_e32 v1, v6 +; GFX7LESS-NEXT: v_mov_b32_e32 v2, v7 +; GFX7LESS-NEXT: v_mov_b32_e32 v3, v8 ; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; GFX7LESS-NEXT: s_waitcnt vmcnt(0) ; GFX7LESS-NEXT: buffer_wbinvl1 -; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] ; GFX7LESS-NEXT: s_or_b64 s[10:11], vcc, s[10:11] ; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2 @@ -5611,39 +5609,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX8-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0 -; GFX8-NEXT: s_mov_b32 s4, 0 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 ; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX8-NEXT: s_and_saveexec_b64 s[8:9], vcc ; GFX8-NEXT: s_cbranch_execz .LBB9_4 ; GFX8-NEXT: ; %bb.1: ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0 -; GFX8-NEXT: s_bcnt1_i32_b64 s5, s[6:7] +; GFX8-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 +; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX8-NEXT: s_mov_b64 s[10:11], 0 -; GFX8-NEXT: v_mov_b32_e32 v5, s4 -; GFX8-NEXT: s_mul_i32 s12, s5, 5 -; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v0, s14 -; GFX8-NEXT: v_mov_b32_e32 v1, s15 ; GFX8-NEXT: s_mov_b32 s7, 0xf000 +; GFX8-NEXT: s_mul_i32 s12, s6, 5 +; GFX8-NEXT: s_waitcnt lgkmcnt(0) +; GFX8-NEXT: v_mov_b32_e32 v0, s4 +; GFX8-NEXT: v_mov_b32_e32 v1, s5 ; GFX8-NEXT: s_mov_b32 s6, -1 ; GFX8-NEXT: s_mov_b32 s4, s2 ; GFX8-NEXT: s_mov_b32 s5, s3 ; GFX8-NEXT: .LBB9_2: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX8-NEXT: v_mov_b32_e32 v9, v1 -; GFX8-NEXT: v_mov_b32_e32 v8, v0 -; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s12, v8 -; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc -; GFX8-NEXT: v_mov_b32_e32 v0, v6 -; GFX8-NEXT: 
v_mov_b32_e32 v1, v7 -; GFX8-NEXT: v_mov_b32_e32 v2, v8 -; GFX8-NEXT: v_mov_b32_e32 v3, v9 +; GFX8-NEXT: v_mov_b32_e32 v8, v1 +; GFX8-NEXT: v_mov_b32_e32 v7, v0 +; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s12, v7 +; GFX8-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc +; GFX8-NEXT: v_mov_b32_e32 v0, v5 +; GFX8-NEXT: v_mov_b32_e32 v1, v6 +; GFX8-NEXT: v_mov_b32_e32 v2, v7 +; GFX8-NEXT: v_mov_b32_e32 v3, v8 ; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1_vol -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] ; GFX8-NEXT: s_or_b64 s[10:11], vcc, s[10:11] ; GFX8-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GFX8-NEXT: s_cbranch_execnz .LBB9_2 @@ -5670,39 +5666,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX9-NEXT: s_mov_b64 s[6:7], exec ; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX9-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0 -; GFX9-NEXT: s_mov_b32 s4, 0 ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 ; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX9-NEXT: s_and_saveexec_b64 s[8:9], vcc ; GFX9-NEXT: s_cbranch_execz .LBB9_4 ; GFX9-NEXT: ; %bb.1: ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0 -; GFX9-NEXT: s_bcnt1_i32_b64 s5, s[6:7] +; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0 +; GFX9-NEXT: s_bcnt1_i32_b64 s6, s[6:7] ; GFX9-NEXT: s_mov_b64 s[10:11], 0 -; GFX9-NEXT: v_mov_b32_e32 v5, s4 -; GFX9-NEXT: s_mul_i32 s12, s5, 5 -; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s14 -; GFX9-NEXT: v_mov_b32_e32 v1, s15 ; GFX9-NEXT: s_mov_b32 s7, 0xf000 +; GFX9-NEXT: s_mul_i32 s12, s6, 5 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s5 ; GFX9-NEXT: s_mov_b32 s6, -1 ; GFX9-NEXT: s_mov_b32 s4, s2 ; GFX9-NEXT: s_mov_b32 s5, s3 ; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX9-NEXT: v_mov_b32_e32 v9, v1 -; GFX9-NEXT: v_mov_b32_e32 v8, v0 -; GFX9-NEXT: v_subrev_co_u32_e32 v6, vcc, s12, v8 -; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v9, v5, vcc -; GFX9-NEXT: v_mov_b32_e32 v0, v6 -; GFX9-NEXT: v_mov_b32_e32 v1, v7 -; GFX9-NEXT: v_mov_b32_e32 v2, v8 -; GFX9-NEXT: v_mov_b32_e32 v3, v9 +; GFX9-NEXT: v_mov_b32_e32 v8, v1 +; GFX9-NEXT: v_mov_b32_e32 v7, v0 +; GFX9-NEXT: v_subrev_co_u32_e32 v5, vcc, s12, v7 +; GFX9-NEXT: v_subbrev_co_u32_e32 v6, vcc, 0, v8, vcc +; GFX9-NEXT: v_mov_b32_e32 v0, v5 +; GFX9-NEXT: v_mov_b32_e32 v1, v6 +; GFX9-NEXT: v_mov_b32_e32 v2, v7 +; GFX9-NEXT: v_mov_b32_e32 v3, v8 ; GFX9-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: buffer_wbinvl1_vol -; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] ; GFX9-NEXT: s_or_b64 s[10:11], vcc, s[10:11] ; GFX9-NEXT: s_andn2_b64 exec, exec, s[10:11] ; GFX9-NEXT: s_cbranch_execnz .LBB9_2 diff --git a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll index d301f16512a60..37040123ee20c 100644 --- a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll +++ b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll @@ -7,8 +7,8 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: s_load_dword s0, s[4:5], 0x8 ; CHECK-NEXT: s_load_dwordx2 s[10:11], s[4:5], 0x0 ; CHECK-NEXT: s_load_dwordx4 s[12:15], s[4:5], 0x10 -; CHECK-NEXT: v_mov_b32_e32 v30, 0x9037ab78 -; 
CHECK-NEXT: v_mov_b32_e32 v31, 0x3e21eeb6 +; CHECK-NEXT: v_mov_b32_e32 v1, 0x3e21eeb6 +; CHECK-NEXT: v_mov_b32_e32 v20, 0 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: s_bitcmp1_b32 s0, 0 ; CHECK-NEXT: s_cselect_b64 s[16:17], -1, 0 @@ -16,9 +16,12 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: s_bitcmp1_b32 s0, 8 ; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0 ; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3] -; CHECK-NEXT: s_xor_b64 s[20:21], s[2:3], -1 ; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0 +; CHECK-NEXT: v_mov_b32_e32 v0, 0x9037ab78 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v1 +; CHECK-NEXT: s_xor_b64 s[20:21], s[2:3], -1 ; CHECK-NEXT: s_and_b64 s[2:3], exec, s[2:3] +; CHECK-NEXT: v_accvgpr_write_b32 a2, v0 ; CHECK-NEXT: v_mov_b32_e32 v2, 0xa17f65f6 ; CHECK-NEXT: v_mov_b32_e32 v3, 0xbe927e4f ; CHECK-NEXT: v_mov_b32_e32 v4, 0x19f4ec90 @@ -34,15 +37,14 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: v_mov_b32_e32 v14, 0x8427b883 ; CHECK-NEXT: v_mov_b32_e32 v15, 0x3fae1bb4 ; CHECK-NEXT: s_mov_b64 s[22:23], 0 -; CHECK-NEXT: v_mov_b32_e32 v20, 0x57b87036 -; CHECK-NEXT: v_mov_b32_e32 v21, 0x3fb3b136 +; CHECK-NEXT: v_mov_b32_e32 v0, 0x57b87036 +; CHECK-NEXT: v_mov_b32_e32 v1, 0x3fb3b136 ; CHECK-NEXT: s_and_b64 s[4:5], exec, s[16:17] ; CHECK-NEXT: v_mov_b32_e32 v18, 0x55555523 ; CHECK-NEXT: v_mov_b32_e32 v19, 0xbfd55555 ; CHECK-NEXT: s_and_b64 s[6:7], exec, s[18:19] -; CHECK-NEXT: v_mov_b32_e32 v0, 0 -; CHECK-NEXT: v_mov_b64_e32 v[16:17], 0 -; CHECK-NEXT: ; implicit-def: $agpr0_agpr1 +; CHECK-NEXT: v_mov_b32_e32 v21, v20 +; CHECK-NEXT: ; implicit-def: $vgpr30_vgpr31 ; CHECK-NEXT: ; implicit-def: $vgpr22_vgpr23 ; CHECK-NEXT: s_branch .LBB0_2 ; CHECK-NEXT: .LBB0_1: ; %Flow9 @@ -62,11 +64,12 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1 ; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[14:15] ; CHECK-NEXT: flat_load_dwordx2 v[24:25], v[24:25] -; CHECK-NEXT: v_mov_b64_e32 v[26:27], v[30:31] +; CHECK-NEXT: v_accvgpr_read_b32 v27, a3 +; CHECK-NEXT: v_accvgpr_read_b32 v26, a2 ; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[2:3] -; CHECK-NEXT: v_mov_b64_e32 v[16:17], v[20:21] -; CHECK-NEXT: v_accvgpr_write_b32 a2, 0 -; CHECK-NEXT: v_accvgpr_write_b32 a3, 0 +; CHECK-NEXT: v_mov_b64_e32 v[16:17], v[0:1] +; CHECK-NEXT: v_accvgpr_write_b32 a0, 0 +; CHECK-NEXT: v_accvgpr_write_b32 a1, 0 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[24:25] ; CHECK-NEXT: v_fmac_f64_e32 v[28:29], 0, v[26:27] @@ -93,32 +96,30 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: .LBB0_6: ; %.preheader1855.i.i.i3329 ; CHECK-NEXT: ; Parent Loop BB0_2 Depth=1 ; CHECK-NEXT: ; => This Inner Loop Header: Depth=2 -; CHECK-NEXT: v_accvgpr_read_b32 v29, a3 -; CHECK-NEXT: v_accvgpr_read_b32 v28, a2 +; CHECK-NEXT: v_accvgpr_read_b32 v29, a1 +; CHECK-NEXT: v_accvgpr_read_b32 v28, a0 ; CHECK-NEXT: s_mov_b64 s[24:25], -1 ; CHECK-NEXT: s_mov_b64 s[8:9], -1 ; CHECK-NEXT: s_mov_b64 vcc, s[2:3] -; CHECK-NEXT: ; implicit-def: $agpr2_agpr3 +; CHECK-NEXT: ; implicit-def: $agpr0_agpr1 ; CHECK-NEXT: s_cbranch_vccz .LBB0_5 ; CHECK-NEXT: ; %bb.7: ; %.lr.ph2070.i.i.i3291 ; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2 -; CHECK-NEXT: v_accvgpr_mov_b32 a3, a1 -; CHECK-NEXT: v_accvgpr_mov_b32 a2, a0 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v30 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v31 ; CHECK-NEXT: s_mov_b64 
s[8:9], s[18:19] ; CHECK-NEXT: s_mov_b64 vcc, s[6:7] ; CHECK-NEXT: s_cbranch_vccz .LBB0_5 ; CHECK-NEXT: ; %bb.8: ; %.preheader1856.preheader.i.i.i3325 ; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2 -; CHECK-NEXT: v_accvgpr_write_b32 a2, v26 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v26 ; CHECK-NEXT: s_mov_b64 s[24:25], 0 -; CHECK-NEXT: v_accvgpr_write_b32 a3, v27 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v27 ; CHECK-NEXT: s_mov_b64 s[8:9], 0 ; CHECK-NEXT: s_branch .LBB0_5 ; CHECK-NEXT: .LBB0_9: ; in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[10:11] -; CHECK-NEXT: v_accvgpr_write_b32 a0, v24 ; CHECK-NEXT: s_mov_b64 s[22:23], 0 -; CHECK-NEXT: v_accvgpr_write_b32 a1, v25 +; CHECK-NEXT: v_mov_b64_e32 v[30:31], s[10:11] ; CHECK-NEXT: s_mov_b64 s[8:9], s[20:21] ; CHECK-NEXT: s_branch .LBB0_15 ; CHECK-NEXT: .LBB0_10: ; in Loop: Header=BB0_2 Depth=1 @@ -135,21 +136,19 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: v_cndmask_b32_e64 v23, v23, 0, s[16:17] ; CHECK-NEXT: v_cndmask_b32_e64 v22, v22, 0, s[16:17] ; CHECK-NEXT: v_cndmask_b32_e64 v16, 0, 1, s[8:9] -; CHECK-NEXT: s_and_b64 s[8:9], exec, s[16:17] ; CHECK-NEXT: v_mov_b32_e32 v17, v16 +; CHECK-NEXT: s_and_b64 s[8:9], exec, s[16:17] +; CHECK-NEXT: global_store_dwordx2 v20, v[16:17], s[12:13] ; CHECK-NEXT: s_cselect_b32 s23, s23, 0 ; CHECK-NEXT: s_cselect_b32 s22, s22, 0 ; CHECK-NEXT: s_mov_b64 s[8:9], -1 -; CHECK-NEXT: global_store_dwordx2 v0, v[16:17], s[12:13] ; CHECK-NEXT: s_branch .LBB0_14 ; CHECK-NEXT: .LBB0_13: ; in Loop: Header=BB0_2 Depth=1 ; CHECK-NEXT: s_mov_b64 s[8:9], 0 ; CHECK-NEXT: v_mov_b64_e32 v[22:23], 0 -; CHECK-NEXT: .LBB0_14: ; %Flow8 +; CHECK-NEXT: .LBB0_14: ; %Flow6 ; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: v_accvgpr_write_b32 a0, v24 -; CHECK-NEXT: v_mov_b64_e32 v[16:17], 0 -; CHECK-NEXT: v_accvgpr_write_b32 a1, v25 +; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[24:25] ; CHECK-NEXT: .LBB0_15: ; %Flow6 ; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1 ; CHECK-NEXT: s_mov_b64 s[24:25], -1 @@ -158,7 +157,7 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: ; %bb.16: ; %._crit_edge2105.i.i.i2330 ; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1 ; CHECK-NEXT: s_mov_b64 s[24:25], 0 -; CHECK-NEXT: global_store_dwordx2 v0, v[16:17], s[12:13] +; CHECK-NEXT: global_store_dwordx2 v20, v[20:21], s[12:13] ; CHECK-NEXT: s_branch .LBB0_1 ; CHECK-NEXT: .LBB0_17: ; %DummyReturnBlock ; CHECK-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir b/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir index c456f9c4b16e5..a2ec87053a8d5 100644 --- a/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir +++ b/llvm/test/CodeGen/AMDGPU/av_spill_cross_bb_usage.mir @@ -49,7 +49,7 @@ body: | ; GCN-NEXT: {{ $}} ; GCN-NEXT: bb.1: ; GCN-NEXT: successors: %bb.2(0x80000000) - ; GCN-NEXT: liveins: $exec, $sgpr30, $sgpr31, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr40, $sgpr30_sgpr31, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr41_vgpr42:0x000000000000000F, $vgpr43_vgpr44:0x000000000000000F, $vgpr45_vgpr46:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F + ; GCN-NEXT: liveins: $exec, $sgpr30, $sgpr31, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr40, $sgpr30_sgpr31, $vgpr10_vgpr11:0x000000000000000F, 
$vgpr14_vgpr15:0x000000000000000F, $vgpr43_vgpr44:0x000000000000000F ; GCN-NEXT: {{ $}} ; GCN-NEXT: renamable $vgpr57 = COPY $vgpr9, implicit $exec ; GCN-NEXT: renamable $vgpr56 = COPY $vgpr8, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 371e460d9638e..94ba5cdd09df4 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -49040,6 +49040,9 @@ declare bfloat @llvm.fma.bf16(bfloat, bfloat, bfloat) declare <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat>, <2 x bfloat>, <2 x bfloat>) declare <3 x bfloat> @llvm.fma.v3bf16(<3 x bfloat>, <3 x bfloat>, <3 x bfloat>) declare <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>) +declare <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>) +declare <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat>, <16 x bfloat>, <16 x bfloat>) +declare <32 x bfloat> @llvm.fma.v32bf16(<32 x bfloat>, <32 x bfloat>, <32 x bfloat>) define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) { ; GCN-LABEL: v_fma_bf16: @@ -49990,6 +49993,4672 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> ret <4 x bfloat> %op } +define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) { +; GCN-LABEL: v_fma_v8bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_fma_f32 v7, v7, v15, v23 +; GCN-NEXT: v_fma_f32 v6, v6, v14, 
v22 +; GCN-NEXT: v_fma_f32 v5, v5, v13, v21 +; GCN-NEXT: v_fma_f32 v4, v4, v12, v20 +; GCN-NEXT: v_fma_f32 v3, v3, v11, v19 +; GCN-NEXT: v_fma_f32 v2, v2, v10, v18 +; GCN-NEXT: v_fma_f32 v1, v1, v9, v17 +; GCN-NEXT: v_fma_f32 v0, v0, v8, v16 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v8bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_fma_f32 v6, v6, v14, v15 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_fma_f32 v5, v5, v13, v14 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_fma_f32 v4, v4, v12, v13 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_fma_f32 v3, v3, v11, v12 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_fma_f32 v2, v2, v10, v11 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v17 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_fma_f32 v1, v1, v9, v11 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v10 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_fma_f32 v0, v0, v8, v9 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; 
GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v8bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX8-NEXT: v_fma_f32 v12, v14, v13, v12 +; GFX8-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v12 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, s4, v13 +; GFX8-NEXT: v_fma_f32 v3, v3, v7, v11 +; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v12 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_bfe_u32 v7, v3, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v3 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v6 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v2 +; GFX8-NEXT: v_fma_f32 v7, v13, v11, v7 +; GFX8-NEXT: v_bfe_u32 v11, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v7 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, s4, v11 +; GFX8-NEXT: v_fma_f32 v2, v2, v6, v10 +; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_bfe_u32 v6, v2, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v2 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v2 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; GFX8-NEXT: v_fma_f32 v6, v11, v10, v6 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, s4, v10 +; GFX8-NEXT: v_fma_f32 v1, v1, v5, v9 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1 +; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 +; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v1 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX8-NEXT: v_fma_f32 v5, v10, v9, v5 +; GFX8-NEXT: v_bfe_u32 v9, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v5 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9 +; GFX8-NEXT: v_fma_f32 v0, v0, v4, v8 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc +; GFX8-NEXT: 
v_add_u32_e32 v4, vcc, v4, v0 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4 +; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v0 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v5, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v6, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v7, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v8bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX900-NEXT: v_fma_f32 v12, v14, v13, v12 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_fma_f32 v3, v3, v7, v11 +; GFX900-NEXT: v_add3_u32 v13, v13, v12, s4 +; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v12 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_bfe_u32 v7, v3, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc +; GFX900-NEXT: v_add3_u32 v7, v7, v3, s4 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v3 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v6 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v2 +; GFX900-NEXT: v_fma_f32 v7, v13, v11, v7 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX900-NEXT: v_bfe_u32 v11, v7, 16, 1 +; GFX900-NEXT: v_fma_f32 v2, v2, v6, v10 +; GFX900-NEXT: v_add3_u32 v11, v11, v7, s4 +; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_bfe_u32 v6, v2, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc +; GFX900-NEXT: v_add3_u32 v6, v6, v2, s4 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v2 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v5 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; GFX900-NEXT: v_fma_f32 v6, v11, v10, v6 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX900-NEXT: v_fma_f32 v1, v1, v5, v9 +; GFX900-NEXT: v_add3_u32 v10, v10, v6, s4 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX900-NEXT: v_add3_u32 v5, v5, v1, s4 +; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v1 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX900-NEXT: v_fma_f32 v5, v10, v9, v5 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; 
GFX900-NEXT: v_bfe_u32 v9, v5, 16, 1 +; GFX900-NEXT: v_fma_f32 v0, v0, v4, v8 +; GFX900-NEXT: v_add3_u32 v9, v9, v5, s4 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: v_bfe_u32 v4, v0, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc +; GFX900-NEXT: v_add3_u32 v4, v4, v0, s4 +; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v0 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v5, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v6, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v7, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v8bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v3 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX950-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v10 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v6 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v13, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v10, v6 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v6, v1, v5 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v9, v5 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v5, v0, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v6, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v7, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v11, v12 +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v8bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX10-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v10 +; GFX10-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v12 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX10-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v3, v14, v7 +; GFX10-NEXT: v_and_b32_e32 v7, 
0xffff0000, v10 +; GFX10-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v10, v13, v15, vcc_lo +; GFX10-NEXT: v_bfe_u32 v13, v3, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX10-NEXT: v_add3_u32 v12, v16, v11, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v9 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX10-NEXT: v_add3_u32 v13, v13, v3, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v3 +; GFX10-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_fmac_f32_e32 v2, v14, v6 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_add3_u32 v6, v16, v7, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v3, v13, v15, vcc_lo +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v8 +; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v4 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX10-NEXT: v_bfe_u32 v14, v2, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX10-NEXT: v_fmac_f32_e32 v9, v1, v5 +; GFX10-NEXT: v_fmac_f32_e32 v15, v18, v16 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v8, v0, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v14, v2, 0x7fff +; GFX10-NEXT: v_bfe_u32 v4, v9, 16, 1 +; GFX10-NEXT: v_bfe_u32 v5, v15, 16, 1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_bfe_u32 v7, v8, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v9 +; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX10-NEXT: v_add3_u32 v2, v5, v15, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v4, v9, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v15 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX10-NEXT: v_add3_u32 v5, v7, v8, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v8 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v5, v0, v13, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX10-NEXT: v_perm_b32 v0, v4, v2, 0x7060302 +; GFX10-NEXT: v_perm_b32 v2, v6, v3, 0x7060302 +; GFX10-NEXT: v_perm_b32 v1, v5, v1, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo +; GFX10-NEXT: v_perm_b32 v3, v7, v10, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v8bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX11TRUE16-NEXT: 
v_fmac_f32_e32 v12, v14, v13 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_bfe_u32 v10, v11, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v7, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v14, v16, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v10, v11, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v5 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX11TRUE16-NEXT: v_bfe_u32 v15, v14, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v2, v6, vcc_lo +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_add3_u32 v10, v15, v14, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v14 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v4 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v6.h +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v1, v5 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v10, v12, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v7, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v8 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v4, v10, v11 :: v_dual_and_b32 v5, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v9, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc_lo +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v15, v17, v16 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfe_u32 v12, v15, 16, 1 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v13, v16, v14 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v5, v0, v1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v15, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v13, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v13 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; 
GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v13, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v9, v11, v5, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v0, v12, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v9, v10, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v8bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v14, v13 :: v_dual_and_b32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v10 +; GFX11FAKE16-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v3, v14, v7 +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v10, v13, v15 :: v_dual_and_b32 v7, 0xffff0000, v10 +; GFX11FAKE16-NEXT: v_add3_u32 v12, v16, v11, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v1 +; GFX11FAKE16-NEXT: v_bfe_u32 v13, v3, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v3 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v3, 0x7fff +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v3, v13, v15 :: v_dual_and_b32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v7, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v8 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v9 +; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v14, v6 :: v_dual_and_b32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11FAKE16-NEXT: v_add3_u32 v6, v16, v7, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v9, v1, v5 :: v_dual_and_b32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v14, v2, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 
v1, 0x400000, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v16, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v9 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v8, v0, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v14, v2, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v4, v9, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v18, v16 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v0, v4, v9, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v5, v15, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v15 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_add3_u32 v2, v5, v15, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v5, v7, v8, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v0, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11FAKE16-NEXT: v_perm_b32 v0, v4, v2, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v2, v6, v3, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_perm_b32 v1, v5, v1, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v3, v7, v10, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: v_fma_v8bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v4, v8 +; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v5, v9 +; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v6, v10 +; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v7, v11 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %op = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) + ret <8 x bfloat> %op +} + +define <16 x bfloat> @v_fma_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) { +; GCN-LABEL: v_fma_v16bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v15, v15, v31, v32 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60 +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; 
GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v14, v14, v30, v31 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56 +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_fma_f32 v13, v13, v29, v30 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_fma_f32 v12, v12, v28, v29 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_fma_f32 v11, v11, v27, v28 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_fma_f32 v10, v10, v26, v27 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GCN-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_fma_f32 v9, v9, v25, v26 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_fma_f32 v8, v8, v24, v25 +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_fma_f32 v7, v7, v23, v24 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_fma_f32 v6, v6, v22, v23 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, 
v21 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_fma_f32 v5, v5, v21, v22 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_fma_f32 v4, v4, v20, v21 +; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_fma_f32 v3, v3, v19, v20 +; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:4 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_fma_f32 v2, v2, v18, v19 +; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v20 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_fma_f32 v1, v1, v17, v18 +; GCN-NEXT: v_fma_f32 v0, v0, v16, v19 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v16bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_mul_f32_e32 v29, 
1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v15, v15, v31, v32 +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v14, v14, v30, v31 +; GFX7-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_fma_f32 v13, v13, v29, v30 +; GFX7-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_fma_f32 v12, v12, 
v28, v29 +; GFX7-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_fma_f32 v11, v11, v27, v28 +; GFX7-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_fma_f32 v10, v10, v26, v27 +; GFX7-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_fma_f32 v9, v9, v25, v26 +; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_fma_f32 v8, v8, v24, v25 +; GFX7-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_fma_f32 v7, v7, v23, v24 +; GFX7-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_fma_f32 v6, v6, v22, v23 +; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_fma_f32 v5, v5, v21, v22 +; GFX7-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_fma_f32 v4, v4, v20, v21 +; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_fma_f32 v3, v3, v19, v20 +; GFX7-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12 +; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:4 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_fma_f32 v2, v2, v18, v19 +; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_fma_f32 v1, v1, v17, v18 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v19 +; GFX7-NEXT: v_fma_f32 v0, v0, v16, v17 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v16bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v24, 
16, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: v_fma_f32 v24, v26, v25, v24 +; GFX8-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v14 +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: v_fma_f32 v15, v25, v23, v15 +; GFX8-NEXT: v_fma_f32 v6, v6, v14, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v13 +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: v_fma_f32 v14, v23, v22, v14 +; GFX8-NEXT: v_fma_f32 v5, v5, v13, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v12 +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: v_fma_f32 v13, v22, v21, v13 +; GFX8-NEXT: v_fma_f32 v4, v4, v12, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: v_fma_f32 v12, v21, v20, v12 +; GFX8-NEXT: v_fma_f32 v3, v3, v11, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v10 +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v2 +; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: v_fma_f32 v11, v20, v19, v11 +; GFX8-NEXT: v_fma_f32 v2, v2, v10, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: v_fma_f32 v10, v19, v18, v10 +; GFX8-NEXT: v_fma_f32 v1, v1, v9, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: v_fma_f32 v0, v0, v8, v16 +; GFX8-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v24 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: v_add_u32_e32 v8, vcc, s4, v8 +; GFX8-NEXT: v_or_b32_e32 v16, 0x400000, v24 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX8-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GFX8-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v7 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_fma_f32 v9, v18, v17, v9 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v7 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v15 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, 
s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v15 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v6 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v14, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v14 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v14 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX8-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v5 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v5 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v13, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v13 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v13 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX8-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v4, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v4 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v4 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX8-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v12 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v3 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v11 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX8-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v2, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v2 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v2 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v10, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v10 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v10 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX8-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v1, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v1 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v9, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v9 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v0, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v0 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 
v17, 0x400000, v0 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v9, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v10, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v11, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16 +; GFX8-NEXT: v_alignbit_b32 v4, v4, v13, 16 +; GFX8-NEXT: v_alignbit_b32 v5, v5, v14, 16 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v15, 16 +; GFX8-NEXT: v_alignbit_b32 v7, v7, v8, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v16bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: v_fma_f32 v24, v26, v25, v24 +; GFX900-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v14 +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; GFX900-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: v_fma_f32 v15, v25, v23, v15 +; GFX900-NEXT: v_fma_f32 v6, v6, v14, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v13 +; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: v_fma_f32 v14, v23, v22, v14 +; GFX900-NEXT: v_fma_f32 v5, v5, v13, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v12 +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v4 +; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: v_fma_f32 v13, v22, v21, v13 +; GFX900-NEXT: v_fma_f32 v4, v4, v12, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: v_fma_f32 v12, v21, v20, v12 +; GFX900-NEXT: v_fma_f32 v3, v3, v11, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v10 +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v2 +; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX900-NEXT: v_fma_f32 v11, v20, v19, v11 +; GFX900-NEXT: v_fma_f32 v2, v2, v10, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v9 +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: v_fma_f32 v10, 
v19, v18, v10 +; GFX900-NEXT: v_fma_f32 v1, v1, v9, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v8 +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: v_fma_f32 v0, v0, v8, v16 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX900-NEXT: v_add3_u32 v8, v8, v24, s4 +; GFX900-NEXT: v_or_b32_e32 v16, 0x400000, v24 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX900-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GFX900-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX900-NEXT: v_fma_f32 v9, v18, v17, v9 +; GFX900-NEXT: v_add3_u32 v16, v16, v7, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v7 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v15, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v15 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX900-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v6, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v6, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v6 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v14, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v14, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v14 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX900-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v5, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v5, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v5 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: v_cndmask_b32_e32 v5, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v13, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v13, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v13 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX900-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v4, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v4, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX900-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v12, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v12, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v3, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v3, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v3 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v11, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX900-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v2, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v2, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v2 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v10, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v10, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v10 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX900-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v1, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v1, s4 +; 
GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v1 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v9, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v9, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX900-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v0, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v0, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v0 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v9, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v10, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v11, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4 +; GFX900-NEXT: v_perm_b32 v4, v4, v13, s4 +; GFX900-NEXT: v_perm_b32 v5, v5, v14, s4 +; GFX900-NEXT: v_perm_b32 v6, v6, v15, s4 +; GFX900-NEXT: v_perm_b32 v7, v7, v8, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v16bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 +; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX950-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v22 +; GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v14 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v6 +; GFX950-NEXT: v_fmac_f32_e32 v7, v25, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_fmac_f32_e32 v15, v6, v14 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v21 +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v13 +; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v5 +; GFX950-NEXT: v_fmac_f32_e32 v6, v22, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: v_fmac_f32_e32 v14, v5, v13 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v20 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v12 +; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v4 +; GFX950-NEXT: v_fmac_f32_e32 v5, v21, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_fmac_f32_e32 v13, v4, v12 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v19 +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v3 +; GFX950-NEXT: v_fmac_f32_e32 v4, v20, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v12, v3, v11 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v18 +; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v10 +; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v19, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v11, v2, v10 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v17 +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v9 +; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v1 +; 
GFX950-NEXT: v_fmac_f32_e32 v2, v18, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v10, v1, v9 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v8 +; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v17, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v9, v0, v8 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v9, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v10, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v11, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v12, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v13, v5 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v14, v6 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v15, v7 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v23, v24 +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v16bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v6 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v22 +; GFX10-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v14 +; GFX10-NEXT: v_or_b32_e32 v27, 0x400000, v24 +; GFX10-NEXT: v_bfe_u32 v28, v23, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX10-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v7, v26, v15 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v22 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX10-NEXT: v_add3_u32 v24, v28, v23, 0x7fff +; GFX10-NEXT: v_bfe_u32 v26, v7, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v15, v6, v14 +; GFX10-NEXT: v_cndmask_b32_e32 v22, v25, v27, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v13 +; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v5 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_fmac_f32_e32 v6, v27, v14 +; GFX10-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo +; GFX10-NEXT: v_add3_u32 v24, v26, v7, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v7 +; GFX10-NEXT: v_bfe_u32 v26, v15, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX10-NEXT: v_add3_u32 v21, v26, v15, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v14, v5, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v24, v25, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v15 +; GFX10-NEXT: v_bfe_u32 v25, v6, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v12 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v4 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_fmac_f32_e32 v5, v26, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo +; GFX10-NEXT: v_add3_u32 v21, v25, v6, 0x7fff +; GFX10-NEXT: 
v_or_b32_e32 v24, 0x400000, v6 +; GFX10-NEXT: v_bfe_u32 v25, v14, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v20 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_add3_u32 v20, v25, v14, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v13, v4, v12 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v21, v24, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v14 +; GFX10-NEXT: v_bfe_u32 v24, v5, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_fmac_f32_e32 v4, v25, v12 +; GFX10-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo +; GFX10-NEXT: v_add3_u32 v20, v24, v5, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v5 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v18 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v10 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX10-NEXT: v_bfe_u32 v24, v13, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v12, v3, v11 +; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX10-NEXT: v_fmac_f32_e32 v19, v26, v25 +; GFX10-NEXT: v_cndmask_b32_e32 v5, v20, v21, vcc_lo +; GFX10-NEXT: v_bfe_u32 v20, v4, 16, 1 +; GFX10-NEXT: v_add3_u32 v21, v24, v13, 0x7fff +; GFX10-NEXT: v_bfe_u32 v24, v12, 16, 1 +; GFX10-NEXT: v_bfe_u32 v25, v19, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX10-NEXT: v_add3_u32 v11, v20, v4, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v4 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX10-NEXT: v_or_b32_e32 v26, 0x400000, v19 +; GFX10-NEXT: v_fmac_f32_e32 v18, v2, v10 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo +; GFX10-NEXT: v_add3_u32 v11, v24, v12, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v12 +; GFX10-NEXT: v_add3_u32 v24, v25, v19, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_fmac_f32_e32 v2, v25, v10 +; GFX10-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v8 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_bfe_u32 v20, v2, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v17, v1, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v16 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v0 +; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_add3_u32 v1, v20, v2, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_fmac_f32_e32 v16, v0, v8 +; GFX10-NEXT: v_bfe_u32 v0, v17, 16, 1 +; GFX10-NEXT: v_bfe_u32 v27, v18, 16, 1 +; GFX10-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v17 +; GFX10-NEXT: v_add3_u32 v0, v0, v17, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX10-NEXT: v_bfe_u32 v2, v16, 16, 1 +; GFX10-NEXT: v_add3_u32 v8, v8, v24, 0x7fff +; 
GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v24 +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v16 +; GFX10-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX10-NEXT: v_add3_u32 v2, v2, v16, 0x7fff +; GFX10-NEXT: v_add3_u32 v12, v27, v18, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v18 +; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16 +; GFX10-NEXT: v_perm_b32 v1, v9, v1, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX10-NEXT: v_perm_b32 v0, v2, v0, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX10-NEXT: v_perm_b32 v2, v8, v10, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo +; GFX10-NEXT: v_perm_b32 v3, v11, v4, 0x7060302 +; GFX10-NEXT: v_perm_b32 v4, v12, v5, 0x7060302 +; GFX10-NEXT: v_perm_b32 v5, v14, v6, 0x7060302 +; GFX10-NEXT: v_perm_b32 v6, v15, v7, 0x7060302 +; GFX10-NEXT: v_perm_b32 v7, v23, v22, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v16bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_lshlrev_b32 v7, 16, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v22 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v26, v28, v27 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v22, v6, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v13 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v24 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v25, v29, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v5 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v15, v23, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v24, v26, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v26 +; GFX11TRUE16-NEXT: v_add3_u32 v15, v15, v23, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v24, v24, v26, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v22, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v14, v29, v28 :: v_dual_cndmask_b32 v15, v15, v25 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v12 +; 
GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v24, v27, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v22, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v22 +; GFX11TRUE16-NEXT: v_bfe_u32 v28, v14, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v20, v4, v12 +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v5, v13 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v23, v27, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v5, v28, v14, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v21, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v27, v20, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v24 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v22, vcc_lo +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v14, v23, v21, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v25, v24, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v21, v27, v20, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v14, v22, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v20 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v18 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v14.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v12, v25, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v23, v26, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v12, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v20, v21, v22 :: v_dual_and_b32 v25, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_add3_u32 v21, v23, v12, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v24, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v12 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v20.h +; GFX11TRUE16-NEXT: v_add3_u32 v12, v23, v24, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v19, v3, v11 +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v3, v21, v22 :: v_dual_and_b32 v22, 0xffff0000, v17 +; GFX11TRUE16-NEXT: 
v_lshlrev_b32_e32 v11, 16, v18 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX11TRUE16-NEXT: v_bfe_u32 v18, v19, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v22, v25, v23 :: v_dual_fmac_f32 v11, v2, v10 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v19 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v18, v19, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v8 +; GFX11TRUE16-NEXT: v_bfe_u32 v21, v11, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_cndmask_b32 v10, v2, v10 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v16 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v12, v18, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v12, v21, v11, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v16 +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v11 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v11, v17, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v24, v23 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v0, v1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v12, v18, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v11, v11, v17, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v17 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v21, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX11TRUE16-NEXT: v_add3_u32 v12, v19, v22, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v18, v9, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v21, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v11, v16, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v22 +; GFX11TRUE16-NEXT: v_add3_u32 v16, v18, v9, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v8.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v0, v19, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v12, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v11.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v18.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v16bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX11FAKE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_and_b32 v23, 0xffff0000, v23 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v23, v7, v15 :: v_dual_lshlrev_b32 v26, 16, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v14 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v24, 16, 1 
+; GFX11FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11FAKE16-NEXT: v_bfe_u32 v28, v23, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX11FAKE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v22 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v24, v28, v23, 0x7fff +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v7, v26, v15 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v22, v25, v27 :: v_dual_and_b32 v15, 0xffff0000, v22 +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX11FAKE16-NEXT: v_bfe_u32 v26, v7, 16, 1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v27, 16, v5 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v24, v26, v7, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v7 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v7, v24, v25 :: v_dual_and_b32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v15, v6, v14 :: v_dual_lshlrev_b32 v14, 16, v13 +; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v15 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; GFX11FAKE16-NEXT: v_bfe_u32 v26, v15, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v6, v27, v14 +; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX11FAKE16-NEXT: v_add3_u32 v21, v26, v15, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v4 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v6, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v14, v5, v13 :: v_dual_lshlrev_b32 v5, 16, v20 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v12 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v6, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v6 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v14, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v5, v26, v13 :: v_dual_and_b32 v12, 0xffff0000, v12 +; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v20 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v20, v25, v14, 0x7fff +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v6, v21, v24 :: v_dual_lshlrev_b32 v25, 16, v3 +; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v14 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v5, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v13, v4, v12 :: v_dual_lshlrev_b32 v4, 16, v19 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; 
GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v25, v12 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v20, v24, v5, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v5 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v13, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v10 +; GFX11FAKE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v3, v11 :: v_dual_cndmask_b32 v5, v20, v21 +; GFX11FAKE16-NEXT: v_add3_u32 v21, v24, v13, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v13 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v19, 16, v18 +; GFX11FAKE16-NEXT: v_bfe_u32 v20, v4, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v12, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v19, v26, v25 +; GFX11FAKE16-NEXT: v_add3_u32 v11, v20, v4, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v4 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v18, v2, v10 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v19, 16, 1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v11, v24, v12, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v12 +; GFX11FAKE16-NEXT: v_add3_u32 v24, v25, v19, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v25, v10 :: v_dual_and_b32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v20, v2, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v16 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v0 +; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_and_b32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_add3_u32 v1, v20, v2, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v16, v0, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v17, 16, 1 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11FAKE16-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v24 +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v16, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v17, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v17 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX11FAKE16-NEXT: v_add3_u32 v8, v8, v24, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v27, v18, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v16, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v16 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo +; 
GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11FAKE16-NEXT: v_add3_u32 v12, v27, v18, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v18 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v1, v9, v1, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX11FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v2, v8, v10, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v3, v11, v4, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v4, v12, v5, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v5, v14, v6, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v6, v15, v7, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v7, v23, v22, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: v_fma_v16bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v8, v16 +; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v9, v17 +; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v10, v18 +; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v11, v19 +; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v12, v20 +; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v13, v21 +; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v14, v22 +; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v15, v23 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %op = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) + ret <16 x bfloat> %op +} + +define <32 x bfloat> @v_fma_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) { +; GCN-LABEL: v_fma_v32bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256 +; GCN-NEXT: s_waitcnt vmcnt(2) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v31, v31, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252 +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v30, v30, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248 +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; 
GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v29, v29, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244 +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v28, v28, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v27, v27, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236 +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v26, v26, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232 +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v25, v25, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228 +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v24, v24, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v23, v23, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; 
GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v22, v22, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v21, v21, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v20, v20, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v19, v19, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v18, v18, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v17, v17, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v16, v16, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; 
GCN-NEXT: v_fma_f32 v15, v15, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v14, v14, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v13, v13, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:180 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v12, v12, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v11, v11, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v10, v10, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v9, v9, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v8, v8, v32, v33 +; GCN-NEXT: 
buffer_load_dword v32, off, s[0:3], s32 offset:32 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v7, v7, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v6, v6, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v5, v5, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v4, v4, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144 +; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v3, v3, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v2, v2, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v1, v1, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], 
s32 offset:132 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v0, v0, v32, v33 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v32bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256 +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GFX7-NEXT: 
v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_waitcnt vmcnt(2) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v31, v31, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v30, v30, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v29, v29, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v28, v28, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt 
vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v27, v27, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v26, v26, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v25, v25, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v24, v24, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v23, v23, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v22, v22, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v21, v21, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v20, v20, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80 +; 
GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v19, v19, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v18, v18, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v17, v17, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v16, v16, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192 +; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v15, v15, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v14, v14, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v13, v13, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:180 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; 
GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v12, v12, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v11, v11, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v10, v10, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v9, v9, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v8, v8, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v7, v7, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v6, v6, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v5, v5, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148 +; GFX7-NEXT: 
v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v4, v4, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v3, v3, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v2, v2, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v1, v1, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:132 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v0, v0, v32, v33 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v32bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v15 +; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: s_waitcnt vmcnt(1) +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v32 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v15, v15, v33, v32 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60 +; GFX8-NEXT: v_fma_f32 v31, v31, v35, v34 +; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v30 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v14 +; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v14, v14, v30, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56 +; GFX8-NEXT: v_fma_f32 v32, v34, v32, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v30, 16, v29 +; GFX8-NEXT: 
v_lshlrev_b32_e32 v34, 16, v13 +; GFX8-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v13, v13, v29, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52 +; GFX8-NEXT: v_fma_f32 v30, v34, v30, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v12, v12, v28, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; GFX8-NEXT: v_fma_f32 v29, v34, v29, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v11 +; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v11, v11, v27, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44 +; GFX8-NEXT: v_fma_f32 v28, v34, v28, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v10 +; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v10, v10, v26, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40 +; GFX8-NEXT: v_fma_f32 v27, v34, v27, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v25 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v9 +; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v9, v9, v25, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36 +; GFX8-NEXT: v_fma_f32 v26, v35, v34, v26 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v24 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v8 +; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v8, v8, v24, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX8-NEXT: v_fma_f32 v25, v35, v34, v25 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v7 +; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v7, v7, v23, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28 +; GFX8-NEXT: v_fma_f32 v24, v35, v34, v24 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v6 +; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v6, v6, v22, v33 +; GFX8-NEXT: 
buffer_load_dword v33, off, s[0:3], s32 offset:24 +; GFX8-NEXT: v_fma_f32 v23, v35, v34, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v5 +; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v5, v5, v21, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20 +; GFX8-NEXT: v_fma_f32 v22, v35, v34, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v4, v4, v20, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16 +; GFX8-NEXT: v_fma_f32 v21, v35, v34, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v3 +; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v3, v3, v19, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; GFX8-NEXT: v_fma_f32 v20, v35, v34, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v2 +; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v2, v2, v18, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8 +; GFX8-NEXT: v_fma_f32 v19, v35, v34, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v1 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v1, v1, v17, v33 +; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:4 +; GFX8-NEXT: v_fma_f32 v18, v35, v34, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v16 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v0 +; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v17 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_fma_f32 v0, v0, v16, v17 +; GFX8-NEXT: v_bfe_u32 v16, v31, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v31 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v31, v31 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v31 +; GFX8-NEXT: v_cndmask_b32_e32 v16, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v17, v15, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v15 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX8-NEXT: v_or_b32_e32 v15, 0x400000, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v15, v17, v15, vcc +; GFX8-NEXT: v_bfe_u32 v17, v32, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v32 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v32, v32 +; GFX8-NEXT: v_or_b32_e32 v31, 0x400000, v32 +; GFX8-NEXT: v_cndmask_b32_e32 v17, 
v17, v31, vcc +; GFX8-NEXT: v_bfe_u32 v31, v14, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v14 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v14 +; GFX8-NEXT: v_cndmask_b32_e32 v14, v31, v14, vcc +; GFX8-NEXT: v_bfe_u32 v31, v30, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v30 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v30, v30 +; GFX8-NEXT: v_or_b32_e32 v30, 0x400000, v30 +; GFX8-NEXT: v_cndmask_b32_e32 v30, v31, v30, vcc +; GFX8-NEXT: v_bfe_u32 v31, v13, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v13 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v13 +; GFX8-NEXT: v_cndmask_b32_e32 v13, v31, v13, vcc +; GFX8-NEXT: v_bfe_u32 v31, v29, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v29 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v29, v29 +; GFX8-NEXT: v_or_b32_e32 v29, 0x400000, v29 +; GFX8-NEXT: v_cndmask_b32_e32 v29, v31, v29, vcc +; GFX8-NEXT: v_bfe_u32 v31, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v12 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_or_b32_e32 v12, 0x400000, v12 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v31, v12, vcc +; GFX8-NEXT: v_bfe_u32 v31, v28, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v28 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v28, v28 +; GFX8-NEXT: v_or_b32_e32 v28, 0x400000, v28 +; GFX8-NEXT: v_cndmask_b32_e32 v28, v31, v28, vcc +; GFX8-NEXT: v_bfe_u32 v31, v11, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v11 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v11 +; GFX8-NEXT: v_cndmask_b32_e32 v11, v31, v11, vcc +; GFX8-NEXT: v_bfe_u32 v31, v27, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v27 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v27, v27 +; GFX8-NEXT: v_or_b32_e32 v27, 0x400000, v27 +; GFX8-NEXT: v_cndmask_b32_e32 v27, v31, v27, vcc +; GFX8-NEXT: v_bfe_u32 v31, v10, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v10 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10 +; GFX8-NEXT: v_cndmask_b32_e32 v10, v31, v10, vcc +; GFX8-NEXT: v_bfe_u32 v31, v26, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v26 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v26, v26 +; GFX8-NEXT: v_or_b32_e32 v26, 0x400000, v26 +; GFX8-NEXT: v_cndmask_b32_e32 v26, v31, v26, vcc +; GFX8-NEXT: v_bfe_u32 v31, v9, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v9 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v31, v9, vcc +; GFX8-NEXT: v_bfe_u32 v31, v25, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v25 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v25, v25 +; GFX8-NEXT: v_or_b32_e32 v25, 0x400000, v25 +; GFX8-NEXT: v_cndmask_b32_e32 v25, v31, v25, vcc +; GFX8-NEXT: v_bfe_u32 v31, v8, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v8 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8 +; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v8 +; GFX8-NEXT: v_cndmask_b32_e32 v8, v31, 
v8, vcc +; GFX8-NEXT: v_bfe_u32 v31, v24, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v24 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX8-NEXT: v_or_b32_e32 v24, 0x400000, v24 +; GFX8-NEXT: v_cndmask_b32_e32 v24, v31, v24, vcc +; GFX8-NEXT: v_bfe_u32 v31, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v7 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v31, v7, vcc +; GFX8-NEXT: v_bfe_u32 v31, v23, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v23 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v23, v23 +; GFX8-NEXT: v_or_b32_e32 v23, 0x400000, v23 +; GFX8-NEXT: v_cndmask_b32_e32 v23, v31, v23, vcc +; GFX8-NEXT: v_bfe_u32 v31, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v6 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v31, v6, vcc +; GFX8-NEXT: v_bfe_u32 v31, v22, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v22 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v22, v22 +; GFX8-NEXT: v_or_b32_e32 v22, 0x400000, v22 +; GFX8-NEXT: v_cndmask_b32_e32 v22, v31, v22, vcc +; GFX8-NEXT: v_bfe_u32 v31, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v5 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v31, v5, vcc +; GFX8-NEXT: v_bfe_u32 v31, v21, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v21 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v21, v21 +; GFX8-NEXT: v_or_b32_e32 v21, 0x400000, v21 +; GFX8-NEXT: v_cndmask_b32_e32 v21, v31, v21, vcc +; GFX8-NEXT: v_bfe_u32 v31, v4, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v4 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4 +; GFX8-NEXT: v_cndmask_b32_e32 v4, v31, v4, vcc +; GFX8-NEXT: v_bfe_u32 v31, v20, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v20 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v20, v20 +; GFX8-NEXT: v_or_b32_e32 v20, 0x400000, v20 +; GFX8-NEXT: v_cndmask_b32_e32 v20, v31, v20, vcc +; GFX8-NEXT: v_bfe_u32 v31, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v3 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v31, v3, vcc +; GFX8-NEXT: v_bfe_u32 v31, v19, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v19 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v19, v19 +; GFX8-NEXT: v_or_b32_e32 v19, 0x400000, v19 +; GFX8-NEXT: v_cndmask_b32_e32 v19, v31, v19, vcc +; GFX8-NEXT: v_bfe_u32 v31, v2, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v2 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v31, v2, vcc +; GFX8-NEXT: v_bfe_u32 v31, v18, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v18 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v18, v18 +; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18 +; GFX8-NEXT: v_cndmask_b32_e32 v18, v31, v18, vcc +; GFX8-NEXT: v_bfe_u32 v31, v1, 
16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_fma_f32 v33, v35, v34, v33 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v31, v1, vcc +; GFX8-NEXT: v_bfe_u32 v31, v33, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v33 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v33, v33 +; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v33 +; GFX8-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc +; GFX8-NEXT: v_bfe_u32 v32, v0, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v32, vcc, v32, v0 +; GFX8-NEXT: v_add_u32_e32 v32, vcc, s4, v32 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_or_b32_e32 v0, 0x400000, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8 +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9 +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10 +; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11 +; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15 +; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13 +; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v31, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v18, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v19, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v20, 16 +; GFX8-NEXT: v_alignbit_b32 v4, v4, v21, 16 +; GFX8-NEXT: v_alignbit_b32 v5, v5, v22, 16 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v23, 16 +; GFX8-NEXT: v_alignbit_b32 v7, v7, v24, 16 +; GFX8-NEXT: v_alignbit_b32 v8, v8, v25, 16 +; GFX8-NEXT: v_alignbit_b32 v9, v9, v26, 16 +; GFX8-NEXT: v_alignbit_b32 v10, v10, v27, 16 +; GFX8-NEXT: v_alignbit_b32 v11, v11, v28, 16 +; GFX8-NEXT: v_alignbit_b32 v12, v12, v29, 16 +; GFX8-NEXT: v_alignbit_b32 v13, v13, v30, 16 +; GFX8-NEXT: v_alignbit_b32 v14, v14, v17, 16 +; GFX8-NEXT: v_alignbit_b32 v15, v15, v16, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v32bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX900-NEXT: v_lshlrev_b32_e32 v31, 16, v15 +; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: s_waitcnt vmcnt(1) +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v32 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v15, v15, v33, v32 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60 +; GFX900-NEXT: v_fma_f32 v31, v31, v35, v34 +; GFX900-NEXT: v_lshlrev_b32_e32 v32, 16, v30 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v14 +; GFX900-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v31, v31 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v14, v14, v30, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 
offset:56 +; GFX900-NEXT: v_fma_f32 v32, v34, v32, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v30, 16, v29 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v13 +; GFX900-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v13, v13, v29, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52 +; GFX900-NEXT: v_fma_f32 v30, v34, v30, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v29, 16, v28 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; GFX900-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v12, v12, v28, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; GFX900-NEXT: v_fma_f32 v29, v34, v29, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v28, 16, v27 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v11 +; GFX900-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v11, v11, v27, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44 +; GFX900-NEXT: v_fma_f32 v28, v34, v28, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v27, 16, v26 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v10 +; GFX900-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v10, v10, v26, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40 +; GFX900-NEXT: v_fma_f32 v27, v34, v27, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v25 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v9 +; GFX900-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v9, v9, v25, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36 +; GFX900-NEXT: v_fma_f32 v26, v35, v34, v26 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v24 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v8 +; GFX900-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v8, v8, v24, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX900-NEXT: v_fma_f32 v25, v35, v34, v25 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v7 +; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v7, v7, v23, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28 +; GFX900-NEXT: v_fma_f32 v24, v35, v34, v24 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v6 +; GFX900-NEXT: 
v_and_b32_e32 v22, 0xffff0000, v22 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v6, v6, v22, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24 +; GFX900-NEXT: v_fma_f32 v23, v35, v34, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v5 +; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v5, v5, v21, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20 +; GFX900-NEXT: v_fma_f32 v22, v35, v34, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v4 +; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v4, v4, v20, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16 +; GFX900-NEXT: v_fma_f32 v21, v35, v34, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v3 +; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v3, v3, v19, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; GFX900-NEXT: v_fma_f32 v20, v35, v34, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v2 +; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v2, v2, v18, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8 +; GFX900-NEXT: v_fma_f32 v19, v35, v34, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v1 +; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v1, v1, v17, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4 +; GFX900-NEXT: v_fma_f32 v18, v35, v34, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v16 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v0 +; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v0, v0, v16, v33 +; GFX900-NEXT: v_bfe_u32 v16, v31, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v31, s4 +; GFX900-NEXT: v_or_b32_e32 v31, 0x400000, v31 +; GFX900-NEXT: v_cndmask_b32_e32 v16, v16, v31, vcc +; GFX900-NEXT: v_bfe_u32 v31, v15, 16, 1 +; GFX900-NEXT: v_add3_u32 v31, v31, v15, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX900-NEXT: v_or_b32_e32 v15, 0x400000, v15 +; GFX900-NEXT: v_cndmask_b32_e32 v15, v31, 
v15, vcc +; GFX900-NEXT: v_bfe_u32 v31, v32, 16, 1 +; GFX900-NEXT: v_add3_u32 v31, v31, v32, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v32, v32 +; GFX900-NEXT: v_or_b32_e32 v32, 0x400000, v32 +; GFX900-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc +; GFX900-NEXT: v_bfe_u32 v32, v14, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v14, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v14 +; GFX900-NEXT: v_cndmask_b32_e32 v14, v32, v14, vcc +; GFX900-NEXT: v_bfe_u32 v32, v30, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v30, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v30, v30 +; GFX900-NEXT: v_or_b32_e32 v30, 0x400000, v30 +; GFX900-NEXT: v_cndmask_b32_e32 v30, v32, v30, vcc +; GFX900-NEXT: v_bfe_u32 v32, v13, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v13, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v13 +; GFX900-NEXT: v_cndmask_b32_e32 v13, v32, v13, vcc +; GFX900-NEXT: v_bfe_u32 v32, v29, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v29, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v29, v29 +; GFX900-NEXT: v_or_b32_e32 v29, 0x400000, v29 +; GFX900-NEXT: v_cndmask_b32_e32 v29, v32, v29, vcc +; GFX900-NEXT: v_bfe_u32 v32, v12, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v12, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_or_b32_e32 v12, 0x400000, v12 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v32, v12, vcc +; GFX900-NEXT: v_bfe_u32 v32, v28, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v28, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v28, v28 +; GFX900-NEXT: v_or_b32_e32 v28, 0x400000, v28 +; GFX900-NEXT: v_cndmask_b32_e32 v28, v32, v28, vcc +; GFX900-NEXT: v_bfe_u32 v32, v11, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v11, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v11 +; GFX900-NEXT: v_cndmask_b32_e32 v11, v32, v11, vcc +; GFX900-NEXT: v_bfe_u32 v32, v27, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v27, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v27, v27 +; GFX900-NEXT: v_or_b32_e32 v27, 0x400000, v27 +; GFX900-NEXT: v_cndmask_b32_e32 v27, v32, v27, vcc +; GFX900-NEXT: v_bfe_u32 v32, v10, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v10, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v10 +; GFX900-NEXT: v_cndmask_b32_e32 v10, v32, v10, vcc +; GFX900-NEXT: v_bfe_u32 v32, v26, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v26, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v26, v26 +; GFX900-NEXT: v_or_b32_e32 v26, 0x400000, v26 +; GFX900-NEXT: v_cndmask_b32_e32 v26, v32, v26, vcc +; GFX900-NEXT: v_bfe_u32 v32, v9, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v9, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v9 +; GFX900-NEXT: v_cndmask_b32_e32 v9, v32, v9, vcc +; GFX900-NEXT: v_bfe_u32 v32, v25, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v25, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v25, v25 +; GFX900-NEXT: v_or_b32_e32 v25, 0x400000, v25 +; GFX900-NEXT: v_cndmask_b32_e32 v25, v32, v25, vcc +; GFX900-NEXT: v_bfe_u32 v32, v8, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v8, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v8, v8 +; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v8 +; GFX900-NEXT: v_cndmask_b32_e32 v8, v32, v8, vcc +; GFX900-NEXT: v_bfe_u32 v32, v24, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v24, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX900-NEXT: v_or_b32_e32 v24, 0x400000, v24 +; GFX900-NEXT: v_cndmask_b32_e32 v24, v32, v24, vcc +; GFX900-NEXT: v_bfe_u32 v32, v7, 16, 1 +; 
GFX900-NEXT: v_add3_u32 v32, v32, v7, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v7 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v32, v7, vcc +; GFX900-NEXT: v_bfe_u32 v32, v23, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v23, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v23, v23 +; GFX900-NEXT: v_or_b32_e32 v23, 0x400000, v23 +; GFX900-NEXT: v_cndmask_b32_e32 v23, v32, v23, vcc +; GFX900-NEXT: v_bfe_u32 v32, v6, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v6, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v6 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v32, v6, vcc +; GFX900-NEXT: v_bfe_u32 v32, v22, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v22, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v22, v22 +; GFX900-NEXT: v_or_b32_e32 v22, 0x400000, v22 +; GFX900-NEXT: v_cndmask_b32_e32 v22, v32, v22, vcc +; GFX900-NEXT: v_bfe_u32 v32, v5, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v5, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v5 +; GFX900-NEXT: v_cndmask_b32_e32 v5, v32, v5, vcc +; GFX900-NEXT: v_bfe_u32 v32, v21, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v21, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v21, v21 +; GFX900-NEXT: v_or_b32_e32 v21, 0x400000, v21 +; GFX900-NEXT: v_cndmask_b32_e32 v21, v32, v21, vcc +; GFX900-NEXT: v_bfe_u32 v32, v4, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v4, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v4 +; GFX900-NEXT: v_cndmask_b32_e32 v4, v32, v4, vcc +; GFX900-NEXT: v_bfe_u32 v32, v20, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v20, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v20, v20 +; GFX900-NEXT: v_or_b32_e32 v20, 0x400000, v20 +; GFX900-NEXT: v_cndmask_b32_e32 v20, v32, v20, vcc +; GFX900-NEXT: v_bfe_u32 v32, v3, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v3, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_or_b32_e32 v3, 0x400000, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v32, v3, vcc +; GFX900-NEXT: v_bfe_u32 v32, v19, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v19, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v19, v19 +; GFX900-NEXT: v_or_b32_e32 v19, 0x400000, v19 +; GFX900-NEXT: v_cndmask_b32_e32 v19, v32, v19, vcc +; GFX900-NEXT: v_bfe_u32 v32, v2, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v2, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v32, v2, vcc +; GFX900-NEXT: v_bfe_u32 v32, v18, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v18, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v18, v18 +; GFX900-NEXT: v_or_b32_e32 v18, 0x400000, v18 +; GFX900-NEXT: v_cndmask_b32_e32 v18, v32, v18, vcc +; GFX900-NEXT: v_bfe_u32 v32, v1, 16, 1 +; GFX900-NEXT: v_fma_f32 v17, v35, v34, v17 +; GFX900-NEXT: v_add3_u32 v32, v32, v1, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_or_b32_e32 v1, 0x400000, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v32, v1, vcc +; GFX900-NEXT: v_bfe_u32 v32, v17, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v17, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v17 +; GFX900-NEXT: v_cndmask_b32_e32 v17, v32, v17, vcc +; GFX900-NEXT: v_bfe_u32 v32, v0, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v0, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_or_b32_e32 v0, 0x400000, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v17, s4 +; GFX900-NEXT: 
v_perm_b32 v1, v1, v18, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v19, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v20, s4 +; GFX900-NEXT: v_perm_b32 v4, v4, v21, s4 +; GFX900-NEXT: v_perm_b32 v5, v5, v22, s4 +; GFX900-NEXT: v_perm_b32 v6, v6, v23, s4 +; GFX900-NEXT: v_perm_b32 v7, v7, v24, s4 +; GFX900-NEXT: v_perm_b32 v8, v8, v25, s4 +; GFX900-NEXT: v_perm_b32 v9, v9, v26, s4 +; GFX900-NEXT: v_perm_b32 v10, v10, v27, s4 +; GFX900-NEXT: v_perm_b32 v11, v11, v28, s4 +; GFX900-NEXT: v_perm_b32 v12, v12, v29, s4 +; GFX900-NEXT: v_perm_b32 v13, v13, v30, s4 +; GFX900-NEXT: v_perm_b32 v14, v14, v31, s4 +; GFX900-NEXT: v_perm_b32 v15, v15, v16, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v32bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: scratch_load_dword v35, off, s32 offset:64 +; GFX950-NEXT: scratch_load_dword v36, off, s32 +; GFX950-NEXT: scratch_load_dword v38, off, s32 offset:60 +; GFX950-NEXT: scratch_load_dword v39, off, s32 offset:56 +; GFX950-NEXT: scratch_load_dword v48, off, s32 offset:52 +; GFX950-NEXT: scratch_load_dword v49, off, s32 offset:48 +; GFX950-NEXT: scratch_load_dword v50, off, s32 offset:44 +; GFX950-NEXT: scratch_load_dword v51, off, s32 offset:40 +; GFX950-NEXT: scratch_load_dword v52, off, s32 offset:36 +; GFX950-NEXT: scratch_load_dword v53, off, s32 offset:32 +; GFX950-NEXT: scratch_load_dword v54, off, s32 offset:28 +; GFX950-NEXT: scratch_load_dword v31, off, s32 offset:4 +; GFX950-NEXT: scratch_load_dword v32, off, s32 offset:8 +; GFX950-NEXT: scratch_load_dword v33, off, s32 offset:12 +; GFX950-NEXT: scratch_load_dword v34, off, s32 offset:16 +; GFX950-NEXT: scratch_load_dword v37, off, s32 offset:20 +; GFX950-NEXT: scratch_load_dword v55, off, s32 offset:24 +; GFX950-NEXT: v_accvgpr_write_b32 a3, v43 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a5, v45 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a6, v46 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a8, v56 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a11, v59 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a13, v61 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a14, v62 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a15, v63 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v43, 0xffff0000, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v45, 16, v14 +; GFX950-NEXT: v_and_b32_e32 v46, 0xffff0000, v29 +; GFX950-NEXT: v_lshlrev_b32_e32 v56, 16, v29 +; GFX950-NEXT: v_and_b32_e32 v59, 0xffff0000, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v61, 16, v12 +; GFX950-NEXT: v_and_b32_e32 v62, 0xffff0000, v27 +; GFX950-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX950-NEXT: v_accvgpr_write_b32 a2, v42 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a4, v44 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a7, v47 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a9, v57 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v42, 0xffff0000, v30 +; GFX950-NEXT: v_lshlrev_b32_e32 v44, 16, v30 +; GFX950-NEXT: v_and_b32_e32 v47, 0xffff0000, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v57, 16, v13 +; GFX950-NEXT: v_accvgpr_write_b32 a0, v40 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a1, v41 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v40, 0xffff0000, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v41, 16, v15 +; GFX950-NEXT: v_accvgpr_write_b32 a10, v58 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a12, v60 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v58, 0xffff0000, v28 +; GFX950-NEXT: v_lshlrev_b32_e32 v60, 16, v28 +; GFX950-NEXT: s_waitcnt vmcnt(16) +; 
GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v35 +; GFX950-NEXT: s_waitcnt vmcnt(15) +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v36 +; GFX950-NEXT: v_lshlrev_b32_e32 v63, 16, v36 +; GFX950-NEXT: s_waitcnt vmcnt(14) +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v38 +; GFX950-NEXT: v_lshlrev_b32_e32 v29, 16, v38 +; GFX950-NEXT: s_waitcnt vmcnt(11) +; GFX950-NEXT: v_and_b32_e32 v36, 0xffff0000, v49 +; GFX950-NEXT: v_and_b32_e32 v38, 0xffff0000, v11 +; GFX950-NEXT: v_fmac_f32_e32 v36, v38, v62 +; GFX950-NEXT: v_lshlrev_b32_e32 v38, 16, v49 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v39 +; GFX950-NEXT: v_lshlrev_b32_e32 v30, 16, v39 +; GFX950-NEXT: v_fmac_f32_e32 v38, v11, v27 +; GFX950-NEXT: s_waitcnt vmcnt(10) +; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v50 +; GFX950-NEXT: v_and_b32_e32 v27, 0xffff0000, v26 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v10 +; GFX950-NEXT: v_fmac_f32_e32 v11, v39, v27 +; GFX950-NEXT: v_lshlrev_b32_e32 v27, 16, v50 +; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX950-NEXT: v_fmac_f32_e32 v27, v10, v26 +; GFX950-NEXT: s_waitcnt vmcnt(9) +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v51 +; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v25 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v9 +; GFX950-NEXT: v_fmac_f32_e32 v10, v39, v26 +; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v51 +; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX950-NEXT: v_fmac_f32_e32 v26, v9, v25 +; GFX950-NEXT: s_waitcnt vmcnt(8) +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v52 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v24 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v8 +; GFX950-NEXT: v_fmac_f32_e32 v9, v39, v25 +; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v52 +; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX950-NEXT: v_fmac_f32_e32 v25, v8, v24 +; GFX950-NEXT: s_waitcnt vmcnt(7) +; GFX950-NEXT: v_and_b32_e32 v8, 0xffff0000, v53 +; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v7 +; GFX950-NEXT: v_fmac_f32_e32 v8, v39, v24 +; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v53 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_fmac_f32_e32 v24, v7, v23 +; GFX950-NEXT: s_waitcnt vmcnt(6) +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v54 +; GFX950-NEXT: v_and_b32_e32 v23, 0xffff0000, v22 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v6 +; GFX950-NEXT: v_fmac_f32_e32 v7, v39, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v54 +; GFX950-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_fmac_f32_e32 v23, v6, v22 +; GFX950-NEXT: s_waitcnt vmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v55 +; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v5 +; GFX950-NEXT: v_fmac_f32_e32 v6, v39, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v22, 16, v55 +; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: v_fmac_f32_e32 v22, v5, v21 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v37 +; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v20 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v4 +; GFX950-NEXT: v_fmac_f32_e32 v5, v39, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v37 +; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, 
v4 +; GFX950-NEXT: v_fmac_f32_e32 v21, v4, v20 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v34 +; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v19 +; GFX950-NEXT: v_and_b32_e32 v37, 0xffff0000, v3 +; GFX950-NEXT: v_fmac_f32_e32 v4, v37, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v34 +; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v20, v3, v19 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v33 +; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v18 +; GFX950-NEXT: v_and_b32_e32 v34, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v34, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v19, v2, v18 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v32 +; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v17 +; GFX950-NEXT: v_and_b32_e32 v33, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v33, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v32 +; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v18, v1, v17 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v31 +; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v16 +; GFX950-NEXT: v_and_b32_e32 v32, 0xffff0000, v0 +; GFX950-NEXT: v_lshlrev_b32_e32 v28, 16, v35 +; GFX950-NEXT: v_fmac_f32_e32 v15, v40, v12 +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v48 +; GFX950-NEXT: v_lshlrev_b32_e32 v35, 16, v48 +; GFX950-NEXT: v_fmac_f32_e32 v1, v32, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v31 +; GFX950-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v28, v41, v63 +; GFX950-NEXT: v_fmac_f32_e32 v14, v43, v42 +; GFX950-NEXT: v_fmac_f32_e32 v29, v45, v44 +; GFX950-NEXT: v_fmac_f32_e32 v13, v47, v46 +; GFX950-NEXT: v_fmac_f32_e32 v30, v57, v56 +; GFX950-NEXT: v_fmac_f32_e32 v12, v59, v58 +; GFX950-NEXT: v_fmac_f32_e32 v35, v61, v60 +; GFX950-NEXT: v_fmac_f32_e32 v17, v0, v16 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v17, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v18, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v19, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v20, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v21, v5 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v22, v6 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v23, v7 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v24, v8 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v8, v25, v9 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v9, v26, v10 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v10, v27, v11 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v11, v38, v36 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v12, v35, v12 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v13, v30, v13 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v14, v29, v14 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v15, v28, v15 +; GFX950-NEXT: v_accvgpr_read_b32 v63, a15 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v62, a14 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v61, a13 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v60, a12 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v59, a11 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v58, a10 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v57, a9 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v56, a8 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v47, a7 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v46, a6 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v45, a5 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v44, a4 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v43, 
a3 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v42, a2 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v41, a1 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v40, a0 ; Reload Reuse +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v32bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_clause 0x8 +; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60 +; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:56 +; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:52 +; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48 +; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:44 +; GFX10-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:40 +; GFX10-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:36 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v15 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v15 +; GFX10-NEXT: v_and_b32_e32 v52, 0xffff0000, v10 +; GFX10-NEXT: s_waitcnt vmcnt(8) +; GFX10-NEXT: v_lshlrev_b32_e32 v31, 16, v32 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v33 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v32 +; GFX10-NEXT: v_and_b32_e32 v32, 0xffff0000, v33 +; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX10-NEXT: v_fmac_f32_e32 v31, v49, v50 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v30 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v14 +; GFX10-NEXT: v_fmac_f32_e32 v15, v51, v32 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v34 +; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v14 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v34 +; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28 +; GFX10-NEXT: v_fmac_f32_e32 v32, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v29 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v13 +; GFX10-NEXT: v_fmac_f32_e32 v14, v51, v30 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v35 +; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v13 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v35 +; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:24 +; GFX10-NEXT: v_fmac_f32_e32 v30, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v28 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v12 +; GFX10-NEXT: v_fmac_f32_e32 v13, v51, v29 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v29, 16, v36 +; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v12 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v36 +; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:20 +; GFX10-NEXT: v_fmac_f32_e32 v29, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v27 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v11 +; GFX10-NEXT: v_fmac_f32_e32 v12, v51, v28 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v37 +; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v37 +; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:16 +; GFX10-NEXT: v_fmac_f32_e32 v28, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v26 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v10 +; GFX10-NEXT: v_fmac_f32_e32 v11, v51, v27 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: 
v_lshlrev_b32_e32 v27, 16, v38 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v26 +; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v38 +; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v25 +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v39 +; GFX10-NEXT: v_fmac_f32_e32 v27, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v9 +; GFX10-NEXT: v_fmac_f32_e32 v10, v52, v51 +; GFX10-NEXT: s_clause 0x1 +; GFX10-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; GFX10-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:8 +; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v31 +; GFX10-NEXT: v_fmac_f32_e32 v26, v49, v38 +; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:4 +; GFX10-NEXT: v_and_b32_e32 v49, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v24 +; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v31, v31 +; GFX10-NEXT: v_fmac_f32_e32 v9, v49, v25 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v8 +; GFX10-NEXT: s_waitcnt vmcnt(8) +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v48 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 +; GFX10-NEXT: v_fmac_f32_e32 v25, v49, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v7 +; GFX10-NEXT: v_fmac_f32_e32 v48, v8, v24 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v22 +; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v33 +; GFX10-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX10-NEXT: v_fmac_f32_e32 v8, v49, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v6 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_fmac_f32_e32 v33, v7, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v21 +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v34 +; GFX10-NEXT: v_and_b32_e32 v34, 0xffff0000, v34 +; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_fmac_f32_e32 v7, v39, v24 +; GFX10-NEXT: v_fmac_f32_e32 v34, v6, v22 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v4 +; GFX10-NEXT: s_waitcnt vmcnt(5) +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v35 +; GFX10-NEXT: v_and_b32_e32 v35, 0xffff0000, v35 +; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v19 +; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_fmac_f32_e32 v6, v23, v49 +; GFX10-NEXT: v_fmac_f32_e32 v35, v5, v21 +; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v3 +; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX10-NEXT: s_waitcnt vmcnt(4) +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v36 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v18 +; GFX10-NEXT: v_lshlrev_b32_e32 v21, 16, v2 +; GFX10-NEXT: v_fmac_f32_e32 v5, v39, v24 +; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX10-NEXT: v_fmac_f32_e32 v36, v4, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v16 +; GFX10-NEXT: s_waitcnt vmcnt(3) +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v37 +; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX10-NEXT: v_and_b32_e32 v2, 
0xffff0000, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v17 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX10-NEXT: v_fmac_f32_e32 v39, v23, v22 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v37 +; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v0 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_fmac_f32_e32 v23, v3, v19 +; GFX10-NEXT: s_waitcnt vmcnt(2) +; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v50 +; GFX10-NEXT: s_waitcnt vmcnt(1) +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v51 +; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v51 +; GFX10-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 +; GFX10-NEXT: v_cmp_u_f32_e64 s5, v33, v33 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v38 +; GFX10-NEXT: v_and_b32_e32 v38, 0xffff0000, v38 +; GFX10-NEXT: v_fmac_f32_e32 v37, v21, v49 +; GFX10-NEXT: v_fmac_f32_e32 v50, v2, v18 +; GFX10-NEXT: v_fmac_f32_e32 v19, v1, v17 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v48 +; GFX10-NEXT: v_fmac_f32_e32 v38, v0, v16 +; GFX10-NEXT: v_bfe_u32 v0, v48, 16, 1 +; GFX10-NEXT: v_bfe_u32 v16, v33, 16, 1 +; GFX10-NEXT: v_bfe_u32 v2, v8, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v33 +; GFX10-NEXT: v_bfe_u32 v18, v7, 16, 1 +; GFX10-NEXT: v_bfe_u32 v21, v34, 16, 1 +; GFX10-NEXT: v_add3_u32 v0, v0, v48, 0x7fff +; GFX10-NEXT: v_bfe_u32 v48, v35, 16, 1 +; GFX10-NEXT: v_add3_u32 v16, v16, v33, 0x7fff +; GFX10-NEXT: v_bfe_u32 v33, v5, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v3, v4, v24 +; GFX10-NEXT: v_fmac_f32_e32 v51, v22, v20 +; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v8 +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v7 +; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v34 +; GFX10-NEXT: v_bfe_u32 v24, v6, 16, 1 +; GFX10-NEXT: v_add3_u32 v2, v2, v8, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s4, v8, v8 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v35 +; GFX10-NEXT: v_add3_u32 v18, v18, v7, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s6, v7, v7 +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v5 +; GFX10-NEXT: v_add3_u32 v21, v21, v34, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s7, v34, v34 +; GFX10-NEXT: v_bfe_u32 v34, v39, 16, 1 +; GFX10-NEXT: v_add3_u32 v48, v48, v35, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s9, v35, v35 +; GFX10-NEXT: v_bfe_u32 v35, v23, 16, 1 +; GFX10-NEXT: v_add3_u32 v33, v33, v5, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s10, v5, v5 +; GFX10-NEXT: v_bfe_u32 v5, v37, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v49, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v24, v24, v6, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s8, v6, v6 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v39 +; GFX10-NEXT: v_add3_u32 v34, v34, v39, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s11, v39, v39 +; GFX10-NEXT: v_or_b32_e32 v39, 0x400000, v23 +; GFX10-NEXT: v_add3_u32 v35, v35, v23, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s12, v23, v23 +; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v37 +; GFX10-NEXT: v_add3_u32 v5, v5, v37, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s13, v37, v37 +; GFX10-NEXT: v_bfe_u32 v37, v31, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e64 v53, v2, v4, s4 +; GFX10-NEXT: v_bfe_u32 v4, v3, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e64 v16, v16, v17, s5 +; GFX10-NEXT: v_cndmask_b32_e64 v17, v18, v20, s6 +; GFX10-NEXT: v_add3_u32 v37, v37, v31, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v18, v21, v22, s7 +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v3 +; GFX10-NEXT: v_bfe_u32 v22, v19, 16, 1 +; GFX10-NEXT: v_add3_u32 v4, v4, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v31, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v15, 16, 1 +; 
GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v15 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v15, v15 +; GFX10-NEXT: v_cndmask_b32_e64 v21, v24, v49, s8 +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v19 +; GFX10-NEXT: v_add3_u32 v37, v37, v15, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v7, v33, v7, s10 +; GFX10-NEXT: v_bfe_u32 v33, v51, 16, 1 +; GFX10-NEXT: v_add3_u32 v22, v22, v19, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v6, v34, v6, s11 +; GFX10-NEXT: v_cndmask_b32_e64 v15, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v32, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v32 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v32, v32 +; GFX10-NEXT: v_or_b32_e32 v34, 0x400000, v51 +; GFX10-NEXT: v_cndmask_b32_e64 v35, v35, v39, s12 +; GFX10-NEXT: v_add3_u32 v37, v37, v32, 0x7fff +; GFX10-NEXT: v_bfe_u32 v39, v38, 16, 1 +; GFX10-NEXT: v_add3_u32 v33, v33, v51, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v23, s13 +; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v38 +; GFX10-NEXT: v_cndmask_b32_e64 v32, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v14, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v14 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v14, v14 +; GFX10-NEXT: v_add3_u32 v39, v39, v38, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v50 +; GFX10-NEXT: v_add3_u32 v37, v37, v14, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v8, v48, v8, s9 +; GFX10-NEXT: v_perm_b32 v15, v15, v31, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e64 v14, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v30, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v30 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v30, v30 +; GFX10-NEXT: v_perm_b32 v14, v14, v32, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v30, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v30, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v13, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v13 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v13, v13 +; GFX10-NEXT: v_add3_u32 v37, v37, v13, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v13, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v29, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v29 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v29, v29 +; GFX10-NEXT: v_perm_b32 v13, v13, v30, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v29, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v29, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v12, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v12 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v12, v12 +; GFX10-NEXT: v_add3_u32 v37, v37, v12, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v12, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v28, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v28 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v28, v28 +; GFX10-NEXT: v_perm_b32 v12, v12, v29, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v28, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v28, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v11, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v11 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v11, v11 +; GFX10-NEXT: v_add3_u32 v37, v37, v11, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v11, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v27, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v27 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v27, v27 +; GFX10-NEXT: v_perm_b32 v11, v11, v28, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v27, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v27, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v10, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v10 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v10, v10 +; GFX10-NEXT: v_add3_u32 v37, v37, v10, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v10, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v26, 16, 1 +; GFX10-NEXT: 
v_or_b32_e32 v52, 0x400000, v26 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v26, v26 +; GFX10-NEXT: v_perm_b32 v10, v10, v27, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v26, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v26, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v9, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v9 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v9, v9 +; GFX10-NEXT: v_add3_u32 v37, v37, v9, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v9, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v25, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v25 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v25, v25 +; GFX10-NEXT: v_perm_b32 v9, v9, v26, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v25, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v25, v37, v52, s14 +; GFX10-NEXT: v_cndmask_b32_e32 v52, v0, v1, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_bfe_u32 v1, v50, 16, 1 +; GFX10-NEXT: v_bfe_u32 v37, v36, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v0, 0x400000, v36 +; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX10-NEXT: v_add3_u32 v1, v1, v50, 0x7fff +; GFX10-NEXT: v_add3_u32 v37, v37, v36, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v4, v22, v24, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX10-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX10-NEXT: v_cndmask_b32_e32 v20, v39, v23, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX10-NEXT: v_perm_b32 v1, v4, v3, 0x7060302 +; GFX10-NEXT: v_perm_b32 v3, v35, v6, 0x7060302 +; GFX10-NEXT: v_perm_b32 v6, v18, v17, 0x7060302 +; GFX10-NEXT: v_perm_b32 v2, v2, v5, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v22, v37, v0, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v20, v19, 0x7060302 +; GFX10-NEXT: v_perm_b32 v5, v8, v21, 0x7060302 +; GFX10-NEXT: v_perm_b32 v8, v52, v25, 0x7060302 +; GFX10-NEXT: v_perm_b32 v4, v22, v7, 0x7060302 +; GFX10-NEXT: v_perm_b32 v7, v16, v53, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v32bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: s_clause 0x10 +; GFX11TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX11TRUE16-NEXT: scratch_load_b32 v32, off, s32 +; GFX11TRUE16-NEXT: scratch_load_b32 v33, off, s32 offset:60 +; GFX11TRUE16-NEXT: scratch_load_b32 v34, off, s32 offset:56 +; GFX11TRUE16-NEXT: scratch_load_b32 v35, off, s32 offset:52 +; GFX11TRUE16-NEXT: scratch_load_b32 v36, off, s32 offset:48 +; GFX11TRUE16-NEXT: scratch_load_b32 v37, off, s32 offset:44 +; GFX11TRUE16-NEXT: scratch_load_b32 v38, off, s32 offset:40 +; GFX11TRUE16-NEXT: scratch_load_b32 v39, off, s32 offset:36 +; GFX11TRUE16-NEXT: scratch_load_b32 v48, off, s32 offset:32 +; GFX11TRUE16-NEXT: scratch_load_b32 v49, off, s32 offset:28 +; GFX11TRUE16-NEXT: scratch_load_b32 v50, off, s32 offset:24 +; GFX11TRUE16-NEXT: scratch_load_b32 v51, off, s32 offset:20 +; GFX11TRUE16-NEXT: scratch_load_b32 v52, off, s32 offset:16 +; GFX11TRUE16-NEXT: scratch_load_b32 v53, off, s32 offset:12 +; GFX11TRUE16-NEXT: scratch_load_b32 v54, off, s32 offset:8 +; GFX11TRUE16-NEXT: scratch_load_b32 v55, off, s32 offset:4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v99, 0xffff0000, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v100, 0xffff0000, v5 +; GFX11TRUE16-NEXT: v_and_b32_e32 v101, 0xffff0000, v20 +; GFX11TRUE16-NEXT: v_and_b32_e32 v102, 0xffff0000, v4 +; GFX11TRUE16-NEXT: 
v_lshlrev_b32_e32 v20, 16, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v115, 0xffff0000, v17 +; GFX11TRUE16-NEXT: v_and_b32_e32 v116, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v97, 0xffff0000, v22 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11TRUE16-NEXT: v_and_b32_e32 v117, 0xffff0000, v16 +; GFX11TRUE16-NEXT: v_and_b32_e32 v118, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v103, 0xffff0000, v19 +; GFX11TRUE16-NEXT: v_and_b32_e32 v112, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v85, 0xffff0000, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX11TRUE16-NEXT: v_and_b32_e32 v113, 0xffff0000, v18 +; GFX11TRUE16-NEXT: v_and_b32_e32 v114, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(16) +; GFX11TRUE16-NEXT: v_and_b32_e32 v119, 0xffff0000, v31 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(15) +; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v32 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(14) +; GFX11TRUE16-NEXT: v_and_b32_e32 v129, 0xffff0000, v33 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v33, 16, v33 +; GFX11TRUE16-NEXT: v_and_b32_e32 v68, 0xffff0000, v13 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(12) +; GFX11TRUE16-NEXT: v_and_b32_e32 v131, 0xffff0000, v35 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(10) +; GFX11TRUE16-NEXT: v_and_b32_e32 v133, 0xffff0000, v37 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(9) +; GFX11TRUE16-NEXT: v_and_b32_e32 v134, 0xffff0000, v38 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v37, 16, v37 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(7) +; GFX11TRUE16-NEXT: v_and_b32_e32 v144, 0xffff0000, v48 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v48, 16, v48 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(5) +; GFX11TRUE16-NEXT: v_and_b32_e32 v146, 0xffff0000, v50 +; GFX11TRUE16-NEXT: v_and_b32_e32 v145, 0xffff0000, v49 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v49, 16, v49 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(4) +; GFX11TRUE16-NEXT: v_and_b32_e32 v147, 0xffff0000, v51 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v51, 16, v51 +; GFX11TRUE16-NEXT: v_and_b32_e32 v96, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v148, 0xffff0000, v55 +; GFX11TRUE16-NEXT: v_and_b32_e32 v87, 0xffff0000, v23 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v83, 0xffff0000, v25 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_lshlrev_b32 v25, 16, v25 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v55, 16, v55 +; GFX11TRUE16-NEXT: v_and_b32_e32 v98, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v84, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v48, v7, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v135, 0xffff0000, v39 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v39, 16, v39 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v49, v6, v22 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_lshlrev_b32 v13, 16, v13 +; GFX11TRUE16-NEXT: v_bfe_u32 v83, v146, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v51, v4, v20 :: v_dual_fmac_f32 v148, v118, v117 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: v_dual_and_b32 v81, 0xffff0000, v26 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v55, v0, v16 :: 
v_dual_lshlrev_b32 v26, 16, v26 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v145, v98, v97 +; GFX11TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v146 +; GFX11TRUE16-NEXT: v_add3_u32 v83, v83, v146, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX11TRUE16-NEXT: v_and_b32_e32 v86, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v82, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_lshlrev_b32 v10, 16, v10 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v38, 16, v38 +; GFX11TRUE16-NEXT: v_and_b32_e32 v69, 0xffff0000, v28 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_lshlrev_b32 v28, 16, v28 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v39, v8, v24 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_and_b32 v70, 0xffff0000, v12 +; GFX11TRUE16-NEXT: v_bfe_u32 v97, v51, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v37, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_lshlrev_b32 v12, 16, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v35, 16, v35 +; GFX11TRUE16-NEXT: v_and_b32_e32 v80, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_and_b32_e32 v132, 0xffff0000, v36 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v36, 16, v36 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v50, 16, v50 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v133 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v37 +; GFX11TRUE16-NEXT: v_or_b32_e32 v98, 0x400000, v51 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v37, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v71, 0xffff0000, v27 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX11TRUE16-NEXT: v_add3_u32 v97, v97, v51, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v32 +; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v15 +; GFX11TRUE16-NEXT: v_and_b32_e32 v130, 0xffff0000, v34 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_lshlrev_b32 v34, 16, v34 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v36, v11, v27 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v50, v5, v21 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_and_b32 v67, 0xffff0000, v29 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_and_b32 v65, 0xffff0000, v30 +; GFX11TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v36 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_fmac_f32 v31, v15, v32 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v119, v64, v128 :: v_dual_and_b32 v66, 0xffff0000, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v52 +; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v53 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v53, 16, v53 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_lshlrev_b32 v30, 16, v30 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v52, 16, v52 +; GFX11TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v54, 16, v54 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v64, v112, v103 +; GFX11TRUE16-NEXT: 
v_fmac_f32_e32 v38, v9, v25 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_lshlrev_b32 v14, 16, v14 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v53, v2, v18 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v119, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v2, v31, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v54, v1, v17 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v119 +; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v31 +; GFX11TRUE16-NEXT: v_bfe_u32 v4, v129, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31 +; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v129 +; GFX11TRUE16-NEXT: v_bfe_u32 v6, v33, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v14, v132, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v15, v0, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cndmask_b32_e64 v149, v2, v3, s0 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129 +; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v33 +; GFX11TRUE16-NEXT: v_bfe_u32 v8, v130, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v3, v6, v33, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v150, v14, v132, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v2, v5, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33 +; GFX11TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v130 +; GFX11TRUE16-NEXT: v_bfe_u32 v10, v34, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v35, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130 +; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v34 +; GFX11TRUE16-NEXT: v_bfe_u32 v12, v131, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v35, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v4, v9, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 +; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v131 +; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v35 +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v132 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131 +; GFX11TRUE16-NEXT: v_bfe_u32 v19, v36, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v21, v133, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v134, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v134 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v12, v8, v16, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35 +; GFX11TRUE16-NEXT: v_add3_u32 v19, v19, v36, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v21, v21, v133, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v27, v38, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v134, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v16, v10, v17, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v132, v132 +; GFX11TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v38 +; GFX11TRUE16-NEXT: v_bfe_u32 v29, v135, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v27, v27, v38, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v30, 0x400000, v135 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v150, v18, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX11TRUE16-NEXT: v_bfe_u32 v65, v39, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v29, v29, v135, 0x7fff +; GFX11TRUE16-NEXT: 
v_or_b32_e32 v66, 0x400000, v39 +; GFX11TRUE16-NEXT: v_bfe_u32 v67, v144, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v17, v19, v20, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133 +; GFX11TRUE16-NEXT: v_add3_u32 v65, v65, v39, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v68, 0x400000, v144 +; GFX11TRUE16-NEXT: v_bfe_u32 v69, v48, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v67, v67, v144, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v10, v21, v22, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37 +; GFX11TRUE16-NEXT: v_or_b32_e32 v70, 0x400000, v48 +; GFX11TRUE16-NEXT: v_bfe_u32 v71, v145, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v69, v69, v48, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v80, 0x400000, v145 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v23, v24, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134 +; GFX11TRUE16-NEXT: v_bfe_u32 v81, v49, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v71, v71, v145, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v82, 0x400000, v49 +; GFX11TRUE16-NEXT: v_bfe_u32 v85, v50, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v9, v25, v26, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX11TRUE16-NEXT: v_add3_u32 v81, v81, v49, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v86, 0x400000, v50 +; GFX11TRUE16-NEXT: v_bfe_u32 v87, v147, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v85, v85, v50, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v19, v27, v28, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v135, v135 +; GFX11TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v147 +; GFX11TRUE16-NEXT: v_add3_u32 v87, v87, v147, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v99, v64, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v64 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v29, v30, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39 +; GFX11TRUE16-NEXT: v_bfe_u32 v101, v52, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v99, v99, v64, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v102, 0x400000, v52 +; GFX11TRUE16-NEXT: v_bfe_u32 v117, v54, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v20, v65, v66, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144 +; GFX11TRUE16-NEXT: v_add3_u32 v101, v101, v52, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v118, 0x400000, v54 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v55, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v117, v117, v54, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v67, v68, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v55 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v119, v148, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v148 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v21, v69, v70, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, v20.h +; GFX11TRUE16-NEXT: v_add3_u32 v119, v119, v148, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v9.l, v19.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v71, v80, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v10.l, v18.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v11.l, v17.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v12.l, v16.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v13.l, v34.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v22, v81, v82, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v14.l, v33.h +; GFX11TRUE16-NEXT: v_mov_b16_e64 v15.l, v149.h +; GFX11TRUE16-NEXT: s_delay_alu 
instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v22.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v83, v84, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v23, v85, v86, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v23.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v87, v96, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v128, v114, v113 +; GFX11TRUE16-NEXT: v_bfe_u32 v113, v53, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v114, 0x400000, v53 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v24, v97, v98, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64 +; GFX11TRUE16-NEXT: v_bfe_u32 v103, v128, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v112, 0x400000, v128 +; GFX11TRUE16-NEXT: v_add3_u32 v113, v113, v53, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v99, v100, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52 +; GFX11TRUE16-NEXT: v_add3_u32 v103, v103, v128, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v25, v101, v102, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v32, v116, v115 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v25.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v103, v112, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53 +; GFX11TRUE16-NEXT: v_bfe_u32 v115, v32, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v116, 0x400000, v32 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v26, v113, v114, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_add3_u32 v115, v115, v32, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v26.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v27, v117, v118, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v28, v0, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v115, v116, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v148, v148 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v27.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v119, v31, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v28.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v32bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: s_clause 0x10 +; GFX11FAKE16-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX11FAKE16-NEXT: scratch_load_b32 v32, off, s32 +; GFX11FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:60 +; GFX11FAKE16-NEXT: scratch_load_b32 v34, off, s32 offset:56 +; GFX11FAKE16-NEXT: scratch_load_b32 v35, off, s32 offset:52 +; GFX11FAKE16-NEXT: scratch_load_b32 v36, off, s32 offset:48 +; GFX11FAKE16-NEXT: scratch_load_b32 v37, off, s32 offset:44 +; GFX11FAKE16-NEXT: scratch_load_b32 v38, off, s32 offset:40 +; GFX11FAKE16-NEXT: scratch_load_b32 v39, off, s32 offset:36 +; GFX11FAKE16-NEXT: scratch_load_b32 v48, off, s32 offset:32 +; GFX11FAKE16-NEXT: scratch_load_b32 v49, off, s32 offset:28 +; GFX11FAKE16-NEXT: scratch_load_b32 v50, off, s32 offset:24 +; GFX11FAKE16-NEXT: scratch_load_b32 v51, off, s32 offset:20 +; GFX11FAKE16-NEXT: scratch_load_b32 v52, off, s32 offset:16 
+; GFX11FAKE16-NEXT: scratch_load_b32 v53, off, s32 offset:12 +; GFX11FAKE16-NEXT: scratch_load_b32 v54, off, s32 offset:8 +; GFX11FAKE16-NEXT: scratch_load_b32 v55, off, s32 offset:4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v99, 16, v21 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v100, 16, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v97, 16, v22 +; GFX11FAKE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v101, 16, v20 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v102, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v117, 16, v16 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v118, 16, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v87, 16, v23 +; GFX11FAKE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v98, 16, v6 +; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v103, 16, v19 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v112, 16, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v85, 16, v24 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v113, 16, v18 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v114, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v115, 16, v17 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v116, 16, v1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(15) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v32 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(14) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v129, 16, v33 +; GFX11FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v68, 16, v13 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(12) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v131, 16, v35 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(10) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v133, 16, v37 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(9) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v134, 16, v38 +; GFX11FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v37 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(7) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v144, 16, v48 +; GFX11FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(5) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v146, 16, v50 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v145, 16, v49 +; GFX11FAKE16-NEXT: v_and_b32_e32 v49, 0xffff0000, v49 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v84, 16, v9 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(4) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v147, 16, v51 +; GFX11FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v51 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v96, 16, v7 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v83, 16, v25 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_and_b32 v25, 0xffff0000, v25 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v135, 16, v39 +; GFX11FAKE16-NEXT: v_and_b32_e32 v39, 0xffff0000, v39 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v48, v7, v23 :: v_dual_fmac_f32 v49, v6, v22 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_and_b32 v13, 0xffff0000, v13 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v51, v4, v20 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: 
v_dual_lshlrev_b32 v81, 16, v26 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v145, v98, v97 :: v_dual_and_b32 v26, 0xffff0000, v26 +; GFX11FAKE16-NEXT: v_or_b32_e32 v84, 0x400000, v146 +; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v86, 16, v8 +; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v82, 16, v10 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_and_b32 v10, 0xffff0000, v10 +; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v38 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v69, 16, v28 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_and_b32 v28, 0xffff0000, v28 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v39, v8, v24 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_lshlrev_b32 v70, 16, v12 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_and_b32 v12, 0xffff0000, v12 +; GFX11FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v35 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v80, 16, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v132, 16, v36 +; GFX11FAKE16-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 +; GFX11FAKE16-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 +; GFX11FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v133 +; GFX11FAKE16-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v37 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v71, 16, v27 +; GFX11FAKE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v130, 16, v34 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_and_b32 v34, 0xffff0000, v34 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v36, v11, v27 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v50, v5, v21 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_lshlrev_b32 v67, 16, v29 +; GFX11FAKE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX11FAKE16-NEXT: v_or_b32_e32 v98, 0x400000, v51 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v119, 16, v31 +; GFX11FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v15 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_and_b32 v15, 0xffff0000, v15 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_lshlrev_b32 v65, 16, v30 +; GFX11FAKE16-NEXT: v_bfe_u32 v23, v37, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v31, v15, v32 :: v_dual_lshlrev_b32 v66, 16, v14 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v119, v64, v128 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(3) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v52 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(2) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v53 +; GFX11FAKE16-NEXT: v_and_b32_e32 v53, 0xffff0000, v53 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(1) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v54 +; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v54, 16, v55 +; GFX11FAKE16-NEXT: v_and_b32_e32 v55, 0xffff0000, v55 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_and_b32 v30, 0xffff0000, v30 +; GFX11FAKE16-NEXT: v_and_b32_e32 v52, 0xffff0000, v52 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v64, v112, v103 +; GFX11FAKE16-NEXT: 
v_fmac_f32_e32 v38, v9, v25 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_and_b32 v14, 0xffff0000, v14 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v53, v2, v18 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v55, v0, v16 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v119, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v31, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v32, v1, v17 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v119 +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v31 +; GFX11FAKE16-NEXT: v_bfe_u32 v4, v129, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff +; GFX11FAKE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v128, v114, v113 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v54, v118, v117 +; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v129 +; GFX11FAKE16-NEXT: v_bfe_u32 v6, v33, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v10, v34, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v14, v35, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v19, v36, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v27, v38, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v65, v39, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v69, v48, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v81, v49, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v85, v50, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v97, v51, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v101, v52, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v113, v53, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v117, v32, 16, 1 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v148, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_cndmask_b32_e64 v149, v2, v3, s0 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v116, v115 +; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v33 +; GFX11FAKE16-NEXT: v_bfe_u32 v8, v130, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v3, v6, v33, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v10, v14, v35, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v14, v19, v36, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v19, v23, v37, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v23, v27, v38, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v27, v65, v39, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v65, v69, v48, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v69, v81, v49, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v81, v85, v50, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v85, v97, v51, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v97, v101, v52, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v101, v113, v53, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v113, v117, v32, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v117, v2, v5, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33 +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v130 +; GFX11FAKE16-NEXT: v_bfe_u32 v12, v131, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v17, v132, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v21, v133, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v134, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v29, v135, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v67, v144, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v71, v145, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v83, v146, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v87, v147, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v99, v64, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v103, v128, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v115, v15, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v119, v54, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff 
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130 +; GFX11FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v34 +; GFX11FAKE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v12, v17, v132, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v17, v21, v133, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v134, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v25, v29, v135, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v29, v67, v144, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v67, v71, v145, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v71, v83, v146, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v83, v87, v147, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v87, v99, v64, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v99, v103, v128, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v103, v115, v15, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v115, v119, v54, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v119, v4, v9, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v131 +; GFX11FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v35 +; GFX11FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v132 +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v36 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131 +; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v134 +; GFX11FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v38 +; GFX11FAKE16-NEXT: v_or_b32_e32 v30, 0x400000, v135 +; GFX11FAKE16-NEXT: v_or_b32_e32 v66, 0x400000, v39 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v13, v8, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35 +; GFX11FAKE16-NEXT: v_or_b32_e32 v68, 0x400000, v144 +; GFX11FAKE16-NEXT: v_or_b32_e32 v70, 0x400000, v48 +; GFX11FAKE16-NEXT: v_or_b32_e32 v80, 0x400000, v145 +; GFX11FAKE16-NEXT: v_or_b32_e32 v82, 0x400000, v49 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v16, v10, v16, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v132, v132 +; GFX11FAKE16-NEXT: v_or_b32_e32 v86, 0x400000, v50 +; GFX11FAKE16-NEXT: v_or_b32_e32 v96, 0x400000, v147 +; GFX11FAKE16-NEXT: v_or_b32_e32 v100, 0x400000, v64 +; GFX11FAKE16-NEXT: v_or_b32_e32 v102, 0x400000, v52 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v18, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX11FAKE16-NEXT: v_or_b32_e32 v112, 0x400000, v128 +; GFX11FAKE16-NEXT: v_or_b32_e32 v116, 0x400000, v15 +; GFX11FAKE16-NEXT: v_or_b32_e32 v118, 0x400000, v32 +; GFX11FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v54 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v20, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v55, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v55 +; GFX11FAKE16-NEXT: v_or_b32_e32 v114, 0x400000, v53 +; GFX11FAKE16-NEXT: v_perm_b32 v11, v12, v11, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v17, v22, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff +; GFX11FAKE16-NEXT: v_perm_b32 v12, v16, v13, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v13, v34, v119, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v19, v24, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v10, v14, v10, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v21, v26, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX11FAKE16-NEXT: v_perm_b32 v14, v33, v117, 0x7060302 +; 
GFX11FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v28, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v135, v135 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v9, v17, v9, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v25, v30, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v18, v27, v66, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144 +; GFX11FAKE16-NEXT: v_perm_b32 v8, v18, v8, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v29, v68, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v19, v65, v70, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v7, v19, v7, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v67, v80, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v20, v69, v82, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146 +; GFX11FAKE16-NEXT: v_perm_b32 v6, v20, v6, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v71, v84, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v21, v81, v86, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v5, v21, v5, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v83, v96, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v87, v100, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v22, v97, v102, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v3, v22, v3, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v99, v112, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v15, v103, v116, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v113, v118, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v24, v115, v31, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53 +; GFX11FAKE16-NEXT: v_perm_b32 v1, v23, v15, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v15, v149, v148, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v24, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v25, v101, v114, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX11FAKE16-NEXT: v_perm_b32 v2, v25, v2, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v26, v85, v98, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_perm_b32 v4, v26, v4, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: v_fma_v32bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_clause 0x10 +; GFX1250-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX1250-NEXT: scratch_load_b32 v32, off, s32 offset:4 +; GFX1250-NEXT: scratch_load_b32 v33, 
off, s32 offset:8 +; GFX1250-NEXT: scratch_load_b32 v34, off, s32 offset:12 +; GFX1250-NEXT: scratch_load_b32 v35, off, s32 offset:16 +; GFX1250-NEXT: scratch_load_b32 v36, off, s32 offset:20 +; GFX1250-NEXT: scratch_load_b32 v37, off, s32 offset:24 +; GFX1250-NEXT: scratch_load_b32 v38, off, s32 offset:28 +; GFX1250-NEXT: scratch_load_b32 v39, off, s32 offset:32 +; GFX1250-NEXT: scratch_load_b32 v48, off, s32 offset:36 +; GFX1250-NEXT: scratch_load_b32 v49, off, s32 offset:40 +; GFX1250-NEXT: scratch_load_b32 v50, off, s32 offset:44 +; GFX1250-NEXT: scratch_load_b32 v51, off, s32 offset:48 +; GFX1250-NEXT: scratch_load_b32 v52, off, s32 offset:52 +; GFX1250-NEXT: scratch_load_b32 v53, off, s32 offset:56 +; GFX1250-NEXT: scratch_load_b32 v54, off, s32 offset:60 +; GFX1250-NEXT: scratch_load_b32 v55, off, s32 +; GFX1250-NEXT: s_wait_loadcnt 0xf +; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v16, v32 +; GFX1250-NEXT: s_wait_loadcnt 0xe +; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v17, v33 +; GFX1250-NEXT: s_wait_loadcnt 0xd +; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v18, v34 +; GFX1250-NEXT: s_wait_loadcnt 0xc +; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v19, v35 +; GFX1250-NEXT: s_wait_loadcnt 0xb +; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v20, v36 +; GFX1250-NEXT: s_wait_loadcnt 0xa +; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v21, v37 +; GFX1250-NEXT: s_wait_loadcnt 0x9 +; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v22, v38 +; GFX1250-NEXT: s_wait_loadcnt 0x8 +; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v23, v39 +; GFX1250-NEXT: s_wait_loadcnt 0x7 +; GFX1250-NEXT: v_pk_fma_bf16 v8, v8, v24, v48 +; GFX1250-NEXT: s_wait_loadcnt 0x6 +; GFX1250-NEXT: v_pk_fma_bf16 v9, v9, v25, v49 +; GFX1250-NEXT: s_wait_loadcnt 0x5 +; GFX1250-NEXT: v_pk_fma_bf16 v10, v10, v26, v50 +; GFX1250-NEXT: s_wait_loadcnt 0x4 +; GFX1250-NEXT: v_pk_fma_bf16 v11, v11, v27, v51 +; GFX1250-NEXT: s_wait_loadcnt 0x3 +; GFX1250-NEXT: v_pk_fma_bf16 v12, v12, v28, v52 +; GFX1250-NEXT: s_wait_loadcnt 0x2 +; GFX1250-NEXT: v_pk_fma_bf16 v13, v13, v29, v53 +; GFX1250-NEXT: s_wait_loadcnt 0x1 +; GFX1250-NEXT: v_pk_fma_bf16 v14, v14, v30, v54 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_pk_fma_bf16 v15, v15, v55, v31 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %op = call <32 x bfloat> @llvm.fma.v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) + ret <32 x bfloat> %op +} + declare bfloat @llvm.fmuladd.bf16(bfloat, bfloat, bfloat) declare <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat>, <2 x bfloat>, <2 x bfloat>) declare <3 x bfloat> @llvm.fmuladd.v3bf16(<3 x bfloat>, <3 x bfloat>, <3 x bfloat>) diff --git a/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll b/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll index 12f8a59f0b84b..d89b39348ad9a 100644 --- a/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll +++ b/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll @@ -17,50 +17,50 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: early-clobber renamable $sgpr20_sgpr21_sgpr22_sgpr23 = S_LOAD_DWORDX4_IMM_ec renamable $sgpr8_sgpr9, 24, 0 :: (dereferenceable invariant load (s128) from %ir.arg6.kernarg.offset.align.down, align 8, addrspace 4) ; GFX90A-NEXT: renamable $sgpr33 = S_LOAD_DWORD_IMM renamable $sgpr8_sgpr9, 40, 0 :: (dereferenceable invariant load (s32) from %ir.arg6.kernarg.offset.align.down + 16, align 8, addrspace 4) ; GFX90A-NEXT: renamable $sgpr24_sgpr25_sgpr26_sgpr27 = S_LOAD_DWORDX4_IMM renamable $sgpr8_sgpr9, 0, 0 :: (dereferenceable invariant load (s128) 
from %ir.arg.kernarg.offset1, addrspace 4) - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_LOAD_DWORDX2_IMM renamable $sgpr8_sgpr9, 16, 0 :: (dereferenceable invariant load (s64) from %ir.arg.kernarg.offset1 + 16, align 16, addrspace 4) + ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_LOAD_DWORDX2_IMM renamable $sgpr8_sgpr9, 16, 0 :: (dereferenceable invariant load (s64) from %ir.arg.kernarg.offset1 + 16, align 16, addrspace 4) ; GFX90A-NEXT: S_BITCMP1_B32 renamable $sgpr17, 0, implicit-def $scc ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_CSELECT_B64 -1, 0, implicit killed $scc - ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $sgpr28_sgpr29 = S_XOR_B64 renamable $sgpr12_sgpr13, -1, implicit-def dead $scc ; GFX90A-NEXT: S_BITCMP1_B32 renamable $sgpr17, 8, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_CSELECT_B64 -1, 0, implicit killed $scc - ; GFX90A-NEXT: renamable $sgpr30_sgpr31 = S_XOR_B64 killed renamable $sgpr18_sgpr19, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr30_sgpr31 = S_CSELECT_B64 -1, 0, implicit killed $scc + ; GFX90A-NEXT: renamable $sgpr30_sgpr31 = S_XOR_B64 killed renamable $sgpr30_sgpr31, -1, implicit-def dead $scc ; GFX90A-NEXT: renamable $vgpr5 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr4 = DS_READ_B32_gfx9 renamable $vgpr5, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) null`, align 8, addrspace 3) - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr28_sgpr29, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCZ %bb.2, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.1.bb103: ; GFX90A-NEXT: successors: %bb.58(0x40000000), %bb.2(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr40_sgpr41, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr30_sgpr31, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.58, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.2: ; GFX90A-NEXT: successors: %bb.3(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr46, $sgpr47, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr4, $vgpr5 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, 
$sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr40_sgpr41, $sgpr56, $sgpr57, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr4, $vgpr5 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 ; GFX90A-NEXT: renamable $vgpr3 = IMPLICIT_DEF implicit-def $vgpr2 - ; GFX90A-NEXT: renamable $vgpr21 = IMPLICIT_DEF implicit-def $vgpr20 - ; GFX90A-NEXT: renamable $vgpr23 = IMPLICIT_DEF implicit-def $vgpr22 ; GFX90A-NEXT: renamable $vgpr25 = IMPLICIT_DEF implicit-def $vgpr24 + ; GFX90A-NEXT: renamable $vgpr27 = IMPLICIT_DEF implicit-def $vgpr26 + ; GFX90A-NEXT: renamable $vgpr29 = IMPLICIT_DEF implicit-def $vgpr28 ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 0 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.3.Flow17: ; GFX90A-NEXT: successors: %bb.4(0x40000000), %bb.57(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr40_sgpr41, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr30 = V_AND_B32_e32 1023, $vgpr31, implicit $exec - ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr34_sgpr35, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr18_sgpr19, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCZ %bb.57, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.4.bb15: ; GFX90A-NEXT: successors: %bb.35(0x40000000), %bb.5(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr40_sgpr41 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0_vgpr1 = 
V_LSHLREV_B64_e64 2, $vgpr4_vgpr5, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = COPY renamable $sgpr25, implicit $exec @@ -75,12 +75,12 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.5: ; GFX90A-NEXT: successors: %bb.6(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr40_sgpr41 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 0 @@ -98,47 +98,49 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr56_vgpr57 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr44_vgpr45 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr42_vgpr43 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.6.Flow20: ; GFX90A-NEXT: successors: %bb.7(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, 
$vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr20 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr22 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr24 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr25 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr26 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr28 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr29 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr15 = COPY killed renamable $sgpr18, implicit $exec ; GFX90A-NEXT: renamable $vgpr3 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr21 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr23 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr25 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr27 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.7.Flow19: ; GFX90A-NEXT: successors: %bb.62(0x40000000), %bb.8(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, 
$vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_MOV_B64 0 - ; GFX90A-NEXT: $sgpr24_sgpr25 = S_AND_SAVEEXEC_B64 $sgpr36_sgpr37, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $sgpr18_sgpr19 = S_AND_SAVEEXEC_B64 $sgpr36_sgpr37, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.62, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.8.Flow32: ; GFX90A-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, 
$sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr24_sgpr25, implicit-def $scc - ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr18_sgpr19, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr18_sgpr19, implicit-def $scc + ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr40_sgpr41, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_XOR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_EXECZ %bb.10, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.9.bb89: ; GFX90A-NEXT: successors: %bb.10(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET renamable $vgpr11, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null` + 4, basealign 8, addrspace 5) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr10, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null`, align 8, addrspace 5) @@ -146,16 +148,16 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.10.Flow33: ; GFX90A-NEXT: successors: %bb.11(0x40000000), %bb.12(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, 
$sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def $scc - ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr44_sgpr45, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr46_sgpr47, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_XOR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_EXECZ %bb.12, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.11.bb84: ; GFX90A-NEXT: successors: %bb.12(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; 
GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET renamable $vgpr9, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null` + 4, basealign 8, addrspace 5) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr8, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null`, align 8, addrspace 5) @@ -163,16 +165,16 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.12.Flow34: ; GFX90A-NEXT: successors: %bb.13(0x40000000), %bb.14(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def $scc - ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr42_sgpr43, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr44_sgpr45, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_XOR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_EXECZ %bb.14, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.13.bb79: ; GFX90A-NEXT: successors: %bb.14(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr48_sgpr49, 
$sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET renamable $vgpr7, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null` + 4, basealign 8, addrspace 5) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr6, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null`, align 8, addrspace 5) @@ -180,10 +182,10 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.14.Flow35: ; GFX90A-NEXT: successors: %bb.15(0x40000000), %bb.16(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def $scc - ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr40_sgpr41, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr42_sgpr43, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_XOR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_EXECZ %bb.16, implicit $exec ; GFX90A-NEXT: {{ $}} @@ -357,15 +359,15 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.35.bb20: ; GFX90A-NEXT: successors: %bb.37(0x40000000), %bb.36(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, 
$vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr40_sgpr41 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_SBYTE renamable $vgpr40_vgpr41, 1024, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i23) ; GFX90A-NEXT: renamable $vgpr42 = V_ADD_CO_U32_e32 1024, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr38_sgpr39 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 0 @@ -383,33 +385,33 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr58_vgpr59 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr56_vgpr57 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr44_vgpr45 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr24_sgpr25 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.37, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.36.Flow21: ; GFX90A-NEXT: successors: %bb.6(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, 
$vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr24_sgpr25, implicit-def $scc ; GFX90A-NEXT: S_BRANCH %bb.6 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.37.bb27: ; GFX90A-NEXT: successors: %bb.39(0x40000000), %bb.38(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19, $sgpr44_sgpr45, $sgpr42_sgpr43, $sgpr54_sgpr55, $sgpr52_sgpr53, $sgpr64_sgpr65, $sgpr50_sgpr51, $sgpr66_sgpr67 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr40_sgpr41, $sgpr46_sgpr47, $sgpr44_sgpr45, $sgpr64_sgpr65, $sgpr54_sgpr55, $sgpr52_sgpr53, $sgpr66_sgpr67, $sgpr48_sgpr49 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_UBYTE renamable $vgpr40_vgpr41, 2048, 0, implicit $exec, implicit $flat_scr :: 
(load (s8) from %ir.i30) ; GFX90A-NEXT: renamable $vgpr44 = V_ADD_CO_U32_e32 2048, $vgpr40, implicit-def $vcc, implicit $exec - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = COPY renamable $sgpr36_sgpr37 - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr45, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec @@ -422,49 +424,51 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr60_vgpr61 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr58_vgpr59 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr56_vgpr57 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr38_sgpr39 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.39, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.38.Flow22: ; GFX90A-NEXT: successors: %bb.36(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, 
$sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr38_sgpr39, implicit-def $scc ; GFX90A-NEXT: renamable $sgpr38_sgpr39 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr40_sgpr41, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr42_sgpr43, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr66_sgpr67, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_AND_B64 killed renamable $sgpr42_sgpr43, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_AND_B64 killed renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_AND_B64 killed renamable $sgpr18_sgpr19, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_AND_B64 killed renamable $sgpr40_sgpr41, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_ANDN2_B64 killed renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr56_sgpr57, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_OR_B64 killed renamable $sgpr36_sgpr37, killed renamable $sgpr46_sgpr47, implicit-def dead $scc + ; GFX90A-NEXT: 
renamable $sgpr56_sgpr57 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_OR_B64 killed renamable $sgpr36_sgpr37, killed renamable $sgpr56_sgpr57, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.36 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.39.bb34: ; GFX90A-NEXT: successors: %bb.41(0x40000000), %bb.40(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19, $sgpr44_sgpr45, $sgpr50_sgpr51, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr54_sgpr55, $sgpr62_sgpr63, $sgpr52_sgpr53, $sgpr64_sgpr65, $sgpr66_sgpr67 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr46_sgpr47, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr54_sgpr55, $sgpr52_sgpr53, $sgpr66_sgpr67 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_UBYTE renamable $vgpr40_vgpr41, 3072, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i37) ; GFX90A-NEXT: renamable $vgpr56 = V_ADD_CO_U32_e32 3072, $vgpr40, implicit-def $vcc, implicit $exec - ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = COPY renamable $sgpr36_sgpr37 - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = COPY renamable $sgpr36_sgpr37 + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr57, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr0, implicit $exec + ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF @@ -472,48 +476,48 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr62_vgpr63 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr60_vgpr61 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr58_vgpr59 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = 
IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr40_sgpr41 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.41, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.40.Flow23: ; GFX90A-NEXT: successors: %bb.38(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr68_sgpr69, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr40_sgpr41, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = 
S_AND_B64 killed renamable $sgpr42_sgpr43, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr68_sgpr69, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr56_sgpr57, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_AND_B64 killed renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_AND_B64 killed renamable $sgpr18_sgpr19, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr48_sgpr49, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_OR_B64 killed renamable $sgpr48_sgpr49, killed renamable $sgpr50_sgpr51, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.38 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.41.bb41: ; GFX90A-NEXT: successors: %bb.46(0x40000000), %bb.42(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, 
$vgpr56_vgpr57:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr52_sgpr53, $sgpr50_sgpr51, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr52_sgpr53, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr54_sgpr55, $sgpr66_sgpr67, $sgpr68_sgpr69 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr58 = V_ADD_CO_U32_e32 4096, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = COPY $vcc ; GFX90A-NEXT: renamable $vgpr59, dead renamable $sgpr18_sgpr19 = V_ADDC_U32_e64 0, $vgpr41, killed $sgpr18_sgpr19, 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_UBYTE renamable $vgpr58_vgpr59, 0, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i44) - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = COPY renamable $sgpr36_sgpr37 + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr3, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr0, implicit $exec ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF @@ -522,271 +526,273 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr62_vgpr63 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr60_vgpr61 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr42_sgpr43 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.46, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.42.Flow24: ; GFX90A-NEXT: successors: %bb.40(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, 
$sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr42_sgpr43, implicit-def $scc ; GFX90A-NEXT: renamable $vgpr59 = COPY killed renamable $vgpr3, implicit $exec - ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_AND_B64 killed renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc - ; GFX90A-NEXT: 
renamable $sgpr56_sgpr57 = S_AND_B64 killed renamable $sgpr56_sgpr57, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_AND_B64 killed renamable $sgpr18_sgpr19, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr48_sgpr49, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_OR_B64 killed renamable $sgpr42_sgpr43, killed renamable $sgpr50_sgpr51, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.40 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.43.bb55: ; GFX90A-NEXT: successors: %bb.48(0x40000000), %bb.44(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr44_sgpr45, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57, $sgpr48_sgpr49 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr46_sgpr47, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59, $sgpr48_sgpr49 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: S_BITCMP1_B32 killed renamable $sgpr17, 16, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_CSELECT_B64 -1, 0, implicit killed $scc - ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_XOR_B64 renamable $sgpr64_sgpr65, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_CSELECT_B64 
-1, 0, implicit killed $scc + ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_XOR_B64 renamable $sgpr66_sgpr67, -1, implicit-def dead $scc ; GFX90A-NEXT: renamable $vgpr62 = V_ADD_CO_U32_e32 6144, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $vgpr63, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec - ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr50_sgpr51, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr18_sgpr19, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.48, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.44: ; GFX90A-NEXT: successors: %bb.45(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr57, $vgpr56, $vgpr30, $vgpr31, $vgpr60, $vgpr62, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $vgpr61, $vgpr58, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr47, $vgpr46, $vgpr2, $vgpr4, $vgpr5, $vgpr45, $vgpr44, $vgpr43, $vgpr42, $vgpr41, $vgpr40, $vgpr63 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr62, $vgpr56, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $vgpr57, $vgpr61, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr47, $vgpr46, $vgpr2, $vgpr4, $vgpr5, $vgpr45, $vgpr44, $vgpr43, $vgpr42, $vgpr41, $vgpr40, $vgpr60, $vgpr63, $vgpr58 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 0 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.45.Flow26: ; GFX90A-NEXT: successors: %bb.47(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, 
$sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 - ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr70_sgpr71 = S_AND_B64 killed renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr56_sgpr57, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr70_sgpr71 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = 
S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_OR_B64 killed renamable $sgpr44_sgpr45, killed renamable $sgpr46_sgpr47, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr50_sgpr51, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.47 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.46.bb48: ; GFX90A-NEXT: successors: %bb.43(0x40000000), %bb.47(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr64_sgpr65, $sgpr50_sgpr51, $sgpr66_sgpr67, $sgpr44_sgpr45, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr66_sgpr67, $sgpr58_sgpr59, $sgpr68_sgpr69, $sgpr64_sgpr65, $sgpr46_sgpr47, $sgpr54_sgpr55, $sgpr60_sgpr61 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr60 = V_ADD_CO_U32_e32 5120, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = COPY $vcc ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 4096, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $vgpr1, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_UBYTE killed renamable $vgpr0_vgpr1, 1024, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i51) - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = COPY renamable $sgpr36_sgpr37 - ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable 
$sgpr50_sgpr51 = COPY renamable $sgpr36_sgpr37 + ; GFX90A-NEXT: renamable $sgpr70_sgpr71 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr61, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $sgpr18_sgpr19, 0, implicit $exec ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr0, implicit $exec - ; GFX90A-NEXT: renamable $sgpr70_sgpr71 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr62_vgpr63 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 - ; GFX90A-NEXT: $sgpr18_sgpr19 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF + ; GFX90A-NEXT: $sgpr44_sgpr45 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.43, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.47.Flow25: ; GFX90A-NEXT: successors: %bb.42(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $sgpr70_sgpr71, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 - ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr18_sgpr19, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr70_sgpr71, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable 
$sgpr68_sgpr69, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_AND_B64 killed renamable $sgpr66_sgpr67, $exec, implicit-def dead $scc + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr58_sgpr59, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $sgpr70_sgpr71, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr44_sgpr45, implicit-def $scc + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_AND_B64 killed renamable $sgpr70_sgpr71, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr68_sgpr69, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr66_sgpr67, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr48_sgpr49, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_OR_B64 killed renamable $sgpr44_sgpr45, killed renamable $sgpr50_sgpr51, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.42 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.48.bb63: ; 
GFX90A-NEXT: successors: %bb.50(0x40000000), %bb.49(0x40000000) - ; GFX90A-NEXT: liveins: $vcc, $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr50_sgpr51, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57, $sgpr48_sgpr49 + ; GFX90A-NEXT: liveins: $vcc, $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr56_sgpr57:0x000000000000000F, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59, $sgpr48_sgpr49 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.50, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.49: ; GFX90A-NEXT: successors: %bb.44(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, 
$vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_MOV_B64 -1 ; GFX90A-NEXT: S_BRANCH %bb.44 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.50.bb68: ; GFX90A-NEXT: successors: %bb.54(0x40000000), %bb.51(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr50_sgpr51, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr56_sgpr57:0x000000000000000F, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = nuw nsw V_LSHLREV_B32_e32 3, $vgpr30, implicit $exec ; GFX90A-NEXT: renamable $vgpr1 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr50_sgpr51, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr18_sgpr19, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.54, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.51: ; GFX90A-NEXT: successors: %bb.45(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, 
$vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: S_BRANCH %bb.45 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.52.bb80: ; GFX90A-NEXT: successors: %bb.59(0x40000000), %bb.53(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, 
$vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr56_sgpr57:0x000000000000000F, $sgpr62_sgpr63, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr17 = S_BFE_U32 renamable $sgpr20, 65560, implicit-def dead $scc ; GFX90A-NEXT: S_CMP_EQ_U32 killed renamable $sgpr17, 0, implicit-def $scc ; GFX90A-NEXT: renamable $vgpr8 = V_ADD_CO_U32_e32 4096, $vgpr0, implicit-def $vcc, implicit $exec - ; GFX90A-NEXT: renamable $vgpr9, dead renamable $sgpr52_sgpr53 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr9, dead renamable $sgpr18_sgpr19 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: S_CBRANCH_SCC1 %bb.59, implicit killed $scc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.53: ; GFX90A-NEXT: successors: %bb.61(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_MOV_B64 0 - ; 
GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = COPY renamable $sgpr36_sgpr37 + ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: S_BRANCH %bb.61 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.54.bb73: ; GFX90A-NEXT: successors: %bb.52(0x40000000), %bb.55(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr56_sgpr57:0x000000000000000F, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr60_sgpr61 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr8 = FLAT_LOAD_UBYTE renamable $vgpr0_vgpr1, 2048, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i76) ; GFX90A-NEXT: renamable $vgpr6 = V_ADD_CO_U32_e32 2048, $vgpr0, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 -1 ; 
GFX90A-NEXT: renamable $sgpr52_sgpr53 = COPY renamable $sgpr36_sgpr37 - ; GFX90A-NEXT: renamable $vgpr7, dead renamable $sgpr58_sgpr59 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr7, dead renamable $sgpr18_sgpr19 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr8, implicit $exec - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 - ; GFX90A-NEXT: $sgpr60_sgpr61 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF + ; GFX90A-NEXT: $sgpr62_sgpr63 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.52, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.55.Flow29: ; GFX90A-NEXT: successors: %bb.45(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, 
$sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr60_sgpr61, implicit-def $scc + ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr62_sgpr63, implicit-def $scc ; GFX90A-NEXT: S_BRANCH %bb.45 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.56.bb90: ; GFX90A-NEXT: successors: %bb.60(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 - ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr53 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $sgpr64_sgpr65, implicit $exec - ; GFX90A-NEXT: renamable $vgpr12 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr16_vgpr17 = DS_READ_B64_gfx9 killed renamable $vgpr12, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) null`, addrspace 3) - ; GFX90A-NEXT: renamable $vgpr12 = COPY renamable $sgpr21, implicit $exec - ; GFX90A-NEXT: renamable $vgpr18_vgpr19 = DS_READ_B64_gfx9 killed renamable $vgpr12, 0, 0, implicit $exec :: (load (s64) from %ir.7, addrspace 3) - ; GFX90A-NEXT: renamable $vgpr12 = COPY renamable $sgpr22, implicit $exec - ; GFX90A-NEXT: renamable $vgpr14_vgpr15 = DS_READ_B64_gfx9 killed renamable $vgpr12, 0, 0, implicit $exec :: (load (s64) from %ir.8, addrspace 3) - ; GFX90A-NEXT: renamable $vgpr12 = COPY renamable $sgpr46, implicit $exec - ; GFX90A-NEXT: renamable $vgpr13 = V_ALIGNBIT_B32_opsel_e64 0, killed $sgpr47, 0, killed $vgpr12, 0, 1, 0, 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr52 = V_ALIGNBIT_B32_opsel_e64 0, $vgpr19, 0, $vgpr18, 0, 1, 0, 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr19 = V_CNDMASK_B32_e64 0, 0, 0, 1, $sgpr12_sgpr13, implicit $exec - ; GFX90A-NEXT: renamable $vgpr17 = V_ALIGNBIT_B32_opsel_e64 0, $vgpr17, 0, $vgpr16, 0, 1, 0, 0, implicit $exec + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, 
$vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr54_sgpr55, $sgpr56_sgpr57:0x000000000000000F, $sgpr62_sgpr63, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: renamable $vgpr12 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $sgpr66_sgpr67, implicit $exec + ; GFX90A-NEXT: renamable $vgpr13 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr14 = COPY renamable $sgpr21, implicit $exec + ; GFX90A-NEXT: renamable $vgpr22_vgpr23 = DS_READ_B64_gfx9 killed renamable $vgpr14, 0, 0, implicit $exec :: (load (s64) from %ir.7, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr20_vgpr21 = DS_READ_B64_gfx9 killed renamable $vgpr13, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr13 = COPY renamable $sgpr22, implicit $exec + ; GFX90A-NEXT: renamable $vgpr18_vgpr19 = DS_READ_B64_gfx9 killed renamable $vgpr13, 0, 0, implicit $exec :: (load (s64) from %ir.8, addrspace 3) + ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_LSHR_B64 killed renamable $sgpr56_sgpr57, 1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vgpr16_vgpr17 = V_LSHRREV_B64_e64 1, $vgpr22_vgpr23, implicit $exec + ; GFX90A-NEXT: renamable $vgpr17 = V_CNDMASK_B32_e64 0, 0, 0, 1, $sgpr12_sgpr13, implicit $exec + ; GFX90A-NEXT: renamable $vgpr32_vgpr33 = V_LSHRREV_B64_e64 1, $vgpr20_vgpr21, implicit $exec ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_OR_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $vgpr12 = COPY renamable $vgpr16, implicit $exec + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_OR_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vgpr14 = COPY renamable $vgpr20, implicit $exec ; GFX90A-NEXT: S_BRANCH %bb.60 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.57: ; GFX90A-NEXT: successors: %bb.7(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, 
$sgpr36_sgpr37, $sgpr40_sgpr41, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr19 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr52 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr22 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr18 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr14 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr19 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr12 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr20 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr32 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr16 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr15 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr53 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 0 @@ -812,79 +818,80 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.58.bb105: ; GFX90A-NEXT: successors: %bb.3(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr40_sgpr41, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr22_vgpr23 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr26_vgpr27 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: renamable $vgpr0 = COPY renamable $sgpr23, implicit $exec - ; GFX90A-NEXT: renamable $vgpr20_vgpr21 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.419, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr24_vgpr25 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.419, addrspace 3) ; GFX90A-NEXT: 
renamable $vgpr0 = COPY renamable $sgpr21, implicit $exec ; GFX90A-NEXT: renamable $vgpr2_vgpr3 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.7, addrspace 3) ; GFX90A-NEXT: renamable $vgpr0 = COPY killed renamable $sgpr33, implicit $exec - ; GFX90A-NEXT: renamable $vgpr12_vgpr13 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.420, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr14_vgpr15 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.420, addrspace 3) ; GFX90A-NEXT: renamable $vgpr0 = COPY renamable $sgpr22, implicit $exec - ; GFX90A-NEXT: renamable $vgpr24_vgpr25 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.8, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr28_vgpr29 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.8, addrspace 3) ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 -1 ; GFX90A-NEXT: S_BRANCH %bb.3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.59.bb85: ; GFX90A-NEXT: successors: %bb.56(0x40000000), %bb.60(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr56_sgpr57:0x000000000000000F, $sgpr62_sgpr63, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr10 = V_OR_B32_e32 1, $vgpr8, implicit $exec ; GFX90A-NEXT: renamable $vgpr11 = COPY renamable $vgpr9, implicit $exec ; GFX90A-NEXT: renamable $vgpr12 = FLAT_LOAD_UBYTE renamable $vgpr10_vgpr11, 0, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i86) ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $vcc = 
V_CMP_EQ_U16_e64 0, killed $vgpr12, implicit $exec - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = COPY renamable $sgpr36_sgpr37 - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr54_sgpr55 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.56, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.60.Flow31: ; GFX90A-NEXT: successors: %bb.61(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, 
$vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr54_sgpr55, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 0 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.61.Flow30: ; GFX90A-NEXT: successors: %bb.55(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed 
renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr52_sgpr53, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_OR_B64 killed renamable $sgpr52_sgpr53, killed renamable $sgpr56_sgpr57, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.55 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.62.bb140: ; GFX90A-NEXT: successors: %bb.68(0x40000000), %bb.63(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, 
$sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr24_sgpr25 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr30_sgpr31, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.68, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.63.Flow13: ; GFX90A-NEXT: successors: %bb.64(0x40000000), %bb.66(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: $vcc = S_ANDN2_B64 $exec, killed renamable $sgpr36_sgpr37, implicit-def dead $scc + ; GFX90A-NEXT: $vcc = S_ANDN2_B64 $exec, killed renamable $sgpr24_sgpr25, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.66, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.64.bb159: ; GFX90A-NEXT: successors: %bb.67(0x40000000), %bb.65(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, 
$vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vcc = V_CMP_NE_U32_e64 0, killed $vgpr30, implicit $exec ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec @@ -893,104 +900,106 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.65.Flow10: ; GFX90A-NEXT: successors: %bb.66(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, 
$vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $sgpr12_sgpr13 = S_ANDN2_SAVEEXEC_B64 $sgpr12_sgpr13, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def $scc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.66.Flow14: ; GFX90A-NEXT: successors: %bb.8(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = COPY $exec ; GFX90A-NEXT: S_BRANCH %bb.8 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.67.bb161: ; GFX90A-NEXT: successors: %bb.65(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, 
$sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr21, killed $vgpr23, implicit $exec - ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr2, killed $vgpr25, implicit $exec - ; GFX90A-NEXT: renamable $vgpr3 = V_OR_B32_e32 killed $vgpr13, killed $vgpr3, implicit $exec + ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr25, killed $vgpr27, implicit $exec + ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr2, killed $vgpr29, implicit $exec + ; GFX90A-NEXT: renamable $vgpr3 = V_OR_B32_e32 killed $vgpr15, killed $vgpr3, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr3, killed $vgpr2, implicit $exec ; GFX90A-NEXT: renamable $vgpr3 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U32_sdwa 0, killed $vgpr53, 0, $vgpr3, 0, 0, 6, implicit $exec + ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U32_sdwa 0, killed $vgpr12, 0, $vgpr3, 0, 0, 6, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = V_CNDMASK_B32_e64 0, 0, 0, killed $vgpr2, killed $vcc, implicit $exec - ; GFX90A-NEXT: renamable $vgpr4 = V_OR_B32_e32 killed $vgpr52, killed $vgpr15, implicit $exec + ; GFX90A-NEXT: renamable $vgpr4 = V_OR_B32_e32 killed $vgpr16, killed $vgpr19, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr4, killed $vgpr2, implicit $exec - ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U32_sdwa 0, killed $vgpr19, 0, $vgpr3, 0, 0, 6, implicit $exec + ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U32_sdwa 0, killed $vgpr17, 0, $vgpr3, 0, 0, 6, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = V_CNDMASK_B32_e64 0, 0, 0, killed $vgpr2, killed $vcc, implicit $exec - ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr2, killed $vgpr17, implicit $exec + ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr2, killed $vgpr32, implicit $exec ; GFX90A-NEXT: DS_WRITE2_B32_gfx9 killed renamable $vgpr3, killed renamable $vgpr2, renamable $vgpr3, 0, 1, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, align 4, addrspace 3) ; GFX90A-NEXT: S_BRANCH %bb.65 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.68.bb174: ; GFX90A-NEXT: successors: %bb.72(0x40000000), %bb.69(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, 
$vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 - ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr26 = V_OR_B32_e32 1, $vgpr24, implicit $exec - ; GFX90A-NEXT: renamable $vgpr48 = V_OR_B32_e32 $vgpr26, $vgpr22, implicit $exec - ; GFX90A-NEXT: renamable $vgpr34 = V_OR_B32_e32 $vgpr48, $vgpr20, implicit $exec - ; GFX90A-NEXT: renamable $vgpr28 = V_CNDMASK_B32_e64 0, $vgpr34, 0, 0, $sgpr12_sgpr13, implicit $exec - ; GFX90A-NEXT: renamable $vgpr38 = V_OR_B32_e32 $vgpr28, $vgpr2, implicit $exec - ; GFX90A-NEXT: renamable $vgpr36 = V_OR_B32_e32 $vgpr38, $vgpr12, implicit $exec - ; GFX90A-NEXT: renamable $vgpr32 = V_OR_B32_e32 $vgpr36, $vgpr14, implicit $exec - ; GFX90A-NEXT: renamable $vgpr50 = V_CNDMASK_B32_e64 0, 0, 0, $vgpr32, killed $sgpr12_sgpr13, implicit $exec + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: renamable $agpr0 = COPY killed renamable $vgpr32, implicit $exec + ; GFX90A-NEXT: renamable $vgpr32 = V_OR_B32_e32 1, $vgpr28, implicit $exec + ; GFX90A-NEXT: renamable $vgpr52 = V_OR_B32_e32 $vgpr32, $vgpr26, implicit $exec + ; GFX90A-NEXT: renamable $vgpr38 = V_OR_B32_e32 $vgpr52, $vgpr24, implicit $exec + ; GFX90A-NEXT: renamable $vgpr34 = V_CNDMASK_B32_e64 0, $vgpr38, 0, 0, $sgpr12_sgpr13, implicit $exec + ; GFX90A-NEXT: renamable $vgpr50 = V_OR_B32_e32 $vgpr34, $vgpr2, implicit $exec + ; GFX90A-NEXT: renamable $vgpr48 = V_OR_B32_e32 $vgpr50, $vgpr14, implicit $exec + ; GFX90A-NEXT: renamable $vgpr36 = V_OR_B32_e32 $vgpr48, $vgpr18, implicit $exec + ; GFX90A-NEXT: renamable $vgpr54 = V_CNDMASK_B32_e64 0, 0, 0, $vgpr36, killed 
$sgpr12_sgpr13, implicit $exec ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr28_sgpr29, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.72, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.69.Flow: ; GFX90A-NEXT: successors: %bb.70(0x40000000), %bb.71(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x0000000000000003, $vgpr28_vgpr29:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $agpr0_agpr1:0x0000000000000003, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr52_vgpr53:0x0000000000000003, $vgpr54_vgpr55:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $vcc = S_ANDN2_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.71, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: 
bb.70.bb186: ; GFX90A-NEXT: successors: %bb.71(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x0000000000000003, $vgpr28_vgpr29:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $agpr0_agpr1:0x0000000000000003, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr52_vgpr53:0x0000000000000003, $vgpr54_vgpr55:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr4_vgpr5 = V_LSHLREV_B64_e64 3, killed $vgpr4_vgpr5, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = COPY renamable $sgpr27, implicit $exec ; GFX90A-NEXT: renamable $vgpr4, renamable $vcc = V_ADD_CO_U32_e64 killed $sgpr26, $vgpr4, 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr2, dead renamable $vcc = V_ADDC_U32_e64 killed $vgpr2, killed $vgpr5, killed $vcc, 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr27 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr49 = 
COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr35 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr39 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr37 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr29 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr51 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr33 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr27, renamable $vgpr26_vgpr27, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr33 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr53 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr39 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr51 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr49 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr35 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr55 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr37 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr33, renamable $vgpr32_vgpr33, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: renamable $vgpr5 = COPY renamable $sgpr21, implicit $exec + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr5, killed renamable $vgpr52_vgpr53, 0, 0, implicit $exec :: (store (s64) into %ir.7, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr13 = COPY killed renamable $sgpr22, implicit $exec + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr13, killed renamable $vgpr38_vgpr39, 0, 0, implicit $exec :: (store (s64) into %ir.8, addrspace 3) + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr33, killed renamable $vgpr50_vgpr51, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr5, killed renamable $vgpr48_vgpr49, 0, 0, implicit $exec :: (store (s64) into %ir.7, addrspace 3) - ; GFX90A-NEXT: renamable $vgpr12 = COPY killed renamable $sgpr22, implicit $exec - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr12, killed renamable $vgpr34_vgpr35, 0, 0, implicit $exec :: (store (s64) into %ir.8, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr27, killed renamable $vgpr38_vgpr39, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr5, killed renamable $vgpr36_vgpr37, 0, 0, implicit $exec :: (store (s64) into %ir.7, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr27, killed renamable $vgpr28_vgpr29, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr5, killed renamable $vgpr50_vgpr51, 0, 0, implicit $exec :: (store (s64) into %ir.7, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr27, killed renamable $vgpr32_vgpr33, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr33, killed renamable $vgpr34_vgpr35, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr5, killed renamable $vgpr54_vgpr55, 0, 0, implicit $exec :: (store (s64) into 
%ir.7, addrspace 3) + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr33, killed renamable $vgpr36_vgpr37, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null` + 4, basealign 8, addrspace 5) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null`, align 8, addrspace 5) ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.71.Flow9: ; GFX90A-NEXT: successors: %bb.63(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $agpr0_agpr1:0x0000000000000003, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr24_sgpr25 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $vgpr32 = COPY killed renamable $agpr0, implicit $exec ; GFX90A-NEXT: S_BRANCH %bb.63 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.72.bb196: ; GFX90A-NEXT: successors: %bb.69(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, 
$sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x0000000000000003, $vgpr28_vgpr29:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $agpr0_agpr1:0x0000000000000003, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr52_vgpr53:0x0000000000000003, $vgpr54_vgpr55:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 $vgpr50, killed $vgpr18, implicit $exec - ; GFX90A-NEXT: renamable $vgpr54 = V_OR_B32_e32 killed $vgpr2, killed $vgpr16, implicit $exec - ; GFX90A-NEXT: renamable $vgpr55 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr55, renamable $vgpr54_vgpr55, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 $vgpr54, killed $vgpr22, implicit $exec + ; GFX90A-NEXT: renamable $vgpr20 = V_OR_B32_e32 killed $vgpr2, killed $vgpr20, implicit $exec + ; GFX90A-NEXT: renamable $vgpr21 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr21, renamable $vgpr20_vgpr21, 0, 0, implicit $exec :: (store (s64) into `ptr 
addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_MOV_B64 0 ; GFX90A-NEXT: S_BRANCH %bb.69 bb: diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll index c3b14e8829042..ca50835018824 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll +++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll @@ -57,8 +57,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB0_1: ; %atomicrmw.start @@ -69,7 +68,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -96,9 +95,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -106,7 +104,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -123,9 +121,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -133,7 +130,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -150,9 +147,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 
0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -160,7 +156,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -245,8 +241,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB1_1: ; %atomicrmw.start @@ -256,7 +251,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v4, v1 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -292,16 +287,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -318,16 +312,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v1, s20 ; GFX7-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v4, v1 -; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -468,7 +461,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX10-LABEL: 
buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v9, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 @@ -481,7 +473,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB2_1 @@ -507,7 +498,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB2_4 @@ -556,7 +547,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v9, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -569,7 +559,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_1 ; GFX908-NEXT: ; %bb.2: @@ -594,7 +583,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 @@ -614,7 +603,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -627,7 +615,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_1 ; GFX8-NEXT: ; %bb.2: @@ -652,7 +639,7 @@ 
define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 @@ -672,7 +659,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -684,7 +670,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB2_1 ; GFX7-NEXT: ; %bb.2: @@ -709,7 +694,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB2_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 @@ -830,8 +815,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start @@ -842,7 +826,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -860,16 +844,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc 
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -886,9 +869,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -896,7 +878,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -913,9 +895,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -923,7 +904,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -940,9 +921,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -950,7 +930,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1035,8 +1015,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; 
GFX10-NEXT: .LBB4_1: ; %atomicrmw.start @@ -1046,7 +1025,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v4, v1 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -1064,15 +1043,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v1, s6 ; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_add_f32_e32 v2, v3, v0 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 @@ -1089,16 +1066,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v4, v1 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -1115,16 +1091,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -1141,16 +1116,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v1, s20 ; GFX7-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 
+; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_add_f32_e32 v1, v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v4, v1 -; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -1223,9 +1197,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -1237,7 +1209,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1255,8 +1227,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB5_1: ; %atomicrmw.start @@ -1267,7 +1238,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -1285,16 +1256,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1311,9 +1281,8 @@ define float 
@buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1321,7 +1290,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1338,9 +1307,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1348,7 +1316,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1365,9 +1333,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -1375,7 +1342,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7) ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1448,9 +1415,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -1462,7 +1427,7 @@ define float 
@buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1480,8 +1445,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start @@ -1492,7 +1456,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -1510,16 +1474,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1536,9 +1499,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1546,7 +1508,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1563,9 +1525,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; 
GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1573,7 +1534,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1590,9 +1551,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -1600,7 +1560,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1673,9 +1633,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote_memory__amdgpu_ignore_denormal_mode: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -1687,7 +1645,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1705,8 +1663,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start @@ -1717,7 +1674,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: v_add_f32_e32 
v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -1735,16 +1692,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1761,9 +1717,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1771,7 +1726,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1788,9 +1743,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1798,7 +1752,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1815,9 +1769,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; 
GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -1825,7 +1778,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1883,24 +1836,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 +; GFX12-NEXT: v_mov_b32_e32 v10, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5] -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1925,25 +1876,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 +; GFX11-NEXT: v_mov_b32_e32 v10, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; 
GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1958,26 +1907,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX10-NEXT: v_mov_b32_e32 v10, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_mov_b32_e32 v0, v6 +; GFX10-NEXT: v_mov_b32_e32 v1, v7 +; GFX10-NEXT: v_mov_b32_e32 v2, v8 +; GFX10-NEXT: v_mov_b32_e32 v3, v9 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX10-NEXT: v_mov_b32_e32 v9, v1 +; GFX10-NEXT: v_mov_b32_e32 v8, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB8_1 @@ -1999,26 +1947,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v10, s20 ; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: 
v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v0, v6 +; GFX908-NEXT: v_mov_b32_e32 v1, v7 +; GFX908-NEXT: v_mov_b32_e32 v2, v8 +; GFX908-NEXT: v_mov_b32_e32 v3, v9 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX908-NEXT: v_mov_b32_e32 v9, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v8, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB8_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2030,26 +1977,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v10, s20 ; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v0, v6 +; GFX8-NEXT: v_mov_b32_e32 v1, v7 +; GFX8-NEXT: v_mov_b32_e32 v2, v8 +; GFX8-NEXT: v_mov_b32_e32 v3, v9 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX8-NEXT: v_mov_b32_e32 v9, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v8, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB8_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2061,26 +2007,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX7-NEXT: v_mov_b32_e32 v5, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v10, s20 ; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: 
v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB8_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2092,27 +2037,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v5, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v10, s6 ; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB8_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2133,9 +2078,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v2, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 +; GFX12-NEXT: v_mov_b32_e32 v6, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start @@ -2146,7 +2089,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[4:5] @@ -2174,9 +2117,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v2, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 
0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 +; GFX11-NEXT: v_mov_b32_e32 v6, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start @@ -2187,7 +2128,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -2205,8 +2146,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 +; GFX10-NEXT: v_mov_b32_e32 v6, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start @@ -2218,7 +2158,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v8, v3 ; GFX10-NEXT: v_mov_b32_e32 v7, v2 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -2246,9 +2186,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v2, s20 ; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v6, s20 ; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -2257,7 +2196,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v9, v4 ; GFX908-NEXT: v_mov_b32_e32 v8, v3 ; GFX908-NEXT: v_mov_b32_e32 v7, v2 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5] @@ -2275,9 +2214,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v2, s20 ; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v6, s20 ; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -2286,7 +2224,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v9, v4 ; GFX8-NEXT: v_mov_b32_e32 v8, v3 ; GFX8-NEXT: v_mov_b32_e32 v7, v2 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 
offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5] @@ -2304,9 +2242,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v6, s20 ; GFX7-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -2315,7 +2252,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_ ; GFX7-NEXT: v_mov_b32_e32 v9, v4 ; GFX7-NEXT: v_mov_b32_e32 v8, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v2 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5] @@ -2373,10 +2310,9 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-NEXT: v_readfirstlane_b32 s4, v9 ; GFX12-NEXT: v_readfirstlane_b32 s5, v10 ; GFX12-NEXT: v_readfirstlane_b32 s6, v7 @@ -2390,7 +2326,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048 -; GFX12-NEXT: ; implicit-def: $vgpr4 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB10_1 ; GFX12-NEXT: ; %bb.2: @@ -2420,7 +2355,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB10_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2474,22 +2409,21 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_readfirstlane_b32 s4, v9 ; GFX11-NEXT: 
v_readfirstlane_b32 s5, v10 ; GFX11-NEXT: v_readfirstlane_b32 s6, v7 ; GFX11-NEXT: v_readfirstlane_b32 s7, v8 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8] ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048 -; GFX11-NEXT: ; implicit-def: $vgpr4 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB10_1 ; GFX11-NEXT: ; %bb.2: @@ -2518,7 +2452,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB10_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2543,7 +2477,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX10-NEXT: v_mov_b32_e32 v7, v2 ; GFX10-NEXT: v_mov_b32_e32 v10, v1 ; GFX10-NEXT: v_mov_b32_e32 v9, v0 -; GFX10-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 @@ -2556,7 +2489,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX10-NEXT: ; implicit-def: $vgpr4 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB10_1 @@ -2584,7 +2516,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB10_4 @@ -2640,7 +2572,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: v_mov_b32_e32 v7, v2 ; GFX908-NEXT: v_mov_b32_e32 v10, v1 ; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v9 @@ -2653,7 +2584,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB10_1 ; GFX908-NEXT: ; %bb.2: @@ -2680,7 +2610,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], 
s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB10_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2704,7 +2634,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: v_mov_b32_e32 v7, v2 ; GFX8-NEXT: v_mov_b32_e32 v10, v1 ; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v9 @@ -2717,7 +2646,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB10_1 ; GFX8-NEXT: ; %bb.2: @@ -2744,7 +2672,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB10_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2768,7 +2696,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX7-NEXT: v_mov_b32_e32 v7, v2 ; GFX7-NEXT: v_mov_b32_e32 v10, v1 ; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_add_i32_e32 v15, vcc, 0x800, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v9 @@ -2780,7 +2707,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX7-NEXT: ; implicit-def: $vgpr4 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB10_1 ; GFX7-NEXT: ; %bb.2: @@ -2807,7 +2733,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB10_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1 @@ -2903,24 +2829,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 +; GFX12-NEXT: v_mov_b32_e32 v10, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB11_1: ; 
%atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5] -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2945,25 +2869,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 +; GFX11-NEXT: v_mov_b32_e32 v10, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2978,26 +2900,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX10-NEXT: v_mov_b32_e32 v10, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: 
buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_mov_b32_e32 v0, v6 +; GFX10-NEXT: v_mov_b32_e32 v1, v7 +; GFX10-NEXT: v_mov_b32_e32 v2, v8 +; GFX10-NEXT: v_mov_b32_e32 v3, v9 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX10-NEXT: v_mov_b32_e32 v9, v1 +; GFX10-NEXT: v_mov_b32_e32 v8, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB11_1 @@ -3010,23 +2931,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v4, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 +; GFX90A-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX90A-NEXT: v_mov_b32_e32 v5, v1 -; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x800 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v6, s6 +; GFX90A-NEXT: v_mov_b32_e32 v10, s20 ; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1] -; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX90A-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[8:9], v[8:9] op_sel:[0,1] +; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 -; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[0:1], v[0:1] op_sel:[0,1] ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB11_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3038,26 +2958,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v10, s20 ; GFX908-NEXT: 
.LBB11_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v0, v6 +; GFX908-NEXT: v_mov_b32_e32 v1, v7 +; GFX908-NEXT: v_mov_b32_e32 v2, v8 +; GFX908-NEXT: v_mov_b32_e32 v3, v9 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX908-NEXT: v_mov_b32_e32 v9, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v8, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB11_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3069,26 +2988,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v10, s20 ; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v0, v6 +; GFX8-NEXT: v_mov_b32_e32 v1, v7 +; GFX8-NEXT: v_mov_b32_e32 v2, v8 +; GFX8-NEXT: v_mov_b32_e32 v3, v9 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX8-NEXT: v_mov_b32_e32 v9, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v8, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB11_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3100,26 +3018,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX7-NEXT: v_mov_b32_e32 v5, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v10, s20 ; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: 
v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB11_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3131,27 +3048,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v5, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v10, s6 ; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB11_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3173,24 +3090,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 +; GFX12-NEXT: v_mov_b32_e32 v10, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5] ; GFX12-NEXT: 
s_wait_storecnt 0x0 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5] -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -3215,25 +3130,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 +; GFX11-NEXT: v_mov_b32_e32 v10, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -3248,26 +3161,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX10-NEXT: v_mov_b32_e32 v10, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; 
GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_mov_b32_e32 v0, v6 +; GFX10-NEXT: v_mov_b32_e32 v1, v7 +; GFX10-NEXT: v_mov_b32_e32 v2, v8 +; GFX10-NEXT: v_mov_b32_e32 v3, v9 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9] +; GFX10-NEXT: v_mov_b32_e32 v9, v1 +; GFX10-NEXT: v_mov_b32_e32 v8, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB12_1 @@ -3289,26 +3201,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v10, s20 ; GFX908-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v0, v6 +; GFX908-NEXT: v_mov_b32_e32 v1, v7 +; GFX908-NEXT: v_mov_b32_e32 v2, v8 +; GFX908-NEXT: v_mov_b32_e32 v3, v9 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX908-NEXT: v_mov_b32_e32 v9, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v8, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB12_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3320,26 +3231,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v10, s20 ; GFX8-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_f64 
v[7:8], v[9:10], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v0, v6 +; GFX8-NEXT: v_mov_b32_e32 v1, v7 +; GFX8-NEXT: v_mov_b32_e32 v2, v8 +; GFX8-NEXT: v_mov_b32_e32 v3, v9 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX8-NEXT: v_mov_b32_e32 v9, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v8, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB12_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3351,26 +3261,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX7-NEXT: v_mov_b32_e32 v5, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v10, s20 ; GFX7-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB12_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3382,27 +3291,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_ ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v5, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v10, s6 ; GFX6-NEXT: .LBB12_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: 
v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB12_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7028,9 +6937,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -7042,7 +6949,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -7060,8 +6967,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7072,7 +6978,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -7099,9 +7005,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -7109,7 +7014,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX908-NEXT: 
v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -7126,9 +7031,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -7138,7 +7042,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_or_b32_e32 v4, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -7156,7 +7060,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -7164,7 +7067,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -7181,7 +7084,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -7277,9 +7180,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB20_1: ; %atomicrmw.start @@ -7290,7 +7191,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_mov_b32_e32 v4, v1 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -7308,8 
+7209,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start @@ -7319,7 +7219,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v4, v1 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -7355,9 +7255,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -7366,7 +7265,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: v_or_b32_e32 v1, v4, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -7385,7 +7284,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -7393,7 +7291,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 @@ -7410,7 +7308,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6 @@ -7543,7 +7441,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x400, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 
s2, exec_lo ; GFX11-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -7558,7 +7455,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: buffer_load_b32 v8, v4, s[4:7], 0 offen offset:1024 -; GFX11-NEXT: ; implicit-def: $vgpr4 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-NEXT: ; %bb.2: @@ -7587,7 +7483,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[6:7], v9, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[6:7], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 @@ -7609,7 +7505,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v9, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -7622,7 +7517,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_1 @@ -7648,7 +7542,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_4 @@ -7697,7 +7591,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v9, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7710,7 +7603,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_1 ; GFX908-NEXT: ; %bb.2: @@ -7735,7 +7627,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX908-NEXT: 
buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 @@ -7755,7 +7647,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7768,7 +7659,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_1 ; GFX8-NEXT: ; %bb.2: @@ -7778,9 +7668,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_add_f16_sdwa v4, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_add_f16_e32 v6, v8, v5 -; GFX8-NEXT: v_or_b32_e32 v7, v6, v4 +; GFX8-NEXT: v_add_f16_sdwa v6, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT: v_add_f16_e32 v7, v8, v5 +; GFX8-NEXT: v_or_b32_e32 v7, v7, v6 ; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: s_mov_b64 s[12:13], exec ; GFX8-NEXT: v_mov_b32_e32 v7, v8 @@ -7795,7 +7685,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 @@ -7815,7 +7705,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7826,39 +7715,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_1 ; GFX7-NEXT: ; %bb.2: ; GFX7-NEXT: s_mov_b64 exec, s[6:7] ; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 
16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 ; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6 -; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 ; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5 -; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5 +; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_add_f32_e32 v6, v6, v10 -; GFX7-NEXT: v_add_f32_e32 v7, v7, v11 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6 +; GFX7-NEXT: v_or_b32_e32 v6, v6, v5 +; GFX7-NEXT: v_add_f32_e32 v7, v7, v10 +; GFX7-NEXT: v_add_f32_e32 v8, v8, v11 ; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7 -; GFX7-NEXT: v_or_b32_e32 v6, v4, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; GFX7-NEXT: v_or_b32_e32 v5, v7, v4 -; GFX7-NEXT: v_mov_b32_e32 v8, v6 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX7-NEXT: v_or_b32_e32 v5, v8, v5 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7870,23 +7758,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB21_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v7 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -8003,9 +7891,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -8017,7 +7903,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: 
v_pk_add_f16 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -8035,8 +7921,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start @@ -8047,7 +7932,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX10-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8065,16 +7950,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8091,9 +7975,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -8101,7 +7984,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX908-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8118,9 +8001,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 
v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8130,7 +8012,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX8-NEXT: v_or_b32_e32 v4, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8148,7 +8030,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -8156,7 +8037,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -8173,7 +8054,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -8269,9 +8150,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start @@ -8282,7 +8161,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_mov_b32_e32 v4, v1 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -8300,8 +8179,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start @@ -8311,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 
v4, v1 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8329,15 +8207,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v1, s6 ; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_pk_add_f16 v2, v3, v0 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 @@ -8354,16 +8230,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_pk_add_f16 v1, v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v4, v1 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -8380,9 +8255,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8391,7 +8265,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX8-NEXT: v_or_b32_e32 v1, v4, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -8410,7 +8284,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -8418,7 +8291,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX7-NEXT: 
v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB23_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 @@ -8435,7 +8308,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace( ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6 @@ -8530,9 +8403,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4 +; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16 ; GFX11-NEXT: v_mov_b32_e32 v0, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 @@ -8544,7 +8415,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -8562,8 +8433,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB24_1: ; %atomicrmw.start @@ -8574,7 +8444,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8592,16 +8462,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v5, v0 ; GFX90A-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: 
buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8618,9 +8487,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -8628,7 +8496,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_pk_add_f16 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8645,9 +8513,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8657,7 +8524,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_or_b32_e32 v4, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -8675,7 +8542,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -8683,7 +8549,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -8700,7 +8566,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -8796,9 +8662,7 @@ define void 
@buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start @@ -8809,7 +8673,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_mov_b32_e32 v4, v1 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -8827,8 +8691,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB25_1: ; %atomicrmw.start @@ -8838,7 +8701,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX10-NEXT: v_mov_b32_e32 v5, v2 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: v_mov_b32_e32 v4, v1 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8856,15 +8719,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v1, s6 ; GFX90A-NEXT: .LBB25_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_pk_add_f16 v2, v3, v0 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3 @@ -8881,16 +8742,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB25_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_pk_add_f16 v1, v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v4, v1 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen 
offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -8907,9 +8767,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB25_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8918,7 +8777,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX8-NEXT: v_or_b32_e32 v1, v4, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v4, v1 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2 @@ -8937,7 +8796,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -8945,7 +8803,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB25_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 @@ -8962,7 +8820,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6 @@ -9054,13 +8912,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -9082,7 +8939,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: 
v_cmp_eq_u32_e32 vcc, v0, v7 @@ -9097,12 +8954,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start @@ -9131,7 +8987,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -9149,10 +9005,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -9183,7 +9038,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -9202,9 +9057,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -9230,7 +9084,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: 
v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -9248,13 +9102,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -9275,7 +9128,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -9292,13 +9145,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -9320,7 +9172,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -9337,11 +9189,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -9366,7 +9217,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, 
s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -9382,7 +9233,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -9391,7 +9241,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB26_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -9406,7 +9256,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -9488,13 +9338,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -9515,7 +9364,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -9531,11 +9380,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB27_1: ; 
%atomicrmw.start @@ -9561,7 +9408,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -9580,11 +9427,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start @@ -9610,7 +9455,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -9629,12 +9474,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -9656,7 +9500,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -9674,13 +9518,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; 
GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -9700,7 +9543,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -9717,13 +9560,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -9744,7 +9586,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -9761,11 +9603,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -9789,7 +9630,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -9806,7 +9647,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; 
GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -9815,7 +9655,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB27_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 @@ -9830,7 +9670,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -9930,7 +9770,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -9942,40 +9781,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB28_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX942-NEXT: s_movk_i32 s10, 0x7fff -; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX942-NEXT: s_mov_b32 s11, 0x7060302 ; GFX942-NEXT: .LBB28_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX942-NEXT: v_add_f32_e32 v4, v4, v9 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX942-NEXT: v_add_f32_e32 v6, v6, v10 +; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10 +; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX942-NEXT: s_mov_b64 s[8:9], exec ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX942-NEXT: v_add_f32_e32 v5, v5, v10 -; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10 -; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX942-NEXT: v_add_f32_e32 v7, v7, v5 +; GFX942-NEXT: v_bfe_u32 
v8, v7, 16, 1 +; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10 +; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11 +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -9988,27 +9826,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB28_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB28_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 @@ -10022,8 +9859,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1 ; GFX11-TRUE16-NEXT: ; %bb.2: @@ -10036,28 +9872,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v8 :: v_dual_add_f32 v4, v4, v9 -; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, v6, v8 :: v_dual_add_f32 v5, v5, v9 +; 
GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-TRUE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -10071,14 +9907,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_4 ; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv ; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -10088,13 +9924,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 @@ -10108,8 +9943,7 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1 ; GFX11-FAKE16-NEXT: ; %bb.2: @@ -10122,28 +9956,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v9 :: v_dual_add_f32 v4, v4, v8 -; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_dual_add_f32 v6, v6, v9 :: v_dual_add_f32 v5, v5, v8 +; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-FAKE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -10157,14 +9991,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_4 ; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -10174,13 +10008,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 @@ -10192,8 +10025,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB28_1 @@ -10205,25 +10037,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_add_f32_e32 v4, v4, v8 -; GFX10-NEXT: v_add_f32_e32 v5, v5, v9 -; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX10-NEXT: v_add_f32_e32 v5, v5, v8 +; GFX10-NEXT: v_add_f32_e32 v6, v6, v9 +; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; GFX10-NEXT: .LBB28_4: ; Parent 
Loop BB28_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -10235,15 +10067,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB28_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -10252,13 +10084,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_cbranch_execnz .LBB28_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -10270,38 +10101,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB28_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX90A-NEXT: s_movk_i32 s14, 0x7fff -; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX90A-NEXT: s_mov_b32 s15, 0x7060302 ; GFX90A-NEXT: .LBB28_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX90A-NEXT: v_add_f32_e32 v4, v4, v9 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX90A-NEXT: v_add_f32_e32 v5, v5, v10 -; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14 -; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15 +; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX90A-NEXT: v_add_f32_e32 v6, v6, v10 +; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX90A-NEXT: v_add3_u32 v7, v7, 
v6, s14 +; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX90A-NEXT: v_add_f32_e32 v7, v7, v5 +; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14 +; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -10313,27 +10143,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB28_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB28_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -10345,8 +10174,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB28_1 ; GFX908-NEXT: ; %bb.2: @@ -10360,24 +10188,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX908-NEXT: v_add_f32_e32 v4, v4, v8 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX908-NEXT: v_add_f32_e32 v5, v5, v9 -; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14 -; 
GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX908-NEXT: v_add_f32_e32 v5, v5, v8 +; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14 +; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX908-NEXT: v_add_f32_e32 v6, v6, v9 +; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14 +; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -10389,27 +10217,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB28_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB28_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -10421,8 +10248,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB28_1 ; GFX8-NEXT: ; %bb.2: @@ -10434,27 +10260,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB28_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX8-NEXT: v_add_f32_e32 v4, v4, v8 -; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4 -; GFX8-NEXT: v_add_u32_e32 v5, 
vcc, 0x7fff, v5 -; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX8-NEXT: v_add_f32_e32 v5, v5, v9 -; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 -; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX8-NEXT: v_add_f32_e32 v5, v5, v8 +; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX8-NEXT: v_add_f32_e32 v6, v6, v9 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -10466,27 +10292,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB28_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB28_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -10497,36 +10322,35 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: 
s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB28_1 ; GFX7-NEXT: ; %bb.2: ; GFX7-NEXT: s_mov_b64 exec, s[6:7] ; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 -; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 ; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v6 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 -; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5 ; GFX7-NEXT: .LBB28_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB28_4 Depth 2 -; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v7 -; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7 -; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 -; GFX7-NEXT: v_add_f32_e32 v4, v4, v10 -; GFX7-NEXT: v_add_f32_e32 v6, v6, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_alignbit_b32 v4, v4, v6, 16 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v8 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX7-NEXT: v_add_f32_e32 v8, v8, v11 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_add_f32_e32 v5, v5, v10 +; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v8 +; GFX7-NEXT: v_alignbit_b32 v6, v7, v6, 16 +; GFX7-NEXT: v_alignbit_b32 v5, v8, v5, 16 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_mov_b32_e32 v6, v4 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -10538,23 +10362,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB28_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB28_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -10658,13 +10482,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen 
offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -10686,7 +10509,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -10701,12 +10524,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start @@ -10735,7 +10557,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -10753,10 +10575,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -10787,7 +10608,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: 
buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -10806,9 +10627,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -10834,7 +10654,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -10852,13 +10672,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -10879,7 +10698,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -10896,13 +10715,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -10924,7 +10742,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 
glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -10941,11 +10759,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -10970,7 +10787,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -10986,7 +10803,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -10995,7 +10811,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -11010,7 +10826,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add ; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -11092,13 +10908,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -11119,7 +10934,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap 
v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -11135,11 +10950,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start @@ -11165,7 +10978,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -11184,11 +10997,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start @@ -11214,7 +11025,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -11233,12 +11044,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; 
GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -11260,7 +11070,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -11278,13 +11088,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -11304,7 +11113,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -11321,13 +11130,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -11348,7 +11156,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -11365,11 +11173,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, 
v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -11393,7 +11200,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -11410,7 +11217,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -11419,7 +11225,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB30_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 @@ -11434,7 +11240,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace ; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -11517,13 +11323,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -11545,7 +11350,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -11560,12 +11365,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | 
instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start @@ -11594,7 +11398,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -11612,10 +11416,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -11646,7 +11449,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -11665,9 +11468,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -11693,7 +11495,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -11711,13 +11513,12 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -11738,7 +11539,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -11755,13 +11556,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -11783,7 +11583,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -11800,11 +11600,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -11829,7 +11628,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -11845,7 +11644,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -11854,7 +11652,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB31_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -11869,7 +11667,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -11951,13 +11749,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -11978,7 +11775,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -11994,11 +11791,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start @@ -12024,7 +11819,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 
v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -12043,11 +11838,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start @@ -12073,7 +11866,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -12092,12 +11885,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -12119,7 +11911,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -12137,13 +11929,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB32_1: ; 
%atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -12163,7 +11954,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -12180,13 +11971,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -12207,7 +11997,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -12224,11 +12014,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -12252,7 +12041,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -12269,7 +12058,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -12278,7 +12066,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; 
GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB32_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 @@ -12293,7 +12081,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re ; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -12375,13 +12163,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -12402,7 +12189,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -12418,11 +12205,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start @@ -12448,7 +12233,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -12467,11 +12252,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: 
v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start @@ -12497,7 +12280,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -12516,12 +12299,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -12543,7 +12325,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -12561,13 +12343,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -12587,7 +12368,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen 
offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -12604,13 +12385,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -12631,7 +12411,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -12648,11 +12428,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -12676,7 +12455,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -12693,7 +12472,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -12702,7 +12480,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB33_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 @@ -12717,7 +12495,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: 
buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -12825,8 +12603,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v2, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB34_1: ; %atomicrmw.start @@ -12837,7 +12614,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX10-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -12855,9 +12632,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_mov_b32_e32 v2, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB34_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -12865,7 +12641,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] ; GFX90A-NEXT: buffer_wbl2 -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_invl2 ; GFX90A-NEXT: buffer_wbinvl1 @@ -12883,9 +12659,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v2, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB34_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -12893,7 +12668,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -12910,9 +12685,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v2, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB34_1: ; 
%atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -12920,7 +12694,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -12937,9 +12711,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX7-NEXT: v_mov_b32_e32 v2, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB34_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -12947,7 +12720,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_ ; GFX7-NEXT: v_add_f32_e32 v4, v5, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll index f7a1fb35c8106..1a4140cd0912b 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll +++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll @@ -37,10 +37,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -49,7 +48,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -88,10 +87,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -99,7 +97,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; 
GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -116,10 +114,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -128,7 +125,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -145,10 +142,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -157,7 +153,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -212,10 +208,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -223,7 +218,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_max_f32_e32 v0, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -262,17 +257,16 @@ define void 
@buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1 ; GFX90A-NEXT: v_max_f32_e32 v0, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -289,10 +283,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -300,7 +293,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_max_f32_e32 v0, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: v_mov_b32_e32 v4, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -317,10 +310,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -328,7 +320,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_max_f32_e32 v0, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: v_mov_b32_e32 v4, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -402,7 +394,6 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -414,22 +405,21 @@ define float 
@buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB2_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_max_f32_e32 v9, v5, v5 +; GFX942-NEXT: v_max_f32_e32 v5, v5, v5 ; GFX942-NEXT: .LBB2_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_max_f32_e32 v4, v7, v7 -; GFX942-NEXT: v_max_f32_e32 v6, v4, v9 +; GFX942-NEXT: v_max_f32_e32 v6, v9, v9 +; GFX942-NEXT: v_max_f32_e32 v8, v6, v5 ; GFX942-NEXT: s_mov_b64 s[8:9], exec -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: buffer_wbl2 sc1 ; GFX942-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 @@ -443,21 +433,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB2_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB2_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -522,7 +512,6 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -534,22 +523,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_max_f32_e32 v9, 
v5, v5 +; GFX90A-NEXT: v_max_f32_e32 v5, v5, v5 ; GFX90A-NEXT: .LBB2_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_max_f32_e32 v4, v7, v7 -; GFX90A-NEXT: v_max_f32_e32 v6, v4, v9 +; GFX90A-NEXT: v_max_f32_e32 v6, v9, v9 +; GFX90A-NEXT: v_max_f32_e32 v8, v6, v5 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -561,27 +549,26 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -593,8 +580,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_1 ; GFX908-NEXT: ; %bb.2: @@ -605,11 +591,11 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_max_f32_e32 v4, v6, v6 -; GFX908-NEXT: v_max_f32_e32 v5, v4, v8 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_max_f32_e32 v5, v7, v7 +; GFX908-NEXT: v_max_f32_e32 v6, v5, v8 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -621,27 +607,26 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, 
s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB2_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -653,8 +638,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_1 ; GFX8-NEXT: ; %bb.2: @@ -665,11 +649,11 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v6 -; GFX8-NEXT: v_max_f32_e32 v5, v4, v8 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v7 +; GFX8-NEXT: v_max_f32_e32 v6, v5, v8 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -681,21 +665,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB2_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 
v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -777,10 +761,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -789,7 +772,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX942-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -804,11 +787,10 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_max_f32 v2, v1, v1 -; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v3, s16 :: v_dual_max_f32 v2, v1, v1 +; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -819,7 +801,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX11-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -837,11 +819,10 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: v_max_f32_e32 v2, v1, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -851,7 +832,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: 
buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -869,10 +850,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -880,7 +860,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -897,10 +877,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -909,7 +888,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -926,10 +905,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -938,7 +916,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -955,10 +933,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_mov_b32_e32 v1, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; 
GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -967,7 +944,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1035,10 +1012,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -1047,7 +1023,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1086,10 +1062,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -1097,7 +1072,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1114,10 +1089,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1126,7 +1100,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: 
buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1143,10 +1117,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1155,7 +1128,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1201,29 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1246,30 +1217,28 @@ define double 
@buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1301,30 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; 
GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB5_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1334,30 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB5_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1397,11 +1360,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v2, s16 ; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v6, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 @@ -1411,7 +1372,7 @@ define void 
@buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[4:5] ; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2 ; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3] @@ -1440,11 +1401,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v2, s16 ; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v6, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -1454,7 +1413,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5] ; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2 ; GFX11-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1494,9 +1453,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v2, s20 ; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v6, s20 ; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1506,7 +1464,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v9, v2 ; GFX908-NEXT: v_mov_b32_e32 v8, v1 ; GFX908-NEXT: v_mov_b32_e32 v7, v0 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3] @@ -1525,9 +1483,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v2, s20 ; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v6, s20 ; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1537,7 +1494,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v9, v2 ; GFX8-NEXT: v_mov_b32_e32 v8, v1 ; GFX8-NEXT: v_mov_b32_e32 
v7, v0 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3] @@ -1583,10 +1540,9 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-NEXT: v_readfirstlane_b32 s4, v9 ; GFX12-NEXT: v_readfirstlane_b32 s5, v10 ; GFX12-NEXT: v_readfirstlane_b32 s6, v7 @@ -1600,12 +1556,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048 -; GFX12-NEXT: ; implicit-def: $vgpr4 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB7_1 ; GFX12-NEXT: ; %bb.2: ; GFX12-NEXT: s_mov_b32 exec_lo, s1 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[5:6], v[5:6] +; GFX12-NEXT: v_max_num_f64_e32 v[5:6], v[5:6], v[5:6] ; GFX12-NEXT: s_mov_b32 s1, 0 ; GFX12-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Loop Header: Depth=1 @@ -1615,7 +1570,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_mov_b32 s2, exec_lo ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[11:12], v[0:1], v[4:5] +; GFX12-NEXT: v_max_num_f64_e32 v[11:12], v[0:1], v[5:6] ; GFX12-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12 ; GFX12-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14 ; GFX12-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1 @@ -1632,7 +1587,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB7_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1686,27 +1641,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_readfirstlane_b32 s4, v9 ; GFX11-NEXT: v_readfirstlane_b32 s5, v10 ; GFX11-NEXT: v_readfirstlane_b32 s6, v7 ; GFX11-NEXT: v_readfirstlane_b32 s7, v8 +; GFX11-NEXT: s_delay_alu 
instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8] ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048 -; GFX11-NEXT: ; implicit-def: $vgpr4 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB7_1 ; GFX11-NEXT: ; %bb.2: ; GFX11-NEXT: s_mov_b32 exec_lo, s2 -; GFX11-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX11-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX11-NEXT: .p2align 6 ; GFX11-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Loop Header: Depth=1 @@ -1716,7 +1670,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5] +; GFX11-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6] ; GFX11-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12 ; GFX11-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14 ; GFX11-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1 @@ -1732,7 +1686,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB7_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1816,7 +1770,6 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: v_mov_b32_e32 v7, v2 ; GFX908-NEXT: v_mov_b32_e32 v10, v1 ; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v9 @@ -1829,12 +1782,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB7_1 ; GFX908-NEXT: ; %bb.2: ; GFX908-NEXT: s_mov_b64 exec, s[6:7] -; GFX908-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX908-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Loop Header: Depth=1 @@ -1842,7 +1794,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14] ; GFX908-NEXT: s_mov_b64 s[12:13], exec -; GFX908-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5] +; GFX908-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6] ; GFX908-NEXT: v_mov_b32_e32 v0, v11 ; GFX908-NEXT: v_mov_b32_e32 v1, v12 ; GFX908-NEXT: v_mov_b32_e32 v2, v13 @@ -1858,7 +1810,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; 
GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB7_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1882,7 +1834,6 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: v_mov_b32_e32 v7, v2 ; GFX8-NEXT: v_mov_b32_e32 v10, v1 ; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v9 @@ -1895,12 +1846,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB7_1 ; GFX8-NEXT: ; %bb.2: ; GFX8-NEXT: s_mov_b64 exec, s[6:7] -; GFX8-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX8-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Loop Header: Depth=1 @@ -1908,7 +1858,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14] ; GFX8-NEXT: s_mov_b64 s[12:13], exec -; GFX8-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5] +; GFX8-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6] ; GFX8-NEXT: v_mov_b32_e32 v0, v11 ; GFX8-NEXT: v_mov_b32_e32 v1, v12 ; GFX8-NEXT: v_mov_b32_e32 v2, v13 @@ -1924,7 +1874,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB7_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -2008,29 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; 
GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2053,30 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2088,31 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v2, v0 -; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: v_mov_b32_e32 v3, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX10-NEXT: v_mov_b32_e32 v2, s20 +; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX10-NEXT: v_mov_b32_e32 v8, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX10-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX10-NEXT: v_mov_b32_e32 v0, v2 +; GFX10-NEXT: v_mov_b32_e32 v1, v3 +; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v3, v5 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX10-NEXT: v_mov_b32_e32 v5, v1 +; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB8_1 @@ -2123,27 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_mov_b32_e32 v2, v0 -; GFX90A-NEXT: v_mov_b32_e32 v0, s20 -; GFX90A-NEXT: v_mov_b32_e32 v3, v1 -; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x800 +; GFX90A-NEXT: v_mov_b32_e32 v2, s20 +; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX90A-NEXT: v_mov_b32_e32 v6, s6 +; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX90A-NEXT: v_mov_b32_e32 v8, s20 ; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX90A-NEXT: v_max_f64 v[8:9], v[0:1], v[4:5] -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1] -; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX90A-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX90A-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1] +; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: 
buffer_wbinvl1 -; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB8_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2153,30 +2093,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB8_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2186,30 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; 
GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB8_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2219,30 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v2, v0 -; GFX7-NEXT: v_mov_b32_e32 v0, s20 -; GFX7-NEXT: v_mov_b32_e32 v3, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 -; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX7-NEXT: v_mov_b32_e32 v2, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v8, s20 ; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX7-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX7-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX7-NEXT: v_mov_b32_e32 v0, v2 +; GFX7-NEXT: v_mov_b32_e32 v1, v3 +; GFX7-NEXT: v_mov_b32_e32 v2, v4 +; GFX7-NEXT: v_mov_b32_e32 v3, v5 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v5, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB8_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2252,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v2, v0 -; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v3, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: v_mov_b32_e32 v2, s20 +; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 -; GFX6-NEXT: 
v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v8, s6 ; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX6-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX6-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX6-NEXT: v_mov_b32_e32 v0, v2 +; GFX6-NEXT: v_mov_b32_e32 v1, v3 +; GFX6-NEXT: v_mov_b32_e32 v2, v4 +; GFX6-NEXT: v_mov_b32_e32 v3, v5 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB8_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2296,29 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: 
v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2341,30 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2396,30 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 
v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB9_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2429,30 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB9_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6146,13 +6064,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v3, s4 +; GFX12-NEXT: v_mov_b32_e32 v3, s16 +; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; 
GFX12-NEXT: v_pk_max_num_f16 v2, v1, v1 ; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 -; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 @@ -6163,7 +6079,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX12-NEXT: v_pk_max_num_f16 v4, v0, v2 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5 @@ -6182,10 +6098,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6195,7 +6110,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX942-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6210,12 +6125,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_pk_max_f16 v2, v1, v1 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 -; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -6226,7 +6140,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX11-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -6244,11 +6158,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: v_pk_max_f16 v2, v1, 
v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -6258,7 +6171,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -6276,10 +6189,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6287,7 +6199,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_pk_max_f16 v0, v5, v5 ; GFX90A-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6304,10 +6216,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -6316,7 +6227,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_pk_max_f16 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6333,11 +6244,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 ; GFX8-NEXT: v_max_f16_e32 v3, v1, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -6349,7 +6259,7 @@ define <2 x half> 
@buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_or_b32_e32 v5, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -6367,7 +6277,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -6375,7 +6284,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -6392,7 +6301,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -6467,10 +6376,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v1, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400 ; GFX12-NEXT: v_pk_max_num_f16 v2, v0, v0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v3, s4 +; GFX12-NEXT: v_mov_b32_e32 v3, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6481,7 +6388,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v2 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1 @@ -6500,10 +6407,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6512,7 +6418,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX942-NEXT: v_pk_max_f16 v0, 
v0, v2 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6528,9 +6434,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6541,7 +6446,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_pk_max_f16 v0, v0, v2 ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -6559,9 +6464,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6572,7 +6476,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX10-NEXT: v_pk_max_f16 v0, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 ; GFX10-NEXT: v_mov_b32_e32 v4, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -6590,17 +6494,16 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1 ; GFX90A-NEXT: v_pk_max_f16 v0, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6617,10 +6520,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword 
v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -6628,7 +6530,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX908-NEXT: v_pk_max_f16 v0, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: v_mov_b32_e32 v4, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6645,11 +6547,10 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 ; GFX8-NEXT: v_max_f16_e32 v3, v0, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -6660,7 +6561,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: v_or_b32_e32 v0, v5, v0 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -6679,7 +6580,6 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -6687,7 +6587,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 @@ -6704,7 +6604,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6 @@ -6778,7 +6678,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB18_1: ; =>This 
Inner Loop Header: Depth=1 ; GFX12-NEXT: v_readfirstlane_b32 s4, v0 @@ -6793,8 +6692,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-NEXT: ; implicit-def: $vgpr4 +; GFX12-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB18_1 ; GFX12-NEXT: ; %bb.2: @@ -6805,13 +6703,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_pk_max_num_f16 v4, v6, v6 +; GFX12-NEXT: v_pk_max_num_f16 v5, v7, v7 ; GFX12-NEXT: s_mov_b32 s2, exec_lo ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_pk_max_num_f16 v5, v4, v8 -; GFX12-NEXT: v_mov_b32_e32 v4, v5 +; GFX12-NEXT: v_pk_max_num_f16 v6, v5, v8 ; GFX12-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX12-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-NEXT: v_readfirstlane_b32 s4, v0 @@ -6826,14 +6724,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB18_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX12-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-NEXT: s_wait_alu 0xfffe @@ -6841,14 +6739,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_cbranch_execnz .LBB18_3 ; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -6860,23 +6757,22 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB18_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: 
s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_pk_max_f16 v9, v5, v5 +; GFX942-NEXT: v_pk_max_f16 v5, v5, v5 ; GFX942-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_pk_max_f16 v4, v7, v7 +; GFX942-NEXT: v_pk_max_f16 v6, v9, v9 ; GFX942-NEXT: s_mov_b64 s[8:9], exec -; GFX942-NEXT: v_pk_max_f16 v6, v4, v9 +; GFX942-NEXT: v_pk_max_f16 v8, v6, v5 ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -6889,27 +6785,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB18_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB18_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 @@ -6923,8 +6818,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-NEXT: ; implicit-def: $vgpr4 +; GFX11-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB18_1 ; GFX11-NEXT: ; %bb.2: @@ -6935,13 +6829,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX11-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_pk_max_f16 v4, v6, v6 +; GFX11-NEXT: v_pk_max_f16 v5, v7, v7 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_pk_max_f16 v5, v4, v8 -; GFX11-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-NEXT: v_pk_max_f16 v6, v5, v8 ; GFX11-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX11-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-NEXT: v_readfirstlane_b32 s4, v0 @@ 
-6955,14 +6849,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB18_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX11-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv ; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -6971,13 +6865,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_cbranch_execnz .LBB18_3 ; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 @@ -6989,8 +6882,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB18_1 @@ -7001,12 +6893,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_pk_max_f16 v4, v6, v6 +; GFX10-NEXT: v_pk_max_f16 v5, v7, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_pk_max_f16 v5, v4, v8 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_pk_max_f16 v6, v5, v8 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; GFX10-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -7018,15 +6910,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB18_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: 
v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -7035,13 +6927,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_cbranch_execnz .LBB18_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -7053,22 +6944,21 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_pk_max_f16 v9, v5, v5 +; GFX90A-NEXT: v_pk_max_f16 v5, v5, v5 ; GFX90A-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_max_f16 v4, v7, v7 -; GFX90A-NEXT: v_pk_max_f16 v6, v4, v9 +; GFX90A-NEXT: v_pk_max_f16 v6, v9, v9 +; GFX90A-NEXT: v_pk_max_f16 v8, v6, v5 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -7080,27 +6970,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB18_1: ; 
=>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7112,8 +7001,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB18_1 ; GFX908-NEXT: ; %bb.2: @@ -7124,11 +7012,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_pk_max_f16 v4, v6, v6 -; GFX908-NEXT: v_pk_max_f16 v5, v4, v8 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_pk_max_f16 v5, v7, v7 +; GFX908-NEXT: v_pk_max_f16 v6, v5, v8 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7140,27 +7028,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB18_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB18_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7172,8 +7059,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB18_1 ; GFX8-NEXT: ; %bb.2: @@ -7185,14 +7071,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_max_f16_e32 v5, v6, v6 -; GFX8-NEXT: v_max_f16_sdwa v4, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_max_f16_e32 v5, v5, v9 -; GFX8-NEXT: v_or_b32_e32 v5, v5, v4 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_max_f16_sdwa v5, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT: v_max_f16_e32 v6, v7, v7 +; GFX8-NEXT: v_max_f16_sdwa v5, v5, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_max_f16_e32 v6, v6, v9 +; GFX8-NEXT: v_or_b32_e32 v6, v6, v5 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7204,27 +7090,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB18_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB18_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7235,39 +7120,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB18_1 ; GFX7-NEXT: ; %bb.2: ; GFX7-NEXT: s_mov_b64 exec, s[6:7] ; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 ; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6 -; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 ; GFX7-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; 
Child Loop BB18_4 Depth 2 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5 -; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5 +; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_max_f32_e32 v6, v6, v10 -; GFX7-NEXT: v_max_f32_e32 v7, v7, v11 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6 +; GFX7-NEXT: v_or_b32_e32 v6, v6, v5 +; GFX7-NEXT: v_max_f32_e32 v7, v7, v10 +; GFX7-NEXT: v_max_f32_e32 v8, v8, v11 ; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7 -; GFX7-NEXT: v_or_b32_e32 v6, v4, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; GFX7-NEXT: v_or_b32_e32 v5, v7, v4 -; GFX7-NEXT: v_mov_b32_e32 v8, v6 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX7-NEXT: v_or_b32_e32 v5, v8, v5 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7279,23 +7163,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB18_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB18_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v7 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -7396,13 +7280,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe +; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 ; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 @@ -7431,7 +7313,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-TRUE16-NEXT: v_mov_b16_e32 
v5.l, v0.h ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6 @@ -7452,11 +7334,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400 ; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0 -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7487,7 +7367,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6 @@ -7506,13 +7386,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -7534,7 +7413,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -7549,12 +7428,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: 
s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7583,7 +7461,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -7601,10 +7479,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -7635,7 +7512,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -7654,9 +7531,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -7682,7 +7558,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -7700,13 +7576,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; 
GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -7727,7 +7602,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -7744,13 +7619,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -7772,7 +7646,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -7789,11 +7663,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -7818,7 +7691,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -7834,7 +7707,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; 
GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -7843,7 +7715,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -7858,7 +7730,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: v_alignbit_b32 v0, v0, v6, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -7928,11 +7800,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe -; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 +; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 @@ -7958,7 +7828,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1 @@ -7980,11 +7850,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe -; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 +; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 @@ -8010,7 +7878,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX12-FAKE16-NEXT: 
v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1 @@ -8029,13 +7897,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -8056,7 +7923,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -8072,11 +7939,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start @@ -8102,7 +7967,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -8121,11 +7986,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, 
s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start @@ -8151,7 +8014,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -8170,12 +8033,11 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -8197,7 +8059,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8215,13 +8077,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -8241,7 +8102,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -8258,13 +8119,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: 
v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -8285,7 +8145,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -8302,11 +8162,10 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8330,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -8347,7 +8206,6 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v0 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -8356,7 +8214,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 @@ -8371,7 +8229,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: v_alignbit_b32 v3, v3, v6, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -8440,7 +8298,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0 ; 
GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo ; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8455,8 +8312,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX12-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX12-TRUE16-NEXT: ; %bb.2: @@ -8468,30 +8324,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v8 :: v_dual_max_num_f32 v4, v4, v9 -; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v6, v6, v8 :: v_dual_max_num_f32 v5, v5, v9 +; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5 ; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8506,14 +8362,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: 
s_and_saveexec_b32 s0, s0 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -8521,7 +8377,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3 ; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -8532,7 +8388,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo ; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8547,8 +8402,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX12-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX12-FAKE16-NEXT: ; %bb.2: @@ -8560,30 +8414,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v9 :: v_dual_max_num_f32 v4, v4, v8 -; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v6, v6, v9 :: v_dual_max_num_f32 v5, v5, v8 +; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff +; GFX12-FAKE16-NEXT: v_bfe_u32 
v10, v5, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX12-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5 ; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8598,14 +8452,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -8613,14 +8467,13 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3 ; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -8632,40 +8485,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: 
s_cbranch_execnz .LBB21_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX942-NEXT: s_movk_i32 s10, 0x7fff -; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX942-NEXT: s_mov_b32 s11, 0x7060302 ; GFX942-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX942-NEXT: v_max_f32_e32 v4, v4, v9 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX942-NEXT: v_max_f32_e32 v6, v6, v10 +; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10 +; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX942-NEXT: s_mov_b64 s[8:9], exec ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX942-NEXT: v_max_f32_e32 v5, v5, v10 -; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10 -; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX942-NEXT: v_max_f32_e32 v7, v7, v5 +; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10 +; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11 +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -8678,27 +8530,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB21_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB21_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; 
GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8712,8 +8563,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-TRUE16-NEXT: ; %bb.2: @@ -8726,28 +8576,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v8 :: v_dual_max_f32 v4, v4, v9 -; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_dual_max_f32 v6, v6, v8 :: v_dual_max_f32 v5, v5, v9 +; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8761,14 +8611,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; 
GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv ; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -8778,13 +8628,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8798,8 +8647,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-FAKE16-NEXT: ; %bb.2: @@ -8812,28 +8660,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v9 :: v_dual_max_f32 v4, v4, v8 -; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_dual_max_f32 v6, v6, v9 :: v_dual_max_f32 v5, v5, v8 +; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; 
GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8847,14 +8695,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -8864,13 +8712,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8882,8 +8729,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_1 @@ -8895,25 +8741,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX10-NEXT: 
s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_max_f32_e32 v4, v4, v8 -; GFX10-NEXT: v_max_f32_e32 v5, v5, v9 -; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX10-NEXT: v_max_f32_e32 v5, v5, v8 +; GFX10-NEXT: v_max_f32_e32 v6, v6, v9 +; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; GFX10-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -8925,15 +8771,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -8942,13 +8788,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_cbranch_execnz .LBB21_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -8960,38 +8805,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; 
GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX90A-NEXT: s_movk_i32 s14, 0x7fff -; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX90A-NEXT: s_mov_b32 s15, 0x7060302 ; GFX90A-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX90A-NEXT: v_max_f32_e32 v4, v4, v9 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX90A-NEXT: v_max_f32_e32 v5, v5, v10 -; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14 -; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15 +; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX90A-NEXT: v_max_f32_e32 v6, v6, v10 +; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14 +; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX90A-NEXT: v_max_f32_e32 v7, v7, v5 +; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14 +; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -9003,27 +8847,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: 
buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -9035,8 +8878,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_1 ; GFX908-NEXT: ; %bb.2: @@ -9050,24 +8892,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX908-NEXT: v_max_f32_e32 v4, v4, v8 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX908-NEXT: v_max_f32_e32 v5, v5, v9 -; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14 -; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX908-NEXT: v_max_f32_e32 v5, v5, v8 +; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14 +; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX908-NEXT: v_max_f32_e32 v6, v6, v9 +; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14 +; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -9079,27 +8921,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; 
GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB21_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -9111,8 +8952,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_1 ; GFX8-NEXT: ; %bb.2: @@ -9124,27 +8964,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX8-NEXT: v_max_f32_e32 v4, v4, v8 -; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5 -; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX8-NEXT: v_max_f32_e32 v5, v5, v9 -; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 -; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX8-NEXT: v_max_f32_e32 v5, v5, v8 +; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX8-NEXT: v_max_f32_e32 v6, v6, v9 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -9156,27 +8996,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; 
GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB21_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -9187,8 +9026,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_1 ; GFX7-NEXT: ; %bb.2: @@ -9196,27 +9034,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 ; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 -; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 -; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5 ; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB21_4 Depth 2 -; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 ; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v7 -; GFX7-NEXT: v_max_f32_e32 v4, v4, v9 -; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v8 +; GFX7-NEXT: v_max_f32_e32 v5, v5, v10 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6 ; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_max_f32_e32 v7, v7, v10 -; GFX7-NEXT: v_alignbit_b32 v5, v5, v6, 16 -; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_max_f32_e32 v8, v8, v11 +; GFX7-NEXT: v_alignbit_b32 v6, v6, v7, 16 +; GFX7-NEXT: v_alignbit_b32 v5, v5, v8, 16 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_mov_b32_e32 v6, v4 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -9228,23 +9066,23 @@ define <2 x bfloat> 
@buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB21_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -9353,10 +9191,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -9365,7 +9202,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc0 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc0 sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -9404,10 +9241,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -9416,7 +9252,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] ; GFX90A-NEXT: buffer_wbl2 -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_invl2 ; GFX90A-NEXT: buffer_wbinvl1 @@ -9434,10 +9270,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: 
v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -9446,7 +9281,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -9463,10 +9298,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -9475,7 +9309,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_max_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll index 8ac6353133e72..671f42c6efd27 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll +++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll @@ -37,10 +37,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -49,7 +48,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -88,10 +87,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: 
s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -99,7 +97,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -116,10 +114,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -128,7 +125,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -145,10 +142,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -157,7 +153,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -212,10 +208,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -223,7 +218,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: 
v_min_f32_e32 v0, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -262,17 +257,16 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1 ; GFX90A-NEXT: v_min_f32_e32 v0, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -289,10 +283,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v0, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -300,7 +293,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_min_f32_e32 v0, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: v_mov_b32_e32 v4, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -317,10 +310,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -328,7 +320,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_min_f32_e32 v0, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: v_mov_b32_e32 v4, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -402,7 +394,6 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX942-LABEL: 
buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -414,22 +405,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB2_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_max_f32_e32 v9, v5, v5 +; GFX942-NEXT: v_max_f32_e32 v5, v5, v5 ; GFX942-NEXT: .LBB2_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_max_f32_e32 v4, v7, v7 -; GFX942-NEXT: v_min_f32_e32 v6, v4, v9 +; GFX942-NEXT: v_max_f32_e32 v6, v9, v9 +; GFX942-NEXT: v_min_f32_e32 v8, v6, v5 ; GFX942-NEXT: s_mov_b64 s[8:9], exec -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: buffer_wbl2 sc1 ; GFX942-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 @@ -443,21 +433,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB2_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB2_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -522,7 +512,6 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -534,22 +523,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; 
GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_max_f32_e32 v9, v5, v5 +; GFX90A-NEXT: v_max_f32_e32 v5, v5, v5 ; GFX90A-NEXT: .LBB2_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_max_f32_e32 v4, v7, v7 -; GFX90A-NEXT: v_min_f32_e32 v6, v4, v9 +; GFX90A-NEXT: v_max_f32_e32 v6, v9, v9 +; GFX90A-NEXT: v_min_f32_e32 v8, v6, v5 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -561,27 +549,26 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB2_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -593,8 +580,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_1 ; GFX908-NEXT: ; %bb.2: @@ -605,11 +591,11 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_max_f32_e32 v4, v6, v6 -; GFX908-NEXT: v_min_f32_e32 v5, v4, v8 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_max_f32_e32 v5, v7, v7 +; GFX908-NEXT: v_min_f32_e32 v6, v5, v8 ; 
GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -621,27 +607,26 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB2_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB2_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -653,8 +638,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_1 ; GFX8-NEXT: ; %bb.2: @@ -665,11 +649,11 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB2_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v6 -; GFX8-NEXT: v_min_f32_e32 v5, v4, v8 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v7 +; GFX8-NEXT: v_min_f32_e32 v6, v5, v8 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -681,21 +665,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB2_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; 
GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB2_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -777,10 +761,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -789,7 +772,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX942-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -804,11 +787,10 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_max_f32 v2, v1, v1 -; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_dual_mov_b32 v3, s16 :: v_dual_max_f32 v2, v1, v1 +; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -819,7 +801,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX11-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -837,11 +819,10 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: v_max_f32_e32 v2, v1, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -851,7 
+832,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX10-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -869,10 +850,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -880,7 +860,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -897,10 +877,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -909,7 +888,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX908-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -926,10 +905,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -938,7 +916,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX8-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 
vcc, v0, v5 @@ -955,10 +933,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_mov_b32_e32 v1, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s20 ; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX7-NEXT: v_mov_b32_e32 v3, s6 +; GFX7-NEXT: v_mov_b32_e32 v3, s20 ; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -967,7 +944,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote ; GFX7-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX7-NEXT: v_mov_b32_e32 v0, v4 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1035,10 +1012,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -1047,7 +1023,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX942-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1086,10 +1062,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -1097,7 +1072,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5 ; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1114,10 +1089,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: 
v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1126,7 +1100,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX908-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1143,10 +1117,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1155,7 +1128,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g ; GFX8-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -1201,29 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: 
global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1246,30 +1217,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -1301,30 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; 
GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB5_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1334,30 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB5_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1397,11 +1360,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v2, s16 ; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: 
buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v6, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 @@ -1411,7 +1372,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[4:5] ; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2 ; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3] @@ -1440,11 +1401,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v2, s16 ; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v6, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -1454,7 +1413,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5] ; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2 ; GFX11-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -1494,9 +1453,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v2, s20 ; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048 ; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v6, s20 ; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -1506,7 +1464,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v9, v2 ; GFX908-NEXT: v_mov_b32_e32 v8, v1 ; GFX908-NEXT: v_mov_b32_e32 v7, v0 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3] @@ -1525,9 +1483,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v2, s20 ; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048 ; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, 
s6 +; GFX8-NEXT: v_mov_b32_e32 v6, s20 ; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -1537,7 +1494,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v9, v2 ; GFX8-NEXT: v_mov_b32_e32 v8, v1 ; GFX8-NEXT: v_mov_b32_e32 v7, v0 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3] @@ -1583,10 +1540,9 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX12-NEXT: v_readfirstlane_b32 s4, v9 ; GFX12-NEXT: v_readfirstlane_b32 s5, v10 ; GFX12-NEXT: v_readfirstlane_b32 s6, v7 @@ -1600,12 +1556,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048 -; GFX12-NEXT: ; implicit-def: $vgpr4 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB7_1 ; GFX12-NEXT: ; %bb.2: ; GFX12-NEXT: s_mov_b32 exec_lo, s1 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[5:6], v[5:6] +; GFX12-NEXT: v_max_num_f64_e32 v[5:6], v[5:6], v[5:6] ; GFX12-NEXT: s_mov_b32 s1, 0 ; GFX12-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Loop Header: Depth=1 @@ -1615,7 +1570,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_mov_b32 s2, exec_lo ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_min_num_f64_e32 v[11:12], v[0:1], v[4:5] +; GFX12-NEXT: v_min_num_f64_e32 v[11:12], v[0:1], v[5:6] ; GFX12-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12 ; GFX12-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14 ; GFX12-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1 @@ -1632,7 +1587,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB7_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1686,27 +1641,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2 ; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 -; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 -; 
GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_readfirstlane_b32 s4, v9 ; GFX11-NEXT: v_readfirstlane_b32 s5, v10 ; GFX11-NEXT: v_readfirstlane_b32 s6, v7 ; GFX11-NEXT: v_readfirstlane_b32 s7, v8 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8] ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048 -; GFX11-NEXT: ; implicit-def: $vgpr4 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB7_1 ; GFX11-NEXT: ; %bb.2: ; GFX11-NEXT: s_mov_b32 exec_lo, s2 -; GFX11-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX11-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX11-NEXT: .p2align 6 ; GFX11-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Loop Header: Depth=1 @@ -1716,7 +1670,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5] +; GFX11-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6] ; GFX11-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12 ; GFX11-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14 ; GFX11-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1 @@ -1732,7 +1686,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB7_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1816,7 +1770,6 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: v_mov_b32_e32 v7, v2 ; GFX908-NEXT: v_mov_b32_e32 v10, v1 ; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v9 @@ -1829,12 +1782,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 ; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX908-NEXT: ; implicit-def: $vgpr4 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB7_1 ; GFX908-NEXT: ; %bb.2: ; GFX908-NEXT: s_mov_b64 exec, s[6:7] -; GFX908-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX908-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Loop Header: Depth=1 @@ -1842,7 +1794,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14] ; GFX908-NEXT: s_mov_b64 s[12:13], exec 
-; GFX908-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5] +; GFX908-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6] ; GFX908-NEXT: v_mov_b32_e32 v0, v11 ; GFX908-NEXT: v_mov_b32_e32 v1, v12 ; GFX908-NEXT: v_mov_b32_e32 v2, v13 @@ -1858,7 +1810,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB7_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -1882,7 +1834,6 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: v_mov_b32_e32 v7, v2 ; GFX8-NEXT: v_mov_b32_e32 v10, v1 ; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v9 @@ -1895,12 +1846,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 ; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048 -; GFX8-NEXT: ; implicit-def: $vgpr4 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB7_1 ; GFX8-NEXT: ; %bb.2: ; GFX8-NEXT: s_mov_b64 exec, s[6:7] -; GFX8-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6] +; GFX8-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6] ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: .LBB7_3: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Loop Header: Depth=1 @@ -1908,7 +1858,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14] ; GFX8-NEXT: s_mov_b64 s[12:13], exec -; GFX8-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5] +; GFX8-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6] ; GFX8-NEXT: v_mov_b32_e32 v0, v11 ; GFX8-NEXT: v_mov_b32_e32 v1, v12 ; GFX8-NEXT: v_mov_b32_e32 v2, v13 @@ -1924,7 +1874,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB7_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1 @@ -2008,29 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This 
Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2053,30 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: 
s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2088,31 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v2, v0 -; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: v_mov_b32_e32 v3, v1 -; GFX10-NEXT: s_add_i32 s4, s20, 0x800 -; GFX10-NEXT: v_mov_b32_e32 v6, s4 -; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX10-NEXT: v_mov_b32_e32 v2, s20 +; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX10-NEXT: v_mov_b32_e32 v8, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_mov_b32_e32 v10, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, v0 +; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX10-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX10-NEXT: v_mov_b32_e32 v0, v7 -; GFX10-NEXT: v_mov_b32_e32 v1, v8 -; GFX10-NEXT: v_mov_b32_e32 v2, v9 -; GFX10-NEXT: v_mov_b32_e32 v3, v10 -; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX10-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX10-NEXT: v_mov_b32_e32 v0, v2 +; GFX10-NEXT: v_mov_b32_e32 v1, v3 +; GFX10-NEXT: v_mov_b32_e32 v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v3, v5 +; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX10-NEXT: v_mov_b32_e32 v5, v1 +; GFX10-NEXT: v_mov_b32_e32 v4, v0 ; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB8_1 @@ -2123,27 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_mov_b32_e32 v2, v0 -; GFX90A-NEXT: v_mov_b32_e32 v0, s20 -; GFX90A-NEXT: v_mov_b32_e32 v3, v1 -; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x800 +; GFX90A-NEXT: v_mov_b32_e32 v2, s20 +; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 -; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX90A-NEXT: v_mov_b32_e32 v6, s6 +; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX90A-NEXT: v_mov_b32_e32 v8, s20 ; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX90A-NEXT: v_min_f64 v[8:9], v[0:1], v[4:5] -; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1] -; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX90A-NEXT: 
v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX90A-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1] +; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 -; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB8_1 ; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2153,30 +2093,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB8_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2186,30 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: 
v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB8_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2219,30 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v2, v0 -; GFX7-NEXT: v_mov_b32_e32 v0, s20 -; GFX7-NEXT: v_mov_b32_e32 v3, v1 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX7-NEXT: s_add_i32 s6, s20, 0x800 -; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX7-NEXT: v_mov_b32_e32 v2, s20 +; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX7-NEXT: s_mov_b64 s[4:5], 0 -; GFX7-NEXT: v_mov_b32_e32 v6, s6 +; GFX7-NEXT: v_mov_b32_e32 v8, s20 ; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v10, v1 -; GFX7-NEXT: v_mov_b32_e32 v9, v0 -; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX7-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v8 -; GFX7-NEXT: v_mov_b32_e32 v2, v9 -; GFX7-NEXT: v_mov_b32_e32 v3, v10 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX7-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX7-NEXT: v_mov_b32_e32 v0, v2 +; GFX7-NEXT: v_mov_b32_e32 v1, v3 +; GFX7-NEXT: v_mov_b32_e32 v2, v4 +; GFX7-NEXT: v_mov_b32_e32 v3, v5 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v5, v1 ; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX7-NEXT: v_mov_b32_e32 v4, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB8_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2252,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; 
GFX6-NEXT: v_mov_b32_e32 v2, v0 -; GFX6-NEXT: v_mov_b32_e32 v0, s20 -; GFX6-NEXT: v_mov_b32_e32 v3, v1 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 +; GFX6-NEXT: v_mov_b32_e32 v2, s20 +; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 ; GFX6-NEXT: s_add_i32 s6, s20, 0x800 -; GFX6-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] +; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX6-NEXT: s_mov_b64 s[4:5], 0 -; GFX6-NEXT: v_mov_b32_e32 v6, s6 +; GFX6-NEXT: v_mov_b32_e32 v8, s6 ; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v10, v1 -; GFX6-NEXT: v_mov_b32_e32 v9, v0 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX6-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX6-NEXT: v_mov_b32_e32 v0, v7 -; GFX6-NEXT: v_mov_b32_e32 v1, v8 -; GFX6-NEXT: v_mov_b32_e32 v2, v9 -; GFX6-NEXT: v_mov_b32_e32 v3, v10 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX6-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX6-NEXT: v_mov_b32_e32 v0, v2 +; GFX6-NEXT: v_mov_b32_e32 v1, v3 +; GFX6-NEXT: v_mov_b32_e32 v2, v4 +; GFX6-NEXT: v_mov_b32_e32 v3, v5 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX6-NEXT: v_mov_b32_e32 v5, v1 ; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX6-NEXT: v_mov_b32_e32 v4, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX6-NEXT: s_cbranch_execnz .LBB8_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2296,29 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX12-NEXT: v_mov_b32_e32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v6, s4 -; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3] -; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048 +; GFX12-NEXT: v_mov_b32_e32 v2, s16 +; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1] +; GFX12-NEXT: v_mov_b32_e32 v8, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048 ; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10] -; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7] +; GFX12-NEXT: v_mov_b32_e32 v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX12-NEXT: 
v_mov_b32_e32 v3, v5 +; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2341,30 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x800 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_mov_b32_e32 v6, s4 -; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048 +; GFX11-NEXT: v_mov_b32_e32 v2, s16 +; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] +; GFX11-NEXT: v_mov_b32_e32 v8, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048 ; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0 +; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8 -; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10 -; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc +; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX11-NEXT: v_mov_b32_e32 v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4 +; GFX11-NEXT: v_mov_b32_e32 v3, v5 +; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10] +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5] +; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 @@ -2396,30 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v2, v0 -; GFX908-NEXT: v_mov_b32_e32 v0, s20 -; GFX908-NEXT: v_mov_b32_e32 v3, v1 -; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX908-NEXT: s_add_i32 s6, s20, 0x800 +; GFX908-NEXT: v_mov_b32_e32 v2, s20 +; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], 
v[0:1] ; GFX908-NEXT: s_mov_b64 s[4:5], 0 -; GFX908-NEXT: v_mov_b32_e32 v6, s6 +; GFX908-NEXT: v_mov_b32_e32 v8, s20 ; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_mov_b32_e32 v10, v1 -; GFX908-NEXT: v_mov_b32_e32 v9, v0 -; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX908-NEXT: v_mov_b32_e32 v0, v7 -; GFX908-NEXT: v_mov_b32_e32 v1, v8 -; GFX908-NEXT: v_mov_b32_e32 v2, v9 -; GFX908-NEXT: v_mov_b32_e32 v3, v10 -; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX908-NEXT: v_mov_b32_e32 v0, v2 +; GFX908-NEXT: v_mov_b32_e32 v1, v3 +; GFX908-NEXT: v_mov_b32_e32 v2, v4 +; GFX908-NEXT: v_mov_b32_e32 v3, v5 +; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 -; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX908-NEXT: v_mov_b32_e32 v4, v0 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB9_1 ; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2429,30 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_ ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: v_mov_b32_e32 v0, s20 -; GFX8-NEXT: v_mov_b32_e32 v3, v1 -; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048 -; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3] -; GFX8-NEXT: s_add_i32 s6, s20, 0x800 +; GFX8-NEXT: v_mov_b32_e32 v2, s20 +; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048 +; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1] ; GFX8-NEXT: s_mov_b64 s[4:5], 0 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 +; GFX8-NEXT: v_mov_b32_e32 v8, s20 ; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_mov_b32_e32 v10, v1 -; GFX8-NEXT: v_mov_b32_e32 v9, v0 -; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10] -; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5] -; GFX8-NEXT: v_mov_b32_e32 v0, v7 -; GFX8-NEXT: v_mov_b32_e32 v1, v8 -; GFX8-NEXT: v_mov_b32_e32 v2, v9 -; GFX8-NEXT: v_mov_b32_e32 v3, v10 -; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc +; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5] +; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7] +; GFX8-NEXT: v_mov_b32_e32 v0, v2 +; GFX8-NEXT: v_mov_b32_e32 v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, v4 +; GFX8-NEXT: v_mov_b32_e32 v3, v5 +; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 -; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10] +; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] +; GFX8-NEXT: v_mov_b32_e32 v5, v1 ; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX8-NEXT: v_mov_b32_e32 v4, v0 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB9_1 ; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6146,13 +6064,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; 
GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v3, s4 +; GFX12-NEXT: v_mov_b32_e32 v3, s16 +; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX12-NEXT: v_pk_max_num_f16 v2, v1, v1 ; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 -; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: s_wait_loadcnt 0x0 @@ -6163,7 +6079,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX12-NEXT: v_pk_min_num_f16 v4, v0, v2 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5 @@ -6182,10 +6098,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6195,7 +6110,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX942-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6210,12 +6125,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX11-NEXT: v_pk_max_f16 v2, v1, v1 ; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 -; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX11-NEXT: s_waitcnt vmcnt(0) @@ -6226,7 +6140,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX11-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -6244,11 +6158,10 @@ define <2 x half> 
@buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: v_pk_max_f16 v2, v1, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -6258,7 +6171,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX10-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, v4 ; GFX10-NEXT: v_mov_b32_e32 v1, v5 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -6276,10 +6189,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -6287,7 +6199,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX90A-NEXT: v_pk_max_f16 v0, v5, v5 ; GFX90A-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6304,10 +6216,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_pk_max_f16 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -6316,7 +6227,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX908-NEXT: v_pk_min_f16 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -6333,11 +6244,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 ; GFX8-NEXT: v_max_f16_e32 v3, v1, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -6349,7 +6259,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX8-NEXT: v_or_b32_e32 v5, v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -6367,7 +6277,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -6375,7 +6284,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 @@ -6392,7 +6301,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no ; GFX7-NEXT: v_or_b32_e32 v5, v7, v0 ; GFX7-NEXT: v_mov_b32_e32 v8, v6 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7 @@ -6467,10 +6376,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 ; GFX12-NEXT: v_mov_b32_e32 v1, s16 -; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400 ; GFX12-NEXT: v_pk_max_num_f16 v2, v0, v0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: v_mov_b32_e32 v3, s4 +; GFX12-NEXT: v_mov_b32_e32 v3, s16 ; GFX12-NEXT: s_mov_b32 s4, 0 ; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6481,7 +6388,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v2 ; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1 @@ -6500,10 +6407,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_pk_max_f16 v2, v0, v0 -; 
GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -6512,7 +6418,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX942-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX942-NEXT: s_nop 0 ; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6528,9 +6434,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-NEXT: v_mov_b32_e32 v1, s16 -; GFX11-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX11-NEXT: v_mov_b32_e32 v3, s4 +; GFX11-NEXT: v_mov_b32_e32 v3, s16 ; GFX11-NEXT: s_mov_b32 s4, 0 ; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6541,7 +6446,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv @@ -6559,9 +6464,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 +; GFX10-NEXT: v_mov_b32_e32 v3, s20 ; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start @@ -6572,7 +6476,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX10-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 ; GFX10-NEXT: v_mov_b32_e32 v4, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -6590,17 +6494,16 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1 ; GFX90A-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; 
GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6617,10 +6520,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_pk_max_f16 v2, v0, v0 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -6628,7 +6530,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX908-NEXT: v_pk_min_f16 v0, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v5, v1 ; GFX908-NEXT: v_mov_b32_e32 v4, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1 @@ -6645,11 +6547,10 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 ; GFX8-NEXT: v_max_f16_e32 v3, v0, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -6660,7 +6561,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX8-NEXT: v_or_b32_e32 v0, v5, v0 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -6679,7 +6580,6 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 ; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) @@ -6687,7 +6587,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2 ; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1 ; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 @@ -6704,7 +6604,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin ; GFX7-NEXT: v_or_b32_e32 v4, v6, v3 ; GFX7-NEXT: v_mov_b32_e32 v7, v5 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: 
v_lshrrev_b32_e32 v4, 16, v6 @@ -6778,7 +6678,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_samplecnt 0x0 ; GFX12-NEXT: s_wait_bvhcnt 0x0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-NEXT: s_mov_b32 s1, exec_lo ; GFX12-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-NEXT: v_readfirstlane_b32 s4, v0 @@ -6793,8 +6692,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-NEXT: ; implicit-def: $vgpr4 +; GFX12-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB18_1 ; GFX12-NEXT: ; %bb.2: @@ -6805,13 +6703,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_pk_max_num_f16 v4, v6, v6 +; GFX12-NEXT: v_pk_max_num_f16 v5, v7, v7 ; GFX12-NEXT: s_mov_b32 s2, exec_lo ; GFX12-NEXT: s_wait_storecnt 0x0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_pk_min_num_f16 v5, v4, v8 -; GFX12-NEXT: v_mov_b32_e32 v4, v5 +; GFX12-NEXT: v_pk_min_num_f16 v6, v5, v8 ; GFX12-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX12-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-NEXT: v_readfirstlane_b32 s4, v0 @@ -6826,14 +6724,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_wait_alu 0xfffe ; GFX12-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-NEXT: s_cbranch_execnz .LBB18_4 ; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX12-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-NEXT: global_inv scope:SCOPE_DEV ; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-NEXT: s_wait_alu 0xfffe @@ -6841,14 +6739,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX12-NEXT: s_cbranch_execnz .LBB18_3 ; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: s_setpc_b64 s[30:31] ; ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -6860,23 +6757,22 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; 
GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB18_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_pk_max_f16 v9, v5, v5 +; GFX942-NEXT: v_pk_max_f16 v5, v5, v5 ; GFX942-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_pk_max_f16 v4, v7, v7 +; GFX942-NEXT: v_pk_max_f16 v6, v9, v9 ; GFX942-NEXT: s_mov_b64 s[8:9], exec -; GFX942-NEXT: v_pk_min_f16 v6, v4, v9 +; GFX942-NEXT: v_pk_min_f16 v8, v6, v5 ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -6889,27 +6785,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB18_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB18_3 ; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 @@ -6923,8 +6818,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-NEXT: ; implicit-def: $vgpr4 +; GFX11-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB18_1 ; GFX11-NEXT: ; %bb.2: @@ -6935,13 +6829,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX11-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_pk_max_f16 v4, v6, v6 +; GFX11-NEXT: v_pk_max_f16 v5, v7, v7 ; GFX11-NEXT: s_mov_b32 s2, exec_lo ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) -; GFX11-NEXT: v_pk_min_f16 v5, v4, v8 -; GFX11-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-NEXT: v_pk_min_f16 v6, v5, v8 ; GFX11-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX11-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-NEXT: v_readfirstlane_b32 s4, v0 @@ -6955,14 +6849,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_execnz .LBB18_4 ; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX11-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: buffer_gl0_inv ; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -6971,13 +6865,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX11-NEXT: s_cbranch_execnz .LBB18_3 ; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 @@ -6989,8 +6882,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB18_1 @@ -7001,12 +6893,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_pk_max_f16 v4, v6, v6 +; GFX10-NEXT: v_pk_max_f16 v5, v7, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_pk_min_f16 v5, v4, v8 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_pk_min_f16 v6, v5, v8 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; GFX10-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -7018,15 +6910,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; 
GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB18_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -7035,13 +6927,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX10-NEXT: s_cbranch_execnz .LBB18_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -7053,22 +6944,21 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_pk_max_f16 v9, v5, v5 +; GFX90A-NEXT: v_pk_max_f16 v5, v5, v5 ; GFX90A-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_pk_max_f16 v4, v7, v7 -; GFX90A-NEXT: v_pk_min_f16 v6, v4, v9 +; GFX90A-NEXT: v_pk_max_f16 v6, v9, v9 +; GFX90A-NEXT: v_pk_min_f16 v8, v6, v5 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -7080,27 +6970,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB18_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: 
v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7112,8 +7001,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB18_1 ; GFX908-NEXT: ; %bb.2: @@ -7124,11 +7012,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_pk_max_f16 v4, v6, v6 -; GFX908-NEXT: v_pk_min_f16 v5, v4, v8 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_pk_max_f16 v5, v7, v7 +; GFX908-NEXT: v_pk_min_f16 v6, v5, v8 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -7140,27 +7028,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB18_4 ; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB18_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7172,8 +7059,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: 
s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB18_1 ; GFX8-NEXT: ; %bb.2: @@ -7185,14 +7071,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; GFX8-NEXT: v_max_f16_e32 v5, v6, v6 -; GFX8-NEXT: v_min_f16_sdwa v4, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX8-NEXT: v_min_f16_e32 v5, v5, v9 -; GFX8-NEXT: v_or_b32_e32 v5, v5, v4 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_max_f16_sdwa v5, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT: v_max_f16_e32 v6, v7, v7 +; GFX8-NEXT: v_min_f16_sdwa v5, v5, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; GFX8-NEXT: v_min_f16_e32 v6, v6, v9 +; GFX8-NEXT: v_or_b32_e32 v6, v6, v5 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -7204,27 +7090,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB18_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB18_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7235,39 +7120,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB18_1 ; GFX7-NEXT: ; %bb.2: ; GFX7-NEXT: s_mov_b64 exec, s[6:7] ; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: 
v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 ; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6 -; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 ; GFX7-NEXT: .LBB18_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB18_4 Depth 2 ; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5 -; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4 +; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5 -; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5 +; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_min_f32_e32 v6, v6, v10 -; GFX7-NEXT: v_min_f32_e32 v7, v7, v11 -; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6 +; GFX7-NEXT: v_or_b32_e32 v6, v6, v5 +; GFX7-NEXT: v_min_f32_e32 v7, v7, v10 +; GFX7-NEXT: v_min_f32_e32 v8, v8, v11 ; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7 -; GFX7-NEXT: v_or_b32_e32 v6, v4, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; GFX7-NEXT: v_or_b32_e32 v5, v7, v4 -; GFX7-NEXT: v_mov_b32_e32 v8, v6 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX7-NEXT: v_or_b32_e32 v5, v8, v5 +; GFX7-NEXT: v_mov_b32_e32 v9, v6 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -7279,23 +7163,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB18_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7 -; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8 ; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB18_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v7 ; GFX7-NEXT: v_mov_b32_e32 v1, v5 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -7396,13 +7280,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe +; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 ; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX12-TRUE16-NEXT: 
.LBB19_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 @@ -7431,7 +7313,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6 @@ -7452,11 +7334,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400 ; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0 -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024 ; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7487,7 +7367,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6 @@ -7506,13 +7386,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -7534,7 +7413,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5] ; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7] -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -7549,12 +7428,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1 +; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1 ; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start @@ -7583,7 +7461,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -7601,10 +7479,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1 ; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 @@ -7635,7 +7512,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -7654,9 +7531,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 @@ -7682,7 +7558,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: v_mov_b32_e32 v1, v6 -; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 
glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -7700,13 +7576,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -7727,7 +7602,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7 @@ -7744,13 +7619,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -7772,7 +7646,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: v_mov_b32_e32 v1, v6 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 @@ -7789,11 +7663,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -7818,7 +7691,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: v_mov_b32_e32 v1, v6 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6 
@@ -7834,7 +7707,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -7843,7 +7715,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4 -; GFX7-NEXT: v_mov_b32_e32 v4, s6 +; GFX7-NEXT: v_mov_b32_e32 v4, s20 ; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 @@ -7858,7 +7730,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu ; GFX7-NEXT: v_alignbit_b32 v0, v0, v6, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v1 ; GFX7-NEXT: v_mov_b32_e32 v5, v0 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -7928,11 +7800,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe -; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 +; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 @@ -7958,7 +7828,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1 @@ -7980,11 +7850,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400 -; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe -; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 +; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024 ; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX12-FAKE16-NEXT: ; =>This 
Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 @@ -8010,7 +7878,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX12-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1 @@ -8029,13 +7897,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s4, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[6:7], 0 ; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX942-NEXT: s_movk_i32 s8, 0x7fff ; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX942-NEXT: s_mov_b32 s9, 0x7060302 -; GFX942-NEXT: v_mov_b32_e32 v4, s4 +; GFX942-NEXT: v_mov_b32_e32 v4, s16 ; GFX942-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -8056,7 +7923,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5] ; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1] -; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -8072,11 +7939,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0 -; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0 ; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-TRUE16-NEXT: .p2align 6 ; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start @@ -8102,7 +7967,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h ; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv @@ -8121,11 +7986,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0 -; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 +; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0 ; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0 +; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1 ; GFX11-FAKE16-NEXT: .p2align 6 ; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start @@ -8151,7 +8014,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0 -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv @@ -8170,12 +8033,11 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: v_mov_b32_e32 v1, s20 -; GFX10-NEXT: s_add_i32 s4, s20, 0x400 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX10-NEXT: v_mov_b32_e32 v4, s4 -; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 +; GFX10-NEXT: v_mov_b32_e32 v4, s20 ; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 ; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX10-NEXT: s_waitcnt vmcnt(0) @@ -8197,7 +8059,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v6, v1 ; GFX10-NEXT: v_mov_b32_e32 v5, v0 -; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv @@ -8215,13 +8077,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX90A-NEXT: v_mov_b32_e32 v1, s20 ; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s4, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 ; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX90A-NEXT: s_movk_i32 s8, 0x7fff ; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX90A-NEXT: s_mov_b32 s9, 0x7060302 -; GFX90A-NEXT: v_mov_b32_e32 v4, s4 +; GFX90A-NEXT: v_mov_b32_e32 v4, s20 ; GFX90A-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -8241,7 +8102,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc ; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1] -; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc +; GFX90A-NEXT: 
buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1 @@ -8258,13 +8119,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX908-NEXT: v_mov_b32_e32 v1, s20 ; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s4, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[6:7], 0 ; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX908-NEXT: s_movk_i32 s8, 0x7fff ; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 ; GFX908-NEXT: s_mov_b32 s9, 0x7060302 -; GFX908-NEXT: v_mov_b32_e32 v4, s4 +; GFX908-NEXT: v_mov_b32_e32 v4, s20 ; GFX908-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -8285,7 +8145,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9 ; GFX908-NEXT: v_mov_b32_e32 v6, v1 ; GFX908-NEXT: v_mov_b32_e32 v5, v0 -; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -8302,11 +8162,10 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, s20 ; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s4, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[6:7], 0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, s4 +; GFX8-NEXT: v_mov_b32_e32 v4, s20 ; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -8330,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, v0 -; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 @@ -8347,7 +8206,6 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024 -; GFX7-NEXT: s_add_i32 s6, s20, 0x400 ; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v0 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 @@ -8356,7 +8214,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX7-NEXT: v_mov_b32_e32 v2, s6 +; GFX7-NEXT: v_mov_b32_e32 v2, s20 ; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 @@ -8371,7 +8229,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi ; GFX7-NEXT: v_alignbit_b32 v3, v3, v6, 16 ; GFX7-NEXT: v_mov_b32_e32 v6, v4 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 -; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, 
s[16:19], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 @@ -8440,7 +8298,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo ; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8455,8 +8312,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX12-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX12-TRUE16-NEXT: ; %bb.2: @@ -8468,30 +8324,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v8 :: v_dual_min_num_f32 v4, v4, v9 -; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v6, v6, v8 :: v_dual_min_num_f32 v5, v5, v9 +; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff +; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo -; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd -; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h +; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX12-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5 ; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-TRUE16-NEXT: 
v_mov_b32_e32 v6, v7 ; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8506,14 +8362,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe ; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe @@ -8521,7 +8377,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3 ; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31] ; @@ -8532,7 +8388,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0 ; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo ; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8547,8 +8402,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024 -; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX12-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024 ; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX12-FAKE16-NEXT: ; %bb.2: @@ -8560,30 +8414,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v9 :: v_dual_min_num_f32 v4, v4, v8 -; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v6, v6, v9 :: v_dual_min_num_f32 v5, v5, v8 +; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX12-FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_2) -; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff +; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo -; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd -; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 +; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX12-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5 ; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8598,14 +8452,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe ; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN +; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN ; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV ; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 ; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe @@ -8613,14 +8467,13 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3 ; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX942-NEXT: s_mov_b64 s[2:3], exec ; GFX942-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -8632,40 +8485,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX942-NEXT: 
v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3] ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] -; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024 -; GFX942-NEXT: ; implicit-def: $vgpr4 +; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB21_1 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: s_mov_b64 exec, s[2:3] ; GFX942-NEXT: s_mov_b64 s[2:3], 0 -; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX942-NEXT: s_movk_i32 s10, 0x7fff -; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX942-NEXT: s_mov_b32 s11, 0x7060302 ; GFX942-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Loop Header: Depth=1 ; GFX942-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX942-NEXT: v_min_f32_e32 v4, v4, v9 -; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10 -; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX942-NEXT: v_min_f32_e32 v6, v6, v10 +; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10 +; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX942-NEXT: s_mov_b64 s[8:9], exec ; GFX942-NEXT: buffer_wbl2 sc1 -; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX942-NEXT: v_min_f32_e32 v5, v5, v10 -; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10 -; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX942-NEXT: v_min_f32_e32 v7, v7, v5 +; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10 +; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 ; GFX942-NEXT: s_nop 1 -; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11 -; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7] +; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11 +; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9] ; GFX942-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX942-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX942-NEXT: v_readfirstlane_b32 s4, v0 @@ -8678,27 +8530,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1] ; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1] ; GFX942-NEXT: s_cbranch_execnz .LBB21_4 ; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX942-NEXT: s_mov_b64 exec, s[8:9] ; GFX942-NEXT: s_waitcnt vmcnt(0) -; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v7, v4 +; GFX942-NEXT: v_mov_b32_e32 v9, v6 ; GFX942-NEXT: buffer_inv sc1 ; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3] ; GFX942-NEXT: s_cbranch_execnz .LBB21_3 ; GFX942-NEXT: ; %bb.6: ; 
%atomicrmw.end ; GFX942-NEXT: s_or_b64 exec, exec, s[2:3] -; GFX942-NEXT: v_mov_b32_e32 v0, v4 +; GFX942-NEXT: v_mov_b32_e32 v0, v6 ; GFX942-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8712,8 +8563,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-TRUE16-NEXT: ; %bb.2: @@ -8726,28 +8576,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v8 :: v_dual_min_f32 v4, v4, v9 -; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_dual_min_f32 v6, v6, v8 :: v_dual_min_f32 v5, v5, v9 +; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h ; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent 
Loop BB21_3 Depth=1 ; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8761,14 +8611,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-TRUE16-NEXT: buffer_gl1_inv ; GFX11-TRUE16-NEXT: buffer_gl0_inv ; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -8778,13 +8628,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX11-FAKE16: ; %bb.0: ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8798,8 +8647,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 -; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024 -; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4 +; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024 ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1 ; GFX11-FAKE16-NEXT: ; %bb.2: @@ -8812,28 +8660,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1 ; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7 ; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo ; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v9 :: v_dual_min_f32 v4, v4, v8 -; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_dual_min_f32 v6, v6, v9 :: v_dual_min_f32 v5, v5, v8 +; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1 ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, 
v4 -; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo ; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5 +; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7 ; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0 @@ -8847,14 +8695,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc +; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc ; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0 ; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4 ; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2 ; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5 ; GFX11-FAKE16-NEXT: buffer_gl1_inv ; GFX11-FAKE16-NEXT: buffer_gl0_inv ; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1 @@ -8864,13 +8712,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2 ; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4 +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5 ; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4 ; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 @@ -8882,8 +8729,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3] ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 -; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX10-NEXT: ; implicit-def: $vgpr4 +; GFX10-NEXT: buffer_load_dword v7, 
v4, s[8:11], 0 offen offset:1024 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_1 @@ -8895,25 +8741,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 ; GFX10-NEXT: s_mov_b32 s6, exec_lo ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: v_min_f32_e32 v4, v4, v8 -; GFX10-NEXT: v_min_f32_e32 v5, v5, v9 -; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1 -; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 -; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5 -; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff -; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo +; GFX10-NEXT: v_min_f32_e32 v5, v5, v8 +; GFX10-NEXT: v_min_f32_e32 v6, v6, v9 +; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 -; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo -; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302 -; GFX10-NEXT: v_mov_b32_e32 v4, v5 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo +; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302 ; GFX10-NEXT: v_mov_b32_e32 v5, v6 +; GFX10-NEXT: v_mov_b32_e32 v6, v7 ; GFX10-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX10-NEXT: v_readfirstlane_b32 s8, v0 @@ -8925,15 +8771,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4 ; GFX10-NEXT: s_and_saveexec_b32 s4, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 ; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB21_4 ; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX10-NEXT: s_mov_b32 exec_lo, s6 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6 -; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7 +; GFX10-NEXT: v_mov_b32_e32 v7, v5 ; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: buffer_gl0_inv ; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 @@ -8942,13 +8788,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX10-NEXT: s_cbranch_execnz .LBB21_3 ; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_mov_b32_e32 v0, v4 +; GFX10-NEXT: v_mov_b32_e32 v0, v5 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX90A: ; %bb.0: ; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4 ; GFX90A-NEXT: s_mov_b64 s[6:7], exec ; GFX90A-NEXT: .LBB21_1: ; =>This 
Inner Loop Header: Depth=1 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -8960,38 +8805,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_nop 0 -; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX90A-NEXT: ; implicit-def: $vgpr4 +; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024 ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_1 ; GFX90A-NEXT: ; %bb.2: ; GFX90A-NEXT: s_mov_b64 exec, s[6:7] ; GFX90A-NEXT: s_mov_b64 s[6:7], 0 -; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5 +; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5 ; GFX90A-NEXT: s_movk_i32 s14, 0x7fff -; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX90A-NEXT: s_mov_b32 s15, 0x7060302 ; GFX90A-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7 -; GFX90A-NEXT: v_min_f32_e32 v4, v4, v9 -; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc -; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7 -; GFX90A-NEXT: v_min_f32_e32 v5, v5, v10 -; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1 -; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14 -; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5 -; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc -; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15 +; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX90A-NEXT: v_min_f32_e32 v6, v6, v10 +; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1 +; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14 +; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc +; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; GFX90A-NEXT: v_min_f32_e32 v7, v7, v5 +; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1 +; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14 +; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc +; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15 ; GFX90A-NEXT: s_mov_b64 s[12:13], exec -; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1] +; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1] ; GFX90A-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX90A-NEXT: v_readfirstlane_b32 s8, v0 @@ -9003,27 +8847,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc ; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_4 ; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX90A-NEXT: s_mov_b64 exec, s[12:13] ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7 +; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9 ; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v7, v4 +; GFX90A-NEXT: v_mov_b32_e32 v9, v6 ; 
GFX90A-NEXT: buffer_wbinvl1 ; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX90A-NEXT: s_cbranch_execnz .LBB21_3 ; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX90A-NEXT: v_mov_b32_e32 v0, v4 +; GFX90A-NEXT: v_mov_b32_e32 v0, v6 ; GFX90A-NEXT: s_setpc_b64 s[30:31] ; ; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX908: ; %bb.0: ; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4 ; GFX908-NEXT: s_mov_b64 s[6:7], exec ; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -9035,8 +8878,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_nop 0 -; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX908-NEXT: ; implicit-def: $vgpr4 +; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_1 ; GFX908-NEXT: ; %bb.2: @@ -9050,24 +8892,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX908-NEXT: ; =>This Loop Header: Depth=1 ; GFX908-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX908-NEXT: v_min_f32_e32 v4, v4, v8 -; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14 -; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX908-NEXT: v_min_f32_e32 v5, v5, v9 -; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14 -; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX908-NEXT: v_min_f32_e32 v5, v5, v8 +; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14 +; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15 -; GFX908-NEXT: v_mov_b32_e32 v4, v5 -; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX908-NEXT: v_min_f32_e32 v6, v6, v9 +; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14 +; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15 ; GFX908-NEXT: v_mov_b32_e32 v5, v6 +; GFX908-NEXT: s_mov_b64 s[12:13], exec +; GFX908-NEXT: v_mov_b32_e32 v6, v7 ; GFX908-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX908-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX908-NEXT: v_readfirstlane_b32 s8, v0 @@ -9079,27 +8921,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX908-NEXT: s_cbranch_execnz .LBB21_4 ; GFX908-NEXT: ; %bb.5: 
; in Loop: Header=BB21_3 Depth=1 ; GFX908-NEXT: s_mov_b64 exec, s[12:13] ; GFX908-NEXT: s_waitcnt vmcnt(0) -; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v6, v4 +; GFX908-NEXT: v_mov_b32_e32 v7, v5 ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX908-NEXT: s_cbranch_execnz .LBB21_3 ; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX908-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX908-NEXT: v_mov_b32_e32 v0, v4 +; GFX908-NEXT: v_mov_b32_e32 v0, v5 ; GFX908-NEXT: s_setpc_b64 s[30:31] ; ; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4 ; GFX8-NEXT: s_mov_b64 s[6:7], exec ; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: v_readfirstlane_b32 s8, v0 @@ -9111,8 +8952,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_nop 0 -; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024 -; GFX8-NEXT: ; implicit-def: $vgpr4 +; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_1 ; GFX8-NEXT: ; %bb.2: @@ -9124,27 +8964,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB21_4 Depth 2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6 -; GFX8-NEXT: v_min_f32_e32 v4, v4, v8 -; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5 -; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 -; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc -; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 -; GFX8-NEXT: v_min_f32_e32 v5, v5, v9 -; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5 -; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 -; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; GFX8-NEXT: v_min_f32_e32 v5, v5, v8 +; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 -; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16 -; GFX8-NEXT: v_mov_b32_e32 v4, v5 -; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 +; GFX8-NEXT: v_min_f32_e32 v6, v6, v9 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16 ; GFX8-NEXT: v_mov_b32_e32 v5, v6 +; GFX8-NEXT: s_mov_b64 s[12:13], exec +; GFX8-NEXT: v_mov_b32_e32 v6, v7 ; GFX8-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: 
v_readfirstlane_b32 s8, v0 @@ -9156,27 +8996,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc ; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX8-NEXT: s_cbranch_execnz .LBB21_4 ; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX8-NEXT: s_mov_b64 exec, s[12:13] ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7 ; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v6, v4 +; GFX8-NEXT: v_mov_b32_e32 v7, v5 ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX8-NEXT: s_cbranch_execnz .LBB21_3 ; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX8-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX8-NEXT: v_mov_b32_e32 v0, v4 +; GFX8-NEXT: v_mov_b32_e32 v0, v5 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; ; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4 ; GFX7-NEXT: s_mov_b64 s[6:7], exec ; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -9187,8 +9026,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3] ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] -; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024 -; GFX7-NEXT: ; implicit-def: $vgpr4 +; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024 ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_1 ; GFX7-NEXT: ; %bb.2: @@ -9196,27 +9034,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 ; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: s_mov_b64 s[6:7], 0 -; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 -; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5 ; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Loop Header: Depth=1 ; GFX7-NEXT: ; Child Loop BB21_4 Depth 2 -; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v5 ; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v7 -; GFX7-NEXT: v_min_f32_e32 v4, v4, v9 -; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v8 +; GFX7-NEXT: v_min_f32_e32 v5, v5, v10 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6 ; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_min_f32_e32 v7, v7, v10 -; GFX7-NEXT: v_alignbit_b32 v5, v5, v6, 16 -; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16 -; GFX7-NEXT: v_mov_b32_e32 v7, v5 +; GFX7-NEXT: v_min_f32_e32 v8, v8, v11 +; GFX7-NEXT: v_alignbit_b32 v6, v6, v7, 16 +; GFX7-NEXT: v_alignbit_b32 v5, v5, v8, 16 +; 
GFX7-NEXT: v_mov_b32_e32 v9, v6 ; GFX7-NEXT: s_mov_b64 s[12:13], exec -; GFX7-NEXT: v_mov_b32_e32 v6, v4 +; GFX7-NEXT: v_mov_b32_e32 v8, v5 ; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1 ; GFX7-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX7-NEXT: v_readfirstlane_b32 s8, v0 @@ -9228,23 +9066,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf ; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5] ; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc +; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc ; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5] ; GFX7-NEXT: s_cbranch_execnz .LBB21_4 ; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1 ; GFX7-NEXT: s_mov_b64 exec, s[12:13] ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5 -; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 ; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7] -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7] ; GFX7-NEXT: s_cbranch_execnz .LBB21_3 ; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end ; GFX7-NEXT: s_or_b64 exec, exec, s[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v7 -; GFX7-NEXT: v_mov_b32_e32 v1, v4 +; GFX7-NEXT: v_mov_b32_e32 v0, v8 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory: @@ -9353,10 +9191,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: v_mov_b32_e32 v0, s16 ; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024 -; GFX942-NEXT: s_add_i32 s6, s16, 0x400 ; GFX942-NEXT: s_mov_b64 s[4:5], 0 ; GFX942-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX942-NEXT: v_mov_b32_e32 v3, s6 +; GFX942-NEXT: v_mov_b32_e32 v3, s16 ; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX942-NEXT: s_waitcnt vmcnt(0) @@ -9365,7 +9202,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX942-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5] ; GFX942-NEXT: buffer_wbl2 sc0 sc1 -; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0 +; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: buffer_inv sc0 sc1 ; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -9404,10 +9241,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_mov_b32_e32 v1, v0 ; GFX90A-NEXT: v_mov_b32_e32 v0, s20 ; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX90A-NEXT: s_add_i32 s6, s20, 0x400 ; GFX90A-NEXT: s_mov_b64 s[4:5], 0 ; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX90A-NEXT: v_mov_b32_e32 v3, s6 +; GFX90A-NEXT: v_mov_b32_e32 v3, s20 ; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX90A-NEXT: s_waitcnt vmcnt(0) @@ -9416,7 +9252,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1] ; GFX90A-NEXT: buffer_wbl2 -; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, 
s[16:19], 0 offen glc +; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX90A-NEXT: s_waitcnt vmcnt(0) ; GFX90A-NEXT: buffer_invl2 ; GFX90A-NEXT: buffer_wbinvl1 @@ -9434,10 +9270,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_mov_b32_e32 v1, v0 ; GFX908-NEXT: v_mov_b32_e32 v0, s20 ; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX908-NEXT: s_add_i32 s6, s20, 0x400 ; GFX908-NEXT: s_mov_b64 s[4:5], 0 ; GFX908-NEXT: v_max_f32_e32 v2, v1, v1 -; GFX908-NEXT: v_mov_b32_e32 v3, s6 +; GFX908-NEXT: v_mov_b32_e32 v3, s20 ; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX908-NEXT: s_waitcnt vmcnt(0) @@ -9446,7 +9281,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX908-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX908-NEXT: v_mov_b32_e32 v0, v4 ; GFX908-NEXT: v_mov_b32_e32 v1, v5 -; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX908-NEXT: s_waitcnt vmcnt(0) ; GFX908-NEXT: buffer_wbinvl1 ; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 @@ -9463,10 +9298,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_mov_b32_e32 v1, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s20 ; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024 -; GFX8-NEXT: s_add_i32 s6, s20, 0x400 ; GFX8-NEXT: s_mov_b64 s[4:5], 0 ; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1 -; GFX8-NEXT: v_mov_b32_e32 v3, s6 +; GFX8-NEXT: v_mov_b32_e32 v3, s20 ; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start ; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX8-NEXT: s_waitcnt vmcnt(0) @@ -9475,7 +9309,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_ ; GFX8-NEXT: v_min_f32_e32 v4, v0, v2 ; GFX8-NEXT: v_mov_b32_e32 v0, v4 ; GFX8-NEXT: v_mov_b32_e32 v1, v5 -; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc +; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: buffer_wbinvl1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll index 3c991cfb7a1aa..afd0f01580538 100644 --- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll +++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll @@ -782,69 +782,90 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; SDAG-GFX942-LABEL: memcpy_known_medium: ; SDAG-GFX942: ; %bb.0: ; SDAG-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 -; SDAG-GFX942-NEXT: s_load_dword s13, s[4:5], 0x34 +; SDAG-GFX942-NEXT: s_load_dword s17, s[4:5], 0x34 ; SDAG-GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x44 -; SDAG-GFX942-NEXT: s_load_dword s14, s[4:5], 0x54 -; SDAG-GFX942-NEXT: s_mov_b32 s12, 0 -; SDAG-GFX942-NEXT: s_mov_b32 s5, s12 -; SDAG-GFX942-NEXT: v_mov_b32_e32 v0, 0 +; SDAG-GFX942-NEXT: s_load_dword s12, s[4:5], 0x54 +; SDAG-GFX942-NEXT: s_mov_b32 s16, 0 +; SDAG-GFX942-NEXT: s_mov_b32 s5, s16 ; SDAG-GFX942-NEXT: s_waitcnt lgkmcnt(0) ; SDAG-GFX942-NEXT: s_mov_b32 s4, s3 -; SDAG-GFX942-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13] -; SDAG-GFX942-NEXT: s_mov_b32 s13, s2 +; SDAG-GFX942-NEXT: s_or_b64 s[6:7], s[4:5], s[16:17] +; SDAG-GFX942-NEXT: s_mov_b32 s17, s2 ; SDAG-GFX942-NEXT: s_mov_b32 s2, s1 -; SDAG-GFX942-NEXT: 
s_mov_b32 s3, s12 -; SDAG-GFX942-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13] -; SDAG-GFX942-NEXT: s_mov_b32 s13, s14 +; SDAG-GFX942-NEXT: s_mov_b32 s3, s16 +; SDAG-GFX942-NEXT: s_or_b64 s[4:5], s[2:3], s[16:17] +; SDAG-GFX942-NEXT: s_mov_b32 s17, s12 ; SDAG-GFX942-NEXT: s_mov_b32 s2, s11 -; SDAG-GFX942-NEXT: s_or_b64 s[14:15], s[2:3], s[12:13] -; SDAG-GFX942-NEXT: s_mov_b32 s13, s10 +; SDAG-GFX942-NEXT: s_or_b64 s[14:15], s[2:3], s[16:17] +; SDAG-GFX942-NEXT: s_mov_b32 s17, s10 ; SDAG-GFX942-NEXT: s_mov_b32 s2, s9 -; SDAG-GFX942-NEXT: s_or_b64 s[12:13], s[2:3], s[12:13] +; SDAG-GFX942-NEXT: s_or_b64 s[12:13], s[2:3], s[16:17] ; SDAG-GFX942-NEXT: .LBB1_1: ; %load-store-loop ; SDAG-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 -; SDAG-GFX942-NEXT: v_add_u32_e32 v1, s0, v0 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v1, s[4:7], 0 offen -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v1, s[4:7], 0 offen offset:16 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v1, s[4:7], 0 offen offset:32 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v1, s[4:7], 0 offen offset:48 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v1, s[4:7], 0 offen offset:64 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v1, s[4:7], 0 offen offset:80 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v1, s[4:7], 0 offen offset:96 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v1, s[4:7], 0 offen offset:112 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v1, s[4:7], 0 offen offset:128 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v1, s[4:7], 0 offen offset:144 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v1, s[4:7], 0 offen offset:160 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v1, s[4:7], 0 offen offset:176 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v1, s[4:7], 0 offen offset:192 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v1, s[4:7], 0 offen offset:208 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v1, s[4:7], 0 offen offset:224 -; SDAG-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v1, s[4:7], 0 offen offset:240 -; SDAG-GFX942-NEXT: v_add_u32_e32 v62, s8, v0 -; SDAG-GFX942-NEXT: v_add_co_u32_e32 v0, vcc, 0x100, v0 -; SDAG-GFX942-NEXT: s_and_b64 vcc, exec, vcc -; SDAG-GFX942-NEXT: s_waitcnt vmcnt(0) -; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v63, a3 ; Reload Reuse -; SDAG-GFX942-NEXT: scratch_store_dwordx3 off, a[0:2], off ; 12-byte Folded Spill -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[12:15], 0 offen -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v62, s[12:15], 0 offen offset:16 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v62, s[12:15], 0 offen offset:32 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v62, s[12:15], 0 offen offset:48 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v62, s[12:15], 0 offen offset:64 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v62, s[12:15], 0 offen offset:80 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v62, s[12:15], 0 offen offset:96 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v62, s[12:15], 0 offen offset:112 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v62, s[12:15], 0 offen offset:128 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v62, s[12:15], 0 offen offset:144 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v62, s[12:15], 0 offen offset:160 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v62, s[12:15], 0 offen offset:176 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v62, s[12:15], 0 offen offset:192 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[54:57], 
v62, s[12:15], 0 offen offset:208 -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v62, s[12:15], 0 offen offset:224 -; SDAG-GFX942-NEXT: scratch_load_dwordx3 v[2:4], off, off ; 12-byte Folded Reload +; SDAG-GFX942-NEXT: s_add_i32 s1, s0, s16 +; SDAG-GFX942-NEXT: v_mov_b32_e32 v60, s1 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[8:11], v60, s[4:7], 0 offen +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[4:7], v60, s[4:7], 0 offen offset:16 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[12:15], v60, s[4:7], 0 offen offset:32 +; SDAG-GFX942-NEXT: s_add_i32 s2, s8, s16 +; SDAG-GFX942-NEXT: v_mov_b32_e32 v0, s2 +; SDAG-GFX942-NEXT: s_addk_i32 s16, 0x100 +; SDAG-GFX942-NEXT: s_cmpk_lt_u32 s16, 0x100 ; SDAG-GFX942-NEXT: s_waitcnt vmcnt(0) -; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[12:15], 0 offen offset:240 -; SDAG-GFX942-NEXT: s_cbranch_vccnz .LBB1_1 +; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a0, v15 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a1, v14 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a2, v13 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a3, v12 ; Reload Reuse +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[12:15], v60, s[4:7], 0 offen offset:48 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[16:19], v60, s[4:7], 0 offen offset:64 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[20:23], v60, s[4:7], 0 offen offset:80 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[24:27], v60, s[4:7], 0 offen offset:96 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[28:31], v60, s[4:7], 0 offen offset:112 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[32:35], v60, s[4:7], 0 offen offset:128 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[36:39], v60, s[4:7], 0 offen offset:144 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[40:43], v60, s[4:7], 0 offen offset:160 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[44:47], v60, s[4:7], 0 offen offset:176 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[48:51], v60, s[4:7], 0 offen offset:192 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[52:55], v60, s[4:7], 0 offen offset:208 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[56:59], v60, s[4:7], 0 offen offset:224 +; SDAG-GFX942-NEXT: s_nop 0 +; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[60:63], v60, s[4:7], 0 offen offset:240 +; SDAG-GFX942-NEXT: s_nop 0 +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[8:11], v0, s[12:15], 0 offen +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[4:7], v0, s[12:15], 0 offen offset:16 +; SDAG-GFX942-NEXT: s_nop 1 +; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v5, a0 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v4, a1 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v3, a2 ; Reload Reuse +; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v2, a3 ; Reload Reuse +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v0, s[12:15], 0 offen offset:32 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[12:15], v0, s[12:15], 0 offen offset:48 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[16:19], v0, s[12:15], 0 offen offset:64 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[20:23], v0, s[12:15], 0 offen offset:80 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[24:27], v0, s[12:15], 0 offen offset:96 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[28:31], v0, s[12:15], 0 offen offset:112 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[32:35], v0, s[12:15], 0 offen offset:128 +; SDAG-GFX942-NEXT: s_waitcnt 
vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[36:39], v0, s[12:15], 0 offen offset:144 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[40:43], v0, s[12:15], 0 offen offset:160 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[44:47], v0, s[12:15], 0 offen offset:176 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[48:51], v0, s[12:15], 0 offen offset:192 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[52:55], v0, s[12:15], 0 offen offset:208 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[56:59], v0, s[12:15], 0 offen offset:224 +; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15) +; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[60:63], v0, s[12:15], 0 offen offset:240 +; SDAG-GFX942-NEXT: s_cbranch_scc1 .LBB1_1 ; SDAG-GFX942-NEXT: ; %bb.2: ; %memcpy-split ; SDAG-GFX942-NEXT: s_endpgm ; @@ -852,84 +873,87 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; SDAG-GFX1100: ; %bb.0: ; SDAG-GFX1100-NEXT: s_clause 0x3 ; SDAG-GFX1100-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; SDAG-GFX1100-NEXT: s_load_b32 s13, s[4:5], 0x34 +; SDAG-GFX1100-NEXT: s_load_b32 s17, s[4:5], 0x34 ; SDAG-GFX1100-NEXT: s_load_b128 s[8:11], s[4:5], 0x44 ; SDAG-GFX1100-NEXT: s_load_b32 s18, s[4:5], 0x54 -; SDAG-GFX1100-NEXT: s_mov_b32 s12, 0 -; SDAG-GFX1100-NEXT: v_mov_b32_e32 v0, 0 -; SDAG-GFX1100-NEXT: s_mov_b32 s5, s12 -; SDAG-GFX1100-NEXT: s_mov_b32 s15, s12 -; SDAG-GFX1100-NEXT: s_mov_b32 s17, s12 +; SDAG-GFX1100-NEXT: s_mov_b32 s16, 0 +; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; SDAG-GFX1100-NEXT: s_mov_b32 s5, s16 +; SDAG-GFX1100-NEXT: s_mov_b32 s13, s16 +; SDAG-GFX1100-NEXT: s_mov_b32 s15, s16 ; SDAG-GFX1100-NEXT: s_waitcnt lgkmcnt(0) ; SDAG-GFX1100-NEXT: s_mov_b32 s4, s3 -; SDAG-GFX1100-NEXT: s_mov_b32 s14, s1 -; SDAG-GFX1100-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13] -; SDAG-GFX1100-NEXT: s_mov_b32 s13, s2 -; SDAG-GFX1100-NEXT: s_mov_b32 s16, s11 -; SDAG-GFX1100-NEXT: s_or_b64 s[4:5], s[14:15], s[12:13] -; SDAG-GFX1100-NEXT: s_mov_b32 s13, s18 +; SDAG-GFX1100-NEXT: s_mov_b32 s12, s1 +; SDAG-GFX1100-NEXT: s_or_b64 s[6:7], s[4:5], s[16:17] +; SDAG-GFX1100-NEXT: s_mov_b32 s17, s2 +; SDAG-GFX1100-NEXT: s_mov_b32 s14, s11 +; SDAG-GFX1100-NEXT: s_or_b64 s[4:5], s[12:13], s[16:17] +; SDAG-GFX1100-NEXT: s_mov_b32 s17, s18 ; SDAG-GFX1100-NEXT: s_mov_b32 s2, s9 -; SDAG-GFX1100-NEXT: s_or_b64 s[14:15], s[16:17], s[12:13] -; SDAG-GFX1100-NEXT: s_mov_b32 s13, s10 -; SDAG-GFX1100-NEXT: s_mov_b32 s3, s12 +; SDAG-GFX1100-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17] +; SDAG-GFX1100-NEXT: s_mov_b32 s17, s10 +; SDAG-GFX1100-NEXT: s_mov_b32 s3, s16 ; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; SDAG-GFX1100-NEXT: s_or_b64 s[12:13], s[2:3], s[12:13] +; SDAG-GFX1100-NEXT: s_or_b64 s[12:13], s[2:3], s[16:17] ; SDAG-GFX1100-NEXT: .LBB1_1: ; %load-store-loop ; SDAG-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1 -; SDAG-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0 -; SDAG-GFX1100-NEXT: v_add_nc_u32_e32 v65, s8, v0 -; SDAG-GFX1100-NEXT: v_add_co_u32 v0, s1, 0x100, v0 -; SDAG-GFX1100-NEXT: s_and_b32 vcc_lo, exec_lo, s1 +; SDAG-GFX1100-NEXT: s_add_i32 s1, s0, s16 +; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; SDAG-GFX1100-NEXT: v_mov_b32_e32 v60, s1 +; SDAG-GFX1100-NEXT: s_add_i32 s1, s8, s16 +; SDAG-GFX1100-NEXT: s_addk_i32 s16, 0x100 +; SDAG-GFX1100-NEXT: v_mov_b32_e32 v64, s1 +; SDAG-GFX1100-NEXT: 
s_cmpk_lt_u32 s16, 0x100 ; SDAG-GFX1100-NEXT: s_clause 0xf -; SDAG-GFX1100-NEXT: buffer_load_b128 v[1:4], v61, s[4:7], 0 offen -; SDAG-GFX1100-NEXT: buffer_load_b128 v[5:8], v61, s[4:7], 0 offen offset:16 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[9:12], v61, s[4:7], 0 offen offset:32 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[13:16], v61, s[4:7], 0 offen offset:48 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[17:20], v61, s[4:7], 0 offen offset:64 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[21:24], v61, s[4:7], 0 offen offset:80 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[25:28], v61, s[4:7], 0 offen offset:96 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[29:32], v61, s[4:7], 0 offen offset:112 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[33:36], v61, s[4:7], 0 offen offset:128 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[37:40], v61, s[4:7], 0 offen offset:144 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[41:44], v61, s[4:7], 0 offen offset:160 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[45:48], v61, s[4:7], 0 offen offset:176 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[49:52], v61, s[4:7], 0 offen offset:192 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[53:56], v61, s[4:7], 0 offen offset:208 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[57:60], v61, s[4:7], 0 offen offset:224 -; SDAG-GFX1100-NEXT: buffer_load_b128 v[61:64], v61, s[4:7], 0 offen offset:240 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[0:3], v60, s[4:7], 0 offen +; SDAG-GFX1100-NEXT: buffer_load_b128 v[4:7], v60, s[4:7], 0 offen offset:16 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[8:11], v60, s[4:7], 0 offen offset:32 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[12:15], v60, s[4:7], 0 offen offset:48 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[16:19], v60, s[4:7], 0 offen offset:64 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[20:23], v60, s[4:7], 0 offen offset:80 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[24:27], v60, s[4:7], 0 offen offset:96 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[28:31], v60, s[4:7], 0 offen offset:112 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[32:35], v60, s[4:7], 0 offen offset:128 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[36:39], v60, s[4:7], 0 offen offset:144 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[40:43], v60, s[4:7], 0 offen offset:160 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[44:47], v60, s[4:7], 0 offen offset:176 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[48:51], v60, s[4:7], 0 offen offset:192 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[52:55], v60, s[4:7], 0 offen offset:208 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[56:59], v60, s[4:7], 0 offen offset:224 +; SDAG-GFX1100-NEXT: buffer_load_b128 v[60:63], v60, s[4:7], 0 offen offset:240 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(15) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[1:4], v65, s[12:15], 0 offen +; SDAG-GFX1100-NEXT: buffer_store_b128 v[0:3], v64, s[12:15], 0 offen ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(14) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[5:8], v65, s[12:15], 0 offen offset:16 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[4:7], v64, s[12:15], 0 offen offset:16 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(13) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[9:12], v65, s[12:15], 0 offen offset:32 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[8:11], v64, s[12:15], 0 offen offset:32 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(12) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[13:16], v65, s[12:15], 0 offen offset:48 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[12:15], v64, s[12:15], 0 offen offset:48 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(11) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[17:20], v65, s[12:15], 0 offen offset:64 +; SDAG-GFX1100-NEXT: 
buffer_store_b128 v[16:19], v64, s[12:15], 0 offen offset:64 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(10) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[21:24], v65, s[12:15], 0 offen offset:80 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[20:23], v64, s[12:15], 0 offen offset:80 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(9) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[25:28], v65, s[12:15], 0 offen offset:96 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[24:27], v64, s[12:15], 0 offen offset:96 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(8) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[29:32], v65, s[12:15], 0 offen offset:112 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[28:31], v64, s[12:15], 0 offen offset:112 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(7) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[33:36], v65, s[12:15], 0 offen offset:128 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[32:35], v64, s[12:15], 0 offen offset:128 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(6) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[37:40], v65, s[12:15], 0 offen offset:144 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[36:39], v64, s[12:15], 0 offen offset:144 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(5) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[41:44], v65, s[12:15], 0 offen offset:160 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[40:43], v64, s[12:15], 0 offen offset:160 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(4) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[45:48], v65, s[12:15], 0 offen offset:176 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[44:47], v64, s[12:15], 0 offen offset:176 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(3) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[49:52], v65, s[12:15], 0 offen offset:192 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[48:51], v64, s[12:15], 0 offen offset:192 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(2) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[53:56], v65, s[12:15], 0 offen offset:208 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[52:55], v64, s[12:15], 0 offen offset:208 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(1) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[57:60], v65, s[12:15], 0 offen offset:224 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[56:59], v64, s[12:15], 0 offen offset:224 ; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(0) -; SDAG-GFX1100-NEXT: buffer_store_b128 v[61:64], v65, s[12:15], 0 offen offset:240 -; SDAG-GFX1100-NEXT: s_cbranch_vccnz .LBB1_1 +; SDAG-GFX1100-NEXT: buffer_store_b128 v[60:63], v64, s[12:15], 0 offen offset:240 +; SDAG-GFX1100-NEXT: s_cbranch_scc1 .LBB1_1 ; SDAG-GFX1100-NEXT: ; %bb.2: ; %memcpy-split ; SDAG-GFX1100-NEXT: s_endpgm ; @@ -957,52 +981,50 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; GISEL-GFX942-NEXT: s_mov_b32 s2, s7 ; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GISEL-GFX942-NEXT: s_or_b64 s[6:7], s[6:7], s[2:3] -; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, s16 +; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, 0x100 +; GISEL-GFX942-NEXT: v_mov_b32_e32 v1, s16 ; GISEL-GFX942-NEXT: .LBB1_1: ; %load-store-loop ; GISEL-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1 -; GISEL-GFX942-NEXT: v_add_u32_e32 v1, s0, v0 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v1, s[8:11], 0 offen -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v1, s[8:11], 0 offen offset:16 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v1, s[8:11], 0 offen offset:32 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v1, s[8:11], 0 offen offset:48 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v1, s[8:11], 0 offen offset:64 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v1, s[8:11], 0 offen offset:80 -; GISEL-GFX942-NEXT: 
buffer_load_dwordx4 v[26:29], v1, s[8:11], 0 offen offset:96 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v1, s[8:11], 0 offen offset:112 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v1, s[8:11], 0 offen offset:128 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v1, s[8:11], 0 offen offset:144 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v1, s[8:11], 0 offen offset:160 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v1, s[8:11], 0 offen offset:176 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v1, s[8:11], 0 offen offset:192 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v1, s[8:11], 0 offen offset:208 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v1, s[8:11], 0 offen offset:224 -; GISEL-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v1, s[8:11], 0 offen offset:240 -; GISEL-GFX942-NEXT: v_add_u32_e32 v62, s12, v0 -; GISEL-GFX942-NEXT: v_add_co_u32_e32 v0, vcc, 0x100, v0 -; GISEL-GFX942-NEXT: s_xor_b64 s[2:3], vcc, -1 -; GISEL-GFX942-NEXT: s_xor_b64 s[2:3], s[2:3], -1 -; GISEL-GFX942-NEXT: s_and_b64 vcc, s[2:3], exec +; GISEL-GFX942-NEXT: v_add_u32_e32 v62, s0, v1 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v62, s[8:11], 0 offen +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v62, s[8:11], 0 offen offset:16 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v62, s[8:11], 0 offen offset:32 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v62, s[8:11], 0 offen offset:48 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v62, s[8:11], 0 offen offset:64 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v62, s[8:11], 0 offen offset:80 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v62, s[8:11], 0 offen offset:96 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v62, s[8:11], 0 offen offset:112 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v62, s[8:11], 0 offen offset:128 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v62, s[8:11], 0 offen offset:144 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v62, s[8:11], 0 offen offset:160 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v62, s[8:11], 0 offen offset:176 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v62, s[8:11], 0 offen offset:192 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v62, s[8:11], 0 offen offset:208 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v62, s[8:11], 0 offen offset:224 +; GISEL-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v62, s[8:11], 0 offen offset:240 +; GISEL-GFX942-NEXT: v_add_u32_e32 v63, s12, v1 +; GISEL-GFX942-NEXT: v_add_u32_e32 v1, 0x100, v1 +; GISEL-GFX942-NEXT: v_cmp_lt_u32_e32 vcc, v1, v0 ; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) -; GISEL-GFX942-NEXT: v_accvgpr_read_b32 v63, a3 ; Reload Reuse -; GISEL-GFX942-NEXT: scratch_store_dwordx3 off, a[0:2], off ; 12-byte Folded Spill -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[4:7], 0 offen -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v62, s[4:7], 0 offen offset:16 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v62, s[4:7], 0 offen offset:32 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v62, s[4:7], 0 offen offset:48 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v62, s[4:7], 0 offen offset:64 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v62, s[4:7], 0 offen offset:80 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v62, s[4:7], 0 offen offset:96 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v62, s[4:7], 0 offen offset:112 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v62, s[4:7], 0 offen offset:128 -; 
GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v62, s[4:7], 0 offen offset:144 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v62, s[4:7], 0 offen offset:160 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v62, s[4:7], 0 offen offset:176 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v62, s[4:7], 0 offen offset:192 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v62, s[4:7], 0 offen offset:208 -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v62, s[4:7], 0 offen offset:224 -; GISEL-GFX942-NEXT: scratch_load_dwordx3 v[2:4], off, off ; 12-byte Folded Reload +; GISEL-GFX942-NEXT: scratch_store_dwordx4 off, a[0:3], off ; 16-byte Folded Spill +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v63, s[4:7], 0 offen offset:16 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v63, s[4:7], 0 offen offset:32 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v63, s[4:7], 0 offen offset:48 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v63, s[4:7], 0 offen offset:64 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v63, s[4:7], 0 offen offset:80 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v63, s[4:7], 0 offen offset:96 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v63, s[4:7], 0 offen offset:112 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v63, s[4:7], 0 offen offset:128 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v63, s[4:7], 0 offen offset:144 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v63, s[4:7], 0 offen offset:160 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v63, s[4:7], 0 offen offset:176 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v63, s[4:7], 0 offen offset:192 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v63, s[4:7], 0 offen offset:208 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v63, s[4:7], 0 offen offset:224 +; GISEL-GFX942-NEXT: scratch_load_dwordx4 v[2:5], off, off ; 16-byte Folded Reload ; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) -; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[4:7], 0 offen offset:240 +; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen offset:240 ; GISEL-GFX942-NEXT: s_cbranch_vccnz .LBB1_1 ; GISEL-GFX942-NEXT: ; %bb.2: ; %memcpy-split ; GISEL-GFX942-NEXT: s_endpgm @@ -1037,8 +1059,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; GISEL-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1 ; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0 ; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v65, s8, v0 -; GISEL-GFX1100-NEXT: v_add_co_u32 v0, s1, 0x100, v0 -; GISEL-GFX1100-NEXT: s_xor_b32 s1, s1, -1 +; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v0, 0x100, v0 ; GISEL-GFX1100-NEXT: s_clause 0xf ; GISEL-GFX1100-NEXT: buffer_load_b128 v[1:4], v61, s[4:7], 0 offen ; GISEL-GFX1100-NEXT: buffer_load_b128 v[5:8], v61, s[4:7], 0 offen offset:16 @@ -1056,7 +1077,6 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp ; GISEL-GFX1100-NEXT: buffer_load_b128 v[53:56], v61, s[4:7], 0 offen offset:208 ; GISEL-GFX1100-NEXT: buffer_load_b128 v[57:60], v61, s[4:7], 0 offen offset:224 ; GISEL-GFX1100-NEXT: buffer_load_b128 v[61:64], v61, s[4:7], 0 offen offset:240 -; GISEL-GFX1100-NEXT: s_xor_b32 s1, s1, -1 ; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(15) ; GISEL-GFX1100-NEXT: buffer_store_b128 v[1:4], v65, s[12:15], 0 offen ; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(14) @@ -1089,7 +1109,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr 
addrspace(7) %src, ptr addrsp ; GISEL-GFX1100-NEXT: buffer_store_b128 v[57:60], v65, s[12:15], 0 offen offset:224 ; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0) ; GISEL-GFX1100-NEXT: buffer_store_b128 v[61:64], v65, s[12:15], 0 offen offset:240 -; GISEL-GFX1100-NEXT: s_and_b32 vcc_lo, exec_lo, s1 +; GISEL-GFX1100-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x100, v0 ; GISEL-GFX1100-NEXT: s_cbranch_vccnz .LBB1_1 ; GISEL-GFX1100-NEXT: ; %bb.2: ; %memcpy-split ; GISEL-GFX1100-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll index 306fe33bfb7ac..8e12e7e03947b 100644 --- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll +++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll @@ -2523,7 +2523,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3i16() #0 { ; CI-NEXT: s_add_u32 s36, s36, s3 ; CI-NEXT: s_mov_b32 s3, 0xf000 ; CI-NEXT: s_mov_b32 s2, -1 -; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0 +; CI-NEXT: buffer_load_dwordx2 v[3:4], off, s[0:3], 0 ; CI-NEXT: s_addc_u32 s37, s37, 0 ; CI-NEXT: s_mov_b64 s[6:7], s[0:1] ; CI-NEXT: s_mov_b64 s[0:1], s[36:37] @@ -2533,9 +2533,9 @@ define amdgpu_kernel void @test_call_external_void_func_v3i16() #0 { ; CI-NEXT: s_mov_b64 s[2:3], s[38:39] ; CI-NEXT: s_mov_b32 s32, 0 ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_alignbit_b32 v1, v3, v2, 16 -; CI-NEXT: v_mov_b32_e32 v0, v2 -; CI-NEXT: v_mov_b32_e32 v2, v3 +; CI-NEXT: v_lshr_b64 v[1:2], v[3:4], 16 +; CI-NEXT: v_mov_b32_e32 v0, v3 +; CI-NEXT: v_mov_b32_e32 v2, v4 ; CI-NEXT: s_swappc_b64 s[30:31], s[4:5] ; CI-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll index b71885b54b5a2..51652a09863e0 100644 --- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll +++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll @@ -231,7 +231,7 @@ define amdgpu_kernel void @sadd64ri(ptr addrspace(1) %out, i64 %a) { ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], lit64(0x123456789876) +; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], 0x123456789876 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] @@ -434,7 +434,7 @@ define amdgpu_kernel void @vadd64ri(ptr addrspace(1) %out) { ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], lit64(0x123456789876), v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], 0x123456789876, v[0:1] ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: global_store_b64 v1, v[2:3], s[0:1] ; GFX1250-NEXT: s_endpgm @@ -1210,7 +1210,7 @@ define amdgpu_kernel void @ssub64ri(ptr addrspace(1) %out, i64 %a) { ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_sub_nc_u64 s[2:3], lit64(0x123456789876), s[2:3] +; GFX1250-NEXT: s_sub_nc_u64 s[2:3], 0x123456789876, s[2:3] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] @@ -1413,7 +1413,7 @@ define amdgpu_kernel void @vsub64ri(ptr addrspace(1) %out) { ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_sub_nc_u64_e32 v[2:3], 
lit64(0x123456789876), v[0:1] +; GFX1250-NEXT: v_sub_nc_u64_e32 v[2:3], 0x123456789876, v[0:1] ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: global_store_b64 v1, v[2:3], s[0:1] ; GFX1250-NEXT: s_endpgm @@ -1973,9 +1973,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car ; GCN-ISEL-LABEL: name: sudiv64 ; GCN-ISEL-LABEL: body: ; GCN-ISEL-LABEL: bb.3 -; GCN-ISEL: %[[CARRY:[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 +; GCN-ISEL: %[[CARRY:[0-9]+]]:sreg_64_xexec = S_UADDO_PSEUDO ; GCN-ISEL: S_ADD_CO_PSEUDO %{{[0-9]+}}, killed %{{[0-9]+}}, killed %[[CARRY]] -; GCN-ISEL: %[[CARRY:[0-9]+]]:sreg_64_xexec = V_SUB_CO_U32_e64 +; GCN-ISEL: %[[CARRY:[0-9]+]]:sreg_64_xexec = S_USUBO_PSEUDO ; GCN-ISEL: S_SUB_CO_PSEUDO killed %{{[0-9]+}}, %{{[0-9]+}}, %[[CARRY]] define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { @@ -2029,7 +2029,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; CISI-NEXT: v_mul_lo_u32 v4, s1, v0 ; CISI-NEXT: v_add_i32_e32 v2, vcc, v2, v3 ; CISI-NEXT: v_mul_lo_u32 v3, s0, v0 -; CISI-NEXT: v_add_i32_e32 v2, vcc, v2, v4 +; CISI-NEXT: v_add_i32_e32 v2, vcc, v4, v2 ; CISI-NEXT: v_mul_lo_u32 v6, v0, v2 ; CISI-NEXT: v_mul_hi_u32 v7, v0, v3 ; CISI-NEXT: v_mul_hi_u32 v8, v0, v2 @@ -2132,18 +2132,18 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; ; VI-LABEL: sudiv64: ; VI: ; %bb.0: -; VI-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 -; VI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34 +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3] -; VI-NEXT: s_mov_b32 s0, 0 -; VI-NEXT: s_cmp_lg_u64 s[0:1], 0 -; VI-NEXT: s_cbranch_scc0 .LBB16_4 +; VI-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5] +; VI-NEXT: s_mov_b32 s6, 0 +; VI-NEXT: s_cmp_lg_u64 s[6:7], 0 +; VI-NEXT: s_cbranch_scc0 .LBB16_3 ; VI-NEXT: ; %bb.1: -; VI-NEXT: v_cvt_f32_u32_e32 v0, s2 -; VI-NEXT: v_cvt_f32_u32_e32 v1, s3 -; VI-NEXT: s_sub_u32 s4, 0, s2 -; VI-NEXT: s_subb_u32 s5, 0, s3 +; VI-NEXT: v_cvt_f32_u32_e32 v0, s4 +; VI-NEXT: v_cvt_f32_u32_e32 v1, s5 +; VI-NEXT: s_sub_u32 s8, 0, s4 +; VI-NEXT: s_subb_u32 s9, 0, s5 ; VI-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; VI-NEXT: v_rcp_f32_e32 v0, v0 ; VI-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -2152,17 +2152,17 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; VI-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 ; VI-NEXT: v_cvt_u32_f32_e32 v4, v1 ; VI-NEXT: v_cvt_u32_f32_e32 v5, v0 -; VI-NEXT: v_mul_lo_u32 v2, s4, v4 -; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s4, v5, 0 -; VI-NEXT: v_mul_lo_u32 v3, s5, v5 +; VI-NEXT: v_mul_lo_u32 v2, s8, v4 +; VI-NEXT: v_mad_u64_u32 v[0:1], s[6:7], s8, v5, 0 +; VI-NEXT: v_mul_lo_u32 v3, s9, v5 ; VI-NEXT: v_add_u32_e32 v1, vcc, v1, v2 ; VI-NEXT: v_add_u32_e32 v3, vcc, v1, v3 ; VI-NEXT: v_mul_hi_u32 v6, v5, v0 -; VI-NEXT: v_mad_u64_u32 v[1:2], s[0:1], v5, v3, 0 +; VI-NEXT: v_mad_u64_u32 v[1:2], s[6:7], v5, v3, 0 ; VI-NEXT: v_add_u32_e32 v6, vcc, v6, v1 -; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v4, v0, 0 +; VI-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v4, v0, 0 ; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v2, vcc -; VI-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v4, v3, 0 +; VI-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v4, v3, 0 ; VI-NEXT: v_add_u32_e32 v0, vcc, v6, v0 ; VI-NEXT: v_addc_u32_e32 v0, vcc, v7, v1, vcc ; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc @@ -2170,15 +2170,15 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 
%x, i64 %y) { ; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; VI-NEXT: v_add_u32_e32 v6, vcc, v5, v0 ; VI-NEXT: v_addc_u32_e32 v7, vcc, v4, v1, vcc -; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s4, v6, 0 -; VI-NEXT: v_mul_lo_u32 v4, s4, v7 -; VI-NEXT: v_mul_lo_u32 v5, s5, v6 +; VI-NEXT: v_mad_u64_u32 v[0:1], s[6:7], s8, v6, 0 +; VI-NEXT: v_mul_lo_u32 v4, s8, v7 +; VI-NEXT: v_mul_lo_u32 v5, s9, v6 ; VI-NEXT: v_mul_hi_u32 v8, v6, v0 -; VI-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v7, v0, 0 +; VI-NEXT: v_mad_u64_u32 v[2:3], s[6:7], v7, v0, 0 ; VI-NEXT: v_add_u32_e32 v1, vcc, v4, v1 -; VI-NEXT: v_add_u32_e32 v1, vcc, v1, v5 -; VI-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v6, v1, 0 -; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v7, v1, 0 +; VI-NEXT: v_add_u32_e32 v1, vcc, v5, v1 +; VI-NEXT: v_mad_u64_u32 v[4:5], s[6:7], v6, v1, 0 +; VI-NEXT: v_mad_u64_u32 v[0:1], s[6:7], v7, v1, 0 ; VI-NEXT: v_add_u32_e32 v4, vcc, v8, v4 ; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc ; VI-NEXT: v_add_u32_e32 v2, vcc, v4, v2 @@ -2188,119 +2188,117 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; VI-NEXT: v_add_u32_e32 v2, vcc, v6, v0 ; VI-NEXT: v_addc_u32_e32 v3, vcc, v7, v1, vcc -; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s10, v3, 0 -; VI-NEXT: v_mul_hi_u32 v4, s10, v2 -; VI-NEXT: v_readfirstlane_b32 s4, v1 -; VI-NEXT: v_readfirstlane_b32 s5, v0 -; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s11, v3, 0 -; VI-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s11, v2, 0 -; VI-NEXT: v_readfirstlane_b32 s6, v4 -; VI-NEXT: s_add_u32 s0, s6, s5 -; VI-NEXT: s_addc_u32 s1, 0, s4 -; VI-NEXT: v_readfirstlane_b32 s6, v2 -; VI-NEXT: v_readfirstlane_b32 s5, v3 -; VI-NEXT: s_add_u32 s0, s0, s6 -; VI-NEXT: v_readfirstlane_b32 s4, v1 -; VI-NEXT: s_addc_u32 s0, s1, s5 -; VI-NEXT: s_addc_u32 s6, s4, 0 -; VI-NEXT: v_readfirstlane_b32 s1, v0 -; VI-NEXT: s_add_u32 s7, s0, s1 -; VI-NEXT: v_mov_b32_e32 v2, s7 -; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s2, v2, 0 -; VI-NEXT: s_addc_u32 s6, 0, s6 -; VI-NEXT: s_mul_i32 s0, s2, s6 -; VI-NEXT: v_readfirstlane_b32 s1, v1 -; VI-NEXT: s_add_i32 s0, s1, s0 -; VI-NEXT: s_mul_i32 s1, s3, s7 -; VI-NEXT: s_add_i32 s12, s0, s1 -; VI-NEXT: s_sub_i32 s0, s11, s12 -; VI-NEXT: v_sub_u32_e32 v0, vcc, s10, v0 -; VI-NEXT: s_cmp_lg_u64 vcc, 0 -; VI-NEXT: s_subb_u32 s13, s0, s3 -; VI-NEXT: v_subrev_u32_e64 v1, s[0:1], s2, v0 -; VI-NEXT: s_cmp_lg_u64 s[0:1], 0 -; VI-NEXT: s_subb_u32 s13, s13, 0 -; VI-NEXT: s_cmp_ge_u32 s13, s3 -; VI-NEXT: s_cselect_b32 s14, -1, 0 -; VI-NEXT: v_cmp_le_u32_e64 s[0:1], s2, v1 -; VI-NEXT: s_cmp_eq_u32 s13, s3 -; VI-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1] -; VI-NEXT: v_mov_b32_e32 v3, s14 -; VI-NEXT: s_cselect_b64 s[0:1], -1, 0 -; VI-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1] -; VI-NEXT: s_add_u32 s0, s7, 1 -; VI-NEXT: s_addc_u32 s13, s6, 0 -; VI-NEXT: s_add_u32 s1, s7, 2 -; VI-NEXT: s_addc_u32 s7, s6, 0 -; VI-NEXT: v_mov_b32_e32 v3, s0 -; VI-NEXT: v_mov_b32_e32 v4, s1 -; VI-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1 -; VI-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[0:1] -; VI-NEXT: v_mov_b32_e32 v1, s13 -; VI-NEXT: v_mov_b32_e32 v4, s7 -; VI-NEXT: s_cmp_lg_u64 vcc, 0 -; VI-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[0:1] -; VI-NEXT: s_subb_u32 s0, s11, s12 -; VI-NEXT: s_cmp_ge_u32 s0, s3 -; VI-NEXT: s_cselect_b32 s1, -1, 0 -; VI-NEXT: v_cmp_le_u32_e32 vcc, s2, v0 -; VI-NEXT: s_cmp_eq_u32 s0, s3 -; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc -; VI-NEXT: v_mov_b32_e32 v4, s1 -; VI-NEXT: s_cselect_b64 vcc, -1, 0 -; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; 
VI-NEXT: v_mov_b32_e32 v4, s6 -; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; VI-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc -; VI-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc -; VI-NEXT: s_cbranch_execnz .LBB16_3 +; VI-NEXT: v_mad_u64_u32 v[0:1], s[6:7], s2, v3, 0 +; VI-NEXT: v_mul_hi_u32 v4, s2, v2 +; VI-NEXT: v_readfirstlane_b32 s8, v1 +; VI-NEXT: v_readfirstlane_b32 s9, v0 +; VI-NEXT: v_mad_u64_u32 v[0:1], s[6:7], s3, v3, 0 +; VI-NEXT: v_mad_u64_u32 v[2:3], s[6:7], s3, v2, 0 +; VI-NEXT: v_readfirstlane_b32 s10, v4 +; VI-NEXT: s_add_u32 s6, s10, s9 +; VI-NEXT: s_addc_u32 s7, 0, s8 +; VI-NEXT: v_readfirstlane_b32 s10, v2 +; VI-NEXT: v_readfirstlane_b32 s9, v3 +; VI-NEXT: s_add_u32 s6, s6, s10 +; VI-NEXT: v_readfirstlane_b32 s8, v1 +; VI-NEXT: s_addc_u32 s6, s7, s9 +; VI-NEXT: s_addc_u32 s8, s8, 0 +; VI-NEXT: v_readfirstlane_b32 s7, v0 +; VI-NEXT: s_add_u32 s12, s6, s7 +; VI-NEXT: v_mov_b32_e32 v0, s12 +; VI-NEXT: v_mad_u64_u32 v[0:1], s[6:7], s4, v0, 0 +; VI-NEXT: s_addc_u32 s13, 0, s8 +; VI-NEXT: s_mul_i32 s8, s4, s13 +; VI-NEXT: v_readfirstlane_b32 s9, v1 +; VI-NEXT: s_add_i32 s8, s9, s8 +; VI-NEXT: s_mul_i32 s9, s5, s12 +; VI-NEXT: s_add_i32 s14, s8, s9 +; VI-NEXT: s_sub_i32 s10, s3, s14 +; VI-NEXT: v_readfirstlane_b32 s8, v0 +; VI-NEXT: s_sub_u32 s15, s2, s8 +; VI-NEXT: s_cselect_b64 s[8:9], -1, 0 +; VI-NEXT: s_cmp_lg_u64 s[8:9], 0 +; VI-NEXT: s_subb_u32 s16, s10, s5 +; VI-NEXT: s_sub_u32 s17, s15, s4 +; VI-NEXT: s_cselect_b64 s[10:11], -1, 0 +; VI-NEXT: s_cmp_lg_u64 s[10:11], 0 +; VI-NEXT: s_subb_u32 s10, s16, 0 +; VI-NEXT: s_cmp_ge_u32 s10, s5 +; VI-NEXT: s_cselect_b32 s11, -1, 0 +; VI-NEXT: s_cmp_ge_u32 s17, s4 +; VI-NEXT: s_cselect_b32 s16, -1, 0 +; VI-NEXT: s_cmp_eq_u32 s10, s5 +; VI-NEXT: s_cselect_b32 s10, s16, s11 +; VI-NEXT: s_add_u32 s11, s12, 1 +; VI-NEXT: s_addc_u32 s16, s13, 0 +; VI-NEXT: s_add_u32 s17, s12, 2 +; VI-NEXT: s_addc_u32 s18, s13, 0 +; VI-NEXT: s_cmp_lg_u32 s10, 0 +; VI-NEXT: s_cselect_b32 s10, s17, s11 +; VI-NEXT: s_cselect_b32 s11, s18, s16 +; VI-NEXT: s_cmp_lg_u64 s[8:9], 0 +; VI-NEXT: s_subb_u32 s3, s3, s14 +; VI-NEXT: s_cmp_ge_u32 s3, s5 +; VI-NEXT: s_cselect_b32 s8, -1, 0 +; VI-NEXT: s_cmp_ge_u32 s15, s4 +; VI-NEXT: s_cselect_b32 s9, -1, 0 +; VI-NEXT: s_cmp_eq_u32 s3, s5 +; VI-NEXT: s_cselect_b32 s3, s9, s8 +; VI-NEXT: s_cmp_lg_u32 s3, 0 +; VI-NEXT: s_cselect_b32 s9, s11, s13 +; VI-NEXT: s_cselect_b32 s8, s10, s12 +; VI-NEXT: s_cbranch_execnz .LBB16_4 ; VI-NEXT: .LBB16_2: -; VI-NEXT: v_cvt_f32_u32_e32 v0, s2 -; VI-NEXT: s_sub_i32 s0, 0, s2 +; VI-NEXT: v_cvt_f32_u32_e32 v0, s4 +; VI-NEXT: s_sub_i32 s3, 0, s4 ; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; VI-NEXT: v_cvt_u32_f32_e32 v0, v0 -; VI-NEXT: v_mul_lo_u32 v1, s0, v0 +; VI-NEXT: v_mul_lo_u32 v1, s3, v0 ; VI-NEXT: v_mul_hi_u32 v1, v0, v1 ; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1 -; VI-NEXT: v_mul_hi_u32 v0, s10, v0 -; VI-NEXT: v_readfirstlane_b32 s0, v0 -; VI-NEXT: s_mul_i32 s0, s0, s2 -; VI-NEXT: s_sub_i32 s0, s10, s0 -; VI-NEXT: s_sub_i32 s1, s0, s2 +; VI-NEXT: v_mul_hi_u32 v0, s2, v0 +; VI-NEXT: v_readfirstlane_b32 s3, v0 +; VI-NEXT: s_mul_i32 s3, s3, s4 +; VI-NEXT: s_sub_i32 s2, s2, s3 +; VI-NEXT: s_sub_i32 s3, s2, s4 ; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0 -; VI-NEXT: s_cmp_ge_u32 s0, s2 +; VI-NEXT: s_cmp_ge_u32 s2, s4 ; VI-NEXT: s_cselect_b64 vcc, -1, 0 -; VI-NEXT: s_cselect_b32 s0, s1, s0 +; VI-NEXT: s_cselect_b32 s2, s3, s2 ; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0 -; VI-NEXT: s_cmp_ge_u32 s0, s2 +; VI-NEXT: s_cmp_ge_u32 s2, s4 
; VI-NEXT: s_cselect_b64 vcc, -1, 0 ; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: s_branch .LBB16_5 ; VI-NEXT: .LBB16_3: -; VI-NEXT: v_mov_b32_e32 v2, s8 -; VI-NEXT: v_mov_b32_e32 v3, s9 +; VI-NEXT: ; implicit-def: $sgpr8_sgpr9 +; VI-NEXT: s_branch .LBB16_2 +; VI-NEXT: .LBB16_4: +; VI-NEXT: v_mov_b32_e32 v0, s8 +; VI-NEXT: v_mov_b32_e32 v1, s9 +; VI-NEXT: .LBB16_5: +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; VI-NEXT: s_endpgm -; VI-NEXT: .LBB16_4: -; VI-NEXT: ; implicit-def: $vgpr0_vgpr1 -; VI-NEXT: s_branch .LBB16_2 ; ; GFX9-LABEL: sudiv64: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 -; GFX9-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34 +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3] -; GFX9-NEXT: s_mov_b32 s0, 0 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX9-NEXT: s_or_b64 s[4:5], s[2:3], s[6:7] +; GFX9-NEXT: s_mov_b32 s4, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX9-NEXT: s_cbranch_scc0 .LBB16_4 ; GFX9-NEXT: ; %bb.1: -; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GFX9-NEXT: s_sub_u32 s0, 0, s2 -; GFX9-NEXT: s_subb_u32 s1, 0, s3 +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7 +; GFX9-NEXT: s_sub_u32 s10, 0, s6 +; GFX9-NEXT: s_subb_u32 s11, 0, s7 ; GFX9-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GFX9-NEXT: v_rcp_f32_e32 v0, v0 ; GFX9-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -2309,166 +2307,157 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GFX9-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 ; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX9-NEXT: v_readfirstlane_b32 s6, v1 -; GFX9-NEXT: v_readfirstlane_b32 s7, v0 -; GFX9-NEXT: s_mul_i32 s12, s0, s6 -; GFX9-NEXT: s_mul_hi_u32 s14, s0, s7 -; GFX9-NEXT: s_mul_i32 s13, s1, s7 -; GFX9-NEXT: s_add_i32 s12, s14, s12 -; GFX9-NEXT: s_add_i32 s12, s12, s13 -; GFX9-NEXT: s_mul_i32 s15, s0, s7 -; GFX9-NEXT: s_mul_hi_u32 s13, s7, s12 -; GFX9-NEXT: s_mul_i32 s14, s7, s12 -; GFX9-NEXT: s_mul_hi_u32 s7, s7, s15 -; GFX9-NEXT: s_add_u32 s7, s7, s14 +; GFX9-NEXT: v_readfirstlane_b32 s12, v1 +; GFX9-NEXT: v_readfirstlane_b32 s8, v0 +; GFX9-NEXT: s_mul_i32 s9, s10, s12 +; GFX9-NEXT: s_mul_hi_u32 s14, s10, s8 +; GFX9-NEXT: s_mul_i32 s13, s11, s8 +; GFX9-NEXT: s_add_i32 s9, s14, s9 +; GFX9-NEXT: s_add_i32 s9, s9, s13 +; GFX9-NEXT: s_mul_i32 s15, s10, s8 +; GFX9-NEXT: s_mul_i32 s14, s8, s9 +; GFX9-NEXT: s_mul_hi_u32 s16, s8, s15 +; GFX9-NEXT: s_mul_hi_u32 s13, s8, s9 +; GFX9-NEXT: s_add_u32 s14, s16, s14 ; GFX9-NEXT: s_addc_u32 s13, 0, s13 -; GFX9-NEXT: s_mul_hi_u32 s16, s6, s15 -; GFX9-NEXT: s_mul_i32 s15, s6, s15 -; GFX9-NEXT: s_add_u32 s7, s7, s15 -; GFX9-NEXT: s_mul_hi_u32 s14, s6, s12 -; GFX9-NEXT: s_addc_u32 s7, s13, s16 -; GFX9-NEXT: s_addc_u32 s13, s14, 0 -; GFX9-NEXT: s_mul_i32 s12, s6, s12 -; GFX9-NEXT: s_add_u32 s7, s7, s12 -; GFX9-NEXT: s_addc_u32 s12, 0, s13 -; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s7, v0 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s6, s6, s12 -; GFX9-NEXT: v_readfirstlane_b32 s12, v0 -; GFX9-NEXT: s_mul_i32 s7, s0, s6 -; GFX9-NEXT: s_mul_hi_u32 s13, s0, s12 -; GFX9-NEXT: s_add_i32 s7, s13, s7 -; GFX9-NEXT: s_mul_i32 s1, s1, s12 -; GFX9-NEXT: s_add_i32 s7, s7, s1 -; GFX9-NEXT: s_mul_i32 s0, s0, s12 -; GFX9-NEXT: s_mul_hi_u32 s13, s6, s0 
-; GFX9-NEXT: s_mul_i32 s14, s6, s0 -; GFX9-NEXT: s_mul_i32 s16, s12, s7 -; GFX9-NEXT: s_mul_hi_u32 s0, s12, s0 -; GFX9-NEXT: s_mul_hi_u32 s15, s12, s7 -; GFX9-NEXT: s_add_u32 s0, s0, s16 -; GFX9-NEXT: s_addc_u32 s12, 0, s15 -; GFX9-NEXT: s_add_u32 s0, s0, s14 -; GFX9-NEXT: s_mul_hi_u32 s1, s6, s7 -; GFX9-NEXT: s_addc_u32 s0, s12, s13 -; GFX9-NEXT: s_addc_u32 s1, s1, 0 -; GFX9-NEXT: s_mul_i32 s7, s6, s7 -; GFX9-NEXT: s_add_u32 s0, s0, s7 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_addc_u32 s0, s6, s1 -; GFX9-NEXT: v_readfirstlane_b32 s7, v0 -; GFX9-NEXT: s_mul_i32 s6, s10, s0 -; GFX9-NEXT: s_mul_hi_u32 s12, s10, s7 -; GFX9-NEXT: s_mul_hi_u32 s1, s10, s0 -; GFX9-NEXT: s_add_u32 s6, s12, s6 -; GFX9-NEXT: s_addc_u32 s1, 0, s1 -; GFX9-NEXT: s_mul_hi_u32 s13, s11, s7 -; GFX9-NEXT: s_mul_i32 s7, s11, s7 -; GFX9-NEXT: s_add_u32 s6, s6, s7 -; GFX9-NEXT: s_mul_hi_u32 s12, s11, s0 -; GFX9-NEXT: s_addc_u32 s1, s1, s13 -; GFX9-NEXT: s_addc_u32 s6, s12, 0 -; GFX9-NEXT: s_mul_i32 s0, s11, s0 -; GFX9-NEXT: s_add_u32 s7, s1, s0 -; GFX9-NEXT: s_addc_u32 s6, 0, s6 -; GFX9-NEXT: s_mul_i32 s0, s2, s6 -; GFX9-NEXT: s_mul_hi_u32 s1, s2, s7 -; GFX9-NEXT: s_add_i32 s0, s1, s0 -; GFX9-NEXT: s_mul_i32 s1, s3, s7 -; GFX9-NEXT: s_add_i32 s12, s0, s1 -; GFX9-NEXT: s_mul_i32 s1, s2, s7 -; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: s_sub_i32 s0, s11, s12 -; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s10, v0 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: s_subb_u32 s13, s0, s3 -; GFX9-NEXT: v_subrev_co_u32_e64 v1, s[0:1], s2, v0 -; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: s_subb_u32 s13, s13, 0 -; GFX9-NEXT: s_cmp_ge_u32 s13, s3 -; GFX9-NEXT: s_cselect_b32 s14, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s2, v1 -; GFX9-NEXT: s_cmp_eq_u32 s13, s3 -; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v2, s14 -; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, v1, s[0:1] -; GFX9-NEXT: s_add_u32 s0, s7, 1 -; GFX9-NEXT: s_addc_u32 s13, s6, 0 -; GFX9-NEXT: s_add_u32 s1, s7, 2 -; GFX9-NEXT: s_addc_u32 s14, s6, 0 -; GFX9-NEXT: v_mov_b32_e32 v2, s0 -; GFX9-NEXT: v_mov_b32_e32 v3, s1 -; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1 -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GFX9-NEXT: v_mov_b32_e32 v1, s13 -; GFX9-NEXT: v_mov_b32_e32 v3, s14 -; GFX9-NEXT: s_cmp_lg_u64 vcc, 0 -; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1] -; GFX9-NEXT: s_subb_u32 s0, s11, s12 -; GFX9-NEXT: s_cmp_ge_u32 s0, s3 -; GFX9-NEXT: s_cselect_b32 s1, -1, 0 -; GFX9-NEXT: v_cmp_le_u32_e32 vcc, s2, v0 -; GFX9-NEXT: s_cmp_eq_u32 s0, s3 -; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc -; GFX9-NEXT: v_mov_b32_e32 v3, s1 -; GFX9-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc -; GFX9-NEXT: v_mov_b32_e32 v3, s6 -; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; GFX9-NEXT: v_mov_b32_e32 v0, s7 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GFX9-NEXT: s_mul_hi_u32 s17, s12, s15 +; GFX9-NEXT: s_mul_i32 s15, s12, s15 +; GFX9-NEXT: s_add_u32 s14, s14, s15 +; GFX9-NEXT: s_mul_hi_u32 s16, s12, s9 +; GFX9-NEXT: s_addc_u32 s13, s13, s17 +; GFX9-NEXT: s_addc_u32 s14, s16, 0 +; GFX9-NEXT: s_mul_i32 s9, s12, s9 +; GFX9-NEXT: s_add_u32 s9, s13, s9 +; GFX9-NEXT: s_addc_u32 s13, 0, s14 +; GFX9-NEXT: s_add_u32 s14, s8, s9 +; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX9-NEXT: s_addc_u32 s12, s12, s13 +; GFX9-NEXT: 
s_mul_i32 s8, s10, s12 +; GFX9-NEXT: s_mul_hi_u32 s9, s10, s14 +; GFX9-NEXT: s_add_i32 s8, s9, s8 +; GFX9-NEXT: s_mul_i32 s11, s11, s14 +; GFX9-NEXT: s_add_i32 s8, s8, s11 +; GFX9-NEXT: s_mul_i32 s10, s10, s14 +; GFX9-NEXT: s_mul_hi_u32 s11, s12, s10 +; GFX9-NEXT: s_mul_i32 s13, s12, s10 +; GFX9-NEXT: s_mul_i32 s16, s14, s8 +; GFX9-NEXT: s_mul_hi_u32 s10, s14, s10 +; GFX9-NEXT: s_mul_hi_u32 s15, s14, s8 +; GFX9-NEXT: s_add_u32 s10, s10, s16 +; GFX9-NEXT: s_addc_u32 s15, 0, s15 +; GFX9-NEXT: s_add_u32 s10, s10, s13 +; GFX9-NEXT: s_mul_hi_u32 s9, s12, s8 +; GFX9-NEXT: s_addc_u32 s10, s15, s11 +; GFX9-NEXT: s_addc_u32 s9, s9, 0 +; GFX9-NEXT: s_mul_i32 s8, s12, s8 +; GFX9-NEXT: s_add_u32 s8, s10, s8 +; GFX9-NEXT: s_addc_u32 s10, 0, s9 +; GFX9-NEXT: s_add_u32 s11, s14, s8 +; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX9-NEXT: s_addc_u32 s8, s12, s10 +; GFX9-NEXT: s_mul_i32 s10, s2, s8 +; GFX9-NEXT: s_mul_hi_u32 s12, s2, s11 +; GFX9-NEXT: s_mul_hi_u32 s9, s2, s8 +; GFX9-NEXT: s_add_u32 s10, s12, s10 +; GFX9-NEXT: s_addc_u32 s9, 0, s9 +; GFX9-NEXT: s_mul_hi_u32 s13, s3, s11 +; GFX9-NEXT: s_mul_i32 s11, s3, s11 +; GFX9-NEXT: s_add_u32 s10, s10, s11 +; GFX9-NEXT: s_mul_hi_u32 s12, s3, s8 +; GFX9-NEXT: s_addc_u32 s9, s9, s13 +; GFX9-NEXT: s_addc_u32 s10, s12, 0 +; GFX9-NEXT: s_mul_i32 s8, s3, s8 +; GFX9-NEXT: s_add_u32 s12, s9, s8 +; GFX9-NEXT: s_addc_u32 s13, 0, s10 +; GFX9-NEXT: s_mul_i32 s8, s6, s13 +; GFX9-NEXT: s_mul_hi_u32 s9, s6, s12 +; GFX9-NEXT: s_add_i32 s8, s9, s8 +; GFX9-NEXT: s_mul_i32 s9, s7, s12 +; GFX9-NEXT: s_add_i32 s14, s8, s9 +; GFX9-NEXT: s_sub_i32 s10, s3, s14 +; GFX9-NEXT: s_mul_i32 s8, s6, s12 +; GFX9-NEXT: s_sub_u32 s15, s2, s8 +; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX9-NEXT: s_subb_u32 s16, s10, s7 +; GFX9-NEXT: s_sub_u32 s17, s15, s6 +; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GFX9-NEXT: s_subb_u32 s10, s16, 0 +; GFX9-NEXT: s_cmp_ge_u32 s10, s7 +; GFX9-NEXT: s_cselect_b32 s11, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s17, s6 +; GFX9-NEXT: s_cselect_b32 s16, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s10, s7 +; GFX9-NEXT: s_cselect_b32 s10, s16, s11 +; GFX9-NEXT: s_add_u32 s11, s12, 1 +; GFX9-NEXT: s_addc_u32 s16, s13, 0 +; GFX9-NEXT: s_add_u32 s17, s12, 2 +; GFX9-NEXT: s_addc_u32 s18, s13, 0 +; GFX9-NEXT: s_cmp_lg_u32 s10, 0 +; GFX9-NEXT: s_cselect_b32 s10, s17, s11 +; GFX9-NEXT: s_cselect_b32 s11, s18, s16 +; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX9-NEXT: s_subb_u32 s3, s3, s14 +; GFX9-NEXT: s_cmp_ge_u32 s3, s7 +; GFX9-NEXT: s_cselect_b32 s8, -1, 0 +; GFX9-NEXT: s_cmp_ge_u32 s15, s6 +; GFX9-NEXT: s_cselect_b32 s9, -1, 0 +; GFX9-NEXT: s_cmp_eq_u32 s3, s7 +; GFX9-NEXT: s_cselect_b32 s3, s9, s8 +; GFX9-NEXT: s_cmp_lg_u32 s3, 0 +; GFX9-NEXT: s_cselect_b32 s9, s11, s13 +; GFX9-NEXT: s_cselect_b32 s8, s10, s12 ; GFX9-NEXT: s_cbranch_execnz .LBB16_3 ; GFX9-NEXT: .LBB16_2: -; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX9-NEXT: s_sub_i32 s0, 0, s2 -; GFX9-NEXT: s_mov_b32 s1, 0 +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX9-NEXT: s_sub_i32 s3, 0, s6 +; GFX9-NEXT: s_mov_b32 s9, 0 ; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX9-NEXT: v_readfirstlane_b32 s3, v0 -; GFX9-NEXT: s_mul_i32 s0, s0, s3 -; GFX9-NEXT: s_mul_hi_u32 s0, s3, s0 -; GFX9-NEXT: s_add_i32 s3, s3, s0 -; GFX9-NEXT: s_mul_hi_u32 s0, s10, s3 -; GFX9-NEXT: s_mul_i32 s4, s0, s2 -; GFX9-NEXT: s_sub_i32 s4, s10, s4 -; GFX9-NEXT: s_add_i32 
s3, s0, 1 -; GFX9-NEXT: s_sub_i32 s5, s4, s2 -; GFX9-NEXT: s_cmp_ge_u32 s4, s2 -; GFX9-NEXT: s_cselect_b32 s0, s3, s0 -; GFX9-NEXT: s_cselect_b32 s4, s5, s4 -; GFX9-NEXT: s_add_i32 s3, s0, 1 -; GFX9-NEXT: s_cmp_ge_u32 s4, s2 -; GFX9-NEXT: s_cselect_b32 s0, s3, s0 -; GFX9-NEXT: v_mov_b32_e32 v0, s0 -; GFX9-NEXT: v_mov_b32_e32 v1, s1 +; GFX9-NEXT: v_readfirstlane_b32 s4, v0 +; GFX9-NEXT: s_mul_i32 s3, s3, s4 +; GFX9-NEXT: s_mul_hi_u32 s3, s4, s3 +; GFX9-NEXT: s_add_i32 s4, s4, s3 +; GFX9-NEXT: s_mul_hi_u32 s3, s2, s4 +; GFX9-NEXT: s_mul_i32 s5, s3, s6 +; GFX9-NEXT: s_sub_i32 s2, s2, s5 +; GFX9-NEXT: s_add_i32 s4, s3, 1 +; GFX9-NEXT: s_sub_i32 s5, s2, s6 +; GFX9-NEXT: s_cmp_ge_u32 s2, s6 +; GFX9-NEXT: s_cselect_b32 s3, s4, s3 +; GFX9-NEXT: s_cselect_b32 s2, s5, s2 +; GFX9-NEXT: s_add_i32 s4, s3, 1 +; GFX9-NEXT: s_cmp_ge_u32 s2, s6 +; GFX9-NEXT: s_cselect_b32 s8, s4, s3 ; GFX9-NEXT: .LBB16_3: +; GFX9-NEXT: v_mov_b32_e32 v0, s8 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 -; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] +; GFX9-NEXT: v_mov_b32_e32 v1, s9 +; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; GFX9-NEXT: s_endpgm ; GFX9-NEXT: .LBB16_4: -; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX9-NEXT: ; implicit-def: $sgpr8_sgpr9 ; GFX9-NEXT: s_branch .LBB16_2 ; ; GFX1010-LABEL: sudiv64: ; GFX1010: ; %bb.0: ; GFX1010-NEXT: s_clause 0x1 -; GFX1010-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 -; GFX1010-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34 +; GFX1010-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX1010-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 ; GFX1010-NEXT: s_waitcnt lgkmcnt(0) -; GFX1010-NEXT: s_or_b64 s[4:5], s[10:11], s[2:3] +; GFX1010-NEXT: s_or_b64 s[4:5], s[2:3], s[6:7] ; GFX1010-NEXT: s_mov_b32 s4, 0 ; GFX1010-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX1010-NEXT: s_cbranch_scc0 .LBB16_4 ; GFX1010-NEXT: ; %bb.1: -; GFX1010-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX1010-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GFX1010-NEXT: s_sub_u32 s5, 0, s2 -; GFX1010-NEXT: s_subb_u32 s6, 0, s3 +; GFX1010-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX1010-NEXT: v_cvt_f32_u32_e32 v1, s7 +; GFX1010-NEXT: s_sub_u32 s9, 0, s6 +; GFX1010-NEXT: s_subb_u32 s10, 0, s7 ; GFX1010-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GFX1010-NEXT: v_rcp_f32_e32 v0, v0 ; GFX1010-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -2477,160 +2466,158 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GFX1010-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 ; GFX1010-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX1010-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1010-NEXT: v_readfirstlane_b32 s0, v1 -; GFX1010-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1010-NEXT: s_mul_i32 s7, s5, s0 -; GFX1010-NEXT: s_mul_hi_u32 s13, s5, s1 -; GFX1010-NEXT: s_mul_i32 s12, s6, s1 -; GFX1010-NEXT: s_add_i32 s7, s13, s7 -; GFX1010-NEXT: s_mul_i32 s14, s5, s1 -; GFX1010-NEXT: s_add_i32 s7, s7, s12 -; GFX1010-NEXT: s_mul_hi_u32 s13, s1, s14 -; GFX1010-NEXT: s_mul_hi_u32 s15, s0, s14 -; GFX1010-NEXT: s_mul_i32 s12, s0, s14 -; GFX1010-NEXT: s_mul_hi_u32 s14, s1, s7 -; GFX1010-NEXT: s_mul_i32 s1, s1, s7 -; GFX1010-NEXT: s_mul_hi_u32 s16, s0, s7 -; GFX1010-NEXT: s_add_u32 s1, s13, s1 -; GFX1010-NEXT: s_addc_u32 s13, 0, s14 -; GFX1010-NEXT: s_add_u32 s1, s1, s12 -; GFX1010-NEXT: s_mul_i32 s7, s0, s7 -; GFX1010-NEXT: s_addc_u32 s1, s13, s15 -; GFX1010-NEXT: s_addc_u32 s12, s16, 0 -; GFX1010-NEXT: s_add_u32 s1, s1, s7 -; GFX1010-NEXT: s_addc_u32 s7, 0, s12 -; GFX1010-NEXT: v_add_co_u32 v0, s1, v0, s1 -; GFX1010-NEXT: s_cmp_lg_u32 s1, 0 -; GFX1010-NEXT: s_addc_u32 s0, s0, s7 -; GFX1010-NEXT: 
v_readfirstlane_b32 s1, v0 -; GFX1010-NEXT: s_mul_i32 s7, s5, s0 -; GFX1010-NEXT: s_mul_hi_u32 s12, s5, s1 -; GFX1010-NEXT: s_mul_i32 s6, s6, s1 -; GFX1010-NEXT: s_add_i32 s7, s12, s7 -; GFX1010-NEXT: s_mul_i32 s5, s5, s1 -; GFX1010-NEXT: s_add_i32 s7, s7, s6 -; GFX1010-NEXT: s_mul_hi_u32 s12, s0, s5 -; GFX1010-NEXT: s_mul_i32 s13, s0, s5 -; GFX1010-NEXT: s_mul_hi_u32 s5, s1, s5 -; GFX1010-NEXT: s_mul_hi_u32 s14, s1, s7 -; GFX1010-NEXT: s_mul_i32 s1, s1, s7 -; GFX1010-NEXT: s_mul_hi_u32 s6, s0, s7 -; GFX1010-NEXT: s_add_u32 s1, s5, s1 -; GFX1010-NEXT: s_addc_u32 s5, 0, s14 -; GFX1010-NEXT: s_add_u32 s1, s1, s13 -; GFX1010-NEXT: s_mul_i32 s7, s0, s7 -; GFX1010-NEXT: s_addc_u32 s1, s5, s12 -; GFX1010-NEXT: s_addc_u32 s5, s6, 0 -; GFX1010-NEXT: s_add_u32 s1, s1, s7 -; GFX1010-NEXT: s_addc_u32 s5, 0, s5 -; GFX1010-NEXT: v_add_co_u32 v0, s1, v0, s1 -; GFX1010-NEXT: s_cmp_lg_u32 s1, 0 -; GFX1010-NEXT: s_addc_u32 s0, s0, s5 -; GFX1010-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1010-NEXT: s_mul_i32 s6, s10, s0 -; GFX1010-NEXT: s_mul_hi_u32 s5, s10, s0 -; GFX1010-NEXT: s_mul_hi_u32 s7, s11, s0 -; GFX1010-NEXT: s_mul_i32 s0, s11, s0 -; GFX1010-NEXT: s_mul_hi_u32 s12, s10, s1 -; GFX1010-NEXT: s_mul_hi_u32 s13, s11, s1 -; GFX1010-NEXT: s_mul_i32 s1, s11, s1 -; GFX1010-NEXT: s_add_u32 s6, s12, s6 -; GFX1010-NEXT: s_addc_u32 s5, 0, s5 -; GFX1010-NEXT: s_add_u32 s1, s6, s1 -; GFX1010-NEXT: s_addc_u32 s1, s5, s13 -; GFX1010-NEXT: s_addc_u32 s5, s7, 0 -; GFX1010-NEXT: s_add_u32 s1, s1, s0 -; GFX1010-NEXT: s_addc_u32 s5, 0, s5 -; GFX1010-NEXT: s_mul_hi_u32 s0, s2, s1 -; GFX1010-NEXT: s_mul_i32 s7, s2, s5 -; GFX1010-NEXT: s_mul_i32 s12, s2, s1 -; GFX1010-NEXT: s_add_i32 s0, s0, s7 -; GFX1010-NEXT: v_sub_co_u32 v0, s7, s10, s12 -; GFX1010-NEXT: s_mul_i32 s6, s3, s1 -; GFX1010-NEXT: s_add_i32 s0, s0, s6 -; GFX1010-NEXT: v_sub_co_u32 v1, s12, v0, s2 -; GFX1010-NEXT: s_sub_i32 s6, s11, s0 -; GFX1010-NEXT: s_cmp_lg_u32 s7, 0 -; GFX1010-NEXT: s_subb_u32 s6, s6, s3 -; GFX1010-NEXT: s_cmp_lg_u32 s12, 0 -; GFX1010-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v1 -; GFX1010-NEXT: s_subb_u32 s6, s6, 0 -; GFX1010-NEXT: s_cmp_ge_u32 s6, s3 -; GFX1010-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX1010-NEXT: v_readfirstlane_b32 s5, v1 +; GFX1010-NEXT: v_readfirstlane_b32 s8, v0 +; GFX1010-NEXT: s_mul_i32 s11, s9, s5 +; GFX1010-NEXT: s_mul_hi_u32 s13, s9, s8 +; GFX1010-NEXT: s_mul_i32 s12, s10, s8 +; GFX1010-NEXT: s_add_i32 s11, s13, s11 +; GFX1010-NEXT: s_mul_i32 s14, s9, s8 +; GFX1010-NEXT: s_add_i32 s11, s11, s12 +; GFX1010-NEXT: s_mul_hi_u32 s13, s8, s14 +; GFX1010-NEXT: s_mul_i32 s16, s8, s11 +; GFX1010-NEXT: s_mul_hi_u32 s15, s5, s14 +; GFX1010-NEXT: s_mul_i32 s12, s5, s14 +; GFX1010-NEXT: s_mul_hi_u32 s14, s8, s11 +; GFX1010-NEXT: s_add_u32 s13, s13, s16 +; GFX1010-NEXT: s_addc_u32 s14, 0, s14 +; GFX1010-NEXT: s_mul_hi_u32 s17, s5, s11 +; GFX1010-NEXT: s_add_u32 s12, s13, s12 +; GFX1010-NEXT: s_mul_i32 s11, s5, s11 +; GFX1010-NEXT: s_addc_u32 s12, s14, s15 +; GFX1010-NEXT: s_addc_u32 s13, s17, 0 +; GFX1010-NEXT: s_add_u32 s11, s12, s11 +; GFX1010-NEXT: s_addc_u32 s12, 0, s13 +; GFX1010-NEXT: s_add_u32 s8, s8, s11 +; GFX1010-NEXT: s_cselect_b32 s11, -1, 0 +; GFX1010-NEXT: s_mul_hi_u32 s13, s9, s8 +; GFX1010-NEXT: s_cmp_lg_u32 s11, 0 +; GFX1010-NEXT: s_mul_i32 s11, s9, s8 +; GFX1010-NEXT: s_addc_u32 s5, s5, s12 +; GFX1010-NEXT: s_mul_i32 s10, s10, s8 +; GFX1010-NEXT: s_mul_i32 s9, s9, s5 +; GFX1010-NEXT: s_mul_hi_u32 s12, s8, s11 +; GFX1010-NEXT: s_add_i32 s9, s13, s9 +; GFX1010-NEXT: s_mul_hi_u32 s13, s5, s11 +; GFX1010-NEXT: 
s_add_i32 s9, s9, s10 +; GFX1010-NEXT: s_mul_i32 s10, s5, s11 +; GFX1010-NEXT: s_mul_i32 s15, s8, s9 +; GFX1010-NEXT: s_mul_hi_u32 s14, s8, s9 +; GFX1010-NEXT: s_add_u32 s12, s12, s15 +; GFX1010-NEXT: s_addc_u32 s14, 0, s14 +; GFX1010-NEXT: s_mul_hi_u32 s11, s5, s9 +; GFX1010-NEXT: s_add_u32 s10, s12, s10 +; GFX1010-NEXT: s_mul_i32 s9, s5, s9 +; GFX1010-NEXT: s_addc_u32 s10, s14, s13 +; GFX1010-NEXT: s_addc_u32 s11, s11, 0 +; GFX1010-NEXT: s_add_u32 s9, s10, s9 +; GFX1010-NEXT: s_addc_u32 s10, 0, s11 +; GFX1010-NEXT: s_add_u32 s8, s8, s9 +; GFX1010-NEXT: s_cselect_b32 s9, -1, 0 +; GFX1010-NEXT: s_mul_hi_u32 s11, s2, s8 +; GFX1010-NEXT: s_cmp_lg_u32 s9, 0 +; GFX1010-NEXT: s_mul_hi_u32 s9, s3, s8 +; GFX1010-NEXT: s_addc_u32 s5, s5, s10 +; GFX1010-NEXT: s_mul_i32 s8, s3, s8 +; GFX1010-NEXT: s_mul_i32 s12, s2, s5 +; GFX1010-NEXT: s_mul_hi_u32 s10, s2, s5 +; GFX1010-NEXT: s_add_u32 s11, s11, s12 +; GFX1010-NEXT: s_addc_u32 s10, 0, s10 +; GFX1010-NEXT: s_mul_hi_u32 s13, s3, s5 +; GFX1010-NEXT: s_add_u32 s8, s11, s8 +; GFX1010-NEXT: s_mul_i32 s5, s3, s5 +; GFX1010-NEXT: s_addc_u32 s8, s10, s9 +; GFX1010-NEXT: s_addc_u32 s9, s13, 0 +; GFX1010-NEXT: s_add_u32 s5, s8, s5 +; GFX1010-NEXT: s_addc_u32 s8, 0, s9 +; GFX1010-NEXT: s_mul_hi_u32 s9, s6, s5 +; GFX1010-NEXT: s_mul_i32 s10, s6, s8 +; GFX1010-NEXT: s_mul_i32 s11, s7, s5 +; GFX1010-NEXT: s_add_i32 s9, s9, s10 +; GFX1010-NEXT: s_mul_i32 s10, s6, s5 +; GFX1010-NEXT: s_add_i32 s9, s9, s11 +; GFX1010-NEXT: s_sub_i32 s11, s3, s9 +; GFX1010-NEXT: s_sub_u32 s10, s2, s10 ; GFX1010-NEXT: s_cselect_b32 s12, -1, 0 -; GFX1010-NEXT: s_cmp_eq_u32 s6, s3 -; GFX1010-NEXT: s_cselect_b32 vcc_lo, -1, 0 -; GFX1010-NEXT: s_add_u32 s6, s1, 1 -; GFX1010-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo -; GFX1010-NEXT: s_addc_u32 s12, s5, 0 -; GFX1010-NEXT: s_add_u32 s13, s1, 2 -; GFX1010-NEXT: s_addc_u32 s14, s5, 0 -; GFX1010-NEXT: s_cmp_lg_u32 s7, 0 -; GFX1010-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v0 -; GFX1010-NEXT: s_subb_u32 s0, s11, s0 -; GFX1010-NEXT: v_mov_b32_e32 v2, s13 -; GFX1010-NEXT: s_cmp_ge_u32 s0, s3 -; GFX1010-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo -; GFX1010-NEXT: s_cselect_b32 s7, -1, 0 -; GFX1010-NEXT: s_cmp_eq_u32 s0, s3 -; GFX1010-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1 -; GFX1010-NEXT: s_cselect_b32 s0, -1, 0 -; GFX1010-NEXT: v_mov_b32_e32 v1, s14 -; GFX1010-NEXT: v_cndmask_b32_e64 v0, s7, v0, s0 -; GFX1010-NEXT: v_cndmask_b32_e32 v2, s6, v2, vcc_lo -; GFX1010-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo -; GFX1010-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 -; GFX1010-NEXT: v_cndmask_b32_e32 v1, s5, v1, vcc_lo -; GFX1010-NEXT: v_cndmask_b32_e32 v0, s1, v2, vcc_lo +; GFX1010-NEXT: s_cmp_lg_u32 s12, 0 +; GFX1010-NEXT: s_subb_u32 s11, s11, s7 +; GFX1010-NEXT: s_sub_u32 s13, s10, s6 +; GFX1010-NEXT: s_cselect_b32 s14, -1, 0 +; GFX1010-NEXT: s_cmp_lg_u32 s14, 0 +; GFX1010-NEXT: s_subb_u32 s11, s11, 0 +; GFX1010-NEXT: s_cmp_ge_u32 s11, s7 +; GFX1010-NEXT: s_cselect_b32 s14, -1, 0 +; GFX1010-NEXT: s_cmp_ge_u32 s13, s6 +; GFX1010-NEXT: s_cselect_b32 s13, -1, 0 +; GFX1010-NEXT: s_cmp_eq_u32 s11, s7 +; GFX1010-NEXT: s_cselect_b32 s11, s13, s14 +; GFX1010-NEXT: s_add_u32 s13, s5, 1 +; GFX1010-NEXT: s_addc_u32 s14, s8, 0 +; GFX1010-NEXT: s_add_u32 s15, s5, 2 +; GFX1010-NEXT: s_addc_u32 s16, s8, 0 +; GFX1010-NEXT: s_cmp_lg_u32 s11, 0 +; GFX1010-NEXT: s_cselect_b32 s11, s15, s13 +; GFX1010-NEXT: s_cselect_b32 s13, s16, s14 +; GFX1010-NEXT: s_cmp_lg_u32 s12, 0 +; GFX1010-NEXT: s_subb_u32 s3, s3, s9 +; GFX1010-NEXT: s_cmp_ge_u32 s3, s7 +; GFX1010-NEXT: s_cselect_b32 s9, -1, 0 
+; GFX1010-NEXT: s_cmp_ge_u32 s10, s6 +; GFX1010-NEXT: s_cselect_b32 s10, -1, 0 +; GFX1010-NEXT: s_cmp_eq_u32 s3, s7 +; GFX1010-NEXT: s_cselect_b32 s3, s10, s9 +; GFX1010-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1010-NEXT: s_cselect_b32 s9, s13, s8 +; GFX1010-NEXT: s_cselect_b32 s8, s11, s5 ; GFX1010-NEXT: s_andn2_b32 vcc_lo, exec_lo, s4 ; GFX1010-NEXT: s_cbranch_vccnz .LBB16_3 ; GFX1010-NEXT: .LBB16_2: -; GFX1010-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX1010-NEXT: s_sub_i32 s1, 0, s2 +; GFX1010-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX1010-NEXT: s_sub_i32 s4, 0, s6 +; GFX1010-NEXT: s_mov_b32 s9, 0 ; GFX1010-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX1010-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX1010-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1010-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1010-NEXT: s_mul_i32 s1, s1, s0 -; GFX1010-NEXT: s_mul_hi_u32 s1, s0, s1 -; GFX1010-NEXT: s_add_i32 s0, s0, s1 -; GFX1010-NEXT: s_mul_hi_u32 s0, s10, s0 -; GFX1010-NEXT: s_mul_i32 s1, s0, s2 -; GFX1010-NEXT: s_add_i32 s3, s0, 1 -; GFX1010-NEXT: s_sub_i32 s1, s10, s1 -; GFX1010-NEXT: s_sub_i32 s4, s1, s2 -; GFX1010-NEXT: s_cmp_ge_u32 s1, s2 -; GFX1010-NEXT: s_cselect_b32 s0, s3, s0 -; GFX1010-NEXT: s_cselect_b32 s1, s4, s1 -; GFX1010-NEXT: s_add_i32 s3, s0, 1 -; GFX1010-NEXT: s_cmp_ge_u32 s1, s2 -; GFX1010-NEXT: s_mov_b32 s1, 0 -; GFX1010-NEXT: s_cselect_b32 s0, s3, s0 -; GFX1010-NEXT: v_mov_b32_e32 v0, s0 -; GFX1010-NEXT: v_mov_b32_e32 v1, s1 +; GFX1010-NEXT: v_readfirstlane_b32 s3, v0 +; GFX1010-NEXT: s_mul_i32 s4, s4, s3 +; GFX1010-NEXT: s_mul_hi_u32 s4, s3, s4 +; GFX1010-NEXT: s_add_i32 s3, s3, s4 +; GFX1010-NEXT: s_mul_hi_u32 s3, s2, s3 +; GFX1010-NEXT: s_mul_i32 s4, s3, s6 +; GFX1010-NEXT: s_sub_i32 s2, s2, s4 +; GFX1010-NEXT: s_add_i32 s4, s3, 1 +; GFX1010-NEXT: s_sub_i32 s5, s2, s6 +; GFX1010-NEXT: s_cmp_ge_u32 s2, s6 +; GFX1010-NEXT: s_cselect_b32 s3, s4, s3 +; GFX1010-NEXT: s_cselect_b32 s2, s5, s2 +; GFX1010-NEXT: s_add_i32 s4, s3, 1 +; GFX1010-NEXT: s_cmp_ge_u32 s2, s6 +; GFX1010-NEXT: s_cselect_b32 s8, s4, s3 ; GFX1010-NEXT: .LBB16_3: +; GFX1010-NEXT: v_mov_b32_e32 v0, s8 ; GFX1010-NEXT: v_mov_b32_e32 v2, 0 -; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] +; GFX1010-NEXT: v_mov_b32_e32 v1, s9 +; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; GFX1010-NEXT: s_endpgm ; GFX1010-NEXT: .LBB16_4: -; GFX1010-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1010-NEXT: ; implicit-def: $sgpr8_sgpr9 ; GFX1010-NEXT: s_branch .LBB16_2 ; ; GFX1030W32-LABEL: sudiv64: ; GFX1030W32: ; %bb.0: ; GFX1030W32-NEXT: s_clause 0x1 -; GFX1030W32-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 -; GFX1030W32-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34 +; GFX1030W32-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX1030W32-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 ; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0) -; GFX1030W32-NEXT: s_or_b64 s[4:5], s[10:11], s[2:3] -; GFX1030W32-NEXT: s_mov_b32 s4, 0 -; GFX1030W32-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1030W32-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5] +; GFX1030W32-NEXT: s_mov_b32 s6, 0 +; GFX1030W32-NEXT: s_cmp_lg_u64 s[6:7], 0 ; GFX1030W32-NEXT: s_cbranch_scc0 .LBB16_4 ; GFX1030W32-NEXT: ; %bb.1: -; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GFX1030W32-NEXT: s_sub_u32 s5, 0, s2 -; GFX1030W32-NEXT: s_subb_u32 s6, 0, s3 +; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v1, s5 +; GFX1030W32-NEXT: s_sub_u32 s9, 0, s4 +; GFX1030W32-NEXT: s_subb_u32 s10, 0, s5 ; GFX1030W32-NEXT: v_fmamk_f32 v0, v1, 0x4f800000, v0 ; GFX1030W32-NEXT: 
v_rcp_f32_e32 v0, v0 ; GFX1030W32-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -2639,160 +2626,158 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GFX1030W32-NEXT: v_fmamk_f32 v0, v1, 0xcf800000, v0 ; GFX1030W32-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX1030W32-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1030W32-NEXT: v_readfirstlane_b32 s0, v1 -; GFX1030W32-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1030W32-NEXT: s_mul_i32 s7, s5, s0 -; GFX1030W32-NEXT: s_mul_hi_u32 s13, s5, s1 -; GFX1030W32-NEXT: s_mul_i32 s12, s6, s1 -; GFX1030W32-NEXT: s_add_i32 s7, s13, s7 -; GFX1030W32-NEXT: s_mul_i32 s14, s5, s1 -; GFX1030W32-NEXT: s_add_i32 s7, s7, s12 -; GFX1030W32-NEXT: s_mul_hi_u32 s13, s1, s14 -; GFX1030W32-NEXT: s_mul_hi_u32 s15, s0, s14 -; GFX1030W32-NEXT: s_mul_i32 s12, s0, s14 -; GFX1030W32-NEXT: s_mul_hi_u32 s14, s1, s7 -; GFX1030W32-NEXT: s_mul_i32 s1, s1, s7 -; GFX1030W32-NEXT: s_mul_hi_u32 s16, s0, s7 -; GFX1030W32-NEXT: s_add_u32 s1, s13, s1 -; GFX1030W32-NEXT: s_addc_u32 s13, 0, s14 -; GFX1030W32-NEXT: s_add_u32 s1, s1, s12 -; GFX1030W32-NEXT: s_mul_i32 s7, s0, s7 -; GFX1030W32-NEXT: s_addc_u32 s1, s13, s15 -; GFX1030W32-NEXT: s_addc_u32 s12, s16, 0 -; GFX1030W32-NEXT: s_add_u32 s1, s1, s7 -; GFX1030W32-NEXT: s_addc_u32 s7, 0, s12 -; GFX1030W32-NEXT: v_add_co_u32 v0, s1, v0, s1 -; GFX1030W32-NEXT: s_cmp_lg_u32 s1, 0 -; GFX1030W32-NEXT: s_addc_u32 s0, s0, s7 -; GFX1030W32-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1030W32-NEXT: s_mul_i32 s7, s5, s0 -; GFX1030W32-NEXT: s_mul_hi_u32 s12, s5, s1 -; GFX1030W32-NEXT: s_mul_i32 s6, s6, s1 -; GFX1030W32-NEXT: s_add_i32 s7, s12, s7 -; GFX1030W32-NEXT: s_mul_i32 s5, s5, s1 -; GFX1030W32-NEXT: s_add_i32 s7, s7, s6 -; GFX1030W32-NEXT: s_mul_hi_u32 s12, s0, s5 -; GFX1030W32-NEXT: s_mul_i32 s13, s0, s5 -; GFX1030W32-NEXT: s_mul_hi_u32 s5, s1, s5 -; GFX1030W32-NEXT: s_mul_hi_u32 s14, s1, s7 -; GFX1030W32-NEXT: s_mul_i32 s1, s1, s7 -; GFX1030W32-NEXT: s_mul_hi_u32 s6, s0, s7 -; GFX1030W32-NEXT: s_add_u32 s1, s5, s1 -; GFX1030W32-NEXT: s_addc_u32 s5, 0, s14 -; GFX1030W32-NEXT: s_add_u32 s1, s1, s13 -; GFX1030W32-NEXT: s_mul_i32 s7, s0, s7 -; GFX1030W32-NEXT: s_addc_u32 s1, s5, s12 -; GFX1030W32-NEXT: s_addc_u32 s5, s6, 0 -; GFX1030W32-NEXT: s_add_u32 s1, s1, s7 -; GFX1030W32-NEXT: s_addc_u32 s5, 0, s5 -; GFX1030W32-NEXT: v_add_co_u32 v0, s1, v0, s1 -; GFX1030W32-NEXT: s_cmp_lg_u32 s1, 0 -; GFX1030W32-NEXT: s_addc_u32 s0, s0, s5 -; GFX1030W32-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1030W32-NEXT: s_mul_i32 s6, s10, s0 -; GFX1030W32-NEXT: s_mul_hi_u32 s5, s10, s0 -; GFX1030W32-NEXT: s_mul_hi_u32 s7, s11, s0 -; GFX1030W32-NEXT: s_mul_i32 s0, s11, s0 -; GFX1030W32-NEXT: s_mul_hi_u32 s12, s10, s1 -; GFX1030W32-NEXT: s_mul_hi_u32 s13, s11, s1 -; GFX1030W32-NEXT: s_mul_i32 s1, s11, s1 -; GFX1030W32-NEXT: s_add_u32 s6, s12, s6 -; GFX1030W32-NEXT: s_addc_u32 s5, 0, s5 -; GFX1030W32-NEXT: s_add_u32 s1, s6, s1 -; GFX1030W32-NEXT: s_addc_u32 s1, s5, s13 -; GFX1030W32-NEXT: s_addc_u32 s5, s7, 0 -; GFX1030W32-NEXT: s_add_u32 s1, s1, s0 -; GFX1030W32-NEXT: s_addc_u32 s5, 0, s5 -; GFX1030W32-NEXT: s_mul_hi_u32 s0, s2, s1 -; GFX1030W32-NEXT: s_mul_i32 s7, s2, s5 -; GFX1030W32-NEXT: s_mul_i32 s12, s2, s1 -; GFX1030W32-NEXT: s_add_i32 s0, s0, s7 -; GFX1030W32-NEXT: v_sub_co_u32 v0, s7, s10, s12 -; GFX1030W32-NEXT: s_mul_i32 s6, s3, s1 -; GFX1030W32-NEXT: s_add_i32 s0, s0, s6 -; GFX1030W32-NEXT: v_sub_co_u32 v1, s12, v0, s2 -; GFX1030W32-NEXT: s_sub_i32 s6, s11, s0 -; GFX1030W32-NEXT: s_cmp_lg_u32 s7, 0 -; GFX1030W32-NEXT: s_subb_u32 s6, s6, s3 -; GFX1030W32-NEXT: 
s_cmp_lg_u32 s12, 0 -; GFX1030W32-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v1 -; GFX1030W32-NEXT: s_subb_u32 s6, s6, 0 -; GFX1030W32-NEXT: s_cmp_ge_u32 s6, s3 -; GFX1030W32-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX1030W32-NEXT: v_readfirstlane_b32 s7, v1 +; GFX1030W32-NEXT: v_readfirstlane_b32 s8, v0 +; GFX1030W32-NEXT: s_mul_i32 s11, s9, s7 +; GFX1030W32-NEXT: s_mul_hi_u32 s13, s9, s8 +; GFX1030W32-NEXT: s_mul_i32 s12, s10, s8 +; GFX1030W32-NEXT: s_add_i32 s11, s13, s11 +; GFX1030W32-NEXT: s_mul_i32 s14, s9, s8 +; GFX1030W32-NEXT: s_add_i32 s11, s11, s12 +; GFX1030W32-NEXT: s_mul_hi_u32 s13, s8, s14 +; GFX1030W32-NEXT: s_mul_i32 s16, s8, s11 +; GFX1030W32-NEXT: s_mul_hi_u32 s15, s7, s14 +; GFX1030W32-NEXT: s_mul_i32 s12, s7, s14 +; GFX1030W32-NEXT: s_mul_hi_u32 s14, s8, s11 +; GFX1030W32-NEXT: s_add_u32 s13, s13, s16 +; GFX1030W32-NEXT: s_addc_u32 s14, 0, s14 +; GFX1030W32-NEXT: s_mul_hi_u32 s17, s7, s11 +; GFX1030W32-NEXT: s_add_u32 s12, s13, s12 +; GFX1030W32-NEXT: s_mul_i32 s11, s7, s11 +; GFX1030W32-NEXT: s_addc_u32 s12, s14, s15 +; GFX1030W32-NEXT: s_addc_u32 s13, s17, 0 +; GFX1030W32-NEXT: s_add_u32 s11, s12, s11 +; GFX1030W32-NEXT: s_addc_u32 s12, 0, s13 +; GFX1030W32-NEXT: s_add_u32 s8, s8, s11 +; GFX1030W32-NEXT: s_cselect_b32 s11, -1, 0 +; GFX1030W32-NEXT: s_mul_hi_u32 s13, s9, s8 +; GFX1030W32-NEXT: s_cmp_lg_u32 s11, 0 +; GFX1030W32-NEXT: s_mul_i32 s11, s9, s8 +; GFX1030W32-NEXT: s_addc_u32 s7, s7, s12 +; GFX1030W32-NEXT: s_mul_i32 s10, s10, s8 +; GFX1030W32-NEXT: s_mul_i32 s9, s9, s7 +; GFX1030W32-NEXT: s_mul_hi_u32 s12, s8, s11 +; GFX1030W32-NEXT: s_add_i32 s9, s13, s9 +; GFX1030W32-NEXT: s_mul_hi_u32 s13, s7, s11 +; GFX1030W32-NEXT: s_add_i32 s9, s9, s10 +; GFX1030W32-NEXT: s_mul_i32 s10, s7, s11 +; GFX1030W32-NEXT: s_mul_i32 s15, s8, s9 +; GFX1030W32-NEXT: s_mul_hi_u32 s14, s8, s9 +; GFX1030W32-NEXT: s_add_u32 s12, s12, s15 +; GFX1030W32-NEXT: s_addc_u32 s14, 0, s14 +; GFX1030W32-NEXT: s_mul_hi_u32 s11, s7, s9 +; GFX1030W32-NEXT: s_add_u32 s10, s12, s10 +; GFX1030W32-NEXT: s_mul_i32 s9, s7, s9 +; GFX1030W32-NEXT: s_addc_u32 s10, s14, s13 +; GFX1030W32-NEXT: s_addc_u32 s11, s11, 0 +; GFX1030W32-NEXT: s_add_u32 s9, s10, s9 +; GFX1030W32-NEXT: s_addc_u32 s10, 0, s11 +; GFX1030W32-NEXT: s_add_u32 s8, s8, s9 +; GFX1030W32-NEXT: s_cselect_b32 s9, -1, 0 +; GFX1030W32-NEXT: s_mul_hi_u32 s11, s2, s8 +; GFX1030W32-NEXT: s_cmp_lg_u32 s9, 0 +; GFX1030W32-NEXT: s_mul_hi_u32 s9, s3, s8 +; GFX1030W32-NEXT: s_addc_u32 s7, s7, s10 +; GFX1030W32-NEXT: s_mul_i32 s8, s3, s8 +; GFX1030W32-NEXT: s_mul_i32 s12, s2, s7 +; GFX1030W32-NEXT: s_mul_hi_u32 s10, s2, s7 +; GFX1030W32-NEXT: s_add_u32 s11, s11, s12 +; GFX1030W32-NEXT: s_addc_u32 s10, 0, s10 +; GFX1030W32-NEXT: s_mul_hi_u32 s13, s3, s7 +; GFX1030W32-NEXT: s_add_u32 s8, s11, s8 +; GFX1030W32-NEXT: s_mul_i32 s7, s3, s7 +; GFX1030W32-NEXT: s_addc_u32 s8, s10, s9 +; GFX1030W32-NEXT: s_addc_u32 s9, s13, 0 +; GFX1030W32-NEXT: s_add_u32 s7, s8, s7 +; GFX1030W32-NEXT: s_addc_u32 s8, 0, s9 +; GFX1030W32-NEXT: s_mul_hi_u32 s9, s4, s7 +; GFX1030W32-NEXT: s_mul_i32 s10, s4, s8 +; GFX1030W32-NEXT: s_mul_i32 s11, s5, s7 +; GFX1030W32-NEXT: s_add_i32 s9, s9, s10 +; GFX1030W32-NEXT: s_mul_i32 s10, s4, s7 +; GFX1030W32-NEXT: s_add_i32 s9, s9, s11 +; GFX1030W32-NEXT: s_sub_i32 s11, s3, s9 +; GFX1030W32-NEXT: s_sub_u32 s10, s2, s10 ; GFX1030W32-NEXT: s_cselect_b32 s12, -1, 0 -; GFX1030W32-NEXT: s_cmp_eq_u32 s6, s3 -; GFX1030W32-NEXT: s_cselect_b32 vcc_lo, -1, 0 -; GFX1030W32-NEXT: s_add_u32 s6, s1, 1 -; GFX1030W32-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo 
-; GFX1030W32-NEXT: s_addc_u32 s12, s5, 0 -; GFX1030W32-NEXT: s_add_u32 s13, s1, 2 -; GFX1030W32-NEXT: s_addc_u32 s14, s5, 0 -; GFX1030W32-NEXT: s_cmp_lg_u32 s7, 0 -; GFX1030W32-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v0 -; GFX1030W32-NEXT: s_subb_u32 s0, s11, s0 -; GFX1030W32-NEXT: v_mov_b32_e32 v2, s13 -; GFX1030W32-NEXT: s_cmp_ge_u32 s0, s3 -; GFX1030W32-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo -; GFX1030W32-NEXT: s_cselect_b32 s7, -1, 0 -; GFX1030W32-NEXT: s_cmp_eq_u32 s0, s3 -; GFX1030W32-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1 -; GFX1030W32-NEXT: s_cselect_b32 s0, -1, 0 -; GFX1030W32-NEXT: v_mov_b32_e32 v1, s14 -; GFX1030W32-NEXT: v_cndmask_b32_e64 v0, s7, v0, s0 -; GFX1030W32-NEXT: v_cndmask_b32_e32 v2, s6, v2, vcc_lo -; GFX1030W32-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo -; GFX1030W32-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 -; GFX1030W32-NEXT: v_cndmask_b32_e32 v1, s5, v1, vcc_lo -; GFX1030W32-NEXT: v_cndmask_b32_e32 v0, s1, v2, vcc_lo -; GFX1030W32-NEXT: s_andn2_b32 vcc_lo, exec_lo, s4 +; GFX1030W32-NEXT: s_cmp_lg_u32 s12, 0 +; GFX1030W32-NEXT: s_subb_u32 s11, s11, s5 +; GFX1030W32-NEXT: s_sub_u32 s13, s10, s4 +; GFX1030W32-NEXT: s_cselect_b32 s14, -1, 0 +; GFX1030W32-NEXT: s_cmp_lg_u32 s14, 0 +; GFX1030W32-NEXT: s_subb_u32 s11, s11, 0 +; GFX1030W32-NEXT: s_cmp_ge_u32 s11, s5 +; GFX1030W32-NEXT: s_cselect_b32 s14, -1, 0 +; GFX1030W32-NEXT: s_cmp_ge_u32 s13, s4 +; GFX1030W32-NEXT: s_cselect_b32 s13, -1, 0 +; GFX1030W32-NEXT: s_cmp_eq_u32 s11, s5 +; GFX1030W32-NEXT: s_cselect_b32 s11, s13, s14 +; GFX1030W32-NEXT: s_add_u32 s13, s7, 1 +; GFX1030W32-NEXT: s_addc_u32 s14, s8, 0 +; GFX1030W32-NEXT: s_add_u32 s15, s7, 2 +; GFX1030W32-NEXT: s_addc_u32 s16, s8, 0 +; GFX1030W32-NEXT: s_cmp_lg_u32 s11, 0 +; GFX1030W32-NEXT: s_cselect_b32 s11, s15, s13 +; GFX1030W32-NEXT: s_cselect_b32 s13, s16, s14 +; GFX1030W32-NEXT: s_cmp_lg_u32 s12, 0 +; GFX1030W32-NEXT: s_subb_u32 s3, s3, s9 +; GFX1030W32-NEXT: s_cmp_ge_u32 s3, s5 +; GFX1030W32-NEXT: s_cselect_b32 s9, -1, 0 +; GFX1030W32-NEXT: s_cmp_ge_u32 s10, s4 +; GFX1030W32-NEXT: s_cselect_b32 s10, -1, 0 +; GFX1030W32-NEXT: s_cmp_eq_u32 s3, s5 +; GFX1030W32-NEXT: s_cselect_b32 s3, s10, s9 +; GFX1030W32-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1030W32-NEXT: s_cselect_b32 s9, s13, s8 +; GFX1030W32-NEXT: s_cselect_b32 s8, s11, s7 +; GFX1030W32-NEXT: s_andn2_b32 vcc_lo, exec_lo, s6 ; GFX1030W32-NEXT: s_cbranch_vccnz .LBB16_3 ; GFX1030W32-NEXT: .LBB16_2: -; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX1030W32-NEXT: s_sub_i32 s1, 0, s2 +; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GFX1030W32-NEXT: s_sub_i32 s5, 0, s4 +; GFX1030W32-NEXT: s_mov_b32 s9, 0 ; GFX1030W32-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX1030W32-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX1030W32-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1030W32-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1030W32-NEXT: s_mul_i32 s1, s1, s0 -; GFX1030W32-NEXT: s_mul_hi_u32 s1, s0, s1 -; GFX1030W32-NEXT: s_add_i32 s0, s0, s1 -; GFX1030W32-NEXT: s_mul_hi_u32 s0, s10, s0 -; GFX1030W32-NEXT: s_mul_i32 s1, s0, s2 -; GFX1030W32-NEXT: s_add_i32 s3, s0, 1 -; GFX1030W32-NEXT: s_sub_i32 s1, s10, s1 -; GFX1030W32-NEXT: s_sub_i32 s4, s1, s2 -; GFX1030W32-NEXT: s_cmp_ge_u32 s1, s2 -; GFX1030W32-NEXT: s_cselect_b32 s0, s3, s0 -; GFX1030W32-NEXT: s_cselect_b32 s1, s4, s1 -; GFX1030W32-NEXT: s_add_i32 s3, s0, 1 -; GFX1030W32-NEXT: s_cmp_ge_u32 s1, s2 -; GFX1030W32-NEXT: s_mov_b32 s1, 0 -; GFX1030W32-NEXT: s_cselect_b32 s0, s3, s0 -; GFX1030W32-NEXT: v_mov_b32_e32 v0, s0 -; GFX1030W32-NEXT: v_mov_b32_e32 v1, s1 +; GFX1030W32-NEXT: 
v_readfirstlane_b32 s3, v0 +; GFX1030W32-NEXT: s_mul_i32 s5, s5, s3 +; GFX1030W32-NEXT: s_mul_hi_u32 s5, s3, s5 +; GFX1030W32-NEXT: s_add_i32 s3, s3, s5 +; GFX1030W32-NEXT: s_mul_hi_u32 s3, s2, s3 +; GFX1030W32-NEXT: s_mul_i32 s5, s3, s4 +; GFX1030W32-NEXT: s_sub_i32 s2, s2, s5 +; GFX1030W32-NEXT: s_add_i32 s5, s3, 1 +; GFX1030W32-NEXT: s_sub_i32 s6, s2, s4 +; GFX1030W32-NEXT: s_cmp_ge_u32 s2, s4 +; GFX1030W32-NEXT: s_cselect_b32 s3, s5, s3 +; GFX1030W32-NEXT: s_cselect_b32 s2, s6, s2 +; GFX1030W32-NEXT: s_add_i32 s5, s3, 1 +; GFX1030W32-NEXT: s_cmp_ge_u32 s2, s4 +; GFX1030W32-NEXT: s_cselect_b32 s8, s5, s3 ; GFX1030W32-NEXT: .LBB16_3: +; GFX1030W32-NEXT: v_mov_b32_e32 v0, s8 ; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0 -; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] +; GFX1030W32-NEXT: v_mov_b32_e32 v1, s9 +; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; GFX1030W32-NEXT: s_endpgm ; GFX1030W32-NEXT: .LBB16_4: -; GFX1030W32-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1030W32-NEXT: ; implicit-def: $sgpr8_sgpr9 ; GFX1030W32-NEXT: s_branch .LBB16_2 ; ; GFX1030W64-LABEL: sudiv64: ; GFX1030W64: ; %bb.0: ; GFX1030W64-NEXT: s_clause 0x1 -; GFX1030W64-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 -; GFX1030W64-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34 +; GFX1030W64-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GFX1030W64-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 ; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0) -; GFX1030W64-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3] -; GFX1030W64-NEXT: s_mov_b32 s0, 0 -; GFX1030W64-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1030W64-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5] +; GFX1030W64-NEXT: s_mov_b32 s6, 0 +; GFX1030W64-NEXT: s_cmp_lg_u64 s[6:7], 0 ; GFX1030W64-NEXT: s_cbranch_scc0 .LBB16_4 ; GFX1030W64-NEXT: ; %bb.1: -; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GFX1030W64-NEXT: s_sub_u32 s5, 0, s2 -; GFX1030W64-NEXT: s_subb_u32 s6, 0, s3 +; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v1, s5 +; GFX1030W64-NEXT: s_sub_u32 s9, 0, s4 +; GFX1030W64-NEXT: s_subb_u32 s10, 0, s5 ; GFX1030W64-NEXT: v_fmamk_f32 v0, v1, 0x4f800000, v0 ; GFX1030W64-NEXT: v_rcp_f32_e32 v0, v0 ; GFX1030W64-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -2801,160 +2786,158 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GFX1030W64-NEXT: v_fmamk_f32 v0, v1, 0xcf800000, v0 ; GFX1030W64-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX1030W64-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1030W64-NEXT: v_readfirstlane_b32 s4, v1 -; GFX1030W64-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1030W64-NEXT: s_mul_i32 s1, s5, s4 -; GFX1030W64-NEXT: s_mul_hi_u32 s12, s5, s0 -; GFX1030W64-NEXT: s_mul_i32 s7, s6, s0 -; GFX1030W64-NEXT: s_add_i32 s1, s12, s1 -; GFX1030W64-NEXT: s_mul_i32 s13, s5, s0 -; GFX1030W64-NEXT: s_add_i32 s1, s1, s7 -; GFX1030W64-NEXT: s_mul_hi_u32 s12, s0, s13 -; GFX1030W64-NEXT: s_mul_hi_u32 s14, s4, s13 -; GFX1030W64-NEXT: s_mul_i32 s7, s4, s13 -; GFX1030W64-NEXT: s_mul_hi_u32 s13, s0, s1 -; GFX1030W64-NEXT: s_mul_i32 s0, s0, s1 -; GFX1030W64-NEXT: s_mul_hi_u32 s15, s4, s1 -; GFX1030W64-NEXT: s_add_u32 s0, s12, s0 -; GFX1030W64-NEXT: s_addc_u32 s12, 0, s13 -; GFX1030W64-NEXT: s_add_u32 s0, s0, s7 -; GFX1030W64-NEXT: s_mul_i32 s1, s4, s1 -; GFX1030W64-NEXT: s_addc_u32 s0, s12, s14 -; GFX1030W64-NEXT: s_addc_u32 s7, s15, 0 -; GFX1030W64-NEXT: s_add_u32 s0, s0, s1 -; GFX1030W64-NEXT: s_addc_u32 s7, 0, s7 -; GFX1030W64-NEXT: v_add_co_u32 v0, s[0:1], v0, s0 -; GFX1030W64-NEXT: s_cmp_lg_u64 s[0:1], 0 -; 
GFX1030W64-NEXT: s_addc_u32 s4, s4, s7 -; GFX1030W64-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1030W64-NEXT: s_mul_i32 s1, s5, s4 -; GFX1030W64-NEXT: s_mul_hi_u32 s7, s5, s0 -; GFX1030W64-NEXT: s_mul_i32 s6, s6, s0 -; GFX1030W64-NEXT: s_add_i32 s1, s7, s1 -; GFX1030W64-NEXT: s_mul_i32 s5, s5, s0 -; GFX1030W64-NEXT: s_add_i32 s1, s1, s6 -; GFX1030W64-NEXT: s_mul_hi_u32 s7, s4, s5 -; GFX1030W64-NEXT: s_mul_i32 s12, s4, s5 -; GFX1030W64-NEXT: s_mul_hi_u32 s5, s0, s5 -; GFX1030W64-NEXT: s_mul_hi_u32 s13, s0, s1 -; GFX1030W64-NEXT: s_mul_i32 s0, s0, s1 -; GFX1030W64-NEXT: s_mul_hi_u32 s6, s4, s1 -; GFX1030W64-NEXT: s_add_u32 s0, s5, s0 -; GFX1030W64-NEXT: s_addc_u32 s5, 0, s13 -; GFX1030W64-NEXT: s_add_u32 s0, s0, s12 -; GFX1030W64-NEXT: s_mul_i32 s1, s4, s1 -; GFX1030W64-NEXT: s_addc_u32 s0, s5, s7 -; GFX1030W64-NEXT: s_addc_u32 s5, s6, 0 -; GFX1030W64-NEXT: s_add_u32 s0, s0, s1 -; GFX1030W64-NEXT: s_addc_u32 s5, 0, s5 -; GFX1030W64-NEXT: v_add_co_u32 v0, s[0:1], v0, s0 -; GFX1030W64-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX1030W64-NEXT: s_addc_u32 s0, s4, s5 -; GFX1030W64-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1030W64-NEXT: s_mul_i32 s5, s10, s0 -; GFX1030W64-NEXT: s_mul_hi_u32 s4, s10, s0 -; GFX1030W64-NEXT: s_mul_hi_u32 s6, s11, s0 -; GFX1030W64-NEXT: s_mul_i32 s0, s11, s0 -; GFX1030W64-NEXT: s_mul_hi_u32 s7, s10, s1 -; GFX1030W64-NEXT: s_mul_hi_u32 s12, s11, s1 -; GFX1030W64-NEXT: s_mul_i32 s1, s11, s1 -; GFX1030W64-NEXT: s_add_u32 s5, s7, s5 -; GFX1030W64-NEXT: s_addc_u32 s4, 0, s4 -; GFX1030W64-NEXT: s_add_u32 s1, s5, s1 -; GFX1030W64-NEXT: s_addc_u32 s1, s4, s12 -; GFX1030W64-NEXT: s_addc_u32 s4, s6, 0 -; GFX1030W64-NEXT: s_add_u32 s6, s1, s0 -; GFX1030W64-NEXT: s_addc_u32 s7, 0, s4 -; GFX1030W64-NEXT: s_mul_hi_u32 s0, s2, s6 -; GFX1030W64-NEXT: s_mul_i32 s1, s2, s7 -; GFX1030W64-NEXT: s_mul_i32 s5, s2, s6 -; GFX1030W64-NEXT: s_add_i32 s12, s0, s1 -; GFX1030W64-NEXT: v_sub_co_u32 v0, s[0:1], s10, s5 -; GFX1030W64-NEXT: s_mul_i32 s4, s3, s6 -; GFX1030W64-NEXT: s_add_i32 s12, s12, s4 -; GFX1030W64-NEXT: v_sub_co_u32 v1, s[4:5], v0, s2 -; GFX1030W64-NEXT: s_sub_i32 s13, s11, s12 -; GFX1030W64-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX1030W64-NEXT: s_subb_u32 s13, s13, s3 -; GFX1030W64-NEXT: s_cmp_lg_u64 s[4:5], 0 -; GFX1030W64-NEXT: v_cmp_le_u32_e32 vcc, s2, v1 -; GFX1030W64-NEXT: s_subb_u32 s4, s13, 0 -; GFX1030W64-NEXT: s_cmp_ge_u32 s4, s3 -; GFX1030W64-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc -; GFX1030W64-NEXT: s_cselect_b32 s5, -1, 0 -; GFX1030W64-NEXT: s_cmp_eq_u32 s4, s3 -; GFX1030W64-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX1030W64-NEXT: s_add_u32 s4, s6, 1 -; GFX1030W64-NEXT: v_cndmask_b32_e32 v1, s5, v1, vcc -; GFX1030W64-NEXT: s_addc_u32 s5, s7, 0 -; GFX1030W64-NEXT: s_add_u32 s13, s6, 2 -; GFX1030W64-NEXT: s_addc_u32 s14, s7, 0 -; GFX1030W64-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX1030W64-NEXT: v_cmp_le_u32_e32 vcc, s2, v0 -; GFX1030W64-NEXT: s_subb_u32 s0, s11, s12 -; GFX1030W64-NEXT: v_mov_b32_e32 v2, s13 -; GFX1030W64-NEXT: s_cmp_ge_u32 s0, s3 -; GFX1030W64-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc -; GFX1030W64-NEXT: s_cselect_b32 s11, -1, 0 -; GFX1030W64-NEXT: s_cmp_eq_u32 s0, s3 -; GFX1030W64-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1 -; GFX1030W64-NEXT: s_cselect_b64 s[0:1], -1, 0 -; GFX1030W64-NEXT: v_mov_b32_e32 v1, s14 -; GFX1030W64-NEXT: v_cndmask_b32_e64 v0, s11, v0, s[0:1] -; GFX1030W64-NEXT: v_cndmask_b32_e32 v2, s4, v2, vcc -; GFX1030W64-NEXT: v_cndmask_b32_e32 v1, s5, v1, vcc -; GFX1030W64-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; GFX1030W64-NEXT: v_cndmask_b32_e32 v1, s7, v1, vcc -; GFX1030W64-NEXT: 
v_cndmask_b32_e32 v0, s6, v2, vcc +; GFX1030W64-NEXT: v_readfirstlane_b32 s8, v1 +; GFX1030W64-NEXT: v_readfirstlane_b32 s6, v0 +; GFX1030W64-NEXT: s_mul_i32 s7, s9, s8 +; GFX1030W64-NEXT: s_mul_hi_u32 s12, s9, s6 +; GFX1030W64-NEXT: s_mul_i32 s11, s10, s6 +; GFX1030W64-NEXT: s_add_i32 s7, s12, s7 +; GFX1030W64-NEXT: s_mul_i32 s13, s9, s6 +; GFX1030W64-NEXT: s_add_i32 s7, s7, s11 +; GFX1030W64-NEXT: s_mul_hi_u32 s12, s6, s13 +; GFX1030W64-NEXT: s_mul_i32 s15, s6, s7 +; GFX1030W64-NEXT: s_mul_hi_u32 s14, s8, s13 +; GFX1030W64-NEXT: s_mul_i32 s11, s8, s13 +; GFX1030W64-NEXT: s_mul_hi_u32 s13, s6, s7 +; GFX1030W64-NEXT: s_add_u32 s12, s12, s15 +; GFX1030W64-NEXT: s_addc_u32 s13, 0, s13 +; GFX1030W64-NEXT: s_mul_hi_u32 s16, s8, s7 +; GFX1030W64-NEXT: s_add_u32 s11, s12, s11 +; GFX1030W64-NEXT: s_mul_i32 s7, s8, s7 +; GFX1030W64-NEXT: s_addc_u32 s11, s13, s14 +; GFX1030W64-NEXT: s_addc_u32 s12, s16, 0 +; GFX1030W64-NEXT: s_add_u32 s7, s11, s7 +; GFX1030W64-NEXT: s_addc_u32 s11, 0, s12 +; GFX1030W64-NEXT: s_add_u32 s12, s6, s7 +; GFX1030W64-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GFX1030W64-NEXT: s_mul_hi_u32 s13, s9, s12 +; GFX1030W64-NEXT: s_cmp_lg_u64 s[6:7], 0 +; GFX1030W64-NEXT: s_mul_i32 s6, s9, s12 +; GFX1030W64-NEXT: s_addc_u32 s8, s8, s11 +; GFX1030W64-NEXT: s_mul_i32 s10, s10, s12 +; GFX1030W64-NEXT: s_mul_i32 s9, s9, s8 +; GFX1030W64-NEXT: s_mul_hi_u32 s7, s12, s6 +; GFX1030W64-NEXT: s_add_i32 s9, s13, s9 +; GFX1030W64-NEXT: s_mul_hi_u32 s11, s8, s6 +; GFX1030W64-NEXT: s_add_i32 s9, s9, s10 +; GFX1030W64-NEXT: s_mul_i32 s6, s8, s6 +; GFX1030W64-NEXT: s_mul_i32 s14, s12, s9 +; GFX1030W64-NEXT: s_mul_hi_u32 s13, s12, s9 +; GFX1030W64-NEXT: s_add_u32 s7, s7, s14 +; GFX1030W64-NEXT: s_addc_u32 s13, 0, s13 +; GFX1030W64-NEXT: s_mul_hi_u32 s10, s8, s9 +; GFX1030W64-NEXT: s_add_u32 s6, s7, s6 +; GFX1030W64-NEXT: s_mul_i32 s9, s8, s9 +; GFX1030W64-NEXT: s_addc_u32 s6, s13, s11 +; GFX1030W64-NEXT: s_addc_u32 s7, s10, 0 +; GFX1030W64-NEXT: s_add_u32 s6, s6, s9 +; GFX1030W64-NEXT: s_addc_u32 s9, 0, s7 +; GFX1030W64-NEXT: s_add_u32 s10, s12, s6 +; GFX1030W64-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GFX1030W64-NEXT: s_mul_hi_u32 s11, s2, s10 +; GFX1030W64-NEXT: s_cmp_lg_u64 s[6:7], 0 +; GFX1030W64-NEXT: s_mul_hi_u32 s6, s3, s10 +; GFX1030W64-NEXT: s_addc_u32 s7, s8, s9 +; GFX1030W64-NEXT: s_mul_i32 s8, s3, s10 +; GFX1030W64-NEXT: s_mul_i32 s10, s2, s7 +; GFX1030W64-NEXT: s_mul_hi_u32 s9, s2, s7 +; GFX1030W64-NEXT: s_add_u32 s10, s11, s10 +; GFX1030W64-NEXT: s_addc_u32 s9, 0, s9 +; GFX1030W64-NEXT: s_mul_hi_u32 s12, s3, s7 +; GFX1030W64-NEXT: s_add_u32 s8, s10, s8 +; GFX1030W64-NEXT: s_mul_i32 s7, s3, s7 +; GFX1030W64-NEXT: s_addc_u32 s6, s9, s6 +; GFX1030W64-NEXT: s_addc_u32 s8, s12, 0 +; GFX1030W64-NEXT: s_add_u32 s10, s6, s7 +; GFX1030W64-NEXT: s_addc_u32 s11, 0, s8 +; GFX1030W64-NEXT: s_mul_hi_u32 s6, s4, s10 +; GFX1030W64-NEXT: s_mul_i32 s7, s4, s11 +; GFX1030W64-NEXT: s_mul_i32 s8, s5, s10 +; GFX1030W64-NEXT: s_add_i32 s6, s6, s7 +; GFX1030W64-NEXT: s_add_i32 s12, s6, s8 +; GFX1030W64-NEXT: s_mul_i32 s6, s4, s10 +; GFX1030W64-NEXT: s_sub_i32 s8, s3, s12 +; GFX1030W64-NEXT: s_sub_u32 s13, s2, s6 +; GFX1030W64-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GFX1030W64-NEXT: s_cmp_lg_u64 s[6:7], 0 +; GFX1030W64-NEXT: s_subb_u32 s14, s8, s5 +; GFX1030W64-NEXT: s_sub_u32 s15, s13, s4 +; GFX1030W64-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GFX1030W64-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX1030W64-NEXT: s_subb_u32 s8, s14, 0 +; GFX1030W64-NEXT: s_cmp_ge_u32 s8, s5 +; GFX1030W64-NEXT: s_cselect_b32 s9, -1, 0 +; 
GFX1030W64-NEXT: s_cmp_ge_u32 s15, s4 +; GFX1030W64-NEXT: s_cselect_b32 s14, -1, 0 +; GFX1030W64-NEXT: s_cmp_eq_u32 s8, s5 +; GFX1030W64-NEXT: s_cselect_b32 s8, s14, s9 +; GFX1030W64-NEXT: s_add_u32 s9, s10, 1 +; GFX1030W64-NEXT: s_addc_u32 s14, s11, 0 +; GFX1030W64-NEXT: s_add_u32 s15, s10, 2 +; GFX1030W64-NEXT: s_addc_u32 s16, s11, 0 +; GFX1030W64-NEXT: s_cmp_lg_u32 s8, 0 +; GFX1030W64-NEXT: s_cselect_b32 s15, s15, s9 +; GFX1030W64-NEXT: s_cselect_b32 s14, s16, s14 +; GFX1030W64-NEXT: s_cmp_lg_u64 s[6:7], 0 +; GFX1030W64-NEXT: s_subb_u32 s3, s3, s12 +; GFX1030W64-NEXT: s_cmp_ge_u32 s3, s5 +; GFX1030W64-NEXT: s_cselect_b32 s6, -1, 0 +; GFX1030W64-NEXT: s_cmp_ge_u32 s13, s4 +; GFX1030W64-NEXT: s_cselect_b32 s7, -1, 0 +; GFX1030W64-NEXT: s_cmp_eq_u32 s3, s5 +; GFX1030W64-NEXT: s_cselect_b32 s3, s7, s6 +; GFX1030W64-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1030W64-NEXT: s_cselect_b32 s7, s14, s11 +; GFX1030W64-NEXT: s_cselect_b32 s6, s15, s10 ; GFX1030W64-NEXT: s_cbranch_execnz .LBB16_3 ; GFX1030W64-NEXT: .LBB16_2: -; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX1030W64-NEXT: s_sub_i32 s1, 0, s2 +; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GFX1030W64-NEXT: s_sub_i32 s5, 0, s4 +; GFX1030W64-NEXT: s_mov_b32 s7, 0 ; GFX1030W64-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX1030W64-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX1030W64-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1030W64-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1030W64-NEXT: s_mul_i32 s1, s1, s0 -; GFX1030W64-NEXT: s_mul_hi_u32 s1, s0, s1 -; GFX1030W64-NEXT: s_add_i32 s0, s0, s1 -; GFX1030W64-NEXT: s_mul_hi_u32 s0, s10, s0 -; GFX1030W64-NEXT: s_mul_i32 s1, s0, s2 -; GFX1030W64-NEXT: s_add_i32 s3, s0, 1 -; GFX1030W64-NEXT: s_sub_i32 s1, s10, s1 -; GFX1030W64-NEXT: s_sub_i32 s4, s1, s2 -; GFX1030W64-NEXT: s_cmp_ge_u32 s1, s2 -; GFX1030W64-NEXT: s_cselect_b32 s0, s3, s0 -; GFX1030W64-NEXT: s_cselect_b32 s1, s4, s1 -; GFX1030W64-NEXT: s_add_i32 s3, s0, 1 -; GFX1030W64-NEXT: s_cmp_ge_u32 s1, s2 -; GFX1030W64-NEXT: s_mov_b32 s1, 0 -; GFX1030W64-NEXT: s_cselect_b32 s0, s3, s0 -; GFX1030W64-NEXT: v_mov_b32_e32 v0, s0 -; GFX1030W64-NEXT: v_mov_b32_e32 v1, s1 +; GFX1030W64-NEXT: v_readfirstlane_b32 s3, v0 +; GFX1030W64-NEXT: s_mul_i32 s5, s5, s3 +; GFX1030W64-NEXT: s_mul_hi_u32 s5, s3, s5 +; GFX1030W64-NEXT: s_add_i32 s3, s3, s5 +; GFX1030W64-NEXT: s_mul_hi_u32 s3, s2, s3 +; GFX1030W64-NEXT: s_mul_i32 s5, s3, s4 +; GFX1030W64-NEXT: s_sub_i32 s2, s2, s5 +; GFX1030W64-NEXT: s_add_i32 s5, s3, 1 +; GFX1030W64-NEXT: s_sub_i32 s6, s2, s4 +; GFX1030W64-NEXT: s_cmp_ge_u32 s2, s4 +; GFX1030W64-NEXT: s_cselect_b32 s3, s5, s3 +; GFX1030W64-NEXT: s_cselect_b32 s2, s6, s2 +; GFX1030W64-NEXT: s_add_i32 s5, s3, 1 +; GFX1030W64-NEXT: s_cmp_ge_u32 s2, s4 +; GFX1030W64-NEXT: s_cselect_b32 s6, s5, s3 ; GFX1030W64-NEXT: .LBB16_3: +; GFX1030W64-NEXT: v_mov_b32_e32 v0, s6 ; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0 -; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] +; GFX1030W64-NEXT: v_mov_b32_e32 v1, s7 +; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; GFX1030W64-NEXT: s_endpgm ; GFX1030W64-NEXT: .LBB16_4: -; GFX1030W64-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1030W64-NEXT: ; implicit-def: $sgpr6_sgpr7 ; GFX1030W64-NEXT: s_branch .LBB16_2 ; ; GFX11-LABEL: sudiv64: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_clause 0x1 -; GFX11-NEXT: s_load_b128 s[8:11], s[4:5], 0x24 -; GFX11-NEXT: s_load_b64 s[2:3], s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_or_b64 s[4:5], 
s[10:11], s[2:3] -; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5] +; GFX11-NEXT: s_mov_b32 s6, 0 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX11-NEXT: s_cmp_lg_u64 s[6:7], 0 ; GFX11-NEXT: s_cbranch_scc0 .LBB16_4 ; GFX11-NEXT: ; %bb.1: -; GFX11-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX11-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GFX11-NEXT: s_sub_u32 s5, 0, s2 -; GFX11-NEXT: s_subb_u32 s6, 0, s3 +; GFX11-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GFX11-NEXT: v_cvt_f32_u32_e32 v1, s5 +; GFX11-NEXT: s_sub_u32 s9, 0, s4 +; GFX11-NEXT: s_subb_u32 s10, 0, s5 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_fmamk_f32 v0, v1, 0x4f800000, v0 ; GFX11-NEXT: v_rcp_f32_e32 v0, v0 @@ -2968,310 +2951,308 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GFX11-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_readfirstlane_b32 s0, v1 -; GFX11-NEXT: v_readfirstlane_b32 s1, v0 -; GFX11-NEXT: s_mul_i32 s7, s5, s0 -; GFX11-NEXT: s_mul_hi_u32 s13, s5, s1 -; GFX11-NEXT: s_mul_i32 s12, s6, s1 -; GFX11-NEXT: s_add_i32 s7, s13, s7 -; GFX11-NEXT: s_mul_i32 s14, s5, s1 -; GFX11-NEXT: s_add_i32 s7, s7, s12 -; GFX11-NEXT: s_mul_hi_u32 s13, s1, s14 -; GFX11-NEXT: s_mul_hi_u32 s15, s0, s14 -; GFX11-NEXT: s_mul_i32 s12, s0, s14 -; GFX11-NEXT: s_mul_hi_u32 s14, s1, s7 -; GFX11-NEXT: s_mul_i32 s1, s1, s7 -; GFX11-NEXT: s_mul_hi_u32 s16, s0, s7 -; GFX11-NEXT: s_add_u32 s1, s13, s1 -; GFX11-NEXT: s_addc_u32 s13, 0, s14 -; GFX11-NEXT: s_add_u32 s1, s1, s12 -; GFX11-NEXT: s_mul_i32 s7, s0, s7 -; GFX11-NEXT: s_addc_u32 s1, s13, s15 -; GFX11-NEXT: s_addc_u32 s12, s16, 0 -; GFX11-NEXT: s_add_u32 s1, s1, s7 -; GFX11-NEXT: s_addc_u32 s7, 0, s12 -; GFX11-NEXT: v_add_co_u32 v0, s1, v0, s1 -; GFX11-NEXT: s_cmp_lg_u32 s1, 0 -; GFX11-NEXT: s_addc_u32 s0, s0, s7 -; GFX11-NEXT: v_readfirstlane_b32 s1, v0 -; GFX11-NEXT: s_mul_i32 s7, s5, s0 -; GFX11-NEXT: s_mul_hi_u32 s12, s5, s1 -; GFX11-NEXT: s_mul_i32 s6, s6, s1 -; GFX11-NEXT: s_add_i32 s7, s12, s7 -; GFX11-NEXT: s_mul_i32 s5, s5, s1 -; GFX11-NEXT: s_add_i32 s7, s7, s6 -; GFX11-NEXT: s_mul_hi_u32 s12, s0, s5 -; GFX11-NEXT: s_mul_i32 s13, s0, s5 -; GFX11-NEXT: s_mul_hi_u32 s5, s1, s5 -; GFX11-NEXT: s_mul_hi_u32 s14, s1, s7 -; GFX11-NEXT: s_mul_i32 s1, s1, s7 -; GFX11-NEXT: s_mul_hi_u32 s6, s0, s7 -; GFX11-NEXT: s_add_u32 s1, s5, s1 -; GFX11-NEXT: s_addc_u32 s5, 0, s14 -; GFX11-NEXT: s_add_u32 s1, s1, s13 -; GFX11-NEXT: s_mul_i32 s7, s0, s7 -; GFX11-NEXT: s_addc_u32 s1, s5, s12 -; GFX11-NEXT: s_addc_u32 s5, s6, 0 -; GFX11-NEXT: s_add_u32 s1, s1, s7 -; GFX11-NEXT: s_addc_u32 s5, 0, s5 -; GFX11-NEXT: v_add_co_u32 v0, s1, v0, s1 -; GFX11-NEXT: s_cmp_lg_u32 s1, 0 -; GFX11-NEXT: s_addc_u32 s0, s0, s5 -; GFX11-NEXT: v_readfirstlane_b32 s1, v0 -; GFX11-NEXT: s_mul_i32 s6, s10, s0 -; GFX11-NEXT: s_mul_hi_u32 s5, s10, s0 -; GFX11-NEXT: s_mul_hi_u32 s7, s11, s0 -; GFX11-NEXT: s_mul_i32 s0, s11, s0 -; GFX11-NEXT: s_mul_hi_u32 s12, s10, s1 -; GFX11-NEXT: s_mul_hi_u32 s13, s11, s1 -; GFX11-NEXT: s_mul_i32 s1, s11, s1 -; GFX11-NEXT: s_add_u32 s6, s12, s6 -; GFX11-NEXT: s_addc_u32 s5, 0, s5 -; GFX11-NEXT: s_add_u32 s1, s6, s1 -; GFX11-NEXT: s_addc_u32 s1, s5, s13 -; GFX11-NEXT: s_addc_u32 s5, s7, 0 -; GFX11-NEXT: s_add_u32 s1, s1, s0 -; GFX11-NEXT: s_addc_u32 s5, 0, s5 -; GFX11-NEXT: s_mul_hi_u32 s0, s2, s1 -; GFX11-NEXT: s_mul_i32 s7, s2, s5 -; 
GFX11-NEXT: s_mul_i32 s12, s2, s1 -; GFX11-NEXT: s_add_i32 s0, s0, s7 -; GFX11-NEXT: v_sub_co_u32 v0, s7, s10, s12 -; GFX11-NEXT: s_mul_i32 s6, s3, s1 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: s_add_i32 s0, s0, s6 -; GFX11-NEXT: v_sub_co_u32 v1, s12, v0, s2 -; GFX11-NEXT: s_sub_i32 s6, s11, s0 -; GFX11-NEXT: s_cmp_lg_u32 s7, 0 -; GFX11-NEXT: s_subb_u32 s6, s6, s3 +; GFX11-NEXT: v_readfirstlane_b32 s7, v1 +; GFX11-NEXT: v_readfirstlane_b32 s8, v0 +; GFX11-NEXT: s_mul_i32 s11, s9, s7 +; GFX11-NEXT: s_mul_hi_u32 s13, s9, s8 +; GFX11-NEXT: s_mul_i32 s12, s10, s8 +; GFX11-NEXT: s_add_i32 s11, s13, s11 +; GFX11-NEXT: s_mul_i32 s14, s9, s8 +; GFX11-NEXT: s_add_i32 s11, s11, s12 +; GFX11-NEXT: s_mul_hi_u32 s13, s8, s14 +; GFX11-NEXT: s_mul_i32 s16, s8, s11 +; GFX11-NEXT: s_mul_hi_u32 s15, s7, s14 +; GFX11-NEXT: s_mul_i32 s12, s7, s14 +; GFX11-NEXT: s_mul_hi_u32 s14, s8, s11 +; GFX11-NEXT: s_add_u32 s13, s13, s16 +; GFX11-NEXT: s_addc_u32 s14, 0, s14 +; GFX11-NEXT: s_mul_hi_u32 s17, s7, s11 +; GFX11-NEXT: s_add_u32 s12, s13, s12 +; GFX11-NEXT: s_mul_i32 s11, s7, s11 +; GFX11-NEXT: s_addc_u32 s12, s14, s15 +; GFX11-NEXT: s_addc_u32 s13, s17, 0 +; GFX11-NEXT: s_add_u32 s11, s12, s11 +; GFX11-NEXT: s_addc_u32 s12, 0, s13 +; GFX11-NEXT: s_add_u32 s8, s8, s11 +; GFX11-NEXT: s_cselect_b32 s11, -1, 0 +; GFX11-NEXT: s_mul_hi_u32 s13, s9, s8 +; GFX11-NEXT: s_cmp_lg_u32 s11, 0 +; GFX11-NEXT: s_mul_i32 s11, s9, s8 +; GFX11-NEXT: s_addc_u32 s7, s7, s12 +; GFX11-NEXT: s_mul_i32 s10, s10, s8 +; GFX11-NEXT: s_mul_i32 s9, s9, s7 +; GFX11-NEXT: s_mul_hi_u32 s12, s8, s11 +; GFX11-NEXT: s_add_i32 s9, s13, s9 +; GFX11-NEXT: s_mul_hi_u32 s13, s7, s11 +; GFX11-NEXT: s_add_i32 s9, s9, s10 +; GFX11-NEXT: s_mul_i32 s10, s7, s11 +; GFX11-NEXT: s_mul_i32 s15, s8, s9 +; GFX11-NEXT: s_mul_hi_u32 s14, s8, s9 +; GFX11-NEXT: s_add_u32 s12, s12, s15 +; GFX11-NEXT: s_addc_u32 s14, 0, s14 +; GFX11-NEXT: s_mul_hi_u32 s11, s7, s9 +; GFX11-NEXT: s_add_u32 s10, s12, s10 +; GFX11-NEXT: s_mul_i32 s9, s7, s9 +; GFX11-NEXT: s_addc_u32 s10, s14, s13 +; GFX11-NEXT: s_addc_u32 s11, s11, 0 +; GFX11-NEXT: s_add_u32 s9, s10, s9 +; GFX11-NEXT: s_addc_u32 s10, 0, s11 +; GFX11-NEXT: s_add_u32 s8, s8, s9 +; GFX11-NEXT: s_cselect_b32 s9, -1, 0 +; GFX11-NEXT: s_mul_hi_u32 s11, s2, s8 +; GFX11-NEXT: s_cmp_lg_u32 s9, 0 +; GFX11-NEXT: s_mul_hi_u32 s9, s3, s8 +; GFX11-NEXT: s_addc_u32 s7, s7, s10 +; GFX11-NEXT: s_mul_i32 s8, s3, s8 +; GFX11-NEXT: s_mul_i32 s12, s2, s7 +; GFX11-NEXT: s_mul_hi_u32 s10, s2, s7 +; GFX11-NEXT: s_add_u32 s11, s11, s12 +; GFX11-NEXT: s_addc_u32 s10, 0, s10 +; GFX11-NEXT: s_mul_hi_u32 s13, s3, s7 +; GFX11-NEXT: s_add_u32 s8, s11, s8 +; GFX11-NEXT: s_mul_i32 s7, s3, s7 +; GFX11-NEXT: s_addc_u32 s8, s10, s9 +; GFX11-NEXT: s_addc_u32 s9, s13, 0 +; GFX11-NEXT: s_add_u32 s7, s8, s7 +; GFX11-NEXT: s_addc_u32 s8, 0, s9 +; GFX11-NEXT: s_mul_hi_u32 s9, s4, s7 +; GFX11-NEXT: s_mul_i32 s10, s4, s8 +; GFX11-NEXT: s_mul_i32 s11, s5, s7 +; GFX11-NEXT: s_add_i32 s9, s9, s10 +; GFX11-NEXT: s_mul_i32 s10, s4, s7 +; GFX11-NEXT: s_add_i32 s9, s9, s11 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_sub_i32 s11, s3, s9 +; GFX11-NEXT: s_sub_u32 s10, s2, s10 +; GFX11-NEXT: s_cselect_b32 s12, -1, 0 ; GFX11-NEXT: s_cmp_lg_u32 s12, 0 -; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v1 -; GFX11-NEXT: s_subb_u32 s6, s6, 0 +; GFX11-NEXT: s_subb_u32 s11, s11, s5 +; GFX11-NEXT: s_sub_u32 s13, s10, s4 +; GFX11-NEXT: s_cselect_b32 s14, -1, 0 +; GFX11-NEXT: s_delay_alu 
instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_cmp_lg_u32 s14, 0 +; GFX11-NEXT: s_subb_u32 s11, s11, 0 +; GFX11-NEXT: s_cmp_ge_u32 s11, s5 +; GFX11-NEXT: s_cselect_b32 s14, -1, 0 +; GFX11-NEXT: s_cmp_ge_u32 s13, s4 +; GFX11-NEXT: s_cselect_b32 s13, -1, 0 +; GFX11-NEXT: s_cmp_eq_u32 s11, s5 +; GFX11-NEXT: s_cselect_b32 s11, s13, s14 +; GFX11-NEXT: s_add_u32 s13, s7, 1 +; GFX11-NEXT: s_addc_u32 s14, s8, 0 +; GFX11-NEXT: s_add_u32 s15, s7, 2 +; GFX11-NEXT: s_addc_u32 s16, s8, 0 +; GFX11-NEXT: s_cmp_lg_u32 s11, 0 +; GFX11-NEXT: s_cselect_b32 s11, s15, s13 +; GFX11-NEXT: s_cselect_b32 s13, s16, s14 +; GFX11-NEXT: s_cmp_lg_u32 s12, 0 +; GFX11-NEXT: s_subb_u32 s3, s3, s9 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: s_cmp_ge_u32 s6, s3 -; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo -; GFX11-NEXT: s_cselect_b32 s12, -1, 0 -; GFX11-NEXT: s_cmp_eq_u32 s6, s3 -; GFX11-NEXT: s_cselect_b32 vcc_lo, -1, 0 -; GFX11-NEXT: s_add_u32 s6, s1, 1 -; GFX11-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo -; GFX11-NEXT: s_addc_u32 s12, s5, 0 -; GFX11-NEXT: s_add_u32 s13, s1, 2 -; GFX11-NEXT: s_addc_u32 s14, s5, 0 -; GFX11-NEXT: s_cmp_lg_u32 s7, 0 -; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v0 -; GFX11-NEXT: s_subb_u32 s0, s11, s0 -; GFX11-NEXT: v_mov_b32_e32 v2, s13 -; GFX11-NEXT: s_cmp_ge_u32 s0, s3 -; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo -; GFX11-NEXT: s_cselect_b32 s7, -1, 0 -; GFX11-NEXT: s_cmp_eq_u32 s0, s3 -; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1 -; GFX11-NEXT: s_cselect_b32 s0, -1, 0 -; GFX11-NEXT: v_mov_b32_e32 v1, s14 -; GFX11-NEXT: v_cndmask_b32_e64 v0, s7, v0, s0 -; GFX11-NEXT: v_cndmask_b32_e32 v2, s6, v2, vcc_lo -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo -; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-NEXT: v_cndmask_b32_e32 v1, s5, v1, vcc_lo -; GFX11-NEXT: v_cndmask_b32_e32 v0, s1, v2, vcc_lo -; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4 +; GFX11-NEXT: s_cmp_ge_u32 s3, s5 +; GFX11-NEXT: s_cselect_b32 s9, -1, 0 +; GFX11-NEXT: s_cmp_ge_u32 s10, s4 +; GFX11-NEXT: s_cselect_b32 s10, -1, 0 +; GFX11-NEXT: s_cmp_eq_u32 s3, s5 +; GFX11-NEXT: s_cselect_b32 s3, s10, s9 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_cmp_lg_u32 s3, 0 +; GFX11-NEXT: s_cselect_b32 s9, s13, s8 +; GFX11-NEXT: s_cselect_b32 s8, s11, s7 +; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6 ; GFX11-NEXT: s_cbranch_vccnz .LBB16_3 ; GFX11-NEXT: .LBB16_2: -; GFX11-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX11-NEXT: s_sub_i32 s1, 0, s2 +; GFX11-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GFX11-NEXT: s_sub_i32 s5, 0, s4 +; GFX11-NEXT: s_mov_b32 s9, 0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX11-NEXT: s_waitcnt_depctr 0xfff ; GFX11-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: v_readfirstlane_b32 s0, v0 -; GFX11-NEXT: s_mul_i32 s1, s1, s0 -; GFX11-NEXT: s_mul_hi_u32 s1, s0, s1 +; GFX11-NEXT: v_readfirstlane_b32 s3, v0 +; GFX11-NEXT: s_mul_i32 s5, s5, s3 +; GFX11-NEXT: s_mul_hi_u32 s5, s3, s5 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: s_add_i32 s0, s0, s1 -; GFX11-NEXT: s_mul_hi_u32 s0, s10, s0 -; 
GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: s_mul_i32 s1, s0, s2 -; GFX11-NEXT: s_add_i32 s3, s0, 1 -; GFX11-NEXT: s_sub_i32 s1, s10, s1 -; GFX11-NEXT: s_sub_i32 s4, s1, s2 -; GFX11-NEXT: s_cmp_ge_u32 s1, s2 -; GFX11-NEXT: s_cselect_b32 s0, s3, s0 -; GFX11-NEXT: s_cselect_b32 s1, s4, s1 -; GFX11-NEXT: s_add_i32 s3, s0, 1 -; GFX11-NEXT: s_cmp_ge_u32 s1, s2 -; GFX11-NEXT: s_mov_b32 s1, 0 -; GFX11-NEXT: s_cselect_b32 s0, s3, s0 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT: s_add_i32 s3, s3, s5 +; GFX11-NEXT: s_mul_hi_u32 s3, s2, s3 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_mul_i32 s5, s3, s4 +; GFX11-NEXT: s_sub_i32 s2, s2, s5 +; GFX11-NEXT: s_add_i32 s5, s3, 1 +; GFX11-NEXT: s_sub_i32 s6, s2, s4 +; GFX11-NEXT: s_cmp_ge_u32 s2, s4 +; GFX11-NEXT: s_cselect_b32 s3, s5, s3 +; GFX11-NEXT: s_cselect_b32 s2, s6, s2 +; GFX11-NEXT: s_add_i32 s5, s3, 1 +; GFX11-NEXT: s_cmp_ge_u32 s2, s4 +; GFX11-NEXT: s_cselect_b32 s8, s5, s3 ; GFX11-NEXT: .LBB16_3: -; GFX11-NEXT: v_mov_b32_e32 v2, 0 -; GFX11-NEXT: global_store_b64 v2, v[0:1], s[8:9] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_mov_b32_e32 v0, s8 +; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s9 +; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX11-NEXT: s_endpgm ; GFX11-NEXT: .LBB16_4: -; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX11-NEXT: ; implicit-def: $sgpr8_sgpr9 ; GFX11-NEXT: s_branch .LBB16_2 ; ; GFX1250-LABEL: sudiv64: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: s_load_b128 s[8:11], s[4:5], 0x24 -; GFX1250-NEXT: s_load_b64 s[2:3], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3] +; GFX1250-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_and_b64 s[0:1], s[0:1], lit64(0xffffffff00000000) -; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1250-NEXT: s_and_b64 s[6:7], s[6:7], 0xffffffff00000000 +; GFX1250-NEXT: s_cmp_lg_u64 s[6:7], 0 ; GFX1250-NEXT: s_cbranch_scc0 .LBB16_4 ; GFX1250-NEXT: ; %bb.1: -; GFX1250-NEXT: s_cvt_f32_u32 s0, s2 -; GFX1250-NEXT: s_cvt_f32_u32 s1, s3 -; GFX1250-NEXT: s_sub_nc_u64 s[6:7], 0, s[2:3] +; GFX1250-NEXT: s_cvt_f32_u32 s6, s4 +; GFX1250-NEXT: s_cvt_f32_u32 s7, s5 +; GFX1250-NEXT: s_sub_nc_u64 s[10:11], 0, s[4:5] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_2) | instskip(NEXT) | instid1(SALU_CYCLE_3) -; GFX1250-NEXT: s_fmac_f32 s0, s1, 0x4f800000 -; GFX1250-NEXT: v_s_rcp_f32 s0, s0 +; GFX1250-NEXT: s_fmac_f32 s6, s7, 0x4f800000 +; GFX1250-NEXT: v_s_rcp_f32 s6, s6 ; GFX1250-NEXT: s_delay_alu instid0(TRANS32_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_3) -; GFX1250-NEXT: s_mul_f32 s0, s0, 0x5f7ffffc -; GFX1250-NEXT: s_mul_f32 s1, s0, 0x2f800000 +; GFX1250-NEXT: s_mul_f32 s6, s6, 0x5f7ffffc +; GFX1250-NEXT: s_mul_f32 s7, s6, 0x2f800000 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3) -; GFX1250-NEXT: s_trunc_f32 s1, s1 -; GFX1250-NEXT: s_fmac_f32 s0, s1, 0xcf800000 -; GFX1250-NEXT: s_cvt_u32_f32 s5, s1 -; GFX1250-NEXT: s_mov_b32 s1, 0 +; GFX1250-NEXT: s_trunc_f32 s7, s7 +; GFX1250-NEXT: s_fmac_f32 s6, s7, 0xcf800000 +; GFX1250-NEXT: 
s_cvt_u32_f32 s9, s7 +; GFX1250-NEXT: s_mov_b32 s7, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3) -; GFX1250-NEXT: s_cvt_u32_f32 s4, s0 -; GFX1250-NEXT: s_mul_u64 s[12:13], s[6:7], s[4:5] +; GFX1250-NEXT: s_cvt_u32_f32 s8, s6 +; GFX1250-NEXT: s_mul_u64 s[12:13], s[10:11], s[8:9] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_mul_hi_u32 s15, s4, s13 -; GFX1250-NEXT: s_mul_i32 s14, s4, s13 -; GFX1250-NEXT: s_mul_hi_u32 s0, s4, s12 -; GFX1250-NEXT: s_mul_i32 s17, s5, s12 -; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[0:1], s[14:15] -; GFX1250-NEXT: s_mul_hi_u32 s16, s5, s12 -; GFX1250-NEXT: s_mul_hi_u32 s18, s5, s13 -; GFX1250-NEXT: s_add_co_u32 s0, s14, s17 -; GFX1250-NEXT: s_add_co_ci_u32 s0, s15, s16 -; GFX1250-NEXT: s_mul_i32 s12, s5, s13 +; GFX1250-NEXT: s_mul_hi_u32 s15, s8, s13 +; GFX1250-NEXT: s_mul_i32 s14, s8, s13 +; GFX1250-NEXT: s_mul_hi_u32 s6, s8, s12 +; GFX1250-NEXT: s_mul_i32 s17, s9, s12 +; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[6:7], s[14:15] +; GFX1250-NEXT: s_mul_hi_u32 s16, s9, s12 +; GFX1250-NEXT: s_mul_hi_u32 s18, s9, s13 +; GFX1250-NEXT: s_add_co_u32 s6, s14, s17 +; GFX1250-NEXT: s_add_co_ci_u32 s6, s15, s16 +; GFX1250-NEXT: s_mul_i32 s12, s9, s13 ; GFX1250-NEXT: s_add_co_ci_u32 s13, s18, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[0:1], s[12:13] -; GFX1250-NEXT: v_add_co_u32 v0, s0, s4, s12 -; GFX1250-NEXT: s_cmp_lg_u32 s0, 0 -; GFX1250-NEXT: s_add_co_ci_u32 s5, s5, s13 -; GFX1250-NEXT: v_readfirstlane_b32 s4, v0 -; GFX1250-NEXT: s_mul_u64 s[6:7], s[6:7], s[4:5] +; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[6:7], s[12:13] +; GFX1250-NEXT: s_add_co_u32 s8, s8, s12 +; GFX1250-NEXT: s_cselect_b32 s6, -1, 0 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: s_cmp_lg_u32 s6, 0 +; GFX1250-NEXT: s_add_co_ci_u32 s9, s9, s13 +; GFX1250-NEXT: s_mul_u64 s[10:11], s[10:11], s[8:9] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_mul_hi_u32 s13, s4, s7 -; GFX1250-NEXT: s_mul_i32 s12, s4, s7 -; GFX1250-NEXT: s_mul_hi_u32 s0, s4, s6 -; GFX1250-NEXT: s_mul_i32 s15, s5, s6 -; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[0:1], s[12:13] -; GFX1250-NEXT: s_mul_hi_u32 s14, s5, s6 -; GFX1250-NEXT: s_mul_hi_u32 s4, s5, s7 -; GFX1250-NEXT: s_add_co_u32 s0, s12, s15 -; GFX1250-NEXT: s_add_co_ci_u32 s0, s13, s14 -; GFX1250-NEXT: s_mul_i32 s6, s5, s7 -; GFX1250-NEXT: s_add_co_ci_u32 s7, s4, 0 +; GFX1250-NEXT: s_mul_hi_u32 s13, s8, s11 +; GFX1250-NEXT: s_mul_i32 s12, s8, s11 +; GFX1250-NEXT: s_mul_hi_u32 s6, s8, s10 +; GFX1250-NEXT: s_mul_i32 s15, s9, s10 +; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[6:7], s[12:13] +; GFX1250-NEXT: s_mul_hi_u32 s14, s9, s10 +; GFX1250-NEXT: s_mul_hi_u32 s16, s9, s11 +; GFX1250-NEXT: s_add_co_u32 s6, s12, s15 +; GFX1250-NEXT: s_add_co_ci_u32 s6, s13, s14 +; GFX1250-NEXT: s_mul_i32 s10, s9, s11 +; GFX1250-NEXT: s_add_co_ci_u32 s11, s16, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_add_nc_u64 s[6:7], s[0:1], s[6:7] -; GFX1250-NEXT: v_add_co_u32 v0, s0, v0, s6 -; GFX1250-NEXT: s_cmp_lg_u32 s0, 0 -; GFX1250-NEXT: s_add_co_ci_u32 s0, s5, s7 -; GFX1250-NEXT: v_readfirstlane_b32 s7, v0 -; GFX1250-NEXT: s_mul_hi_u32 s5, s10, s0 -; GFX1250-NEXT: s_mul_i32 s4, s10, s0 -; GFX1250-NEXT: s_mul_hi_u32 s12, s11, s0 -; GFX1250-NEXT: s_mul_i32 s6, s11, s0 -; GFX1250-NEXT: s_mul_hi_u32 s0, s10, s7 
-; GFX1250-NEXT: s_mul_i32 s13, s11, s7 -; GFX1250-NEXT: s_add_nc_u64 s[4:5], s[0:1], s[4:5] -; GFX1250-NEXT: s_mul_hi_u32 s0, s11, s7 -; GFX1250-NEXT: s_add_co_u32 s4, s4, s13 -; GFX1250-NEXT: s_add_co_ci_u32 s0, s5, s0 -; GFX1250-NEXT: s_add_co_ci_u32 s7, s12, 0 +; GFX1250-NEXT: s_add_nc_u64 s[10:11], s[6:7], s[10:11] +; GFX1250-NEXT: s_add_co_u32 s8, s8, s10 +; GFX1250-NEXT: s_cselect_b32 s10, -1, 0 +; GFX1250-NEXT: s_mul_hi_u32 s6, s2, s8 +; GFX1250-NEXT: s_cmp_lg_u32 s10, 0 +; GFX1250-NEXT: s_mul_hi_u32 s12, s3, s8 +; GFX1250-NEXT: s_add_co_ci_u32 s10, s9, s11 +; GFX1250-NEXT: s_mul_i32 s11, s3, s8 +; GFX1250-NEXT: s_mul_hi_u32 s9, s2, s10 +; GFX1250-NEXT: s_mul_i32 s8, s2, s10 +; GFX1250-NEXT: s_mul_hi_u32 s13, s3, s10 +; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[6:7], s[8:9] +; GFX1250-NEXT: s_mul_i32 s10, s3, s10 +; GFX1250-NEXT: s_add_co_u32 s6, s8, s11 +; GFX1250-NEXT: s_add_co_ci_u32 s6, s9, s12 +; GFX1250-NEXT: s_add_co_ci_u32 s11, s13, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_add_nc_u64 s[4:5], s[0:1], s[6:7] -; GFX1250-NEXT: s_and_b64 s[6:7], s[4:5], lit64(0xffffffff00000000) +; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[6:7], s[10:11] +; GFX1250-NEXT: s_and_b64 s[10:11], s[8:9], 0xffffffff00000000 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_or_b32 s6, s6, s4 -; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], s[6:7] -; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[6:7], 2 -; GFX1250-NEXT: v_sub_co_u32 v0, s0, s10, s4 -; GFX1250-NEXT: s_sub_co_i32 s4, s11, s5 -; GFX1250-NEXT: s_cmp_lg_u32 s0, 0 -; GFX1250-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15 -; GFX1250-NEXT: v_sub_co_u32 v1, s12, v0, s2 -; GFX1250-NEXT: s_sub_co_ci_u32 s4, s4, s3 -; GFX1250-NEXT: s_cmp_lg_u32 s12, 0 -; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[6:7], 1 -; GFX1250-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v1 -; GFX1250-NEXT: s_sub_co_ci_u32 s4, s4, 0 +; GFX1250-NEXT: s_or_b32 s10, s10, s8 +; GFX1250-NEXT: s_mul_u64 s[8:9], s[4:5], s[10:11] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_cmp_ge_u32 s4, s3 -; GFX1250-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX1250-NEXT: s_sub_co_u32 s6, s2, s8 +; GFX1250-NEXT: s_cselect_b32 s8, -1, 0 +; GFX1250-NEXT: s_sub_co_i32 s12, s3, s9 +; GFX1250-NEXT: s_cmp_lg_u32 s8, 0 +; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, s5 +; GFX1250-NEXT: s_sub_co_u32 s13, s6, s4 ; GFX1250-NEXT: s_cselect_b32 s14, -1, 0 -; GFX1250-NEXT: s_cmp_eq_u32 s4, s3 -; GFX1250-NEXT: s_cselect_b32 vcc_lo, -1, 0 -; GFX1250-NEXT: s_cmp_lg_u32 s0, 0 -; GFX1250-NEXT: v_cndmask_b32_e32 v1, s14, v1, vcc_lo -; GFX1250-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v0 -; GFX1250-NEXT: s_sub_co_ci_u32 s0, s11, s5 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: s_cmp_lg_u32 s14, 0 +; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, 0 +; GFX1250-NEXT: s_cmp_ge_u32 s12, s5 +; GFX1250-NEXT: s_cselect_b32 s14, -1, 0 +; GFX1250-NEXT: s_cmp_ge_u32 s13, s4 +; GFX1250-NEXT: s_cselect_b32 s15, -1, 0 +; GFX1250-NEXT: s_cmp_eq_u32 s12, s5 +; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[10:11], 1 +; GFX1250-NEXT: s_cselect_b32 s16, s15, s14 +; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[10:11], 2 +; GFX1250-NEXT: s_cmp_lg_u32 s16, 0 +; GFX1250-NEXT: s_cselect_b32 s12, s14, s12 +; GFX1250-NEXT: s_cselect_b32 s13, s15, s13 +; GFX1250-NEXT: s_cmp_lg_u32 s8, 0 +; GFX1250-NEXT: s_sub_co_ci_u32 s3, s3, s9 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; 
GFX1250-NEXT: s_cmp_ge_u32 s3, s5 +; GFX1250-NEXT: s_cselect_b32 s8, -1, 0 +; GFX1250-NEXT: s_cmp_ge_u32 s6, s4 +; GFX1250-NEXT: s_cselect_b32 s6, -1, 0 +; GFX1250-NEXT: s_cmp_eq_u32 s3, s5 +; GFX1250-NEXT: s_cselect_b32 s3, s6, s8 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_cmp_ge_u32 s0, s3 -; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo -; GFX1250-NEXT: s_cselect_b32 s4, -1, 0 -; GFX1250-NEXT: s_cmp_eq_u32 s0, s3 -; GFX1250-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1 -; GFX1250-NEXT: s_cselect_b32 s0, -1, 0 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_cndmask_b32_e64 v0, s4, v0, s0 -; GFX1250-NEXT: v_cndmask_b32_e32 v2, s12, v2, vcc_lo -; GFX1250-NEXT: v_cndmask_b32_e32 v1, s13, v3, vcc_lo -; GFX1250-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX1250-NEXT: v_cndmask_b32_e32 v1, s7, v1, vcc_lo -; GFX1250-NEXT: v_cndmask_b32_e32 v0, s6, v2, vcc_lo +; GFX1250-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1250-NEXT: s_cselect_b32 s9, s13, s11 +; GFX1250-NEXT: s_cselect_b32 s8, s12, s10 ; GFX1250-NEXT: s_cbranch_execnz .LBB16_3 ; GFX1250-NEXT: .LBB16_2: -; GFX1250-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GFX1250-NEXT: s_sub_co_i32 s1, 0, s2 +; GFX1250-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GFX1250-NEXT: s_sub_co_i32 s5, 0, s4 +; GFX1250-NEXT: s_mov_b32 s9, 0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(TRANS32_DEP_1) ; GFX1250-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX1250-NEXT: v_nop ; GFX1250-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1250-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1250-NEXT: s_mul_i32 s1, s1, s0 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v0 +; GFX1250-NEXT: s_mul_i32 s5, s5, s3 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_mul_hi_u32 s1, s0, s1 -; GFX1250-NEXT: s_add_co_i32 s0, s0, s1 +; GFX1250-NEXT: s_mul_hi_u32 s5, s3, s5 +; GFX1250-NEXT: s_add_co_i32 s3, s3, s5 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_mul_hi_u32 s0, s10, s0 -; GFX1250-NEXT: s_mul_i32 s1, s0, s2 -; GFX1250-NEXT: s_add_co_i32 s3, s0, 1 -; GFX1250-NEXT: s_sub_co_i32 s1, s10, s1 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: s_sub_co_i32 s4, s1, s2 -; GFX1250-NEXT: s_cmp_ge_u32 s1, s2 -; GFX1250-NEXT: s_cselect_b32 s0, s3, s0 -; GFX1250-NEXT: s_cselect_b32 s1, s4, s1 -; GFX1250-NEXT: s_add_co_i32 s3, s0, 1 -; GFX1250-NEXT: s_cmp_ge_u32 s1, s2 -; GFX1250-NEXT: s_mov_b32 s1, 0 -; GFX1250-NEXT: s_cselect_b32 s0, s3, s0 +; GFX1250-NEXT: s_mul_hi_u32 s3, s2, s3 +; GFX1250-NEXT: s_mul_i32 s5, s3, s4 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-NEXT: s_sub_co_i32 s2, s2, s5 +; GFX1250-NEXT: s_add_co_i32 s5, s3, 1 +; GFX1250-NEXT: s_sub_co_i32 s6, s2, s4 +; GFX1250-NEXT: s_cmp_ge_u32 s2, s4 +; GFX1250-NEXT: s_cselect_b32 s3, s5, s3 +; GFX1250-NEXT: s_cselect_b32 s2, s6, s2 +; GFX1250-NEXT: s_add_co_i32 s5, s3, 1 +; GFX1250-NEXT: s_cmp_ge_u32 s2, s4 +; GFX1250-NEXT: s_cselect_b32 s8, s5, s3 ; GFX1250-NEXT: .LBB16_3: +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9] ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9] +; 
GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm ; GFX1250-NEXT: .LBB16_4: -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $sgpr8_sgpr9 ; GFX1250-NEXT: s_branch .LBB16_2 %result = udiv i64 %x, %y store i64 %result, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll index a92b99aa1e2c1..1c5f8c84e447d 100644 --- a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll +++ b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll @@ -173,11 +173,12 @@ ret: ; GCN-LABEL: {{^}}sink_ubfe_i64_span_midpoint: ; GCN: s_cbranch_scc{{[0-1]}} .LBB3_2 -; GCN: v_alignbit_b32 v[[LO:[0-9]+]], s{{[0-9]+}}, v{{[0-9]+}}, 30 -; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x7f, v[[LO]] +; GCN: s_lshr_b64 s[[[LO:[0-9]+]]:[[HI:[0-9]+]]], s[[[LO2:[0-9]+]]:[[HI2:[0-9]+]]], 30 +; GCN: s_and_b32 s{{[0-9]+}}, s[[LO]], 0x7f ; GCN: .LBB3_3: -; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xff, v[[LO]] +; GCN: s_lshr_b64 s[[[LO3:[0-9]+]]:[[HI3:[0-9]+]]], s[[[LO4:[0-9]+]]:[[HI4:[0-9]+]]], 30 +; GCN: s_and_b32 s{{[0-9]+}}, s[[LO3]], 0xff ; GCN: buffer_store_dwordx2 define amdgpu_kernel void @sink_ubfe_i64_span_midpoint(ptr addrspace(1) %out, i64 %arg1, i1 %arg) #0 { diff --git a/llvm/test/CodeGen/AMDGPU/code-size-estimate.ll b/llvm/test/CodeGen/AMDGPU/code-size-estimate.ll index f9fae025e0bf8..79b44d6a92caa 100644 --- a/llvm/test/CodeGen/AMDGPU/code-size-estimate.ll +++ b/llvm/test/CodeGen/AMDGPU/code-size-estimate.ll @@ -686,7 +686,7 @@ define double @v_mul_f64_vop2_literal_64(double %x) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf] ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf] -; GFX1250-NEXT: v_mul_f64_e32 v[0:1], lit64(0x405ec66666666666), v[0:1] ; encoding: [0xfe,0x00,0x00,0x0c,0x66,0x66,0x66,0x66,0x66,0xc6,0x5e,0x40] +; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 0x405ec66666666666, v[0:1] ; encoding: [0xfe,0x00,0x00,0x0c,0x66,0x66,0x66,0x66,0x66,0xc6,0x5e,0x40] ; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe] %mul = fmul double %x, 123.1 ret double %mul @@ -788,7 +788,7 @@ define i64 @v_add_u64_vop2_literal_64(i64 %x) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf] ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf] -; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], lit64(0x112345678), v[0:1] ; encoding: [0xfe,0x00,0x00,0x50,0x78,0x56,0x34,0x12,0x01,0x00,0x00,0x00] +; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], 0x112345678, v[0:1] ; encoding: [0xfe,0x00,0x00,0x50,0x78,0x56,0x34,0x12,0x01,0x00,0x00,0x00] ; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe] %add = add i64 %x, 4600387192 ret i64 %add diff --git a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll index 5134159e3e406..0fc54aeaef77b 100644 --- a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll +++ b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll @@ -619,43 +619,43 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: s_mov_b64 s[8:9], 0 ; GISEL-NEXT: v_ashrrev_i32_e32 v18, 31, v7 ; GISEL-NEXT: v_ashrrev_i32_e32 v19, 31, v15 -; GISEL-NEXT: v_mov_b32_e32 v10, 0x7f -; GISEL-NEXT: v_mov_b32_e32 v11, 0 +; GISEL-NEXT: v_mov_b32_e32 v16, 0x7f +; GISEL-NEXT: v_mov_b32_e32 v17, 0 ; GISEL-NEXT: v_xor_b32_e32 v0, v18, v4 ; GISEL-NEXT: v_xor_b32_e32 v1, v18, v5 ; GISEL-NEXT: v_xor_b32_e32 v2, v18, v6 ; GISEL-NEXT: v_xor_b32_e32 v3, v18, v7 ; GISEL-NEXT: v_xor_b32_e32 
v4, v19, v12 ; GISEL-NEXT: v_xor_b32_e32 v5, v19, v13 -; GISEL-NEXT: v_xor_b32_e32 v14, v19, v14 -; GISEL-NEXT: v_xor_b32_e32 v15, v19, v15 +; GISEL-NEXT: v_xor_b32_e32 v12, v19, v14 +; GISEL-NEXT: v_xor_b32_e32 v13, v19, v15 ; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v0, v18 ; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v1, v18, vcc ; GISEL-NEXT: v_sub_i32_e64 v20, s[4:5], v4, v19 ; GISEL-NEXT: v_subb_u32_e64 v21, s[4:5], v5, v19, s[4:5] -; GISEL-NEXT: v_subb_u32_e32 v12, vcc, v2, v18, vcc -; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v18, vcc -; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v14, v19, s[4:5] -; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v15, v19, vcc -; GISEL-NEXT: v_ffbh_u32_e32 v14, v21 -; GISEL-NEXT: v_ffbh_u32_e32 v15, v20 -; GISEL-NEXT: v_ffbh_u32_e32 v16, v7 -; GISEL-NEXT: v_ffbh_u32_e32 v17, v6 +; GISEL-NEXT: v_subb_u32_e32 v10, vcc, v2, v18, vcc +; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v18, vcc +; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v12, v19, s[4:5] +; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v13, v19, vcc +; GISEL-NEXT: v_ffbh_u32_e32 v12, v21 +; GISEL-NEXT: v_ffbh_u32_e32 v13, v20 +; GISEL-NEXT: v_ffbh_u32_e32 v14, v7 +; GISEL-NEXT: v_ffbh_u32_e32 v15, v6 ; GISEL-NEXT: v_or_b32_e32 v0, v20, v4 ; GISEL-NEXT: v_or_b32_e32 v1, v21, v5 -; GISEL-NEXT: v_or_b32_e32 v2, v6, v12 -; GISEL-NEXT: v_or_b32_e32 v3, v7, v13 -; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15 +; GISEL-NEXT: v_or_b32_e32 v2, v6, v10 +; GISEL-NEXT: v_or_b32_e32 v3, v7, v11 +; GISEL-NEXT: v_add_i32_e32 v13, vcc, 32, v13 ; GISEL-NEXT: v_ffbh_u32_e32 v26, v5 ; GISEL-NEXT: v_ffbh_u32_e32 v27, v4 -; GISEL-NEXT: v_add_i32_e32 v17, vcc, 32, v17 -; GISEL-NEXT: v_ffbh_u32_e32 v28, v13 -; GISEL-NEXT: v_ffbh_u32_e32 v29, v12 +; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15 +; GISEL-NEXT: v_ffbh_u32_e32 v28, v11 +; GISEL-NEXT: v_ffbh_u32_e32 v29, v10 ; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] ; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3] -; GISEL-NEXT: v_min_u32_e32 v0, v14, v15 +; GISEL-NEXT: v_min_u32_e32 v0, v12, v13 ; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v27 -; GISEL-NEXT: v_min_u32_e32 v2, v16, v17 +; GISEL-NEXT: v_min_u32_e32 v2, v14, v15 ; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v29 ; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0 ; GISEL-NEXT: v_min_u32_e32 v1, v26, v1 @@ -665,32 +665,32 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, s[4:5] ; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] ; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13] +; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] ; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc ; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v0, v1 ; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, vcc ; GISEL-NEXT: v_subb_u32_e64 v0, s[4:5], 0, 0, s[4:5] ; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5] -; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[10:11] +; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[16:17] ; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc -; GISEL-NEXT: v_xor_b32_e32 v10, 0x7f, v2 +; GISEL-NEXT: v_xor_b32_e32 v12, 0x7f, v2 ; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc -; GISEL-NEXT: v_or_b32_e32 v10, v10, v0 -; GISEL-NEXT: v_or_b32_e32 v11, v3, v1 +; GISEL-NEXT: v_or_b32_e32 v12, v12, v0 +; GISEL-NEXT: v_or_b32_e32 v13, v3, v1 ; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1] ; GISEL-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc -; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] -; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc -; 
GISEL-NEXT: v_or_b32_e32 v11, v14, v15 -; GISEL-NEXT: v_and_b32_e32 v14, 1, v11 -; GISEL-NEXT: v_or_b32_e32 v10, v11, v10 +; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13] +; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc +; GISEL-NEXT: v_or_b32_e32 v13, v14, v15 +; GISEL-NEXT: v_and_b32_e32 v14, 1, v13 +; GISEL-NEXT: v_or_b32_e32 v12, v13, v12 ; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14 ; GISEL-NEXT: v_cndmask_b32_e64 v14, v6, 0, vcc -; GISEL-NEXT: v_and_b32_e32 v16, 1, v10 +; GISEL-NEXT: v_and_b32_e32 v16, 1, v12 ; GISEL-NEXT: v_cndmask_b32_e64 v15, v7, 0, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v10, v12, 0, vcc -; GISEL-NEXT: v_cndmask_b32_e64 v11, v13, 0, vcc +; GISEL-NEXT: v_cndmask_b32_e64 v12, v10, 0, vcc +; GISEL-NEXT: v_cndmask_b32_e64 v13, v11, 0, vcc ; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 ; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1 ; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5] @@ -703,22 +703,22 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_addc_u32_e64 v28, vcc, 0, v0, s[4:5] ; GISEL-NEXT: v_addc_u32_e32 v29, vcc, 0, v1, vcc ; GISEL-NEXT: v_add_i32_e64 v14, s[4:5], v30, v2 -; GISEL-NEXT: v_sub_i32_e64 v10, s[4:5], 64, v30 +; GISEL-NEXT: v_sub_i32_e64 v12, s[4:5], 64, v30 ; GISEL-NEXT: v_lshl_b64 v[0:1], v[6:7], v30 -; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], v30 +; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], v30 ; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1 -; GISEL-NEXT: v_lshr_b64 v[10:11], v[6:7], v10 +; GISEL-NEXT: v_lshr_b64 v[12:13], v[6:7], v12 ; GISEL-NEXT: v_lshl_b64 v[16:17], v[6:7], v14 ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30 ; GISEL-NEXT: v_cndmask_b32_e32 v14, 0, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v15, 0, v1, vcc -; GISEL-NEXT: v_or_b32_e32 v0, v10, v2 -; GISEL-NEXT: v_or_b32_e32 v1, v11, v3 +; GISEL-NEXT: v_or_b32_e32 v0, v12, v2 +; GISEL-NEXT: v_or_b32_e32 v1, v13, v3 ; GISEL-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30 -; GISEL-NEXT: v_cndmask_b32_e32 v10, v0, v12, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v11, v1, v13, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v12, v0, v10, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v13, v1, v11, vcc ; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9] ; GISEL-NEXT: v_mov_b32_e32 v0, s8 ; GISEL-NEXT: v_mov_b32_e32 v1, s9 @@ -730,26 +730,26 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: ; %bb.8: ; %udiv-preheader ; GISEL-NEXT: v_add_i32_e32 v32, vcc, 0xffffffc0, v26 ; GISEL-NEXT: v_sub_i32_e32 v16, vcc, 64, v26 -; GISEL-NEXT: v_lshr_b64 v[0:1], v[12:13], v26 +; GISEL-NEXT: v_lshr_b64 v[0:1], v[10:11], v26 ; GISEL-NEXT: v_lshr_b64 v[2:3], v[6:7], v26 ; GISEL-NEXT: s_mov_b64 s[4:5], 0 ; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v20 ; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v21, vcc -; GISEL-NEXT: v_lshl_b64 v[16:17], v[12:13], v16 -; GISEL-NEXT: v_lshr_b64 v[12:13], v[12:13], v32 +; GISEL-NEXT: v_lshl_b64 v[16:17], v[10:11], v16 +; GISEL-NEXT: v_lshr_b64 v[10:11], v[10:11], v32 ; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v4, vcc ; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v5, vcc ; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5] ; GISEL-NEXT: v_or_b32_e32 v2, v2, v16 ; GISEL-NEXT: v_or_b32_e32 v3, v3, v17 ; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26 -; GISEL-NEXT: v_cndmask_b32_e32 v2, v12, v2, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc ; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v0, vcc ; 
GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v1, vcc ; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26 -; GISEL-NEXT: v_cndmask_b32_e32 v12, v2, v6, vcc -; GISEL-NEXT: v_cndmask_b32_e32 v13, v3, v7, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v10, v2, v6, vcc +; GISEL-NEXT: v_cndmask_b32_e32 v11, v3, v7, vcc ; GISEL-NEXT: v_mov_b32_e32 v7, 0 ; GISEL-NEXT: v_mov_b32_e32 v0, s4 ; GISEL-NEXT: v_mov_b32_e32 v1, s5 @@ -757,20 +757,20 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_mov_b32_e32 v3, s7 ; GISEL-NEXT: .LBB0_9: ; %udiv-do-while ; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], 1 +; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], 1 ; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1 -; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v13 -; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v11 -; GISEL-NEXT: v_lshl_b64 v[12:13], v[14:15], 1 -; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 +; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v11 +; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v13 +; GISEL-NEXT: v_lshl_b64 v[10:11], v[14:15], 1 +; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1 ; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v15 ; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26 ; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc ; GISEL-NEXT: v_or_b32_e32 v16, v16, v6 ; GISEL-NEXT: v_or_b32_e32 v2, v2, v34 -; GISEL-NEXT: v_or_b32_e32 v10, v10, v14 -; GISEL-NEXT: v_or_b32_e32 v14, v0, v12 -; GISEL-NEXT: v_or_b32_e32 v15, v1, v13 +; GISEL-NEXT: v_or_b32_e32 v12, v12, v14 +; GISEL-NEXT: v_or_b32_e32 v14, v0, v10 +; GISEL-NEXT: v_or_b32_e32 v15, v1, v11 ; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc ; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc ; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v2 @@ -783,14 +783,14 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v6 ; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5] ; GISEL-NEXT: v_and_b32_e32 v6, 1, v0 -; GISEL-NEXT: v_and_b32_e32 v12, v0, v20 -; GISEL-NEXT: v_and_b32_e32 v13, v0, v21 +; GISEL-NEXT: v_and_b32_e32 v10, v0, v20 +; GISEL-NEXT: v_and_b32_e32 v11, v0, v21 ; GISEL-NEXT: v_and_b32_e32 v34, v0, v4 ; GISEL-NEXT: v_and_b32_e32 v35, v0, v5 ; GISEL-NEXT: v_mov_b32_e32 v0, v6 ; GISEL-NEXT: v_mov_b32_e32 v1, v7 -; GISEL-NEXT: v_sub_i32_e32 v12, vcc, v2, v12 -; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v13, vcc +; GISEL-NEXT: v_sub_i32_e32 v10, vcc, v2, v10 +; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v11, vcc ; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v34, vcc ; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc ; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5] @@ -800,9 +800,9 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: .LBB0_11: ; %Flow11 ; GISEL-NEXT: s_or_b64 exec, exec, s[8:9] ; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1 -; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1 +; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1 ; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v15 -; GISEL-NEXT: v_or_b32_e32 v10, v10, v4 +; GISEL-NEXT: v_or_b32_e32 v12, v12, v4 ; GISEL-NEXT: v_or_b32_e32 v14, v0, v2 ; GISEL-NEXT: v_or_b32_e32 v15, v1, v3 ; GISEL-NEXT: .LBB0_12: ; %Flow12 @@ -815,8 +815,8 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) { ; GISEL-NEXT: v_xor_b32_e32 v6, v9, v3 ; GISEL-NEXT: v_xor_b32_e32 v4, v14, v7 ; GISEL-NEXT: v_xor_b32_e32 v5, v15, v7 -; GISEL-NEXT: v_xor_b32_e32 v8, v10, v7 -; GISEL-NEXT: v_xor_b32_e32 v9, v11, v7 +; GISEL-NEXT: v_xor_b32_e32 v8, v12, v7 +; GISEL-NEXT: v_xor_b32_e32 v9, v13, v7 ; 
GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v3 ; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc ; GISEL-NEXT: v_sub_i32_e64 v4, s[4:5], v4, v7 diff --git a/llvm/test/CodeGen/AMDGPU/ds_write2.ll b/llvm/test/CodeGen/AMDGPU/ds_write2.ll index f82bb59eb7906..be60a00145c8a 100644 --- a/llvm/test/CodeGen/AMDGPU/ds_write2.ll +++ b/llvm/test/CodeGen/AMDGPU/ds_write2.ll @@ -1012,7 +1012,7 @@ define amdgpu_kernel void @store_constant_adjacent_offsets() { ; ; GFX1250-LABEL: store_constant_adjacent_offsets: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], lit64(0x7b0000007b) +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 0x7b0000007b ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: ds_store_b64 v2, v[0:1] ; GFX1250-NEXT: s_endpgm @@ -1350,7 +1350,7 @@ define amdgpu_kernel void @write2_v2i32_align1_odd_offset() { ; ; GFX1250-LABEL: write2_v2i32_align1_odd_offset: ; GFX1250: ; %bb.0: ; %entry -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], lit64(0x1c80000007b) +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 0x1c80000007b ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: ds_store_b64 v2, v[0:1] offset:65 ; GFX1250-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/dynamic-vgpr-reserve-stack-for-cwsr.ll b/llvm/test/CodeGen/AMDGPU/dynamic-vgpr-reserve-stack-for-cwsr.ll index ac30297770807..bcccf50e3805c 100644 --- a/llvm/test/CodeGen/AMDGPU/dynamic-vgpr-reserve-stack-for-cwsr.ll +++ b/llvm/test/CodeGen/AMDGPU/dynamic-vgpr-reserve-stack-for-cwsr.ll @@ -7,7 +7,7 @@ define amdgpu_cs void @amdgpu_cs() #0 { ; CHECK-LABEL: amdgpu_cs: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; CHECK-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-NEXT: s_cmovk_i32 s33, 0x1c0 @@ -19,7 +19,7 @@ define amdgpu_cs void @amdgpu_cs() #0 { define amdgpu_kernel void @kernel() #0 { ; CHECK-LABEL: kernel: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; CHECK-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-NEXT: s_cmovk_i32 s33, 0x1c0 @@ -31,7 +31,7 @@ define amdgpu_kernel void @kernel() #0 { define amdgpu_cs void @with_local() #0 { ; CHECK-TRUE16-LABEL: with_local: ; CHECK-TRUE16: ; %bb.0: -; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-TRUE16-NEXT: v_mov_b16_e32 v0.l, 13 ; CHECK-TRUE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-TRUE16-NEXT: s_cmovk_i32 s33, 0x1c0 @@ -42,7 +42,7 @@ define amdgpu_cs void @with_local() #0 { ; ; CHECK-FAKE16-LABEL: with_local: ; CHECK-FAKE16: ; %bb.0: -; CHECK-FAKE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-FAKE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-FAKE16-NEXT: v_mov_b32_e32 v0, 13 ; CHECK-FAKE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-FAKE16-NEXT: s_cmovk_i32 s33, 0x1c0 @@ -60,7 +60,7 @@ define amdgpu_cs void @with_local() #0 { define amdgpu_cs void @with_calls_inline_const() #0 { ; CHECK-TRUE16-LABEL: with_calls_inline_const: ; CHECK-TRUE16: ; %bb.0: -; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-TRUE16-NEXT: v_mov_b16_e32 v0.l, 15 ; CHECK-TRUE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-TRUE16-NEXT: s_mov_b32 s1, callee@abs32@hi @@ -76,7 +76,7 @@ define amdgpu_cs void @with_calls_inline_const() #0 { ; ; CHECK-FAKE16-LABEL: 
with_calls_inline_const: ; CHECK-FAKE16: ; %bb.0: -; CHECK-FAKE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-FAKE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-FAKE16-NEXT: v_mov_b32_e32 v0, 15 ; CHECK-FAKE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-FAKE16-NEXT: s_mov_b32 s1, callee@abs32@hi @@ -100,7 +100,7 @@ define amdgpu_cs void @with_calls_inline_const() #0 { define amdgpu_cs void @with_calls_no_inline_const() #0 { ; CHECK-TRUE16-LABEL: with_calls_no_inline_const: ; CHECK-TRUE16: ; %bb.0: -; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-TRUE16-NEXT: v_mov_b16_e32 v0.l, 15 ; CHECK-TRUE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-TRUE16-NEXT: s_mov_b32 s1, callee@abs32@hi @@ -117,7 +117,7 @@ define amdgpu_cs void @with_calls_no_inline_const() #0 { ; ; CHECK-FAKE16-LABEL: with_calls_no_inline_const: ; CHECK-FAKE16: ; %bb.0: -; CHECK-FAKE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-FAKE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-FAKE16-NEXT: v_mov_b32_e32 v0, 15 ; CHECK-FAKE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-FAKE16-NEXT: s_mov_b32 s1, callee@abs32@hi @@ -140,7 +140,7 @@ define amdgpu_cs void @with_calls_no_inline_const() #0 { define amdgpu_cs void @with_spills() #0 { ; CHECK-LABEL: with_spills: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; CHECK-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-NEXT: s_cmovk_i32 s33, 0x1c0 @@ -153,7 +153,7 @@ define amdgpu_cs void @with_spills() #0 { define amdgpu_cs void @realign_stack(<32 x i32> %x) #0 { ; CHECK-LABEL: realign_stack: ; CHECK: ; %bb.0: -; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-NEXT: v_mov_b32_e32 v32, 0 ; CHECK-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-NEXT: s_mov_b32 s1, callee@abs32@hi @@ -187,7 +187,7 @@ define amdgpu_cs void @realign_stack(<32 x i32> %x) #0 { define amdgpu_cs void @frame_pointer_none() #1 { ; CHECK-TRUE16-LABEL: frame_pointer_none: ; CHECK-TRUE16: ; %bb.0: -; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-TRUE16-NEXT: v_mov_b16_e32 v0.l, 13 ; CHECK-TRUE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-TRUE16-NEXT: s_cmovk_i32 s33, 0x1c0 @@ -198,7 +198,7 @@ define amdgpu_cs void @frame_pointer_none() #1 { ; ; CHECK-FAKE16-LABEL: frame_pointer_none: ; CHECK-FAKE16: ; %bb.0: -; CHECK-FAKE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-FAKE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-FAKE16-NEXT: v_mov_b32_e32 v0, 13 ; CHECK-FAKE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-FAKE16-NEXT: s_cmovk_i32 s33, 0x1c0 @@ -214,7 +214,7 @@ define amdgpu_cs void @frame_pointer_none() #1 { define amdgpu_cs void @frame_pointer_all() #2 { ; CHECK-TRUE16-LABEL: frame_pointer_all: ; CHECK-TRUE16: ; %bb.0: -; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-TRUE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-TRUE16-NEXT: v_mov_b16_e32 v0.l, 13 ; CHECK-TRUE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-TRUE16-NEXT: s_cmovk_i32 s33, 0x1c0 @@ -225,7 +225,7 @@ define amdgpu_cs void @frame_pointer_all() #2 { ; ; CHECK-FAKE16-LABEL: frame_pointer_all: ; CHECK-FAKE16: ; %bb.0: -; CHECK-FAKE16-NEXT: s_getreg_b32 s33, 
hwreg(HW_REG_HW_ID2, 8, 2) +; CHECK-FAKE16-NEXT: s_getreg_b32 s33, hwreg(HW_REG_WAVE_HW_ID2, 8, 2) ; CHECK-FAKE16-NEXT: v_mov_b32_e32 v0, 13 ; CHECK-FAKE16-NEXT: s_cmp_lg_u32 0, s33 ; CHECK-FAKE16-NEXT: s_cmovk_i32 s33, 0x1c0 diff --git a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll index e6f02295e67d5..d8a5e7fa3b029 100644 --- a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll +++ b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll @@ -11,17 +11,17 @@ define i32 @s_add_co_select_user() { ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: s_load_dword s6, s[4:5], 0x0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: v_add_i32_e64 v0, s[4:5], s6, s6 +; GFX7-NEXT: s_add_u32 s7, s6, s6 +; GFX7-NEXT: s_cselect_b64 s[4:5], -1, 0 ; GFX7-NEXT: s_or_b32 s4, s4, s5 ; GFX7-NEXT: s_cmp_lg_u32 s4, 0 -; GFX7-NEXT: s_addc_u32 s7, s6, 0 +; GFX7-NEXT: s_addc_u32 s8, s6, 0 ; GFX7-NEXT: s_cselect_b64 s[4:5], -1, 0 ; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec -; GFX7-NEXT: s_cselect_b32 s4, s7, 0 +; GFX7-NEXT: s_cselect_b32 s4, s8, 0 ; GFX7-NEXT: s_cmp_gt_u32 s6, 31 -; GFX7-NEXT: v_mov_b32_e32 v1, s4 -; GFX7-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX7-NEXT: s_cselect_b32 s4, s7, s4 +; GFX7-NEXT: v_mov_b32_e32 v0, s4 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: s_add_co_select_user: @@ -30,16 +30,16 @@ define i32 @s_add_co_select_user() { ; GFX9-NEXT: s_mov_b64 s[4:5], 0 ; GFX9-NEXT: s_load_dword s6, s[4:5], 0x0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_add_co_u32_e64 v0, s[4:5], s6, s6 +; GFX9-NEXT: s_add_u32 s7, s6, s6 +; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 ; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0 -; GFX9-NEXT: s_addc_u32 s7, s6, 0 +; GFX9-NEXT: s_addc_u32 s8, s6, 0 ; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0 ; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], exec -; GFX9-NEXT: s_cselect_b32 s4, s7, 0 +; GFX9-NEXT: s_cselect_b32 s4, s8, 0 ; GFX9-NEXT: s_cmp_gt_u32 s6, 31 -; GFX9-NEXT: v_mov_b32_e32 v1, s4 -; GFX9-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_cselect_b32 s4, s7, s4 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: s_add_co_select_user: @@ -48,15 +48,16 @@ define i32 @s_add_co_select_user() { ; GFX10-NEXT: s_mov_b64 s[4:5], 0 ; GFX10-NEXT: s_load_dword s4, s[4:5], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: v_add_co_u32 v0, s5, s4, s4 -; GFX10-NEXT: s_cmp_lg_u32 s5, 0 -; GFX10-NEXT: s_addc_u32 s5, s4, 0 +; GFX10-NEXT: s_add_u32 s5, s4, s4 ; GFX10-NEXT: s_cselect_b32 s6, -1, 0 -; GFX10-NEXT: s_and_b32 s6, s6, exec_lo -; GFX10-NEXT: s_cselect_b32 s5, s5, 0 +; GFX10-NEXT: s_cmp_lg_u32 s6, 0 +; GFX10-NEXT: s_addc_u32 s6, s4, 0 +; GFX10-NEXT: s_cselect_b32 s7, -1, 0 +; GFX10-NEXT: s_and_b32 s7, s7, exec_lo +; GFX10-NEXT: s_cselect_b32 s6, s6, 0 ; GFX10-NEXT: s_cmp_gt_u32 s4, 31 -; GFX10-NEXT: s_cselect_b32 vcc_lo, -1, 0 -; GFX10-NEXT: v_cndmask_b32_e32 v0, s5, v0, vcc_lo +; GFX10-NEXT: s_cselect_b32 s4, s5, s6 +; GFX10-NEXT: v_mov_b32_e32 v0, s4 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-LABEL: s_add_co_select_user: @@ -65,16 +66,18 @@ define i32 @s_add_co_select_user() { ; GFX11-NEXT: s_mov_b64 s[0:1], 0 ; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: v_add_co_u32 v0, s1, s0, s0 -; GFX11-NEXT: s_cmp_lg_u32 s1, 0 -; GFX11-NEXT: s_addc_u32 s1, s0, 0 +; GFX11-NEXT: s_add_u32 s1, s0, s0 ; GFX11-NEXT: s_cselect_b32 s2, 
-1, 0 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX11-NEXT: s_and_b32 s2, s2, exec_lo -; GFX11-NEXT: s_cselect_b32 s1, s1, 0 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_cmp_lg_u32 s2, 0 +; GFX11-NEXT: s_addc_u32 s2, s0, 0 +; GFX11-NEXT: s_cselect_b32 s3, -1, 0 +; GFX11-NEXT: s_and_b32 s3, s3, exec_lo +; GFX11-NEXT: s_cselect_b32 s2, s2, 0 ; GFX11-NEXT: s_cmp_gt_u32 s0, 31 -; GFX11-NEXT: s_cselect_b32 vcc_lo, -1, 0 -; GFX11-NEXT: v_cndmask_b32_e32 v0, s1, v0, vcc_lo +; GFX11-NEXT: s_cselect_b32 s0, s1, s2 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: v_mov_b32_e32 v0, s0 ; GFX11-NEXT: s_setpc_b64 s[30:31] bb: %i = load volatile i32, ptr addrspace(4) null, align 8 @@ -98,14 +101,13 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) { ; GFX7-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; GFX7-NEXT: s_mov_b32 flat_scratch_lo, s13 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_add_i32 s0, s2, s2 -; GFX7-NEXT: s_cmp_lt_u32 s0, s2 +; GFX7-NEXT: s_add_u32 s0, s2, s2 ; GFX7-NEXT: s_cselect_b64 s[0:1], -1, 0 -; GFX7-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1] ; GFX7-NEXT: s_or_b32 s0, s0, s1 ; GFX7-NEXT: s_cmp_lg_u32 s0, 0 ; GFX7-NEXT: s_addc_u32 s0, s2, 0 -; GFX7-NEXT: v_cmp_ge_u32_e32 vcc, s0, v0 +; GFX7-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX7-NEXT: s_andn2_b64 vcc, exec, s[0:1] ; GFX7-NEXT: s_cbranch_vccnz .LBB1_2 ; GFX7-NEXT: ; %bb.1: ; %bb0 ; GFX7-NEXT: v_mov_b32_e32 v0, 0 @@ -125,13 +127,12 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) { ; GFX9: ; %bb.0: ; %bb ; GFX9-NEXT: s_load_dword s2, s[8:9], 0x0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_add_i32 s0, s2, s2 -; GFX9-NEXT: s_cmp_lt_u32 s0, s2 +; GFX9-NEXT: s_add_u32 s0, s2, s2 ; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0 ; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1] ; GFX9-NEXT: s_addc_u32 s0, s2, 0 -; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, s0, v0 +; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GFX9-NEXT: s_andn2_b64 vcc, exec, s[0:1] ; GFX9-NEXT: s_cbranch_vccnz .LBB1_2 ; GFX9-NEXT: ; %bb.1: ; %bb0 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 @@ -151,13 +152,12 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) { ; GFX10: ; %bb.0: ; %bb ; GFX10-NEXT: s_load_dword s0, s[8:9], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_add_i32 s1, s0, s0 -; GFX10-NEXT: s_cmp_lt_u32 s1, s0 +; GFX10-NEXT: s_add_u32 s1, s0, s0 ; GFX10-NEXT: s_cselect_b32 s1, -1, 0 -; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1 ; GFX10-NEXT: s_cmp_lg_u32 s1, 0 ; GFX10-NEXT: s_addc_u32 s0, s0, 0 -; GFX10-NEXT: v_cmp_ge_u32_e32 vcc_lo, s0, v0 +; GFX10-NEXT: s_cselect_b32 s0, -1, 0 +; GFX10-NEXT: s_andn2_b32 vcc_lo, exec_lo, s0 ; GFX10-NEXT: s_cbranch_vccnz .LBB1_2 ; GFX10-NEXT: ; %bb.1: ; %bb0 ; GFX10-NEXT: v_mov_b32_e32 v0, 0 @@ -177,15 +177,13 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) { ; GFX11: ; %bb.0: ; %bb ; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x0 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_add_i32 s1, s0, s0 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: s_cmp_lt_u32 s1, s0 +; GFX11-NEXT: s_add_u32 s1, s0, s0 ; GFX11-NEXT: s_cselect_b32 s1, -1, 0 -; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: s_cmp_lg_u32 s1, 0 ; GFX11-NEXT: s_addc_u32 s0, s0, 0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: v_cmp_ge_u32_e32 vcc_lo, 
s0, v0 +; GFX11-NEXT: s_cselect_b32 s0, -1, 0 +; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0 ; GFX11-NEXT: s_cbranch_vccnz .LBB1_2 ; GFX11-NEXT: ; %bb.1: ; %bb0 ; GFX11-NEXT: v_mov_b32_e32 v0, 0 diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize.bf16.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize.bf16.ll new file mode 100644 index 0000000000000..d747fb7cce7dc --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize.bf16.ll @@ -0,0 +1,1347 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250 %s + +declare bfloat @llvm.fabs.bf16(bfloat) #0 +declare bfloat @llvm.canonicalize.bf16(bfloat) #0 +declare <2 x bfloat> @llvm.fabs.v2bf16(<2 x bfloat>) #0 +declare <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat>) #0 +declare <3 x bfloat> @llvm.canonicalize.v3bf16(<3 x bfloat>) #0 +declare <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat>) #0 +declare <6 x bfloat> @llvm.canonicalize.v6bf16(<6 x bfloat>) #0 +declare <8 x bfloat> @llvm.canonicalize.v8bf16(<8 x bfloat>) #0 +declare <12 x bfloat> @llvm.canonicalize.v12bf16(<12 x bfloat>) #0 +declare <16 x bfloat> @llvm.canonicalize.v16bf16(<16 x bfloat>) #0 +declare <32 x bfloat> @llvm.canonicalize.v32bf16(<32 x bfloat>) #0 +declare <64 x bfloat> @llvm.canonicalize.v64bf16(<64 x bfloat>) #0 +declare i32 @llvm.amdgcn.workitem.id.x() #0 + +define amdgpu_kernel void @test_fold_canonicalize_undef_value_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_undef_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat undef) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @v_test_canonicalize_var_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v0, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: global_store_b16 v[0:1], v0, off +; GFX1250-NEXT: s_endpgm + %val = load bfloat, ptr addrspace(1) %out + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val) + store bfloat %canonicalized, ptr addrspace(1) poison + ret void +} + +define amdgpu_kernel void @s_test_canonicalize_var_bf16(ptr addrspace(1) %out, i16 zeroext %val.arg) #1 { +; GFX1250-LABEL: s_test_canonicalize_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v1, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_lshl_b32 s2, s2, 16 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e64 v0, s2, s2 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: global_store_b16 v1, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %val = bitcast i16 %val.arg to bfloat + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + 
+define <2 x bfloat> @v_test_canonicalize_build_vector_v2bf16(bfloat %lo, bfloat %hi) #1 { +; GFX1250-LABEL: v_test_canonicalize_build_vector_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %ins0 = insertelement <2 x bfloat> poison, bfloat %lo, i32 0 + %ins1 = insertelement <2 x bfloat> %ins0, bfloat %hi, i32 1 + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %ins1) + ret <2 x bfloat> %canonicalized +} + + +define amdgpu_kernel void @v_test_canonicalize_fabs_var_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fabs_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %val = load bfloat, ptr addrspace(1) %out + %val.fabs = call bfloat @llvm.fabs.bf16(bfloat %val) + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val.fabs) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + + +define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fneg_fabs_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_or_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %val = load bfloat, ptr addrspace(1) %out + %val.fabs = call bfloat @llvm.fabs.bf16(bfloat %val) + %val.fabs.fneg = fneg bfloat %val.fabs + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val.fabs.fneg) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @v_test_canonicalize_fneg_var_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fneg_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; 
GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %val = load bfloat, ptr addrspace(1) %out + %val.fneg = fneg bfloat %val + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val.fneg) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_var_bf16(ptr addrspace(1) %out) #2 { +; GFX1250-LABEL: v_test_no_denormals_canonicalize_fneg_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %val = load bfloat, ptr addrspace(1) %out + %val.fneg = fneg bfloat %val + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val.fneg) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_fabs_var_bf16(ptr addrspace(1) %out) #2 { +; GFX1250-LABEL: v_test_no_denormals_canonicalize_fneg_fabs_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_or_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %val = load bfloat, ptr addrspace(1) %out + %val.fabs = call bfloat @llvm.fabs.bf16(bfloat %val) + %val.fabs.fneg = fneg bfloat %val.fabs + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val.fabs.fneg) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_p0_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_p0_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0.0) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_n0_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_n0_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffff8000 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat -0.0) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_p1_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: 
test_fold_canonicalize_p1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3f80 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 1.0) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_n1_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_n1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffffbf80 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat -1.0) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_literal_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_literal_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x4180 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 16.0) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_default_denormals_fold_canonicalize_denormal0_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_default_denormals_fold_canonicalize_denormal0_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR03FF) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_bf16(ptr addrspace(1) %out) #3 { +; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal0_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR03FF) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_default_denormals_fold_canonicalize_denormal1_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_default_denormals_fold_canonicalize_denormal1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffff83ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR83FF) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_bf16(ptr addrspace(1) %out) #3 { +; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffff83ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 
v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR83FF) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_qnan_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c00 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR7C00) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat bitcast (i16 -1 to bfloat)) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg2_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat bitcast (i16 -2 to bfloat)) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_snan0_value_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan0_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c01 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR7C01) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_snan1_value_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan1_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7dff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR7DFF) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_snan2_value_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan2_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfffffdff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xRFDFF) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_snan3_value_bf16(ptr 
addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan3_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfffffc01 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xRFC01) + store bfloat %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @v_test_canonicalize_var_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v1, 0xffff0000, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <2 x bfloat>, ptr addrspace(1) %out, i32 %tid + %val = load <2 x bfloat>, ptr addrspace(1) %gep + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %val) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @v_test_canonicalize_fabs_var_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fabs_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x7fff, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <2 x bfloat>, ptr addrspace(1) %out, i32 %tid + %val = load <2 x bfloat>, ptr addrspace(1) %gep + %val.fabs = call <2 x bfloat> @llvm.fabs.v2bf16(<2 x bfloat> %val) + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %val.fabs) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fneg_fabs_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset +; GFX1250-NEXT: 
s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX1250-NEXT: v_or_b32_e32 v0, 0x8000, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: v_or_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <2 x bfloat>, ptr addrspace(1) %out, i32 %tid + %val = load <2 x bfloat>, ptr addrspace(1) %gep + %val.fabs = call <2 x bfloat> @llvm.fabs.v2bf16(<2 x bfloat> %val) + %val.fabs.fneg = fneg <2 x bfloat> %val.fabs + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %val.fabs.fneg) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @v_test_canonicalize_fneg_var_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fneg_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX1250-NEXT: v_xor_b32_e32 v0, 0x8000, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <2 x bfloat>, ptr addrspace(1) %out, i32 %tid + %val = load <2 x bfloat>, ptr addrspace(1) %gep + %fneg.val = fneg <2 x bfloat> %val + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %fneg.val) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @s_test_canonicalize_var_v2bf16(ptr addrspace(1) %out, i32 zeroext %val.arg) #1 { +; GFX1250-LABEL: s_test_canonicalize_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_and_b32 s3, s2, 0xffff0000 +; GFX1250-NEXT: s_lshl_b32 s2, s2, 16 +; GFX1250-NEXT: v_max_num_f32_e64 v0, s3, s3 +; GFX1250-NEXT: v_max_num_f32_e64 v1, s2, s2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v1, v0 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %val = bitcast i32 %val.arg to <2 x bfloat> + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %val) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_p0_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: 
test_fold_canonicalize_p0_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> zeroinitializer) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_n0_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_n0_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x80008000 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_p1_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_p1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3f803f80 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_n1_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_n1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xbf80bf80 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_literal_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_literal_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x41804180 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal0_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_no_denormals_fold_canonicalize_denormal0_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff03ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_v2bf16(ptr addrspace(1) %out) #3 { +; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal0_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff03ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: 
global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal1_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_no_denormals_fold_canonicalize_denormal1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x83ff83ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_v2bf16(ptr addrspace(1) %out) #3 { +; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x83ff83ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_qnan_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c007c00 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc07fc0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> bitcast (i32 -1 to <2 x bfloat>)) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg2_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc07fc0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_snan0_value_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan0_value_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c017c01 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> 
@llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_snan1_value_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan1_value_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7dff7dff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_snan2_value_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan2_value_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfdfffdff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @test_fold_canonicalize_snan3_value_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan3_value_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfc01fc01 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define <3 x bfloat> @v_test_canonicalize_var_v3bf16(<3 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v3bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v2, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v2 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %canonicalized = call <3 x bfloat> @llvm.canonicalize.v3bf16(<3 x bfloat> %val) + ret <3 x bfloat> %canonicalized +} + +define <4 x bfloat> @v_test_canonicalize_var_v4bf16(<4 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v3, v3, v3 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v2 +; 
GFX1250-NEXT: s_set_pc_i64 s[30:31] + %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> %val) + ret <4 x bfloat> %canonicalized +} + +define amdgpu_kernel void @s_test_canonicalize_undef_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: s_test_canonicalize_undef_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v0, s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> undef) + store <2 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define <2 x bfloat> @v_test_canonicalize_reg_undef_v2bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_undef_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec = insertelement <2 x bfloat> poison, bfloat %val, i32 0 + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) + ret <2 x bfloat> %canonicalized +} + +define <2 x bfloat> @v_test_canonicalize_undef_reg_v2bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_undef_reg_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, v0, s0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec = insertelement <2 x bfloat> poison, bfloat %val, i32 1 + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) + ret <2 x bfloat> %canonicalized +} + +define <2 x bfloat> @v_test_canonicalize_undef_lo_imm_hi_v2bf16() #1 { +; GFX1250-LABEL: v_test_canonicalize_undef_lo_imm_hi_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec = insertelement <2 x bfloat> undef, bfloat 1.0, i32 1 + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) + ret <2 x bfloat> %canonicalized +} + +define <2 x bfloat> @v_test_canonicalize_imm_lo_undef_hi_v2bf16() #1 { +; GFX1250-LABEL: v_test_canonicalize_imm_lo_undef_hi_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0x3f80 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec = insertelement <2 x bfloat> undef, bfloat 1.0, i32 0 + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) + ret <2 x bfloat> %canonicalized +} + +define <2 x bfloat> @v_test_canonicalize_undef_lo_k_hi_v2bf16() #1 { +; GFX1250-LABEL: v_test_canonicalize_undef_lo_k_hi_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: 
s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0x41800000 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec = insertelement <2 x bfloat> undef, bfloat 16.0, i32 1 + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) + ret <2 x bfloat> %canonicalized +} + +define <2 x bfloat> @v_test_canonicalize_k_lo_undef_hi_v2bf16() #1 { +; GFX1250-LABEL: v_test_canonicalize_k_lo_undef_hi_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0x4180 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec = insertelement <2 x bfloat> undef, bfloat 16.0, i32 0 + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) + ret <2 x bfloat> %canonicalized +} + +define <2 x bfloat> @v_test_canonicalize_reg_k_v2bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_k_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x4000 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec0 = insertelement <2 x bfloat> poison, bfloat %val, i32 0 + %vec1 = insertelement <2 x bfloat> %vec0, bfloat 2.0, i32 1 + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec1) + ret <2 x bfloat> %canonicalized +} + +define <2 x bfloat> @v_test_canonicalize_k_reg_v2bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_k_reg_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x4000 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, v0, s0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec0 = insertelement <2 x bfloat> poison, bfloat 2.0, i32 0 + %vec1 = insertelement <2 x bfloat> %vec0, bfloat %val, i32 1 + %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec1) + ret <2 x bfloat> %canonicalized +} + +define amdgpu_kernel void @s_test_canonicalize_undef_v4bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: s_test_canonicalize_undef_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_mov_b32_e32 v1, v0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b64 v0, v[0:1], s[0:1] +; GFX1250-NEXT: s_endpgm + %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> undef) + store <4 x bfloat> %canonicalized, ptr addrspace(1) %out + ret void +} + +define <4 x bfloat> @v_test_canonicalize_reg_undef_undef_undef_v4bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_undef_undef_undef_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v1, 0x7fc07fc0 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu 
instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec = insertelement <4 x bfloat> poison, bfloat %val, i32 0 + %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> %vec) + ret <4 x bfloat> %canonicalized +} + +define <4 x bfloat> @v_test_canonicalize_reg_reg_undef_undef_v4bf16(bfloat %val0, bfloat %val1) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_reg_undef_undef_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: v_mov_b32_e32 v1, 0x7fc07fc0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec0 = insertelement <4 x bfloat> poison, bfloat %val0, i32 0 + %vec1 = insertelement <4 x bfloat> %vec0, bfloat %val1, i32 1 + %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> %vec1) + ret <4 x bfloat> %canonicalized +} + +define <4 x bfloat> @v_test_canonicalize_reg_undef_reg_reg_v4bf16(bfloat %val0, bfloat %val1, bfloat %val2) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_undef_reg_reg_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v2, 16, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %vec0 = insertelement <4 x bfloat> poison, bfloat %val0, i32 0 + %vec1 = insertelement <4 x bfloat> %vec0, bfloat %val1, i32 2 + %vec2 = insertelement <4 x bfloat> %vec1, bfloat %val2, i32 3 + %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> %vec2) + ret <4 x bfloat> %canonicalized +} + +define <6 x bfloat> @v_test_canonicalize_var_v6bf16(<6 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v6bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX1250-NEXT: v_and_b32_e32 v4, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: v_dual_max_num_f32 v5, v5, v5 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; 
GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v4 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v3 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %canonicalized = call <6 x bfloat> @llvm.canonicalize.v6bf16(<6 x bfloat> %val) + ret <6 x bfloat> %canonicalized +} + +define <8 x bfloat> @v_test_canonicalize_var_v8bf16(<8 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v8bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 +; GFX1250-NEXT: v_and_b32_e32 v4, 0xffff0000, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX1250-NEXT: v_and_b32_e32 v6, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v7, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_max_num_f32 v5, v5, v5 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v2, 16, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_dual_max_num_f32 v4, v4, v4 :: v_dual_max_num_f32 v6, v6, v6 +; GFX1250-NEXT: v_dual_max_num_f32 v7, v7, v7 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_max_num_f32_e32 v3, v3, v3 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v6 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v4 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %canonicalized = call <8 x bfloat> @llvm.canonicalize.v8bf16(<8 x bfloat> %val) + ret <8 x bfloat> %canonicalized +} + +define <12 x bfloat> @v_test_canonicalize_var_v12bf16(<12 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v12bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX1250-NEXT: v_and_b32_e32 v7, 0xffff0000, v4 +; GFX1250-NEXT: v_and_b32_e32 v8, 0xffff0000, v3 +; GFX1250-NEXT: v_and_b32_e32 v9, 0xffff0000, v2 +; GFX1250-NEXT: v_dual_lshlrev_b32 v4, 16, v4 :: v_dual_lshlrev_b32 v3, 16, v3 +; GFX1250-NEXT: v_and_b32_e32 v10, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v11, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v6, v6, v6 :: v_dual_max_num_f32 v5, v5, v5 +; GFX1250-NEXT: v_dual_max_num_f32 v7, v7, v7 :: v_dual_max_num_f32 v8, v8, v8 +; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_max_num_f32 v10, v10, v10 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v11, v11, v11 +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v11 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v10 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) 
| instid1(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v9 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v8 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v7 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v6 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %canonicalized = call <12 x bfloat> @llvm.canonicalize.v12bf16(<12 x bfloat> %val) + ret <12 x bfloat> %canonicalized +} + +define <16 x bfloat> @v_test_canonicalize_var_v16bf16(<16 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v16bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v8, 0xffff0000, v7 +; GFX1250-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX1250-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 +; GFX1250-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX1250-NEXT: v_dual_max_num_f32 v8, v8, v8 :: v_dual_lshlrev_b32 v5, 16, v5 +; GFX1250-NEXT: v_dual_max_num_f32 v7, v7, v7 :: v_dual_lshlrev_b32 v6, 16, v6 +; GFX1250-NEXT: v_and_b32_e32 v11, 0xffff0000, v4 +; GFX1250-NEXT: v_and_b32_e32 v12, 0xffff0000, v3 +; GFX1250-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 +; GFX1250-NEXT: v_dual_lshlrev_b32 v4, 16, v4 :: v_dual_lshlrev_b32 v3, 16, v3 +; GFX1250-NEXT: v_and_b32_e32 v14, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v15, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_lshlrev_b32 v2, 16, v2 +; GFX1250-NEXT: v_max_num_f32_e32 v6, v6, v6 +; GFX1250-NEXT: v_dual_max_num_f32 v10, v10, v10 :: v_dual_max_num_f32 v5, v5, v5 +; GFX1250-NEXT: v_dual_max_num_f32 v11, v11, v11 :: v_dual_max_num_f32 v12, v12, v12 +; GFX1250-NEXT: v_dual_max_num_f32 v13, v13, v13 :: v_dual_max_num_f32 v14, v14, v14 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v15, v15, v15 +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v15 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v14 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v13 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v12 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v11 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v10 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v6, v6, v9 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v7, v7, v8 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %canonicalized = call <16 x bfloat> @llvm.canonicalize.v16bf16(<16 x bfloat> %val) + ret <16 x bfloat> %canonicalized +} + +define <32 x bfloat> @v_test_canonicalize_var_v32bf16(<32 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v32bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v16, 0xffff0000, v15 +; GFX1250-NEXT: v_and_b32_e32 v18, 0xffff0000, v13 +; GFX1250-NEXT: v_and_b32_e32 v20, 0xffff0000, v11 +; GFX1250-NEXT: v_and_b32_e32 v22, 0xffff0000, v9 +; GFX1250-NEXT: v_and_b32_e32 v24, 0xffff0000, v7 +; GFX1250-NEXT: v_dual_max_num_f32 v16, v16, v16 :: v_dual_lshlrev_b32 v15, 16, v15 +; GFX1250-NEXT: v_and_b32_e32 v17, 0xffff0000, v14 +; GFX1250-NEXT: v_dual_lshlrev_b32 v14, 16, v14 :: v_dual_lshlrev_b32 v13, 16, v13 +; GFX1250-NEXT: v_max_num_f32_e32 v18, v18, v18 +; 
GFX1250-NEXT: v_and_b32_e32 v19, 0xffff0000, v12 +; GFX1250-NEXT: v_dual_lshlrev_b32 v12, 16, v12 :: v_dual_lshlrev_b32 v11, 16, v11 +; GFX1250-NEXT: v_max_num_f32_e32 v20, v20, v20 +; GFX1250-NEXT: v_and_b32_e32 v21, 0xffff0000, v10 +; GFX1250-NEXT: v_dual_lshlrev_b32 v10, 16, v10 :: v_dual_lshlrev_b32 v9, 16, v9 +; GFX1250-NEXT: v_max_num_f32_e32 v22, v22, v22 +; GFX1250-NEXT: v_and_b32_e32 v23, 0xffff0000, v8 +; GFX1250-NEXT: v_dual_lshlrev_b32 v8, 16, v8 :: v_dual_lshlrev_b32 v7, 16, v7 +; GFX1250-NEXT: v_max_num_f32_e32 v24, v24, v24 +; GFX1250-NEXT: v_and_b32_e32 v25, 0xffff0000, v6 +; GFX1250-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX1250-NEXT: v_and_b32_e32 v26, 0xffff0000, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX1250-NEXT: v_and_b32_e32 v27, 0xffff0000, v4 +; GFX1250-NEXT: v_and_b32_e32 v28, 0xffff0000, v3 +; GFX1250-NEXT: v_and_b32_e32 v29, 0xffff0000, v2 +; GFX1250-NEXT: v_dual_lshlrev_b32 v4, 16, v4 :: v_dual_lshlrev_b32 v3, 16, v3 +; GFX1250-NEXT: v_and_b32_e32 v30, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v31, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_dual_max_num_f32 v15, v15, v15 :: v_dual_lshlrev_b32 v2, 16, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v17, v17, v17 :: v_dual_max_num_f32 v14, v14, v14 +; GFX1250-NEXT: v_dual_max_num_f32 v13, v13, v13 :: v_dual_max_num_f32 v19, v19, v19 +; GFX1250-NEXT: v_dual_max_num_f32 v12, v12, v12 :: v_dual_max_num_f32 v11, v11, v11 +; GFX1250-NEXT: v_dual_max_num_f32 v21, v21, v21 :: v_dual_max_num_f32 v10, v10, v10 +; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_max_num_f32 v23, v23, v23 +; GFX1250-NEXT: v_dual_max_num_f32 v8, v8, v8 :: v_dual_max_num_f32 v7, v7, v7 +; GFX1250-NEXT: v_dual_max_num_f32 v25, v25, v25 :: v_dual_max_num_f32 v6, v6, v6 +; GFX1250-NEXT: v_dual_max_num_f32 v26, v26, v26 :: v_dual_max_num_f32 v5, v5, v5 +; GFX1250-NEXT: v_dual_max_num_f32 v27, v27, v27 :: v_dual_max_num_f32 v28, v28, v28 +; GFX1250-NEXT: v_dual_max_num_f32 v29, v29, v29 :: v_dual_max_num_f32 v30, v30, v30 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v31, v31, v31 +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v31 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v30 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v29 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v28 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v27 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v26 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v6, v6, v25 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v7, v7, v24 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v8, v8, v23 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v9, v9, v22 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v10, v10, v21 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v11, v11, v20 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v12, v12, v19 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v13, v13, v18 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v14, v14, v17 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v15, v15, v16 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %canonicalized = call <32 x bfloat> @llvm.canonicalize.v32bf16(<32 x bfloat> %val) + ret <32 x bfloat> %canonicalized +} + +define <64 x bfloat> @v_test_canonicalize_var_v64bf16(<64 x bfloat> %val) #1 { +; GFX1250-LABEL: 
v_test_canonicalize_var_v64bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v31, off, s32 +; GFX1250-NEXT: v_and_b32_e32 v81, 0xffff0000, v0 +; GFX1250-NEXT: v_and_b32_e32 v38, 0xffff0000, v24 +; GFX1250-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX1250-NEXT: v_and_b32_e32 v39, 0xffff0000, v23 +; GFX1250-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX1250-NEXT: v_and_b32_e32 v80, 0xffff0000, v6 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v6, 16, v6 +; GFX1250-NEXT: v_and_b32_e32 v82, 0xffff0000, v1 +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v81, v81, v81 +; GFX1250-NEXT: v_and_b32_e32 v83, 0xffff0000, v2 +; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX1250-NEXT: v_and_b32_e32 v34, 0xffff0000, v28 +; GFX1250-NEXT: v_lshlrev_b32_e32 v28, 16, v28 +; GFX1250-NEXT: v_and_b32_e32 v35, 0xffff0000, v27 +; GFX1250-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX1250-NEXT: v_and_b32_e32 v36, 0xffff0000, v26 +; GFX1250-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; GFX1250-NEXT: v_and_b32_e32 v48, 0xffff0000, v22 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v82, v82, v82 +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v83, v83, v83 +; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v24, v24, v24 +; GFX1250-NEXT: v_max_num_f32_e32 v39, v39, v39 +; GFX1250-NEXT: v_dual_max_num_f32 v23, v23, v23 :: v_dual_max_num_f32 v48, v48, v48 +; GFX1250-NEXT: v_and_b32_e32 v32, 0xffff0000, v30 +; GFX1250-NEXT: v_lshlrev_b32_e32 v30, 16, v30 +; GFX1250-NEXT: v_and_b32_e32 v33, 0xffff0000, v29 +; GFX1250-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; GFX1250-NEXT: v_and_b32_e32 v37, 0xffff0000, v25 +; GFX1250-NEXT: v_dual_lshlrev_b32 v25, 16, v25 :: v_dual_lshlrev_b32 v22, 16, v22 +; GFX1250-NEXT: v_and_b32_e32 v49, 0xffff0000, v21 +; GFX1250-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX1250-NEXT: v_and_b32_e32 v50, 0xffff0000, v20 +; GFX1250-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX1250-NEXT: v_and_b32_e32 v51, 0xffff0000, v19 +; GFX1250-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX1250-NEXT: v_and_b32_e32 v52, 0xffff0000, v18 +; GFX1250-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX1250-NEXT: v_and_b32_e32 v53, 0xffff0000, v17 +; GFX1250-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX1250-NEXT: v_and_b32_e32 v54, 0xffff0000, v16 +; GFX1250-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX1250-NEXT: v_and_b32_e32 v55, 0xffff0000, v15 +; GFX1250-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX1250-NEXT: v_and_b32_e32 v64, 0xffff0000, v14 +; GFX1250-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX1250-NEXT: v_and_b32_e32 v65, 0xffff0000, v13 +; GFX1250-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX1250-NEXT: v_and_b32_e32 v66, 0xffff0000, v12 +; GFX1250-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX1250-NEXT: v_and_b32_e32 v67, 0xffff0000, v11 +; GFX1250-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX1250-NEXT: v_and_b32_e32 v68, 0xffff0000, v10 +; GFX1250-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX1250-NEXT: v_and_b32_e32 v69, 0xffff0000, v9 +; GFX1250-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX1250-NEXT: v_and_b32_e32 v70, 0xffff0000, v8 +; GFX1250-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX1250-NEXT: v_and_b32_e32 v71, 0xffff0000, v7 +; GFX1250-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v81 +; GFX1250-NEXT: v_and_b32_e32 v81, 0xffff0000, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, 
v82 +; GFX1250-NEXT: v_and_b32_e32 v82, 0xffff0000, v4 +; GFX1250-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v83 +; GFX1250-NEXT: v_and_b32_e32 v83, 0xffff0000, v3 +; GFX1250-NEXT: v_dual_max_num_f32 v32, v32, v32 :: v_dual_lshlrev_b32 v3, 16, v3 +; GFX1250-NEXT: v_dual_max_num_f32 v27, v27, v27 :: v_dual_max_num_f32 v36, v36, v36 +; GFX1250-NEXT: v_dual_max_num_f32 v26, v26, v26 :: v_dual_max_num_f32 v37, v37, v37 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v23, v23, v39 +; GFX1250-NEXT: v_dual_max_num_f32 v30, v30, v30 :: v_dual_max_num_f32 v33, v33, v33 +; GFX1250-NEXT: v_dual_max_num_f32 v29, v29, v29 :: v_dual_max_num_f32 v34, v34, v34 +; GFX1250-NEXT: v_dual_max_num_f32 v28, v28, v28 :: v_dual_max_num_f32 v35, v35, v35 +; GFX1250-NEXT: v_dual_max_num_f32 v25, v25, v25 :: v_dual_max_num_f32 v38, v38, v38 +; GFX1250-NEXT: v_dual_max_num_f32 v22, v22, v22 :: v_dual_max_num_f32 v49, v49, v49 +; GFX1250-NEXT: v_dual_max_num_f32 v21, v21, v21 :: v_dual_max_num_f32 v50, v50, v50 +; GFX1250-NEXT: v_dual_max_num_f32 v20, v20, v20 :: v_dual_max_num_f32 v51, v51, v51 +; GFX1250-NEXT: v_dual_max_num_f32 v19, v19, v19 :: v_dual_max_num_f32 v52, v52, v52 +; GFX1250-NEXT: v_dual_max_num_f32 v18, v18, v18 :: v_dual_max_num_f32 v53, v53, v53 +; GFX1250-NEXT: v_dual_max_num_f32 v17, v17, v17 :: v_dual_max_num_f32 v54, v54, v54 +; GFX1250-NEXT: v_dual_max_num_f32 v16, v16, v16 :: v_dual_max_num_f32 v55, v55, v55 +; GFX1250-NEXT: v_dual_max_num_f32 v15, v15, v15 :: v_dual_max_num_f32 v64, v64, v64 +; GFX1250-NEXT: v_dual_max_num_f32 v14, v14, v14 :: v_dual_max_num_f32 v65, v65, v65 +; GFX1250-NEXT: v_dual_max_num_f32 v13, v13, v13 :: v_dual_max_num_f32 v66, v66, v66 +; GFX1250-NEXT: v_dual_max_num_f32 v12, v12, v12 :: v_dual_max_num_f32 v67, v67, v67 +; GFX1250-NEXT: v_dual_max_num_f32 v11, v11, v11 :: v_dual_max_num_f32 v68, v68, v68 +; GFX1250-NEXT: v_dual_max_num_f32 v10, v10, v10 :: v_dual_max_num_f32 v69, v69, v69 +; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_max_num_f32 v70, v70, v70 +; GFX1250-NEXT: v_dual_max_num_f32 v8, v8, v8 :: v_dual_max_num_f32 v71, v71, v71 +; GFX1250-NEXT: v_dual_max_num_f32 v80, v80, v80 :: v_dual_max_num_f32 v81, v81, v81 +; GFX1250-NEXT: v_dual_max_num_f32 v82, v82, v82 :: v_dual_max_num_f32 v83, v83, v83 +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: v_dual_max_num_f32 v5, v5, v5 :: v_dual_max_num_f32 v6, v6, v6 +; GFX1250-NEXT: v_max_num_f32_e32 v7, v7, v7 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v26, v26, v36 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v83 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v82 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v81 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v6, v6, v80 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v7, v7, v71 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v8, v8, v70 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v9, v9, v69 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v10, v10, v68 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v11, v11, v67 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v12, v12, v66 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v13, v13, v65 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v14, v14, v64 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v15, v15, v55 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v16, v16, v54 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v17, v17, v53 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v18, v18, v52 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v19, v19, v51 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v20, v20, v50 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v21, v21, v49 +; GFX1250-NEXT: 
v_cvt_pk_bf16_f32 v22, v22, v48 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v24, v24, v38 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v25, v25, v37 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v27, v27, v35 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v28, v28, v34 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v29, v29, v33 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v30, v30, v32 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v39, 0xffff0000, v31 +; GFX1250-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_max_num_f32_e32 v36, v39, v39 +; GFX1250-NEXT: v_max_num_f32_e32 v31, v31, v31 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v31, v31, v36 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] + %canonicalized = call <64 x bfloat> @llvm.canonicalize.v64bf16(<64 x bfloat> %val) + ret <64 x bfloat> %canonicalized +} + +attributes #0 = { nounwind readnone } +attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" } +attributes #2 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" } +attributes #3 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" } diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll index 6a898fa799f3e..30bcdf97e26fd 100644 --- a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll @@ -231,22 +231,13 @@ define bfloat @v_copysign_bf16_f32(bfloat %mag, float %sign.f32) { ; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11TRUE16-LABEL: v_copysign_bf16_f32: -; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v1 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11FAKE16-LABEL: v_copysign_bf16_f32: -; GFX11FAKE16: ; %bb.0: -; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 -; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: v_copysign_bf16_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] %sign = fptrunc float %sign.f32 to bfloat %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign) ret bfloat %op @@ -298,22 +289,13 @@ define bfloat @v_copysign_bf16_f64(bfloat %mag, double %sign.f64) { ; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11TRUE16-LABEL: v_copysign_bf16_f64: -; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v2 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11FAKE16-LABEL: v_copysign_bf16_f64: -; GFX11FAKE16: ; %bb.0: -; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2 -; GFX11FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 -; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: v_copysign_bf16_f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] %sign = fptrunc double %sign.f64 to bfloat %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign) ret bfloat %op @@ -499,9 +481,10 @@ define amdgpu_ps i32 @s_copysign_bf16_f32(bfloat inreg %mag, float inreg %sign.f ; ; GFX11TRUE16-LABEL: s_copysign_bf16_f32: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s1 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s1 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 @@ -575,9 +558,10 @@ define amdgpu_ps i32 @s_copysign_bf16_f64(bfloat inreg %mag, double inreg %sign. ; ; GFX11TRUE16-LABEL: s_copysign_bf16_f64: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s2 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s2 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 @@ -1153,12 +1137,12 @@ define amdgpu_ps i32 @s_copysign_v2bf16(<2 x bfloat> inreg %arg_mag, <2 x bfloat define amdgpu_ps <3 x i16> @s_copysign_v3bf16(<3 x bfloat> inreg %arg_mag, <3 x bfloat> inreg %arg_sign) { ; GCN-LABEL: s_copysign_v3bf16: ; GCN: ; %bb.0: -; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s5 +; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s3 ; GCN-NEXT: v_mul_f32_e64 v1, 1.0, s4 -; GCN-NEXT: v_mul_f32_e64 v2, 1.0, s3 -; GCN-NEXT: v_mul_f32_e64 v3, 1.0, s2 +; GCN-NEXT: v_mul_f32_e64 v2, 1.0, s5 +; GCN-NEXT: v_mul_f32_e64 v3, 1.0, s0 ; GCN-NEXT: v_mul_f32_e64 v4, 1.0, s1 -; GCN-NEXT: v_mul_f32_e64 v5, 1.0, s0 +; GCN-NEXT: v_mul_f32_e64 v5, 1.0, s2 ; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2 @@ -1166,45 +1150,45 @@ define amdgpu_ps <3 x i16> @s_copysign_v3bf16(<3 x bfloat> inreg %arg_mag, <3 x ; GCN-NEXT: v_bfe_u32 v4, v4, 16, 15 ; GCN-NEXT: v_bfe_u32 v3, v3, 16, 15 ; GCN-NEXT: v_and_b32_e32 v2, 0x8000, v2 -; GCN-NEXT: v_and_b32_e32 v1, 0x8000, v1 +; GCN-NEXT: v_and_b32_e32 v6, 0x8000, v1 ; GCN-NEXT: v_and_b32_e32 v0, 0x8000, v0 -; GCN-NEXT: v_or_b32_e32 v2, v5, v2 -; GCN-NEXT: v_or_b32_e32 v1, v4, v1 -; GCN-NEXT: v_or_b32_e32 v0, v3, v0 -; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GCN-NEXT: v_or_b32_e32 v2, v2, v1 -; GCN-NEXT: v_alignbit_b32 v1, v0, v1, 16 -; GCN-NEXT: v_readfirstlane_b32 s1, v1 -; GCN-NEXT: v_readfirstlane_b32 s0, v2 -; GCN-NEXT: v_readfirstlane_b32 s2, v0 +; GCN-NEXT: v_or_b32_e32 v1, v5, v2 +; GCN-NEXT: v_or_b32_e32 v2, v4, v6 +; GCN-NEXT: v_or_b32_e32 v3, v3, v0 +; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v2 +; GCN-NEXT: v_or_b32_e32 v4, v3, v0 +; GCN-NEXT: v_lshr_b64 v[2:3], v[0:1], 16 +; GCN-NEXT: v_readfirstlane_b32 
s0, v4 +; GCN-NEXT: v_readfirstlane_b32 s1, v2 +; GCN-NEXT: v_readfirstlane_b32 s2, v1 ; GCN-NEXT: ; return to shader part epilog ; ; GFX7-LABEL: s_copysign_v3bf16: ; GFX7: ; %bb.0: -; GFX7-NEXT: v_mul_f32_e64 v1, 1.0, s4 -; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s5 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_mul_f32_e64 v2, 1.0, s3 -; GFX7-NEXT: v_mul_f32_e64 v4, 1.0, s1 +; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s3 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s4 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_mul_f32_e64 v3, 1.0, s2 -; GFX7-NEXT: v_mul_f32_e64 v5, 1.0, s0 -; GFX7-NEXT: v_and_b32_e32 v1, 0x8000, v1 +; GFX7-NEXT: v_mul_f32_e64 v1, 1.0, s5 +; GFX7-NEXT: v_mul_f32_e64 v4, 1.0, s1 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_mul_f32_e64 v3, 1.0, s0 +; GFX7-NEXT: v_mul_f32_e64 v5, 1.0, s2 +; GFX7-NEXT: v_and_b32_e32 v0, 0x8000, v0 ; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 15 -; GFX7-NEXT: v_and_b32_e32 v2, 0x8000, v2 +; GFX7-NEXT: v_and_b32_e32 v1, 0x8000, v1 ; GFX7-NEXT: v_bfe_u32 v5, v5, 16, 15 -; GFX7-NEXT: v_or_b32_e32 v1, v4, v1 -; GFX7-NEXT: v_and_b32_e32 v0, 0x8000, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0x8000, v2 ; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 15 -; GFX7-NEXT: v_or_b32_e32 v2, v5, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v1 -; GFX7-NEXT: v_alignbit_b32 v1, v0, v1, 16 -; GFX7-NEXT: v_readfirstlane_b32 s1, v1 -; GFX7-NEXT: v_readfirstlane_b32 s0, v2 -; GFX7-NEXT: v_readfirstlane_b32 s2, v0 +; GFX7-NEXT: v_or_b32_e32 v1, v5, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 +; GFX7-NEXT: v_or_b32_e32 v4, v2, v0 +; GFX7-NEXT: v_lshr_b64 v[2:3], v[0:1], 16 +; GFX7-NEXT: v_readfirstlane_b32 s0, v4 +; GFX7-NEXT: v_readfirstlane_b32 s1, v2 +; GFX7-NEXT: v_readfirstlane_b32 s2, v1 ; GFX7-NEXT: ; return to shader part epilog ; ; GFX8-LABEL: s_copysign_v3bf16: @@ -3677,9 +3661,10 @@ define amdgpu_ps i16 @s_copysign_out_bf16_mag_bf16_sign_f32(bfloat inreg %mag, f ; ; GFX11TRUE16-LABEL: s_copysign_out_bf16_mag_bf16_sign_f32: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s1 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s1 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11TRUE16-NEXT: ; return to shader part epilog ; @@ -3744,9 +3729,10 @@ define amdgpu_ps i16 @s_copysign_out_bf16_mag_bf16_sign_f64(bfloat inreg %mag, d ; ; GFX11TRUE16-LABEL: s_copysign_out_bf16_mag_bf16_sign_f64: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s2 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s2 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11TRUE16-NEXT: ; return to shader part epilog ; @@ -6700,15 +6686,16 @@ define <3 x bfloat> @v_copysign_out_v3bf16_mag_v3bf16_sign_v3f64(<3 x bfloat> %m ; GFX11TRUE16-LABEL: v_copysign_out_v3bf16_mag_v3bf16_sign_v3f64: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; 
GFX11TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v5 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfi_b32 v2, 0x7fff, v2, v4 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v7 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v3 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0x7fff0000, v1, v3 -; GFX11TRUE16-NEXT: v_bfi_b32 v2, 0x7fff0000, v2, v7 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l +; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0x7fff, v1, v4 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_copysign_out_v3bf16_mag_v3bf16_sign_v3f64: diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll index 574c1042859aa..62847b15d3443 100644 --- a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll @@ -776,22 +776,13 @@ define half @v_copysign_out_f16_mag_f16_sign_f32(half %mag, float %sign) { ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-TRUE16-LABEL: v_copysign_out_f16_mag_f16_sign_f32: -; GFX11-TRUE16: ; %bb.0: -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-FAKE16-LABEL: v_copysign_out_f16_mag_f16_sign_f32: -; GFX11-FAKE16: ; %bb.0: -; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 -; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: v_copysign_out_f16_mag_f16_sign_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] %sign.trunc = fptrunc float %sign to half %out = call half @llvm.copysign.f16(half %mag, half %sign.trunc) ret half %out @@ -823,22 +814,13 @@ define half @v_copysign_out_f16_mag_f16_sign_f64(half %mag, double %sign) { ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-TRUE16-LABEL: v_copysign_out_f16_mag_f16_sign_f64: -; GFX11-TRUE16: ; %bb.0: -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v2 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-FAKE16-LABEL: v_copysign_out_f16_mag_f16_sign_f64: -; GFX11-FAKE16: ; %bb.0: -; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2 -; 
GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 -; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: v_copysign_out_f16_mag_f16_sign_f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] %sign.trunc = fptrunc double %sign to half %out = call half @llvm.copysign.f16(half %mag, half %sign.trunc) ret half %out @@ -1450,29 +1432,29 @@ define amdgpu_ps <3 x i16> @s_copysign_v3f16(<3 x half> inreg %arg_mag, <3 x hal ; SI: ; %bb.0: ; SI-NEXT: v_cvt_f16_f32_e32 v2, s4 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s1 -; SI-NEXT: v_cvt_f16_f32_e32 v0, s5 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s2 -; SI-NEXT: v_cvt_f16_f32_e32 v4, s3 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s0 +; SI-NEXT: v_cvt_f16_f32_e32 v4, s5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, s2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, s3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, s0 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: s_brev_b32 s0, -2 ; SI-NEXT: v_bfi_b32 v2, s0, v3, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_bfi_b32 v3, s0, v5, v4 ; SI-NEXT: v_bfi_b32 v0, s0, v1, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v1 -; SI-NEXT: v_alignbit_b32 v1, v0, v1, 16 -; SI-NEXT: v_readfirstlane_b32 s1, v1 -; SI-NEXT: v_readfirstlane_b32 s0, v2 -; SI-NEXT: v_readfirstlane_b32 s2, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2 +; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16 +; SI-NEXT: v_or_b32_e32 v4, v4, v0 +; SI-NEXT: v_readfirstlane_b32 s0, v4 +; SI-NEXT: v_readfirstlane_b32 s1, v2 +; SI-NEXT: v_readfirstlane_b32 s2, v1 ; SI-NEXT: ; return to shader part epilog ; ; VI-LABEL: s_copysign_v3f16: @@ -2832,9 +2814,10 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f16_sign_f32(half inreg %mag, float ; ; GFX11-TRUE16-LABEL: s_copysign_out_f16_mag_f16_sign_f32: ; GFX11-TRUE16: ; %bb.0: -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s1 +; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-TRUE16-NEXT: ; return to shader part epilog ; @@ -2883,9 +2866,10 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f16_sign_f64(half inreg %mag, doubl ; ; GFX11-TRUE16-LABEL: s_copysign_out_f16_mag_f16_sign_f64: ; GFX11-TRUE16: ; %bb.0: -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s2 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s2 +; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-TRUE16-NEXT: ; return to shader part epilog ; @@ -5590,15 +5574,16 @@ define <3 x half> 
@v_copysign_out_v3f16_mag_v3f16_sign_v3f64(<3 x half> %mag, <3 ; GFX11-TRUE16-LABEL: v_copysign_out_v3f16_mag_v3f16_sign_v3f64: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0x7fff, v2, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v7 +; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_bfi_b32 v1, 0x7fff0000, v1, v3 -; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0x7fff0000, v2, v7 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l +; GFX11-TRUE16-NEXT: v_bfi_b32 v1, 0x7fff, v1, v4 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: v_copysign_out_v3f16_mag_v3f16_sign_v3f64: diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll index 210e09fd9169a..7f6a920d25016 100644 --- a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll @@ -137,33 +137,31 @@ define amdgpu_kernel void @v_fdiv_f16( ; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v0 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] glc dlc +; GFX11-TRUE16-NEXT: global_load_d16_b16 v2, v1, s[2:3] glc dlc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: global_load_d16_b16 v1, v2, s[4:5] glc dlc +; GFX11-TRUE16-NEXT: global_load_d16_b16 v3, v1, s[4:5] glc dlc ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v0.l -; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v3, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v0.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_rcp_f32_e32 v3, v3 +; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v2.l +; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v0, v3.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_rcp_f32_e32 v0, v0 ; GFX11-TRUE16-NEXT: s_waitcnt_depctr 0xfff -; GFX11-TRUE16-NEXT: v_mul_f32_e32 v4, v4, v3 -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v7, -v5, v4, v6 op_sel_hi:[1,0,1] +; GFX11-TRUE16-NEXT: v_mul_f32_e32 v4, v4, v0 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v4, v2 op_sel_hi:[1,0,1] ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_fmac_f32_e32 v4, v7, v3 -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v5, -v5, v4, v6 op_sel_hi:[1,0,1] +; GFX11-TRUE16-NEXT: v_fmac_f32_e32 v4, v5, v0 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v4, v2 op_sel_hi:[1,0,1] ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; 
GFX11-TRUE16-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff800000, v3 +; GFX11-TRUE16-NEXT: v_mul_f32_e32 v0, v5, v0 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff800000, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v4 -; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v3 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v0, v4 +; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.h, v1.l, v0.l -; GFX11-TRUE16-NEXT: global_store_b16 v2, v0, s[0:1] +; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.l, v3.l, v2.l +; GFX11-TRUE16-NEXT: global_store_b16 v1, v0, s[0:1] ; GFX11-TRUE16-NEXT: s_endpgm ; ; GFX11-FAKE16-LABEL: v_fdiv_f16: diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll index 605026614c614..1e7855ccb3642 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll @@ -723,7 +723,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -1065,7 +1065,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -1586,7 +1586,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -1946,7 +1946,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -2483,7 +2483,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: 
v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -2847,7 +2847,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -3386,7 +3386,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -3750,7 +3750,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -4289,7 +4289,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -4653,7 +4653,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -5192,7 +5192,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; 
GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -5556,7 +5556,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -6057,7 +6057,7 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -6405,7 +6405,7 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -6898,7 +6898,7 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -7246,7 +7246,7 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -7739,7 +7739,7 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 
s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -8087,7 +8087,7 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -8580,7 +8580,7 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -8928,7 +8928,7 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -9480,7 +9480,7 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] @@ -9864,7 +9864,7 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] @@ -10382,7 +10382,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -10750,7 +10750,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] @@ -11264,7 +11264,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] @@ -11644,7 +11644,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xffffffffffffff80) +; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], 0xffffffffffffff80 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll index 4eaa1965c66f1..fc8883924dfbc 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll @@ -4152,7 +4152,8 @@ define void @store_load_i64_aligned(ptr addrspace(5) nocapture %arg) { ; GFX942-LABEL: store_load_i64_aligned: ; GFX942: ; %bb.0: ; %bb ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[2:3], 15 +; GFX942-NEXT: v_mov_b32_e32 v2, 15 +; GFX942-NEXT: v_mov_b32_e32 v3, 0 ; GFX942-NEXT: scratch_store_dwordx2 v0, v[2:3], off sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: scratch_load_dwordx2 v[0:1], v0, off sc0 sc1 @@ -4262,7 +4263,8 @@ define void @store_load_i64_unaligned(ptr addrspace(5) nocapture %arg) { ; GFX942-LABEL: store_load_i64_unaligned: ; GFX942: ; %bb.0: ; %bb ; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942-NEXT: v_mov_b64_e32 v[2:3], 15 +; GFX942-NEXT: v_mov_b32_e32 v2, 15 +; GFX942-NEXT: v_mov_b32_e32 v3, 0 ; GFX942-NEXT: scratch_store_dwordx2 v0, v[2:3], off sc0 sc1 ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: scratch_load_dwordx2 v[0:1], v0, off sc0 sc1 diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll index e74ad3d62bea4..47161954cc332 100644 --- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll +++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll @@ -8946,8 +8946,7 @@ define void @flat_atomic_udec_wrap_i32_noret(ptr %ptr, i32 %in) { ; GCN1-NEXT: .LBB141_1: ; %atomicrmw.start ; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 ; 
GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN1-NEXT: v_add_i32_e32 v3, vcc, -1, v4 -; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN1-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4 ; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN1-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -8971,8 +8970,7 @@ define void @flat_atomic_udec_wrap_i32_noret(ptr %ptr, i32 %in) { ; GCN2-NEXT: .LBB141_1: ; %atomicrmw.start ; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN2-NEXT: v_add_u32_e32 v3, vcc, -1, v4 -; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN2-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4 ; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN2-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -8996,9 +8994,8 @@ define void @flat_atomic_udec_wrap_i32_noret(ptr %ptr, i32 %in) { ; GCN3-NEXT: .LBB141_1: ; %atomicrmw.start ; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN3-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4 ; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 -; GCN3-NEXT: v_add_u32_e32 v3, -1, v4 ; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc ; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc @@ -9027,8 +9024,7 @@ define void @flat_atomic_udec_wrap_i32_noret_offset(ptr %out, i32 %in) { ; GCN1-NEXT: .LBB142_1: ; %atomicrmw.start ; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN1-NEXT: v_add_i32_e32 v3, vcc, -1, v4 -; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN1-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4 ; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN1-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -9054,8 +9050,7 @@ define void @flat_atomic_udec_wrap_i32_noret_offset(ptr %out, i32 %in) { ; GCN2-NEXT: .LBB142_1: ; %atomicrmw.start ; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN2-NEXT: v_add_u32_e32 v3, vcc, -1, v4 -; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN2-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4 ; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN2-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -9079,9 +9074,8 @@ define void @flat_atomic_udec_wrap_i32_noret_offset(ptr %out, i32 %in) { ; GCN3-NEXT: .LBB142_1: ; %atomicrmw.start ; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN3-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4 ; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 -; GCN3-NEXT: v_add_u32_e32 v3, -1, v4 ; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc ; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc @@ -9110,8 +9104,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret(ptr %ptr, i32 %in) { ; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN1-NEXT: v_mov_b32_e32 v4, v3 -; GCN1-NEXT: v_add_i32_e32 v3, vcc, -1, v4 -; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN1-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4 ; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN1-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -9136,8 +9129,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret(ptr %ptr, i32 %in) { ; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN2-NEXT: v_mov_b32_e32 v4, 
v3 -; GCN2-NEXT: v_add_u32_e32 v3, vcc, -1, v4 -; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN2-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4 ; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN2-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -9162,9 +9154,8 @@ define i32 @flat_atomic_udec_wrap_i32_ret(ptr %ptr, i32 %in) { ; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN3-NEXT: v_mov_b32_e32 v4, v3 -; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN3-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4 ; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 -; GCN3-NEXT: v_add_u32_e32 v3, -1, v4 ; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc ; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc @@ -9194,8 +9185,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret_offset(ptr %out, i32 %in) { ; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN1-NEXT: v_mov_b32_e32 v1, v0 -; GCN1-NEXT: v_add_i32_e32 v0, vcc, -1, v1 -; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GCN1-NEXT: v_subrev_i32_e32 v0, vcc, 1, v1 ; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2 ; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN1-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc @@ -9221,8 +9211,7 @@ define i32 @flat_atomic_udec_wrap_i32_ret_offset(ptr %out, i32 %in) { ; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN2-NEXT: v_mov_b32_e32 v1, v0 -; GCN2-NEXT: v_add_u32_e32 v0, vcc, -1, v1 -; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GCN2-NEXT: v_subrev_u32_e32 v0, vcc, 1, v1 ; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2 ; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN2-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc @@ -9246,9 +9235,8 @@ define i32 @flat_atomic_udec_wrap_i32_ret_offset(ptr %out, i32 %in) { ; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN3-NEXT: v_mov_b32_e32 v4, v3 -; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GCN3-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4 ; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 -; GCN3-NEXT: v_add_u32_e32 v3, -1, v4 ; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc ; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc @@ -9279,8 +9267,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_scalar(ptr inreg %ptr, i ; GCN1-NEXT: .LBB145_1: ; %atomicrmw.start ; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN1-NEXT: v_add_i32_e32 v2, vcc, -1, v3 -; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 +; GCN1-NEXT: v_subrev_i32_e32 v2, vcc, 1, v3 ; GCN1-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3 ; GCN1-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN1-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc @@ -9307,8 +9294,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_scalar(ptr inreg %ptr, i ; GCN2-NEXT: .LBB145_1: ; %atomicrmw.start ; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN2-NEXT: v_add_u32_e32 v2, vcc, -1, v3 -; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 +; GCN2-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3 ; GCN2-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3 ; GCN2-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN2-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc @@ -9335,9 +9321,8 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_scalar(ptr inreg %ptr, i ; GCN3-NEXT: .LBB145_1: ; %atomicrmw.start ; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; 
GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 +; GCN3-NEXT: v_subrev_co_u32_e32 v2, vcc, 1, v3 ; GCN3-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3 -; GCN3-NEXT: v_add_u32_e32 v2, -1, v3 ; GCN3-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN3-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GCN3-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc @@ -9369,8 +9354,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_offset_scalar(ptr inreg ; GCN1-NEXT: .LBB146_1: ; %atomicrmw.start ; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN1-NEXT: v_add_i32_e32 v2, vcc, -1, v3 -; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 +; GCN1-NEXT: v_subrev_i32_e32 v2, vcc, 1, v3 ; GCN1-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3 ; GCN1-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN1-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc @@ -9399,8 +9383,7 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_offset_scalar(ptr inreg ; GCN2-NEXT: .LBB146_1: ; %atomicrmw.start ; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN2-NEXT: v_add_u32_e32 v2, vcc, -1, v3 -; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 +; GCN2-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3 ; GCN2-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3 ; GCN2-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN2-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc @@ -9427,9 +9410,8 @@ define amdgpu_gfx void @flat_atomic_udec_wrap_i32_noret_offset_scalar(ptr inreg ; GCN3-NEXT: .LBB146_1: ; %atomicrmw.start ; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 +; GCN3-NEXT: v_subrev_co_u32_e32 v2, vcc, 1, v3 ; GCN3-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3 -; GCN3-NEXT: v_add_u32_e32 v2, -1, v3 ; GCN3-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN3-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GCN3-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] offset:16 glc @@ -9463,8 +9445,7 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_scalar(ptr inreg %ptr, i32 ; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN1-NEXT: v_mov_b32_e32 v5, v0 -; GCN1-NEXT: v_add_i32_e32 v0, vcc, -1, v5 -; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; GCN1-NEXT: v_subrev_i32_e32 v0, vcc, 1, v5 ; GCN1-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5 ; GCN1-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN1-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc @@ -9493,8 +9474,7 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_scalar(ptr inreg %ptr, i32 ; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN2-NEXT: v_mov_b32_e32 v5, v0 -; GCN2-NEXT: v_add_u32_e32 v0, vcc, -1, v5 -; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; GCN2-NEXT: v_subrev_u32_e32 v0, vcc, 1, v5 ; GCN2-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5 ; GCN2-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN2-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc @@ -9523,9 +9503,8 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_scalar(ptr inreg %ptr, i32 ; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN3-NEXT: v_mov_b32_e32 v5, v0 -; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; GCN3-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v5 ; GCN3-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5 -; GCN3-NEXT: v_add_u32_e32 v0, -1, v5 ; GCN3-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN3-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc ; GCN3-NEXT: flat_atomic_cmpswap v0, v[1:2], v[4:5] glc @@ -9557,8 +9536,7 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_offset_scalar(ptr inreg %ou ; GCN1-NEXT: ; 
=>This Inner Loop Header: Depth=1 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN1-NEXT: v_mov_b32_e32 v5, v0 -; GCN1-NEXT: v_add_i32_e32 v0, vcc, -1, v5 -; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; GCN1-NEXT: v_subrev_i32_e32 v0, vcc, 1, v5 ; GCN1-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5 ; GCN1-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN1-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc @@ -9587,8 +9565,7 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_offset_scalar(ptr inreg %ou ; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN2-NEXT: v_mov_b32_e32 v5, v0 -; GCN2-NEXT: v_add_u32_e32 v0, vcc, -1, v5 -; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; GCN2-NEXT: v_subrev_u32_e32 v0, vcc, 1, v5 ; GCN2-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5 ; GCN2-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN2-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc @@ -9617,9 +9594,8 @@ define amdgpu_gfx i32 @flat_atomic_udec_wrap_i32_ret_offset_scalar(ptr inreg %ou ; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 ; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; GCN3-NEXT: v_mov_b32_e32 v5, v0 -; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; GCN3-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v5 ; GCN3-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5 -; GCN3-NEXT: v_add_u32_e32 v0, -1, v5 ; GCN3-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GCN3-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc ; GCN3-NEXT: flat_atomic_cmpswap v0, v[1:2], v[4:5] offset:16 glc diff --git a/llvm/test/CodeGen/AMDGPU/fma-mix.gfx11plus.ll b/llvm/test/CodeGen/AMDGPU/fma-mix.gfx11plus.ll new file mode 100644 index 0000000000000..1ba13b287be46 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/fma-mix.gfx11plus.ll @@ -0,0 +1,93 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -stop-after=amdgpu-isel | FileCheck %s --check-prefixes=GFX11-REAL16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -stop-after=amdgpu-isel | FileCheck %s --check-prefixes=GFX11-FAKE16 + +; Make sure no "vgpr32 = copy vgpr16" is generated + +define amdgpu_kernel void @fma_mix_f16 (ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(1) %out) { + ; GFX11-REAL16-LABEL: name: fma_mix_f16 + ; GFX11-REAL16: bb.0.entry: + ; GFX11-REAL16-NEXT: liveins: $vgpr0, $sgpr4_sgpr5 + ; GFX11-REAL16-NEXT: {{ $}} + ; GFX11-REAL16-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5 + ; GFX11-REAL16-NEXT: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0 + ; GFX11-REAL16-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s256) from %ir.a.kernarg.offset, align 4, addrspace 4) + ; GFX11-REAL16-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub1 + ; GFX11-REAL16-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub0 + ; GFX11-REAL16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE killed [[COPY3]], %subreg.sub0, killed [[COPY2]], %subreg.sub1 + ; GFX11-REAL16-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub3 + ; GFX11-REAL16-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub2 + ; GFX11-REAL16-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE killed [[COPY5]], %subreg.sub0, killed [[COPY4]], %subreg.sub1 + ; GFX11-REAL16-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub5 + ; GFX11-REAL16-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub4 + ; GFX11-REAL16-NEXT: 
[[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE killed [[COPY7]], %subreg.sub0, killed [[COPY6]], %subreg.sub1 + ; GFX11-REAL16-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub7 + ; GFX11-REAL16-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub6 + ; GFX11-REAL16-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE killed [[COPY9]], %subreg.sub0, killed [[COPY8]], %subreg.sub1 + ; GFX11-REAL16-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GFX11-REAL16-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1023 + ; GFX11-REAL16-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[COPY1]](s32), killed [[S_MOV_B32_]], implicit $exec + ; GFX11-REAL16-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 2 + ; GFX11-REAL16-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = nuw nsw V_LSHLREV_B32_e64 killed [[S_MOV_B32_1]], killed [[V_AND_B32_e64_]], implicit $exec + ; GFX11-REAL16-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[REG_SEQUENCE]], [[V_LSHLREV_B32_e64_]], 0, 0, implicit $exec :: (load (s32) from %ir.in.gep1, addrspace 1) + ; GFX11-REAL16-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[REG_SEQUENCE1]], [[V_LSHLREV_B32_e64_]], 0, 0, implicit $exec :: (load (s32) from %ir.in.gep2, addrspace 1) + ; GFX11-REAL16-NEXT: [[GLOBAL_LOAD_SHORT_D16_SADDR_t16_:%[0-9]+]]:vgpr_16 = GLOBAL_LOAD_SHORT_D16_SADDR_t16 killed [[REG_SEQUENCE2]], [[V_LSHLREV_B32_e64_]], 0, 0, implicit $exec :: (load (s16) from %ir.in.gep3, addrspace 1) + ; GFX11-REAL16-NEXT: [[V_MOV_B16_t16_e64_:%[0-9]+]]:vgpr_16 = V_MOV_B16_t16_e64 0, 14336, 0, implicit $exec + ; GFX11-REAL16-NEXT: [[V_ADD_F16_t16_e64_:%[0-9]+]]:vgpr_16 = nofpexcept V_ADD_F16_t16_e64 0, killed [[GLOBAL_LOAD_SHORT_D16_SADDR_t16_]], 0, killed [[V_MOV_B16_t16_e64_]], 0, 0, 0, implicit $mode, implicit $exec + ; GFX11-REAL16-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; GFX11-REAL16-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; GFX11-REAL16-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vgpr_32 = REG_SEQUENCE killed [[V_ADD_F16_t16_e64_]], %subreg.lo16, killed [[DEF]], %subreg.hi16 + ; GFX11-REAL16-NEXT: [[V_FMA_MIX_F16_t16_:%[0-9]+]]:vgpr_16 = nofpexcept V_FMA_MIX_F16_t16 0, killed [[GLOBAL_LOAD_DWORD_SADDR]], 0, killed [[GLOBAL_LOAD_DWORD_SADDR1]], 8, killed [[REG_SEQUENCE4]], 0, 0, 0, implicit $mode, implicit $exec + ; GFX11-REAL16-NEXT: GLOBAL_STORE_SHORT_SADDR_t16 killed [[V_MOV_B32_e32_]], killed [[V_FMA_MIX_F16_t16_]], killed [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (s16) into %ir.4, addrspace 1) + ; GFX11-REAL16-NEXT: S_ENDPGM 0 + ; + ; GFX11-FAKE16-LABEL: name: fma_mix_f16 + ; GFX11-FAKE16: bb.0.entry: + ; GFX11-FAKE16-NEXT: liveins: $vgpr0, $sgpr4_sgpr5 + ; GFX11-FAKE16-NEXT: {{ $}} + ; GFX11-FAKE16-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5 + ; GFX11-FAKE16-NEXT: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0 + ; GFX11-FAKE16-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s256) from %ir.a.kernarg.offset, align 4, addrspace 4) + ; GFX11-FAKE16-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub1 + ; GFX11-FAKE16-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub0 + ; GFX11-FAKE16-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE killed [[COPY3]], %subreg.sub0, killed [[COPY2]], %subreg.sub1 + ; GFX11-FAKE16-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY 
[[S_LOAD_DWORDX8_IMM]].sub3 + ; GFX11-FAKE16-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub2 + ; GFX11-FAKE16-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE killed [[COPY5]], %subreg.sub0, killed [[COPY4]], %subreg.sub1 + ; GFX11-FAKE16-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub5 + ; GFX11-FAKE16-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub4 + ; GFX11-FAKE16-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE killed [[COPY7]], %subreg.sub0, killed [[COPY6]], %subreg.sub1 + ; GFX11-FAKE16-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub7 + ; GFX11-FAKE16-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX8_IMM]].sub6 + ; GFX11-FAKE16-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64_xexec_xnull = REG_SEQUENCE killed [[COPY9]], %subreg.sub0, killed [[COPY8]], %subreg.sub1 + ; GFX11-FAKE16-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GFX11-FAKE16-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1023 + ; GFX11-FAKE16-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[COPY1]](s32), killed [[S_MOV_B32_]], implicit $exec + ; GFX11-FAKE16-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 2 + ; GFX11-FAKE16-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = nuw nsw V_LSHLREV_B32_e64 killed [[S_MOV_B32_1]], killed [[V_AND_B32_e64_]], implicit $exec + ; GFX11-FAKE16-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[REG_SEQUENCE]], [[V_LSHLREV_B32_e64_]], 0, 0, implicit $exec :: (load (s32) from %ir.in.gep1, addrspace 1) + ; GFX11-FAKE16-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[REG_SEQUENCE1]], [[V_LSHLREV_B32_e64_]], 0, 0, implicit $exec :: (load (s32) from %ir.in.gep2, addrspace 1) + ; GFX11-FAKE16-NEXT: [[GLOBAL_LOAD_USHORT_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_USHORT_SADDR killed [[REG_SEQUENCE2]], [[V_LSHLREV_B32_e64_]], 0, 0, implicit $exec :: (load (s16) from %ir.in.gep3, addrspace 1) + ; GFX11-FAKE16-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 14336 + ; GFX11-FAKE16-NEXT: [[V_ADD_F16_fake16_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F16_fake16_e64 0, killed [[GLOBAL_LOAD_USHORT_SADDR]], 0, killed [[S_MOV_B32_2]], 0, 0, implicit $mode, implicit $exec + ; GFX11-FAKE16-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; GFX11-FAKE16-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[DEF]] + ; GFX11-FAKE16-NEXT: [[V_FMA_MIXLO_F16_:%[0-9]+]]:vgpr_32 = nofpexcept V_FMA_MIXLO_F16 0, killed [[GLOBAL_LOAD_DWORD_SADDR]], 0, killed [[GLOBAL_LOAD_DWORD_SADDR1]], 8, killed [[V_ADD_F16_fake16_e64_]], 0, [[COPY10]], 0, 0, implicit $mode, implicit $exec + ; GFX11-FAKE16-NEXT: GLOBAL_STORE_SHORT_SADDR killed [[V_MOV_B32_e32_]], killed [[V_FMA_MIXLO_F16_]], killed [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (s16) into %ir.4, addrspace 1) + ; GFX11-FAKE16-NEXT: S_ENDPGM 0 +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %in.gep1 = getelementptr i32, ptr addrspace(1) %a, i32 %tid + %in.gep2 = getelementptr i32, ptr addrspace(1) %b, i32 %tid + %in.gep3 = getelementptr i32, ptr addrspace(1) %c, i32 %tid + %load.a = load float, ptr addrspace(1) %in.gep1 + %load.b = load float, ptr addrspace(1) %in.gep2 + %load.c = load half, ptr addrspace(1) %in.gep3 + %add.c = fadd half %load.c, 0.5 + %load.float.c = fpext half %add.c to float + %result = tail call float @llvm.fmuladd.f32(float %load.a, float %load.b, float %load.float.c) + %half = fptrunc float %result to half + store half %half, ptr 
addrspace(1) %out + ret void +} + diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll index ed48999e6d1e7..bd28f72bb8913 100644 --- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll @@ -1,734 +1,759 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s -; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s +; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s define half @test_fmax_legacy_ugt_f16(half %a, half %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_max_f16_e32 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 
v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v1, v0 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v1 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-TRUE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-NNAN-TRUE16: ; %bb.0: -; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l -; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-FAKE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-NNAN-FAKE16: ; %bb.0: -; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1 -; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_max_legacy_f32_e32 v0, v1, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt half %a, %b %val = select i1 %cmp, half %a, half %b ret half %val } +define half @test_fmax_legacy_ugt_f16_fast(half %a, half %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_max_f16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: 
test_fmax_legacy_ugt_f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_e32 v0, v0, v1 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16_fast: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16_fast: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt half %a, %b + %val = select nnan nsz i1 %cmp, half %a, half %b + ret half %val +} + define <2 x half> @test_fmax_legacy_ugt_v2f16(<2 x half> %a, <2 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v2, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v3, v1 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: 
test_fmax_legacy_ugt_v2f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v2 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v3 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v1 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v2f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v2f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 +; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v2f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_max_legacy_f32_e32 v0, v2, v0 +; 
SI-NEXT: v_max_legacy_f32_e32 v1, v3, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <2 x half> %a, %b %val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b ret <2 x half> %val } +define <2 x half> @test_fmax_legacy_ugt_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v0, v0, v1 +; VI-NEXT: v_or_b32_e32 v0, v0, v2 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v2 +; SI-NEXT: v_max_f32_e32 v1, v1, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <2 x half> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b + ret <2 x half> %val +} + define <3 x half> @test_fmax_legacy_ugt_v3f16(<3 x half> %a, <3 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: 
test_fmax_legacy_ugt_v3f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v3, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v4, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v5, v2 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v3 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v4 -; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v5 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 -; 
GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v3f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v3f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v3f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_max_legacy_f32_e32 v0, v3, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v4, v1 +; SI-NEXT: v_max_legacy_f32_e32 v2, v5, v2 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, 
v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <3 x half> %a, %b %val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b ret <3 x half> %val } +define <3 x half> @test_fmax_legacy_ugt_v3f16_fast(<3 x half> %a, <3 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX9-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v0, v0, v2 +; VI-NEXT: v_max_f16_e32 v1, v1, v3 +; VI-NEXT: v_or_b32_e32 v0, v0, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v3 +; SI-NEXT: v_max_f32_e32 v1, v1, v4 +; SI-NEXT: v_max_f32_e32 v2, v2, v5 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX11-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <3 x half> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b + ret <3 x half> %val +} + define <4 x half> @test_fmax_legacy_ugt_v4f16(<4 x half> %a, <4 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: 
v_cmp_nle_f16_e32 vcc, v7, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v4, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v5, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v6, v2 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v7, v3 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v4 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v5 -; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v6 -; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v7 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v4f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: 
v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v4f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 +; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4 +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6 +; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v4f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_max_legacy_f32_e32 v0, v4, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v5, v1 +; SI-NEXT: v_max_legacy_f32_e32 v2, v6, v2 +; SI-NEXT: v_max_legacy_f32_e32 v3, v7, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 +; 
GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <4 x half> %a, %b %val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b ret <4 x half> %val } +define <4 x half> @test_fmax_legacy_ugt_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v1, v1, v3 +; VI-NEXT: v_max_f16_e32 v0, v0, v2 +; VI-NEXT: v_or_b32_e32 v0, v0, v5 +; VI-NEXT: v_or_b32_e32 v1, v1, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v4 +; SI-NEXT: v_max_f32_e32 v1, v1, v5 +; SI-NEXT: v_max_f32_e32 v2, v2, v6 +; SI-NEXT: v_max_f32_e32 v3, v3, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <4 x half> %a, %b + %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b + ret <4 x half> %val +} + define <8 x half> @test_fmax_legacy_ugt_v8f16(<8 x half> %a, <8 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v4 -; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v5 -; GFX9-NNAN-NEXT: v_pk_max_f16 v2, v2, v6 -; GFX9-NNAN-NEXT: v_pk_max_f16 v3, v3, v7 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14 -; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v3, v3, v7 -; VI-NNAN-NEXT: v_max_f16_e32 v2, v2, v6 -; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v5 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v4 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v11 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10 -; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9 -; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v8, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v9, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v10, v2 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v11, v3 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v4, v12, v4 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v5, v13, v5 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v6, v14, v6 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v7, v15, v7 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8 -; 
SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v8 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v9 -; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v10 -; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v11 -; SI-NNAN-NEXT: v_max_f32_e32 v4, v4, v12 -; SI-NNAN-NEXT: v_max_f32_e32 v5, v5, v13 -; SI-NNAN-NEXT: v_max_f32_e32 v6, v6, v14 -; SI-NNAN-NEXT: v_max_f32_e32 v7, v7, v15 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo -; 
GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v4 -; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v5 -; GFX11-NNAN-NEXT: v_pk_max_f16 v2, v2, v6 -; GFX11-NNAN-NEXT: v_pk_max_f16 v3, v3, v7 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v8f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 +; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 +; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4 +; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4 +; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v8f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 +; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 +; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 +; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 +; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 +; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; VI-NEXT: v_cmp_nle_f16_e32 
vcc, v2, v6 +; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 +; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8 +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10 +; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12 +; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v14 +; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v8f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_max_legacy_f32_e32 v0, v8, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v9, v1 +; SI-NEXT: v_max_legacy_f32_e32 v2, v10, v2 +; SI-NEXT: v_max_legacy_f32_e32 v3, v11, v3 +; SI-NEXT: v_max_legacy_f32_e32 v4, v12, v4 +; SI-NEXT: v_max_legacy_f32_e32 v5, v13, v5 +; SI-NEXT: v_max_legacy_f32_e32 v6, v14, v6 +; SI-NEXT: v_max_legacy_f32_e32 v7, v15, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 +; 
GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <8 x half> %a, %b %val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b ret <8 x half> %val } +define <8 x half> @test_fmax_legacy_ugt_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v0, v0, v4 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v5 +; GFX9-NEXT: v_pk_max_f16 v2, v2, v6 +; GFX9-NEXT: v_pk_max_f16 v3, v3, v7 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v3, v3, v7 +; VI-NEXT: v_max_f16_e32 v2, v2, v6 +; VI-NEXT: v_max_f16_e32 v1, v1, v5 +; VI-NEXT: v_max_f16_e32 v0, v0, v4 +; VI-NEXT: v_or_b32_e32 v0, v0, v11 +; VI-NEXT: v_or_b32_e32 v1, v1, v10 +; VI-NEXT: v_or_b32_e32 v2, v2, v9 +; VI-NEXT: v_or_b32_e32 v3, v3, v8 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: 
v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v8 +; SI-NEXT: v_max_f32_e32 v1, v1, v9 +; SI-NEXT: v_max_f32_e32 v2, v2, v10 +; SI-NEXT: v_max_f32_e32 v3, v3, v11 +; SI-NEXT: v_max_f32_e32 v4, v4, v12 +; SI-NEXT: v_max_f32_e32 v5, v5, v13 +; SI-NEXT: v_max_f32_e32 v6, v6, v14 +; SI-NEXT: v_max_f32_e32 v7, v7, v15 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v8f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v0, v0, v4 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v5 +; GFX11-NEXT: v_pk_max_f16 v2, v2, v6 +; GFX11-NEXT: v_pk_max_f16 v3, v3, v7 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <8 x half> %a, %b + %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b + ret <8 x half> %val +} + attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll index eee2bd1b3725d..f3a84e6e45260 100644 --- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll +++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll @@ -1,8 +1,6 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s ; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s @@ -12,12 +10,10 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1 ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] - -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 
v{{[0-9]+}}, [[B]], [[A]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { @@ -34,18 +30,38 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr a ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_uge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp uge float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] ; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]] ; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]] +; VI: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { @@ -64,16 +80,40 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %o ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] +; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]] +; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] + +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + %a.nnan = fadd nnan float %a, 1.0 + %b.nnan = fadd nnan float %b, 2.0 + + %cmp = fcmp uge float %a.nnan, %b.nnan + %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; VI-SAFE: v_cmp_ge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] +; VI: v_cmp_ge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 
{ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -89,17 +129,35 @@ define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32: +; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_oge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp oge float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} +; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nle_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -115,16 +173,35 @@ define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr a ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_ugt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp ugt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] +; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -140,17 +217,35 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32: +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, 
[[A]], [[B]] +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_ogt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp ogt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] -; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] ; EG: MAX define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 @@ -166,23 +261,39 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; EG: MAX +define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <1 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <1 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ogt <1 x float> %a, %b + %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b + store <1 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32: -; SI-SAFE: v_max_legacy_f32_e32 -; SI-SAFE: v_max_legacy_f32_e32 -; SI-SAFE: v_max_legacy_f32_e32 - -; VI-SAFE: v_cmp_gt_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_gt_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_gt_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE-NOT: v_cmp -; VI-SAFE-NOT: v_cndmask - -; GCN-NONAN: v_max_f32_e32 -; GCN-NONAN: v_max_f32_e32 -; GCN-NONAN: v_max_f32_e32 +; SI: v_max_legacy_f32_e32 +; SI: v_max_legacy_f32_e32 +; SI: v_max_legacy_f32_e32 + +; VI: v_cmp_gt_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_gt_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_gt_f32_e32 +; VI: v_cndmask_b32_e32 +; VI-NOT: v_cmp +; VI-NOT: v_cndmask ; GCN-NOT: v_max define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { @@ -199,6 +310,27 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32_fast: + +; GCN: v_max_f32_e32 +; GCN: v_max_f32_e32 +; GCN: v_max_f32_e32 + +; GCN-NOT: v_max +define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <3 x float>, ptr addrspace(1) 
%gep.0, i32 1 + + %a = load <3 x float>, ptr addrspace(1) %gep.0 + %b = load <3 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ogt <3 x float> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b + store <3 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_multi_use: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] diff --git a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll index 2ac5891773d73..37f077d53cf94 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll @@ -1,16 +1,12 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope --check-prefixes=GCN %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope --check-prefixes=GCN,VI-NNAN %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN %s ; GCN-LABEL: {{^}}min_fneg_select_regression_0: ; GCN-NOT: v_mul -; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0 - -; VI-SAFE: v_cmp_nle_f32_e32 vcc, 1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc +; VI: v_cmp_nle_f32_e32 vcc, 1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ult float %a, 1.0 @@ -18,15 +14,23 @@ define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 { ret float %min.a } +; GCN-LABEL: {{^}}min_fneg_select_regression_0_fast: +; GCN-NOT: v_mul + +define amdgpu_ps float @min_fneg_select_regression_0_fast(float %a, float %b) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ult float %a, 1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0: ; GCN-NOT: v_mul ; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0 +; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ult float %a, -1.0 @@ -34,15 +38,24 @@ define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0 ret float %min.a } -; GCN-LABEL: {{^}}max_fneg_select_regression_0: +; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0_fast: ; GCN-NOT: v_mul -; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0 +; VI: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0 +define amdgpu_ps float @min_fneg_select_regression_posk_0_fast(float %a, float %b) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ult float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + +; GCN-LABEL: {{^}}max_fneg_select_regression_0: +; GCN-NOT: v_mul -; VI-SAFE: v_cmp_nge_f32_e32 vcc, 1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, 
-v0, vcc +; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0 -; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, -1.0 +; VI: v_cmp_nge_f32_e32 vcc, 1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ugt float %a, 1.0 @@ -50,15 +63,24 @@ define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 { ret float %min.a } -; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0: +; GCN-LABEL: {{^}}max_fneg_select_regression_0_fast: ; GCN-NOT: v_mul -; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0 +; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, -1.0 +define amdgpu_ps float @max_fneg_select_regression_0_fast(float %a) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ugt float %a, 1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0 + ret float %min.a +} + +; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0: +; GCN-NOT: v_mul -; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc +; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0 -; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0 +; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 { %fneg.a = fsub float -0.0, %a %cmp.a = fcmp ugt float %a, -1.0 @@ -66,13 +88,22 @@ define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 { ret float %min.a } +; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0_fast: +; GCN-NOT: v_mul + +; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0 +define amdgpu_ps float @max_fneg_select_regression_posk_0_fast(float %a) #0 { + %fneg.a = fsub float -0.0, %a + %cmp.a = fcmp ugt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1: ; SI: v_min_legacy_f32_e64 v0, 1.0, -v0 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ugt float %a, -1.0 @@ -80,13 +111,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1_fast: + +; VI: v_min_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ugt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1: ; SI: v_max_legacy_f32_e64 v0, 1.0, -v0 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_max_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ult float %a, -1.0 @@ -94,13 +133,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1_fast: + +; VI: v_max_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1_fast(float %a, float %b) #0 { + 
%fneg.a = fneg float %a + %cmp.a = fcmp ult float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1: ; SI: v_min_legacy_f32_e64 v0, -v0, 1.0 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_lt_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ogt float %a, -1.0 @@ -108,13 +155,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1_fast: + +; VI: v_min_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ogt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1: ; SI: v_max_legacy_f32_e64 v0, -v0, 1.0 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, -1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc - -; VI-NANN: v_max_f32_e64 v0, -v0, 1.0 +; VI: v_cmp_gt_f32_e32 vcc, -1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp olt float %a, -1.0 @@ -122,17 +177,24 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1_fast: + +; VI-NANN: v_max_f32_e64 v0, -v0, 1.0 +define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp olt float %a, -1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_min_legacy_f32_e64 v0, [[K]], -v0 -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_nge_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ugt float %a, -8.0 @@ -140,17 +202,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ugt float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_max_legacy_f32_e64 v0, [[K]], -v0 -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[K0]], v0 
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_nle_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ult float %a, -8.0 @@ -158,17 +228,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ult float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_min_legacy_f32_e64 v0, -v0, [[K]] -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_lt_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ogt float %a, -8.0 @@ -176,18 +254,26 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp ogt float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8: ; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 ; SI-NEXT: v_max_legacy_f32_e64 v0, -v0, [[K]] -; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 -; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[K0]], v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc - -; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000 -; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]] +; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000 +; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000 +; VI: v_cmp_gt_f32_e32 vcc, [[K0]], v0 +; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp olt float %a, -8.0 @@ -195,13 +281,22 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8_fast: + +; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000 +; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]] +define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp olt float %a, -8.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0 + ret float %min.a +} 
+ ; GCN-LABEL: {{^}}select_fneg_a_or_neg1_cmp_olt_a_1: ; SI: v_max_legacy_f32_e64 v0, -v0, -1.0 -; VI-SAFE: v_cmp_gt_f32_e32 vcc, 1.0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc - -; VI-NNAN: v_max_f32_e64 v0, -v0, -1.0 +; VI: v_cmp_gt_f32_e32 vcc, 1.0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp olt float %a, 1.0 @@ -209,15 +304,22 @@ define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0 ret float %min.a } +; GCN-LABEL: {{^}}select_fneg_a_or_neg1_cmp_olt_a_1_fast: + +; VI: v_max_f32_e64 v0, -v0, -1.0 +define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp olt float %a, 1.0 + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0 + ret float %min.a +} + ; GCN-LABEL: {{^}}ult_a_select_fneg_a_b: ; SI: v_cmp_nge_f32_e32 vcc, v0, v1 ; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc -; VI-SAFE: v_cmp_nge_f32_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc - -; VI-NNAN: v_cmp_lt_f32_e32 vcc, v0, v1 -; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +; VI: v_cmp_nge_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ult float %a, %b @@ -225,15 +327,23 @@ define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 { ret float %min.a } +; GCN-LABEL: {{^}}ult_a_select_fneg_a_b_fast: + +; VI: v_cmp_lt_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +define amdgpu_ps float @ult_a_select_fneg_a_b_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp nnan nsz ult float %a, %b + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b + ret float %min.a +} + ; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b: ; SI: v_cmp_nle_f32_e32 vcc, v0, v1 ; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc -; VI-SAFE: v_cmp_nle_f32_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc - -; VI-NNAN: v_cmp_gt_f32_e32 vcc, v0, v1 -; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +; VI: v_cmp_nle_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 { %fneg.a = fneg float %a %cmp.a = fcmp ugt float %a, %b @@ -241,5 +351,16 @@ define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 { ret float %min.a } +; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b_fast: + +; VI: v_cmp_gt_f32_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc +define amdgpu_ps float @ugt_a_select_fneg_a_b_fast(float %a, float %b) #0 { + %fneg.a = fneg float %a + %cmp.a = fcmp nnan nsz ugt float %a, %b + %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b + ret float %min.a +} + attributes #0 = { nounwind } attributes #1 = { nounwind readnone } diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll index 34cb0b1ba29b7..40c2ec0a39f51 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll @@ -1,735 +1,760 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck 
-enable-var-scope -check-prefixes=GFX9-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s -; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s +; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s define half @test_fmin_legacy_ule_f16(half %a, half %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_min_f16_e32 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v1, v0 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v1 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; 
GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-TRUE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-NNAN-TRUE16: ; %bb.0: -; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l -; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-FAKE16-LABEL: test_fmin_legacy_ule_f16: -; GFX11-NNAN-FAKE16: ; %bb.0: -; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1 -; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_min_legacy_f32_e32 v0, v1, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule half %a, %b %val = select i1 %cmp, half %a, half %b ret half %val } +define half @test_fmin_legacy_ule_f16_fast(half %a, half %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_min_f16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_e32 v0, v0, v1 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16_fast: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16_fast: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule half %a, %b + %val = select nnan nsz i1 %cmp, half %a, half %b + ret half %val +} + define <2 x half> @test_fmin_legacy_ule_v2f16(<2 x half> %a, <2 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v2f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v2f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v2f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v2, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v3, v1 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v2 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v3 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v2f16: -; 
GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v2f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v2f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v1 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v2f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v2f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2 +; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v2f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_min_legacy_f32_e32 v0, v2, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v3, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v2f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v2f16: +; GFX11-FAKE16: ; %bb.0: 
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <2 x half> %a, %b %val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b ret <2 x half> %val } +define <2 x half> @test_fmin_legacy_ule_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v2f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v2f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v0, v0, v1 +; VI-NEXT: v_or_b32_e32 v0, v0, v2 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v2f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v2 +; SI-NEXT: v_min_f32_e32 v1, v1, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v2f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <2 x half> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b + ret <2 x half> %val +} + define <3 x half> @test_fmin_legacy_ule_v3f16(<3 x half> %a, <3 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v3f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v3f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; 
VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v3f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v3, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v4, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v5, v2 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v3 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v4 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v5 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v3f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v3f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: 
v_cmp_ngt_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v3f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v3f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v3f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v3f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_min_legacy_f32_e32 v0, v3, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v4, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v5, v2 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v3f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v3f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; 
GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <3 x half> %a, %b %val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b ret <3 x half> %val } +define <3 x half> @test_fmin_legacy_ule_v3f16_fast(<3 x half> %a, <3 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v3f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX9-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v3f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v0, v0, v2 +; VI-NEXT: v_min_f16_e32 v1, v1, v3 +; VI-NEXT: v_or_b32_e32 v0, v0, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v3f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v3 +; SI-NEXT: v_min_f32_e32 v1, v1, v4 +; SI-NEXT: v_min_f32_e32 v2, v2, v5 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v3f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX11-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <3 x half> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b + ret <3 x half> %val +} + define <4 x half> @test_fmin_legacy_ule_v4f16(<4 x half> %a, <4 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; GFX9-NNAN: ; %bb.0: 
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v4, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v5, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v6, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v7, v3 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; 
SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v4 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v5 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v6 -; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v7 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v4f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v4f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 +; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4 +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6 +; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v4f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_min_legacy_f32_e32 v0, v4, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v5, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v6, v2 +; SI-NEXT: v_min_legacy_f32_e32 v3, v7, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v4f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v4f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; 
GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <4 x half> %a, %b %val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b ret <4 x half> %val } +define <4 x half> @test_fmin_legacy_ule_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v4f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v4f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v1, v1, v3 +; VI-NEXT: v_min_f16_e32 v0, v0, v2 +; VI-NEXT: v_or_b32_e32 v0, v0, v5 +; VI-NEXT: v_or_b32_e32 v1, v1, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v4f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v4 +; SI-NEXT: v_min_f32_e32 v1, v1, v5 +; SI-NEXT: v_min_f32_e32 v2, v2, v6 +; SI-NEXT: v_min_f32_e32 v3, v3, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v4f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX11-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <4 x half> %a, %b + %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b + ret <4 x half> %val +} + define <8 x half> @test_fmin_legacy_ule_v8f16(<8 x half> %a, <8 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 -; GFX9-SAFE-NEXT: 
v_cndmask_b32_e32 v2, v6, v2, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v4 -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v5 -; GFX9-NNAN-NEXT: v_pk_min_f16 v2, v2, v6 -; GFX9-NNAN-NEXT: v_pk_min_f16 v3, v3, v7 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14 -; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v3, v3, v7 -; VI-NNAN-NEXT: v_min_f16_e32 v2, v2, v6 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v5 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v4 
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v11 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10 -; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9 -; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v8, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v9, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v10, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v11, v3 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v4, v12, v4 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v5, v13, v5 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v6, v14, v6 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v7, v15, v7 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; 
SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v8 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v9 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v10 -; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v11 -; SI-NNAN-NEXT: v_min_f32_e32 v4, v4, v12 -; SI-NNAN-NEXT: v_min_f32_e32 v5, v5, v13 -; SI-NNAN-NEXT: v_min_f32_e32 v6, v6, v14 -; SI-NNAN-NEXT: v_min_f32_e32 v7, v7, v15 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 -; 
GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v4 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v5 -; GFX11-NNAN-NEXT: v_pk_min_f16 v2, v2, v6 -; GFX11-NNAN-NEXT: v_pk_min_f16 v3, v3, v7 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v8f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 +; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 +; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4 +; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4 +; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v8f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 +; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 +; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 +; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 +; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 +; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 +; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 +; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8 +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10 +; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12 +; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v14 +; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v8f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_min_legacy_f32_e32 v0, v8, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v9, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v10, v2 +; SI-NEXT: v_min_legacy_f32_e32 v3, v11, v3 +; SI-NEXT: v_min_legacy_f32_e32 v4, v12, v4 +; SI-NEXT: v_min_legacy_f32_e32 v5, v13, v5 +; SI-NEXT: v_min_legacy_f32_e32 v6, v14, v6 +; SI-NEXT: v_min_legacy_f32_e32 v7, v15, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v8f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v8f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10 +; GFX11-FAKE16-NEXT: 
v_lshrrev_b32_e32 v8, 16, v4 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <8 x half> %a, %b %val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b ret <8 x half> %val } +define <8 x half> @test_fmin_legacy_ule_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v8f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v4 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v5 +; GFX9-NEXT: v_pk_min_f16 v2, v2, v6 +; GFX9-NEXT: v_pk_min_f16 v3, v3, v7 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v8f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v3, v3, v7 +; VI-NEXT: v_min_f16_e32 v2, v2, v6 +; VI-NEXT: v_min_f16_e32 v1, v1, v5 +; VI-NEXT: v_min_f16_e32 v0, v0, v4 +; VI-NEXT: v_or_b32_e32 v0, v0, v11 +; VI-NEXT: v_or_b32_e32 v1, v1, v10 +; VI-NEXT: v_or_b32_e32 v2, v2, v9 +; VI-NEXT: v_or_b32_e32 v3, v3, v8 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v8f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; 
SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v8 +; SI-NEXT: v_min_f32_e32 v1, v1, v9 +; SI-NEXT: v_min_f32_e32 v2, v2, v10 +; SI-NEXT: v_min_f32_e32 v3, v3, v11 +; SI-NEXT: v_min_f32_e32 v4, v4, v12 +; SI-NEXT: v_min_f32_e32 v5, v5, v13 +; SI-NEXT: v_min_f32_e32 v6, v6, v14 +; SI-NEXT: v_min_f32_e32 v7, v7, v15 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v8f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v4 +; GFX11-NEXT: v_pk_min_f16 v1, v1, v5 +; GFX11-NEXT: v_pk_min_f16 v2, v2, v6 +; GFX11-NEXT: v_pk_min_f16 v3, v3, v7 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <8 x half> %a, %b + %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b + ret <8 x half> %val +} + attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll index ec4dd858b92ea..defcffa641e64 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll @@ -1,8 +1,6 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-NONAN,GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NONAN,GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s ; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s @@ -14,13 +12,9 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1 ; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32: ; EG: MIN * -; SI-SAFE: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; SI: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} -; SI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} - -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} - -; VI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; VI: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1) %out, <4 x float> %reg0) #0 { %r0 = extractelement <4 x float> %reg0, i32 0 %r1 = extractelement <4 x float> %reg0, i32 1 @@ -30,22 +24,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1) ret void } -; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32: -; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} +; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32_fast: -; SI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] +; SI: v_min_f32_e32 
v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} -; GCN-NONAN: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] +; VI: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32_fast(ptr addrspace(1) %out, <4 x float> %reg0) #0 { + %r0 = extractelement <4 x float> %reg0, i32 0 + %r1 = extractelement <4 x float> %reg0, i32 1 + %r2 = fcmp nnan nsz uge float %r0, %r1 + %r3 = select nnan nsz i1 %r2, float %r1, float %r0 + store float %r3, ptr addrspace(1) %out + ret void +} -; VI-SAFE: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] +; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32: +; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; SI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]] +; VI: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] -; VI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]] +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]] +; VI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] +; VI: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]] define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, float %a, float %b) #0 { %cmp = fcmp ule float %a, %b %val = select i1 %cmp, float %a, float %b @@ -53,6 +57,19 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo ret void } +; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32_fast: +; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] + +; GCN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]] +define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, float %a, float %b) #0 { + %cmp = fcmp ule float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; Nsz also needed ; FIXME: Should separate tests ; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src: @@ -61,12 +78,10 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo ; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 ; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] - -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] -; VI-SAFE: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] +; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] +; VI: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) %out, float %a, float %b) #0 { %a.nnan = fadd nnan float %a, 1.0 %b.nnan = fadd nnan float %b, 2.0 @@ -76,16 +91,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) ret void } +; Nsz also needed +; FIXME: Should separate tests +; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src_fast: +; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 +; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 + +; GCN: 
v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] +define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src_fast(ptr addrspace(1) %out, float %a, float %b) #0 { + %a.nnan = fadd nnan float %a, 1.0 + %b.nnan = fadd nnan float %b, 2.0 + %cmp = fcmp ule float %a.nnan, %b.nnan + %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] - -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -100,16 +131,33 @@ define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_le_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp ule float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; VI: v_cmp_le_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -124,16 +172,33 @@ define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ole_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, 
[[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp ole float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -148,16 +213,33 @@ define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_olt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + %cmp = fcmp olt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -172,16 +254,33 @@ define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ult_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + %cmp = fcmp ult float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store 
float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid @@ -196,19 +295,35 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ult_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <1 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <1 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <1 x float> %a, %b + %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b + store <1 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32: ; GCN: {{buffer|flat}}_load_dwordx2 ; GCN: {{buffer|flat}}_load_dwordx2 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 - -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid @@ -223,25 +338,40 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32_fast: +; GCN: {{buffer|flat}}_load_dwordx2 +; GCN: {{buffer|flat}}_load_dwordx2 + +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +define amdgpu_kernel void @test_fmin_legacy_ult_v2f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <2 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <2 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <2 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <2 x float> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x float> %a, <2 x float> %b + store <2 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32: -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE-NOT: v_min_ - -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 +; SI-NOT: v_min_ + +; 
VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 ; VI-NOT: v_cmp ; VI-NOT: v_cndmask - -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN-NOT: v_min_ define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid @@ -256,6 +386,28 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32_fast: +; VI-NOT: v_cmp +; VI-NOT: v_cndmask + +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +; GCN-NOT: v_min_ +define amdgpu_kernel void @test_fmin_legacy_ult_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <3 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load <3 x float>, ptr addrspace(1) %gep.0 + %b = load <3 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <3 x float> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b + store <3 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_multi_use: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll index 462d7748b86cd..b14e8c44ffcce 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll @@ -581,145 +581,63 @@ define { half, half } @v_fneg_add_multi_use_fneg_x_f16(half %a, half %b, half %c ret { half, half } %insert.1 } -; This one asserted with -enable-no-signed-zeros-fp-math -define amdgpu_ps half @fneg_fadd_0_f16(half inreg %tmp2, half inreg %tmp6, <4 x i32> %arg) #0 { -; SI-SAFE-LABEL: fneg_fadd_0_f16: -; SI-SAFE: ; %bb.0: ; %.entry -; SI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, s1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, s0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_div_scale_f32 v2, s[0:1], v0, v0, 1.0 -; SI-SAFE-NEXT: v_rcp_f32_e32 v3, v2 -; SI-SAFE-NEXT: v_div_scale_f32 v4, vcc, 1.0, v0, 1.0 -; SI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; SI-SAFE-NEXT: v_fma_f32 v5, -v2, v3, 1.0 -; SI-SAFE-NEXT: v_fma_f32 v3, v5, v3, v3 -; SI-SAFE-NEXT: v_mul_f32_e32 v5, v4, v3 -; SI-SAFE-NEXT: v_fma_f32 v6, -v2, v5, v4 -; SI-SAFE-NEXT: v_fma_f32 v5, v6, v3, v5 -; SI-SAFE-NEXT: v_fma_f32 v2, -v2, v5, v4 -; SI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; SI-SAFE-NEXT: v_div_fmas_f32 v2, v2, v3, v5 -; SI-SAFE-NEXT: v_div_fixup_f32 v0, v2, v0, 1.0 -; SI-SAFE-NEXT: v_mad_f32 v0, v0, 0, 0 -; SI-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, v0, v1 -; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc -; SI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; SI-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; SI-SAFE-NEXT: ; return to shader part epilog -; -; SI-NSZ-LABEL: fneg_fadd_0_f16: -; SI-NSZ: ; %bb.0: ; %.entry -; SI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0 -; SI-NSZ-NEXT: v_cvt_f16_f32_e32 v0, s1 -; SI-NSZ-NEXT: v_cvt_f16_f32_e32 v1, s0 -; SI-NSZ-NEXT: 
v_cvt_f32_f16_e32 v0, v0 -; SI-NSZ-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NSZ-NEXT: v_div_scale_f32 v2, s[0:1], v0, v0, 1.0 -; SI-NSZ-NEXT: v_rcp_f32_e32 v3, v2 -; SI-NSZ-NEXT: v_div_scale_f32 v4, vcc, 1.0, v0, 1.0 -; SI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; SI-NSZ-NEXT: v_fma_f32 v5, -v2, v3, 1.0 -; SI-NSZ-NEXT: v_fma_f32 v3, v5, v3, v3 -; SI-NSZ-NEXT: v_mul_f32_e32 v5, v4, v3 -; SI-NSZ-NEXT: v_fma_f32 v6, -v2, v5, v4 -; SI-NSZ-NEXT: v_fma_f32 v5, v6, v3, v5 -; SI-NSZ-NEXT: v_fma_f32 v2, -v2, v5, v4 -; SI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; SI-NSZ-NEXT: v_div_fmas_f32 v2, v2, v3, v5 -; SI-NSZ-NEXT: v_div_fixup_f32 v0, v2, v0, 1.0 -; SI-NSZ-NEXT: v_mul_f32_e32 v0, 0x80000000, v0 -; SI-NSZ-NEXT: v_cmp_nlt_f32_e64 vcc, -v0, v1 -; SI-NSZ-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; SI-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; SI-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; SI-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; SI-NSZ-NEXT: ; return to shader part epilog -; -; VI-SAFE-LABEL: fneg_fadd_0_f16: -; VI-SAFE: ; %bb.0: ; %.entry -; VI-SAFE-NEXT: v_rcp_f16_e32 v0, s1 -; VI-SAFE-NEXT: v_mov_b32_e32 v1, s0 -; VI-SAFE-NEXT: v_mul_f16_e32 v0, 0, v0 -; VI-SAFE-NEXT: v_add_f16_e32 v0, 0, v0 -; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x8000, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, s0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc -; VI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7e00 -; VI-SAFE-NEXT: v_cmp_nlt_f16_e32 vcc, 0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; VI-SAFE-NEXT: ; return to shader part epilog -; -; VI-NSZ-LABEL: fneg_fadd_0_f16: -; VI-NSZ: ; %bb.0: ; %.entry -; VI-NSZ-NEXT: v_rcp_f16_e32 v0, s1 -; VI-NSZ-NEXT: v_mov_b32_e32 v1, s0 -; VI-NSZ-NEXT: v_mul_f16_e32 v0, 0x8000, v0 -; VI-NSZ-NEXT: v_cmp_nlt_f16_e64 vcc, -v0, s0 -; VI-NSZ-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; VI-NSZ-NEXT: v_mov_b32_e32 v1, 0x7e00 -; VI-NSZ-NEXT: v_cmp_nlt_f16_e32 vcc, 0, v0 -; VI-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; VI-NSZ-NEXT: ; return to shader part epilog -; -; GFX11-SAFE-LABEL: fneg_fadd_0_f16: -; GFX11-SAFE: ; %bb.0: ; %.entry -; GFX11-SAFE-NEXT: v_rcp_f16_e32 v0, s1 -; GFX11-SAFE-NEXT: s_waitcnt_depctr 0xfff -; GFX11-SAFE-NEXT: v_mul_f16_e32 v0, 0, v0 -; GFX11-SAFE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-SAFE-NEXT: v_add_f16_e32 v0, 0, v0 -; GFX11-SAFE-NEXT: v_xor_b32_e32 v1, 0x8000, v0 -; GFX11-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc_lo, s0, v0 -; GFX11-SAFE-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, s0, vcc_lo -; GFX11-SAFE-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0 -; GFX11-SAFE-NEXT: v_cndmask_b32_e64 v0, 0x7e00, 0, vcc_lo -; GFX11-SAFE-NEXT: ; return to shader part epilog -; -; GFX11-NSZ-LABEL: fneg_fadd_0_f16: -; GFX11-NSZ: ; %bb.0: ; %.entry -; GFX11-NSZ-NEXT: v_rcp_f16_e32 v0, s1 -; GFX11-NSZ-NEXT: s_waitcnt_depctr 0xfff -; GFX11-NSZ-NEXT: v_mul_f16_e32 v0, 0x8000, v0 -; GFX11-NSZ-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NSZ-NEXT: v_cmp_nlt_f16_e64 s1, -v0, s0 -; GFX11-NSZ-NEXT: v_cndmask_b32_e64 v0, v0, s0, s1 -; GFX11-NSZ-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NSZ-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0 -; GFX11-NSZ-NEXT: v_cndmask_b32_e64 v0, 0x7e00, 0, vcc_lo -; GFX11-NSZ-NEXT: ; return to shader part epilog -; GFX11-SAFE-TRUE16-LABEL: fneg_fadd_0_f16: -; GFX11-SAFE-TRUE16: ; %bb.0: ; %.entry -; GFX11-SAFE-TRUE16-NEXT: v_rcp_f16_e32 v0.l, s1 -; 
GFX11-SAFE-TRUE16-NEXT: s_waitcnt_depctr 0xfff -; GFX11-SAFE-TRUE16-NEXT: v_mul_f16_e32 v0.l, 0, v0.l -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-SAFE-TRUE16-NEXT: v_add_f16_e32 v0.l, 0, v0.l -; GFX11-SAFE-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, s0, v0.l -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-SAFE-TRUE16-NEXT: v_xor_b32_e32 v0, 0x8000, v1 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v0/*Invalid register, operand has 'VS_16' register class*/, s0, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, 0, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: ; return to shader part epilog -; GFX11-NSZ-TRUE16-LABEL: fneg_fadd_0_f16: -; GFX11-NSZ-TRUE16: ; %bb.0: ; %.entry -; GFX11-NSZ-TRUE16-NEXT: v_rcp_f16_e32 v0.l, s1 -; GFX11-NSZ-TRUE16-NEXT: s_waitcnt_depctr 0xfff -; GFX11-NSZ-TRUE16-NEXT: v_mul_f16_e32 v0.l, 0x8000, v0.l -; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NSZ-TRUE16-NEXT: v_cmp_nlt_f16_e64 s1, -v0.l, s0 -; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, v0.l, s0, s1 -; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NSZ-TRUE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0.l -; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, 0, vcc_lo -; GFX11-NSZ-TRUE16-NEXT: ; return to shader part epilog +define amdgpu_ps half @fneg_fadd_0_safe_f16(half inreg %tmp2, half inreg %tmp6, <4 x i32> %arg) #0 { +; SI-LABEL: fneg_fadd_0_safe_f16: +; SI: ; %bb.0: ; %.entry +; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, s1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, s0 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_div_scale_f32 v2, s[0:1], v0, v0, 1.0 +; SI-NEXT: v_rcp_f32_e32 v3, v2 +; SI-NEXT: v_div_scale_f32 v4, vcc, 1.0, v0, 1.0 +; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 +; SI-NEXT: v_fma_f32 v5, -v2, v3, 1.0 +; SI-NEXT: v_fma_f32 v3, v5, v3, v3 +; SI-NEXT: v_mul_f32_e32 v5, v4, v3 +; SI-NEXT: v_fma_f32 v6, -v2, v5, v4 +; SI-NEXT: v_fma_f32 v5, v6, v3, v5 +; SI-NEXT: v_fma_f32 v2, -v2, v5, v4 +; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 +; SI-NEXT: v_div_fmas_f32 v2, v2, v3, v5 +; SI-NEXT: v_div_fixup_f32 v0, v2, v0, 1.0 +; SI-NEXT: v_mad_f32 v0, v0, 0, 0 +; SI-NEXT: v_cmp_nlt_f32_e32 vcc, v0, v1 +; SI-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc +; SI-NEXT: v_mov_b32_e32 v1, 0x7fc00000 +; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc +; SI-NEXT: ; return to shader part epilog +; +; VI-LABEL: fneg_fadd_0_safe_f16: +; VI: ; %bb.0: ; %.entry +; VI-NEXT: v_rcp_f16_e32 v0, s1 +; VI-NEXT: v_mov_b32_e32 v1, s0 +; VI-NEXT: v_mul_f16_e32 v0, 0, v0 +; VI-NEXT: v_add_f16_e32 v0, 0, v0 +; VI-NEXT: v_xor_b32_e32 v2, 0x8000, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, s0, v0 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc +; VI-NEXT: v_mov_b32_e32 v1, 0x7e00 +; VI-NEXT: v_cmp_nlt_f16_e32 vcc, 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc +; VI-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: fneg_fadd_0_safe_f16: +; GFX11: ; %bb.0: ; %.entry +; GFX11-NEXT: v_rcp_f16_e32 v0, s1 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_f16_e32 v0, 0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) +; GFX11-NEXT: v_add_f16_e32 v0, 0, v0 +; GFX11-NEXT: v_xor_b32_e32 v1, 0x8000, v0 +; GFX11-NEXT: v_cmp_ngt_f16_e32 vcc_lo, s0, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_cndmask_b32_e64 v0, v1, s0, vcc_lo +; GFX11-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, 0x7e00, 0, vcc_lo +; GFX11-NEXT: ; return to shader part epilog .entry: %tmp7 = fdiv half 1.000000e+00, %tmp6 %tmp8 = fmul half 0.000000e+00, %tmp7 @@ -733,108 +651,51 @@ define amdgpu_ps half @fneg_fadd_0_f16(half inreg %tmp2, half inreg %tmp6, <4 x ret half %.i198 } -; This is a workaround because -enable-no-signed-zeros-fp-math does not set up -; function attribute unsafe-fp-math automatically. Combine with the previous test -; when that is done. define amdgpu_ps half @fneg_fadd_0_nsz_f16(half inreg %tmp2, half inreg %tmp6, <4 x i32> %arg) #2 { -; SI-SAFE-LABEL: fneg_fadd_0_nsz_f16: -; SI-SAFE: ; %bb.0: ; %.entry -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, s0 -; SI-SAFE-NEXT: s_brev_b32 s0, 1 -; SI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, 0, v0 -; SI-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; SI-SAFE-NEXT: ; return to shader part epilog -; -; SI-NSZ-LABEL: fneg_fadd_0_nsz_f16: -; SI-NSZ: ; %bb.0: ; %.entry -; SI-NSZ-NEXT: v_cvt_f16_f32_e32 v0, s1 -; SI-NSZ-NEXT: v_cvt_f16_f32_e32 v1, s0 -; SI-NSZ-NEXT: v_mov_b32_e32 v2, 0x7fc00000 -; SI-NSZ-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NSZ-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NSZ-NEXT: v_rcp_f32_e32 v0, v0 -; SI-NSZ-NEXT: v_mul_f32_e32 v0, 0x80000000, v0 -; SI-NSZ-NEXT: v_cmp_nlt_f32_e64 vcc, -v0, v1 -; SI-NSZ-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; SI-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; SI-NSZ-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc -; SI-NSZ-NEXT: ; return to shader part epilog -; -; VI-SAFE-LABEL: fneg_fadd_0_nsz_f16: -; VI-SAFE: ; %bb.0: ; %.entry -; VI-SAFE-NEXT: v_mov_b32_e32 v0, 0x8000 -; VI-SAFE-NEXT: v_mov_b32_e32 v1, s0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e64 vcc, s0, 0 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; VI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7e00 -; VI-SAFE-NEXT: v_cmp_nlt_f16_e32 vcc, 0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; VI-SAFE-NEXT: ; return to shader part epilog -; -; VI-NSZ-LABEL: fneg_fadd_0_nsz_f16: -; VI-NSZ: ; %bb.0: ; %.entry -; VI-NSZ-NEXT: v_rcp_f16_e32 v0, s1 -; VI-NSZ-NEXT: v_mov_b32_e32 v1, s0 -; VI-NSZ-NEXT: v_mul_f16_e32 v0, 0x8000, v0 -; VI-NSZ-NEXT: v_cmp_nlt_f16_e64 vcc, -v0, s0 -; VI-NSZ-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; VI-NSZ-NEXT: v_mov_b32_e32 v1, 0x7e00 -; VI-NSZ-NEXT: v_cmp_nlt_f16_e32 vcc, 0, v0 -; VI-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; VI-NSZ-NEXT: ; return to shader part epilog -; -; GFX11-SAFE-LABEL: fneg_fadd_0_nsz_f16: -; GFX11-SAFE: ; %bb.0: ; %.entry -; GFX11-SAFE-NEXT: v_mov_b32_e32 v0, s0 -; GFX11-SAFE-NEXT: v_cmp_ngt_f16_e64 vcc_lo, s0, 0 -; GFX11-SAFE-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-SAFE-NEXT: v_cndmask_b32_e32 v0, 0x8000, v0, vcc_lo -; GFX11-SAFE-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0 -; GFX11-SAFE-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11-SAFE-NEXT: v_cndmask_b32_e64 v0, 0x7e00, 0, vcc_lo -; GFX11-SAFE-NEXT: ; return to shader part epilog -; -; GFX11-NSZ-LABEL: fneg_fadd_0_nsz_f16: -; GFX11-NSZ: ; %bb.0: ; %.entry -; GFX11-NSZ-NEXT: v_rcp_f16_e32 v0, s1 -; GFX11-NSZ-NEXT: s_waitcnt_depctr 
0xfff -; GFX11-NSZ-NEXT: v_mul_f16_e32 v0, 0x8000, v0 -; GFX11-NSZ-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NSZ-NEXT: v_cmp_nlt_f16_e64 s1, -v0, s0 -; GFX11-NSZ-NEXT: v_cndmask_b32_e64 v0, v0, s0, s1 -; GFX11-NSZ-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NSZ-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0 -; GFX11-NSZ-NEXT: v_cndmask_b32_e64 v0, 0x7e00, 0, vcc_lo -; GFX11-NSZ-NEXT: ; return to shader part epilog -; GFX11-SAFE-TRUE16-LABEL: fneg_fadd_0_nsz_f16: -; GFX11-SAFE-TRUE16: ; %bb.0: ; %.entry -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, s0, 0 -; GFX11-SAFE-TRUE16-NEXT: v_mov_b16_e32 v0.l, 0x8000 -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v0.l, s0, s1 -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, 0, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: ; return to shader part epilog -; GFX11-NSZ-TRUE16-LABEL: fneg_fadd_0_nsz_f16: -; GFX11-NSZ-TRUE16: ; %bb.0: ; %.entry -; GFX11-NSZ-TRUE16-NEXT: v_rcp_f16_e32 v0.l, s1 -; GFX11-NSZ-TRUE16-NEXT: s_waitcnt_depctr 0xfff -; GFX11-NSZ-TRUE16-NEXT: v_mul_f16_e32 v0.l, 0x8000, v0.l -; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NSZ-TRUE16-NEXT: v_cmp_nlt_f16_e64 s1, -v0.l, s0 -; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, v0.l, s0, s1 -; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NSZ-TRUE16-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0.l -; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x7e00, 0, vcc_lo -; GFX11-NSZ-TRUE16-NEXT: ; return to shader part epilog +; SI-LABEL: fneg_fadd_0_nsz_f16: +; SI: ; %bb.0: ; %.entry +; SI-NEXT: v_cvt_f16_f32_e32 v0, s1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, s0 +; SI-NEXT: v_mov_b32_e32 v2, 0x7fc00000 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_rcp_f32_e32 v0, v0 +; SI-NEXT: v_mul_f32_e32 v0, 0x80000000, v0 +; SI-NEXT: v_cmp_nlt_f32_e64 vcc, -v0, v1 +; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc +; SI-NEXT: ; return to shader part epilog +; +; VI-LABEL: fneg_fadd_0_nsz_f16: +; VI: ; %bb.0: ; %.entry +; VI-NEXT: v_rcp_f16_e32 v0, s1 +; VI-NEXT: v_mov_b32_e32 v1, s0 +; VI-NEXT: v_mul_f16_e32 v0, 0x8000, v0 +; VI-NEXT: v_cmp_nlt_f16_e64 vcc, -v0, s0 +; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; VI-NEXT: v_mov_b32_e32 v1, 0x7e00 +; VI-NEXT: v_cmp_nlt_f16_e32 vcc, 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc +; VI-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: fneg_fadd_0_nsz_f16: +; GFX11: ; %bb.0: ; %.entry +; GFX11-NEXT: v_rcp_f16_e32 v0, s1 +; GFX11-NEXT: s_waitcnt_depctr 0xfff +; GFX11-NEXT: v_mul_f16_e32 v0, 0x8000, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_cmp_nlt_f16_e64 s1, -v0, s0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s0, s1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cmp_nlt_f16_e32 vcc_lo, 0, v0 +; GFX11-NEXT: v_cndmask_b32_e64 v0, 0x7e00, 0, vcc_lo +; GFX11-NEXT: ; return to shader part epilog .entry: %tmp7 = fdiv afn half 1.000000e+00, %tmp6 %tmp8 = fmul contract half 0.000000e+00, %tmp7 %tmp9 = fmul reassoc nnan arcp contract half 0.000000e+00, %tmp8 - %.i188 = fadd nnan ninf contract half %tmp9, 0.000000e+00 + %.i188 = fadd nsz half %tmp9, 0.000000e+00 %tmp10 = fcmp uge half %.i188, %tmp2 %tmp11 = fneg half 
%.i188 %.i092 = select i1 %tmp10, half %tmp2, half %tmp11 diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll index ba34e9245f39c..aaea4f76ea49b 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll @@ -880,102 +880,54 @@ define amdgpu_kernel void @v_fneg_add_multi_use_fneg_x_f32(ptr addrspace(1) %out } ; This one asserted with -enable-no-signed-zeros-fp-math -define amdgpu_ps float @fneg_fadd_0(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) local_unnamed_addr #0 { -; SI-SAFE-LABEL: fneg_fadd_0: -; SI-SAFE: ; %bb.0: ; %.entry -; SI-SAFE-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 -; SI-SAFE-NEXT: v_rcp_f32_e32 v1, v0 -; SI-SAFE-NEXT: v_div_scale_f32 v2, vcc, 1.0, s1, 1.0 -; SI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; SI-SAFE-NEXT: v_fma_f32 v3, -v0, v1, 1.0 -; SI-SAFE-NEXT: v_fma_f32 v1, v3, v1, v1 -; SI-SAFE-NEXT: v_mul_f32_e32 v3, v2, v1 -; SI-SAFE-NEXT: v_fma_f32 v4, -v0, v3, v2 -; SI-SAFE-NEXT: v_fma_f32 v3, v4, v1, v3 -; SI-SAFE-NEXT: v_fma_f32 v0, -v0, v3, v2 -; SI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; SI-SAFE-NEXT: v_div_fmas_f32 v0, v0, v1, v3 -; SI-SAFE-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 -; SI-SAFE-NEXT: v_mad_f32 v0, v0, 0, 0 -; SI-SAFE-NEXT: v_mov_b32_e32 v1, s0 -; SI-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc -; SI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; SI-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; SI-SAFE-NEXT: ; return to shader part epilog -; -; SI-NSZ-LABEL: fneg_fadd_0: -; SI-NSZ: ; %bb.0: ; %.entry -; SI-NSZ-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 -; SI-NSZ-NEXT: v_rcp_f32_e32 v1, v0 -; SI-NSZ-NEXT: v_div_scale_f32 v2, vcc, 1.0, s1, 1.0 -; SI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; SI-NSZ-NEXT: v_fma_f32 v3, -v0, v1, 1.0 -; SI-NSZ-NEXT: v_fma_f32 v1, v3, v1, v1 -; SI-NSZ-NEXT: v_mul_f32_e32 v3, v2, v1 -; SI-NSZ-NEXT: v_fma_f32 v4, -v0, v3, v2 -; SI-NSZ-NEXT: v_fma_f32 v3, v4, v1, v3 -; SI-NSZ-NEXT: v_fma_f32 v0, -v0, v3, v2 -; SI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; SI-NSZ-NEXT: v_div_fmas_f32 v0, v0, v1, v3 -; SI-NSZ-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 -; SI-NSZ-NEXT: v_mul_f32_e32 v0, 0, v0 -; SI-NSZ-NEXT: v_mov_b32_e32 v1, s0 -; SI-NSZ-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; SI-NSZ-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc -; SI-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; SI-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; SI-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; SI-NSZ-NEXT: ; return to shader part epilog -; -; VI-SAFE-LABEL: fneg_fadd_0: -; VI-SAFE: ; %bb.0: ; %.entry -; VI-SAFE-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 -; VI-SAFE-NEXT: v_div_scale_f32 v1, vcc, 1.0, s1, 1.0 -; VI-SAFE-NEXT: v_rcp_f32_e32 v2, v0 -; VI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; VI-SAFE-NEXT: v_fma_f32 v3, -v0, v2, 1.0 -; VI-SAFE-NEXT: v_fma_f32 v2, v3, v2, v2 -; VI-SAFE-NEXT: v_mul_f32_e32 v3, v1, v2 -; VI-SAFE-NEXT: v_fma_f32 v4, -v0, v3, v1 -; VI-SAFE-NEXT: v_fma_f32 v3, v4, v2, v3 -; VI-SAFE-NEXT: v_fma_f32 v0, -v0, v3, v1 -; VI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; VI-SAFE-NEXT: v_div_fmas_f32 v0, v0, v2, v3 -; VI-SAFE-NEXT: v_mov_b32_e32 v2, s0 -; VI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; VI-SAFE-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 -; VI-SAFE-NEXT: v_mad_f32 v0, v0, 0, 0 -; VI-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; VI-SAFE-NEXT: 
v_cndmask_b32_e64 v0, -v0, v2, vcc -; VI-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; VI-SAFE-NEXT: ; return to shader part epilog -; -; VI-NSZ-LABEL: fneg_fadd_0: -; VI-NSZ: ; %bb.0: ; %.entry -; VI-NSZ-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 -; VI-NSZ-NEXT: v_div_scale_f32 v1, vcc, 1.0, s1, 1.0 -; VI-NSZ-NEXT: v_rcp_f32_e32 v2, v0 -; VI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; VI-NSZ-NEXT: v_fma_f32 v3, -v0, v2, 1.0 -; VI-NSZ-NEXT: v_fma_f32 v2, v3, v2, v2 -; VI-NSZ-NEXT: v_mul_f32_e32 v3, v1, v2 -; VI-NSZ-NEXT: v_fma_f32 v4, -v0, v3, v1 -; VI-NSZ-NEXT: v_fma_f32 v3, v4, v2, v3 -; VI-NSZ-NEXT: v_fma_f32 v0, -v0, v3, v1 -; VI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; VI-NSZ-NEXT: v_div_fmas_f32 v0, v0, v2, v3 -; VI-NSZ-NEXT: v_mov_b32_e32 v2, s0 -; VI-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; VI-NSZ-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 -; VI-NSZ-NEXT: v_mul_f32_e32 v0, 0, v0 -; VI-NSZ-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; VI-NSZ-NEXT: v_cndmask_b32_e64 v0, -v0, v2, vcc -; VI-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; VI-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; VI-NSZ-NEXT: ; return to shader part epilog +define amdgpu_ps float @fneg_fadd_0_safe(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) local_unnamed_addr #0 { +; SI-LABEL: fneg_fadd_0_safe: +; SI: ; %bb.0: ; %.entry +; SI-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 +; SI-NEXT: v_rcp_f32_e32 v1, v0 +; SI-NEXT: v_div_scale_f32 v2, vcc, 1.0, s1, 1.0 +; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 +; SI-NEXT: v_fma_f32 v3, -v0, v1, 1.0 +; SI-NEXT: v_fma_f32 v1, v3, v1, v1 +; SI-NEXT: v_mul_f32_e32 v3, v2, v1 +; SI-NEXT: v_fma_f32 v4, -v0, v3, v2 +; SI-NEXT: v_fma_f32 v3, v4, v1, v3 +; SI-NEXT: v_fma_f32 v0, -v0, v3, v2 +; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 +; SI-NEXT: v_div_fmas_f32 v0, v0, v1, v3 +; SI-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 +; SI-NEXT: v_mad_f32 v0, v0, 0, 0 +; SI-NEXT: v_mov_b32_e32 v1, s0 +; SI-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc +; SI-NEXT: v_mov_b32_e32 v1, 0x7fc00000 +; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc +; SI-NEXT: ; return to shader part epilog +; +; VI-LABEL: fneg_fadd_0_safe: +; VI: ; %bb.0: ; %.entry +; VI-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 +; VI-NEXT: v_div_scale_f32 v1, vcc, 1.0, s1, 1.0 +; VI-NEXT: v_rcp_f32_e32 v2, v0 +; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 +; VI-NEXT: v_fma_f32 v3, -v0, v2, 1.0 +; VI-NEXT: v_fma_f32 v2, v3, v2, v2 +; VI-NEXT: v_mul_f32_e32 v3, v1, v2 +; VI-NEXT: v_fma_f32 v4, -v0, v3, v1 +; VI-NEXT: v_fma_f32 v3, v4, v2, v3 +; VI-NEXT: v_fma_f32 v0, -v0, v3, v1 +; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 +; VI-NEXT: v_div_fmas_f32 v0, v0, v2, v3 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: v_mov_b32_e32 v1, 0x7fc00000 +; VI-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 +; VI-NEXT: v_mad_f32 v0, v0, 0, 0 +; VI-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -v0, v2, vcc +; VI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc +; VI-NEXT: ; return to shader part epilog .entry: %tmp7 = fdiv float 1.000000e+00, %tmp6 %tmp8 = fmul float 0.000000e+00, %tmp7 @@ -989,39 +941,23 @@ define amdgpu_ps float @fneg_fadd_0(float inreg %tmp2, float inreg %tmp6, <4 x i ret float %.i198 } -; This is a workaround because -enable-no-signed-zeros-fp-math does not set up -; function attribute 
unsafe-fp-math automatically. Combine with the previous test -; when that is done. -define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) local_unnamed_addr #2 { -; GCN-SAFE-LABEL: fneg_fadd_0_nsz: -; GCN-SAFE: ; %bb.0: ; %.entry -; GCN-SAFE-NEXT: v_rcp_f32_e32 v0, s1 -; GCN-SAFE-NEXT: v_mov_b32_e32 v1, s0 -; GCN-SAFE-NEXT: v_mul_f32_e32 v0, 0, v0 -; GCN-SAFE-NEXT: v_add_f32_e32 v0, 0, v0 -; GCN-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; GCN-SAFE-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc -; GCN-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; GCN-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; GCN-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; GCN-SAFE-NEXT: ; return to shader part epilog -; -; GCN-NSZ-LABEL: fneg_fadd_0_nsz: -; GCN-NSZ: ; %bb.0: ; %.entry -; GCN-NSZ-NEXT: v_rcp_f32_e32 v0, s1 -; GCN-NSZ-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NSZ-NEXT: v_mul_f32_e32 v0, 0, v0 -; GCN-NSZ-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; GCN-NSZ-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc -; GCN-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; GCN-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; GCN-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; GCN-NSZ-NEXT: ; return to shader part epilog +define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) local_unnamed_addr { +; GCN-LABEL: fneg_fadd_0_nsz: +; GCN: ; %bb.0: ; %.entry +; GCN-NEXT: v_rcp_f32_e32 v0, s1 +; GCN-NEXT: v_mov_b32_e32 v1, s0 +; GCN-NEXT: v_mul_f32_e32 v0, 0, v0 +; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc +; GCN-NEXT: v_mov_b32_e32 v1, 0x7fc00000 +; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc +; GCN-NEXT: ; return to shader part epilog .entry: %tmp7 = fdiv afn float 1.000000e+00, %tmp6 %tmp8 = fmul float 0.000000e+00, %tmp7 %tmp9 = fmul reassoc nnan arcp contract float 0.000000e+00, %tmp8 - %.i188 = fadd float %tmp9, 0.000000e+00 + %.i188 = fadd nsz float %tmp9, 0.000000e+00 %tmp10 = fcmp uge float %.i188, %tmp2 %tmp11 = fneg float %.i188 %.i092 = select i1 %tmp10, float %tmp2, float %tmp11 @@ -5079,7 +5015,7 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i64 %tid.ext %a = load volatile double, ptr addrspace(1) %a.gep - %fneg.a = fsub double -0.000000e+00, %a + %fneg.a = fsub nsz double -0.000000e+00, %a %fpround = fptrunc double %fneg.a to float %fneg = fneg float %fpround store float %fneg, ptr addrspace(1) %out.gep @@ -8072,3 +8008,6 @@ attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" attributes #1 = { nounwind readnone } attributes #2 = { nounwind "unsafe-fp-math"="true" } attributes #3 = { nounwind "no-signed-zeros-fp-math"="true" } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; GCN-NSZ: {{.*}} +; GCN-SAFE: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll index e687745469014..3de6df211ac7c 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll @@ -175,103 +175,54 @@ define { float, float } @v_fneg_add_multi_use_fneg_x_f32(float %a, float %b, flo ret { float, float } %insert.1 } -; This one asserted with -enable-no-signed-zeros-fp-math -define amdgpu_ps float @fneg_fadd_0_f32(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) #0 { -; SI-SAFE-LABEL: fneg_fadd_0_f32: -; SI-SAFE: ; %bb.0: ; %.entry -; SI-SAFE-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 -; SI-SAFE-NEXT: v_rcp_f32_e32 v1, v0 -; SI-SAFE-NEXT: v_div_scale_f32 v2, vcc, 1.0, s1, 1.0 -; SI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; SI-SAFE-NEXT: v_fma_f32 v3, -v0, v1, 1.0 -; SI-SAFE-NEXT: v_fma_f32 v1, v3, v1, v1 -; SI-SAFE-NEXT: v_mul_f32_e32 v3, v2, v1 -; SI-SAFE-NEXT: v_fma_f32 v4, -v0, v3, v2 -; SI-SAFE-NEXT: v_fma_f32 v3, v4, v1, v3 -; SI-SAFE-NEXT: v_fma_f32 v0, -v0, v3, v2 -; SI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; SI-SAFE-NEXT: v_div_fmas_f32 v0, v0, v1, v3 -; SI-SAFE-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 -; SI-SAFE-NEXT: v_mad_f32 v0, v0, 0, 0 -; SI-SAFE-NEXT: v_mov_b32_e32 v1, s0 -; SI-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc -; SI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; SI-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; SI-SAFE-NEXT: ; return to shader part epilog -; -; SI-NSZ-LABEL: fneg_fadd_0_f32: -; SI-NSZ: ; %bb.0: ; %.entry -; SI-NSZ-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 -; SI-NSZ-NEXT: v_rcp_f32_e32 v1, v0 -; SI-NSZ-NEXT: v_div_scale_f32 v2, vcc, 1.0, s1, 1.0 -; SI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; SI-NSZ-NEXT: v_fma_f32 v3, -v0, v1, 1.0 -; SI-NSZ-NEXT: v_fma_f32 v1, v3, v1, v1 -; SI-NSZ-NEXT: v_mul_f32_e32 v3, v2, v1 -; SI-NSZ-NEXT: v_fma_f32 v4, -v0, v3, v2 -; SI-NSZ-NEXT: v_fma_f32 v3, v4, v1, v3 -; SI-NSZ-NEXT: v_fma_f32 v0, -v0, v3, v2 -; SI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; SI-NSZ-NEXT: v_div_fmas_f32 v0, v0, v1, v3 -; SI-NSZ-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 -; SI-NSZ-NEXT: v_mul_f32_e32 v0, 0, v0 -; SI-NSZ-NEXT: v_mov_b32_e32 v1, s0 -; SI-NSZ-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; SI-NSZ-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc -; SI-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; SI-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; SI-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; SI-NSZ-NEXT: ; return to shader part epilog -; -; VI-SAFE-LABEL: fneg_fadd_0_f32: -; VI-SAFE: ; %bb.0: ; %.entry -; VI-SAFE-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 -; VI-SAFE-NEXT: v_div_scale_f32 v1, vcc, 1.0, s1, 1.0 -; VI-SAFE-NEXT: v_rcp_f32_e32 v2, v0 -; VI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; VI-SAFE-NEXT: v_fma_f32 v3, -v0, v2, 1.0 -; VI-SAFE-NEXT: v_fma_f32 v2, v3, v2, v2 -; VI-SAFE-NEXT: v_mul_f32_e32 v3, v1, v2 -; VI-SAFE-NEXT: v_fma_f32 v4, -v0, v3, v1 -; VI-SAFE-NEXT: v_fma_f32 v3, v4, v2, v3 -; VI-SAFE-NEXT: v_fma_f32 v0, -v0, v3, v1 -; VI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; VI-SAFE-NEXT: v_div_fmas_f32 v0, v0, v2, v3 -; VI-SAFE-NEXT: v_mov_b32_e32 v2, s0 -; VI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; VI-SAFE-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 -; VI-SAFE-NEXT: v_mad_f32 v0, v0, 0, 0 -; 
VI-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -v0, v2, vcc -; VI-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; VI-SAFE-NEXT: ; return to shader part epilog +define amdgpu_ps float @fneg_fadd_0_safe_f32(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) #0 { +; SI-LABEL: fneg_fadd_0_safe_f32: +; SI: ; %bb.0: ; %.entry +; SI-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 +; SI-NEXT: v_rcp_f32_e32 v1, v0 +; SI-NEXT: v_div_scale_f32 v2, vcc, 1.0, s1, 1.0 +; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 +; SI-NEXT: v_fma_f32 v3, -v0, v1, 1.0 +; SI-NEXT: v_fma_f32 v1, v3, v1, v1 +; SI-NEXT: v_mul_f32_e32 v3, v2, v1 +; SI-NEXT: v_fma_f32 v4, -v0, v3, v2 +; SI-NEXT: v_fma_f32 v3, v4, v1, v3 +; SI-NEXT: v_fma_f32 v0, -v0, v3, v2 +; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 +; SI-NEXT: v_div_fmas_f32 v0, v0, v1, v3 +; SI-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 +; SI-NEXT: v_mad_f32 v0, v0, 0, 0 +; SI-NEXT: v_mov_b32_e32 v1, s0 +; SI-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc +; SI-NEXT: v_mov_b32_e32 v1, 0x7fc00000 +; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; SI-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc +; SI-NEXT: ; return to shader part epilog ; -; VI-NSZ-LABEL: fneg_fadd_0_f32: -; VI-NSZ: ; %bb.0: ; %.entry -; VI-NSZ-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 -; VI-NSZ-NEXT: v_div_scale_f32 v1, vcc, 1.0, s1, 1.0 -; VI-NSZ-NEXT: v_rcp_f32_e32 v2, v0 -; VI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 -; VI-NSZ-NEXT: v_fma_f32 v3, -v0, v2, 1.0 -; VI-NSZ-NEXT: v_fma_f32 v2, v3, v2, v2 -; VI-NSZ-NEXT: v_mul_f32_e32 v3, v1, v2 -; VI-NSZ-NEXT: v_fma_f32 v4, -v0, v3, v1 -; VI-NSZ-NEXT: v_fma_f32 v3, v4, v2, v3 -; VI-NSZ-NEXT: v_fma_f32 v0, -v0, v3, v1 -; VI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 -; VI-NSZ-NEXT: v_div_fmas_f32 v0, v0, v2, v3 -; VI-NSZ-NEXT: v_mov_b32_e32 v2, s0 -; VI-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; VI-NSZ-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 -; VI-NSZ-NEXT: v_mul_f32_e32 v0, 0, v0 -; VI-NSZ-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; VI-NSZ-NEXT: v_cndmask_b32_e64 v0, -v0, v2, vcc -; VI-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; VI-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; VI-NSZ-NEXT: ; return to shader part epilog +; VI-LABEL: fneg_fadd_0_safe_f32: +; VI: ; %bb.0: ; %.entry +; VI-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0 +; VI-NEXT: v_div_scale_f32 v1, vcc, 1.0, s1, 1.0 +; VI-NEXT: v_rcp_f32_e32 v2, v0 +; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3 +; VI-NEXT: v_fma_f32 v3, -v0, v2, 1.0 +; VI-NEXT: v_fma_f32 v2, v3, v2, v2 +; VI-NEXT: v_mul_f32_e32 v3, v1, v2 +; VI-NEXT: v_fma_f32 v4, -v0, v3, v1 +; VI-NEXT: v_fma_f32 v3, v4, v2, v3 +; VI-NEXT: v_fma_f32 v0, -v0, v3, v1 +; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0 +; VI-NEXT: v_div_fmas_f32 v0, v0, v2, v3 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: v_mov_b32_e32 v1, 0x7fc00000 +; VI-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0 +; VI-NEXT: v_mad_f32 v0, v0, 0, 0 +; VI-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, -v0, v2, vcc +; VI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc +; VI-NEXT: ; return to shader part epilog .entry: %tmp7 = fdiv float 1.000000e+00, %tmp6 %tmp8 = fmul float 0.000000e+00, %tmp7 @@ -289,35 +240,22 @@ define amdgpu_ps float @fneg_fadd_0_f32(float inreg %tmp2, float inreg %tmp6, <4 ; function attribute unsafe-fp-math automatically. 
Combine with the previous test ; when that is done. define amdgpu_ps float @fneg_fadd_0_nsz_f32(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) #2 { -; GCN-SAFE-LABEL: fneg_fadd_0_nsz_f32: -; GCN-SAFE: ; %bb.0: ; %.entry -; GCN-SAFE-NEXT: v_rcp_f32_e32 v0, s1 -; GCN-SAFE-NEXT: v_mov_b32_e32 v1, s0 -; GCN-SAFE-NEXT: v_mul_f32_e32 v0, 0, v0 -; GCN-SAFE-NEXT: v_add_f32_e32 v0, 0, v0 -; GCN-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; GCN-SAFE-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc -; GCN-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; GCN-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; GCN-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; GCN-SAFE-NEXT: ; return to shader part epilog -; -; GCN-NSZ-LABEL: fneg_fadd_0_nsz_f32: -; GCN-NSZ: ; %bb.0: ; %.entry -; GCN-NSZ-NEXT: v_rcp_f32_e32 v0, s1 -; GCN-NSZ-NEXT: v_mov_b32_e32 v1, s0 -; GCN-NSZ-NEXT: v_mul_f32_e32 v0, 0, v0 -; GCN-NSZ-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 -; GCN-NSZ-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc -; GCN-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000 -; GCN-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 -; GCN-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc -; GCN-NSZ-NEXT: ; return to shader part epilog +; GCN-LABEL: fneg_fadd_0_nsz_f32: +; GCN: ; %bb.0: ; %.entry +; GCN-NEXT: v_rcp_f32_e32 v0, s1 +; GCN-NEXT: v_mov_b32_e32 v1, s0 +; GCN-NEXT: v_mul_f32_e32 v0, 0, v0 +; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc +; GCN-NEXT: v_mov_b32_e32 v1, 0x7fc00000 +; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0 +; GCN-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc +; GCN-NEXT: ; return to shader part epilog .entry: %tmp7 = fdiv afn float 1.000000e+00, %tmp6 %tmp8 = fmul float 0.000000e+00, %tmp7 %tmp9 = fmul reassoc nnan arcp contract float 0.000000e+00, %tmp8 - %.i188 = fadd float %tmp9, 0.000000e+00 + %.i188 = fadd nsz float %tmp9, 0.000000e+00 %tmp10 = fcmp uge float %.i188, %tmp2 %tmp11 = fneg float %.i188 %.i092 = select i1 %tmp10, float %tmp2, float %tmp11 @@ -569,8 +507,6 @@ define amdgpu_ps double @fneg_fadd_0_f64(double inreg %tmp2, double inreg %tmp6, ; SI-NSZ-LABEL: fneg_fadd_0_f64: ; SI-NSZ: ; %bb.0: ; %.entry ; SI-NSZ-NEXT: v_div_scale_f64 v[0:1], s[4:5], s[2:3], s[2:3], 1.0 -; SI-NSZ-NEXT: s_mov_b32 s4, 0 -; SI-NSZ-NEXT: s_brev_b32 s5, 1 ; SI-NSZ-NEXT: v_rcp_f64_e32 v[2:3], v[0:1] ; SI-NSZ-NEXT: v_fma_f64 v[4:5], -v[0:1], v[2:3], 1.0 ; SI-NSZ-NEXT: v_fma_f64 v[2:3], v[2:3], v[4:5], v[2:3] @@ -583,7 +519,10 @@ define amdgpu_ps double @fneg_fadd_0_f64(double inreg %tmp2, double inreg %tmp6, ; SI-NSZ-NEXT: v_mov_b32_e32 v2, s1 ; SI-NSZ-NEXT: v_mov_b32_e32 v3, s0 ; SI-NSZ-NEXT: v_div_fixup_f64 v[0:1], v[0:1], s[2:3], 1.0 -; SI-NSZ-NEXT: v_mul_f64 v[0:1], v[0:1], s[4:5] +; SI-NSZ-NEXT: s_mov_b32 s2, 0 +; SI-NSZ-NEXT: v_mul_f64 v[0:1], v[0:1], 0 +; SI-NSZ-NEXT: s_brev_b32 s3, 1 +; SI-NSZ-NEXT: v_fma_f64 v[0:1], v[0:1], s[2:3], s[2:3] ; SI-NSZ-NEXT: v_cmp_nlt_f64_e64 vcc, -v[0:1], s[0:1] ; SI-NSZ-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; SI-NSZ-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc @@ -637,7 +576,8 @@ define amdgpu_ps double @fneg_fadd_0_f64(double inreg %tmp2, double inreg %tmp6, ; VI-NSZ-NEXT: v_div_fixup_f64 v[0:1], v[0:1], s[2:3], 1.0 ; VI-NSZ-NEXT: s_mov_b32 s2, 0 ; VI-NSZ-NEXT: s_brev_b32 s3, 1 -; VI-NSZ-NEXT: v_mul_f64 v[0:1], v[0:1], s[2:3] +; VI-NSZ-NEXT: v_mul_f64 v[0:1], v[0:1], 0 +; VI-NSZ-NEXT: v_fma_f64 v[0:1], v[0:1], s[2:3], s[2:3] ; VI-NSZ-NEXT: v_cmp_nlt_f64_e64 vcc, -v[0:1], s[0:1] ; VI-NSZ-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; VI-NSZ-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc @@ -663,102 +603,56 @@ 
define amdgpu_ps double @fneg_fadd_0_f64(double inreg %tmp2, double inreg %tmp6, ; function attribute unsafe-fp-math automatically. Combine with the previous test ; when that is done. define amdgpu_ps double @fneg_fadd_0_nsz_f64(double inreg %tmp2, double inreg %tmp6, <4 x i32> %arg) #2 { -; SI-SAFE-LABEL: fneg_fadd_0_nsz_f64: -; SI-SAFE: ; %bb.0: ; %.entry -; SI-SAFE-NEXT: v_rcp_f64_e32 v[0:1], s[2:3] -; SI-SAFE-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; SI-SAFE-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; SI-SAFE-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; SI-SAFE-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; SI-SAFE-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; SI-SAFE-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; SI-SAFE-NEXT: v_mov_b32_e32 v2, s1 -; SI-SAFE-NEXT: v_mul_f64 v[0:1], v[0:1], 0 -; SI-SAFE-NEXT: v_mov_b32_e32 v3, s0 -; SI-SAFE-NEXT: v_add_f64 v[0:1], v[0:1], 0 -; SI-SAFE-NEXT: v_cmp_ngt_f64_e32 vcc, s[0:1], v[0:1] -; SI-SAFE-NEXT: v_xor_b32_e32 v4, 0x80000000, v1 -; SI-SAFE-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc -; SI-SAFE-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; SI-SAFE-NEXT: v_cmp_nlt_f64_e32 vcc, 0, v[0:1] -; SI-SAFE-NEXT: s_and_b64 s[0:1], vcc, exec -; SI-SAFE-NEXT: s_cselect_b32 s1, 0, 0x7ff80000 -; SI-SAFE-NEXT: s_mov_b32 s0, 0 -; SI-SAFE-NEXT: ; return to shader part epilog -; -; SI-NSZ-LABEL: fneg_fadd_0_nsz_f64: -; SI-NSZ: ; %bb.0: ; %.entry -; SI-NSZ-NEXT: v_rcp_f64_e32 v[0:1], s[2:3] -; SI-NSZ-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; SI-NSZ-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; SI-NSZ-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; SI-NSZ-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; SI-NSZ-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; SI-NSZ-NEXT: s_mov_b32 s2, 0 -; SI-NSZ-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; SI-NSZ-NEXT: s_brev_b32 s3, 1 -; SI-NSZ-NEXT: v_mul_f64 v[0:1], v[0:1], s[2:3] -; SI-NSZ-NEXT: v_mov_b32_e32 v2, s1 -; SI-NSZ-NEXT: v_cmp_nlt_f64_e64 vcc, -v[0:1], s[0:1] -; SI-NSZ-NEXT: v_mov_b32_e32 v3, s0 -; SI-NSZ-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; SI-NSZ-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; SI-NSZ-NEXT: v_cmp_nlt_f64_e32 vcc, 0, v[0:1] -; SI-NSZ-NEXT: s_and_b64 s[0:1], vcc, exec -; SI-NSZ-NEXT: s_cselect_b32 s1, 0, 0x7ff80000 -; SI-NSZ-NEXT: s_mov_b32 s0, 0 -; SI-NSZ-NEXT: ; return to shader part epilog -; -; VI-SAFE-LABEL: fneg_fadd_0_nsz_f64: -; VI-SAFE: ; %bb.0: ; %.entry -; VI-SAFE-NEXT: v_rcp_f64_e32 v[0:1], s[2:3] -; VI-SAFE-NEXT: v_mov_b32_e32 v4, s0 -; VI-SAFE-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; VI-SAFE-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; VI-SAFE-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; VI-SAFE-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; VI-SAFE-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; VI-SAFE-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; VI-SAFE-NEXT: v_mov_b32_e32 v2, s1 -; VI-SAFE-NEXT: v_mul_f64 v[0:1], v[0:1], 0 -; VI-SAFE-NEXT: v_add_f64 v[0:1], v[0:1], 0 -; VI-SAFE-NEXT: v_cmp_ngt_f64_e32 vcc, s[0:1], v[0:1] -; VI-SAFE-NEXT: v_xor_b32_e32 v3, 0x80000000, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc -; VI-SAFE-NEXT: v_cmp_nlt_f64_e32 vcc, 0, v[0:1] -; VI-SAFE-NEXT: s_and_b64 s[0:1], vcc, exec -; VI-SAFE-NEXT: s_cselect_b32 s1, 0, 0x7ff80000 -; VI-SAFE-NEXT: s_mov_b32 s0, 0 -; VI-SAFE-NEXT: ; return to shader part epilog +; SI-LABEL: fneg_fadd_0_nsz_f64: +; SI: ; %bb.0: ; %.entry +; SI-NEXT: v_rcp_f64_e32 v[0:1], s[2:3] +; SI-NEXT: v_fma_f64 v[2:3], 
-s[2:3], v[0:1], 1.0 +; SI-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] +; SI-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 +; SI-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] +; SI-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] +; SI-NEXT: s_brev_b32 s3, 1 +; SI-NEXT: v_mul_f64 v[0:1], v[0:1], s[2:3] +; SI-NEXT: v_mov_b32_e32 v2, s1 +; SI-NEXT: v_cmp_nlt_f64_e64 vcc, -v[0:1], s[0:1] +; SI-NEXT: v_mov_b32_e32 v3, s0 +; SI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; SI-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; SI-NEXT: v_cmp_nlt_f64_e32 vcc, 0, v[0:1] +; SI-NEXT: s_and_b64 s[0:1], vcc, exec +; SI-NEXT: s_cselect_b32 s1, 0, 0x7ff80000 +; SI-NEXT: s_mov_b32 s0, 0 +; SI-NEXT: ; return to shader part epilog ; -; VI-NSZ-LABEL: fneg_fadd_0_nsz_f64: -; VI-NSZ: ; %bb.0: ; %.entry -; VI-NSZ-NEXT: v_rcp_f64_e32 v[0:1], s[2:3] -; VI-NSZ-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; VI-NSZ-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; VI-NSZ-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; VI-NSZ-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; VI-NSZ-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 -; VI-NSZ-NEXT: s_mov_b32 s2, 0 -; VI-NSZ-NEXT: s_brev_b32 s3, 1 -; VI-NSZ-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] -; VI-NSZ-NEXT: v_mov_b32_e32 v2, s1 -; VI-NSZ-NEXT: v_mov_b32_e32 v3, s0 -; VI-NSZ-NEXT: v_mul_f64 v[0:1], v[0:1], s[2:3] -; VI-NSZ-NEXT: v_cmp_nlt_f64_e64 vcc, -v[0:1], s[0:1] -; VI-NSZ-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; VI-NSZ-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; VI-NSZ-NEXT: v_cmp_nlt_f64_e32 vcc, 0, v[0:1] -; VI-NSZ-NEXT: s_and_b64 s[0:1], vcc, exec -; VI-NSZ-NEXT: s_cselect_b32 s1, 0, 0x7ff80000 -; VI-NSZ-NEXT: s_mov_b32 s0, 0 -; VI-NSZ-NEXT: ; return to shader part epilog +; VI-LABEL: fneg_fadd_0_nsz_f64: +; VI: ; %bb.0: ; %.entry +; VI-NEXT: v_rcp_f64_e32 v[0:1], s[2:3] +; VI-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 +; VI-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] +; VI-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 +; VI-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] +; VI-NEXT: v_fma_f64 v[2:3], -s[2:3], v[0:1], 1.0 +; VI-NEXT: s_mov_b32 s2, 0 +; VI-NEXT: s_brev_b32 s3, 1 +; VI-NEXT: v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1] +; VI-NEXT: v_mov_b32_e32 v2, s1 +; VI-NEXT: v_mov_b32_e32 v3, s0 +; VI-NEXT: v_mul_f64 v[0:1], v[0:1], s[2:3] +; VI-NEXT: v_cmp_nlt_f64_e64 vcc, -v[0:1], s[0:1] +; VI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; VI-NEXT: v_cmp_nlt_f64_e32 vcc, 0, v[0:1] +; VI-NEXT: s_and_b64 s[0:1], vcc, exec +; VI-NEXT: s_cselect_b32 s1, 0, 0x7ff80000 +; VI-NEXT: s_mov_b32 s0, 0 +; VI-NEXT: ; return to shader part epilog .entry: %tmp7 = fdiv afn double 1.000000e+00, %tmp6 %tmp8 = fmul double 0.000000e+00, %tmp7 %tmp9 = fmul reassoc nnan arcp contract double 0.000000e+00, %tmp8 - %.i188 = fadd double %tmp9, 0.000000e+00 + %.i188 = fadd nsz double %tmp9, 0.000000e+00 %tmp10 = fcmp uge double %.i188, %tmp2 %tmp11 = fneg double %.i188 %.i092 = select i1 %tmp10, double %tmp2, double %tmp11 @@ -4547,25 +4441,40 @@ define float @v_fneg_fabs_select_infloop_regression(float %arg, i1 %arg1) { ret float %i3 } -define float @v_fmul_0_fsub_0_infloop_regression(float %arg) { -; GCN-SAFE-LABEL: v_fmul_0_fsub_0_infloop_regression: +define float @v_fmul_0_fsub_0_safe_infloop_regression(float %arg) { +; GCN-SAFE-LABEL: v_fmul_0_fsub_0_safe_infloop_regression: ; GCN-SAFE: ; %bb.0: ; %bb ; GCN-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; 
GCN-SAFE-NEXT: v_mul_f32_e32 v0, 0, v0 ; GCN-SAFE-NEXT: v_sub_f32_e32 v0, 0, v0 ; GCN-SAFE-NEXT: s_setpc_b64 s[30:31] ; -; GCN-NSZ-LABEL: v_fmul_0_fsub_0_infloop_regression: -; GCN-NSZ: ; %bb.0: ; %bb -; GCN-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NSZ-NEXT: v_mul_f32_e32 v0, 0x80000000, v0 -; GCN-NSZ-NEXT: s_setpc_b64 s[30:31] +; SI-NSZ-LABEL: v_fmul_0_fsub_0_safe_infloop_regression: +; SI-NSZ: ; %bb.0: ; %bb +; SI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NSZ-NEXT: s_brev_b32 s4, 1 +; SI-NSZ-NEXT: v_fma_f32 v0, v0, s4, 0 +; SI-NSZ-NEXT: s_setpc_b64 s[30:31] +; FIXME: utils/update_llc_test_checks.py will generate redundant VI +; check labels; remove them manually, since they will cause the test to fail. bb: %i = fmul float %arg, 0.0 %i1 = fsub float 0.0, %i ret float %i1 } +define float @v_fmul_0_fsub_0_nsz_infloop_regression(float %arg) { +; GCN-LABEL: v_fmul_0_fsub_0_nsz_infloop_regression: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v0, 0x80000000, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] +bb: + %i = fmul float %arg, 0.0 + %i1 = fsub nsz float 0.0, %i + ret float %i1 +} + declare i32 @llvm.amdgcn.workitem.id.x() #1 declare float @llvm.fma.f32(float, float, float) #1 declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir index 73cdcddbef135..a3b2191695734 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir @@ -209,8 +209,8 @@ body: | bb.0: ; GCN-LABEL: name: s_mov_b32_imm_65_copy_to_av_32 ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65, implicit $exec - ; GCN-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec - ; GCN-NEXT: S_ENDPGM 0, implicit [[AV_MOV_]] + ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[S_MOV_B32_]] + ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]] %0:sreg_32 = S_MOV_B32 65, implicit $exec %1:av_32 = COPY %0 S_ENDPGM 0, implicit %1 diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir index dfcf9a1f5c5ae..bec188e4e8378 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir @@ -240,8 +240,8 @@ body: | bb.0: ; GCN-LABEL: name: s_mov_b32_imm_literal_copy_s_to_av_32 ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 999 - ; GCN-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec - ; GCN-NEXT: $agpr0 = COPY [[AV_MOV_]] + ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[S_MOV_B32_]] + ; GCN-NEXT: $agpr0 = COPY [[COPY]] ; GCN-NEXT: S_ENDPGM 0 %0:sreg_32 = S_MOV_B32 999 %1:av_32 = COPY %0 @@ -257,8 +257,8 @@ body: | bb.0: ; GCN-LABEL: name: v_mov_b32_imm_literal_copy_v_to_av_32 ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 999, implicit $exec - ; GCN-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[V_MOV_B32_e32_]], implicit $exec - ; GCN-NEXT: $agpr0 = COPY [[AV_MOV_]] + ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[V_MOV_B32_e32_]] + ; GCN-NEXT: $agpr0 = COPY [[COPY]] ; GCN-NEXT: S_ENDPGM 0 %0:vgpr_32 = V_MOV_B32_e32 999, implicit $exec %1:av_32 = COPY %0 diff --git a/llvm/test/CodeGen/AMDGPU/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/AMDGPU/fold-int-pow2-with-fmul-or-fdiv.ll index a859cc91b7fde..fe95d4561d0cd 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-int-pow2-with-fmul-or-fdiv.ll +++ 
b/llvm/test/CodeGen/AMDGPU/fold-int-pow2-with-fmul-or-fdiv.ll @@ -1571,25 +1571,24 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind { ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0x46000000 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_cvt_f32_u32_e32 v0, v0 -; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v1, v0.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v0.l -; GFX11-TRUE16-NEXT: v_rcp_f32_e32 v1, v1 +; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v1.l, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v0, v1.l +; GFX11-TRUE16-NEXT: v_rcp_f32_e32 v0, v0 ; GFX11-TRUE16-NEXT: s_waitcnt_depctr 0xfff -; GFX11-TRUE16-NEXT: v_mul_f32_e32 v2, 0x46000000, v1 +; GFX11-TRUE16-NEXT: v_mul_f32_e32 v2, 0x46000000, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v4, -v3, v2, s0 op_sel_hi:[1,0,0] -; GFX11-TRUE16-NEXT: v_fmac_f32_e32 v2, v4, v1 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v3, -v1, v2, s0 op_sel_hi:[1,0,0] +; GFX11-TRUE16-NEXT: v_fmac_f32_e32 v2, v3, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v3, -v3, v2, s0 op_sel_hi:[1,0,0] -; GFX11-TRUE16-NEXT: v_mul_f32_e32 v1, v3, v1 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v3, -v1, v2, s0 op_sel_hi:[1,0,0] +; GFX11-TRUE16-NEXT: v_mul_f32_e32 v0, v3, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff800000, v1 -; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, v1, v2 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff800000, v0 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v0, v2 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v1 -; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.h, v0.l, 0x7000 +; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 +; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.l, v1.l, 0x7000 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: fdiv_pow_shl_cnt_fail_out_of_bounds: @@ -1739,25 +1738,24 @@ define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind { ; GFX11-TRUE16-NEXT: v_lshlrev_b16 v0.l, v0.l, 1 ; GFX11-TRUE16-NEXT: s_mov_b32 s0, 2.0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v0.l, v0.l -; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v1, v0.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v0.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_rcp_f32_e32 v1, v1 +; GFX11-TRUE16-NEXT: v_cvt_f16_u16_e32 v1.l, v0.l +; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v0, v1.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_rcp_f32_e32 v0, v0 ; GFX11-TRUE16-NEXT: s_waitcnt_depctr 0xfff -; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v1, v1 -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v4, -v3, v2, s0 op_sel_hi:[1,0,0] +; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, v0, v0 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v3, -v1, v2, s0 op_sel_hi:[1,0,0] ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; 
GFX11-TRUE16-NEXT: v_fmac_f32_e32 v2, v4, v1 -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v3, -v3, v2, s0 op_sel_hi:[1,0,0] +; GFX11-TRUE16-NEXT: v_fmac_f32_e32 v2, v3, v0 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v3, -v1, v2, s0 op_sel_hi:[1,0,0] ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_mul_f32_e32 v1, v3, v1 -; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff800000, v1 +; GFX11-TRUE16-NEXT: v_mul_f32_e32 v0, v3, v0 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff800000, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, v1, v2 -; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v1 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v0, v2 +; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.h, v0.l, 2.0 +; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.l, v1.l, 2.0 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: fdiv_pow_shl_cnt_fail_out_of_bound2: diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index-agpr.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index-agpr.mir new file mode 100644 index 0000000000000..32a209608a4d0 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index-agpr.mir @@ -0,0 +1,131 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -run-pass=si-fold-operands %s -o - | FileCheck %s + +--- +name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_fi_to_av +tracksRegLiveness: true +frameInfo: + maxAlignment: 4 + localFrameSize: 16384 +stack: + - { id: 0, size: 16384, alignment: 4, local-offset: 0 } +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_fi_to_av + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 + ; CHECK-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: SI_RETURN implicit [[AV_MOV_]] + %0:sreg_32 = S_MOV_B32 %stack.0 + %1:av_32 = AV_MOV_B32_IMM_PSEUDO %0, implicit $exec + SI_RETURN implicit %1 + +... + +--- +name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_fi_to_v +tracksRegLiveness: true +frameInfo: + maxAlignment: 4 + localFrameSize: 16384 +stack: + - { id: 0, size: 16384, alignment: 4, local-offset: 0 } +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_fi_to_v + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 + ; CHECK-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[AV_MOV_]], implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[COPY]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 %stack.0 + %1:av_32 = AV_MOV_B32_IMM_PSEUDO %0, implicit $exec + %2:vgpr_32 = COPY %1, implicit $exec + $vgpr0 = COPY %2 + SI_RETURN implicit $vgpr0 + +... 
+ +--- +name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_lit_to_v +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_lit_to_v + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1234 + ; CHECK-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[AV_MOV_]], implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[COPY]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 1234 + %1:av_32 = AV_MOV_B32_IMM_PSEUDO %0, implicit $exec + %2:vgpr_32 = COPY %1, implicit $exec + $vgpr0 = COPY %2 + SI_RETURN implicit $vgpr0 + +... + +--- +name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_imm_to_v +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_imm_to_v + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 8, implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[V_MOV_B32_e32_]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 8 + %1:av_32 = AV_MOV_B32_IMM_PSEUDO %0, implicit $exec + %2:vgpr_32 = COPY %1, implicit $exec + $vgpr0 = COPY %2 + SI_RETURN implicit $vgpr0 + +... + +--- +name: fold_frame_index_av_regression_0 +tracksRegLiveness: true +frameInfo: + maxAlignment: 4 + localFrameSize: 16384 +stack: + - { id: 0, size: 16384, alignment: 4, local-offset: 0 } +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_regression_0 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[COPY]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 %stack.0 + %1:av_32 = COPY %0 + %2:vgpr_32 = COPY %1, implicit $exec + $vgpr0 = COPY %2 + SI_RETURN implicit $vgpr0 + +... + +--- +name: fold_frame_index_av_regression_1 +tracksRegLiveness: true +frameInfo: + maxAlignment: 4 + localFrameSize: 16384 +stack: + - { id: 0, size: 16384, alignment: 4, local-offset: 0 } +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_regression_1 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[COPY]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 %stack.0 + %1:sreg_32 = S_MOV_B32 killed %0 + %2:sreg_64 = S_MOV_B64 0 + %3:av_32 = COPY %1 + %4:vgpr_32 = COPY %3, implicit $exec + $vgpr0 = COPY %4 + SI_RETURN implicit $vgpr0 + +... 
+ diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll index e7af7467171c3..e0421575c3174 100644 --- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll +++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll @@ -20,7 +20,8 @@ define i128 @fptosi_f64_to_i128(double %x) { ; SDAG-NEXT: s_cbranch_execz .LBB0_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc @@ -386,7 +387,8 @@ define i128 @fptoui_f64_to_i128(double %x) { ; SDAG-NEXT: s_cbranch_execz .LBB1_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc @@ -749,9 +751,10 @@ define i128 @fptosi_f32_to_i128(float %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB2_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc @@ -1100,9 +1103,10 @@ define i128 @fptoui_f32_to_i128(float %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB3_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc @@ -1489,9 +1493,10 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB6_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc @@ -1836,9 +1841,10 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) { ; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc ; SDAG-NEXT: s_cbranch_execz .LBB7_10 ; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end -; SDAG-NEXT: v_mov_b32_e32 v6, 0 ; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5 -; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc +; SDAG-NEXT: v_mov_b32_e32 v1, -1 +; SDAG-NEXT: v_mov_b32_e32 v6, 0 +; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc ; SDAG-NEXT: s_movk_i32 s6, 0xff7f ; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc diff --git 
a/llvm/test/CodeGen/AMDGPU/frem.ll b/llvm/test/CodeGen/AMDGPU/frem.ll index c4a38dcd7b5f3..78a961ea0da17 100644 --- a/llvm/test/CodeGen/AMDGPU/frem.ll +++ b/llvm/test/CodeGen/AMDGPU/frem.ll @@ -1433,37 +1433,35 @@ define amdgpu_kernel void @fast_frem_f16(ptr addrspace(1) %out, ptr addrspace(1) ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: s_clause 0x1 -; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] -; GFX11-TRUE16-NEXT: global_load_d16_b16 v1, v2, s[4:5] offset:8 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v2, v1, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_b16 v3, v1, s[4:5] offset:8 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v3, v0.l +; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v0, v2.l ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v0.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v3.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_rcp_f32_e32 v4, v4 ; GFX11-TRUE16-NEXT: s_waitcnt_depctr 0xfff -; GFX11-TRUE16-NEXT: v_mul_f32_e32 v3, v3, v4 -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v7, -v5, v3, v6 op_sel_hi:[1,0,1] +; GFX11-TRUE16-NEXT: v_mul_f32_e32 v0, v0, v4 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_fmac_f32_e32 v3, v7, v4 -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v5, -v5, v3, v6 op_sel_hi:[1,0,1] +; GFX11-TRUE16-NEXT: v_fmac_f32_e32 v0, v5, v4 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mul_f32_e32 v4, v5, v4 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff800000, v4 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v4, v3 -; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v3 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v4, v0 +; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.h, v0.h, v1.l, v0.l -; GFX11-TRUE16-NEXT: v_trunc_f16_e32 v0.h, v0.h +; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.l, v3.l, v2.l +; GFX11-TRUE16-NEXT: v_trunc_f16_e32 v0.l, v0.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_fma_f16 v0.l, -v0.h, v1.l, v0.l -; GFX11-TRUE16-NEXT: global_store_b16 v2, v0, s[0:1] +; GFX11-TRUE16-NEXT: v_fma_f16 v0.l, -v0.l, v3.l, v2.l +; GFX11-TRUE16-NEXT: global_store_b16 v1, v0, s[0:1] ; GFX11-TRUE16-NEXT: s_endpgm ; ; GFX11-FAKE16-LABEL: fast_frem_f16: @@ -1507,38 +1505,36 @@ define amdgpu_kernel void @fast_frem_f16(ptr addrspace(1) %out, ptr addrspace(1) ; GFX1150-TRUE16-NEXT: s_clause 0x1 ; GFX1150-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1150-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v1, 0 ; GFX1150-TRUE16-NEXT: s_waitcnt 
lgkmcnt(0) ; GFX1150-TRUE16-NEXT: s_clause 0x1 -; GFX1150-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] -; GFX1150-TRUE16-NEXT: global_load_d16_b16 v1, v2, s[4:5] offset:8 +; GFX1150-TRUE16-NEXT: global_load_d16_b16 v2, v1, s[2:3] +; GFX1150-TRUE16-NEXT: global_load_d16_b16 v3, v1, s[4:5] offset:8 ; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX1150-TRUE16-NEXT: v_cvt_f32_f16_e32 v3, v0.l +; GFX1150-TRUE16-NEXT: v_cvt_f32_f16_e32 v0, v2.l ; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX1150-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v1.l -; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v5.l, v1.l -; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v6.l, v0.l -; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(TRANS32_DEP_1) +; GFX1150-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v3.l +; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_1) ; GFX1150-TRUE16-NEXT: v_rcp_f32_e32 v4, v4 -; GFX1150-TRUE16-NEXT: v_mul_f32_e32 v3, v3, v4 +; GFX1150-TRUE16-NEXT: v_mul_f32_e32 v0, v0, v4 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_fma_mix_f32 v7, -v5, v3, v6 op_sel_hi:[1,0,1] -; GFX1150-TRUE16-NEXT: v_fmac_f32_e32 v3, v7, v4 +; GFX1150-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] +; GFX1150-TRUE16-NEXT: v_fmac_f32_e32 v0, v5, v4 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_fma_mix_f32 v5, -v5, v3, v6 op_sel_hi:[1,0,1] +; GFX1150-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] ; GFX1150-TRUE16-NEXT: v_mul_f32_e32 v4, v5, v4 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-TRUE16-NEXT: v_and_b32_e32 v4, 0xff800000, v4 -; GFX1150-TRUE16-NEXT: v_add_f32_e32 v3, v4, v3 +; GFX1150-TRUE16-NEXT: v_add_f32_e32 v0, v4, v0 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v3 -; GFX1150-TRUE16-NEXT: v_div_fixup_f16 v0.h, v0.h, v1.l, v0.l +; GFX1150-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 +; GFX1150-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.l, v3.l, v2.l ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_trunc_f16_e32 v3.l, v0.h -; GFX1150-TRUE16-NEXT: v_xor_b32_e32 v3, 0x8000, v3 +; GFX1150-TRUE16-NEXT: v_trunc_f16_e32 v0.l, v0.l +; GFX1150-TRUE16-NEXT: v_xor_b32_e32 v0, 0x8000, v0 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_fmac_f16_e32 v0.l, v3.l, v1.l -; GFX1150-TRUE16-NEXT: global_store_b16 v2, v0, s[0:1] +; GFX1150-TRUE16-NEXT: v_fmac_f16_e32 v2.l, v0.l, v3.l +; GFX1150-TRUE16-NEXT: global_store_b16 v1, v2, s[0:1] ; GFX1150-TRUE16-NEXT: s_endpgm ; ; GFX1150-FAKE16-LABEL: fast_frem_f16: @@ -1583,38 +1579,36 @@ define amdgpu_kernel void @fast_frem_f16(ptr addrspace(1) %out, ptr addrspace(1) ; GFX1200-TRUE16-NEXT: s_clause 0x1 ; GFX1200-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1200-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v1, 0 ; GFX1200-TRUE16-NEXT: s_wait_kmcnt 0x0 ; GFX1200-TRUE16-NEXT: s_clause 0x1 -; GFX1200-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] -; GFX1200-TRUE16-NEXT: global_load_d16_b16 v1, v2, s[4:5] offset:8 +; GFX1200-TRUE16-NEXT: global_load_d16_b16 v2, v1, s[2:3] +; GFX1200-TRUE16-NEXT: global_load_d16_b16 v3, v1, s[4:5] offset:8 ; 
GFX1200-TRUE16-NEXT: s_wait_loadcnt 0x1 -; GFX1200-TRUE16-NEXT: v_cvt_f32_f16_e32 v3, v0.l +; GFX1200-TRUE16-NEXT: v_cvt_f32_f16_e32 v0, v2.l ; GFX1200-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1200-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v1.l -; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v5.l, v1.l -; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v6.l, v0.l -; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(TRANS32_DEP_1) +; GFX1200-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v3.l +; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_1) ; GFX1200-TRUE16-NEXT: v_rcp_f32_e32 v4, v4 -; GFX1200-TRUE16-NEXT: v_mul_f32_e32 v3, v3, v4 +; GFX1200-TRUE16-NEXT: v_mul_f32_e32 v0, v0, v4 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1200-TRUE16-NEXT: v_fma_mix_f32 v7, -v5, v3, v6 op_sel_hi:[1,0,1] -; GFX1200-TRUE16-NEXT: v_fmac_f32_e32 v3, v7, v4 +; GFX1200-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] +; GFX1200-TRUE16-NEXT: v_fmac_f32_e32 v0, v5, v4 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1200-TRUE16-NEXT: v_fma_mix_f32 v5, -v5, v3, v6 op_sel_hi:[1,0,1] +; GFX1200-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] ; GFX1200-TRUE16-NEXT: v_mul_f32_e32 v4, v5, v4 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-TRUE16-NEXT: v_and_b32_e32 v4, 0xff800000, v4 -; GFX1200-TRUE16-NEXT: v_add_f32_e32 v3, v4, v3 +; GFX1200-TRUE16-NEXT: v_add_f32_e32 v0, v4, v0 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1200-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v3 -; GFX1200-TRUE16-NEXT: v_div_fixup_f16 v0.h, v0.h, v1.l, v0.l +; GFX1200-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 +; GFX1200-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.l, v3.l, v2.l ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1200-TRUE16-NEXT: v_trunc_f16_e32 v3.l, v0.h -; GFX1200-TRUE16-NEXT: v_xor_b32_e32 v3, 0x8000, v3 +; GFX1200-TRUE16-NEXT: v_trunc_f16_e32 v0.l, v0.l +; GFX1200-TRUE16-NEXT: v_xor_b32_e32 v0, 0x8000, v0 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1200-TRUE16-NEXT: v_fmac_f16_e32 v0.l, v3.l, v1.l -; GFX1200-TRUE16-NEXT: global_store_b16 v2, v0, s[0:1] +; GFX1200-TRUE16-NEXT: v_fmac_f16_e32 v2.l, v0.l, v3.l +; GFX1200-TRUE16-NEXT: global_store_b16 v1, v2, s[0:1] ; GFX1200-TRUE16-NEXT: s_endpgm ; ; GFX1200-FAKE16-LABEL: fast_frem_f16: @@ -1840,37 +1834,35 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace( ; GFX11-TRUE16-NEXT: s_clause 0x1 ; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, 0 ; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-TRUE16-NEXT: s_clause 0x1 -; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] -; GFX11-TRUE16-NEXT: global_load_d16_b16 v1, v2, s[4:5] offset:8 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v2, v1, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_b16 v3, v1, s[4:5] offset:8 ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v3, v0.l +; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v0, v2.l ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v1.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v0.l -; 
GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v3.l +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_rcp_f32_e32 v4, v4 ; GFX11-TRUE16-NEXT: s_waitcnt_depctr 0xfff -; GFX11-TRUE16-NEXT: v_mul_f32_e32 v3, v3, v4 -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v7, -v5, v3, v6 op_sel_hi:[1,0,1] +; GFX11-TRUE16-NEXT: v_mul_f32_e32 v0, v0, v4 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_fmac_f32_e32 v3, v7, v4 -; GFX11-TRUE16-NEXT: v_fma_mix_f32 v5, -v5, v3, v6 op_sel_hi:[1,0,1] +; GFX11-TRUE16-NEXT: v_fmac_f32_e32 v0, v5, v4 +; GFX11-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-TRUE16-NEXT: v_mul_f32_e32 v4, v5, v4 ; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xff800000, v4 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v4, v3 -; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v3 +; GFX11-TRUE16-NEXT: v_add_f32_e32 v0, v4, v0 +; GFX11-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.h, v0.h, v1.l, v0.l -; GFX11-TRUE16-NEXT: v_trunc_f16_e32 v0.h, v0.h +; GFX11-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.l, v3.l, v2.l +; GFX11-TRUE16-NEXT: v_trunc_f16_e32 v0.l, v0.l ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_fma_f16 v0.l, -v0.h, v1.l, v0.l -; GFX11-TRUE16-NEXT: global_store_b16 v2, v0, s[0:1] +; GFX11-TRUE16-NEXT: v_fma_f16 v0.l, -v0.l, v3.l, v2.l +; GFX11-TRUE16-NEXT: global_store_b16 v1, v0, s[0:1] ; GFX11-TRUE16-NEXT: s_endpgm ; ; GFX11-FAKE16-LABEL: unsafe_frem_f16: @@ -1914,38 +1906,36 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace( ; GFX1150-TRUE16-NEXT: s_clause 0x1 ; GFX1150-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1150-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX1150-TRUE16-NEXT: v_mov_b32_e32 v1, 0 ; GFX1150-TRUE16-NEXT: s_waitcnt lgkmcnt(0) ; GFX1150-TRUE16-NEXT: s_clause 0x1 -; GFX1150-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] -; GFX1150-TRUE16-NEXT: global_load_d16_b16 v1, v2, s[4:5] offset:8 +; GFX1150-TRUE16-NEXT: global_load_d16_b16 v2, v1, s[2:3] +; GFX1150-TRUE16-NEXT: global_load_d16_b16 v3, v1, s[4:5] offset:8 ; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(1) -; GFX1150-TRUE16-NEXT: v_cvt_f32_f16_e32 v3, v0.l +; GFX1150-TRUE16-NEXT: v_cvt_f32_f16_e32 v0, v2.l ; GFX1150-TRUE16-NEXT: s_waitcnt vmcnt(0) -; GFX1150-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v1.l -; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v5.l, v1.l -; GFX1150-TRUE16-NEXT: v_mov_b16_e32 v6.l, v0.l -; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(TRANS32_DEP_1) +; GFX1150-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v3.l +; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_1) ; GFX1150-TRUE16-NEXT: v_rcp_f32_e32 v4, v4 -; GFX1150-TRUE16-NEXT: v_mul_f32_e32 v3, v3, v4 +; GFX1150-TRUE16-NEXT: v_mul_f32_e32 v0, v0, v4 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_fma_mix_f32 v7, -v5, 
v3, v6 op_sel_hi:[1,0,1] -; GFX1150-TRUE16-NEXT: v_fmac_f32_e32 v3, v7, v4 +; GFX1150-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] +; GFX1150-TRUE16-NEXT: v_fmac_f32_e32 v0, v5, v4 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_fma_mix_f32 v5, -v5, v3, v6 op_sel_hi:[1,0,1] +; GFX1150-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] ; GFX1150-TRUE16-NEXT: v_mul_f32_e32 v4, v5, v4 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1150-TRUE16-NEXT: v_and_b32_e32 v4, 0xff800000, v4 -; GFX1150-TRUE16-NEXT: v_add_f32_e32 v3, v4, v3 +; GFX1150-TRUE16-NEXT: v_add_f32_e32 v0, v4, v0 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v3 -; GFX1150-TRUE16-NEXT: v_div_fixup_f16 v0.h, v0.h, v1.l, v0.l +; GFX1150-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 +; GFX1150-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.l, v3.l, v2.l ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_trunc_f16_e32 v3.l, v0.h -; GFX1150-TRUE16-NEXT: v_xor_b32_e32 v3, 0x8000, v3 +; GFX1150-TRUE16-NEXT: v_trunc_f16_e32 v0.l, v0.l +; GFX1150-TRUE16-NEXT: v_xor_b32_e32 v0, 0x8000, v0 ; GFX1150-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1150-TRUE16-NEXT: v_fmac_f16_e32 v0.l, v3.l, v1.l -; GFX1150-TRUE16-NEXT: global_store_b16 v2, v0, s[0:1] +; GFX1150-TRUE16-NEXT: v_fmac_f16_e32 v2.l, v0.l, v3.l +; GFX1150-TRUE16-NEXT: global_store_b16 v1, v2, s[0:1] ; GFX1150-TRUE16-NEXT: s_endpgm ; ; GFX1150-FAKE16-LABEL: unsafe_frem_f16: @@ -1990,38 +1980,36 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace( ; GFX1200-TRUE16-NEXT: s_clause 0x1 ; GFX1200-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1200-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v2, 0 +; GFX1200-TRUE16-NEXT: v_mov_b32_e32 v1, 0 ; GFX1200-TRUE16-NEXT: s_wait_kmcnt 0x0 ; GFX1200-TRUE16-NEXT: s_clause 0x1 -; GFX1200-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] -; GFX1200-TRUE16-NEXT: global_load_d16_b16 v1, v2, s[4:5] offset:8 +; GFX1200-TRUE16-NEXT: global_load_d16_b16 v2, v1, s[2:3] +; GFX1200-TRUE16-NEXT: global_load_d16_b16 v3, v1, s[4:5] offset:8 ; GFX1200-TRUE16-NEXT: s_wait_loadcnt 0x1 -; GFX1200-TRUE16-NEXT: v_cvt_f32_f16_e32 v3, v0.l +; GFX1200-TRUE16-NEXT: v_cvt_f32_f16_e32 v0, v2.l ; GFX1200-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1200-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v1.l -; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v5.l, v1.l -; GFX1200-TRUE16-NEXT: v_mov_b16_e32 v6.l, v0.l -; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(TRANS32_DEP_1) +; GFX1200-TRUE16-NEXT: v_cvt_f32_f16_e32 v4, v3.l +; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(TRANS32_DEP_1) ; GFX1200-TRUE16-NEXT: v_rcp_f32_e32 v4, v4 -; GFX1200-TRUE16-NEXT: v_mul_f32_e32 v3, v3, v4 +; GFX1200-TRUE16-NEXT: v_mul_f32_e32 v0, v0, v4 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1200-TRUE16-NEXT: v_fma_mix_f32 v7, -v5, v3, v6 op_sel_hi:[1,0,1] -; GFX1200-TRUE16-NEXT: v_fmac_f32_e32 v3, v7, v4 +; GFX1200-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] +; GFX1200-TRUE16-NEXT: v_fmac_f32_e32 v0, v5, v4 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; 
GFX1200-TRUE16-NEXT: v_fma_mix_f32 v5, -v5, v3, v6 op_sel_hi:[1,0,1] +; GFX1200-TRUE16-NEXT: v_fma_mix_f32 v5, -v3, v0, v2 op_sel_hi:[1,0,1] ; GFX1200-TRUE16-NEXT: v_mul_f32_e32 v4, v5, v4 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1200-TRUE16-NEXT: v_and_b32_e32 v4, 0xff800000, v4 -; GFX1200-TRUE16-NEXT: v_add_f32_e32 v3, v4, v3 +; GFX1200-TRUE16-NEXT: v_add_f32_e32 v0, v4, v0 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1200-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v3 -; GFX1200-TRUE16-NEXT: v_div_fixup_f16 v0.h, v0.h, v1.l, v0.l +; GFX1200-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 +; GFX1200-TRUE16-NEXT: v_div_fixup_f16 v0.l, v0.l, v3.l, v2.l ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1200-TRUE16-NEXT: v_trunc_f16_e32 v3.l, v0.h -; GFX1200-TRUE16-NEXT: v_xor_b32_e32 v3, 0x8000, v3 +; GFX1200-TRUE16-NEXT: v_trunc_f16_e32 v0.l, v0.l +; GFX1200-TRUE16-NEXT: v_xor_b32_e32 v0, 0x8000, v0 ; GFX1200-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1200-TRUE16-NEXT: v_fmac_f16_e32 v0.l, v3.l, v1.l -; GFX1200-TRUE16-NEXT: global_store_b16 v2, v0, s[0:1] +; GFX1200-TRUE16-NEXT: v_fmac_f16_e32 v2.l, v0.l, v3.l +; GFX1200-TRUE16-NEXT: global_store_b16 v1, v2, s[0:1] ; GFX1200-TRUE16-NEXT: s_endpgm ; ; GFX1200-FAKE16-LABEL: unsafe_frem_f16: diff --git a/llvm/test/CodeGen/AMDGPU/function-returns.ll b/llvm/test/CodeGen/AMDGPU/function-returns.ll index 38003f6075c35..0084d936ec03b 100644 --- a/llvm/test/CodeGen/AMDGPU/function-returns.ll +++ b/llvm/test/CodeGen/AMDGPU/function-returns.ll @@ -895,11 +895,11 @@ define <3 x i16> @v3i16_func_void() #0 { ; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, -1 -; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0 +; CI-NEXT: buffer_load_dwordx2 v[3:4], off, s[4:7], 0 ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_alignbit_b32 v1, v3, v2, 16 -; CI-NEXT: v_mov_b32_e32 v0, v2 -; CI-NEXT: v_mov_b32_e32 v2, v3 +; CI-NEXT: v_lshr_b64 v[1:2], v[3:4], 16 +; CI-NEXT: v_mov_b32_e32 v0, v3 +; CI-NEXT: v_mov_b32_e32 v2, v4 ; CI-NEXT: s_setpc_b64 s[30:31] ; ; GFX89-LABEL: v3i16_func_void: @@ -1008,7 +1008,7 @@ define <5 x i16> @v5i16_func_void() #0 { ; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 ; CI-NEXT: buffer_load_sshort v4, off, s[4:7], 0 offset:8 ; CI-NEXT: s_waitcnt vmcnt(1) -; CI-NEXT: v_alignbit_b32 v5, v1, v0, 16 +; CI-NEXT: v_lshr_b64 v[5:6], v[0:1], 16 ; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; CI-NEXT: v_mov_b32_e32 v2, v1 ; CI-NEXT: v_mov_b32_e32 v1, v5 diff --git a/llvm/test/CodeGen/AMDGPU/gfx1250-scratch-scope-se.ll b/llvm/test/CodeGen/AMDGPU/gfx1250-scratch-scope-se.ll deleted file mode 100644 index f0c9258358316..0000000000000 --- a/llvm/test/CodeGen/AMDGPU/gfx1250-scratch-scope-se.ll +++ /dev/null @@ -1,94 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GCN-SDAG %s -; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GCN-GISEL %s - -; Test that stores that may hit scratch are correctly promoted to SCOPE_SE. 
- -define void @test_scratch_store(ptr addrspace(5) %ptr, i32 %val) { -; GCN-LABEL: test_scratch_store: -; GCN: ; %bb.0: -; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: scratch_store_b32 v0, v1, off -; GCN-NEXT: s_set_pc_i64 s[30:31] - store i32 %val, ptr addrspace(5) %ptr - ret void -} - -define void @test_unknown_flat_store(ptr %ptr, i32 %val) { -; GCN-LABEL: test_unknown_flat_store: -; GCN: ; %bb.0: -; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: flat_store_b32 v[0:1], v2 -; GCN-NEXT: s_wait_dscnt 0x0 -; GCN-NEXT: s_set_pc_i64 s[30:31] - store i32 %val, ptr %ptr - ret void -} - -define void @test_flat_store_no_scratch_alloc(ptr %ptr, i32 %val) #0 { -; GCN-LABEL: test_flat_store_no_scratch_alloc: -; GCN: ; %bb.0: -; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: flat_store_b32 v[0:1], v2 -; GCN-NEXT: s_wait_dscnt 0x0 -; GCN-NEXT: s_set_pc_i64 s[30:31] - store i32 %val, ptr %ptr - ret void -} - -define void @test_flat_store_noalias_addrspace(ptr %ptr, i32 %val) { -; GCN-LABEL: test_flat_store_noalias_addrspace: -; GCN: ; %bb.0: -; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 -; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: flat_store_b32 v[0:1], v2 -; GCN-NEXT: s_wait_dscnt 0x0 -; GCN-NEXT: s_set_pc_i64 s[30:31] - store i32 %val, ptr %ptr, !noalias.addrspace !{i32 5, i32 6} - ret void -} - -; TODO: would be nice to handle -define void @test_flat_store_select(ptr addrspace(1) %a, ptr addrspace(3) %b, i1 %cond, i32 %val) { -; GCN-SDAG-LABEL: test_flat_store_select: -; GCN-SDAG: ; %bb.0: -; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; GCN-SDAG-NEXT: s_wait_kmcnt 0x0 -; GCN-SDAG-NEXT: v_cmp_ne_u32_e32 vcc_lo, -1, v2 -; GCN-SDAG-NEXT: v_and_b32_e32 v3, 1, v3 -; GCN-SDAG-NEXT: s_mov_b64 s[0:1], src_shared_base -; GCN-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc_lo -; GCN-SDAG-NEXT: v_cndmask_b32_e64 v5, 0, s1, vcc_lo -; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GCN-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 -; GCN-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v2, v0 -; GCN-SDAG-NEXT: flat_store_b32 v[0:1], v4 -; GCN-SDAG-NEXT: s_wait_dscnt 0x0 -; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31] -; -; GCN-GISEL-LABEL: test_flat_store_select: -; GCN-GISEL: ; %bb.0: -; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GCN-GISEL-NEXT: s_wait_kmcnt 0x0 -; GCN-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, -1, v2 -; GCN-GISEL-NEXT: v_and_b32_e32 v3, 1, v3 -; GCN-GISEL-NEXT: s_mov_b64 s[0:1], src_shared_base -; GCN-GISEL-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc_lo -; GCN-GISEL-NEXT: v_cndmask_b32_e64 v5, 0, s1, vcc_lo -; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GCN-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 -; GCN-GISEL-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v5, v1 -; GCN-GISEL-NEXT: flat_store_b32 v[0:1], v4 -; GCN-GISEL-NEXT: s_wait_dscnt 0x0 -; GCN-GISEL-NEXT: s_set_pc_i64 s[30:31] - %a.ascast = addrspacecast ptr addrspace(1) %a to ptr - %b.ascast = addrspacecast ptr addrspace(3) %b to ptr - %ptr = select i1 %cond, ptr %a.ascast, ptr %b.ascast - store i32 %val, ptr %ptr - ret void -} - -attributes #0 = { "amdgpu-no-flat-scratch-init" } diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll index b67a1c513c49f..a50791e10f5a2 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll @@ -7575,15 
+7575,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB38_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] ; GFX7-NEXT: v_mov_b32_e32 v0, v8 ; GFX7-NEXT: v_mov_b32_e32 v1, v9 @@ -7593,7 +7591,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v11, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB38_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7609,15 +7609,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB38_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, v8 @@ -7628,7 +7626,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 ; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v11, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB38_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7809,15 +7809,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040 +; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB39_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] ; GFX7-NEXT: v_mov_b32_e32 v0, v8 ; GFX7-NEXT: v_mov_b32_e32 v1, v9 @@ -7827,7 +7825,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v11, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: 
s_cbranch_execnz .LBB39_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7843,15 +7843,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040 +; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB39_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, v8 @@ -7862,7 +7860,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 ; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v11, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB39_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8039,34 +8039,32 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: s_movk_i32 s4, 0xf800 -; GFX7-NEXT: v_mov_b32_e32 v7, v1 -; GFX7-NEXT: v_mov_b32_e32 v6, v0 ; GFX7-NEXT: s_mov_b32 s5, -1 ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 -; GFX7-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64 +; GFX7-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 -; GFX7-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc +; GFX7-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 ; GFX7-NEXT: .LBB40_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 -; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v8 -; GFX7-NEXT: v_mov_b32_e32 v1, v9 -; GFX7-NEXT: v_mov_b32_e32 v2, v10 -; GFX7-NEXT: v_mov_b32_e32 v3, v11 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB40_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8077,35 +8075,33 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: s_movk_i32 s4, 0xf800 -; GFX6-NEXT: v_mov_b32_e32 v7, v1 -; GFX6-NEXT: 
v_mov_b32_e32 v6, v0 ; GFX6-NEXT: s_mov_b32 s5, -1 ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s6, 0 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64 +; GFX6-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 -; GFX6-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc +; GFX6-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 ; GFX6-NEXT: .LBB40_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 -; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, v8 -; GFX6-NEXT: v_mov_b32_e32 v1, v9 -; GFX6-NEXT: v_mov_b32_e32 v2, v10 -; GFX6-NEXT: v_mov_b32_e32 v3, v11 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB40_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -9818,7 +9814,7 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_gra ; GFX1250-TRUE16: ; %bb.0: ; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800) +; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800 ; GFX1250-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-TRUE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] ; GFX1250-TRUE16-NEXT: s_mov_b32 s0, 0 @@ -9861,7 +9857,7 @@ define half @global_agent_atomic_fadd_ret_f16__offset12b_neg__amdgpu_no_fine_gra ; GFX1250-FAKE16: ; %bb.0: ; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800) +; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800 ; GFX1250-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-FAKE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] ; GFX1250-FAKE16-NEXT: s_mov_b32 s0, 0 @@ -11339,7 +11335,7 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_g ; GFX1250-TRUE16: ; %bb.0: ; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800) +; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800 ; GFX1250-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-TRUE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] ; GFX1250-TRUE16-NEXT: s_mov_b32 s0, 0 @@ -11382,7 +11378,7 @@ define void @global_agent_atomic_fadd_noret_f16__offset12b_neg__amdgpu_no_fine_g ; GFX1250-FAKE16: ; %bb.0: ; 
GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800) +; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800 ; GFX1250-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-FAKE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] ; GFX1250-FAKE16-NEXT: s_mov_b32 s0, 0 @@ -14855,7 +14851,7 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_ ; GFX1250-TRUE16: ; %bb.0: ; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800) +; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800 ; GFX1250-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-TRUE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] ; GFX1250-TRUE16-NEXT: s_mov_b32 s0, 0 @@ -14905,7 +14901,7 @@ define bfloat @global_agent_atomic_fadd_ret_bf16__offset12b_neg__amdgpu_no_fine_ ; GFX1250-FAKE16: ; %bb.0: ; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800) +; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800 ; GFX1250-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX1250-FAKE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] ; GFX1250-FAKE16-NEXT: s_mov_b32 s0, 0 @@ -16648,7 +16644,7 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_ ; GFX1250-TRUE16: ; %bb.0: ; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800) +; GFX1250-TRUE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800 ; GFX1250-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-TRUE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] ; GFX1250-TRUE16-NEXT: s_mov_b32 s0, 0 @@ -16697,7 +16693,7 @@ define void @global_agent_atomic_fadd_noret_bf16__offset12b_neg__amdgpu_no_fine_ ; GFX1250-FAKE16: ; %bb.0: ; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], lit64(0xfffffffffffff800) +; GFX1250-FAKE16-NEXT: s_mov_b64 s[0:1], 0xfffffffffffff800 ; GFX1250-FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2 ; GFX1250-FAKE16-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] ; GFX1250-FAKE16-NEXT: s_mov_b32 s0, 0 diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll index ac223fd6030bd..311faac1b7c29 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll @@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 -; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3] +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64 +; GFX7-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3] ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 -; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX7-NEXT: v_max_f64 v[8:9], v[0:1], v[6:7] -; 
GFX7-NEXT: v_mov_b32_e32 v0, v8 -; GFX7-NEXT: v_mov_b32_e32 v1, v9 -; GFX7-NEXT: v_mov_b32_e32 v2, v10 -; GFX7-NEXT: v_mov_b32_e32 v3, v11 +; GFX7-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9] +; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 ; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB24_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4237,26 +4237,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 -; GFX6-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3] +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64 +; GFX6-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3] ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX6-NEXT: v_max_f64 v[8:9], v[0:1], v[6:7] -; GFX6-NEXT: v_mov_b32_e32 v0, v8 -; GFX6-NEXT: v_mov_b32_e32 v1, v9 -; GFX6-NEXT: v_mov_b32_e32 v2, v10 -; GFX6-NEXT: v_mov_b32_e32 v3, v11 +; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GFX6-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9] +; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 ; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB24_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll index 5653f85c67339..e2808ee9bf706 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll @@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 -; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3] +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64 +; GFX7-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3] ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 -; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX7-NEXT: v_min_f64 v[8:9], v[0:1], 
v[6:7] -; GFX7-NEXT: v_mov_b32_e32 v0, v8 -; GFX7-NEXT: v_mov_b32_e32 v1, v9 -; GFX7-NEXT: v_mov_b32_e32 v2, v10 -; GFX7-NEXT: v_mov_b32_e32 v3, v11 +; GFX7-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9] +; GFX7-NEXT: v_min_f64 v[6:7], v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 ; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB24_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4237,26 +4237,25 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 -; GFX6-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3] +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64 +; GFX6-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3] ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB24_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 -; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11] -; GFX6-NEXT: v_min_f64 v[8:9], v[0:1], v[6:7] -; GFX6-NEXT: v_mov_b32_e32 v0, v8 -; GFX6-NEXT: v_mov_b32_e32 v1, v9 -; GFX6-NEXT: v_mov_b32_e32 v2, v10 -; GFX6-NEXT: v_mov_b32_e32 v3, v11 +; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; GFX6-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9] +; GFX6-NEXT: v_min_f64 v[6:7], v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 ; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB24_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll index f0e16150c9e79..11f0f38d2b6fa 100644 --- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll +++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll @@ -3913,15 +3913,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] ; GFX7-NEXT: v_mov_b32_e32 v0, v8 ; GFX7-NEXT: v_mov_b32_e32 
v1, v9 @@ -3931,7 +3929,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v11, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB16_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3947,15 +3947,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, v8 @@ -3966,7 +3964,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 ; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v11, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB16_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4165,15 +4165,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1) ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040 +; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] ; GFX7-NEXT: v_mov_b32_e32 v0, v8 ; GFX7-NEXT: v_mov_b32_e32 v1, v9 @@ -4183,7 +4181,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1) ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 ; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_mov_b32_e32 v11, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v10, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB17_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4199,15 +4199,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1) ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040 +; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: v_add_f64 
v[8:9], v[10:11], -v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) ; GFX6-NEXT: v_mov_b32_e32 v0, v8 @@ -4218,7 +4216,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1) ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 ; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_mov_b32_e32 v11, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v10, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB17_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4413,34 +4413,32 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1) ; GFX7: ; %bb.0: ; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX7-NEXT: s_movk_i32 s4, 0xf800 -; GFX7-NEXT: v_mov_b32_e32 v7, v1 -; GFX7-NEXT: v_mov_b32_e32 v6, v0 ; GFX7-NEXT: s_mov_b32 s5, -1 ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b32 s6, 0 -; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 -; GFX7-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6 +; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64 +; GFX7-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0 ; GFX7-NEXT: v_mov_b32_e32 v5, v3 ; GFX7-NEXT: v_mov_b32_e32 v4, v2 -; GFX7-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc +; GFX7-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc ; GFX7-NEXT: s_mov_b64 s[8:9], 0 ; GFX7-NEXT: s_mov_b32 s4, s6 ; GFX7-NEXT: s_mov_b32 s5, s6 ; GFX7-NEXT: .LBB18_1: ; %atomicrmw.start ; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_mov_b32_e32 v11, v1 -; GFX7-NEXT: v_mov_b32_e32 v10, v0 -; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] -; GFX7-NEXT: v_mov_b32_e32 v0, v8 -; GFX7-NEXT: v_mov_b32_e32 v1, v9 -; GFX7-NEXT: v_mov_b32_e32 v2, v10 -; GFX7-NEXT: v_mov_b32_e32 v3, v11 -; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc +; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], -v[4:5] +; GFX7-NEXT: v_mov_b32_e32 v0, v6 +; GFX7-NEXT: v_mov_b32_e32 v1, v7 +; GFX7-NEXT: v_mov_b32_e32 v2, v8 +; GFX7-NEXT: v_mov_b32_e32 v3, v9 +; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc ; GFX7-NEXT: s_waitcnt vmcnt(0) ; GFX7-NEXT: buffer_wbinvl1 -; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX7-NEXT: v_mov_b32_e32 v9, v1 ; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX7-NEXT: v_mov_b32_e32 v8, v0 ; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX7-NEXT: s_cbranch_execnz .LBB18_1 ; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4451,35 +4449,33 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1) ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX6-NEXT: s_movk_i32 s4, 0xf800 -; GFX6-NEXT: v_mov_b32_e32 v7, v1 -; GFX6-NEXT: v_mov_b32_e32 v6, v0 ; GFX6-NEXT: s_mov_b32 s5, -1 ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_mov_b32 s6, 0 -; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 -; GFX6-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6 +; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64 +; GFX6-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0 ; GFX6-NEXT: v_mov_b32_e32 v5, v3 ; GFX6-NEXT: v_mov_b32_e32 v4, v2 -; GFX6-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc +; GFX6-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc ; GFX6-NEXT: s_mov_b64 s[8:9], 0 ; GFX6-NEXT: s_mov_b32 s4, s6 ; GFX6-NEXT: s_mov_b32 s5, s6 ; GFX6-NEXT: .LBB18_1: ; %atomicrmw.start ; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX6-NEXT: s_waitcnt vmcnt(0) -; GFX6-NEXT: 
v_mov_b32_e32 v11, v1 -; GFX6-NEXT: v_mov_b32_e32 v10, v0 -; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5] +; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], -v[4:5] ; GFX6-NEXT: s_waitcnt expcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, v8 -; GFX6-NEXT: v_mov_b32_e32 v1, v9 -; GFX6-NEXT: v_mov_b32_e32 v2, v10 -; GFX6-NEXT: v_mov_b32_e32 v3, v11 -; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc +; GFX6-NEXT: v_mov_b32_e32 v0, v6 +; GFX6-NEXT: v_mov_b32_e32 v1, v7 +; GFX6-NEXT: v_mov_b32_e32 v2, v8 +; GFX6-NEXT: v_mov_b32_e32 v3, v9 +; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc ; GFX6-NEXT: s_waitcnt vmcnt(0) ; GFX6-NEXT: buffer_wbinvl1 -; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] +; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; GFX6-NEXT: v_mov_b32_e32 v9, v1 ; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; GFX6-NEXT: v_mov_b32_e32 v8, v0 ; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9] ; GFX6-NEXT: s_cbranch_execnz .LBB18_1 ; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll index ffab56847edca..1a45bd978ccc1 100644 --- a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll +++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll @@ -10195,8 +10195,7 @@ define void @global_atomic_udec_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) { ; SI-NEXT: .LBB144_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v4 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; SI-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4 ; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; SI-NEXT: s_or_b64 vcc, vcc, s[4:5] ; SI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -10224,8 +10223,7 @@ define void @global_atomic_udec_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) { ; VI-NEXT: .LBB144_1: ; %atomicrmw.start ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; VI-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4 ; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; VI-NEXT: s_or_b64 vcc, vcc, s[4:5] ; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -10249,9 +10247,8 @@ define void @global_atomic_udec_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) { ; GFX9-NEXT: .LBB144_1: ; %atomicrmw.start ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4 ; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 -; GFX9-NEXT: v_add_u32_e32 v3, -1, v4 ; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc ; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc @@ -10282,8 +10279,7 @@ define void @global_atomic_udec_wrap_i32_noret_offset(ptr addrspace(1) %out, i32 ; SI-NEXT: .LBB145_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v4 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; SI-NEXT: v_subrev_i32_e32 v3, vcc, 1, v4 ; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; SI-NEXT: s_or_b64 vcc, vcc, s[4:5] ; SI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -10313,8 +10309,7 @@ define void @global_atomic_udec_wrap_i32_noret_offset(ptr addrspace(1) %out, i32 ; VI-NEXT: .LBB145_1: ; %atomicrmw.start ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4 -; 
VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; VI-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4 ; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; VI-NEXT: s_or_b64 vcc, vcc, s[4:5] ; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -10338,9 +10333,8 @@ define void @global_atomic_udec_wrap_i32_noret_offset(ptr addrspace(1) %out, i32 ; GFX9-NEXT: .LBB145_1: ; %atomicrmw.start ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4 ; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 -; GFX9-NEXT: v_add_u32_e32 v3, -1, v4 ; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc ; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc @@ -10374,8 +10368,7 @@ define i32 @global_atomic_udec_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) { ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mov_b32_e32 v5, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v5 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; SI-NEXT: v_subrev_i32_e32 v3, vcc, 1, v5 ; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v5, v2 ; SI-NEXT: s_or_b64 vcc, vcc, s[4:5] ; SI-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc @@ -10403,8 +10396,7 @@ define i32 @global_atomic_udec_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) { ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_mov_b32_e32 v4, v3 -; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; VI-NEXT: v_subrev_u32_e32 v3, vcc, 1, v4 ; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 ; VI-NEXT: s_or_b64 vcc, vcc, s[4:5] ; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc @@ -10429,9 +10421,8 @@ define i32 @global_atomic_udec_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) { ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v4, v3 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4 ; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 -; GFX9-NEXT: v_add_u32_e32 v3, -1, v4 ; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5] ; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc ; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc @@ -10464,8 +10455,7 @@ define i32 @global_atomic_udec_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %i ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mov_b32_e32 v5, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v5 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; SI-NEXT: v_subrev_i32_e32 v3, vcc, 1, v5 ; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v5, v2 ; SI-NEXT: s_or_b64 vcc, vcc, s[4:5] ; SI-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc @@ -10495,8 +10485,7 @@ define i32 @global_atomic_udec_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %i ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_mov_b32_e32 v1, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v1 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; VI-NEXT: v_subrev_u32_e32 v0, vcc, 1, v1 ; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2 ; VI-NEXT: s_or_b64 vcc, vcc, s[4:5] ; VI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc @@ -10520,9 +10509,8 @@ define i32 @global_atomic_udec_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %i ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v4, v3 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, 1, v4 ; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2 -; GFX9-NEXT: v_add_u32_e32 v3, -1, v4 ; GFX9-NEXT: s_or_b64 vcc, 
vcc, s[4:5] ; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc ; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc @@ -10560,8 +10548,7 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_scalar(ptr addrspace(1 ; SI-NEXT: .LBB148_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v1 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; SI-NEXT: v_subrev_i32_e32 v0, vcc, 1, v1 ; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v1 ; SI-NEXT: s_or_b64 vcc, vcc, s[36:37] ; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc @@ -10597,8 +10584,7 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_scalar(ptr addrspace(1 ; VI-NEXT: .LBB148_1: ; %atomicrmw.start ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v2, vcc, -1, v3 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 +; VI-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3 ; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3 ; VI-NEXT: s_or_b64 vcc, vcc, s[34:35] ; VI-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc @@ -10624,9 +10610,8 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_scalar(ptr addrspace(1 ; GFX9-NEXT: .LBB148_1: ; %atomicrmw.start ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v1 ; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v1 -; GFX9-NEXT: v_add_u32_e32 v0, -1, v1 ; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc @@ -10663,8 +10648,7 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_offset_scalar(ptr addr ; SI-NEXT: .LBB149_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v1 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; SI-NEXT: v_subrev_i32_e32 v0, vcc, 1, v1 ; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v1 ; SI-NEXT: s_or_b64 vcc, vcc, s[36:37] ; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc @@ -10702,8 +10686,7 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_offset_scalar(ptr addr ; VI-NEXT: .LBB149_1: ; %atomicrmw.start ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_add_u32_e32 v2, vcc, -1, v3 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3 +; VI-NEXT: v_subrev_u32_e32 v2, vcc, 1, v3 ; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3 ; VI-NEXT: s_or_b64 vcc, vcc, s[34:35] ; VI-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc @@ -10729,9 +10712,8 @@ define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_offset_scalar(ptr addr ; GFX9-NEXT: .LBB149_1: ; %atomicrmw.start ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v1 ; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v1 -; GFX9-NEXT: v_add_u32_e32 v0, -1, v1 ; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc @@ -10771,8 +10753,7 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_scalar(ptr addrspace(1) i ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; SI-NEXT: v_subrev_i32_e32 v0, vcc, 1, v4 ; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v4 ; SI-NEXT: 
s_or_b64 vcc, vcc, s[36:37] ; SI-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc @@ -10809,8 +10790,7 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_scalar(ptr addrspace(1) i ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_mov_b32_e32 v5, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v5 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; VI-NEXT: v_subrev_u32_e32 v0, vcc, 1, v5 ; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5 ; VI-NEXT: s_or_b64 vcc, vcc, s[34:35] ; VI-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc @@ -10836,9 +10816,8 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_scalar(ptr addrspace(1) i ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v4, v0 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v4 ; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v4 -; GFX9-NEXT: v_add_u32_e32 v0, -1, v4 ; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GFX9-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc ; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[3:4], s[4:5] glc @@ -10876,8 +10855,7 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_offset_scalar(ptr addrspa ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4 -; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; SI-NEXT: v_subrev_i32_e32 v0, vcc, 1, v4 ; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v4 ; SI-NEXT: s_or_b64 vcc, vcc, s[36:37] ; SI-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc @@ -10914,8 +10892,7 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_offset_scalar(ptr addrspa ; VI-NEXT: ; =>This Inner Loop Header: Depth=1 ; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: v_mov_b32_e32 v5, v0 -; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v5 -; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5 +; VI-NEXT: v_subrev_u32_e32 v0, vcc, 1, v5 ; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5 ; VI-NEXT: s_or_b64 vcc, vcc, s[34:35] ; VI-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc @@ -10941,9 +10918,8 @@ define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_offset_scalar(ptr addrspa ; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v4, v0 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4 +; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v4 ; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v4 -; GFX9-NEXT: v_add_u32_e32 v0, -1, v4 ; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35] ; GFX9-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc ; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[3:4], s[4:5] offset:16 glc diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll index 74f0f64c935b4..6a4c2849ba4a3 100644 --- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll +++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll @@ -1502,13 +1502,11 @@ define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB32_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_sub_i32_e32 v8, vcc, v10, v7 ; SI-NEXT: v_subb_u32_e32 v9, vcc, v11, v6, vcc ; SI-NEXT: s_waitcnt expcnt(0) @@ -1521,6 +1519,8 @@ define i64 
@global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB32_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1593,13 +1593,11 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB33_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_sub_i32_e32 v8, vcc, v10, v7 ; SI-NEXT: v_subb_u32_e32 v9, vcc, v11, v6, vcc ; SI-NEXT: s_waitcnt expcnt(0) @@ -1612,6 +1610,8 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB33_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -1883,43 +1883,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_scalar(ptr addrspace(1) inreg % ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v9, s6, 0 -; SI-NEXT: v_writelane_b32 v9, s7, 1 +; SI-NEXT: v_writelane_b32 v7, s6, 0 +; SI-NEXT: v_writelane_b32 v7, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 +; SI-NEXT: v_mov_b32_e32 v6, s35 ; SI-NEXT: .LBB36_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v8, v1 -; SI-NEXT: v_mov_b32_e32 v7, v0 -; SI-NEXT: v_subrev_i32_e32 v5, vcc, s34, v7 -; SI-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v5 -; SI-NEXT: v_mov_b32_e32 v1, v6 -; SI-NEXT: v_mov_b32_e32 v2, v7 -; SI-NEXT: v_mov_b32_e32 v3, v8 +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_subrev_i32_e32 v2, vcc, s34, v4 +; SI-NEXT: v_subb_u32_e32 v3, vcc, v5, v6, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB36_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; 
SI-NEXT: v_readlane_b32 s7, v9, 1 -; SI-NEXT: v_readlane_b32 s6, v9, 0 +; SI-NEXT: v_readlane_b32 s7, v7, 1 +; SI-NEXT: v_readlane_b32 s6, v7, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -1985,43 +1984,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v9, s6, 0 -; SI-NEXT: v_writelane_b32 v9, s7, 1 +; SI-NEXT: v_writelane_b32 v7, s6, 0 +; SI-NEXT: v_writelane_b32 v7, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 +; SI-NEXT: v_mov_b32_e32 v6, s35 ; SI-NEXT: .LBB37_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v8, v1 -; SI-NEXT: v_mov_b32_e32 v7, v0 -; SI-NEXT: v_subrev_i32_e32 v5, vcc, s34, v7 -; SI-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v5 -; SI-NEXT: v_mov_b32_e32 v1, v6 -; SI-NEXT: v_mov_b32_e32 v2, v7 -; SI-NEXT: v_mov_b32_e32 v3, v8 +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_subrev_i32_e32 v2, vcc, s34, v4 +; SI-NEXT: v_subb_u32_e32 v3, vcc, v5, v6, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB37_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v9, 1 -; SI-NEXT: v_readlane_b32 s6, v9, 0 +; SI-NEXT: v_readlane_b32 s7, v7, 1 +; SI-NEXT: v_readlane_b32 s6, v7, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -2342,13 +2340,11 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB42_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, 
v0 ; SI-NEXT: v_and_b32_e32 v9, v11, v6 ; SI-NEXT: v_and_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -2361,6 +2357,8 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB42_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2433,13 +2431,11 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB43_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_and_b32_e32 v9, v11, v6 ; SI-NEXT: v_and_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -2452,6 +2448,8 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB43_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2726,14 +2724,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg % ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB46_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v3, s34, v5 ; SI-NEXT: v_and_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -2745,6 +2740,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg % ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB46_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -2825,14 +2822,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB47_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v3, s34, v5 ; SI-NEXT: v_and_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -2844,6 +2838,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, 
v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB47_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3182,14 +3178,11 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB52_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, v11, v6 ; SI-NEXT: v_and_b32_e32 v1, v10, v7 ; SI-NEXT: v_not_b32_e32 v9, v0 @@ -3203,6 +3196,8 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB52_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3279,14 +3274,11 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB53_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, v11, v6 ; SI-NEXT: v_and_b32_e32 v1, v10, v7 ; SI-NEXT: v_not_b32_e32 v9, v0 @@ -3300,6 +3292,8 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB53_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3590,14 +3584,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB56_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, s34, v5 ; SI-NEXT: v_and_b32_e32 v1, s35, v4 ; SI-NEXT: v_not_b32_e32 v3, v0 @@ -3611,6 +3602,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz 
.LBB56_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3695,14 +3688,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB57_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, s34, v5 ; SI-NEXT: v_and_b32_e32 v1, s35, v4 ; SI-NEXT: v_not_b32_e32 v3, v0 @@ -3716,6 +3706,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB57_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -3891,14 +3883,11 @@ define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB59_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_and_b32_e32 v0, v11, v6 ; SI-NEXT: v_and_b32_e32 v1, v10, v7 ; SI-NEXT: v_not_b32_e32 v9, v0 @@ -3912,6 +3901,8 @@ define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB59_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4162,13 +4153,11 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB62_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_or_b32_e32 v9, v11, v6 ; SI-NEXT: v_or_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -4181,6 +4170,8 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB62_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4253,13 +4244,11 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: 
buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB63_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_or_b32_e32 v9, v11, v6 ; SI-NEXT: v_or_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -4272,6 +4261,8 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB63_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4546,14 +4537,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB66_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_or_b32_e32 v3, s34, v5 ; SI-NEXT: v_or_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -4565,6 +4553,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB66_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4645,14 +4635,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB67_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_or_b32_e32 v3, s34, v5 ; SI-NEXT: v_or_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -4664,6 +4651,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB67_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -4990,13 +4979,11 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB72_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; 
SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_xor_b32_e32 v9, v11, v6 ; SI-NEXT: v_xor_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -5009,6 +4996,8 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB72_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5081,13 +5070,11 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB73_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_xor_b32_e32 v9, v11, v6 ; SI-NEXT: v_xor_b32_e32 v8, v10, v7 ; SI-NEXT: s_waitcnt expcnt(0) @@ -5100,6 +5087,8 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB73_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5374,14 +5363,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg % ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB76_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_xor_b32_e32 v3, s34, v5 ; SI-NEXT: v_xor_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -5393,6 +5379,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg % ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB76_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5473,14 +5461,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1) ; SI-NEXT: s_mov_b32 s35, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB77_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_xor_b32_e32 v3, s34, v5 ; SI-NEXT: v_xor_b32_e32 v2, s35, v4 ; SI-NEXT: v_mov_b32_e32 v0, v2 @@ -5492,6 +5477,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1) 
; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB77_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5824,13 +5811,11 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB82_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -5844,6 +5829,8 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB82_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -5918,13 +5905,11 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB83_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -5938,6 +5923,8 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB83_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -6223,45 +6210,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_scalar(ptr addrspace(1) inreg % ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB86_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: 
s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB86_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -6331,45 +6318,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB87_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; 
SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB87_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -7176,13 +7163,11 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB96_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -7196,6 +7181,8 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB96_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7270,13 +7257,11 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB97_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -7290,6 +7275,8 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB97_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -7575,45 +7562,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_scalar(ptr addrspace(1) inreg ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; 
SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB100_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB100_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -7683,45 +7670,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB101_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; 
SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB101_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -8416,13 +8403,11 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB109_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -8436,6 +8421,8 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB109_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8510,13 +8497,11 @@ define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB110_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -8530,6 +8515,8 @@ define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB110_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -8815,45 +8802,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_scalar(ptr addrspace(1) inreg ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB113_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB113_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -8923,45 +8910,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: 
.LBB114_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB114_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -9292,13 +9279,11 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB119_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -9312,6 +9297,8 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB119_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -9386,13 +9373,11 @@ define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB120_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5] ; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc ; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc @@ -9406,6 +9391,8 @@ define i64 @global_atomic_min_i64_ret_offset(ptr 
addrspace(1) %out, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB120_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -9691,45 +9678,45 @@ define amdgpu_gfx i64 @global_atomic_min_i64_ret_scalar(ptr addrspace(1) inreg % ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB123_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB123_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -9799,45 +9786,45 @@ define amdgpu_gfx i64 @global_atomic_min_i64_ret_offset_scalar(ptr addrspace(1) ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, 
s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB124_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9] -; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc +; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[4:5] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB124_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[36:37] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -10645,14 +10632,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB133_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc ; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5] @@ -10667,6 +10651,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB133_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -10745,14 +10731,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s4, s6 ; SI-NEXT: s_mov_b32 s5, s6 -; SI-NEXT: 
buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[8:9], 0 ; SI-NEXT: .LBB134_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc ; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5] @@ -10767,6 +10750,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] ; SI-NEXT: s_cbranch_execnz .LBB134_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11065,14 +11050,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB137_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc ; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5] @@ -11087,6 +11069,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB137_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11173,14 +11157,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[36:37], 0 ; SI-NEXT: .LBB138_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v5, v1 -; SI-NEXT: v_mov_b32_e32 v4, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4 ; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc ; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5] @@ -11195,6 +11176,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[36:37] ; SI-NEXT: s_cbranch_execnz .LBB138_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11557,14 +11540,11 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: s_mov_b32 s11, 0xf000 ; SI-NEXT: s_mov_b32 s8, s10 ; SI-NEXT: s_mov_b32 s9, s10 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64 +; SI-NEXT: 
buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64 ; SI-NEXT: s_mov_b64 s[6:7], 0 ; SI-NEXT: .LBB143_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc ; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] @@ -11581,6 +11561,8 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) { ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[6:7] ; SI-NEXT: s_cbranch_execnz .LBB143_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -11663,14 +11645,11 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: s_mov_b32 s11, 0xf000 ; SI-NEXT: s_mov_b32 s8, s10 ; SI-NEXT: s_mov_b32 s9, s10 -; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64 offset:32 ; SI-NEXT: s_mov_b64 s[6:7], 0 ; SI-NEXT: .LBB144_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v1 -; SI-NEXT: v_mov_b32_e32 v10, v0 -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10 ; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc ; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11] @@ -11687,6 +11666,8 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i ; SI-NEXT: buffer_wbinvl1 ; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11] ; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; SI-NEXT: v_mov_b32_e32 v11, v1 +; SI-NEXT: v_mov_b32_e32 v10, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[6:7] ; SI-NEXT: s_cbranch_execnz .LBB144_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end @@ -12004,49 +11985,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_scalar(ptr addrspace(1) i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 ; SI-NEXT: s_mov_b64 s[38:39], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB147_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8 -; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc -; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] -; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9] +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4 +; SI-NEXT: 
v_addc_u32_e32 v1, vcc, -1, v5, vcc +; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] +; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5] ; SI-NEXT: s_or_b64 vcc, vcc, s[36:37] -; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[38:39] ; SI-NEXT: s_cbranch_execnz .LBB147_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[38:39] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] @@ -12124,49 +12104,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_offset_scalar(ptr addrspa ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v10, s6, 0 -; SI-NEXT: v_writelane_b32 v10, s7, 1 +; SI-NEXT: v_writelane_b32 v8, s6, 0 +; SI-NEXT: v_writelane_b32 v8, s7, 1 ; SI-NEXT: s_mov_b32 s35, s7 ; SI-NEXT: s_mov_b32 s34, s6 ; SI-NEXT: s_mov_b32 s7, 0xf000 ; SI-NEXT: s_mov_b32 s6, -1 -; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32 +; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32 ; SI-NEXT: s_mov_b64 s[38:39], 0 -; SI-NEXT: v_mov_b32_e32 v4, s35 -; SI-NEXT: v_mov_b32_e32 v5, s34 +; SI-NEXT: v_mov_b32_e32 v6, s35 +; SI-NEXT: v_mov_b32_e32 v7, s34 ; SI-NEXT: .LBB148_1: ; %atomicrmw.start ; SI-NEXT: ; =>This Inner Loop Header: Depth=1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mov_b32_e32 v9, v1 -; SI-NEXT: v_mov_b32_e32 v8, v0 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8 -; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc -; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9] -; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9] +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4 +; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v5, vcc +; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5] +; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5] ; SI-NEXT: s_or_b64 vcc, vcc, s[36:37] -; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc -; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc -; SI-NEXT: v_mov_b32_e32 v0, v6 -; SI-NEXT: v_mov_b32_e32 v1, v7 -; SI-NEXT: v_mov_b32_e32 v2, v8 -; SI-NEXT: v_mov_b32_e32 v3, v9 +; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc +; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, 
vcc +; SI-NEXT: v_mov_b32_e32 v0, v2 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v3, v5 ; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: buffer_wbinvl1 -; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9] +; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5] ; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39] +; SI-NEXT: v_mov_b32_e32 v5, v1 +; SI-NEXT: v_mov_b32_e32 v4, v0 ; SI-NEXT: s_andn2_b64 exec, exec, s[38:39] ; SI-NEXT: s_cbranch_execnz .LBB148_1 ; SI-NEXT: ; %bb.2: ; %atomicrmw.end ; SI-NEXT: s_or_b64 exec, exec, s[38:39] -; SI-NEXT: v_readlane_b32 s7, v10, 1 -; SI-NEXT: v_readlane_b32 s6, v10, 0 +; SI-NEXT: v_readlane_b32 s7, v8, 1 +; SI-NEXT: v_readlane_b32 s6, v8, 0 ; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[34:35] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] diff --git a/llvm/test/CodeGen/AMDGPU/hard-clauses.mir b/llvm/test/CodeGen/AMDGPU/hard-clauses.mir index 1341a5916df4b..ff8ca8688bb85 100644 --- a/llvm/test/CodeGen/AMDGPU/hard-clauses.mir +++ b/llvm/test/CodeGen/AMDGPU/hard-clauses.mir @@ -2,6 +2,7 @@ # RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s # RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefix=GFX11 # RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefix=GFX12 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - -debugify-and-strip-all-safe | FileCheck %s -check-prefix=GFX12 --- name: nop1 diff --git a/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll b/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll index 2c03113e8af47..10d61deed71cc 100644 --- a/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll +++ b/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll @@ -6,96 +6,134 @@ define void @main(i1 %arg) #0 { ; CHECK: ; %bb.0: ; %bb ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; CHECK-NEXT: buffer_store_dword v5, off, s[0:3], s32 ; 4-byte Folded Spill -; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s32 ; 4-byte Folded Spill +; CHECK-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill ; CHECK-NEXT: s_mov_b64 exec, s[4:5] -; CHECK-NEXT: v_writelane_b32 v5, s30, 0 -; CHECK-NEXT: v_writelane_b32 v5, s31, 1 -; CHECK-NEXT: v_writelane_b32 v5, s36, 2 -; CHECK-NEXT: v_writelane_b32 v5, s37, 3 -; CHECK-NEXT: v_writelane_b32 v5, s38, 4 -; CHECK-NEXT: v_writelane_b32 v5, s39, 5 -; CHECK-NEXT: v_writelane_b32 v5, s48, 6 -; CHECK-NEXT: v_writelane_b32 v5, s49, 7 -; CHECK-NEXT: v_writelane_b32 v5, s50, 8 -; CHECK-NEXT: v_writelane_b32 v5, s51, 9 -; CHECK-NEXT: v_writelane_b32 v5, s52, 10 -; CHECK-NEXT: v_writelane_b32 v5, s53, 11 -; CHECK-NEXT: v_writelane_b32 v5, s54, 12 -; CHECK-NEXT: v_writelane_b32 v5, s55, 13 -; CHECK-NEXT: s_getpc_b64 s[24:25] -; CHECK-NEXT: v_writelane_b32 v5, s64, 14 -; CHECK-NEXT: s_movk_i32 s4, 0xf0 -; CHECK-NEXT: s_mov_b32 s5, s24 -; CHECK-NEXT: v_writelane_b32 v5, s65, 15 -; 
CHECK-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x0 -; CHECK-NEXT: s_mov_b64 s[4:5], 0 -; CHECK-NEXT: v_writelane_b32 v5, s66, 16 -; CHECK-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0 -; CHECK-NEXT: v_writelane_b32 v5, s67, 17 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: s_movk_i32 s6, 0x130 -; CHECK-NEXT: s_mov_b32 s7, s24 -; CHECK-NEXT: v_writelane_b32 v5, s68, 18 -; CHECK-NEXT: s_load_dwordx16 s[36:51], s[6:7], 0x0 -; CHECK-NEXT: v_writelane_b32 v5, s69, 19 -; CHECK-NEXT: v_writelane_b32 v5, s70, 20 +; CHECK-NEXT: v_writelane_b32 v6, s30, 0 +; CHECK-NEXT: v_writelane_b32 v6, s31, 1 +; CHECK-NEXT: v_writelane_b32 v6, s36, 2 +; CHECK-NEXT: v_writelane_b32 v6, s37, 3 +; CHECK-NEXT: v_writelane_b32 v6, s38, 4 +; CHECK-NEXT: v_writelane_b32 v6, s39, 5 +; CHECK-NEXT: v_writelane_b32 v6, s48, 6 +; CHECK-NEXT: v_writelane_b32 v6, s49, 7 +; CHECK-NEXT: v_writelane_b32 v6, s50, 8 +; CHECK-NEXT: v_writelane_b32 v6, s51, 9 +; CHECK-NEXT: v_writelane_b32 v6, s52, 10 +; CHECK-NEXT: v_writelane_b32 v6, s53, 11 +; CHECK-NEXT: v_writelane_b32 v6, s54, 12 +; CHECK-NEXT: v_writelane_b32 v6, s55, 13 +; CHECK-NEXT: v_writelane_b32 v6, s64, 14 +; CHECK-NEXT: v_writelane_b32 v6, s65, 15 +; CHECK-NEXT: v_writelane_b32 v6, s66, 16 +; CHECK-NEXT: v_writelane_b32 v6, s67, 17 +; CHECK-NEXT: v_writelane_b32 v6, s68, 18 +; CHECK-NEXT: s_getpc_b64 s[4:5] +; CHECK-NEXT: s_mov_b64 s[8:9], 0 +; CHECK-NEXT: v_writelane_b32 v6, s69, 19 ; CHECK-NEXT: s_mov_b32 s68, 0 -; CHECK-NEXT: v_writelane_b32 v5, s71, 21 +; CHECK-NEXT: s_mov_b32 s69, s4 +; CHECK-NEXT: s_load_dwordx4 s[4:7], s[8:9], 0x0 +; CHECK-NEXT: s_load_dwordx8 s[24:31], s[68:69], 0x30 +; CHECK-NEXT: s_load_dwordx16 s[52:67], s[68:69], 0xf0 +; CHECK-NEXT: ; kill: killed $sgpr8_sgpr9 +; CHECK-NEXT: s_nop 0 +; CHECK-NEXT: s_load_dwordx16 s[8:23], s[68:69], 0x130 +; CHECK-NEXT: ; implicit-def: $vgpr7 : SGPR spill to VGPR lane +; CHECK-NEXT: v_writelane_b32 v6, s70, 20 +; CHECK-NEXT: v_writelane_b32 v6, s71, 21 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: v_mov_b32_e32 v1, s4 ; CHECK-NEXT: v_mov_b32_e32 v2, 0 +; CHECK-NEXT: v_writelane_b32 v7, s8, 0 +; CHECK-NEXT: v_writelane_b32 v7, s9, 1 +; CHECK-NEXT: v_writelane_b32 v7, s10, 2 +; CHECK-NEXT: v_writelane_b32 v7, s11, 3 +; CHECK-NEXT: v_writelane_b32 v7, s12, 4 +; CHECK-NEXT: v_writelane_b32 v7, s13, 5 +; CHECK-NEXT: v_writelane_b32 v7, s14, 6 +; CHECK-NEXT: v_writelane_b32 v7, s15, 7 +; CHECK-NEXT: v_writelane_b32 v7, s16, 8 +; CHECK-NEXT: v_writelane_b32 v7, s17, 9 +; CHECK-NEXT: v_writelane_b32 v7, s18, 10 +; CHECK-NEXT: v_writelane_b32 v7, s19, 11 +; CHECK-NEXT: v_writelane_b32 v7, s20, 12 +; CHECK-NEXT: v_writelane_b32 v7, s21, 13 +; CHECK-NEXT: v_writelane_b32 v7, s22, 14 +; CHECK-NEXT: v_writelane_b32 v7, s23, 15 +; CHECK-NEXT: v_writelane_b32 v7, s52, 16 +; CHECK-NEXT: v_writelane_b32 v7, s53, 17 +; CHECK-NEXT: v_writelane_b32 v7, s54, 18 +; CHECK-NEXT: v_writelane_b32 v7, s55, 19 +; CHECK-NEXT: v_writelane_b32 v7, s56, 20 +; CHECK-NEXT: v_writelane_b32 v7, s57, 21 +; CHECK-NEXT: v_writelane_b32 v7, s58, 22 +; CHECK-NEXT: v_writelane_b32 v7, s59, 23 +; CHECK-NEXT: v_writelane_b32 v7, s60, 24 +; CHECK-NEXT: v_writelane_b32 v7, s61, 25 +; CHECK-NEXT: v_writelane_b32 v7, s62, 26 +; CHECK-NEXT: v_writelane_b32 v7, s63, 27 +; CHECK-NEXT: v_writelane_b32 v7, s64, 28 +; CHECK-NEXT: v_writelane_b32 v7, s65, 29 +; CHECK-NEXT: v_writelane_b32 v7, s66, 30 +; CHECK-NEXT: s_load_dwordx16 s[8:23], s[68:69], 0x1f0 +; CHECK-NEXT: s_load_dwordx16 s[36:51], s[68:69], 0x2f0 ; CHECK-NEXT: s_mov_b32 s69, s68 ; CHECK-NEXT: 
s_mov_b32 s70, s68 ; CHECK-NEXT: s_mov_b32 s71, s68 -; CHECK-NEXT: image_sample_lz v3, v[1:2], s[16:23], s[68:71] dmask:0x1 +; CHECK-NEXT: v_writelane_b32 v7, s67, 31 +; CHECK-NEXT: image_sample_lz v3, v[1:2], s[60:67], s[68:71] dmask:0x1 +; CHECK-NEXT: v_readlane_b32 s52, v7, 0 ; CHECK-NEXT: v_mov_b32_e32 v1, v2 -; CHECK-NEXT: ; implicit-def: $vgpr6 : SGPR spill to VGPR lane -; CHECK-NEXT: s_mov_b32 s6, 48 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_writelane_b32 v6, s36, 0 -; CHECK-NEXT: v_writelane_b32 v6, s37, 1 -; CHECK-NEXT: v_writelane_b32 v6, s38, 2 -; CHECK-NEXT: v_writelane_b32 v6, s39, 3 -; CHECK-NEXT: v_writelane_b32 v6, s40, 4 -; CHECK-NEXT: v_writelane_b32 v6, s41, 5 -; CHECK-NEXT: image_sample_lz v4, v[1:2], s[36:43], s[68:71] dmask:0x1 -; CHECK-NEXT: v_writelane_b32 v6, s42, 6 -; CHECK-NEXT: v_writelane_b32 v6, s43, 7 -; CHECK-NEXT: v_writelane_b32 v6, s44, 8 -; CHECK-NEXT: v_writelane_b32 v6, s45, 9 -; CHECK-NEXT: v_writelane_b32 v6, s46, 10 -; CHECK-NEXT: v_writelane_b32 v6, s47, 11 -; CHECK-NEXT: v_writelane_b32 v6, s48, 12 -; CHECK-NEXT: v_writelane_b32 v6, s49, 13 -; CHECK-NEXT: v_writelane_b32 v6, s50, 14 -; CHECK-NEXT: s_movk_i32 s56, 0x1f0 -; CHECK-NEXT: s_movk_i32 s72, 0x2f0 -; CHECK-NEXT: s_mov_b32 s57, s24 -; CHECK-NEXT: s_mov_b32 s73, s24 -; CHECK-NEXT: v_writelane_b32 v6, s51, 15 -; CHECK-NEXT: s_load_dwordx8 s[24:31], s[6:7], 0x0 -; CHECK-NEXT: s_load_dwordx16 s[36:51], s[56:57], 0x0 -; CHECK-NEXT: v_and_b32_e32 v0, 1, v0 -; CHECK-NEXT: s_load_dwordx16 s[52:67], s[72:73], 0x0 -; CHECK-NEXT: v_cmp_ne_u32_e64 s[4:5], 1, v0 +; CHECK-NEXT: v_readlane_b32 s53, v7, 1 +; CHECK-NEXT: v_readlane_b32 s54, v7, 2 +; CHECK-NEXT: v_readlane_b32 s55, v7, 3 +; CHECK-NEXT: v_readlane_b32 s56, v7, 4 +; CHECK-NEXT: v_readlane_b32 s57, v7, 5 +; CHECK-NEXT: v_readlane_b32 s58, v7, 6 +; CHECK-NEXT: v_readlane_b32 s59, v7, 7 +; CHECK-NEXT: v_and_b32_e32 v5, 1, v0 +; CHECK-NEXT: v_cmp_ne_u32_e64 s[4:5], 1, v5 +; CHECK-NEXT: v_readlane_b32 s60, v7, 8 +; CHECK-NEXT: v_readlane_b32 s61, v7, 9 +; CHECK-NEXT: v_readlane_b32 s62, v7, 10 +; CHECK-NEXT: image_sample_lz v4, v[1:2], s[52:59], s[68:71] dmask:0x1 +; CHECK-NEXT: v_readlane_b32 s63, v7, 11 +; CHECK-NEXT: v_readlane_b32 s64, v7, 12 +; CHECK-NEXT: v_readlane_b32 s65, v7, 13 +; CHECK-NEXT: v_readlane_b32 s66, v7, 14 +; CHECK-NEXT: v_readlane_b32 s67, v7, 15 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: v_mul_f32_e32 v0, v4, v3 ; CHECK-NEXT: s_and_saveexec_b64 s[6:7], s[4:5] ; CHECK-NEXT: s_xor_b64 s[6:7], exec, s[6:7] ; CHECK-NEXT: s_cbranch_execz .LBB0_3 ; CHECK-NEXT: ; %bb.1: ; %bb48 -; CHECK-NEXT: image_sample_lz v3, v[1:2], s[16:23], s[68:71] dmask:0x1 -; CHECK-NEXT: v_mov_b32_e32 v1, v2 +; CHECK-NEXT: v_readlane_b32 s52, v7, 16 +; CHECK-NEXT: v_readlane_b32 s60, v7, 24 +; CHECK-NEXT: v_readlane_b32 s61, v7, 25 +; CHECK-NEXT: v_readlane_b32 s62, v7, 26 +; CHECK-NEXT: v_readlane_b32 s63, v7, 27 +; CHECK-NEXT: v_readlane_b32 s64, v7, 28 +; CHECK-NEXT: v_readlane_b32 s65, v7, 29 +; CHECK-NEXT: v_readlane_b32 s66, v7, 30 +; CHECK-NEXT: v_readlane_b32 s67, v7, 31 ; CHECK-NEXT: s_and_b64 vcc, exec, -1 +; CHECK-NEXT: v_readlane_b32 s53, v7, 17 +; CHECK-NEXT: v_readlane_b32 s54, v7, 18 +; CHECK-NEXT: v_readlane_b32 s55, v7, 19 +; CHECK-NEXT: v_readlane_b32 s56, v7, 20 +; CHECK-NEXT: image_sample_lz v3, v[1:2], s[60:67], s[68:71] dmask:0x1 +; CHECK-NEXT: v_mov_b32_e32 v1, v2 +; CHECK-NEXT: v_readlane_b32 s57, v7, 21 +; CHECK-NEXT: v_readlane_b32 s58, v7, 22 +; CHECK-NEXT: v_readlane_b32 s59, v7, 23 ; CHECK-NEXT: .LBB0_2: ; %bb50 
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_mov_b32 s69, s68 -; CHECK-NEXT: s_mov_b32 s70, s68 -; CHECK-NEXT: s_mov_b32 s71, s68 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: image_sample_lz v4, v[1:2], s[44:51], s[28:31] dmask:0x1 +; CHECK-NEXT: image_sample_lz v4, v[1:2], s[16:23], s[28:31] dmask:0x1 ; CHECK-NEXT: s_nop 0 -; CHECK-NEXT: image_sample_lz v1, v[1:2], s[60:67], s[68:71] dmask:0x1 +; CHECK-NEXT: image_sample_lz v1, v[1:2], s[44:51], s[68:71] dmask:0x1 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: v_sub_f32_e32 v1, v1, v4 ; CHECK-NEXT: v_mul_f32_e32 v1, v1, v0 @@ -106,47 +144,55 @@ define void @main(i1 %arg) #0 { ; CHECK-NEXT: s_andn2_saveexec_b64 s[6:7], s[6:7] ; CHECK-NEXT: s_cbranch_execz .LBB0_10 ; CHECK-NEXT: ; %bb.4: ; %bb32 +; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: s_and_saveexec_b64 s[16:17], s[4:5] ; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[16:17] ; CHECK-NEXT: s_cbranch_execz .LBB0_6 ; CHECK-NEXT: ; %bb.5: ; %bb43 ; CHECK-NEXT: s_mov_b32 s16, 0 ; CHECK-NEXT: s_mov_b32 s17, s16 -; CHECK-NEXT: v_mov_b32_e32 v2, s16 -; CHECK-NEXT: v_mov_b32_e32 v3, s17 +; CHECK-NEXT: v_mov_b32_e32 v0, s16 +; CHECK-NEXT: v_readlane_b32 s44, v7, 16 +; CHECK-NEXT: v_mov_b32_e32 v1, s17 ; CHECK-NEXT: s_mov_b32 s18, s16 ; CHECK-NEXT: s_mov_b32 s19, s16 -; CHECK-NEXT: image_sample_lz v1, v[2:3], s[8:15], s[16:19] dmask:0x1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: s_mov_b64 s[8:9], s[36:37] -; CHECK-NEXT: s_mov_b64 s[10:11], s[38:39] -; CHECK-NEXT: s_mov_b64 s[12:13], s[40:41] -; CHECK-NEXT: s_mov_b64 s[14:15], s[42:43] -; CHECK-NEXT: v_readlane_b32 s36, v6, 0 -; CHECK-NEXT: v_readlane_b32 s44, v6, 8 -; CHECK-NEXT: v_readlane_b32 s45, v6, 9 -; CHECK-NEXT: v_readlane_b32 s46, v6, 10 -; CHECK-NEXT: v_readlane_b32 s47, v6, 11 -; CHECK-NEXT: v_readlane_b32 s48, v6, 12 -; CHECK-NEXT: v_readlane_b32 s49, v6, 13 -; CHECK-NEXT: v_readlane_b32 s50, v6, 14 -; CHECK-NEXT: v_readlane_b32 s51, v6, 15 -; CHECK-NEXT: v_readlane_b32 s37, v6, 1 -; CHECK-NEXT: v_readlane_b32 s38, v6, 2 -; CHECK-NEXT: v_readlane_b32 s39, v6, 3 -; CHECK-NEXT: v_readlane_b32 s40, v6, 4 -; CHECK-NEXT: v_readlane_b32 s41, v6, 5 -; CHECK-NEXT: image_sample_lz v0, v[2:3], s[44:51], s[24:27] dmask:0x1 -; CHECK-NEXT: v_readlane_b32 s42, v6, 6 -; CHECK-NEXT: v_readlane_b32 s43, v6, 7 -; CHECK-NEXT: v_mov_b32_e32 v2, 0 -; CHECK-NEXT: s_mov_b64 s[42:43], s[14:15] -; CHECK-NEXT: v_mov_b32_e32 v3, v2 -; CHECK-NEXT: s_mov_b64 s[40:41], s[12:13] -; CHECK-NEXT: s_mov_b64 s[38:39], s[10:11] -; CHECK-NEXT: s_mov_b64 s[36:37], s[8:9] +; CHECK-NEXT: v_readlane_b32 s45, v7, 17 +; CHECK-NEXT: v_readlane_b32 s46, v7, 18 +; CHECK-NEXT: v_readlane_b32 s47, v7, 19 +; CHECK-NEXT: v_readlane_b32 s48, v7, 20 +; CHECK-NEXT: v_readlane_b32 s49, v7, 21 +; CHECK-NEXT: v_readlane_b32 s50, v7, 22 +; CHECK-NEXT: v_readlane_b32 s51, v7, 23 +; CHECK-NEXT: v_readlane_b32 s52, v7, 24 +; CHECK-NEXT: v_readlane_b32 s53, v7, 25 +; CHECK-NEXT: v_readlane_b32 s54, v7, 26 +; CHECK-NEXT: v_readlane_b32 s55, v7, 27 +; CHECK-NEXT: v_readlane_b32 s56, v7, 28 +; CHECK-NEXT: v_readlane_b32 s57, v7, 29 +; CHECK-NEXT: v_readlane_b32 s58, v7, 30 +; CHECK-NEXT: v_readlane_b32 s59, v7, 31 +; CHECK-NEXT: image_sample_lz v2, v[0:1], s[44:51], s[16:19] dmask:0x1 +; CHECK-NEXT: v_readlane_b32 s44, v7, 0 +; CHECK-NEXT: v_readlane_b32 s52, v7, 8 +; CHECK-NEXT: v_readlane_b32 s53, v7, 9 +; CHECK-NEXT: v_readlane_b32 s54, v7, 10 +; CHECK-NEXT: v_readlane_b32 s55, v7, 11 +; CHECK-NEXT: v_readlane_b32 s56, v7, 12 +; CHECK-NEXT: v_readlane_b32 s57, v7, 
13 +; CHECK-NEXT: v_readlane_b32 s58, v7, 14 +; CHECK-NEXT: v_readlane_b32 s59, v7, 15 +; CHECK-NEXT: v_mov_b32_e32 v3, 0 +; CHECK-NEXT: v_mov_b32_e32 v4, v3 +; CHECK-NEXT: v_readlane_b32 s45, v7, 1 +; CHECK-NEXT: v_readlane_b32 s46, v7, 2 +; CHECK-NEXT: v_readlane_b32 s47, v7, 3 +; CHECK-NEXT: image_sample_lz v0, v[0:1], s[52:59], s[24:27] dmask:0x1 +; CHECK-NEXT: v_readlane_b32 s48, v7, 4 +; CHECK-NEXT: v_readlane_b32 s49, v7, 5 +; CHECK-NEXT: v_readlane_b32 s50, v7, 6 +; CHECK-NEXT: v_readlane_b32 s51, v7, 7 ; CHECK-NEXT: s_waitcnt vmcnt(1) -; CHECK-NEXT: buffer_store_dwordx3 v[1:3], off, s[16:19], 0 +; CHECK-NEXT: buffer_store_dwordx3 v[2:4], off, s[16:19], 0 ; CHECK-NEXT: s_waitcnt vmcnt(1) ; CHECK-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0 ; CHECK-NEXT: ; implicit-def: $vgpr0 @@ -154,17 +200,16 @@ define void @main(i1 %arg) #0 { ; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5] ; CHECK-NEXT: s_cbranch_execz .LBB0_9 ; CHECK-NEXT: ; %bb.7: ; %bb33.preheader -; CHECK-NEXT: s_mov_b32 s8, 0 -; CHECK-NEXT: s_mov_b32 s12, s8 -; CHECK-NEXT: s_mov_b32 s13, s8 -; CHECK-NEXT: v_mov_b32_e32 v1, s12 -; CHECK-NEXT: s_mov_b32 s9, s8 -; CHECK-NEXT: s_mov_b32 s10, s8 -; CHECK-NEXT: s_mov_b32 s11, s8 -; CHECK-NEXT: v_mov_b32_e32 v2, s13 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: image_sample_lz v3, v[1:2], s[36:43], s[8:11] dmask:0x1 -; CHECK-NEXT: image_sample_lz v4, v[1:2], s[52:59], s[8:11] dmask:0x1 +; CHECK-NEXT: s_mov_b32 s16, 0 +; CHECK-NEXT: s_mov_b32 s20, s16 +; CHECK-NEXT: s_mov_b32 s21, s16 +; CHECK-NEXT: v_mov_b32_e32 v1, s20 +; CHECK-NEXT: s_mov_b32 s17, s16 +; CHECK-NEXT: s_mov_b32 s18, s16 +; CHECK-NEXT: s_mov_b32 s19, s16 +; CHECK-NEXT: v_mov_b32_e32 v2, s21 +; CHECK-NEXT: image_sample_lz v3, v[1:2], s[8:15], s[16:19] dmask:0x1 +; CHECK-NEXT: image_sample_lz v4, v[1:2], s[36:43], s[16:19] dmask:0x1 ; CHECK-NEXT: s_and_b64 vcc, exec, 0 ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: v_sub_f32_e32 v1, v4, v3 @@ -180,32 +225,32 @@ define void @main(i1 %arg) #0 { ; CHECK-NEXT: s_or_b64 exec, exec, s[4:5] ; CHECK-NEXT: .LBB0_10: ; %UnifiedReturnBlock ; CHECK-NEXT: s_or_b64 exec, exec, s[6:7] -; CHECK-NEXT: v_readlane_b32 s71, v5, 21 -; CHECK-NEXT: v_readlane_b32 s70, v5, 20 -; CHECK-NEXT: v_readlane_b32 s69, v5, 19 -; CHECK-NEXT: v_readlane_b32 s68, v5, 18 +; CHECK-NEXT: v_readlane_b32 s71, v6, 21 +; CHECK-NEXT: v_readlane_b32 s70, v6, 20 +; CHECK-NEXT: v_readlane_b32 s69, v6, 19 +; CHECK-NEXT: v_readlane_b32 s68, v6, 18 +; CHECK-NEXT: v_readlane_b32 s67, v6, 17 +; CHECK-NEXT: v_readlane_b32 s66, v6, 16 +; CHECK-NEXT: v_readlane_b32 s65, v6, 15 +; CHECK-NEXT: v_readlane_b32 s64, v6, 14 +; CHECK-NEXT: v_readlane_b32 s55, v6, 13 +; CHECK-NEXT: v_readlane_b32 s54, v6, 12 +; CHECK-NEXT: v_readlane_b32 s53, v6, 11 +; CHECK-NEXT: v_readlane_b32 s52, v6, 10 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_readlane_b32 s67, v5, 17 -; CHECK-NEXT: v_readlane_b32 s66, v5, 16 -; CHECK-NEXT: v_readlane_b32 s65, v5, 15 -; CHECK-NEXT: v_readlane_b32 s64, v5, 14 -; CHECK-NEXT: v_readlane_b32 s55, v5, 13 -; CHECK-NEXT: v_readlane_b32 s54, v5, 12 -; CHECK-NEXT: v_readlane_b32 s53, v5, 11 -; CHECK-NEXT: v_readlane_b32 s52, v5, 10 -; CHECK-NEXT: v_readlane_b32 s51, v5, 9 -; CHECK-NEXT: v_readlane_b32 s50, v5, 8 -; CHECK-NEXT: v_readlane_b32 s49, v5, 7 -; CHECK-NEXT: v_readlane_b32 s48, v5, 6 -; CHECK-NEXT: v_readlane_b32 s39, v5, 5 -; CHECK-NEXT: v_readlane_b32 s38, v5, 4 -; CHECK-NEXT: v_readlane_b32 s37, v5, 3 -; CHECK-NEXT: v_readlane_b32 s36, v5, 2 -; CHECK-NEXT: v_readlane_b32 s31, v5, 
1 -; CHECK-NEXT: v_readlane_b32 s30, v5, 0 +; CHECK-NEXT: v_readlane_b32 s51, v6, 9 +; CHECK-NEXT: v_readlane_b32 s50, v6, 8 +; CHECK-NEXT: v_readlane_b32 s49, v6, 7 +; CHECK-NEXT: v_readlane_b32 s48, v6, 6 +; CHECK-NEXT: v_readlane_b32 s39, v6, 5 +; CHECK-NEXT: v_readlane_b32 s38, v6, 4 +; CHECK-NEXT: v_readlane_b32 s37, v6, 3 +; CHECK-NEXT: v_readlane_b32 s36, v6, 2 +; CHECK-NEXT: v_readlane_b32 s31, v6, 1 +; CHECK-NEXT: v_readlane_b32 s30, v6, 0 ; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; CHECK-NEXT: buffer_load_dword v5, off, s[0:3], s32 ; 4-byte Folded Reload -; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 ; 4-byte Folded Reload +; CHECK-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload ; CHECK-NEXT: s_mov_b64 exec, s[4:5] ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: s_setpc_b64 s[30:31] diff --git a/llvm/test/CodeGen/AMDGPU/iglp-no-clobber.ll b/llvm/test/CodeGen/AMDGPU/iglp-no-clobber.ll index 9f5bbf834fdff..83e34906fa30c 100644 --- a/llvm/test/CodeGen/AMDGPU/iglp-no-clobber.ll +++ b/llvm/test/CodeGen/AMDGPU/iglp-no-clobber.ll @@ -43,7 +43,7 @@ loop: ; preds = %1, %.lr.ph %addr = phi ptr addrspace(1) [ null, %.lr.ph ], [ %gep, %loop ] %offset = phi i64 [ 0, %.lr.ph ], [ %nextOff, %loop ] %inc = phi i32 [0, %.lr.ph], [ %incCond, %loop ] - %rsrc = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) %addr, i16 0, i32 0, i32 0) + %rsrc = tail call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) %addr, i16 0, i64 0, i32 0) %load = tail call <2 x i32> @llvm.amdgcn.raw.ptr.buffer.load.v2i32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0) %load.bc = bitcast <2 x i32> %load to <8 x i8> %load.elem = extractelement <8 x i8> %load.bc, i64 0 @@ -63,6 +63,6 @@ end: ret void } -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) readnone, i16, i32, i32) #0 +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) readnone, i16, i64, i32) #0 declare <2 x i32> @llvm.amdgcn.raw.ptr.buffer.load.v2i32(ptr addrspace(8) nocapture readonly, i32, i32, i32 immarg) #1 diff --git a/llvm/test/CodeGen/AMDGPU/imm.ll b/llvm/test/CodeGen/AMDGPU/imm.ll index b764ee50c3978..21390003ee565 100644 --- a/llvm/test/CodeGen/AMDGPU/imm.ll +++ b/llvm/test/CodeGen/AMDGPU/imm.ll @@ -1969,9 +1969,10 @@ define amdgpu_kernel void @add_inline_imm_neg_1_f64(ptr addrspace(1) %out, [8 x ; GFX942-LABEL: add_inline_imm_neg_1_f64: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX942-NEXT: v_mov_b32_e32 v0, -1 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], -1 +; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2008,7 +2009,8 @@ define amdgpu_kernel void @add_inline_imm_neg_2_f64(ptr addrspace(1) %out, [8 x ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], -2 +; GFX942-NEXT: v_mov_b32_e32 v0, -2 +; GFX942-NEXT: v_mov_b32_e32 v1, -1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2045,7 +2047,8 @@ define amdgpu_kernel void @add_inline_imm_neg_16_f64(ptr addrspace(1) %out, [8 x ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; 
GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], -16 +; GFX942-NEXT: v_mov_b32_e32 v0, -16 +; GFX942-NEXT: v_mov_b32_e32 v1, -1 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2160,9 +2163,10 @@ define amdgpu_kernel void @store_inline_imm_0.0_f64(ptr addrspace(1) %out) { ; GFX942-LABEL: store_inline_imm_0.0_f64: ; GFX942: ; %bb.0: ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX942-NEXT: v_mov_b32_e32 v0, 0 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], 0 +; GFX942-NEXT: v_mov_b32_e32 v1, v0 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2235,7 +2239,8 @@ define amdgpu_kernel void @store_inline_imm_0.5_f64(ptr addrspace(1) %out) { ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], 0.5 +; GFX942-NEXT: v_mov_b32_e32 v0, 0 +; GFX942-NEXT: v_mov_b32_e32 v1, 0x3fe00000 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2271,7 +2276,8 @@ define amdgpu_kernel void @store_inline_imm_m_0.5_f64(ptr addrspace(1) %out) { ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], -0.5 +; GFX942-NEXT: v_mov_b32_e32 v0, 0 +; GFX942-NEXT: v_mov_b32_e32 v1, 0xbfe00000 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2307,7 +2313,8 @@ define amdgpu_kernel void @store_inline_imm_1.0_f64(ptr addrspace(1) %out) { ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], 1.0 +; GFX942-NEXT: v_mov_b32_e32 v0, 0 +; GFX942-NEXT: v_mov_b32_e32 v1, 0x3ff00000 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2343,7 +2350,8 @@ define amdgpu_kernel void @store_inline_imm_m_1.0_f64(ptr addrspace(1) %out) { ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], -1.0 +; GFX942-NEXT: v_mov_b32_e32 v0, 0 +; GFX942-NEXT: v_mov_b32_e32 v1, 0xbff00000 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2379,7 +2387,8 @@ define amdgpu_kernel void @store_inline_imm_2.0_f64(ptr addrspace(1) %out) { ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], 2.0 +; GFX942-NEXT: v_mov_b32_e32 v0, 0 +; GFX942-NEXT: v_mov_b32_e32 v1, 2.0 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2415,7 +2424,8 @@ define amdgpu_kernel void @store_inline_imm_m_2.0_f64(ptr addrspace(1) %out) { ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], -2.0 +; GFX942-NEXT: v_mov_b32_e32 v0, 0 +; GFX942-NEXT: v_mov_b32_e32 v1, -2.0 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2451,7 +2461,8 
@@ define amdgpu_kernel void @store_inline_imm_4.0_f64(ptr addrspace(1) %out) { ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], 4.0 +; GFX942-NEXT: v_mov_b32_e32 v0, 0 +; GFX942-NEXT: v_mov_b32_e32 v1, 0x40100000 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2487,7 +2498,8 @@ define amdgpu_kernel void @store_inline_imm_m_4.0_f64(ptr addrspace(1) %out) { ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], -4.0 +; GFX942-NEXT: v_mov_b32_e32 v0, 0 +; GFX942-NEXT: v_mov_b32_e32 v1, 0xc0100000 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm @@ -2523,7 +2535,8 @@ define amdgpu_kernel void @store_inv_2pi_f64(ptr addrspace(1) %out) { ; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; GFX942-NEXT: s_mov_b32 s3, 0xf000 ; GFX942-NEXT: s_mov_b32 s2, -1 -; GFX942-NEXT: v_mov_b64_e32 v[0:1], 0.15915494309189532 +; GFX942-NEXT: v_mov_b32_e32 v0, 0x6dc9c882 +; GFX942-NEXT: v_mov_b32_e32 v1, 0x3fc45f30 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GFX942-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll b/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll index 59dfd713ef4fd..bd11b0710fadd 100644 --- a/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll +++ b/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll @@ -11,8 +11,8 @@ define protected amdgpu_kernel void @InferNothing(i32 %a, ptr %b, double %c) { ; CHECK-NEXT: v_mov_b32_e32 v2, s2 ; CHECK-NEXT: v_mov_b32_e32 v3, s3 ; CHECK-NEXT: s_lshl_b64 s[2:3], s[6:7], 3 -; CHECK-NEXT: s_add_u32 s0, s2, s0 -; CHECK-NEXT: s_addc_u32 s1, s3, s1 +; CHECK-NEXT: s_add_u32 s0, s0, s2 +; CHECK-NEXT: s_addc_u32 s1, s1, s3 ; CHECK-NEXT: v_mov_b32_e32 v1, s1 ; CHECK-NEXT: v_add_co_u32_e64 v0, vcc, -8, s0 ; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v1, vcc @@ -69,13 +69,13 @@ define protected amdgpu_kernel void @InferMixed(i32 %a, ptr addrspace(1) %b, dou ; CHECK-NEXT: s_lshl_b64 s[2:3], s[6:7], 3 ; CHECK-NEXT: s_add_u32 s0, s0, s2 ; CHECK-NEXT: s_addc_u32 s1, s1, s3 +; CHECK-NEXT: s_add_u32 s0, s0, -8 +; CHECK-NEXT: s_addc_u32 s1, s1, -1 ; CHECK-NEXT: flat_atomic_add_f64 v[0:1], v[2:3] ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: buffer_wbinvl1_vol -; CHECK-NEXT: v_mov_b32_e32 v1, s1 -; CHECK-NEXT: v_add_co_u32_e64 v0, vcc, -7, s0 -; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v1, vcc -; CHECK-NEXT: flat_atomic_add_f64 v[0:1], v[2:3] +; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1] +; CHECK-NEXT: flat_atomic_add_f64 v[0:1], v[2:3] offset:1 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: buffer_wbinvl1_vol ; CHECK-NEXT: s_endpgm @@ -113,7 +113,7 @@ define protected amdgpu_kernel void @InferPHI(i32 %a, ptr addrspace(1) %b, doubl ; CHECK-NEXT: s_addc_u32 s1, s1, s5 ; CHECK-NEXT: s_add_u32 s4, s0, -8 ; CHECK-NEXT: s_addc_u32 s5, s1, -1 -; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 9 +; CHECK-NEXT: s_cmp_eq_u64 s[4:5], 1 ; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0 ; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1] ; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0 diff --git a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll index 
eb5c5ef15ed56..6b094247e113c 100644 --- a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll +++ b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll @@ -111,22 +111,20 @@ define amdgpu_kernel void @f2(i32 %arg, i32 %arg1, i32 %arg2, i1 %arg3, i32 %arg ; GFX11-NEXT: s_cbranch_scc0 .LBB2_8 ; GFX11-NEXT: ; %bb.5: ; %bb18.preheader ; GFX11-NEXT: s_load_b128 s[28:31], s[16:17], 0x44 +; GFX11-NEXT: v_mov_b32_e32 v0, 0 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_mul_hi_u32 s0, s29, s28 -; GFX11-NEXT: s_mul_i32 s1, s29, s28 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_alignbit_b32 v0, s0, s1, 1 +; GFX11-NEXT: s_mul_hi_u32 s1, s29, s28 +; GFX11-NEXT: s_mul_i32 s0, s29, s28 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_lshr_b64 s[0:1], s[0:1], 1 ; GFX11-NEXT: s_mov_b32 s1, 0 -; GFX11-NEXT: v_readfirstlane_b32 s0, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, 0 ; GFX11-NEXT: s_or_b32 s0, s0, 1 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: s_lshr_b32 s0, s0, s30 -; GFX11-NEXT: s_mul_i32 s0, s0, s22 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_mul_i32 s0, s0, s22 ; GFX11-NEXT: s_mul_i32 s0, s0, s20 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: s_or_b32 s0, s19, s0 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_lshl_b64 s[20:21], s[0:1], 1 ; GFX11-NEXT: s_mov_b32 s0, s1 ; GFX11-NEXT: global_load_u16 v1, v0, s[20:21] diff --git a/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll b/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll index 48bf7fbe0a3cb..3eef616ba267d 100644 --- a/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll +++ b/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll @@ -46,8 +46,8 @@ define void @use_extern_normal() #0 { ; CHECK-NEXT: s_ashr_i32 s5, s15, 31 ; CHECK-NEXT: v_mov_b32_e32 v0, 0x4048f5c3 ; CHECK-NEXT: s_lshl_b64 s[4:5], s[4:5], 2 -; CHECK-NEXT: s_add_u32 s4, s4, s6 -; CHECK-NEXT: s_addc_u32 s5, s5, s7 +; CHECK-NEXT: s_add_u32 s4, s6, s4 +; CHECK-NEXT: s_addc_u32 s5, s7, s5 ; CHECK-NEXT: s_load_dword s4, s[4:5], 0x0 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: v_mov_b32_e32 v1, s4 @@ -70,8 +70,8 @@ define void @use_extern_overalign() #0 { ; CHECK-NEXT: s_ashr_i32 s5, s15, 31 ; CHECK-NEXT: v_mov_b32_e32 v0, 0x42280000 ; CHECK-NEXT: s_lshl_b64 s[4:5], s[4:5], 2 -; CHECK-NEXT: s_add_u32 s4, s4, s6 -; CHECK-NEXT: s_addc_u32 s5, s5, s7 +; CHECK-NEXT: s_add_u32 s4, s6, s4 +; CHECK-NEXT: s_addc_u32 s5, s7, s5 ; CHECK-NEXT: s_load_dword s4, s[4:5], 0x0 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: v_mov_b32_e32 v1, s4 diff --git a/llvm/test/CodeGen/AMDGPU/lds-run-twice-absolute-md.ll b/llvm/test/CodeGen/AMDGPU/lds-run-twice-absolute-md.ll index 3f1dda53ef1b6..1fe29f699f31b 100644 --- a/llvm/test/CodeGen/AMDGPU/lds-run-twice-absolute-md.ll +++ b/llvm/test/CodeGen/AMDGPU/lds-run-twice-absolute-md.ll @@ -1,5 +1,3 @@ -; XFAIL: target={{.*}}-aix{{.*}} - ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds %s -o %t.ll ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds %t.ll -o %t.second.ll ; RUN: diff -ub %t.ll %t.second.ll -I ".*ModuleID.*" diff --git a/llvm/test/CodeGen/AMDGPU/lds-run-twice.ll b/llvm/test/CodeGen/AMDGPU/lds-run-twice.ll index 55280129c49ad..58228fd252322 100644 --- a/llvm/test/CodeGen/AMDGPU/lds-run-twice.ll +++ b/llvm/test/CodeGen/AMDGPU/lds-run-twice.ll @@ -1,5 +1,3 
@@ -; XFAIL: target={{.*}}-aix{{.*}} - ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds %s -o %t.ll ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds %t.ll -o %t.second.ll ; RUN: diff -ub %t.ll %t.second.ll -I ".*ModuleID.*" diff --git a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir index ca774825f4dde..fa52b96e9ea95 100644 --- a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir +++ b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir @@ -1,19 +1,9 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 # RUN: llc -mtriple=amdgcn -run-pass register-coalescer -o - %s | FileCheck %s -# Check that coalescer does not create wider register tuple than in source - -# CHECK: - { id: 2, class: vreg_64, preferred-register: '', flags: [ ] } -# CHECK: - { id: 3, class: vreg_64, preferred-register: '', flags: [ ] } -# CHECK: - { id: 4, class: vreg_64, preferred-register: '', flags: [ ] } -# CHECK: - { id: 5, class: vreg_96, preferred-register: '', flags: [ ] } -# CHECK: - { id: 6, class: vreg_96, preferred-register: '', flags: [ ] } -# CHECK: - { id: 7, class: vreg_128, preferred-register: '', flags: [ ] } -# CHECK: - { id: 8, class: vreg_128, preferred-register: '', flags: [ ] } +# Check that coalescer does not create wider register tuple than in +# source. # No more registers shall be defined -# CHECK-NEXT: liveins: -# CHECK: FLAT_STORE_DWORDX2 $vgpr0_vgpr1, %4, -# CHECK: FLAT_STORE_DWORDX3 $vgpr0_vgpr1, %6, - --- name: main alignment: 1 @@ -52,6 +42,23 @@ body: | bb.0.entry: liveins: $sgpr0, $vgpr0_vgpr1 + ; CHECK-LABEL: name: main + ; CHECK: liveins: $sgpr0, $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF + ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $sgpr0 + ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY [[DEF]].sub0 + ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0:vreg_64 = COPY [[COPY]].sub1 + ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:vreg_64 = COPY [[COPY]].sub0 + ; CHECK-NEXT: FLAT_STORE_DWORDX2 $vgpr0_vgpr1, [[COPY1]], 0, 0, implicit $exec, implicit $flat_scr + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vreg_96 = IMPLICIT_DEF + ; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:vreg_96 = COPY [[DEF1]] + ; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2:vreg_96 = COPY [[DEF]].sub0 + ; CHECK-NEXT: FLAT_STORE_DWORDX3 $vgpr0_vgpr1, [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr + ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vreg_128 = IMPLICIT_DEF + ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1_sub2:vreg_128 = COPY [[DEF2]] + ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub3:vreg_128 = COPY [[DEF]].sub0 + ; CHECK-NEXT: FLAT_STORE_DWORDX4 $vgpr0_vgpr1, [[COPY3]], 0, 0, implicit $exec, implicit $flat_scr %3 = IMPLICIT_DEF undef %4.sub0 = COPY $sgpr0 %4.sub1 = COPY %3.sub0 diff --git a/llvm/test/CodeGen/AMDGPU/literal64.ll b/llvm/test/CodeGen/AMDGPU/literal64.ll index 98691d394abb3..20b876836082e 100644 --- a/llvm/test/CodeGen/AMDGPU/literal64.ll +++ b/llvm/test/CodeGen/AMDGPU/literal64.ll @@ -5,7 +5,7 @@ define amdgpu_ps i64 @s_add_u64(i64 inreg %a) { ; GCN-LABEL: s_add_u64: ; GCN: ; %bb.0: -; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], lit64(0xf12345678) +; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0xf12345678 ; GCN-NEXT: ; return to shader part epilog %result = add i64 %a, 64729929336 ret i64 %result @@ -14,7 +14,7 @@ define amdgpu_ps i64 @s_add_u64(i64 inreg %a) { define amdgpu_ps void @v_add_u64(i64 %a, ptr addrspace(1) %out) { ; GCN-LABEL: v_add_u64: ; GCN: ; %bb.0: -; GCN-NEXT: v_add_nc_u64_e32 v[0:1], 
lit64(0xf12345678), v[0:1] +; GCN-NEXT: v_add_nc_u64_e32 v[0:1], 0xf12345678, v[0:1] ; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off ; GCN-NEXT: s_endpgm %result = add i64 %a, 64729929336 @@ -25,7 +25,7 @@ define amdgpu_ps void @v_add_u64(i64 %a, ptr addrspace(1) %out) { define amdgpu_ps i64 @s_add_neg_u64(i64 inreg %a) { ; GCN-LABEL: s_add_neg_u64: ; GCN: ; %bb.0: -; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], lit64(0xfffffff0edcba988) +; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0xfffffff0edcba988 ; GCN-NEXT: ; return to shader part epilog %result = sub i64 %a, 64729929336 ret i64 %result @@ -34,7 +34,7 @@ define amdgpu_ps i64 @s_add_neg_u64(i64 inreg %a) { define amdgpu_ps void @v_add_neg_u64(i64 %a, ptr addrspace(1) %out) { ; GCN-LABEL: v_add_neg_u64: ; GCN: ; %bb.0: -; GCN-NEXT: v_add_nc_u64_e32 v[0:1], lit64(0xfffffff0edcba988), v[0:1] +; GCN-NEXT: v_add_nc_u64_e32 v[0:1], 0xfffffff0edcba988, v[0:1] ; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off ; GCN-NEXT: s_endpgm %result = sub i64 %a, 64729929336 @@ -45,7 +45,7 @@ define amdgpu_ps void @v_add_neg_u64(i64 %a, ptr addrspace(1) %out) { define amdgpu_ps i64 @s_sub_u64(i64 inreg %a) { ; GCN-LABEL: s_sub_u64: ; GCN: ; %bb.0: -; GCN-NEXT: s_sub_nc_u64 s[0:1], lit64(0xf12345678), s[0:1] +; GCN-NEXT: s_sub_nc_u64 s[0:1], 0xf12345678, s[0:1] ; GCN-NEXT: ; return to shader part epilog %result = sub i64 64729929336, %a ret i64 %result @@ -54,7 +54,7 @@ define amdgpu_ps i64 @s_sub_u64(i64 inreg %a) { define amdgpu_ps void @v_sub_u64(i64 %a, ptr addrspace(1) %out) { ; GCN-LABEL: v_sub_u64: ; GCN: ; %bb.0: -; GCN-NEXT: v_sub_nc_u64_e32 v[0:1], lit64(0xf12345678), v[0:1] +; GCN-NEXT: v_sub_nc_u64_e32 v[0:1], 0xf12345678, v[0:1] ; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off ; GCN-NEXT: s_endpgm %result = sub i64 64729929336, %a @@ -67,7 +67,7 @@ define void @v_mov_b64_double(ptr addrspace(1) %ptr) { ; GCN: ; %bb.0: ; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333) +; GCN-NEXT: v_mov_b64_e32 v[2:3], 0x4063233333333333 ; GCN-NEXT: global_atomic_add_f64 v[0:1], v[2:3], off scope:SCOPE_SYS ; GCN-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fadd ptr addrspace(1) %ptr, double 153.1 monotonic @@ -79,7 +79,7 @@ define void @v_mov_b64_int(ptr addrspace(1) %ptr) { ; GCN: ; %bb.0: ; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0xf12345678) +; GCN-NEXT: v_mov_b64_e32 v[2:3], 0xf12345678 ; GCN-NEXT: global_atomic_add_u64 v[0:1], v[2:3], off scope:SCOPE_SYS ; GCN-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw add ptr addrspace(1) %ptr, i64 64729929336 monotonic @@ -91,7 +91,7 @@ define void @store_double(ptr addrspace(1) %ptr) { ; GCN: ; %bb.0: ; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333) +; GCN-NEXT: v_mov_b64_e32 v[2:3], 0x4063233333333333 ; GCN-NEXT: global_store_b64 v[0:1], v[2:3], off ; GCN-NEXT: s_set_pc_i64 s[30:31] store double 153.1, ptr addrspace(1) %ptr @@ -104,7 +104,7 @@ define i1 @class_f64() noinline optnone { ; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-SDAG-NEXT: s_wait_kmcnt 0x0 ; GCN-SDAG-NEXT: s_mov_b32 s2, 1 -; GCN-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0x4063233333333333) +; GCN-SDAG-NEXT: s_mov_b64 s[0:1], 0x4063233333333333 ; GCN-SDAG-NEXT: v_cmp_class_f64_e64 s0, s[0:1], s2 ; GCN-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0 ; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31] @@ -114,7 +114,7 @@ define i1 @class_f64() noinline optnone { 
; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-GISEL-NEXT: s_wait_kmcnt 0x0 ; GCN-GISEL-NEXT: s_mov_b32 s2, 1 -; GCN-GISEL-NEXT: s_mov_b64 s[0:1], lit64(0x4063233333333333) +; GCN-GISEL-NEXT: s_mov_b64 s[0:1], 0x4063233333333333 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] ; GCN-GISEL-NEXT: v_mov_b32_e32 v2, s2 ; GCN-GISEL-NEXT: v_cmp_class_f64_e64 s0, v[0:1], v2 @@ -131,7 +131,7 @@ define double @rsq_f64() { ; GCN: ; %bb.0: ; GCN-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_rsq_f64_e32 v[0:1], lit64(0x4063233333333333) +; GCN-NEXT: v_rsq_f64_e32 v[0:1], 0x4063233333333333 ; GCN-NEXT: s_set_pc_i64 s[30:31] %result = call double @llvm.amdgcn.rsq.f64(double 153.1) nounwind readnone ret double %result @@ -140,7 +140,7 @@ define double @rsq_f64() { define amdgpu_ps i64 @s_and_b64(i64 inreg %a) { ; GCN-LABEL: s_and_b64: ; GCN: ; %bb.0: -; GCN-NEXT: s_and_b64 s[0:1], s[0:1], lit64(0xf12345678) +; GCN-NEXT: s_and_b64 s[0:1], s[0:1], 0xf12345678 ; GCN-NEXT: ; return to shader part epilog %result = and i64 %a, 64729929336 ret i64 %result @@ -170,7 +170,7 @@ define amdgpu_ps void @v_and_b64(i64 %a, ptr addrspace(1) %out) { define amdgpu_ps <2 x float> @v_add_f64_200.1(double %a) { ; GCN-LABEL: v_add_f64_200.1: ; GCN: ; %bb.0: -; GCN-NEXT: v_add_f64_e32 v[0:1], lit64(0x4069033333333333), v[0:1] +; GCN-NEXT: v_add_f64_e32 v[0:1], 0x4069033333333333, v[0:1] ; GCN-NEXT: ; return to shader part epilog %add = fadd double %a, 200.1 %ret = bitcast double %add to <2 x float> @@ -194,14 +194,14 @@ define amdgpu_ps <2 x float> @v_add_f64_200.0(double %a) { define amdgpu_ps <2 x float> @v_lshl_add_u64(i64 %a) { ; GCN-SDAG-LABEL: v_lshl_add_u64: ; GCN-SDAG: ; %bb.0: -; GCN-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xf12345678) +; GCN-SDAG-NEXT: s_mov_b64 s[0:1], 0xf12345678 ; GCN-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GCN-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 1, s[0:1] ; GCN-SDAG-NEXT: ; return to shader part epilog ; ; GCN-GISEL-LABEL: v_lshl_add_u64: ; GCN-GISEL: ; %bb.0: -; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], lit64(0xf12345678) +; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], 0xf12345678 ; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GCN-GISEL-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 1, v[2:3] ; GCN-GISEL-NEXT: ; return to shader part epilog @@ -216,10 +216,10 @@ define amdgpu_ps <2 x float> @v_lshl_add_u64(i64 %a) { define amdgpu_ps <2 x float> @v_fma_f64(double %a, double %b) { ; GCN-SDAG-LABEL: v_fma_f64: ; GCN-SDAG: ; %bb.0: -; GCN-SDAG-NEXT: v_fmaak_f64 v[4:5], v[0:1], v[2:3], lit64(0x4063233333333333) -; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4069033333333333) +; GCN-SDAG-NEXT: v_fmaak_f64 v[4:5], v[0:1], v[2:3], 0x4063233333333333 +; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x4069033333333333 ; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GCN-SDAG-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], lit64(0x4069033333333333) +; GCN-SDAG-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], 0x4069033333333333 ; GCN-SDAG-NEXT: v_fmac_f64_e32 v[2:3], v[0:1], v[4:5] ; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GCN-SDAG-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3 @@ -227,11 +227,11 @@ define amdgpu_ps <2 x float> @v_fma_f64(double %a, double %b) { ; ; GCN-GISEL-LABEL: v_fma_f64: ; GCN-GISEL: ; %bb.0: -; GCN-GISEL-NEXT: v_mov_b64_e32 v[4:5], lit64(0x4063233333333333) +; GCN-GISEL-NEXT: v_mov_b64_e32 v[4:5], 0x4063233333333333 ; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GCN-GISEL-NEXT: 
v_fmac_f64_e32 v[4:5], v[0:1], v[2:3] -; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4069033333333333) -; GCN-GISEL-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], lit64(0x4069033333333333) +; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], 0x4069033333333333 +; GCN-GISEL-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], 0x4069033333333333 ; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GCN-GISEL-NEXT: v_fmac_f64_e32 v[2:3], v[0:1], v[4:5] ; GCN-GISEL-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3 @@ -246,7 +246,7 @@ define amdgpu_ps <2 x float> @v_fma_f64(double %a, double %b) { define amdgpu_ps <2 x float> @v_add_neg_f64(double %a) { ; GCN-SDAG-LABEL: v_add_neg_f64: ; GCN-SDAG: ; %bb.0: -; GCN-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0x4069033333333333) +; GCN-SDAG-NEXT: s_mov_b64 s[0:1], 0x4069033333333333 ; GCN-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GCN-SDAG-NEXT: v_add_f64_e64 v[0:1], -v[0:1], s[0:1] ; GCN-SDAG-NEXT: ; return to shader part epilog @@ -254,7 +254,7 @@ define amdgpu_ps <2 x float> @v_add_neg_f64(double %a) { ; GCN-GISEL-LABEL: v_add_neg_f64: ; GCN-GISEL: ; %bb.0: ; GCN-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4069033333333333) +; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], 0x4069033333333333 ; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GCN-GISEL-NEXT: v_add_f64_e64 v[0:1], -v[0:1], v[2:3] ; GCN-GISEL-NEXT: ; return to shader part epilog diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll index bc3d3785a68a4..3aa36635a0ab6 100644 --- a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll +++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll @@ -11,9 +11,9 @@ ; GCN-O0: require,require,require,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) -; GCN-O2: 
require,require,require,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require,si-opt-vgpr-liverange,require,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy,virt-reg-rewriter,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy,si-lower-wwm-copies,virt-reg-rewriter,amdgpu-reserve-wwm-regs,greedy,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O2: 
require,require,require,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require,si-opt-vgpr-liverange,require,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy,virt-reg-rewriter,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy,si-lower-wwm-copies,virt-reg-rewriter,amdgpu-reserve-wwm-regs,greedy,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) -; GCN-O3: 
require,require,require,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require,si-opt-vgpr-liverange,require,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy,virt-reg-rewriter,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy,si-lower-wwm-copies,virt-reg-rewriter,amdgpu-reserve-wwm-regs,greedy,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O3: 
require,require,require,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require,si-opt-vgpr-liverange,require,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy,virt-reg-rewriter,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy,si-lower-wwm-copies,virt-reg-rewriter,amdgpu-reserve-wwm-regs,greedy,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) define void @empty() { ret void diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll index 65d0102a9d0dc..6e5212580ba2e 100644 --- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll +++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll @@ -232,15 +232,15 @@ ; GCN-O1-NEXT: AMDGPU Preload Kernel Arguments ; GCN-O1-NEXT: 
FunctionPass Manager ; GCN-O1-NEXT: AMDGPU Lower Kernel Arguments +; GCN-O1-NEXT: Dominator Tree Construction +; GCN-O1-NEXT: Natural Loop Information +; GCN-O1-NEXT: CodeGen Prepare ; GCN-O1-NEXT: Lower buffer fat pointer operations to buffer resources ; GCN-O1-NEXT: AMDGPU lower intrinsics ; GCN-O1-NEXT: CallGraph Construction ; GCN-O1-NEXT: Call Graph SCC Pass Manager ; GCN-O1-NEXT: DummyCGSCCPass ; GCN-O1-NEXT: FunctionPass Manager -; GCN-O1-NEXT: Dominator Tree Construction -; GCN-O1-NEXT: Natural Loop Information -; GCN-O1-NEXT: CodeGen Prepare ; GCN-O1-NEXT: Lazy Value Information Analysis ; GCN-O1-NEXT: Lower SwitchInst's to branches ; GCN-O1-NEXT: Lower invoke and unwind, for unwindless code generators @@ -533,21 +533,21 @@ ; GCN-O1-OPTS-NEXT: AMDGPU Preload Kernel Arguments ; GCN-O1-OPTS-NEXT: FunctionPass Manager ; GCN-O1-OPTS-NEXT: AMDGPU Lower Kernel Arguments +; GCN-O1-OPTS-NEXT: Dominator Tree Construction +; GCN-O1-OPTS-NEXT: Natural Loop Information +; GCN-O1-OPTS-NEXT: CodeGen Prepare +; GCN-O1-OPTS-NEXT: Dominator Tree Construction +; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl) +; GCN-O1-OPTS-NEXT: Function Alias Analysis Results +; GCN-O1-OPTS-NEXT: Natural Loop Information +; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis +; GCN-O1-OPTS-NEXT: GPU Load and Store Vectorizer ; GCN-O1-OPTS-NEXT: Lower buffer fat pointer operations to buffer resources ; GCN-O1-OPTS-NEXT: AMDGPU lower intrinsics ; GCN-O1-OPTS-NEXT: CallGraph Construction ; GCN-O1-OPTS-NEXT: Call Graph SCC Pass Manager ; GCN-O1-OPTS-NEXT: DummyCGSCCPass ; GCN-O1-OPTS-NEXT: FunctionPass Manager -; GCN-O1-OPTS-NEXT: Dominator Tree Construction -; GCN-O1-OPTS-NEXT: Natural Loop Information -; GCN-O1-OPTS-NEXT: CodeGen Prepare -; GCN-O1-OPTS-NEXT: Dominator Tree Construction -; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl) -; GCN-O1-OPTS-NEXT: Function Alias Analysis Results -; GCN-O1-OPTS-NEXT: Natural Loop Information -; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis -; GCN-O1-OPTS-NEXT: GPU Load and Store Vectorizer ; GCN-O1-OPTS-NEXT: Lazy Value Information Analysis ; GCN-O1-OPTS-NEXT: Lower SwitchInst's to branches ; GCN-O1-OPTS-NEXT: Lower invoke and unwind, for unwindless code generators @@ -852,21 +852,21 @@ ; GCN-O2-NEXT: AMDGPU Preload Kernel Arguments ; GCN-O2-NEXT: FunctionPass Manager ; GCN-O2-NEXT: AMDGPU Lower Kernel Arguments +; GCN-O2-NEXT: Dominator Tree Construction +; GCN-O2-NEXT: Natural Loop Information +; GCN-O2-NEXT: CodeGen Prepare +; GCN-O2-NEXT: Dominator Tree Construction +; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl) +; GCN-O2-NEXT: Function Alias Analysis Results +; GCN-O2-NEXT: Natural Loop Information +; GCN-O2-NEXT: Scalar Evolution Analysis +; GCN-O2-NEXT: GPU Load and Store Vectorizer ; GCN-O2-NEXT: Lower buffer fat pointer operations to buffer resources ; GCN-O2-NEXT: AMDGPU lower intrinsics ; GCN-O2-NEXT: CallGraph Construction ; GCN-O2-NEXT: Call Graph SCC Pass Manager ; GCN-O2-NEXT: DummyCGSCCPass ; GCN-O2-NEXT: FunctionPass Manager -; GCN-O2-NEXT: Dominator Tree Construction -; GCN-O2-NEXT: Natural Loop Information -; GCN-O2-NEXT: CodeGen Prepare -; GCN-O2-NEXT: Dominator Tree Construction -; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl) -; GCN-O2-NEXT: Function Alias Analysis Results -; GCN-O2-NEXT: Natural Loop Information -; GCN-O2-NEXT: Scalar Evolution Analysis -; GCN-O2-NEXT: GPU Load and Store Vectorizer ; GCN-O2-NEXT: Lazy Value Information Analysis ; GCN-O2-NEXT: Lower SwitchInst's to branches ; GCN-O2-NEXT: Lower invoke and 
unwind, for unwindless code generators @@ -1186,21 +1186,21 @@ ; GCN-O3-NEXT: AMDGPU Preload Kernel Arguments ; GCN-O3-NEXT: FunctionPass Manager ; GCN-O3-NEXT: AMDGPU Lower Kernel Arguments +; GCN-O3-NEXT: Dominator Tree Construction +; GCN-O3-NEXT: Natural Loop Information +; GCN-O3-NEXT: CodeGen Prepare +; GCN-O3-NEXT: Dominator Tree Construction +; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl) +; GCN-O3-NEXT: Function Alias Analysis Results +; GCN-O3-NEXT: Natural Loop Information +; GCN-O3-NEXT: Scalar Evolution Analysis +; GCN-O3-NEXT: GPU Load and Store Vectorizer ; GCN-O3-NEXT: Lower buffer fat pointer operations to buffer resources ; GCN-O3-NEXT: AMDGPU lower intrinsics ; GCN-O3-NEXT: CallGraph Construction ; GCN-O3-NEXT: Call Graph SCC Pass Manager ; GCN-O3-NEXT: DummyCGSCCPass ; GCN-O3-NEXT: FunctionPass Manager -; GCN-O3-NEXT: Dominator Tree Construction -; GCN-O3-NEXT: Natural Loop Information -; GCN-O3-NEXT: CodeGen Prepare -; GCN-O3-NEXT: Dominator Tree Construction -; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl) -; GCN-O3-NEXT: Function Alias Analysis Results -; GCN-O3-NEXT: Natural Loop Information -; GCN-O3-NEXT: Scalar Evolution Analysis -; GCN-O3-NEXT: GPU Load and Store Vectorizer ; GCN-O3-NEXT: Lazy Value Information Analysis ; GCN-O3-NEXT: Lower SwitchInst's to branches ; GCN-O3-NEXT: Lower invoke and unwind, for unwindless code generators diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll index 847957dab72d9..fa6d878ad7556 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2 ; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -stop-after=amdgpu-isel < %s | FileCheck %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -enable-new-pm -stop-after=amdgpu-isel < %s | FileCheck %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -stop-after=amdgpu-isel < %s | FileCheck --check-prefix=CHECK45 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -enable-new-pm -stop-after=amdgpu-isel < %s | FileCheck --check-prefix=CHECK45 %s define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr inreg %p) { ; CHECK-LABEL: name: basic_raw_buffer @@ -24,7 +26,32 @@ define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr inreg %p) { ; CHECK-NEXT: $sgpr2 = COPY [[S_MOV_B32_2]] ; CHECK-NEXT: $sgpr3 = COPY [[S_MOV_B32_4]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i32 1234, i32 5678) + ; + ; CHECK45-LABEL: name: basic_raw_buffer + ; CHECK45: bb.0 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -6629298651489370112 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], killed [[S_MOV_B]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub1 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY3]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 
killed [[COPY4]], implicit $exec + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[COPY2]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY5]], implicit $exec + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 9 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 killed [[S_MOV_B32_]] + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -536870912 + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 killed [[S_MOV_B32_2]] + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: $sgpr2 = COPY [[S_MOV_B32_1]] + ; CHECK45-NEXT: $sgpr3 = COPY [[S_MOV_B32_3]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i64 1234, i32 5678) ret ptr addrspace(8) %rsrc } @@ -42,7 +69,22 @@ define amdgpu_ps float @read_raw_buffer(ptr addrspace(1) inreg %p) { ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 4, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) + ; + ; CHECK45-LABEL: name: read_raw_buffer + ; CHECK45: bb.0 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY3]], %subreg.sub0, killed [[COPY2]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, [[S_MOV_B32_]], %subreg.sub3 + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET killed [[REG_SEQUENCE1]], $sgpr_null, 4, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $vgpr0 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %p, i16 0, i64 0, i32 0) %loaded = call float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 4, i32 0, i32 0) ret float %loaded } @@ -71,117 +113,345 @@ define amdgpu_ps ptr addrspace(8) @basic_struct_buffer(ptr inreg %p) { ; CHECK-NEXT: $sgpr2 = COPY [[S_MOV_B32_3]] ; CHECK-NEXT: $sgpr3 = COPY [[S_MOV_B32_5]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i32 1234, i32 5678) + ; + ; CHECK45-LABEL: name: basic_struct_buffer + ; CHECK45: bb.0 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1 + ; CHECK45-NEXT: 
[[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -6629298651489370112 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], killed [[S_MOV_B]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub1 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY3]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY4]], implicit $exec + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[COPY2]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY5]], implicit $exec + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 9 + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 killed [[S_MOV_B32_]] + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -536854528 + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 killed [[S_MOV_B32_2]] + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: $sgpr2 = COPY [[S_MOV_B32_1]] + ; CHECK45-NEXT: $sgpr3 = COPY [[S_MOV_B32_3]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i64 1234, i32 5678) ret ptr addrspace(8) %rsrc } -define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr inreg %p, i32 inreg %numVals, i32 inreg %flags) { +define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr inreg %p, i64 inreg %numVals, i32 inreg %flags) { ; CHECK-LABEL: name: variable_top_half ; CHECK: bb.0 (%ir-block.0): - ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr4 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr4 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, killed [[DEF]], %subreg.sub1 + ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY2]], killed [[S_MOV_B32_]], implicit-def dead $scc ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 262144 ; CHECK-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 killed [[S_AND_B32_]], killed [[S_MOV_B32_1]], implicit-def dead $scc - ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[S_OR_B32_]] - ; CHECK-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY4]], implicit $exec - ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[COPY3]] - ; CHECK-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; CHECK-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; CHECK-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = 
COPY [[S_OR_B32_]] + ; CHECK-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY5]], implicit $exec + ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; CHECK-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY6]], implicit $exec + ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY3]] + ; CHECK-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; CHECK-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]] ; CHECK-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_]] - ; CHECK-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_1]] ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i32 %numVals, i32 %flags) + ; + ; CHECK45-LABEL: name: variable_top_half + ; CHECK45: bb.0 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr4 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr3 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 25 + ; CHECK45-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 killed [[COPY5]], killed [[S_MOV_B32_]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[S_MOV_B32_1]], %subreg.sub0, killed [[S_LSHL_B32_]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], killed [[REG_SEQUENCE2]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub1 + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 28 + ; CHECK45-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY]], killed [[S_MOV_B32_2]], implicit-def dead $scc + ; CHECK45-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK45-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[DEF]], %subreg.sub0, killed [[S_LSHL_B32_1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 7 + ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], killed [[S_MOV_B32_3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_LSHR_B64_]], killed [[REG_SEQUENCE3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 16384 + ; CHECK45-NEXT: [[DEF2:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK45-NEXT: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK45-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[DEF2]], %subreg.sub0, killed [[S_MOV_B32_4]], %subreg.sub1 + ; CHECK45-NEXT: 
[[S_OR_B64_2:%[0-9]+]]:sreg_64 = S_OR_B64 killed [[S_OR_B64_1]], killed [[REG_SEQUENCE4]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub1 + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY8]], implicit $exec + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY9]], implicit $exec + ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY11]], implicit $exec + ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:sreg_32 = COPY [[S_LSHR_B64_]].sub0 + ; CHECK45-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY12]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY13]], implicit $exec + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_3]] + ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i64 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc } -define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg %p, i16 inreg %stride, i32 inreg %numVals, i32 inreg %flags) { +define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg %p, i16 inreg %stride, i64 inreg %numVals, i32 inreg %flags) { ; CHECK-LABEL: name: general_case ; CHECK: bb.0 (%ir-block.0): - ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4 + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr5 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr4 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr5 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr3 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr1 ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, killed [[DEF]], %subreg.sub1 + ; CHECK-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY3]], killed [[S_MOV_B32_]], implicit-def dead $scc ; CHECK-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY2]], 16, implicit-def dead $scc ; CHECK-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 killed [[S_AND_B32_]], killed [[S_LSHL_B32_]], implicit-def dead $scc - ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_OR_B32_]] - ; CHECK-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY5]], implicit $exec - ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] - ; CHECK-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; CHECK-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; 
CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec - ; CHECK-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_OR_B32_]] + ; CHECK-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY6]], implicit $exec + ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; CHECK-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY7]], implicit $exec + ; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; CHECK-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; CHECK-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; CHECK-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; CHECK-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]] ; CHECK-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_]] - ; CHECK-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_1]] ; CHECK-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_3]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + ; + ; CHECK45-LABEL: name: general_case + ; CHECK45: bb.0 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr5 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr4 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr3 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 7 + ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], killed [[S_MOV_B32_]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY3]] + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 12 + ; CHECK45-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 killed [[COPY6]], killed [[S_MOV_B32_1]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, killed [[S_LSHL_B32_]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_LSHR_B64_]], killed [[REG_SEQUENCE2]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 28 + ; CHECK45-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY]], killed [[S_MOV_B32_3]], implicit-def dead $scc + ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, killed [[S_LSHL_B32_1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 killed [[S_OR_B64_]], killed [[REG_SEQUENCE3]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_1]].sub1 + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 + ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 25 + ; CHECK45-NEXT: [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = 
S_LSHL_B32 killed [[COPY8]], killed [[S_MOV_B32_4]], implicit-def dead $scc + ; CHECK45-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, killed [[S_LSHL_B32_2]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_2:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], killed [[REG_SEQUENCE4]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub1 + ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY10]], implicit $exec + ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY11]], implicit $exec + ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY12]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY13]], implicit $exec + ; CHECK45-NEXT: [[COPY14:%[0-9]+]]:sreg_32 = COPY [[S_LSHR_B64_]].sub0 + ; CHECK45-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY14]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY15]], implicit $exec + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_]] + ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_3]] + ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i64 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc } -define amdgpu_ps float @general_case_load(ptr inreg %p, i16 inreg %stride, i32 inreg %numVals, i32 inreg %flags) { +define amdgpu_ps float @general_case_load(ptr inreg %p, i16 inreg %stride, i64 inreg %numVals, i32 inreg %flags) { ; CHECK-LABEL: name: general_case_load ; CHECK: bb.0 (%ir-block.0): - ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4 + ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr5 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr4 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr5 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr3 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr2 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr1 ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, killed [[DEF]], %subreg.sub1 ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY3]], killed [[S_MOV_B32_]], implicit-def dead $scc ; CHECK-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY2]], 16, implicit-def dead $scc ; CHECK-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 killed [[S_AND_B32_]], killed [[S_LSHL_B32_]], implicit-def dead $scc - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, killed [[S_OR_B32_]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3 + ; CHECK-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, killed [[S_OR_B32_]], %subreg.sub1, killed [[COPY5]], %subreg.sub2, [[COPY]], 
%subreg.sub3 ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] - ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN [[COPY5]], killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) + ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN [[COPY6]], killed [[REG_SEQUENCE1]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + ; + ; CHECK45-LABEL: name: general_case_load + ; CHECK45: bb.0 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $sgpr4, $sgpr5 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr5 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr4 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr3 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr2 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 7 + ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], killed [[S_MOV_B32_]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY3]] + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 12 + ; CHECK45-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 killed [[COPY6]], killed [[S_MOV_B32_1]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, killed [[S_LSHL_B32_]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 killed [[S_LSHR_B64_]], killed [[REG_SEQUENCE2]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 28 + ; CHECK45-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY]], killed [[S_MOV_B32_3]], implicit-def dead $scc + ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, killed [[S_LSHL_B32_1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 killed [[S_OR_B64_]], killed [[REG_SEQUENCE3]], implicit-def dead $scc + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_1]].sub1 + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_1]].sub0 + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 + ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 25 + ; CHECK45-NEXT: [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = S_LSHL_B32 killed [[COPY9]], killed [[S_MOV_B32_4]], implicit-def dead $scc + ; CHECK45-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, killed [[S_LSHL_B32_2]], %subreg.sub1 + ; CHECK45-NEXT: [[S_OR_B64_2:%[0-9]+]]:sreg_64 = S_OR_B64 killed [[REG_SEQUENCE]], killed [[REG_SEQUENCE4]], implicit-def dead $scc + ; 
CHECK45-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub1 + ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub0 + ; CHECK45-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY11]], %subreg.sub0, killed [[COPY10]], %subreg.sub1, killed [[COPY8]], %subreg.sub2, killed [[COPY7]], %subreg.sub3 + ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]] + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN [[COPY12]], killed [[REG_SEQUENCE5]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $vgpr0 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i64 %numVals, i32 %flags) %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0) ret float %value } ; None of the components are uniform due to the lack of an inreg -define amdgpu_ps float @general_case_load_with_waterfall(ptr %p, i16 %stride, i32 %numVals, i32 %flags) { +define amdgpu_ps float @general_case_load_with_waterfall(ptr %p, i16 %stride, i64 %numVals, i32 %flags) { ; CHECK-LABEL: name: general_case_load_with_waterfall ; CHECK: bb.0 (%ir-block.0): - ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr5 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr5 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, killed [[DEF]], %subreg.sub1 ; CHECK-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 16, [[COPY2]], implicit $exec ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535 ; CHECK-NEXT: [[V_AND_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_OR_B32_e64 [[COPY3]], killed [[S_MOV_B32_]], killed [[V_LSHLREV_B32_e64_]], implicit $exec - ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, killed [[V_AND_OR_B32_e64_]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3 + ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, killed [[V_AND_OR_B32_e64_]], %subreg.sub1, killed [[COPY5]], %subreg.sub2, [[COPY]], %subreg.sub3 ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] - ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN [[COPY5]], killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) + ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] + ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_IDXEN [[COPY6]], killed [[REG_SEQUENCE1]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_IDXEN]] ; CHECK-NEXT: 
SI_RETURN_TO_EPILOG $vgpr0 - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i32 %numVals, i32 %flags) + ; + ; CHECK45-LABEL: name: general_case_load_with_waterfall + ; CHECK45: bb.0 (%ir-block.0): + ; CHECK45-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; CHECK45-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 28 + ; CHECK45-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 killed [[S_MOV_B32_]], [[COPY]], implicit $exec + ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, killed [[V_LSHLREV_B32_e64_]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE2]].sub1 + ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 7 + ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; CHECK45-NEXT: [[V_LSHRREV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_LSHRREV_B64_e64 killed [[S_MOV_B32_2]], [[COPY7]], implicit $exec + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[V_LSHRREV_B64_e64_]].sub1 + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY3]] + ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 12 + ; CHECK45-NEXT: [[V_LSHLREV_B32_e64_1:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 killed [[S_MOV_B32_3]], killed [[COPY9]], implicit $exec + ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, killed [[V_LSHLREV_B32_e64_1]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE3]].sub1 + ; CHECK45-NEXT: [[V_OR3_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR3_B32_e64 killed [[COPY8]], killed [[COPY10]], killed [[COPY6]], implicit $exec + ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE2]].sub0 + ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[V_LSHRREV_B64_e64_]].sub0 + ; CHECK45-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE3]].sub0 + ; CHECK45-NEXT: [[V_OR3_B32_e64_1:%[0-9]+]]:vgpr_32 = V_OR3_B32_e64 killed [[COPY12]], killed [[COPY13]], killed [[COPY11]], implicit $exec + ; CHECK45-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[V_OR3_B32_e64_1]], %subreg.sub0, killed [[V_OR3_B32_e64_]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE4]].sub1 + ; CHECK45-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE4]].sub0 + ; CHECK45-NEXT: [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub1 + ; CHECK45-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 + ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 25 + ; CHECK45-NEXT: [[V_LSHLREV_B32_e64_2:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 killed [[S_MOV_B32_4]], killed [[COPY17]], implicit $exec + ; CHECK45-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, killed [[V_LSHLREV_B32_e64_2]], %subreg.sub1 + ; CHECK45-NEXT: 
[[COPY18:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE5]].sub1 + ; CHECK45-NEXT: [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 killed [[COPY16]], killed [[COPY18]], implicit $exec + ; CHECK45-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE5]].sub0 + ; CHECK45-NEXT: [[V_OR_B32_e64_1:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 killed [[COPY19]], killed [[COPY20]], implicit $exec + ; CHECK45-NEXT: [[REG_SEQUENCE6:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[V_OR_B32_e64_1]], %subreg.sub0, killed [[V_OR_B32_e64_]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE6]].sub1 + ; CHECK45-NEXT: [[COPY22:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE6]].sub0 + ; CHECK45-NEXT: [[REG_SEQUENCE7:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY22]], %subreg.sub0, killed [[COPY21]], %subreg.sub1, killed [[COPY15]], %subreg.sub2, killed [[COPY14]], %subreg.sub3 + ; CHECK45-NEXT: [[COPY23:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]] + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_IDXEN [[COPY23]], killed [[REG_SEQUENCE7]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.rsrc, align 1, addrspace 8) + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_IDXEN]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $vgpr0 + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i64 %numVals, i32 %flags) %value = call float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0, i32 0) ret float %value } @@ -200,7 +470,22 @@ define amdgpu_ps float @read_buffer_fat_ptr_p0(ptr inreg %p) { ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 - %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 0, i32 0, i32 0) + ; + ; CHECK45-LABEL: name: read_buffer_fat_ptr_p0 + ; CHECK45: bb.0 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY3]], %subreg.sub0, killed [[COPY2]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, [[S_MOV_B32_]], %subreg.sub3 + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET killed [[REG_SEQUENCE1]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $vgpr0 + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %p, i16 0, i64 0, i32 0) %loaded = load float, ptr addrspace(7) %ptr ret float %loaded } @@ -219,14 +504,29 @@ define amdgpu_ps float @read_buffer_fat_ptr_p1(ptr addrspace(1) inreg %p) { ; CHECK-NEXT: 
[[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET killed [[REG_SEQUENCE]], [[S_MOV_B32_1]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) ; CHECK-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]] ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0 - %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %p, i16 0, i32 0, i32 0) + ; + ; CHECK45-LABEL: name: read_buffer_fat_ptr_p1 + ; CHECK45: bb.0 (%ir-block.0): + ; CHECK45-NEXT: liveins: $sgpr0, $sgpr1 + ; CHECK45-NEXT: {{ $}} + ; CHECK45-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr1 + ; CHECK45-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr0 + ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1 + ; CHECK45-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1 + ; CHECK45-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY3]], %subreg.sub0, killed [[COPY2]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, [[S_MOV_B32_]], %subreg.sub3 + ; CHECK45-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFSET killed [[REG_SEQUENCE1]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr, align 1, addrspace 8) + ; CHECK45-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFSET]] + ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $vgpr0 + %ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %p, i16 0, i64 0, i32 0) %loaded = load float, ptr addrspace(7) %ptr ret float %loaded } -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr nocapture readnone, i16, i32, i32) -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) -declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr nocapture readnone, i16, i32, i32) -declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr nocapture readnone, i16, i64, i32) +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) nocapture readnone, i16, i64, i32) +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr nocapture readnone, i16, i64, i32) +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) nocapture readnone, i16, i64, i32) declare float @llvm.amdgcn.raw.ptr.buffer.load(ptr addrspace(8) nocapture readonly, i32, i32, i32 immarg) declare float @llvm.amdgcn.struct.ptr.buffer.load(ptr addrspace(8) nocapture readonly, i32, i32, i32, i32 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll index de7d2346a0b42..b9bf76c1423b6 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck 
-check-prefixes=GFX11,GFX11-GISEL %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG %s declare i32 @llvm.amdgcn.s.quadmask.i32(i32) declare i64 @llvm.amdgcn.s.quadmask.i64(i64) @@ -172,3 +172,91 @@ entry: %qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask) ret i64 %qm } + +;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b32 implicitly defines SCC. +define amdgpu_kernel void @test_scc_quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) { +; GFX11-GISEL-LABEL: test_scc_quadmask_32: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: s_and_b32 s0, s0, 1 +; GFX11-GISEL-NEXT: s_quadmask_b32 s1, s1 +; GFX11-GISEL-NEXT: s_cmp_eq_u32 s0, 0 +; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, s1 +; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0 +; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v4, s0 +; GFX11-GISEL-NEXT: global_store_b32 v2, v3, s[2:3] +; GFX11-GISEL-NEXT: global_store_b32 v[0:1], v4, off +; GFX11-GISEL-NEXT: s_endpgm +; +; GFX11-SDAG-LABEL: test_scc_quadmask_32: +; GFX11-SDAG: ; %bb.0: +; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-NEXT: s_and_b32 s0, s0, 1 +; GFX11-SDAG-NEXT: s_quadmask_b32 s1, s1 +; GFX11-SDAG-NEXT: s_cmp_eq_u32 s0, 0 +; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1 +; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0 +; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0 +; GFX11-SDAG-NEXT: global_store_b32 v2, v3, s[2:3] +; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v4, off +; GFX11-SDAG-NEXT: s_endpgm + %and = and i32 %val0, 1 + %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone + store i32 %result, ptr addrspace(1) %ptr + %cmp = icmp eq i32 %and, 0 + %sel = select i1 %cmp, i32 1, i32 0 + store i32 %sel, ptr addrspace(1) null, align 4 + ret void +} + +;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b64 implicitly defines SCC. 
+define amdgpu_kernel void @test_scc_quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) { +; GFX11-GISEL-LABEL: test_scc_quadmask_64: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_clause 0x1 +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c +; GFX11-GISEL-NEXT: s_load_b32 s4, s[4:5], 0x24 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: s_quadmask_b64 s[0:1], s[0:1] +; GFX11-GISEL-NEXT: s_and_b32 s4, s4, 1 +; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX11-GISEL-NEXT: s_cmp_eq_u32 s4, 0 +; GFX11-GISEL-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v1, s1 +; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0 +; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, s0 +; GFX11-GISEL-NEXT: v_mov_b32_e32 v3, 0 +; GFX11-GISEL-NEXT: global_store_b64 v4, v[0:1], s[2:3] +; GFX11-GISEL-NEXT: global_store_b32 v[2:3], v5, off +; GFX11-GISEL-NEXT: s_endpgm +; +; GFX11-SDAG-LABEL: test_scc_quadmask_64: +; GFX11-SDAG: ; %bb.0: +; GFX11-SDAG-NEXT: s_clause 0x1 +; GFX11-SDAG-NEXT: s_load_b32 s6, s[4:5], 0x24 +; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c +; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, 0 +; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-NEXT: s_and_b32 s4, s6, 1 +; GFX11-SDAG-NEXT: s_quadmask_b64 s[0:1], s[0:1] +; GFX11-SDAG-NEXT: s_cmp_eq_u32 s4, 0 +; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1 +; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0 +; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v5, 0, 1, s0 +; GFX11-SDAG-NEXT: global_store_b64 v4, v[2:3], s[2:3] +; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v5, off +; GFX11-SDAG-NEXT: s_endpgm + %and = and i32 %val0, 1 + %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone + store i64 %result, ptr addrspace(1) %ptr + %cmp = icmp eq i32 %and, 0 + %sel = select i1 %cmp, i32 1, i32 0 + store i32 %sel, ptr addrspace(1) null, align 4 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.load.ll index f01e85a2e4a02..65111f14cab45 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.load.ll @@ -4,7 +4,7 @@ ;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1010 | FileCheck -check-prefix=GFX10 %s ;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 | FileCheck -check-prefix=GFX11 %s ;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefix=GFX12 %s -;RUN: llc < %s -global-isel -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefix=GFX12 %s +;RUN: llc < %s -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefix=GFX12 %s define amdgpu_vs {<4 x float>, <4 x float>, <4 x float>, <4 x float>} @tbuffer_load(<4 x i32> inreg) { ; PREGFX10-LABEL: tbuffer_load: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.load.ll index b555c37d15703..a6afb757cd6c0 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.load.ll @@ -4,7 +4,7 @@ ;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1010 | FileCheck -check-prefixes=GFX10 %s ;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 | FileCheck -check-prefixes=GFX11 %s ;RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s -;RUN: llc < %s -global-isel -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s +;RUN: llc < %s 
-global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s define amdgpu_vs {<4 x float>, <4 x float>, <4 x float>, <4 x float>} @tbuffer_load(<4 x i32> inreg) { ; PREGFX10-LABEL: tbuffer_load: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.prefetch.ll b/llvm/test/CodeGen/AMDGPU/llvm.prefetch.ll index 6e24a6a348f2c..c265b05813ee7 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.prefetch.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.prefetch.ll @@ -87,7 +87,7 @@ define amdgpu_ps void @prefetch_data_sgpr_min_offset(ptr addrspace(4) inreg %ptr ; ; GFX1250-SPREFETCH-SDAG-LABEL: prefetch_data_sgpr_min_offset: ; GFX1250-SPREFETCH-SDAG: ; %bb.0: ; %entry -; GFX1250-SPREFETCH-SDAG-NEXT: s_mov_b64 s[2:3], lit64(0xffffffffff800000) +; GFX1250-SPREFETCH-SDAG-NEXT: s_mov_b64 s[2:3], 0xffffffffff800000 ; GFX1250-SPREFETCH-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SPREFETCH-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3] ; GFX1250-SPREFETCH-SDAG-NEXT: s_prefetch_data s[0:1], 0x0, null, 0 @@ -424,7 +424,7 @@ define amdgpu_ps void @prefetch_inst_sgpr_min_offset(ptr addrspace(4) inreg %ptr ; ; GFX1250-SPREFETCH-SDAG-LABEL: prefetch_inst_sgpr_min_offset: ; GFX1250-SPREFETCH-SDAG: ; %bb.0: ; %entry -; GFX1250-SPREFETCH-SDAG-NEXT: s_mov_b64 s[2:3], lit64(0xffffffffff800000) +; GFX1250-SPREFETCH-SDAG-NEXT: s_mov_b64 s[2:3], 0xffffffffff800000 ; GFX1250-SPREFETCH-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SPREFETCH-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3] ; GFX1250-SPREFETCH-SDAG-NEXT: s_prefetch_inst s[0:1], 0x0, null, 0 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll index ba5ce8bb5fae7..8bb7274c84620 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll @@ -76,13 +76,12 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) % ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: s_mov_b64 s[4:5], s[2:3] ; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 -; SI-NEXT: s_movk_i32 s4, 0xfc01 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_mov_b32 s3, 0xfffff ; SI-NEXT: v_mov_b32_e32 v8, 0x3ff00000 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_bfe_u32 v4, v3, 20, 11 -; SI-NEXT: v_add_i32_e32 v6, vcc, s4, v4 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0xfffffc01, v4 ; SI-NEXT: v_lshr_b64 v[4:5], s[2:3], v6 ; SI-NEXT: v_and_b32_e32 v7, 0x80000000, v3 ; SI-NEXT: v_bfi_b32 v5, v5, 0, v3 diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll index 5b2213592f495..f93e5f06beff9 100644 --- a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll +++ b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll @@ -10159,14 +10159,14 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; ; GFX8-LABEL: constant_sextload_v64i1_to_v64i64: ; GFX8: ; %bb.0: -; GFX8-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 +; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX8-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_load_dwordx2 s[2:3], s[10:11], 0x0 +; GFX8-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s0, s3, 8 +; GFX8-NEXT: s_lshr_b32 s4, s3, 8 ; GFX8-NEXT: s_lshr_b32 s48, s3, 15 -; GFX8-NEXT: v_writelane_b32 v62, s0, 0 +; GFX8-NEXT: v_writelane_b32 v62, s4, 0 ; GFX8-NEXT: s_lshr_b32 s74, s3, 30 ; GFX8-NEXT: s_lshr_b32 s30, s3, 31 ; GFX8-NEXT: s_lshr_b32 s72, s3, 28 @@ -10186,11 +10186,11 
@@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX8-NEXT: s_lshr_b32 s58, s3, 14 ; GFX8-NEXT: s_lshr_b32 s62, s3, 12 ; GFX8-NEXT: s_lshr_b32 s54, s3, 10 -; GFX8-NEXT: v_writelane_b32 v62, s1, 1 -; GFX8-NEXT: s_lshr_b32 s0, s3, 9 +; GFX8-NEXT: v_writelane_b32 v62, s5, 1 +; GFX8-NEXT: s_lshr_b32 s4, s3, 9 ; GFX8-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x10000 ; GFX8-NEXT: s_lshr_b32 s52, s3, 11 -; GFX8-NEXT: v_writelane_b32 v62, s0, 2 +; GFX8-NEXT: v_writelane_b32 v62, s4, 2 ; GFX8-NEXT: s_bfe_i64 s[60:61], s[60:61], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[64:65], s[64:65], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[68:69], s[68:69], 0x10000 @@ -10213,8 +10213,8 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX8-NEXT: v_mov_b32_e32 v34, s48 ; GFX8-NEXT: s_lshr_b32 s48, s2, 1 ; GFX8-NEXT: s_lshr_b32 s50, s3, 13 -; GFX8-NEXT: v_writelane_b32 v62, s1, 3 -; GFX8-NEXT: s_lshr_b32 s6, s3, 6 +; GFX8-NEXT: v_writelane_b32 v62, s5, 3 +; GFX8-NEXT: s_lshr_b32 s8, s3, 6 ; GFX8-NEXT: s_lshr_b32 s10, s3, 7 ; GFX8-NEXT: s_lshr_b32 s12, s3, 4 ; GFX8-NEXT: s_lshr_b32 s14, s3, 5 @@ -10264,8 +10264,8 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX8-NEXT: s_lshr_b32 s54, s2, 13 ; GFX8-NEXT: s_lshr_b32 s52, s2, 10 ; GFX8-NEXT: v_mov_b32_e32 v30, s46 -; GFX8-NEXT: s_lshr_b32 s4, s2, 11 -; GFX8-NEXT: s_lshr_b32 s0, s2, 8 +; GFX8-NEXT: s_lshr_b32 s6, s2, 11 +; GFX8-NEXT: s_lshr_b32 s4, s2, 8 ; GFX8-NEXT: s_lshr_b32 s46, s2, 9 ; GFX8-NEXT: s_lshr_b32 s44, s2, 6 ; GFX8-NEXT: s_lshr_b32 s42, s2, 7 @@ -10278,14 +10278,12 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX8-NEXT: v_writelane_b32 v62, s2, 4 ; GFX8-NEXT: v_writelane_b32 v62, s3, 5 ; GFX8-NEXT: v_readlane_b32 s2, v62, 2 -; GFX8-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x10000 ; GFX8-NEXT: v_readlane_b32 s3, v62, 3 -; GFX8-NEXT: v_mov_b32_e32 v38, s50 -; GFX8-NEXT: v_mov_b32_e32 v39, s51 -; GFX8-NEXT: s_bfe_i64 s[50:51], s[4:5], 0x10000 -; GFX8-NEXT: s_bfe_i64 s[4:5], s[6:7], 0x10000 -; GFX8-NEXT: s_bfe_i64 s[6:7], s[2:3], 0x10000 +; GFX8-NEXT: v_mov_b32_e32 v35, s49 +; GFX8-NEXT: s_bfe_i64 s[48:49], s[4:5], 0x10000 +; GFX8-NEXT: s_bfe_i64 s[4:5], s[2:3], 0x10000 ; GFX8-NEXT: v_readlane_b32 s2, v62, 0 +; GFX8-NEXT: s_bfe_i64 s[50:51], s[50:51], 0x10000 ; GFX8-NEXT: v_readlane_b32 s3, v62, 1 ; GFX8-NEXT: v_mov_b32_e32 v5, s75 ; GFX8-NEXT: v_mov_b32_e32 v13, s73 @@ -10303,8 +10301,9 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX8-NEXT: v_mov_b32_e32 v29, s57 ; GFX8-NEXT: v_mov_b32_e32 v31, s47 ; GFX8-NEXT: v_mov_b32_e32 v33, s59 -; GFX8-NEXT: v_mov_b32_e32 v35, s49 ; GFX8-NEXT: v_mov_b32_e32 v37, s63 +; GFX8-NEXT: v_mov_b32_e32 v38, s50 +; GFX8-NEXT: v_mov_b32_e32 v39, s51 ; GFX8-NEXT: v_mov_b32_e32 v41, s55 ; GFX8-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x10000 @@ -10313,7 +10312,7 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX8-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x10000 -; GFX8-NEXT: s_bfe_i64 s[48:49], s[0:1], 0x10000 +; GFX8-NEXT: s_bfe_i64 s[50:51], s[6:7], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[52:53], s[52:53], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[54:55], s[54:55], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[56:57], s[56:57], 0x10000 @@ -10341,83 +10340,84 @@ define amdgpu_kernel void 
@constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX8-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x10000 ; GFX8-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x10000 -; GFX8-NEXT: s_bfe_i64 s[0:1], s[10:11], 0x10000 -; GFX8-NEXT: s_bfe_i64 s[10:11], s[2:3], 0x10000 -; GFX8-NEXT: s_add_u32 s2, s8, 0x1f0 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_bfe_i64 s[10:11], s[10:11], 0x10000 +; GFX8-NEXT: s_bfe_i64 s[8:9], s[8:9], 0x10000 +; GFX8-NEXT: s_bfe_i64 s[6:7], s[2:3], 0x10000 +; GFX8-NEXT: s_add_u32 s2, s0, 0x1f0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v43, s3 ; GFX8-NEXT: v_mov_b32_e32 v42, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x1e0 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x1e0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v45, s3 ; GFX8-NEXT: v_mov_b32_e32 v44, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x1d0 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x1d0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v47, s3 ; GFX8-NEXT: v_mov_b32_e32 v46, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x1c0 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x1c0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v49, s3 ; GFX8-NEXT: v_mov_b32_e32 v48, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x1b0 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x1b0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v51, s3 ; GFX8-NEXT: v_mov_b32_e32 v50, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x1a0 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x1a0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v53, s3 ; GFX8-NEXT: v_mov_b32_e32 v52, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x190 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x190 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v55, s3 ; GFX8-NEXT: v_mov_b32_e32 v54, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x180 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x180 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v57, s3 ; GFX8-NEXT: v_mov_b32_e32 v56, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x170 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x170 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v59, s3 ; GFX8-NEXT: v_mov_b32_e32 v58, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x160 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x160 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v61, s3 ; GFX8-NEXT: v_mov_b32_e32 v60, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x150 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 +; GFX8-NEXT: s_add_u32 s2, s0, 0x150 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[44:45], v[12:15] -; GFX8-NEXT: flat_store_dwordx4 v[46:47], v[0:3] +; GFX8-NEXT: flat_store_dwordx4 v[42:43], v[4:7] ; GFX8-NEXT: v_mov_b32_e32 v13, s3 ; GFX8-NEXT: v_mov_b32_e32 v12, s2 -; GFX8-NEXT: s_add_u32 s2, s8, 0x140 -; GFX8-NEXT: s_addc_u32 s3, s9, 0 -; GFX8-NEXT: v_mov_b32_e32 v2, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0x130 -; GFX8-NEXT: v_mov_b32_e32 v3, s1 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 -; GFX8-NEXT: flat_store_dwordx4 v[42:43], v[4:7] +; GFX8-NEXT: s_add_u32 s2, s0, 0x140 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 +; GFX8-NEXT: v_mov_b32_e32 v15, s3 +; GFX8-NEXT: v_mov_b32_e32 v14, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0x130 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 +; GFX8-NEXT: flat_store_dwordx4 v[46:47], v[0:3] ; GFX8-NEXT: flat_store_dwordx4 
v[48:49], v[8:11] ; GFX8-NEXT: flat_store_dwordx4 v[50:51], v[16:19] -; GFX8-NEXT: v_mov_b32_e32 v4, s10 -; GFX8-NEXT: v_mov_b32_e32 v17, s1 -; GFX8-NEXT: v_mov_b32_e32 v16, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0x120 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 -; GFX8-NEXT: v_mov_b32_e32 v19, s1 -; GFX8-NEXT: v_mov_b32_e32 v18, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0x110 -; GFX8-NEXT: v_mov_b32_e32 v5, s11 -; GFX8-NEXT: v_mov_b32_e32 v15, s3 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: v_mov_b32_e32 v4, s6 +; GFX8-NEXT: v_mov_b32_e32 v17, s3 +; GFX8-NEXT: v_mov_b32_e32 v16, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0x120 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 +; GFX8-NEXT: v_mov_b32_e32 v19, s3 +; GFX8-NEXT: v_mov_b32_e32 v18, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0x110 +; GFX8-NEXT: v_mov_b32_e32 v5, s7 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: v_mov_b32_e32 v42, vcc_lo ; GFX8-NEXT: v_mov_b32_e32 v43, vcc_hi -; GFX8-NEXT: v_mov_b32_e32 v14, s2 -; GFX8-NEXT: v_mov_b32_e32 v6, s6 -; GFX8-NEXT: v_mov_b32_e32 v7, s7 -; GFX8-NEXT: v_mov_b32_e32 v0, s4 -; GFX8-NEXT: v_mov_b32_e32 v1, s5 +; GFX8-NEXT: v_mov_b32_e32 v6, s4 +; GFX8-NEXT: v_mov_b32_e32 v7, s5 +; GFX8-NEXT: v_mov_b32_e32 v0, s8 +; GFX8-NEXT: v_mov_b32_e32 v1, s9 ; GFX8-NEXT: v_mov_b32_e32 v8, s12 ; GFX8-NEXT: flat_store_dwordx4 v[52:53], v[20:23] +; GFX8-NEXT: v_mov_b32_e32 v2, s10 +; GFX8-NEXT: v_mov_b32_e32 v3, s11 ; GFX8-NEXT: v_mov_b32_e32 v9, s13 ; GFX8-NEXT: flat_store_dwordx4 v[54:55], v[24:27] ; GFX8-NEXT: v_mov_b32_e32 v10, s14 @@ -10429,165 +10429,165 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o ; GFX8-NEXT: flat_store_dwordx4 v[14:15], v[4:7] ; GFX8-NEXT: flat_store_dwordx4 v[16:17], v[0:3] ; GFX8-NEXT: flat_store_dwordx4 v[18:19], v[8:11] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0x100 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0x100 ; GFX8-NEXT: v_mov_b32_e32 v0, s16 ; GFX8-NEXT: v_mov_b32_e32 v1, s17 ; GFX8-NEXT: v_mov_b32_e32 v2, s18 ; GFX8-NEXT: v_mov_b32_e32 v3, s19 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0xf0 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0xf0 ; GFX8-NEXT: v_mov_b32_e32 v0, s22 ; GFX8-NEXT: v_mov_b32_e32 v1, s23 ; GFX8-NEXT: v_mov_b32_e32 v2, s20 ; GFX8-NEXT: v_mov_b32_e32 v3, s21 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0xe0 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0xe0 ; GFX8-NEXT: v_mov_b32_e32 v0, s24 ; GFX8-NEXT: v_mov_b32_e32 v1, s25 ; GFX8-NEXT: v_mov_b32_e32 v2, s26 ; GFX8-NEXT: v_mov_b32_e32 v3, s27 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0xd0 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0xd0 ; GFX8-NEXT: v_mov_b32_e32 v0, s28 ; GFX8-NEXT: v_mov_b32_e32 v1, s29 ; GFX8-NEXT: v_mov_b32_e32 v2, s86 ; GFX8-NEXT: v_mov_b32_e32 v3, s87 -; GFX8-NEXT: s_addc_u32 
s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0xc0 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0xc0 ; GFX8-NEXT: v_mov_b32_e32 v0, s84 ; GFX8-NEXT: v_mov_b32_e32 v1, s85 ; GFX8-NEXT: v_mov_b32_e32 v2, s82 ; GFX8-NEXT: v_mov_b32_e32 v3, s83 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0xb0 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0xb0 ; GFX8-NEXT: v_mov_b32_e32 v0, s80 ; GFX8-NEXT: v_mov_b32_e32 v1, s81 ; GFX8-NEXT: v_mov_b32_e32 v2, s78 ; GFX8-NEXT: v_mov_b32_e32 v3, s79 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0xa0 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0xa0 ; GFX8-NEXT: v_mov_b32_e32 v0, s76 ; GFX8-NEXT: v_mov_b32_e32 v1, s77 ; GFX8-NEXT: v_mov_b32_e32 v2, s74 ; GFX8-NEXT: v_mov_b32_e32 v3, s75 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0x90 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0x90 ; GFX8-NEXT: v_mov_b32_e32 v0, s72 ; GFX8-NEXT: v_mov_b32_e32 v1, s73 ; GFX8-NEXT: v_mov_b32_e32 v2, s70 ; GFX8-NEXT: v_mov_b32_e32 v3, s71 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0x80 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0x80 ; GFX8-NEXT: v_mov_b32_e32 v0, s68 ; GFX8-NEXT: v_mov_b32_e32 v1, s69 ; GFX8-NEXT: v_mov_b32_e32 v2, s66 ; GFX8-NEXT: v_mov_b32_e32 v3, s67 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0x70 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0x70 ; GFX8-NEXT: v_mov_b32_e32 v0, s64 ; GFX8-NEXT: v_mov_b32_e32 v1, s65 ; GFX8-NEXT: v_mov_b32_e32 v2, s62 ; GFX8-NEXT: v_mov_b32_e32 v3, s63 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0x60 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0x60 ; GFX8-NEXT: v_mov_b32_e32 v0, s60 ; GFX8-NEXT: v_mov_b32_e32 v1, s61 ; GFX8-NEXT: v_mov_b32_e32 v2, s58 ; GFX8-NEXT: v_mov_b32_e32 v3, s59 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 0x50 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: 
v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 0x50 ; GFX8-NEXT: v_mov_b32_e32 v0, s56 ; GFX8-NEXT: v_mov_b32_e32 v1, s57 ; GFX8-NEXT: v_mov_b32_e32 v2, s54 ; GFX8-NEXT: v_mov_b32_e32 v3, s55 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 64 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 64 ; GFX8-NEXT: v_mov_b32_e32 v0, s52 ; GFX8-NEXT: v_mov_b32_e32 v1, s53 ; GFX8-NEXT: v_mov_b32_e32 v2, s50 ; GFX8-NEXT: v_mov_b32_e32 v3, s51 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 48 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 48 ; GFX8-NEXT: v_mov_b32_e32 v0, s48 ; GFX8-NEXT: v_mov_b32_e32 v1, s49 ; GFX8-NEXT: v_mov_b32_e32 v2, s46 ; GFX8-NEXT: v_mov_b32_e32 v3, s47 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 32 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 32 ; GFX8-NEXT: v_mov_b32_e32 v0, s44 ; GFX8-NEXT: v_mov_b32_e32 v1, s45 ; GFX8-NEXT: v_mov_b32_e32 v2, s42 ; GFX8-NEXT: v_mov_b32_e32 v3, s43 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: s_add_u32 s0, s8, 16 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: s_add_u32 s2, s0, 16 ; GFX8-NEXT: v_mov_b32_e32 v0, s40 ; GFX8-NEXT: v_mov_b32_e32 v1, s41 ; GFX8-NEXT: v_mov_b32_e32 v2, s38 ; GFX8-NEXT: v_mov_b32_e32 v3, s39 -; GFX8-NEXT: s_addc_u32 s1, s9, 0 +; GFX8-NEXT: s_addc_u32 s3, s1, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_mov_b32_e32 v5, s1 +; GFX8-NEXT: v_mov_b32_e32 v5, s3 ; GFX8-NEXT: v_mov_b32_e32 v0, s36 ; GFX8-NEXT: v_mov_b32_e32 v1, s37 ; GFX8-NEXT: v_mov_b32_e32 v2, s34 ; GFX8-NEXT: v_mov_b32_e32 v3, s35 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 -; GFX8-NEXT: v_readlane_b32 s0, v62, 4 +; GFX8-NEXT: v_mov_b32_e32 v4, s2 +; GFX8-NEXT: v_readlane_b32 s2, v62, 4 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] -; GFX8-NEXT: v_readlane_b32 s1, v62, 5 -; GFX8-NEXT: v_mov_b32_e32 v4, s8 +; GFX8-NEXT: v_readlane_b32 s3, v62, 5 +; GFX8-NEXT: v_mov_b32_e32 v5, s1 ; GFX8-NEXT: v_mov_b32_e32 v0, s30 ; GFX8-NEXT: v_mov_b32_e32 v1, s31 -; GFX8-NEXT: v_mov_b32_e32 v2, s0 -; GFX8-NEXT: v_mov_b32_e32 v3, s1 -; GFX8-NEXT: v_mov_b32_e32 v5, s9 +; GFX8-NEXT: v_mov_b32_e32 v2, s2 +; GFX8-NEXT: v_mov_b32_e32 v3, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, s0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll index f44a0b0ac2c65..bd191a37582c0 100644 --- a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll +++ b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll @@ -3807,53 +3807,64 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out ; VI-DS128-NEXT: s_mov_b32 s91, 0xe80000 ; VI-DS128-NEXT: s_add_u32 s88, s88, s11 ; VI-DS128-NEXT: s_addc_u32 s89, 
s89, 0 -; VI-DS128-NEXT: ds_read_b128 v[20:23], v0 offset:32 ; VI-DS128-NEXT: s_waitcnt lgkmcnt(1) -; VI-DS128-NEXT: v_lshrrev_b32_e32 v4, 16, v19 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v18 -; VI-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v19 -; VI-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v11 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v10 +; VI-DS128-NEXT: v_mov_b32_e32 v4, v3 +; VI-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v11 +; VI-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v10 ; VI-DS128-NEXT: buffer_store_dword v1, off, s[88:91], 0 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v2, off, s[88:91], 0 offset:4 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v3, off, s[88:91], 0 offset:8 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v4, off, s[88:91], 0 offset:12 ; 4-byte Folded Spill -; VI-DS128-NEXT: v_lshrrev_b32_e32 v7, 16, v17 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v5, 16, v16 -; VI-DS128-NEXT: v_and_b32_e32 v6, 0xffff, v17 -; VI-DS128-NEXT: v_and_b32_e32 v4, 0xffff, v16 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v7, 16, v9 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; VI-DS128-NEXT: v_and_b32_e32 v6, 0xffff, v9 +; VI-DS128-NEXT: v_and_b32_e32 v4, 0xffff, v8 ; VI-DS128-NEXT: buffer_store_dword v4, off, s[88:91], 0 offset:16 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v5, off, s[88:91], 0 offset:20 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v6, off, s[88:91], 0 offset:24 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v7, off, s[88:91], 0 offset:28 ; 4-byte Folded Spill -; VI-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:48 -; VI-DS128-NEXT: s_waitcnt lgkmcnt(1) -; VI-DS128-NEXT: v_lshrrev_b32_e32 v4, 16, v23 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v22 -; VI-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v23 -; VI-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; VI-DS128-NEXT: s_waitcnt lgkmcnt(0) +; VI-DS128-NEXT: v_lshrrev_b32_e32 v4, 16, v19 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v18 +; VI-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v19 +; VI-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; VI-DS128-NEXT: ds_read_b128 v[20:23], v0 offset:32 ; VI-DS128-NEXT: buffer_store_dword v1, off, s[88:91], 0 offset:32 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v2, off, s[88:91], 0 offset:36 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v3, off, s[88:91], 0 offset:40 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v4, off, s[88:91], 0 offset:44 ; 4-byte Folded Spill -; VI-DS128-NEXT: v_lshrrev_b32_e32 v19, 16, v21 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v17, 16, v20 -; VI-DS128-NEXT: v_and_b32_e32 v18, 0xffff, v21 -; VI-DS128-NEXT: v_and_b32_e32 v16, 0xffff, v20 -; VI-DS128-NEXT: s_waitcnt lgkmcnt(0) +; VI-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:48 +; VI-DS128-NEXT: ds_read_b128 v[36:39], v0 offset:64 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v15, 16, v17 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v13, 16, v16 +; VI-DS128-NEXT: v_and_b32_e32 v14, 0xffff, v17 +; VI-DS128-NEXT: v_and_b32_e32 v12, 0xffff, v16 +; VI-DS128-NEXT: s_waitcnt lgkmcnt(2) +; VI-DS128-NEXT: v_lshrrev_b32_e32 v19, 16, v23 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v17, 16, v22 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v31, 16, v21 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v29, 16, v20 +; VI-DS128-NEXT: v_and_b32_e32 v18, 0xffff, v23 +; VI-DS128-NEXT: v_and_b32_e32 v16, 0xffff, v22 +; VI-DS128-NEXT: v_and_b32_e32 v30, 0xffff, v21 +; VI-DS128-NEXT: v_and_b32_e32 v28, 0xffff, v20 +; VI-DS128-NEXT: s_waitcnt lgkmcnt(1) ; VI-DS128-NEXT: 
v_lshrrev_b32_e32 v23, 16, v27 ; VI-DS128-NEXT: v_lshrrev_b32_e32 v21, 16, v26 ; VI-DS128-NEXT: v_lshrrev_b32_e32 v35, 16, v25 ; VI-DS128-NEXT: v_lshrrev_b32_e32 v33, 16, v24 ; VI-DS128-NEXT: v_and_b32_e32 v22, 0xffff, v27 -; VI-DS128-NEXT: ds_read_b128 v[36:39], v0 offset:64 ; VI-DS128-NEXT: v_and_b32_e32 v20, 0xffff, v26 ; VI-DS128-NEXT: v_and_b32_e32 v34, 0xffff, v25 ; VI-DS128-NEXT: v_and_b32_e32 v32, 0xffff, v24 ; VI-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:80 ; VI-DS128-NEXT: ds_read_b128 v[55:58], v0 offset:96 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v31, 16, v11 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v29, 16, v10 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v15, 16, v9 +; VI-DS128-NEXT: s_waitcnt lgkmcnt(2) +; VI-DS128-NEXT: v_lshrrev_b32_e32 v42, 16, v39 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v40, 16, v38 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v46, 16, v37 ; VI-DS128-NEXT: s_waitcnt lgkmcnt(1) ; VI-DS128-NEXT: v_lshrrev_b32_e32 v50, 16, v27 ; VI-DS128-NEXT: v_lshrrev_b32_e32 v48, 16, v26 @@ -3864,24 +3875,16 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out ; VI-DS128-NEXT: v_and_b32_e32 v53, 0xffff, v25 ; VI-DS128-NEXT: v_and_b32_e32 v51, 0xffff, v24 ; VI-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:112 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v13, 16, v8 -; VI-DS128-NEXT: v_and_b32_e32 v30, 0xffff, v11 -; VI-DS128-NEXT: v_and_b32_e32 v28, 0xffff, v10 -; VI-DS128-NEXT: v_and_b32_e32 v14, 0xffff, v9 +; VI-DS128-NEXT: v_lshrrev_b32_e32 v44, 16, v36 +; VI-DS128-NEXT: v_and_b32_e32 v41, 0xffff, v39 +; VI-DS128-NEXT: v_and_b32_e32 v39, 0xffff, v38 +; VI-DS128-NEXT: v_and_b32_e32 v45, 0xffff, v37 ; VI-DS128-NEXT: s_waitcnt lgkmcnt(0) ; VI-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v25 ; VI-DS128-NEXT: v_lshrrev_b32_e32 v1, 16, v24 ; VI-DS128-NEXT: v_and_b32_e32 v2, 0xffff, v25 ; VI-DS128-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; VI-DS128-NEXT: v_mov_b32_e32 v24, s0 -; VI-DS128-NEXT: v_and_b32_e32 v12, 0xffff, v8 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v42, 16, v39 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v40, 16, v38 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v46, 16, v37 -; VI-DS128-NEXT: v_lshrrev_b32_e32 v44, 16, v36 -; VI-DS128-NEXT: v_and_b32_e32 v41, 0xffff, v39 -; VI-DS128-NEXT: v_and_b32_e32 v39, 0xffff, v38 -; VI-DS128-NEXT: v_and_b32_e32 v45, 0xffff, v37 ; VI-DS128-NEXT: v_and_b32_e32 v43, 0xffff, v36 ; VI-DS128-NEXT: v_lshrrev_b32_e32 v61, 16, v58 ; VI-DS128-NEXT: v_lshrrev_b32_e32 v59, 16, v57 @@ -3905,27 +3908,27 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out ; VI-DS128-NEXT: ds_write_b128 v24, v[39:42] offset:144 ; VI-DS128-NEXT: ds_write_b128 v24, v[32:35] offset:96 ; VI-DS128-NEXT: ds_write_b128 v24, v[20:23] offset:112 -; VI-DS128-NEXT: ds_write_b128 v24, v[16:19] offset:64 +; VI-DS128-NEXT: ds_write_b128 v24, v[28:31] offset:64 +; VI-DS128-NEXT: ds_write_b128 v24, v[16:19] offset:80 +; VI-DS128-NEXT: ds_write_b128 v24, v[12:15] offset:32 ; VI-DS128-NEXT: buffer_load_dword v0, off, s[88:91], 0 offset:32 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v1, off, s[88:91], 0 offset:36 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v2, off, s[88:91], 0 offset:40 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v3, off, s[88:91], 0 offset:44 ; 4-byte Folded Reload ; VI-DS128-NEXT: s_waitcnt vmcnt(0) -; VI-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:80 +; VI-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:48 ; VI-DS128-NEXT: buffer_load_dword v0, off, s[88:91], 0 offset:16 ; 4-byte Folded Reload ; VI-DS128-NEXT: 
buffer_load_dword v1, off, s[88:91], 0 offset:20 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v2, off, s[88:91], 0 offset:24 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v3, off, s[88:91], 0 offset:28 ; 4-byte Folded Reload ; VI-DS128-NEXT: s_waitcnt vmcnt(0) -; VI-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:32 +; VI-DS128-NEXT: ds_write_b128 v24, v[0:3] ; VI-DS128-NEXT: buffer_load_dword v0, off, s[88:91], 0 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v1, off, s[88:91], 0 offset:4 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v2, off, s[88:91], 0 offset:8 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v3, off, s[88:91], 0 offset:12 ; 4-byte Folded Reload ; VI-DS128-NEXT: s_waitcnt vmcnt(0) -; VI-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:48 -; VI-DS128-NEXT: ds_write_b128 v24, v[12:15] -; VI-DS128-NEXT: ds_write_b128 v24, v[28:31] offset:16 +; VI-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:16 ; VI-DS128-NEXT: s_endpgm ; ; GFX9-DS128-LABEL: local_zextload_v64i16_to_v64i32: @@ -3941,58 +3944,67 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out ; GFX9-DS128-NEXT: ds_read_b128 v[16:19], v0 offset:16 ; GFX9-DS128-NEXT: s_add_u32 s12, s12, s11 ; GFX9-DS128-NEXT: s_addc_u32 s13, s13, 0 -; GFX9-DS128-NEXT: ds_read_b128 v[20:23], v0 offset:32 -; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2) -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v31, 16, v11 ; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1) -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v4, 16, v19 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v18 -; GFX9-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v19 -; GFX9-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v11 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v10 +; GFX9-DS128-NEXT: v_mov_b32_e32 v4, v3 +; GFX9-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v11 +; GFX9-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v10 ; GFX9-DS128-NEXT: buffer_store_dword v1, off, s[12:15], 0 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: s_nop 0 ; GFX9-DS128-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v4, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v7, 16, v17 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v5, 16, v16 -; GFX9-DS128-NEXT: v_and_b32_e32 v6, 0xffff, v17 -; GFX9-DS128-NEXT: v_and_b32_e32 v4, 0xffff, v16 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v7, 16, v9 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v5, 16, v8 +; GFX9-DS128-NEXT: v_and_b32_e32 v6, 0xffff, v9 +; GFX9-DS128-NEXT: v_and_b32_e32 v4, 0xffff, v8 ; GFX9-DS128-NEXT: buffer_store_dword v4, off, s[12:15], 0 offset:16 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: s_nop 0 ; GFX9-DS128-NEXT: buffer_store_dword v5, off, s[12:15], 0 offset:20 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v6, off, s[12:15], 0 offset:24 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v7, off, s[12:15], 0 offset:28 ; 4-byte Folded Spill -; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:48 -; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1) -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v4, 16, v23 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v22 -; GFX9-DS128-NEXT: v_and_b32_e32 v3, 0xffff, v23 -; GFX9-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v4, 16, v19 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v2, 16, v18 +; GFX9-DS128-NEXT: 
v_and_b32_e32 v3, 0xffff, v19 +; GFX9-DS128-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; GFX9-DS128-NEXT: ds_read_b128 v[20:23], v0 offset:32 ; GFX9-DS128-NEXT: buffer_store_dword v1, off, s[12:15], 0 offset:32 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: s_nop 0 ; GFX9-DS128-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:36 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:40 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v4, off, s[12:15], 0 offset:44 ; 4-byte Folded Spill -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v19, 16, v21 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v17, 16, v20 -; GFX9-DS128-NEXT: v_and_b32_e32 v18, 0xffff, v21 -; GFX9-DS128-NEXT: v_and_b32_e32 v16, 0xffff, v20 -; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:48 +; GFX9-DS128-NEXT: ds_read_b128 v[36:39], v0 offset:64 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v15, 16, v17 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v13, 16, v16 +; GFX9-DS128-NEXT: v_and_b32_e32 v14, 0xffff, v17 +; GFX9-DS128-NEXT: v_and_b32_e32 v12, 0xffff, v16 +; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v19, 16, v23 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v17, 16, v22 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v31, 16, v21 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v29, 16, v20 +; GFX9-DS128-NEXT: v_and_b32_e32 v18, 0xffff, v23 +; GFX9-DS128-NEXT: v_and_b32_e32 v16, 0xffff, v22 +; GFX9-DS128-NEXT: v_and_b32_e32 v30, 0xffff, v21 +; GFX9-DS128-NEXT: v_and_b32_e32 v28, 0xffff, v20 +; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1) ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v23, 16, v27 ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v21, 16, v26 ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v35, 16, v25 ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v33, 16, v24 ; GFX9-DS128-NEXT: v_and_b32_e32 v22, 0xffff, v27 -; GFX9-DS128-NEXT: ds_read_b128 v[36:39], v0 offset:64 ; GFX9-DS128-NEXT: v_and_b32_e32 v20, 0xffff, v26 ; GFX9-DS128-NEXT: v_and_b32_e32 v34, 0xffff, v25 ; GFX9-DS128-NEXT: v_and_b32_e32 v32, 0xffff, v24 ; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:80 ; GFX9-DS128-NEXT: ds_read_b128 v[55:58], v0 offset:96 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v29, 16, v10 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v15, 16, v9 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v13, 16, v8 +; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v42, 16, v39 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v40, 16, v38 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v46, 16, v37 ; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1) ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v50, 16, v27 ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v48, 16, v26 @@ -4003,23 +4015,16 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out ; GFX9-DS128-NEXT: v_and_b32_e32 v53, 0xffff, v25 ; GFX9-DS128-NEXT: v_and_b32_e32 v51, 0xffff, v24 ; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v0 offset:112 -; GFX9-DS128-NEXT: v_and_b32_e32 v30, 0xffff, v11 -; GFX9-DS128-NEXT: v_and_b32_e32 v28, 0xffff, v10 -; GFX9-DS128-NEXT: v_and_b32_e32 v14, 0xffff, v9 -; GFX9-DS128-NEXT: v_and_b32_e32 v12, 0xffff, v8 +; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v44, 16, v36 +; GFX9-DS128-NEXT: v_and_b32_e32 v41, 0xffff, v39 +; GFX9-DS128-NEXT: v_and_b32_e32 v39, 0xffff, v38 +; GFX9-DS128-NEXT: v_and_b32_e32 v45, 0xffff, v37 ; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v3, 16, v25 ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v1, 16, v24 ; GFX9-DS128-NEXT: v_and_b32_e32 v2, 0xffff, v25 ; GFX9-DS128-NEXT: v_and_b32_e32 v0, 0xffff, v24 ; GFX9-DS128-NEXT: 
v_mov_b32_e32 v24, s0 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v42, 16, v39 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v40, 16, v38 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v46, 16, v37 -; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v44, 16, v36 -; GFX9-DS128-NEXT: v_and_b32_e32 v41, 0xffff, v39 -; GFX9-DS128-NEXT: v_and_b32_e32 v39, 0xffff, v38 -; GFX9-DS128-NEXT: v_and_b32_e32 v45, 0xffff, v37 ; GFX9-DS128-NEXT: v_and_b32_e32 v43, 0xffff, v36 ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v61, 16, v58 ; GFX9-DS128-NEXT: v_lshrrev_b32_e32 v59, 16, v57 @@ -4043,27 +4048,27 @@ define amdgpu_kernel void @local_zextload_v64i16_to_v64i32(ptr addrspace(3) %out ; GFX9-DS128-NEXT: ds_write_b128 v24, v[39:42] offset:144 ; GFX9-DS128-NEXT: ds_write_b128 v24, v[32:35] offset:96 ; GFX9-DS128-NEXT: ds_write_b128 v24, v[20:23] offset:112 -; GFX9-DS128-NEXT: ds_write_b128 v24, v[16:19] offset:64 +; GFX9-DS128-NEXT: ds_write_b128 v24, v[28:31] offset:64 +; GFX9-DS128-NEXT: ds_write_b128 v24, v[16:19] offset:80 +; GFX9-DS128-NEXT: ds_write_b128 v24, v[12:15] offset:32 ; GFX9-DS128-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:32 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v1, off, s[12:15], 0 offset:36 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v2, off, s[12:15], 0 offset:40 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v3, off, s[12:15], 0 offset:44 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: s_waitcnt vmcnt(0) -; GFX9-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:80 +; GFX9-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:48 ; GFX9-DS128-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:16 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v1, off, s[12:15], 0 offset:20 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v2, off, s[12:15], 0 offset:24 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v3, off, s[12:15], 0 offset:28 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: s_waitcnt vmcnt(0) -; GFX9-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:32 +; GFX9-DS128-NEXT: ds_write_b128 v24, v[0:3] ; GFX9-DS128-NEXT: buffer_load_dword v0, off, s[12:15], 0 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v2, off, s[12:15], 0 offset:8 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v3, off, s[12:15], 0 offset:12 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: s_waitcnt vmcnt(0) -; GFX9-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:48 -; GFX9-DS128-NEXT: ds_write_b128 v24, v[12:15] -; GFX9-DS128-NEXT: ds_write_b128 v24, v[28:31] offset:16 +; GFX9-DS128-NEXT: ds_write_b128 v24, v[0:3] offset:16 ; GFX9-DS128-NEXT: s_endpgm %load = load <64 x i16>, ptr addrspace(3) %in %ext = zext <64 x i16> %load to <64 x i32> @@ -4844,8 +4849,8 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out ; VI-DS128-LABEL: local_sextload_v64i16_to_v64i32: ; VI-DS128: ; %bb.0: ; VI-DS128-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 -; VI-DS128-NEXT: s_mov_b32 m0, -1 ; VI-DS128-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0 +; VI-DS128-NEXT: s_mov_b32 m0, -1 ; VI-DS128-NEXT: s_mov_b32 s89, SCRATCH_RSRC_DWORD1 ; VI-DS128-NEXT: s_mov_b32 s90, -1 ; VI-DS128-NEXT: s_waitcnt lgkmcnt(0) @@ -4855,66 +4860,67 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out ; VI-DS128-NEXT: s_mov_b32 s91, 0xe80000 ; VI-DS128-NEXT: s_add_u32 s88, s88, s11 ; VI-DS128-NEXT: s_addc_u32 s89, s89, 0 -; VI-DS128-NEXT: ds_read_b128 v[24:27], v32 offset:32 ; 
VI-DS128-NEXT: s_waitcnt lgkmcnt(1) -; VI-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v19 -; VI-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v18 -; VI-DS128-NEXT: v_bfe_i32 v2, v19, 0, 16 -; VI-DS128-NEXT: v_bfe_i32 v0, v18, 0, 16 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v11 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v10 +; VI-DS128-NEXT: v_bfe_i32 v2, v11, 0, 16 +; VI-DS128-NEXT: v_bfe_i32 v0, v10, 0, 16 ; VI-DS128-NEXT: buffer_store_dword v0, off, s[88:91], 0 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v1, off, s[88:91], 0 offset:4 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v2, off, s[88:91], 0 offset:8 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v3, off, s[88:91], 0 offset:12 ; 4-byte Folded Spill -; VI-DS128-NEXT: v_ashrrev_i32_e32 v6, 16, v17 -; VI-DS128-NEXT: v_ashrrev_i32_e32 v4, 16, v16 -; VI-DS128-NEXT: v_bfe_i32 v5, v17, 0, 16 -; VI-DS128-NEXT: v_bfe_i32 v3, v16, 0, 16 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v6, 16, v9 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v4, 16, v8 +; VI-DS128-NEXT: v_bfe_i32 v5, v9, 0, 16 +; VI-DS128-NEXT: v_bfe_i32 v3, v8, 0, 16 ; VI-DS128-NEXT: buffer_store_dword v3, off, s[88:91], 0 offset:16 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v4, off, s[88:91], 0 offset:20 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v5, off, s[88:91], 0 offset:24 ; 4-byte Folded Spill ; VI-DS128-NEXT: buffer_store_dword v6, off, s[88:91], 0 offset:28 ; 4-byte Folded Spill +; VI-DS128-NEXT: ds_read_b128 v[24:27], v32 offset:32 ; VI-DS128-NEXT: ds_read_b128 v[33:36], v32 offset:48 -; VI-DS128-NEXT: ds_read_b128 v[40:43], v32 offset:80 ; VI-DS128-NEXT: s_waitcnt lgkmcnt(2) +; VI-DS128-NEXT: v_ashrrev_i32_e32 v11, 16, v19 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v18 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v15, 16, v17 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v13, 16, v16 +; VI-DS128-NEXT: v_bfe_i32 v10, v19, 0, 16 +; VI-DS128-NEXT: v_bfe_i32 v8, v18, 0, 16 +; VI-DS128-NEXT: v_bfe_i32 v14, v17, 0, 16 +; VI-DS128-NEXT: v_bfe_i32 v12, v16, 0, 16 +; VI-DS128-NEXT: s_waitcnt lgkmcnt(1) ; VI-DS128-NEXT: v_ashrrev_i32_e32 v19, 16, v27 ; VI-DS128-NEXT: v_ashrrev_i32_e32 v17, 16, v26 ; VI-DS128-NEXT: v_bfe_i32 v18, v27, 0, 16 ; VI-DS128-NEXT: v_bfe_i32 v16, v26, 0, 16 -; VI-DS128-NEXT: s_waitcnt lgkmcnt(1) +; VI-DS128-NEXT: s_waitcnt lgkmcnt(0) ; VI-DS128-NEXT: v_ashrrev_i32_e32 v27, 16, v36 ; VI-DS128-NEXT: v_bfe_i32 v26, v36, 0, 16 ; VI-DS128-NEXT: ds_read_b128 v[36:39], v32 offset:64 +; VI-DS128-NEXT: ds_read_b128 v[40:43], v32 offset:80 ; VI-DS128-NEXT: ds_read_b128 v[56:59], v32 offset:96 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v23, 16, v25 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v21, 16, v24 ; VI-DS128-NEXT: s_waitcnt lgkmcnt(2) -; VI-DS128-NEXT: v_ashrrev_i32_e32 v53, 16, v40 -; VI-DS128-NEXT: v_bfe_i32 v52, v40, 0, 16 -; VI-DS128-NEXT: v_ashrrev_i32_e32 v23, 16, v11 -; VI-DS128-NEXT: s_waitcnt lgkmcnt(1) ; VI-DS128-NEXT: v_ashrrev_i32_e32 v47, 16, v39 ; VI-DS128-NEXT: v_ashrrev_i32_e32 v45, 16, v38 ; VI-DS128-NEXT: v_ashrrev_i32_e32 v51, 16, v37 ; VI-DS128-NEXT: v_bfe_i32 v46, v39, 0, 16 ; VI-DS128-NEXT: v_bfe_i32 v44, v38, 0, 16 ; VI-DS128-NEXT: v_bfe_i32 v50, v37, 0, 16 +; VI-DS128-NEXT: s_waitcnt lgkmcnt(1) +; VI-DS128-NEXT: v_ashrrev_i32_e32 v53, 16, v40 +; VI-DS128-NEXT: v_bfe_i32 v52, v40, 0, 16 ; VI-DS128-NEXT: ds_read_b128 v[37:40], v32 offset:112 ; VI-DS128-NEXT: v_mov_b32_e32 v32, s0 -; VI-DS128-NEXT: v_ashrrev_i32_e32 v21, 16, v10 -; VI-DS128-NEXT: v_ashrrev_i32_e32 v15, 16, v9 -; VI-DS128-NEXT: v_ashrrev_i32_e32 v13, 16, v8 +; 
VI-DS128-NEXT: v_bfe_i32 v22, v25, 0, 16 +; VI-DS128-NEXT: v_bfe_i32 v20, v24, 0, 16 +; VI-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v35 ; VI-DS128-NEXT: s_waitcnt lgkmcnt(0) ; VI-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v38 ; VI-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v37 ; VI-DS128-NEXT: v_bfe_i32 v2, v38, 0, 16 ; VI-DS128-NEXT: v_bfe_i32 v0, v37, 0, 16 -; VI-DS128-NEXT: v_bfe_i32 v22, v11, 0, 16 -; VI-DS128-NEXT: v_bfe_i32 v20, v10, 0, 16 -; VI-DS128-NEXT: v_bfe_i32 v14, v9, 0, 16 -; VI-DS128-NEXT: v_bfe_i32 v12, v8, 0, 16 -; VI-DS128-NEXT: v_ashrrev_i32_e32 v11, 16, v25 -; VI-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v24 -; VI-DS128-NEXT: v_bfe_i32 v10, v25, 0, 16 -; VI-DS128-NEXT: v_bfe_i32 v8, v24, 0, 16 -; VI-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v35 ; VI-DS128-NEXT: v_ashrrev_i32_e32 v31, 16, v34 ; VI-DS128-NEXT: v_ashrrev_i32_e32 v29, 16, v33 ; VI-DS128-NEXT: v_bfe_i32 v24, v35, 0, 16 @@ -4950,22 +4956,22 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out ; VI-DS128-NEXT: ds_write_b128 v32, v[44:47] offset:144 ; VI-DS128-NEXT: ds_write_b128 v32, v[28:31] offset:96 ; VI-DS128-NEXT: ds_write_b128 v32, v[24:27] offset:112 -; VI-DS128-NEXT: ds_write_b128 v32, v[8:11] offset:64 +; VI-DS128-NEXT: ds_write_b128 v32, v[20:23] offset:64 ; VI-DS128-NEXT: ds_write_b128 v32, v[16:19] offset:80 +; VI-DS128-NEXT: ds_write_b128 v32, v[12:15] offset:32 +; VI-DS128-NEXT: ds_write_b128 v32, v[8:11] offset:48 ; VI-DS128-NEXT: buffer_load_dword v0, off, s[88:91], 0 offset:16 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v1, off, s[88:91], 0 offset:20 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v2, off, s[88:91], 0 offset:24 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v3, off, s[88:91], 0 offset:28 ; 4-byte Folded Reload ; VI-DS128-NEXT: s_waitcnt vmcnt(0) -; VI-DS128-NEXT: ds_write_b128 v32, v[0:3] offset:32 +; VI-DS128-NEXT: ds_write_b128 v32, v[0:3] ; VI-DS128-NEXT: buffer_load_dword v0, off, s[88:91], 0 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v1, off, s[88:91], 0 offset:4 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v2, off, s[88:91], 0 offset:8 ; 4-byte Folded Reload ; VI-DS128-NEXT: buffer_load_dword v3, off, s[88:91], 0 offset:12 ; 4-byte Folded Reload ; VI-DS128-NEXT: s_waitcnt vmcnt(0) -; VI-DS128-NEXT: ds_write_b128 v32, v[0:3] offset:48 -; VI-DS128-NEXT: ds_write_b128 v32, v[12:15] -; VI-DS128-NEXT: ds_write_b128 v32, v[20:23] offset:16 +; VI-DS128-NEXT: ds_write_b128 v32, v[0:3] offset:16 ; VI-DS128-NEXT: s_endpgm ; ; GFX9-DS128-LABEL: local_sextload_v64i16_to_v64i32: @@ -4981,69 +4987,69 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out ; GFX9-DS128-NEXT: ds_read_b128 v[16:19], v32 offset:16 ; GFX9-DS128-NEXT: s_add_u32 s12, s12, s11 ; GFX9-DS128-NEXT: s_addc_u32 s13, s13, 0 -; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v32 offset:32 -; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2) -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v23, 16, v11 ; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1) -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v19 -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v18 -; GFX9-DS128-NEXT: v_bfe_i32 v2, v19, 0, 16 -; GFX9-DS128-NEXT: v_bfe_i32 v0, v18, 0, 16 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v11 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v10 +; GFX9-DS128-NEXT: v_bfe_i32 v2, v11, 0, 16 +; GFX9-DS128-NEXT: v_bfe_i32 v0, v10, 0, 16 ; GFX9-DS128-NEXT: buffer_store_dword v0, off, s[12:15], 0 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: s_nop 0 ; GFX9-DS128-NEXT: 
buffer_store_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v2, off, s[12:15], 0 offset:8 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:12 ; 4-byte Folded Spill -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v6, 16, v17 -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v4, 16, v16 -; GFX9-DS128-NEXT: v_bfe_i32 v5, v17, 0, 16 -; GFX9-DS128-NEXT: v_bfe_i32 v3, v16, 0, 16 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v6, 16, v9 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v4, 16, v8 +; GFX9-DS128-NEXT: v_bfe_i32 v5, v9, 0, 16 +; GFX9-DS128-NEXT: v_bfe_i32 v3, v8, 0, 16 ; GFX9-DS128-NEXT: buffer_store_dword v3, off, s[12:15], 0 offset:16 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: s_nop 0 ; GFX9-DS128-NEXT: buffer_store_dword v4, off, s[12:15], 0 offset:20 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v5, off, s[12:15], 0 offset:24 ; 4-byte Folded Spill ; GFX9-DS128-NEXT: buffer_store_dword v6, off, s[12:15], 0 offset:28 ; 4-byte Folded Spill +; GFX9-DS128-NEXT: ds_read_b128 v[24:27], v32 offset:32 ; GFX9-DS128-NEXT: ds_read_b128 v[33:36], v32 offset:48 -; GFX9-DS128-NEXT: ds_read_b128 v[40:43], v32 offset:80 ; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2) +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v11, 16, v19 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v18 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v15, 16, v17 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v13, 16, v16 +; GFX9-DS128-NEXT: v_bfe_i32 v10, v19, 0, 16 +; GFX9-DS128-NEXT: v_bfe_i32 v8, v18, 0, 16 +; GFX9-DS128-NEXT: v_bfe_i32 v14, v17, 0, 16 +; GFX9-DS128-NEXT: v_bfe_i32 v12, v16, 0, 16 +; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1) ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v19, 16, v27 ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v17, 16, v26 ; GFX9-DS128-NEXT: v_bfe_i32 v18, v27, 0, 16 ; GFX9-DS128-NEXT: v_bfe_i32 v16, v26, 0, 16 -; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1) +; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v27, 16, v36 ; GFX9-DS128-NEXT: v_bfe_i32 v26, v36, 0, 16 ; GFX9-DS128-NEXT: ds_read_b128 v[36:39], v32 offset:64 +; GFX9-DS128-NEXT: ds_read_b128 v[40:43], v32 offset:80 ; GFX9-DS128-NEXT: ds_read_b128 v[56:59], v32 offset:96 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v23, 16, v25 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v21, 16, v24 ; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(2) -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v53, 16, v40 -; GFX9-DS128-NEXT: v_bfe_i32 v52, v40, 0, 16 -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v21, 16, v10 -; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1) ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v47, 16, v39 ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v45, 16, v38 ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v51, 16, v37 ; GFX9-DS128-NEXT: v_bfe_i32 v46, v39, 0, 16 ; GFX9-DS128-NEXT: v_bfe_i32 v44, v38, 0, 16 ; GFX9-DS128-NEXT: v_bfe_i32 v50, v37, 0, 16 +; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(1) +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v53, 16, v40 +; GFX9-DS128-NEXT: v_bfe_i32 v52, v40, 0, 16 ; GFX9-DS128-NEXT: ds_read_b128 v[37:40], v32 offset:112 ; GFX9-DS128-NEXT: v_mov_b32_e32 v32, s0 -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v15, 16, v9 -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v13, 16, v8 -; GFX9-DS128-NEXT: v_bfe_i32 v22, v11, 0, 16 +; GFX9-DS128-NEXT: v_bfe_i32 v22, v25, 0, 16 +; GFX9-DS128-NEXT: v_bfe_i32 v20, v24, 0, 16 +; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v35 ; GFX9-DS128-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v3, 16, v38 ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v1, 16, v37 ; GFX9-DS128-NEXT: v_bfe_i32 v2, v38, 0, 16 ; GFX9-DS128-NEXT: v_bfe_i32 v0, v37, 
0, 16 -; GFX9-DS128-NEXT: v_bfe_i32 v20, v10, 0, 16 -; GFX9-DS128-NEXT: v_bfe_i32 v14, v9, 0, 16 -; GFX9-DS128-NEXT: v_bfe_i32 v12, v8, 0, 16 -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v11, 16, v25 -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v9, 16, v24 -; GFX9-DS128-NEXT: v_bfe_i32 v10, v25, 0, 16 -; GFX9-DS128-NEXT: v_bfe_i32 v8, v24, 0, 16 -; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v25, 16, v35 ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v31, 16, v34 ; GFX9-DS128-NEXT: v_ashrrev_i32_e32 v29, 16, v33 ; GFX9-DS128-NEXT: v_bfe_i32 v24, v35, 0, 16 @@ -5079,22 +5085,22 @@ define amdgpu_kernel void @local_sextload_v64i16_to_v64i32(ptr addrspace(3) %out ; GFX9-DS128-NEXT: ds_write_b128 v32, v[44:47] offset:144 ; GFX9-DS128-NEXT: ds_write_b128 v32, v[28:31] offset:96 ; GFX9-DS128-NEXT: ds_write_b128 v32, v[24:27] offset:112 -; GFX9-DS128-NEXT: ds_write_b128 v32, v[8:11] offset:64 +; GFX9-DS128-NEXT: ds_write_b128 v32, v[20:23] offset:64 ; GFX9-DS128-NEXT: ds_write_b128 v32, v[16:19] offset:80 +; GFX9-DS128-NEXT: ds_write_b128 v32, v[12:15] offset:32 +; GFX9-DS128-NEXT: ds_write_b128 v32, v[8:11] offset:48 ; GFX9-DS128-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:16 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v1, off, s[12:15], 0 offset:20 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v2, off, s[12:15], 0 offset:24 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v3, off, s[12:15], 0 offset:28 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: s_waitcnt vmcnt(0) -; GFX9-DS128-NEXT: ds_write_b128 v32, v[0:3] offset:32 +; GFX9-DS128-NEXT: ds_write_b128 v32, v[0:3] ; GFX9-DS128-NEXT: buffer_load_dword v0, off, s[12:15], 0 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v1, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v2, off, s[12:15], 0 offset:8 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: buffer_load_dword v3, off, s[12:15], 0 offset:12 ; 4-byte Folded Reload ; GFX9-DS128-NEXT: s_waitcnt vmcnt(0) -; GFX9-DS128-NEXT: ds_write_b128 v32, v[0:3] offset:48 -; GFX9-DS128-NEXT: ds_write_b128 v32, v[12:15] -; GFX9-DS128-NEXT: ds_write_b128 v32, v[20:23] offset:16 +; GFX9-DS128-NEXT: ds_write_b128 v32, v[0:3] offset:16 ; GFX9-DS128-NEXT: s_endpgm %load = load <64 x i16>, ptr addrspace(3) %in %ext = sext <64 x i16> %load to <64 x i32> diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll index ea9d5e8a0bc1f..1e6b77ecea85e 100644 --- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll +++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll @@ -400,9 +400,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6 +; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1 +; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1 ; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0 ; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2 ; GFX12-NEXT: s_wait_alu 0xf1ff @@ -438,9 +438,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r ; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0 -; 
GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6 +; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0 ; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1 +; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1 ; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0 ; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2 ; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff @@ -531,9 +531,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d, ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6 +; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1 +; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1 ; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0 ; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2 ; GFX12-NEXT: s_wait_alu 0xf1ff @@ -569,9 +569,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d, ; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0 -; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6 +; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0 ; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1 +; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1 ; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0 ; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2 ; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff diff --git a/llvm/test/CodeGen/AMDGPU/lower-brcond-with-xor.ll b/llvm/test/CodeGen/AMDGPU/lower-brcond-with-xor.ll new file mode 100644 index 0000000000000..e2f8df0448f82 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/lower-brcond-with-xor.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a --debug-counter=dagcombine=0 -start-before=si-annotate-control-flow %s -o - | FileCheck %s + +define amdgpu_kernel void @test(i32 %N, ptr addrspace(1) %p) { +; CHECK-LABEL: test: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT: v_cmp_gt_i32_e32 vcc, 1, v0 +; CHECK-NEXT: s_and_saveexec_b64 s[0:1], vcc +; CHECK-NEXT: s_endpgm +entry: + %id.x = tail call i32 @llvm.amdgcn.workitem.id.x() + %cmp2 = icmp slt i32 %id.x, 1 + br i1 %cmp2, label %if.then, label %exit + +if.then: + %idx.ext = zext i32 %N to i64 + %add.ptr = getelementptr i8, ptr addrspace(1) %p, i64 %idx.ext + ret void + +exit: + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll new file mode 100644 index 0000000000000..d6198f5000c34 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-dead-intrinsics.ll @@ -0,0 +1,9 @@ +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-lower-buffer-fat-pointers < %s | FileCheck %s +; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=amdgpu-lower-buffer-fat-pointers < %s | FileCheck %s + +; CHECK: @arbitrary +declare 
amdgpu_kernel void @arbitrary(ptr addrspace(1)) + +; COM: This used to cause verifier errors when "lowered" +declare <4 x i8> @llvm.masked.load.v4i8.p7(ptr addrspace(7) captures(none), i32 immarg, <4 x i1>, <4 x i8>) +; CHECK-NOT: llvm.masked.load diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll index 66de953043f10..610c3e2c02867 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-pointer-ops.ll @@ -282,9 +282,9 @@ define i160 @ptrtoaddr_ext(ptr addrspace(7) %ptr) { ; CHECK-LABEL: define i160 @ptrtoaddr_ext ; CHECK-SAME: ({ ptr addrspace(8), i32 } [[PTR:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 0 -; CHECK-NEXT: [[PTR_OFF:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1 -; CHECK-NEXT: [[RET:%.*]] = zext i32 [[PTR_OFF]] to i160 -; CHECK-NEXT: ret i160 [[RET]] +; CHECK-NEXT: [[ADDR:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1 +; CHECK-NEXT: [[EXT:%.*]] = zext i32 [[ADDR]] to i160 +; CHECK-NEXT: ret i160 [[EXT]] ; %addr = ptrtoaddr ptr addrspace(7) %ptr to i32 %ext = zext i32 %addr to i160 @@ -296,9 +296,9 @@ define i16 @ptrtoaddr_trunc(ptr addrspace(7) %ptr) { ; CHECK-LABEL: define i16 @ptrtoaddr_trunc ; CHECK-SAME: ({ ptr addrspace(8), i32 } [[PTR:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[PTR_RSRC:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 0 -; CHECK-NEXT: [[PTR_OFF:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1 -; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[PTR_OFF]] to i16 -; CHECK-NEXT: ret i16 [[RET]] +; CHECK-NEXT: [[ADDR:%.*]] = extractvalue { ptr addrspace(8), i32 } [[PTR]], 1 +; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[ADDR]] to i16 +; CHECK-NEXT: ret i16 [[TRUNC]] ; %addr = ptrtoaddr ptr addrspace(7) %ptr to i32 %trunc = trunc i32 %addr to i16 @@ -450,17 +450,17 @@ define <2 x ptr addrspace(7)> @addrspacecast_poison_vec() { ret <2 x ptr addrspace(7)> %ret } -declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1), i16, i32, i32) +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1), i16, i64, i32) -define ptr addrspace(7) @make_buffer_rsrc(ptr addrspace(1) %buf, i16 %stride, i32 %numRecords, i32 %flags) { +define ptr addrspace(7) @make_buffer_rsrc(ptr addrspace(1) %buf, i16 %stride, i64 %numRecords, i32 %flags) { ; CHECK-LABEL: define { ptr addrspace(8), i32 } @make_buffer_rsrc -; CHECK-SAME: (ptr addrspace(1) [[BUF:%.*]], i16 [[STRIDE:%.*]], i32 [[NUMRECORDS:%.*]], i32 [[FLAGS:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[RET:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[BUF]], i16 [[STRIDE]], i32 [[NUMRECORDS]], i32 [[FLAGS]]) +; CHECK-SAME: (ptr addrspace(1) [[BUF:%.*]], i16 [[STRIDE:%.*]], i64 [[NUMRECORDS:%.*]], i32 [[FLAGS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[RET:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[BUF]], i16 [[STRIDE]], i64 [[NUMRECORDS]], i32 [[FLAGS]]) ; CHECK-NEXT: [[TMP1:%.*]] = insertvalue { ptr addrspace(8), i32 } poison, ptr addrspace(8) [[RET]], 0 ; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { ptr addrspace(8), i32 } [[TMP1]], i32 0, 1 ; CHECK-NEXT: ret { ptr addrspace(8), i32 } [[TMP2]] ; - %ret = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %buf, i16 %stride, i32 %numRecords, i32 %flags) + %ret = call ptr addrspace(7) 
@llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %buf, i16 %stride, i64 %numRecords, i32 %flags) ret ptr addrspace(7) %ret } diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll new file mode 100644 index 0000000000000..bd29e9e5855ff --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll @@ -0,0 +1,141 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals +; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s | FileCheck %s + +; Regression test for issue 160181 +; One variable is chosen to be assigned at zero. Here, that's @both +; Then other variables should be allocated at fixed offsets from that provided +; they are allocated by all the other kernels that presently allocate the +; variable at address zero. +; The failure mode was in that second check - variables could be added to +; the module scope zero address struct even when some of the kernels allocating +; that struct do not need the additional variable. + +; With current llvm, all three of these integers are put in the module scope struct, when +; neither kern_one or kern_two access all three. + +@both = addrspace(3) global i32 poison +@both_second = addrspace(3) global i16 poison ; a second field in the module struct +@one = addrspace(3) global i32 poison +@two = addrspace(3) global i32 poison + + +;. +; CHECK: @llvm.amdgcn.module.lds = internal addrspace(3) global %llvm.amdgcn.module.lds.t poison, align 4, !absolute_symbol [[META0:![0-9]+]] +; CHECK: @llvm.compiler.used = appending addrspace(1) global [1 x ptr] [ptr addrspacecast (ptr addrspace(3) @llvm.amdgcn.module.lds to ptr)], section "llvm.metadata" +; CHECK: @llvm.amdgcn.kernel.kern_one.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_one.lds.t poison, align 4, !absolute_symbol [[META1:![0-9]+]] +; CHECK: @llvm.amdgcn.kernel.kern_two.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_two.lds.t poison, align 4, !absolute_symbol [[META1]] +; CHECK: @llvm.amdgcn.kernel.kern_block_direct_allocation.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_block_direct_allocation.lds.t poison, align 4, !absolute_symbol [[META1]] + +;. 
+define void @func_one() { +; CHECK-LABEL: define {{[^@]+}}@func_one() { +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id() +; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2:![0-9]+]] +; CHECK-NEXT: [[ONE:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ONE]], align 4 +; CHECK-NEXT: [[ONE1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3) +; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) [[ONE1]], align 4 +; CHECK-NEXT: store i16 10, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11:![0-9]+]] +; CHECK-NEXT: ret void +; + %val0 = load i32, ptr addrspace(3) @both + store i32 %val0, ptr addrspace(3) @one + store i16 10, ptr addrspace(3) @both_second + ret void +} + +define amdgpu_kernel void @kern_one() { +; CHECK-LABEL: define {{[^@]+}}@kern_one +; CHECK-SAME: () #[[ATTR0:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META16:![0-9]+]] { +; CHECK-NEXT: entry: +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_one.lds) ] +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !noalias [[META17:![0-9]+]] +; CHECK-NEXT: call void @func_one() +; CHECK-NEXT: ret void +; +entry: + call void @func_one() + ret void +} + +define void @func_two() { +; CHECK-LABEL: define {{[^@]+}}@func_two() { +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id() +; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2]] +; CHECK-NEXT: [[TWO:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TWO]], align 4 +; CHECK-NEXT: [[TWO1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3) +; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) [[TWO1]], align 4 +; CHECK-NEXT: store i16 20, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11]] +; CHECK-NEXT: ret void +; + %val0 = load i32, ptr addrspace(3) @both + store i32 %val0, ptr addrspace(3) @two + store i16 20, ptr addrspace(3) @both_second + ret void +} + +define amdgpu_kernel void @kern_two() { +; CHECK-LABEL: define {{[^@]+}}@kern_two +; CHECK-SAME: () #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META18:![0-9]+]] { +; CHECK-NEXT: entry: +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_two.lds) ] +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]] +; CHECK-NEXT: call void @func_two() +; CHECK-NEXT: ret void +; +entry: + call void @func_two() + ret void +} + +; Unrelated to the bug at hand, but if a variable is only +; reachable from a single kernel, it gets allocated to a fixed +; address independent of the module scope struct. This kernel +; means the key variables miss that optimisation while @both +; remains the best candidate for address zero allocation. 
+define void @func_block_direct_allocation() { +; CHECK-LABEL: define {{[^@]+}}@func_block_direct_allocation() { +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id() +; CHECK-NEXT: [[ONE:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ONE]], align 4 +; CHECK-NEXT: [[ONE1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3) +; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr addrspace(3) [[ONE1]], align 4 +; CHECK-NEXT: [[TWO:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TWO]], align 4 +; CHECK-NEXT: [[TWO2:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3) +; CHECK-NEXT: [[VAL2:%.*]] = load i32, ptr addrspace(3) [[TWO2]], align 4 +; CHECK-NEXT: [[SUM:%.*]] = add i32 [[VAL1]], [[VAL2]] +; CHECK-NEXT: store i32 [[SUM]], ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2]] +; CHECK-NEXT: store i16 30, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11]] +; CHECK-NEXT: ret void +; + %val1 = load i32, ptr addrspace(3) @one + %val2 = load i32, ptr addrspace(3) @two + %sum = add i32 %val1, %val2 + store i32 %sum, ptr addrspace(3) @both + store i16 30, ptr addrspace(3) @both_second + ret void +} + +define amdgpu_kernel void @kern_block_direct_allocation() { +; CHECK-LABEL: define {{[^@]+}}@kern_block_direct_allocation +; CHECK-SAME: () #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META21:![0-9]+]] { +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_block_direct_allocation.lds) ], !alias.scope [[META22:![0-9]+]], !noalias [[META25:![0-9]+]] +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ] +; CHECK-NEXT: call void @func_block_direct_allocation() +; CHECK-NEXT: call void @func_one() +; CHECK-NEXT: call void @func_two() +; CHECK-NEXT: ret void +; + call void @func_block_direct_allocation() + call void @func_one() + call void @func_two() + ret void +} +;. +; CHECK: attributes #[[ATTR0]] = { "amdgpu-lds-size"="12" } +; CHECK: attributes #[[ATTR1]] = { "amdgpu-lds-size"="16" } +; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) } +; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } +;. 
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll index b6f70fa6a9892..12212a0968c96 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll @@ -84,8 +84,8 @@ define void @f2() { ; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+4 ; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+12 ; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 2 -; GCN-NEXT: s_add_u32 s4, s4, s6 -; GCN-NEXT: s_addc_u32 s5, s5, s7 +; GCN-NEXT: s_add_u32 s4, s6, s4 +; GCN-NEXT: s_addc_u32 s5, s7, s5 ; GCN-NEXT: s_load_dword s4, s[4:5], 0x0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mov_b32_e32 v2, s4 diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll index c316f03dde89b..b689e1e51c2a4 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll @@ -49,8 +49,8 @@ define void @f0() { ; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+4 ; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+12 ; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4 -; GCN-NEXT: s_add_u32 s4, s4, s6 -; GCN-NEXT: s_addc_u32 s5, s5, s7 +; GCN-NEXT: s_add_u32 s4, s6, s4 +; GCN-NEXT: s_addc_u32 s5, s7, s5 ; GCN-NEXT: s_load_dword s4, s[4:5], 0x0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mov_b32_e32 v0, s4 @@ -90,8 +90,8 @@ define void @f1() { ; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+8 ; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+16 ; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4 -; GCN-NEXT: s_add_u32 s4, s4, s6 -; GCN-NEXT: s_addc_u32 s5, s5, s7 +; GCN-NEXT: s_add_u32 s4, s6, s4 +; GCN-NEXT: s_addc_u32 s5, s7, s5 ; GCN-NEXT: s_load_dword s4, s[4:5], 0x0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mov_b32_e32 v0, s4 @@ -131,8 +131,8 @@ define void @f2() { ; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+12 ; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+20 ; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4 -; GCN-NEXT: s_add_u32 s4, s4, s6 -; GCN-NEXT: s_addc_u32 s5, s5, s7 +; GCN-NEXT: s_add_u32 s4, s6, s4 +; GCN-NEXT: s_addc_u32 s5, s7, s5 ; GCN-NEXT: s_load_dword s4, s[4:5], 0x0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mov_b32_e32 v2, s4 @@ -172,8 +172,8 @@ define void @f3() { ; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+16 ; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+24 ; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4 -; GCN-NEXT: s_add_u32 s4, s4, s6 -; GCN-NEXT: s_addc_u32 s5, s5, s7 +; GCN-NEXT: s_add_u32 s4, s6, s4 +; GCN-NEXT: s_addc_u32 s5, s7, s5 ; GCN-NEXT: s_load_dword s4, s[4:5], 0x0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mov_b32_e32 v0, s4 diff --git a/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll b/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll new file mode 100644 index 0000000000000..b508f739e7fd3 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/lro-phi-samebb-nonlookthrough-store.ll @@ -0,0 +1,46 @@ +; RUN: opt -S -passes=amdgpu-late-codegenprepare \ +; RUN: -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s | FileCheck %s + +; Goal: With a loop-header PHI in illegal vector type and a same-BB +; non-lookthrough user (vector add) in the header, LRO should still coerce +; the PHI to i32 because a profitable sink (store) exists across BB. 
+ +define amdgpu_kernel void @phi_samebb_nonlookthrough_store( + ptr addrspace(1) %out, <4 x i8> %v, i1 %exit) { +; CHECK-LABEL: @phi_samebb_nonlookthrough_store( +entry: + br label %loop + +loop: ; preds = %entry, %loop + ; Loop-carried PHI in illegal vector type. + %acc = phi <4 x i8> [ zeroinitializer, %entry ], [ %acc.next, %loop ] + + ; Same-BB non-lookthrough use in header. + %acc.next = add <4 x i8> %acc, %v + + ; Make it a real loop: either iterate or exit to the sink block. + br i1 %exit, label %store, label %loop + +store: ; preds = %loop + ; The across-BB sink: storing the PHI coerced to i32. + %acc.bc = bitcast <4 x i8> %acc to i32 + store i32 %acc.bc, ptr addrspace(1) %out, align 4 + ret void +} + +; After AMDGPULateCodeGenPrepare we expect: +; - PHI is coerced to i32 +; - A header bitcast materializes for the add +; This proves the same-BB non-lookthrough user (add) did not get pruned +; when the def is a PHI. + +; CHECK: loop: +; CHECK: %[[ACC_TC:[^ ]+]] = phi i32 +; CHECK: %[[ACC_TC_BC:[^ ]+]] = bitcast i32 %[[ACC_TC]] to <4 x i8> +; CHECK: %[[ACC_NEXT:[^ ]+]] = add <4 x i8> %[[ACC_TC_BC]], %v +; CHECK: br i1 %exit, label %store, label %loop +; CHECK: store: +; CHECK: %[[ACC_TC_BC2:[^ ]+]] = bitcast i32 %[[ACC_TC]] to <4 x i8> +; CHECK: %[[ST_I32:[^ ]+]] = bitcast <4 x i8> %[[ACC_TC_BC2]] to i32 +; CHECK: store i32 %[[ST_I32]], + diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-hi.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-hi.ll index 1ae3434db6da5..3f66c23e1a73b 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix-hi.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix-hi.ll @@ -65,10 +65,9 @@ define <2 x half> @v_mad_mixhi_f16_f16lo_f16lo_f16lo_constlo(half %src0, half %s ; SDAG-GFX11-TRUE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_constlo: ; SDAG-GFX11-TRUE16: ; %bb.0: ; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SDAG-GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, 0x3c00 -; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; SDAG-GFX11-TRUE16-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] -; SDAG-GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v3 +; SDAG-GFX11-TRUE16-NEXT: v_fma_mixlo_f16 v0, v0, v1, v2 op_sel_hi:[1,1,1] +; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-GFX11-TRUE16-NEXT: v_pack_b32_f16 v0, 1.0, v0.l ; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX11-FAKE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_constlo: @@ -137,13 +136,20 @@ define <2 x half> @v_mad_mixhi_f16_f16lo_f16lo_f16lo_constlo(half %src0, half %s } define <2 x half> @v_mad_mixhi_f16_f16lo_f16lo_f16lo_reglo(half %src0, half %src1, half %src2, half %lo) #0 { -; GFX11-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_reglo: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_mov_b32_e32 v0, v3 -; GFX11-NEXT: s_setpc_b64 s[30:31] +; SDAG-GFX11-TRUE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_reglo: +; SDAG-GFX11-TRUE16: ; %bb.0: +; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SDAG-GFX11-TRUE16-NEXT: v_fma_mixhi_f16 v0, v0, v1, v2 op_sel_hi:[1,1,1] +; SDAG-GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v3.l +; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; SDAG-GFX11-FAKE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_reglo: +; SDAG-GFX11-FAKE16: ; %bb.0: +; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SDAG-GFX11-FAKE16-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 
op_sel_hi:[1,1,1] +; SDAG-GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v3 +; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_reglo: ; GFX9: ; %bb.0: @@ -172,6 +178,14 @@ define <2 x half> @v_mad_mixhi_f16_f16lo_f16lo_f16lo_reglo(half %src0, half %src ; SDAG-CI-NEXT: v_mov_b32_e32 v0, v3 ; SDAG-CI-NEXT: s_setpc_b64 s[30:31] ; +; GISEL-GFX11-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_reglo: +; GISEL-GFX11: ; %bb.0: +; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GISEL-GFX11-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] +; GISEL-GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-GFX11-NEXT: v_mov_b32_e32 v0, v3 +; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31] +; ; GISEL-CI-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_reglo: ; GISEL-CI: ; %bb.0: ; GISEL-CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -196,10 +210,8 @@ define i32 @v_mad_mixhi_f16_f16lo_f16lo_f16lo_intpack(half %src0, half %src1, ha ; SDAG-GFX11-TRUE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_intpack: ; SDAG-GFX11-TRUE16: ; %bb.0: ; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SDAG-GFX11-TRUE16-NEXT: v_fma_mixlo_f16 v1, v0, v1, v2 op_sel_hi:[1,1,1] +; SDAG-GFX11-TRUE16-NEXT: v_fma_mixhi_f16 v0, v0, v1, v2 op_sel_hi:[1,1,1] ; SDAG-GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, 0 -; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; SDAG-GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l ; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX11-FAKE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_intpack: @@ -277,10 +289,8 @@ define i32 @v_mad_mixhi_f16_f16lo_f16lo_f16lo_intpack_sext(half %src0, half %src ; SDAG-GFX11-TRUE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_intpack_sext: ; SDAG-GFX11-TRUE16: ; %bb.0: ; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SDAG-GFX11-TRUE16-NEXT: v_fma_mixlo_f16 v1, v0, v1, v2 op_sel_hi:[1,1,1] +; SDAG-GFX11-TRUE16-NEXT: v_fma_mixhi_f16 v0, v0, v1, v2 op_sel_hi:[1,1,1] ; SDAG-GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, 0 -; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; SDAG-GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v1.l ; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX11-FAKE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_intpack_sext: @@ -499,14 +509,25 @@ define <2 x half> @v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_postcvt(half } define <2 x half> @v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_postcvt_multi_use(half %src0, half %src1, half %src2) #0 { -; GFX11-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_postcvt_multi_use: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] -; GFX11-NEXT: v_fma_mixhi_f16 v0, v0, v1, v2 op_sel_hi:[1,1,1] clamp -; GFX11-NEXT: global_store_b16 v[0:1], v3, off dlc -; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX11-NEXT: s_setpc_b64 s[30:31] +; SDAG-GFX11-TRUE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_postcvt_multi_use: +; SDAG-GFX11-TRUE16: ; %bb.0: +; SDAG-GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SDAG-GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v0.l +; SDAG-GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-GFX11-TRUE16-NEXT: v_fma_mixlo_f16 v0, v3, v1, v2 op_sel_hi:[1,1,1] +; SDAG-GFX11-TRUE16-NEXT: v_fma_mixhi_f16 v0, v3, v1, v2 op_sel_hi:[1,1,1] clamp +; SDAG-GFX11-TRUE16-NEXT: global_store_b16 v[0:1], v0, off dlc +; SDAG-GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 
0x0 +; SDAG-GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; SDAG-GFX11-FAKE16-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_postcvt_multi_use: +; SDAG-GFX11-FAKE16: ; %bb.0: +; SDAG-GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SDAG-GFX11-FAKE16-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] +; SDAG-GFX11-FAKE16-NEXT: v_fma_mixhi_f16 v0, v0, v1, v2 op_sel_hi:[1,1,1] clamp +; SDAG-GFX11-FAKE16-NEXT: global_store_b16 v[0:1], v3, off dlc +; SDAG-GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0 +; SDAG-GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX9-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_postcvt_multi_use: ; GFX9: ; %bb.0: @@ -542,6 +563,15 @@ define <2 x half> @v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_postcvt_multi ; SDAG-CI-NEXT: s_waitcnt vmcnt(0) ; SDAG-CI-NEXT: s_setpc_b64 s[30:31] ; +; GISEL-GFX11-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_postcvt_multi_use: +; GISEL-GFX11: ; %bb.0: +; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GISEL-GFX11-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] +; GISEL-GFX11-NEXT: v_fma_mixhi_f16 v0, v0, v1, v2 op_sel_hi:[1,1,1] clamp +; GISEL-GFX11-NEXT: global_store_b16 v[0:1], v3, off dlc +; GISEL-GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31] +; ; GISEL-CI-LABEL: v_mad_mixhi_f16_f16lo_f16lo_f16lo_undeflo_clamp_postcvt_multi_use: ; GISEL-CI: ; %bb.0: ; GISEL-CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll index eab92668c536b..21e6faf46f58d 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix-lo.ll @@ -412,11 +412,9 @@ define <2 x half> @v_mad_mix_v2f32(<2 x half> %src0, <2 x half> %src1, <2 x half ; SDAG-GFX1100-TRUE16: ; %bb.0: ; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel:[1,1,1] op_sel_hi:[1,1,1] -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; SDAG-GFX1100-TRUE16-NEXT: v_mov_b16_e32 v3.h, v3.l -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v0, v0, v1, v2 op_sel_hi:[1,1,1] ; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; SDAG-GFX1100-TRUE16-NEXT: v_mov_b32_e32 v0, v3 +; SDAG-GFX1100-TRUE16-NEXT: v_pack_b32_f16 v0, v0.l, v3.l ; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX1100-FAKE16-LABEL: v_mad_mix_v2f32: @@ -535,12 +533,10 @@ define <3 x half> @v_mad_mix_v3f32(<3 x half> %src0, <3 x half> %src1, <3 x half ; SDAG-GFX1100-TRUE16: ; %bb.0: ; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v0, v0, v2, v4 op_sel_hi:[1,1,1] ; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v1, v1, v3, v5 op_sel_hi:[1,1,1] -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; SDAG-GFX1100-TRUE16-NEXT: v_mov_b16_e32 v6.h, v6.l -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; SDAG-GFX1100-TRUE16-NEXT: v_mov_b32_e32 v0, v6 +; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; SDAG-GFX1100-TRUE16-NEXT: v_pack_b32_f16 v0, v0.l, v6.l ; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; 
SDAG-GFX1100-FAKE16-LABEL: v_mad_mix_v3f32: @@ -704,16 +700,13 @@ define <4 x half> @v_mad_mix_v4f32(<4 x half> %src0, <4 x half> %src1, <4 x half ; SDAG-GFX1100-TRUE16-LABEL: v_mad_mix_v4f32: ; SDAG-GFX1100-TRUE16: ; %bb.0: ; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; SDAG-GFX1100-TRUE16-NEXT: v_mov_b16_e32 v6.h, v6.l -; SDAG-GFX1100-TRUE16-NEXT: v_mov_b16_e32 v7.h, v7.l +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v6, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v0, v0, v2, v4 op_sel_hi:[1,1,1] +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v1, v1, v3, v5 op_sel_hi:[1,1,1] ; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1] -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; SDAG-GFX1100-TRUE16-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; SDAG-GFX1100-TRUE16-NEXT: v_pack_b32_f16 v0, v0.l, v6.h +; SDAG-GFX1100-TRUE16-NEXT: v_pack_b32_f16 v1, v1.l, v6.l ; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX1100-FAKE16-LABEL: v_mad_mix_v4f32: @@ -914,14 +907,23 @@ define <4 x half> @v_mad_mix_v4f32(<4 x half> %src0, <4 x half> %src1, <4 x half ; FIXME (DAG): Fold clamp define <2 x half> @v_mad_mix_v2f32_clamp_postcvt(<2 x half> %src0, <2 x half> %src1, <2 x half> %src2) #0 { -; GFX1100-LABEL: v_mad_mix_v2f32_clamp_postcvt: -; GFX1100: ; %bb.0: -; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX1100-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] clamp -; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1100-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp -; GFX1100-NEXT: v_mov_b32_e32 v0, v3 -; GFX1100-NEXT: s_setpc_b64 s[30:31] +; SDAG-GFX1100-TRUE16-LABEL: v_mad_mix_v2f32_clamp_postcvt: +; SDAG-GFX1100-TRUE16: ; %bb.0: +; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-GFX1100-TRUE16-NEXT: v_mov_b32_e32 v0, v3 +; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; SDAG-GFX1100-FAKE16-LABEL: v_mad_mix_v2f32_clamp_postcvt: +; SDAG-GFX1100-FAKE16: ; %bb.0: +; SDAG-GFX1100-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SDAG-GFX1100-FAKE16-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; SDAG-GFX1100-FAKE16-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-FAKE16-NEXT: v_mov_b32_e32 v0, v3 +; SDAG-GFX1100-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX900-LABEL: v_mad_mix_v2f32_clamp_postcvt: ; GFX900: ; %bb.0: @@ -978,6 +980,15 @@ define <2 x half> @v_mad_mix_v2f32_clamp_postcvt(<2 x half> %src0, <2 x half> %s ; 
SDAG-CI-NEXT: v_cvt_f32_f16_e64 v1, v1 clamp ; SDAG-CI-NEXT: s_setpc_b64 s[30:31] ; +; GISEL-GFX1100-LABEL: v_mad_mix_v2f32_clamp_postcvt: +; GISEL-GFX1100: ; %bb.0: +; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GISEL-GFX1100-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] clamp +; GISEL-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GISEL-GFX1100-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; GISEL-GFX1100-NEXT: v_mov_b32_e32 v0, v3 +; GISEL-GFX1100-NEXT: s_setpc_b64 s[30:31] +; ; GISEL-VI-LABEL: v_mad_mix_v2f32_clamp_postcvt: ; GISEL-VI: ; %bb.0: ; GISEL-VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -1040,13 +1051,13 @@ define <3 x half> @v_mad_mix_v3f32_clamp_postcvt(<3 x half> %src0, <3 x half> %s ; SDAG-GFX1100-TRUE16: ; %bb.0: ; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v1, v1, v3, v5 op_sel_hi:[1,1,1] +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixhi_f16 v3, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp ; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v3, v0, v2, v4 op_sel_hi:[1,1,1] clamp -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) ; SDAG-GFX1100-TRUE16-NEXT: v_pack_b32_f16 v1, v1.l, 0 -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixhi_f16 v3, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; SDAG-GFX1100-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1 clamp ; SDAG-GFX1100-TRUE16-NEXT: v_mov_b32_e32 v0, v3 +; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; SDAG-GFX1100-TRUE16-NEXT: v_pk_max_f16 v1, v1, v1 clamp ; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX1100-FAKE16-LABEL: v_mad_mix_v3f32_clamp_postcvt: @@ -1247,17 +1258,29 @@ define <3 x half> @v_mad_mix_v3f32_clamp_postcvt(<3 x half> %src0, <3 x half> %s } define <4 x half> @v_mad_mix_v4f32_clamp_postcvt(<4 x half> %src0, <4 x half> %src1, <4 x half> %src2) #0 { -; GFX1100-LABEL: v_mad_mix_v4f32_clamp_postcvt: -; GFX1100: ; %bb.0: -; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp -; GFX1100-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1] clamp -; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp -; GFX1100-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp -; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1100-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 -; GFX1100-NEXT: s_setpc_b64 s[30:31] +; SDAG-GFX1100-TRUE16-LABEL: v_mad_mix_v4f32_clamp_postcvt: +; SDAG-GFX1100-TRUE16: ; %bb.0: +; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixhi_f16 v2, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v2, v1, v3, v5 op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; SDAG-GFX1100-TRUE16-NEXT: v_mov_b32_e32 v0, v6 +; 
SDAG-GFX1100-TRUE16-NEXT: v_mov_b32_e32 v1, v2 +; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; SDAG-GFX1100-FAKE16-LABEL: v_mad_mix_v4f32_clamp_postcvt: +; SDAG-GFX1100-FAKE16: ; %bb.0: +; SDAG-GFX1100-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SDAG-GFX1100-FAKE16-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-FAKE16-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; SDAG-GFX1100-FAKE16-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-FAKE16-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-GFX1100-FAKE16-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; SDAG-GFX1100-FAKE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX900-LABEL: v_mad_mix_v4f32_clamp_postcvt: ; GFX900: ; %bb.0: @@ -1358,6 +1381,18 @@ define <4 x half> @v_mad_mix_v4f32_clamp_postcvt(<4 x half> %src0, <4 x half> %s ; SDAG-CI-NEXT: v_cvt_f32_f16_e64 v3, v3 clamp ; SDAG-CI-NEXT: s_setpc_b64 s[30:31] ; +; GISEL-GFX1100-LABEL: v_mad_mix_v4f32_clamp_postcvt: +; GISEL-GFX1100: ; %bb.0: +; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GISEL-GFX1100-NEXT: v_fma_mixlo_f16 v6, v0, v2, v4 op_sel_hi:[1,1,1] clamp +; GISEL-GFX1100-NEXT: v_fma_mixlo_f16 v7, v1, v3, v5 op_sel_hi:[1,1,1] clamp +; GISEL-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GISEL-GFX1100-NEXT: v_fma_mixhi_f16 v6, v0, v2, v4 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; GISEL-GFX1100-NEXT: v_fma_mixhi_f16 v7, v1, v3, v5 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; GISEL-GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GISEL-GFX1100-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7 +; GISEL-GFX1100-NEXT: s_setpc_b64 s[30:31] +; ; GISEL-VI-LABEL: v_mad_mix_v4f32_clamp_postcvt: ; GISEL-VI: ; %bb.0: ; GISEL-VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -1452,10 +1487,10 @@ define <2 x half> @v_mad_mix_v2f32_clamp_postcvt_lo(<2 x half> %src0, <2 x half> ; SDAG-GFX1100-TRUE16-LABEL: v_mad_mix_v2f32_clamp_postcvt_lo: ; SDAG-GFX1100-TRUE16: ; %bb.0: ; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] clamp -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 op_sel:[1,1,1] op_sel_hi:[1,1,1] -; SDAG-GFX1100-TRUE16-NEXT: v_mov_b32_e32 v0, v3 +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel:[1,1,1] op_sel_hi:[1,1,1] +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v0, v0, v1, v2 op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-GFX1100-TRUE16-NEXT: v_pack_b32_f16 v0, v0.l, v3.l ; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX1100-FAKE16-LABEL: v_mad_mix_v2f32_clamp_postcvt_lo: @@ -1618,9 +1653,9 @@ define <2 x half> @v_mad_mix_v2f32_clamp_postcvt_hi(<2 x half> %src0, <2 x half> ; SDAG-GFX1100-TRUE16: ; %bb.0: ; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v3, v0, v1, v2 op_sel_hi:[1,1,1] -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixhi_f16 v3, v0, v1, v2 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp -; 
SDAG-GFX1100-TRUE16-NEXT: v_mov_b32_e32 v0, v3 +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v0, v0, v1, v2 op_sel:[1,1,1] op_sel_hi:[1,1,1] clamp +; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-GFX1100-TRUE16-NEXT: v_pack_b32_f16 v0, v3.l, v0.l ; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX1100-FAKE16-LABEL: v_mad_mix_v2f32_clamp_postcvt_hi: @@ -2385,10 +2420,8 @@ define i32 @mixlo_zext(float %src0, float %src1, float %src2) #0 { ; SDAG-GFX1100-TRUE16-LABEL: mixlo_zext: ; SDAG-GFX1100-TRUE16: ; %bb.0: ; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v1, v0, v1, v2 +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mixlo_f16 v0, v0, v1, v2 ; SDAG-GFX1100-TRUE16-NEXT: v_mov_b16_e32 v0.h, 0 -; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; SDAG-GFX1100-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l ; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX1100-FAKE16-LABEL: mixlo_zext: diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix.ll b/llvm/test/CodeGen/AMDGPU/mad-mix.ll index a4878539b1c74..95df131e21358 100644 --- a/llvm/test/CodeGen/AMDGPU/mad-mix.ll +++ b/llvm/test/CodeGen/AMDGPU/mad-mix.ll @@ -2253,9 +2253,10 @@ define float @v_mad_mix_f32_precvtnegf16hi_abs_f16lo_f16lo(i32 %src0.arg, half % ; SDAG-GFX1100-TRUE16-LABEL: v_mad_mix_f32_precvtnegf16hi_abs_f16lo_f16lo: ; SDAG-GFX1100-TRUE16: ; %bb.0: ; SDAG-GFX1100-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SDAG-GFX1100-TRUE16-NEXT: v_xor_b16 v0.l, 0x8000, v0.h +; SDAG-GFX1100-TRUE16-NEXT: v_mov_b16_e32 v0.l, v2.l +; SDAG-GFX1100-TRUE16-NEXT: v_xor_b16 v2.l, 0x8000, v0.h ; SDAG-GFX1100-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; SDAG-GFX1100-TRUE16-NEXT: v_fma_mix_f32 v0, |v0|, v1, v2 op_sel_hi:[1,1,1] +; SDAG-GFX1100-TRUE16-NEXT: v_fma_mix_f32 v0, |v2|, v1, v0 op_sel_hi:[1,1,1] ; SDAG-GFX1100-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; SDAG-GFX1100-FAKE16-LABEL: v_mad_mix_f32_precvtnegf16hi_abs_f16lo_f16lo: diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll index e6960a3f710da..dbcd3700a1605 100644 --- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll +++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll @@ -2233,7 +2233,7 @@ define amdgpu_ps i64 @lshr_mad_i64_sgpr(i64 inreg %arg0) #0 { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_mov_b32 s3, 0 ; GFX1250-NEXT: s_mov_b32 s2, s1 -; GFX1250-NEXT: s_mov_b64 s[4:5], lit64(0xffffffffffff1c18) +; GFX1250-NEXT: s_mov_b64 s[4:5], 0xffffffffffff1c18 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-NEXT: s_mul_u64 s[2:3], s[2:3], s[4:5] ; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[2:3], s[0:1] diff --git a/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll b/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll index 4f88077e3b0ee..74f15ac6e074e 100644 --- a/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll +++ b/llvm/test/CodeGen/AMDGPU/make-buffer-rsrc-lds-fails.ll @@ -3,7 +3,7 @@ ; RUN: not --crash llc -global-isel -mtriple=amdgcn -mcpu=gfx900 < %s define amdgpu_ps ptr addrspace(8) @basic_raw_buffer(ptr addrspace(3) inreg %p) { - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p3(ptr addrspace(3) %p, i16 0, i32 1234, i32 5678) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p3(ptr addrspace(3) %p, i16 0, i64 1234, i32 5678) ret ptr addrspace(8) %rsrc } -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p3(ptr addrspace(3) nocapture readnone, i16, i32, i32) +declare ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p3(ptr addrspace(3) nocapture readnone, i16, i64, i32) diff --git a/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll index 65b4d37a8d583..93d772fdb7854 100644 --- a/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll +++ b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll @@ -13,9 +13,9 @@ define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) { ; GFX9-NEXT: s_and_b32 s4, s4, 0xffff ; GFX9-NEXT: s_mul_i32 s14, s14, s4 ; GFX9-NEXT: s_add_i32 s5, s5, s14 -; GFX9-NEXT: v_add_u32_e32 v0, s5, v0 -; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; GFX9-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1] +; GFX9-NEXT: v_add_u32_e32 v1, s5, v0 +; GFX9-NEXT: v_mov_b32_e32 v0, 0 +; GFX9-NEXT: v_ashrrev_i64 v[4:5], 28, v[0:1] ; GFX9-NEXT: v_mov_b32_e32 v1, s1 ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v4 ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc @@ -37,12 +37,12 @@ define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) { ; GFX10-NEXT: s_load_dword s4, s[8:9], 0x1c ; GFX10-NEXT: s_load_dword s5, s[8:9], 0x38 ; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0 +; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_and_b32 s4, s4, 0xffff ; GFX10-NEXT: s_mul_i32 s14, s14, s4 -; GFX10-NEXT: v_add3_u32 v0, s5, s14, v0 -; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; GFX10-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1] +; GFX10-NEXT: v_add3_u32 v2, s5, s14, v0 +; GFX10-NEXT: v_ashrrev_i64 v[4:5], 28, v[1:2] ; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4 ; GFX10-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v5, vcc_lo ; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4 @@ -62,21 +62,19 @@ define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) { ; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x1c ; GFX11-NEXT: s_load_b32 s7, s[4:5], 0x38 ; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_and_b32 v1, 0x3ff, v0 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: s_and_b32 s4, s6, 0xffff ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_mul_i32 s13, s13, s4 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: v_add3_u32 v0, s7, s13, v0 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; GFX11-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1] +; GFX11-NEXT: v_add3_u32 v1, s7, s13, v1 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_ashrrev_i64 v[4:5], 28, v[0:1] ; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v5, vcc_lo ; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4 -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, s3, v5, vcc_lo ; GFX11-NEXT: global_load_b128 v[0:3], v[0:1], off ; GFX11-NEXT: s_waitcnt vmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll index c92c672dda2ad..ca4f5d22ca9a0 100644 --- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll +++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll @@ -51,7 +51,7 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) { ; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s4, v2 ; 
CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v3, v7, vcc ; CHECK-NEXT: s_add_u32 s4, s4, 1 -; CHECK-NEXT: s_addc_u32 s5, s5, 0 +; CHECK-NEXT: s_addc_u32 s5, 0, s5 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: flat_store_byte v[6:7], v10 ; CHECK-NEXT: ; %bb.7: diff --git a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll index 4c0ab91b7d622..02f39e25cb447 100644 --- a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll +++ b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll @@ -3749,7 +3749,7 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5) ; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:24 ; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:25 ; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:26 -; ALIGNED-NEXT: buffer_load_ubyte v127, v2, s[0:3], 0 offen offset:19 +; ALIGNED-NEXT: buffer_load_ubyte v126, v2, s[0:3], 0 offen offset:19 ; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:28 ; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:29 ; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:30 @@ -3953,7 +3953,7 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5) ; ALIGNED-NEXT: buffer_store_dword v70, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_store_dword v71, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_store_dword v80, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill -; ALIGNED-NEXT: buffer_store_dword v127, off, s[0:3], s32 offset:1152 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v126, off, s[0:3], s32 offset:1188 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_store_dword v81, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:87 ; ALIGNED-NEXT: s_waitcnt vmcnt(7) @@ -4185,8 +4185,12 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:988 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v7 +; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:146 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v8, 8, v6 +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:150 +; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:151 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1020 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v5, 8, v3 ; ALIGNED-NEXT: s_clause 0x1 @@ -4198,6 +4202,10 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5) ; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:139 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1028 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:140 +; ALIGNED-NEXT: s_waitcnt vmcnt(6) +; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1120 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(5) +; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:1124 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(4) ; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1036 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(3) @@ -4210,376 +4218,346 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr 
addrspace(5) ; ALIGNED-NEXT: v_lshl_or_b32 v1, v5, 8, v3 ; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:138 ; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1056 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:149 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:137 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1048 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:136 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) +; ALIGNED-NEXT: s_waitcnt vmcnt(3) ; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1060 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(2) +; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1116 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) ; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1052 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1044 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v3 +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:147 +; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:148 +; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:1108 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:145 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1064 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:144 +; ALIGNED-NEXT: s_waitcnt vmcnt(3) +; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1100 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(2) +; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1104 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) ; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1084 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1072 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x5 -; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:146 -; ALIGNED-NEXT: buffer_load_ubyte v126, v2, s[0:3], 0 offen offset:147 -; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:148 -; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:149 -; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:150 -; ALIGNED-NEXT: buffer_load_ubyte v124, v2, s[0:3], 0 offen offset:151 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v126, 8, v125 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1104 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v7 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v8, 8, v6 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1136 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v5, 8, v3 +; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:158 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 +; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:157 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1148 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:156 
; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1112 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1160 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1116 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 +; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1152 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v124, 8, v5 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1124 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 8, v3 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1132 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:156 -; ALIGNED-NEXT: buffer_load_ubyte v109, v2, s[0:3], 0 offen offset:157 -; ALIGNED-NEXT: buffer_load_ubyte v108, v2, s[0:3], 0 offen offset:158 -; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:159 -; ALIGNED-NEXT: buffer_load_ubyte v106, v2, s[0:3], 0 offen offset:155 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v109, 8, v121 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1144 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v127, v2, s[0:3], 0 offen offset:159 +; ALIGNED-NEXT: buffer_load_ubyte v124, v2, s[0:3], 0 offen offset:155 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v107, 8, v108 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v127, 8, v3 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1136 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1172 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v105, v2, s[0:3], 0 offen offset:152 -; ALIGNED-NEXT: buffer_load_ubyte v93, v2, s[0:3], 0 offen offset:153 -; ALIGNED-NEXT: buffer_load_ubyte v91, v2, s[0:3], 0 offen offset:154 +; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:152 +; ALIGNED-NEXT: buffer_load_ubyte v123, v2, s[0:3], 0 offen offset:153 +; ALIGNED-NEXT: buffer_load_ubyte v111, v2, s[0:3], 0 offen offset:154 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v93, 8, v105 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v123, 8, v125 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v106, 8, v91 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v124, 8, v111 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1144 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1180 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v89, v2, s[0:3], 0 offen offset:160 -; ALIGNED-NEXT: buffer_load_ubyte v78, v2, s[0:3], 0 offen offset:161 -; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:162 -; ALIGNED-NEXT: buffer_load_ubyte v74, v2, s[0:3], 0 offen offset:163 -; ALIGNED-NEXT: buffer_load_ubyte v79, v2, s[0:3], 0 offen offset:164 -; ALIGNED-NEXT: buffer_load_ubyte v75, v2, s[0:3], 0 offen offset:165 -; ALIGNED-NEXT: buffer_load_ubyte v77, v2, s[0:3], 0 offen offset:166 -; ALIGNED-NEXT: buffer_load_ubyte v72, v2, s[0:3], 0 offen offset:167 +; ALIGNED-NEXT: 
buffer_load_ubyte v110, v2, s[0:3], 0 offen offset:160 +; ALIGNED-NEXT: buffer_load_ubyte v105, v2, s[0:3], 0 offen offset:161 +; ALIGNED-NEXT: buffer_load_ubyte v94, v2, s[0:3], 0 offen offset:162 +; ALIGNED-NEXT: buffer_load_ubyte v93, v2, s[0:3], 0 offen offset:163 +; ALIGNED-NEXT: buffer_load_ubyte v108, v2, s[0:3], 0 offen offset:164 +; ALIGNED-NEXT: buffer_load_ubyte v95, v2, s[0:3], 0 offen offset:165 +; ALIGNED-NEXT: buffer_load_ubyte v104, v2, s[0:3], 0 offen offset:166 +; ALIGNED-NEXT: buffer_load_ubyte v92, v2, s[0:3], 0 offen offset:167 ; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v78, 8, v89 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v105, 8, v110 ; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v74, 8, v73 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v93, 8, v94 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v72, 8, v77 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1156 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v75, 8, v79 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v92, 8, v104 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1192 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v95, 8, v108 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1160 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1196 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v63, v2, s[0:3], 0 offen offset:172 -; ALIGNED-NEXT: buffer_load_ubyte v61, v2, s[0:3], 0 offen offset:173 -; ALIGNED-NEXT: buffer_load_ubyte v62, v2, s[0:3], 0 offen offset:174 -; ALIGNED-NEXT: buffer_load_ubyte v59, v2, s[0:3], 0 offen offset:175 -; ALIGNED-NEXT: buffer_load_ubyte v57, v2, s[0:3], 0 offen offset:171 +; ALIGNED-NEXT: buffer_load_ubyte v90, v2, s[0:3], 0 offen offset:172 +; ALIGNED-NEXT: buffer_load_ubyte v79, v2, s[0:3], 0 offen offset:173 +; ALIGNED-NEXT: buffer_load_ubyte v88, v2, s[0:3], 0 offen offset:174 +; ALIGNED-NEXT: buffer_load_ubyte v78, v2, s[0:3], 0 offen offset:175 +; ALIGNED-NEXT: buffer_load_ubyte v75, v2, s[0:3], 0 offen offset:171 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v61, 8, v63 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v79, 8, v90 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v59, 8, v62 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v78, 8, v88 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1164 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1200 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v58, v2, s[0:3], 0 offen offset:168 -; ALIGNED-NEXT: buffer_load_ubyte v56, v2, s[0:3], 0 offen offset:169 -; ALIGNED-NEXT: buffer_load_ubyte v47, v2, s[0:3], 0 offen offset:170 +; ALIGNED-NEXT: buffer_load_ubyte v76, v2, s[0:3], 0 offen offset:168 +; ALIGNED-NEXT: buffer_load_ubyte v72, v2, s[0:3], 0 offen offset:169 +; ALIGNED-NEXT: buffer_load_ubyte v63, v2, s[0:3], 0 offen offset:170 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v56, 8, v58 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v72, 8, v76 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v57, 8, v47 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v75, 8, v63 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1168 ; 4-byte 
Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1204 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v46, v2, s[0:3], 0 offen offset:176 -; ALIGNED-NEXT: buffer_load_ubyte v43, v2, s[0:3], 0 offen offset:177 -; ALIGNED-NEXT: buffer_load_ubyte v119, v2, s[0:3], 0 offen offset:178 -; ALIGNED-NEXT: buffer_load_ubyte v40, v2, s[0:3], 0 offen offset:179 -; ALIGNED-NEXT: buffer_load_ubyte v44, v2, s[0:3], 0 offen offset:180 -; ALIGNED-NEXT: buffer_load_ubyte v41, v2, s[0:3], 0 offen offset:181 -; ALIGNED-NEXT: buffer_load_ubyte v42, v2, s[0:3], 0 offen offset:182 -; ALIGNED-NEXT: buffer_load_ubyte v118, v2, s[0:3], 0 offen offset:183 +; ALIGNED-NEXT: buffer_load_ubyte v62, v2, s[0:3], 0 offen offset:176 +; ALIGNED-NEXT: buffer_load_ubyte v60, v2, s[0:3], 0 offen offset:177 +; ALIGNED-NEXT: buffer_load_ubyte v57, v2, s[0:3], 0 offen offset:178 +; ALIGNED-NEXT: buffer_load_ubyte v56, v2, s[0:3], 0 offen offset:179 +; ALIGNED-NEXT: buffer_load_ubyte v61, v2, s[0:3], 0 offen offset:180 +; ALIGNED-NEXT: buffer_load_ubyte v59, v2, s[0:3], 0 offen offset:181 +; ALIGNED-NEXT: buffer_load_ubyte v58, v2, s[0:3], 0 offen offset:182 +; ALIGNED-NEXT: buffer_load_ubyte v47, v2, s[0:3], 0 offen offset:183 ; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v46 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v60, 8, v62 ; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v40, 8, v119 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v56, 8, v57 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v118, 8, v42 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1172 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v41, 8, v44 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v47, 8, v58 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1208 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v59, 8, v61 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1176 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1212 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v117, v2, s[0:3], 0 offen offset:188 -; ALIGNED-NEXT: buffer_load_ubyte v115, v2, s[0:3], 0 offen offset:189 -; ALIGNED-NEXT: buffer_load_ubyte v116, v2, s[0:3], 0 offen offset:190 -; ALIGNED-NEXT: buffer_load_ubyte v114, v2, s[0:3], 0 offen offset:191 -; ALIGNED-NEXT: buffer_load_ubyte v112, v2, s[0:3], 0 offen offset:187 +; ALIGNED-NEXT: buffer_load_ubyte v45, v2, s[0:3], 0 offen offset:188 +; ALIGNED-NEXT: buffer_load_ubyte v43, v2, s[0:3], 0 offen offset:189 +; ALIGNED-NEXT: buffer_load_ubyte v44, v2, s[0:3], 0 offen offset:190 +; ALIGNED-NEXT: buffer_load_ubyte v42, v2, s[0:3], 0 offen offset:191 +; ALIGNED-NEXT: buffer_load_ubyte v40, v2, s[0:3], 0 offen offset:187 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v115, 8, v117 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v45 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v114, 8, v116 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v42, 8, v44 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1180 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1216 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v113, v2, s[0:3], 0 offen offset:184 -; 
ALIGNED-NEXT: buffer_load_ubyte v103, v2, s[0:3], 0 offen offset:185 -; ALIGNED-NEXT: buffer_load_ubyte v102, v2, s[0:3], 0 offen offset:186 +; ALIGNED-NEXT: buffer_load_ubyte v41, v2, s[0:3], 0 offen offset:184 +; ALIGNED-NEXT: buffer_load_ubyte v119, v2, s[0:3], 0 offen offset:185 +; ALIGNED-NEXT: buffer_load_ubyte v118, v2, s[0:3], 0 offen offset:186 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v103, 8, v113 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v119, 8, v41 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v112, 8, v102 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v40, 8, v118 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1184 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v100, v2, s[0:3], 0 offen offset:192 -; ALIGNED-NEXT: buffer_load_ubyte v98, v2, s[0:3], 0 offen offset:193 -; ALIGNED-NEXT: buffer_load_ubyte v87, v2, s[0:3], 0 offen offset:194 -; ALIGNED-NEXT: buffer_load_ubyte v86, v2, s[0:3], 0 offen offset:195 -; ALIGNED-NEXT: buffer_load_ubyte v99, v2, s[0:3], 0 offen offset:196 -; ALIGNED-NEXT: buffer_load_ubyte v97, v2, s[0:3], 0 offen offset:197 -; ALIGNED-NEXT: buffer_load_ubyte v96, v2, s[0:3], 0 offen offset:198 -; ALIGNED-NEXT: buffer_load_ubyte v85, v2, s[0:3], 0 offen offset:199 -; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v98, 8, v100 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v86, 8, v87 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v97, 8, v99 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v85, 8, v96 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1188 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1192 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v83, v2, s[0:3], 0 offen offset:204 -; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:205 -; ALIGNED-NEXT: buffer_load_ubyte v82, v2, s[0:3], 0 offen offset:206 -; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:207 -; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:203 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v81, 8, v83 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v80, 8, v82 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1196 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v70, v2, s[0:3], 0 offen offset:200 -; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:201 -; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:202 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v69, 8, v70 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v71, 8, v68 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1200 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v67, v2, s[0:3], 0 offen offset:212 -; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:213 -; ALIGNED-NEXT: buffer_load_ubyte v65, v2, s[0:3], 0 offen offset:214 -; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:215 -; ALIGNED-NEXT: buffer_load_ubyte v55, v2, 
s[0:3], 0 offen offset:211 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v54, 8, v67 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v52, 8, v65 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1204 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v66, v2, s[0:3], 0 offen offset:216 -; ALIGNED-NEXT: buffer_load_ubyte v53, v2, s[0:3], 0 offen offset:217 -; ALIGNED-NEXT: buffer_load_ubyte v49, v2, s[0:3], 0 offen offset:218 -; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:219 -; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:220 -; ALIGNED-NEXT: buffer_load_ubyte v51, v2, s[0:3], 0 offen offset:221 -; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:222 -; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:223 -; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v53, 8, v66 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v48, 8, v49 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v51, 8, v64 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v39, 8, v50 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1208 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1212 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:208 -; ALIGNED-NEXT: buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:209 -; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:210 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v36, 8, v38 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v55, 8, v37 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1216 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:224 -; ALIGNED-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:225 -; ALIGNED-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:226 -; ALIGNED-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:227 -; ALIGNED-NEXT: buffer_load_ubyte v34, v2, s[0:3], 0 offen offset:228 -; ALIGNED-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:229 -; ALIGNED-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:230 -; ALIGNED-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:231 -; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v33, 8, v35 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v30, 8, v29 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v31, 8, v34 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v28, 8, v32 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1220 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x17 -; ALIGNED-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:236 -; ALIGNED-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:237 -; ALIGNED-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:238 -; ALIGNED-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:239 -; ALIGNED-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:235 -; ALIGNED-NEXT: 
buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:232 -; ALIGNED-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:233 -; ALIGNED-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:234 -; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:240 -; ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:241 -; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:242 +; ALIGNED-NEXT: s_clause 0x3e +; ALIGNED-NEXT: buffer_load_ubyte v116, v2, s[0:3], 0 offen offset:192 +; ALIGNED-NEXT: buffer_load_ubyte v113, v2, s[0:3], 0 offen offset:193 +; ALIGNED-NEXT: buffer_load_ubyte v101, v2, s[0:3], 0 offen offset:194 +; ALIGNED-NEXT: buffer_load_ubyte v102, v2, s[0:3], 0 offen offset:195 +; ALIGNED-NEXT: buffer_load_ubyte v114, v2, s[0:3], 0 offen offset:196 +; ALIGNED-NEXT: buffer_load_ubyte v103, v2, s[0:3], 0 offen offset:197 +; ALIGNED-NEXT: buffer_load_ubyte v112, v2, s[0:3], 0 offen offset:198 +; ALIGNED-NEXT: buffer_load_ubyte v100, v2, s[0:3], 0 offen offset:199 +; ALIGNED-NEXT: buffer_load_ubyte v98, v2, s[0:3], 0 offen offset:204 +; ALIGNED-NEXT: buffer_load_ubyte v97, v2, s[0:3], 0 offen offset:205 +; ALIGNED-NEXT: buffer_load_ubyte v96, v2, s[0:3], 0 offen offset:206 +; ALIGNED-NEXT: buffer_load_ubyte v87, v2, s[0:3], 0 offen offset:207 +; ALIGNED-NEXT: buffer_load_ubyte v85, v2, s[0:3], 0 offen offset:203 +; ALIGNED-NEXT: buffer_load_ubyte v86, v2, s[0:3], 0 offen offset:200 +; ALIGNED-NEXT: buffer_load_ubyte v84, v2, s[0:3], 0 offen offset:201 +; ALIGNED-NEXT: buffer_load_ubyte v83, v2, s[0:3], 0 offen offset:202 +; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:212 +; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:213 +; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:214 +; ALIGNED-NEXT: buffer_load_ubyte v65, v2, s[0:3], 0 offen offset:215 +; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:211 +; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:216 +; ALIGNED-NEXT: buffer_load_ubyte v67, v2, s[0:3], 0 offen offset:217 +; ALIGNED-NEXT: buffer_load_ubyte v53, v2, s[0:3], 0 offen offset:218 +; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:219 +; ALIGNED-NEXT: buffer_load_ubyte v70, v2, s[0:3], 0 offen offset:220 +; ALIGNED-NEXT: buffer_load_ubyte v55, v2, s[0:3], 0 offen offset:221 +; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:222 +; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:223 +; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:208 +; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:209 +; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:210 +; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:224 +; ALIGNED-NEXT: buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:225 +; ALIGNED-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:226 +; ALIGNED-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:227 +; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:228 +; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:229 +; ALIGNED-NEXT: buffer_load_ubyte v34, v2, s[0:3], 0 offen offset:230 +; ALIGNED-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:231 +; ALIGNED-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:236 +; ALIGNED-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:237 +; ALIGNED-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:238 +; ALIGNED-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 
offen offset:239 +; ALIGNED-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:235 +; ALIGNED-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:232 +; ALIGNED-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:233 +; ALIGNED-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:234 +; ALIGNED-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:240 +; ALIGNED-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:241 +; ALIGNED-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:242 ; ALIGNED-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:243 -; ALIGNED-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:244 -; ALIGNED-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:245 +; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:244 +; ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:245 ; ALIGNED-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:246 -; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:247 -; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:252 -; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:253 +; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:247 +; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:252 +; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:253 ; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:254 -; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:255 +; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:255 ; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:251 -; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:248 -; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:249 -; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:250 -; ALIGNED-NEXT: v_lshl_or_b32 v123, v4, 16, v3 -; ALIGNED-NEXT: s_clause 0x5 +; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:248 +; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:249 +; ALIGNED-NEXT: s_clause 0x6 +; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:250 ; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen -; ALIGNED-NEXT: buffer_load_ubyte v94, v2, s[0:3], 0 offen offset:2 -; ALIGNED-NEXT: buffer_load_ubyte v88, v2, s[0:3], 0 offen offset:4 -; ALIGNED-NEXT: buffer_load_ubyte v90, v2, s[0:3], 0 offen offset:5 -; ALIGNED-NEXT: buffer_load_ubyte v92, v2, s[0:3], 0 offen offset:6 -; ALIGNED-NEXT: buffer_load_ubyte v104, v2, s[0:3], 0 offen offset:7 -; ALIGNED-NEXT: s_waitcnt vmcnt(28) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v25, 8, v27 -; ALIGNED-NEXT: s_waitcnt vmcnt(26) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v24, 8, v26 +; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:2 +; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:4 +; ALIGNED-NEXT: buffer_load_ubyte v109, v2, s[0:3], 0 offen offset:5 +; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:6 +; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:7 +; ALIGNED-NEXT: s_waitcnt vmcnt(62) +; ALIGNED-NEXT: v_lshl_or_b32 v3, v113, 8, v116 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v102, 8, v101 +; ALIGNED-NEXT: v_lshl_or_b32 v106, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v103, 8, v114 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v100, 8, v112 +; ALIGNED-NEXT: v_lshl_or_b32 v91, v4, 16, v3 +; ALIGNED-NEXT: s_waitcnt vmcnt(60) +; ALIGNED-NEXT: v_lshl_or_b32 v3, v97, 8, v98 +; ALIGNED-NEXT: s_waitcnt vmcnt(58) +; 
ALIGNED-NEXT: v_lshl_or_b32 v4, v87, 8, v96 ; ALIGNED-NEXT: s_waitcnt vmcnt(14) -; ALIGNED-NEXT: v_lshl_or_b32 v45, v12, 8, v16 +; ALIGNED-NEXT: v_lshl_or_b32 v73, v13, 8, v16 ; ALIGNED-NEXT: s_waitcnt vmcnt(10) -; ALIGNED-NEXT: v_lshl_or_b32 v60, v8, 8, v10 -; ALIGNED-NEXT: v_lshl_or_b32 v95, v4, 16, v3 -; ALIGNED-NEXT: v_lshl_or_b32 v3, v21, 8, v22 -; ALIGNED-NEXT: v_lshl_or_b32 v4, v23, 8, v20 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: buffer_store_dword v88, off, s[0:3], s32 offset:1088 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v90, off, s[0:3], s32 offset:1096 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: buffer_store_dword v92, off, s[0:3], s32 offset:1100 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v76, v4, 16, v3 -; ALIGNED-NEXT: v_lshl_or_b32 v3, v17, 8, v19 -; ALIGNED-NEXT: v_lshl_or_b32 v4, v14, 8, v13 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v104, off, s[0:3], s32 offset:1108 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v101, v4, 16, v3 -; ALIGNED-NEXT: v_lshl_or_b32 v4, v15, 8, v18 -; ALIGNED-NEXT: v_lshl_or_b32 v84, v45, 16, v4 -; ALIGNED-NEXT: v_lshl_or_b32 v45, v9, 8, v11 -; ALIGNED-NEXT: v_lshl_or_b32 v4, v60, 16, v45 -; ALIGNED-NEXT: v_lshl_or_b32 v45, v5, 8, v6 -; ALIGNED-NEXT: v_lshl_or_b32 v60, v7, 8, v1 -; ALIGNED-NEXT: v_lshl_or_b32 v3, v60, 16, v45 +; ALIGNED-NEXT: v_lshl_or_b32 v77, v9, 8, v10 +; ALIGNED-NEXT: s_waitcnt vmcnt(3) +; ALIGNED-NEXT: buffer_store_dword v107, off, s[0:3], s32 offset:1088 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v89, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v84, 8, v86 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v85, 8, v83 +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1112 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v109, off, s[0:3], s32 offset:1096 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v120, off, s[0:3], s32 offset:1132 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v74, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v68, 8, v81 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v65, 8, v71 +; ALIGNED-NEXT: v_lshl_or_b32 v46, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v67, 8, v80 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v54, 8, v53 +; ALIGNED-NEXT: v_lshl_or_b32 v117, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v55, 8, v70 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v52, 8, v64 +; ALIGNED-NEXT: v_lshl_or_b32 v115, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v39, 8, v50 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v69, 8, v48 +; ALIGNED-NEXT: v_lshl_or_b32 v99, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v36, 8, v38 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v32, 8, v33 +; ALIGNED-NEXT: v_lshl_or_b32 v82, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v35, 8, v37 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v31, 8, v34 +; ALIGNED-NEXT: v_lshl_or_b32 v66, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v29, 8, v30 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v26, 8, v28 +; ALIGNED-NEXT: v_lshl_or_b32 v51, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v23, 8, v24 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v25, 8, v21 +; ALIGNED-NEXT: v_lshl_or_b32 v49, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v18, 8, v20 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v14, 8, v15 +; ALIGNED-NEXT: v_lshl_or_b32 v27, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v17, 8, v19 +; ALIGNED-NEXT: v_lshl_or_b32 v22, v73, 16, v4 +; ALIGNED-NEXT: v_lshl_or_b32 v73, v11, 8, v12 +; ALIGNED-NEXT: v_lshl_or_b32 
v4, v77, 16, v73 +; ALIGNED-NEXT: v_lshl_or_b32 v73, v6, 8, v8 +; ALIGNED-NEXT: v_lshl_or_b32 v77, v7, 8, v5 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v77, 16, v73 ; ALIGNED-NEXT: s_clause 0x1 -; ALIGNED-NEXT: buffer_load_ubyte v45, v2, s[0:3], 0 offen offset:1 -; ALIGNED-NEXT: buffer_load_ubyte v60, v2, s[0:3], 0 offen offset:3 +; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:1 +; ALIGNED-NEXT: buffer_load_ubyte v77, v2, s[0:3], 0 offen offset:3 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1068 ; 4-byte Folded Spill -; ALIGNED-NEXT: buffer_store_dword v94, off, s[0:3], s32 offset:1092 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:1076 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:1080 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v45, v45, 8, v0 -; ALIGNED-NEXT: v_lshl_or_b32 v60, v60, 8, v94 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v60, 16, v45 -; ALIGNED-NEXT: v_lshl_or_b32 v45, v90, 8, v88 -; ALIGNED-NEXT: v_lshl_or_b32 v60, v104, 8, v92 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1120 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v60, 16, v45 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1128 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v122, v2, s[0:3], 0 offen offset:12 -; ALIGNED-NEXT: buffer_load_ubyte v111, v2, s[0:3], 0 offen offset:13 -; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:14 -; ALIGNED-NEXT: buffer_load_ubyte v110, v2, s[0:3], 0 offen offset:15 -; ALIGNED-NEXT: buffer_load_ubyte v94, v2, s[0:3], 0 offen offset:11 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v45, v111, 8, v122 +; ALIGNED-NEXT: buffer_store_dword v121, off, s[0:3], s32 offset:1092 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v60, v110, 8, v120 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v60, 16, v45 +; ALIGNED-NEXT: buffer_store_dword v73, off, s[0:3], s32 offset:1076 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: buffer_store_dword v77, off, s[0:3], s32 offset:1080 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v73, v73, 8, v0 +; ALIGNED-NEXT: v_lshl_or_b32 v77, v77, 8, v121 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v77, 16, v73 +; ALIGNED-NEXT: v_lshl_or_b32 v73, v109, 8, v107 +; ALIGNED-NEXT: v_lshl_or_b32 v77, v1, 8, v120 +; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:12 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1128 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v77, 16, v73 +; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:13 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1140 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:14 +; ALIGNED-NEXT: s_waitcnt vmcnt(2) +; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1156 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(1) +; ALIGNED-NEXT: buffer_store_dword v73, off, s[0:3], s32 offset:1168 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v73, v73, 8, v1 +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1164 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:15 +; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 
offen offset:11 +; ALIGNED-NEXT: s_waitcnt vmcnt(1) +; ALIGNED-NEXT: v_lshl_or_b32 v77, v107, 8, v0 +; ALIGNED-NEXT: v_mov_b32_e32 v1, v107 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v77, 16, v73 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1176 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v104, v2, s[0:3], 0 offen offset:8 -; ALIGNED-NEXT: buffer_load_ubyte v92, v2, s[0:3], 0 offen offset:9 -; ALIGNED-NEXT: buffer_load_ubyte v90, v2, s[0:3], 0 offen offset:10 +; ALIGNED-NEXT: buffer_load_ubyte v122, v2, s[0:3], 0 offen offset:8 +; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:9 +; ALIGNED-NEXT: buffer_load_ubyte v109, v2, s[0:3], 0 offen offset:10 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v45, v92, 8, v104 +; ALIGNED-NEXT: v_lshl_or_b32 v73, v120, 8, v122 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v60, v94, 8, v90 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v60, 16, v45 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1148 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v77, v121, 8, v109 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v77, 16, v73 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1184 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v60, v2, s[0:3], 0 offen offset:18 -; ALIGNED-NEXT: buffer_load_ubyte v88, v2, s[0:3], 0 offen offset:16 -; ALIGNED-NEXT: buffer_load_ubyte v45, v2, s[0:3], 0 offen offset:17 +; ALIGNED-NEXT: buffer_load_ubyte v77, v2, s[0:3], 0 offen offset:18 +; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:16 +; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:17 ; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:232 ; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 -; ALIGNED-NEXT: buffer_store_dword v84, off, s[0:3], s32 offset:228 -; ALIGNED-NEXT: buffer_store_dword v101, off, s[0:3], s32 offset:224 +; ALIGNED-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:228 +; ALIGNED-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:224 ; ALIGNED-NEXT: v_add_nc_u32_e32 v2, 0x100, v2 ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v127, 8, v60 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v126, 8, v77 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v127, v45, 8, v88 -; ALIGNED-NEXT: v_lshl_or_b32 v127, v0, 16, v127 +; ALIGNED-NEXT: v_lshl_or_b32 v126, v73, 8, v107 +; ALIGNED-NEXT: v_lshl_or_b32 v126, v0, 16, v126 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1228 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: v_add_co_u32 v3, vcc_lo, v0, s4 @@ -4587,190 +4565,184 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5) ; ALIGNED-NEXT: s_add_u32 s4, s4, 0x100 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: v_add_co_ci_u32_e64 v4, null, s5, v0, vcc_lo -; ALIGNED-NEXT: flat_store_byte v[3:4], v1 offset:250 +; ALIGNED-NEXT: flat_store_byte v[3:4], v5 offset:250 ; ALIGNED-NEXT: flat_store_byte v[3:4], v7 offset:251 -; ALIGNED-NEXT: flat_store_byte v[3:4], v5 offset:249 -; ALIGNED-NEXT: flat_store_byte v[3:4], v8 offset:255 -; ALIGNED-NEXT: flat_store_byte v[3:4], v9 offset:253 +; ALIGNED-NEXT: flat_store_byte v[3:4], v6 offset:249 +; ALIGNED-NEXT: flat_store_byte v[3:4], v9 offset:255 +; ALIGNED-NEXT: flat_store_byte v[3:4], v11 offset:253 ; ALIGNED-NEXT: flat_store_byte v[3:4], v10 offset:254 -; ALIGNED-NEXT: flat_store_byte v[3:4], 
v11 offset:252 -; ALIGNED-NEXT: flat_store_byte v[3:4], v6 offset:248 -; ALIGNED-NEXT: flat_store_byte v[3:4], v13 offset:242 +; ALIGNED-NEXT: flat_store_byte v[3:4], v12 offset:252 +; ALIGNED-NEXT: flat_store_byte v[3:4], v8 offset:248 +; ALIGNED-NEXT: flat_store_byte v[3:4], v15 offset:242 ; ALIGNED-NEXT: flat_store_byte v[3:4], v14 offset:243 -; ALIGNED-NEXT: flat_store_byte v[3:4], v17 offset:241 -; ALIGNED-NEXT: flat_store_byte v[3:4], v12 offset:247 -; ALIGNED-NEXT: flat_store_byte v[3:4], v15 offset:245 +; ALIGNED-NEXT: flat_store_byte v[3:4], v18 offset:241 +; ALIGNED-NEXT: flat_store_byte v[3:4], v13 offset:247 +; ALIGNED-NEXT: flat_store_byte v[3:4], v17 offset:245 ; ALIGNED-NEXT: flat_store_byte v[3:4], v16 offset:246 -; ALIGNED-NEXT: flat_store_byte v[3:4], v18 offset:244 -; ALIGNED-NEXT: flat_store_byte v[3:4], v19 offset:240 -; ALIGNED-NEXT: buffer_store_dword v76, off, s[0:3], s32 offset:248 -; ALIGNED-NEXT: buffer_store_dword v95, off, s[0:3], s32 offset:252 -; ALIGNED-NEXT: buffer_store_dword v123, off, s[0:3], s32 offset:244 +; ALIGNED-NEXT: flat_store_byte v[3:4], v19 offset:244 +; ALIGNED-NEXT: flat_store_byte v[3:4], v20 offset:240 +; ALIGNED-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:248 +; ALIGNED-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:252 +; ALIGNED-NEXT: buffer_store_dword v66, off, s[0:3], s32 offset:244 +; ALIGNED-NEXT: buffer_store_dword v82, off, s[0:3], s32 offset:240 +; ALIGNED-NEXT: flat_store_byte v[3:4], v21 offset:234 +; ALIGNED-NEXT: flat_store_byte v[3:4], v25 offset:235 +; ALIGNED-NEXT: flat_store_byte v[3:4], v23 offset:233 +; ALIGNED-NEXT: flat_store_byte v[3:4], v26 offset:239 +; ALIGNED-NEXT: flat_store_byte v[3:4], v29 offset:237 +; ALIGNED-NEXT: flat_store_byte v[3:4], v28 offset:238 +; ALIGNED-NEXT: flat_store_byte v[3:4], v30 offset:236 +; ALIGNED-NEXT: flat_store_byte v[3:4], v24 offset:232 +; ALIGNED-NEXT: flat_store_byte v[3:4], v33 offset:226 +; ALIGNED-NEXT: flat_store_byte v[3:4], v32 offset:227 +; ALIGNED-NEXT: flat_store_byte v[3:4], v36 offset:225 +; ALIGNED-NEXT: flat_store_byte v[3:4], v31 offset:231 +; ALIGNED-NEXT: flat_store_byte v[3:4], v35 offset:229 +; ALIGNED-NEXT: flat_store_byte v[3:4], v34 offset:230 +; ALIGNED-NEXT: flat_store_byte v[3:4], v37 offset:228 +; ALIGNED-NEXT: flat_store_byte v[3:4], v38 offset:224 +; ALIGNED-NEXT: buffer_store_dword v99, off, s[0:3], s32 offset:192 +; ALIGNED-NEXT: buffer_store_dword v115, off, s[0:3], s32 offset:204 +; ALIGNED-NEXT: buffer_store_dword v117, off, s[0:3], s32 offset:200 +; ALIGNED-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:196 +; ALIGNED-NEXT: flat_store_byte v[3:4], v68 offset:213 +; ALIGNED-NEXT: flat_store_byte v[3:4], v65 offset:215 +; ALIGNED-NEXT: flat_store_byte v[3:4], v39 offset:209 +; ALIGNED-NEXT: flat_store_byte v[3:4], v69 offset:211 +; ALIGNED-NEXT: flat_store_byte v[3:4], v48 offset:210 +; ALIGNED-NEXT: flat_store_byte v[3:4], v71 offset:214 +; ALIGNED-NEXT: flat_store_byte v[3:4], v81 offset:212 +; ALIGNED-NEXT: flat_store_byte v[3:4], v53 offset:218 +; ALIGNED-NEXT: flat_store_byte v[3:4], v54 offset:219 +; ALIGNED-NEXT: flat_store_byte v[3:4], v67 offset:217 +; ALIGNED-NEXT: flat_store_byte v[3:4], v52 offset:223 +; ALIGNED-NEXT: flat_store_byte v[3:4], v55 offset:221 +; ALIGNED-NEXT: flat_store_byte v[3:4], v64 offset:222 +; ALIGNED-NEXT: flat_store_byte v[3:4], v70 offset:220 +; ALIGNED-NEXT: flat_store_byte v[3:4], v80 offset:216 +; ALIGNED-NEXT: flat_store_byte v[3:4], v50 offset:208 +; ALIGNED-NEXT: buffer_store_dword 
v74, off, s[0:3], s32 offset:216 +; ALIGNED-NEXT: buffer_store_dword v89, off, s[0:3], s32 offset:220 +; ALIGNED-NEXT: buffer_store_dword v91, off, s[0:3], s32 offset:212 +; ALIGNED-NEXT: buffer_store_dword v106, off, s[0:3], s32 offset:208 +; ALIGNED-NEXT: flat_store_byte v[3:4], v83 offset:202 +; ALIGNED-NEXT: flat_store_byte v[3:4], v85 offset:203 +; ALIGNED-NEXT: flat_store_byte v[3:4], v84 offset:201 +; ALIGNED-NEXT: flat_store_byte v[3:4], v87 offset:207 +; ALIGNED-NEXT: flat_store_byte v[3:4], v97 offset:205 +; ALIGNED-NEXT: flat_store_byte v[3:4], v96 offset:206 +; ALIGNED-NEXT: flat_store_byte v[3:4], v98 offset:204 +; ALIGNED-NEXT: flat_store_byte v[3:4], v86 offset:200 +; ALIGNED-NEXT: flat_store_byte v[3:4], v101 offset:194 +; ALIGNED-NEXT: flat_store_byte v[3:4], v102 offset:195 +; ALIGNED-NEXT: flat_store_byte v[3:4], v113 offset:193 +; ALIGNED-NEXT: flat_store_byte v[3:4], v100 offset:199 +; ALIGNED-NEXT: flat_store_byte v[3:4], v103 offset:197 +; ALIGNED-NEXT: flat_store_byte v[3:4], v112 offset:198 +; ALIGNED-NEXT: flat_store_byte v[3:4], v114 offset:196 +; ALIGNED-NEXT: flat_store_byte v[3:4], v116 offset:192 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1220 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_addc_u32 s5, s5, 0 ; ALIGNED-NEXT: v_cmp_gt_u64_e64 s6, 0x800, s[4:5] ; ALIGNED-NEXT: s_and_b32 vcc_lo, exec_lo, s6 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 -; ALIGNED-NEXT: flat_store_byte v[3:4], v20 offset:234 -; ALIGNED-NEXT: flat_store_byte v[3:4], v23 offset:235 -; ALIGNED-NEXT: flat_store_byte v[3:4], v21 offset:233 -; ALIGNED-NEXT: flat_store_byte v[3:4], v24 offset:239 -; ALIGNED-NEXT: flat_store_byte v[3:4], v25 offset:237 -; ALIGNED-NEXT: flat_store_byte v[3:4], v26 offset:238 -; ALIGNED-NEXT: flat_store_byte v[3:4], v27 offset:236 -; ALIGNED-NEXT: flat_store_byte v[3:4], v22 offset:232 -; ALIGNED-NEXT: flat_store_byte v[3:4], v29 offset:226 -; ALIGNED-NEXT: flat_store_byte v[3:4], v30 offset:227 -; ALIGNED-NEXT: flat_store_byte v[3:4], v33 offset:225 -; ALIGNED-NEXT: flat_store_byte v[3:4], v28 offset:231 -; ALIGNED-NEXT: flat_store_byte v[3:4], v31 offset:229 -; ALIGNED-NEXT: flat_store_byte v[3:4], v32 offset:230 -; ALIGNED-NEXT: flat_store_byte v[3:4], v34 offset:228 -; ALIGNED-NEXT: flat_store_byte v[3:4], v35 offset:224 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1216 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1212 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:204 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1208 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 +; ALIGNED-NEXT: flat_store_byte v[3:4], v118 offset:186 +; ALIGNED-NEXT: flat_store_byte v[3:4], v40 offset:187 +; ALIGNED-NEXT: flat_store_byte v[3:4], v119 offset:185 +; ALIGNED-NEXT: flat_store_byte v[3:4], v42 offset:191 +; ALIGNED-NEXT: flat_store_byte v[3:4], v43 offset:189 +; ALIGNED-NEXT: flat_store_byte v[3:4], v44 
offset:190 +; ALIGNED-NEXT: flat_store_byte v[3:4], v45 offset:188 +; ALIGNED-NEXT: flat_store_byte v[3:4], v41 offset:184 +; ALIGNED-NEXT: flat_store_byte v[3:4], v57 offset:178 +; ALIGNED-NEXT: flat_store_byte v[3:4], v56 offset:179 +; ALIGNED-NEXT: flat_store_byte v[3:4], v60 offset:177 +; ALIGNED-NEXT: flat_store_byte v[3:4], v47 offset:183 +; ALIGNED-NEXT: flat_store_byte v[3:4], v59 offset:181 +; ALIGNED-NEXT: flat_store_byte v[3:4], v58 offset:182 +; ALIGNED-NEXT: flat_store_byte v[3:4], v61 offset:180 +; ALIGNED-NEXT: flat_store_byte v[3:4], v62 offset:176 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1204 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 -; ALIGNED-NEXT: flat_store_byte v[3:4], v54 offset:213 -; ALIGNED-NEXT: flat_store_byte v[3:4], v52 offset:215 -; ALIGNED-NEXT: flat_store_byte v[3:4], v36 offset:209 -; ALIGNED-NEXT: flat_store_byte v[3:4], v55 offset:211 -; ALIGNED-NEXT: flat_store_byte v[3:4], v37 offset:210 -; ALIGNED-NEXT: flat_store_byte v[3:4], v65 offset:214 -; ALIGNED-NEXT: flat_store_byte v[3:4], v67 offset:212 -; ALIGNED-NEXT: flat_store_byte v[3:4], v49 offset:218 -; ALIGNED-NEXT: flat_store_byte v[3:4], v48 offset:219 -; ALIGNED-NEXT: flat_store_byte v[3:4], v53 offset:217 -; ALIGNED-NEXT: flat_store_byte v[3:4], v39 offset:223 -; ALIGNED-NEXT: flat_store_byte v[3:4], v51 offset:221 -; ALIGNED-NEXT: flat_store_byte v[3:4], v50 offset:222 -; ALIGNED-NEXT: flat_store_byte v[3:4], v64 offset:220 -; ALIGNED-NEXT: flat_store_byte v[3:4], v66 offset:216 -; ALIGNED-NEXT: flat_store_byte v[3:4], v38 offset:208 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1200 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:316 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1196 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1192 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1188 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:208 -; ALIGNED-NEXT: flat_store_byte v[3:4], v68 offset:202 -; ALIGNED-NEXT: flat_store_byte v[3:4], v71 offset:203 -; ALIGNED-NEXT: flat_store_byte v[3:4], v69 offset:201 -; ALIGNED-NEXT: flat_store_byte v[3:4], v80 offset:207 -; ALIGNED-NEXT: flat_store_byte v[3:4], v81 offset:205 -; ALIGNED-NEXT: flat_store_byte v[3:4], v82 offset:206 -; ALIGNED-NEXT: flat_store_byte v[3:4], v83 offset:204 -; ALIGNED-NEXT: flat_store_byte v[3:4], v70 offset:200 -; ALIGNED-NEXT: flat_store_byte v[3:4], v87 offset:194 -; ALIGNED-NEXT: flat_store_byte v[3:4], v86 offset:195 -; ALIGNED-NEXT: flat_store_byte v[3:4], v98 offset:193 -; ALIGNED-NEXT: flat_store_byte v[3:4], v85 offset:199 -; ALIGNED-NEXT: flat_store_byte v[3:4], v97 offset:197 -; ALIGNED-NEXT: flat_store_byte v[3:4], v96 offset:198 -; ALIGNED-NEXT: flat_store_byte v[3:4], v99 offset:196 -; ALIGNED-NEXT: flat_store_byte v[3:4], v100 offset:192 -; ALIGNED-NEXT: 
buffer_load_dword v0, off, s[0:3], s32 offset:1184 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 +; ALIGNED-NEXT: flat_store_byte v[3:4], v63 offset:170 +; ALIGNED-NEXT: flat_store_byte v[3:4], v75 offset:171 +; ALIGNED-NEXT: flat_store_byte v[3:4], v72 offset:169 +; ALIGNED-NEXT: flat_store_byte v[3:4], v78 offset:175 +; ALIGNED-NEXT: flat_store_byte v[3:4], v79 offset:173 +; ALIGNED-NEXT: flat_store_byte v[3:4], v88 offset:174 +; ALIGNED-NEXT: flat_store_byte v[3:4], v90 offset:172 +; ALIGNED-NEXT: flat_store_byte v[3:4], v76 offset:168 +; ALIGNED-NEXT: flat_store_byte v[3:4], v94 offset:162 +; ALIGNED-NEXT: flat_store_byte v[3:4], v93 offset:163 +; ALIGNED-NEXT: flat_store_byte v[3:4], v105 offset:161 +; ALIGNED-NEXT: flat_store_byte v[3:4], v92 offset:167 +; ALIGNED-NEXT: flat_store_byte v[3:4], v95 offset:165 +; ALIGNED-NEXT: flat_store_byte v[3:4], v104 offset:166 +; ALIGNED-NEXT: flat_store_byte v[3:4], v108 offset:164 +; ALIGNED-NEXT: flat_store_byte v[3:4], v110 offset:160 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1180 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1176 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1172 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 -; ALIGNED-NEXT: flat_store_byte v[3:4], v102 offset:186 -; ALIGNED-NEXT: flat_store_byte v[3:4], v112 offset:187 -; ALIGNED-NEXT: flat_store_byte v[3:4], v103 offset:185 -; ALIGNED-NEXT: flat_store_byte v[3:4], v114 offset:191 -; ALIGNED-NEXT: flat_store_byte v[3:4], v115 offset:189 -; ALIGNED-NEXT: flat_store_byte v[3:4], v116 offset:190 -; ALIGNED-NEXT: flat_store_byte v[3:4], v117 offset:188 -; ALIGNED-NEXT: flat_store_byte v[3:4], v113 offset:184 -; ALIGNED-NEXT: flat_store_byte v[3:4], v119 offset:178 -; ALIGNED-NEXT: flat_store_byte v[3:4], v40 offset:179 -; ALIGNED-NEXT: flat_store_byte v[3:4], v43 offset:177 -; ALIGNED-NEXT: flat_store_byte v[3:4], v118 offset:183 -; ALIGNED-NEXT: flat_store_byte v[3:4], v41 offset:181 -; ALIGNED-NEXT: flat_store_byte v[3:4], v42 offset:182 -; ALIGNED-NEXT: flat_store_byte v[3:4], v44 offset:180 -; ALIGNED-NEXT: flat_store_byte v[3:4], v46 offset:176 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1168 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1148 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1164 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1136 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:316 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1160 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 
offset:256 +; ALIGNED-NEXT: flat_store_byte v[3:4], v111 offset:154 +; ALIGNED-NEXT: flat_store_byte v[3:4], v124 offset:155 +; ALIGNED-NEXT: flat_store_byte v[3:4], v123 offset:153 +; ALIGNED-NEXT: flat_store_byte v[3:4], v127 offset:159 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1152 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1156 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:157 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1160 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 -; ALIGNED-NEXT: flat_store_byte v[3:4], v47 offset:170 -; ALIGNED-NEXT: flat_store_byte v[3:4], v57 offset:171 -; ALIGNED-NEXT: flat_store_byte v[3:4], v56 offset:169 -; ALIGNED-NEXT: flat_store_byte v[3:4], v59 offset:175 -; ALIGNED-NEXT: flat_store_byte v[3:4], v61 offset:173 -; ALIGNED-NEXT: flat_store_byte v[3:4], v62 offset:174 -; ALIGNED-NEXT: flat_store_byte v[3:4], v63 offset:172 -; ALIGNED-NEXT: flat_store_byte v[3:4], v58 offset:168 -; ALIGNED-NEXT: flat_store_byte v[3:4], v73 offset:162 -; ALIGNED-NEXT: flat_store_byte v[3:4], v74 offset:163 -; ALIGNED-NEXT: flat_store_byte v[3:4], v78 offset:161 -; ALIGNED-NEXT: flat_store_byte v[3:4], v72 offset:167 -; ALIGNED-NEXT: flat_store_byte v[3:4], v75 offset:165 -; ALIGNED-NEXT: flat_store_byte v[3:4], v77 offset:166 -; ALIGNED-NEXT: flat_store_byte v[3:4], v79 offset:164 -; ALIGNED-NEXT: flat_store_byte v[3:4], v89 offset:160 +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:158 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1144 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1136 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1132 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:156 +; ALIGNED-NEXT: flat_store_byte v[3:4], v125 offset:152 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1108 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1124 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:146 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1100 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 -; ALIGNED-NEXT: flat_store_byte v[3:4], v91 offset:154 -; ALIGNED-NEXT: flat_store_byte v[3:4], v106 offset:155 -; ALIGNED-NEXT: flat_store_byte v[3:4], v93 offset:153 -; ALIGNED-NEXT: flat_store_byte v[3:4], v107 offset:159 -; ALIGNED-NEXT: flat_store_byte v[3:4], v109 offset:157 -; ALIGNED-NEXT: flat_store_byte v[3:4], v108 offset:158 -; ALIGNED-NEXT: flat_store_byte v[3:4], v121 offset:156 -; ALIGNED-NEXT: flat_store_byte v[3:4], v105 offset:152 -; ALIGNED-NEXT: flat_store_byte v[3:4], v125 offset:146 -; ALIGNED-NEXT: flat_store_byte v[3:4], v126 offset:147 +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:147 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1084 ; 4-byte Folded 
Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:145 -; ALIGNED-NEXT: flat_store_byte v[3:4], v124 offset:151 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1112 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1124 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:149 +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:151 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1116 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:149 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1120 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:150 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1104 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) @@ -5207,7 +5179,7 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5) ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:388 -; ALIGNED-NEXT: buffer_store_dword v127, off, s[0:3], s32 offset:384 +; ALIGNED-NEXT: buffer_store_dword v126, off, s[0:3], s32 offset:384 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:26 @@ -5232,11 +5204,11 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5) ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:24 -; ALIGNED-NEXT: flat_store_byte v[3:4], v60 offset:18 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1152 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v77 offset:18 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1188 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:19 -; ALIGNED-NEXT: flat_store_byte v[3:4], v45 offset:17 +; ALIGNED-NEXT: flat_store_byte v[3:4], v73 offset:17 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:23 @@ -5249,27 +5221,33 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5) ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:20 -; ALIGNED-NEXT: flat_store_byte v[3:4], v88 offset:16 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1148 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v107 offset:16 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1184 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1140 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1176 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1128 
; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1140 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1120 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1128 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 -; ALIGNED-NEXT: flat_store_byte v[3:4], v90 offset:10 -; ALIGNED-NEXT: flat_store_byte v[3:4], v94 offset:11 -; ALIGNED-NEXT: flat_store_byte v[3:4], v111 offset:13 -; ALIGNED-NEXT: flat_store_byte v[3:4], v92 offset:9 -; ALIGNED-NEXT: flat_store_byte v[3:4], v110 offset:15 -; ALIGNED-NEXT: flat_store_byte v[3:4], v120 offset:14 -; ALIGNED-NEXT: flat_store_byte v[3:4], v122 offset:12 -; ALIGNED-NEXT: flat_store_byte v[3:4], v104 offset:8 +; ALIGNED-NEXT: flat_store_byte v[3:4], v109 offset:10 +; ALIGNED-NEXT: flat_store_byte v[3:4], v121 offset:11 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1168 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:13 +; ALIGNED-NEXT: flat_store_byte v[3:4], v120 offset:9 +; ALIGNED-NEXT: flat_store_byte v[3:4], v1 offset:15 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1164 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:14 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1156 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:12 +; ALIGNED-NEXT: flat_store_byte v[3:4], v122 offset:8 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1092 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:2 @@ -5279,13 +5257,13 @@ define void @memcpy_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5) ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1076 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:1 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1108 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1112 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:7 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1096 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:5 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1100 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1132 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:6 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1088 ; 4-byte Folded Reload @@ -12939,7 +12917,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_store_dword v71, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_store_dword v80, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_store_dword v81, off, s[0:3], s32 offset:1000 ; 4-byte Folded Spill -; ALIGNED-NEXT: buffer_store_dword v127, off, s[0:3], s32 offset:1400 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v127, off, s[0:3], s32 
offset:1412 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:87 ; ALIGNED-NEXT: s_waitcnt vmcnt(7) ; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1032 ; 4-byte Folded Spill @@ -13170,9 +13148,13 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1252 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v7 +; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:146 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v8, 8, v6 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1312 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:150 +; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:151 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1308 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v5, 8, v3 ; ALIGNED-NEXT: s_clause 0x1 ; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:142 @@ -13181,584 +13163,557 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: s_clause 0x1 ; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:141 ; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:139 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1328 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1320 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:140 +; ALIGNED-NEXT: s_waitcnt vmcnt(6) +; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1428 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(5) +; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:1432 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1336 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1328 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1340 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1336 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1332 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1324 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1324 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1316 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v5, 8, v3 ; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:138 -; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1364 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1360 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:149 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:137 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1352 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:136 +; ALIGNED-NEXT: s_waitcnt vmcnt(3) +; ALIGNED-NEXT: 
buffer_store_dword v3, off, s[0:3], s32 offset:1372 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1376 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1424 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) ; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1356 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1344 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v3 +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:147 +; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:148 +; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:1420 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 +; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:145 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1384 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:144 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x3 -; ALIGNED-NEXT: buffer_load_ubyte v124, v2, s[0:3], 0 offen offset:145 -; ALIGNED-NEXT: buffer_load_ubyte v111, v2, s[0:3], 0 offen offset:146 -; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:147 -; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:148 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v124, 8, v0 +; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1408 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(2) +; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1416 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v120, 8, v111 +; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1404 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1404 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:149 -; ALIGNED-NEXT: buffer_load_ubyte v122, v2, s[0:3], 0 offen offset:150 -; ALIGNED-NEXT: buffer_load_ubyte v110, v2, s[0:3], 0 offen offset:151 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v4, 8, v7 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1408 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v1, v8, 8, v6 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v5, 8, v3 +; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:158 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 +; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:157 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:156 ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v121, 8, v3 +; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1452 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(1) +; ALIGNED-NEXT: buffer_store_dword v1, off, 
s[0:3], s32 offset:1448 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v110, 8, v122 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v126, v2, s[0:3], 0 offen offset:159 +; ALIGNED-NEXT: buffer_load_ubyte v124, v2, s[0:3], 0 offen offset:155 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 +; ALIGNED-NEXT: s_waitcnt vmcnt(1) +; ALIGNED-NEXT: v_lshl_or_b32 v1, v126, 8, v3 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1412 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v108, v2, s[0:3], 0 offen offset:156 -; ALIGNED-NEXT: buffer_load_ubyte v105, v2, s[0:3], 0 offen offset:157 -; ALIGNED-NEXT: buffer_load_ubyte v106, v2, s[0:3], 0 offen offset:158 -; ALIGNED-NEXT: buffer_load_ubyte v104, v2, s[0:3], 0 offen offset:159 -; ALIGNED-NEXT: buffer_load_ubyte v94, v2, s[0:3], 0 offen offset:155 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v105, 8, v108 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v104, 8, v106 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1416 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v95, v2, s[0:3], 0 offen offset:152 -; ALIGNED-NEXT: buffer_load_ubyte v92, v2, s[0:3], 0 offen offset:153 -; ALIGNED-NEXT: buffer_load_ubyte v90, v2, s[0:3], 0 offen offset:154 +; ALIGNED-NEXT: buffer_load_ubyte v123, v2, s[0:3], 0 offen offset:152 +; ALIGNED-NEXT: buffer_load_ubyte v122, v2, s[0:3], 0 offen offset:153 +; ALIGNED-NEXT: buffer_load_ubyte v121, v2, s[0:3], 0 offen offset:154 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v92, 8, v95 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v122, 8, v123 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v94, 8, v90 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v124, 8, v121 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1420 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v89, v2, s[0:3], 0 offen offset:160 -; ALIGNED-NEXT: buffer_load_ubyte v79, v2, s[0:3], 0 offen offset:161 -; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:162 -; ALIGNED-NEXT: buffer_load_ubyte v74, v2, s[0:3], 0 offen offset:163 -; ALIGNED-NEXT: buffer_load_ubyte v88, v2, s[0:3], 0 offen offset:164 -; ALIGNED-NEXT: buffer_load_ubyte v76, v2, s[0:3], 0 offen offset:165 -; ALIGNED-NEXT: buffer_load_ubyte v75, v2, s[0:3], 0 offen offset:166 -; ALIGNED-NEXT: buffer_load_ubyte v72, v2, s[0:3], 0 offen offset:167 +; ALIGNED-NEXT: buffer_load_ubyte v120, v2, s[0:3], 0 offen offset:160 +; ALIGNED-NEXT: buffer_load_ubyte v108, v2, s[0:3], 0 offen offset:161 +; ALIGNED-NEXT: buffer_load_ubyte v104, v2, s[0:3], 0 offen offset:162 +; ALIGNED-NEXT: buffer_load_ubyte v105, v2, s[0:3], 0 offen offset:163 +; ALIGNED-NEXT: buffer_load_ubyte v111, v2, s[0:3], 0 offen offset:164 +; ALIGNED-NEXT: buffer_load_ubyte v106, v2, s[0:3], 0 offen offset:165 +; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:166 +; ALIGNED-NEXT: buffer_load_ubyte v94, v2, s[0:3], 0 offen 
offset:167 ; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v79, 8, v89 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v108, 8, v120 ; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v74, 8, v73 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v105, 8, v104 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v72, 8, v75 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1424 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 8, v88 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v94, 8, v107 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v106, 8, v111 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1428 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v63, v2, s[0:3], 0 offen offset:172 -; ALIGNED-NEXT: buffer_load_ubyte v61, v2, s[0:3], 0 offen offset:173 -; ALIGNED-NEXT: buffer_load_ubyte v62, v2, s[0:3], 0 offen offset:174 -; ALIGNED-NEXT: buffer_load_ubyte v60, v2, s[0:3], 0 offen offset:175 -; ALIGNED-NEXT: buffer_load_ubyte v58, v2, s[0:3], 0 offen offset:171 +; ALIGNED-NEXT: buffer_load_ubyte v92, v2, s[0:3], 0 offen offset:172 +; ALIGNED-NEXT: buffer_load_ubyte v89, v2, s[0:3], 0 offen offset:173 +; ALIGNED-NEXT: buffer_load_ubyte v90, v2, s[0:3], 0 offen offset:174 +; ALIGNED-NEXT: buffer_load_ubyte v88, v2, s[0:3], 0 offen offset:175 +; ALIGNED-NEXT: buffer_load_ubyte v78, v2, s[0:3], 0 offen offset:171 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v61, 8, v63 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v89, 8, v92 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v60, 8, v62 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v88, 8, v90 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1432 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v59, v2, s[0:3], 0 offen offset:168 -; ALIGNED-NEXT: buffer_load_ubyte v57, v2, s[0:3], 0 offen offset:169 -; ALIGNED-NEXT: buffer_load_ubyte v56, v2, s[0:3], 0 offen offset:170 +; ALIGNED-NEXT: buffer_load_ubyte v79, v2, s[0:3], 0 offen offset:168 +; ALIGNED-NEXT: buffer_load_ubyte v76, v2, s[0:3], 0 offen offset:169 +; ALIGNED-NEXT: buffer_load_ubyte v75, v2, s[0:3], 0 offen offset:170 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v57, 8, v59 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 8, v79 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v58, 8, v56 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v78, 8, v75 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v47, v2, s[0:3], 0 offen offset:176 -; ALIGNED-NEXT: buffer_load_ubyte v45, v2, s[0:3], 0 offen offset:177 -; ALIGNED-NEXT: buffer_load_ubyte v41, v2, s[0:3], 0 offen offset:178 -; ALIGNED-NEXT: buffer_load_ubyte v42, v2, s[0:3], 0 offen offset:179 -; ALIGNED-NEXT: buffer_load_ubyte v46, v2, s[0:3], 0 offen offset:180 -; ALIGNED-NEXT: buffer_load_ubyte v43, v2, s[0:3], 0 
offen offset:181 -; ALIGNED-NEXT: buffer_load_ubyte v44, v2, s[0:3], 0 offen offset:182 -; ALIGNED-NEXT: buffer_load_ubyte v40, v2, s[0:3], 0 offen offset:183 +; ALIGNED-NEXT: buffer_load_ubyte v74, v2, s[0:3], 0 offen offset:176 +; ALIGNED-NEXT: buffer_load_ubyte v72, v2, s[0:3], 0 offen offset:177 +; ALIGNED-NEXT: buffer_load_ubyte v61, v2, s[0:3], 0 offen offset:178 +; ALIGNED-NEXT: buffer_load_ubyte v60, v2, s[0:3], 0 offen offset:179 +; ALIGNED-NEXT: buffer_load_ubyte v73, v2, s[0:3], 0 offen offset:180 +; ALIGNED-NEXT: buffer_load_ubyte v63, v2, s[0:3], 0 offen offset:181 +; ALIGNED-NEXT: buffer_load_ubyte v62, v2, s[0:3], 0 offen offset:182 +; ALIGNED-NEXT: buffer_load_ubyte v59, v2, s[0:3], 0 offen offset:183 ; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v45, 8, v47 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v72, 8, v74 ; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v42, 8, v41 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v60, 8, v61 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v40, 8, v44 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v46 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v59, 8, v62 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v63, 8, v73 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v119, v2, s[0:3], 0 offen offset:188 -; ALIGNED-NEXT: buffer_load_ubyte v117, v2, s[0:3], 0 offen offset:189 -; ALIGNED-NEXT: buffer_load_ubyte v118, v2, s[0:3], 0 offen offset:190 -; ALIGNED-NEXT: buffer_load_ubyte v116, v2, s[0:3], 0 offen offset:191 -; ALIGNED-NEXT: buffer_load_ubyte v114, v2, s[0:3], 0 offen offset:187 +; ALIGNED-NEXT: buffer_load_ubyte v57, v2, s[0:3], 0 offen offset:188 +; ALIGNED-NEXT: buffer_load_ubyte v47, v2, s[0:3], 0 offen offset:189 +; ALIGNED-NEXT: buffer_load_ubyte v56, v2, s[0:3], 0 offen offset:190 +; ALIGNED-NEXT: buffer_load_ubyte v46, v2, s[0:3], 0 offen offset:191 +; ALIGNED-NEXT: buffer_load_ubyte v44, v2, s[0:3], 0 offen offset:187 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v117, 8, v119 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v47, 8, v57 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v116, 8, v118 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v46, 8, v56 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1488 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v115, v2, s[0:3], 0 offen offset:184 -; ALIGNED-NEXT: buffer_load_ubyte v113, v2, s[0:3], 0 offen offset:185 -; ALIGNED-NEXT: buffer_load_ubyte v112, v2, s[0:3], 0 offen offset:186 +; ALIGNED-NEXT: buffer_load_ubyte v45, v2, s[0:3], 0 offen offset:184 +; ALIGNED-NEXT: buffer_load_ubyte v43, v2, s[0:3], 0 offen offset:185 +; ALIGNED-NEXT: buffer_load_ubyte v42, v2, s[0:3], 0 offen offset:186 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v113, 8, v115 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v45 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, 
v114, 8, v112 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v44, 8, v42 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1452 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v102, v2, s[0:3], 0 offen offset:192 -; ALIGNED-NEXT: buffer_load_ubyte v100, v2, s[0:3], 0 offen offset:193 -; ALIGNED-NEXT: buffer_load_ubyte v97, v2, s[0:3], 0 offen offset:194 -; ALIGNED-NEXT: buffer_load_ubyte v96, v2, s[0:3], 0 offen offset:195 -; ALIGNED-NEXT: buffer_load_ubyte v101, v2, s[0:3], 0 offen offset:196 -; ALIGNED-NEXT: buffer_load_ubyte v99, v2, s[0:3], 0 offen offset:197 -; ALIGNED-NEXT: buffer_load_ubyte v98, v2, s[0:3], 0 offen offset:198 -; ALIGNED-NEXT: buffer_load_ubyte v87, v2, s[0:3], 0 offen offset:199 -; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v100, 8, v102 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v96, 8, v97 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v99, 8, v101 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v87, 8, v98 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v85, v2, s[0:3], 0 offen offset:204 -; ALIGNED-NEXT: buffer_load_ubyte v83, v2, s[0:3], 0 offen offset:205 -; ALIGNED-NEXT: buffer_load_ubyte v84, v2, s[0:3], 0 offen offset:206 -; ALIGNED-NEXT: buffer_load_ubyte v82, v2, s[0:3], 0 offen offset:207 -; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:203 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v83, 8, v85 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v82, 8, v84 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:200 -; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:201 -; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:202 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v71, 8, v80 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v81, 8, v69 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:212 -; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:213 -; ALIGNED-NEXT: buffer_load_ubyte v66, v2, s[0:3], 0 offen offset:214 -; ALIGNED-NEXT: buffer_load_ubyte v53, v2, s[0:3], 0 offen offset:215 -; ALIGNED-NEXT: buffer_load_ubyte v55, v2, s[0:3], 0 offen offset:211 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v54, 8, v68 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v53, 8, v66 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v67, v2, s[0:3], 0 offen offset:216 -; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:217 -; ALIGNED-NEXT: buffer_load_ubyte v49, v2, s[0:3], 0 
offen offset:218 -; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:219 -; ALIGNED-NEXT: buffer_load_ubyte v65, v2, s[0:3], 0 offen offset:220 -; ALIGNED-NEXT: buffer_load_ubyte v51, v2, s[0:3], 0 offen offset:221 -; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:222 -; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:223 -; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v64, 8, v67 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v50, 8, v49 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v51, 8, v65 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v48, 8, v52 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:208 -; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:209 -; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:210 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v37, 8, v39 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v55, 8, v38 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:224 -; ALIGNED-NEXT: buffer_load_ubyte v34, v2, s[0:3], 0 offen offset:225 -; ALIGNED-NEXT: buffer_load_ubyte v31, v2, s[0:3], 0 offen offset:226 -; ALIGNED-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:227 -; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:228 -; ALIGNED-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:229 -; ALIGNED-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:230 -; ALIGNED-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:231 -; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v34, 8, v36 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v30, 8, v31 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v4, 16, v3 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v33, 8, v35 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v29, 8, v32 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1488 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x17 -; ALIGNED-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:236 -; ALIGNED-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:237 -; ALIGNED-NEXT: buffer_load_ubyte v26, v2, s[0:3], 0 offen offset:238 -; ALIGNED-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:239 -; ALIGNED-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:235 -; ALIGNED-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:232 -; ALIGNED-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:233 -; ALIGNED-NEXT: buffer_load_ubyte v21, v2, s[0:3], 0 offen offset:234 -; ALIGNED-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:240 -; ALIGNED-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:241 -; ALIGNED-NEXT: buffer_load_ubyte v15, v2, s[0:3], 0 offen offset:242 -; ALIGNED-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:243 -; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:244 -; 
ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:245 -; ALIGNED-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:246 -; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:247 -; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:252 -; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:253 -; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:254 -; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:255 -; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:251 -; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:248 -; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:249 -; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:250 -; ALIGNED-NEXT: v_lshl_or_b32 v109, v4, 16, v3 -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1492 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_clause 0x3e +; ALIGNED-NEXT: buffer_load_ubyte v40, v2, s[0:3], 0 offen offset:192 +; ALIGNED-NEXT: buffer_load_ubyte v117, v2, s[0:3], 0 offen offset:193 +; ALIGNED-NEXT: buffer_load_ubyte v113, v2, s[0:3], 0 offen offset:194 +; ALIGNED-NEXT: buffer_load_ubyte v114, v2, s[0:3], 0 offen offset:195 +; ALIGNED-NEXT: buffer_load_ubyte v118, v2, s[0:3], 0 offen offset:196 +; ALIGNED-NEXT: buffer_load_ubyte v115, v2, s[0:3], 0 offen offset:197 +; ALIGNED-NEXT: buffer_load_ubyte v116, v2, s[0:3], 0 offen offset:198 +; ALIGNED-NEXT: buffer_load_ubyte v112, v2, s[0:3], 0 offen offset:199 +; ALIGNED-NEXT: buffer_load_ubyte v102, v2, s[0:3], 0 offen offset:204 +; ALIGNED-NEXT: buffer_load_ubyte v101, v2, s[0:3], 0 offen offset:205 +; ALIGNED-NEXT: buffer_load_ubyte v100, v2, s[0:3], 0 offen offset:206 +; ALIGNED-NEXT: buffer_load_ubyte v99, v2, s[0:3], 0 offen offset:207 +; ALIGNED-NEXT: buffer_load_ubyte v97, v2, s[0:3], 0 offen offset:203 +; ALIGNED-NEXT: buffer_load_ubyte v98, v2, s[0:3], 0 offen offset:200 +; ALIGNED-NEXT: buffer_load_ubyte v96, v2, s[0:3], 0 offen offset:201 +; ALIGNED-NEXT: buffer_load_ubyte v87, v2, s[0:3], 0 offen offset:202 +; ALIGNED-NEXT: buffer_load_ubyte v85, v2, s[0:3], 0 offen offset:212 +; ALIGNED-NEXT: buffer_load_ubyte v71, v2, s[0:3], 0 offen offset:213 +; ALIGNED-NEXT: buffer_load_ubyte v83, v2, s[0:3], 0 offen offset:214 +; ALIGNED-NEXT: buffer_load_ubyte v69, v2, s[0:3], 0 offen offset:215 +; ALIGNED-NEXT: buffer_load_ubyte v80, v2, s[0:3], 0 offen offset:211 +; ALIGNED-NEXT: buffer_load_ubyte v84, v2, s[0:3], 0 offen offset:216 +; ALIGNED-NEXT: buffer_load_ubyte v81, v2, s[0:3], 0 offen offset:217 +; ALIGNED-NEXT: buffer_load_ubyte v65, v2, s[0:3], 0 offen offset:218 +; ALIGNED-NEXT: buffer_load_ubyte v66, v2, s[0:3], 0 offen offset:219 +; ALIGNED-NEXT: buffer_load_ubyte v82, v2, s[0:3], 0 offen offset:220 +; ALIGNED-NEXT: buffer_load_ubyte v67, v2, s[0:3], 0 offen offset:221 +; ALIGNED-NEXT: buffer_load_ubyte v68, v2, s[0:3], 0 offen offset:222 +; ALIGNED-NEXT: buffer_load_ubyte v64, v2, s[0:3], 0 offen offset:223 +; ALIGNED-NEXT: buffer_load_ubyte v54, v2, s[0:3], 0 offen offset:208 +; ALIGNED-NEXT: buffer_load_ubyte v51, v2, s[0:3], 0 offen offset:209 +; ALIGNED-NEXT: buffer_load_ubyte v52, v2, s[0:3], 0 offen offset:210 +; ALIGNED-NEXT: buffer_load_ubyte v50, v2, s[0:3], 0 offen offset:224 +; ALIGNED-NEXT: buffer_load_ubyte v48, v2, s[0:3], 0 offen offset:225 +; ALIGNED-NEXT: buffer_load_ubyte v37, v2, s[0:3], 0 offen offset:226 +; ALIGNED-NEXT: 
buffer_load_ubyte v36, v2, s[0:3], 0 offen offset:227 +; ALIGNED-NEXT: buffer_load_ubyte v49, v2, s[0:3], 0 offen offset:228 +; ALIGNED-NEXT: buffer_load_ubyte v39, v2, s[0:3], 0 offen offset:229 +; ALIGNED-NEXT: buffer_load_ubyte v38, v2, s[0:3], 0 offen offset:230 +; ALIGNED-NEXT: buffer_load_ubyte v35, v2, s[0:3], 0 offen offset:231 +; ALIGNED-NEXT: buffer_load_ubyte v34, v2, s[0:3], 0 offen offset:236 +; ALIGNED-NEXT: buffer_load_ubyte v33, v2, s[0:3], 0 offen offset:237 +; ALIGNED-NEXT: buffer_load_ubyte v32, v2, s[0:3], 0 offen offset:238 +; ALIGNED-NEXT: buffer_load_ubyte v30, v2, s[0:3], 0 offen offset:239 +; ALIGNED-NEXT: buffer_load_ubyte v29, v2, s[0:3], 0 offen offset:235 +; ALIGNED-NEXT: buffer_load_ubyte v28, v2, s[0:3], 0 offen offset:232 +; ALIGNED-NEXT: buffer_load_ubyte v27, v2, s[0:3], 0 offen offset:233 +; ALIGNED-NEXT: buffer_load_ubyte v25, v2, s[0:3], 0 offen offset:234 +; ALIGNED-NEXT: buffer_load_ubyte v24, v2, s[0:3], 0 offen offset:240 +; ALIGNED-NEXT: buffer_load_ubyte v22, v2, s[0:3], 0 offen offset:241 +; ALIGNED-NEXT: buffer_load_ubyte v17, v2, s[0:3], 0 offen offset:242 +; ALIGNED-NEXT: buffer_load_ubyte v18, v2, s[0:3], 0 offen offset:243 +; ALIGNED-NEXT: buffer_load_ubyte v23, v2, s[0:3], 0 offen offset:244 +; ALIGNED-NEXT: buffer_load_ubyte v19, v2, s[0:3], 0 offen offset:245 +; ALIGNED-NEXT: buffer_load_ubyte v20, v2, s[0:3], 0 offen offset:246 +; ALIGNED-NEXT: buffer_load_ubyte v16, v2, s[0:3], 0 offen offset:247 +; ALIGNED-NEXT: buffer_load_ubyte v14, v2, s[0:3], 0 offen offset:252 +; ALIGNED-NEXT: buffer_load_ubyte v13, v2, s[0:3], 0 offen offset:253 +; ALIGNED-NEXT: buffer_load_ubyte v12, v2, s[0:3], 0 offen offset:254 +; ALIGNED-NEXT: buffer_load_ubyte v11, v2, s[0:3], 0 offen offset:255 +; ALIGNED-NEXT: buffer_load_ubyte v9, v2, s[0:3], 0 offen offset:251 +; ALIGNED-NEXT: buffer_load_ubyte v10, v2, s[0:3], 0 offen offset:248 +; ALIGNED-NEXT: buffer_load_ubyte v8, v2, s[0:3], 0 offen offset:249 +; ALIGNED-NEXT: s_clause 0x5 +; ALIGNED-NEXT: buffer_load_ubyte v7, v2, s[0:3], 0 offen offset:250 +; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:2 ; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:3 -; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:4 -; ALIGNED-NEXT: buffer_load_ubyte v123, v2, s[0:3], 0 offen offset:5 -; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:6 -; ALIGNED-NEXT: s_waitcnt vmcnt(27) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v27, 8, v28 -; ALIGNED-NEXT: s_waitcnt vmcnt(25) -; ALIGNED-NEXT: v_lshl_or_b32 v4, v25, 8, v26 +; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:4 +; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:5 +; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:6 +; ALIGNED-NEXT: s_waitcnt vmcnt(62) +; ALIGNED-NEXT: v_lshl_or_b32 v3, v117, 8, v40 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v114, 8, v113 +; ALIGNED-NEXT: v_lshl_or_b32 v110, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v115, 8, v118 +; ALIGNED-NEXT: s_waitcnt vmcnt(61) +; ALIGNED-NEXT: v_lshl_or_b32 v4, v112, 8, v116 +; ALIGNED-NEXT: v_lshl_or_b32 v93, v4, 16, v3 +; ALIGNED-NEXT: s_waitcnt vmcnt(59) +; ALIGNED-NEXT: v_lshl_or_b32 v3, v101, 8, v102 +; ALIGNED-NEXT: s_waitcnt vmcnt(57) +; ALIGNED-NEXT: v_lshl_or_b32 v4, v99, 8, v100 ; ALIGNED-NEXT: s_waitcnt vmcnt(13) -; ALIGNED-NEXT: v_lshl_or_b32 v77, v13, 8, v16 +; ALIGNED-NEXT: v_lshl_or_b32 v95, v16, 8, v20 ; ALIGNED-NEXT: s_waitcnt vmcnt(9) -; ALIGNED-NEXT: v_lshl_or_b32 v91, v9, 8, v10 -; ALIGNED-NEXT: 
v_lshl_or_b32 v93, v4, 16, v3 -; ALIGNED-NEXT: v_lshl_or_b32 v3, v22, 8, v24 -; ALIGNED-NEXT: v_lshl_or_b32 v4, v23, 8, v21 -; ALIGNED-NEXT: v_lshl_or_b32 v78, v4, 16, v3 -; ALIGNED-NEXT: v_lshl_or_b32 v3, v18, 8, v20 -; ALIGNED-NEXT: v_lshl_or_b32 v4, v14, 8, v15 +; ALIGNED-NEXT: v_lshl_or_b32 v109, v11, 8, v12 +; ALIGNED-NEXT: v_lshl_or_b32 v91, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v96, 8, v98 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v97, 8, v87 +; ALIGNED-NEXT: v_lshl_or_b32 v77, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v71, 8, v85 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v69, 8, v83 +; ALIGNED-NEXT: v_lshl_or_b32 v58, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v81, 8, v84 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v66, 8, v65 +; ALIGNED-NEXT: v_lshl_or_b32 v41, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v67, 8, v82 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v64, 8, v68 +; ALIGNED-NEXT: v_lshl_or_b32 v119, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v51, 8, v54 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v80, 8, v52 ; ALIGNED-NEXT: v_lshl_or_b32 v103, v4, 16, v3 -; ALIGNED-NEXT: v_lshl_or_b32 v4, v17, 8, v19 -; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:7 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) +; ALIGNED-NEXT: v_lshl_or_b32 v3, v48, 8, v50 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v36, 8, v37 +; ALIGNED-NEXT: v_lshl_or_b32 v86, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v39, 8, v49 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v35, 8, v38 +; ALIGNED-NEXT: v_lshl_or_b32 v70, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v33, 8, v34 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v30, 8, v32 +; ALIGNED-NEXT: v_lshl_or_b32 v55, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v27, 8, v28 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v29, 8, v25 +; ALIGNED-NEXT: v_lshl_or_b32 v53, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v22, 8, v24 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v18, 8, v17 +; ALIGNED-NEXT: v_lshl_or_b32 v31, v4, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v4, v19, 8, v23 +; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen +; ALIGNED-NEXT: v_lshl_or_b32 v26, v95, 16, v4 +; ALIGNED-NEXT: v_lshl_or_b32 v95, v13, 8, v14 +; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:7 +; ALIGNED-NEXT: s_waitcnt vmcnt(5) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1292 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1300 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(4) +; ALIGNED-NEXT: buffer_store_dword v125, off, s[0:3], s32 offset:1296 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v21, v109, 16, v95 +; ALIGNED-NEXT: v_lshl_or_b32 v95, v8, 8, v10 +; ALIGNED-NEXT: v_lshl_or_b32 v109, v9, 8, v7 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: buffer_store_dword v107, off, s[0:3], s32 offset:1296 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1304 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v123, off, s[0:3], s32 offset:1304 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v86, v77, 16, v4 -; ALIGNED-NEXT: v_lshl_or_b32 v77, v11, 8, v12 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: buffer_store_dword v125, off, s[0:3], s32 offset:1308 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v70, v91, 16, v77 -; ALIGNED-NEXT: v_lshl_or_b32 v77, v6, 8, v8 -; ALIGNED-NEXT: v_lshl_or_b32 v91, v7, 8, v5 -; ALIGNED-NEXT: v_lshl_or_b32 v4, v91, 16, v77 -; ALIGNED-NEXT: s_clause 0x1 -; ALIGNED-NEXT: buffer_load_ubyte v77, v2, s[0:3], 0 offen offset:1 -; 
ALIGNED-NEXT: buffer_load_ubyte v91, v2, s[0:3], 0 offen offset:2 -; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1260 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1332 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v15, v109, 16, v95 +; ALIGNED-NEXT: buffer_load_ubyte v95, v2, s[0:3], 0 offen offset:1 +; ALIGNED-NEXT: v_lshl_or_b32 v109, v0, 8, v1 ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1320 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1260 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: buffer_store_dword v77, off, s[0:3], s32 offset:1284 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1340 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v91, off, s[0:3], s32 offset:1300 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v77, v77, 8, v1 -; ALIGNED-NEXT: v_lshl_or_b32 v91, v0, 8, v91 -; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:12 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v91, 16, v77 -; ALIGNED-NEXT: v_lshl_or_b32 v77, v123, 8, v107 -; ALIGNED-NEXT: v_lshl_or_b32 v91, v3, 8, v125 -; ALIGNED-NEXT: buffer_load_ubyte v3, v2, s[0:3], 0 offen offset:13 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1316 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v91, 16, v77 -; ALIGNED-NEXT: buffer_load_ubyte v91, v2, s[0:3], 0 offen offset:15 +; ALIGNED-NEXT: buffer_store_dword v95, off, s[0:3], s32 offset:1284 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v95, v95, 8, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v109, 16, v95 +; ALIGNED-NEXT: v_lshl_or_b32 v95, v5, 8, v125 +; ALIGNED-NEXT: v_lshl_or_b32 v109, v4, 8, v6 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1312 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v109, 16, v95 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1348 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x1 -; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:14 -; ALIGNED-NEXT: buffer_load_ubyte v126, v2, s[0:3], 0 offen offset:11 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1360 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_clause 0x4 +; ALIGNED-NEXT: buffer_load_ubyte v6, v2, s[0:3], 0 offen offset:12 +; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:13 +; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:14 +; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:15 +; ALIGNED-NEXT: buffer_load_ubyte v5, v2, s[0:3], 0 offen offset:11 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v77, v3, 8, v1 -; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1380 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v91, off, s[0:3], s32 offset:1372 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v95, v4, 8, v6 +; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1376 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v91, v91, 8, v0 +; ALIGNED-NEXT: v_lshl_or_b32 v109, v0, 8, v1 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1368 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v91, 16, v77 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1388 ; 4-byte Folded Spill -; 
ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:8 -; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:9 -; ALIGNED-NEXT: buffer_load_ubyte v123, v2, s[0:3], 0 offen offset:10 +; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1364 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v4, v2, s[0:3], 0 offen offset:9 +; ALIGNED-NEXT: buffer_load_ubyte v1, v2, s[0:3], 0 offen offset:10 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v109, 16, v95 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1392 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_load_ubyte v0, v2, s[0:3], 0 offen offset:8 +; ALIGNED-NEXT: s_waitcnt vmcnt(2) +; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:1388 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v77, v125, 8, v1 +; ALIGNED-NEXT: v_lshl_or_b32 v109, v5, 8, v1 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v91, v126, 8, v123 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v91, 16, v77 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1392 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v95, v4, 8, v0 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1380 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v109, 16, v95 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v91, v2, s[0:3], 0 offen offset:18 -; ALIGNED-NEXT: buffer_load_ubyte v107, v2, s[0:3], 0 offen offset:16 -; ALIGNED-NEXT: buffer_load_ubyte v77, v2, s[0:3], 0 offen offset:17 -; ALIGNED-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 -; ALIGNED-NEXT: buffer_store_dword v70, off, s[0:3], s32 offset:236 -; ALIGNED-NEXT: buffer_store_dword v86, off, s[0:3], s32 offset:228 -; ALIGNED-NEXT: buffer_store_dword v103, off, s[0:3], s32 offset:224 +; ALIGNED-NEXT: buffer_load_ubyte v109, v2, s[0:3], 0 offen offset:18 +; ALIGNED-NEXT: buffer_load_ubyte v125, v2, s[0:3], 0 offen offset:16 +; ALIGNED-NEXT: buffer_load_ubyte v95, v2, s[0:3], 0 offen offset:17 +; ALIGNED-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:232 +; ALIGNED-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:236 +; ALIGNED-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:228 +; ALIGNED-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:224 ; ALIGNED-NEXT: s_clause 0x1 ; ALIGNED-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; ALIGNED-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:708 ; ALIGNED-NEXT: v_add_nc_u32_e32 v2, 0x100, v2 ; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v127, 8, v91 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v127, 8, v109 ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v127, v77, 8, v107 +; ALIGNED-NEXT: v_lshl_or_b32 v127, v95, 8, v125 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) ; ALIGNED-NEXT: v_add_co_u32 v3, vcc_lo, v3, s4 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: v_add_co_ci_u32_e64 v4, null, s5, v4, vcc_lo -; ALIGNED-NEXT: flat_store_byte v[3:4], v5 offset:250 -; ALIGNED-NEXT: flat_store_byte v[3:4], v7 offset:251 -; ALIGNED-NEXT: flat_store_byte v[3:4], v6 offset:249 -; ALIGNED-NEXT: flat_store_byte v[3:4], v9 offset:255 -; ALIGNED-NEXT: flat_store_byte v[3:4], v11 offset:253 -; ALIGNED-NEXT: flat_store_byte v[3:4], v10 offset:254 -; ALIGNED-NEXT: flat_store_byte v[3:4], v12 offset:252 -; ALIGNED-NEXT: flat_store_byte v[3:4], v8 
offset:248 -; ALIGNED-NEXT: flat_store_byte v[3:4], v15 offset:242 -; ALIGNED-NEXT: flat_store_byte v[3:4], v14 offset:243 -; ALIGNED-NEXT: flat_store_byte v[3:4], v18 offset:241 -; ALIGNED-NEXT: flat_store_byte v[3:4], v13 offset:247 -; ALIGNED-NEXT: flat_store_byte v[3:4], v17 offset:245 -; ALIGNED-NEXT: flat_store_byte v[3:4], v16 offset:246 -; ALIGNED-NEXT: flat_store_byte v[3:4], v19 offset:244 -; ALIGNED-NEXT: flat_store_byte v[3:4], v20 offset:240 -; ALIGNED-NEXT: buffer_store_dword v78, off, s[0:3], s32 offset:248 -; ALIGNED-NEXT: buffer_store_dword v93, off, s[0:3], s32 offset:252 -; ALIGNED-NEXT: buffer_store_dword v109, off, s[0:3], s32 offset:244 +; ALIGNED-NEXT: flat_store_byte v[3:4], v7 offset:250 +; ALIGNED-NEXT: flat_store_byte v[3:4], v9 offset:251 +; ALIGNED-NEXT: flat_store_byte v[3:4], v8 offset:249 +; ALIGNED-NEXT: flat_store_byte v[3:4], v11 offset:255 +; ALIGNED-NEXT: flat_store_byte v[3:4], v13 offset:253 +; ALIGNED-NEXT: flat_store_byte v[3:4], v12 offset:254 +; ALIGNED-NEXT: flat_store_byte v[3:4], v14 offset:252 +; ALIGNED-NEXT: flat_store_byte v[3:4], v10 offset:248 +; ALIGNED-NEXT: flat_store_byte v[3:4], v17 offset:242 +; ALIGNED-NEXT: flat_store_byte v[3:4], v18 offset:243 +; ALIGNED-NEXT: flat_store_byte v[3:4], v22 offset:241 +; ALIGNED-NEXT: flat_store_byte v[3:4], v16 offset:247 +; ALIGNED-NEXT: flat_store_byte v[3:4], v19 offset:245 +; ALIGNED-NEXT: flat_store_byte v[3:4], v20 offset:246 +; ALIGNED-NEXT: flat_store_byte v[3:4], v23 offset:244 +; ALIGNED-NEXT: flat_store_byte v[3:4], v24 offset:240 +; ALIGNED-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:248 +; ALIGNED-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:252 +; ALIGNED-NEXT: buffer_store_dword v70, off, s[0:3], s32 offset:244 +; ALIGNED-NEXT: buffer_store_dword v86, off, s[0:3], s32 offset:240 +; ALIGNED-NEXT: flat_store_byte v[3:4], v25 offset:234 +; ALIGNED-NEXT: flat_store_byte v[3:4], v29 offset:235 +; ALIGNED-NEXT: flat_store_byte v[3:4], v27 offset:233 +; ALIGNED-NEXT: flat_store_byte v[3:4], v30 offset:239 +; ALIGNED-NEXT: flat_store_byte v[3:4], v33 offset:237 +; ALIGNED-NEXT: flat_store_byte v[3:4], v32 offset:238 +; ALIGNED-NEXT: flat_store_byte v[3:4], v34 offset:236 +; ALIGNED-NEXT: flat_store_byte v[3:4], v28 offset:232 +; ALIGNED-NEXT: flat_store_byte v[3:4], v37 offset:226 +; ALIGNED-NEXT: flat_store_byte v[3:4], v36 offset:227 +; ALIGNED-NEXT: flat_store_byte v[3:4], v48 offset:225 +; ALIGNED-NEXT: flat_store_byte v[3:4], v35 offset:231 +; ALIGNED-NEXT: flat_store_byte v[3:4], v39 offset:229 +; ALIGNED-NEXT: flat_store_byte v[3:4], v38 offset:230 +; ALIGNED-NEXT: flat_store_byte v[3:4], v49 offset:228 +; ALIGNED-NEXT: flat_store_byte v[3:4], v50 offset:224 +; ALIGNED-NEXT: buffer_store_dword v103, off, s[0:3], s32 offset:192 +; ALIGNED-NEXT: buffer_store_dword v119, off, s[0:3], s32 offset:204 +; ALIGNED-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:200 +; ALIGNED-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:196 +; ALIGNED-NEXT: flat_store_byte v[3:4], v71 offset:213 +; ALIGNED-NEXT: flat_store_byte v[3:4], v69 offset:215 +; ALIGNED-NEXT: flat_store_byte v[3:4], v51 offset:209 +; ALIGNED-NEXT: flat_store_byte v[3:4], v80 offset:211 +; ALIGNED-NEXT: flat_store_byte v[3:4], v52 offset:210 +; ALIGNED-NEXT: flat_store_byte v[3:4], v83 offset:214 +; ALIGNED-NEXT: flat_store_byte v[3:4], v85 offset:212 +; ALIGNED-NEXT: flat_store_byte v[3:4], v65 offset:218 +; ALIGNED-NEXT: flat_store_byte v[3:4], v66 offset:219 +; ALIGNED-NEXT: flat_store_byte v[3:4], 
v81 offset:217 +; ALIGNED-NEXT: flat_store_byte v[3:4], v64 offset:223 +; ALIGNED-NEXT: flat_store_byte v[3:4], v67 offset:221 +; ALIGNED-NEXT: flat_store_byte v[3:4], v68 offset:222 +; ALIGNED-NEXT: flat_store_byte v[3:4], v82 offset:220 +; ALIGNED-NEXT: flat_store_byte v[3:4], v84 offset:216 +; ALIGNED-NEXT: flat_store_byte v[3:4], v54 offset:208 +; ALIGNED-NEXT: buffer_store_dword v77, off, s[0:3], s32 offset:216 +; ALIGNED-NEXT: buffer_store_dword v91, off, s[0:3], s32 offset:220 +; ALIGNED-NEXT: buffer_store_dword v93, off, s[0:3], s32 offset:212 +; ALIGNED-NEXT: buffer_store_dword v110, off, s[0:3], s32 offset:208 +; ALIGNED-NEXT: flat_store_byte v[3:4], v87 offset:202 +; ALIGNED-NEXT: flat_store_byte v[3:4], v97 offset:203 +; ALIGNED-NEXT: flat_store_byte v[3:4], v96 offset:201 +; ALIGNED-NEXT: flat_store_byte v[3:4], v99 offset:207 +; ALIGNED-NEXT: flat_store_byte v[3:4], v101 offset:205 +; ALIGNED-NEXT: flat_store_byte v[3:4], v100 offset:206 +; ALIGNED-NEXT: flat_store_byte v[3:4], v102 offset:204 +; ALIGNED-NEXT: flat_store_byte v[3:4], v98 offset:200 +; ALIGNED-NEXT: flat_store_byte v[3:4], v113 offset:194 +; ALIGNED-NEXT: flat_store_byte v[3:4], v114 offset:195 +; ALIGNED-NEXT: flat_store_byte v[3:4], v117 offset:193 +; ALIGNED-NEXT: flat_store_byte v[3:4], v112 offset:199 +; ALIGNED-NEXT: flat_store_byte v[3:4], v115 offset:197 +; ALIGNED-NEXT: flat_store_byte v[3:4], v116 offset:198 +; ALIGNED-NEXT: flat_store_byte v[3:4], v118 offset:196 +; ALIGNED-NEXT: flat_store_byte v[3:4], v40 offset:192 ; ALIGNED-NEXT: v_lshl_or_b32 v127, v0, 16, v127 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1488 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1492 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_add_u32 s4, s4, 0x100 ; ALIGNED-NEXT: s_addc_u32 s5, s5, 0 ; ALIGNED-NEXT: s_cmp_lg_u64 s[4:5], 0x800 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:240 -; ALIGNED-NEXT: flat_store_byte v[3:4], v21 offset:234 -; ALIGNED-NEXT: flat_store_byte v[3:4], v23 offset:235 -; ALIGNED-NEXT: flat_store_byte v[3:4], v22 offset:233 -; ALIGNED-NEXT: flat_store_byte v[3:4], v25 offset:239 -; ALIGNED-NEXT: flat_store_byte v[3:4], v27 offset:237 -; ALIGNED-NEXT: flat_store_byte v[3:4], v26 offset:238 -; ALIGNED-NEXT: flat_store_byte v[3:4], v28 offset:236 -; ALIGNED-NEXT: flat_store_byte v[3:4], v24 offset:232 -; ALIGNED-NEXT: flat_store_byte v[3:4], v31 offset:226 -; ALIGNED-NEXT: flat_store_byte v[3:4], v30 offset:227 -; ALIGNED-NEXT: flat_store_byte v[3:4], v34 offset:225 -; ALIGNED-NEXT: flat_store_byte v[3:4], v29 offset:231 -; ALIGNED-NEXT: flat_store_byte v[3:4], v33 offset:229 -; ALIGNED-NEXT: flat_store_byte v[3:4], v32 offset:230 -; ALIGNED-NEXT: flat_store_byte v[3:4], v35 offset:228 -; ALIGNED-NEXT: flat_store_byte v[3:4], v36 offset:224 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1488 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: 
buffer_store_dword v0, off, s[0:3], s32 offset:204 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 +; ALIGNED-NEXT: flat_store_byte v[3:4], v42 offset:186 +; ALIGNED-NEXT: flat_store_byte v[3:4], v44 offset:187 +; ALIGNED-NEXT: flat_store_byte v[3:4], v43 offset:185 +; ALIGNED-NEXT: flat_store_byte v[3:4], v46 offset:191 +; ALIGNED-NEXT: flat_store_byte v[3:4], v47 offset:189 +; ALIGNED-NEXT: flat_store_byte v[3:4], v56 offset:190 +; ALIGNED-NEXT: flat_store_byte v[3:4], v57 offset:188 +; ALIGNED-NEXT: flat_store_byte v[3:4], v45 offset:184 +; ALIGNED-NEXT: flat_store_byte v[3:4], v61 offset:178 +; ALIGNED-NEXT: flat_store_byte v[3:4], v60 offset:179 +; ALIGNED-NEXT: flat_store_byte v[3:4], v72 offset:177 +; ALIGNED-NEXT: flat_store_byte v[3:4], v59 offset:183 +; ALIGNED-NEXT: flat_store_byte v[3:4], v63 offset:181 +; ALIGNED-NEXT: flat_store_byte v[3:4], v62 offset:182 +; ALIGNED-NEXT: flat_store_byte v[3:4], v73 offset:180 +; ALIGNED-NEXT: flat_store_byte v[3:4], v74 offset:176 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 -; ALIGNED-NEXT: flat_store_byte v[3:4], v54 offset:213 -; ALIGNED-NEXT: flat_store_byte v[3:4], v53 offset:215 -; ALIGNED-NEXT: flat_store_byte v[3:4], v37 offset:209 -; ALIGNED-NEXT: flat_store_byte v[3:4], v55 offset:211 -; ALIGNED-NEXT: flat_store_byte v[3:4], v38 offset:210 -; ALIGNED-NEXT: flat_store_byte v[3:4], v66 offset:214 -; ALIGNED-NEXT: flat_store_byte v[3:4], v68 offset:212 -; ALIGNED-NEXT: flat_store_byte v[3:4], v49 offset:218 -; ALIGNED-NEXT: flat_store_byte v[3:4], v50 offset:219 -; ALIGNED-NEXT: flat_store_byte v[3:4], v64 offset:217 -; ALIGNED-NEXT: flat_store_byte v[3:4], v48 offset:223 -; ALIGNED-NEXT: flat_store_byte v[3:4], v51 offset:221 -; ALIGNED-NEXT: flat_store_byte v[3:4], v52 offset:222 -; ALIGNED-NEXT: flat_store_byte v[3:4], v65 offset:220 -; ALIGNED-NEXT: flat_store_byte v[3:4], v67 offset:216 -; ALIGNED-NEXT: flat_store_byte v[3:4], v39 offset:208 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:316 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:216 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:220 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 +; ALIGNED-NEXT: flat_store_byte v[3:4], v75 offset:170 +; ALIGNED-NEXT: flat_store_byte v[3:4], v78 offset:171 +; ALIGNED-NEXT: flat_store_byte v[3:4], v76 offset:169 +; ALIGNED-NEXT: flat_store_byte v[3:4], v88 offset:175 +; ALIGNED-NEXT: flat_store_byte v[3:4], v89 offset:173 +; ALIGNED-NEXT: flat_store_byte v[3:4], v90 offset:174 +; ALIGNED-NEXT: flat_store_byte v[3:4], v92 offset:172 +; ALIGNED-NEXT: flat_store_byte v[3:4], v79 offset:168 +; ALIGNED-NEXT: flat_store_byte v[3:4], v104 offset:162 +; ALIGNED-NEXT: flat_store_byte v[3:4], v105 offset:163 +; ALIGNED-NEXT: flat_store_byte 
v[3:4], v108 offset:161 +; ALIGNED-NEXT: flat_store_byte v[3:4], v94 offset:167 +; ALIGNED-NEXT: flat_store_byte v[3:4], v106 offset:165 +; ALIGNED-NEXT: flat_store_byte v[3:4], v107 offset:166 +; ALIGNED-NEXT: flat_store_byte v[3:4], v111 offset:164 +; ALIGNED-NEXT: flat_store_byte v[3:4], v120 offset:160 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:208 -; ALIGNED-NEXT: flat_store_byte v[3:4], v69 offset:202 -; ALIGNED-NEXT: flat_store_byte v[3:4], v81 offset:203 -; ALIGNED-NEXT: flat_store_byte v[3:4], v71 offset:201 -; ALIGNED-NEXT: flat_store_byte v[3:4], v82 offset:207 -; ALIGNED-NEXT: flat_store_byte v[3:4], v83 offset:205 -; ALIGNED-NEXT: flat_store_byte v[3:4], v84 offset:206 -; ALIGNED-NEXT: flat_store_byte v[3:4], v85 offset:204 -; ALIGNED-NEXT: flat_store_byte v[3:4], v80 offset:200 -; ALIGNED-NEXT: flat_store_byte v[3:4], v97 offset:194 -; ALIGNED-NEXT: flat_store_byte v[3:4], v96 offset:195 -; ALIGNED-NEXT: flat_store_byte v[3:4], v100 offset:193 -; ALIGNED-NEXT: flat_store_byte v[3:4], v87 offset:199 -; ALIGNED-NEXT: flat_store_byte v[3:4], v99 offset:197 -; ALIGNED-NEXT: flat_store_byte v[3:4], v98 offset:198 -; ALIGNED-NEXT: flat_store_byte v[3:4], v101 offset:196 -; ALIGNED-NEXT: flat_store_byte v[3:4], v102 offset:192 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1452 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:296 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 +; ALIGNED-NEXT: flat_store_byte v[3:4], v121 offset:154 +; ALIGNED-NEXT: flat_store_byte v[3:4], v124 offset:155 +; ALIGNED-NEXT: flat_store_byte v[3:4], v122 offset:153 +; ALIGNED-NEXT: flat_store_byte v[3:4], v126 offset:159 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:300 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:157 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1452 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:292 +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:158 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:288 -; ALIGNED-NEXT: flat_store_byte v[3:4], v112 offset:186 -; ALIGNED-NEXT: flat_store_byte v[3:4], v114 offset:187 -; ALIGNED-NEXT: flat_store_byte v[3:4], v113 offset:185 -; ALIGNED-NEXT: flat_store_byte v[3:4], v116 offset:191 -; ALIGNED-NEXT: flat_store_byte v[3:4], 
v117 offset:189 -; ALIGNED-NEXT: flat_store_byte v[3:4], v118 offset:190 -; ALIGNED-NEXT: flat_store_byte v[3:4], v119 offset:188 -; ALIGNED-NEXT: flat_store_byte v[3:4], v115 offset:184 -; ALIGNED-NEXT: flat_store_byte v[3:4], v41 offset:178 -; ALIGNED-NEXT: flat_store_byte v[3:4], v42 offset:179 -; ALIGNED-NEXT: flat_store_byte v[3:4], v45 offset:177 -; ALIGNED-NEXT: flat_store_byte v[3:4], v40 offset:183 -; ALIGNED-NEXT: flat_store_byte v[3:4], v43 offset:181 -; ALIGNED-NEXT: flat_store_byte v[3:4], v44 offset:182 -; ALIGNED-NEXT: flat_store_byte v[3:4], v46 offset:180 -; ALIGNED-NEXT: flat_store_byte v[3:4], v47 offset:176 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:156 +; ALIGNED-NEXT: flat_store_byte v[3:4], v123 offset:152 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1420 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1432 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:146 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1408 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:316 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1428 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:147 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1404 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:145 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1432 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:151 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1424 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 -; ALIGNED-NEXT: flat_store_byte v[3:4], v56 offset:170 -; ALIGNED-NEXT: flat_store_byte v[3:4], v58 offset:171 -; ALIGNED-NEXT: flat_store_byte v[3:4], v57 offset:169 -; ALIGNED-NEXT: flat_store_byte v[3:4], v60 offset:175 -; ALIGNED-NEXT: flat_store_byte v[3:4], v61 offset:173 -; ALIGNED-NEXT: flat_store_byte v[3:4], v62 offset:174 -; ALIGNED-NEXT: flat_store_byte v[3:4], v63 offset:172 -; ALIGNED-NEXT: flat_store_byte v[3:4], v59 offset:168 -; ALIGNED-NEXT: flat_store_byte v[3:4], v73 offset:162 -; ALIGNED-NEXT: flat_store_byte v[3:4], v74 offset:163 -; ALIGNED-NEXT: flat_store_byte v[3:4], v79 offset:161 -; ALIGNED-NEXT: flat_store_byte v[3:4], v72 offset:167 -; ALIGNED-NEXT: flat_store_byte v[3:4], v76 offset:165 -; ALIGNED-NEXT: flat_store_byte v[3:4], v75 offset:166 -; ALIGNED-NEXT: flat_store_byte v[3:4], v88 offset:164 -; ALIGNED-NEXT: flat_store_byte v[3:4], v89 offset:160 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1420 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:149 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1428 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:264 +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:150 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1416 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt 
vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:268 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1412 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:260 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1408 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:256 -; ALIGNED-NEXT: flat_store_byte v[3:4], v90 offset:154 -; ALIGNED-NEXT: flat_store_byte v[3:4], v94 offset:155 -; ALIGNED-NEXT: flat_store_byte v[3:4], v92 offset:153 -; ALIGNED-NEXT: flat_store_byte v[3:4], v104 offset:159 -; ALIGNED-NEXT: flat_store_byte v[3:4], v105 offset:157 -; ALIGNED-NEXT: flat_store_byte v[3:4], v106 offset:158 -; ALIGNED-NEXT: flat_store_byte v[3:4], v108 offset:156 -; ALIGNED-NEXT: flat_store_byte v[3:4], v95 offset:152 -; ALIGNED-NEXT: flat_store_byte v[3:4], v111 offset:146 -; ALIGNED-NEXT: flat_store_byte v[3:4], v120 offset:147 -; ALIGNED-NEXT: flat_store_byte v[3:4], v124 offset:145 -; ALIGNED-NEXT: flat_store_byte v[3:4], v110 offset:151 -; ALIGNED-NEXT: flat_store_byte v[3:4], v121 offset:149 -; ALIGNED-NEXT: flat_store_byte v[3:4], v122 offset:150 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1404 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:148 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:144 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1384 ; 4-byte Folded Reload @@ -13767,31 +13722,31 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1352 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:284 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1328 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1320 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:276 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1312 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1308 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:272 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1376 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1372 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:138 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1364 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1360 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:139 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1356 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:137 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1340 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1336 ; 
4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:143 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1332 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1324 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:141 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1336 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1328 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:142 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1324 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1316 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:140 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1344 ; 4-byte Folded Reload @@ -14215,11 +14170,11 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:24 -; ALIGNED-NEXT: flat_store_byte v[3:4], v91 offset:18 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v109 offset:18 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1412 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:19 -; ALIGNED-NEXT: flat_store_byte v[3:4], v77 offset:17 +; ALIGNED-NEXT: flat_store_byte v[3:4], v95 offset:17 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:23 @@ -14232,35 +14187,37 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:20 -; ALIGNED-NEXT: flat_store_byte v[3:4], v107 offset:16 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1392 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v125 offset:16 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1388 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1392 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1348 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1316 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1312 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 -; ALIGNED-NEXT: flat_store_byte v[3:4], v123 offset:10 -; ALIGNED-NEXT: flat_store_byte v[3:4], v126 offset:11 -; ALIGNED-NEXT: buffer_load_dword 
v0, off, s[0:3], s32 offset:1380 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v1 offset:10 +; ALIGNED-NEXT: flat_store_byte v[3:4], v5 offset:11 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1376 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:13 -; ALIGNED-NEXT: flat_store_byte v[3:4], v125 offset:9 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1372 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1388 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:15 +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:9 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1368 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:15 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1364 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:14 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1360 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[3:4], v6 offset:12 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1380 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:12 -; ALIGNED-NEXT: flat_store_byte v[3:4], v1 offset:8 +; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:8 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1300 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:2 @@ -14270,13 +14227,13 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1284 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:1 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1320 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1340 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:7 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1304 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:5 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1308 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1332 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[3:4], v0 offset:6 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1296 ; 4-byte Folded Reload @@ -14296,7 +14253,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: s_mov_b32 s7, -1 ; ALIGNED-NEXT: .LBB9_4: ; %memmove_bwd_loop ; ALIGNED-NEXT: ; =>This Inner Loop Header: Depth=1 -; ALIGNED-NEXT: s_clause 0x3a +; ALIGNED-NEXT: s_clause 0x39 ; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:20 ; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:21 ; ALIGNED-NEXT: buffer_load_ubyte v2, v4, s[0:3], 0 offen offset:22 @@ -14304,7 +14261,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_ubyte v6, v4, s[0:3], 0 offen offset:24 ; ALIGNED-NEXT: buffer_load_ubyte v10, v4, s[0:3], 0 offen offset:25 ; ALIGNED-NEXT: buffer_load_ubyte 
v12, v4, s[0:3], 0 offen offset:26 -; ALIGNED-NEXT: buffer_load_ubyte v126, v4, s[0:3], 0 offen offset:19 +; ALIGNED-NEXT: buffer_load_ubyte v125, v4, s[0:3], 0 offen offset:19 ; ALIGNED-NEXT: buffer_load_ubyte v5, v4, s[0:3], 0 offen offset:28 ; ALIGNED-NEXT: buffer_load_ubyte v7, v4, s[0:3], 0 offen offset:29 ; ALIGNED-NEXT: buffer_load_ubyte v8, v4, s[0:3], 0 offen offset:30 @@ -14355,55 +14312,54 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_ubyte v81, v4, s[0:3], 0 offen offset:75 ; ALIGNED-NEXT: buffer_load_ubyte v71, v4, s[0:3], 0 offen offset:78 ; ALIGNED-NEXT: buffer_load_ubyte v80, v4, s[0:3], 0 offen offset:79 -; ALIGNED-NEXT: buffer_load_ubyte v125, v4, s[0:3], 0 offen offset:151 -; ALIGNED-NEXT: s_waitcnt vmcnt(58) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(57) -; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(56) -; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(55) -; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(54) -; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(53) -; ALIGNED-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(52) +; ALIGNED-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(51) ; ALIGNED-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 -; ALIGNED-NEXT: s_waitcnt vmcnt(50) -; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(49) -; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(48) -; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(47) -; ALIGNED-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(46) +; ALIGNED-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(45) ; ALIGNED-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 8, v2 ; ALIGNED-NEXT: v_lshl_or_b32 v2, v7, 8, v5 -; ALIGNED-NEXT: s_waitcnt vmcnt(43) +; ALIGNED-NEXT: s_waitcnt vmcnt(42) ; ALIGNED-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill ; 
ALIGNED-NEXT: v_lshl_or_b32 v3, v9, 8, v8 -; ALIGNED-NEXT: s_waitcnt vmcnt(41) +; ALIGNED-NEXT: s_waitcnt vmcnt(40) ; ALIGNED-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v5, v10, 8, v6 ; ALIGNED-NEXT: v_lshl_or_b32 v6, v11, 8, v12 ; ALIGNED-NEXT: v_lshl_or_b32 v7, v15, 8, v14 ; ALIGNED-NEXT: v_lshl_or_b32 v8, v19, 8, v17 -; ALIGNED-NEXT: s_waitcnt vmcnt(40) +; ALIGNED-NEXT: s_waitcnt vmcnt(39) ; ALIGNED-NEXT: v_lshl_or_b32 v9, v16, 8, v13 -; ALIGNED-NEXT: s_waitcnt vmcnt(38) +; ALIGNED-NEXT: s_waitcnt vmcnt(37) ; ALIGNED-NEXT: v_lshl_or_b32 v10, v20, 8, v18 -; ALIGNED-NEXT: s_waitcnt vmcnt(36) +; ALIGNED-NEXT: s_waitcnt vmcnt(35) ; ALIGNED-NEXT: v_lshl_or_b32 v11, v23, 8, v22 -; ALIGNED-NEXT: s_waitcnt vmcnt(34) +; ALIGNED-NEXT: s_waitcnt vmcnt(33) ; ALIGNED-NEXT: v_lshl_or_b32 v12, v28, 8, v25 -; ALIGNED-NEXT: s_waitcnt vmcnt(32) +; ALIGNED-NEXT: s_waitcnt vmcnt(31) ; ALIGNED-NEXT: v_lshl_or_b32 v13, v24, 8, v21 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: s_waitcnt vmcnt(30) +; ALIGNED-NEXT: s_waitcnt vmcnt(29) ; ALIGNED-NEXT: v_lshl_or_b32 v14, v27, 8, v26 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 16, v2 ; ALIGNED-NEXT: v_lshl_or_b32 v2, v6, 16, v5 @@ -14412,27 +14368,27 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: v_lshl_or_b32 v6, v12, 16, v11 ; ALIGNED-NEXT: v_lshl_or_b32 v7, v14, 16, v13 ; ALIGNED-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(28) +; ALIGNED-NEXT: s_waitcnt vmcnt(27) ; ALIGNED-NEXT: v_lshl_or_b32 v15, v31, 8, v30 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(26) +; ALIGNED-NEXT: s_waitcnt vmcnt(25) ; ALIGNED-NEXT: v_lshl_or_b32 v0, v34, 8, v33 ; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(24) +; ALIGNED-NEXT: s_waitcnt vmcnt(23) ; ALIGNED-NEXT: v_lshl_or_b32 v1, v37, 8, v32 ; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(22) +; ALIGNED-NEXT: s_waitcnt vmcnt(21) ; ALIGNED-NEXT: v_lshl_or_b32 v2, v36, 8, v35 ; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(17) +; ALIGNED-NEXT: s_waitcnt vmcnt(16) ; ALIGNED-NEXT: v_lshl_or_b32 v3, v50, 8, v38 ; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:852 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(15) +; ALIGNED-NEXT: s_waitcnt vmcnt(14) ; ALIGNED-NEXT: v_lshl_or_b32 v5, v49, 8, v39 ; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:868 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v6, v51, 8, v48 ; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:880 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(11) +; ALIGNED-NEXT: s_waitcnt vmcnt(10) ; ALIGNED-NEXT: v_lshl_or_b32 v7, v53, 8, v52 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v0, 16, v15 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v2, 16, v1 @@ -14442,13 +14398,13 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:916 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v55, 8, v29 ; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:920 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(11) +; ALIGNED-NEXT: s_waitcnt vmcnt(10) ; 
ALIGNED-NEXT: v_lshl_or_b32 v1, v67, 8, v66 ; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:928 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(9) +; ALIGNED-NEXT: s_waitcnt vmcnt(8) ; ALIGNED-NEXT: v_lshl_or_b32 v2, v64, 8, v54 ; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:932 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(7) +; ALIGNED-NEXT: s_waitcnt vmcnt(6) ; ALIGNED-NEXT: v_lshl_or_b32 v3, v68, 8, v65 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: s_clause 0x1 @@ -14457,13 +14413,13 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 16, v2 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:976 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(7) +; ALIGNED-NEXT: s_waitcnt vmcnt(6) ; ALIGNED-NEXT: v_lshl_or_b32 v0, v70, 8, v69 ; ALIGNED-NEXT: s_clause 0x1 ; ALIGNED-NEXT: buffer_load_ubyte v3, v4, s[0:3], 0 offen offset:83 ; ALIGNED-NEXT: buffer_load_ubyte v2, v4, s[0:3], 0 offen offset:74 ; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:988 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(6) +; ALIGNED-NEXT: s_waitcnt vmcnt(5) ; ALIGNED-NEXT: v_lshl_or_b32 v1, v80, 8, v71 ; ALIGNED-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill @@ -14509,7 +14465,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_store_dword v70, off, s[0:3], s32 offset:968 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_store_dword v71, off, s[0:3], s32 offset:972 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_store_dword v80, off, s[0:3], s32 offset:980 ; 4-byte Folded Spill -; ALIGNED-NEXT: buffer_store_dword v126, off, s[0:3], s32 offset:1416 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v125, off, s[0:3], s32 offset:1452 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_store_dword v81, off, s[0:3], s32 offset:1000 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v8, v4, s[0:3], 0 offen offset:87 ; ALIGNED-NEXT: s_waitcnt vmcnt(7) @@ -14744,7 +14700,9 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_ubyte v7, v4, s[0:3], 0 offen offset:146 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v8, 8, v6 +; ALIGNED-NEXT: s_clause 0x1 ; ALIGNED-NEXT: buffer_load_ubyte v6, v4, s[0:3], 0 offen offset:150 +; ALIGNED-NEXT: buffer_load_ubyte v8, v4, s[0:3], 0 offen offset:151 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1284 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v5, 8, v2 ; ALIGNED-NEXT: s_clause 0x1 @@ -14756,8 +14714,10 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_ubyte v3, v4, s[0:3], 0 offen offset:139 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1292 ; 4-byte Folded Spill ; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:140 +; ALIGNED-NEXT: s_waitcnt vmcnt(6) +; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1376 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(5) -; ALIGNED-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:1372 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:1388 ; 4-byte Folded 
Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(4) ; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:1300 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(3) @@ -14778,7 +14738,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) ; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:1324 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1368 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:1372 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) ; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1316 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) @@ -14788,7 +14748,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: s_clause 0x1 ; ALIGNED-NEXT: buffer_load_ubyte v3, v4, s[0:3], 0 offen offset:147 ; ALIGNED-NEXT: buffer_load_ubyte v2, v4, s[0:3], 0 offen offset:148 -; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:1356 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:1360 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:145 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1328 ; 4-byte Folded Spill @@ -14796,7 +14756,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) ; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:1348 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:1352 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:1356 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) ; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1340 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) @@ -14804,231 +14764,171 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 ; ALIGNED-NEXT: v_lshl_or_b32 v1, v3, 8, v7 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: v_lshl_or_b32 v1, v125, 8, v6 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1384 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v1, v8, 8, v6 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v0, v5, 8, v2 +; ALIGNED-NEXT: buffer_load_ubyte v2, v4, s[0:3], 0 offen offset:158 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1392 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v122, v4, s[0:3], 0 offen offset:156 -; ALIGNED-NEXT: buffer_load_ubyte v111, v4, s[0:3], 0 offen offset:157 -; ALIGNED-NEXT: buffer_load_ubyte v120, v4, s[0:3], 0 offen offset:158 -; ALIGNED-NEXT: buffer_load_ubyte v109, v4, s[0:3], 0 offen offset:159 -; ALIGNED-NEXT: buffer_load_ubyte v106, v4, s[0:3], 0 offen offset:155 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v111, 8, v122 +; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:157 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1408 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:156 +; ALIGNED-NEXT: s_waitcnt vmcnt(2) +; 
ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:1420 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v109, 8, v120 +; ALIGNED-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:1416 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1404 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v126, v4, s[0:3], 0 offen offset:159 +; ALIGNED-NEXT: buffer_load_ubyte v124, v4, s[0:3], 0 offen offset:155 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 8, v0 +; ALIGNED-NEXT: s_waitcnt vmcnt(1) +; ALIGNED-NEXT: v_lshl_or_b32 v1, v126, 8, v2 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v105, v4, s[0:3], 0 offen offset:152 -; ALIGNED-NEXT: buffer_load_ubyte v94, v4, s[0:3], 0 offen offset:153 -; ALIGNED-NEXT: buffer_load_ubyte v92, v4, s[0:3], 0 offen offset:154 +; ALIGNED-NEXT: buffer_load_ubyte v123, v4, s[0:3], 0 offen offset:152 +; ALIGNED-NEXT: buffer_load_ubyte v121, v4, s[0:3], 0 offen offset:153 +; ALIGNED-NEXT: buffer_load_ubyte v111, v4, s[0:3], 0 offen offset:154 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v94, 8, v105 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v121, 8, v123 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v106, 8, v92 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v124, 8, v111 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1408 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v89, v4, s[0:3], 0 offen offset:160 -; ALIGNED-NEXT: buffer_load_ubyte v79, v4, s[0:3], 0 offen offset:161 -; ALIGNED-NEXT: buffer_load_ubyte v75, v4, s[0:3], 0 offen offset:162 -; ALIGNED-NEXT: buffer_load_ubyte v74, v4, s[0:3], 0 offen offset:163 -; ALIGNED-NEXT: buffer_load_ubyte v88, v4, s[0:3], 0 offen offset:164 -; ALIGNED-NEXT: buffer_load_ubyte v77, v4, s[0:3], 0 offen offset:165 -; ALIGNED-NEXT: buffer_load_ubyte v76, v4, s[0:3], 0 offen offset:166 -; ALIGNED-NEXT: buffer_load_ubyte v72, v4, s[0:3], 0 offen offset:167 +; ALIGNED-NEXT: buffer_load_ubyte v108, v4, s[0:3], 0 offen offset:160 +; ALIGNED-NEXT: buffer_load_ubyte v105, v4, s[0:3], 0 offen offset:161 +; ALIGNED-NEXT: buffer_load_ubyte v93, v4, s[0:3], 0 offen offset:162 +; ALIGNED-NEXT: buffer_load_ubyte v92, v4, s[0:3], 0 offen offset:163 +; ALIGNED-NEXT: buffer_load_ubyte v107, v4, s[0:3], 0 offen offset:164 +; ALIGNED-NEXT: buffer_load_ubyte v95, v4, s[0:3], 0 offen offset:165 +; ALIGNED-NEXT: buffer_load_ubyte v94, v4, s[0:3], 0 offen offset:166 +; ALIGNED-NEXT: buffer_load_ubyte v91, v4, s[0:3], 0 offen offset:167 ; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v79, 8, v89 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v105, 8, v108 ; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v74, 8, v75 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v92, 8, v93 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v72, 8, v76 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1420 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v77, 
8, v88 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v91, 8, v94 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v95, 8, v107 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1424 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v63, v4, s[0:3], 0 offen offset:172 -; ALIGNED-NEXT: buffer_load_ubyte v61, v4, s[0:3], 0 offen offset:173 -; ALIGNED-NEXT: buffer_load_ubyte v62, v4, s[0:3], 0 offen offset:174 -; ALIGNED-NEXT: buffer_load_ubyte v60, v4, s[0:3], 0 offen offset:175 -; ALIGNED-NEXT: buffer_load_ubyte v58, v4, s[0:3], 0 offen offset:171 +; ALIGNED-NEXT: buffer_load_ubyte v89, v4, s[0:3], 0 offen offset:172 +; ALIGNED-NEXT: buffer_load_ubyte v79, v4, s[0:3], 0 offen offset:173 +; ALIGNED-NEXT: buffer_load_ubyte v78, v4, s[0:3], 0 offen offset:174 +; ALIGNED-NEXT: buffer_load_ubyte v77, v4, s[0:3], 0 offen offset:175 +; ALIGNED-NEXT: buffer_load_ubyte v75, v4, s[0:3], 0 offen offset:171 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v61, 8, v63 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v79, 8, v89 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v60, 8, v62 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v77, 8, v78 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1428 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v59, v4, s[0:3], 0 offen offset:168 -; ALIGNED-NEXT: buffer_load_ubyte v56, v4, s[0:3], 0 offen offset:169 -; ALIGNED-NEXT: buffer_load_ubyte v47, v4, s[0:3], 0 offen offset:170 +; ALIGNED-NEXT: buffer_load_ubyte v74, v4, s[0:3], 0 offen offset:168 +; ALIGNED-NEXT: buffer_load_ubyte v72, v4, s[0:3], 0 offen offset:169 +; ALIGNED-NEXT: buffer_load_ubyte v63, v4, s[0:3], 0 offen offset:170 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v56, 8, v59 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v72, 8, v74 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v58, 8, v47 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v75, 8, v63 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1432 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v46, v4, s[0:3], 0 offen offset:176 -; ALIGNED-NEXT: buffer_load_ubyte v44, v4, s[0:3], 0 offen offset:177 -; ALIGNED-NEXT: buffer_load_ubyte v119, v4, s[0:3], 0 offen offset:178 -; ALIGNED-NEXT: buffer_load_ubyte v40, v4, s[0:3], 0 offen offset:179 -; ALIGNED-NEXT: buffer_load_ubyte v45, v4, s[0:3], 0 offen offset:180 -; ALIGNED-NEXT: buffer_load_ubyte v41, v4, s[0:3], 0 offen offset:181 -; ALIGNED-NEXT: buffer_load_ubyte v42, v4, s[0:3], 0 offen offset:182 -; ALIGNED-NEXT: buffer_load_ubyte v118, v4, s[0:3], 0 offen offset:183 +; ALIGNED-NEXT: buffer_load_ubyte v61, v4, s[0:3], 0 offen offset:176 +; ALIGNED-NEXT: buffer_load_ubyte v59, v4, s[0:3], 0 offen offset:177 +; ALIGNED-NEXT: buffer_load_ubyte v47, v4, s[0:3], 0 offen offset:178 +; ALIGNED-NEXT: buffer_load_ubyte v56, v4, s[0:3], 0 offen offset:179 +; ALIGNED-NEXT: buffer_load_ubyte v60, v4, s[0:3], 0 offen offset:180 +; ALIGNED-NEXT: 
buffer_load_ubyte v57, v4, s[0:3], 0 offen offset:181 +; ALIGNED-NEXT: buffer_load_ubyte v58, v4, s[0:3], 0 offen offset:182 +; ALIGNED-NEXT: buffer_load_ubyte v46, v4, s[0:3], 0 offen offset:183 ; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v44, 8, v46 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v59, 8, v61 ; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v40, 8, v119 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v56, 8, v47 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v118, 8, v42 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v41, 8, v45 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v46, 8, v58 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v57, 8, v60 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v117, v4, s[0:3], 0 offen offset:188 -; ALIGNED-NEXT: buffer_load_ubyte v115, v4, s[0:3], 0 offen offset:189 -; ALIGNED-NEXT: buffer_load_ubyte v116, v4, s[0:3], 0 offen offset:190 -; ALIGNED-NEXT: buffer_load_ubyte v114, v4, s[0:3], 0 offen offset:191 -; ALIGNED-NEXT: buffer_load_ubyte v112, v4, s[0:3], 0 offen offset:187 +; ALIGNED-NEXT: buffer_load_ubyte v44, v4, s[0:3], 0 offen offset:188 +; ALIGNED-NEXT: buffer_load_ubyte v43, v4, s[0:3], 0 offen offset:189 +; ALIGNED-NEXT: buffer_load_ubyte v42, v4, s[0:3], 0 offen offset:190 +; ALIGNED-NEXT: buffer_load_ubyte v41, v4, s[0:3], 0 offen offset:191 +; ALIGNED-NEXT: buffer_load_ubyte v40, v4, s[0:3], 0 offen offset:187 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v115, 8, v117 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v43, 8, v44 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v114, 8, v116 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v41, 8, v42 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v113, v4, s[0:3], 0 offen offset:184 -; ALIGNED-NEXT: buffer_load_ubyte v103, v4, s[0:3], 0 offen offset:185 -; ALIGNED-NEXT: buffer_load_ubyte v102, v4, s[0:3], 0 offen offset:186 +; ALIGNED-NEXT: buffer_load_ubyte v119, v4, s[0:3], 0 offen offset:184 +; ALIGNED-NEXT: buffer_load_ubyte v118, v4, s[0:3], 0 offen offset:185 +; ALIGNED-NEXT: buffer_load_ubyte v117, v4, s[0:3], 0 offen offset:186 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v103, 8, v113 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v118, 8, v119 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v1, v112, 8, v102 +; ALIGNED-NEXT: v_lshl_or_b32 v1, v40, 8, v117 ; ALIGNED-NEXT: v_lshl_or_b32 v0, v1, 16, v0 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v100, v4, s[0:3], 0 offen offset:192 -; ALIGNED-NEXT: buffer_load_ubyte v98, v4, s[0:3], 0 offen offset:193 -; ALIGNED-NEXT: buffer_load_ubyte v87, v4, s[0:3], 0 offen offset:194 -; ALIGNED-NEXT: buffer_load_ubyte v86, v4, s[0:3], 0 offen offset:195 -; 
ALIGNED-NEXT: buffer_load_ubyte v99, v4, s[0:3], 0 offen offset:196 -; ALIGNED-NEXT: buffer_load_ubyte v97, v4, s[0:3], 0 offen offset:197 -; ALIGNED-NEXT: buffer_load_ubyte v96, v4, s[0:3], 0 offen offset:198 -; ALIGNED-NEXT: buffer_load_ubyte v85, v4, s[0:3], 0 offen offset:199 -; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v98, 8, v100 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v86, 8, v87 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v3, 16, v2 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v97, 8, v99 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v85, 8, v96 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1452 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v3, 16, v2 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v83, v4, s[0:3], 0 offen offset:204 -; ALIGNED-NEXT: buffer_load_ubyte v81, v4, s[0:3], 0 offen offset:205 -; ALIGNED-NEXT: buffer_load_ubyte v82, v4, s[0:3], 0 offen offset:206 -; ALIGNED-NEXT: buffer_load_ubyte v80, v4, s[0:3], 0 offen offset:207 -; ALIGNED-NEXT: buffer_load_ubyte v71, v4, s[0:3], 0 offen offset:203 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v81, 8, v83 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v80, 8, v82 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v3, 16, v2 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v70, v4, s[0:3], 0 offen offset:200 -; ALIGNED-NEXT: buffer_load_ubyte v69, v4, s[0:3], 0 offen offset:201 -; ALIGNED-NEXT: buffer_load_ubyte v68, v4, s[0:3], 0 offen offset:202 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v69, 8, v70 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v71, 8, v68 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v3, 16, v2 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v67, v4, s[0:3], 0 offen offset:212 -; ALIGNED-NEXT: buffer_load_ubyte v54, v4, s[0:3], 0 offen offset:213 -; ALIGNED-NEXT: buffer_load_ubyte v65, v4, s[0:3], 0 offen offset:214 -; ALIGNED-NEXT: buffer_load_ubyte v52, v4, s[0:3], 0 offen offset:215 -; ALIGNED-NEXT: buffer_load_ubyte v55, v4, s[0:3], 0 offen offset:211 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v54, 8, v67 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v52, 8, v65 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v3, 16, v2 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v66, v4, s[0:3], 0 offen offset:216 -; ALIGNED-NEXT: buffer_load_ubyte v53, v4, s[0:3], 0 offen offset:217 -; ALIGNED-NEXT: buffer_load_ubyte v49, v4, s[0:3], 0 offen offset:218 -; ALIGNED-NEXT: buffer_load_ubyte v48, v4, s[0:3], 0 offen offset:219 -; ALIGNED-NEXT: buffer_load_ubyte v64, v4, s[0:3], 0 offen offset:220 -; ALIGNED-NEXT: buffer_load_ubyte v51, v4, s[0:3], 0 offen offset:221 -; ALIGNED-NEXT: buffer_load_ubyte v50, v4, s[0:3], 0 offen offset:222 -; ALIGNED-NEXT: buffer_load_ubyte v39, v4, s[0:3], 0 offen offset:223 -; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v53, 8, v66 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: 
v_lshl_or_b32 v3, v48, 8, v49 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v3, 16, v2 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v51, 8, v64 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v39, 8, v50 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v3, 16, v2 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v38, v4, s[0:3], 0 offen offset:208 -; ALIGNED-NEXT: buffer_load_ubyte v36, v4, s[0:3], 0 offen offset:209 -; ALIGNED-NEXT: buffer_load_ubyte v37, v4, s[0:3], 0 offen offset:210 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v36, 8, v38 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v55, 8, v37 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v3, 16, v2 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x7 -; ALIGNED-NEXT: buffer_load_ubyte v35, v4, s[0:3], 0 offen offset:224 -; ALIGNED-NEXT: buffer_load_ubyte v33, v4, s[0:3], 0 offen offset:225 -; ALIGNED-NEXT: buffer_load_ubyte v29, v4, s[0:3], 0 offen offset:226 -; ALIGNED-NEXT: buffer_load_ubyte v30, v4, s[0:3], 0 offen offset:227 -; ALIGNED-NEXT: buffer_load_ubyte v34, v4, s[0:3], 0 offen offset:228 -; ALIGNED-NEXT: buffer_load_ubyte v31, v4, s[0:3], 0 offen offset:229 -; ALIGNED-NEXT: buffer_load_ubyte v32, v4, s[0:3], 0 offen offset:230 -; ALIGNED-NEXT: buffer_load_ubyte v28, v4, s[0:3], 0 offen offset:231 -; ALIGNED-NEXT: s_waitcnt vmcnt(6) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v33, 8, v35 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v30, 8, v29 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v3, 16, v2 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v31, 8, v34 -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v28, 8, v32 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x17 -; ALIGNED-NEXT: buffer_load_ubyte v27, v4, s[0:3], 0 offen offset:236 -; ALIGNED-NEXT: buffer_load_ubyte v25, v4, s[0:3], 0 offen offset:237 -; ALIGNED-NEXT: buffer_load_ubyte v26, v4, s[0:3], 0 offen offset:238 -; ALIGNED-NEXT: buffer_load_ubyte v24, v4, s[0:3], 0 offen offset:239 +; ALIGNED-NEXT: s_clause 0x3e +; ALIGNED-NEXT: buffer_load_ubyte v115, v4, s[0:3], 0 offen offset:192 +; ALIGNED-NEXT: buffer_load_ubyte v112, v4, s[0:3], 0 offen offset:193 +; ALIGNED-NEXT: buffer_load_ubyte v101, v4, s[0:3], 0 offen offset:194 +; ALIGNED-NEXT: buffer_load_ubyte v100, v4, s[0:3], 0 offen offset:195 +; ALIGNED-NEXT: buffer_load_ubyte v113, v4, s[0:3], 0 offen offset:196 +; ALIGNED-NEXT: buffer_load_ubyte v103, v4, s[0:3], 0 offen offset:197 +; ALIGNED-NEXT: buffer_load_ubyte v102, v4, s[0:3], 0 offen offset:198 +; ALIGNED-NEXT: buffer_load_ubyte v99, v4, s[0:3], 0 offen offset:199 +; ALIGNED-NEXT: buffer_load_ubyte v97, v4, s[0:3], 0 offen offset:204 +; ALIGNED-NEXT: buffer_load_ubyte v87, v4, s[0:3], 0 offen offset:205 +; ALIGNED-NEXT: buffer_load_ubyte v96, v4, s[0:3], 0 offen offset:206 +; ALIGNED-NEXT: buffer_load_ubyte v86, v4, s[0:3], 0 offen offset:207 +; ALIGNED-NEXT: buffer_load_ubyte v85, v4, s[0:3], 0 offen offset:203 +; ALIGNED-NEXT: buffer_load_ubyte v84, v4, s[0:3], 0 offen offset:200 +; ALIGNED-NEXT: buffer_load_ubyte v83, v4, s[0:3], 0 offen offset:201 +; ALIGNED-NEXT: buffer_load_ubyte v82, v4, s[0:3], 0 
offen offset:202 +; ALIGNED-NEXT: buffer_load_ubyte v80, v4, s[0:3], 0 offen offset:212 +; ALIGNED-NEXT: buffer_load_ubyte v68, v4, s[0:3], 0 offen offset:213 +; ALIGNED-NEXT: buffer_load_ubyte v70, v4, s[0:3], 0 offen offset:214 +; ALIGNED-NEXT: buffer_load_ubyte v65, v4, s[0:3], 0 offen offset:215 +; ALIGNED-NEXT: buffer_load_ubyte v66, v4, s[0:3], 0 offen offset:211 +; ALIGNED-NEXT: buffer_load_ubyte v71, v4, s[0:3], 0 offen offset:216 +; ALIGNED-NEXT: buffer_load_ubyte v67, v4, s[0:3], 0 offen offset:217 +; ALIGNED-NEXT: buffer_load_ubyte v53, v4, s[0:3], 0 offen offset:218 +; ALIGNED-NEXT: buffer_load_ubyte v52, v4, s[0:3], 0 offen offset:219 +; ALIGNED-NEXT: buffer_load_ubyte v69, v4, s[0:3], 0 offen offset:220 +; ALIGNED-NEXT: buffer_load_ubyte v55, v4, s[0:3], 0 offen offset:221 +; ALIGNED-NEXT: buffer_load_ubyte v54, v4, s[0:3], 0 offen offset:222 +; ALIGNED-NEXT: buffer_load_ubyte v51, v4, s[0:3], 0 offen offset:223 +; ALIGNED-NEXT: buffer_load_ubyte v50, v4, s[0:3], 0 offen offset:208 +; ALIGNED-NEXT: buffer_load_ubyte v38, v4, s[0:3], 0 offen offset:209 +; ALIGNED-NEXT: buffer_load_ubyte v39, v4, s[0:3], 0 offen offset:210 +; ALIGNED-NEXT: buffer_load_ubyte v37, v4, s[0:3], 0 offen offset:224 +; ALIGNED-NEXT: buffer_load_ubyte v35, v4, s[0:3], 0 offen offset:225 +; ALIGNED-NEXT: buffer_load_ubyte v31, v4, s[0:3], 0 offen offset:226 +; ALIGNED-NEXT: buffer_load_ubyte v32, v4, s[0:3], 0 offen offset:227 +; ALIGNED-NEXT: buffer_load_ubyte v36, v4, s[0:3], 0 offen offset:228 +; ALIGNED-NEXT: buffer_load_ubyte v33, v4, s[0:3], 0 offen offset:229 +; ALIGNED-NEXT: buffer_load_ubyte v34, v4, s[0:3], 0 offen offset:230 +; ALIGNED-NEXT: buffer_load_ubyte v30, v4, s[0:3], 0 offen offset:231 +; ALIGNED-NEXT: buffer_load_ubyte v29, v4, s[0:3], 0 offen offset:236 +; ALIGNED-NEXT: buffer_load_ubyte v27, v4, s[0:3], 0 offen offset:237 +; ALIGNED-NEXT: buffer_load_ubyte v28, v4, s[0:3], 0 offen offset:238 +; ALIGNED-NEXT: buffer_load_ubyte v26, v4, s[0:3], 0 offen offset:239 ; ALIGNED-NEXT: buffer_load_ubyte v23, v4, s[0:3], 0 offen offset:235 -; ALIGNED-NEXT: buffer_load_ubyte v22, v4, s[0:3], 0 offen offset:232 -; ALIGNED-NEXT: buffer_load_ubyte v21, v4, s[0:3], 0 offen offset:233 -; ALIGNED-NEXT: buffer_load_ubyte v20, v4, s[0:3], 0 offen offset:234 +; ALIGNED-NEXT: buffer_load_ubyte v24, v4, s[0:3], 0 offen offset:232 +; ALIGNED-NEXT: buffer_load_ubyte v22, v4, s[0:3], 0 offen offset:233 +; ALIGNED-NEXT: buffer_load_ubyte v21, v4, s[0:3], 0 offen offset:234 ; ALIGNED-NEXT: buffer_load_ubyte v19, v4, s[0:3], 0 offen offset:240 ; ALIGNED-NEXT: buffer_load_ubyte v17, v4, s[0:3], 0 offen offset:241 ; ALIGNED-NEXT: buffer_load_ubyte v13, v4, s[0:3], 0 offen offset:242 @@ -15044,100 +14944,135 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_ubyte v7, v4, s[0:3], 0 offen offset:251 ; ALIGNED-NEXT: buffer_load_ubyte v6, v4, s[0:3], 0 offen offset:248 ; ALIGNED-NEXT: buffer_load_ubyte v5, v4, s[0:3], 0 offen offset:249 +; ALIGNED-NEXT: s_clause 0x6 ; ALIGNED-NEXT: buffer_load_ubyte v1, v4, s[0:3], 0 offen offset:250 -; ALIGNED-NEXT: v_lshl_or_b32 v123, v3, 16, v2 ; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen -; ALIGNED-NEXT: s_waitcnt vmcnt(23) -; ALIGNED-NEXT: v_lshl_or_b32 v2, v25, 8, v27 -; ALIGNED-NEXT: s_waitcnt vmcnt(21) -; ALIGNED-NEXT: v_lshl_or_b32 v3, v24, 8, v26 -; ALIGNED-NEXT: s_waitcnt vmcnt(9) -; ALIGNED-NEXT: v_lshl_or_b32 v43, v12, 8, v16 -; ALIGNED-NEXT: s_waitcnt vmcnt(5) -; ALIGNED-NEXT: v_lshl_or_b32 
v57, v8, 8, v10 -; ALIGNED-NEXT: v_lshl_or_b32 v104, v3, 16, v2 -; ALIGNED-NEXT: v_lshl_or_b32 v2, v21, 8, v22 -; ALIGNED-NEXT: v_lshl_or_b32 v3, v23, 8, v20 +; ALIGNED-NEXT: buffer_load_ubyte v120, v4, s[0:3], 0 offen offset:2 +; ALIGNED-NEXT: buffer_load_ubyte v104, v4, s[0:3], 0 offen offset:4 +; ALIGNED-NEXT: buffer_load_ubyte v109, v4, s[0:3], 0 offen offset:5 +; ALIGNED-NEXT: buffer_load_ubyte v110, v4, s[0:3], 0 offen offset:6 +; ALIGNED-NEXT: buffer_load_ubyte v122, v4, s[0:3], 0 offen offset:7 +; ALIGNED-NEXT: s_waitcnt vmcnt(62) +; ALIGNED-NEXT: v_lshl_or_b32 v2, v112, 8, v115 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v100, 8, v101 +; ALIGNED-NEXT: v_lshl_or_b32 v106, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v103, 8, v113 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v99, 8, v102 +; ALIGNED-NEXT: v_lshl_or_b32 v90, v3, 16, v2 +; ALIGNED-NEXT: s_waitcnt vmcnt(60) +; ALIGNED-NEXT: v_lshl_or_b32 v2, v87, 8, v97 +; ALIGNED-NEXT: s_waitcnt vmcnt(58) +; ALIGNED-NEXT: v_lshl_or_b32 v3, v86, 8, v96 +; ALIGNED-NEXT: s_waitcnt vmcnt(14) +; ALIGNED-NEXT: v_lshl_or_b32 v62, v12, 8, v16 +; ALIGNED-NEXT: s_waitcnt vmcnt(10) +; ALIGNED-NEXT: v_lshl_or_b32 v76, v8, 8, v10 +; ALIGNED-NEXT: s_waitcnt vmcnt(3) +; ALIGNED-NEXT: buffer_store_dword v104, off, s[0:3], s32 offset:1364 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v88, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v83, 8, v84 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v85, 8, v82 +; ALIGNED-NEXT: s_waitcnt vmcnt(2) +; ALIGNED-NEXT: buffer_store_dword v109, off, s[0:3], s32 offset:1380 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(1) +; ALIGNED-NEXT: buffer_store_dword v110, off, s[0:3], s32 offset:1384 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: buffer_store_dword v122, off, s[0:3], s32 offset:1392 ; 4-byte Folded Spill ; ALIGNED-NEXT: v_lshl_or_b32 v73, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v68, 8, v80 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v65, 8, v70 +; ALIGNED-NEXT: v_lshl_or_b32 v45, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v67, 8, v71 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v52, 8, v53 +; ALIGNED-NEXT: v_lshl_or_b32 v116, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v55, 8, v69 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v51, 8, v54 +; ALIGNED-NEXT: v_lshl_or_b32 v114, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v38, 8, v50 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v66, 8, v39 +; ALIGNED-NEXT: v_lshl_or_b32 v98, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v35, 8, v37 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v32, 8, v31 +; ALIGNED-NEXT: v_lshl_or_b32 v81, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v33, 8, v36 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v30, 8, v34 +; ALIGNED-NEXT: v_lshl_or_b32 v64, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v27, 8, v29 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v26, 8, v28 +; ALIGNED-NEXT: v_lshl_or_b32 v49, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v22, 8, v24 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v23, 8, v21 +; ALIGNED-NEXT: v_lshl_or_b32 v48, v3, 16, v2 ; ALIGNED-NEXT: v_lshl_or_b32 v2, v17, 8, v19 ; ALIGNED-NEXT: v_lshl_or_b32 v3, v14, 8, v13 -; ALIGNED-NEXT: v_lshl_or_b32 v101, v3, 16, v2 +; ALIGNED-NEXT: v_lshl_or_b32 v25, v3, 16, v2 ; ALIGNED-NEXT: v_lshl_or_b32 v3, v15, 8, v18 -; ALIGNED-NEXT: v_lshl_or_b32 v84, v43, 16, v3 -; ALIGNED-NEXT: v_lshl_or_b32 v43, v9, 8, v11 -; ALIGNED-NEXT: v_lshl_or_b32 v3, v57, 16, v43 -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v43, v5, 8, v6 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v57, v7, 8, v1 -; 
ALIGNED-NEXT: v_lshl_or_b32 v2, v57, 16, v43 -; ALIGNED-NEXT: buffer_load_ubyte v43, v4, s[0:3], 0 offen offset:1 -; ALIGNED-NEXT: s_waitcnt vmcnt(1) +; ALIGNED-NEXT: v_lshl_or_b32 v20, v62, 16, v3 +; ALIGNED-NEXT: v_lshl_or_b32 v62, v9, 8, v11 +; ALIGNED-NEXT: v_lshl_or_b32 v3, v76, 16, v62 +; ALIGNED-NEXT: v_lshl_or_b32 v62, v5, 8, v6 +; ALIGNED-NEXT: v_lshl_or_b32 v76, v7, 8, v1 +; ALIGNED-NEXT: v_lshl_or_b32 v2, v76, 16, v62 +; ALIGNED-NEXT: s_clause 0x1 +; ALIGNED-NEXT: buffer_load_ubyte v62, v4, s[0:3], 0 offen offset:1 +; ALIGNED-NEXT: buffer_load_ubyte v76, v4, s[0:3], 0 offen offset:3 ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1336 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:1344 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x5 -; ALIGNED-NEXT: buffer_load_ubyte v127, v4, s[0:3], 0 offen offset:2 -; ALIGNED-NEXT: buffer_load_ubyte v57, v4, s[0:3], 0 offen offset:3 -; ALIGNED-NEXT: buffer_load_ubyte v78, v4, s[0:3], 0 offen offset:4 -; ALIGNED-NEXT: buffer_load_ubyte v90, v4, s[0:3], 0 offen offset:5 -; ALIGNED-NEXT: buffer_load_ubyte v91, v4, s[0:3], 0 offen offset:6 -; ALIGNED-NEXT: buffer_load_ubyte v124, v4, s[0:3], 0 offen offset:7 -; ALIGNED-NEXT: v_lshl_or_b32 v43, v43, 8, v0 -; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:1360 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v57, v57, 8, v127 -; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: buffer_store_dword v78, off, s[0:3], s32 offset:1364 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: buffer_store_dword v90, off, s[0:3], s32 offset:1376 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_store_dword v120, off, s[0:3], s32 offset:1368 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: buffer_store_dword v91, off, s[0:3], s32 offset:1380 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v57, 16, v43 -; ALIGNED-NEXT: v_lshl_or_b32 v43, v90, 8, v78 +; ALIGNED-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:1344 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v57, v124, 8, v91 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1388 ; 4-byte Folded Spill -; ALIGNED-NEXT: v_lshl_or_b32 v0, v57, 16, v43 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Spill -; ALIGNED-NEXT: s_clause 0x4 -; ALIGNED-NEXT: buffer_load_ubyte v121, v4, s[0:3], 0 offen offset:12 -; ALIGNED-NEXT: buffer_load_ubyte v107, v4, s[0:3], 0 offen offset:13 -; ALIGNED-NEXT: buffer_load_ubyte v110, v4, s[0:3], 0 offen offset:14 -; ALIGNED-NEXT: buffer_load_ubyte v108, v4, s[0:3], 0 offen offset:15 -; ALIGNED-NEXT: buffer_load_ubyte v93, v4, s[0:3], 0 offen offset:11 +; ALIGNED-NEXT: buffer_store_dword v76, off, s[0:3], s32 offset:1352 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v62, v62, 8, v0 +; ALIGNED-NEXT: v_lshl_or_b32 v76, v76, 8, v120 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62 +; ALIGNED-NEXT: v_lshl_or_b32 v62, v109, 8, v104 +; ALIGNED-NEXT: v_lshl_or_b32 v76, v122, 8, v110 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1412 ; 4-byte Folded Spill +; ALIGNED-NEXT: buffer_load_ubyte v0, v4, s[0:3], 0 offen offset:12 +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: 
buffer_store_dword v0, off, s[0:3], s32 offset:1424 ; 4-byte Folded Spill +; ALIGNED-NEXT: s_clause 0x3 +; ALIGNED-NEXT: buffer_load_ubyte v127, v4, s[0:3], 0 offen offset:13 +; ALIGNED-NEXT: buffer_load_ubyte v76, v4, s[0:3], 0 offen offset:14 +; ALIGNED-NEXT: buffer_load_ubyte v104, v4, s[0:3], 0 offen offset:15 +; ALIGNED-NEXT: buffer_load_ubyte v120, v4, s[0:3], 0 offen offset:11 ; ALIGNED-NEXT: s_waitcnt vmcnt(3) -; ALIGNED-NEXT: v_lshl_or_b32 v43, v107, 8, v121 +; ALIGNED-NEXT: v_lshl_or_b32 v62, v127, 8, v0 +; ALIGNED-NEXT: s_waitcnt vmcnt(2) +; ALIGNED-NEXT: buffer_store_dword v76, off, s[0:3], s32 offset:1428 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v57, v108, 8, v110 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v57, 16, v43 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1404 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v76, v104, 8, v76 +; ALIGNED-NEXT: buffer_store_dword v104, off, s[0:3], s32 offset:1432 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v95, v4, s[0:3], 0 offen offset:8 -; ALIGNED-NEXT: buffer_load_ubyte v91, v4, s[0:3], 0 offen offset:9 -; ALIGNED-NEXT: buffer_load_ubyte v90, v4, s[0:3], 0 offen offset:10 +; ALIGNED-NEXT: buffer_load_ubyte v122, v4, s[0:3], 0 offen offset:8 +; ALIGNED-NEXT: buffer_load_ubyte v110, v4, s[0:3], 0 offen offset:9 +; ALIGNED-NEXT: buffer_load_ubyte v109, v4, s[0:3], 0 offen offset:10 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) -; ALIGNED-NEXT: v_lshl_or_b32 v43, v91, 8, v95 +; ALIGNED-NEXT: v_lshl_or_b32 v62, v110, 8, v122 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: v_lshl_or_b32 v57, v93, 8, v90 -; ALIGNED-NEXT: v_lshl_or_b32 v0, v57, 16, v43 -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1412 ; 4-byte Folded Spill +; ALIGNED-NEXT: v_lshl_or_b32 v76, v120, 8, v109 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v76, 16, v62 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Spill ; ALIGNED-NEXT: s_clause 0x2 -; ALIGNED-NEXT: buffer_load_ubyte v57, v4, s[0:3], 0 offen offset:18 -; ALIGNED-NEXT: buffer_load_ubyte v78, v4, s[0:3], 0 offen offset:16 -; ALIGNED-NEXT: buffer_load_ubyte v43, v4, s[0:3], 0 offen offset:17 +; ALIGNED-NEXT: buffer_load_ubyte v62, v4, s[0:3], 0 offen offset:18 +; ALIGNED-NEXT: buffer_load_ubyte v104, v4, s[0:3], 0 offen offset:16 +; ALIGNED-NEXT: buffer_load_ubyte v76, v4, s[0:3], 0 offen offset:17 ; ALIGNED-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:488 ; ALIGNED-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:492 -; ALIGNED-NEXT: buffer_store_dword v84, off, s[0:3], s32 offset:484 -; ALIGNED-NEXT: buffer_store_dword v101, off, s[0:3], s32 offset:480 +; ALIGNED-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:484 +; ALIGNED-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:480 ; ALIGNED-NEXT: s_clause 0x1 ; ALIGNED-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; ALIGNED-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:708 ; ALIGNED-NEXT: v_add_nc_u32_e32 v4, 0xffffff00, v4 ; ALIGNED-NEXT: s_waitcnt vmcnt(4) -; ALIGNED-NEXT: v_lshl_or_b32 v0, v126, 8, v57 +; ALIGNED-NEXT: v_lshl_or_b32 v0, v125, 8, v62 ; ALIGNED-NEXT: s_waitcnt vmcnt(2) -; ALIGNED-NEXT: v_lshl_or_b32 v126, v43, 8, v78 +; ALIGNED-NEXT: v_lshl_or_b32 v125, v76, 8, v104 ; ALIGNED-NEXT: s_waitcnt vmcnt(1) ; ALIGNED-NEXT: v_add_co_u32 v2, 
vcc_lo, v2, s4 ; ALIGNED-NEXT: s_waitcnt vmcnt(0) @@ -15158,165 +15093,153 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: flat_store_byte v[2:3], v16 offset:246 ; ALIGNED-NEXT: flat_store_byte v[2:3], v18 offset:244 ; ALIGNED-NEXT: flat_store_byte v[2:3], v19 offset:240 -; ALIGNED-NEXT: buffer_store_dword v73, off, s[0:3], s32 offset:504 -; ALIGNED-NEXT: buffer_store_dword v104, off, s[0:3], s32 offset:508 -; ALIGNED-NEXT: buffer_store_dword v123, off, s[0:3], s32 offset:500 -; ALIGNED-NEXT: v_lshl_or_b32 v126, v0, 16, v126 +; ALIGNED-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:504 +; ALIGNED-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:508 +; ALIGNED-NEXT: buffer_store_dword v64, off, s[0:3], s32 offset:500 +; ALIGNED-NEXT: buffer_store_dword v81, off, s[0:3], s32 offset:496 +; ALIGNED-NEXT: flat_store_byte v[2:3], v21 offset:234 +; ALIGNED-NEXT: flat_store_byte v[2:3], v23 offset:235 +; ALIGNED-NEXT: flat_store_byte v[2:3], v22 offset:233 +; ALIGNED-NEXT: flat_store_byte v[2:3], v26 offset:239 +; ALIGNED-NEXT: flat_store_byte v[2:3], v27 offset:237 +; ALIGNED-NEXT: flat_store_byte v[2:3], v28 offset:238 +; ALIGNED-NEXT: flat_store_byte v[2:3], v29 offset:236 +; ALIGNED-NEXT: flat_store_byte v[2:3], v24 offset:232 +; ALIGNED-NEXT: flat_store_byte v[2:3], v31 offset:226 +; ALIGNED-NEXT: flat_store_byte v[2:3], v32 offset:227 +; ALIGNED-NEXT: flat_store_byte v[2:3], v35 offset:225 +; ALIGNED-NEXT: flat_store_byte v[2:3], v30 offset:231 +; ALIGNED-NEXT: flat_store_byte v[2:3], v33 offset:229 +; ALIGNED-NEXT: flat_store_byte v[2:3], v34 offset:230 +; ALIGNED-NEXT: flat_store_byte v[2:3], v36 offset:228 +; ALIGNED-NEXT: flat_store_byte v[2:3], v37 offset:224 +; ALIGNED-NEXT: buffer_store_dword v98, off, s[0:3], s32 offset:448 +; ALIGNED-NEXT: buffer_store_dword v114, off, s[0:3], s32 offset:460 +; ALIGNED-NEXT: buffer_store_dword v116, off, s[0:3], s32 offset:456 +; ALIGNED-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:452 +; ALIGNED-NEXT: flat_store_byte v[2:3], v68 offset:213 +; ALIGNED-NEXT: flat_store_byte v[2:3], v65 offset:215 +; ALIGNED-NEXT: flat_store_byte v[2:3], v38 offset:209 +; ALIGNED-NEXT: flat_store_byte v[2:3], v66 offset:211 +; ALIGNED-NEXT: flat_store_byte v[2:3], v39 offset:210 +; ALIGNED-NEXT: flat_store_byte v[2:3], v70 offset:214 +; ALIGNED-NEXT: flat_store_byte v[2:3], v80 offset:212 +; ALIGNED-NEXT: flat_store_byte v[2:3], v53 offset:218 +; ALIGNED-NEXT: flat_store_byte v[2:3], v52 offset:219 +; ALIGNED-NEXT: flat_store_byte v[2:3], v67 offset:217 +; ALIGNED-NEXT: flat_store_byte v[2:3], v51 offset:223 +; ALIGNED-NEXT: flat_store_byte v[2:3], v55 offset:221 +; ALIGNED-NEXT: flat_store_byte v[2:3], v54 offset:222 +; ALIGNED-NEXT: flat_store_byte v[2:3], v69 offset:220 +; ALIGNED-NEXT: flat_store_byte v[2:3], v71 offset:216 +; ALIGNED-NEXT: flat_store_byte v[2:3], v50 offset:208 +; ALIGNED-NEXT: buffer_store_dword v73, off, s[0:3], s32 offset:472 +; ALIGNED-NEXT: buffer_store_dword v88, off, s[0:3], s32 offset:476 +; ALIGNED-NEXT: buffer_store_dword v90, off, s[0:3], s32 offset:468 +; ALIGNED-NEXT: buffer_store_dword v106, off, s[0:3], s32 offset:464 +; ALIGNED-NEXT: flat_store_byte v[2:3], v82 offset:202 +; ALIGNED-NEXT: flat_store_byte v[2:3], v85 offset:203 +; ALIGNED-NEXT: flat_store_byte v[2:3], v83 offset:201 +; ALIGNED-NEXT: flat_store_byte v[2:3], v86 offset:207 +; ALIGNED-NEXT: flat_store_byte v[2:3], v87 offset:205 +; ALIGNED-NEXT: flat_store_byte v[2:3], v96 offset:206 +; 
ALIGNED-NEXT: flat_store_byte v[2:3], v97 offset:204 +; ALIGNED-NEXT: flat_store_byte v[2:3], v84 offset:200 +; ALIGNED-NEXT: flat_store_byte v[2:3], v101 offset:194 +; ALIGNED-NEXT: flat_store_byte v[2:3], v100 offset:195 +; ALIGNED-NEXT: flat_store_byte v[2:3], v112 offset:193 +; ALIGNED-NEXT: flat_store_byte v[2:3], v99 offset:199 +; ALIGNED-NEXT: flat_store_byte v[2:3], v103 offset:197 +; ALIGNED-NEXT: flat_store_byte v[2:3], v102 offset:198 +; ALIGNED-NEXT: flat_store_byte v[2:3], v113 offset:196 +; ALIGNED-NEXT: flat_store_byte v[2:3], v115 offset:192 +; ALIGNED-NEXT: v_lshl_or_b32 v125, v0, 16, v125 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1484 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_add_u32 s4, s4, 0xffffff00 ; ALIGNED-NEXT: s_addc_u32 s5, s5, -1 ; ALIGNED-NEXT: s_cmp_eq_u64 s[4:5], s[6:7] ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 -; ALIGNED-NEXT: flat_store_byte v[2:3], v20 offset:234 -; ALIGNED-NEXT: flat_store_byte v[2:3], v23 offset:235 -; ALIGNED-NEXT: flat_store_byte v[2:3], v21 offset:233 -; ALIGNED-NEXT: flat_store_byte v[2:3], v24 offset:239 -; ALIGNED-NEXT: flat_store_byte v[2:3], v25 offset:237 -; ALIGNED-NEXT: flat_store_byte v[2:3], v26 offset:238 -; ALIGNED-NEXT: flat_store_byte v[2:3], v27 offset:236 -; ALIGNED-NEXT: flat_store_byte v[2:3], v22 offset:232 -; ALIGNED-NEXT: flat_store_byte v[2:3], v29 offset:226 -; ALIGNED-NEXT: flat_store_byte v[2:3], v30 offset:227 -; ALIGNED-NEXT: flat_store_byte v[2:3], v33 offset:225 -; ALIGNED-NEXT: flat_store_byte v[2:3], v28 offset:231 -; ALIGNED-NEXT: flat_store_byte v[2:3], v31 offset:229 -; ALIGNED-NEXT: flat_store_byte v[2:3], v32 offset:230 -; ALIGNED-NEXT: flat_store_byte v[2:3], v34 offset:228 -; ALIGNED-NEXT: flat_store_byte v[2:3], v35 offset:224 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1480 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1476 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1472 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 +; ALIGNED-NEXT: flat_store_byte v[2:3], v117 offset:186 +; ALIGNED-NEXT: flat_store_byte v[2:3], v40 offset:187 +; ALIGNED-NEXT: flat_store_byte v[2:3], v118 offset:185 +; ALIGNED-NEXT: flat_store_byte v[2:3], v41 offset:191 +; ALIGNED-NEXT: flat_store_byte v[2:3], v43 offset:189 +; ALIGNED-NEXT: flat_store_byte v[2:3], v42 offset:190 +; ALIGNED-NEXT: flat_store_byte v[2:3], v44 offset:188 +; ALIGNED-NEXT: flat_store_byte v[2:3], v119 offset:184 +; ALIGNED-NEXT: flat_store_byte v[2:3], v47 offset:178 +; ALIGNED-NEXT: flat_store_byte v[2:3], v56 offset:179 +; ALIGNED-NEXT: flat_store_byte v[2:3], v59 offset:177 +; ALIGNED-NEXT: flat_store_byte v[2:3], v46 offset:183 +; ALIGNED-NEXT: flat_store_byte v[2:3], v57 offset:181 +; ALIGNED-NEXT: flat_store_byte v[2:3], v58 offset:182 +; ALIGNED-NEXT: flat_store_byte v[2:3], v60 offset:180 +; ALIGNED-NEXT: 
flat_store_byte v[2:3], v61 offset:176 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1468 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 -; ALIGNED-NEXT: flat_store_byte v[2:3], v54 offset:213 -; ALIGNED-NEXT: flat_store_byte v[2:3], v52 offset:215 -; ALIGNED-NEXT: flat_store_byte v[2:3], v36 offset:209 -; ALIGNED-NEXT: flat_store_byte v[2:3], v55 offset:211 -; ALIGNED-NEXT: flat_store_byte v[2:3], v37 offset:210 -; ALIGNED-NEXT: flat_store_byte v[2:3], v65 offset:214 -; ALIGNED-NEXT: flat_store_byte v[2:3], v67 offset:212 -; ALIGNED-NEXT: flat_store_byte v[2:3], v49 offset:218 -; ALIGNED-NEXT: flat_store_byte v[2:3], v48 offset:219 -; ALIGNED-NEXT: flat_store_byte v[2:3], v53 offset:217 -; ALIGNED-NEXT: flat_store_byte v[2:3], v39 offset:223 -; ALIGNED-NEXT: flat_store_byte v[2:3], v51 offset:221 -; ALIGNED-NEXT: flat_store_byte v[2:3], v50 offset:222 -; ALIGNED-NEXT: flat_store_byte v[2:3], v64 offset:220 -; ALIGNED-NEXT: flat_store_byte v[2:3], v66 offset:216 -; ALIGNED-NEXT: flat_store_byte v[2:3], v38 offset:208 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1464 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1460 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1456 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1452 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 -; ALIGNED-NEXT: flat_store_byte v[2:3], v68 offset:202 -; ALIGNED-NEXT: flat_store_byte v[2:3], v71 offset:203 -; ALIGNED-NEXT: flat_store_byte v[2:3], v69 offset:201 -; ALIGNED-NEXT: flat_store_byte v[2:3], v80 offset:207 -; ALIGNED-NEXT: flat_store_byte v[2:3], v81 offset:205 -; ALIGNED-NEXT: flat_store_byte v[2:3], v82 offset:206 -; ALIGNED-NEXT: flat_store_byte v[2:3], v83 offset:204 -; ALIGNED-NEXT: flat_store_byte v[2:3], v70 offset:200 -; ALIGNED-NEXT: flat_store_byte v[2:3], v87 offset:194 -; ALIGNED-NEXT: flat_store_byte v[2:3], v86 offset:195 -; ALIGNED-NEXT: flat_store_byte v[2:3], v98 offset:193 -; ALIGNED-NEXT: flat_store_byte v[2:3], v85 offset:199 -; ALIGNED-NEXT: flat_store_byte v[2:3], v97 offset:197 -; ALIGNED-NEXT: flat_store_byte v[2:3], v96 offset:198 -; ALIGNED-NEXT: flat_store_byte v[2:3], v99 offset:196 -; ALIGNED-NEXT: flat_store_byte v[2:3], v100 offset:192 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 +; ALIGNED-NEXT: flat_store_byte v[2:3], v63 offset:170 +; ALIGNED-NEXT: flat_store_byte v[2:3], v75 offset:171 +; ALIGNED-NEXT: flat_store_byte v[2:3], v72 offset:169 +; ALIGNED-NEXT: flat_store_byte v[2:3], v77 offset:175 +; ALIGNED-NEXT: flat_store_byte v[2:3], v79 offset:173 +; 
ALIGNED-NEXT: flat_store_byte v[2:3], v78 offset:174 +; ALIGNED-NEXT: flat_store_byte v[2:3], v89 offset:172 +; ALIGNED-NEXT: flat_store_byte v[2:3], v74 offset:168 +; ALIGNED-NEXT: flat_store_byte v[2:3], v93 offset:162 +; ALIGNED-NEXT: flat_store_byte v[2:3], v92 offset:163 +; ALIGNED-NEXT: flat_store_byte v[2:3], v105 offset:161 +; ALIGNED-NEXT: flat_store_byte v[2:3], v91 offset:167 +; ALIGNED-NEXT: flat_store_byte v[2:3], v95 offset:165 +; ALIGNED-NEXT: flat_store_byte v[2:3], v94 offset:166 +; ALIGNED-NEXT: flat_store_byte v[2:3], v107 offset:164 +; ALIGNED-NEXT: flat_store_byte v[2:3], v108 offset:160 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1444 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1436 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 -; ALIGNED-NEXT: flat_store_byte v[2:3], v102 offset:186 -; ALIGNED-NEXT: flat_store_byte v[2:3], v112 offset:187 -; ALIGNED-NEXT: flat_store_byte v[2:3], v103 offset:185 -; ALIGNED-NEXT: flat_store_byte v[2:3], v114 offset:191 -; ALIGNED-NEXT: flat_store_byte v[2:3], v115 offset:189 -; ALIGNED-NEXT: flat_store_byte v[2:3], v116 offset:190 -; ALIGNED-NEXT: flat_store_byte v[2:3], v117 offset:188 -; ALIGNED-NEXT: flat_store_byte v[2:3], v113 offset:184 -; ALIGNED-NEXT: flat_store_byte v[2:3], v119 offset:178 -; ALIGNED-NEXT: flat_store_byte v[2:3], v40 offset:179 -; ALIGNED-NEXT: flat_store_byte v[2:3], v44 offset:177 -; ALIGNED-NEXT: flat_store_byte v[2:3], v118 offset:183 -; ALIGNED-NEXT: flat_store_byte v[2:3], v41 offset:181 -; ALIGNED-NEXT: flat_store_byte v[2:3], v42 offset:182 -; ALIGNED-NEXT: flat_store_byte v[2:3], v45 offset:180 -; ALIGNED-NEXT: flat_store_byte v[2:3], v46 offset:176 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1432 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1408 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1428 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1424 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 +; ALIGNED-NEXT: flat_store_byte v[2:3], v111 offset:154 +; ALIGNED-NEXT: flat_store_byte v[2:3], v124 offset:155 +; ALIGNED-NEXT: flat_store_byte v[2:3], v121 offset:153 +; ALIGNED-NEXT: flat_store_byte v[2:3], v126 offset:159 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1416 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:157 ; ALIGNED-NEXT: buffer_load_dword v0, 
off, s[0:3], s32 offset:1420 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 -; ALIGNED-NEXT: flat_store_byte v[2:3], v47 offset:170 -; ALIGNED-NEXT: flat_store_byte v[2:3], v58 offset:171 -; ALIGNED-NEXT: flat_store_byte v[2:3], v56 offset:169 -; ALIGNED-NEXT: flat_store_byte v[2:3], v60 offset:175 -; ALIGNED-NEXT: flat_store_byte v[2:3], v61 offset:173 -; ALIGNED-NEXT: flat_store_byte v[2:3], v62 offset:174 -; ALIGNED-NEXT: flat_store_byte v[2:3], v63 offset:172 -; ALIGNED-NEXT: flat_store_byte v[2:3], v59 offset:168 -; ALIGNED-NEXT: flat_store_byte v[2:3], v75 offset:162 -; ALIGNED-NEXT: flat_store_byte v[2:3], v74 offset:163 -; ALIGNED-NEXT: flat_store_byte v[2:3], v79 offset:161 -; ALIGNED-NEXT: flat_store_byte v[2:3], v72 offset:167 -; ALIGNED-NEXT: flat_store_byte v[2:3], v77 offset:165 -; ALIGNED-NEXT: flat_store_byte v[2:3], v76 offset:166 -; ALIGNED-NEXT: flat_store_byte v[2:3], v88 offset:164 -; ALIGNED-NEXT: flat_store_byte v[2:3], v89 offset:160 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1408 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1392 ; 4-byte Folded Reload -; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1384 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:158 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1404 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 -; ALIGNED-NEXT: flat_store_byte v[2:3], v92 offset:154 -; ALIGNED-NEXT: flat_store_byte v[2:3], v106 offset:155 -; ALIGNED-NEXT: flat_store_byte v[2:3], v94 offset:153 -; ALIGNED-NEXT: flat_store_byte v[2:3], v109 offset:159 -; ALIGNED-NEXT: flat_store_byte v[2:3], v111 offset:157 -; ALIGNED-NEXT: flat_store_byte v[2:3], v120 offset:158 -; ALIGNED-NEXT: flat_store_byte v[2:3], v122 offset:156 -; ALIGNED-NEXT: flat_store_byte v[2:3], v105 offset:152 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1356 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:156 +; ALIGNED-NEXT: flat_store_byte v[2:3], v123 offset:152 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1360 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:146 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1348 ; 4-byte Folded Reload @@ -15325,14 +15248,16 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1340 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:145 -; ALIGNED-NEXT: flat_store_byte v[2:3], v125 offset:151 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1368 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1388 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:149 +; ALIGNED-NEXT: flat_store_byte 
v[2:3], v0 offset:151 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1372 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:149 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1376 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:150 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1352 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1356 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:148 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1332 ; 4-byte Folded Reload @@ -15767,7 +15692,7 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:644 -; ALIGNED-NEXT: buffer_store_dword v126, off, s[0:3], s32 offset:640 +; ALIGNED-NEXT: buffer_store_dword v125, off, s[0:3], s32 offset:640 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:26 @@ -15792,11 +15717,11 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:24 -; ALIGNED-NEXT: flat_store_byte v[2:3], v57 offset:18 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1416 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[2:3], v62 offset:18 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1452 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:19 -; ALIGNED-NEXT: flat_store_byte v[2:3], v43 offset:17 +; ALIGNED-NEXT: flat_store_byte v[2:3], v76 offset:17 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:23 @@ -15809,40 +15734,50 @@ define void @memmove_p0_p5_sz2048(ptr addrspace(0) align 1 %dst, ptr addrspace(5 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:20 -; ALIGNED-NEXT: flat_store_byte v[2:3], v78 offset:16 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1412 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[2:3], v104 offset:16 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1448 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:664 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1404 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1440 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:668 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1396 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1412 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:660 
-; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1388 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1400 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:656 -; ALIGNED-NEXT: flat_store_byte v[2:3], v90 offset:10 -; ALIGNED-NEXT: flat_store_byte v[2:3], v93 offset:11 -; ALIGNED-NEXT: flat_store_byte v[2:3], v107 offset:13 -; ALIGNED-NEXT: flat_store_byte v[2:3], v91 offset:9 -; ALIGNED-NEXT: flat_store_byte v[2:3], v108 offset:15 -; ALIGNED-NEXT: flat_store_byte v[2:3], v110 offset:14 -; ALIGNED-NEXT: flat_store_byte v[2:3], v121 offset:12 -; ALIGNED-NEXT: flat_store_byte v[2:3], v95 offset:8 -; ALIGNED-NEXT: flat_store_byte v[2:3], v127 offset:2 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1360 ; 4-byte Folded Reload +; ALIGNED-NEXT: flat_store_byte v[2:3], v109 offset:10 +; ALIGNED-NEXT: flat_store_byte v[2:3], v120 offset:11 +; ALIGNED-NEXT: flat_store_byte v[2:3], v127 offset:13 +; ALIGNED-NEXT: flat_store_byte v[2:3], v110 offset:9 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1432 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:15 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1428 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:14 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1424 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:12 +; ALIGNED-NEXT: flat_store_byte v[2:3], v122 offset:8 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1368 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:2 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1352 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:3 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1344 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:1 -; ALIGNED-NEXT: flat_store_byte v[2:3], v124 offset:7 -; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1376 ; 4-byte Folded Reload +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1392 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) -; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:5 +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:7 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1380 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) +; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:5 +; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1384 ; 4-byte Folded Reload +; ALIGNED-NEXT: s_waitcnt vmcnt(0) ; ALIGNED-NEXT: flat_store_byte v[2:3], v0 offset:6 ; ALIGNED-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:1364 ; 4-byte Folded Reload ; ALIGNED-NEXT: s_waitcnt vmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll index dd5c247f6ef35..14b0729b37302 100644 --- a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll +++ b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll @@ -388,8 +388,8 @@ define void @memmove_p0_p3(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align ; CHECK-NEXT: s_and_saveexec_b32 s7, s4 ; CHECK-NEXT: s_cbranch_execz .LBB2_13 ; CHECK-NEXT: ; %bb.11: ; 
%memmove_bwd_residual_loop.preheader -; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v0 -; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v1, s4 +; CHECK-NEXT: v_add_co_u32 v9, s4, v0, v3 +; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v1, v4, s4 ; CHECK-NEXT: v_add3_u32 v4, v3, v2, -1 ; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4 @@ -684,8 +684,8 @@ define void @memmove_p0_p5(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align ; CHECK-NEXT: s_and_saveexec_b32 s7, s4 ; CHECK-NEXT: s_cbranch_execz .LBB4_13 ; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader -; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v0 -; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v1, s4 +; CHECK-NEXT: v_add_co_u32 v9, s4, v0, v3 +; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v1, v4, s4 ; CHECK-NEXT: v_add3_u32 v4, v3, v2, -1 ; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4 @@ -1411,8 +1411,8 @@ define void @memmove_p3_p0(ptr addrspace(3) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: s_and_saveexec_b32 s7, s4 ; CHECK-NEXT: s_cbranch_execz .LBB10_13 ; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader -; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v2, s4 +; CHECK-NEXT: v_add_co_u32 v9, s4, v1, v3 +; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v2, v4, s4 ; CHECK-NEXT: v_add3_u32 v4, v3, v0, -1 ; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4 @@ -1889,8 +1889,8 @@ define void @memmove_p5_p0(ptr addrspace(5) align 1 %dst, ptr addrspace(0) align ; CHECK-NEXT: s_and_saveexec_b32 s7, s4 ; CHECK-NEXT: s_cbranch_execz .LBB15_13 ; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader -; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v1 -; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v2, s4 +; CHECK-NEXT: v_add_co_u32 v9, s4, v1, v3 +; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v2, v4, s4 ; CHECK-NEXT: v_add3_u32 v4, v3, v0, -1 ; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1 ; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4 diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-lastuse.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-lastuse.ll index bc905fa564f8a..80ea48be0b893 100644 --- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-lastuse.ll +++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-lastuse.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 < %s | FileCheck --check-prefix=GFX12 %s ; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 -mattr=+cumode < %s | FileCheck --check-prefix=GFX12 %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GFX1250 %s define amdgpu_kernel void @private_last_use_load_0(ptr addrspace(5) %in, ptr addrspace(1) %out) { ; GFX12-LABEL: private_last_use_load_0: @@ -13,6 +14,17 @@ define amdgpu_kernel void @private_last_use_load_0(ptr addrspace(5) %in, ptr add ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX12-NEXT: s_endpgm +; +; GFX1250-LABEL: private_last_use_load_0: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v1, off, s2 th:TH_LOAD_LU +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: global_store_b32 
v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm entry: %val = load i32, ptr addrspace(5) %in, align 4, !amdgpu.last.use !{} store i32 %val, ptr addrspace(1) %out @@ -36,6 +48,20 @@ define amdgpu_kernel void @private_last_use_load_1(ptr addrspace(5) %in, ptr add ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX12-NEXT: s_endpgm +; +; GFX1250-LABEL: private_last_use_load_1: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: v_mov_b32_e32 v1, v0 +; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_mov_b32 s3, 0x3ff +; GFX1250-NEXT: v_and_b32_e64 v1, v1, s3 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v1, v1, s2 scale_offset th:TH_LOAD_LU +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %val.gep = getelementptr inbounds i32, ptr addrspace(5) %in, i32 %tid @@ -57,6 +83,17 @@ define amdgpu_kernel void @private_last_use_and_volatile_load(ptr addrspace(5) % ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX12-NEXT: s_endpgm +; +; GFX1250-LABEL: private_last_use_and_volatile_load: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v1, off, s2 th:TH_LOAD_BYPASS scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm entry: %val = load volatile i32, ptr addrspace(5) %in, align 4, !amdgpu.last.use !{} store i32 %val, ptr addrspace(1) %out @@ -74,6 +111,17 @@ define amdgpu_kernel void @private_last_use_and_nontemporal_load(ptr addrspace(5 ; GFX12-NEXT: s_wait_loadcnt 0x0 ; GFX12-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX12-NEXT: s_endpgm +; +; GFX1250-LABEL: private_last_use_and_nontemporal_load: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v1, off, s2 th:TH_LOAD_LU +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm entry: %val = load i32, ptr addrspace(5) %in, align 4, !amdgpu.last.use !{}, !nontemporal !0 store i32 %val, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-nontemporal.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-nontemporal.ll index 2aa4f021c259c..89de17ecbd1e8 100644 --- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-nontemporal.ll +++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-nontemporal.ll @@ -12,6 +12,7 @@ ; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1100 -mattr=+cumode < %s | FileCheck --check-prefixes=GFX11-CU %s ; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 < %s | FileCheck --check-prefixes=GFX12-WGP %s ; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 -mattr=+cumode < %s | FileCheck --check-prefixes=GFX12-CU %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GFX1250 %s define amdgpu_kernel void @private_nontemporal_load_0( ; GFX6-LABEL: private_nontemporal_load_0: @@ -201,6 +202,17 @@ define amdgpu_kernel void @private_nontemporal_load_0( ; GFX12-CU-NEXT: s_wait_loadcnt 0x0 ; GFX12-CU-NEXT: 
global_store_b32 v0, v1, s[0:1] ; GFX12-CU-NEXT: s_endpgm +; +; GFX1250-LABEL: private_nontemporal_load_0: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v1, off, s2 th:TH_LOAD_NT +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm ptr addrspace(5) %in, ptr addrspace(1) %out) { entry: %val = load i32, ptr addrspace(5) %in, align 4, !nontemporal !0 @@ -450,6 +462,20 @@ define amdgpu_kernel void @private_nontemporal_load_1( ; GFX12-CU-NEXT: s_wait_loadcnt 0x0 ; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX12-CU-NEXT: s_endpgm +; +; GFX1250-LABEL: private_nontemporal_load_1: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: v_mov_b32_e32 v1, v0 +; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_mov_b32 s3, 0x3ff +; GFX1250-NEXT: v_and_b32_e64 v1, v1, s3 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v1, v1, s2 scale_offset th:TH_LOAD_NT +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm ptr addrspace(5) %in, ptr addrspace(1) %out) { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() @@ -627,6 +653,17 @@ define amdgpu_kernel void @private_nontemporal_store_0( ; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1 ; GFX12-CU-NEXT: scratch_store_b32 off, v0, s0 th:TH_STORE_NT ; GFX12-CU-NEXT: s_endpgm +; +; GFX1250-LABEL: private_nontemporal_store_0: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b64 s[2:3], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x8 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_load_b32 s1, s[2:3], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, s1 +; GFX1250-NEXT: scratch_store_b32 off, v0, s0 th:TH_STORE_NT +; GFX1250-NEXT: s_endpgm ptr addrspace(1) %in, ptr addrspace(5) %out) { entry: %val = load i32, ptr addrspace(1) %in, align 4 @@ -846,6 +883,20 @@ define amdgpu_kernel void @private_nontemporal_store_1( ; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1 ; GFX12-CU-NEXT: scratch_store_b32 v1, v0, s0 th:TH_STORE_NT ; GFX12-CU-NEXT: s_endpgm +; +; GFX1250-LABEL: private_nontemporal_store_1: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b64 s[2:3], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x8 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_load_b32 s1, s[2:3], 0x0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s2, 0x3ff +; GFX1250-NEXT: v_and_b32_e64 v1, v0, s2 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, s1 +; GFX1250-NEXT: scratch_store_b32 v1, v0, s0 scale_offset th:TH_STORE_NT +; GFX1250-NEXT: s_endpgm ptr addrspace(1) %in, ptr addrspace(5) %out) { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() @@ -1047,6 +1098,17 @@ define amdgpu_kernel void @private_nontemporal_volatile_load( ; GFX12-CU-NEXT: s_wait_loadcnt 0x0 ; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX12-CU-NEXT: s_endpgm +; +; GFX1250-LABEL: private_nontemporal_volatile_load: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v1, off, s2 th:TH_LOAD_NT scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: 
global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm ptr addrspace(5) %in, ptr addrspace(1) %out) { entry: %val = load volatile i32, ptr addrspace(5) %in, align 4, !nontemporal !0 diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-volatile.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-volatile.ll index df4193969f8a0..7faa0621aa6d0 100644 --- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-volatile.ll +++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-private-volatile.ll @@ -8,6 +8,7 @@ ; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1100 -mattr=+cumode < %s | FileCheck --check-prefixes=GFX11-CU %s ; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 < %s | FileCheck --check-prefixes=GFX12-WGP %s ; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 -mattr=+cumode < %s | FileCheck --check-prefixes=GFX12-CU %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GFX1250 %s define amdgpu_kernel void @private_volatile_load_0( ; GFX6-LABEL: private_volatile_load_0: @@ -155,6 +156,17 @@ define amdgpu_kernel void @private_volatile_load_0( ; GFX12-CU-NEXT: s_wait_loadcnt 0x0 ; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX12-CU-NEXT: s_endpgm +; +; GFX1250-LABEL: private_volatile_load_0: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v1, off, s2 scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm ptr addrspace(5) %in, ptr addrspace(1) %out) { entry: %val = load volatile i32, ptr addrspace(5) %in, align 4 @@ -340,6 +352,20 @@ define amdgpu_kernel void @private_volatile_load_1( ; GFX12-CU-NEXT: s_wait_loadcnt 0x0 ; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX12-CU-NEXT: s_endpgm +; +; GFX1250-LABEL: private_volatile_load_1: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: v_mov_b32_e32 v1, v0 +; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_mov_b32 s3, 0x3ff +; GFX1250-NEXT: v_and_b32_e64 v1, v1, s3 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v1, v1, s2 scale_offset scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm ptr addrspace(5) %in, ptr addrspace(1) %out) { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() @@ -490,6 +516,18 @@ define amdgpu_kernel void @private_volatile_store_0( ; GFX12-CU-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS ; GFX12-CU-NEXT: s_wait_storecnt 0x0 ; GFX12-CU-NEXT: s_endpgm +; +; GFX1250-LABEL: private_volatile_store_0: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b64 s[2:3], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x8 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_load_b32 s1, s[2:3], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, s1 +; GFX1250-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_storecnt 0x0 +; GFX1250-NEXT: s_endpgm ptr addrspace(1) %in, ptr addrspace(5) %out) { entry: %val = load i32, ptr addrspace(1) %in, align 4 @@ -664,6 +702,21 @@ define amdgpu_kernel void @private_volatile_store_1( ; GFX12-CU-NEXT: scratch_store_b32 v1, v0, s0 scope:SCOPE_SYS ; GFX12-CU-NEXT: s_wait_storecnt 0x0 ; GFX12-CU-NEXT: s_endpgm +; +; GFX1250-LABEL: 
private_volatile_store_1: +; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_load_b64 s[2:3], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x8 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_load_b32 s1, s[2:3], 0x0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_mov_b32 s2, 0x3ff +; GFX1250-NEXT: v_and_b32_e64 v1, v0, s2 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, s1 +; GFX1250-NEXT: scratch_store_b32 v1, v0, s0 scale_offset scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_storecnt 0x0 +; GFX1250-NEXT: s_endpgm ptr addrspace(1) %in, ptr addrspace(5) %out) { entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll index 7e3d5c97391e1..baccb4c7d0859 100644 --- a/llvm/test/CodeGen/AMDGPU/mul.ll +++ b/llvm/test/CodeGen/AMDGPU/mul.ll @@ -3221,7 +3221,7 @@ define amdgpu_kernel void @s_mul_i128(ptr addrspace(1) %out, [8 x i32], i128 %a, ; GFX1250-NEXT: s_load_b128 s[12:15], s[4:5], 0x4c ; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_mov_b64 s[4:5], lit64(0xffffffff) +; GFX1250-NEXT: s_mov_b64 s[4:5], 0xffffffff ; GFX1250-NEXT: s_mov_b32 s3, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_mov_b32 s7, s3 diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll index bf8994e005fc5..3d9c2a29cb9c1 100644 --- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll +++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll @@ -815,9 +815,10 @@ define amdgpu_kernel void @test_umul_i24(ptr addrspace(1) %out, i32 %arg) { ; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: s_lshr_b32 s2, s2, 9 -; SI-NEXT: v_mul_hi_u32 v0, s2, v0 -; SI-NEXT: s_mul_i32 s2, s2, 0xff803fe1 -; SI-NEXT: v_alignbit_b32 v0, v0, s2, 1 +; SI-NEXT: s_mul_i32 s4, s2, 0xff803fe1 +; SI-NEXT: v_mul_hi_u32 v1, s2, v0 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 1 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm @@ -832,7 +833,7 @@ define amdgpu_kernel void @test_umul_i24(ptr addrspace(1) %out, i32 %arg) { ; VI-NEXT: s_lshr_b32 s0, s0, 9 ; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s0, v0, 0 ; VI-NEXT: s_mov_b64 s[0:1], 0 -; VI-NEXT: v_alignbit_b32 v0, v1, v0, 1 +; VI-NEXT: v_lshrrev_b64 v[0:1], 1, v[0:1] ; VI-NEXT: s_nop 2 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm @@ -844,11 +845,11 @@ define amdgpu_kernel void @test_umul_i24(ptr addrspace(1) %out, i32 %arg) { ; GFX9-NEXT: s_mov_b32 s3, 0xf000 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: s_lshr_b32 s2, s2, 9 -; GFX9-NEXT: s_mul_hi_u32 s4, s2, 0xff803fe1 -; GFX9-NEXT: s_mul_i32 s2, s2, 0xff803fe1 -; GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: v_alignbit_b32 v0, s4, v0, 1 +; GFX9-NEXT: s_mul_hi_u32 s5, s2, 0xff803fe1 +; GFX9-NEXT: s_mul_i32 s4, s2, 0xff803fe1 +; GFX9-NEXT: s_lshr_b64 s[4:5], s[4:5], 1 ; GFX9-NEXT: s_mov_b32 s2, -1 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX9-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll index 6d0aa1e784530..7e4be65898b65 100644 --- a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll +++ b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll @@ -9,92 +9,65 @@ define protected amdgpu_kernel void @no_folding_imm_to_inst_with_fi(<4 x i64> %v ; CHECK-NEXT: s_load_b512 s[16:31], s[4:5], 0xe4 ; 
CHECK-NEXT: s_load_b512 s[0:15], s[4:5], 0xa4 ; CHECK-NEXT: s_mov_b64 s[34:35], src_private_base -; CHECK-NEXT: s_movk_i32 s33, 0x70 -; CHECK-NEXT: s_movk_i32 s34, 0x60 -; CHECK-NEXT: s_or_b32 s44, 0x80, s33 -; CHECK-NEXT: s_mov_b32 s45, s35 -; CHECK-NEXT: s_or_b32 s46, 0x80, s34 -; CHECK-NEXT: s_mov_b32 s47, s35 -; CHECK-NEXT: v_dual_mov_b32 v20, s44 :: v_dual_mov_b32 v21, s45 -; CHECK-NEXT: v_dual_mov_b32 v22, s46 :: v_dual_mov_b32 v23, s47 ; CHECK-NEXT: s_movk_i32 s34, 0x80 ; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: v_dual_mov_b32 v34, s34 :: v_dual_mov_b32 v35, s35 +; CHECK-NEXT: v_dual_mov_b32 v20, s34 :: v_dual_mov_b32 v21, s35 ; CHECK-NEXT: s_wait_kmcnt 0x0 ; CHECK-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v1, s41 ; CHECK-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43 ; CHECK-NEXT: v_dual_mov_b32 v4, s36 :: v_dual_mov_b32 v5, s37 ; CHECK-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v7, s39 -; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS -; CHECK-NEXT: s_wait_storecnt 0x0 -; CHECK-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v1, s21 -; CHECK-NEXT: s_movk_i32 s20, 0x50 ; CHECK-NEXT: v_dual_mov_b32 v8, s28 :: v_dual_mov_b32 v9, s29 ; CHECK-NEXT: v_dual_mov_b32 v10, s30 :: v_dual_mov_b32 v11, s31 -; CHECK-NEXT: s_wait_alu 0xfffe -; CHECK-NEXT: s_or_b32 s20, 0x80, s20 -; CHECK-NEXT: s_mov_b32 s21, s35 ; CHECK-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25 ; CHECK-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27 -; CHECK-NEXT: v_dual_mov_b32 v2, s22 :: v_dual_mov_b32 v3, s23 -; CHECK-NEXT: s_wait_alu 0xfffe -; CHECK-NEXT: v_dual_mov_b32 v25, s21 :: v_dual_mov_b32 v24, s20 +; CHECK-NEXT: v_dual_mov_b32 v16, s20 :: v_dual_mov_b32 v17, s21 +; CHECK-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23 +; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS +; CHECK-NEXT: s_wait_storecnt 0x0 ; CHECK-NEXT: scratch_store_b128 off, v[4:7], off scope:SCOPE_SYS ; CHECK-NEXT: s_wait_storecnt 0x0 -; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] scope:SCOPE_SYS +; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] offset:112 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_storecnt 0x0 -; CHECK-NEXT: flat_store_b128 v[22:23], v[12:15] scope:SCOPE_SYS +; CHECK-NEXT: flat_store_b128 v[20:21], v[12:15] offset:96 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_storecnt 0x0 -; CHECK-NEXT: flat_store_b128 v[24:25], v[0:3] scope:SCOPE_SYS +; CHECK-NEXT: flat_store_b128 v[20:21], v[16:19] offset:80 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_storecnt 0x0 ; CHECK-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17 -; CHECK-NEXT: s_or_b32 s16, 0x80, 64 -; CHECK-NEXT: s_mov_b32 s17, s35 -; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13 -; CHECK-NEXT: s_or_b32 s12, 0x80, 48 -; CHECK-NEXT: s_mov_b32 s13, s35 -; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 -; CHECK-NEXT: s_or_b32 s8, 0x80, 32 -; CHECK-NEXT: s_mov_b32 s9, s35 -; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5 -; CHECK-NEXT: s_or_b32 s4, 0x80, 16 -; CHECK-NEXT: s_mov_b32 s5, s35 ; CHECK-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v3, s19 -; CHECK-NEXT: s_wait_alu 0xfffe -; CHECK-NEXT: v_dual_mov_b32 v27, s17 :: v_dual_mov_b32 v26, s16 +; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13 ; CHECK-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s15 -; CHECK-NEXT: v_dual_mov_b32 v29, s13 :: v_dual_mov_b32 v28, s12 -; CHECK-NEXT: v_dual_mov_b32 v31, s9 :: v_dual_mov_b32 v30, s8 -; CHECK-NEXT: v_dual_mov_b32 v33, s5 :: 
v_dual_mov_b32 v32, s4 +; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 ; CHECK-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 +; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5 ; CHECK-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s7 ; CHECK-NEXT: v_dual_mov_b32 v16, s0 :: v_dual_mov_b32 v17, s1 ; CHECK-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v19, s3 -; CHECK-NEXT: flat_store_b128 v[26:27], v[0:3] scope:SCOPE_SYS +; CHECK-NEXT: flat_store_b128 v[20:21], v[0:3] offset:64 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_storecnt 0x0 -; CHECK-NEXT: flat_store_b128 v[28:29], v[4:7] scope:SCOPE_SYS +; CHECK-NEXT: flat_store_b128 v[20:21], v[4:7] offset:48 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_storecnt 0x0 -; CHECK-NEXT: flat_store_b128 v[30:31], v[8:11] scope:SCOPE_SYS +; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] offset:32 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_storecnt 0x0 -; CHECK-NEXT: flat_store_b128 v[32:33], v[12:15] scope:SCOPE_SYS +; CHECK-NEXT: flat_store_b128 v[20:21], v[12:15] offset:16 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_storecnt 0x0 -; CHECK-NEXT: flat_store_b128 v[34:35], v[16:19] scope:SCOPE_SYS +; CHECK-NEXT: flat_store_b128 v[20:21], v[16:19] scope:SCOPE_SYS ; CHECK-NEXT: s_wait_storecnt 0x0 -; CHECK-NEXT: flat_load_b128 v[0:3], v[22:23] scope:SCOPE_SYS +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:96 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 -; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:112 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 -; CHECK-NEXT: flat_load_b128 v[0:3], v[26:27] scope:SCOPE_SYS +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:64 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 -; CHECK-NEXT: flat_load_b128 v[0:3], v[24:25] scope:SCOPE_SYS +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:80 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 -; CHECK-NEXT: flat_load_b128 v[0:3], v[30:31] scope:SCOPE_SYS +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:32 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 -; CHECK-NEXT: flat_load_b128 v[0:3], v[28:29] scope:SCOPE_SYS +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:48 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 -; CHECK-NEXT: flat_load_b128 v[0:3], v[34:35] scope:SCOPE_SYS +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS ; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0 -; CHECK-NEXT: flat_load_b128 v[0:3], v[32:33] scope:SCOPE_SYS +; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:16 scope:SCOPE_SYS ; CHECK-NEXT: s_wait_loadcnt 0x0 ; CHECK-NEXT: s_endpgm bb: diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll index 9f27e1ffd9130..b0651ef53dd1b 100644 --- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll +++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll @@ -791,7 +791,7 @@ define amdgpu_kernel void @fadd_v2_v_lit_lo0(ptr addrspace(1) %a) { ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x3f80000000000000) +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x3f80000000000000 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -803,7 +803,7 @@ define amdgpu_kernel void @fadd_v2_v_lit_lo0(ptr addrspace(1) %a) { ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], 
s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 -; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000) +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x3f80000000000000 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 @@ -851,7 +851,7 @@ define amdgpu_kernel void @fadd_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x400000003f800000) +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x400000003f800000 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -863,7 +863,7 @@ define amdgpu_kernel void @fadd_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 -; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000) +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x400000003f800000 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 @@ -1989,7 +1989,7 @@ define amdgpu_kernel void @fmul_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000) +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x4040000040800000 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -2001,7 +2001,7 @@ define amdgpu_kernel void @fmul_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 -; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000) +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x4040000040800000 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 @@ -2907,8 +2907,8 @@ define amdgpu_kernel void @fma_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v6, 0x3ff, v0 -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000) -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], lit64(0x400000003f800000) +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x4040000040800000 +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], 0x400000003f800000 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -2920,9 +2920,9 @@ define amdgpu_kernel void @fma_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0 -; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000) +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x4040000040800000 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_mov_b64 s[4:5], lit64(0x400000003f800000) +; GFX1250-GISEL-NEXT: s_mov_b64 s[4:5], 0x400000003f800000 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; 
GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 diff --git a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir index f1f2eb6baf008..c9645c31aad75 100644 --- a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir +++ b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir @@ -80,3 +80,151 @@ body: | %4:vreg_128 = REG_SEQUENCE %3.sub0, %subreg.sub0, %3.sub1, %subreg.sub1, %3.sub2, %subreg.sub2, %3.sub3, %subreg.sub3 KILL implicit %4 ... + +--- +name: copy_vreg_64_subreg_from_vgpr_reg_sequence +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence + ; GCN: liveins: $vgpr0, $vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GCN-NEXT: $vgpr0 = COPY [[COPY2]] + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1 + %3:vgpr_32 = COPY %2.sub0 + $vgpr0 = COPY %3 +... + +--- +name: copy_vreg_64_subreg_from_vgpr_reg_sequence_extra_copy +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_extra_copy + ; GCN: liveins: $vgpr0, $vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; GCN-NEXT: $vgpr0 = COPY [[COPY3]] + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1 + %3:vreg_64 = COPY %2 + %4:vgpr_32 = COPY %3.sub0 + $vgpr0 = COPY %4 +... + +--- +name: copy_av_64_subreg_from_vgpr_reg_sequence +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GCN-LABEL: name: copy_av_64_subreg_from_vgpr_reg_sequence + ; GCN: liveins: $vgpr0, $vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE]] + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; GCN-NEXT: $vgpr0 = COPY [[COPY3]] + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64_align2 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1 + %3:av_64_align2 = COPY %2 + %4:vgpr_32 = COPY %3.sub0 + $vgpr0 = COPY %4 +... 
+ +--- +name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub0_compose +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub0_compose + ; GCN: liveins: $vgpr0_vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]].sub0, %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0 + ; GCN-NEXT: $vgpr0 = COPY [[COPY2]] + %0:vreg_64 = COPY $vgpr0_vgpr1 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64 = REG_SEQUENCE %0.sub0, %subreg.sub0, %1, %subreg.sub1 + %3:vgpr_32 = COPY %2.sub0 + $vgpr0 = COPY %3 +... + +--- +name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub1_compose +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub1_compose + ; GCN: liveins: $vgpr0_vgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]].sub1, %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1 + ; GCN-NEXT: $vgpr0 = COPY [[COPY2]] + %0:vreg_64 = COPY $vgpr0_vgpr1 + %1:vgpr_32 = COPY $vgpr1 + %2:vreg_64 = REG_SEQUENCE %0.sub1, %subreg.sub0, %1, %subreg.sub1 + %3:vgpr_32 = COPY %2.sub0 + $vgpr0 = COPY %3 +... + +--- +name: copy_vreg_64_subreg_from_multiple_vgpr_reg_sequence +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GCN-LABEL: name: copy_vreg_64_subreg_from_multiple_vgpr_reg_sequence + ; GCN: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub1_sub2 + ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0 + ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0 + ; GCN-NEXT: $vgpr0_vgpr1 = COPY [[COPY4]] + ; GCN-NEXT: $vgpr2_vgpr3 = COPY [[COPY5]] + ; GCN-NEXT: $vgpr4_vgpr5 = COPY [[COPY6]] + ; GCN-NEXT: $vgpr6 = COPY [[COPY7]] + ; GCN-NEXT: $vgpr6 = COPY [[COPY8]] + %0:vgpr_32 = COPY $vgpr0 + %1:vgpr_32 = COPY $vgpr1 + %2:vgpr_32 = COPY $vgpr2 + %3:vgpr_32 = COPY $vgpr3 + %4:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1 + %5:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %3, %subreg.sub1 + %6:vreg_128 = REG_SEQUENCE %4, %subreg.sub0_sub1, %5, %subreg.sub2_sub3 + %7:vreg_64 = COPY %6.sub0_sub1 + %8:vreg_64 = COPY %6.sub1_sub2 + %9:vreg_64 = COPY %6.sub2_sub3 + %10:vgpr_32 = COPY %6.sub2 + %11:vgpr_32 = COPY %6.sub0 + $vgpr0_vgpr1 = COPY %7 + $vgpr2_vgpr3 = COPY %8 + $vgpr4_vgpr5 = COPY %9 + $vgpr6 = COPY %10 + $vgpr6 = COPY %11 +... 
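The check lines in the MIR tests above and below are autogenerated (see the update_mir_test_checks.py note in their headers), so after changing the pass under test they are normally regenerated rather than hand-edited. A minimal sketch of that regeneration step follows, assuming a conventional llvm-project checkout with a built llc; the directory layout and the --llc-binary flag describe a typical local setup and are not taken from this patch.

import subprocess
from pathlib import Path

# Assumed locations; adjust to the local checkout and build tree.
llvm_src = Path.home() / "llvm-project"
llc = llvm_src / "build" / "bin" / "llc"
test = llvm_src / "llvm" / "test" / "CodeGen" / "AMDGPU" / "peephole-opt-regseq-removal.mir"

# Re-run the autogeneration script so the GCN check lines match the
# current llc output for this test file.
subprocess.run(
    [
        "python3",
        str(llvm_src / "llvm" / "utils" / "update_mir_test_checks.py"),
        f"--llc-binary={llc}",
        str(test),
    ],
    check=True,
)

The same workflow applies to the .ll tests further down, with update_llc_test_checks.py in place of the MIR variant.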
diff --git a/llvm/test/CodeGen/AMDGPU/postra-machine-sink-livein-subrange.mir b/llvm/test/CodeGen/AMDGPU/postra-machine-sink-livein-subrange.mir new file mode 100644 index 0000000000000..eb48ff08f1b7c --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/postra-machine-sink-livein-subrange.mir @@ -0,0 +1,113 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -run-pass=postra-machine-sink -verify-machineinstrs -o - %s | FileCheck --check-prefixes=GCN %s + +# Test live-in with subrange is updated accordingly in postra-machine-sink. +--- +name: test_postra_machine_sink_livein_update +tracksRegLiveness: true +frameInfo: + adjustsStack: true +stack: + - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: 0, size: 8, alignment: 4, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 2, name: '', type: spill-slot, offset: 0, size: 8, alignment: 4, + stack-id: default, callee-saved-register: '', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +machineFunctionInfo: + scratchRSrcReg: '$sgpr0_sgpr1_sgpr2_sgpr3' + stackPtrOffsetReg: '$sgpr32' +body: | + ; GCN-LABEL: name: test_postra_machine_sink_livein_update + ; GCN: bb.0: + ; GCN-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000) + ; GCN-NEXT: liveins: $sgpr30, $sgpr31, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr40, $sgpr30_sgpr31 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: renamable $vgpr44 = COPY $vgpr13, implicit $exec + ; GCN-NEXT: renamable $vgpr43 = COPY $vgpr12, implicit $exec + ; GCN-NEXT: S_CBRANCH_SCC1 %bb.2, implicit undef $scc + ; GCN-NEXT: S_BRANCH %bb.1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.1: + ; GCN-NEXT: successors: %bb.2(0x80000000) + ; GCN-NEXT: liveins: $exec, $sgpr30, $sgpr31, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr40, $sgpr30_sgpr31, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr43_vgpr44:0x000000000000000F + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: renamable $vgpr57 = COPY $vgpr9, implicit $exec + ; GCN-NEXT: renamable $vgpr56 = COPY $vgpr8, implicit $exec + ; GCN-NEXT: renamable $vgpr59 = COPY $vgpr7, implicit $exec + ; GCN-NEXT: renamable $vgpr58 = COPY $vgpr6, implicit $exec + ; GCN-NEXT: renamable $vgpr61 = COPY $vgpr5, implicit $exec + ; GCN-NEXT: renamable $vgpr60 = COPY $vgpr4, implicit $exec + ; GCN-NEXT: renamable $vgpr42 = COPY $vgpr3, implicit $exec + ; GCN-NEXT: renamable $vgpr41 = COPY $vgpr2, implicit $exec + ; GCN-NEXT: renamable $vgpr46 = COPY $vgpr1, implicit $exec + ; GCN-NEXT: renamable $vgpr45 = COPY $vgpr0, implicit $exec + ; GCN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32 + ; GCN-NEXT: renamable $sgpr16_sgpr17 = IMPLICIT_DEF + ; GCN-NEXT: $vgpr40 = SI_SPILL_S32_TO_VGPR $sgpr30, 0, $vgpr40, implicit-def $sgpr30_sgpr31, implicit $sgpr30_sgpr31 + ; GCN-NEXT: $vgpr40 = SI_SPILL_S32_TO_VGPR $sgpr31, 1, $vgpr40, implicit $sgpr30_sgpr31 + ; GCN-NEXT: SI_SPILL_AV64_SAVE killed $vgpr14_vgpr15, %stack.1, $sgpr32, 0, implicit 
$exec :: (store (s64) into %stack.1, align 4, addrspace 5) + ; GCN-NEXT: SI_SPILL_AV64_SAVE killed $vgpr10_vgpr11, %stack.2, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.2, align 4, addrspace 5) + ; GCN-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr16_sgpr17, 0, csr_amdgpu, implicit-def dead $vgpr0 + ; GCN-NEXT: renamable $vgpr14_vgpr15 = SI_SPILL_AV64_RESTORE %stack.1, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.1, align 4, addrspace 5) + ; GCN-NEXT: renamable $vgpr0_vgpr1 = nofpexcept V_FMA_F64_e64 0, killed $vgpr45_vgpr46, 0, killed $vgpr41_vgpr42, 0, killed $vgpr60_vgpr61, 0, 0, implicit $mode, implicit $exec + ; GCN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32 + ; GCN-NEXT: FLAT_STORE_DWORDX2 killed renamable $vgpr58_vgpr59, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (store (s64)) + ; GCN-NEXT: renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.2, align 4, addrspace 5) + ; GCN-NEXT: FLAT_STORE_DWORDX2 killed renamable $vgpr0_vgpr1, killed renamable $vgpr56_vgpr57, 0, 0, implicit $exec, implicit $flat_scr :: (store (s64)) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.2: + ; GCN-NEXT: liveins: $vgpr40, $vgpr14_vgpr15:0x000000000000000F, $vgpr43_vgpr44:0x000000000000000F + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: renamable $vgpr0_vgpr1 = V_MOV_B64_PSEUDO 0, implicit $exec + ; GCN-NEXT: FLAT_STORE_DWORDX2 undef renamable $vgpr0_vgpr1, killed renamable $vgpr43_vgpr44, 0, 0, implicit $exec, implicit $flat_scr :: (store (s64)) + ; GCN-NEXT: FLAT_STORE_DWORDX2 killed renamable $vgpr0_vgpr1, killed renamable $vgpr14_vgpr15, 0, 0, implicit $exec, implicit $flat_scr :: (store (s64)) + ; GCN-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31 + bb.0: + successors: %bb.2(0x40000000), %bb.1(0x40000000) + liveins: $sgpr30, $sgpr31, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr40, $sgpr30_sgpr31 + + renamable $vgpr44 = COPY $vgpr13, implicit $exec + renamable $vgpr43 = COPY $vgpr12, implicit $exec + renamable $vgpr57 = COPY $vgpr9, implicit $exec + renamable $vgpr56 = COPY $vgpr8, implicit $exec + renamable $vgpr59 = COPY $vgpr7, implicit $exec + renamable $vgpr58 = COPY $vgpr6, implicit $exec + renamable $vgpr61 = COPY $vgpr5, implicit $exec + renamable $vgpr60 = COPY $vgpr4, implicit $exec + renamable $vgpr42 = COPY $vgpr3, implicit $exec + renamable $vgpr41 = COPY $vgpr2, implicit $exec + renamable $vgpr46 = COPY $vgpr1, implicit $exec + renamable $vgpr45 = COPY $vgpr0, implicit $exec + S_CBRANCH_SCC1 %bb.2, implicit undef $scc + S_BRANCH %bb.1 + + bb.1: + successors: %bb.2(0x80000000) + liveins: $sgpr30, $sgpr31, $vgpr40, $sgpr30_sgpr31, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr41_vgpr42:0x000000000000000F, $vgpr43_vgpr44:0x000000000000000F, $vgpr45_vgpr46:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F + + ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32 + renamable $sgpr16_sgpr17 = IMPLICIT_DEF + $vgpr40 = SI_SPILL_S32_TO_VGPR $sgpr30, 0, $vgpr40, implicit-def $sgpr30_sgpr31, implicit $sgpr30_sgpr31 + $vgpr40 = SI_SPILL_S32_TO_VGPR $sgpr31, 1, $vgpr40, implicit $sgpr30_sgpr31 + SI_SPILL_AV64_SAVE killed $vgpr14_vgpr15, %stack.1, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.1, align 4, addrspace 5) + 
SI_SPILL_AV64_SAVE killed $vgpr10_vgpr11, %stack.2, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.2, align 4, addrspace 5) + dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr16_sgpr17, 0, csr_amdgpu, implicit-def dead $vgpr0 + renamable $vgpr14_vgpr15 = SI_SPILL_AV64_RESTORE %stack.1, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.1, align 4, addrspace 5) + renamable $vgpr0_vgpr1 = nofpexcept V_FMA_F64_e64 0, killed $vgpr45_vgpr46, 0, killed $vgpr41_vgpr42, 0, killed $vgpr60_vgpr61, 0, 0, implicit $mode, implicit $exec + ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32 + FLAT_STORE_DWORDX2 killed renamable $vgpr58_vgpr59, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (store (s64)) + renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.2, align 4, addrspace 5) + FLAT_STORE_DWORDX2 killed renamable $vgpr0_vgpr1, killed renamable $vgpr56_vgpr57, 0, 0, implicit $exec, implicit $flat_scr :: (store (s64)) + + bb.2: + liveins: $vgpr40, $vgpr14_vgpr15:0x000000000000000F, $vgpr43_vgpr44:0x000000000000000F + + renamable $vgpr0_vgpr1 = V_MOV_B64_PSEUDO 0, implicit $exec + FLAT_STORE_DWORDX2 undef renamable $vgpr0_vgpr1, killed renamable $vgpr43_vgpr44, 0, 0, implicit $exec, implicit $flat_scr :: (store (s64)) + FLAT_STORE_DWORDX2 killed renamable $vgpr0_vgpr1, killed renamable $vgpr14_vgpr15, 0, 0, implicit $exec, implicit $flat_scr :: (store (s64)) + S_SETPC_B64_return undef $sgpr30_sgpr31 +... diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll index f5e136a80b4a8..b717f85e179b3 100644 --- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll +++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll @@ -337,8 +337,7 @@ define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) ; GFX942-NEXT: .p2align 8 ; GFX942-NEXT: ; %bb.2: ; GFX942-NEXT: .LBB8_0: -; GFX942-NEXT: s_mov_b32 s4, 8 -; GFX942-NEXT: s_load_dword s0, s[0:1], s4 offset:0x2 +; GFX942-NEXT: s_load_dword s0, s[0:1], 0xa ; GFX942-NEXT: v_mov_b32_e32 v0, 0 ; GFX942-NEXT: s_waitcnt lgkmcnt(0) ; GFX942-NEXT: v_mov_b32_e32 v1, s0 @@ -353,8 +352,7 @@ define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) ; GFX90a-NEXT: .p2align 8 ; GFX90a-NEXT: ; %bb.2: ; GFX90a-NEXT: .LBB8_0: -; GFX90a-NEXT: s_mov_b32 s0, 8 -; GFX90a-NEXT: s_load_dword s0, s[4:5], s0 offset:0x2 +; GFX90a-NEXT: s_load_dword s0, s[4:5], 0xa ; GFX90a-NEXT: v_mov_b32_e32 v0, 0 ; GFX90a-NEXT: s_waitcnt lgkmcnt(0) ; GFX90a-NEXT: v_mov_b32_e32 v1, s0 diff --git a/llvm/test/CodeGen/AMDGPU/preserve-hi16.ll b/llvm/test/CodeGen/AMDGPU/preserve-hi16.ll index 79910af5c0434..93f4ea37117ba 100644 --- a/llvm/test/CodeGen/AMDGPU/preserve-hi16.ll +++ b/llvm/test/CodeGen/AMDGPU/preserve-hi16.ll @@ -929,9 +929,8 @@ define i32 @zext_fptrunc_fma_f16(float %x, float %y, float %z) { ; GFX11-TRUE16-LABEL: zext_fptrunc_fma_f16: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_fma_mixlo_f16 v1, v0, v1, v2 +; GFX11-TRUE16-NEXT: v_fma_mixlo_f16 v0, v0, v1, v2 ; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, 0 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: zext_fptrunc_fma_f16: diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll index c572185e7bbf6..4ea58a5890d35 
100644 --- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll @@ -619,8 +619,7 @@ define i32 @atomicrmw_dec_private_i32(ptr addrspace(5) %ptr) { ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GCN-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_add_i32_e32 v2, vcc, -1, v1 -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GCN-NEXT: v_subrev_i32_e32 v2, vcc, 1, v1 ; GCN-NEXT: v_cmp_lt_u32_e64 s[4:5], 4, v1 ; GCN-NEXT: s_or_b64 s[4:5], vcc, s[4:5] ; GCN-NEXT: v_cndmask_b32_e64 v2, v2, 4, s[4:5] diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll index 83c521043025c..85a9aba1a0e51 100644 --- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll +++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll @@ -377,63 +377,63 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX8-NEXT: v_mov_b32_e32 v10, 0 ; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; GFX8-NEXT: v_mov_b32_e32 v11, 0 -; GFX8-NEXT: s_movk_i32 s0, 0x7f +; GFX8-NEXT: v_mov_b32_e32 v13, 0x7f ; GFX8-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX8-NEXT: ; =>This Loop Header: Depth=1 ; GFX8-NEXT: ; Child Loop BB1_2 Depth 2 ; GFX8-NEXT: v_mov_b32_e32 v3, v1 +; GFX8-NEXT: s_mov_b32 s0, 0 ; GFX8-NEXT: v_mov_b32_e32 v2, v0 -; GFX8-NEXT: s_mov_b32 s1, 0 ; GFX8-NEXT: .LBB1_2: ; %for.body ; GFX8-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX8-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffb000, v2 ; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc -; GFX8-NEXT: flat_load_dwordx2 v[13:14], v[4:5] +; GFX8-NEXT: flat_load_dwordx2 v[14:15], v[4:5] ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0xffffb800, v2 ; GFX8-NEXT: v_addc_u32_e32 v7, vcc, -1, v3, vcc -; GFX8-NEXT: flat_load_dwordx2 v[15:16], v[6:7] +; GFX8-NEXT: flat_load_dwordx2 v[16:17], v[6:7] ; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffc000, v2 ; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc -; GFX8-NEXT: flat_load_dwordx2 v[17:18], v[4:5] +; GFX8-NEXT: flat_load_dwordx2 v[18:19], v[4:5] ; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0xffffc800, v2 ; GFX8-NEXT: v_addc_u32_e32 v7, vcc, -1, v3, vcc ; GFX8-NEXT: flat_load_dwordx2 v[6:7], v[6:7] ; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffd000, v2 ; GFX8-NEXT: v_addc_u32_e32 v5, vcc, -1, v3, vcc -; GFX8-NEXT: v_add_u32_e32 v19, vcc, 0xffffd800, v2 -; GFX8-NEXT: v_addc_u32_e32 v20, vcc, -1, v3, vcc -; GFX8-NEXT: v_add_u32_e32 v21, vcc, 0xffffe000, v2 -; GFX8-NEXT: v_addc_u32_e32 v22, vcc, -1, v3, vcc +; GFX8-NEXT: v_add_u32_e32 v20, vcc, 0xffffd800, v2 +; GFX8-NEXT: v_addc_u32_e32 v21, vcc, -1, v3, vcc +; GFX8-NEXT: v_add_u32_e32 v22, vcc, 0xffffe000, v2 +; GFX8-NEXT: v_addc_u32_e32 v23, vcc, -1, v3, vcc ; GFX8-NEXT: flat_load_dwordx2 v[8:9], v[4:5] -; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[19:20] -; GFX8-NEXT: s_addk_i32 s1, 0x2000 -; GFX8-NEXT: s_cmp_gt_u32 s1, 0x3fffff +; GFX8-NEXT: flat_load_dwordx2 v[4:5], v[20:21] +; GFX8-NEXT: s_addk_i32 s0, 0x2000 +; GFX8-NEXT: s_cmp_gt_u32 s0, 0x3fffff ; GFX8-NEXT: s_waitcnt vmcnt(5) -; GFX8-NEXT: v_add_u32_e32 v23, vcc, v13, v10 -; GFX8-NEXT: v_addc_u32_e32 v24, vcc, v14, v11, vcc +; GFX8-NEXT: v_add_u32_e32 v24, vcc, v14, v10 +; GFX8-NEXT: v_addc_u32_e32 v25, vcc, v15, v11, vcc ; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0xffffe800, v2 ; GFX8-NEXT: v_addc_u32_e32 v11, vcc, -1, v3, vcc -; GFX8-NEXT: v_add_u32_e32 v13, vcc, 0xfffff000, v2 -; GFX8-NEXT: flat_load_dwordx2 v[19:20], v[21:22] +; 
GFX8-NEXT: v_add_u32_e32 v14, vcc, 0xfffff000, v2 +; GFX8-NEXT: flat_load_dwordx2 v[20:21], v[22:23] ; GFX8-NEXT: flat_load_dwordx2 v[10:11], v[10:11] -; GFX8-NEXT: v_addc_u32_e32 v14, vcc, -1, v3, vcc +; GFX8-NEXT: v_addc_u32_e32 v15, vcc, -1, v3, vcc ; GFX8-NEXT: s_waitcnt vmcnt(6) -; GFX8-NEXT: v_add_u32_e32 v21, vcc, v15, v23 -; GFX8-NEXT: v_addc_u32_e32 v22, vcc, v16, v24, vcc -; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0xfffff800, v2 -; GFX8-NEXT: flat_load_dwordx2 v[13:14], v[13:14] -; GFX8-NEXT: v_addc_u32_e32 v16, vcc, -1, v3, vcc -; GFX8-NEXT: flat_load_dwordx2 v[15:16], v[15:16] +; GFX8-NEXT: v_add_u32_e32 v22, vcc, v16, v24 +; GFX8-NEXT: v_addc_u32_e32 v23, vcc, v17, v25, vcc +; GFX8-NEXT: v_add_u32_e32 v16, vcc, 0xfffff800, v2 +; GFX8-NEXT: flat_load_dwordx2 v[14:15], v[14:15] +; GFX8-NEXT: v_addc_u32_e32 v17, vcc, -1, v3, vcc +; GFX8-NEXT: flat_load_dwordx2 v[16:17], v[16:17] ; GFX8-NEXT: s_waitcnt vmcnt(7) -; GFX8-NEXT: v_add_u32_e32 v21, vcc, v17, v21 -; GFX8-NEXT: v_addc_u32_e32 v22, vcc, v18, v22, vcc -; GFX8-NEXT: flat_load_dwordx2 v[17:18], v[2:3] +; GFX8-NEXT: v_add_u32_e32 v22, vcc, v18, v22 +; GFX8-NEXT: v_addc_u32_e32 v23, vcc, v19, v23, vcc +; GFX8-NEXT: flat_load_dwordx2 v[18:19], v[2:3] ; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x10000, v2 ; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc ; GFX8-NEXT: s_waitcnt vmcnt(7) -; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v21 -; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v22, vcc +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v22 +; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v23, vcc ; GFX8-NEXT: s_waitcnt vmcnt(6) ; GFX8-NEXT: v_add_u32_e32 v6, vcc, v8, v6 ; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v9, v7, vcc @@ -441,30 +441,27 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v6 ; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v7, vcc ; GFX8-NEXT: s_waitcnt vmcnt(4) -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v19, v4 -; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v20, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v20, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v21, v5, vcc ; GFX8-NEXT: s_waitcnt vmcnt(3) ; GFX8-NEXT: v_add_u32_e32 v4, vcc, v10, v4 ; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v11, v5, vcc ; GFX8-NEXT: s_waitcnt vmcnt(2) -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v13, v4 -; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v14, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v14, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v15, v5, vcc ; GFX8-NEXT: s_waitcnt vmcnt(1) -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v15, v4 -; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v16, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v16, v4 +; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v17, v5, vcc ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_add_u32_e32 v10, vcc, v17, v4 -; GFX8-NEXT: v_addc_u32_e32 v11, vcc, v18, v5, vcc +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v18, v4 +; GFX8-NEXT: v_addc_u32_e32 v11, vcc, v19, v5, vcc ; GFX8-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX8-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX8-NEXT: ; in Loop: Header=BB1_1 Depth=1 -; GFX8-NEXT: s_add_i32 s1, s0, -1 -; GFX8-NEXT: s_cmp_eq_u32 s0, 0 -; GFX8-NEXT: s_cbranch_scc1 .LBB1_5 -; GFX8-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1 -; GFX8-NEXT: s_mov_b32 s0, s1 -; GFX8-NEXT: s_branch .LBB1_1 -; GFX8-NEXT: .LBB1_5: ; %while.end +; GFX8-NEXT: v_subrev_u32_e32 v13, vcc, 1, v13 +; GFX8-NEXT: s_and_b64 vcc, exec, vcc +; GFX8-NEXT: s_cbranch_vccz .LBB1_1 +; GFX8-NEXT: ; %bb.4: ; %while.end ; GFX8-NEXT: v_mov_b32_e32 v1, s35 ; GFX8-NEXT: v_add_u32_e32 v0, vcc, s34, v12 ; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, 
vcc @@ -498,12 +495,11 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX900-NEXT: v_mov_b32_e32 v1, s35 ; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc -; GFX900-NEXT: s_movk_i32 s0, 0x5000 -; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 +; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, 0x5000, v0 ; GFX900-NEXT: v_mov_b32_e32 v4, 0 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc ; GFX900-NEXT: v_mov_b32_e32 v5, 0 -; GFX900-NEXT: s_movk_i32 s5, 0x7f +; GFX900-NEXT: v_mov_b32_e32 v7, 0x7f ; GFX900-NEXT: s_movk_i32 s2, 0xd000 ; GFX900-NEXT: s_movk_i32 s3, 0xe000 ; GFX900-NEXT: s_movk_i32 s4, 0xf000 @@ -511,77 +507,74 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX900-NEXT: ; =>This Loop Header: Depth=1 ; GFX900-NEXT: ; Child Loop BB1_2 Depth 2 ; GFX900-NEXT: v_mov_b32_e32 v3, v1 +; GFX900-NEXT: s_mov_b32 s5, 0 ; GFX900-NEXT: v_mov_b32_e32 v2, v0 -; GFX900-NEXT: s_mov_b32 s6, 0 ; GFX900-NEXT: .LBB1_2: ; %for.body ; GFX900-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX900-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, 0xffffb000, v2 -; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, -1, v3, vcc -; GFX900-NEXT: global_load_dwordx2 v[9:10], v[2:3], off offset:-4096 -; GFX900-NEXT: global_load_dwordx2 v[11:12], v[2:3], off offset:-2048 -; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, 0xffffc000, v2 -; GFX900-NEXT: global_load_dwordx2 v[7:8], v[7:8], off -; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, -1, v3, vcc -; GFX900-NEXT: global_load_dwordx2 v[17:18], v[13:14], off offset:-2048 -; GFX900-NEXT: global_load_dwordx2 v[19:20], v[13:14], off -; GFX900-NEXT: v_add_co_u32_e32 v15, vcc, s2, v2 -; GFX900-NEXT: v_addc_co_u32_e32 v16, vcc, -1, v3, vcc -; GFX900-NEXT: v_add_co_u32_e32 v13, vcc, s3, v2 -; GFX900-NEXT: global_load_dwordx2 v[15:16], v[15:16], off offset:-2048 -; GFX900-NEXT: v_addc_co_u32_e32 v14, vcc, -1, v3, vcc -; GFX900-NEXT: s_addk_i32 s6, 0x2000 -; GFX900-NEXT: s_cmp_gt_u32 s6, 0x3fffff +; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, 0xffffb000, v2 +; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, -1, v3, vcc +; GFX900-NEXT: global_load_dwordx2 v[10:11], v[2:3], off offset:-4096 +; GFX900-NEXT: global_load_dwordx2 v[12:13], v[2:3], off offset:-2048 +; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, 0xffffc000, v2 +; GFX900-NEXT: global_load_dwordx2 v[8:9], v[8:9], off +; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v3, vcc +; GFX900-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048 +; GFX900-NEXT: global_load_dwordx2 v[20:21], v[14:15], off +; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, s2, v2 +; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, -1, v3, vcc +; GFX900-NEXT: v_add_co_u32_e32 v14, vcc, s3, v2 +; GFX900-NEXT: global_load_dwordx2 v[16:17], v[16:17], off offset:-2048 +; GFX900-NEXT: v_addc_co_u32_e32 v15, vcc, -1, v3, vcc +; GFX900-NEXT: s_addk_i32 s5, 0x2000 +; GFX900-NEXT: s_cmp_gt_u32 s5, 0x3fffff ; GFX900-NEXT: s_waitcnt vmcnt(3) -; GFX900-NEXT: v_add_co_u32_e32 v21, vcc, v7, v4 -; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v8, v5, vcc -; GFX900-NEXT: global_load_dwordx2 v[7:8], v[13:14], off offset:-4096 +; GFX900-NEXT: v_add_co_u32_e32 v22, vcc, v8, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc +; GFX900-NEXT: global_load_dwordx2 v[8:9], v[14:15], off offset:-4096 ; GFX900-NEXT: s_waitcnt vmcnt(3) -; GFX900-NEXT: v_add_co_u32_e64 v23, s[0:1], v17, v21 -; GFX900-NEXT: v_addc_co_u32_e64 v24, s[0:1], v18, v5, s[0:1] -; GFX900-NEXT: global_load_dwordx2 
v[17:18], v[13:14], off offset:-2048 -; GFX900-NEXT: global_load_dwordx2 v[21:22], v[13:14], off +; GFX900-NEXT: v_add_co_u32_e64 v24, s[0:1], v18, v22 +; GFX900-NEXT: v_addc_co_u32_e64 v25, s[0:1], v19, v5, s[0:1] +; GFX900-NEXT: global_load_dwordx2 v[18:19], v[14:15], off offset:-2048 +; GFX900-NEXT: global_load_dwordx2 v[22:23], v[14:15], off ; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, s4, v2 ; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, -1, v3, vcc ; GFX900-NEXT: global_load_dwordx2 v[4:5], v[4:5], off offset:-2048 ; GFX900-NEXT: s_waitcnt vmcnt(5) -; GFX900-NEXT: v_add_co_u32_e32 v19, vcc, v19, v23 -; GFX900-NEXT: global_load_dwordx2 v[13:14], v[2:3], off -; GFX900-NEXT: v_addc_co_u32_e32 v20, vcc, v20, v24, vcc +; GFX900-NEXT: v_add_co_u32_e32 v20, vcc, v20, v24 +; GFX900-NEXT: global_load_dwordx2 v[14:15], v[2:3], off +; GFX900-NEXT: v_addc_co_u32_e32 v21, vcc, v21, v25, vcc ; GFX900-NEXT: v_add_co_u32_e32 v2, vcc, 0x10000, v2 ; GFX900-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc ; GFX900-NEXT: s_waitcnt vmcnt(5) -; GFX900-NEXT: v_add_co_u32_e32 v15, vcc, v15, v19 -; GFX900-NEXT: v_addc_co_u32_e32 v16, vcc, v16, v20, vcc +; GFX900-NEXT: v_add_co_u32_e32 v16, vcc, v16, v20 +; GFX900-NEXT: v_addc_co_u32_e32 v17, vcc, v17, v21, vcc ; GFX900-NEXT: s_waitcnt vmcnt(4) -; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, v7, v15 -; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v8, v16, vcc +; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v8, v16 +; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v9, v17, vcc ; GFX900-NEXT: s_waitcnt vmcnt(3) -; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, v17, v7 -; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v18, v8, vcc +; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v18, v8 +; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v19, v9, vcc ; GFX900-NEXT: s_waitcnt vmcnt(2) -; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, v21, v7 -; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, v22, v8, vcc +; GFX900-NEXT: v_add_co_u32_e32 v8, vcc, v22, v8 +; GFX900-NEXT: v_addc_co_u32_e32 v9, vcc, v23, v9, vcc ; GFX900-NEXT: s_waitcnt vmcnt(1) -; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7 -; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v8, vcc -; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v9, v4 -; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v10, v5, vcc -; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v11, v4 -; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v12, v5, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v9, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v5, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v5, vcc ; GFX900-NEXT: s_waitcnt vmcnt(0) -; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v13, v4 -; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v14, v5, vcc +; GFX900-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4 +; GFX900-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc ; GFX900-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX900-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX900-NEXT: ; in Loop: Header=BB1_1 Depth=1 -; GFX900-NEXT: s_add_i32 s0, s5, -1 -; GFX900-NEXT: s_cmp_eq_u32 s5, 0 -; GFX900-NEXT: s_cbranch_scc1 .LBB1_5 -; GFX900-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1 -; GFX900-NEXT: s_mov_b32 s5, s0 -; GFX900-NEXT: s_branch .LBB1_1 -; GFX900-NEXT: .LBB1_5: ; %while.end +; GFX900-NEXT: v_subrev_co_u32_e32 v7, vcc, 1, v7 +; GFX900-NEXT: s_and_b64 vcc, exec, vcc +; GFX900-NEXT: s_cbranch_vccz .LBB1_1 +; GFX900-NEXT: ; %bb.4: ; %while.end ; GFX900-NEXT: v_mov_b32_e32 v1, s35 ; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, 
v6 ; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc @@ -612,11 +605,11 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX10-NEXT: v_and_b32_e32 v0, 0xff, v0 ; GFX10-NEXT: v_mov_b32_e32 v2, 0 ; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: s_movk_i32 s1, 0x7f +; GFX10-NEXT: v_mov_b32_e32 v7, 0x7f ; GFX10-NEXT: v_and_b32_e32 v6, 0xfe000000, v1 ; GFX10-NEXT: v_lshl_or_b32 v0, v0, 3, v6 -; GFX10-NEXT: v_add_co_u32 v0, s0, v0, s34 -; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, 0, s35, s0 +; GFX10-NEXT: v_add_co_u32 v0, s0, s34, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s35, 0, s0 ; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0 ; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo ; GFX10-NEXT: .LBB1_1: ; %for.cond.preheader @@ -624,77 +617,74 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX10-NEXT: ; Child Loop BB1_2 Depth 2 ; GFX10-NEXT: v_mov_b32_e32 v5, v1 ; GFX10-NEXT: v_mov_b32_e32 v4, v0 -; GFX10-NEXT: s_mov_b32 s2, 0 +; GFX10-NEXT: s_mov_b32 s1, 0 ; GFX10-NEXT: .LBB1_2: ; %for.body ; GFX10-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v4, 0xffffb800 -; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, -1, v5, vcc_lo -; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v4, 0xffffc800 -; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, -1, v5, vcc_lo -; GFX10-NEXT: v_add_co_u32 v13, vcc_lo, v4, 0xffffd800 -; GFX10-NEXT: v_add_co_ci_u32_e32 v14, vcc_lo, -1, v5, vcc_lo -; GFX10-NEXT: v_add_co_u32 v17, vcc_lo, v4, 0xffffe800 +; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v4, 0xffffb800 +; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v10, vcc_lo, v4, 0xffffc800 +; GFX10-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v14, vcc_lo, v4, 0xffffd800 +; GFX10-NEXT: v_add_co_ci_u32_e32 v15, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v18, vcc_lo, v4, 0xffffe800 ; GFX10-NEXT: s_clause 0x2 -; GFX10-NEXT: global_load_dwordx2 v[11:12], v[7:8], off offset:-2048 -; GFX10-NEXT: global_load_dwordx2 v[15:16], v[9:10], off offset:-2048 -; GFX10-NEXT: global_load_dwordx2 v[19:20], v[13:14], off offset:-2048 -; GFX10-NEXT: v_add_co_ci_u32_e32 v18, vcc_lo, -1, v5, vcc_lo -; GFX10-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v4 -; GFX10-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: global_load_dwordx2 v[12:13], v[8:9], off offset:-2048 +; GFX10-NEXT: global_load_dwordx2 v[16:17], v[10:11], off offset:-2048 +; GFX10-NEXT: global_load_dwordx2 v[20:21], v[14:15], off offset:-2048 +; GFX10-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, -1, v5, vcc_lo +; GFX10-NEXT: v_add_co_u32 v22, vcc_lo, 0xfffff000, v4 +; GFX10-NEXT: v_add_co_ci_u32_e32 v23, vcc_lo, -1, v5, vcc_lo ; GFX10-NEXT: s_clause 0x7 -; GFX10-NEXT: global_load_dwordx2 v[23:24], v[17:18], off offset:-2048 -; GFX10-NEXT: global_load_dwordx2 v[7:8], v[7:8], off -; GFX10-NEXT: global_load_dwordx2 v[9:10], v[9:10], off -; GFX10-NEXT: global_load_dwordx2 v[13:14], v[13:14], off -; GFX10-NEXT: global_load_dwordx2 v[25:26], v[17:18], off -; GFX10-NEXT: global_load_dwordx2 v[27:28], v[21:22], off -; GFX10-NEXT: global_load_dwordx2 v[29:30], v[4:5], off offset:-2048 -; GFX10-NEXT: global_load_dwordx2 v[31:32], v[4:5], off +; GFX10-NEXT: global_load_dwordx2 v[24:25], v[18:19], off offset:-2048 +; GFX10-NEXT: global_load_dwordx2 v[8:9], v[8:9], off +; GFX10-NEXT: global_load_dwordx2 v[10:11], v[10:11], off +; GFX10-NEXT: global_load_dwordx2 v[14:15], 
v[14:15], off +; GFX10-NEXT: global_load_dwordx2 v[26:27], v[18:19], off +; GFX10-NEXT: global_load_dwordx2 v[28:29], v[22:23], off +; GFX10-NEXT: global_load_dwordx2 v[30:31], v[4:5], off offset:-2048 +; GFX10-NEXT: global_load_dwordx2 v[32:33], v[4:5], off ; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, 0x10000, v4 ; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo -; GFX10-NEXT: s_addk_i32 s2, 0x2000 -; GFX10-NEXT: s_cmp_gt_u32 s2, 0x3fffff +; GFX10-NEXT: s_addk_i32 s1, 0x2000 +; GFX10-NEXT: s_cmp_gt_u32 s1, 0x3fffff ; GFX10-NEXT: s_waitcnt vmcnt(10) -; GFX10-NEXT: v_add_co_u32 v2, s0, v11, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v12, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v12, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v13, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(6) -; GFX10-NEXT: v_add_co_u32 v2, s0, v7, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v8, v3, s0 -; GFX10-NEXT: v_add_co_u32 v2, s0, v15, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v16, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v8, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v9, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v16, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v17, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(5) -; GFX10-NEXT: v_add_co_u32 v2, s0, v9, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v10, v3, s0 -; GFX10-NEXT: v_add_co_u32 v2, s0, v19, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v20, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v10, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v11, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v20, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v21, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(4) -; GFX10-NEXT: v_add_co_u32 v2, s0, v13, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v14, v3, s0 -; GFX10-NEXT: v_add_co_u32 v2, s0, v23, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v24, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v14, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v15, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v24, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v25, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(3) -; GFX10-NEXT: v_add_co_u32 v2, s0, v25, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v26, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v26, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v27, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(2) -; GFX10-NEXT: v_add_co_u32 v2, s0, v27, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v28, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v28, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v29, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(1) -; GFX10-NEXT: v_add_co_u32 v2, s0, v29, v2 -; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v30, v3, s0 +; GFX10-NEXT: v_add_co_u32 v2, s0, v30, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s0, v31, v3, s0 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v31, v2 -; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v32, v3, vcc_lo +; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v32, v2 +; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v33, v3, vcc_lo ; GFX10-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX10-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX10-NEXT: ; in Loop: Header=BB1_1 Depth=1 -; GFX10-NEXT: s_add_i32 s0, s1, -1 -; GFX10-NEXT: s_cmp_eq_u32 s1, 0 -; GFX10-NEXT: s_cbranch_scc1 .LBB1_5 -; GFX10-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1 -; GFX10-NEXT: s_mov_b32 s1, s0 -; GFX10-NEXT: s_branch .LBB1_1 -; GFX10-NEXT: .LBB1_5: ; %while.end +; GFX10-NEXT: v_sub_co_u32 v7, s0, v7, 1 +; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s0 +; GFX10-NEXT: s_cbranch_vccz .LBB1_1 +; GFX10-NEXT: ; %bb.4: ; %while.end ; 
GFX10-NEXT: v_add_co_u32 v0, s0, s34, v6 ; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s35, 0, s0 ; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off @@ -727,19 +717,18 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX90A-NEXT: v_mov_b32_e32 v2, s35 ; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1 ; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v2, vcc -; GFX90A-NEXT: s_movk_i32 s0, 0x5000 -; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s0, v1 +; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, 0x5000, v1 ; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc ; GFX90A-NEXT: v_pk_mov_b32 v[4:5], 0, 0 -; GFX90A-NEXT: s_movk_i32 s3, 0x7f +; GFX90A-NEXT: v_mov_b32_e32 v1, 0x7f ; GFX90A-NEXT: s_movk_i32 s0, 0xd000 ; GFX90A-NEXT: s_movk_i32 s1, 0xe000 ; GFX90A-NEXT: s_movk_i32 s2, 0xf000 ; GFX90A-NEXT: .LBB1_1: ; %for.cond.preheader ; GFX90A-NEXT: ; =>This Loop Header: Depth=1 ; GFX90A-NEXT: ; Child Loop BB1_2 Depth 2 +; GFX90A-NEXT: s_mov_b32 s3, 0 ; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[2:3], v[2:3] op_sel:[0,1] -; GFX90A-NEXT: s_mov_b32 s4, 0 ; GFX90A-NEXT: .LBB1_2: ; %for.body ; GFX90A-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2 @@ -766,49 +755,46 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX90A-NEXT: global_load_dwordx2 v[30:31], v[6:7], off ; GFX90A-NEXT: v_add_co_u32_e32 v6, vcc, 0x10000, v6 ; GFX90A-NEXT: v_addc_co_u32_e32 v7, vcc, 0, v7, vcc -; GFX90A-NEXT: s_addk_i32 s4, 0x2000 -; GFX90A-NEXT: s_cmp_gt_u32 s4, 0x3fffff +; GFX90A-NEXT: s_addk_i32 s3, 0x2000 +; GFX90A-NEXT: s_cmp_gt_u32 s3, 0x3fffff ; GFX90A-NEXT: s_waitcnt vmcnt(8) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v12, v4 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v13, v5, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v13, v5, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(7) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v18, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v19, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v18, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v19, v5, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(6) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v20, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v21, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v20, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v21, v5, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(5) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v16, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v17, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v16, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v17, v5, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(4) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v24, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v25, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v24, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v25, v5, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(3) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v26, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v27, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v26, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v27, v5, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(2) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v28, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v29, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v28, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v29, v5, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(1) -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v14, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v4, vcc, v15, v4, vcc -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v8, v1 -; GFX90A-NEXT: 
v_addc_co_u32_e32 v4, vcc, v9, v4, vcc -; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, v10, v1 -; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v4, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v14, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v8, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v9, v5, vcc +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4 +; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v11, v5, vcc ; GFX90A-NEXT: s_waitcnt vmcnt(0) -; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v1 +; GFX90A-NEXT: v_add_co_u32_e32 v4, vcc, v30, v4 ; GFX90A-NEXT: v_addc_co_u32_e32 v5, vcc, v31, v5, vcc ; GFX90A-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX90A-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX90A-NEXT: ; in Loop: Header=BB1_1 Depth=1 -; GFX90A-NEXT: s_add_i32 s4, s3, -1 -; GFX90A-NEXT: s_cmp_eq_u32 s3, 0 -; GFX90A-NEXT: s_cbranch_scc1 .LBB1_5 -; GFX90A-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1 -; GFX90A-NEXT: s_mov_b32 s3, s4 -; GFX90A-NEXT: s_branch .LBB1_1 -; GFX90A-NEXT: .LBB1_5: ; %while.end +; GFX90A-NEXT: v_subrev_co_u32_e32 v1, vcc, 1, v1 +; GFX90A-NEXT: s_and_b64 vcc, exec, vcc +; GFX90A-NEXT: s_cbranch_vccz .LBB1_1 +; GFX90A-NEXT: ; %bb.4: ; %while.end ; GFX90A-NEXT: v_mov_b32_e32 v1, s35 ; GFX90A-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0 ; GFX90A-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc @@ -828,13 +814,13 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX11-NEXT: s_swappc_b64 s[30:31], s[0:1] ; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 17, v0 ; GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v0, 0xff, v0 -; GFX11-NEXT: s_movk_i32 s1, 0x7f -; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_mov_b32_e32 v7, 0x7f +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_and_b32_e32 v6, 0xfe000000, v1 ; GFX11-NEXT: v_lshl_or_b32 v0, v0, 3, v6 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v0, s0, v0, s34 -; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, s35, s0 +; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s35, 0, s0 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0 ; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo @@ -843,95 +829,92 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) { ; GFX11-NEXT: ; Child Loop BB1_2 Depth 2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 -; GFX11-NEXT: s_mov_b32 s2, 0 +; GFX11-NEXT: s_mov_b32 s1, 0 ; GFX11-NEXT: .LBB1_2: ; %for.body ; GFX11-NEXT: ; Parent Loop BB1_1 Depth=1 ; GFX11-NEXT: ; => This Inner Loop Header: Depth=2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_u32 v7, vcc_lo, v4, 0xffffc000 -; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, -1, v5, vcc_lo -; GFX11-NEXT: v_add_co_u32 v9, vcc_lo, 0xffffc000, v4 +; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v4, 0xffffc000 +; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_u32 v10, vcc_lo, 0xffffc000, v4 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX11-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v5, vcc_lo -; GFX11-NEXT: global_load_b64 v[13:14], v[7:8], off offset:-4096 -; GFX11-NEXT: v_add_co_u32 v11, 
vcc_lo, 0xffffd000, v4 -; GFX11-NEXT: v_add_co_ci_u32_e64 v12, null, -1, v5, vcc_lo -; GFX11-NEXT: v_add_co_u32 v15, vcc_lo, v4, 0xffffe000 -; GFX11-NEXT: global_load_b64 v[9:10], v[9:10], off offset:-2048 -; GFX11-NEXT: v_add_co_ci_u32_e64 v16, null, -1, v5, vcc_lo -; GFX11-NEXT: global_load_b64 v[11:12], v[11:12], off offset:-2048 -; GFX11-NEXT: v_add_co_u32 v17, vcc_lo, 0xffffe000, v4 +; GFX11-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v5, vcc_lo +; GFX11-NEXT: global_load_b64 v[14:15], v[8:9], off offset:-4096 +; GFX11-NEXT: v_add_co_u32 v12, vcc_lo, 0xffffd000, v4 +; GFX11-NEXT: v_add_co_ci_u32_e64 v13, null, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v4, 0xffffe000 +; GFX11-NEXT: global_load_b64 v[10:11], v[10:11], off offset:-2048 +; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, -1, v5, vcc_lo +; GFX11-NEXT: global_load_b64 v[12:13], v[12:13], off offset:-2048 +; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, 0xffffe000, v4 ; GFX11-NEXT: s_clause 0x1 -; GFX11-NEXT: global_load_b64 v[19:20], v[15:16], off offset:-4096 -; GFX11-NEXT: global_load_b64 v[7:8], v[7:8], off -; GFX11-NEXT: v_add_co_ci_u32_e64 v18, null, -1, v5, vcc_lo -; GFX11-NEXT: v_add_co_u32 v21, vcc_lo, 0xfffff000, v4 +; GFX11-NEXT: global_load_b64 v[20:21], v[16:17], off offset:-4096 +; GFX11-NEXT: global_load_b64 v[8:9], v[8:9], off +; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, 0xfffff000, v4 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_add_co_ci_u32_e64 v22, null, -1, v5, vcc_lo +; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, -1, v5, vcc_lo ; GFX11-NEXT: s_clause 0x5 -; GFX11-NEXT: global_load_b64 v[17:18], v[17:18], off offset:-2048 -; GFX11-NEXT: global_load_b64 v[15:16], v[15:16], off -; GFX11-NEXT: global_load_b64 v[21:22], v[21:22], off offset:-2048 -; GFX11-NEXT: global_load_b64 v[23:24], v[4:5], off offset:-4096 -; GFX11-NEXT: global_load_b64 v[25:26], v[4:5], off offset:-2048 -; GFX11-NEXT: global_load_b64 v[27:28], v[4:5], off +; GFX11-NEXT: global_load_b64 v[18:19], v[18:19], off offset:-2048 +; GFX11-NEXT: global_load_b64 v[16:17], v[16:17], off +; GFX11-NEXT: global_load_b64 v[22:23], v[22:23], off offset:-2048 +; GFX11-NEXT: global_load_b64 v[24:25], v[4:5], off offset:-4096 +; GFX11-NEXT: global_load_b64 v[26:27], v[4:5], off offset:-2048 +; GFX11-NEXT: global_load_b64 v[28:29], v[4:5], off ; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, 0x10000, v4 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v5, vcc_lo -; GFX11-NEXT: s_addk_i32 s2, 0x2000 -; GFX11-NEXT: s_cmp_gt_u32 s2, 0x3fffff +; GFX11-NEXT: s_addk_i32 s1, 0x2000 +; GFX11-NEXT: s_cmp_gt_u32 s1, 0x3fffff ; GFX11-NEXT: s_waitcnt vmcnt(10) -; GFX11-NEXT: v_add_co_u32 v2, s0, v13, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v14, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v14, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v15, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(9) -; GFX11-NEXT: v_add_co_u32 v2, s0, v9, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v10, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v10, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v11, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(6) -; GFX11-NEXT: v_add_co_u32 v2, s0, v7, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v8, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v8, v3, s0 -; GFX11-NEXT: v_add_co_u32 v2, s0, v11, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v9, v3, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v12, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v12, v3, s0 -; GFX11-NEXT: v_add_co_u32 v2, s0, v19, v2 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v13, v3, s0 +; GFX11-NEXT: v_add_co_u32 v2, s0, v20, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v20, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v21, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(5) -; GFX11-NEXT: v_add_co_u32 v2, s0, v17, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v18, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v18, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v19, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(4) -; GFX11-NEXT: v_add_co_u32 v2, s0, v15, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v16, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v16, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v17, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(3) -; GFX11-NEXT: v_add_co_u32 v2, s0, v21, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v22, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v22, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v23, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(2) -; GFX11-NEXT: v_add_co_u32 v2, s0, v23, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v24, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v24, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v25, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(1) -; GFX11-NEXT: v_add_co_u32 v2, s0, v25, v2 +; GFX11-NEXT: v_add_co_u32 v2, s0, v26, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v26, v3, s0 +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v27, v3, s0 ; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v27, v2 +; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v28, v2 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v28, v3, vcc_lo +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v29, v3, vcc_lo ; GFX11-NEXT: s_cbranch_scc0 .LBB1_2 ; GFX11-NEXT: ; %bb.3: ; %while.cond.loopexit ; GFX11-NEXT: ; in Loop: Header=BB1_1 Depth=1 -; GFX11-NEXT: s_add_i32 s0, s1, -1 -; GFX11-NEXT: s_cmp_eq_u32 s1, 0 -; GFX11-NEXT: s_cbranch_scc1 .LBB1_5 -; GFX11-NEXT: ; %bb.4: ; in Loop: Header=BB1_1 Depth=1 -; GFX11-NEXT: s_mov_b32 s1, s0 -; GFX11-NEXT: s_branch .LBB1_1 -; GFX11-NEXT: .LBB1_5: ; %while.end +; GFX11-NEXT: v_sub_co_u32 v7, s0, v7, 1 +; GFX11-NEXT: s_and_b32 vcc_lo, exec_lo, s0 +; GFX11-NEXT: s_cbranch_vccz .LBB1_1 +; GFX11-NEXT: ; %bb.4: ; %while.end ; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v6 ; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s35, 0, s0 diff --git a/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll b/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll index e674fafb79d9f..4355495621593 100644 --- 
a/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll +++ b/llvm/test/CodeGen/AMDGPU/ptr-buffer-alias-scheduling.ll @@ -85,8 +85,8 @@ define amdgpu_kernel void @buffers_from_flat_dont_alias(ptr noalias %a.flat, ptr ; GISEL-NEXT: v_mul_f32_e32 v3, v3, v3 ; GISEL-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 ; GISEL-NEXT: s_endpgm - %a = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %a.flat, i16 0, i32 16, i32 0) - %b = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %b.flat, i16 0, i32 16, i32 0) + %a = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %a.flat, i16 0, i64 16, i32 0) + %b = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %b.flat, i16 0, i64 16, i32 0) %l0 = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) %a, i32 0, i32 0, i32 0) %s0 = fmul float %l0, %l0 @@ -211,4 +211,4 @@ declare i32 @llvm.amdgcn.workitem.id.x() declare float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8), i32, i32, i32) declare void @llvm.amdgcn.raw.ptr.buffer.store.f32(float, ptr addrspace(8), i32, i32, i32 immarg) -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr readnone nocapture, i16, i32, i32) +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr readnone nocapture, i16, i64, i32) diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll index ff90f1f175c3c..40f39a24d7a99 100644 --- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll +++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll @@ -1,6 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti -amdgpu-use-sdag-ptradd=1 < %s | FileCheck --check-prefixes=GFX6,GFX6_PTRADD %s -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti -amdgpu-use-sdag-ptradd=0 < %s | FileCheck --check-prefixes=GFX6,GFX6_LEGACY %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti < %s | FileCheck --check-prefixes=GFX6 %s ; Test PTRADD handling in AMDGPUDAGToDAGISel::SelectMUBUF. @@ -34,7 +33,3 @@ define amdgpu_kernel void @v_add_i32(ptr addrspace(1) %out, ptr addrspace(1) %in store i32 %result, ptr addrspace(1) %out ret void } - -;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: -; GFX6_LEGACY: {{.*}} -; GFX6_PTRADD: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll index 7d3b19e885877..1c986a02e8bd6 100644 --- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll +++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll @@ -1,6 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -disable-separate-const-offset-from-gep=1 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck --check-prefixes=GFX942,GFX942_PTRADD %s -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -disable-separate-const-offset-from-gep=1 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck --check-prefixes=GFX942,GFX942_LEGACY %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -disable-separate-const-offset-from-gep=1 < %s | FileCheck --check-prefixes=GFX942 %s ; Tests for DAG combines and folds related to the ISD::PTRADD SelectionDAG ; opcode. 
The RUN lines uses -disable-separate-const-offset-from-gep to disable @@ -24,21 +23,13 @@ define i64 @global_load_ZTwoUses(ptr addrspace(1) %base, i64 %voffset) { } define i64 @global_load_gep_add_reassoc(ptr addrspace(1) %base, i64 %voffset) { -; GFX942_PTRADD-LABEL: global_load_gep_add_reassoc: -; GFX942_PTRADD: ; %bb.0: -; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3] -; GFX942_PTRADD-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:24 -; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0) -; GFX942_PTRADD-NEXT: s_setpc_b64 s[30:31] -; -; GFX942_LEGACY-LABEL: global_load_gep_add_reassoc: -; GFX942_LEGACY: ; %bb.0: -; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[2:3], 0, v[0:1] -; GFX942_LEGACY-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:24 -; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0) -; GFX942_LEGACY-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: global_load_gep_add_reassoc: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3] +; GFX942-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:24 +; GFX942-NEXT: s_waitcnt vmcnt(0) +; GFX942-NEXT: s_setpc_b64 s[30:31] %add0 = add nuw nsw i64 %voffset, 24 %gep0 = getelementptr nuw inbounds i8, ptr addrspace(1) %base, i64 %add0 %l = load i64, ptr addrspace(1) %gep0, align 8 @@ -221,23 +212,14 @@ define ptr addrspace(1) @shl_neg_offset(ptr addrspace(1) %p, i64 %noffset, i64 % ; Check that offsets are folded into global addresses if possible. For example, ; this is relevant when using --amdgpu-lower-module-lds-strategy=table. define ptr addrspace(1) @complextype_global_gep(i64 %offset) { -; GFX942_PTRADD-LABEL: complextype_global_gep: -; GFX942_PTRADD: ; %bb.0: -; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942_PTRADD-NEXT: s_getpc_b64 s[0:1] -; GFX942_PTRADD-NEXT: s_add_u32 s0, s0, v0@rel32@lo+14 -; GFX942_PTRADD-NEXT: s_addc_u32 s1, s1, v0@rel32@hi+22 -; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], s[0:1], 0, v[0:1] -; GFX942_PTRADD-NEXT: s_setpc_b64 s[30:31] -; -; GFX942_LEGACY-LABEL: complextype_global_gep: -; GFX942_LEGACY: ; %bb.0: -; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX942_LEGACY-NEXT: s_getpc_b64 s[0:1] -; GFX942_LEGACY-NEXT: s_add_u32 s0, s0, v0@rel32@lo+14 -; GFX942_LEGACY-NEXT: s_addc_u32 s1, s1, v0@rel32@hi+22 -; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1] -; GFX942_LEGACY-NEXT: s_setpc_b64 s[30:31] +; GFX942-LABEL: complextype_global_gep: +; GFX942: ; %bb.0: +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: s_getpc_b64 s[0:1] +; GFX942-NEXT: s_add_u32 s0, s0, v0@rel32@lo+14 +; GFX942-NEXT: s_addc_u32 s1, s1, v0@rel32@hi+22 +; GFX942-NEXT: v_lshl_add_u64 v[0:1], s[0:1], 0, v[0:1] +; GFX942-NEXT: s_setpc_b64 s[30:31] %gep0 = getelementptr inbounds %complextype, ptr addrspace(1) @v0, i64 0, i32 1, i64 %offset %gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 2 ret ptr addrspace(1) %gep1 @@ -430,36 +412,20 @@ define ptr @gep_disjoint_or(ptr %base) { ; Check that AssertAlign nodes between ptradd nodes don't block offset folding, ; taken from preload-implicit-kernargs.ll define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) { -; GFX942_PTRADD-LABEL: random_incorrect_offset: -; GFX942_PTRADD: ; %bb.1: -; GFX942_PTRADD-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0 -; GFX942_PTRADD-NEXT: s_waitcnt lgkmcnt(0) -; 
GFX942_PTRADD-NEXT: s_branch .LBB21_0 -; GFX942_PTRADD-NEXT: .p2align 8 -; GFX942_PTRADD-NEXT: ; %bb.2: -; GFX942_PTRADD-NEXT: .LBB21_0: -; GFX942_PTRADD-NEXT: s_load_dword s0, s[4:5], 0xa -; GFX942_PTRADD-NEXT: v_mov_b32_e32 v0, 0 -; GFX942_PTRADD-NEXT: s_waitcnt lgkmcnt(0) -; GFX942_PTRADD-NEXT: v_mov_b32_e32 v1, s0 -; GFX942_PTRADD-NEXT: global_store_dword v0, v1, s[8:9] -; GFX942_PTRADD-NEXT: s_endpgm -; -; GFX942_LEGACY-LABEL: random_incorrect_offset: -; GFX942_LEGACY: ; %bb.1: -; GFX942_LEGACY-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0 -; GFX942_LEGACY-NEXT: s_waitcnt lgkmcnt(0) -; GFX942_LEGACY-NEXT: s_branch .LBB21_0 -; GFX942_LEGACY-NEXT: .p2align 8 -; GFX942_LEGACY-NEXT: ; %bb.2: -; GFX942_LEGACY-NEXT: .LBB21_0: -; GFX942_LEGACY-NEXT: s_mov_b32 s0, 8 -; GFX942_LEGACY-NEXT: s_load_dword s0, s[4:5], s0 offset:0x2 -; GFX942_LEGACY-NEXT: v_mov_b32_e32 v0, 0 -; GFX942_LEGACY-NEXT: s_waitcnt lgkmcnt(0) -; GFX942_LEGACY-NEXT: v_mov_b32_e32 v1, s0 -; GFX942_LEGACY-NEXT: global_store_dword v0, v1, s[8:9] -; GFX942_LEGACY-NEXT: s_endpgm +; GFX942-LABEL: random_incorrect_offset: +; GFX942: ; %bb.1: +; GFX942-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0 +; GFX942-NEXT: s_waitcnt lgkmcnt(0) +; GFX942-NEXT: s_branch .LBB21_0 +; GFX942-NEXT: .p2align 8 +; GFX942-NEXT: ; %bb.2: +; GFX942-NEXT: .LBB21_0: +; GFX942-NEXT: s_load_dword s0, s[4:5], 0xa +; GFX942-NEXT: v_mov_b32_e32 v0, 0 +; GFX942-NEXT: s_waitcnt lgkmcnt(0) +; GFX942-NEXT: v_mov_b32_e32 v1, s0 +; GFX942-NEXT: global_store_dword v0, v1, s[8:9] +; GFX942-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 2 %load = load i32, ptr addrspace(4) %gep diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll index 1934ce395e63d..e7c715f0a38bf 100644 --- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll +++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll @@ -1,6 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -start-before=amdgpu-isel -amdgpu-use-sdag-ptradd=1 < %s | FileCheck --check-prefixes=GFX942,GFX942_PTRADD %s -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -start-before=amdgpu-isel -amdgpu-use-sdag-ptradd=0 < %s | FileCheck --check-prefixes=GFX942,GFX942_LEGACY %s +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -start-before=amdgpu-isel < %s | FileCheck --check-prefixes=GFX942 %s ; Tests for undef and poison DAG folds for the ISD::PTRADD SelectionDAG opcode. ; If any additions are generated for these tests, the folds don't work. @@ -44,6 +43,3 @@ define ptr @undef_base(ptr %p, i64 %offset) { %gep1 = getelementptr i8, ptr undef, i64 %offset ret ptr %gep1 } -;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: -; GFX942_LEGACY: {{.*}} -; GFX942_PTRADD: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll index 9dd25025d4381..f4f5a78f0e2b6 100644 --- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll +++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll @@ -1,14 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX8,GFX8_PTRADD -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX8,GFX8_LEGACY -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX942,GFX942_PTRADD -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX942,GFX942_LEGACY -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX10,GFX10_PTRADD -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX10,GFX10_LEGACY -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX11,GFX11_PTRADD -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX11,GFX11_LEGACY -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX12,GFX12_PTRADD -; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX12,GFX12_LEGACY +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck %s -check-prefixes=GFX8 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 < %s | FileCheck %s -check-prefixes=GFX942 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 < %s | FileCheck %s -check-prefixes=GFX10 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck %s -check-prefixes=GFX11 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck %s -check-prefixes=GFX12 ; Tests for the ISD::PTRADD SelectionDAG opcode. This only tests 64-bit address ; spaces since PTRADD is currently only used for these. @@ -511,15 +506,3 @@ entry: store i32 %val, ptr addrspace(1) %gep.to, align 4 ret void } - -;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: -; GFX10_LEGACY: {{.*}} -; GFX10_PTRADD: {{.*}} -; GFX11_LEGACY: {{.*}} -; GFX11_PTRADD: {{.*}} -; GFX12_LEGACY: {{.*}} -; GFX12_PTRADD: {{.*}} -; GFX8_LEGACY: {{.*}} -; GFX8_PTRADD: {{.*}} -; GFX942_LEGACY: {{.*}} -; GFX942_PTRADD: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/remat-sop.mir b/llvm/test/CodeGen/AMDGPU/remat-sop.mir index 1da55cf535449..cb652db425421 100644 --- a/llvm/test/CodeGen/AMDGPU/remat-sop.mir +++ b/llvm/test/CodeGen/AMDGPU/remat-sop.mir @@ -91,15 +91,17 @@ body: | bb.0: ; GCN-LABEL: name: test_no_remat_s_mov_b32_vreg_src_short_lr ; GCN: renamable $sgpr0 = IMPLICIT_DEF - ; GCN-NEXT: renamable $sgpr1 = S_MOV_B32 renamable $sgpr0 - ; GCN-NEXT: SI_SPILL_S32_SAVE killed renamable $sgpr1, %stack.1, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.1, addrspace 5) - ; GCN-NEXT: renamable $sgpr1 = S_MOV_B32 renamable $sgpr0 - ; GCN-NEXT: SI_SPILL_S32_SAVE killed renamable $sgpr1, %stack.0, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.0, addrspace 5) + ; GCN-NEXT: renamable $sgpr1 = S_MOV_B32 killed renamable $sgpr0 + ; GCN-NEXT: renamable $sgpr0 = IMPLICIT_DEF ; GCN-NEXT: renamable $sgpr0 = S_MOV_B32 killed renamable $sgpr0 - ; GCN-NEXT: renamable $sgpr1 = SI_SPILL_S32_RESTORE %stack.1, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.1, addrspace 5) - ; GCN-NEXT: S_NOP 0, implicit killed renamable $sgpr1 - ; GCN-NEXT: renamable $sgpr1 = SI_SPILL_S32_RESTORE %stack.0, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.0, addrspace 5) + ; GCN-NEXT: SI_SPILL_S32_SAVE killed renamable $sgpr0, %stack.1, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.1, addrspace 5) + ; GCN-NEXT: renamable $sgpr0 = IMPLICIT_DEF + ; GCN-NEXT: renamable $sgpr0 = S_MOV_B32 killed renamable $sgpr0 + ; GCN-NEXT: SI_SPILL_S32_SAVE killed renamable $sgpr0, %stack.0, implicit $exec, implicit $sgpr32 :: (store (s32) into %stack.0, addrspace 5) ; GCN-NEXT: S_NOP 0, implicit killed renamable $sgpr1 + ; GCN-NEXT: renamable $sgpr0 = SI_SPILL_S32_RESTORE %stack.1, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.1, addrspace 5) + ; GCN-NEXT: S_NOP 0, implicit killed renamable $sgpr0 + ; GCN-NEXT: renamable $sgpr0 = SI_SPILL_S32_RESTORE %stack.0, implicit $exec, implicit $sgpr32 :: (load (s32) from %stack.0, addrspace 5) ; GCN-NEXT: S_NOP 0, implicit killed renamable $sgpr0 ; GCN-NEXT: S_ENDPGM 0 %0:sreg_32 = IMPLICIT_DEF diff --git a/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll b/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll index 5d5aad76afd09..566eb1e14dc02 100644 --- a/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll +++ b/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll @@ -7,16 +7,12 @@ @gv.fptr0 = external hidden unnamed_addr addrspace(4) constant ptr, align 4 -; GCN-LABEL: unreachable: -; Function info: -; codeLenInByte = 4 define internal fastcc void @unreachable() { %fptr = load ptr, ptr addrspace(4) @gv.fptr0 call void %fptr() unreachable } - ; GCN-LABEL: entry: ; GCN-NOT: s_swappc_b64 ; GCN: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/s-barrier.ll b/llvm/test/CodeGen/AMDGPU/s-barrier.ll index 8a9beb73a6baa..4c7cef9cc1a0f 100644 --- a/llvm/test/CodeGen/AMDGPU/s-barrier.ll +++ b/llvm/test/CodeGen/AMDGPU/s-barrier.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12-SDAG %s -; RUN: llc 
-global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12-GISEL %s +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL %s @bar = internal addrspace(3) global target("amdgcn.named.barrier", 0) poison @bar2 = internal addrspace(3) global target("amdgcn.named.barrier", 0) poison @@ -102,6 +102,7 @@ define amdgpu_kernel void @kernel1(ptr addrspace(1) %out, ptr addrspace(3) %in) ; GFX12-SDAG-NEXT: s_mov_b32 m0, 2 ; GFX12-SDAG-NEXT: s_barrier_signal_isfirst -1 ; GFX12-SDAG-NEXT: s_barrier_wait 1 +; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX12-SDAG-NEXT: s_barrier_leave ; GFX12-SDAG-NEXT: s_get_barrier_state s3, m0 ; GFX12-SDAG-NEXT: s_mov_b32 m0, s2 @@ -155,10 +156,11 @@ define amdgpu_kernel void @kernel1(ptr addrspace(1) %out, ptr addrspace(3) %in) ; GFX12-GISEL-NEXT: s_barrier_signal -1 ; GFX12-GISEL-NEXT: s_barrier_join m0 ; GFX12-GISEL-NEXT: s_barrier_signal_isfirst -1 -; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX12-GISEL-NEXT: s_add_co_u32 s8, s12, 48 ; GFX12-GISEL-NEXT: s_barrier_wait 1 +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX12-GISEL-NEXT: s_barrier_leave +; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-NEXT: s_add_co_u32 s8, s12, 48 ; GFX12-GISEL-NEXT: s_get_barrier_state s0, 2 ; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX12-GISEL-NEXT: s_get_barrier_state s0, m0 @@ -256,6 +258,25 @@ define amdgpu_kernel void @kernel2(ptr addrspace(1) %out, ptr addrspace(3) %in) ret void } +define amdgpu_ps void @test_barrier_leave_write_to_scc(i32 inreg %val, ptr addrspace(1) %out) { +; GFX12-LABEL: test_barrier_leave_write_to_scc: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_barrier_leave +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: s_cmp_lg_u32 s0, 0 +; GFX12-NEXT: s_movk_i32 s0, 0x7b +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_cselect_b32 s0, s0, 0x1c8 +; GFX12-NEXT: v_mov_b32_e32 v2, s0 +; GFX12-NEXT: global_store_b32 v[0:1], v2, off +; GFX12-NEXT: s_endpgm + call void @llvm.amdgcn.s.barrier.leave(i16 1) + %cmp = icmp ne i32 %val, 0 + %ret = select i1 %cmp, i32 123, i32 456 + store i32 %ret, ptr addrspace(1) %out + ret void +} + declare void @llvm.amdgcn.s.barrier() #1 declare void @llvm.amdgcn.s.barrier.wait(i16) #1 declare void @llvm.amdgcn.s.barrier.signal(i32) #1 diff --git a/llvm/test/CodeGen/AMDGPU/s-cluster-barrier.ll b/llvm/test/CodeGen/AMDGPU/s-cluster-barrier.ll new file mode 100644 index 0000000000000..dc2e09dda2193 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/s-cluster-barrier.ll @@ -0,0 +1,34 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG %s +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX12,GFX12-ISEL %s + +define amdgpu_kernel void @kernel1() #0 { +; GFX12-LABEL: kernel1: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_cmp_eq_u32 0, 0 +; GFX12-NEXT: s_barrier_signal_isfirst -1 +; GFX12-NEXT: s_barrier_wait -1 +; GFX12-NEXT: s_cselect_b32 s0, -1, 0 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0 +; GFX12-NEXT: s_cbranch_vccnz .LBB0_2 +; GFX12-NEXT: ; %bb.1: +; GFX12-NEXT: s_barrier_signal -3 +; GFX12-NEXT: .LBB0_2: +; GFX12-NEXT: s_barrier_wait -3 +; GFX12-NEXT: s_get_barrier_state s0, -3 +; 
GFX12-NEXT: s_endpgm + call void @llvm.amdgcn.s.cluster.barrier() + %state3 = call i32 @llvm.amdgcn.s.get.barrier.state(i32 -3) + ret void +} + +declare void @llvm.amdgcn.s.cluster.barrier() #1 +declare i32 @llvm.amdgcn.s.get.barrier.state(i32) #1 + +attributes #0 = { nounwind } +attributes #1 = { convergent nounwind } +attributes #2 = { nounwind readnone } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; GFX12-ISEL: {{.*}} +; GFX12-SDAG: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/s_uaddo_usubo_pseudo.ll b/llvm/test/CodeGen/AMDGPU/s_uaddo_usubo_pseudo.ll new file mode 100644 index 0000000000000..a828ee0a7883c --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/s_uaddo_usubo_pseudo.ll @@ -0,0 +1,46 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -stop-after=amdgpu-isel < %s | FileCheck -check-prefixes=GCN-ISEL %s +; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s +; Ensure that S_UADDO_PSEUDO is selected when carryout user is S_ADD_CO_PSEUDO + +; GCN-ISEL-LABEL: name: s_uaddo_pseudo +; GCN-ISEL-LABEL: body: +; GCN-ISEL: S_UADDO_PSEUDO +; GCN-ISEL: S_ADD_CO_PSEUDO + +define amdgpu_ps i32 @s_uaddo_pseudo(i32 inreg %val0) { +; CHECK-LABEL: s_uaddo_pseudo: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_add_u32 s0, s0, 1 +; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0 +; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0 +; CHECK-NEXT: s_addc_u32 s0, 1, 0 +; CHECK-NEXT: ; return to shader part epilog + %pair = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %val0, i32 1) + %carryout = extractvalue { i32, i1 } %pair, 1 + %zext_carryout = zext i1 %carryout to i32 + %result = add i32 %zext_carryout, 1 + ret i32 %result +} + +; GCN-ISEL-LABEL: name: s_usubo_pseudo +; GCN-ISEL-LABEL: body: +; GCN-ISEL: S_USUBO_PSEUDO +; GCN-ISEL: S_SUB_CO_PSEUDO + +define amdgpu_ps i32 @s_usubo_pseudo(i32 inreg %val0, i32 inreg %val1) { +; CHECK-LABEL: s_usubo_pseudo: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_sub_u32 s0, s0, 1 +; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0 +; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0 +; CHECK-NEXT: s_subb_u32 s0, s1, 0 +; CHECK-NEXT: ; return to shader part epilog + %pair = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %val0, i32 1) + %carryout = extractvalue { i32, i1 } %pair, 1 + %zext_carryout = zext i1 %carryout to i32 + %result = sub i32 %val1, %zext_carryout + ret i32 %result +} +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; GCN-ISEL: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/sad.ll b/llvm/test/CodeGen/AMDGPU/sad.ll index 0b58b328bbfb6..68c33487b0596 100644 --- a/llvm/test/CodeGen/AMDGPU/sad.ll +++ b/llvm/test/CodeGen/AMDGPU/sad.ll @@ -67,9 +67,9 @@ define amdgpu_kernel void @v_sad_u32_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, ; GCN-NEXT: s_mov_b32 flat_scratch_lo, s13 ; GCN-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_mov_b32_e32 v0, s1 +; GCN-NEXT: v_mov_b32_e32 v0, s0 ; GCN-NEXT: v_mov_b32_e32 v1, s2 -; GCN-NEXT: v_sad_u32 v2, s0, v0, v1 +; GCN-NEXT: v_sad_u32 v2, s1, v0, v1 ; GCN-NEXT: v_mov_b32_e32 v0, s4 ; GCN-NEXT: v_mov_b32_e32 v1, s5 ; GCN-NEXT: flat_store_dword v[0:1], v2 @@ -249,10 +249,10 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(ptr addrspace(1) %out, i ; GCN-NEXT: s_addc_u32 s21, s21, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: s_sub_i32 s3, s0, s1 -; GCN-NEXT: v_mov_b32_e32 v0, s1 +; GCN-NEXT: v_mov_b32_e32 v0, s0 ; GCN-NEXT: v_mov_b32_e32 v1, s2 ; GCN-NEXT: v_mov_b32_e32 v2, s3 -; GCN-NEXT: v_sad_u32 v3, s0, v0, v1 +; GCN-NEXT: v_sad_u32 v3, s1, v0, v1 ; GCN-NEXT: buffer_store_dword v2, v0, s[20:23], 0 offen ; GCN-NEXT: s_waitcnt vmcnt(0) ; GCN-NEXT: v_mov_b32_e32 v0, s4 @@ -284,8 +284,8 @@ define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(ptr addrspace(1) %out ; GCN-NEXT: s_add_u32 s20, s20, s17 ; GCN-NEXT: s_addc_u32 s21, s21, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_min_u32 s3, s0, s1 -; GCN-NEXT: s_max_u32 s0, s0, s1 +; GCN-NEXT: s_min_u32 s3, s1, s0 +; GCN-NEXT: s_max_u32 s0, s1, s0 ; GCN-NEXT: s_sub_i32 s0, s0, s3 ; GCN-NEXT: v_mov_b32_e32 v0, s4 ; GCN-NEXT: v_mov_b32_e32 v2, s0 @@ -583,17 +583,17 @@ define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat2(ptr addrspace(1) % ; GCN-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x2 ; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0 ; GCN-NEXT: s_add_i32 s12, s12, s17 -; GCN-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; GCN-NEXT: s_mov_b32 flat_scratch_lo, s13 +; GCN-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_sub_i32 s3, s0, s3 -; GCN-NEXT: s_sub_i32 s6, s1, s0 -; GCN-NEXT: s_cmp_lt_u32 s1, s0 -; GCN-NEXT: s_cselect_b32 s0, s3, s6 -; GCN-NEXT: s_add_i32 s0, s0, s2 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: s_sub_i32 s0, s0, s3 +; GCN-NEXT: v_mov_b32_e32 v1, s0 +; GCN-NEXT: v_sub_i32_e32 v0, vcc, s1, v0 +; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; GCN-NEXT: v_add_i32_e32 v2, vcc, s2, v0 ; GCN-NEXT: v_mov_b32_e32 v0, s4 ; GCN-NEXT: v_mov_b32_e32 v1, s5 -; GCN-NEXT: v_mov_b32_e32 v2, s0 ; GCN-NEXT: flat_store_dword v[0:1], v2 ; GCN-NEXT: s_endpgm %icmp0 = icmp ugt i32 %a, %b diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll index 5e76c7d7c734f..697bcc3b8fb47 100644 --- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll @@ -6,8 +6,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GCN-LABEL: s_test_sdiv: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: s_ashr_i32 s8, s1, 31 ; GCN-NEXT: s_add_u32 s0, s0, s8 @@ -16,126 +17,158 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GCN-NEXT: s_xor_b64 s[10:11], s[0:1], s[8:9] ; 
GCN-NEXT: v_cvt_f32_u32_e32 v0, s10 ; GCN-NEXT: v_cvt_f32_u32_e32 v1, s11 -; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_sub_u32 s4, 0, s10 -; GCN-NEXT: s_subb_u32 s5, 0, s11 +; GCN-NEXT: s_sub_u32 s12, 0, s10 +; GCN-NEXT: s_subb_u32 s13, 0, s11 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 -; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_ashr_i32 s12, s3, 31 -; GCN-NEXT: s_add_u32 s2, s2, s12 -; GCN-NEXT: s_mov_b32 s13, s12 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GCN-NEXT: v_trunc_f32_e32 v1, v1 ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: s_addc_u32 s3, s3, s12 -; GCN-NEXT: s_xor_b64 s[2:3], s[2:3], s[12:13] -; GCN-NEXT: v_mul_lo_u32 v2, s4, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s4, v0 -; GCN-NEXT: v_mul_lo_u32 v5, s5, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s4, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GCN-NEXT: v_mul_hi_u32 v3, v0, v4 -; GCN-NEXT: v_mul_lo_u32 v5, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v2 -; GCN-NEXT: v_mul_lo_u32 v6, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v8, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v6 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v4, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s4, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s4, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s5, v0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_mul_lo_u32 v3, s4, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_mul_lo_u32 v6, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v3 -; GCN-NEXT: v_mul_hi_u32 v8, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v3 -; GCN-NEXT: v_mul_lo_u32 v3, v1, v3 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v2 -; GCN-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s2, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s2, v0 -; GCN-NEXT: v_mul_hi_u32 v4, s2, v1 -; GCN-NEXT: v_mul_hi_u32 v5, s3, v1 -; GCN-NEXT: v_mul_lo_u32 v1, s3, v1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_mul_lo_u32 v4, s3, v0 -; GCN-NEXT: v_mul_hi_u32 v0, s3, v0 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc -; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s10, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s10, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s11, v0 -; GCN-NEXT: v_mov_b32_e32 v5, s11 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_mul_lo_u32 v3, s10, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v4, v2 -; GCN-NEXT: 
v_sub_i32_e32 v4, vcc, s3, v2 -; GCN-NEXT: v_sub_i32_e32 v3, vcc, s2, v3 -; GCN-NEXT: v_subb_u32_e64 v4, s[0:1], v4, v5, vcc -; GCN-NEXT: v_subrev_i32_e64 v5, s[0:1], s10, v3 -; GCN-NEXT: v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v4 -; GCN-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v5 -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1] -; GCN-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v4 -; GCN-NEXT: v_cndmask_b32_e64 v4, v6, v5, s[0:1] -; GCN-NEXT: v_add_i32_e64 v5, s[0:1], 1, v0 -; GCN-NEXT: v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1] -; GCN-NEXT: v_add_i32_e64 v7, s[0:1], 2, v0 -; GCN-NEXT: v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1] -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 -; GCN-NEXT: v_cndmask_b32_e64 v4, v5, v7, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v5, v6, v8, s[0:1] -; GCN-NEXT: v_mov_b32_e32 v6, s3 -; GCN-NEXT: v_subb_u32_e32 v2, vcc, v6, v2, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s11, v2 -; GCN-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s10, v3 -; GCN-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, s11, v2 -; GCN-NEXT: v_cndmask_b32_e32 v2, v6, v3, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc -; GCN-NEXT: s_xor_b64 s[0:1], s[12:13], s[8:9] -; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc -; GCN-NEXT: v_xor_b32_e32 v0, s0, v0 -; GCN-NEXT: v_xor_b32_e32 v1, s1, v1 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_mul_hi_u32 v2, s12, v0 +; GCN-NEXT: v_readfirstlane_b32 s14, v1 +; GCN-NEXT: v_readfirstlane_b32 s0, v0 +; GCN-NEXT: s_mul_i32 s1, s12, s14 +; GCN-NEXT: v_readfirstlane_b32 s17, v2 +; GCN-NEXT: s_mul_i32 s15, s13, s0 +; GCN-NEXT: s_mul_i32 s16, s12, s0 +; GCN-NEXT: s_add_i32 s1, s17, s1 +; GCN-NEXT: v_mul_hi_u32 v3, v0, s16 +; GCN-NEXT: s_add_i32 s1, s1, s15 +; GCN-NEXT: v_mul_hi_u32 v0, v0, s1 +; GCN-NEXT: v_mul_hi_u32 v4, v1, s16 +; GCN-NEXT: v_readfirstlane_b32 s15, v3 +; GCN-NEXT: s_mul_i32 s17, s0, s1 +; GCN-NEXT: v_mul_hi_u32 v1, v1, s1 +; GCN-NEXT: s_add_u32 s15, s15, s17 +; GCN-NEXT: v_readfirstlane_b32 s17, v0 +; GCN-NEXT: s_addc_u32 s17, 0, s17 +; GCN-NEXT: s_mul_i32 s16, s14, s16 +; GCN-NEXT: v_readfirstlane_b32 s18, v4 +; GCN-NEXT: s_add_u32 s15, s15, s16 +; GCN-NEXT: s_addc_u32 s15, s17, s18 +; GCN-NEXT: v_readfirstlane_b32 s16, v1 +; GCN-NEXT: s_addc_u32 s16, s16, 0 +; GCN-NEXT: s_mul_i32 s1, s14, s1 +; GCN-NEXT: s_add_u32 s1, s15, s1 +; GCN-NEXT: s_addc_u32 s15, 0, s16 +; GCN-NEXT: s_add_u32 s16, s0, s1 +; GCN-NEXT: v_mov_b32_e32 v0, s16 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: v_mul_hi_u32 v0, s12, v0 +; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_addc_u32 s14, s14, s15 +; GCN-NEXT: s_mul_i32 s0, s12, s14 +; GCN-NEXT: v_readfirstlane_b32 s1, v0 +; GCN-NEXT: s_add_i32 s0, s1, s0 +; GCN-NEXT: s_mul_i32 s13, s13, s16 +; GCN-NEXT: s_mul_i32 s1, s12, s16 +; GCN-NEXT: s_add_i32 s0, s0, s13 ; GCN-NEXT: v_mov_b32_e32 v2, s1 -; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0 -; GCN-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: v_mul_hi_u32 v3, s14, v2 +; GCN-NEXT: v_mul_hi_u32 v2, s16, v2 +; GCN-NEXT: v_mul_hi_u32 v1, s14, v0 +; GCN-NEXT: v_mul_hi_u32 v0, s16, v0 +; GCN-NEXT: s_mul_i32 s13, s16, s0 +; GCN-NEXT: v_readfirstlane_b32 s17, v2 +; GCN-NEXT: s_add_u32 s13, s17, s13 +; GCN-NEXT: v_readfirstlane_b32 s15, v0 +; GCN-NEXT: s_mul_i32 s1, s14, s1 
+; GCN-NEXT: s_addc_u32 s15, 0, s15 +; GCN-NEXT: v_readfirstlane_b32 s12, v3 +; GCN-NEXT: s_add_u32 s1, s13, s1 +; GCN-NEXT: s_addc_u32 s1, s15, s12 +; GCN-NEXT: v_readfirstlane_b32 s12, v1 +; GCN-NEXT: s_addc_u32 s12, s12, 0 +; GCN-NEXT: s_mul_i32 s0, s14, s0 +; GCN-NEXT: s_add_u32 s0, s1, s0 +; GCN-NEXT: s_addc_u32 s12, 0, s12 +; GCN-NEXT: s_add_u32 s15, s16, s0 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_addc_u32 s14, s14, s12 +; GCN-NEXT: s_ashr_i32 s12, s7, 31 +; GCN-NEXT: s_add_u32 s0, s6, s12 +; GCN-NEXT: s_mov_b32 s13, s12 +; GCN-NEXT: s_addc_u32 s1, s7, s12 +; GCN-NEXT: s_xor_b64 s[6:7], s[0:1], s[12:13] +; GCN-NEXT: v_mov_b32_e32 v0, s14 +; GCN-NEXT: v_mul_hi_u32 v1, s6, v0 +; GCN-NEXT: v_mov_b32_e32 v2, s15 +; GCN-NEXT: v_mul_hi_u32 v3, s6, v2 +; GCN-NEXT: s_mov_b32 s0, s4 +; GCN-NEXT: v_readfirstlane_b32 s4, v1 +; GCN-NEXT: v_mul_hi_u32 v1, s7, v2 +; GCN-NEXT: s_mul_i32 s1, s6, s14 +; GCN-NEXT: v_readfirstlane_b32 s16, v3 +; GCN-NEXT: v_mul_hi_u32 v0, s7, v0 +; GCN-NEXT: s_add_u32 s1, s16, s1 +; GCN-NEXT: s_addc_u32 s4, 0, s4 +; GCN-NEXT: s_mul_i32 s15, s7, s15 +; GCN-NEXT: v_readfirstlane_b32 s16, v1 +; GCN-NEXT: s_add_u32 s1, s1, s15 +; GCN-NEXT: s_addc_u32 s1, s4, s16 +; GCN-NEXT: v_readfirstlane_b32 s4, v0 +; GCN-NEXT: s_addc_u32 s4, s4, 0 +; GCN-NEXT: s_mul_i32 s14, s7, s14 +; GCN-NEXT: s_add_u32 s14, s1, s14 +; GCN-NEXT: v_mov_b32_e32 v0, s14 +; GCN-NEXT: v_mul_hi_u32 v0, s10, v0 +; GCN-NEXT: s_addc_u32 s15, 0, s4 +; GCN-NEXT: s_mov_b32 s1, s5 +; GCN-NEXT: s_mul_i32 s4, s10, s15 +; GCN-NEXT: v_readfirstlane_b32 s5, v0 +; GCN-NEXT: s_add_i32 s4, s5, s4 +; GCN-NEXT: s_mul_i32 s5, s11, s14 +; GCN-NEXT: s_add_i32 s16, s4, s5 +; GCN-NEXT: s_sub_i32 s17, s7, s16 +; GCN-NEXT: s_mul_i32 s4, s10, s14 +; GCN-NEXT: s_sub_u32 s6, s6, s4 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s18, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s18, 0 +; GCN-NEXT: s_subb_u32 s17, s17, s11 +; GCN-NEXT: s_sub_u32 s19, s6, s10 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_subb_u32 s4, s17, 0 +; GCN-NEXT: s_cmp_ge_u32 s4, s11 +; GCN-NEXT: s_cselect_b32 s5, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s19, s10 +; GCN-NEXT: s_cselect_b32 s17, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s4, s11 +; GCN-NEXT: s_cselect_b32 s4, s17, s5 +; GCN-NEXT: s_add_u32 s5, s14, 1 +; GCN-NEXT: s_addc_u32 s17, s15, 0 +; GCN-NEXT: s_add_u32 s19, s14, 2 +; GCN-NEXT: s_addc_u32 s20, s15, 0 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_cselect_b32 s4, s19, s5 +; GCN-NEXT: s_cselect_b32 s5, s20, s17 +; GCN-NEXT: s_cmp_lg_u32 s18, 0 +; GCN-NEXT: s_subb_u32 s7, s7, s16 +; GCN-NEXT: s_cmp_ge_u32 s7, s11 +; GCN-NEXT: s_cselect_b32 s16, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s6, s10 +; GCN-NEXT: s_cselect_b32 s6, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s7, s11 +; GCN-NEXT: s_cselect_b32 s6, s6, s16 +; GCN-NEXT: s_cmp_lg_u32 s6, 0 +; GCN-NEXT: s_cselect_b32 s5, s5, s15 +; GCN-NEXT: s_cselect_b32 s4, s4, s14 +; GCN-NEXT: s_xor_b64 s[6:7], s[12:13], s[8:9] +; GCN-NEXT: s_xor_b64 s[4:5], s[4:5], s[6:7] +; GCN-NEXT: s_sub_u32 s4, s4, s6 +; GCN-NEXT: s_subb_u32 s5, s5, s7 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: v_mov_b32_e32 v1, s5 +; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; GCN-IR-LABEL: s_test_sdiv: @@ -1040,27 +1073,26 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 % ; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_mov_b32 s6, -1 ; 
GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: s_sext_i32_i16 s1, s9 -; GCN-NEXT: v_mov_b32_e32 v0, s8 -; GCN-NEXT: v_alignbit_b32 v0, s1, v0, 24 -; GCN-NEXT: v_cvt_f32_i32_e32 v1, v0 ; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_sext_i32_i16 s0, s3 -; GCN-NEXT: v_mov_b32_e32 v2, s2 -; GCN-NEXT: v_alignbit_b32 v2, s0, v2, 24 -; GCN-NEXT: v_cvt_f32_i32_e32 v3, v2 -; GCN-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GCN-NEXT: v_xor_b32_e32 v0, v2, v0 -; GCN-NEXT: v_ashrrev_i32_e32 v0, 30, v0 -; GCN-NEXT: v_or_b32_e32 v0, 1, v0 -; GCN-NEXT: v_mul_f32_e32 v2, v3, v4 +; GCN-NEXT: s_sext_i32_i16 s9, s9 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: s_lshr_b64 s[0:1], s[8:9], 24 +; GCN-NEXT: v_cvt_f32_i32_e32 v0, s0 +; GCN-NEXT: s_sext_i32_i16 s3, s3 +; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GCN-NEXT: s_xor_b32 s0, s2, s0 +; GCN-NEXT: s_ashr_i32 s0, s0, 30 +; GCN-NEXT: s_or_b32 s2, s0, 1 +; GCN-NEXT: v_mul_f32_e32 v2, v1, v2 ; GCN-NEXT: v_trunc_f32_e32 v2, v2 -; GCN-NEXT: v_mad_f32 v3, -v2, v1, v3 +; GCN-NEXT: v_mad_f32 v1, -v2, v0, v1 ; GCN-NEXT: v_cvt_i32_f32_e32 v2, v2 -; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1| -; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 +; GCN-NEXT: v_cmp_ge_f32_e64 s[0:1], |v1|, |v0| +; GCN-NEXT: s_and_b64 s[0:1], s[0:1], exec +; GCN-NEXT: s_cselect_b32 s0, s2, 0 +; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v2 ; GCN-NEXT: v_bfe_i32 v0, v0, 0, 24 ; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 @@ -1074,27 +1106,26 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 % ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_mov_b32 s5, s1 -; GCN-IR-NEXT: s_sext_i32_i16 s1, s9 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 -; GCN-IR-NEXT: v_alignbit_b32 v0, s1, v0, 24 -; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, v0 ; GCN-IR-NEXT: s_mov_b32 s4, s0 -; GCN-IR-NEXT: s_sext_i32_i16 s0, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s2 -; GCN-IR-NEXT: v_alignbit_b32 v2, s0, v2, 24 -; GCN-IR-NEXT: v_cvt_f32_i32_e32 v3, v2 -; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v0, 30, v0 -; GCN-IR-NEXT: v_or_b32_e32 v0, 1, v0 -; GCN-IR-NEXT: v_mul_f32_e32 v2, v3, v4 +; GCN-IR-NEXT: s_sext_i32_i16 s9, s9 +; GCN-IR-NEXT: s_mov_b32 s5, s1 +; GCN-IR-NEXT: s_lshr_b64 s[0:1], s[8:9], 24 +; GCN-IR-NEXT: v_cvt_f32_i32_e32 v0, s0 +; GCN-IR-NEXT: s_sext_i32_i16 s3, s3 +; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GCN-IR-NEXT: s_xor_b32 s0, s2, s0 +; GCN-IR-NEXT: s_ashr_i32 s0, s0, 30 +; GCN-IR-NEXT: s_or_b32 s2, s0, 1 +; GCN-IR-NEXT: v_mul_f32_e32 v2, v1, v2 ; GCN-IR-NEXT: v_trunc_f32_e32 v2, v2 -; GCN-IR-NEXT: v_mad_f32 v3, -v2, v1, v3 +; GCN-IR-NEXT: v_mad_f32 v1, -v2, v0, v1 ; GCN-IR-NEXT: v_cvt_i32_f32_e32 v2, v2 -; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1| -; GCN-IR-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, v0, v2 +; GCN-IR-NEXT: v_cmp_ge_f32_e64 s[0:1], |v1|, |v0| +; GCN-IR-NEXT: s_and_b64 s[0:1], s[0:1], exec +; GCN-IR-NEXT: s_cselect_b32 s0, s2, 0 +; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s0, v2 ; GCN-IR-NEXT: v_bfe_i32 v0, v0, 0, 24 ; GCN-IR-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0 @@ -1111,116 +1142,145 @@ define 
amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x) ; GCN-LABEL: s_test_sdiv_k_num_i64: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_ashr_i32 s8, s3, 31 -; GCN-NEXT: s_add_u32 s2, s2, s8 -; GCN-NEXT: s_mov_b32 s9, s8 -; GCN-NEXT: s_addc_u32 s3, s3, s8 -; GCN-NEXT: s_xor_b64 s[2:3], s[2:3], s[8:9] -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GCN-NEXT: s_sub_u32 s4, 0, s2 -; GCN-NEXT: s_subb_u32 s5, 0, s3 +; GCN-NEXT: s_ashr_i32 s4, s3, 31 +; GCN-NEXT: s_add_u32 s2, s2, s4 +; GCN-NEXT: s_mov_b32 s5, s4 +; GCN-NEXT: s_addc_u32 s3, s3, s4 +; GCN-NEXT: s_xor_b64 s[6:7], s[2:3], s[4:5] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s7 +; GCN-NEXT: s_sub_u32 s2, 0, s6 +; GCN-NEXT: s_subb_u32 s10, 0, s7 +; GCN-NEXT: s_mov_b32 s3, 0xf000 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GCN-NEXT: v_trunc_f32_e32 v1, v1 ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_mul_lo_u32 v2, s4, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s4, v0 -; GCN-NEXT: v_mul_lo_u32 v5, s5, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s4, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GCN-NEXT: v_mul_hi_u32 v3, v0, v4 -; GCN-NEXT: v_mul_lo_u32 v5, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v6, v1, v4 -; GCN-NEXT: v_mul_lo_u32 v4, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v8, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v4 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v6, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s4, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s4, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s5, v0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_mul_lo_u32 v3, s4, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_mul_lo_u32 v6, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v3 -; GCN-NEXT: v_mul_hi_u32 v8, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v3 -; GCN-NEXT: v_mul_lo_u32 v3, v1, v3 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v2 -; GCN-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, 24 -; GCN-NEXT: v_mul_hi_u32 v0, v0, 24 -; GCN-NEXT: v_mul_hi_u32 v1, v1, 24 -; GCN-NEXT: v_mov_b32_e32 v4, s3 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v1, vcc -; GCN-NEXT: v_mul_lo_u32 v1, s3, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_mul_hi_u32 v2, s2, v0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2 -; GCN-NEXT: 
v_mul_lo_u32 v2, s2, v0 -; GCN-NEXT: v_sub_i32_e32 v3, vcc, 0, v1 -; GCN-NEXT: v_sub_i32_e32 v2, vcc, 24, v2 -; GCN-NEXT: v_subb_u32_e64 v3, s[0:1], v3, v4, vcc -; GCN-NEXT: v_subrev_i32_e64 v4, s[0:1], s2, v2 -; GCN-NEXT: v_subbrev_u32_e64 v3, s[0:1], 0, v3, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[0:1], s3, v3 -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[0:1], s2, v4 -; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] -; GCN-NEXT: v_cmp_eq_u32_e64 s[0:1], s3, v3 -; GCN-NEXT: v_cndmask_b32_e64 v3, v5, v4, s[0:1] -; GCN-NEXT: v_add_i32_e64 v4, s[0:1], 1, v0 -; GCN-NEXT: v_addc_u32_e64 v5, s[0:1], 0, 0, s[0:1] -; GCN-NEXT: v_add_i32_e64 v6, s[0:1], 2, v0 -; GCN-NEXT: v_addc_u32_e64 v7, s[0:1], 0, 0, s[0:1] -; GCN-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v3 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s3, v1 -; GCN-NEXT: v_cndmask_b32_e64 v3, v4, v6, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v4, v5, v7, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s2, v2 -; GCN-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, s3, v1 -; GCN-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1 -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v4, vcc -; GCN-NEXT: v_xor_b32_e32 v0, s8, v0 -; GCN-NEXT: v_xor_b32_e32 v1, s8, v1 -; GCN-NEXT: v_mov_b32_e32 v2, s8 -; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s8, v0 -; GCN-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: v_readfirstlane_b32 s11, v1 +; GCN-NEXT: v_readfirstlane_b32 s8, v0 +; GCN-NEXT: s_mul_i32 s9, s2, s11 +; GCN-NEXT: v_readfirstlane_b32 s14, v2 +; GCN-NEXT: s_mul_i32 s12, s10, s8 +; GCN-NEXT: s_mul_i32 s13, s2, s8 +; GCN-NEXT: s_add_i32 s9, s14, s9 +; GCN-NEXT: v_mul_hi_u32 v3, v0, s13 +; GCN-NEXT: s_add_i32 s9, s9, s12 +; GCN-NEXT: v_mul_hi_u32 v0, v0, s9 +; GCN-NEXT: v_mul_hi_u32 v4, v1, s13 +; GCN-NEXT: v_readfirstlane_b32 s12, v3 +; GCN-NEXT: s_mul_i32 s15, s8, s9 +; GCN-NEXT: v_mul_hi_u32 v1, v1, s9 +; GCN-NEXT: s_add_u32 s12, s12, s15 +; GCN-NEXT: v_readfirstlane_b32 s15, v0 +; GCN-NEXT: s_mul_i32 s13, s11, s13 +; GCN-NEXT: s_addc_u32 s15, 0, s15 +; GCN-NEXT: v_readfirstlane_b32 s14, v4 +; GCN-NEXT: s_add_u32 s12, s12, s13 +; GCN-NEXT: s_addc_u32 s12, s15, s14 +; GCN-NEXT: v_readfirstlane_b32 s13, v1 +; GCN-NEXT: s_addc_u32 s13, s13, 0 +; GCN-NEXT: s_mul_i32 s9, s11, s9 +; GCN-NEXT: s_add_u32 s9, s12, s9 +; GCN-NEXT: s_addc_u32 s12, 0, s13 +; GCN-NEXT: s_add_u32 s13, s8, s9 +; GCN-NEXT: v_mov_b32_e32 v0, s13 +; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GCN-NEXT: v_mul_hi_u32 v0, s2, v0 +; GCN-NEXT: s_or_b32 s8, s8, s9 +; GCN-NEXT: s_cmp_lg_u32 s8, 0 +; GCN-NEXT: s_addc_u32 s11, s11, s12 +; GCN-NEXT: s_mul_i32 s8, s2, s11 +; GCN-NEXT: v_readfirstlane_b32 s9, v0 +; GCN-NEXT: s_add_i32 s8, s9, s8 +; GCN-NEXT: s_mul_i32 s10, s10, s13 +; GCN-NEXT: s_mul_i32 s2, s2, s13 +; GCN-NEXT: s_add_i32 s8, s8, s10 +; GCN-NEXT: v_mov_b32_e32 v2, s2 +; GCN-NEXT: v_mov_b32_e32 v0, s8 +; GCN-NEXT: v_mul_hi_u32 v3, s11, v2 +; GCN-NEXT: v_mul_hi_u32 v2, s13, v2 +; GCN-NEXT: v_mul_hi_u32 v1, s11, v0 +; GCN-NEXT: v_mul_hi_u32 v0, s13, v0 +; GCN-NEXT: s_mul_i32 s10, s13, s8 +; GCN-NEXT: v_readfirstlane_b32 s14, v2 +; GCN-NEXT: s_add_u32 s10, s14, s10 +; GCN-NEXT: v_readfirstlane_b32 s12, v0 +; GCN-NEXT: s_mul_i32 s2, s11, s2 +; GCN-NEXT: s_addc_u32 s12, 0, s12 +; GCN-NEXT: v_readfirstlane_b32 s9, v3 +; GCN-NEXT: s_add_u32 s2, 
s10, s2 +; GCN-NEXT: s_addc_u32 s2, s12, s9 +; GCN-NEXT: v_readfirstlane_b32 s9, v1 +; GCN-NEXT: s_addc_u32 s9, s9, 0 +; GCN-NEXT: s_mul_i32 s8, s11, s8 +; GCN-NEXT: s_add_u32 s2, s2, s8 +; GCN-NEXT: s_addc_u32 s10, 0, s9 +; GCN-NEXT: s_add_u32 s2, s13, s2 +; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GCN-NEXT: s_or_b32 s8, s8, s9 +; GCN-NEXT: s_cmp_lg_u32 s8, 0 +; GCN-NEXT: s_addc_u32 s8, s11, s10 +; GCN-NEXT: v_mul_hi_u32 v1, s2, 24 +; GCN-NEXT: v_mul_hi_u32 v0, s8, 24 +; GCN-NEXT: s_mul_i32 s8, s8, 24 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: v_readfirstlane_b32 s10, v1 +; GCN-NEXT: v_readfirstlane_b32 s9, v0 +; GCN-NEXT: s_add_u32 s8, s10, s8 +; GCN-NEXT: s_addc_u32 s10, 0, s9 +; GCN-NEXT: v_mov_b32_e32 v0, s10 +; GCN-NEXT: v_mul_hi_u32 v0, s6, v0 +; GCN-NEXT: s_mul_i32 s8, s7, s10 +; GCN-NEXT: v_readfirstlane_b32 s9, v0 +; GCN-NEXT: s_add_i32 s11, s9, s8 +; GCN-NEXT: s_sub_i32 s12, 0, s11 +; GCN-NEXT: s_mul_i32 s8, s6, s10 +; GCN-NEXT: s_sub_u32 s13, 24, s8 +; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GCN-NEXT: s_or_b32 s14, s8, s9 +; GCN-NEXT: s_cmp_lg_u32 s14, 0 +; GCN-NEXT: s_subb_u32 s12, s12, s7 +; GCN-NEXT: s_sub_u32 s15, s13, s6 +; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GCN-NEXT: s_or_b32 s8, s8, s9 +; GCN-NEXT: s_cmp_lg_u32 s8, 0 +; GCN-NEXT: s_subb_u32 s8, s12, 0 +; GCN-NEXT: s_cmp_ge_u32 s8, s7 +; GCN-NEXT: s_cselect_b32 s9, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s15, s6 +; GCN-NEXT: s_cselect_b32 s12, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s8, s7 +; GCN-NEXT: s_cselect_b32 s8, s12, s9 +; GCN-NEXT: s_add_u32 s9, s10, 1 +; GCN-NEXT: s_addc_u32 s12, 0, 0 +; GCN-NEXT: s_add_u32 s15, s10, 2 +; GCN-NEXT: s_addc_u32 s16, 0, 0 +; GCN-NEXT: s_cmp_lg_u32 s8, 0 +; GCN-NEXT: s_cselect_b32 s8, s15, s9 +; GCN-NEXT: s_cselect_b32 s9, s16, s12 +; GCN-NEXT: s_cmp_lg_u32 s14, 0 +; GCN-NEXT: s_subb_u32 s11, 0, s11 +; GCN-NEXT: s_cmp_ge_u32 s11, s7 +; GCN-NEXT: s_cselect_b32 s12, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s13, s6 +; GCN-NEXT: s_cselect_b32 s6, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s11, s7 +; GCN-NEXT: s_cselect_b32 s6, s6, s12 +; GCN-NEXT: s_cmp_lg_u32 s6, 0 +; GCN-NEXT: s_cselect_b32 s7, s9, 0 +; GCN-NEXT: s_cselect_b32 s6, s8, s10 +; GCN-NEXT: s_xor_b64 s[6:7], s[6:7], s[4:5] +; GCN-NEXT: s_sub_u32 s6, s6, s4 +; GCN-NEXT: s_subb_u32 s7, s7, s4 +; GCN-NEXT: v_mov_b32_e32 v0, s6 +; GCN-NEXT: v_mov_b32_e32 v1, s7 +; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; GCN-IR-LABEL: s_test_sdiv_k_num_i64: diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll index 92d3277d5d3e3..bb22144b815a1 100644 --- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll +++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll @@ -4148,28 +4148,28 @@ define <2 x half> @mul_select_negk_negfabs_v2f16(<2 x i32> %c, <2 x half> %x, <2 ; -------------------------------------------------------------------------------- define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %y) { -; CI-SAFE-LABEL: select_fneg_posk_src_add_v2f16: -; CI-SAFE: ; %bb.0: -; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_add_f32_e32 v3, 4.0, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v2, 4.0, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 
-; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3 -; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2 -; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc -; CI-SAFE-NEXT: s_setpc_b64 s[30:31] +; CI-LABEL: select_fneg_posk_src_add_v2f16: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_add_f32_e32 v3, 4.0, v3 +; CI-NEXT: v_add_f32_e32 v2, 4.0, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_or_b32_e32 v2, v2, v3 +; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v2 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc +; CI-NEXT: s_setpc_b64 s[30:31] ; ; VI-SAFE-LABEL: select_fneg_posk_src_add_v2f16: ; VI-SAFE: ; %bb.0: @@ -4229,21 +4229,6 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, < ; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] ; -; CI-NSZ-LABEL: select_fneg_posk_src_add_v2f16: -; CI-NSZ: ; %bb.0: -; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-NSZ-NEXT: v_sub_f32_e32 v2, -4.0, v2 -; CI-NSZ-NEXT: v_sub_f32_e32 v3, -4.0, v3 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc -; CI-NSZ-NEXT: s_setpc_b64 s[30:31] -; ; VI-NSZ-LABEL: select_fneg_posk_src_add_v2f16: ; VI-NSZ: ; %bb.0: ; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -4302,6 +4287,105 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, < ret <2 x half> %select } +define <2 x half> @select_fneg_posk_src_add_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %y) { +; CI-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_sub_f32_e32 v2, -4.0, v2 +; CI-NEXT: v_sub_f32_e32 v3, -4.0, v3 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc +; CI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; VI-NEXT: v_mov_b32_e32 v1, 0xc400 +; VI-NEXT: v_sub_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; VI-NEXT: v_sub_f16_e32 v2, -4.0, v2 +; VI-NEXT: v_mov_b32_e32 v3, 0x4000 +; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, 
v3, v2, s[4:5] +; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_pk_add_f16 v1, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5] +; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-SAFE-TRUE16: ; %bb.0: +; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-SAFE-FAKE16: ; %bb.0: +; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-NSZ-TRUE16: ; %bb.0: +; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-NSZ-FAKE16: ; %bb.0: +; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; 
GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = icmp eq <2 x i32> %c, zeroinitializer + %add = fadd nsz <2 x half> %x, + %fneg = fneg <2 x half> %add + %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> + ret <2 x half> %select +} + define <2 x half> @select_fneg_posk_src_sub_v2f16(<2 x i32> %c, <2 x half> %x) { ; CI-SAFE-LABEL: select_fneg_posk_src_sub_v2f16: ; CI-SAFE: ; %bb.0: @@ -4704,34 +4788,34 @@ define <2 x half> @select_fneg_posk_src_fma_v2f16(<2 x i32> %c, <2 x half> %x, < } define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %z) { -; CI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16: -; CI-SAFE: ; %bb.0: -; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; CI-SAFE-NEXT: v_mul_f32_e32 v3, 4.0, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v3, v3, v5 -; CI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v2, v2, v4 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3 -; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2 -; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc -; CI-SAFE-NEXT: s_setpc_b64 s[30:31] +; CI-LABEL: select_fneg_posk_src_fmad_v2f16: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; CI-NEXT: v_mul_f32_e32 v3, 4.0, v3 +; CI-NEXT: v_add_f32_e32 v3, v3, v5 +; CI-NEXT: v_mul_f32_e32 v2, 4.0, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_add_f32_e32 v2, v2, v4 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_or_b32_e32 v2, v2, v3 +; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v2 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc +; CI-NEXT: s_setpc_b64 s[30:31] ; ; VI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16: ; VI-SAFE: ; %bb.0: @@ -4793,27 +4877,6 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, ; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] ; -; CI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16: -; CI-NSZ: ; %bb.0: -; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; 
CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v4, v4 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v5, v5 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v4, v4 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v5, v5 -; CI-NSZ-NEXT: v_mul_f32_e32 v2, -4.0, v2 -; CI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v3 -; CI-NSZ-NEXT: v_sub_f32_e32 v2, v2, v4 -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-NSZ-NEXT: v_sub_f32_e32 v3, v3, v5 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc -; CI-NSZ-NEXT: s_setpc_b64 s[30:31] -; ; VI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16: ; VI-NSZ: ; %bb.0: ; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -4873,6 +4936,112 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, ret <2 x half> %select } +define <2 x half> @select_fneg_posk_src_fmad_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %z) { +; CI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; CI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; CI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; CI-NEXT: v_mul_f32_e32 v2, -4.0, v2 +; CI-NEXT: v_mul_f32_e32 v3, -4.0, v3 +; CI-NEXT: v_sub_f32_e32 v2, v2, v4 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_sub_f32_e32 v3, v3, v5 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc +; CI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_fma_f16 v1, v4, -4.0, -v1 +; VI-NEXT: v_fma_f16 v2, v2, -4.0, -v3 +; VI-NEXT: v_mov_b32_e32 v3, 0x4000 +; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5] +; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_pk_fma_f16 v1, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5] +; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-SAFE-TRUE16: ; %bb.0: +; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 
0, v1 +; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-SAFE-FAKE16: ; %bb.0: +; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-NSZ-TRUE16: ; %bb.0: +; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-NSZ-FAKE16: ; %bb.0: +; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = icmp eq <2 x i32> %c, zeroinitializer + %fmad = call nsz <2 x half> @llvm.fmuladd.v2f16(<2 x half> %x, <2 x half> <half 4.0, half 4.0>, <2 x half> %z) + %fneg = fneg <2 x half> %fmad + %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> <half 2.0, half 2.0> + ret <2 x half> %select +} + declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #0 declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>) #0 declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>) #0 diff --git a/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll b/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll index 59a884c829312..760a126afa995 100644 --- a/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll +++ b/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll @@ -281,9 +281,9 @@ define amdgpu_kernel void @v_uextract_bit_31_32_i64(ptr addrspace(1) %out, ptr a ; GCN-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[8:11], 0 addr64 ; GCN-NEXT: s_mov_b64 s[6:7], s[10:11] ; 
GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_alignbit_b32 v2, v3, v2, 31 -; GCN-NEXT: v_and_b32_e32 v2, 3, v2 +; GCN-NEXT: v_lshr_b64 v[2:3], v[2:3], 31 ; GCN-NEXT: v_mov_b32_e32 v3, 0 +; GCN-NEXT: v_and_b32_e32 v2, 3, v2 ; GCN-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 ; GCN-NEXT: s_endpgm %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x() diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll index f614f58d8e1dc..5944342b2642a 100644 --- a/llvm/test/CodeGen/AMDGPU/srem.ll +++ b/llvm/test/CodeGen/AMDGPU/srem.ll @@ -1491,29 +1491,29 @@ define amdgpu_kernel void @srem_v4i32_4(ptr addrspace(1) %out, ptr addrspace(1) define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GCN-LABEL: srem_i64: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GCN-NEXT: v_mov_b32_e32 v0, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: global_load_dwordx4 v[0:3], v0, s[10:11] +; GCN-NEXT: global_load_dwordx4 v[0:3], v0, s[2:3] ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_readfirstlane_b32 s7, v1 -; GCN-NEXT: v_readfirstlane_b32 s6, v0 -; GCN-NEXT: v_readfirstlane_b32 s5, v3 -; GCN-NEXT: v_readfirstlane_b32 s4, v2 -; GCN-NEXT: s_or_b64 s[0:1], s[6:7], s[4:5] -; GCN-NEXT: s_mov_b32 s0, 0 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GCN-NEXT: v_readfirstlane_b32 s5, v1 +; GCN-NEXT: v_readfirstlane_b32 s4, v0 +; GCN-NEXT: v_readfirstlane_b32 s3, v3 +; GCN-NEXT: v_readfirstlane_b32 s2, v2 +; GCN-NEXT: s_or_b64 s[6:7], s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s6, 0 +; GCN-NEXT: s_cmp_lg_u64 s[6:7], 0 ; GCN-NEXT: s_cbranch_scc0 .LBB8_4 ; GCN-NEXT: ; %bb.1: -; GCN-NEXT: s_ashr_i32 s0, s5, 31 -; GCN-NEXT: s_add_u32 s2, s4, s0 -; GCN-NEXT: s_mov_b32 s1, s0 -; GCN-NEXT: s_addc_u32 s3, s5, s0 -; GCN-NEXT: s_xor_b64 s[12:13], s[2:3], s[0:1] -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s12 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, s13 -; GCN-NEXT: s_sub_u32 s0, 0, s12 -; GCN-NEXT: s_subb_u32 s1, 0, s13 +; GCN-NEXT: s_ashr_i32 s6, s3, 31 +; GCN-NEXT: s_add_u32 s8, s2, s6 +; GCN-NEXT: s_mov_b32 s7, s6 +; GCN-NEXT: s_addc_u32 s9, s3, s6 +; GCN-NEXT: s_xor_b64 s[8:9], s[8:9], s[6:7] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s9 +; GCN-NEXT: s_sub_u32 s3, 0, s8 +; GCN-NEXT: s_subb_u32 s12, 0, s9 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -1522,155 +1522,148 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 ; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_readfirstlane_b32 s2, v1 -; GCN-NEXT: v_readfirstlane_b32 s3, v0 -; GCN-NEXT: s_mul_i32 s5, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s15, s0, s3 -; GCN-NEXT: s_mul_i32 s14, s1, s3 -; GCN-NEXT: s_add_i32 s5, s15, s5 -; GCN-NEXT: s_add_i32 s5, s5, s14 -; GCN-NEXT: s_mul_i32 s16, s0, s3 -; GCN-NEXT: s_mul_hi_u32 s14, s3, s5 -; GCN-NEXT: s_mul_i32 s15, s3, s5 -; GCN-NEXT: s_mul_hi_u32 s3, s3, s16 -; GCN-NEXT: s_add_u32 s3, s3, s15 +; GCN-NEXT: v_readfirstlane_b32 s13, v1 +; GCN-NEXT: v_readfirstlane_b32 s10, v0 +; GCN-NEXT: s_mul_i32 s11, s3, s13 +; GCN-NEXT: s_mul_hi_u32 s15, s3, s10 +; GCN-NEXT: s_mul_i32 s14, s12, s10 +; GCN-NEXT: s_add_i32 s11, s15, s11 +; GCN-NEXT: s_add_i32 s11, s11, s14 +; GCN-NEXT: s_mul_i32 s16, s3, s10 +; GCN-NEXT: s_mul_i32 s15, s10, s11 +; GCN-NEXT: s_mul_hi_u32 s17, s10, s16 +; GCN-NEXT: s_mul_hi_u32 s14, s10, s11 +; 
GCN-NEXT: s_add_u32 s15, s17, s15 ; GCN-NEXT: s_addc_u32 s14, 0, s14 -; GCN-NEXT: s_mul_hi_u32 s17, s2, s16 -; GCN-NEXT: s_mul_i32 s16, s2, s16 -; GCN-NEXT: s_add_u32 s3, s3, s16 -; GCN-NEXT: s_mul_hi_u32 s15, s2, s5 -; GCN-NEXT: s_addc_u32 s3, s14, s17 -; GCN-NEXT: s_addc_u32 s14, s15, 0 -; GCN-NEXT: s_mul_i32 s5, s2, s5 -; GCN-NEXT: s_add_u32 s3, s3, s5 -; GCN-NEXT: s_addc_u32 s5, 0, s14 -; GCN-NEXT: v_add_co_u32_e32 v0, vcc, s3, v0 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_addc_u32 s2, s2, s5 -; GCN-NEXT: v_readfirstlane_b32 s5, v0 -; GCN-NEXT: s_mul_i32 s3, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s14, s0, s5 -; GCN-NEXT: s_add_i32 s3, s14, s3 -; GCN-NEXT: s_mul_i32 s1, s1, s5 -; GCN-NEXT: s_add_i32 s3, s3, s1 -; GCN-NEXT: s_mul_i32 s0, s0, s5 -; GCN-NEXT: s_mul_hi_u32 s14, s2, s0 -; GCN-NEXT: s_mul_i32 s15, s2, s0 -; GCN-NEXT: s_mul_i32 s17, s5, s3 -; GCN-NEXT: s_mul_hi_u32 s0, s5, s0 -; GCN-NEXT: s_mul_hi_u32 s16, s5, s3 -; GCN-NEXT: s_add_u32 s0, s0, s17 -; GCN-NEXT: s_addc_u32 s5, 0, s16 -; GCN-NEXT: s_add_u32 s0, s0, s15 -; GCN-NEXT: s_mul_hi_u32 s1, s2, s3 -; GCN-NEXT: s_addc_u32 s0, s5, s14 -; GCN-NEXT: s_addc_u32 s1, s1, 0 -; GCN-NEXT: s_mul_i32 s3, s2, s3 -; GCN-NEXT: s_add_u32 s0, s0, s3 -; GCN-NEXT: s_addc_u32 s1, 0, s1 -; GCN-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_addc_u32 s2, s2, s1 -; GCN-NEXT: s_ashr_i32 s14, s7, 31 -; GCN-NEXT: s_add_u32 s0, s6, s14 -; GCN-NEXT: s_mov_b32 s15, s14 -; GCN-NEXT: s_addc_u32 s1, s7, s14 -; GCN-NEXT: s_xor_b64 s[16:17], s[0:1], s[14:15] -; GCN-NEXT: v_readfirstlane_b32 s3, v0 -; GCN-NEXT: s_mul_i32 s1, s16, s2 -; GCN-NEXT: s_mul_hi_u32 s5, s16, s3 -; GCN-NEXT: s_mul_hi_u32 s0, s16, s2 -; GCN-NEXT: s_add_u32 s1, s5, s1 -; GCN-NEXT: s_addc_u32 s0, 0, s0 -; GCN-NEXT: s_mul_hi_u32 s7, s17, s3 -; GCN-NEXT: s_mul_i32 s3, s17, s3 -; GCN-NEXT: s_add_u32 s1, s1, s3 -; GCN-NEXT: s_mul_hi_u32 s5, s17, s2 -; GCN-NEXT: s_addc_u32 s0, s0, s7 -; GCN-NEXT: s_addc_u32 s1, s5, 0 -; GCN-NEXT: s_mul_i32 s2, s17, s2 -; GCN-NEXT: s_add_u32 s0, s0, s2 -; GCN-NEXT: s_addc_u32 s1, 0, s1 -; GCN-NEXT: s_mul_i32 s1, s12, s1 -; GCN-NEXT: s_mul_hi_u32 s2, s12, s0 -; GCN-NEXT: s_add_i32 s1, s2, s1 -; GCN-NEXT: s_mul_i32 s2, s13, s0 -; GCN-NEXT: s_mul_i32 s0, s12, s0 -; GCN-NEXT: s_add_i32 s5, s1, s2 -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: s_sub_i32 s1, s17, s5 -; GCN-NEXT: v_sub_co_u32_e32 v0, vcc, s16, v0 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_subb_u32 s7, s1, s13 -; GCN-NEXT: v_subrev_co_u32_e64 v1, s[0:1], s12, v0 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: s_subb_u32 s15, s7, 0 -; GCN-NEXT: s_cmp_ge_u32 s15, s13 -; GCN-NEXT: s_cselect_b32 s16, -1, 0 -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v1 -; GCN-NEXT: s_cmp_eq_u32 s15, s13 -; GCN-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[2:3] -; GCN-NEXT: v_mov_b32_e32 v3, s16 -; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: v_cndmask_b32_e64 v2, v3, v2, s[2:3] -; GCN-NEXT: s_subb_u32 s2, s7, s13 -; GCN-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s12, v1 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: s_subb_u32 s2, s2, 0 -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v2 -; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1] -; GCN-NEXT: v_mov_b32_e32 v2, s15 -; GCN-NEXT: v_mov_b32_e32 v3, s2 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GCN-NEXT: s_subb_u32 s0, s17, s5 -; GCN-NEXT: s_cmp_ge_u32 s0, s13 -; GCN-NEXT: s_cselect_b32 s1, -1, 0 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v0 -; GCN-NEXT: 
s_cmp_eq_u32 s0, s13 -; GCN-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc -; GCN-NEXT: v_mov_b32_e32 v4, s1 -; GCN-NEXT: s_cselect_b64 vcc, -1, 0 -; GCN-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; GCN-NEXT: v_mov_b32_e32 v4, s0 -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc -; GCN-NEXT: v_xor_b32_e32 v0, s14, v0 -; GCN-NEXT: v_xor_b32_e32 v1, s14, v2 -; GCN-NEXT: v_mov_b32_e32 v2, s14 -; GCN-NEXT: v_subrev_co_u32_e32 v0, vcc, s14, v0 -; GCN-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc +; GCN-NEXT: s_mul_hi_u32 s18, s13, s16 +; GCN-NEXT: s_mul_i32 s16, s13, s16 +; GCN-NEXT: s_add_u32 s15, s15, s16 +; GCN-NEXT: s_mul_hi_u32 s17, s13, s11 +; GCN-NEXT: s_addc_u32 s14, s14, s18 +; GCN-NEXT: s_addc_u32 s15, s17, 0 +; GCN-NEXT: s_mul_i32 s11, s13, s11 +; GCN-NEXT: s_add_u32 s11, s14, s11 +; GCN-NEXT: s_addc_u32 s14, 0, s15 +; GCN-NEXT: s_add_u32 s15, s10, s11 +; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GCN-NEXT: s_addc_u32 s13, s13, s14 +; GCN-NEXT: s_mul_i32 s10, s3, s13 +; GCN-NEXT: s_mul_hi_u32 s11, s3, s15 +; GCN-NEXT: s_add_i32 s10, s11, s10 +; GCN-NEXT: s_mul_i32 s12, s12, s15 +; GCN-NEXT: s_add_i32 s10, s10, s12 +; GCN-NEXT: s_mul_i32 s3, s3, s15 +; GCN-NEXT: s_mul_hi_u32 s12, s13, s3 +; GCN-NEXT: s_mul_i32 s14, s13, s3 +; GCN-NEXT: s_mul_i32 s17, s15, s10 +; GCN-NEXT: s_mul_hi_u32 s3, s15, s3 +; GCN-NEXT: s_mul_hi_u32 s16, s15, s10 +; GCN-NEXT: s_add_u32 s3, s3, s17 +; GCN-NEXT: s_addc_u32 s16, 0, s16 +; GCN-NEXT: s_add_u32 s3, s3, s14 +; GCN-NEXT: s_mul_hi_u32 s11, s13, s10 +; GCN-NEXT: s_addc_u32 s3, s16, s12 +; GCN-NEXT: s_addc_u32 s11, s11, 0 +; GCN-NEXT: s_mul_i32 s10, s13, s10 +; GCN-NEXT: s_add_u32 s3, s3, s10 +; GCN-NEXT: s_addc_u32 s12, 0, s11 +; GCN-NEXT: s_add_u32 s3, s15, s3 +; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[10:11], 0 +; GCN-NEXT: s_addc_u32 s14, s13, s12 +; GCN-NEXT: s_ashr_i32 s10, s5, 31 +; GCN-NEXT: s_add_u32 s12, s4, s10 +; GCN-NEXT: s_mov_b32 s11, s10 +; GCN-NEXT: s_addc_u32 s13, s5, s10 +; GCN-NEXT: s_xor_b64 s[12:13], s[12:13], s[10:11] +; GCN-NEXT: s_mul_i32 s15, s12, s14 +; GCN-NEXT: s_mul_hi_u32 s16, s12, s3 +; GCN-NEXT: s_mul_hi_u32 s5, s12, s14 +; GCN-NEXT: s_add_u32 s15, s16, s15 +; GCN-NEXT: s_addc_u32 s5, 0, s5 +; GCN-NEXT: s_mul_hi_u32 s17, s13, s3 +; GCN-NEXT: s_mul_i32 s3, s13, s3 +; GCN-NEXT: s_add_u32 s3, s15, s3 +; GCN-NEXT: s_mul_hi_u32 s16, s13, s14 +; GCN-NEXT: s_addc_u32 s3, s5, s17 +; GCN-NEXT: s_addc_u32 s5, s16, 0 +; GCN-NEXT: s_mul_i32 s14, s13, s14 +; GCN-NEXT: s_add_u32 s3, s3, s14 +; GCN-NEXT: s_addc_u32 s5, 0, s5 +; GCN-NEXT: s_mul_i32 s5, s8, s5 +; GCN-NEXT: s_mul_hi_u32 s14, s8, s3 +; GCN-NEXT: s_add_i32 s5, s14, s5 +; GCN-NEXT: s_mul_i32 s14, s9, s3 +; GCN-NEXT: s_add_i32 s5, s5, s14 +; GCN-NEXT: s_sub_i32 s16, s13, s5 +; GCN-NEXT: s_mul_i32 s3, s8, s3 +; GCN-NEXT: s_sub_u32 s3, s12, s3 +; GCN-NEXT: s_cselect_b64 s[14:15], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[14:15], 0 +; GCN-NEXT: s_subb_u32 s12, s16, s9 +; GCN-NEXT: s_sub_u32 s18, s3, s8 +; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_subb_u32 s19, s12, 0 +; GCN-NEXT: s_cmp_ge_u32 s19, s9 +; GCN-NEXT: s_cselect_b32 s20, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s18, s8 +; GCN-NEXT: s_cselect_b32 s21, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s19, s9 +; GCN-NEXT: s_cselect_b32 s20, s21, s20 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_subb_u32 s12, s12, s9 +; GCN-NEXT: s_sub_u32 s21, s18, s8 +; 
GCN-NEXT: s_cselect_b64 s[16:17], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_subb_u32 s12, s12, 0 +; GCN-NEXT: s_cmp_lg_u32 s20, 0 +; GCN-NEXT: s_cselect_b32 s16, s21, s18 +; GCN-NEXT: s_cselect_b32 s12, s12, s19 +; GCN-NEXT: s_cmp_lg_u64 s[14:15], 0 +; GCN-NEXT: s_subb_u32 s5, s13, s5 +; GCN-NEXT: s_cmp_ge_u32 s5, s9 +; GCN-NEXT: s_cselect_b32 s13, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s3, s8 +; GCN-NEXT: s_cselect_b32 s8, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s5, s9 +; GCN-NEXT: s_cselect_b32 s8, s8, s13 +; GCN-NEXT: s_cmp_lg_u32 s8, 0 +; GCN-NEXT: s_cselect_b32 s9, s12, s5 +; GCN-NEXT: s_cselect_b32 s8, s16, s3 +; GCN-NEXT: s_xor_b64 s[8:9], s[8:9], s[10:11] +; GCN-NEXT: s_sub_u32 s8, s8, s10 +; GCN-NEXT: s_subb_u32 s9, s9, s10 ; GCN-NEXT: s_cbranch_execnz .LBB8_3 ; GCN-NEXT: .LBB8_2: -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s4 -; GCN-NEXT: s_sub_i32 s0, 0, s4 -; GCN-NEXT: s_mov_b32 s1, 0 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2 +; GCN-NEXT: s_sub_i32 s3, 0, s2 +; GCN-NEXT: s_mov_b32 s9, 0 ; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_readfirstlane_b32 s2, v0 -; GCN-NEXT: s_mul_i32 s0, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s0, s2, s0 -; GCN-NEXT: s_add_i32 s2, s2, s0 -; GCN-NEXT: s_mul_hi_u32 s0, s6, s2 -; GCN-NEXT: s_mul_i32 s0, s0, s4 -; GCN-NEXT: s_sub_i32 s0, s6, s0 -; GCN-NEXT: s_sub_i32 s2, s0, s4 -; GCN-NEXT: s_cmp_ge_u32 s0, s4 -; GCN-NEXT: s_cselect_b32 s0, s2, s0 -; GCN-NEXT: s_sub_i32 s2, s0, s4 -; GCN-NEXT: s_cmp_ge_u32 s0, s4 -; GCN-NEXT: s_cselect_b32 s0, s2, s0 -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: v_mov_b32_e32 v1, s1 +; GCN-NEXT: v_readfirstlane_b32 s5, v0 +; GCN-NEXT: s_mul_i32 s3, s3, s5 +; GCN-NEXT: s_mul_hi_u32 s3, s5, s3 +; GCN-NEXT: s_add_i32 s5, s5, s3 +; GCN-NEXT: s_mul_hi_u32 s3, s4, s5 +; GCN-NEXT: s_mul_i32 s3, s3, s2 +; GCN-NEXT: s_sub_i32 s3, s4, s3 +; GCN-NEXT: s_sub_i32 s4, s3, s2 +; GCN-NEXT: s_cmp_ge_u32 s3, s2 +; GCN-NEXT: s_cselect_b32 s3, s4, s3 +; GCN-NEXT: s_sub_i32 s4, s3, s2 +; GCN-NEXT: s_cmp_ge_u32 s3, s2 +; GCN-NEXT: s_cselect_b32 s8, s4, s3 ; GCN-NEXT: .LBB8_3: +; GCN-NEXT: v_mov_b32_e32 v0, s8 ; GCN-NEXT: v_mov_b32_e32 v2, 0 -; GCN-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9] +; GCN-NEXT: v_mov_b32_e32 v1, s9 +; GCN-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; GCN-NEXT: s_endpgm ; GCN-NEXT: .LBB8_4: -; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GCN-NEXT: ; implicit-def: $sgpr8_sgpr9 ; GCN-NEXT: s_branch .LBB8_2 ; ; TAHITI-LABEL: srem_i64: @@ -1732,7 +1725,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) ; TAHITI-NEXT: v_mul_lo_u32 v8, v8, v5 ; TAHITI-NEXT: v_mul_lo_u32 v7, v7, v5 ; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v9, v10 -; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v9, v8 +; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v8, v9 ; TAHITI-NEXT: v_mul_lo_u32 v11, v5, v8 ; TAHITI-NEXT: v_mul_hi_u32 v12, v5, v7 ; TAHITI-NEXT: v_mul_hi_u32 v13, v5, v8 @@ -1819,7 +1812,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) ; TAHITI-NEXT: v_mul_hi_u32 v1, v0, v1 ; TAHITI-NEXT: v_mul_lo_u32 v1, v1, v2 ; TAHITI-NEXT: v_sub_i32_e32 v0, vcc, v0, v1 -; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0 +; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 ; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 ; TAHITI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, v0, v2 @@ -1836,150 +1829,175 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) ; ; TONGA-LABEL: srem_i64: 
; TONGA: ; %bb.0: -; TONGA-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24 -; TONGA-NEXT: v_mov_b32_e32 v4, 0 +; TONGA-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; TONGA-NEXT: s_waitcnt lgkmcnt(0) -; TONGA-NEXT: v_mov_b32_e32 v0, s6 -; TONGA-NEXT: v_mov_b32_e32 v1, s7 +; TONGA-NEXT: v_mov_b32_e32 v0, s2 +; TONGA-NEXT: v_mov_b32_e32 v1, s3 ; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1] ; TONGA-NEXT: s_waitcnt vmcnt(0) -; TONGA-NEXT: v_or_b32_e32 v5, v1, v3 -; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5] -; TONGA-NEXT: s_cbranch_vccz .LBB8_4 +; TONGA-NEXT: v_readfirstlane_b32 s5, v1 +; TONGA-NEXT: v_readfirstlane_b32 s4, v0 +; TONGA-NEXT: v_readfirstlane_b32 s3, v3 +; TONGA-NEXT: v_readfirstlane_b32 s2, v2 +; TONGA-NEXT: s_or_b64 s[6:7], s[4:5], s[2:3] +; TONGA-NEXT: s_mov_b32 s6, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[6:7], 0 +; TONGA-NEXT: s_cbranch_scc0 .LBB8_3 ; TONGA-NEXT: ; %bb.1: -; TONGA-NEXT: v_ashrrev_i32_e32 v4, 31, v3 -; TONGA-NEXT: v_add_u32_e32 v5, vcc, v2, v4 -; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v3, v4, vcc -; TONGA-NEXT: v_xor_b32_e32 v9, v5, v4 -; TONGA-NEXT: v_xor_b32_e32 v10, v3, v4 -; TONGA-NEXT: v_cvt_f32_u32_e32 v3, v9 -; TONGA-NEXT: v_cvt_f32_u32_e32 v4, v10 -; TONGA-NEXT: v_sub_u32_e32 v11, vcc, 0, v9 -; TONGA-NEXT: v_subb_u32_e32 v12, vcc, 0, v10, vcc -; TONGA-NEXT: v_madmk_f32 v3, v4, 0x4f800000, v3 -; TONGA-NEXT: v_rcp_f32_e32 v3, v3 -; TONGA-NEXT: v_mul_f32_e32 v3, 0x5f7ffffc, v3 -; TONGA-NEXT: v_mul_f32_e32 v4, 0x2f800000, v3 -; TONGA-NEXT: v_trunc_f32_e32 v4, v4 -; TONGA-NEXT: v_madmk_f32 v3, v4, 0xcf800000, v3 -; TONGA-NEXT: v_cvt_u32_f32_e32 v7, v4 -; TONGA-NEXT: v_cvt_u32_f32_e32 v8, v3 -; TONGA-NEXT: v_mul_lo_u32 v5, v11, v7 -; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v11, v8, 0 -; TONGA-NEXT: v_mul_lo_u32 v6, v12, v8 -; TONGA-NEXT: v_add_u32_e32 v4, vcc, v4, v5 -; TONGA-NEXT: v_add_u32_e32 v6, vcc, v4, v6 -; TONGA-NEXT: v_mad_u64_u32 v[4:5], s[0:1], v8, v6, 0 -; TONGA-NEXT: v_mul_hi_u32 v13, v8, v3 -; TONGA-NEXT: v_add_u32_e32 v13, vcc, v13, v4 -; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v7, v3, 0 -; TONGA-NEXT: v_addc_u32_e32 v14, vcc, 0, v5, vcc -; TONGA-NEXT: v_mad_u64_u32 v[5:6], s[0:1], v7, v6, 0 -; TONGA-NEXT: v_add_u32_e32 v3, vcc, v13, v3 -; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v14, v4, vcc -; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v6, vcc -; TONGA-NEXT: v_add_u32_e32 v3, vcc, v3, v5 -; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; TONGA-NEXT: v_add_u32_e32 v13, vcc, v8, v3 -; TONGA-NEXT: v_addc_u32_e32 v14, vcc, v7, v4, vcc -; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v11, v13, 0 -; TONGA-NEXT: v_mul_lo_u32 v7, v11, v14 -; TONGA-NEXT: v_mul_lo_u32 v8, v12, v13 -; TONGA-NEXT: v_mul_hi_u32 v11, v13, v3 -; TONGA-NEXT: v_mad_u64_u32 v[5:6], s[0:1], v14, v3, 0 -; TONGA-NEXT: v_add_u32_e32 v4, vcc, v7, v4 -; TONGA-NEXT: v_add_u32_e32 v4, vcc, v4, v8 -; TONGA-NEXT: v_mad_u64_u32 v[7:8], s[0:1], v13, v4, 0 -; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v14, v4, 0 -; TONGA-NEXT: v_add_u32_e32 v7, vcc, v11, v7 -; TONGA-NEXT: v_addc_u32_e32 v8, vcc, 0, v8, vcc -; TONGA-NEXT: v_add_u32_e32 v5, vcc, v7, v5 -; TONGA-NEXT: v_addc_u32_e32 v5, vcc, v8, v6, vcc -; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; TONGA-NEXT: v_add_u32_e32 v3, vcc, v5, v3 -; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; TONGA-NEXT: v_add_u32_e32 v5, vcc, v13, v3 -; TONGA-NEXT: v_addc_u32_e32 v6, vcc, v14, v4, vcc -; TONGA-NEXT: v_ashrrev_i32_e32 v7, 31, v1 -; TONGA-NEXT: v_add_u32_e32 v3, vcc, v0, v7 -; TONGA-NEXT: v_xor_b32_e32 v8, v3, v7 -; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v8, 
v6, 0 -; TONGA-NEXT: v_mul_hi_u32 v11, v8, v5 -; TONGA-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc -; TONGA-NEXT: v_xor_b32_e32 v1, v1, v7 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v3 -; TONGA-NEXT: v_addc_u32_e32 v12, vcc, 0, v4, vcc -; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v1, v5, 0 -; TONGA-NEXT: v_mad_u64_u32 v[5:6], s[0:1], v1, v6, 0 -; TONGA-NEXT: v_add_u32_e32 v3, vcc, v11, v3 -; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v12, v4, vcc -; TONGA-NEXT: v_addc_u32_e32 v4, vcc, 0, v6, vcc -; TONGA-NEXT: v_add_u32_e32 v5, vcc, v3, v5 -; TONGA-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; TONGA-NEXT: v_mul_lo_u32 v6, v9, v3 -; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v9, v5, 0 -; TONGA-NEXT: v_mul_lo_u32 v5, v10, v5 -; TONGA-NEXT: v_add_u32_e32 v4, vcc, v6, v4 -; TONGA-NEXT: v_add_u32_e32 v4, vcc, v5, v4 -; TONGA-NEXT: v_sub_u32_e32 v5, vcc, v1, v4 -; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v8, v3 -; TONGA-NEXT: v_subb_u32_e64 v5, s[0:1], v5, v10, vcc -; TONGA-NEXT: v_sub_u32_e64 v6, s[0:1], v3, v9 -; TONGA-NEXT: v_subbrev_u32_e64 v8, s[2:3], 0, v5, s[0:1] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v8, v10 -; TONGA-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[2:3] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v6, v9 -; TONGA-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[2:3] -; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v8, v10 -; TONGA-NEXT: v_subb_u32_e64 v5, s[0:1], v5, v10, s[0:1] -; TONGA-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[2:3] -; TONGA-NEXT: v_sub_u32_e64 v12, s[0:1], v6, v9 -; TONGA-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc -; TONGA-NEXT: v_subbrev_u32_e64 v5, s[0:1], 0, v5, s[0:1] -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v10 -; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v11 -; TONGA-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v3, v9 -; TONGA-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[0:1] -; TONGA-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc -; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v1, v10 -; TONGA-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc -; TONGA-NEXT: v_cndmask_b32_e64 v6, v6, v12, s[0:1] -; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; TONGA-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc -; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc -; TONGA-NEXT: v_xor_b32_e32 v3, v3, v7 -; TONGA-NEXT: v_xor_b32_e32 v1, v1, v7 -; TONGA-NEXT: v_sub_u32_e32 v3, vcc, v3, v7 -; TONGA-NEXT: v_subb_u32_e32 v4, vcc, v1, v7, vcc -; TONGA-NEXT: s_cbranch_execnz .LBB8_3 +; TONGA-NEXT: s_ashr_i32 s6, s3, 31 +; TONGA-NEXT: s_add_u32 s8, s2, s6 +; TONGA-NEXT: s_mov_b32 s7, s6 +; TONGA-NEXT: s_addc_u32 s9, s3, s6 +; TONGA-NEXT: s_xor_b64 s[6:7], s[8:9], s[6:7] +; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s6 +; TONGA-NEXT: v_cvt_f32_u32_e32 v1, s7 +; TONGA-NEXT: s_sub_u32 s3, 0, s6 +; TONGA-NEXT: s_subb_u32 s10, 0, s7 +; TONGA-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 +; TONGA-NEXT: v_rcp_f32_e32 v0, v0 +; TONGA-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 +; TONGA-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 +; TONGA-NEXT: v_trunc_f32_e32 v1, v1 +; TONGA-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 +; TONGA-NEXT: v_cvt_u32_f32_e32 v4, v1 +; TONGA-NEXT: v_cvt_u32_f32_e32 v5, v0 +; TONGA-NEXT: v_mul_lo_u32 v2, s3, v4 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s3, v5, 0 +; TONGA-NEXT: v_mul_lo_u32 v3, s10, v5 +; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v2 +; TONGA-NEXT: v_add_u32_e32 v3, vcc, v1, v3 +; TONGA-NEXT: v_mul_hi_u32 v6, v5, v0 +; TONGA-NEXT: v_mad_u64_u32 v[1:2], s[8:9], v5, v3, 0 +; TONGA-NEXT: v_add_u32_e32 v6, vcc, v6, v1 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v4, v0, 0 +; TONGA-NEXT: v_addc_u32_e32 v7, vcc, 0, v2, vcc +; TONGA-NEXT: 
v_mad_u64_u32 v[2:3], s[8:9], v4, v3, 0 +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v6, v0 +; TONGA-NEXT: v_addc_u32_e32 v0, vcc, v7, v1, vcc +; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v2 +; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; TONGA-NEXT: v_add_u32_e32 v6, vcc, v5, v0 +; TONGA-NEXT: v_addc_u32_e32 v7, vcc, v4, v1, vcc +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s3, v6, 0 +; TONGA-NEXT: v_mul_lo_u32 v4, s3, v7 +; TONGA-NEXT: v_mul_lo_u32 v5, s10, v6 +; TONGA-NEXT: v_mul_hi_u32 v8, v6, v0 +; TONGA-NEXT: v_mad_u64_u32 v[2:3], s[8:9], v7, v0, 0 +; TONGA-NEXT: v_add_u32_e32 v1, vcc, v4, v1 +; TONGA-NEXT: v_add_u32_e32 v1, vcc, v5, v1 +; TONGA-NEXT: v_mad_u64_u32 v[4:5], s[8:9], v6, v1, 0 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v7, v1, 0 +; TONGA-NEXT: v_add_u32_e32 v4, vcc, v8, v4 +; TONGA-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc +; TONGA-NEXT: v_add_u32_e32 v2, vcc, v4, v2 +; TONGA-NEXT: v_addc_u32_e32 v2, vcc, v5, v3, vcc +; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v2, v0 +; TONGA-NEXT: s_ashr_i32 s10, s5, 31 +; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; TONGA-NEXT: s_add_u32 s8, s4, s10 +; TONGA-NEXT: v_add_u32_e32 v2, vcc, v6, v0 +; TONGA-NEXT: s_mov_b32 s11, s10 +; TONGA-NEXT: s_addc_u32 s9, s5, s10 +; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v7, v1, vcc +; TONGA-NEXT: s_xor_b64 s[12:13], s[8:9], s[10:11] +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s12, v3, 0 +; TONGA-NEXT: v_mul_hi_u32 v4, s12, v2 +; TONGA-NEXT: v_readfirstlane_b32 s3, v1 +; TONGA-NEXT: v_readfirstlane_b32 s5, v0 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s13, v3, 0 +; TONGA-NEXT: v_mad_u64_u32 v[2:3], s[8:9], s13, v2, 0 +; TONGA-NEXT: v_readfirstlane_b32 s14, v4 +; TONGA-NEXT: s_add_u32 s5, s14, s5 +; TONGA-NEXT: s_addc_u32 s3, 0, s3 +; TONGA-NEXT: v_readfirstlane_b32 s14, v2 +; TONGA-NEXT: v_readfirstlane_b32 s9, v3 +; TONGA-NEXT: s_add_u32 s5, s5, s14 +; TONGA-NEXT: v_readfirstlane_b32 s8, v1 +; TONGA-NEXT: s_addc_u32 s3, s3, s9 +; TONGA-NEXT: s_addc_u32 s5, s8, 0 +; TONGA-NEXT: v_readfirstlane_b32 s8, v0 +; TONGA-NEXT: s_add_u32 s3, s3, s8 +; TONGA-NEXT: v_mov_b32_e32 v0, s3 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s6, v0, 0 +; TONGA-NEXT: s_addc_u32 s5, 0, s5 +; TONGA-NEXT: s_mul_i32 s5, s6, s5 +; TONGA-NEXT: v_readfirstlane_b32 s14, v1 +; TONGA-NEXT: s_add_i32 s5, s14, s5 +; TONGA-NEXT: s_mul_i32 s3, s7, s3 +; TONGA-NEXT: s_add_i32 s5, s5, s3 +; TONGA-NEXT: s_sub_i32 s3, s13, s5 +; TONGA-NEXT: v_readfirstlane_b32 s14, v0 +; TONGA-NEXT: s_sub_u32 s12, s12, s14 +; TONGA-NEXT: s_cselect_b64 s[14:15], -1, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0 +; TONGA-NEXT: s_subb_u32 s3, s3, s7 +; TONGA-NEXT: s_sub_u32 s18, s12, s6 +; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0 +; TONGA-NEXT: s_subb_u32 s19, s3, 0 +; TONGA-NEXT: s_cmp_ge_u32 s19, s7 +; TONGA-NEXT: s_cselect_b32 s20, -1, 0 +; TONGA-NEXT: s_cmp_ge_u32 s18, s6 +; TONGA-NEXT: s_cselect_b32 s21, -1, 0 +; TONGA-NEXT: s_cmp_eq_u32 s19, s7 +; TONGA-NEXT: s_cselect_b32 s20, s21, s20 +; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0 +; TONGA-NEXT: s_subb_u32 s3, s3, s7 +; TONGA-NEXT: s_sub_u32 s21, s18, s6 +; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0 +; TONGA-NEXT: s_subb_u32 s3, s3, 0 +; TONGA-NEXT: s_cmp_lg_u32 s20, 0 +; TONGA-NEXT: s_cselect_b32 s16, s21, s18 +; TONGA-NEXT: s_cselect_b32 s3, s3, s19 +; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0 +; TONGA-NEXT: s_subb_u32 s5, s13, s5 +; TONGA-NEXT: 
s_cmp_ge_u32 s5, s7 +; TONGA-NEXT: s_cselect_b32 s13, -1, 0 +; TONGA-NEXT: s_cmp_ge_u32 s12, s6 +; TONGA-NEXT: s_cselect_b32 s6, -1, 0 +; TONGA-NEXT: s_cmp_eq_u32 s5, s7 +; TONGA-NEXT: s_cselect_b32 s6, s6, s13 +; TONGA-NEXT: s_cmp_lg_u32 s6, 0 +; TONGA-NEXT: s_cselect_b32 s7, s3, s5 +; TONGA-NEXT: s_cselect_b32 s6, s16, s12 +; TONGA-NEXT: s_xor_b64 s[6:7], s[6:7], s[10:11] +; TONGA-NEXT: s_sub_u32 s6, s6, s10 +; TONGA-NEXT: s_subb_u32 s7, s7, s10 +; TONGA-NEXT: s_cbranch_execnz .LBB8_4 ; TONGA-NEXT: .LBB8_2: -; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v2 -; TONGA-NEXT: v_sub_u32_e32 v3, vcc, 0, v2 -; TONGA-NEXT: v_mov_b32_e32 v4, 0 -; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 -; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1 -; TONGA-NEXT: v_mul_lo_u32 v3, v3, v1 -; TONGA-NEXT: v_mul_hi_u32 v3, v1, v3 -; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v3 +; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s2 +; TONGA-NEXT: s_sub_i32 s3, 0, s2 +; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0 +; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 +; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0 +; TONGA-NEXT: v_mul_lo_u32 v1, s3, v0 ; TONGA-NEXT: v_mul_hi_u32 v1, v0, v1 -; TONGA-NEXT: v_mul_lo_u32 v1, v1, v2 -; TONGA-NEXT: v_sub_u32_e32 v0, vcc, v0, v1 -; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1 +; TONGA-NEXT: v_mul_hi_u32 v0, s4, v0 +; TONGA-NEXT: v_mul_lo_u32 v0, v0, s2 +; TONGA-NEXT: v_sub_u32_e32 v0, vcc, s4, v0 +; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s2, v0 +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s2, v0 ; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v0, v2 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; TONGA-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc +; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s2, v0 +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s2, v0 +; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; TONGA-NEXT: v_mov_b32_e32 v1, 0 +; TONGA-NEXT: s_branch .LBB8_5 ; TONGA-NEXT: .LBB8_3: -; TONGA-NEXT: v_mov_b32_e32 v0, s4 -; TONGA-NEXT: v_mov_b32_e32 v1, s5 -; TONGA-NEXT: flat_store_dwordx2 v[0:1], v[3:4] -; TONGA-NEXT: s_endpgm -; TONGA-NEXT: .LBB8_4: -; TONGA-NEXT: ; implicit-def: $vgpr3_vgpr4 +; TONGA-NEXT: ; implicit-def: $sgpr6_sgpr7 ; TONGA-NEXT: s_branch .LBB8_2 +; TONGA-NEXT: .LBB8_4: +; TONGA-NEXT: v_mov_b32_e32 v0, s6 +; TONGA-NEXT: v_mov_b32_e32 v1, s7 +; TONGA-NEXT: .LBB8_5: +; TONGA-NEXT: v_mov_b32_e32 v2, s0 +; TONGA-NEXT: v_mov_b32_e32 v3, s1 +; TONGA-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; TONGA-NEXT: s_endpgm ; ; EG-LABEL: srem_i64: ; EG: ; %bb.0: @@ -2684,35 +2702,35 @@ define amdgpu_kernel void @srem_i64_4(ptr addrspace(1) %out, ptr addrspace(1) %i define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GCN-LABEL: srem_v2i64: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GCN-NEXT: v_mov_b32_e32 v8, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[10:11] offset:16 -; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[10:11] +; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[2:3] offset:16 +; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[2:3] ; GCN-NEXT: s_waitcnt vmcnt(1) -; GCN-NEXT: v_readfirstlane_b32 s11, v1 -; GCN-NEXT: v_readfirstlane_b32 s10, v0 +; GCN-NEXT: v_readfirstlane_b32 s9, v1 +; GCN-NEXT: v_readfirstlane_b32 s8, v0 ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_readfirstlane_b32 s13, v5 -; GCN-NEXT: v_readfirstlane_b32 s12, v4 -; 
GCN-NEXT: s_or_b64 s[0:1], s[12:13], s[10:11] -; GCN-NEXT: s_mov_b32 s0, 0 -; GCN-NEXT: v_readfirstlane_b32 s5, v3 -; GCN-NEXT: v_readfirstlane_b32 s4, v2 -; GCN-NEXT: v_readfirstlane_b32 s7, v7 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: v_readfirstlane_b32 s6, v6 -; GCN-NEXT: s_cbranch_scc0 .LBB10_7 +; GCN-NEXT: v_readfirstlane_b32 s11, v5 +; GCN-NEXT: v_readfirstlane_b32 s10, v4 +; GCN-NEXT: s_or_b64 s[6:7], s[10:11], s[8:9] +; GCN-NEXT: s_mov_b32 s6, 0 +; GCN-NEXT: v_readfirstlane_b32 s3, v3 +; GCN-NEXT: v_readfirstlane_b32 s2, v2 +; GCN-NEXT: v_readfirstlane_b32 s5, v7 +; GCN-NEXT: s_cmp_lg_u64 s[6:7], 0 +; GCN-NEXT: v_readfirstlane_b32 s4, v6 +; GCN-NEXT: s_cbranch_scc0 .LBB10_6 ; GCN-NEXT: ; %bb.1: -; GCN-NEXT: s_ashr_i32 s0, s11, 31 -; GCN-NEXT: s_add_u32 s2, s10, s0 -; GCN-NEXT: s_mov_b32 s1, s0 -; GCN-NEXT: s_addc_u32 s3, s11, s0 -; GCN-NEXT: s_xor_b64 s[16:17], s[2:3], s[0:1] -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s16 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, s17 -; GCN-NEXT: s_sub_u32 s0, 0, s16 -; GCN-NEXT: s_subb_u32 s1, 0, s17 +; GCN-NEXT: s_ashr_i32 s6, s9, 31 +; GCN-NEXT: s_add_u32 s12, s8, s6 +; GCN-NEXT: s_mov_b32 s7, s6 +; GCN-NEXT: s_addc_u32 s13, s9, s6 +; GCN-NEXT: s_xor_b64 s[6:7], s[12:13], s[6:7] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s7 +; GCN-NEXT: s_sub_u32 s9, 0, s6 +; GCN-NEXT: s_subb_u32 s16, 0, s7 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -2721,321 +2739,312 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 ; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_readfirstlane_b32 s2, v1 -; GCN-NEXT: v_readfirstlane_b32 s3, v0 -; GCN-NEXT: s_mul_i32 s11, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s19, s0, s3 -; GCN-NEXT: s_mul_i32 s18, s1, s3 -; GCN-NEXT: s_add_i32 s11, s19, s11 -; GCN-NEXT: s_add_i32 s11, s11, s18 -; GCN-NEXT: s_mul_i32 s20, s0, s3 -; GCN-NEXT: s_mul_hi_u32 s18, s3, s11 -; GCN-NEXT: s_mul_i32 s19, s3, s11 -; GCN-NEXT: s_mul_hi_u32 s3, s3, s20 -; GCN-NEXT: s_add_u32 s3, s3, s19 +; GCN-NEXT: v_readfirstlane_b32 s17, v1 +; GCN-NEXT: v_readfirstlane_b32 s14, v0 +; GCN-NEXT: s_mul_i32 s15, s9, s17 +; GCN-NEXT: s_mul_hi_u32 s19, s9, s14 +; GCN-NEXT: s_mul_i32 s18, s16, s14 +; GCN-NEXT: s_add_i32 s15, s19, s15 +; GCN-NEXT: s_add_i32 s15, s15, s18 +; GCN-NEXT: s_mul_i32 s20, s9, s14 +; GCN-NEXT: s_mul_i32 s19, s14, s15 +; GCN-NEXT: s_mul_hi_u32 s21, s14, s20 +; GCN-NEXT: s_mul_hi_u32 s18, s14, s15 +; GCN-NEXT: s_add_u32 s19, s21, s19 ; GCN-NEXT: s_addc_u32 s18, 0, s18 -; GCN-NEXT: s_mul_hi_u32 s21, s2, s20 -; GCN-NEXT: s_mul_i32 s20, s2, s20 -; GCN-NEXT: s_add_u32 s3, s3, s20 -; GCN-NEXT: s_mul_hi_u32 s19, s2, s11 -; GCN-NEXT: s_addc_u32 s3, s18, s21 -; GCN-NEXT: s_addc_u32 s18, s19, 0 -; GCN-NEXT: s_mul_i32 s11, s2, s11 -; GCN-NEXT: s_add_u32 s3, s3, s11 -; GCN-NEXT: s_addc_u32 s11, 0, s18 -; GCN-NEXT: v_add_co_u32_e32 v0, vcc, s3, v0 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_addc_u32 s2, s2, s11 -; GCN-NEXT: v_readfirstlane_b32 s11, v0 -; GCN-NEXT: s_mul_i32 s3, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s18, s0, s11 -; GCN-NEXT: s_add_i32 s3, s18, s3 -; GCN-NEXT: s_mul_i32 s1, s1, s11 -; GCN-NEXT: s_add_i32 s3, s3, s1 -; GCN-NEXT: s_mul_i32 s0, s0, s11 -; GCN-NEXT: s_mul_hi_u32 s18, s2, s0 -; GCN-NEXT: s_mul_i32 s19, s2, s0 -; GCN-NEXT: s_mul_i32 s21, s11, s3 -; GCN-NEXT: s_mul_hi_u32 s0, s11, s0 -; GCN-NEXT: s_mul_hi_u32 s20, s11, s3 
-; GCN-NEXT: s_add_u32 s0, s0, s21 -; GCN-NEXT: s_addc_u32 s11, 0, s20 -; GCN-NEXT: s_add_u32 s0, s0, s19 -; GCN-NEXT: s_mul_hi_u32 s1, s2, s3 -; GCN-NEXT: s_addc_u32 s0, s11, s18 -; GCN-NEXT: s_addc_u32 s1, s1, 0 -; GCN-NEXT: s_mul_i32 s3, s2, s3 -; GCN-NEXT: s_add_u32 s0, s0, s3 -; GCN-NEXT: s_addc_u32 s1, 0, s1 -; GCN-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_addc_u32 s2, s2, s1 -; GCN-NEXT: s_ashr_i32 s18, s13, 31 -; GCN-NEXT: s_add_u32 s0, s12, s18 -; GCN-NEXT: s_mov_b32 s19, s18 -; GCN-NEXT: s_addc_u32 s1, s13, s18 -; GCN-NEXT: s_xor_b64 s[20:21], s[0:1], s[18:19] -; GCN-NEXT: v_readfirstlane_b32 s3, v0 -; GCN-NEXT: s_mul_i32 s1, s20, s2 -; GCN-NEXT: s_mul_hi_u32 s11, s20, s3 -; GCN-NEXT: s_mul_hi_u32 s0, s20, s2 -; GCN-NEXT: s_add_u32 s1, s11, s1 -; GCN-NEXT: s_addc_u32 s0, 0, s0 -; GCN-NEXT: s_mul_hi_u32 s13, s21, s3 -; GCN-NEXT: s_mul_i32 s3, s21, s3 -; GCN-NEXT: s_add_u32 s1, s1, s3 -; GCN-NEXT: s_mul_hi_u32 s11, s21, s2 -; GCN-NEXT: s_addc_u32 s0, s0, s13 -; GCN-NEXT: s_addc_u32 s1, s11, 0 -; GCN-NEXT: s_mul_i32 s2, s21, s2 -; GCN-NEXT: s_add_u32 s0, s0, s2 -; GCN-NEXT: s_addc_u32 s1, 0, s1 -; GCN-NEXT: s_mul_i32 s1, s16, s1 -; GCN-NEXT: s_mul_hi_u32 s2, s16, s0 -; GCN-NEXT: s_add_i32 s1, s2, s1 -; GCN-NEXT: s_mul_i32 s2, s17, s0 -; GCN-NEXT: s_mul_i32 s0, s16, s0 -; GCN-NEXT: s_add_i32 s11, s1, s2 -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: s_sub_i32 s1, s21, s11 -; GCN-NEXT: v_sub_co_u32_e32 v0, vcc, s20, v0 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_subb_u32 s13, s1, s17 -; GCN-NEXT: v_subrev_co_u32_e64 v1, s[0:1], s16, v0 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: s_subb_u32 s19, s13, 0 -; GCN-NEXT: s_cmp_ge_u32 s19, s17 -; GCN-NEXT: s_cselect_b32 s20, -1, 0 -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s16, v1 -; GCN-NEXT: s_cmp_eq_u32 s19, s17 -; GCN-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[2:3] -; GCN-NEXT: v_mov_b32_e32 v3, s20 -; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: v_cndmask_b32_e64 v2, v3, v2, s[2:3] -; GCN-NEXT: s_subb_u32 s2, s13, s17 -; GCN-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s16, v1 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: s_subb_u32 s2, s2, 0 -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v2 -; GCN-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1] -; GCN-NEXT: v_mov_b32_e32 v2, s19 -; GCN-NEXT: v_mov_b32_e32 v3, s2 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1] -; GCN-NEXT: s_subb_u32 s0, s21, s11 -; GCN-NEXT: s_cmp_ge_u32 s0, s17 -; GCN-NEXT: s_cselect_b32 s1, -1, 0 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s16, v0 -; GCN-NEXT: s_cmp_eq_u32 s0, s17 -; GCN-NEXT: v_cndmask_b32_e64 v3, 0, -1, vcc -; GCN-NEXT: v_mov_b32_e32 v4, s1 -; GCN-NEXT: s_cselect_b64 vcc, -1, 0 -; GCN-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; GCN-NEXT: v_mov_b32_e32 v4, s0 -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc -; GCN-NEXT: v_xor_b32_e32 v0, s18, v0 -; GCN-NEXT: v_xor_b32_e32 v1, s18, v2 -; GCN-NEXT: v_mov_b32_e32 v2, s18 -; GCN-NEXT: v_subrev_co_u32_e32 v0, vcc, s18, v0 -; GCN-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc +; GCN-NEXT: s_mul_hi_u32 s22, s17, s20 +; GCN-NEXT: s_mul_i32 s20, s17, s20 +; GCN-NEXT: s_add_u32 s19, s19, s20 +; GCN-NEXT: s_mul_hi_u32 s21, s17, s15 +; GCN-NEXT: s_addc_u32 s18, s18, s22 +; GCN-NEXT: s_addc_u32 s19, s21, 0 +; GCN-NEXT: s_mul_i32 s15, s17, s15 +; GCN-NEXT: s_add_u32 s15, s18, s15 +; GCN-NEXT: s_addc_u32 s18, 0, s19 +; GCN-NEXT: s_add_u32 
s19, s14, s15 +; GCN-NEXT: s_cselect_b64 s[14:15], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[14:15], 0 +; GCN-NEXT: s_addc_u32 s17, s17, s18 +; GCN-NEXT: s_mul_i32 s14, s9, s17 +; GCN-NEXT: s_mul_hi_u32 s15, s9, s19 +; GCN-NEXT: s_add_i32 s14, s15, s14 +; GCN-NEXT: s_mul_i32 s16, s16, s19 +; GCN-NEXT: s_add_i32 s14, s14, s16 +; GCN-NEXT: s_mul_i32 s9, s9, s19 +; GCN-NEXT: s_mul_hi_u32 s16, s17, s9 +; GCN-NEXT: s_mul_i32 s18, s17, s9 +; GCN-NEXT: s_mul_i32 s21, s19, s14 +; GCN-NEXT: s_mul_hi_u32 s9, s19, s9 +; GCN-NEXT: s_mul_hi_u32 s20, s19, s14 +; GCN-NEXT: s_add_u32 s9, s9, s21 +; GCN-NEXT: s_addc_u32 s20, 0, s20 +; GCN-NEXT: s_add_u32 s9, s9, s18 +; GCN-NEXT: s_mul_hi_u32 s15, s17, s14 +; GCN-NEXT: s_addc_u32 s9, s20, s16 +; GCN-NEXT: s_addc_u32 s15, s15, 0 +; GCN-NEXT: s_mul_i32 s14, s17, s14 +; GCN-NEXT: s_add_u32 s9, s9, s14 +; GCN-NEXT: s_addc_u32 s16, 0, s15 +; GCN-NEXT: s_add_u32 s9, s19, s9 +; GCN-NEXT: s_cselect_b64 s[14:15], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[14:15], 0 +; GCN-NEXT: s_addc_u32 s18, s17, s16 +; GCN-NEXT: s_ashr_i32 s14, s11, 31 +; GCN-NEXT: s_add_u32 s16, s10, s14 +; GCN-NEXT: s_mov_b32 s15, s14 +; GCN-NEXT: s_addc_u32 s17, s11, s14 +; GCN-NEXT: s_xor_b64 s[16:17], s[16:17], s[14:15] +; GCN-NEXT: s_mul_i32 s19, s16, s18 +; GCN-NEXT: s_mul_hi_u32 s20, s16, s9 +; GCN-NEXT: s_mul_hi_u32 s11, s16, s18 +; GCN-NEXT: s_add_u32 s19, s20, s19 +; GCN-NEXT: s_addc_u32 s11, 0, s11 +; GCN-NEXT: s_mul_hi_u32 s21, s17, s9 +; GCN-NEXT: s_mul_i32 s9, s17, s9 +; GCN-NEXT: s_add_u32 s9, s19, s9 +; GCN-NEXT: s_mul_hi_u32 s20, s17, s18 +; GCN-NEXT: s_addc_u32 s9, s11, s21 +; GCN-NEXT: s_addc_u32 s11, s20, 0 +; GCN-NEXT: s_mul_i32 s18, s17, s18 +; GCN-NEXT: s_add_u32 s9, s9, s18 +; GCN-NEXT: s_addc_u32 s11, 0, s11 +; GCN-NEXT: s_mul_i32 s11, s6, s11 +; GCN-NEXT: s_mul_hi_u32 s18, s6, s9 +; GCN-NEXT: s_add_i32 s11, s18, s11 +; GCN-NEXT: s_mul_i32 s18, s7, s9 +; GCN-NEXT: s_add_i32 s11, s11, s18 +; GCN-NEXT: s_sub_i32 s20, s17, s11 +; GCN-NEXT: s_mul_i32 s9, s6, s9 +; GCN-NEXT: s_sub_u32 s9, s16, s9 +; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0 +; GCN-NEXT: s_subb_u32 s16, s20, s7 +; GCN-NEXT: s_sub_u32 s22, s9, s6 +; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0 +; GCN-NEXT: s_subb_u32 s23, s16, 0 +; GCN-NEXT: s_cmp_ge_u32 s23, s7 +; GCN-NEXT: s_cselect_b32 s24, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s22, s6 +; GCN-NEXT: s_cselect_b32 s25, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s23, s7 +; GCN-NEXT: s_cselect_b32 s24, s25, s24 +; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0 +; GCN-NEXT: s_subb_u32 s16, s16, s7 +; GCN-NEXT: s_sub_u32 s25, s22, s6 +; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0 +; GCN-NEXT: s_subb_u32 s16, s16, 0 +; GCN-NEXT: s_cmp_lg_u32 s24, 0 +; GCN-NEXT: s_cselect_b32 s20, s25, s22 +; GCN-NEXT: s_cselect_b32 s16, s16, s23 +; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0 +; GCN-NEXT: s_subb_u32 s11, s17, s11 +; GCN-NEXT: s_cmp_ge_u32 s11, s7 +; GCN-NEXT: s_cselect_b32 s17, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s9, s6 +; GCN-NEXT: s_cselect_b32 s6, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s11, s7 +; GCN-NEXT: s_cselect_b32 s6, s6, s17 +; GCN-NEXT: s_cmp_lg_u32 s6, 0 +; GCN-NEXT: s_cselect_b32 s7, s16, s11 +; GCN-NEXT: s_cselect_b32 s6, s20, s9 +; GCN-NEXT: s_xor_b64 s[6:7], s[6:7], s[14:15] +; GCN-NEXT: s_sub_u32 s6, s6, s14 +; GCN-NEXT: s_subb_u32 s7, s7, s14 ; GCN-NEXT: s_cbranch_execnz .LBB10_3 ; GCN-NEXT: .LBB10_2: -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s10 -; GCN-NEXT: s_sub_i32 s0, 0, s10 -; GCN-NEXT: s_mov_b32 s1, 0 +; 
GCN-NEXT: v_cvt_f32_u32_e32 v0, s8 +; GCN-NEXT: s_sub_i32 s6, 0, s8 +; GCN-NEXT: s_mov_b32 s7, 0 ; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_readfirstlane_b32 s2, v0 -; GCN-NEXT: s_mul_i32 s0, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s0, s2, s0 -; GCN-NEXT: s_add_i32 s2, s2, s0 -; GCN-NEXT: s_mul_hi_u32 s0, s12, s2 -; GCN-NEXT: s_mul_i32 s0, s0, s10 -; GCN-NEXT: s_sub_i32 s0, s12, s0 -; GCN-NEXT: s_sub_i32 s2, s0, s10 -; GCN-NEXT: s_cmp_ge_u32 s0, s10 -; GCN-NEXT: s_cselect_b32 s0, s2, s0 -; GCN-NEXT: s_sub_i32 s2, s0, s10 -; GCN-NEXT: s_cmp_ge_u32 s0, s10 -; GCN-NEXT: s_cselect_b32 s0, s2, s0 -; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: v_mov_b32_e32 v1, s1 +; GCN-NEXT: v_readfirstlane_b32 s9, v0 +; GCN-NEXT: s_mul_i32 s6, s6, s9 +; GCN-NEXT: s_mul_hi_u32 s6, s9, s6 +; GCN-NEXT: s_add_i32 s9, s9, s6 +; GCN-NEXT: s_mul_hi_u32 s6, s10, s9 +; GCN-NEXT: s_mul_i32 s6, s6, s8 +; GCN-NEXT: s_sub_i32 s6, s10, s6 +; GCN-NEXT: s_sub_i32 s9, s6, s8 +; GCN-NEXT: s_cmp_ge_u32 s6, s8 +; GCN-NEXT: s_cselect_b32 s6, s9, s6 +; GCN-NEXT: s_sub_i32 s9, s6, s8 +; GCN-NEXT: s_cmp_ge_u32 s6, s8 +; GCN-NEXT: s_cselect_b32 s6, s9, s6 ; GCN-NEXT: .LBB10_3: -; GCN-NEXT: s_or_b64 s[0:1], s[6:7], s[4:5] -; GCN-NEXT: s_mov_b32 s0, 0 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: s_cbranch_scc0 .LBB10_8 +; GCN-NEXT: s_or_b64 s[8:9], s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s8, 0 +; GCN-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GCN-NEXT: s_cbranch_scc0 .LBB10_7 ; GCN-NEXT: ; %bb.4: -; GCN-NEXT: s_ashr_i32 s0, s5, 31 -; GCN-NEXT: s_add_u32 s2, s4, s0 -; GCN-NEXT: s_mov_b32 s1, s0 -; GCN-NEXT: s_addc_u32 s3, s5, s0 -; GCN-NEXT: s_xor_b64 s[12:13], s[2:3], s[0:1] -; GCN-NEXT: v_cvt_f32_u32_e32 v2, s12 -; GCN-NEXT: v_cvt_f32_u32_e32 v3, s13 -; GCN-NEXT: s_sub_u32 s0, 0, s12 -; GCN-NEXT: s_subb_u32 s1, 0, s13 -; GCN-NEXT: v_madmk_f32 v2, v3, 0x4f800000, v2 -; GCN-NEXT: v_rcp_f32_e32 v2, v2 -; GCN-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2 -; GCN-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2 -; GCN-NEXT: v_trunc_f32_e32 v3, v3 -; GCN-NEXT: v_madmk_f32 v2, v3, 0xcf800000, v2 -; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3 -; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2 -; GCN-NEXT: v_readfirstlane_b32 s2, v3 -; GCN-NEXT: v_readfirstlane_b32 s3, v2 -; GCN-NEXT: s_mul_i32 s5, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s15, s0, s3 -; GCN-NEXT: s_mul_i32 s14, s1, s3 -; GCN-NEXT: s_add_i32 s5, s15, s5 -; GCN-NEXT: s_add_i32 s5, s5, s14 -; GCN-NEXT: s_mul_i32 s16, s0, s3 -; GCN-NEXT: s_mul_hi_u32 s14, s3, s5 -; GCN-NEXT: s_mul_i32 s15, s3, s5 -; GCN-NEXT: s_mul_hi_u32 s3, s3, s16 -; GCN-NEXT: s_add_u32 s3, s3, s15 -; GCN-NEXT: s_addc_u32 s14, 0, s14 -; GCN-NEXT: s_mul_hi_u32 s17, s2, s16 -; GCN-NEXT: s_mul_i32 s16, s2, s16 +; GCN-NEXT: s_ashr_i32 s8, s3, 31 +; GCN-NEXT: s_add_u32 s10, s2, s8 +; GCN-NEXT: s_mov_b32 s9, s8 +; GCN-NEXT: s_addc_u32 s11, s3, s8 +; GCN-NEXT: s_xor_b64 s[10:11], s[10:11], s[8:9] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s10 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s11 +; GCN-NEXT: s_sub_u32 s3, 0, s10 +; GCN-NEXT: s_subb_u32 s14, 0, s11 +; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 +; GCN-NEXT: v_rcp_f32_e32 v0, v0 +; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 +; GCN-NEXT: v_trunc_f32_e32 v1, v1 +; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_readfirstlane_b32 s15, v1 +; GCN-NEXT: v_readfirstlane_b32 s12, v0 +; GCN-NEXT: s_mul_i32 s13, s3, s15 +; 
GCN-NEXT: s_mul_hi_u32 s17, s3, s12 +; GCN-NEXT: s_mul_i32 s16, s14, s12 +; GCN-NEXT: s_add_i32 s13, s17, s13 +; GCN-NEXT: s_add_i32 s13, s13, s16 +; GCN-NEXT: s_mul_i32 s18, s3, s12 +; GCN-NEXT: s_mul_i32 s17, s12, s13 +; GCN-NEXT: s_mul_hi_u32 s19, s12, s18 +; GCN-NEXT: s_mul_hi_u32 s16, s12, s13 +; GCN-NEXT: s_add_u32 s17, s19, s17 +; GCN-NEXT: s_addc_u32 s16, 0, s16 +; GCN-NEXT: s_mul_hi_u32 s20, s15, s18 +; GCN-NEXT: s_mul_i32 s18, s15, s18 +; GCN-NEXT: s_add_u32 s17, s17, s18 +; GCN-NEXT: s_mul_hi_u32 s19, s15, s13 +; GCN-NEXT: s_addc_u32 s16, s16, s20 +; GCN-NEXT: s_addc_u32 s17, s19, 0 +; GCN-NEXT: s_mul_i32 s13, s15, s13 +; GCN-NEXT: s_add_u32 s13, s16, s13 +; GCN-NEXT: s_addc_u32 s16, 0, s17 +; GCN-NEXT: s_add_u32 s17, s12, s13 +; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GCN-NEXT: s_addc_u32 s15, s15, s16 +; GCN-NEXT: s_mul_i32 s12, s3, s15 +; GCN-NEXT: s_mul_hi_u32 s13, s3, s17 +; GCN-NEXT: s_add_i32 s12, s13, s12 +; GCN-NEXT: s_mul_i32 s14, s14, s17 +; GCN-NEXT: s_add_i32 s12, s12, s14 +; GCN-NEXT: s_mul_i32 s3, s3, s17 +; GCN-NEXT: s_mul_hi_u32 s14, s15, s3 +; GCN-NEXT: s_mul_i32 s16, s15, s3 +; GCN-NEXT: s_mul_i32 s19, s17, s12 +; GCN-NEXT: s_mul_hi_u32 s3, s17, s3 +; GCN-NEXT: s_mul_hi_u32 s18, s17, s12 +; GCN-NEXT: s_add_u32 s3, s3, s19 +; GCN-NEXT: s_addc_u32 s18, 0, s18 ; GCN-NEXT: s_add_u32 s3, s3, s16 -; GCN-NEXT: s_mul_hi_u32 s15, s2, s5 -; GCN-NEXT: s_addc_u32 s3, s14, s17 -; GCN-NEXT: s_addc_u32 s14, s15, 0 -; GCN-NEXT: s_mul_i32 s5, s2, s5 -; GCN-NEXT: s_add_u32 s3, s3, s5 -; GCN-NEXT: s_addc_u32 s5, 0, s14 -; GCN-NEXT: v_add_co_u32_e32 v2, vcc, s3, v2 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_addc_u32 s2, s2, s5 -; GCN-NEXT: v_readfirstlane_b32 s5, v2 -; GCN-NEXT: s_mul_i32 s3, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s14, s0, s5 -; GCN-NEXT: s_add_i32 s3, s14, s3 -; GCN-NEXT: s_mul_i32 s1, s1, s5 -; GCN-NEXT: s_add_i32 s3, s3, s1 -; GCN-NEXT: s_mul_i32 s0, s0, s5 -; GCN-NEXT: s_mul_hi_u32 s14, s2, s0 -; GCN-NEXT: s_mul_i32 s15, s2, s0 -; GCN-NEXT: s_mul_i32 s17, s5, s3 -; GCN-NEXT: s_mul_hi_u32 s0, s5, s0 -; GCN-NEXT: s_mul_hi_u32 s16, s5, s3 -; GCN-NEXT: s_add_u32 s0, s0, s17 -; GCN-NEXT: s_addc_u32 s5, 0, s16 -; GCN-NEXT: s_add_u32 s0, s0, s15 -; GCN-NEXT: s_mul_hi_u32 s1, s2, s3 -; GCN-NEXT: s_addc_u32 s0, s5, s14 -; GCN-NEXT: s_addc_u32 s1, s1, 0 -; GCN-NEXT: s_mul_i32 s3, s2, s3 -; GCN-NEXT: s_add_u32 s0, s0, s3 -; GCN-NEXT: s_addc_u32 s1, 0, s1 -; GCN-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_addc_u32 s2, s2, s1 -; GCN-NEXT: s_ashr_i32 s14, s7, 31 -; GCN-NEXT: s_add_u32 s0, s6, s14 -; GCN-NEXT: s_mov_b32 s15, s14 -; GCN-NEXT: s_addc_u32 s1, s7, s14 -; GCN-NEXT: s_xor_b64 s[16:17], s[0:1], s[14:15] -; GCN-NEXT: v_readfirstlane_b32 s3, v2 -; GCN-NEXT: s_mul_i32 s1, s16, s2 -; GCN-NEXT: s_mul_hi_u32 s5, s16, s3 -; GCN-NEXT: s_mul_hi_u32 s0, s16, s2 -; GCN-NEXT: s_add_u32 s1, s5, s1 -; GCN-NEXT: s_addc_u32 s0, 0, s0 -; GCN-NEXT: s_mul_hi_u32 s7, s17, s3 -; GCN-NEXT: s_mul_i32 s3, s17, s3 -; GCN-NEXT: s_add_u32 s1, s1, s3 -; GCN-NEXT: s_mul_hi_u32 s5, s17, s2 -; GCN-NEXT: s_addc_u32 s0, s0, s7 -; GCN-NEXT: s_addc_u32 s1, s5, 0 -; GCN-NEXT: s_mul_i32 s2, s17, s2 -; GCN-NEXT: s_add_u32 s0, s0, s2 -; GCN-NEXT: s_addc_u32 s1, 0, s1 -; GCN-NEXT: s_mul_i32 s1, s12, s1 -; GCN-NEXT: s_mul_hi_u32 s2, s12, s0 -; GCN-NEXT: s_add_i32 s1, s2, s1 -; GCN-NEXT: s_mul_i32 s2, s13, s0 -; GCN-NEXT: s_mul_i32 s0, s12, s0 -; GCN-NEXT: s_add_i32 s5, s1, s2 -; GCN-NEXT: v_mov_b32_e32 v2, s0 -; GCN-NEXT: 
s_sub_i32 s1, s17, s5 -; GCN-NEXT: v_sub_co_u32_e32 v2, vcc, s16, v2 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_subb_u32 s7, s1, s13 -; GCN-NEXT: v_subrev_co_u32_e64 v3, s[0:1], s12, v2 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: s_subb_u32 s15, s7, 0 -; GCN-NEXT: s_cmp_ge_u32 s15, s13 -; GCN-NEXT: s_cselect_b32 s16, -1, 0 -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v3 -; GCN-NEXT: s_cmp_eq_u32 s15, s13 -; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[2:3] -; GCN-NEXT: v_mov_b32_e32 v5, s16 -; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: v_cndmask_b32_e64 v4, v5, v4, s[2:3] -; GCN-NEXT: s_subb_u32 s2, s7, s13 -; GCN-NEXT: v_subrev_co_u32_e64 v5, s[0:1], s12, v3 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: s_subb_u32 s2, s2, 0 -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4 -; GCN-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[0:1] -; GCN-NEXT: v_mov_b32_e32 v4, s15 -; GCN-NEXT: v_mov_b32_e32 v5, s2 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[0:1] -; GCN-NEXT: s_subb_u32 s0, s17, s5 -; GCN-NEXT: s_cmp_ge_u32 s0, s13 -; GCN-NEXT: s_cselect_b32 s1, -1, 0 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v2 -; GCN-NEXT: s_cmp_eq_u32 s0, s13 -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GCN-NEXT: v_mov_b32_e32 v6, s1 -; GCN-NEXT: s_cselect_b64 vcc, -1, 0 -; GCN-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5 -; GCN-NEXT: v_mov_b32_e32 v6, s0 -; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; GCN-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc -; GCN-NEXT: v_xor_b32_e32 v2, s14, v2 -; GCN-NEXT: v_xor_b32_e32 v3, s14, v4 -; GCN-NEXT: v_mov_b32_e32 v4, s14 -; GCN-NEXT: v_subrev_co_u32_e32 v2, vcc, s14, v2 -; GCN-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc -; GCN-NEXT: s_cbranch_execnz .LBB10_6 +; GCN-NEXT: s_mul_hi_u32 s13, s15, s12 +; GCN-NEXT: s_addc_u32 s3, s18, s14 +; GCN-NEXT: s_addc_u32 s13, s13, 0 +; GCN-NEXT: s_mul_i32 s12, s15, s12 +; GCN-NEXT: s_add_u32 s3, s3, s12 +; GCN-NEXT: s_addc_u32 s14, 0, s13 +; GCN-NEXT: s_add_u32 s3, s17, s3 +; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GCN-NEXT: s_addc_u32 s16, s15, s14 +; GCN-NEXT: s_ashr_i32 s12, s5, 31 +; GCN-NEXT: s_add_u32 s14, s4, s12 +; GCN-NEXT: s_mov_b32 s13, s12 +; GCN-NEXT: s_addc_u32 s15, s5, s12 +; GCN-NEXT: s_xor_b64 s[14:15], s[14:15], s[12:13] +; GCN-NEXT: s_mul_i32 s17, s14, s16 +; GCN-NEXT: s_mul_hi_u32 s18, s14, s3 +; GCN-NEXT: s_mul_hi_u32 s5, s14, s16 +; GCN-NEXT: s_add_u32 s17, s18, s17 +; GCN-NEXT: s_addc_u32 s5, 0, s5 +; GCN-NEXT: s_mul_hi_u32 s19, s15, s3 +; GCN-NEXT: s_mul_i32 s3, s15, s3 +; GCN-NEXT: s_add_u32 s3, s17, s3 +; GCN-NEXT: s_mul_hi_u32 s18, s15, s16 +; GCN-NEXT: s_addc_u32 s3, s5, s19 +; GCN-NEXT: s_addc_u32 s5, s18, 0 +; GCN-NEXT: s_mul_i32 s16, s15, s16 +; GCN-NEXT: s_add_u32 s3, s3, s16 +; GCN-NEXT: s_addc_u32 s5, 0, s5 +; GCN-NEXT: s_mul_i32 s5, s10, s5 +; GCN-NEXT: s_mul_hi_u32 s16, s10, s3 +; GCN-NEXT: s_add_i32 s5, s16, s5 +; GCN-NEXT: s_mul_i32 s16, s11, s3 +; GCN-NEXT: s_add_i32 s5, s5, s16 +; GCN-NEXT: s_sub_i32 s18, s15, s5 +; GCN-NEXT: s_mul_i32 s3, s10, s3 +; GCN-NEXT: s_sub_u32 s3, s14, s3 +; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_subb_u32 s14, s18, s11 +; GCN-NEXT: s_sub_u32 s20, s3, s10 +; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0 +; GCN-NEXT: s_subb_u32 s21, s14, 0 +; GCN-NEXT: s_cmp_ge_u32 s21, s11 +; GCN-NEXT: s_cselect_b32 s22, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s20, s10 +; 
GCN-NEXT: s_cselect_b32 s23, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s21, s11 +; GCN-NEXT: s_cselect_b32 s22, s23, s22 +; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0 +; GCN-NEXT: s_subb_u32 s14, s14, s11 +; GCN-NEXT: s_sub_u32 s23, s20, s10 +; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0 +; GCN-NEXT: s_subb_u32 s14, s14, 0 +; GCN-NEXT: s_cmp_lg_u32 s22, 0 +; GCN-NEXT: s_cselect_b32 s18, s23, s20 +; GCN-NEXT: s_cselect_b32 s14, s14, s21 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_subb_u32 s5, s15, s5 +; GCN-NEXT: s_cmp_ge_u32 s5, s11 +; GCN-NEXT: s_cselect_b32 s15, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s3, s10 +; GCN-NEXT: s_cselect_b32 s10, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s5, s11 +; GCN-NEXT: s_cselect_b32 s10, s10, s15 +; GCN-NEXT: s_cmp_lg_u32 s10, 0 +; GCN-NEXT: s_cselect_b32 s11, s14, s5 +; GCN-NEXT: s_cselect_b32 s10, s18, s3 +; GCN-NEXT: s_xor_b64 s[10:11], s[10:11], s[12:13] +; GCN-NEXT: s_sub_u32 s10, s10, s12 +; GCN-NEXT: s_subb_u32 s11, s11, s12 +; GCN-NEXT: s_cbranch_execnz .LBB10_8 ; GCN-NEXT: .LBB10_5: -; GCN-NEXT: v_cvt_f32_u32_e32 v2, s4 -; GCN-NEXT: s_sub_i32 s0, 0, s4 -; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2 -; GCN-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2 -; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2 -; GCN-NEXT: v_mul_lo_u32 v3, s0, v2 -; GCN-NEXT: v_mul_hi_u32 v3, v2, v3 -; GCN-NEXT: v_add_u32_e32 v2, v2, v3 -; GCN-NEXT: v_mul_hi_u32 v2, s6, v2 -; GCN-NEXT: v_mul_lo_u32 v2, v2, s4 -; GCN-NEXT: v_sub_u32_e32 v2, s6, v2 -; GCN-NEXT: v_subrev_u32_e32 v3, s4, v2 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s4, v2 -; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; GCN-NEXT: v_subrev_u32_e32 v3, s4, v2 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s4, v2 -; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2 +; GCN-NEXT: s_sub_i32 s3, 0, s2 ; GCN-NEXT: v_mov_b32_e32 v3, 0 +; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0 +; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_mul_lo_u32 v1, s3, v0 +; GCN-NEXT: v_mul_hi_u32 v1, v0, v1 +; GCN-NEXT: v_add_u32_e32 v0, v0, v1 +; GCN-NEXT: v_mul_hi_u32 v0, s4, v0 +; GCN-NEXT: v_mul_lo_u32 v0, v0, s2 +; GCN-NEXT: v_sub_u32_e32 v0, s4, v0 +; GCN-NEXT: v_subrev_u32_e32 v1, s2, v0 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s2, v0 +; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; GCN-NEXT: v_subrev_u32_e32 v1, s2, v0 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s2, v0 +; GCN-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc +; GCN-NEXT: s_branch .LBB10_9 ; GCN-NEXT: .LBB10_6: -; GCN-NEXT: v_mov_b32_e32 v4, 0 -; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[8:9] -; GCN-NEXT: s_endpgm -; GCN-NEXT: .LBB10_7: -; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GCN-NEXT: ; implicit-def: $sgpr6_sgpr7 ; GCN-NEXT: s_branch .LBB10_2 -; GCN-NEXT: .LBB10_8: +; GCN-NEXT: .LBB10_7: +; GCN-NEXT: ; implicit-def: $sgpr10_sgpr11 ; GCN-NEXT: s_branch .LBB10_5 +; GCN-NEXT: .LBB10_8: +; GCN-NEXT: v_mov_b32_e32 v2, s10 +; GCN-NEXT: v_mov_b32_e32 v3, s11 +; GCN-NEXT: .LBB10_9: +; GCN-NEXT: v_mov_b32_e32 v4, 0 +; GCN-NEXT: v_mov_b32_e32 v0, s6 +; GCN-NEXT: v_mov_b32_e32 v1, s7 +; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1] +; GCN-NEXT: s_endpgm ; ; TAHITI-LABEL: srem_v2i64: ; TAHITI: ; %bb.0: @@ -3097,7 +3106,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TAHITI-NEXT: v_mul_lo_u32 v12, v12, v9 ; TAHITI-NEXT: v_mul_lo_u32 v11, v11, v9 ; TAHITI-NEXT: v_add_i32_e32 v13, vcc, v13, v14 -; TAHITI-NEXT: v_add_i32_e32 v12, vcc, v13, v12 +; TAHITI-NEXT: v_add_i32_e32 v12, vcc, v12, v13 ; TAHITI-NEXT: 
v_mul_lo_u32 v15, v9, v12 ; TAHITI-NEXT: v_mul_hi_u32 v16, v9, v11 ; TAHITI-NEXT: v_mul_hi_u32 v17, v9, v12 @@ -3240,7 +3249,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TAHITI-NEXT: v_mul_lo_u32 v10, v10, v3 ; TAHITI-NEXT: v_mul_lo_u32 v5, v5, v3 ; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v11, v12 -; TAHITI-NEXT: v_add_i32_e32 v10, vcc, v11, v10 +; TAHITI-NEXT: v_add_i32_e32 v10, vcc, v10, v11 ; TAHITI-NEXT: v_mul_lo_u32 v13, v3, v10 ; TAHITI-NEXT: v_mul_hi_u32 v14, v3, v5 ; TAHITI-NEXT: v_mul_hi_u32 v15, v3, v10 @@ -3347,152 +3356,181 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-LABEL: srem_v2i64: ; TONGA: ; %bb.0: ; TONGA-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24 -; TONGA-NEXT: v_mov_b32_e32 v8, 0 ; TONGA-NEXT: s_waitcnt lgkmcnt(0) ; TONGA-NEXT: s_add_u32 s0, s6, 16 -; TONGA-NEXT: v_mov_b32_e32 v4, s6 ; TONGA-NEXT: s_addc_u32 s1, s7, 0 ; TONGA-NEXT: v_mov_b32_e32 v0, s0 -; TONGA-NEXT: v_mov_b32_e32 v5, s7 +; TONGA-NEXT: v_mov_b32_e32 v4, s6 ; TONGA-NEXT: v_mov_b32_e32 v1, s1 +; TONGA-NEXT: v_mov_b32_e32 v5, s7 ; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1] ; TONGA-NEXT: flat_load_dwordx4 v[4:7], v[4:5] +; TONGA-NEXT: s_waitcnt vmcnt(1) +; TONGA-NEXT: v_readfirstlane_b32 s1, v1 +; TONGA-NEXT: v_readfirstlane_b32 s0, v0 ; TONGA-NEXT: s_waitcnt vmcnt(0) -; TONGA-NEXT: v_or_b32_e32 v9, v5, v1 -; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9] -; TONGA-NEXT: s_cbranch_vccz .LBB10_7 +; TONGA-NEXT: v_readfirstlane_b32 s3, v5 +; TONGA-NEXT: v_readfirstlane_b32 s2, v4 +; TONGA-NEXT: s_or_b64 s[6:7], s[2:3], s[0:1] +; TONGA-NEXT: s_mov_b32 s6, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[6:7], 0 +; TONGA-NEXT: s_cbranch_scc0 .LBB10_3 ; TONGA-NEXT: ; %bb.1: -; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v1 -; TONGA-NEXT: v_add_u32_e32 v9, vcc, v0, v8 -; TONGA-NEXT: v_addc_u32_e32 v1, vcc, v1, v8, vcc -; TONGA-NEXT: v_xor_b32_e32 v14, v9, v8 -; TONGA-NEXT: v_xor_b32_e32 v1, v1, v8 -; TONGA-NEXT: v_cvt_f32_u32_e32 v8, v14 -; TONGA-NEXT: v_cvt_f32_u32_e32 v9, v1 -; TONGA-NEXT: v_sub_u32_e32 v15, vcc, 0, v14 -; TONGA-NEXT: v_subb_u32_e32 v16, vcc, 0, v1, vcc -; TONGA-NEXT: v_madmk_f32 v8, v9, 0x4f800000, v8 -; TONGA-NEXT: v_rcp_f32_e32 v8, v8 -; TONGA-NEXT: v_mul_f32_e32 v8, 0x5f7ffffc, v8 -; TONGA-NEXT: v_mul_f32_e32 v9, 0x2f800000, v8 -; TONGA-NEXT: v_trunc_f32_e32 v9, v9 -; TONGA-NEXT: v_madmk_f32 v8, v9, 0xcf800000, v8 -; TONGA-NEXT: v_cvt_u32_f32_e32 v12, v9 -; TONGA-NEXT: v_cvt_u32_f32_e32 v13, v8 -; TONGA-NEXT: v_mul_lo_u32 v10, v15, v12 -; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v15, v13, 0 -; TONGA-NEXT: v_mul_lo_u32 v11, v16, v13 -; TONGA-NEXT: v_add_u32_e32 v9, vcc, v9, v10 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v9, v11 -; TONGA-NEXT: v_mad_u64_u32 v[9:10], s[0:1], v13, v11, 0 -; TONGA-NEXT: v_mul_hi_u32 v17, v13, v8 -; TONGA-NEXT: v_add_u32_e32 v17, vcc, v17, v9 -; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v12, v8, 0 -; TONGA-NEXT: v_addc_u32_e32 v18, vcc, 0, v10, vcc -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v12, v11, 0 -; TONGA-NEXT: v_add_u32_e32 v8, vcc, v17, v8 -; TONGA-NEXT: v_addc_u32_e32 v8, vcc, v18, v9, vcc -; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v11, vcc -; TONGA-NEXT: v_add_u32_e32 v8, vcc, v8, v10 -; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc -; TONGA-NEXT: v_add_u32_e32 v17, vcc, v13, v8 -; TONGA-NEXT: v_addc_u32_e32 v18, vcc, v12, v9, vcc -; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v15, v17, 0 -; TONGA-NEXT: v_mul_lo_u32 v12, v15, v18 -; TONGA-NEXT: v_mul_lo_u32 v13, v16, v17 -; TONGA-NEXT: 
v_mul_hi_u32 v15, v17, v8 -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v18, v8, 0 -; TONGA-NEXT: v_add_u32_e32 v9, vcc, v12, v9 -; TONGA-NEXT: v_add_u32_e32 v9, vcc, v9, v13 -; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v17, v9, 0 -; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v18, v9, 0 -; TONGA-NEXT: v_add_u32_e32 v12, vcc, v15, v12 -; TONGA-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc -; TONGA-NEXT: v_add_u32_e32 v10, vcc, v12, v10 -; TONGA-NEXT: v_addc_u32_e32 v10, vcc, v13, v11, vcc -; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc -; TONGA-NEXT: v_add_u32_e32 v8, vcc, v10, v8 +; TONGA-NEXT: s_ashr_i32 s6, s1, 31 +; TONGA-NEXT: s_add_u32 s8, s0, s6 +; TONGA-NEXT: s_mov_b32 s7, s6 +; TONGA-NEXT: s_addc_u32 s9, s1, s6 +; TONGA-NEXT: s_xor_b64 s[6:7], s[8:9], s[6:7] +; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s6 +; TONGA-NEXT: v_cvt_f32_u32_e32 v1, s7 +; TONGA-NEXT: s_sub_u32 s1, 0, s6 +; TONGA-NEXT: s_subb_u32 s10, 0, s7 +; TONGA-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 +; TONGA-NEXT: v_rcp_f32_e32 v0, v0 +; TONGA-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 +; TONGA-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 +; TONGA-NEXT: v_trunc_f32_e32 v1, v1 +; TONGA-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 +; TONGA-NEXT: v_cvt_u32_f32_e32 v8, v1 +; TONGA-NEXT: v_cvt_u32_f32_e32 v9, v0 +; TONGA-NEXT: v_mul_lo_u32 v4, s1, v8 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s1, v9, 0 +; TONGA-NEXT: v_mul_lo_u32 v5, s10, v9 +; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v4 +; TONGA-NEXT: v_add_u32_e32 v11, vcc, v1, v5 +; TONGA-NEXT: v_mul_hi_u32 v10, v9, v0 +; TONGA-NEXT: v_mad_u64_u32 v[4:5], s[8:9], v9, v11, 0 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v8, v0, 0 +; TONGA-NEXT: v_add_u32_e32 v10, vcc, v10, v4 +; TONGA-NEXT: v_addc_u32_e32 v12, vcc, 0, v5, vcc +; TONGA-NEXT: v_mad_u64_u32 v[4:5], s[8:9], v8, v11, 0 +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v10, v0 +; TONGA-NEXT: v_addc_u32_e32 v0, vcc, v12, v1, vcc +; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v4 +; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; TONGA-NEXT: v_add_u32_e32 v10, vcc, v9, v0 +; TONGA-NEXT: v_addc_u32_e32 v11, vcc, v8, v1, vcc +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s1, v10, 0 +; TONGA-NEXT: v_mul_lo_u32 v8, s1, v11 +; TONGA-NEXT: v_mul_lo_u32 v9, s10, v10 +; TONGA-NEXT: v_mul_hi_u32 v12, v10, v0 +; TONGA-NEXT: v_mad_u64_u32 v[4:5], s[8:9], v11, v0, 0 +; TONGA-NEXT: v_add_u32_e32 v1, vcc, v8, v1 +; TONGA-NEXT: v_add_u32_e32 v1, vcc, v9, v1 +; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[8:9], v10, v1, 0 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], v11, v1, 0 +; TONGA-NEXT: v_add_u32_e32 v8, vcc, v12, v8 ; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc -; TONGA-NEXT: v_add_u32_e32 v10, vcc, v17, v8 -; TONGA-NEXT: v_addc_u32_e32 v11, vcc, v18, v9, vcc -; TONGA-NEXT: v_ashrrev_i32_e32 v12, 31, v5 -; TONGA-NEXT: v_add_u32_e32 v8, vcc, v4, v12 -; TONGA-NEXT: v_xor_b32_e32 v13, v8, v12 -; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v13, v11, 0 -; TONGA-NEXT: v_mul_hi_u32 v15, v13, v10 -; TONGA-NEXT: v_addc_u32_e32 v5, vcc, v5, v12, vcc -; TONGA-NEXT: v_xor_b32_e32 v5, v5, v12 -; TONGA-NEXT: v_add_u32_e32 v15, vcc, v15, v8 -; TONGA-NEXT: v_addc_u32_e32 v16, vcc, 0, v9, vcc -; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v5, v10, 0 -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v5, v11, 0 -; TONGA-NEXT: v_add_u32_e32 v8, vcc, v15, v8 -; TONGA-NEXT: v_addc_u32_e32 v8, vcc, v16, v9, vcc -; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v11, vcc -; TONGA-NEXT: v_add_u32_e32 v10, vcc, v8, v10 -; TONGA-NEXT: v_addc_u32_e32 v8, vcc, 
0, v9, vcc -; TONGA-NEXT: v_mul_lo_u32 v11, v14, v8 -; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[0:1], v14, v10, 0 -; TONGA-NEXT: v_mul_lo_u32 v10, v1, v10 -; TONGA-NEXT: v_add_u32_e32 v9, vcc, v11, v9 -; TONGA-NEXT: v_add_u32_e32 v9, vcc, v10, v9 -; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v5, v9 -; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v13, v8 -; TONGA-NEXT: v_subb_u32_e64 v10, s[0:1], v10, v1, vcc -; TONGA-NEXT: v_sub_u32_e64 v11, s[0:1], v8, v14 -; TONGA-NEXT: v_subbrev_u32_e64 v13, s[2:3], 0, v10, s[0:1] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v13, v1 -; TONGA-NEXT: v_cndmask_b32_e64 v15, 0, -1, s[2:3] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v11, v14 -; TONGA-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[2:3] -; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v13, v1 -; TONGA-NEXT: v_subb_u32_e64 v10, s[0:1], v10, v1, s[0:1] -; TONGA-NEXT: v_cndmask_b32_e64 v15, v15, v16, s[2:3] -; TONGA-NEXT: v_sub_u32_e64 v16, s[0:1], v11, v14 -; TONGA-NEXT: v_subb_u32_e32 v5, vcc, v5, v9, vcc -; TONGA-NEXT: v_subbrev_u32_e64 v10, s[0:1], 0, v10, s[0:1] -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v5, v1 -; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v15 -; TONGA-NEXT: v_cndmask_b32_e64 v9, 0, -1, vcc -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v8, v14 -; TONGA-NEXT: v_cndmask_b32_e64 v10, v13, v10, s[0:1] -; TONGA-NEXT: v_cndmask_b32_e64 v13, 0, -1, vcc -; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 -; TONGA-NEXT: v_cndmask_b32_e32 v1, v9, v13, vcc -; TONGA-NEXT: v_cndmask_b32_e64 v11, v11, v16, s[0:1] -; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1 -; TONGA-NEXT: v_cndmask_b32_e32 v1, v5, v10, vcc -; TONGA-NEXT: v_cndmask_b32_e32 v5, v8, v11, vcc -; TONGA-NEXT: v_xor_b32_e32 v5, v5, v12 -; TONGA-NEXT: v_xor_b32_e32 v1, v1, v12 -; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v5, v12 -; TONGA-NEXT: v_subb_u32_e32 v9, vcc, v1, v12, vcc -; TONGA-NEXT: s_cbranch_execnz .LBB10_3 +; TONGA-NEXT: v_add_u32_e32 v4, vcc, v8, v4 +; TONGA-NEXT: v_addc_u32_e32 v4, vcc, v9, v5, vcc +; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v4, v0 +; TONGA-NEXT: s_ashr_i32 s10, s3, 31 +; TONGA-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; TONGA-NEXT: s_add_u32 s8, s2, s10 +; TONGA-NEXT: v_add_u32_e32 v4, vcc, v10, v0 +; TONGA-NEXT: s_mov_b32 s11, s10 +; TONGA-NEXT: s_addc_u32 s9, s3, s10 +; TONGA-NEXT: v_addc_u32_e32 v5, vcc, v11, v1, vcc +; TONGA-NEXT: s_xor_b64 s[12:13], s[8:9], s[10:11] +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s12, v5, 0 +; TONGA-NEXT: v_mul_hi_u32 v8, s12, v4 +; TONGA-NEXT: v_readfirstlane_b32 s1, v1 +; TONGA-NEXT: v_readfirstlane_b32 s3, v0 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s13, v5, 0 +; TONGA-NEXT: v_mad_u64_u32 v[4:5], s[8:9], s13, v4, 0 +; TONGA-NEXT: v_readfirstlane_b32 s14, v8 +; TONGA-NEXT: s_add_u32 s3, s14, s3 +; TONGA-NEXT: s_addc_u32 s1, 0, s1 +; TONGA-NEXT: v_readfirstlane_b32 s14, v4 +; TONGA-NEXT: v_readfirstlane_b32 s9, v5 +; TONGA-NEXT: s_add_u32 s3, s3, s14 +; TONGA-NEXT: v_readfirstlane_b32 s8, v1 +; TONGA-NEXT: s_addc_u32 s1, s1, s9 +; TONGA-NEXT: s_addc_u32 s3, s8, 0 +; TONGA-NEXT: v_readfirstlane_b32 s8, v0 +; TONGA-NEXT: s_add_u32 s1, s1, s8 +; TONGA-NEXT: v_mov_b32_e32 v0, s1 +; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s6, v0, 0 +; TONGA-NEXT: s_addc_u32 s3, 0, s3 +; TONGA-NEXT: s_mul_i32 s3, s6, s3 +; TONGA-NEXT: v_readfirstlane_b32 s14, v1 +; TONGA-NEXT: s_add_i32 s3, s14, s3 +; TONGA-NEXT: s_mul_i32 s1, s7, s1 +; TONGA-NEXT: s_add_i32 s3, s3, s1 +; TONGA-NEXT: s_sub_i32 s1, s13, s3 +; TONGA-NEXT: v_readfirstlane_b32 s14, v0 +; TONGA-NEXT: s_sub_u32 s12, s12, s14 +; TONGA-NEXT: s_cselect_b64 
s[14:15], -1, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0 +; TONGA-NEXT: s_subb_u32 s1, s1, s7 +; TONGA-NEXT: s_sub_u32 s18, s12, s6 +; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0 +; TONGA-NEXT: s_subb_u32 s19, s1, 0 +; TONGA-NEXT: s_cmp_ge_u32 s19, s7 +; TONGA-NEXT: s_cselect_b32 s20, -1, 0 +; TONGA-NEXT: s_cmp_ge_u32 s18, s6 +; TONGA-NEXT: s_cselect_b32 s21, -1, 0 +; TONGA-NEXT: s_cmp_eq_u32 s19, s7 +; TONGA-NEXT: s_cselect_b32 s20, s21, s20 +; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0 +; TONGA-NEXT: s_subb_u32 s1, s1, s7 +; TONGA-NEXT: s_sub_u32 s21, s18, s6 +; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0 +; TONGA-NEXT: s_subb_u32 s1, s1, 0 +; TONGA-NEXT: s_cmp_lg_u32 s20, 0 +; TONGA-NEXT: s_cselect_b32 s16, s21, s18 +; TONGA-NEXT: s_cselect_b32 s1, s1, s19 +; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0 +; TONGA-NEXT: s_subb_u32 s3, s13, s3 +; TONGA-NEXT: s_cmp_ge_u32 s3, s7 +; TONGA-NEXT: s_cselect_b32 s13, -1, 0 +; TONGA-NEXT: s_cmp_ge_u32 s12, s6 +; TONGA-NEXT: s_cselect_b32 s6, -1, 0 +; TONGA-NEXT: s_cmp_eq_u32 s3, s7 +; TONGA-NEXT: s_cselect_b32 s6, s6, s13 +; TONGA-NEXT: s_cmp_lg_u32 s6, 0 +; TONGA-NEXT: s_cselect_b32 s7, s1, s3 +; TONGA-NEXT: s_cselect_b32 s6, s16, s12 +; TONGA-NEXT: s_xor_b64 s[6:7], s[6:7], s[10:11] +; TONGA-NEXT: s_sub_u32 s6, s6, s10 +; TONGA-NEXT: s_subb_u32 s7, s7, s10 +; TONGA-NEXT: s_cbranch_execnz .LBB10_4 ; TONGA-NEXT: .LBB10_2: -; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v0 -; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0 +; TONGA-NEXT: v_cvt_f32_u32_e32 v0, s0 +; TONGA-NEXT: s_sub_i32 s1, 0, s0 ; TONGA-NEXT: v_mov_b32_e32 v9, 0 -; TONGA-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; TONGA-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 -; TONGA-NEXT: v_cvt_u32_f32_e32 v1, v1 -; TONGA-NEXT: v_mul_lo_u32 v5, v5, v1 -; TONGA-NEXT: v_mul_hi_u32 v5, v1, v5 -; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v5 -; TONGA-NEXT: v_mul_hi_u32 v1, v4, v1 -; TONGA-NEXT: v_mul_lo_u32 v1, v1, v0 -; TONGA-NEXT: v_sub_u32_e32 v1, vcc, v4, v1 -; TONGA-NEXT: v_subrev_u32_e32 v4, vcc, v0, v1 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0 -; TONGA-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc -; TONGA-NEXT: v_sub_u32_e32 v4, vcc, v1, v0 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0 -; TONGA-NEXT: v_cndmask_b32_e32 v8, v1, v4, vcc +; TONGA-NEXT: v_rcp_iflag_f32_e32 v0, v0 +; TONGA-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 +; TONGA-NEXT: v_cvt_u32_f32_e32 v0, v0 +; TONGA-NEXT: v_mul_lo_u32 v1, s1, v0 +; TONGA-NEXT: v_mul_hi_u32 v1, v0, v1 +; TONGA-NEXT: v_add_u32_e32 v0, vcc, v0, v1 +; TONGA-NEXT: v_mul_hi_u32 v0, s2, v0 +; TONGA-NEXT: v_mul_lo_u32 v0, v0, s0 +; TONGA-NEXT: v_sub_u32_e32 v0, vcc, s2, v0 +; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s0, v0 +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s0, v0 +; TONGA-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, s0, v0 +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s0, v0 +; TONGA-NEXT: v_cndmask_b32_e32 v8, v0, v1, vcc +; TONGA-NEXT: s_branch .LBB10_5 ; TONGA-NEXT: .LBB10_3: +; TONGA-NEXT: ; implicit-def: $sgpr6_sgpr7 +; TONGA-NEXT: s_branch .LBB10_2 +; TONGA-NEXT: .LBB10_4: +; TONGA-NEXT: v_mov_b32_e32 v9, s7 +; TONGA-NEXT: v_mov_b32_e32 v8, s6 +; TONGA-NEXT: .LBB10_5: ; TONGA-NEXT: v_or_b32_e32 v1, v7, v3 ; TONGA-NEXT: v_mov_b32_e32 v0, 0 ; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1] -; TONGA-NEXT: s_cbranch_vccz .LBB10_8 -; TONGA-NEXT: ; %bb.4: +; TONGA-NEXT: s_cbranch_vccz .LBB10_9 +; TONGA-NEXT: ; %bb.6: ; TONGA-NEXT: v_ashrrev_i32_e32 v0, 31, v3 ; TONGA-NEXT: v_add_u32_e32 v1, vcc, v2, v0 ; 
TONGA-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc @@ -3534,7 +3572,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_mul_hi_u32 v13, v15, v0 ; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v16, v0, 0 ; TONGA-NEXT: v_add_u32_e32 v1, vcc, v10, v1 -; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v11 +; TONGA-NEXT: v_add_u32_e32 v1, vcc, v11, v1 ; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v15, v1, 0 ; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v16, v1, 0 ; TONGA-NEXT: v_add_u32_e32 v10, vcc, v13, v10 @@ -3598,8 +3636,8 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_xor_b32_e32 v1, v1, v11 ; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v0, v11 ; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v1, v11, vcc -; TONGA-NEXT: s_cbranch_execnz .LBB10_6 -; TONGA-NEXT: .LBB10_5: +; TONGA-NEXT: s_cbranch_execnz .LBB10_8 +; TONGA-NEXT: .LBB10_7: ; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v2 ; TONGA-NEXT: v_sub_u32_e32 v1, vcc, 0, v2 ; TONGA-NEXT: v_mov_b32_e32 v11, 0 @@ -3618,16 +3656,13 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0 ; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 ; TONGA-NEXT: v_cndmask_b32_e32 v10, v0, v1, vcc -; TONGA-NEXT: .LBB10_6: +; TONGA-NEXT: .LBB10_8: ; TONGA-NEXT: v_mov_b32_e32 v0, s4 ; TONGA-NEXT: v_mov_b32_e32 v1, s5 ; TONGA-NEXT: flat_store_dwordx4 v[0:1], v[8:11] ; TONGA-NEXT: s_endpgm -; TONGA-NEXT: .LBB10_7: -; TONGA-NEXT: ; implicit-def: $vgpr8_vgpr9 -; TONGA-NEXT: s_branch .LBB10_2 -; TONGA-NEXT: .LBB10_8: -; TONGA-NEXT: s_branch .LBB10_5 +; TONGA-NEXT: .LBB10_9: +; TONGA-NEXT: s_branch .LBB10_7 ; ; EG-LABEL: srem_v2i64: ; EG: ; %bb.0: @@ -4860,629 +4895,687 @@ define amdgpu_kernel void @srem_v2i64_4(ptr addrspace(1) %out, ptr addrspace(1) define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %in) { ; GCN-LABEL: srem_v4i64: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24 -; GCN-NEXT: v_mov_b32_e32 v8, 0 +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; GCN-NEXT: v_mov_b32_e32 v16, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: global_load_dwordx4 v[10:13], v8, s[10:11] offset:32 -; GCN-NEXT: global_load_dwordx4 v[14:17], v8, s[10:11] -; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[10:11] offset:48 -; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[10:11] offset:16 +; GCN-NEXT: global_load_dwordx4 v[0:3], v16, s[2:3] offset:48 +; GCN-NEXT: global_load_dwordx4 v[4:7], v16, s[2:3] offset:32 +; GCN-NEXT: global_load_dwordx4 v[8:11], v16, s[2:3] offset:16 +; GCN-NEXT: global_load_dwordx4 v[12:15], v16, s[2:3] ; GCN-NEXT: s_waitcnt vmcnt(3) +; GCN-NEXT: v_readfirstlane_b32 s3, v3 +; GCN-NEXT: s_waitcnt vmcnt(2) +; GCN-NEXT: v_readfirstlane_b32 s17, v5 +; GCN-NEXT: v_readfirstlane_b32 s16, v4 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_readfirstlane_b32 s19, v13 +; GCN-NEXT: v_readfirstlane_b32 s18, v12 +; GCN-NEXT: s_or_b64 s[6:7], s[18:19], s[16:17] +; GCN-NEXT: s_mov_b32 s6, 0 +; GCN-NEXT: v_readfirstlane_b32 s2, v2 +; GCN-NEXT: v_readfirstlane_b32 s9, v1 +; GCN-NEXT: v_readfirstlane_b32 s8, v0 +; GCN-NEXT: v_readfirstlane_b32 s13, v7 +; GCN-NEXT: v_readfirstlane_b32 s12, v6 ; GCN-NEXT: v_readfirstlane_b32 s5, v11 ; GCN-NEXT: v_readfirstlane_b32 s4, v10 -; GCN-NEXT: s_waitcnt vmcnt(2) -; GCN-NEXT: v_readfirstlane_b32 s7, v15 -; GCN-NEXT: v_readfirstlane_b32 s6, v14 -; GCN-NEXT: s_or_b64 s[0:1], s[6:7], s[4:5] -; GCN-NEXT: s_mov_b32 s0, 0 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: 
s_cbranch_scc0 .LBB12_13 +; GCN-NEXT: v_readfirstlane_b32 s11, v9 +; GCN-NEXT: v_readfirstlane_b32 s10, v8 +; GCN-NEXT: v_readfirstlane_b32 s15, v15 +; GCN-NEXT: s_cmp_lg_u64 s[6:7], 0 +; GCN-NEXT: v_readfirstlane_b32 s14, v14 +; GCN-NEXT: s_cbranch_scc0 .LBB12_6 ; GCN-NEXT: ; %bb.1: -; GCN-NEXT: s_ashr_i32 s0, s5, 31 -; GCN-NEXT: s_add_u32 s2, s4, s0 -; GCN-NEXT: s_mov_b32 s1, s0 -; GCN-NEXT: s_addc_u32 s3, s5, s0 -; GCN-NEXT: s_xor_b64 s[12:13], s[2:3], s[0:1] -; GCN-NEXT: v_cvt_f32_u32_e32 v8, s12 -; GCN-NEXT: v_cvt_f32_u32_e32 v9, s13 -; GCN-NEXT: s_sub_u32 s0, 0, s12 -; GCN-NEXT: s_subb_u32 s1, 0, s13 -; GCN-NEXT: v_madmk_f32 v8, v9, 0x4f800000, v8 -; GCN-NEXT: v_rcp_f32_e32 v8, v8 -; GCN-NEXT: v_mul_f32_e32 v8, 0x5f7ffffc, v8 -; GCN-NEXT: v_mul_f32_e32 v9, 0x2f800000, v8 -; GCN-NEXT: v_trunc_f32_e32 v9, v9 -; GCN-NEXT: v_madmk_f32 v8, v9, 0xcf800000, v8 -; GCN-NEXT: v_cvt_u32_f32_e32 v9, v9 -; GCN-NEXT: v_cvt_u32_f32_e32 v8, v8 -; GCN-NEXT: v_readfirstlane_b32 s2, v9 -; GCN-NEXT: v_readfirstlane_b32 s3, v8 -; GCN-NEXT: s_mul_i32 s5, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s15, s0, s3 -; GCN-NEXT: s_mul_i32 s14, s1, s3 -; GCN-NEXT: s_add_i32 s5, s15, s5 -; GCN-NEXT: s_add_i32 s5, s5, s14 -; GCN-NEXT: s_mul_i32 s16, s0, s3 -; GCN-NEXT: s_mul_hi_u32 s14, s3, s5 -; GCN-NEXT: s_mul_i32 s15, s3, s5 -; GCN-NEXT: s_mul_hi_u32 s3, s3, s16 -; GCN-NEXT: s_add_u32 s3, s3, s15 -; GCN-NEXT: s_addc_u32 s14, 0, s14 -; GCN-NEXT: s_mul_hi_u32 s17, s2, s16 -; GCN-NEXT: s_mul_i32 s16, s2, s16 -; GCN-NEXT: s_add_u32 s3, s3, s16 -; GCN-NEXT: s_mul_hi_u32 s15, s2, s5 -; GCN-NEXT: s_addc_u32 s3, s14, s17 -; GCN-NEXT: s_addc_u32 s14, s15, 0 -; GCN-NEXT: s_mul_i32 s5, s2, s5 -; GCN-NEXT: s_add_u32 s3, s3, s5 -; GCN-NEXT: s_addc_u32 s5, 0, s14 -; GCN-NEXT: v_add_co_u32_e32 v8, vcc, s3, v8 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_addc_u32 s2, s2, s5 -; GCN-NEXT: v_readfirstlane_b32 s5, v8 -; GCN-NEXT: s_mul_i32 s3, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s14, s0, s5 -; GCN-NEXT: s_add_i32 s3, s14, s3 -; GCN-NEXT: s_mul_i32 s1, s1, s5 -; GCN-NEXT: s_add_i32 s3, s3, s1 -; GCN-NEXT: s_mul_i32 s0, s0, s5 -; GCN-NEXT: s_mul_hi_u32 s14, s2, s0 -; GCN-NEXT: s_mul_i32 s15, s2, s0 -; GCN-NEXT: s_mul_i32 s17, s5, s3 -; GCN-NEXT: s_mul_hi_u32 s0, s5, s0 -; GCN-NEXT: s_mul_hi_u32 s16, s5, s3 -; GCN-NEXT: s_add_u32 s0, s0, s17 -; GCN-NEXT: s_addc_u32 s5, 0, s16 -; GCN-NEXT: s_add_u32 s0, s0, s15 -; GCN-NEXT: s_mul_hi_u32 s1, s2, s3 -; GCN-NEXT: s_addc_u32 s0, s5, s14 -; GCN-NEXT: s_addc_u32 s1, s1, 0 -; GCN-NEXT: s_mul_i32 s3, s2, s3 -; GCN-NEXT: s_add_u32 s0, s0, s3 -; GCN-NEXT: s_addc_u32 s1, 0, s1 -; GCN-NEXT: v_add_co_u32_e32 v8, vcc, s0, v8 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_addc_u32 s2, s2, s1 -; GCN-NEXT: s_ashr_i32 s14, s7, 31 -; GCN-NEXT: s_add_u32 s0, s6, s14 -; GCN-NEXT: s_mov_b32 s15, s14 -; GCN-NEXT: s_addc_u32 s1, s7, s14 -; GCN-NEXT: s_xor_b64 s[16:17], s[0:1], s[14:15] -; GCN-NEXT: v_readfirstlane_b32 s3, v8 -; GCN-NEXT: s_mul_i32 s1, s16, s2 -; GCN-NEXT: s_mul_hi_u32 s5, s16, s3 -; GCN-NEXT: s_mul_hi_u32 s0, s16, s2 -; GCN-NEXT: s_add_u32 s1, s5, s1 -; GCN-NEXT: s_addc_u32 s0, 0, s0 -; GCN-NEXT: s_mul_hi_u32 s7, s17, s3 -; GCN-NEXT: s_mul_i32 s3, s17, s3 -; GCN-NEXT: s_add_u32 s1, s1, s3 -; GCN-NEXT: s_mul_hi_u32 s5, s17, s2 -; GCN-NEXT: s_addc_u32 s0, s0, s7 -; GCN-NEXT: s_addc_u32 s1, s5, 0 -; GCN-NEXT: s_mul_i32 s2, s17, s2 -; GCN-NEXT: s_add_u32 s0, s0, s2 -; GCN-NEXT: s_addc_u32 s1, 0, s1 -; GCN-NEXT: s_mul_i32 s1, s12, s1 -; GCN-NEXT: s_mul_hi_u32 s2, s12, s0 -; GCN-NEXT: s_add_i32 s1, s2, s1 -; 
GCN-NEXT: s_mul_i32 s2, s13, s0 -; GCN-NEXT: s_mul_i32 s0, s12, s0 -; GCN-NEXT: s_add_i32 s5, s1, s2 -; GCN-NEXT: v_mov_b32_e32 v8, s0 -; GCN-NEXT: s_sub_i32 s1, s17, s5 -; GCN-NEXT: v_sub_co_u32_e32 v8, vcc, s16, v8 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: s_subb_u32 s7, s1, s13 -; GCN-NEXT: v_subrev_co_u32_e64 v9, s[0:1], s12, v8 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: s_subb_u32 s15, s7, 0 -; GCN-NEXT: s_cmp_ge_u32 s15, s13 -; GCN-NEXT: s_cselect_b32 s16, -1, 0 -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v9 -; GCN-NEXT: s_cmp_eq_u32 s15, s13 -; GCN-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[2:3] -; GCN-NEXT: v_mov_b32_e32 v11, s16 -; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: v_cndmask_b32_e64 v10, v11, v10, s[2:3] -; GCN-NEXT: s_subb_u32 s2, s7, s13 -; GCN-NEXT: v_subrev_co_u32_e64 v11, s[0:1], s12, v9 -; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GCN-NEXT: s_subb_u32 s2, s2, 0 -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v10 -; GCN-NEXT: v_cndmask_b32_e64 v9, v9, v11, s[0:1] -; GCN-NEXT: v_mov_b32_e32 v10, s15 -; GCN-NEXT: v_mov_b32_e32 v11, s2 -; GCN-NEXT: s_cmp_lg_u64 vcc, 0 -; GCN-NEXT: v_cndmask_b32_e64 v10, v10, v11, s[0:1] -; GCN-NEXT: s_subb_u32 s0, s17, s5 -; GCN-NEXT: s_cmp_ge_u32 s0, s13 -; GCN-NEXT: s_cselect_b32 s1, -1, 0 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v8 -; GCN-NEXT: s_cmp_eq_u32 s0, s13 -; GCN-NEXT: v_cndmask_b32_e64 v11, 0, -1, vcc -; GCN-NEXT: v_mov_b32_e32 v14, s1 -; GCN-NEXT: s_cselect_b64 vcc, -1, 0 -; GCN-NEXT: v_cndmask_b32_e32 v11, v14, v11, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11 -; GCN-NEXT: v_mov_b32_e32 v14, s0 -; GCN-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; GCN-NEXT: v_cndmask_b32_e32 v10, v14, v10, vcc -; GCN-NEXT: v_xor_b32_e32 v8, s14, v8 -; GCN-NEXT: v_xor_b32_e32 v9, s14, v10 -; GCN-NEXT: v_mov_b32_e32 v10, s14 -; GCN-NEXT: v_subrev_co_u32_e32 v8, vcc, s14, v8 -; GCN-NEXT: v_subb_co_u32_e32 v9, vcc, v9, v10, vcc +; GCN-NEXT: s_ashr_i32 s6, s17, 31 +; GCN-NEXT: s_add_u32 s20, s16, s6 +; GCN-NEXT: s_mov_b32 s7, s6 +; GCN-NEXT: s_addc_u32 s21, s17, s6 +; GCN-NEXT: s_xor_b64 s[6:7], s[20:21], s[6:7] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s7 +; GCN-NEXT: s_sub_u32 s17, 0, s6 +; GCN-NEXT: s_subb_u32 s24, 0, s7 +; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 +; GCN-NEXT: v_rcp_f32_e32 v0, v0 +; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 +; GCN-NEXT: v_trunc_f32_e32 v1, v1 +; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_readfirstlane_b32 s25, v1 +; GCN-NEXT: v_readfirstlane_b32 s22, v0 +; GCN-NEXT: s_mul_i32 s23, s17, s25 +; GCN-NEXT: s_mul_hi_u32 s27, s17, s22 +; GCN-NEXT: s_mul_i32 s26, s24, s22 +; GCN-NEXT: s_add_i32 s23, s27, s23 +; GCN-NEXT: s_add_i32 s23, s23, s26 +; GCN-NEXT: s_mul_i32 s28, s17, s22 +; GCN-NEXT: s_mul_i32 s27, s22, s23 +; GCN-NEXT: s_mul_hi_u32 s29, s22, s28 +; GCN-NEXT: s_mul_hi_u32 s26, s22, s23 +; GCN-NEXT: s_add_u32 s27, s29, s27 +; GCN-NEXT: s_addc_u32 s26, 0, s26 +; GCN-NEXT: s_mul_hi_u32 s30, s25, s28 +; GCN-NEXT: s_mul_i32 s28, s25, s28 +; GCN-NEXT: s_add_u32 s27, s27, s28 +; GCN-NEXT: s_mul_hi_u32 s29, s25, s23 +; GCN-NEXT: s_addc_u32 s26, s26, s30 +; GCN-NEXT: s_addc_u32 s27, s29, 0 +; GCN-NEXT: s_mul_i32 s23, s25, s23 +; GCN-NEXT: s_add_u32 s23, s26, s23 +; GCN-NEXT: s_addc_u32 s26, 0, s27 +; GCN-NEXT: s_add_u32 s27, s22, s23 +; GCN-NEXT: s_cselect_b64 s[22:23], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 
s[22:23], 0 +; GCN-NEXT: s_addc_u32 s25, s25, s26 +; GCN-NEXT: s_mul_i32 s22, s17, s25 +; GCN-NEXT: s_mul_hi_u32 s23, s17, s27 +; GCN-NEXT: s_add_i32 s22, s23, s22 +; GCN-NEXT: s_mul_i32 s24, s24, s27 +; GCN-NEXT: s_add_i32 s22, s22, s24 +; GCN-NEXT: s_mul_i32 s17, s17, s27 +; GCN-NEXT: s_mul_hi_u32 s24, s25, s17 +; GCN-NEXT: s_mul_i32 s26, s25, s17 +; GCN-NEXT: s_mul_i32 s29, s27, s22 +; GCN-NEXT: s_mul_hi_u32 s17, s27, s17 +; GCN-NEXT: s_mul_hi_u32 s28, s27, s22 +; GCN-NEXT: s_add_u32 s17, s17, s29 +; GCN-NEXT: s_addc_u32 s28, 0, s28 +; GCN-NEXT: s_add_u32 s17, s17, s26 +; GCN-NEXT: s_mul_hi_u32 s23, s25, s22 +; GCN-NEXT: s_addc_u32 s17, s28, s24 +; GCN-NEXT: s_addc_u32 s23, s23, 0 +; GCN-NEXT: s_mul_i32 s22, s25, s22 +; GCN-NEXT: s_add_u32 s17, s17, s22 +; GCN-NEXT: s_addc_u32 s24, 0, s23 +; GCN-NEXT: s_add_u32 s17, s27, s17 +; GCN-NEXT: s_cselect_b64 s[22:23], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[22:23], 0 +; GCN-NEXT: s_addc_u32 s26, s25, s24 +; GCN-NEXT: s_ashr_i32 s22, s19, 31 +; GCN-NEXT: s_add_u32 s24, s18, s22 +; GCN-NEXT: s_mov_b32 s23, s22 +; GCN-NEXT: s_addc_u32 s25, s19, s22 +; GCN-NEXT: s_xor_b64 s[24:25], s[24:25], s[22:23] +; GCN-NEXT: s_mul_i32 s27, s24, s26 +; GCN-NEXT: s_mul_hi_u32 s28, s24, s17 +; GCN-NEXT: s_mul_hi_u32 s19, s24, s26 +; GCN-NEXT: s_add_u32 s27, s28, s27 +; GCN-NEXT: s_addc_u32 s19, 0, s19 +; GCN-NEXT: s_mul_hi_u32 s29, s25, s17 +; GCN-NEXT: s_mul_i32 s17, s25, s17 +; GCN-NEXT: s_add_u32 s17, s27, s17 +; GCN-NEXT: s_mul_hi_u32 s28, s25, s26 +; GCN-NEXT: s_addc_u32 s17, s19, s29 +; GCN-NEXT: s_addc_u32 s19, s28, 0 +; GCN-NEXT: s_mul_i32 s26, s25, s26 +; GCN-NEXT: s_add_u32 s17, s17, s26 +; GCN-NEXT: s_addc_u32 s19, 0, s19 +; GCN-NEXT: s_mul_i32 s19, s6, s19 +; GCN-NEXT: s_mul_hi_u32 s26, s6, s17 +; GCN-NEXT: s_add_i32 s19, s26, s19 +; GCN-NEXT: s_mul_i32 s26, s7, s17 +; GCN-NEXT: s_add_i32 s19, s19, s26 +; GCN-NEXT: s_sub_i32 s28, s25, s19 +; GCN-NEXT: s_mul_i32 s17, s6, s17 +; GCN-NEXT: s_sub_u32 s17, s24, s17 +; GCN-NEXT: s_cselect_b64 s[26:27], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0 +; GCN-NEXT: s_subb_u32 s24, s28, s7 +; GCN-NEXT: s_sub_u32 s30, s17, s6 +; GCN-NEXT: s_cselect_b64 s[28:29], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[28:29], 0 +; GCN-NEXT: s_subb_u32 s31, s24, 0 +; GCN-NEXT: s_cmp_ge_u32 s31, s7 +; GCN-NEXT: s_cselect_b32 s33, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s30, s6 +; GCN-NEXT: s_cselect_b32 s34, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s31, s7 +; GCN-NEXT: s_cselect_b32 s33, s34, s33 +; GCN-NEXT: s_cmp_lg_u64 s[28:29], 0 +; GCN-NEXT: s_subb_u32 s24, s24, s7 +; GCN-NEXT: s_sub_u32 s34, s30, s6 +; GCN-NEXT: s_cselect_b64 s[28:29], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[28:29], 0 +; GCN-NEXT: s_subb_u32 s24, s24, 0 +; GCN-NEXT: s_cmp_lg_u32 s33, 0 +; GCN-NEXT: s_cselect_b32 s28, s34, s30 +; GCN-NEXT: s_cselect_b32 s24, s24, s31 +; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0 +; GCN-NEXT: s_subb_u32 s19, s25, s19 +; GCN-NEXT: s_cmp_ge_u32 s19, s7 +; GCN-NEXT: s_cselect_b32 s25, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s17, s6 +; GCN-NEXT: s_cselect_b32 s6, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s19, s7 +; GCN-NEXT: s_cselect_b32 s6, s6, s25 +; GCN-NEXT: s_cmp_lg_u32 s6, 0 +; GCN-NEXT: s_cselect_b32 s7, s24, s19 +; GCN-NEXT: s_cselect_b32 s6, s28, s17 +; GCN-NEXT: s_xor_b64 s[6:7], s[6:7], s[22:23] +; GCN-NEXT: s_sub_u32 s6, s6, s22 +; GCN-NEXT: s_subb_u32 s7, s7, s22 ; GCN-NEXT: s_cbranch_execnz .LBB12_3 ; GCN-NEXT: .LBB12_2: -; GCN-NEXT: v_cvt_f32_u32_e32 v8, s4 -; GCN-NEXT: s_sub_i32 s0, 0, s4 -; GCN-NEXT: s_mov_b32 s1, 0 -; GCN-NEXT: v_rcp_iflag_f32_e32 v8, v8 -; GCN-NEXT: 
v_mul_f32_e32 v8, 0x4f7ffffe, v8 -; GCN-NEXT: v_cvt_u32_f32_e32 v8, v8 -; GCN-NEXT: v_readfirstlane_b32 s2, v8 -; GCN-NEXT: s_mul_i32 s0, s0, s2 -; GCN-NEXT: s_mul_hi_u32 s0, s2, s0 -; GCN-NEXT: s_add_i32 s2, s2, s0 -; GCN-NEXT: s_mul_hi_u32 s0, s6, s2 -; GCN-NEXT: s_mul_i32 s0, s0, s4 -; GCN-NEXT: s_sub_i32 s0, s6, s0 -; GCN-NEXT: s_sub_i32 s2, s0, s4 -; GCN-NEXT: s_cmp_ge_u32 s0, s4 -; GCN-NEXT: s_cselect_b32 s0, s2, s0 -; GCN-NEXT: s_sub_i32 s2, s0, s4 -; GCN-NEXT: s_cmp_ge_u32 s0, s4 -; GCN-NEXT: s_cselect_b32 s0, s2, s0 -; GCN-NEXT: v_mov_b32_e32 v9, s1 -; GCN-NEXT: v_mov_b32_e32 v8, s0 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s16 +; GCN-NEXT: s_sub_i32 s6, 0, s16 +; GCN-NEXT: s_mov_b32 s7, 0 +; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0 +; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_readfirstlane_b32 s17, v0 +; GCN-NEXT: s_mul_i32 s6, s6, s17 +; GCN-NEXT: s_mul_hi_u32 s6, s17, s6 +; GCN-NEXT: s_add_i32 s17, s17, s6 +; GCN-NEXT: s_mul_hi_u32 s6, s18, s17 +; GCN-NEXT: s_mul_i32 s6, s6, s16 +; GCN-NEXT: s_sub_i32 s6, s18, s6 +; GCN-NEXT: s_sub_i32 s17, s6, s16 +; GCN-NEXT: s_cmp_ge_u32 s6, s16 +; GCN-NEXT: s_cselect_b32 s6, s17, s6 +; GCN-NEXT: s_sub_i32 s17, s6, s16 +; GCN-NEXT: s_cmp_ge_u32 s6, s16 +; GCN-NEXT: s_cselect_b32 s6, s17, s6 ; GCN-NEXT: .LBB12_3: -; GCN-NEXT: v_or_b32_e32 v11, v17, v13 -; GCN-NEXT: v_mov_b32_e32 v10, 0 -; GCN-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11] -; GCN-NEXT: s_cbranch_vccz .LBB12_14 +; GCN-NEXT: s_or_b64 s[16:17], s[14:15], s[12:13] +; GCN-NEXT: s_mov_b32 s16, 0 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_cbranch_scc0 .LBB12_7 ; GCN-NEXT: ; %bb.4: -; GCN-NEXT: v_ashrrev_i32_e32 v10, 31, v13 -; GCN-NEXT: v_add_co_u32_e32 v11, vcc, v12, v10 -; GCN-NEXT: v_addc_co_u32_e32 v13, vcc, v13, v10, vcc -; GCN-NEXT: v_xor_b32_e32 v11, v11, v10 -; GCN-NEXT: v_xor_b32_e32 v10, v13, v10 -; GCN-NEXT: v_cvt_f32_u32_e32 v13, v11 -; GCN-NEXT: v_cvt_f32_u32_e32 v14, v10 -; GCN-NEXT: v_sub_co_u32_e32 v15, vcc, 0, v11 -; GCN-NEXT: v_subb_co_u32_e32 v18, vcc, 0, v10, vcc -; GCN-NEXT: v_madmk_f32 v13, v14, 0x4f800000, v13 -; GCN-NEXT: v_rcp_f32_e32 v13, v13 -; GCN-NEXT: v_mul_f32_e32 v13, 0x5f7ffffc, v13 -; GCN-NEXT: v_mul_f32_e32 v14, 0x2f800000, v13 -; GCN-NEXT: v_trunc_f32_e32 v14, v14 -; GCN-NEXT: v_madmk_f32 v13, v14, 0xcf800000, v13 -; GCN-NEXT: v_cvt_u32_f32_e32 v14, v14 -; GCN-NEXT: v_cvt_u32_f32_e32 v13, v13 -; GCN-NEXT: v_mul_lo_u32 v20, v15, v14 -; GCN-NEXT: v_mul_hi_u32 v19, v15, v13 -; GCN-NEXT: v_mul_lo_u32 v21, v18, v13 -; GCN-NEXT: v_mul_lo_u32 v22, v15, v13 -; GCN-NEXT: v_add_u32_e32 v19, v19, v20 -; GCN-NEXT: v_add_u32_e32 v19, v19, v21 -; GCN-NEXT: v_mul_lo_u32 v20, v13, v19 -; GCN-NEXT: v_mul_hi_u32 v21, v13, v22 -; GCN-NEXT: v_mul_hi_u32 v23, v13, v19 -; GCN-NEXT: v_mul_hi_u32 v24, v14, v19 -; GCN-NEXT: v_mul_lo_u32 v19, v14, v19 -; GCN-NEXT: v_add_co_u32_e32 v20, vcc, v21, v20 -; GCN-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v23, vcc -; GCN-NEXT: v_mul_lo_u32 v23, v14, v22 -; GCN-NEXT: v_mul_hi_u32 v22, v14, v22 -; GCN-NEXT: v_add_co_u32_e32 v20, vcc, v20, v23 -; GCN-NEXT: v_addc_co_u32_e32 v20, vcc, v21, v22, vcc -; GCN-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v24, vcc -; GCN-NEXT: v_add_co_u32_e32 v19, vcc, v20, v19 -; GCN-NEXT: v_addc_co_u32_e32 v20, vcc, 0, v21, vcc -; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v19 -; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v20, vcc -; GCN-NEXT: v_mul_lo_u32 v19, v15, v14 -; GCN-NEXT: v_mul_hi_u32 v20, v15, v13 -; GCN-NEXT: v_mul_lo_u32 v18, v18, v13 -; GCN-NEXT: v_mul_lo_u32 
v15, v15, v13 -; GCN-NEXT: v_add_u32_e32 v19, v20, v19 -; GCN-NEXT: v_add_u32_e32 v18, v19, v18 -; GCN-NEXT: v_mul_lo_u32 v21, v13, v18 -; GCN-NEXT: v_mul_hi_u32 v22, v13, v15 -; GCN-NEXT: v_mul_hi_u32 v23, v13, v18 -; GCN-NEXT: v_mul_hi_u32 v20, v14, v15 -; GCN-NEXT: v_mul_lo_u32 v15, v14, v15 -; GCN-NEXT: v_mul_hi_u32 v19, v14, v18 -; GCN-NEXT: v_add_co_u32_e32 v21, vcc, v22, v21 -; GCN-NEXT: v_addc_co_u32_e32 v22, vcc, 0, v23, vcc -; GCN-NEXT: v_mul_lo_u32 v18, v14, v18 -; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v21, v15 -; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, v22, v20, vcc -; GCN-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v19, vcc -; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v15, v18 -; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v19, vcc -; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v15 -; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v18, vcc -; GCN-NEXT: v_ashrrev_i32_e32 v15, 31, v17 -; GCN-NEXT: v_add_co_u32_e32 v18, vcc, v16, v15 -; GCN-NEXT: v_xor_b32_e32 v18, v18, v15 -; GCN-NEXT: v_mul_lo_u32 v19, v18, v14 -; GCN-NEXT: v_mul_hi_u32 v20, v18, v13 -; GCN-NEXT: v_mul_hi_u32 v21, v18, v14 -; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, v17, v15, vcc -; GCN-NEXT: v_xor_b32_e32 v17, v17, v15 -; GCN-NEXT: v_add_co_u32_e32 v19, vcc, v20, v19 -; GCN-NEXT: v_addc_co_u32_e32 v20, vcc, 0, v21, vcc -; GCN-NEXT: v_mul_lo_u32 v21, v17, v13 -; GCN-NEXT: v_mul_hi_u32 v13, v17, v13 -; GCN-NEXT: v_mul_hi_u32 v22, v17, v14 -; GCN-NEXT: v_mul_lo_u32 v14, v17, v14 -; GCN-NEXT: v_add_co_u32_e32 v19, vcc, v19, v21 -; GCN-NEXT: v_addc_co_u32_e32 v13, vcc, v20, v13, vcc -; GCN-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v22, vcc -; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v14 -; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, 0, v19, vcc -; GCN-NEXT: v_mul_lo_u32 v14, v11, v14 -; GCN-NEXT: v_mul_hi_u32 v19, v11, v13 -; GCN-NEXT: v_mul_lo_u32 v20, v10, v13 -; GCN-NEXT: v_mul_lo_u32 v13, v11, v13 -; GCN-NEXT: v_add_u32_e32 v14, v19, v14 -; GCN-NEXT: v_add_u32_e32 v14, v14, v20 -; GCN-NEXT: v_sub_u32_e32 v19, v17, v14 -; GCN-NEXT: v_sub_co_u32_e32 v13, vcc, v18, v13 -; GCN-NEXT: v_subb_co_u32_e64 v18, s[0:1], v19, v10, vcc -; GCN-NEXT: v_sub_co_u32_e64 v19, s[0:1], v13, v11 -; GCN-NEXT: v_subbrev_co_u32_e64 v20, s[2:3], 0, v18, s[0:1] -; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v20, v10 -; GCN-NEXT: v_cndmask_b32_e64 v21, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v19, v11 -; GCN-NEXT: v_subb_co_u32_e32 v14, vcc, v17, v14, vcc -; GCN-NEXT: v_cndmask_b32_e64 v22, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], v20, v10 -; GCN-NEXT: v_subb_co_u32_e64 v18, s[0:1], v18, v10, s[0:1] -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v14, v10 -; GCN-NEXT: v_cndmask_b32_e64 v21, v21, v22, s[2:3] -; GCN-NEXT: v_sub_co_u32_e64 v22, s[0:1], v19, v11 -; GCN-NEXT: v_cndmask_b32_e64 v17, 0, -1, vcc -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v13, v11 -; GCN-NEXT: v_subbrev_co_u32_e64 v18, s[0:1], 0, v18, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v11, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v14, v10 -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v21 -; GCN-NEXT: v_cndmask_b32_e32 v10, v17, v11, vcc -; GCN-NEXT: v_cndmask_b32_e64 v19, v19, v22, s[0:1] -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10 -; GCN-NEXT: v_cndmask_b32_e64 v18, v20, v18, s[0:1] -; GCN-NEXT: v_cndmask_b32_e32 v11, v13, v19, vcc -; GCN-NEXT: v_cndmask_b32_e32 v10, v14, v18, vcc -; GCN-NEXT: v_xor_b32_e32 v11, v11, v15 -; GCN-NEXT: v_xor_b32_e32 v13, v10, v15 -; GCN-NEXT: v_sub_co_u32_e32 v10, vcc, v11, v15 -; GCN-NEXT: v_subb_co_u32_e32 v11, vcc, v13, v15, vcc -; GCN-NEXT: s_cbranch_execnz .LBB12_6 +; GCN-NEXT: 
s_ashr_i32 s16, s13, 31 +; GCN-NEXT: s_add_u32 s18, s12, s16 +; GCN-NEXT: s_mov_b32 s17, s16 +; GCN-NEXT: s_addc_u32 s19, s13, s16 +; GCN-NEXT: s_xor_b64 s[18:19], s[18:19], s[16:17] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s18 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s19 +; GCN-NEXT: s_sub_u32 s13, 0, s18 +; GCN-NEXT: s_subb_u32 s22, 0, s19 +; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 +; GCN-NEXT: v_rcp_f32_e32 v0, v0 +; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 +; GCN-NEXT: v_trunc_f32_e32 v1, v1 +; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_readfirstlane_b32 s23, v1 +; GCN-NEXT: v_readfirstlane_b32 s20, v0 +; GCN-NEXT: s_mul_i32 s21, s13, s23 +; GCN-NEXT: s_mul_hi_u32 s25, s13, s20 +; GCN-NEXT: s_mul_i32 s24, s22, s20 +; GCN-NEXT: s_add_i32 s21, s25, s21 +; GCN-NEXT: s_add_i32 s21, s21, s24 +; GCN-NEXT: s_mul_i32 s26, s13, s20 +; GCN-NEXT: s_mul_i32 s25, s20, s21 +; GCN-NEXT: s_mul_hi_u32 s27, s20, s26 +; GCN-NEXT: s_mul_hi_u32 s24, s20, s21 +; GCN-NEXT: s_add_u32 s25, s27, s25 +; GCN-NEXT: s_addc_u32 s24, 0, s24 +; GCN-NEXT: s_mul_hi_u32 s28, s23, s26 +; GCN-NEXT: s_mul_i32 s26, s23, s26 +; GCN-NEXT: s_add_u32 s25, s25, s26 +; GCN-NEXT: s_mul_hi_u32 s27, s23, s21 +; GCN-NEXT: s_addc_u32 s24, s24, s28 +; GCN-NEXT: s_addc_u32 s25, s27, 0 +; GCN-NEXT: s_mul_i32 s21, s23, s21 +; GCN-NEXT: s_add_u32 s21, s24, s21 +; GCN-NEXT: s_addc_u32 s24, 0, s25 +; GCN-NEXT: s_add_u32 s25, s20, s21 +; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0 +; GCN-NEXT: s_addc_u32 s23, s23, s24 +; GCN-NEXT: s_mul_i32 s20, s13, s23 +; GCN-NEXT: s_mul_hi_u32 s21, s13, s25 +; GCN-NEXT: s_add_i32 s20, s21, s20 +; GCN-NEXT: s_mul_i32 s22, s22, s25 +; GCN-NEXT: s_add_i32 s20, s20, s22 +; GCN-NEXT: s_mul_i32 s13, s13, s25 +; GCN-NEXT: s_mul_hi_u32 s22, s23, s13 +; GCN-NEXT: s_mul_i32 s24, s23, s13 +; GCN-NEXT: s_mul_i32 s27, s25, s20 +; GCN-NEXT: s_mul_hi_u32 s13, s25, s13 +; GCN-NEXT: s_mul_hi_u32 s26, s25, s20 +; GCN-NEXT: s_add_u32 s13, s13, s27 +; GCN-NEXT: s_addc_u32 s26, 0, s26 +; GCN-NEXT: s_add_u32 s13, s13, s24 +; GCN-NEXT: s_mul_hi_u32 s21, s23, s20 +; GCN-NEXT: s_addc_u32 s13, s26, s22 +; GCN-NEXT: s_addc_u32 s21, s21, 0 +; GCN-NEXT: s_mul_i32 s20, s23, s20 +; GCN-NEXT: s_add_u32 s13, s13, s20 +; GCN-NEXT: s_addc_u32 s22, 0, s21 +; GCN-NEXT: s_add_u32 s13, s25, s13 +; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0 +; GCN-NEXT: s_addc_u32 s24, s23, s22 +; GCN-NEXT: s_ashr_i32 s20, s15, 31 +; GCN-NEXT: s_add_u32 s22, s14, s20 +; GCN-NEXT: s_mov_b32 s21, s20 +; GCN-NEXT: s_addc_u32 s23, s15, s20 +; GCN-NEXT: s_xor_b64 s[22:23], s[22:23], s[20:21] +; GCN-NEXT: s_mul_i32 s25, s22, s24 +; GCN-NEXT: s_mul_hi_u32 s26, s22, s13 +; GCN-NEXT: s_mul_hi_u32 s15, s22, s24 +; GCN-NEXT: s_add_u32 s25, s26, s25 +; GCN-NEXT: s_addc_u32 s15, 0, s15 +; GCN-NEXT: s_mul_hi_u32 s27, s23, s13 +; GCN-NEXT: s_mul_i32 s13, s23, s13 +; GCN-NEXT: s_add_u32 s13, s25, s13 +; GCN-NEXT: s_mul_hi_u32 s26, s23, s24 +; GCN-NEXT: s_addc_u32 s13, s15, s27 +; GCN-NEXT: s_addc_u32 s15, s26, 0 +; GCN-NEXT: s_mul_i32 s24, s23, s24 +; GCN-NEXT: s_add_u32 s13, s13, s24 +; GCN-NEXT: s_addc_u32 s15, 0, s15 +; GCN-NEXT: s_mul_i32 s15, s18, s15 +; GCN-NEXT: s_mul_hi_u32 s24, s18, s13 +; GCN-NEXT: s_add_i32 s15, s24, s15 +; GCN-NEXT: s_mul_i32 s24, s19, s13 +; GCN-NEXT: s_add_i32 s15, s15, s24 +; GCN-NEXT: s_sub_i32 s26, s23, s15 +; GCN-NEXT: s_mul_i32 s13, s18, s13 +; 
GCN-NEXT: s_sub_u32 s13, s22, s13 +; GCN-NEXT: s_cselect_b64 s[24:25], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[24:25], 0 +; GCN-NEXT: s_subb_u32 s22, s26, s19 +; GCN-NEXT: s_sub_u32 s28, s13, s18 +; GCN-NEXT: s_cselect_b64 s[26:27], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0 +; GCN-NEXT: s_subb_u32 s29, s22, 0 +; GCN-NEXT: s_cmp_ge_u32 s29, s19 +; GCN-NEXT: s_cselect_b32 s30, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s28, s18 +; GCN-NEXT: s_cselect_b32 s31, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s29, s19 +; GCN-NEXT: s_cselect_b32 s30, s31, s30 +; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0 +; GCN-NEXT: s_subb_u32 s22, s22, s19 +; GCN-NEXT: s_sub_u32 s31, s28, s18 +; GCN-NEXT: s_cselect_b64 s[26:27], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0 +; GCN-NEXT: s_subb_u32 s22, s22, 0 +; GCN-NEXT: s_cmp_lg_u32 s30, 0 +; GCN-NEXT: s_cselect_b32 s26, s31, s28 +; GCN-NEXT: s_cselect_b32 s22, s22, s29 +; GCN-NEXT: s_cmp_lg_u64 s[24:25], 0 +; GCN-NEXT: s_subb_u32 s15, s23, s15 +; GCN-NEXT: s_cmp_ge_u32 s15, s19 +; GCN-NEXT: s_cselect_b32 s23, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s13, s18 +; GCN-NEXT: s_cselect_b32 s18, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s15, s19 +; GCN-NEXT: s_cselect_b32 s18, s18, s23 +; GCN-NEXT: s_cmp_lg_u32 s18, 0 +; GCN-NEXT: s_cselect_b32 s19, s22, s15 +; GCN-NEXT: s_cselect_b32 s18, s26, s13 +; GCN-NEXT: s_xor_b64 s[18:19], s[18:19], s[20:21] +; GCN-NEXT: s_sub_u32 s18, s18, s20 +; GCN-NEXT: s_subb_u32 s19, s19, s20 +; GCN-NEXT: s_cbranch_execnz .LBB12_8 ; GCN-NEXT: .LBB12_5: -; GCN-NEXT: v_cvt_f32_u32_e32 v10, v12 -; GCN-NEXT: v_sub_u32_e32 v11, 0, v12 -; GCN-NEXT: v_rcp_iflag_f32_e32 v10, v10 -; GCN-NEXT: v_mul_f32_e32 v10, 0x4f7ffffe, v10 -; GCN-NEXT: v_cvt_u32_f32_e32 v10, v10 -; GCN-NEXT: v_mul_lo_u32 v11, v11, v10 -; GCN-NEXT: v_mul_hi_u32 v11, v10, v11 -; GCN-NEXT: v_add_u32_e32 v10, v10, v11 -; GCN-NEXT: v_mul_hi_u32 v10, v16, v10 -; GCN-NEXT: v_mul_lo_u32 v10, v10, v12 -; GCN-NEXT: v_sub_u32_e32 v10, v16, v10 -; GCN-NEXT: v_sub_u32_e32 v11, v10, v12 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12 -; GCN-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; GCN-NEXT: v_sub_u32_e32 v11, v10, v12 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12 -; GCN-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; GCN-NEXT: v_mov_b32_e32 v11, 0 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s12 +; GCN-NEXT: s_sub_i32 s13, 0, s12 +; GCN-NEXT: v_mov_b32_e32 v3, 0 +; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0 +; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_mul_lo_u32 v1, s13, v0 +; GCN-NEXT: v_mul_hi_u32 v1, v0, v1 +; GCN-NEXT: v_add_u32_e32 v0, v0, v1 +; GCN-NEXT: v_mul_hi_u32 v0, s14, v0 +; GCN-NEXT: v_mul_lo_u32 v0, v0, s12 +; GCN-NEXT: v_sub_u32_e32 v0, s14, v0 +; GCN-NEXT: v_subrev_u32_e32 v1, s12, v0 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v0 +; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; GCN-NEXT: v_subrev_u32_e32 v1, s12, v0 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v0 +; GCN-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc +; GCN-NEXT: s_branch .LBB12_9 ; GCN-NEXT: .LBB12_6: -; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_or_b32_e32 v13, v5, v1 -; GCN-NEXT: v_mov_b32_e32 v12, 0 -; GCN-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13] -; GCN-NEXT: s_cbranch_vccz .LBB12_15 -; GCN-NEXT: ; %bb.7: -; GCN-NEXT: v_ashrrev_i32_e32 v13, 31, v1 -; GCN-NEXT: v_add_co_u32_e32 v12, vcc, v0, v13 -; GCN-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v13, vcc -; GCN-NEXT: v_xor_b32_e32 v12, v12, v13 -; GCN-NEXT: v_xor_b32_e32 v1, v1, v13 -; GCN-NEXT: v_cvt_f32_u32_e32 v13, v12 -; GCN-NEXT: v_cvt_f32_u32_e32 v14, v1 -; GCN-NEXT: v_sub_co_u32_e32 v15, 
vcc, 0, v12 -; GCN-NEXT: v_subb_co_u32_e32 v16, vcc, 0, v1, vcc -; GCN-NEXT: v_madmk_f32 v13, v14, 0x4f800000, v13 -; GCN-NEXT: v_rcp_f32_e32 v13, v13 -; GCN-NEXT: v_mul_f32_e32 v13, 0x5f7ffffc, v13 -; GCN-NEXT: v_mul_f32_e32 v14, 0x2f800000, v13 -; GCN-NEXT: v_trunc_f32_e32 v14, v14 -; GCN-NEXT: v_madmk_f32 v13, v14, 0xcf800000, v13 -; GCN-NEXT: v_cvt_u32_f32_e32 v14, v14 -; GCN-NEXT: v_cvt_u32_f32_e32 v13, v13 -; GCN-NEXT: v_mul_lo_u32 v18, v15, v14 -; GCN-NEXT: v_mul_hi_u32 v17, v15, v13 -; GCN-NEXT: v_mul_lo_u32 v19, v16, v13 -; GCN-NEXT: v_mul_lo_u32 v20, v15, v13 -; GCN-NEXT: v_add_u32_e32 v17, v17, v18 -; GCN-NEXT: v_add_u32_e32 v17, v17, v19 -; GCN-NEXT: v_mul_lo_u32 v18, v13, v17 -; GCN-NEXT: v_mul_hi_u32 v19, v13, v20 -; GCN-NEXT: v_mul_hi_u32 v21, v13, v17 -; GCN-NEXT: v_mul_hi_u32 v22, v14, v17 -; GCN-NEXT: v_mul_lo_u32 v17, v14, v17 -; GCN-NEXT: v_add_co_u32_e32 v18, vcc, v19, v18 -; GCN-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v21, vcc -; GCN-NEXT: v_mul_lo_u32 v21, v14, v20 -; GCN-NEXT: v_mul_hi_u32 v20, v14, v20 -; GCN-NEXT: v_add_co_u32_e32 v18, vcc, v18, v21 -; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, v19, v20, vcc -; GCN-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v22, vcc -; GCN-NEXT: v_add_co_u32_e32 v17, vcc, v18, v17 -; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v19, vcc -; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v17 -; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v18, vcc -; GCN-NEXT: v_mul_lo_u32 v17, v15, v14 -; GCN-NEXT: v_mul_hi_u32 v18, v15, v13 -; GCN-NEXT: v_mul_lo_u32 v16, v16, v13 -; GCN-NEXT: v_mul_lo_u32 v15, v15, v13 -; GCN-NEXT: v_add_u32_e32 v17, v18, v17 -; GCN-NEXT: v_add_u32_e32 v16, v17, v16 -; GCN-NEXT: v_mul_lo_u32 v19, v13, v16 -; GCN-NEXT: v_mul_hi_u32 v20, v13, v15 -; GCN-NEXT: v_mul_hi_u32 v21, v13, v16 -; GCN-NEXT: v_mul_hi_u32 v18, v14, v15 -; GCN-NEXT: v_mul_lo_u32 v15, v14, v15 -; GCN-NEXT: v_mul_hi_u32 v17, v14, v16 -; GCN-NEXT: v_add_co_u32_e32 v19, vcc, v20, v19 -; GCN-NEXT: v_addc_co_u32_e32 v20, vcc, 0, v21, vcc -; GCN-NEXT: v_mul_lo_u32 v16, v14, v16 -; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v19, v15 -; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, v20, v18, vcc -; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v17, vcc -; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v15, v16 -; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v17, vcc -; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v15 -; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, v14, v16, vcc -; GCN-NEXT: v_ashrrev_i32_e32 v15, 31, v5 -; GCN-NEXT: v_add_co_u32_e32 v16, vcc, v4, v15 -; GCN-NEXT: v_xor_b32_e32 v16, v16, v15 -; GCN-NEXT: v_mul_lo_u32 v17, v16, v14 -; GCN-NEXT: v_mul_hi_u32 v18, v16, v13 -; GCN-NEXT: v_mul_hi_u32 v19, v16, v14 -; GCN-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v15, vcc -; GCN-NEXT: v_xor_b32_e32 v5, v5, v15 -; GCN-NEXT: v_add_co_u32_e32 v17, vcc, v18, v17 -; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v19, vcc -; GCN-NEXT: v_mul_lo_u32 v19, v5, v13 -; GCN-NEXT: v_mul_hi_u32 v13, v5, v13 -; GCN-NEXT: v_mul_hi_u32 v20, v5, v14 -; GCN-NEXT: v_mul_lo_u32 v14, v5, v14 -; GCN-NEXT: v_add_co_u32_e32 v17, vcc, v17, v19 -; GCN-NEXT: v_addc_co_u32_e32 v13, vcc, v18, v13, vcc -; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v20, vcc -; GCN-NEXT: v_add_co_u32_e32 v13, vcc, v13, v14 -; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, 0, v17, vcc -; GCN-NEXT: v_mul_lo_u32 v14, v12, v14 -; GCN-NEXT: v_mul_hi_u32 v17, v12, v13 -; GCN-NEXT: v_mul_lo_u32 v18, v1, v13 -; GCN-NEXT: v_mul_lo_u32 v13, v12, v13 -; GCN-NEXT: v_add_u32_e32 v14, v17, v14 -; GCN-NEXT: v_add_u32_e32 v14, v14, v18 -; GCN-NEXT: v_sub_u32_e32 v17, v5, v14 -; GCN-NEXT: 
v_sub_co_u32_e32 v13, vcc, v16, v13 -; GCN-NEXT: v_subb_co_u32_e64 v16, s[0:1], v17, v1, vcc -; GCN-NEXT: v_sub_co_u32_e64 v17, s[0:1], v13, v12 -; GCN-NEXT: v_subbrev_co_u32_e64 v18, s[2:3], 0, v16, s[0:1] -; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v18, v1 -; GCN-NEXT: v_cndmask_b32_e64 v19, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v17, v12 -; GCN-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v14, vcc -; GCN-NEXT: v_cndmask_b32_e64 v20, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], v18, v1 -; GCN-NEXT: v_subb_co_u32_e64 v16, s[0:1], v16, v1, s[0:1] -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v5, v1 -; GCN-NEXT: v_cndmask_b32_e64 v19, v19, v20, s[2:3] -; GCN-NEXT: v_sub_co_u32_e64 v20, s[0:1], v17, v12 -; GCN-NEXT: v_cndmask_b32_e64 v14, 0, -1, vcc -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v13, v12 -; GCN-NEXT: v_subbrev_co_u32_e64 v16, s[0:1], 0, v16, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v12, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1 -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v19 -; GCN-NEXT: v_cndmask_b32_e32 v1, v14, v12, vcc -; GCN-NEXT: v_cndmask_b32_e64 v17, v17, v20, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v16, v18, v16, s[0:1] -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1 -; GCN-NEXT: v_cndmask_b32_e32 v1, v5, v16, vcc -; GCN-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc -; GCN-NEXT: v_xor_b32_e32 v5, v5, v15 -; GCN-NEXT: v_xor_b32_e32 v1, v1, v15 -; GCN-NEXT: v_sub_co_u32_e32 v12, vcc, v5, v15 -; GCN-NEXT: v_subb_co_u32_e32 v13, vcc, v1, v15, vcc -; GCN-NEXT: s_cbranch_execnz .LBB12_9 +; GCN-NEXT: ; implicit-def: $sgpr6_sgpr7 +; GCN-NEXT: s_branch .LBB12_2 +; GCN-NEXT: .LBB12_7: +; GCN-NEXT: ; implicit-def: $sgpr18_sgpr19 +; GCN-NEXT: s_branch .LBB12_5 ; GCN-NEXT: .LBB12_8: -; GCN-NEXT: v_cvt_f32_u32_e32 v1, v0 -; GCN-NEXT: v_sub_u32_e32 v5, 0, v0 -; GCN-NEXT: v_mov_b32_e32 v13, 0 -; GCN-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; GCN-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 -; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GCN-NEXT: v_mul_lo_u32 v5, v5, v1 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v5 -; GCN-NEXT: v_add_u32_e32 v1, v1, v5 -; GCN-NEXT: v_mul_hi_u32 v1, v4, v1 -; GCN-NEXT: v_mul_lo_u32 v1, v1, v0 -; GCN-NEXT: v_sub_u32_e32 v1, v4, v1 -; GCN-NEXT: v_sub_u32_e32 v4, v1, v0 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0 -; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc -; GCN-NEXT: v_sub_u32_e32 v4, v1, v0 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0 -; GCN-NEXT: v_cndmask_b32_e32 v12, v1, v4, vcc +; GCN-NEXT: v_mov_b32_e32 v2, s18 +; GCN-NEXT: v_mov_b32_e32 v3, s19 ; GCN-NEXT: .LBB12_9: -; GCN-NEXT: v_or_b32_e32 v1, v7, v3 -; GCN-NEXT: v_mov_b32_e32 v0, 0 -; GCN-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1] -; GCN-NEXT: s_cbranch_vccz .LBB12_16 +; GCN-NEXT: s_or_b64 s[12:13], s[10:11], s[8:9] +; GCN-NEXT: s_mov_b32 s12, 0 +; GCN-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GCN-NEXT: s_cbranch_scc0 .LBB12_12 ; GCN-NEXT: ; %bb.10: -; GCN-NEXT: v_ashrrev_i32_e32 v0, 31, v3 -; GCN-NEXT: v_add_co_u32_e32 v1, vcc, v2, v0 -; GCN-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v0, vcc -; GCN-NEXT: v_xor_b32_e32 v1, v1, v0 -; GCN-NEXT: v_xor_b32_e32 v0, v3, v0 -; GCN-NEXT: v_cvt_f32_u32_e32 v3, v1 -; GCN-NEXT: v_cvt_f32_u32_e32 v4, v0 -; GCN-NEXT: v_sub_co_u32_e32 v5, vcc, 0, v1 -; GCN-NEXT: v_subb_co_u32_e32 v14, vcc, 0, v0, vcc -; GCN-NEXT: v_madmk_f32 v3, v4, 0x4f800000, v3 -; GCN-NEXT: v_rcp_f32_e32 v3, v3 -; GCN-NEXT: v_mul_f32_e32 v3, 0x5f7ffffc, v3 -; GCN-NEXT: v_mul_f32_e32 v4, 0x2f800000, v3 -; GCN-NEXT: v_trunc_f32_e32 v4, v4 -; GCN-NEXT: v_madmk_f32 v3, v4, 0xcf800000, v3 -; GCN-NEXT: v_cvt_u32_f32_e32 v4, v4 -; GCN-NEXT: v_cvt_u32_f32_e32 v3, v3 
-; GCN-NEXT: v_mul_lo_u32 v16, v5, v4 -; GCN-NEXT: v_mul_hi_u32 v15, v5, v3 -; GCN-NEXT: v_mul_lo_u32 v17, v14, v3 -; GCN-NEXT: v_mul_lo_u32 v18, v5, v3 -; GCN-NEXT: v_add_u32_e32 v15, v15, v16 -; GCN-NEXT: v_add_u32_e32 v15, v15, v17 -; GCN-NEXT: v_mul_lo_u32 v16, v3, v15 -; GCN-NEXT: v_mul_hi_u32 v17, v3, v18 -; GCN-NEXT: v_mul_hi_u32 v19, v3, v15 -; GCN-NEXT: v_mul_hi_u32 v20, v4, v15 -; GCN-NEXT: v_mul_lo_u32 v15, v4, v15 -; GCN-NEXT: v_add_co_u32_e32 v16, vcc, v17, v16 -; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v19, vcc -; GCN-NEXT: v_mul_lo_u32 v19, v4, v18 -; GCN-NEXT: v_mul_hi_u32 v18, v4, v18 -; GCN-NEXT: v_add_co_u32_e32 v16, vcc, v16, v19 -; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, v17, v18, vcc -; GCN-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v20, vcc -; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v16, v15 -; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v17, vcc -; GCN-NEXT: v_add_co_u32_e32 v3, vcc, v3, v15 -; GCN-NEXT: v_addc_co_u32_e32 v4, vcc, v4, v16, vcc -; GCN-NEXT: v_mul_lo_u32 v15, v5, v4 -; GCN-NEXT: v_mul_hi_u32 v16, v5, v3 -; GCN-NEXT: v_mul_lo_u32 v14, v14, v3 -; GCN-NEXT: v_mul_lo_u32 v5, v5, v3 -; GCN-NEXT: v_add_u32_e32 v15, v16, v15 -; GCN-NEXT: v_add_u32_e32 v14, v15, v14 -; GCN-NEXT: v_mul_lo_u32 v17, v3, v14 -; GCN-NEXT: v_mul_hi_u32 v18, v3, v5 -; GCN-NEXT: v_mul_hi_u32 v19, v3, v14 -; GCN-NEXT: v_mul_hi_u32 v16, v4, v5 -; GCN-NEXT: v_mul_lo_u32 v5, v4, v5 -; GCN-NEXT: v_mul_hi_u32 v15, v4, v14 -; GCN-NEXT: v_add_co_u32_e32 v17, vcc, v18, v17 -; GCN-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v19, vcc -; GCN-NEXT: v_mul_lo_u32 v14, v4, v14 -; GCN-NEXT: v_add_co_u32_e32 v5, vcc, v17, v5 -; GCN-NEXT: v_addc_co_u32_e32 v5, vcc, v18, v16, vcc -; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc -; GCN-NEXT: v_add_co_u32_e32 v5, vcc, v5, v14 -; GCN-NEXT: v_addc_co_u32_e32 v14, vcc, 0, v15, vcc -; GCN-NEXT: v_add_co_u32_e32 v3, vcc, v3, v5 -; GCN-NEXT: v_addc_co_u32_e32 v4, vcc, v4, v14, vcc -; GCN-NEXT: v_ashrrev_i32_e32 v5, 31, v7 -; GCN-NEXT: v_add_co_u32_e32 v14, vcc, v6, v5 -; GCN-NEXT: v_xor_b32_e32 v14, v14, v5 -; GCN-NEXT: v_mul_lo_u32 v15, v14, v4 -; GCN-NEXT: v_mul_hi_u32 v16, v14, v3 -; GCN-NEXT: v_mul_hi_u32 v17, v14, v4 -; GCN-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v5, vcc -; GCN-NEXT: v_xor_b32_e32 v7, v7, v5 -; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v16, v15 -; GCN-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v17, vcc -; GCN-NEXT: v_mul_lo_u32 v17, v7, v3 -; GCN-NEXT: v_mul_hi_u32 v3, v7, v3 -; GCN-NEXT: v_mul_hi_u32 v18, v7, v4 -; GCN-NEXT: v_mul_lo_u32 v4, v7, v4 -; GCN-NEXT: v_add_co_u32_e32 v15, vcc, v15, v17 -; GCN-NEXT: v_addc_co_u32_e32 v3, vcc, v16, v3, vcc -; GCN-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v18, vcc -; GCN-NEXT: v_add_co_u32_e32 v3, vcc, v3, v4 -; GCN-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v15, vcc -; GCN-NEXT: v_mul_lo_u32 v4, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v15, v1, v3 -; GCN-NEXT: v_mul_lo_u32 v16, v0, v3 -; GCN-NEXT: v_mul_lo_u32 v3, v1, v3 -; GCN-NEXT: v_add_u32_e32 v4, v15, v4 -; GCN-NEXT: v_add_u32_e32 v4, v4, v16 -; GCN-NEXT: v_sub_u32_e32 v15, v7, v4 -; GCN-NEXT: v_sub_co_u32_e32 v3, vcc, v14, v3 -; GCN-NEXT: v_subb_co_u32_e64 v14, s[0:1], v15, v0, vcc -; GCN-NEXT: v_sub_co_u32_e64 v15, s[0:1], v3, v1 -; GCN-NEXT: v_subbrev_co_u32_e64 v16, s[2:3], 0, v14, s[0:1] -; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v16, v0 -; GCN-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_ge_u32_e64 s[2:3], v15, v1 -; GCN-NEXT: v_subb_co_u32_e32 v4, vcc, v7, v4, vcc -; GCN-NEXT: v_cndmask_b32_e64 v18, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], v16, v0 -; GCN-NEXT: 
v_subb_co_u32_e64 v14, s[0:1], v14, v0, s[0:1] -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v4, v0 -; GCN-NEXT: v_cndmask_b32_e64 v17, v17, v18, s[2:3] -; GCN-NEXT: v_sub_co_u32_e64 v18, s[0:1], v15, v1 -; GCN-NEXT: v_cndmask_b32_e64 v7, 0, -1, vcc -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v3, v1 -; GCN-NEXT: v_subbrev_co_u32_e64 v14, s[0:1], 0, v14, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v4, v0 -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v17 -; GCN-NEXT: v_cndmask_b32_e32 v0, v7, v1, vcc -; GCN-NEXT: v_cndmask_b32_e64 v15, v15, v18, s[0:1] -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; GCN-NEXT: v_cndmask_b32_e64 v14, v16, v14, s[0:1] -; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v15, vcc -; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v14, vcc -; GCN-NEXT: v_xor_b32_e32 v1, v1, v5 -; GCN-NEXT: v_xor_b32_e32 v0, v0, v5 -; GCN-NEXT: v_sub_co_u32_e32 v14, vcc, v1, v5 -; GCN-NEXT: v_subb_co_u32_e32 v15, vcc, v0, v5, vcc -; GCN-NEXT: s_cbranch_execnz .LBB12_12 +; GCN-NEXT: s_ashr_i32 s12, s9, 31 +; GCN-NEXT: s_add_u32 s14, s8, s12 +; GCN-NEXT: s_mov_b32 s13, s12 +; GCN-NEXT: s_addc_u32 s15, s9, s12 +; GCN-NEXT: s_xor_b64 s[14:15], s[14:15], s[12:13] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s14 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s15 +; GCN-NEXT: s_sub_u32 s9, 0, s14 +; GCN-NEXT: s_subb_u32 s18, 0, s15 +; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 +; GCN-NEXT: v_rcp_f32_e32 v0, v0 +; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 +; GCN-NEXT: v_trunc_f32_e32 v1, v1 +; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_readfirstlane_b32 s19, v1 +; GCN-NEXT: v_readfirstlane_b32 s16, v0 +; GCN-NEXT: s_mul_i32 s17, s9, s19 +; GCN-NEXT: s_mul_hi_u32 s21, s9, s16 +; GCN-NEXT: s_mul_i32 s20, s18, s16 +; GCN-NEXT: s_add_i32 s17, s21, s17 +; GCN-NEXT: s_add_i32 s17, s17, s20 +; GCN-NEXT: s_mul_i32 s22, s9, s16 +; GCN-NEXT: s_mul_i32 s21, s16, s17 +; GCN-NEXT: s_mul_hi_u32 s23, s16, s22 +; GCN-NEXT: s_mul_hi_u32 s20, s16, s17 +; GCN-NEXT: s_add_u32 s21, s23, s21 +; GCN-NEXT: s_addc_u32 s20, 0, s20 +; GCN-NEXT: s_mul_hi_u32 s24, s19, s22 +; GCN-NEXT: s_mul_i32 s22, s19, s22 +; GCN-NEXT: s_add_u32 s21, s21, s22 +; GCN-NEXT: s_mul_hi_u32 s23, s19, s17 +; GCN-NEXT: s_addc_u32 s20, s20, s24 +; GCN-NEXT: s_addc_u32 s21, s23, 0 +; GCN-NEXT: s_mul_i32 s17, s19, s17 +; GCN-NEXT: s_add_u32 s17, s20, s17 +; GCN-NEXT: s_addc_u32 s20, 0, s21 +; GCN-NEXT: s_add_u32 s21, s16, s17 +; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_addc_u32 s19, s19, s20 +; GCN-NEXT: s_mul_i32 s16, s9, s19 +; GCN-NEXT: s_mul_hi_u32 s17, s9, s21 +; GCN-NEXT: s_add_i32 s16, s17, s16 +; GCN-NEXT: s_mul_i32 s18, s18, s21 +; GCN-NEXT: s_add_i32 s16, s16, s18 +; GCN-NEXT: s_mul_i32 s9, s9, s21 +; GCN-NEXT: s_mul_hi_u32 s18, s19, s9 +; GCN-NEXT: s_mul_i32 s20, s19, s9 +; GCN-NEXT: s_mul_i32 s23, s21, s16 +; GCN-NEXT: s_mul_hi_u32 s9, s21, s9 +; GCN-NEXT: s_mul_hi_u32 s22, s21, s16 +; GCN-NEXT: s_add_u32 s9, s9, s23 +; GCN-NEXT: s_addc_u32 s22, 0, s22 +; GCN-NEXT: s_add_u32 s9, s9, s20 +; GCN-NEXT: s_mul_hi_u32 s17, s19, s16 +; GCN-NEXT: s_addc_u32 s9, s22, s18 +; GCN-NEXT: s_addc_u32 s17, s17, 0 +; GCN-NEXT: s_mul_i32 s16, s19, s16 +; GCN-NEXT: s_add_u32 s9, s9, s16 +; GCN-NEXT: s_addc_u32 s18, 0, s17 +; GCN-NEXT: s_add_u32 s9, s21, s9 +; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_addc_u32 s20, s19, s18 +; GCN-NEXT: 
s_ashr_i32 s16, s11, 31 +; GCN-NEXT: s_add_u32 s18, s10, s16 +; GCN-NEXT: s_mov_b32 s17, s16 +; GCN-NEXT: s_addc_u32 s19, s11, s16 +; GCN-NEXT: s_xor_b64 s[18:19], s[18:19], s[16:17] +; GCN-NEXT: s_mul_i32 s21, s18, s20 +; GCN-NEXT: s_mul_hi_u32 s22, s18, s9 +; GCN-NEXT: s_mul_hi_u32 s11, s18, s20 +; GCN-NEXT: s_add_u32 s21, s22, s21 +; GCN-NEXT: s_addc_u32 s11, 0, s11 +; GCN-NEXT: s_mul_hi_u32 s23, s19, s9 +; GCN-NEXT: s_mul_i32 s9, s19, s9 +; GCN-NEXT: s_add_u32 s9, s21, s9 +; GCN-NEXT: s_mul_hi_u32 s22, s19, s20 +; GCN-NEXT: s_addc_u32 s9, s11, s23 +; GCN-NEXT: s_addc_u32 s11, s22, 0 +; GCN-NEXT: s_mul_i32 s20, s19, s20 +; GCN-NEXT: s_add_u32 s9, s9, s20 +; GCN-NEXT: s_addc_u32 s11, 0, s11 +; GCN-NEXT: s_mul_i32 s11, s14, s11 +; GCN-NEXT: s_mul_hi_u32 s20, s14, s9 +; GCN-NEXT: s_add_i32 s11, s20, s11 +; GCN-NEXT: s_mul_i32 s20, s15, s9 +; GCN-NEXT: s_add_i32 s11, s11, s20 +; GCN-NEXT: s_sub_i32 s22, s19, s11 +; GCN-NEXT: s_mul_i32 s9, s14, s9 +; GCN-NEXT: s_sub_u32 s9, s18, s9 +; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0 +; GCN-NEXT: s_subb_u32 s18, s22, s15 +; GCN-NEXT: s_sub_u32 s24, s9, s14 +; GCN-NEXT: s_cselect_b64 s[22:23], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[22:23], 0 +; GCN-NEXT: s_subb_u32 s25, s18, 0 +; GCN-NEXT: s_cmp_ge_u32 s25, s15 +; GCN-NEXT: s_cselect_b32 s26, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s24, s14 +; GCN-NEXT: s_cselect_b32 s27, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s25, s15 +; GCN-NEXT: s_cselect_b32 s26, s27, s26 +; GCN-NEXT: s_cmp_lg_u64 s[22:23], 0 +; GCN-NEXT: s_subb_u32 s18, s18, s15 +; GCN-NEXT: s_sub_u32 s27, s24, s14 +; GCN-NEXT: s_cselect_b64 s[22:23], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[22:23], 0 +; GCN-NEXT: s_subb_u32 s18, s18, 0 +; GCN-NEXT: s_cmp_lg_u32 s26, 0 +; GCN-NEXT: s_cselect_b32 s22, s27, s24 +; GCN-NEXT: s_cselect_b32 s18, s18, s25 +; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0 +; GCN-NEXT: s_subb_u32 s11, s19, s11 +; GCN-NEXT: s_cmp_ge_u32 s11, s15 +; GCN-NEXT: s_cselect_b32 s19, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s9, s14 +; GCN-NEXT: s_cselect_b32 s14, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s11, s15 +; GCN-NEXT: s_cselect_b32 s14, s14, s19 +; GCN-NEXT: s_cmp_lg_u32 s14, 0 +; GCN-NEXT: s_cselect_b32 s15, s18, s11 +; GCN-NEXT: s_cselect_b32 s14, s22, s9 +; GCN-NEXT: s_xor_b64 s[14:15], s[14:15], s[16:17] +; GCN-NEXT: s_sub_u32 s14, s14, s16 +; GCN-NEXT: s_subb_u32 s15, s15, s16 +; GCN-NEXT: s_cbranch_execnz .LBB12_13 ; GCN-NEXT: .LBB12_11: -; GCN-NEXT: v_cvt_f32_u32_e32 v0, v2 -; GCN-NEXT: v_sub_u32_e32 v1, 0, v2 -; GCN-NEXT: v_mov_b32_e32 v15, 0 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8 +; GCN-NEXT: s_sub_i32 s9, 0, s8 +; GCN-NEXT: v_mov_b32_e32 v5, 0 ; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_mul_lo_u32 v1, v1, v0 +; GCN-NEXT: v_mul_lo_u32 v1, s9, v0 ; GCN-NEXT: v_mul_hi_u32 v1, v0, v1 ; GCN-NEXT: v_add_u32_e32 v0, v0, v1 -; GCN-NEXT: v_mul_hi_u32 v0, v6, v0 -; GCN-NEXT: v_mul_lo_u32 v0, v0, v2 -; GCN-NEXT: v_sub_u32_e32 v0, v6, v0 -; GCN-NEXT: v_sub_u32_e32 v1, v0, v2 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 +; GCN-NEXT: v_mul_hi_u32 v0, s10, v0 +; GCN-NEXT: v_mul_lo_u32 v0, v0, s8 +; GCN-NEXT: v_sub_u32_e32 v0, s10, v0 +; GCN-NEXT: v_subrev_u32_e32 v1, s8, v0 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s8, v0 ; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GCN-NEXT: v_sub_u32_e32 v1, v0, v2 -; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 -; GCN-NEXT: v_cndmask_b32_e32 v14, v0, v1, vcc +; GCN-NEXT: v_subrev_u32_e32 v1, s8, v0 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, 
s8, v0 +; GCN-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc +; GCN-NEXT: s_branch .LBB12_14 ; GCN-NEXT: .LBB12_12: -; GCN-NEXT: v_mov_b32_e32 v0, 0 -; GCN-NEXT: global_store_dwordx4 v0, v[12:15], s[8:9] offset:16 -; GCN-NEXT: global_store_dwordx4 v0, v[8:11], s[8:9] -; GCN-NEXT: s_endpgm +; GCN-NEXT: ; implicit-def: $sgpr14_sgpr15 +; GCN-NEXT: s_branch .LBB12_11 ; GCN-NEXT: .LBB12_13: -; GCN-NEXT: ; implicit-def: $vgpr8_vgpr9 -; GCN-NEXT: s_branch .LBB12_2 +; GCN-NEXT: v_mov_b32_e32 v4, s14 +; GCN-NEXT: v_mov_b32_e32 v5, s15 ; GCN-NEXT: .LBB12_14: -; GCN-NEXT: s_branch .LBB12_5 -; GCN-NEXT: .LBB12_15: -; GCN-NEXT: ; implicit-def: $vgpr12_vgpr13 -; GCN-NEXT: s_branch .LBB12_8 +; GCN-NEXT: s_or_b64 s[8:9], s[4:5], s[2:3] +; GCN-NEXT: s_mov_b32 s8, 0 +; GCN-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GCN-NEXT: s_cbranch_scc0 .LBB12_17 +; GCN-NEXT: ; %bb.15: +; GCN-NEXT: s_ashr_i32 s8, s3, 31 +; GCN-NEXT: s_add_u32 s10, s2, s8 +; GCN-NEXT: s_mov_b32 s9, s8 +; GCN-NEXT: s_addc_u32 s11, s3, s8 +; GCN-NEXT: s_xor_b64 s[10:11], s[10:11], s[8:9] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s10 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s11 +; GCN-NEXT: s_sub_u32 s3, 0, s10 +; GCN-NEXT: s_subb_u32 s14, 0, s11 +; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 +; GCN-NEXT: v_rcp_f32_e32 v0, v0 +; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 +; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 +; GCN-NEXT: v_trunc_f32_e32 v1, v1 +; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_readfirstlane_b32 s15, v1 +; GCN-NEXT: v_readfirstlane_b32 s12, v0 +; GCN-NEXT: s_mul_i32 s13, s3, s15 +; GCN-NEXT: s_mul_hi_u32 s17, s3, s12 +; GCN-NEXT: s_mul_i32 s16, s14, s12 +; GCN-NEXT: s_add_i32 s13, s17, s13 +; GCN-NEXT: s_add_i32 s13, s13, s16 +; GCN-NEXT: s_mul_i32 s18, s3, s12 +; GCN-NEXT: s_mul_i32 s17, s12, s13 +; GCN-NEXT: s_mul_hi_u32 s19, s12, s18 +; GCN-NEXT: s_mul_hi_u32 s16, s12, s13 +; GCN-NEXT: s_add_u32 s17, s19, s17 +; GCN-NEXT: s_addc_u32 s16, 0, s16 +; GCN-NEXT: s_mul_hi_u32 s20, s15, s18 +; GCN-NEXT: s_mul_i32 s18, s15, s18 +; GCN-NEXT: s_add_u32 s17, s17, s18 +; GCN-NEXT: s_mul_hi_u32 s19, s15, s13 +; GCN-NEXT: s_addc_u32 s16, s16, s20 +; GCN-NEXT: s_addc_u32 s17, s19, 0 +; GCN-NEXT: s_mul_i32 s13, s15, s13 +; GCN-NEXT: s_add_u32 s13, s16, s13 +; GCN-NEXT: s_addc_u32 s16, 0, s17 +; GCN-NEXT: s_add_u32 s17, s12, s13 +; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GCN-NEXT: s_addc_u32 s15, s15, s16 +; GCN-NEXT: s_mul_i32 s12, s3, s15 +; GCN-NEXT: s_mul_hi_u32 s13, s3, s17 +; GCN-NEXT: s_add_i32 s12, s13, s12 +; GCN-NEXT: s_mul_i32 s14, s14, s17 +; GCN-NEXT: s_add_i32 s12, s12, s14 +; GCN-NEXT: s_mul_i32 s3, s3, s17 +; GCN-NEXT: s_mul_hi_u32 s14, s15, s3 +; GCN-NEXT: s_mul_i32 s16, s15, s3 +; GCN-NEXT: s_mul_i32 s19, s17, s12 +; GCN-NEXT: s_mul_hi_u32 s3, s17, s3 +; GCN-NEXT: s_mul_hi_u32 s18, s17, s12 +; GCN-NEXT: s_add_u32 s3, s3, s19 +; GCN-NEXT: s_addc_u32 s18, 0, s18 +; GCN-NEXT: s_add_u32 s3, s3, s16 +; GCN-NEXT: s_mul_hi_u32 s13, s15, s12 +; GCN-NEXT: s_addc_u32 s3, s18, s14 +; GCN-NEXT: s_addc_u32 s13, s13, 0 +; GCN-NEXT: s_mul_i32 s12, s15, s12 +; GCN-NEXT: s_add_u32 s3, s3, s12 +; GCN-NEXT: s_addc_u32 s14, 0, s13 +; GCN-NEXT: s_add_u32 s3, s17, s3 +; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[12:13], 0 +; GCN-NEXT: s_addc_u32 s16, s15, s14 +; GCN-NEXT: s_ashr_i32 s12, s5, 31 +; GCN-NEXT: s_add_u32 s14, s4, s12 +; GCN-NEXT: s_mov_b32 s13, s12 +; GCN-NEXT: s_addc_u32 s15, s5, s12 +; GCN-NEXT: 
s_xor_b64 s[14:15], s[14:15], s[12:13] +; GCN-NEXT: s_mul_i32 s17, s14, s16 +; GCN-NEXT: s_mul_hi_u32 s18, s14, s3 +; GCN-NEXT: s_mul_hi_u32 s5, s14, s16 +; GCN-NEXT: s_add_u32 s17, s18, s17 +; GCN-NEXT: s_addc_u32 s5, 0, s5 +; GCN-NEXT: s_mul_hi_u32 s19, s15, s3 +; GCN-NEXT: s_mul_i32 s3, s15, s3 +; GCN-NEXT: s_add_u32 s3, s17, s3 +; GCN-NEXT: s_mul_hi_u32 s18, s15, s16 +; GCN-NEXT: s_addc_u32 s3, s5, s19 +; GCN-NEXT: s_addc_u32 s5, s18, 0 +; GCN-NEXT: s_mul_i32 s16, s15, s16 +; GCN-NEXT: s_add_u32 s3, s3, s16 +; GCN-NEXT: s_addc_u32 s5, 0, s5 +; GCN-NEXT: s_mul_i32 s5, s10, s5 +; GCN-NEXT: s_mul_hi_u32 s16, s10, s3 +; GCN-NEXT: s_add_i32 s5, s16, s5 +; GCN-NEXT: s_mul_i32 s16, s11, s3 +; GCN-NEXT: s_add_i32 s5, s5, s16 +; GCN-NEXT: s_sub_i32 s18, s15, s5 +; GCN-NEXT: s_mul_i32 s3, s10, s3 +; GCN-NEXT: s_sub_u32 s3, s14, s3 +; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_subb_u32 s14, s18, s11 +; GCN-NEXT: s_sub_u32 s20, s3, s10 +; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0 +; GCN-NEXT: s_subb_u32 s21, s14, 0 +; GCN-NEXT: s_cmp_ge_u32 s21, s11 +; GCN-NEXT: s_cselect_b32 s22, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s20, s10 +; GCN-NEXT: s_cselect_b32 s23, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s21, s11 +; GCN-NEXT: s_cselect_b32 s22, s23, s22 +; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0 +; GCN-NEXT: s_subb_u32 s14, s14, s11 +; GCN-NEXT: s_sub_u32 s23, s20, s10 +; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0 +; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0 +; GCN-NEXT: s_subb_u32 s14, s14, 0 +; GCN-NEXT: s_cmp_lg_u32 s22, 0 +; GCN-NEXT: s_cselect_b32 s18, s23, s20 +; GCN-NEXT: s_cselect_b32 s14, s14, s21 +; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0 +; GCN-NEXT: s_subb_u32 s5, s15, s5 +; GCN-NEXT: s_cmp_ge_u32 s5, s11 +; GCN-NEXT: s_cselect_b32 s15, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s3, s10 +; GCN-NEXT: s_cselect_b32 s10, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s5, s11 +; GCN-NEXT: s_cselect_b32 s10, s10, s15 +; GCN-NEXT: s_cmp_lg_u32 s10, 0 +; GCN-NEXT: s_cselect_b32 s11, s14, s5 +; GCN-NEXT: s_cselect_b32 s10, s18, s3 +; GCN-NEXT: s_xor_b64 s[10:11], s[10:11], s[12:13] +; GCN-NEXT: s_sub_u32 s10, s10, s12 +; GCN-NEXT: s_subb_u32 s11, s11, s12 +; GCN-NEXT: s_cbranch_execnz .LBB12_18 ; GCN-NEXT: .LBB12_16: -; GCN-NEXT: s_branch .LBB12_11 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2 +; GCN-NEXT: s_sub_i32 s3, 0, s2 +; GCN-NEXT: v_mov_b32_e32 v7, 0 +; GCN-NEXT: v_rcp_iflag_f32_e32 v0, v0 +; GCN-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GCN-NEXT: v_mul_lo_u32 v1, s3, v0 +; GCN-NEXT: v_mul_hi_u32 v1, v0, v1 +; GCN-NEXT: v_add_u32_e32 v0, v0, v1 +; GCN-NEXT: v_mul_hi_u32 v0, s4, v0 +; GCN-NEXT: v_mul_lo_u32 v0, v0, s2 +; GCN-NEXT: v_sub_u32_e32 v0, s4, v0 +; GCN-NEXT: v_subrev_u32_e32 v1, s2, v0 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s2, v0 +; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; GCN-NEXT: v_subrev_u32_e32 v1, s2, v0 +; GCN-NEXT: v_cmp_le_u32_e32 vcc, s2, v0 +; GCN-NEXT: v_cndmask_b32_e32 v6, v0, v1, vcc +; GCN-NEXT: s_branch .LBB12_19 +; GCN-NEXT: .LBB12_17: +; GCN-NEXT: ; implicit-def: $sgpr10_sgpr11 +; GCN-NEXT: s_branch .LBB12_16 +; GCN-NEXT: .LBB12_18: +; GCN-NEXT: v_mov_b32_e32 v6, s10 +; GCN-NEXT: v_mov_b32_e32 v7, s11 +; GCN-NEXT: .LBB12_19: +; GCN-NEXT: v_mov_b32_e32 v8, 0 +; GCN-NEXT: v_mov_b32_e32 v0, s6 +; GCN-NEXT: v_mov_b32_e32 v1, s7 +; GCN-NEXT: global_store_dwordx4 v8, v[4:7], s[0:1] offset:16 +; GCN-NEXT: global_store_dwordx4 v8, v[0:3], s[0:1] +; GCN-NEXT: s_endpgm ; ; TAHITI-LABEL: srem_v4i64: ; TAHITI: ; %bb.0: 
@@ -5546,7 +5639,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TAHITI-NEXT: v_mul_lo_u32 v20, v20, v11 ; TAHITI-NEXT: v_mul_lo_u32 v19, v19, v11 ; TAHITI-NEXT: v_add_i32_e32 v21, vcc, v21, v22 -; TAHITI-NEXT: v_add_i32_e32 v20, vcc, v21, v20 +; TAHITI-NEXT: v_add_i32_e32 v20, vcc, v20, v21 ; TAHITI-NEXT: v_mul_lo_u32 v23, v11, v20 ; TAHITI-NEXT: v_mul_hi_u32 v24, v11, v19 ; TAHITI-NEXT: v_mul_hi_u32 v25, v11, v20 @@ -5689,7 +5782,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TAHITI-NEXT: v_mul_lo_u32 v18, v18, v13 ; TAHITI-NEXT: v_mul_lo_u32 v15, v15, v13 ; TAHITI-NEXT: v_add_i32_e32 v19, vcc, v19, v20 -; TAHITI-NEXT: v_add_i32_e32 v18, vcc, v19, v18 +; TAHITI-NEXT: v_add_i32_e32 v18, vcc, v18, v19 ; TAHITI-NEXT: v_mul_lo_u32 v21, v13, v18 ; TAHITI-NEXT: v_mul_hi_u32 v22, v13, v15 ; TAHITI-NEXT: v_mul_hi_u32 v23, v13, v18 @@ -5833,7 +5926,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TAHITI-NEXT: v_mul_lo_u32 v16, v16, v13 ; TAHITI-NEXT: v_mul_lo_u32 v15, v15, v13 ; TAHITI-NEXT: v_add_i32_e32 v17, vcc, v17, v18 -; TAHITI-NEXT: v_add_i32_e32 v16, vcc, v17, v16 +; TAHITI-NEXT: v_add_i32_e32 v16, vcc, v16, v17 ; TAHITI-NEXT: v_mul_lo_u32 v19, v13, v16 ; TAHITI-NEXT: v_mul_hi_u32 v20, v13, v15 ; TAHITI-NEXT: v_mul_hi_u32 v21, v13, v16 @@ -5976,7 +6069,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TAHITI-NEXT: v_mul_lo_u32 v14, v14, v3 ; TAHITI-NEXT: v_mul_lo_u32 v5, v5, v3 ; TAHITI-NEXT: v_add_i32_e32 v15, vcc, v15, v16 -; TAHITI-NEXT: v_add_i32_e32 v14, vcc, v15, v14 +; TAHITI-NEXT: v_add_i32_e32 v14, vcc, v14, v15 ; TAHITI-NEXT: v_mul_lo_u32 v17, v3, v14 ; TAHITI-NEXT: v_mul_hi_u32 v18, v3, v5 ; TAHITI-NEXT: v_mul_hi_u32 v19, v3, v14 @@ -6089,7 +6182,6 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-LABEL: srem_v4i64: ; TONGA: ; %bb.0: ; TONGA-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24 -; TONGA-NEXT: v_mov_b32_e32 v8, 0 ; TONGA-NEXT: s_waitcnt lgkmcnt(0) ; TONGA-NEXT: s_add_u32 s0, s6, 48 ; TONGA-NEXT: v_mov_b32_e32 v0, s6 @@ -6109,249 +6201,279 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_mov_b32_e32 v4, s0 ; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1] ; TONGA-NEXT: flat_load_dwordx4 v[4:7], v[4:5] +; TONGA-NEXT: s_waitcnt vmcnt(3) +; TONGA-NEXT: v_readfirstlane_b32 s3, v15 +; TONGA-NEXT: v_readfirstlane_b32 s2, v14 ; TONGA-NEXT: s_waitcnt vmcnt(2) -; TONGA-NEXT: v_or_b32_e32 v9, v15, v11 -; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9] -; TONGA-NEXT: s_cbranch_vccz .LBB12_13 +; TONGA-NEXT: v_readfirstlane_b32 s1, v11 +; TONGA-NEXT: v_readfirstlane_b32 s0, v10 +; TONGA-NEXT: s_or_b64 s[6:7], s[2:3], s[0:1] +; TONGA-NEXT: s_mov_b32 s6, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[6:7], 0 +; TONGA-NEXT: s_cbranch_scc0 .LBB12_3 ; TONGA-NEXT: ; %bb.1: -; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v11 -; TONGA-NEXT: v_add_u32_e32 v9, vcc, v10, v8 -; TONGA-NEXT: v_addc_u32_e32 v11, vcc, v11, v8, vcc -; TONGA-NEXT: v_xor_b32_e32 v9, v9, v8 -; TONGA-NEXT: v_xor_b32_e32 v8, v11, v8 -; TONGA-NEXT: v_cvt_f32_u32_e32 v11, v9 -; TONGA-NEXT: v_cvt_f32_u32_e32 v18, v8 -; TONGA-NEXT: v_sub_u32_e32 v23, vcc, 0, v9 -; TONGA-NEXT: v_subb_u32_e32 v24, vcc, 0, v8, vcc -; TONGA-NEXT: v_madmk_f32 v11, v18, 0x4f800000, v11 -; TONGA-NEXT: v_rcp_f32_e32 v11, v11 -; TONGA-NEXT: v_mul_f32_e32 v11, 0x5f7ffffc, v11 -; TONGA-NEXT: v_mul_f32_e32 v18, 0x2f800000, v11 -; 
TONGA-NEXT: v_trunc_f32_e32 v18, v18 -; TONGA-NEXT: v_madmk_f32 v11, v18, 0xcf800000, v11 -; TONGA-NEXT: v_cvt_u32_f32_e32 v22, v18 -; TONGA-NEXT: v_cvt_u32_f32_e32 v11, v11 -; TONGA-NEXT: v_mul_lo_u32 v20, v23, v22 -; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v23, v11, 0 -; TONGA-NEXT: v_mul_lo_u32 v21, v24, v11 -; TONGA-NEXT: v_add_u32_e32 v19, vcc, v19, v20 -; TONGA-NEXT: v_add_u32_e32 v21, vcc, v19, v21 -; TONGA-NEXT: v_mad_u64_u32 v[19:20], s[0:1], v11, v21, 0 -; TONGA-NEXT: v_mul_hi_u32 v25, v11, v18 -; TONGA-NEXT: v_add_u32_e32 v25, vcc, v25, v19 -; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v22, v18, 0 -; TONGA-NEXT: v_addc_u32_e32 v26, vcc, 0, v20, vcc -; TONGA-NEXT: v_mad_u64_u32 v[20:21], s[0:1], v22, v21, 0 -; TONGA-NEXT: v_add_u32_e32 v18, vcc, v25, v18 -; TONGA-NEXT: v_addc_u32_e32 v18, vcc, v26, v19, vcc -; TONGA-NEXT: v_addc_u32_e32 v19, vcc, 0, v21, vcc -; TONGA-NEXT: v_add_u32_e32 v18, vcc, v18, v20 -; TONGA-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v18 -; TONGA-NEXT: v_addc_u32_e32 v25, vcc, v22, v19, vcc -; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v23, v11, 0 -; TONGA-NEXT: v_mul_lo_u32 v22, v23, v25 -; TONGA-NEXT: v_mul_lo_u32 v23, v24, v11 -; TONGA-NEXT: v_mul_hi_u32 v24, v11, v18 -; TONGA-NEXT: v_mad_u64_u32 v[20:21], s[0:1], v25, v18, 0 -; TONGA-NEXT: v_add_u32_e32 v19, vcc, v22, v19 -; TONGA-NEXT: v_add_u32_e32 v19, vcc, v19, v23 -; TONGA-NEXT: v_mad_u64_u32 v[22:23], s[0:1], v11, v19, 0 -; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v25, v19, 0 -; TONGA-NEXT: v_add_u32_e32 v22, vcc, v24, v22 -; TONGA-NEXT: v_addc_u32_e32 v23, vcc, 0, v23, vcc -; TONGA-NEXT: v_add_u32_e32 v20, vcc, v22, v20 -; TONGA-NEXT: v_addc_u32_e32 v20, vcc, v23, v21, vcc -; TONGA-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc -; TONGA-NEXT: v_add_u32_e32 v18, vcc, v20, v18 -; TONGA-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v18 -; TONGA-NEXT: v_addc_u32_e32 v20, vcc, v25, v19, vcc -; TONGA-NEXT: v_ashrrev_i32_e32 v22, 31, v15 -; TONGA-NEXT: v_add_u32_e32 v18, vcc, v14, v22 -; TONGA-NEXT: v_xor_b32_e32 v23, v18, v22 -; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v23, v20, 0 -; TONGA-NEXT: v_mul_hi_u32 v21, v23, v11 -; TONGA-NEXT: v_addc_u32_e32 v15, vcc, v15, v22, vcc -; TONGA-NEXT: v_xor_b32_e32 v15, v15, v22 -; TONGA-NEXT: v_add_u32_e32 v24, vcc, v21, v18 -; TONGA-NEXT: v_addc_u32_e32 v25, vcc, 0, v19, vcc -; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v15, v11, 0 -; TONGA-NEXT: v_mad_u64_u32 v[20:21], s[0:1], v15, v20, 0 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v24, v18 -; TONGA-NEXT: v_addc_u32_e32 v11, vcc, v25, v19, vcc -; TONGA-NEXT: v_addc_u32_e32 v18, vcc, 0, v21, vcc -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v20 -; TONGA-NEXT: v_addc_u32_e32 v18, vcc, 0, v18, vcc -; TONGA-NEXT: v_mul_lo_u32 v20, v9, v18 -; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v9, v11, 0 -; TONGA-NEXT: v_mul_lo_u32 v11, v8, v11 -; TONGA-NEXT: v_add_u32_e32 v19, vcc, v20, v19 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v19 -; TONGA-NEXT: v_sub_u32_e32 v19, vcc, v15, v11 -; TONGA-NEXT: v_sub_u32_e32 v18, vcc, v23, v18 -; TONGA-NEXT: v_subb_u32_e64 v19, s[0:1], v19, v8, vcc -; TONGA-NEXT: v_sub_u32_e64 v20, s[0:1], v18, v9 -; TONGA-NEXT: v_subbrev_u32_e64 v21, s[2:3], 0, v19, s[0:1] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v21, v8 -; TONGA-NEXT: v_cndmask_b32_e64 v23, 0, -1, s[2:3] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v20, v9 -; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v15, v11, vcc -; TONGA-NEXT: v_cndmask_b32_e64 v24, 0, -1, 
s[2:3] -; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v21, v8 -; TONGA-NEXT: v_subb_u32_e64 v19, s[0:1], v19, v8, s[0:1] -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v11, v8 -; TONGA-NEXT: v_cndmask_b32_e64 v23, v23, v24, s[2:3] -; TONGA-NEXT: v_sub_u32_e64 v24, s[0:1], v20, v9 -; TONGA-NEXT: v_cndmask_b32_e64 v15, 0, -1, vcc -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v18, v9 -; TONGA-NEXT: v_subbrev_u32_e64 v19, s[0:1], 0, v19, s[0:1] -; TONGA-NEXT: v_cndmask_b32_e64 v9, 0, -1, vcc -; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v11, v8 -; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v23 -; TONGA-NEXT: v_cndmask_b32_e32 v8, v15, v9, vcc -; TONGA-NEXT: v_cndmask_b32_e64 v20, v20, v24, s[0:1] -; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8 -; TONGA-NEXT: v_cndmask_b32_e64 v19, v21, v19, s[0:1] -; TONGA-NEXT: v_cndmask_b32_e32 v9, v18, v20, vcc -; TONGA-NEXT: v_cndmask_b32_e32 v8, v11, v19, vcc -; TONGA-NEXT: v_xor_b32_e32 v9, v9, v22 -; TONGA-NEXT: v_xor_b32_e32 v11, v8, v22 -; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v9, v22 -; TONGA-NEXT: v_subb_u32_e32 v9, vcc, v11, v22, vcc -; TONGA-NEXT: s_cbranch_execnz .LBB12_3 +; TONGA-NEXT: s_ashr_i32 s6, s1, 31 +; TONGA-NEXT: s_add_u32 s8, s0, s6 +; TONGA-NEXT: s_mov_b32 s7, s6 +; TONGA-NEXT: s_addc_u32 s9, s1, s6 +; TONGA-NEXT: s_xor_b64 s[6:7], s[8:9], s[6:7] +; TONGA-NEXT: v_cvt_f32_u32_e32 v8, s6 +; TONGA-NEXT: v_cvt_f32_u32_e32 v9, s7 +; TONGA-NEXT: s_sub_u32 s1, 0, s6 +; TONGA-NEXT: s_subb_u32 s10, 0, s7 +; TONGA-NEXT: v_madmk_f32 v8, v9, 0x4f800000, v8 +; TONGA-NEXT: v_rcp_f32_e32 v8, v8 +; TONGA-NEXT: v_mul_f32_e32 v8, 0x5f7ffffc, v8 +; TONGA-NEXT: v_mul_f32_e32 v9, 0x2f800000, v8 +; TONGA-NEXT: v_trunc_f32_e32 v9, v9 +; TONGA-NEXT: v_madmk_f32 v8, v9, 0xcf800000, v8 +; TONGA-NEXT: v_cvt_u32_f32_e32 v14, v9 +; TONGA-NEXT: v_cvt_u32_f32_e32 v15, v8 +; TONGA-NEXT: v_mul_lo_u32 v10, s1, v14 +; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[8:9], s1, v15, 0 +; TONGA-NEXT: v_mul_lo_u32 v11, s10, v15 +; TONGA-NEXT: v_add_u32_e32 v9, vcc, v9, v10 +; TONGA-NEXT: v_add_u32_e32 v11, vcc, v9, v11 +; TONGA-NEXT: v_mul_hi_u32 v18, v15, v8 +; TONGA-NEXT: v_mad_u64_u32 v[9:10], s[8:9], v15, v11, 0 +; TONGA-NEXT: v_add_u32_e32 v18, vcc, v18, v9 +; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[8:9], v14, v8, 0 +; TONGA-NEXT: v_addc_u32_e32 v19, vcc, 0, v10, vcc +; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[8:9], v14, v11, 0 +; TONGA-NEXT: v_add_u32_e32 v8, vcc, v18, v8 +; TONGA-NEXT: v_addc_u32_e32 v8, vcc, v19, v9, vcc +; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v11, vcc +; TONGA-NEXT: v_add_u32_e32 v8, vcc, v8, v10 +; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc +; TONGA-NEXT: v_add_u32_e32 v18, vcc, v15, v8 +; TONGA-NEXT: v_addc_u32_e32 v19, vcc, v14, v9, vcc +; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[8:9], s1, v18, 0 +; TONGA-NEXT: v_mul_lo_u32 v14, s1, v19 +; TONGA-NEXT: v_mul_lo_u32 v15, s10, v18 +; TONGA-NEXT: v_mul_hi_u32 v20, v18, v8 +; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[8:9], v19, v8, 0 +; TONGA-NEXT: v_add_u32_e32 v9, vcc, v14, v9 +; TONGA-NEXT: v_add_u32_e32 v9, vcc, v15, v9 +; TONGA-NEXT: v_mad_u64_u32 v[14:15], s[8:9], v18, v9, 0 +; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[8:9], v19, v9, 0 +; TONGA-NEXT: v_add_u32_e32 v14, vcc, v20, v14 +; TONGA-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc +; TONGA-NEXT: v_add_u32_e32 v10, vcc, v14, v10 +; TONGA-NEXT: v_addc_u32_e32 v10, vcc, v15, v11, vcc +; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc +; TONGA-NEXT: v_add_u32_e32 v8, vcc, v10, v8 +; TONGA-NEXT: s_ashr_i32 s10, s3, 31 +; TONGA-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc +; TONGA-NEXT: s_add_u32 s8, s2, s10 +; TONGA-NEXT: 
v_add_u32_e32 v10, vcc, v18, v8 +; TONGA-NEXT: s_mov_b32 s11, s10 +; TONGA-NEXT: s_addc_u32 s9, s3, s10 +; TONGA-NEXT: v_addc_u32_e32 v11, vcc, v19, v9, vcc +; TONGA-NEXT: s_xor_b64 s[12:13], s[8:9], s[10:11] +; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[8:9], s12, v11, 0 +; TONGA-NEXT: v_mul_hi_u32 v14, s12, v10 +; TONGA-NEXT: v_readfirstlane_b32 s1, v9 +; TONGA-NEXT: v_readfirstlane_b32 s3, v8 +; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[8:9], s13, v11, 0 +; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[8:9], s13, v10, 0 +; TONGA-NEXT: v_readfirstlane_b32 s14, v14 +; TONGA-NEXT: s_add_u32 s3, s14, s3 +; TONGA-NEXT: s_addc_u32 s1, 0, s1 +; TONGA-NEXT: v_readfirstlane_b32 s14, v10 +; TONGA-NEXT: v_readfirstlane_b32 s9, v11 +; TONGA-NEXT: s_add_u32 s3, s3, s14 +; TONGA-NEXT: v_readfirstlane_b32 s8, v9 +; TONGA-NEXT: s_addc_u32 s1, s1, s9 +; TONGA-NEXT: s_addc_u32 s3, s8, 0 +; TONGA-NEXT: v_readfirstlane_b32 s8, v8 +; TONGA-NEXT: s_add_u32 s1, s1, s8 +; TONGA-NEXT: v_mov_b32_e32 v8, s1 +; TONGA-NEXT: v_mad_u64_u32 v[8:9], s[8:9], s6, v8, 0 +; TONGA-NEXT: s_addc_u32 s3, 0, s3 +; TONGA-NEXT: s_mul_i32 s3, s6, s3 +; TONGA-NEXT: v_readfirstlane_b32 s14, v9 +; TONGA-NEXT: s_add_i32 s3, s14, s3 +; TONGA-NEXT: s_mul_i32 s1, s7, s1 +; TONGA-NEXT: s_add_i32 s3, s3, s1 +; TONGA-NEXT: s_sub_i32 s1, s13, s3 +; TONGA-NEXT: v_readfirstlane_b32 s14, v8 +; TONGA-NEXT: s_sub_u32 s12, s12, s14 +; TONGA-NEXT: s_cselect_b64 s[14:15], -1, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0 +; TONGA-NEXT: s_subb_u32 s1, s1, s7 +; TONGA-NEXT: s_sub_u32 s18, s12, s6 +; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0 +; TONGA-NEXT: s_subb_u32 s19, s1, 0 +; TONGA-NEXT: s_cmp_ge_u32 s19, s7 +; TONGA-NEXT: s_cselect_b32 s20, -1, 0 +; TONGA-NEXT: s_cmp_ge_u32 s18, s6 +; TONGA-NEXT: s_cselect_b32 s21, -1, 0 +; TONGA-NEXT: s_cmp_eq_u32 s19, s7 +; TONGA-NEXT: s_cselect_b32 s20, s21, s20 +; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0 +; TONGA-NEXT: s_subb_u32 s1, s1, s7 +; TONGA-NEXT: s_sub_u32 s21, s18, s6 +; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0 +; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0 +; TONGA-NEXT: s_subb_u32 s1, s1, 0 +; TONGA-NEXT: s_cmp_lg_u32 s20, 0 +; TONGA-NEXT: s_cselect_b32 s16, s21, s18 +; TONGA-NEXT: s_cselect_b32 s1, s1, s19 +; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0 +; TONGA-NEXT: s_subb_u32 s3, s13, s3 +; TONGA-NEXT: s_cmp_ge_u32 s3, s7 +; TONGA-NEXT: s_cselect_b32 s13, -1, 0 +; TONGA-NEXT: s_cmp_ge_u32 s12, s6 +; TONGA-NEXT: s_cselect_b32 s6, -1, 0 +; TONGA-NEXT: s_cmp_eq_u32 s3, s7 +; TONGA-NEXT: s_cselect_b32 s6, s6, s13 +; TONGA-NEXT: s_cmp_lg_u32 s6, 0 +; TONGA-NEXT: s_cselect_b32 s7, s1, s3 +; TONGA-NEXT: s_cselect_b32 s6, s16, s12 +; TONGA-NEXT: s_xor_b64 s[6:7], s[6:7], s[10:11] +; TONGA-NEXT: s_sub_u32 s6, s6, s10 +; TONGA-NEXT: s_subb_u32 s7, s7, s10 +; TONGA-NEXT: s_cbranch_execnz .LBB12_4 ; TONGA-NEXT: .LBB12_2: -; TONGA-NEXT: v_cvt_f32_u32_e32 v8, v10 -; TONGA-NEXT: v_sub_u32_e32 v9, vcc, 0, v10 +; TONGA-NEXT: v_cvt_f32_u32_e32 v8, s0 +; TONGA-NEXT: s_sub_i32 s1, 0, s0 ; TONGA-NEXT: v_rcp_iflag_f32_e32 v8, v8 ; TONGA-NEXT: v_mul_f32_e32 v8, 0x4f7ffffe, v8 ; TONGA-NEXT: v_cvt_u32_f32_e32 v8, v8 -; TONGA-NEXT: v_mul_lo_u32 v9, v9, v8 +; TONGA-NEXT: v_mul_lo_u32 v9, s1, v8 ; TONGA-NEXT: v_mul_hi_u32 v9, v8, v9 ; TONGA-NEXT: v_add_u32_e32 v8, vcc, v8, v9 -; TONGA-NEXT: v_mul_hi_u32 v8, v14, v8 -; TONGA-NEXT: v_mul_lo_u32 v8, v8, v10 -; TONGA-NEXT: v_sub_u32_e32 v8, vcc, v14, v8 -; TONGA-NEXT: v_subrev_u32_e32 v9, vcc, v10, v8 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v8, v10 +; TONGA-NEXT: v_mul_hi_u32 v8, 
s2, v8 +; TONGA-NEXT: v_mul_lo_u32 v8, v8, s0 +; TONGA-NEXT: v_sub_u32_e32 v8, vcc, s2, v8 +; TONGA-NEXT: v_subrev_u32_e32 v9, vcc, s0, v8 +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s0, v8 ; TONGA-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; TONGA-NEXT: v_sub_u32_e32 v9, vcc, v8, v10 -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v8, v10 +; TONGA-NEXT: v_subrev_u32_e32 v9, vcc, s0, v8 +; TONGA-NEXT: v_cmp_le_u32_e32 vcc, s0, v8 ; TONGA-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc ; TONGA-NEXT: v_mov_b32_e32 v9, 0 +; TONGA-NEXT: s_branch .LBB12_5 ; TONGA-NEXT: .LBB12_3: +; TONGA-NEXT: ; implicit-def: $sgpr6_sgpr7 +; TONGA-NEXT: s_branch .LBB12_2 +; TONGA-NEXT: .LBB12_4: +; TONGA-NEXT: v_mov_b32_e32 v9, s7 +; TONGA-NEXT: v_mov_b32_e32 v8, s6 +; TONGA-NEXT: .LBB12_5: ; TONGA-NEXT: v_or_b32_e32 v11, v17, v13 ; TONGA-NEXT: v_mov_b32_e32 v10, 0 ; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11] -; TONGA-NEXT: s_cbranch_vccz .LBB12_14 -; TONGA-NEXT: ; %bb.4: +; TONGA-NEXT: s_cbranch_vccz .LBB12_15 +; TONGA-NEXT: ; %bb.6: ; TONGA-NEXT: v_ashrrev_i32_e32 v10, 31, v13 ; TONGA-NEXT: v_add_u32_e32 v11, vcc, v12, v10 ; TONGA-NEXT: v_addc_u32_e32 v13, vcc, v13, v10, vcc -; TONGA-NEXT: v_xor_b32_e32 v15, v11, v10 -; TONGA-NEXT: v_xor_b32_e32 v20, v13, v10 -; TONGA-NEXT: v_cvt_f32_u32_e32 v10, v15 -; TONGA-NEXT: v_cvt_f32_u32_e32 v11, v20 -; TONGA-NEXT: v_sub_u32_e32 v21, vcc, 0, v15 -; TONGA-NEXT: v_subb_u32_e32 v22, vcc, 0, v20, vcc -; TONGA-NEXT: v_madmk_f32 v10, v11, 0x4f800000, v10 -; TONGA-NEXT: v_rcp_f32_e32 v10, v10 -; TONGA-NEXT: v_mul_f32_e32 v10, 0x5f7ffffc, v10 -; TONGA-NEXT: v_mul_f32_e32 v11, 0x2f800000, v10 -; TONGA-NEXT: v_trunc_f32_e32 v11, v11 -; TONGA-NEXT: v_madmk_f32 v10, v11, 0xcf800000, v10 -; TONGA-NEXT: v_cvt_u32_f32_e32 v18, v11 -; TONGA-NEXT: v_cvt_u32_f32_e32 v19, v10 -; TONGA-NEXT: v_mul_lo_u32 v13, v21, v18 -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v21, v19, 0 -; TONGA-NEXT: v_mul_lo_u32 v14, v22, v19 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v13 -; TONGA-NEXT: v_add_u32_e32 v23, vcc, v11, v14 -; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v19, v23, 0 -; TONGA-NEXT: v_mul_hi_u32 v11, v19, v10 -; TONGA-NEXT: v_add_u32_e32 v24, vcc, v11, v13 -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v18, v10, 0 -; TONGA-NEXT: v_addc_u32_e32 v25, vcc, 0, v14, vcc -; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v18, v23, 0 -; TONGA-NEXT: v_add_u32_e32 v10, vcc, v24, v10 -; TONGA-NEXT: v_addc_u32_e32 v10, vcc, v25, v11, vcc -; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v14, vcc -; TONGA-NEXT: v_add_u32_e32 v10, vcc, v10, v13 -; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc -; TONGA-NEXT: v_add_u32_e32 v23, vcc, v19, v10 -; TONGA-NEXT: v_addc_u32_e32 v24, vcc, v18, v11, vcc -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v21, v23, 0 -; TONGA-NEXT: v_mul_lo_u32 v18, v21, v24 -; TONGA-NEXT: v_mul_lo_u32 v19, v22, v23 -; TONGA-NEXT: v_mul_hi_u32 v21, v23, v10 -; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v24, v10, 0 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v18, v11 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v11, v19 -; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v23, v11, 0 -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v24, v11, 0 -; TONGA-NEXT: v_add_u32_e32 v18, vcc, v21, v18 -; TONGA-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc -; TONGA-NEXT: v_add_u32_e32 v13, vcc, v18, v13 -; TONGA-NEXT: v_addc_u32_e32 v13, vcc, v19, v14, vcc -; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc -; TONGA-NEXT: v_add_u32_e32 v10, vcc, v13, v10 -; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc -; TONGA-NEXT: v_add_u32_e32 v13, vcc, v23, 
v10 -; TONGA-NEXT: v_addc_u32_e32 v14, vcc, v24, v11, vcc -; TONGA-NEXT: v_ashrrev_i32_e32 v18, 31, v17 -; TONGA-NEXT: v_add_u32_e32 v10, vcc, v16, v18 -; TONGA-NEXT: v_xor_b32_e32 v19, v10, v18 -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v19, v14, 0 -; TONGA-NEXT: v_mul_hi_u32 v21, v19, v13 -; TONGA-NEXT: v_addc_u32_e32 v17, vcc, v17, v18, vcc -; TONGA-NEXT: v_xor_b32_e32 v17, v17, v18 -; TONGA-NEXT: v_add_u32_e32 v21, vcc, v21, v10 -; TONGA-NEXT: v_addc_u32_e32 v22, vcc, 0, v11, vcc -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v17, v13, 0 -; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v17, v14, 0 -; TONGA-NEXT: v_add_u32_e32 v10, vcc, v21, v10 -; TONGA-NEXT: v_addc_u32_e32 v10, vcc, v22, v11, vcc -; TONGA-NEXT: v_addc_u32_e32 v11, vcc, 0, v14, vcc -; TONGA-NEXT: v_add_u32_e32 v13, vcc, v10, v13 -; TONGA-NEXT: v_addc_u32_e32 v10, vcc, 0, v11, vcc -; TONGA-NEXT: v_mul_lo_u32 v14, v15, v10 -; TONGA-NEXT: v_mad_u64_u32 v[10:11], s[0:1], v15, v13, 0 -; TONGA-NEXT: v_mul_lo_u32 v13, v20, v13 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v14, v11 -; TONGA-NEXT: v_add_u32_e32 v11, vcc, v13, v11 -; TONGA-NEXT: v_sub_u32_e32 v13, vcc, v17, v11 -; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v19, v10 -; TONGA-NEXT: v_subb_u32_e64 v13, s[0:1], v13, v20, vcc -; TONGA-NEXT: v_sub_u32_e64 v14, s[0:1], v10, v15 -; TONGA-NEXT: v_subbrev_u32_e64 v19, s[2:3], 0, v13, s[0:1] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v19, v20 +; TONGA-NEXT: v_xor_b32_e32 v11, v11, v10 +; TONGA-NEXT: v_xor_b32_e32 v10, v13, v10 +; TONGA-NEXT: v_cvt_f32_u32_e32 v13, v11 +; TONGA-NEXT: v_cvt_f32_u32_e32 v14, v10 +; TONGA-NEXT: v_sub_u32_e32 v22, vcc, 0, v11 +; TONGA-NEXT: v_subb_u32_e32 v23, vcc, 0, v10, vcc +; TONGA-NEXT: v_madmk_f32 v13, v14, 0x4f800000, v13 +; TONGA-NEXT: v_rcp_f32_e32 v13, v13 +; TONGA-NEXT: v_mul_f32_e32 v13, 0x5f7ffffc, v13 +; TONGA-NEXT: v_mul_f32_e32 v14, 0x2f800000, v13 +; TONGA-NEXT: v_trunc_f32_e32 v14, v14 +; TONGA-NEXT: v_madmk_f32 v13, v14, 0xcf800000, v13 +; TONGA-NEXT: v_cvt_u32_f32_e32 v20, v14 +; TONGA-NEXT: v_cvt_u32_f32_e32 v21, v13 +; TONGA-NEXT: v_mul_lo_u32 v15, v22, v20 +; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v22, v21, 0 +; TONGA-NEXT: v_mul_lo_u32 v18, v23, v21 +; TONGA-NEXT: v_add_u32_e32 v14, vcc, v14, v15 +; TONGA-NEXT: v_add_u32_e32 v18, vcc, v14, v18 +; TONGA-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v21, v18, 0 +; TONGA-NEXT: v_mul_hi_u32 v19, v21, v13 +; TONGA-NEXT: v_add_u32_e32 v24, vcc, v19, v14 +; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v20, v13, 0 +; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v20, v18, 0 +; TONGA-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc +; TONGA-NEXT: v_add_u32_e32 v13, vcc, v24, v13 +; TONGA-NEXT: v_addc_u32_e32 v13, vcc, v15, v14, vcc +; TONGA-NEXT: v_addc_u32_e32 v14, vcc, 0, v19, vcc +; TONGA-NEXT: v_add_u32_e32 v13, vcc, v13, v18 +; TONGA-NEXT: v_addc_u32_e32 v14, vcc, 0, v14, vcc +; TONGA-NEXT: v_add_u32_e32 v24, vcc, v21, v13 +; TONGA-NEXT: v_addc_u32_e32 v25, vcc, v20, v14, vcc +; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v22, v24, 0 +; TONGA-NEXT: v_mul_lo_u32 v15, v22, v25 +; TONGA-NEXT: v_mul_lo_u32 v20, v23, v24 +; TONGA-NEXT: v_mad_u64_u32 v[18:19], s[0:1], v25, v13, 0 +; TONGA-NEXT: v_add_u32_e32 v14, vcc, v15, v14 +; TONGA-NEXT: v_add_u32_e32 v20, vcc, v20, v14 +; TONGA-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v24, v20, 0 +; TONGA-NEXT: v_mul_hi_u32 v13, v24, v13 +; TONGA-NEXT: v_mad_u64_u32 v[20:21], s[0:1], v25, v20, 0 +; TONGA-NEXT: v_add_u32_e32 v13, vcc, v13, v14 +; TONGA-NEXT: v_addc_u32_e32 v14, vcc, 0, v15, vcc +; TONGA-NEXT: v_add_u32_e32 
v13, vcc, v13, v18 +; TONGA-NEXT: v_addc_u32_e32 v13, vcc, v14, v19, vcc +; TONGA-NEXT: v_addc_u32_e32 v14, vcc, 0, v21, vcc +; TONGA-NEXT: v_add_u32_e32 v13, vcc, v13, v20 +; TONGA-NEXT: v_addc_u32_e32 v14, vcc, 0, v14, vcc +; TONGA-NEXT: v_add_u32_e32 v15, vcc, v24, v13 +; TONGA-NEXT: v_addc_u32_e32 v18, vcc, v25, v14, vcc +; TONGA-NEXT: v_ashrrev_i32_e32 v19, 31, v17 +; TONGA-NEXT: v_add_u32_e32 v13, vcc, v16, v19 +; TONGA-NEXT: v_xor_b32_e32 v20, v13, v19 +; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v20, v18, 0 +; TONGA-NEXT: v_mul_hi_u32 v21, v20, v15 +; TONGA-NEXT: v_addc_u32_e32 v17, vcc, v17, v19, vcc +; TONGA-NEXT: v_xor_b32_e32 v22, v17, v19 +; TONGA-NEXT: v_add_u32_e32 v21, vcc, v21, v13 +; TONGA-NEXT: v_addc_u32_e32 v23, vcc, 0, v14, vcc +; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v22, v15, 0 +; TONGA-NEXT: v_mad_u64_u32 v[17:18], s[0:1], v22, v18, 0 +; TONGA-NEXT: v_add_u32_e32 v13, vcc, v21, v13 +; TONGA-NEXT: v_addc_u32_e32 v13, vcc, v23, v14, vcc +; TONGA-NEXT: v_addc_u32_e32 v14, vcc, 0, v18, vcc +; TONGA-NEXT: v_add_u32_e32 v15, vcc, v13, v17 +; TONGA-NEXT: v_addc_u32_e32 v13, vcc, 0, v14, vcc +; TONGA-NEXT: v_mul_lo_u32 v17, v11, v13 +; TONGA-NEXT: v_mad_u64_u32 v[13:14], s[0:1], v11, v15, 0 +; TONGA-NEXT: v_mul_lo_u32 v15, v10, v15 +; TONGA-NEXT: v_add_u32_e32 v14, vcc, v17, v14 +; TONGA-NEXT: v_add_u32_e32 v14, vcc, v15, v14 +; TONGA-NEXT: v_sub_u32_e32 v15, vcc, v22, v14 +; TONGA-NEXT: v_sub_u32_e32 v13, vcc, v20, v13 +; TONGA-NEXT: v_subb_u32_e64 v15, s[0:1], v15, v10, vcc +; TONGA-NEXT: v_sub_u32_e64 v17, s[0:1], v13, v11 +; TONGA-NEXT: v_subbrev_u32_e64 v18, s[2:3], 0, v15, s[0:1] +; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v18, v10 +; TONGA-NEXT: v_cndmask_b32_e64 v20, 0, -1, s[2:3] +; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v17, v11 ; TONGA-NEXT: v_cndmask_b32_e64 v21, 0, -1, s[2:3] -; TONGA-NEXT: v_cmp_ge_u32_e64 s[2:3], v14, v15 -; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v17, v11, vcc -; TONGA-NEXT: v_cndmask_b32_e64 v22, 0, -1, s[2:3] -; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v19, v20 -; TONGA-NEXT: v_subb_u32_e64 v13, s[0:1], v13, v20, s[0:1] -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v11, v20 -; TONGA-NEXT: v_cndmask_b32_e64 v21, v21, v22, s[2:3] -; TONGA-NEXT: v_sub_u32_e64 v22, s[0:1], v14, v15 -; TONGA-NEXT: v_cndmask_b32_e64 v17, 0, -1, vcc -; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v10, v15 -; TONGA-NEXT: v_subbrev_u32_e64 v13, s[0:1], 0, v13, s[0:1] -; TONGA-NEXT: v_cndmask_b32_e64 v15, 0, -1, vcc -; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v11, v20 -; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v21 -; TONGA-NEXT: v_cndmask_b32_e32 v15, v17, v15, vcc -; TONGA-NEXT: v_cndmask_b32_e64 v14, v14, v22, s[0:1] -; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15 -; TONGA-NEXT: v_cndmask_b32_e64 v13, v19, v13, s[0:1] -; TONGA-NEXT: v_cndmask_b32_e32 v10, v10, v14, vcc -; TONGA-NEXT: v_cndmask_b32_e32 v11, v11, v13, vcc -; TONGA-NEXT: v_xor_b32_e32 v10, v10, v18 -; TONGA-NEXT: v_xor_b32_e32 v11, v11, v18 -; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v10, v18 -; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v11, v18, vcc -; TONGA-NEXT: s_cbranch_execnz .LBB12_6 -; TONGA-NEXT: .LBB12_5: +; TONGA-NEXT: v_cmp_eq_u32_e64 s[2:3], v18, v10 +; TONGA-NEXT: v_subb_u32_e64 v15, s[0:1], v15, v10, s[0:1] +; TONGA-NEXT: v_cndmask_b32_e64 v20, v20, v21, s[2:3] +; TONGA-NEXT: v_sub_u32_e64 v21, s[0:1], v17, v11 +; TONGA-NEXT: v_subbrev_u32_e64 v15, s[0:1], 0, v15, s[0:1] +; TONGA-NEXT: v_subb_u32_e32 v14, vcc, v22, v14, vcc +; TONGA-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v20 +; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v14, v10 +; TONGA-NEXT: 
v_cndmask_b32_e64 v15, v18, v15, s[0:1] +; TONGA-NEXT: v_cndmask_b32_e64 v18, 0, -1, vcc +; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v13, v11 +; TONGA-NEXT: v_cndmask_b32_e64 v11, 0, -1, vcc +; TONGA-NEXT: v_cmp_eq_u32_e32 vcc, v14, v10 +; TONGA-NEXT: v_cndmask_b32_e32 v10, v18, v11, vcc +; TONGA-NEXT: v_cndmask_b32_e64 v17, v17, v21, s[0:1] +; TONGA-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10 +; TONGA-NEXT: v_cndmask_b32_e32 v11, v13, v17, vcc +; TONGA-NEXT: v_cndmask_b32_e32 v10, v14, v15, vcc +; TONGA-NEXT: v_xor_b32_e32 v11, v11, v19 +; TONGA-NEXT: v_xor_b32_e32 v13, v10, v19 +; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v11, v19 +; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v13, v19, vcc +; TONGA-NEXT: s_cbranch_execnz .LBB12_8 +; TONGA-NEXT: .LBB12_7: ; TONGA-NEXT: v_cvt_f32_u32_e32 v10, v12 ; TONGA-NEXT: v_sub_u32_e32 v11, vcc, 0, v12 ; TONGA-NEXT: v_rcp_iflag_f32_e32 v10, v10 @@ -6370,13 +6492,13 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12 ; TONGA-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc ; TONGA-NEXT: v_mov_b32_e32 v11, 0 -; TONGA-NEXT: .LBB12_6: +; TONGA-NEXT: .LBB12_8: ; TONGA-NEXT: s_waitcnt vmcnt(0) ; TONGA-NEXT: v_or_b32_e32 v13, v5, v1 ; TONGA-NEXT: v_mov_b32_e32 v12, 0 ; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13] -; TONGA-NEXT: s_cbranch_vccz .LBB12_15 -; TONGA-NEXT: ; %bb.7: +; TONGA-NEXT: s_cbranch_vccz .LBB12_16 +; TONGA-NEXT: ; %bb.9: ; TONGA-NEXT: v_ashrrev_i32_e32 v12, 31, v1 ; TONGA-NEXT: v_add_u32_e32 v13, vcc, v0, v12 ; TONGA-NEXT: v_addc_u32_e32 v1, vcc, v1, v12, vcc @@ -6418,7 +6540,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_mul_hi_u32 v19, v21, v12 ; TONGA-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v22, v12, 0 ; TONGA-NEXT: v_add_u32_e32 v13, vcc, v16, v13 -; TONGA-NEXT: v_add_u32_e32 v13, vcc, v13, v17 +; TONGA-NEXT: v_add_u32_e32 v13, vcc, v17, v13 ; TONGA-NEXT: v_mad_u64_u32 v[16:17], s[0:1], v21, v13, 0 ; TONGA-NEXT: v_mad_u64_u32 v[12:13], s[0:1], v22, v13, 0 ; TONGA-NEXT: v_add_u32_e32 v16, vcc, v19, v16 @@ -6482,8 +6604,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_xor_b32_e32 v1, v1, v16 ; TONGA-NEXT: v_sub_u32_e32 v12, vcc, v5, v16 ; TONGA-NEXT: v_subb_u32_e32 v13, vcc, v1, v16, vcc -; TONGA-NEXT: s_cbranch_execnz .LBB12_9 -; TONGA-NEXT: .LBB12_8: +; TONGA-NEXT: s_cbranch_execnz .LBB12_11 +; TONGA-NEXT: .LBB12_10: ; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v0 ; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0 ; TONGA-NEXT: v_mov_b32_e32 v13, 0 @@ -6502,12 +6624,12 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_subrev_u32_e32 v4, vcc, v0, v1 ; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0 ; TONGA-NEXT: v_cndmask_b32_e32 v12, v1, v4, vcc -; TONGA-NEXT: .LBB12_9: +; TONGA-NEXT: .LBB12_11: ; TONGA-NEXT: v_or_b32_e32 v1, v7, v3 ; TONGA-NEXT: v_mov_b32_e32 v0, 0 ; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1] -; TONGA-NEXT: s_cbranch_vccz .LBB12_16 -; TONGA-NEXT: ; %bb.10: +; TONGA-NEXT: s_cbranch_vccz .LBB12_17 +; TONGA-NEXT: ; %bb.12: ; TONGA-NEXT: v_ashrrev_i32_e32 v0, 31, v3 ; TONGA-NEXT: v_add_u32_e32 v1, vcc, v2, v0 ; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc @@ -6549,7 +6671,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_mul_hi_u32 v17, v19, v0 ; TONGA-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v20, v0, 0 ; TONGA-NEXT: v_add_u32_e32 v1, vcc, v14, v1 -; TONGA-NEXT: v_add_u32_e32 v1, vcc, v1, v15 +; 
TONGA-NEXT: v_add_u32_e32 v1, vcc, v15, v1 ; TONGA-NEXT: v_mad_u64_u32 v[14:15], s[0:1], v19, v1, 0 ; TONGA-NEXT: v_mad_u64_u32 v[0:1], s[0:1], v20, v1, 0 ; TONGA-NEXT: v_add_u32_e32 v14, vcc, v17, v14 @@ -6613,8 +6735,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_xor_b32_e32 v1, v1, v15 ; TONGA-NEXT: v_sub_u32_e32 v14, vcc, v0, v15 ; TONGA-NEXT: v_subb_u32_e32 v15, vcc, v1, v15, vcc -; TONGA-NEXT: s_cbranch_execnz .LBB12_12 -; TONGA-NEXT: .LBB12_11: +; TONGA-NEXT: s_cbranch_execnz .LBB12_14 +; TONGA-NEXT: .LBB12_13: ; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v2 ; TONGA-NEXT: v_sub_u32_e32 v1, vcc, 0, v2 ; TONGA-NEXT: v_mov_b32_e32 v15, 0 @@ -6633,7 +6755,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0 ; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2 ; TONGA-NEXT: v_cndmask_b32_e32 v14, v0, v1, vcc -; TONGA-NEXT: .LBB12_12: +; TONGA-NEXT: .LBB12_14: ; TONGA-NEXT: v_mov_b32_e32 v0, s4 ; TONGA-NEXT: v_mov_b32_e32 v1, s5 ; TONGA-NEXT: s_add_u32 s0, s4, 16 @@ -6643,16 +6765,13 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i ; TONGA-NEXT: v_mov_b32_e32 v1, s1 ; TONGA-NEXT: flat_store_dwordx4 v[0:1], v[12:15] ; TONGA-NEXT: s_endpgm -; TONGA-NEXT: .LBB12_13: -; TONGA-NEXT: ; implicit-def: $vgpr8_vgpr9 -; TONGA-NEXT: s_branch .LBB12_2 -; TONGA-NEXT: .LBB12_14: -; TONGA-NEXT: s_branch .LBB12_5 ; TONGA-NEXT: .LBB12_15: -; TONGA-NEXT: ; implicit-def: $vgpr12_vgpr13 -; TONGA-NEXT: s_branch .LBB12_8 +; TONGA-NEXT: s_branch .LBB12_7 ; TONGA-NEXT: .LBB12_16: -; TONGA-NEXT: s_branch .LBB12_11 +; TONGA-NEXT: ; implicit-def: $vgpr12_vgpr13 +; TONGA-NEXT: s_branch .LBB12_10 +; TONGA-NEXT: .LBB12_17: +; TONGA-NEXT: s_branch .LBB12_13 ; ; EG-LABEL: srem_v4i64: ; EG: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll index c7b690fbd4a21..465024a699d43 100644 --- a/llvm/test/CodeGen/AMDGPU/srem64.ll +++ b/llvm/test/CodeGen/AMDGPU/srem64.ll @@ -5,119 +5,159 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GCN-LABEL: s_test_srem: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0xd -; GCN-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd +; GCN-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s12 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, s13 -; GCN-NEXT: s_sub_u32 s0, 0, s12 -; GCN-NEXT: s_subb_u32 s1, 0, s13 -; GCN-NEXT: s_mov_b32 s4, s8 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s9 +; GCN-NEXT: s_sub_u32 s10, 0, s8 +; GCN-NEXT: s_subb_u32 s11, 0, s9 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 -; GCN-NEXT: s_mov_b32 s5, s9 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GCN-NEXT: v_trunc_f32_e32 v1, v1 ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_mul_lo_u32 v2, s0, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s0, v0 -; GCN-NEXT: v_mul_lo_u32 v5, s1, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s0, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GCN-NEXT: v_mul_hi_u32 v3, v0, v4 -; GCN-NEXT: v_mul_lo_u32 v5, v0, v2 -; GCN-NEXT: 
v_mul_hi_u32 v7, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v6, v1, v4 -; GCN-NEXT: v_mul_lo_u32 v4, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v8, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v4 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v6, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s0, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s0, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s1, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_mul_lo_u32 v3, s0, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_mul_lo_u32 v6, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v3 -; GCN-NEXT: v_mul_hi_u32 v8, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v3 -; GCN-NEXT: v_mul_lo_u32 v3, v1, v3 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v2 -; GCN-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s10, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s10, v0 -; GCN-NEXT: v_mul_hi_u32 v4, s10, v1 -; GCN-NEXT: v_mul_hi_u32 v5, s11, v1 -; GCN-NEXT: v_mul_lo_u32 v1, s11, v1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_mul_lo_u32 v4, s11, v0 -; GCN-NEXT: v_mul_hi_u32 v0, s11, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc -; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc -; GCN-NEXT: v_mul_lo_u32 v1, s12, v1 -; GCN-NEXT: v_mul_hi_u32 v2, s12, v0 -; GCN-NEXT: v_mul_lo_u32 v3, s13, v0 -; GCN-NEXT: v_mul_lo_u32 v0, s12, v0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1 -; GCN-NEXT: v_sub_i32_e32 v2, vcc, s11, v1 -; GCN-NEXT: v_mov_b32_e32 v3, s13 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, s10, v0 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, vcc -; GCN-NEXT: v_subrev_i32_e64 v4, s[0:1], s12, v0 -; GCN-NEXT: v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s13, v5 -; GCN-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v4 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], s13, v5 -; GCN-NEXT: v_subrev_i32_e64 v3, s[0:1], s12, v4 -; GCN-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[2:3] -; GCN-NEXT: v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1] -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6 -; GCN-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1] -; GCN-NEXT: v_mov_b32_e32 v4, s11 -; GCN-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s13, v1 -; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v0 -; GCN-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, s13, v1 -; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; 
GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_mul_hi_u32 v2, s10, v0 +; GCN-NEXT: v_readfirstlane_b32 s12, v1 +; GCN-NEXT: v_readfirstlane_b32 s0, v0 +; GCN-NEXT: s_mul_i32 s1, s10, s12 +; GCN-NEXT: v_readfirstlane_b32 s15, v2 +; GCN-NEXT: s_mul_i32 s13, s11, s0 +; GCN-NEXT: s_mul_i32 s14, s10, s0 +; GCN-NEXT: s_add_i32 s1, s15, s1 +; GCN-NEXT: v_mul_hi_u32 v3, v0, s14 +; GCN-NEXT: s_add_i32 s1, s1, s13 +; GCN-NEXT: v_mul_hi_u32 v0, v0, s1 +; GCN-NEXT: v_mul_hi_u32 v4, v1, s14 +; GCN-NEXT: v_readfirstlane_b32 s13, v3 +; GCN-NEXT: s_mul_i32 s15, s0, s1 +; GCN-NEXT: v_mul_hi_u32 v1, v1, s1 +; GCN-NEXT: s_add_u32 s13, s13, s15 +; GCN-NEXT: v_readfirstlane_b32 s15, v0 +; GCN-NEXT: s_mul_i32 s14, s12, s14 +; GCN-NEXT: s_addc_u32 s15, 0, s15 +; GCN-NEXT: v_readfirstlane_b32 s16, v4 +; GCN-NEXT: s_add_u32 s13, s13, s14 +; GCN-NEXT: s_addc_u32 s13, s15, s16 +; GCN-NEXT: v_readfirstlane_b32 s14, v1 +; GCN-NEXT: s_addc_u32 s14, s14, 0 +; GCN-NEXT: s_mul_i32 s1, s12, s1 +; GCN-NEXT: s_add_u32 s1, s13, s1 +; GCN-NEXT: s_addc_u32 s13, 0, s14 +; GCN-NEXT: s_add_u32 s14, s0, s1 +; GCN-NEXT: v_mov_b32_e32 v0, s14 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: v_mul_hi_u32 v0, s10, v0 +; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_addc_u32 s12, s12, s13 +; GCN-NEXT: s_mul_i32 s0, s10, s12 +; GCN-NEXT: v_readfirstlane_b32 s1, v0 +; GCN-NEXT: s_add_i32 s0, s1, s0 +; GCN-NEXT: s_mul_i32 s11, s11, s14 +; GCN-NEXT: s_mul_i32 s1, s10, s14 +; GCN-NEXT: s_add_i32 s0, s0, s11 +; GCN-NEXT: v_mov_b32_e32 v2, s1 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: v_mul_hi_u32 v3, s12, v2 +; GCN-NEXT: v_mul_hi_u32 v2, s14, v2 +; GCN-NEXT: v_mul_hi_u32 v1, s12, v0 +; GCN-NEXT: v_mul_hi_u32 v0, s14, v0 +; GCN-NEXT: s_mul_i32 s11, s14, s0 +; GCN-NEXT: v_readfirstlane_b32 s15, v2 +; GCN-NEXT: s_add_u32 s11, s15, s11 +; GCN-NEXT: v_readfirstlane_b32 s13, v0 +; GCN-NEXT: s_mul_i32 s1, s12, s1 +; GCN-NEXT: s_addc_u32 s13, 0, s13 +; GCN-NEXT: v_readfirstlane_b32 s10, v3 +; GCN-NEXT: s_add_u32 s1, s11, s1 +; GCN-NEXT: s_addc_u32 s1, s13, s10 +; GCN-NEXT: v_readfirstlane_b32 s10, v1 +; GCN-NEXT: s_addc_u32 s10, s10, 0 +; GCN-NEXT: s_mul_i32 s0, s12, s0 +; GCN-NEXT: s_add_u32 s0, s1, s0 +; GCN-NEXT: s_addc_u32 s10, 0, s10 +; GCN-NEXT: s_add_u32 s11, s14, s0 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_addc_u32 s1, s12, s10 +; GCN-NEXT: v_mov_b32_e32 v0, s1 +; GCN-NEXT: v_mul_hi_u32 v1, s6, v0 +; GCN-NEXT: v_mov_b32_e32 v2, s11 +; GCN-NEXT: v_mul_hi_u32 v3, s6, v2 +; GCN-NEXT: s_mov_b32 s0, s4 +; GCN-NEXT: v_readfirstlane_b32 s10, v1 +; GCN-NEXT: v_mul_hi_u32 v1, s7, v2 +; GCN-NEXT: s_mul_i32 s4, s6, s1 +; GCN-NEXT: v_readfirstlane_b32 s12, v3 +; GCN-NEXT: v_mul_hi_u32 v0, s7, v0 +; GCN-NEXT: s_add_u32 s4, s12, s4 +; GCN-NEXT: s_addc_u32 s10, 0, s10 +; GCN-NEXT: s_mul_i32 s11, s7, s11 +; GCN-NEXT: v_readfirstlane_b32 s12, v1 +; GCN-NEXT: s_add_u32 s4, s4, s11 +; GCN-NEXT: s_addc_u32 s4, s10, s12 +; GCN-NEXT: v_readfirstlane_b32 s10, v0 +; GCN-NEXT: s_addc_u32 s10, s10, 0 +; GCN-NEXT: s_mul_i32 s1, s7, s1 +; GCN-NEXT: s_add_u32 s4, s4, s1 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: v_mul_hi_u32 v0, s8, v0 +; GCN-NEXT: s_mov_b32 s1, s5 +; GCN-NEXT: s_addc_u32 s5, 0, s10 +; GCN-NEXT: s_mul_i32 s5, s8, s5 +; GCN-NEXT: 
v_readfirstlane_b32 s10, v0 +; GCN-NEXT: s_add_i32 s5, s10, s5 +; GCN-NEXT: s_mul_i32 s10, s9, s4 +; GCN-NEXT: s_add_i32 s10, s5, s10 +; GCN-NEXT: s_sub_i32 s11, s7, s10 +; GCN-NEXT: s_mul_i32 s4, s8, s4 +; GCN-NEXT: s_sub_u32 s6, s6, s4 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s12, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s12, 0 +; GCN-NEXT: s_subb_u32 s11, s11, s9 +; GCN-NEXT: s_sub_u32 s13, s6, s8 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_subb_u32 s14, s11, 0 +; GCN-NEXT: s_cmp_ge_u32 s14, s9 +; GCN-NEXT: s_cselect_b32 s5, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s13, s8 +; GCN-NEXT: s_cselect_b32 s15, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s14, s9 +; GCN-NEXT: s_cselect_b32 s15, s15, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_subb_u32 s11, s11, s9 +; GCN-NEXT: s_sub_u32 s16, s13, s8 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_subb_u32 s4, s11, 0 +; GCN-NEXT: s_cmp_lg_u32 s15, 0 +; GCN-NEXT: s_cselect_b32 s5, s16, s13 +; GCN-NEXT: s_cselect_b32 s4, s4, s14 +; GCN-NEXT: s_cmp_lg_u32 s12, 0 +; GCN-NEXT: s_subb_u32 s7, s7, s10 +; GCN-NEXT: s_cmp_ge_u32 s7, s9 +; GCN-NEXT: s_cselect_b32 s10, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s6, s8 +; GCN-NEXT: s_cselect_b32 s8, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s7, s9 +; GCN-NEXT: s_cselect_b32 s8, s8, s10 +; GCN-NEXT: s_cmp_lg_u32 s8, 0 +; GCN-NEXT: s_cselect_b32 s4, s4, s7 +; GCN-NEXT: s_cselect_b32 s5, s5, s6 +; GCN-NEXT: v_mov_b32_e32 v0, s5 +; GCN-NEXT: v_mov_b32_e32 v1, s4 +; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; GCN-IR-LABEL: s_test_srem: @@ -921,133 +961,169 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 % ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_ashr_i64 s[2:3], s[2:3], 31 -; GCN-NEXT: s_ashr_i64 s[4:5], s[4:5], 31 -; GCN-NEXT: s_ashr_i32 s6, s5, 31 -; GCN-NEXT: s_add_u32 s4, s4, s6 -; GCN-NEXT: s_mov_b32 s7, s6 -; GCN-NEXT: s_addc_u32 s5, s5, s6 -; GCN-NEXT: s_xor_b64 s[8:9], s[4:5], s[6:7] -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, s9 -; GCN-NEXT: s_sub_u32 s4, 0, s8 -; GCN-NEXT: s_subb_u32 s5, 0, s9 -; GCN-NEXT: s_ashr_i32 s10, s3, 31 +; GCN-NEXT: s_ashr_i64 s[6:7], s[2:3], 31 +; GCN-NEXT: s_ashr_i64 s[2:3], s[4:5], 31 +; GCN-NEXT: s_ashr_i32 s4, s3, 31 +; GCN-NEXT: s_add_u32 s2, s2, s4 +; GCN-NEXT: s_mov_b32 s5, s4 +; GCN-NEXT: s_addc_u32 s3, s3, s4 +; GCN-NEXT: s_xor_b64 s[4:5], s[2:3], s[4:5] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s5 +; GCN-NEXT: s_sub_u32 s10, 0, s4 +; GCN-NEXT: s_subb_u32 s11, 0, s5 +; GCN-NEXT: s_mov_b32 s3, 0xf000 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 -; GCN-NEXT: s_add_u32 s2, s2, s10 -; GCN-NEXT: s_mov_b32 s11, s10 -; GCN-NEXT: s_addc_u32 s3, s3, s10 +; GCN-NEXT: s_mov_b32 s2, -1 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GCN-NEXT: v_trunc_f32_e32 v1, v1 ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: s_xor_b64 s[12:13], s[2:3], s[10:11] -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: v_mul_lo_u32 v2, s4, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s4, v0 -; GCN-NEXT: v_mul_lo_u32 v5, s5, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s4, v0 -; GCN-NEXT: s_mov_b32 s6, -1 -; 
GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GCN-NEXT: v_mul_hi_u32 v3, v0, v4 -; GCN-NEXT: v_mul_lo_u32 v5, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v2 -; GCN-NEXT: v_mul_lo_u32 v6, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v4 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GCN-NEXT: v_mul_hi_u32 v7, v1, v2 -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v6 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v4, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v7, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s4, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s4, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s5, v0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_mul_lo_u32 v3, s4, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_mul_lo_u32 v6, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v3 -; GCN-NEXT: v_mul_hi_u32 v8, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v3 -; GCN-NEXT: v_mul_lo_u32 v3, v1, v3 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v2 -; GCN-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s12, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s12, v0 -; GCN-NEXT: v_mul_hi_u32 v4, s12, v1 -; GCN-NEXT: v_mul_hi_u32 v5, s13, v1 -; GCN-NEXT: v_mul_lo_u32 v1, s13, v1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_mul_lo_u32 v4, s13, v0 -; GCN-NEXT: v_mul_hi_u32 v0, s13, v0 -; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc -; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc -; GCN-NEXT: v_mul_lo_u32 v1, s8, v1 -; GCN-NEXT: v_mul_hi_u32 v2, s8, v0 -; GCN-NEXT: v_mul_lo_u32 v3, s9, v0 -; GCN-NEXT: v_mul_lo_u32 v0, s8, v0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1 -; GCN-NEXT: v_sub_i32_e32 v2, vcc, s13, v1 -; GCN-NEXT: v_mov_b32_e32 v3, s9 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, s12, v0 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, vcc -; GCN-NEXT: v_subrev_i32_e64 v4, s[0:1], s8, v0 -; GCN-NEXT: v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s9, v5 -; GCN-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s8, v4 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], s9, v5 -; GCN-NEXT: v_subrev_i32_e64 v3, s[0:1], s8, v4 -; GCN-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[2:3] -; GCN-NEXT: v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1] -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6 -; GCN-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1] -; GCN-NEXT: v_mov_b32_e32 v4, s13 -; GCN-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s9, v1 -; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc -; GCN-NEXT: 
v_cmp_le_u32_e32 vcc, s8, v0 -; GCN-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, s9, v1 -; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GCN-NEXT: v_xor_b32_e32 v0, s10, v0 -; GCN-NEXT: v_xor_b32_e32 v1, s10, v1 -; GCN-NEXT: v_mov_b32_e32 v2, s10 -; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s10, v0 -; GCN-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc -; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_mul_hi_u32 v2, s10, v0 +; GCN-NEXT: v_readfirstlane_b32 s12, v1 +; GCN-NEXT: v_readfirstlane_b32 s8, v0 +; GCN-NEXT: s_mul_i32 s9, s10, s12 +; GCN-NEXT: v_readfirstlane_b32 s15, v2 +; GCN-NEXT: s_mul_i32 s13, s11, s8 +; GCN-NEXT: s_mul_i32 s14, s10, s8 +; GCN-NEXT: s_add_i32 s9, s15, s9 +; GCN-NEXT: v_mul_hi_u32 v3, v0, s14 +; GCN-NEXT: s_add_i32 s9, s9, s13 +; GCN-NEXT: v_mul_hi_u32 v0, v0, s9 +; GCN-NEXT: v_mul_hi_u32 v4, v1, s14 +; GCN-NEXT: v_readfirstlane_b32 s13, v3 +; GCN-NEXT: s_mul_i32 s15, s8, s9 +; GCN-NEXT: s_add_u32 s13, s13, s15 +; GCN-NEXT: v_readfirstlane_b32 s15, v0 +; GCN-NEXT: v_mul_hi_u32 v0, v1, s9 +; GCN-NEXT: s_addc_u32 s15, 0, s15 +; GCN-NEXT: s_mul_i32 s14, s12, s14 +; GCN-NEXT: v_readfirstlane_b32 s16, v4 +; GCN-NEXT: s_add_u32 s13, s13, s14 +; GCN-NEXT: s_addc_u32 s13, s15, s16 +; GCN-NEXT: v_readfirstlane_b32 s14, v0 +; GCN-NEXT: s_addc_u32 s14, s14, 0 +; GCN-NEXT: s_mul_i32 s9, s12, s9 +; GCN-NEXT: s_add_u32 s9, s13, s9 +; GCN-NEXT: s_addc_u32 s13, 0, s14 +; GCN-NEXT: s_add_u32 s14, s8, s9 +; GCN-NEXT: v_mov_b32_e32 v0, s14 +; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GCN-NEXT: v_mul_hi_u32 v0, s10, v0 +; GCN-NEXT: s_or_b32 s8, s8, s9 +; GCN-NEXT: s_cmp_lg_u32 s8, 0 +; GCN-NEXT: s_addc_u32 s12, s12, s13 +; GCN-NEXT: s_mul_i32 s8, s10, s12 +; GCN-NEXT: v_readfirstlane_b32 s9, v0 +; GCN-NEXT: s_add_i32 s8, s9, s8 +; GCN-NEXT: s_mul_i32 s11, s11, s14 +; GCN-NEXT: s_mul_i32 s9, s10, s14 +; GCN-NEXT: s_add_i32 s8, s8, s11 +; GCN-NEXT: v_mov_b32_e32 v2, s9 +; GCN-NEXT: v_mov_b32_e32 v0, s8 +; GCN-NEXT: v_mul_hi_u32 v3, s12, v2 +; GCN-NEXT: v_mul_hi_u32 v2, s14, v2 +; GCN-NEXT: v_mul_hi_u32 v1, s12, v0 +; GCN-NEXT: v_mul_hi_u32 v0, s14, v0 +; GCN-NEXT: s_mul_i32 s11, s14, s8 +; GCN-NEXT: v_readfirstlane_b32 s15, v2 +; GCN-NEXT: s_add_u32 s11, s15, s11 +; GCN-NEXT: v_readfirstlane_b32 s13, v0 +; GCN-NEXT: s_mul_i32 s9, s12, s9 +; GCN-NEXT: s_addc_u32 s13, 0, s13 +; GCN-NEXT: v_readfirstlane_b32 s10, v3 +; GCN-NEXT: s_add_u32 s9, s11, s9 +; GCN-NEXT: s_addc_u32 s9, s13, s10 +; GCN-NEXT: v_readfirstlane_b32 s10, v1 +; GCN-NEXT: s_addc_u32 s10, s10, 0 +; GCN-NEXT: s_mul_i32 s8, s12, s8 +; GCN-NEXT: s_add_u32 s8, s9, s8 +; GCN-NEXT: s_addc_u32 s10, 0, s10 +; GCN-NEXT: s_add_u32 s11, s14, s8 +; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0 +; GCN-NEXT: s_or_b32 s8, s8, s9 +; GCN-NEXT: s_cmp_lg_u32 s8, 0 +; GCN-NEXT: s_addc_u32 s10, s12, s10 +; GCN-NEXT: s_ashr_i32 s8, s7, 31 +; GCN-NEXT: s_add_u32 s6, s6, s8 +; GCN-NEXT: s_mov_b32 s9, s8 +; GCN-NEXT: s_addc_u32 s7, s7, s8 +; GCN-NEXT: s_xor_b64 s[6:7], s[6:7], s[8:9] +; GCN-NEXT: v_mov_b32_e32 v0, s10 +; GCN-NEXT: v_mul_hi_u32 v1, s6, v0 +; GCN-NEXT: v_mov_b32_e32 v2, s11 +; GCN-NEXT: v_mul_hi_u32 v3, s6, v2 +; GCN-NEXT: s_mul_i32 s12, s6, s10 +; GCN-NEXT: v_readfirstlane_b32 s13, v1 +; GCN-NEXT: v_mul_hi_u32 v1, s7, v2 +; GCN-NEXT: v_readfirstlane_b32 s14, v3 +; GCN-NEXT: 
v_mul_hi_u32 v0, s7, v0 +; GCN-NEXT: s_add_u32 s12, s14, s12 +; GCN-NEXT: s_addc_u32 s13, 0, s13 +; GCN-NEXT: s_mul_i32 s11, s7, s11 +; GCN-NEXT: v_readfirstlane_b32 s14, v1 +; GCN-NEXT: s_add_u32 s11, s12, s11 +; GCN-NEXT: s_addc_u32 s11, s13, s14 +; GCN-NEXT: v_readfirstlane_b32 s12, v0 +; GCN-NEXT: s_addc_u32 s12, s12, 0 +; GCN-NEXT: s_mul_i32 s10, s7, s10 +; GCN-NEXT: s_add_u32 s10, s11, s10 +; GCN-NEXT: v_mov_b32_e32 v0, s10 +; GCN-NEXT: v_mul_hi_u32 v0, s4, v0 +; GCN-NEXT: s_addc_u32 s11, 0, s12 +; GCN-NEXT: s_mul_i32 s11, s4, s11 +; GCN-NEXT: v_readfirstlane_b32 s12, v0 +; GCN-NEXT: s_add_i32 s11, s12, s11 +; GCN-NEXT: s_mul_i32 s12, s5, s10 +; GCN-NEXT: s_add_i32 s12, s11, s12 +; GCN-NEXT: s_sub_i32 s13, s7, s12 +; GCN-NEXT: s_mul_i32 s10, s4, s10 +; GCN-NEXT: s_sub_u32 s6, s6, s10 +; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GCN-NEXT: s_or_b32 s14, s10, s11 +; GCN-NEXT: s_cmp_lg_u32 s14, 0 +; GCN-NEXT: s_subb_u32 s13, s13, s5 +; GCN-NEXT: s_sub_u32 s15, s6, s4 +; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GCN-NEXT: s_or_b32 s10, s10, s11 +; GCN-NEXT: s_cmp_lg_u32 s10, 0 +; GCN-NEXT: s_subb_u32 s16, s13, 0 +; GCN-NEXT: s_cmp_ge_u32 s16, s5 +; GCN-NEXT: s_cselect_b32 s11, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s15, s4 +; GCN-NEXT: s_cselect_b32 s17, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s16, s5 +; GCN-NEXT: s_cselect_b32 s17, s17, s11 +; GCN-NEXT: s_cmp_lg_u32 s10, 0 +; GCN-NEXT: s_subb_u32 s13, s13, s5 +; GCN-NEXT: s_sub_u32 s18, s15, s4 +; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0 +; GCN-NEXT: s_or_b32 s10, s10, s11 +; GCN-NEXT: s_cmp_lg_u32 s10, 0 +; GCN-NEXT: s_subb_u32 s10, s13, 0 +; GCN-NEXT: s_cmp_lg_u32 s17, 0 +; GCN-NEXT: s_cselect_b32 s11, s18, s15 +; GCN-NEXT: s_cselect_b32 s10, s10, s16 +; GCN-NEXT: s_cmp_lg_u32 s14, 0 +; GCN-NEXT: s_subb_u32 s7, s7, s12 +; GCN-NEXT: s_cmp_ge_u32 s7, s5 +; GCN-NEXT: s_cselect_b32 s12, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s6, s4 +; GCN-NEXT: s_cselect_b32 s4, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s7, s5 +; GCN-NEXT: s_cselect_b32 s4, s4, s12 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_cselect_b32 s5, s10, s7 +; GCN-NEXT: s_cselect_b32 s4, s11, s6 +; GCN-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9] +; GCN-NEXT: s_sub_u32 s4, s4, s8 +; GCN-NEXT: s_subb_u32 s5, s5, s8 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: v_mov_b32_e32 v1, s5 +; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; GCN-IR-LABEL: s_test_srem33_64: @@ -1156,34 +1232,33 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 % define amdgpu_kernel void @s_test_srem24_48(ptr addrspace(1) %out, i48 %x, i48 %y) { ; GCN-LABEL: s_test_srem24_48: ; GCN: ; %bb.0: +; GCN-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd ; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_sext_i32_i16 s9, s9 +; GCN-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; GCN-NEXT: v_cvt_f32_i32_e32 v0, s4 ; GCN-NEXT: s_sext_i32_i16 s3, s3 -; GCN-NEXT: s_sext_i32_i16 s5, s5 -; GCN-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NEXT: v_alignbit_b32 v0, s5, v0, 24 -; GCN-NEXT: v_cvt_f32_i32_e32 v1, v0 -; GCN-NEXT: v_mov_b32_e32 v2, s2 -; GCN-NEXT: v_alignbit_b32 v2, s3, v2, 24 -; GCN-NEXT: v_cvt_f32_i32_e32 v3, v2 -; GCN-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GCN-NEXT: v_xor_b32_e32 v5, v2, v0 -; GCN-NEXT: v_ashrrev_i32_e32 v5, 30, v5 -; GCN-NEXT: v_or_b32_e32 v5, 1, v5 -; GCN-NEXT: v_mul_f32_e32 v4, v3, v4 -; GCN-NEXT: v_trunc_f32_e32 v4, v4 -; GCN-NEXT: v_mad_f32 
v3, -v4, v1, v3 -; GCN-NEXT: v_cvt_i32_f32_e32 v4, v4 -; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1| -; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v5, vcc +; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GCN-NEXT: s_xor_b32 s3, s2, s4 +; GCN-NEXT: s_ashr_i32 s3, s3, 30 +; GCN-NEXT: s_or_b32 s3, s3, 1 +; GCN-NEXT: v_mul_f32_e32 v2, v1, v2 +; GCN-NEXT: v_trunc_f32_e32 v2, v2 +; GCN-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GCN-NEXT: v_cvt_i32_f32_e32 v2, v2 +; GCN-NEXT: v_cmp_ge_f32_e64 s[8:9], |v1|, |v0| +; GCN-NEXT: s_and_b64 s[8:9], s[8:9], exec +; GCN-NEXT: s_cselect_b32 s3, s3, 0 +; GCN-NEXT: v_add_i32_e32 v0, vcc, s3, v2 +; GCN-NEXT: v_mul_lo_u32 v0, v0, s4 ; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v4 -; GCN-NEXT: v_mul_lo_u32 v0, v1, v0 ; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_subrev_i32_e32 v0, vcc, v0, v2 +; GCN-NEXT: v_sub_i32_e32 v0, vcc, s2, v0 ; GCN-NEXT: v_bfe_i32 v0, v0, 0, 24 ; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 @@ -1192,34 +1267,33 @@ define amdgpu_kernel void @s_test_srem24_48(ptr addrspace(1) %out, i48 %x, i48 % ; ; GCN-IR-LABEL: s_test_srem24_48: ; GCN-IR: ; %bb.0: +; GCN-IR-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd ; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) +; GCN-IR-NEXT: s_sext_i32_i16 s9, s9 +; GCN-IR-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; GCN-IR-NEXT: v_cvt_f32_i32_e32 v0, s4 ; GCN-IR-NEXT: s_sext_i32_i16 s3, s3 -; GCN-IR-NEXT: s_sext_i32_i16 s5, s5 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s4 -; GCN-IR-NEXT: v_alignbit_b32 v0, s5, v0, 24 -; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s2 -; GCN-IR-NEXT: v_alignbit_b32 v2, s3, v2, 24 -; GCN-IR-NEXT: v_cvt_f32_i32_e32 v3, v2 -; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v5, v2, v0 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v5, 30, v5 -; GCN-IR-NEXT: v_or_b32_e32 v5, 1, v5 -; GCN-IR-NEXT: v_mul_f32_e32 v4, v3, v4 -; GCN-IR-NEXT: v_trunc_f32_e32 v4, v4 -; GCN-IR-NEXT: v_mad_f32 v3, -v4, v1, v3 -; GCN-IR-NEXT: v_cvt_i32_f32_e32 v4, v4 -; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1| -; GCN-IR-NEXT: v_cndmask_b32_e32 v1, 0, v5, vcc +; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GCN-IR-NEXT: s_xor_b32 s3, s2, s4 +; GCN-IR-NEXT: s_ashr_i32 s3, s3, 30 +; GCN-IR-NEXT: s_or_b32 s3, s3, 1 +; GCN-IR-NEXT: v_mul_f32_e32 v2, v1, v2 +; GCN-IR-NEXT: v_trunc_f32_e32 v2, v2 +; GCN-IR-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GCN-IR-NEXT: v_cvt_i32_f32_e32 v2, v2 +; GCN-IR-NEXT: v_cmp_ge_f32_e64 s[8:9], |v1|, |v0| +; GCN-IR-NEXT: s_and_b64 s[8:9], s[8:9], exec +; GCN-IR-NEXT: s_cselect_b32 s3, s3, 0 +; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s3, v2 +; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, s4 ; GCN-IR-NEXT: s_mov_b32 s4, s0 -; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v4 -; GCN-IR-NEXT: v_mul_lo_u32 v0, v1, v0 ; GCN-IR-NEXT: s_mov_b32 s5, s1 -; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, v0, v2 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s2, v0 ; GCN-IR-NEXT: v_bfe_i32 v0, v0, 0, 24 ; GCN-IR-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0 @@ -1236,110 +1310,145 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x) ; GCN-LABEL: s_test_srem_k_num_i64: ; GCN: ; %bb.0: ; GCN-NEXT: 
s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: s_ashr_i32 s4, s3, 31 ; GCN-NEXT: s_add_u32 s2, s2, s4 ; GCN-NEXT: s_mov_b32 s5, s4 ; GCN-NEXT: s_addc_u32 s3, s3, s4 -; GCN-NEXT: s_xor_b64 s[8:9], s[2:3], s[4:5] -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, s9 -; GCN-NEXT: s_sub_u32 s2, 0, s8 -; GCN-NEXT: s_subb_u32 s3, 0, s9 -; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_xor_b64 s[4:5], s[2:3], s[4:5] +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s5 +; GCN-NEXT: s_sub_u32 s2, 0, s4 +; GCN-NEXT: s_subb_u32 s8, 0, s5 +; GCN-NEXT: s_mov_b32 s3, 0xf000 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 -; GCN-NEXT: s_mov_b32 s5, s1 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GCN-NEXT: v_trunc_f32_e32 v1, v1 ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_mul_lo_u32 v2, s2, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s2, v0 -; GCN-NEXT: v_mul_lo_u32 v5, s3, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s2, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GCN-NEXT: v_mul_hi_u32 v3, v0, v4 -; GCN-NEXT: v_mul_lo_u32 v5, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v6, v1, v4 -; GCN-NEXT: v_mul_lo_u32 v4, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v8, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v4 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v6, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s2, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s2, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s3, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_mul_lo_u32 v3, s2, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_mul_lo_u32 v6, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v3 -; GCN-NEXT: v_mul_hi_u32 v8, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v3 -; GCN-NEXT: v_mul_lo_u32 v3, v1, v3 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v2 -; GCN-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, 24 -; GCN-NEXT: v_mul_hi_u32 v0, v0, 24 -; GCN-NEXT: v_mul_hi_u32 v1, v1, 24 -; GCN-NEXT: v_mov_b32_e32 v3, s9 -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v1, vcc -; GCN-NEXT: v_mul_lo_u32 v1, s9, v0 -; GCN-NEXT: v_mul_hi_u32 v2, s8, v0 -; GCN-NEXT: v_mul_lo_u32 v0, s8, v0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2 -; GCN-NEXT: v_sub_i32_e32 v2, vcc, 0, v1 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, 24, v0 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, vcc -; GCN-NEXT: v_subrev_i32_e64 v4, s[0:1], s8, v0 -; GCN-NEXT: v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s9, v5 
-; GCN-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s8, v4 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], s9, v5 -; GCN-NEXT: v_subrev_i32_e64 v3, s[0:1], s8, v4 -; GCN-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[2:3] -; GCN-NEXT: v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1] -; GCN-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s9, v1 -; GCN-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s8, v0 -; GCN-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, s9, v1 -; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_mul_hi_u32 v2, s2, v0 +; GCN-NEXT: v_readfirstlane_b32 s9, v1 +; GCN-NEXT: v_readfirstlane_b32 s6, v0 +; GCN-NEXT: s_mul_i32 s7, s2, s9 +; GCN-NEXT: v_readfirstlane_b32 s12, v2 +; GCN-NEXT: s_mul_i32 s10, s8, s6 +; GCN-NEXT: s_mul_i32 s11, s2, s6 +; GCN-NEXT: s_add_i32 s7, s12, s7 +; GCN-NEXT: v_mul_hi_u32 v3, v0, s11 +; GCN-NEXT: s_add_i32 s7, s7, s10 +; GCN-NEXT: v_mul_hi_u32 v0, v0, s7 +; GCN-NEXT: v_mul_hi_u32 v4, v1, s11 +; GCN-NEXT: v_readfirstlane_b32 s10, v3 +; GCN-NEXT: s_mul_i32 s13, s6, s7 +; GCN-NEXT: v_mul_hi_u32 v1, v1, s7 +; GCN-NEXT: s_add_u32 s10, s10, s13 +; GCN-NEXT: v_readfirstlane_b32 s13, v0 +; GCN-NEXT: s_mul_i32 s11, s9, s11 +; GCN-NEXT: s_addc_u32 s13, 0, s13 +; GCN-NEXT: v_readfirstlane_b32 s12, v4 +; GCN-NEXT: s_add_u32 s10, s10, s11 +; GCN-NEXT: s_addc_u32 s10, s13, s12 +; GCN-NEXT: v_readfirstlane_b32 s11, v1 +; GCN-NEXT: s_addc_u32 s11, s11, 0 +; GCN-NEXT: s_mul_i32 s7, s9, s7 +; GCN-NEXT: s_add_u32 s7, s10, s7 +; GCN-NEXT: s_addc_u32 s10, 0, s11 +; GCN-NEXT: s_add_u32 s11, s6, s7 +; GCN-NEXT: v_mov_b32_e32 v0, s11 +; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GCN-NEXT: v_mul_hi_u32 v0, s2, v0 +; GCN-NEXT: s_or_b32 s6, s6, s7 +; GCN-NEXT: s_cmp_lg_u32 s6, 0 +; GCN-NEXT: s_addc_u32 s9, s9, s10 +; GCN-NEXT: s_mul_i32 s6, s2, s9 +; GCN-NEXT: v_readfirstlane_b32 s7, v0 +; GCN-NEXT: s_add_i32 s6, s7, s6 +; GCN-NEXT: s_mul_i32 s8, s8, s11 +; GCN-NEXT: s_mul_i32 s2, s2, s11 +; GCN-NEXT: s_add_i32 s6, s6, s8 +; GCN-NEXT: v_mov_b32_e32 v2, s2 +; GCN-NEXT: v_mov_b32_e32 v0, s6 +; GCN-NEXT: v_mul_hi_u32 v3, s9, v2 +; GCN-NEXT: v_mul_hi_u32 v2, s11, v2 +; GCN-NEXT: v_mul_hi_u32 v1, s9, v0 +; GCN-NEXT: v_mul_hi_u32 v0, s11, v0 +; GCN-NEXT: s_mul_i32 s8, s11, s6 +; GCN-NEXT: v_readfirstlane_b32 s12, v2 +; GCN-NEXT: s_add_u32 s8, s12, s8 +; GCN-NEXT: v_readfirstlane_b32 s10, v0 +; GCN-NEXT: s_mul_i32 s2, s9, s2 +; GCN-NEXT: s_addc_u32 s10, 0, s10 +; GCN-NEXT: v_readfirstlane_b32 s7, v3 +; GCN-NEXT: s_add_u32 s2, s8, s2 +; GCN-NEXT: s_addc_u32 s2, s10, s7 +; GCN-NEXT: v_readfirstlane_b32 s7, v1 +; GCN-NEXT: s_addc_u32 s7, s7, 0 +; GCN-NEXT: s_mul_i32 s6, s9, s6 +; GCN-NEXT: s_add_u32 s2, s2, s6 +; GCN-NEXT: s_addc_u32 s8, 0, s7 +; GCN-NEXT: s_add_u32 s2, s11, s2 +; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GCN-NEXT: s_or_b32 s6, s6, s7 +; GCN-NEXT: s_cmp_lg_u32 s6, 0 +; GCN-NEXT: s_addc_u32 s6, s9, s8 +; GCN-NEXT: v_mul_hi_u32 v1, s2, 24 +; GCN-NEXT: v_mul_hi_u32 v0, s6, 24 +; GCN-NEXT: s_mul_i32 
s6, s6, 24 +; GCN-NEXT: s_mov_b32 s2, -1 +; GCN-NEXT: v_readfirstlane_b32 s8, v1 +; GCN-NEXT: v_readfirstlane_b32 s7, v0 +; GCN-NEXT: s_add_u32 s6, s8, s6 +; GCN-NEXT: s_addc_u32 s6, 0, s7 +; GCN-NEXT: v_mov_b32_e32 v0, s6 +; GCN-NEXT: v_mul_hi_u32 v0, s4, v0 +; GCN-NEXT: s_mul_i32 s7, s5, s6 +; GCN-NEXT: s_mul_i32 s6, s4, s6 +; GCN-NEXT: v_readfirstlane_b32 s8, v0 +; GCN-NEXT: s_add_i32 s8, s8, s7 +; GCN-NEXT: s_sub_i32 s9, 0, s8 +; GCN-NEXT: s_sub_u32 s10, 24, s6 +; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GCN-NEXT: s_or_b32 s11, s6, s7 +; GCN-NEXT: s_cmp_lg_u32 s11, 0 +; GCN-NEXT: s_subb_u32 s9, s9, s5 +; GCN-NEXT: s_sub_u32 s12, s10, s4 +; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GCN-NEXT: s_or_b32 s6, s6, s7 +; GCN-NEXT: s_cmp_lg_u32 s6, 0 +; GCN-NEXT: s_subb_u32 s13, s9, 0 +; GCN-NEXT: s_cmp_ge_u32 s13, s5 +; GCN-NEXT: s_cselect_b32 s7, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s12, s4 +; GCN-NEXT: s_cselect_b32 s14, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s13, s5 +; GCN-NEXT: s_cselect_b32 s14, s14, s7 +; GCN-NEXT: s_cmp_lg_u32 s6, 0 +; GCN-NEXT: s_subb_u32 s9, s9, s5 +; GCN-NEXT: s_sub_u32 s15, s12, s4 +; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0 +; GCN-NEXT: s_or_b32 s6, s6, s7 +; GCN-NEXT: s_cmp_lg_u32 s6, 0 +; GCN-NEXT: s_subb_u32 s6, s9, 0 +; GCN-NEXT: s_cmp_lg_u32 s14, 0 +; GCN-NEXT: s_cselect_b32 s7, s15, s12 +; GCN-NEXT: s_cselect_b32 s6, s6, s13 +; GCN-NEXT: s_cmp_lg_u32 s11, 0 +; GCN-NEXT: s_subb_u32 s8, 0, s8 +; GCN-NEXT: s_cmp_ge_u32 s8, s5 +; GCN-NEXT: s_cselect_b32 s9, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s10, s4 +; GCN-NEXT: s_cselect_b32 s4, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s8, s5 +; GCN-NEXT: s_cselect_b32 s4, s4, s9 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_cselect_b32 s4, s6, s8 +; GCN-NEXT: s_cselect_b32 s5, s7, s10 +; GCN-NEXT: v_mov_b32_e32 v0, s5 +; GCN-NEXT: v_mov_b32_e32 v1, s4 +; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; GCN-IR-LABEL: s_test_srem_k_num_i64: diff --git a/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll b/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll index 65a99d0d097f9..480eb0dd5fe9c 100644 --- a/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll +++ b/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll @@ -52,11 +52,12 @@ define amdgpu_kernel void @local_store_i55(ptr addrspace(3) %ptr, i55 %arg) #0 { ; HAWAII-LABEL: local_store_i55: ; HAWAII: ; %bb.0: ; HAWAII-NEXT: s_add_i32 s12, s12, s17 -; HAWAII-NEXT: s_or_b32 s0, s8, 14 -; HAWAII-NEXT: s_mov_b32 flat_scratch_lo, s13 ; HAWAII-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 +; HAWAII-NEXT: s_add_u32 s0, s8, 14 +; HAWAII-NEXT: s_addc_u32 s1, s9, 0 ; HAWAII-NEXT: v_mov_b32_e32 v0, s0 -; HAWAII-NEXT: v_mov_b32_e32 v1, s9 +; HAWAII-NEXT: s_mov_b32 flat_scratch_lo, s13 +; HAWAII-NEXT: v_mov_b32_e32 v1, s1 ; HAWAII-NEXT: flat_load_ubyte v0, v[0:1] ; HAWAII-NEXT: s_load_dword s2, s[8:9], 0x0 ; HAWAII-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x2 @@ -74,25 +75,27 @@ define amdgpu_kernel void @local_store_i55(ptr addrspace(3) %ptr, i55 %arg) #0 { ; ; FIJI-LABEL: local_store_i55: ; FIJI: ; %bb.0: +; FIJI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x8 ; FIJI-NEXT: s_add_i32 s12, s12, s17 -; FIJI-NEXT: s_or_b32 s0, s8, 14 -; FIJI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; FIJI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8 -; FIJI-NEXT: v_mov_b32_e32 v0, s0 -; FIJI-NEXT: v_mov_b32_e32 v1, s9 -; FIJI-NEXT: flat_load_ubyte v0, v[0:1] -; FIJI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x8 -; FIJI-NEXT: s_load_dword s2, s[8:9], 0x0 +; FIJI-NEXT: s_mov_b32 flat_scratch_lo, s13 ; FIJI-NEXT: s_mov_b32 m0, -1 ; FIJI-NEXT: s_waitcnt 
lgkmcnt(0) -; FIJI-NEXT: s_and_b32 s3, s1, 0xffff -; FIJI-NEXT: v_mov_b32_e32 v1, s2 +; FIJI-NEXT: s_and_b32 s4, s1, 0xffff +; FIJI-NEXT: s_add_u32 s2, s8, 14 +; FIJI-NEXT: s_addc_u32 s3, s9, 0 +; FIJI-NEXT: v_mov_b32_e32 v0, s2 +; FIJI-NEXT: v_mov_b32_e32 v1, s3 +; FIJI-NEXT: flat_load_ubyte v0, v[0:1] +; FIJI-NEXT: s_load_dword s2, s[8:9], 0x0 ; FIJI-NEXT: v_mov_b32_e32 v2, s1 ; FIJI-NEXT: v_mov_b32_e32 v3, s0 +; FIJI-NEXT: s_waitcnt lgkmcnt(0) +; FIJI-NEXT: v_mov_b32_e32 v1, s2 ; FIJI-NEXT: ds_write_b16 v1, v2 offset:4 ; FIJI-NEXT: s_waitcnt vmcnt(0) ; FIJI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; FIJI-NEXT: v_or_b32_e32 v0, s3, v0 +; FIJI-NEXT: v_or_b32_e32 v0, s4, v0 ; FIJI-NEXT: v_bfe_u32 v0, v0, 16, 7 ; FIJI-NEXT: ds_write_b8 v1, v0 offset:6 ; FIJI-NEXT: ds_write_b32 v1, v3 diff --git a/llvm/test/CodeGen/AMDGPU/sub_u64.ll b/llvm/test/CodeGen/AMDGPU/sub_u64.ll index baaca4ddeaf05..f79fbd98f1e09 100644 --- a/llvm/test/CodeGen/AMDGPU/sub_u64.ll +++ b/llvm/test/CodeGen/AMDGPU/sub_u64.ll @@ -126,7 +126,7 @@ define amdgpu_ps <2 x float> @test_sub_u64_64bit_imm_v(i64 %a) { ; ; GFX1250-LABEL: test_sub_u64_64bit_imm_v: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: v_sub_nc_u64_e32 v[0:1], lit64(0x13b9ac9ff), v[0:1] +; GFX1250-NEXT: v_sub_nc_u64_e32 v[0:1], 0x13b9ac9ff, v[0:1] ; GFX1250-NEXT: ; return to shader part epilog %sub = sub i64 5294967295, %a %ret = bitcast i64 %sub to <2 x float> diff --git a/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir b/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir index 0430c8349f350..1dbeccf7cf984 100644 --- a/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir +++ b/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir @@ -107,7 +107,7 @@ body: | successors: %bb.13(0x80000000) ; GCN-LABEL: bb.7: - ; GCN: undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; GCN: undef %{{.+}}.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec %15.sub1:vreg_128 = COPY %15.sub0 @@ -123,7 +123,7 @@ body: | successors: %bb.12(0x80000000) ; GCN-LABEL: bb.9: - ; GCN: undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec + ; GCN: undef %{{.+}}.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec %15.sub1:vreg_128 = COPY %15.sub0 @@ -134,7 +134,7 @@ body: | successors: %bb.12(0x80000000) ; GCN-LABEL: bb.10: - ; GCN: undef %15.sub0:vreg_128 = V_MOV_B32_e32 2143289344, implicit $exec + ; GCN: undef %{{.+}}.sub0:vreg_128 = V_MOV_B32_e32 2143289344, implicit $exec undef %15.sub0:vreg_128 = V_MOV_B32_e32 2143289344, implicit $exec %15.sub1:vreg_128 = COPY %15.sub0 diff --git a/llvm/test/CodeGen/AMDGPU/swdev380865.ll b/llvm/test/CodeGen/AMDGPU/swdev380865.ll index 4a5dc8f300af3..d4a8a0d762afd 100644 --- a/llvm/test/CodeGen/AMDGPU/swdev380865.ll +++ b/llvm/test/CodeGen/AMDGPU/swdev380865.ll @@ -14,16 +14,15 @@ define amdgpu_kernel void @_Z6kernelILi4000ELi1EEvPd(ptr addrspace(1) %x.coerce) { ; CHECK-LABEL: _Z6kernelILi4000ELi1EEvPd: ; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: s_mov_b64 s[2:3], 0x100 +; CHECK-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x0 ; CHECK-NEXT: s_mov_b64 s[0:1], 0 -; CHECK-NEXT: s_load_dword s2, s[0:1], 0x0 -; CHECK-NEXT: s_mov_b64 s[0:1], 0x100 -; CHECK-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0 +; CHECK-NEXT: s_load_dword s0, s[0:1], 0x0 +; CHECK-NEXT: s_mov_b32 s2, 0 ; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_mov_b32 s0, 0 ; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: s_mov_b32 s1, s2 -; CHECK-NEXT: s_mov_b32 s2, 0 ; CHECK-NEXT: 
v_mov_b32_e32 v0, s6 +; CHECK-NEXT: s_mov_b32 s1, 0 ; CHECK-NEXT: s_mov_b32 s3, 0x40260000 ; CHECK-NEXT: s_mov_b32 s5, 0x40280000 ; CHECK-NEXT: v_mov_b32_e32 v1, s7 @@ -32,8 +31,8 @@ define amdgpu_kernel void @_Z6kernelILi4000ELi1EEvPd(ptr addrspace(1) %x.coerce) ; CHECK-NEXT: v_add_f64 v[0:1], v[0:1], 0 ; CHECK-NEXT: s_mov_b32 s6, 0 ; CHECK-NEXT: s_mov_b32 s7, 0x40140000 -; CHECK-NEXT: s_add_i32 s0, s0, s1 -; CHECK-NEXT: s_cmpk_lt_i32 s0, 0xa00 +; CHECK-NEXT: s_add_i32 s1, s1, s0 +; CHECK-NEXT: s_cmpk_lt_i32 s1, 0xa00 ; CHECK-NEXT: v_add_f64 v[0:1], v[0:1], s[6:7] ; CHECK-NEXT: s_mov_b32 s6, 0 ; CHECK-NEXT: s_mov_b32 s7, 0x40180000 diff --git a/llvm/test/CodeGen/AMDGPU/swdev502267-use-after-free-last-chance-recoloring-alloc-succeeds.mir b/llvm/test/CodeGen/AMDGPU/swdev502267-use-after-free-last-chance-recoloring-alloc-succeeds.mir index 6966c3d8b6d6a..bc8a383a285b2 100644 --- a/llvm/test/CodeGen/AMDGPU/swdev502267-use-after-free-last-chance-recoloring-alloc-succeeds.mir +++ b/llvm/test/CodeGen/AMDGPU/swdev502267-use-after-free-last-chance-recoloring-alloc-succeeds.mir @@ -36,24 +36,18 @@ body: | ; CHECK-NEXT: SI_SPILL_AV128_SAVE killed $vgpr0_vgpr1_vgpr2_vgpr3, %stack.1, $sgpr32, 0, implicit $exec :: (store (s128) into %stack.1, align 4, addrspace 5) ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = SI_SPILL_AV128_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5) ; CHECK-NEXT: renamable $vgpr0 = V_FMA_F32_e64 0, $vgpr6, 0, $vgpr6, 0, killed $vgpr2, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: SI_SPILL_AV128_SAVE $vgpr4_vgpr5_vgpr6_vgpr7, %stack.4, $sgpr32, 0, implicit $exec :: (store (s128) into %stack.4, align 4, addrspace 5) ; CHECK-NEXT: renamable $vgpr0 = V_TRUNC_F32_e32 killed $vgpr0, implicit $mode, implicit $exec ; CHECK-NEXT: SI_SPILL_AV32_SAVE killed $vgpr0, %stack.3, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.3, addrspace 5) + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = SI_SPILL_AV128_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5) ; CHECK-NEXT: renamable $vgpr0 = IMPLICIT_DEF - ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = SI_SPILL_AV128_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5) - ; CHECK-NEXT: renamable $vgpr5 = nofpexcept V_DIV_FIXUP_F32_e64 0, killed $vgpr0, 0, killed $vgpr7, 0, killed $vgpr5, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr7 = nofpexcept V_DIV_FIXUP_F32_e64 0, killed $vgpr0, 0, $vgpr7, 0, killed $vgpr3, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = SI_SPILL_AV128_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5) ; CHECK-NEXT: renamable $vgpr0 = SI_SPILL_AV32_RESTORE %stack.3, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.3, addrspace 5) - ; CHECK-NEXT: renamable $vgpr9 = COPY killed renamable $vgpr5 - ; CHECK-NEXT: renamable $vgpr4_vgpr5_vgpr6_vgpr7 = SI_SPILL_AV128_RESTORE %stack.4, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.4, align 4, addrspace 5) - ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = SI_SPILL_AV128_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5) - ; CHECK-NEXT: renamable $vgpr8 = nofpexcept V_FMA_F32_e64 1, killed $vgpr0, 0, killed $vgpr6, 0, killed $vgpr4, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: renamable $vgpr2_vgpr3 = COPY killed renamable $vgpr8_vgpr9 - ; 
CHECK-NEXT: renamable $vgpr0 = IMPLICIT_DEF - ; CHECK-NEXT: renamable $vgpr4_vgpr5_vgpr6_vgpr7 = SI_SPILL_AV128_RESTORE %stack.4, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.4, align 4, addrspace 5) - ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = SI_SPILL_AV128_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5) - ; CHECK-NEXT: renamable $vgpr0 = nofpexcept V_DIV_FIXUP_F32_e64 0, killed $vgpr0, 0, killed $vgpr4, 0, killed $vgpr6, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: renamable $vgpr4_vgpr5_vgpr6_vgpr7 = SI_SPILL_AV128_RESTORE %stack.4, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.4, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vgpr6 = nofpexcept V_FMA_F32_e64 1, killed $vgpr0, 0, $vgpr6, 0, killed $vgpr2, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: renamable $vgpr2_vgpr3 = COPY killed renamable $vgpr6_vgpr7 ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = SI_SPILL_AV128_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5) + ; CHECK-NEXT: renamable $vgpr0 = IMPLICIT_DEF + ; CHECK-NEXT: renamable $vgpr0 = nofpexcept V_DIV_FIXUP_F32_e64 0, killed $vgpr0, 0, $vgpr4, 0, $vgpr6, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: dead renamable $vgpr1 = V_FMA_F32_e64 0, killed $vgpr5, 0, $vgpr5, 0, killed $vgpr7, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: dead renamable $vgpr4_vgpr5_vgpr6_vgpr7 = SCRATCH_LOAD_DWORDX4_SADDR %stack.0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s128), addrspace 5) ; CHECK-NEXT: renamable $vgpr4_vgpr5 = IMPLICIT_DEF diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll index bf1f6980fe25a..1ed04f8782d5d 100644 --- a/llvm/test/CodeGen/AMDGPU/udiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll @@ -50,7 +50,7 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y ; GCN-NEXT: s_mov_b32 s5, s1 ; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 ; GCN-NEXT: v_mul_lo_u32 v3, s4, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 +; GCN-NEXT: v_add_i32_e32 v2, vcc, v4, v2 ; GCN-NEXT: v_mul_lo_u32 v6, v0, v2 ; GCN-NEXT: v_mul_hi_u32 v7, v0, v3 ; GCN-NEXT: v_mul_hi_u32 v8, v0, v2 @@ -721,16 +721,14 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48 ; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: v_mov_b32_e32 v3, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_and_b32 s2, s2, 0xff000000 -; GCN-NEXT: s_and_b32 s4, s4, 0xff000000 -; GCN-NEXT: s_and_b32 s5, s5, 0xffff -; GCN-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NEXT: v_alignbit_b32 v0, s5, v0, 24 -; GCN-NEXT: v_cvt_f32_u32_e32 v0, v0 ; GCN-NEXT: s_and_b32 s3, s3, 0xffff -; GCN-NEXT: v_mov_b32_e32 v1, s2 -; GCN-NEXT: v_alignbit_b32 v1, s3, v1, 24 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, v1 +; GCN-NEXT: s_and_b32 s5, s5, 0xffff +; GCN-NEXT: s_and_b32 s4, s4, 0xff000000 +; GCN-NEXT: s_lshr_b64 s[4:5], s[4:5], 24 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GCN-NEXT: s_and_b32 s2, s2, 0xff000000 +; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s2 ; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v0 ; GCN-NEXT: s_mov_b32 s4, s0 ; GCN-NEXT: s_mov_b32 s5, s1 @@ -753,16 +751,14 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_and_b32 s2, s2, 0xff000000 -; GCN-IR-NEXT: s_and_b32 s4, s4, 0xff000000 -; GCN-IR-NEXT: s_and_b32 s5, s5, 0xffff -; 
GCN-IR-NEXT: v_mov_b32_e32 v0, s4 -; GCN-IR-NEXT: v_alignbit_b32 v0, s5, v0, 24 -; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, v0 ; GCN-IR-NEXT: s_and_b32 s3, s3, 0xffff -; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 -; GCN-IR-NEXT: v_alignbit_b32 v1, s3, v1, 24 -; GCN-IR-NEXT: v_cvt_f32_u32_e32 v1, v1 +; GCN-IR-NEXT: s_and_b32 s5, s5, 0xffff +; GCN-IR-NEXT: s_and_b32 s4, s4, 0xff000000 +; GCN-IR-NEXT: s_lshr_b64 s[4:5], s[4:5], 24 +; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GCN-IR-NEXT: s_and_b32 s2, s2, 0xff000000 +; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-IR-NEXT: v_cvt_f32_u32_e32 v1, s2 ; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v2, v0 ; GCN-IR-NEXT: s_mov_b32 s4, s0 ; GCN-IR-NEXT: s_mov_b32 s5, s1 @@ -788,104 +784,137 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x) ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 ; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2 ; GCN-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GCN-NEXT: s_sub_u32 s4, 0, s2 -; GCN-NEXT: s_subb_u32 s5, 0, s3 +; GCN-NEXT: s_sub_u32 s6, 0, s2 +; GCN-NEXT: s_subb_u32 s8, 0, s3 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GCN-NEXT: v_trunc_f32_e32 v1, v1 ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_mul_lo_u32 v2, s4, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s4, v0 -; GCN-NEXT: v_mul_lo_u32 v5, s5, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s4, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GCN-NEXT: v_mul_hi_u32 v3, v0, v4 -; GCN-NEXT: v_mul_lo_u32 v5, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v6, v1, v4 -; GCN-NEXT: v_mul_lo_u32 v4, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v8, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v4 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v6, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s4, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s4, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s5, v0 -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_mul_lo_u32 v3, s4, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_mul_lo_u32 v6, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v3 -; GCN-NEXT: v_mul_hi_u32 v8, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v3 -; GCN-NEXT: v_mul_lo_u32 v3, v1, v3 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v2 -; GCN-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, 24 -; GCN-NEXT: v_mul_hi_u32 v0, v0, 24 -; GCN-NEXT: v_mul_hi_u32 v1, v1, 24 -; GCN-NEXT: v_mov_b32_e32 v4, s3 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_mul_hi_u32 v2, s6, 
v0 +; GCN-NEXT: v_readfirstlane_b32 s9, v1 +; GCN-NEXT: v_readfirstlane_b32 s4, v0 +; GCN-NEXT: s_mul_i32 s5, s6, s9 +; GCN-NEXT: v_readfirstlane_b32 s12, v2 +; GCN-NEXT: s_mul_i32 s10, s8, s4 +; GCN-NEXT: s_mul_i32 s11, s6, s4 +; GCN-NEXT: s_add_i32 s5, s12, s5 +; GCN-NEXT: v_mul_hi_u32 v3, v0, s11 +; GCN-NEXT: s_add_i32 s5, s5, s10 +; GCN-NEXT: v_mul_hi_u32 v0, v0, s5 +; GCN-NEXT: v_mul_hi_u32 v4, v1, s11 +; GCN-NEXT: v_readfirstlane_b32 s10, v3 +; GCN-NEXT: v_mul_hi_u32 v1, v1, s5 +; GCN-NEXT: s_mul_i32 s13, s4, s5 +; GCN-NEXT: s_add_u32 s10, s10, s13 +; GCN-NEXT: v_readfirstlane_b32 s13, v0 +; GCN-NEXT: s_mul_i32 s11, s9, s11 +; GCN-NEXT: s_addc_u32 s13, 0, s13 +; GCN-NEXT: v_readfirstlane_b32 s12, v4 +; GCN-NEXT: s_add_u32 s10, s10, s11 +; GCN-NEXT: v_readfirstlane_b32 s14, v1 +; GCN-NEXT: s_addc_u32 s10, s13, s12 +; GCN-NEXT: s_addc_u32 s11, s14, 0 +; GCN-NEXT: s_mul_i32 s5, s9, s5 +; GCN-NEXT: s_add_u32 s5, s10, s5 +; GCN-NEXT: s_addc_u32 s10, 0, s11 +; GCN-NEXT: s_add_u32 s11, s4, s5 +; GCN-NEXT: v_mov_b32_e32 v0, s11 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: v_mul_hi_u32 v0, s6, v0 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_addc_u32 s9, s9, s10 +; GCN-NEXT: s_mul_i32 s4, s6, s9 +; GCN-NEXT: v_readfirstlane_b32 s5, v0 +; GCN-NEXT: s_add_i32 s4, s5, s4 +; GCN-NEXT: s_mul_i32 s8, s8, s11 +; GCN-NEXT: s_mul_i32 s5, s6, s11 +; GCN-NEXT: s_add_i32 s4, s4, s8 +; GCN-NEXT: v_mov_b32_e32 v2, s5 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: v_mul_hi_u32 v3, s9, v2 +; GCN-NEXT: v_mul_hi_u32 v2, s11, v2 +; GCN-NEXT: v_mul_hi_u32 v1, s9, v0 +; GCN-NEXT: v_mul_hi_u32 v0, s11, v0 +; GCN-NEXT: s_mul_i32 s8, s11, s4 +; GCN-NEXT: v_readfirstlane_b32 s12, v2 +; GCN-NEXT: s_add_u32 s8, s12, s8 +; GCN-NEXT: v_readfirstlane_b32 s10, v0 +; GCN-NEXT: s_mul_i32 s5, s9, s5 +; GCN-NEXT: s_addc_u32 s10, 0, s10 +; GCN-NEXT: v_readfirstlane_b32 s6, v3 +; GCN-NEXT: s_add_u32 s5, s8, s5 +; GCN-NEXT: s_addc_u32 s5, s10, s6 +; GCN-NEXT: v_readfirstlane_b32 s6, v1 +; GCN-NEXT: s_addc_u32 s6, s6, 0 +; GCN-NEXT: s_mul_i32 s4, s9, s4 +; GCN-NEXT: s_add_u32 s4, s5, s4 +; GCN-NEXT: s_addc_u32 s6, 0, s6 +; GCN-NEXT: s_add_u32 s8, s11, s4 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_addc_u32 s4, s9, s6 +; GCN-NEXT: v_mul_hi_u32 v1, s8, 24 +; GCN-NEXT: v_mul_hi_u32 v0, s4, 24 +; GCN-NEXT: s_mul_i32 s4, s4, 24 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: v_readfirstlane_b32 s8, v1 +; GCN-NEXT: v_readfirstlane_b32 s5, v0 +; GCN-NEXT: s_add_u32 s4, s8, s4 +; GCN-NEXT: s_addc_u32 s8, 0, s5 +; GCN-NEXT: v_mov_b32_e32 v0, s8 +; GCN-NEXT: v_mul_hi_u32 v0, s2, v0 ; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v1, vcc -; GCN-NEXT: v_mul_lo_u32 v1, s3, v0 -; GCN-NEXT: v_mul_hi_u32 v2, s2, v0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2 -; GCN-NEXT: v_mul_lo_u32 v2, s2, v0 -; GCN-NEXT: v_sub_i32_e32 v3, vcc, 0, v1 -; GCN-NEXT: v_sub_i32_e32 v2, vcc, 24, v2 -; GCN-NEXT: v_subb_u32_e64 v3, s[0:1], v3, v4, vcc -; GCN-NEXT: v_subrev_i32_e64 v4, s[0:1], s2, v2 -; GCN-NEXT: v_subbrev_u32_e64 v3, s[0:1], 0, v3, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[0:1], s3, v3 -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[0:1], s2, v4 -; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1] -; GCN-NEXT: v_cmp_eq_u32_e64 s[0:1], s3, v3 -; GCN-NEXT: v_cndmask_b32_e64 v3, v5, v4, s[0:1] -; GCN-NEXT: v_add_i32_e64 v4, s[0:1], 1, v0 -; GCN-NEXT: 
v_addc_u32_e64 v5, s[0:1], 0, 0, s[0:1] -; GCN-NEXT: v_add_i32_e64 v6, s[0:1], 2, v0 -; GCN-NEXT: v_addc_u32_e64 v7, s[0:1], 0, 0, s[0:1] -; GCN-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v3 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s3, v1 -; GCN-NEXT: v_cndmask_b32_e64 v3, v4, v6, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v4, v5, v7, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s2, v2 -; GCN-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, s3, v1 -; GCN-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1 -; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v4, vcc -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: s_mul_i32 s0, s3, s8 +; GCN-NEXT: v_readfirstlane_b32 s1, v0 +; GCN-NEXT: s_add_i32 s9, s1, s0 +; GCN-NEXT: s_sub_i32 s10, 0, s9 +; GCN-NEXT: s_mul_i32 s0, s2, s8 +; GCN-NEXT: s_sub_u32 s11, 24, s0 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: s_or_b32 s12, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s12, 0 +; GCN-NEXT: s_subb_u32 s10, s10, s3 +; GCN-NEXT: s_sub_u32 s13, s11, s2 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_subb_u32 s0, s10, 0 +; GCN-NEXT: s_cmp_ge_u32 s0, s3 +; GCN-NEXT: s_cselect_b32 s1, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s13, s2 +; GCN-NEXT: s_cselect_b32 s10, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s0, s3 +; GCN-NEXT: s_cselect_b32 s0, s10, s1 +; GCN-NEXT: s_add_u32 s1, s8, 1 +; GCN-NEXT: s_addc_u32 s10, 0, 0 +; GCN-NEXT: s_add_u32 s13, s8, 2 +; GCN-NEXT: s_addc_u32 s14, 0, 0 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_cselect_b32 s0, s13, s1 +; GCN-NEXT: s_cselect_b32 s1, s14, s10 +; GCN-NEXT: s_cmp_lg_u32 s12, 0 +; GCN-NEXT: s_subb_u32 s9, 0, s9 +; GCN-NEXT: s_cmp_ge_u32 s9, s3 +; GCN-NEXT: s_cselect_b32 s10, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s11, s2 +; GCN-NEXT: s_cselect_b32 s2, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s9, s3 +; GCN-NEXT: s_cselect_b32 s2, s2, s10 +; GCN-NEXT: s_cmp_lg_u32 s2, 0 +; GCN-NEXT: s_cselect_b32 s1, s1, 0 +; GCN-NEXT: s_cselect_b32 s0, s0, s8 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: v_mov_b32_e32 v1, s1 ; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/uniform-alignbit.ll b/llvm/test/CodeGen/AMDGPU/uniform-alignbit.ll new file mode 100644 index 0000000000000..fe8c90ee7b686 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/uniform-alignbit.ll @@ -0,0 +1,38 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 < %s | FileCheck -check-prefixes=GCN %s + +define amdgpu_kernel void @uniform_build_vector(i64 %in, ptr addrspace(1) %out) { +; GCN-LABEL: uniform_build_vector: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_lshr_b64 s[4:5], s[0:1], 1 +; GCN-NEXT: s_mov_b32 s5, 0 +; GCN-NEXT: s_mov_b32 s6, s5 +; GCN-NEXT: s_mov_b32 s7, s5 +; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v1, 0 +; GCN-NEXT: ; sched_barrier mask(0x00000000) +; GCN-NEXT: v_and_b32_e32 v0, 1, v0 +; GCN-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; GCN-NEXT: global_store_dword v1, v0, s[2:3] +; GCN-NEXT: s_endpgm +entry: + %shifted = lshr i64 %in, 1 + %trunc = trunc i64 %shifted to i32 + %insert = insertelement <4 x i32> zeroinitializer, i32 %trunc, i64 0 + %load = 
tail call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> %insert, i32 0, i32 0, i32 0) + tail call void @llvm.amdgcn.sched.barrier(i32 0) + %extract = extractelement <4 x i32> %load, i64 0 + %and = and i32 %extract, 1 + %convert = sitofp i32 %and to float + store float %convert, ptr addrspace(1) %out + ret void +} + +; Function Attrs: convergent nocallback nofree nounwind willreturn +declare void @llvm.amdgcn.sched.barrier(i32 immarg) #0 + +; Function Attrs: nocallback nofree nosync nounwind willreturn memory(read) +declare <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32>, i32, i32, i32 immarg) #1 \ No newline at end of file diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll index c4d928185d8f4..b846ce7f12466 100644 --- a/llvm/test/CodeGen/AMDGPU/urem64.ll +++ b/llvm/test/CodeGen/AMDGPU/urem64.ll @@ -5,119 +5,159 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y) { ; GCN-LABEL: s_test_urem_i64: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0xd -; GCN-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s7, 0xf000 -; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd +; GCN-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s3, 0xf000 +; GCN-NEXT: s_mov_b32 s2, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s12 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, s13 -; GCN-NEXT: s_sub_u32 s0, 0, s12 -; GCN-NEXT: s_subb_u32 s1, 0, s13 -; GCN-NEXT: s_mov_b32 s4, s8 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s9 +; GCN-NEXT: s_sub_u32 s10, 0, s8 +; GCN-NEXT: s_subb_u32 s11, 0, s9 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 -; GCN-NEXT: s_mov_b32 s5, s9 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GCN-NEXT: v_trunc_f32_e32 v1, v1 ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_mul_lo_u32 v2, s0, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s0, v0 -; GCN-NEXT: v_mul_lo_u32 v5, s1, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s0, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GCN-NEXT: v_mul_hi_u32 v3, v0, v4 -; GCN-NEXT: v_mul_lo_u32 v5, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v6, v1, v4 -; GCN-NEXT: v_mul_lo_u32 v4, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v8, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v4 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v6, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s0, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s0, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s1, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_mul_lo_u32 v3, s0, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_mul_lo_u32 v6, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v3 -; GCN-NEXT: v_mul_hi_u32 v8, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v3 -; GCN-NEXT: v_mul_lo_u32 v3, v1, v3 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v2 -; GCN-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: 
v_add_i32_e32 v3, vcc, v6, v3 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s10, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s10, v0 -; GCN-NEXT: v_mul_hi_u32 v4, s10, v1 -; GCN-NEXT: v_mul_hi_u32 v5, s11, v1 -; GCN-NEXT: v_mul_lo_u32 v1, s11, v1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_mul_lo_u32 v4, s11, v0 -; GCN-NEXT: v_mul_hi_u32 v0, s11, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, v3, v0, vcc -; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc -; GCN-NEXT: v_mul_lo_u32 v1, s12, v1 -; GCN-NEXT: v_mul_hi_u32 v2, s12, v0 -; GCN-NEXT: v_mul_lo_u32 v3, s13, v0 -; GCN-NEXT: v_mul_lo_u32 v0, s12, v0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v3, v1 -; GCN-NEXT: v_sub_i32_e32 v2, vcc, s11, v1 -; GCN-NEXT: v_mov_b32_e32 v3, s13 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, s10, v0 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, vcc -; GCN-NEXT: v_subrev_i32_e64 v4, s[0:1], s12, v0 -; GCN-NEXT: v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s13, v5 -; GCN-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s12, v4 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], s13, v5 -; GCN-NEXT: v_subrev_i32_e64 v3, s[0:1], s12, v4 -; GCN-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[2:3] -; GCN-NEXT: v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1] -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6 -; GCN-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1] -; GCN-NEXT: v_mov_b32_e32 v4, s11 -; GCN-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s13, v1 -; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s12, v0 -; GCN-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, s13, v1 -; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GCN-NEXT: v_mul_hi_u32 v2, s10, v0 +; GCN-NEXT: v_readfirstlane_b32 s12, v1 +; GCN-NEXT: v_readfirstlane_b32 s0, v0 +; GCN-NEXT: s_mul_i32 s1, s10, s12 +; GCN-NEXT: v_readfirstlane_b32 s15, v2 +; GCN-NEXT: s_mul_i32 s13, s11, s0 +; GCN-NEXT: s_mul_i32 s14, s10, s0 +; GCN-NEXT: s_add_i32 s1, s15, s1 +; GCN-NEXT: v_mul_hi_u32 v3, v0, s14 +; GCN-NEXT: s_add_i32 s1, s1, s13 +; GCN-NEXT: v_mul_hi_u32 v0, v0, s1 +; GCN-NEXT: v_mul_hi_u32 v4, v1, s14 +; GCN-NEXT: v_readfirstlane_b32 s13, v3 +; GCN-NEXT: s_mul_i32 s15, s0, s1 +; GCN-NEXT: v_mul_hi_u32 v1, v1, s1 +; GCN-NEXT: s_add_u32 s13, s13, s15 +; GCN-NEXT: v_readfirstlane_b32 s15, v0 +; GCN-NEXT: s_mul_i32 s14, s12, s14 +; GCN-NEXT: s_addc_u32 s15, 0, s15 +; GCN-NEXT: v_readfirstlane_b32 s16, v4 +; GCN-NEXT: s_add_u32 s13, s13, s14 +; GCN-NEXT: s_addc_u32 s13, s15, s16 +; GCN-NEXT: v_readfirstlane_b32 s14, v1 +; GCN-NEXT: s_addc_u32 s14, s14, 0 +; GCN-NEXT: s_mul_i32 s1, s12, s1 +; GCN-NEXT: 
s_add_u32 s1, s13, s1 +; GCN-NEXT: s_addc_u32 s13, 0, s14 +; GCN-NEXT: s_add_u32 s14, s0, s1 +; GCN-NEXT: v_mov_b32_e32 v0, s14 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: v_mul_hi_u32 v0, s10, v0 +; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_addc_u32 s12, s12, s13 +; GCN-NEXT: s_mul_i32 s0, s10, s12 +; GCN-NEXT: v_readfirstlane_b32 s1, v0 +; GCN-NEXT: s_add_i32 s0, s1, s0 +; GCN-NEXT: s_mul_i32 s11, s11, s14 +; GCN-NEXT: s_mul_i32 s1, s10, s14 +; GCN-NEXT: s_add_i32 s0, s0, s11 +; GCN-NEXT: v_mov_b32_e32 v2, s1 +; GCN-NEXT: v_mov_b32_e32 v0, s0 +; GCN-NEXT: v_mul_hi_u32 v3, s12, v2 +; GCN-NEXT: v_mul_hi_u32 v2, s14, v2 +; GCN-NEXT: v_mul_hi_u32 v1, s12, v0 +; GCN-NEXT: v_mul_hi_u32 v0, s14, v0 +; GCN-NEXT: s_mul_i32 s11, s14, s0 +; GCN-NEXT: v_readfirstlane_b32 s15, v2 +; GCN-NEXT: s_add_u32 s11, s15, s11 +; GCN-NEXT: v_readfirstlane_b32 s13, v0 +; GCN-NEXT: s_mul_i32 s1, s12, s1 +; GCN-NEXT: s_addc_u32 s13, 0, s13 +; GCN-NEXT: v_readfirstlane_b32 s10, v3 +; GCN-NEXT: s_add_u32 s1, s11, s1 +; GCN-NEXT: s_addc_u32 s1, s13, s10 +; GCN-NEXT: v_readfirstlane_b32 s10, v1 +; GCN-NEXT: s_addc_u32 s10, s10, 0 +; GCN-NEXT: s_mul_i32 s0, s12, s0 +; GCN-NEXT: s_add_u32 s0, s1, s0 +; GCN-NEXT: s_addc_u32 s10, 0, s10 +; GCN-NEXT: s_add_u32 s11, s14, s0 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_addc_u32 s1, s12, s10 +; GCN-NEXT: v_mov_b32_e32 v0, s1 +; GCN-NEXT: v_mul_hi_u32 v1, s6, v0 +; GCN-NEXT: v_mov_b32_e32 v2, s11 +; GCN-NEXT: v_mul_hi_u32 v3, s6, v2 +; GCN-NEXT: s_mov_b32 s0, s4 +; GCN-NEXT: v_readfirstlane_b32 s10, v1 +; GCN-NEXT: v_mul_hi_u32 v1, s7, v2 +; GCN-NEXT: s_mul_i32 s4, s6, s1 +; GCN-NEXT: v_readfirstlane_b32 s12, v3 +; GCN-NEXT: v_mul_hi_u32 v0, s7, v0 +; GCN-NEXT: s_add_u32 s4, s12, s4 +; GCN-NEXT: s_addc_u32 s10, 0, s10 +; GCN-NEXT: s_mul_i32 s11, s7, s11 +; GCN-NEXT: v_readfirstlane_b32 s12, v1 +; GCN-NEXT: s_add_u32 s4, s4, s11 +; GCN-NEXT: s_addc_u32 s4, s10, s12 +; GCN-NEXT: v_readfirstlane_b32 s10, v0 +; GCN-NEXT: s_addc_u32 s10, s10, 0 +; GCN-NEXT: s_mul_i32 s1, s7, s1 +; GCN-NEXT: s_add_u32 s4, s4, s1 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: v_mul_hi_u32 v0, s8, v0 +; GCN-NEXT: s_mov_b32 s1, s5 +; GCN-NEXT: s_addc_u32 s5, 0, s10 +; GCN-NEXT: s_mul_i32 s5, s8, s5 +; GCN-NEXT: v_readfirstlane_b32 s10, v0 +; GCN-NEXT: s_add_i32 s5, s10, s5 +; GCN-NEXT: s_mul_i32 s10, s9, s4 +; GCN-NEXT: s_add_i32 s10, s5, s10 +; GCN-NEXT: s_sub_i32 s11, s7, s10 +; GCN-NEXT: s_mul_i32 s4, s8, s4 +; GCN-NEXT: s_sub_u32 s6, s6, s4 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s12, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s12, 0 +; GCN-NEXT: s_subb_u32 s11, s11, s9 +; GCN-NEXT: s_sub_u32 s13, s6, s8 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_subb_u32 s14, s11, 0 +; GCN-NEXT: s_cmp_ge_u32 s14, s9 +; GCN-NEXT: s_cselect_b32 s5, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s13, s8 +; GCN-NEXT: s_cselect_b32 s15, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s14, s9 +; GCN-NEXT: s_cselect_b32 s15, s15, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_subb_u32 s11, s11, s9 +; GCN-NEXT: s_sub_u32 s16, s13, s8 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_subb_u32 s4, s11, 0 +; GCN-NEXT: s_cmp_lg_u32 s15, 0 +; GCN-NEXT: s_cselect_b32 s5, s16, s13 +; GCN-NEXT: s_cselect_b32 s4, s4, s14 +; GCN-NEXT: s_cmp_lg_u32 s12, 0 +; GCN-NEXT: s_subb_u32 s7, 
s7, s10 +; GCN-NEXT: s_cmp_ge_u32 s7, s9 +; GCN-NEXT: s_cselect_b32 s10, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s6, s8 +; GCN-NEXT: s_cselect_b32 s8, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s7, s9 +; GCN-NEXT: s_cselect_b32 s8, s8, s10 +; GCN-NEXT: s_cmp_lg_u32 s8, 0 +; GCN-NEXT: s_cselect_b32 s4, s4, s7 +; GCN-NEXT: s_cselect_b32 s5, s5, s6 +; GCN-NEXT: v_mov_b32_e32 v0, s5 +; GCN-NEXT: v_mov_b32_e32 v1, s4 +; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; GCN-NEXT: s_endpgm ; ; GCN-IR-LABEL: s_test_urem_i64: @@ -764,106 +804,143 @@ define amdgpu_kernel void @s_test_urem23_64_v2i64(ptr addrspace(1) %out, <2 x i6 define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x) { ; GCN-LABEL: s_test_urem_k_num_i64: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9 -; GCN-NEXT: s_mov_b32 s11, 0xf000 -; GCN-NEXT: s_mov_b32 s10, -1 +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: v_cvt_f32_u32_e32 v0, s6 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, s7 -; GCN-NEXT: s_sub_u32 s0, 0, s6 -; GCN-NEXT: s_subb_u32 s1, 0, s7 -; GCN-NEXT: s_mov_b32 s8, s4 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s2 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s3 +; GCN-NEXT: s_sub_u32 s6, 0, s2 +; GCN-NEXT: s_subb_u32 s8, 0, s3 ; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GCN-NEXT: v_rcp_f32_e32 v0, v0 -; GCN-NEXT: s_mov_b32 s9, s5 ; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 ; GCN-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0 ; GCN-NEXT: v_trunc_f32_e32 v1, v1 ; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 -; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GCN-NEXT: v_mul_lo_u32 v2, s0, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s0, v0 -; GCN-NEXT: v_mul_lo_u32 v5, s1, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s0, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v5 -; GCN-NEXT: v_mul_hi_u32 v3, v0, v4 -; GCN-NEXT: v_mul_lo_u32 v5, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v6, v1, v4 -; GCN-NEXT: v_mul_lo_u32 v4, v1, v4 -; GCN-NEXT: v_mul_hi_u32 v8, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v5 -; GCN-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v3, v4 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v5, v6, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v8, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, s0, v1 -; GCN-NEXT: v_mul_hi_u32 v3, s0, v0 -; GCN-NEXT: v_mul_lo_u32 v4, s1, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v3 -; GCN-NEXT: v_mul_lo_u32 v3, s0, v0 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v2, v4 -; GCN-NEXT: v_mul_lo_u32 v6, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v7, v0, v3 -; GCN-NEXT: v_mul_hi_u32 v8, v0, v2 -; GCN-NEXT: v_mul_hi_u32 v5, v1, v3 -; GCN-NEXT: v_mul_lo_u32 v3, v1, v3 -; GCN-NEXT: v_mul_hi_u32 v4, v1, v2 -; GCN-NEXT: v_add_i32_e32 v6, vcc, v7, v6 -; GCN-NEXT: v_addc_u32_e32 v7, vcc, 0, v8, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, v2 -; GCN-NEXT: v_add_i32_e32 v3, vcc, v6, v3 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, v7, v5, vcc -; GCN-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v2, vcc, v3, v2 -; GCN-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc -; GCN-NEXT: v_mul_lo_u32 v2, v1, 24 -; GCN-NEXT: v_mul_hi_u32 v0, v0, 24 -; GCN-NEXT: 
v_mul_hi_u32 v1, v1, 24 -; GCN-NEXT: v_mov_b32_e32 v3, s7 -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v1, vcc -; GCN-NEXT: v_mul_lo_u32 v1, s7, v0 +; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GCN-NEXT: v_mul_hi_u32 v2, s6, v0 -; GCN-NEXT: v_mul_lo_u32 v0, s6, v0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2 -; GCN-NEXT: v_sub_i32_e32 v2, vcc, 0, v1 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, 24, v0 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, vcc -; GCN-NEXT: v_subrev_i32_e64 v4, s[0:1], s6, v0 -; GCN-NEXT: v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s7, v5 -; GCN-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_le_u32_e64 s[2:3], s6, v4 -; GCN-NEXT: v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[2:3] -; GCN-NEXT: v_cmp_eq_u32_e64 s[2:3], s7, v5 -; GCN-NEXT: v_subrev_i32_e64 v3, s[0:1], s6, v4 -; GCN-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[2:3] -; GCN-NEXT: v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1] -; GCN-NEXT: v_subb_u32_e32 v1, vcc, 0, v1, vcc -; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v6 -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s7, v1 -; GCN-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, vcc -; GCN-NEXT: v_cmp_le_u32_e32 vcc, s6, v0 -; GCN-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[0:1] -; GCN-NEXT: v_cndmask_b32_e64 v5, 0, -1, vcc -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, s7, v1 -; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 -; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc -; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 +; GCN-NEXT: v_readfirstlane_b32 s9, v1 +; GCN-NEXT: v_readfirstlane_b32 s4, v0 +; GCN-NEXT: s_mul_i32 s5, s6, s9 +; GCN-NEXT: v_readfirstlane_b32 s12, v2 +; GCN-NEXT: s_mul_i32 s10, s8, s4 +; GCN-NEXT: s_mul_i32 s11, s6, s4 +; GCN-NEXT: s_add_i32 s5, s12, s5 +; GCN-NEXT: v_mul_hi_u32 v3, v0, s11 +; GCN-NEXT: s_add_i32 s5, s5, s10 +; GCN-NEXT: v_mul_hi_u32 v0, v0, s5 +; GCN-NEXT: v_mul_hi_u32 v4, v1, s11 +; GCN-NEXT: v_readfirstlane_b32 s10, v3 +; GCN-NEXT: v_mul_hi_u32 v1, v1, s5 +; GCN-NEXT: s_mul_i32 s13, s4, s5 +; GCN-NEXT: s_add_u32 s10, s10, s13 +; GCN-NEXT: v_readfirstlane_b32 s13, v0 +; GCN-NEXT: s_mul_i32 s11, s9, s11 +; GCN-NEXT: s_addc_u32 s13, 0, s13 +; GCN-NEXT: v_readfirstlane_b32 s12, v4 +; GCN-NEXT: s_add_u32 s10, s10, s11 +; GCN-NEXT: v_readfirstlane_b32 s14, v1 +; GCN-NEXT: s_addc_u32 s10, s13, s12 +; GCN-NEXT: s_addc_u32 s11, s14, 0 +; GCN-NEXT: s_mul_i32 s5, s9, s5 +; GCN-NEXT: s_add_u32 s5, s10, s5 +; GCN-NEXT: s_addc_u32 s10, 0, s11 +; GCN-NEXT: s_add_u32 s11, s4, s5 +; GCN-NEXT: v_mov_b32_e32 v0, s11 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: v_mul_hi_u32 v0, s6, v0 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_addc_u32 s9, s9, s10 +; GCN-NEXT: s_mul_i32 s4, s6, s9 +; GCN-NEXT: v_readfirstlane_b32 s5, v0 +; GCN-NEXT: s_add_i32 s4, s5, s4 +; GCN-NEXT: s_mul_i32 s8, s8, s11 +; GCN-NEXT: s_mul_i32 s5, s6, s11 +; GCN-NEXT: s_add_i32 s4, s4, s8 +; GCN-NEXT: v_mov_b32_e32 v2, s5 +; GCN-NEXT: v_mov_b32_e32 v0, s4 +; GCN-NEXT: v_mul_hi_u32 v3, s9, v2 +; GCN-NEXT: v_mul_hi_u32 v2, s11, v2 +; GCN-NEXT: v_mul_hi_u32 v1, s9, v0 +; GCN-NEXT: v_mul_hi_u32 v0, s11, v0 +; GCN-NEXT: s_mul_i32 s8, s11, s4 +; GCN-NEXT: v_readfirstlane_b32 s12, v2 +; GCN-NEXT: s_add_u32 s8, s12, s8 +; GCN-NEXT: v_readfirstlane_b32 s10, v0 +; GCN-NEXT: s_mul_i32 s5, s9, s5 +; GCN-NEXT: s_addc_u32 s10, 0, s10 +; 
GCN-NEXT: v_readfirstlane_b32 s6, v3 +; GCN-NEXT: s_add_u32 s5, s8, s5 +; GCN-NEXT: s_addc_u32 s5, s10, s6 +; GCN-NEXT: v_readfirstlane_b32 s6, v1 +; GCN-NEXT: s_addc_u32 s6, s6, 0 +; GCN-NEXT: s_mul_i32 s4, s9, s4 +; GCN-NEXT: s_add_u32 s4, s5, s4 +; GCN-NEXT: s_addc_u32 s6, 0, s6 +; GCN-NEXT: s_add_u32 s8, s11, s4 +; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_cmp_lg_u32 s4, 0 +; GCN-NEXT: s_addc_u32 s4, s9, s6 +; GCN-NEXT: v_mul_hi_u32 v1, s8, 24 +; GCN-NEXT: v_mul_hi_u32 v0, s4, 24 +; GCN-NEXT: s_mul_i32 s4, s4, 24 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: v_readfirstlane_b32 s8, v1 +; GCN-NEXT: v_readfirstlane_b32 s5, v0 +; GCN-NEXT: s_add_u32 s4, s8, s4 +; GCN-NEXT: s_addc_u32 s8, 0, s5 +; GCN-NEXT: v_mov_b32_e32 v0, s8 +; GCN-NEXT: v_mul_hi_u32 v0, s2, v0 +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: s_mul_i32 s0, s3, s8 +; GCN-NEXT: v_readfirstlane_b32 s1, v0 +; GCN-NEXT: s_add_i32 s9, s1, s0 +; GCN-NEXT: s_sub_i32 s10, 0, s9 +; GCN-NEXT: s_mul_i32 s0, s2, s8 +; GCN-NEXT: s_sub_u32 s8, 24, s0 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: s_or_b32 s11, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s11, 0 +; GCN-NEXT: s_subb_u32 s10, s10, s3 +; GCN-NEXT: s_sub_u32 s12, s8, s2 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_subb_u32 s13, s10, 0 +; GCN-NEXT: s_cmp_ge_u32 s13, s3 +; GCN-NEXT: s_cselect_b32 s1, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s12, s2 +; GCN-NEXT: s_cselect_b32 s14, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s13, s3 +; GCN-NEXT: s_cselect_b32 s14, s14, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_subb_u32 s10, s10, s3 +; GCN-NEXT: s_sub_u32 s15, s12, s2 +; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0 +; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_cmp_lg_u32 s0, 0 +; GCN-NEXT: s_subb_u32 s0, s10, 0 +; GCN-NEXT: s_cmp_lg_u32 s14, 0 +; GCN-NEXT: s_cselect_b32 s1, s15, s12 +; GCN-NEXT: s_cselect_b32 s0, s0, s13 +; GCN-NEXT: s_cmp_lg_u32 s11, 0 +; GCN-NEXT: s_subb_u32 s9, 0, s9 +; GCN-NEXT: s_cmp_ge_u32 s9, s3 +; GCN-NEXT: s_cselect_b32 s10, -1, 0 +; GCN-NEXT: s_cmp_ge_u32 s8, s2 +; GCN-NEXT: s_cselect_b32 s2, -1, 0 +; GCN-NEXT: s_cmp_eq_u32 s9, s3 +; GCN-NEXT: s_cselect_b32 s2, s2, s10 +; GCN-NEXT: s_cmp_lg_u32 s2, 0 +; GCN-NEXT: s_cselect_b32 s0, s0, s9 +; GCN-NEXT: s_cselect_b32 s1, s1, s8 +; GCN-NEXT: v_mov_b32_e32 v0, s1 +; GCN-NEXT: v_mov_b32_e32 v1, s0 +; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-NEXT: s_endpgm ; ; GCN-IR-LABEL: s_test_urem_k_num_i64: @@ -956,30 +1033,30 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x) ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mul_hi_u32 v3, s2, v2 ; GCN-NEXT: v_mul_hi_u32 v2, s3, v2 -; GCN-NEXT: v_mul_hi_u32 v1, s2, v0 ; GCN-NEXT: s_mul_i32 s5, s3, 0xaaaaaaab +; GCN-NEXT: v_mul_hi_u32 v1, s2, v0 ; GCN-NEXT: v_add_i32_e32 v3, vcc, s5, v3 ; GCN-NEXT: s_mov_b32 s4, s0 ; GCN-NEXT: s_mul_i32 s0, s2, 0xaaaaaaaa ; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc ; GCN-NEXT: v_add_i32_e32 v3, vcc, s0, v3 -; GCN-NEXT: v_mul_hi_u32 v0, s3, v0 +; GCN-NEXT: v_mul_hi_u32 v3, s3, v0 ; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; GCN-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-NEXT: s_mul_i32 s0, s3, 0xaaaaaaaa ; GCN-NEXT: v_addc_u32_e64 v2, s[8:9], 0, 0, vcc -; GCN-NEXT: v_add_i32_e32 v1, vcc, s0, v1 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, v0, v2, vcc -; GCN-NEXT: v_alignbit_b32 v1, v0, v1, 4 -; GCN-NEXT: v_lshrrev_b32_e32 v0, 4, v0 -; GCN-NEXT: v_mul_hi_u32 v2, v1, 24 +; 
GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v1 +; GCN-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc +; GCN-NEXT: v_lshrrev_b32_e32 v2, 4, v1 +; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], 4 +; GCN-NEXT: v_mul_lo_u32 v1, v2, 24 +; GCN-NEXT: v_mul_hi_u32 v2, v0, 24 ; GCN-NEXT: v_mul_lo_u32 v0, v0, 24 -; GCN-NEXT: v_mul_lo_u32 v1, v1, 24 -; GCN-NEXT: v_mov_b32_e32 v3, s3 ; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v0, v2 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, s2, v1 -; GCN-NEXT: v_subb_u32_e32 v1, vcc, v3, v2, vcc +; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2 +; GCN-NEXT: v_mov_b32_e32 v2, s3 +; GCN-NEXT: v_sub_i32_e32 v0, vcc, s2, v0 +; GCN-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc ; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/v_mac.ll b/llvm/test/CodeGen/AMDGPU/v_mac.ll index c12871536bafa..f5dc824aae35f 100644 --- a/llvm/test/CodeGen/AMDGPU/v_mac.ll +++ b/llvm/test/CodeGen/AMDGPU/v_mac.ll @@ -116,7 +116,7 @@ entry: ; GCN-LABEL: {{^}}nsz_mad_sub0_src0: ; GCN-NOT: v_mac_f32 ; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}} -define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 { +define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) { entry: %b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1 %c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2 @@ -125,7 +125,7 @@ entry: %b = load float, ptr addrspace(1) %b_ptr %c = load float, ptr addrspace(1) %c_ptr - %neg_a = fsub float 0.0, %a + %neg_a = fsub nsz float 0.0, %a %tmp0 = fmul float %neg_a, %b %tmp1 = fadd float %tmp0, %c @@ -176,7 +176,7 @@ entry: ; GCN-LABEL: {{^}}nsz_mad_sub0_src1: ; GCN-NOT: v_mac_f32 ; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}} -define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 { +define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) { entry: %b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1 %c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2 @@ -185,7 +185,7 @@ entry: %b = load float, ptr addrspace(1) %b_ptr %c = load float, ptr addrspace(1) %c_ptr - %neg_b = fsub float 0.0, %b + %neg_b = fsub nsz float 0.0, %b %tmp0 = fmul float %a, %neg_b %tmp1 = fadd float %tmp0, %c @@ -310,6 +310,5 @@ define float @v_mac_f32_dynamic_ftz(float %a, float %b, float %c) "denormal-fp-m declare i32 @llvm.amdgcn.workitem.id.x() #2 attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" } -attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" } attributes #2 = { nounwind readnone } attributes #3 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll index bcc60b06db291..8da6f2348690a 100644 --- a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll +++ b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll @@ -236,7 +236,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %a.neg = fsub half 0.0, %a.val + %a.neg = fsub nsz half 0.0, %a.val %t.val = fmul half %a.neg, %b.val %r.val = fadd half %t.val, %c.val @@ -263,7 +263,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %b.neg = fsub half 0.0, %b.val + %b.neg = fsub nsz half 0.0, %b.val %t.val = fmul half %a.val, %b.neg %r.val = fadd half %t.val, %c.val @@ -290,7 +290,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %c.neg = fsub half 0.0, %c.val 
+ %c.neg = fsub nsz half 0.0, %c.val %t.val = fmul half %a.val, %b.val %r.val = fadd half %t.val, %c.neg @@ -601,7 +601,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %a.neg = fsub <2 x half> <half 0.0, half 0.0>, %a.val + %a.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %a.val %t.val = fmul <2 x half> %a.neg, %b.val %r.val = fadd <2 x half> %t.val, %c.val @@ -634,7 +634,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %b.neg = fsub <2 x half> <half 0.0, half 0.0>, %b.val + %b.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %b.val %t.val = fmul <2 x half> %a.val, %b.neg %r.val = fadd <2 x half> %t.val, %c.val @@ -667,7 +667,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %c.neg = fsub <2 x half> <half 0.0, half 0.0>, %c.val + %c.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %c.val %t.val = fmul <2 x half> %a.val, %b.val %r.val = fadd <2 x half> %t.val, %c.neg @@ -678,5 +678,5 @@ entry: declare void @llvm.amdgcn.s.barrier() #2 attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" } -attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" "denormal-fp-math"="preserve-sign,preserve-sign" } +attributes #1 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" } attributes #2 = { nounwind convergent } diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-lowering-gfx1250-t16.mir b/llvm/test/CodeGen/AMDGPU/vgpr-lowering-gfx1250-t16.mir new file mode 100644 index 0000000000000..8a70a8acd28d3 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/vgpr-lowering-gfx1250-t16.mir @@ -0,0 +1,66 @@ +# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -start-before=amdgpu-lower-vgpr-encoding -o - %s | FileCheck -check-prefixes=GCN,ASM %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -start-before=amdgpu-lower-vgpr-encoding -o - %s | llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -filetype=obj -o - | llvm-objdump -d --mcpu=gfx1250 --mattr=+real-true16 - | FileCheck -check-prefixes=GCN,DIS %s + +# ASM-LABEL: {{^}}high_vgprs: +# DIS-LABEL: <high_vgprs>: +--- +name: high_vgprs +tracksRegLiveness: true +body: | + bb.0: + ; ASM: %bb.0: + + ; GCN-NEXT: v_add_f16_e64 v0.h, v1.h, v2.h + $vgpr0_hi16 = V_ADD_F16_t16_e64 0, undef $vgpr1_hi16, 0, undef $vgpr2_hi16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v0.l, v1.l, v2.l + $vgpr0_lo16 = V_ADD_F16_t16_e64 0, undef $vgpr1_lo16, 0, undef $vgpr2_lo16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v128.h, v129.h, v130.h + $vgpr128_hi16 = V_ADD_F16_t16_e64 0, undef $vgpr129_hi16, 0, undef $vgpr130_hi16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v128.l, v129.l, v130.l + $vgpr128_lo16 = V_ADD_F16_t16_e64 0, undef $vgpr129_lo16, 0, undef $vgpr130_lo16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: s_set_vgpr_msb 0x45 + ; ASM-SAME: ; msbs: dst=1 src0=1 src1=1 src2=0 + ; GCN-NEXT: v_add_f16_e64 v0.h /*v256.h*/, v1.h /*v257.h*/, v2.h /*v258.h*/ + $vgpr256_hi16 = V_ADD_F16_t16_e64 0, undef $vgpr257_hi16, 0, undef $vgpr258_hi16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v0.l /*v256.l*/, v1.l /*v257.l*/, v2.l /*v258.l*/ + $vgpr256_lo16 = V_ADD_F16_t16_e64 0, undef $vgpr257_lo16, 0, undef $vgpr258_lo16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v128.h /*v384.h*/, v129.h /*v385.h*/, v130.h /*v386.h*/ + $vgpr384_hi16 = V_ADD_F16_t16_e64 0, undef $vgpr385_hi16, 0, undef $vgpr386_hi16, 0, 0, 0, implicit $exec,
implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v128.l /*v384.l*/, v129.l /*v385.l*/, v130.l /*v386.l*/ + $vgpr384_lo16 = V_ADD_F16_t16_e64 0, undef $vgpr385_lo16, 0, undef $vgpr386_lo16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: s_set_vgpr_msb 0x8a + ; ASM-SAME: ; msbs: dst=2 src0=2 src1=2 src2=0 + ; GCN-NEXT: v_add_f16_e64 v0.h /*v512.h*/, v1.h /*v513.h*/, v2.h /*v514.h*/ + $vgpr512_hi16 = V_ADD_F16_t16_e64 0, undef $vgpr513_hi16, 0, undef $vgpr514_hi16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v0.l /*v512.l*/, v1.l /*v513.l*/, v2.l /*v514.l*/ + $vgpr512_lo16 = V_ADD_F16_t16_e64 0, undef $vgpr513_lo16, 0, undef $vgpr514_lo16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v128.h /*v640.h*/, v129.h /*v641.h*/, v130.h /*v642.h*/ + $vgpr640_hi16 = V_ADD_F16_t16_e64 0, undef $vgpr641_hi16, 0, undef $vgpr642_hi16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v128.l /*v640.l*/, v129.l /*v641.l*/, v130.l /*v642.l*/ + $vgpr640_lo16 = V_ADD_F16_t16_e64 0, undef $vgpr641_lo16, 0, undef $vgpr642_lo16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: s_set_vgpr_msb 0xcf + ; ASM-SAME: ; msbs: dst=3 src0=3 src1=3 src2=0 + ; GCN-NEXT: v_add_f16_e64 v0.h /*v768.h*/, v1.h /*v769.h*/, v2.h /*v770.h*/ + $vgpr768_hi16 = V_ADD_F16_t16_e64 0, undef $vgpr769_hi16, 0, undef $vgpr770_hi16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v0.l /*v768.l*/, v1.l /*v769.l*/, v2.l /*v770.l*/ + $vgpr768_lo16 = V_ADD_F16_t16_e64 0, undef $vgpr769_lo16, 0, undef $vgpr770_lo16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v128.h /*v896.h*/, v129.h /*v897.h*/, v130.h /*v898.h*/ + $vgpr896_hi16 = V_ADD_F16_t16_e64 0, undef $vgpr897_hi16, 0, undef $vgpr898_hi16, 0, 0, 0, implicit $exec, implicit $mode + + ; GCN-NEXT: v_add_f16_e64 v128.l /*v896.l*/, v129.l /*v897.l*/, v130.l /*v898.l*/ + $vgpr896_lo16 = V_ADD_F16_t16_e64 0, undef $vgpr897_lo16, 0, undef $vgpr898_lo16, 0, 0, 0, implicit $exec, implicit $mode +... 
diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-vmcnt-loop.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-vmcnt-loop.mir index 0ddd2aa285b26..0d54bfaed8130 100644 --- a/llvm/test/CodeGen/AMDGPU/waitcnt-vmcnt-loop.mir +++ b/llvm/test/CodeGen/AMDGPU/waitcnt-vmcnt-loop.mir @@ -1,4 +1,5 @@ # RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefix=GFX9 %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s -debugify-and-strip-all-safe | FileCheck -check-prefix=GFX9 %s # RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefix=GFX10 %s # RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefix=GFX12 %s diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll index 2a76d83cd7dac..75db3879e7b03 100644 --- a/llvm/test/CodeGen/AMDGPU/wave32.ll +++ b/llvm/test/CodeGen/AMDGPU/wave32.ll @@ -730,19 +730,19 @@ bb: define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 { ; GFX1032-LABEL: test_udiv64: ; GFX1032: ; %bb.0: ; %bb -; GFX1032-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x24 +; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24 ; GFX1032-NEXT: s_waitcnt lgkmcnt(0) -; GFX1032-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x0 +; GFX1032-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0 ; GFX1032-NEXT: s_waitcnt lgkmcnt(0) -; GFX1032-NEXT: s_or_b64 s[8:9], s[6:7], s[4:5] -; GFX1032-NEXT: s_mov_b32 s8, 0 -; GFX1032-NEXT: s_cmp_lg_u64 s[8:9], 0 +; GFX1032-NEXT: s_or_b64 s[4:5], s[2:3], s[0:1] +; GFX1032-NEXT: s_mov_b32 s4, 0 +; GFX1032-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX1032-NEXT: s_cbranch_scc0 .LBB15_4 ; GFX1032-NEXT: ; %bb.1: -; GFX1032-NEXT: v_cvt_f32_u32_e32 v0, s4 -; GFX1032-NEXT: v_cvt_f32_u32_e32 v1, s5 -; GFX1032-NEXT: s_sub_u32 s9, 0, s4 -; GFX1032-NEXT: s_subb_u32 s10, 0, s5 +; GFX1032-NEXT: v_cvt_f32_u32_e32 v0, s0 +; GFX1032-NEXT: v_cvt_f32_u32_e32 v1, s1 +; GFX1032-NEXT: s_sub_u32 s9, 0, s0 +; GFX1032-NEXT: s_subb_u32 s10, 0, s1 ; GFX1032-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GFX1032-NEXT: v_rcp_f32_e32 v0, v0 ; GFX1032-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -751,160 +751,158 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 { ; GFX1032-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0 ; GFX1032-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX1032-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1032-NEXT: v_readfirstlane_b32 s0, v1 -; GFX1032-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1032-NEXT: s_mul_i32 s11, s9, s0 -; GFX1032-NEXT: s_mul_hi_u32 s13, s9, s1 -; GFX1032-NEXT: s_mul_i32 s12, s10, s1 +; GFX1032-NEXT: v_readfirstlane_b32 s5, v1 +; GFX1032-NEXT: v_readfirstlane_b32 s8, v0 +; GFX1032-NEXT: s_mul_i32 s11, s9, s5 +; GFX1032-NEXT: s_mul_hi_u32 s13, s9, s8 +; GFX1032-NEXT: s_mul_i32 s12, s10, s8 ; GFX1032-NEXT: s_add_i32 s11, s13, s11 -; GFX1032-NEXT: s_mul_i32 s14, s9, s1 +; GFX1032-NEXT: s_mul_i32 s14, s9, s8 ; GFX1032-NEXT: s_add_i32 s11, s11, s12 -; GFX1032-NEXT: s_mul_hi_u32 s13, s1, s14 -; GFX1032-NEXT: s_mul_hi_u32 s15, s0, s14 -; GFX1032-NEXT: s_mul_i32 s12, s0, s14 -; GFX1032-NEXT: s_mul_hi_u32 s14, s1, s11 -; GFX1032-NEXT: s_mul_i32 s1, s1, s11 -; GFX1032-NEXT: s_mul_hi_u32 s16, s0, s11 -; GFX1032-NEXT: s_add_u32 s1, s13, s1 -; GFX1032-NEXT: s_addc_u32 s13, 0, s14 -; GFX1032-NEXT: s_add_u32 s1, s1, s12 -; GFX1032-NEXT: s_mul_i32 s11, s0, s11 -; GFX1032-NEXT: s_addc_u32 s1, s13, s15 -; GFX1032-NEXT: s_addc_u32 s12, s16, 0 -; 
GFX1032-NEXT: s_add_u32 s1, s1, s11 -; GFX1032-NEXT: s_addc_u32 s11, 0, s12 -; GFX1032-NEXT: v_add_co_u32 v0, s1, v0, s1 -; GFX1032-NEXT: s_cmp_lg_u32 s1, 0 -; GFX1032-NEXT: s_addc_u32 s0, s0, s11 -; GFX1032-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1032-NEXT: s_mul_i32 s11, s9, s0 -; GFX1032-NEXT: s_mul_hi_u32 s12, s9, s1 -; GFX1032-NEXT: s_mul_i32 s10, s10, s1 -; GFX1032-NEXT: s_add_i32 s11, s12, s11 -; GFX1032-NEXT: s_mul_i32 s9, s9, s1 -; GFX1032-NEXT: s_add_i32 s11, s11, s10 -; GFX1032-NEXT: s_mul_hi_u32 s12, s0, s9 -; GFX1032-NEXT: s_mul_i32 s13, s0, s9 -; GFX1032-NEXT: s_mul_hi_u32 s9, s1, s9 -; GFX1032-NEXT: s_mul_hi_u32 s14, s1, s11 -; GFX1032-NEXT: s_mul_i32 s1, s1, s11 -; GFX1032-NEXT: s_mul_hi_u32 s10, s0, s11 -; GFX1032-NEXT: s_add_u32 s1, s9, s1 -; GFX1032-NEXT: s_addc_u32 s9, 0, s14 -; GFX1032-NEXT: s_add_u32 s1, s1, s13 -; GFX1032-NEXT: s_mul_i32 s11, s0, s11 -; GFX1032-NEXT: s_addc_u32 s1, s9, s12 -; GFX1032-NEXT: s_addc_u32 s9, s10, 0 -; GFX1032-NEXT: s_add_u32 s1, s1, s11 -; GFX1032-NEXT: s_addc_u32 s9, 0, s9 -; GFX1032-NEXT: v_add_co_u32 v0, s1, v0, s1 -; GFX1032-NEXT: s_cmp_lg_u32 s1, 0 -; GFX1032-NEXT: s_addc_u32 s0, s0, s9 -; GFX1032-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1032-NEXT: s_mul_i32 s10, s6, s0 -; GFX1032-NEXT: s_mul_hi_u32 s9, s6, s0 -; GFX1032-NEXT: s_mul_hi_u32 s11, s7, s0 -; GFX1032-NEXT: s_mul_i32 s0, s7, s0 -; GFX1032-NEXT: s_mul_hi_u32 s12, s6, s1 -; GFX1032-NEXT: s_mul_hi_u32 s13, s7, s1 -; GFX1032-NEXT: s_mul_i32 s1, s7, s1 -; GFX1032-NEXT: s_add_u32 s10, s12, s10 -; GFX1032-NEXT: s_addc_u32 s9, 0, s9 -; GFX1032-NEXT: s_add_u32 s1, s10, s1 -; GFX1032-NEXT: s_addc_u32 s1, s9, s13 -; GFX1032-NEXT: s_addc_u32 s9, s11, 0 -; GFX1032-NEXT: s_add_u32 s1, s1, s0 -; GFX1032-NEXT: s_addc_u32 s9, 0, s9 -; GFX1032-NEXT: s_mul_hi_u32 s0, s4, s1 -; GFX1032-NEXT: s_mul_i32 s11, s4, s9 -; GFX1032-NEXT: s_mul_i32 s12, s4, s1 -; GFX1032-NEXT: s_add_i32 s0, s0, s11 -; GFX1032-NEXT: v_sub_co_u32 v0, s11, s6, s12 -; GFX1032-NEXT: s_mul_i32 s10, s5, s1 -; GFX1032-NEXT: s_add_i32 s0, s0, s10 -; GFX1032-NEXT: v_sub_co_u32 v1, s12, v0, s4 -; GFX1032-NEXT: s_sub_i32 s10, s7, s0 +; GFX1032-NEXT: s_mul_hi_u32 s13, s8, s14 +; GFX1032-NEXT: s_mul_i32 s16, s8, s11 +; GFX1032-NEXT: s_mul_hi_u32 s15, s5, s14 +; GFX1032-NEXT: s_mul_i32 s12, s5, s14 +; GFX1032-NEXT: s_mul_hi_u32 s14, s8, s11 +; GFX1032-NEXT: s_add_u32 s13, s13, s16 +; GFX1032-NEXT: s_addc_u32 s14, 0, s14 +; GFX1032-NEXT: s_mul_hi_u32 s17, s5, s11 +; GFX1032-NEXT: s_add_u32 s12, s13, s12 +; GFX1032-NEXT: s_mul_i32 s11, s5, s11 +; GFX1032-NEXT: s_addc_u32 s12, s14, s15 +; GFX1032-NEXT: s_addc_u32 s13, s17, 0 +; GFX1032-NEXT: s_add_u32 s11, s12, s11 +; GFX1032-NEXT: s_addc_u32 s12, 0, s13 +; GFX1032-NEXT: s_add_u32 s8, s8, s11 +; GFX1032-NEXT: s_cselect_b32 s11, -1, 0 +; GFX1032-NEXT: s_mul_hi_u32 s13, s9, s8 ; GFX1032-NEXT: s_cmp_lg_u32 s11, 0 -; GFX1032-NEXT: s_subb_u32 s10, s10, s5 -; GFX1032-NEXT: s_cmp_lg_u32 s12, 0 -; GFX1032-NEXT: v_cmp_le_u32_e32 vcc_lo, s4, v1 -; GFX1032-NEXT: s_subb_u32 s10, s10, 0 -; GFX1032-NEXT: s_cmp_ge_u32 s10, s5 -; GFX1032-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX1032-NEXT: s_mul_i32 s11, s9, s8 +; GFX1032-NEXT: s_addc_u32 s5, s5, s12 +; GFX1032-NEXT: s_mul_i32 s10, s10, s8 +; GFX1032-NEXT: s_mul_i32 s9, s9, s5 +; GFX1032-NEXT: s_mul_hi_u32 s12, s8, s11 +; GFX1032-NEXT: s_add_i32 s9, s13, s9 +; GFX1032-NEXT: s_mul_hi_u32 s13, s5, s11 +; GFX1032-NEXT: s_add_i32 s9, s9, s10 +; GFX1032-NEXT: s_mul_i32 s10, s5, s11 +; GFX1032-NEXT: s_mul_i32 s15, s8, s9 +; GFX1032-NEXT: s_mul_hi_u32 
s14, s8, s9 +; GFX1032-NEXT: s_add_u32 s12, s12, s15 +; GFX1032-NEXT: s_addc_u32 s14, 0, s14 +; GFX1032-NEXT: s_mul_hi_u32 s11, s5, s9 +; GFX1032-NEXT: s_add_u32 s10, s12, s10 +; GFX1032-NEXT: s_mul_i32 s9, s5, s9 +; GFX1032-NEXT: s_addc_u32 s10, s14, s13 +; GFX1032-NEXT: s_addc_u32 s11, s11, 0 +; GFX1032-NEXT: s_add_u32 s9, s10, s9 +; GFX1032-NEXT: s_addc_u32 s10, 0, s11 +; GFX1032-NEXT: s_add_u32 s8, s8, s9 +; GFX1032-NEXT: s_cselect_b32 s9, -1, 0 +; GFX1032-NEXT: s_mul_hi_u32 s11, s2, s8 +; GFX1032-NEXT: s_cmp_lg_u32 s9, 0 +; GFX1032-NEXT: s_mul_hi_u32 s9, s3, s8 +; GFX1032-NEXT: s_addc_u32 s5, s5, s10 +; GFX1032-NEXT: s_mul_i32 s8, s3, s8 +; GFX1032-NEXT: s_mul_i32 s12, s2, s5 +; GFX1032-NEXT: s_mul_hi_u32 s10, s2, s5 +; GFX1032-NEXT: s_add_u32 s11, s11, s12 +; GFX1032-NEXT: s_addc_u32 s10, 0, s10 +; GFX1032-NEXT: s_mul_hi_u32 s13, s3, s5 +; GFX1032-NEXT: s_add_u32 s8, s11, s8 +; GFX1032-NEXT: s_mul_i32 s5, s3, s5 +; GFX1032-NEXT: s_addc_u32 s8, s10, s9 +; GFX1032-NEXT: s_addc_u32 s9, s13, 0 +; GFX1032-NEXT: s_add_u32 s5, s8, s5 +; GFX1032-NEXT: s_addc_u32 s8, 0, s9 +; GFX1032-NEXT: s_mul_hi_u32 s9, s0, s5 +; GFX1032-NEXT: s_mul_i32 s10, s0, s8 +; GFX1032-NEXT: s_mul_i32 s11, s1, s5 +; GFX1032-NEXT: s_add_i32 s9, s9, s10 +; GFX1032-NEXT: s_mul_i32 s10, s0, s5 +; GFX1032-NEXT: s_add_i32 s9, s9, s11 +; GFX1032-NEXT: s_sub_i32 s11, s3, s9 +; GFX1032-NEXT: s_sub_u32 s10, s2, s10 ; GFX1032-NEXT: s_cselect_b32 s12, -1, 0 -; GFX1032-NEXT: s_cmp_eq_u32 s10, s5 -; GFX1032-NEXT: s_cselect_b32 vcc_lo, -1, 0 -; GFX1032-NEXT: s_add_u32 s10, s1, 1 -; GFX1032-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo -; GFX1032-NEXT: s_addc_u32 s12, s9, 0 -; GFX1032-NEXT: s_add_u32 s13, s1, 2 -; GFX1032-NEXT: s_addc_u32 s14, s9, 0 +; GFX1032-NEXT: s_cmp_lg_u32 s12, 0 +; GFX1032-NEXT: s_subb_u32 s11, s11, s1 +; GFX1032-NEXT: s_sub_u32 s13, s10, s0 +; GFX1032-NEXT: s_cselect_b32 s14, -1, 0 +; GFX1032-NEXT: s_cmp_lg_u32 s14, 0 +; GFX1032-NEXT: s_subb_u32 s11, s11, 0 +; GFX1032-NEXT: s_cmp_ge_u32 s11, s1 +; GFX1032-NEXT: s_cselect_b32 s14, -1, 0 +; GFX1032-NEXT: s_cmp_ge_u32 s13, s0 +; GFX1032-NEXT: s_cselect_b32 s13, -1, 0 +; GFX1032-NEXT: s_cmp_eq_u32 s11, s1 +; GFX1032-NEXT: s_cselect_b32 s11, s13, s14 +; GFX1032-NEXT: s_add_u32 s13, s5, 1 +; GFX1032-NEXT: s_addc_u32 s14, s8, 0 +; GFX1032-NEXT: s_add_u32 s15, s5, 2 +; GFX1032-NEXT: s_addc_u32 s16, s8, 0 ; GFX1032-NEXT: s_cmp_lg_u32 s11, 0 -; GFX1032-NEXT: v_cmp_le_u32_e32 vcc_lo, s4, v0 -; GFX1032-NEXT: s_subb_u32 s0, s7, s0 -; GFX1032-NEXT: v_mov_b32_e32 v2, s13 -; GFX1032-NEXT: s_cmp_ge_u32 s0, s5 -; GFX1032-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo -; GFX1032-NEXT: s_cselect_b32 s7, -1, 0 -; GFX1032-NEXT: s_cmp_eq_u32 s0, s5 -; GFX1032-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1 -; GFX1032-NEXT: s_cselect_b32 s0, -1, 0 -; GFX1032-NEXT: v_mov_b32_e32 v1, s14 -; GFX1032-NEXT: v_cndmask_b32_e64 v0, s7, v0, s0 -; GFX1032-NEXT: v_cndmask_b32_e32 v2, s10, v2, vcc_lo -; GFX1032-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo -; GFX1032-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0 -; GFX1032-NEXT: v_cndmask_b32_e32 v1, s9, v1, vcc_lo -; GFX1032-NEXT: v_cndmask_b32_e32 v0, s1, v2, vcc_lo -; GFX1032-NEXT: s_andn2_b32 vcc_lo, exec_lo, s8 +; GFX1032-NEXT: s_cselect_b32 s11, s15, s13 +; GFX1032-NEXT: s_cselect_b32 s13, s16, s14 +; GFX1032-NEXT: s_cmp_lg_u32 s12, 0 +; GFX1032-NEXT: s_subb_u32 s3, s3, s9 +; GFX1032-NEXT: s_cmp_ge_u32 s3, s1 +; GFX1032-NEXT: s_cselect_b32 s9, -1, 0 +; GFX1032-NEXT: s_cmp_ge_u32 s10, s0 +; GFX1032-NEXT: s_cselect_b32 s10, -1, 0 +; GFX1032-NEXT: s_cmp_eq_u32 s3, s1 
+; GFX1032-NEXT: s_cselect_b32 s1, s10, s9 +; GFX1032-NEXT: s_cmp_lg_u32 s1, 0 +; GFX1032-NEXT: s_cselect_b32 s9, s13, s8 +; GFX1032-NEXT: s_cselect_b32 s8, s11, s5 +; GFX1032-NEXT: s_andn2_b32 vcc_lo, exec_lo, s4 ; GFX1032-NEXT: s_cbranch_vccnz .LBB15_3 ; GFX1032-NEXT: .LBB15_2: -; GFX1032-NEXT: v_cvt_f32_u32_e32 v0, s4 -; GFX1032-NEXT: s_sub_i32 s1, 0, s4 +; GFX1032-NEXT: v_cvt_f32_u32_e32 v0, s0 +; GFX1032-NEXT: s_sub_i32 s3, 0, s0 +; GFX1032-NEXT: s_mov_b32 s9, 0 ; GFX1032-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX1032-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX1032-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1032-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1032-NEXT: s_mul_i32 s1, s1, s0 -; GFX1032-NEXT: s_mul_hi_u32 s1, s0, s1 -; GFX1032-NEXT: s_add_i32 s0, s0, s1 -; GFX1032-NEXT: s_mul_hi_u32 s0, s6, s0 -; GFX1032-NEXT: s_mul_i32 s1, s0, s4 -; GFX1032-NEXT: s_add_i32 s5, s0, 1 -; GFX1032-NEXT: s_sub_i32 s1, s6, s1 -; GFX1032-NEXT: s_sub_i32 s6, s1, s4 -; GFX1032-NEXT: s_cmp_ge_u32 s1, s4 -; GFX1032-NEXT: s_cselect_b32 s0, s5, s0 -; GFX1032-NEXT: s_cselect_b32 s1, s6, s1 -; GFX1032-NEXT: s_add_i32 s5, s0, 1 -; GFX1032-NEXT: s_cmp_ge_u32 s1, s4 -; GFX1032-NEXT: s_mov_b32 s1, 0 -; GFX1032-NEXT: s_cselect_b32 s0, s5, s0 -; GFX1032-NEXT: v_mov_b32_e32 v0, s0 -; GFX1032-NEXT: v_mov_b32_e32 v1, s1 +; GFX1032-NEXT: v_readfirstlane_b32 s1, v0 +; GFX1032-NEXT: s_mul_i32 s3, s3, s1 +; GFX1032-NEXT: s_mul_hi_u32 s3, s1, s3 +; GFX1032-NEXT: s_add_i32 s1, s1, s3 +; GFX1032-NEXT: s_mul_hi_u32 s1, s2, s1 +; GFX1032-NEXT: s_mul_i32 s3, s1, s0 +; GFX1032-NEXT: s_sub_i32 s2, s2, s3 +; GFX1032-NEXT: s_add_i32 s3, s1, 1 +; GFX1032-NEXT: s_sub_i32 s4, s2, s0 +; GFX1032-NEXT: s_cmp_ge_u32 s2, s0 +; GFX1032-NEXT: s_cselect_b32 s1, s3, s1 +; GFX1032-NEXT: s_cselect_b32 s2, s4, s2 +; GFX1032-NEXT: s_add_i32 s3, s1, 1 +; GFX1032-NEXT: s_cmp_ge_u32 s2, s0 +; GFX1032-NEXT: s_cselect_b32 s8, s3, s1 ; GFX1032-NEXT: .LBB15_3: +; GFX1032-NEXT: v_mov_b32_e32 v0, s8 ; GFX1032-NEXT: v_mov_b32_e32 v2, 0 -; GFX1032-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] offset:16 +; GFX1032-NEXT: v_mov_b32_e32 v1, s9 +; GFX1032-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] offset:16 ; GFX1032-NEXT: s_endpgm ; GFX1032-NEXT: .LBB15_4: -; GFX1032-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1032-NEXT: ; implicit-def: $sgpr8_sgpr9 ; GFX1032-NEXT: s_branch .LBB15_2 ; ; GFX1064-LABEL: test_udiv64: ; GFX1064: ; %bb.0: ; %bb -; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x24 +; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24 ; GFX1064-NEXT: s_waitcnt lgkmcnt(0) -; GFX1064-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x0 +; GFX1064-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0 ; GFX1064-NEXT: s_waitcnt lgkmcnt(0) -; GFX1064-NEXT: s_or_b64 s[0:1], s[6:7], s[4:5] -; GFX1064-NEXT: s_mov_b32 s0, 0 -; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1064-NEXT: s_or_b64 s[4:5], s[2:3], s[0:1] +; GFX1064-NEXT: s_mov_b32 s4, 0 +; GFX1064-NEXT: s_cmp_lg_u64 s[4:5], 0 ; GFX1064-NEXT: s_cbranch_scc0 .LBB15_4 ; GFX1064-NEXT: ; %bb.1: -; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, s4 -; GFX1064-NEXT: v_cvt_f32_u32_e32 v1, s5 -; GFX1064-NEXT: s_sub_u32 s9, 0, s4 -; GFX1064-NEXT: s_subb_u32 s10, 0, s5 +; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, s0 +; GFX1064-NEXT: v_cvt_f32_u32_e32 v1, s1 +; GFX1064-NEXT: s_sub_u32 s9, 0, s0 +; GFX1064-NEXT: s_subb_u32 s10, 0, s1 ; GFX1064-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0 ; GFX1064-NEXT: v_rcp_f32_e32 v0, v0 ; GFX1064-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0 @@ -914,141 +912,139 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 { ; 
GFX1064-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX1064-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX1064-NEXT: v_readfirstlane_b32 s8, v1 -; GFX1064-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1064-NEXT: s_mul_i32 s1, s9, s8 -; GFX1064-NEXT: s_mul_hi_u32 s12, s9, s0 -; GFX1064-NEXT: s_mul_i32 s11, s10, s0 -; GFX1064-NEXT: s_add_i32 s1, s12, s1 -; GFX1064-NEXT: s_mul_i32 s13, s9, s0 -; GFX1064-NEXT: s_add_i32 s1, s1, s11 -; GFX1064-NEXT: s_mul_hi_u32 s12, s0, s13 +; GFX1064-NEXT: v_readfirstlane_b32 s4, v0 +; GFX1064-NEXT: s_mul_i32 s5, s9, s8 +; GFX1064-NEXT: s_mul_hi_u32 s12, s9, s4 +; GFX1064-NEXT: s_mul_i32 s11, s10, s4 +; GFX1064-NEXT: s_add_i32 s5, s12, s5 +; GFX1064-NEXT: s_mul_i32 s13, s9, s4 +; GFX1064-NEXT: s_add_i32 s5, s5, s11 +; GFX1064-NEXT: s_mul_hi_u32 s12, s4, s13 +; GFX1064-NEXT: s_mul_i32 s15, s4, s5 ; GFX1064-NEXT: s_mul_hi_u32 s14, s8, s13 ; GFX1064-NEXT: s_mul_i32 s11, s8, s13 -; GFX1064-NEXT: s_mul_hi_u32 s13, s0, s1 -; GFX1064-NEXT: s_mul_i32 s0, s0, s1 -; GFX1064-NEXT: s_mul_hi_u32 s15, s8, s1 -; GFX1064-NEXT: s_add_u32 s0, s12, s0 -; GFX1064-NEXT: s_addc_u32 s12, 0, s13 -; GFX1064-NEXT: s_add_u32 s0, s0, s11 -; GFX1064-NEXT: s_mul_i32 s1, s8, s1 -; GFX1064-NEXT: s_addc_u32 s0, s12, s14 -; GFX1064-NEXT: s_addc_u32 s11, s15, 0 -; GFX1064-NEXT: s_add_u32 s0, s0, s1 -; GFX1064-NEXT: s_addc_u32 s11, 0, s11 -; GFX1064-NEXT: v_add_co_u32 v0, s[0:1], v0, s0 -; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1064-NEXT: s_mul_hi_u32 s13, s4, s5 +; GFX1064-NEXT: s_add_u32 s12, s12, s15 +; GFX1064-NEXT: s_addc_u32 s13, 0, s13 +; GFX1064-NEXT: s_mul_hi_u32 s16, s8, s5 +; GFX1064-NEXT: s_add_u32 s11, s12, s11 +; GFX1064-NEXT: s_mul_i32 s5, s8, s5 +; GFX1064-NEXT: s_addc_u32 s11, s13, s14 +; GFX1064-NEXT: s_addc_u32 s12, s16, 0 +; GFX1064-NEXT: s_add_u32 s5, s11, s5 +; GFX1064-NEXT: s_addc_u32 s11, 0, s12 +; GFX1064-NEXT: s_add_u32 s12, s4, s5 +; GFX1064-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX1064-NEXT: s_mul_hi_u32 s13, s9, s12 +; GFX1064-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1064-NEXT: s_mul_i32 s4, s9, s12 ; GFX1064-NEXT: s_addc_u32 s8, s8, s11 -; GFX1064-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1064-NEXT: s_mul_i32 s1, s9, s8 -; GFX1064-NEXT: s_mul_hi_u32 s11, s9, s0 -; GFX1064-NEXT: s_mul_i32 s10, s10, s0 -; GFX1064-NEXT: s_add_i32 s1, s11, s1 -; GFX1064-NEXT: s_mul_i32 s9, s9, s0 -; GFX1064-NEXT: s_add_i32 s1, s1, s10 -; GFX1064-NEXT: s_mul_hi_u32 s11, s8, s9 -; GFX1064-NEXT: s_mul_i32 s12, s8, s9 -; GFX1064-NEXT: s_mul_hi_u32 s9, s0, s9 -; GFX1064-NEXT: s_mul_hi_u32 s13, s0, s1 -; GFX1064-NEXT: s_mul_i32 s0, s0, s1 -; GFX1064-NEXT: s_mul_hi_u32 s10, s8, s1 -; GFX1064-NEXT: s_add_u32 s0, s9, s0 -; GFX1064-NEXT: s_addc_u32 s9, 0, s13 -; GFX1064-NEXT: s_add_u32 s0, s0, s12 -; GFX1064-NEXT: s_mul_i32 s1, s8, s1 -; GFX1064-NEXT: s_addc_u32 s0, s9, s11 -; GFX1064-NEXT: s_addc_u32 s9, s10, 0 -; GFX1064-NEXT: s_add_u32 s0, s0, s1 +; GFX1064-NEXT: s_mul_i32 s10, s10, s12 +; GFX1064-NEXT: s_mul_i32 s9, s9, s8 +; GFX1064-NEXT: s_mul_hi_u32 s5, s12, s4 +; GFX1064-NEXT: s_add_i32 s9, s13, s9 +; GFX1064-NEXT: s_mul_hi_u32 s11, s8, s4 +; GFX1064-NEXT: s_add_i32 s9, s9, s10 +; GFX1064-NEXT: s_mul_i32 s4, s8, s4 +; GFX1064-NEXT: s_mul_i32 s14, s12, s9 +; GFX1064-NEXT: s_mul_hi_u32 s13, s12, s9 +; GFX1064-NEXT: s_add_u32 s5, s5, s14 +; GFX1064-NEXT: s_addc_u32 s13, 0, s13 +; GFX1064-NEXT: s_mul_hi_u32 s10, s8, s9 +; GFX1064-NEXT: s_add_u32 s4, s5, s4 +; GFX1064-NEXT: s_mul_i32 s9, s8, s9 +; GFX1064-NEXT: s_addc_u32 s4, s13, s11 +; GFX1064-NEXT: s_addc_u32 s5, s10, 0 +; GFX1064-NEXT: s_add_u32 s4, s4, s9 +; 
GFX1064-NEXT: s_addc_u32 s9, 0, s5 +; GFX1064-NEXT: s_add_u32 s10, s12, s4 +; GFX1064-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX1064-NEXT: s_mul_hi_u32 s11, s2, s10 +; GFX1064-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1064-NEXT: s_mul_hi_u32 s4, s3, s10 +; GFX1064-NEXT: s_addc_u32 s5, s8, s9 +; GFX1064-NEXT: s_mul_i32 s8, s3, s10 +; GFX1064-NEXT: s_mul_i32 s10, s2, s5 +; GFX1064-NEXT: s_mul_hi_u32 s9, s2, s5 +; GFX1064-NEXT: s_add_u32 s10, s11, s10 ; GFX1064-NEXT: s_addc_u32 s9, 0, s9 -; GFX1064-NEXT: v_add_co_u32 v0, s[0:1], v0, s0 -; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX1064-NEXT: s_addc_u32 s0, s8, s9 -; GFX1064-NEXT: v_readfirstlane_b32 s1, v0 -; GFX1064-NEXT: s_mul_i32 s9, s6, s0 -; GFX1064-NEXT: s_mul_hi_u32 s8, s6, s0 -; GFX1064-NEXT: s_mul_hi_u32 s10, s7, s0 -; GFX1064-NEXT: s_mul_i32 s0, s7, s0 -; GFX1064-NEXT: s_mul_hi_u32 s11, s6, s1 -; GFX1064-NEXT: s_mul_hi_u32 s12, s7, s1 -; GFX1064-NEXT: s_mul_i32 s1, s7, s1 -; GFX1064-NEXT: s_add_u32 s9, s11, s9 -; GFX1064-NEXT: s_addc_u32 s8, 0, s8 -; GFX1064-NEXT: s_add_u32 s1, s9, s1 -; GFX1064-NEXT: s_addc_u32 s1, s8, s12 -; GFX1064-NEXT: s_addc_u32 s8, s10, 0 -; GFX1064-NEXT: s_add_u32 s10, s1, s0 +; GFX1064-NEXT: s_mul_hi_u32 s12, s3, s5 +; GFX1064-NEXT: s_add_u32 s8, s10, s8 +; GFX1064-NEXT: s_mul_i32 s5, s3, s5 +; GFX1064-NEXT: s_addc_u32 s4, s9, s4 +; GFX1064-NEXT: s_addc_u32 s8, s12, 0 +; GFX1064-NEXT: s_add_u32 s10, s4, s5 ; GFX1064-NEXT: s_addc_u32 s11, 0, s8 -; GFX1064-NEXT: s_mul_hi_u32 s0, s4, s10 -; GFX1064-NEXT: s_mul_i32 s1, s4, s11 -; GFX1064-NEXT: s_mul_i32 s9, s4, s10 -; GFX1064-NEXT: s_add_i32 s12, s0, s1 -; GFX1064-NEXT: v_sub_co_u32 v0, s[0:1], s6, s9 -; GFX1064-NEXT: s_mul_i32 s8, s5, s10 -; GFX1064-NEXT: s_add_i32 s12, s12, s8 -; GFX1064-NEXT: v_sub_co_u32 v1, s[8:9], v0, s4 -; GFX1064-NEXT: s_sub_i32 s13, s7, s12 -; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX1064-NEXT: s_subb_u32 s13, s13, s5 +; GFX1064-NEXT: s_mul_hi_u32 s4, s0, s10 +; GFX1064-NEXT: s_mul_i32 s5, s0, s11 +; GFX1064-NEXT: s_mul_i32 s8, s1, s10 +; GFX1064-NEXT: s_add_i32 s4, s4, s5 +; GFX1064-NEXT: s_add_i32 s12, s4, s8 +; GFX1064-NEXT: s_mul_i32 s4, s0, s10 +; GFX1064-NEXT: s_sub_i32 s8, s3, s12 +; GFX1064-NEXT: s_sub_u32 s13, s2, s4 +; GFX1064-NEXT: s_cselect_b64 s[4:5], -1, 0 +; GFX1064-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1064-NEXT: s_subb_u32 s14, s8, s1 +; GFX1064-NEXT: s_sub_u32 s15, s13, s0 +; GFX1064-NEXT: s_cselect_b64 s[8:9], -1, 0 ; GFX1064-NEXT: s_cmp_lg_u64 s[8:9], 0 -; GFX1064-NEXT: v_cmp_le_u32_e32 vcc, s4, v1 -; GFX1064-NEXT: s_subb_u32 s8, s13, 0 -; GFX1064-NEXT: s_cmp_ge_u32 s8, s5 -; GFX1064-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc +; GFX1064-NEXT: s_subb_u32 s8, s14, 0 +; GFX1064-NEXT: s_cmp_ge_u32 s8, s1 ; GFX1064-NEXT: s_cselect_b32 s9, -1, 0 -; GFX1064-NEXT: s_cmp_eq_u32 s8, s5 -; GFX1064-NEXT: s_cselect_b64 vcc, -1, 0 -; GFX1064-NEXT: s_add_u32 s8, s10, 1 -; GFX1064-NEXT: v_cndmask_b32_e32 v1, s9, v1, vcc -; GFX1064-NEXT: s_addc_u32 s9, s11, 0 -; GFX1064-NEXT: s_add_u32 s13, s10, 2 +; GFX1064-NEXT: s_cmp_ge_u32 s15, s0 +; GFX1064-NEXT: s_cselect_b32 s14, -1, 0 +; GFX1064-NEXT: s_cmp_eq_u32 s8, s1 +; GFX1064-NEXT: s_cselect_b32 s8, s14, s9 +; GFX1064-NEXT: s_add_u32 s9, s10, 1 ; GFX1064-NEXT: s_addc_u32 s14, s11, 0 -; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0 -; GFX1064-NEXT: v_cmp_le_u32_e32 vcc, s4, v0 -; GFX1064-NEXT: s_subb_u32 s0, s7, s12 -; GFX1064-NEXT: v_mov_b32_e32 v2, s13 -; GFX1064-NEXT: s_cmp_ge_u32 s0, s5 -; GFX1064-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc -; GFX1064-NEXT: s_cselect_b32 s7, -1, 0 -; GFX1064-NEXT: s_cmp_eq_u32 
s0, s5 -; GFX1064-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1 -; GFX1064-NEXT: s_cselect_b64 s[0:1], -1, 0 -; GFX1064-NEXT: v_mov_b32_e32 v1, s14 -; GFX1064-NEXT: v_cndmask_b32_e64 v0, s7, v0, s[0:1] -; GFX1064-NEXT: v_cndmask_b32_e32 v2, s8, v2, vcc -; GFX1064-NEXT: v_cndmask_b32_e32 v1, s9, v1, vcc -; GFX1064-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 -; GFX1064-NEXT: v_cndmask_b32_e32 v1, s11, v1, vcc -; GFX1064-NEXT: v_cndmask_b32_e32 v0, s10, v2, vcc +; GFX1064-NEXT: s_add_u32 s15, s10, 2 +; GFX1064-NEXT: s_addc_u32 s16, s11, 0 +; GFX1064-NEXT: s_cmp_lg_u32 s8, 0 +; GFX1064-NEXT: s_cselect_b32 s15, s15, s9 +; GFX1064-NEXT: s_cselect_b32 s14, s16, s14 +; GFX1064-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1064-NEXT: s_subb_u32 s3, s3, s12 +; GFX1064-NEXT: s_cmp_ge_u32 s3, s1 +; GFX1064-NEXT: s_cselect_b32 s4, -1, 0 +; GFX1064-NEXT: s_cmp_ge_u32 s13, s0 +; GFX1064-NEXT: s_cselect_b32 s5, -1, 0 +; GFX1064-NEXT: s_cmp_eq_u32 s3, s1 +; GFX1064-NEXT: s_cselect_b32 s1, s5, s4 +; GFX1064-NEXT: s_cmp_lg_u32 s1, 0 +; GFX1064-NEXT: s_cselect_b32 s5, s14, s11 +; GFX1064-NEXT: s_cselect_b32 s4, s15, s10 ; GFX1064-NEXT: s_cbranch_execnz .LBB15_3 ; GFX1064-NEXT: .LBB15_2: -; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, s4 -; GFX1064-NEXT: s_sub_i32 s1, 0, s4 +; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, s0 +; GFX1064-NEXT: s_sub_i32 s3, 0, s0 +; GFX1064-NEXT: s_mov_b32 s5, 0 ; GFX1064-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX1064-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX1064-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX1064-NEXT: v_readfirstlane_b32 s0, v0 -; GFX1064-NEXT: s_mul_i32 s1, s1, s0 -; GFX1064-NEXT: s_mul_hi_u32 s1, s0, s1 -; GFX1064-NEXT: s_add_i32 s0, s0, s1 -; GFX1064-NEXT: s_mul_hi_u32 s0, s6, s0 -; GFX1064-NEXT: s_mul_i32 s1, s0, s4 -; GFX1064-NEXT: s_add_i32 s5, s0, 1 -; GFX1064-NEXT: s_sub_i32 s1, s6, s1 -; GFX1064-NEXT: s_sub_i32 s6, s1, s4 -; GFX1064-NEXT: s_cmp_ge_u32 s1, s4 -; GFX1064-NEXT: s_cselect_b32 s0, s5, s0 -; GFX1064-NEXT: s_cselect_b32 s1, s6, s1 -; GFX1064-NEXT: s_add_i32 s5, s0, 1 -; GFX1064-NEXT: s_cmp_ge_u32 s1, s4 -; GFX1064-NEXT: s_mov_b32 s1, 0 -; GFX1064-NEXT: s_cselect_b32 s0, s5, s0 -; GFX1064-NEXT: v_mov_b32_e32 v0, s0 -; GFX1064-NEXT: v_mov_b32_e32 v1, s1 +; GFX1064-NEXT: v_readfirstlane_b32 s1, v0 +; GFX1064-NEXT: s_mul_i32 s3, s3, s1 +; GFX1064-NEXT: s_mul_hi_u32 s3, s1, s3 +; GFX1064-NEXT: s_add_i32 s1, s1, s3 +; GFX1064-NEXT: s_mul_hi_u32 s1, s2, s1 +; GFX1064-NEXT: s_mul_i32 s3, s1, s0 +; GFX1064-NEXT: s_sub_i32 s2, s2, s3 +; GFX1064-NEXT: s_add_i32 s3, s1, 1 +; GFX1064-NEXT: s_sub_i32 s4, s2, s0 +; GFX1064-NEXT: s_cmp_ge_u32 s2, s0 +; GFX1064-NEXT: s_cselect_b32 s1, s3, s1 +; GFX1064-NEXT: s_cselect_b32 s2, s4, s2 +; GFX1064-NEXT: s_add_i32 s3, s1, 1 +; GFX1064-NEXT: s_cmp_ge_u32 s2, s0 +; GFX1064-NEXT: s_cselect_b32 s4, s3, s1 ; GFX1064-NEXT: .LBB15_3: +; GFX1064-NEXT: v_mov_b32_e32 v0, s4 ; GFX1064-NEXT: v_mov_b32_e32 v2, 0 -; GFX1064-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] offset:16 +; GFX1064-NEXT: v_mov_b32_e32 v1, s5 +; GFX1064-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] offset:16 ; GFX1064-NEXT: s_endpgm ; GFX1064-NEXT: .LBB15_4: -; GFX1064-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1064-NEXT: ; implicit-def: $sgpr4_sgpr5 ; GFX1064-NEXT: s_branch .LBB15_2 bb: %tmp = getelementptr inbounds i64, ptr addrspace(1) %arg, i64 1 diff --git a/llvm/test/CodeGen/AMDGPU/whole-wave-functions.ll b/llvm/test/CodeGen/AMDGPU/whole-wave-functions.ll index b17050178c306..e3437fded0429 100644 --- a/llvm/test/CodeGen/AMDGPU/whole-wave-functions.ll +++ b/llvm/test/CodeGen/AMDGPU/whole-wave-functions.ll @@ 
-4537,6 +4537,3152 @@ define amdgpu_gfx_whole_wave <2 x half> @call_gfx_from_whole_wave(i1 %active, <2 ret <2 x half> %ret } +define amdgpu_gfx_whole_wave <2 x half> @tail_call_gfx_from_whole_wave(i1 %active, <2 x half> %x, <2 x half> %y) { + ; This should not be turned into a tail call. +; DAGISEL-LABEL: tail_call_gfx_from_whole_wave: +; DAGISEL: ; %bb.0: +; DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; DAGISEL-NEXT: s_wait_expcnt 0x0 +; DAGISEL-NEXT: s_wait_samplecnt 0x0 +; DAGISEL-NEXT: s_wait_bvhcnt 0x0 +; DAGISEL-NEXT: s_wait_kmcnt 0x0 +; DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1 +; DAGISEL-NEXT: s_clause 0x1f +; DAGISEL-NEXT: scratch_store_b32 off, v0, s32 +; DAGISEL-NEXT: scratch_store_b32 off, v1, s32 offset:4 +; DAGISEL-NEXT: scratch_store_b32 off, v2, s32 offset:8 +; DAGISEL-NEXT: scratch_store_b32 off, v3, s32 offset:12 +; DAGISEL-NEXT: scratch_store_b32 off, v4, s32 offset:16 +; DAGISEL-NEXT: scratch_store_b32 off, v5, s32 offset:20 +; DAGISEL-NEXT: scratch_store_b32 off, v6, s32 offset:24 +; DAGISEL-NEXT: scratch_store_b32 off, v7, s32 offset:28 +; DAGISEL-NEXT: scratch_store_b32 off, v8, s32 offset:32 +; DAGISEL-NEXT: scratch_store_b32 off, v9, s32 offset:36 +; DAGISEL-NEXT: scratch_store_b32 off, v10, s32 offset:40 +; DAGISEL-NEXT: scratch_store_b32 off, v11, s32 offset:44 +; DAGISEL-NEXT: scratch_store_b32 off, v12, s32 offset:48 +; DAGISEL-NEXT: scratch_store_b32 off, v13, s32 offset:52 +; DAGISEL-NEXT: scratch_store_b32 off, v14, s32 offset:56 +; DAGISEL-NEXT: scratch_store_b32 off, v15, s32 offset:60 +; DAGISEL-NEXT: scratch_store_b32 off, v16, s32 offset:64 +; DAGISEL-NEXT: scratch_store_b32 off, v17, s32 offset:68 +; DAGISEL-NEXT: scratch_store_b32 off, v18, s32 offset:72 +; DAGISEL-NEXT: scratch_store_b32 off, v19, s32 offset:76 +; DAGISEL-NEXT: scratch_store_b32 off, v20, s32 offset:80 +; DAGISEL-NEXT: scratch_store_b32 off, v21, s32 offset:84 +; DAGISEL-NEXT: scratch_store_b32 off, v22, s32 offset:88 +; DAGISEL-NEXT: scratch_store_b32 off, v23, s32 offset:92 +; DAGISEL-NEXT: scratch_store_b32 off, v24, s32 offset:96 +; DAGISEL-NEXT: scratch_store_b32 off, v25, s32 offset:100 +; DAGISEL-NEXT: scratch_store_b32 off, v26, s32 offset:104 +; DAGISEL-NEXT: scratch_store_b32 off, v27, s32 offset:108 +; DAGISEL-NEXT: scratch_store_b32 off, v28, s32 offset:112 +; DAGISEL-NEXT: scratch_store_b32 off, v29, s32 offset:116 +; DAGISEL-NEXT: scratch_store_b32 off, v30, s32 offset:120 +; DAGISEL-NEXT: scratch_store_b32 off, v31, s32 offset:124 +; DAGISEL-NEXT: s_clause 0x1f +; DAGISEL-NEXT: scratch_store_b32 off, v32, s32 offset:128 +; DAGISEL-NEXT: scratch_store_b32 off, v33, s32 offset:132 +; DAGISEL-NEXT: scratch_store_b32 off, v34, s32 offset:136 +; DAGISEL-NEXT: scratch_store_b32 off, v35, s32 offset:140 +; DAGISEL-NEXT: scratch_store_b32 off, v36, s32 offset:144 +; DAGISEL-NEXT: scratch_store_b32 off, v37, s32 offset:148 +; DAGISEL-NEXT: scratch_store_b32 off, v38, s32 offset:152 +; DAGISEL-NEXT: scratch_store_b32 off, v39, s32 offset:156 +; DAGISEL-NEXT: scratch_store_b32 off, v48, s32 offset:160 +; DAGISEL-NEXT: scratch_store_b32 off, v49, s32 offset:164 +; DAGISEL-NEXT: scratch_store_b32 off, v50, s32 offset:168 +; DAGISEL-NEXT: scratch_store_b32 off, v51, s32 offset:172 +; DAGISEL-NEXT: scratch_store_b32 off, v52, s32 offset:176 +; DAGISEL-NEXT: scratch_store_b32 off, v53, s32 offset:180 +; DAGISEL-NEXT: scratch_store_b32 off, v54, s32 offset:184 +; DAGISEL-NEXT: scratch_store_b32 off, v55, s32 offset:188 +; DAGISEL-NEXT: scratch_store_b32 off, v64, s32 offset:192 +; 
DAGISEL-NEXT: scratch_store_b32 off, v65, s32 offset:196 +; DAGISEL-NEXT: scratch_store_b32 off, v66, s32 offset:200 +; DAGISEL-NEXT: scratch_store_b32 off, v67, s32 offset:204 +; DAGISEL-NEXT: scratch_store_b32 off, v68, s32 offset:208 +; DAGISEL-NEXT: scratch_store_b32 off, v69, s32 offset:212 +; DAGISEL-NEXT: scratch_store_b32 off, v70, s32 offset:216 +; DAGISEL-NEXT: scratch_store_b32 off, v71, s32 offset:220 +; DAGISEL-NEXT: scratch_store_b32 off, v80, s32 offset:224 +; DAGISEL-NEXT: scratch_store_b32 off, v81, s32 offset:228 +; DAGISEL-NEXT: scratch_store_b32 off, v82, s32 offset:232 +; DAGISEL-NEXT: scratch_store_b32 off, v83, s32 offset:236 +; DAGISEL-NEXT: scratch_store_b32 off, v84, s32 offset:240 +; DAGISEL-NEXT: scratch_store_b32 off, v85, s32 offset:244 +; DAGISEL-NEXT: scratch_store_b32 off, v86, s32 offset:248 +; DAGISEL-NEXT: scratch_store_b32 off, v87, s32 offset:252 +; DAGISEL-NEXT: s_clause 0x1f +; DAGISEL-NEXT: scratch_store_b32 off, v96, s32 offset:256 +; DAGISEL-NEXT: scratch_store_b32 off, v97, s32 offset:260 +; DAGISEL-NEXT: scratch_store_b32 off, v98, s32 offset:264 +; DAGISEL-NEXT: scratch_store_b32 off, v99, s32 offset:268 +; DAGISEL-NEXT: scratch_store_b32 off, v100, s32 offset:272 +; DAGISEL-NEXT: scratch_store_b32 off, v101, s32 offset:276 +; DAGISEL-NEXT: scratch_store_b32 off, v102, s32 offset:280 +; DAGISEL-NEXT: scratch_store_b32 off, v103, s32 offset:284 +; DAGISEL-NEXT: scratch_store_b32 off, v112, s32 offset:288 +; DAGISEL-NEXT: scratch_store_b32 off, v113, s32 offset:292 +; DAGISEL-NEXT: scratch_store_b32 off, v114, s32 offset:296 +; DAGISEL-NEXT: scratch_store_b32 off, v115, s32 offset:300 +; DAGISEL-NEXT: scratch_store_b32 off, v116, s32 offset:304 +; DAGISEL-NEXT: scratch_store_b32 off, v117, s32 offset:308 +; DAGISEL-NEXT: scratch_store_b32 off, v118, s32 offset:312 +; DAGISEL-NEXT: scratch_store_b32 off, v119, s32 offset:316 +; DAGISEL-NEXT: scratch_store_b32 off, v128, s32 offset:320 +; DAGISEL-NEXT: scratch_store_b32 off, v129, s32 offset:324 +; DAGISEL-NEXT: scratch_store_b32 off, v130, s32 offset:328 +; DAGISEL-NEXT: scratch_store_b32 off, v131, s32 offset:332 +; DAGISEL-NEXT: scratch_store_b32 off, v132, s32 offset:336 +; DAGISEL-NEXT: scratch_store_b32 off, v133, s32 offset:340 +; DAGISEL-NEXT: scratch_store_b32 off, v134, s32 offset:344 +; DAGISEL-NEXT: scratch_store_b32 off, v135, s32 offset:348 +; DAGISEL-NEXT: scratch_store_b32 off, v144, s32 offset:352 +; DAGISEL-NEXT: scratch_store_b32 off, v145, s32 offset:356 +; DAGISEL-NEXT: scratch_store_b32 off, v146, s32 offset:360 +; DAGISEL-NEXT: scratch_store_b32 off, v147, s32 offset:364 +; DAGISEL-NEXT: scratch_store_b32 off, v148, s32 offset:368 +; DAGISEL-NEXT: scratch_store_b32 off, v149, s32 offset:372 +; DAGISEL-NEXT: scratch_store_b32 off, v150, s32 offset:376 +; DAGISEL-NEXT: scratch_store_b32 off, v151, s32 offset:380 +; DAGISEL-NEXT: s_clause 0x1f +; DAGISEL-NEXT: scratch_store_b32 off, v160, s32 offset:384 +; DAGISEL-NEXT: scratch_store_b32 off, v161, s32 offset:388 +; DAGISEL-NEXT: scratch_store_b32 off, v162, s32 offset:392 +; DAGISEL-NEXT: scratch_store_b32 off, v163, s32 offset:396 +; DAGISEL-NEXT: scratch_store_b32 off, v164, s32 offset:400 +; DAGISEL-NEXT: scratch_store_b32 off, v165, s32 offset:404 +; DAGISEL-NEXT: scratch_store_b32 off, v166, s32 offset:408 +; DAGISEL-NEXT: scratch_store_b32 off, v167, s32 offset:412 +; DAGISEL-NEXT: scratch_store_b32 off, v176, s32 offset:416 +; DAGISEL-NEXT: scratch_store_b32 off, v177, s32 offset:420 +; DAGISEL-NEXT: scratch_store_b32 
off, v178, s32 offset:424 +; DAGISEL-NEXT: scratch_store_b32 off, v179, s32 offset:428 +; DAGISEL-NEXT: scratch_store_b32 off, v180, s32 offset:432 +; DAGISEL-NEXT: scratch_store_b32 off, v181, s32 offset:436 +; DAGISEL-NEXT: scratch_store_b32 off, v182, s32 offset:440 +; DAGISEL-NEXT: scratch_store_b32 off, v183, s32 offset:444 +; DAGISEL-NEXT: scratch_store_b32 off, v192, s32 offset:448 +; DAGISEL-NEXT: scratch_store_b32 off, v193, s32 offset:452 +; DAGISEL-NEXT: scratch_store_b32 off, v194, s32 offset:456 +; DAGISEL-NEXT: scratch_store_b32 off, v195, s32 offset:460 +; DAGISEL-NEXT: scratch_store_b32 off, v196, s32 offset:464 +; DAGISEL-NEXT: scratch_store_b32 off, v197, s32 offset:468 +; DAGISEL-NEXT: scratch_store_b32 off, v198, s32 offset:472 +; DAGISEL-NEXT: scratch_store_b32 off, v199, s32 offset:476 +; DAGISEL-NEXT: scratch_store_b32 off, v208, s32 offset:480 +; DAGISEL-NEXT: scratch_store_b32 off, v209, s32 offset:484 +; DAGISEL-NEXT: scratch_store_b32 off, v210, s32 offset:488 +; DAGISEL-NEXT: scratch_store_b32 off, v211, s32 offset:492 +; DAGISEL-NEXT: scratch_store_b32 off, v212, s32 offset:496 +; DAGISEL-NEXT: scratch_store_b32 off, v213, s32 offset:500 +; DAGISEL-NEXT: scratch_store_b32 off, v214, s32 offset:504 +; DAGISEL-NEXT: scratch_store_b32 off, v215, s32 offset:508 +; DAGISEL-NEXT: s_clause 0xf +; DAGISEL-NEXT: scratch_store_b32 off, v224, s32 offset:512 +; DAGISEL-NEXT: scratch_store_b32 off, v225, s32 offset:516 +; DAGISEL-NEXT: scratch_store_b32 off, v226, s32 offset:520 +; DAGISEL-NEXT: scratch_store_b32 off, v227, s32 offset:524 +; DAGISEL-NEXT: scratch_store_b32 off, v228, s32 offset:528 +; DAGISEL-NEXT: scratch_store_b32 off, v229, s32 offset:532 +; DAGISEL-NEXT: scratch_store_b32 off, v230, s32 offset:536 +; DAGISEL-NEXT: scratch_store_b32 off, v231, s32 offset:540 +; DAGISEL-NEXT: scratch_store_b32 off, v240, s32 offset:544 +; DAGISEL-NEXT: scratch_store_b32 off, v241, s32 offset:548 +; DAGISEL-NEXT: scratch_store_b32 off, v242, s32 offset:552 +; DAGISEL-NEXT: scratch_store_b32 off, v243, s32 offset:556 +; DAGISEL-NEXT: scratch_store_b32 off, v244, s32 offset:560 +; DAGISEL-NEXT: scratch_store_b32 off, v245, s32 offset:564 +; DAGISEL-NEXT: scratch_store_b32 off, v246, s32 offset:568 +; DAGISEL-NEXT: scratch_store_b32 off, v247, s32 offset:572 +; DAGISEL-NEXT: s_mov_b32 exec_lo, -1 +; DAGISEL-NEXT: v_mov_b32_e32 v2, v0 +; DAGISEL-NEXT: s_mov_b32 s37, gfx_callee@abs32@hi +; DAGISEL-NEXT: s_mov_b32 s36, gfx_callee@abs32@lo +; DAGISEL-NEXT: v_swap_b32 v0, v1 +; DAGISEL-NEXT: s_wait_alu 0xfffe +; DAGISEL-NEXT: s_xor_b32 exec_lo, s0, -1 +; DAGISEL-NEXT: s_clause 0x1f +; DAGISEL-NEXT: scratch_load_b32 v0, off, s32 +; DAGISEL-NEXT: scratch_load_b32 v1, off, s32 offset:4 +; DAGISEL-NEXT: scratch_load_b32 v2, off, s32 offset:8 +; DAGISEL-NEXT: scratch_load_b32 v3, off, s32 offset:12 +; DAGISEL-NEXT: scratch_load_b32 v4, off, s32 offset:16 +; DAGISEL-NEXT: scratch_load_b32 v5, off, s32 offset:20 +; DAGISEL-NEXT: scratch_load_b32 v6, off, s32 offset:24 +; DAGISEL-NEXT: scratch_load_b32 v7, off, s32 offset:28 +; DAGISEL-NEXT: scratch_load_b32 v8, off, s32 offset:32 +; DAGISEL-NEXT: scratch_load_b32 v9, off, s32 offset:36 +; DAGISEL-NEXT: scratch_load_b32 v10, off, s32 offset:40 +; DAGISEL-NEXT: scratch_load_b32 v11, off, s32 offset:44 +; DAGISEL-NEXT: scratch_load_b32 v12, off, s32 offset:48 +; DAGISEL-NEXT: scratch_load_b32 v13, off, s32 offset:52 +; DAGISEL-NEXT: scratch_load_b32 v14, off, s32 offset:56 +; DAGISEL-NEXT: scratch_load_b32 v15, off, s32 offset:60 +; 
DAGISEL-NEXT: scratch_load_b32 v16, off, s32 offset:64 +; DAGISEL-NEXT: scratch_load_b32 v17, off, s32 offset:68 +; DAGISEL-NEXT: scratch_load_b32 v18, off, s32 offset:72 +; DAGISEL-NEXT: scratch_load_b32 v19, off, s32 offset:76 +; DAGISEL-NEXT: scratch_load_b32 v20, off, s32 offset:80 +; DAGISEL-NEXT: scratch_load_b32 v21, off, s32 offset:84 +; DAGISEL-NEXT: scratch_load_b32 v22, off, s32 offset:88 +; DAGISEL-NEXT: scratch_load_b32 v23, off, s32 offset:92 +; DAGISEL-NEXT: scratch_load_b32 v24, off, s32 offset:96 +; DAGISEL-NEXT: scratch_load_b32 v25, off, s32 offset:100 +; DAGISEL-NEXT: scratch_load_b32 v26, off, s32 offset:104 +; DAGISEL-NEXT: scratch_load_b32 v27, off, s32 offset:108 +; DAGISEL-NEXT: scratch_load_b32 v28, off, s32 offset:112 +; DAGISEL-NEXT: scratch_load_b32 v29, off, s32 offset:116 +; DAGISEL-NEXT: scratch_load_b32 v30, off, s32 offset:120 +; DAGISEL-NEXT: scratch_load_b32 v31, off, s32 offset:124 +; DAGISEL-NEXT: s_clause 0x1f +; DAGISEL-NEXT: scratch_load_b32 v32, off, s32 offset:128 +; DAGISEL-NEXT: scratch_load_b32 v33, off, s32 offset:132 +; DAGISEL-NEXT: scratch_load_b32 v34, off, s32 offset:136 +; DAGISEL-NEXT: scratch_load_b32 v35, off, s32 offset:140 +; DAGISEL-NEXT: scratch_load_b32 v36, off, s32 offset:144 +; DAGISEL-NEXT: scratch_load_b32 v37, off, s32 offset:148 +; DAGISEL-NEXT: scratch_load_b32 v38, off, s32 offset:152 +; DAGISEL-NEXT: scratch_load_b32 v39, off, s32 offset:156 +; DAGISEL-NEXT: scratch_load_b32 v48, off, s32 offset:160 +; DAGISEL-NEXT: scratch_load_b32 v49, off, s32 offset:164 +; DAGISEL-NEXT: scratch_load_b32 v50, off, s32 offset:168 +; DAGISEL-NEXT: scratch_load_b32 v51, off, s32 offset:172 +; DAGISEL-NEXT: scratch_load_b32 v52, off, s32 offset:176 +; DAGISEL-NEXT: scratch_load_b32 v53, off, s32 offset:180 +; DAGISEL-NEXT: scratch_load_b32 v54, off, s32 offset:184 +; DAGISEL-NEXT: scratch_load_b32 v55, off, s32 offset:188 +; DAGISEL-NEXT: scratch_load_b32 v64, off, s32 offset:192 +; DAGISEL-NEXT: scratch_load_b32 v65, off, s32 offset:196 +; DAGISEL-NEXT: scratch_load_b32 v66, off, s32 offset:200 +; DAGISEL-NEXT: scratch_load_b32 v67, off, s32 offset:204 +; DAGISEL-NEXT: scratch_load_b32 v68, off, s32 offset:208 +; DAGISEL-NEXT: scratch_load_b32 v69, off, s32 offset:212 +; DAGISEL-NEXT: scratch_load_b32 v70, off, s32 offset:216 +; DAGISEL-NEXT: scratch_load_b32 v71, off, s32 offset:220 +; DAGISEL-NEXT: scratch_load_b32 v80, off, s32 offset:224 +; DAGISEL-NEXT: scratch_load_b32 v81, off, s32 offset:228 +; DAGISEL-NEXT: scratch_load_b32 v82, off, s32 offset:232 +; DAGISEL-NEXT: scratch_load_b32 v83, off, s32 offset:236 +; DAGISEL-NEXT: scratch_load_b32 v84, off, s32 offset:240 +; DAGISEL-NEXT: scratch_load_b32 v85, off, s32 offset:244 +; DAGISEL-NEXT: scratch_load_b32 v86, off, s32 offset:248 +; DAGISEL-NEXT: scratch_load_b32 v87, off, s32 offset:252 +; DAGISEL-NEXT: s_clause 0x1f +; DAGISEL-NEXT: scratch_load_b32 v96, off, s32 offset:256 +; DAGISEL-NEXT: scratch_load_b32 v97, off, s32 offset:260 +; DAGISEL-NEXT: scratch_load_b32 v98, off, s32 offset:264 +; DAGISEL-NEXT: scratch_load_b32 v99, off, s32 offset:268 +; DAGISEL-NEXT: scratch_load_b32 v100, off, s32 offset:272 +; DAGISEL-NEXT: scratch_load_b32 v101, off, s32 offset:276 +; DAGISEL-NEXT: scratch_load_b32 v102, off, s32 offset:280 +; DAGISEL-NEXT: scratch_load_b32 v103, off, s32 offset:284 +; DAGISEL-NEXT: scratch_load_b32 v112, off, s32 offset:288 +; DAGISEL-NEXT: scratch_load_b32 v113, off, s32 offset:292 +; DAGISEL-NEXT: scratch_load_b32 v114, off, s32 offset:296 +; DAGISEL-NEXT: 
scratch_load_b32 v115, off, s32 offset:300 +; DAGISEL-NEXT: scratch_load_b32 v116, off, s32 offset:304 +; DAGISEL-NEXT: scratch_load_b32 v117, off, s32 offset:308 +; DAGISEL-NEXT: scratch_load_b32 v118, off, s32 offset:312 +; DAGISEL-NEXT: scratch_load_b32 v119, off, s32 offset:316 +; DAGISEL-NEXT: scratch_load_b32 v128, off, s32 offset:320 +; DAGISEL-NEXT: scratch_load_b32 v129, off, s32 offset:324 +; DAGISEL-NEXT: scratch_load_b32 v130, off, s32 offset:328 +; DAGISEL-NEXT: scratch_load_b32 v131, off, s32 offset:332 +; DAGISEL-NEXT: scratch_load_b32 v132, off, s32 offset:336 +; DAGISEL-NEXT: scratch_load_b32 v133, off, s32 offset:340 +; DAGISEL-NEXT: scratch_load_b32 v134, off, s32 offset:344 +; DAGISEL-NEXT: scratch_load_b32 v135, off, s32 offset:348 +; DAGISEL-NEXT: scratch_load_b32 v144, off, s32 offset:352 +; DAGISEL-NEXT: scratch_load_b32 v145, off, s32 offset:356 +; DAGISEL-NEXT: scratch_load_b32 v146, off, s32 offset:360 +; DAGISEL-NEXT: scratch_load_b32 v147, off, s32 offset:364 +; DAGISEL-NEXT: scratch_load_b32 v148, off, s32 offset:368 +; DAGISEL-NEXT: scratch_load_b32 v149, off, s32 offset:372 +; DAGISEL-NEXT: scratch_load_b32 v150, off, s32 offset:376 +; DAGISEL-NEXT: scratch_load_b32 v151, off, s32 offset:380 +; DAGISEL-NEXT: s_clause 0x1f +; DAGISEL-NEXT: scratch_load_b32 v160, off, s32 offset:384 +; DAGISEL-NEXT: scratch_load_b32 v161, off, s32 offset:388 +; DAGISEL-NEXT: scratch_load_b32 v162, off, s32 offset:392 +; DAGISEL-NEXT: scratch_load_b32 v163, off, s32 offset:396 +; DAGISEL-NEXT: scratch_load_b32 v164, off, s32 offset:400 +; DAGISEL-NEXT: scratch_load_b32 v165, off, s32 offset:404 +; DAGISEL-NEXT: scratch_load_b32 v166, off, s32 offset:408 +; DAGISEL-NEXT: scratch_load_b32 v167, off, s32 offset:412 +; DAGISEL-NEXT: scratch_load_b32 v176, off, s32 offset:416 +; DAGISEL-NEXT: scratch_load_b32 v177, off, s32 offset:420 +; DAGISEL-NEXT: scratch_load_b32 v178, off, s32 offset:424 +; DAGISEL-NEXT: scratch_load_b32 v179, off, s32 offset:428 +; DAGISEL-NEXT: scratch_load_b32 v180, off, s32 offset:432 +; DAGISEL-NEXT: scratch_load_b32 v181, off, s32 offset:436 +; DAGISEL-NEXT: scratch_load_b32 v182, off, s32 offset:440 +; DAGISEL-NEXT: scratch_load_b32 v183, off, s32 offset:444 +; DAGISEL-NEXT: scratch_load_b32 v192, off, s32 offset:448 +; DAGISEL-NEXT: scratch_load_b32 v193, off, s32 offset:452 +; DAGISEL-NEXT: scratch_load_b32 v194, off, s32 offset:456 +; DAGISEL-NEXT: scratch_load_b32 v195, off, s32 offset:460 +; DAGISEL-NEXT: scratch_load_b32 v196, off, s32 offset:464 +; DAGISEL-NEXT: scratch_load_b32 v197, off, s32 offset:468 +; DAGISEL-NEXT: scratch_load_b32 v198, off, s32 offset:472 +; DAGISEL-NEXT: scratch_load_b32 v199, off, s32 offset:476 +; DAGISEL-NEXT: scratch_load_b32 v208, off, s32 offset:480 +; DAGISEL-NEXT: scratch_load_b32 v209, off, s32 offset:484 +; DAGISEL-NEXT: scratch_load_b32 v210, off, s32 offset:488 +; DAGISEL-NEXT: scratch_load_b32 v211, off, s32 offset:492 +; DAGISEL-NEXT: scratch_load_b32 v212, off, s32 offset:496 +; DAGISEL-NEXT: scratch_load_b32 v213, off, s32 offset:500 +; DAGISEL-NEXT: scratch_load_b32 v214, off, s32 offset:504 +; DAGISEL-NEXT: scratch_load_b32 v215, off, s32 offset:508 +; DAGISEL-NEXT: s_clause 0xf +; DAGISEL-NEXT: scratch_load_b32 v224, off, s32 offset:512 +; DAGISEL-NEXT: scratch_load_b32 v225, off, s32 offset:516 +; DAGISEL-NEXT: scratch_load_b32 v226, off, s32 offset:520 +; DAGISEL-NEXT: scratch_load_b32 v227, off, s32 offset:524 +; DAGISEL-NEXT: scratch_load_b32 v228, off, s32 offset:528 +; DAGISEL-NEXT: 
scratch_load_b32 v229, off, s32 offset:532 +; DAGISEL-NEXT: scratch_load_b32 v230, off, s32 offset:536 +; DAGISEL-NEXT: scratch_load_b32 v231, off, s32 offset:540 +; DAGISEL-NEXT: scratch_load_b32 v240, off, s32 offset:544 +; DAGISEL-NEXT: scratch_load_b32 v241, off, s32 offset:548 +; DAGISEL-NEXT: scratch_load_b32 v242, off, s32 offset:552 +; DAGISEL-NEXT: scratch_load_b32 v243, off, s32 offset:556 +; DAGISEL-NEXT: scratch_load_b32 v244, off, s32 offset:560 +; DAGISEL-NEXT: scratch_load_b32 v245, off, s32 offset:564 +; DAGISEL-NEXT: scratch_load_b32 v246, off, s32 offset:568 +; DAGISEL-NEXT: scratch_load_b32 v247, off, s32 offset:572 +; DAGISEL-NEXT: s_mov_b32 exec_lo, s0 +; DAGISEL-NEXT: s_setpc_b64 s[36:37] +; +; GISEL-LABEL: tail_call_gfx_from_whole_wave: +; GISEL: ; %bb.0: +; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GISEL-NEXT: s_wait_expcnt 0x0 +; GISEL-NEXT: s_wait_samplecnt 0x0 +; GISEL-NEXT: s_wait_bvhcnt 0x0 +; GISEL-NEXT: s_wait_kmcnt 0x0 +; GISEL-NEXT: s_xor_saveexec_b32 s0, -1 +; GISEL-NEXT: s_clause 0x1f +; GISEL-NEXT: scratch_store_b32 off, v0, s32 +; GISEL-NEXT: scratch_store_b32 off, v1, s32 offset:4 +; GISEL-NEXT: scratch_store_b32 off, v2, s32 offset:8 +; GISEL-NEXT: scratch_store_b32 off, v3, s32 offset:12 +; GISEL-NEXT: scratch_store_b32 off, v4, s32 offset:16 +; GISEL-NEXT: scratch_store_b32 off, v5, s32 offset:20 +; GISEL-NEXT: scratch_store_b32 off, v6, s32 offset:24 +; GISEL-NEXT: scratch_store_b32 off, v7, s32 offset:28 +; GISEL-NEXT: scratch_store_b32 off, v8, s32 offset:32 +; GISEL-NEXT: scratch_store_b32 off, v9, s32 offset:36 +; GISEL-NEXT: scratch_store_b32 off, v10, s32 offset:40 +; GISEL-NEXT: scratch_store_b32 off, v11, s32 offset:44 +; GISEL-NEXT: scratch_store_b32 off, v12, s32 offset:48 +; GISEL-NEXT: scratch_store_b32 off, v13, s32 offset:52 +; GISEL-NEXT: scratch_store_b32 off, v14, s32 offset:56 +; GISEL-NEXT: scratch_store_b32 off, v15, s32 offset:60 +; GISEL-NEXT: scratch_store_b32 off, v16, s32 offset:64 +; GISEL-NEXT: scratch_store_b32 off, v17, s32 offset:68 +; GISEL-NEXT: scratch_store_b32 off, v18, s32 offset:72 +; GISEL-NEXT: scratch_store_b32 off, v19, s32 offset:76 +; GISEL-NEXT: scratch_store_b32 off, v20, s32 offset:80 +; GISEL-NEXT: scratch_store_b32 off, v21, s32 offset:84 +; GISEL-NEXT: scratch_store_b32 off, v22, s32 offset:88 +; GISEL-NEXT: scratch_store_b32 off, v23, s32 offset:92 +; GISEL-NEXT: scratch_store_b32 off, v24, s32 offset:96 +; GISEL-NEXT: scratch_store_b32 off, v25, s32 offset:100 +; GISEL-NEXT: scratch_store_b32 off, v26, s32 offset:104 +; GISEL-NEXT: scratch_store_b32 off, v27, s32 offset:108 +; GISEL-NEXT: scratch_store_b32 off, v28, s32 offset:112 +; GISEL-NEXT: scratch_store_b32 off, v29, s32 offset:116 +; GISEL-NEXT: scratch_store_b32 off, v30, s32 offset:120 +; GISEL-NEXT: scratch_store_b32 off, v31, s32 offset:124 +; GISEL-NEXT: s_clause 0x1f +; GISEL-NEXT: scratch_store_b32 off, v32, s32 offset:128 +; GISEL-NEXT: scratch_store_b32 off, v33, s32 offset:132 +; GISEL-NEXT: scratch_store_b32 off, v34, s32 offset:136 +; GISEL-NEXT: scratch_store_b32 off, v35, s32 offset:140 +; GISEL-NEXT: scratch_store_b32 off, v36, s32 offset:144 +; GISEL-NEXT: scratch_store_b32 off, v37, s32 offset:148 +; GISEL-NEXT: scratch_store_b32 off, v38, s32 offset:152 +; GISEL-NEXT: scratch_store_b32 off, v39, s32 offset:156 +; GISEL-NEXT: scratch_store_b32 off, v48, s32 offset:160 +; GISEL-NEXT: scratch_store_b32 off, v49, s32 offset:164 +; GISEL-NEXT: scratch_store_b32 off, v50, s32 offset:168 +; GISEL-NEXT: scratch_store_b32 off, v51, 
s32 offset:172 +; GISEL-NEXT: scratch_store_b32 off, v52, s32 offset:176 +; GISEL-NEXT: scratch_store_b32 off, v53, s32 offset:180 +; GISEL-NEXT: scratch_store_b32 off, v54, s32 offset:184 +; GISEL-NEXT: scratch_store_b32 off, v55, s32 offset:188 +; GISEL-NEXT: scratch_store_b32 off, v64, s32 offset:192 +; GISEL-NEXT: scratch_store_b32 off, v65, s32 offset:196 +; GISEL-NEXT: scratch_store_b32 off, v66, s32 offset:200 +; GISEL-NEXT: scratch_store_b32 off, v67, s32 offset:204 +; GISEL-NEXT: scratch_store_b32 off, v68, s32 offset:208 +; GISEL-NEXT: scratch_store_b32 off, v69, s32 offset:212 +; GISEL-NEXT: scratch_store_b32 off, v70, s32 offset:216 +; GISEL-NEXT: scratch_store_b32 off, v71, s32 offset:220 +; GISEL-NEXT: scratch_store_b32 off, v80, s32 offset:224 +; GISEL-NEXT: scratch_store_b32 off, v81, s32 offset:228 +; GISEL-NEXT: scratch_store_b32 off, v82, s32 offset:232 +; GISEL-NEXT: scratch_store_b32 off, v83, s32 offset:236 +; GISEL-NEXT: scratch_store_b32 off, v84, s32 offset:240 +; GISEL-NEXT: scratch_store_b32 off, v85, s32 offset:244 +; GISEL-NEXT: scratch_store_b32 off, v86, s32 offset:248 +; GISEL-NEXT: scratch_store_b32 off, v87, s32 offset:252 +; GISEL-NEXT: s_clause 0x1f +; GISEL-NEXT: scratch_store_b32 off, v96, s32 offset:256 +; GISEL-NEXT: scratch_store_b32 off, v97, s32 offset:260 +; GISEL-NEXT: scratch_store_b32 off, v98, s32 offset:264 +; GISEL-NEXT: scratch_store_b32 off, v99, s32 offset:268 +; GISEL-NEXT: scratch_store_b32 off, v100, s32 offset:272 +; GISEL-NEXT: scratch_store_b32 off, v101, s32 offset:276 +; GISEL-NEXT: scratch_store_b32 off, v102, s32 offset:280 +; GISEL-NEXT: scratch_store_b32 off, v103, s32 offset:284 +; GISEL-NEXT: scratch_store_b32 off, v112, s32 offset:288 +; GISEL-NEXT: scratch_store_b32 off, v113, s32 offset:292 +; GISEL-NEXT: scratch_store_b32 off, v114, s32 offset:296 +; GISEL-NEXT: scratch_store_b32 off, v115, s32 offset:300 +; GISEL-NEXT: scratch_store_b32 off, v116, s32 offset:304 +; GISEL-NEXT: scratch_store_b32 off, v117, s32 offset:308 +; GISEL-NEXT: scratch_store_b32 off, v118, s32 offset:312 +; GISEL-NEXT: scratch_store_b32 off, v119, s32 offset:316 +; GISEL-NEXT: scratch_store_b32 off, v128, s32 offset:320 +; GISEL-NEXT: scratch_store_b32 off, v129, s32 offset:324 +; GISEL-NEXT: scratch_store_b32 off, v130, s32 offset:328 +; GISEL-NEXT: scratch_store_b32 off, v131, s32 offset:332 +; GISEL-NEXT: scratch_store_b32 off, v132, s32 offset:336 +; GISEL-NEXT: scratch_store_b32 off, v133, s32 offset:340 +; GISEL-NEXT: scratch_store_b32 off, v134, s32 offset:344 +; GISEL-NEXT: scratch_store_b32 off, v135, s32 offset:348 +; GISEL-NEXT: scratch_store_b32 off, v144, s32 offset:352 +; GISEL-NEXT: scratch_store_b32 off, v145, s32 offset:356 +; GISEL-NEXT: scratch_store_b32 off, v146, s32 offset:360 +; GISEL-NEXT: scratch_store_b32 off, v147, s32 offset:364 +; GISEL-NEXT: scratch_store_b32 off, v148, s32 offset:368 +; GISEL-NEXT: scratch_store_b32 off, v149, s32 offset:372 +; GISEL-NEXT: scratch_store_b32 off, v150, s32 offset:376 +; GISEL-NEXT: scratch_store_b32 off, v151, s32 offset:380 +; GISEL-NEXT: s_clause 0x1f +; GISEL-NEXT: scratch_store_b32 off, v160, s32 offset:384 +; GISEL-NEXT: scratch_store_b32 off, v161, s32 offset:388 +; GISEL-NEXT: scratch_store_b32 off, v162, s32 offset:392 +; GISEL-NEXT: scratch_store_b32 off, v163, s32 offset:396 +; GISEL-NEXT: scratch_store_b32 off, v164, s32 offset:400 +; GISEL-NEXT: scratch_store_b32 off, v165, s32 offset:404 +; GISEL-NEXT: scratch_store_b32 off, v166, s32 offset:408 +; GISEL-NEXT: 
scratch_store_b32 off, v167, s32 offset:412 +; GISEL-NEXT: scratch_store_b32 off, v176, s32 offset:416 +; GISEL-NEXT: scratch_store_b32 off, v177, s32 offset:420 +; GISEL-NEXT: scratch_store_b32 off, v178, s32 offset:424 +; GISEL-NEXT: scratch_store_b32 off, v179, s32 offset:428 +; GISEL-NEXT: scratch_store_b32 off, v180, s32 offset:432 +; GISEL-NEXT: scratch_store_b32 off, v181, s32 offset:436 +; GISEL-NEXT: scratch_store_b32 off, v182, s32 offset:440 +; GISEL-NEXT: scratch_store_b32 off, v183, s32 offset:444 +; GISEL-NEXT: scratch_store_b32 off, v192, s32 offset:448 +; GISEL-NEXT: scratch_store_b32 off, v193, s32 offset:452 +; GISEL-NEXT: scratch_store_b32 off, v194, s32 offset:456 +; GISEL-NEXT: scratch_store_b32 off, v195, s32 offset:460 +; GISEL-NEXT: scratch_store_b32 off, v196, s32 offset:464 +; GISEL-NEXT: scratch_store_b32 off, v197, s32 offset:468 +; GISEL-NEXT: scratch_store_b32 off, v198, s32 offset:472 +; GISEL-NEXT: scratch_store_b32 off, v199, s32 offset:476 +; GISEL-NEXT: scratch_store_b32 off, v208, s32 offset:480 +; GISEL-NEXT: scratch_store_b32 off, v209, s32 offset:484 +; GISEL-NEXT: scratch_store_b32 off, v210, s32 offset:488 +; GISEL-NEXT: scratch_store_b32 off, v211, s32 offset:492 +; GISEL-NEXT: scratch_store_b32 off, v212, s32 offset:496 +; GISEL-NEXT: scratch_store_b32 off, v213, s32 offset:500 +; GISEL-NEXT: scratch_store_b32 off, v214, s32 offset:504 +; GISEL-NEXT: scratch_store_b32 off, v215, s32 offset:508 +; GISEL-NEXT: s_clause 0xf +; GISEL-NEXT: scratch_store_b32 off, v224, s32 offset:512 +; GISEL-NEXT: scratch_store_b32 off, v225, s32 offset:516 +; GISEL-NEXT: scratch_store_b32 off, v226, s32 offset:520 +; GISEL-NEXT: scratch_store_b32 off, v227, s32 offset:524 +; GISEL-NEXT: scratch_store_b32 off, v228, s32 offset:528 +; GISEL-NEXT: scratch_store_b32 off, v229, s32 offset:532 +; GISEL-NEXT: scratch_store_b32 off, v230, s32 offset:536 +; GISEL-NEXT: scratch_store_b32 off, v231, s32 offset:540 +; GISEL-NEXT: scratch_store_b32 off, v240, s32 offset:544 +; GISEL-NEXT: scratch_store_b32 off, v241, s32 offset:548 +; GISEL-NEXT: scratch_store_b32 off, v242, s32 offset:552 +; GISEL-NEXT: scratch_store_b32 off, v243, s32 offset:556 +; GISEL-NEXT: scratch_store_b32 off, v244, s32 offset:560 +; GISEL-NEXT: scratch_store_b32 off, v245, s32 offset:564 +; GISEL-NEXT: scratch_store_b32 off, v246, s32 offset:568 +; GISEL-NEXT: scratch_store_b32 off, v247, s32 offset:572 +; GISEL-NEXT: s_mov_b32 exec_lo, -1 +; GISEL-NEXT: v_mov_b32_e32 v2, v0 +; GISEL-NEXT: v_swap_b32 v0, v1 +; GISEL-NEXT: s_mov_b32 s36, gfx_callee@abs32@lo +; GISEL-NEXT: s_mov_b32 s37, gfx_callee@abs32@hi +; GISEL-NEXT: s_wait_alu 0xfffe +; GISEL-NEXT: s_xor_b32 exec_lo, s0, -1 +; GISEL-NEXT: s_clause 0x1f +; GISEL-NEXT: scratch_load_b32 v0, off, s32 +; GISEL-NEXT: scratch_load_b32 v1, off, s32 offset:4 +; GISEL-NEXT: scratch_load_b32 v2, off, s32 offset:8 +; GISEL-NEXT: scratch_load_b32 v3, off, s32 offset:12 +; GISEL-NEXT: scratch_load_b32 v4, off, s32 offset:16 +; GISEL-NEXT: scratch_load_b32 v5, off, s32 offset:20 +; GISEL-NEXT: scratch_load_b32 v6, off, s32 offset:24 +; GISEL-NEXT: scratch_load_b32 v7, off, s32 offset:28 +; GISEL-NEXT: scratch_load_b32 v8, off, s32 offset:32 +; GISEL-NEXT: scratch_load_b32 v9, off, s32 offset:36 +; GISEL-NEXT: scratch_load_b32 v10, off, s32 offset:40 +; GISEL-NEXT: scratch_load_b32 v11, off, s32 offset:44 +; GISEL-NEXT: scratch_load_b32 v12, off, s32 offset:48 +; GISEL-NEXT: scratch_load_b32 v13, off, s32 offset:52 +; GISEL-NEXT: scratch_load_b32 v14, off, s32 
offset:56 +; GISEL-NEXT: scratch_load_b32 v15, off, s32 offset:60 +; GISEL-NEXT: scratch_load_b32 v16, off, s32 offset:64 +; GISEL-NEXT: scratch_load_b32 v17, off, s32 offset:68 +; GISEL-NEXT: scratch_load_b32 v18, off, s32 offset:72 +; GISEL-NEXT: scratch_load_b32 v19, off, s32 offset:76 +; GISEL-NEXT: scratch_load_b32 v20, off, s32 offset:80 +; GISEL-NEXT: scratch_load_b32 v21, off, s32 offset:84 +; GISEL-NEXT: scratch_load_b32 v22, off, s32 offset:88 +; GISEL-NEXT: scratch_load_b32 v23, off, s32 offset:92 +; GISEL-NEXT: scratch_load_b32 v24, off, s32 offset:96 +; GISEL-NEXT: scratch_load_b32 v25, off, s32 offset:100 +; GISEL-NEXT: scratch_load_b32 v26, off, s32 offset:104 +; GISEL-NEXT: scratch_load_b32 v27, off, s32 offset:108 +; GISEL-NEXT: scratch_load_b32 v28, off, s32 offset:112 +; GISEL-NEXT: scratch_load_b32 v29, off, s32 offset:116 +; GISEL-NEXT: scratch_load_b32 v30, off, s32 offset:120 +; GISEL-NEXT: scratch_load_b32 v31, off, s32 offset:124 +; GISEL-NEXT: s_clause 0x1f +; GISEL-NEXT: scratch_load_b32 v32, off, s32 offset:128 +; GISEL-NEXT: scratch_load_b32 v33, off, s32 offset:132 +; GISEL-NEXT: scratch_load_b32 v34, off, s32 offset:136 +; GISEL-NEXT: scratch_load_b32 v35, off, s32 offset:140 +; GISEL-NEXT: scratch_load_b32 v36, off, s32 offset:144 +; GISEL-NEXT: scratch_load_b32 v37, off, s32 offset:148 +; GISEL-NEXT: scratch_load_b32 v38, off, s32 offset:152 +; GISEL-NEXT: scratch_load_b32 v39, off, s32 offset:156 +; GISEL-NEXT: scratch_load_b32 v48, off, s32 offset:160 +; GISEL-NEXT: scratch_load_b32 v49, off, s32 offset:164 +; GISEL-NEXT: scratch_load_b32 v50, off, s32 offset:168 +; GISEL-NEXT: scratch_load_b32 v51, off, s32 offset:172 +; GISEL-NEXT: scratch_load_b32 v52, off, s32 offset:176 +; GISEL-NEXT: scratch_load_b32 v53, off, s32 offset:180 +; GISEL-NEXT: scratch_load_b32 v54, off, s32 offset:184 +; GISEL-NEXT: scratch_load_b32 v55, off, s32 offset:188 +; GISEL-NEXT: scratch_load_b32 v64, off, s32 offset:192 +; GISEL-NEXT: scratch_load_b32 v65, off, s32 offset:196 +; GISEL-NEXT: scratch_load_b32 v66, off, s32 offset:200 +; GISEL-NEXT: scratch_load_b32 v67, off, s32 offset:204 +; GISEL-NEXT: scratch_load_b32 v68, off, s32 offset:208 +; GISEL-NEXT: scratch_load_b32 v69, off, s32 offset:212 +; GISEL-NEXT: scratch_load_b32 v70, off, s32 offset:216 +; GISEL-NEXT: scratch_load_b32 v71, off, s32 offset:220 +; GISEL-NEXT: scratch_load_b32 v80, off, s32 offset:224 +; GISEL-NEXT: scratch_load_b32 v81, off, s32 offset:228 +; GISEL-NEXT: scratch_load_b32 v82, off, s32 offset:232 +; GISEL-NEXT: scratch_load_b32 v83, off, s32 offset:236 +; GISEL-NEXT: scratch_load_b32 v84, off, s32 offset:240 +; GISEL-NEXT: scratch_load_b32 v85, off, s32 offset:244 +; GISEL-NEXT: scratch_load_b32 v86, off, s32 offset:248 +; GISEL-NEXT: scratch_load_b32 v87, off, s32 offset:252 +; GISEL-NEXT: s_clause 0x1f +; GISEL-NEXT: scratch_load_b32 v96, off, s32 offset:256 +; GISEL-NEXT: scratch_load_b32 v97, off, s32 offset:260 +; GISEL-NEXT: scratch_load_b32 v98, off, s32 offset:264 +; GISEL-NEXT: scratch_load_b32 v99, off, s32 offset:268 +; GISEL-NEXT: scratch_load_b32 v100, off, s32 offset:272 +; GISEL-NEXT: scratch_load_b32 v101, off, s32 offset:276 +; GISEL-NEXT: scratch_load_b32 v102, off, s32 offset:280 +; GISEL-NEXT: scratch_load_b32 v103, off, s32 offset:284 +; GISEL-NEXT: scratch_load_b32 v112, off, s32 offset:288 +; GISEL-NEXT: scratch_load_b32 v113, off, s32 offset:292 +; GISEL-NEXT: scratch_load_b32 v114, off, s32 offset:296 +; GISEL-NEXT: scratch_load_b32 v115, off, s32 offset:300 +; 
GISEL-NEXT: scratch_load_b32 v116, off, s32 offset:304 +; GISEL-NEXT: scratch_load_b32 v117, off, s32 offset:308 +; GISEL-NEXT: scratch_load_b32 v118, off, s32 offset:312 +; GISEL-NEXT: scratch_load_b32 v119, off, s32 offset:316 +; GISEL-NEXT: scratch_load_b32 v128, off, s32 offset:320 +; GISEL-NEXT: scratch_load_b32 v129, off, s32 offset:324 +; GISEL-NEXT: scratch_load_b32 v130, off, s32 offset:328 +; GISEL-NEXT: scratch_load_b32 v131, off, s32 offset:332 +; GISEL-NEXT: scratch_load_b32 v132, off, s32 offset:336 +; GISEL-NEXT: scratch_load_b32 v133, off, s32 offset:340 +; GISEL-NEXT: scratch_load_b32 v134, off, s32 offset:344 +; GISEL-NEXT: scratch_load_b32 v135, off, s32 offset:348 +; GISEL-NEXT: scratch_load_b32 v144, off, s32 offset:352 +; GISEL-NEXT: scratch_load_b32 v145, off, s32 offset:356 +; GISEL-NEXT: scratch_load_b32 v146, off, s32 offset:360 +; GISEL-NEXT: scratch_load_b32 v147, off, s32 offset:364 +; GISEL-NEXT: scratch_load_b32 v148, off, s32 offset:368 +; GISEL-NEXT: scratch_load_b32 v149, off, s32 offset:372 +; GISEL-NEXT: scratch_load_b32 v150, off, s32 offset:376 +; GISEL-NEXT: scratch_load_b32 v151, off, s32 offset:380 +; GISEL-NEXT: s_clause 0x1f +; GISEL-NEXT: scratch_load_b32 v160, off, s32 offset:384 +; GISEL-NEXT: scratch_load_b32 v161, off, s32 offset:388 +; GISEL-NEXT: scratch_load_b32 v162, off, s32 offset:392 +; GISEL-NEXT: scratch_load_b32 v163, off, s32 offset:396 +; GISEL-NEXT: scratch_load_b32 v164, off, s32 offset:400 +; GISEL-NEXT: scratch_load_b32 v165, off, s32 offset:404 +; GISEL-NEXT: scratch_load_b32 v166, off, s32 offset:408 +; GISEL-NEXT: scratch_load_b32 v167, off, s32 offset:412 +; GISEL-NEXT: scratch_load_b32 v176, off, s32 offset:416 +; GISEL-NEXT: scratch_load_b32 v177, off, s32 offset:420 +; GISEL-NEXT: scratch_load_b32 v178, off, s32 offset:424 +; GISEL-NEXT: scratch_load_b32 v179, off, s32 offset:428 +; GISEL-NEXT: scratch_load_b32 v180, off, s32 offset:432 +; GISEL-NEXT: scratch_load_b32 v181, off, s32 offset:436 +; GISEL-NEXT: scratch_load_b32 v182, off, s32 offset:440 +; GISEL-NEXT: scratch_load_b32 v183, off, s32 offset:444 +; GISEL-NEXT: scratch_load_b32 v192, off, s32 offset:448 +; GISEL-NEXT: scratch_load_b32 v193, off, s32 offset:452 +; GISEL-NEXT: scratch_load_b32 v194, off, s32 offset:456 +; GISEL-NEXT: scratch_load_b32 v195, off, s32 offset:460 +; GISEL-NEXT: scratch_load_b32 v196, off, s32 offset:464 +; GISEL-NEXT: scratch_load_b32 v197, off, s32 offset:468 +; GISEL-NEXT: scratch_load_b32 v198, off, s32 offset:472 +; GISEL-NEXT: scratch_load_b32 v199, off, s32 offset:476 +; GISEL-NEXT: scratch_load_b32 v208, off, s32 offset:480 +; GISEL-NEXT: scratch_load_b32 v209, off, s32 offset:484 +; GISEL-NEXT: scratch_load_b32 v210, off, s32 offset:488 +; GISEL-NEXT: scratch_load_b32 v211, off, s32 offset:492 +; GISEL-NEXT: scratch_load_b32 v212, off, s32 offset:496 +; GISEL-NEXT: scratch_load_b32 v213, off, s32 offset:500 +; GISEL-NEXT: scratch_load_b32 v214, off, s32 offset:504 +; GISEL-NEXT: scratch_load_b32 v215, off, s32 offset:508 +; GISEL-NEXT: s_clause 0xf +; GISEL-NEXT: scratch_load_b32 v224, off, s32 offset:512 +; GISEL-NEXT: scratch_load_b32 v225, off, s32 offset:516 +; GISEL-NEXT: scratch_load_b32 v226, off, s32 offset:520 +; GISEL-NEXT: scratch_load_b32 v227, off, s32 offset:524 +; GISEL-NEXT: scratch_load_b32 v228, off, s32 offset:528 +; GISEL-NEXT: scratch_load_b32 v229, off, s32 offset:532 +; GISEL-NEXT: scratch_load_b32 v230, off, s32 offset:536 +; GISEL-NEXT: scratch_load_b32 v231, off, s32 offset:540 +; GISEL-NEXT: 
scratch_load_b32 v240, off, s32 offset:544 +; GISEL-NEXT: scratch_load_b32 v241, off, s32 offset:548 +; GISEL-NEXT: scratch_load_b32 v242, off, s32 offset:552 +; GISEL-NEXT: scratch_load_b32 v243, off, s32 offset:556 +; GISEL-NEXT: scratch_load_b32 v244, off, s32 offset:560 +; GISEL-NEXT: scratch_load_b32 v245, off, s32 offset:564 +; GISEL-NEXT: scratch_load_b32 v246, off, s32 offset:568 +; GISEL-NEXT: scratch_load_b32 v247, off, s32 offset:572 +; GISEL-NEXT: s_mov_b32 exec_lo, s0 +; GISEL-NEXT: s_setpc_b64 s[36:37] +; +; DAGISEL64-LABEL: tail_call_gfx_from_whole_wave: +; DAGISEL64: ; %bb.0: +; DAGISEL64-NEXT: s_wait_loadcnt_dscnt 0x0 +; DAGISEL64-NEXT: s_wait_expcnt 0x0 +; DAGISEL64-NEXT: s_wait_samplecnt 0x0 +; DAGISEL64-NEXT: s_wait_bvhcnt 0x0 +; DAGISEL64-NEXT: s_wait_kmcnt 0x0 +; DAGISEL64-NEXT: s_xor_saveexec_b64 s[0:1], -1 +; DAGISEL64-NEXT: s_clause 0x1f +; DAGISEL64-NEXT: scratch_store_b32 off, v0, s32 +; DAGISEL64-NEXT: scratch_store_b32 off, v1, s32 offset:4 +; DAGISEL64-NEXT: scratch_store_b32 off, v2, s32 offset:8 +; DAGISEL64-NEXT: scratch_store_b32 off, v3, s32 offset:12 +; DAGISEL64-NEXT: scratch_store_b32 off, v4, s32 offset:16 +; DAGISEL64-NEXT: scratch_store_b32 off, v5, s32 offset:20 +; DAGISEL64-NEXT: scratch_store_b32 off, v6, s32 offset:24 +; DAGISEL64-NEXT: scratch_store_b32 off, v7, s32 offset:28 +; DAGISEL64-NEXT: scratch_store_b32 off, v8, s32 offset:32 +; DAGISEL64-NEXT: scratch_store_b32 off, v9, s32 offset:36 +; DAGISEL64-NEXT: scratch_store_b32 off, v10, s32 offset:40 +; DAGISEL64-NEXT: scratch_store_b32 off, v11, s32 offset:44 +; DAGISEL64-NEXT: scratch_store_b32 off, v12, s32 offset:48 +; DAGISEL64-NEXT: scratch_store_b32 off, v13, s32 offset:52 +; DAGISEL64-NEXT: scratch_store_b32 off, v14, s32 offset:56 +; DAGISEL64-NEXT: scratch_store_b32 off, v15, s32 offset:60 +; DAGISEL64-NEXT: scratch_store_b32 off, v16, s32 offset:64 +; DAGISEL64-NEXT: scratch_store_b32 off, v17, s32 offset:68 +; DAGISEL64-NEXT: scratch_store_b32 off, v18, s32 offset:72 +; DAGISEL64-NEXT: scratch_store_b32 off, v19, s32 offset:76 +; DAGISEL64-NEXT: scratch_store_b32 off, v20, s32 offset:80 +; DAGISEL64-NEXT: scratch_store_b32 off, v21, s32 offset:84 +; DAGISEL64-NEXT: scratch_store_b32 off, v22, s32 offset:88 +; DAGISEL64-NEXT: scratch_store_b32 off, v23, s32 offset:92 +; DAGISEL64-NEXT: scratch_store_b32 off, v24, s32 offset:96 +; DAGISEL64-NEXT: scratch_store_b32 off, v25, s32 offset:100 +; DAGISEL64-NEXT: scratch_store_b32 off, v26, s32 offset:104 +; DAGISEL64-NEXT: scratch_store_b32 off, v27, s32 offset:108 +; DAGISEL64-NEXT: scratch_store_b32 off, v28, s32 offset:112 +; DAGISEL64-NEXT: scratch_store_b32 off, v29, s32 offset:116 +; DAGISEL64-NEXT: scratch_store_b32 off, v30, s32 offset:120 +; DAGISEL64-NEXT: scratch_store_b32 off, v31, s32 offset:124 +; DAGISEL64-NEXT: s_clause 0x1f +; DAGISEL64-NEXT: scratch_store_b32 off, v32, s32 offset:128 +; DAGISEL64-NEXT: scratch_store_b32 off, v33, s32 offset:132 +; DAGISEL64-NEXT: scratch_store_b32 off, v34, s32 offset:136 +; DAGISEL64-NEXT: scratch_store_b32 off, v35, s32 offset:140 +; DAGISEL64-NEXT: scratch_store_b32 off, v36, s32 offset:144 +; DAGISEL64-NEXT: scratch_store_b32 off, v37, s32 offset:148 +; DAGISEL64-NEXT: scratch_store_b32 off, v38, s32 offset:152 +; DAGISEL64-NEXT: scratch_store_b32 off, v39, s32 offset:156 +; DAGISEL64-NEXT: scratch_store_b32 off, v48, s32 offset:160 +; DAGISEL64-NEXT: scratch_store_b32 off, v49, s32 offset:164 +; DAGISEL64-NEXT: scratch_store_b32 off, v50, s32 offset:168 +; DAGISEL64-NEXT: 
scratch_store_b32 off, v51, s32 offset:172 +; DAGISEL64-NEXT: scratch_store_b32 off, v52, s32 offset:176 +; DAGISEL64-NEXT: scratch_store_b32 off, v53, s32 offset:180 +; DAGISEL64-NEXT: scratch_store_b32 off, v54, s32 offset:184 +; DAGISEL64-NEXT: scratch_store_b32 off, v55, s32 offset:188 +; DAGISEL64-NEXT: scratch_store_b32 off, v64, s32 offset:192 +; DAGISEL64-NEXT: scratch_store_b32 off, v65, s32 offset:196 +; DAGISEL64-NEXT: scratch_store_b32 off, v66, s32 offset:200 +; DAGISEL64-NEXT: scratch_store_b32 off, v67, s32 offset:204 +; DAGISEL64-NEXT: scratch_store_b32 off, v68, s32 offset:208 +; DAGISEL64-NEXT: scratch_store_b32 off, v69, s32 offset:212 +; DAGISEL64-NEXT: scratch_store_b32 off, v70, s32 offset:216 +; DAGISEL64-NEXT: scratch_store_b32 off, v71, s32 offset:220 +; DAGISEL64-NEXT: scratch_store_b32 off, v80, s32 offset:224 +; DAGISEL64-NEXT: scratch_store_b32 off, v81, s32 offset:228 +; DAGISEL64-NEXT: scratch_store_b32 off, v82, s32 offset:232 +; DAGISEL64-NEXT: scratch_store_b32 off, v83, s32 offset:236 +; DAGISEL64-NEXT: scratch_store_b32 off, v84, s32 offset:240 +; DAGISEL64-NEXT: scratch_store_b32 off, v85, s32 offset:244 +; DAGISEL64-NEXT: scratch_store_b32 off, v86, s32 offset:248 +; DAGISEL64-NEXT: scratch_store_b32 off, v87, s32 offset:252 +; DAGISEL64-NEXT: s_clause 0x1f +; DAGISEL64-NEXT: scratch_store_b32 off, v96, s32 offset:256 +; DAGISEL64-NEXT: scratch_store_b32 off, v97, s32 offset:260 +; DAGISEL64-NEXT: scratch_store_b32 off, v98, s32 offset:264 +; DAGISEL64-NEXT: scratch_store_b32 off, v99, s32 offset:268 +; DAGISEL64-NEXT: scratch_store_b32 off, v100, s32 offset:272 +; DAGISEL64-NEXT: scratch_store_b32 off, v101, s32 offset:276 +; DAGISEL64-NEXT: scratch_store_b32 off, v102, s32 offset:280 +; DAGISEL64-NEXT: scratch_store_b32 off, v103, s32 offset:284 +; DAGISEL64-NEXT: scratch_store_b32 off, v112, s32 offset:288 +; DAGISEL64-NEXT: scratch_store_b32 off, v113, s32 offset:292 +; DAGISEL64-NEXT: scratch_store_b32 off, v114, s32 offset:296 +; DAGISEL64-NEXT: scratch_store_b32 off, v115, s32 offset:300 +; DAGISEL64-NEXT: scratch_store_b32 off, v116, s32 offset:304 +; DAGISEL64-NEXT: scratch_store_b32 off, v117, s32 offset:308 +; DAGISEL64-NEXT: scratch_store_b32 off, v118, s32 offset:312 +; DAGISEL64-NEXT: scratch_store_b32 off, v119, s32 offset:316 +; DAGISEL64-NEXT: scratch_store_b32 off, v128, s32 offset:320 +; DAGISEL64-NEXT: scratch_store_b32 off, v129, s32 offset:324 +; DAGISEL64-NEXT: scratch_store_b32 off, v130, s32 offset:328 +; DAGISEL64-NEXT: scratch_store_b32 off, v131, s32 offset:332 +; DAGISEL64-NEXT: scratch_store_b32 off, v132, s32 offset:336 +; DAGISEL64-NEXT: scratch_store_b32 off, v133, s32 offset:340 +; DAGISEL64-NEXT: scratch_store_b32 off, v134, s32 offset:344 +; DAGISEL64-NEXT: scratch_store_b32 off, v135, s32 offset:348 +; DAGISEL64-NEXT: scratch_store_b32 off, v144, s32 offset:352 +; DAGISEL64-NEXT: scratch_store_b32 off, v145, s32 offset:356 +; DAGISEL64-NEXT: scratch_store_b32 off, v146, s32 offset:360 +; DAGISEL64-NEXT: scratch_store_b32 off, v147, s32 offset:364 +; DAGISEL64-NEXT: scratch_store_b32 off, v148, s32 offset:368 +; DAGISEL64-NEXT: scratch_store_b32 off, v149, s32 offset:372 +; DAGISEL64-NEXT: scratch_store_b32 off, v150, s32 offset:376 +; DAGISEL64-NEXT: scratch_store_b32 off, v151, s32 offset:380 +; DAGISEL64-NEXT: s_clause 0x1f +; DAGISEL64-NEXT: scratch_store_b32 off, v160, s32 offset:384 +; DAGISEL64-NEXT: scratch_store_b32 off, v161, s32 offset:388 +; DAGISEL64-NEXT: scratch_store_b32 off, v162, s32 offset:392 +; 
DAGISEL64-NEXT: scratch_store_b32 off, v163, s32 offset:396 +; DAGISEL64-NEXT: scratch_store_b32 off, v164, s32 offset:400 +; DAGISEL64-NEXT: scratch_store_b32 off, v165, s32 offset:404 +; DAGISEL64-NEXT: scratch_store_b32 off, v166, s32 offset:408 +; DAGISEL64-NEXT: scratch_store_b32 off, v167, s32 offset:412 +; DAGISEL64-NEXT: scratch_store_b32 off, v176, s32 offset:416 +; DAGISEL64-NEXT: scratch_store_b32 off, v177, s32 offset:420 +; DAGISEL64-NEXT: scratch_store_b32 off, v178, s32 offset:424 +; DAGISEL64-NEXT: scratch_store_b32 off, v179, s32 offset:428 +; DAGISEL64-NEXT: scratch_store_b32 off, v180, s32 offset:432 +; DAGISEL64-NEXT: scratch_store_b32 off, v181, s32 offset:436 +; DAGISEL64-NEXT: scratch_store_b32 off, v182, s32 offset:440 +; DAGISEL64-NEXT: scratch_store_b32 off, v183, s32 offset:444 +; DAGISEL64-NEXT: scratch_store_b32 off, v192, s32 offset:448 +; DAGISEL64-NEXT: scratch_store_b32 off, v193, s32 offset:452 +; DAGISEL64-NEXT: scratch_store_b32 off, v194, s32 offset:456 +; DAGISEL64-NEXT: scratch_store_b32 off, v195, s32 offset:460 +; DAGISEL64-NEXT: scratch_store_b32 off, v196, s32 offset:464 +; DAGISEL64-NEXT: scratch_store_b32 off, v197, s32 offset:468 +; DAGISEL64-NEXT: scratch_store_b32 off, v198, s32 offset:472 +; DAGISEL64-NEXT: scratch_store_b32 off, v199, s32 offset:476 +; DAGISEL64-NEXT: scratch_store_b32 off, v208, s32 offset:480 +; DAGISEL64-NEXT: scratch_store_b32 off, v209, s32 offset:484 +; DAGISEL64-NEXT: scratch_store_b32 off, v210, s32 offset:488 +; DAGISEL64-NEXT: scratch_store_b32 off, v211, s32 offset:492 +; DAGISEL64-NEXT: scratch_store_b32 off, v212, s32 offset:496 +; DAGISEL64-NEXT: scratch_store_b32 off, v213, s32 offset:500 +; DAGISEL64-NEXT: scratch_store_b32 off, v214, s32 offset:504 +; DAGISEL64-NEXT: scratch_store_b32 off, v215, s32 offset:508 +; DAGISEL64-NEXT: s_clause 0xf +; DAGISEL64-NEXT: scratch_store_b32 off, v224, s32 offset:512 +; DAGISEL64-NEXT: scratch_store_b32 off, v225, s32 offset:516 +; DAGISEL64-NEXT: scratch_store_b32 off, v226, s32 offset:520 +; DAGISEL64-NEXT: scratch_store_b32 off, v227, s32 offset:524 +; DAGISEL64-NEXT: scratch_store_b32 off, v228, s32 offset:528 +; DAGISEL64-NEXT: scratch_store_b32 off, v229, s32 offset:532 +; DAGISEL64-NEXT: scratch_store_b32 off, v230, s32 offset:536 +; DAGISEL64-NEXT: scratch_store_b32 off, v231, s32 offset:540 +; DAGISEL64-NEXT: scratch_store_b32 off, v240, s32 offset:544 +; DAGISEL64-NEXT: scratch_store_b32 off, v241, s32 offset:548 +; DAGISEL64-NEXT: scratch_store_b32 off, v242, s32 offset:552 +; DAGISEL64-NEXT: scratch_store_b32 off, v243, s32 offset:556 +; DAGISEL64-NEXT: scratch_store_b32 off, v244, s32 offset:560 +; DAGISEL64-NEXT: scratch_store_b32 off, v245, s32 offset:564 +; DAGISEL64-NEXT: scratch_store_b32 off, v246, s32 offset:568 +; DAGISEL64-NEXT: scratch_store_b32 off, v247, s32 offset:572 +; DAGISEL64-NEXT: s_mov_b64 exec, -1 +; DAGISEL64-NEXT: v_mov_b32_e32 v2, v0 +; DAGISEL64-NEXT: s_mov_b32 s37, gfx_callee@abs32@hi +; DAGISEL64-NEXT: s_mov_b32 s36, gfx_callee@abs32@lo +; DAGISEL64-NEXT: v_swap_b32 v0, v1 +; DAGISEL64-NEXT: s_wait_alu 0xfffe +; DAGISEL64-NEXT: s_xor_b64 exec, s[0:1], -1 +; DAGISEL64-NEXT: s_clause 0x1f +; DAGISEL64-NEXT: scratch_load_b32 v0, off, s32 +; DAGISEL64-NEXT: scratch_load_b32 v1, off, s32 offset:4 +; DAGISEL64-NEXT: scratch_load_b32 v2, off, s32 offset:8 +; DAGISEL64-NEXT: scratch_load_b32 v3, off, s32 offset:12 +; DAGISEL64-NEXT: scratch_load_b32 v4, off, s32 offset:16 +; DAGISEL64-NEXT: scratch_load_b32 v5, off, s32 offset:20 +; 
DAGISEL64-NEXT: scratch_load_b32 v6, off, s32 offset:24 +; DAGISEL64-NEXT: scratch_load_b32 v7, off, s32 offset:28 +; DAGISEL64-NEXT: scratch_load_b32 v8, off, s32 offset:32 +; DAGISEL64-NEXT: scratch_load_b32 v9, off, s32 offset:36 +; DAGISEL64-NEXT: scratch_load_b32 v10, off, s32 offset:40 +; DAGISEL64-NEXT: scratch_load_b32 v11, off, s32 offset:44 +; DAGISEL64-NEXT: scratch_load_b32 v12, off, s32 offset:48 +; DAGISEL64-NEXT: scratch_load_b32 v13, off, s32 offset:52 +; DAGISEL64-NEXT: scratch_load_b32 v14, off, s32 offset:56 +; DAGISEL64-NEXT: scratch_load_b32 v15, off, s32 offset:60 +; DAGISEL64-NEXT: scratch_load_b32 v16, off, s32 offset:64 +; DAGISEL64-NEXT: scratch_load_b32 v17, off, s32 offset:68 +; DAGISEL64-NEXT: scratch_load_b32 v18, off, s32 offset:72 +; DAGISEL64-NEXT: scratch_load_b32 v19, off, s32 offset:76 +; DAGISEL64-NEXT: scratch_load_b32 v20, off, s32 offset:80 +; DAGISEL64-NEXT: scratch_load_b32 v21, off, s32 offset:84 +; DAGISEL64-NEXT: scratch_load_b32 v22, off, s32 offset:88 +; DAGISEL64-NEXT: scratch_load_b32 v23, off, s32 offset:92 +; DAGISEL64-NEXT: scratch_load_b32 v24, off, s32 offset:96 +; DAGISEL64-NEXT: scratch_load_b32 v25, off, s32 offset:100 +; DAGISEL64-NEXT: scratch_load_b32 v26, off, s32 offset:104 +; DAGISEL64-NEXT: scratch_load_b32 v27, off, s32 offset:108 +; DAGISEL64-NEXT: scratch_load_b32 v28, off, s32 offset:112 +; DAGISEL64-NEXT: scratch_load_b32 v29, off, s32 offset:116 +; DAGISEL64-NEXT: scratch_load_b32 v30, off, s32 offset:120 +; DAGISEL64-NEXT: scratch_load_b32 v31, off, s32 offset:124 +; DAGISEL64-NEXT: s_clause 0x1f +; DAGISEL64-NEXT: scratch_load_b32 v32, off, s32 offset:128 +; DAGISEL64-NEXT: scratch_load_b32 v33, off, s32 offset:132 +; DAGISEL64-NEXT: scratch_load_b32 v34, off, s32 offset:136 +; DAGISEL64-NEXT: scratch_load_b32 v35, off, s32 offset:140 +; DAGISEL64-NEXT: scratch_load_b32 v36, off, s32 offset:144 +; DAGISEL64-NEXT: scratch_load_b32 v37, off, s32 offset:148 +; DAGISEL64-NEXT: scratch_load_b32 v38, off, s32 offset:152 +; DAGISEL64-NEXT: scratch_load_b32 v39, off, s32 offset:156 +; DAGISEL64-NEXT: scratch_load_b32 v48, off, s32 offset:160 +; DAGISEL64-NEXT: scratch_load_b32 v49, off, s32 offset:164 +; DAGISEL64-NEXT: scratch_load_b32 v50, off, s32 offset:168 +; DAGISEL64-NEXT: scratch_load_b32 v51, off, s32 offset:172 +; DAGISEL64-NEXT: scratch_load_b32 v52, off, s32 offset:176 +; DAGISEL64-NEXT: scratch_load_b32 v53, off, s32 offset:180 +; DAGISEL64-NEXT: scratch_load_b32 v54, off, s32 offset:184 +; DAGISEL64-NEXT: scratch_load_b32 v55, off, s32 offset:188 +; DAGISEL64-NEXT: scratch_load_b32 v64, off, s32 offset:192 +; DAGISEL64-NEXT: scratch_load_b32 v65, off, s32 offset:196 +; DAGISEL64-NEXT: scratch_load_b32 v66, off, s32 offset:200 +; DAGISEL64-NEXT: scratch_load_b32 v67, off, s32 offset:204 +; DAGISEL64-NEXT: scratch_load_b32 v68, off, s32 offset:208 +; DAGISEL64-NEXT: scratch_load_b32 v69, off, s32 offset:212 +; DAGISEL64-NEXT: scratch_load_b32 v70, off, s32 offset:216 +; DAGISEL64-NEXT: scratch_load_b32 v71, off, s32 offset:220 +; DAGISEL64-NEXT: scratch_load_b32 v80, off, s32 offset:224 +; DAGISEL64-NEXT: scratch_load_b32 v81, off, s32 offset:228 +; DAGISEL64-NEXT: scratch_load_b32 v82, off, s32 offset:232 +; DAGISEL64-NEXT: scratch_load_b32 v83, off, s32 offset:236 +; DAGISEL64-NEXT: scratch_load_b32 v84, off, s32 offset:240 +; DAGISEL64-NEXT: scratch_load_b32 v85, off, s32 offset:244 +; DAGISEL64-NEXT: scratch_load_b32 v86, off, s32 offset:248 +; DAGISEL64-NEXT: scratch_load_b32 v87, off, s32 offset:252 +; 
DAGISEL64-NEXT: s_clause 0x1f +; DAGISEL64-NEXT: scratch_load_b32 v96, off, s32 offset:256 +; DAGISEL64-NEXT: scratch_load_b32 v97, off, s32 offset:260 +; DAGISEL64-NEXT: scratch_load_b32 v98, off, s32 offset:264 +; DAGISEL64-NEXT: scratch_load_b32 v99, off, s32 offset:268 +; DAGISEL64-NEXT: scratch_load_b32 v100, off, s32 offset:272 +; DAGISEL64-NEXT: scratch_load_b32 v101, off, s32 offset:276 +; DAGISEL64-NEXT: scratch_load_b32 v102, off, s32 offset:280 +; DAGISEL64-NEXT: scratch_load_b32 v103, off, s32 offset:284 +; DAGISEL64-NEXT: scratch_load_b32 v112, off, s32 offset:288 +; DAGISEL64-NEXT: scratch_load_b32 v113, off, s32 offset:292 +; DAGISEL64-NEXT: scratch_load_b32 v114, off, s32 offset:296 +; DAGISEL64-NEXT: scratch_load_b32 v115, off, s32 offset:300 +; DAGISEL64-NEXT: scratch_load_b32 v116, off, s32 offset:304 +; DAGISEL64-NEXT: scratch_load_b32 v117, off, s32 offset:308 +; DAGISEL64-NEXT: scratch_load_b32 v118, off, s32 offset:312 +; DAGISEL64-NEXT: scratch_load_b32 v119, off, s32 offset:316 +; DAGISEL64-NEXT: scratch_load_b32 v128, off, s32 offset:320 +; DAGISEL64-NEXT: scratch_load_b32 v129, off, s32 offset:324 +; DAGISEL64-NEXT: scratch_load_b32 v130, off, s32 offset:328 +; DAGISEL64-NEXT: scratch_load_b32 v131, off, s32 offset:332 +; DAGISEL64-NEXT: scratch_load_b32 v132, off, s32 offset:336 +; DAGISEL64-NEXT: scratch_load_b32 v133, off, s32 offset:340 +; DAGISEL64-NEXT: scratch_load_b32 v134, off, s32 offset:344 +; DAGISEL64-NEXT: scratch_load_b32 v135, off, s32 offset:348 +; DAGISEL64-NEXT: scratch_load_b32 v144, off, s32 offset:352 +; DAGISEL64-NEXT: scratch_load_b32 v145, off, s32 offset:356 +; DAGISEL64-NEXT: scratch_load_b32 v146, off, s32 offset:360 +; DAGISEL64-NEXT: scratch_load_b32 v147, off, s32 offset:364 +; DAGISEL64-NEXT: scratch_load_b32 v148, off, s32 offset:368 +; DAGISEL64-NEXT: scratch_load_b32 v149, off, s32 offset:372 +; DAGISEL64-NEXT: scratch_load_b32 v150, off, s32 offset:376 +; DAGISEL64-NEXT: scratch_load_b32 v151, off, s32 offset:380 +; DAGISEL64-NEXT: s_clause 0x1f +; DAGISEL64-NEXT: scratch_load_b32 v160, off, s32 offset:384 +; DAGISEL64-NEXT: scratch_load_b32 v161, off, s32 offset:388 +; DAGISEL64-NEXT: scratch_load_b32 v162, off, s32 offset:392 +; DAGISEL64-NEXT: scratch_load_b32 v163, off, s32 offset:396 +; DAGISEL64-NEXT: scratch_load_b32 v164, off, s32 offset:400 +; DAGISEL64-NEXT: scratch_load_b32 v165, off, s32 offset:404 +; DAGISEL64-NEXT: scratch_load_b32 v166, off, s32 offset:408 +; DAGISEL64-NEXT: scratch_load_b32 v167, off, s32 offset:412 +; DAGISEL64-NEXT: scratch_load_b32 v176, off, s32 offset:416 +; DAGISEL64-NEXT: scratch_load_b32 v177, off, s32 offset:420 +; DAGISEL64-NEXT: scratch_load_b32 v178, off, s32 offset:424 +; DAGISEL64-NEXT: scratch_load_b32 v179, off, s32 offset:428 +; DAGISEL64-NEXT: scratch_load_b32 v180, off, s32 offset:432 +; DAGISEL64-NEXT: scratch_load_b32 v181, off, s32 offset:436 +; DAGISEL64-NEXT: scratch_load_b32 v182, off, s32 offset:440 +; DAGISEL64-NEXT: scratch_load_b32 v183, off, s32 offset:444 +; DAGISEL64-NEXT: scratch_load_b32 v192, off, s32 offset:448 +; DAGISEL64-NEXT: scratch_load_b32 v193, off, s32 offset:452 +; DAGISEL64-NEXT: scratch_load_b32 v194, off, s32 offset:456 +; DAGISEL64-NEXT: scratch_load_b32 v195, off, s32 offset:460 +; DAGISEL64-NEXT: scratch_load_b32 v196, off, s32 offset:464 +; DAGISEL64-NEXT: scratch_load_b32 v197, off, s32 offset:468 +; DAGISEL64-NEXT: scratch_load_b32 v198, off, s32 offset:472 +; DAGISEL64-NEXT: scratch_load_b32 v199, off, s32 offset:476 +; DAGISEL64-NEXT: 
scratch_load_b32 v208, off, s32 offset:480 +; DAGISEL64-NEXT: scratch_load_b32 v209, off, s32 offset:484 +; DAGISEL64-NEXT: scratch_load_b32 v210, off, s32 offset:488 +; DAGISEL64-NEXT: scratch_load_b32 v211, off, s32 offset:492 +; DAGISEL64-NEXT: scratch_load_b32 v212, off, s32 offset:496 +; DAGISEL64-NEXT: scratch_load_b32 v213, off, s32 offset:500 +; DAGISEL64-NEXT: scratch_load_b32 v214, off, s32 offset:504 +; DAGISEL64-NEXT: scratch_load_b32 v215, off, s32 offset:508 +; DAGISEL64-NEXT: s_clause 0xf +; DAGISEL64-NEXT: scratch_load_b32 v224, off, s32 offset:512 +; DAGISEL64-NEXT: scratch_load_b32 v225, off, s32 offset:516 +; DAGISEL64-NEXT: scratch_load_b32 v226, off, s32 offset:520 +; DAGISEL64-NEXT: scratch_load_b32 v227, off, s32 offset:524 +; DAGISEL64-NEXT: scratch_load_b32 v228, off, s32 offset:528 +; DAGISEL64-NEXT: scratch_load_b32 v229, off, s32 offset:532 +; DAGISEL64-NEXT: scratch_load_b32 v230, off, s32 offset:536 +; DAGISEL64-NEXT: scratch_load_b32 v231, off, s32 offset:540 +; DAGISEL64-NEXT: scratch_load_b32 v240, off, s32 offset:544 +; DAGISEL64-NEXT: scratch_load_b32 v241, off, s32 offset:548 +; DAGISEL64-NEXT: scratch_load_b32 v242, off, s32 offset:552 +; DAGISEL64-NEXT: scratch_load_b32 v243, off, s32 offset:556 +; DAGISEL64-NEXT: scratch_load_b32 v244, off, s32 offset:560 +; DAGISEL64-NEXT: scratch_load_b32 v245, off, s32 offset:564 +; DAGISEL64-NEXT: scratch_load_b32 v246, off, s32 offset:568 +; DAGISEL64-NEXT: scratch_load_b32 v247, off, s32 offset:572 +; DAGISEL64-NEXT: s_mov_b64 exec, s[0:1] +; DAGISEL64-NEXT: s_setpc_b64 s[36:37] +; +; GISEL64-LABEL: tail_call_gfx_from_whole_wave: +; GISEL64: ; %bb.0: +; GISEL64-NEXT: s_wait_loadcnt_dscnt 0x0 +; GISEL64-NEXT: s_wait_expcnt 0x0 +; GISEL64-NEXT: s_wait_samplecnt 0x0 +; GISEL64-NEXT: s_wait_bvhcnt 0x0 +; GISEL64-NEXT: s_wait_kmcnt 0x0 +; GISEL64-NEXT: s_xor_saveexec_b64 s[0:1], -1 +; GISEL64-NEXT: s_clause 0x1f +; GISEL64-NEXT: scratch_store_b32 off, v0, s32 +; GISEL64-NEXT: scratch_store_b32 off, v1, s32 offset:4 +; GISEL64-NEXT: scratch_store_b32 off, v2, s32 offset:8 +; GISEL64-NEXT: scratch_store_b32 off, v3, s32 offset:12 +; GISEL64-NEXT: scratch_store_b32 off, v4, s32 offset:16 +; GISEL64-NEXT: scratch_store_b32 off, v5, s32 offset:20 +; GISEL64-NEXT: scratch_store_b32 off, v6, s32 offset:24 +; GISEL64-NEXT: scratch_store_b32 off, v7, s32 offset:28 +; GISEL64-NEXT: scratch_store_b32 off, v8, s32 offset:32 +; GISEL64-NEXT: scratch_store_b32 off, v9, s32 offset:36 +; GISEL64-NEXT: scratch_store_b32 off, v10, s32 offset:40 +; GISEL64-NEXT: scratch_store_b32 off, v11, s32 offset:44 +; GISEL64-NEXT: scratch_store_b32 off, v12, s32 offset:48 +; GISEL64-NEXT: scratch_store_b32 off, v13, s32 offset:52 +; GISEL64-NEXT: scratch_store_b32 off, v14, s32 offset:56 +; GISEL64-NEXT: scratch_store_b32 off, v15, s32 offset:60 +; GISEL64-NEXT: scratch_store_b32 off, v16, s32 offset:64 +; GISEL64-NEXT: scratch_store_b32 off, v17, s32 offset:68 +; GISEL64-NEXT: scratch_store_b32 off, v18, s32 offset:72 +; GISEL64-NEXT: scratch_store_b32 off, v19, s32 offset:76 +; GISEL64-NEXT: scratch_store_b32 off, v20, s32 offset:80 +; GISEL64-NEXT: scratch_store_b32 off, v21, s32 offset:84 +; GISEL64-NEXT: scratch_store_b32 off, v22, s32 offset:88 +; GISEL64-NEXT: scratch_store_b32 off, v23, s32 offset:92 +; GISEL64-NEXT: scratch_store_b32 off, v24, s32 offset:96 +; GISEL64-NEXT: scratch_store_b32 off, v25, s32 offset:100 +; GISEL64-NEXT: scratch_store_b32 off, v26, s32 offset:104 +; GISEL64-NEXT: scratch_store_b32 off, v27, s32 offset:108 +; 
GISEL64-NEXT: scratch_store_b32 off, v28, s32 offset:112 +; GISEL64-NEXT: scratch_store_b32 off, v29, s32 offset:116 +; GISEL64-NEXT: scratch_store_b32 off, v30, s32 offset:120 +; GISEL64-NEXT: scratch_store_b32 off, v31, s32 offset:124 +; GISEL64-NEXT: s_clause 0x1f +; GISEL64-NEXT: scratch_store_b32 off, v32, s32 offset:128 +; GISEL64-NEXT: scratch_store_b32 off, v33, s32 offset:132 +; GISEL64-NEXT: scratch_store_b32 off, v34, s32 offset:136 +; GISEL64-NEXT: scratch_store_b32 off, v35, s32 offset:140 +; GISEL64-NEXT: scratch_store_b32 off, v36, s32 offset:144 +; GISEL64-NEXT: scratch_store_b32 off, v37, s32 offset:148 +; GISEL64-NEXT: scratch_store_b32 off, v38, s32 offset:152 +; GISEL64-NEXT: scratch_store_b32 off, v39, s32 offset:156 +; GISEL64-NEXT: scratch_store_b32 off, v48, s32 offset:160 +; GISEL64-NEXT: scratch_store_b32 off, v49, s32 offset:164 +; GISEL64-NEXT: scratch_store_b32 off, v50, s32 offset:168 +; GISEL64-NEXT: scratch_store_b32 off, v51, s32 offset:172 +; GISEL64-NEXT: scratch_store_b32 off, v52, s32 offset:176 +; GISEL64-NEXT: scratch_store_b32 off, v53, s32 offset:180 +; GISEL64-NEXT: scratch_store_b32 off, v54, s32 offset:184 +; GISEL64-NEXT: scratch_store_b32 off, v55, s32 offset:188 +; GISEL64-NEXT: scratch_store_b32 off, v64, s32 offset:192 +; GISEL64-NEXT: scratch_store_b32 off, v65, s32 offset:196 +; GISEL64-NEXT: scratch_store_b32 off, v66, s32 offset:200 +; GISEL64-NEXT: scratch_store_b32 off, v67, s32 offset:204 +; GISEL64-NEXT: scratch_store_b32 off, v68, s32 offset:208 +; GISEL64-NEXT: scratch_store_b32 off, v69, s32 offset:212 +; GISEL64-NEXT: scratch_store_b32 off, v70, s32 offset:216 +; GISEL64-NEXT: scratch_store_b32 off, v71, s32 offset:220 +; GISEL64-NEXT: scratch_store_b32 off, v80, s32 offset:224 +; GISEL64-NEXT: scratch_store_b32 off, v81, s32 offset:228 +; GISEL64-NEXT: scratch_store_b32 off, v82, s32 offset:232 +; GISEL64-NEXT: scratch_store_b32 off, v83, s32 offset:236 +; GISEL64-NEXT: scratch_store_b32 off, v84, s32 offset:240 +; GISEL64-NEXT: scratch_store_b32 off, v85, s32 offset:244 +; GISEL64-NEXT: scratch_store_b32 off, v86, s32 offset:248 +; GISEL64-NEXT: scratch_store_b32 off, v87, s32 offset:252 +; GISEL64-NEXT: s_clause 0x1f +; GISEL64-NEXT: scratch_store_b32 off, v96, s32 offset:256 +; GISEL64-NEXT: scratch_store_b32 off, v97, s32 offset:260 +; GISEL64-NEXT: scratch_store_b32 off, v98, s32 offset:264 +; GISEL64-NEXT: scratch_store_b32 off, v99, s32 offset:268 +; GISEL64-NEXT: scratch_store_b32 off, v100, s32 offset:272 +; GISEL64-NEXT: scratch_store_b32 off, v101, s32 offset:276 +; GISEL64-NEXT: scratch_store_b32 off, v102, s32 offset:280 +; GISEL64-NEXT: scratch_store_b32 off, v103, s32 offset:284 +; GISEL64-NEXT: scratch_store_b32 off, v112, s32 offset:288 +; GISEL64-NEXT: scratch_store_b32 off, v113, s32 offset:292 +; GISEL64-NEXT: scratch_store_b32 off, v114, s32 offset:296 +; GISEL64-NEXT: scratch_store_b32 off, v115, s32 offset:300 +; GISEL64-NEXT: scratch_store_b32 off, v116, s32 offset:304 +; GISEL64-NEXT: scratch_store_b32 off, v117, s32 offset:308 +; GISEL64-NEXT: scratch_store_b32 off, v118, s32 offset:312 +; GISEL64-NEXT: scratch_store_b32 off, v119, s32 offset:316 +; GISEL64-NEXT: scratch_store_b32 off, v128, s32 offset:320 +; GISEL64-NEXT: scratch_store_b32 off, v129, s32 offset:324 +; GISEL64-NEXT: scratch_store_b32 off, v130, s32 offset:328 +; GISEL64-NEXT: scratch_store_b32 off, v131, s32 offset:332 +; GISEL64-NEXT: scratch_store_b32 off, v132, s32 offset:336 +; GISEL64-NEXT: scratch_store_b32 off, v133, s32 
offset:340 +; GISEL64-NEXT: scratch_store_b32 off, v134, s32 offset:344 +; GISEL64-NEXT: scratch_store_b32 off, v135, s32 offset:348 +; GISEL64-NEXT: scratch_store_b32 off, v144, s32 offset:352 +; GISEL64-NEXT: scratch_store_b32 off, v145, s32 offset:356 +; GISEL64-NEXT: scratch_store_b32 off, v146, s32 offset:360 +; GISEL64-NEXT: scratch_store_b32 off, v147, s32 offset:364 +; GISEL64-NEXT: scratch_store_b32 off, v148, s32 offset:368 +; GISEL64-NEXT: scratch_store_b32 off, v149, s32 offset:372 +; GISEL64-NEXT: scratch_store_b32 off, v150, s32 offset:376 +; GISEL64-NEXT: scratch_store_b32 off, v151, s32 offset:380 +; GISEL64-NEXT: s_clause 0x1f +; GISEL64-NEXT: scratch_store_b32 off, v160, s32 offset:384 +; GISEL64-NEXT: scratch_store_b32 off, v161, s32 offset:388 +; GISEL64-NEXT: scratch_store_b32 off, v162, s32 offset:392 +; GISEL64-NEXT: scratch_store_b32 off, v163, s32 offset:396 +; GISEL64-NEXT: scratch_store_b32 off, v164, s32 offset:400 +; GISEL64-NEXT: scratch_store_b32 off, v165, s32 offset:404 +; GISEL64-NEXT: scratch_store_b32 off, v166, s32 offset:408 +; GISEL64-NEXT: scratch_store_b32 off, v167, s32 offset:412 +; GISEL64-NEXT: scratch_store_b32 off, v176, s32 offset:416 +; GISEL64-NEXT: scratch_store_b32 off, v177, s32 offset:420 +; GISEL64-NEXT: scratch_store_b32 off, v178, s32 offset:424 +; GISEL64-NEXT: scratch_store_b32 off, v179, s32 offset:428 +; GISEL64-NEXT: scratch_store_b32 off, v180, s32 offset:432 +; GISEL64-NEXT: scratch_store_b32 off, v181, s32 offset:436 +; GISEL64-NEXT: scratch_store_b32 off, v182, s32 offset:440 +; GISEL64-NEXT: scratch_store_b32 off, v183, s32 offset:444 +; GISEL64-NEXT: scratch_store_b32 off, v192, s32 offset:448 +; GISEL64-NEXT: scratch_store_b32 off, v193, s32 offset:452 +; GISEL64-NEXT: scratch_store_b32 off, v194, s32 offset:456 +; GISEL64-NEXT: scratch_store_b32 off, v195, s32 offset:460 +; GISEL64-NEXT: scratch_store_b32 off, v196, s32 offset:464 +; GISEL64-NEXT: scratch_store_b32 off, v197, s32 offset:468 +; GISEL64-NEXT: scratch_store_b32 off, v198, s32 offset:472 +; GISEL64-NEXT: scratch_store_b32 off, v199, s32 offset:476 +; GISEL64-NEXT: scratch_store_b32 off, v208, s32 offset:480 +; GISEL64-NEXT: scratch_store_b32 off, v209, s32 offset:484 +; GISEL64-NEXT: scratch_store_b32 off, v210, s32 offset:488 +; GISEL64-NEXT: scratch_store_b32 off, v211, s32 offset:492 +; GISEL64-NEXT: scratch_store_b32 off, v212, s32 offset:496 +; GISEL64-NEXT: scratch_store_b32 off, v213, s32 offset:500 +; GISEL64-NEXT: scratch_store_b32 off, v214, s32 offset:504 +; GISEL64-NEXT: scratch_store_b32 off, v215, s32 offset:508 +; GISEL64-NEXT: s_clause 0xf +; GISEL64-NEXT: scratch_store_b32 off, v224, s32 offset:512 +; GISEL64-NEXT: scratch_store_b32 off, v225, s32 offset:516 +; GISEL64-NEXT: scratch_store_b32 off, v226, s32 offset:520 +; GISEL64-NEXT: scratch_store_b32 off, v227, s32 offset:524 +; GISEL64-NEXT: scratch_store_b32 off, v228, s32 offset:528 +; GISEL64-NEXT: scratch_store_b32 off, v229, s32 offset:532 +; GISEL64-NEXT: scratch_store_b32 off, v230, s32 offset:536 +; GISEL64-NEXT: scratch_store_b32 off, v231, s32 offset:540 +; GISEL64-NEXT: scratch_store_b32 off, v240, s32 offset:544 +; GISEL64-NEXT: scratch_store_b32 off, v241, s32 offset:548 +; GISEL64-NEXT: scratch_store_b32 off, v242, s32 offset:552 +; GISEL64-NEXT: scratch_store_b32 off, v243, s32 offset:556 +; GISEL64-NEXT: scratch_store_b32 off, v244, s32 offset:560 +; GISEL64-NEXT: scratch_store_b32 off, v245, s32 offset:564 +; GISEL64-NEXT: scratch_store_b32 off, v246, s32 offset:568 +; 
GISEL64-NEXT: scratch_store_b32 off, v247, s32 offset:572 +; GISEL64-NEXT: s_mov_b64 exec, -1 +; GISEL64-NEXT: v_mov_b32_e32 v2, v0 +; GISEL64-NEXT: v_swap_b32 v0, v1 +; GISEL64-NEXT: s_mov_b32 s36, gfx_callee@abs32@lo +; GISEL64-NEXT: s_mov_b32 s37, gfx_callee@abs32@hi +; GISEL64-NEXT: s_wait_alu 0xfffe +; GISEL64-NEXT: s_xor_b64 exec, s[0:1], -1 +; GISEL64-NEXT: s_clause 0x1f +; GISEL64-NEXT: scratch_load_b32 v0, off, s32 +; GISEL64-NEXT: scratch_load_b32 v1, off, s32 offset:4 +; GISEL64-NEXT: scratch_load_b32 v2, off, s32 offset:8 +; GISEL64-NEXT: scratch_load_b32 v3, off, s32 offset:12 +; GISEL64-NEXT: scratch_load_b32 v4, off, s32 offset:16 +; GISEL64-NEXT: scratch_load_b32 v5, off, s32 offset:20 +; GISEL64-NEXT: scratch_load_b32 v6, off, s32 offset:24 +; GISEL64-NEXT: scratch_load_b32 v7, off, s32 offset:28 +; GISEL64-NEXT: scratch_load_b32 v8, off, s32 offset:32 +; GISEL64-NEXT: scratch_load_b32 v9, off, s32 offset:36 +; GISEL64-NEXT: scratch_load_b32 v10, off, s32 offset:40 +; GISEL64-NEXT: scratch_load_b32 v11, off, s32 offset:44 +; GISEL64-NEXT: scratch_load_b32 v12, off, s32 offset:48 +; GISEL64-NEXT: scratch_load_b32 v13, off, s32 offset:52 +; GISEL64-NEXT: scratch_load_b32 v14, off, s32 offset:56 +; GISEL64-NEXT: scratch_load_b32 v15, off, s32 offset:60 +; GISEL64-NEXT: scratch_load_b32 v16, off, s32 offset:64 +; GISEL64-NEXT: scratch_load_b32 v17, off, s32 offset:68 +; GISEL64-NEXT: scratch_load_b32 v18, off, s32 offset:72 +; GISEL64-NEXT: scratch_load_b32 v19, off, s32 offset:76 +; GISEL64-NEXT: scratch_load_b32 v20, off, s32 offset:80 +; GISEL64-NEXT: scratch_load_b32 v21, off, s32 offset:84 +; GISEL64-NEXT: scratch_load_b32 v22, off, s32 offset:88 +; GISEL64-NEXT: scratch_load_b32 v23, off, s32 offset:92 +; GISEL64-NEXT: scratch_load_b32 v24, off, s32 offset:96 +; GISEL64-NEXT: scratch_load_b32 v25, off, s32 offset:100 +; GISEL64-NEXT: scratch_load_b32 v26, off, s32 offset:104 +; GISEL64-NEXT: scratch_load_b32 v27, off, s32 offset:108 +; GISEL64-NEXT: scratch_load_b32 v28, off, s32 offset:112 +; GISEL64-NEXT: scratch_load_b32 v29, off, s32 offset:116 +; GISEL64-NEXT: scratch_load_b32 v30, off, s32 offset:120 +; GISEL64-NEXT: scratch_load_b32 v31, off, s32 offset:124 +; GISEL64-NEXT: s_clause 0x1f +; GISEL64-NEXT: scratch_load_b32 v32, off, s32 offset:128 +; GISEL64-NEXT: scratch_load_b32 v33, off, s32 offset:132 +; GISEL64-NEXT: scratch_load_b32 v34, off, s32 offset:136 +; GISEL64-NEXT: scratch_load_b32 v35, off, s32 offset:140 +; GISEL64-NEXT: scratch_load_b32 v36, off, s32 offset:144 +; GISEL64-NEXT: scratch_load_b32 v37, off, s32 offset:148 +; GISEL64-NEXT: scratch_load_b32 v38, off, s32 offset:152 +; GISEL64-NEXT: scratch_load_b32 v39, off, s32 offset:156 +; GISEL64-NEXT: scratch_load_b32 v48, off, s32 offset:160 +; GISEL64-NEXT: scratch_load_b32 v49, off, s32 offset:164 +; GISEL64-NEXT: scratch_load_b32 v50, off, s32 offset:168 +; GISEL64-NEXT: scratch_load_b32 v51, off, s32 offset:172 +; GISEL64-NEXT: scratch_load_b32 v52, off, s32 offset:176 +; GISEL64-NEXT: scratch_load_b32 v53, off, s32 offset:180 +; GISEL64-NEXT: scratch_load_b32 v54, off, s32 offset:184 +; GISEL64-NEXT: scratch_load_b32 v55, off, s32 offset:188 +; GISEL64-NEXT: scratch_load_b32 v64, off, s32 offset:192 +; GISEL64-NEXT: scratch_load_b32 v65, off, s32 offset:196 +; GISEL64-NEXT: scratch_load_b32 v66, off, s32 offset:200 +; GISEL64-NEXT: scratch_load_b32 v67, off, s32 offset:204 +; GISEL64-NEXT: scratch_load_b32 v68, off, s32 offset:208 +; GISEL64-NEXT: scratch_load_b32 v69, off, s32 offset:212 +; 
GISEL64-NEXT: scratch_load_b32 v70, off, s32 offset:216 +; GISEL64-NEXT: scratch_load_b32 v71, off, s32 offset:220 +; GISEL64-NEXT: scratch_load_b32 v80, off, s32 offset:224 +; GISEL64-NEXT: scratch_load_b32 v81, off, s32 offset:228 +; GISEL64-NEXT: scratch_load_b32 v82, off, s32 offset:232 +; GISEL64-NEXT: scratch_load_b32 v83, off, s32 offset:236 +; GISEL64-NEXT: scratch_load_b32 v84, off, s32 offset:240 +; GISEL64-NEXT: scratch_load_b32 v85, off, s32 offset:244 +; GISEL64-NEXT: scratch_load_b32 v86, off, s32 offset:248 +; GISEL64-NEXT: scratch_load_b32 v87, off, s32 offset:252 +; GISEL64-NEXT: s_clause 0x1f +; GISEL64-NEXT: scratch_load_b32 v96, off, s32 offset:256 +; GISEL64-NEXT: scratch_load_b32 v97, off, s32 offset:260 +; GISEL64-NEXT: scratch_load_b32 v98, off, s32 offset:264 +; GISEL64-NEXT: scratch_load_b32 v99, off, s32 offset:268 +; GISEL64-NEXT: scratch_load_b32 v100, off, s32 offset:272 +; GISEL64-NEXT: scratch_load_b32 v101, off, s32 offset:276 +; GISEL64-NEXT: scratch_load_b32 v102, off, s32 offset:280 +; GISEL64-NEXT: scratch_load_b32 v103, off, s32 offset:284 +; GISEL64-NEXT: scratch_load_b32 v112, off, s32 offset:288 +; GISEL64-NEXT: scratch_load_b32 v113, off, s32 offset:292 +; GISEL64-NEXT: scratch_load_b32 v114, off, s32 offset:296 +; GISEL64-NEXT: scratch_load_b32 v115, off, s32 offset:300 +; GISEL64-NEXT: scratch_load_b32 v116, off, s32 offset:304 +; GISEL64-NEXT: scratch_load_b32 v117, off, s32 offset:308 +; GISEL64-NEXT: scratch_load_b32 v118, off, s32 offset:312 +; GISEL64-NEXT: scratch_load_b32 v119, off, s32 offset:316 +; GISEL64-NEXT: scratch_load_b32 v128, off, s32 offset:320 +; GISEL64-NEXT: scratch_load_b32 v129, off, s32 offset:324 +; GISEL64-NEXT: scratch_load_b32 v130, off, s32 offset:328 +; GISEL64-NEXT: scratch_load_b32 v131, off, s32 offset:332 +; GISEL64-NEXT: scratch_load_b32 v132, off, s32 offset:336 +; GISEL64-NEXT: scratch_load_b32 v133, off, s32 offset:340 +; GISEL64-NEXT: scratch_load_b32 v134, off, s32 offset:344 +; GISEL64-NEXT: scratch_load_b32 v135, off, s32 offset:348 +; GISEL64-NEXT: scratch_load_b32 v144, off, s32 offset:352 +; GISEL64-NEXT: scratch_load_b32 v145, off, s32 offset:356 +; GISEL64-NEXT: scratch_load_b32 v146, off, s32 offset:360 +; GISEL64-NEXT: scratch_load_b32 v147, off, s32 offset:364 +; GISEL64-NEXT: scratch_load_b32 v148, off, s32 offset:368 +; GISEL64-NEXT: scratch_load_b32 v149, off, s32 offset:372 +; GISEL64-NEXT: scratch_load_b32 v150, off, s32 offset:376 +; GISEL64-NEXT: scratch_load_b32 v151, off, s32 offset:380 +; GISEL64-NEXT: s_clause 0x1f +; GISEL64-NEXT: scratch_load_b32 v160, off, s32 offset:384 +; GISEL64-NEXT: scratch_load_b32 v161, off, s32 offset:388 +; GISEL64-NEXT: scratch_load_b32 v162, off, s32 offset:392 +; GISEL64-NEXT: scratch_load_b32 v163, off, s32 offset:396 +; GISEL64-NEXT: scratch_load_b32 v164, off, s32 offset:400 +; GISEL64-NEXT: scratch_load_b32 v165, off, s32 offset:404 +; GISEL64-NEXT: scratch_load_b32 v166, off, s32 offset:408 +; GISEL64-NEXT: scratch_load_b32 v167, off, s32 offset:412 +; GISEL64-NEXT: scratch_load_b32 v176, off, s32 offset:416 +; GISEL64-NEXT: scratch_load_b32 v177, off, s32 offset:420 +; GISEL64-NEXT: scratch_load_b32 v178, off, s32 offset:424 +; GISEL64-NEXT: scratch_load_b32 v179, off, s32 offset:428 +; GISEL64-NEXT: scratch_load_b32 v180, off, s32 offset:432 +; GISEL64-NEXT: scratch_load_b32 v181, off, s32 offset:436 +; GISEL64-NEXT: scratch_load_b32 v182, off, s32 offset:440 +; GISEL64-NEXT: scratch_load_b32 v183, off, s32 offset:444 +; GISEL64-NEXT: 
scratch_load_b32 v192, off, s32 offset:448 +; GISEL64-NEXT: scratch_load_b32 v193, off, s32 offset:452 +; GISEL64-NEXT: scratch_load_b32 v194, off, s32 offset:456 +; GISEL64-NEXT: scratch_load_b32 v195, off, s32 offset:460 +; GISEL64-NEXT: scratch_load_b32 v196, off, s32 offset:464 +; GISEL64-NEXT: scratch_load_b32 v197, off, s32 offset:468 +; GISEL64-NEXT: scratch_load_b32 v198, off, s32 offset:472 +; GISEL64-NEXT: scratch_load_b32 v199, off, s32 offset:476 +; GISEL64-NEXT: scratch_load_b32 v208, off, s32 offset:480 +; GISEL64-NEXT: scratch_load_b32 v209, off, s32 offset:484 +; GISEL64-NEXT: scratch_load_b32 v210, off, s32 offset:488 +; GISEL64-NEXT: scratch_load_b32 v211, off, s32 offset:492 +; GISEL64-NEXT: scratch_load_b32 v212, off, s32 offset:496 +; GISEL64-NEXT: scratch_load_b32 v213, off, s32 offset:500 +; GISEL64-NEXT: scratch_load_b32 v214, off, s32 offset:504 +; GISEL64-NEXT: scratch_load_b32 v215, off, s32 offset:508 +; GISEL64-NEXT: s_clause 0xf +; GISEL64-NEXT: scratch_load_b32 v224, off, s32 offset:512 +; GISEL64-NEXT: scratch_load_b32 v225, off, s32 offset:516 +; GISEL64-NEXT: scratch_load_b32 v226, off, s32 offset:520 +; GISEL64-NEXT: scratch_load_b32 v227, off, s32 offset:524 +; GISEL64-NEXT: scratch_load_b32 v228, off, s32 offset:528 +; GISEL64-NEXT: scratch_load_b32 v229, off, s32 offset:532 +; GISEL64-NEXT: scratch_load_b32 v230, off, s32 offset:536 +; GISEL64-NEXT: scratch_load_b32 v231, off, s32 offset:540 +; GISEL64-NEXT: scratch_load_b32 v240, off, s32 offset:544 +; GISEL64-NEXT: scratch_load_b32 v241, off, s32 offset:548 +; GISEL64-NEXT: scratch_load_b32 v242, off, s32 offset:552 +; GISEL64-NEXT: scratch_load_b32 v243, off, s32 offset:556 +; GISEL64-NEXT: scratch_load_b32 v244, off, s32 offset:560 +; GISEL64-NEXT: scratch_load_b32 v245, off, s32 offset:564 +; GISEL64-NEXT: scratch_load_b32 v246, off, s32 offset:568 +; GISEL64-NEXT: scratch_load_b32 v247, off, s32 offset:572 +; GISEL64-NEXT: s_mov_b64 exec, s[0:1] +; GISEL64-NEXT: s_setpc_b64 s[36:37] +; +; GFX1250-DAGISEL-LABEL: tail_call_gfx_from_whole_wave: +; GFX1250-DAGISEL: ; %bb.0: +; GFX1250-DAGISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-DAGISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-DAGISEL-NEXT: s_xor_saveexec_b32 s0, -1 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v0, s32 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v1, s32 offset:4 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v2, s32 offset:8 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v3, s32 offset:12 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v4, s32 offset:16 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v5, s32 offset:20 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v6, s32 offset:24 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v7, s32 offset:28 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v8, s32 offset:32 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v9, s32 offset:36 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v10, s32 offset:40 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v11, s32 offset:44 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v12, s32 offset:48 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v13, s32 offset:52 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v14, s32 offset:56 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v15, s32 offset:60 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v16, s32 offset:64 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v17, s32 offset:68 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v18, s32 offset:72 +; 
GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v19, s32 offset:76 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v20, s32 offset:80 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v21, s32 offset:84 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v22, s32 offset:88 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v23, s32 offset:92 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v24, s32 offset:96 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v25, s32 offset:100 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v26, s32 offset:104 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v27, s32 offset:108 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v28, s32 offset:112 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v29, s32 offset:116 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v30, s32 offset:120 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v31, s32 offset:124 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v32, s32 offset:128 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v33, s32 offset:132 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v34, s32 offset:136 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v35, s32 offset:140 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v36, s32 offset:144 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v37, s32 offset:148 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v38, s32 offset:152 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v39, s32 offset:156 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v48, s32 offset:160 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v49, s32 offset:164 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v50, s32 offset:168 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v51, s32 offset:172 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v52, s32 offset:176 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v53, s32 offset:180 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v54, s32 offset:184 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v55, s32 offset:188 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v64, s32 offset:192 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v65, s32 offset:196 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v66, s32 offset:200 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v67, s32 offset:204 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v68, s32 offset:208 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v69, s32 offset:212 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v70, s32 offset:216 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v71, s32 offset:220 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v80, s32 offset:224 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v81, s32 offset:228 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v82, s32 offset:232 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v83, s32 offset:236 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v84, s32 offset:240 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v85, s32 offset:244 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v86, s32 offset:248 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v87, s32 offset:252 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v96, s32 offset:256 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v97, s32 offset:260 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v98, s32 offset:264 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v99, s32 offset:268 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v100, s32 offset:272 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v101, s32 offset:276 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v102, 
s32 offset:280 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v103, s32 offset:284 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v112, s32 offset:288 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v113, s32 offset:292 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v114, s32 offset:296 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v115, s32 offset:300 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v116, s32 offset:304 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v117, s32 offset:308 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v118, s32 offset:312 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v119, s32 offset:316 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v128, s32 offset:320 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v129, s32 offset:324 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v130, s32 offset:328 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v131, s32 offset:332 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v132, s32 offset:336 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v133, s32 offset:340 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v134, s32 offset:344 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v135, s32 offset:348 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v144, s32 offset:352 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v145, s32 offset:356 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v146, s32 offset:360 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v147, s32 offset:364 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v148, s32 offset:368 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v149, s32 offset:372 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v150, s32 offset:376 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v151, s32 offset:380 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v160, s32 offset:384 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v161, s32 offset:388 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v162, s32 offset:392 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v163, s32 offset:396 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v164, s32 offset:400 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v165, s32 offset:404 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v166, s32 offset:408 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v167, s32 offset:412 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v176, s32 offset:416 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v177, s32 offset:420 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v178, s32 offset:424 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v179, s32 offset:428 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v180, s32 offset:432 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v181, s32 offset:436 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v182, s32 offset:440 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v183, s32 offset:444 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v192, s32 offset:448 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v193, s32 offset:452 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v194, s32 offset:456 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v195, s32 offset:460 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v196, s32 offset:464 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v197, s32 offset:468 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v198, s32 offset:472 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v199, s32 offset:476 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v208, s32 offset:480 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v209, s32 offset:484 +; 
GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v210, s32 offset:488 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v211, s32 offset:492 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v212, s32 offset:496 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v213, s32 offset:500 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v214, s32 offset:504 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v215, s32 offset:508 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v224, s32 offset:512 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v225, s32 offset:516 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v226, s32 offset:520 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v227, s32 offset:524 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v228, s32 offset:528 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v229, s32 offset:532 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v230, s32 offset:536 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v231, s32 offset:540 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v240, s32 offset:544 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v241, s32 offset:548 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v242, s32 offset:552 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v243, s32 offset:556 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v244, s32 offset:560 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v245, s32 offset:564 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v246, s32 offset:568 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v247, s32 offset:572 +; GFX1250-DAGISEL-NEXT: s_set_vgpr_msb 4 ; msbs: dst=0 src0=0 src1=1 src2=0 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v0 /*v256*/, s32 offset:576 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v1 /*v257*/, s32 offset:580 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v2 /*v258*/, s32 offset:584 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v3 /*v259*/, s32 offset:588 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v4 /*v260*/, s32 offset:592 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v5 /*v261*/, s32 offset:596 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v6 /*v262*/, s32 offset:600 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v7 /*v263*/, s32 offset:604 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v8 /*v264*/, s32 offset:608 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v9 /*v265*/, s32 offset:612 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v10 /*v266*/, s32 offset:616 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v11 /*v267*/, s32 offset:620 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v12 /*v268*/, s32 offset:624 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v13 /*v269*/, s32 offset:628 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v14 /*v270*/, s32 offset:632 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v15 /*v271*/, s32 offset:636 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v16 /*v272*/, s32 offset:640 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v17 /*v273*/, s32 offset:644 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v18 /*v274*/, s32 offset:648 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v19 /*v275*/, s32 offset:652 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v20 /*v276*/, s32 offset:656 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v21 /*v277*/, s32 offset:660 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v22 /*v278*/, s32 offset:664 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v23 /*v279*/, s32 offset:668 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v24 /*v280*/, s32 offset:672 +; 
GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v25 /*v281*/, s32 offset:676 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v26 /*v282*/, s32 offset:680 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v27 /*v283*/, s32 offset:684 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v28 /*v284*/, s32 offset:688 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v29 /*v285*/, s32 offset:692 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v30 /*v286*/, s32 offset:696 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v31 /*v287*/, s32 offset:700 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v32 /*v288*/, s32 offset:704 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v33 /*v289*/, s32 offset:708 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v34 /*v290*/, s32 offset:712 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v35 /*v291*/, s32 offset:716 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v36 /*v292*/, s32 offset:720 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v37 /*v293*/, s32 offset:724 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v38 /*v294*/, s32 offset:728 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v39 /*v295*/, s32 offset:732 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v40 /*v296*/, s32 offset:736 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v41 /*v297*/, s32 offset:740 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v42 /*v298*/, s32 offset:744 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v43 /*v299*/, s32 offset:748 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v44 /*v300*/, s32 offset:752 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v45 /*v301*/, s32 offset:756 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v46 /*v302*/, s32 offset:760 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v47 /*v303*/, s32 offset:764 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v48 /*v304*/, s32 offset:768 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v49 /*v305*/, s32 offset:772 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v50 /*v306*/, s32 offset:776 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v51 /*v307*/, s32 offset:780 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v52 /*v308*/, s32 offset:784 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v53 /*v309*/, s32 offset:788 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v54 /*v310*/, s32 offset:792 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v55 /*v311*/, s32 offset:796 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v56 /*v312*/, s32 offset:800 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v57 /*v313*/, s32 offset:804 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v58 /*v314*/, s32 offset:808 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v59 /*v315*/, s32 offset:812 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v60 /*v316*/, s32 offset:816 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v61 /*v317*/, s32 offset:820 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v62 /*v318*/, s32 offset:824 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v63 /*v319*/, s32 offset:828 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v64 /*v320*/, s32 offset:832 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v65 /*v321*/, s32 offset:836 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v66 /*v322*/, s32 offset:840 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v67 /*v323*/, s32 offset:844 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v68 /*v324*/, s32 offset:848 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v69 /*v325*/, s32 offset:852 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v70 
/*v326*/, s32 offset:856 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v71 /*v327*/, s32 offset:860 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v72 /*v328*/, s32 offset:864 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v73 /*v329*/, s32 offset:868 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v74 /*v330*/, s32 offset:872 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v75 /*v331*/, s32 offset:876 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v76 /*v332*/, s32 offset:880 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v77 /*v333*/, s32 offset:884 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v78 /*v334*/, s32 offset:888 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v79 /*v335*/, s32 offset:892 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v80 /*v336*/, s32 offset:896 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v81 /*v337*/, s32 offset:900 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v82 /*v338*/, s32 offset:904 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v83 /*v339*/, s32 offset:908 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v84 /*v340*/, s32 offset:912 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v85 /*v341*/, s32 offset:916 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v86 /*v342*/, s32 offset:920 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v87 /*v343*/, s32 offset:924 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v88 /*v344*/, s32 offset:928 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v89 /*v345*/, s32 offset:932 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v90 /*v346*/, s32 offset:936 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v91 /*v347*/, s32 offset:940 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v92 /*v348*/, s32 offset:944 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v93 /*v349*/, s32 offset:948 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v94 /*v350*/, s32 offset:952 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v95 /*v351*/, s32 offset:956 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v96 /*v352*/, s32 offset:960 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v97 /*v353*/, s32 offset:964 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v98 /*v354*/, s32 offset:968 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v99 /*v355*/, s32 offset:972 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v100 /*v356*/, s32 offset:976 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v101 /*v357*/, s32 offset:980 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v102 /*v358*/, s32 offset:984 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v103 /*v359*/, s32 offset:988 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v104 /*v360*/, s32 offset:992 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v105 /*v361*/, s32 offset:996 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v106 /*v362*/, s32 offset:1000 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v107 /*v363*/, s32 offset:1004 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v108 /*v364*/, s32 offset:1008 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v109 /*v365*/, s32 offset:1012 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v110 /*v366*/, s32 offset:1016 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v111 /*v367*/, s32 offset:1020 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v112 /*v368*/, s32 offset:1024 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v113 /*v369*/, s32 offset:1028 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v114 /*v370*/, s32 offset:1032 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v115 /*v371*/, s32 offset:1036 
+; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v116 /*v372*/, s32 offset:1040 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v117 /*v373*/, s32 offset:1044 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v118 /*v374*/, s32 offset:1048 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v119 /*v375*/, s32 offset:1052 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v120 /*v376*/, s32 offset:1056 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v121 /*v377*/, s32 offset:1060 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v122 /*v378*/, s32 offset:1064 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v123 /*v379*/, s32 offset:1068 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v124 /*v380*/, s32 offset:1072 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v125 /*v381*/, s32 offset:1076 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v126 /*v382*/, s32 offset:1080 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v127 /*v383*/, s32 offset:1084 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v128 /*v384*/, s32 offset:1088 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v129 /*v385*/, s32 offset:1092 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v130 /*v386*/, s32 offset:1096 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v131 /*v387*/, s32 offset:1100 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v132 /*v388*/, s32 offset:1104 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v133 /*v389*/, s32 offset:1108 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v134 /*v390*/, s32 offset:1112 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v135 /*v391*/, s32 offset:1116 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v136 /*v392*/, s32 offset:1120 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v137 /*v393*/, s32 offset:1124 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v138 /*v394*/, s32 offset:1128 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v139 /*v395*/, s32 offset:1132 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v140 /*v396*/, s32 offset:1136 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v141 /*v397*/, s32 offset:1140 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v142 /*v398*/, s32 offset:1144 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v143 /*v399*/, s32 offset:1148 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v144 /*v400*/, s32 offset:1152 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v145 /*v401*/, s32 offset:1156 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v146 /*v402*/, s32 offset:1160 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v147 /*v403*/, s32 offset:1164 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v148 /*v404*/, s32 offset:1168 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v149 /*v405*/, s32 offset:1172 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v150 /*v406*/, s32 offset:1176 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v151 /*v407*/, s32 offset:1180 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v152 /*v408*/, s32 offset:1184 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v153 /*v409*/, s32 offset:1188 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v154 /*v410*/, s32 offset:1192 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v155 /*v411*/, s32 offset:1196 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v156 /*v412*/, s32 offset:1200 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v157 /*v413*/, s32 offset:1204 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v158 /*v414*/, s32 offset:1208 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v159 /*v415*/, s32 offset:1212 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v160 /*v416*/, s32 offset:1216 
+; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v161 /*v417*/, s32 offset:1220 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v162 /*v418*/, s32 offset:1224 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v163 /*v419*/, s32 offset:1228 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v164 /*v420*/, s32 offset:1232 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v165 /*v421*/, s32 offset:1236 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v166 /*v422*/, s32 offset:1240 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v167 /*v423*/, s32 offset:1244 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v168 /*v424*/, s32 offset:1248 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v169 /*v425*/, s32 offset:1252 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v170 /*v426*/, s32 offset:1256 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v171 /*v427*/, s32 offset:1260 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v172 /*v428*/, s32 offset:1264 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v173 /*v429*/, s32 offset:1268 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v174 /*v430*/, s32 offset:1272 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v175 /*v431*/, s32 offset:1276 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v176 /*v432*/, s32 offset:1280 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v177 /*v433*/, s32 offset:1284 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v178 /*v434*/, s32 offset:1288 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v179 /*v435*/, s32 offset:1292 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v180 /*v436*/, s32 offset:1296 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v181 /*v437*/, s32 offset:1300 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v182 /*v438*/, s32 offset:1304 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v183 /*v439*/, s32 offset:1308 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v184 /*v440*/, s32 offset:1312 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v185 /*v441*/, s32 offset:1316 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v186 /*v442*/, s32 offset:1320 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v187 /*v443*/, s32 offset:1324 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v188 /*v444*/, s32 offset:1328 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v189 /*v445*/, s32 offset:1332 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v190 /*v446*/, s32 offset:1336 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v191 /*v447*/, s32 offset:1340 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v192 /*v448*/, s32 offset:1344 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v193 /*v449*/, s32 offset:1348 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v194 /*v450*/, s32 offset:1352 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v195 /*v451*/, s32 offset:1356 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v196 /*v452*/, s32 offset:1360 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v197 /*v453*/, s32 offset:1364 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v198 /*v454*/, s32 offset:1368 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v199 /*v455*/, s32 offset:1372 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v200 /*v456*/, s32 offset:1376 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v201 /*v457*/, s32 offset:1380 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v202 /*v458*/, s32 offset:1384 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v203 /*v459*/, s32 offset:1388 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v204 /*v460*/, s32 offset:1392 +; GFX1250-DAGISEL-NEXT: 
scratch_store_b32 off, v205 /*v461*/, s32 offset:1396 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v206 /*v462*/, s32 offset:1400 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v207 /*v463*/, s32 offset:1404 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v208 /*v464*/, s32 offset:1408 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v209 /*v465*/, s32 offset:1412 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v210 /*v466*/, s32 offset:1416 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v211 /*v467*/, s32 offset:1420 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v212 /*v468*/, s32 offset:1424 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v213 /*v469*/, s32 offset:1428 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v214 /*v470*/, s32 offset:1432 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v215 /*v471*/, s32 offset:1436 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v216 /*v472*/, s32 offset:1440 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v217 /*v473*/, s32 offset:1444 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v218 /*v474*/, s32 offset:1448 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v219 /*v475*/, s32 offset:1452 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v220 /*v476*/, s32 offset:1456 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v221 /*v477*/, s32 offset:1460 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v222 /*v478*/, s32 offset:1464 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v223 /*v479*/, s32 offset:1468 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v224 /*v480*/, s32 offset:1472 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v225 /*v481*/, s32 offset:1476 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v226 /*v482*/, s32 offset:1480 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v227 /*v483*/, s32 offset:1484 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v228 /*v484*/, s32 offset:1488 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v229 /*v485*/, s32 offset:1492 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v230 /*v486*/, s32 offset:1496 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v231 /*v487*/, s32 offset:1500 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v232 /*v488*/, s32 offset:1504 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v233 /*v489*/, s32 offset:1508 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v234 /*v490*/, s32 offset:1512 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v235 /*v491*/, s32 offset:1516 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v236 /*v492*/, s32 offset:1520 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v237 /*v493*/, s32 offset:1524 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v238 /*v494*/, s32 offset:1528 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v239 /*v495*/, s32 offset:1532 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v240 /*v496*/, s32 offset:1536 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v241 /*v497*/, s32 offset:1540 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v242 /*v498*/, s32 offset:1544 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v243 /*v499*/, s32 offset:1548 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v244 /*v500*/, s32 offset:1552 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v245 /*v501*/, s32 offset:1556 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v246 /*v502*/, s32 offset:1560 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v247 /*v503*/, s32 offset:1564 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v248 /*v504*/, s32 offset:1568 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v249 /*v505*/, 
s32 offset:1572 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v250 /*v506*/, s32 offset:1576 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v251 /*v507*/, s32 offset:1580 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v252 /*v508*/, s32 offset:1584 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v253 /*v509*/, s32 offset:1588 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v254 /*v510*/, s32 offset:1592 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v255 /*v511*/, s32 offset:1596 +; GFX1250-DAGISEL-NEXT: s_set_vgpr_msb 8 ; msbs: dst=0 src0=0 src1=2 src2=0 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v0 /*v512*/, s32 offset:1600 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v1 /*v513*/, s32 offset:1604 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v2 /*v514*/, s32 offset:1608 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v3 /*v515*/, s32 offset:1612 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v4 /*v516*/, s32 offset:1616 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v5 /*v517*/, s32 offset:1620 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v6 /*v518*/, s32 offset:1624 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v7 /*v519*/, s32 offset:1628 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v8 /*v520*/, s32 offset:1632 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v9 /*v521*/, s32 offset:1636 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v10 /*v522*/, s32 offset:1640 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v11 /*v523*/, s32 offset:1644 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v12 /*v524*/, s32 offset:1648 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v13 /*v525*/, s32 offset:1652 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v14 /*v526*/, s32 offset:1656 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v15 /*v527*/, s32 offset:1660 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v16 /*v528*/, s32 offset:1664 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v17 /*v529*/, s32 offset:1668 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v18 /*v530*/, s32 offset:1672 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v19 /*v531*/, s32 offset:1676 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v20 /*v532*/, s32 offset:1680 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v21 /*v533*/, s32 offset:1684 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v22 /*v534*/, s32 offset:1688 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v23 /*v535*/, s32 offset:1692 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v24 /*v536*/, s32 offset:1696 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v25 /*v537*/, s32 offset:1700 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v26 /*v538*/, s32 offset:1704 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v27 /*v539*/, s32 offset:1708 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v28 /*v540*/, s32 offset:1712 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v29 /*v541*/, s32 offset:1716 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v30 /*v542*/, s32 offset:1720 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v31 /*v543*/, s32 offset:1724 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v32 /*v544*/, s32 offset:1728 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v33 /*v545*/, s32 offset:1732 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v34 /*v546*/, s32 offset:1736 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v35 /*v547*/, s32 offset:1740 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v36 /*v548*/, s32 offset:1744 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v37 /*v549*/, s32 offset:1748 +; GFX1250-DAGISEL-NEXT: 
scratch_store_b32 off, v38 /*v550*/, s32 offset:1752 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v39 /*v551*/, s32 offset:1756 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v40 /*v552*/, s32 offset:1760 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v41 /*v553*/, s32 offset:1764 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v42 /*v554*/, s32 offset:1768 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v43 /*v555*/, s32 offset:1772 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v44 /*v556*/, s32 offset:1776 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v45 /*v557*/, s32 offset:1780 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v46 /*v558*/, s32 offset:1784 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v47 /*v559*/, s32 offset:1788 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v48 /*v560*/, s32 offset:1792 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v49 /*v561*/, s32 offset:1796 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v50 /*v562*/, s32 offset:1800 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v51 /*v563*/, s32 offset:1804 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v52 /*v564*/, s32 offset:1808 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v53 /*v565*/, s32 offset:1812 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v54 /*v566*/, s32 offset:1816 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v55 /*v567*/, s32 offset:1820 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v56 /*v568*/, s32 offset:1824 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v57 /*v569*/, s32 offset:1828 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v58 /*v570*/, s32 offset:1832 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v59 /*v571*/, s32 offset:1836 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v60 /*v572*/, s32 offset:1840 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v61 /*v573*/, s32 offset:1844 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v62 /*v574*/, s32 offset:1848 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v63 /*v575*/, s32 offset:1852 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v64 /*v576*/, s32 offset:1856 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v65 /*v577*/, s32 offset:1860 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v66 /*v578*/, s32 offset:1864 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v67 /*v579*/, s32 offset:1868 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v68 /*v580*/, s32 offset:1872 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v69 /*v581*/, s32 offset:1876 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v70 /*v582*/, s32 offset:1880 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v71 /*v583*/, s32 offset:1884 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v72 /*v584*/, s32 offset:1888 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v73 /*v585*/, s32 offset:1892 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v74 /*v586*/, s32 offset:1896 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v75 /*v587*/, s32 offset:1900 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v76 /*v588*/, s32 offset:1904 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v77 /*v589*/, s32 offset:1908 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v78 /*v590*/, s32 offset:1912 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v79 /*v591*/, s32 offset:1916 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v80 /*v592*/, s32 offset:1920 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v81 /*v593*/, s32 offset:1924 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v82 /*v594*/, s32 offset:1928 +; GFX1250-DAGISEL-NEXT: 
scratch_store_b32 off, v83 /*v595*/, s32 offset:1932 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v84 /*v596*/, s32 offset:1936 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v85 /*v597*/, s32 offset:1940 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v86 /*v598*/, s32 offset:1944 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v87 /*v599*/, s32 offset:1948 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v88 /*v600*/, s32 offset:1952 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v89 /*v601*/, s32 offset:1956 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v90 /*v602*/, s32 offset:1960 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v91 /*v603*/, s32 offset:1964 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v92 /*v604*/, s32 offset:1968 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v93 /*v605*/, s32 offset:1972 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v94 /*v606*/, s32 offset:1976 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v95 /*v607*/, s32 offset:1980 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v96 /*v608*/, s32 offset:1984 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v97 /*v609*/, s32 offset:1988 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v98 /*v610*/, s32 offset:1992 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v99 /*v611*/, s32 offset:1996 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v100 /*v612*/, s32 offset:2000 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v101 /*v613*/, s32 offset:2004 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v102 /*v614*/, s32 offset:2008 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v103 /*v615*/, s32 offset:2012 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v104 /*v616*/, s32 offset:2016 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v105 /*v617*/, s32 offset:2020 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v106 /*v618*/, s32 offset:2024 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v107 /*v619*/, s32 offset:2028 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v108 /*v620*/, s32 offset:2032 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v109 /*v621*/, s32 offset:2036 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v110 /*v622*/, s32 offset:2040 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v111 /*v623*/, s32 offset:2044 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v112 /*v624*/, s32 offset:2048 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v113 /*v625*/, s32 offset:2052 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v114 /*v626*/, s32 offset:2056 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v115 /*v627*/, s32 offset:2060 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v116 /*v628*/, s32 offset:2064 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v117 /*v629*/, s32 offset:2068 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v118 /*v630*/, s32 offset:2072 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v119 /*v631*/, s32 offset:2076 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v120 /*v632*/, s32 offset:2080 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v121 /*v633*/, s32 offset:2084 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v122 /*v634*/, s32 offset:2088 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v123 /*v635*/, s32 offset:2092 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v124 /*v636*/, s32 offset:2096 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v125 /*v637*/, s32 offset:2100 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v126 /*v638*/, s32 offset:2104 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v127 /*v639*/, s32 offset:2108 +; 
GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v128 /*v640*/, s32 offset:2112 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v129 /*v641*/, s32 offset:2116 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v130 /*v642*/, s32 offset:2120 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v131 /*v643*/, s32 offset:2124 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v132 /*v644*/, s32 offset:2128 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v133 /*v645*/, s32 offset:2132 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v134 /*v646*/, s32 offset:2136 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v135 /*v647*/, s32 offset:2140 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v136 /*v648*/, s32 offset:2144 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v137 /*v649*/, s32 offset:2148 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v138 /*v650*/, s32 offset:2152 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v139 /*v651*/, s32 offset:2156 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v140 /*v652*/, s32 offset:2160 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v141 /*v653*/, s32 offset:2164 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v142 /*v654*/, s32 offset:2168 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v143 /*v655*/, s32 offset:2172 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v144 /*v656*/, s32 offset:2176 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v145 /*v657*/, s32 offset:2180 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v146 /*v658*/, s32 offset:2184 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v147 /*v659*/, s32 offset:2188 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v148 /*v660*/, s32 offset:2192 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v149 /*v661*/, s32 offset:2196 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v150 /*v662*/, s32 offset:2200 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v151 /*v663*/, s32 offset:2204 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v152 /*v664*/, s32 offset:2208 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v153 /*v665*/, s32 offset:2212 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v154 /*v666*/, s32 offset:2216 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v155 /*v667*/, s32 offset:2220 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v156 /*v668*/, s32 offset:2224 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v157 /*v669*/, s32 offset:2228 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v158 /*v670*/, s32 offset:2232 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v159 /*v671*/, s32 offset:2236 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v160 /*v672*/, s32 offset:2240 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v161 /*v673*/, s32 offset:2244 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v162 /*v674*/, s32 offset:2248 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v163 /*v675*/, s32 offset:2252 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v164 /*v676*/, s32 offset:2256 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v165 /*v677*/, s32 offset:2260 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v166 /*v678*/, s32 offset:2264 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v167 /*v679*/, s32 offset:2268 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v168 /*v680*/, s32 offset:2272 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v169 /*v681*/, s32 offset:2276 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v170 /*v682*/, s32 offset:2280 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v171 /*v683*/, s32 offset:2284 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 
off, v172 /*v684*/, s32 offset:2288 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v173 /*v685*/, s32 offset:2292 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v174 /*v686*/, s32 offset:2296 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v175 /*v687*/, s32 offset:2300 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v176 /*v688*/, s32 offset:2304 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v177 /*v689*/, s32 offset:2308 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v178 /*v690*/, s32 offset:2312 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v179 /*v691*/, s32 offset:2316 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v180 /*v692*/, s32 offset:2320 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v181 /*v693*/, s32 offset:2324 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v182 /*v694*/, s32 offset:2328 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v183 /*v695*/, s32 offset:2332 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v184 /*v696*/, s32 offset:2336 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v185 /*v697*/, s32 offset:2340 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v186 /*v698*/, s32 offset:2344 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v187 /*v699*/, s32 offset:2348 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v188 /*v700*/, s32 offset:2352 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v189 /*v701*/, s32 offset:2356 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v190 /*v702*/, s32 offset:2360 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v191 /*v703*/, s32 offset:2364 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v192 /*v704*/, s32 offset:2368 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v193 /*v705*/, s32 offset:2372 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v194 /*v706*/, s32 offset:2376 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v195 /*v707*/, s32 offset:2380 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v196 /*v708*/, s32 offset:2384 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v197 /*v709*/, s32 offset:2388 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v198 /*v710*/, s32 offset:2392 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v199 /*v711*/, s32 offset:2396 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v200 /*v712*/, s32 offset:2400 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v201 /*v713*/, s32 offset:2404 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v202 /*v714*/, s32 offset:2408 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v203 /*v715*/, s32 offset:2412 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v204 /*v716*/, s32 offset:2416 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v205 /*v717*/, s32 offset:2420 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v206 /*v718*/, s32 offset:2424 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v207 /*v719*/, s32 offset:2428 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v208 /*v720*/, s32 offset:2432 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v209 /*v721*/, s32 offset:2436 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v210 /*v722*/, s32 offset:2440 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v211 /*v723*/, s32 offset:2444 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v212 /*v724*/, s32 offset:2448 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v213 /*v725*/, s32 offset:2452 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v214 /*v726*/, s32 offset:2456 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v215 /*v727*/, s32 offset:2460 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v216 /*v728*/, s32 offset:2464 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 
off, v217 /*v729*/, s32 offset:2468 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v218 /*v730*/, s32 offset:2472 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v219 /*v731*/, s32 offset:2476 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v220 /*v732*/, s32 offset:2480 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v221 /*v733*/, s32 offset:2484 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v222 /*v734*/, s32 offset:2488 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v223 /*v735*/, s32 offset:2492 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v224 /*v736*/, s32 offset:2496 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v225 /*v737*/, s32 offset:2500 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v226 /*v738*/, s32 offset:2504 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v227 /*v739*/, s32 offset:2508 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v228 /*v740*/, s32 offset:2512 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v229 /*v741*/, s32 offset:2516 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v230 /*v742*/, s32 offset:2520 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v231 /*v743*/, s32 offset:2524 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v232 /*v744*/, s32 offset:2528 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v233 /*v745*/, s32 offset:2532 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v234 /*v746*/, s32 offset:2536 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v235 /*v747*/, s32 offset:2540 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v236 /*v748*/, s32 offset:2544 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v237 /*v749*/, s32 offset:2548 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v238 /*v750*/, s32 offset:2552 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v239 /*v751*/, s32 offset:2556 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v240 /*v752*/, s32 offset:2560 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v241 /*v753*/, s32 offset:2564 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v242 /*v754*/, s32 offset:2568 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v243 /*v755*/, s32 offset:2572 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v244 /*v756*/, s32 offset:2576 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v245 /*v757*/, s32 offset:2580 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v246 /*v758*/, s32 offset:2584 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v247 /*v759*/, s32 offset:2588 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v248 /*v760*/, s32 offset:2592 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v249 /*v761*/, s32 offset:2596 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v250 /*v762*/, s32 offset:2600 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v251 /*v763*/, s32 offset:2604 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v252 /*v764*/, s32 offset:2608 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v253 /*v765*/, s32 offset:2612 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v254 /*v766*/, s32 offset:2616 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v255 /*v767*/, s32 offset:2620 +; GFX1250-DAGISEL-NEXT: s_set_vgpr_msb 12 ; msbs: dst=0 src0=0 src1=3 src2=0 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v0 /*v768*/, s32 offset:2624 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v1 /*v769*/, s32 offset:2628 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v2 /*v770*/, s32 offset:2632 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v3 /*v771*/, s32 offset:2636 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v4 /*v772*/, s32 offset:2640 +; 
GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v5 /*v773*/, s32 offset:2644 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v6 /*v774*/, s32 offset:2648 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v7 /*v775*/, s32 offset:2652 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v8 /*v776*/, s32 offset:2656 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v9 /*v777*/, s32 offset:2660 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v10 /*v778*/, s32 offset:2664 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v11 /*v779*/, s32 offset:2668 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v12 /*v780*/, s32 offset:2672 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v13 /*v781*/, s32 offset:2676 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v14 /*v782*/, s32 offset:2680 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v15 /*v783*/, s32 offset:2684 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v16 /*v784*/, s32 offset:2688 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v17 /*v785*/, s32 offset:2692 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v18 /*v786*/, s32 offset:2696 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v19 /*v787*/, s32 offset:2700 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v20 /*v788*/, s32 offset:2704 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v21 /*v789*/, s32 offset:2708 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v22 /*v790*/, s32 offset:2712 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v23 /*v791*/, s32 offset:2716 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v24 /*v792*/, s32 offset:2720 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v25 /*v793*/, s32 offset:2724 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v26 /*v794*/, s32 offset:2728 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v27 /*v795*/, s32 offset:2732 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v28 /*v796*/, s32 offset:2736 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v29 /*v797*/, s32 offset:2740 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v30 /*v798*/, s32 offset:2744 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v31 /*v799*/, s32 offset:2748 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v32 /*v800*/, s32 offset:2752 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v33 /*v801*/, s32 offset:2756 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v34 /*v802*/, s32 offset:2760 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v35 /*v803*/, s32 offset:2764 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v36 /*v804*/, s32 offset:2768 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v37 /*v805*/, s32 offset:2772 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v38 /*v806*/, s32 offset:2776 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v39 /*v807*/, s32 offset:2780 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v40 /*v808*/, s32 offset:2784 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v41 /*v809*/, s32 offset:2788 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v42 /*v810*/, s32 offset:2792 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v43 /*v811*/, s32 offset:2796 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v44 /*v812*/, s32 offset:2800 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v45 /*v813*/, s32 offset:2804 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v46 /*v814*/, s32 offset:2808 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v47 /*v815*/, s32 offset:2812 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v48 /*v816*/, s32 offset:2816 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v49 /*v817*/, s32 offset:2820 +; 
GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v50 /*v818*/, s32 offset:2824 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v51 /*v819*/, s32 offset:2828 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v52 /*v820*/, s32 offset:2832 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v53 /*v821*/, s32 offset:2836 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v54 /*v822*/, s32 offset:2840 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v55 /*v823*/, s32 offset:2844 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v56 /*v824*/, s32 offset:2848 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v57 /*v825*/, s32 offset:2852 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v58 /*v826*/, s32 offset:2856 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v59 /*v827*/, s32 offset:2860 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v60 /*v828*/, s32 offset:2864 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v61 /*v829*/, s32 offset:2868 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v62 /*v830*/, s32 offset:2872 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v63 /*v831*/, s32 offset:2876 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v64 /*v832*/, s32 offset:2880 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v65 /*v833*/, s32 offset:2884 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v66 /*v834*/, s32 offset:2888 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v67 /*v835*/, s32 offset:2892 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v68 /*v836*/, s32 offset:2896 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v69 /*v837*/, s32 offset:2900 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v70 /*v838*/, s32 offset:2904 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v71 /*v839*/, s32 offset:2908 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v72 /*v840*/, s32 offset:2912 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v73 /*v841*/, s32 offset:2916 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v74 /*v842*/, s32 offset:2920 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v75 /*v843*/, s32 offset:2924 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v76 /*v844*/, s32 offset:2928 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v77 /*v845*/, s32 offset:2932 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v78 /*v846*/, s32 offset:2936 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v79 /*v847*/, s32 offset:2940 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v80 /*v848*/, s32 offset:2944 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v81 /*v849*/, s32 offset:2948 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v82 /*v850*/, s32 offset:2952 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v83 /*v851*/, s32 offset:2956 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v84 /*v852*/, s32 offset:2960 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v85 /*v853*/, s32 offset:2964 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v86 /*v854*/, s32 offset:2968 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v87 /*v855*/, s32 offset:2972 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v88 /*v856*/, s32 offset:2976 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v89 /*v857*/, s32 offset:2980 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v90 /*v858*/, s32 offset:2984 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v91 /*v859*/, s32 offset:2988 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v92 /*v860*/, s32 offset:2992 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v93 /*v861*/, s32 offset:2996 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v94 /*v862*/, s32 offset:3000 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, 
v95 /*v863*/, s32 offset:3004 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v96 /*v864*/, s32 offset:3008 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v97 /*v865*/, s32 offset:3012 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v98 /*v866*/, s32 offset:3016 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v99 /*v867*/, s32 offset:3020 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v100 /*v868*/, s32 offset:3024 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v101 /*v869*/, s32 offset:3028 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v102 /*v870*/, s32 offset:3032 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v103 /*v871*/, s32 offset:3036 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v104 /*v872*/, s32 offset:3040 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v105 /*v873*/, s32 offset:3044 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v106 /*v874*/, s32 offset:3048 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v107 /*v875*/, s32 offset:3052 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v108 /*v876*/, s32 offset:3056 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v109 /*v877*/, s32 offset:3060 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v110 /*v878*/, s32 offset:3064 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v111 /*v879*/, s32 offset:3068 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v112 /*v880*/, s32 offset:3072 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v113 /*v881*/, s32 offset:3076 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v114 /*v882*/, s32 offset:3080 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v115 /*v883*/, s32 offset:3084 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v116 /*v884*/, s32 offset:3088 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v117 /*v885*/, s32 offset:3092 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v118 /*v886*/, s32 offset:3096 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v119 /*v887*/, s32 offset:3100 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v120 /*v888*/, s32 offset:3104 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v121 /*v889*/, s32 offset:3108 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v122 /*v890*/, s32 offset:3112 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v123 /*v891*/, s32 offset:3116 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v124 /*v892*/, s32 offset:3120 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v125 /*v893*/, s32 offset:3124 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v126 /*v894*/, s32 offset:3128 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v127 /*v895*/, s32 offset:3132 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v128 /*v896*/, s32 offset:3136 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v129 /*v897*/, s32 offset:3140 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v130 /*v898*/, s32 offset:3144 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v131 /*v899*/, s32 offset:3148 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v132 /*v900*/, s32 offset:3152 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v133 /*v901*/, s32 offset:3156 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v134 /*v902*/, s32 offset:3160 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v135 /*v903*/, s32 offset:3164 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v136 /*v904*/, s32 offset:3168 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v137 /*v905*/, s32 offset:3172 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v138 /*v906*/, s32 offset:3176 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v139 /*v907*/, s32 offset:3180 +; 
GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v140 /*v908*/, s32 offset:3184 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v141 /*v909*/, s32 offset:3188 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v142 /*v910*/, s32 offset:3192 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v143 /*v911*/, s32 offset:3196 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v144 /*v912*/, s32 offset:3200 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v145 /*v913*/, s32 offset:3204 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v146 /*v914*/, s32 offset:3208 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v147 /*v915*/, s32 offset:3212 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v148 /*v916*/, s32 offset:3216 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v149 /*v917*/, s32 offset:3220 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v150 /*v918*/, s32 offset:3224 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v151 /*v919*/, s32 offset:3228 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v152 /*v920*/, s32 offset:3232 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v153 /*v921*/, s32 offset:3236 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v154 /*v922*/, s32 offset:3240 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v155 /*v923*/, s32 offset:3244 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v156 /*v924*/, s32 offset:3248 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v157 /*v925*/, s32 offset:3252 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v158 /*v926*/, s32 offset:3256 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v159 /*v927*/, s32 offset:3260 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v160 /*v928*/, s32 offset:3264 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v161 /*v929*/, s32 offset:3268 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v162 /*v930*/, s32 offset:3272 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v163 /*v931*/, s32 offset:3276 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v164 /*v932*/, s32 offset:3280 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v165 /*v933*/, s32 offset:3284 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v166 /*v934*/, s32 offset:3288 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v167 /*v935*/, s32 offset:3292 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v168 /*v936*/, s32 offset:3296 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v169 /*v937*/, s32 offset:3300 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v170 /*v938*/, s32 offset:3304 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v171 /*v939*/, s32 offset:3308 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v172 /*v940*/, s32 offset:3312 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v173 /*v941*/, s32 offset:3316 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v174 /*v942*/, s32 offset:3320 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v175 /*v943*/, s32 offset:3324 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v176 /*v944*/, s32 offset:3328 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v177 /*v945*/, s32 offset:3332 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v178 /*v946*/, s32 offset:3336 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v179 /*v947*/, s32 offset:3340 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v180 /*v948*/, s32 offset:3344 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v181 /*v949*/, s32 offset:3348 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v182 /*v950*/, s32 offset:3352 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v183 /*v951*/, s32 offset:3356 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 
off, v184 /*v952*/, s32 offset:3360 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v185 /*v953*/, s32 offset:3364 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v186 /*v954*/, s32 offset:3368 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v187 /*v955*/, s32 offset:3372 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v188 /*v956*/, s32 offset:3376 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v189 /*v957*/, s32 offset:3380 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v190 /*v958*/, s32 offset:3384 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v191 /*v959*/, s32 offset:3388 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v192 /*v960*/, s32 offset:3392 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v193 /*v961*/, s32 offset:3396 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v194 /*v962*/, s32 offset:3400 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v195 /*v963*/, s32 offset:3404 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v196 /*v964*/, s32 offset:3408 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v197 /*v965*/, s32 offset:3412 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v198 /*v966*/, s32 offset:3416 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v199 /*v967*/, s32 offset:3420 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v200 /*v968*/, s32 offset:3424 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v201 /*v969*/, s32 offset:3428 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v202 /*v970*/, s32 offset:3432 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v203 /*v971*/, s32 offset:3436 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v204 /*v972*/, s32 offset:3440 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v205 /*v973*/, s32 offset:3444 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v206 /*v974*/, s32 offset:3448 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v207 /*v975*/, s32 offset:3452 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v208 /*v976*/, s32 offset:3456 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v209 /*v977*/, s32 offset:3460 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v210 /*v978*/, s32 offset:3464 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v211 /*v979*/, s32 offset:3468 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v212 /*v980*/, s32 offset:3472 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v213 /*v981*/, s32 offset:3476 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v214 /*v982*/, s32 offset:3480 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v215 /*v983*/, s32 offset:3484 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v216 /*v984*/, s32 offset:3488 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v217 /*v985*/, s32 offset:3492 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v218 /*v986*/, s32 offset:3496 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v219 /*v987*/, s32 offset:3500 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v220 /*v988*/, s32 offset:3504 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v221 /*v989*/, s32 offset:3508 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v222 /*v990*/, s32 offset:3512 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v223 /*v991*/, s32 offset:3516 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v224 /*v992*/, s32 offset:3520 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v225 /*v993*/, s32 offset:3524 +; GFX1250-DAGISEL-NEXT: s_clause 0x1d +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v226 /*v994*/, s32 offset:3528 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v227 /*v995*/, s32 offset:3532 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v228 /*v996*/, s32 offset:3536 +; 
GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v229 /*v997*/, s32 offset:3540 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v230 /*v998*/, s32 offset:3544 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v231 /*v999*/, s32 offset:3548 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v232 /*v1000*/, s32 offset:3552 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v233 /*v1001*/, s32 offset:3556 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v234 /*v1002*/, s32 offset:3560 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v235 /*v1003*/, s32 offset:3564 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v236 /*v1004*/, s32 offset:3568 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v237 /*v1005*/, s32 offset:3572 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v238 /*v1006*/, s32 offset:3576 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v239 /*v1007*/, s32 offset:3580 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v240 /*v1008*/, s32 offset:3584 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v241 /*v1009*/, s32 offset:3588 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v242 /*v1010*/, s32 offset:3592 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v243 /*v1011*/, s32 offset:3596 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v244 /*v1012*/, s32 offset:3600 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v245 /*v1013*/, s32 offset:3604 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v246 /*v1014*/, s32 offset:3608 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v247 /*v1015*/, s32 offset:3612 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v248 /*v1016*/, s32 offset:3616 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v249 /*v1017*/, s32 offset:3620 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v250 /*v1018*/, s32 offset:3624 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v251 /*v1019*/, s32 offset:3628 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v252 /*v1020*/, s32 offset:3632 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v253 /*v1021*/, s32 offset:3636 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v254 /*v1022*/, s32 offset:3640 +; GFX1250-DAGISEL-NEXT: scratch_store_b32 off, v255 /*v1023*/, s32 offset:3644 +; GFX1250-DAGISEL-NEXT: s_wait_xcnt 0x0 +; GFX1250-DAGISEL-NEXT: s_mov_b32 exec_lo, -1 +; GFX1250-DAGISEL-NEXT: v_mov_b32_e32 v2, v0 +; GFX1250-DAGISEL-NEXT: s_mov_b64 s[36:37], gfx_callee@abs64 +; GFX1250-DAGISEL-NEXT: s_set_vgpr_msb 0 ; msbs: dst=0 src0=0 src1=0 src2=0 +; GFX1250-DAGISEL-NEXT: v_swap_b32 v0, v1 +; GFX1250-DAGISEL-NEXT: s_xor_b32 exec_lo, s0, -1 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v0, off, s32 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v1, off, s32 offset:4 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v2, off, s32 offset:8 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v3, off, s32 offset:12 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v4, off, s32 offset:16 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v5, off, s32 offset:20 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v6, off, s32 offset:24 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v7, off, s32 offset:28 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v8, off, s32 offset:32 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v9, off, s32 offset:36 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v10, off, s32 offset:40 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v11, off, s32 offset:44 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v12, off, s32 offset:48 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v13, off, s32 offset:52 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v14, off, s32 offset:56 +; GFX1250-DAGISEL-NEXT: 
scratch_load_b32 v15, off, s32 offset:60 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v16, off, s32 offset:64 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v17, off, s32 offset:68 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v18, off, s32 offset:72 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v19, off, s32 offset:76 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v20, off, s32 offset:80 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v21, off, s32 offset:84 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v22, off, s32 offset:88 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v23, off, s32 offset:92 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v24, off, s32 offset:96 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v25, off, s32 offset:100 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v26, off, s32 offset:104 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v27, off, s32 offset:108 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v28, off, s32 offset:112 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v29, off, s32 offset:116 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v30, off, s32 offset:120 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v31, off, s32 offset:124 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v32, off, s32 offset:128 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v33, off, s32 offset:132 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v34, off, s32 offset:136 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v35, off, s32 offset:140 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v36, off, s32 offset:144 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v37, off, s32 offset:148 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v38, off, s32 offset:152 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v39, off, s32 offset:156 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v48, off, s32 offset:160 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v49, off, s32 offset:164 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v50, off, s32 offset:168 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v51, off, s32 offset:172 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v52, off, s32 offset:176 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v53, off, s32 offset:180 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v54, off, s32 offset:184 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v55, off, s32 offset:188 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v64, off, s32 offset:192 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v65, off, s32 offset:196 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v66, off, s32 offset:200 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v67, off, s32 offset:204 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v68, off, s32 offset:208 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v69, off, s32 offset:212 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v70, off, s32 offset:216 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v71, off, s32 offset:220 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v80, off, s32 offset:224 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v81, off, s32 offset:228 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v82, off, s32 offset:232 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v83, off, s32 offset:236 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v84, off, s32 offset:240 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v85, off, s32 offset:244 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v86, off, s32 offset:248 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v87, off, s32 offset:252 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v96, off, s32 offset:256 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v97, off, s32 offset:260 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v98, off, s32 offset:264 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v99, off, s32 offset:268 
+; GFX1250-DAGISEL-NEXT: scratch_load_b32 v100, off, s32 offset:272 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v101, off, s32 offset:276 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v102, off, s32 offset:280 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v103, off, s32 offset:284 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v112, off, s32 offset:288 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v113, off, s32 offset:292 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v114, off, s32 offset:296 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v115, off, s32 offset:300 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v116, off, s32 offset:304 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v117, off, s32 offset:308 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v118, off, s32 offset:312 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v119, off, s32 offset:316 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v128, off, s32 offset:320 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v129, off, s32 offset:324 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v130, off, s32 offset:328 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v131, off, s32 offset:332 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v132, off, s32 offset:336 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v133, off, s32 offset:340 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v134, off, s32 offset:344 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v135, off, s32 offset:348 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v144, off, s32 offset:352 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v145, off, s32 offset:356 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v146, off, s32 offset:360 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v147, off, s32 offset:364 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v148, off, s32 offset:368 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v149, off, s32 offset:372 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v150, off, s32 offset:376 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v151, off, s32 offset:380 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v160, off, s32 offset:384 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v161, off, s32 offset:388 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v162, off, s32 offset:392 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v163, off, s32 offset:396 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v164, off, s32 offset:400 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v165, off, s32 offset:404 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v166, off, s32 offset:408 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v167, off, s32 offset:412 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v176, off, s32 offset:416 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v177, off, s32 offset:420 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v178, off, s32 offset:424 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v179, off, s32 offset:428 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v180, off, s32 offset:432 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v181, off, s32 offset:436 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v182, off, s32 offset:440 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v183, off, s32 offset:444 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v192, off, s32 offset:448 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v193, off, s32 offset:452 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v194, off, s32 offset:456 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v195, off, s32 offset:460 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v196, off, s32 offset:464 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v197, off, s32 offset:468 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v198, off, s32 offset:472 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v199, off, s32 offset:476 +; 
GFX1250-DAGISEL-NEXT: scratch_load_b32 v208, off, s32 offset:480 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v209, off, s32 offset:484 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v210, off, s32 offset:488 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v211, off, s32 offset:492 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v212, off, s32 offset:496 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v213, off, s32 offset:500 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v214, off, s32 offset:504 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v215, off, s32 offset:508 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v224, off, s32 offset:512 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v225, off, s32 offset:516 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v226, off, s32 offset:520 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v227, off, s32 offset:524 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v228, off, s32 offset:528 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v229, off, s32 offset:532 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v230, off, s32 offset:536 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v231, off, s32 offset:540 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v240, off, s32 offset:544 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v241, off, s32 offset:548 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v242, off, s32 offset:552 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v243, off, s32 offset:556 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v244, off, s32 offset:560 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v245, off, s32 offset:564 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v246, off, s32 offset:568 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v247, off, s32 offset:572 +; GFX1250-DAGISEL-NEXT: s_set_vgpr_msb 64 ; msbs: dst=1 src0=0 src1=0 src2=0 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v0 /*v256*/, off, s32 offset:576 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v1 /*v257*/, off, s32 offset:580 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v2 /*v258*/, off, s32 offset:584 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v3 /*v259*/, off, s32 offset:588 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v4 /*v260*/, off, s32 offset:592 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v5 /*v261*/, off, s32 offset:596 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v6 /*v262*/, off, s32 offset:600 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v7 /*v263*/, off, s32 offset:604 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v8 /*v264*/, off, s32 offset:608 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v9 /*v265*/, off, s32 offset:612 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v10 /*v266*/, off, s32 offset:616 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v11 /*v267*/, off, s32 offset:620 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v12 /*v268*/, off, s32 offset:624 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v13 /*v269*/, off, s32 offset:628 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v14 /*v270*/, off, s32 offset:632 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v15 /*v271*/, off, s32 offset:636 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v16 /*v272*/, off, s32 offset:640 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v17 /*v273*/, off, s32 offset:644 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v18 /*v274*/, off, s32 offset:648 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v19 /*v275*/, off, s32 offset:652 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v20 /*v276*/, off, s32 offset:656 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v21 /*v277*/, off, s32 offset:660 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v22 /*v278*/, off, s32 offset:664 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v23 /*v279*/, off, s32 
offset:668 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v24 /*v280*/, off, s32 offset:672 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v25 /*v281*/, off, s32 offset:676 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v26 /*v282*/, off, s32 offset:680 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v27 /*v283*/, off, s32 offset:684 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v28 /*v284*/, off, s32 offset:688 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v29 /*v285*/, off, s32 offset:692 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v30 /*v286*/, off, s32 offset:696 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v31 /*v287*/, off, s32 offset:700 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v32 /*v288*/, off, s32 offset:704 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v33 /*v289*/, off, s32 offset:708 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v34 /*v290*/, off, s32 offset:712 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v35 /*v291*/, off, s32 offset:716 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v36 /*v292*/, off, s32 offset:720 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v37 /*v293*/, off, s32 offset:724 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v38 /*v294*/, off, s32 offset:728 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v39 /*v295*/, off, s32 offset:732 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v40 /*v296*/, off, s32 offset:736 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v41 /*v297*/, off, s32 offset:740 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v42 /*v298*/, off, s32 offset:744 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v43 /*v299*/, off, s32 offset:748 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v44 /*v300*/, off, s32 offset:752 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v45 /*v301*/, off, s32 offset:756 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v46 /*v302*/, off, s32 offset:760 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v47 /*v303*/, off, s32 offset:764 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v48 /*v304*/, off, s32 offset:768 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v49 /*v305*/, off, s32 offset:772 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v50 /*v306*/, off, s32 offset:776 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v51 /*v307*/, off, s32 offset:780 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v52 /*v308*/, off, s32 offset:784 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v53 /*v309*/, off, s32 offset:788 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v54 /*v310*/, off, s32 offset:792 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v55 /*v311*/, off, s32 offset:796 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v56 /*v312*/, off, s32 offset:800 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v57 /*v313*/, off, s32 offset:804 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v58 /*v314*/, off, s32 offset:808 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v59 /*v315*/, off, s32 offset:812 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v60 /*v316*/, off, s32 offset:816 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v61 /*v317*/, off, s32 offset:820 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v62 /*v318*/, off, s32 offset:824 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v63 /*v319*/, off, s32 offset:828 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v64 /*v320*/, off, s32 offset:832 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v65 /*v321*/, off, s32 offset:836 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v66 /*v322*/, off, s32 offset:840 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v67 /*v323*/, off, s32 offset:844 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v68 /*v324*/, off, s32 offset:848 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v69 /*v325*/, off, s32 offset:852 +; 
GFX1250-DAGISEL-NEXT: scratch_load_b32 v70 /*v326*/, off, s32 offset:856 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v71 /*v327*/, off, s32 offset:860 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v72 /*v328*/, off, s32 offset:864 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v73 /*v329*/, off, s32 offset:868 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v74 /*v330*/, off, s32 offset:872 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v75 /*v331*/, off, s32 offset:876 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v76 /*v332*/, off, s32 offset:880 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v77 /*v333*/, off, s32 offset:884 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v78 /*v334*/, off, s32 offset:888 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v79 /*v335*/, off, s32 offset:892 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v80 /*v336*/, off, s32 offset:896 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v81 /*v337*/, off, s32 offset:900 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v82 /*v338*/, off, s32 offset:904 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v83 /*v339*/, off, s32 offset:908 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v84 /*v340*/, off, s32 offset:912 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v85 /*v341*/, off, s32 offset:916 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v86 /*v342*/, off, s32 offset:920 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v87 /*v343*/, off, s32 offset:924 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v88 /*v344*/, off, s32 offset:928 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v89 /*v345*/, off, s32 offset:932 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v90 /*v346*/, off, s32 offset:936 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v91 /*v347*/, off, s32 offset:940 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v92 /*v348*/, off, s32 offset:944 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v93 /*v349*/, off, s32 offset:948 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v94 /*v350*/, off, s32 offset:952 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v95 /*v351*/, off, s32 offset:956 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v96 /*v352*/, off, s32 offset:960 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v97 /*v353*/, off, s32 offset:964 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v98 /*v354*/, off, s32 offset:968 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v99 /*v355*/, off, s32 offset:972 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v100 /*v356*/, off, s32 offset:976 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v101 /*v357*/, off, s32 offset:980 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v102 /*v358*/, off, s32 offset:984 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v103 /*v359*/, off, s32 offset:988 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v104 /*v360*/, off, s32 offset:992 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v105 /*v361*/, off, s32 offset:996 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v106 /*v362*/, off, s32 offset:1000 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v107 /*v363*/, off, s32 offset:1004 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v108 /*v364*/, off, s32 offset:1008 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v109 /*v365*/, off, s32 offset:1012 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v110 /*v366*/, off, s32 offset:1016 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v111 /*v367*/, off, s32 offset:1020 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v112 /*v368*/, off, s32 offset:1024 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v113 /*v369*/, off, s32 offset:1028 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v114 /*v370*/, off, s32 offset:1032 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v115 /*v371*/, off, s32 
offset:1036 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v116 /*v372*/, off, s32 offset:1040 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v117 /*v373*/, off, s32 offset:1044 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v118 /*v374*/, off, s32 offset:1048 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v119 /*v375*/, off, s32 offset:1052 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v120 /*v376*/, off, s32 offset:1056 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v121 /*v377*/, off, s32 offset:1060 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v122 /*v378*/, off, s32 offset:1064 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v123 /*v379*/, off, s32 offset:1068 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v124 /*v380*/, off, s32 offset:1072 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v125 /*v381*/, off, s32 offset:1076 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v126 /*v382*/, off, s32 offset:1080 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v127 /*v383*/, off, s32 offset:1084 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v128 /*v384*/, off, s32 offset:1088 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v129 /*v385*/, off, s32 offset:1092 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v130 /*v386*/, off, s32 offset:1096 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v131 /*v387*/, off, s32 offset:1100 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v132 /*v388*/, off, s32 offset:1104 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v133 /*v389*/, off, s32 offset:1108 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v134 /*v390*/, off, s32 offset:1112 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v135 /*v391*/, off, s32 offset:1116 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v136 /*v392*/, off, s32 offset:1120 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v137 /*v393*/, off, s32 offset:1124 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v138 /*v394*/, off, s32 offset:1128 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v139 /*v395*/, off, s32 offset:1132 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v140 /*v396*/, off, s32 offset:1136 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v141 /*v397*/, off, s32 offset:1140 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v142 /*v398*/, off, s32 offset:1144 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v143 /*v399*/, off, s32 offset:1148 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v144 /*v400*/, off, s32 offset:1152 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v145 /*v401*/, off, s32 offset:1156 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v146 /*v402*/, off, s32 offset:1160 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v147 /*v403*/, off, s32 offset:1164 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v148 /*v404*/, off, s32 offset:1168 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v149 /*v405*/, off, s32 offset:1172 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v150 /*v406*/, off, s32 offset:1176 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v151 /*v407*/, off, s32 offset:1180 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v152 /*v408*/, off, s32 offset:1184 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v153 /*v409*/, off, s32 offset:1188 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v154 /*v410*/, off, s32 offset:1192 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v155 /*v411*/, off, s32 offset:1196 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v156 /*v412*/, off, s32 offset:1200 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v157 /*v413*/, off, s32 offset:1204 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v158 /*v414*/, off, s32 offset:1208 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v159 /*v415*/, off, s32 offset:1212 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v160 /*v416*/, off, s32 offset:1216 +; GFX1250-DAGISEL-NEXT: 
scratch_load_b32 v161 /*v417*/, off, s32 offset:1220 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v162 /*v418*/, off, s32 offset:1224 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v163 /*v419*/, off, s32 offset:1228 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v164 /*v420*/, off, s32 offset:1232 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v165 /*v421*/, off, s32 offset:1236 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v166 /*v422*/, off, s32 offset:1240 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v167 /*v423*/, off, s32 offset:1244 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v168 /*v424*/, off, s32 offset:1248 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v169 /*v425*/, off, s32 offset:1252 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v170 /*v426*/, off, s32 offset:1256 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v171 /*v427*/, off, s32 offset:1260 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v172 /*v428*/, off, s32 offset:1264 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v173 /*v429*/, off, s32 offset:1268 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v174 /*v430*/, off, s32 offset:1272 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v175 /*v431*/, off, s32 offset:1276 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v176 /*v432*/, off, s32 offset:1280 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v177 /*v433*/, off, s32 offset:1284 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v178 /*v434*/, off, s32 offset:1288 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v179 /*v435*/, off, s32 offset:1292 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v180 /*v436*/, off, s32 offset:1296 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v181 /*v437*/, off, s32 offset:1300 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v182 /*v438*/, off, s32 offset:1304 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v183 /*v439*/, off, s32 offset:1308 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v184 /*v440*/, off, s32 offset:1312 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v185 /*v441*/, off, s32 offset:1316 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v186 /*v442*/, off, s32 offset:1320 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v187 /*v443*/, off, s32 offset:1324 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v188 /*v444*/, off, s32 offset:1328 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v189 /*v445*/, off, s32 offset:1332 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v190 /*v446*/, off, s32 offset:1336 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v191 /*v447*/, off, s32 offset:1340 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v192 /*v448*/, off, s32 offset:1344 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v193 /*v449*/, off, s32 offset:1348 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v194 /*v450*/, off, s32 offset:1352 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v195 /*v451*/, off, s32 offset:1356 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v196 /*v452*/, off, s32 offset:1360 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v197 /*v453*/, off, s32 offset:1364 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v198 /*v454*/, off, s32 offset:1368 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v199 /*v455*/, off, s32 offset:1372 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v200 /*v456*/, off, s32 offset:1376 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v201 /*v457*/, off, s32 offset:1380 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v202 /*v458*/, off, s32 offset:1384 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v203 /*v459*/, off, s32 offset:1388 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v204 /*v460*/, off, s32 offset:1392 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v205 /*v461*/, off, s32 offset:1396 +; GFX1250-DAGISEL-NEXT: 
scratch_load_b32 v206 /*v462*/, off, s32 offset:1400 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v207 /*v463*/, off, s32 offset:1404 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v208 /*v464*/, off, s32 offset:1408 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v209 /*v465*/, off, s32 offset:1412 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v210 /*v466*/, off, s32 offset:1416 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v211 /*v467*/, off, s32 offset:1420 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v212 /*v468*/, off, s32 offset:1424 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v213 /*v469*/, off, s32 offset:1428 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v214 /*v470*/, off, s32 offset:1432 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v215 /*v471*/, off, s32 offset:1436 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v216 /*v472*/, off, s32 offset:1440 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v217 /*v473*/, off, s32 offset:1444 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v218 /*v474*/, off, s32 offset:1448 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v219 /*v475*/, off, s32 offset:1452 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v220 /*v476*/, off, s32 offset:1456 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v221 /*v477*/, off, s32 offset:1460 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v222 /*v478*/, off, s32 offset:1464 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v223 /*v479*/, off, s32 offset:1468 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v224 /*v480*/, off, s32 offset:1472 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v225 /*v481*/, off, s32 offset:1476 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v226 /*v482*/, off, s32 offset:1480 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v227 /*v483*/, off, s32 offset:1484 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v228 /*v484*/, off, s32 offset:1488 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v229 /*v485*/, off, s32 offset:1492 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v230 /*v486*/, off, s32 offset:1496 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v231 /*v487*/, off, s32 offset:1500 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v232 /*v488*/, off, s32 offset:1504 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v233 /*v489*/, off, s32 offset:1508 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v234 /*v490*/, off, s32 offset:1512 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v235 /*v491*/, off, s32 offset:1516 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v236 /*v492*/, off, s32 offset:1520 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v237 /*v493*/, off, s32 offset:1524 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v238 /*v494*/, off, s32 offset:1528 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v239 /*v495*/, off, s32 offset:1532 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v240 /*v496*/, off, s32 offset:1536 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v241 /*v497*/, off, s32 offset:1540 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v242 /*v498*/, off, s32 offset:1544 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v243 /*v499*/, off, s32 offset:1548 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v244 /*v500*/, off, s32 offset:1552 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v245 /*v501*/, off, s32 offset:1556 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v246 /*v502*/, off, s32 offset:1560 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v247 /*v503*/, off, s32 offset:1564 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v248 /*v504*/, off, s32 offset:1568 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v249 /*v505*/, off, s32 offset:1572 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v250 /*v506*/, off, s32 offset:1576 +; GFX1250-DAGISEL-NEXT: 
scratch_load_b32 v251 /*v507*/, off, s32 offset:1580 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v252 /*v508*/, off, s32 offset:1584 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v253 /*v509*/, off, s32 offset:1588 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v254 /*v510*/, off, s32 offset:1592 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v255 /*v511*/, off, s32 offset:1596 +; GFX1250-DAGISEL-NEXT: s_set_vgpr_msb 0x80 ; msbs: dst=2 src0=0 src1=0 src2=0 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v0 /*v512*/, off, s32 offset:1600 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v1 /*v513*/, off, s32 offset:1604 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v2 /*v514*/, off, s32 offset:1608 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v3 /*v515*/, off, s32 offset:1612 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v4 /*v516*/, off, s32 offset:1616 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v5 /*v517*/, off, s32 offset:1620 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v6 /*v518*/, off, s32 offset:1624 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v7 /*v519*/, off, s32 offset:1628 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v8 /*v520*/, off, s32 offset:1632 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v9 /*v521*/, off, s32 offset:1636 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v10 /*v522*/, off, s32 offset:1640 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v11 /*v523*/, off, s32 offset:1644 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v12 /*v524*/, off, s32 offset:1648 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v13 /*v525*/, off, s32 offset:1652 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v14 /*v526*/, off, s32 offset:1656 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v15 /*v527*/, off, s32 offset:1660 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v16 /*v528*/, off, s32 offset:1664 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v17 /*v529*/, off, s32 offset:1668 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v18 /*v530*/, off, s32 offset:1672 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v19 /*v531*/, off, s32 offset:1676 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v20 /*v532*/, off, s32 offset:1680 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v21 /*v533*/, off, s32 offset:1684 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v22 /*v534*/, off, s32 offset:1688 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v23 /*v535*/, off, s32 offset:1692 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v24 /*v536*/, off, s32 offset:1696 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v25 /*v537*/, off, s32 offset:1700 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v26 /*v538*/, off, s32 offset:1704 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v27 /*v539*/, off, s32 offset:1708 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v28 /*v540*/, off, s32 offset:1712 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v29 /*v541*/, off, s32 offset:1716 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v30 /*v542*/, off, s32 offset:1720 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v31 /*v543*/, off, s32 offset:1724 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v32 /*v544*/, off, s32 offset:1728 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v33 /*v545*/, off, s32 offset:1732 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v34 /*v546*/, off, s32 offset:1736 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v35 /*v547*/, off, s32 offset:1740 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v36 /*v548*/, off, s32 offset:1744 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v37 /*v549*/, off, s32 offset:1748 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v38 /*v550*/, off, s32 offset:1752 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v39 /*v551*/, off, s32 offset:1756 +; GFX1250-DAGISEL-NEXT: 
scratch_load_b32 v40 /*v552*/, off, s32 offset:1760 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v41 /*v553*/, off, s32 offset:1764 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v42 /*v554*/, off, s32 offset:1768 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v43 /*v555*/, off, s32 offset:1772 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v44 /*v556*/, off, s32 offset:1776 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v45 /*v557*/, off, s32 offset:1780 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v46 /*v558*/, off, s32 offset:1784 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v47 /*v559*/, off, s32 offset:1788 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v48 /*v560*/, off, s32 offset:1792 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v49 /*v561*/, off, s32 offset:1796 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v50 /*v562*/, off, s32 offset:1800 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v51 /*v563*/, off, s32 offset:1804 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v52 /*v564*/, off, s32 offset:1808 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v53 /*v565*/, off, s32 offset:1812 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v54 /*v566*/, off, s32 offset:1816 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v55 /*v567*/, off, s32 offset:1820 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v56 /*v568*/, off, s32 offset:1824 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v57 /*v569*/, off, s32 offset:1828 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v58 /*v570*/, off, s32 offset:1832 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v59 /*v571*/, off, s32 offset:1836 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v60 /*v572*/, off, s32 offset:1840 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v61 /*v573*/, off, s32 offset:1844 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v62 /*v574*/, off, s32 offset:1848 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v63 /*v575*/, off, s32 offset:1852 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v64 /*v576*/, off, s32 offset:1856 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v65 /*v577*/, off, s32 offset:1860 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v66 /*v578*/, off, s32 offset:1864 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v67 /*v579*/, off, s32 offset:1868 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v68 /*v580*/, off, s32 offset:1872 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v69 /*v581*/, off, s32 offset:1876 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v70 /*v582*/, off, s32 offset:1880 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v71 /*v583*/, off, s32 offset:1884 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v72 /*v584*/, off, s32 offset:1888 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v73 /*v585*/, off, s32 offset:1892 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v74 /*v586*/, off, s32 offset:1896 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v75 /*v587*/, off, s32 offset:1900 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v76 /*v588*/, off, s32 offset:1904 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v77 /*v589*/, off, s32 offset:1908 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v78 /*v590*/, off, s32 offset:1912 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v79 /*v591*/, off, s32 offset:1916 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v80 /*v592*/, off, s32 offset:1920 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v81 /*v593*/, off, s32 offset:1924 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v82 /*v594*/, off, s32 offset:1928 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v83 /*v595*/, off, s32 offset:1932 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v84 /*v596*/, off, s32 offset:1936 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v85 /*v597*/, off, s32 
offset:1940 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v86 /*v598*/, off, s32 offset:1944 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v87 /*v599*/, off, s32 offset:1948 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v88 /*v600*/, off, s32 offset:1952 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v89 /*v601*/, off, s32 offset:1956 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v90 /*v602*/, off, s32 offset:1960 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v91 /*v603*/, off, s32 offset:1964 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v92 /*v604*/, off, s32 offset:1968 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v93 /*v605*/, off, s32 offset:1972 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v94 /*v606*/, off, s32 offset:1976 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v95 /*v607*/, off, s32 offset:1980 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v96 /*v608*/, off, s32 offset:1984 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v97 /*v609*/, off, s32 offset:1988 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v98 /*v610*/, off, s32 offset:1992 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v99 /*v611*/, off, s32 offset:1996 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v100 /*v612*/, off, s32 offset:2000 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v101 /*v613*/, off, s32 offset:2004 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v102 /*v614*/, off, s32 offset:2008 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v103 /*v615*/, off, s32 offset:2012 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v104 /*v616*/, off, s32 offset:2016 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v105 /*v617*/, off, s32 offset:2020 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v106 /*v618*/, off, s32 offset:2024 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v107 /*v619*/, off, s32 offset:2028 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v108 /*v620*/, off, s32 offset:2032 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v109 /*v621*/, off, s32 offset:2036 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v110 /*v622*/, off, s32 offset:2040 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v111 /*v623*/, off, s32 offset:2044 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v112 /*v624*/, off, s32 offset:2048 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v113 /*v625*/, off, s32 offset:2052 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v114 /*v626*/, off, s32 offset:2056 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v115 /*v627*/, off, s32 offset:2060 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v116 /*v628*/, off, s32 offset:2064 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v117 /*v629*/, off, s32 offset:2068 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v118 /*v630*/, off, s32 offset:2072 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v119 /*v631*/, off, s32 offset:2076 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v120 /*v632*/, off, s32 offset:2080 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v121 /*v633*/, off, s32 offset:2084 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v122 /*v634*/, off, s32 offset:2088 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v123 /*v635*/, off, s32 offset:2092 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v124 /*v636*/, off, s32 offset:2096 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v125 /*v637*/, off, s32 offset:2100 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v126 /*v638*/, off, s32 offset:2104 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v127 /*v639*/, off, s32 offset:2108 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v128 /*v640*/, off, s32 offset:2112 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v129 /*v641*/, off, s32 offset:2116 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v130 /*v642*/, off, s32 offset:2120 +; 
GFX1250-DAGISEL-NEXT: scratch_load_b32 v131 /*v643*/, off, s32 offset:2124 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v132 /*v644*/, off, s32 offset:2128 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v133 /*v645*/, off, s32 offset:2132 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v134 /*v646*/, off, s32 offset:2136 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v135 /*v647*/, off, s32 offset:2140 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v136 /*v648*/, off, s32 offset:2144 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v137 /*v649*/, off, s32 offset:2148 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v138 /*v650*/, off, s32 offset:2152 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v139 /*v651*/, off, s32 offset:2156 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v140 /*v652*/, off, s32 offset:2160 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v141 /*v653*/, off, s32 offset:2164 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v142 /*v654*/, off, s32 offset:2168 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v143 /*v655*/, off, s32 offset:2172 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v144 /*v656*/, off, s32 offset:2176 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v145 /*v657*/, off, s32 offset:2180 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v146 /*v658*/, off, s32 offset:2184 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v147 /*v659*/, off, s32 offset:2188 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v148 /*v660*/, off, s32 offset:2192 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v149 /*v661*/, off, s32 offset:2196 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v150 /*v662*/, off, s32 offset:2200 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v151 /*v663*/, off, s32 offset:2204 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v152 /*v664*/, off, s32 offset:2208 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v153 /*v665*/, off, s32 offset:2212 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v154 /*v666*/, off, s32 offset:2216 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v155 /*v667*/, off, s32 offset:2220 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v156 /*v668*/, off, s32 offset:2224 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v157 /*v669*/, off, s32 offset:2228 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v158 /*v670*/, off, s32 offset:2232 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v159 /*v671*/, off, s32 offset:2236 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v160 /*v672*/, off, s32 offset:2240 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v161 /*v673*/, off, s32 offset:2244 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v162 /*v674*/, off, s32 offset:2248 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v163 /*v675*/, off, s32 offset:2252 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v164 /*v676*/, off, s32 offset:2256 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v165 /*v677*/, off, s32 offset:2260 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v166 /*v678*/, off, s32 offset:2264 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v167 /*v679*/, off, s32 offset:2268 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v168 /*v680*/, off, s32 offset:2272 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v169 /*v681*/, off, s32 offset:2276 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v170 /*v682*/, off, s32 offset:2280 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v171 /*v683*/, off, s32 offset:2284 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v172 /*v684*/, off, s32 offset:2288 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v173 /*v685*/, off, s32 offset:2292 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v174 /*v686*/, off, s32 offset:2296 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v175 /*v687*/, off, s32 offset:2300 +; 
GFX1250-DAGISEL-NEXT: scratch_load_b32 v176 /*v688*/, off, s32 offset:2304 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v177 /*v689*/, off, s32 offset:2308 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v178 /*v690*/, off, s32 offset:2312 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v179 /*v691*/, off, s32 offset:2316 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v180 /*v692*/, off, s32 offset:2320 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v181 /*v693*/, off, s32 offset:2324 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v182 /*v694*/, off, s32 offset:2328 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v183 /*v695*/, off, s32 offset:2332 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v184 /*v696*/, off, s32 offset:2336 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v185 /*v697*/, off, s32 offset:2340 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v186 /*v698*/, off, s32 offset:2344 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v187 /*v699*/, off, s32 offset:2348 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v188 /*v700*/, off, s32 offset:2352 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v189 /*v701*/, off, s32 offset:2356 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v190 /*v702*/, off, s32 offset:2360 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v191 /*v703*/, off, s32 offset:2364 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v192 /*v704*/, off, s32 offset:2368 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v193 /*v705*/, off, s32 offset:2372 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v194 /*v706*/, off, s32 offset:2376 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v195 /*v707*/, off, s32 offset:2380 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v196 /*v708*/, off, s32 offset:2384 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v197 /*v709*/, off, s32 offset:2388 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v198 /*v710*/, off, s32 offset:2392 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v199 /*v711*/, off, s32 offset:2396 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v200 /*v712*/, off, s32 offset:2400 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v201 /*v713*/, off, s32 offset:2404 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v202 /*v714*/, off, s32 offset:2408 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v203 /*v715*/, off, s32 offset:2412 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v204 /*v716*/, off, s32 offset:2416 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v205 /*v717*/, off, s32 offset:2420 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v206 /*v718*/, off, s32 offset:2424 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v207 /*v719*/, off, s32 offset:2428 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v208 /*v720*/, off, s32 offset:2432 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v209 /*v721*/, off, s32 offset:2436 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v210 /*v722*/, off, s32 offset:2440 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v211 /*v723*/, off, s32 offset:2444 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v212 /*v724*/, off, s32 offset:2448 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v213 /*v725*/, off, s32 offset:2452 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v214 /*v726*/, off, s32 offset:2456 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v215 /*v727*/, off, s32 offset:2460 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v216 /*v728*/, off, s32 offset:2464 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v217 /*v729*/, off, s32 offset:2468 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v218 /*v730*/, off, s32 offset:2472 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v219 /*v731*/, off, s32 offset:2476 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v220 /*v732*/, off, s32 offset:2480 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v221 
/*v733*/, off, s32 offset:2484 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v222 /*v734*/, off, s32 offset:2488 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v223 /*v735*/, off, s32 offset:2492 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v224 /*v736*/, off, s32 offset:2496 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v225 /*v737*/, off, s32 offset:2500 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v226 /*v738*/, off, s32 offset:2504 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v227 /*v739*/, off, s32 offset:2508 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v228 /*v740*/, off, s32 offset:2512 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v229 /*v741*/, off, s32 offset:2516 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v230 /*v742*/, off, s32 offset:2520 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v231 /*v743*/, off, s32 offset:2524 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v232 /*v744*/, off, s32 offset:2528 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v233 /*v745*/, off, s32 offset:2532 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v234 /*v746*/, off, s32 offset:2536 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v235 /*v747*/, off, s32 offset:2540 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v236 /*v748*/, off, s32 offset:2544 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v237 /*v749*/, off, s32 offset:2548 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v238 /*v750*/, off, s32 offset:2552 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v239 /*v751*/, off, s32 offset:2556 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v240 /*v752*/, off, s32 offset:2560 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v241 /*v753*/, off, s32 offset:2564 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v242 /*v754*/, off, s32 offset:2568 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v243 /*v755*/, off, s32 offset:2572 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v244 /*v756*/, off, s32 offset:2576 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v245 /*v757*/, off, s32 offset:2580 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v246 /*v758*/, off, s32 offset:2584 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v247 /*v759*/, off, s32 offset:2588 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v248 /*v760*/, off, s32 offset:2592 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v249 /*v761*/, off, s32 offset:2596 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v250 /*v762*/, off, s32 offset:2600 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v251 /*v763*/, off, s32 offset:2604 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v252 /*v764*/, off, s32 offset:2608 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v253 /*v765*/, off, s32 offset:2612 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v254 /*v766*/, off, s32 offset:2616 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v255 /*v767*/, off, s32 offset:2620 +; GFX1250-DAGISEL-NEXT: s_set_vgpr_msb 0xc0 ; msbs: dst=3 src0=0 src1=0 src2=0 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v0 /*v768*/, off, s32 offset:2624 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v1 /*v769*/, off, s32 offset:2628 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v2 /*v770*/, off, s32 offset:2632 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v3 /*v771*/, off, s32 offset:2636 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v4 /*v772*/, off, s32 offset:2640 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v5 /*v773*/, off, s32 offset:2644 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v6 /*v774*/, off, s32 offset:2648 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v7 /*v775*/, off, s32 offset:2652 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v8 /*v776*/, off, s32 offset:2656 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v9 /*v777*/, off, s32 
offset:2660 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v10 /*v778*/, off, s32 offset:2664 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v11 /*v779*/, off, s32 offset:2668 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v12 /*v780*/, off, s32 offset:2672 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v13 /*v781*/, off, s32 offset:2676 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v14 /*v782*/, off, s32 offset:2680 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v15 /*v783*/, off, s32 offset:2684 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v16 /*v784*/, off, s32 offset:2688 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v17 /*v785*/, off, s32 offset:2692 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v18 /*v786*/, off, s32 offset:2696 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v19 /*v787*/, off, s32 offset:2700 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v20 /*v788*/, off, s32 offset:2704 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v21 /*v789*/, off, s32 offset:2708 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v22 /*v790*/, off, s32 offset:2712 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v23 /*v791*/, off, s32 offset:2716 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v24 /*v792*/, off, s32 offset:2720 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v25 /*v793*/, off, s32 offset:2724 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v26 /*v794*/, off, s32 offset:2728 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v27 /*v795*/, off, s32 offset:2732 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v28 /*v796*/, off, s32 offset:2736 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v29 /*v797*/, off, s32 offset:2740 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v30 /*v798*/, off, s32 offset:2744 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v31 /*v799*/, off, s32 offset:2748 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v32 /*v800*/, off, s32 offset:2752 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v33 /*v801*/, off, s32 offset:2756 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v34 /*v802*/, off, s32 offset:2760 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v35 /*v803*/, off, s32 offset:2764 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v36 /*v804*/, off, s32 offset:2768 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v37 /*v805*/, off, s32 offset:2772 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v38 /*v806*/, off, s32 offset:2776 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v39 /*v807*/, off, s32 offset:2780 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v40 /*v808*/, off, s32 offset:2784 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v41 /*v809*/, off, s32 offset:2788 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v42 /*v810*/, off, s32 offset:2792 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v43 /*v811*/, off, s32 offset:2796 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v44 /*v812*/, off, s32 offset:2800 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v45 /*v813*/, off, s32 offset:2804 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v46 /*v814*/, off, s32 offset:2808 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v47 /*v815*/, off, s32 offset:2812 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v48 /*v816*/, off, s32 offset:2816 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v49 /*v817*/, off, s32 offset:2820 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v50 /*v818*/, off, s32 offset:2824 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v51 /*v819*/, off, s32 offset:2828 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v52 /*v820*/, off, s32 offset:2832 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v53 /*v821*/, off, s32 offset:2836 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v54 /*v822*/, off, s32 offset:2840 +; GFX1250-DAGISEL-NEXT: 
scratch_load_b32 v55 /*v823*/, off, s32 offset:2844 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v56 /*v824*/, off, s32 offset:2848 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v57 /*v825*/, off, s32 offset:2852 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v58 /*v826*/, off, s32 offset:2856 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v59 /*v827*/, off, s32 offset:2860 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v60 /*v828*/, off, s32 offset:2864 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v61 /*v829*/, off, s32 offset:2868 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v62 /*v830*/, off, s32 offset:2872 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v63 /*v831*/, off, s32 offset:2876 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v64 /*v832*/, off, s32 offset:2880 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v65 /*v833*/, off, s32 offset:2884 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v66 /*v834*/, off, s32 offset:2888 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v67 /*v835*/, off, s32 offset:2892 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v68 /*v836*/, off, s32 offset:2896 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v69 /*v837*/, off, s32 offset:2900 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v70 /*v838*/, off, s32 offset:2904 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v71 /*v839*/, off, s32 offset:2908 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v72 /*v840*/, off, s32 offset:2912 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v73 /*v841*/, off, s32 offset:2916 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v74 /*v842*/, off, s32 offset:2920 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v75 /*v843*/, off, s32 offset:2924 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v76 /*v844*/, off, s32 offset:2928 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v77 /*v845*/, off, s32 offset:2932 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v78 /*v846*/, off, s32 offset:2936 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v79 /*v847*/, off, s32 offset:2940 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v80 /*v848*/, off, s32 offset:2944 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v81 /*v849*/, off, s32 offset:2948 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v82 /*v850*/, off, s32 offset:2952 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v83 /*v851*/, off, s32 offset:2956 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v84 /*v852*/, off, s32 offset:2960 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v85 /*v853*/, off, s32 offset:2964 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v86 /*v854*/, off, s32 offset:2968 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v87 /*v855*/, off, s32 offset:2972 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v88 /*v856*/, off, s32 offset:2976 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v89 /*v857*/, off, s32 offset:2980 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v90 /*v858*/, off, s32 offset:2984 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v91 /*v859*/, off, s32 offset:2988 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v92 /*v860*/, off, s32 offset:2992 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v93 /*v861*/, off, s32 offset:2996 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v94 /*v862*/, off, s32 offset:3000 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v95 /*v863*/, off, s32 offset:3004 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v96 /*v864*/, off, s32 offset:3008 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v97 /*v865*/, off, s32 offset:3012 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v98 /*v866*/, off, s32 offset:3016 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v99 /*v867*/, off, s32 offset:3020 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v100 /*v868*/, off, s32 
offset:3024 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v101 /*v869*/, off, s32 offset:3028 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v102 /*v870*/, off, s32 offset:3032 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v103 /*v871*/, off, s32 offset:3036 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v104 /*v872*/, off, s32 offset:3040 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v105 /*v873*/, off, s32 offset:3044 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v106 /*v874*/, off, s32 offset:3048 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v107 /*v875*/, off, s32 offset:3052 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v108 /*v876*/, off, s32 offset:3056 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v109 /*v877*/, off, s32 offset:3060 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v110 /*v878*/, off, s32 offset:3064 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v111 /*v879*/, off, s32 offset:3068 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v112 /*v880*/, off, s32 offset:3072 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v113 /*v881*/, off, s32 offset:3076 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v114 /*v882*/, off, s32 offset:3080 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v115 /*v883*/, off, s32 offset:3084 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v116 /*v884*/, off, s32 offset:3088 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v117 /*v885*/, off, s32 offset:3092 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v118 /*v886*/, off, s32 offset:3096 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v119 /*v887*/, off, s32 offset:3100 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v120 /*v888*/, off, s32 offset:3104 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v121 /*v889*/, off, s32 offset:3108 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v122 /*v890*/, off, s32 offset:3112 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v123 /*v891*/, off, s32 offset:3116 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v124 /*v892*/, off, s32 offset:3120 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v125 /*v893*/, off, s32 offset:3124 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v126 /*v894*/, off, s32 offset:3128 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v127 /*v895*/, off, s32 offset:3132 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v128 /*v896*/, off, s32 offset:3136 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v129 /*v897*/, off, s32 offset:3140 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v130 /*v898*/, off, s32 offset:3144 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v131 /*v899*/, off, s32 offset:3148 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v132 /*v900*/, off, s32 offset:3152 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v133 /*v901*/, off, s32 offset:3156 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v134 /*v902*/, off, s32 offset:3160 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v135 /*v903*/, off, s32 offset:3164 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v136 /*v904*/, off, s32 offset:3168 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v137 /*v905*/, off, s32 offset:3172 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v138 /*v906*/, off, s32 offset:3176 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v139 /*v907*/, off, s32 offset:3180 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v140 /*v908*/, off, s32 offset:3184 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v141 /*v909*/, off, s32 offset:3188 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v142 /*v910*/, off, s32 offset:3192 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v143 /*v911*/, off, s32 offset:3196 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v144 /*v912*/, off, s32 offset:3200 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v145 /*v913*/, off, s32 offset:3204 +; GFX1250-DAGISEL-NEXT: 
scratch_load_b32 v146 /*v914*/, off, s32 offset:3208 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v147 /*v915*/, off, s32 offset:3212 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v148 /*v916*/, off, s32 offset:3216 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v149 /*v917*/, off, s32 offset:3220 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v150 /*v918*/, off, s32 offset:3224 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v151 /*v919*/, off, s32 offset:3228 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v152 /*v920*/, off, s32 offset:3232 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v153 /*v921*/, off, s32 offset:3236 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v154 /*v922*/, off, s32 offset:3240 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v155 /*v923*/, off, s32 offset:3244 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v156 /*v924*/, off, s32 offset:3248 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v157 /*v925*/, off, s32 offset:3252 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v158 /*v926*/, off, s32 offset:3256 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v159 /*v927*/, off, s32 offset:3260 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v160 /*v928*/, off, s32 offset:3264 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v161 /*v929*/, off, s32 offset:3268 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v162 /*v930*/, off, s32 offset:3272 +; GFX1250-DAGISEL-NEXT: s_clause 0x3e +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v163 /*v931*/, off, s32 offset:3276 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v164 /*v932*/, off, s32 offset:3280 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v165 /*v933*/, off, s32 offset:3284 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v166 /*v934*/, off, s32 offset:3288 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v167 /*v935*/, off, s32 offset:3292 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v168 /*v936*/, off, s32 offset:3296 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v169 /*v937*/, off, s32 offset:3300 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v170 /*v938*/, off, s32 offset:3304 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v171 /*v939*/, off, s32 offset:3308 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v172 /*v940*/, off, s32 offset:3312 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v173 /*v941*/, off, s32 offset:3316 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v174 /*v942*/, off, s32 offset:3320 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v175 /*v943*/, off, s32 offset:3324 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v176 /*v944*/, off, s32 offset:3328 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v177 /*v945*/, off, s32 offset:3332 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v178 /*v946*/, off, s32 offset:3336 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v179 /*v947*/, off, s32 offset:3340 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v180 /*v948*/, off, s32 offset:3344 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v181 /*v949*/, off, s32 offset:3348 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v182 /*v950*/, off, s32 offset:3352 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v183 /*v951*/, off, s32 offset:3356 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v184 /*v952*/, off, s32 offset:3360 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v185 /*v953*/, off, s32 offset:3364 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v186 /*v954*/, off, s32 offset:3368 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v187 /*v955*/, off, s32 offset:3372 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v188 /*v956*/, off, s32 offset:3376 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v189 /*v957*/, off, s32 offset:3380 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v190 /*v958*/, off, s32 offset:3384 +; GFX1250-DAGISEL-NEXT: 
scratch_load_b32 v191 /*v959*/, off, s32 offset:3388 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v192 /*v960*/, off, s32 offset:3392 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v193 /*v961*/, off, s32 offset:3396 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v194 /*v962*/, off, s32 offset:3400 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v195 /*v963*/, off, s32 offset:3404 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v196 /*v964*/, off, s32 offset:3408 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v197 /*v965*/, off, s32 offset:3412 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v198 /*v966*/, off, s32 offset:3416 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v199 /*v967*/, off, s32 offset:3420 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v200 /*v968*/, off, s32 offset:3424 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v201 /*v969*/, off, s32 offset:3428 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v202 /*v970*/, off, s32 offset:3432 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v203 /*v971*/, off, s32 offset:3436 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v204 /*v972*/, off, s32 offset:3440 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v205 /*v973*/, off, s32 offset:3444 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v206 /*v974*/, off, s32 offset:3448 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v207 /*v975*/, off, s32 offset:3452 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v208 /*v976*/, off, s32 offset:3456 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v209 /*v977*/, off, s32 offset:3460 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v210 /*v978*/, off, s32 offset:3464 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v211 /*v979*/, off, s32 offset:3468 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v212 /*v980*/, off, s32 offset:3472 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v213 /*v981*/, off, s32 offset:3476 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v214 /*v982*/, off, s32 offset:3480 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v215 /*v983*/, off, s32 offset:3484 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v216 /*v984*/, off, s32 offset:3488 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v217 /*v985*/, off, s32 offset:3492 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v218 /*v986*/, off, s32 offset:3496 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v219 /*v987*/, off, s32 offset:3500 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v220 /*v988*/, off, s32 offset:3504 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v221 /*v989*/, off, s32 offset:3508 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v222 /*v990*/, off, s32 offset:3512 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v223 /*v991*/, off, s32 offset:3516 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v224 /*v992*/, off, s32 offset:3520 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v225 /*v993*/, off, s32 offset:3524 +; GFX1250-DAGISEL-NEXT: s_clause 0x1d +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v226 /*v994*/, off, s32 offset:3528 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v227 /*v995*/, off, s32 offset:3532 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v228 /*v996*/, off, s32 offset:3536 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v229 /*v997*/, off, s32 offset:3540 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v230 /*v998*/, off, s32 offset:3544 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v231 /*v999*/, off, s32 offset:3548 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v232 /*v1000*/, off, s32 offset:3552 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v233 /*v1001*/, off, s32 offset:3556 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v234 /*v1002*/, off, s32 offset:3560 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v235 /*v1003*/, off, s32 offset:3564 +; GFX1250-DAGISEL-NEXT: 
scratch_load_b32 v236 /*v1004*/, off, s32 offset:3568 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v237 /*v1005*/, off, s32 offset:3572 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v238 /*v1006*/, off, s32 offset:3576 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v239 /*v1007*/, off, s32 offset:3580 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v240 /*v1008*/, off, s32 offset:3584 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v241 /*v1009*/, off, s32 offset:3588 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v242 /*v1010*/, off, s32 offset:3592 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v243 /*v1011*/, off, s32 offset:3596 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v244 /*v1012*/, off, s32 offset:3600 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v245 /*v1013*/, off, s32 offset:3604 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v246 /*v1014*/, off, s32 offset:3608 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v247 /*v1015*/, off, s32 offset:3612 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v248 /*v1016*/, off, s32 offset:3616 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v249 /*v1017*/, off, s32 offset:3620 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v250 /*v1018*/, off, s32 offset:3624 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v251 /*v1019*/, off, s32 offset:3628 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v252 /*v1020*/, off, s32 offset:3632 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v253 /*v1021*/, off, s32 offset:3636 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v254 /*v1022*/, off, s32 offset:3640 +; GFX1250-DAGISEL-NEXT: scratch_load_b32 v255 /*v1023*/, off, s32 offset:3644 +; GFX1250-DAGISEL-NEXT: s_wait_xcnt 0x0 +; GFX1250-DAGISEL-NEXT: s_mov_b32 exec_lo, s0 +; GFX1250-DAGISEL-NEXT: s_set_vgpr_msb 0 ; msbs: dst=0 src0=0 src1=0 src2=0 +; GFX1250-DAGISEL-NEXT: s_set_pc_i64 s[36:37] + %ret = tail call amdgpu_gfx <2 x half>(<2 x half>, <2 x half>) @gfx_callee(<2 x half> %y, <2 x half> %x) convergent + ret <2 x half> %ret +} + declare amdgpu_gfx_whole_wave float @callee(i1 %active, <8 x float> %x) define amdgpu_cs void @call_from_entry(<8 x float> %x, ptr %p) { diff --git a/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll b/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll index 484ad93bebeab..0e8d47347286b 100644 --- a/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll +++ b/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 -enable-unsafe-fp-math < %s +; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 < %s ; PR5367 define arm_aapcs_vfpcc void @_Z27Benchmark_SceDualQuaternionPvm(ptr nocapture %pBuffer, i32 %numItems) nounwind { diff --git a/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll b/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll index 80c1968c85743..593fb9348506b 100644 --- a/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll +++ b/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 -enable-unsafe-fp-math %s -o /dev/null +; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o /dev/null ;target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64" ;target triple = "armv7-none-linux-gnueabi" diff --git a/llvm/test/CodeGen/ARM/build-attributes-fn-attr3.ll b/llvm/test/CodeGen/ARM/build-attributes-fn-attr3.ll index 7f70c44c78f9c..27d1dc20bd815 100644 --- a/llvm/test/CodeGen/ARM/build-attributes-fn-attr3.ll +++ b/llvm/test/CodeGen/ARM/build-attributes-fn-attr3.ll @@ -11,7 +11,10 @@ define i32 @foo() local_unnamed_addr #0 { entry: + %a = call float 
@llvm.fma.f32(float 0.0, float 0.0, float 0.0) ret i32 42 } +declare float @llvm.fma.f32(float, float, float) + attributes #0 = { minsize norecurse nounwind optsize readnone "no-trapping-math"="true" "denormal-fp-math"="ieee"} diff --git a/llvm/test/CodeGen/ARM/build-attributes-fn-attr4.ll b/llvm/test/CodeGen/ARM/build-attributes-fn-attr4.ll index c99cb27adf155..9c8dd8d95c61c 100644 --- a/llvm/test/CodeGen/ARM/build-attributes-fn-attr4.ll +++ b/llvm/test/CodeGen/ARM/build-attributes-fn-attr4.ll @@ -10,7 +10,10 @@ define i32 @foo1() local_unnamed_addr #0 { entry: + %a = call float @llvm.fma.f32(float 0.0, float 0.0, float 0.0) ret i32 42 } +declare float @llvm.fma.f32(float, float, float) + attributes #0 = { minsize norecurse nounwind optsize readnone "denormal-fp-math"="positive-zero,positive-zero" } diff --git a/llvm/test/CodeGen/ARM/build-attributes-fn-attr5.ll b/llvm/test/CodeGen/ARM/build-attributes-fn-attr5.ll index ba1e7d7ce55c1..cda3ea0fc6d18 100644 --- a/llvm/test/CodeGen/ARM/build-attributes-fn-attr5.ll +++ b/llvm/test/CodeGen/ARM/build-attributes-fn-attr5.ll @@ -10,7 +10,10 @@ define i32 @foo1() local_unnamed_addr #0 { entry: + %a = call float @llvm.fma.f32(float 0.0, float 0.0, float 0.0) ret i32 42 } +declare float @llvm.fma.f32(float, float, float) + attributes #0 = { minsize norecurse nounwind optsize readnone "denormal-fp-math"="preserve-sign,preserve-sign"} diff --git a/llvm/test/CodeGen/ARM/build-attributes-fn-attr6.ll b/llvm/test/CodeGen/ARM/build-attributes-fn-attr6.ll index 1cd68aed1e051..59d0a40198392 100644 --- a/llvm/test/CodeGen/ARM/build-attributes-fn-attr6.ll +++ b/llvm/test/CodeGen/ARM/build-attributes-fn-attr6.ll @@ -11,6 +11,7 @@ define i32 @foo1() local_unnamed_addr #0 { entry: + %a = call float @llvm.fma.f32(float 0.0, float 0.0, float 0.0) ret i32 42 } @@ -19,5 +20,7 @@ entry: ret i32 42 } +declare float @llvm.fma.f32(float, float, float) + attributes #0 = { minsize norecurse nounwind optsize readnone "denormal-fp-math"="preserve-sign,preserve-sign"} attributes #1 = { minsize norecurse nounwind optsize readnone "denormal-fp-math"="positive-zero,positive-zero"} diff --git a/llvm/test/CodeGen/ARM/build-attributes.ll b/llvm/test/CodeGen/ARM/build-attributes.ll index 68844aed03630..306a4a31b79fa 100644 --- a/llvm/test/CodeGen/ARM/build-attributes.ll +++ b/llvm/test/CodeGen/ARM/build-attributes.ll @@ -3,23 +3,16 @@ ; RUN: llc < %s -mtriple=thumbv5-linux-gnueabi -mcpu=xscale -mattr=+strict-align | FileCheck %s --check-prefix=XSCALE ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6 -; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST ; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M -; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math 
-fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align | FileCheck %s --check-prefix=ARM1156T2F-S -; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=ARM1156T2F-S-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7M-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V8-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING @@ -31,35 +24,24 @@ ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi | FileCheck %s --check-prefix=V8MMAINLINE ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi -mattr=+dsp | FileCheck %s --check-prefix=V8MMAINLINE_DSP ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,-d32 | FileCheck %s --check-prefix=CORTEX-A5-NONEON ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A5-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-SOFT-FAST 
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A8-HARD -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-HARD-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-SOFT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-HARD-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A12-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15 -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 | FileCheck %s --check-prefix=CORTEX-A17-DEFAULT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A17-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | 
FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-no-trapping-fp-math | FileCheck %s --check-prefix=NO-TRAPPING-MATH ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -denormal-fp-math=ieee | FileCheck %s --check-prefix=DENORMAL-IEEE @@ -74,37 +56,26 @@ ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus | FileCheck %s --check-prefix=CORTEX-M0PLUS -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0PLUS-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 | FileCheck %s --check-prefix=CORTEX-M1 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M1-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align | FileCheck %s --check-prefix=SC000 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC000-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 | FileCheck %s --check-prefix=CORTEX-M3 -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M3-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 | FileCheck %s --check-prefix=SC300 -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC300-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi 
-mcpu=cortex-m4 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-SOFT-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SOFT -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SINGLE -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7-DOUBLE ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m23 | FileCheck %s --check-prefix=CORTEX-M23 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 | FileCheck %s --check-prefix=CORTEX-M33 -; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M33-FAST ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m35p | FileCheck %s --check-prefix=CORTEX-M35P @@ -113,49 +84,34 @@ ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4 | FileCheck %s --check-prefix=CORTEX-R4 ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4f | FileCheck %s --check-prefix=CORTEX-R4F ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R5-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 | FileCheck %s --check-prefix=CORTEX-R7 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R7-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-sign-dependent-rounding-fp-math | FileCheck %s 
--check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 | FileCheck %s --check-prefix=CORTEX-R8 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R8-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 | FileCheck %s --check-prefix=CORTEX-A32 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A32-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 | FileCheck %s --check-prefix=CORTEX-A35 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A35-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A53-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A57-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 | FileCheck %s --check-prefix=CORTEX-A72 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A72-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a73 | FileCheck %s --check-prefix=CORTEX-A73 ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 | FileCheck %s --check-prefix=EXYNOS-M3 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 | FileCheck %s 
--check-prefix=EXYNOS-M4 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 | FileCheck %s --check-prefix=EXYNOS-M5 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A-FAST ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s --check-prefix=CORTEX-A7-CHECK -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-CHECK-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2sp,-vfp3,-vfp4,-neon,-fp16 | FileCheck %s --check-prefix=CORTEX-A7-NOFPU -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2sp,-vfp3,-vfp4,-neon,-fp16 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,,-d32,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=pic | FileCheck %s --check-prefix=RELOC-PIC ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=static | FileCheck %s --check-prefix=RELOC-OTHER @@ -278,15 +234,6 @@ ; V6-NOT: .eabi_attribute 28 ; V6: .eabi_attribute 38, 1 -; V6-FAST-NOT: .eabi_attribute 19 -;; Despite the V6 CPU having no FPU by default, we chose to flush to -;; positive zero here. There's no hardware support doing this, but the -;; fast maths software library might. -; V6-FAST-NOT: .eabi_attribute 20 -; V6-FAST-NOT: .eabi_attribute 21 -; V6-FAST-NOT: .eabi_attribute 22 -; V6-FAST: .eabi_attribute 23, 1 - ;; We emit 6, 12 for both v6-M and v6S-M, technically this is incorrect for ;; V6-M, however we don't model the OS extension so this is fine. 
; V6M: .eabi_attribute 6, 12 @@ -312,14 +259,6 @@ ; V6M-NOT: .eabi_attribute 28 ; V6M: .eabi_attribute 38, 1 -; V6M-FAST-NOT: .eabi_attribute 19 -;; Despite the V6M CPU having no FPU by default, we chose to flush to -;; positive zero here. There's no hardware support doing this, but the -;; fast maths software library might. -; V6M-FAST-NOT: .eabi_attribute 20 -; V6M-FAST-NOT: .eabi_attribute 21 -; V6M-FAST-NOT: .eabi_attribute 22 -; V6M-FAST: .eabi_attribute 23, 1 ; ARM1156T2F-S: .cpu arm1156t2f-s ; ARM1156T2F-S: .eabi_attribute 6, 8 @@ -342,14 +281,6 @@ ; ARM1156T2F-S-NOT: .eabi_attribute 28 ; ARM1156T2F-S: .eabi_attribute 38, 1 -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 19 -;; V6 cores default to flush to positive zero (value 0). Note that value 2 is also equally -;; valid for this core, it's an implementation defined question as to which of 0 and 2 you -;; select. LLVM historically picks 0. -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 20 -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 21 -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 22 -; ARM1156T2F-S-FAST: .eabi_attribute 23, 1 ; V7M: .eabi_attribute 6, 10 ; V7M: .eabi_attribute 7, 77 @@ -374,15 +305,6 @@ ; V7M-NOT: .eabi_attribute 28 ; V7M: .eabi_attribute 38, 1 -; V7M-FAST-NOT: .eabi_attribute 19 -;; Despite the V7M CPU having no FPU by default, we chose to flush -;; preserving sign. This matches what the hardware would do in the -;; architecture revision were to exist on the current target. -; V7M-FAST: .eabi_attribute 20, 2 -; V7M-FAST-NOT: .eabi_attribute 21 -; V7M-FAST-NOT: .eabi_attribute 22 -; V7M-FAST: .eabi_attribute 23, 1 - ; V7: .syntax unified ; V7: .eabi_attribute 6, 10 ; V7-NOT: .eabi_attribute 27 @@ -401,13 +323,6 @@ ; V7-NOT: .eabi_attribute 28 ; V7: .eabi_attribute 38, 1 -; V7-FAST-NOT: .eabi_attribute 19 -;; The default CPU does have an FPU and it must be VFPv3 or better, so it flushes -;; denormals to zero preserving the sign. -; V7-FAST: .eabi_attribute 20, 2 -; V7-FAST-NOT: .eabi_attribute 21 -; V7-FAST-NOT: .eabi_attribute 22 -; V7-FAST: .eabi_attribute 23, 1 ; V7VE: .syntax unified ; V7VE: .eabi_attribute 6, 10 @ Tag_CPU_arch @@ -435,12 +350,6 @@ ; V8-NOT: .eabi_attribute 22 ; V8: .eabi_attribute 23, 3 -; V8-FAST-NOT: .eabi_attribute 19 -;; The default does have an FPU, and for V8-A, it flushes preserving sign. -; V8-FAST: .eabi_attribute 20, 2 -; V8-FAST-NOT: .eabi_attribute 21 -; V8-FAST-NOT: .eabi_attribute 22 -; V8-FAST: .eabi_attribute 23, 1 ; Vt8: .syntax unified ; Vt8: .eabi_attribute 6, 14 @@ -552,15 +461,11 @@ ;; We default to IEEE 754 compliance ; CORTEX-A7-CHECK: .eabi_attribute 20, 1 ;; The A7 has VFPv3 support by default, so flush preserving sign. -; CORTEX-A7-CHECK-FAST: .eabi_attribute 20, 2 ; CORTEX-A7-NOFPU: .eabi_attribute 20, 1 ;; Despite there being no FPU, we chose to flush to zero preserving ;; sign. This matches what the hardware would do for this architecture ;; revision. -; CORTEX-A7-NOFPU-FAST: .eabi_attribute 20, 2 ; CORTEX-A7-FPUV4: .eabi_attribute 20, 1 -;; The VFPv4 FPU flushes preserving sign. -; CORTEX-A7-FPUV4-FAST: .eabi_attribute 20, 2 ; Tag_ABI_FP_exceptions ; CORTEX-A7-CHECK: .eabi_attribute 21, 1 @@ -610,13 +515,6 @@ ; CORTEX-A5-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A5-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 19 -;; The A5 defaults to a VFPv4 FPU, so it flushed preserving the sign when -ffast-math -;; is given. 
-; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 23, 1 ; CORTEX-A5-NONEON: .cpu cortex-a5 ; CORTEX-A5-NONEON: .eabi_attribute 6, 10 @@ -634,13 +532,6 @@ ; CORTEX-A5-NONEON: .eabi_attribute 24, 1 ; CORTEX-A5-NONEON: .eabi_attribute 25, 1 -; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 19 -;; The A5 defaults to a VFPv4 FPU, so it flushed preserving sign when -ffast-math -;; is given. -; CORTEX-A5-NONEON-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-NONEON-FAST: .eabi_attribute 23, 1 ; CORTEX-A5-NOFPU: .cpu cortex-a5 ; CORTEX-A5-NOFPU: .eabi_attribute 6, 10 @@ -659,14 +550,9 @@ ; CORTEX-A5-NOFPU: .eabi_attribute 24, 1 ; CORTEX-A5-NOFPU: .eabi_attribute 25, 1 -; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 19 ;; Despite there being no FPU, we chose to flush to zero preserving ;; sign. This matches what the hardware would do for this architecture ;; revision. -; CORTEX-A5-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-NOFPU-FAST: .eabi_attribute 23, 1 ; CORTEX-A8-SOFT: .cpu cortex-a8 ; CORTEX-A8-SOFT: .eabi_attribute 6, 10 @@ -712,15 +598,6 @@ ; CORTEX-A9-SOFT-NOT: .eabi_attribute 28 ; CORTEX-A9-SOFT: .eabi_attribute 38, 1 -; CORTEX-A8-SOFT-FAST-NOT: .eabi_attribute 19 -; CORTEX-A9-SOFT-FAST-NOT: .eabi_attribute 19 -;; The A9 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A8-SOFT-FAST: .eabi_attribute 20, 2 -; CORTEX-A9-SOFT-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-SOFT-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-SOFT-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-SOFT-FAST: .eabi_attribute 23, 1 ; CORTEX-A8-HARD: .cpu cortex-a8 ; CORTEX-A8-HARD: .eabi_attribute 6, 10 @@ -766,21 +643,6 @@ ; CORTEX-A9-HARD: .eabi_attribute 28, 1 ; CORTEX-A9-HARD: .eabi_attribute 38, 1 -; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 19 -;; The A8 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A8-HARD-FAST: .eabi_attribute 20, 2 -; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-A8-HARD-FAST: .eabi_attribute 23, 1 - -; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 19 -;; The A9 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A9-HARD-FAST: .eabi_attribute 20, 2 -; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-A9-HARD-FAST: .eabi_attribute 23, 1 ; CORTEX-A12-DEFAULT: .cpu cortex-a12 ; CORTEX-A12-DEFAULT: .eabi_attribute 6, 10 @@ -800,13 +662,6 @@ ; CORTEX-A12-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A12-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A12-DEFAULT-FAST-NOT: .eabi_attribute 19 -;; The A12 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. 
-; CORTEX-A12-DEFAULT-FAST: .eabi_attribute 20, 2 -; CORTEX-A12-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-A12-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-A12-HARD-FAST: .eabi_attribute 23, 1 ; CORTEX-A12-NOFPU: .cpu cortex-a12 ; CORTEX-A12-NOFPU: .eabi_attribute 6, 10 @@ -826,14 +681,6 @@ ; CORTEX-A12-NOFPU: .eabi_attribute 24, 1 ; CORTEX-A12-NOFPU: .eabi_attribute 25, 1 -; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; CORTEX-A12-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-A12-NOFPU-FAST: .eabi_attribute 23, 1 ; CORTEX-A15: .cpu cortex-a15 ; CORTEX-A15: .eabi_attribute 6, 10 @@ -857,13 +704,6 @@ ; CORTEX-A15-NOT: .eabi_attribute 28 ; CORTEX-A15: .eabi_attribute 38, 1 -; CORTEX-A15-FAST-NOT: .eabi_attribute 19 -;; The A15 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A15-FAST: .eabi_attribute 20, 2 -; CORTEX-A15-FAST-NOT: .eabi_attribute 21 -; CORTEX-A15-FAST-NOT: .eabi_attribute 22 -; CORTEX-A15-FAST: .eabi_attribute 23, 1 ; CORTEX-A17-DEFAULT: .cpu cortex-a17 ; CORTEX-A17-DEFAULT: .eabi_attribute 6, 10 @@ -883,13 +723,6 @@ ; CORTEX-A17-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A17-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A17-FAST-NOT: .eabi_attribute 19 -;; The A17 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A17-FAST: .eabi_attribute 20, 2 -; CORTEX-A17-FAST-NOT: .eabi_attribute 21 -; CORTEX-A17-FAST-NOT: .eabi_attribute 22 -; CORTEX-A17-FAST: .eabi_attribute 23, 1 ; CORTEX-A17-NOFPU: .cpu cortex-a17 ; CORTEX-A17-NOFPU: .eabi_attribute 6, 10 @@ -910,13 +743,6 @@ ; CORTEX-A17-NOFPU: .eabi_attribute 25, 1 ; CORTEX-A17-NOFPU-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; CORTEX-A17-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-A17-NOFPU-FAST: .eabi_attribute 23, 1 ; Test flags -enable-no-trapping-fp-math and -denormal-fp-math: ; NO-TRAPPING-MATH: .eabi_attribute 21, 0 @@ -946,16 +772,6 @@ ; CORTEX-M0-NOT: .eabi_attribute 28 ; CORTEX-M0: .eabi_attribute 38, 1 -; CORTEX-M0-FAST-NOT: .eabi_attribute 19 -;; Despite the M0 CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; CORTEX-M0-FAST-NOT: .eabi_attribute 20 -; CORTEX-M0-FAST-NOT: .eabi_attribute 21 -; CORTEX-M0-FAST-NOT: .eabi_attribute 22 -; CORTEX-M0-FAST: .eabi_attribute 23, 1 ; CORTEX-M0PLUS: .cpu cortex-m0plus ; CORTEX-M0PLUS: .eabi_attribute 6, 12 @@ -978,16 +794,6 @@ ; CORTEX-M0PLUS-NOT: .eabi_attribute 28 ; CORTEX-M0PLUS: .eabi_attribute 38, 1 -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 19 -;; Despite the M0+ CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. 
-; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 20 -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 21 -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 22 -; CORTEX-M0PLUS-FAST: .eabi_attribute 23, 1 ; CORTEX-M1: .cpu cortex-m1 ; CORTEX-M1: .eabi_attribute 6, 12 @@ -1010,16 +816,6 @@ ; CORTEX-M1-NOT: .eabi_attribute 28 ; CORTEX-M1: .eabi_attribute 38, 1 -; CORTEX-M1-FAST-NOT: .eabi_attribute 19 -;; Despite the M1 CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; CORTEX-M1-FAST-NOT: .eabi_attribute 20 -; CORTEX-M1-FAST-NOT: .eabi_attribute 21 -; CORTEX-M1-FAST-NOT: .eabi_attribute 22 -; CORTEX-M1-FAST: .eabi_attribute 23, 1 ; SC000: .cpu sc000 ; SC000: .eabi_attribute 6, 12 @@ -1041,16 +837,6 @@ ; SC000-NOT: .eabi_attribute 28 ; SC000: .eabi_attribute 38, 1 -; SC000-FAST-NOT: .eabi_attribute 19 -;; Despite the SC000 CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; SC000-FAST-NOT: .eabi_attribute 20 -; SC000-FAST-NOT: .eabi_attribute 21 -; SC000-FAST-NOT: .eabi_attribute 22 -; SC000-FAST: .eabi_attribute 23, 1 ; CORTEX-M3: .cpu cortex-m3 ; CORTEX-M3: .eabi_attribute 6, 10 @@ -1073,14 +859,6 @@ ; CORTEX-M3-NOT: .eabi_attribute 28 ; CORTEX-M3: .eabi_attribute 38, 1 -; CORTEX-M3-FAST-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; CORTEX-M3-FAST: .eabi_attribute 20, 2 -; CORTEX-M3-FAST-NOT: .eabi_attribute 21 -; CORTEX-M3-FAST-NOT: .eabi_attribute 22 -; CORTEX-M3-FAST: .eabi_attribute 23, 1 ; SC300: .cpu sc300 ; SC300: .eabi_attribute 6, 10 @@ -1103,14 +881,6 @@ ; SC300-NOT: .eabi_attribute 28 ; SC300: .eabi_attribute 38, 1 -; SC300-FAST-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; SC300-FAST: .eabi_attribute 20, 2 -; SC300-FAST-NOT: .eabi_attribute 21 -; SC300-FAST-NOT: .eabi_attribute 22 -; SC300-FAST: .eabi_attribute 23, 1 ; CORTEX-M4-SOFT: .cpu cortex-m4 ; CORTEX-M4-SOFT: .eabi_attribute 6, 13 @@ -1134,13 +904,6 @@ ; CORTEX-M4-SOFT-NOT: .eabi_attribute 28 ; CORTEX-M4-SOFT: .eabi_attribute 38, 1 -; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 19 -;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-M4-SOFT-FAST: .eabi_attribute 20, 2 -; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 21 -; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 22 -; CORTEX-M4-SOFT-FAST: .eabi_attribute 23, 1 ; CORTEX-M4-HARD: .cpu cortex-m4 ; CORTEX-M4-HARD: .eabi_attribute 6, 13 @@ -1164,13 +927,6 @@ ; CORTEX-M4-HARD: .eabi_attribute 28, 1 ; CORTEX-M4-HARD: .eabi_attribute 38, 1 -; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 19 -;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. 
-; CORTEX-M4-HARD-FAST: .eabi_attribute 20, 2 -; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-M4-HARD-FAST: .eabi_attribute 23, 1 ; CORTEX-M7: .cpu cortex-m7 ; CORTEX-M7: .eabi_attribute 6, 13 @@ -1197,16 +953,6 @@ ; CORTEX-M7: .eabi_attribute 38, 1 ; CORTEX-M7: .eabi_attribute 14, 0 -; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 19 -;; The M7 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-M7-FAST: .eabi_attribute 20, 2 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; CORTEX-M7-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-M7-NOFPU-FAST: .eabi_attribute 23, 1 ; CORTEX-R4: .cpu cortex-r4 ; CORTEX-R4: .eabi_attribute 6, 10 @@ -1273,12 +1019,6 @@ ; CORTEX-R5-NOT: .eabi_attribute 28 ; CORTEX-R5: .eabi_attribute 38, 1 -; CORTEX-R5-FAST-NOT: .eabi_attribute 19 -;; The R5 has the VFPv3 FP unit, which always flushes preserving sign. -; CORTEX-R5-FAST: .eabi_attribute 20, 2 -; CORTEX-R5-FAST-NOT: .eabi_attribute 21 -; CORTEX-R5-FAST-NOT: .eabi_attribute 22 -; CORTEX-R5-FAST: .eabi_attribute 23, 1 ; CORTEX-R7: .cpu cortex-r7 ; CORTEX-R7: .eabi_attribute 6, 10 @@ -1301,12 +1041,6 @@ ; CORTEX-R7-NOT: .eabi_attribute 28 ; CORTEX-R7: .eabi_attribute 38, 1 -; CORTEX-R7-FAST-NOT: .eabi_attribute 19 -;; The R7 has the VFPv3 FP unit, which always flushes preserving sign. -; CORTEX-R7-FAST: .eabi_attribute 20, 2 -; CORTEX-R7-FAST-NOT: .eabi_attribute 21 -; CORTEX-R7-FAST-NOT: .eabi_attribute 22 -; CORTEX-R7-FAST: .eabi_attribute 23, 1 ; CORTEX-R8: .cpu cortex-r8 ; CORTEX-R8: .eabi_attribute 6, 10 @@ -1329,12 +1063,6 @@ ; CORTEX-R8-NOT: .eabi_attribute 28 ; CORTEX-R8: .eabi_attribute 38, 1 -; CORTEX-R8-FAST-NOT: .eabi_attribute 19 -;; The R8 has the VFPv3 FP unit, which always flushes preserving sign. -; CORTEX-R8-FAST: .eabi_attribute 20, 2 -; CORTEX-R8-FAST-NOT: .eabi_attribute 21 -; CORTEX-R8-FAST-NOT: .eabi_attribute 22 -; CORTEX-R8-FAST: .eabi_attribute 23, 1 ; CORTEX-A32: .cpu cortex-a32 ; CORTEX-A32: .eabi_attribute 6, 14 @@ -1359,12 +1087,6 @@ ; CORTEX-A32-NOT: .eabi_attribute 28 ; CORTEX-A32: .eabi_attribute 38, 1 -; CORTEX-A32-FAST-NOT: .eabi_attribute 19 -;; The A32 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A32-FAST: .eabi_attribute 20, 2 -; CORTEX-A32-FAST-NOT: .eabi_attribute 21 -; CORTEX-A32-FAST-NOT: .eabi_attribute 22 -; CORTEX-A32-FAST: .eabi_attribute 23, 1 ; CORTEX-M23: .cpu cortex-m23 ; CORTEX-M23: .eabi_attribute 6, 16 @@ -1430,11 +1152,6 @@ ; CORTEX-M35P: .eabi_attribute 38, 1 ; CORTEX-M35P: .eabi_attribute 14, 0 -; CORTEX-M33-FAST-NOT: .eabi_attribute 19 -; CORTEX-M33-FAST: .eabi_attribute 20, 2 -; CORTEX-M33-FAST-NOT: .eabi_attribute 21 -; CORTEX-M33-FAST-NOT: .eabi_attribute 22 -; CORTEX-M33-FAST: .eabi_attribute 23, 1 ; CORTEX-A35: .cpu cortex-a35 ; CORTEX-A35: .eabi_attribute 6, 14 @@ -1459,12 +1176,6 @@ ; CORTEX-A35-NOT: .eabi_attribute 28 ; CORTEX-A35: .eabi_attribute 38, 1 -; CORTEX-A35-FAST-NOT: .eabi_attribute 19 -;; The A35 has the ARMv8 FP unit, which always flushes preserving sign. 
-; CORTEX-A35-FAST: .eabi_attribute 20, 2 -; CORTEX-A35-FAST-NOT: .eabi_attribute 21 -; CORTEX-A35-FAST-NOT: .eabi_attribute 22 -; CORTEX-A35-FAST: .eabi_attribute 23, 1 ; CORTEX-A53: .cpu cortex-a53 ; CORTEX-A53: .eabi_attribute 6, 14 @@ -1489,12 +1200,6 @@ ; CORTEX-A53-NOT: .eabi_attribute 28 ; CORTEX-A53: .eabi_attribute 38, 1 -; CORTEX-A53-FAST-NOT: .eabi_attribute 19 -;; The A53 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A53-FAST: .eabi_attribute 20, 2 -; CORTEX-A53-FAST-NOT: .eabi_attribute 21 -; CORTEX-A53-FAST-NOT: .eabi_attribute 22 -; CORTEX-A53-FAST: .eabi_attribute 23, 1 ; CORTEX-A57: .cpu cortex-a57 ; CORTEX-A57: .eabi_attribute 6, 14 @@ -1519,12 +1224,6 @@ ; CORTEX-A57-NOT: .eabi_attribute 28 ; CORTEX-A57: .eabi_attribute 38, 1 -; CORTEX-A57-FAST-NOT: .eabi_attribute 19 -;; The A57 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A57-FAST: .eabi_attribute 20, 2 -; CORTEX-A57-FAST-NOT: .eabi_attribute 21 -; CORTEX-A57-FAST-NOT: .eabi_attribute 22 -; CORTEX-A57-FAST: .eabi_attribute 23, 1 ; CORTEX-A72: .cpu cortex-a72 ; CORTEX-A72: .eabi_attribute 6, 14 @@ -1549,12 +1248,6 @@ ; CORTEX-A72-NOT: .eabi_attribute 28 ; CORTEX-A72: .eabi_attribute 38, 1 -; CORTEX-A72-FAST-NOT: .eabi_attribute 19 -;; The A72 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A72-FAST: .eabi_attribute 20, 2 -; CORTEX-A72-FAST-NOT: .eabi_attribute 21 -; CORTEX-A72-FAST-NOT: .eabi_attribute 22 -; CORTEX-A72-FAST: .eabi_attribute 23, 1 ; CORTEX-A73: .cpu cortex-a73 ; CORTEX-A73: .eabi_attribute 6, 14 @@ -1580,12 +1273,6 @@ ; CORTEX-A73: .eabi_attribute 38, 1 ; CORTEX-A73: .eabi_attribute 14, 0 -; EXYNOS-FAST-NOT: .eabi_attribute 19 -;; The Exynos processors have the ARMv8 FP unit, which always flushes preserving sign. -; EXYNOS-FAST: .eabi_attribute 20, 2 -; EXYNOS-FAST-NOT: .eabi_attribute 21 -; EXYNOS-FAST-NOT: .eabi_attribute 22 -; EXYNOS-FAST: .eabi_attribute 23, 1 ; EXYNOS-M3: .cpu exynos-m3 ; EXYNOS-M3: .eabi_attribute 6, 14 @@ -1684,12 +1371,6 @@ ; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 28 ; GENERIC-ARMV8_1-A: .eabi_attribute 38, 1 -; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 19 -;; GENERIC-ARMV8_1-A has the ARMv8 FP unit, which always flushes preserving sign. 
-; GENERIC-ARMV8_1-A-FAST: .eabi_attribute 20, 2 -; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 21 -; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 22 -; GENERIC-ARMV8_1-A-FAST: .eabi_attribute 23, 1 ; RELOC-PIC: .eabi_attribute 15, 1 ; RELOC-PIC: .eabi_attribute 16, 1 diff --git a/llvm/test/CodeGen/ARM/fadds.ll b/llvm/test/CodeGen/ARM/fadds.ll index b5d3bdae1f9d3..191d5b3c13d26 100644 --- a/llvm/test/CodeGen/ARM/fadds.ll +++ b/llvm/test/CodeGen/ARM/fadds.ll @@ -7,7 +7,7 @@ ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA8 -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \ +; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --denormal-fp-math=preserve-sign %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA8U ; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \ diff --git a/llvm/test/CodeGen/ARM/fmuls.ll b/llvm/test/CodeGen/ARM/fmuls.ll index b24d867a7e866..a390a242e5918 100644 --- a/llvm/test/CodeGen/ARM/fmuls.ll +++ b/llvm/test/CodeGen/ARM/fmuls.ll @@ -7,7 +7,7 @@ ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA8 -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \ +; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --denormal-fp-math=preserve-sign %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA8U ; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \ diff --git a/llvm/test/CodeGen/ARM/fnegs.ll b/llvm/test/CodeGen/ARM/fnegs.ll index 435a600822e4d..6055b8f6dd93b 100644 --- a/llvm/test/CodeGen/ARM/fnegs.ll +++ b/llvm/test/CodeGen/ARM/fnegs.ll @@ -10,11 +10,11 @@ ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA8 -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \ +; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --denormal-fp-math=preserve-sign %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA8U ; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \ -; RUN: | FileCheck %s -check-prefix=CORTEXA8U +; RUN: | FileCheck %s -check-prefix=CORTEXA8U-DARWIN ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA9 @@ -41,7 +41,10 @@ entry: ; CORTEXA8: vneg.f32 s{{.*}}, s{{.*}} ; CORTEXA8U-LABEL: test1: -; CORTEXA8U: vneg.f32 d{{.*}}, d{{.*}} +; CORTEXA8U: vsub.f32 d{{.*}}, d{{.*}}, d{{.*}} + +; CORTEXA8U-DARWIN-LABEL: test1: +; CORTEXA8U-DARWIN: vneg.f32 d{{.*}}, d{{.*}} ; CORTEXA9-LABEL: test1: ; CORTEXA9: vneg.f32 s{{.*}}, s{{.*}} @@ -110,9 +113,13 @@ define <2 x float> @fneg_bitcast(i64 %i) { ; CORTEXA8-NOT: vneg.f32 ; CORTEXA8U-LABEL: fneg_bitcast: -; CORTEXA8U-DAG: eor r0, r0, #-2147483648 -; CORTEXA8U-DAG: eor r1, r1, #-2147483648 -; CORTEXA8U-NOT: vneg.f32 +; CORTEXA8U-DAG: vmov.i32 d{{.*}}, #0x80000000 +; CORTEXA8U-DAG: vsub.f32 d{{.*}}, d{{.*}}, d{{.*}} + +; CORTEXA8U-DARWIN-LABEL: fneg_bitcast: +; CORTEXA8U-DARWIN-DAG: eor r0, r0, #-2147483648 +; CORTEXA8U-DARWIN-DAG: eor r1, r1, #-2147483648 +; CORTEXA8U-DARWIN-NOT: vneg.f32 ; CORTEXA9-LABEL: fneg_bitcast: ; CORTEXA9-DAG: eor r0, r0, #-2147483648 diff --git a/llvm/test/CodeGen/ARM/fnmscs.ll b/llvm/test/CodeGen/ARM/fnmscs.ll index 0fa878c0c2f49..49f9dcf32f544 100644 --- a/llvm/test/CodeGen/ARM/fnmscs.ll +++ b/llvm/test/CodeGen/ARM/fnmscs.ll @@ -13,11 +13,11 @@ ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 -regalloc=basic %s -o - \ ; RUN: | FileCheck %s -check-prefix=A8 -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \ +; RUN: llc 
-mtriple=arm-eabi -mcpu=cortex-a8 --denormal-fp-math=preserve-sign %s -o - \ ; RUN: | FileCheck %s -check-prefix=A8U ; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \ -; RUN: | FileCheck %s -check-prefix=A8U +; RUN: | FileCheck %s -check-prefix=A8U-DARWIN define float @t1(float %acc, float %a, float %b) nounwind { entry: @@ -31,15 +31,20 @@ entry: ; NEON: vnmla.f32 ; A8U-LABEL: t1: -; A8U: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}} -; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}} +; A8U: vmov.i32 d{{[0-9]+}}, #0x80000000 +; A8U: vsub.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}} +; A8U: vsub.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}} + +; A8U-DARWIN-LABEL: t1: +; A8U-DARWIN: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}} +; A8U-DARWIN: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}} ; A8-LABEL: t1: ; A8: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}} ; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}} %0 = fmul float %a, %b %1 = fsub float -0.0, %0 - %2 = fsub float %1, %acc + %2 = fsub float %1, %acc ret float %2 } @@ -55,8 +60,13 @@ entry: ; NEON: vnmla.f32 ; A8U-LABEL: t2: -; A8U: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}} -; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}} +; A8U: vmov.i32 d{{[0-9]+}}, #0x80000000 +; A8U: vsub.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}} +; A8U: vsub.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}} + +; A8U-DARWIN-LABEL: t2: +; A8U-DARWIN: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}} +; A8U-DARWIN: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}} ; A8-LABEL: t2: ; A8: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}} @@ -79,8 +89,12 @@ entry: ; NEON: vnmla.f64 ; A8U-LABEL: t3: -; A8U: vnmul.f64 d ; A8U: vsub.f64 d +; A8U: vsub.f64 d + +; A8U-DARWIN-LABEL: t3: +; A8U-DARWIN: vnmul.f64 d +; A8U-DARWIN: vsub.f64 d ; A8-LABEL: t3: ; A8: vnmul.f64 d @@ -103,8 +117,12 @@ entry: ; NEON: vnmla.f64 ; A8U-LABEL: t4: -; A8U: vnmul.f64 d ; A8U: vsub.f64 d +; A8U: vsub.f64 d + +; A8U-DARWIN-LABEL: t4: +; A8U-DARWIN: vnmul.f64 d +; A8U-DARWIN: vsub.f64 d ; A8-LABEL: t4: ; A8: vnmul.f64 d diff --git a/llvm/test/CodeGen/ARM/fnmul.ll b/llvm/test/CodeGen/ARM/fnmul.ll index b021de8b7ad00..655c9f8415402 100644 --- a/llvm/test/CodeGen/ARM/fnmul.ll +++ b/llvm/test/CodeGen/ARM/fnmul.ll @@ -1,15 +1,30 @@ -; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o - | FileCheck %s -check-prefix STRICT - -; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 -enable-unsafe-fp-math %s -o - | FileCheck %s -check-prefix UNSAFE +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o - | FileCheck %s define double @t1(double %a, double %b) { -; STRICT: vnmul.f64 -; -; UNSAFE: vnmul.f64 +; CHECK-LABEL: t1: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov d0, r2, r3 +; CHECK-NEXT: vmov d1, r0, r1 +; CHECK-NEXT: vnmul.f64 d0, d1, d0 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: bx lr entry: - %tmp2 = fsub double -0.000000e+00, %a ; [#uses=1] - %tmp4 = fmul double %tmp2, %b ; [#uses=1] - ret double %tmp4 + %tmp2 = fsub double -0.000000e+00, %a + %tmp4 = fmul double %tmp2, %b + ret double %tmp4 } - +define double @tfast(double %a, double %b) { +; CHECK-LABEL: tfast: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov d0, r2, r3 +; CHECK-NEXT: vmov d1, r0, r1 +; CHECK-NEXT: vnmul.f64 d0, d1, d0 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: bx lr +entry: + %tmp2 = fsub fast double -0.000000e+00, %a + %tmp4 = fmul fast double %tmp2, %b + ret double %tmp4 +} diff --git a/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll 
b/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll index 33ff71e8c473e..9d0ea0e2d37cf 100644 --- a/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll +++ b/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll @@ -1,5 +1,6 @@ -; RUN: llc < %s -mtriple=arm-eabi -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s -; RUN: llc < %s -mtriple thumbv7a -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+fullfp16 -enable-no-nans-fp-math | FileCheck %s +; RUN: llc < %s -mtriple=thumbv7a-none-eabihf -mattr=+fullfp16 -enable-no-nans-fp-math | FileCheck %s ; TODO: we can't pass half-precision arguments as "half" types yet. We do ; that for the time being by passing "float %f.coerce" and the necessary @@ -9,9 +10,11 @@ define half @fp16_vminnm_o(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_o: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -22,9 +25,11 @@ entry: define half @fp16_vminnm_o_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_o_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -35,9 +40,11 @@ entry: define half @fp16_vminnm_u(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_u: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -48,9 +55,11 @@ entry: define half @fp16_vminnm_ule(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_ule: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -61,9 +70,11 @@ entry: define half @fp16_vminnm_u_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_u_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -74,9 +85,11 @@ entry: define half @fp16_vmaxnm_o(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_o: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half 
%1 = bitcast i16 %b to half @@ -87,9 +100,11 @@ entry: define half @fp16_vmaxnm_oge(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_oge: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -100,9 +115,11 @@ entry: define half @fp16_vmaxnm_o_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_o_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -113,9 +130,11 @@ entry: define half @fp16_vmaxnm_ole_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_ole_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -126,9 +145,11 @@ entry: define half @fp16_vmaxnm_u(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_u: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -139,9 +160,11 @@ entry: define half @fp16_vmaxnm_uge(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_uge: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -152,9 +175,11 @@ entry: define half @fp16_vmaxnm_u_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_u_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -167,11 +192,17 @@ entry: define half @fp16_vminnm_NNNo(i16 signext %a) { ; CHECK-LABEL: fp16_vminnm_NNNo: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01 -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmov.f16 s2, #1.200000e+01 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI12_0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI12_0: +; CHECK-NEXT: .short 0x5040 @ half 34 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast olt half %0, 12. 
@@ -183,11 +214,19 @@ entry: define half @fp16_vminnm_NNNo_rev(i16 signext %a) { ; CHECK-LABEL: fp16_vminnm_NNNo_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI13_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI13_1 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI13_0: +; CHECK-NEXT: .short 0x5300 @ half 56 +; CHECK-NEXT: .LCPI13_1: +; CHECK-NEXT: .short 0x54e0 @ half 78 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ogt half %0, 56. @@ -199,11 +238,17 @@ entry: define half @fp16_vminnm_NNNu(i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_NNNu: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01 -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmov.f16 s2, #1.200000e+01 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI14_0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI14_0: +; CHECK-NEXT: .short 0x5040 @ half 34 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ult half 12., %0 @@ -215,11 +260,19 @@ entry: define half @fp16_vminnm_NNNule(i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_NNNule: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI15_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI15_1 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI15_0: +; CHECK-NEXT: .short 0x5040 @ half 34 +; CHECK-NEXT: .LCPI15_1: +; CHECK-NEXT: .short 0x5300 @ half 56 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ule half 34., %0 @@ -231,11 +284,19 @@ entry: define half @fp16_vminnm_NNNu_rev(i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_NNNu_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI16_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI16_1 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI16_0: +; CHECK-NEXT: .short 0x5300 @ half 56 +; CHECK-NEXT: .LCPI16_1: +; CHECK-NEXT: .short 0x54e0 @ half 78 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ugt half 56., %0 @@ -247,11 +308,17 @@ entry: define half @fp16_vmaxnm_NNNo(i16 signext %a) { ; CHECK-LABEL: fp16_vmaxnm_NNNo: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01 -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmov.f16 s2, #1.200000e+01 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: 
vldr.16 s2, .LCPI17_0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI17_0: +; CHECK-NEXT: .short 0x5040 @ half 34 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ogt half %0, 12. @@ -263,11 +330,19 @@ entry: define half @fp16_vmaxnm_NNNoge(i16 signext %a) { ; CHECK-LABEL: fp16_vmaxnm_NNNoge: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI18_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI18_1 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI18_0: +; CHECK-NEXT: .short 0x5040 @ half 34 +; CHECK-NEXT: .LCPI18_1: +; CHECK-NEXT: .short 0x5300 @ half 56 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast oge half %0, 34. @@ -279,11 +354,19 @@ entry: define half @fp16_vmaxnm_NNNo_rev(i16 signext %a) { ; CHECK-LABEL: fp16_vmaxnm_NNNo_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI19_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI19_1 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI19_0: +; CHECK-NEXT: .short 0x5300 @ half 56 +; CHECK-NEXT: .LCPI19_1: +; CHECK-NEXT: .short 0x54e0 @ half 78 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast olt half %0, 56. @@ -295,11 +378,19 @@ entry: define half @fp16_vmaxnm_NNNole_rev(i16 signext %a) { ; CHECK-LABEL: fp16_vmaxnm_NNNole_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI20_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI20_1 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI20_0: +; CHECK-NEXT: .short 0x54e0 @ half 78 +; CHECK-NEXT: .LCPI20_1: +; CHECK-NEXT: .short 0x55a0 @ half 90 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ole half %0, 78. 
@@ -311,11 +402,17 @@ entry: define half @fp16_vmaxnm_NNNu(i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_NNNu: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01 -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmov.f16 s2, #1.200000e+01 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI21_0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI21_0: +; CHECK-NEXT: .short 0x5040 @ half 34 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ugt half 12., %0 @@ -327,11 +424,19 @@ entry: define half @fp16_vmaxnm_NNNuge(i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_NNNuge: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI22_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI22_1 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI22_0: +; CHECK-NEXT: .short 0x5040 @ half 34 +; CHECK-NEXT: .LCPI22_1: +; CHECK-NEXT: .short 0x5300 @ half 56 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast uge half 34., %0 @@ -343,11 +448,19 @@ entry: define half @fp16_vmaxnm_NNNu_rev(i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_NNNu_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI23_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI23_1 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI23_0: +; CHECK-NEXT: .short 0x5300 @ half 56 +; CHECK-NEXT: .LCPI23_1: +; CHECK-NEXT: .short 0x54e0 @ half 78 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ult half 56., %0 @@ -359,10 +472,16 @@ entry: define half @fp16_vminmaxnm_0(i16 signext %a) { ; CHECK-LABEL: fp16_vminmaxnm_0: -; CHECK: vldr.16 s0, .LCPI{{.*}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s2, s2, s0 -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s0, .LCPI24_0 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s2, s2, s0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI24_0: +; CHECK-NEXT: .short 0x0000 @ half 0 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast olt half %0, 0. 
@@ -374,10 +493,16 @@ entry: define half @fp16_vminmaxnm_neg0(i16 signext %a) { ; CHECK-LABEL: fp16_vminmaxnm_neg0: -; CHECK: vldr.16 s0, .LCPI{{.*}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s2, s2, s0 -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s0, .LCPI25_0 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s2, s2, s0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI25_0: +; CHECK-NEXT: .short 0x8000 @ half -0 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast olt half %0, -0. @@ -389,10 +514,16 @@ entry: define half @fp16_vminmaxnm_e_0(i16 signext %a) { ; CHECK-LABEL: fp16_vminmaxnm_e_0: -; CHECK: vldr.16 s0, .LCPI{{.*}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s2, s2, s0 -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s0, .LCPI26_0 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s2, s2, s0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI26_0: +; CHECK-NEXT: .short 0x0000 @ half 0 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ule half 0., %0 @@ -404,10 +535,16 @@ entry: define half @fp16_vminmaxnm_e_neg0(i16 signext %a) { ; CHECK-LABEL: fp16_vminmaxnm_e_neg0: -; CHECK: vldr.16 s0, .LCPI{{.*}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s2, s2, s0 -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s0, .LCPI27_0 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s2, s2, s0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI27_0: +; CHECK-NEXT: .short 0x8000 @ half -0 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ule half -0., %0 diff --git a/llvm/test/CodeGen/ARM/fp16.ll b/llvm/test/CodeGen/ARM/fp16.ll deleted file mode 100644 index 9ff701050ac7e..0000000000000 --- a/llvm/test/CodeGen/ARM/fp16.ll +++ /dev/null @@ -1,105 +0,0 @@ -; RUN: llc -mtriple=armv7a--none-eabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-HARDFLOAT-EABI %s -; RUN: llc -mtriple=armv7a--none-gnueabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-HARDFLOAT-GNU %s -; RUN: llc -mtriple=armv7a--none-musleabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-HARDFLOAT-GNU %s -; RUN: llc -mtriple=armv8-eabihf < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-ARMV8 %s -; RUN: llc -mtriple=thumbv7m-eabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-SOFTFLOAT-EABI %s -; RUN: llc -mtriple=thumbv7m-gnueabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-SOFTFLOAT-GNU %s -; RUN: llc -mtriple=thumbv7m-musleabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-SOFTFLOAT-GNU %s - -;; +fp16 is special: it has f32->f16 (unlike v7), but not f64->f16 (unlike v8). -;; This exposes unsafe-fp-math optimization opportunities; test that. 
-; RUN: llc -mattr=+vfp3,+fp16 < %s |\ -; RUN: FileCheck --check-prefix=CHECK --check-prefix=CHECK-FP16 --check-prefix=CHECK-FP16-SAFE %s -; RUN: llc -mattr=+vfp3,+fp16 < %s -enable-unsafe-fp-math |\ -; RUN: FileCheck --check-prefix=CHECK --check-prefix=CHECK-FP16 --check-prefix=CHECK-FP16-UNSAFE %s - -target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32" -target triple = "armv7---eabihf" - -@x = global i16 12902 -@y = global i16 0 -@z = common global i16 0 - -define void @foo() nounwind { -; CHECK-LABEL: foo: -entry: - %0 = load i16, ptr @x, align 2 - %1 = load i16, ptr @y, align 2 - %2 = tail call float @llvm.convert.from.fp16.f32(i16 %0) -; CHECK-HARDFLOAT-EABI: __aeabi_h2f -; CHECK-HARDFLOAT-GNU: __gnu_h2f_ieee -; CHECK-FP16: vcvtb.f32.f16 -; CHECK-ARMV8: vcvtb.f32.f16 -; CHECK-SOFTFLOAT-EABI: __aeabi_h2f -; CHECK-SOFTFLOAT-GNU: __gnu_h2f_ieee - %3 = tail call float @llvm.convert.from.fp16.f32(i16 %1) -; CHECK-HARDFLOAT-EABI: __aeabi_h2f -; CHECK-HARDFLOAT-GNU: __gnu_h2f_ieee -; CHECK-FP16: vcvtb.f32.f16 -; CHECK-ARMV8: vcvtb.f32.f16 -; CHECK-SOFTFLOAT-EABI: __aeabi_h2f -; CHECK-SOFTFLOAT-GNU: __gnu_h2f_ieee - %4 = fadd float %2, %3 - %5 = tail call i16 @llvm.convert.to.fp16.f32(float %4) -; CHECK-HARDFLOAT-EABI: __aeabi_f2h -; CHECK-HARDFLOAT-GNU: __gnu_f2h_ieee -; CHECK-FP16: vcvtb.f16.f32 -; CHECK-ARMV8: vcvtb.f16.f32 -; CHECK-SOFTFLOAT-EABI: __aeabi_f2h -; CHECK-SOFTFLOAT-GNU: __gnu_f2h_ieee - store i16 %5, ptr @x, align 2 - ret void -} - -define double @test_from_fp16(i16 %in) { -; CHECK-LABEL: test_from_fp16: - %val = call double @llvm.convert.from.fp16.f64(i16 %in) -; CHECK-HARDFLOAT-EABI: bl __aeabi_h2f -; CHECK-HARDFLOAT-EABI: vmov [[TMP:s[0-9]+]], r0 -; CHECK-HARDFLOAT-EABI: vcvt.f64.f32 {{d[0-9]+}}, [[TMP]] - -; CHECK-HARDFLOAT-GNU: bl __gnu_h2f_ieee -; CHECK-HARDFLOAT-GNU: vmov [[TMP:s[0-9]+]], r0 -; CHECK-HARDFLOAT-GNU: vcvt.f64.f32 {{d[0-9]+}}, [[TMP]] - -; CHECK-FP16: vmov [[TMP16:s[0-9]+]], r0 -; CHECK-FP16: vcvtb.f32.f16 [[TMP32:s[0-9]+]], [[TMP16]] -; CHECK-FP16: vcvt.f64.f32 d0, [[TMP32]] - -; CHECK-ARMV8: vmov [[TMP:s[0-9]+]], r0 -; CHECK-ARMV8: vcvtb.f64.f16 d0, [[TMP]] - -; CHECK-SOFTFLOAT-EABI: bl __aeabi_h2f -; CHECK-SOFTFLOAT-EABI: bl __aeabi_f2d - -; CHECK-SOFTFLOAT-GNU: bl __gnu_h2f_ieee -; CHECK-SOFTFLOAT-GNU: bl __aeabi_f2d - ret double %val -} - -define i16 @test_to_fp16(double %in) { -; CHECK-LABEL: test_to_fp16: - %val = call i16 @llvm.convert.to.fp16.f64(double %in) -; CHECK-HARDFLOAT-EABI: bl __aeabi_d2h - -; CHECK-HARDFLOAT-GNU: bl __aeabi_d2h - -; CHECK-FP16-SAFE: bl __aeabi_d2h - -; CHECK-FP16-UNSAFE: vmov r0, r1, d0 -; CHECK-FP16-UNSAFE-NEXT: bl __aeabi_d2h - -; CHECK-ARMV8: vcvtb.f16.f64 [[TMP:s[0-9]+]], d0 -; CHECK-ARMV8: vmov r0, [[TMP]] - -; CHECK-SOFTFLOAT-EABI: bl __aeabi_d2h - -; CHECK-SOFTFLOAT-GNU: bl __aeabi_d2h - ret i16 %val -} - -declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone -declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone - -declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone -declare i16 @llvm.convert.to.fp16.f64(double) nounwind readnone diff --git a/llvm/test/CodeGen/ARM/fp_convert.ll b/llvm/test/CodeGen/ARM/fp_convert.ll index 6f4707573fb50..0b749bf1c7ad4 100644 --- a/llvm/test/CodeGen/ARM/fp_convert.ll +++ b/llvm/test/CodeGen/ARM/fp_convert.ll @@ -7,7 +7,7 @@ ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \ ; RUN: | FileCheck %s -check-prefix=VFP2 -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 
--enable-unsafe-fp-math %s -o - \ +; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --denormal-fp-math=preserve-sign %s -o - \ ; RUN: | FileCheck %s -check-prefix=NEON ; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \ diff --git a/llvm/test/CodeGen/ARM/fsubs.ll b/llvm/test/CodeGen/ARM/fsubs.ll index baff34ab31fcf..7170f04ea0dd3 100644 --- a/llvm/test/CodeGen/ARM/fsubs.ll +++ b/llvm/test/CodeGen/ARM/fsubs.ll @@ -4,7 +4,7 @@ ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \ ; RUN: | FileCheck %s -check-prefix=NFP1 -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \ +; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --denormal-fp-math=preserve-sign %s -o - \ ; RUN: | FileCheck %s -check-prefix=NFP1U ; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \ diff --git a/llvm/test/CodeGen/ARM/inline-asm-clobber.ll b/llvm/test/CodeGen/ARM/inline-asm-clobber.ll index 7b1331f3f1e84..f44ad2a896ad4 100644 --- a/llvm/test/CodeGen/ARM/inline-asm-clobber.ll +++ b/llvm/test/CodeGen/ARM/inline-asm-clobber.ll @@ -6,12 +6,19 @@ ; RUN: llc <%s -mtriple=arm-none-eabi --frame-pointer=all 2>&1 \ ; RUN: | FileCheck %s -check-prefix=NO_FP_ELIM +; RUN: llc <%s -mtriple=armv6-apple-ios2 2>&1 | FileCheck %s -check-prefix=IOS2 +; RUN: llc <%s -mtriple=armv6k-apple-ios2 2>&1 | FileCheck %s -check-prefix=IOS2 +; RUN: llc <%s -mtriple=armv6k-apple-ios3 2>&1 | FileCheck %s -check-prefix=IOS3 +; RUN: llc <%s -mtriple=armv7-apple-ios2 2>&1 | FileCheck %s -check-prefix=IOS3 + ; CHECK: warning: inline asm clobber list contains reserved registers: SP, PC ; CHECK: warning: inline asm clobber list contains reserved registers: R11 ; RWPI: warning: inline asm clobber list contains reserved registers: R9, SP, PC ; RWPI: warning: inline asm clobber list contains reserved registers: R11 ; NO_FP_ELIM: warning: inline asm clobber list contains reserved registers: R11, SP, PC ; NO_FP_ELIM: warning: inline asm clobber list contains reserved registers: R11 +; IOS2: warning: inline asm clobber list contains reserved registers: R9, SP, PC +; IOS3: warning: inline asm clobber list contains reserved registers: SP, PC define void @foo() nounwind { call void asm sideeffect "mov r7, #1", diff --git a/llvm/test/CodeGen/ARM/issue159343.ll b/llvm/test/CodeGen/ARM/issue159343.ll new file mode 100644 index 0000000000000..03292582918a9 --- /dev/null +++ b/llvm/test/CodeGen/ARM/issue159343.ll @@ -0,0 +1,55 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s | FileCheck %s + +; Make sure there's no assertion from peephole-opt introducing illegal +; subregister index uses. 
+ +target triple = "thumbv7-unknown-linux-android29" + +define void @_ZN11VersionEdit10DecodeFromEv(i1 %call4, ptr %__profc__ZN11VersionEdit10DecodeFromEv) nounwind { +; CHECK-LABEL: _ZN11VersionEdit10DecodeFromEv: +; CHECK: @ %bb.0: @ %land.rhs.lr.ph +; CHECK-NEXT: lsls r0, r0, #31 +; CHECK-NEXT: beq .LBB0_2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: adr r0, .LCPI0_0 +; CHECK-NEXT: vld1.64 {d0, d1}, [r0:128] +; CHECK-NEXT: b .LBB0_3 +; CHECK-NEXT: .LBB0_2: @ %select.false +; CHECK-NEXT: vmov.i32 q0, #0x0 +; CHECK-NEXT: .LBB0_3: @ %select.end +; CHECK-NEXT: vldr s5, .LCPI0_1 +; CHECK-NEXT: vldr s4, .LCPI0_2 +; CHECK-NEXT: vmov.f32 s6, s0 +; CHECK-NEXT: vmov.f32 s7, s1 +; CHECK-NEXT: vst1.64 {d2, d3}, [r1] +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: @ %bb.4: +; CHECK-NEXT: .LCPI0_0: +; CHECK-NEXT: .long 1 @ 0x1 +; CHECK-NEXT: .long 0 @ 0x0 +; CHECK-NEXT: .long 1 @ 0x1 +; CHECK-NEXT: .long 0 @ 0x0 +; CHECK-NEXT: .LCPI0_1: +; CHECK-NEXT: .long 0x00000000 @ float 0 +; CHECK-NEXT: .LCPI0_2: +; CHECK-NEXT: .long 0x00000001 @ float 1.40129846E-45 +land.rhs.lr.ph: + br i1 %call4, label %sw.bb, label %while.cond.while.end_crit_edge.split.loop.exit43 + +while.cond.while.end_crit_edge.split.loop.exit43: ; preds = %land.rhs.lr.ph + %ext0 = extractelement <4 x i64> zeroinitializer, i64 0 + br label %while.cond.while.end_crit_edge + +while.cond.while.end_crit_edge: ; preds = %sw.bb, %while.cond.while.end_crit_edge.split.loop.exit43 + %pgocount5374.ph = phi i64 [ %ext1, %sw.bb ], [ %ext0, %while.cond.while.end_crit_edge.split.loop.exit43 ] + %ins = insertelement <2 x i64> splat (i64 1), i64 %pgocount5374.ph, i64 1 + store <2 x i64> %ins, ptr %__profc__ZN11VersionEdit10DecodeFromEv, align 8 + ret void + +sw.bb: ; preds = %land.rhs.lr.ph + %ext1 = extractelement <4 x i64> splat (i64 1), i64 0 + br label %while.cond.while.end_crit_edge +} + diff --git a/llvm/test/CodeGen/ARM/llrint-conv.ll b/llvm/test/CodeGen/ARM/llrint-conv.ll index 749ee00a3c68e..a1a04db8622c7 100644 --- a/llvm/test/CodeGen/ARM/llrint-conv.ll +++ b/llvm/test/CodeGen/ARM/llrint-conv.ll @@ -1,46 +1,71 @@ -; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP -; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16 +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16 -; SOFTFP-LABEL: testmsxh_builtin: -; SOFTFP: bl llrintf -; HARDFP-LABEL: testmsxh_builtin: -; HARDFP: bl llrintf define i64 @testmsxh_builtin(half %x) { +; CHECK-SOFT-LABEL: testmsxh_builtin: +; CHECK-SOFT: @ %bb.0: @ %entry +; CHECK-SOFT-NEXT: .save {r11, lr} +; CHECK-SOFT-NEXT: push {r11, lr} +; CHECK-SOFT-NEXT: bl __aeabi_h2f +; CHECK-SOFT-NEXT: bl llrintf +; CHECK-SOFT-NEXT: pop {r11, pc} +; +; CHECK-NOFP16-LABEL: testmsxh_builtin: +; CHECK-NOFP16: @ %bb.0: @ %entry +; CHECK-NOFP16-NEXT: .save {r11, lr} +; CHECK-NOFP16-NEXT: push {r11, lr} +; CHECK-NOFP16-NEXT: vmov r0, s0 +; CHECK-NOFP16-NEXT: bl __aeabi_h2f +; CHECK-NOFP16-NEXT: vmov s0, r0 +; CHECK-NOFP16-NEXT: bl llrintf +; CHECK-NOFP16-NEXT: pop {r11, pc} +; +; CHECK-FP16-LABEL: testmsxh_builtin: +; CHECK-FP16: @ %bb.0: @ %entry +; 
CHECK-FP16-NEXT: .save {r11, lr} +; CHECK-FP16-NEXT: push {r11, lr} +; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-FP16-NEXT: bl llrintf +; CHECK-FP16-NEXT: pop {r11, pc} entry: %0 = tail call i64 @llvm.llrint.i64.f16(half %x) ret i64 %0 } -; SOFTFP-LABEL: testmsxs_builtin: -; SOFTFP: bl llrintf -; HARDFP-LABEL: testmsxs_builtin: -; HARDFP: bl llrintf define i64 @testmsxs_builtin(float %x) { +; CHECK-LABEL: testmsxs_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl llrintf +; CHECK-NEXT: pop {r11, pc} entry: %0 = tail call i64 @llvm.llrint.i64.f32(float %x) ret i64 %0 } -; SOFTFP-LABEL: testmsxd_builtin: -; SOFTFP: bl llrint -; HARDFP-LABEL: testmsxd_builtin: -; HARDFP: bl llrint define i64 @testmsxd_builtin(double %x) { +; CHECK-LABEL: testmsxd_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl llrint +; CHECK-NEXT: pop {r11, pc} entry: %0 = tail call i64 @llvm.llrint.i64.f64(double %x) ret i64 %0 } -; FIXME(#44744): incorrect libcall -; SOFTFP-LABEL: testmsxq_builtin: -; SOFTFP: bl llrintl -; HARDFP-LABEL: testmsxq_builtin: -; HARDFP: bl llrintl define i64 @testmsxq_builtin(fp128 %x) { +; CHECK-LABEL: testmsxq_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl llrintl +; CHECK-NEXT: pop {r11, pc} entry: %0 = tail call i64 @llvm.llrint.i64.f128(fp128 %x) ret i64 %0 } - -declare i64 @llvm.llrint.i64.f32(float) nounwind readnone -declare i64 @llvm.llrint.i64.f64(double) nounwind readnone diff --git a/llvm/test/CodeGen/ARM/llvm.exp10.ll b/llvm/test/CodeGen/ARM/llvm.exp10.ll index eb72fe8c1e1b7..49397ca386cb4 100644 --- a/llvm/test/CodeGen/ARM/llvm.exp10.ll +++ b/llvm/test/CodeGen/ARM/llvm.exp10.ll @@ -189,12 +189,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) { ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: bl exp10f +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vmov s17, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl exp10f ; CHECK-NEXT: vmov s16, r0 +; CHECK-NEXT: mov r1, r4 ; CHECK-NEXT: vmov s18, r6 -; CHECK-NEXT: vmov r0, r1, d8 ; CHECK-NEXT: vmov r2, r3, d9 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: pop {r4, r5, r6, pc} @@ -207,7 +208,6 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) { ; CHECK: @ %bb.0: ; CHECK-NEXT: push {r4, r5, r6, r7, lr} ; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: mov r6, r0 ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: mov r4, r3 @@ -216,17 +216,15 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) { ; CHECK-NEXT: mov r7, r0 ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: bl exp10f -; CHECK-NEXT: vmov s19, r0 +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl exp10f -; CHECK-NEXT: vmov s18, r0 +; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: mov r0, r6 -; CHECK-NEXT: vmov s17, r7 ; CHECK-NEXT: bl exp10f -; CHECK-NEXT: vmov s16, r0 -; CHECK-NEXT: vmov r2, r3, d9 -; CHECK-NEXT: vmov r0, r1, d8 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: mov r1, r7 +; CHECK-NEXT: mov r2, r5 +; CHECK-NEXT: mov r3, r4 ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop {r4, r5, r6, r7, pc} %r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x) diff --git a/llvm/test/CodeGen/ARM/llvm.frexp.ll b/llvm/test/CodeGen/ARM/llvm.frexp.ll index 376426d701b3e..80972b75cf283 100644 --- a/llvm/test/CodeGen/ARM/llvm.frexp.ll +++ b/llvm/test/CodeGen/ARM/llvm.frexp.ll @@ -362,33 +362,31 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) { define <4 x 
float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) { ; CHECK-LABEL: test_frexp_v4f32_v4i32_only_use_fract: ; CHECK: @ %bb.0: -; CHECK-NEXT: push {r4, r5, r6, lr} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: sub sp, #16 -; CHECK-NEXT: mov r5, r1 -; CHECK-NEXT: mov r6, r0 -; CHECK-NEXT: mov r1, sp -; CHECK-NEXT: mov r0, r3 -; CHECK-NEXT: mov r4, r2 -; CHECK-NEXT: bl frexpf +; CHECK-NEXT: push {r4, r5, r6, r7, lr} +; CHECK-NEXT: sub sp, #20 +; CHECK-NEXT: mov r6, r1 ; CHECK-NEXT: add r1, sp, #4 -; CHECK-NEXT: vmov s19, r0 -; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: mov r7, r0 +; CHECK-NEXT: mov r0, r3 +; CHECK-NEXT: mov r5, r2 ; CHECK-NEXT: bl frexpf ; CHECK-NEXT: add r1, sp, #8 -; CHECK-NEXT: vmov s18, r0 +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: bl frexpf ; CHECK-NEXT: add r1, sp, #12 -; CHECK-NEXT: vmov s17, r0 +; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: bl frexpf -; CHECK-NEXT: vmov s16, r0 -; CHECK-NEXT: vmov r2, r3, d9 -; CHECK-NEXT: vmov r0, r1, d8 -; CHECK-NEXT: add sp, #16 -; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop {r4, r5, r6, pc} +; CHECK-NEXT: add r1, sp, #16 +; CHECK-NEXT: mov r6, r0 +; CHECK-NEXT: mov r0, r7 +; CHECK-NEXT: bl frexpf +; CHECK-NEXT: mov r1, r6 +; CHECK-NEXT: mov r2, r5 +; CHECK-NEXT: mov r3, r4 +; CHECK-NEXT: add sp, #20 +; CHECK-NEXT: pop {r4, r5, r6, r7, pc} %result = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> %a) %result.0 = extractvalue { <4 x float>, <4 x i32> } %result, 0 ret <4 x float> %result.0 diff --git a/llvm/test/CodeGen/ARM/lrint-conv.ll b/llvm/test/CodeGen/ARM/lrint-conv.ll index 9aa95112af533..23a2685aa1122 100644 --- a/llvm/test/CodeGen/ARM/lrint-conv.ll +++ b/llvm/test/CodeGen/ARM/lrint-conv.ll @@ -1,5 +1,7 @@ -; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP -; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16 +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16 ; FIXME: crash ; define i32 @testmswh_builtin(half %x) { @@ -8,36 +10,37 @@ ; ret i32 %0 ; } -; SOFTFP-LABEL: testmsws_builtin: -; SOFTFP: bl lrintf -; HARDFP-LABEL: testmsws_builtin: -; HARDFP: bl lrintf define i32 @testmsws_builtin(float %x) { +; CHECK-LABEL: testmsws_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: b lrintf entry: %0 = tail call i32 @llvm.lrint.i32.f32(float %x) ret i32 %0 } -; SOFTFP-LABEL: testmswd_builtin: -; SOFTFP: bl lrint -; HARDFP-LABEL: testmswd_builtin: -; HARDFP: bl lrint define i32 @testmswd_builtin(double %x) { +; CHECK-LABEL: testmswd_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: b lrint entry: %0 = tail call i32 @llvm.lrint.i32.f64(double %x) ret i32 %0 } -; FIXME(#44744): incorrect libcall -; SOFTFP-LABEL: testmswq_builtin: -; SOFTFP: bl lrintl -; HARDFP-LABEL: testmswq_builtin: -; HARDFP: bl lrintl define i32 @testmswq_builtin(fp128 %x) { +; CHECK-LABEL: testmswq_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl lrintl +; CHECK-NEXT: pop {r11, pc} entry: %0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x) ret i32 %0 } 
-declare i32 @llvm.lrint.i32.f32(float) nounwind readnone -declare i32 @llvm.lrint.i32.f64(double) nounwind readnone +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK-FP16: {{.*}} +; CHECK-NOFP16: {{.*}} +; CHECK-SOFT: {{.*}} diff --git a/llvm/test/CodeGen/ARM/neon-spfp.ll b/llvm/test/CodeGen/ARM/neon-spfp.ll index cbf25965a2fac..70a809583ff65 100644 --- a/llvm/test/CodeGen/ARM/neon-spfp.ll +++ b/llvm/test/CodeGen/ARM/neon-spfp.ll @@ -4,11 +4,11 @@ ; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a15 | FileCheck %s -check-prefix=CHECK-LINUXA15 ; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=swift | FileCheck %s -check-prefix=CHECK-LINUXSWIFT -; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a5 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFEA5 -; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFEA8 -; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a9 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFEA9 -; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a15 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFEA15 -; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=swift --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFESWIFT +; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a5 --denormal-fp-math=preserve-sign | FileCheck %s -check-prefix=CHECK-UNSAFEA5 +; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a8 --denormal-fp-math=preserve-sign | FileCheck %s -check-prefix=CHECK-UNSAFEA8 +; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a9 --denormal-fp-math=preserve-sign | FileCheck %s -check-prefix=CHECK-UNSAFEA9 +; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a15 --denormal-fp-math=preserve-sign| FileCheck %s -check-prefix=CHECK-UNSAFEA15 +; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=swift --denormal-fp-math=preserve-sign | FileCheck %s -check-prefix=CHECK-UNSAFESWIFT ; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a5 | FileCheck %s -check-prefix=CHECK-DARWINA5 ; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK-DARWINA8 diff --git a/llvm/test/CodeGen/ARM/pr159343.mir b/llvm/test/CodeGen/ARM/pr159343.mir new file mode 100644 index 0000000000000..9b71b1ad94b2f --- /dev/null +++ b/llvm/test/CodeGen/ARM/pr159343.mir @@ -0,0 +1,31 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -run-pass=peephole-opt -verify-machineinstrs -mtriple=thumbv7-unknown-linux-android29 %s -o - | FileCheck %s +--- +name: Test_shouldRewriteCopySrc_Invalid_SubReg +tracksRegLiveness: true +body: | + bb.1: + liveins: $r0, $r1 + + ; CHECK-LABEL: name: Test_shouldRewriteCopySrc_Invalid_SubReg + ; CHECK: liveins: $r0, $r1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[DEF:%[0-9]+]]:dpair = IMPLICIT_DEF + ; CHECK-NEXT: [[COPY:%[0-9]+]]:dpr_vfp2 = COPY [[DEF]].dsub_0 + ; CHECK-NEXT: [[VMOVRRD:%[0-9]+]]:gpr, [[VMOVRRD1:%[0-9]+]]:gpr = VMOVRRD [[COPY]], 14 /* CC::al */, $noreg + ; CHECK-NEXT: [[COPY1:%[0-9]+]]:spr = COPY [[COPY]].ssub_1 + ; CHECK-NEXT: [[DEF1:%[0-9]+]]:spr = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF2:%[0-9]+]]:spr = IMPLICIT_DEF + ; CHECK-NEXT: [[DEF3:%[0-9]+]]:spr = IMPLICIT_DEF + ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:mqpr = REG_SEQUENCE killed 
[[DEF2]], %subreg.ssub_0, killed [[DEF1]], %subreg.ssub_1, killed [[DEF3]], %subreg.ssub_2, [[COPY]].ssub_1, %subreg.ssub_3 + ; CHECK-NEXT: VST1q64 $r1, 0, killed [[REG_SEQUENCE]], 14 /* CC::al */, $noreg + %0:dpair = IMPLICIT_DEF + %1:dpr = COPY %0.dsub_0 + %2:gpr, %3:gpr = VMOVRRD killed %1, 14 /* CC::al */, $noreg + %4:spr = VMOVSR killed %3, 14 /* CC::al */, $noreg + %5:spr = IMPLICIT_DEF + %6:spr = IMPLICIT_DEF + %7:spr = IMPLICIT_DEF + %8:mqpr = REG_SEQUENCE killed %6, %subreg.ssub_0, killed %5, %subreg.ssub_1, killed %7, %subreg.ssub_2, killed %4, %subreg.ssub_3 + VST1q64 $r1, 0, killed %8, 14 /* CC::al */, $noreg +... diff --git a/llvm/test/CodeGen/ARM/shouldRewriteCopySrc.ll b/llvm/test/CodeGen/ARM/shouldRewriteCopySrc.ll index e653aaa316fed..2bf8f29eccb40 100644 --- a/llvm/test/CodeGen/ARM/shouldRewriteCopySrc.ll +++ b/llvm/test/CodeGen/ARM/shouldRewriteCopySrc.ll @@ -12,8 +12,8 @@ define float @shouldRewriteCopySrc(double %arg) #0 { ; CHECK-NEXT: @APP ; CHECK-NEXT: nop ; CHECK-NEXT: @NO_APP -; CHECK-NEXT: vmov r0, r1, d16 -; CHECK-NEXT: vmov s0, r0 +; CHECK-NEXT: vmov.f64 d0, d16 +; CHECK-NEXT: @ kill: def $s0 killed $s0 killed $d0 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: bx lr bb: diff --git a/llvm/test/CodeGen/ARM/sincos.ll b/llvm/test/CodeGen/ARM/sincos.ll index dc8fdf69ca610..e1b683a8a6657 100644 --- a/llvm/test/CodeGen/ARM/sincos.ll +++ b/llvm/test/CodeGen/ARM/sincos.ll @@ -1,8 +1,7 @@ ; RUN: llc < %s -mtriple=armv7-apple-ios6 -mcpu=cortex-a8 | FileCheck %s --check-prefix=NOOPT ; RUN: llc < %s -mtriple=armv7-apple-ios7 -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS ; RUN: llc < %s -mtriple=armv7-linux-gnu -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 \ -; RUN: --enable-unsafe-fp-math | FileCheck %s --check-prefix=SINCOS-GNU +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU ; RUN: llc < %s -mtriple=armv7-linux-android -mcpu=cortex-a8 | FileCheck %s --check-prefix=NOOPT-ANDROID ; RUN: llc < %s -mtriple=armv7-linux-android9 -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU @@ -33,6 +32,28 @@ entry: ret float %add } +define float @test1_fast(float %x) nounwind { +entry: +; SINCOS-LABEL: test1_fast: +; SINCOS: bl ___sincosf_stret + +; SINCOS-GNU-LABEL: test1_fast: +; SINCOS-GNU: bl sincosf + +; NOOPT-LABEL: test1_fast: +; NOOPT: bl _sinf +; NOOPT: bl _cosf + +; NOOPT-ANDROID-LABEL: test1_fast: +; NOOPT-ANDROID: bl sinf +; NOOPT-ANDROID: bl cosf + + %call = tail call fast float @sinf(float %x) readnone + %call1 = tail call fast float @cosf(float %x) readnone + %add = fadd float %call, %call1 + ret float %add +} + define float @test1_errno(float %x) nounwind { entry: ; SINCOS-LABEL: test1_errno: @@ -79,6 +100,28 @@ entry: ret double %add } +define double @test2_fast(double %x) nounwind { +entry: +; SINCOS-LABEL: test2_fast: +; SINCOS: bl ___sincos_stret + +; SINCOS-GNU-LABEL: test2_fast: +; SINCOS-GNU: bl sincos + +; NOOPT-LABEL: test2_fast: +; NOOPT: bl _sin +; NOOPT: bl _cos + +; NOOPT-ANDROID-LABEL: test2_fast: +; NOOPT-ANDROID: bl sin +; NOOPT-ANDROID: bl cos + + %call = tail call fast double @sin(double %x) readnone + %call1 = tail call fast double @cos(double %x) readnone + %add = fadd double %call, %call1 + ret double %add +} + define double @test2_errno(double %x) nounwind { entry: ; SINCOS-LABEL: test2_errno: diff --git a/llvm/test/CodeGen/ARM/vector-lrint.ll b/llvm/test/CodeGen/ARM/vector-lrint.ll index 
fe5e3cbcdf771..c1159da77707c 100644 --- a/llvm/test/CodeGen/ARM/vector-lrint.ll +++ b/llvm/test/CodeGen/ARM/vector-lrint.ll @@ -14,31 +14,26 @@ ; %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half> %x) ; ret <1 x iXLen> %a ; } -; declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half>) ; define <2 x iXLen> @lrint_v2f16(<2 x half> %x) { ; %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half> %x) ; ret <2 x iXLen> %a ; } -; declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half>) ; define <4 x iXLen> @lrint_v4f16(<4 x half> %x) { ; %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half> %x) ; ret <4 x iXLen> %a ; } -; declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half>) ; define <8 x iXLen> @lrint_v8f16(<8 x half> %x) { ; %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half> %x) ; ret <8 x iXLen> %a ; } -; declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half>) ; define <16 x iXLen> @lrint_v16f16(<16 x half> %x) { ; %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half> %x) ; ret <16 x iXLen> %a ; } -; declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>) define <1 x iXLen> @lrint_v1f32(<1 x float> %x) { ; LE-I32-LABEL: lrint_v1f32: @@ -76,7 +71,6 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>) define <2 x iXLen> @lrint_v2f32(<2 x float> %x) { ; LE-I32-LABEL: lrint_v2f32: @@ -160,7 +154,6 @@ define <2 x iXLen> @lrint_v2f32(<2 x float> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>) define <4 x iXLen> @lrint_v4f32(<4 x float> %x) { ; LE-I32-LABEL: lrint_v4f32: @@ -274,7 +267,6 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>) define <8 x iXLen> @lrint_v8f32(<8 x float> %x) { ; LE-I32-LABEL: lrint_v8f32: @@ -488,7 +480,6 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>) define <16 x iXLen> @lrint_v16f32(<16 x float> %x) { ; LE-I32-LABEL: lrint_v16f32: @@ -1005,7 +996,6 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>) define <1 x iXLen> @lrint_v1f64(<1 x double> %x) { ; LE-I32-LABEL: lrint_v1f64: @@ -1043,7 +1033,6 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double>) define <2 x iXLen> @lrint_v2f64(<2 x double> %x) { ; LE-I32-LABEL: lrint_v2f64: @@ -1120,7 +1109,6 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double>) define <4 x iXLen> @lrint_v4f64(<4 x double> %x) { ; LE-I32-LABEL: lrint_v4f64: @@ -1237,7 +1225,6 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double>) define <8 x iXLen> @lrint_v8f64(<8 x double> %x) { ; 
LE-I32-LABEL: lrint_v8f64: @@ -1467,7 +1454,6 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double>) define <16 x iXLen> @lrint_v16f64(<16 x double> %x) { ; LE-I32-LABEL: lrint_v16f64: @@ -2053,7 +2039,6 @@ define <16 x iXLen> @lrint_v16f64(<16 x double> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double>) define <1 x iXLen> @lrint_v1fp128(<1 x fp128> %x) { ; LE-I32-LABEL: lrint_v1fp128: @@ -2091,7 +2076,6 @@ define <1 x iXLen> @lrint_v1fp128(<1 x fp128> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128>) define <2 x iXLen> @lrint_v2fp128(<2 x fp128> %x) { ; LE-I32-LABEL: lrint_v2fp128: @@ -2194,7 +2178,6 @@ define <2 x iXLen> @lrint_v2fp128(<2 x fp128> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128>) define <4 x iXLen> @lrint_v4fp128(<4 x fp128> %x) { ; LE-I32-LABEL: lrint_v4fp128: @@ -2347,7 +2330,6 @@ define <4 x iXLen> @lrint_v4fp128(<4 x fp128> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128>) define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) { ; LE-I32-LABEL: lrint_v8fp128: @@ -2664,7 +2646,6 @@ define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128>) define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) { ; LE-I32-LABEL: lrint_v16fp128: @@ -3262,4 +3243,3 @@ define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128>) diff --git a/llvm/test/CodeGen/ARM/vminmaxnm.ll b/llvm/test/CodeGen/ARM/vminmaxnm.ll index bb3ea3067541e..be33dbfc61b04 100644 --- a/llvm/test/CodeGen/ARM/vminmaxnm.ll +++ b/llvm/test/CodeGen/ARM/vminmaxnm.ll @@ -1,146 +1,163 @@ -; RUN: llc < %s -mtriple armv8 -mattr=+neon,+fp-armv8 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple armv8-none-eabihf -mattr=+neon,+fp-armv8 -enable-no-nans-fp-math | FileCheck %s ; scalars -define float @fp-armv8_vminnm_o(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_o": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_o(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_o: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast olt float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define double @fp-armv8_vminnm_ole(double %a, double %b) { -; CHECK-LABEL: "fp-armv8_vminnm_ole": -; CHECK-NOT: vcmp -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_ole(double %a, double %b) { +; CHECK-LABEL: fparmv8_vminnm_ole: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f64 d0, d0, d1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ole double %a, %b %cond = select nsz i1 %cmp, double %a, double %b ret double %cond } -define float @fp-armv8_vminnm_o_rev(float %a, float %b) { -; 
CHECK-LABEL: "fp-armv8_vminnm_o_rev": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_o_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_o_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ogt float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define double @fp-armv8_vminnm_oge_rev(double %a, double %b) { -; CHECK-LABEL: "fp-armv8_vminnm_oge_rev": -; CHECK-NOT: vcmp -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_oge_rev(double %a, double %b) { +; CHECK-LABEL: fparmv8_vminnm_oge_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f64 d0, d0, d1 +; CHECK-NEXT: bx lr %cmp = fcmp fast oge double %a, %b %cond = select nsz i1 %cmp, double %b, double %a ret double %cond } -define float @fp-armv8_vminnm_u(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_u": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_u(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_u: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ult float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vminnm_ule(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_ule": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_ule(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_ule: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ule float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vminnm_u_rev(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_u_rev": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_u_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_u_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ugt float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define double @fp-armv8_vminnm_uge_rev(double %a, double %b) { -; CHECK-LABEL: "fp-armv8_vminnm_uge_rev": -; CHECK-NOT: vcmp -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_uge_rev(double %a, double %b) { +; CHECK-LABEL: fparmv8_vminnm_uge_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f64 d0, d0, d1 +; CHECK-NEXT: bx lr %cmp = fcmp fast uge double %a, %b %cond = select nsz i1 %cmp, double %b, double %a ret double %cond } -define float @fp-armv8_vmaxnm_o(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_o": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_o(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_o: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ogt float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vmaxnm_oge(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_oge": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_oge(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_oge: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast oge float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vmaxnm_o_rev(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_o_rev": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_o_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_o_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast 
olt float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define float @fp-armv8_vmaxnm_ole_rev(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_ole_rev": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_ole_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_ole_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ole float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define float @fp-armv8_vmaxnm_u(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_u": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_u(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_u: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ugt float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vmaxnm_uge(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_uge": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_uge(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_uge: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast uge float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vmaxnm_u_rev(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_u_rev": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_u_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_u_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ult float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define double @fp-armv8_vmaxnm_ule_rev(double %a, double %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_ule_rev": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f64 +define double @fparmv8_vmaxnm_ule_rev(double %a, double %b) { +; CHECK-LABEL: fparmv8_vmaxnm_ule_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f64 d0, d0, d1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ule double %a, %b %cond = select nsz i1 %cmp, double %b, double %a ret double %cond @@ -148,10 +165,18 @@ define double @fp-armv8_vmaxnm_ule_rev(double %a, double %b) { ; known non-NaNs -define float @fp-armv8_vminnm_NNNo(float %a) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNo": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNo(float %a) { +; CHECK-LABEL: fparmv8_vminnm_NNNo: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.f32 s2, #1.200000e+01 +; CHECK-NEXT: vldr s4, .LCPI16_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI16_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 %cmp1 = fcmp fast olt float %a, 12. %cond1 = select nsz i1 %cmp1, float %a, float 12. 
%cmp2 = fcmp fast olt float 34., %cond1 @@ -159,10 +184,22 @@ define float @fp-armv8_vminnm_NNNo(float %a) { ret float %cond2 } -define double @fp-armv8_vminnm_NNNole(double %a) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNole": -; CHECK: vminnm.f64 -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_NNNole(double %a) { +; CHECK-LABEL: fparmv8_vminnm_NNNole: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr d16, .LCPI17_0 +; CHECK-NEXT: vldr d17, .LCPI17_1 +; CHECK-NEXT: vminnm.f64 d16, d0, d16 +; CHECK-NEXT: vminnm.f64 d0, d16, d17 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI17_0: +; CHECK-NEXT: .long 0 @ double 34 +; CHECK-NEXT: .long 1078001664 +; CHECK-NEXT: .LCPI17_1: +; CHECK-NEXT: .long 0 @ double 56 +; CHECK-NEXT: .long 1078722560 %cmp1 = fcmp fast ole double %a, 34. %cond1 = select nsz i1 %cmp1, double %a, double 34. %cmp2 = fcmp fast ole double 56., %cond1 @@ -170,10 +207,20 @@ define double @fp-armv8_vminnm_NNNole(double %a) { ret double %cond2 } -define float @fp-armv8_vminnm_NNNo_rev(float %a) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNo_rev": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNo_rev(float %a) { +; CHECK-LABEL: fparmv8_vminnm_NNNo_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI18_0 +; CHECK-NEXT: vldr s4, .LCPI18_1 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI18_0: +; CHECK-NEXT: .long 0x42600000 @ float 56 +; CHECK-NEXT: .LCPI18_1: +; CHECK-NEXT: .long 0x429c0000 @ float 78 %cmp1 = fcmp fast ogt float %a, 56. %cond1 = select nsz i1 %cmp1, float 56., float %a %cmp2 = fcmp fast ogt float 78., %cond1 @@ -181,10 +228,22 @@ define float @fp-armv8_vminnm_NNNo_rev(float %a) { ret float %cond2 } -define double @fp-armv8_vminnm_NNNoge_rev(double %a) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNoge_rev": -; CHECK: vminnm.f64 -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_NNNoge_rev(double %a) { +; CHECK-LABEL: fparmv8_vminnm_NNNoge_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr d16, .LCPI19_0 +; CHECK-NEXT: vldr d17, .LCPI19_1 +; CHECK-NEXT: vminnm.f64 d16, d0, d16 +; CHECK-NEXT: vminnm.f64 d0, d16, d17 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI19_0: +; CHECK-NEXT: .long 0 @ double 78 +; CHECK-NEXT: .long 1079214080 +; CHECK-NEXT: .LCPI19_1: +; CHECK-NEXT: .long 0 @ double 90 +; CHECK-NEXT: .long 1079410688 %cmp1 = fcmp fast oge double %a, 78. %cond1 = select nsz i1 %cmp1, double 78., double %a %cmp2 = fcmp fast oge double 90., %cond1 @@ -192,10 +251,18 @@ define double @fp-armv8_vminnm_NNNoge_rev(double %a) { ret double %cond2 } -define float @fp-armv8_vminnm_NNNu(float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNu": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNu(float %b) { +; CHECK-LABEL: fparmv8_vminnm_NNNu: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.f32 s2, #1.200000e+01 +; CHECK-NEXT: vldr s4, .LCPI20_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI20_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 %cmp1 = fcmp fast ult float 12., %b %cond1 = select nsz i1 %cmp1, float 12., float %b %cmp2 = fcmp fast ult float %cond1, 34. 
@@ -203,10 +270,20 @@ define float @fp-armv8_vminnm_NNNu(float %b) { ret float %cond2 } -define float @fp-armv8_vminnm_NNNule(float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNule": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNule(float %b) { +; CHECK-LABEL: fparmv8_vminnm_NNNule: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI21_0 +; CHECK-NEXT: vldr s4, .LCPI21_1 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI21_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 +; CHECK-NEXT: .LCPI21_1: +; CHECK-NEXT: .long 0x42600000 @ float 56 %cmp1 = fcmp fast ule float 34., %b %cond1 = select nsz i1 %cmp1, float 34., float %b %cmp2 = fcmp fast ule float %cond1, 56. @@ -214,10 +291,20 @@ define float @fp-armv8_vminnm_NNNule(float %b) { ret float %cond2 } -define float @fp-armv8_vminnm_NNNu_rev(float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNu_rev": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNu_rev(float %b) { +; CHECK-LABEL: fparmv8_vminnm_NNNu_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI22_0 +; CHECK-NEXT: vldr s4, .LCPI22_1 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI22_0: +; CHECK-NEXT: .long 0x42600000 @ float 56 +; CHECK-NEXT: .LCPI22_1: +; CHECK-NEXT: .long 0x429c0000 @ float 78 %cmp1 = fcmp fast ugt float 56., %b %cond1 = select nsz i1 %cmp1, float %b, float 56. %cmp2 = fcmp fast ugt float %cond1, 78. @@ -225,10 +312,22 @@ define float @fp-armv8_vminnm_NNNu_rev(float %b) { ret float %cond2 } -define double @fp-armv8_vminnm_NNNuge_rev(double %b) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNuge_rev": -; CHECK: vminnm.f64 -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_NNNuge_rev(double %b) { +; CHECK-LABEL: fparmv8_vminnm_NNNuge_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr d16, .LCPI23_0 +; CHECK-NEXT: vldr d17, .LCPI23_1 +; CHECK-NEXT: vminnm.f64 d16, d0, d16 +; CHECK-NEXT: vminnm.f64 d0, d16, d17 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI23_0: +; CHECK-NEXT: .long 0 @ double 78 +; CHECK-NEXT: .long 1079214080 +; CHECK-NEXT: .LCPI23_1: +; CHECK-NEXT: .long 0 @ double 90 +; CHECK-NEXT: .long 1079410688 %cmp1 = fcmp fast uge double 78., %b %cond1 = select nsz i1 %cmp1, double %b, double 78. %cmp2 = fcmp fast uge double %cond1, 90. @@ -236,10 +335,18 @@ define double @fp-armv8_vminnm_NNNuge_rev(double %b) { ret double %cond2 } -define float @fp-armv8_vmaxnm_NNNo(float %a) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNo": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNo(float %a) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNo: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.f32 s2, #1.200000e+01 +; CHECK-NEXT: vldr s4, .LCPI24_0 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI24_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 %cmp1 = fcmp fast ogt float %a, 12. %cond1 = select nsz i1 %cmp1, float %a, float 12. 
%cmp2 = fcmp fast ogt float 34., %cond1 @@ -247,10 +354,20 @@ define float @fp-armv8_vmaxnm_NNNo(float %a) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNoge(float %a) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNoge": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNoge(float %a) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNoge: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI25_0 +; CHECK-NEXT: vldr s4, .LCPI25_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI25_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 +; CHECK-NEXT: .LCPI25_1: +; CHECK-NEXT: .long 0x42600000 @ float 56 %cmp1 = fcmp fast oge float %a, 34. %cond1 = select nsz i1 %cmp1, float %a, float 34. %cmp2 = fcmp fast oge float 56., %cond1 @@ -258,10 +375,20 @@ define float @fp-armv8_vmaxnm_NNNoge(float %a) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNo_rev(float %a) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNo_rev": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNo_rev(float %a) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNo_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI26_0 +; CHECK-NEXT: vldr s4, .LCPI26_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI26_0: +; CHECK-NEXT: .long 0x42600000 @ float 56 +; CHECK-NEXT: .LCPI26_1: +; CHECK-NEXT: .long 0x429c0000 @ float 78 %cmp1 = fcmp fast olt float %a, 56. %cond1 = select nsz i1 %cmp1, float 56., float %a %cmp2 = fcmp fast olt float 78., %cond1 @@ -269,10 +396,20 @@ define float @fp-armv8_vmaxnm_NNNo_rev(float %a) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNole_rev(float %a) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNole_rev": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNole_rev(float %a) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNole_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI27_0 +; CHECK-NEXT: vldr s4, .LCPI27_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI27_0: +; CHECK-NEXT: .long 0x429c0000 @ float 78 +; CHECK-NEXT: .LCPI27_1: +; CHECK-NEXT: .long 0x42b40000 @ float 90 %cmp1 = fcmp fast ole float %a, 78. %cond1 = select nsz i1 %cmp1, float 78., float %a %cmp2 = fcmp fast ole float 90., %cond1 @@ -280,10 +417,18 @@ define float @fp-armv8_vmaxnm_NNNole_rev(float %a) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNu(float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNu": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNu(float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNu: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.f32 s2, #1.200000e+01 +; CHECK-NEXT: vldr s4, .LCPI28_0 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI28_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 %cmp1 = fcmp fast ugt float 12., %b %cond1 = select nsz i1 %cmp1, float 12., float %b %cmp2 = fcmp fast ugt float %cond1, 34. 
@@ -291,10 +436,20 @@ define float @fp-armv8_vmaxnm_NNNu(float %b) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNuge(float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNuge": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNuge(float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNuge: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI29_0 +; CHECK-NEXT: vldr s4, .LCPI29_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI29_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 +; CHECK-NEXT: .LCPI29_1: +; CHECK-NEXT: .long 0x42600000 @ float 56 %cmp1 = fcmp fast uge float 34., %b %cond1 = select nsz i1 %cmp1, float 34., float %b %cmp2 = fcmp fast uge float %cond1, 56. @@ -302,10 +457,20 @@ define float @fp-armv8_vmaxnm_NNNuge(float %b) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNu_rev(float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNu_rev": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNu_rev(float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNu_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI30_0 +; CHECK-NEXT: vldr s4, .LCPI30_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI30_0: +; CHECK-NEXT: .long 0x42600000 @ float 56 +; CHECK-NEXT: .LCPI30_1: +; CHECK-NEXT: .long 0x429c0000 @ float 78 %cmp1 = fcmp fast ult float 56., %b %cond1 = select nsz i1 %cmp1, float %b, float 56. %cmp2 = fcmp fast ult float %cond1, 78. @@ -313,10 +478,22 @@ define float @fp-armv8_vmaxnm_NNNu_rev(float %b) { ret float %cond2 } -define double @fp-armv8_vmaxnm_NNNule_rev( double %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNule_rev": -; CHECK: vmaxnm.f64 -; CHECK: vmaxnm.f64 +define double @fparmv8_vmaxnm_NNNule_rev( double %b) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNule_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr d16, .LCPI31_0 +; CHECK-NEXT: vldr d17, .LCPI31_1 +; CHECK-NEXT: vmaxnm.f64 d16, d0, d16 +; CHECK-NEXT: vmaxnm.f64 d0, d16, d17 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI31_0: +; CHECK-NEXT: .long 0 @ double 78 +; CHECK-NEXT: .long 1079214080 +; CHECK-NEXT: .LCPI31_1: +; CHECK-NEXT: .long 0 @ double 90 +; CHECK-NEXT: .long 1079410688 %cmp1 = fcmp fast ule double 78., %b %cond1 = select nsz i1 %cmp1, double %b, double 78. %cmp2 = fcmp fast ule double %cond1, 90. @@ -324,11 +501,17 @@ define double @fp-armv8_vmaxnm_NNNule_rev( double %b) { ret double %cond2 } -define float @fp-armv8_vminmaxnm_0(float %a) { -; CHECK-LABEL: "fp-armv8_vminmaxnm_0": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vminmaxnm_0(float %a) { +; CHECK-LABEL: fparmv8_vminmaxnm_0: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI32_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI32_0: +; CHECK-NEXT: .long 0x00000000 @ float 0 %cmp1 = fcmp fast olt float %a, 0. %cond1 = select nsz i1 %cmp1, float %a, float 0. %cmp2 = fcmp fast ogt float %cond1, 0. 
@@ -336,11 +519,17 @@ define float @fp-armv8_vminmaxnm_0(float %a) { ret float %cond2 } -define float @fp-armv8_vminmaxnm_neg0(float %a) { -; CHECK-LABEL: "fp-armv8_vminmaxnm_neg0": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vminmaxnm_neg0(float %a) { +; CHECK-LABEL: fparmv8_vminmaxnm_neg0: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI33_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI33_0: +; CHECK-NEXT: .long 0x80000000 @ float -0 %cmp1 = fcmp fast olt float %a, -0. %cond1 = select nsz i1 %cmp1, float %a, float -0. %cmp2 = fcmp fast ugt float %cond1, -0. @@ -348,11 +537,17 @@ define float @fp-armv8_vminmaxnm_neg0(float %a) { ret float %cond2 } -define float @fp-armv8_vminmaxnm_e_0(float %a) { -; CHECK-LABEL: "fp-armv8_vminmaxnm_e_0": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vminmaxnm_e_0(float %a) { +; CHECK-LABEL: fparmv8_vminmaxnm_e_0: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI34_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI34_0: +; CHECK-NEXT: .long 0x00000000 @ float 0 %cmp1 = fcmp fast ule float 0., %a %cond1 = select nsz i1 %cmp1, float 0., float %a %cmp2 = fcmp fast uge float 0., %cond1 @@ -360,11 +555,17 @@ define float @fp-armv8_vminmaxnm_e_0(float %a) { ret float %cond2 } -define float @fp-armv8_vminmaxnm_e_neg0(float %a) { -; CHECK-LABEL: "fp-armv8_vminmaxnm_e_neg0": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vminmaxnm_e_neg0(float %a) { +; CHECK-LABEL: fparmv8_vminmaxnm_e_neg0: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI35_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI35_0: +; CHECK-NEXT: .long 0x80000000 @ float -0 %cmp1 = fcmp fast ule float -0., %a %cond1 = select nsz i1 %cmp1, float -0., float %a %cmp2 = fcmp fast oge float -0., %cond1 diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinations.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinations.ll index d6cb05b5d0dd9..850b9a7f36ff3 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinations.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinations.ll @@ -11,8 +11,9 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature -!3 = !{ !5 } ; list of root signature elements -!5 = !{ !"DescriptorTable", i32 0, !6, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20 } +!3 = !{ !5, !21 } ; list of root signature elements +!5 = !{ !"DescriptorTable", i32 0, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20 } +!21 = !{ !"DescriptorTable", i32 0, !6, !8, !9 } ; typedef enum D3D12_DESCRIPTOR_RANGE_FLAGS { ; NONE = 0, @@ -53,37 +54,20 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !20 = !{ !"UAV", i32 5, i32 1, i32 15, i32 5, i32 65540 } ;DXC:- Name: RTS0 -;DXC-NEXT: Size: 380 +;DXC-NEXT: Size: 400 ;DXC-NEXT: RootSignature: ;DXC-NEXT: Version: 2 -;DXC-NEXT: NumRootParameters: 1 
+;DXC-NEXT: NumRootParameters: 2 ;DXC-NEXT: RootParametersOffset: 24 ;DXC-NEXT: NumStaticSamplers: 0 -;DXC-NEXT: StaticSamplersOffset: 380 +;DXC-NEXT: StaticSamplersOffset: 400 ;DXC-NEXT: Parameters: ;DXC-NEXT: - ParameterType: DescriptorTable ;DXC-NEXT: ShaderVisibility: All ;DXC-NEXT: Table: -;DXC-NEXT: NumRanges: 14 -;DXC-NEXT: RangesOffset: 44 +;DXC-NEXT: NumRanges: 11 +;DXC-NEXT: RangesOffset: 56 ;DXC-NEXT: Ranges: -;DXC-NEXT: - RangeType: Sampler -;DXC-NEXT: NumDescriptors: 1 -;DXC-NEXT: BaseShaderRegister: 0 -;DXC-NEXT: RegisterSpace: 1 -;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 -;DXC-NEXT: - RangeType: Sampler -;DXC-NEXT: NumDescriptors: 1 -;DXC-NEXT: BaseShaderRegister: 0 -;DXC-NEXT: RegisterSpace: 3 -;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 -;DXC-NEXT: DESCRIPTORS_VOLATILE: true -;DXC-NEXT: - RangeType: Sampler -;DXC-NEXT: NumDescriptors: 1 -;DXC-NEXT: BaseShaderRegister: 0 -;DXC-NEXT: RegisterSpace: 4 -;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 -;DXC-NEXT: DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS: true ;DXC-NEXT: - RangeType: SRV ;DXC-NEXT: NumDescriptors: 1 ;DXC-NEXT: BaseShaderRegister: 0 @@ -155,3 +139,26 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } ;DXC-NEXT: OffsetInDescriptorsFromTableStart: 5 ;DXC-NEXT: DATA_STATIC_WHILE_SET_AT_EXECUTE: true ;DXC-NEXT: DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS: true +;DXC-NEXT: - ParameterType: DescriptorTable +;DXC-NEXT: ShaderVisibility: All +;DXC-NEXT: Table: +;DXC-NEXT: NumRanges: 3 +;DXC-NEXT: RangesOffset: 328 +;DXC-NEXT: Ranges: +;DXC-NEXT: - RangeType: Sampler +;DXC-NEXT: NumDescriptors: 1 +;DXC-NEXT: BaseShaderRegister: 0 +;DXC-NEXT: RegisterSpace: 1 +;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 +;DXC-NEXT: - RangeType: Sampler +;DXC-NEXT: NumDescriptors: 1 +;DXC-NEXT: BaseShaderRegister: 0 +;DXC-NEXT: RegisterSpace: 3 +;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 +;DXC-NEXT: DESCRIPTORS_VOLATILE: true +;DXC-NEXT: - RangeType: Sampler +;DXC-NEXT: NumDescriptors: 1 +;DXC-NEXT: BaseShaderRegister: 0 +;DXC-NEXT: RegisterSpace: 4 +;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 +;DXC-NEXT: DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS: true diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinationsV1.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinationsV1.ll index c65eab5f4aa5f..098b2d51a0bf4 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinationsV1.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinationsV1.ll @@ -11,33 +11,40 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 1 } ; function, root signature -!3 = !{ !5 } ; list of root signature elements -!5 = !{ !"DescriptorTable", i32 0, !6, !7 } +!3 = !{ !5, !8 } ; list of root signature elements +!5 = !{ !"DescriptorTable", i32 0, !6 } !6 = !{ !"Sampler", i32 1, i32 1, i32 0, i32 -1, i32 1 } +!8 = !{ !"DescriptorTable", i32 0, !7 } !7 = !{ !"UAV", i32 5, i32 1, i32 10, i32 5, i32 3 } ; DXC: - Name: RTS0 -; DXC-NEXT: Size: 84 +; DXC-NEXT: Size: 104 ; DXC-NEXT: RootSignature: ; DXC-NEXT: Version: 1 -; DXC-NEXT: NumRootParameters: 1 +; DXC-NEXT: NumRootParameters: 2 ; DXC-NEXT: RootParametersOffset: 24 ; DXC-NEXT: NumStaticSamplers: 0 -; DXC-NEXT: 
StaticSamplersOffset: 84 +; DXC-NEXT: StaticSamplersOffset: 104 ; DXC-NEXT: Parameters: ; DXC-NEXT: - ParameterType: DescriptorTable ; DXC-NEXT: ShaderVisibility: All ; DXC-NEXT: Table: -; DXC-NEXT: NumRanges: 2 -; DXC-NEXT: RangesOffset: 44 +; DXC-NEXT: NumRanges: 1 +; DXC-NEXT: RangesOffset: 56 ; DXC-NEXT: Ranges: ; DXC-NEXT: - RangeType: Sampler ; DXC-NEXT: NumDescriptors: 1 ; DXC-NEXT: BaseShaderRegister: 1 ; DXC-NEXT: RegisterSpace: 0 ; DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 -; DXC-NEXT: - RangeType: UAV +; DXC-NEXT: - ParameterType: DescriptorTable +; DXC-NEXT: ShaderVisibility: All +; DXC-NEXT: Table: +; DXC-NEXT: NumRanges: 1 +; DXC-NEXT: RangesOffset: 84 +; DXC-NEXT: Ranges: +; DXC-NEXT: - RangeType: UAV ; DXC-NEXT: NumDescriptors: 5 ; DXC-NEXT: BaseShaderRegister: 1 ; DXC-NEXT: RegisterSpace: 10 diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Invalid-Version.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Invalid-Version.ll new file mode 100644 index 0000000000000..26867e6d7ec25 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Invalid-Version.ll @@ -0,0 +1,20 @@ +; RUN: not opt -passes='print' %s -S -o - 2>&1 | FileCheck %s + +target triple = "dxil-unknown-shadermodel6.0-compute" + + +; CHECK: error: Invalid value for Version: 4 +; CHECK-NOT: Root Signature Definitions +define void @main() #0 { +entry: + ret void +} +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + + +!dx.rootsignatures = !{!2, !3, !4, !5} ; list of function/root signature pairs +!2 = !{ ptr @main, !6, i32 1 } ; function, root signature +!3 = !{ ptr @main, !6, i32 4 } ; function, root signature +!4 = !{ ptr @main, !6, i32 2 } ; function, root signature +!5 = !{ ptr @main, !6, i32 3 } ; function, root signature +!6 = !{ } ; list of root signature elements diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll index 288dea00b6e55..b043ea1418df6 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 666, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 666, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll index e9abcf9669999..8219ffdd679d2 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root 
signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 666, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 666, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll index 238f488ee78d6..31d8dd10f3e22 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 666, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 666, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll index 8dc69eb1f9d7c..2bb4af5d9c0f2 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 666, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 666, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll index b2c8faf8d4a0a..62fda735b6860 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 666, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 666, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git 
a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll index 758d2629ed78e..7e8de14160ce9 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 45, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 45, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll new file mode 100644 index 0000000000000..8f7ef8857ad15 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll @@ -0,0 +1,19 @@ +; RUN: not opt -passes='print' %s -S -o - 2>&1 | FileCheck %s + + +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK: error: Invalid value for Static Sampler Flag: 4 +; CHECK-NOT: Root Signature Definitions + +define void @main() #0 { +entry: + ret void +} +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + + +!dx.rootsignatures = !{!2} ; list of function/root signature pairs +!2 = !{ ptr @main, !3, i32 3 } ; function, root signature +!3 = !{ !5 } ; list of root signature elements +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 4 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll index 47d4b52d72e8e..312e7697d4f2a 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 666, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 666, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll index 855e0c0cb6e51..80fd208a1bceb 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll @@ -16,4 +16,4 @@ 
attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 0x7FF8000000000000, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 0x7FF8000000000000, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll index 812749b9ed824..5daaf69a40062 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float 0x7FF8000000000000, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float 0x7FF8000000000000, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll index 6898aec6f2e49..423987b0e2624 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 6.660000e+02, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 6.660000e+02, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll index dc6ee4290b532..af630dcdd0300 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 4294967280, i32 0 } +!5 = !{ !"StaticSampler", i32 4, 
i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 4294967280, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll index 6cee1dd95fd81..bd752f0519da4 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 4294967295, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 4294967295, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll index fa5bf12e2b8cd..ca0c02d64983b 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll @@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 666 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 666, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll index 1dd470d7fb822..77c5c7af66247 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll @@ -15,7 +15,7 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature !3 = !{ !5 } ; list of root signature elements -!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } ; DXC: - Name: RTS0 ; DXC-NEXT: Size: 76 diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll new file mode 100644 index 0000000000000..7e56f0408e3f3 --- /dev/null +++ 
b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll @@ -0,0 +1,42 @@ +; RUN: opt %s -dxil-embed -dxil-globals -S -o - | FileCheck %s +; RUN: llc %s --filetype=obj -o - | obj2yaml | FileCheck %s --check-prefix=DXC + +target triple = "dxil-unknown-shadermodel6.0-compute" + +; CHECK: @dx.rts0 = private constant [248 x i8] c"{{.*}}", section "RTS0", align 4 + +define void @main() #0 { +entry: + ret void +} +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + + +!dx.rootsignatures = !{!2} ; list of function/root signature pairs +!2 = !{ ptr @main, !3, i32 3 } ; function, root signature +!3 = !{ !5, !6, !7, !8 } ; list of root signature elements +!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 1 } +!6 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 43, i32 0, i32 0, i32 2 } +!7 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 44, i32 0, i32 0, i32 0 } +!8 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 45, i32 0, i32 0, i32 3 } + +; DXC: - Name: RTS0 +; DXC-NEXT: Size: 248 +; DXC-NEXT: RootSignature: +; DXC-NEXT: Version: 3 +; DXC-NEXT: NumRootParameters: 0 +; DXC-NEXT: RootParametersOffset: 24 +; DXC-NEXT: NumStaticSamplers: 4 +; DXC-NEXT: StaticSamplersOffset: 24 +; DXC-NEXT: Parameters: [] +; DXC-NEXT: Samplers: +; DXC-LABEL: ShaderRegister: 42 +; DXC: SAMPLER_FLAG_UINT_BORDER_COLOR: true +; DXC-LABEL: ShaderRegister: 43 +; DXC: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: true +; DXC-LABEL: ShaderRegister: 44 +; DXC-NOT: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: +; DXC-NOT: SAMPLER_FLAG_UINT_BORDER_COLOR: +; DXC-LABEL: ShaderRegister: 45 +; DXC: SAMPLER_FLAG_UINT_BORDER_COLOR: true +; DXC-NEXT: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: true diff --git a/llvm/test/CodeGen/DirectX/isnan.ll b/llvm/test/CodeGen/DirectX/isnan.ll new file mode 100644 index 0000000000000..2becd75209331 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/isnan.ll @@ -0,0 +1,53 @@ +; RUN: opt -S -dxil-intrinsic-expansion -scalarizer -dxil-op-lower -mtriple=dxil-pc-shadermodel6.9-library %s | FileCheck %s --check-prefixes=CHECK,SM69CHECK +; RUN: opt -S -dxil-intrinsic-expansion -mtriple=dxil-pc-shadermodel6.8-library %s | FileCheck %s --check-prefixes=CHECK,SMOLDCHECK + +; Make sure dxil operation function calls for isnan are generated for float and half. 
+ +define noundef i1 @isnan_float(float noundef %a) { +entry: + ; SM69CHECK: call i1 @dx.op.isSpecialFloat.f32(i32 8, float %{{.*}}) #[[#ATTR:]] + ; SMOLDCHECK: call i1 @llvm.dx.isnan.f32(float %{{.*}}) + %dx.isnan = call i1 @llvm.dx.isnan.f32(float %a) + ret i1 %dx.isnan +} + +define noundef i1 @isnan_half(half noundef %a) { +entry: + ; SM69CHECK: call i1 @dx.op.isSpecialFloat.f16(i32 8, half %{{.*}}) #[[#ATTR]] + ; SMOLDCHECK: [[BITCAST:%.*]] = bitcast half %{{.*}} to i16 + ; SMOLDCHECK: [[ANDHIGH:%.*]] = and i16 [[BITCAST]], 31744 + ; SMOLDCHECK: [[CMPHIGH:%.*]] = icmp eq i16 [[ANDHIGH]], 31744 + ; SMOLDCHECK: [[ANDLOW:%.*]] = and i16 [[BITCAST]], 1023 + ; SMOLDCHECK: [[CMPLOW:%.*]] = icmp ne i16 [[ANDLOW]], 0 + ; SMOLDCHECK: [[AND:%.*]] = and i1 [[CMPHIGH]], [[CMPLOW]] + %dx.isnan = call i1 @llvm.dx.isnan.f16(half %a) + ret i1 %dx.isnan +} + +define noundef <4 x i1> @isnan_half4(<4 x half> noundef %p0) { +entry: + ; SM69CHECK: call i1 @dx.op.isSpecialFloat.f16(i32 8, half + ; SM69CHECK: call i1 @dx.op.isSpecialFloat.f16(i32 8, half + ; SM69CHECK: call i1 @dx.op.isSpecialFloat.f16(i32 8, half + ; SM69CHECK: call i1 @dx.op.isSpecialFloat.f16(i32 8, half + ; SMOLDCHECK: [[BITCAST:%.*]] = bitcast <4 x half> %{{.*}} to <4 x i16> + ; SMOLDCHECK: [[ANDHIGH:%.*]] = and <4 x i16> [[BITCAST]], splat (i16 31744) + ; SMOLDCHECK: [[CMPHIGH:%.*]] = icmp eq <4 x i16> [[ANDHIGH]], splat (i16 31744) + ; SMOLDCHECK: [[ANDLOW:%.*]] = and <4 x i16> [[BITCAST]], splat (i16 1023) + ; SMOLDCHECK: [[CMPLOW:%.*]] = icmp ne <4 x i16> [[ANDLOW]], zeroinitializer + ; SMOLDCHECK: [[AND:%.*]] = and <4 x i1> [[CMPHIGH]], [[CMPLOW]] + %hlsl.isnan = call <4 x i1> @llvm.dx.isnan.v4f16(<4 x half> %p0) + ret <4 x i1> %hlsl.isnan +} + +define noundef <3 x i1> @isnan_float3(<3 x float> noundef %p0) { +entry: + ; SM69CHECK: call i1 @dx.op.isSpecialFloat.f32(i32 8, float + ; SM69CHECK: call i1 @dx.op.isSpecialFloat.f32(i32 8, float + ; SM69CHECK: call i1 @dx.op.isSpecialFloat.f32(i32 8, float + ; SMOLDCHECK: = call <3 x i1> @llvm.dx.isnan.v3f32(<3 x float> + %hlsl.isnan = call <3 x i1> @llvm.dx.isnan.v3f32(<3 x float> %p0) + ret <3 x i1> %hlsl.isnan +} + +; CHECK: attributes #{{[0-9]*}} = {{{.*}} memory(none) {{.*}}} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-appending-limits.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-appending-limits.ll new file mode 100644 index 0000000000000..7fa42e9697898 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-appending-limits.ll @@ -0,0 +1,16 @@ +; RUN: opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; A descriptor range can be placed at UINT_MAX, matching DXC's behaviour +; CHECK-NOT: error: + +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5} +!4 = !{!"UAV", i32 1, i32 1, i32 0, i32 4294967294, i32 0} +!5 = !{!"UAV", i32 1, i32 0, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-deny-no-binding.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-deny-no-binding.ll new file mode 100644 index 0000000000000..15326d438f021 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-deny-no-binding.ll @@ -0,0 +1,17 @@ +; RUN: opt -S -passes='dxil-post-optimization-validation' %s +; This is a valid case where no resource is being used +target triple = "dxil-pc-shadermodel6.6-pixel" + 
+define void @CSMain() #0 { +entry: + ret void +} +attributes #0 = { noinline nounwind "exp-shader"="cs" "hlsl.numthreads"="1,2,1" "hlsl.shader"="geometry" } + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3, !4} +!2 = !{!"RootConstants", i32 0, i32 2, i32 0, i32 4} +!3 = !{ !"RootFlags", i32 294 } ; 294 = deny_pixel/hull/vertex/amplification_shader_root_access +!4 = !{ !"RootSRV", i32 0, i32 1, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-limits-multiples.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-limits-multiples.ll new file mode 100644 index 0000000000000..e51f15a1d3fc2 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-limits-multiples.ll @@ -0,0 +1,16 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; CHECK: error: Offset overflow for descriptor range: CBV(register=2, space=0). + +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5, !6} +!4 = !{!"CBV", i32 1, i32 0, i32 0, i32 4294967294, i32 0} +!5 = !{!"CBV", i32 1, i32 1, i32 0, i32 -1, i32 0} +!6 = !{!"CBV", i32 1, i32 2, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-overflow.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-overflow.ll new file mode 100644 index 0000000000000..1bc97d9ae2091 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-overflow.ll @@ -0,0 +1,17 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; This test checks if a resource is implicitly overflowing. That means, it is appending a resource after an unbounded range. + +; CHECK: error: Range UAV(register=0, space=0) cannot be appended after an unbounded range + +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5} +!4 = !{!"UAV", i32 -1, i32 1, i32 0, i32 2, i32 0} +!5 = !{!"UAV", i32 1, i32 0, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-multiple-shader.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-multiple-shader.ll new file mode 100644 index 0000000000000..b11cce694bd25 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-multiple-shader.ll @@ -0,0 +1,20 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' %s 2>&1 | FileCheck %s +; CHECK: error: Shader has root bindings but root signature uses a DENY flag to disallow root binding access to the shader stage. 
+target triple = "dxil-pc-shadermodel6.6-pixel" + +%__cblayout_CB = type <{ float }> + +@CB.str = private unnamed_addr constant [3 x i8] c"CB\00", align 1 + +define void @CSMain() "hlsl.shader"="compute" { +entry: + %CB = tail call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 4, 0)) @llvm.dx.resource.handlefrombinding(i32 0, i32 2, i32 1, i32 0, ptr nonnull @CB.str) + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3} +!2 = !{!"RootConstants", i32 0, i32 2, i32 0, i32 4} +!3 = !{!"RootFlags", i32 294} ; 294 = deny_pixel/hull/vertex/amplification_shader_root_access diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-root-descriptor.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-root-descriptor.ll new file mode 100644 index 0000000000000..6d323757d5897 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-root-descriptor.ll @@ -0,0 +1,20 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' %s 2>&1 | FileCheck %s + +; CHECK: error: Shader has root bindings but root signature uses a DENY flag to disallow root binding access to the shader stage. +target triple = "dxil-pc-shadermodel6.6-pixel" + +@SB.str = private unnamed_addr constant [3 x i8] c"SB\00", align 1 + +define void @CSMain() "hlsl.shader"="pixel" { +entry: + %SB = tail call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr nonnull @SB.str) + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3} +!2 = !{!"DescriptorTable", i32 0, !4} +!4 = !{!"SRV", i32 1, i32 0, i32 0, i32 -1, i32 4} +!3 = !{!"RootFlags", i32 32} ; 32 = deny_pixel_shader_root_access diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-single-shader.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-single-shader.ll new file mode 100644 index 0000000000000..4e50f50049b0e --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-single-shader.ll @@ -0,0 +1,19 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' %s 2>&1 | FileCheck %s + +; CHECK: error: Shader has root bindings but root signature uses a DENY flag to disallow root binding access to the shader stage. +target triple = "dxil-pc-shadermodel6.6-pixel" + +@SB.str = private unnamed_addr constant [3 x i8] c"SB\00", align 1 + +define void @CSMain() "hlsl.shader"="pixel" { +entry: + %SB = tail call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr nonnull @SB.str) + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3} +!2 = !{!"RootSRV", i32 0, i32 0, i32 0, i32 4} +!3 = !{!"RootFlags", i32 32} ; 32 = deny_pixel_shader_root_access diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-offset-overflow.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-offset-overflow.ll new file mode 100644 index 0000000000000..6e56949562740 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-offset-overflow.ll @@ -0,0 +1,15 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; CHECK: error: Offset overflow for descriptor range: UAV(register=0, space=0). 
+ +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5} +!4 = !{!"UAV", i32 100, i32 0, i32 0, i32 4294967294, i32 0} +!5 = !{!"UAV", i32 1, i32 101, i32 0, i32 10, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-register-overflow.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-register-overflow.ll new file mode 100644 index 0000000000000..bff1727c18924 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-register-overflow.ll @@ -0,0 +1,13 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; CHECK: error: Overflow for shader register range: UAV(register=4294967295, space=0) +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4} +!4 = !{!"UAV", i32 100, i32 4294967295, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler-mix.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler-mix.ll new file mode 100644 index 0000000000000..95d00619b02a0 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler-mix.ll @@ -0,0 +1,15 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; CHECK: error: Samplers cannot be mixed with other resource types in a descriptor table, UAV(location=0) + +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5} +!4 = !{!"UAV", i32 1, i32 0, i32 0, i32 -1, i32 0} +!5 = !{!"Sampler", i32 2, i32 0, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll index c244095520468..b68606d656d75 100644 --- a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll @@ -10,6 +10,6 @@ entry: !0 = !{ptr @CSMain, !1, i32 2} !1 = !{!2, !3} -!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0 } +!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0, i32 0 } !3 = !{!"DescriptorTable", i32 0, !4} !4 = !{!"Sampler", i32 1, i32 42, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll index 9ac02ebbc0965..7c836e2f85d68 100644 --- a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll @@ -10,5 +10,5 @@ entry: !0 = !{ptr @CSMain, !1, i32 2} !1 = !{!2, !3} -!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0 } -!3 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float 
-1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 } +!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0, i32 0 } +!3 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-not-dening-shader.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-not-dening-shader.ll new file mode 100644 index 0000000000000..775fc3512ca84 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-not-dening-shader.ll @@ -0,0 +1,21 @@ +; RUN: opt -S -passes='dxil-post-optimization-validation' %s +; Valid scenario where shader stage is not blocked from accessing root bindings +target triple = "dxil-pc-shadermodel6.6-geometry" + +%__cblayout_CB = type <{ float }> + +@CB.str = private unnamed_addr constant [3 x i8] c"CB\00", align 1 + +define void @CSMain() "hlsl.shader"="geometry" { +entry: + %CB = tail call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 4, 0)) @llvm.dx.resource.handlefrombinding(i32 0, i32 2, i32 1, i32 0, ptr nonnull @CB.str) + ret void +} +attributes #0 = { noinline nounwind "exp-shader"="cs" "hlsl.numthreads"="1,2,1" "hlsl.shader"="geometry" } + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3} +!2 = !{ !"RootFlags", i32 294 } ; 294 = deny_pixel/hull/vertex/amplification_shader_root_access +!3 = !{ !"RootCBV", i32 0, i32 2, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/Hexagon/autohvx/deal-128b.ll b/llvm/test/CodeGen/Hexagon/autohvx/deal-128b.ll index 30a3b2d7e93a2..138beced0d2ec 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/deal-128b.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/deal-128b.ll @@ -517,7 +517,7 @@ define <256 x i8> @vdeal_3f(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_40: -; CHECK: [[REG40:r[0-9]+]] = #64 +; CHECK: [[REG40:r[0-9]+]] = #-64 ; CHECK: vshuff(v1,v0,[[REG40]]) define <256 x i8> @vdeal_40(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 
97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -525,7 +525,7 @@ define <256 x i8> @vdeal_40(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_41: -; CHECK: [[REG41:r[0-9]+]] = #65 +; CHECK: [[REG41:r[0-9]+]] = #-63 ; CHECK: vdeal(v1,v0,[[REG41]]) define <256 x i8> @vdeal_41(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 64, i32 2, i32 66, i32 4, i32 68, i32 6, i32 70, i32 8, i32 72, i32 10, i32 74, i32 12, i32 76, i32 14, i32 78, i32 16, i32 80, i32 18, i32 82, i32 20, i32 84, i32 22, i32 86, i32 24, i32 88, i32 26, i32 90, i32 28, i32 92, i32 30, i32 94, i32 32, i32 96, i32 34, i32 98, i32 36, i32 100, i32 38, i32 102, i32 40, i32 104, i32 42, i32 106, i32 44, i32 108, i32 46, i32 110, i32 48, i32 112, i32 50, i32 114, i32 52, i32 116, i32 54, i32 118, i32 56, i32 120, i32 58, i32 122, i32 60, i32 124, i32 62, i32 126, i32 128, i32 192, i32 130, i32 194, i32 132, i32 196, i32 134, i32 198, i32 136, i32 200, i32 138, i32 202, i32 140, i32 204, i32 142, i32 206, i32 144, i32 208, i32 146, i32 210, i32 148, i32 212, i32 150, i32 214, i32 152, i32 216, i32 154, i32 218, i32 156, i32 220, i32 158, i32 222, i32 160, i32 224, i32 162, i32 226, i32 164, i32 228, i32 166, i32 230, i32 168, i32 232, i32 170, i32 234, i32 172, i32 236, i32 174, i32 238, i32 176, i32 240, i32 178, i32 242, i32 180, i32 244, i32 182, i32 246, i32 184, i32 248, i32 186, i32 250, i32 188, i32 252, i32 190, i32 254, i32 1, i32 65, i32 3, i32 67, i32 5, i32 69, i32 7, i32 71, i32 9, i32 73, i32 11, i32 75, i32 13, i32 77, i32 15, i32 79, i32 17, i32 81, i32 19, i32 83, i32 21, i32 85, i32 23, i32 87, i32 25, i32 89, i32 27, i32 91, i32 29, i32 93, i32 31, i32 95, i32 33, i32 97, i32 35, i32 99, i32 37, i32 101, i32 39, i32 103, i32 41, i32 105, i32 43, i32 107, i32 45, i32 109, i32 47, i32 111, i32 49, i32 113, i32 51, i32 115, i32 53, i32 117, i32 55, i32 119, i32 57, i32 121, i32 59, i32 123, i32 61, i32 125, i32 63, i32 127, i32 129, i32 193, i32 131, i32 195, i32 133, i32 197, i32 135, i32 199, i32 137, i32 201, i32 139, i32 203, i32 141, i32 205, i32 143, i32 207, i32 145, i32 209, i32 147, i32 211, i32 149, i32 213, i32 151, i32 215, i32 153, i32 217, i32 155, i32 219, i32 157, i32 221, i32 159, i32 223, i32 161, i32 225, i32 163, i32 227, i32 165, i32 229, i32 167, i32 231, i32 169, i32 233, i32 171, i32 235, i32 173, i32 237, i32 175, i32 239, i32 177, i32 241, i32 179, i32 243, i32 181, i32 245, i32 183, i32 247, i32 185, i32 249, i32 187, i32 251, i32 189, i32 253, i32 191, i32 255> @@ -533,7 +533,7 @@ define <256 x i8> @vdeal_41(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_42: -; CHECK: [[REG42:r[0-9]+]] = #66 +; CHECK: 
[[REG42:r[0-9]+]] = #-62 ; CHECK: vdeal(v1,v0,[[REG42]]) define <256 x i8> @vdeal_42(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 64, i32 65, i32 4, i32 5, i32 68, i32 69, i32 8, i32 9, i32 72, i32 73, i32 12, i32 13, i32 76, i32 77, i32 16, i32 17, i32 80, i32 81, i32 20, i32 21, i32 84, i32 85, i32 24, i32 25, i32 88, i32 89, i32 28, i32 29, i32 92, i32 93, i32 32, i32 33, i32 96, i32 97, i32 36, i32 37, i32 100, i32 101, i32 40, i32 41, i32 104, i32 105, i32 44, i32 45, i32 108, i32 109, i32 48, i32 49, i32 112, i32 113, i32 52, i32 53, i32 116, i32 117, i32 56, i32 57, i32 120, i32 121, i32 60, i32 61, i32 124, i32 125, i32 128, i32 129, i32 192, i32 193, i32 132, i32 133, i32 196, i32 197, i32 136, i32 137, i32 200, i32 201, i32 140, i32 141, i32 204, i32 205, i32 144, i32 145, i32 208, i32 209, i32 148, i32 149, i32 212, i32 213, i32 152, i32 153, i32 216, i32 217, i32 156, i32 157, i32 220, i32 221, i32 160, i32 161, i32 224, i32 225, i32 164, i32 165, i32 228, i32 229, i32 168, i32 169, i32 232, i32 233, i32 172, i32 173, i32 236, i32 237, i32 176, i32 177, i32 240, i32 241, i32 180, i32 181, i32 244, i32 245, i32 184, i32 185, i32 248, i32 249, i32 188, i32 189, i32 252, i32 253, i32 2, i32 3, i32 66, i32 67, i32 6, i32 7, i32 70, i32 71, i32 10, i32 11, i32 74, i32 75, i32 14, i32 15, i32 78, i32 79, i32 18, i32 19, i32 82, i32 83, i32 22, i32 23, i32 86, i32 87, i32 26, i32 27, i32 90, i32 91, i32 30, i32 31, i32 94, i32 95, i32 34, i32 35, i32 98, i32 99, i32 38, i32 39, i32 102, i32 103, i32 42, i32 43, i32 106, i32 107, i32 46, i32 47, i32 110, i32 111, i32 50, i32 51, i32 114, i32 115, i32 54, i32 55, i32 118, i32 119, i32 58, i32 59, i32 122, i32 123, i32 62, i32 63, i32 126, i32 127, i32 130, i32 131, i32 194, i32 195, i32 134, i32 135, i32 198, i32 199, i32 138, i32 139, i32 202, i32 203, i32 142, i32 143, i32 206, i32 207, i32 146, i32 147, i32 210, i32 211, i32 150, i32 151, i32 214, i32 215, i32 154, i32 155, i32 218, i32 219, i32 158, i32 159, i32 222, i32 223, i32 162, i32 163, i32 226, i32 227, i32 166, i32 167, i32 230, i32 231, i32 170, i32 171, i32 234, i32 235, i32 174, i32 175, i32 238, i32 239, i32 178, i32 179, i32 242, i32 243, i32 182, i32 183, i32 246, i32 247, i32 186, i32 187, i32 250, i32 251, i32 190, i32 191, i32 254, i32 255> @@ -541,7 +541,7 @@ define <256 x i8> @vdeal_42(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_43: -; CHECK: [[REG43:r[0-9]+]] = #67 +; CHECK: [[REG43:r[0-9]+]] = #-61 ; CHECK: vdeal(v1,v0,[[REG43]]) define <256 x i8> @vdeal_43(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 64, i32 66, i32 4, i32 6, i32 68, i32 70, i32 8, i32 10, i32 72, i32 74, i32 12, i32 14, i32 76, i32 78, i32 16, i32 18, i32 80, i32 82, i32 20, i32 22, i32 84, i32 86, i32 24, i32 26, i32 88, i32 90, i32 28, i32 30, i32 92, i32 94, i32 32, i32 34, i32 96, i32 98, i32 36, i32 38, i32 100, i32 102, i32 40, i32 42, i32 104, i32 106, i32 44, i32 46, i32 108, i32 110, i32 48, i32 50, i32 112, i32 114, i32 52, i32 54, i32 116, i32 118, i32 56, i32 58, i32 120, i32 122, i32 60, i32 62, i32 124, i32 126, i32 128, i32 130, i32 192, i32 194, i32 132, i32 134, i32 196, i32 198, i32 136, i32 138, i32 200, i32 202, i32 140, i32 142, i32 204, i32 206, i32 144, i32 146, i32 208, i32 210, i32 148, i32 150, i32 212, i32 214, i32 152, i32 154, i32 216, i32 218, i32 156, i32 158, i32 220, i32 222, i32 160, i32 162, 
i32 224, i32 226, i32 164, i32 166, i32 228, i32 230, i32 168, i32 170, i32 232, i32 234, i32 172, i32 174, i32 236, i32 238, i32 176, i32 178, i32 240, i32 242, i32 180, i32 182, i32 244, i32 246, i32 184, i32 186, i32 248, i32 250, i32 188, i32 190, i32 252, i32 254, i32 1, i32 3, i32 65, i32 67, i32 5, i32 7, i32 69, i32 71, i32 9, i32 11, i32 73, i32 75, i32 13, i32 15, i32 77, i32 79, i32 17, i32 19, i32 81, i32 83, i32 21, i32 23, i32 85, i32 87, i32 25, i32 27, i32 89, i32 91, i32 29, i32 31, i32 93, i32 95, i32 33, i32 35, i32 97, i32 99, i32 37, i32 39, i32 101, i32 103, i32 41, i32 43, i32 105, i32 107, i32 45, i32 47, i32 109, i32 111, i32 49, i32 51, i32 113, i32 115, i32 53, i32 55, i32 117, i32 119, i32 57, i32 59, i32 121, i32 123, i32 61, i32 63, i32 125, i32 127, i32 129, i32 131, i32 193, i32 195, i32 133, i32 135, i32 197, i32 199, i32 137, i32 139, i32 201, i32 203, i32 141, i32 143, i32 205, i32 207, i32 145, i32 147, i32 209, i32 211, i32 149, i32 151, i32 213, i32 215, i32 153, i32 155, i32 217, i32 219, i32 157, i32 159, i32 221, i32 223, i32 161, i32 163, i32 225, i32 227, i32 165, i32 167, i32 229, i32 231, i32 169, i32 171, i32 233, i32 235, i32 173, i32 175, i32 237, i32 239, i32 177, i32 179, i32 241, i32 243, i32 181, i32 183, i32 245, i32 247, i32 185, i32 187, i32 249, i32 251, i32 189, i32 191, i32 253, i32 255> @@ -549,7 +549,7 @@ define <256 x i8> @vdeal_43(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_44: -; CHECK: [[REG44:r[0-9]+]] = #68 +; CHECK: [[REG44:r[0-9]+]] = #-60 ; CHECK: vdeal(v1,v0,[[REG44]]) define <256 x i8> @vdeal_44(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 64, i32 65, i32 66, i32 67, i32 8, i32 9, i32 10, i32 11, i32 72, i32 73, i32 74, i32 75, i32 16, i32 17, i32 18, i32 19, i32 80, i32 81, i32 82, i32 83, i32 24, i32 25, i32 26, i32 27, i32 88, i32 89, i32 90, i32 91, i32 32, i32 33, i32 34, i32 35, i32 96, i32 97, i32 98, i32 99, i32 40, i32 41, i32 42, i32 43, i32 104, i32 105, i32 106, i32 107, i32 48, i32 49, i32 50, i32 51, i32 112, i32 113, i32 114, i32 115, i32 56, i32 57, i32 58, i32 59, i32 120, i32 121, i32 122, i32 123, i32 128, i32 129, i32 130, i32 131, i32 192, i32 193, i32 194, i32 195, i32 136, i32 137, i32 138, i32 139, i32 200, i32 201, i32 202, i32 203, i32 144, i32 145, i32 146, i32 147, i32 208, i32 209, i32 210, i32 211, i32 152, i32 153, i32 154, i32 155, i32 216, i32 217, i32 218, i32 219, i32 160, i32 161, i32 162, i32 163, i32 224, i32 225, i32 226, i32 227, i32 168, i32 169, i32 170, i32 171, i32 232, i32 233, i32 234, i32 235, i32 176, i32 177, i32 178, i32 179, i32 240, i32 241, i32 242, i32 243, i32 184, i32 185, i32 186, i32 187, i32 248, i32 249, i32 250, i32 251, i32 4, i32 5, i32 6, i32 7, i32 68, i32 69, i32 70, i32 71, i32 12, i32 13, i32 14, i32 15, i32 76, i32 77, i32 78, i32 79, i32 20, i32 21, i32 22, i32 23, i32 84, i32 85, i32 86, i32 87, i32 28, i32 29, i32 30, i32 31, i32 92, i32 93, i32 94, i32 95, i32 36, i32 37, i32 38, i32 39, i32 100, i32 101, i32 102, i32 103, i32 44, i32 45, i32 46, i32 47, i32 108, i32 109, i32 110, i32 111, i32 52, i32 53, i32 54, i32 55, i32 116, i32 117, i32 118, i32 119, i32 60, i32 61, i32 62, i32 63, i32 124, i32 125, i32 126, i32 127, i32 132, i32 133, i32 134, i32 135, i32 196, i32 197, i32 198, i32 199, i32 140, i32 141, i32 142, i32 143, i32 204, i32 205, i32 206, i32 207, i32 148, i32 149, i32 150, i32 151, i32 212, i32 213, i32 214, i32 215, i32 156, 
i32 157, i32 158, i32 159, i32 220, i32 221, i32 222, i32 223, i32 164, i32 165, i32 166, i32 167, i32 228, i32 229, i32 230, i32 231, i32 172, i32 173, i32 174, i32 175, i32 236, i32 237, i32 238, i32 239, i32 180, i32 181, i32 182, i32 183, i32 244, i32 245, i32 246, i32 247, i32 188, i32 189, i32 190, i32 191, i32 252, i32 253, i32 254, i32 255> @@ -557,7 +557,7 @@ define <256 x i8> @vdeal_44(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_45: -; CHECK: [[REG45:r[0-9]+]] = #69 +; CHECK: [[REG45:r[0-9]+]] = #-59 ; CHECK: vdeal(v1,v0,[[REG45]]) define <256 x i8> @vdeal_45(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 4, i32 2, i32 6, i32 64, i32 68, i32 66, i32 70, i32 8, i32 12, i32 10, i32 14, i32 72, i32 76, i32 74, i32 78, i32 16, i32 20, i32 18, i32 22, i32 80, i32 84, i32 82, i32 86, i32 24, i32 28, i32 26, i32 30, i32 88, i32 92, i32 90, i32 94, i32 32, i32 36, i32 34, i32 38, i32 96, i32 100, i32 98, i32 102, i32 40, i32 44, i32 42, i32 46, i32 104, i32 108, i32 106, i32 110, i32 48, i32 52, i32 50, i32 54, i32 112, i32 116, i32 114, i32 118, i32 56, i32 60, i32 58, i32 62, i32 120, i32 124, i32 122, i32 126, i32 128, i32 132, i32 130, i32 134, i32 192, i32 196, i32 194, i32 198, i32 136, i32 140, i32 138, i32 142, i32 200, i32 204, i32 202, i32 206, i32 144, i32 148, i32 146, i32 150, i32 208, i32 212, i32 210, i32 214, i32 152, i32 156, i32 154, i32 158, i32 216, i32 220, i32 218, i32 222, i32 160, i32 164, i32 162, i32 166, i32 224, i32 228, i32 226, i32 230, i32 168, i32 172, i32 170, i32 174, i32 232, i32 236, i32 234, i32 238, i32 176, i32 180, i32 178, i32 182, i32 240, i32 244, i32 242, i32 246, i32 184, i32 188, i32 186, i32 190, i32 248, i32 252, i32 250, i32 254, i32 1, i32 5, i32 3, i32 7, i32 65, i32 69, i32 67, i32 71, i32 9, i32 13, i32 11, i32 15, i32 73, i32 77, i32 75, i32 79, i32 17, i32 21, i32 19, i32 23, i32 81, i32 85, i32 83, i32 87, i32 25, i32 29, i32 27, i32 31, i32 89, i32 93, i32 91, i32 95, i32 33, i32 37, i32 35, i32 39, i32 97, i32 101, i32 99, i32 103, i32 41, i32 45, i32 43, i32 47, i32 105, i32 109, i32 107, i32 111, i32 49, i32 53, i32 51, i32 55, i32 113, i32 117, i32 115, i32 119, i32 57, i32 61, i32 59, i32 63, i32 121, i32 125, i32 123, i32 127, i32 129, i32 133, i32 131, i32 135, i32 193, i32 197, i32 195, i32 199, i32 137, i32 141, i32 139, i32 143, i32 201, i32 205, i32 203, i32 207, i32 145, i32 149, i32 147, i32 151, i32 209, i32 213, i32 211, i32 215, i32 153, i32 157, i32 155, i32 159, i32 217, i32 221, i32 219, i32 223, i32 161, i32 165, i32 163, i32 167, i32 225, i32 229, i32 227, i32 231, i32 169, i32 173, i32 171, i32 175, i32 233, i32 237, i32 235, i32 239, i32 177, i32 181, i32 179, i32 183, i32 241, i32 245, i32 243, i32 247, i32 185, i32 189, i32 187, i32 191, i32 249, i32 253, i32 251, i32 255> @@ -565,7 +565,7 @@ define <256 x i8> @vdeal_45(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_46: -; CHECK: [[REG46:r[0-9]+]] = #70 +; CHECK: [[REG46:r[0-9]+]] = #-58 ; CHECK: vdeal(v1,v0,[[REG46]]) define <256 x i8> @vdeal_46(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 4, i32 5, i32 64, i32 65, i32 68, i32 69, i32 8, i32 9, i32 12, i32 13, i32 72, i32 73, i32 76, i32 77, i32 16, i32 17, i32 20, i32 21, i32 80, i32 81, i32 84, i32 85, i32 24, i32 25, i32 28, i32 29, i32 88, i32 89, i32 92, i32 93, i32 32, i32 33, i32 36, i32 37, i32 96, i32 97, i32 100, i32 101, i32 
40, i32 41, i32 44, i32 45, i32 104, i32 105, i32 108, i32 109, i32 48, i32 49, i32 52, i32 53, i32 112, i32 113, i32 116, i32 117, i32 56, i32 57, i32 60, i32 61, i32 120, i32 121, i32 124, i32 125, i32 128, i32 129, i32 132, i32 133, i32 192, i32 193, i32 196, i32 197, i32 136, i32 137, i32 140, i32 141, i32 200, i32 201, i32 204, i32 205, i32 144, i32 145, i32 148, i32 149, i32 208, i32 209, i32 212, i32 213, i32 152, i32 153, i32 156, i32 157, i32 216, i32 217, i32 220, i32 221, i32 160, i32 161, i32 164, i32 165, i32 224, i32 225, i32 228, i32 229, i32 168, i32 169, i32 172, i32 173, i32 232, i32 233, i32 236, i32 237, i32 176, i32 177, i32 180, i32 181, i32 240, i32 241, i32 244, i32 245, i32 184, i32 185, i32 188, i32 189, i32 248, i32 249, i32 252, i32 253, i32 2, i32 3, i32 6, i32 7, i32 66, i32 67, i32 70, i32 71, i32 10, i32 11, i32 14, i32 15, i32 74, i32 75, i32 78, i32 79, i32 18, i32 19, i32 22, i32 23, i32 82, i32 83, i32 86, i32 87, i32 26, i32 27, i32 30, i32 31, i32 90, i32 91, i32 94, i32 95, i32 34, i32 35, i32 38, i32 39, i32 98, i32 99, i32 102, i32 103, i32 42, i32 43, i32 46, i32 47, i32 106, i32 107, i32 110, i32 111, i32 50, i32 51, i32 54, i32 55, i32 114, i32 115, i32 118, i32 119, i32 58, i32 59, i32 62, i32 63, i32 122, i32 123, i32 126, i32 127, i32 130, i32 131, i32 134, i32 135, i32 194, i32 195, i32 198, i32 199, i32 138, i32 139, i32 142, i32 143, i32 202, i32 203, i32 206, i32 207, i32 146, i32 147, i32 150, i32 151, i32 210, i32 211, i32 214, i32 215, i32 154, i32 155, i32 158, i32 159, i32 218, i32 219, i32 222, i32 223, i32 162, i32 163, i32 166, i32 167, i32 226, i32 227, i32 230, i32 231, i32 170, i32 171, i32 174, i32 175, i32 234, i32 235, i32 238, i32 239, i32 178, i32 179, i32 182, i32 183, i32 242, i32 243, i32 246, i32 247, i32 186, i32 187, i32 190, i32 191, i32 250, i32 251, i32 254, i32 255> @@ -573,7 +573,7 @@ define <256 x i8> @vdeal_46(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_47: -; CHECK: [[REG47:r[0-9]+]] = #71 +; CHECK: [[REG47:r[0-9]+]] = #-57 ; CHECK: vdeal(v1,v0,[[REG47]]) define <256 x i8> @vdeal_47(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 4, i32 6, i32 64, i32 66, i32 68, i32 70, i32 8, i32 10, i32 12, i32 14, i32 72, i32 74, i32 76, i32 78, i32 16, i32 18, i32 20, i32 22, i32 80, i32 82, i32 84, i32 86, i32 24, i32 26, i32 28, i32 30, i32 88, i32 90, i32 92, i32 94, i32 32, i32 34, i32 36, i32 38, i32 96, i32 98, i32 100, i32 102, i32 40, i32 42, i32 44, i32 46, i32 104, i32 106, i32 108, i32 110, i32 48, i32 50, i32 52, i32 54, i32 112, i32 114, i32 116, i32 118, i32 56, i32 58, i32 60, i32 62, i32 120, i32 122, i32 124, i32 126, i32 128, i32 130, i32 132, i32 134, i32 192, i32 194, i32 196, i32 198, i32 136, i32 138, i32 140, i32 142, i32 200, i32 202, i32 204, i32 206, i32 144, i32 146, i32 148, i32 150, i32 208, i32 210, i32 212, i32 214, i32 152, i32 154, i32 156, i32 158, i32 216, i32 218, i32 220, i32 222, i32 160, i32 162, i32 164, i32 166, i32 224, i32 226, i32 228, i32 230, i32 168, i32 170, i32 172, i32 174, i32 232, i32 234, i32 236, i32 238, i32 176, i32 178, i32 180, i32 182, i32 240, i32 242, i32 244, i32 246, i32 184, i32 186, i32 188, i32 190, i32 248, i32 250, i32 252, i32 254, i32 1, i32 3, i32 5, i32 7, i32 65, i32 67, i32 69, i32 71, i32 9, i32 11, i32 13, i32 15, i32 73, i32 75, i32 77, i32 79, i32 17, i32 19, i32 21, i32 23, i32 81, i32 83, i32 85, i32 87, i32 25, i32 27, i32 29, i32 31, i32 89, i32 91, i32 
93, i32 95, i32 33, i32 35, i32 37, i32 39, i32 97, i32 99, i32 101, i32 103, i32 41, i32 43, i32 45, i32 47, i32 105, i32 107, i32 109, i32 111, i32 49, i32 51, i32 53, i32 55, i32 113, i32 115, i32 117, i32 119, i32 57, i32 59, i32 61, i32 63, i32 121, i32 123, i32 125, i32 127, i32 129, i32 131, i32 133, i32 135, i32 193, i32 195, i32 197, i32 199, i32 137, i32 139, i32 141, i32 143, i32 201, i32 203, i32 205, i32 207, i32 145, i32 147, i32 149, i32 151, i32 209, i32 211, i32 213, i32 215, i32 153, i32 155, i32 157, i32 159, i32 217, i32 219, i32 221, i32 223, i32 161, i32 163, i32 165, i32 167, i32 225, i32 227, i32 229, i32 231, i32 169, i32 171, i32 173, i32 175, i32 233, i32 235, i32 237, i32 239, i32 177, i32 179, i32 181, i32 183, i32 241, i32 243, i32 245, i32 247, i32 185, i32 187, i32 189, i32 191, i32 249, i32 251, i32 253, i32 255> @@ -581,7 +581,7 @@ define <256 x i8> @vdeal_47(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_48: -; CHECK: [[REG48:r[0-9]+]] = #72 +; CHECK: [[REG48:r[0-9]+]] = #-56 ; CHECK: vdeal(v1,v0,[[REG48]]) define <256 x i8> @vdeal_48(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -589,7 +589,7 @@ define <256 x i8> @vdeal_48(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_49: -; CHECK: [[REG49:r[0-9]+]] = #73 +; CHECK: 
[[REG49:r[0-9]+]] = #-55 ; CHECK: vdeal(v1,v0,[[REG49]]) define <256 x i8> @vdeal_49(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 64, i32 72, i32 66, i32 74, i32 68, i32 76, i32 70, i32 78, i32 16, i32 24, i32 18, i32 26, i32 20, i32 28, i32 22, i32 30, i32 80, i32 88, i32 82, i32 90, i32 84, i32 92, i32 86, i32 94, i32 32, i32 40, i32 34, i32 42, i32 36, i32 44, i32 38, i32 46, i32 96, i32 104, i32 98, i32 106, i32 100, i32 108, i32 102, i32 110, i32 48, i32 56, i32 50, i32 58, i32 52, i32 60, i32 54, i32 62, i32 112, i32 120, i32 114, i32 122, i32 116, i32 124, i32 118, i32 126, i32 128, i32 136, i32 130, i32 138, i32 132, i32 140, i32 134, i32 142, i32 192, i32 200, i32 194, i32 202, i32 196, i32 204, i32 198, i32 206, i32 144, i32 152, i32 146, i32 154, i32 148, i32 156, i32 150, i32 158, i32 208, i32 216, i32 210, i32 218, i32 212, i32 220, i32 214, i32 222, i32 160, i32 168, i32 162, i32 170, i32 164, i32 172, i32 166, i32 174, i32 224, i32 232, i32 226, i32 234, i32 228, i32 236, i32 230, i32 238, i32 176, i32 184, i32 178, i32 186, i32 180, i32 188, i32 182, i32 190, i32 240, i32 248, i32 242, i32 250, i32 244, i32 252, i32 246, i32 254, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, i32 65, i32 73, i32 67, i32 75, i32 69, i32 77, i32 71, i32 79, i32 17, i32 25, i32 19, i32 27, i32 21, i32 29, i32 23, i32 31, i32 81, i32 89, i32 83, i32 91, i32 85, i32 93, i32 87, i32 95, i32 33, i32 41, i32 35, i32 43, i32 37, i32 45, i32 39, i32 47, i32 97, i32 105, i32 99, i32 107, i32 101, i32 109, i32 103, i32 111, i32 49, i32 57, i32 51, i32 59, i32 53, i32 61, i32 55, i32 63, i32 113, i32 121, i32 115, i32 123, i32 117, i32 125, i32 119, i32 127, i32 129, i32 137, i32 131, i32 139, i32 133, i32 141, i32 135, i32 143, i32 193, i32 201, i32 195, i32 203, i32 197, i32 205, i32 199, i32 207, i32 145, i32 153, i32 147, i32 155, i32 149, i32 157, i32 151, i32 159, i32 209, i32 217, i32 211, i32 219, i32 213, i32 221, i32 215, i32 223, i32 161, i32 169, i32 163, i32 171, i32 165, i32 173, i32 167, i32 175, i32 225, i32 233, i32 227, i32 235, i32 229, i32 237, i32 231, i32 239, i32 177, i32 185, i32 179, i32 187, i32 181, i32 189, i32 183, i32 191, i32 241, i32 249, i32 243, i32 251, i32 245, i32 253, i32 247, i32 255> @@ -597,7 +597,7 @@ define <256 x i8> @vdeal_49(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_4a: -; CHECK: [[REG4a:r[0-9]+]] = #74 +; CHECK: [[REG4a:r[0-9]+]] = #-54 ; CHECK: vdeal(v1,v0,[[REG4a]]) define <256 x i8> @vdeal_4a(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 12, i32 13, i32 64, i32 65, i32 72, i32 73, i32 68, i32 69, i32 76, i32 77, i32 16, i32 17, i32 24, i32 25, i32 20, i32 21, i32 28, i32 29, i32 80, i32 81, i32 88, i32 89, i32 84, i32 85, i32 92, i32 93, i32 32, i32 33, i32 40, i32 41, i32 36, i32 37, i32 44, i32 45, i32 96, i32 97, i32 104, i32 105, i32 100, i32 101, i32 108, i32 109, i32 48, i32 49, i32 56, i32 57, i32 52, i32 53, i32 60, i32 61, i32 112, i32 113, i32 120, i32 121, i32 116, i32 117, i32 124, i32 125, i32 128, i32 129, i32 136, i32 137, i32 132, i32 133, i32 140, i32 141, i32 192, i32 193, i32 200, i32 201, i32 196, i32 197, i32 204, i32 205, i32 144, i32 145, i32 152, i32 153, i32 148, i32 149, i32 156, i32 157, i32 208, i32 209, i32 216, i32 217, i32 212, i32 213, i32 220, i32 221, i32 160, i32 161, 
i32 168, i32 169, i32 164, i32 165, i32 172, i32 173, i32 224, i32 225, i32 232, i32 233, i32 228, i32 229, i32 236, i32 237, i32 176, i32 177, i32 184, i32 185, i32 180, i32 181, i32 188, i32 189, i32 240, i32 241, i32 248, i32 249, i32 244, i32 245, i32 252, i32 253, i32 2, i32 3, i32 10, i32 11, i32 6, i32 7, i32 14, i32 15, i32 66, i32 67, i32 74, i32 75, i32 70, i32 71, i32 78, i32 79, i32 18, i32 19, i32 26, i32 27, i32 22, i32 23, i32 30, i32 31, i32 82, i32 83, i32 90, i32 91, i32 86, i32 87, i32 94, i32 95, i32 34, i32 35, i32 42, i32 43, i32 38, i32 39, i32 46, i32 47, i32 98, i32 99, i32 106, i32 107, i32 102, i32 103, i32 110, i32 111, i32 50, i32 51, i32 58, i32 59, i32 54, i32 55, i32 62, i32 63, i32 114, i32 115, i32 122, i32 123, i32 118, i32 119, i32 126, i32 127, i32 130, i32 131, i32 138, i32 139, i32 134, i32 135, i32 142, i32 143, i32 194, i32 195, i32 202, i32 203, i32 198, i32 199, i32 206, i32 207, i32 146, i32 147, i32 154, i32 155, i32 150, i32 151, i32 158, i32 159, i32 210, i32 211, i32 218, i32 219, i32 214, i32 215, i32 222, i32 223, i32 162, i32 163, i32 170, i32 171, i32 166, i32 167, i32 174, i32 175, i32 226, i32 227, i32 234, i32 235, i32 230, i32 231, i32 238, i32 239, i32 178, i32 179, i32 186, i32 187, i32 182, i32 183, i32 190, i32 191, i32 242, i32 243, i32 250, i32 251, i32 246, i32 247, i32 254, i32 255> @@ -605,7 +605,7 @@ define <256 x i8> @vdeal_4a(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_4b: -; CHECK: [[REG4b:r[0-9]+]] = #75 +; CHECK: [[REG4b:r[0-9]+]] = #-53 ; CHECK: vdeal(v1,v0,[[REG4b]]) define <256 x i8> @vdeal_4b(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14, i32 64, i32 66, i32 72, i32 74, i32 68, i32 70, i32 76, i32 78, i32 16, i32 18, i32 24, i32 26, i32 20, i32 22, i32 28, i32 30, i32 80, i32 82, i32 88, i32 90, i32 84, i32 86, i32 92, i32 94, i32 32, i32 34, i32 40, i32 42, i32 36, i32 38, i32 44, i32 46, i32 96, i32 98, i32 104, i32 106, i32 100, i32 102, i32 108, i32 110, i32 48, i32 50, i32 56, i32 58, i32 52, i32 54, i32 60, i32 62, i32 112, i32 114, i32 120, i32 122, i32 116, i32 118, i32 124, i32 126, i32 128, i32 130, i32 136, i32 138, i32 132, i32 134, i32 140, i32 142, i32 192, i32 194, i32 200, i32 202, i32 196, i32 198, i32 204, i32 206, i32 144, i32 146, i32 152, i32 154, i32 148, i32 150, i32 156, i32 158, i32 208, i32 210, i32 216, i32 218, i32 212, i32 214, i32 220, i32 222, i32 160, i32 162, i32 168, i32 170, i32 164, i32 166, i32 172, i32 174, i32 224, i32 226, i32 232, i32 234, i32 228, i32 230, i32 236, i32 238, i32 176, i32 178, i32 184, i32 186, i32 180, i32 182, i32 188, i32 190, i32 240, i32 242, i32 248, i32 250, i32 244, i32 246, i32 252, i32 254, i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15, i32 65, i32 67, i32 73, i32 75, i32 69, i32 71, i32 77, i32 79, i32 17, i32 19, i32 25, i32 27, i32 21, i32 23, i32 29, i32 31, i32 81, i32 83, i32 89, i32 91, i32 85, i32 87, i32 93, i32 95, i32 33, i32 35, i32 41, i32 43, i32 37, i32 39, i32 45, i32 47, i32 97, i32 99, i32 105, i32 107, i32 101, i32 103, i32 109, i32 111, i32 49, i32 51, i32 57, i32 59, i32 53, i32 55, i32 61, i32 63, i32 113, i32 115, i32 121, i32 123, i32 117, i32 119, i32 125, i32 127, i32 129, i32 131, i32 137, i32 139, i32 133, i32 135, i32 141, i32 143, i32 193, i32 195, i32 201, i32 203, i32 197, i32 199, i32 205, i32 207, i32 145, i32 147, i32 153, i32 155, i32 149, i32 151, i32 157, i32 159, i32 
209, i32 211, i32 217, i32 219, i32 213, i32 215, i32 221, i32 223, i32 161, i32 163, i32 169, i32 171, i32 165, i32 167, i32 173, i32 175, i32 225, i32 227, i32 233, i32 235, i32 229, i32 231, i32 237, i32 239, i32 177, i32 179, i32 185, i32 187, i32 181, i32 183, i32 189, i32 191, i32 241, i32 243, i32 249, i32 251, i32 245, i32 247, i32 253, i32 255> @@ -613,7 +613,7 @@ define <256 x i8> @vdeal_4b(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_4c: -; CHECK: [[REG4c:r[0-9]+]] = #76 +; CHECK: [[REG4c:r[0-9]+]] = #-52 ; CHECK: vdeal(v1,v0,[[REG4c]]) define <256 x i8> @vdeal_4c(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 64, i32 65, i32 66, i32 67, i32 72, i32 73, i32 74, i32 75, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27, i32 80, i32 81, i32 82, i32 83, i32 88, i32 89, i32 90, i32 91, i32 32, i32 33, i32 34, i32 35, i32 40, i32 41, i32 42, i32 43, i32 96, i32 97, i32 98, i32 99, i32 104, i32 105, i32 106, i32 107, i32 48, i32 49, i32 50, i32 51, i32 56, i32 57, i32 58, i32 59, i32 112, i32 113, i32 114, i32 115, i32 120, i32 121, i32 122, i32 123, i32 128, i32 129, i32 130, i32 131, i32 136, i32 137, i32 138, i32 139, i32 192, i32 193, i32 194, i32 195, i32 200, i32 201, i32 202, i32 203, i32 144, i32 145, i32 146, i32 147, i32 152, i32 153, i32 154, i32 155, i32 208, i32 209, i32 210, i32 211, i32 216, i32 217, i32 218, i32 219, i32 160, i32 161, i32 162, i32 163, i32 168, i32 169, i32 170, i32 171, i32 224, i32 225, i32 226, i32 227, i32 232, i32 233, i32 234, i32 235, i32 176, i32 177, i32 178, i32 179, i32 184, i32 185, i32 186, i32 187, i32 240, i32 241, i32 242, i32 243, i32 248, i32 249, i32 250, i32 251, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 68, i32 69, i32 70, i32 71, i32 76, i32 77, i32 78, i32 79, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31, i32 84, i32 85, i32 86, i32 87, i32 92, i32 93, i32 94, i32 95, i32 36, i32 37, i32 38, i32 39, i32 44, i32 45, i32 46, i32 47, i32 100, i32 101, i32 102, i32 103, i32 108, i32 109, i32 110, i32 111, i32 52, i32 53, i32 54, i32 55, i32 60, i32 61, i32 62, i32 63, i32 116, i32 117, i32 118, i32 119, i32 124, i32 125, i32 126, i32 127, i32 132, i32 133, i32 134, i32 135, i32 140, i32 141, i32 142, i32 143, i32 196, i32 197, i32 198, i32 199, i32 204, i32 205, i32 206, i32 207, i32 148, i32 149, i32 150, i32 151, i32 156, i32 157, i32 158, i32 159, i32 212, i32 213, i32 214, i32 215, i32 220, i32 221, i32 222, i32 223, i32 164, i32 165, i32 166, i32 167, i32 172, i32 173, i32 174, i32 175, i32 228, i32 229, i32 230, i32 231, i32 236, i32 237, i32 238, i32 239, i32 180, i32 181, i32 182, i32 183, i32 188, i32 189, i32 190, i32 191, i32 244, i32 245, i32 246, i32 247, i32 252, i32 253, i32 254, i32 255> @@ -621,7 +621,7 @@ define <256 x i8> @vdeal_4c(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_4d: -; CHECK: [[REG4d:r[0-9]+]] = #77 +; CHECK: [[REG4d:r[0-9]+]] = #-51 ; CHECK: vdeal(v1,v0,[[REG4d]]) define <256 x i8> @vdeal_4d(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 4, i32 2, i32 6, i32 8, i32 12, i32 10, i32 14, i32 64, i32 68, i32 66, i32 70, i32 72, i32 76, i32 74, i32 78, i32 16, i32 20, i32 18, i32 22, i32 24, i32 28, i32 26, i32 30, i32 80, i32 84, i32 82, i32 86, i32 88, i32 92, i32 90, i32 94, i32 32, i32 36, i32 34, i32 38, i32 40, i32 44, i32 42, i32 46, 
i32 96, i32 100, i32 98, i32 102, i32 104, i32 108, i32 106, i32 110, i32 48, i32 52, i32 50, i32 54, i32 56, i32 60, i32 58, i32 62, i32 112, i32 116, i32 114, i32 118, i32 120, i32 124, i32 122, i32 126, i32 128, i32 132, i32 130, i32 134, i32 136, i32 140, i32 138, i32 142, i32 192, i32 196, i32 194, i32 198, i32 200, i32 204, i32 202, i32 206, i32 144, i32 148, i32 146, i32 150, i32 152, i32 156, i32 154, i32 158, i32 208, i32 212, i32 210, i32 214, i32 216, i32 220, i32 218, i32 222, i32 160, i32 164, i32 162, i32 166, i32 168, i32 172, i32 170, i32 174, i32 224, i32 228, i32 226, i32 230, i32 232, i32 236, i32 234, i32 238, i32 176, i32 180, i32 178, i32 182, i32 184, i32 188, i32 186, i32 190, i32 240, i32 244, i32 242, i32 246, i32 248, i32 252, i32 250, i32 254, i32 1, i32 5, i32 3, i32 7, i32 9, i32 13, i32 11, i32 15, i32 65, i32 69, i32 67, i32 71, i32 73, i32 77, i32 75, i32 79, i32 17, i32 21, i32 19, i32 23, i32 25, i32 29, i32 27, i32 31, i32 81, i32 85, i32 83, i32 87, i32 89, i32 93, i32 91, i32 95, i32 33, i32 37, i32 35, i32 39, i32 41, i32 45, i32 43, i32 47, i32 97, i32 101, i32 99, i32 103, i32 105, i32 109, i32 107, i32 111, i32 49, i32 53, i32 51, i32 55, i32 57, i32 61, i32 59, i32 63, i32 113, i32 117, i32 115, i32 119, i32 121, i32 125, i32 123, i32 127, i32 129, i32 133, i32 131, i32 135, i32 137, i32 141, i32 139, i32 143, i32 193, i32 197, i32 195, i32 199, i32 201, i32 205, i32 203, i32 207, i32 145, i32 149, i32 147, i32 151, i32 153, i32 157, i32 155, i32 159, i32 209, i32 213, i32 211, i32 215, i32 217, i32 221, i32 219, i32 223, i32 161, i32 165, i32 163, i32 167, i32 169, i32 173, i32 171, i32 175, i32 225, i32 229, i32 227, i32 231, i32 233, i32 237, i32 235, i32 239, i32 177, i32 181, i32 179, i32 183, i32 185, i32 189, i32 187, i32 191, i32 241, i32 245, i32 243, i32 247, i32 249, i32 253, i32 251, i32 255> @@ -629,7 +629,7 @@ define <256 x i8> @vdeal_4d(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_4e: -; CHECK: [[REG4e:r[0-9]+]] = #78 +; CHECK: [[REG4e:r[0-9]+]] = #-50 ; CHECK: vdeal(v1,v0,[[REG4e]]) define <256 x i8> @vdeal_4e(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 64, i32 65, i32 68, i32 69, i32 72, i32 73, i32 76, i32 77, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29, i32 80, i32 81, i32 84, i32 85, i32 88, i32 89, i32 92, i32 93, i32 32, i32 33, i32 36, i32 37, i32 40, i32 41, i32 44, i32 45, i32 96, i32 97, i32 100, i32 101, i32 104, i32 105, i32 108, i32 109, i32 48, i32 49, i32 52, i32 53, i32 56, i32 57, i32 60, i32 61, i32 112, i32 113, i32 116, i32 117, i32 120, i32 121, i32 124, i32 125, i32 128, i32 129, i32 132, i32 133, i32 136, i32 137, i32 140, i32 141, i32 192, i32 193, i32 196, i32 197, i32 200, i32 201, i32 204, i32 205, i32 144, i32 145, i32 148, i32 149, i32 152, i32 153, i32 156, i32 157, i32 208, i32 209, i32 212, i32 213, i32 216, i32 217, i32 220, i32 221, i32 160, i32 161, i32 164, i32 165, i32 168, i32 169, i32 172, i32 173, i32 224, i32 225, i32 228, i32 229, i32 232, i32 233, i32 236, i32 237, i32 176, i32 177, i32 180, i32 181, i32 184, i32 185, i32 188, i32 189, i32 240, i32 241, i32 244, i32 245, i32 248, i32 249, i32 252, i32 253, i32 2, i32 3, i32 6, i32 7, i32 10, i32 11, i32 14, i32 15, i32 66, i32 67, i32 70, i32 71, i32 74, i32 75, i32 78, i32 79, i32 18, i32 19, i32 22, i32 23, i32 26, i32 27, i32 30, i32 31, i32 82, i32 83, i32 86, i32 87, i32 90, i32 
91, i32 94, i32 95, i32 34, i32 35, i32 38, i32 39, i32 42, i32 43, i32 46, i32 47, i32 98, i32 99, i32 102, i32 103, i32 106, i32 107, i32 110, i32 111, i32 50, i32 51, i32 54, i32 55, i32 58, i32 59, i32 62, i32 63, i32 114, i32 115, i32 118, i32 119, i32 122, i32 123, i32 126, i32 127, i32 130, i32 131, i32 134, i32 135, i32 138, i32 139, i32 142, i32 143, i32 194, i32 195, i32 198, i32 199, i32 202, i32 203, i32 206, i32 207, i32 146, i32 147, i32 150, i32 151, i32 154, i32 155, i32 158, i32 159, i32 210, i32 211, i32 214, i32 215, i32 218, i32 219, i32 222, i32 223, i32 162, i32 163, i32 166, i32 167, i32 170, i32 171, i32 174, i32 175, i32 226, i32 227, i32 230, i32 231, i32 234, i32 235, i32 238, i32 239, i32 178, i32 179, i32 182, i32 183, i32 186, i32 187, i32 190, i32 191, i32 242, i32 243, i32 246, i32 247, i32 250, i32 251, i32 254, i32 255> @@ -637,7 +637,7 @@ define <256 x i8> @vdeal_4e(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_4f: -; CHECK: [[REG4f:r[0-9]+]] = #79 +; CHECK: [[REG4f:r[0-9]+]] = #-49 ; CHECK: vdeal(v1,v0,[[REG4f]]) define <256 x i8> @vdeal_4f(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126, i32 128, i32 130, i32 132, i32 134, i32 136, i32 138, i32 140, i32 142, i32 192, i32 194, i32 196, i32 198, i32 200, i32 202, i32 204, i32 206, i32 144, i32 146, i32 148, i32 150, i32 152, i32 154, i32 156, i32 158, i32 208, i32 210, i32 212, i32 214, i32 216, i32 218, i32 220, i32 222, i32 160, i32 162, i32 164, i32 166, i32 168, i32 170, i32 172, i32 174, i32 224, i32 226, i32 228, i32 230, i32 232, i32 234, i32 236, i32 238, i32 176, i32 178, i32 180, i32 182, i32 184, i32 186, i32 188, i32 190, i32 240, i32 242, i32 244, i32 246, i32 248, i32 250, i32 252, i32 254, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127, i32 129, i32 131, i32 133, i32 135, i32 137, i32 139, i32 141, i32 143, i32 193, i32 195, i32 197, i32 199, i32 201, i32 203, i32 205, i32 207, i32 145, i32 147, i32 149, i32 151, i32 153, i32 155, i32 157, i32 159, i32 209, i32 211, i32 213, i32 215, i32 217, i32 219, i32 221, i32 223, i32 161, i32 163, i32 165, i32 167, i32 169, i32 171, i32 173, i32 175, i32 225, i32 227, i32 229, i32 231, i32 233, i32 235, i32 237, i32 239, i32 177, i32 179, i32 181, i32 183, i32 185, i32 187, i32 189, i32 191, i32 241, i32 243, i32 245, i32 247, i32 249, i32 251, i32 253, i32 255> @@ -645,7 +645,7 @@ define <256 x i8> @vdeal_4f(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_50: -; CHECK: [[REG50:r[0-9]+]] = #80 +; 
CHECK: [[REG50:r[0-9]+]] = #-48 ; CHECK: vdeal(v1,v0,[[REG50]]) define <256 x i8> @vdeal_50(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -653,7 +653,7 @@ define <256 x i8> @vdeal_50(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_51: -; CHECK: [[REG51:r[0-9]+]] = #81 +; CHECK: [[REG51:r[0-9]+]] = #-47 ; CHECK: vdeal(v1,v0,[[REG51]]) define <256 x i8> @vdeal_51(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30, i32 64, i32 80, i32 66, i32 82, i32 68, i32 84, i32 70, i32 86, i32 72, i32 88, i32 74, i32 90, i32 76, i32 92, i32 78, i32 94, i32 32, i32 48, i32 34, i32 50, i32 36, i32 52, i32 38, i32 54, i32 40, i32 56, i32 42, i32 58, i32 44, i32 60, i32 46, i32 62, i32 96, i32 112, i32 98, i32 114, i32 100, i32 116, i32 102, i32 118, i32 104, i32 120, i32 106, i32 122, i32 108, i32 124, i32 110, i32 126, i32 128, i32 144, i32 130, i32 146, i32 132, i32 148, i32 134, i32 150, i32 136, i32 152, i32 138, i32 154, i32 140, i32 156, i32 142, i32 158, i32 192, i32 208, i32 194, i32 210, i32 196, i32 212, i32 198, i32 214, i32 200, i32 216, i32 202, i32 218, i32 204, i32 220, i32 206, i32 222, i32 160, 
i32 176, i32 162, i32 178, i32 164, i32 180, i32 166, i32 182, i32 168, i32 184, i32 170, i32 186, i32 172, i32 188, i32 174, i32 190, i32 224, i32 240, i32 226, i32 242, i32 228, i32 244, i32 230, i32 246, i32 232, i32 248, i32 234, i32 250, i32 236, i32 252, i32 238, i32 254, i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31, i32 65, i32 81, i32 67, i32 83, i32 69, i32 85, i32 71, i32 87, i32 73, i32 89, i32 75, i32 91, i32 77, i32 93, i32 79, i32 95, i32 33, i32 49, i32 35, i32 51, i32 37, i32 53, i32 39, i32 55, i32 41, i32 57, i32 43, i32 59, i32 45, i32 61, i32 47, i32 63, i32 97, i32 113, i32 99, i32 115, i32 101, i32 117, i32 103, i32 119, i32 105, i32 121, i32 107, i32 123, i32 109, i32 125, i32 111, i32 127, i32 129, i32 145, i32 131, i32 147, i32 133, i32 149, i32 135, i32 151, i32 137, i32 153, i32 139, i32 155, i32 141, i32 157, i32 143, i32 159, i32 193, i32 209, i32 195, i32 211, i32 197, i32 213, i32 199, i32 215, i32 201, i32 217, i32 203, i32 219, i32 205, i32 221, i32 207, i32 223, i32 161, i32 177, i32 163, i32 179, i32 165, i32 181, i32 167, i32 183, i32 169, i32 185, i32 171, i32 187, i32 173, i32 189, i32 175, i32 191, i32 225, i32 241, i32 227, i32 243, i32 229, i32 245, i32 231, i32 247, i32 233, i32 249, i32 235, i32 251, i32 237, i32 253, i32 239, i32 255> @@ -661,7 +661,7 @@ define <256 x i8> @vdeal_51(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_52: -; CHECK: [[REG52:r[0-9]+]] = #82 +; CHECK: [[REG52:r[0-9]+]] = #-46 ; CHECK: vdeal(v1,v0,[[REG52]]) define <256 x i8> @vdeal_52(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 16, i32 17, i32 4, i32 5, i32 20, i32 21, i32 8, i32 9, i32 24, i32 25, i32 12, i32 13, i32 28, i32 29, i32 64, i32 65, i32 80, i32 81, i32 68, i32 69, i32 84, i32 85, i32 72, i32 73, i32 88, i32 89, i32 76, i32 77, i32 92, i32 93, i32 32, i32 33, i32 48, i32 49, i32 36, i32 37, i32 52, i32 53, i32 40, i32 41, i32 56, i32 57, i32 44, i32 45, i32 60, i32 61, i32 96, i32 97, i32 112, i32 113, i32 100, i32 101, i32 116, i32 117, i32 104, i32 105, i32 120, i32 121, i32 108, i32 109, i32 124, i32 125, i32 128, i32 129, i32 144, i32 145, i32 132, i32 133, i32 148, i32 149, i32 136, i32 137, i32 152, i32 153, i32 140, i32 141, i32 156, i32 157, i32 192, i32 193, i32 208, i32 209, i32 196, i32 197, i32 212, i32 213, i32 200, i32 201, i32 216, i32 217, i32 204, i32 205, i32 220, i32 221, i32 160, i32 161, i32 176, i32 177, i32 164, i32 165, i32 180, i32 181, i32 168, i32 169, i32 184, i32 185, i32 172, i32 173, i32 188, i32 189, i32 224, i32 225, i32 240, i32 241, i32 228, i32 229, i32 244, i32 245, i32 232, i32 233, i32 248, i32 249, i32 236, i32 237, i32 252, i32 253, i32 2, i32 3, i32 18, i32 19, i32 6, i32 7, i32 22, i32 23, i32 10, i32 11, i32 26, i32 27, i32 14, i32 15, i32 30, i32 31, i32 66, i32 67, i32 82, i32 83, i32 70, i32 71, i32 86, i32 87, i32 74, i32 75, i32 90, i32 91, i32 78, i32 79, i32 94, i32 95, i32 34, i32 35, i32 50, i32 51, i32 38, i32 39, i32 54, i32 55, i32 42, i32 43, i32 58, i32 59, i32 46, i32 47, i32 62, i32 63, i32 98, i32 99, i32 114, i32 115, i32 102, i32 103, i32 118, i32 119, i32 106, i32 107, i32 122, i32 123, i32 110, i32 111, i32 126, i32 127, i32 130, i32 131, i32 146, i32 147, i32 134, i32 135, i32 150, i32 151, i32 138, i32 139, i32 154, i32 155, i32 142, i32 143, i32 158, i32 159, i32 194, i32 195, i32 210, i32 211, i32 198, i32 199, i32 214, i32 215, 
i32 202, i32 203, i32 218, i32 219, i32 206, i32 207, i32 222, i32 223, i32 162, i32 163, i32 178, i32 179, i32 166, i32 167, i32 182, i32 183, i32 170, i32 171, i32 186, i32 187, i32 174, i32 175, i32 190, i32 191, i32 226, i32 227, i32 242, i32 243, i32 230, i32 231, i32 246, i32 247, i32 234, i32 235, i32 250, i32 251, i32 238, i32 239, i32 254, i32 255> @@ -669,7 +669,7 @@ define <256 x i8> @vdeal_52(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_53: -; CHECK: [[REG53:r[0-9]+]] = #83 +; CHECK: [[REG53:r[0-9]+]] = #-45 ; CHECK: vdeal(v1,v0,[[REG53]]) define <256 x i8> @vdeal_53(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 16, i32 18, i32 4, i32 6, i32 20, i32 22, i32 8, i32 10, i32 24, i32 26, i32 12, i32 14, i32 28, i32 30, i32 64, i32 66, i32 80, i32 82, i32 68, i32 70, i32 84, i32 86, i32 72, i32 74, i32 88, i32 90, i32 76, i32 78, i32 92, i32 94, i32 32, i32 34, i32 48, i32 50, i32 36, i32 38, i32 52, i32 54, i32 40, i32 42, i32 56, i32 58, i32 44, i32 46, i32 60, i32 62, i32 96, i32 98, i32 112, i32 114, i32 100, i32 102, i32 116, i32 118, i32 104, i32 106, i32 120, i32 122, i32 108, i32 110, i32 124, i32 126, i32 128, i32 130, i32 144, i32 146, i32 132, i32 134, i32 148, i32 150, i32 136, i32 138, i32 152, i32 154, i32 140, i32 142, i32 156, i32 158, i32 192, i32 194, i32 208, i32 210, i32 196, i32 198, i32 212, i32 214, i32 200, i32 202, i32 216, i32 218, i32 204, i32 206, i32 220, i32 222, i32 160, i32 162, i32 176, i32 178, i32 164, i32 166, i32 180, i32 182, i32 168, i32 170, i32 184, i32 186, i32 172, i32 174, i32 188, i32 190, i32 224, i32 226, i32 240, i32 242, i32 228, i32 230, i32 244, i32 246, i32 232, i32 234, i32 248, i32 250, i32 236, i32 238, i32 252, i32 254, i32 1, i32 3, i32 17, i32 19, i32 5, i32 7, i32 21, i32 23, i32 9, i32 11, i32 25, i32 27, i32 13, i32 15, i32 29, i32 31, i32 65, i32 67, i32 81, i32 83, i32 69, i32 71, i32 85, i32 87, i32 73, i32 75, i32 89, i32 91, i32 77, i32 79, i32 93, i32 95, i32 33, i32 35, i32 49, i32 51, i32 37, i32 39, i32 53, i32 55, i32 41, i32 43, i32 57, i32 59, i32 45, i32 47, i32 61, i32 63, i32 97, i32 99, i32 113, i32 115, i32 101, i32 103, i32 117, i32 119, i32 105, i32 107, i32 121, i32 123, i32 109, i32 111, i32 125, i32 127, i32 129, i32 131, i32 145, i32 147, i32 133, i32 135, i32 149, i32 151, i32 137, i32 139, i32 153, i32 155, i32 141, i32 143, i32 157, i32 159, i32 193, i32 195, i32 209, i32 211, i32 197, i32 199, i32 213, i32 215, i32 201, i32 203, i32 217, i32 219, i32 205, i32 207, i32 221, i32 223, i32 161, i32 163, i32 177, i32 179, i32 165, i32 167, i32 181, i32 183, i32 169, i32 171, i32 185, i32 187, i32 173, i32 175, i32 189, i32 191, i32 225, i32 227, i32 241, i32 243, i32 229, i32 231, i32 245, i32 247, i32 233, i32 235, i32 249, i32 251, i32 237, i32 239, i32 253, i32 255> @@ -677,7 +677,7 @@ define <256 x i8> @vdeal_53(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_54: -; CHECK: [[REG54:r[0-9]+]] = #84 +; CHECK: [[REG54:r[0-9]+]] = #-44 ; CHECK: vdeal(v1,v0,[[REG54]]) define <256 x i8> @vdeal_54(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 64, i32 65, i32 66, i32 67, i32 80, i32 81, i32 82, i32 83, i32 72, i32 73, i32 74, i32 75, i32 88, i32 89, i32 90, i32 91, i32 32, i32 33, i32 34, i32 35, i32 48, i32 49, i32 50, i32 
51, i32 40, i32 41, i32 42, i32 43, i32 56, i32 57, i32 58, i32 59, i32 96, i32 97, i32 98, i32 99, i32 112, i32 113, i32 114, i32 115, i32 104, i32 105, i32 106, i32 107, i32 120, i32 121, i32 122, i32 123, i32 128, i32 129, i32 130, i32 131, i32 144, i32 145, i32 146, i32 147, i32 136, i32 137, i32 138, i32 139, i32 152, i32 153, i32 154, i32 155, i32 192, i32 193, i32 194, i32 195, i32 208, i32 209, i32 210, i32 211, i32 200, i32 201, i32 202, i32 203, i32 216, i32 217, i32 218, i32 219, i32 160, i32 161, i32 162, i32 163, i32 176, i32 177, i32 178, i32 179, i32 168, i32 169, i32 170, i32 171, i32 184, i32 185, i32 186, i32 187, i32 224, i32 225, i32 226, i32 227, i32 240, i32 241, i32 242, i32 243, i32 232, i32 233, i32 234, i32 235, i32 248, i32 249, i32 250, i32 251, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31, i32 68, i32 69, i32 70, i32 71, i32 84, i32 85, i32 86, i32 87, i32 76, i32 77, i32 78, i32 79, i32 92, i32 93, i32 94, i32 95, i32 36, i32 37, i32 38, i32 39, i32 52, i32 53, i32 54, i32 55, i32 44, i32 45, i32 46, i32 47, i32 60, i32 61, i32 62, i32 63, i32 100, i32 101, i32 102, i32 103, i32 116, i32 117, i32 118, i32 119, i32 108, i32 109, i32 110, i32 111, i32 124, i32 125, i32 126, i32 127, i32 132, i32 133, i32 134, i32 135, i32 148, i32 149, i32 150, i32 151, i32 140, i32 141, i32 142, i32 143, i32 156, i32 157, i32 158, i32 159, i32 196, i32 197, i32 198, i32 199, i32 212, i32 213, i32 214, i32 215, i32 204, i32 205, i32 206, i32 207, i32 220, i32 221, i32 222, i32 223, i32 164, i32 165, i32 166, i32 167, i32 180, i32 181, i32 182, i32 183, i32 172, i32 173, i32 174, i32 175, i32 188, i32 189, i32 190, i32 191, i32 228, i32 229, i32 230, i32 231, i32 244, i32 245, i32 246, i32 247, i32 236, i32 237, i32 238, i32 239, i32 252, i32 253, i32 254, i32 255> @@ -685,7 +685,7 @@ define <256 x i8> @vdeal_54(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_55: -; CHECK: [[REG55:r[0-9]+]] = #85 +; CHECK: [[REG55:r[0-9]+]] = #-43 ; CHECK: vdeal(v1,v0,[[REG55]]) define <256 x i8> @vdeal_55(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 4, i32 2, i32 6, i32 16, i32 20, i32 18, i32 22, i32 8, i32 12, i32 10, i32 14, i32 24, i32 28, i32 26, i32 30, i32 64, i32 68, i32 66, i32 70, i32 80, i32 84, i32 82, i32 86, i32 72, i32 76, i32 74, i32 78, i32 88, i32 92, i32 90, i32 94, i32 32, i32 36, i32 34, i32 38, i32 48, i32 52, i32 50, i32 54, i32 40, i32 44, i32 42, i32 46, i32 56, i32 60, i32 58, i32 62, i32 96, i32 100, i32 98, i32 102, i32 112, i32 116, i32 114, i32 118, i32 104, i32 108, i32 106, i32 110, i32 120, i32 124, i32 122, i32 126, i32 128, i32 132, i32 130, i32 134, i32 144, i32 148, i32 146, i32 150, i32 136, i32 140, i32 138, i32 142, i32 152, i32 156, i32 154, i32 158, i32 192, i32 196, i32 194, i32 198, i32 208, i32 212, i32 210, i32 214, i32 200, i32 204, i32 202, i32 206, i32 216, i32 220, i32 218, i32 222, i32 160, i32 164, i32 162, i32 166, i32 176, i32 180, i32 178, i32 182, i32 168, i32 172, i32 170, i32 174, i32 184, i32 188, i32 186, i32 190, i32 224, i32 228, i32 226, i32 230, i32 240, i32 244, i32 242, i32 246, i32 232, i32 236, i32 234, i32 238, i32 248, i32 252, i32 250, i32 254, i32 1, i32 5, i32 3, i32 7, i32 17, i32 21, i32 19, i32 23, i32 9, i32 13, i32 11, i32 15, i32 25, i32 29, i32 27, i32 31, i32 65, i32 69, i32 67, i32 71, i32 81, i32 85, i32 83, i32 87, i32 73, i32 77, i32 75, i32 79, i32 89, 
i32 93, i32 91, i32 95, i32 33, i32 37, i32 35, i32 39, i32 49, i32 53, i32 51, i32 55, i32 41, i32 45, i32 43, i32 47, i32 57, i32 61, i32 59, i32 63, i32 97, i32 101, i32 99, i32 103, i32 113, i32 117, i32 115, i32 119, i32 105, i32 109, i32 107, i32 111, i32 121, i32 125, i32 123, i32 127, i32 129, i32 133, i32 131, i32 135, i32 145, i32 149, i32 147, i32 151, i32 137, i32 141, i32 139, i32 143, i32 153, i32 157, i32 155, i32 159, i32 193, i32 197, i32 195, i32 199, i32 209, i32 213, i32 211, i32 215, i32 201, i32 205, i32 203, i32 207, i32 217, i32 221, i32 219, i32 223, i32 161, i32 165, i32 163, i32 167, i32 177, i32 181, i32 179, i32 183, i32 169, i32 173, i32 171, i32 175, i32 185, i32 189, i32 187, i32 191, i32 225, i32 229, i32 227, i32 231, i32 241, i32 245, i32 243, i32 247, i32 233, i32 237, i32 235, i32 239, i32 249, i32 253, i32 251, i32 255> @@ -693,7 +693,7 @@ define <256 x i8> @vdeal_55(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_56: -; CHECK: [[REG56:r[0-9]+]] = #86 +; CHECK: [[REG56:r[0-9]+]] = #-42 ; CHECK: vdeal(v1,v0,[[REG56]]) define <256 x i8> @vdeal_56(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 4, i32 5, i32 16, i32 17, i32 20, i32 21, i32 8, i32 9, i32 12, i32 13, i32 24, i32 25, i32 28, i32 29, i32 64, i32 65, i32 68, i32 69, i32 80, i32 81, i32 84, i32 85, i32 72, i32 73, i32 76, i32 77, i32 88, i32 89, i32 92, i32 93, i32 32, i32 33, i32 36, i32 37, i32 48, i32 49, i32 52, i32 53, i32 40, i32 41, i32 44, i32 45, i32 56, i32 57, i32 60, i32 61, i32 96, i32 97, i32 100, i32 101, i32 112, i32 113, i32 116, i32 117, i32 104, i32 105, i32 108, i32 109, i32 120, i32 121, i32 124, i32 125, i32 128, i32 129, i32 132, i32 133, i32 144, i32 145, i32 148, i32 149, i32 136, i32 137, i32 140, i32 141, i32 152, i32 153, i32 156, i32 157, i32 192, i32 193, i32 196, i32 197, i32 208, i32 209, i32 212, i32 213, i32 200, i32 201, i32 204, i32 205, i32 216, i32 217, i32 220, i32 221, i32 160, i32 161, i32 164, i32 165, i32 176, i32 177, i32 180, i32 181, i32 168, i32 169, i32 172, i32 173, i32 184, i32 185, i32 188, i32 189, i32 224, i32 225, i32 228, i32 229, i32 240, i32 241, i32 244, i32 245, i32 232, i32 233, i32 236, i32 237, i32 248, i32 249, i32 252, i32 253, i32 2, i32 3, i32 6, i32 7, i32 18, i32 19, i32 22, i32 23, i32 10, i32 11, i32 14, i32 15, i32 26, i32 27, i32 30, i32 31, i32 66, i32 67, i32 70, i32 71, i32 82, i32 83, i32 86, i32 87, i32 74, i32 75, i32 78, i32 79, i32 90, i32 91, i32 94, i32 95, i32 34, i32 35, i32 38, i32 39, i32 50, i32 51, i32 54, i32 55, i32 42, i32 43, i32 46, i32 47, i32 58, i32 59, i32 62, i32 63, i32 98, i32 99, i32 102, i32 103, i32 114, i32 115, i32 118, i32 119, i32 106, i32 107, i32 110, i32 111, i32 122, i32 123, i32 126, i32 127, i32 130, i32 131, i32 134, i32 135, i32 146, i32 147, i32 150, i32 151, i32 138, i32 139, i32 142, i32 143, i32 154, i32 155, i32 158, i32 159, i32 194, i32 195, i32 198, i32 199, i32 210, i32 211, i32 214, i32 215, i32 202, i32 203, i32 206, i32 207, i32 218, i32 219, i32 222, i32 223, i32 162, i32 163, i32 166, i32 167, i32 178, i32 179, i32 182, i32 183, i32 170, i32 171, i32 174, i32 175, i32 186, i32 187, i32 190, i32 191, i32 226, i32 227, i32 230, i32 231, i32 242, i32 243, i32 246, i32 247, i32 234, i32 235, i32 238, i32 239, i32 250, i32 251, i32 254, i32 255> @@ -701,7 +701,7 @@ define <256 x i8> @vdeal_56(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_57: -; CHECK: [[REG57:r[0-9]+]] = #87 
+; CHECK: [[REG57:r[0-9]+]] = #-41 ; CHECK: vdeal(v1,v0,[[REG57]]) define <256 x i8> @vdeal_57(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30, i32 64, i32 66, i32 68, i32 70, i32 80, i32 82, i32 84, i32 86, i32 72, i32 74, i32 76, i32 78, i32 88, i32 90, i32 92, i32 94, i32 32, i32 34, i32 36, i32 38, i32 48, i32 50, i32 52, i32 54, i32 40, i32 42, i32 44, i32 46, i32 56, i32 58, i32 60, i32 62, i32 96, i32 98, i32 100, i32 102, i32 112, i32 114, i32 116, i32 118, i32 104, i32 106, i32 108, i32 110, i32 120, i32 122, i32 124, i32 126, i32 128, i32 130, i32 132, i32 134, i32 144, i32 146, i32 148, i32 150, i32 136, i32 138, i32 140, i32 142, i32 152, i32 154, i32 156, i32 158, i32 192, i32 194, i32 196, i32 198, i32 208, i32 210, i32 212, i32 214, i32 200, i32 202, i32 204, i32 206, i32 216, i32 218, i32 220, i32 222, i32 160, i32 162, i32 164, i32 166, i32 176, i32 178, i32 180, i32 182, i32 168, i32 170, i32 172, i32 174, i32 184, i32 186, i32 188, i32 190, i32 224, i32 226, i32 228, i32 230, i32 240, i32 242, i32 244, i32 246, i32 232, i32 234, i32 236, i32 238, i32 248, i32 250, i32 252, i32 254, i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31, i32 65, i32 67, i32 69, i32 71, i32 81, i32 83, i32 85, i32 87, i32 73, i32 75, i32 77, i32 79, i32 89, i32 91, i32 93, i32 95, i32 33, i32 35, i32 37, i32 39, i32 49, i32 51, i32 53, i32 55, i32 41, i32 43, i32 45, i32 47, i32 57, i32 59, i32 61, i32 63, i32 97, i32 99, i32 101, i32 103, i32 113, i32 115, i32 117, i32 119, i32 105, i32 107, i32 109, i32 111, i32 121, i32 123, i32 125, i32 127, i32 129, i32 131, i32 133, i32 135, i32 145, i32 147, i32 149, i32 151, i32 137, i32 139, i32 141, i32 143, i32 153, i32 155, i32 157, i32 159, i32 193, i32 195, i32 197, i32 199, i32 209, i32 211, i32 213, i32 215, i32 201, i32 203, i32 205, i32 207, i32 217, i32 219, i32 221, i32 223, i32 161, i32 163, i32 165, i32 167, i32 177, i32 179, i32 181, i32 183, i32 169, i32 171, i32 173, i32 175, i32 185, i32 187, i32 189, i32 191, i32 225, i32 227, i32 229, i32 231, i32 241, i32 243, i32 245, i32 247, i32 233, i32 235, i32 237, i32 239, i32 249, i32 251, i32 253, i32 255> @@ -709,7 +709,7 @@ define <256 x i8> @vdeal_57(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_58: -; CHECK: [[REG58:r[0-9]+]] = #88 +; CHECK: [[REG58:r[0-9]+]] = #-40 ; CHECK: vdeal(v1,v0,[[REG58]]) define <256 x i8> @vdeal_58(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 160, i32 
161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -717,7 +717,7 @@ define <256 x i8> @vdeal_58(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_59: -; CHECK: [[REG59:r[0-9]+]] = #89 +; CHECK: [[REG59:r[0-9]+]] = #-39 ; CHECK: vdeal(v1,v0,[[REG59]]) define <256 x i8> @vdeal_59(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 16, i32 24, i32 18, i32 26, i32 20, i32 28, i32 22, i32 30, i32 64, i32 72, i32 66, i32 74, i32 68, i32 76, i32 70, i32 78, i32 80, i32 88, i32 82, i32 90, i32 84, i32 92, i32 86, i32 94, i32 32, i32 40, i32 34, i32 42, i32 36, i32 44, i32 38, i32 46, i32 48, i32 56, i32 50, i32 58, i32 52, i32 60, i32 54, i32 62, i32 96, i32 104, i32 98, i32 106, i32 100, i32 108, i32 102, i32 110, i32 112, i32 120, i32 114, i32 122, i32 116, i32 124, i32 118, i32 126, i32 128, i32 136, i32 130, i32 138, i32 132, i32 140, i32 134, i32 142, i32 144, i32 152, i32 146, i32 154, i32 148, i32 156, i32 150, i32 158, i32 192, i32 200, i32 194, i32 202, i32 196, i32 204, i32 198, i32 206, i32 208, i32 216, i32 210, i32 218, i32 212, i32 220, i32 214, i32 222, i32 160, i32 168, i32 162, i32 170, i32 164, i32 172, i32 166, i32 174, i32 176, i32 184, i32 178, i32 186, i32 180, i32 188, i32 182, i32 190, i32 224, i32 232, i32 226, i32 234, i32 228, i32 236, i32 230, i32 238, i32 240, i32 248, i32 242, i32 250, i32 244, i32 252, i32 246, i32 254, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, i32 17, i32 25, i32 19, i32 27, i32 21, i32 29, i32 23, i32 31, i32 65, i32 73, i32 67, i32 75, i32 69, i32 77, i32 71, i32 79, i32 81, i32 89, i32 83, i32 91, i32 85, i32 93, i32 87, i32 95, i32 33, i32 41, i32 35, i32 43, i32 37, i32 45, i32 39, i32 47, i32 49, i32 57, i32 51, i32 59, i32 53, i32 61, i32 55, i32 63, i32 97, i32 105, i32 99, i32 107, i32 101, i32 109, i32 103, i32 111, i32 113, i32 121, i32 115, i32 123, i32 117, i32 125, i32 119, i32 127, i32 129, i32 137, i32 131, i32 139, i32 133, i32 141, i32 135, i32 143, i32 145, i32 153, i32 147, i32 155, i32 149, i32 157, i32 151, i32 159, i32 193, i32 201, i32 195, i32 203, i32 197, i32 205, i32 199, i32 
207, i32 209, i32 217, i32 211, i32 219, i32 213, i32 221, i32 215, i32 223, i32 161, i32 169, i32 163, i32 171, i32 165, i32 173, i32 167, i32 175, i32 177, i32 185, i32 179, i32 187, i32 181, i32 189, i32 183, i32 191, i32 225, i32 233, i32 227, i32 235, i32 229, i32 237, i32 231, i32 239, i32 241, i32 249, i32 243, i32 251, i32 245, i32 253, i32 247, i32 255> @@ -725,7 +725,7 @@ define <256 x i8> @vdeal_59(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_5a: -; CHECK: [[REG5a:r[0-9]+]] = #90 +; CHECK: [[REG5a:r[0-9]+]] = #-38 ; CHECK: vdeal(v1,v0,[[REG5a]]) define <256 x i8> @vdeal_5a(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 12, i32 13, i32 16, i32 17, i32 24, i32 25, i32 20, i32 21, i32 28, i32 29, i32 64, i32 65, i32 72, i32 73, i32 68, i32 69, i32 76, i32 77, i32 80, i32 81, i32 88, i32 89, i32 84, i32 85, i32 92, i32 93, i32 32, i32 33, i32 40, i32 41, i32 36, i32 37, i32 44, i32 45, i32 48, i32 49, i32 56, i32 57, i32 52, i32 53, i32 60, i32 61, i32 96, i32 97, i32 104, i32 105, i32 100, i32 101, i32 108, i32 109, i32 112, i32 113, i32 120, i32 121, i32 116, i32 117, i32 124, i32 125, i32 128, i32 129, i32 136, i32 137, i32 132, i32 133, i32 140, i32 141, i32 144, i32 145, i32 152, i32 153, i32 148, i32 149, i32 156, i32 157, i32 192, i32 193, i32 200, i32 201, i32 196, i32 197, i32 204, i32 205, i32 208, i32 209, i32 216, i32 217, i32 212, i32 213, i32 220, i32 221, i32 160, i32 161, i32 168, i32 169, i32 164, i32 165, i32 172, i32 173, i32 176, i32 177, i32 184, i32 185, i32 180, i32 181, i32 188, i32 189, i32 224, i32 225, i32 232, i32 233, i32 228, i32 229, i32 236, i32 237, i32 240, i32 241, i32 248, i32 249, i32 244, i32 245, i32 252, i32 253, i32 2, i32 3, i32 10, i32 11, i32 6, i32 7, i32 14, i32 15, i32 18, i32 19, i32 26, i32 27, i32 22, i32 23, i32 30, i32 31, i32 66, i32 67, i32 74, i32 75, i32 70, i32 71, i32 78, i32 79, i32 82, i32 83, i32 90, i32 91, i32 86, i32 87, i32 94, i32 95, i32 34, i32 35, i32 42, i32 43, i32 38, i32 39, i32 46, i32 47, i32 50, i32 51, i32 58, i32 59, i32 54, i32 55, i32 62, i32 63, i32 98, i32 99, i32 106, i32 107, i32 102, i32 103, i32 110, i32 111, i32 114, i32 115, i32 122, i32 123, i32 118, i32 119, i32 126, i32 127, i32 130, i32 131, i32 138, i32 139, i32 134, i32 135, i32 142, i32 143, i32 146, i32 147, i32 154, i32 155, i32 150, i32 151, i32 158, i32 159, i32 194, i32 195, i32 202, i32 203, i32 198, i32 199, i32 206, i32 207, i32 210, i32 211, i32 218, i32 219, i32 214, i32 215, i32 222, i32 223, i32 162, i32 163, i32 170, i32 171, i32 166, i32 167, i32 174, i32 175, i32 178, i32 179, i32 186, i32 187, i32 182, i32 183, i32 190, i32 191, i32 226, i32 227, i32 234, i32 235, i32 230, i32 231, i32 238, i32 239, i32 242, i32 243, i32 250, i32 251, i32 246, i32 247, i32 254, i32 255> @@ -733,7 +733,7 @@ define <256 x i8> @vdeal_5a(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_5b: -; CHECK: [[REG5b:r[0-9]+]] = #91 +; CHECK: [[REG5b:r[0-9]+]] = #-37 ; CHECK: vdeal(v1,v0,[[REG5b]]) define <256 x i8> @vdeal_5b(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14, i32 16, i32 18, i32 24, i32 26, i32 20, i32 22, i32 28, i32 30, i32 64, i32 66, i32 72, i32 74, i32 68, i32 70, i32 76, i32 78, i32 80, i32 82, i32 88, i32 90, i32 84, i32 86, i32 92, i32 94, i32 32, i32 34, i32 40, i32 42, i32 36, i32 38, i32 
44, i32 46, i32 48, i32 50, i32 56, i32 58, i32 52, i32 54, i32 60, i32 62, i32 96, i32 98, i32 104, i32 106, i32 100, i32 102, i32 108, i32 110, i32 112, i32 114, i32 120, i32 122, i32 116, i32 118, i32 124, i32 126, i32 128, i32 130, i32 136, i32 138, i32 132, i32 134, i32 140, i32 142, i32 144, i32 146, i32 152, i32 154, i32 148, i32 150, i32 156, i32 158, i32 192, i32 194, i32 200, i32 202, i32 196, i32 198, i32 204, i32 206, i32 208, i32 210, i32 216, i32 218, i32 212, i32 214, i32 220, i32 222, i32 160, i32 162, i32 168, i32 170, i32 164, i32 166, i32 172, i32 174, i32 176, i32 178, i32 184, i32 186, i32 180, i32 182, i32 188, i32 190, i32 224, i32 226, i32 232, i32 234, i32 228, i32 230, i32 236, i32 238, i32 240, i32 242, i32 248, i32 250, i32 244, i32 246, i32 252, i32 254, i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15, i32 17, i32 19, i32 25, i32 27, i32 21, i32 23, i32 29, i32 31, i32 65, i32 67, i32 73, i32 75, i32 69, i32 71, i32 77, i32 79, i32 81, i32 83, i32 89, i32 91, i32 85, i32 87, i32 93, i32 95, i32 33, i32 35, i32 41, i32 43, i32 37, i32 39, i32 45, i32 47, i32 49, i32 51, i32 57, i32 59, i32 53, i32 55, i32 61, i32 63, i32 97, i32 99, i32 105, i32 107, i32 101, i32 103, i32 109, i32 111, i32 113, i32 115, i32 121, i32 123, i32 117, i32 119, i32 125, i32 127, i32 129, i32 131, i32 137, i32 139, i32 133, i32 135, i32 141, i32 143, i32 145, i32 147, i32 153, i32 155, i32 149, i32 151, i32 157, i32 159, i32 193, i32 195, i32 201, i32 203, i32 197, i32 199, i32 205, i32 207, i32 209, i32 211, i32 217, i32 219, i32 213, i32 215, i32 221, i32 223, i32 161, i32 163, i32 169, i32 171, i32 165, i32 167, i32 173, i32 175, i32 177, i32 179, i32 185, i32 187, i32 181, i32 183, i32 189, i32 191, i32 225, i32 227, i32 233, i32 235, i32 229, i32 231, i32 237, i32 239, i32 241, i32 243, i32 249, i32 251, i32 245, i32 247, i32 253, i32 255> @@ -741,7 +741,7 @@ define <256 x i8> @vdeal_5b(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_5c: -; CHECK: [[REG5c:r[0-9]+]] = #92 +; CHECK: [[REG5c:r[0-9]+]] = #-36 ; CHECK: vdeal(v1,v0,[[REG5c]]) define <256 x i8> @vdeal_5c(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27, i32 64, i32 65, i32 66, i32 67, i32 72, i32 73, i32 74, i32 75, i32 80, i32 81, i32 82, i32 83, i32 88, i32 89, i32 90, i32 91, i32 32, i32 33, i32 34, i32 35, i32 40, i32 41, i32 42, i32 43, i32 48, i32 49, i32 50, i32 51, i32 56, i32 57, i32 58, i32 59, i32 96, i32 97, i32 98, i32 99, i32 104, i32 105, i32 106, i32 107, i32 112, i32 113, i32 114, i32 115, i32 120, i32 121, i32 122, i32 123, i32 128, i32 129, i32 130, i32 131, i32 136, i32 137, i32 138, i32 139, i32 144, i32 145, i32 146, i32 147, i32 152, i32 153, i32 154, i32 155, i32 192, i32 193, i32 194, i32 195, i32 200, i32 201, i32 202, i32 203, i32 208, i32 209, i32 210, i32 211, i32 216, i32 217, i32 218, i32 219, i32 160, i32 161, i32 162, i32 163, i32 168, i32 169, i32 170, i32 171, i32 176, i32 177, i32 178, i32 179, i32 184, i32 185, i32 186, i32 187, i32 224, i32 225, i32 226, i32 227, i32 232, i32 233, i32 234, i32 235, i32 240, i32 241, i32 242, i32 243, i32 248, i32 249, i32 250, i32 251, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31, i32 68, i32 69, i32 70, i32 71, i32 76, i32 77, i32 78, i32 79, i32 84, i32 85, i32 86, i32 87, i32 
92, i32 93, i32 94, i32 95, i32 36, i32 37, i32 38, i32 39, i32 44, i32 45, i32 46, i32 47, i32 52, i32 53, i32 54, i32 55, i32 60, i32 61, i32 62, i32 63, i32 100, i32 101, i32 102, i32 103, i32 108, i32 109, i32 110, i32 111, i32 116, i32 117, i32 118, i32 119, i32 124, i32 125, i32 126, i32 127, i32 132, i32 133, i32 134, i32 135, i32 140, i32 141, i32 142, i32 143, i32 148, i32 149, i32 150, i32 151, i32 156, i32 157, i32 158, i32 159, i32 196, i32 197, i32 198, i32 199, i32 204, i32 205, i32 206, i32 207, i32 212, i32 213, i32 214, i32 215, i32 220, i32 221, i32 222, i32 223, i32 164, i32 165, i32 166, i32 167, i32 172, i32 173, i32 174, i32 175, i32 180, i32 181, i32 182, i32 183, i32 188, i32 189, i32 190, i32 191, i32 228, i32 229, i32 230, i32 231, i32 236, i32 237, i32 238, i32 239, i32 244, i32 245, i32 246, i32 247, i32 252, i32 253, i32 254, i32 255> @@ -749,7 +749,7 @@ define <256 x i8> @vdeal_5c(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_5d: -; CHECK: [[REG5d:r[0-9]+]] = #93 +; CHECK: [[REG5d:r[0-9]+]] = #-35 ; CHECK: vdeal(v1,v0,[[REG5d]]) define <256 x i8> @vdeal_5d(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 4, i32 2, i32 6, i32 8, i32 12, i32 10, i32 14, i32 16, i32 20, i32 18, i32 22, i32 24, i32 28, i32 26, i32 30, i32 64, i32 68, i32 66, i32 70, i32 72, i32 76, i32 74, i32 78, i32 80, i32 84, i32 82, i32 86, i32 88, i32 92, i32 90, i32 94, i32 32, i32 36, i32 34, i32 38, i32 40, i32 44, i32 42, i32 46, i32 48, i32 52, i32 50, i32 54, i32 56, i32 60, i32 58, i32 62, i32 96, i32 100, i32 98, i32 102, i32 104, i32 108, i32 106, i32 110, i32 112, i32 116, i32 114, i32 118, i32 120, i32 124, i32 122, i32 126, i32 128, i32 132, i32 130, i32 134, i32 136, i32 140, i32 138, i32 142, i32 144, i32 148, i32 146, i32 150, i32 152, i32 156, i32 154, i32 158, i32 192, i32 196, i32 194, i32 198, i32 200, i32 204, i32 202, i32 206, i32 208, i32 212, i32 210, i32 214, i32 216, i32 220, i32 218, i32 222, i32 160, i32 164, i32 162, i32 166, i32 168, i32 172, i32 170, i32 174, i32 176, i32 180, i32 178, i32 182, i32 184, i32 188, i32 186, i32 190, i32 224, i32 228, i32 226, i32 230, i32 232, i32 236, i32 234, i32 238, i32 240, i32 244, i32 242, i32 246, i32 248, i32 252, i32 250, i32 254, i32 1, i32 5, i32 3, i32 7, i32 9, i32 13, i32 11, i32 15, i32 17, i32 21, i32 19, i32 23, i32 25, i32 29, i32 27, i32 31, i32 65, i32 69, i32 67, i32 71, i32 73, i32 77, i32 75, i32 79, i32 81, i32 85, i32 83, i32 87, i32 89, i32 93, i32 91, i32 95, i32 33, i32 37, i32 35, i32 39, i32 41, i32 45, i32 43, i32 47, i32 49, i32 53, i32 51, i32 55, i32 57, i32 61, i32 59, i32 63, i32 97, i32 101, i32 99, i32 103, i32 105, i32 109, i32 107, i32 111, i32 113, i32 117, i32 115, i32 119, i32 121, i32 125, i32 123, i32 127, i32 129, i32 133, i32 131, i32 135, i32 137, i32 141, i32 139, i32 143, i32 145, i32 149, i32 147, i32 151, i32 153, i32 157, i32 155, i32 159, i32 193, i32 197, i32 195, i32 199, i32 201, i32 205, i32 203, i32 207, i32 209, i32 213, i32 211, i32 215, i32 217, i32 221, i32 219, i32 223, i32 161, i32 165, i32 163, i32 167, i32 169, i32 173, i32 171, i32 175, i32 177, i32 181, i32 179, i32 183, i32 185, i32 189, i32 187, i32 191, i32 225, i32 229, i32 227, i32 231, i32 233, i32 237, i32 235, i32 239, i32 241, i32 245, i32 243, i32 247, i32 249, i32 253, i32 251, i32 255> @@ -757,7 +757,7 @@ define <256 x i8> @vdeal_5d(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_5e: -; CHECK: [[REG5e:r[0-9]+]] 
= #94 +; CHECK: [[REG5e:r[0-9]+]] = #-34 ; CHECK: vdeal(v1,v0,[[REG5e]]) define <256 x i8> @vdeal_5e(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29, i32 64, i32 65, i32 68, i32 69, i32 72, i32 73, i32 76, i32 77, i32 80, i32 81, i32 84, i32 85, i32 88, i32 89, i32 92, i32 93, i32 32, i32 33, i32 36, i32 37, i32 40, i32 41, i32 44, i32 45, i32 48, i32 49, i32 52, i32 53, i32 56, i32 57, i32 60, i32 61, i32 96, i32 97, i32 100, i32 101, i32 104, i32 105, i32 108, i32 109, i32 112, i32 113, i32 116, i32 117, i32 120, i32 121, i32 124, i32 125, i32 128, i32 129, i32 132, i32 133, i32 136, i32 137, i32 140, i32 141, i32 144, i32 145, i32 148, i32 149, i32 152, i32 153, i32 156, i32 157, i32 192, i32 193, i32 196, i32 197, i32 200, i32 201, i32 204, i32 205, i32 208, i32 209, i32 212, i32 213, i32 216, i32 217, i32 220, i32 221, i32 160, i32 161, i32 164, i32 165, i32 168, i32 169, i32 172, i32 173, i32 176, i32 177, i32 180, i32 181, i32 184, i32 185, i32 188, i32 189, i32 224, i32 225, i32 228, i32 229, i32 232, i32 233, i32 236, i32 237, i32 240, i32 241, i32 244, i32 245, i32 248, i32 249, i32 252, i32 253, i32 2, i32 3, i32 6, i32 7, i32 10, i32 11, i32 14, i32 15, i32 18, i32 19, i32 22, i32 23, i32 26, i32 27, i32 30, i32 31, i32 66, i32 67, i32 70, i32 71, i32 74, i32 75, i32 78, i32 79, i32 82, i32 83, i32 86, i32 87, i32 90, i32 91, i32 94, i32 95, i32 34, i32 35, i32 38, i32 39, i32 42, i32 43, i32 46, i32 47, i32 50, i32 51, i32 54, i32 55, i32 58, i32 59, i32 62, i32 63, i32 98, i32 99, i32 102, i32 103, i32 106, i32 107, i32 110, i32 111, i32 114, i32 115, i32 118, i32 119, i32 122, i32 123, i32 126, i32 127, i32 130, i32 131, i32 134, i32 135, i32 138, i32 139, i32 142, i32 143, i32 146, i32 147, i32 150, i32 151, i32 154, i32 155, i32 158, i32 159, i32 194, i32 195, i32 198, i32 199, i32 202, i32 203, i32 206, i32 207, i32 210, i32 211, i32 214, i32 215, i32 218, i32 219, i32 222, i32 223, i32 162, i32 163, i32 166, i32 167, i32 170, i32 171, i32 174, i32 175, i32 178, i32 179, i32 182, i32 183, i32 186, i32 187, i32 190, i32 191, i32 226, i32 227, i32 230, i32 231, i32 234, i32 235, i32 238, i32 239, i32 242, i32 243, i32 246, i32 247, i32 250, i32 251, i32 254, i32 255> @@ -765,7 +765,7 @@ define <256 x i8> @vdeal_5e(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_5f: -; CHECK: [[REG5f:r[0-9]+]] = #95 +; CHECK: [[REG5f:r[0-9]+]] = #-33 ; CHECK: vdeal(v1,v0,[[REG5f]]) define <256 x i8> @vdeal_5f(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126, i32 128, i32 130, i32 132, i32 134, i32 136, i32 138, i32 140, i32 142, i32 144, i32 146, i32 148, i32 150, i32 152, i32 154, i32 156, i32 158, i32 192, i32 194, i32 196, i32 198, i32 200, i32 202, i32 204, i32 206, i32 208, i32 210, i32 212, i32 214, i32 216, i32 218, i32 220, i32 222, 
i32 160, i32 162, i32 164, i32 166, i32 168, i32 170, i32 172, i32 174, i32 176, i32 178, i32 180, i32 182, i32 184, i32 186, i32 188, i32 190, i32 224, i32 226, i32 228, i32 230, i32 232, i32 234, i32 236, i32 238, i32 240, i32 242, i32 244, i32 246, i32 248, i32 250, i32 252, i32 254, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127, i32 129, i32 131, i32 133, i32 135, i32 137, i32 139, i32 141, i32 143, i32 145, i32 147, i32 149, i32 151, i32 153, i32 155, i32 157, i32 159, i32 193, i32 195, i32 197, i32 199, i32 201, i32 203, i32 205, i32 207, i32 209, i32 211, i32 213, i32 215, i32 217, i32 219, i32 221, i32 223, i32 161, i32 163, i32 165, i32 167, i32 169, i32 171, i32 173, i32 175, i32 177, i32 179, i32 181, i32 183, i32 185, i32 187, i32 189, i32 191, i32 225, i32 227, i32 229, i32 231, i32 233, i32 235, i32 237, i32 239, i32 241, i32 243, i32 245, i32 247, i32 249, i32 251, i32 253, i32 255> @@ -773,7 +773,7 @@ define <256 x i8> @vdeal_5f(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_60: -; CHECK: [[REG60:r[0-9]+]] = #96 +; CHECK: [[REG60:r[0-9]+]] = #-32 ; CHECK: vdeal(v1,v0,[[REG60]]) define <256 x i8> @vdeal_60(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, 
i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -781,7 +781,7 @@ define <256 x i8> @vdeal_60(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_61: -; CHECK: [[REG61:r[0-9]+]] = #97 +; CHECK: [[REG61:r[0-9]+]] = #-31 ; CHECK: vdeal(v1,v0,[[REG61]]) define <256 x i8> @vdeal_61(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 32, i32 2, i32 34, i32 4, i32 36, i32 6, i32 38, i32 8, i32 40, i32 10, i32 42, i32 12, i32 44, i32 14, i32 46, i32 16, i32 48, i32 18, i32 50, i32 20, i32 52, i32 22, i32 54, i32 24, i32 56, i32 26, i32 58, i32 28, i32 60, i32 30, i32 62, i32 64, i32 96, i32 66, i32 98, i32 68, i32 100, i32 70, i32 102, i32 72, i32 104, i32 74, i32 106, i32 76, i32 108, i32 78, i32 110, i32 80, i32 112, i32 82, i32 114, i32 84, i32 116, i32 86, i32 118, i32 88, i32 120, i32 90, i32 122, i32 92, i32 124, i32 94, i32 126, i32 128, i32 160, i32 130, i32 162, i32 132, i32 164, i32 134, i32 166, i32 136, i32 168, i32 138, i32 170, i32 140, i32 172, i32 142, i32 174, i32 144, i32 176, i32 146, i32 178, i32 148, i32 180, i32 150, i32 182, i32 152, i32 184, i32 154, i32 186, i32 156, i32 188, i32 158, i32 190, i32 192, i32 224, i32 194, i32 226, i32 196, i32 228, i32 198, i32 230, i32 200, i32 232, i32 202, i32 234, i32 204, i32 236, i32 206, i32 238, i32 208, i32 240, i32 210, i32 242, i32 212, i32 244, i32 214, i32 246, i32 216, i32 248, i32 218, i32 250, i32 220, i32 252, i32 222, i32 254, i32 1, i32 33, i32 3, i32 35, i32 5, i32 37, i32 7, i32 39, i32 9, i32 41, i32 11, i32 43, i32 13, i32 45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63, i32 65, i32 97, i32 67, i32 99, i32 69, i32 101, i32 71, i32 103, i32 73, i32 105, i32 75, i32 107, i32 77, i32 109, i32 79, i32 111, i32 81, i32 113, i32 83, i32 115, i32 85, i32 117, i32 87, i32 119, i32 89, i32 121, i32 91, i32 123, i32 93, i32 125, i32 95, i32 127, i32 129, i32 161, i32 131, i32 163, i32 133, i32 165, i32 135, i32 167, i32 137, i32 169, i32 139, i32 171, i32 141, i32 173, i32 143, i32 175, i32 145, i32 177, i32 147, i32 179, i32 149, i32 181, i32 151, i32 183, i32 153, i32 185, i32 155, i32 187, i32 157, i32 189, i32 159, i32 191, i32 193, i32 225, i32 195, i32 227, i32 197, i32 229, i32 199, i32 231, i32 201, i32 233, i32 203, i32 235, i32 205, i32 237, i32 207, i32 239, i32 209, i32 241, i32 211, i32 243, i32 213, i32 245, i32 215, i32 247, i32 217, i32 249, i32 219, i32 251, i32 221, i32 253, i32 223, i32 255> @@ -789,7 +789,7 @@ define <256 x i8> @vdeal_61(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_62: -; CHECK: [[REG62:r[0-9]+]] = #98 +; CHECK: [[REG62:r[0-9]+]] = #-30 ; CHECK: vdeal(v1,v0,[[REG62]]) define <256 x i8> @vdeal_62(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 32, i32 33, i32 4, i32 5, i32 36, i32 37, i32 8, i32 9, i32 40, i32 41, i32 12, i32 13, i32 44, i32 45, i32 16, i32 17, i32 48, i32 49, i32 20, i32 21, i32 52, i32 53, i32 24, i32 25, i32 56, i32 57, i32 28, i32 29, i32 60, i32 61, i32 64, i32 65, i32 96, i32 97, i32 68, i32 69, i32 
100, i32 101, i32 72, i32 73, i32 104, i32 105, i32 76, i32 77, i32 108, i32 109, i32 80, i32 81, i32 112, i32 113, i32 84, i32 85, i32 116, i32 117, i32 88, i32 89, i32 120, i32 121, i32 92, i32 93, i32 124, i32 125, i32 128, i32 129, i32 160, i32 161, i32 132, i32 133, i32 164, i32 165, i32 136, i32 137, i32 168, i32 169, i32 140, i32 141, i32 172, i32 173, i32 144, i32 145, i32 176, i32 177, i32 148, i32 149, i32 180, i32 181, i32 152, i32 153, i32 184, i32 185, i32 156, i32 157, i32 188, i32 189, i32 192, i32 193, i32 224, i32 225, i32 196, i32 197, i32 228, i32 229, i32 200, i32 201, i32 232, i32 233, i32 204, i32 205, i32 236, i32 237, i32 208, i32 209, i32 240, i32 241, i32 212, i32 213, i32 244, i32 245, i32 216, i32 217, i32 248, i32 249, i32 220, i32 221, i32 252, i32 253, i32 2, i32 3, i32 34, i32 35, i32 6, i32 7, i32 38, i32 39, i32 10, i32 11, i32 42, i32 43, i32 14, i32 15, i32 46, i32 47, i32 18, i32 19, i32 50, i32 51, i32 22, i32 23, i32 54, i32 55, i32 26, i32 27, i32 58, i32 59, i32 30, i32 31, i32 62, i32 63, i32 66, i32 67, i32 98, i32 99, i32 70, i32 71, i32 102, i32 103, i32 74, i32 75, i32 106, i32 107, i32 78, i32 79, i32 110, i32 111, i32 82, i32 83, i32 114, i32 115, i32 86, i32 87, i32 118, i32 119, i32 90, i32 91, i32 122, i32 123, i32 94, i32 95, i32 126, i32 127, i32 130, i32 131, i32 162, i32 163, i32 134, i32 135, i32 166, i32 167, i32 138, i32 139, i32 170, i32 171, i32 142, i32 143, i32 174, i32 175, i32 146, i32 147, i32 178, i32 179, i32 150, i32 151, i32 182, i32 183, i32 154, i32 155, i32 186, i32 187, i32 158, i32 159, i32 190, i32 191, i32 194, i32 195, i32 226, i32 227, i32 198, i32 199, i32 230, i32 231, i32 202, i32 203, i32 234, i32 235, i32 206, i32 207, i32 238, i32 239, i32 210, i32 211, i32 242, i32 243, i32 214, i32 215, i32 246, i32 247, i32 218, i32 219, i32 250, i32 251, i32 222, i32 223, i32 254, i32 255> @@ -797,7 +797,7 @@ define <256 x i8> @vdeal_62(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_63: -; CHECK: [[REG63:r[0-9]+]] = #99 +; CHECK: [[REG63:r[0-9]+]] = #-29 ; CHECK: vdeal(v1,v0,[[REG63]]) define <256 x i8> @vdeal_63(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 32, i32 34, i32 4, i32 6, i32 36, i32 38, i32 8, i32 10, i32 40, i32 42, i32 12, i32 14, i32 44, i32 46, i32 16, i32 18, i32 48, i32 50, i32 20, i32 22, i32 52, i32 54, i32 24, i32 26, i32 56, i32 58, i32 28, i32 30, i32 60, i32 62, i32 64, i32 66, i32 96, i32 98, i32 68, i32 70, i32 100, i32 102, i32 72, i32 74, i32 104, i32 106, i32 76, i32 78, i32 108, i32 110, i32 80, i32 82, i32 112, i32 114, i32 84, i32 86, i32 116, i32 118, i32 88, i32 90, i32 120, i32 122, i32 92, i32 94, i32 124, i32 126, i32 128, i32 130, i32 160, i32 162, i32 132, i32 134, i32 164, i32 166, i32 136, i32 138, i32 168, i32 170, i32 140, i32 142, i32 172, i32 174, i32 144, i32 146, i32 176, i32 178, i32 148, i32 150, i32 180, i32 182, i32 152, i32 154, i32 184, i32 186, i32 156, i32 158, i32 188, i32 190, i32 192, i32 194, i32 224, i32 226, i32 196, i32 198, i32 228, i32 230, i32 200, i32 202, i32 232, i32 234, i32 204, i32 206, i32 236, i32 238, i32 208, i32 210, i32 240, i32 242, i32 212, i32 214, i32 244, i32 246, i32 216, i32 218, i32 248, i32 250, i32 220, i32 222, i32 252, i32 254, i32 1, i32 3, i32 33, i32 35, i32 5, i32 7, i32 37, i32 39, i32 9, i32 11, i32 41, i32 43, i32 13, i32 15, i32 45, i32 47, i32 17, i32 19, i32 49, i32 51, i32 21, i32 23, i32 53, i32 55, i32 25, i32 27, i32 57, i32 59, 
i32 29, i32 31, i32 61, i32 63, i32 65, i32 67, i32 97, i32 99, i32 69, i32 71, i32 101, i32 103, i32 73, i32 75, i32 105, i32 107, i32 77, i32 79, i32 109, i32 111, i32 81, i32 83, i32 113, i32 115, i32 85, i32 87, i32 117, i32 119, i32 89, i32 91, i32 121, i32 123, i32 93, i32 95, i32 125, i32 127, i32 129, i32 131, i32 161, i32 163, i32 133, i32 135, i32 165, i32 167, i32 137, i32 139, i32 169, i32 171, i32 141, i32 143, i32 173, i32 175, i32 145, i32 147, i32 177, i32 179, i32 149, i32 151, i32 181, i32 183, i32 153, i32 155, i32 185, i32 187, i32 157, i32 159, i32 189, i32 191, i32 193, i32 195, i32 225, i32 227, i32 197, i32 199, i32 229, i32 231, i32 201, i32 203, i32 233, i32 235, i32 205, i32 207, i32 237, i32 239, i32 209, i32 211, i32 241, i32 243, i32 213, i32 215, i32 245, i32 247, i32 217, i32 219, i32 249, i32 251, i32 221, i32 223, i32 253, i32 255> @@ -805,7 +805,7 @@ define <256 x i8> @vdeal_63(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_64: -; CHECK: [[REG64:r[0-9]+]] = #100 +; CHECK: [[REG64:r[0-9]+]] = #-28 ; CHECK: vdeal(v1,v0,[[REG64]]) define <256 x i8> @vdeal_64(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 32, i32 33, i32 34, i32 35, i32 8, i32 9, i32 10, i32 11, i32 40, i32 41, i32 42, i32 43, i32 16, i32 17, i32 18, i32 19, i32 48, i32 49, i32 50, i32 51, i32 24, i32 25, i32 26, i32 27, i32 56, i32 57, i32 58, i32 59, i32 64, i32 65, i32 66, i32 67, i32 96, i32 97, i32 98, i32 99, i32 72, i32 73, i32 74, i32 75, i32 104, i32 105, i32 106, i32 107, i32 80, i32 81, i32 82, i32 83, i32 112, i32 113, i32 114, i32 115, i32 88, i32 89, i32 90, i32 91, i32 120, i32 121, i32 122, i32 123, i32 128, i32 129, i32 130, i32 131, i32 160, i32 161, i32 162, i32 163, i32 136, i32 137, i32 138, i32 139, i32 168, i32 169, i32 170, i32 171, i32 144, i32 145, i32 146, i32 147, i32 176, i32 177, i32 178, i32 179, i32 152, i32 153, i32 154, i32 155, i32 184, i32 185, i32 186, i32 187, i32 192, i32 193, i32 194, i32 195, i32 224, i32 225, i32 226, i32 227, i32 200, i32 201, i32 202, i32 203, i32 232, i32 233, i32 234, i32 235, i32 208, i32 209, i32 210, i32 211, i32 240, i32 241, i32 242, i32 243, i32 216, i32 217, i32 218, i32 219, i32 248, i32 249, i32 250, i32 251, i32 4, i32 5, i32 6, i32 7, i32 36, i32 37, i32 38, i32 39, i32 12, i32 13, i32 14, i32 15, i32 44, i32 45, i32 46, i32 47, i32 20, i32 21, i32 22, i32 23, i32 52, i32 53, i32 54, i32 55, i32 28, i32 29, i32 30, i32 31, i32 60, i32 61, i32 62, i32 63, i32 68, i32 69, i32 70, i32 71, i32 100, i32 101, i32 102, i32 103, i32 76, i32 77, i32 78, i32 79, i32 108, i32 109, i32 110, i32 111, i32 84, i32 85, i32 86, i32 87, i32 116, i32 117, i32 118, i32 119, i32 92, i32 93, i32 94, i32 95, i32 124, i32 125, i32 126, i32 127, i32 132, i32 133, i32 134, i32 135, i32 164, i32 165, i32 166, i32 167, i32 140, i32 141, i32 142, i32 143, i32 172, i32 173, i32 174, i32 175, i32 148, i32 149, i32 150, i32 151, i32 180, i32 181, i32 182, i32 183, i32 156, i32 157, i32 158, i32 159, i32 188, i32 189, i32 190, i32 191, i32 196, i32 197, i32 198, i32 199, i32 228, i32 229, i32 230, i32 231, i32 204, i32 205, i32 206, i32 207, i32 236, i32 237, i32 238, i32 239, i32 212, i32 213, i32 214, i32 215, i32 244, i32 245, i32 246, i32 247, i32 220, i32 221, i32 222, i32 223, i32 252, i32 253, i32 254, i32 255> @@ -813,7 +813,7 @@ define <256 x i8> @vdeal_64(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_65: -; CHECK: 
[[REG65:r[0-9]+]] = #101 +; CHECK: [[REG65:r[0-9]+]] = #-27 ; CHECK: vdeal(v1,v0,[[REG65]]) define <256 x i8> @vdeal_65(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 4, i32 2, i32 6, i32 32, i32 36, i32 34, i32 38, i32 8, i32 12, i32 10, i32 14, i32 40, i32 44, i32 42, i32 46, i32 16, i32 20, i32 18, i32 22, i32 48, i32 52, i32 50, i32 54, i32 24, i32 28, i32 26, i32 30, i32 56, i32 60, i32 58, i32 62, i32 64, i32 68, i32 66, i32 70, i32 96, i32 100, i32 98, i32 102, i32 72, i32 76, i32 74, i32 78, i32 104, i32 108, i32 106, i32 110, i32 80, i32 84, i32 82, i32 86, i32 112, i32 116, i32 114, i32 118, i32 88, i32 92, i32 90, i32 94, i32 120, i32 124, i32 122, i32 126, i32 128, i32 132, i32 130, i32 134, i32 160, i32 164, i32 162, i32 166, i32 136, i32 140, i32 138, i32 142, i32 168, i32 172, i32 170, i32 174, i32 144, i32 148, i32 146, i32 150, i32 176, i32 180, i32 178, i32 182, i32 152, i32 156, i32 154, i32 158, i32 184, i32 188, i32 186, i32 190, i32 192, i32 196, i32 194, i32 198, i32 224, i32 228, i32 226, i32 230, i32 200, i32 204, i32 202, i32 206, i32 232, i32 236, i32 234, i32 238, i32 208, i32 212, i32 210, i32 214, i32 240, i32 244, i32 242, i32 246, i32 216, i32 220, i32 218, i32 222, i32 248, i32 252, i32 250, i32 254, i32 1, i32 5, i32 3, i32 7, i32 33, i32 37, i32 35, i32 39, i32 9, i32 13, i32 11, i32 15, i32 41, i32 45, i32 43, i32 47, i32 17, i32 21, i32 19, i32 23, i32 49, i32 53, i32 51, i32 55, i32 25, i32 29, i32 27, i32 31, i32 57, i32 61, i32 59, i32 63, i32 65, i32 69, i32 67, i32 71, i32 97, i32 101, i32 99, i32 103, i32 73, i32 77, i32 75, i32 79, i32 105, i32 109, i32 107, i32 111, i32 81, i32 85, i32 83, i32 87, i32 113, i32 117, i32 115, i32 119, i32 89, i32 93, i32 91, i32 95, i32 121, i32 125, i32 123, i32 127, i32 129, i32 133, i32 131, i32 135, i32 161, i32 165, i32 163, i32 167, i32 137, i32 141, i32 139, i32 143, i32 169, i32 173, i32 171, i32 175, i32 145, i32 149, i32 147, i32 151, i32 177, i32 181, i32 179, i32 183, i32 153, i32 157, i32 155, i32 159, i32 185, i32 189, i32 187, i32 191, i32 193, i32 197, i32 195, i32 199, i32 225, i32 229, i32 227, i32 231, i32 201, i32 205, i32 203, i32 207, i32 233, i32 237, i32 235, i32 239, i32 209, i32 213, i32 211, i32 215, i32 241, i32 245, i32 243, i32 247, i32 217, i32 221, i32 219, i32 223, i32 249, i32 253, i32 251, i32 255> @@ -821,7 +821,7 @@ define <256 x i8> @vdeal_65(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_66: -; CHECK: [[REG66:r[0-9]+]] = #102 +; CHECK: [[REG66:r[0-9]+]] = #-26 ; CHECK: vdeal(v1,v0,[[REG66]]) define <256 x i8> @vdeal_66(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 4, i32 5, i32 32, i32 33, i32 36, i32 37, i32 8, i32 9, i32 12, i32 13, i32 40, i32 41, i32 44, i32 45, i32 16, i32 17, i32 20, i32 21, i32 48, i32 49, i32 52, i32 53, i32 24, i32 25, i32 28, i32 29, i32 56, i32 57, i32 60, i32 61, i32 64, i32 65, i32 68, i32 69, i32 96, i32 97, i32 100, i32 101, i32 72, i32 73, i32 76, i32 77, i32 104, i32 105, i32 108, i32 109, i32 80, i32 81, i32 84, i32 85, i32 112, i32 113, i32 116, i32 117, i32 88, i32 89, i32 92, i32 93, i32 120, i32 121, i32 124, i32 125, i32 128, i32 129, i32 132, i32 133, i32 160, i32 161, i32 164, i32 165, i32 136, i32 137, i32 140, i32 141, i32 168, i32 169, i32 172, i32 173, i32 144, i32 145, i32 148, i32 149, i32 176, i32 177, i32 180, i32 181, i32 152, i32 153, i32 156, i32 157, i32 184, i32 185, 
i32 188, i32 189, i32 192, i32 193, i32 196, i32 197, i32 224, i32 225, i32 228, i32 229, i32 200, i32 201, i32 204, i32 205, i32 232, i32 233, i32 236, i32 237, i32 208, i32 209, i32 212, i32 213, i32 240, i32 241, i32 244, i32 245, i32 216, i32 217, i32 220, i32 221, i32 248, i32 249, i32 252, i32 253, i32 2, i32 3, i32 6, i32 7, i32 34, i32 35, i32 38, i32 39, i32 10, i32 11, i32 14, i32 15, i32 42, i32 43, i32 46, i32 47, i32 18, i32 19, i32 22, i32 23, i32 50, i32 51, i32 54, i32 55, i32 26, i32 27, i32 30, i32 31, i32 58, i32 59, i32 62, i32 63, i32 66, i32 67, i32 70, i32 71, i32 98, i32 99, i32 102, i32 103, i32 74, i32 75, i32 78, i32 79, i32 106, i32 107, i32 110, i32 111, i32 82, i32 83, i32 86, i32 87, i32 114, i32 115, i32 118, i32 119, i32 90, i32 91, i32 94, i32 95, i32 122, i32 123, i32 126, i32 127, i32 130, i32 131, i32 134, i32 135, i32 162, i32 163, i32 166, i32 167, i32 138, i32 139, i32 142, i32 143, i32 170, i32 171, i32 174, i32 175, i32 146, i32 147, i32 150, i32 151, i32 178, i32 179, i32 182, i32 183, i32 154, i32 155, i32 158, i32 159, i32 186, i32 187, i32 190, i32 191, i32 194, i32 195, i32 198, i32 199, i32 226, i32 227, i32 230, i32 231, i32 202, i32 203, i32 206, i32 207, i32 234, i32 235, i32 238, i32 239, i32 210, i32 211, i32 214, i32 215, i32 242, i32 243, i32 246, i32 247, i32 218, i32 219, i32 222, i32 223, i32 250, i32 251, i32 254, i32 255> @@ -829,7 +829,7 @@ define <256 x i8> @vdeal_66(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_67: -; CHECK: [[REG67:r[0-9]+]] = #103 +; CHECK: [[REG67:r[0-9]+]] = #-25 ; CHECK: vdeal(v1,v0,[[REG67]]) define <256 x i8> @vdeal_67(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 4, i32 6, i32 32, i32 34, i32 36, i32 38, i32 8, i32 10, i32 12, i32 14, i32 40, i32 42, i32 44, i32 46, i32 16, i32 18, i32 20, i32 22, i32 48, i32 50, i32 52, i32 54, i32 24, i32 26, i32 28, i32 30, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 96, i32 98, i32 100, i32 102, i32 72, i32 74, i32 76, i32 78, i32 104, i32 106, i32 108, i32 110, i32 80, i32 82, i32 84, i32 86, i32 112, i32 114, i32 116, i32 118, i32 88, i32 90, i32 92, i32 94, i32 120, i32 122, i32 124, i32 126, i32 128, i32 130, i32 132, i32 134, i32 160, i32 162, i32 164, i32 166, i32 136, i32 138, i32 140, i32 142, i32 168, i32 170, i32 172, i32 174, i32 144, i32 146, i32 148, i32 150, i32 176, i32 178, i32 180, i32 182, i32 152, i32 154, i32 156, i32 158, i32 184, i32 186, i32 188, i32 190, i32 192, i32 194, i32 196, i32 198, i32 224, i32 226, i32 228, i32 230, i32 200, i32 202, i32 204, i32 206, i32 232, i32 234, i32 236, i32 238, i32 208, i32 210, i32 212, i32 214, i32 240, i32 242, i32 244, i32 246, i32 216, i32 218, i32 220, i32 222, i32 248, i32 250, i32 252, i32 254, i32 1, i32 3, i32 5, i32 7, i32 33, i32 35, i32 37, i32 39, i32 9, i32 11, i32 13, i32 15, i32 41, i32 43, i32 45, i32 47, i32 17, i32 19, i32 21, i32 23, i32 49, i32 51, i32 53, i32 55, i32 25, i32 27, i32 29, i32 31, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 97, i32 99, i32 101, i32 103, i32 73, i32 75, i32 77, i32 79, i32 105, i32 107, i32 109, i32 111, i32 81, i32 83, i32 85, i32 87, i32 113, i32 115, i32 117, i32 119, i32 89, i32 91, i32 93, i32 95, i32 121, i32 123, i32 125, i32 127, i32 129, i32 131, i32 133, i32 135, i32 161, i32 163, i32 165, i32 167, i32 137, i32 139, i32 141, i32 143, i32 169, i32 171, i32 173, i32 175, i32 145, i32 147, i32 149, i32 151, i32 
177, i32 179, i32 181, i32 183, i32 153, i32 155, i32 157, i32 159, i32 185, i32 187, i32 189, i32 191, i32 193, i32 195, i32 197, i32 199, i32 225, i32 227, i32 229, i32 231, i32 201, i32 203, i32 205, i32 207, i32 233, i32 235, i32 237, i32 239, i32 209, i32 211, i32 213, i32 215, i32 241, i32 243, i32 245, i32 247, i32 217, i32 219, i32 221, i32 223, i32 249, i32 251, i32 253, i32 255> @@ -837,7 +837,7 @@ define <256 x i8> @vdeal_67(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_68: -; CHECK: [[REG68:r[0-9]+]] = #104 +; CHECK: [[REG68:r[0-9]+]] = #-24 ; CHECK: vdeal(v1,v0,[[REG68]]) define <256 x i8> @vdeal_68(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -845,7 +845,7 @@ define <256 x i8> @vdeal_68(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_69: -; CHECK: [[REG69:r[0-9]+]] = #105 +; CHECK: [[REG69:r[0-9]+]] = #-23 ; CHECK: vdeal(v1,v0,[[REG69]]) define <256 x i8> @vdeal_69(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 32, i32 40, i32 34, i32 42, i32 36, i32 44, i32 38, i32 46, i32 16, i32 24, i32 18, i32 26, i32 20, i32 28, i32 22, i32 30, i32 48, i32 56, i32 50, i32 58, i32 52, i32 60, i32 54, i32 62, i32 64, i32 72, i32 66, 
i32 74, i32 68, i32 76, i32 70, i32 78, i32 96, i32 104, i32 98, i32 106, i32 100, i32 108, i32 102, i32 110, i32 80, i32 88, i32 82, i32 90, i32 84, i32 92, i32 86, i32 94, i32 112, i32 120, i32 114, i32 122, i32 116, i32 124, i32 118, i32 126, i32 128, i32 136, i32 130, i32 138, i32 132, i32 140, i32 134, i32 142, i32 160, i32 168, i32 162, i32 170, i32 164, i32 172, i32 166, i32 174, i32 144, i32 152, i32 146, i32 154, i32 148, i32 156, i32 150, i32 158, i32 176, i32 184, i32 178, i32 186, i32 180, i32 188, i32 182, i32 190, i32 192, i32 200, i32 194, i32 202, i32 196, i32 204, i32 198, i32 206, i32 224, i32 232, i32 226, i32 234, i32 228, i32 236, i32 230, i32 238, i32 208, i32 216, i32 210, i32 218, i32 212, i32 220, i32 214, i32 222, i32 240, i32 248, i32 242, i32 250, i32 244, i32 252, i32 246, i32 254, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, i32 33, i32 41, i32 35, i32 43, i32 37, i32 45, i32 39, i32 47, i32 17, i32 25, i32 19, i32 27, i32 21, i32 29, i32 23, i32 31, i32 49, i32 57, i32 51, i32 59, i32 53, i32 61, i32 55, i32 63, i32 65, i32 73, i32 67, i32 75, i32 69, i32 77, i32 71, i32 79, i32 97, i32 105, i32 99, i32 107, i32 101, i32 109, i32 103, i32 111, i32 81, i32 89, i32 83, i32 91, i32 85, i32 93, i32 87, i32 95, i32 113, i32 121, i32 115, i32 123, i32 117, i32 125, i32 119, i32 127, i32 129, i32 137, i32 131, i32 139, i32 133, i32 141, i32 135, i32 143, i32 161, i32 169, i32 163, i32 171, i32 165, i32 173, i32 167, i32 175, i32 145, i32 153, i32 147, i32 155, i32 149, i32 157, i32 151, i32 159, i32 177, i32 185, i32 179, i32 187, i32 181, i32 189, i32 183, i32 191, i32 193, i32 201, i32 195, i32 203, i32 197, i32 205, i32 199, i32 207, i32 225, i32 233, i32 227, i32 235, i32 229, i32 237, i32 231, i32 239, i32 209, i32 217, i32 211, i32 219, i32 213, i32 221, i32 215, i32 223, i32 241, i32 249, i32 243, i32 251, i32 245, i32 253, i32 247, i32 255> @@ -853,7 +853,7 @@ define <256 x i8> @vdeal_69(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_6a: -; CHECK: [[REG6a:r[0-9]+]] = #106 +; CHECK: [[REG6a:r[0-9]+]] = #-22 ; CHECK: vdeal(v1,v0,[[REG6a]]) define <256 x i8> @vdeal_6a(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 12, i32 13, i32 32, i32 33, i32 40, i32 41, i32 36, i32 37, i32 44, i32 45, i32 16, i32 17, i32 24, i32 25, i32 20, i32 21, i32 28, i32 29, i32 48, i32 49, i32 56, i32 57, i32 52, i32 53, i32 60, i32 61, i32 64, i32 65, i32 72, i32 73, i32 68, i32 69, i32 76, i32 77, i32 96, i32 97, i32 104, i32 105, i32 100, i32 101, i32 108, i32 109, i32 80, i32 81, i32 88, i32 89, i32 84, i32 85, i32 92, i32 93, i32 112, i32 113, i32 120, i32 121, i32 116, i32 117, i32 124, i32 125, i32 128, i32 129, i32 136, i32 137, i32 132, i32 133, i32 140, i32 141, i32 160, i32 161, i32 168, i32 169, i32 164, i32 165, i32 172, i32 173, i32 144, i32 145, i32 152, i32 153, i32 148, i32 149, i32 156, i32 157, i32 176, i32 177, i32 184, i32 185, i32 180, i32 181, i32 188, i32 189, i32 192, i32 193, i32 200, i32 201, i32 196, i32 197, i32 204, i32 205, i32 224, i32 225, i32 232, i32 233, i32 228, i32 229, i32 236, i32 237, i32 208, i32 209, i32 216, i32 217, i32 212, i32 213, i32 220, i32 221, i32 240, i32 241, i32 248, i32 249, i32 244, i32 245, i32 252, i32 253, i32 2, i32 3, i32 10, i32 11, i32 6, i32 7, i32 14, i32 15, i32 34, i32 35, i32 42, i32 43, i32 38, i32 39, i32 46, i32 47, i32 18, i32 19, i32 26, i32 27, i32 22, i32 23, i32 30, i32 31, i32 
50, i32 51, i32 58, i32 59, i32 54, i32 55, i32 62, i32 63, i32 66, i32 67, i32 74, i32 75, i32 70, i32 71, i32 78, i32 79, i32 98, i32 99, i32 106, i32 107, i32 102, i32 103, i32 110, i32 111, i32 82, i32 83, i32 90, i32 91, i32 86, i32 87, i32 94, i32 95, i32 114, i32 115, i32 122, i32 123, i32 118, i32 119, i32 126, i32 127, i32 130, i32 131, i32 138, i32 139, i32 134, i32 135, i32 142, i32 143, i32 162, i32 163, i32 170, i32 171, i32 166, i32 167, i32 174, i32 175, i32 146, i32 147, i32 154, i32 155, i32 150, i32 151, i32 158, i32 159, i32 178, i32 179, i32 186, i32 187, i32 182, i32 183, i32 190, i32 191, i32 194, i32 195, i32 202, i32 203, i32 198, i32 199, i32 206, i32 207, i32 226, i32 227, i32 234, i32 235, i32 230, i32 231, i32 238, i32 239, i32 210, i32 211, i32 218, i32 219, i32 214, i32 215, i32 222, i32 223, i32 242, i32 243, i32 250, i32 251, i32 246, i32 247, i32 254, i32 255> @@ -861,7 +861,7 @@ define <256 x i8> @vdeal_6a(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_6b: -; CHECK: [[REG6b:r[0-9]+]] = #107 +; CHECK: [[REG6b:r[0-9]+]] = #-21 ; CHECK: vdeal(v1,v0,[[REG6b]]) define <256 x i8> @vdeal_6b(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14, i32 32, i32 34, i32 40, i32 42, i32 36, i32 38, i32 44, i32 46, i32 16, i32 18, i32 24, i32 26, i32 20, i32 22, i32 28, i32 30, i32 48, i32 50, i32 56, i32 58, i32 52, i32 54, i32 60, i32 62, i32 64, i32 66, i32 72, i32 74, i32 68, i32 70, i32 76, i32 78, i32 96, i32 98, i32 104, i32 106, i32 100, i32 102, i32 108, i32 110, i32 80, i32 82, i32 88, i32 90, i32 84, i32 86, i32 92, i32 94, i32 112, i32 114, i32 120, i32 122, i32 116, i32 118, i32 124, i32 126, i32 128, i32 130, i32 136, i32 138, i32 132, i32 134, i32 140, i32 142, i32 160, i32 162, i32 168, i32 170, i32 164, i32 166, i32 172, i32 174, i32 144, i32 146, i32 152, i32 154, i32 148, i32 150, i32 156, i32 158, i32 176, i32 178, i32 184, i32 186, i32 180, i32 182, i32 188, i32 190, i32 192, i32 194, i32 200, i32 202, i32 196, i32 198, i32 204, i32 206, i32 224, i32 226, i32 232, i32 234, i32 228, i32 230, i32 236, i32 238, i32 208, i32 210, i32 216, i32 218, i32 212, i32 214, i32 220, i32 222, i32 240, i32 242, i32 248, i32 250, i32 244, i32 246, i32 252, i32 254, i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15, i32 33, i32 35, i32 41, i32 43, i32 37, i32 39, i32 45, i32 47, i32 17, i32 19, i32 25, i32 27, i32 21, i32 23, i32 29, i32 31, i32 49, i32 51, i32 57, i32 59, i32 53, i32 55, i32 61, i32 63, i32 65, i32 67, i32 73, i32 75, i32 69, i32 71, i32 77, i32 79, i32 97, i32 99, i32 105, i32 107, i32 101, i32 103, i32 109, i32 111, i32 81, i32 83, i32 89, i32 91, i32 85, i32 87, i32 93, i32 95, i32 113, i32 115, i32 121, i32 123, i32 117, i32 119, i32 125, i32 127, i32 129, i32 131, i32 137, i32 139, i32 133, i32 135, i32 141, i32 143, i32 161, i32 163, i32 169, i32 171, i32 165, i32 167, i32 173, i32 175, i32 145, i32 147, i32 153, i32 155, i32 149, i32 151, i32 157, i32 159, i32 177, i32 179, i32 185, i32 187, i32 181, i32 183, i32 189, i32 191, i32 193, i32 195, i32 201, i32 203, i32 197, i32 199, i32 205, i32 207, i32 225, i32 227, i32 233, i32 235, i32 229, i32 231, i32 237, i32 239, i32 209, i32 211, i32 217, i32 219, i32 213, i32 215, i32 221, i32 223, i32 241, i32 243, i32 249, i32 251, i32 245, i32 247, i32 253, i32 255> @@ -869,7 +869,7 @@ define <256 x i8> @vdeal_6b(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: 
vdeal_6c: -; CHECK: [[REG6c:r[0-9]+]] = #108 +; CHECK: [[REG6c:r[0-9]+]] = #-20 ; CHECK: vdeal(v1,v0,[[REG6c]]) define <256 x i8> @vdeal_6c(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 32, i32 33, i32 34, i32 35, i32 40, i32 41, i32 42, i32 43, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27, i32 48, i32 49, i32 50, i32 51, i32 56, i32 57, i32 58, i32 59, i32 64, i32 65, i32 66, i32 67, i32 72, i32 73, i32 74, i32 75, i32 96, i32 97, i32 98, i32 99, i32 104, i32 105, i32 106, i32 107, i32 80, i32 81, i32 82, i32 83, i32 88, i32 89, i32 90, i32 91, i32 112, i32 113, i32 114, i32 115, i32 120, i32 121, i32 122, i32 123, i32 128, i32 129, i32 130, i32 131, i32 136, i32 137, i32 138, i32 139, i32 160, i32 161, i32 162, i32 163, i32 168, i32 169, i32 170, i32 171, i32 144, i32 145, i32 146, i32 147, i32 152, i32 153, i32 154, i32 155, i32 176, i32 177, i32 178, i32 179, i32 184, i32 185, i32 186, i32 187, i32 192, i32 193, i32 194, i32 195, i32 200, i32 201, i32 202, i32 203, i32 224, i32 225, i32 226, i32 227, i32 232, i32 233, i32 234, i32 235, i32 208, i32 209, i32 210, i32 211, i32 216, i32 217, i32 218, i32 219, i32 240, i32 241, i32 242, i32 243, i32 248, i32 249, i32 250, i32 251, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 36, i32 37, i32 38, i32 39, i32 44, i32 45, i32 46, i32 47, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31, i32 52, i32 53, i32 54, i32 55, i32 60, i32 61, i32 62, i32 63, i32 68, i32 69, i32 70, i32 71, i32 76, i32 77, i32 78, i32 79, i32 100, i32 101, i32 102, i32 103, i32 108, i32 109, i32 110, i32 111, i32 84, i32 85, i32 86, i32 87, i32 92, i32 93, i32 94, i32 95, i32 116, i32 117, i32 118, i32 119, i32 124, i32 125, i32 126, i32 127, i32 132, i32 133, i32 134, i32 135, i32 140, i32 141, i32 142, i32 143, i32 164, i32 165, i32 166, i32 167, i32 172, i32 173, i32 174, i32 175, i32 148, i32 149, i32 150, i32 151, i32 156, i32 157, i32 158, i32 159, i32 180, i32 181, i32 182, i32 183, i32 188, i32 189, i32 190, i32 191, i32 196, i32 197, i32 198, i32 199, i32 204, i32 205, i32 206, i32 207, i32 228, i32 229, i32 230, i32 231, i32 236, i32 237, i32 238, i32 239, i32 212, i32 213, i32 214, i32 215, i32 220, i32 221, i32 222, i32 223, i32 244, i32 245, i32 246, i32 247, i32 252, i32 253, i32 254, i32 255> @@ -877,7 +877,7 @@ define <256 x i8> @vdeal_6c(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_6d: -; CHECK: [[REG6d:r[0-9]+]] = #109 +; CHECK: [[REG6d:r[0-9]+]] = #-19 ; CHECK: vdeal(v1,v0,[[REG6d]]) define <256 x i8> @vdeal_6d(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 4, i32 2, i32 6, i32 8, i32 12, i32 10, i32 14, i32 32, i32 36, i32 34, i32 38, i32 40, i32 44, i32 42, i32 46, i32 16, i32 20, i32 18, i32 22, i32 24, i32 28, i32 26, i32 30, i32 48, i32 52, i32 50, i32 54, i32 56, i32 60, i32 58, i32 62, i32 64, i32 68, i32 66, i32 70, i32 72, i32 76, i32 74, i32 78, i32 96, i32 100, i32 98, i32 102, i32 104, i32 108, i32 106, i32 110, i32 80, i32 84, i32 82, i32 86, i32 88, i32 92, i32 90, i32 94, i32 112, i32 116, i32 114, i32 118, i32 120, i32 124, i32 122, i32 126, i32 128, i32 132, i32 130, i32 134, i32 136, i32 140, i32 138, i32 142, i32 160, i32 164, i32 162, i32 166, i32 168, i32 172, i32 170, i32 174, i32 144, i32 148, i32 146, i32 150, i32 152, i32 156, i32 154, i32 158, i32 176, i32 180, i32 178, i32 
182, i32 184, i32 188, i32 186, i32 190, i32 192, i32 196, i32 194, i32 198, i32 200, i32 204, i32 202, i32 206, i32 224, i32 228, i32 226, i32 230, i32 232, i32 236, i32 234, i32 238, i32 208, i32 212, i32 210, i32 214, i32 216, i32 220, i32 218, i32 222, i32 240, i32 244, i32 242, i32 246, i32 248, i32 252, i32 250, i32 254, i32 1, i32 5, i32 3, i32 7, i32 9, i32 13, i32 11, i32 15, i32 33, i32 37, i32 35, i32 39, i32 41, i32 45, i32 43, i32 47, i32 17, i32 21, i32 19, i32 23, i32 25, i32 29, i32 27, i32 31, i32 49, i32 53, i32 51, i32 55, i32 57, i32 61, i32 59, i32 63, i32 65, i32 69, i32 67, i32 71, i32 73, i32 77, i32 75, i32 79, i32 97, i32 101, i32 99, i32 103, i32 105, i32 109, i32 107, i32 111, i32 81, i32 85, i32 83, i32 87, i32 89, i32 93, i32 91, i32 95, i32 113, i32 117, i32 115, i32 119, i32 121, i32 125, i32 123, i32 127, i32 129, i32 133, i32 131, i32 135, i32 137, i32 141, i32 139, i32 143, i32 161, i32 165, i32 163, i32 167, i32 169, i32 173, i32 171, i32 175, i32 145, i32 149, i32 147, i32 151, i32 153, i32 157, i32 155, i32 159, i32 177, i32 181, i32 179, i32 183, i32 185, i32 189, i32 187, i32 191, i32 193, i32 197, i32 195, i32 199, i32 201, i32 205, i32 203, i32 207, i32 225, i32 229, i32 227, i32 231, i32 233, i32 237, i32 235, i32 239, i32 209, i32 213, i32 211, i32 215, i32 217, i32 221, i32 219, i32 223, i32 241, i32 245, i32 243, i32 247, i32 249, i32 253, i32 251, i32 255> @@ -885,7 +885,7 @@ define <256 x i8> @vdeal_6d(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_6e: -; CHECK: [[REG6e:r[0-9]+]] = #110 +; CHECK: [[REG6e:r[0-9]+]] = #-18 ; CHECK: vdeal(v1,v0,[[REG6e]]) define <256 x i8> @vdeal_6e(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 32, i32 33, i32 36, i32 37, i32 40, i32 41, i32 44, i32 45, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29, i32 48, i32 49, i32 52, i32 53, i32 56, i32 57, i32 60, i32 61, i32 64, i32 65, i32 68, i32 69, i32 72, i32 73, i32 76, i32 77, i32 96, i32 97, i32 100, i32 101, i32 104, i32 105, i32 108, i32 109, i32 80, i32 81, i32 84, i32 85, i32 88, i32 89, i32 92, i32 93, i32 112, i32 113, i32 116, i32 117, i32 120, i32 121, i32 124, i32 125, i32 128, i32 129, i32 132, i32 133, i32 136, i32 137, i32 140, i32 141, i32 160, i32 161, i32 164, i32 165, i32 168, i32 169, i32 172, i32 173, i32 144, i32 145, i32 148, i32 149, i32 152, i32 153, i32 156, i32 157, i32 176, i32 177, i32 180, i32 181, i32 184, i32 185, i32 188, i32 189, i32 192, i32 193, i32 196, i32 197, i32 200, i32 201, i32 204, i32 205, i32 224, i32 225, i32 228, i32 229, i32 232, i32 233, i32 236, i32 237, i32 208, i32 209, i32 212, i32 213, i32 216, i32 217, i32 220, i32 221, i32 240, i32 241, i32 244, i32 245, i32 248, i32 249, i32 252, i32 253, i32 2, i32 3, i32 6, i32 7, i32 10, i32 11, i32 14, i32 15, i32 34, i32 35, i32 38, i32 39, i32 42, i32 43, i32 46, i32 47, i32 18, i32 19, i32 22, i32 23, i32 26, i32 27, i32 30, i32 31, i32 50, i32 51, i32 54, i32 55, i32 58, i32 59, i32 62, i32 63, i32 66, i32 67, i32 70, i32 71, i32 74, i32 75, i32 78, i32 79, i32 98, i32 99, i32 102, i32 103, i32 106, i32 107, i32 110, i32 111, i32 82, i32 83, i32 86, i32 87, i32 90, i32 91, i32 94, i32 95, i32 114, i32 115, i32 118, i32 119, i32 122, i32 123, i32 126, i32 127, i32 130, i32 131, i32 134, i32 135, i32 138, i32 139, i32 142, i32 143, i32 162, i32 163, i32 166, i32 167, i32 170, i32 171, i32 174, i32 175, i32 146, i32 147, 
i32 150, i32 151, i32 154, i32 155, i32 158, i32 159, i32 178, i32 179, i32 182, i32 183, i32 186, i32 187, i32 190, i32 191, i32 194, i32 195, i32 198, i32 199, i32 202, i32 203, i32 206, i32 207, i32 226, i32 227, i32 230, i32 231, i32 234, i32 235, i32 238, i32 239, i32 210, i32 211, i32 214, i32 215, i32 218, i32 219, i32 222, i32 223, i32 242, i32 243, i32 246, i32 247, i32 250, i32 251, i32 254, i32 255> @@ -893,7 +893,7 @@ define <256 x i8> @vdeal_6e(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_6f: -; CHECK: [[REG6f:r[0-9]+]] = #111 +; CHECK: [[REG6f:r[0-9]+]] = #-17 ; CHECK: vdeal(v1,v0,[[REG6f]]) define <256 x i8> @vdeal_6f(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126, i32 128, i32 130, i32 132, i32 134, i32 136, i32 138, i32 140, i32 142, i32 160, i32 162, i32 164, i32 166, i32 168, i32 170, i32 172, i32 174, i32 144, i32 146, i32 148, i32 150, i32 152, i32 154, i32 156, i32 158, i32 176, i32 178, i32 180, i32 182, i32 184, i32 186, i32 188, i32 190, i32 192, i32 194, i32 196, i32 198, i32 200, i32 202, i32 204, i32 206, i32 224, i32 226, i32 228, i32 230, i32 232, i32 234, i32 236, i32 238, i32 208, i32 210, i32 212, i32 214, i32 216, i32 218, i32 220, i32 222, i32 240, i32 242, i32 244, i32 246, i32 248, i32 250, i32 252, i32 254, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127, i32 129, i32 131, i32 133, i32 135, i32 137, i32 139, i32 141, i32 143, i32 161, i32 163, i32 165, i32 167, i32 169, i32 171, i32 173, i32 175, i32 145, i32 147, i32 149, i32 151, i32 153, i32 155, i32 157, i32 159, i32 177, i32 179, i32 181, i32 183, i32 185, i32 187, i32 189, i32 191, i32 193, i32 195, i32 197, i32 199, i32 201, i32 203, i32 205, i32 207, i32 225, i32 227, i32 229, i32 231, i32 233, i32 235, i32 237, i32 239, i32 209, i32 211, i32 213, i32 215, i32 217, i32 219, i32 221, i32 223, i32 241, i32 243, i32 245, i32 247, i32 249, i32 251, i32 253, i32 255> @@ -901,7 +901,7 @@ define <256 x i8> @vdeal_6f(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_70: -; CHECK: [[REG70:r[0-9]+]] = #112 +; CHECK: [[REG70:r[0-9]+]] = #-16 ; CHECK: vdeal(v1,v0,[[REG70]]) define <256 x i8> @vdeal_70(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 64, 
i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -909,7 +909,7 @@ define <256 x i8> @vdeal_70(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_71: -; CHECK: [[REG71:r[0-9]+]] = #113 +; CHECK: [[REG71:r[0-9]+]] = #-15 ; CHECK: vdeal(v1,v0,[[REG71]]) define <256 x i8> @vdeal_71(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30, i32 32, i32 48, i32 34, i32 50, i32 36, i32 52, i32 38, i32 54, i32 40, i32 56, i32 42, i32 58, i32 44, i32 60, i32 46, i32 62, i32 64, i32 80, i32 66, i32 82, i32 68, i32 84, i32 70, i32 86, i32 72, i32 88, i32 74, i32 90, i32 76, i32 92, i32 78, i32 94, i32 96, i32 112, i32 98, i32 114, i32 100, i32 116, i32 102, i32 118, i32 104, i32 120, i32 106, i32 122, i32 108, i32 124, i32 110, i32 126, i32 128, i32 144, i32 130, i32 146, i32 132, i32 148, i32 134, i32 150, i32 136, i32 152, i32 138, i32 154, i32 140, i32 156, i32 142, i32 158, i32 160, i32 176, i32 162, i32 178, i32 164, i32 180, i32 166, i32 182, i32 168, i32 184, i32 170, i32 186, i32 172, i32 188, i32 174, i32 190, i32 192, i32 208, i32 194, i32 210, i32 196, i32 212, i32 198, i32 214, i32 200, i32 216, i32 202, i32 218, i32 204, i32 220, i32 206, i32 222, i32 224, i32 240, i32 226, i32 242, i32 228, i32 244, i32 230, i32 246, i32 232, i32 248, i32 234, i32 250, i32 236, i32 252, i32 238, i32 254, i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31, i32 33, i32 49, i32 35, i32 51, i32 37, i32 53, 
i32 39, i32 55, i32 41, i32 57, i32 43, i32 59, i32 45, i32 61, i32 47, i32 63, i32 65, i32 81, i32 67, i32 83, i32 69, i32 85, i32 71, i32 87, i32 73, i32 89, i32 75, i32 91, i32 77, i32 93, i32 79, i32 95, i32 97, i32 113, i32 99, i32 115, i32 101, i32 117, i32 103, i32 119, i32 105, i32 121, i32 107, i32 123, i32 109, i32 125, i32 111, i32 127, i32 129, i32 145, i32 131, i32 147, i32 133, i32 149, i32 135, i32 151, i32 137, i32 153, i32 139, i32 155, i32 141, i32 157, i32 143, i32 159, i32 161, i32 177, i32 163, i32 179, i32 165, i32 181, i32 167, i32 183, i32 169, i32 185, i32 171, i32 187, i32 173, i32 189, i32 175, i32 191, i32 193, i32 209, i32 195, i32 211, i32 197, i32 213, i32 199, i32 215, i32 201, i32 217, i32 203, i32 219, i32 205, i32 221, i32 207, i32 223, i32 225, i32 241, i32 227, i32 243, i32 229, i32 245, i32 231, i32 247, i32 233, i32 249, i32 235, i32 251, i32 237, i32 253, i32 239, i32 255> @@ -917,7 +917,7 @@ define <256 x i8> @vdeal_71(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_72: -; CHECK: [[REG72:r[0-9]+]] = #114 +; CHECK: [[REG72:r[0-9]+]] = #-14 ; CHECK: vdeal(v1,v0,[[REG72]]) define <256 x i8> @vdeal_72(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 16, i32 17, i32 4, i32 5, i32 20, i32 21, i32 8, i32 9, i32 24, i32 25, i32 12, i32 13, i32 28, i32 29, i32 32, i32 33, i32 48, i32 49, i32 36, i32 37, i32 52, i32 53, i32 40, i32 41, i32 56, i32 57, i32 44, i32 45, i32 60, i32 61, i32 64, i32 65, i32 80, i32 81, i32 68, i32 69, i32 84, i32 85, i32 72, i32 73, i32 88, i32 89, i32 76, i32 77, i32 92, i32 93, i32 96, i32 97, i32 112, i32 113, i32 100, i32 101, i32 116, i32 117, i32 104, i32 105, i32 120, i32 121, i32 108, i32 109, i32 124, i32 125, i32 128, i32 129, i32 144, i32 145, i32 132, i32 133, i32 148, i32 149, i32 136, i32 137, i32 152, i32 153, i32 140, i32 141, i32 156, i32 157, i32 160, i32 161, i32 176, i32 177, i32 164, i32 165, i32 180, i32 181, i32 168, i32 169, i32 184, i32 185, i32 172, i32 173, i32 188, i32 189, i32 192, i32 193, i32 208, i32 209, i32 196, i32 197, i32 212, i32 213, i32 200, i32 201, i32 216, i32 217, i32 204, i32 205, i32 220, i32 221, i32 224, i32 225, i32 240, i32 241, i32 228, i32 229, i32 244, i32 245, i32 232, i32 233, i32 248, i32 249, i32 236, i32 237, i32 252, i32 253, i32 2, i32 3, i32 18, i32 19, i32 6, i32 7, i32 22, i32 23, i32 10, i32 11, i32 26, i32 27, i32 14, i32 15, i32 30, i32 31, i32 34, i32 35, i32 50, i32 51, i32 38, i32 39, i32 54, i32 55, i32 42, i32 43, i32 58, i32 59, i32 46, i32 47, i32 62, i32 63, i32 66, i32 67, i32 82, i32 83, i32 70, i32 71, i32 86, i32 87, i32 74, i32 75, i32 90, i32 91, i32 78, i32 79, i32 94, i32 95, i32 98, i32 99, i32 114, i32 115, i32 102, i32 103, i32 118, i32 119, i32 106, i32 107, i32 122, i32 123, i32 110, i32 111, i32 126, i32 127, i32 130, i32 131, i32 146, i32 147, i32 134, i32 135, i32 150, i32 151, i32 138, i32 139, i32 154, i32 155, i32 142, i32 143, i32 158, i32 159, i32 162, i32 163, i32 178, i32 179, i32 166, i32 167, i32 182, i32 183, i32 170, i32 171, i32 186, i32 187, i32 174, i32 175, i32 190, i32 191, i32 194, i32 195, i32 210, i32 211, i32 198, i32 199, i32 214, i32 215, i32 202, i32 203, i32 218, i32 219, i32 206, i32 207, i32 222, i32 223, i32 226, i32 227, i32 242, i32 243, i32 230, i32 231, i32 246, i32 247, i32 234, i32 235, i32 250, i32 251, i32 238, i32 239, i32 254, i32 255> @@ -925,7 +925,7 @@ define <256 x i8> @vdeal_72(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; 
CHECK-LABEL: vdeal_73: -; CHECK: [[REG73:r[0-9]+]] = #115 +; CHECK: [[REG73:r[0-9]+]] = #-13 ; CHECK: vdeal(v1,v0,[[REG73]]) define <256 x i8> @vdeal_73(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 16, i32 18, i32 4, i32 6, i32 20, i32 22, i32 8, i32 10, i32 24, i32 26, i32 12, i32 14, i32 28, i32 30, i32 32, i32 34, i32 48, i32 50, i32 36, i32 38, i32 52, i32 54, i32 40, i32 42, i32 56, i32 58, i32 44, i32 46, i32 60, i32 62, i32 64, i32 66, i32 80, i32 82, i32 68, i32 70, i32 84, i32 86, i32 72, i32 74, i32 88, i32 90, i32 76, i32 78, i32 92, i32 94, i32 96, i32 98, i32 112, i32 114, i32 100, i32 102, i32 116, i32 118, i32 104, i32 106, i32 120, i32 122, i32 108, i32 110, i32 124, i32 126, i32 128, i32 130, i32 144, i32 146, i32 132, i32 134, i32 148, i32 150, i32 136, i32 138, i32 152, i32 154, i32 140, i32 142, i32 156, i32 158, i32 160, i32 162, i32 176, i32 178, i32 164, i32 166, i32 180, i32 182, i32 168, i32 170, i32 184, i32 186, i32 172, i32 174, i32 188, i32 190, i32 192, i32 194, i32 208, i32 210, i32 196, i32 198, i32 212, i32 214, i32 200, i32 202, i32 216, i32 218, i32 204, i32 206, i32 220, i32 222, i32 224, i32 226, i32 240, i32 242, i32 228, i32 230, i32 244, i32 246, i32 232, i32 234, i32 248, i32 250, i32 236, i32 238, i32 252, i32 254, i32 1, i32 3, i32 17, i32 19, i32 5, i32 7, i32 21, i32 23, i32 9, i32 11, i32 25, i32 27, i32 13, i32 15, i32 29, i32 31, i32 33, i32 35, i32 49, i32 51, i32 37, i32 39, i32 53, i32 55, i32 41, i32 43, i32 57, i32 59, i32 45, i32 47, i32 61, i32 63, i32 65, i32 67, i32 81, i32 83, i32 69, i32 71, i32 85, i32 87, i32 73, i32 75, i32 89, i32 91, i32 77, i32 79, i32 93, i32 95, i32 97, i32 99, i32 113, i32 115, i32 101, i32 103, i32 117, i32 119, i32 105, i32 107, i32 121, i32 123, i32 109, i32 111, i32 125, i32 127, i32 129, i32 131, i32 145, i32 147, i32 133, i32 135, i32 149, i32 151, i32 137, i32 139, i32 153, i32 155, i32 141, i32 143, i32 157, i32 159, i32 161, i32 163, i32 177, i32 179, i32 165, i32 167, i32 181, i32 183, i32 169, i32 171, i32 185, i32 187, i32 173, i32 175, i32 189, i32 191, i32 193, i32 195, i32 209, i32 211, i32 197, i32 199, i32 213, i32 215, i32 201, i32 203, i32 217, i32 219, i32 205, i32 207, i32 221, i32 223, i32 225, i32 227, i32 241, i32 243, i32 229, i32 231, i32 245, i32 247, i32 233, i32 235, i32 249, i32 251, i32 237, i32 239, i32 253, i32 255> @@ -933,7 +933,7 @@ define <256 x i8> @vdeal_73(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_74: -; CHECK: [[REG74:r[0-9]+]] = #116 +; CHECK: [[REG74:r[0-9]+]] = #-12 ; CHECK: vdeal(v1,v0,[[REG74]]) define <256 x i8> @vdeal_74(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 32, i32 33, i32 34, i32 35, i32 48, i32 49, i32 50, i32 51, i32 40, i32 41, i32 42, i32 43, i32 56, i32 57, i32 58, i32 59, i32 64, i32 65, i32 66, i32 67, i32 80, i32 81, i32 82, i32 83, i32 72, i32 73, i32 74, i32 75, i32 88, i32 89, i32 90, i32 91, i32 96, i32 97, i32 98, i32 99, i32 112, i32 113, i32 114, i32 115, i32 104, i32 105, i32 106, i32 107, i32 120, i32 121, i32 122, i32 123, i32 128, i32 129, i32 130, i32 131, i32 144, i32 145, i32 146, i32 147, i32 136, i32 137, i32 138, i32 139, i32 152, i32 153, i32 154, i32 155, i32 160, i32 161, i32 162, i32 163, i32 176, i32 177, i32 178, i32 179, i32 168, i32 169, i32 
170, i32 171, i32 184, i32 185, i32 186, i32 187, i32 192, i32 193, i32 194, i32 195, i32 208, i32 209, i32 210, i32 211, i32 200, i32 201, i32 202, i32 203, i32 216, i32 217, i32 218, i32 219, i32 224, i32 225, i32 226, i32 227, i32 240, i32 241, i32 242, i32 243, i32 232, i32 233, i32 234, i32 235, i32 248, i32 249, i32 250, i32 251, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31, i32 36, i32 37, i32 38, i32 39, i32 52, i32 53, i32 54, i32 55, i32 44, i32 45, i32 46, i32 47, i32 60, i32 61, i32 62, i32 63, i32 68, i32 69, i32 70, i32 71, i32 84, i32 85, i32 86, i32 87, i32 76, i32 77, i32 78, i32 79, i32 92, i32 93, i32 94, i32 95, i32 100, i32 101, i32 102, i32 103, i32 116, i32 117, i32 118, i32 119, i32 108, i32 109, i32 110, i32 111, i32 124, i32 125, i32 126, i32 127, i32 132, i32 133, i32 134, i32 135, i32 148, i32 149, i32 150, i32 151, i32 140, i32 141, i32 142, i32 143, i32 156, i32 157, i32 158, i32 159, i32 164, i32 165, i32 166, i32 167, i32 180, i32 181, i32 182, i32 183, i32 172, i32 173, i32 174, i32 175, i32 188, i32 189, i32 190, i32 191, i32 196, i32 197, i32 198, i32 199, i32 212, i32 213, i32 214, i32 215, i32 204, i32 205, i32 206, i32 207, i32 220, i32 221, i32 222, i32 223, i32 228, i32 229, i32 230, i32 231, i32 244, i32 245, i32 246, i32 247, i32 236, i32 237, i32 238, i32 239, i32 252, i32 253, i32 254, i32 255> @@ -941,7 +941,7 @@ define <256 x i8> @vdeal_74(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_75: -; CHECK: [[REG75:r[0-9]+]] = #117 +; CHECK: [[REG75:r[0-9]+]] = #-11 ; CHECK: vdeal(v1,v0,[[REG75]]) define <256 x i8> @vdeal_75(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 4, i32 2, i32 6, i32 16, i32 20, i32 18, i32 22, i32 8, i32 12, i32 10, i32 14, i32 24, i32 28, i32 26, i32 30, i32 32, i32 36, i32 34, i32 38, i32 48, i32 52, i32 50, i32 54, i32 40, i32 44, i32 42, i32 46, i32 56, i32 60, i32 58, i32 62, i32 64, i32 68, i32 66, i32 70, i32 80, i32 84, i32 82, i32 86, i32 72, i32 76, i32 74, i32 78, i32 88, i32 92, i32 90, i32 94, i32 96, i32 100, i32 98, i32 102, i32 112, i32 116, i32 114, i32 118, i32 104, i32 108, i32 106, i32 110, i32 120, i32 124, i32 122, i32 126, i32 128, i32 132, i32 130, i32 134, i32 144, i32 148, i32 146, i32 150, i32 136, i32 140, i32 138, i32 142, i32 152, i32 156, i32 154, i32 158, i32 160, i32 164, i32 162, i32 166, i32 176, i32 180, i32 178, i32 182, i32 168, i32 172, i32 170, i32 174, i32 184, i32 188, i32 186, i32 190, i32 192, i32 196, i32 194, i32 198, i32 208, i32 212, i32 210, i32 214, i32 200, i32 204, i32 202, i32 206, i32 216, i32 220, i32 218, i32 222, i32 224, i32 228, i32 226, i32 230, i32 240, i32 244, i32 242, i32 246, i32 232, i32 236, i32 234, i32 238, i32 248, i32 252, i32 250, i32 254, i32 1, i32 5, i32 3, i32 7, i32 17, i32 21, i32 19, i32 23, i32 9, i32 13, i32 11, i32 15, i32 25, i32 29, i32 27, i32 31, i32 33, i32 37, i32 35, i32 39, i32 49, i32 53, i32 51, i32 55, i32 41, i32 45, i32 43, i32 47, i32 57, i32 61, i32 59, i32 63, i32 65, i32 69, i32 67, i32 71, i32 81, i32 85, i32 83, i32 87, i32 73, i32 77, i32 75, i32 79, i32 89, i32 93, i32 91, i32 95, i32 97, i32 101, i32 99, i32 103, i32 113, i32 117, i32 115, i32 119, i32 105, i32 109, i32 107, i32 111, i32 121, i32 125, i32 123, i32 127, i32 129, i32 133, i32 131, i32 135, i32 145, i32 149, i32 147, i32 151, i32 137, i32 141, i32 139, i32 143, i32 153, i32 157, i32 155, i32 159, i32 161, 
i32 165, i32 163, i32 167, i32 177, i32 181, i32 179, i32 183, i32 169, i32 173, i32 171, i32 175, i32 185, i32 189, i32 187, i32 191, i32 193, i32 197, i32 195, i32 199, i32 209, i32 213, i32 211, i32 215, i32 201, i32 205, i32 203, i32 207, i32 217, i32 221, i32 219, i32 223, i32 225, i32 229, i32 227, i32 231, i32 241, i32 245, i32 243, i32 247, i32 233, i32 237, i32 235, i32 239, i32 249, i32 253, i32 251, i32 255> @@ -949,7 +949,7 @@ define <256 x i8> @vdeal_75(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_76: -; CHECK: [[REG76:r[0-9]+]] = #118 +; CHECK: [[REG76:r[0-9]+]] = #-10 ; CHECK: vdeal(v1,v0,[[REG76]]) define <256 x i8> @vdeal_76(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 4, i32 5, i32 16, i32 17, i32 20, i32 21, i32 8, i32 9, i32 12, i32 13, i32 24, i32 25, i32 28, i32 29, i32 32, i32 33, i32 36, i32 37, i32 48, i32 49, i32 52, i32 53, i32 40, i32 41, i32 44, i32 45, i32 56, i32 57, i32 60, i32 61, i32 64, i32 65, i32 68, i32 69, i32 80, i32 81, i32 84, i32 85, i32 72, i32 73, i32 76, i32 77, i32 88, i32 89, i32 92, i32 93, i32 96, i32 97, i32 100, i32 101, i32 112, i32 113, i32 116, i32 117, i32 104, i32 105, i32 108, i32 109, i32 120, i32 121, i32 124, i32 125, i32 128, i32 129, i32 132, i32 133, i32 144, i32 145, i32 148, i32 149, i32 136, i32 137, i32 140, i32 141, i32 152, i32 153, i32 156, i32 157, i32 160, i32 161, i32 164, i32 165, i32 176, i32 177, i32 180, i32 181, i32 168, i32 169, i32 172, i32 173, i32 184, i32 185, i32 188, i32 189, i32 192, i32 193, i32 196, i32 197, i32 208, i32 209, i32 212, i32 213, i32 200, i32 201, i32 204, i32 205, i32 216, i32 217, i32 220, i32 221, i32 224, i32 225, i32 228, i32 229, i32 240, i32 241, i32 244, i32 245, i32 232, i32 233, i32 236, i32 237, i32 248, i32 249, i32 252, i32 253, i32 2, i32 3, i32 6, i32 7, i32 18, i32 19, i32 22, i32 23, i32 10, i32 11, i32 14, i32 15, i32 26, i32 27, i32 30, i32 31, i32 34, i32 35, i32 38, i32 39, i32 50, i32 51, i32 54, i32 55, i32 42, i32 43, i32 46, i32 47, i32 58, i32 59, i32 62, i32 63, i32 66, i32 67, i32 70, i32 71, i32 82, i32 83, i32 86, i32 87, i32 74, i32 75, i32 78, i32 79, i32 90, i32 91, i32 94, i32 95, i32 98, i32 99, i32 102, i32 103, i32 114, i32 115, i32 118, i32 119, i32 106, i32 107, i32 110, i32 111, i32 122, i32 123, i32 126, i32 127, i32 130, i32 131, i32 134, i32 135, i32 146, i32 147, i32 150, i32 151, i32 138, i32 139, i32 142, i32 143, i32 154, i32 155, i32 158, i32 159, i32 162, i32 163, i32 166, i32 167, i32 178, i32 179, i32 182, i32 183, i32 170, i32 171, i32 174, i32 175, i32 186, i32 187, i32 190, i32 191, i32 194, i32 195, i32 198, i32 199, i32 210, i32 211, i32 214, i32 215, i32 202, i32 203, i32 206, i32 207, i32 218, i32 219, i32 222, i32 223, i32 226, i32 227, i32 230, i32 231, i32 242, i32 243, i32 246, i32 247, i32 234, i32 235, i32 238, i32 239, i32 250, i32 251, i32 254, i32 255> @@ -957,7 +957,7 @@ define <256 x i8> @vdeal_76(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_77: -; CHECK: [[REG77:r[0-9]+]] = #119 +; CHECK: [[REG77:r[0-9]+]] = #-9 ; CHECK: vdeal(v1,v0,[[REG77]]) define <256 x i8> @vdeal_77(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 48, i32 50, i32 52, i32 54, i32 40, i32 42, i32 44, i32 46, i32 56, i32 58, i32 60, i32 
62, i32 64, i32 66, i32 68, i32 70, i32 80, i32 82, i32 84, i32 86, i32 72, i32 74, i32 76, i32 78, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 112, i32 114, i32 116, i32 118, i32 104, i32 106, i32 108, i32 110, i32 120, i32 122, i32 124, i32 126, i32 128, i32 130, i32 132, i32 134, i32 144, i32 146, i32 148, i32 150, i32 136, i32 138, i32 140, i32 142, i32 152, i32 154, i32 156, i32 158, i32 160, i32 162, i32 164, i32 166, i32 176, i32 178, i32 180, i32 182, i32 168, i32 170, i32 172, i32 174, i32 184, i32 186, i32 188, i32 190, i32 192, i32 194, i32 196, i32 198, i32 208, i32 210, i32 212, i32 214, i32 200, i32 202, i32 204, i32 206, i32 216, i32 218, i32 220, i32 222, i32 224, i32 226, i32 228, i32 230, i32 240, i32 242, i32 244, i32 246, i32 232, i32 234, i32 236, i32 238, i32 248, i32 250, i32 252, i32 254, i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 49, i32 51, i32 53, i32 55, i32 41, i32 43, i32 45, i32 47, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 81, i32 83, i32 85, i32 87, i32 73, i32 75, i32 77, i32 79, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 113, i32 115, i32 117, i32 119, i32 105, i32 107, i32 109, i32 111, i32 121, i32 123, i32 125, i32 127, i32 129, i32 131, i32 133, i32 135, i32 145, i32 147, i32 149, i32 151, i32 137, i32 139, i32 141, i32 143, i32 153, i32 155, i32 157, i32 159, i32 161, i32 163, i32 165, i32 167, i32 177, i32 179, i32 181, i32 183, i32 169, i32 171, i32 173, i32 175, i32 185, i32 187, i32 189, i32 191, i32 193, i32 195, i32 197, i32 199, i32 209, i32 211, i32 213, i32 215, i32 201, i32 203, i32 205, i32 207, i32 217, i32 219, i32 221, i32 223, i32 225, i32 227, i32 229, i32 231, i32 241, i32 243, i32 245, i32 247, i32 233, i32 235, i32 237, i32 239, i32 249, i32 251, i32 253, i32 255> @@ -965,7 +965,7 @@ define <256 x i8> @vdeal_77(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_78: -; CHECK: [[REG78:r[0-9]+]] = #120 +; CHECK: [[REG78:r[0-9]+]] = #-8 ; CHECK: vdeal(v1,v0,[[REG78]]) define <256 x i8> @vdeal_78(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 40, i32 41, i32 42, i32 43, i32 44, i32 
45, i32 46, i32 47, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -973,7 +973,7 @@ define <256 x i8> @vdeal_78(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_79: -; CHECK: [[REG79:r[0-9]+]] = #121 +; CHECK: [[REG79:r[0-9]+]] = #-7 ; CHECK: vdeal(v1,v0,[[REG79]]) define <256 x i8> @vdeal_79(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 16, i32 24, i32 18, i32 26, i32 20, i32 28, i32 22, i32 30, i32 32, i32 40, i32 34, i32 42, i32 36, i32 44, i32 38, i32 46, i32 48, i32 56, i32 50, i32 58, i32 52, i32 60, i32 54, i32 62, i32 64, i32 72, i32 66, i32 74, i32 68, i32 76, i32 70, i32 78, i32 80, i32 88, i32 82, i32 90, i32 84, i32 92, i32 86, i32 94, i32 96, i32 104, i32 98, i32 106, i32 100, i32 108, i32 102, i32 110, i32 112, i32 120, i32 114, i32 122, i32 116, i32 124, i32 118, i32 126, i32 128, i32 136, i32 130, i32 138, i32 132, i32 140, i32 134, i32 142, i32 144, i32 152, i32 146, i32 154, i32 148, i32 156, i32 150, i32 158, i32 160, i32 168, i32 162, i32 170, i32 164, i32 172, i32 166, i32 174, i32 176, i32 184, i32 178, i32 186, i32 180, i32 188, i32 182, i32 190, i32 192, i32 200, i32 194, i32 202, i32 196, i32 204, i32 198, i32 206, i32 208, i32 216, i32 210, i32 218, i32 212, i32 220, i32 214, i32 222, i32 224, i32 232, i32 226, i32 234, i32 228, i32 236, i32 230, i32 238, i32 240, i32 248, i32 242, i32 250, i32 244, i32 252, i32 246, i32 254, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, i32 17, i32 25, i32 19, i32 27, i32 21, i32 29, i32 23, i32 31, i32 33, i32 41, i32 35, i32 43, i32 37, i32 45, i32 39, i32 47, i32 49, i32 57, i32 51, i32 59, i32 53, i32 61, i32 55, i32 63, i32 65, i32 73, i32 67, i32 75, i32 69, i32 77, i32 71, i32 79, i32 81, i32 89, i32 83, i32 91, i32 85, i32 93, i32 87, i32 95, i32 97, i32 105, i32 99, i32 107, i32 101, i32 109, i32 103, i32 111, i32 113, i32 121, i32 115, i32 123, i32 117, i32 125, i32 119, i32 127, i32 129, i32 137, i32 131, i32 139, i32 133, i32 141, i32 135, i32 143, i32 145, i32 153, i32 147, i32 155, i32 149, i32 157, i32 151, i32 159, i32 161, i32 169, i32 163, i32 171, i32 165, i32 173, i32 167, i32 175, i32 177, i32 185, i32 179, i32 187, i32 181, i32 189, i32 183, i32 191, i32 193, i32 201, i32 195, i32 203, i32 197, i32 205, i32 199, i32 207, i32 209, i32 217, i32 211, i32 219, i32 213, i32 221, i32 215, i32 223, i32 225, i32 233, i32 227, i32 235, i32 229, i32 237, i32 231, i32 239, i32 241, i32 249, i32 243, i32 251, i32 245, i32 253, i32 247, i32 255> @@ -981,7 +981,7 @@ define <256 x i8> @vdeal_79(<256 x i8> %v0, <256 x i8> %v1) #0 
{ } ; CHECK-LABEL: vdeal_7a: -; CHECK: [[REG7a:r[0-9]+]] = #122 +; CHECK: [[REG7a:r[0-9]+]] = #-6 ; CHECK: vdeal(v1,v0,[[REG7a]]) define <256 x i8> @vdeal_7a(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 12, i32 13, i32 16, i32 17, i32 24, i32 25, i32 20, i32 21, i32 28, i32 29, i32 32, i32 33, i32 40, i32 41, i32 36, i32 37, i32 44, i32 45, i32 48, i32 49, i32 56, i32 57, i32 52, i32 53, i32 60, i32 61, i32 64, i32 65, i32 72, i32 73, i32 68, i32 69, i32 76, i32 77, i32 80, i32 81, i32 88, i32 89, i32 84, i32 85, i32 92, i32 93, i32 96, i32 97, i32 104, i32 105, i32 100, i32 101, i32 108, i32 109, i32 112, i32 113, i32 120, i32 121, i32 116, i32 117, i32 124, i32 125, i32 128, i32 129, i32 136, i32 137, i32 132, i32 133, i32 140, i32 141, i32 144, i32 145, i32 152, i32 153, i32 148, i32 149, i32 156, i32 157, i32 160, i32 161, i32 168, i32 169, i32 164, i32 165, i32 172, i32 173, i32 176, i32 177, i32 184, i32 185, i32 180, i32 181, i32 188, i32 189, i32 192, i32 193, i32 200, i32 201, i32 196, i32 197, i32 204, i32 205, i32 208, i32 209, i32 216, i32 217, i32 212, i32 213, i32 220, i32 221, i32 224, i32 225, i32 232, i32 233, i32 228, i32 229, i32 236, i32 237, i32 240, i32 241, i32 248, i32 249, i32 244, i32 245, i32 252, i32 253, i32 2, i32 3, i32 10, i32 11, i32 6, i32 7, i32 14, i32 15, i32 18, i32 19, i32 26, i32 27, i32 22, i32 23, i32 30, i32 31, i32 34, i32 35, i32 42, i32 43, i32 38, i32 39, i32 46, i32 47, i32 50, i32 51, i32 58, i32 59, i32 54, i32 55, i32 62, i32 63, i32 66, i32 67, i32 74, i32 75, i32 70, i32 71, i32 78, i32 79, i32 82, i32 83, i32 90, i32 91, i32 86, i32 87, i32 94, i32 95, i32 98, i32 99, i32 106, i32 107, i32 102, i32 103, i32 110, i32 111, i32 114, i32 115, i32 122, i32 123, i32 118, i32 119, i32 126, i32 127, i32 130, i32 131, i32 138, i32 139, i32 134, i32 135, i32 142, i32 143, i32 146, i32 147, i32 154, i32 155, i32 150, i32 151, i32 158, i32 159, i32 162, i32 163, i32 170, i32 171, i32 166, i32 167, i32 174, i32 175, i32 178, i32 179, i32 186, i32 187, i32 182, i32 183, i32 190, i32 191, i32 194, i32 195, i32 202, i32 203, i32 198, i32 199, i32 206, i32 207, i32 210, i32 211, i32 218, i32 219, i32 214, i32 215, i32 222, i32 223, i32 226, i32 227, i32 234, i32 235, i32 230, i32 231, i32 238, i32 239, i32 242, i32 243, i32 250, i32 251, i32 246, i32 247, i32 254, i32 255> @@ -989,7 +989,7 @@ define <256 x i8> @vdeal_7a(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_7b: -; CHECK: [[REG7b:r[0-9]+]] = #123 +; CHECK: [[REG7b:r[0-9]+]] = #-5 ; CHECK: vdeal(v1,v0,[[REG7b]]) define <256 x i8> @vdeal_7b(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14, i32 16, i32 18, i32 24, i32 26, i32 20, i32 22, i32 28, i32 30, i32 32, i32 34, i32 40, i32 42, i32 36, i32 38, i32 44, i32 46, i32 48, i32 50, i32 56, i32 58, i32 52, i32 54, i32 60, i32 62, i32 64, i32 66, i32 72, i32 74, i32 68, i32 70, i32 76, i32 78, i32 80, i32 82, i32 88, i32 90, i32 84, i32 86, i32 92, i32 94, i32 96, i32 98, i32 104, i32 106, i32 100, i32 102, i32 108, i32 110, i32 112, i32 114, i32 120, i32 122, i32 116, i32 118, i32 124, i32 126, i32 128, i32 130, i32 136, i32 138, i32 132, i32 134, i32 140, i32 142, i32 144, i32 146, i32 152, i32 154, i32 148, i32 150, i32 156, i32 158, i32 160, i32 162, i32 168, i32 170, i32 164, i32 166, i32 172, i32 174, i32 176, i32 
178, i32 184, i32 186, i32 180, i32 182, i32 188, i32 190, i32 192, i32 194, i32 200, i32 202, i32 196, i32 198, i32 204, i32 206, i32 208, i32 210, i32 216, i32 218, i32 212, i32 214, i32 220, i32 222, i32 224, i32 226, i32 232, i32 234, i32 228, i32 230, i32 236, i32 238, i32 240, i32 242, i32 248, i32 250, i32 244, i32 246, i32 252, i32 254, i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15, i32 17, i32 19, i32 25, i32 27, i32 21, i32 23, i32 29, i32 31, i32 33, i32 35, i32 41, i32 43, i32 37, i32 39, i32 45, i32 47, i32 49, i32 51, i32 57, i32 59, i32 53, i32 55, i32 61, i32 63, i32 65, i32 67, i32 73, i32 75, i32 69, i32 71, i32 77, i32 79, i32 81, i32 83, i32 89, i32 91, i32 85, i32 87, i32 93, i32 95, i32 97, i32 99, i32 105, i32 107, i32 101, i32 103, i32 109, i32 111, i32 113, i32 115, i32 121, i32 123, i32 117, i32 119, i32 125, i32 127, i32 129, i32 131, i32 137, i32 139, i32 133, i32 135, i32 141, i32 143, i32 145, i32 147, i32 153, i32 155, i32 149, i32 151, i32 157, i32 159, i32 161, i32 163, i32 169, i32 171, i32 165, i32 167, i32 173, i32 175, i32 177, i32 179, i32 185, i32 187, i32 181, i32 183, i32 189, i32 191, i32 193, i32 195, i32 201, i32 203, i32 197, i32 199, i32 205, i32 207, i32 209, i32 211, i32 217, i32 219, i32 213, i32 215, i32 221, i32 223, i32 225, i32 227, i32 233, i32 235, i32 229, i32 231, i32 237, i32 239, i32 241, i32 243, i32 249, i32 251, i32 245, i32 247, i32 253, i32 255> @@ -997,7 +997,7 @@ define <256 x i8> @vdeal_7b(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_7c: -; CHECK: [[REG7c:r[0-9]+]] = #124 +; CHECK: [[REG7c:r[0-9]+]] = #-4 ; CHECK: vdeal(v1,v0,[[REG7c]]) define <256 x i8> @vdeal_7c(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27, i32 32, i32 33, i32 34, i32 35, i32 40, i32 41, i32 42, i32 43, i32 48, i32 49, i32 50, i32 51, i32 56, i32 57, i32 58, i32 59, i32 64, i32 65, i32 66, i32 67, i32 72, i32 73, i32 74, i32 75, i32 80, i32 81, i32 82, i32 83, i32 88, i32 89, i32 90, i32 91, i32 96, i32 97, i32 98, i32 99, i32 104, i32 105, i32 106, i32 107, i32 112, i32 113, i32 114, i32 115, i32 120, i32 121, i32 122, i32 123, i32 128, i32 129, i32 130, i32 131, i32 136, i32 137, i32 138, i32 139, i32 144, i32 145, i32 146, i32 147, i32 152, i32 153, i32 154, i32 155, i32 160, i32 161, i32 162, i32 163, i32 168, i32 169, i32 170, i32 171, i32 176, i32 177, i32 178, i32 179, i32 184, i32 185, i32 186, i32 187, i32 192, i32 193, i32 194, i32 195, i32 200, i32 201, i32 202, i32 203, i32 208, i32 209, i32 210, i32 211, i32 216, i32 217, i32 218, i32 219, i32 224, i32 225, i32 226, i32 227, i32 232, i32 233, i32 234, i32 235, i32 240, i32 241, i32 242, i32 243, i32 248, i32 249, i32 250, i32 251, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31, i32 36, i32 37, i32 38, i32 39, i32 44, i32 45, i32 46, i32 47, i32 52, i32 53, i32 54, i32 55, i32 60, i32 61, i32 62, i32 63, i32 68, i32 69, i32 70, i32 71, i32 76, i32 77, i32 78, i32 79, i32 84, i32 85, i32 86, i32 87, i32 92, i32 93, i32 94, i32 95, i32 100, i32 101, i32 102, i32 103, i32 108, i32 109, i32 110, i32 111, i32 116, i32 117, i32 118, i32 119, i32 124, i32 125, i32 126, i32 127, i32 132, i32 133, i32 134, i32 135, i32 140, i32 141, i32 142, i32 143, i32 148, i32 149, i32 150, i32 151, i32 156, i32 157, i32 158, i32 159, i32 
164, i32 165, i32 166, i32 167, i32 172, i32 173, i32 174, i32 175, i32 180, i32 181, i32 182, i32 183, i32 188, i32 189, i32 190, i32 191, i32 196, i32 197, i32 198, i32 199, i32 204, i32 205, i32 206, i32 207, i32 212, i32 213, i32 214, i32 215, i32 220, i32 221, i32 222, i32 223, i32 228, i32 229, i32 230, i32 231, i32 236, i32 237, i32 238, i32 239, i32 244, i32 245, i32 246, i32 247, i32 252, i32 253, i32 254, i32 255> @@ -1005,7 +1005,7 @@ define <256 x i8> @vdeal_7c(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_7d: -; CHECK: [[REG7d:r[0-9]+]] = #125 +; CHECK: [[REG7d:r[0-9]+]] = #-3 ; CHECK: vdeal(v1,v0,[[REG7d]]) define <256 x i8> @vdeal_7d(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 4, i32 2, i32 6, i32 8, i32 12, i32 10, i32 14, i32 16, i32 20, i32 18, i32 22, i32 24, i32 28, i32 26, i32 30, i32 32, i32 36, i32 34, i32 38, i32 40, i32 44, i32 42, i32 46, i32 48, i32 52, i32 50, i32 54, i32 56, i32 60, i32 58, i32 62, i32 64, i32 68, i32 66, i32 70, i32 72, i32 76, i32 74, i32 78, i32 80, i32 84, i32 82, i32 86, i32 88, i32 92, i32 90, i32 94, i32 96, i32 100, i32 98, i32 102, i32 104, i32 108, i32 106, i32 110, i32 112, i32 116, i32 114, i32 118, i32 120, i32 124, i32 122, i32 126, i32 128, i32 132, i32 130, i32 134, i32 136, i32 140, i32 138, i32 142, i32 144, i32 148, i32 146, i32 150, i32 152, i32 156, i32 154, i32 158, i32 160, i32 164, i32 162, i32 166, i32 168, i32 172, i32 170, i32 174, i32 176, i32 180, i32 178, i32 182, i32 184, i32 188, i32 186, i32 190, i32 192, i32 196, i32 194, i32 198, i32 200, i32 204, i32 202, i32 206, i32 208, i32 212, i32 210, i32 214, i32 216, i32 220, i32 218, i32 222, i32 224, i32 228, i32 226, i32 230, i32 232, i32 236, i32 234, i32 238, i32 240, i32 244, i32 242, i32 246, i32 248, i32 252, i32 250, i32 254, i32 1, i32 5, i32 3, i32 7, i32 9, i32 13, i32 11, i32 15, i32 17, i32 21, i32 19, i32 23, i32 25, i32 29, i32 27, i32 31, i32 33, i32 37, i32 35, i32 39, i32 41, i32 45, i32 43, i32 47, i32 49, i32 53, i32 51, i32 55, i32 57, i32 61, i32 59, i32 63, i32 65, i32 69, i32 67, i32 71, i32 73, i32 77, i32 75, i32 79, i32 81, i32 85, i32 83, i32 87, i32 89, i32 93, i32 91, i32 95, i32 97, i32 101, i32 99, i32 103, i32 105, i32 109, i32 107, i32 111, i32 113, i32 117, i32 115, i32 119, i32 121, i32 125, i32 123, i32 127, i32 129, i32 133, i32 131, i32 135, i32 137, i32 141, i32 139, i32 143, i32 145, i32 149, i32 147, i32 151, i32 153, i32 157, i32 155, i32 159, i32 161, i32 165, i32 163, i32 167, i32 169, i32 173, i32 171, i32 175, i32 177, i32 181, i32 179, i32 183, i32 185, i32 189, i32 187, i32 191, i32 193, i32 197, i32 195, i32 199, i32 201, i32 205, i32 203, i32 207, i32 209, i32 213, i32 211, i32 215, i32 217, i32 221, i32 219, i32 223, i32 225, i32 229, i32 227, i32 231, i32 233, i32 237, i32 235, i32 239, i32 241, i32 245, i32 243, i32 247, i32 249, i32 253, i32 251, i32 255> @@ -1013,7 +1013,7 @@ define <256 x i8> @vdeal_7d(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_7e: -; CHECK: [[REG7e:r[0-9]+]] = #126 +; CHECK: [[REG7e:r[0-9]+]] = #-2 ; CHECK: vdeal(v1,v0,[[REG7e]]) define <256 x i8> @vdeal_7e(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29, i32 32, i32 33, i32 36, i32 37, i32 40, i32 41, i32 44, i32 45, i32 48, i32 49, i32 52, i32 53, i32 56, i32 57, i32 
60, i32 61, i32 64, i32 65, i32 68, i32 69, i32 72, i32 73, i32 76, i32 77, i32 80, i32 81, i32 84, i32 85, i32 88, i32 89, i32 92, i32 93, i32 96, i32 97, i32 100, i32 101, i32 104, i32 105, i32 108, i32 109, i32 112, i32 113, i32 116, i32 117, i32 120, i32 121, i32 124, i32 125, i32 128, i32 129, i32 132, i32 133, i32 136, i32 137, i32 140, i32 141, i32 144, i32 145, i32 148, i32 149, i32 152, i32 153, i32 156, i32 157, i32 160, i32 161, i32 164, i32 165, i32 168, i32 169, i32 172, i32 173, i32 176, i32 177, i32 180, i32 181, i32 184, i32 185, i32 188, i32 189, i32 192, i32 193, i32 196, i32 197, i32 200, i32 201, i32 204, i32 205, i32 208, i32 209, i32 212, i32 213, i32 216, i32 217, i32 220, i32 221, i32 224, i32 225, i32 228, i32 229, i32 232, i32 233, i32 236, i32 237, i32 240, i32 241, i32 244, i32 245, i32 248, i32 249, i32 252, i32 253, i32 2, i32 3, i32 6, i32 7, i32 10, i32 11, i32 14, i32 15, i32 18, i32 19, i32 22, i32 23, i32 26, i32 27, i32 30, i32 31, i32 34, i32 35, i32 38, i32 39, i32 42, i32 43, i32 46, i32 47, i32 50, i32 51, i32 54, i32 55, i32 58, i32 59, i32 62, i32 63, i32 66, i32 67, i32 70, i32 71, i32 74, i32 75, i32 78, i32 79, i32 82, i32 83, i32 86, i32 87, i32 90, i32 91, i32 94, i32 95, i32 98, i32 99, i32 102, i32 103, i32 106, i32 107, i32 110, i32 111, i32 114, i32 115, i32 118, i32 119, i32 122, i32 123, i32 126, i32 127, i32 130, i32 131, i32 134, i32 135, i32 138, i32 139, i32 142, i32 143, i32 146, i32 147, i32 150, i32 151, i32 154, i32 155, i32 158, i32 159, i32 162, i32 163, i32 166, i32 167, i32 170, i32 171, i32 174, i32 175, i32 178, i32 179, i32 182, i32 183, i32 186, i32 187, i32 190, i32 191, i32 194, i32 195, i32 198, i32 199, i32 202, i32 203, i32 206, i32 207, i32 210, i32 211, i32 214, i32 215, i32 218, i32 219, i32 222, i32 223, i32 226, i32 227, i32 230, i32 231, i32 234, i32 235, i32 238, i32 239, i32 242, i32 243, i32 246, i32 247, i32 250, i32 251, i32 254, i32 255> @@ -1021,7 +1021,7 @@ define <256 x i8> @vdeal_7e(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_7f: -; CHECK: [[REG7f:r[0-9]+]] = #127 +; CHECK: [[REG7f:r[0-9]+]] = #-1 ; CHECK: vdeal(v1,v0,[[REG7f]]) define <256 x i8> @vdeal_7f(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126, i32 128, i32 130, i32 132, i32 134, i32 136, i32 138, i32 140, i32 142, i32 144, i32 146, i32 148, i32 150, i32 152, i32 154, i32 156, i32 158, i32 160, i32 162, i32 164, i32 166, i32 168, i32 170, i32 172, i32 174, i32 176, i32 178, i32 180, i32 182, i32 184, i32 186, i32 188, i32 190, i32 192, i32 194, i32 196, i32 198, i32 200, i32 202, i32 204, i32 206, i32 208, i32 210, i32 212, i32 214, i32 216, i32 218, i32 220, i32 222, i32 224, i32 226, i32 228, i32 230, i32 232, i32 234, i32 236, i32 238, i32 240, i32 242, i32 244, i32 246, i32 248, i32 250, i32 252, i32 254, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, 
i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127, i32 129, i32 131, i32 133, i32 135, i32 137, i32 139, i32 141, i32 143, i32 145, i32 147, i32 149, i32 151, i32 153, i32 155, i32 157, i32 159, i32 161, i32 163, i32 165, i32 167, i32 169, i32 171, i32 173, i32 175, i32 177, i32 179, i32 181, i32 183, i32 185, i32 187, i32 189, i32 191, i32 193, i32 195, i32 197, i32 199, i32 201, i32 203, i32 205, i32 207, i32 209, i32 211, i32 213, i32 215, i32 217, i32 219, i32 221, i32 223, i32 225, i32 227, i32 229, i32 231, i32 233, i32 235, i32 237, i32 239, i32 241, i32 243, i32 245, i32 247, i32 249, i32 251, i32 253, i32 255> diff --git a/llvm/test/CodeGen/Hexagon/autohvx/deal-64b.ll b/llvm/test/CodeGen/Hexagon/autohvx/deal-64b.ll index 525d942d518e8..efd6a327876c4 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/deal-64b.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/deal-64b.ll @@ -261,7 +261,7 @@ define <128 x i8> @vdeal_1f(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_20: -; CHECK: [[REG20:r[0-9]+]] = #32 +; CHECK: [[REG20:r[0-9]+]] = #-32 ; CHECK: vshuff(v1,v0,[[REG20]]) define <128 x i8> @vdeal_20(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> @@ -269,7 +269,7 @@ define <128 x i8> @vdeal_20(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_21: -; CHECK: [[REG21:r[0-9]+]] = #33 +; CHECK: [[REG21:r[0-9]+]] = #-31 ; CHECK: vdeal(v1,v0,[[REG21]]) define <128 x i8> @vdeal_21(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 32, i32 2, i32 34, i32 4, i32 36, i32 6, i32 38, i32 8, i32 40, i32 10, i32 42, i32 12, i32 44, i32 14, i32 46, i32 16, i32 48, i32 18, i32 50, i32 20, i32 52, i32 22, i32 54, i32 24, i32 56, i32 26, i32 58, i32 28, i32 60, i32 30, i32 62, i32 64, i32 96, i32 66, i32 98, i32 68, i32 100, i32 70, i32 102, i32 72, i32 104, i32 74, i32 106, i32 76, i32 108, i32 78, i32 110, i32 80, i32 112, i32 82, i32 114, i32 84, i32 116, i32 86, i32 118, i32 88, i32 120, i32 90, i32 122, i32 92, i32 124, i32 94, i32 126, i32 1, i32 33, i32 3, i32 35, i32 5, i32 37, i32 7, i32 39, i32 9, i32 41, i32 11, i32 43, i32 13, i32 
45, i32 15, i32 47, i32 17, i32 49, i32 19, i32 51, i32 21, i32 53, i32 23, i32 55, i32 25, i32 57, i32 27, i32 59, i32 29, i32 61, i32 31, i32 63, i32 65, i32 97, i32 67, i32 99, i32 69, i32 101, i32 71, i32 103, i32 73, i32 105, i32 75, i32 107, i32 77, i32 109, i32 79, i32 111, i32 81, i32 113, i32 83, i32 115, i32 85, i32 117, i32 87, i32 119, i32 89, i32 121, i32 91, i32 123, i32 93, i32 125, i32 95, i32 127> @@ -277,7 +277,7 @@ define <128 x i8> @vdeal_21(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_22: -; CHECK: [[REG22:r[0-9]+]] = #34 +; CHECK: [[REG22:r[0-9]+]] = #-30 ; CHECK: vdeal(v1,v0,[[REG22]]) define <128 x i8> @vdeal_22(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 32, i32 33, i32 4, i32 5, i32 36, i32 37, i32 8, i32 9, i32 40, i32 41, i32 12, i32 13, i32 44, i32 45, i32 16, i32 17, i32 48, i32 49, i32 20, i32 21, i32 52, i32 53, i32 24, i32 25, i32 56, i32 57, i32 28, i32 29, i32 60, i32 61, i32 64, i32 65, i32 96, i32 97, i32 68, i32 69, i32 100, i32 101, i32 72, i32 73, i32 104, i32 105, i32 76, i32 77, i32 108, i32 109, i32 80, i32 81, i32 112, i32 113, i32 84, i32 85, i32 116, i32 117, i32 88, i32 89, i32 120, i32 121, i32 92, i32 93, i32 124, i32 125, i32 2, i32 3, i32 34, i32 35, i32 6, i32 7, i32 38, i32 39, i32 10, i32 11, i32 42, i32 43, i32 14, i32 15, i32 46, i32 47, i32 18, i32 19, i32 50, i32 51, i32 22, i32 23, i32 54, i32 55, i32 26, i32 27, i32 58, i32 59, i32 30, i32 31, i32 62, i32 63, i32 66, i32 67, i32 98, i32 99, i32 70, i32 71, i32 102, i32 103, i32 74, i32 75, i32 106, i32 107, i32 78, i32 79, i32 110, i32 111, i32 82, i32 83, i32 114, i32 115, i32 86, i32 87, i32 118, i32 119, i32 90, i32 91, i32 122, i32 123, i32 94, i32 95, i32 126, i32 127> @@ -285,7 +285,7 @@ define <128 x i8> @vdeal_22(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_23: -; CHECK: [[REG23:r[0-9]+]] = #35 +; CHECK: [[REG23:r[0-9]+]] = #-29 ; CHECK: vdeal(v1,v0,[[REG23]]) define <128 x i8> @vdeal_23(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 2, i32 32, i32 34, i32 4, i32 6, i32 36, i32 38, i32 8, i32 10, i32 40, i32 42, i32 12, i32 14, i32 44, i32 46, i32 16, i32 18, i32 48, i32 50, i32 20, i32 22, i32 52, i32 54, i32 24, i32 26, i32 56, i32 58, i32 28, i32 30, i32 60, i32 62, i32 64, i32 66, i32 96, i32 98, i32 68, i32 70, i32 100, i32 102, i32 72, i32 74, i32 104, i32 106, i32 76, i32 78, i32 108, i32 110, i32 80, i32 82, i32 112, i32 114, i32 84, i32 86, i32 116, i32 118, i32 88, i32 90, i32 120, i32 122, i32 92, i32 94, i32 124, i32 126, i32 1, i32 3, i32 33, i32 35, i32 5, i32 7, i32 37, i32 39, i32 9, i32 11, i32 41, i32 43, i32 13, i32 15, i32 45, i32 47, i32 17, i32 19, i32 49, i32 51, i32 21, i32 23, i32 53, i32 55, i32 25, i32 27, i32 57, i32 59, i32 29, i32 31, i32 61, i32 63, i32 65, i32 67, i32 97, i32 99, i32 69, i32 71, i32 101, i32 103, i32 73, i32 75, i32 105, i32 107, i32 77, i32 79, i32 109, i32 111, i32 81, i32 83, i32 113, i32 115, i32 85, i32 87, i32 117, i32 119, i32 89, i32 91, i32 121, i32 123, i32 93, i32 95, i32 125, i32 127> @@ -293,7 +293,7 @@ define <128 x i8> @vdeal_23(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_24: -; CHECK: [[REG24:r[0-9]+]] = #36 +; CHECK: [[REG24:r[0-9]+]] = #-28 ; CHECK: vdeal(v1,v0,[[REG24]]) define <128 x i8> @vdeal_24(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 
2, i32 3, i32 32, i32 33, i32 34, i32 35, i32 8, i32 9, i32 10, i32 11, i32 40, i32 41, i32 42, i32 43, i32 16, i32 17, i32 18, i32 19, i32 48, i32 49, i32 50, i32 51, i32 24, i32 25, i32 26, i32 27, i32 56, i32 57, i32 58, i32 59, i32 64, i32 65, i32 66, i32 67, i32 96, i32 97, i32 98, i32 99, i32 72, i32 73, i32 74, i32 75, i32 104, i32 105, i32 106, i32 107, i32 80, i32 81, i32 82, i32 83, i32 112, i32 113, i32 114, i32 115, i32 88, i32 89, i32 90, i32 91, i32 120, i32 121, i32 122, i32 123, i32 4, i32 5, i32 6, i32 7, i32 36, i32 37, i32 38, i32 39, i32 12, i32 13, i32 14, i32 15, i32 44, i32 45, i32 46, i32 47, i32 20, i32 21, i32 22, i32 23, i32 52, i32 53, i32 54, i32 55, i32 28, i32 29, i32 30, i32 31, i32 60, i32 61, i32 62, i32 63, i32 68, i32 69, i32 70, i32 71, i32 100, i32 101, i32 102, i32 103, i32 76, i32 77, i32 78, i32 79, i32 108, i32 109, i32 110, i32 111, i32 84, i32 85, i32 86, i32 87, i32 116, i32 117, i32 118, i32 119, i32 92, i32 93, i32 94, i32 95, i32 124, i32 125, i32 126, i32 127> @@ -301,7 +301,7 @@ define <128 x i8> @vdeal_24(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_25: -; CHECK: [[REG25:r[0-9]+]] = #37 +; CHECK: [[REG25:r[0-9]+]] = #-27 ; CHECK: vdeal(v1,v0,[[REG25]]) define <128 x i8> @vdeal_25(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 4, i32 2, i32 6, i32 32, i32 36, i32 34, i32 38, i32 8, i32 12, i32 10, i32 14, i32 40, i32 44, i32 42, i32 46, i32 16, i32 20, i32 18, i32 22, i32 48, i32 52, i32 50, i32 54, i32 24, i32 28, i32 26, i32 30, i32 56, i32 60, i32 58, i32 62, i32 64, i32 68, i32 66, i32 70, i32 96, i32 100, i32 98, i32 102, i32 72, i32 76, i32 74, i32 78, i32 104, i32 108, i32 106, i32 110, i32 80, i32 84, i32 82, i32 86, i32 112, i32 116, i32 114, i32 118, i32 88, i32 92, i32 90, i32 94, i32 120, i32 124, i32 122, i32 126, i32 1, i32 5, i32 3, i32 7, i32 33, i32 37, i32 35, i32 39, i32 9, i32 13, i32 11, i32 15, i32 41, i32 45, i32 43, i32 47, i32 17, i32 21, i32 19, i32 23, i32 49, i32 53, i32 51, i32 55, i32 25, i32 29, i32 27, i32 31, i32 57, i32 61, i32 59, i32 63, i32 65, i32 69, i32 67, i32 71, i32 97, i32 101, i32 99, i32 103, i32 73, i32 77, i32 75, i32 79, i32 105, i32 109, i32 107, i32 111, i32 81, i32 85, i32 83, i32 87, i32 113, i32 117, i32 115, i32 119, i32 89, i32 93, i32 91, i32 95, i32 121, i32 125, i32 123, i32 127> @@ -309,7 +309,7 @@ define <128 x i8> @vdeal_25(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_26: -; CHECK: [[REG26:r[0-9]+]] = #38 +; CHECK: [[REG26:r[0-9]+]] = #-26 ; CHECK: vdeal(v1,v0,[[REG26]]) define <128 x i8> @vdeal_26(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 4, i32 5, i32 32, i32 33, i32 36, i32 37, i32 8, i32 9, i32 12, i32 13, i32 40, i32 41, i32 44, i32 45, i32 16, i32 17, i32 20, i32 21, i32 48, i32 49, i32 52, i32 53, i32 24, i32 25, i32 28, i32 29, i32 56, i32 57, i32 60, i32 61, i32 64, i32 65, i32 68, i32 69, i32 96, i32 97, i32 100, i32 101, i32 72, i32 73, i32 76, i32 77, i32 104, i32 105, i32 108, i32 109, i32 80, i32 81, i32 84, i32 85, i32 112, i32 113, i32 116, i32 117, i32 88, i32 89, i32 92, i32 93, i32 120, i32 121, i32 124, i32 125, i32 2, i32 3, i32 6, i32 7, i32 34, i32 35, i32 38, i32 39, i32 10, i32 11, i32 14, i32 15, i32 42, i32 43, i32 46, i32 47, i32 18, i32 19, i32 22, i32 23, i32 50, i32 51, i32 54, i32 55, i32 26, i32 27, i32 30, i32 31, i32 58, i32 59, i32 62, i32 63, i32 66, i32 67, i32 70, 
i32 71, i32 98, i32 99, i32 102, i32 103, i32 74, i32 75, i32 78, i32 79, i32 106, i32 107, i32 110, i32 111, i32 82, i32 83, i32 86, i32 87, i32 114, i32 115, i32 118, i32 119, i32 90, i32 91, i32 94, i32 95, i32 122, i32 123, i32 126, i32 127> @@ -317,7 +317,7 @@ define <128 x i8> @vdeal_26(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_27: -; CHECK: [[REG27:r[0-9]+]] = #39 +; CHECK: [[REG27:r[0-9]+]] = #-25 ; CHECK: vdeal(v1,v0,[[REG27]]) define <128 x i8> @vdeal_27(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 2, i32 4, i32 6, i32 32, i32 34, i32 36, i32 38, i32 8, i32 10, i32 12, i32 14, i32 40, i32 42, i32 44, i32 46, i32 16, i32 18, i32 20, i32 22, i32 48, i32 50, i32 52, i32 54, i32 24, i32 26, i32 28, i32 30, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 96, i32 98, i32 100, i32 102, i32 72, i32 74, i32 76, i32 78, i32 104, i32 106, i32 108, i32 110, i32 80, i32 82, i32 84, i32 86, i32 112, i32 114, i32 116, i32 118, i32 88, i32 90, i32 92, i32 94, i32 120, i32 122, i32 124, i32 126, i32 1, i32 3, i32 5, i32 7, i32 33, i32 35, i32 37, i32 39, i32 9, i32 11, i32 13, i32 15, i32 41, i32 43, i32 45, i32 47, i32 17, i32 19, i32 21, i32 23, i32 49, i32 51, i32 53, i32 55, i32 25, i32 27, i32 29, i32 31, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 97, i32 99, i32 101, i32 103, i32 73, i32 75, i32 77, i32 79, i32 105, i32 107, i32 109, i32 111, i32 81, i32 83, i32 85, i32 87, i32 113, i32 115, i32 117, i32 119, i32 89, i32 91, i32 93, i32 95, i32 121, i32 123, i32 125, i32 127> @@ -325,7 +325,7 @@ define <128 x i8> @vdeal_27(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_28: -; CHECK: [[REG28:r[0-9]+]] = #40 +; CHECK: [[REG28:r[0-9]+]] = #-24 ; CHECK: vdeal(v1,v0,[[REG28]]) define <128 x i8> @vdeal_28(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> @@ -333,7 +333,7 @@ define <128 x i8> @vdeal_28(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_29: -; CHECK: [[REG29:r[0-9]+]] = #41 +; CHECK: [[REG29:r[0-9]+]] = #-23 ; CHECK: vdeal(v1,v0,[[REG29]]) define <128 x i8> @vdeal_29(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 32, i32 40, i32 34, i32 42, i32 36, i32 44, i32 38, i32 46, i32 16, i32 24, i32 18, i32 26, i32 20, i32 28, i32 22, i32 30, 
i32 48, i32 56, i32 50, i32 58, i32 52, i32 60, i32 54, i32 62, i32 64, i32 72, i32 66, i32 74, i32 68, i32 76, i32 70, i32 78, i32 96, i32 104, i32 98, i32 106, i32 100, i32 108, i32 102, i32 110, i32 80, i32 88, i32 82, i32 90, i32 84, i32 92, i32 86, i32 94, i32 112, i32 120, i32 114, i32 122, i32 116, i32 124, i32 118, i32 126, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, i32 33, i32 41, i32 35, i32 43, i32 37, i32 45, i32 39, i32 47, i32 17, i32 25, i32 19, i32 27, i32 21, i32 29, i32 23, i32 31, i32 49, i32 57, i32 51, i32 59, i32 53, i32 61, i32 55, i32 63, i32 65, i32 73, i32 67, i32 75, i32 69, i32 77, i32 71, i32 79, i32 97, i32 105, i32 99, i32 107, i32 101, i32 109, i32 103, i32 111, i32 81, i32 89, i32 83, i32 91, i32 85, i32 93, i32 87, i32 95, i32 113, i32 121, i32 115, i32 123, i32 117, i32 125, i32 119, i32 127> @@ -341,7 +341,7 @@ define <128 x i8> @vdeal_29(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_2a: -; CHECK: [[REG2a:r[0-9]+]] = #42 +; CHECK: [[REG2a:r[0-9]+]] = #-22 ; CHECK: vdeal(v1,v0,[[REG2a]]) define <128 x i8> @vdeal_2a(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 12, i32 13, i32 32, i32 33, i32 40, i32 41, i32 36, i32 37, i32 44, i32 45, i32 16, i32 17, i32 24, i32 25, i32 20, i32 21, i32 28, i32 29, i32 48, i32 49, i32 56, i32 57, i32 52, i32 53, i32 60, i32 61, i32 64, i32 65, i32 72, i32 73, i32 68, i32 69, i32 76, i32 77, i32 96, i32 97, i32 104, i32 105, i32 100, i32 101, i32 108, i32 109, i32 80, i32 81, i32 88, i32 89, i32 84, i32 85, i32 92, i32 93, i32 112, i32 113, i32 120, i32 121, i32 116, i32 117, i32 124, i32 125, i32 2, i32 3, i32 10, i32 11, i32 6, i32 7, i32 14, i32 15, i32 34, i32 35, i32 42, i32 43, i32 38, i32 39, i32 46, i32 47, i32 18, i32 19, i32 26, i32 27, i32 22, i32 23, i32 30, i32 31, i32 50, i32 51, i32 58, i32 59, i32 54, i32 55, i32 62, i32 63, i32 66, i32 67, i32 74, i32 75, i32 70, i32 71, i32 78, i32 79, i32 98, i32 99, i32 106, i32 107, i32 102, i32 103, i32 110, i32 111, i32 82, i32 83, i32 90, i32 91, i32 86, i32 87, i32 94, i32 95, i32 114, i32 115, i32 122, i32 123, i32 118, i32 119, i32 126, i32 127> @@ -349,7 +349,7 @@ define <128 x i8> @vdeal_2a(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_2b: -; CHECK: [[REG2b:r[0-9]+]] = #43 +; CHECK: [[REG2b:r[0-9]+]] = #-21 ; CHECK: vdeal(v1,v0,[[REG2b]]) define <128 x i8> @vdeal_2b(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14, i32 32, i32 34, i32 40, i32 42, i32 36, i32 38, i32 44, i32 46, i32 16, i32 18, i32 24, i32 26, i32 20, i32 22, i32 28, i32 30, i32 48, i32 50, i32 56, i32 58, i32 52, i32 54, i32 60, i32 62, i32 64, i32 66, i32 72, i32 74, i32 68, i32 70, i32 76, i32 78, i32 96, i32 98, i32 104, i32 106, i32 100, i32 102, i32 108, i32 110, i32 80, i32 82, i32 88, i32 90, i32 84, i32 86, i32 92, i32 94, i32 112, i32 114, i32 120, i32 122, i32 116, i32 118, i32 124, i32 126, i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15, i32 33, i32 35, i32 41, i32 43, i32 37, i32 39, i32 45, i32 47, i32 17, i32 19, i32 25, i32 27, i32 21, i32 23, i32 29, i32 31, i32 49, i32 51, i32 57, i32 59, i32 53, i32 55, i32 61, i32 63, i32 65, i32 67, i32 73, i32 75, i32 69, i32 71, i32 77, i32 79, i32 97, i32 99, i32 105, i32 107, i32 101, i32 103, i32 109, i32 111, i32 81, i32 83, i32 89, i32 91, i32 85, i32 87, i32 93, i32 
95, i32 113, i32 115, i32 121, i32 123, i32 117, i32 119, i32 125, i32 127> @@ -357,7 +357,7 @@ define <128 x i8> @vdeal_2b(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_2c: -; CHECK: [[REG2c:r[0-9]+]] = #44 +; CHECK: [[REG2c:r[0-9]+]] = #-20 ; CHECK: vdeal(v1,v0,[[REG2c]]) define <128 x i8> @vdeal_2c(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 32, i32 33, i32 34, i32 35, i32 40, i32 41, i32 42, i32 43, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27, i32 48, i32 49, i32 50, i32 51, i32 56, i32 57, i32 58, i32 59, i32 64, i32 65, i32 66, i32 67, i32 72, i32 73, i32 74, i32 75, i32 96, i32 97, i32 98, i32 99, i32 104, i32 105, i32 106, i32 107, i32 80, i32 81, i32 82, i32 83, i32 88, i32 89, i32 90, i32 91, i32 112, i32 113, i32 114, i32 115, i32 120, i32 121, i32 122, i32 123, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 36, i32 37, i32 38, i32 39, i32 44, i32 45, i32 46, i32 47, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31, i32 52, i32 53, i32 54, i32 55, i32 60, i32 61, i32 62, i32 63, i32 68, i32 69, i32 70, i32 71, i32 76, i32 77, i32 78, i32 79, i32 100, i32 101, i32 102, i32 103, i32 108, i32 109, i32 110, i32 111, i32 84, i32 85, i32 86, i32 87, i32 92, i32 93, i32 94, i32 95, i32 116, i32 117, i32 118, i32 119, i32 124, i32 125, i32 126, i32 127> @@ -365,7 +365,7 @@ define <128 x i8> @vdeal_2c(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_2d: -; CHECK: [[REG2d:r[0-9]+]] = #45 +; CHECK: [[REG2d:r[0-9]+]] = #-19 ; CHECK: vdeal(v1,v0,[[REG2d]]) define <128 x i8> @vdeal_2d(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 4, i32 2, i32 6, i32 8, i32 12, i32 10, i32 14, i32 32, i32 36, i32 34, i32 38, i32 40, i32 44, i32 42, i32 46, i32 16, i32 20, i32 18, i32 22, i32 24, i32 28, i32 26, i32 30, i32 48, i32 52, i32 50, i32 54, i32 56, i32 60, i32 58, i32 62, i32 64, i32 68, i32 66, i32 70, i32 72, i32 76, i32 74, i32 78, i32 96, i32 100, i32 98, i32 102, i32 104, i32 108, i32 106, i32 110, i32 80, i32 84, i32 82, i32 86, i32 88, i32 92, i32 90, i32 94, i32 112, i32 116, i32 114, i32 118, i32 120, i32 124, i32 122, i32 126, i32 1, i32 5, i32 3, i32 7, i32 9, i32 13, i32 11, i32 15, i32 33, i32 37, i32 35, i32 39, i32 41, i32 45, i32 43, i32 47, i32 17, i32 21, i32 19, i32 23, i32 25, i32 29, i32 27, i32 31, i32 49, i32 53, i32 51, i32 55, i32 57, i32 61, i32 59, i32 63, i32 65, i32 69, i32 67, i32 71, i32 73, i32 77, i32 75, i32 79, i32 97, i32 101, i32 99, i32 103, i32 105, i32 109, i32 107, i32 111, i32 81, i32 85, i32 83, i32 87, i32 89, i32 93, i32 91, i32 95, i32 113, i32 117, i32 115, i32 119, i32 121, i32 125, i32 123, i32 127> @@ -373,7 +373,7 @@ define <128 x i8> @vdeal_2d(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_2e: -; CHECK: [[REG2e:r[0-9]+]] = #46 +; CHECK: [[REG2e:r[0-9]+]] = #-18 ; CHECK: vdeal(v1,v0,[[REG2e]]) define <128 x i8> @vdeal_2e(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 32, i32 33, i32 36, i32 37, i32 40, i32 41, i32 44, i32 45, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29, i32 48, i32 49, i32 52, i32 53, i32 56, i32 57, i32 60, i32 61, i32 64, i32 65, i32 68, i32 69, i32 72, i32 73, i32 76, i32 77, i32 96, i32 97, i32 100, i32 101, i32 104, 
i32 105, i32 108, i32 109, i32 80, i32 81, i32 84, i32 85, i32 88, i32 89, i32 92, i32 93, i32 112, i32 113, i32 116, i32 117, i32 120, i32 121, i32 124, i32 125, i32 2, i32 3, i32 6, i32 7, i32 10, i32 11, i32 14, i32 15, i32 34, i32 35, i32 38, i32 39, i32 42, i32 43, i32 46, i32 47, i32 18, i32 19, i32 22, i32 23, i32 26, i32 27, i32 30, i32 31, i32 50, i32 51, i32 54, i32 55, i32 58, i32 59, i32 62, i32 63, i32 66, i32 67, i32 70, i32 71, i32 74, i32 75, i32 78, i32 79, i32 98, i32 99, i32 102, i32 103, i32 106, i32 107, i32 110, i32 111, i32 82, i32 83, i32 86, i32 87, i32 90, i32 91, i32 94, i32 95, i32 114, i32 115, i32 118, i32 119, i32 122, i32 123, i32 126, i32 127> @@ -381,7 +381,7 @@ define <128 x i8> @vdeal_2e(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_2f: -; CHECK: [[REG2f:r[0-9]+]] = #47 +; CHECK: [[REG2f:r[0-9]+]] = #-17 ; CHECK: vdeal(v1,v0,[[REG2f]]) define <128 x i8> @vdeal_2f(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127> @@ -389,7 +389,7 @@ define <128 x i8> @vdeal_2f(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_30: -; CHECK: [[REG30:r[0-9]+]] = #48 +; CHECK: [[REG30:r[0-9]+]] = #-16 ; CHECK: vdeal(v1,v0,[[REG30]]) define <128 x i8> @vdeal_30(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> @@ -397,7 +397,7 @@ define <128 x i8> @vdeal_30(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; 
CHECK-LABEL: vdeal_31: -; CHECK: [[REG31:r[0-9]+]] = #49 +; CHECK: [[REG31:r[0-9]+]] = #-15 ; CHECK: vdeal(v1,v0,[[REG31]]) define <128 x i8> @vdeal_31(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30, i32 32, i32 48, i32 34, i32 50, i32 36, i32 52, i32 38, i32 54, i32 40, i32 56, i32 42, i32 58, i32 44, i32 60, i32 46, i32 62, i32 64, i32 80, i32 66, i32 82, i32 68, i32 84, i32 70, i32 86, i32 72, i32 88, i32 74, i32 90, i32 76, i32 92, i32 78, i32 94, i32 96, i32 112, i32 98, i32 114, i32 100, i32 116, i32 102, i32 118, i32 104, i32 120, i32 106, i32 122, i32 108, i32 124, i32 110, i32 126, i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31, i32 33, i32 49, i32 35, i32 51, i32 37, i32 53, i32 39, i32 55, i32 41, i32 57, i32 43, i32 59, i32 45, i32 61, i32 47, i32 63, i32 65, i32 81, i32 67, i32 83, i32 69, i32 85, i32 71, i32 87, i32 73, i32 89, i32 75, i32 91, i32 77, i32 93, i32 79, i32 95, i32 97, i32 113, i32 99, i32 115, i32 101, i32 117, i32 103, i32 119, i32 105, i32 121, i32 107, i32 123, i32 109, i32 125, i32 111, i32 127> @@ -405,7 +405,7 @@ define <128 x i8> @vdeal_31(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_32: -; CHECK: [[REG32:r[0-9]+]] = #50 +; CHECK: [[REG32:r[0-9]+]] = #-14 ; CHECK: vdeal(v1,v0,[[REG32]]) define <128 x i8> @vdeal_32(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 16, i32 17, i32 4, i32 5, i32 20, i32 21, i32 8, i32 9, i32 24, i32 25, i32 12, i32 13, i32 28, i32 29, i32 32, i32 33, i32 48, i32 49, i32 36, i32 37, i32 52, i32 53, i32 40, i32 41, i32 56, i32 57, i32 44, i32 45, i32 60, i32 61, i32 64, i32 65, i32 80, i32 81, i32 68, i32 69, i32 84, i32 85, i32 72, i32 73, i32 88, i32 89, i32 76, i32 77, i32 92, i32 93, i32 96, i32 97, i32 112, i32 113, i32 100, i32 101, i32 116, i32 117, i32 104, i32 105, i32 120, i32 121, i32 108, i32 109, i32 124, i32 125, i32 2, i32 3, i32 18, i32 19, i32 6, i32 7, i32 22, i32 23, i32 10, i32 11, i32 26, i32 27, i32 14, i32 15, i32 30, i32 31, i32 34, i32 35, i32 50, i32 51, i32 38, i32 39, i32 54, i32 55, i32 42, i32 43, i32 58, i32 59, i32 46, i32 47, i32 62, i32 63, i32 66, i32 67, i32 82, i32 83, i32 70, i32 71, i32 86, i32 87, i32 74, i32 75, i32 90, i32 91, i32 78, i32 79, i32 94, i32 95, i32 98, i32 99, i32 114, i32 115, i32 102, i32 103, i32 118, i32 119, i32 106, i32 107, i32 122, i32 123, i32 110, i32 111, i32 126, i32 127> @@ -413,7 +413,7 @@ define <128 x i8> @vdeal_32(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_33: -; CHECK: [[REG33:r[0-9]+]] = #51 +; CHECK: [[REG33:r[0-9]+]] = #-13 ; CHECK: vdeal(v1,v0,[[REG33]]) define <128 x i8> @vdeal_33(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 2, i32 16, i32 18, i32 4, i32 6, i32 20, i32 22, i32 8, i32 10, i32 24, i32 26, i32 12, i32 14, i32 28, i32 30, i32 32, i32 34, i32 48, i32 50, i32 36, i32 38, i32 52, i32 54, i32 40, i32 42, i32 56, i32 58, i32 44, i32 46, i32 60, i32 62, i32 64, i32 66, i32 80, i32 82, i32 68, i32 70, i32 84, i32 86, i32 72, i32 74, i32 88, i32 90, i32 76, i32 78, i32 92, i32 94, i32 96, i32 98, i32 112, i32 114, i32 100, i32 102, i32 116, i32 118, i32 104, i32 106, i32 120, i32 122, i32 108, i32 110, i32 124, i32 126, i32 
1, i32 3, i32 17, i32 19, i32 5, i32 7, i32 21, i32 23, i32 9, i32 11, i32 25, i32 27, i32 13, i32 15, i32 29, i32 31, i32 33, i32 35, i32 49, i32 51, i32 37, i32 39, i32 53, i32 55, i32 41, i32 43, i32 57, i32 59, i32 45, i32 47, i32 61, i32 63, i32 65, i32 67, i32 81, i32 83, i32 69, i32 71, i32 85, i32 87, i32 73, i32 75, i32 89, i32 91, i32 77, i32 79, i32 93, i32 95, i32 97, i32 99, i32 113, i32 115, i32 101, i32 103, i32 117, i32 119, i32 105, i32 107, i32 121, i32 123, i32 109, i32 111, i32 125, i32 127> @@ -421,7 +421,7 @@ define <128 x i8> @vdeal_33(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_34: -; CHECK: [[REG34:r[0-9]+]] = #52 +; CHECK: [[REG34:r[0-9]+]] = #-12 ; CHECK: vdeal(v1,v0,[[REG34]]) define <128 x i8> @vdeal_34(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 32, i32 33, i32 34, i32 35, i32 48, i32 49, i32 50, i32 51, i32 40, i32 41, i32 42, i32 43, i32 56, i32 57, i32 58, i32 59, i32 64, i32 65, i32 66, i32 67, i32 80, i32 81, i32 82, i32 83, i32 72, i32 73, i32 74, i32 75, i32 88, i32 89, i32 90, i32 91, i32 96, i32 97, i32 98, i32 99, i32 112, i32 113, i32 114, i32 115, i32 104, i32 105, i32 106, i32 107, i32 120, i32 121, i32 122, i32 123, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31, i32 36, i32 37, i32 38, i32 39, i32 52, i32 53, i32 54, i32 55, i32 44, i32 45, i32 46, i32 47, i32 60, i32 61, i32 62, i32 63, i32 68, i32 69, i32 70, i32 71, i32 84, i32 85, i32 86, i32 87, i32 76, i32 77, i32 78, i32 79, i32 92, i32 93, i32 94, i32 95, i32 100, i32 101, i32 102, i32 103, i32 116, i32 117, i32 118, i32 119, i32 108, i32 109, i32 110, i32 111, i32 124, i32 125, i32 126, i32 127> @@ -429,7 +429,7 @@ define <128 x i8> @vdeal_34(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_35: -; CHECK: [[REG35:r[0-9]+]] = #53 +; CHECK: [[REG35:r[0-9]+]] = #-11 ; CHECK: vdeal(v1,v0,[[REG35]]) define <128 x i8> @vdeal_35(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 4, i32 2, i32 6, i32 16, i32 20, i32 18, i32 22, i32 8, i32 12, i32 10, i32 14, i32 24, i32 28, i32 26, i32 30, i32 32, i32 36, i32 34, i32 38, i32 48, i32 52, i32 50, i32 54, i32 40, i32 44, i32 42, i32 46, i32 56, i32 60, i32 58, i32 62, i32 64, i32 68, i32 66, i32 70, i32 80, i32 84, i32 82, i32 86, i32 72, i32 76, i32 74, i32 78, i32 88, i32 92, i32 90, i32 94, i32 96, i32 100, i32 98, i32 102, i32 112, i32 116, i32 114, i32 118, i32 104, i32 108, i32 106, i32 110, i32 120, i32 124, i32 122, i32 126, i32 1, i32 5, i32 3, i32 7, i32 17, i32 21, i32 19, i32 23, i32 9, i32 13, i32 11, i32 15, i32 25, i32 29, i32 27, i32 31, i32 33, i32 37, i32 35, i32 39, i32 49, i32 53, i32 51, i32 55, i32 41, i32 45, i32 43, i32 47, i32 57, i32 61, i32 59, i32 63, i32 65, i32 69, i32 67, i32 71, i32 81, i32 85, i32 83, i32 87, i32 73, i32 77, i32 75, i32 79, i32 89, i32 93, i32 91, i32 95, i32 97, i32 101, i32 99, i32 103, i32 113, i32 117, i32 115, i32 119, i32 105, i32 109, i32 107, i32 111, i32 121, i32 125, i32 123, i32 127> @@ -437,7 +437,7 @@ define <128 x i8> @vdeal_35(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_36: -; CHECK: [[REG36:r[0-9]+]] = #54 +; CHECK: [[REG36:r[0-9]+]] = #-10 ; CHECK: vdeal(v1,v0,[[REG36]]) define <128 x i8> @vdeal_36(<128 x i8> %v0, <128 x 
i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 4, i32 5, i32 16, i32 17, i32 20, i32 21, i32 8, i32 9, i32 12, i32 13, i32 24, i32 25, i32 28, i32 29, i32 32, i32 33, i32 36, i32 37, i32 48, i32 49, i32 52, i32 53, i32 40, i32 41, i32 44, i32 45, i32 56, i32 57, i32 60, i32 61, i32 64, i32 65, i32 68, i32 69, i32 80, i32 81, i32 84, i32 85, i32 72, i32 73, i32 76, i32 77, i32 88, i32 89, i32 92, i32 93, i32 96, i32 97, i32 100, i32 101, i32 112, i32 113, i32 116, i32 117, i32 104, i32 105, i32 108, i32 109, i32 120, i32 121, i32 124, i32 125, i32 2, i32 3, i32 6, i32 7, i32 18, i32 19, i32 22, i32 23, i32 10, i32 11, i32 14, i32 15, i32 26, i32 27, i32 30, i32 31, i32 34, i32 35, i32 38, i32 39, i32 50, i32 51, i32 54, i32 55, i32 42, i32 43, i32 46, i32 47, i32 58, i32 59, i32 62, i32 63, i32 66, i32 67, i32 70, i32 71, i32 82, i32 83, i32 86, i32 87, i32 74, i32 75, i32 78, i32 79, i32 90, i32 91, i32 94, i32 95, i32 98, i32 99, i32 102, i32 103, i32 114, i32 115, i32 118, i32 119, i32 106, i32 107, i32 110, i32 111, i32 122, i32 123, i32 126, i32 127> @@ -445,7 +445,7 @@ define <128 x i8> @vdeal_36(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_37: -; CHECK: [[REG37:r[0-9]+]] = #55 +; CHECK: [[REG37:r[0-9]+]] = #-9 ; CHECK: vdeal(v1,v0,[[REG37]]) define <128 x i8> @vdeal_37(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 48, i32 50, i32 52, i32 54, i32 40, i32 42, i32 44, i32 46, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 80, i32 82, i32 84, i32 86, i32 72, i32 74, i32 76, i32 78, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 112, i32 114, i32 116, i32 118, i32 104, i32 106, i32 108, i32 110, i32 120, i32 122, i32 124, i32 126, i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 49, i32 51, i32 53, i32 55, i32 41, i32 43, i32 45, i32 47, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 81, i32 83, i32 85, i32 87, i32 73, i32 75, i32 77, i32 79, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 113, i32 115, i32 117, i32 119, i32 105, i32 107, i32 109, i32 111, i32 121, i32 123, i32 125, i32 127> @@ -453,7 +453,7 @@ define <128 x i8> @vdeal_37(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_38: -; CHECK: [[REG38:r[0-9]+]] = #56 +; CHECK: [[REG38:r[0-9]+]] = #-8 ; CHECK: vdeal(v1,v0,[[REG38]]) define <128 x i8> @vdeal_38(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 
47, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> @@ -461,7 +461,7 @@ define <128 x i8> @vdeal_38(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_39: -; CHECK: [[REG39:r[0-9]+]] = #57 +; CHECK: [[REG39:r[0-9]+]] = #-7 ; CHECK: vdeal(v1,v0,[[REG39]]) define <128 x i8> @vdeal_39(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 16, i32 24, i32 18, i32 26, i32 20, i32 28, i32 22, i32 30, i32 32, i32 40, i32 34, i32 42, i32 36, i32 44, i32 38, i32 46, i32 48, i32 56, i32 50, i32 58, i32 52, i32 60, i32 54, i32 62, i32 64, i32 72, i32 66, i32 74, i32 68, i32 76, i32 70, i32 78, i32 80, i32 88, i32 82, i32 90, i32 84, i32 92, i32 86, i32 94, i32 96, i32 104, i32 98, i32 106, i32 100, i32 108, i32 102, i32 110, i32 112, i32 120, i32 114, i32 122, i32 116, i32 124, i32 118, i32 126, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15, i32 17, i32 25, i32 19, i32 27, i32 21, i32 29, i32 23, i32 31, i32 33, i32 41, i32 35, i32 43, i32 37, i32 45, i32 39, i32 47, i32 49, i32 57, i32 51, i32 59, i32 53, i32 61, i32 55, i32 63, i32 65, i32 73, i32 67, i32 75, i32 69, i32 77, i32 71, i32 79, i32 81, i32 89, i32 83, i32 91, i32 85, i32 93, i32 87, i32 95, i32 97, i32 105, i32 99, i32 107, i32 101, i32 109, i32 103, i32 111, i32 113, i32 121, i32 115, i32 123, i32 117, i32 125, i32 119, i32 127> @@ -469,7 +469,7 @@ define <128 x i8> @vdeal_39(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_3a: -; CHECK: [[REG3a:r[0-9]+]] = #58 +; CHECK: [[REG3a:r[0-9]+]] = #-6 ; CHECK: vdeal(v1,v0,[[REG3a]]) define <128 x i8> @vdeal_3a(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 12, i32 13, i32 16, i32 17, i32 24, i32 25, i32 20, i32 21, i32 28, i32 29, i32 32, i32 33, i32 40, i32 41, i32 36, i32 37, i32 44, i32 45, i32 48, i32 49, i32 56, i32 57, i32 52, i32 53, i32 60, i32 61, i32 64, i32 65, i32 72, i32 73, i32 68, i32 69, i32 76, i32 77, i32 80, i32 81, i32 88, i32 89, i32 84, i32 85, i32 92, i32 93, i32 96, i32 97, i32 104, i32 105, i32 100, i32 101, i32 108, i32 109, i32 112, i32 113, i32 120, i32 121, i32 116, i32 117, i32 124, i32 125, i32 2, i32 3, i32 10, i32 11, i32 6, i32 7, i32 14, i32 15, i32 18, i32 19, i32 26, i32 27, i32 22, i32 23, i32 30, i32 31, i32 34, i32 35, i32 42, i32 43, i32 38, i32 39, i32 46, i32 47, i32 50, i32 51, i32 58, i32 59, i32 54, i32 55, i32 62, i32 63, i32 66, i32 67, i32 74, i32 75, i32 70, i32 71, i32 78, i32 79, i32 82, i32 83, i32 90, i32 91, i32 86, i32 87, i32 94, i32 95, i32 98, i32 99, i32 106, i32 107, i32 102, i32 103, i32 110, i32 111, i32 114, i32 115, i32 122, i32 123, i32 118, i32 119, i32 126, i32 127> @@ -477,7 +477,7 @@ define <128 x i8> @vdeal_3a(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_3b: -; CHECK: [[REG3b:r[0-9]+]] = #59 +; CHECK: [[REG3b:r[0-9]+]] = #-5 ; CHECK: vdeal(v1,v0,[[REG3b]]) define <128 x i8> @vdeal_3b(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14, i32 16, i32 18, i32 24, i32 26, i32 20, 
i32 22, i32 28, i32 30, i32 32, i32 34, i32 40, i32 42, i32 36, i32 38, i32 44, i32 46, i32 48, i32 50, i32 56, i32 58, i32 52, i32 54, i32 60, i32 62, i32 64, i32 66, i32 72, i32 74, i32 68, i32 70, i32 76, i32 78, i32 80, i32 82, i32 88, i32 90, i32 84, i32 86, i32 92, i32 94, i32 96, i32 98, i32 104, i32 106, i32 100, i32 102, i32 108, i32 110, i32 112, i32 114, i32 120, i32 122, i32 116, i32 118, i32 124, i32 126, i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15, i32 17, i32 19, i32 25, i32 27, i32 21, i32 23, i32 29, i32 31, i32 33, i32 35, i32 41, i32 43, i32 37, i32 39, i32 45, i32 47, i32 49, i32 51, i32 57, i32 59, i32 53, i32 55, i32 61, i32 63, i32 65, i32 67, i32 73, i32 75, i32 69, i32 71, i32 77, i32 79, i32 81, i32 83, i32 89, i32 91, i32 85, i32 87, i32 93, i32 95, i32 97, i32 99, i32 105, i32 107, i32 101, i32 103, i32 109, i32 111, i32 113, i32 115, i32 121, i32 123, i32 117, i32 119, i32 125, i32 127> @@ -485,7 +485,7 @@ define <128 x i8> @vdeal_3b(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_3c: -; CHECK: [[REG3c:r[0-9]+]] = #60 +; CHECK: [[REG3c:r[0-9]+]] = #-4 ; CHECK: vdeal(v1,v0,[[REG3c]]) define <128 x i8> @vdeal_3c(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27, i32 32, i32 33, i32 34, i32 35, i32 40, i32 41, i32 42, i32 43, i32 48, i32 49, i32 50, i32 51, i32 56, i32 57, i32 58, i32 59, i32 64, i32 65, i32 66, i32 67, i32 72, i32 73, i32 74, i32 75, i32 80, i32 81, i32 82, i32 83, i32 88, i32 89, i32 90, i32 91, i32 96, i32 97, i32 98, i32 99, i32 104, i32 105, i32 106, i32 107, i32 112, i32 113, i32 114, i32 115, i32 120, i32 121, i32 122, i32 123, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 20, i32 21, i32 22, i32 23, i32 28, i32 29, i32 30, i32 31, i32 36, i32 37, i32 38, i32 39, i32 44, i32 45, i32 46, i32 47, i32 52, i32 53, i32 54, i32 55, i32 60, i32 61, i32 62, i32 63, i32 68, i32 69, i32 70, i32 71, i32 76, i32 77, i32 78, i32 79, i32 84, i32 85, i32 86, i32 87, i32 92, i32 93, i32 94, i32 95, i32 100, i32 101, i32 102, i32 103, i32 108, i32 109, i32 110, i32 111, i32 116, i32 117, i32 118, i32 119, i32 124, i32 125, i32 126, i32 127> @@ -493,7 +493,7 @@ define <128 x i8> @vdeal_3c(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_3d: -; CHECK: [[REG3d:r[0-9]+]] = #61 +; CHECK: [[REG3d:r[0-9]+]] = #-3 ; CHECK: vdeal(v1,v0,[[REG3d]]) define <128 x i8> @vdeal_3d(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 4, i32 2, i32 6, i32 8, i32 12, i32 10, i32 14, i32 16, i32 20, i32 18, i32 22, i32 24, i32 28, i32 26, i32 30, i32 32, i32 36, i32 34, i32 38, i32 40, i32 44, i32 42, i32 46, i32 48, i32 52, i32 50, i32 54, i32 56, i32 60, i32 58, i32 62, i32 64, i32 68, i32 66, i32 70, i32 72, i32 76, i32 74, i32 78, i32 80, i32 84, i32 82, i32 86, i32 88, i32 92, i32 90, i32 94, i32 96, i32 100, i32 98, i32 102, i32 104, i32 108, i32 106, i32 110, i32 112, i32 116, i32 114, i32 118, i32 120, i32 124, i32 122, i32 126, i32 1, i32 5, i32 3, i32 7, i32 9, i32 13, i32 11, i32 15, i32 17, i32 21, i32 19, i32 23, i32 25, i32 29, i32 27, i32 31, i32 33, i32 37, i32 35, i32 39, i32 41, i32 45, i32 43, i32 47, i32 49, i32 53, i32 51, i32 55, i32 57, i32 61, i32 59, i32 63, i32 65, i32 69, i32 67, i32 71, i32 73, i32 77, i32 75, i32 79, i32 81, i32 85, i32 83, i32 87, i32 89, i32 
93, i32 91, i32 95, i32 97, i32 101, i32 99, i32 103, i32 105, i32 109, i32 107, i32 111, i32 113, i32 117, i32 115, i32 119, i32 121, i32 125, i32 123, i32 127> @@ -501,7 +501,7 @@ define <128 x i8> @vdeal_3d(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_3e: -; CHECK: [[REG3e:r[0-9]+]] = #62 +; CHECK: [[REG3e:r[0-9]+]] = #-2 ; CHECK: vdeal(v1,v0,[[REG3e]]) define <128 x i8> @vdeal_3e(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29, i32 32, i32 33, i32 36, i32 37, i32 40, i32 41, i32 44, i32 45, i32 48, i32 49, i32 52, i32 53, i32 56, i32 57, i32 60, i32 61, i32 64, i32 65, i32 68, i32 69, i32 72, i32 73, i32 76, i32 77, i32 80, i32 81, i32 84, i32 85, i32 88, i32 89, i32 92, i32 93, i32 96, i32 97, i32 100, i32 101, i32 104, i32 105, i32 108, i32 109, i32 112, i32 113, i32 116, i32 117, i32 120, i32 121, i32 124, i32 125, i32 2, i32 3, i32 6, i32 7, i32 10, i32 11, i32 14, i32 15, i32 18, i32 19, i32 22, i32 23, i32 26, i32 27, i32 30, i32 31, i32 34, i32 35, i32 38, i32 39, i32 42, i32 43, i32 46, i32 47, i32 50, i32 51, i32 54, i32 55, i32 58, i32 59, i32 62, i32 63, i32 66, i32 67, i32 70, i32 71, i32 74, i32 75, i32 78, i32 79, i32 82, i32 83, i32 86, i32 87, i32 90, i32 91, i32 94, i32 95, i32 98, i32 99, i32 102, i32 103, i32 106, i32 107, i32 110, i32 111, i32 114, i32 115, i32 118, i32 119, i32 122, i32 123, i32 126, i32 127> @@ -509,7 +509,7 @@ define <128 x i8> @vdeal_3e(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vdeal_3f: -; CHECK: [[REG3f:r[0-9]+]] = #63 +; CHECK: [[REG3f:r[0-9]+]] = #-1 ; CHECK: vdeal(v1,v0,[[REG3f]]) define <128 x i8> @vdeal_3f(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62, i32 64, i32 66, i32 68, i32 70, i32 72, i32 74, i32 76, i32 78, i32 80, i32 82, i32 84, i32 86, i32 88, i32 90, i32 92, i32 94, i32 96, i32 98, i32 100, i32 102, i32 104, i32 106, i32 108, i32 110, i32 112, i32 114, i32 116, i32 118, i32 120, i32 122, i32 124, i32 126, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63, i32 65, i32 67, i32 69, i32 71, i32 73, i32 75, i32 77, i32 79, i32 81, i32 83, i32 85, i32 87, i32 89, i32 91, i32 93, i32 95, i32 97, i32 99, i32 101, i32 103, i32 105, i32 107, i32 109, i32 111, i32 113, i32 115, i32 117, i32 119, i32 121, i32 123, i32 125, i32 127> diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-shuff-single.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-shuff-single.ll index 97dc25931dba5..e4eb7bed6fabf 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-shuff-single.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-shuff-single.ll @@ -6,9 +6,9 @@ ; was missing). 
; CHECK-LABEL: f0: -; CHECK-DAG: r[[R0:[0-9]+]] = #66 +; CHECK-DAG: r[[R0:[0-9]+]] = #-62 ; CHECK-DAG: r[[R1:[0-9]+]] = #40 -; CHECK-DAG: r[[R2:[0-9]+]] = #85 +; CHECK-DAG: r[[R2:[0-9]+]] = #-43 ; CHECK: v1:0 = vdeal(v{{[0-9]+}},v0,r[[R0]]) ; CHECK: v1:0 = vshuff(v1,v0,r[[R1]]) ; CHECK: v1:0 = vshuff(v1,v0,r[[R2]]) diff --git a/llvm/test/CodeGen/Hexagon/autohvx/isel-vpackew.ll b/llvm/test/CodeGen/Hexagon/autohvx/isel-vpackew.ll index 716e0367efe6f..67d9e19b8975e 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/isel-vpackew.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/isel-vpackew.ll @@ -5,7 +5,7 @@ define void @f0(ptr %a0, ptr %a1, ptr %a2) #0 { ; CHECK-LABEL: f0: ; CHECK: // %bb.0: // %b0 ; CHECK-NEXT: { -; CHECK-NEXT: r7 = #124 +; CHECK-NEXT: r7 = #-4 ; CHECK-NEXT: v0 = vmem(r0+#0) ; CHECK-NEXT: } ; CHECK-NEXT: { diff --git a/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll b/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll index ef0173880f024..b5ad0ab703146 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/mulh.ll @@ -10,7 +10,7 @@ define <64 x i16> @mulhs16(<64 x i16> %a0, <64 x i16> %a1) #0 { ; V60-NEXT: v1:0.w = vmpy(v1.h,v0.h) ; V60-NEXT: } ; V60-NEXT: { -; V60-NEXT: r7 = #124 +; V60-NEXT: r7 = #-4 ; V60-NEXT: } ; V60-NEXT: { ; V60-NEXT: v1:0 = vshuff(v1,v0,r7) @@ -28,7 +28,7 @@ define <64 x i16> @mulhs16(<64 x i16> %a0, <64 x i16> %a1) #0 { ; V65-NEXT: v1:0.w = vmpy(v1.h,v0.h) ; V65-NEXT: } ; V65-NEXT: { -; V65-NEXT: r7 = #124 +; V65-NEXT: r7 = #-4 ; V65-NEXT: } ; V65-NEXT: { ; V65-NEXT: v1:0 = vshuff(v1,v0,r7) @@ -46,7 +46,7 @@ define <64 x i16> @mulhs16(<64 x i16> %a0, <64 x i16> %a1) #0 { ; V69-NEXT: v1:0.w = vmpy(v1.h,v0.h) ; V69-NEXT: } ; V69-NEXT: { -; V69-NEXT: r7 = #124 +; V69-NEXT: r7 = #-4 ; V69-NEXT: } ; V69-NEXT: { ; V69-NEXT: v1:0 = vshuff(v1,v0,r7) @@ -72,7 +72,7 @@ define <64 x i16> @mulhu16(<64 x i16> %a0, <64 x i16> %a1) #0 { ; V60-NEXT: v1:0.uw = vmpy(v1.uh,v0.uh) ; V60-NEXT: } ; V60-NEXT: { -; V60-NEXT: r7 = #124 +; V60-NEXT: r7 = #-4 ; V60-NEXT: } ; V60-NEXT: { ; V60-NEXT: v1:0 = vshuff(v1,v0,r7) @@ -90,7 +90,7 @@ define <64 x i16> @mulhu16(<64 x i16> %a0, <64 x i16> %a1) #0 { ; V65-NEXT: v1:0.uw = vmpy(v1.uh,v0.uh) ; V65-NEXT: } ; V65-NEXT: { -; V65-NEXT: r7 = #124 +; V65-NEXT: r7 = #-4 ; V65-NEXT: } ; V65-NEXT: { ; V65-NEXT: v1:0 = vshuff(v1,v0,r7) diff --git a/llvm/test/CodeGen/Hexagon/autohvx/qmul.ll b/llvm/test/CodeGen/Hexagon/autohvx/qmul.ll index 872c93fa7cb23..0021a626b5fcd 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/qmul.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/qmul.ll @@ -75,7 +75,7 @@ define void @f2(ptr %a0, ptr %a1, ptr %a2) #0 { ; CHECK-NEXT: v0 = vmem(r1+#0) ; CHECK-NEXT: } ; CHECK-NEXT: { -; CHECK-NEXT: r7 = #124 +; CHECK-NEXT: r7 = #-4 ; CHECK-NEXT: } ; CHECK-NEXT: { ; CHECK-NEXT: r3 = #15 diff --git a/llvm/test/CodeGen/Hexagon/autohvx/shuff-128b.ll b/llvm/test/CodeGen/Hexagon/autohvx/shuff-128b.ll index 7b815496bcb56..607118d76f043 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/shuff-128b.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/shuff-128b.ll @@ -515,7 +515,7 @@ define <256 x i8> @vshuff_3f(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_40: -; CHECK: [[REG40:r[0-9]+]] = #64 +; CHECK: [[REG40:r[0-9]+]] = #-64 ; CHECK: vshuff(v1,v0,[[REG40]]) define <256 x i8> @vshuff_40(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, 
i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -523,7 +523,7 @@ define <256 x i8> @vshuff_40(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_41: -; CHECK: [[REG41:r[0-9]+]] = #65 +; CHECK: [[REG41:r[0-9]+]] = #-63 ; CHECK: vshuff(v1,v0,[[REG41]]) define <256 x i8> @vshuff_41(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 4, i32 132, i32 6, i32 134, i32 8, i32 136, i32 10, i32 138, i32 12, i32 140, i32 14, i32 142, i32 16, i32 144, i32 18, i32 146, i32 20, i32 148, i32 22, i32 150, i32 24, i32 152, i32 26, i32 154, i32 28, i32 156, i32 30, i32 158, i32 32, i32 160, i32 34, i32 162, i32 36, i32 164, i32 38, i32 166, i32 40, i32 168, i32 42, i32 170, i32 44, i32 172, i32 46, i32 174, i32 48, i32 176, i32 50, i32 178, i32 52, i32 180, i32 54, i32 182, i32 56, i32 184, i32 58, i32 186, i32 60, i32 188, i32 62, i32 190, i32 1, i32 129, i32 3, i32 131, i32 5, i32 133, i32 7, i32 135, i32 9, i32 137, i32 11, i32 139, i32 13, i32 141, i32 15, i32 143, i32 17, i32 145, i32 19, i32 147, i32 21, i32 149, i32 23, i32 151, i32 25, i32 153, i32 27, i32 155, i32 29, i32 157, i32 31, i32 159, i32 33, i32 161, i32 35, i32 163, i32 37, i32 165, i32 39, i32 167, i32 41, i32 169, i32 43, i32 171, i32 45, i32 173, i32 47, i32 175, i32 49, i32 177, i32 51, i32 179, i32 53, i32 181, i32 55, i32 183, i32 57, i32 185, i32 59, i32 187, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 68, i32 196, i32 70, 
i32 198, i32 72, i32 200, i32 74, i32 202, i32 76, i32 204, i32 78, i32 206, i32 80, i32 208, i32 82, i32 210, i32 84, i32 212, i32 86, i32 214, i32 88, i32 216, i32 90, i32 218, i32 92, i32 220, i32 94, i32 222, i32 96, i32 224, i32 98, i32 226, i32 100, i32 228, i32 102, i32 230, i32 104, i32 232, i32 106, i32 234, i32 108, i32 236, i32 110, i32 238, i32 112, i32 240, i32 114, i32 242, i32 116, i32 244, i32 118, i32 246, i32 120, i32 248, i32 122, i32 250, i32 124, i32 252, i32 126, i32 254, i32 65, i32 193, i32 67, i32 195, i32 69, i32 197, i32 71, i32 199, i32 73, i32 201, i32 75, i32 203, i32 77, i32 205, i32 79, i32 207, i32 81, i32 209, i32 83, i32 211, i32 85, i32 213, i32 87, i32 215, i32 89, i32 217, i32 91, i32 219, i32 93, i32 221, i32 95, i32 223, i32 97, i32 225, i32 99, i32 227, i32 101, i32 229, i32 103, i32 231, i32 105, i32 233, i32 107, i32 235, i32 109, i32 237, i32 111, i32 239, i32 113, i32 241, i32 115, i32 243, i32 117, i32 245, i32 119, i32 247, i32 121, i32 249, i32 123, i32 251, i32 125, i32 253, i32 127, i32 255> @@ -531,7 +531,7 @@ define <256 x i8> @vshuff_41(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_42: -; CHECK: [[REG42:r[0-9]+]] = #66 +; CHECK: [[REG42:r[0-9]+]] = #-62 ; CHECK: vshuff(v1,v0,[[REG42]]) define <256 x i8> @vshuff_42(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 4, i32 5, i32 132, i32 133, i32 8, i32 9, i32 136, i32 137, i32 12, i32 13, i32 140, i32 141, i32 16, i32 17, i32 144, i32 145, i32 20, i32 21, i32 148, i32 149, i32 24, i32 25, i32 152, i32 153, i32 28, i32 29, i32 156, i32 157, i32 32, i32 33, i32 160, i32 161, i32 36, i32 37, i32 164, i32 165, i32 40, i32 41, i32 168, i32 169, i32 44, i32 45, i32 172, i32 173, i32 48, i32 49, i32 176, i32 177, i32 52, i32 53, i32 180, i32 181, i32 56, i32 57, i32 184, i32 185, i32 60, i32 61, i32 188, i32 189, i32 2, i32 3, i32 130, i32 131, i32 6, i32 7, i32 134, i32 135, i32 10, i32 11, i32 138, i32 139, i32 14, i32 15, i32 142, i32 143, i32 18, i32 19, i32 146, i32 147, i32 22, i32 23, i32 150, i32 151, i32 26, i32 27, i32 154, i32 155, i32 30, i32 31, i32 158, i32 159, i32 34, i32 35, i32 162, i32 163, i32 38, i32 39, i32 166, i32 167, i32 42, i32 43, i32 170, i32 171, i32 46, i32 47, i32 174, i32 175, i32 50, i32 51, i32 178, i32 179, i32 54, i32 55, i32 182, i32 183, i32 58, i32 59, i32 186, i32 187, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 68, i32 69, i32 196, i32 197, i32 72, i32 73, i32 200, i32 201, i32 76, i32 77, i32 204, i32 205, i32 80, i32 81, i32 208, i32 209, i32 84, i32 85, i32 212, i32 213, i32 88, i32 89, i32 216, i32 217, i32 92, i32 93, i32 220, i32 221, i32 96, i32 97, i32 224, i32 225, i32 100, i32 101, i32 228, i32 229, i32 104, i32 105, i32 232, i32 233, i32 108, i32 109, i32 236, i32 237, i32 112, i32 113, i32 240, i32 241, i32 116, i32 117, i32 244, i32 245, i32 120, i32 121, i32 248, i32 249, i32 124, i32 125, i32 252, i32 253, i32 66, i32 67, i32 194, i32 195, i32 70, i32 71, i32 198, i32 199, i32 74, i32 75, i32 202, i32 203, i32 78, i32 79, i32 206, i32 207, i32 82, i32 83, i32 210, i32 211, i32 86, i32 87, i32 214, i32 215, i32 90, i32 91, i32 218, i32 219, i32 94, i32 95, i32 222, i32 223, i32 98, i32 99, i32 226, i32 227, i32 102, i32 103, i32 230, i32 231, i32 106, i32 107, i32 234, i32 235, i32 110, i32 111, i32 238, i32 239, i32 114, i32 115, i32 242, i32 243, i32 118, i32 119, i32 246, i32 247, i32 122, i32 123, i32 250, 
i32 251, i32 126, i32 127, i32 254, i32 255> @@ -539,7 +539,7 @@ define <256 x i8> @vshuff_42(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_43: -; CHECK: [[REG43:r[0-9]+]] = #67 +; CHECK: [[REG43:r[0-9]+]] = #-61 ; CHECK: vshuff(v1,v0,[[REG43]]) define <256 x i8> @vshuff_43(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 4, i32 132, i32 5, i32 133, i32 8, i32 136, i32 9, i32 137, i32 12, i32 140, i32 13, i32 141, i32 16, i32 144, i32 17, i32 145, i32 20, i32 148, i32 21, i32 149, i32 24, i32 152, i32 25, i32 153, i32 28, i32 156, i32 29, i32 157, i32 32, i32 160, i32 33, i32 161, i32 36, i32 164, i32 37, i32 165, i32 40, i32 168, i32 41, i32 169, i32 44, i32 172, i32 45, i32 173, i32 48, i32 176, i32 49, i32 177, i32 52, i32 180, i32 53, i32 181, i32 56, i32 184, i32 57, i32 185, i32 60, i32 188, i32 61, i32 189, i32 2, i32 130, i32 3, i32 131, i32 6, i32 134, i32 7, i32 135, i32 10, i32 138, i32 11, i32 139, i32 14, i32 142, i32 15, i32 143, i32 18, i32 146, i32 19, i32 147, i32 22, i32 150, i32 23, i32 151, i32 26, i32 154, i32 27, i32 155, i32 30, i32 158, i32 31, i32 159, i32 34, i32 162, i32 35, i32 163, i32 38, i32 166, i32 39, i32 167, i32 42, i32 170, i32 43, i32 171, i32 46, i32 174, i32 47, i32 175, i32 50, i32 178, i32 51, i32 179, i32 54, i32 182, i32 55, i32 183, i32 58, i32 186, i32 59, i32 187, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 68, i32 196, i32 69, i32 197, i32 72, i32 200, i32 73, i32 201, i32 76, i32 204, i32 77, i32 205, i32 80, i32 208, i32 81, i32 209, i32 84, i32 212, i32 85, i32 213, i32 88, i32 216, i32 89, i32 217, i32 92, i32 220, i32 93, i32 221, i32 96, i32 224, i32 97, i32 225, i32 100, i32 228, i32 101, i32 229, i32 104, i32 232, i32 105, i32 233, i32 108, i32 236, i32 109, i32 237, i32 112, i32 240, i32 113, i32 241, i32 116, i32 244, i32 117, i32 245, i32 120, i32 248, i32 121, i32 249, i32 124, i32 252, i32 125, i32 253, i32 66, i32 194, i32 67, i32 195, i32 70, i32 198, i32 71, i32 199, i32 74, i32 202, i32 75, i32 203, i32 78, i32 206, i32 79, i32 207, i32 82, i32 210, i32 83, i32 211, i32 86, i32 214, i32 87, i32 215, i32 90, i32 218, i32 91, i32 219, i32 94, i32 222, i32 95, i32 223, i32 98, i32 226, i32 99, i32 227, i32 102, i32 230, i32 103, i32 231, i32 106, i32 234, i32 107, i32 235, i32 110, i32 238, i32 111, i32 239, i32 114, i32 242, i32 115, i32 243, i32 118, i32 246, i32 119, i32 247, i32 122, i32 250, i32 123, i32 251, i32 126, i32 254, i32 127, i32 255> @@ -547,7 +547,7 @@ define <256 x i8> @vshuff_43(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_44: -; CHECK: [[REG44:r[0-9]+]] = #68 +; CHECK: [[REG44:r[0-9]+]] = #-60 ; CHECK: vshuff(v1,v0,[[REG44]]) define <256 x i8> @vshuff_44(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 128, i32 129, i32 130, i32 131, i32 8, i32 9, i32 10, i32 11, i32 136, i32 137, i32 138, i32 139, i32 16, i32 17, i32 18, i32 19, i32 144, i32 145, i32 146, i32 147, i32 24, i32 25, i32 26, i32 27, i32 152, i32 153, i32 154, i32 155, i32 32, i32 33, i32 34, i32 35, i32 160, i32 161, i32 162, i32 163, i32 40, i32 41, i32 42, i32 43, i32 168, i32 169, i32 170, i32 171, i32 48, i32 49, i32 50, i32 51, i32 176, i32 177, i32 178, i32 179, i32 56, i32 57, i32 58, i32 59, i32 184, i32 185, i32 186, i32 187, i32 4, i32 5, i32 6, i32 7, i32 132, i32 133, i32 134, i32 135, i32 12, i32 13, 
i32 14, i32 15, i32 140, i32 141, i32 142, i32 143, i32 20, i32 21, i32 22, i32 23, i32 148, i32 149, i32 150, i32 151, i32 28, i32 29, i32 30, i32 31, i32 156, i32 157, i32 158, i32 159, i32 36, i32 37, i32 38, i32 39, i32 164, i32 165, i32 166, i32 167, i32 44, i32 45, i32 46, i32 47, i32 172, i32 173, i32 174, i32 175, i32 52, i32 53, i32 54, i32 55, i32 180, i32 181, i32 182, i32 183, i32 60, i32 61, i32 62, i32 63, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 192, i32 193, i32 194, i32 195, i32 72, i32 73, i32 74, i32 75, i32 200, i32 201, i32 202, i32 203, i32 80, i32 81, i32 82, i32 83, i32 208, i32 209, i32 210, i32 211, i32 88, i32 89, i32 90, i32 91, i32 216, i32 217, i32 218, i32 219, i32 96, i32 97, i32 98, i32 99, i32 224, i32 225, i32 226, i32 227, i32 104, i32 105, i32 106, i32 107, i32 232, i32 233, i32 234, i32 235, i32 112, i32 113, i32 114, i32 115, i32 240, i32 241, i32 242, i32 243, i32 120, i32 121, i32 122, i32 123, i32 248, i32 249, i32 250, i32 251, i32 68, i32 69, i32 70, i32 71, i32 196, i32 197, i32 198, i32 199, i32 76, i32 77, i32 78, i32 79, i32 204, i32 205, i32 206, i32 207, i32 84, i32 85, i32 86, i32 87, i32 212, i32 213, i32 214, i32 215, i32 92, i32 93, i32 94, i32 95, i32 220, i32 221, i32 222, i32 223, i32 100, i32 101, i32 102, i32 103, i32 228, i32 229, i32 230, i32 231, i32 108, i32 109, i32 110, i32 111, i32 236, i32 237, i32 238, i32 239, i32 116, i32 117, i32 118, i32 119, i32 244, i32 245, i32 246, i32 247, i32 124, i32 125, i32 126, i32 127, i32 252, i32 253, i32 254, i32 255> @@ -555,7 +555,7 @@ define <256 x i8> @vshuff_44(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_45: -; CHECK: [[REG45:r[0-9]+]] = #69 +; CHECK: [[REG45:r[0-9]+]] = #-59 ; CHECK: vshuff(v1,v0,[[REG45]]) define <256 x i8> @vshuff_45(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 1, i32 129, i32 3, i32 131, i32 8, i32 136, i32 10, i32 138, i32 9, i32 137, i32 11, i32 139, i32 16, i32 144, i32 18, i32 146, i32 17, i32 145, i32 19, i32 147, i32 24, i32 152, i32 26, i32 154, i32 25, i32 153, i32 27, i32 155, i32 32, i32 160, i32 34, i32 162, i32 33, i32 161, i32 35, i32 163, i32 40, i32 168, i32 42, i32 170, i32 41, i32 169, i32 43, i32 171, i32 48, i32 176, i32 50, i32 178, i32 49, i32 177, i32 51, i32 179, i32 56, i32 184, i32 58, i32 186, i32 57, i32 185, i32 59, i32 187, i32 4, i32 132, i32 6, i32 134, i32 5, i32 133, i32 7, i32 135, i32 12, i32 140, i32 14, i32 142, i32 13, i32 141, i32 15, i32 143, i32 20, i32 148, i32 22, i32 150, i32 21, i32 149, i32 23, i32 151, i32 28, i32 156, i32 30, i32 158, i32 29, i32 157, i32 31, i32 159, i32 36, i32 164, i32 38, i32 166, i32 37, i32 165, i32 39, i32 167, i32 44, i32 172, i32 46, i32 174, i32 45, i32 173, i32 47, i32 175, i32 52, i32 180, i32 54, i32 182, i32 53, i32 181, i32 55, i32 183, i32 60, i32 188, i32 62, i32 190, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 65, i32 193, i32 67, i32 195, i32 72, i32 200, i32 74, i32 202, i32 73, i32 201, i32 75, i32 203, i32 80, i32 208, i32 82, i32 210, i32 81, i32 209, i32 83, i32 211, i32 88, i32 216, i32 90, i32 218, i32 89, i32 217, i32 91, i32 219, i32 96, i32 224, i32 98, i32 226, i32 97, i32 225, i32 99, i32 227, i32 104, i32 232, i32 106, i32 234, i32 105, i32 233, i32 107, i32 235, i32 112, i32 240, i32 114, i32 242, i32 113, i32 241, i32 115, i32 243, i32 120, i32 248, i32 122, i32 250, i32 121, i32 249, i32 123, i32 
251, i32 68, i32 196, i32 70, i32 198, i32 69, i32 197, i32 71, i32 199, i32 76, i32 204, i32 78, i32 206, i32 77, i32 205, i32 79, i32 207, i32 84, i32 212, i32 86, i32 214, i32 85, i32 213, i32 87, i32 215, i32 92, i32 220, i32 94, i32 222, i32 93, i32 221, i32 95, i32 223, i32 100, i32 228, i32 102, i32 230, i32 101, i32 229, i32 103, i32 231, i32 108, i32 236, i32 110, i32 238, i32 109, i32 237, i32 111, i32 239, i32 116, i32 244, i32 118, i32 246, i32 117, i32 245, i32 119, i32 247, i32 124, i32 252, i32 126, i32 254, i32 125, i32 253, i32 127, i32 255> @@ -563,7 +563,7 @@ define <256 x i8> @vshuff_45(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_46: -; CHECK: [[REG46:r[0-9]+]] = #70 +; CHECK: [[REG46:r[0-9]+]] = #-58 ; CHECK: vshuff(v1,v0,[[REG46]]) define <256 x i8> @vshuff_46(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 2, i32 3, i32 130, i32 131, i32 8, i32 9, i32 136, i32 137, i32 10, i32 11, i32 138, i32 139, i32 16, i32 17, i32 144, i32 145, i32 18, i32 19, i32 146, i32 147, i32 24, i32 25, i32 152, i32 153, i32 26, i32 27, i32 154, i32 155, i32 32, i32 33, i32 160, i32 161, i32 34, i32 35, i32 162, i32 163, i32 40, i32 41, i32 168, i32 169, i32 42, i32 43, i32 170, i32 171, i32 48, i32 49, i32 176, i32 177, i32 50, i32 51, i32 178, i32 179, i32 56, i32 57, i32 184, i32 185, i32 58, i32 59, i32 186, i32 187, i32 4, i32 5, i32 132, i32 133, i32 6, i32 7, i32 134, i32 135, i32 12, i32 13, i32 140, i32 141, i32 14, i32 15, i32 142, i32 143, i32 20, i32 21, i32 148, i32 149, i32 22, i32 23, i32 150, i32 151, i32 28, i32 29, i32 156, i32 157, i32 30, i32 31, i32 158, i32 159, i32 36, i32 37, i32 164, i32 165, i32 38, i32 39, i32 166, i32 167, i32 44, i32 45, i32 172, i32 173, i32 46, i32 47, i32 174, i32 175, i32 52, i32 53, i32 180, i32 181, i32 54, i32 55, i32 182, i32 183, i32 60, i32 61, i32 188, i32 189, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 66, i32 67, i32 194, i32 195, i32 72, i32 73, i32 200, i32 201, i32 74, i32 75, i32 202, i32 203, i32 80, i32 81, i32 208, i32 209, i32 82, i32 83, i32 210, i32 211, i32 88, i32 89, i32 216, i32 217, i32 90, i32 91, i32 218, i32 219, i32 96, i32 97, i32 224, i32 225, i32 98, i32 99, i32 226, i32 227, i32 104, i32 105, i32 232, i32 233, i32 106, i32 107, i32 234, i32 235, i32 112, i32 113, i32 240, i32 241, i32 114, i32 115, i32 242, i32 243, i32 120, i32 121, i32 248, i32 249, i32 122, i32 123, i32 250, i32 251, i32 68, i32 69, i32 196, i32 197, i32 70, i32 71, i32 198, i32 199, i32 76, i32 77, i32 204, i32 205, i32 78, i32 79, i32 206, i32 207, i32 84, i32 85, i32 212, i32 213, i32 86, i32 87, i32 214, i32 215, i32 92, i32 93, i32 220, i32 221, i32 94, i32 95, i32 222, i32 223, i32 100, i32 101, i32 228, i32 229, i32 102, i32 103, i32 230, i32 231, i32 108, i32 109, i32 236, i32 237, i32 110, i32 111, i32 238, i32 239, i32 116, i32 117, i32 244, i32 245, i32 118, i32 119, i32 246, i32 247, i32 124, i32 125, i32 252, i32 253, i32 126, i32 127, i32 254, i32 255> @@ -571,7 +571,7 @@ define <256 x i8> @vshuff_46(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_47: -; CHECK: [[REG47:r[0-9]+]] = #71 +; CHECK: [[REG47:r[0-9]+]] = #-57 ; CHECK: vshuff(v1,v0,[[REG47]]) define <256 x i8> @vshuff_47(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 2, i32 130, i32 3, i32 131, i32 8, i32 136, i32 9, i32 137, 
i32 10, i32 138, i32 11, i32 139, i32 16, i32 144, i32 17, i32 145, i32 18, i32 146, i32 19, i32 147, i32 24, i32 152, i32 25, i32 153, i32 26, i32 154, i32 27, i32 155, i32 32, i32 160, i32 33, i32 161, i32 34, i32 162, i32 35, i32 163, i32 40, i32 168, i32 41, i32 169, i32 42, i32 170, i32 43, i32 171, i32 48, i32 176, i32 49, i32 177, i32 50, i32 178, i32 51, i32 179, i32 56, i32 184, i32 57, i32 185, i32 58, i32 186, i32 59, i32 187, i32 4, i32 132, i32 5, i32 133, i32 6, i32 134, i32 7, i32 135, i32 12, i32 140, i32 13, i32 141, i32 14, i32 142, i32 15, i32 143, i32 20, i32 148, i32 21, i32 149, i32 22, i32 150, i32 23, i32 151, i32 28, i32 156, i32 29, i32 157, i32 30, i32 158, i32 31, i32 159, i32 36, i32 164, i32 37, i32 165, i32 38, i32 166, i32 39, i32 167, i32 44, i32 172, i32 45, i32 173, i32 46, i32 174, i32 47, i32 175, i32 52, i32 180, i32 53, i32 181, i32 54, i32 182, i32 55, i32 183, i32 60, i32 188, i32 61, i32 189, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 66, i32 194, i32 67, i32 195, i32 72, i32 200, i32 73, i32 201, i32 74, i32 202, i32 75, i32 203, i32 80, i32 208, i32 81, i32 209, i32 82, i32 210, i32 83, i32 211, i32 88, i32 216, i32 89, i32 217, i32 90, i32 218, i32 91, i32 219, i32 96, i32 224, i32 97, i32 225, i32 98, i32 226, i32 99, i32 227, i32 104, i32 232, i32 105, i32 233, i32 106, i32 234, i32 107, i32 235, i32 112, i32 240, i32 113, i32 241, i32 114, i32 242, i32 115, i32 243, i32 120, i32 248, i32 121, i32 249, i32 122, i32 250, i32 123, i32 251, i32 68, i32 196, i32 69, i32 197, i32 70, i32 198, i32 71, i32 199, i32 76, i32 204, i32 77, i32 205, i32 78, i32 206, i32 79, i32 207, i32 84, i32 212, i32 85, i32 213, i32 86, i32 214, i32 87, i32 215, i32 92, i32 220, i32 93, i32 221, i32 94, i32 222, i32 95, i32 223, i32 100, i32 228, i32 101, i32 229, i32 102, i32 230, i32 103, i32 231, i32 108, i32 236, i32 109, i32 237, i32 110, i32 238, i32 111, i32 239, i32 116, i32 244, i32 117, i32 245, i32 118, i32 246, i32 119, i32 247, i32 124, i32 252, i32 125, i32 253, i32 126, i32 254, i32 127, i32 255> @@ -579,7 +579,7 @@ define <256 x i8> @vshuff_47(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_48: -; CHECK: [[REG48:r[0-9]+]] = #72 +; CHECK: [[REG48:r[0-9]+]] = #-56 ; CHECK: vshuff(v1,v0,[[REG48]]) define <256 x i8> @vshuff_48(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 
67, i32 68, i32 69, i32 70, i32 71, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -587,7 +587,7 @@ define <256 x i8> @vshuff_48(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_49: -; CHECK: [[REG49:r[0-9]+]] = #73 +; CHECK: [[REG49:r[0-9]+]] = #-55 ; CHECK: vshuff(v1,v0,[[REG49]]) define <256 x i8> @vshuff_49(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 4, i32 132, i32 6, i32 134, i32 1, i32 129, i32 3, i32 131, i32 5, i32 133, i32 7, i32 135, i32 16, i32 144, i32 18, i32 146, i32 20, i32 148, i32 22, i32 150, i32 17, i32 145, i32 19, i32 147, i32 21, i32 149, i32 23, i32 151, i32 32, i32 160, i32 34, i32 162, i32 36, i32 164, i32 38, i32 166, i32 33, i32 161, i32 35, i32 163, i32 37, i32 165, i32 39, i32 167, i32 48, i32 176, i32 50, i32 178, i32 52, i32 180, i32 54, i32 182, i32 49, i32 177, i32 51, i32 179, i32 53, i32 181, i32 55, i32 183, i32 8, i32 136, i32 10, i32 138, i32 12, i32 140, i32 14, i32 142, i32 9, i32 137, i32 11, i32 139, i32 13, i32 141, i32 15, i32 143, i32 24, i32 152, i32 26, i32 154, i32 28, i32 156, i32 30, i32 158, i32 25, i32 153, i32 27, i32 155, i32 29, i32 157, i32 31, i32 159, i32 40, i32 168, i32 42, i32 170, i32 44, i32 172, i32 46, i32 174, i32 41, i32 169, i32 43, i32 171, i32 45, i32 173, i32 47, i32 175, i32 56, i32 184, i32 58, i32 186, i32 60, i32 188, i32 62, i32 190, i32 57, i32 185, i32 59, i32 187, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 68, i32 196, i32 70, i32 198, i32 65, i32 193, i32 67, i32 195, i32 69, i32 197, i32 71, i32 199, i32 80, i32 208, i32 82, i32 210, i32 84, i32 212, i32 86, i32 214, i32 81, i32 209, i32 83, i32 211, i32 85, i32 213, i32 87, i32 215, i32 96, i32 224, i32 98, i32 226, i32 100, i32 228, i32 102, i32 230, i32 97, i32 225, i32 99, i32 227, i32 101, i32 229, i32 103, i32 231, i32 112, i32 240, i32 114, i32 242, i32 116, i32 244, i32 118, i32 246, i32 113, i32 241, i32 115, i32 243, i32 117, i32 245, i32 119, i32 247, i32 72, i32 200, i32 74, i32 202, i32 76, i32 204, i32 78, i32 206, i32 73, i32 201, i32 75, i32 203, i32 77, i32 205, i32 79, i32 207, i32 88, i32 216, i32 90, i32 218, i32 92, i32 220, i32 94, i32 222, i32 89, i32 217, i32 91, i32 219, i32 93, i32 221, i32 95, i32 223, i32 104, i32 232, i32 106, i32 234, i32 108, i32 236, i32 110, i32 238, i32 105, i32 233, i32 107, i32 235, i32 109, i32 237, i32 111, i32 239, i32 120, i32 248, i32 122, i32 250, i32 124, i32 252, i32 126, i32 
254, i32 121, i32 249, i32 123, i32 251, i32 125, i32 253, i32 127, i32 255> @@ -595,7 +595,7 @@ define <256 x i8> @vshuff_49(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_4a: -; CHECK: [[REG4a:r[0-9]+]] = #74 +; CHECK: [[REG4a:r[0-9]+]] = #-54 ; CHECK: vshuff(v1,v0,[[REG4a]]) define <256 x i8> @vshuff_4a(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 4, i32 5, i32 132, i32 133, i32 2, i32 3, i32 130, i32 131, i32 6, i32 7, i32 134, i32 135, i32 16, i32 17, i32 144, i32 145, i32 20, i32 21, i32 148, i32 149, i32 18, i32 19, i32 146, i32 147, i32 22, i32 23, i32 150, i32 151, i32 32, i32 33, i32 160, i32 161, i32 36, i32 37, i32 164, i32 165, i32 34, i32 35, i32 162, i32 163, i32 38, i32 39, i32 166, i32 167, i32 48, i32 49, i32 176, i32 177, i32 52, i32 53, i32 180, i32 181, i32 50, i32 51, i32 178, i32 179, i32 54, i32 55, i32 182, i32 183, i32 8, i32 9, i32 136, i32 137, i32 12, i32 13, i32 140, i32 141, i32 10, i32 11, i32 138, i32 139, i32 14, i32 15, i32 142, i32 143, i32 24, i32 25, i32 152, i32 153, i32 28, i32 29, i32 156, i32 157, i32 26, i32 27, i32 154, i32 155, i32 30, i32 31, i32 158, i32 159, i32 40, i32 41, i32 168, i32 169, i32 44, i32 45, i32 172, i32 173, i32 42, i32 43, i32 170, i32 171, i32 46, i32 47, i32 174, i32 175, i32 56, i32 57, i32 184, i32 185, i32 60, i32 61, i32 188, i32 189, i32 58, i32 59, i32 186, i32 187, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 68, i32 69, i32 196, i32 197, i32 66, i32 67, i32 194, i32 195, i32 70, i32 71, i32 198, i32 199, i32 80, i32 81, i32 208, i32 209, i32 84, i32 85, i32 212, i32 213, i32 82, i32 83, i32 210, i32 211, i32 86, i32 87, i32 214, i32 215, i32 96, i32 97, i32 224, i32 225, i32 100, i32 101, i32 228, i32 229, i32 98, i32 99, i32 226, i32 227, i32 102, i32 103, i32 230, i32 231, i32 112, i32 113, i32 240, i32 241, i32 116, i32 117, i32 244, i32 245, i32 114, i32 115, i32 242, i32 243, i32 118, i32 119, i32 246, i32 247, i32 72, i32 73, i32 200, i32 201, i32 76, i32 77, i32 204, i32 205, i32 74, i32 75, i32 202, i32 203, i32 78, i32 79, i32 206, i32 207, i32 88, i32 89, i32 216, i32 217, i32 92, i32 93, i32 220, i32 221, i32 90, i32 91, i32 218, i32 219, i32 94, i32 95, i32 222, i32 223, i32 104, i32 105, i32 232, i32 233, i32 108, i32 109, i32 236, i32 237, i32 106, i32 107, i32 234, i32 235, i32 110, i32 111, i32 238, i32 239, i32 120, i32 121, i32 248, i32 249, i32 124, i32 125, i32 252, i32 253, i32 122, i32 123, i32 250, i32 251, i32 126, i32 127, i32 254, i32 255> @@ -603,7 +603,7 @@ define <256 x i8> @vshuff_4a(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_4b: -; CHECK: [[REG4b:r[0-9]+]] = #75 +; CHECK: [[REG4b:r[0-9]+]] = #-53 ; CHECK: vshuff(v1,v0,[[REG4b]]) define <256 x i8> @vshuff_4b(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 4, i32 132, i32 5, i32 133, i32 2, i32 130, i32 3, i32 131, i32 6, i32 134, i32 7, i32 135, i32 16, i32 144, i32 17, i32 145, i32 20, i32 148, i32 21, i32 149, i32 18, i32 146, i32 19, i32 147, i32 22, i32 150, i32 23, i32 151, i32 32, i32 160, i32 33, i32 161, i32 36, i32 164, i32 37, i32 165, i32 34, i32 162, i32 35, i32 163, i32 38, i32 166, i32 39, i32 167, i32 48, i32 176, i32 49, i32 177, i32 52, i32 180, i32 53, i32 181, i32 50, i32 178, i32 51, i32 179, i32 54, i32 182, i32 55, i32 183, i32 8, i32 136, i32 9, i32 137, i32 12, i32 140, 
i32 13, i32 141, i32 10, i32 138, i32 11, i32 139, i32 14, i32 142, i32 15, i32 143, i32 24, i32 152, i32 25, i32 153, i32 28, i32 156, i32 29, i32 157, i32 26, i32 154, i32 27, i32 155, i32 30, i32 158, i32 31, i32 159, i32 40, i32 168, i32 41, i32 169, i32 44, i32 172, i32 45, i32 173, i32 42, i32 170, i32 43, i32 171, i32 46, i32 174, i32 47, i32 175, i32 56, i32 184, i32 57, i32 185, i32 60, i32 188, i32 61, i32 189, i32 58, i32 186, i32 59, i32 187, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 68, i32 196, i32 69, i32 197, i32 66, i32 194, i32 67, i32 195, i32 70, i32 198, i32 71, i32 199, i32 80, i32 208, i32 81, i32 209, i32 84, i32 212, i32 85, i32 213, i32 82, i32 210, i32 83, i32 211, i32 86, i32 214, i32 87, i32 215, i32 96, i32 224, i32 97, i32 225, i32 100, i32 228, i32 101, i32 229, i32 98, i32 226, i32 99, i32 227, i32 102, i32 230, i32 103, i32 231, i32 112, i32 240, i32 113, i32 241, i32 116, i32 244, i32 117, i32 245, i32 114, i32 242, i32 115, i32 243, i32 118, i32 246, i32 119, i32 247, i32 72, i32 200, i32 73, i32 201, i32 76, i32 204, i32 77, i32 205, i32 74, i32 202, i32 75, i32 203, i32 78, i32 206, i32 79, i32 207, i32 88, i32 216, i32 89, i32 217, i32 92, i32 220, i32 93, i32 221, i32 90, i32 218, i32 91, i32 219, i32 94, i32 222, i32 95, i32 223, i32 104, i32 232, i32 105, i32 233, i32 108, i32 236, i32 109, i32 237, i32 106, i32 234, i32 107, i32 235, i32 110, i32 238, i32 111, i32 239, i32 120, i32 248, i32 121, i32 249, i32 124, i32 252, i32 125, i32 253, i32 122, i32 250, i32 123, i32 251, i32 126, i32 254, i32 127, i32 255> @@ -611,7 +611,7 @@ define <256 x i8> @vshuff_4b(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_4c: -; CHECK: [[REG4c:r[0-9]+]] = #76 +; CHECK: [[REG4c:r[0-9]+]] = #-52 ; CHECK: vshuff(v1,v0,[[REG4c]]) define <256 x i8> @vshuff_4c(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 128, i32 129, i32 130, i32 131, i32 4, i32 5, i32 6, i32 7, i32 132, i32 133, i32 134, i32 135, i32 16, i32 17, i32 18, i32 19, i32 144, i32 145, i32 146, i32 147, i32 20, i32 21, i32 22, i32 23, i32 148, i32 149, i32 150, i32 151, i32 32, i32 33, i32 34, i32 35, i32 160, i32 161, i32 162, i32 163, i32 36, i32 37, i32 38, i32 39, i32 164, i32 165, i32 166, i32 167, i32 48, i32 49, i32 50, i32 51, i32 176, i32 177, i32 178, i32 179, i32 52, i32 53, i32 54, i32 55, i32 180, i32 181, i32 182, i32 183, i32 8, i32 9, i32 10, i32 11, i32 136, i32 137, i32 138, i32 139, i32 12, i32 13, i32 14, i32 15, i32 140, i32 141, i32 142, i32 143, i32 24, i32 25, i32 26, i32 27, i32 152, i32 153, i32 154, i32 155, i32 28, i32 29, i32 30, i32 31, i32 156, i32 157, i32 158, i32 159, i32 40, i32 41, i32 42, i32 43, i32 168, i32 169, i32 170, i32 171, i32 44, i32 45, i32 46, i32 47, i32 172, i32 173, i32 174, i32 175, i32 56, i32 57, i32 58, i32 59, i32 184, i32 185, i32 186, i32 187, i32 60, i32 61, i32 62, i32 63, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 192, i32 193, i32 194, i32 195, i32 68, i32 69, i32 70, i32 71, i32 196, i32 197, i32 198, i32 199, i32 80, i32 81, i32 82, i32 83, i32 208, i32 209, i32 210, i32 211, i32 84, i32 85, i32 86, i32 87, i32 212, i32 213, i32 214, i32 215, i32 96, i32 97, i32 98, i32 99, i32 224, i32 225, i32 226, i32 227, i32 100, i32 101, i32 102, i32 103, i32 228, i32 229, i32 230, i32 231, i32 112, i32 113, i32 114, i32 115, i32 240, i32 241, i32 242, i32 243, i32 116, i32 117, i32 118, i32 
119, i32 244, i32 245, i32 246, i32 247, i32 72, i32 73, i32 74, i32 75, i32 200, i32 201, i32 202, i32 203, i32 76, i32 77, i32 78, i32 79, i32 204, i32 205, i32 206, i32 207, i32 88, i32 89, i32 90, i32 91, i32 216, i32 217, i32 218, i32 219, i32 92, i32 93, i32 94, i32 95, i32 220, i32 221, i32 222, i32 223, i32 104, i32 105, i32 106, i32 107, i32 232, i32 233, i32 234, i32 235, i32 108, i32 109, i32 110, i32 111, i32 236, i32 237, i32 238, i32 239, i32 120, i32 121, i32 122, i32 123, i32 248, i32 249, i32 250, i32 251, i32 124, i32 125, i32 126, i32 127, i32 252, i32 253, i32 254, i32 255> @@ -619,7 +619,7 @@ define <256 x i8> @vshuff_4c(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_4d: -; CHECK: [[REG4d:r[0-9]+]] = #77 +; CHECK: [[REG4d:r[0-9]+]] = #-51 ; CHECK: vshuff(v1,v0,[[REG4d]]) define <256 x i8> @vshuff_4d(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 1, i32 129, i32 3, i32 131, i32 4, i32 132, i32 6, i32 134, i32 5, i32 133, i32 7, i32 135, i32 16, i32 144, i32 18, i32 146, i32 17, i32 145, i32 19, i32 147, i32 20, i32 148, i32 22, i32 150, i32 21, i32 149, i32 23, i32 151, i32 32, i32 160, i32 34, i32 162, i32 33, i32 161, i32 35, i32 163, i32 36, i32 164, i32 38, i32 166, i32 37, i32 165, i32 39, i32 167, i32 48, i32 176, i32 50, i32 178, i32 49, i32 177, i32 51, i32 179, i32 52, i32 180, i32 54, i32 182, i32 53, i32 181, i32 55, i32 183, i32 8, i32 136, i32 10, i32 138, i32 9, i32 137, i32 11, i32 139, i32 12, i32 140, i32 14, i32 142, i32 13, i32 141, i32 15, i32 143, i32 24, i32 152, i32 26, i32 154, i32 25, i32 153, i32 27, i32 155, i32 28, i32 156, i32 30, i32 158, i32 29, i32 157, i32 31, i32 159, i32 40, i32 168, i32 42, i32 170, i32 41, i32 169, i32 43, i32 171, i32 44, i32 172, i32 46, i32 174, i32 45, i32 173, i32 47, i32 175, i32 56, i32 184, i32 58, i32 186, i32 57, i32 185, i32 59, i32 187, i32 60, i32 188, i32 62, i32 190, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 65, i32 193, i32 67, i32 195, i32 68, i32 196, i32 70, i32 198, i32 69, i32 197, i32 71, i32 199, i32 80, i32 208, i32 82, i32 210, i32 81, i32 209, i32 83, i32 211, i32 84, i32 212, i32 86, i32 214, i32 85, i32 213, i32 87, i32 215, i32 96, i32 224, i32 98, i32 226, i32 97, i32 225, i32 99, i32 227, i32 100, i32 228, i32 102, i32 230, i32 101, i32 229, i32 103, i32 231, i32 112, i32 240, i32 114, i32 242, i32 113, i32 241, i32 115, i32 243, i32 116, i32 244, i32 118, i32 246, i32 117, i32 245, i32 119, i32 247, i32 72, i32 200, i32 74, i32 202, i32 73, i32 201, i32 75, i32 203, i32 76, i32 204, i32 78, i32 206, i32 77, i32 205, i32 79, i32 207, i32 88, i32 216, i32 90, i32 218, i32 89, i32 217, i32 91, i32 219, i32 92, i32 220, i32 94, i32 222, i32 93, i32 221, i32 95, i32 223, i32 104, i32 232, i32 106, i32 234, i32 105, i32 233, i32 107, i32 235, i32 108, i32 236, i32 110, i32 238, i32 109, i32 237, i32 111, i32 239, i32 120, i32 248, i32 122, i32 250, i32 121, i32 249, i32 123, i32 251, i32 124, i32 252, i32 126, i32 254, i32 125, i32 253, i32 127, i32 255> @@ -627,7 +627,7 @@ define <256 x i8> @vshuff_4d(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_4e: -; CHECK: [[REG4e:r[0-9]+]] = #78 +; CHECK: [[REG4e:r[0-9]+]] = #-50 ; CHECK: vshuff(v1,v0,[[REG4e]]) define <256 x i8> @vshuff_4e(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 2, i32 3, i32 130, i32 
131, i32 4, i32 5, i32 132, i32 133, i32 6, i32 7, i32 134, i32 135, i32 16, i32 17, i32 144, i32 145, i32 18, i32 19, i32 146, i32 147, i32 20, i32 21, i32 148, i32 149, i32 22, i32 23, i32 150, i32 151, i32 32, i32 33, i32 160, i32 161, i32 34, i32 35, i32 162, i32 163, i32 36, i32 37, i32 164, i32 165, i32 38, i32 39, i32 166, i32 167, i32 48, i32 49, i32 176, i32 177, i32 50, i32 51, i32 178, i32 179, i32 52, i32 53, i32 180, i32 181, i32 54, i32 55, i32 182, i32 183, i32 8, i32 9, i32 136, i32 137, i32 10, i32 11, i32 138, i32 139, i32 12, i32 13, i32 140, i32 141, i32 14, i32 15, i32 142, i32 143, i32 24, i32 25, i32 152, i32 153, i32 26, i32 27, i32 154, i32 155, i32 28, i32 29, i32 156, i32 157, i32 30, i32 31, i32 158, i32 159, i32 40, i32 41, i32 168, i32 169, i32 42, i32 43, i32 170, i32 171, i32 44, i32 45, i32 172, i32 173, i32 46, i32 47, i32 174, i32 175, i32 56, i32 57, i32 184, i32 185, i32 58, i32 59, i32 186, i32 187, i32 60, i32 61, i32 188, i32 189, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 66, i32 67, i32 194, i32 195, i32 68, i32 69, i32 196, i32 197, i32 70, i32 71, i32 198, i32 199, i32 80, i32 81, i32 208, i32 209, i32 82, i32 83, i32 210, i32 211, i32 84, i32 85, i32 212, i32 213, i32 86, i32 87, i32 214, i32 215, i32 96, i32 97, i32 224, i32 225, i32 98, i32 99, i32 226, i32 227, i32 100, i32 101, i32 228, i32 229, i32 102, i32 103, i32 230, i32 231, i32 112, i32 113, i32 240, i32 241, i32 114, i32 115, i32 242, i32 243, i32 116, i32 117, i32 244, i32 245, i32 118, i32 119, i32 246, i32 247, i32 72, i32 73, i32 200, i32 201, i32 74, i32 75, i32 202, i32 203, i32 76, i32 77, i32 204, i32 205, i32 78, i32 79, i32 206, i32 207, i32 88, i32 89, i32 216, i32 217, i32 90, i32 91, i32 218, i32 219, i32 92, i32 93, i32 220, i32 221, i32 94, i32 95, i32 222, i32 223, i32 104, i32 105, i32 232, i32 233, i32 106, i32 107, i32 234, i32 235, i32 108, i32 109, i32 236, i32 237, i32 110, i32 111, i32 238, i32 239, i32 120, i32 121, i32 248, i32 249, i32 122, i32 123, i32 250, i32 251, i32 124, i32 125, i32 252, i32 253, i32 126, i32 127, i32 254, i32 255> @@ -635,7 +635,7 @@ define <256 x i8> @vshuff_4e(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_4f: -; CHECK: [[REG4f:r[0-9]+]] = #79 +; CHECK: [[REG4f:r[0-9]+]] = #-49 ; CHECK: vshuff(v1,v0,[[REG4f]]) define <256 x i8> @vshuff_4f(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 2, i32 130, i32 3, i32 131, i32 4, i32 132, i32 5, i32 133, i32 6, i32 134, i32 7, i32 135, i32 16, i32 144, i32 17, i32 145, i32 18, i32 146, i32 19, i32 147, i32 20, i32 148, i32 21, i32 149, i32 22, i32 150, i32 23, i32 151, i32 32, i32 160, i32 33, i32 161, i32 34, i32 162, i32 35, i32 163, i32 36, i32 164, i32 37, i32 165, i32 38, i32 166, i32 39, i32 167, i32 48, i32 176, i32 49, i32 177, i32 50, i32 178, i32 51, i32 179, i32 52, i32 180, i32 53, i32 181, i32 54, i32 182, i32 55, i32 183, i32 8, i32 136, i32 9, i32 137, i32 10, i32 138, i32 11, i32 139, i32 12, i32 140, i32 13, i32 141, i32 14, i32 142, i32 15, i32 143, i32 24, i32 152, i32 25, i32 153, i32 26, i32 154, i32 27, i32 155, i32 28, i32 156, i32 29, i32 157, i32 30, i32 158, i32 31, i32 159, i32 40, i32 168, i32 41, i32 169, i32 42, i32 170, i32 43, i32 171, i32 44, i32 172, i32 45, i32 173, i32 46, i32 174, i32 47, i32 175, i32 56, i32 184, i32 57, i32 185, i32 58, i32 186, i32 59, i32 187, i32 60, i32 188, i32 61, i32 189, i32 62, i32 190, i32 63, 
i32 191, i32 64, i32 192, i32 65, i32 193, i32 66, i32 194, i32 67, i32 195, i32 68, i32 196, i32 69, i32 197, i32 70, i32 198, i32 71, i32 199, i32 80, i32 208, i32 81, i32 209, i32 82, i32 210, i32 83, i32 211, i32 84, i32 212, i32 85, i32 213, i32 86, i32 214, i32 87, i32 215, i32 96, i32 224, i32 97, i32 225, i32 98, i32 226, i32 99, i32 227, i32 100, i32 228, i32 101, i32 229, i32 102, i32 230, i32 103, i32 231, i32 112, i32 240, i32 113, i32 241, i32 114, i32 242, i32 115, i32 243, i32 116, i32 244, i32 117, i32 245, i32 118, i32 246, i32 119, i32 247, i32 72, i32 200, i32 73, i32 201, i32 74, i32 202, i32 75, i32 203, i32 76, i32 204, i32 77, i32 205, i32 78, i32 206, i32 79, i32 207, i32 88, i32 216, i32 89, i32 217, i32 90, i32 218, i32 91, i32 219, i32 92, i32 220, i32 93, i32 221, i32 94, i32 222, i32 95, i32 223, i32 104, i32 232, i32 105, i32 233, i32 106, i32 234, i32 107, i32 235, i32 108, i32 236, i32 109, i32 237, i32 110, i32 238, i32 111, i32 239, i32 120, i32 248, i32 121, i32 249, i32 122, i32 250, i32 123, i32 251, i32 124, i32 252, i32 125, i32 253, i32 126, i32 254, i32 127, i32 255> @@ -643,7 +643,7 @@ define <256 x i8> @vshuff_4f(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_50: -; CHECK: [[REG50:r[0-9]+]] = #80 +; CHECK: [[REG50:r[0-9]+]] = #-48 ; CHECK: vshuff(v1,v0,[[REG50]]) define <256 x i8> @vshuff_50(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 240, i32 241, i32 242, i32 
243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -651,7 +651,7 @@ define <256 x i8> @vshuff_50(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_51: -; CHECK: [[REG51:r[0-9]+]] = #81 +; CHECK: [[REG51:r[0-9]+]] = #-47 ; CHECK: vshuff(v1,v0,[[REG51]]) define <256 x i8> @vshuff_51(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 4, i32 132, i32 6, i32 134, i32 8, i32 136, i32 10, i32 138, i32 12, i32 140, i32 14, i32 142, i32 1, i32 129, i32 3, i32 131, i32 5, i32 133, i32 7, i32 135, i32 9, i32 137, i32 11, i32 139, i32 13, i32 141, i32 15, i32 143, i32 32, i32 160, i32 34, i32 162, i32 36, i32 164, i32 38, i32 166, i32 40, i32 168, i32 42, i32 170, i32 44, i32 172, i32 46, i32 174, i32 33, i32 161, i32 35, i32 163, i32 37, i32 165, i32 39, i32 167, i32 41, i32 169, i32 43, i32 171, i32 45, i32 173, i32 47, i32 175, i32 16, i32 144, i32 18, i32 146, i32 20, i32 148, i32 22, i32 150, i32 24, i32 152, i32 26, i32 154, i32 28, i32 156, i32 30, i32 158, i32 17, i32 145, i32 19, i32 147, i32 21, i32 149, i32 23, i32 151, i32 25, i32 153, i32 27, i32 155, i32 29, i32 157, i32 31, i32 159, i32 48, i32 176, i32 50, i32 178, i32 52, i32 180, i32 54, i32 182, i32 56, i32 184, i32 58, i32 186, i32 60, i32 188, i32 62, i32 190, i32 49, i32 177, i32 51, i32 179, i32 53, i32 181, i32 55, i32 183, i32 57, i32 185, i32 59, i32 187, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 68, i32 196, i32 70, i32 198, i32 72, i32 200, i32 74, i32 202, i32 76, i32 204, i32 78, i32 206, i32 65, i32 193, i32 67, i32 195, i32 69, i32 197, i32 71, i32 199, i32 73, i32 201, i32 75, i32 203, i32 77, i32 205, i32 79, i32 207, i32 96, i32 224, i32 98, i32 226, i32 100, i32 228, i32 102, i32 230, i32 104, i32 232, i32 106, i32 234, i32 108, i32 236, i32 110, i32 238, i32 97, i32 225, i32 99, i32 227, i32 101, i32 229, i32 103, i32 231, i32 105, i32 233, i32 107, i32 235, i32 109, i32 237, i32 111, i32 239, i32 80, i32 208, i32 82, i32 210, i32 84, i32 212, i32 86, i32 214, i32 88, i32 216, i32 90, i32 218, i32 92, i32 220, i32 94, i32 222, i32 81, i32 209, i32 83, i32 211, i32 85, i32 213, i32 87, i32 215, i32 89, i32 217, i32 91, i32 219, i32 93, i32 221, i32 95, i32 223, i32 112, i32 240, i32 114, i32 242, i32 116, i32 244, i32 118, i32 246, i32 120, i32 248, i32 122, i32 250, i32 124, i32 252, i32 126, i32 254, i32 113, i32 241, i32 115, i32 243, i32 117, i32 245, i32 119, i32 247, i32 121, i32 249, i32 123, i32 251, i32 125, i32 253, i32 127, i32 255> @@ -659,7 +659,7 @@ define <256 x i8> @vshuff_51(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_52: -; CHECK: [[REG52:r[0-9]+]] = #82 +; CHECK: [[REG52:r[0-9]+]] = #-46 ; CHECK: vshuff(v1,v0,[[REG52]]) define <256 x i8> @vshuff_52(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 4, i32 5, i32 132, i32 133, i32 8, i32 9, i32 136, i32 137, i32 12, i32 13, i32 140, i32 141, i32 2, i32 3, i32 130, i32 131, i32 6, i32 7, i32 134, i32 135, i32 10, i32 11, i32 138, i32 139, i32 14, i32 15, i32 142, i32 143, i32 32, i32 33, i32 160, i32 161, i32 36, i32 37, i32 164, i32 165, i32 40, i32 41, i32 168, i32 169, i32 44, i32 45, i32 172, i32 173, i32 34, i32 35, i32 162, i32 163, i32 38, i32 39, i32 166, i32 167, i32 42, i32 43, i32 170, i32 171, i32 46, i32 47, i32 174, i32 175, i32 16, i32 17, 
i32 144, i32 145, i32 20, i32 21, i32 148, i32 149, i32 24, i32 25, i32 152, i32 153, i32 28, i32 29, i32 156, i32 157, i32 18, i32 19, i32 146, i32 147, i32 22, i32 23, i32 150, i32 151, i32 26, i32 27, i32 154, i32 155, i32 30, i32 31, i32 158, i32 159, i32 48, i32 49, i32 176, i32 177, i32 52, i32 53, i32 180, i32 181, i32 56, i32 57, i32 184, i32 185, i32 60, i32 61, i32 188, i32 189, i32 50, i32 51, i32 178, i32 179, i32 54, i32 55, i32 182, i32 183, i32 58, i32 59, i32 186, i32 187, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 68, i32 69, i32 196, i32 197, i32 72, i32 73, i32 200, i32 201, i32 76, i32 77, i32 204, i32 205, i32 66, i32 67, i32 194, i32 195, i32 70, i32 71, i32 198, i32 199, i32 74, i32 75, i32 202, i32 203, i32 78, i32 79, i32 206, i32 207, i32 96, i32 97, i32 224, i32 225, i32 100, i32 101, i32 228, i32 229, i32 104, i32 105, i32 232, i32 233, i32 108, i32 109, i32 236, i32 237, i32 98, i32 99, i32 226, i32 227, i32 102, i32 103, i32 230, i32 231, i32 106, i32 107, i32 234, i32 235, i32 110, i32 111, i32 238, i32 239, i32 80, i32 81, i32 208, i32 209, i32 84, i32 85, i32 212, i32 213, i32 88, i32 89, i32 216, i32 217, i32 92, i32 93, i32 220, i32 221, i32 82, i32 83, i32 210, i32 211, i32 86, i32 87, i32 214, i32 215, i32 90, i32 91, i32 218, i32 219, i32 94, i32 95, i32 222, i32 223, i32 112, i32 113, i32 240, i32 241, i32 116, i32 117, i32 244, i32 245, i32 120, i32 121, i32 248, i32 249, i32 124, i32 125, i32 252, i32 253, i32 114, i32 115, i32 242, i32 243, i32 118, i32 119, i32 246, i32 247, i32 122, i32 123, i32 250, i32 251, i32 126, i32 127, i32 254, i32 255> @@ -667,7 +667,7 @@ define <256 x i8> @vshuff_52(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_53: -; CHECK: [[REG53:r[0-9]+]] = #83 +; CHECK: [[REG53:r[0-9]+]] = #-45 ; CHECK: vshuff(v1,v0,[[REG53]]) define <256 x i8> @vshuff_53(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 4, i32 132, i32 5, i32 133, i32 8, i32 136, i32 9, i32 137, i32 12, i32 140, i32 13, i32 141, i32 2, i32 130, i32 3, i32 131, i32 6, i32 134, i32 7, i32 135, i32 10, i32 138, i32 11, i32 139, i32 14, i32 142, i32 15, i32 143, i32 32, i32 160, i32 33, i32 161, i32 36, i32 164, i32 37, i32 165, i32 40, i32 168, i32 41, i32 169, i32 44, i32 172, i32 45, i32 173, i32 34, i32 162, i32 35, i32 163, i32 38, i32 166, i32 39, i32 167, i32 42, i32 170, i32 43, i32 171, i32 46, i32 174, i32 47, i32 175, i32 16, i32 144, i32 17, i32 145, i32 20, i32 148, i32 21, i32 149, i32 24, i32 152, i32 25, i32 153, i32 28, i32 156, i32 29, i32 157, i32 18, i32 146, i32 19, i32 147, i32 22, i32 150, i32 23, i32 151, i32 26, i32 154, i32 27, i32 155, i32 30, i32 158, i32 31, i32 159, i32 48, i32 176, i32 49, i32 177, i32 52, i32 180, i32 53, i32 181, i32 56, i32 184, i32 57, i32 185, i32 60, i32 188, i32 61, i32 189, i32 50, i32 178, i32 51, i32 179, i32 54, i32 182, i32 55, i32 183, i32 58, i32 186, i32 59, i32 187, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 68, i32 196, i32 69, i32 197, i32 72, i32 200, i32 73, i32 201, i32 76, i32 204, i32 77, i32 205, i32 66, i32 194, i32 67, i32 195, i32 70, i32 198, i32 71, i32 199, i32 74, i32 202, i32 75, i32 203, i32 78, i32 206, i32 79, i32 207, i32 96, i32 224, i32 97, i32 225, i32 100, i32 228, i32 101, i32 229, i32 104, i32 232, i32 105, i32 233, i32 108, i32 236, i32 109, i32 237, i32 98, i32 226, i32 99, i32 227, i32 102, i32 230, i32 103, i32 231, 
i32 106, i32 234, i32 107, i32 235, i32 110, i32 238, i32 111, i32 239, i32 80, i32 208, i32 81, i32 209, i32 84, i32 212, i32 85, i32 213, i32 88, i32 216, i32 89, i32 217, i32 92, i32 220, i32 93, i32 221, i32 82, i32 210, i32 83, i32 211, i32 86, i32 214, i32 87, i32 215, i32 90, i32 218, i32 91, i32 219, i32 94, i32 222, i32 95, i32 223, i32 112, i32 240, i32 113, i32 241, i32 116, i32 244, i32 117, i32 245, i32 120, i32 248, i32 121, i32 249, i32 124, i32 252, i32 125, i32 253, i32 114, i32 242, i32 115, i32 243, i32 118, i32 246, i32 119, i32 247, i32 122, i32 250, i32 123, i32 251, i32 126, i32 254, i32 127, i32 255> @@ -675,7 +675,7 @@ define <256 x i8> @vshuff_53(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_54: -; CHECK: [[REG54:r[0-9]+]] = #84 +; CHECK: [[REG54:r[0-9]+]] = #-44 ; CHECK: vshuff(v1,v0,[[REG54]]) define <256 x i8> @vshuff_54(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 128, i32 129, i32 130, i32 131, i32 8, i32 9, i32 10, i32 11, i32 136, i32 137, i32 138, i32 139, i32 4, i32 5, i32 6, i32 7, i32 132, i32 133, i32 134, i32 135, i32 12, i32 13, i32 14, i32 15, i32 140, i32 141, i32 142, i32 143, i32 32, i32 33, i32 34, i32 35, i32 160, i32 161, i32 162, i32 163, i32 40, i32 41, i32 42, i32 43, i32 168, i32 169, i32 170, i32 171, i32 36, i32 37, i32 38, i32 39, i32 164, i32 165, i32 166, i32 167, i32 44, i32 45, i32 46, i32 47, i32 172, i32 173, i32 174, i32 175, i32 16, i32 17, i32 18, i32 19, i32 144, i32 145, i32 146, i32 147, i32 24, i32 25, i32 26, i32 27, i32 152, i32 153, i32 154, i32 155, i32 20, i32 21, i32 22, i32 23, i32 148, i32 149, i32 150, i32 151, i32 28, i32 29, i32 30, i32 31, i32 156, i32 157, i32 158, i32 159, i32 48, i32 49, i32 50, i32 51, i32 176, i32 177, i32 178, i32 179, i32 56, i32 57, i32 58, i32 59, i32 184, i32 185, i32 186, i32 187, i32 52, i32 53, i32 54, i32 55, i32 180, i32 181, i32 182, i32 183, i32 60, i32 61, i32 62, i32 63, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 192, i32 193, i32 194, i32 195, i32 72, i32 73, i32 74, i32 75, i32 200, i32 201, i32 202, i32 203, i32 68, i32 69, i32 70, i32 71, i32 196, i32 197, i32 198, i32 199, i32 76, i32 77, i32 78, i32 79, i32 204, i32 205, i32 206, i32 207, i32 96, i32 97, i32 98, i32 99, i32 224, i32 225, i32 226, i32 227, i32 104, i32 105, i32 106, i32 107, i32 232, i32 233, i32 234, i32 235, i32 100, i32 101, i32 102, i32 103, i32 228, i32 229, i32 230, i32 231, i32 108, i32 109, i32 110, i32 111, i32 236, i32 237, i32 238, i32 239, i32 80, i32 81, i32 82, i32 83, i32 208, i32 209, i32 210, i32 211, i32 88, i32 89, i32 90, i32 91, i32 216, i32 217, i32 218, i32 219, i32 84, i32 85, i32 86, i32 87, i32 212, i32 213, i32 214, i32 215, i32 92, i32 93, i32 94, i32 95, i32 220, i32 221, i32 222, i32 223, i32 112, i32 113, i32 114, i32 115, i32 240, i32 241, i32 242, i32 243, i32 120, i32 121, i32 122, i32 123, i32 248, i32 249, i32 250, i32 251, i32 116, i32 117, i32 118, i32 119, i32 244, i32 245, i32 246, i32 247, i32 124, i32 125, i32 126, i32 127, i32 252, i32 253, i32 254, i32 255> @@ -683,7 +683,7 @@ define <256 x i8> @vshuff_54(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_55: -; CHECK: [[REG55:r[0-9]+]] = #85 +; CHECK: [[REG55:r[0-9]+]] = #-43 ; CHECK: vshuff(v1,v0,[[REG55]]) define <256 x i8> @vshuff_55(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 
130, i32 1, i32 129, i32 3, i32 131, i32 8, i32 136, i32 10, i32 138, i32 9, i32 137, i32 11, i32 139, i32 4, i32 132, i32 6, i32 134, i32 5, i32 133, i32 7, i32 135, i32 12, i32 140, i32 14, i32 142, i32 13, i32 141, i32 15, i32 143, i32 32, i32 160, i32 34, i32 162, i32 33, i32 161, i32 35, i32 163, i32 40, i32 168, i32 42, i32 170, i32 41, i32 169, i32 43, i32 171, i32 36, i32 164, i32 38, i32 166, i32 37, i32 165, i32 39, i32 167, i32 44, i32 172, i32 46, i32 174, i32 45, i32 173, i32 47, i32 175, i32 16, i32 144, i32 18, i32 146, i32 17, i32 145, i32 19, i32 147, i32 24, i32 152, i32 26, i32 154, i32 25, i32 153, i32 27, i32 155, i32 20, i32 148, i32 22, i32 150, i32 21, i32 149, i32 23, i32 151, i32 28, i32 156, i32 30, i32 158, i32 29, i32 157, i32 31, i32 159, i32 48, i32 176, i32 50, i32 178, i32 49, i32 177, i32 51, i32 179, i32 56, i32 184, i32 58, i32 186, i32 57, i32 185, i32 59, i32 187, i32 52, i32 180, i32 54, i32 182, i32 53, i32 181, i32 55, i32 183, i32 60, i32 188, i32 62, i32 190, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 65, i32 193, i32 67, i32 195, i32 72, i32 200, i32 74, i32 202, i32 73, i32 201, i32 75, i32 203, i32 68, i32 196, i32 70, i32 198, i32 69, i32 197, i32 71, i32 199, i32 76, i32 204, i32 78, i32 206, i32 77, i32 205, i32 79, i32 207, i32 96, i32 224, i32 98, i32 226, i32 97, i32 225, i32 99, i32 227, i32 104, i32 232, i32 106, i32 234, i32 105, i32 233, i32 107, i32 235, i32 100, i32 228, i32 102, i32 230, i32 101, i32 229, i32 103, i32 231, i32 108, i32 236, i32 110, i32 238, i32 109, i32 237, i32 111, i32 239, i32 80, i32 208, i32 82, i32 210, i32 81, i32 209, i32 83, i32 211, i32 88, i32 216, i32 90, i32 218, i32 89, i32 217, i32 91, i32 219, i32 84, i32 212, i32 86, i32 214, i32 85, i32 213, i32 87, i32 215, i32 92, i32 220, i32 94, i32 222, i32 93, i32 221, i32 95, i32 223, i32 112, i32 240, i32 114, i32 242, i32 113, i32 241, i32 115, i32 243, i32 120, i32 248, i32 122, i32 250, i32 121, i32 249, i32 123, i32 251, i32 116, i32 244, i32 118, i32 246, i32 117, i32 245, i32 119, i32 247, i32 124, i32 252, i32 126, i32 254, i32 125, i32 253, i32 127, i32 255> @@ -691,7 +691,7 @@ define <256 x i8> @vshuff_55(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_56: -; CHECK: [[REG56:r[0-9]+]] = #86 +; CHECK: [[REG56:r[0-9]+]] = #-42 ; CHECK: vshuff(v1,v0,[[REG56]]) define <256 x i8> @vshuff_56(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 2, i32 3, i32 130, i32 131, i32 8, i32 9, i32 136, i32 137, i32 10, i32 11, i32 138, i32 139, i32 4, i32 5, i32 132, i32 133, i32 6, i32 7, i32 134, i32 135, i32 12, i32 13, i32 140, i32 141, i32 14, i32 15, i32 142, i32 143, i32 32, i32 33, i32 160, i32 161, i32 34, i32 35, i32 162, i32 163, i32 40, i32 41, i32 168, i32 169, i32 42, i32 43, i32 170, i32 171, i32 36, i32 37, i32 164, i32 165, i32 38, i32 39, i32 166, i32 167, i32 44, i32 45, i32 172, i32 173, i32 46, i32 47, i32 174, i32 175, i32 16, i32 17, i32 144, i32 145, i32 18, i32 19, i32 146, i32 147, i32 24, i32 25, i32 152, i32 153, i32 26, i32 27, i32 154, i32 155, i32 20, i32 21, i32 148, i32 149, i32 22, i32 23, i32 150, i32 151, i32 28, i32 29, i32 156, i32 157, i32 30, i32 31, i32 158, i32 159, i32 48, i32 49, i32 176, i32 177, i32 50, i32 51, i32 178, i32 179, i32 56, i32 57, i32 184, i32 185, i32 58, i32 59, i32 186, i32 187, i32 52, i32 53, i32 180, i32 181, i32 54, i32 55, i32 182, i32 183, i32 60, i32 61, i32 188, i32 
189, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 66, i32 67, i32 194, i32 195, i32 72, i32 73, i32 200, i32 201, i32 74, i32 75, i32 202, i32 203, i32 68, i32 69, i32 196, i32 197, i32 70, i32 71, i32 198, i32 199, i32 76, i32 77, i32 204, i32 205, i32 78, i32 79, i32 206, i32 207, i32 96, i32 97, i32 224, i32 225, i32 98, i32 99, i32 226, i32 227, i32 104, i32 105, i32 232, i32 233, i32 106, i32 107, i32 234, i32 235, i32 100, i32 101, i32 228, i32 229, i32 102, i32 103, i32 230, i32 231, i32 108, i32 109, i32 236, i32 237, i32 110, i32 111, i32 238, i32 239, i32 80, i32 81, i32 208, i32 209, i32 82, i32 83, i32 210, i32 211, i32 88, i32 89, i32 216, i32 217, i32 90, i32 91, i32 218, i32 219, i32 84, i32 85, i32 212, i32 213, i32 86, i32 87, i32 214, i32 215, i32 92, i32 93, i32 220, i32 221, i32 94, i32 95, i32 222, i32 223, i32 112, i32 113, i32 240, i32 241, i32 114, i32 115, i32 242, i32 243, i32 120, i32 121, i32 248, i32 249, i32 122, i32 123, i32 250, i32 251, i32 116, i32 117, i32 244, i32 245, i32 118, i32 119, i32 246, i32 247, i32 124, i32 125, i32 252, i32 253, i32 126, i32 127, i32 254, i32 255> @@ -699,7 +699,7 @@ define <256 x i8> @vshuff_56(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_57: -; CHECK: [[REG57:r[0-9]+]] = #87 +; CHECK: [[REG57:r[0-9]+]] = #-41 ; CHECK: vshuff(v1,v0,[[REG57]]) define <256 x i8> @vshuff_57(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 2, i32 130, i32 3, i32 131, i32 8, i32 136, i32 9, i32 137, i32 10, i32 138, i32 11, i32 139, i32 4, i32 132, i32 5, i32 133, i32 6, i32 134, i32 7, i32 135, i32 12, i32 140, i32 13, i32 141, i32 14, i32 142, i32 15, i32 143, i32 32, i32 160, i32 33, i32 161, i32 34, i32 162, i32 35, i32 163, i32 40, i32 168, i32 41, i32 169, i32 42, i32 170, i32 43, i32 171, i32 36, i32 164, i32 37, i32 165, i32 38, i32 166, i32 39, i32 167, i32 44, i32 172, i32 45, i32 173, i32 46, i32 174, i32 47, i32 175, i32 16, i32 144, i32 17, i32 145, i32 18, i32 146, i32 19, i32 147, i32 24, i32 152, i32 25, i32 153, i32 26, i32 154, i32 27, i32 155, i32 20, i32 148, i32 21, i32 149, i32 22, i32 150, i32 23, i32 151, i32 28, i32 156, i32 29, i32 157, i32 30, i32 158, i32 31, i32 159, i32 48, i32 176, i32 49, i32 177, i32 50, i32 178, i32 51, i32 179, i32 56, i32 184, i32 57, i32 185, i32 58, i32 186, i32 59, i32 187, i32 52, i32 180, i32 53, i32 181, i32 54, i32 182, i32 55, i32 183, i32 60, i32 188, i32 61, i32 189, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 66, i32 194, i32 67, i32 195, i32 72, i32 200, i32 73, i32 201, i32 74, i32 202, i32 75, i32 203, i32 68, i32 196, i32 69, i32 197, i32 70, i32 198, i32 71, i32 199, i32 76, i32 204, i32 77, i32 205, i32 78, i32 206, i32 79, i32 207, i32 96, i32 224, i32 97, i32 225, i32 98, i32 226, i32 99, i32 227, i32 104, i32 232, i32 105, i32 233, i32 106, i32 234, i32 107, i32 235, i32 100, i32 228, i32 101, i32 229, i32 102, i32 230, i32 103, i32 231, i32 108, i32 236, i32 109, i32 237, i32 110, i32 238, i32 111, i32 239, i32 80, i32 208, i32 81, i32 209, i32 82, i32 210, i32 83, i32 211, i32 88, i32 216, i32 89, i32 217, i32 90, i32 218, i32 91, i32 219, i32 84, i32 212, i32 85, i32 213, i32 86, i32 214, i32 87, i32 215, i32 92, i32 220, i32 93, i32 221, i32 94, i32 222, i32 95, i32 223, i32 112, i32 240, i32 113, i32 241, i32 114, i32 242, i32 115, i32 243, i32 120, i32 248, i32 121, i32 249, i32 122, i32 250, i32 123, i32 251, 
i32 116, i32 244, i32 117, i32 245, i32 118, i32 246, i32 119, i32 247, i32 124, i32 252, i32 125, i32 253, i32 126, i32 254, i32 127, i32 255> @@ -707,7 +707,7 @@ define <256 x i8> @vshuff_57(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_58: -; CHECK: [[REG58:r[0-9]+]] = #88 +; CHECK: [[REG58:r[0-9]+]] = #-40 ; CHECK: vshuff(v1,v0,[[REG58]]) define <256 x i8> @vshuff_58(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -715,7 +715,7 @@ define <256 x i8> @vshuff_58(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_59: -; CHECK: [[REG59:r[0-9]+]] = #89 +; CHECK: [[REG59:r[0-9]+]] = #-39 ; CHECK: vshuff(v1,v0,[[REG59]]) define <256 x i8> @vshuff_59(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 4, i32 132, i32 6, i32 134, i32 1, i32 129, i32 3, i32 131, i32 5, i32 133, i32 7, i32 135, i32 8, i32 136, i32 10, i32 138, i32 12, i32 140, i32 14, i32 142, i32 9, i32 137, i32 11, i32 139, i32 13, i32 141, i32 15, i32 143, i32 32, i32 160, i32 34, i32 162, i32 36, i32 164, i32 38, i32 166, i32 33, i32 161, i32 35, i32 163, i32 37, i32 165, i32 39, i32 167, i32 40, i32 168, i32 42, i32 170, i32 44, i32 172, i32 46, i32 174, i32 41, i32 169, i32 43, i32 171, i32 45, i32 173, 
i32 47, i32 175, i32 16, i32 144, i32 18, i32 146, i32 20, i32 148, i32 22, i32 150, i32 17, i32 145, i32 19, i32 147, i32 21, i32 149, i32 23, i32 151, i32 24, i32 152, i32 26, i32 154, i32 28, i32 156, i32 30, i32 158, i32 25, i32 153, i32 27, i32 155, i32 29, i32 157, i32 31, i32 159, i32 48, i32 176, i32 50, i32 178, i32 52, i32 180, i32 54, i32 182, i32 49, i32 177, i32 51, i32 179, i32 53, i32 181, i32 55, i32 183, i32 56, i32 184, i32 58, i32 186, i32 60, i32 188, i32 62, i32 190, i32 57, i32 185, i32 59, i32 187, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 68, i32 196, i32 70, i32 198, i32 65, i32 193, i32 67, i32 195, i32 69, i32 197, i32 71, i32 199, i32 72, i32 200, i32 74, i32 202, i32 76, i32 204, i32 78, i32 206, i32 73, i32 201, i32 75, i32 203, i32 77, i32 205, i32 79, i32 207, i32 96, i32 224, i32 98, i32 226, i32 100, i32 228, i32 102, i32 230, i32 97, i32 225, i32 99, i32 227, i32 101, i32 229, i32 103, i32 231, i32 104, i32 232, i32 106, i32 234, i32 108, i32 236, i32 110, i32 238, i32 105, i32 233, i32 107, i32 235, i32 109, i32 237, i32 111, i32 239, i32 80, i32 208, i32 82, i32 210, i32 84, i32 212, i32 86, i32 214, i32 81, i32 209, i32 83, i32 211, i32 85, i32 213, i32 87, i32 215, i32 88, i32 216, i32 90, i32 218, i32 92, i32 220, i32 94, i32 222, i32 89, i32 217, i32 91, i32 219, i32 93, i32 221, i32 95, i32 223, i32 112, i32 240, i32 114, i32 242, i32 116, i32 244, i32 118, i32 246, i32 113, i32 241, i32 115, i32 243, i32 117, i32 245, i32 119, i32 247, i32 120, i32 248, i32 122, i32 250, i32 124, i32 252, i32 126, i32 254, i32 121, i32 249, i32 123, i32 251, i32 125, i32 253, i32 127, i32 255> @@ -723,7 +723,7 @@ define <256 x i8> @vshuff_59(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_5a: -; CHECK: [[REG5a:r[0-9]+]] = #90 +; CHECK: [[REG5a:r[0-9]+]] = #-38 ; CHECK: vshuff(v1,v0,[[REG5a]]) define <256 x i8> @vshuff_5a(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 4, i32 5, i32 132, i32 133, i32 2, i32 3, i32 130, i32 131, i32 6, i32 7, i32 134, i32 135, i32 8, i32 9, i32 136, i32 137, i32 12, i32 13, i32 140, i32 141, i32 10, i32 11, i32 138, i32 139, i32 14, i32 15, i32 142, i32 143, i32 32, i32 33, i32 160, i32 161, i32 36, i32 37, i32 164, i32 165, i32 34, i32 35, i32 162, i32 163, i32 38, i32 39, i32 166, i32 167, i32 40, i32 41, i32 168, i32 169, i32 44, i32 45, i32 172, i32 173, i32 42, i32 43, i32 170, i32 171, i32 46, i32 47, i32 174, i32 175, i32 16, i32 17, i32 144, i32 145, i32 20, i32 21, i32 148, i32 149, i32 18, i32 19, i32 146, i32 147, i32 22, i32 23, i32 150, i32 151, i32 24, i32 25, i32 152, i32 153, i32 28, i32 29, i32 156, i32 157, i32 26, i32 27, i32 154, i32 155, i32 30, i32 31, i32 158, i32 159, i32 48, i32 49, i32 176, i32 177, i32 52, i32 53, i32 180, i32 181, i32 50, i32 51, i32 178, i32 179, i32 54, i32 55, i32 182, i32 183, i32 56, i32 57, i32 184, i32 185, i32 60, i32 61, i32 188, i32 189, i32 58, i32 59, i32 186, i32 187, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 68, i32 69, i32 196, i32 197, i32 66, i32 67, i32 194, i32 195, i32 70, i32 71, i32 198, i32 199, i32 72, i32 73, i32 200, i32 201, i32 76, i32 77, i32 204, i32 205, i32 74, i32 75, i32 202, i32 203, i32 78, i32 79, i32 206, i32 207, i32 96, i32 97, i32 224, i32 225, i32 100, i32 101, i32 228, i32 229, i32 98, i32 99, i32 226, i32 227, i32 102, i32 103, i32 230, i32 231, i32 104, i32 105, i32 232, i32 233, 
i32 108, i32 109, i32 236, i32 237, i32 106, i32 107, i32 234, i32 235, i32 110, i32 111, i32 238, i32 239, i32 80, i32 81, i32 208, i32 209, i32 84, i32 85, i32 212, i32 213, i32 82, i32 83, i32 210, i32 211, i32 86, i32 87, i32 214, i32 215, i32 88, i32 89, i32 216, i32 217, i32 92, i32 93, i32 220, i32 221, i32 90, i32 91, i32 218, i32 219, i32 94, i32 95, i32 222, i32 223, i32 112, i32 113, i32 240, i32 241, i32 116, i32 117, i32 244, i32 245, i32 114, i32 115, i32 242, i32 243, i32 118, i32 119, i32 246, i32 247, i32 120, i32 121, i32 248, i32 249, i32 124, i32 125, i32 252, i32 253, i32 122, i32 123, i32 250, i32 251, i32 126, i32 127, i32 254, i32 255> @@ -731,7 +731,7 @@ define <256 x i8> @vshuff_5a(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_5b: -; CHECK: [[REG5b:r[0-9]+]] = #91 +; CHECK: [[REG5b:r[0-9]+]] = #-37 ; CHECK: vshuff(v1,v0,[[REG5b]]) define <256 x i8> @vshuff_5b(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 4, i32 132, i32 5, i32 133, i32 2, i32 130, i32 3, i32 131, i32 6, i32 134, i32 7, i32 135, i32 8, i32 136, i32 9, i32 137, i32 12, i32 140, i32 13, i32 141, i32 10, i32 138, i32 11, i32 139, i32 14, i32 142, i32 15, i32 143, i32 32, i32 160, i32 33, i32 161, i32 36, i32 164, i32 37, i32 165, i32 34, i32 162, i32 35, i32 163, i32 38, i32 166, i32 39, i32 167, i32 40, i32 168, i32 41, i32 169, i32 44, i32 172, i32 45, i32 173, i32 42, i32 170, i32 43, i32 171, i32 46, i32 174, i32 47, i32 175, i32 16, i32 144, i32 17, i32 145, i32 20, i32 148, i32 21, i32 149, i32 18, i32 146, i32 19, i32 147, i32 22, i32 150, i32 23, i32 151, i32 24, i32 152, i32 25, i32 153, i32 28, i32 156, i32 29, i32 157, i32 26, i32 154, i32 27, i32 155, i32 30, i32 158, i32 31, i32 159, i32 48, i32 176, i32 49, i32 177, i32 52, i32 180, i32 53, i32 181, i32 50, i32 178, i32 51, i32 179, i32 54, i32 182, i32 55, i32 183, i32 56, i32 184, i32 57, i32 185, i32 60, i32 188, i32 61, i32 189, i32 58, i32 186, i32 59, i32 187, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 68, i32 196, i32 69, i32 197, i32 66, i32 194, i32 67, i32 195, i32 70, i32 198, i32 71, i32 199, i32 72, i32 200, i32 73, i32 201, i32 76, i32 204, i32 77, i32 205, i32 74, i32 202, i32 75, i32 203, i32 78, i32 206, i32 79, i32 207, i32 96, i32 224, i32 97, i32 225, i32 100, i32 228, i32 101, i32 229, i32 98, i32 226, i32 99, i32 227, i32 102, i32 230, i32 103, i32 231, i32 104, i32 232, i32 105, i32 233, i32 108, i32 236, i32 109, i32 237, i32 106, i32 234, i32 107, i32 235, i32 110, i32 238, i32 111, i32 239, i32 80, i32 208, i32 81, i32 209, i32 84, i32 212, i32 85, i32 213, i32 82, i32 210, i32 83, i32 211, i32 86, i32 214, i32 87, i32 215, i32 88, i32 216, i32 89, i32 217, i32 92, i32 220, i32 93, i32 221, i32 90, i32 218, i32 91, i32 219, i32 94, i32 222, i32 95, i32 223, i32 112, i32 240, i32 113, i32 241, i32 116, i32 244, i32 117, i32 245, i32 114, i32 242, i32 115, i32 243, i32 118, i32 246, i32 119, i32 247, i32 120, i32 248, i32 121, i32 249, i32 124, i32 252, i32 125, i32 253, i32 122, i32 250, i32 123, i32 251, i32 126, i32 254, i32 127, i32 255> @@ -739,7 +739,7 @@ define <256 x i8> @vshuff_5b(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_5c: -; CHECK: [[REG5c:r[0-9]+]] = #92 +; CHECK: [[REG5c:r[0-9]+]] = #-36 ; CHECK: vshuff(v1,v0,[[REG5c]]) define <256 x i8> @vshuff_5c(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x 
i32> < i32 0, i32 1, i32 2, i32 3, i32 128, i32 129, i32 130, i32 131, i32 4, i32 5, i32 6, i32 7, i32 132, i32 133, i32 134, i32 135, i32 8, i32 9, i32 10, i32 11, i32 136, i32 137, i32 138, i32 139, i32 12, i32 13, i32 14, i32 15, i32 140, i32 141, i32 142, i32 143, i32 32, i32 33, i32 34, i32 35, i32 160, i32 161, i32 162, i32 163, i32 36, i32 37, i32 38, i32 39, i32 164, i32 165, i32 166, i32 167, i32 40, i32 41, i32 42, i32 43, i32 168, i32 169, i32 170, i32 171, i32 44, i32 45, i32 46, i32 47, i32 172, i32 173, i32 174, i32 175, i32 16, i32 17, i32 18, i32 19, i32 144, i32 145, i32 146, i32 147, i32 20, i32 21, i32 22, i32 23, i32 148, i32 149, i32 150, i32 151, i32 24, i32 25, i32 26, i32 27, i32 152, i32 153, i32 154, i32 155, i32 28, i32 29, i32 30, i32 31, i32 156, i32 157, i32 158, i32 159, i32 48, i32 49, i32 50, i32 51, i32 176, i32 177, i32 178, i32 179, i32 52, i32 53, i32 54, i32 55, i32 180, i32 181, i32 182, i32 183, i32 56, i32 57, i32 58, i32 59, i32 184, i32 185, i32 186, i32 187, i32 60, i32 61, i32 62, i32 63, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 192, i32 193, i32 194, i32 195, i32 68, i32 69, i32 70, i32 71, i32 196, i32 197, i32 198, i32 199, i32 72, i32 73, i32 74, i32 75, i32 200, i32 201, i32 202, i32 203, i32 76, i32 77, i32 78, i32 79, i32 204, i32 205, i32 206, i32 207, i32 96, i32 97, i32 98, i32 99, i32 224, i32 225, i32 226, i32 227, i32 100, i32 101, i32 102, i32 103, i32 228, i32 229, i32 230, i32 231, i32 104, i32 105, i32 106, i32 107, i32 232, i32 233, i32 234, i32 235, i32 108, i32 109, i32 110, i32 111, i32 236, i32 237, i32 238, i32 239, i32 80, i32 81, i32 82, i32 83, i32 208, i32 209, i32 210, i32 211, i32 84, i32 85, i32 86, i32 87, i32 212, i32 213, i32 214, i32 215, i32 88, i32 89, i32 90, i32 91, i32 216, i32 217, i32 218, i32 219, i32 92, i32 93, i32 94, i32 95, i32 220, i32 221, i32 222, i32 223, i32 112, i32 113, i32 114, i32 115, i32 240, i32 241, i32 242, i32 243, i32 116, i32 117, i32 118, i32 119, i32 244, i32 245, i32 246, i32 247, i32 120, i32 121, i32 122, i32 123, i32 248, i32 249, i32 250, i32 251, i32 124, i32 125, i32 126, i32 127, i32 252, i32 253, i32 254, i32 255> @@ -747,7 +747,7 @@ define <256 x i8> @vshuff_5c(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_5d: -; CHECK: [[REG5d:r[0-9]+]] = #93 +; CHECK: [[REG5d:r[0-9]+]] = #-35 ; CHECK: vshuff(v1,v0,[[REG5d]]) define <256 x i8> @vshuff_5d(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 1, i32 129, i32 3, i32 131, i32 4, i32 132, i32 6, i32 134, i32 5, i32 133, i32 7, i32 135, i32 8, i32 136, i32 10, i32 138, i32 9, i32 137, i32 11, i32 139, i32 12, i32 140, i32 14, i32 142, i32 13, i32 141, i32 15, i32 143, i32 32, i32 160, i32 34, i32 162, i32 33, i32 161, i32 35, i32 163, i32 36, i32 164, i32 38, i32 166, i32 37, i32 165, i32 39, i32 167, i32 40, i32 168, i32 42, i32 170, i32 41, i32 169, i32 43, i32 171, i32 44, i32 172, i32 46, i32 174, i32 45, i32 173, i32 47, i32 175, i32 16, i32 144, i32 18, i32 146, i32 17, i32 145, i32 19, i32 147, i32 20, i32 148, i32 22, i32 150, i32 21, i32 149, i32 23, i32 151, i32 24, i32 152, i32 26, i32 154, i32 25, i32 153, i32 27, i32 155, i32 28, i32 156, i32 30, i32 158, i32 29, i32 157, i32 31, i32 159, i32 48, i32 176, i32 50, i32 178, i32 49, i32 177, i32 51, i32 179, i32 52, i32 180, i32 54, i32 182, i32 53, i32 181, i32 55, i32 183, i32 56, i32 184, i32 58, i32 186, i32 57, i32 185, i32 59, i32 
187, i32 60, i32 188, i32 62, i32 190, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 65, i32 193, i32 67, i32 195, i32 68, i32 196, i32 70, i32 198, i32 69, i32 197, i32 71, i32 199, i32 72, i32 200, i32 74, i32 202, i32 73, i32 201, i32 75, i32 203, i32 76, i32 204, i32 78, i32 206, i32 77, i32 205, i32 79, i32 207, i32 96, i32 224, i32 98, i32 226, i32 97, i32 225, i32 99, i32 227, i32 100, i32 228, i32 102, i32 230, i32 101, i32 229, i32 103, i32 231, i32 104, i32 232, i32 106, i32 234, i32 105, i32 233, i32 107, i32 235, i32 108, i32 236, i32 110, i32 238, i32 109, i32 237, i32 111, i32 239, i32 80, i32 208, i32 82, i32 210, i32 81, i32 209, i32 83, i32 211, i32 84, i32 212, i32 86, i32 214, i32 85, i32 213, i32 87, i32 215, i32 88, i32 216, i32 90, i32 218, i32 89, i32 217, i32 91, i32 219, i32 92, i32 220, i32 94, i32 222, i32 93, i32 221, i32 95, i32 223, i32 112, i32 240, i32 114, i32 242, i32 113, i32 241, i32 115, i32 243, i32 116, i32 244, i32 118, i32 246, i32 117, i32 245, i32 119, i32 247, i32 120, i32 248, i32 122, i32 250, i32 121, i32 249, i32 123, i32 251, i32 124, i32 252, i32 126, i32 254, i32 125, i32 253, i32 127, i32 255> @@ -755,7 +755,7 @@ define <256 x i8> @vshuff_5d(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_5e: -; CHECK: [[REG5e:r[0-9]+]] = #94 +; CHECK: [[REG5e:r[0-9]+]] = #-34 ; CHECK: vshuff(v1,v0,[[REG5e]]) define <256 x i8> @vshuff_5e(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 2, i32 3, i32 130, i32 131, i32 4, i32 5, i32 132, i32 133, i32 6, i32 7, i32 134, i32 135, i32 8, i32 9, i32 136, i32 137, i32 10, i32 11, i32 138, i32 139, i32 12, i32 13, i32 140, i32 141, i32 14, i32 15, i32 142, i32 143, i32 32, i32 33, i32 160, i32 161, i32 34, i32 35, i32 162, i32 163, i32 36, i32 37, i32 164, i32 165, i32 38, i32 39, i32 166, i32 167, i32 40, i32 41, i32 168, i32 169, i32 42, i32 43, i32 170, i32 171, i32 44, i32 45, i32 172, i32 173, i32 46, i32 47, i32 174, i32 175, i32 16, i32 17, i32 144, i32 145, i32 18, i32 19, i32 146, i32 147, i32 20, i32 21, i32 148, i32 149, i32 22, i32 23, i32 150, i32 151, i32 24, i32 25, i32 152, i32 153, i32 26, i32 27, i32 154, i32 155, i32 28, i32 29, i32 156, i32 157, i32 30, i32 31, i32 158, i32 159, i32 48, i32 49, i32 176, i32 177, i32 50, i32 51, i32 178, i32 179, i32 52, i32 53, i32 180, i32 181, i32 54, i32 55, i32 182, i32 183, i32 56, i32 57, i32 184, i32 185, i32 58, i32 59, i32 186, i32 187, i32 60, i32 61, i32 188, i32 189, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 66, i32 67, i32 194, i32 195, i32 68, i32 69, i32 196, i32 197, i32 70, i32 71, i32 198, i32 199, i32 72, i32 73, i32 200, i32 201, i32 74, i32 75, i32 202, i32 203, i32 76, i32 77, i32 204, i32 205, i32 78, i32 79, i32 206, i32 207, i32 96, i32 97, i32 224, i32 225, i32 98, i32 99, i32 226, i32 227, i32 100, i32 101, i32 228, i32 229, i32 102, i32 103, i32 230, i32 231, i32 104, i32 105, i32 232, i32 233, i32 106, i32 107, i32 234, i32 235, i32 108, i32 109, i32 236, i32 237, i32 110, i32 111, i32 238, i32 239, i32 80, i32 81, i32 208, i32 209, i32 82, i32 83, i32 210, i32 211, i32 84, i32 85, i32 212, i32 213, i32 86, i32 87, i32 214, i32 215, i32 88, i32 89, i32 216, i32 217, i32 90, i32 91, i32 218, i32 219, i32 92, i32 93, i32 220, i32 221, i32 94, i32 95, i32 222, i32 223, i32 112, i32 113, i32 240, i32 241, i32 114, i32 115, i32 242, i32 243, i32 116, i32 117, i32 244, i32 245, 
i32 118, i32 119, i32 246, i32 247, i32 120, i32 121, i32 248, i32 249, i32 122, i32 123, i32 250, i32 251, i32 124, i32 125, i32 252, i32 253, i32 126, i32 127, i32 254, i32 255> @@ -763,7 +763,7 @@ define <256 x i8> @vshuff_5e(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_5f: -; CHECK: [[REG5f:r[0-9]+]] = #95 +; CHECK: [[REG5f:r[0-9]+]] = #-33 ; CHECK: vshuff(v1,v0,[[REG5f]]) define <256 x i8> @vshuff_5f(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 2, i32 130, i32 3, i32 131, i32 4, i32 132, i32 5, i32 133, i32 6, i32 134, i32 7, i32 135, i32 8, i32 136, i32 9, i32 137, i32 10, i32 138, i32 11, i32 139, i32 12, i32 140, i32 13, i32 141, i32 14, i32 142, i32 15, i32 143, i32 32, i32 160, i32 33, i32 161, i32 34, i32 162, i32 35, i32 163, i32 36, i32 164, i32 37, i32 165, i32 38, i32 166, i32 39, i32 167, i32 40, i32 168, i32 41, i32 169, i32 42, i32 170, i32 43, i32 171, i32 44, i32 172, i32 45, i32 173, i32 46, i32 174, i32 47, i32 175, i32 16, i32 144, i32 17, i32 145, i32 18, i32 146, i32 19, i32 147, i32 20, i32 148, i32 21, i32 149, i32 22, i32 150, i32 23, i32 151, i32 24, i32 152, i32 25, i32 153, i32 26, i32 154, i32 27, i32 155, i32 28, i32 156, i32 29, i32 157, i32 30, i32 158, i32 31, i32 159, i32 48, i32 176, i32 49, i32 177, i32 50, i32 178, i32 51, i32 179, i32 52, i32 180, i32 53, i32 181, i32 54, i32 182, i32 55, i32 183, i32 56, i32 184, i32 57, i32 185, i32 58, i32 186, i32 59, i32 187, i32 60, i32 188, i32 61, i32 189, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 66, i32 194, i32 67, i32 195, i32 68, i32 196, i32 69, i32 197, i32 70, i32 198, i32 71, i32 199, i32 72, i32 200, i32 73, i32 201, i32 74, i32 202, i32 75, i32 203, i32 76, i32 204, i32 77, i32 205, i32 78, i32 206, i32 79, i32 207, i32 96, i32 224, i32 97, i32 225, i32 98, i32 226, i32 99, i32 227, i32 100, i32 228, i32 101, i32 229, i32 102, i32 230, i32 103, i32 231, i32 104, i32 232, i32 105, i32 233, i32 106, i32 234, i32 107, i32 235, i32 108, i32 236, i32 109, i32 237, i32 110, i32 238, i32 111, i32 239, i32 80, i32 208, i32 81, i32 209, i32 82, i32 210, i32 83, i32 211, i32 84, i32 212, i32 85, i32 213, i32 86, i32 214, i32 87, i32 215, i32 88, i32 216, i32 89, i32 217, i32 90, i32 218, i32 91, i32 219, i32 92, i32 220, i32 93, i32 221, i32 94, i32 222, i32 95, i32 223, i32 112, i32 240, i32 113, i32 241, i32 114, i32 242, i32 115, i32 243, i32 116, i32 244, i32 117, i32 245, i32 118, i32 246, i32 119, i32 247, i32 120, i32 248, i32 121, i32 249, i32 122, i32 250, i32 123, i32 251, i32 124, i32 252, i32 125, i32 253, i32 126, i32 254, i32 127, i32 255> @@ -771,7 +771,7 @@ define <256 x i8> @vshuff_5f(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_60: -; CHECK: [[REG60:r[0-9]+]] = #96 +; CHECK: [[REG60:r[0-9]+]] = #-32 ; CHECK: vshuff(v1,v0,[[REG60]]) define <256 x i8> @vshuff_60(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, 
i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -779,7 +779,7 @@ define <256 x i8> @vshuff_60(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_61: -; CHECK: [[REG61:r[0-9]+]] = #97 +; CHECK: [[REG61:r[0-9]+]] = #-31 ; CHECK: vshuff(v1,v0,[[REG61]]) define <256 x i8> @vshuff_61(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 4, i32 132, i32 6, i32 134, i32 8, i32 136, i32 10, i32 138, i32 12, i32 140, i32 14, i32 142, i32 16, i32 144, i32 18, i32 146, i32 20, i32 148, i32 22, i32 150, i32 24, i32 152, i32 26, i32 154, i32 28, i32 156, i32 30, i32 158, i32 1, i32 129, i32 3, i32 131, i32 5, i32 133, i32 7, i32 135, i32 9, i32 137, i32 11, i32 139, i32 13, i32 141, i32 15, i32 143, i32 17, i32 145, i32 19, i32 147, i32 21, i32 149, i32 23, i32 151, i32 25, i32 153, i32 27, i32 155, i32 29, i32 157, i32 31, i32 159, i32 32, i32 160, i32 34, i32 162, i32 36, i32 164, i32 38, i32 166, i32 40, i32 168, i32 42, i32 170, i32 44, i32 172, i32 46, i32 174, i32 48, i32 176, i32 50, i32 178, i32 52, i32 180, i32 54, i32 182, i32 56, i32 184, i32 58, i32 186, i32 60, i32 188, i32 62, i32 190, i32 33, i32 161, i32 35, i32 163, i32 37, i32 165, i32 39, i32 167, i32 41, i32 169, i32 43, i32 171, i32 45, i32 173, i32 47, i32 175, i32 49, i32 177, i32 51, i32 179, i32 53, i32 181, i32 55, i32 183, i32 57, i32 185, i32 59, i32 187, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 68, i32 196, i32 70, i32 198, i32 72, i32 200, i32 74, i32 202, i32 76, i32 204, i32 78, i32 206, i32 80, i32 208, i32 82, i32 210, i32 84, i32 212, i32 86, i32 214, i32 88, i32 216, i32 90, i32 218, i32 92, i32 220, i32 94, i32 222, i32 65, i32 193, i32 67, i32 195, i32 69, i32 197, i32 71, i32 199, i32 73, i32 201, i32 75, i32 203, i32 77, i32 205, i32 79, i32 207, i32 
81, i32 209, i32 83, i32 211, i32 85, i32 213, i32 87, i32 215, i32 89, i32 217, i32 91, i32 219, i32 93, i32 221, i32 95, i32 223, i32 96, i32 224, i32 98, i32 226, i32 100, i32 228, i32 102, i32 230, i32 104, i32 232, i32 106, i32 234, i32 108, i32 236, i32 110, i32 238, i32 112, i32 240, i32 114, i32 242, i32 116, i32 244, i32 118, i32 246, i32 120, i32 248, i32 122, i32 250, i32 124, i32 252, i32 126, i32 254, i32 97, i32 225, i32 99, i32 227, i32 101, i32 229, i32 103, i32 231, i32 105, i32 233, i32 107, i32 235, i32 109, i32 237, i32 111, i32 239, i32 113, i32 241, i32 115, i32 243, i32 117, i32 245, i32 119, i32 247, i32 121, i32 249, i32 123, i32 251, i32 125, i32 253, i32 127, i32 255> @@ -787,7 +787,7 @@ define <256 x i8> @vshuff_61(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_62: -; CHECK: [[REG62:r[0-9]+]] = #98 +; CHECK: [[REG62:r[0-9]+]] = #-30 ; CHECK: vshuff(v1,v0,[[REG62]]) define <256 x i8> @vshuff_62(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 4, i32 5, i32 132, i32 133, i32 8, i32 9, i32 136, i32 137, i32 12, i32 13, i32 140, i32 141, i32 16, i32 17, i32 144, i32 145, i32 20, i32 21, i32 148, i32 149, i32 24, i32 25, i32 152, i32 153, i32 28, i32 29, i32 156, i32 157, i32 2, i32 3, i32 130, i32 131, i32 6, i32 7, i32 134, i32 135, i32 10, i32 11, i32 138, i32 139, i32 14, i32 15, i32 142, i32 143, i32 18, i32 19, i32 146, i32 147, i32 22, i32 23, i32 150, i32 151, i32 26, i32 27, i32 154, i32 155, i32 30, i32 31, i32 158, i32 159, i32 32, i32 33, i32 160, i32 161, i32 36, i32 37, i32 164, i32 165, i32 40, i32 41, i32 168, i32 169, i32 44, i32 45, i32 172, i32 173, i32 48, i32 49, i32 176, i32 177, i32 52, i32 53, i32 180, i32 181, i32 56, i32 57, i32 184, i32 185, i32 60, i32 61, i32 188, i32 189, i32 34, i32 35, i32 162, i32 163, i32 38, i32 39, i32 166, i32 167, i32 42, i32 43, i32 170, i32 171, i32 46, i32 47, i32 174, i32 175, i32 50, i32 51, i32 178, i32 179, i32 54, i32 55, i32 182, i32 183, i32 58, i32 59, i32 186, i32 187, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 68, i32 69, i32 196, i32 197, i32 72, i32 73, i32 200, i32 201, i32 76, i32 77, i32 204, i32 205, i32 80, i32 81, i32 208, i32 209, i32 84, i32 85, i32 212, i32 213, i32 88, i32 89, i32 216, i32 217, i32 92, i32 93, i32 220, i32 221, i32 66, i32 67, i32 194, i32 195, i32 70, i32 71, i32 198, i32 199, i32 74, i32 75, i32 202, i32 203, i32 78, i32 79, i32 206, i32 207, i32 82, i32 83, i32 210, i32 211, i32 86, i32 87, i32 214, i32 215, i32 90, i32 91, i32 218, i32 219, i32 94, i32 95, i32 222, i32 223, i32 96, i32 97, i32 224, i32 225, i32 100, i32 101, i32 228, i32 229, i32 104, i32 105, i32 232, i32 233, i32 108, i32 109, i32 236, i32 237, i32 112, i32 113, i32 240, i32 241, i32 116, i32 117, i32 244, i32 245, i32 120, i32 121, i32 248, i32 249, i32 124, i32 125, i32 252, i32 253, i32 98, i32 99, i32 226, i32 227, i32 102, i32 103, i32 230, i32 231, i32 106, i32 107, i32 234, i32 235, i32 110, i32 111, i32 238, i32 239, i32 114, i32 115, i32 242, i32 243, i32 118, i32 119, i32 246, i32 247, i32 122, i32 123, i32 250, i32 251, i32 126, i32 127, i32 254, i32 255> @@ -795,7 +795,7 @@ define <256 x i8> @vshuff_62(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_63: -; CHECK: [[REG63:r[0-9]+]] = #99 +; CHECK: [[REG63:r[0-9]+]] = #-29 ; CHECK: vshuff(v1,v0,[[REG63]]) define <256 x i8> @vshuff_63(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x 
i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 4, i32 132, i32 5, i32 133, i32 8, i32 136, i32 9, i32 137, i32 12, i32 140, i32 13, i32 141, i32 16, i32 144, i32 17, i32 145, i32 20, i32 148, i32 21, i32 149, i32 24, i32 152, i32 25, i32 153, i32 28, i32 156, i32 29, i32 157, i32 2, i32 130, i32 3, i32 131, i32 6, i32 134, i32 7, i32 135, i32 10, i32 138, i32 11, i32 139, i32 14, i32 142, i32 15, i32 143, i32 18, i32 146, i32 19, i32 147, i32 22, i32 150, i32 23, i32 151, i32 26, i32 154, i32 27, i32 155, i32 30, i32 158, i32 31, i32 159, i32 32, i32 160, i32 33, i32 161, i32 36, i32 164, i32 37, i32 165, i32 40, i32 168, i32 41, i32 169, i32 44, i32 172, i32 45, i32 173, i32 48, i32 176, i32 49, i32 177, i32 52, i32 180, i32 53, i32 181, i32 56, i32 184, i32 57, i32 185, i32 60, i32 188, i32 61, i32 189, i32 34, i32 162, i32 35, i32 163, i32 38, i32 166, i32 39, i32 167, i32 42, i32 170, i32 43, i32 171, i32 46, i32 174, i32 47, i32 175, i32 50, i32 178, i32 51, i32 179, i32 54, i32 182, i32 55, i32 183, i32 58, i32 186, i32 59, i32 187, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 68, i32 196, i32 69, i32 197, i32 72, i32 200, i32 73, i32 201, i32 76, i32 204, i32 77, i32 205, i32 80, i32 208, i32 81, i32 209, i32 84, i32 212, i32 85, i32 213, i32 88, i32 216, i32 89, i32 217, i32 92, i32 220, i32 93, i32 221, i32 66, i32 194, i32 67, i32 195, i32 70, i32 198, i32 71, i32 199, i32 74, i32 202, i32 75, i32 203, i32 78, i32 206, i32 79, i32 207, i32 82, i32 210, i32 83, i32 211, i32 86, i32 214, i32 87, i32 215, i32 90, i32 218, i32 91, i32 219, i32 94, i32 222, i32 95, i32 223, i32 96, i32 224, i32 97, i32 225, i32 100, i32 228, i32 101, i32 229, i32 104, i32 232, i32 105, i32 233, i32 108, i32 236, i32 109, i32 237, i32 112, i32 240, i32 113, i32 241, i32 116, i32 244, i32 117, i32 245, i32 120, i32 248, i32 121, i32 249, i32 124, i32 252, i32 125, i32 253, i32 98, i32 226, i32 99, i32 227, i32 102, i32 230, i32 103, i32 231, i32 106, i32 234, i32 107, i32 235, i32 110, i32 238, i32 111, i32 239, i32 114, i32 242, i32 115, i32 243, i32 118, i32 246, i32 119, i32 247, i32 122, i32 250, i32 123, i32 251, i32 126, i32 254, i32 127, i32 255> @@ -803,7 +803,7 @@ define <256 x i8> @vshuff_63(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_64: -; CHECK: [[REG64:r[0-9]+]] = #100 +; CHECK: [[REG64:r[0-9]+]] = #-28 ; CHECK: vshuff(v1,v0,[[REG64]]) define <256 x i8> @vshuff_64(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 128, i32 129, i32 130, i32 131, i32 8, i32 9, i32 10, i32 11, i32 136, i32 137, i32 138, i32 139, i32 16, i32 17, i32 18, i32 19, i32 144, i32 145, i32 146, i32 147, i32 24, i32 25, i32 26, i32 27, i32 152, i32 153, i32 154, i32 155, i32 4, i32 5, i32 6, i32 7, i32 132, i32 133, i32 134, i32 135, i32 12, i32 13, i32 14, i32 15, i32 140, i32 141, i32 142, i32 143, i32 20, i32 21, i32 22, i32 23, i32 148, i32 149, i32 150, i32 151, i32 28, i32 29, i32 30, i32 31, i32 156, i32 157, i32 158, i32 159, i32 32, i32 33, i32 34, i32 35, i32 160, i32 161, i32 162, i32 163, i32 40, i32 41, i32 42, i32 43, i32 168, i32 169, i32 170, i32 171, i32 48, i32 49, i32 50, i32 51, i32 176, i32 177, i32 178, i32 179, i32 56, i32 57, i32 58, i32 59, i32 184, i32 185, i32 186, i32 187, i32 36, i32 37, i32 38, i32 39, i32 164, i32 165, i32 166, i32 167, i32 44, i32 45, i32 46, i32 47, i32 172, i32 173, i32 174, i32 175, i32 52, i32 53, i32 54, i32 
55, i32 180, i32 181, i32 182, i32 183, i32 60, i32 61, i32 62, i32 63, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 192, i32 193, i32 194, i32 195, i32 72, i32 73, i32 74, i32 75, i32 200, i32 201, i32 202, i32 203, i32 80, i32 81, i32 82, i32 83, i32 208, i32 209, i32 210, i32 211, i32 88, i32 89, i32 90, i32 91, i32 216, i32 217, i32 218, i32 219, i32 68, i32 69, i32 70, i32 71, i32 196, i32 197, i32 198, i32 199, i32 76, i32 77, i32 78, i32 79, i32 204, i32 205, i32 206, i32 207, i32 84, i32 85, i32 86, i32 87, i32 212, i32 213, i32 214, i32 215, i32 92, i32 93, i32 94, i32 95, i32 220, i32 221, i32 222, i32 223, i32 96, i32 97, i32 98, i32 99, i32 224, i32 225, i32 226, i32 227, i32 104, i32 105, i32 106, i32 107, i32 232, i32 233, i32 234, i32 235, i32 112, i32 113, i32 114, i32 115, i32 240, i32 241, i32 242, i32 243, i32 120, i32 121, i32 122, i32 123, i32 248, i32 249, i32 250, i32 251, i32 100, i32 101, i32 102, i32 103, i32 228, i32 229, i32 230, i32 231, i32 108, i32 109, i32 110, i32 111, i32 236, i32 237, i32 238, i32 239, i32 116, i32 117, i32 118, i32 119, i32 244, i32 245, i32 246, i32 247, i32 124, i32 125, i32 126, i32 127, i32 252, i32 253, i32 254, i32 255> @@ -811,7 +811,7 @@ define <256 x i8> @vshuff_64(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_65: -; CHECK: [[REG65:r[0-9]+]] = #101 +; CHECK: [[REG65:r[0-9]+]] = #-27 ; CHECK: vshuff(v1,v0,[[REG65]]) define <256 x i8> @vshuff_65(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 1, i32 129, i32 3, i32 131, i32 8, i32 136, i32 10, i32 138, i32 9, i32 137, i32 11, i32 139, i32 16, i32 144, i32 18, i32 146, i32 17, i32 145, i32 19, i32 147, i32 24, i32 152, i32 26, i32 154, i32 25, i32 153, i32 27, i32 155, i32 4, i32 132, i32 6, i32 134, i32 5, i32 133, i32 7, i32 135, i32 12, i32 140, i32 14, i32 142, i32 13, i32 141, i32 15, i32 143, i32 20, i32 148, i32 22, i32 150, i32 21, i32 149, i32 23, i32 151, i32 28, i32 156, i32 30, i32 158, i32 29, i32 157, i32 31, i32 159, i32 32, i32 160, i32 34, i32 162, i32 33, i32 161, i32 35, i32 163, i32 40, i32 168, i32 42, i32 170, i32 41, i32 169, i32 43, i32 171, i32 48, i32 176, i32 50, i32 178, i32 49, i32 177, i32 51, i32 179, i32 56, i32 184, i32 58, i32 186, i32 57, i32 185, i32 59, i32 187, i32 36, i32 164, i32 38, i32 166, i32 37, i32 165, i32 39, i32 167, i32 44, i32 172, i32 46, i32 174, i32 45, i32 173, i32 47, i32 175, i32 52, i32 180, i32 54, i32 182, i32 53, i32 181, i32 55, i32 183, i32 60, i32 188, i32 62, i32 190, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 65, i32 193, i32 67, i32 195, i32 72, i32 200, i32 74, i32 202, i32 73, i32 201, i32 75, i32 203, i32 80, i32 208, i32 82, i32 210, i32 81, i32 209, i32 83, i32 211, i32 88, i32 216, i32 90, i32 218, i32 89, i32 217, i32 91, i32 219, i32 68, i32 196, i32 70, i32 198, i32 69, i32 197, i32 71, i32 199, i32 76, i32 204, i32 78, i32 206, i32 77, i32 205, i32 79, i32 207, i32 84, i32 212, i32 86, i32 214, i32 85, i32 213, i32 87, i32 215, i32 92, i32 220, i32 94, i32 222, i32 93, i32 221, i32 95, i32 223, i32 96, i32 224, i32 98, i32 226, i32 97, i32 225, i32 99, i32 227, i32 104, i32 232, i32 106, i32 234, i32 105, i32 233, i32 107, i32 235, i32 112, i32 240, i32 114, i32 242, i32 113, i32 241, i32 115, i32 243, i32 120, i32 248, i32 122, i32 250, i32 121, i32 249, i32 123, i32 251, i32 100, i32 228, i32 102, i32 230, i32 101, i32 229, i32 103, i32 231, 
i32 108, i32 236, i32 110, i32 238, i32 109, i32 237, i32 111, i32 239, i32 116, i32 244, i32 118, i32 246, i32 117, i32 245, i32 119, i32 247, i32 124, i32 252, i32 126, i32 254, i32 125, i32 253, i32 127, i32 255> @@ -819,7 +819,7 @@ define <256 x i8> @vshuff_65(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_66: -; CHECK: [[REG66:r[0-9]+]] = #102 +; CHECK: [[REG66:r[0-9]+]] = #-26 ; CHECK: vshuff(v1,v0,[[REG66]]) define <256 x i8> @vshuff_66(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 2, i32 3, i32 130, i32 131, i32 8, i32 9, i32 136, i32 137, i32 10, i32 11, i32 138, i32 139, i32 16, i32 17, i32 144, i32 145, i32 18, i32 19, i32 146, i32 147, i32 24, i32 25, i32 152, i32 153, i32 26, i32 27, i32 154, i32 155, i32 4, i32 5, i32 132, i32 133, i32 6, i32 7, i32 134, i32 135, i32 12, i32 13, i32 140, i32 141, i32 14, i32 15, i32 142, i32 143, i32 20, i32 21, i32 148, i32 149, i32 22, i32 23, i32 150, i32 151, i32 28, i32 29, i32 156, i32 157, i32 30, i32 31, i32 158, i32 159, i32 32, i32 33, i32 160, i32 161, i32 34, i32 35, i32 162, i32 163, i32 40, i32 41, i32 168, i32 169, i32 42, i32 43, i32 170, i32 171, i32 48, i32 49, i32 176, i32 177, i32 50, i32 51, i32 178, i32 179, i32 56, i32 57, i32 184, i32 185, i32 58, i32 59, i32 186, i32 187, i32 36, i32 37, i32 164, i32 165, i32 38, i32 39, i32 166, i32 167, i32 44, i32 45, i32 172, i32 173, i32 46, i32 47, i32 174, i32 175, i32 52, i32 53, i32 180, i32 181, i32 54, i32 55, i32 182, i32 183, i32 60, i32 61, i32 188, i32 189, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 66, i32 67, i32 194, i32 195, i32 72, i32 73, i32 200, i32 201, i32 74, i32 75, i32 202, i32 203, i32 80, i32 81, i32 208, i32 209, i32 82, i32 83, i32 210, i32 211, i32 88, i32 89, i32 216, i32 217, i32 90, i32 91, i32 218, i32 219, i32 68, i32 69, i32 196, i32 197, i32 70, i32 71, i32 198, i32 199, i32 76, i32 77, i32 204, i32 205, i32 78, i32 79, i32 206, i32 207, i32 84, i32 85, i32 212, i32 213, i32 86, i32 87, i32 214, i32 215, i32 92, i32 93, i32 220, i32 221, i32 94, i32 95, i32 222, i32 223, i32 96, i32 97, i32 224, i32 225, i32 98, i32 99, i32 226, i32 227, i32 104, i32 105, i32 232, i32 233, i32 106, i32 107, i32 234, i32 235, i32 112, i32 113, i32 240, i32 241, i32 114, i32 115, i32 242, i32 243, i32 120, i32 121, i32 248, i32 249, i32 122, i32 123, i32 250, i32 251, i32 100, i32 101, i32 228, i32 229, i32 102, i32 103, i32 230, i32 231, i32 108, i32 109, i32 236, i32 237, i32 110, i32 111, i32 238, i32 239, i32 116, i32 117, i32 244, i32 245, i32 118, i32 119, i32 246, i32 247, i32 124, i32 125, i32 252, i32 253, i32 126, i32 127, i32 254, i32 255> @@ -827,7 +827,7 @@ define <256 x i8> @vshuff_66(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_67: -; CHECK: [[REG67:r[0-9]+]] = #103 +; CHECK: [[REG67:r[0-9]+]] = #-25 ; CHECK: vshuff(v1,v0,[[REG67]]) define <256 x i8> @vshuff_67(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 2, i32 130, i32 3, i32 131, i32 8, i32 136, i32 9, i32 137, i32 10, i32 138, i32 11, i32 139, i32 16, i32 144, i32 17, i32 145, i32 18, i32 146, i32 19, i32 147, i32 24, i32 152, i32 25, i32 153, i32 26, i32 154, i32 27, i32 155, i32 4, i32 132, i32 5, i32 133, i32 6, i32 134, i32 7, i32 135, i32 12, i32 140, i32 13, i32 141, i32 14, i32 142, i32 15, i32 143, i32 20, i32 148, i32 21, i32 149, i32 22, i32 
150, i32 23, i32 151, i32 28, i32 156, i32 29, i32 157, i32 30, i32 158, i32 31, i32 159, i32 32, i32 160, i32 33, i32 161, i32 34, i32 162, i32 35, i32 163, i32 40, i32 168, i32 41, i32 169, i32 42, i32 170, i32 43, i32 171, i32 48, i32 176, i32 49, i32 177, i32 50, i32 178, i32 51, i32 179, i32 56, i32 184, i32 57, i32 185, i32 58, i32 186, i32 59, i32 187, i32 36, i32 164, i32 37, i32 165, i32 38, i32 166, i32 39, i32 167, i32 44, i32 172, i32 45, i32 173, i32 46, i32 174, i32 47, i32 175, i32 52, i32 180, i32 53, i32 181, i32 54, i32 182, i32 55, i32 183, i32 60, i32 188, i32 61, i32 189, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 66, i32 194, i32 67, i32 195, i32 72, i32 200, i32 73, i32 201, i32 74, i32 202, i32 75, i32 203, i32 80, i32 208, i32 81, i32 209, i32 82, i32 210, i32 83, i32 211, i32 88, i32 216, i32 89, i32 217, i32 90, i32 218, i32 91, i32 219, i32 68, i32 196, i32 69, i32 197, i32 70, i32 198, i32 71, i32 199, i32 76, i32 204, i32 77, i32 205, i32 78, i32 206, i32 79, i32 207, i32 84, i32 212, i32 85, i32 213, i32 86, i32 214, i32 87, i32 215, i32 92, i32 220, i32 93, i32 221, i32 94, i32 222, i32 95, i32 223, i32 96, i32 224, i32 97, i32 225, i32 98, i32 226, i32 99, i32 227, i32 104, i32 232, i32 105, i32 233, i32 106, i32 234, i32 107, i32 235, i32 112, i32 240, i32 113, i32 241, i32 114, i32 242, i32 115, i32 243, i32 120, i32 248, i32 121, i32 249, i32 122, i32 250, i32 123, i32 251, i32 100, i32 228, i32 101, i32 229, i32 102, i32 230, i32 103, i32 231, i32 108, i32 236, i32 109, i32 237, i32 110, i32 238, i32 111, i32 239, i32 116, i32 244, i32 117, i32 245, i32 118, i32 246, i32 119, i32 247, i32 124, i32 252, i32 125, i32 253, i32 126, i32 254, i32 127, i32 255> @@ -835,7 +835,7 @@ define <256 x i8> @vshuff_67(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_68: -; CHECK: [[REG68:r[0-9]+]] = #104 +; CHECK: [[REG68:r[0-9]+]] = #-24 ; CHECK: vshuff(v1,v0,[[REG68]]) define <256 x i8> @vshuff_68(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 200, i32 201, i32 202, i32 203, i32 
204, i32 205, i32 206, i32 207, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -843,7 +843,7 @@ define <256 x i8> @vshuff_68(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_69: -; CHECK: [[REG69:r[0-9]+]] = #105 +; CHECK: [[REG69:r[0-9]+]] = #-23 ; CHECK: vshuff(v1,v0,[[REG69]]) define <256 x i8> @vshuff_69(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 4, i32 132, i32 6, i32 134, i32 1, i32 129, i32 3, i32 131, i32 5, i32 133, i32 7, i32 135, i32 16, i32 144, i32 18, i32 146, i32 20, i32 148, i32 22, i32 150, i32 17, i32 145, i32 19, i32 147, i32 21, i32 149, i32 23, i32 151, i32 8, i32 136, i32 10, i32 138, i32 12, i32 140, i32 14, i32 142, i32 9, i32 137, i32 11, i32 139, i32 13, i32 141, i32 15, i32 143, i32 24, i32 152, i32 26, i32 154, i32 28, i32 156, i32 30, i32 158, i32 25, i32 153, i32 27, i32 155, i32 29, i32 157, i32 31, i32 159, i32 32, i32 160, i32 34, i32 162, i32 36, i32 164, i32 38, i32 166, i32 33, i32 161, i32 35, i32 163, i32 37, i32 165, i32 39, i32 167, i32 48, i32 176, i32 50, i32 178, i32 52, i32 180, i32 54, i32 182, i32 49, i32 177, i32 51, i32 179, i32 53, i32 181, i32 55, i32 183, i32 40, i32 168, i32 42, i32 170, i32 44, i32 172, i32 46, i32 174, i32 41, i32 169, i32 43, i32 171, i32 45, i32 173, i32 47, i32 175, i32 56, i32 184, i32 58, i32 186, i32 60, i32 188, i32 62, i32 190, i32 57, i32 185, i32 59, i32 187, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 68, i32 196, i32 70, i32 198, i32 65, i32 193, i32 67, i32 195, i32 69, i32 197, i32 71, i32 199, i32 80, i32 208, i32 82, i32 210, i32 84, i32 212, i32 86, i32 214, i32 81, i32 209, i32 83, i32 211, i32 85, i32 213, i32 87, i32 215, i32 72, i32 200, i32 74, i32 202, i32 76, i32 204, i32 78, i32 206, i32 73, i32 201, i32 75, i32 203, i32 77, i32 205, i32 79, i32 207, i32 88, i32 216, i32 90, i32 218, i32 92, i32 220, i32 94, i32 222, i32 89, i32 217, i32 91, i32 219, i32 93, i32 221, i32 95, i32 223, i32 96, i32 224, i32 98, i32 226, i32 100, i32 228, i32 102, i32 230, i32 97, i32 225, i32 99, i32 227, i32 101, i32 229, i32 103, i32 231, i32 112, i32 240, i32 114, i32 242, i32 116, i32 244, i32 118, i32 246, i32 113, i32 241, i32 115, i32 243, i32 117, i32 245, i32 119, i32 247, i32 104, i32 232, i32 106, i32 234, i32 108, i32 236, i32 110, i32 238, i32 105, i32 233, i32 107, i32 235, i32 109, i32 237, i32 111, i32 239, i32 120, i32 248, i32 122, i32 250, i32 124, i32 252, i32 126, i32 254, i32 121, i32 249, i32 123, i32 251, i32 125, i32 253, i32 127, i32 255> @@ -851,7 +851,7 @@ define <256 x i8> @vshuff_69(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_6a: -; CHECK: [[REG6a:r[0-9]+]] = #106 +; CHECK: [[REG6a:r[0-9]+]] = #-22 ; CHECK: vshuff(v1,v0,[[REG6a]]) define <256 x i8> @vshuff_6a(<256 x i8> %v0, <256 x 
i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 4, i32 5, i32 132, i32 133, i32 2, i32 3, i32 130, i32 131, i32 6, i32 7, i32 134, i32 135, i32 16, i32 17, i32 144, i32 145, i32 20, i32 21, i32 148, i32 149, i32 18, i32 19, i32 146, i32 147, i32 22, i32 23, i32 150, i32 151, i32 8, i32 9, i32 136, i32 137, i32 12, i32 13, i32 140, i32 141, i32 10, i32 11, i32 138, i32 139, i32 14, i32 15, i32 142, i32 143, i32 24, i32 25, i32 152, i32 153, i32 28, i32 29, i32 156, i32 157, i32 26, i32 27, i32 154, i32 155, i32 30, i32 31, i32 158, i32 159, i32 32, i32 33, i32 160, i32 161, i32 36, i32 37, i32 164, i32 165, i32 34, i32 35, i32 162, i32 163, i32 38, i32 39, i32 166, i32 167, i32 48, i32 49, i32 176, i32 177, i32 52, i32 53, i32 180, i32 181, i32 50, i32 51, i32 178, i32 179, i32 54, i32 55, i32 182, i32 183, i32 40, i32 41, i32 168, i32 169, i32 44, i32 45, i32 172, i32 173, i32 42, i32 43, i32 170, i32 171, i32 46, i32 47, i32 174, i32 175, i32 56, i32 57, i32 184, i32 185, i32 60, i32 61, i32 188, i32 189, i32 58, i32 59, i32 186, i32 187, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 68, i32 69, i32 196, i32 197, i32 66, i32 67, i32 194, i32 195, i32 70, i32 71, i32 198, i32 199, i32 80, i32 81, i32 208, i32 209, i32 84, i32 85, i32 212, i32 213, i32 82, i32 83, i32 210, i32 211, i32 86, i32 87, i32 214, i32 215, i32 72, i32 73, i32 200, i32 201, i32 76, i32 77, i32 204, i32 205, i32 74, i32 75, i32 202, i32 203, i32 78, i32 79, i32 206, i32 207, i32 88, i32 89, i32 216, i32 217, i32 92, i32 93, i32 220, i32 221, i32 90, i32 91, i32 218, i32 219, i32 94, i32 95, i32 222, i32 223, i32 96, i32 97, i32 224, i32 225, i32 100, i32 101, i32 228, i32 229, i32 98, i32 99, i32 226, i32 227, i32 102, i32 103, i32 230, i32 231, i32 112, i32 113, i32 240, i32 241, i32 116, i32 117, i32 244, i32 245, i32 114, i32 115, i32 242, i32 243, i32 118, i32 119, i32 246, i32 247, i32 104, i32 105, i32 232, i32 233, i32 108, i32 109, i32 236, i32 237, i32 106, i32 107, i32 234, i32 235, i32 110, i32 111, i32 238, i32 239, i32 120, i32 121, i32 248, i32 249, i32 124, i32 125, i32 252, i32 253, i32 122, i32 123, i32 250, i32 251, i32 126, i32 127, i32 254, i32 255> @@ -859,7 +859,7 @@ define <256 x i8> @vshuff_6a(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_6b: -; CHECK: [[REG6b:r[0-9]+]] = #107 +; CHECK: [[REG6b:r[0-9]+]] = #-21 ; CHECK: vshuff(v1,v0,[[REG6b]]) define <256 x i8> @vshuff_6b(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 4, i32 132, i32 5, i32 133, i32 2, i32 130, i32 3, i32 131, i32 6, i32 134, i32 7, i32 135, i32 16, i32 144, i32 17, i32 145, i32 20, i32 148, i32 21, i32 149, i32 18, i32 146, i32 19, i32 147, i32 22, i32 150, i32 23, i32 151, i32 8, i32 136, i32 9, i32 137, i32 12, i32 140, i32 13, i32 141, i32 10, i32 138, i32 11, i32 139, i32 14, i32 142, i32 15, i32 143, i32 24, i32 152, i32 25, i32 153, i32 28, i32 156, i32 29, i32 157, i32 26, i32 154, i32 27, i32 155, i32 30, i32 158, i32 31, i32 159, i32 32, i32 160, i32 33, i32 161, i32 36, i32 164, i32 37, i32 165, i32 34, i32 162, i32 35, i32 163, i32 38, i32 166, i32 39, i32 167, i32 48, i32 176, i32 49, i32 177, i32 52, i32 180, i32 53, i32 181, i32 50, i32 178, i32 51, i32 179, i32 54, i32 182, i32 55, i32 183, i32 40, i32 168, i32 41, i32 169, i32 44, i32 172, i32 45, i32 173, i32 42, i32 170, i32 43, i32 171, i32 46, i32 174, i32 
47, i32 175, i32 56, i32 184, i32 57, i32 185, i32 60, i32 188, i32 61, i32 189, i32 58, i32 186, i32 59, i32 187, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 68, i32 196, i32 69, i32 197, i32 66, i32 194, i32 67, i32 195, i32 70, i32 198, i32 71, i32 199, i32 80, i32 208, i32 81, i32 209, i32 84, i32 212, i32 85, i32 213, i32 82, i32 210, i32 83, i32 211, i32 86, i32 214, i32 87, i32 215, i32 72, i32 200, i32 73, i32 201, i32 76, i32 204, i32 77, i32 205, i32 74, i32 202, i32 75, i32 203, i32 78, i32 206, i32 79, i32 207, i32 88, i32 216, i32 89, i32 217, i32 92, i32 220, i32 93, i32 221, i32 90, i32 218, i32 91, i32 219, i32 94, i32 222, i32 95, i32 223, i32 96, i32 224, i32 97, i32 225, i32 100, i32 228, i32 101, i32 229, i32 98, i32 226, i32 99, i32 227, i32 102, i32 230, i32 103, i32 231, i32 112, i32 240, i32 113, i32 241, i32 116, i32 244, i32 117, i32 245, i32 114, i32 242, i32 115, i32 243, i32 118, i32 246, i32 119, i32 247, i32 104, i32 232, i32 105, i32 233, i32 108, i32 236, i32 109, i32 237, i32 106, i32 234, i32 107, i32 235, i32 110, i32 238, i32 111, i32 239, i32 120, i32 248, i32 121, i32 249, i32 124, i32 252, i32 125, i32 253, i32 122, i32 250, i32 123, i32 251, i32 126, i32 254, i32 127, i32 255> @@ -867,7 +867,7 @@ define <256 x i8> @vshuff_6b(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_6c: -; CHECK: [[REG6c:r[0-9]+]] = #108 +; CHECK: [[REG6c:r[0-9]+]] = #-20 ; CHECK: vshuff(v1,v0,[[REG6c]]) define <256 x i8> @vshuff_6c(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 128, i32 129, i32 130, i32 131, i32 4, i32 5, i32 6, i32 7, i32 132, i32 133, i32 134, i32 135, i32 16, i32 17, i32 18, i32 19, i32 144, i32 145, i32 146, i32 147, i32 20, i32 21, i32 22, i32 23, i32 148, i32 149, i32 150, i32 151, i32 8, i32 9, i32 10, i32 11, i32 136, i32 137, i32 138, i32 139, i32 12, i32 13, i32 14, i32 15, i32 140, i32 141, i32 142, i32 143, i32 24, i32 25, i32 26, i32 27, i32 152, i32 153, i32 154, i32 155, i32 28, i32 29, i32 30, i32 31, i32 156, i32 157, i32 158, i32 159, i32 32, i32 33, i32 34, i32 35, i32 160, i32 161, i32 162, i32 163, i32 36, i32 37, i32 38, i32 39, i32 164, i32 165, i32 166, i32 167, i32 48, i32 49, i32 50, i32 51, i32 176, i32 177, i32 178, i32 179, i32 52, i32 53, i32 54, i32 55, i32 180, i32 181, i32 182, i32 183, i32 40, i32 41, i32 42, i32 43, i32 168, i32 169, i32 170, i32 171, i32 44, i32 45, i32 46, i32 47, i32 172, i32 173, i32 174, i32 175, i32 56, i32 57, i32 58, i32 59, i32 184, i32 185, i32 186, i32 187, i32 60, i32 61, i32 62, i32 63, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 192, i32 193, i32 194, i32 195, i32 68, i32 69, i32 70, i32 71, i32 196, i32 197, i32 198, i32 199, i32 80, i32 81, i32 82, i32 83, i32 208, i32 209, i32 210, i32 211, i32 84, i32 85, i32 86, i32 87, i32 212, i32 213, i32 214, i32 215, i32 72, i32 73, i32 74, i32 75, i32 200, i32 201, i32 202, i32 203, i32 76, i32 77, i32 78, i32 79, i32 204, i32 205, i32 206, i32 207, i32 88, i32 89, i32 90, i32 91, i32 216, i32 217, i32 218, i32 219, i32 92, i32 93, i32 94, i32 95, i32 220, i32 221, i32 222, i32 223, i32 96, i32 97, i32 98, i32 99, i32 224, i32 225, i32 226, i32 227, i32 100, i32 101, i32 102, i32 103, i32 228, i32 229, i32 230, i32 231, i32 112, i32 113, i32 114, i32 115, i32 240, i32 241, i32 242, i32 243, i32 116, i32 117, i32 118, i32 119, i32 244, i32 245, i32 246, i32 247, i32 104, i32 105, i32 106, i32 
107, i32 232, i32 233, i32 234, i32 235, i32 108, i32 109, i32 110, i32 111, i32 236, i32 237, i32 238, i32 239, i32 120, i32 121, i32 122, i32 123, i32 248, i32 249, i32 250, i32 251, i32 124, i32 125, i32 126, i32 127, i32 252, i32 253, i32 254, i32 255> @@ -875,7 +875,7 @@ define <256 x i8> @vshuff_6c(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_6d: -; CHECK: [[REG6d:r[0-9]+]] = #109 +; CHECK: [[REG6d:r[0-9]+]] = #-19 ; CHECK: vshuff(v1,v0,[[REG6d]]) define <256 x i8> @vshuff_6d(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 1, i32 129, i32 3, i32 131, i32 4, i32 132, i32 6, i32 134, i32 5, i32 133, i32 7, i32 135, i32 16, i32 144, i32 18, i32 146, i32 17, i32 145, i32 19, i32 147, i32 20, i32 148, i32 22, i32 150, i32 21, i32 149, i32 23, i32 151, i32 8, i32 136, i32 10, i32 138, i32 9, i32 137, i32 11, i32 139, i32 12, i32 140, i32 14, i32 142, i32 13, i32 141, i32 15, i32 143, i32 24, i32 152, i32 26, i32 154, i32 25, i32 153, i32 27, i32 155, i32 28, i32 156, i32 30, i32 158, i32 29, i32 157, i32 31, i32 159, i32 32, i32 160, i32 34, i32 162, i32 33, i32 161, i32 35, i32 163, i32 36, i32 164, i32 38, i32 166, i32 37, i32 165, i32 39, i32 167, i32 48, i32 176, i32 50, i32 178, i32 49, i32 177, i32 51, i32 179, i32 52, i32 180, i32 54, i32 182, i32 53, i32 181, i32 55, i32 183, i32 40, i32 168, i32 42, i32 170, i32 41, i32 169, i32 43, i32 171, i32 44, i32 172, i32 46, i32 174, i32 45, i32 173, i32 47, i32 175, i32 56, i32 184, i32 58, i32 186, i32 57, i32 185, i32 59, i32 187, i32 60, i32 188, i32 62, i32 190, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 65, i32 193, i32 67, i32 195, i32 68, i32 196, i32 70, i32 198, i32 69, i32 197, i32 71, i32 199, i32 80, i32 208, i32 82, i32 210, i32 81, i32 209, i32 83, i32 211, i32 84, i32 212, i32 86, i32 214, i32 85, i32 213, i32 87, i32 215, i32 72, i32 200, i32 74, i32 202, i32 73, i32 201, i32 75, i32 203, i32 76, i32 204, i32 78, i32 206, i32 77, i32 205, i32 79, i32 207, i32 88, i32 216, i32 90, i32 218, i32 89, i32 217, i32 91, i32 219, i32 92, i32 220, i32 94, i32 222, i32 93, i32 221, i32 95, i32 223, i32 96, i32 224, i32 98, i32 226, i32 97, i32 225, i32 99, i32 227, i32 100, i32 228, i32 102, i32 230, i32 101, i32 229, i32 103, i32 231, i32 112, i32 240, i32 114, i32 242, i32 113, i32 241, i32 115, i32 243, i32 116, i32 244, i32 118, i32 246, i32 117, i32 245, i32 119, i32 247, i32 104, i32 232, i32 106, i32 234, i32 105, i32 233, i32 107, i32 235, i32 108, i32 236, i32 110, i32 238, i32 109, i32 237, i32 111, i32 239, i32 120, i32 248, i32 122, i32 250, i32 121, i32 249, i32 123, i32 251, i32 124, i32 252, i32 126, i32 254, i32 125, i32 253, i32 127, i32 255> @@ -883,7 +883,7 @@ define <256 x i8> @vshuff_6d(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_6e: -; CHECK: [[REG6e:r[0-9]+]] = #110 +; CHECK: [[REG6e:r[0-9]+]] = #-18 ; CHECK: vshuff(v1,v0,[[REG6e]]) define <256 x i8> @vshuff_6e(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 2, i32 3, i32 130, i32 131, i32 4, i32 5, i32 132, i32 133, i32 6, i32 7, i32 134, i32 135, i32 16, i32 17, i32 144, i32 145, i32 18, i32 19, i32 146, i32 147, i32 20, i32 21, i32 148, i32 149, i32 22, i32 23, i32 150, i32 151, i32 8, i32 9, i32 136, i32 137, i32 10, i32 11, i32 138, i32 139, i32 12, i32 13, i32 140, i32 141, i32 14, i32 15, i32 142, i32 143, i32 
24, i32 25, i32 152, i32 153, i32 26, i32 27, i32 154, i32 155, i32 28, i32 29, i32 156, i32 157, i32 30, i32 31, i32 158, i32 159, i32 32, i32 33, i32 160, i32 161, i32 34, i32 35, i32 162, i32 163, i32 36, i32 37, i32 164, i32 165, i32 38, i32 39, i32 166, i32 167, i32 48, i32 49, i32 176, i32 177, i32 50, i32 51, i32 178, i32 179, i32 52, i32 53, i32 180, i32 181, i32 54, i32 55, i32 182, i32 183, i32 40, i32 41, i32 168, i32 169, i32 42, i32 43, i32 170, i32 171, i32 44, i32 45, i32 172, i32 173, i32 46, i32 47, i32 174, i32 175, i32 56, i32 57, i32 184, i32 185, i32 58, i32 59, i32 186, i32 187, i32 60, i32 61, i32 188, i32 189, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 66, i32 67, i32 194, i32 195, i32 68, i32 69, i32 196, i32 197, i32 70, i32 71, i32 198, i32 199, i32 80, i32 81, i32 208, i32 209, i32 82, i32 83, i32 210, i32 211, i32 84, i32 85, i32 212, i32 213, i32 86, i32 87, i32 214, i32 215, i32 72, i32 73, i32 200, i32 201, i32 74, i32 75, i32 202, i32 203, i32 76, i32 77, i32 204, i32 205, i32 78, i32 79, i32 206, i32 207, i32 88, i32 89, i32 216, i32 217, i32 90, i32 91, i32 218, i32 219, i32 92, i32 93, i32 220, i32 221, i32 94, i32 95, i32 222, i32 223, i32 96, i32 97, i32 224, i32 225, i32 98, i32 99, i32 226, i32 227, i32 100, i32 101, i32 228, i32 229, i32 102, i32 103, i32 230, i32 231, i32 112, i32 113, i32 240, i32 241, i32 114, i32 115, i32 242, i32 243, i32 116, i32 117, i32 244, i32 245, i32 118, i32 119, i32 246, i32 247, i32 104, i32 105, i32 232, i32 233, i32 106, i32 107, i32 234, i32 235, i32 108, i32 109, i32 236, i32 237, i32 110, i32 111, i32 238, i32 239, i32 120, i32 121, i32 248, i32 249, i32 122, i32 123, i32 250, i32 251, i32 124, i32 125, i32 252, i32 253, i32 126, i32 127, i32 254, i32 255> @@ -891,7 +891,7 @@ define <256 x i8> @vshuff_6e(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_6f: -; CHECK: [[REG6f:r[0-9]+]] = #111 +; CHECK: [[REG6f:r[0-9]+]] = #-17 ; CHECK: vshuff(v1,v0,[[REG6f]]) define <256 x i8> @vshuff_6f(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 2, i32 130, i32 3, i32 131, i32 4, i32 132, i32 5, i32 133, i32 6, i32 134, i32 7, i32 135, i32 16, i32 144, i32 17, i32 145, i32 18, i32 146, i32 19, i32 147, i32 20, i32 148, i32 21, i32 149, i32 22, i32 150, i32 23, i32 151, i32 8, i32 136, i32 9, i32 137, i32 10, i32 138, i32 11, i32 139, i32 12, i32 140, i32 13, i32 141, i32 14, i32 142, i32 15, i32 143, i32 24, i32 152, i32 25, i32 153, i32 26, i32 154, i32 27, i32 155, i32 28, i32 156, i32 29, i32 157, i32 30, i32 158, i32 31, i32 159, i32 32, i32 160, i32 33, i32 161, i32 34, i32 162, i32 35, i32 163, i32 36, i32 164, i32 37, i32 165, i32 38, i32 166, i32 39, i32 167, i32 48, i32 176, i32 49, i32 177, i32 50, i32 178, i32 51, i32 179, i32 52, i32 180, i32 53, i32 181, i32 54, i32 182, i32 55, i32 183, i32 40, i32 168, i32 41, i32 169, i32 42, i32 170, i32 43, i32 171, i32 44, i32 172, i32 45, i32 173, i32 46, i32 174, i32 47, i32 175, i32 56, i32 184, i32 57, i32 185, i32 58, i32 186, i32 59, i32 187, i32 60, i32 188, i32 61, i32 189, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 66, i32 194, i32 67, i32 195, i32 68, i32 196, i32 69, i32 197, i32 70, i32 198, i32 71, i32 199, i32 80, i32 208, i32 81, i32 209, i32 82, i32 210, i32 83, i32 211, i32 84, i32 212, i32 85, i32 213, i32 86, i32 214, i32 87, i32 215, i32 72, i32 200, i32 73, i32 201, i32 74, i32 202, i32 75, i32 
203, i32 76, i32 204, i32 77, i32 205, i32 78, i32 206, i32 79, i32 207, i32 88, i32 216, i32 89, i32 217, i32 90, i32 218, i32 91, i32 219, i32 92, i32 220, i32 93, i32 221, i32 94, i32 222, i32 95, i32 223, i32 96, i32 224, i32 97, i32 225, i32 98, i32 226, i32 99, i32 227, i32 100, i32 228, i32 101, i32 229, i32 102, i32 230, i32 103, i32 231, i32 112, i32 240, i32 113, i32 241, i32 114, i32 242, i32 115, i32 243, i32 116, i32 244, i32 117, i32 245, i32 118, i32 246, i32 119, i32 247, i32 104, i32 232, i32 105, i32 233, i32 106, i32 234, i32 107, i32 235, i32 108, i32 236, i32 109, i32 237, i32 110, i32 238, i32 111, i32 239, i32 120, i32 248, i32 121, i32 249, i32 122, i32 250, i32 123, i32 251, i32 124, i32 252, i32 125, i32 253, i32 126, i32 254, i32 127, i32 255> @@ -899,7 +899,7 @@ define <256 x i8> @vshuff_6f(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_70: -; CHECK: [[REG70:r[0-9]+]] = #112 +; CHECK: [[REG70:r[0-9]+]] = #-16 ; CHECK: vshuff(v1,v0,[[REG70]]) define <256 x i8> @vshuff_70(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -907,7 +907,7 @@ define <256 x i8> @vshuff_70(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_71: -; CHECK: [[REG71:r[0-9]+]] = #113 +; CHECK: [[REG71:r[0-9]+]] = #-15 ; CHECK: vshuff(v1,v0,[[REG71]]) define <256 
x i8> @vshuff_71(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 4, i32 132, i32 6, i32 134, i32 8, i32 136, i32 10, i32 138, i32 12, i32 140, i32 14, i32 142, i32 1, i32 129, i32 3, i32 131, i32 5, i32 133, i32 7, i32 135, i32 9, i32 137, i32 11, i32 139, i32 13, i32 141, i32 15, i32 143, i32 16, i32 144, i32 18, i32 146, i32 20, i32 148, i32 22, i32 150, i32 24, i32 152, i32 26, i32 154, i32 28, i32 156, i32 30, i32 158, i32 17, i32 145, i32 19, i32 147, i32 21, i32 149, i32 23, i32 151, i32 25, i32 153, i32 27, i32 155, i32 29, i32 157, i32 31, i32 159, i32 32, i32 160, i32 34, i32 162, i32 36, i32 164, i32 38, i32 166, i32 40, i32 168, i32 42, i32 170, i32 44, i32 172, i32 46, i32 174, i32 33, i32 161, i32 35, i32 163, i32 37, i32 165, i32 39, i32 167, i32 41, i32 169, i32 43, i32 171, i32 45, i32 173, i32 47, i32 175, i32 48, i32 176, i32 50, i32 178, i32 52, i32 180, i32 54, i32 182, i32 56, i32 184, i32 58, i32 186, i32 60, i32 188, i32 62, i32 190, i32 49, i32 177, i32 51, i32 179, i32 53, i32 181, i32 55, i32 183, i32 57, i32 185, i32 59, i32 187, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 68, i32 196, i32 70, i32 198, i32 72, i32 200, i32 74, i32 202, i32 76, i32 204, i32 78, i32 206, i32 65, i32 193, i32 67, i32 195, i32 69, i32 197, i32 71, i32 199, i32 73, i32 201, i32 75, i32 203, i32 77, i32 205, i32 79, i32 207, i32 80, i32 208, i32 82, i32 210, i32 84, i32 212, i32 86, i32 214, i32 88, i32 216, i32 90, i32 218, i32 92, i32 220, i32 94, i32 222, i32 81, i32 209, i32 83, i32 211, i32 85, i32 213, i32 87, i32 215, i32 89, i32 217, i32 91, i32 219, i32 93, i32 221, i32 95, i32 223, i32 96, i32 224, i32 98, i32 226, i32 100, i32 228, i32 102, i32 230, i32 104, i32 232, i32 106, i32 234, i32 108, i32 236, i32 110, i32 238, i32 97, i32 225, i32 99, i32 227, i32 101, i32 229, i32 103, i32 231, i32 105, i32 233, i32 107, i32 235, i32 109, i32 237, i32 111, i32 239, i32 112, i32 240, i32 114, i32 242, i32 116, i32 244, i32 118, i32 246, i32 120, i32 248, i32 122, i32 250, i32 124, i32 252, i32 126, i32 254, i32 113, i32 241, i32 115, i32 243, i32 117, i32 245, i32 119, i32 247, i32 121, i32 249, i32 123, i32 251, i32 125, i32 253, i32 127, i32 255> @@ -915,7 +915,7 @@ define <256 x i8> @vshuff_71(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_72: -; CHECK: [[REG72:r[0-9]+]] = #114 +; CHECK: [[REG72:r[0-9]+]] = #-14 ; CHECK: vshuff(v1,v0,[[REG72]]) define <256 x i8> @vshuff_72(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 4, i32 5, i32 132, i32 133, i32 8, i32 9, i32 136, i32 137, i32 12, i32 13, i32 140, i32 141, i32 2, i32 3, i32 130, i32 131, i32 6, i32 7, i32 134, i32 135, i32 10, i32 11, i32 138, i32 139, i32 14, i32 15, i32 142, i32 143, i32 16, i32 17, i32 144, i32 145, i32 20, i32 21, i32 148, i32 149, i32 24, i32 25, i32 152, i32 153, i32 28, i32 29, i32 156, i32 157, i32 18, i32 19, i32 146, i32 147, i32 22, i32 23, i32 150, i32 151, i32 26, i32 27, i32 154, i32 155, i32 30, i32 31, i32 158, i32 159, i32 32, i32 33, i32 160, i32 161, i32 36, i32 37, i32 164, i32 165, i32 40, i32 41, i32 168, i32 169, i32 44, i32 45, i32 172, i32 173, i32 34, i32 35, i32 162, i32 163, i32 38, i32 39, i32 166, i32 167, i32 42, i32 43, i32 170, i32 171, i32 46, i32 47, i32 174, i32 175, i32 48, i32 49, i32 176, i32 177, i32 52, i32 53, i32 180, i32 181, i32 56, i32 57, 
i32 184, i32 185, i32 60, i32 61, i32 188, i32 189, i32 50, i32 51, i32 178, i32 179, i32 54, i32 55, i32 182, i32 183, i32 58, i32 59, i32 186, i32 187, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 68, i32 69, i32 196, i32 197, i32 72, i32 73, i32 200, i32 201, i32 76, i32 77, i32 204, i32 205, i32 66, i32 67, i32 194, i32 195, i32 70, i32 71, i32 198, i32 199, i32 74, i32 75, i32 202, i32 203, i32 78, i32 79, i32 206, i32 207, i32 80, i32 81, i32 208, i32 209, i32 84, i32 85, i32 212, i32 213, i32 88, i32 89, i32 216, i32 217, i32 92, i32 93, i32 220, i32 221, i32 82, i32 83, i32 210, i32 211, i32 86, i32 87, i32 214, i32 215, i32 90, i32 91, i32 218, i32 219, i32 94, i32 95, i32 222, i32 223, i32 96, i32 97, i32 224, i32 225, i32 100, i32 101, i32 228, i32 229, i32 104, i32 105, i32 232, i32 233, i32 108, i32 109, i32 236, i32 237, i32 98, i32 99, i32 226, i32 227, i32 102, i32 103, i32 230, i32 231, i32 106, i32 107, i32 234, i32 235, i32 110, i32 111, i32 238, i32 239, i32 112, i32 113, i32 240, i32 241, i32 116, i32 117, i32 244, i32 245, i32 120, i32 121, i32 248, i32 249, i32 124, i32 125, i32 252, i32 253, i32 114, i32 115, i32 242, i32 243, i32 118, i32 119, i32 246, i32 247, i32 122, i32 123, i32 250, i32 251, i32 126, i32 127, i32 254, i32 255> @@ -923,7 +923,7 @@ define <256 x i8> @vshuff_72(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_73: -; CHECK: [[REG73:r[0-9]+]] = #115 +; CHECK: [[REG73:r[0-9]+]] = #-13 ; CHECK: vshuff(v1,v0,[[REG73]]) define <256 x i8> @vshuff_73(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 4, i32 132, i32 5, i32 133, i32 8, i32 136, i32 9, i32 137, i32 12, i32 140, i32 13, i32 141, i32 2, i32 130, i32 3, i32 131, i32 6, i32 134, i32 7, i32 135, i32 10, i32 138, i32 11, i32 139, i32 14, i32 142, i32 15, i32 143, i32 16, i32 144, i32 17, i32 145, i32 20, i32 148, i32 21, i32 149, i32 24, i32 152, i32 25, i32 153, i32 28, i32 156, i32 29, i32 157, i32 18, i32 146, i32 19, i32 147, i32 22, i32 150, i32 23, i32 151, i32 26, i32 154, i32 27, i32 155, i32 30, i32 158, i32 31, i32 159, i32 32, i32 160, i32 33, i32 161, i32 36, i32 164, i32 37, i32 165, i32 40, i32 168, i32 41, i32 169, i32 44, i32 172, i32 45, i32 173, i32 34, i32 162, i32 35, i32 163, i32 38, i32 166, i32 39, i32 167, i32 42, i32 170, i32 43, i32 171, i32 46, i32 174, i32 47, i32 175, i32 48, i32 176, i32 49, i32 177, i32 52, i32 180, i32 53, i32 181, i32 56, i32 184, i32 57, i32 185, i32 60, i32 188, i32 61, i32 189, i32 50, i32 178, i32 51, i32 179, i32 54, i32 182, i32 55, i32 183, i32 58, i32 186, i32 59, i32 187, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 68, i32 196, i32 69, i32 197, i32 72, i32 200, i32 73, i32 201, i32 76, i32 204, i32 77, i32 205, i32 66, i32 194, i32 67, i32 195, i32 70, i32 198, i32 71, i32 199, i32 74, i32 202, i32 75, i32 203, i32 78, i32 206, i32 79, i32 207, i32 80, i32 208, i32 81, i32 209, i32 84, i32 212, i32 85, i32 213, i32 88, i32 216, i32 89, i32 217, i32 92, i32 220, i32 93, i32 221, i32 82, i32 210, i32 83, i32 211, i32 86, i32 214, i32 87, i32 215, i32 90, i32 218, i32 91, i32 219, i32 94, i32 222, i32 95, i32 223, i32 96, i32 224, i32 97, i32 225, i32 100, i32 228, i32 101, i32 229, i32 104, i32 232, i32 105, i32 233, i32 108, i32 236, i32 109, i32 237, i32 98, i32 226, i32 99, i32 227, i32 102, i32 230, i32 103, i32 231, i32 106, i32 234, i32 107, i32 235, i32 110, i32 238, i32 111, i32 
239, i32 112, i32 240, i32 113, i32 241, i32 116, i32 244, i32 117, i32 245, i32 120, i32 248, i32 121, i32 249, i32 124, i32 252, i32 125, i32 253, i32 114, i32 242, i32 115, i32 243, i32 118, i32 246, i32 119, i32 247, i32 122, i32 250, i32 123, i32 251, i32 126, i32 254, i32 127, i32 255> @@ -931,7 +931,7 @@ define <256 x i8> @vshuff_73(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_74: -; CHECK: [[REG74:r[0-9]+]] = #116 +; CHECK: [[REG74:r[0-9]+]] = #-12 ; CHECK: vshuff(v1,v0,[[REG74]]) define <256 x i8> @vshuff_74(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 128, i32 129, i32 130, i32 131, i32 8, i32 9, i32 10, i32 11, i32 136, i32 137, i32 138, i32 139, i32 4, i32 5, i32 6, i32 7, i32 132, i32 133, i32 134, i32 135, i32 12, i32 13, i32 14, i32 15, i32 140, i32 141, i32 142, i32 143, i32 16, i32 17, i32 18, i32 19, i32 144, i32 145, i32 146, i32 147, i32 24, i32 25, i32 26, i32 27, i32 152, i32 153, i32 154, i32 155, i32 20, i32 21, i32 22, i32 23, i32 148, i32 149, i32 150, i32 151, i32 28, i32 29, i32 30, i32 31, i32 156, i32 157, i32 158, i32 159, i32 32, i32 33, i32 34, i32 35, i32 160, i32 161, i32 162, i32 163, i32 40, i32 41, i32 42, i32 43, i32 168, i32 169, i32 170, i32 171, i32 36, i32 37, i32 38, i32 39, i32 164, i32 165, i32 166, i32 167, i32 44, i32 45, i32 46, i32 47, i32 172, i32 173, i32 174, i32 175, i32 48, i32 49, i32 50, i32 51, i32 176, i32 177, i32 178, i32 179, i32 56, i32 57, i32 58, i32 59, i32 184, i32 185, i32 186, i32 187, i32 52, i32 53, i32 54, i32 55, i32 180, i32 181, i32 182, i32 183, i32 60, i32 61, i32 62, i32 63, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 192, i32 193, i32 194, i32 195, i32 72, i32 73, i32 74, i32 75, i32 200, i32 201, i32 202, i32 203, i32 68, i32 69, i32 70, i32 71, i32 196, i32 197, i32 198, i32 199, i32 76, i32 77, i32 78, i32 79, i32 204, i32 205, i32 206, i32 207, i32 80, i32 81, i32 82, i32 83, i32 208, i32 209, i32 210, i32 211, i32 88, i32 89, i32 90, i32 91, i32 216, i32 217, i32 218, i32 219, i32 84, i32 85, i32 86, i32 87, i32 212, i32 213, i32 214, i32 215, i32 92, i32 93, i32 94, i32 95, i32 220, i32 221, i32 222, i32 223, i32 96, i32 97, i32 98, i32 99, i32 224, i32 225, i32 226, i32 227, i32 104, i32 105, i32 106, i32 107, i32 232, i32 233, i32 234, i32 235, i32 100, i32 101, i32 102, i32 103, i32 228, i32 229, i32 230, i32 231, i32 108, i32 109, i32 110, i32 111, i32 236, i32 237, i32 238, i32 239, i32 112, i32 113, i32 114, i32 115, i32 240, i32 241, i32 242, i32 243, i32 120, i32 121, i32 122, i32 123, i32 248, i32 249, i32 250, i32 251, i32 116, i32 117, i32 118, i32 119, i32 244, i32 245, i32 246, i32 247, i32 124, i32 125, i32 126, i32 127, i32 252, i32 253, i32 254, i32 255> @@ -939,7 +939,7 @@ define <256 x i8> @vshuff_74(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_75: -; CHECK: [[REG75:r[0-9]+]] = #117 +; CHECK: [[REG75:r[0-9]+]] = #-11 ; CHECK: vshuff(v1,v0,[[REG75]]) define <256 x i8> @vshuff_75(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 1, i32 129, i32 3, i32 131, i32 8, i32 136, i32 10, i32 138, i32 9, i32 137, i32 11, i32 139, i32 4, i32 132, i32 6, i32 134, i32 5, i32 133, i32 7, i32 135, i32 12, i32 140, i32 14, i32 142, i32 13, i32 141, i32 15, i32 143, i32 16, i32 144, i32 18, i32 146, i32 17, i32 145, i32 19, i32 147, i32 24, i32 152, i32 26, i32 154, i32 
25, i32 153, i32 27, i32 155, i32 20, i32 148, i32 22, i32 150, i32 21, i32 149, i32 23, i32 151, i32 28, i32 156, i32 30, i32 158, i32 29, i32 157, i32 31, i32 159, i32 32, i32 160, i32 34, i32 162, i32 33, i32 161, i32 35, i32 163, i32 40, i32 168, i32 42, i32 170, i32 41, i32 169, i32 43, i32 171, i32 36, i32 164, i32 38, i32 166, i32 37, i32 165, i32 39, i32 167, i32 44, i32 172, i32 46, i32 174, i32 45, i32 173, i32 47, i32 175, i32 48, i32 176, i32 50, i32 178, i32 49, i32 177, i32 51, i32 179, i32 56, i32 184, i32 58, i32 186, i32 57, i32 185, i32 59, i32 187, i32 52, i32 180, i32 54, i32 182, i32 53, i32 181, i32 55, i32 183, i32 60, i32 188, i32 62, i32 190, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 65, i32 193, i32 67, i32 195, i32 72, i32 200, i32 74, i32 202, i32 73, i32 201, i32 75, i32 203, i32 68, i32 196, i32 70, i32 198, i32 69, i32 197, i32 71, i32 199, i32 76, i32 204, i32 78, i32 206, i32 77, i32 205, i32 79, i32 207, i32 80, i32 208, i32 82, i32 210, i32 81, i32 209, i32 83, i32 211, i32 88, i32 216, i32 90, i32 218, i32 89, i32 217, i32 91, i32 219, i32 84, i32 212, i32 86, i32 214, i32 85, i32 213, i32 87, i32 215, i32 92, i32 220, i32 94, i32 222, i32 93, i32 221, i32 95, i32 223, i32 96, i32 224, i32 98, i32 226, i32 97, i32 225, i32 99, i32 227, i32 104, i32 232, i32 106, i32 234, i32 105, i32 233, i32 107, i32 235, i32 100, i32 228, i32 102, i32 230, i32 101, i32 229, i32 103, i32 231, i32 108, i32 236, i32 110, i32 238, i32 109, i32 237, i32 111, i32 239, i32 112, i32 240, i32 114, i32 242, i32 113, i32 241, i32 115, i32 243, i32 120, i32 248, i32 122, i32 250, i32 121, i32 249, i32 123, i32 251, i32 116, i32 244, i32 118, i32 246, i32 117, i32 245, i32 119, i32 247, i32 124, i32 252, i32 126, i32 254, i32 125, i32 253, i32 127, i32 255> @@ -947,7 +947,7 @@ define <256 x i8> @vshuff_75(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_76: -; CHECK: [[REG76:r[0-9]+]] = #118 +; CHECK: [[REG76:r[0-9]+]] = #-10 ; CHECK: vshuff(v1,v0,[[REG76]]) define <256 x i8> @vshuff_76(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 2, i32 3, i32 130, i32 131, i32 8, i32 9, i32 136, i32 137, i32 10, i32 11, i32 138, i32 139, i32 4, i32 5, i32 132, i32 133, i32 6, i32 7, i32 134, i32 135, i32 12, i32 13, i32 140, i32 141, i32 14, i32 15, i32 142, i32 143, i32 16, i32 17, i32 144, i32 145, i32 18, i32 19, i32 146, i32 147, i32 24, i32 25, i32 152, i32 153, i32 26, i32 27, i32 154, i32 155, i32 20, i32 21, i32 148, i32 149, i32 22, i32 23, i32 150, i32 151, i32 28, i32 29, i32 156, i32 157, i32 30, i32 31, i32 158, i32 159, i32 32, i32 33, i32 160, i32 161, i32 34, i32 35, i32 162, i32 163, i32 40, i32 41, i32 168, i32 169, i32 42, i32 43, i32 170, i32 171, i32 36, i32 37, i32 164, i32 165, i32 38, i32 39, i32 166, i32 167, i32 44, i32 45, i32 172, i32 173, i32 46, i32 47, i32 174, i32 175, i32 48, i32 49, i32 176, i32 177, i32 50, i32 51, i32 178, i32 179, i32 56, i32 57, i32 184, i32 185, i32 58, i32 59, i32 186, i32 187, i32 52, i32 53, i32 180, i32 181, i32 54, i32 55, i32 182, i32 183, i32 60, i32 61, i32 188, i32 189, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 66, i32 67, i32 194, i32 195, i32 72, i32 73, i32 200, i32 201, i32 74, i32 75, i32 202, i32 203, i32 68, i32 69, i32 196, i32 197, i32 70, i32 71, i32 198, i32 199, i32 76, i32 77, i32 204, i32 205, i32 78, i32 79, i32 206, i32 207, i32 80, i32 81, i32 208, i32 
209, i32 82, i32 83, i32 210, i32 211, i32 88, i32 89, i32 216, i32 217, i32 90, i32 91, i32 218, i32 219, i32 84, i32 85, i32 212, i32 213, i32 86, i32 87, i32 214, i32 215, i32 92, i32 93, i32 220, i32 221, i32 94, i32 95, i32 222, i32 223, i32 96, i32 97, i32 224, i32 225, i32 98, i32 99, i32 226, i32 227, i32 104, i32 105, i32 232, i32 233, i32 106, i32 107, i32 234, i32 235, i32 100, i32 101, i32 228, i32 229, i32 102, i32 103, i32 230, i32 231, i32 108, i32 109, i32 236, i32 237, i32 110, i32 111, i32 238, i32 239, i32 112, i32 113, i32 240, i32 241, i32 114, i32 115, i32 242, i32 243, i32 120, i32 121, i32 248, i32 249, i32 122, i32 123, i32 250, i32 251, i32 116, i32 117, i32 244, i32 245, i32 118, i32 119, i32 246, i32 247, i32 124, i32 125, i32 252, i32 253, i32 126, i32 127, i32 254, i32 255> @@ -955,7 +955,7 @@ define <256 x i8> @vshuff_76(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_77: -; CHECK: [[REG77:r[0-9]+]] = #119 +; CHECK: [[REG77:r[0-9]+]] = #-9 ; CHECK: vshuff(v1,v0,[[REG77]]) define <256 x i8> @vshuff_77(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 2, i32 130, i32 3, i32 131, i32 8, i32 136, i32 9, i32 137, i32 10, i32 138, i32 11, i32 139, i32 4, i32 132, i32 5, i32 133, i32 6, i32 134, i32 7, i32 135, i32 12, i32 140, i32 13, i32 141, i32 14, i32 142, i32 15, i32 143, i32 16, i32 144, i32 17, i32 145, i32 18, i32 146, i32 19, i32 147, i32 24, i32 152, i32 25, i32 153, i32 26, i32 154, i32 27, i32 155, i32 20, i32 148, i32 21, i32 149, i32 22, i32 150, i32 23, i32 151, i32 28, i32 156, i32 29, i32 157, i32 30, i32 158, i32 31, i32 159, i32 32, i32 160, i32 33, i32 161, i32 34, i32 162, i32 35, i32 163, i32 40, i32 168, i32 41, i32 169, i32 42, i32 170, i32 43, i32 171, i32 36, i32 164, i32 37, i32 165, i32 38, i32 166, i32 39, i32 167, i32 44, i32 172, i32 45, i32 173, i32 46, i32 174, i32 47, i32 175, i32 48, i32 176, i32 49, i32 177, i32 50, i32 178, i32 51, i32 179, i32 56, i32 184, i32 57, i32 185, i32 58, i32 186, i32 59, i32 187, i32 52, i32 180, i32 53, i32 181, i32 54, i32 182, i32 55, i32 183, i32 60, i32 188, i32 61, i32 189, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 66, i32 194, i32 67, i32 195, i32 72, i32 200, i32 73, i32 201, i32 74, i32 202, i32 75, i32 203, i32 68, i32 196, i32 69, i32 197, i32 70, i32 198, i32 71, i32 199, i32 76, i32 204, i32 77, i32 205, i32 78, i32 206, i32 79, i32 207, i32 80, i32 208, i32 81, i32 209, i32 82, i32 210, i32 83, i32 211, i32 88, i32 216, i32 89, i32 217, i32 90, i32 218, i32 91, i32 219, i32 84, i32 212, i32 85, i32 213, i32 86, i32 214, i32 87, i32 215, i32 92, i32 220, i32 93, i32 221, i32 94, i32 222, i32 95, i32 223, i32 96, i32 224, i32 97, i32 225, i32 98, i32 226, i32 99, i32 227, i32 104, i32 232, i32 105, i32 233, i32 106, i32 234, i32 107, i32 235, i32 100, i32 228, i32 101, i32 229, i32 102, i32 230, i32 103, i32 231, i32 108, i32 236, i32 109, i32 237, i32 110, i32 238, i32 111, i32 239, i32 112, i32 240, i32 113, i32 241, i32 114, i32 242, i32 115, i32 243, i32 120, i32 248, i32 121, i32 249, i32 122, i32 250, i32 123, i32 251, i32 116, i32 244, i32 117, i32 245, i32 118, i32 246, i32 119, i32 247, i32 124, i32 252, i32 125, i32 253, i32 126, i32 254, i32 127, i32 255> @@ -963,7 +963,7 @@ define <256 x i8> @vshuff_77(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_78: -; CHECK: [[REG78:r[0-9]+]] = #120 +; CHECK: [[REG78:r[0-9]+]] = #-8 ; CHECK: 
vshuff(v1,v0,[[REG78]]) define <256 x i8> @vshuff_78(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255> @@ -971,7 +971,7 @@ define <256 x i8> @vshuff_78(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_79: -; CHECK: [[REG79:r[0-9]+]] = #121 +; CHECK: [[REG79:r[0-9]+]] = #-7 ; CHECK: vshuff(v1,v0,[[REG79]]) define <256 x i8> @vshuff_79(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 4, i32 132, i32 6, i32 134, i32 1, i32 129, i32 3, i32 131, i32 5, i32 133, i32 7, i32 135, i32 8, i32 136, i32 10, i32 138, i32 12, i32 140, i32 14, i32 142, i32 9, i32 137, i32 11, i32 139, i32 13, i32 141, i32 15, i32 143, i32 16, i32 144, i32 18, i32 146, i32 20, i32 148, i32 22, i32 150, i32 17, i32 145, i32 19, i32 147, i32 21, i32 149, i32 23, i32 151, i32 24, i32 152, i32 26, i32 154, i32 28, i32 156, i32 30, i32 158, i32 25, i32 153, i32 27, i32 155, i32 29, i32 157, i32 31, i32 159, i32 32, i32 160, i32 34, i32 162, i32 36, i32 164, i32 38, i32 166, i32 33, i32 161, i32 35, i32 163, i32 37, i32 165, i32 39, i32 167, i32 40, i32 168, i32 42, i32 170, i32 44, i32 172, i32 46, i32 174, i32 41, i32 169, i32 43, i32 171, i32 45, i32 173, i32 47, i32 175, i32 48, i32 176, i32 50, i32 178, i32 52, i32 
180, i32 54, i32 182, i32 49, i32 177, i32 51, i32 179, i32 53, i32 181, i32 55, i32 183, i32 56, i32 184, i32 58, i32 186, i32 60, i32 188, i32 62, i32 190, i32 57, i32 185, i32 59, i32 187, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 68, i32 196, i32 70, i32 198, i32 65, i32 193, i32 67, i32 195, i32 69, i32 197, i32 71, i32 199, i32 72, i32 200, i32 74, i32 202, i32 76, i32 204, i32 78, i32 206, i32 73, i32 201, i32 75, i32 203, i32 77, i32 205, i32 79, i32 207, i32 80, i32 208, i32 82, i32 210, i32 84, i32 212, i32 86, i32 214, i32 81, i32 209, i32 83, i32 211, i32 85, i32 213, i32 87, i32 215, i32 88, i32 216, i32 90, i32 218, i32 92, i32 220, i32 94, i32 222, i32 89, i32 217, i32 91, i32 219, i32 93, i32 221, i32 95, i32 223, i32 96, i32 224, i32 98, i32 226, i32 100, i32 228, i32 102, i32 230, i32 97, i32 225, i32 99, i32 227, i32 101, i32 229, i32 103, i32 231, i32 104, i32 232, i32 106, i32 234, i32 108, i32 236, i32 110, i32 238, i32 105, i32 233, i32 107, i32 235, i32 109, i32 237, i32 111, i32 239, i32 112, i32 240, i32 114, i32 242, i32 116, i32 244, i32 118, i32 246, i32 113, i32 241, i32 115, i32 243, i32 117, i32 245, i32 119, i32 247, i32 120, i32 248, i32 122, i32 250, i32 124, i32 252, i32 126, i32 254, i32 121, i32 249, i32 123, i32 251, i32 125, i32 253, i32 127, i32 255> @@ -979,7 +979,7 @@ define <256 x i8> @vshuff_79(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_7a: -; CHECK: [[REG7a:r[0-9]+]] = #122 +; CHECK: [[REG7a:r[0-9]+]] = #-6 ; CHECK: vshuff(v1,v0,[[REG7a]]) define <256 x i8> @vshuff_7a(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 4, i32 5, i32 132, i32 133, i32 2, i32 3, i32 130, i32 131, i32 6, i32 7, i32 134, i32 135, i32 8, i32 9, i32 136, i32 137, i32 12, i32 13, i32 140, i32 141, i32 10, i32 11, i32 138, i32 139, i32 14, i32 15, i32 142, i32 143, i32 16, i32 17, i32 144, i32 145, i32 20, i32 21, i32 148, i32 149, i32 18, i32 19, i32 146, i32 147, i32 22, i32 23, i32 150, i32 151, i32 24, i32 25, i32 152, i32 153, i32 28, i32 29, i32 156, i32 157, i32 26, i32 27, i32 154, i32 155, i32 30, i32 31, i32 158, i32 159, i32 32, i32 33, i32 160, i32 161, i32 36, i32 37, i32 164, i32 165, i32 34, i32 35, i32 162, i32 163, i32 38, i32 39, i32 166, i32 167, i32 40, i32 41, i32 168, i32 169, i32 44, i32 45, i32 172, i32 173, i32 42, i32 43, i32 170, i32 171, i32 46, i32 47, i32 174, i32 175, i32 48, i32 49, i32 176, i32 177, i32 52, i32 53, i32 180, i32 181, i32 50, i32 51, i32 178, i32 179, i32 54, i32 55, i32 182, i32 183, i32 56, i32 57, i32 184, i32 185, i32 60, i32 61, i32 188, i32 189, i32 58, i32 59, i32 186, i32 187, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 68, i32 69, i32 196, i32 197, i32 66, i32 67, i32 194, i32 195, i32 70, i32 71, i32 198, i32 199, i32 72, i32 73, i32 200, i32 201, i32 76, i32 77, i32 204, i32 205, i32 74, i32 75, i32 202, i32 203, i32 78, i32 79, i32 206, i32 207, i32 80, i32 81, i32 208, i32 209, i32 84, i32 85, i32 212, i32 213, i32 82, i32 83, i32 210, i32 211, i32 86, i32 87, i32 214, i32 215, i32 88, i32 89, i32 216, i32 217, i32 92, i32 93, i32 220, i32 221, i32 90, i32 91, i32 218, i32 219, i32 94, i32 95, i32 222, i32 223, i32 96, i32 97, i32 224, i32 225, i32 100, i32 101, i32 228, i32 229, i32 98, i32 99, i32 226, i32 227, i32 102, i32 103, i32 230, i32 231, i32 104, i32 105, i32 232, i32 233, i32 108, i32 109, i32 236, i32 237, i32 106, i32 107, i32 234, 
i32 235, i32 110, i32 111, i32 238, i32 239, i32 112, i32 113, i32 240, i32 241, i32 116, i32 117, i32 244, i32 245, i32 114, i32 115, i32 242, i32 243, i32 118, i32 119, i32 246, i32 247, i32 120, i32 121, i32 248, i32 249, i32 124, i32 125, i32 252, i32 253, i32 122, i32 123, i32 250, i32 251, i32 126, i32 127, i32 254, i32 255> @@ -987,7 +987,7 @@ define <256 x i8> @vshuff_7a(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_7b: -; CHECK: [[REG7b:r[0-9]+]] = #123 +; CHECK: [[REG7b:r[0-9]+]] = #-5 ; CHECK: vshuff(v1,v0,[[REG7b]]) define <256 x i8> @vshuff_7b(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 4, i32 132, i32 5, i32 133, i32 2, i32 130, i32 3, i32 131, i32 6, i32 134, i32 7, i32 135, i32 8, i32 136, i32 9, i32 137, i32 12, i32 140, i32 13, i32 141, i32 10, i32 138, i32 11, i32 139, i32 14, i32 142, i32 15, i32 143, i32 16, i32 144, i32 17, i32 145, i32 20, i32 148, i32 21, i32 149, i32 18, i32 146, i32 19, i32 147, i32 22, i32 150, i32 23, i32 151, i32 24, i32 152, i32 25, i32 153, i32 28, i32 156, i32 29, i32 157, i32 26, i32 154, i32 27, i32 155, i32 30, i32 158, i32 31, i32 159, i32 32, i32 160, i32 33, i32 161, i32 36, i32 164, i32 37, i32 165, i32 34, i32 162, i32 35, i32 163, i32 38, i32 166, i32 39, i32 167, i32 40, i32 168, i32 41, i32 169, i32 44, i32 172, i32 45, i32 173, i32 42, i32 170, i32 43, i32 171, i32 46, i32 174, i32 47, i32 175, i32 48, i32 176, i32 49, i32 177, i32 52, i32 180, i32 53, i32 181, i32 50, i32 178, i32 51, i32 179, i32 54, i32 182, i32 55, i32 183, i32 56, i32 184, i32 57, i32 185, i32 60, i32 188, i32 61, i32 189, i32 58, i32 186, i32 59, i32 187, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 68, i32 196, i32 69, i32 197, i32 66, i32 194, i32 67, i32 195, i32 70, i32 198, i32 71, i32 199, i32 72, i32 200, i32 73, i32 201, i32 76, i32 204, i32 77, i32 205, i32 74, i32 202, i32 75, i32 203, i32 78, i32 206, i32 79, i32 207, i32 80, i32 208, i32 81, i32 209, i32 84, i32 212, i32 85, i32 213, i32 82, i32 210, i32 83, i32 211, i32 86, i32 214, i32 87, i32 215, i32 88, i32 216, i32 89, i32 217, i32 92, i32 220, i32 93, i32 221, i32 90, i32 218, i32 91, i32 219, i32 94, i32 222, i32 95, i32 223, i32 96, i32 224, i32 97, i32 225, i32 100, i32 228, i32 101, i32 229, i32 98, i32 226, i32 99, i32 227, i32 102, i32 230, i32 103, i32 231, i32 104, i32 232, i32 105, i32 233, i32 108, i32 236, i32 109, i32 237, i32 106, i32 234, i32 107, i32 235, i32 110, i32 238, i32 111, i32 239, i32 112, i32 240, i32 113, i32 241, i32 116, i32 244, i32 117, i32 245, i32 114, i32 242, i32 115, i32 243, i32 118, i32 246, i32 119, i32 247, i32 120, i32 248, i32 121, i32 249, i32 124, i32 252, i32 125, i32 253, i32 122, i32 250, i32 123, i32 251, i32 126, i32 254, i32 127, i32 255> @@ -995,7 +995,7 @@ define <256 x i8> @vshuff_7b(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_7c: -; CHECK: [[REG7c:r[0-9]+]] = #124 +; CHECK: [[REG7c:r[0-9]+]] = #-4 ; CHECK: vshuff(v1,v0,[[REG7c]]) define <256 x i8> @vshuff_7c(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 2, i32 3, i32 128, i32 129, i32 130, i32 131, i32 4, i32 5, i32 6, i32 7, i32 132, i32 133, i32 134, i32 135, i32 8, i32 9, i32 10, i32 11, i32 136, i32 137, i32 138, i32 139, i32 12, i32 13, i32 14, i32 15, i32 140, i32 141, i32 142, i32 143, i32 16, i32 17, i32 18, i32 19, i32 144, i32 145, i32 146, i32 147, 
i32 20, i32 21, i32 22, i32 23, i32 148, i32 149, i32 150, i32 151, i32 24, i32 25, i32 26, i32 27, i32 152, i32 153, i32 154, i32 155, i32 28, i32 29, i32 30, i32 31, i32 156, i32 157, i32 158, i32 159, i32 32, i32 33, i32 34, i32 35, i32 160, i32 161, i32 162, i32 163, i32 36, i32 37, i32 38, i32 39, i32 164, i32 165, i32 166, i32 167, i32 40, i32 41, i32 42, i32 43, i32 168, i32 169, i32 170, i32 171, i32 44, i32 45, i32 46, i32 47, i32 172, i32 173, i32 174, i32 175, i32 48, i32 49, i32 50, i32 51, i32 176, i32 177, i32 178, i32 179, i32 52, i32 53, i32 54, i32 55, i32 180, i32 181, i32 182, i32 183, i32 56, i32 57, i32 58, i32 59, i32 184, i32 185, i32 186, i32 187, i32 60, i32 61, i32 62, i32 63, i32 188, i32 189, i32 190, i32 191, i32 64, i32 65, i32 66, i32 67, i32 192, i32 193, i32 194, i32 195, i32 68, i32 69, i32 70, i32 71, i32 196, i32 197, i32 198, i32 199, i32 72, i32 73, i32 74, i32 75, i32 200, i32 201, i32 202, i32 203, i32 76, i32 77, i32 78, i32 79, i32 204, i32 205, i32 206, i32 207, i32 80, i32 81, i32 82, i32 83, i32 208, i32 209, i32 210, i32 211, i32 84, i32 85, i32 86, i32 87, i32 212, i32 213, i32 214, i32 215, i32 88, i32 89, i32 90, i32 91, i32 216, i32 217, i32 218, i32 219, i32 92, i32 93, i32 94, i32 95, i32 220, i32 221, i32 222, i32 223, i32 96, i32 97, i32 98, i32 99, i32 224, i32 225, i32 226, i32 227, i32 100, i32 101, i32 102, i32 103, i32 228, i32 229, i32 230, i32 231, i32 104, i32 105, i32 106, i32 107, i32 232, i32 233, i32 234, i32 235, i32 108, i32 109, i32 110, i32 111, i32 236, i32 237, i32 238, i32 239, i32 112, i32 113, i32 114, i32 115, i32 240, i32 241, i32 242, i32 243, i32 116, i32 117, i32 118, i32 119, i32 244, i32 245, i32 246, i32 247, i32 120, i32 121, i32 122, i32 123, i32 248, i32 249, i32 250, i32 251, i32 124, i32 125, i32 126, i32 127, i32 252, i32 253, i32 254, i32 255> @@ -1003,7 +1003,7 @@ define <256 x i8> @vshuff_7c(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_7d: -; CHECK: [[REG7d:r[0-9]+]] = #125 +; CHECK: [[REG7d:r[0-9]+]] = #-3 ; CHECK: vshuff(v1,v0,[[REG7d]]) define <256 x i8> @vshuff_7d(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 2, i32 130, i32 1, i32 129, i32 3, i32 131, i32 4, i32 132, i32 6, i32 134, i32 5, i32 133, i32 7, i32 135, i32 8, i32 136, i32 10, i32 138, i32 9, i32 137, i32 11, i32 139, i32 12, i32 140, i32 14, i32 142, i32 13, i32 141, i32 15, i32 143, i32 16, i32 144, i32 18, i32 146, i32 17, i32 145, i32 19, i32 147, i32 20, i32 148, i32 22, i32 150, i32 21, i32 149, i32 23, i32 151, i32 24, i32 152, i32 26, i32 154, i32 25, i32 153, i32 27, i32 155, i32 28, i32 156, i32 30, i32 158, i32 29, i32 157, i32 31, i32 159, i32 32, i32 160, i32 34, i32 162, i32 33, i32 161, i32 35, i32 163, i32 36, i32 164, i32 38, i32 166, i32 37, i32 165, i32 39, i32 167, i32 40, i32 168, i32 42, i32 170, i32 41, i32 169, i32 43, i32 171, i32 44, i32 172, i32 46, i32 174, i32 45, i32 173, i32 47, i32 175, i32 48, i32 176, i32 50, i32 178, i32 49, i32 177, i32 51, i32 179, i32 52, i32 180, i32 54, i32 182, i32 53, i32 181, i32 55, i32 183, i32 56, i32 184, i32 58, i32 186, i32 57, i32 185, i32 59, i32 187, i32 60, i32 188, i32 62, i32 190, i32 61, i32 189, i32 63, i32 191, i32 64, i32 192, i32 66, i32 194, i32 65, i32 193, i32 67, i32 195, i32 68, i32 196, i32 70, i32 198, i32 69, i32 197, i32 71, i32 199, i32 72, i32 200, i32 74, i32 202, i32 73, i32 201, i32 75, i32 203, i32 76, i32 204, i32 78, i32 206, i32 77, i32 205, i32 
79, i32 207, i32 80, i32 208, i32 82, i32 210, i32 81, i32 209, i32 83, i32 211, i32 84, i32 212, i32 86, i32 214, i32 85, i32 213, i32 87, i32 215, i32 88, i32 216, i32 90, i32 218, i32 89, i32 217, i32 91, i32 219, i32 92, i32 220, i32 94, i32 222, i32 93, i32 221, i32 95, i32 223, i32 96, i32 224, i32 98, i32 226, i32 97, i32 225, i32 99, i32 227, i32 100, i32 228, i32 102, i32 230, i32 101, i32 229, i32 103, i32 231, i32 104, i32 232, i32 106, i32 234, i32 105, i32 233, i32 107, i32 235, i32 108, i32 236, i32 110, i32 238, i32 109, i32 237, i32 111, i32 239, i32 112, i32 240, i32 114, i32 242, i32 113, i32 241, i32 115, i32 243, i32 116, i32 244, i32 118, i32 246, i32 117, i32 245, i32 119, i32 247, i32 120, i32 248, i32 122, i32 250, i32 121, i32 249, i32 123, i32 251, i32 124, i32 252, i32 126, i32 254, i32 125, i32 253, i32 127, i32 255> @@ -1011,7 +1011,7 @@ define <256 x i8> @vshuff_7d(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_7e: -; CHECK: [[REG7e:r[0-9]+]] = #126 +; CHECK: [[REG7e:r[0-9]+]] = #-2 ; CHECK: vshuff(v1,v0,[[REG7e]]) define <256 x i8> @vshuff_7e(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 1, i32 128, i32 129, i32 2, i32 3, i32 130, i32 131, i32 4, i32 5, i32 132, i32 133, i32 6, i32 7, i32 134, i32 135, i32 8, i32 9, i32 136, i32 137, i32 10, i32 11, i32 138, i32 139, i32 12, i32 13, i32 140, i32 141, i32 14, i32 15, i32 142, i32 143, i32 16, i32 17, i32 144, i32 145, i32 18, i32 19, i32 146, i32 147, i32 20, i32 21, i32 148, i32 149, i32 22, i32 23, i32 150, i32 151, i32 24, i32 25, i32 152, i32 153, i32 26, i32 27, i32 154, i32 155, i32 28, i32 29, i32 156, i32 157, i32 30, i32 31, i32 158, i32 159, i32 32, i32 33, i32 160, i32 161, i32 34, i32 35, i32 162, i32 163, i32 36, i32 37, i32 164, i32 165, i32 38, i32 39, i32 166, i32 167, i32 40, i32 41, i32 168, i32 169, i32 42, i32 43, i32 170, i32 171, i32 44, i32 45, i32 172, i32 173, i32 46, i32 47, i32 174, i32 175, i32 48, i32 49, i32 176, i32 177, i32 50, i32 51, i32 178, i32 179, i32 52, i32 53, i32 180, i32 181, i32 54, i32 55, i32 182, i32 183, i32 56, i32 57, i32 184, i32 185, i32 58, i32 59, i32 186, i32 187, i32 60, i32 61, i32 188, i32 189, i32 62, i32 63, i32 190, i32 191, i32 64, i32 65, i32 192, i32 193, i32 66, i32 67, i32 194, i32 195, i32 68, i32 69, i32 196, i32 197, i32 70, i32 71, i32 198, i32 199, i32 72, i32 73, i32 200, i32 201, i32 74, i32 75, i32 202, i32 203, i32 76, i32 77, i32 204, i32 205, i32 78, i32 79, i32 206, i32 207, i32 80, i32 81, i32 208, i32 209, i32 82, i32 83, i32 210, i32 211, i32 84, i32 85, i32 212, i32 213, i32 86, i32 87, i32 214, i32 215, i32 88, i32 89, i32 216, i32 217, i32 90, i32 91, i32 218, i32 219, i32 92, i32 93, i32 220, i32 221, i32 94, i32 95, i32 222, i32 223, i32 96, i32 97, i32 224, i32 225, i32 98, i32 99, i32 226, i32 227, i32 100, i32 101, i32 228, i32 229, i32 102, i32 103, i32 230, i32 231, i32 104, i32 105, i32 232, i32 233, i32 106, i32 107, i32 234, i32 235, i32 108, i32 109, i32 236, i32 237, i32 110, i32 111, i32 238, i32 239, i32 112, i32 113, i32 240, i32 241, i32 114, i32 115, i32 242, i32 243, i32 116, i32 117, i32 244, i32 245, i32 118, i32 119, i32 246, i32 247, i32 120, i32 121, i32 248, i32 249, i32 122, i32 123, i32 250, i32 251, i32 124, i32 125, i32 252, i32 253, i32 126, i32 127, i32 254, i32 255> @@ -1019,7 +1019,7 @@ define <256 x i8> @vshuff_7e(<256 x i8> %v0, <256 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_7f: -; CHECK: [[REG7f:r[0-9]+]] = #127 
+; CHECK: [[REG7f:r[0-9]+]] = #-1 ; CHECK: vshuff(v1,v0,[[REG7f]]) define <256 x i8> @vshuff_7f(<256 x i8> %v0, <256 x i8> %v1) #0 { %p = shufflevector <256 x i8> %v0, <256 x i8> undef, <256 x i32> < i32 0, i32 128, i32 1, i32 129, i32 2, i32 130, i32 3, i32 131, i32 4, i32 132, i32 5, i32 133, i32 6, i32 134, i32 7, i32 135, i32 8, i32 136, i32 9, i32 137, i32 10, i32 138, i32 11, i32 139, i32 12, i32 140, i32 13, i32 141, i32 14, i32 142, i32 15, i32 143, i32 16, i32 144, i32 17, i32 145, i32 18, i32 146, i32 19, i32 147, i32 20, i32 148, i32 21, i32 149, i32 22, i32 150, i32 23, i32 151, i32 24, i32 152, i32 25, i32 153, i32 26, i32 154, i32 27, i32 155, i32 28, i32 156, i32 29, i32 157, i32 30, i32 158, i32 31, i32 159, i32 32, i32 160, i32 33, i32 161, i32 34, i32 162, i32 35, i32 163, i32 36, i32 164, i32 37, i32 165, i32 38, i32 166, i32 39, i32 167, i32 40, i32 168, i32 41, i32 169, i32 42, i32 170, i32 43, i32 171, i32 44, i32 172, i32 45, i32 173, i32 46, i32 174, i32 47, i32 175, i32 48, i32 176, i32 49, i32 177, i32 50, i32 178, i32 51, i32 179, i32 52, i32 180, i32 53, i32 181, i32 54, i32 182, i32 55, i32 183, i32 56, i32 184, i32 57, i32 185, i32 58, i32 186, i32 59, i32 187, i32 60, i32 188, i32 61, i32 189, i32 62, i32 190, i32 63, i32 191, i32 64, i32 192, i32 65, i32 193, i32 66, i32 194, i32 67, i32 195, i32 68, i32 196, i32 69, i32 197, i32 70, i32 198, i32 71, i32 199, i32 72, i32 200, i32 73, i32 201, i32 74, i32 202, i32 75, i32 203, i32 76, i32 204, i32 77, i32 205, i32 78, i32 206, i32 79, i32 207, i32 80, i32 208, i32 81, i32 209, i32 82, i32 210, i32 83, i32 211, i32 84, i32 212, i32 85, i32 213, i32 86, i32 214, i32 87, i32 215, i32 88, i32 216, i32 89, i32 217, i32 90, i32 218, i32 91, i32 219, i32 92, i32 220, i32 93, i32 221, i32 94, i32 222, i32 95, i32 223, i32 96, i32 224, i32 97, i32 225, i32 98, i32 226, i32 99, i32 227, i32 100, i32 228, i32 101, i32 229, i32 102, i32 230, i32 103, i32 231, i32 104, i32 232, i32 105, i32 233, i32 106, i32 234, i32 107, i32 235, i32 108, i32 236, i32 109, i32 237, i32 110, i32 238, i32 111, i32 239, i32 112, i32 240, i32 113, i32 241, i32 114, i32 242, i32 115, i32 243, i32 116, i32 244, i32 117, i32 245, i32 118, i32 246, i32 119, i32 247, i32 120, i32 248, i32 121, i32 249, i32 122, i32 250, i32 123, i32 251, i32 124, i32 252, i32 125, i32 253, i32 126, i32 254, i32 127, i32 255> diff --git a/llvm/test/CodeGen/Hexagon/autohvx/shuff-64b.ll b/llvm/test/CodeGen/Hexagon/autohvx/shuff-64b.ll index b33b3be8052a4..40d5907a02c0e 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/shuff-64b.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/shuff-64b.ll @@ -259,7 +259,7 @@ define <128 x i8> @vshuff_1f(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_20: -; CHECK: [[REG20:r[0-9]+]] = #32 +; CHECK: [[REG20:r[0-9]+]] = #-32 ; CHECK: vshuff(v1,v0,[[REG20]]) define <128 x i8> @vshuff_20(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 
38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> @@ -267,7 +267,7 @@ define <128 x i8> @vshuff_20(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_21: -; CHECK: [[REG21:r[0-9]+]] = #33 +; CHECK: [[REG21:r[0-9]+]] = #-31 ; CHECK: vshuff(v1,v0,[[REG21]]) define <128 x i8> @vshuff_21(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 2, i32 66, i32 4, i32 68, i32 6, i32 70, i32 8, i32 72, i32 10, i32 74, i32 12, i32 76, i32 14, i32 78, i32 16, i32 80, i32 18, i32 82, i32 20, i32 84, i32 22, i32 86, i32 24, i32 88, i32 26, i32 90, i32 28, i32 92, i32 30, i32 94, i32 1, i32 65, i32 3, i32 67, i32 5, i32 69, i32 7, i32 71, i32 9, i32 73, i32 11, i32 75, i32 13, i32 77, i32 15, i32 79, i32 17, i32 81, i32 19, i32 83, i32 21, i32 85, i32 23, i32 87, i32 25, i32 89, i32 27, i32 91, i32 29, i32 93, i32 31, i32 95, i32 32, i32 96, i32 34, i32 98, i32 36, i32 100, i32 38, i32 102, i32 40, i32 104, i32 42, i32 106, i32 44, i32 108, i32 46, i32 110, i32 48, i32 112, i32 50, i32 114, i32 52, i32 116, i32 54, i32 118, i32 56, i32 120, i32 58, i32 122, i32 60, i32 124, i32 62, i32 126, i32 33, i32 97, i32 35, i32 99, i32 37, i32 101, i32 39, i32 103, i32 41, i32 105, i32 43, i32 107, i32 45, i32 109, i32 47, i32 111, i32 49, i32 113, i32 51, i32 115, i32 53, i32 117, i32 55, i32 119, i32 57, i32 121, i32 59, i32 123, i32 61, i32 125, i32 63, i32 127> @@ -275,7 +275,7 @@ define <128 x i8> @vshuff_21(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_22: -; CHECK: [[REG22:r[0-9]+]] = #34 +; CHECK: [[REG22:r[0-9]+]] = #-30 ; CHECK: vshuff(v1,v0,[[REG22]]) define <128 x i8> @vshuff_22(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 64, i32 65, i32 4, i32 5, i32 68, i32 69, i32 8, i32 9, i32 72, i32 73, i32 12, i32 13, i32 76, i32 77, i32 16, i32 17, i32 80, i32 81, i32 20, i32 21, i32 84, i32 85, i32 24, i32 25, i32 88, i32 89, i32 28, i32 29, i32 92, i32 93, i32 2, i32 3, i32 66, i32 67, i32 6, i32 7, i32 70, i32 71, i32 10, i32 11, i32 74, i32 75, i32 14, i32 15, i32 78, i32 79, i32 18, i32 19, i32 82, i32 83, i32 22, i32 23, i32 86, i32 87, i32 26, i32 27, i32 90, i32 91, i32 30, i32 31, i32 94, i32 95, i32 32, i32 33, i32 96, i32 97, i32 36, i32 37, i32 100, i32 101, i32 40, i32 41, i32 104, i32 105, i32 44, i32 45, i32 108, i32 109, i32 48, i32 49, i32 112, i32 113, i32 52, i32 53, i32 116, i32 117, i32 56, i32 57, i32 120, i32 121, i32 60, i32 61, i32 124, i32 125, i32 34, i32 35, i32 98, i32 99, i32 38, i32 39, i32 102, i32 103, i32 42, i32 43, i32 106, i32 107, i32 46, i32 47, i32 110, i32 111, i32 50, i32 51, i32 114, i32 115, i32 54, i32 55, i32 118, i32 119, i32 58, i32 59, i32 122, i32 123, i32 62, i32 63, i32 126, i32 127> @@ -283,7 +283,7 @@ define <128 x i8> @vshuff_22(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_23: -; CHECK: [[REG23:r[0-9]+]] = #35 +; CHECK: [[REG23:r[0-9]+]] = #-29 ; CHECK: vshuff(v1,v0,[[REG23]]) define <128 x i8> @vshuff_23(<128 x i8> %v0, <128 x i8> %v1) #0 { %p 
= shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 1, i32 65, i32 4, i32 68, i32 5, i32 69, i32 8, i32 72, i32 9, i32 73, i32 12, i32 76, i32 13, i32 77, i32 16, i32 80, i32 17, i32 81, i32 20, i32 84, i32 21, i32 85, i32 24, i32 88, i32 25, i32 89, i32 28, i32 92, i32 29, i32 93, i32 2, i32 66, i32 3, i32 67, i32 6, i32 70, i32 7, i32 71, i32 10, i32 74, i32 11, i32 75, i32 14, i32 78, i32 15, i32 79, i32 18, i32 82, i32 19, i32 83, i32 22, i32 86, i32 23, i32 87, i32 26, i32 90, i32 27, i32 91, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 36, i32 100, i32 37, i32 101, i32 40, i32 104, i32 41, i32 105, i32 44, i32 108, i32 45, i32 109, i32 48, i32 112, i32 49, i32 113, i32 52, i32 116, i32 53, i32 117, i32 56, i32 120, i32 57, i32 121, i32 60, i32 124, i32 61, i32 125, i32 34, i32 98, i32 35, i32 99, i32 38, i32 102, i32 39, i32 103, i32 42, i32 106, i32 43, i32 107, i32 46, i32 110, i32 47, i32 111, i32 50, i32 114, i32 51, i32 115, i32 54, i32 118, i32 55, i32 119, i32 58, i32 122, i32 59, i32 123, i32 62, i32 126, i32 63, i32 127> @@ -291,7 +291,7 @@ define <128 x i8> @vshuff_23(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_24: -; CHECK: [[REG24:r[0-9]+]] = #36 +; CHECK: [[REG24:r[0-9]+]] = #-28 ; CHECK: vshuff(v1,v0,[[REG24]]) define <128 x i8> @vshuff_24(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 64, i32 65, i32 66, i32 67, i32 8, i32 9, i32 10, i32 11, i32 72, i32 73, i32 74, i32 75, i32 16, i32 17, i32 18, i32 19, i32 80, i32 81, i32 82, i32 83, i32 24, i32 25, i32 26, i32 27, i32 88, i32 89, i32 90, i32 91, i32 4, i32 5, i32 6, i32 7, i32 68, i32 69, i32 70, i32 71, i32 12, i32 13, i32 14, i32 15, i32 76, i32 77, i32 78, i32 79, i32 20, i32 21, i32 22, i32 23, i32 84, i32 85, i32 86, i32 87, i32 28, i32 29, i32 30, i32 31, i32 92, i32 93, i32 94, i32 95, i32 32, i32 33, i32 34, i32 35, i32 96, i32 97, i32 98, i32 99, i32 40, i32 41, i32 42, i32 43, i32 104, i32 105, i32 106, i32 107, i32 48, i32 49, i32 50, i32 51, i32 112, i32 113, i32 114, i32 115, i32 56, i32 57, i32 58, i32 59, i32 120, i32 121, i32 122, i32 123, i32 36, i32 37, i32 38, i32 39, i32 100, i32 101, i32 102, i32 103, i32 44, i32 45, i32 46, i32 47, i32 108, i32 109, i32 110, i32 111, i32 52, i32 53, i32 54, i32 55, i32 116, i32 117, i32 118, i32 119, i32 60, i32 61, i32 62, i32 63, i32 124, i32 125, i32 126, i32 127> @@ -299,7 +299,7 @@ define <128 x i8> @vshuff_24(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_25: -; CHECK: [[REG25:r[0-9]+]] = #37 +; CHECK: [[REG25:r[0-9]+]] = #-27 ; CHECK: vshuff(v1,v0,[[REG25]]) define <128 x i8> @vshuff_25(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 2, i32 66, i32 1, i32 65, i32 3, i32 67, i32 8, i32 72, i32 10, i32 74, i32 9, i32 73, i32 11, i32 75, i32 16, i32 80, i32 18, i32 82, i32 17, i32 81, i32 19, i32 83, i32 24, i32 88, i32 26, i32 90, i32 25, i32 89, i32 27, i32 91, i32 4, i32 68, i32 6, i32 70, i32 5, i32 69, i32 7, i32 71, i32 12, i32 76, i32 14, i32 78, i32 13, i32 77, i32 15, i32 79, i32 20, i32 84, i32 22, i32 86, i32 21, i32 85, i32 23, i32 87, i32 28, i32 92, i32 30, i32 94, i32 29, i32 93, i32 31, i32 95, i32 32, i32 96, i32 34, i32 98, i32 33, i32 97, i32 35, i32 99, i32 40, i32 104, i32 42, i32 106, i32 41, i32 105, i32 43, i32 107, i32 48, i32 112, i32 50, i32 114, i32 49, i32 113, i32 51, i32 115, i32 
56, i32 120, i32 58, i32 122, i32 57, i32 121, i32 59, i32 123, i32 36, i32 100, i32 38, i32 102, i32 37, i32 101, i32 39, i32 103, i32 44, i32 108, i32 46, i32 110, i32 45, i32 109, i32 47, i32 111, i32 52, i32 116, i32 54, i32 118, i32 53, i32 117, i32 55, i32 119, i32 60, i32 124, i32 62, i32 126, i32 61, i32 125, i32 63, i32 127> @@ -307,7 +307,7 @@ define <128 x i8> @vshuff_25(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_26: -; CHECK: [[REG26:r[0-9]+]] = #38 +; CHECK: [[REG26:r[0-9]+]] = #-26 ; CHECK: vshuff(v1,v0,[[REG26]]) define <128 x i8> @vshuff_26(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 64, i32 65, i32 2, i32 3, i32 66, i32 67, i32 8, i32 9, i32 72, i32 73, i32 10, i32 11, i32 74, i32 75, i32 16, i32 17, i32 80, i32 81, i32 18, i32 19, i32 82, i32 83, i32 24, i32 25, i32 88, i32 89, i32 26, i32 27, i32 90, i32 91, i32 4, i32 5, i32 68, i32 69, i32 6, i32 7, i32 70, i32 71, i32 12, i32 13, i32 76, i32 77, i32 14, i32 15, i32 78, i32 79, i32 20, i32 21, i32 84, i32 85, i32 22, i32 23, i32 86, i32 87, i32 28, i32 29, i32 92, i32 93, i32 30, i32 31, i32 94, i32 95, i32 32, i32 33, i32 96, i32 97, i32 34, i32 35, i32 98, i32 99, i32 40, i32 41, i32 104, i32 105, i32 42, i32 43, i32 106, i32 107, i32 48, i32 49, i32 112, i32 113, i32 50, i32 51, i32 114, i32 115, i32 56, i32 57, i32 120, i32 121, i32 58, i32 59, i32 122, i32 123, i32 36, i32 37, i32 100, i32 101, i32 38, i32 39, i32 102, i32 103, i32 44, i32 45, i32 108, i32 109, i32 46, i32 47, i32 110, i32 111, i32 52, i32 53, i32 116, i32 117, i32 54, i32 55, i32 118, i32 119, i32 60, i32 61, i32 124, i32 125, i32 62, i32 63, i32 126, i32 127> @@ -315,7 +315,7 @@ define <128 x i8> @vshuff_26(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_27: -; CHECK: [[REG27:r[0-9]+]] = #39 +; CHECK: [[REG27:r[0-9]+]] = #-25 ; CHECK: vshuff(v1,v0,[[REG27]]) define <128 x i8> @vshuff_27(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127> @@ -323,7 +323,7 @@ define <128 x i8> @vshuff_27(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_28: -; CHECK: [[REG28:r[0-9]+]] = #40 +; CHECK: [[REG28:r[0-9]+]] = #-24 ; CHECK: vshuff(v1,v0,[[REG28]]) define <128 x i8> @vshuff_28(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 64, i32 65, i32 66, i32 67, 
i32 68, i32 69, i32 70, i32 71, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> @@ -331,7 +331,7 @@ define <128 x i8> @vshuff_28(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_29: -; CHECK: [[REG29:r[0-9]+]] = #41 +; CHECK: [[REG29:r[0-9]+]] = #-23 ; CHECK: vshuff(v1,v0,[[REG29]]) define <128 x i8> @vshuff_29(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 2, i32 66, i32 4, i32 68, i32 6, i32 70, i32 1, i32 65, i32 3, i32 67, i32 5, i32 69, i32 7, i32 71, i32 16, i32 80, i32 18, i32 82, i32 20, i32 84, i32 22, i32 86, i32 17, i32 81, i32 19, i32 83, i32 21, i32 85, i32 23, i32 87, i32 8, i32 72, i32 10, i32 74, i32 12, i32 76, i32 14, i32 78, i32 9, i32 73, i32 11, i32 75, i32 13, i32 77, i32 15, i32 79, i32 24, i32 88, i32 26, i32 90, i32 28, i32 92, i32 30, i32 94, i32 25, i32 89, i32 27, i32 91, i32 29, i32 93, i32 31, i32 95, i32 32, i32 96, i32 34, i32 98, i32 36, i32 100, i32 38, i32 102, i32 33, i32 97, i32 35, i32 99, i32 37, i32 101, i32 39, i32 103, i32 48, i32 112, i32 50, i32 114, i32 52, i32 116, i32 54, i32 118, i32 49, i32 113, i32 51, i32 115, i32 53, i32 117, i32 55, i32 119, i32 40, i32 104, i32 42, i32 106, i32 44, i32 108, i32 46, i32 110, i32 41, i32 105, i32 43, i32 107, i32 45, i32 109, i32 47, i32 111, i32 56, i32 120, i32 58, i32 122, i32 60, i32 124, i32 62, i32 126, i32 57, i32 121, i32 59, i32 123, i32 61, i32 125, i32 63, i32 127> @@ -339,7 +339,7 @@ define <128 x i8> @vshuff_29(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_2a: -; CHECK: [[REG2a:r[0-9]+]] = #42 +; CHECK: [[REG2a:r[0-9]+]] = #-22 ; CHECK: vshuff(v1,v0,[[REG2a]]) define <128 x i8> @vshuff_2a(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 64, i32 65, i32 4, i32 5, i32 68, i32 69, i32 2, i32 3, i32 66, i32 67, i32 6, i32 7, i32 70, i32 71, i32 16, i32 17, i32 80, i32 81, i32 20, i32 21, i32 84, i32 85, i32 18, i32 19, i32 82, i32 83, i32 22, i32 23, i32 86, i32 87, i32 8, i32 9, i32 72, i32 73, i32 12, i32 13, i32 76, i32 77, i32 10, i32 11, i32 74, i32 75, i32 14, i32 15, i32 78, i32 79, i32 24, i32 25, i32 88, i32 89, i32 28, i32 29, i32 92, i32 93, i32 26, i32 27, i32 90, i32 91, i32 30, i32 31, i32 94, i32 95, i32 32, i32 33, i32 96, i32 97, i32 36, i32 37, i32 100, i32 101, i32 34, i32 35, i32 98, i32 99, i32 38, i32 39, i32 102, i32 103, i32 48, i32 49, i32 112, i32 113, i32 52, i32 53, i32 116, i32 117, i32 50, i32 51, i32 114, i32 115, i32 54, i32 55, i32 118, i32 119, i32 40, i32 41, i32 104, i32 105, i32 44, i32 45, i32 108, i32 109, i32 42, i32 43, i32 
106, i32 107, i32 46, i32 47, i32 110, i32 111, i32 56, i32 57, i32 120, i32 121, i32 60, i32 61, i32 124, i32 125, i32 58, i32 59, i32 122, i32 123, i32 62, i32 63, i32 126, i32 127> @@ -347,7 +347,7 @@ define <128 x i8> @vshuff_2a(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_2b: -; CHECK: [[REG2b:r[0-9]+]] = #43 +; CHECK: [[REG2b:r[0-9]+]] = #-21 ; CHECK: vshuff(v1,v0,[[REG2b]]) define <128 x i8> @vshuff_2b(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 1, i32 65, i32 4, i32 68, i32 5, i32 69, i32 2, i32 66, i32 3, i32 67, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 20, i32 84, i32 21, i32 85, i32 18, i32 82, i32 19, i32 83, i32 22, i32 86, i32 23, i32 87, i32 8, i32 72, i32 9, i32 73, i32 12, i32 76, i32 13, i32 77, i32 10, i32 74, i32 11, i32 75, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 28, i32 92, i32 29, i32 93, i32 26, i32 90, i32 27, i32 91, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 36, i32 100, i32 37, i32 101, i32 34, i32 98, i32 35, i32 99, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 52, i32 116, i32 53, i32 117, i32 50, i32 114, i32 51, i32 115, i32 54, i32 118, i32 55, i32 119, i32 40, i32 104, i32 41, i32 105, i32 44, i32 108, i32 45, i32 109, i32 42, i32 106, i32 43, i32 107, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 60, i32 124, i32 61, i32 125, i32 58, i32 122, i32 59, i32 123, i32 62, i32 126, i32 63, i32 127> @@ -355,7 +355,7 @@ define <128 x i8> @vshuff_2b(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_2c: -; CHECK: [[REG2c:r[0-9]+]] = #44 +; CHECK: [[REG2c:r[0-9]+]] = #-20 ; CHECK: vshuff(v1,v0,[[REG2c]]) define <128 x i8> @vshuff_2c(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 64, i32 65, i32 66, i32 67, i32 4, i32 5, i32 6, i32 7, i32 68, i32 69, i32 70, i32 71, i32 16, i32 17, i32 18, i32 19, i32 80, i32 81, i32 82, i32 83, i32 20, i32 21, i32 22, i32 23, i32 84, i32 85, i32 86, i32 87, i32 8, i32 9, i32 10, i32 11, i32 72, i32 73, i32 74, i32 75, i32 12, i32 13, i32 14, i32 15, i32 76, i32 77, i32 78, i32 79, i32 24, i32 25, i32 26, i32 27, i32 88, i32 89, i32 90, i32 91, i32 28, i32 29, i32 30, i32 31, i32 92, i32 93, i32 94, i32 95, i32 32, i32 33, i32 34, i32 35, i32 96, i32 97, i32 98, i32 99, i32 36, i32 37, i32 38, i32 39, i32 100, i32 101, i32 102, i32 103, i32 48, i32 49, i32 50, i32 51, i32 112, i32 113, i32 114, i32 115, i32 52, i32 53, i32 54, i32 55, i32 116, i32 117, i32 118, i32 119, i32 40, i32 41, i32 42, i32 43, i32 104, i32 105, i32 106, i32 107, i32 44, i32 45, i32 46, i32 47, i32 108, i32 109, i32 110, i32 111, i32 56, i32 57, i32 58, i32 59, i32 120, i32 121, i32 122, i32 123, i32 60, i32 61, i32 62, i32 63, i32 124, i32 125, i32 126, i32 127> @@ -363,7 +363,7 @@ define <128 x i8> @vshuff_2c(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_2d: -; CHECK: [[REG2d:r[0-9]+]] = #45 +; CHECK: [[REG2d:r[0-9]+]] = #-19 ; CHECK: vshuff(v1,v0,[[REG2d]]) define <128 x i8> @vshuff_2d(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 2, i32 66, i32 1, i32 65, i32 3, i32 67, i32 4, i32 68, i32 6, i32 70, i32 5, i32 69, i32 7, i32 71, i32 16, i32 80, i32 18, i32 82, i32 17, i32 81, i32 19, i32 83, i32 20, i32 84, i32 22, i32 86, i32 21, i32 85, i32 23, 
i32 87, i32 8, i32 72, i32 10, i32 74, i32 9, i32 73, i32 11, i32 75, i32 12, i32 76, i32 14, i32 78, i32 13, i32 77, i32 15, i32 79, i32 24, i32 88, i32 26, i32 90, i32 25, i32 89, i32 27, i32 91, i32 28, i32 92, i32 30, i32 94, i32 29, i32 93, i32 31, i32 95, i32 32, i32 96, i32 34, i32 98, i32 33, i32 97, i32 35, i32 99, i32 36, i32 100, i32 38, i32 102, i32 37, i32 101, i32 39, i32 103, i32 48, i32 112, i32 50, i32 114, i32 49, i32 113, i32 51, i32 115, i32 52, i32 116, i32 54, i32 118, i32 53, i32 117, i32 55, i32 119, i32 40, i32 104, i32 42, i32 106, i32 41, i32 105, i32 43, i32 107, i32 44, i32 108, i32 46, i32 110, i32 45, i32 109, i32 47, i32 111, i32 56, i32 120, i32 58, i32 122, i32 57, i32 121, i32 59, i32 123, i32 60, i32 124, i32 62, i32 126, i32 61, i32 125, i32 63, i32 127> @@ -371,7 +371,7 @@ define <128 x i8> @vshuff_2d(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_2e: -; CHECK: [[REG2e:r[0-9]+]] = #46 +; CHECK: [[REG2e:r[0-9]+]] = #-18 ; CHECK: vshuff(v1,v0,[[REG2e]]) define <128 x i8> @vshuff_2e(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 64, i32 65, i32 2, i32 3, i32 66, i32 67, i32 4, i32 5, i32 68, i32 69, i32 6, i32 7, i32 70, i32 71, i32 16, i32 17, i32 80, i32 81, i32 18, i32 19, i32 82, i32 83, i32 20, i32 21, i32 84, i32 85, i32 22, i32 23, i32 86, i32 87, i32 8, i32 9, i32 72, i32 73, i32 10, i32 11, i32 74, i32 75, i32 12, i32 13, i32 76, i32 77, i32 14, i32 15, i32 78, i32 79, i32 24, i32 25, i32 88, i32 89, i32 26, i32 27, i32 90, i32 91, i32 28, i32 29, i32 92, i32 93, i32 30, i32 31, i32 94, i32 95, i32 32, i32 33, i32 96, i32 97, i32 34, i32 35, i32 98, i32 99, i32 36, i32 37, i32 100, i32 101, i32 38, i32 39, i32 102, i32 103, i32 48, i32 49, i32 112, i32 113, i32 50, i32 51, i32 114, i32 115, i32 52, i32 53, i32 116, i32 117, i32 54, i32 55, i32 118, i32 119, i32 40, i32 41, i32 104, i32 105, i32 42, i32 43, i32 106, i32 107, i32 44, i32 45, i32 108, i32 109, i32 46, i32 47, i32 110, i32 111, i32 56, i32 57, i32 120, i32 121, i32 58, i32 59, i32 122, i32 123, i32 60, i32 61, i32 124, i32 125, i32 62, i32 63, i32 126, i32 127> @@ -379,7 +379,7 @@ define <128 x i8> @vshuff_2e(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_2f: -; CHECK: [[REG2f:r[0-9]+]] = #47 +; CHECK: [[REG2f:r[0-9]+]] = #-17 ; CHECK: vshuff(v1,v0,[[REG2f]]) define <128 x i8> @vshuff_2f(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 
62, i32 126, i32 63, i32 127> @@ -387,7 +387,7 @@ define <128 x i8> @vshuff_2f(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_30: -; CHECK: [[REG30:r[0-9]+]] = #48 +; CHECK: [[REG30:r[0-9]+]] = #-16 ; CHECK: vshuff(v1,v0,[[REG30]]) define <128 x i8> @vshuff_30(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> @@ -395,7 +395,7 @@ define <128 x i8> @vshuff_30(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_31: -; CHECK: [[REG31:r[0-9]+]] = #49 +; CHECK: [[REG31:r[0-9]+]] = #-15 ; CHECK: vshuff(v1,v0,[[REG31]]) define <128 x i8> @vshuff_31(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 2, i32 66, i32 4, i32 68, i32 6, i32 70, i32 8, i32 72, i32 10, i32 74, i32 12, i32 76, i32 14, i32 78, i32 1, i32 65, i32 3, i32 67, i32 5, i32 69, i32 7, i32 71, i32 9, i32 73, i32 11, i32 75, i32 13, i32 77, i32 15, i32 79, i32 16, i32 80, i32 18, i32 82, i32 20, i32 84, i32 22, i32 86, i32 24, i32 88, i32 26, i32 90, i32 28, i32 92, i32 30, i32 94, i32 17, i32 81, i32 19, i32 83, i32 21, i32 85, i32 23, i32 87, i32 25, i32 89, i32 27, i32 91, i32 29, i32 93, i32 31, i32 95, i32 32, i32 96, i32 34, i32 98, i32 36, i32 100, i32 38, i32 102, i32 40, i32 104, i32 42, i32 106, i32 44, i32 108, i32 46, i32 110, i32 33, i32 97, i32 35, i32 99, i32 37, i32 101, i32 39, i32 103, i32 41, i32 105, i32 43, i32 107, i32 45, i32 109, i32 47, i32 111, i32 48, i32 112, i32 50, i32 114, i32 52, i32 116, i32 54, i32 118, i32 56, i32 120, i32 58, i32 122, i32 60, i32 124, i32 62, i32 126, i32 49, i32 113, i32 51, i32 115, i32 53, i32 117, i32 55, i32 119, i32 57, i32 121, i32 59, i32 123, i32 61, i32 125, i32 63, i32 127> @@ -403,7 +403,7 @@ define <128 x i8> @vshuff_31(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_32: -; CHECK: [[REG32:r[0-9]+]] = #50 +; CHECK: [[REG32:r[0-9]+]] = #-14 ; CHECK: vshuff(v1,v0,[[REG32]]) define <128 x i8> @vshuff_32(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 64, i32 65, i32 4, i32 5, i32 68, i32 69, i32 8, i32 9, i32 72, i32 73, i32 12, i32 13, i32 76, i32 77, i32 2, i32 3, i32 66, i32 67, i32 6, i32 7, i32 70, i32 71, i32 10, i32 11, i32 74, i32 75, i32 14, i32 15, i32 78, i32 79, i32 16, i32 17, i32 80, i32 81, i32 20, i32 21, i32 84, i32 85, i32 24, i32 25, i32 88, i32 89, i32 28, i32 29, i32 92, i32 93, i32 18, i32 19, i32 
82, i32 83, i32 22, i32 23, i32 86, i32 87, i32 26, i32 27, i32 90, i32 91, i32 30, i32 31, i32 94, i32 95, i32 32, i32 33, i32 96, i32 97, i32 36, i32 37, i32 100, i32 101, i32 40, i32 41, i32 104, i32 105, i32 44, i32 45, i32 108, i32 109, i32 34, i32 35, i32 98, i32 99, i32 38, i32 39, i32 102, i32 103, i32 42, i32 43, i32 106, i32 107, i32 46, i32 47, i32 110, i32 111, i32 48, i32 49, i32 112, i32 113, i32 52, i32 53, i32 116, i32 117, i32 56, i32 57, i32 120, i32 121, i32 60, i32 61, i32 124, i32 125, i32 50, i32 51, i32 114, i32 115, i32 54, i32 55, i32 118, i32 119, i32 58, i32 59, i32 122, i32 123, i32 62, i32 63, i32 126, i32 127> @@ -411,7 +411,7 @@ define <128 x i8> @vshuff_32(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_33: -; CHECK: [[REG33:r[0-9]+]] = #51 +; CHECK: [[REG33:r[0-9]+]] = #-13 ; CHECK: vshuff(v1,v0,[[REG33]]) define <128 x i8> @vshuff_33(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 1, i32 65, i32 4, i32 68, i32 5, i32 69, i32 8, i32 72, i32 9, i32 73, i32 12, i32 76, i32 13, i32 77, i32 2, i32 66, i32 3, i32 67, i32 6, i32 70, i32 7, i32 71, i32 10, i32 74, i32 11, i32 75, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 20, i32 84, i32 21, i32 85, i32 24, i32 88, i32 25, i32 89, i32 28, i32 92, i32 29, i32 93, i32 18, i32 82, i32 19, i32 83, i32 22, i32 86, i32 23, i32 87, i32 26, i32 90, i32 27, i32 91, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 36, i32 100, i32 37, i32 101, i32 40, i32 104, i32 41, i32 105, i32 44, i32 108, i32 45, i32 109, i32 34, i32 98, i32 35, i32 99, i32 38, i32 102, i32 39, i32 103, i32 42, i32 106, i32 43, i32 107, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 52, i32 116, i32 53, i32 117, i32 56, i32 120, i32 57, i32 121, i32 60, i32 124, i32 61, i32 125, i32 50, i32 114, i32 51, i32 115, i32 54, i32 118, i32 55, i32 119, i32 58, i32 122, i32 59, i32 123, i32 62, i32 126, i32 63, i32 127> @@ -419,7 +419,7 @@ define <128 x i8> @vshuff_33(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_34: -; CHECK: [[REG34:r[0-9]+]] = #52 +; CHECK: [[REG34:r[0-9]+]] = #-12 ; CHECK: vshuff(v1,v0,[[REG34]]) define <128 x i8> @vshuff_34(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 64, i32 65, i32 66, i32 67, i32 8, i32 9, i32 10, i32 11, i32 72, i32 73, i32 74, i32 75, i32 4, i32 5, i32 6, i32 7, i32 68, i32 69, i32 70, i32 71, i32 12, i32 13, i32 14, i32 15, i32 76, i32 77, i32 78, i32 79, i32 16, i32 17, i32 18, i32 19, i32 80, i32 81, i32 82, i32 83, i32 24, i32 25, i32 26, i32 27, i32 88, i32 89, i32 90, i32 91, i32 20, i32 21, i32 22, i32 23, i32 84, i32 85, i32 86, i32 87, i32 28, i32 29, i32 30, i32 31, i32 92, i32 93, i32 94, i32 95, i32 32, i32 33, i32 34, i32 35, i32 96, i32 97, i32 98, i32 99, i32 40, i32 41, i32 42, i32 43, i32 104, i32 105, i32 106, i32 107, i32 36, i32 37, i32 38, i32 39, i32 100, i32 101, i32 102, i32 103, i32 44, i32 45, i32 46, i32 47, i32 108, i32 109, i32 110, i32 111, i32 48, i32 49, i32 50, i32 51, i32 112, i32 113, i32 114, i32 115, i32 56, i32 57, i32 58, i32 59, i32 120, i32 121, i32 122, i32 123, i32 52, i32 53, i32 54, i32 55, i32 116, i32 117, i32 118, i32 119, i32 60, i32 61, i32 62, i32 63, i32 124, i32 125, i32 126, i32 127> @@ -427,7 +427,7 @@ define <128 x i8> @vshuff_34(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_35: -; CHECK: 
[[REG35:r[0-9]+]] = #53 +; CHECK: [[REG35:r[0-9]+]] = #-11 ; CHECK: vshuff(v1,v0,[[REG35]]) define <128 x i8> @vshuff_35(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 2, i32 66, i32 1, i32 65, i32 3, i32 67, i32 8, i32 72, i32 10, i32 74, i32 9, i32 73, i32 11, i32 75, i32 4, i32 68, i32 6, i32 70, i32 5, i32 69, i32 7, i32 71, i32 12, i32 76, i32 14, i32 78, i32 13, i32 77, i32 15, i32 79, i32 16, i32 80, i32 18, i32 82, i32 17, i32 81, i32 19, i32 83, i32 24, i32 88, i32 26, i32 90, i32 25, i32 89, i32 27, i32 91, i32 20, i32 84, i32 22, i32 86, i32 21, i32 85, i32 23, i32 87, i32 28, i32 92, i32 30, i32 94, i32 29, i32 93, i32 31, i32 95, i32 32, i32 96, i32 34, i32 98, i32 33, i32 97, i32 35, i32 99, i32 40, i32 104, i32 42, i32 106, i32 41, i32 105, i32 43, i32 107, i32 36, i32 100, i32 38, i32 102, i32 37, i32 101, i32 39, i32 103, i32 44, i32 108, i32 46, i32 110, i32 45, i32 109, i32 47, i32 111, i32 48, i32 112, i32 50, i32 114, i32 49, i32 113, i32 51, i32 115, i32 56, i32 120, i32 58, i32 122, i32 57, i32 121, i32 59, i32 123, i32 52, i32 116, i32 54, i32 118, i32 53, i32 117, i32 55, i32 119, i32 60, i32 124, i32 62, i32 126, i32 61, i32 125, i32 63, i32 127> @@ -435,7 +435,7 @@ define <128 x i8> @vshuff_35(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_36: -; CHECK: [[REG36:r[0-9]+]] = #54 +; CHECK: [[REG36:r[0-9]+]] = #-10 ; CHECK: vshuff(v1,v0,[[REG36]]) define <128 x i8> @vshuff_36(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 64, i32 65, i32 2, i32 3, i32 66, i32 67, i32 8, i32 9, i32 72, i32 73, i32 10, i32 11, i32 74, i32 75, i32 4, i32 5, i32 68, i32 69, i32 6, i32 7, i32 70, i32 71, i32 12, i32 13, i32 76, i32 77, i32 14, i32 15, i32 78, i32 79, i32 16, i32 17, i32 80, i32 81, i32 18, i32 19, i32 82, i32 83, i32 24, i32 25, i32 88, i32 89, i32 26, i32 27, i32 90, i32 91, i32 20, i32 21, i32 84, i32 85, i32 22, i32 23, i32 86, i32 87, i32 28, i32 29, i32 92, i32 93, i32 30, i32 31, i32 94, i32 95, i32 32, i32 33, i32 96, i32 97, i32 34, i32 35, i32 98, i32 99, i32 40, i32 41, i32 104, i32 105, i32 42, i32 43, i32 106, i32 107, i32 36, i32 37, i32 100, i32 101, i32 38, i32 39, i32 102, i32 103, i32 44, i32 45, i32 108, i32 109, i32 46, i32 47, i32 110, i32 111, i32 48, i32 49, i32 112, i32 113, i32 50, i32 51, i32 114, i32 115, i32 56, i32 57, i32 120, i32 121, i32 58, i32 59, i32 122, i32 123, i32 52, i32 53, i32 116, i32 117, i32 54, i32 55, i32 118, i32 119, i32 60, i32 61, i32 124, i32 125, i32 62, i32 63, i32 126, i32 127> @@ -443,7 +443,7 @@ define <128 x i8> @vshuff_36(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_37: -; CHECK: [[REG37:r[0-9]+]] = #55 +; CHECK: [[REG37:r[0-9]+]] = #-9 ; CHECK: vshuff(v1,v0,[[REG37]]) define <128 x i8> @vshuff_37(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 
98, i32 35, i32 99, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127> @@ -451,7 +451,7 @@ define <128 x i8> @vshuff_37(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_38: -; CHECK: [[REG38:r[0-9]+]] = #56 +; CHECK: [[REG38:r[0-9]+]] = #-8 ; CHECK: vshuff(v1,v0,[[REG38]]) define <128 x i8> @vshuff_38(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> @@ -459,7 +459,7 @@ define <128 x i8> @vshuff_38(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_39: -; CHECK: [[REG39:r[0-9]+]] = #57 +; CHECK: [[REG39:r[0-9]+]] = #-7 ; CHECK: vshuff(v1,v0,[[REG39]]) define <128 x i8> @vshuff_39(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 2, i32 66, i32 4, i32 68, i32 6, i32 70, i32 1, i32 65, i32 3, i32 67, i32 5, i32 69, i32 7, i32 71, i32 8, i32 72, i32 10, i32 74, i32 12, i32 76, i32 14, i32 78, i32 9, i32 73, i32 11, i32 75, i32 13, i32 77, i32 15, i32 79, i32 16, i32 80, i32 18, i32 82, i32 20, i32 84, i32 22, i32 86, i32 17, i32 81, i32 19, i32 83, i32 21, i32 85, i32 23, i32 87, i32 24, i32 88, i32 26, i32 90, i32 28, i32 92, i32 30, i32 94, i32 25, i32 89, i32 27, i32 91, i32 29, i32 93, i32 31, i32 95, i32 32, i32 96, i32 34, i32 98, i32 36, i32 100, i32 38, i32 102, i32 33, i32 97, i32 35, i32 99, i32 37, i32 101, i32 39, i32 103, i32 40, i32 104, i32 42, i32 106, i32 44, i32 108, i32 46, i32 110, i32 41, i32 105, i32 43, i32 107, i32 45, i32 109, i32 47, i32 111, i32 48, i32 112, i32 50, i32 114, i32 52, i32 116, i32 54, i32 118, i32 49, i32 113, i32 51, i32 115, i32 53, i32 117, i32 55, i32 119, i32 56, i32 120, i32 58, i32 122, i32 60, i32 124, i32 62, i32 126, i32 57, i32 121, i32 59, i32 123, i32 61, i32 125, i32 63, i32 127> @@ -467,7 +467,7 @@ define <128 x i8> @vshuff_39(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_3a: -; CHECK: [[REG3a:r[0-9]+]] = #58 +; CHECK: [[REG3a:r[0-9]+]] = #-6 ; CHECK: vshuff(v1,v0,[[REG3a]]) define <128 x i8> @vshuff_3a(<128 x i8> %v0, <128 x i8> %v1) #0 
{ %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 64, i32 65, i32 4, i32 5, i32 68, i32 69, i32 2, i32 3, i32 66, i32 67, i32 6, i32 7, i32 70, i32 71, i32 8, i32 9, i32 72, i32 73, i32 12, i32 13, i32 76, i32 77, i32 10, i32 11, i32 74, i32 75, i32 14, i32 15, i32 78, i32 79, i32 16, i32 17, i32 80, i32 81, i32 20, i32 21, i32 84, i32 85, i32 18, i32 19, i32 82, i32 83, i32 22, i32 23, i32 86, i32 87, i32 24, i32 25, i32 88, i32 89, i32 28, i32 29, i32 92, i32 93, i32 26, i32 27, i32 90, i32 91, i32 30, i32 31, i32 94, i32 95, i32 32, i32 33, i32 96, i32 97, i32 36, i32 37, i32 100, i32 101, i32 34, i32 35, i32 98, i32 99, i32 38, i32 39, i32 102, i32 103, i32 40, i32 41, i32 104, i32 105, i32 44, i32 45, i32 108, i32 109, i32 42, i32 43, i32 106, i32 107, i32 46, i32 47, i32 110, i32 111, i32 48, i32 49, i32 112, i32 113, i32 52, i32 53, i32 116, i32 117, i32 50, i32 51, i32 114, i32 115, i32 54, i32 55, i32 118, i32 119, i32 56, i32 57, i32 120, i32 121, i32 60, i32 61, i32 124, i32 125, i32 58, i32 59, i32 122, i32 123, i32 62, i32 63, i32 126, i32 127> @@ -475,7 +475,7 @@ define <128 x i8> @vshuff_3a(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_3b: -; CHECK: [[REG3b:r[0-9]+]] = #59 +; CHECK: [[REG3b:r[0-9]+]] = #-5 ; CHECK: vshuff(v1,v0,[[REG3b]]) define <128 x i8> @vshuff_3b(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 1, i32 65, i32 4, i32 68, i32 5, i32 69, i32 2, i32 66, i32 3, i32 67, i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 12, i32 76, i32 13, i32 77, i32 10, i32 74, i32 11, i32 75, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 20, i32 84, i32 21, i32 85, i32 18, i32 82, i32 19, i32 83, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 28, i32 92, i32 29, i32 93, i32 26, i32 90, i32 27, i32 91, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 36, i32 100, i32 37, i32 101, i32 34, i32 98, i32 35, i32 99, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 44, i32 108, i32 45, i32 109, i32 42, i32 106, i32 43, i32 107, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 52, i32 116, i32 53, i32 117, i32 50, i32 114, i32 51, i32 115, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 60, i32 124, i32 61, i32 125, i32 58, i32 122, i32 59, i32 123, i32 62, i32 126, i32 63, i32 127> @@ -483,7 +483,7 @@ define <128 x i8> @vshuff_3b(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_3c: -; CHECK: [[REG3c:r[0-9]+]] = #60 +; CHECK: [[REG3c:r[0-9]+]] = #-4 ; CHECK: vshuff(v1,v0,[[REG3c]]) define <128 x i8> @vshuff_3c(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 2, i32 3, i32 64, i32 65, i32 66, i32 67, i32 4, i32 5, i32 6, i32 7, i32 68, i32 69, i32 70, i32 71, i32 8, i32 9, i32 10, i32 11, i32 72, i32 73, i32 74, i32 75, i32 12, i32 13, i32 14, i32 15, i32 76, i32 77, i32 78, i32 79, i32 16, i32 17, i32 18, i32 19, i32 80, i32 81, i32 82, i32 83, i32 20, i32 21, i32 22, i32 23, i32 84, i32 85, i32 86, i32 87, i32 24, i32 25, i32 26, i32 27, i32 88, i32 89, i32 90, i32 91, i32 28, i32 29, i32 30, i32 31, i32 92, i32 93, i32 94, i32 95, i32 32, i32 33, i32 34, i32 35, i32 96, i32 97, i32 98, i32 99, i32 36, i32 37, i32 38, i32 39, i32 100, i32 101, i32 102, i32 103, i32 40, i32 41, i32 42, i32 43, i32 104, i32 105, i32 106, i32 107, i32 
44, i32 45, i32 46, i32 47, i32 108, i32 109, i32 110, i32 111, i32 48, i32 49, i32 50, i32 51, i32 112, i32 113, i32 114, i32 115, i32 52, i32 53, i32 54, i32 55, i32 116, i32 117, i32 118, i32 119, i32 56, i32 57, i32 58, i32 59, i32 120, i32 121, i32 122, i32 123, i32 60, i32 61, i32 62, i32 63, i32 124, i32 125, i32 126, i32 127> @@ -491,7 +491,7 @@ define <128 x i8> @vshuff_3c(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_3d: -; CHECK: [[REG3d:r[0-9]+]] = #61 +; CHECK: [[REG3d:r[0-9]+]] = #-3 ; CHECK: vshuff(v1,v0,[[REG3d]]) define <128 x i8> @vshuff_3d(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 2, i32 66, i32 1, i32 65, i32 3, i32 67, i32 4, i32 68, i32 6, i32 70, i32 5, i32 69, i32 7, i32 71, i32 8, i32 72, i32 10, i32 74, i32 9, i32 73, i32 11, i32 75, i32 12, i32 76, i32 14, i32 78, i32 13, i32 77, i32 15, i32 79, i32 16, i32 80, i32 18, i32 82, i32 17, i32 81, i32 19, i32 83, i32 20, i32 84, i32 22, i32 86, i32 21, i32 85, i32 23, i32 87, i32 24, i32 88, i32 26, i32 90, i32 25, i32 89, i32 27, i32 91, i32 28, i32 92, i32 30, i32 94, i32 29, i32 93, i32 31, i32 95, i32 32, i32 96, i32 34, i32 98, i32 33, i32 97, i32 35, i32 99, i32 36, i32 100, i32 38, i32 102, i32 37, i32 101, i32 39, i32 103, i32 40, i32 104, i32 42, i32 106, i32 41, i32 105, i32 43, i32 107, i32 44, i32 108, i32 46, i32 110, i32 45, i32 109, i32 47, i32 111, i32 48, i32 112, i32 50, i32 114, i32 49, i32 113, i32 51, i32 115, i32 52, i32 116, i32 54, i32 118, i32 53, i32 117, i32 55, i32 119, i32 56, i32 120, i32 58, i32 122, i32 57, i32 121, i32 59, i32 123, i32 60, i32 124, i32 62, i32 126, i32 61, i32 125, i32 63, i32 127> @@ -499,7 +499,7 @@ define <128 x i8> @vshuff_3d(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_3e: -; CHECK: [[REG3e:r[0-9]+]] = #62 +; CHECK: [[REG3e:r[0-9]+]] = #-2 ; CHECK: vshuff(v1,v0,[[REG3e]]) define <128 x i8> @vshuff_3e(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 1, i32 64, i32 65, i32 2, i32 3, i32 66, i32 67, i32 4, i32 5, i32 68, i32 69, i32 6, i32 7, i32 70, i32 71, i32 8, i32 9, i32 72, i32 73, i32 10, i32 11, i32 74, i32 75, i32 12, i32 13, i32 76, i32 77, i32 14, i32 15, i32 78, i32 79, i32 16, i32 17, i32 80, i32 81, i32 18, i32 19, i32 82, i32 83, i32 20, i32 21, i32 84, i32 85, i32 22, i32 23, i32 86, i32 87, i32 24, i32 25, i32 88, i32 89, i32 26, i32 27, i32 90, i32 91, i32 28, i32 29, i32 92, i32 93, i32 30, i32 31, i32 94, i32 95, i32 32, i32 33, i32 96, i32 97, i32 34, i32 35, i32 98, i32 99, i32 36, i32 37, i32 100, i32 101, i32 38, i32 39, i32 102, i32 103, i32 40, i32 41, i32 104, i32 105, i32 42, i32 43, i32 106, i32 107, i32 44, i32 45, i32 108, i32 109, i32 46, i32 47, i32 110, i32 111, i32 48, i32 49, i32 112, i32 113, i32 50, i32 51, i32 114, i32 115, i32 52, i32 53, i32 116, i32 117, i32 54, i32 55, i32 118, i32 119, i32 56, i32 57, i32 120, i32 121, i32 58, i32 59, i32 122, i32 123, i32 60, i32 61, i32 124, i32 125, i32 62, i32 63, i32 126, i32 127> @@ -507,7 +507,7 @@ define <128 x i8> @vshuff_3e(<128 x i8> %v0, <128 x i8> %v1) #0 { } ; CHECK-LABEL: vshuff_3f: -; CHECK: [[REG3f:r[0-9]+]] = #63 +; CHECK: [[REG3f:r[0-9]+]] = #-1 ; CHECK: vshuff(v1,v0,[[REG3f]]) define <128 x i8> @vshuff_3f(<128 x i8> %v0, <128 x i8> %v1) #0 { %p = shufflevector <128 x i8> %v0, <128 x i8> undef, <128 x i32> < i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, 
i32 6, i32 70, i32 7, i32 71, i32 8, i32 72, i32 9, i32 73, i32 10, i32 74, i32 11, i32 75, i32 12, i32 76, i32 13, i32 77, i32 14, i32 78, i32 15, i32 79, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 24, i32 88, i32 25, i32 89, i32 26, i32 90, i32 27, i32 91, i32 28, i32 92, i32 29, i32 93, i32 30, i32 94, i32 31, i32 95, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 40, i32 104, i32 41, i32 105, i32 42, i32 106, i32 43, i32 107, i32 44, i32 108, i32 45, i32 109, i32 46, i32 110, i32 47, i32 111, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119, i32 56, i32 120, i32 57, i32 121, i32 58, i32 122, i32 59, i32 123, i32 60, i32 124, i32 61, i32 125, i32 62, i32 126, i32 63, i32 127> diff --git a/llvm/test/CodeGen/Hexagon/autohvx/shuff-combos-128b.ll b/llvm/test/CodeGen/Hexagon/autohvx/shuff-combos-128b.ll index 8691243351325..f359244d78f00 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/shuff-combos-128b.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/shuff-combos-128b.ll @@ -2,7 +2,7 @@ ; Generator: vdeal(0x37), vdeal(0x53), vshuff(0x2f), vdeal(0x4b), vdeal(0x27), vdeal(0x43), vshuff(0x1f), vdeal(0x5b), vshuff(0x7e), vshuff(0x6c), vdeal(0x5a), vdeal(0x38), vshuff(0x16), vshuff(0x44), vdeal(0x72) ; CHECK-LABEL: test_0000: -; CHECK-DAG: [[R00:r[0-9]+]] = #66 +; CHECK-DAG: [[R00:r[0-9]+]] = #-62 ; CHECK-DAG: [[R01:r[0-9]+]] = #46 ; CHECK-DAG: [[R02:r[0-9]+]] = #1 ; CHECK: v[[H00:[0-9]+]]:[[L00:[0-9]+]] = vshuff(v1,v0,[[R00]]) @@ -18,7 +18,7 @@ define <256 x i8> @test_0000(<256 x i8> %v0) #0 { ; CHECK-LABEL: test_0001: ; CHECK-DAG: [[R10:r[0-9]+]] = #24 ; CHECK-DAG: [[R11:r[0-9]+]] = #9 -; CHECK-DAG: [[R12:r[0-9]+]] = #68 +; CHECK-DAG: [[R12:r[0-9]+]] = #-60 ; CHECK-DAG: [[R13:r[0-9]+]] = #34 ; CHECK: v[[H10:[0-9]+]]:[[L10:[0-9]+]] = vshuff(v1,v0,[[R10]]) ; CHECK: v[[H11:[0-9]+]]:[[L11:[0-9]+]] = vdeal(v[[H10]],v[[L10]],[[R11]]) @@ -34,7 +34,7 @@ define <256 x i8> @test_0001(<256 x i8> %v0) #0 { ; CHECK-LABEL: test_0002: ; CHECK-DAG: [[R20:r[0-9]+]] = #18 ; CHECK-DAG: [[R21:r[0-9]+]] = #10 -; CHECK-DAG: [[R22:r[0-9]+]] = #68 +; CHECK-DAG: [[R22:r[0-9]+]] = #-60 ; CHECK-DAG: [[R23:r[0-9]+]] = #5 ; CHECK: v[[H20:[0-9]+]]:[[L20:[0-9]+]] = vshuff(v1,v0,[[R20]]) ; CHECK: v[[H21:[0-9]+]]:[[L21:[0-9]+]] = vdeal(v[[H20]],v[[L20]],[[R21]]) @@ -51,7 +51,7 @@ define <256 x i8> @test_0002(<256 x i8> %v0) #0 { ; CHECK-DAG: [[R30:r[0-9]+]] = #21 ; CHECK-DAG: [[R31:r[0-9]+]] = #9 ; CHECK-DAG: [[R32:r[0-9]+]] = #34 -; CHECK-DAG: [[R33:r[0-9]+]] = #66 +; CHECK-DAG: [[R33:r[0-9]+]] = #-62 ; CHECK: v[[H30:[0-9]+]]:[[L30:[0-9]+]] = vshuff(v1,v0,[[R30]]) ; CHECK: v[[H31:[0-9]+]]:[[L31:[0-9]+]] = vdeal(v[[H30]],v[[L30]],[[R31]]) ; CHECK: v[[H32:[0-9]+]]:[[L32:[0-9]+]] = vshuff(v[[H31]],v[[L31]],[[R32]]) @@ -65,7 +65,7 @@ define <256 x i8> @test_0003(<256 x i8> %v0) #0 { ; Generator: vdeal(0x63), vshuff(0x6f), vdeal(0x77), vshuff(0x75), vdeal(0x3d), vshuff(0x2d), vshuff(0x00), vshuff(0x5c), vdeal(0x04), vshuff(0x79), vshuff(0x21), vdeal(0x7b), vdeal(0x66), vshuff(0x59), vdeal(0x54) ; CHECK-LABEL: test_0004: ; CHECK-DAG: [[R40:r[0-9]+]] = #38 -; CHECK-DAG: [[R41:r[0-9]+]] = #72 +; CHECK-DAG: [[R41:r[0-9]+]] = #-56 ; CHECK-DAG: [[R42:r[0-9]+]] = #18 ; CHECK: v[[H40:[0-9]+]]:[[L40:[0-9]+]] = vshuff(v1,v0,[[R40]]) ; CHECK: v[[H41:[0-9]+]]:[[L41:[0-9]+]] = 
vshuff(v[[H40]],v[[L40]],[[R41]]) @@ -81,7 +81,7 @@ define <256 x i8> @test_0004(<256 x i8> %v0) #0 { ; CHECK-DAG: [[R50:r[0-9]+]] = #9 ; CHECK-DAG: [[R51:r[0-9]+]] = #3 ; CHECK-DAG: [[R52:r[0-9]+]] = #48 -; CHECK-DAG: [[R53:r[0-9]+]] = #68 +; CHECK-DAG: [[R53:r[0-9]+]] = #-60 ; CHECK: v[[H50:[0-9]+]]:[[L50:[0-9]+]] = vshuff(v1,v0,[[R50]]) ; CHECK: v[[H51:[0-9]+]]:[[L51:[0-9]+]] = vdeal(v[[H50]],v[[L50]],[[R51]]) ; CHECK: v[[H52:[0-9]+]]:[[L52:[0-9]+]] = vdeal(v[[H51]],v[[L51]],[[R52]]) @@ -94,7 +94,7 @@ define <256 x i8> @test_0005(<256 x i8> %v0) #0 { ; Generator: vshuff(0x34), vshuff(0x07), vdeal(0x5d), vshuff(0x05), vshuff(0x50), vshuff(0x13), vdeal(0x31), vdeal(0x6e), vdeal(0x0f), vdeal(0x2c), vdeal(0x28), vdeal(0x76), vdeal(0x22), vdeal(0x3a), vdeal(0x51) ; CHECK-LABEL: test_0006: -; CHECK-DAG: [[R60:r[0-9]+]] = #85 +; CHECK-DAG: [[R60:r[0-9]+]] = #-43 ; CHECK-DAG: [[R61:r[0-9]+]] = #2 ; CHECK: v[[H60:[0-9]+]]:[[L60:[0-9]+]] = vdeal(v1,v0,[[R60]]) ; CHECK: v[[H61:[0-9]+]]:[[L61:[0-9]+]] = vshuff(v[[H60]],v[[L60]],[[R61]]) @@ -106,7 +106,7 @@ define <256 x i8> @test_0006(<256 x i8> %v0) #0 { ; Generator: vshuff(0x25), vshuff(0x4c), vshuff(0x72), vdeal(0x70), vshuff(0x3b), vshuff(0x26), vshuff(0x4d), vdeal(0x20), vshuff(0x7f), vdeal(0x6a), vdeal(0x78), vshuff(0x5f), vdeal(0x10), vdeal(0x71), vshuff(0x6d) ; CHECK-LABEL: test_0007: -; CHECK-DAG: [[R70:r[0-9]+]] = #74 +; CHECK-DAG: [[R70:r[0-9]+]] = #-54 ; CHECK-DAG: [[R71:r[0-9]+]] = #20 ; CHECK-DAG: [[R72:r[0-9]+]] = #34 ; CHECK: v[[H70:[0-9]+]]:[[L70:[0-9]+]] = vshuff(v1,v0,[[R70]]) @@ -120,7 +120,7 @@ define <256 x i8> @test_0007(<256 x i8> %v0) #0 { ; Generator: vshuff(0x2e), vshuff(0x40), vdeal(0x35), vdeal(0x3e), vdeal(0x06), vshuff(0x4b), vshuff(0x24), vshuff(0x09), vdeal(0x18), vshuff(0x42), vshuff(0x43), vshuff(0x41), vshuff(0x23), vdeal(0x3f), vdeal(0x39) ; CHECK-LABEL: test_0008: -; CHECK-DAG: [[R80:r[0-9]+]] = #73 +; CHECK-DAG: [[R80:r[0-9]+]] = #-55 ; CHECK-DAG: [[R81:r[0-9]+]] = #5 ; CHECK-DAG: [[R82:r[0-9]+]] = #48 ; CHECK-DAG: [[R83:r[0-9]+]] = #2 @@ -136,7 +136,7 @@ define <256 x i8> @test_0008(<256 x i8> %v0) #0 { ; Generator: vshuff(0x33), vshuff(0x5e), vshuff(0x2a), vdeal(0x2f), vdeal(0x1f), vshuff(0x14), vshuff(0x17), vshuff(0x1b), vdeal(0x1c), vdeal(0x15), vshuff(0x37), vshuff(0x3c), vdeal(0x4e), vdeal(0x7d), vshuff(0x61) ; CHECK-LABEL: test_0009: -; CHECK-DAG: [[R90:r[0-9]+]] = #96 +; CHECK-DAG: [[R90:r[0-9]+]] = #-32 ; CHECK-DAG: [[R91:r[0-9]+]] = #18 ; CHECK-DAG: [[R92:r[0-9]+]] = #5 ; CHECK: v[[H90:[0-9]+]]:[[L90:[0-9]+]] = vshuff(v1,v0,[[R90]]) @@ -152,7 +152,7 @@ define <256 x i8> @test_0009(<256 x i8> %v0) #0 { ; CHECK-LABEL: test_000a: ; CHECK-DAG: [[Ra0:r[0-9]+]] = #44 ; CHECK-DAG: [[Ra1:r[0-9]+]] = #6 -; CHECK-DAG: [[Ra2:r[0-9]+]] = #80 +; CHECK-DAG: [[Ra2:r[0-9]+]] = #-48 ; CHECK: v[[Ha0:[0-9]+]]:[[La0:[0-9]+]] = vshuff(v1,v0,[[Ra0]]) ; CHECK: v[[Ha1:[0-9]+]]:[[La1:[0-9]+]] = vdeal(v[[Ha0]],v[[La0]],[[Ra1]]) ; CHECK: v[[Ha2:[0-9]+]]:[[La2:[0-9]+]] = vshuff(v[[Ha1]],v[[La1]],[[Ra2]]) @@ -164,7 +164,7 @@ define <256 x i8> @test_000a(<256 x i8> %v0) #0 { ; Generator: vshuff(0x74), vshuff(0x11), vshuff(0x53), vshuff(0x66), vshuff(0x1d), vdeal(0x59), vshuff(0x63), vshuff(0x49), vdeal(0x00), vshuff(0x38), vshuff(0x45), vdeal(0x68), vshuff(0x65), vshuff(0x6e), vdeal(0x62) ; CHECK-LABEL: test_000b: -; CHECK-DAG: [[Rb0:r[0-9]+]] = #68 +; CHECK-DAG: [[Rb0:r[0-9]+]] = #-60 ; CHECK-DAG: [[Rb1:r[0-9]+]] = #5 ; CHECK-DAG: [[Rb2:r[0-9]+]] = #18 ; CHECK-DAG: [[Rb3:r[0-9]+]] = #40 @@ -182,7 +182,7 @@ define <256 x i8> 
@test_000b(<256 x i8> %v0) #0 { ; CHECK-LABEL: test_000c: ; CHECK-DAG: [[Rc0:r[0-9]+]] = #10 ; CHECK-DAG: [[Rc1:r[0-9]+]] = #3 -; CHECK-DAG: [[Rc2:r[0-9]+]] = #84 +; CHECK-DAG: [[Rc2:r[0-9]+]] = #-44 ; CHECK: v[[Hc0:[0-9]+]]:[[Lc0:[0-9]+]] = vshuff(v1,v0,[[Rc0]]) ; CHECK: v[[Hc1:[0-9]+]]:[[Lc1:[0-9]+]] = vdeal(v[[Hc0]],v[[Lc0]],[[Rc1]]) ; CHECK: v[[Hc2:[0-9]+]]:[[Lc2:[0-9]+]] = vshuff(v[[Hc1]],v[[Lc1]],[[Rc2]]) @@ -195,7 +195,7 @@ define <256 x i8> @test_000c(<256 x i8> %v0) #0 { ; Generator: vdeal(0x58), vdeal(0x0b), vdeal(0x21), vdeal(0x7f), vshuff(0x6a), vshuff(0x78), vshuff(0x52), vshuff(0x73), vshuff(0x06), vdeal(0x2d), vdeal(0x32), vdeal(0x48), vdeal(0x75), vdeal(0x55), vshuff(0x0e) ; CHECK-LABEL: test_000d: ; CHECK-DAG: [[Rd0:r[0-9]+]] = #36 -; CHECK-DAG: [[Rd1:r[0-9]+]] = #80 +; CHECK-DAG: [[Rd1:r[0-9]+]] = #-48 ; CHECK-DAG: [[Rd2:r[0-9]+]] = #9 ; CHECK: v[[Hd0:[0-9]+]]:[[Ld0:[0-9]+]] = vshuff(v1,v0,[[Rd0]]) ; CHECK: v[[Hd1:[0-9]+]]:[[Ld1:[0-9]+]] = vshuff(v[[Hd0]],v[[Ld0]],[[Rd1]]) @@ -208,7 +208,7 @@ define <256 x i8> @test_000d(<256 x i8> %v0) #0 { ; Generator: vdeal(0x6f), vdeal(0x13), vdeal(0x07), vdeal(0x56), vshuff(0x2c), vdeal(0x0c), vdeal(0x33), vshuff(0x22), vdeal(0x02), vshuff(0x18), vdeal(0x4d), vshuff(0x51), vshuff(0x3e), vshuff(0x77), vshuff(0x30) ; CHECK-LABEL: test_000e: -; CHECK-DAG: [[Re0:r[0-9]+]] = #65 +; CHECK-DAG: [[Re0:r[0-9]+]] = #-63 ; CHECK-DAG: [[Re1:r[0-9]+]] = #24 ; CHECK-DAG: [[Re2:r[0-9]+]] = #36 ; CHECK: v[[He0:[0-9]+]]:[[Le0:[0-9]+]] = vshuff(v1,v0,[[Re0]]) diff --git a/llvm/test/CodeGen/Hexagon/autohvx/shuff-combos-64b.ll b/llvm/test/CodeGen/Hexagon/autohvx/shuff-combos-64b.ll index c81b3534e2eb4..7298cebe08481 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/shuff-combos-64b.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/shuff-combos-64b.ll @@ -2,7 +2,7 @@ ; Generator: vdeal(0x1f), vshuff(0x32), vshuff(0x24), vshuff(0x26), vshuff(0x08), vdeal(0x3a), vshuff(0x0c), vdeal(0x0e), vdeal(0x30), vdeal(0x22), vdeal(0x14), vdeal(0x36), vdeal(0x18), vdeal(0x0a), vdeal(0x3c) ; CHECK-LABEL: test_0000: -; CHECK-DAG: [[R00:r[0-9]+]] = #49 +; CHECK-DAG: [[R00:r[0-9]+]] = #-15 ; CHECK-DAG: [[R01:r[0-9]+]] = #3 ; CHECK: v[[H00:[0-9]+]]:[[L00:[0-9]+]] = vshuff(v1,v0,[[R00]]) ; CHECK: v[[H01:[0-9]+]]:[[L01:[0-9]+]] = vdeal(v[[H00]],v[[L00]],[[R01]]) @@ -15,7 +15,7 @@ define <128 x i8> @test_0000(<128 x i8> %v0) #0 { ; Generator: vshuff(0x1e), vshuff(0x00), vdeal(0x12), vshuff(0x34), vshuff(0x0b), vshuff(0x2b), vdeal(0x16), vshuff(0x2e), vshuff(0x1a), vdeal(0x28), vshuff(0x2d), vdeal(0x15), vdeal(0x1d), vshuff(0x25), vshuff(0x0d) ; CHECK-LABEL: test_0001: ; CHECK-DAG: [[R10:r[0-9]+]] = #10 -; CHECK-DAG: [[R11:r[0-9]+]] = #34 +; CHECK-DAG: [[R11:r[0-9]+]] = #-30 ; CHECK-DAG: [[R12:r[0-9]+]] = #16 ; CHECK: v[[H10:[0-9]+]]:[[L10:[0-9]+]] = vshuff(v1,v0,[[R10]]) ; CHECK: v[[H11:[0-9]+]]:[[L11:[0-9]+]] = vshuff(v[[H10]],v[[L10]],[[R11]]) @@ -41,7 +41,7 @@ define <128 x i8> @test_0002(<128 x i8> %v0) #0 { ; Generator: vshuff(0x11), vshuff(0x2b), vdeal(0x3d), vdeal(0x3e), vshuff(0x02), vdeal(0x1c), vdeal(0x2f), vdeal(0x0f), vshuff(0x36), vshuff(0x38), vdeal(0x35), vshuff(0x1b), vshuff(0x3b), vdeal(0x21), vdeal(0x15) ; CHECK-LABEL: test_0003: -; CHECK-DAG: [[R30:r[0-9]+]] = #34 +; CHECK-DAG: [[R30:r[0-9]+]] = #-30 ; CHECK-DAG: [[R31:r[0-9]+]] = #10 ; CHECK-DAG: [[R32:r[0-9]+]] = #5 ; CHECK: v[[H30:[0-9]+]]:[[L30:[0-9]+]] = vshuff(v1,v0,[[R30]]) @@ -55,7 +55,7 @@ define <128 x i8> @test_0003(<128 x i8> %v0) #0 { ; Generator: vdeal(0x0a), vdeal(0x10), vdeal(0x31), vshuff(0x30), 
vdeal(0x00), vdeal(0x39), vdeal(0x0e), vshuff(0x37), vshuff(0x17), vshuff(0x06), vshuff(0x07), vshuff(0x09), vshuff(0x3c), vshuff(0x33), vshuff(0x33) ; CHECK-LABEL: test_0004: -; CHECK-DAG: [[R40:r[0-9]+]] = #57 +; CHECK-DAG: [[R40:r[0-9]+]] = #-7 ; CHECK-DAG: [[R41:r[0-9]+]] = #6 ; CHECK-DAG: [[R42:r[0-9]+]] = #1 ; CHECK: v[[H40:[0-9]+]]:[[L40:[0-9]+]] = vshuff(v1,v0,[[R40]]) @@ -69,7 +69,7 @@ define <128 x i8> @test_0004(<128 x i8> %v0) #0 { ; Generator: vdeal(0x1c), vshuff(0x31), vdeal(0x1f), vshuff(0x29), vdeal(0x1a), vshuff(0x2a), vshuff(0x25), vshuff(0x05), vshuff(0x04), vshuff(0x23), vdeal(0x0d), vdeal(0x20), vshuff(0x29), vdeal(0x2f), vshuff(0x1d) ; CHECK-LABEL: test_0005: -; CHECK-DAG: [[R50:r[0-9]+]] = #33 +; CHECK-DAG: [[R50:r[0-9]+]] = #-31 ; CHECK-DAG: [[R51:r[0-9]+]] = #12 ; CHECK-DAG: [[R52:r[0-9]+]] = #1{{$}} ; CHECK: v[[H50:[0-9]+]]:[[L50:[0-9]+]] = vshuff(v1,v0,[[R50]]) @@ -84,7 +84,7 @@ define <128 x i8> @test_0005(<128 x i8> %v0) #0 { ; Generator: vdeal(0x22), vshuff(0x24), vdeal(0x16), vdeal(0x18), vshuff(0x17), vdeal(0x2d), vshuff(0x38), vshuff(0x20), vshuff(0x37), vdeal(0x3f), vdeal(0x10), vdeal(0x32), vshuff(0x14), vshuff(0x13), vdeal(0x0b) ; CHECK-LABEL: test_0006: ; CHECK-DAG: [[R60:r[0-9]+]] = #3{{$}} -; CHECK-DAG: [[R61:r[0-9]+]] = #36 +; CHECK-DAG: [[R61:r[0-9]+]] = #-28 ; CHECK: v[[H60:[0-9]+]]:[[L60:[0-9]+]] = vdeal(v1,v0,[[R60]]) ; CHECK: v[[H61:[0-9]+]]:[[L61:[0-9]+]] = vshuff(v[[H60]],v[[L60]],[[R61]]) ; CHECK-NOT: v{{[0-9:]+}} = @@ -95,7 +95,7 @@ define <128 x i8> @test_0006(<128 x i8> %v0) #0 { ; Generator: vdeal(0x0f), vdeal(0x01), vshuff(0x3b), vdeal(0x0c), vdeal(0x3f), vdeal(0x26), vshuff(0x28), vdeal(0x3a), vdeal(0x02), vdeal(0x1b), vshuff(0x0e), vdeal(0x03), vshuff(0x3d), vshuff(0x2c), vshuff(0x15) ; CHECK-LABEL: test_0007: -; CHECK-DAG: [[R70:r[0-9]+]] = #50 +; CHECK-DAG: [[R70:r[0-9]+]] = #-14 ; CHECK-DAG: [[R71:r[0-9]+]] = #5{{$}} ; CHECK-DAG: [[R72:r[0-9]+]] = #8 ; CHECK: v[[H70:[0-9]+]]:[[L70:[0-9]+]] = vshuff(v1,v0,[[R70]]) @@ -124,7 +124,7 @@ define <128 x i8> @test_0008(<128 x i8> %v0) #0 { ; Generator: vshuff(0x1d), vshuff(0x18), vdeal(0x09), vshuff(0x2a), vdeal(0x03), vdeal(0x27), vdeal(0x25), vdeal(0x13), vshuff(0x3a), vshuff(0x19), vshuff(0x06), vshuff(0x0f), vshuff(0x3c), vshuff(0x2e), vshuff(0x36) ; CHECK-LABEL: test_0009: ; CHECK-DAG: [[R90:r[0-9]+]] = #17 -; CHECK-DAG: [[R91:r[0-9]+]] = #40 +; CHECK-DAG: [[R91:r[0-9]+]] = #-24 ; CHECK-DAG: [[R92:r[0-9]+]] = #6 ; CHECK: v[[H90:[0-9]+]]:[[L90:[0-9]+]] = vdeal(v1,v0,[[R90]]) ; CHECK: v[[H91:[0-9]+]]:[[L91:[0-9]+]] = vshuff(v[[H90]],v[[L90]],[[R91]]) @@ -137,7 +137,7 @@ define <128 x i8> @test_0009(<128 x i8> %v0) #0 { ; Generator: vdeal(0x05), vshuff(0x10), vdeal(0x0d), vshuff(0x12), vdeal(0x08), vshuff(0x22), vdeal(0x24), vshuff(0x3e), vdeal(0x00), vshuff(0x14), vdeal(0x3b), vdeal(0x33), vshuff(0x2f), vdeal(0x13), vdeal(0x14) ; CHECK-LABEL: test_000a: -; CHECK-DAG: [[Ra0:r[0-9]+]] = #56 +; CHECK-DAG: [[Ra0:r[0-9]+]] = #-8 ; CHECK-DAG: [[Ra1:r[0-9]+]] = #13 ; CHECK-DAG: [[Ra2:r[0-9]+]] = #2 ; CHECK: v[[Ha0:[0-9]+]]:[[La0:[0-9]+]] = vshuff(v1,v0,[[Ra0]]) @@ -152,7 +152,7 @@ define <128 x i8> @test_000a(<128 x i8> %v0) #0 { ; Generator: vdeal(0x12), vshuff(0x2c), vdeal(0x2d), vshuff(0x01), vshuff(0x1f), vshuff(0x30), vdeal(0x2a), vdeal(0x0b), vdeal(0x32), vshuff(0x08), vdeal(0x1b), vdeal(0x09), vshuff(0x1c), vshuff(0x16), vdeal(0x38) ; CHECK-LABEL: test_000b: ; CHECK-DAG: [[Rb0:r[0-9]+]] = #12 -; CHECK-DAG: [[Rb1:r[0-9]+]] = #33 +; CHECK-DAG: [[Rb1:r[0-9]+]] = #-31 ; CHECK-DAG: 
[[Rb2:r[0-9]+]] = #18 ; CHECK: v[[Hb0:[0-9]+]]:[[Lb0:[0-9]+]] = vdeal(v1,v0,[[Rb0]]) ; CHECK: v[[Hb1:[0-9]+]]:[[Lb1:[0-9]+]] = vdeal(v[[Hb0]],v[[Lb0]],[[Rb1]]) @@ -168,7 +168,7 @@ define <128 x i8> @test_000b(<128 x i8> %v0) #0 { ; CHECK-DAG: [[Rc0:r[0-9]+]] = #12 ; CHECK-DAG: [[Rc1:r[0-9]+]] = #6 ; CHECK-DAG: [[Rc2:r[0-9]+]] = #17 -; CHECK-DAG: [[Rc3:r[0-9]+]] = #32 +; CHECK-DAG: [[Rc3:r[0-9]+]] = #-32 ; CHECK: v[[Hc0:[0-9]+]]:[[Lc0:[0-9]+]] = vshuff(v1,v0,[[Rc0]]) ; CHECK: v[[Hc1:[0-9]+]]:[[Lc1:[0-9]+]] = vdeal(v[[Hc0]],v[[Lc0]],[[Rc1]]) ; CHECK: v[[Hc2:[0-9]+]]:[[Lc2:[0-9]+]] = vdeal(v[[Hc1]],v[[Lc1]],[[Rc2]]) @@ -181,7 +181,7 @@ define <128 x i8> @test_000c(<128 x i8> %v0) #0 { ; Generator: vdeal(0x3c), vdeal(0x24), vdeal(0x05), vdeal(0x37), vshuff(0x21), vdeal(0x11), vdeal(0x1d), vshuff(0x00), vshuff(0x34), vshuff(0x0d), vshuff(0x3a), vshuff(0x1f), vshuff(0x03), vshuff(0x1e), vdeal(0x29) ; CHECK-LABEL: test_000d: -; CHECK-DAG: [[Rd0:r[0-9]+]] = #40 +; CHECK-DAG: [[Rd0:r[0-9]+]] = #-24 ; CHECK-DAG: [[Rd1:r[0-9]+]] = #28 ; CHECK: v[[Hd0:[0-9]+]]:[[Ld0:[0-9]+]] = vshuff(v1,v0,[[Rd0]]) ; CHECK: v[[Hd1:[0-9]+]]:[[Ld1:[0-9]+]] = vdeal(v[[Hd0]],v[[Ld0]],[[Rd1]]) @@ -193,7 +193,7 @@ define <128 x i8> @test_000d(<128 x i8> %v0) #0 { ; Generator: vshuff(0x18), vdeal(0x36), vdeal(0x33), vdeal(0x26), vshuff(0x04), vshuff(0x2d), vshuff(0x35), vdeal(0x34), vdeal(0x2e), vdeal(0x25), vdeal(0x28), vshuff(0x0c), vdeal(0x07), vshuff(0x35), vshuff(0x01) ; CHECK-LABEL: test_000e: -; CHECK-DAG: [[Re0:r[0-9]+]] = #58 +; CHECK-DAG: [[Re0:r[0-9]+]] = #-6 ; CHECK: v[[He0:[0-9]+]]:[[Le0:[0-9]+]] = vshuff(v1,v0,[[Re0]]) ; CHECK-NOT: v{{[0-9:]+}} = define <128 x i8> @test_000e(<128 x i8> %v0) #0 { @@ -203,7 +203,7 @@ define <128 x i8> @test_000e(<128 x i8> %v0) #0 { ; Generator: vshuff(0x1a), vshuff(0x10), vdeal(0x2b), vshuff(0x15), vdeal(0x12), vdeal(0x30), vshuff(0x23), vshuff(0x02), vshuff(0x32), vshuff(0x08), vshuff(0x05), vdeal(0x3e), vshuff(0x39), vshuff(0x0a), vshuff(0x0e) ; CHECK-LABEL: test_000f: -; CHECK-DAG: [[Rf0:r[0-9]+]] = #44 +; CHECK-DAG: [[Rf0:r[0-9]+]] = #-20 ; CHECK-DAG: [[Rf1:r[0-9]+]] = #18 ; CHECK: v[[Hf0:[0-9]+]]:[[Lf0:[0-9]+]] = vshuff(v1,v0,[[Rf0]]) ; CHECK: v[[Hf1:[0-9]+]]:[[Lf1:[0-9]+]] = vshuff(v[[Hf0]],v[[Lf0]],[[Rf1]]) diff --git a/llvm/test/CodeGen/Hexagon/autohvx/shuff-perfect-inverted-pair.ll b/llvm/test/CodeGen/Hexagon/autohvx/shuff-perfect-inverted-pair.ll index 9ce849e464d9a..946658429bc25 100644 --- a/llvm/test/CodeGen/Hexagon/autohvx/shuff-perfect-inverted-pair.ll +++ b/llvm/test/CodeGen/Hexagon/autohvx/shuff-perfect-inverted-pair.ll @@ -1,7 +1,7 @@ ; RUN: llc -mtriple=hexagon < %s | FileCheck %s ; CHECK-LABEL: f0: -; CHECK: r[[R0:[0-9]+]] = #60 +; CHECK: r[[R0:[0-9]+]] = #-4 ; CHECK: v1:0 = vshuff(v0,v2,r[[R0]]) define <128 x i8> @f0(<128 x i8> %a0, <128 x i8> %a1) #0 { %v0 = shufflevector <128 x i8> %a0, <128 x i8> %a1, <128 x i32> @@ -9,7 +9,7 @@ define <128 x i8> @f0(<128 x i8> %a0, <128 x i8> %a1) #0 { } ; CHECK-LABEL: f1: -; CHECK: r[[R0:[0-9]+]] = #124 +; CHECK: r[[R0:[0-9]+]] = #-4 ; CHECK: v1:0 = vshuff(v0,v2,r[[R0]]) define <256 x i8> @f1(<256 x i8> %a0, <256 x i8> %a1) #1 { %v0 = shufflevector <256 x i8> %a0, <256 x i8> %a1, <256 x i32> diff --git a/llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll b/llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll new file mode 100644 index 0000000000000..267e365243711 --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll @@ -0,0 +1,23 @@ +; RUN: llc -march=hexagon -mcpu=hexagonv68 -mattr=+hvxv68,+hvx-length128B < %s 
| FileCheck %s +; REQUIRES: asserts + +; Check that the test does not assert when unaligned vector store V6_vS32Ub_npred_ai is generated. +; CHECK: if (!p{{[0-3]}}) vmemu + +target triple = "hexagon-unknown-unknown-elf" + +define fastcc void @test(i1 %cmp.i.i) { +entry: + %call.i.i.i172 = load ptr, ptr null, align 4 + %add.ptr = getelementptr i8, ptr %call.i.i.i172, i32 1 + store <32 x i32> zeroinitializer, ptr %add.ptr, align 128 + %add.ptr4.i4 = getelementptr i8, ptr %call.i.i.i172, i32 129 + br i1 %cmp.i.i, label %common.ret, label %if.end.i.i + +common.ret: ; preds = %if.end.i.i, %entry + ret void + +if.end.i.i: ; preds = %entry + store <32 x i32> zeroinitializer, ptr %add.ptr4.i4, align 1 + br label %common.ret +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/abs.ll b/llvm/test/CodeGen/LoongArch/lasx/abs.ll new file mode 100644 index 0000000000000..e3b0d04d92d75 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/abs.ll @@ -0,0 +1,128 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +define void @vabs_b(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.b $xr1, $xr0 +; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <32 x i8>, ptr %src + %b = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 true) + store <32 x i8> %b, ptr %dst + ret void +} + +define void @vabs_b_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.b $xr1, $xr0 +; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <32 x i8>, ptr %src + %b = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false) + store <32 x i8> %b, ptr %dst + ret void +} + +define void @vabs_h(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.h $xr1, $xr0 +; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i16>, ptr %src + %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 true) + store <16 x i16> %b, ptr %dst + ret void +} + +define void @vabs_h_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.h $xr1, $xr0 +; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i16>, ptr %src + %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false) + store <16 x i16> %b, ptr %dst + ret void +} + +define void @vabs_w(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.w $xr1, $xr0 +; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i32>, ptr %src + %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 true) + store <8 x i32> %b, ptr %dst + ret void +} + +define void @vabs_w_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.w $xr1, $xr0 +; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i32>, ptr %src 
+ %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false) + store <8 x i32> %b, ptr %dst + ret void +} + +define void @vabs_d(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.d $xr1, $xr0 +; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i64>, ptr %src + %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 true) + store <4 x i64> %b, ptr %dst + ret void +} + +define void @vabs_d_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.d $xr1, $xr0 +; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i64>, ptr %src + %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false) + store <4 x i64> %b, ptr %dst + ret void +} + +declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1) +declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1) +declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1) +declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1) diff --git a/llvm/test/CodeGen/LoongArch/lasx/and-not-combine.ll b/llvm/test/CodeGen/LoongArch/lasx/and-not-combine.ll new file mode 100644 index 0000000000000..67549599db2f3 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/and-not-combine.ll @@ -0,0 +1,87 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +define void @and_not_combine_v32i8(ptr %res, ptr %a0, ptr %a1, ptr %a2) nounwind { +; CHECK-LABEL: and_not_combine_v32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a3, 0 +; CHECK-NEXT: xvld $xr2, $a1, 0 +; CHECK-NEXT: xvsub.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvandn.v $xr0, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <32 x i8>, ptr %a0 + %v1 = load <32 x i8>, ptr %a1 + %v2 = load <32 x i8>, ptr %a2 + %not = xor <32 x i8> %v1, + %add = add <32 x i8> %not, %v2 + %and = and <32 x i8> %v0, %add + store <32 x i8> %and, ptr %res + ret void +} + +define void @and_not_combine_v16i16(ptr %res, ptr %a0, ptr %a1, ptr %a2) nounwind { +; CHECK-LABEL: and_not_combine_v16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a3, 0 +; CHECK-NEXT: xvld $xr2, $a1, 0 +; CHECK-NEXT: xvsub.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvandn.v $xr0, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <16 x i16>, ptr %a0 + %v1 = load <16 x i16>, ptr %a1 + %v2 = load <16 x i16>, ptr %a2 + %not = xor <16 x i16> %v1, + %add = add <16 x i16> %not, %v2 + %and = and <16 x i16> %v0, %add + store <16 x i16> %and, ptr %res + ret void +} + +define void @and_not_combine_v8i32(ptr %res, ptr %a0, ptr %a1, ptr %a2) nounwind { +; CHECK-LABEL: and_not_combine_v8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a3, 0 +; CHECK-NEXT: xvld $xr2, $a1, 0 +; CHECK-NEXT: xvsub.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvandn.v $xr0, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <8 x i32>, ptr %a0 + %v1 = load <8 x i32>, ptr %a1 + %v2 = load <8 x i32>, ptr %a2 + %not = xor <8 x i32> %v1, + %add = add <8 x i32> %not, %v2 + %and = and <8 x i32> %v0, %add + store <8 x i32> %and, ptr %res + ret void +} + +define 
void @and_not_combine_v4i64(ptr %res, ptr %a0, ptr %a1, ptr %a2) nounwind { +; CHECK-LABEL: and_not_combine_v4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a3, 0 +; CHECK-NEXT: xvld $xr2, $a1, 0 +; CHECK-NEXT: xvsub.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvandn.v $xr0, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <4 x i64>, ptr %a0 + %v1 = load <4 x i64>, ptr %a1 + %v2 = load <4 x i64>, ptr %a2 + %not = xor <4 x i64> %v1, + %add = add <4 x i64> %not, %v2 + %and = and <4 x i64> %v0, %add + store <4 x i64> %and, ptr %res + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/bitreverse.ll b/llvm/test/CodeGen/LoongArch/lasx/bitreverse.ll index 87ee4ad025395..8b12216d0f856 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/bitreverse.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/bitreverse.ll @@ -1,27 +1,46 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lasx --verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefix=LA32 ; RUN: llc --mtriple=loongarch64 -mattr=+lasx --verify-machineinstrs < %s \ -; RUN: | FileCheck %s +; RUN: | FileCheck %s --check-prefix=LA64 declare <32 x i8> @llvm.bitreverse.v32i8(<32 x i8>) define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind { -; CHECK-LABEL: test_bitreverse_v32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2 -; CHECK-NEXT: bitrev.8b $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: bitrev.8b $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 1 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: bitrev.8b $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 -; CHECK-NEXT: bitrev.8b $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1 -; CHECK-NEXT: xvpermi.q $xr1, $xr2, 2 -; CHECK-NEXT: xvori.b $xr0, $xr1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: test_bitreverse_v32i8: +; LA32: # %bb.0: +; LA32-NEXT: xvslli.b $xr1, $xr0, 4 +; LA32-NEXT: xvsrli.b $xr0, $xr0, 4 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr1 +; LA32-NEXT: xvandi.b $xr1, $xr0, 51 +; LA32-NEXT: xvslli.b $xr1, $xr1, 2 +; LA32-NEXT: xvsrli.b $xr0, $xr0, 2 +; LA32-NEXT: xvandi.b $xr0, $xr0, 51 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr1 +; LA32-NEXT: xvandi.b $xr1, $xr0, 85 +; LA32-NEXT: xvslli.b $xr1, $xr1, 1 +; LA32-NEXT: xvsrli.b $xr0, $xr0, 1 +; LA32-NEXT: xvandi.b $xr0, $xr0, 85 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr1 +; LA32-NEXT: ret +; +; LA64-LABEL: test_bitreverse_v32i8: +; LA64: # %bb.0: +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 2 +; LA64-NEXT: bitrev.8b $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr2, $a0, 0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 3 +; LA64-NEXT: bitrev.8b $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr2, $a0, 1 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 0 +; LA64-NEXT: bitrev.8b $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 1 +; LA64-NEXT: bitrev.8b $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; LA64-NEXT: xvpermi.q $xr1, $xr2, 2 +; LA64-NEXT: xvori.b $xr0, $xr1, 0 +; LA64-NEXT: ret %b = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %a) ret <32 x i8> %b } @@ -29,23 +48,53 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind { declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>) define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind { -; CHECK-LABEL: test_bitreverse_v16i16: -; CHECK: # %bb.0: -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2 -; 
CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 1 -; CHECK-NEXT: xvpermi.q $xr2, $xr1, 2 -; CHECK-NEXT: xvshuf4i.h $xr0, $xr2, 27 -; CHECK-NEXT: ret +; LA32-LABEL: test_bitreverse_v16i16: +; LA32: # %bb.0: +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 5 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 0 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 4 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 1 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 7 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 2 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 6 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 3 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 1 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr2, $a0, 0 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 0 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr2, $a0, 1 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 3 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr2, $a0, 2 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 2 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr2, $a0, 3 +; LA32-NEXT: xvpermi.q $xr2, $xr1, 2 +; LA32-NEXT: xvshuf4i.h $xr0, $xr2, 27 +; LA32-NEXT: ret +; +; LA64-LABEL: test_bitreverse_v16i16: +; LA64: # %bb.0: +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 2 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 3 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 0 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr2, $a0, 0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 1 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr2, $a0, 1 +; LA64-NEXT: xvpermi.q $xr2, $xr1, 2 +; LA64-NEXT: xvshuf4i.h $xr0, $xr2, 27 +; LA64-NEXT: ret %b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a) ret <16 x i16> %b } @@ -53,23 +102,53 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind { declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>) define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind { -; CHECK-LABEL: test_bitreverse_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 1 -; CHECK-NEXT: xvpermi.q $xr2, $xr1, 2 -; CHECK-NEXT: xvshuf4i.w $xr0, $xr2, 177 -; CHECK-NEXT: ret +; LA32-LABEL: test_bitreverse_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 4 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr2, $a0, 0 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 5 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr2, $a0, 1 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 6 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr2, $a0, 2 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 7 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: 
vinsgr2vr.w $vr2, $a0, 3 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 0 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 0 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 1 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 1 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 2 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 2 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 3 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 3 +; LA32-NEXT: xvpermi.q $xr1, $xr2, 2 +; LA32-NEXT: xvori.b $xr0, $xr1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: test_bitreverse_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 2 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 3 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 0 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr2, $a0, 0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 1 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr2, $a0, 1 +; LA64-NEXT: xvpermi.q $xr2, $xr1, 2 +; LA64-NEXT: xvshuf4i.w $xr0, $xr2, 177 +; LA64-NEXT: ret %b = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a) ret <8 x i32> %b } @@ -77,23 +156,43 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind { declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind { -; CHECK-LABEL: test_bitreverse_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr2, $a0, 1 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1 -; CHECK-NEXT: xvpermi.q $xr1, $xr2, 2 -; CHECK-NEXT: xvori.b $xr0, $xr1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: test_bitreverse_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0) +; LA32-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI3_0) +; LA32-NEXT: xvshuf.b $xr0, $xr0, $xr0, $xr1 +; LA32-NEXT: xvslli.b $xr1, $xr0, 4 +; LA32-NEXT: xvsrli.b $xr0, $xr0, 4 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr1 +; LA32-NEXT: xvandi.b $xr1, $xr0, 51 +; LA32-NEXT: xvslli.b $xr1, $xr1, 2 +; LA32-NEXT: xvsrli.b $xr0, $xr0, 2 +; LA32-NEXT: xvandi.b $xr0, $xr0, 51 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr1 +; LA32-NEXT: xvandi.b $xr1, $xr0, 85 +; LA32-NEXT: xvslli.b $xr1, $xr1, 1 +; LA32-NEXT: xvsrli.b $xr0, $xr0, 1 +; LA32-NEXT: xvandi.b $xr0, $xr0, 85 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr1 +; LA32-NEXT: ret +; +; LA64-LABEL: test_bitreverse_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 2 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr2, $a0, 0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 3 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr2, $a0, 1 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 0 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 1 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; LA64-NEXT: xvpermi.q $xr1, $xr2, 2 +; LA64-NEXT: xvori.b $xr0, $xr1, 0 +; LA64-NEXT: ret %b = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a) ret <4 x i64> %b } diff --git a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll 
index 7575bc1a9d3d2..d09ef0e2c6ac0 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll @@ -192,11 +192,11 @@ entry: ret void } -define void @buildvector_v2f32_const_splat(ptr %dst) nounwind { -; CHECK-LABEL: buildvector_v2f32_const_splat: +;; Also check buildvector_const_splat_xvldi_1010. +define void @buildvector_v8f32_const_splat(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_v8f32_const_splat: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 260096 -; CHECK-NEXT: xvreplgr2vr.w $xr0, $a1 +; CHECK-NEXT: xvldi $xr0, -1424 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -204,30 +204,112 @@ entry: ret void } +;; Also check buildvector_const_splat_xvldi_1100. define void @buildvector_v4f64_const_splat(ptr %dst) nounwind { -; LA32-LABEL: buildvector_v4f64_const_splat: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI14_0) -; LA32-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI14_0) -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: buildvector_v4f64_const_splat: -; LA64: # %bb.0: # %entry -; LA64-NEXT: lu52i.d $a1, $zero, 1023 -; LA64-NEXT: xvreplgr2vr.d $xr0, $a1 -; LA64-NEXT: xvst $xr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: buildvector_v4f64_const_splat: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -912 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret entry: store <4 x double> , ptr %dst ret void } +;; imm[11:8] == 4'b0000/4'b0100/4'b1000 can be represented using xvrepli.[whb]. +define void @buildvector_const_splat_xvldi_0001(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_xvldi_0001: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -3837 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <8 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_xvldi_0010(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_xvldi_0010: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -3583 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <8 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_xvldi_0011(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_xvldi_0011: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -3327 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <8 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_xvldi_0101(ptr %dst) { +; CHECK-LABEL: buildvector_const_splat_xvldi_0101: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -2813 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <16 x i16> , ptr %dst + ret void +} + +define void @buildvector_const_splat_xvldi_0110(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_xvldi_0110: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -2557 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <8 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_xvldi_0111(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_xvldi_0111: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -2305 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <8 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_xvldi_1001(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_xvldi_1001: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -1789 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <8 x i32> , ptr %dst + ret void +} + +define void 
@buildvector_const_splat_xvldi_1011(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_xvldi_1011: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -1280 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <8 x float> , ptr %dst + ret void +} + define void @buildvector_v32i8_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v32i8_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI15_0) -; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI15_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI23_0) +; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI23_0) ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -238,8 +320,8 @@ entry: define void @buildvector_v16i16_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v16i16_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI16_0) -; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI16_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI24_0) +; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI24_0) ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -250,8 +332,8 @@ entry: define void @buildvector_v8i32_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v8i32_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI17_0) -; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI17_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI25_0) +; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI25_0) ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -262,8 +344,8 @@ entry: define void @buildvector_v4i64_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v4i64_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI18_0) -; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI18_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI26_0) +; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI26_0) ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -274,8 +356,8 @@ entry: define void @buildvector_v2f32_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v2f32_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI19_0) -; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI19_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI27_0) +; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI27_0) ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -286,8 +368,8 @@ entry: define void @buildvector_v4f64_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v4f64_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI20_0) -; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI20_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI28_0) +; CHECK-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI28_0) ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -1511,8 +1593,7 @@ define void @buildvector_v8f32_with_constant(ptr %dst, float %a1, float %a2, flo ; CHECK-NEXT: # kill: def $f2 killed $f2 def $xr2 ; CHECK-NEXT: # kill: def $f1 killed $f1 def $xr1 ; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 -; CHECK-NEXT: lu12i.w $a1, 262144 -; CHECK-NEXT: xvreplgr2vr.w $xr4, $a1 +; CHECK-NEXT: xvldi $xr4, -3264 ; CHECK-NEXT: xvinsve0.w $xr4, $xr0, 1 ; CHECK-NEXT: xvinsve0.w $xr4, $xr1, 2 ; CHECK-NEXT: xvinsve0.w $xr4, $xr2, 5 diff --git a/llvm/test/CodeGen/LoongArch/lasx/extract-binop.ll b/llvm/test/CodeGen/LoongArch/lasx/extract-binop.ll new file mode 100644 index 0000000000000..4986b12199c31 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/extract-binop.ll @@ -0,0 +1,100 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64 + +define i8 @extractelt_add_v32i8(ptr %p) { +; CHECK-LABEL: extractelt_add_v32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a0, 0 +; CHECK-NEXT: xvaddi.bu $xr0, $xr0, 13 +; CHECK-NEXT: vpickve2gr.b $a0, $vr0, 2 +; CHECK-NEXT: ret +entry: + %x = load <32 x i8>, ptr %p + %add = add <32 x i8> %x, + %ext = extractelement <32 x i8> %add, i32 2 + ret i8 %ext +} + +define i16 @extractelt_add_v16i16(ptr %p) { +; CHECK-LABEL: extractelt_add_v16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a0, 0 +; CHECK-NEXT: xvaddi.hu $xr0, $xr0, 13 +; CHECK-NEXT: vpickve2gr.h $a0, $vr0, 2 +; CHECK-NEXT: ret +entry: + %x = load <16 x i16>, ptr %p + %add = add <16 x i16> %x, + %ext = extractelement <16 x i16> %add, i32 2 + ret i16 %ext +} + +define i32 @extractelt_add_v8i32(ptr %p) { +; LA32-LABEL: extractelt_add_v8i32: +; LA32: # %bb.0: # %entry +; LA32-NEXT: ld.w $a0, $a0, 8 +; LA32-NEXT: addi.w $a0, $a0, 13 +; LA32-NEXT: ret +; +; LA64-LABEL: extractelt_add_v8i32: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvaddi.wu $xr0, $xr0, 13 +; LA64-NEXT: xvpickve2gr.w $a0, $xr0, 2 +; LA64-NEXT: ret +entry: + %x = load <8 x i32>, ptr %p + %add = add <8 x i32> %x, + %ext = extractelement <8 x i32> %add, i32 2 + ret i32 %ext +} + +define i64 @extractelt_add_v4i64(ptr %p) { +; LA32-LABEL: extractelt_add_v4i64: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvaddi.du $xr0, $xr0, 12 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 2 +; LA32-NEXT: xvpickve2gr.w $a1, $xr0, 3 +; LA32-NEXT: ret +; +; LA64-LABEL: extractelt_add_v4i64: +; LA64: # %bb.0: # %entry +; LA64-NEXT: ld.d $a0, $a0, 8 +; LA64-NEXT: addi.d $a0, $a0, 12 +; LA64-NEXT: ret +entry: + %x = load <4 x i64>, ptr %p + %add = add <4 x i64> %x, + %ext = extractelement <4 x i64> %add, i32 1 + ret i64 %ext +} + +define float @extractelt_fadd_v8f32(ptr %p) { +; CHECK-LABEL: extractelt_fadd_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fld.s $fa0, $a0, 8 +; CHECK-NEXT: vldi $vr1, -1238 +; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %x = load <8 x float>, ptr %p + %add = fadd <8 x float> %x, + %ext = extractelement <8 x float> %add, i32 2 + ret float %ext +} + +define double @extractelt_fadd_v4f64(ptr %p) { +; CHECK-LABEL: extractelt_fadd_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fld.d $fa0, $a0, 8 +; CHECK-NEXT: vldi $vr1, -984 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %x = load <4 x double>, ptr %p + %add = fadd <4 x double> %x, + %ext = extractelement <4 x double> %add, i32 1 + ret double %ext +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/fdiv-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/lasx/fdiv-reciprocal-estimate.ll index 7514dafa8000b..d75985b1ac215 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fdiv-reciprocal-estimate.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fdiv-reciprocal-estimate.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA32 -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx,+frecipe < %s | FileCheck %s ; RUN: llc 
--mtriple=loongarch64 --mattr=+lasx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA64 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx,+frecipe < %s | FileCheck %s define void @fdiv_v8f32(ptr %res, ptr %a0, ptr %a1) nounwind { ; FAULT-LABEL: fdiv_v8f32: @@ -40,35 +40,19 @@ define void @fdiv_v4f64(ptr %res, ptr %a0, ptr %a1) nounwind { ; FAULT-NEXT: xvst $xr0, $a0, 0 ; FAULT-NEXT: ret ; -; LA32-LABEL: fdiv_v4f64: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a3, %pc_hi20(.LCPI1_0) -; LA32-NEXT: xvld $xr0, $a2, 0 -; LA32-NEXT: xvld $xr1, $a3, %pc_lo12(.LCPI1_0) -; LA32-NEXT: xvld $xr2, $a1, 0 -; LA32-NEXT: xvfrecipe.d $xr3, $xr0 -; LA32-NEXT: xvfmadd.d $xr1, $xr0, $xr3, $xr1 -; LA32-NEXT: xvfnmsub.d $xr1, $xr1, $xr3, $xr3 -; LA32-NEXT: xvfmul.d $xr3, $xr2, $xr1 -; LA32-NEXT: xvfnmsub.d $xr0, $xr0, $xr3, $xr2 -; LA32-NEXT: xvfmadd.d $xr0, $xr1, $xr0, $xr3 -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: fdiv_v4f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: xvld $xr0, $a2, 0 -; LA64-NEXT: xvld $xr1, $a1, 0 -; LA64-NEXT: lu52i.d $a1, $zero, -1025 -; LA64-NEXT: xvreplgr2vr.d $xr2, $a1 -; LA64-NEXT: xvfrecipe.d $xr3, $xr0 -; LA64-NEXT: xvfmadd.d $xr2, $xr0, $xr3, $xr2 -; LA64-NEXT: xvfnmsub.d $xr2, $xr2, $xr3, $xr3 -; LA64-NEXT: xvfmul.d $xr3, $xr1, $xr2 -; LA64-NEXT: xvfnmsub.d $xr0, $xr0, $xr3, $xr1 -; LA64-NEXT: xvfmadd.d $xr0, $xr2, $xr0, $xr3 -; LA64-NEXT: xvst $xr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: fdiv_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a1, 0 +; CHECK-NEXT: xvfrecipe.d $xr2, $xr0 +; CHECK-NEXT: xvldi $xr3, -784 +; CHECK-NEXT: xvfmadd.d $xr3, $xr0, $xr2, $xr3 +; CHECK-NEXT: xvfnmsub.d $xr2, $xr3, $xr2, $xr2 +; CHECK-NEXT: xvfmul.d $xr3, $xr1, $xr2 +; CHECK-NEXT: xvfnmsub.d $xr0, $xr0, $xr3, $xr1 +; CHECK-NEXT: xvfmadd.d $xr0, $xr2, $xr0, $xr3 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret entry: %v0 = load <4 x double>, ptr %a0 %v1 = load <4 x double>, ptr %a1 @@ -90,8 +74,7 @@ define void @one_fdiv_v8f32(ptr %res, ptr %a0) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvfrecipe.s $xr1, $xr0 -; CHECK-NEXT: lu12i.w $a1, -264192 -; CHECK-NEXT: xvreplgr2vr.w $xr2, $a1 +; CHECK-NEXT: xvldi $xr2, -1296 ; CHECK-NEXT: xvfmadd.s $xr0, $xr0, $xr1, $xr2 ; CHECK-NEXT: xvfnmsub.s $xr0, $xr0, $xr1, $xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 @@ -107,24 +90,22 @@ define void @one_fdiv_v4f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA32-LABEL: one_fdiv_v4f64: ; FAULT-LA32: # %bb.0: # %entry ; FAULT-LA32-NEXT: xvld $xr0, $a1, 0 -; FAULT-LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; FAULT-LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI3_0) +; FAULT-LA32-NEXT: xvldi $xr1, -912 ; FAULT-LA32-NEXT: xvfdiv.d $xr0, $xr1, $xr0 ; FAULT-LA32-NEXT: xvst $xr0, $a0, 0 ; FAULT-LA32-NEXT: ret ; -; LA32-LABEL: one_fdiv_v4f64: -; LA32: # %bb.0: # %entry -; LA32-NEXT: xvld $xr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI3_0) -; LA32-NEXT: xvfrecipe.d $xr2, $xr0 -; LA32-NEXT: xvfnmsub.d $xr3, $xr0, $xr2, $xr1 -; LA32-NEXT: xvfmadd.d $xr2, $xr2, $xr3, $xr2 -; LA32-NEXT: xvfnmsub.d $xr0, $xr0, $xr2, $xr1 -; LA32-NEXT: xvfmadd.d $xr0, $xr2, $xr0, $xr2 -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret +; CHECK-LABEL: one_fdiv_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvfrecipe.d $xr1, $xr0 +; 
CHECK-NEXT: xvldi $xr2, -912 +; CHECK-NEXT: xvfnmsub.d $xr3, $xr0, $xr1, $xr2 +; CHECK-NEXT: xvfmadd.d $xr1, $xr1, $xr3, $xr1 +; CHECK-NEXT: xvfnmsub.d $xr0, $xr0, $xr1, $xr2 +; CHECK-NEXT: xvfmadd.d $xr0, $xr1, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret ; ; FAULT-LA64-LABEL: one_fdiv_v4f64: ; FAULT-LA64: # %bb.0: # %entry @@ -132,19 +113,6 @@ define void @one_fdiv_v4f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA64-NEXT: xvfrecip.d $xr0, $xr0 ; FAULT-LA64-NEXT: xvst $xr0, $a0, 0 ; FAULT-LA64-NEXT: ret -; -; LA64-LABEL: one_fdiv_v4f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: xvld $xr0, $a1, 0 -; LA64-NEXT: xvfrecipe.d $xr1, $xr0 -; LA64-NEXT: lu52i.d $a1, $zero, 1023 -; LA64-NEXT: xvreplgr2vr.d $xr2, $a1 -; LA64-NEXT: xvfnmsub.d $xr3, $xr0, $xr1, $xr2 -; LA64-NEXT: xvfmadd.d $xr1, $xr1, $xr3, $xr1 -; LA64-NEXT: xvfnmsub.d $xr0, $xr0, $xr1, $xr2 -; LA64-NEXT: xvfmadd.d $xr0, $xr1, $xr0, $xr1 -; LA64-NEXT: xvst $xr0, $a0, 0 -; LA64-NEXT: ret entry: %v0 = load <4 x double>, ptr %a0 %div = fdiv fast <4 x double> , %v0 diff --git a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll index 156c829c2dfb6..45b25013c9173 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll @@ -1,97 +1,178 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+lasx < %s | FileCheck %s --check-prefix=LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefix=LA64 declare <8 x float> @llvm.powi.v8f32.i32(<8 x float>, i32) define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind { -; CHECK-LABEL: powi_v8f32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi.d $sp, $sp, -128 -; CHECK-NEXT: st.d $ra, $sp, 120 # 8-byte Folded Spill -; CHECK-NEXT: st.d $fp, $sp, 112 # 8-byte Folded Spill -; CHECK-NEXT: xvst $xr0, $sp, 80 # 32-byte Folded Spill -; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: xvpickve.w $xr0, $xr0, 5 -; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 -; CHECK-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.w $xr0, $xr0, 4 -; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 -; CHECK-NEXT: vld $vr1, $sp, 48 # 16-byte Folded Reload -; CHECK-NEXT: vextrins.w $vr0, $vr1, 16 -; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.w $xr0, $xr0, 6 -; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 -; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: vextrins.w $vr1, $vr0, 32 -; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.w $xr0, $xr0, 7 -; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) -; 
CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 -; CHECK-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: vextrins.w $vr1, $vr0, 48 -; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.w $xr0, $xr0, 1 -; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 -; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.w $xr0, $xr0, 0 -; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 -; CHECK-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload -; CHECK-NEXT: vextrins.w $vr0, $vr1, 16 -; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.w $xr0, $xr0, 2 -; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 -; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: vextrins.w $vr1, $vr0, 32 -; CHECK-NEXT: xvst $xr1, $sp, 16 # 32-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3 -; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 -; CHECK-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload -; CHECK-NEXT: vextrins.w $vr1, $vr0, 48 -; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 2 -; CHECK-NEXT: xvori.b $xr0, $xr1, 0 -; CHECK-NEXT: ld.d $fp, $sp, 112 # 8-byte Folded Reload -; CHECK-NEXT: ld.d $ra, $sp, 120 # 8-byte Folded Reload -; CHECK-NEXT: addi.d $sp, $sp, 128 -; CHECK-NEXT: ret +; LA32-LABEL: powi_v8f32: +; LA32: # %bb.0: # %entry +; LA32-NEXT: addi.w $sp, $sp, -128 +; LA32-NEXT: st.w $ra, $sp, 124 # 4-byte Folded Spill +; LA32-NEXT: st.w $fp, $sp, 120 # 4-byte Folded Spill +; LA32-NEXT: move $fp, $a0 +; LA32-NEXT: xvst $xr0, $sp, 80 # 32-byte Folded Spill +; LA32-NEXT: xvpickve.w $xr0, $xr0, 5 +; LA32-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA32-NEXT: bl __powisf2 +; LA32-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA32-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.w $xr0, $xr0, 4 +; LA32-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powisf2 +; LA32-NEXT: # kill: def $f0 killed $f0 def $xr0 +; LA32-NEXT: vld $vr1, $sp, 48 # 16-byte Folded Reload +; LA32-NEXT: vextrins.w $vr0, $vr1, 16 +; LA32-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.w $xr0, $xr0, 6 +; LA32-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powisf2 +; LA32-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA32-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; LA32-NEXT: vextrins.w $vr1, $vr0, 32 +; LA32-NEXT: xvst $xr1, $sp, 48 
# 32-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.w $xr0, $xr0, 7 +; LA32-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powisf2 +; LA32-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA32-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; LA32-NEXT: vextrins.w $vr1, $vr0, 48 +; LA32-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.w $xr0, $xr0, 1 +; LA32-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powisf2 +; LA32-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA32-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.w $xr0, $xr0, 0 +; LA32-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powisf2 +; LA32-NEXT: # kill: def $f0 killed $f0 def $xr0 +; LA32-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload +; LA32-NEXT: vextrins.w $vr0, $vr1, 16 +; LA32-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.w $xr0, $xr0, 2 +; LA32-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powisf2 +; LA32-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA32-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload +; LA32-NEXT: vextrins.w $vr1, $vr0, 32 +; LA32-NEXT: xvst $xr1, $sp, 16 # 32-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.w $xr0, $xr0, 3 +; LA32-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powisf2 +; LA32-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA32-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload +; LA32-NEXT: vextrins.w $vr1, $vr0, 48 +; LA32-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload +; LA32-NEXT: xvpermi.q $xr1, $xr0, 2 +; LA32-NEXT: xvori.b $xr0, $xr1, 0 +; LA32-NEXT: ld.w $fp, $sp, 120 # 4-byte Folded Reload +; LA32-NEXT: ld.w $ra, $sp, 124 # 4-byte Folded Reload +; LA32-NEXT: addi.w $sp, $sp, 128 +; LA32-NEXT: ret +; +; LA64-LABEL: powi_v8f32: +; LA64: # %bb.0: # %entry +; LA64-NEXT: addi.d $sp, $sp, -128 +; LA64-NEXT: st.d $ra, $sp, 120 # 8-byte Folded Spill +; LA64-NEXT: st.d $fp, $sp, 112 # 8-byte Folded Spill +; LA64-NEXT: xvst $xr0, $sp, 80 # 32-byte Folded Spill +; LA64-NEXT: addi.w $fp, $a0, 0 +; LA64-NEXT: xvpickve.w $xr0, $xr0, 5 +; LA64-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powisf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.w $xr0, $xr0, 4 +; LA64-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powisf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0 killed $f0 def $xr0 +; LA64-NEXT: vld $vr1, $sp, 48 # 16-byte Folded Reload +; LA64-NEXT: vextrins.w $vr0, $vr1, 16 +; LA64-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.w $xr0, $xr0, 6 +; LA64-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powisf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0 killed $f0 def 
$vr0 +; LA64-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; LA64-NEXT: vextrins.w $vr1, $vr0, 32 +; LA64-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.w $xr0, $xr0, 7 +; LA64-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powisf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA64-NEXT: xvld $xr1, $sp, 48 # 32-byte Folded Reload +; LA64-NEXT: vextrins.w $vr1, $vr0, 48 +; LA64-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.w $xr0, $xr0, 1 +; LA64-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powisf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.w $xr0, $xr0, 0 +; LA64-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powisf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0 killed $f0 def $xr0 +; LA64-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload +; LA64-NEXT: vextrins.w $vr0, $vr1, 16 +; LA64-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.w $xr0, $xr0, 2 +; LA64-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powisf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA64-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload +; LA64-NEXT: vextrins.w $vr1, $vr0, 32 +; LA64-NEXT: xvst $xr1, $sp, 16 # 32-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.w $xr0, $xr0, 3 +; LA64-NEXT: # kill: def $f0 killed $f0 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powisf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA64-NEXT: xvld $xr1, $sp, 16 # 32-byte Folded Reload +; LA64-NEXT: vextrins.w $vr1, $vr0, 48 +; LA64-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload +; LA64-NEXT: xvpermi.q $xr1, $xr0, 2 +; LA64-NEXT: xvori.b $xr0, $xr1, 0 +; LA64-NEXT: ld.d $fp, $sp, 112 # 8-byte Folded Reload +; LA64-NEXT: ld.d $ra, $sp, 120 # 8-byte Folded Reload +; LA64-NEXT: addi.d $sp, $sp, 128 +; LA64-NEXT: ret entry: %res = call <8 x float> @llvm.powi.v8f32.i32(<8 x float> %va, i32 %b) ret <8 x float> %res @@ -100,53 +181,96 @@ entry: declare <4 x double> @llvm.powi.v4f64.i32(<4 x double>, i32) define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind { -; CHECK-LABEL: powi_v4f64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addi.d $sp, $sp, -112 -; CHECK-NEXT: st.d $ra, $sp, 104 # 8-byte Folded Spill -; CHECK-NEXT: st.d $fp, $sp, 96 # 8-byte Folded Spill -; CHECK-NEXT: xvst $xr0, $sp, 64 # 32-byte Folded Spill -; CHECK-NEXT: addi.w $fp, $a0, 0 -; CHECK-NEXT: xvpickve.d $xr0, $xr0, 3 -; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 -; CHECK-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.d 
$xr0, $xr0, 2 -; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 -; CHECK-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload -; CHECK-NEXT: vextrins.d $vr0, $vr1, 16 -; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.d $xr0, $xr0, 1 -; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 -; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill -; CHECK-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload -; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0 -; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 -; CHECK-NEXT: move $a0, $fp -; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2) -; CHECK-NEXT: jirl $ra, $ra, 0 -; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 -; CHECK-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload -; CHECK-NEXT: vextrins.d $vr0, $vr1, 16 -; CHECK-NEXT: xvld $xr1, $sp, 32 # 32-byte Folded Reload -; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2 -; CHECK-NEXT: ld.d $fp, $sp, 96 # 8-byte Folded Reload -; CHECK-NEXT: ld.d $ra, $sp, 104 # 8-byte Folded Reload -; CHECK-NEXT: addi.d $sp, $sp, 112 -; CHECK-NEXT: ret +; LA32-LABEL: powi_v4f64: +; LA32: # %bb.0: # %entry +; LA32-NEXT: addi.w $sp, $sp, -112 +; LA32-NEXT: st.w $ra, $sp, 108 # 4-byte Folded Spill +; LA32-NEXT: st.w $fp, $sp, 104 # 4-byte Folded Spill +; LA32-NEXT: move $fp, $a0 +; LA32-NEXT: xvst $xr0, $sp, 64 # 32-byte Folded Spill +; LA32-NEXT: xvpickve.d $xr0, $xr0, 3 +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 +; LA32-NEXT: bl __powidf2 +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA32-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.d $xr0, $xr0, 2 +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powidf2 +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; LA32-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload +; LA32-NEXT: vextrins.d $vr0, $vr1, 16 +; LA32-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.d $xr0, $xr0, 1 +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powidf2 +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA32-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill +; LA32-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload +; LA32-NEXT: xvpickve.d $xr0, $xr0, 0 +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 +; LA32-NEXT: move $a0, $fp +; LA32-NEXT: bl __powidf2 +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; LA32-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload +; LA32-NEXT: vextrins.d $vr0, $vr1, 16 +; LA32-NEXT: xvld $xr1, $sp, 32 # 32-byte Folded Reload +; LA32-NEXT: xvpermi.q $xr0, $xr1, 2 +; LA32-NEXT: ld.w $fp, $sp, 104 # 4-byte Folded Reload +; LA32-NEXT: ld.w $ra, $sp, 108 # 4-byte Folded Reload +; LA32-NEXT: addi.w $sp, $sp, 112 +; LA32-NEXT: ret +; +; LA64-LABEL: powi_v4f64: +; LA64: # %bb.0: # %entry +; LA64-NEXT: addi.d $sp, $sp, -112 +; LA64-NEXT: st.d $ra, $sp, 104 # 8-byte Folded Spill +; LA64-NEXT: st.d $fp, $sp, 96 # 8-byte Folded Spill +; 
LA64-NEXT: xvst $xr0, $sp, 64 # 32-byte Folded Spill +; LA64-NEXT: addi.w $fp, $a0, 0 +; LA64-NEXT: xvpickve.d $xr0, $xr0, 3 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powidf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.d $xr0, $xr0, 2 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powidf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; LA64-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload +; LA64-NEXT: vextrins.d $vr0, $vr1, 16 +; LA64-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.d $xr0, $xr0, 1 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powidf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill +; LA64-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload +; LA64-NEXT: xvpickve.d $xr0, $xr0, 0 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0 +; LA64-NEXT: move $a0, $fp +; LA64-NEXT: pcaddu18i $ra, %call36(__powidf2) +; LA64-NEXT: jirl $ra, $ra, 0 +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; LA64-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload +; LA64-NEXT: vextrins.d $vr0, $vr1, 16 +; LA64-NEXT: xvld $xr1, $sp, 32 # 32-byte Folded Reload +; LA64-NEXT: xvpermi.q $xr0, $xr1, 2 +; LA64-NEXT: ld.d $fp, $sp, 96 # 8-byte Folded Reload +; LA64-NEXT: ld.d $ra, $sp, 104 # 8-byte Folded Reload +; LA64-NEXT: addi.d $sp, $sp, 112 +; LA64-NEXT: ret entry: %res = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> %va, i32 %b) ret <4 x double> %res diff --git a/llvm/test/CodeGen/LoongArch/lasx/fsqrt-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/lasx/fsqrt-reciprocal-estimate.ll index 4e475daa8ced3..e696129acb862 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fsqrt-reciprocal-estimate.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fsqrt-reciprocal-estimate.ll @@ -63,11 +63,9 @@ define void @one_div_sqrt_v8f32(ptr %res, ptr %a0) nounwind { ; LA32-NEXT: xvfrsqrte.s $xr1, $xr0 ; LA32-NEXT: xvfmul.s $xr1, $xr0, $xr1 ; LA32-NEXT: xvfmul.s $xr0, $xr0, $xr1 -; LA32-NEXT: lu12i.w $a1, -261120 -; LA32-NEXT: xvreplgr2vr.w $xr2, $a1 +; LA32-NEXT: xvldi $xr2, -1400 ; LA32-NEXT: xvfmadd.s $xr0, $xr0, $xr1, $xr2 -; LA32-NEXT: lu12i.w $a1, -266240 -; LA32-NEXT: xvreplgr2vr.w $xr2, $a1 +; LA32-NEXT: xvldi $xr2, -3137 ; LA32-NEXT: xvfmul.s $xr1, $xr1, $xr2 ; LA32-NEXT: xvfmul.s $xr0, $xr1, $xr0 ; LA32-NEXT: xvst $xr0, $sp, 64 @@ -100,11 +98,9 @@ define void @one_div_sqrt_v8f32(ptr %res, ptr %a0) nounwind { ; LA64-NEXT: xvfrsqrte.s $xr1, $xr0 ; LA64-NEXT: xvfmul.s $xr1, $xr0, $xr1 ; LA64-NEXT: xvfmul.s $xr0, $xr0, $xr1 -; LA64-NEXT: lu12i.w $a1, -261120 -; LA64-NEXT: xvreplgr2vr.w $xr2, $a1 +; LA64-NEXT: xvldi $xr2, -1400 ; LA64-NEXT: xvfmadd.s $xr0, $xr0, $xr1, $xr2 -; LA64-NEXT: lu12i.w $a1, -266240 -; LA64-NEXT: xvreplgr2vr.w $xr2, $a1 +; LA64-NEXT: xvldi $xr2, -3137 ; LA64-NEXT: xvfmul.s $xr1, $xr1, $xr2 ; LA64-NEXT: xvfmul.s $xr0, $xr1, $xr0 ; LA64-NEXT: xvst $xr0, $a0, 0 @@ -136,9 +132,8 @@ define void @one_div_sqrt_v4f64(ptr %res, ptr %a0) nounwind { ; 
FAULT-LA32-NEXT: ld.w $a1, $a1, 0 ; FAULT-LA32-NEXT: st.w $a1, $sp, 32 ; FAULT-LA32-NEXT: xvld $xr0, $sp, 32 -; FAULT-LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0) -; FAULT-LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI1_0) ; FAULT-LA32-NEXT: xvfsqrt.d $xr0, $xr0 +; FAULT-LA32-NEXT: xvldi $xr1, -912 ; FAULT-LA32-NEXT: xvfdiv.d $xr0, $xr1, $xr0 ; FAULT-LA32-NEXT: xvst $xr0, $sp, 64 ; FAULT-LA32-NEXT: vld $vr0, $sp, 80 @@ -176,18 +171,16 @@ define void @one_div_sqrt_v4f64(ptr %res, ptr %a0) nounwind { ; LA32-NEXT: st.w $a1, $sp, 32 ; LA32-NEXT: xvld $xr0, $sp, 32 ; LA32-NEXT: xvfrsqrte.d $xr1, $xr0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0) -; LA32-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI1_0) -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_1) -; LA32-NEXT: xvld $xr3, $a1, %pc_lo12(.LCPI1_1) ; LA32-NEXT: xvfmul.d $xr1, $xr0, $xr1 -; LA32-NEXT: xvfmul.d $xr4, $xr0, $xr1 -; LA32-NEXT: xvfmadd.d $xr4, $xr4, $xr1, $xr2 -; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr3 +; LA32-NEXT: xvfmul.d $xr2, $xr0, $xr1 +; LA32-NEXT: xvldi $xr3, -888 +; LA32-NEXT: xvfmadd.d $xr2, $xr2, $xr1, $xr3 +; LA32-NEXT: xvldi $xr4, -800 ; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr4 +; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr2 ; LA32-NEXT: xvfmul.d $xr0, $xr0, $xr1 -; LA32-NEXT: xvfmadd.d $xr0, $xr0, $xr1, $xr2 -; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr3 +; LA32-NEXT: xvfmadd.d $xr0, $xr0, $xr1, $xr3 +; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr4 ; LA32-NEXT: xvfmul.d $xr0, $xr1, $xr0 ; LA32-NEXT: xvst $xr0, $sp, 64 ; LA32-NEXT: vld $vr0, $sp, 80 @@ -219,13 +212,9 @@ define void @one_div_sqrt_v4f64(ptr %res, ptr %a0) nounwind { ; LA64-NEXT: xvfrsqrte.d $xr1, $xr0 ; LA64-NEXT: xvfmul.d $xr1, $xr0, $xr1 ; LA64-NEXT: xvfmul.d $xr2, $xr0, $xr1 -; LA64-NEXT: ori $a1, $zero, 0 -; LA64-NEXT: lu32i.d $a1, -524288 -; LA64-NEXT: lu52i.d $a1, $a1, -1024 -; LA64-NEXT: xvreplgr2vr.d $xr3, $a1 +; LA64-NEXT: xvldi $xr3, -888 ; LA64-NEXT: xvfmadd.d $xr2, $xr2, $xr1, $xr3 -; LA64-NEXT: lu52i.d $a1, $zero, -1026 -; LA64-NEXT: xvreplgr2vr.d $xr4, $a1 +; LA64-NEXT: xvldi $xr4, -800 ; LA64-NEXT: xvfmul.d $xr1, $xr1, $xr4 ; LA64-NEXT: xvfmul.d $xr1, $xr1, $xr2 ; LA64-NEXT: xvfmul.d $xr0, $xr0, $xr1 diff --git a/llvm/test/CodeGen/LoongArch/lasx/fsqrt.ll b/llvm/test/CodeGen/LoongArch/lasx/fsqrt.ll index f8a3284f04dc8..9ae651d612f18 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fsqrt.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fsqrt.ll @@ -174,9 +174,8 @@ define void @one_div_sqrt_v4f64(ptr %res, ptr %a0) nounwind { ; LA32-NEXT: ld.w $a1, $a1, 0 ; LA32-NEXT: st.w $a1, $sp, 32 ; LA32-NEXT: xvld $xr0, $sp, 32 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI3_0) ; LA32-NEXT: xvfsqrt.d $xr0, $xr0 +; LA32-NEXT: xvldi $xr1, -912 ; LA32-NEXT: xvfdiv.d $xr0, $xr1, $xr0 ; LA32-NEXT: xvst $xr0, $sp, 64 ; LA32-NEXT: vld $vr0, $sp, 80 diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frecipe.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frecipe.ll index 215436823af83..623a6de1bc402 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frecipe.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frecipe.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx,+frecipe < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx,+frecipe < %s | FileCheck %s declare <8 x float> @llvm.loongarch.lasx.xvfrecipe.s(<8 x float>) diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frsqrte.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frsqrte.ll index 
ad36c3aa5c29d..743ab10cc9b00 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frsqrte.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-frsqrte.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx,+frecipe < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx,+frecipe < %s | FileCheck %s declare <8 x float> @llvm.loongarch.lasx.xvfrsqrte.s(<8 x float>) diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-max-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-max-invalid-imm.ll index a671e9979b2fe..e6688bacd3bf9 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-max-invalid-imm.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-max-invalid-imm.ll @@ -1,3 +1,4 @@ +; RUN: not llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s 2>&1 | FileCheck %s ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s declare <32 x i8> @llvm.loongarch.lasx.xvmaxi.b(<32 x i8>, i32) diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-min-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-min-invalid-imm.ll index 5ed4104c295fa..cfe9ec575222a 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-min-invalid-imm.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-min-invalid-imm.ll @@ -1,3 +1,4 @@ +; RUN: not llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s 2>&1 | FileCheck %s ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s declare <32 x i8> @llvm.loongarch.lasx.xvmini.b(<32 x i8>, i32) diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-d-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-d-invalid-imm.ll new file mode 100644 index 0000000000000..5a5af4356f714 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-d-invalid-imm.ll @@ -0,0 +1,33 @@ +; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s + +declare i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64>, i32) + +define i64 @lasx_xvpickve2gr_d_lo(<4 x i64> %va) nounwind { +; CHECK: llvm.loongarch.lasx.xvpickve2gr.d: argument out of range +entry: + %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64> %va, i32 -1) + ret i64 %res +} + +define i64 @lasx_xvpickve2gr_d_hi(<4 x i64> %va) nounwind { +; CHECK: llvm.loongarch.lasx.xvpickve2gr.d: argument out of range +entry: + %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64> %va, i32 4) + ret i64 %res +} + +declare i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64>, i32) + +define i64 @lasx_xvpickve2gr_du_lo(<4 x i64> %va) nounwind { +; CHECK: llvm.loongarch.lasx.xvpickve2gr.du: argument out of range +entry: + %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64> %va, i32 -1) + ret i64 %res +} + +define i64 @lasx_xvpickve2gr_du_hi(<4 x i64> %va) nounwind { +; CHECK: llvm.loongarch.lasx.xvpickve2gr.du: argument out of range +entry: + %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64> %va, i32 4) + ret i64 %res +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-d.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-d.ll new file mode 100644 index 0000000000000..178dd92cbdb80 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-d.ll @@ -0,0 +1,26 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +declare i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64>, i32) + +define i64 
@lasx_xvpickve2gr_d(<4 x i64> %va) nounwind { +; CHECK-LABEL: lasx_xvpickve2gr_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 +; CHECK-NEXT: ret +entry: + %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64> %va, i32 1) + ret i64 %res +} + +declare i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64>, i32) + +define i64 @lasx_xvpickve2gr_du(<4 x i64> %va) nounwind { +; CHECK-LABEL: lasx_xvpickve2gr_du: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvpickve2gr.du $a0, $xr0, 1 +; CHECK-NEXT: ret +entry: + %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64> %va, i32 1) + ret i64 %res +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-invalid-imm.ll index 93056b272dfc5..0c91b56387f79 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-invalid-imm.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr-invalid-imm.ll @@ -1,3 +1,4 @@ +; RUN: not llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s 2>&1 | FileCheck %s ; RUN: not llc --mtriple=loongarch64 --mattr=+lasx < %s 2>&1 | FileCheck %s declare i32 @llvm.loongarch.lasx.xvpickve2gr.w(<8 x i32>, i32) @@ -16,22 +17,6 @@ entry: ret i32 %res } -declare i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64>, i32) - -define i64 @lasx_xvpickve2gr_d_lo(<4 x i64> %va) nounwind { -; CHECK: llvm.loongarch.lasx.xvpickve2gr.d: argument out of range -entry: - %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64> %va, i32 -1) - ret i64 %res -} - -define i64 @lasx_xvpickve2gr_d_hi(<4 x i64> %va) nounwind { -; CHECK: llvm.loongarch.lasx.xvpickve2gr.d: argument out of range -entry: - %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64> %va, i32 4) - ret i64 %res -} - declare i32 @llvm.loongarch.lasx.xvpickve2gr.wu(<8 x i32>, i32) define i32 @lasx_xvpickve2gr_wu_lo(<8 x i32> %va) nounwind { @@ -47,19 +32,3 @@ entry: %res = call i32 @llvm.loongarch.lasx.xvpickve2gr.wu(<8 x i32> %va, i32 8) ret i32 %res } - -declare i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64>, i32) - -define i64 @lasx_xvpickve2gr_du_lo(<4 x i64> %va) nounwind { -; CHECK: llvm.loongarch.lasx.xvpickve2gr.du: argument out of range -entry: - %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64> %va, i32 -1) - ret i64 %res -} - -define i64 @lasx_xvpickve2gr_du_hi(<4 x i64> %va) nounwind { -; CHECK: llvm.loongarch.lasx.xvpickve2gr.du: argument out of range -entry: - %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64> %va, i32 4) - ret i64 %res -} diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr.ll index 0617e7424321b..a6f19ce0c0140 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-pickve2gr.ll @@ -1,9 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s - - - declare i32 @llvm.loongarch.lasx.xvpickve2gr.w(<8 x i32>, i32) define i32 @lasx_xvpickve2gr_w(<8 x i32> %va) nounwind { @@ -16,18 +14,6 @@ entry: ret i32 %res } -declare i64 @llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64>, i32) - -define i64 @lasx_xvpickve2gr_d(<4 x i64> %va) nounwind { -; CHECK-LABEL: lasx_xvpickve2gr_d: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1 -; CHECK-NEXT: ret -entry: - %res = call i64 
@llvm.loongarch.lasx.xvpickve2gr.d(<4 x i64> %va, i32 1) - ret i64 %res -} - declare i32 @llvm.loongarch.lasx.xvpickve2gr.wu(<8 x i32>, i32) define i32 @lasx_xvpickve2gr_wu(<8 x i32> %va) nounwind { @@ -39,15 +25,3 @@ entry: %res = call i32 @llvm.loongarch.lasx.xvpickve2gr.wu(<8 x i32> %va, i32 1) ret i32 %res } - -declare i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64>, i32) - -define i64 @lasx_xvpickve2gr_du(<4 x i64> %va) nounwind { -; CHECK-LABEL: lasx_xvpickve2gr_du: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve2gr.du $a0, $xr0, 1 -; CHECK-NEXT: ret -entry: - %res = call i64 @llvm.loongarch.lasx.xvpickve2gr.du(<4 x i64> %va, i32 1) - ret i64 %res -} diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl-ins-gr2vr-d.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl-ins-gr2vr-d.ll new file mode 100644 index 0000000000000..79ec7b51f6278 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl-ins-gr2vr-d.ll @@ -0,0 +1,17 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +define <4 x i64> @xvrepl_ins_d(i64 %a, i64 %b) { +; CHECK-LABEL: xvrepl_ins_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvreplgr2vr.d $xr0, $a0 +; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 1 +; CHECK-NEXT: ret +entry: + %0 = call <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64 %a) + %1 = call <4 x i64> @llvm.loongarch.lasx.xvinsgr2vr.d(<4 x i64> %0, i64 %b, i32 1) + ret <4 x i64> %1 +} + +declare <4 x i64> @llvm.loongarch.lasx.xvinsgr2vr.d(<4 x i64>, i64, i32 immarg) +declare <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64) diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl-ins-gr2vr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl-ins-gr2vr.ll index 2e538ed66b250..31b809e016564 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl-ins-gr2vr.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-repl-ins-gr2vr.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s define <8 x i32> @xvrepl_ins_w(i32 %a, i32 %b) { @@ -13,19 +14,5 @@ entry: ret <8 x i32> %1 } -define <4 x i64> @xvrepl_ins_d(i64 %a, i64 %b) { -; CHECK-LABEL: xvrepl_ins_d: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvreplgr2vr.d $xr0, $a0 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 1 -; CHECK-NEXT: ret -entry: - %0 = call <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64 %a) - %1 = call <4 x i64> @llvm.loongarch.lasx.xvinsgr2vr.d(<4 x i64> %0, i64 %b, i32 1) - ret <4 x i64> %1 -} - declare <8 x i32> @llvm.loongarch.lasx.xvinsgr2vr.w(<8 x i32>, i32, i32 immarg) declare <8 x i32> @llvm.loongarch.lasx.xvreplgr2vr.w(i32) -declare <4 x i64> @llvm.loongarch.lasx.xvinsgr2vr.d(<4 x i64>, i64, i32 immarg) -declare <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64) diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr-d.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr-d.ll new file mode 100644 index 0000000000000..61bc89249d97e --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr-d.ll @@ -0,0 +1,14 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +declare <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64) + +define <4 x i64> @lasx_xvreplgr2vr_d(i64 %a) nounwind { +; 
CHECK-LABEL: lasx_xvreplgr2vr_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvreplgr2vr.d $xr0, $a0 +; CHECK-NEXT: ret +entry: + %res = call <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64 %a) + ret <4 x i64> %res +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr.ll index c71abd2205c67..a3c0e261e7122 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-replgr2vr.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s declare <32 x i8> @llvm.loongarch.lasx.xvreplgr2vr.b(i32) @@ -36,15 +37,3 @@ entry: %res = call <8 x i32> @llvm.loongarch.lasx.xvreplgr2vr.w(i32 %a) ret <8 x i32> %res } - -declare <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64) - -define <4 x i64> @lasx_xvreplgr2vr_d(i64 %a) nounwind { -; CHECK-LABEL: lasx_xvreplgr2vr_d: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvreplgr2vr.d $xr0, $a0 -; CHECK-NEXT: ret -entry: - %res = call <4 x i64> @llvm.loongarch.lasx.xvreplgr2vr.d(i64 %a) - ret <4 x i64> %res -} diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-set.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-set.ll index 6e3e2e0330f52..5e234e4bd8210 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-set.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-set.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s declare i32 @llvm.loongarch.lasx.xbz.v(<32 x i8>) diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setallnez.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setallnez.ll index a466b78bf8d2d..38e3289ef4cba 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setallnez.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setallnez.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s declare i32 @llvm.loongarch.lasx.xbnz.b(<32 x i8>) diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setanyeqz.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setanyeqz.ll index 36e65fc5b3281..f6917cffb36b5 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setanyeqz.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-setanyeqz.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s declare i32 @llvm.loongarch.lasx.xbz.b(<32 x i8>) diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll new file mode 100644 index 0000000000000..98687755fcfb4 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll @@ -0,0 +1,91 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +define void @vadda_b(ptr %res, ptr %a, ptr %b) nounwind { +; 
CHECK-LABEL: vadda_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadda.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <32 x i8>, ptr %a + %vb = load <32 x i8>, ptr %b + %conda = icmp slt <32 x i8> %va, zeroinitializer + %nega = sub <32 x i8> zeroinitializer, %va + %absa = select <32 x i1> %conda, <32 x i8> %nega, <32 x i8> %va + %condb = icmp slt <32 x i8> %vb, zeroinitializer + %negb = sub <32 x i8> zeroinitializer, %vb + %absb = select <32 x i1> %condb, <32 x i8> %negb, <32 x i8> %vb + %add = add <32 x i8> %absa, %absb + store <32 x i8> %add, ptr %res + ret void +} + +define void @vadda_h(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vadda_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadda.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i16>, ptr %a + %vb = load <16 x i16>, ptr %b + %conda = icmp slt <16 x i16> %va, zeroinitializer + %nega = sub <16 x i16> zeroinitializer, %va + %absa = select <16 x i1> %conda, <16 x i16> %nega, <16 x i16> %va + %condb = icmp slt <16 x i16> %vb, zeroinitializer + %negb = sub <16 x i16> zeroinitializer, %vb + %absb = select <16 x i1> %condb, <16 x i16> %negb, <16 x i16> %vb + %add = add <16 x i16> %absa, %absb + store <16 x i16> %add, ptr %res + ret void +} + +define void @vadda_w(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vadda_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadda.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %conda = icmp slt <8 x i32> %va, zeroinitializer + %nega = sub <8 x i32> zeroinitializer, %va + %absa = select <8 x i1> %conda, <8 x i32> %nega, <8 x i32> %va + %condb = icmp slt <8 x i32> %vb, zeroinitializer + %negb = sub <8 x i32> zeroinitializer, %vb + %absb = select <8 x i1> %condb, <8 x i32> %negb, <8 x i32> %vb + %add = add <8 x i32> %absa, %absb + store <8 x i32> %add, ptr %res + ret void +} + +define void @vadda_d(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vadda_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvadda.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %conda = icmp slt <4 x i64> %va, zeroinitializer + %nega = sub <4 x i64> zeroinitializer, %va + %absa = select <4 x i1> %conda, <4 x i64> %nega, <4 x i64> %va + %condb = icmp slt <4 x i64> %vb, zeroinitializer + %negb = sub <4 x i64> zeroinitializer, %vb + %absb = select <4 x i1> %condb, <4 x i64> %negb, <4 x i64> %vb + %add = add <4 x i64> %absa, %absb + store <4 x i64> %add, ptr %res + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/extractelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/extractelement.ll index cf0496fb8fb89..60b51755681a4 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/extractelement.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/extractelement.ll @@ -3,18 +3,11 @@ ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64 define void @extract_32xi8(ptr %src, ptr %dst) nounwind { -; LA32-LABEL: extract_32xi8: -; LA32: # %bb.0: -; LA32-NEXT: xvld $xr0, $a0, 0 -; LA32-NEXT: vpickve2gr.b 
$a0, $vr0, 1 -; LA32-NEXT: st.b $a0, $a1, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: extract_32xi8: -; LA64: # %bb.0: -; LA64-NEXT: xvld $xr0, $a0, 0 -; LA64-NEXT: xvstelm.b $xr0, $a1, 0, 1 -; LA64-NEXT: ret +; CHECK-LABEL: extract_32xi8: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a0, 0 +; CHECK-NEXT: xvstelm.b $xr0, $a1, 0, 1 +; CHECK-NEXT: ret %v = load volatile <32 x i8>, ptr %src %e = extractelement <32 x i8> %v, i32 1 store i8 %e, ptr %dst @@ -22,18 +15,11 @@ define void @extract_32xi8(ptr %src, ptr %dst) nounwind { } define void @extract_16xi16(ptr %src, ptr %dst) nounwind { -; LA32-LABEL: extract_16xi16: -; LA32: # %bb.0: -; LA32-NEXT: xvld $xr0, $a0, 0 -; LA32-NEXT: vpickve2gr.h $a0, $vr0, 1 -; LA32-NEXT: st.h $a0, $a1, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: extract_16xi16: -; LA64: # %bb.0: -; LA64-NEXT: xvld $xr0, $a0, 0 -; LA64-NEXT: xvstelm.h $xr0, $a1, 0, 1 -; LA64-NEXT: ret +; CHECK-LABEL: extract_16xi16: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a0, 0 +; CHECK-NEXT: xvstelm.h $xr0, $a1, 0, 1 +; CHECK-NEXT: ret %v = load volatile <16 x i16>, ptr %src %e = extractelement <16 x i16> %v, i32 1 store i16 %e, ptr %dst @@ -111,8 +97,7 @@ define void @extract_32xi8_idx(ptr %src, ptr %dst, i32 %idx) nounwind { ; LA32-NEXT: movgr2fr.w $fa1, $a2 ; LA32-NEXT: xvpermi.q $xr2, $xr0, 1 ; LA32-NEXT: xvshuf.b $xr0, $xr2, $xr0, $xr1 -; LA32-NEXT: vpickve2gr.b $a0, $vr0, 0 -; LA32-NEXT: st.b $a0, $a1, 0 +; LA32-NEXT: xvstelm.b $xr0, $a1, 0, 0 ; LA32-NEXT: ret ; ; LA64-LABEL: extract_32xi8_idx: @@ -136,8 +121,7 @@ define void @extract_16xi16_idx(ptr %src, ptr %dst, i32 %idx) nounwind { ; LA32-NEXT: movgr2fr.w $fa1, $a2 ; LA32-NEXT: xvpermi.q $xr2, $xr0, 1 ; LA32-NEXT: xvshuf.h $xr1, $xr2, $xr0 -; LA32-NEXT: vpickve2gr.h $a0, $vr1, 0 -; LA32-NEXT: st.h $a0, $a1, 0 +; LA32-NEXT: xvstelm.h $xr1, $a1, 0, 0 ; LA32-NEXT: ret ; ; LA64-LABEL: extract_16xi16_idx: diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll index ae6f091ddb498..aefaa0efb079c 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll @@ -53,8 +53,7 @@ define void @one_fdiv_v4f64(ptr %res, ptr %a0) nounwind { ; LA32-LABEL: one_fdiv_v4f64: ; LA32: # %bb.0: # %entry ; LA32-NEXT: xvld $xr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI3_0) +; LA32-NEXT: xvldi $xr1, -912 ; LA32-NEXT: xvfdiv.d $xr0, $xr1, $xr0 ; LA32-NEXT: xvst $xr0, $a0, 0 ; LA32-NEXT: ret diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll index 765473ce166df..0b8015ddbdd4a 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll @@ -7,13 +7,12 @@ define <4 x double> @shufflevector_v4f64(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: shufflevector_v4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve.d $xr2, $xr1, 3 -; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 -; CHECK-NEXT: xvrepl128vei.d $xr3, $xr3, 1 -; CHECK-NEXT: vextrins.d $vr3, $vr2, 16 +; CHECK-NEXT: xvpermi.d $xr2, $xr0, 3 +; CHECK-NEXT: xvpickve.d $xr3, $xr1, 3 +; CHECK-NEXT: vextrins.d $vr2, $vr3, 16 ; CHECK-NEXT: xvpickve.d $xr1, $xr1, 2 ; CHECK-NEXT: vextrins.d $vr0, $vr1, 16 -; CHECK-NEXT: xvpermi.q $xr0, $xr3, 2 +; CHECK-NEXT: xvpermi.q $xr0, $xr2, 2 ; CHECK-NEXT: ret entry: %c = shufflevector <4 x double> %a, <4 x double> %b, <4 x 
i32> diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll index ca405314686e6..af1598f69569e 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64 define <32 x i8> @insert_extract_v32i8(<32 x i8> %a) nounwind { ; CHECK-LABEL: insert_extract_v32i8: @@ -68,11 +69,19 @@ entry: } define <4 x i64> @insert_extract_v4i64(<4 x i64> %a) nounwind { -; CHECK-LABEL: insert_extract_v4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvpickve.d $xr1, $xr0, 3 -; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 -; CHECK-NEXT: ret +; LA32-LABEL: insert_extract_v4i64: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvpickve.w $xr1, $xr0, 6 +; LA32-NEXT: xvpickve.w $xr2, $xr0, 7 +; LA32-NEXT: xvinsve0.w $xr0, $xr1, 2 +; LA32-NEXT: xvinsve0.w $xr0, $xr2, 3 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_extract_v4i64: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvpickve.d $xr1, $xr0, 3 +; LA64-NEXT: xvinsve0.d $xr0, $xr1, 1 +; LA64-NEXT: ret entry: %b = extractelement <4 x i64> %a, i32 3 %c = insertelement <4 x i64> %a, i64 %b, i32 1 @@ -80,10 +89,17 @@ entry: } define <4 x i64> @insert_extract0_v4i64(<4 x i64> %a) nounwind { -; CHECK-LABEL: insert_extract0_v4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvinsve0.d $xr0, $xr0, 1 -; CHECK-NEXT: ret +; LA32-LABEL: insert_extract0_v4i64: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvpickve.w $xr1, $xr0, 1 +; LA32-NEXT: xvinsve0.w $xr0, $xr0, 2 +; LA32-NEXT: xvinsve0.w $xr0, $xr1, 3 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_extract0_v4i64: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvinsve0.d $xr0, $xr0, 1 +; LA64-NEXT: ret entry: %b = extractelement <4 x i64> %a, i32 0 %c = insertelement <4 x i64> %a, i64 %b, i32 1 diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-pair-elements.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-pair-elements.ll index 4e173c4feadba..c5d20003742e5 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-pair-elements.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-pair-elements.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64 define <32 x i8> @insert_extract_v32i8(<32 x i8> %a) nounwind { ; CHECK-LABEL: insert_extract_v32i8: @@ -54,10 +55,22 @@ entry: } define <4 x i64> @insert_extract_v4i64(<4 x i64> %a) nounwind { -; CHECK-LABEL: insert_extract_v4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvextrins.d $xr0, $xr0, 1 -; CHECK-NEXT: ret +; LA32-LABEL: insert_extract_v4i64: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvpickve.w $xr1, $xr0, 2 +; LA32-NEXT: xvpickve.w $xr2, $xr0, 3 +; LA32-NEXT: xvpickve.w $xr3, $xr0, 6 +; LA32-NEXT: xvpickve.w $xr4, $xr0, 7 
+; LA32-NEXT: xvinsve0.w $xr0, $xr1, 0 +; LA32-NEXT: xvinsve0.w $xr0, $xr2, 1 +; LA32-NEXT: xvinsve0.w $xr0, $xr3, 4 +; LA32-NEXT: xvinsve0.w $xr0, $xr4, 5 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_extract_v4i64: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvextrins.d $xr0, $xr0, 1 +; LA64-NEXT: ret entry: %b_lo = extractelement <4 x i64> %a, i32 1 %b_hi = extractelement <4 x i64> %a, i32 3 diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll index aa29264924df9..2f1db43e68fef 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insertelement.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64 define void @insert_32xi8(ptr %src, ptr %dst, i8 %in) nounwind { ; CHECK-LABEL: insert_32xi8: @@ -121,12 +122,20 @@ define void @insert_8xi32(ptr %src, ptr %dst, i32 %in) nounwind { } define void @insert_4xi64(ptr %src, ptr %dst, i64 %in) nounwind { -; CHECK-LABEL: insert_4xi64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvinsgr2vr.d $xr0, $a2, 1 -; CHECK-NEXT: xvst $xr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_4xi64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvinsgr2vr.w $xr0, $a2, 2 +; LA32-NEXT: xvinsgr2vr.w $xr0, $a3, 3 +; LA32-NEXT: xvst $xr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_4xi64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvinsgr2vr.d $xr0, $a2, 1 +; LA64-NEXT: xvst $xr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <4 x i64>, ptr %src %v_new = insertelement <4 x i64> %v, i64 %in, i32 1 store <4 x i64> %v_new, ptr %dst @@ -162,18 +171,30 @@ define void @insert_4xdouble(ptr %src, ptr %dst, double %in) nounwind { } define void @insert_32xi8_idx(ptr %src, ptr %dst, i8 %in, i32 %idx) nounwind { -; CHECK-LABEL: insert_32xi8_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI12_0) -; CHECK-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI12_0) -; CHECK-NEXT: xvld $xr1, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0 -; CHECK-NEXT: xvreplgr2vr.b $xr2, $a0 -; CHECK-NEXT: xvseq.b $xr0, $xr2, $xr0 -; CHECK-NEXT: xvreplgr2vr.b $xr2, $a2 -; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 -; CHECK-NEXT: xvst $xr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_32xi8_idx: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a4, %pc_hi20(.LCPI12_0) +; LA32-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI12_0) +; LA32-NEXT: xvld $xr1, $a0, 0 +; LA32-NEXT: xvreplgr2vr.b $xr2, $a3 +; LA32-NEXT: xvseq.b $xr0, $xr2, $xr0 +; LA32-NEXT: xvreplgr2vr.b $xr2, $a2 +; LA32-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 +; LA32-NEXT: xvst $xr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_32xi8_idx: +; LA64: # %bb.0: +; LA64-NEXT: pcalau12i $a4, %pc_hi20(.LCPI12_0) +; LA64-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI12_0) +; LA64-NEXT: xvld $xr1, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a3, 31, 0 +; LA64-NEXT: xvreplgr2vr.b $xr2, $a0 +; LA64-NEXT: xvseq.b $xr0, $xr2, $xr0 +; LA64-NEXT: xvreplgr2vr.b $xr2, $a2 +; LA64-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 +; LA64-NEXT: xvst $xr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <32 x i8>, ptr %src %v_new = insertelement <32 
x i8> %v, i8 %in, i32 %idx store <32 x i8> %v_new, ptr %dst @@ -181,18 +202,30 @@ define void @insert_32xi8_idx(ptr %src, ptr %dst, i8 %in, i32 %idx) nounwind { } define void @insert_16xi16_idx(ptr %src, ptr %dst, i16 %in, i32 %idx) nounwind { -; CHECK-LABEL: insert_16xi16_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI13_0) -; CHECK-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI13_0) -; CHECK-NEXT: xvld $xr1, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0 -; CHECK-NEXT: xvreplgr2vr.h $xr2, $a0 -; CHECK-NEXT: xvseq.h $xr0, $xr2, $xr0 -; CHECK-NEXT: xvreplgr2vr.h $xr2, $a2 -; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 -; CHECK-NEXT: xvst $xr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_16xi16_idx: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a4, %pc_hi20(.LCPI13_0) +; LA32-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI13_0) +; LA32-NEXT: xvld $xr1, $a0, 0 +; LA32-NEXT: xvreplgr2vr.h $xr2, $a3 +; LA32-NEXT: xvseq.h $xr0, $xr2, $xr0 +; LA32-NEXT: xvreplgr2vr.h $xr2, $a2 +; LA32-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 +; LA32-NEXT: xvst $xr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_16xi16_idx: +; LA64: # %bb.0: +; LA64-NEXT: pcalau12i $a4, %pc_hi20(.LCPI13_0) +; LA64-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI13_0) +; LA64-NEXT: xvld $xr1, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a3, 31, 0 +; LA64-NEXT: xvreplgr2vr.h $xr2, $a0 +; LA64-NEXT: xvseq.h $xr0, $xr2, $xr0 +; LA64-NEXT: xvreplgr2vr.h $xr2, $a2 +; LA64-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 +; LA64-NEXT: xvst $xr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <16 x i16>, ptr %src %v_new = insertelement <16 x i16> %v, i16 %in, i32 %idx store <16 x i16> %v_new, ptr %dst @@ -200,18 +233,30 @@ define void @insert_16xi16_idx(ptr %src, ptr %dst, i16 %in, i32 %idx) nounwind { } define void @insert_8xi32_idx(ptr %src, ptr %dst, i32 %in, i32 %idx) nounwind { -; CHECK-LABEL: insert_8xi32_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI14_0) -; CHECK-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI14_0) -; CHECK-NEXT: xvld $xr1, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0 -; CHECK-NEXT: xvreplgr2vr.w $xr2, $a0 -; CHECK-NEXT: xvseq.w $xr0, $xr2, $xr0 -; CHECK-NEXT: xvreplgr2vr.w $xr2, $a2 -; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 -; CHECK-NEXT: xvst $xr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_8xi32_idx: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a4, %pc_hi20(.LCPI14_0) +; LA32-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI14_0) +; LA32-NEXT: xvld $xr1, $a0, 0 +; LA32-NEXT: xvreplgr2vr.w $xr2, $a3 +; LA32-NEXT: xvseq.w $xr0, $xr2, $xr0 +; LA32-NEXT: xvreplgr2vr.w $xr2, $a2 +; LA32-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 +; LA32-NEXT: xvst $xr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_8xi32_idx: +; LA64: # %bb.0: +; LA64-NEXT: pcalau12i $a4, %pc_hi20(.LCPI14_0) +; LA64-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI14_0) +; LA64-NEXT: xvld $xr1, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a3, 31, 0 +; LA64-NEXT: xvreplgr2vr.w $xr2, $a0 +; LA64-NEXT: xvseq.w $xr0, $xr2, $xr0 +; LA64-NEXT: xvreplgr2vr.w $xr2, $a2 +; LA64-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 +; LA64-NEXT: xvst $xr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <8 x i32>, ptr %src %v_new = insertelement <8 x i32> %v, i32 %in, i32 %idx store <8 x i32> %v_new, ptr %dst @@ -219,18 +264,36 @@ define void @insert_8xi32_idx(ptr %src, ptr %dst, i32 %in, i32 %idx) nounwind { } define void @insert_4xi64_idx(ptr %src, ptr %dst, i64 %in, i32 %idx) nounwind { -; CHECK-LABEL: insert_4xi64_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI15_0) -; CHECK-NEXT: 
xvld $xr0, $a4, %pc_lo12(.LCPI15_0) -; CHECK-NEXT: xvld $xr1, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0 -; CHECK-NEXT: xvreplgr2vr.d $xr2, $a0 -; CHECK-NEXT: xvseq.d $xr0, $xr2, $xr0 -; CHECK-NEXT: xvreplgr2vr.d $xr2, $a2 -; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 -; CHECK-NEXT: xvst $xr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_4xi64_idx: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a5, %pc_hi20(.LCPI15_0) +; LA32-NEXT: xvld $xr0, $a5, %pc_lo12(.LCPI15_0) +; LA32-NEXT: add.w $a4, $a4, $a4 +; LA32-NEXT: xvld $xr1, $a0, 0 +; LA32-NEXT: xvreplgr2vr.w $xr2, $a4 +; LA32-NEXT: xvseq.w $xr2, $xr2, $xr0 +; LA32-NEXT: xvreplgr2vr.w $xr3, $a2 +; LA32-NEXT: xvbitsel.v $xr1, $xr1, $xr3, $xr2 +; LA32-NEXT: addi.w $a0, $a4, 1 +; LA32-NEXT: xvreplgr2vr.w $xr2, $a0 +; LA32-NEXT: xvseq.w $xr0, $xr2, $xr0 +; LA32-NEXT: xvreplgr2vr.w $xr2, $a3 +; LA32-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 +; LA32-NEXT: xvst $xr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_4xi64_idx: +; LA64: # %bb.0: +; LA64-NEXT: pcalau12i $a4, %pc_hi20(.LCPI15_0) +; LA64-NEXT: xvld $xr0, $a4, %pc_lo12(.LCPI15_0) +; LA64-NEXT: xvld $xr1, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a3, 31, 0 +; LA64-NEXT: xvreplgr2vr.d $xr2, $a0 +; LA64-NEXT: xvseq.d $xr0, $xr2, $xr0 +; LA64-NEXT: xvreplgr2vr.d $xr2, $a2 +; LA64-NEXT: xvbitsel.v $xr0, $xr1, $xr2, $xr0 +; LA64-NEXT: xvst $xr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <4 x i64>, ptr %src %v_new = insertelement <4 x i64> %v, i64 %in, i32 %idx store <4 x i64> %v_new, ptr %dst @@ -238,19 +301,32 @@ define void @insert_4xi64_idx(ptr %src, ptr %dst, i64 %in, i32 %idx) nounwind { } define void @insert_8xfloat_idx(ptr %src, ptr %dst, float %in, i32 %idx) nounwind { -; CHECK-LABEL: insert_8xfloat_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 -; CHECK-NEXT: pcalau12i $a3, %pc_hi20(.LCPI16_0) -; CHECK-NEXT: xvld $xr1, $a3, %pc_lo12(.LCPI16_0) -; CHECK-NEXT: xvld $xr2, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0 -; CHECK-NEXT: xvreplgr2vr.w $xr3, $a0 -; CHECK-NEXT: xvseq.w $xr1, $xr3, $xr1 -; CHECK-NEXT: xvreplve0.w $xr0, $xr0 -; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr0, $xr1 -; CHECK-NEXT: xvst $xr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_8xfloat_idx: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a3, %pc_hi20(.LCPI16_0) +; LA32-NEXT: xvld $xr1, $a3, %pc_lo12(.LCPI16_0) +; LA32-NEXT: # kill: def $f0 killed $f0 def $xr0 +; LA32-NEXT: xvld $xr2, $a0, 0 +; LA32-NEXT: xvreplgr2vr.w $xr3, $a2 +; LA32-NEXT: xvseq.w $xr1, $xr3, $xr1 +; LA32-NEXT: xvreplve0.w $xr0, $xr0 +; LA32-NEXT: xvbitsel.v $xr0, $xr2, $xr0, $xr1 +; LA32-NEXT: xvst $xr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_8xfloat_idx: +; LA64: # %bb.0: +; LA64-NEXT: # kill: def $f0 killed $f0 def $xr0 +; LA64-NEXT: pcalau12i $a3, %pc_hi20(.LCPI16_0) +; LA64-NEXT: xvld $xr1, $a3, %pc_lo12(.LCPI16_0) +; LA64-NEXT: xvld $xr2, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a2, 31, 0 +; LA64-NEXT: xvreplgr2vr.w $xr3, $a0 +; LA64-NEXT: xvseq.w $xr1, $xr3, $xr1 +; LA64-NEXT: xvreplve0.w $xr0, $xr0 +; LA64-NEXT: xvbitsel.v $xr0, $xr2, $xr0, $xr1 +; LA64-NEXT: xvst $xr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <8 x float>, ptr %src %v_new = insertelement <8 x float> %v, float %in, i32 %idx store <8 x float> %v_new, ptr %dst @@ -258,19 +334,36 @@ define void @insert_8xfloat_idx(ptr %src, ptr %dst, float %in, i32 %idx) nounwin } define void @insert_4xdouble_idx(ptr %src, ptr %dst, double %in, i32 %idx) nounwind { -; CHECK-LABEL: insert_4xdouble_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def 
$f0_64 killed $f0_64 def $xr0 -; CHECK-NEXT: pcalau12i $a3, %pc_hi20(.LCPI17_0) -; CHECK-NEXT: xvld $xr1, $a3, %pc_lo12(.LCPI17_0) -; CHECK-NEXT: xvld $xr2, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0 -; CHECK-NEXT: xvreplgr2vr.d $xr3, $a0 -; CHECK-NEXT: xvseq.d $xr1, $xr3, $xr1 -; CHECK-NEXT: xvreplve0.d $xr0, $xr0 -; CHECK-NEXT: xvbitsel.v $xr0, $xr2, $xr0, $xr1 -; CHECK-NEXT: xvst $xr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_4xdouble_idx: +; LA32: # %bb.0: +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; LA32-NEXT: xvld $xr1, $a0, 0 +; LA32-NEXT: xvrepli.b $xr2, 0 +; LA32-NEXT: xvinsgr2vr.w $xr2, $a2, 0 +; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI17_0) +; LA32-NEXT: xvld $xr3, $a0, %pc_lo12(.LCPI17_0) +; LA32-NEXT: xvinsgr2vr.w $xr2, $a2, 2 +; LA32-NEXT: xvinsgr2vr.w $xr2, $a2, 4 +; LA32-NEXT: xvinsgr2vr.w $xr2, $a2, 6 +; LA32-NEXT: xvseq.d $xr2, $xr2, $xr3 +; LA32-NEXT: xvreplve0.d $xr0, $xr0 +; LA32-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; LA32-NEXT: xvst $xr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_4xdouble_idx: +; LA64: # %bb.0: +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0 +; LA64-NEXT: pcalau12i $a3, %pc_hi20(.LCPI17_0) +; LA64-NEXT: xvld $xr1, $a3, %pc_lo12(.LCPI17_0) +; LA64-NEXT: xvld $xr2, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a2, 31, 0 +; LA64-NEXT: xvreplgr2vr.d $xr3, $a0 +; LA64-NEXT: xvseq.d $xr1, $xr3, $xr1 +; LA64-NEXT: xvreplve0.d $xr0, $xr0 +; LA64-NEXT: xvbitsel.v $xr0, $xr2, $xr0, $xr1 +; LA64-NEXT: xvst $xr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <4 x double>, ptr %src %v_new = insertelement <4 x double> %v, double %in, i32 %idx store <4 x double> %v_new, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll new file mode 100644 index 0000000000000..e1784f81c2a07 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll @@ -0,0 +1,197 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +;; xvinsve0.w +define void @xvinsve0_v8i32_l_0(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_l_0: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_l_4(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_l_4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 4 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8f32_l(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8f32_l: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x float>, ptr %a + %vb = load <8 x float>, ptr %b + %vc = 
shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> + store <8 x float> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_h_1(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 1 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_h_6(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_h_6: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8f32_h(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8f32_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x float>, ptr %a + %vb = load <8 x float>, ptr %b + %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> + store <8 x float> %vc, ptr %d + ret void +} + +;; xvinsve0.d +define void @xvinsve0_v4i64_l_1(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_l_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_l_2(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_l_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4f64_l(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4f64_l: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x double>, ptr %a + %vb = load <4 x double>, ptr %b + %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> + store <4 x double> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_h_0(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_h_0: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_h_2(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_h_2: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4f64_h(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4f64_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x double>, ptr %a + %vb = load <4 x double>, ptr %b + %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> + store <4 x double> %vc, ptr %d + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvrepl128vei.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvrepl128vei.ll index dce1e4b777e29..9afe16d029fb2 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvrepl128vei.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvrepl128vei.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s ;; xvrepl128vei.b @@ -12,6 +13,17 @@ define <32 x i8> @shufflevector_v32i8(<32 x i8> %a, <32 x i8> %b) { ret <32 x i8> %c } +;; xvrepl128vei.b +define <32 x i8> @shufflevector_v32i8_undef(<32 x i8> %a) { +; CHECK-LABEL: shufflevector_v32i8_undef: +; CHECK: # %bb.0: +; CHECK-NEXT: xvrepl128vei.b $xr0, $xr0, 1 +; CHECK-NEXT: ret + %c = shufflevector <32 x i8> %a, <32 x i8> poison, <32 x i32> + ret <32 x i8> %c +} + ;; xvrepl128vei.h define <16 x i16> @shufflevector_v16i16(<16 x i16> %a, <16 x i16> %b) { ; CHECK-LABEL: shufflevector_v16i16: @@ -23,6 +35,17 @@ define <16 x i16> @shufflevector_v16i16(<16 x i16> %a, <16 x i16> %b) { ret <16 x i16> %c } +;; xvrepl128vei.h +define <16 x i16> @shufflevector_v16i16_undef(<16 x i16> %a) { +; CHECK-LABEL: shufflevector_v16i16_undef: +; CHECK: # %bb.0: +; CHECK-NEXT: xvrepl128vei.h $xr0, $xr0, 3 +; CHECK-NEXT: ret + %c = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> + ret <16 x i16> %c +} + ;; xvrepl128vei.w define <8 x i32> @shufflevector_v8i32(<8 x i32> %a, <8 x i32> %b) { ; CHECK-LABEL: shufflevector_v8i32: @@ -34,6 +57,16 @@ define <8 x i32> @shufflevector_v8i32(<8 x i32> %a, <8 x i32> %b) { ret <8 x i32> %c } +;; xvrepl128vei.w +define <8 x i32> @shufflevector_v8i32_undef(<8 x i32> %a) { +; CHECK-LABEL: shufflevector_v8i32_undef: +; CHECK: # %bb.0: +; CHECK-NEXT: xvrepl128vei.w $xr0, $xr0, 2 +; CHECK-NEXT: ret + %c = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> + ret <8 x i32> %c +} + ;; xvrepl128vei.d define <4 x i64> @shufflevector_v4i64(<4 x i64> %a, <4 x i64> %b) { ; CHECK-LABEL: shufflevector_v4i64: @@ -44,6 +77,16 @@ define <4 x i64> @shufflevector_v4i64(<4 x i64> %a, <4 x i64> %b) { ret <4 x i64> %c } +;; xvrepl128vei.d +define <4 x i64> @shufflevector_v4i64_undef(<4 x i64> %a) { +; CHECK-LABEL: shufflevector_v4i64_undef: +; CHECK: # %bb.0: +; CHECK-NEXT: xvrepl128vei.d $xr0, $xr0, 1 +; CHECK-NEXT: ret + %c = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> + ret <4 x i64> %c +} + ;; xvrepl128vei.w define <8 x float> @shufflevector_v8f32(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: 
shufflevector_v8f32: @@ -54,6 +97,17 @@ define <8 x float> @shufflevector_v8f32(<8 x float> %a, <8 x float> %b) { ret <8 x float> %c } +;; xvrepl128vei.w +define <8 x float> @shufflevector_v8f32_undef(<8 x float> %a) { +; CHECK-LABEL: shufflevector_v8f32_undef: +; CHECK: # %bb.0: +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 238 +; CHECK-NEXT: xvrepl128vei.w $xr0, $xr0, 1 +; CHECK-NEXT: ret + %c = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> + ret <8 x float> %c +} + ;; xvrepl128vei.d define <4 x double> @shufflevector_v4f64(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: shufflevector_v4f64: @@ -63,3 +117,13 @@ define <4 x double> @shufflevector_v4f64(<4 x double> %a, <4 x double> %b) { %c = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> ret <4 x double> %c } + +;; xvrepl128vei.d +define <4 x double> @shufflevector_v4f64_undef(<4 x double> %a) { +; CHECK-LABEL: shufflevector_v4f64_undef: +; CHECK: # %bb.0: +; CHECK-NEXT: xvrepl128vei.d $xr0, $xr0, 0 +; CHECK-NEXT: ret + %c = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> + ret <4 x double> %c +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvshuf.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvshuf.ll index 6a88805148715..4900146b69a25 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvshuf.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvshuf.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s ;; xvshuf.b diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvshuf4i.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvshuf4i.ll index 02186d23e31e5..37b62ca989edb 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvshuf4i.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvshuf4i.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s ;; xxvshuf4i.b @@ -40,4 +41,4 @@ define <8 x float> @shufflevector_xvshuf4i_v8f32(<8 x float> %a, <8 x float> %b) ; CHECK-NEXT: ret %c = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> ret <8 x float> %c -} \ No newline at end of file +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll new file mode 100644 index 0000000000000..39ac647d6875c --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll @@ -0,0 +1,58 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s + +define <8 x float> @fadd_elt0_v8f32(float %a) nounwind { +; CHECK-LABEL: fadd_elt0_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -1168 +; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <8 x float> poison, float %a, i32 0 + %c = fadd <8 x float> %b, + ret <8 x float> %c +} + +define <4 x double> @fadd_elt0_v4f64(double %a) nounwind { +; CHECK-LABEL: fadd_elt0_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -912 
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <4 x double> poison, double %a, i32 0 + %c = fadd <4 x double> %b, + ret <4 x double> %c +} + +define <8 x float> @fsub_splat_v8f32(float %a, float %b) nounwind { +; CHECK-LABEL: fsub_splat_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsub.s $fa0, $fa0, $fa1 +; CHECK-NEXT: xvreplve0.w $xr0, $xr0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <8 x float> poison, float %a, i32 0 + %insb = insertelement <8 x float> poison, float %b, i32 0 + %va = shufflevector <8 x float> %insa, <8 x float> poison, <8 x i32> zeroinitializer + %vb = shufflevector <8 x float> %insb, <8 x float> poison, <8 x i32> zeroinitializer + %c = fsub <8 x float> %va, %vb + ret <8 x float> %c +} + +define <4 x double> @fsub_splat_v4f64(double %a) nounwind { +; CHECK-LABEL: fsub_splat_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -784 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: xvreplve0.d $xr0, $xr0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <4 x double> poison, double %a, i32 0 + %insb = insertelement <4 x double> poison, double 1.0, i32 0 + %va = shufflevector <4 x double> %insa, <4 x double> poison, <4 x i32> zeroinitializer + %vb = shufflevector <4 x double> %insb, <4 x double> poison, <4 x i32> zeroinitializer + %c = fsub <4 x double> %va, %vb + ret <4 x double> %c +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll index 5f76d9951df9c..245f76472b844 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll @@ -1,15 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s define <32 x i8> @shuffle_v32i8(<32 x i8> %a) { ; CHECK-LABEL: shuffle_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI0_0) -; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI0_0) -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI0_1) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI0_1) -; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 -; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3 +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI0_0) +; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78 ; CHECK-NEXT: xvshuf.h $xr1, $xr2, $xr0 ; CHECK-NEXT: xvori.b $xr0, $xr1, 0 ; CHECK-NEXT: ret @@ -33,11 +31,8 @@ define <16 x i16> @shuffle_v16i16(<16 x i16> %a) { ; CHECK-LABEL: shuffle_v16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0) -; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI2_0) -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_1) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI2_1) -; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 -; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3 +; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI2_0) +; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78 ; CHECK-NEXT: xvshuf.w $xr1, $xr2, $xr0 ; CHECK-NEXT: xvori.b $xr0, $xr1, 0 ; CHECK-NEXT: ret @@ -71,10 +66,7 @@ define <8 x i32> @shuffle_v8i32(<8 x i32> %a) { define <8 x i32> @shuffle_v8i32_same_lane(<8 x i32> %a) { ; CHECK-LABEL: shuffle_v8i32_same_lane: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI5_0) -; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr0 -; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 225 ; CHECK-NEXT: ret 
%shuffle = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> ret <8 x i32> %shuffle @@ -83,14 +75,7 @@ define <8 x i32> @shuffle_v8i32_same_lane(<8 x i32> %a) { define <4 x i64> @shuffle_v4i64(<4 x i64> %a) { ; CHECK-LABEL: shuffle_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_0) -; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI6_0) -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI6_1) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI6_1) -; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 -; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3 -; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0 -; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 39 ; CHECK-NEXT: ret %shuffle = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> ret <4 x i64> %shuffle @@ -99,10 +84,7 @@ define <4 x i64> @shuffle_v4i64(<4 x i64> %a) { define <4 x i64> @shuffle_v4i64_same_lane(<4 x i64> %a) { ; CHECK-LABEL: shuffle_v4i64_same_lane: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI7_0) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI7_0) -; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr0 -; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 225 ; CHECK-NEXT: ret %shuffle = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> ret <4 x i64> %shuffle @@ -135,14 +117,7 @@ define <8 x float> @shuffle_v8f32_same_lane(<8 x float> %a) { define <4 x double> @shuffle_v4f64(<4 x double> %a) { ; CHECK-LABEL: shuffle_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_0) -; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI10_0) -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI10_1) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI10_1) -; CHECK-NEXT: xvpermi.d $xr3, $xr0, 78 -; CHECK-NEXT: xvshuf.d $xr2, $xr0, $xr3 -; CHECK-NEXT: xvshuf.d $xr1, $xr2, $xr0 -; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 39 ; CHECK-NEXT: ret %shuffle = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> ret <4 x double> %shuffle @@ -151,11 +126,7 @@ define <4 x double> @shuffle_v4f64(<4 x double> %a) { define <4 x double> @shuffle_v4f64_same_lane(<4 x double> %a) { ; CHECK-LABEL: shuffle_v4f64_same_lane: ; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_0) -; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI11_0) -; CHECK-NEXT: xvpermi.d $xr0, $xr0, 78 -; CHECK-NEXT: xvshuf.d $xr1, $xr0, $xr0 -; CHECK-NEXT: xvori.b $xr0, $xr1, 0 +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 75 ; CHECK-NEXT: ret %shuffle = shufflevector <4 x double> %a, <4 x double> poison, <4 x i32> ret <4 x double> %shuffle diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-add.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-add.ll index 7268eb24ee51c..3e815a174d232 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-add.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-add.ll @@ -1,19 +1,33 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s --check-prefix=LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s --check-prefix=LA64 define void @vec_reduce_add_v32i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvhaddw.h.b $xr0, $xr0, $xr0 -; CHECK-NEXT: xvhaddw.w.h $xr0, $xr0, $xr0 -; CHECK-NEXT: xvhaddw.d.w $xr0, $xr0, $xr0 -; CHECK-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 -; CHECK-NEXT: 
xvpermi.d $xr1, $xr0, 2 -; CHECK-NEXT: xvadd.d $xr0, $xr1, $xr0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: st.b $a0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v32i8: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvhaddw.h.b $xr0, $xr0, $xr0 +; LA32-NEXT: xvhaddw.w.h $xr0, $xr0, $xr0 +; LA32-NEXT: xvhaddw.d.w $xr0, $xr0, $xr0 +; LA32-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 +; LA32-NEXT: xvpermi.d $xr1, $xr0, 2 +; LA32-NEXT: xvadd.d $xr0, $xr1, $xr0 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 0 +; LA32-NEXT: st.b $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v32i8: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvhaddw.h.b $xr0, $xr0, $xr0 +; LA64-NEXT: xvhaddw.w.h $xr0, $xr0, $xr0 +; LA64-NEXT: xvhaddw.d.w $xr0, $xr0, $xr0 +; LA64-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 +; LA64-NEXT: xvpermi.d $xr1, $xr0, 2 +; LA64-NEXT: xvadd.d $xr0, $xr1, $xr0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 0 +; LA64-NEXT: st.b $a0, $a1, 0 +; LA64-NEXT: ret %v = load <32 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %v) store i8 %res, ptr %dst @@ -21,17 +35,29 @@ define void @vec_reduce_add_v32i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v16i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v16i16: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvhaddw.w.h $xr0, $xr0, $xr0 -; CHECK-NEXT: xvhaddw.d.w $xr0, $xr0, $xr0 -; CHECK-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 -; CHECK-NEXT: xvpermi.d $xr1, $xr0, 2 -; CHECK-NEXT: xvadd.d $xr0, $xr1, $xr0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: st.h $a0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v16i16: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvhaddw.w.h $xr0, $xr0, $xr0 +; LA32-NEXT: xvhaddw.d.w $xr0, $xr0, $xr0 +; LA32-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 +; LA32-NEXT: xvpermi.d $xr1, $xr0, 2 +; LA32-NEXT: xvadd.d $xr0, $xr1, $xr0 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 0 +; LA32-NEXT: st.h $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v16i16: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvhaddw.w.h $xr0, $xr0, $xr0 +; LA64-NEXT: xvhaddw.d.w $xr0, $xr0, $xr0 +; LA64-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 +; LA64-NEXT: xvpermi.d $xr1, $xr0, 2 +; LA64-NEXT: xvadd.d $xr0, $xr1, $xr0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 0 +; LA64-NEXT: st.h $a0, $a1, 0 +; LA64-NEXT: ret %v = load <16 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %v) store i16 %res, ptr %dst @@ -39,16 +65,27 @@ define void @vec_reduce_add_v16i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v8i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvhaddw.d.w $xr0, $xr0, $xr0 -; CHECK-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 -; CHECK-NEXT: xvpermi.d $xr1, $xr0, 2 -; CHECK-NEXT: xvadd.d $xr0, $xr1, $xr0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0 -; CHECK-NEXT: st.w $a0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvhaddw.d.w $xr0, $xr0, $xr0 +; LA32-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 +; LA32-NEXT: xvpermi.d $xr1, $xr0, 2 +; LA32-NEXT: xvadd.d $xr0, $xr1, $xr0 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvhaddw.d.w $xr0, $xr0, $xr0 +; LA64-NEXT: 
xvhaddw.q.d $xr0, $xr0, $xr0 +; LA64-NEXT: xvpermi.d $xr1, $xr0, 2 +; LA64-NEXT: xvadd.d $xr0, $xr1, $xr0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 0 +; LA64-NEXT: st.w $a0, $a1, 0 +; LA64-NEXT: ret %v = load <8 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %v) store i32 %res, ptr %dst @@ -56,14 +93,31 @@ define void @vec_reduce_add_v8i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v4i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 -; CHECK-NEXT: xvpermi.d $xr1, $xr0, 2 -; CHECK-NEXT: xvadd.d $xr0, $xr1, $xr0 -; CHECK-NEXT: xvstelm.d $xr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vadd.d $vr0, $vr0, $vr1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 1 +; LA32-NEXT: add.w $a0, $a2, $a0 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 0 +; LA32-NEXT: add.w $a2, $a3, $a2 +; LA32-NEXT: sltu $a3, $a2, $a3 +; LA32-NEXT: add.w $a0, $a0, $a3 +; LA32-NEXT: st.w $a2, $a1, 0 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvhaddw.q.d $xr0, $xr0, $xr0 +; LA64-NEXT: xvpermi.d $xr1, $xr0, 2 +; LA64-NEXT: xvadd.d $xr0, $xr1, $xr0 +; LA64-NEXT: xvstelm.d $xr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll index fd64beab57bf0..23cc230f04503 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-and.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_and_v32i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_and_v32i8: @@ -44,17 +45,30 @@ define void @vec_reduce_and_v16i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_and_v8i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_and_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vand.v $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_and_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vand.v $vr0, $vr0, $vr1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_and_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vand.v $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 
8 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v) store i32 %res, ptr %dst @@ -62,15 +76,30 @@ define void @vec_reduce_and_v8i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_and_v4i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_and_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vand.v $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_and_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vand.v $vr0, $vr0, $vr1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 0 +; LA32-NEXT: and $a0, $a2, $a0 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 1 +; LA32-NEXT: and $a2, $a3, $a2 +; LA32-NEXT: st.w $a2, $a1, 4 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_and_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vand.v $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll index cdb08d9de3821..d7d3afc6dd1da 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-or.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_or_v32i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_or_v32i8: @@ -44,17 +45,30 @@ define void @vec_reduce_or_v16i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_or_v8i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_or_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vor.v $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_or_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vor.v $vr0, $vr0, $vr1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_or_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vor.v $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: 
vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %v) store i32 %res, ptr %dst @@ -62,15 +76,30 @@ define void @vec_reduce_or_v8i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_or_v4i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_or_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vor.v $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_or_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vor.v $vr0, $vr0, $vr1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 0 +; LA32-NEXT: or $a0, $a2, $a0 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 1 +; LA32-NEXT: or $a2, $a3, $a2 +; LA32-NEXT: st.w $a2, $a1, 4 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_or_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vor.v $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll index 1d182731c93be..8cbbb52884865 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smax.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_smax_v32i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_smax_v32i8: @@ -44,17 +45,30 @@ define void @vec_reduce_smax_v16i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smax_v8i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smax_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smax_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vmax.w $vr0, $vr0, $vr1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vmax.w $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.w $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smax_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vmax.w $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmax.w $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: 
vmax.w $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %v) store i32 %res, ptr %dst @@ -62,15 +76,41 @@ define void @vec_reduce_smax_v8i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smax_v4i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smax_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmax.d $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smax_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vmax.d $vr0, $vr0, $vr1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 1 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 3 +; LA32-NEXT: slt $a3, $a2, $a0 +; LA32-NEXT: xor $a4, $a0, $a2 +; LA32-NEXT: sltui $a4, $a4, 1 +; LA32-NEXT: masknez $a3, $a3, $a4 +; LA32-NEXT: vpickve2gr.w $a5, $vr0, 0 +; LA32-NEXT: vpickve2gr.w $a6, $vr0, 2 +; LA32-NEXT: sltu $a7, $a6, $a5 +; LA32-NEXT: maskeqz $a4, $a7, $a4 +; LA32-NEXT: or $a3, $a4, $a3 +; LA32-NEXT: masknez $a4, $a6, $a3 +; LA32-NEXT: maskeqz $a5, $a5, $a3 +; LA32-NEXT: or $a4, $a5, $a4 +; LA32-NEXT: masknez $a2, $a2, $a3 +; LA32-NEXT: maskeqz $a0, $a0, $a3 +; LA32-NEXT: or $a0, $a0, $a2 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: st.w $a4, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smax_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vmax.d $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmax.d $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll index 369afdd1fc7bc..c34852aa8a28f 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-smin.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_smin_v32i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_smin_v32i8: @@ -44,17 +45,30 @@ define void @vec_reduce_smin_v16i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smin_v8i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smin_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vmin.w $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smin_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vmin.w $vr0, $vr0, $vr1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA32-NEXT: 
vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smin_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vmin.w $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %v) store i32 %res, ptr %dst @@ -62,15 +76,41 @@ define void @vec_reduce_smin_v8i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smin_v4i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smin_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vmin.d $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmin.d $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smin_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vmin.d $vr0, $vr0, $vr1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 1 +; LA32-NEXT: slt $a3, $a2, $a0 +; LA32-NEXT: xor $a4, $a2, $a0 +; LA32-NEXT: sltui $a4, $a4, 1 +; LA32-NEXT: masknez $a3, $a3, $a4 +; LA32-NEXT: vpickve2gr.w $a5, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a6, $vr0, 0 +; LA32-NEXT: sltu $a7, $a6, $a5 +; LA32-NEXT: maskeqz $a4, $a7, $a4 +; LA32-NEXT: or $a3, $a4, $a3 +; LA32-NEXT: masknez $a4, $a5, $a3 +; LA32-NEXT: maskeqz $a5, $a6, $a3 +; LA32-NEXT: or $a4, $a5, $a4 +; LA32-NEXT: masknez $a0, $a0, $a3 +; LA32-NEXT: maskeqz $a2, $a2, $a3 +; LA32-NEXT: or $a0, $a2, $a0 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: st.w $a4, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smin_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vmin.d $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmin.d $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll index 5256a72ad7d97..c44f83a909a68 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umax.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_umax_v32i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_umax_v32i8: @@ -44,17 +45,30 @@ define void @vec_reduce_umax_v16i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umax_v8i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umax_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vmax.wu $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w 
$vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umax_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vmax.wu $vr0, $vr0, $vr1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umax_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vmax.wu $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %v) store i32 %res, ptr %dst @@ -62,15 +76,41 @@ define void @vec_reduce_umax_v8i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umax_v4i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umax_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vmax.du $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmax.du $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umax_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vmax.du $vr0, $vr0, $vr1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 1 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 3 +; LA32-NEXT: sltu $a3, $a2, $a0 +; LA32-NEXT: xor $a4, $a0, $a2 +; LA32-NEXT: sltui $a4, $a4, 1 +; LA32-NEXT: masknez $a3, $a3, $a4 +; LA32-NEXT: vpickve2gr.w $a5, $vr0, 0 +; LA32-NEXT: vpickve2gr.w $a6, $vr0, 2 +; LA32-NEXT: sltu $a7, $a6, $a5 +; LA32-NEXT: maskeqz $a4, $a7, $a4 +; LA32-NEXT: or $a3, $a4, $a3 +; LA32-NEXT: masknez $a4, $a6, $a3 +; LA32-NEXT: maskeqz $a5, $a5, $a3 +; LA32-NEXT: or $a4, $a5, $a4 +; LA32-NEXT: masknez $a2, $a2, $a3 +; LA32-NEXT: maskeqz $a0, $a0, $a3 +; LA32-NEXT: or $a0, $a0, $a2 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: st.w $a4, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umax_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vmax.du $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmax.du $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll index a82c886d8eed1..f91a1b34dffe9 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-umin.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_umin_v32i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_umin_v32i8: @@ -44,17 +45,30 @@ define void @vec_reduce_umin_v16i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umin_v8i32(ptr %src, 
ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umin_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vmin.wu $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umin_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vmin.wu $vr0, $vr0, $vr1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umin_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vmin.wu $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %v) store i32 %res, ptr %dst @@ -62,15 +76,41 @@ define void @vec_reduce_umin_v8i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umin_v4i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umin_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vmin.du $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmin.du $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umin_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vmin.du $vr0, $vr0, $vr1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 1 +; LA32-NEXT: sltu $a3, $a2, $a0 +; LA32-NEXT: xor $a4, $a2, $a0 +; LA32-NEXT: sltui $a4, $a4, 1 +; LA32-NEXT: masknez $a3, $a3, $a4 +; LA32-NEXT: vpickve2gr.w $a5, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a6, $vr0, 0 +; LA32-NEXT: sltu $a7, $a6, $a5 +; LA32-NEXT: maskeqz $a4, $a7, $a4 +; LA32-NEXT: or $a3, $a4, $a3 +; LA32-NEXT: masknez $a4, $a5, $a3 +; LA32-NEXT: maskeqz $a5, $a6, $a3 +; LA32-NEXT: or $a4, $a5, $a4 +; LA32-NEXT: masknez $a0, $a0, $a3 +; LA32-NEXT: maskeqz $a2, $a2, $a3 +; LA32-NEXT: or $a0, $a2, $a0 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: st.w $a4, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umin_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vmin.du $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmin.du $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll index 429fadcdd156e..af1a66b574c03 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vec-reduce-xor.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx %s -o - | FileCheck %s 
--check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_xor_v32i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_xor_v32i8: @@ -44,17 +45,30 @@ define void @vec_reduce_xor_v16i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_xor_v8i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_xor_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_xor_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vxor.v $vr0, $vr0, $vr1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_xor_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vxor.v $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %v) store i32 %res, ptr %dst @@ -62,15 +76,30 @@ define void @vec_reduce_xor_v8i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_xor_v4i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_xor_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvld $xr0, $a0, 0 -; CHECK-NEXT: xvpermi.q $xr1, $xr0, 1 -; CHECK-NEXT: vxor.v $vr0, $vr0, $vr1 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_xor_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvld $xr0, $a0, 0 +; LA32-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA32-NEXT: vxor.v $vr0, $vr0, $vr1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 0 +; LA32-NEXT: xor $a0, $a2, $a0 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 1 +; LA32-NEXT: xor $a2, $a3, $a2 +; LA32-NEXT: st.w $a2, $a1, 4 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_xor_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvld $xr0, $a0, 0 +; LA64-NEXT: xvpermi.q $xr1, $xr0, 1 +; LA64-NEXT: vxor.v $vr0, $vr0, $vr1 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lasx/vec-shuffle-byte-rotate.ll b/llvm/test/CodeGen/LoongArch/lasx/vec-shuffle-byte-rotate.ll index b697a2fd07435..2007f851129e8 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vec-shuffle-byte-rotate.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vec-shuffle-byte-rotate.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s ;; TODO 
For these special shuffle mask, we can lower it to xvbsll + xvbsrl + xvor. @@ -126,9 +127,7 @@ define <4 x i64> @byte_rotate_v4i64_2(<4 x i64> %a, <4 x i64> %b) nounwind { define <4 x i64> @byte_rotate_v4i64_3(<4 x i64> %a) nounwind { ; CHECK-LABEL: byte_rotate_v4i64_3: ; CHECK: # %bb.0: -; CHECK-NEXT: xvbsrl.v $xr1, $xr0, 8 -; CHECK-NEXT: xvbsll.v $xr0, $xr0, 8 -; CHECK-NEXT: xvor.v $xr0, $xr0, $xr1 +; CHECK-NEXT: xvpermi.d $xr0, $xr0, 177 ; CHECK-NEXT: ret %shuffle = shufflevector <4 x i64> %a, <4 x i64> poison, <4 x i32> ret <4 x i64> %shuffle diff --git a/llvm/test/CodeGen/LoongArch/lasx/vselect.ll b/llvm/test/CodeGen/LoongArch/lasx/vselect.ll index 44e4f71c8d08d..bf31ccb1d0104 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/vselect.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vselect.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s define void @select_v32i8_imm(ptr %res, ptr %a0) nounwind { ; CHECK-LABEL: select_v32i8_imm: @@ -50,26 +50,14 @@ define void @select_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind { } define void @select_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind { -; LA32-LABEL: select_v8i32: -; LA32: # %bb.0: -; LA32-NEXT: xvld $xr0, $a1, 0 -; LA32-NEXT: xvld $xr1, $a2, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI3_0) -; LA32-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: select_v8i32: -; LA64: # %bb.0: -; LA64-NEXT: xvld $xr0, $a1, 0 -; LA64-NEXT: xvld $xr1, $a2, 0 -; LA64-NEXT: ori $a1, $zero, 0 -; LA64-NEXT: lu32i.d $a1, -1 -; LA64-NEXT: xvreplgr2vr.d $xr2, $a1 -; LA64-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 -; LA64-NEXT: xvst $xr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: select_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvldi $xr2, -1552 +; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret %v0 = load <8 x i32>, ptr %a0 %v1 = load <8 x i32>, ptr %a1 %sel = select <8 x i1> , <8 x i32> %v0, <8 x i32> %v1 diff --git a/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll b/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll index 06d4a5d03f276..09908f619fa1f 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll @@ -1,15 +1,25 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx --verify-machineinstrs < %s | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx --verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx --verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LA64 define i32 @xmsk_eq_allzeros_i8(<32 x i8 > %a) { -; CHECK-LABEL: xmsk_eq_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmsknz.b $xr0, $xr0 -; CHECK-NEXT: xvnor.v $xr0, $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_eq_allzeros_i8: +; LA32: # %bb.0: # 
%entry +; LA32-NEXT: xvmsknz.b $xr0, $xr0 +; LA32-NEXT: xvnor.v $xr0, $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_eq_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmsknz.b $xr0, $xr0 +; LA64-NEXT: xvnor.v $xr0, $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret entry: %1 = icmp eq <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -17,15 +27,25 @@ entry: } define i32 @xmsk_sgt_allzeros_i8(<32 x i8 > %a) { -; CHECK-LABEL: xmsk_sgt_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvrepli.b $xr1, 0 -; CHECK-NEXT: xvslt.b $xr0, $xr1, $xr0 -; CHECK-NEXT: xvmskltz.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_sgt_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvrepli.b $xr1, 0 +; LA32-NEXT: xvslt.b $xr0, $xr1, $xr0 +; LA32-NEXT: xvmskltz.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_sgt_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvrepli.b $xr1, 0 +; LA64-NEXT: xvslt.b $xr0, $xr1, $xr0 +; LA64-NEXT: xvmskltz.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret entry: %1 = icmp sgt <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -33,13 +53,21 @@ entry: } define i32 @xmsk_sgt_allones_i8(<32 x i8 > %a) { -; CHECK-LABEL: xmsk_sgt_allones_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskgez.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_sgt_allones_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskgez.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_sgt_allones_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskgez.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret entry: %1 = icmp sgt <32 x i8> %a, splat (i8 -1) %2 = bitcast <32 x i1> %1 to i32 @@ -47,13 +75,21 @@ entry: } define i32 @xmsk_sge_allzeros_i8(<32 x i8 > %a) { -; CHECK-LABEL: xmsk_sge_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskgez.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_sge_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskgez.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_sge_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskgez.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret entry: %1 = icmp sge <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -61,13 +97,21 @@ entry: } define i32 
@xmsk_slt_allzeros_i8(<32 x i8 > %a) { -; CHECK-LABEL: xmsk_slt_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskltz.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_slt_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskltz.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_slt_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskltz.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret entry: %1 = icmp slt <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -75,13 +119,21 @@ entry: } define i16 @xmsk_slt_allzeros_i16(<16 x i16 > %a) { -; CHECK-LABEL: xmsk_slt_allzeros_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskltz.h $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 15, 8 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_slt_allzeros_i16: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskltz.h $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 15, 8 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_slt_allzeros_i16: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskltz.h $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 15, 8 +; LA64-NEXT: ret entry: %1 = icmp slt <16 x i16> %a, splat (i16 0) %2 = bitcast <16 x i1> %1 to i16 @@ -89,13 +141,21 @@ entry: } define i8 @xmsk_slt_allzeros_i32(<8 x i32 > %a) { -; CHECK-LABEL: xmsk_slt_allzeros_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_slt_allzeros_i32: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_slt_allzeros_i32: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret entry: %1 = icmp slt <8 x i32> %a, splat (i32 0) %2 = bitcast <8 x i1> %1 to i8 @@ -103,13 +163,21 @@ entry: } define i4 @xmsk_slt_allzeros_i64(<4 x i64 > %a) { -; CHECK-LABEL: xmsk_slt_allzeros_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskltz.d $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 3, 2 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_slt_allzeros_i64: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskltz.d $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 3, 2 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_slt_allzeros_i64: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskltz.d $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 3, 2 +; LA64-NEXT: ret entry: %1 = icmp slt <4 x i64> %a, splat (i64 0) %2 = bitcast <4 x i1> %1 to 
i4 @@ -117,14 +185,23 @@ entry: } define i32 @xmsk_sle_allzeros_i8(<32 x i8 > %a) { -; CHECK-LABEL: xmsk_sle_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvslei.b $xr0, $xr0, 0 -; CHECK-NEXT: xvmskltz.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_sle_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvslei.b $xr0, $xr0, 0 +; LA32-NEXT: xvmskltz.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_sle_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvslei.b $xr0, $xr0, 0 +; LA64-NEXT: xvmskltz.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret entry: %1 = icmp sle <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -132,13 +209,21 @@ entry: } define i32 @xmsk_sle_allones_i8(<32 x i8 > %a) { -; CHECK-LABEL: xmsk_sle_allones_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskltz.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_sle_allones_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskltz.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_sle_allones_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskltz.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret entry: %1 = icmp sle <32 x i8> %a, splat (i8 -1) %2 = bitcast <32 x i1> %1 to i32 @@ -146,13 +231,21 @@ entry: } define i16 @xmsk_sle_allones_i32(<16 x i16 > %a) { -; CHECK-LABEL: xmsk_sle_allones_i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskltz.h $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 15, 8 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_sle_allones_i32: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskltz.h $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 15, 8 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_sle_allones_i32: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskltz.h $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 15, 8 +; LA64-NEXT: ret entry: %1 = icmp sle <16 x i16> %a, splat (i16 -1) %2 = bitcast <16 x i1> %1 to i16 @@ -160,13 +253,21 @@ entry: } define i8 @xmsk_sle_allones_i16(<8 x i32 > %a) { -; CHECK-LABEL: xmsk_sle_allones_i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_sle_allones_i16: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_sle_allones_i16: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: 
xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret entry: %1 = icmp sle <8 x i32> %a, splat (i32 -1) %2 = bitcast <8 x i1> %1 to i8 @@ -174,13 +275,21 @@ entry: } define i4 @xmsk_sle_allones_i64(<4 x i64 > %a) { -; CHECK-LABEL: xmsk_sle_allones_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmskltz.d $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 3, 2 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_sle_allones_i64: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmskltz.d $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 3, 2 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_sle_allones_i64: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmskltz.d $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 3, 2 +; LA64-NEXT: ret entry: %1 = icmp sle <4 x i64> %a, splat (i64 -1) %2 = bitcast <4 x i1> %1 to i4 @@ -188,13 +297,21 @@ entry: } define i32 @xmsk_ne_allzeros_i8(<32 x i8 > %a) { -; CHECK-LABEL: xmsk_ne_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xvmsknz.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xmsk_ne_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: xvmsknz.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xmsk_ne_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: xvmsknz.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret entry: %1 = icmp ne <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -202,100 +319,165 @@ entry: } define i4 @xvmsk_sgt_v4i64(<4 x i64> %a, <4 x i64> %b) { -; CHECK-LABEL: xvmsk_sgt_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslt.d $xr0, $xr1, $xr0 -; CHECK-NEXT: xvmskltz.d $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 3, 2 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.d $xr0, $xr1, $xr0 +; LA32-NEXT: xvmskltz.d $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 3, 2 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.d $xr0, $xr1, $xr0 +; LA64-NEXT: xvmskltz.d $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 3, 2 +; LA64-NEXT: ret %x = icmp sgt <4 x i64> %a, %b %res = bitcast <4 x i1> %x to i4 ret i4 %res } define i4 @xvmsk_ogt_v4f64(<4 x double> %a, <4 x double> %b) { -; CHECK-LABEL: xvmsk_ogt_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvfcmp.clt.d $xr0, $xr1, $xr0 -; CHECK-NEXT: xvmskltz.d $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 3, 2 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_ogt_v4f64: +; LA32: # %bb.0: +; LA32-NEXT: xvfcmp.clt.d $xr0, $xr1, $xr0 +; LA32-NEXT: xvmskltz.d $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 3, 2 +; LA32-NEXT: ret +; +; 
LA64-LABEL: xvmsk_ogt_v4f64: +; LA64: # %bb.0: +; LA64-NEXT: xvfcmp.clt.d $xr0, $xr1, $xr0 +; LA64-NEXT: xvmskltz.d $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 3, 2 +; LA64-NEXT: ret %x = fcmp ogt <4 x double> %a, %b %res = bitcast <4 x i1> %x to i4 ret i4 %res } define i8 @xvmsk_sgt_v8i32(<8 x i32> %a, <8 x i32> %b) { -; CHECK-LABEL: xvmsk_sgt_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslt.w $xr0, $xr1, $xr0 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.w $xr0, $xr1, $xr0 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.w $xr0, $xr1, $xr0 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %x = icmp sgt <8 x i32> %a, %b %res = bitcast <8 x i1> %x to i8 ret i8 %res } define i8 @xvmsk_ogt_v8f32(<8 x float> %a, <8 x float> %b) { -; CHECK-LABEL: xvmsk_ogt_v8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvfcmp.clt.s $xr0, $xr1, $xr0 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_ogt_v8f32: +; LA32: # %bb.0: +; LA32-NEXT: xvfcmp.clt.s $xr0, $xr1, $xr0 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_ogt_v8f32: +; LA64: # %bb.0: +; LA64-NEXT: xvfcmp.clt.s $xr0, $xr1, $xr0 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %x = fcmp ogt <8 x float> %a, %b %res = bitcast <8 x i1> %x to i8 ret i8 %res } define i16 @xvmsk_sgt_v16i16(<16 x i16> %a, <16 x i16> %b) { -; CHECK-LABEL: xvmsk_sgt_v16i16: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslt.h $xr0, $xr1, $xr0 -; CHECK-NEXT: xvmskltz.h $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 15, 8 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_v16i16: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.h $xr0, $xr1, $xr0 +; LA32-NEXT: xvmskltz.h $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 15, 8 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_v16i16: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.h $xr0, $xr1, $xr0 +; LA64-NEXT: xvmskltz.h $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 15, 8 +; LA64-NEXT: ret %x = icmp sgt <16 x i16> %a, %b %res = bitcast <16 x i1> %x to i16 ret i16 %res } define i32 @xvmsk_sgt_v32i8(<32 x i8> %a, <32 x i8> %b) { -; CHECK-LABEL: xvmsk_sgt_v32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslt.b $xr0, $xr1, $xr0 -; CHECK-NEXT: xvmskltz.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; 
LA32-LABEL: xvmsk_sgt_v32i8: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.b $xr0, $xr1, $xr0 +; LA32-NEXT: xvmskltz.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_v32i8: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.b $xr0, $xr1, $xr0 +; LA64-NEXT: xvmskltz.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret %x = icmp sgt <32 x i8> %a, %b %res = bitcast <32 x i1> %x to i32 ret i32 %res } define i4 @xvmsk_sgt_and_sgt_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) { -; CHECK-LABEL: xvmsk_sgt_and_sgt_v4i64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslt.d $xr2, $xr3, $xr2 -; CHECK-NEXT: xvslt.d $xr0, $xr1, $xr0 -; CHECK-NEXT: xvand.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvmskltz.d $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 3, 2 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_and_sgt_v4i64: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.d $xr2, $xr3, $xr2 +; LA32-NEXT: xvslt.d $xr0, $xr1, $xr0 +; LA32-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvmskltz.d $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 3, 2 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_and_sgt_v4i64: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.d $xr2, $xr3, $xr2 +; LA64-NEXT: xvslt.d $xr0, $xr1, $xr0 +; LA64-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvmskltz.d $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 3, 2 +; LA64-NEXT: ret %x0 = icmp sgt <4 x i64> %a, %b %x1 = icmp sgt <4 x i64> %c, %d %y = and <4 x i1> %x0, %x1 @@ -304,16 +486,27 @@ define i4 @xvmsk_sgt_and_sgt_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 } define i4 @xvmsk_ogt_and_ogt_v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double> %d) { -; CHECK-LABEL: xvmsk_ogt_and_ogt_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: xvfcmp.clt.d $xr2, $xr3, $xr2 -; CHECK-NEXT: xvfcmp.clt.d $xr0, $xr1, $xr0 -; CHECK-NEXT: xvand.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvmskltz.d $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 3, 2 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_ogt_and_ogt_v4f64: +; LA32: # %bb.0: +; LA32-NEXT: xvfcmp.clt.d $xr2, $xr3, $xr2 +; LA32-NEXT: xvfcmp.clt.d $xr0, $xr1, $xr0 +; LA32-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvmskltz.d $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 3, 2 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_ogt_and_ogt_v4f64: +; LA64: # %bb.0: +; LA64-NEXT: xvfcmp.clt.d $xr2, $xr3, $xr2 +; LA64-NEXT: xvfcmp.clt.d $xr0, $xr1, $xr0 +; LA64-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvmskltz.d $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 3, 2 +; LA64-NEXT: ret %x0 = fcmp ogt <4 x double> %a, %b %x1 = fcmp ogt <4 x double> %c, %d %y = and <4 x i1> %x0, %x1 @@ -322,16 +515,27 @@ define i4 @xvmsk_ogt_and_ogt_v4f64(<4 x double> %a, <4 x double> %b, <4 x double } define i8 @xvmsk_sgt_and_sgt_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) { -; CHECK-LABEL: xvmsk_sgt_and_sgt_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: 
xvslt.w $xr2, $xr3, $xr2 -; CHECK-NEXT: xvslt.w $xr0, $xr1, $xr0 -; CHECK-NEXT: xvand.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_and_sgt_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.w $xr2, $xr3, $xr2 +; LA32-NEXT: xvslt.w $xr0, $xr1, $xr0 +; LA32-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_and_sgt_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.w $xr2, $xr3, $xr2 +; LA64-NEXT: xvslt.w $xr0, $xr1, $xr0 +; LA64-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %x0 = icmp sgt <8 x i32> %a, %b %x1 = icmp sgt <8 x i32> %c, %d %y = and <8 x i1> %x0, %x1 @@ -340,16 +544,27 @@ define i8 @xvmsk_sgt_and_sgt_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 } define i8 @xvmsk_sgt_or_sgt_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) { -; CHECK-LABEL: xvmsk_sgt_or_sgt_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslt.w $xr2, $xr3, $xr2 -; CHECK-NEXT: xvslt.w $xr0, $xr1, $xr0 -; CHECK-NEXT: xvor.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_or_sgt_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.w $xr2, $xr3, $xr2 +; LA32-NEXT: xvslt.w $xr0, $xr1, $xr0 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_or_sgt_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.w $xr2, $xr3, $xr2 +; LA64-NEXT: xvslt.w $xr0, $xr1, $xr0 +; LA64-NEXT: xvor.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %x0 = icmp sgt <8 x i32> %a, %b %x1 = icmp sgt <8 x i32> %c, %d %y = or <8 x i1> %x0, %x1 @@ -358,18 +573,31 @@ define i8 @xvmsk_sgt_or_sgt_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x } define i8 @xvmsk_sgt_or_slt_and_eq_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d, <8 x i32> %e, <8 x i32> %f) { -; CHECK-LABEL: xvmsk_sgt_or_slt_and_eq_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslt.w $xr2, $xr2, $xr3 -; CHECK-NEXT: xvslt.w $xr0, $xr1, $xr0 -; CHECK-NEXT: xvor.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvseq.w $xr1, $xr4, $xr5 -; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_or_slt_and_eq_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.w $xr2, $xr2, $xr3 +; LA32-NEXT: xvslt.w $xr0, $xr1, $xr0 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvseq.w $xr1, $xr4, $xr5 +; LA32-NEXT: xvand.v $xr0, $xr0, $xr1 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; 
LA64-LABEL: xvmsk_sgt_or_slt_and_eq_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.w $xr2, $xr2, $xr3 +; LA64-NEXT: xvslt.w $xr0, $xr1, $xr0 +; LA64-NEXT: xvor.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvseq.w $xr1, $xr4, $xr5 +; LA64-NEXT: xvand.v $xr0, $xr0, $xr1 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %x0 = icmp sgt <8 x i32> %a, %b %x1 = icmp slt <8 x i32> %c, %d %x2 = icmp eq <8 x i32> %e, %f @@ -380,15 +608,25 @@ define i8 @xvmsk_sgt_or_slt_and_eq_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> % } define i8 @xvmsk_eq_vsel_slt_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { -; CHECK-LABEL: xvmsk_eq_vsel_slt_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvseq.w $xr0, $xr0, $xr1 -; CHECK-NEXT: xvor.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_eq_vsel_slt_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: xvseq.w $xr0, $xr0, $xr1 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_eq_vsel_slt_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: xvseq.w $xr0, $xr0, $xr1 +; LA64-NEXT: xvor.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %cmp = icmp eq <8 x i32> %a0, %a1 %slt = icmp slt <8 x i32> %a2, zeroinitializer %sel = select <8 x i1> %cmp, <8 x i1> , <8 x i1> %slt @@ -397,22 +635,39 @@ define i8 @xvmsk_eq_vsel_slt_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) } define i8 @xvmsk_sel_eq_or_eq_or_slt_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3, i1 %a4) { -; CHECK-LABEL: xvmsk_sel_eq_or_eq_or_slt_v8i32: -; CHECK: # %bb.0: -; CHECK-NEXT: andi $a0, $a0, 1 -; CHECK-NEXT: xvseq.w $xr2, $xr0, $xr2 -; CHECK-NEXT: addi.d $a1, $zero, -1 -; CHECK-NEXT: maskeqz $a0, $a1, $a0 -; CHECK-NEXT: xvreplgr2vr.w $xr4, $a0 -; CHECK-NEXT: xvand.v $xr2, $xr2, $xr4 -; CHECK-NEXT: xvseq.w $xr0, $xr0, $xr1 -; CHECK-NEXT: xvor.v $xr0, $xr3, $xr0 -; CHECK-NEXT: xvor.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sel_eq_or_eq_or_slt_v8i32: +; LA32: # %bb.0: +; LA32-NEXT: andi $a0, $a0, 1 +; LA32-NEXT: xvseq.w $xr2, $xr0, $xr2 +; LA32-NEXT: addi.w $a1, $zero, -1 +; LA32-NEXT: maskeqz $a0, $a1, $a0 +; LA32-NEXT: xvreplgr2vr.w $xr4, $a0 +; LA32-NEXT: xvand.v $xr2, $xr2, $xr4 +; LA32-NEXT: xvseq.w $xr0, $xr0, $xr1 +; LA32-NEXT: xvor.v $xr0, $xr3, $xr0 +; LA32-NEXT: xvor.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sel_eq_or_eq_or_slt_v8i32: +; LA64: # %bb.0: +; LA64-NEXT: andi $a0, $a0, 1 +; LA64-NEXT: xvseq.w $xr2, $xr0, $xr2 +; LA64-NEXT: addi.d $a1, $zero, -1 +; LA64-NEXT: maskeqz $a0, $a1, $a0 +; LA64-NEXT: xvreplgr2vr.w $xr4, $a0 +; LA64-NEXT: xvand.v $xr2, $xr2, $xr4 +; LA64-NEXT: xvseq.w $xr0, $xr0, $xr1 +; LA64-NEXT: xvor.v $xr0, $xr3, $xr0 +; 
LA64-NEXT: xvor.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %cmp0 = icmp eq <8 x i32> %a0, %a1 %cmp1 = icmp eq <8 x i32> %a0, %a2 %cmp2 = icmp slt <8 x i32> %a3, zeroinitializer @@ -424,16 +679,27 @@ define i8 @xvmsk_sel_eq_or_eq_or_slt_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i3 } define i8 @xvmsk_ogt_and_ogt_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) { -; CHECK-LABEL: xvmsk_ogt_and_ogt_v8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvfcmp.clt.s $xr2, $xr3, $xr2 -; CHECK-NEXT: xvfcmp.clt.s $xr0, $xr1, $xr0 -; CHECK-NEXT: xvand.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_ogt_and_ogt_v8f32: +; LA32: # %bb.0: +; LA32-NEXT: xvfcmp.clt.s $xr2, $xr3, $xr2 +; LA32-NEXT: xvfcmp.clt.s $xr0, $xr1, $xr0 +; LA32-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_ogt_and_ogt_v8f32: +; LA64: # %bb.0: +; LA64-NEXT: xvfcmp.clt.s $xr2, $xr3, $xr2 +; LA64-NEXT: xvfcmp.clt.s $xr0, $xr1, $xr0 +; LA64-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %x0 = fcmp ogt <8 x float> %a, %b %x1 = fcmp ogt <8 x float> %c, %d %y = and <8 x i1> %x0, %x1 @@ -442,16 +708,27 @@ define i8 @xvmsk_ogt_and_ogt_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> % } define i8 @xvmsk_sgt_xor_sgt_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) { -; CHECK-LABEL: xvmsk_sgt_xor_sgt_v8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvfcmp.clt.s $xr2, $xr3, $xr2 -; CHECK-NEXT: xvfcmp.clt.s $xr0, $xr1, $xr0 -; CHECK-NEXT: xvxor.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_xor_sgt_v8f32: +; LA32: # %bb.0: +; LA32-NEXT: xvfcmp.clt.s $xr2, $xr3, $xr2 +; LA32-NEXT: xvfcmp.clt.s $xr0, $xr1, $xr0 +; LA32-NEXT: xvxor.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_xor_sgt_v8f32: +; LA64: # %bb.0: +; LA64-NEXT: xvfcmp.clt.s $xr2, $xr3, $xr2 +; LA64-NEXT: xvfcmp.clt.s $xr0, $xr1, $xr0 +; LA64-NEXT: xvxor.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %x0 = fcmp ogt <8 x float> %a, %b %x1 = fcmp ogt <8 x float> %c, %d %y = xor <8 x i1> %x0, %x1 @@ -460,18 +737,31 @@ define i8 @xvmsk_sgt_xor_sgt_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> % } define i8 @xvmsk_ugt_xor_ueq_and_ogt_v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d, <8 x float> %e, <8 x float> %f) { -; CHECK-LABEL: xvmsk_ugt_xor_ueq_and_ogt_v8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvfcmp.cueq.s $xr2, $xr2, $xr3 -; CHECK-NEXT: xvfcmp.cult.s $xr0, $xr1, $xr0 -; CHECK-NEXT: 
xvxor.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvfcmp.clt.s $xr1, $xr5, $xr4 -; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_ugt_xor_ueq_and_ogt_v8f32: +; LA32: # %bb.0: +; LA32-NEXT: xvfcmp.cueq.s $xr2, $xr2, $xr3 +; LA32-NEXT: xvfcmp.cult.s $xr0, $xr1, $xr0 +; LA32-NEXT: xvxor.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvfcmp.clt.s $xr1, $xr5, $xr4 +; LA32-NEXT: xvand.v $xr0, $xr0, $xr1 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_ugt_xor_ueq_and_ogt_v8f32: +; LA64: # %bb.0: +; LA64-NEXT: xvfcmp.cueq.s $xr2, $xr2, $xr3 +; LA64-NEXT: xvfcmp.cult.s $xr0, $xr1, $xr0 +; LA64-NEXT: xvxor.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvfcmp.clt.s $xr1, $xr5, $xr4 +; LA64-NEXT: xvand.v $xr0, $xr0, $xr1 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %x0 = fcmp ugt <8 x float> %a, %b %x1 = fcmp ueq <8 x float> %c, %d %x2 = fcmp ogt <8 x float> %e, %f @@ -482,16 +772,27 @@ define i8 @xvmsk_ugt_xor_ueq_and_ogt_v8f32(<8 x float> %a, <8 x float> %b, <8 x } define i16 @xvmsk_sgt_and_sgt_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) { -; CHECK-LABEL: xvmsk_sgt_and_sgt_v16i16: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslt.h $xr2, $xr3, $xr2 -; CHECK-NEXT: xvslt.h $xr0, $xr1, $xr0 -; CHECK-NEXT: xvand.v $xr0, $xr0, $xr2 -; CHECK-NEXT: xvmskltz.h $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 15, 8 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_and_sgt_v16i16: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.h $xr2, $xr3, $xr2 +; LA32-NEXT: xvslt.h $xr0, $xr1, $xr0 +; LA32-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA32-NEXT: xvmskltz.h $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 15, 8 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_and_sgt_v16i16: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.h $xr2, $xr3, $xr2 +; LA64-NEXT: xvslt.h $xr0, $xr1, $xr0 +; LA64-NEXT: xvand.v $xr0, $xr0, $xr2 +; LA64-NEXT: xvmskltz.h $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 15, 8 +; LA64-NEXT: ret %x0 = icmp sgt <16 x i16> %a, %b %x1 = icmp sgt <16 x i16> %c, %d %y = and <16 x i1> %x0, %x1 @@ -500,16 +801,27 @@ define i16 @xvmsk_sgt_and_sgt_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c } define i32 @xvmsk_sgt_and_sgt_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) { -; CHECK-LABEL: xvmsk_sgt_and_sgt_v32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslt.b $xr0, $xr1, $xr0 -; CHECK-NEXT: xvslt.b $xr1, $xr3, $xr2 -; CHECK-NEXT: xvand.v $xr0, $xr0, $xr1 -; CHECK-NEXT: xvmskltz.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_sgt_and_sgt_v32i8: +; LA32: # %bb.0: +; LA32-NEXT: xvslt.b $xr0, $xr1, $xr0 +; LA32-NEXT: xvslt.b $xr1, $xr3, $xr2 +; LA32-NEXT: xvand.v $xr0, $xr0, $xr1 +; LA32-NEXT: xvmskltz.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: 
bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_sgt_and_sgt_v32i8: +; LA64: # %bb.0: +; LA64-NEXT: xvslt.b $xr0, $xr1, $xr0 +; LA64-NEXT: xvslt.b $xr1, $xr3, $xr2 +; LA64-NEXT: xvand.v $xr0, $xr0, $xr1 +; LA64-NEXT: xvmskltz.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret %x0 = icmp sgt <32 x i8> %a, %b %x1 = icmp sgt <32 x i8> %c, %d %y = and <32 x i1> %x0, %x1 @@ -518,17 +830,29 @@ define i32 @xvmsk_sgt_and_sgt_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <3 } define i8 @xvmsk_eq_v2i64_concat_poison(<2 x i64> %vec) { -; CHECK-LABEL: xvmsk_eq_v2i64_concat_poison: -; CHECK: # %bb.0: -; CHECK-NEXT: vseqi.d $vr0, $vr0, 0 -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 0 -; CHECK-NEXT: vinsgr2vr.h $vr1, $a0, 0 -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 1 -; CHECK-NEXT: vinsgr2vr.h $vr1, $a0, 1 -; CHECK-NEXT: vslli.h $vr0, $vr1, 15 -; CHECK-NEXT: vmskltz.h $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_eq_v2i64_concat_poison: +; LA32: # %bb.0: +; LA32-NEXT: vseqi.d $vr0, $vr0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: vinsgr2vr.h $vr1, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: vinsgr2vr.h $vr1, $a0, 1 +; LA32-NEXT: vslli.h $vr0, $vr1, 15 +; LA32-NEXT: vmskltz.h $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_eq_v2i64_concat_poison: +; LA64: # %bb.0: +; LA64-NEXT: vseqi.d $vr0, $vr0, 0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: vinsgr2vr.h $vr1, $a0, 0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 1 +; LA64-NEXT: vinsgr2vr.h $vr1, $a0, 1 +; LA64-NEXT: vslli.h $vr0, $vr1, 15 +; LA64-NEXT: vmskltz.h $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: ret %tobool = icmp eq <2 x i64> %vec, zeroinitializer %insertvec = shufflevector <2 x i1> %tobool, <2 x i1> poison, <8 x i32> %res = bitcast <8 x i1> %insertvec to i8 @@ -560,22 +884,39 @@ define i8 @xvmsk_ne_v4i32_concat_poison(<4 x i32> %vec) { } define i8 @xvmsk_ogt_v4f64_concat_poison(<4 x double> %vec) { -; CHECK-LABEL: xvmsk_ogt_v4f64_concat_poison: -; CHECK: # %bb.0: -; CHECK-NEXT: xvrepli.b $xr1, 0 -; CHECK-NEXT: xvfcmp.clt.d $xr0, $xr1, $xr0 -; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3 -; CHECK-NEXT: xvpickve2gr.d $a1, $xr0, 2 -; CHECK-NEXT: xvpickve2gr.d $a2, $xr0, 1 -; CHECK-NEXT: xvpickve2gr.d $a3, $xr0, 0 -; CHECK-NEXT: vinsgr2vr.h $vr0, $a3, 0 -; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 1 -; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 2 -; CHECK-NEXT: vinsgr2vr.h $vr0, $a0, 3 -; CHECK-NEXT: vslli.h $vr0, $vr0, 15 -; CHECK-NEXT: vmskltz.h $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_ogt_v4f64_concat_poison: +; LA32: # %bb.0: +; LA32-NEXT: xvrepli.b $xr1, 0 +; LA32-NEXT: xvfcmp.clt.d $xr0, $xr1, $xr0 +; LA32-NEXT: xvpickve2gr.w $a0, $xr0, 6 +; LA32-NEXT: xvpickve2gr.w $a1, $xr0, 4 +; LA32-NEXT: xvpickve2gr.w $a2, $xr0, 2 +; LA32-NEXT: xvpickve2gr.w $a3, $xr0, 0 +; LA32-NEXT: vinsgr2vr.h $vr0, $a3, 0 +; LA32-NEXT: vinsgr2vr.h $vr0, $a2, 1 +; LA32-NEXT: vinsgr2vr.h $vr0, $a1, 2 +; LA32-NEXT: vinsgr2vr.h $vr0, $a0, 3 +; LA32-NEXT: vslli.h $vr0, $vr0, 15 +; LA32-NEXT: vmskltz.h $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_ogt_v4f64_concat_poison: +; LA64: # %bb.0: +; LA64-NEXT: xvrepli.b $xr1, 0 +; LA64-NEXT: xvfcmp.clt.d $xr0, $xr1, $xr0 +; LA64-NEXT: xvpickve2gr.d $a0, $xr0, 3 +; LA64-NEXT: xvpickve2gr.d 
$a1, $xr0, 2 +; LA64-NEXT: xvpickve2gr.d $a2, $xr0, 1 +; LA64-NEXT: xvpickve2gr.d $a3, $xr0, 0 +; LA64-NEXT: vinsgr2vr.h $vr0, $a3, 0 +; LA64-NEXT: vinsgr2vr.h $vr0, $a2, 1 +; LA64-NEXT: vinsgr2vr.h $vr0, $a1, 2 +; LA64-NEXT: vinsgr2vr.h $vr0, $a0, 3 +; LA64-NEXT: vslli.h $vr0, $vr0, 15 +; LA64-NEXT: vmskltz.h $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: ret %tobool = fcmp ogt <4 x double> %vec, zeroinitializer %insertvec = shufflevector <4 x i1> %tobool, <4 x i1> poison, <8 x i32> %res = bitcast <8 x i1> %insertvec to i8 @@ -583,56 +924,92 @@ define i8 @xvmsk_ogt_v4f64_concat_poison(<4 x double> %vec) { } define i32 @xvmsk_trunc_i8(<32 x i8> %a) { -; CHECK-LABEL: xvmsk_trunc_i8: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslli.b $xr0, $xr0, 7 -; CHECK-NEXT: xvmskltz.b $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 31, 16 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_trunc_i8: +; LA32: # %bb.0: +; LA32-NEXT: xvslli.b $xr0, $xr0, 7 +; LA32-NEXT: xvmskltz.b $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 31, 16 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_trunc_i8: +; LA64: # %bb.0: +; LA64-NEXT: xvslli.b $xr0, $xr0, 7 +; LA64-NEXT: xvmskltz.b $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 31, 16 +; LA64-NEXT: ret %y = trunc <32 x i8> %a to <32 x i1> %res = bitcast <32 x i1> %y to i32 ret i32 %res } define i16 @xvmsk_trunc_i16(<16 x i16> %a) { -; CHECK-LABEL: xvmsk_trunc_i16: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslli.h $xr0, $xr0, 15 -; CHECK-NEXT: xvmskltz.h $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 15, 8 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_trunc_i16: +; LA32: # %bb.0: +; LA32-NEXT: xvslli.h $xr0, $xr0, 15 +; LA32-NEXT: xvmskltz.h $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 15, 8 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_trunc_i16: +; LA64: # %bb.0: +; LA64-NEXT: xvslli.h $xr0, $xr0, 15 +; LA64-NEXT: xvmskltz.h $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 15, 8 +; LA64-NEXT: ret %y = trunc <16 x i16> %a to <16 x i1> %res = bitcast <16 x i1> %y to i16 ret i16 %res } define i8 @xvmsk_trunc_i32(<8 x i32> %a) { -; CHECK-LABEL: xvmsk_trunc_i32: -; CHECK: # %bb.0: -; CHECK-NEXT: xvslli.w $xr0, $xr0, 31 -; CHECK-NEXT: xvmskltz.w $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 7, 4 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_trunc_i32: +; LA32: # %bb.0: +; LA32-NEXT: xvslli.w $xr0, $xr0, 31 +; LA32-NEXT: xvmskltz.w $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 7, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_trunc_i32: +; LA64: # %bb.0: +; LA64-NEXT: xvslli.w $xr0, $xr0, 31 +; LA64-NEXT: xvmskltz.w $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 7, 4 +; LA64-NEXT: ret %y = trunc <8 x i32> %a to <8 x i1> %res = bitcast <8 x i1> %y to i8 ret i8 %res } define i4 @xvmsk_trunc_i64(<4 x i64> %a) { -; CHECK-LABEL: xvmsk_trunc_i64: -; CHECK: # %bb.0: -; CHECK-NEXT: 
xvslli.d $xr0, $xr0, 63 -; CHECK-NEXT: xvmskltz.d $xr0, $xr0 -; CHECK-NEXT: xvpickve2gr.wu $a0, $xr0, 0 -; CHECK-NEXT: xvpickve2gr.wu $a1, $xr0, 4 -; CHECK-NEXT: bstrins.d $a0, $a1, 3, 2 -; CHECK-NEXT: ret +; LA32-LABEL: xvmsk_trunc_i64: +; LA32: # %bb.0: +; LA32-NEXT: xvslli.d $xr0, $xr0, 63 +; LA32-NEXT: xvmskltz.d $xr0, $xr0 +; LA32-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA32-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA32-NEXT: bstrins.w $a0, $a1, 3, 2 +; LA32-NEXT: ret +; +; LA64-LABEL: xvmsk_trunc_i64: +; LA64: # %bb.0: +; LA64-NEXT: xvslli.d $xr0, $xr0, 63 +; LA64-NEXT: xvmskltz.d $xr0, $xr0 +; LA64-NEXT: xvpickve2gr.wu $a0, $xr0, 0 +; LA64-NEXT: xvpickve2gr.wu $a1, $xr0, 4 +; LA64-NEXT: bstrins.d $a0, $a1, 3, 2 +; LA64-NEXT: ret %y = trunc <4 x i64> %a to <4 x i1> %res = bitcast <4 x i1> %y to i4 ret i4 %res diff --git a/llvm/test/CodeGen/LoongArch/lsx/abs.ll b/llvm/test/CodeGen/LoongArch/lsx/abs.ll new file mode 100644 index 0000000000000..85fe1fe5c0da7 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/abs.ll @@ -0,0 +1,128 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +define void @vabs_b(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.b $vr1, $vr0 +; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i8>, ptr %src + %b = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 true) + store <16 x i8> %b, ptr %dst + ret void +} + +define void @vabs_b_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.b $vr1, $vr0 +; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i8>, ptr %src + %b = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false) + store <16 x i8> %b, ptr %dst + ret void +} + +define void @vabs_h(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.h $vr1, $vr0 +; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i16>, ptr %src + %b = tail call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 true) + store <8 x i16> %b, ptr %dst + ret void +} + +define void @vabs_h_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.h $vr1, $vr0 +; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i16>, ptr %src + %b = tail call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false) + store <8 x i16> %b, ptr %dst + ret void +} + +define void @vabs_w(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.w $vr1, $vr0 +; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i32>, ptr %src + %b = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 true) + store <4 x i32> %b, ptr %dst + ret void +} + +define void @vabs_w_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.w $vr1, $vr0 +; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; 
CHECK-NEXT: ret +entry: + %a = load <4 x i32>, ptr %src + %b = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false) + store <4 x i32> %b, ptr %dst + ret void +} + +define void @vabs_d(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.d $vr1, $vr0 +; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <2 x i64>, ptr %src + %b = tail call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 true) + store <2 x i64> %b, ptr %dst + ret void +} + +define void @vabs_d_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.d $vr1, $vr0 +; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <2 x i64>, ptr %src + %b = tail call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false) + store <2 x i64> %b, ptr %dst + ret void +} + +declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1) +declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1) +declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) +declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1) diff --git a/llvm/test/CodeGen/LoongArch/lsx/and-not-combine.ll b/llvm/test/CodeGen/LoongArch/lsx/and-not-combine.ll new file mode 100644 index 0000000000000..3c6d34505e114 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/and-not-combine.ll @@ -0,0 +1,87 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +define void @and_not_combine_v16i8(ptr %res, ptr %a0, ptr %a1, ptr %a2) nounwind { +; CHECK-LABEL: and_not_combine_v16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a3, 0 +; CHECK-NEXT: vld $vr2, $a1, 0 +; CHECK-NEXT: vsub.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vandn.v $vr0, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <16 x i8>, ptr %a0 + %v1 = load <16 x i8>, ptr %a1 + %v2 = load <16 x i8>, ptr %a2 + %not = xor <16 x i8> %v1, + %add = add <16 x i8> %not, %v2 + %and = and <16 x i8> %v0, %add + store <16 x i8> %and, ptr %res + ret void +} + +define void @and_not_combine_v8i16(ptr %res, ptr %a0, ptr %a1, ptr %a2) nounwind { +; CHECK-LABEL: and_not_combine_v8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a3, 0 +; CHECK-NEXT: vld $vr2, $a1, 0 +; CHECK-NEXT: vsub.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vandn.v $vr0, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <8 x i16>, ptr %a0 + %v1 = load <8 x i16>, ptr %a1 + %v2 = load <8 x i16>, ptr %a2 + %not = xor <8 x i16> %v1, + %add = add <8 x i16> %not, %v2 + %and = and <8 x i16> %v0, %add + store <8 x i16> %and, ptr %res + ret void +} + +define void @and_not_combine_v4i32(ptr %res, ptr %a0, ptr %a1, ptr %a2) nounwind { +; CHECK-LABEL: and_not_combine_v4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a3, 0 +; CHECK-NEXT: vld $vr2, $a1, 0 +; CHECK-NEXT: vsub.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vandn.v $vr0, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <4 x i32>, ptr %a0 + %v1 = load <4 x i32>, ptr %a1 + %v2 = load <4 x i32>, ptr %a2 + %not = xor <4 x i32> %v1, + %add = add <4 x i32> %not, %v2 + %and = and <4 x i32> %v0, %add + store <4 x i32> %and, ptr %res + ret void +} 
+ +define void @and_not_combine_v2i64(ptr %res, ptr %a0, ptr %a1, ptr %a2) nounwind { +; CHECK-LABEL: and_not_combine_v2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a3, 0 +; CHECK-NEXT: vld $vr2, $a1, 0 +; CHECK-NEXT: vsub.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vandn.v $vr0, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %v0 = load <2 x i64>, ptr %a0 + %v1 = load <2 x i64>, ptr %a1 + %v2 = load <2 x i64>, ptr %a2 + %not = xor <2 x i64> %v1, + %add = add <2 x i64> %not, %v2 + %and = and <2 x i64> %v0, %add + store <2 x i64> %and, ptr %res + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/bitreverse.ll b/llvm/test/CodeGen/LoongArch/lsx/bitreverse.ll index 4c17d3fd8d7b2..b0d36a8143fa1 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/bitreverse.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/bitreverse.ll @@ -1,20 +1,39 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lsx --verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefix=LA32 ; RUN: llc --mtriple=loongarch64 -mattr=+lsx --verify-machineinstrs < %s \ -; RUN: | FileCheck %s +; RUN: | FileCheck %s --check-prefix=LA64 declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>) define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind { -; CHECK-LABEL: test_bitreverse_v16i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 0 -; CHECK-NEXT: bitrev.8b $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0 -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 1 -; CHECK-NEXT: bitrev.8b $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1 -; CHECK-NEXT: vori.b $vr0, $vr1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: test_bitreverse_v16i8: +; LA32: # %bb.0: +; LA32-NEXT: vslli.b $vr1, $vr0, 4 +; LA32-NEXT: vsrli.b $vr0, $vr0, 4 +; LA32-NEXT: vor.v $vr0, $vr0, $vr1 +; LA32-NEXT: vandi.b $vr1, $vr0, 51 +; LA32-NEXT: vslli.b $vr1, $vr1, 2 +; LA32-NEXT: vsrli.b $vr0, $vr0, 2 +; LA32-NEXT: vandi.b $vr0, $vr0, 51 +; LA32-NEXT: vor.v $vr0, $vr0, $vr1 +; LA32-NEXT: vandi.b $vr1, $vr0, 85 +; LA32-NEXT: vslli.b $vr1, $vr1, 1 +; LA32-NEXT: vsrli.b $vr0, $vr0, 1 +; LA32-NEXT: vandi.b $vr0, $vr0, 85 +; LA32-NEXT: vor.v $vr0, $vr0, $vr1 +; LA32-NEXT: ret +; +; LA64-LABEL: test_bitreverse_v16i8: +; LA64: # %bb.0: +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: bitrev.8b $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 1 +; LA64-NEXT: bitrev.8b $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; LA64-NEXT: vori.b $vr0, $vr1, 0 +; LA64-NEXT: ret %b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a) ret <16 x i8> %b } @@ -22,16 +41,33 @@ define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind { declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>) define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind { -; CHECK-LABEL: test_bitreverse_v8i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 0 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0 -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 1 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1 -; CHECK-NEXT: vshuf4i.h $vr0, $vr1, 27 -; CHECK-NEXT: ret +; LA32-LABEL: test_bitreverse_v8i16: +; LA32: # %bb.0: +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 1 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 3 +; 
LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 2 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 3 +; LA32-NEXT: vshuf4i.h $vr0, $vr1, 27 +; LA32-NEXT: ret +; +; LA64-LABEL: test_bitreverse_v8i16: +; LA64: # %bb.0: +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 1 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; LA64-NEXT: vshuf4i.h $vr0, $vr1, 27 +; LA64-NEXT: ret %b = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %a) ret <8 x i16> %b } @@ -39,16 +75,33 @@ define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind { declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind { -; CHECK-LABEL: test_bitreverse_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 0 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0 -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 1 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1 -; CHECK-NEXT: vshuf4i.w $vr0, $vr1, 177 -; CHECK-NEXT: ret +; LA32-LABEL: test_bitreverse_v4i32: +; LA32: # %bb.0: +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 1 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 2 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 3 +; LA32-NEXT: bitrev.w $a0, $a0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 3 +; LA32-NEXT: vori.b $vr0, $vr1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: test_bitreverse_v4i32: +; LA64: # %bb.0: +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 1 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; LA64-NEXT: vshuf4i.w $vr0, $vr1, 177 +; LA64-NEXT: ret %b = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a) ret <4 x i32> %b } @@ -56,16 +109,36 @@ define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind { declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind { -; CHECK-LABEL: test_bitreverse_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 0 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0 -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 1 -; CHECK-NEXT: bitrev.d $a0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 1 -; CHECK-NEXT: vori.b $vr0, $vr1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: test_bitreverse_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0) +; LA32-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI3_0) +; LA32-NEXT: vshuf.b $vr0, $vr0, $vr0, $vr1 +; LA32-NEXT: vslli.b $vr1, $vr0, 4 +; LA32-NEXT: vsrli.b $vr0, $vr0, 4 +; LA32-NEXT: vor.v $vr0, $vr0, $vr1 +; LA32-NEXT: vandi.b $vr1, $vr0, 51 +; LA32-NEXT: vslli.b $vr1, $vr1, 2 +; LA32-NEXT: vsrli.b $vr0, $vr0, 2 +; LA32-NEXT: vandi.b $vr0, $vr0, 51 +; LA32-NEXT: vor.v $vr0, $vr0, $vr1 +; LA32-NEXT: vandi.b $vr1, $vr0, 85 +; LA32-NEXT: vslli.b $vr1, $vr1, 1 +; LA32-NEXT: vsrli.b $vr0, $vr0, 1 +; LA32-NEXT: vandi.b $vr0, $vr0, 85 +; LA32-NEXT: vor.v $vr0, $vr0, $vr1 +; LA32-NEXT: ret +; +; LA64-LABEL: test_bitreverse_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: 
vinsgr2vr.d $vr1, $a0, 0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 1 +; LA64-NEXT: bitrev.d $a0, $a0 +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1 +; LA64-NEXT: vori.b $vr0, $vr1, 0 +; LA64-NEXT: ret %b = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a) ret <2 x i64> %b } diff --git a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll index cae7c08f2d685..fe45e73b36f51 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll @@ -192,11 +192,11 @@ entry: ret void } -define void @buildvector_v2f32_const_splat(ptr %dst) nounwind { -; CHECK-LABEL: buildvector_v2f32_const_splat: +;; Also check buildvector_const_splat_vldi_1010. +define void @buildvector_v4f32_const_splat(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_v4f32_const_splat: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 260096 -; CHECK-NEXT: vreplgr2vr.w $vr0, $a1 +; CHECK-NEXT: vldi $vr0, -1424 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -204,30 +204,112 @@ entry: ret void } +;; Also check buildvector_const_splat_vldi_1100. define void @buildvector_v2f64_const_splat(ptr %dst) nounwind { -; LA32-LABEL: buildvector_v2f64_const_splat: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI14_0) -; LA32-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI14_0) -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: buildvector_v2f64_const_splat: -; LA64: # %bb.0: # %entry -; LA64-NEXT: lu52i.d $a1, $zero, 1023 -; LA64-NEXT: vreplgr2vr.d $vr0, $a1 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: buildvector_v2f64_const_splat: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -912 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret entry: store <2 x double> , ptr %dst ret void } +;; imm[11:8] == 4'b0000/4'b0100/4'b1000 can be represented using vrepli.[whb]. 
+define void @buildvector_const_splat_vldi_0001(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_vldi_0001: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -3837 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <4 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_vldi_0010(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_vldi_0010: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -3583 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <4 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_vldi_0011(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_vldi_0011: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -3327 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <4 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_vldi_0101(ptr %dst) { +; CHECK-LABEL: buildvector_const_splat_vldi_0101: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -2813 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <8 x i16> , ptr %dst + ret void +} + +define void @buildvector_const_splat_vldi_0110(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_vldi_0110: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -2557 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <4 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_vldi_0111(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_vldi_0111: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -2305 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <4 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_vldi_1001(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_vldi_1001: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -1789 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <4 x i32> , ptr %dst + ret void +} + +define void @buildvector_const_splat_vldi_1011(ptr %dst) nounwind { +; CHECK-LABEL: buildvector_const_splat_vldi_1011: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -1280 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + store <4 x float> , ptr %dst + ret void +} + define void @buildvector_v16i8_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v16i8_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI15_0) -; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI15_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI23_0) +; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI23_0) ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -238,8 +320,8 @@ entry: define void @buildvector_v8i16_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v8i16_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI16_0) -; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI16_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI24_0) +; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI24_0) ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -250,8 +332,8 @@ entry: define void @buildvector_v4i32_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v4i32_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI17_0) -; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI17_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI25_0) +; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI25_0) ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -262,8 +344,8 @@ entry: define void @buildvector_v2i64_const(ptr %dst) nounwind { ; 
CHECK-LABEL: buildvector_v2i64_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI18_0) -; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI18_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI26_0) +; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI26_0) ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -274,8 +356,8 @@ entry: define void @buildvector_v2f32_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v2f32_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI19_0) -; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI19_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI27_0) +; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI27_0) ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -286,8 +368,8 @@ entry: define void @buildvector_v2f64_const(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v2f64_const: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI20_0) -; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI20_0) +; CHECK-NEXT: pcalau12i $a1, %pc_hi20(.LCPI28_0) +; CHECK-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI28_0) ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/LoongArch/lsx/extract-binop.ll b/llvm/test/CodeGen/LoongArch/lsx/extract-binop.ll new file mode 100644 index 0000000000000..e8ddf84de6dff --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/extract-binop.ll @@ -0,0 +1,100 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64 + +define i8 @extractelt_add_v16i8(ptr %p) { +; CHECK-LABEL: extractelt_add_v16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a0, 0 +; CHECK-NEXT: vaddi.bu $vr0, $vr0, 13 +; CHECK-NEXT: vpickve2gr.b $a0, $vr0, 2 +; CHECK-NEXT: ret +entry: + %x = load <16 x i8>, ptr %p + %add = add <16 x i8> %x, + %ext = extractelement <16 x i8> %add, i32 2 + ret i8 %ext +} + +define i16 @extractelt_add_v8i16(ptr %p) { +; CHECK-LABEL: extractelt_add_v8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a0, 0 +; CHECK-NEXT: vaddi.hu $vr0, $vr0, 13 +; CHECK-NEXT: vpickve2gr.h $a0, $vr0, 2 +; CHECK-NEXT: ret +entry: + %x = load <8 x i16>, ptr %p + %add = add <8 x i16> %x, + %ext = extractelement <8 x i16> %add, i32 2 + ret i16 %ext +} + +define i32 @extractelt_add_v4i32(ptr %p) { +; LA32-LABEL: extractelt_add_v4i32: +; LA32: # %bb.0: # %entry +; LA32-NEXT: ld.w $a0, $a0, 8 +; LA32-NEXT: addi.w $a0, $a0, 13 +; LA32-NEXT: ret +; +; LA64-LABEL: extractelt_add_v4i32: +; LA64: # %bb.0: # %entry +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vaddi.wu $vr0, $vr0, 13 +; LA64-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA64-NEXT: ret +entry: + %x = load <4 x i32>, ptr %p + %add = add <4 x i32> %x, + %ext = extractelement <4 x i32> %add, i32 2 + ret i32 %ext +} + +define i64 @extractelt_add_v2i64(ptr %p) { +; LA32-LABEL: extractelt_add_v2i64: +; LA32: # %bb.0: # %entry +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vaddi.du $vr0, $vr0, 12 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a1, $vr0, 3 +; LA32-NEXT: ret +; +; LA64-LABEL: extractelt_add_v2i64: +; LA64: # %bb.0: # %entry +; LA64-NEXT: ld.d $a0, $a0, 8 +; LA64-NEXT: addi.d $a0, $a0, 12 +; LA64-NEXT: ret +entry: + %x = load <2 x i64>, ptr %p + %add = add <2 x i64> %x, + %ext = extractelement <2 x i64> %add, i32 1 + ret i64 %ext +} + +define float @extractelt_fadd_v4f32(ptr %p) { +; 
CHECK-LABEL: extractelt_fadd_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fld.s $fa0, $a0, 8 +; CHECK-NEXT: vldi $vr1, -1238 +; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %x = load <4 x float>, ptr %p + %add = fadd <4 x float> %x, + %ext = extractelement <4 x float> %add, i32 2 + ret float %ext +} + +define double @extractelt_fadd_v2f64(ptr %p) { +; CHECK-LABEL: extractelt_fadd_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fld.d $fa0, $a0, 8 +; CHECK-NEXT: vldi $vr1, -984 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %x = load <2 x double>, ptr %p + %add = fadd <2 x double> %x, + %ext = extractelement <2 x double> %add, i32 1 + ret double %ext +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/fdiv-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/lsx/fdiv-reciprocal-estimate.ll index 58e16d37ae278..46eb91e4079bf 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/fdiv-reciprocal-estimate.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/fdiv-reciprocal-estimate.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA32 -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA64 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s define void @fdiv_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind { ; FAULT-LABEL: fdiv_v4f32: @@ -40,35 +40,19 @@ define void @fdiv_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind { ; FAULT-NEXT: vst $vr0, $a0, 0 ; FAULT-NEXT: ret ; -; LA32-LABEL: fdiv_v2f64: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a3, %pc_hi20(.LCPI1_0) -; LA32-NEXT: vld $vr0, $a2, 0 -; LA32-NEXT: vld $vr1, $a3, %pc_lo12(.LCPI1_0) -; LA32-NEXT: vld $vr2, $a1, 0 -; LA32-NEXT: vfrecipe.d $vr3, $vr0 -; LA32-NEXT: vfmadd.d $vr1, $vr0, $vr3, $vr1 -; LA32-NEXT: vfnmsub.d $vr1, $vr1, $vr3, $vr3 -; LA32-NEXT: vfmul.d $vr3, $vr2, $vr1 -; LA32-NEXT: vfnmsub.d $vr0, $vr0, $vr3, $vr2 -; LA32-NEXT: vfmadd.d $vr0, $vr1, $vr0, $vr3 -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: fdiv_v2f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: vld $vr0, $a2, 0 -; LA64-NEXT: vld $vr1, $a1, 0 -; LA64-NEXT: lu52i.d $a1, $zero, -1025 -; LA64-NEXT: vreplgr2vr.d $vr2, $a1 -; LA64-NEXT: vfrecipe.d $vr3, $vr0 -; LA64-NEXT: vfmadd.d $vr2, $vr0, $vr3, $vr2 -; LA64-NEXT: vfnmsub.d $vr2, $vr2, $vr3, $vr3 -; LA64-NEXT: vfmul.d $vr3, $vr1, $vr2 -; LA64-NEXT: vfnmsub.d $vr0, $vr0, $vr3, $vr1 -; LA64-NEXT: vfmadd.d $vr0, $vr2, $vr0, $vr3 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: fdiv_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a1, 0 +; CHECK-NEXT: vfrecipe.d $vr2, $vr0 +; CHECK-NEXT: vldi $vr3, -784 +; CHECK-NEXT: vfmadd.d $vr3, $vr0, $vr2, $vr3 +; CHECK-NEXT: vfnmsub.d $vr2, $vr3, $vr2, $vr2 +; CHECK-NEXT: vfmul.d $vr3, $vr1, $vr2 +; CHECK-NEXT: vfnmsub.d $vr0, $vr0, $vr3, $vr1 +; CHECK-NEXT: vfmadd.d $vr0, $vr2, $vr0, $vr3 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret entry: %v0 = load <2 x double>, ptr %a0 %v1 = load <2 x double>, ptr %a1 @@ -90,8 +74,7 @@ define void 
@one_fdiv_v4f32(ptr %res, ptr %a0) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vfrecipe.s $vr1, $vr0 -; CHECK-NEXT: lu12i.w $a1, -264192 -; CHECK-NEXT: vreplgr2vr.w $vr2, $a1 +; CHECK-NEXT: vldi $vr2, -1296 ; CHECK-NEXT: vfmadd.s $vr0, $vr0, $vr1, $vr2 ; CHECK-NEXT: vfnmsub.s $vr0, $vr0, $vr1, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 @@ -107,24 +90,22 @@ define void @one_fdiv_v2f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA32-LABEL: one_fdiv_v2f64: ; FAULT-LA32: # %bb.0: # %entry ; FAULT-LA32-NEXT: vld $vr0, $a1, 0 -; FAULT-LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; FAULT-LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI3_0) +; FAULT-LA32-NEXT: vldi $vr1, -912 ; FAULT-LA32-NEXT: vfdiv.d $vr0, $vr1, $vr0 ; FAULT-LA32-NEXT: vst $vr0, $a0, 0 ; FAULT-LA32-NEXT: ret ; -; LA32-LABEL: one_fdiv_v2f64: -; LA32: # %bb.0: # %entry -; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI3_0) -; LA32-NEXT: vfrecipe.d $vr2, $vr0 -; LA32-NEXT: vfnmsub.d $vr3, $vr0, $vr2, $vr1 -; LA32-NEXT: vfmadd.d $vr2, $vr2, $vr3, $vr2 -; LA32-NEXT: vfnmsub.d $vr0, $vr0, $vr2, $vr1 -; LA32-NEXT: vfmadd.d $vr0, $vr2, $vr0, $vr2 -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret +; CHECK-LABEL: one_fdiv_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vfrecipe.d $vr1, $vr0 +; CHECK-NEXT: vldi $vr2, -912 +; CHECK-NEXT: vfnmsub.d $vr3, $vr0, $vr1, $vr2 +; CHECK-NEXT: vfmadd.d $vr1, $vr1, $vr3, $vr1 +; CHECK-NEXT: vfnmsub.d $vr0, $vr0, $vr1, $vr2 +; CHECK-NEXT: vfmadd.d $vr0, $vr1, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret ; ; FAULT-LA64-LABEL: one_fdiv_v2f64: ; FAULT-LA64: # %bb.0: # %entry @@ -132,19 +113,6 @@ define void @one_fdiv_v2f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA64-NEXT: vfrecip.d $vr0, $vr0 ; FAULT-LA64-NEXT: vst $vr0, $a0, 0 ; FAULT-LA64-NEXT: ret -; -; LA64-LABEL: one_fdiv_v2f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: vld $vr0, $a1, 0 -; LA64-NEXT: vfrecipe.d $vr1, $vr0 -; LA64-NEXT: lu52i.d $a1, $zero, 1023 -; LA64-NEXT: vreplgr2vr.d $vr2, $a1 -; LA64-NEXT: vfnmsub.d $vr3, $vr0, $vr1, $vr2 -; LA64-NEXT: vfmadd.d $vr1, $vr1, $vr3, $vr1 -; LA64-NEXT: vfnmsub.d $vr0, $vr0, $vr1, $vr2 -; LA64-NEXT: vfmadd.d $vr0, $vr1, $vr0, $vr1 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret entry: %v0 = load <2 x double>, ptr %a0 %div = fdiv fast <2 x double> , %v0 diff --git a/llvm/test/CodeGen/LoongArch/lsx/fsqrt-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/lsx/fsqrt-reciprocal-estimate.ll index 1f744830bd56b..4951696e05a94 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/fsqrt-reciprocal-estimate.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/fsqrt-reciprocal-estimate.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA32 -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA64 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s ;; 1.0 / (fsqrt vec) define void @one_div_sqrt_v4f32(ptr %res, ptr %a0) nounwind { @@ -19,11 +19,9 @@ define void 
@one_div_sqrt_v4f32(ptr %res, ptr %a0) nounwind { ; CHECK-NEXT: vfrsqrte.s $vr1, $vr0 ; CHECK-NEXT: vfmul.s $vr1, $vr0, $vr1 ; CHECK-NEXT: vfmul.s $vr0, $vr0, $vr1 -; CHECK-NEXT: lu12i.w $a1, -261120 -; CHECK-NEXT: vreplgr2vr.w $vr2, $a1 +; CHECK-NEXT: vldi $vr2, -1400 ; CHECK-NEXT: vfmadd.s $vr0, $vr0, $vr1, $vr2 -; CHECK-NEXT: lu12i.w $a1, -266240 -; CHECK-NEXT: vreplgr2vr.w $vr2, $a1 +; CHECK-NEXT: vldi $vr2, -3137 ; CHECK-NEXT: vfmul.s $vr1, $vr1, $vr2 ; CHECK-NEXT: vfmul.s $vr0, $vr1, $vr0 ; CHECK-NEXT: vst $vr0, $a0, 0 @@ -40,32 +38,29 @@ define void @one_div_sqrt_v2f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA32-LABEL: one_div_sqrt_v2f64: ; FAULT-LA32: # %bb.0: # %entry ; FAULT-LA32-NEXT: vld $vr0, $a1, 0 -; FAULT-LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0) -; FAULT-LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI1_0) ; FAULT-LA32-NEXT: vfsqrt.d $vr0, $vr0 +; FAULT-LA32-NEXT: vldi $vr1, -912 ; FAULT-LA32-NEXT: vfdiv.d $vr0, $vr1, $vr0 ; FAULT-LA32-NEXT: vst $vr0, $a0, 0 ; FAULT-LA32-NEXT: ret ; -; LA32-LABEL: one_div_sqrt_v2f64: -; LA32: # %bb.0: # %entry -; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: vfrsqrte.d $vr1, $vr0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0) -; LA32-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI1_0) -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_1) -; LA32-NEXT: vld $vr3, $a1, %pc_lo12(.LCPI1_1) -; LA32-NEXT: vfmul.d $vr1, $vr0, $vr1 -; LA32-NEXT: vfmul.d $vr4, $vr0, $vr1 -; LA32-NEXT: vfmadd.d $vr4, $vr4, $vr1, $vr2 -; LA32-NEXT: vfmul.d $vr1, $vr1, $vr3 -; LA32-NEXT: vfmul.d $vr1, $vr1, $vr4 -; LA32-NEXT: vfmul.d $vr0, $vr0, $vr1 -; LA32-NEXT: vfmadd.d $vr0, $vr0, $vr1, $vr2 -; LA32-NEXT: vfmul.d $vr1, $vr1, $vr3 -; LA32-NEXT: vfmul.d $vr0, $vr1, $vr0 -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret +; CHECK-LABEL: one_div_sqrt_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vfrsqrte.d $vr1, $vr0 +; CHECK-NEXT: vfmul.d $vr1, $vr0, $vr1 +; CHECK-NEXT: vfmul.d $vr2, $vr0, $vr1 +; CHECK-NEXT: vldi $vr3, -888 +; CHECK-NEXT: vfmadd.d $vr2, $vr2, $vr1, $vr3 +; CHECK-NEXT: vldi $vr4, -800 +; CHECK-NEXT: vfmul.d $vr1, $vr1, $vr4 +; CHECK-NEXT: vfmul.d $vr1, $vr1, $vr2 +; CHECK-NEXT: vfmul.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vfmadd.d $vr0, $vr0, $vr1, $vr3 +; CHECK-NEXT: vfmul.d $vr1, $vr1, $vr4 +; CHECK-NEXT: vfmul.d $vr0, $vr1, $vr0 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret ; ; FAULT-LA64-LABEL: one_div_sqrt_v2f64: ; FAULT-LA64: # %bb.0: # %entry @@ -73,28 +68,6 @@ define void @one_div_sqrt_v2f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA64-NEXT: vfrsqrt.d $vr0, $vr0 ; FAULT-LA64-NEXT: vst $vr0, $a0, 0 ; FAULT-LA64-NEXT: ret -; -; LA64-LABEL: one_div_sqrt_v2f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: vld $vr0, $a1, 0 -; LA64-NEXT: vfrsqrte.d $vr1, $vr0 -; LA64-NEXT: vfmul.d $vr1, $vr0, $vr1 -; LA64-NEXT: vfmul.d $vr2, $vr0, $vr1 -; LA64-NEXT: ori $a1, $zero, 0 -; LA64-NEXT: lu32i.d $a1, -524288 -; LA64-NEXT: lu52i.d $a1, $a1, -1024 -; LA64-NEXT: vreplgr2vr.d $vr3, $a1 -; LA64-NEXT: vfmadd.d $vr2, $vr2, $vr1, $vr3 -; LA64-NEXT: lu52i.d $a1, $zero, -1026 -; LA64-NEXT: vreplgr2vr.d $vr4, $a1 -; LA64-NEXT: vfmul.d $vr1, $vr1, $vr4 -; LA64-NEXT: vfmul.d $vr1, $vr1, $vr2 -; LA64-NEXT: vfmul.d $vr0, $vr0, $vr1 -; LA64-NEXT: vfmadd.d $vr0, $vr0, $vr1, $vr3 -; LA64-NEXT: vfmul.d $vr1, $vr1, $vr4 -; LA64-NEXT: vfmul.d $vr0, $vr1, $vr0 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret entry: %v0 = load <2 x double>, ptr %a0, align 16 %sqrt = call fast <2 x double> @llvm.sqrt.v2f64 (<2 x double> %v0) diff --git a/llvm/test/CodeGen/LoongArch/lsx/fsqrt.ll 
b/llvm/test/CodeGen/LoongArch/lsx/fsqrt.ll index d88e0d1ea7c2d..9664808681bb8 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/fsqrt.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/fsqrt.ll @@ -51,9 +51,8 @@ define void @one_div_sqrt_v2f64(ptr %res, ptr %a0) nounwind { ; LA32-LABEL: one_div_sqrt_v2f64: ; LA32: # %bb.0: # %entry ; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI3_0) ; LA32-NEXT: vfsqrt.d $vr0, $vr0 +; LA32-NEXT: vldi $vr1, -912 ; LA32-NEXT: vfdiv.d $vr0, $vr1, $vr0 ; LA32-NEXT: vst $vr0, $a0, 0 ; LA32-NEXT: ret diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcmp.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcmp.ll index 669c53b73b16f..92981211adeb8 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcmp.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fcmp.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s declare <4 x i32> @llvm.loongarch.lsx.vfcmp.caf.s(<4 x float>, <4 x float>) diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frecipe.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frecipe.ll index 1b7a97d9f9720..324098b918890 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frecipe.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frecipe.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s declare <4 x float> @llvm.loongarch.lsx.vfrecipe.s(<4 x float>) diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frsqrte.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frsqrte.ll index 3cd6c78e87d78..ad46b47c82c86 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frsqrte.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-frsqrte.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s declare <4 x float> @llvm.loongarch.lsx.vfrsqrte.s(<4 x float>) diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-max-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-max-invalid-imm.ll index 667ba32723fc4..2ecbe685ff20b 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-max-invalid-imm.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-max-invalid-imm.ll @@ -1,3 +1,4 @@ +; RUN: not llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s 2>&1 | FileCheck %s ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s declare <16 x i8> @llvm.loongarch.lsx.vmaxi.b(<16 x i8>, i32) diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-min-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-min-invalid-imm.ll index b73bada4f06fb..f4348f57442e6 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-min-invalid-imm.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-min-invalid-imm.ll @@ -1,3 +1,4 @@ +; RUN: not llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s 2>&1 | FileCheck %s ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s declare <16 x i8> @llvm.loongarch.lsx.vmini.b(<16 x i8>, i32) diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-d-invalid-imm.ll 
b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-d-invalid-imm.ll new file mode 100644 index 0000000000000..4dc5163e721ce --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-d-invalid-imm.ll @@ -0,0 +1,33 @@ +; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s + +declare i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64>, i32) + +define i64 @lsx_vpickve2gr_d_lo(<2 x i64> %va) nounwind { +; CHECK: llvm.loongarch.lsx.vpickve2gr.d: argument out of range +entry: + %res = call i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64> %va, i32 -1) + ret i64 %res +} + +define i64 @lsx_vpickve2gr_d_hi(<2 x i64> %va) nounwind { +; CHECK: llvm.loongarch.lsx.vpickve2gr.d: argument out of range +entry: + %res = call i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64> %va, i32 2) + ret i64 %res +} + +declare i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64>, i32) + +define i64 @lsx_vpickve2gr_du_lo(<2 x i64> %va) nounwind { +; CHECK: llvm.loongarch.lsx.vpickve2gr.du: argument out of range +entry: + %res = call i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64> %va, i32 -1) + ret i64 %res +} + +define i64 @lsx_vpickve2gr_du_hi(<2 x i64> %va) nounwind { +; CHECK: llvm.loongarch.lsx.vpickve2gr.du: argument out of range +entry: + %res = call i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64> %va, i32 2) + ret i64 %res +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-d.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-d.ll new file mode 100644 index 0000000000000..78f4e3c1bc18b --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-d.ll @@ -0,0 +1,26 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +declare i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64>, i32) + +define i64 @lsx_vpickve2gr_d(<2 x i64> %va) nounwind { +; CHECK-LABEL: lsx_vpickve2gr_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 1 +; CHECK-NEXT: ret +entry: + %res = call i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64> %va, i32 1) + ret i64 %res +} + +declare i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64>, i32) + +define i64 @lsx_vpickve2gr_du(<2 x i64> %va) nounwind { +; CHECK-LABEL: lsx_vpickve2gr_du: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpickve2gr.du $a0, $vr0, 1 +; CHECK-NEXT: ret +entry: + %res = call i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64> %va, i32 1) + ret i64 %res +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-invalid-imm.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-invalid-imm.ll index 3430c54d21941..492b97c8316c1 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-invalid-imm.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr-invalid-imm.ll @@ -1,3 +1,4 @@ +; RUN: not llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s 2>&1 | FileCheck %s ; RUN: not llc --mtriple=loongarch64 --mattr=+lsx < %s 2>&1 | FileCheck %s declare i32 @llvm.loongarch.lsx.vpickve2gr.b(<16 x i8>, i32) @@ -48,22 +49,6 @@ entry: ret i32 %res } -declare i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64>, i32) - -define i64 @lsx_vpickve2gr_d_lo(<2 x i64> %va) nounwind { -; CHECK: llvm.loongarch.lsx.vpickve2gr.d: argument out of range -entry: - %res = call i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64> %va, i32 -1) - ret i64 %res -} - -define i64 @lsx_vpickve2gr_d_hi(<2 x i64> %va) nounwind { -; CHECK: llvm.loongarch.lsx.vpickve2gr.d: argument out of range -entry: - %res = call i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x 
i64> %va, i32 2) - ret i64 %res -} - declare i32 @llvm.loongarch.lsx.vpickve2gr.bu(<16 x i8>, i32) define i32 @lsx_vpickve2gr_bu_lo(<16 x i8> %va) nounwind { @@ -111,19 +96,3 @@ entry: %res = call i32 @llvm.loongarch.lsx.vpickve2gr.wu(<4 x i32> %va, i32 4) ret i32 %res } - -declare i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64>, i32) - -define i64 @lsx_vpickve2gr_du_lo(<2 x i64> %va) nounwind { -; CHECK: llvm.loongarch.lsx.vpickve2gr.du: argument out of range -entry: - %res = call i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64> %va, i32 -1) - ret i64 %res -} - -define i64 @lsx_vpickve2gr_du_hi(<2 x i64> %va) nounwind { -; CHECK: llvm.loongarch.lsx.vpickve2gr.du: argument out of range -entry: - %res = call i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64> %va, i32 2) - ret i64 %res -} diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr.ll index ed56d30ce3c46..4e77f6b72fed9 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-pickve2gr.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s declare i32 @llvm.loongarch.lsx.vpickve2gr.b(<16 x i8>, i32) @@ -37,18 +38,6 @@ entry: ret i32 %res } -declare i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64>, i32) - -define i64 @lsx_vpickve2gr_d(<2 x i64> %va) nounwind { -; CHECK-LABEL: lsx_vpickve2gr_d: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 1 -; CHECK-NEXT: ret -entry: - %res = call i64 @llvm.loongarch.lsx.vpickve2gr.d(<2 x i64> %va, i32 1) - ret i64 %res -} - declare i32 @llvm.loongarch.lsx.vpickve2gr.bu(<16 x i8>, i32) define i32 @lsx_vpickve2gr_bu(<16 x i8> %va) nounwind { @@ -84,15 +73,3 @@ entry: %res = call i32 @llvm.loongarch.lsx.vpickve2gr.wu(<4 x i32> %va, i32 3) ret i32 %res } - -declare i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64>, i32) - -define i64 @lsx_vpickve2gr_du(<2 x i64> %va) nounwind { -; CHECK-LABEL: lsx_vpickve2gr_du: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vpickve2gr.du $a0, $vr0, 1 -; CHECK-NEXT: ret -entry: - %res = call i64 @llvm.loongarch.lsx.vpickve2gr.du(<2 x i64> %va, i32 1) - ret i64 %res -} diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-repl-ins-gr2vr-d.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-repl-ins-gr2vr-d.ll new file mode 100644 index 0000000000000..51533e4b2474c --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-repl-ins-gr2vr-d.ll @@ -0,0 +1,17 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +define <2 x i64> @vrepl_ins_d(i64 %a, i64 %b) { +; CHECK-LABEL: vrepl_ins_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vreplgr2vr.d $vr0, $a0 +; CHECK-NEXT: vinsgr2vr.d $vr0, $a1, 1 +; CHECK-NEXT: ret +entry: + %0 = call <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64 %a) + %1 = call <2 x i64> @llvm.loongarch.lsx.vinsgr2vr.d(<2 x i64> %0, i64 %b, i32 1) + ret <2 x i64> %1 +} + +declare <2 x i64> @llvm.loongarch.lsx.vinsgr2vr.d(<2 x i64>, i64, i32 immarg) +declare <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64) diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-repl-ins-gr2vr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-repl-ins-gr2vr.ll index aee7492946829..9d7ab6e1ab5ef 100644 --- 
a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-repl-ins-gr2vr.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-repl-ins-gr2vr.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s define <16 x i8> @vrepl_ins_b(i32 %a, i32 %b) { @@ -37,23 +38,9 @@ entry: ret <4 x i32> %1 } -define <2 x i64> @vrepl_ins_d(i64 %a, i64 %b) { -; CHECK-LABEL: vrepl_ins_d: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vreplgr2vr.d $vr0, $a0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a1, 1 -; CHECK-NEXT: ret -entry: - %0 = call <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64 %a) - %1 = call <2 x i64> @llvm.loongarch.lsx.vinsgr2vr.d(<2 x i64> %0, i64 %b, i32 1) - ret <2 x i64> %1 -} - declare <16 x i8> @llvm.loongarch.lsx.vinsgr2vr.b(<16 x i8>, i32, i32 immarg) declare <16 x i8> @llvm.loongarch.lsx.vreplgr2vr.b(i32) declare <8 x i16> @llvm.loongarch.lsx.vinsgr2vr.h(<8 x i16>, i32, i32 immarg) declare <8 x i16> @llvm.loongarch.lsx.vreplgr2vr.h(i32) declare <4 x i32> @llvm.loongarch.lsx.vinsgr2vr.w(<4 x i32>, i32, i32 immarg) declare <4 x i32> @llvm.loongarch.lsx.vreplgr2vr.w(i32) -declare <2 x i64> @llvm.loongarch.lsx.vinsgr2vr.d(<2 x i64>, i64, i32 immarg) -declare <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64) diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr-d.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr-d.ll new file mode 100644 index 0000000000000..c8d0fce6ed5a2 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr-d.ll @@ -0,0 +1,14 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +declare <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64) + +define <2 x i64> @lsx_vreplgr2vr_d(i64 %a) nounwind { +; CHECK-LABEL: lsx_vreplgr2vr_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vreplgr2vr.d $vr0, $a0 +; CHECK-NEXT: ret +entry: + %res = call <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64 %a) + ret <2 x i64> %res +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr.ll index 091f1c98c2289..edaa20792012d 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-replgr2vr.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s declare <16 x i8> @llvm.loongarch.lsx.vreplgr2vr.b(i32) @@ -36,15 +37,3 @@ entry: %res = call <4 x i32> @llvm.loongarch.lsx.vreplgr2vr.w(i32 %a) ret <4 x i32> %res } - -declare <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64) - -define <2 x i64> @lsx_vreplgr2vr_d(i64 %a) nounwind { -; CHECK-LABEL: lsx_vreplgr2vr_d: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vreplgr2vr.d $vr0, $a0 -; CHECK-NEXT: ret -entry: - %res = call <2 x i64> @llvm.loongarch.lsx.vreplgr2vr.d(i64 %a) - ret <2 x i64> %res -} diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-set.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-set.ll index 3188fb4e2c2ef..004bcde90907a 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-set.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-set.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc 
--mtriple=loongarch32 --mattr=+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s declare i32 @llvm.loongarch.lsx.bz.v(<16 x i8>) diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setallnez.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setallnez.ll index 22e01922e87bb..6544f91f045a7 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setallnez.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setallnez.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s declare i32 @llvm.loongarch.lsx.bnz.b(<16 x i8>) diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setanyeqz.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setanyeqz.ll index 96c79c10e4688..5ba3eb788c1d7 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setanyeqz.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-setanyeqz.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s declare i32 @llvm.loongarch.lsx.bz.b(<16 x i8>) diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll new file mode 100644 index 0000000000000..34f22e1f6bf45 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll @@ -0,0 +1,91 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +define void @vadda_b(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vadda_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadda.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <16 x i8>, ptr %a + %vb = load <16 x i8>, ptr %b + %conda = icmp slt <16 x i8> %va, zeroinitializer + %nega = sub <16 x i8> zeroinitializer, %va + %absa = select <16 x i1> %conda, <16 x i8> %nega, <16 x i8> %va + %condb = icmp slt <16 x i8> %vb, zeroinitializer + %negb = sub <16 x i8> zeroinitializer, %vb + %absb = select <16 x i1> %condb, <16 x i8> %negb, <16 x i8> %vb + %add = add <16 x i8> %absa, %absb + store <16 x i8> %add, ptr %res + ret void +} + +define void @vadda_h(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vadda_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadda.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i16>, ptr %a + %vb = load <8 x i16>, ptr %b + %conda = icmp slt <8 x i16> %va, zeroinitializer + %nega = sub <8 x i16> zeroinitializer, %va + %absa = select <8 x i1> %conda, <8 x i16> %nega, <8 x i16> %va + %condb = icmp slt <8 x i16> %vb, zeroinitializer + %negb = sub <8 x i16> zeroinitializer, %vb + %absb = select <8 x i1> %condb, <8 x i16> %negb, <8 x i16> %vb + %add = add <8 x i16> %absa, %absb + store <8 x i16> %add, ptr %res + ret void +} + +define void @vadda_w(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vadda_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadda.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, 
$a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i32>, ptr %a + %vb = load <4 x i32>, ptr %b + %conda = icmp slt <4 x i32> %va, zeroinitializer + %nega = sub <4 x i32> zeroinitializer, %va + %absa = select <4 x i1> %conda, <4 x i32> %nega, <4 x i32> %va + %condb = icmp slt <4 x i32> %vb, zeroinitializer + %negb = sub <4 x i32> zeroinitializer, %vb + %absb = select <4 x i1> %condb, <4 x i32> %negb, <4 x i32> %vb + %add = add <4 x i32> %absa, %absb + store <4 x i32> %add, ptr %res + ret void +} + +define void @vadda_d(ptr %res, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: vadda_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vadda.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <2 x i64>, ptr %a + %vb = load <2 x i64>, ptr %b + %conda = icmp slt <2 x i64> %va, zeroinitializer + %nega = sub <2 x i64> zeroinitializer, %va + %absa = select <2 x i1> %conda, <2 x i64> %nega, <2 x i64> %va + %condb = icmp slt <2 x i64> %vb, zeroinitializer + %negb = sub <2 x i64> zeroinitializer, %vb + %absb = select <2 x i1> %condb, <2 x i64> %negb, <2 x i64> %vb + %add = add <2 x i64> %absa, %absb + store <2 x i64> %add, ptr %res + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/extractelement.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/extractelement.ll index 3fb55d4806160..b17a90e71e85a 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/extractelement.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/extractelement.ll @@ -3,18 +3,11 @@ ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64 define void @extract_16xi8(ptr %src, ptr %dst) nounwind { -; LA32-LABEL: extract_16xi8: -; LA32: # %bb.0: -; LA32-NEXT: vld $vr0, $a0, 0 -; LA32-NEXT: vpickve2gr.b $a0, $vr0, 1 -; LA32-NEXT: st.b $a0, $a1, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: extract_16xi8: -; LA64: # %bb.0: -; LA64-NEXT: vld $vr0, $a0, 0 -; LA64-NEXT: vstelm.b $vr0, $a1, 0, 1 -; LA64-NEXT: ret +; CHECK-LABEL: extract_16xi8: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a0, 0 +; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 1 +; CHECK-NEXT: ret %v = load volatile <16 x i8>, ptr %src %e = extractelement <16 x i8> %v, i32 1 store i8 %e, ptr %dst @@ -22,18 +15,11 @@ define void @extract_16xi8(ptr %src, ptr %dst) nounwind { } define void @extract_8xi16(ptr %src, ptr %dst) nounwind { -; LA32-LABEL: extract_8xi16: -; LA32: # %bb.0: -; LA32-NEXT: vld $vr0, $a0, 0 -; LA32-NEXT: vpickve2gr.h $a0, $vr0, 1 -; LA32-NEXT: st.h $a0, $a1, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: extract_8xi16: -; LA64: # %bb.0: -; LA64-NEXT: vld $vr0, $a0, 0 -; LA64-NEXT: vstelm.h $vr0, $a1, 0, 1 -; LA64-NEXT: ret +; CHECK-LABEL: extract_8xi16: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a0, 0 +; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 1 +; CHECK-NEXT: ret %v = load volatile <8 x i16>, ptr %src %e = extractelement <8 x i16> %v, i32 1 store i16 %e, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll index 603bd21ab9af9..fb0b9cee67df5 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll @@ -53,8 +53,7 @@ define void @one_fdiv_v2f64(ptr %res, ptr %a0) nounwind { ; LA32-LABEL: one_fdiv_v2f64: ; LA32: # %bb.0: # %entry ; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI3_0) +; LA32-NEXT: vldi $vr1, 
-912 ; LA32-NEXT: vfdiv.d $vr0, $vr1, $vr0 ; LA32-NEXT: vst $vr0, $a0, 0 ; LA32-NEXT: ret diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll index 4bb1941724dc6..496a1aed39fb5 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/insertelement.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64 define void @insert_16xi8(ptr %src, ptr %dst, i8 %ins) nounwind { ; CHECK-LABEL: insert_16xi8: @@ -41,12 +42,20 @@ define void @insert_4xi32(ptr %src, ptr %dst, i32 %ins) nounwind { } define void @insert_2xi64(ptr %src, ptr %dst, i64 %ins) nounwind { -; CHECK-LABEL: insert_2xi64: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a2, 1 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_2xi64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 2 +; LA32-NEXT: vinsgr2vr.w $vr0, $a3, 3 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_2xi64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a2, 1 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <2 x i64>, ptr %src %v_new = insertelement <2 x i64> %v, i64 %ins, i32 1 store <2 x i64> %v_new, ptr %dst @@ -82,18 +91,30 @@ define void @insert_2xdouble(ptr %src, ptr %dst, double %ins) nounwind { } define void @insert_16xi8_idx(ptr %src, ptr %dst, i8 %ins, i32 %idx) nounwind { -; CHECK-LABEL: insert_16xi8_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI6_0) -; CHECK-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI6_0) -; CHECK-NEXT: vld $vr1, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0 -; CHECK-NEXT: vreplgr2vr.b $vr2, $a0 -; CHECK-NEXT: vseq.b $vr0, $vr2, $vr0 -; CHECK-NEXT: vreplgr2vr.b $vr2, $a2 -; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_16xi8_idx: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a4, %pc_hi20(.LCPI6_0) +; LA32-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI6_0) +; LA32-NEXT: vld $vr1, $a0, 0 +; LA32-NEXT: vreplgr2vr.b $vr2, $a3 +; LA32-NEXT: vseq.b $vr0, $vr2, $vr0 +; LA32-NEXT: vreplgr2vr.b $vr2, $a2 +; LA32-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_16xi8_idx: +; LA64: # %bb.0: +; LA64-NEXT: pcalau12i $a4, %pc_hi20(.LCPI6_0) +; LA64-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI6_0) +; LA64-NEXT: vld $vr1, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a3, 31, 0 +; LA64-NEXT: vreplgr2vr.b $vr2, $a0 +; LA64-NEXT: vseq.b $vr0, $vr2, $vr0 +; LA64-NEXT: vreplgr2vr.b $vr2, $a2 +; LA64-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <16 x i8>, ptr %src %v_new = insertelement <16 x i8> %v, i8 %ins, i32 %idx store <16 x i8> %v_new, ptr %dst @@ -101,18 +122,30 @@ define void @insert_16xi8_idx(ptr %src, ptr %dst, i8 %ins, i32 %idx) nounwind { } define void @insert_8xi16_idx(ptr %src, ptr %dst, i16 %ins, i32 %idx) nounwind { -; CHECK-LABEL: insert_8xi16_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a4, 
%pc_hi20(.LCPI7_0) -; CHECK-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI7_0) -; CHECK-NEXT: vld $vr1, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0 -; CHECK-NEXT: vreplgr2vr.h $vr2, $a0 -; CHECK-NEXT: vseq.h $vr0, $vr2, $vr0 -; CHECK-NEXT: vreplgr2vr.h $vr2, $a2 -; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_8xi16_idx: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a4, %pc_hi20(.LCPI7_0) +; LA32-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI7_0) +; LA32-NEXT: vld $vr1, $a0, 0 +; LA32-NEXT: vreplgr2vr.h $vr2, $a3 +; LA32-NEXT: vseq.h $vr0, $vr2, $vr0 +; LA32-NEXT: vreplgr2vr.h $vr2, $a2 +; LA32-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_8xi16_idx: +; LA64: # %bb.0: +; LA64-NEXT: pcalau12i $a4, %pc_hi20(.LCPI7_0) +; LA64-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI7_0) +; LA64-NEXT: vld $vr1, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a3, 31, 0 +; LA64-NEXT: vreplgr2vr.h $vr2, $a0 +; LA64-NEXT: vseq.h $vr0, $vr2, $vr0 +; LA64-NEXT: vreplgr2vr.h $vr2, $a2 +; LA64-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <8 x i16>, ptr %src %v_new = insertelement <8 x i16> %v, i16 %ins, i32 %idx store <8 x i16> %v_new, ptr %dst @@ -120,18 +153,30 @@ define void @insert_8xi16_idx(ptr %src, ptr %dst, i16 %ins, i32 %idx) nounwind { } define void @insert_4xi32_idx(ptr %src, ptr %dst, i32 %ins, i32 %idx) nounwind { -; CHECK-LABEL: insert_4xi32_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI8_0) -; CHECK-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI8_0) -; CHECK-NEXT: vld $vr1, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0 -; CHECK-NEXT: vreplgr2vr.w $vr2, $a0 -; CHECK-NEXT: vseq.w $vr0, $vr2, $vr0 -; CHECK-NEXT: vreplgr2vr.w $vr2, $a2 -; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_4xi32_idx: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a4, %pc_hi20(.LCPI8_0) +; LA32-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI8_0) +; LA32-NEXT: vld $vr1, $a0, 0 +; LA32-NEXT: vreplgr2vr.w $vr2, $a3 +; LA32-NEXT: vseq.w $vr0, $vr2, $vr0 +; LA32-NEXT: vreplgr2vr.w $vr2, $a2 +; LA32-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_4xi32_idx: +; LA64: # %bb.0: +; LA64-NEXT: pcalau12i $a4, %pc_hi20(.LCPI8_0) +; LA64-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI8_0) +; LA64-NEXT: vld $vr1, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a3, 31, 0 +; LA64-NEXT: vreplgr2vr.w $vr2, $a0 +; LA64-NEXT: vseq.w $vr0, $vr2, $vr0 +; LA64-NEXT: vreplgr2vr.w $vr2, $a2 +; LA64-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <4 x i32>, ptr %src %v_new = insertelement <4 x i32> %v, i32 %ins, i32 %idx store <4 x i32> %v_new, ptr %dst @@ -139,18 +184,36 @@ define void @insert_4xi32_idx(ptr %src, ptr %dst, i32 %ins, i32 %idx) nounwind { } define void @insert_2xi64_idx(ptr %src, ptr %dst, i64 %ins, i32 %idx) nounwind { -; CHECK-LABEL: insert_2xi64_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: pcalau12i $a4, %pc_hi20(.LCPI9_0) -; CHECK-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI9_0) -; CHECK-NEXT: vld $vr1, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a3, 31, 0 -; CHECK-NEXT: vreplgr2vr.d $vr2, $a0 -; CHECK-NEXT: vseq.d $vr0, $vr2, $vr0 -; CHECK-NEXT: vreplgr2vr.d $vr2, $a2 -; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_2xi64_idx: +; LA32: # %bb.0: +; 
LA32-NEXT: pcalau12i $a5, %pc_hi20(.LCPI9_0) +; LA32-NEXT: vld $vr0, $a5, %pc_lo12(.LCPI9_0) +; LA32-NEXT: add.w $a4, $a4, $a4 +; LA32-NEXT: vld $vr1, $a0, 0 +; LA32-NEXT: vreplgr2vr.w $vr2, $a4 +; LA32-NEXT: vseq.w $vr2, $vr2, $vr0 +; LA32-NEXT: vreplgr2vr.w $vr3, $a2 +; LA32-NEXT: vbitsel.v $vr1, $vr1, $vr3, $vr2 +; LA32-NEXT: addi.w $a0, $a4, 1 +; LA32-NEXT: vreplgr2vr.w $vr2, $a0 +; LA32-NEXT: vseq.w $vr0, $vr2, $vr0 +; LA32-NEXT: vreplgr2vr.w $vr2, $a3 +; LA32-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_2xi64_idx: +; LA64: # %bb.0: +; LA64-NEXT: pcalau12i $a4, %pc_hi20(.LCPI9_0) +; LA64-NEXT: vld $vr0, $a4, %pc_lo12(.LCPI9_0) +; LA64-NEXT: vld $vr1, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a3, 31, 0 +; LA64-NEXT: vreplgr2vr.d $vr2, $a0 +; LA64-NEXT: vseq.d $vr0, $vr2, $vr0 +; LA64-NEXT: vreplgr2vr.d $vr2, $a2 +; LA64-NEXT: vbitsel.v $vr0, $vr1, $vr2, $vr0 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <2 x i64>, ptr %src %v_new = insertelement <2 x i64> %v, i64 %ins, i32 %idx store <2 x i64> %v_new, ptr %dst @@ -158,19 +221,32 @@ define void @insert_2xi64_idx(ptr %src, ptr %dst, i64 %ins, i32 %idx) nounwind { } define void @insert_4xfloat_idx(ptr %src, ptr %dst, float %ins, i32 %idx) nounwind { -; CHECK-LABEL: insert_4xfloat_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0 -; CHECK-NEXT: pcalau12i $a3, %pc_hi20(.LCPI10_0) -; CHECK-NEXT: vld $vr1, $a3, %pc_lo12(.LCPI10_0) -; CHECK-NEXT: vld $vr2, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0 -; CHECK-NEXT: vreplgr2vr.w $vr3, $a0 -; CHECK-NEXT: vseq.w $vr1, $vr3, $vr1 -; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0 -; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr0, $vr1 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: insert_4xfloat_idx: +; LA32: # %bb.0: +; LA32-NEXT: pcalau12i $a3, %pc_hi20(.LCPI10_0) +; LA32-NEXT: vld $vr1, $a3, %pc_lo12(.LCPI10_0) +; LA32-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA32-NEXT: vld $vr2, $a0, 0 +; LA32-NEXT: vreplgr2vr.w $vr3, $a2 +; LA32-NEXT: vseq.w $vr1, $vr3, $vr1 +; LA32-NEXT: vreplvei.w $vr0, $vr0, 0 +; LA32-NEXT: vbitsel.v $vr0, $vr2, $vr0, $vr1 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_4xfloat_idx: +; LA64: # %bb.0: +; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0 +; LA64-NEXT: pcalau12i $a3, %pc_hi20(.LCPI10_0) +; LA64-NEXT: vld $vr1, $a3, %pc_lo12(.LCPI10_0) +; LA64-NEXT: vld $vr2, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a2, 31, 0 +; LA64-NEXT: vreplgr2vr.w $vr3, $a0 +; LA64-NEXT: vseq.w $vr1, $vr3, $vr1 +; LA64-NEXT: vreplvei.w $vr0, $vr0, 0 +; LA64-NEXT: vbitsel.v $vr0, $vr2, $vr0, $vr1 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <4 x float>, ptr %src %v_new = insertelement <4 x float> %v, float %ins, i32 %idx store <4 x float> %v_new, ptr %dst @@ -178,19 +254,34 @@ define void @insert_4xfloat_idx(ptr %src, ptr %dst, float %ins, i32 %idx) nounwi } define void @insert_2xdouble_idx(ptr %src, ptr %dst, double %ins, i32 %idx) nounwind { -; CHECK-LABEL: insert_2xdouble_idx: -; CHECK: # %bb.0: -; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 -; CHECK-NEXT: pcalau12i $a3, %pc_hi20(.LCPI11_0) -; CHECK-NEXT: vld $vr1, $a3, %pc_lo12(.LCPI11_0) -; CHECK-NEXT: vld $vr2, $a0, 0 -; CHECK-NEXT: bstrpick.d $a0, $a2, 31, 0 -; CHECK-NEXT: vreplgr2vr.d $vr3, $a0 -; CHECK-NEXT: vseq.d $vr1, $vr3, $vr1 -; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0 -; CHECK-NEXT: vbitsel.v $vr0, $vr2, $vr0, $vr1 -; CHECK-NEXT: vst $vr0, $a1, 0 -; 
CHECK-NEXT: ret +; LA32-LABEL: insert_2xdouble_idx: +; LA32: # %bb.0: +; LA32-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA32-NEXT: vld $vr1, $a0, 0 +; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI11_0) +; LA32-NEXT: vld $vr2, $a0, %pc_lo12(.LCPI11_0) +; LA32-NEXT: vrepli.b $vr3, 0 +; LA32-NEXT: vinsgr2vr.w $vr3, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr3, $a2, 2 +; LA32-NEXT: vseq.d $vr2, $vr3, $vr2 +; LA32-NEXT: vreplvei.d $vr0, $vr0, 0 +; LA32-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: insert_2xdouble_idx: +; LA64: # %bb.0: +; LA64-NEXT: # kill: def $f0_64 killed $f0_64 def $vr0 +; LA64-NEXT: pcalau12i $a3, %pc_hi20(.LCPI11_0) +; LA64-NEXT: vld $vr1, $a3, %pc_lo12(.LCPI11_0) +; LA64-NEXT: vld $vr2, $a0, 0 +; LA64-NEXT: bstrpick.d $a0, $a2, 31, 0 +; LA64-NEXT: vreplgr2vr.d $vr3, $a0 +; LA64-NEXT: vseq.d $vr1, $vr3, $vr1 +; LA64-NEXT: vreplvei.d $vr0, $vr0, 0 +; LA64-NEXT: vbitsel.v $vr0, $vr2, $vr0, $vr1 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %v = load volatile <2 x double>, ptr %src %v_new = insertelement <2 x double> %v, double %ins, i32 %idx store <2 x double> %v_new, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vreplvei.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vreplvei.ll index 10510786f3216..40961bc9a08b9 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vreplvei.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vreplvei.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s ;; vreplvei.b diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vshuf.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vshuf.ll index d1c071b45ddff..b13433ee5d159 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vshuf.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vshuf.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s define <16 x i8> @shufflevector_v16i8(<16 x i8> %a, <16 x i8> %b) { diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vshuf4i.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vshuf4i.ll index cd80dcb44e433..bee4ba6a84334 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vshuf4i.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/shuffle-as-vshuf4i.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s ;; vshuf4i.b diff --git a/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll new file mode 100644 index 0000000000000..b651f11596c82 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll @@ -0,0 +1,58 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 -mattr=+lsx < %s | 
FileCheck %s + +define <4 x float> @fadd_elt0_v4f32(float %a) nounwind { +; CHECK-LABEL: fadd_elt0_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -1168 +; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <4 x float> poison, float %a, i32 0 + %c = fadd <4 x float> %b, <float 1.0, float poison, float poison, float poison> + ret <4 x float> %c +} + +define <2 x double> @fadd_elt0_v2f64(double %a) nounwind { +; CHECK-LABEL: fadd_elt0_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -912 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <2 x double> poison, double %a, i32 0 + %c = fadd <2 x double> %b, <double 1.0, double poison> + ret <2 x double> %c +} + +define <4 x float> @fsub_splat_v4f32(float %b) nounwind { +; CHECK-LABEL: fsub_splat_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -1168 +; CHECK-NEXT: fsub.s $fa0, $fa1, $fa0 +; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <4 x float> poison, float 1.0, i32 0 + %insb = insertelement <4 x float> poison, float %b, i32 0 + %va = shufflevector <4 x float> %insa, <4 x float> poison, <4 x i32> zeroinitializer + %vb = shufflevector <4 x float> %insb, <4 x float> poison, <4 x i32> zeroinitializer + %c = fsub <4 x float> %va, %vb + ret <4 x float> %c +} + +define <2 x double> @fsub_splat_v2f64(double %a, double %b) nounwind { +; CHECK-LABEL: fsub_splat_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsub.d $fa0, $fa0, $fa1 +; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <2 x double> poison, double %a, i32 0 + %insb = insertelement <2 x double> poison, double %b, i32 0 + %va = shufflevector <2 x double> %insa, <2 x double> poison, <2 x i32> zeroinitializer + %vb = shufflevector <2 x double> %insb, <2 x double> poison, <2 x i32> zeroinitializer + %c = fsub <2 x double> %va, %vb + ret <2 x double> %c +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-add.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-add.ll index 57fd09ed2e09b..9c3a6f7be0542 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-add.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-add.ll @@ -1,17 +1,29 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefix=LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefix=LA64 define void @vec_reduce_add_v16i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v16i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 0 -; CHECK-NEXT: st.b $a0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v16i8: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.b $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v16i8: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 +;
LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: st.b $a0, $a1, 0 +; LA64-NEXT: ret %v = load <16 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %v) store i8 %res, ptr %dst @@ -19,16 +31,29 @@ define void @vec_reduce_add_v16i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v8i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.w $a0, $vr0, 0 -; CHECK-NEXT: st.b $a0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v8i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.b $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v8i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: st.b $a0, $a1, 0 +; LA64-NEXT: ret %v = load <8 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %v) store i8 %res, ptr %dst @@ -36,15 +61,25 @@ define void @vec_reduce_add_v8i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v4i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v4i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.w $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.h $a0, $vr0, 0 -; CHECK-NEXT: st.b $a0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v4i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a0, $a0, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 0 +; LA32-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.b $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v4i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.w $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.w $vr0, $a0, 0 +; LA64-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: st.b $a0, $a1, 0 +; LA64-NEXT: ret %v = load <4 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> %v) store i8 %res, ptr %dst @@ -52,13 +87,23 @@ define void @vec_reduce_add_v4i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v2i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v2i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.h $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.h $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 -; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v2i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.h $a0, $a0, 0 +; LA32-NEXT: vinsgr2vr.h $vr0, $a0, 0 +; LA32-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.b $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v2i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.h $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.h $vr0, $a0, 0 +; LA64-NEXT: vhaddw.h.b $vr0, $vr0, $vr0 +; LA64-NEXT: vpickve2gr.d 
$a0, $vr0, 0 +; LA64-NEXT: st.b $a0, $a1, 0 +; LA64-NEXT: ret %v = load <2 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> %v) store i8 %res, ptr %dst @@ -66,15 +111,25 @@ define void @vec_reduce_add_v2i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v8i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v8i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 0 -; CHECK-NEXT: st.h $a0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v8i16: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.h $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v8i16: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: st.h $a0, $a1, 0 +; LA64-NEXT: ret %v = load <8 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %v) store i16 %res, ptr %dst @@ -82,15 +137,27 @@ define void @vec_reduce_add_v8i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v4i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.w $a0, $vr0, 0 -; CHECK-NEXT: st.h $a0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v4i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.h $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v4i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: st.h $a0, $a1, 0 +; LA64-NEXT: ret %v = load <4 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %v) store i16 %res, ptr %dst @@ -98,13 +165,23 @@ define void @vec_reduce_add_v4i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v2i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v2i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.w $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v2i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a0, $a0, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 0 +; LA32-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.h $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v2i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.w $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.w $vr0, $a0, 0 +; LA64-NEXT: vhaddw.w.h $vr0, $vr0, $vr0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: st.h $a0, $a1, 0 +; LA64-NEXT: ret %v = load <2 x i16>, ptr %src %res = call i16 
@llvm.vector.reduce.add.v2i16(<2 x i16> %v) store i16 %res, ptr %dst @@ -112,14 +189,23 @@ define void @vec_reduce_add_v2i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v4i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 -; CHECK-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.d $a0, $vr0, 0 -; CHECK-NEXT: st.w $a0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v4i32: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA32-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v4i32: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA64-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: st.w $a0, $a1, 0 +; LA64-NEXT: ret %v = load <4 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v) store i32 %res, ptr %dst @@ -127,13 +213,25 @@ define void @vec_reduce_add_v4i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v2i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v2i32: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v2i32: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vhaddw.d.w $vr0, $vr0, $vr0 +; LA64-NEXT: vpickve2gr.d $a0, $vr0, 0 +; LA64-NEXT: st.w $a0, $a1, 0 +; LA64-NEXT: ret %v = load <2 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %v) store i32 %res, ptr %dst @@ -141,12 +239,27 @@ define void @vec_reduce_add_v2i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_add_v2i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_add_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_add_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 0 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a4, $vr0, 1 +; LA32-NEXT: add.w $a3, $a4, $a3 +; LA32-NEXT: add.w $a0, $a2, $a0 +; LA32-NEXT: sltu $a2, $a0, $a2 +; LA32-NEXT: add.w $a2, $a3, $a2 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: st.w $a2, $a1, 4 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_add_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vhaddw.q.d $vr0, $vr0, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll index cca4ce30758f1..734ecba843a4e 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll +++ 
b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-and.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_and_v16i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_and_v16i8: @@ -22,18 +23,33 @@ define void @vec_reduce_and_v16i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_and_v8i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_and_v8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_and_v8i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_and_v8i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %v) store i8 %res, ptr %dst @@ -91,16 +107,29 @@ define void @vec_reduce_and_v8i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_and_v4i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_and_v4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_and_v4i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_and_v4i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %v) store i16 %res, ptr %dst @@ -123,15 +152,26 @@ define void @vec_reduce_and_v2i16(ptr %src, ptr %dst) nounwind { } define void 
@vec_reduce_and_v4i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_and_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_and_v4i32: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_and_v4i32: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %v) store i32 %res, ptr %dst @@ -139,14 +179,27 @@ define void @vec_reduce_and_v4i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_and_v2i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_and_v2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_and_v2i32: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vori.b $vr1, $vr0, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr1, 4 +; LA32-NEXT: vand.v $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_and_v2i32: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %v) store i32 %res, ptr %dst @@ -154,13 +207,26 @@ define void @vec_reduce_and_v2i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_and_v2i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_and_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vand.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_and_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 1 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a4, $vr0, 0 +; LA32-NEXT: and $a3, $a4, $a3 +; LA32-NEXT: and $a0, $a2, $a0 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: st.w $a3, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_and_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vand.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll index ce431f0cf6a74..e833930830c3f 100644 --- 
a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-or.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_or_v16i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_or_v16i8: @@ -22,18 +23,33 @@ define void @vec_reduce_or_v16i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_or_v8i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_or_v8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_or_v8i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_or_v8i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %v) store i8 %res, ptr %dst @@ -91,16 +107,29 @@ define void @vec_reduce_or_v8i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_or_v4i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_or_v4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_or_v4i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_or_v4i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %v) store i16 %res, ptr %dst @@ -123,15 +152,26 @@ define void @vec_reduce_or_v2i16(ptr %src, ptr %dst) nounwind { } 
define void @vec_reduce_or_v4i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_or_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_or_v4i32: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_or_v4i32: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %v) store i32 %res, ptr %dst @@ -139,14 +179,27 @@ define void @vec_reduce_or_v4i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_or_v2i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_or_v2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_or_v2i32: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vori.b $vr1, $vr0, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr1, 4 +; LA32-NEXT: vor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_or_v2i32: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %v) store i32 %res, ptr %dst @@ -154,13 +207,26 @@ define void @vec_reduce_or_v2i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_or_v2i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_or_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_or_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 1 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a4, $vr0, 0 +; LA32-NEXT: or $a3, $a4, $a3 +; LA32-NEXT: or $a0, $a2, $a0 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: st.w $a3, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_or_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll index bdf153ad7794f..2220df68cddfd 100644 --- 
a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smax.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_smax_v16i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_smax_v16i8: @@ -22,18 +23,33 @@ define void @vec_reduce_smax_v16i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smax_v8i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smax_v8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1 -; CHECK-NEXT: vmax.b $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smax_v8i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.b $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vmax.b $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA32-NEXT: vmax.b $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smax_v8i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmax.b $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vmax.b $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA64-NEXT: vmax.b $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %v) store i8 %res, ptr %dst @@ -91,16 +107,29 @@ define void @vec_reduce_smax_v8i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smax_v4i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smax_v4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vmax.h $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smax_v4i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.h $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vmax.h $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smax_v4i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmax.h $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vmax.h $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %v) store i16 %res, ptr %dst @@ -123,15 +152,26 @@ define void 
@vec_reduce_smax_v2i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smax_v4i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smax_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smax_v4i32: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vmax.w $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.w $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smax_v4i32: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmax.w $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmax.w $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %v) store i32 %res, ptr %dst @@ -139,14 +179,26 @@ define void @vec_reduce_smax_v4i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smax_v2i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smax_v2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smax_v2i32: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.w $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smax_v2i32: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmax.w $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %v) store i32 %res, ptr %dst @@ -154,13 +206,37 @@ define void @vec_reduce_smax_v2i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smax_v2i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smax_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmax.d $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smax_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 1 +; LA32-NEXT: vpickve2gr.w $a4, $vr0, 3 +; LA32-NEXT: slt $a5, $a4, $a3 +; LA32-NEXT: xor $a6, $a3, $a4 +; LA32-NEXT: sltui $a6, $a6, 1 +; LA32-NEXT: masknez $a5, $a5, $a6 +; LA32-NEXT: sltu $a7, $a2, $a0 +; LA32-NEXT: maskeqz $a6, $a7, $a6 +; LA32-NEXT: or $a5, $a6, $a5 +; LA32-NEXT: masknez $a2, $a2, $a5 +; LA32-NEXT: maskeqz $a0, $a0, $a5 +; LA32-NEXT: or $a0, $a0, $a2 +; LA32-NEXT: masknez $a2, $a4, $a5 +; LA32-NEXT: maskeqz $a3, $a3, $a5 +; LA32-NEXT: or $a2, $a3, $a2 +; LA32-NEXT: st.w $a2, $a1, 4 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smax_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; 
LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmax.d $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll index e3b3c5e6f2410..50d76a3872e1e 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-smin.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_smin_v16i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_smin_v16i8: @@ -22,18 +23,33 @@ define void @vec_reduce_smin_v16i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smin_v8i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smin_v8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1 -; CHECK-NEXT: vmin.b $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smin_v8i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.b $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vmin.b $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA32-NEXT: vmin.b $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smin_v8i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.b $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vmin.b $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA64-NEXT: vmin.b $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %v) store i8 %res, ptr %dst @@ -91,16 +107,29 @@ define void @vec_reduce_smin_v8i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smin_v4i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smin_v4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vmin.h $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smin_v4i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.h $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vmin.h $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smin_v4i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 
0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.h $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vmin.h $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %v) store i16 %res, ptr %dst @@ -123,15 +152,26 @@ define void @vec_reduce_smin_v2i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smin_v4i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smin_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smin_v4i32: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smin_v4i32: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %v) store i32 %res, ptr %dst @@ -139,14 +179,26 @@ define void @vec_reduce_smin_v4i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smin_v2i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smin_v2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.w $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smin_v2i32: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smin_v2i32: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.w $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %v) store i32 %res, ptr %dst @@ -154,13 +206,37 @@ define void @vec_reduce_smin_v2i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_smin_v2i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_smin_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmin.d $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_smin_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 0 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a4, $vr0, 1 +; LA32-NEXT: slt $a5, $a4, $a3 +; LA32-NEXT: xor $a6, $a4, $a3 +; LA32-NEXT: sltui $a6, $a6, 1 +; LA32-NEXT: masknez $a5, $a5, $a6 +; LA32-NEXT: sltu $a7, $a2, $a0 +; LA32-NEXT: maskeqz $a6, $a7, $a6 +; LA32-NEXT: 
or $a5, $a6, $a5 +; LA32-NEXT: masknez $a0, $a0, $a5 +; LA32-NEXT: maskeqz $a2, $a2, $a5 +; LA32-NEXT: or $a0, $a2, $a0 +; LA32-NEXT: masknez $a2, $a3, $a5 +; LA32-NEXT: maskeqz $a3, $a4, $a5 +; LA32-NEXT: or $a2, $a3, $a2 +; LA32-NEXT: st.w $a2, $a1, 4 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_smin_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmin.d $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll index fff2304befd68..88146c78a969d 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umax.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_umax_v16i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_umax_v16i8: @@ -22,18 +23,33 @@ define void @vec_reduce_umax_v16i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umax_v8i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umax_v8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1 -; CHECK-NEXT: vmax.bu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umax_v8i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.bu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vmax.bu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA32-NEXT: vmax.bu $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umax_v8i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmax.bu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vmax.bu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA64-NEXT: vmax.bu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %v) store i8 %res, ptr %dst @@ -91,16 +107,29 @@ define void @vec_reduce_umax_v8i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umax_v4i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umax_v4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vmax.hu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umax_v4i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, 
$a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.hu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vmax.hu $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umax_v4i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmax.hu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vmax.hu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %v) store i16 %res, ptr %dst @@ -123,15 +152,26 @@ define void @vec_reduce_umax_v2i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umax_v4i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umax_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umax_v4i32: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umax_v4i32: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %v) store i32 %res, ptr %dst @@ -139,14 +179,26 @@ define void @vec_reduce_umax_v4i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umax_v2i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umax_v2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmax.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umax_v2i32: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umax_v2i32: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmax.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %v) store i32 %res, ptr %dst @@ -154,13 +206,37 @@ define void @vec_reduce_umax_v2i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umax_v2i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umax_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmax.du $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umax_v2i64: +; 
LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 1 +; LA32-NEXT: vpickve2gr.w $a4, $vr0, 3 +; LA32-NEXT: sltu $a5, $a4, $a3 +; LA32-NEXT: xor $a6, $a3, $a4 +; LA32-NEXT: sltui $a6, $a6, 1 +; LA32-NEXT: masknez $a5, $a5, $a6 +; LA32-NEXT: sltu $a7, $a2, $a0 +; LA32-NEXT: maskeqz $a6, $a7, $a6 +; LA32-NEXT: or $a5, $a6, $a5 +; LA32-NEXT: masknez $a2, $a2, $a5 +; LA32-NEXT: maskeqz $a0, $a0, $a5 +; LA32-NEXT: or $a0, $a0, $a2 +; LA32-NEXT: masknez $a2, $a4, $a5 +; LA32-NEXT: maskeqz $a3, $a3, $a5 +; LA32-NEXT: or $a2, $a3, $a2 +; LA32-NEXT: st.w $a2, $a1, 4 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umax_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmax.du $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll index e14a294cbcfb6..e9d4b4aab6f91 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-umin.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_umin_v16i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_umin_v16i8: @@ -22,18 +23,33 @@ define void @vec_reduce_umin_v16i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umin_v8i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umin_v8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1 -; CHECK-NEXT: vmin.bu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umin_v8i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.bu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vmin.bu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA32-NEXT: vmin.bu $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umin_v8i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.bu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vmin.bu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA64-NEXT: vmin.bu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %v) store i8 %res, ptr %dst @@ -91,16 +107,29 @@ define void @vec_reduce_umin_v8i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umin_v4i16(ptr %src, ptr %dst) nounwind { -; 
CHECK-LABEL: vec_reduce_umin_v4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vmin.hu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umin_v4i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.hu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vmin.hu $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umin_v4i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.hu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vmin.hu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %v) store i16 %res, ptr %dst @@ -123,15 +152,26 @@ define void @vec_reduce_umin_v2i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umin_v4i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umin_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umin_v4i32: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umin_v4i32: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %v) store i32 %res, ptr %dst @@ -139,14 +179,26 @@ define void @vec_reduce_umin_v4i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umin_v2i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umin_v2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vmin.wu $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umin_v2i32: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umin_v2i32: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vmin.wu $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %v) store i32 %res, ptr %dst @@ 
-154,13 +206,37 @@ define void @vec_reduce_umin_v2i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_umin_v2i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_umin_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vmin.du $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_umin_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 0 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a4, $vr0, 1 +; LA32-NEXT: sltu $a5, $a4, $a3 +; LA32-NEXT: xor $a6, $a4, $a3 +; LA32-NEXT: sltui $a6, $a6, 1 +; LA32-NEXT: masknez $a5, $a5, $a6 +; LA32-NEXT: sltu $a7, $a2, $a0 +; LA32-NEXT: maskeqz $a6, $a7, $a6 +; LA32-NEXT: or $a5, $a6, $a5 +; LA32-NEXT: masknez $a0, $a0, $a5 +; LA32-NEXT: maskeqz $a2, $a2, $a5 +; LA32-NEXT: or $a0, $a2, $a0 +; LA32-NEXT: masknez $a2, $a3, $a5 +; LA32-NEXT: maskeqz $a3, $a4, $a5 +; LA32-NEXT: or $a2, $a3, $a2 +; LA32-NEXT: st.w $a2, $a1, 4 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_umin_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vmin.du $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll index ae2bb8f91de05..ed965e9e10ee7 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-reduce-xor.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @vec_reduce_xor_v16i8(ptr %src, ptr %dst) nounwind { ; CHECK-LABEL: vec_reduce_xor_v16i8: @@ -22,18 +23,33 @@ define void @vec_reduce_xor_v16i8(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_xor_v8i8(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_xor_v8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 1 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.b $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_xor_v8i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_xor_v8i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 
+; LA64-NEXT: vbsrl.v $vr1, $vr0, 1 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.b $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <8 x i8>, ptr %src %res = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %v) store i8 %res, ptr %dst @@ -91,16 +107,29 @@ define void @vec_reduce_xor_v8i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_xor_v4i16(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_xor_v4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 2 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_xor_v4i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_xor_v4i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 2 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i16>, ptr %src %res = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %v) store i16 %res, ptr %dst @@ -123,15 +152,26 @@ define void @vec_reduce_xor_v2i16(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_xor_v4i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_xor_v4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_xor_v4i32: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_xor_v4i32: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <4 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %v) store i32 %res, ptr %dst @@ -139,14 +179,27 @@ define void @vec_reduce_xor_v4i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_xor_v2i32(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_xor_v2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 4 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_xor_v2i32: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vori.b $vr1, $vr0, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 1 +; LA32-NEXT: vbsrl.v $vr1, $vr1, 4 +; LA32-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, 
$a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_xor_v2i32: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 4 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i32>, ptr %src %res = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %v) store i32 %res, ptr %dst @@ -154,13 +207,26 @@ define void @vec_reduce_xor_v2i32(ptr %src, ptr %dst) nounwind { } define void @vec_reduce_xor_v2i64(ptr %src, ptr %dst) nounwind { -; CHECK-LABEL: vec_reduce_xor_v2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vbsrl.v $vr1, $vr0, 8 -; CHECK-NEXT: vxor.v $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: vec_reduce_xor_v2i64: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 3 +; LA32-NEXT: vpickve2gr.w $a2, $vr0, 1 +; LA32-NEXT: vpickve2gr.w $a3, $vr0, 2 +; LA32-NEXT: vpickve2gr.w $a4, $vr0, 0 +; LA32-NEXT: xor $a3, $a4, $a3 +; LA32-NEXT: xor $a0, $a2, $a0 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: st.w $a3, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: vec_reduce_xor_v2i64: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vbsrl.v $vr1, $vr0, 8 +; LA64-NEXT: vxor.v $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %v = load <2 x i64>, ptr %src %res = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %v) store i64 %res, ptr %dst diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-sext.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-sext.ll index 9485df746ff1c..dce6dc9f2aa37 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-sext.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-sext.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s - +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefixes=CHECK,LA64 define void @load_sext_2i8_to_2i64(ptr %ptr, ptr %dst) { ; CHECK-LABEL: load_sext_2i8_to_2i64: @@ -40,15 +40,27 @@ entry: } define void @load_sext_8i8_to_8i16(ptr %ptr, ptr %dst) { -; CHECK-LABEL: load_sext_8i8_to_8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vilvl.b $vr0, $vr0, $vr0 -; CHECK-NEXT: vslli.h $vr0, $vr0, 8 -; CHECK-NEXT: vsrai.h $vr0, $vr0, 8 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_sext_8i8_to_8i16: +; LA32: # %bb.0: # %entry +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vilvl.b $vr0, $vr0, $vr0 +; LA32-NEXT: vslli.h $vr0, $vr0, 8 +; LA32-NEXT: vsrai.h $vr0, $vr0, 8 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_sext_8i8_to_8i16: +; LA64: # %bb.0: # %entry +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vilvl.b $vr0, $vr0, $vr0 +; LA64-NEXT: vslli.h $vr0, $vr0, 8 +; LA64-NEXT: vsrai.h $vr0, $vr0, 8 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret entry: %A = load <8 x i8>, ptr %ptr %B = sext <8 x i8> %A to <8 x i16> @@ -75,15 +87,27 @@ entry: } define void @load_sext_4i16_to_4i32(ptr %ptr, ptr %dst) { -; CHECK-LABEL: load_sext_4i16_to_4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ld.d $a0, $a0, 0 -; 
CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vilvl.h $vr0, $vr0, $vr0 -; CHECK-NEXT: vslli.w $vr0, $vr0, 16 -; CHECK-NEXT: vsrai.w $vr0, $vr0, 16 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_sext_4i16_to_4i32: +; LA32: # %bb.0: # %entry +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vilvl.h $vr0, $vr0, $vr0 +; LA32-NEXT: vslli.w $vr0, $vr0, 16 +; LA32-NEXT: vsrai.w $vr0, $vr0, 16 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_sext_4i16_to_4i32: +; LA64: # %bb.0: # %entry +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vilvl.h $vr0, $vr0, $vr0 +; LA64-NEXT: vslli.w $vr0, $vr0, 16 +; LA64-NEXT: vsrai.w $vr0, $vr0, 16 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret entry: %A = load <4 x i16>, ptr %ptr %B = sext <4 x i16> %A to <4 x i32> @@ -92,15 +116,26 @@ entry: } define void @load_sext_2i32_to_2i64(ptr %ptr, ptr %dst) { -; CHECK-LABEL: load_sext_2i32_to_2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vshuf4i.w $vr0, $vr0, 16 -; CHECK-NEXT: vslli.d $vr0, $vr0, 32 -; CHECK-NEXT: vsrai.d $vr0, $vr0, 32 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_sext_2i32_to_2i64: +; LA32: # %bb.0: # %entry +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 2 +; LA32-NEXT: vslli.d $vr0, $vr0, 32 +; LA32-NEXT: vsrai.d $vr0, $vr0, 32 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_sext_2i32_to_2i64: +; LA64: # %bb.0: # %entry +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vshuf4i.w $vr0, $vr0, 16 +; LA64-NEXT: vslli.d $vr0, $vr0, 32 +; LA64-NEXT: vsrai.d $vr0, $vr0, 32 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret entry: %A = load <2 x i32>, ptr %ptr %B = sext <2 x i32> %A to <2 x i64> diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-any-ext.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-any-ext.ll index 9b1b584bd9c76..bb008ee5eb903 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-any-ext.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-any-ext.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64 define void @shuffle_any_ext_2i8_to_2i64(ptr %ptr, ptr %dst) nounwind { ; CHECK-LABEL: shuffle_any_ext_2i8_to_2i64: @@ -35,13 +36,22 @@ define void @shuffle_any_ext_2i16_to_2i64(ptr %ptr, ptr %dst) nounwind { } define void @shuffle_any_ext_2i32_to_2i64(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: shuffle_any_ext_2i32_to_2i64: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vshuf4i.w $vr0, $vr0, 16 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: shuffle_any_ext_2i32_to_2i64: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 2 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: shuffle_any_ext_2i32_to_2i64: +; LA64: # %bb.0: +; LA64-NEXT: 
ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vshuf4i.w $vr0, $vr0, 16 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %x = load <2 x i32>, ptr %ptr %y = shufflevector <2 x i32> %x, <2 x i32> poison, <4 x i32> %r = bitcast <4 x i32> %y to <2 x i64> @@ -66,13 +76,23 @@ define void @shuffle_any_ext_4i8_to_4i32(ptr %ptr, ptr %dst) nounwind { } define void @shuffle_any_ext_4i16_to_4i32(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: shuffle_any_ext_4i16_to_4i32: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vilvl.h $vr0, $vr0, $vr0 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: shuffle_any_ext_4i16_to_4i32: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vilvl.h $vr0, $vr0, $vr0 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: shuffle_any_ext_4i16_to_4i32: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vilvl.h $vr0, $vr0, $vr0 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %x = load <4 x i16>, ptr %ptr %y = shufflevector <4 x i16> %x, <4 x i16> poison, <8 x i32> %r = bitcast <8 x i16> %y to <4 x i32> @@ -81,13 +101,23 @@ define void @shuffle_any_ext_4i16_to_4i32(ptr %ptr, ptr %dst) nounwind { } define void @shuffle_any_ext_8i8_to_8i16(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: shuffle_any_ext_8i8_to_8i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vilvl.b $vr0, $vr0, $vr0 -; CHECK-NEXT: vst $vr0, $a1, 0 -; CHECK-NEXT: ret +; LA32-LABEL: shuffle_any_ext_8i8_to_8i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vilvl.b $vr0, $vr0, $vr0 +; LA32-NEXT: vst $vr0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: shuffle_any_ext_8i8_to_8i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vilvl.b $vr0, $vr0, $vr0 +; LA64-NEXT: vst $vr0, $a1, 0 +; LA64-NEXT: ret %x = load <8 x i8>, ptr %ptr %y = shufflevector <8 x i8> %x, <8 x i8> poison, <16 x i32> %r = bitcast <16 x i8> %y to <8 x i16> diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-byte-rotate.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-byte-rotate.ll index b1e3f74cd1739..be241925a2788 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-byte-rotate.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-byte-rotate.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s ;; TODO For these special shuffle mask, we can lower it to vbsll + vbsrl + vor. 
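;; Illustrative sketch (not autogenerated FileCheck output) of the lowering the TODO above has in mind, assuming a rotate of the 16 bytes of $vr0 by one byte position; the register choices are hypothetical: ;; vbsll.v $vr1, $vr0, 1 ; one copy shifted left by 1 byte ;; vbsrl.v $vr2, $vr0, 15 ; the other copy shifted right by 16 - 1 = 15 bytes ;; vor.v $vr0, $vr1, $vr2 ; OR the two shifted copies to form the rotated vector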
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-byte-shift.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-byte-shift.ll index ff0f252ba2bdf..5275d5326f73a 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-byte-shift.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-shuffle-byte-shift.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s define <16 x i8> @shuffle_16i8_vbsll_v_1(<16 x i8> %a) nounwind { diff --git a/llvm/test/CodeGen/LoongArch/lsx/vec-trunc.ll b/llvm/test/CodeGen/LoongArch/lsx/vec-trunc.ll index e056e7c38ddcd..314350acd23d6 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vec-trunc.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vec-trunc.ll @@ -1,13 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx %s -o - | FileCheck %s --check-prefix=LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s --check-prefix=LA64 define void @load_trunc_2i64_to_2i32(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_2i64_to_2i32: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vshuf4i.w $vr0, $vr0, 8 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_2i64_to_2i32: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 2 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_2i64_to_2i32: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vshuf4i.w $vr0, $vr0, 8 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %a = load <2 x i64>, ptr %ptr %trunc = trunc <2 x i64> %a to <2 x i32> store <2 x i32> %trunc, ptr %dst @@ -15,14 +25,24 @@ define void @load_trunc_2i64_to_2i32(ptr %ptr, ptr %dst) nounwind { } define void @load_trunc_2i64_to_2i16(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_2i64_to_2i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0) -; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI1_0) -; CHECK-NEXT: vshuf.h $vr1, $vr0, $vr0 -; CHECK-NEXT: vstelm.w $vr1, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_2i64_to_2i16: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0) +; LA32-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI1_0) +; LA32-NEXT: vshuf.h $vr1, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr1, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_2i64_to_2i16: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: pcalau12i $a0, %pc_hi20(.LCPI1_0) +; LA64-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI1_0) +; LA64-NEXT: vshuf.h $vr1, $vr0, $vr0 +; LA64-NEXT: vstelm.w $vr1, $a1, 0, 0 +; LA64-NEXT: ret %a = load <2 x i64>, ptr %ptr %trunc = trunc <2 x i64> %a to <2 x i16> store <2 x i16> %trunc, ptr %dst @@ -30,14 +50,23 @@ define void @load_trunc_2i64_to_2i16(ptr %ptr, ptr %dst) nounwind { } define void @load_trunc_2i64_to_2i8(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_2i64_to_2i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0) -; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI2_0) -; CHECK-NEXT: 
vshuf.b $vr0, $vr0, $vr0, $vr1 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_2i64_to_2i8: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0) +; LA32-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI2_0) +; LA32-NEXT: vshuf.b $vr0, $vr0, $vr0, $vr1 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_2i64_to_2i8: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0) +; LA64-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI2_0) +; LA64-NEXT: vshuf.b $vr0, $vr0, $vr0, $vr1 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %a = load <2 x i64>, ptr %ptr %trunc = trunc <2 x i64> %a to <2 x i8> store <2 x i8> %trunc, ptr %dst @@ -45,12 +74,22 @@ define void @load_trunc_2i64_to_2i8(ptr %ptr, ptr %dst) nounwind { } define void @load_trunc_4i32_to_4i16(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_4i32_to_4i16: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vpickev.h $vr0, $vr0, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_4i32_to_4i16: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickev.h $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 1 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_4i32_to_4i16: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vpickev.h $vr0, $vr0, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %a = load <4 x i32>, ptr %ptr %trunc = trunc <4 x i32> %a to <4 x i16> store <4 x i16> %trunc, ptr %dst @@ -58,14 +97,24 @@ define void @load_trunc_4i32_to_4i16(ptr %ptr, ptr %dst) nounwind { } define void @load_trunc_4i32_to_4i8(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_4i32_to_4i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0) -; CHECK-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI4_0) -; CHECK-NEXT: vshuf.b $vr0, $vr0, $vr0, $vr1 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_4i32_to_4i8: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0) +; LA32-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI4_0) +; LA32-NEXT: vshuf.b $vr0, $vr0, $vr0, $vr1 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_4i32_to_4i8: +; LA64: # %bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0) +; LA64-NEXT: vld $vr1, $a0, %pc_lo12(.LCPI4_0) +; LA64-NEXT: vshuf.b $vr0, $vr0, $vr0, $vr1 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %a = load <4 x i32>, ptr %ptr %trunc = trunc <4 x i32> %a to <4 x i8> store <4 x i8> %trunc, ptr %dst @@ -73,12 +122,22 @@ define void @load_trunc_4i32_to_4i8(ptr %ptr, ptr %dst) nounwind { } define void @load_trunc_8i16_to_8i8(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_8i16_to_8i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vld $vr0, $a0, 0 -; CHECK-NEXT: vpickev.b $vr0, $vr0, $vr0 -; CHECK-NEXT: vstelm.d $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_8i16_to_8i8: +; LA32: # %bb.0: +; LA32-NEXT: vld $vr0, $a0, 0 +; LA32-NEXT: vpickev.b $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 1 +; LA32-NEXT: st.w $a0, $a1, 4 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_8i16_to_8i8: +; LA64: # 
%bb.0: +; LA64-NEXT: vld $vr0, $a0, 0 +; LA64-NEXT: vpickev.b $vr0, $vr0, $vr0 +; LA64-NEXT: vstelm.d $vr0, $a1, 0, 0 +; LA64-NEXT: ret %a = load <8 x i16>, ptr %ptr %trunc = trunc <8 x i16> %a to <8 x i8> store <8 x i8> %trunc, ptr %dst @@ -86,13 +145,24 @@ define void @load_trunc_8i16_to_8i8(ptr %ptr, ptr %dst) nounwind { } define void @load_trunc_2i32_to_2i16(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_2i32_to_2i16: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vshuf4i.h $vr0, $vr0, 8 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_2i32_to_2i16: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vshuf4i.h $vr0, $vr0, 8 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_2i32_to_2i16: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vshuf4i.h $vr0, $vr0, 8 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %a = load <2 x i32>, ptr %ptr %trunc = trunc <2 x i32> %a to <2 x i16> store <2 x i16> %trunc, ptr %dst @@ -100,15 +170,27 @@ define void @load_trunc_2i32_to_2i16(ptr %ptr, ptr %dst) nounwind { } define void @load_trunc_2i32_to_2i8(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_2i32_to_2i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: pcalau12i $a2, %pc_hi20(.LCPI7_0) -; CHECK-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI7_0) -; CHECK-NEXT: vinsgr2vr.d $vr1, $a0, 0 -; CHECK-NEXT: vshuf.b $vr0, $vr0, $vr1, $vr0 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_2i32_to_2i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: pcalau12i $a3, %pc_hi20(.LCPI7_0) +; LA32-NEXT: vld $vr0, $a3, %pc_lo12(.LCPI7_0) +; LA32-NEXT: vinsgr2vr.w $vr1, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr1, $a0, 1 +; LA32-NEXT: vshuf.b $vr0, $vr0, $vr1, $vr0 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_2i32_to_2i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: pcalau12i $a2, %pc_hi20(.LCPI7_0) +; LA64-NEXT: vld $vr0, $a2, %pc_lo12(.LCPI7_0) +; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 0 +; LA64-NEXT: vshuf.b $vr0, $vr0, $vr1, $vr0 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %a = load <2 x i32>, ptr %ptr %trunc = trunc <2 x i32> %a to <2 x i8> store <2 x i8> %trunc, ptr %dst @@ -116,13 +198,24 @@ define void @load_trunc_2i32_to_2i8(ptr %ptr, ptr %dst) nounwind { } define void @load_trunc_4i16_to_4i8(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_4i16_to_4i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.d $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0 -; CHECK-NEXT: vpickev.b $vr0, $vr0, $vr0 -; CHECK-NEXT: vstelm.w $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_4i16_to_4i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a0, 0 +; LA32-NEXT: ld.w $a0, $a0, 4 +; LA32-NEXT: vinsgr2vr.w $vr0, $a2, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 1 +; LA32-NEXT: vpickev.b $vr0, $vr0, $vr0 +; LA32-NEXT: vpickve2gr.w $a0, $vr0, 0 +; LA32-NEXT: st.w $a0, $a1, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_4i16_to_4i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.d $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0 +; LA64-NEXT: vpickev.b $vr0, $vr0, $vr0 +; LA64-NEXT: vstelm.w $vr0, $a1, 0, 0 +; LA64-NEXT: ret %a = load 
<4 x i16>, ptr %ptr %trunc = trunc <4 x i16> %a to <4 x i8> store <4 x i8> %trunc, ptr %dst @@ -130,17 +223,23 @@ define void @load_trunc_4i16_to_4i8(ptr %ptr, ptr %dst) nounwind { } define void @load_trunc_2i16_to_2i8(ptr %ptr, ptr %dst) nounwind { -; CHECK-LABEL: load_trunc_2i16_to_2i8: -; CHECK: # %bb.0: -; CHECK-NEXT: ld.w $a0, $a0, 0 -; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0 -; CHECK-NEXT: vshuf4i.b $vr0, $vr0, 8 -; CHECK-NEXT: vstelm.h $vr0, $a1, 0, 0 -; CHECK-NEXT: ret +; LA32-LABEL: load_trunc_2i16_to_2i8: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a0, $a0, 0 +; LA32-NEXT: vinsgr2vr.w $vr0, $a0, 0 +; LA32-NEXT: vshuf4i.b $vr0, $vr0, 8 +; LA32-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA32-NEXT: ret +; +; LA64-LABEL: load_trunc_2i16_to_2i8: +; LA64: # %bb.0: +; LA64-NEXT: ld.w $a0, $a0, 0 +; LA64-NEXT: vinsgr2vr.w $vr0, $a0, 0 +; LA64-NEXT: vshuf4i.b $vr0, $vr0, 8 +; LA64-NEXT: vstelm.h $vr0, $a1, 0, 0 +; LA64-NEXT: ret %a = load <2 x i16>, ptr %ptr %trunc = trunc <2 x i16> %a to <2 x i8> store <2 x i8> %trunc, ptr %dst ret void } - - diff --git a/llvm/test/CodeGen/LoongArch/lsx/vmskcond.ll b/llvm/test/CodeGen/LoongArch/lsx/vmskcond.ll index 7fa591db5d1fa..8bdeebef13dd2 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vmskcond.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vmskcond.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx --verify-machineinstrs < %s | FileCheck %s +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx --verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx --verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LA64 define i16 @vmsk_eq_allzeros_i8(<16 x i8 > %a) { ; CHECK-LABEL: vmsk_eq_allzeros_i8: @@ -605,17 +606,29 @@ define i4 @vmsk_eq_allzeros_v4i8(<4 x i8> %a) { } define i32 @vmsk2_eq_allzeros_i8(<32 x i8> %a) { -; CHECK-LABEL: vmsk2_eq_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vseqi.b $vr0, $vr0, 0 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vseqi.b $vr0, $vr1, 0 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_eq_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: vseqi.b $vr0, $vr0, 0 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vseqi.b $vr0, $vr1, 0 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_eq_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: vseqi.b $vr0, $vr0, 0 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vseqi.b $vr0, $vr1, 0 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret entry: %1 = icmp eq <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -623,18 +636,31 @@ entry: } define i32 @vmsk2_sgt_allzeros_i8(<32 x i8> %a) { -; CHECK-LABEL: vmsk2_sgt_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vrepli.b $vr2, 0 -; CHECK-NEXT: vslt.b $vr0, $vr2, $vr0 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vslt.b $vr0, $vr2, $vr1 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a1, 
$vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_sgt_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: vrepli.b $vr2, 0 +; LA32-NEXT: vslt.b $vr0, $vr2, $vr0 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vslt.b $vr0, $vr2, $vr1 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_sgt_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: vrepli.b $vr2, 0 +; LA64-NEXT: vslt.b $vr0, $vr2, $vr0 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vslt.b $vr0, $vr2, $vr1 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret entry: %1 = icmp sgt <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -642,18 +668,31 @@ entry: } define i32 @vmsk2_sgt_allones_i8(<32 x i8> %a) { -; CHECK-LABEL: vmsk2_sgt_allones_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vrepli.b $vr2, -1 -; CHECK-NEXT: vslt.b $vr0, $vr2, $vr0 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vslt.b $vr0, $vr2, $vr1 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_sgt_allones_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: vrepli.b $vr2, -1 +; LA32-NEXT: vslt.b $vr0, $vr2, $vr0 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vslt.b $vr0, $vr2, $vr1 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_sgt_allones_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: vrepli.b $vr2, -1 +; LA64-NEXT: vslt.b $vr0, $vr2, $vr0 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vslt.b $vr0, $vr2, $vr1 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret entry: %1 = icmp sgt <32 x i8> %a, splat (i8 -1) %2 = bitcast <32 x i1> %1 to i32 @@ -661,18 +700,31 @@ entry: } define i32 @vmsk2_sge_allzeros_i8(<32 x i8> %a) { -; CHECK-LABEL: vmsk2_sge_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vrepli.b $vr2, 0 -; CHECK-NEXT: vsle.b $vr0, $vr2, $vr0 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vsle.b $vr0, $vr2, $vr1 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_sge_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: vrepli.b $vr2, 0 +; LA32-NEXT: vsle.b $vr0, $vr2, $vr0 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vsle.b $vr0, $vr2, $vr1 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_sge_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: vrepli.b $vr2, 0 +; LA64-NEXT: vsle.b $vr0, $vr2, $vr0 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vsle.b $vr0, $vr2, $vr1 +; 
LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret entry: %1 = icmp sge <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -680,15 +732,25 @@ entry: } define i32 @vmsk2_slt_allzeros_i8(<32 x i8> %a) { -; CHECK-LABEL: vmsk2_slt_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vmskltz.b $vr0, $vr1 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_slt_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vmskltz.b $vr0, $vr1 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_slt_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vmskltz.b $vr0, $vr1 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret entry: %1 = icmp slt <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -696,17 +758,29 @@ entry: } define i32 @vmsk2_sle_allzeros_i8(<32 x i8> %a) { -; CHECK-LABEL: vmsk2_sle_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vslei.b $vr0, $vr0, 0 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vslei.b $vr0, $vr1, 0 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_sle_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: vslei.b $vr0, $vr0, 0 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vslei.b $vr0, $vr1, 0 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_sle_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: vslei.b $vr0, $vr0, 0 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vslei.b $vr0, $vr1, 0 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret entry: %1 = icmp sle <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -714,17 +788,29 @@ entry: } define i32 @vmsk2_sle_allones_i8(<32 x i8> %a) { -; CHECK-LABEL: vmsk2_sle_allones_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vslei.b $vr0, $vr0, -1 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vslei.b $vr0, $vr1, -1 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_sle_allones_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: vslei.b $vr0, $vr0, -1 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vslei.b $vr0, $vr1, -1 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_sle_allones_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: vslei.b $vr0, $vr0, -1 +; 
LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vslei.b $vr0, $vr1, -1 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret entry: %1 = icmp sle <32 x i8> %a, splat (i8 -1) %2 = bitcast <32 x i1> %1 to i32 @@ -732,19 +818,33 @@ entry: } define i32 @vmsk2_ne_allzeros_i8(<32 x i8> %a) { -; CHECK-LABEL: vmsk2_ne_allzeros_i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vseqi.b $vr0, $vr0, 0 -; CHECK-NEXT: vxori.b $vr0, $vr0, 255 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vseqi.b $vr0, $vr1, 0 -; CHECK-NEXT: vxori.b $vr0, $vr0, 255 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_ne_allzeros_i8: +; LA32: # %bb.0: # %entry +; LA32-NEXT: vseqi.b $vr0, $vr0, 0 +; LA32-NEXT: vxori.b $vr0, $vr0, 255 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vseqi.b $vr0, $vr1, 0 +; LA32-NEXT: vxori.b $vr0, $vr0, 255 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_ne_allzeros_i8: +; LA64: # %bb.0: # %entry +; LA64-NEXT: vseqi.b $vr0, $vr0, 0 +; LA64-NEXT: vxori.b $vr0, $vr0, 255 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vseqi.b $vr0, $vr1, 0 +; LA64-NEXT: vxori.b $vr0, $vr0, 255 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret entry: %1 = icmp ne <32 x i8> %a, splat (i8 0) %2 = bitcast <32 x i1> %1 to i32 @@ -752,38 +852,66 @@ entry: } define i32 @vmsk2_sgt_v32i8(<32 x i8> %a, <32 x i8> %b) { -; CHECK-LABEL: vmsk2_sgt_v32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vslt.b $vr0, $vr2, $vr0 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vslt.b $vr0, $vr3, $vr1 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_sgt_v32i8: +; LA32: # %bb.0: +; LA32-NEXT: vslt.b $vr0, $vr2, $vr0 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vslt.b $vr0, $vr3, $vr1 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_sgt_v32i8: +; LA64: # %bb.0: +; LA64-NEXT: vslt.b $vr0, $vr2, $vr0 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vslt.b $vr0, $vr3, $vr1 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret %x = icmp sgt <32 x i8> %a, %b %res = bitcast <32 x i1> %x to i32 ret i32 %res } define i32 @vmsk2_sgt_and_sgt_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) { -; CHECK-LABEL: vmsk2_sgt_and_sgt_v32i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vslt.b $vr0, $vr2, $vr0 -; CHECK-NEXT: vslt.b $vr1, $vr3, $vr1 -; CHECK-NEXT: vslt.b $vr2, $vr6, $vr4 -; CHECK-NEXT: vslt.b $vr3, $vr7, $vr5 -; CHECK-NEXT: vand.v $vr1, $vr1, $vr3 -; CHECK-NEXT: vand.v $vr0, $vr0, $vr2 -; CHECK-NEXT: vmskltz.b $vr0, 
$vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vmskltz.b $vr0, $vr1 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_sgt_and_sgt_v32i8: +; LA32: # %bb.0: +; LA32-NEXT: vslt.b $vr0, $vr2, $vr0 +; LA32-NEXT: vslt.b $vr1, $vr3, $vr1 +; LA32-NEXT: vslt.b $vr2, $vr6, $vr4 +; LA32-NEXT: vslt.b $vr3, $vr7, $vr5 +; LA32-NEXT: vand.v $vr1, $vr1, $vr3 +; LA32-NEXT: vand.v $vr0, $vr0, $vr2 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vmskltz.b $vr0, $vr1 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_sgt_and_sgt_v32i8: +; LA64: # %bb.0: +; LA64-NEXT: vslt.b $vr0, $vr2, $vr0 +; LA64-NEXT: vslt.b $vr1, $vr3, $vr1 +; LA64-NEXT: vslt.b $vr2, $vr6, $vr4 +; LA64-NEXT: vslt.b $vr3, $vr7, $vr5 +; LA64-NEXT: vand.v $vr1, $vr1, $vr3 +; LA64-NEXT: vand.v $vr0, $vr0, $vr2 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vmskltz.b $vr0, $vr1 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret %x0 = icmp sgt <32 x i8> %a, %b %x1 = icmp sgt <32 x i8> %c, %d %y = and <32 x i1> %x0, %x1 @@ -792,17 +920,29 @@ define i32 @vmsk2_sgt_and_sgt_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <3 } define i32 @vmsk2_trunc_i8(<32 x i8> %a) { -; CHECK-LABEL: vmsk2_trunc_i8: -; CHECK: # %bb.0: -; CHECK-NEXT: vslli.b $vr0, $vr0, 7 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a0, $vr0, 0 -; CHECK-NEXT: vslli.b $vr0, $vr1, 7 -; CHECK-NEXT: vmskltz.b $vr0, $vr0 -; CHECK-NEXT: vpickve2gr.hu $a1, $vr0, 0 -; CHECK-NEXT: slli.d $a1, $a1, 16 -; CHECK-NEXT: or $a0, $a0, $a1 -; CHECK-NEXT: ret +; LA32-LABEL: vmsk2_trunc_i8: +; LA32: # %bb.0: +; LA32-NEXT: vslli.b $vr0, $vr0, 7 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA32-NEXT: vslli.b $vr0, $vr1, 7 +; LA32-NEXT: vmskltz.b $vr0, $vr0 +; LA32-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA32-NEXT: slli.w $a1, $a1, 16 +; LA32-NEXT: or $a0, $a0, $a1 +; LA32-NEXT: ret +; +; LA64-LABEL: vmsk2_trunc_i8: +; LA64: # %bb.0: +; LA64-NEXT: vslli.b $vr0, $vr0, 7 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a0, $vr0, 0 +; LA64-NEXT: vslli.b $vr0, $vr1, 7 +; LA64-NEXT: vmskltz.b $vr0, $vr0 +; LA64-NEXT: vpickve2gr.hu $a1, $vr0, 0 +; LA64-NEXT: slli.d $a1, $a1, 16 +; LA64-NEXT: or $a0, $a0, $a1 +; LA64-NEXT: ret %y = trunc <32 x i8> %a to <32 x i1> %res = bitcast <32 x i1> %y to i32 ret i32 %res diff --git a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll index 5dbff4a402b3d..8f25a6ba62f9f 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s define void @select_v16i8_imm(ptr %res, ptr %a0) nounwind { ; CHECK-LABEL: select_v16i8_imm: @@ -50,26 +50,14 @@ define void @select_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind { } define void @select_v4i32(ptr %res, ptr 
%a0, ptr %a1) nounwind { -; LA32-LABEL: select_v4i32: -; LA32: # %bb.0: -; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: vld $vr1, $a2, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI3_0) -; LA32-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: select_v4i32: -; LA64: # %bb.0: -; LA64-NEXT: vld $vr0, $a1, 0 -; LA64-NEXT: vld $vr1, $a2, 0 -; LA64-NEXT: ori $a1, $zero, 0 -; LA64-NEXT: lu32i.d $a1, -1 -; LA64-NEXT: vreplgr2vr.d $vr2, $a1 -; LA64-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: select_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vldi $vr2, -1552 +; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret %v0 = load <4 x i32>, ptr %a0 %v1 = load <4 x i32>, ptr %a1 %sel = select <4 x i1> , <4 x i32> %v0, <4 x i32> %v1 diff --git a/llvm/test/CodeGen/LoongArch/lsx/widen-shuffle-mask.ll b/llvm/test/CodeGen/LoongArch/lsx/widen-shuffle-mask.ll index 54328260d9d14..42ef9133bf04d 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/widen-shuffle-mask.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/widen-shuffle-mask.ll @@ -1,4 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s define <16 x i8> @widen_shuffle_mask_v16i8_to_v8i16(<16 x i8> %a, <16 x i8> %b) { diff --git a/llvm/test/CodeGen/LoongArch/merge-offset-option.ll b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll new file mode 100644 index 0000000000000..e5351a6589cf7 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll @@ -0,0 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \ +; RUN: < %s | FileCheck %s --check-prefix=MERGE +; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \ +; RUN: --loongarch-enable-merge-offset=false < %s | FileCheck %s --check-prefix=NO_MERGE + +@g = dso_local global i32 zeroinitializer, align 4 + +define void @foo() nounwind { +; MERGE-LABEL: foo: +; MERGE: # %bb.0: +; MERGE-NEXT: pcalau12i $a0, %pc_hi20(g) +; MERGE-NEXT: ld.w $zero, $a0, %pc_lo12(g) +; MERGE-NEXT: ret +; +; NO_MERGE-LABEL: foo: +; NO_MERGE: # %bb.0: +; NO_MERGE-NEXT: pcalau12i $a0, %pc_hi20(g) +; NO_MERGE-NEXT: addi.d $a0, $a0, %pc_lo12(g) +; NO_MERGE-NEXT: ld.w $zero, $a0, 0 +; NO_MERGE-NEXT: ret + %v = load volatile i32, ptr @g + ret void +} diff --git a/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points-with-regs-parse.mir b/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points-with-regs-parse.mir index d23f2f7bd585e..e01114726385c 100644 --- a/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points-with-regs-parse.mir +++ b/llvm/test/CodeGen/MIR/X86/frame-info-save-restore-points-with-regs-parse.mir @@ -1,6 +1,8 @@ # RUN: llc -run-pass none -o - %s | FileCheck %s --- | + target triple = "x86_64-unknown-linux-gnu" + define ptr @foo(ptr %ptr, i64 %p2, i64 %p3, i64 %p4, i64 %p5, i64 %p6) { entry: %tobool.not = icmp eq ptr %ptr, null diff --git a/llvm/test/CodeGen/Mips/atomic-min-max.ll b/llvm/test/CodeGen/Mips/atomic-min-max.ll index 85bf6d02c7d8f..02ae8d2b7480e 100644 --- a/llvm/test/CodeGen/Mips/atomic-min-max.ll +++ 
b/llvm/test/CodeGen/Mips/atomic-min-max.ll @@ -3,6 +3,7 @@ ; RUN: llc -mtriple=mips-elf -O0 -mcpu=mips32r6 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPSR6 ; RUN: llc -mtriple=mips-elf -O0 -mcpu=mips32r2 -mattr=+micromips -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MM ; RUN: llc -mtriple=mips-elf -O0 -mcpu=mips32r6 -mattr=+micromips -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MMR6 +; RUN: llc -mtriple=mipsel-elf -O0 -mcpu=mips2 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPS2 ; RUN: llc -mtriple=mipsel-elf -O0 -mcpu=mips32 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPS32 ; RUN: llc -mtriple=mipsel-elf -O0 -mcpu=mips32r2 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPSEL ; RUN: llc -mtriple=mipsel-elf -O0 -mcpu=mips32r6 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPSELR6 @@ -31,6 +32,33 @@ define i32 @test_max_32(ptr nocapture %ptr, i32 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_max_32: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: sync +; MIPS2-NEXT: $BB0_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($4) +; MIPS2-NEXT: slt $3, $2, $5 +; MIPS2-NEXT: move $1, $5 +; MIPS2-NEXT: beqz $3, $BB0_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB0_1 Depth=1 +; MIPS2-NEXT: j $BB0_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB0_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB0_1 Depth=1 +; MIPS2-NEXT: move $1, $2 +; MIPS2-NEXT: $BB0_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB0_1 Depth=1 +; MIPS2-NEXT: sc $1, 0($4) +; MIPS2-NEXT: beqz $1, $BB0_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: sync +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_max_32: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: sync @@ -251,6 +279,33 @@ define i32 @test_min_32(ptr nocapture %ptr, i32 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_min_32: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: sync +; MIPS2-NEXT: $BB1_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($4) +; MIPS2-NEXT: slt $3, $2, $5 +; MIPS2-NEXT: move $1, $2 +; MIPS2-NEXT: beqz $3, $BB1_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB1_1 Depth=1 +; MIPS2-NEXT: j $BB1_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB1_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB1_1 Depth=1 +; MIPS2-NEXT: move $1, $5 +; MIPS2-NEXT: $BB1_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB1_1 Depth=1 +; MIPS2-NEXT: sc $1, 0($4) +; MIPS2-NEXT: beqz $1, $BB1_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: sync +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_min_32: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: sync @@ -471,6 +526,33 @@ define i32 @test_umax_32(ptr nocapture %ptr, i32 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_umax_32: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: sync +; MIPS2-NEXT: $BB2_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($4) +; MIPS2-NEXT: sltu $3, $2, $5 +; MIPS2-NEXT: move $1, $5 +; MIPS2-NEXT: beqz $3, $BB2_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB2_1 Depth=1 +; MIPS2-NEXT: j $BB2_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB2_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB2_1 Depth=1 +; MIPS2-NEXT: move $1, $2 +; MIPS2-NEXT: $BB2_4: # %entry +; MIPS2-NEXT: # 
in Loop: Header=BB2_1 Depth=1 +; MIPS2-NEXT: sc $1, 0($4) +; MIPS2-NEXT: beqz $1, $BB2_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: sync +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_umax_32: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: sync @@ -691,6 +773,33 @@ define i32 @test_umin_32(ptr nocapture %ptr, i32 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_umin_32: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: sync +; MIPS2-NEXT: $BB3_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($4) +; MIPS2-NEXT: sltu $3, $2, $5 +; MIPS2-NEXT: move $1, $2 +; MIPS2-NEXT: beqz $3, $BB3_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB3_1 Depth=1 +; MIPS2-NEXT: j $BB3_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB3_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB3_1 Depth=1 +; MIPS2-NEXT: move $1, $5 +; MIPS2-NEXT: $BB3_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB3_1 Depth=1 +; MIPS2-NEXT: sc $1, 0($4) +; MIPS2-NEXT: beqz $1, $BB3_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: sync +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_umin_32: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: sync @@ -936,6 +1045,58 @@ define i16 @test_max_16(ptr nocapture %ptr, i16 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_max_16: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: addiu $sp, $sp, -8 +; MIPS2-NEXT: .cfi_def_cfa_offset 8 +; MIPS2-NEXT: # kill: def $at killed $a1 +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $1, $zero, -4 +; MIPS2-NEXT: and $6, $4, $1 +; MIPS2-NEXT: andi $1, $4, 3 +; MIPS2-NEXT: sll $10, $1, 3 +; MIPS2-NEXT: ori $1, $zero, 65535 +; MIPS2-NEXT: sllv $8, $1, $10 +; MIPS2-NEXT: nor $9, $zero, $8 +; MIPS2-NEXT: sllv $7, $5, $10 +; MIPS2-NEXT: $BB4_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($6) +; MIPS2-NEXT: srav $4, $2, $10 +; MIPS2-NEXT: sll $4, $4, 16 +; MIPS2-NEXT: sra $4, $4, 16 +; MIPS2-NEXT: or $1, $zero, $4 +; MIPS2-NEXT: sllv $4, $4, $10 +; MIPS2-NEXT: slt $5, $4, $7 +; MIPS2-NEXT: move $3, $7 +; MIPS2-NEXT: beqz $5, $BB4_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB4_1 Depth=1 +; MIPS2-NEXT: j $BB4_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB4_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB4_1 Depth=1 +; MIPS2-NEXT: move $3, $4 +; MIPS2-NEXT: $BB4_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB4_1 Depth=1 +; MIPS2-NEXT: and $3, $3, $8 +; MIPS2-NEXT: and $4, $2, $9 +; MIPS2-NEXT: or $4, $4, $3 +; MIPS2-NEXT: sc $4, 0($6) +; MIPS2-NEXT: beqz $4, $BB4_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: .insn +; MIPS2-NEXT: # %bb.6: # %entry +; MIPS2-NEXT: sw $1, 4($sp) # 4-byte Folded Spill +; MIPS2-NEXT: # %bb.7: # %entry +; MIPS2-NEXT: lw $2, 4($sp) # 4-byte Folded Reload +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $sp, $sp, 8 +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_max_16: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: addiu $sp, $sp, -8 @@ -1476,6 +1637,58 @@ define i16 @test_min_16(ptr nocapture %ptr, i16 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_min_16: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: addiu $sp, $sp, -8 +; MIPS2-NEXT: .cfi_def_cfa_offset 8 +; MIPS2-NEXT: # kill: def $at killed $a1 +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $1, $zero, -4 +; MIPS2-NEXT: and $6, $4, $1 +; MIPS2-NEXT: andi $1, $4, 3 +; MIPS2-NEXT: sll $10, $1, 3 +; MIPS2-NEXT: 
ori $1, $zero, 65535 +; MIPS2-NEXT: sllv $8, $1, $10 +; MIPS2-NEXT: nor $9, $zero, $8 +; MIPS2-NEXT: sllv $7, $5, $10 +; MIPS2-NEXT: $BB5_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($6) +; MIPS2-NEXT: srav $4, $2, $10 +; MIPS2-NEXT: sll $4, $4, 16 +; MIPS2-NEXT: sra $4, $4, 16 +; MIPS2-NEXT: or $1, $zero, $4 +; MIPS2-NEXT: sllv $4, $4, $10 +; MIPS2-NEXT: slt $5, $4, $7 +; MIPS2-NEXT: move $3, $4 +; MIPS2-NEXT: beqz $5, $BB5_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB5_1 Depth=1 +; MIPS2-NEXT: j $BB5_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB5_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB5_1 Depth=1 +; MIPS2-NEXT: move $3, $7 +; MIPS2-NEXT: $BB5_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB5_1 Depth=1 +; MIPS2-NEXT: and $3, $3, $8 +; MIPS2-NEXT: and $4, $2, $9 +; MIPS2-NEXT: or $4, $4, $3 +; MIPS2-NEXT: sc $4, 0($6) +; MIPS2-NEXT: beqz $4, $BB5_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: .insn +; MIPS2-NEXT: # %bb.6: # %entry +; MIPS2-NEXT: sw $1, 4($sp) # 4-byte Folded Spill +; MIPS2-NEXT: # %bb.7: # %entry +; MIPS2-NEXT: lw $2, 4($sp) # 4-byte Folded Reload +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $sp, $sp, 8 +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_min_16: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: addiu $sp, $sp, -8 @@ -2015,6 +2228,57 @@ define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_umax_16: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: addiu $sp, $sp, -8 +; MIPS2-NEXT: .cfi_def_cfa_offset 8 +; MIPS2-NEXT: # kill: def $at killed $a1 +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $1, $zero, -4 +; MIPS2-NEXT: and $6, $4, $1 +; MIPS2-NEXT: andi $1, $4, 3 +; MIPS2-NEXT: sll $10, $1, 3 +; MIPS2-NEXT: ori $1, $zero, 65535 +; MIPS2-NEXT: sllv $8, $1, $10 +; MIPS2-NEXT: nor $9, $zero, $8 +; MIPS2-NEXT: sllv $7, $5, $10 +; MIPS2-NEXT: $BB6_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($6) +; MIPS2-NEXT: srav $4, $2, $10 +; MIPS2-NEXT: andi $4, $4, 65535 +; MIPS2-NEXT: or $1, $zero, $4 +; MIPS2-NEXT: sllv $4, $4, $10 +; MIPS2-NEXT: sltu $5, $4, $7 +; MIPS2-NEXT: move $3, $7 +; MIPS2-NEXT: beqz $5, $BB6_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB6_1 Depth=1 +; MIPS2-NEXT: j $BB6_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB6_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB6_1 Depth=1 +; MIPS2-NEXT: move $3, $4 +; MIPS2-NEXT: $BB6_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB6_1 Depth=1 +; MIPS2-NEXT: and $3, $3, $8 +; MIPS2-NEXT: and $4, $2, $9 +; MIPS2-NEXT: or $4, $4, $3 +; MIPS2-NEXT: sc $4, 0($6) +; MIPS2-NEXT: beqz $4, $BB6_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: .insn +; MIPS2-NEXT: # %bb.6: # %entry +; MIPS2-NEXT: sw $1, 4($sp) # 4-byte Folded Spill +; MIPS2-NEXT: # %bb.7: # %entry +; MIPS2-NEXT: lw $2, 4($sp) # 4-byte Folded Reload +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $sp, $sp, 8 +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_umax_16: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: addiu $sp, $sp, -8 @@ -2553,6 +2817,57 @@ define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_umin_16: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: addiu $sp, $sp, -8 +; MIPS2-NEXT: .cfi_def_cfa_offset 8 +; MIPS2-NEXT: # kill: def $at killed $a1 +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $1, $zero, -4 +; 
MIPS2-NEXT: and $6, $4, $1 +; MIPS2-NEXT: andi $1, $4, 3 +; MIPS2-NEXT: sll $10, $1, 3 +; MIPS2-NEXT: ori $1, $zero, 65535 +; MIPS2-NEXT: sllv $8, $1, $10 +; MIPS2-NEXT: nor $9, $zero, $8 +; MIPS2-NEXT: sllv $7, $5, $10 +; MIPS2-NEXT: $BB7_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($6) +; MIPS2-NEXT: srav $4, $2, $10 +; MIPS2-NEXT: andi $4, $4, 65535 +; MIPS2-NEXT: or $1, $zero, $4 +; MIPS2-NEXT: sllv $4, $4, $10 +; MIPS2-NEXT: sltu $5, $4, $7 +; MIPS2-NEXT: move $3, $4 +; MIPS2-NEXT: beqz $5, $BB7_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB7_1 Depth=1 +; MIPS2-NEXT: j $BB7_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB7_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB7_1 Depth=1 +; MIPS2-NEXT: move $3, $7 +; MIPS2-NEXT: $BB7_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB7_1 Depth=1 +; MIPS2-NEXT: and $3, $3, $8 +; MIPS2-NEXT: and $4, $2, $9 +; MIPS2-NEXT: or $4, $4, $3 +; MIPS2-NEXT: sc $4, 0($6) +; MIPS2-NEXT: beqz $4, $BB7_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: .insn +; MIPS2-NEXT: # %bb.6: # %entry +; MIPS2-NEXT: sw $1, 4($sp) # 4-byte Folded Spill +; MIPS2-NEXT: # %bb.7: # %entry +; MIPS2-NEXT: lw $2, 4($sp) # 4-byte Folded Reload +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $sp, $sp, 8 +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_umin_16: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: addiu $sp, $sp, -8 @@ -3092,6 +3407,58 @@ define i8 @test_max_8(ptr nocapture %ptr, i8 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_max_8: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: addiu $sp, $sp, -8 +; MIPS2-NEXT: .cfi_def_cfa_offset 8 +; MIPS2-NEXT: # kill: def $at killed $a1 +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $1, $zero, -4 +; MIPS2-NEXT: and $6, $4, $1 +; MIPS2-NEXT: andi $1, $4, 3 +; MIPS2-NEXT: sll $10, $1, 3 +; MIPS2-NEXT: ori $1, $zero, 255 +; MIPS2-NEXT: sllv $8, $1, $10 +; MIPS2-NEXT: nor $9, $zero, $8 +; MIPS2-NEXT: sllv $7, $5, $10 +; MIPS2-NEXT: $BB8_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($6) +; MIPS2-NEXT: srav $4, $2, $10 +; MIPS2-NEXT: sll $4, $4, 24 +; MIPS2-NEXT: sra $4, $4, 24 +; MIPS2-NEXT: or $1, $zero, $4 +; MIPS2-NEXT: sllv $4, $4, $10 +; MIPS2-NEXT: slt $5, $4, $7 +; MIPS2-NEXT: move $3, $7 +; MIPS2-NEXT: beqz $5, $BB8_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB8_1 Depth=1 +; MIPS2-NEXT: j $BB8_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB8_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB8_1 Depth=1 +; MIPS2-NEXT: move $3, $4 +; MIPS2-NEXT: $BB8_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB8_1 Depth=1 +; MIPS2-NEXT: and $3, $3, $8 +; MIPS2-NEXT: and $4, $2, $9 +; MIPS2-NEXT: or $4, $4, $3 +; MIPS2-NEXT: sc $4, 0($6) +; MIPS2-NEXT: beqz $4, $BB8_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: .insn +; MIPS2-NEXT: # %bb.6: # %entry +; MIPS2-NEXT: sw $1, 4($sp) # 4-byte Folded Spill +; MIPS2-NEXT: # %bb.7: # %entry +; MIPS2-NEXT: lw $2, 4($sp) # 4-byte Folded Reload +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $sp, $sp, 8 +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_max_8: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: addiu $sp, $sp, -8 @@ -3631,6 +3998,58 @@ define i8 @test_min_8(ptr nocapture %ptr, i8 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_min_8: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: addiu $sp, $sp, -8 +; MIPS2-NEXT: .cfi_def_cfa_offset 8 +; MIPS2-NEXT: # 
kill: def $at killed $a1 +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $1, $zero, -4 +; MIPS2-NEXT: and $6, $4, $1 +; MIPS2-NEXT: andi $1, $4, 3 +; MIPS2-NEXT: sll $10, $1, 3 +; MIPS2-NEXT: ori $1, $zero, 255 +; MIPS2-NEXT: sllv $8, $1, $10 +; MIPS2-NEXT: nor $9, $zero, $8 +; MIPS2-NEXT: sllv $7, $5, $10 +; MIPS2-NEXT: $BB9_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($6) +; MIPS2-NEXT: srav $4, $2, $10 +; MIPS2-NEXT: sll $4, $4, 24 +; MIPS2-NEXT: sra $4, $4, 24 +; MIPS2-NEXT: or $1, $zero, $4 +; MIPS2-NEXT: sllv $4, $4, $10 +; MIPS2-NEXT: slt $5, $4, $7 +; MIPS2-NEXT: move $3, $4 +; MIPS2-NEXT: beqz $5, $BB9_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB9_1 Depth=1 +; MIPS2-NEXT: j $BB9_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB9_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB9_1 Depth=1 +; MIPS2-NEXT: move $3, $7 +; MIPS2-NEXT: $BB9_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB9_1 Depth=1 +; MIPS2-NEXT: and $3, $3, $8 +; MIPS2-NEXT: and $4, $2, $9 +; MIPS2-NEXT: or $4, $4, $3 +; MIPS2-NEXT: sc $4, 0($6) +; MIPS2-NEXT: beqz $4, $BB9_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: .insn +; MIPS2-NEXT: # %bb.6: # %entry +; MIPS2-NEXT: sw $1, 4($sp) # 4-byte Folded Spill +; MIPS2-NEXT: # %bb.7: # %entry +; MIPS2-NEXT: lw $2, 4($sp) # 4-byte Folded Reload +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $sp, $sp, 8 +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_min_8: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: addiu $sp, $sp, -8 @@ -4170,6 +4589,57 @@ define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_umax_8: +; MIPS2: # %bb.0: # %entry +; MIPS2-NEXT: addiu $sp, $sp, -8 +; MIPS2-NEXT: .cfi_def_cfa_offset 8 +; MIPS2-NEXT: # kill: def $at killed $a1 +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $1, $zero, -4 +; MIPS2-NEXT: and $6, $4, $1 +; MIPS2-NEXT: andi $1, $4, 3 +; MIPS2-NEXT: sll $10, $1, 3 +; MIPS2-NEXT: ori $1, $zero, 255 +; MIPS2-NEXT: sllv $8, $1, $10 +; MIPS2-NEXT: nor $9, $zero, $8 +; MIPS2-NEXT: sllv $7, $5, $10 +; MIPS2-NEXT: $BB10_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($6) +; MIPS2-NEXT: srav $4, $2, $10 +; MIPS2-NEXT: andi $4, $4, 255 +; MIPS2-NEXT: or $1, $zero, $4 +; MIPS2-NEXT: sllv $4, $4, $10 +; MIPS2-NEXT: sltu $5, $4, $7 +; MIPS2-NEXT: move $3, $7 +; MIPS2-NEXT: beqz $5, $BB10_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB10_1 Depth=1 +; MIPS2-NEXT: j $BB10_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB10_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB10_1 Depth=1 +; MIPS2-NEXT: move $3, $4 +; MIPS2-NEXT: $BB10_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB10_1 Depth=1 +; MIPS2-NEXT: and $3, $3, $8 +; MIPS2-NEXT: and $4, $2, $9 +; MIPS2-NEXT: or $4, $4, $3 +; MIPS2-NEXT: sc $4, 0($6) +; MIPS2-NEXT: beqz $4, $BB10_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: .insn +; MIPS2-NEXT: # %bb.6: # %entry +; MIPS2-NEXT: sw $1, 4($sp) # 4-byte Folded Spill +; MIPS2-NEXT: # %bb.7: # %entry +; MIPS2-NEXT: lw $2, 4($sp) # 4-byte Folded Reload +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $sp, $sp, 8 +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_umax_8: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: addiu $sp, $sp, -8 @@ -4708,6 +5178,57 @@ define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) { ; MIPS-NEXT: jr $ra ; MIPS-NEXT: nop ; +; MIPS2-LABEL: test_umin_8: +; MIPS2: # %bb.0: # %entry 
+; MIPS2-NEXT: addiu $sp, $sp, -8 +; MIPS2-NEXT: .cfi_def_cfa_offset 8 +; MIPS2-NEXT: # kill: def $at killed $a1 +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $1, $zero, -4 +; MIPS2-NEXT: and $6, $4, $1 +; MIPS2-NEXT: andi $1, $4, 3 +; MIPS2-NEXT: sll $10, $1, 3 +; MIPS2-NEXT: ori $1, $zero, 255 +; MIPS2-NEXT: sllv $8, $1, $10 +; MIPS2-NEXT: nor $9, $zero, $8 +; MIPS2-NEXT: sllv $7, $5, $10 +; MIPS2-NEXT: $BB11_1: # %entry +; MIPS2-NEXT: # =>This Inner Loop Header: Depth=1 +; MIPS2-NEXT: ll $2, 0($6) +; MIPS2-NEXT: srav $4, $2, $10 +; MIPS2-NEXT: andi $4, $4, 255 +; MIPS2-NEXT: or $1, $zero, $4 +; MIPS2-NEXT: sllv $4, $4, $10 +; MIPS2-NEXT: sltu $5, $4, $7 +; MIPS2-NEXT: move $3, $4 +; MIPS2-NEXT: beqz $5, $BB11_3 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.2: # %entry +; MIPS2-NEXT: # in Loop: Header=BB11_1 Depth=1 +; MIPS2-NEXT: j $BB11_4 +; MIPS2-NEXT: nop +; MIPS2-NEXT: $BB11_3: # %entry +; MIPS2-NEXT: # in Loop: Header=BB11_1 Depth=1 +; MIPS2-NEXT: move $3, $7 +; MIPS2-NEXT: $BB11_4: # %entry +; MIPS2-NEXT: # in Loop: Header=BB11_1 Depth=1 +; MIPS2-NEXT: and $3, $3, $8 +; MIPS2-NEXT: and $4, $2, $9 +; MIPS2-NEXT: or $4, $4, $3 +; MIPS2-NEXT: sc $4, 0($6) +; MIPS2-NEXT: beqz $4, $BB11_1 +; MIPS2-NEXT: nop +; MIPS2-NEXT: # %bb.5: # %entry +; MIPS2-NEXT: .insn +; MIPS2-NEXT: # %bb.6: # %entry +; MIPS2-NEXT: sw $1, 4($sp) # 4-byte Folded Spill +; MIPS2-NEXT: # %bb.7: # %entry +; MIPS2-NEXT: lw $2, 4($sp) # 4-byte Folded Reload +; MIPS2-NEXT: sync +; MIPS2-NEXT: addiu $sp, $sp, 8 +; MIPS2-NEXT: jr $ra +; MIPS2-NEXT: nop +; ; MIPSR6-LABEL: test_umin_8: ; MIPSR6: # %bb.0: # %entry ; MIPSR6-NEXT: addiu $sp, $sp, -8 diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float-varargs.ll b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float-varargs.ll new file mode 100644 index 0000000000000..8cbc879310f61 --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float-varargs.ll @@ -0,0 +1,148 @@ +; RUN: llc -mtriple=mips -relocation-model=static -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,O32 %s +; RUN: llc -mtriple=mipsel -relocation-model=static -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,O32 %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,N32,NEW,NEWBE %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,N32,NEW,NEWLE %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64,N64,NEW,NEWBE %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64,N64,NEW,NEWLE %s + +@floats = global [11 x float] zeroinitializer +@doubles = global [11 x double] zeroinitializer + +define void @double_args(double %a, ...) + nounwind { +entry: + %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1 + store volatile double %a, ptr %0 + + %ap = alloca ptr + call void @llvm.va_start(ptr %ap) + %b = va_arg ptr %ap, double + %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2 + store volatile double %b, ptr %1 + call void @llvm.va_end(ptr %ap) + ret void +} + +; ALL-LABEL: double_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. 
+; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) + +; O32 forbids using floating point registers for the non-variable portion. +; N32/N64 allow it. +; O32-DAG: sw $4, 8([[R2]]) +; O32-DAG: sw $5, 12([[R2]]) +; NEW-DAG: sd $4, 8([[R2]]) + +; The varargs portion is dumped to stack +; O32-DAG: sw $6, 16($sp) +; O32-DAG: sw $7, 20($sp) +; NEW-DAG: sd $5, 8($sp) +; NEW-DAG: sd $6, 16($sp) +; NEW-DAG: sd $7, 24($sp) +; NEW-DAG: sd $8, 32($sp) +; NEW-DAG: sd $9, 40($sp) +; NEW-DAG: sd $10, 48($sp) +; NEW-DAG: sd $11, 56($sp) + +; Get the varargs pointer +; O32 has 4 bytes padding, 4 bytes for the varargs pointer, and 8 bytes reserved +; for arguments 1 and 2. +; N32/N64 has 8 bytes for the varargs pointer, and no reserved area. +; O32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 16 +; O32-DAG: sw [[VAPTR]], 4($sp) +; N32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 8 +; N32-DAG: sw [[VAPTR]], 4($sp) +; N64-DAG: daddiu [[VAPTR:\$[0-9]+]], $sp, 8 +; N64-DAG: sd [[VAPTR]], 0($sp) + +; Increment the pointer then get the varargs arg +; LLVM will rebind the load to the stack pointer instead of the varargs pointer +; during lowering. This is fine and doesn't change the behaviour. +; O32-DAG: addiu [[VAPTR]], [[VAPTR]], 8 +; N32-DAG: addiu [[VAPTR]], [[VAPTR]], 8 +; N64-DAG: daddiu [[VAPTR]], [[VAPTR]], 8 +; O32-DAG: lw [[R3:\$[0-9]+]], 16($sp) +; O32-DAG: lw [[R4:\$[0-9]+]], 20($sp) +; O32-DAG: sw [[R3]], 16([[R2]]) +; O32-DAG: sw [[R4]], 20([[R2]]) +; NEW-DAG: ld [[R3:\$[0-9]+]], 8($sp) +; NEW-DAG: sd [[R3]], 16([[R2]]) + +define void @float_args(float %a, ...) nounwind { +entry: + %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1 + store volatile float %a, ptr %0 + + %ap = alloca ptr + call void @llvm.va_start(ptr %ap) + %b = va_arg ptr %ap, float + %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2 + store volatile float %b, ptr %1 + call void @llvm.va_end(ptr %ap) + ret void +} + +; ALL-LABEL: float_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) + +; The first four arguments are the same in O32/N32/N64. +; The non-variable portion should be unaffected. +; O32-DAG: mtc1 $4, $f0 +; O32-DAG: swc1 $f0, 4([[R2]]) +; NEW-DAG: swc1 $f12, 4([[R2]]) + +; The varargs portion is dumped to stack +; O32-DAG: sw $5, 12($sp) +; O32-DAG: sw $6, 16($sp) +; O32-DAG: sw $7, 20($sp) +; NEW-DAG: sd $5, 8($sp) +; NEW-DAG: sd $6, 16($sp) +; NEW-DAG: sd $7, 24($sp) +; NEW-DAG: sd $8, 32($sp) +; NEW-DAG: sd $9, 40($sp) +; NEW-DAG: sd $10, 48($sp) +; NEW-DAG: sd $11, 56($sp) + +; Get the varargs pointer +; O32 has 4 bytes padding, 4 bytes for the varargs pointer, and should have 8 +; bytes reserved for arguments 1 and 2 (the first float arg) but as discussed in +; arguments-float.ll, GCC doesn't agree with MD00305 and treats floats as 4 +; bytes so we only have 12 bytes total. +; N32/N64 has 8 bytes for the varargs pointer, and no reserved area. +; O32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 12 +; O32-DAG: sw [[VAPTR]], 4($sp) +; N32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 8 +; N32-DAG: sw [[VAPTR]], 4($sp) +; N64-DAG: daddiu [[VAPTR:\$[0-9]+]], $sp, 8 +; N64-DAG: sd [[VAPTR]], 0($sp) + +; Increment the pointer then get the varargs arg +; LLVM will rebind the load to the stack pointer instead of the varargs pointer +; during lowering. 
This is fine and doesn't change the behaviour. +; Also, in big-endian mode the offset must be increased by 4 to retrieve the +; correct half of the argument slot. +; +; O32-DAG: addiu [[VAPTR]], [[VAPTR]], 4 +; N32-DAG: addiu [[VAPTR]], [[VAPTR]], 8 +; N64-DAG: daddiu [[VAPTR]], [[VAPTR]], 8 +; O32-DAG: lwc1 [[FTMP1:\$f[0-9]+]], 12($sp) +; NEWLE-DAG: lwc1 [[FTMP1:\$f[0-9]+]], 8($sp) +; NEWBE-DAG: lwc1 [[FTMP1:\$f[0-9]+]], 12($sp) +; ALL-DAG: swc1 [[FTMP1]], 8([[R2]]) + +declare void @llvm.va_start(ptr) +declare void @llvm.va_copy(ptr, ptr) +declare void @llvm.va_end(ptr) diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float.ll b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float.ll new file mode 100644 index 0000000000000..6b7ad03c8e1c2 --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float.ll @@ -0,0 +1,224 @@ +; RUN: llc -mtriple=mips -relocation-model=static -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,O32 %s +; RUN: llc -mtriple=mipsel -relocation-model=static -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,O32 %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,NEW %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,NEW %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64,NEW %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64,NEW %s + +@bytes = global [11 x i8] zeroinitializer +@dwords = global [11 x i64] zeroinitializer +@floats = global [11 x float] zeroinitializer +@doubles = global [11 x double] zeroinitializer + +define void @double_args(double %a, double %b, double %c, double %d, double %e, + double %f, double %g, double %h, double %i) nounwind { +entry: + %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1 + store volatile double %a, ptr %0 + %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2 + store volatile double %b, ptr %1 + %2 = getelementptr [11 x double], ptr @doubles, i32 0, i32 3 + store volatile double %c, ptr %2 + %3 = getelementptr [11 x double], ptr @doubles, i32 0, i32 4 + store volatile double %d, ptr %3 + %4 = getelementptr [11 x double], ptr @doubles, i32 0, i32 5 + store volatile double %e, ptr %4 + %5 = getelementptr [11 x double], ptr @doubles, i32 0, i32 6 + store volatile double %f, ptr %5 + %6 = getelementptr [11 x double], ptr @doubles, i32 0, i32 7 + store volatile double %g, ptr %6 + %7 = getelementptr [11 x double], ptr @doubles, i32 0, i32 8 + store volatile double %h, ptr %7 + %8 = getelementptr [11 x double], ptr @doubles, i32 0, i32 9 + store volatile double %i, ptr %8 + ret void +} + +; ALL-LABEL: double_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) + +; The first four arguments are the same in O32/N32/N64. +; The first argument is floating point but single-float is enabled so floating +; point registers are not used. 
+; O32-DAG: sw $4, 8([[R2]]) +; O32-DAG: sw $5, 12([[R2]]) +; NEW-DAG: sd $4, 8([[R2]]) + +; O32-DAG: sw $6, 16([[R2]]) +; O32-DAG: sw $7, 20([[R2]]) +; NEW-DAG: sd $5, 16([[R2]]) + +; O32 has run out of argument registers and starts using the stack +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 16($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 20($sp) +; O32-DAG: sw [[R3]], 24([[R2]]) +; O32-DAG: sw [[R4]], 28([[R2]]) +; NEW-DAG: sd $6, 24([[R2]]) + +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp) +; O32-DAG: sw [[R3]], 32([[R2]]) +; O32-DAG: sw [[R4]], 36([[R2]]) +; NEW-DAG: sd $7, 32([[R2]]) + +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp) +; O32-DAG: sw [[R3]], 40([[R2]]) +; O32-DAG: sw [[R4]], 44([[R2]]) +; NEW-DAG: sd $8, 40([[R2]]) + +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp) +; O32-DAG: sw [[R3]], 48([[R2]]) +; O32-DAG: sw [[R4]], 52([[R2]]) +; NEW-DAG: sd $9, 48([[R2]]) + +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp) +; O32-DAG: sw [[R3]], 56([[R2]]) +; O32-DAG: sw [[R4]], 60([[R2]]) +; NEW-DAG: sd $10, 56([[R2]]) + +; N32/N64 have run out of registers and starts using the stack too +; O32-DAG: lw [[R3:\$[0-9]+]], 56($sp) +; O32-DAG: lw [[R4:\$[0-9]+]], 60($sp) +; O32-DAG: sw [[R3]], 64([[R2]]) +; O32-DAG: sw [[R4]], 68([[R2]]) +; NEW-DAG: ld [[R3:\$[0-9]+]], 0($sp) +; NEW-DAG: sd $11, 64([[R2]]) + +define void @float_args(float %a, float %b, float %c, float %d, float %e, + float %f, float %g, float %h, float %i) nounwind { +entry: + %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1 + store volatile float %a, ptr %0 + %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2 + store volatile float %b, ptr %1 + %2 = getelementptr [11 x float], ptr @floats, i32 0, i32 3 + store volatile float %c, ptr %2 + %3 = getelementptr [11 x float], ptr @floats, i32 0, i32 4 + store volatile float %d, ptr %3 + %4 = getelementptr [11 x float], ptr @floats, i32 0, i32 5 + store volatile float %e, ptr %4 + %5 = getelementptr [11 x float], ptr @floats, i32 0, i32 6 + store volatile float %f, ptr %5 + %6 = getelementptr [11 x float], ptr @floats, i32 0, i32 7 + store volatile float %g, ptr %6 + %7 = getelementptr [11 x float], ptr @floats, i32 0, i32 8 + store volatile float %h, ptr %7 + %8 = getelementptr [11 x float], ptr @floats, i32 0, i32 9 + store volatile float %i, ptr %8 + ret void +} + +; ALL-LABEL: float_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) +; SYM64-DAG: daddiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) + +; The first argument is floating point so floating point registers are used. +; The first argument is the same for O32/N32/N64 but the second argument differs +; by register +; ALL-DAG: swc1 $f12, 4([[R1]]) +; O32-DAG: swc1 $f14, 8([[R1]]) +; NEW-DAG: swc1 $f13, 8([[R1]]) + +; O32 has run out of argument registers and (in theory) starts using the stack +; I've yet to find a reference in the documentation about this but GCC uses up +; the remaining two argument slots in the GPR's first. We'll do the same for +; compatibility. 
+; O32-DAG: mtc1 $6, $f0 +; O32-DAG: swc1 $f0, 12([[R1]]) +; NEW-DAG: swc1 $f14, 12([[R1]]) +; O32-DAG: mtc1 $7, $f0 +; O32-DAG: swc1 $f0, 16([[R1]]) +; NEW-DAG: swc1 $f15, 16([[R1]]) + +; O32 is definitely out of registers now and switches to the stack. +; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 16($sp) +; O32-DAG: swc1 [[F1]], 20([[R1]]) +; NEW-DAG: swc1 $f16, 20([[R1]]) +; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 20($sp) +; O32-DAG: swc1 [[F1]], 24([[R1]]) +; NEW-DAG: swc1 $f17, 24([[R1]]) +; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 24($sp) +; O32-DAG: swc1 [[F1]], 28([[R1]]) +; NEW-DAG: swc1 $f18, 28([[R1]]) +; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 28($sp) +; O32-DAG: swc1 [[F1]], 32([[R1]]) +; NEW-DAG: swc1 $f19, 32([[R1]]) + +; N32/N64 have run out of registers and start using the stack too +; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 32($sp) +; O32-DAG: swc1 [[F1]], 36([[R1]]) +; NEW-DAG: lwc1 [[F1:\$f[0-9]+]], 0($sp) +; NEW-DAG: swc1 [[F1]], 36([[R1]]) + + +define void @double_arg2(i8 %a, double %b) nounwind { +entry: + %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1 + store volatile i8 %a, ptr %0 + %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1 + store volatile double %b, ptr %1 + ret void +} + +; ALL-LABEL: double_arg2: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes) +; SYM64-DAG: daddiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes) +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) + +; The first four arguments are the same in O32/N32/N64. +; The first argument isn't floating point so floating point registers are not +; used. +; The second slot is insufficiently aligned for double on O32 so it is skipped. +; Also, double occupies two slots on O32 and only one for N32/N64. +; ALL-DAG: sb $4, 1([[R1]]) +; O32-DAG: sw $6, 8([[R2]]) +; O32-DAG: sw $7, 12([[R2]]) +; NEW-DAG: sd $5, 8([[R2]]) + +define void @float_arg2(i8 %a, float %b) nounwind { +entry: + %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1 + store volatile i8 %a, ptr %0 + %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 1 + store volatile float %b, ptr %1 + ret void +} + +; ALL-LABEL: float_arg2: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes) +; SYM64-DAG: daddiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes) +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) + +; The first argument is the same in O32/N32/N64. +; ALL-DAG: sb $4, 1([[R1]]) + +; The first argument isn't floating point so floating point registers are not +; used in O32, but N32/N64 will still use them. +; MD00305 and GCC disagree on this one. MD00305 says that floats are treated +; as 8-byte aligned and occupy two slots on O32. GCC is treating them as 4-byte +; aligned and occupying one slot. We'll use GCC's definition. 
+; O32-DAG: mtc1 $5, $f0 +; O32-DAG: swc1 $f0, 4([[R2]]) +; NEW-DAG: swc1 $f13, 4([[R2]]) diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-fp128.ll b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-fp128.ll new file mode 100644 index 0000000000000..9268e37b02fb5 --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-fp128.ll @@ -0,0 +1,42 @@ +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32 %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32 %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64 %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64 %s + +@ldoubles = global [11 x fp128] zeroinitializer + +define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind { +entry: + %0 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 1 + store volatile fp128 %a, ptr %0 + %1 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 2 + store volatile fp128 %b, ptr %1 + %2 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 3 + store volatile fp128 %c, ptr %2 + %3 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 4 + store volatile fp128 %d, ptr %3 + %4 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 5 + store volatile fp128 %e, ptr %4 + ret void +} + +; ALL-LABEL: ldouble_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(ldoubles) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(ldoubles) + +; The first four arguments are the same in N32/N64. 
+; ALL-DAG: sd $5, 24([[R2]]) +; ALL-DAG: sd $4, 16([[R2]]) +; ALL-DAG: sd $7, 40([[R2]]) +; ALL-DAG: sd $6, 32([[R2]]) +; ALL-DAG: sd $9, 56([[R2]]) +; ALL-DAG: sd $8, 48([[R2]]) +; ALL-DAG: sd $11, 72([[R2]]) +; ALL-DAG: sd $10, 64([[R2]]) diff --git a/llvm/test/CodeGen/Mips/cconv/callee-saved-singlefloat.ll b/llvm/test/CodeGen/Mips/cconv/callee-saved-singlefloat.ll new file mode 100644 index 0000000000000..5bf1f2c2d60da --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/callee-saved-singlefloat.ll @@ -0,0 +1,111 @@ +; RUN: llc -mtriple=mips -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,O32 %s +; RUN: llc -mtriple=mipsel -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,O32 %s + +; RUN: llc -mtriple=mips64 -target-abi n32 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,N32 %s +; RUN: llc -mtriple=mips64el -target-abi n32 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,N32 %s +; RUN: llc -mtriple=mips64 -target-abi n32 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,ALL-INV,N32-INV %s +; RUN: llc -mtriple=mips64el -target-abi n32 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,ALL-INV,N32-INV %s + +; RUN: llc -mtriple=mips64 -target-abi n64 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,N64 %s +; RUN: llc -mtriple=mips64el -target-abi n64 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,N64 %s +; RUN: llc -mtriple=mips64 -target-abi n64 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,ALL-INV,N64-INV %s +; RUN: llc -mtriple=mips64el -target-abi n64 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,ALL-INV,N64-INV %s + +define void @fpu_clobber() nounwind { +entry: + call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f13},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"() + ret void +} + +; ALL-LABEL: fpu_clobber: +; ALL-INV-NOT: swc1 $f0, +; ALL-INV-NOT: swc1 $f1, +; ALL-INV-NOT: swc1 $f2, +; ALL-INV-NOT: swc1 $f3, +; ALL-INV-NOT: swc1 $f4, +; ALL-INV-NOT: swc1 $f5, +; ALL-INV-NOT: swc1 $f6, +; ALL-INV-NOT: swc1 $f7, +; ALL-INV-NOT: swc1 $f8, +; ALL-INV-NOT: swc1 $f9, +; ALL-INV-NOT: swc1 $f10, +; ALL-INV-NOT: swc1 $f11, +; ALL-INV-NOT: swc1 $f12, +; ALL-INV-NOT: swc1 $f13, +; ALL-INV-NOT: swc1 $f14, +; ALL-INV-NOT: swc1 $f15, +; ALL-INV-NOT: swc1 $f16, +; ALL-INV-NOT: swc1 $f17, +; ALL-INV-NOT: swc1 $f18, +; ALL-INV-NOT: swc1 $f19, + +; O32: addiu $sp, $sp, -48 +; O32-DAG: swc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp) +; O32-DAG: swc1 [[F21:\$f21]], [[OFF21:[0-9]+]]($sp) +; O32-DAG: swc1 [[F22:\$f22]], [[OFF22:[0-9]+]]($sp) +; O32-DAG: swc1 [[F23:\$f23]], [[OFF23:[0-9]+]]($sp) +; O32-DAG: swc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp) +; O32-DAG: swc1 [[F25:\$f25]], [[OFF25:[0-9]+]]($sp) +; O32-DAG: swc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp) +; O32-DAG: swc1 [[F27:\$f27]], [[OFF27:[0-9]+]]($sp) +; O32-DAG: swc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp) +; O32-DAG: swc1 [[F29:\$f29]], [[OFF29:[0-9]+]]($sp) +; O32-DAG: swc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp) +; O32-DAG: swc1 [[F31:\$f31]], [[OFF31:[0-9]+]]($sp) +; O32-DAG: lwc1 [[F20]], [[OFF20]]($sp) +; O32-DAG: lwc1 [[F21]], [[OFF21]]($sp) +; O32-DAG: lwc1 [[F22]], [[OFF22]]($sp) +; O32-DAG: lwc1 [[F23]], [[OFF23]]($sp) +; O32-DAG: lwc1 [[F24]], [[OFF24]]($sp) +; O32-DAG: lwc1 [[F25]], [[OFF25]]($sp) +; O32-DAG: lwc1 [[F26]], [[OFF26]]($sp) +; O32-DAG: lwc1 [[F27]], [[OFF27]]($sp) 
+; O32-DAG: lwc1 [[F28]], [[OFF28]]($sp) +; O32-DAG: lwc1 [[F29]], [[OFF29]]($sp) +; O32-DAG: lwc1 [[F30]], [[OFF30]]($sp) +; O32-DAG: lwc1 [[F31]], [[OFF31]]($sp) +; O32: addiu $sp, $sp, 48 + +; N32: addiu $sp, $sp, -32 +; N32-DAG: swc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f21, +; N32-DAG: swc1 [[F22:\$f22]], [[OFF22:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f23, +; N32-DAG: swc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f25, +; N32-DAG: swc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f27, +; N32-DAG: swc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f29, +; N32-DAG: swc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f31, +; N32-DAG: lwc1 [[F20]], [[OFF20]]($sp) +; N32-DAG: lwc1 [[F22]], [[OFF22]]($sp) +; N32-DAG: lwc1 [[F24]], [[OFF24]]($sp) +; N32-DAG: lwc1 [[F26]], [[OFF26]]($sp) +; N32-DAG: lwc1 [[F28]], [[OFF28]]($sp) +; N32-DAG: lwc1 [[F30]], [[OFF30]]($sp) +; N32: addiu $sp, $sp, 32 + +; N64: addiu $sp, $sp, -32 +; N64-INV-NOT: swc1 $f20, +; N64-INV-NOT: swc1 $f21, +; N64-INV-NOT: swc1 $f22, +; N64-INV-NOT: swc1 $f23, +; N64-DAG: swc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp) +; N64-DAG: swc1 [[F25:\$f25]], [[OFF25:[0-9]+]]($sp) +; N64-DAG: swc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp) +; N64-DAG: swc1 [[F27:\$f27]], [[OFF27:[0-9]+]]($sp) +; N64-DAG: swc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp) +; N64-DAG: swc1 [[F29:\$f29]], [[OFF29:[0-9]+]]($sp) +; N64-DAG: swc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp) +; N64-DAG: swc1 [[F31:\$f31]], [[OFF31:[0-9]+]]($sp) +; N64-DAG: lwc1 [[F24]], [[OFF24]]($sp) +; N64-DAG: lwc1 [[F25]], [[OFF25]]($sp) +; N64-DAG: lwc1 [[F26]], [[OFF26]]($sp) +; N64-DAG: lwc1 [[F27]], [[OFF27]]($sp) +; N64-DAG: lwc1 [[F28]], [[OFF28]]($sp) +; N64-DAG: lwc1 [[F29]], [[OFF29]]($sp) +; N64-DAG: lwc1 [[F30]], [[OFF30]]($sp) +; N64-DAG: lwc1 [[F31]], [[OFF31]]($sp) +; N64: addiu $sp, $sp, 32 \ No newline at end of file diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-single-float.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-single-float.ll new file mode 100644 index 0000000000000..1abf08d8200fb --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/return-hard-single-float.ll @@ -0,0 +1,43 @@ +; RUN: llc -mtriple=mips-linux-gnu -relocation-model=static -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,O32 %s +; RUN: llc -mtriple=mipsel-linux-gnu -relocation-model=static -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,O32 %s + +; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n32 -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,N32 %s +; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n32 -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,N32 %s + +; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n64 -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,N64 %s +; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n64 -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,N64 %s + +@float = global float zeroinitializer +@double = global double zeroinitializer + +define float @retfloat() nounwind { +entry: + %0 = load volatile float, ptr @float + ret float %0 +} + +; ALL-LABEL: retfloat: +; O32-DAG: lui [[R1:\$[0-9]+]], %hi(float) +; O32-DAG: lwc1 $f0, %lo(float)([[R1]]) +; N32-DAG: lui [[R1:\$[0-9]+]], %hi(float) +; N32-DAG: lwc1 $f0, %lo(float)([[R1]]) +; N64-DAG: lwc1 $f0, %lo(float)([[R1:\$[0-9+]]]) + 
+define double @retdouble() nounwind { +entry: + %0 = load volatile double, ptr @double + ret double %0 +} + +; ALL-LABEL: retdouble: +; O32-DAG: lw $2, %lo(double)([[R1:\$[0-9]+]]) +; O32-DAG: addiu [[R2:\$[0-9]+]], [[R1]], %lo(double) +; O32-DAG: lw $3, 4([[R2]]) +; N32-DAG: ld $2, %lo(double)([[R1:\$[0-9]+]]) +; N64-DAG: ld $2, %lo(double)([[R1:\$[0-9]+]]) diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-single-fp128.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-single-fp128.ll new file mode 100644 index 0000000000000..e4d04146ecc2f --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/return-hard-single-fp128.ll @@ -0,0 +1,24 @@ +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32 %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32 %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64 %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64 %s + +@fp128 = global fp128 zeroinitializer + +define fp128 @retldouble() nounwind { +entry: + %0 = load volatile fp128, ptr @fp128 + ret fp128 %0 +} + +; ALL-LABEL: retldouble: +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(fp128) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(fp128) + +; ALL-DAG: ld $2, %lo(fp128)([[R2]]) +; ALL-DAG: ld $3, 8([[R2]]) diff --git a/llvm/test/CodeGen/Mips/inlineasm-constraints-singlefloat.ll b/llvm/test/CodeGen/Mips/inlineasm-constraints-singlefloat.ll new file mode 100644 index 0000000000000..ddebddcdab260 --- /dev/null +++ b/llvm/test/CodeGen/Mips/inlineasm-constraints-singlefloat.ll @@ -0,0 +1,68 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=mips -mattr=+single-float < %s | FileCheck %s --check-prefix=MIPS32 +; RUN: llc -mtriple=mips64 -mattr=+single-float < %s | FileCheck %s --check-prefix=MIPS64 + +define void @read_double(ptr %0) { +; MIPS32-LABEL: read_double: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 4($4) +; MIPS32-NEXT: lw $3, 0($4) +; MIPS32-NEXT: #APP +; MIPS32-NEXT: #NO_APP +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop +; +; MIPS64-LABEL: read_double: +; MIPS64: # %bb.0: +; MIPS64-NEXT: ld $2, 0($4) +; MIPS64-NEXT: #APP +; MIPS64-NEXT: #NO_APP +; MIPS64-NEXT: jr $ra +; MIPS64-NEXT: nop + %2 = load double, ptr %0, align 8 + tail call void asm sideeffect "", "r,~{$1}"(double %2) + ret void +} + +define void @read_float(ptr %0) { +; MIPS32-LABEL: read_float: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lwc1 $f0, 0($4) +; MIPS32-NEXT: #APP +; MIPS32-NEXT: #NO_APP +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop +; +; MIPS64-LABEL: read_float: +; MIPS64: # %bb.0: +; MIPS64-NEXT: lwc1 $f0, 0($4) +; MIPS64-NEXT: #APP +; MIPS64-NEXT: #NO_APP +; MIPS64-NEXT: jr $ra +; MIPS64-NEXT: nop + %2 = load float, ptr %0, align 8 + tail call void asm sideeffect "", "f"(float %2) + ret void +} + +; Test that a proper register class is assigned to clobbers in single-float mode +define float @explicit_float_register_clobber(ptr %0) { +; MIPS32-LABEL: explicit_float_register_clobber: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lwc1 $f1, 0($4) +; MIPS32-NEXT: #APP +; MIPS32-NEXT: #NO_APP +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: mov.s $f0, $f1 +; +; MIPS64-LABEL: explicit_float_register_clobber: +; 
MIPS64: # %bb.0: +; MIPS64-NEXT: lwc1 $f1, 0($4) +; MIPS64-NEXT: #APP +; MIPS64-NEXT: #NO_APP +; MIPS64-NEXT: jr $ra +; MIPS64-NEXT: mov.s $f0, $f1 + %2 = load float, ptr %0, align 8 + tail call void asm sideeffect "", "~{$f0}"() + ret float %2 +} diff --git a/llvm/test/CodeGen/Mips/int-to-float-conversion.ll b/llvm/test/CodeGen/Mips/int-to-float-conversion.ll index 84bc6a253595a..1c8ad9ad07e15 100644 --- a/llvm/test/CodeGen/Mips/int-to-float-conversion.ll +++ b/llvm/test/CodeGen/Mips/int-to-float-conversion.ll @@ -1,13 +1,24 @@ -; RUN: llc -mtriple=mipsel < %s | FileCheck %s -check-prefix=32 -; RUN: llc -mtriple=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefix=64 -; RUN: llc -mtriple=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64 +; RUN: llc -mtriple=mipsel < %s | FileCheck %s -check-prefixes=ALL,32,32DF +; RUN: llc -mtriple=mipsel -mattr=+single-float < %s | FileCheck %s -check-prefixes=ALL,32,32SF + +; RUN: llc -mtriple=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefixes=ALL,64,64DF +; RUN: llc -mtriple=mips64el -mcpu=mips4 -mattr=+single-float < %s \ +; RUN: | FileCheck %s -check-prefixes=ALL,64,64SF + +; RUN: llc -mtriple=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefixes=ALL,64,64DF +; RUN: llc -mtriple=mips64el -mcpu=mips64 -mattr=+single-float < %s \ +; RUN: | FileCheck %s -check-prefixes=ALL,64,64SF + +; Test various combinations of 32/64bit GP registers and single/double floating point support. @i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4 @i3 = common global ptr null, align 4 -; 32-LABEL: test_float_int_: -; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 32: cvt.s.w $f{{[0-9]+}}, $f[[R0]] +; ALL-LABEL: test_float_int_: +; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 32: cvt.s.w $f{{[0-9]+}}, $f[[R0]] +; 64: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 64: cvt.s.w $f{{[0-9]+}}, $f[[R0]] define float @test_float_int_(i32 %a) { entry: @@ -15,12 +26,13 @@ entry: ret float %conv } -; 32-LABEL: test_double_int_: -; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 32: cvt.d.w $f{{[0-9]+}}, $f[[R0]] -; 64-LABEL: test_double_int_: -; 64: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 64: cvt.d.w $f{{[0-9]+}}, $f[[R0]] +; ALL-LABEL: test_double_int_: +; 32DF: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 32DF: cvt.d.w $f{{[0-9]+}}, $f[[R0]] +; 32SF: jal __floatsidf +; 64DF: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 64DF: cvt.d.w $f{{[0-9]+}}, $f[[R0]] +; 64SF: jal __floatsidf define double @test_double_int_(i32 %a) { entry: @@ -28,9 +40,11 @@ entry: ret double %conv } -; 64-LABEL: test_float_LL_: -; 64: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 64: cvt.s.l $f{{[0-9]+}}, $f[[R0]] +; ALL-LABEL: test_float_LL_: +; 32: jal __floatdisf +; 64DF: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 64DF: cvt.s.l $f{{[0-9]+}}, $f[[R0]] +; 64SF: jal __floatdisf define float @test_float_LL_(i64 %a) { entry: @@ -38,9 +52,11 @@ entry: ret float %conv } -; 64-LABEL: test_double_LL_: -; 64: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 64: cvt.d.l $f{{[0-9]+}}, $f[[R0]] +; ALL-LABEL: test_double_LL_: +; 32: jal __floatdidf +; 64DF: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 64DF: cvt.d.l $f{{[0-9]+}}, $f[[R0]] +; 64SF: jal __floatdidf define double @test_double_LL_(i64 %a) { entry: diff --git a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll index 7c9f375cbb9ad..40d36fbb6fe76 100644 --- a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll +++ b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll @@ -97,7 +97,6 @@ entry: ; ALL: lw $[[R0:[0-9]+]], %got(v4f32)( ; ALL: ld.w $w12, 0($[[R0]]) ; ALL: move.v $w[[W0:13]], $w12 -; 
NOODDSPREG: move.v $w[[W0:12]], $w13 ; ALL: teqi $zero, 1 ; ALL-NOT: st.w ; ALL-NOT: ld.w diff --git a/llvm/test/CodeGen/NVPTX/bug22322.ll b/llvm/test/CodeGen/NVPTX/bug22322.ll index 055c512401b4c..71e180b39fcf9 100644 --- a/llvm/test/CodeGen/NVPTX/bug22322.ll +++ b/llvm/test/CodeGen/NVPTX/bug22322.ll @@ -20,12 +20,12 @@ _ZL11compute_vecRK6float3jb.exit: call void @llvm.lifetime.start.p0(i64 4, ptr %ret_vec.sroa.8.i) %6 = and i32 %4, 15 %7 = icmp eq i32 %6, 0 - %8 = select i1 %7, float 0.000000e+00, float -1.000000e+00 + %8 = select nnan nsz i1 %7, float 0.000000e+00, float -1.000000e+00 store float %8, ptr %ret_vec.sroa.8.i, align 4 ; CHECK: max.f32 %r{{[0-9]+}}, %r{{[0-9]+}}, 0f00000000 %9 = fcmp olt float %8, 0.000000e+00 %ret_vec.sroa.8.i.val = load float, ptr %ret_vec.sroa.8.i, align 4 - %10 = select i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val + %10 = select nnan nsz i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val call void @llvm.lifetime.end.p0(i64 4, ptr %ret_vec.sroa.8.i) %11 = getelementptr inbounds %class.float3, ptr %dst, i64 %5, i32 0 store float 0.000000e+00, ptr %11, align 4 @@ -51,7 +51,7 @@ declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2 ; Function Attrs: nounwind declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2 -attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "no-signed-zeros-fp-math"="true" "use-soft-float"="false" } +attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #1 = { nounwind readnone } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/NVPTX/combine-wide.ll b/llvm/test/CodeGen/NVPTX/combine-wide.ll index b5948d37c3505..63e0f3789f49f 100644 --- a/llvm/test/CodeGen/NVPTX/combine-wide.ll +++ b/llvm/test/CodeGen/NVPTX/combine-wide.ll @@ -1,24 +1,37 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -O1 | FileCheck %s --check-prefixes=CHECK,O1 +; RUN: llc < %s -O1 | FileCheck %s --check-prefixes=CHECK,O1,O1-NO-MAD +; RUN: llc < %s -O1 -nvptx-mad-wide-opt | FileCheck %s --check-prefixes=CHECK,O1,O1-MAD ; RUN: llc < %s -O0 | FileCheck %s --check-prefixes=CHECK,O0 target triple = "nvptx64-nvidia-cuda" define i64 @t1(i32 %a, i32 %b, i64 %c) { -; -; O1-LABEL: t1( -; O1: { -; O1-NEXT: .reg .b32 %r<3>; -; O1-NEXT: .reg .b64 %rd<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b32 %r1, [t1_param_0]; -; O1-NEXT: ld.param.b32 %r2, [t1_param_1]; -; O1-NEXT: mul.wide.s32 %rd1, %r1, %r2; -; O1-NEXT: ld.param.b64 %rd2, [t1_param_2]; -; O1-NEXT: add.s64 %rd3, %rd2, %rd1; -; O1-NEXT: st.param.b64 [func_retval0], %rd3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t1( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b32 %r<3>; +; O1-NO-MAD-NEXT: .reg .b64 %rd<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b32 %r1, [t1_param_0]; +; O1-NO-MAD-NEXT: ld.param.b32 %r2, [t1_param_1]; +; O1-NO-MAD-NEXT: mul.wide.s32 %rd1, %r1, %r2; +; O1-NO-MAD-NEXT: ld.param.b64 %rd2, [t1_param_2]; +; O1-NO-MAD-NEXT: add.s64 %rd3, %rd2, %rd1; +; O1-NO-MAD-NEXT: st.param.b64 [func_retval0], %rd3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t1( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-NEXT: .reg .b64 %rd<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b32 %r1, 
[t1_param_0]; +; O1-MAD-NEXT: ld.param.b32 %r2, [t1_param_1]; +; O1-MAD-NEXT: ld.param.b64 %rd1, [t1_param_2]; +; O1-MAD-NEXT: mad.wide.s32 %rd2, %r1, %r2, %rd1; +; O1-MAD-NEXT: st.param.b64 [func_retval0], %rd2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t1( ; O0: { @@ -41,20 +54,32 @@ define i64 @t1(i32 %a, i32 %b, i64 %c) { } define i64 @t2(i32 %a, i32 %b, i64 %c) { -; -; O1-LABEL: t2( -; O1: { -; O1-NEXT: .reg .b32 %r<3>; -; O1-NEXT: .reg .b64 %rd<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b32 %r1, [t2_param_0]; -; O1-NEXT: ld.param.b32 %r2, [t2_param_1]; -; O1-NEXT: mul.wide.s32 %rd1, %r1, %r2; -; O1-NEXT: ld.param.b64 %rd2, [t2_param_2]; -; O1-NEXT: add.s64 %rd3, %rd1, %rd2; -; O1-NEXT: st.param.b64 [func_retval0], %rd3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t2( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b32 %r<3>; +; O1-NO-MAD-NEXT: .reg .b64 %rd<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b32 %r1, [t2_param_0]; +; O1-NO-MAD-NEXT: ld.param.b32 %r2, [t2_param_1]; +; O1-NO-MAD-NEXT: mul.wide.s32 %rd1, %r1, %r2; +; O1-NO-MAD-NEXT: ld.param.b64 %rd2, [t2_param_2]; +; O1-NO-MAD-NEXT: add.s64 %rd3, %rd1, %rd2; +; O1-NO-MAD-NEXT: st.param.b64 [func_retval0], %rd3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t2( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-NEXT: .reg .b64 %rd<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b32 %r1, [t2_param_0]; +; O1-MAD-NEXT: ld.param.b32 %r2, [t2_param_1]; +; O1-MAD-NEXT: ld.param.b64 %rd1, [t2_param_2]; +; O1-MAD-NEXT: mad.wide.s32 %rd2, %r1, %r2, %rd1; +; O1-MAD-NEXT: st.param.b64 [func_retval0], %rd2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t2( ; O0: { @@ -77,19 +102,30 @@ define i64 @t2(i32 %a, i32 %b, i64 %c) { } define i64 @t3(i32 %a, i32 %b) { -; -; O1-LABEL: t3( -; O1: { -; O1-NEXT: .reg .b32 %r<3>; -; O1-NEXT: .reg .b64 %rd<3>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b32 %r1, [t3_param_0]; -; O1-NEXT: ld.param.b32 %r2, [t3_param_1]; -; O1-NEXT: mul.wide.s32 %rd1, %r1, %r2; -; O1-NEXT: add.s64 %rd2, %rd1, 1; -; O1-NEXT: st.param.b64 [func_retval0], %rd2; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t3( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b32 %r<3>; +; O1-NO-MAD-NEXT: .reg .b64 %rd<3>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b32 %r1, [t3_param_0]; +; O1-NO-MAD-NEXT: ld.param.b32 %r2, [t3_param_1]; +; O1-NO-MAD-NEXT: mul.wide.s32 %rd1, %r1, %r2; +; O1-NO-MAD-NEXT: add.s64 %rd2, %rd1, 1; +; O1-NO-MAD-NEXT: st.param.b64 [func_retval0], %rd2; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t3( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-NEXT: .reg .b64 %rd<2>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b32 %r1, [t3_param_0]; +; O1-MAD-NEXT: ld.param.b32 %r2, [t3_param_1]; +; O1-MAD-NEXT: mad.wide.s32 %rd1, %r1, %r2, 1; +; O1-MAD-NEXT: st.param.b64 [func_retval0], %rd1; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t3( ; O0: { @@ -111,19 +147,30 @@ define i64 @t3(i32 %a, i32 %b) { } define i64 @t4(i32 %a, i64 %c) { -; -; O1-LABEL: t4( -; O1: { -; O1-NEXT: .reg .b32 %r<2>; -; O1-NEXT: .reg .b64 %rd<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b32 %r1, [t4_param_0]; -; O1-NEXT: ld.param.b64 %rd1, [t4_param_1]; -; O1-NEXT: mul.wide.s32 %rd2, %r1, 3; -; O1-NEXT: add.s64 %rd3, %rd1, %rd2; -; O1-NEXT: st.param.b64 [func_retval0], %rd3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t4( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b32 %r<2>; +; O1-NO-MAD-NEXT: .reg .b64 %rd<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: 
// %bb.0: +; O1-NO-MAD-NEXT: ld.param.b32 %r1, [t4_param_0]; +; O1-NO-MAD-NEXT: ld.param.b64 %rd1, [t4_param_1]; +; O1-NO-MAD-NEXT: mul.wide.s32 %rd2, %r1, 3; +; O1-NO-MAD-NEXT: add.s64 %rd3, %rd1, %rd2; +; O1-NO-MAD-NEXT: st.param.b64 [func_retval0], %rd3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t4( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b32 %r<2>; +; O1-MAD-NEXT: .reg .b64 %rd<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b32 %r1, [t4_param_0]; +; O1-MAD-NEXT: ld.param.b64 %rd1, [t4_param_1]; +; O1-MAD-NEXT: mad.wide.s32 %rd2, %r1, 3, %rd1; +; O1-MAD-NEXT: st.param.b64 [func_retval0], %rd2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t4( ; O0: { @@ -145,18 +192,28 @@ define i64 @t4(i32 %a, i64 %c) { } define i64 @t4_1(i32 %a, i64 %c) { -; -; O1-LABEL: t4_1( -; O1: { -; O1-NEXT: .reg .b32 %r<2>; -; O1-NEXT: .reg .b64 %rd<3>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b32 %r1, [t4_1_param_0]; -; O1-NEXT: mul.wide.s32 %rd1, %r1, 3; -; O1-NEXT: add.s64 %rd2, %rd1, 5; -; O1-NEXT: st.param.b64 [func_retval0], %rd2; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t4_1( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b32 %r<2>; +; O1-NO-MAD-NEXT: .reg .b64 %rd<3>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b32 %r1, [t4_1_param_0]; +; O1-NO-MAD-NEXT: mul.wide.s32 %rd1, %r1, 3; +; O1-NO-MAD-NEXT: add.s64 %rd2, %rd1, 5; +; O1-NO-MAD-NEXT: st.param.b64 [func_retval0], %rd2; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t4_1( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b32 %r<2>; +; O1-MAD-NEXT: .reg .b64 %rd<2>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b32 %r1, [t4_1_param_0]; +; O1-MAD-NEXT: mad.wide.s32 %rd1, %r1, 3, 5; +; O1-MAD-NEXT: st.param.b64 [func_retval0], %rd1; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t4_1( ; O0: { @@ -177,20 +234,32 @@ define i64 @t4_1(i32 %a, i64 %c) { } define i64 @t5(i32 %a, i32 %b, i64 %c) { -; -; O1-LABEL: t5( -; O1: { -; O1-NEXT: .reg .b32 %r<3>; -; O1-NEXT: .reg .b64 %rd<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b32 %r1, [t5_param_0]; -; O1-NEXT: ld.param.b32 %r2, [t5_param_1]; -; O1-NEXT: mul.wide.u32 %rd1, %r1, %r2; -; O1-NEXT: ld.param.b64 %rd2, [t5_param_2]; -; O1-NEXT: add.s64 %rd3, %rd2, %rd1; -; O1-NEXT: st.param.b64 [func_retval0], %rd3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t5( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b32 %r<3>; +; O1-NO-MAD-NEXT: .reg .b64 %rd<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b32 %r1, [t5_param_0]; +; O1-NO-MAD-NEXT: ld.param.b32 %r2, [t5_param_1]; +; O1-NO-MAD-NEXT: mul.wide.u32 %rd1, %r1, %r2; +; O1-NO-MAD-NEXT: ld.param.b64 %rd2, [t5_param_2]; +; O1-NO-MAD-NEXT: add.s64 %rd3, %rd2, %rd1; +; O1-NO-MAD-NEXT: st.param.b64 [func_retval0], %rd3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t5( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-NEXT: .reg .b64 %rd<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b32 %r1, [t5_param_0]; +; O1-MAD-NEXT: ld.param.b32 %r2, [t5_param_1]; +; O1-MAD-NEXT: ld.param.b64 %rd1, [t5_param_2]; +; O1-MAD-NEXT: mad.wide.u32 %rd2, %r1, %r2, %rd1; +; O1-MAD-NEXT: st.param.b64 [func_retval0], %rd2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t5( ; O0: { @@ -213,20 +282,32 @@ define i64 @t5(i32 %a, i32 %b, i64 %c) { } define i64 @t6(i32 %a, i32 %b, i64 %c) { -; -; O1-LABEL: t6( -; O1: { -; O1-NEXT: .reg .b32 %r<3>; -; O1-NEXT: .reg .b64 %rd<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b32 %r1, [t6_param_0]; -; O1-NEXT: ld.param.b32 %r2, [t6_param_1]; -; 
O1-NEXT: mul.wide.u32 %rd1, %r1, %r2; -; O1-NEXT: ld.param.b64 %rd2, [t6_param_2]; -; O1-NEXT: add.s64 %rd3, %rd1, %rd2; -; O1-NEXT: st.param.b64 [func_retval0], %rd3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t6( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b32 %r<3>; +; O1-NO-MAD-NEXT: .reg .b64 %rd<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b32 %r1, [t6_param_0]; +; O1-NO-MAD-NEXT: ld.param.b32 %r2, [t6_param_1]; +; O1-NO-MAD-NEXT: mul.wide.u32 %rd1, %r1, %r2; +; O1-NO-MAD-NEXT: ld.param.b64 %rd2, [t6_param_2]; +; O1-NO-MAD-NEXT: add.s64 %rd3, %rd1, %rd2; +; O1-NO-MAD-NEXT: st.param.b64 [func_retval0], %rd3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t6( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-NEXT: .reg .b64 %rd<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b32 %r1, [t6_param_0]; +; O1-MAD-NEXT: ld.param.b32 %r2, [t6_param_1]; +; O1-MAD-NEXT: ld.param.b64 %rd1, [t6_param_2]; +; O1-MAD-NEXT: mad.wide.u32 %rd2, %r1, %r2, %rd1; +; O1-MAD-NEXT: st.param.b64 [func_retval0], %rd2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t6( ; O0: { @@ -249,7 +330,6 @@ define i64 @t6(i32 %a, i32 %b, i64 %c) { } define i32 @t7(i16 %a, i16 %b) { -; ; O1-LABEL: t7( ; O1: { ; O1-NEXT: .reg .b16 %rs<4>; @@ -281,7 +361,6 @@ define i32 @t7(i16 %a, i16 %b) { } define i32 @t8(i16 %a, i16 %b) { -; ; O1-LABEL: t8( ; O1: { ; O1-NEXT: .reg .b16 %rs<4>; @@ -313,7 +392,6 @@ define i32 @t8(i16 %a, i16 %b) { } define i64 @t9(i32 %a, i32 %b) { -; ; O1-LABEL: t9( ; O1: { ; O1-NEXT: .reg .b32 %r<4>; @@ -345,7 +423,6 @@ define i64 @t9(i32 %a, i32 %b) { } define i64 @t10(i32 %a, i32 %b) { -; ; O1-LABEL: t10( ; O1: { ; O1-NEXT: .reg .b32 %r<4>; @@ -377,7 +454,6 @@ define i64 @t10(i32 %a, i32 %b) { } define i32 @t11(i16 %a, i16 %b) { -; ; O1-LABEL: t11( ; O1: { ; O1-NEXT: .reg .b16 %rs<4>; @@ -409,7 +485,6 @@ define i32 @t11(i16 %a, i16 %b) { } define i32 @t12(i16 %a, i16 %b) { -; ; O1-LABEL: t12( ; O1: { ; O1-NEXT: .reg .b16 %rs<3>; @@ -440,7 +515,6 @@ define i32 @t12(i16 %a, i16 %b) { } define i64 @t13(i32 %a, i32 %b) { -; ; O1-LABEL: t13( ; O1: { ; O1-NEXT: .reg .b32 %r<4>; @@ -472,7 +546,6 @@ define i64 @t13(i32 %a, i32 %b) { } define i64 @t14(i32 %a, i32 %b) { -; ; O1-LABEL: t14( ; O1: { ; O1-NEXT: .reg .b32 %r<3>; @@ -503,7 +576,6 @@ define i64 @t14(i32 %a, i32 %b) { } define i32 @t15(i16 %a, i16 %b) { -; ; O1-LABEL: t15( ; O1: { ; O1-NEXT: .reg .b16 %rs<3>; @@ -534,7 +606,6 @@ define i32 @t15(i16 %a, i16 %b) { } define i32 @t16(i16 %a, i16 %b) { -; ; O1-LABEL: t16( ; O1: { ; O1-NEXT: .reg .b16 %rs<4>; @@ -566,7 +637,6 @@ define i32 @t16(i16 %a, i16 %b) { } define i64 @t17(i32 %a, i32 %b) { -; ; O1-LABEL: t17( ; O1: { ; O1-NEXT: .reg .b32 %r<3>; @@ -597,7 +667,6 @@ define i64 @t17(i32 %a, i32 %b) { } define i64 @t18(i32 %a, i32 %b) { -; ; O1-LABEL: t18( ; O1: { ; O1-NEXT: .reg .b32 %r<4>; @@ -629,7 +698,6 @@ define i64 @t18(i32 %a, i32 %b) { } define i32 @t19(i16 %a, i16 %b) { -; ; O1-LABEL: t19( ; O1: { ; O1-NEXT: .reg .b16 %rs<4>; @@ -661,7 +729,6 @@ define i32 @t19(i16 %a, i16 %b) { } define i32 @t20(i16 %a) { -; ; CHECK-LABEL: t20( ; CHECK: { ; CHECK-NEXT: .reg .b16 %rs<3>; @@ -679,7 +746,6 @@ define i32 @t20(i16 %a) { } define i64 @t21(i32 %a) { -; ; CHECK-LABEL: t21( ; CHECK: { ; CHECK-NEXT: .reg .b32 %r<3>; @@ -697,7 +763,6 @@ define i64 @t21(i32 %a) { } define i64 @t22(i32 %a) { -; ; CHECK-LABEL: t22( ; CHECK: { ; CHECK-NEXT: .reg .b32 %r<3>; @@ -715,7 +780,6 @@ define i64 @t22(i32 %a) { } define i32 @t23(i16 %a, i16 %b) { -; ; CHECK-LABEL: t23( ; 
CHECK: { ; CHECK-NEXT: .reg .b16 %rs<3>; @@ -733,7 +797,6 @@ define i32 @t23(i16 %a, i16 %b) { } define i32 @t24(i16 %a, i16 %b) { -; ; O1-LABEL: t24( ; O1: { ; O1-NEXT: .reg .b16 %rs<2>; @@ -762,7 +825,6 @@ define i32 @t24(i16 %a, i16 %b) { } define i64 @t25(i32 %a) { -; ; CHECK-LABEL: t25( ; CHECK: { ; CHECK-NEXT: .reg .b32 %r<3>; @@ -780,7 +842,6 @@ define i64 @t25(i32 %a) { } define i64 @t26(i32 %a) { -; ; O1-LABEL: t26( ; O1: { ; O1-NEXT: .reg .b32 %r<2>; @@ -809,7 +870,6 @@ define i64 @t26(i32 %a) { } define i32 @t27(i16 %a, i16 %b) { -; ; O1-LABEL: t27( ; O1: { ; O1-NEXT: .reg .b16 %rs<2>; @@ -838,7 +898,6 @@ define i32 @t27(i16 %a, i16 %b) { } define i32 @t28(i16 %a, i16 %b) { -; ; CHECK-LABEL: t28( ; CHECK: { ; CHECK-NEXT: .reg .b16 %rs<3>; @@ -856,7 +915,6 @@ define i32 @t28(i16 %a, i16 %b) { } define i64 @t29(i32 %a) { -; ; O1-LABEL: t29( ; O1: { ; O1-NEXT: .reg .b32 %r<2>; @@ -885,7 +943,6 @@ define i64 @t29(i32 %a) { } define i64 @t30(i32 %a) { -; ; CHECK-LABEL: t30( ; CHECK: { ; CHECK-NEXT: .reg .b32 %r<3>; @@ -903,7 +960,6 @@ define i64 @t30(i32 %a) { } define i64 @t31(i32 %a, i32 %b) { -; ; O1-LABEL: t31( ; O1: { ; O1-NEXT: .reg .b32 %r<4>; @@ -935,20 +991,32 @@ define i64 @t31(i32 %a, i32 %b) { } define i32 @t32(i16 %a, i16 %b, i32 %c) { -; -; O1-LABEL: t32( -; O1: { -; O1-NEXT: .reg .b16 %rs<3>; -; O1-NEXT: .reg .b32 %r<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b16 %rs1, [t32_param_0]; -; O1-NEXT: ld.param.b16 %rs2, [t32_param_1]; -; O1-NEXT: mul.wide.s16 %r1, %rs1, %rs2; -; O1-NEXT: ld.param.b32 %r2, [t32_param_2]; -; O1-NEXT: add.s32 %r3, %r2, %r1; -; O1-NEXT: st.param.b32 [func_retval0], %r3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t32( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b16 %rs<3>; +; O1-NO-MAD-NEXT: .reg .b32 %r<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b16 %rs1, [t32_param_0]; +; O1-NO-MAD-NEXT: ld.param.b16 %rs2, [t32_param_1]; +; O1-NO-MAD-NEXT: mul.wide.s16 %r1, %rs1, %rs2; +; O1-NO-MAD-NEXT: ld.param.b32 %r2, [t32_param_2]; +; O1-NO-MAD-NEXT: add.s32 %r3, %r2, %r1; +; O1-NO-MAD-NEXT: st.param.b32 [func_retval0], %r3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t32( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b16 %rs<3>; +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b16 %rs1, [t32_param_0]; +; O1-MAD-NEXT: ld.param.b16 %rs2, [t32_param_1]; +; O1-MAD-NEXT: ld.param.b32 %r1, [t32_param_2]; +; O1-MAD-NEXT: mad.wide.s16 %r2, %rs1, %rs2, %r1; +; O1-MAD-NEXT: st.param.b32 [func_retval0], %r2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t32( ; O0: { @@ -971,20 +1039,32 @@ define i32 @t32(i16 %a, i16 %b, i32 %c) { } define i32 @t33(i16 %a, i16 %b, i32 %c) { -; -; O1-LABEL: t33( -; O1: { -; O1-NEXT: .reg .b16 %rs<3>; -; O1-NEXT: .reg .b32 %r<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b16 %rs1, [t33_param_0]; -; O1-NEXT: ld.param.b16 %rs2, [t33_param_1]; -; O1-NEXT: mul.wide.s16 %r1, %rs1, %rs2; -; O1-NEXT: ld.param.b32 %r2, [t33_param_2]; -; O1-NEXT: add.s32 %r3, %r2, %r1; -; O1-NEXT: st.param.b32 [func_retval0], %r3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t33( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b16 %rs<3>; +; O1-NO-MAD-NEXT: .reg .b32 %r<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b16 %rs1, [t33_param_0]; +; O1-NO-MAD-NEXT: ld.param.b16 %rs2, [t33_param_1]; +; O1-NO-MAD-NEXT: mul.wide.s16 %r1, %rs1, %rs2; +; O1-NO-MAD-NEXT: ld.param.b32 %r2, [t33_param_2]; +; O1-NO-MAD-NEXT: add.s32 %r3, %r2, %r1; +; O1-NO-MAD-NEXT: 
st.param.b32 [func_retval0], %r3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t33( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b16 %rs<3>; +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b16 %rs1, [t33_param_0]; +; O1-MAD-NEXT: ld.param.b16 %rs2, [t33_param_1]; +; O1-MAD-NEXT: ld.param.b32 %r1, [t33_param_2]; +; O1-MAD-NEXT: mad.wide.s16 %r2, %rs1, %rs2, %r1; +; O1-MAD-NEXT: st.param.b32 [func_retval0], %r2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t33( ; O0: { @@ -1007,19 +1087,30 @@ define i32 @t33(i16 %a, i16 %b, i32 %c) { } define i32 @t34(i16 %a, i16 %b) { -; -; O1-LABEL: t34( -; O1: { -; O1-NEXT: .reg .b16 %rs<3>; -; O1-NEXT: .reg .b32 %r<3>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b16 %rs1, [t34_param_0]; -; O1-NEXT: ld.param.b16 %rs2, [t34_param_1]; -; O1-NEXT: mul.wide.s16 %r1, %rs1, %rs2; -; O1-NEXT: add.s32 %r2, %r1, 1; -; O1-NEXT: st.param.b32 [func_retval0], %r2; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t34( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b16 %rs<3>; +; O1-NO-MAD-NEXT: .reg .b32 %r<3>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b16 %rs1, [t34_param_0]; +; O1-NO-MAD-NEXT: ld.param.b16 %rs2, [t34_param_1]; +; O1-NO-MAD-NEXT: mul.wide.s16 %r1, %rs1, %rs2; +; O1-NO-MAD-NEXT: add.s32 %r2, %r1, 1; +; O1-NO-MAD-NEXT: st.param.b32 [func_retval0], %r2; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t34( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b16 %rs<3>; +; O1-MAD-NEXT: .reg .b32 %r<2>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b16 %rs1, [t34_param_0]; +; O1-MAD-NEXT: ld.param.b16 %rs2, [t34_param_1]; +; O1-MAD-NEXT: mad.wide.s16 %r1, %rs1, %rs2, 1; +; O1-MAD-NEXT: st.param.b32 [func_retval0], %r1; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t34( ; O0: { @@ -1041,19 +1132,30 @@ define i32 @t34(i16 %a, i16 %b) { } define i32 @t35(i16 %a, i32 %c) { -; -; O1-LABEL: t35( -; O1: { -; O1-NEXT: .reg .b16 %rs<2>; -; O1-NEXT: .reg .b32 %r<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b16 %rs1, [t35_param_0]; -; O1-NEXT: ld.param.b32 %r1, [t35_param_1]; -; O1-NEXT: mul.wide.s16 %r2, %rs1, 3; -; O1-NEXT: add.s32 %r3, %r1, %r2; -; O1-NEXT: st.param.b32 [func_retval0], %r3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t35( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b16 %rs<2>; +; O1-NO-MAD-NEXT: .reg .b32 %r<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b16 %rs1, [t35_param_0]; +; O1-NO-MAD-NEXT: ld.param.b32 %r1, [t35_param_1]; +; O1-NO-MAD-NEXT: mul.wide.s16 %r2, %rs1, 3; +; O1-NO-MAD-NEXT: add.s32 %r3, %r1, %r2; +; O1-NO-MAD-NEXT: st.param.b32 [func_retval0], %r3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t35( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b16 %rs<2>; +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b16 %rs1, [t35_param_0]; +; O1-MAD-NEXT: ld.param.b32 %r1, [t35_param_1]; +; O1-MAD-NEXT: mad.wide.s16 %r2, %rs1, 3, %r1; +; O1-MAD-NEXT: st.param.b32 [func_retval0], %r2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t35( ; O0: { @@ -1075,18 +1177,28 @@ define i32 @t35(i16 %a, i32 %c) { } define i32 @t36(i16 %a, i32 %c) { -; -; O1-LABEL: t36( -; O1: { -; O1-NEXT: .reg .b16 %rs<2>; -; O1-NEXT: .reg .b32 %r<3>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b16 %rs1, [t36_param_0]; -; O1-NEXT: mul.wide.s16 %r1, %rs1, 3; -; O1-NEXT: add.s32 %r2, %r1, 5; -; O1-NEXT: st.param.b32 [func_retval0], %r2; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t36( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b16 %rs<2>; +; 
O1-NO-MAD-NEXT: .reg .b32 %r<3>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b16 %rs1, [t36_param_0]; +; O1-NO-MAD-NEXT: mul.wide.s16 %r1, %rs1, 3; +; O1-NO-MAD-NEXT: add.s32 %r2, %r1, 5; +; O1-NO-MAD-NEXT: st.param.b32 [func_retval0], %r2; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t36( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b16 %rs<2>; +; O1-MAD-NEXT: .reg .b32 %r<2>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b16 %rs1, [t36_param_0]; +; O1-MAD-NEXT: mad.wide.s16 %r1, %rs1, 3, 5; +; O1-MAD-NEXT: st.param.b32 [func_retval0], %r1; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t36( ; O0: { @@ -1107,20 +1219,32 @@ define i32 @t36(i16 %a, i32 %c) { } define i32 @t37(i16 %a, i16 %b, i32 %c) { -; -; O1-LABEL: t37( -; O1: { -; O1-NEXT: .reg .b16 %rs<3>; -; O1-NEXT: .reg .b32 %r<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b16 %rs1, [t37_param_0]; -; O1-NEXT: ld.param.b16 %rs2, [t37_param_1]; -; O1-NEXT: mul.wide.u16 %r1, %rs1, %rs2; -; O1-NEXT: ld.param.b32 %r2, [t37_param_2]; -; O1-NEXT: add.s32 %r3, %r2, %r1; -; O1-NEXT: st.param.b32 [func_retval0], %r3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t37( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b16 %rs<3>; +; O1-NO-MAD-NEXT: .reg .b32 %r<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b16 %rs1, [t37_param_0]; +; O1-NO-MAD-NEXT: ld.param.b16 %rs2, [t37_param_1]; +; O1-NO-MAD-NEXT: mul.wide.u16 %r1, %rs1, %rs2; +; O1-NO-MAD-NEXT: ld.param.b32 %r2, [t37_param_2]; +; O1-NO-MAD-NEXT: add.s32 %r3, %r2, %r1; +; O1-NO-MAD-NEXT: st.param.b32 [func_retval0], %r3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t37( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b16 %rs<3>; +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b16 %rs1, [t37_param_0]; +; O1-MAD-NEXT: ld.param.b16 %rs2, [t37_param_1]; +; O1-MAD-NEXT: ld.param.b32 %r1, [t37_param_2]; +; O1-MAD-NEXT: mad.wide.u16 %r2, %rs1, %rs2, %r1; +; O1-MAD-NEXT: st.param.b32 [func_retval0], %r2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t37( ; O0: { @@ -1143,20 +1267,32 @@ define i32 @t37(i16 %a, i16 %b, i32 %c) { } define i32 @t38(i16 %a, i16 %b, i32 %c) { -; -; O1-LABEL: t38( -; O1: { -; O1-NEXT: .reg .b16 %rs<3>; -; O1-NEXT: .reg .b32 %r<4>; -; O1-EMPTY: -; O1-NEXT: // %bb.0: -; O1-NEXT: ld.param.b16 %rs1, [t38_param_0]; -; O1-NEXT: ld.param.b16 %rs2, [t38_param_1]; -; O1-NEXT: mul.wide.u16 %r1, %rs1, %rs2; -; O1-NEXT: ld.param.b32 %r2, [t38_param_2]; -; O1-NEXT: add.s32 %r3, %r1, %r2; -; O1-NEXT: st.param.b32 [func_retval0], %r3; -; O1-NEXT: ret; +; O1-NO-MAD-LABEL: t38( +; O1-NO-MAD: { +; O1-NO-MAD-NEXT: .reg .b16 %rs<3>; +; O1-NO-MAD-NEXT: .reg .b32 %r<4>; +; O1-NO-MAD-EMPTY: +; O1-NO-MAD-NEXT: // %bb.0: +; O1-NO-MAD-NEXT: ld.param.b16 %rs1, [t38_param_0]; +; O1-NO-MAD-NEXT: ld.param.b16 %rs2, [t38_param_1]; +; O1-NO-MAD-NEXT: mul.wide.u16 %r1, %rs1, %rs2; +; O1-NO-MAD-NEXT: ld.param.b32 %r2, [t38_param_2]; +; O1-NO-MAD-NEXT: add.s32 %r3, %r1, %r2; +; O1-NO-MAD-NEXT: st.param.b32 [func_retval0], %r3; +; O1-NO-MAD-NEXT: ret; +; +; O1-MAD-LABEL: t38( +; O1-MAD: { +; O1-MAD-NEXT: .reg .b16 %rs<3>; +; O1-MAD-NEXT: .reg .b32 %r<3>; +; O1-MAD-EMPTY: +; O1-MAD-NEXT: // %bb.0: +; O1-MAD-NEXT: ld.param.b16 %rs1, [t38_param_0]; +; O1-MAD-NEXT: ld.param.b16 %rs2, [t38_param_1]; +; O1-MAD-NEXT: ld.param.b32 %r1, [t38_param_2]; +; O1-MAD-NEXT: mad.wide.u16 %r2, %rs1, %rs2, %r1; +; O1-MAD-NEXT: st.param.b32 [func_retval0], %r2; +; O1-MAD-NEXT: ret; ; ; O0-LABEL: t38( ; O0: { diff --git 
a/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll new file mode 100644 index 0000000000000..21ca041f6220a --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll @@ -0,0 +1,264 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_90a -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-SM90A %s +; RUN: %if ptxas-12.7 %{ \ +; RUN: llc < %s -mcpu=sm_90a -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | %ptxas-verify -arch=sm_90a \ +; RUN: %} +; RUN: llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-SM100 %s +; RUN: %if ptxas-12.7 %{ \ +; RUN: llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | %ptxas-verify -arch=sm_100 \ +; RUN: %} + +; Test that v2i32 -> v2f32 conversions don't emit bitwise operations on i64. + +target triple = "nvptx64-nvidia-cuda" + +declare <2 x i32> @return_i32x2(i32 %0) + +; Test with v2i32. +define ptx_kernel void @store_i32x2(i32 %0, ptr %p) { +; CHECK-SM90A-LABEL: store_i32x2( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b32 %r<6>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<2>; +; CHECK-SM90A-EMPTY: +; CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.b64 %rd1, [store_i32x2_param_1]; +; CHECK-SM90A-NEXT: ld.param.b32 %r1, [store_i32x2_param_0]; +; CHECK-SM90A-NEXT: { // callseq 0, 0 +; CHECK-SM90A-NEXT: .param .b32 param0; +; CHECK-SM90A-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-SM90A-NEXT: st.param.b32 [param0], %r1; +; CHECK-SM90A-NEXT: call.uni (retval0), return_i32x2, (param0); +; CHECK-SM90A-NEXT: ld.param.v2.b32 {%r2, %r3}, [retval0]; +; CHECK-SM90A-NEXT: } // callseq 0 +; CHECK-SM90A-NEXT: add.rn.f32 %r4, %r3, %r3; +; CHECK-SM90A-NEXT: add.rn.f32 %r5, %r2, %r2; +; CHECK-SM90A-NEXT: st.v2.b32 [%rd1], {%r5, %r4}; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: store_i32x2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<2>; +; CHECK-SM100-NEXT: .reg .b64 %rd<4>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b64 %rd1, [store_i32x2_param_1]; +; CHECK-SM100-NEXT: ld.param.b32 %r1, [store_i32x2_param_0]; +; CHECK-SM100-NEXT: { // callseq 0, 0 +; CHECK-SM100-NEXT: .param .b32 param0; +; CHECK-SM100-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-SM100-NEXT: st.param.b32 [param0], %r1; +; CHECK-SM100-NEXT: call.uni (retval0), return_i32x2, (param0); +; CHECK-SM100-NEXT: ld.param.b64 %rd2, [retval0]; +; CHECK-SM100-NEXT: } // callseq 0 +; CHECK-SM100-NEXT: add.rn.f32x2 %rd3, %rd2, %rd2; +; CHECK-SM100-NEXT: st.b64 [%rd1], %rd3; +; CHECK-SM100-NEXT: ret; + %v = call <2 x i32> @return_i32x2(i32 %0) + %v.f32x2 = bitcast <2 x i32> %v to <2 x float> + %res = fadd <2 x float> %v.f32x2, %v.f32x2 + store <2 x float> %res, ptr %p, align 8 + ret void +} + +; Test with inline ASM returning { <1 x float>, <1 x float> }, which decays to +; v2i32. 
+define ptx_kernel void @inlineasm(ptr %p) { +; CHECK-SM90A-LABEL: inlineasm( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b32 %r<7>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<2>; +; CHECK-SM90A-EMPTY: +; CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.b64 %rd1, [inlineasm_param_0]; +; CHECK-SM90A-NEXT: mov.b32 %r3, 0; +; CHECK-SM90A-NEXT: mov.b32 %r4, %r3; +; CHECK-SM90A-NEXT: mov.b32 %r2, %r4; +; CHECK-SM90A-NEXT: mov.b32 %r1, %r3; +; CHECK-SM90A-NEXT: // begin inline asm +; CHECK-SM90A-NEXT: // nop +; CHECK-SM90A-NEXT: // end inline asm +; CHECK-SM90A-NEXT: mul.rn.f32 %r5, %r2, 0f00000000; +; CHECK-SM90A-NEXT: mul.rn.f32 %r6, %r1, 0f00000000; +; CHECK-SM90A-NEXT: st.v2.b32 [%rd1], {%r6, %r5}; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: inlineasm( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<6>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b64 %rd1, [inlineasm_param_0]; +; CHECK-SM100-NEXT: mov.b32 %r3, 0; +; CHECK-SM100-NEXT: mov.b32 %r4, %r3; +; CHECK-SM100-NEXT: mov.b32 %r2, %r4; +; CHECK-SM100-NEXT: mov.b32 %r1, %r3; +; CHECK-SM100-NEXT: // begin inline asm +; CHECK-SM100-NEXT: // nop +; CHECK-SM100-NEXT: // end inline asm +; CHECK-SM100-NEXT: mov.b64 %rd2, {%r1, %r2}; +; CHECK-SM100-NEXT: mov.b32 %r5, 0f00000000; +; CHECK-SM100-NEXT: mov.b64 %rd3, {%r5, %r5}; +; CHECK-SM100-NEXT: mul.rn.f32x2 %rd4, %rd2, %rd3; +; CHECK-SM100-NEXT: st.b64 [%rd1], %rd4; +; CHECK-SM100-NEXT: ret; + %r = call { <1 x float>, <1 x float> } asm sideeffect "// nop", "=f,=f,0,1"(<1 x float> zeroinitializer, <1 x float> zeroinitializer) + %i0 = extractvalue { <1 x float>, <1 x float> } %r, 0 + %i1 = extractvalue { <1 x float>, <1 x float> } %r, 1 + %i4 = shufflevector <1 x float> %i0, <1 x float> %i1, <2 x i32> <i32 0, i32 1> + %mul = fmul <2 x float> %i4, zeroinitializer + store <2 x float> %mul, ptr %p, align 8 + ret void +} + +define ptx_kernel void @trunc_v2i32(<2 x i32> %0) { +; CHECK-SM90A-LABEL: trunc_v2i32( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b32 %r<7>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<2>; +; CHECK-SM90A-EMPTY: +; CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.v2.b32 {%r1, %r2}, [trunc_v2i32_param_0]; +; CHECK-SM90A-NEXT: prmt.b32 %r3, %r1, %r2, 0x3340U; +; CHECK-SM90A-NEXT: mov.b32 %r4, 0; +; CHECK-SM90A-NEXT: prmt.b32 %r5, %r4, 0, 0x3340U; +; CHECK-SM90A-NEXT: prmt.b32 %r6, %r5, %r3, 0x5410U; +; CHECK-SM90A-NEXT: mov.b64 %rd1, 0; +; CHECK-SM90A-NEXT: st.b32 [%rd1], %r6; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: trunc_v2i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<7>; +; CHECK-SM100-NEXT: .reg .b64 %rd<3>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b64 %rd1, [trunc_v2i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM100-NEXT: mov.b32 %r3, 0; +; CHECK-SM100-NEXT: prmt.b32 %r4, %r3, 0, 0x3340U; +; CHECK-SM100-NEXT: prmt.b32 %r5, %r1, %r2, 0x3340U; +; CHECK-SM100-NEXT: prmt.b32 %r6, %r4, %r5, 0x5410U; +; CHECK-SM100-NEXT: mov.b64 %rd2, 0; +; CHECK-SM100-NEXT: st.b32 [%rd2], %r6; +; CHECK-SM100-NEXT: ret; + %2 = trunc <2 x i32> %0 to <2 x i8> + %3 = shufflevector <2 x i8> zeroinitializer, <2 x i8> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i8> %3, ptr null, align 4 + ret void +} + +define ptx_kernel void @zextend_to_v2i32(<2 x i8> %0) { +; CHECK-SM90A-LABEL: zextend_to_v2i32( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b16 %rs<3>; +; CHECK-SM90A-NEXT: .reg .b32 %r<4>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<5>; +; CHECK-SM90A-EMPTY: +; 
CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [zextend_to_v2i32_param_0]; +; CHECK-SM90A-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-SM90A-NEXT: cvt.u32.u16 %r2, %rs1; +; CHECK-SM90A-NEXT: cvt.u32.u16 %r3, %rs2; +; CHECK-SM90A-NEXT: mov.b64 %rd1, 12; +; CHECK-SM90A-NEXT: st.b32 [%rd1], %r3; +; CHECK-SM90A-NEXT: mov.b64 %rd2, 8; +; CHECK-SM90A-NEXT: st.b32 [%rd2], %r2; +; CHECK-SM90A-NEXT: mov.b64 %rd3, 4; +; CHECK-SM90A-NEXT: st.b32 [%rd3], 0; +; CHECK-SM90A-NEXT: mov.b64 %rd4, 0; +; CHECK-SM90A-NEXT: st.b32 [%rd4], 0; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: zextend_to_v2i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b16 %rs<3>; +; CHECK-SM100-NEXT: .reg .b32 %r<5>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [zextend_to_v2i32_param_0]; +; CHECK-SM100-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-SM100-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r3, %rs1; +; CHECK-SM100-NEXT: mov.b64 %rd1, {%r3, %r2}; +; CHECK-SM100-NEXT: mov.b32 %r4, 0; +; CHECK-SM100-NEXT: mov.b64 %rd2, {%r4, %r4}; +; CHECK-SM100-NEXT: mov.b64 %rd3, 4; +; CHECK-SM100-NEXT: st.b32 [%rd3], %rd2; +; CHECK-SM100-NEXT: mov.b64 %rd4, 0; +; CHECK-SM100-NEXT: st.b32 [%rd4], %rd2; +; CHECK-SM100-NEXT: mov.b64 %rd5, 8; +; CHECK-SM100-NEXT: st.b32 [%rd5], %rd1; +; CHECK-SM100-NEXT: shr.u64 %rd6, %rd1, 32; +; CHECK-SM100-NEXT: mov.b64 %rd7, 12; +; CHECK-SM100-NEXT: st.b32 [%rd7], %rd6; +; CHECK-SM100-NEXT: ret; + %2 = zext <2 x i8> %0 to <2 x i32> + %3 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i32> %3, ptr null, align 4 + ret void +} + +define ptx_kernel void @sextend_to_v2i32(<2 x i8> %0) { +; CHECK-SM90A-LABEL: sextend_to_v2i32( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b16 %rs<3>; +; CHECK-SM90A-NEXT: .reg .b32 %r<6>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<5>; +; CHECK-SM90A-EMPTY: +; CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [sextend_to_v2i32_param_0]; +; CHECK-SM90A-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-SM90A-NEXT: cvt.u32.u16 %r2, %rs1; +; CHECK-SM90A-NEXT: cvt.s32.s8 %r3, %r2; +; CHECK-SM90A-NEXT: cvt.u32.u16 %r4, %rs2; +; CHECK-SM90A-NEXT: cvt.s32.s8 %r5, %r4; +; CHECK-SM90A-NEXT: mov.b64 %rd1, 12; +; CHECK-SM90A-NEXT: st.b32 [%rd1], %r5; +; CHECK-SM90A-NEXT: mov.b64 %rd2, 8; +; CHECK-SM90A-NEXT: st.b32 [%rd2], %r3; +; CHECK-SM90A-NEXT: mov.b64 %rd3, 4; +; CHECK-SM90A-NEXT: st.b32 [%rd3], 0; +; CHECK-SM90A-NEXT: mov.b64 %rd4, 0; +; CHECK-SM90A-NEXT: st.b32 [%rd4], 0; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: sextend_to_v2i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b16 %rs<3>; +; CHECK-SM100-NEXT: .reg .b32 %r<7>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [sextend_to_v2i32_param_0]; +; CHECK-SM100-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-SM100-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-SM100-NEXT: cvt.s32.s8 %r3, %r2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r4, %rs1; +; CHECK-SM100-NEXT: cvt.s32.s8 %r5, %r4; +; CHECK-SM100-NEXT: mov.b64 %rd1, {%r5, %r3}; +; CHECK-SM100-NEXT: mov.b32 %r6, 0; +; CHECK-SM100-NEXT: mov.b64 %rd2, {%r6, %r6}; +; CHECK-SM100-NEXT: mov.b64 %rd3, 4; +; CHECK-SM100-NEXT: st.b32 [%rd3], %rd2; +; CHECK-SM100-NEXT: mov.b64 %rd4, 0; +; CHECK-SM100-NEXT: st.b32 [%rd4], %rd2; +; CHECK-SM100-NEXT: mov.b64 %rd5, 8; +; CHECK-SM100-NEXT: st.b32 [%rd5], %rd1; +; CHECK-SM100-NEXT: 
shr.u64 %rd6, %rd1, 32; +; CHECK-SM100-NEXT: mov.b64 %rd7, 12; +; CHECK-SM100-NEXT: st.b32 [%rd7], %rd6; +; CHECK-SM100-NEXT: ret; + %2 = sext <2 x i8> %0 to <2 x i32> + %3 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i32> %3, ptr null, align 4 + ret void +} +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll index 217bb483682ff..a90cfff51e2c6 100644 --- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll @@ -1938,16 +1938,29 @@ define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 { } define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 { -; CHECK-LABEL: test_uitofp_2xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_param_0]; -; CHECK-NEXT: cvt.rn.f32.u32 %r3, %r2; -; CHECK-NEXT: cvt.rn.f32.u32 %r4, %r1; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_uitofp_2xi32( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_param_0]; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_uitofp_2xi32( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b32 %r<5>; +; CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; +; CHECK-F32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-F32X2-NEXT: ret; %r = uitofp <2 x i32> %a to <2 x float> ret <2 x float> %r } @@ -1969,16 +1982,29 @@ define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 { } define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 { -; CHECK-LABEL: test_sitofp_2xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_param_0]; -; CHECK-NEXT: cvt.rn.f32.s32 %r3, %r2; -; CHECK-NEXT: cvt.rn.f32.s32 %r4, %r1; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_sitofp_2xi32( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_param_0]; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_sitofp_2xi32( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b32 %r<5>; +; CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_sitofp_2xi32_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-F32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-F32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-F32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-F32X2-NEXT: ret; %r 
= sitofp <2 x i32> %a to <2 x float> ret <2 x float> %r } @@ -2017,16 +2043,17 @@ define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 { ; CHECK-F32X2-LABEL: test_uitofp_2xi32_fadd( ; CHECK-F32X2: { ; CHECK-F32X2-NEXT: .reg .b32 %r<5>; -; CHECK-F32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-F32X2-NEXT: .reg .b64 %rd<5>; ; CHECK-F32X2-EMPTY: ; CHECK-F32X2-NEXT: // %bb.0: -; CHECK-F32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0]; -; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_fadd_param_1]; +; CHECK-F32X2-NEXT: ld.param.b64 %rd2, [test_uitofp_2xi32_fadd_param_1]; +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_fadd_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; ; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; ; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; -; CHECK-F32X2-NEXT: mov.b64 %rd2, {%r4, %r3}; -; CHECK-F32X2-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2; -; CHECK-F32X2-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-F32X2-NEXT: mov.b64 %rd3, {%r4, %r3}; +; CHECK-F32X2-NEXT: add.rn.f32x2 %rd4, %rd2, %rd3; +; CHECK-F32X2-NEXT: st.param.b64 [func_retval0], %rd4; ; CHECK-F32X2-NEXT: ret; %c = uitofp <2 x i32> %a to <2 x float> %r = fadd <2 x float> %b, %c @@ -2114,14 +2141,23 @@ define <2 x i32> @test_bitcast_2xfloat_to_2xi32(<2 x float> %a) #0 { } define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 { -; CHECK-LABEL: test_bitcast_2xi32_to_2xfloat( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0]; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r2}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_bitcast_2xi32_to_2xfloat( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0]; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r2}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_bitcast_2xi32_to_2xfloat( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_bitcast_2xi32_to_2xfloat_param_0]; +; CHECK-F32X2-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-F32X2-NEXT: ret; %r = bitcast <2 x i32> %a to <2 x float> ret <2 x float> %r } @@ -2851,31 +2887,57 @@ define <2 x float> @test_insertelement(<2 x float> %a, float %x) #0 { } define <2 x float> @test_sitofp_2xi32_to_2xfloat(<2 x i32> %a) #0 { -; CHECK-LABEL: test_sitofp_2xi32_to_2xfloat( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_to_2xfloat_param_0]; -; CHECK-NEXT: cvt.rn.f32.s32 %r3, %r2; -; CHECK-NEXT: cvt.rn.f32.s32 %r4, %r1; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_sitofp_2xi32_to_2xfloat( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_to_2xfloat_param_0]; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_sitofp_2xi32_to_2xfloat( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b32 %r<5>; +; 
CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_sitofp_2xi32_to_2xfloat_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-F32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-F32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-F32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-F32X2-NEXT: ret; %r = sitofp <2 x i32> %a to <2 x float> ret <2 x float> %r } define <2 x float> @test_uitofp_2xi32_to_2xfloat(<2 x i32> %a) #0 { -; CHECK-LABEL: test_uitofp_2xi32_to_2xfloat( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_to_2xfloat_param_0]; -; CHECK-NEXT: cvt.rn.f32.u32 %r3, %r2; -; CHECK-NEXT: cvt.rn.f32.u32 %r4, %r1; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_uitofp_2xi32_to_2xfloat( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_to_2xfloat_param_0]; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_uitofp_2xi32_to_2xfloat( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b32 %r<5>; +; CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_to_2xfloat_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; +; CHECK-F32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-F32X2-NEXT: ret; %r = uitofp <2 x i32> %a to <2 x float> ret <2 x float> %r } diff --git a/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll b/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll index d219493d2b31b..3fac29f74125b 100644 --- a/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll +++ b/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll @@ -346,15 +346,19 @@ define i32 @ld_global_v8i32(ptr addrspace(1) %ptr) { ; SM100-LABEL: ld_global_v8i32( ; SM100: { ; SM100-NEXT: .reg .b32 %r<16>; -; SM100-NEXT: .reg .b64 %rd<2>; +; SM100-NEXT: .reg .b64 %rd<6>; ; SM100-EMPTY: ; SM100-NEXT: // %bb.0: ; SM100-NEXT: ld.param.b64 %rd1, [ld_global_v8i32_param_0]; -; SM100-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; SM100-NEXT: add.s32 %r9, %r1, %r2; -; SM100-NEXT: add.s32 %r10, %r3, %r4; -; SM100-NEXT: add.s32 %r11, %r5, %r6; -; SM100-NEXT: add.s32 %r12, %r7, %r8; +; SM100-NEXT: ld.global.nc.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; SM100-NEXT: mov.b64 {%r1, %r2}, %rd5; +; SM100-NEXT: mov.b64 {%r3, %r4}, %rd4; +; SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; SM100-NEXT: add.s32 %r9, %r7, %r8; +; SM100-NEXT: add.s32 %r10, %r5, %r6; +; SM100-NEXT: add.s32 %r11, %r3, %r4; +; SM100-NEXT: add.s32 %r12, %r1, %r2; ; SM100-NEXT: add.s32 %r13, %r9, %r10; ; SM100-NEXT: add.s32 %r14, %r11, %r12; ; SM100-NEXT: add.s32 %r15, %r13, %r14; diff --git a/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll b/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll index 12e3287e73f0f..57852451c0c72 100644 --- a/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll +++ 
b/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll @@ -82,11 +82,11 @@ define void @avar_bfloat() { define void @avar_i32() { ; PTX-LABEL: avar_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; +; PTX-NEXT: .reg .b64 %rd<5>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: -; PTX-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin]; -; PTX-NEXT: st.global.v8.b32 [globalout], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.nc.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin]; +; PTX-NEXT: st.global.v4.b64 [globalout], {%rd1, %rd2, %rd3, %rd4}; ; PTX-NEXT: ret; %load = load <8 x i32>, ptr addrspace(1) @globalin, !invariant.load !0 store <8 x i32> %load, ptr addrspace(1) @globalout @@ -202,11 +202,11 @@ define void @asi_bfloat() { define void @asi_i32() { ; PTX-LABEL: asi_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; +; PTX-NEXT: .reg .b64 %rd<5>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: -; PTX-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin+32]; -; PTX-NEXT: st.global.v8.b32 [globalout+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.nc.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin+32]; +; PTX-NEXT: st.global.v4.b64 [globalout+32], {%rd1, %rd2, %rd3, %rd4}; ; PTX-NEXT: ret; %in.offset = getelementptr inbounds i8, ptr addrspace(1) @globalin, i32 32 %load = load <8 x i32>, ptr addrspace(1) %in.offset, !invariant.load !0 @@ -331,14 +331,13 @@ define void @areg_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) { define void @areg_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) { ; PTX-LABEL: areg_64_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; -; PTX-NEXT: .reg .b64 %rd<3>; +; PTX-NEXT: .reg .b64 %rd<7>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: ; PTX-NEXT: ld.param.b64 %rd1, [areg_64_i32_param_0]; -; PTX-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; PTX-NEXT: ld.param.b64 %rd2, [areg_64_i32_param_1]; -; PTX-NEXT: st.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.nc.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; PTX-NEXT: ld.param.b64 %rd6, [areg_64_i32_param_1]; +; PTX-NEXT: st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5}; ; PTX-NEXT: ret; %load = load <8 x i32>, ptr addrspace(1) %in, !invariant.load !0 store <8 x i32> %load, ptr addrspace(1) %out @@ -472,14 +471,13 @@ define void @ari_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) { define void @ari_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) { ; PTX-LABEL: ari_64_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; -; PTX-NEXT: .reg .b64 %rd<3>; +; PTX-NEXT: .reg .b64 %rd<7>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: ; PTX-NEXT: ld.param.b64 %rd1, [ari_64_i32_param_0]; ; PTX-NEXT: ld.param.b64 %rd2, [ari_64_i32_param_1]; -; PTX-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1+32]; -; PTX-NEXT: st.global.v8.b32 [%rd2+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.nc.v4.b64 {%rd3, %rd4, %rd5, %rd6}, [%rd1+32]; +; PTX-NEXT: st.global.v4.b64 [%rd2+32], {%rd3, %rd4, %rd5, %rd6}; ; PTX-NEXT: ret; %in.offset = getelementptr inbounds i8, ptr addrspace(1) %in, i32 32 %load = load <8 x i32>, ptr addrspace(1) %in.offset, !invariant.load !0 diff --git a/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll b/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll index b7fa1dd5f2c4d..21604dfbf0013 100644 --- a/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll +++ b/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll @@ -78,11 +78,11 @@ define void @avar_bfloat() { 
define void @avar_i32() { ; PTX-LABEL: avar_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; +; PTX-NEXT: .reg .b64 %rd<5>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: -; PTX-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin]; -; PTX-NEXT: st.global.v8.b32 [globalout], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin]; +; PTX-NEXT: st.global.v4.b64 [globalout], {%rd1, %rd2, %rd3, %rd4}; ; PTX-NEXT: ret; %load = load <8 x i32>, ptr addrspace(1) @globalin store <8 x i32> %load, ptr addrspace(1) @globalout @@ -198,11 +198,11 @@ define void @asi_bfloat() { define void @asi_i32() { ; PTX-LABEL: asi_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; +; PTX-NEXT: .reg .b64 %rd<5>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: -; PTX-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin+32]; -; PTX-NEXT: st.global.v8.b32 [globalout+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin+32]; +; PTX-NEXT: st.global.v4.b64 [globalout+32], {%rd1, %rd2, %rd3, %rd4}; ; PTX-NEXT: ret; %in.offset = getelementptr inbounds i8, ptr addrspace(1) @globalin, i32 32 %load = load <8 x i32>, ptr addrspace(1) %in.offset @@ -327,14 +327,13 @@ define void @areg_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) { define void @areg_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) { ; PTX-LABEL: areg_64_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; -; PTX-NEXT: .reg .b64 %rd<3>; +; PTX-NEXT: .reg .b64 %rd<7>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: ; PTX-NEXT: ld.param.b64 %rd1, [areg_64_i32_param_0]; -; PTX-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; PTX-NEXT: ld.param.b64 %rd2, [areg_64_i32_param_1]; -; PTX-NEXT: st.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; PTX-NEXT: ld.param.b64 %rd6, [areg_64_i32_param_1]; +; PTX-NEXT: st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5}; ; PTX-NEXT: ret; %load = load <8 x i32>, ptr addrspace(1) %in store <8 x i32> %load, ptr addrspace(1) %out @@ -468,14 +467,13 @@ define void @ari_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) { define void @ari_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) { ; PTX-LABEL: ari_64_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; -; PTX-NEXT: .reg .b64 %rd<3>; +; PTX-NEXT: .reg .b64 %rd<7>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: ; PTX-NEXT: ld.param.b64 %rd1, [ari_64_i32_param_0]; ; PTX-NEXT: ld.param.b64 %rd2, [ari_64_i32_param_1]; -; PTX-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1+32]; -; PTX-NEXT: st.global.v8.b32 [%rd2+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.v4.b64 {%rd3, %rd4, %rd5, %rd6}, [%rd1+32]; +; PTX-NEXT: st.global.v4.b64 [%rd2+32], {%rd3, %rd4, %rd5, %rd6}; ; PTX-NEXT: ret; %in.offset = getelementptr inbounds i8, ptr addrspace(1) %in, i32 32 %load = load <8 x i32>, ptr addrspace(1) %in.offset diff --git a/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll b/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll index e8b43ad28ad27..b5319935f0f9d 100644 --- a/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll +++ b/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll @@ -100,19 +100,32 @@ define void @generic_16xbfloat(ptr %a, ptr %b) { } define void @generic_8xi32(ptr %a, ptr %b) { -; CHECK-LABEL: generic_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; 
CHECK-NEXT: ld.param.b64 %rd1, [generic_8xi32_param_0]; -; CHECK-NEXT: ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [generic_8xi32_param_1]; -; CHECK-NEXT: st.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: generic_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [generic_8xi32_param_0]; +; SM90-NEXT: ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [generic_8xi32_param_1]; +; SM90-NEXT: st.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: generic_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [generic_8xi32_param_0]; +; SM100-NEXT: ld.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [generic_8xi32_param_1]; +; SM100-NEXT: st.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load <8 x i32>, ptr %a store <8 x i32> %a.load, ptr %b ret void @@ -265,19 +278,32 @@ define void @generic_volatile_16xbfloat(ptr %a, ptr %b) { } define void @generic_volatile_8xi32(ptr %a, ptr %b) { -; CHECK-LABEL: generic_volatile_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [generic_volatile_8xi32_param_0]; -; CHECK-NEXT: ld.volatile.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.volatile.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [generic_volatile_8xi32_param_1]; -; CHECK-NEXT: st.volatile.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.volatile.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: generic_volatile_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [generic_volatile_8xi32_param_0]; +; SM90-NEXT: ld.volatile.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.volatile.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [generic_volatile_8xi32_param_1]; +; SM90-NEXT: st.volatile.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.volatile.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: generic_volatile_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [generic_volatile_8xi32_param_0]; +; SM100-NEXT: ld.volatile.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.volatile.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [generic_volatile_8xi32_param_1]; +; SM100-NEXT: st.volatile.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.volatile.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load volatile <8 x i32>, ptr %a store volatile <8 x i32> %a.load, ptr %b ret void @@ -496,14 +522,13 @@ define void @global_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) { ; ; SM100-LABEL: global_8xi32( ; SM100: { -; SM100-NEXT: .reg .b32 %r<9>; -; SM100-NEXT: .reg .b64 %rd<3>; +; SM100-NEXT: .reg .b64 %rd<7>; ; SM100-EMPTY: ; SM100-NEXT: // %bb.0: ; SM100-NEXT: 
ld.param.b64 %rd1, [global_8xi32_param_0]; -; SM100-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; SM100-NEXT: ld.param.b64 %rd2, [global_8xi32_param_1]; -; SM100-NEXT: st.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; SM100-NEXT: ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; SM100-NEXT: ld.param.b64 %rd6, [global_8xi32_param_1]; +; SM100-NEXT: st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5}; ; SM100-NEXT: ret; %a.load = load <8 x i32>, ptr addrspace(1) %a store <8 x i32> %a.load, ptr addrspace(1) %b @@ -741,14 +766,13 @@ define void @global_volatile_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) { ; ; SM100-LABEL: global_volatile_8xi32( ; SM100: { -; SM100-NEXT: .reg .b32 %r<9>; -; SM100-NEXT: .reg .b64 %rd<3>; +; SM100-NEXT: .reg .b64 %rd<7>; ; SM100-EMPTY: ; SM100-NEXT: // %bb.0: ; SM100-NEXT: ld.param.b64 %rd1, [global_volatile_8xi32_param_0]; -; SM100-NEXT: ld.volatile.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; SM100-NEXT: ld.param.b64 %rd2, [global_volatile_8xi32_param_1]; -; SM100-NEXT: st.volatile.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; SM100-NEXT: ld.volatile.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; SM100-NEXT: ld.param.b64 %rd6, [global_volatile_8xi32_param_1]; +; SM100-NEXT: st.volatile.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5}; ; SM100-NEXT: ret; %a.load = load volatile <8 x i32>, ptr addrspace(1) %a store volatile <8 x i32> %a.load, ptr addrspace(1) %b @@ -924,19 +948,32 @@ define void @shared_16xbfloat(ptr addrspace(3) %a, ptr addrspace(3) %b) { } define void @shared_8xi32(ptr addrspace(3) %a, ptr addrspace(3) %b) { -; CHECK-LABEL: shared_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [shared_8xi32_param_0]; -; CHECK-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [shared_8xi32_param_1]; -; CHECK-NEXT: st.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: shared_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [shared_8xi32_param_0]; +; SM90-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [shared_8xi32_param_1]; +; SM90-NEXT: st.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: shared_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [shared_8xi32_param_0]; +; SM100-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.shared.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [shared_8xi32_param_1]; +; SM100-NEXT: st.shared.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.shared.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load <8 x i32>, ptr addrspace(3) %a store <8 x i32> %a.load, ptr addrspace(3) %b ret void @@ -1089,19 +1126,32 @@ define void @shared_volatile_16xbfloat(ptr addrspace(3) %a, ptr addrspace(3) %b) } define void @shared_volatile_8xi32(ptr addrspace(3) %a, ptr addrspace(3) %b) { -; CHECK-LABEL: shared_volatile_8xi32( -; 
CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [shared_volatile_8xi32_param_0]; -; CHECK-NEXT: ld.volatile.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.volatile.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [shared_volatile_8xi32_param_1]; -; CHECK-NEXT: st.volatile.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.volatile.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: shared_volatile_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [shared_volatile_8xi32_param_0]; +; SM90-NEXT: ld.volatile.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.volatile.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [shared_volatile_8xi32_param_1]; +; SM90-NEXT: st.volatile.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.volatile.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: shared_volatile_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [shared_volatile_8xi32_param_0]; +; SM100-NEXT: ld.volatile.shared.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.volatile.shared.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [shared_volatile_8xi32_param_1]; +; SM100-NEXT: st.volatile.shared.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.volatile.shared.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load volatile <8 x i32>, ptr addrspace(3) %a store volatile <8 x i32> %a.load, ptr addrspace(3) %b ret void @@ -1256,19 +1306,32 @@ define void @local_16xbfloat(ptr addrspace(5) %a, ptr addrspace(5) %b) { } define void @local_8xi32(ptr addrspace(5) %a, ptr addrspace(5) %b) { -; CHECK-LABEL: local_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [local_8xi32_param_0]; -; CHECK-NEXT: ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [local_8xi32_param_1]; -; CHECK-NEXT: st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: local_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [local_8xi32_param_0]; +; SM90-NEXT: ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [local_8xi32_param_1]; +; SM90-NEXT: st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: local_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [local_8xi32_param_0]; +; SM100-NEXT: ld.local.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [local_8xi32_param_1]; +; SM100-NEXT: st.local.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.local.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load <8 x i32>, ptr addrspace(5) %a store <8 x i32> 
%a.load, ptr addrspace(5) %b ret void @@ -1421,19 +1484,32 @@ define void @local_volatile_16xbfloat(ptr addrspace(5) %a, ptr addrspace(5) %b) } define void @local_volatile_8xi32(ptr addrspace(5) %a, ptr addrspace(5) %b) { -; CHECK-LABEL: local_volatile_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [local_volatile_8xi32_param_0]; -; CHECK-NEXT: ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [local_volatile_8xi32_param_1]; -; CHECK-NEXT: st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: local_volatile_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [local_volatile_8xi32_param_0]; +; SM90-NEXT: ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [local_volatile_8xi32_param_1]; +; SM90-NEXT: st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: local_volatile_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [local_volatile_8xi32_param_0]; +; SM100-NEXT: ld.local.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [local_volatile_8xi32_param_1]; +; SM100-NEXT: st.local.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.local.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load volatile <8 x i32>, ptr addrspace(5) %a store volatile <8 x i32> %a.load, ptr addrspace(5) %b ret void diff --git a/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir b/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir new file mode 100644 index 0000000000000..0b2d85600a2ef --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir @@ -0,0 +1,80 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -mtriple=nvptx64 -mcpu=sm_20 -run-pass=early-machinelicm %s -o - | FileCheck %s + +# This test checks that the early-machineLICM pass successfully creates a new +# loop preheader by splitting the critical edge and hoisting the loop invariant +# value `%8` to the preheader. +# Since the critical edge successor is a loop header, the splitting does not +# break the structured CFG, which is a requirement for the NVPTX target. 
+ +--- +name: test_hoist +tracksRegLiveness: true +registers: + - { id: 0, class: b64, preferred-register: '', flags: [ ] } + - { id: 1, class: b32, preferred-register: '', flags: [ ] } + - { id: 2, class: b32, preferred-register: '', flags: [ ] } + - { id: 3, class: b32, preferred-register: '', flags: [ ] } + - { id: 4, class: b32, preferred-register: '', flags: [ ] } + - { id: 5, class: b32, preferred-register: '', flags: [ ] } + - { id: 6, class: b64, preferred-register: '', flags: [ ] } + - { id: 7, class: b1, preferred-register: '', flags: [ ] } + - { id: 8, class: b32, preferred-register: '', flags: [ ] } + - { id: 9, class: b1, preferred-register: '', flags: [ ] } +body: | + ; CHECK-LABEL: name: test_hoist + ; CHECK: bb.0.entry: + ; CHECK-NEXT: successors: %bb.2(0x30000000), %bb.3(0x50000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[LD_i32_:%[0-9]+]]:b32 = LD_i32 0, 0, 101, 3, 32, &test_hoist_param_1, 0 :: (dereferenceable invariant load (s32), addrspace 101) + ; CHECK-NEXT: [[LD_i64_:%[0-9]+]]:b64 = LD_i64 0, 0, 101, 3, 64, &test_hoist_param_0, 0 :: (dereferenceable invariant load (s64), addrspace 101) + ; CHECK-NEXT: [[ADD64ri:%[0-9]+]]:b64 = nuw ADD64ri killed [[LD_i64_]], 2 + ; CHECK-NEXT: [[LD_i32_1:%[0-9]+]]:b32 = LD_i32 0, 0, 1, 3, 32, [[ADD64ri]], 0 + ; CHECK-NEXT: [[SETP_i32ri:%[0-9]+]]:b1 = SETP_i32ri [[LD_i32_]], 0, 0 + ; CHECK-NEXT: CBranch killed [[SETP_i32ri]], %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[ADD32ri:%[0-9]+]]:b32 = ADD32ri [[LD_i32_]], -1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.2(0x04000000), %bb.1(0x7c000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[PHI:%[0-9]+]]:b32 = PHI [[LD_i32_1]], %bb.3, %3, %bb.1 + ; CHECK-NEXT: [[SREM32rr:%[0-9]+]]:b32 = SREM32rr [[PHI]], [[ADD32ri]] + ; CHECK-NEXT: [[SETP_i32ri1:%[0-9]+]]:b1 = SETP_i32ri [[SREM32rr]], 0, 1 + ; CHECK-NEXT: CBranch killed [[SETP_i32ri1]], %bb.1 + ; CHECK-NEXT: GOTO %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: [[PHI1:%[0-9]+]]:b32 = PHI [[LD_i32_1]], %bb.0, [[SREM32rr]], %bb.1 + ; CHECK-NEXT: ST_i32 [[PHI1]], 0, 0, 1, 32, [[ADD64ri]], 0 + ; CHECK-NEXT: Return + bb.0.entry: + successors: %bb.2(0x30000000), %bb.1(0x50000000) + + %5:b32 = LD_i32 0, 0, 101, 3, 32, &test_hoist_param_1, 0 :: (dereferenceable invariant load (s32), addrspace 101) + %6:b64 = LD_i64 0, 0, 101, 3, 64, &test_hoist_param_0, 0 :: (dereferenceable invariant load (s64), addrspace 101) + %0:b64 = nuw ADD64ri killed %6, 2 + %1:b32 = LD_i32 0, 0, 1, 3, 32, %0, 0 + %7:b1 = SETP_i32ri %5, 0, 0 + CBranch killed %7, %bb.2 + GOTO %bb.1 + + + bb.1: + successors: %bb.2(0x04000000), %bb.1(0x7c000000) + + %2:b32 = PHI %1, %bb.0, %3, %bb.1 + %8:b32 = ADD32ri %5, -1 + %3:b32 = SREM32rr %2, %8 + %9:b1 = SETP_i32ri %3, 0, 1 + CBranch killed %9, %bb.1 + GOTO %bb.2 + + bb.2: + %4:b32 = PHI %1, %bb.0, %3, %bb.1 + ST_i32 %4, 0, 0, 1, 32, %0, 0 + Return +... diff --git a/llvm/test/CodeGen/NVPTX/math-intrins.ll b/llvm/test/CodeGen/NVPTX/math-intrins.ll index 5a55fa97033b7..625c93c3f0a53 100644 --- a/llvm/test/CodeGen/NVPTX/math-intrins.ll +++ b/llvm/test/CodeGen/NVPTX/math-intrins.ll @@ -1586,54 +1586,25 @@ define double @minimumnum_double(double %a, double %b) { ret double %x } -; TODO Improve the "Expand" path for minimumnum vectors on targets where -; f16 is not supported. 
Ideally it should use two f32 minimumnums first instead of -; fully expanding the minimumnum instruction into compare/select instructions. define <2 x half> @minimumnum_v2half(<2 x half> %a, <2 x half> %b) { ; CHECK-NOF16-LABEL: minimumnum_v2half( ; CHECK-NOF16: { -; CHECK-NOF16-NEXT: .reg .pred %p<13>; -; CHECK-NOF16-NEXT: .reg .b16 %rs<17>; -; CHECK-NOF16-NEXT: .reg .b32 %r<11>; +; CHECK-NOF16-NEXT: .reg .b16 %rs<7>; +; CHECK-NOF16-NEXT: .reg .b32 %r<8>; ; CHECK-NOF16-EMPTY: ; CHECK-NOF16-NEXT: // %bb.0: ; CHECK-NOF16-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [minimumnum_v2half_param_0]; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r1, %rs2; -; CHECK-NOF16-NEXT: setp.nan.f32 %p1, %r1, %r1; ; CHECK-NOF16-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [minimumnum_v2half_param_1]; -; CHECK-NOF16-NEXT: selp.b16 %rs5, %rs4, %rs2, %p1; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r2, %rs5; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r3, %rs4; -; CHECK-NOF16-NEXT: setp.nan.f32 %p2, %r3, %r3; -; CHECK-NOF16-NEXT: selp.b16 %rs6, %rs5, %rs4, %p2; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs6; -; CHECK-NOF16-NEXT: setp.lt.f32 %p3, %r2, %r4; -; CHECK-NOF16-NEXT: selp.b16 %rs7, %rs5, %rs6, %p3; -; CHECK-NOF16-NEXT: setp.eq.b16 %p4, %rs5, -32768; -; CHECK-NOF16-NEXT: selp.b16 %rs8, %rs5, %rs7, %p4; -; CHECK-NOF16-NEXT: setp.eq.b16 %p5, %rs6, -32768; -; CHECK-NOF16-NEXT: selp.b16 %rs9, %rs6, %rs8, %p5; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r5, %rs7; -; CHECK-NOF16-NEXT: setp.eq.f32 %p6, %r5, 0f00000000; -; CHECK-NOF16-NEXT: selp.b16 %rs10, %rs9, %rs7, %p6; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r6, %rs1; -; CHECK-NOF16-NEXT: setp.nan.f32 %p7, %r6, %r6; -; CHECK-NOF16-NEXT: selp.b16 %rs11, %rs3, %rs1, %p7; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r7, %rs11; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r8, %rs3; -; CHECK-NOF16-NEXT: setp.nan.f32 %p8, %r8, %r8; -; CHECK-NOF16-NEXT: selp.b16 %rs12, %rs11, %rs3, %p8; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r9, %rs12; -; CHECK-NOF16-NEXT: setp.lt.f32 %p9, %r7, %r9; -; CHECK-NOF16-NEXT: selp.b16 %rs13, %rs11, %rs12, %p9; -; CHECK-NOF16-NEXT: setp.eq.b16 %p10, %rs11, -32768; -; CHECK-NOF16-NEXT: selp.b16 %rs14, %rs11, %rs13, %p10; -; CHECK-NOF16-NEXT: setp.eq.b16 %p11, %rs12, -32768; -; CHECK-NOF16-NEXT: selp.b16 %rs15, %rs12, %rs14, %p11; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r10, %rs13; -; CHECK-NOF16-NEXT: setp.eq.f32 %p12, %r10, 0f00000000; -; CHECK-NOF16-NEXT: selp.b16 %rs16, %rs15, %rs13, %p12; -; CHECK-NOF16-NEXT: st.param.v2.b16 [func_retval0], {%rs16, %rs10}; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r1, %rs4; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r2, %rs2; +; CHECK-NOF16-NEXT: min.f32 %r3, %r2, %r1; +; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs5, %r3; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs3; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r5, %rs1; +; CHECK-NOF16-NEXT: min.f32 %r6, %r5, %r4; +; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %r6; +; CHECK-NOF16-NEXT: mov.b32 %r7, {%rs6, %rs5}; +; CHECK-NOF16-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: minimumnum_v2half( @@ -1649,48 +1620,22 @@ define <2 x half> @minimumnum_v2half(<2 x half> %a, <2 x half> %b) { ; ; CHECK-SM80-NOF16-LABEL: minimumnum_v2half( ; CHECK-SM80-NOF16: { -; CHECK-SM80-NOF16-NEXT: .reg .pred %p<13>; -; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<17>; -; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<11>; +; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<7>; +; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<8>; ; CHECK-SM80-NOF16-EMPTY: ; CHECK-SM80-NOF16-NEXT: // %bb.0: ; CHECK-SM80-NOF16-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [minimumnum_v2half_param_0]; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r1, 
%rs2; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p1, %r1, %r1; ; CHECK-SM80-NOF16-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [minimumnum_v2half_param_1]; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs5, %rs4, %rs2, %p1; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r2, %rs5; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r3, %rs4; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p2, %r3, %r3; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs6, %rs5, %rs4, %p2; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r4, %rs6; -; CHECK-SM80-NOF16-NEXT: setp.lt.f32 %p3, %r2, %r4; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs7, %rs5, %rs6, %p3; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p4, %rs5, -32768; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs8, %rs5, %rs7, %p4; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p5, %rs6, -32768; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs9, %rs6, %rs8, %p5; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r5, %rs7; -; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p6, %r5, 0f00000000; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs10, %rs9, %rs7, %p6; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r6, %rs1; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p7, %r6, %r6; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs11, %rs3, %rs1, %p7; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r7, %rs11; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r8, %rs3; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p8, %r8, %r8; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs12, %rs11, %rs3, %p8; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r9, %rs12; -; CHECK-SM80-NOF16-NEXT: setp.lt.f32 %p9, %r7, %r9; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs13, %rs11, %rs12, %p9; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p10, %rs11, -32768; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs14, %rs11, %rs13, %p10; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p11, %rs12, -32768; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs15, %rs12, %rs14, %p11; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r10, %rs13; -; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p12, %r10, 0f00000000; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs16, %rs15, %rs13, %p12; -; CHECK-SM80-NOF16-NEXT: st.param.v2.b16 [func_retval0], {%rs16, %rs10}; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r1, %rs4; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r2, %rs2; +; CHECK-SM80-NOF16-NEXT: min.f32 %r3, %r2, %r1; +; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs5, %r3; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r4, %rs3; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r5, %rs1; +; CHECK-SM80-NOF16-NEXT: min.f32 %r6, %r5, %r4; +; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %r6; +; CHECK-SM80-NOF16-NEXT: mov.b32 %r7, {%rs6, %rs5}; +; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-SM80-NOF16-NEXT: ret; %x = call <2 x half> @llvm.minimumnum.v2f16(<2 x half> %a, <2 x half> %b) ret <2 x half> %x @@ -1788,54 +1733,25 @@ define double @maximumnum_double(double %a, double %b) { ret double %x } -; TODO Improve the "Expand" path for maximumnum vectors on targets where -; f16 is not supported. Ideally it should use two f32 maximumnums first instead of -; fully expanding the maximumnum instruction into compare/select instructions. 
define <2 x half> @maximumnum_v2half(<2 x half> %a, <2 x half> %b) { ; CHECK-NOF16-LABEL: maximumnum_v2half( ; CHECK-NOF16: { -; CHECK-NOF16-NEXT: .reg .pred %p<13>; -; CHECK-NOF16-NEXT: .reg .b16 %rs<17>; -; CHECK-NOF16-NEXT: .reg .b32 %r<11>; +; CHECK-NOF16-NEXT: .reg .b16 %rs<7>; +; CHECK-NOF16-NEXT: .reg .b32 %r<8>; ; CHECK-NOF16-EMPTY: ; CHECK-NOF16-NEXT: // %bb.0: ; CHECK-NOF16-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [maximumnum_v2half_param_0]; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r1, %rs2; -; CHECK-NOF16-NEXT: setp.nan.f32 %p1, %r1, %r1; ; CHECK-NOF16-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [maximumnum_v2half_param_1]; -; CHECK-NOF16-NEXT: selp.b16 %rs5, %rs4, %rs2, %p1; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r2, %rs5; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r3, %rs4; -; CHECK-NOF16-NEXT: setp.nan.f32 %p2, %r3, %r3; -; CHECK-NOF16-NEXT: selp.b16 %rs6, %rs5, %rs4, %p2; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs6; -; CHECK-NOF16-NEXT: setp.gt.f32 %p3, %r2, %r4; -; CHECK-NOF16-NEXT: selp.b16 %rs7, %rs5, %rs6, %p3; -; CHECK-NOF16-NEXT: setp.eq.b16 %p4, %rs5, 0; -; CHECK-NOF16-NEXT: selp.b16 %rs8, %rs5, %rs7, %p4; -; CHECK-NOF16-NEXT: setp.eq.b16 %p5, %rs6, 0; -; CHECK-NOF16-NEXT: selp.b16 %rs9, %rs6, %rs8, %p5; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r5, %rs7; -; CHECK-NOF16-NEXT: setp.eq.f32 %p6, %r5, 0f00000000; -; CHECK-NOF16-NEXT: selp.b16 %rs10, %rs9, %rs7, %p6; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r6, %rs1; -; CHECK-NOF16-NEXT: setp.nan.f32 %p7, %r6, %r6; -; CHECK-NOF16-NEXT: selp.b16 %rs11, %rs3, %rs1, %p7; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r7, %rs11; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r8, %rs3; -; CHECK-NOF16-NEXT: setp.nan.f32 %p8, %r8, %r8; -; CHECK-NOF16-NEXT: selp.b16 %rs12, %rs11, %rs3, %p8; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r9, %rs12; -; CHECK-NOF16-NEXT: setp.gt.f32 %p9, %r7, %r9; -; CHECK-NOF16-NEXT: selp.b16 %rs13, %rs11, %rs12, %p9; -; CHECK-NOF16-NEXT: setp.eq.b16 %p10, %rs11, 0; -; CHECK-NOF16-NEXT: selp.b16 %rs14, %rs11, %rs13, %p10; -; CHECK-NOF16-NEXT: setp.eq.b16 %p11, %rs12, 0; -; CHECK-NOF16-NEXT: selp.b16 %rs15, %rs12, %rs14, %p11; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r10, %rs13; -; CHECK-NOF16-NEXT: setp.eq.f32 %p12, %r10, 0f00000000; -; CHECK-NOF16-NEXT: selp.b16 %rs16, %rs15, %rs13, %p12; -; CHECK-NOF16-NEXT: st.param.v2.b16 [func_retval0], {%rs16, %rs10}; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r1, %rs4; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r2, %rs2; +; CHECK-NOF16-NEXT: max.f32 %r3, %r2, %r1; +; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs5, %r3; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs3; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r5, %rs1; +; CHECK-NOF16-NEXT: max.f32 %r6, %r5, %r4; +; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %r6; +; CHECK-NOF16-NEXT: mov.b32 %r7, {%rs6, %rs5}; +; CHECK-NOF16-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: maximumnum_v2half( @@ -1851,48 +1767,22 @@ define <2 x half> @maximumnum_v2half(<2 x half> %a, <2 x half> %b) { ; ; CHECK-SM80-NOF16-LABEL: maximumnum_v2half( ; CHECK-SM80-NOF16: { -; CHECK-SM80-NOF16-NEXT: .reg .pred %p<13>; -; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<17>; -; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<11>; +; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<7>; +; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<8>; ; CHECK-SM80-NOF16-EMPTY: ; CHECK-SM80-NOF16-NEXT: // %bb.0: ; CHECK-SM80-NOF16-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [maximumnum_v2half_param_0]; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r1, %rs2; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p1, %r1, %r1; ; CHECK-SM80-NOF16-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [maximumnum_v2half_param_1]; -; 
CHECK-SM80-NOF16-NEXT: selp.b16 %rs5, %rs4, %rs2, %p1; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r2, %rs5; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r3, %rs4; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p2, %r3, %r3; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs6, %rs5, %rs4, %p2; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r4, %rs6; -; CHECK-SM80-NOF16-NEXT: setp.gt.f32 %p3, %r2, %r4; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs7, %rs5, %rs6, %p3; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p4, %rs5, 0; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs8, %rs5, %rs7, %p4; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p5, %rs6, 0; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs9, %rs6, %rs8, %p5; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r5, %rs7; -; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p6, %r5, 0f00000000; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs10, %rs9, %rs7, %p6; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r6, %rs1; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p7, %r6, %r6; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs11, %rs3, %rs1, %p7; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r7, %rs11; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r8, %rs3; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p8, %r8, %r8; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs12, %rs11, %rs3, %p8; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r9, %rs12; -; CHECK-SM80-NOF16-NEXT: setp.gt.f32 %p9, %r7, %r9; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs13, %rs11, %rs12, %p9; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p10, %rs11, 0; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs14, %rs11, %rs13, %p10; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p11, %rs12, 0; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs15, %rs12, %rs14, %p11; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r10, %rs13; -; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p12, %r10, 0f00000000; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs16, %rs15, %rs13, %p12; -; CHECK-SM80-NOF16-NEXT: st.param.v2.b16 [func_retval0], {%rs16, %rs10}; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r1, %rs4; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r2, %rs2; +; CHECK-SM80-NOF16-NEXT: max.f32 %r3, %r2, %r1; +; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs5, %r3; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r4, %rs3; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r5, %rs1; +; CHECK-SM80-NOF16-NEXT: max.f32 %r6, %r5, %r4; +; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %r6; +; CHECK-SM80-NOF16-NEXT: mov.b32 %r7, {%rs6, %rs5}; +; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-SM80-NOF16-NEXT: ret; %x = call <2 x half> @llvm.maximumnum.v2f16(<2 x half> %a, <2 x half> %b) ret <2 x half> %x diff --git a/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll index bc67471209bf8..32b55a38e55ef 100644 --- a/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll +++ b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll @@ -11,7 +11,6 @@ target triple = "nvptx64-unknown-unknown" define void @test_infer_const_from_cast() { ; INFER-LABEL: @test_infer_const_from_cast ; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap) -; BOTH: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap) ; PTX-LABEL: .visible .func test_infer_const_from_cast( ; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap; ; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}; @@ -69,12 +68,40 @@ entry: %cast1 = addrspacecast ptr addrspace(4) @constant_tensormap to ptr %cast2 = addrspacecast ptr %cast1 to ptr addrspace(4) %cast3 = addrspacecast ptr addrspace(4) %cast2 to ptr - call void @llvm.nvvm.prefetch.tensormap(ptr %cast3) + call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast3) + ret void +} + +; Kernel 
Function Test +; Cast from Param space to Generic +define ptx_kernel void @test_param_to_generic_cast_kernel(ptr addrspace(101) %param_ptr) { +; INFER-LABEL: @test_param_to_generic_cast_kernel +; INFER: call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr) +; PTX-LABEL: .visible .entry test_param_to_generic_cast_kernel( +; PTX: prefetch.param.tensormap [%rd{{[0-9]+}}]; +entry: + %cast = addrspacecast ptr addrspace(101) %param_ptr to ptr + call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast) + ret void +} + +; Kernel Function Test +; Multiple casts in sequence +define ptx_kernel void @test_infer_through_multiple_casts_kernel() { +; INFER-LABEL: @test_infer_through_multiple_casts_kernel +; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap) +; PTX-LABEL: .visible .entry test_infer_through_multiple_casts_kernel( +; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap; +; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}; +; PTX: prefetch.tensormap [%rd{{[0-9]+}}]; +entry: + %cast1 = addrspacecast ptr addrspace(4) @constant_tensormap to ptr + %cast2 = addrspacecast ptr %cast1 to ptr addrspace(4) + %cast3 = addrspacecast ptr addrspace(4) %cast2 to ptr + call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast3) ret void } declare void @llvm.nvvm.prefetch.tensormap.p0(ptr) declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4)) declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101)) - - diff --git a/llvm/test/CodeGen/NVPTX/prefetch.ll b/llvm/test/CodeGen/NVPTX/prefetch.ll index a1c5ec8f50a6b..c0489cc6fd73a 100644 --- a/llvm/test/CodeGen/NVPTX/prefetch.ll +++ b/llvm/test/CodeGen/NVPTX/prefetch.ll @@ -121,4 +121,40 @@ define void @prefetch_param_tensormap(ptr addrspace(101) %param_ptr) { ; CHECK-PTX64-NEXT: ret; tail call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr) ret void -} \ No newline at end of file +} + +define ptx_kernel void @prefetch_generic_tensormap_kernel(ptr %ptr) { +; CHECK-PTX64-LABEL: prefetch_generic_tensormap_kernel( +; CHECK-PTX64: { +; CHECK-PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK-PTX64-EMPTY: +; CHECK-PTX64-NEXT: // %bb.0: +; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_generic_tensormap_kernel_param_0]; +; CHECK-PTX64-NEXT: prefetch.tensormap [%rd1]; +; CHECK-PTX64-NEXT: ret; + tail call void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr) + ret void +} + +define ptx_kernel void @prefetch_param_tensormap_kernel(ptr addrspace(101) %param_ptr) { +; CHECK-PTX64-LABEL: prefetch_param_tensormap_kernel( +; CHECK-PTX64: { +; CHECK-PTX64-NEXT: .reg .b64 %rd<2>; +; CHECK-PTX64-EMPTY: +; CHECK-PTX64-NEXT: // %bb.0: +; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_param_tensormap_kernel_param_0]; +; CHECK-PTX64-NEXT: prefetch.param.tensormap [%rd1]; +; CHECK-PTX64-NEXT: ret; + tail call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr) + ret void +} + +define ptx_kernel void @prefetch_grid_const_tensormap(ptr byval([64 x i8]) align 64 "nvvm.grid_constant" %ptr) { +; CHECK-PTX64-LABEL: .visible .entry prefetch_grid_const_tensormap( +; CHECK-PTX64: prefetch.tensormap [%{{(SP|rd[0-9]+).*}}]; +; CHECK-PTX64: ret; + +entry: + call void @llvm.nvvm.prefetch.tensormap.p0(ptr addrspace(0) %ptr) + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll index f871e4039a558..87787ba2bf81c 100644 --- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll +++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll @@ -1452,22 +1452,44 
@@ define i16 @reduce_add_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_add_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_add_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0]; -; CHECK-NEXT: add.s32 %r9, %r4, %r8; -; CHECK-NEXT: add.s32 %r10, %r2, %r6; -; CHECK-NEXT: add.s32 %r11, %r10, %r9; -; CHECK-NEXT: add.s32 %r12, %r3, %r7; -; CHECK-NEXT: add.s32 %r13, %r1, %r5; -; CHECK-NEXT: add.s32 %r14, %r13, %r12; -; CHECK-NEXT: add.s32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_add_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0]; +; CHECK-SM80-NEXT: add.s32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: add.s32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: add.s32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: add.s32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: add.s32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: add.s32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_add_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_add_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_add_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: add.s32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: add.s32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: add.s32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: add.s32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: add.s32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: add.s32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.add(<8 x i32> %in) ret i32 %res } @@ -1543,22 +1565,44 @@ define i16 @reduce_mul_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_mul_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_mul_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i32_param_0]; -; CHECK-NEXT: mul.lo.s32 %r9, %r4, %r8; -; CHECK-NEXT: mul.lo.s32 %r10, %r2, %r6; -; CHECK-NEXT: mul.lo.s32 %r11, %r10, %r9; -; CHECK-NEXT: mul.lo.s32 %r12, %r3, %r7; -; CHECK-NEXT: mul.lo.s32 %r13, %r1, %r5; -; CHECK-NEXT: mul.lo.s32 %r14, %r13, %r12; -; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_mul_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i32_param_0]; +; CHECK-SM80-NEXT: mul.lo.s32 
%r9, %r4, %r8; +; CHECK-SM80-NEXT: mul.lo.s32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: mul.lo.s32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: mul.lo.s32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: mul.lo.s32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: mul.lo.s32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: mul.lo.s32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_mul_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_mul_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_mul_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: mul.lo.s32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: mul.lo.s32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: mul.lo.s32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: mul.lo.s32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: mul.lo.s32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: mul.lo.s32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: mul.lo.s32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.mul(<8 x i32> %in) ret i32 %res } @@ -1673,22 +1717,44 @@ define i16 @reduce_umax_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_umax_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_umax_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0]; -; CHECK-NEXT: max.u32 %r9, %r4, %r8; -; CHECK-NEXT: max.u32 %r10, %r2, %r6; -; CHECK-NEXT: max.u32 %r11, %r10, %r9; -; CHECK-NEXT: max.u32 %r12, %r3, %r7; -; CHECK-NEXT: max.u32 %r13, %r1, %r5; -; CHECK-NEXT: max.u32 %r14, %r13, %r12; -; CHECK-NEXT: max.u32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_umax_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0]; +; CHECK-SM80-NEXT: max.u32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: max.u32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: max.u32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.u32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: max.u32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: max.u32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.u32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_umax_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_umax_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_umax_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: max.u32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: max.u32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: max.u32 %r11, %r10, %r5; +; 
CHECK-SM100-NEXT: max.u32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: max.u32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: max.u32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: max.u32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.umax(<8 x i32> %in) ret i32 %res } @@ -1803,22 +1869,44 @@ define i16 @reduce_umin_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_umin_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_umin_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0]; -; CHECK-NEXT: min.u32 %r9, %r4, %r8; -; CHECK-NEXT: min.u32 %r10, %r2, %r6; -; CHECK-NEXT: min.u32 %r11, %r10, %r9; -; CHECK-NEXT: min.u32 %r12, %r3, %r7; -; CHECK-NEXT: min.u32 %r13, %r1, %r5; -; CHECK-NEXT: min.u32 %r14, %r13, %r12; -; CHECK-NEXT: min.u32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_umin_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0]; +; CHECK-SM80-NEXT: min.u32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: min.u32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: min.u32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.u32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: min.u32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: min.u32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.u32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_umin_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_umin_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_umin_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: min.u32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: min.u32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: min.u32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: min.u32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: min.u32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: min.u32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: min.u32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.umin(<8 x i32> %in) ret i32 %res } @@ -1933,22 +2021,44 @@ define i16 @reduce_smax_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_smax_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_smax_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0]; -; CHECK-NEXT: max.s32 %r9, %r4, %r8; -; CHECK-NEXT: max.s32 %r10, %r2, %r6; -; CHECK-NEXT: max.s32 %r11, %r10, %r9; -; CHECK-NEXT: max.s32 %r12, %r3, %r7; -; CHECK-NEXT: max.s32 %r13, %r1, %r5; -; CHECK-NEXT: max.s32 %r14, %r13, %r12; -; CHECK-NEXT: max.s32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: 
ret; +; CHECK-SM80-LABEL: reduce_smax_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0]; +; CHECK-SM80-NEXT: max.s32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: max.s32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: max.s32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.s32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: max.s32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: max.s32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.s32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_smax_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_smax_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_smax_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: max.s32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: max.s32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: max.s32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: max.s32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: max.s32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: max.s32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: max.s32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.smax(<8 x i32> %in) ret i32 %res } @@ -2063,22 +2173,44 @@ define i16 @reduce_smin_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_smin_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_smin_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0]; -; CHECK-NEXT: min.s32 %r9, %r4, %r8; -; CHECK-NEXT: min.s32 %r10, %r2, %r6; -; CHECK-NEXT: min.s32 %r11, %r10, %r9; -; CHECK-NEXT: min.s32 %r12, %r3, %r7; -; CHECK-NEXT: min.s32 %r13, %r1, %r5; -; CHECK-NEXT: min.s32 %r14, %r13, %r12; -; CHECK-NEXT: min.s32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_smin_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0]; +; CHECK-SM80-NEXT: min.s32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: min.s32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: min.s32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.s32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: min.s32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: min.s32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.s32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_smin_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_smin_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_smin_i32_param_0]; +; 
CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: min.s32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: min.s32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: min.s32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: min.s32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: min.s32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: min.s32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: min.s32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.smin(<8 x i32> %in) ret i32 %res } @@ -2152,22 +2284,44 @@ define i16 @reduce_and_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_and_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_and_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0]; -; CHECK-NEXT: and.b32 %r9, %r4, %r8; -; CHECK-NEXT: and.b32 %r10, %r2, %r6; -; CHECK-NEXT: and.b32 %r11, %r10, %r9; -; CHECK-NEXT: and.b32 %r12, %r3, %r7; -; CHECK-NEXT: and.b32 %r13, %r1, %r5; -; CHECK-NEXT: and.b32 %r14, %r13, %r12; -; CHECK-NEXT: and.b32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_and_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0]; +; CHECK-SM80-NEXT: and.b32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: and.b32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: and.b32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: and.b32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: and.b32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: and.b32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: and.b32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_and_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_and_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_and_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: and.b32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: and.b32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: and.b32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: and.b32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: and.b32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: and.b32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: and.b32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.and(<8 x i32> %in) ret i32 %res } @@ -2241,22 +2395,44 @@ define i16 @reduce_or_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_or_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_or_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0]; -; CHECK-NEXT: or.b32 %r9, %r4, %r8; -; 
CHECK-NEXT: or.b32 %r10, %r2, %r6; -; CHECK-NEXT: or.b32 %r11, %r10, %r9; -; CHECK-NEXT: or.b32 %r12, %r3, %r7; -; CHECK-NEXT: or.b32 %r13, %r1, %r5; -; CHECK-NEXT: or.b32 %r14, %r13, %r12; -; CHECK-NEXT: or.b32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_or_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0]; +; CHECK-SM80-NEXT: or.b32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: or.b32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: or.b32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: or.b32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: or.b32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: or.b32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: or.b32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_or_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_or_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_or_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: or.b32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: or.b32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: or.b32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: or.b32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: or.b32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: or.b32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: or.b32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.or(<8 x i32> %in) ret i32 %res } @@ -2330,22 +2506,44 @@ define i16 @reduce_xor_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_xor_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_xor_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0]; -; CHECK-NEXT: xor.b32 %r9, %r4, %r8; -; CHECK-NEXT: xor.b32 %r10, %r2, %r6; -; CHECK-NEXT: xor.b32 %r11, %r10, %r9; -; CHECK-NEXT: xor.b32 %r12, %r3, %r7; -; CHECK-NEXT: xor.b32 %r13, %r1, %r5; -; CHECK-NEXT: xor.b32 %r14, %r13, %r12; -; CHECK-NEXT: xor.b32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_xor_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0]; +; CHECK-SM80-NEXT: xor.b32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: xor.b32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: xor.b32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: xor.b32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: xor.b32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: xor.b32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: xor.b32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_xor_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; 
CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_xor_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_xor_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: xor.b32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: xor.b32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: xor.b32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: xor.b32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: xor.b32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: xor.b32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: xor.b32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.xor(<8 x i32> %in) ret i32 %res } diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-invalid.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-invalid.ll new file mode 100644 index 0000000000000..c0f6f4c7c46bd --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-invalid.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: not llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx88 -o /dev/null 2>&1 | FileCheck %s +; RUN: not llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx88 -o /dev/null 2>&1 | FileCheck %s +; RUN: not llc < %s -o - -mcpu=sm_110a -march=nvptx64 -mattr=+ptx90 -o /dev/null 2>&1 | FileCheck %s + +define void @tcgen05_mma_block_scale_invalid_flags(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { + ; CHECK: immarg value 0 out of range [1, 3) + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 0, i32 0) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 5) + ; CHECK: immarg value 0 out of range [1, 3) + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 0, i32 0) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 5) + ; CHECK: immarg value 0 out of range [1, 3) + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 0, i32 0) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 5) + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr 
addrspace(6) %scale_b, i32 0, i32 0) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 5) + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll new file mode 100644 index 0000000000000..f6c219107a677 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll @@ -0,0 +1,670 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_110a -march=nvptx64 -mattr=+ptx90 | FileCheck %s +; RUN: %if ptxas-sm_100a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-sm_101a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %} +; RUN: %if ptxas-sm_110a && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mattr=+ptx90 -mcpu=sm_110a | %ptxas-verify -arch=sm_110a %} + +define void @tcgen05_mma_mxf8f6f4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf8f6f4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf8f6f4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf8f6f4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf8f6f4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf8f6f4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf8f6f4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf8f6f4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf8f6f4_cta1_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf8f6f4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void 
@llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_mxf8f6f4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf8f6f4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf8f6f4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf8f6f4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf8f6f4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf8f6f4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf8f6f4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf8f6f4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf8f6f4_cta2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf8f6f4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], 
[%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf8f6f4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf8f6f4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf8f6f4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf8f6f4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf8f6f4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf8f6f4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf8f6f4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf8f6f4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf8f6f4_cta1_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf8f6f4_cta1_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
ld.param.b32 %r6, [tcgen05_mma_sp_mxf8f6f4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf8f6f4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf8f6f4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; 
CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf8f6f4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf8f6f4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf8f6f4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf8f6f4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf8f6f4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf8f6f4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf8f6f4_cta2_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf8f6f4_cta2_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf8f6f4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, 
ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_mxf4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf4_cta1_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr 
addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_mxf4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf4_cta2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void 
@llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf4_cta1_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf4_cta1_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_sp_mxf4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf4_cta2_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf4_cta2_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf4_cta2_param_1]; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_mxf4nvf4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf4nvf4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, 
[tcgen05_mma_mxf4nvf4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf4nvf4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf4nvf4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf4nvf4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf4nvf4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf4nvf4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf4nvf4_cta1_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf4nvf4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void 
@llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_mxf4nvf4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf4nvf4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf4nvf4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, 
[tcgen05_mma_mxf4nvf4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf4nvf4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf4nvf4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf4nvf4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf4nvf4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf4nvf4_cta2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf4nvf4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, 
ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf4nvf4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf4nvf4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf4nvf4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf4nvf4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf4nvf4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, 
[tcgen05_mma_sp_mxf4nvf4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf4nvf4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf4nvf4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf4nvf4_cta1_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf4nvf4_cta1_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf4nvf4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, 
i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf4nvf4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) 
%scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf4nvf4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf4nvf4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf4nvf4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf4nvf4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf4nvf4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf4nvf4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf4nvf4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf4nvf4_cta2_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf4nvf4_cta2_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf4nvf4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block16.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4nvf4.block_scale.block32.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 
%b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void 
@llvm.nvvm.tcgen05.mma.sp.shared.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4nvf4.block_scale.block32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale.ll new file mode 100644 index 0000000000000..e071eaaf107fc --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale.ll @@ -0,0 +1,387 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx88 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_110a -march=nvptx64 -mattr=+ptx90 | FileCheck %s +; RUN: %if ptxas-sm_100a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-sm_101a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %} +; RUN: %if ptxas-sm_110a && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mattr=+ptx90 -mcpu=sm_110a | %ptxas-verify -arch=sm_110a %} + +define void @tcgen05_mma_mxf8f6f4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf8f6f4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf8f6f4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf8f6f4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf8f6f4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf8f6f4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf8f6f4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf8f6f4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf8f6f4_cta1_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf8f6f4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_mxf8f6f4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf8f6f4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf8f6f4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf8f6f4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf8f6f4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf8f6f4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf8f6f4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf8f6f4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf8f6f4_cta2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf8f6f4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf8f6f4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf8f6f4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf8f6f4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf8f6f4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf8f6f4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf8f6f4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf8f6f4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf8f6f4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf8f6f4_cta1_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf8f6f4_cta1_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
ld.param.b32 %r6, [tcgen05_mma_sp_mxf8f6f4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf8f6f4.block_scale.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf8f6f4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf8f6f4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: 
ld.param.b8 %rs1, [tcgen05_mma_sp_mxf8f6f4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf8f6f4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf8f6f4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf8f6f4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf8f6f4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf8f6f4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf8f6f4_cta2_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf8f6f4_cta2_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf8f6f4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf8f6f4.block_scale.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr 
addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf8f6f4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_mxf4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf4_cta1_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::mxf4.block_scale.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr 
addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_mxf4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) { +; CHECK-LABEL: tcgen05_mma_mxf4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<6>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_mxf4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_mxf4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_mxf4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_mxf4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_mxf4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_mxf4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_mxf4_cta2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::discard [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_mxf4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::discard [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::lastuse [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::fill [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::fill [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::use [%r1], %rd1, %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::mxf4.block_scale.collector::a::use [%r1], [%r5], %rd2, %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr 
addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf4_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf4_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf4_cta1_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf4_cta1_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::mxf4.block_scale.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 
1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_mxf4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_mxf4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_mxf4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_mxf4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_mxf4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_mxf4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_mxf4_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_mxf4_cta2_param_6]; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_mxf4_cta2_param_7]; +; CHECK-NEXT: ld.param.b32 %r5, [tcgen05_mma_sp_mxf4_cta2_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::discard [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ld.param.b32 %r6, [tcgen05_mma_sp_mxf4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::discard [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::lastuse [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::lastuse [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::fill [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::fill [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::use [%r1], %rd1, %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::mxf4.block_scale.collector::a::use [%r1], [%r6], %rd2, [%r5], %r2, [%r3], [%r4], %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.mxf4.block_scale(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.mxf4.block_scale(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b, i32 2, i32 3) + + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-disable-output-lane.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-disable-output-lane.ll new file mode 100644 index 0000000000000..f2d6c02b2cd7e --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-disable-output-lane.ll @@ -0,0 +1,855 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_110a -march=nvptx64 -mattr=+ptx90 | FileCheck %s +; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-sm_101a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %} +; RUN: %if ptxas-sm_110a && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mattr=+ptx90 -mcpu=sm_110a | %ptxas-verify -arch=sm_110a %} + +define void @tcgen05_mma_fp16_shared_disable_output_lane_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: 
tcgen05_mma_fp16_shared_disable_output_lane_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_shared_disable_output_lane_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_shared_disable_output_lane_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_shared_disable_output_lane_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_shared_disable_output_lane_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_shared_disable_output_lane_cta1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_fp16_shared_disable_output_lane_cta1_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_fp16_shared_disable_output_lane_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) 
%dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 3) + + ret void +} + +define void @tcgen05_mma_fp16_shared_disable_output_lane_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_fp16_shared_disable_output_lane_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<12>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_shared_disable_output_lane_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_shared_disable_output_lane_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_shared_disable_output_lane_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_shared_disable_output_lane_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_shared_disable_output_lane_cta2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_fp16_shared_disable_output_lane_cta2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_fp16_shared_disable_output_lane_cta2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_fp16_shared_disable_output_lane_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.ashift.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.ashift.collector::a::lastuse [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; 
CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 0, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1_param_6]; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r8, 
[tcgen05_mma_sp_fp16_shared_disable_output_lane_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 
%enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 0, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<13>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2_param_7]; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r12, [tcgen05_mma_sp_fp16_shared_disable_output_lane_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.ashift.collector::a::discard [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.ashift.collector::a::lastuse [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, 
i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 0, i32 3) + + ret void +} + +define void @tcgen05_mma_tf32_shared_disable_output_lane_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_tf32_shared_disable_output_lane_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_shared_disable_output_lane_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_shared_disable_output_lane_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_shared_disable_output_lane_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_shared_disable_output_lane_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_shared_disable_output_lane_cg1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_tf32_shared_disable_output_lane_cg1_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_tf32_shared_disable_output_lane_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: 
tcgen05.mma.cta_group::1.kind::tf32.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_tf32_shared_disable_output_lane_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_tf32_shared_disable_output_lane_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<12>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 
%rs1, [tcgen05_mma_tf32_shared_disable_output_lane_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_shared_disable_output_lane_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_shared_disable_output_lane_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_shared_disable_output_lane_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_shared_disable_output_lane_cg2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_tf32_shared_disable_output_lane_cg2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_tf32_shared_disable_output_lane_cg2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_tf32_shared_disable_output_lane_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.ashift.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.ashift.collector::a::lastuse [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void 
@llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1_param_6]; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r8, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call 
void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<13>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, 
[tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2_param_7]; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r12, [tcgen05_mma_sp_tf32_shared_disable_output_lane_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.ashift.collector::a::discard [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.ashift.collector::a::lastuse [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) 
%spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 1, i32 3) + + ret void +} + +define void @tcgen05_mma_f8f6f4_shared_disable_output_lane_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_f8f6f4_shared_disable_output_lane_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg1_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::use [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void 
@llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<12>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_f8f6f4_shared_disable_output_lane_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: 
tcgen05.mma.cta_group::2.kind::f8f6f4.ashift.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.ashift.collector::a::lastuse [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::use [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: 
tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1_param_6]; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r8, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) 
%atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<13>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2_param_7]; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r12, [tcgen05_mma_sp_f8f6f4_shared_disable_output_lane_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.ashift.collector::a::discard [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.ashift.collector::a::lastuse [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::use [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 2, i32 3) + + ret void + +} + +define void @tcgen05_mma_i8_shared_disable_output_lane_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> 
%disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_i8_shared_disable_output_lane_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_i8_shared_disable_output_lane_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_i8_shared_disable_output_lane_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_i8_shared_disable_output_lane_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_i8_shared_disable_output_lane_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_i8_shared_disable_output_lane_cg1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_i8_shared_disable_output_lane_cg1_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_i8_shared_disable_output_lane_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.ashift.collector::a::lastuse [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::fill [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::use [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::use [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr 
addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 3, i32 3) + + ret void +} + +define void @tcgen05_mma_i8_shared_disable_output_lane_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_i8_shared_disable_output_lane_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<12>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_i8_shared_disable_output_lane_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_i8_shared_disable_output_lane_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_i8_shared_disable_output_lane_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_i8_shared_disable_output_lane_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_i8_shared_disable_output_lane_cg2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_i8_shared_disable_output_lane_cg2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_i8_shared_disable_output_lane_cg2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_i8_shared_disable_output_lane_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.ashift.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::lastuse [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.ashift.collector::a::lastuse [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::fill [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::fill [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::use [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::use [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void 
@llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <8 x i32> %disable_output_lanev8, i32 3, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_i8_shared_disable_output_lane_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_i8_shared_disable_output_lane_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<9>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg1_param_6]; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg1_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r8, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg1_param_1]; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.ashift.collector::a::discard [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.ashift.collector::a::lastuse [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::fill [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::fill [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::use [%r1], %rd1, %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::use [%r1], [%r8], %rd2, [%r7], %r2, {%r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <4 x i32> %disable_output_lanev4, i32 3, i32 3) + + 
ret void +} + +define void @tcgen05_mma_sp_i8_shared_disable_output_lane_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_i8_shared_disable_output_lane_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<13>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg2_param_7]; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg2_param_8]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ld.param.b32 %r12, [tcgen05_mma_sp_i8_shared_disable_output_lane_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.ashift.collector::a::discard [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::lastuse [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.ashift.collector::a::lastuse [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::fill [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::fill [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::use [%r1], %rd1, %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::use [%r1], [%r12], %rd2, [%r11], %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void 
@llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, <8 x i32> %disable_output_lanev8, i32 3, i32 3) + + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-invalid.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-invalid.ll new file mode 100644 index 0000000000000..dff829ecf5321 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-invalid.ll @@ -0,0 +1,37 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: not llc < %s -mtriple=nvptx64 -mcpu=sm_100a -mattr=+ptx86 -o /dev/null 2>&1 | FileCheck %s +; RUN: not llc < %s -mtriple=nvptx64 -mcpu=sm_101a -mattr=+ptx86 -o /dev/null 2>&1 | FileCheck %s +; RUN: not llc < %s -mtriple=nvptx64 -mcpu=sm_110a -mattr=+ptx90 -o /dev/null 2>&1 | FileCheck %s +target triple = "nvptx64-nvidia-cuda" + +define void @tcgen05_mma_invalid_flag_values(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 5, i32 1, i32 3) + ; CHECK: immarg value 0 out of range [1, 3) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + ; CHECK: immarg value 3 out of range [1, 3) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 3, i32 3) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 5) + ; CHECK: immarg value 2 out of range [0, 2) + call void 
@llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 2) + ret void +} + +define void @tcgen05_mma_disable_output_lane_invalid_flag_values(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4) { + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 5, i32 0) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.tensor.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, i32 0, i32 5) + ret void +} + +define void @tcgen05_mma_ws_invalid_flag_values(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 5, i32 0, i32 0) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 5, i32 0) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 5) + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d-invalid.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d-invalid.ll new file mode 100644 index 0000000000000..7c884a70b7530 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d-invalid.ll @@ -0,0 +1,25 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: not llc < %s -mtriple=nvptx64 -mcpu=sm_100a -mattr=+ptx86 -o /dev/null 2>&1 | FileCheck %s +target triple = "nvptx64-nvidia-cuda" + +define void @tcgen05_mma_scale_d_invalid_flag_values(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { + ; CHECK: immarg value 16 out of range [0, 16) + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 16, i32 0, i32 1, i32 0) + ; CHECK: immarg value 3 out of range [0, 2) + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 3, i32 1, i32 0) + ; CHECK: immarg value 0 out of range [1, 3) + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 0, i32 0) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 5) + ret void +} + +define void @tcgen05_mma_scale_d_disable_output_lane_invalid_flag_values(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4) { + ; CHECK: immarg value 16 out of range [0, 16) + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 16, 
<4 x i32> %disable_output_lanev4, i32 0, i32 0) + ; CHECK: immarg value 3 out of range [0, 2) + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 3, i32 0) + ; CHECK: immarg value 5 out of range [0, 4) + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 0, i32 5) + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d.ll new file mode 100644 index 0000000000000..ffe88616af10d --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-scale-d.ll @@ -0,0 +1,537 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -o - -mcpu=sm_100a -mtriple=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -mtriple=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} + +define void @tcgen05_mma_fp16_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_fp16_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_cg1_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_fp16_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, 
i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_fp16_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_fp16_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_cg2_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_fp16_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, 
i64 0, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 0, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_fp16_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_fp16_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_fp16_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_fp16_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_fp16_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_fp16_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_fp16_cg1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_fp16_cg1_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_fp16_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 0) + + call void 
@llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_sp_fp16_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_fp16_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_fp16_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_fp16_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_fp16_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_fp16_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_fp16_cg2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_fp16_cg2_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_fp16_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, 
[%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 0, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_tf32_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_tf32_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_cg1_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_tf32_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: 
tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_tf32_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_tf32_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_cg2_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_tf32_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard.ashift [%r1], [%r3], 
%rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse.ashift [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], [%r3], %rd2, %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, i32 1, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_sp_tf32_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_tf32_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_tf32_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_tf32_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_tf32_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_tf32_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_tf32_cg1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_tf32_cg1_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_tf32_cg1_param_1]; +; CHECK-NEXT: 
tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_sp_tf32_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_tf32_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_tf32_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 
1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_tf32_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_tf32_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_tf32_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_tf32_cg2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_tf32_cg2_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_tf32_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 0) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 1) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 2) + + call void @llvm.nvvm.tcgen05.mma.sp.shared.scale_d(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 3) + + call void @llvm.nvvm.tcgen05.mma.sp.tensor.scale_d(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 
%enable_inp_d, ptr addrspace(6) %spmetadata, i64 0, i32 1, i32 2, i32 3) + + ret void +} + +define void @tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg1_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 0, i32 0) + + ret void +} + +define void @tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<12>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, 
[tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_fp16_shared_scale_d_disable_output_lane_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.ashift.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.scale_d.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 0, i32 0) + + ret void +} + +define void @tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<8>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg1_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg1_param_6]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r7, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.ashift.collector::a::discard [%r1], [%r7], %rd2, %r2, {%r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.scale_d.disable_output_lane.cg1(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg1(ptr 
addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg1.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <4 x i32> %disable_output_lanev4, i32 1, i32 0) + + ret void +} + +define void @tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, <4 x i32> %disable_output_lanev4, <8 x i32> %disable_output_lanev8) { +; CHECK-LABEL: tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<12>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2_param_4]; +; CHECK-NEXT: ld.param.v4.b32 {%r3, %r4, %r5, %r6}, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2_param_7+16]; +; CHECK-NEXT: ld.param.v4.b32 {%r7, %r8, %r9, %r10}, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2_param_7]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ld.param.b32 %r11, [tcgen05_mma_tf32_shared_scale_d_disable_output_lane_cg2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.ashift.collector::a::discard [%r1], [%r11], %rd2, %r2, {%r7, %r8, %r9, %r10, %r3, %r4, %r5, %r6}, %p1, 0; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared.scale_d.disable_output_lane.cg2(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + call void @llvm.nvvm.tcgen05.mma.tensor.scale_d.disable_output_lane.cg2.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 0, <8 x i32> %disable_output_lanev8, i32 1, i32 0) + + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-ws.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-ws.ll new file mode 100644 index 0000000000000..7e60b3a3ece6e --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-ws.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s +; RUN: llc < %s -o - -mcpu=sm_110a -march=nvptx64 -mattr=+ptx90 | FileCheck %s +; RUN: %if 
ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %} +; RUN: %if ptxas-sm_101a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %} +; RUN: %if ptxas-sm_110a && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mattr=+ptx90 -mcpu=sm_110a | %ptxas-verify -arch=sm_110a %} + +define void @tcgen05_mma_ws_fp16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_ws_fp16( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_fp16_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_fp16_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_fp16_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_fp16_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_fp16_param_4]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_fp16_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 
%enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_fp16_zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask) { +; CHECK-LABEL: tcgen05_mma_ws_fp16_zero_col_mask( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_fp16_zero_col_mask_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_fp16_zero_col_mask_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_fp16_zero_col_mask_param_2]; +; 
CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_fp16_zero_col_mask_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_fp16_zero_col_mask_param_4]; +; CHECK-NEXT: ld.param.b64 %rd3, [tcgen05_mma_ws_fp16_zero_col_mask_param_6]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_fp16_zero_col_mask_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f16.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1, %rd3; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 
%ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, i32 0, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_sp_fp16(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta) { +; CHECK-LABEL: tcgen05_mma_ws_sp_fp16( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_sp_fp16_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_sp_fp16_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_sp_fp16_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_sp_fp16_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_sp_fp16_param_4]; +; CHECK-NEXT: 
ld.param.b32 %r3, [tcgen05_mma_ws_sp_fp16_param_6]; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_ws_sp_fp16_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr 
addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i32 0, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_sp_fp16_zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i64 %zero_col_mask, ptr addrspace(6) %spmeta) { +; CHECK-LABEL: tcgen05_mma_ws_sp_fp16_zero_col_mask( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_4]; +; CHECK-NEXT: ld.param.b64 %rd3, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_6]; +; CHECK-NEXT: ld.param.b32 
%r3, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_7]; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_ws_sp_fp16_zero_col_mask_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], %rd1, %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: tcgen05.mma.ws.sp.cta_group::1.kind::f16.collector::b0::use [%r1], [%r4], %rd2, [%r3], %r2, %p1, %rd3; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 
%enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.shared.zero_col_mask(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.sp.tensor.zero_col_mask(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmeta, i64 %zero_col_mask, i32 0, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_tf32(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_ws_tf32( +; CHECK: { +; CHECK-NEXT: .reg .pred 
%p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_tf32_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_tf32_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_tf32_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_tf32_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_tf32_param_4]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_tf32_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::tf32.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) 
%dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_f8f6f4(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_ws_f8f6f4( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_f8f6f4_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_f8f6f4_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_f8f6f4_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_f8f6f4_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_f8f6f4_param_4]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_f8f6f4_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: 
tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::f8f6f4.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void 
@llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 0, i32 3) + ret void +} + +define void @tcgen05_mma_ws_i8(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_ws_i8( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_ws_i8_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_ws_i8_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_ws_i8_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_ws_i8_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_ws_i8_param_4]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_ws_i8_param_1]; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: 
tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.ws.cta_group::1.kind::i8.collector::b0::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 0) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 1) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 2) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void @llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3) + + call void 
@llvm.nvvm.tcgen05.mma.ws.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 0, i32 3)
+  ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll
new file mode 100644
index 0000000000000..711e566df5034
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll
@@ -0,0 +1,639 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s
+; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s
+; RUN: llc < %s -o - -mcpu=sm_110a -march=nvptx64 -mattr=+ptx90 | FileCheck %s
+; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %}
+; RUN: %if ptxas-sm_101a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %}
+; RUN: %if ptxas-sm_110a && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mattr=+ptx90 -mcpu=sm_110a | %ptxas-verify -arch=sm_110a %}
+
+define void @tcgen05_mma_fp16_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) {
+; CHECK-LABEL: tcgen05_mma_fp16_cta1(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_cta1_param_5];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_cta1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_cta1_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_cta1_param_3];
+; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_cta1_param_4];
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_fp16_cta1_param_1];
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, %p1;
+; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f16.collector::a::use [%r1], [%r3], %rd2, %r2, %p1;
+; CHECK-NEXT: ret;
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 0)
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 0)
+  call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 0)
+  call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 1)
+  call void @llvm.nvvm.tcgen05.mma.tensor(ptr
addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 3) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_sp_fp16_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_fp16_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_fp16_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_fp16_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_fp16_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_fp16_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_fp16_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_fp16_cta1_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_fp16_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f16.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) 
%spmetadata, i32 0, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 3) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_tf32_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_tf32_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_tf32_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_cta1_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_tf32_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::tf32.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 
%enable_inp_d, i32 1, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 3) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_sp_tf32_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_tf32_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_tf32_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_tf32_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_tf32_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_tf32_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_tf32_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_tf32_cta1_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_tf32_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::tf32.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, 
ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 3) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_f8f6f4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_f8f6f4_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_f8f6f4_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_f8f6f4_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_f8f6f4_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_f8f6f4_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_f8f6f4_cta1_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_f8f6f4_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::f8f6f4.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 1, i32 3) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 
2, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_sp_f8f6fr_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_f8f6fr_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_f8f6fr_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_f8f6fr_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_f8f6fr_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_f8f6fr_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_f8f6fr_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_f8f6fr_cta1_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_f8f6fr_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::f8f6f4.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 3) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr 
addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_i8_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_i8_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_i8_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_i8_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_i8_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_i8_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_i8_cta1_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_i8_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::1.kind::i8.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 3) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_sp_i8_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_i8_cta1( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: 
.reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_i8_cta1_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_i8_cta1_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_i8_cta1_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_i8_cta1_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_i8_cta1_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_i8_cta1_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_i8_cta1_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::1.kind::i8.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 3) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 1, i32 3) + ret void +} + +define void @tcgen05_mma_fp16_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_fp16_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; 
+; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_fp16_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_fp16_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_fp16_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_fp16_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_fp16_cta2_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_fp16_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f16.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 3) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 0, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_fp16_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_fp16_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_fp16_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_fp16_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_fp16_cta2_param_2]; +; 
CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_fp16_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_fp16_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_fp16_cta2_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_fp16_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f16.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 3) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 0, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_tf32_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_tf32_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_tf32_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_tf32_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, 
[tcgen05_mma_tf32_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_tf32_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_tf32_cta2_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_tf32_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::tf32.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 3) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 1, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_tf32_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_tf32_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_tf32_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_tf32_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_tf32_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_tf32_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_tf32_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_tf32_cta2_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 
%r4, [tcgen05_mma_sp_tf32_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::tf32.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 3) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 1, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_f8f6f4_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_f8f6f4_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_f8f6f4_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_f8f6f4_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_f8f6f4_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_f8f6f4_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_f8f6f4_cta2_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, 
[tcgen05_mma_f8f6f4_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::f8f6f4.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 3) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 2, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_f8f6fr_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_f8f6fr_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_f8f6fr_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_f8f6fr_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_f8f6fr_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_f8f6fr_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_f8f6fr_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_f8f6fr_cta2_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_f8f6fr_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::discard.ashift [%r1], [%r4], %rd2, 
[%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::f8f6f4.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 3) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 2, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_i8_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) { +; CHECK-LABEL: tcgen05_mma_i8_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_i8_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_i8_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_i8_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_i8_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_i8_cta2_param_4]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_i8_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::discard.ashift [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: 
tcgen05.mma.cta_group::2.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::lastuse [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::fill [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::fill [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::use [%r1], %rd1, %rd2, %r2, %p1; +; CHECK-NEXT: tcgen05.mma.cta_group::2.kind::i8.collector::a::use [%r1], [%r3], %rd2, %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 3) + call void @llvm.nvvm.tcgen05.mma.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, i32 3, i32 2, i32 3) + ret void +} + +define void @tcgen05_mma_sp_i8_cta2(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata) { +; CHECK-LABEL: tcgen05_mma_sp_i8_cta2( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<5>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b8 %rs1, [tcgen05_mma_sp_i8_cta2_param_5]; +; CHECK-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NEXT: ld.param.b32 %r1, [tcgen05_mma_sp_i8_cta2_param_0]; +; CHECK-NEXT: ld.param.b64 %rd1, [tcgen05_mma_sp_i8_cta2_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [tcgen05_mma_sp_i8_cta2_param_3]; +; CHECK-NEXT: ld.param.b32 %r2, [tcgen05_mma_sp_i8_cta2_param_4]; +; CHECK-NEXT: ld.param.b32 %r3, [tcgen05_mma_sp_i8_cta2_param_6]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ld.param.b32 %r4, [tcgen05_mma_sp_i8_cta2_param_1]; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::discard.ashift [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::lastuse [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::lastuse [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::fill [%r1], %rd1, %rd2, [%r3], %r2, 
%p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::fill [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::use [%r1], %rd1, %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: tcgen05.mma.sp.cta_group::2.kind::i8.collector::a::use [%r1], [%r4], %rd2, [%r3], %r2, %p1; +; CHECK-NEXT: ret; + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.tensor.ashift(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 0) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 1) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 2) + call void @llvm.nvvm.tcgen05.mma.sp.shared(ptr addrspace(6) %dtmem, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 3) + call void @llvm.nvvm.tcgen05.mma.sp.tensor(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %spmetadata, i32 3, i32 2, i32 3) + ret void +} diff --git a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll index 216d498e85411..5f637e3ecddd3 100644 --- a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll +++ b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll @@ -1,36 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ -; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ -; RUN: --enable-no-nans-fp-math \ -; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s -; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ -; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ -; RUN: --enable-no-nans-fp-math \ -; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s ; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ -; RUN: --check-prefix=NO-FAST-P9 +; RUN: --check-prefix=P9 ; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ -; RUN: --check-prefix=NO-FAST-P8 +; RUN: --check-prefix=P8 define dso_local float @testfmax(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmax: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmax: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmax: -; 
NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bgtlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmax: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxcdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmax: +; P8: # %bb.0: # %entry +; P8-NEXT: fcmpu cr0, f1, f2 +; P8-NEXT: bgtlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp ogt float %a, %b %cond = select i1 %cmp, float %a, float %b @@ -38,23 +25,18 @@ entry: } define dso_local double @testdmax(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmax: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmax: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmax: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: bgtlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmax: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxcdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmax: +; P8: # %bb.0: # %entry +; P8-NEXT: xscmpudp cr0, f1, f2 +; P8-NEXT: bgtlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp ogt double %a, %b %cond = select i1 %cmp, double %a, double %b @@ -62,23 +44,18 @@ entry: } define dso_local float @testfmin(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmin: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmin: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmin: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bltlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmin: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmincdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmin: +; P8: # %bb.0: # %entry +; P8-NEXT: fcmpu cr0, f1, f2 +; P8-NEXT: bltlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp olt float %a, %b %cond = select i1 %cmp, float %a, float %b @@ -86,23 +63,18 @@ entry: } define dso_local double @testdmin(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmin: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmin: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmin: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: bltlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmin: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmincdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmin: +; P8: # %bb.0: # %entry +; P8-NEXT: xscmpudp cr0, f1, f2 +; P8-NEXT: bltlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp olt double %a, %b %cond = select i1 %cmp, double %a, double %b @@ -110,86 +82,62 @@ entry: } define dso_local float @testfmax_fast(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmax_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: 
testfmax_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmax_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubsp f0, f2, f1 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmax_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmax_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmaxdp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf ogt float %a, %b - %cond = select i1 %cmp, float %a, float %b + %cond = select nnan nsz i1 %cmp, float %a, float %b ret float %cond } define dso_local double @testdmax_fast(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmax_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmax_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmax_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubdp f0, f2, f1 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmax_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmax_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmaxdp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf ogt double %a, %b - %cond = select i1 %cmp, double %a, double %b + %cond = select nnan nsz i1 %cmp, double %a, double %b ret double %cond } define dso_local float @testfmin_fast(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmin_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmin_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmin_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubsp f0, f1, f2 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmin_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmindp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmin_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmindp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf olt float %a, %b - %cond = select i1 %cmp, float %a, float %b + %cond = select nnan nsz i1 %cmp, float %a, float %b ret float %cond } define dso_local double @testdmin_fast(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmin_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmin_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmin_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubdp f0, f1, f2 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmin_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmindp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmin_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmindp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf olt double %a, %b - %cond = select i1 %cmp, double %a, double %b + %cond = select nnan nsz i1 %cmp, double %a, double %b ret double %cond } diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll index aaabd76e163bb..fd0b494d57677 100644 --- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll +++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll @@ -20,18 +20,18 @@ define float 
@select_oeq_float(float %a, float %b, float %c, float %d) { ; FAST-P8-LABEL: select_oeq_float: ; FAST-P8: # %bb.0: # %entry -; FAST-P8-NEXT: xssubsp f0, f2, f1 -; FAST-P8-NEXT: xssubsp f1, f1, f2 -; FAST-P8-NEXT: fsel f1, f1, f3, f4 -; FAST-P8-NEXT: fsel f1, f0, f1, f4 +; FAST-P8-NEXT: xssubsp f0, f1, f2 +; FAST-P8-NEXT: xsnegdp f1, f0 +; FAST-P8-NEXT: fsel f0, f0, f3, f4 +; FAST-P8-NEXT: fsel f1, f1, f0, f4 ; FAST-P8-NEXT: blr ; ; FAST-P9-LABEL: select_oeq_float: ; FAST-P9: # %bb.0: # %entry -; FAST-P9-NEXT: xssubsp f0, f2, f1 -; FAST-P9-NEXT: xssubsp f1, f1, f2 -; FAST-P9-NEXT: fsel f1, f1, f3, f4 -; FAST-P9-NEXT: fsel f1, f0, f1, f4 +; FAST-P9-NEXT: xssubsp f0, f1, f2 +; FAST-P9-NEXT: xsnegdp f1, f0 +; FAST-P9-NEXT: fsel f0, f0, f3, f4 +; FAST-P9-NEXT: fsel f1, f1, f0, f4 ; FAST-P9-NEXT: blr ; ; NO-FAST-P8-LABEL: select_oeq_float: @@ -59,6 +59,48 @@ entry: ret float %cond } +define float @select_oeq_float_nsz(float %a, float %b, float %c, float %d) { +; FAST-P8-LABEL: select_oeq_float_nsz: +; FAST-P8: # %bb.0: # %entry +; FAST-P8-NEXT: xssubsp f0, f2, f1 +; FAST-P8-NEXT: xssubsp f1, f1, f2 +; FAST-P8-NEXT: fsel f1, f1, f3, f4 +; FAST-P8-NEXT: fsel f1, f0, f1, f4 +; FAST-P8-NEXT: blr +; +; FAST-P9-LABEL: select_oeq_float_nsz: +; FAST-P9: # %bb.0: # %entry +; FAST-P9-NEXT: xssubsp f0, f2, f1 +; FAST-P9-NEXT: xssubsp f1, f1, f2 +; FAST-P9-NEXT: fsel f1, f1, f3, f4 +; FAST-P9-NEXT: fsel f1, f0, f1, f4 +; FAST-P9-NEXT: blr +; +; NO-FAST-P8-LABEL: select_oeq_float_nsz: +; NO-FAST-P8: # %bb.0: # %entry +; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P8-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P8-NEXT: # %bb.1: # %entry +; NO-FAST-P8-NEXT: fmr f3, f4 +; NO-FAST-P8-NEXT: .LBB1_2: # %entry +; NO-FAST-P8-NEXT: fmr f1, f3 +; NO-FAST-P8-NEXT: blr +; +; NO-FAST-P9-LABEL: select_oeq_float_nsz: +; NO-FAST-P9: # %bb.0: # %entry +; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P9-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P9-NEXT: # %bb.1: # %entry +; NO-FAST-P9-NEXT: fmr f3, f4 +; NO-FAST-P9-NEXT: .LBB1_2: # %entry +; NO-FAST-P9-NEXT: fmr f1, f3 +; NO-FAST-P9-NEXT: blr +entry: + %cmp = fcmp nsz oeq float %a, %b + %cond = select i1 %cmp, float %c, float %d + ret float %cond +} + define double @select_oeq_double(double %a, double %b, double %c, double %d) { ; FAST-P8-LABEL: select_oeq_double: ; FAST-P8: # %bb.0: # %entry @@ -79,20 +121,20 @@ define double @select_oeq_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_oeq_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P8-NEXT: beq cr0, .LBB2_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB1_2: # %entry +; NO-FAST-P8-NEXT: .LBB2_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_oeq_double: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P9-NEXT: beq cr0, .LBB2_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB1_2: # %entry +; NO-FAST-P9-NEXT: .LBB2_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -182,13 +224,57 @@ entry: define float @select_one_float(float %a, float %b, float %c, float %d) { ; FAST-P8-LABEL: select_one_float: ; FAST-P8: # %bb.0: # %entry +; FAST-P8-NEXT: xssubsp f0, f1, f2 +; FAST-P8-NEXT: xsnegdp f1, f0 +; FAST-P8-NEXT: fsel f0, f0, f4, f3 +; FAST-P8-NEXT: fsel f1, f1, f0, f3 +; FAST-P8-NEXT: blr +; +; FAST-P9-LABEL: 
select_one_float: +; FAST-P9: # %bb.0: # %entry +; FAST-P9-NEXT: xssubsp f0, f1, f2 +; FAST-P9-NEXT: xsnegdp f1, f0 +; FAST-P9-NEXT: fsel f0, f0, f4, f3 +; FAST-P9-NEXT: fsel f1, f1, f0, f3 +; FAST-P9-NEXT: blr +; +; NO-FAST-P8-LABEL: select_one_float: +; NO-FAST-P8: # %bb.0: # %entry +; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P8-NEXT: # %bb.1: # %entry +; NO-FAST-P8-NEXT: fmr f3, f4 +; NO-FAST-P8-NEXT: .LBB5_2: # %entry +; NO-FAST-P8-NEXT: fmr f1, f3 +; NO-FAST-P8-NEXT: blr +; +; NO-FAST-P9-LABEL: select_one_float: +; NO-FAST-P9: # %bb.0: # %entry +; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P9-NEXT: # %bb.1: # %entry +; NO-FAST-P9-NEXT: fmr f3, f4 +; NO-FAST-P9-NEXT: .LBB5_2: # %entry +; NO-FAST-P9-NEXT: fmr f1, f3 +; NO-FAST-P9-NEXT: blr +entry: + %cmp = fcmp one float %a, %b + %cond = select i1 %cmp, float %c, float %d + ret float %cond +} + +define float @select_one_float_nsz(float %a, float %b, float %c, float %d) { +; FAST-P8-LABEL: select_one_float_nsz: +; FAST-P8: # %bb.0: # %entry ; FAST-P8-NEXT: xssubsp f0, f2, f1 ; FAST-P8-NEXT: xssubsp f1, f1, f2 ; FAST-P8-NEXT: fsel f1, f1, f4, f3 ; FAST-P8-NEXT: fsel f1, f0, f1, f3 ; FAST-P8-NEXT: blr ; -; FAST-P9-LABEL: select_one_float: +; FAST-P9-LABEL: select_one_float_nsz: ; FAST-P9: # %bb.0: # %entry ; FAST-P9-NEXT: xssubsp f0, f2, f1 ; FAST-P9-NEXT: xssubsp f1, f1, f2 @@ -196,29 +282,29 @@ define float @select_one_float(float %a, float %b, float %c, float %d) { ; FAST-P9-NEXT: fsel f1, f0, f1, f3 ; FAST-P9-NEXT: blr ; -; NO-FAST-P8-LABEL: select_one_float: +; NO-FAST-P8-LABEL: select_one_float_nsz: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB4_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB4_2: # %entry +; NO-FAST-P8-NEXT: .LBB6_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; -; NO-FAST-P9-LABEL: select_one_float: +; NO-FAST-P9-LABEL: select_one_float_nsz: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB4_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB4_2: # %entry +; NO-FAST-P9-NEXT: .LBB6_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: - %cmp = fcmp one float %a, %b + %cmp = fcmp nsz one float %a, %b %cond = select i1 %cmp, float %c, float %d ret float %cond } @@ -244,10 +330,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB5_2: # %entry +; NO-FAST-P8-NEXT: .LBB7_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -255,10 +341,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P9-NEXT: 
bc 12, 4*cr5+lt, .LBB7_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB5_2: # %entry +; NO-FAST-P9-NEXT: .LBB7_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -362,10 +448,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB8_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB8_2: # %entry +; NO-FAST-P8-NEXT: .LBB10_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -373,10 +459,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB8_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB8_2: # %entry +; NO-FAST-P9-NEXT: .LBB10_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -402,10 +488,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB9_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB9_2: # %entry +; NO-FAST-P8-NEXT: .LBB11_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -413,10 +499,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB9_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB9_2: # %entry +; NO-FAST-P9-NEXT: .LBB11_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -503,20 +589,20 @@ define float @select_olt_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8-LABEL: select_olt_float: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: blt cr0, .LBB12_2 +; NO-FAST-P8-NEXT: blt cr0, .LBB14_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB12_2: # %entry +; NO-FAST-P8-NEXT: .LBB14_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_olt_float: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P9-NEXT: blt cr0, .LBB12_2 +; NO-FAST-P9-NEXT: blt cr0, .LBB14_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB12_2: # %entry +; NO-FAST-P9-NEXT: .LBB14_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -541,20 +627,20 @@ define double @select_olt_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_olt_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: blt cr0, .LBB13_2 +; NO-FAST-P8-NEXT: blt cr0, .LBB15_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB13_2: # %entry +; NO-FAST-P8-NEXT: .LBB15_2: # %entry ; NO-FAST-P8-NEXT: 
fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_olt_double: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: blt cr0, .LBB13_2 +; NO-FAST-P9-NEXT: blt cr0, .LBB15_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB13_2: # %entry +; NO-FAST-P9-NEXT: .LBB15_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -641,20 +727,20 @@ define float @select_ogt_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8-LABEL: select_ogt_float: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bgt cr0, .LBB16_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB18_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB16_2: # %entry +; NO-FAST-P8-NEXT: .LBB18_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_ogt_float: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P9-NEXT: bgt cr0, .LBB16_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB18_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB16_2: # %entry +; NO-FAST-P9-NEXT: .LBB18_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -679,20 +765,20 @@ define double @select_ogt_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_ogt_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: bgt cr0, .LBB17_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB19_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB17_2: # %entry +; NO-FAST-P8-NEXT: .LBB19_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_ogt_double: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: bgt cr0, .LBB17_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB19_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB17_2: # %entry +; NO-FAST-P9-NEXT: .LBB19_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -780,10 +866,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB20_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB20_2: # %entry +; NO-FAST-P8-NEXT: .LBB22_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -791,10 +877,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB20_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB20_2: # %entry +; NO-FAST-P9-NEXT: .LBB22_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -820,10 +906,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB21_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; 
NO-FAST-P8-NEXT: .LBB21_2: # %entry +; NO-FAST-P8-NEXT: .LBB23_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -831,10 +917,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB21_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB21_2: # %entry +; NO-FAST-P9-NEXT: .LBB23_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -926,13 +1012,13 @@ define double @onecmp1(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f0 -; NO-FAST-P8-NEXT: bc 12, lt, .LBB24_3 +; NO-FAST-P8-NEXT: bc 12, lt, .LBB26_3 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f1 -; NO-FAST-P8-NEXT: bc 12, un, .LBB24_3 +; NO-FAST-P8-NEXT: bc 12, un, .LBB26_3 ; NO-FAST-P8-NEXT: # %bb.2: # %entry ; NO-FAST-P8-NEXT: fmr f3, f2 -; NO-FAST-P8-NEXT: .LBB24_3: # %entry +; NO-FAST-P8-NEXT: .LBB26_3: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -941,13 +1027,13 @@ define double @onecmp1(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f0 -; NO-FAST-P9-NEXT: bc 12, lt, .LBB24_3 +; NO-FAST-P9-NEXT: bc 12, lt, .LBB26_3 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f1 -; NO-FAST-P9-NEXT: bc 12, un, .LBB24_3 +; NO-FAST-P9-NEXT: bc 12, un, .LBB26_3 ; NO-FAST-P9-NEXT: # %bb.2: # %entry ; NO-FAST-P9-NEXT: fmr f3, f2 -; NO-FAST-P9-NEXT: .LBB24_3: # %entry +; NO-FAST-P9-NEXT: .LBB26_3: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -978,10 +1064,10 @@ define double @onecmp2(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P8-NEXT: bgt cr0, .LBB25_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB27_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f2, f3 -; NO-FAST-P8-NEXT: .LBB25_2: # %entry +; NO-FAST-P8-NEXT: .LBB27_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f2 ; NO-FAST-P8-NEXT: blr ; @@ -990,10 +1076,10 @@ define double @onecmp2(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P9-NEXT: bgt cr0, .LBB25_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB27_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f2, f3 -; NO-FAST-P9-NEXT: .LBB25_2: # %entry +; NO-FAST-P9-NEXT: .LBB27_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f2 ; NO-FAST-P9-NEXT: blr entry: @@ -1028,10 +1114,10 @@ define double @onecmp3(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P8-NEXT: beq cr0, .LBB26_2 +; NO-FAST-P8-NEXT: beq cr0, .LBB28_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f2, f3 -; NO-FAST-P8-NEXT: .LBB26_2: # %entry +; NO-FAST-P8-NEXT: .LBB28_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f2 ; NO-FAST-P8-NEXT: blr ; @@ -1040,10 +1126,10 @@ define double @onecmp3(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P9-NEXT: beq cr0, .LBB26_2 +; 
NO-FAST-P9-NEXT: beq cr0, .LBB28_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f2, f3 -; NO-FAST-P9-NEXT: .LBB26_2: # %entry +; NO-FAST-P9-NEXT: .LBB28_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f2 ; NO-FAST-P9-NEXT: blr entry: diff --git a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll index a2ad2946cc8ec..98314a02c23fe 100644 --- a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll +++ b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll @@ -897,31 +897,31 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) { ; P8LE-NEXT: mfvsrd r6, v2 ; P8LE-NEXT: mfvsrd r8, v3 ; P8LE-NEXT: ori r3, r3, 51289 +; P8LE-NEXT: mffprd r4, f0 ; P8LE-NEXT: ori r5, r5, 42889 -; P8LE-NEXT: rldic r4, r3, 36, 1 -; P8LE-NEXT: mffprd r3, f0 +; P8LE-NEXT: rldic r3, r3, 36, 1 ; P8LE-NEXT: rldic r5, r5, 35, 1 ; P8LE-NEXT: rldicl r7, r6, 63, 1 -; P8LE-NEXT: oris r4, r4, 45590 +; P8LE-NEXT: oris r3, r3, 45590 ; P8LE-NEXT: oris r5, r5, 1603 -; P8LE-NEXT: ori r4, r4, 17097 +; P8LE-NEXT: ori r3, r3, 17097 ; P8LE-NEXT: ori r5, r5, 21445 -; P8LE-NEXT: mulhdu r4, r3, r4 +; P8LE-NEXT: mulhdu r3, r4, r3 ; P8LE-NEXT: mulhdu r5, r7, r5 -; P8LE-NEXT: sub r7, r3, r4 +; P8LE-NEXT: sub r7, r4, r3 ; P8LE-NEXT: rldicl r5, r5, 57, 7 ; P8LE-NEXT: rldicl r7, r7, 63, 1 ; P8LE-NEXT: mulli r5, r5, 654 -; P8LE-NEXT: add r4, r7, r4 +; P8LE-NEXT: add r3, r7, r3 ; P8LE-NEXT: lis r7, -16037 ; P8LE-NEXT: ori r7, r7, 28749 -; P8LE-NEXT: rldicl r4, r4, 60, 4 +; P8LE-NEXT: rldicl r3, r3, 60, 4 ; P8LE-NEXT: sub r5, r6, r5 ; P8LE-NEXT: rldic r7, r7, 32, 0 -; P8LE-NEXT: mulli r4, r4, 23 +; P8LE-NEXT: mulli r3, r3, 23 ; P8LE-NEXT: oris r7, r7, 52170 ; P8LE-NEXT: ori r7, r7, 12109 -; P8LE-NEXT: sub r3, r3, r4 +; P8LE-NEXT: sub r3, r4, r3 ; P8LE-NEXT: mulhdu r7, r8, r7 ; P8LE-NEXT: mtfprd f1, r3 ; P8LE-NEXT: li r3, 0 diff --git a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll index 435b0ab3fea6c..816b12e2d8e5b 100644 --- a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll +++ b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll @@ -35,12 +35,12 @@ define i64 @test2elt(<2 x i64> %a) local_unnamed_addr #0 { ; ; CHECK-BE-LABEL: test2elt: ; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xscvuxdsp f0, v2 +; CHECK-BE-NEXT: xscvdpspn v3, f0 ; CHECK-BE-NEXT: xxswapd vs0, v2 -; CHECK-BE-NEXT: xscvuxdsp f1, v2 ; CHECK-BE-NEXT: xscvuxdsp f0, f0 -; CHECK-BE-NEXT: xscvdpspn v2, f1 -; CHECK-BE-NEXT: xscvdpspn v3, f0 -; CHECK-BE-NEXT: vmrgow v2, v2, v3 +; CHECK-BE-NEXT: xscvdpspn v2, f0 +; CHECK-BE-NEXT: vmrgow v2, v3, v2 ; CHECK-BE-NEXT: mfvsrd r3, v2 ; CHECK-BE-NEXT: blr entry: @@ -327,12 +327,12 @@ define i64 @test2elt_signed(<2 x i64> %a) local_unnamed_addr #0 { ; ; CHECK-BE-LABEL: test2elt_signed: ; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xscvsxdsp f0, v2 +; CHECK-BE-NEXT: xscvdpspn v3, f0 ; CHECK-BE-NEXT: xxswapd vs0, v2 -; CHECK-BE-NEXT: xscvsxdsp f1, v2 ; CHECK-BE-NEXT: xscvsxdsp f0, f0 -; CHECK-BE-NEXT: xscvdpspn v2, f1 -; CHECK-BE-NEXT: xscvdpspn v3, f0 -; CHECK-BE-NEXT: vmrgow v2, v2, v3 +; CHECK-BE-NEXT: xscvdpspn v2, f0 +; CHECK-BE-NEXT: vmrgow v2, v3, v2 ; CHECK-BE-NEXT: mfvsrd r3, v2 ; CHECK-BE-NEXT: blr entry: diff --git a/llvm/test/CodeGen/PowerPC/vector-all-ones.ll b/llvm/test/CodeGen/PowerPC/vector-all-ones.ll new file mode 100644 index 0000000000000..e4c93adcf50a6 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/vector-all-ones.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 
UTC_ARGS: --version 6 +; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s + +; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64-ibm-aix \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s + +; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc-ibm-aix \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s + +; Currently the generated code uses `vspltisw` to generate vector of 1s followed by add operation. +; This pattern is expected to be optimized in a future patch by using `xxleqv` to generate vector of -1s +; followed by subtraction operation. +define dso_local noundef <4 x i32> @test1(<4 x i32> %a) { +; CHECK-LABEL: test1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vspltisw v3, 1 +; CHECK-NEXT: vadduwm v2, v2, v3 +; CHECK-NEXT: blr +entry: + %add = add <4 x i32> %a, splat (i32 1) + ret <4 x i32> %add +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll index 9a1ed8f115b35..1d5d918422b28 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll @@ -37,7 +37,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind { ; ; RV32IA-LABEL: atomic_load_i8_unordered: ; RV32IA: # %bb.0: -; RV32IA-NEXT: lb a0, 0(a0) +; RV32IA-NEXT: lbu a0, 0(a0) ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomic_load_i8_unordered: @@ -52,7 +52,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind { ; ; RV64IA-LABEL: atomic_load_i8_unordered: ; RV64IA: # %bb.0: -; RV64IA-NEXT: lb a0, 0(a0) +; RV64IA-NEXT: lbu a0, 0(a0) ; RV64IA-NEXT: ret %1 = load atomic i8, ptr %a unordered, align 1 ret i8 %1 @@ -71,7 +71,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind { ; ; RV32IA-LABEL: atomic_load_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: lb a0, 0(a0) +; RV32IA-NEXT: lbu a0, 0(a0) ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomic_load_i8_monotonic: @@ -86,7 +86,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind { ; ; RV64IA-LABEL: atomic_load_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: lb a0, 0(a0) +; RV64IA-NEXT: lbu a0, 0(a0) ; RV64IA-NEXT: ret %1 = load atomic i8, ptr %a monotonic, align 1 ret i8 %1 @@ -105,13 +105,13 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind { ; ; RV32IA-WMO-LABEL: atomic_load_i8_acquire: ; RV32IA-WMO: # %bb.0: -; RV32IA-WMO-NEXT: lb a0, 0(a0) +; RV32IA-WMO-NEXT: lbu a0, 0(a0) ; RV32IA-WMO-NEXT: fence r, rw ; RV32IA-WMO-NEXT: ret ; ; RV32IA-TSO-LABEL: atomic_load_i8_acquire: ; RV32IA-TSO: # %bb.0: -; RV32IA-TSO-NEXT: lb a0, 0(a0) +; RV32IA-TSO-NEXT: lbu a0, 0(a0) ; RV32IA-TSO-NEXT: ret ; ; RV64I-LABEL: atomic_load_i8_acquire: @@ -126,35 +126,35 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind { ; ; RV64IA-WMO-LABEL: atomic_load_i8_acquire: ; RV64IA-WMO: # %bb.0: -; RV64IA-WMO-NEXT: lb a0, 0(a0) +; RV64IA-WMO-NEXT: lbu a0, 0(a0) ; RV64IA-WMO-NEXT: fence r, rw ; RV64IA-WMO-NEXT: ret ; ; RV64IA-TSO-LABEL: atomic_load_i8_acquire: ; RV64IA-TSO: # %bb.0: -; RV64IA-TSO-NEXT: lb a0, 0(a0) +; RV64IA-TSO-NEXT: lbu a0, 0(a0) ; RV64IA-TSO-NEXT: ret ; ; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire: ; RV32IA-WMO-TRAILING-FENCE: # %bb.0: -; RV32IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV32IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw ; RV32IA-WMO-TRAILING-FENCE-NEXT: ret ; ; 
RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire: ; RV32IA-TSO-TRAILING-FENCE: # %bb.0: -; RV32IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV32IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV32IA-TSO-TRAILING-FENCE-NEXT: ret ; ; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire: ; RV64IA-WMO-TRAILING-FENCE: # %bb.0: -; RV64IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV64IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw ; RV64IA-WMO-TRAILING-FENCE-NEXT: ret ; ; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire: ; RV64IA-TSO-TRAILING-FENCE: # %bb.0: -; RV64IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV64IA-TSO-TRAILING-FENCE-NEXT: ret %1 = load atomic i8, ptr %a acquire, align 1 ret i8 %1 @@ -174,14 +174,14 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind { ; RV32IA-WMO-LABEL: atomic_load_i8_seq_cst: ; RV32IA-WMO: # %bb.0: ; RV32IA-WMO-NEXT: fence rw, rw -; RV32IA-WMO-NEXT: lb a0, 0(a0) +; RV32IA-WMO-NEXT: lbu a0, 0(a0) ; RV32IA-WMO-NEXT: fence r, rw ; RV32IA-WMO-NEXT: ret ; ; RV32IA-TSO-LABEL: atomic_load_i8_seq_cst: ; RV32IA-TSO: # %bb.0: ; RV32IA-TSO-NEXT: fence rw, rw -; RV32IA-TSO-NEXT: lb a0, 0(a0) +; RV32IA-TSO-NEXT: lbu a0, 0(a0) ; RV32IA-TSO-NEXT: ret ; ; RV64I-LABEL: atomic_load_i8_seq_cst: @@ -197,40 +197,40 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind { ; RV64IA-WMO-LABEL: atomic_load_i8_seq_cst: ; RV64IA-WMO: # %bb.0: ; RV64IA-WMO-NEXT: fence rw, rw -; RV64IA-WMO-NEXT: lb a0, 0(a0) +; RV64IA-WMO-NEXT: lbu a0, 0(a0) ; RV64IA-WMO-NEXT: fence r, rw ; RV64IA-WMO-NEXT: ret ; ; RV64IA-TSO-LABEL: atomic_load_i8_seq_cst: ; RV64IA-TSO: # %bb.0: ; RV64IA-TSO-NEXT: fence rw, rw -; RV64IA-TSO-NEXT: lb a0, 0(a0) +; RV64IA-TSO-NEXT: lbu a0, 0(a0) ; RV64IA-TSO-NEXT: ret ; ; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst: ; RV32IA-WMO-TRAILING-FENCE: # %bb.0: ; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw -; RV32IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV32IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw ; RV32IA-WMO-TRAILING-FENCE-NEXT: ret ; ; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst: ; RV32IA-TSO-TRAILING-FENCE: # %bb.0: ; RV32IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw -; RV32IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV32IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV32IA-TSO-TRAILING-FENCE-NEXT: ret ; ; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst: ; RV64IA-WMO-TRAILING-FENCE: # %bb.0: ; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw -; RV64IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV64IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw ; RV64IA-WMO-TRAILING-FENCE-NEXT: ret ; ; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst: ; RV64IA-TSO-TRAILING-FENCE: # %bb.0: ; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw -; RV64IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0) +; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0) ; RV64IA-TSO-TRAILING-FENCE-NEXT: ret %1 = load atomic i8, ptr %a seq_cst, align 1 ret i8 %1 diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll index 7f387a763b6da..23f660bb026a7 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll @@ -10,7 +10,7 @@ ; RUN: | FileCheck -check-prefix=RV64IF %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d \ ; RUN: -target-abi=lp64d \ -; RUN: | 
FileCheck -check-prefix=RV64IF %s +; RUN: | FileCheck -check-prefix=RV64IFD %s ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel \ ; RUN: | FileCheck -check-prefix=RV32I %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel \ @@ -27,6 +27,11 @@ define float @sqrt_f32(float %a) nounwind { ; RV64IF-NEXT: fsqrt.s fa0, fa0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: sqrt_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fsqrt.s fa0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sqrt_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -68,6 +73,16 @@ define float @powi_f32(float %a, i32 %b) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: powi_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: sext.w a0, a0 +; RV64IFD-NEXT: call __powisf2 +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: powi_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -109,6 +124,15 @@ define float @sin_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: sin_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call sinf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sin_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -149,6 +173,15 @@ define float @cos_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: cos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call cosf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: cos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -190,6 +223,42 @@ define float @sincos_f32(float %a) nounwind { ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; +; RV64IF-LABEL: sincos_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill +; RV64IF-NEXT: fsw fs1, 0(sp) # 4-byte Folded Spill +; RV64IF-NEXT: fmv.s fs0, fa0 +; RV64IF-NEXT: call sinf +; RV64IF-NEXT: fmv.s fs1, fa0 +; RV64IF-NEXT: fmv.s fa0, fs0 +; RV64IF-NEXT: call cosf +; RV64IF-NEXT: fadd.s fa0, fs1, fa0 +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload +; RV64IF-NEXT: flw fs1, 0(sp) # 4-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; +; RV64IFD-LABEL: sincos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -32 +; RV64IFD-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fmv.s fs0, fa0 +; RV64IFD-NEXT: call sinf +; RV64IFD-NEXT: fmv.s fs1, fa0 +; RV64IFD-NEXT: fmv.s fa0, fs0 +; RV64IFD-NEXT: call cosf +; RV64IFD-NEXT: fadd.s fa0, fs1, fa0 +; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 32 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sincos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -254,6 +323,15 @@ define float @pow_f32(float %a, float %b) nounwind { ; 
RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: pow_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call powf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: pow_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -294,6 +372,15 @@ define float @exp_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: exp_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call expf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: exp_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -334,6 +421,15 @@ define float @exp2_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: exp2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call exp2f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: exp2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -374,6 +470,15 @@ define float @exp10_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: exp10_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call exp10f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: exp10_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -414,6 +519,15 @@ define float @log_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: log_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call logf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: log_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -454,6 +568,15 @@ define float @log10_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: log10_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call log10f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: log10_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -494,6 +617,15 @@ define float @log2_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: log2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call log2f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: log2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -526,6 +658,11 @@ define float @fma_f32(float %a, float %b, float %c) nounwind { ; RV64IF-NEXT: fmadd.s fa0, fa0, fa1, fa2 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: fma_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmadd.s fa0, fa0, fa1, fa2 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fma_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -558,6 +695,11 @@ define float 
@fmuladd_f32(float %a, float %b, float %c) nounwind { ; RV64IF-NEXT: fmadd.s fa0, fa0, fa1, fa2 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: fmuladd_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmadd.s fa0, fa0, fa1, fa2 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fmuladd_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -600,6 +742,11 @@ define float @fabs_f32(float %a) nounwind { ; RV64IF-NEXT: fabs.s fa0, fa0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: fabs_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fabs.s fa0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fabs_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -626,6 +773,11 @@ define float @minnum_f32(float %a, float %b) nounwind { ; RV64IF-NEXT: fmin.s fa0, fa0, fa1 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: minnum_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmin.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: minnum_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -658,6 +810,11 @@ define float @maxnum_f32(float %a, float %b) nounwind { ; RV64IF-NEXT: fmax.s fa0, fa0, fa1 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: maxnum_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmax.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: maxnum_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -690,6 +847,11 @@ define float @copysign_f32(float %a, float %b) nounwind { ; RV64IF-NEXT: fsgnj.s fa0, fa0, fa1 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: copysign_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fsgnj.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: copysign_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 524288 @@ -730,6 +892,15 @@ define float @ceil_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: ceil_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call ceilf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: ceil_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -770,6 +941,15 @@ define float @trunc_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: trunc_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call truncf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: trunc_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -810,6 +990,15 @@ define float @rint_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: rint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call rintf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: rint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -850,6 +1039,15 @@ define float @nearbyint_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: nearbyint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call nearbyintf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: nearbyint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -890,6 +1088,15 @@ define float @round_f32(float %a) nounwind { ; RV64IF-NEXT: addi 
sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: round_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call roundf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: round_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -930,6 +1137,15 @@ define float @roundeven_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: roundeven_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call roundevenf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: roundeven_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -966,6 +1182,13 @@ define i1 @fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 927 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1039,6 +1262,13 @@ define i1 @isnan_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 768 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1073,6 +1303,13 @@ define i1 @isqnan_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isqnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 512 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isqnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1109,6 +1346,13 @@ define i1 @issnan_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: issnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 256 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: issnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1149,6 +1393,13 @@ define i1 @isinf_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isinf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 129 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isinf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1185,6 +1436,13 @@ define i1 @isposinf_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isposinf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 128 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isposinf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1218,6 +1476,13 @@ define i1 @isneginf_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isneginf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 1 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isneginf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 1046528 @@ -1251,6 +1516,13 @@ define i1 @isfinite_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isfinite_fpclass: +; RV64IFD: # 
%bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 126 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1285,6 +1557,13 @@ define i1 @isposfinite_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isposfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 112 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isposfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1316,6 +1595,13 @@ define i1 @isnegfinite_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isnegfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 14 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnegfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1357,6 +1643,13 @@ define i1 @isnotfinite_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isnotfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 897 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnotfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1401,6 +1694,15 @@ define float @tan_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: tan_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call tanf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: tan_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1442,6 +1744,16 @@ define float @ldexp_float(float %x, i32 %y) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: ldexp_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: sext.w a0, a0 +; RV64IFD-NEXT: call ldexpf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: ldexp_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1483,6 +1795,15 @@ define float @asin_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: asin_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call asinf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: asin_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1523,6 +1844,15 @@ define float @acos_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: acos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call acosf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: acos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1563,6 +1893,15 @@ define float @atan_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: atan_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call atanf +; RV64IFD-NEXT: ld 
ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: atan_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1603,6 +1942,15 @@ define float @atan2_f32(float %a, float %b) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: atan2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call atan2f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: atan2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1643,6 +1991,15 @@ define float @sinh_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: sinh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call sinhf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sinh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1683,6 +2040,15 @@ define float @cosh_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: cosh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call coshf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: cosh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1723,6 +2089,15 @@ define float @tanh_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: tanh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call tanhf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: tanh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/fpr-gpr-copy-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/fpr-gpr-copy-rv64.ll index 287bbbad6d52d..2a2abbdf9fa35 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/fpr-gpr-copy-rv64.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/fpr-gpr-copy-rv64.ll @@ -30,6 +30,13 @@ define float @fadd_f32(float %x, float %y) { ; RV32I-NEXT: fadd.d fa5, fa5, fa4 ; RV32I-NEXT: fmv.x.d a0, fa5 ; RV32I-NEXT: ret +; RV64I-LABEL: fadd_f32: +; RV64I: # %bb.0: +; RV64I-NEXT: fmv.w.x fa5, a0 +; RV64I-NEXT: fmv.w.x fa4, a1 +; RV64I-NEXT: fadd.s fa5, fa5, fa4 +; RV64I-NEXT: fmv.x.w a0, fa5 +; RV64I-NEXT: ret %a = fadd float %x, %y ret float %a } diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir index 86084ae531cdb..f1d17f9f02b90 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir @@ -505,6 +505,9 @@ # DEBUG-NEXT: G_FREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT: .. the first uncovered type index: 1, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK +# DEBUG-NEXT: G_FMODF (opcode {{[0-9]+}}): 1 type index, 0 imm indices +# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined # DEBUG-NEXT: G_FPOW (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT: .. 
opcode {{[0-9]+}} is aliased to {{[0-9]+}} # DEBUG-NEXT: .. the first uncovered type index: 1, OK @@ -607,11 +610,11 @@ # DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined # DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined # DEBUG-NEXT: G_FMINIMUMNUM (opcode {{[0-9]+}}): 1 type index, 0 imm indices -# DEBUG-NEXT: .. opcode 219 is aliased to 183 +# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} # DEBUG-NEXT: .. the first uncovered type index: 1, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FMAXIMUMNUM (opcode {{[0-9]+}}): 1 type index, 0 imm indices -# DEBUG-NEXT: .. opcode 220 is aliased to 183 +# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} # DEBUG-NEXT: .. the first uncovered type index: 1, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_GET_FPENV (opcode {{[0-9]+}}): 1 type index, 0 imm indices @@ -635,6 +638,9 @@ # DEBUG-NEXT: G_GET_ROUNDING (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT:.. type index coverage check SKIPPED: no rules defined # DEBUG-NEXT:.. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: G_SET_ROUNDING (opcode {{[0-9]+}}): 1 type index, 0 imm indices +# DEBUG-NEXT:.. type index coverage check SKIPPED: no rules defined +# DEBUG-NEXT:.. imm index coverage check SKIPPED: no rules defined # DEBUG-NEXT: G_PTR_ADD (opcode {{[0-9]+}}): 2 type indices, 0 imm indices # DEBUG-NEXT: .. the first uncovered type index: 2, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK diff --git a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll index 55b0d1f0bf7be..2a46a59e90535 100644 --- a/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll +++ b/llvm/test/CodeGen/RISCV/and-negpow2-cmp.ll @@ -155,3 +155,109 @@ define i1 @test9(i64 %x) { %b = icmp eq i64 %a, u0x08000000 ret i1 %b } + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. If it's an opaque constant, we'll have addi -16 and addi 15. +define i64 @test10(i64 %0) #0 { +; RV32-LABEL: test10: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: andi a0, a0, -16 +; RV32-NEXT: snez a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test10: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: sraiw a0, a0, 4 +; RV64-NEXT: snez a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0xffffffff + %2 = and i64 %1, u0xfffffff0 + %3 = icmp ne i64 %2, 0 + %4 = zext i1 %3 to i64 + ret i64 %4 +} + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. If it's an opaque constant, we'll have addi -16 and addi 15. +define i64 @test11(i64 %0) #0 { +; RV32-LABEL: test11: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: srai a0, a0, 4 +; RV32-NEXT: addi a0, a0, 1621 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test11: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: sraiw a0, a0, 4 +; RV64-NEXT: addi a0, a0, 1621 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0xffffffff + %2 = and i64 %1, u0xfffffff0 + %3 = icmp eq i64 %2, u0xffff9ab0 + %4 = zext i1 %3 to i64 + ret i64 %4 +} + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. If it's an opaque constant we'll end up with constant +; materialization sequences on RV64. 
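(Editorial aside, not part of the patch.) The "opaque constant" wording in the comments above (test10-test12) refers to ConstantHoisting rebasing an expensive integer constant behind a no-op bitcast; once instruction selection only sees the opaque handle, the mask can no longer be folded into the and/icmp, and on RISC-V it gets materialized with extra addi pairs or full lui/addi sequences instead. A rough, hypothetical LLVM IR sketch of the shape these tests guard against follows; the function name, the bitcast placement, and the choice of hoisted constant are illustrative assumptions, not the output of any pass:

; Hypothetical post-ConstantHoisting shape (illustrative only, not from this patch).
define i64 @test12_opaque_sketch(i64 %0) {
entry:
  ; The mask is now an opaque handle rather than a plain immediate, so ISel
  ; must materialize it instead of folding it into the and/compare.
  %mask = bitcast i64 u0xffffffff to i64
  %1 = add nuw nsw i64 %0, u0xfffffff0
  %2 = and i64 %1, %mask
  %3 = icmp eq i64 %2, u0xfffffff3
  %4 = zext i1 %3 to i64
  ret i64 %4
}

The CHECK lines in test10-test13 assert the compact folded sequences, which only survive when the constants stay visible as plain immediates.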
+define i64 @test12(i64 %0) #0 { +; RV32-LABEL: test12: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi a0, a0, -3 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test12: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addiw a0, a0, -16 +; RV64-NEXT: addi a0, a0, 13 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0xfffffff0 + %2 = and i64 %1, u0xffffffff + %3 = icmp eq i64 %2, u0xfffffff3 + %4 = zext i1 %3 to i64 + ret i64 %4 +} + +; Make sure the and constant doesn't get converted to an opaque constant by +; ConstantHoisting. +define i64 @test13(i64 %0) #0 { +; RV32-LABEL: test13: +; RV32: # %bb.0: # %entry +; RV32-NEXT: lui a1, 524288 +; RV32-NEXT: addi a1, a1, 15 +; RV32-NEXT: add a0, a0, a1 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: seqz a0, a0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: test13: +; RV64: # %bb.0: # %entry +; RV64-NEXT: lui a1, 524288 +; RV64-NEXT: addi a1, a1, -15 +; RV64-NEXT: sub a0, a0, a1 +; RV64-NEXT: sraiw a0, a0, 31 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: ret +entry: + %1 = add nuw nsw i64 %0, u0x8000000f + %2 = and i64 %1, u0x80000000 + %3 = icmp eq i64 %2, 0 + %4 = zext i1 %3 to i64 + ret i64 %4 +} diff --git a/llvm/test/CodeGen/RISCV/bfloat-convert.ll b/llvm/test/CodeGen/RISCV/bfloat-convert.ll index 6207a17734d62..73ff888e44b3b 100644 --- a/llvm/test/CodeGen/RISCV/bfloat-convert.ll +++ b/llvm/test/CodeGen/RISCV/bfloat-convert.ll @@ -51,13 +51,14 @@ define i16 @fcvt_si_bf16_sat(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_si_bf16_sat: ; CHECK32ZFBFMIN: # %bb.0: # %start ; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0 -; CHECK32ZFBFMIN-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK32ZFBFMIN-NEXT: feq.s a1, fa5, fa5 -; CHECK32ZFBFMIN-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; CHECK32ZFBFMIN-NEXT: lui a0, 815104 -; CHECK32ZFBFMIN-NEXT: fmv.w.x fa3, a0 -; CHECK32ZFBFMIN-NEXT: fmax.s fa5, fa5, fa3 -; CHECK32ZFBFMIN-NEXT: neg a0, a1 +; CHECK32ZFBFMIN-NEXT: lui a1, 290816 +; CHECK32ZFBFMIN-NEXT: fmv.w.x fa4, a0 +; CHECK32ZFBFMIN-NEXT: feq.s a0, fa5, fa5 +; CHECK32ZFBFMIN-NEXT: addi a1, a1, -512 +; CHECK32ZFBFMIN-NEXT: neg a0, a0 +; CHECK32ZFBFMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK32ZFBFMIN-NEXT: fmv.w.x fa4, a1 ; CHECK32ZFBFMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK32ZFBFMIN-NEXT: fcvt.w.s a1, fa5, rtz ; CHECK32ZFBFMIN-NEXT: and a0, a0, a1 @@ -68,12 +69,13 @@ define i16 @fcvt_si_bf16_sat(bfloat %a) nounwind { ; RV32ID-NEXT: fmv.x.w a0, fa0 ; RV32ID-NEXT: lui a1, 815104 ; RV32ID-NEXT: fmv.w.x fa5, a1 -; RV32ID-NEXT: lui a1, %hi(.LCPI1_0) +; RV32ID-NEXT: lui a1, 290816 ; RV32ID-NEXT: slli a0, a0, 16 -; RV32ID-NEXT: flw fa4, %lo(.LCPI1_0)(a1) -; RV32ID-NEXT: fmv.w.x fa3, a0 -; RV32ID-NEXT: feq.s a0, fa3, fa3 -; RV32ID-NEXT: fmax.s fa5, fa3, fa5 +; RV32ID-NEXT: addi a1, a1, -512 +; RV32ID-NEXT: fmv.w.x fa4, a0 +; RV32ID-NEXT: feq.s a0, fa4, fa4 +; RV32ID-NEXT: fmax.s fa5, fa4, fa5 +; RV32ID-NEXT: fmv.w.x fa4, a1 ; RV32ID-NEXT: neg a0, a0 ; RV32ID-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-NEXT: fcvt.w.s a1, fa5, rtz @@ -83,13 +85,14 @@ define i16 @fcvt_si_bf16_sat(bfloat %a) nounwind { ; CHECK64ZFBFMIN-LABEL: fcvt_si_bf16_sat: ; CHECK64ZFBFMIN: # %bb.0: # %start ; CHECK64ZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0 -; CHECK64ZFBFMIN-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK64ZFBFMIN-NEXT: feq.s a1, fa5, fa5 -; CHECK64ZFBFMIN-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; CHECK64ZFBFMIN-NEXT: lui a0, 815104 -; CHECK64ZFBFMIN-NEXT: fmv.w.x fa3, a0 -; CHECK64ZFBFMIN-NEXT: fmax.s fa5, fa5, fa3 -; CHECK64ZFBFMIN-NEXT: neg a0, a1 +; CHECK64ZFBFMIN-NEXT: 
lui a1, 290816 +; CHECK64ZFBFMIN-NEXT: fmv.w.x fa4, a0 +; CHECK64ZFBFMIN-NEXT: feq.s a0, fa5, fa5 +; CHECK64ZFBFMIN-NEXT: addi a1, a1, -512 +; CHECK64ZFBFMIN-NEXT: neg a0, a0 +; CHECK64ZFBFMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK64ZFBFMIN-NEXT: fmv.w.x fa4, a1 ; CHECK64ZFBFMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK64ZFBFMIN-NEXT: fcvt.l.s a1, fa5, rtz ; CHECK64ZFBFMIN-NEXT: and a0, a0, a1 @@ -100,12 +103,13 @@ define i16 @fcvt_si_bf16_sat(bfloat %a) nounwind { ; RV64ID-NEXT: fmv.x.w a0, fa0 ; RV64ID-NEXT: lui a1, 815104 ; RV64ID-NEXT: fmv.w.x fa5, a1 -; RV64ID-NEXT: lui a1, %hi(.LCPI1_0) +; RV64ID-NEXT: lui a1, 290816 ; RV64ID-NEXT: slli a0, a0, 16 -; RV64ID-NEXT: flw fa4, %lo(.LCPI1_0)(a1) -; RV64ID-NEXT: fmv.w.x fa3, a0 -; RV64ID-NEXT: feq.s a0, fa3, fa3 -; RV64ID-NEXT: fmax.s fa5, fa3, fa5 +; RV64ID-NEXT: addi a1, a1, -512 +; RV64ID-NEXT: fmv.w.x fa4, a0 +; RV64ID-NEXT: feq.s a0, fa4, fa4 +; RV64ID-NEXT: fmax.s fa5, fa4, fa5 +; RV64ID-NEXT: fmv.w.x fa4, a1 ; RV64ID-NEXT: neg a0, a0 ; RV64ID-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-NEXT: fcvt.l.s a1, fa5, rtz @@ -152,49 +156,53 @@ define i16 @fcvt_ui_bf16(bfloat %a) nounwind { define i16 @fcvt_ui_bf16_sat(bfloat %a) nounwind { ; CHECK32ZFBFMIN-LABEL: fcvt_ui_bf16_sat: ; CHECK32ZFBFMIN: # %bb.0: # %start -; CHECK32ZFBFMIN-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK32ZFBFMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa4, fa0 -; CHECK32ZFBFMIN-NEXT: fmv.w.x fa3, zero -; CHECK32ZFBFMIN-NEXT: fmax.s fa4, fa4, fa3 -; CHECK32ZFBFMIN-NEXT: fmin.s fa5, fa4, fa5 +; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0 +; CHECK32ZFBFMIN-NEXT: fmv.w.x fa4, zero +; CHECK32ZFBFMIN-NEXT: lui a0, 292864 +; CHECK32ZFBFMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK32ZFBFMIN-NEXT: addi a0, a0, -256 +; CHECK32ZFBFMIN-NEXT: fmv.w.x fa4, a0 +; CHECK32ZFBFMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK32ZFBFMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; CHECK32ZFBFMIN-NEXT: ret ; ; RV32ID-LABEL: fcvt_ui_bf16_sat: ; RV32ID: # %bb.0: # %start -; RV32ID-NEXT: lui a0, %hi(.LCPI3_0) -; RV32ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0) ; RV32ID-NEXT: fmv.x.w a0, fa0 +; RV32ID-NEXT: fmv.w.x fa5, zero ; RV32ID-NEXT: slli a0, a0, 16 ; RV32ID-NEXT: fmv.w.x fa4, a0 -; RV32ID-NEXT: fmv.w.x fa3, zero -; RV32ID-NEXT: fmax.s fa4, fa4, fa3 -; RV32ID-NEXT: fmin.s fa5, fa4, fa5 +; RV32ID-NEXT: lui a0, 292864 +; RV32ID-NEXT: addi a0, a0, -256 +; RV32ID-NEXT: fmax.s fa5, fa4, fa5 +; RV32ID-NEXT: fmv.w.x fa4, a0 +; RV32ID-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32ID-NEXT: ret ; ; CHECK64ZFBFMIN-LABEL: fcvt_ui_bf16_sat: ; CHECK64ZFBFMIN: # %bb.0: # %start -; CHECK64ZFBFMIN-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK64ZFBFMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; CHECK64ZFBFMIN-NEXT: fcvt.s.bf16 fa4, fa0 -; CHECK64ZFBFMIN-NEXT: fmv.w.x fa3, zero -; CHECK64ZFBFMIN-NEXT: fmax.s fa4, fa4, fa3 -; CHECK64ZFBFMIN-NEXT: fmin.s fa5, fa4, fa5 +; CHECK64ZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0 +; CHECK64ZFBFMIN-NEXT: fmv.w.x fa4, zero +; CHECK64ZFBFMIN-NEXT: lui a0, 292864 +; CHECK64ZFBFMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK64ZFBFMIN-NEXT: addi a0, a0, -256 +; CHECK64ZFBFMIN-NEXT: fmv.w.x fa4, a0 +; CHECK64ZFBFMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK64ZFBFMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64ZFBFMIN-NEXT: ret ; ; RV64ID-LABEL: fcvt_ui_bf16_sat: ; RV64ID: # %bb.0: # %start -; RV64ID-NEXT: lui a0, %hi(.LCPI3_0) -; RV64ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0) ; RV64ID-NEXT: fmv.x.w a0, fa0 +; RV64ID-NEXT: fmv.w.x fa5, zero ; RV64ID-NEXT: slli a0, a0, 16 ; RV64ID-NEXT: fmv.w.x fa4, a0 -; RV64ID-NEXT: fmv.w.x fa3, zero -; 
RV64ID-NEXT: fmax.s fa4, fa4, fa3 -; RV64ID-NEXT: fmin.s fa5, fa4, fa5 +; RV64ID-NEXT: lui a0, 292864 +; RV64ID-NEXT: addi a0, a0, -256 +; RV64ID-NEXT: fmax.s fa5, fa4, fa5 +; RV64ID-NEXT: fmv.w.x fa4, a0 +; RV64ID-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64ID-NEXT: ret start: @@ -472,20 +480,21 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind { ; RV32IZFBFMIN-NEXT: # %bb.1: # %start ; RV32IZFBFMIN-NEXT: mv a2, a1 ; RV32IZFBFMIN-NEXT: .LBB10_2: # %start -; RV32IZFBFMIN-NEXT: lui a1, %hi(.LCPI10_0) -; RV32IZFBFMIN-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; RV32IZFBFMIN-NEXT: lui a1, 389120 +; RV32IZFBFMIN-NEXT: addi a1, a1, -1 +; RV32IZFBFMIN-NEXT: fmv.w.x fa5, a1 ; RV32IZFBFMIN-NEXT: flt.s a1, fa5, fs0 ; RV32IZFBFMIN-NEXT: beqz a1, .LBB10_4 ; RV32IZFBFMIN-NEXT: # %bb.3: ; RV32IZFBFMIN-NEXT: addi a2, a3, -1 ; RV32IZFBFMIN-NEXT: .LBB10_4: # %start ; RV32IZFBFMIN-NEXT: feq.s a3, fs0, fs0 -; RV32IZFBFMIN-NEXT: neg a4, a1 -; RV32IZFBFMIN-NEXT: neg a1, s0 +; RV32IZFBFMIN-NEXT: neg a4, s0 +; RV32IZFBFMIN-NEXT: neg a5, a1 ; RV32IZFBFMIN-NEXT: neg a3, a3 -; RV32IZFBFMIN-NEXT: and a0, a1, a0 +; RV32IZFBFMIN-NEXT: and a0, a4, a0 ; RV32IZFBFMIN-NEXT: and a1, a3, a2 -; RV32IZFBFMIN-NEXT: or a0, a4, a0 +; RV32IZFBFMIN-NEXT: or a0, a5, a0 ; RV32IZFBFMIN-NEXT: and a0, a3, a0 ; RV32IZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -511,20 +520,21 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind { ; R32IDZFBFMIN-NEXT: # %bb.1: # %start ; R32IDZFBFMIN-NEXT: mv a2, a1 ; R32IDZFBFMIN-NEXT: .LBB10_2: # %start -; R32IDZFBFMIN-NEXT: lui a1, %hi(.LCPI10_0) -; R32IDZFBFMIN-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; R32IDZFBFMIN-NEXT: lui a1, 389120 +; R32IDZFBFMIN-NEXT: addi a1, a1, -1 +; R32IDZFBFMIN-NEXT: fmv.w.x fa5, a1 ; R32IDZFBFMIN-NEXT: flt.s a1, fa5, fs0 ; R32IDZFBFMIN-NEXT: beqz a1, .LBB10_4 ; R32IDZFBFMIN-NEXT: # %bb.3: ; R32IDZFBFMIN-NEXT: addi a2, a3, -1 ; R32IDZFBFMIN-NEXT: .LBB10_4: # %start ; R32IDZFBFMIN-NEXT: feq.s a3, fs0, fs0 -; R32IDZFBFMIN-NEXT: neg a4, a1 -; R32IDZFBFMIN-NEXT: neg a1, s0 +; R32IDZFBFMIN-NEXT: neg a4, s0 +; R32IDZFBFMIN-NEXT: neg a5, a1 ; R32IDZFBFMIN-NEXT: neg a3, a3 -; R32IDZFBFMIN-NEXT: and a0, a1, a0 +; R32IDZFBFMIN-NEXT: and a0, a4, a0 ; R32IDZFBFMIN-NEXT: and a1, a3, a2 -; R32IDZFBFMIN-NEXT: or a0, a4, a0 +; R32IDZFBFMIN-NEXT: or a0, a5, a0 ; R32IDZFBFMIN-NEXT: and a0, a3, a0 ; R32IDZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; R32IDZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -552,8 +562,9 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind { ; RV32ID-NEXT: # %bb.1: # %start ; RV32ID-NEXT: mv a2, a1 ; RV32ID-NEXT: .LBB10_2: # %start -; RV32ID-NEXT: lui a1, %hi(.LCPI10_0) -; RV32ID-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; RV32ID-NEXT: lui a1, 389120 +; RV32ID-NEXT: addi a1, a1, -1 +; RV32ID-NEXT: fmv.w.x fa5, a1 ; RV32ID-NEXT: flt.s a1, fa5, fs0 ; RV32ID-NEXT: beqz a1, .LBB10_4 ; RV32ID-NEXT: # %bb.3: @@ -641,30 +652,59 @@ define i64 @fcvt_lu_bf16(bfloat %a) nounwind { } define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind { -; CHECK32ZFBFMIN-LABEL: fcvt_lu_bf16_sat: -; CHECK32ZFBFMIN: # %bb.0: # %start -; CHECK32ZFBFMIN-NEXT: addi sp, sp, -16 -; CHECK32ZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; CHECK32ZFBFMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; CHECK32ZFBFMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; CHECK32ZFBFMIN-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK32ZFBFMIN-NEXT: flw fa5, %lo(.LCPI12_0)(a0) -; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa0, fa0 -; CHECK32ZFBFMIN-NEXT: 
fmv.w.x fa4, zero -; CHECK32ZFBFMIN-NEXT: fle.s a0, fa4, fa0 -; CHECK32ZFBFMIN-NEXT: flt.s a1, fa5, fa0 -; CHECK32ZFBFMIN-NEXT: neg s0, a1 -; CHECK32ZFBFMIN-NEXT: neg s1, a0 -; CHECK32ZFBFMIN-NEXT: call __fixunssfdi -; CHECK32ZFBFMIN-NEXT: and a0, s1, a0 -; CHECK32ZFBFMIN-NEXT: and a1, s1, a1 -; CHECK32ZFBFMIN-NEXT: or a0, s0, a0 -; CHECK32ZFBFMIN-NEXT: or a1, s0, a1 -; CHECK32ZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; CHECK32ZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; CHECK32ZFBFMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload -; CHECK32ZFBFMIN-NEXT: addi sp, sp, 16 -; CHECK32ZFBFMIN-NEXT: ret +; RV32IZFBFMIN-LABEL: fcvt_lu_bf16_sat: +; RV32IZFBFMIN: # %bb.0: # %start +; RV32IZFBFMIN-NEXT: addi sp, sp, -16 +; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFBFMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFBFMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill +; RV32IZFBFMIN-NEXT: fcvt.s.bf16 fs0, fa0 +; RV32IZFBFMIN-NEXT: fmv.w.x fa5, zero +; RV32IZFBFMIN-NEXT: fle.s a0, fa5, fs0 +; RV32IZFBFMIN-NEXT: neg s0, a0 +; RV32IZFBFMIN-NEXT: fmv.s fa0, fs0 +; RV32IZFBFMIN-NEXT: call __fixunssfdi +; RV32IZFBFMIN-NEXT: and a0, s0, a0 +; RV32IZFBFMIN-NEXT: lui a2, 391168 +; RV32IZFBFMIN-NEXT: and a1, s0, a1 +; RV32IZFBFMIN-NEXT: addi a2, a2, -1 +; RV32IZFBFMIN-NEXT: fmv.w.x fa5, a2 +; RV32IZFBFMIN-NEXT: flt.s a2, fa5, fs0 +; RV32IZFBFMIN-NEXT: neg a2, a2 +; RV32IZFBFMIN-NEXT: or a0, a2, a0 +; RV32IZFBFMIN-NEXT: or a1, a2, a1 +; RV32IZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZFBFMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload +; RV32IZFBFMIN-NEXT: addi sp, sp, 16 +; RV32IZFBFMIN-NEXT: ret +; +; R32IDZFBFMIN-LABEL: fcvt_lu_bf16_sat: +; R32IDZFBFMIN: # %bb.0: # %start +; R32IDZFBFMIN-NEXT: addi sp, sp, -16 +; R32IDZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; R32IDZFBFMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; R32IDZFBFMIN-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill +; R32IDZFBFMIN-NEXT: fcvt.s.bf16 fs0, fa0 +; R32IDZFBFMIN-NEXT: fmv.w.x fa5, zero +; R32IDZFBFMIN-NEXT: fle.s a0, fa5, fs0 +; R32IDZFBFMIN-NEXT: neg s0, a0 +; R32IDZFBFMIN-NEXT: fmv.s fa0, fs0 +; R32IDZFBFMIN-NEXT: call __fixunssfdi +; R32IDZFBFMIN-NEXT: and a0, s0, a0 +; R32IDZFBFMIN-NEXT: lui a2, 391168 +; R32IDZFBFMIN-NEXT: and a1, s0, a1 +; R32IDZFBFMIN-NEXT: addi a2, a2, -1 +; R32IDZFBFMIN-NEXT: fmv.w.x fa5, a2 +; R32IDZFBFMIN-NEXT: flt.s a2, fa5, fs0 +; R32IDZFBFMIN-NEXT: neg a2, a2 +; R32IDZFBFMIN-NEXT: or a0, a2, a0 +; R32IDZFBFMIN-NEXT: or a1, a2, a1 +; R32IDZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; R32IDZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; R32IDZFBFMIN-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload +; R32IDZFBFMIN-NEXT: addi sp, sp, 16 +; R32IDZFBFMIN-NEXT: ret ; ; RV32ID-LABEL: fcvt_lu_bf16_sat: ; RV32ID: # %bb.0: # %start @@ -673,15 +713,16 @@ define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind { ; RV32ID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32ID-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32ID-NEXT: fmv.x.w a0, fa0 -; RV32ID-NEXT: lui a1, %hi(.LCPI12_0) -; RV32ID-NEXT: fmv.w.x fa5, zero -; RV32ID-NEXT: flw fa4, %lo(.LCPI12_0)(a1) +; RV32ID-NEXT: lui a1, 391168 ; RV32ID-NEXT: slli a0, a0, 16 +; RV32ID-NEXT: addi a1, a1, -1 ; RV32ID-NEXT: fmv.w.x fa0, a0 -; RV32ID-NEXT: fle.s a0, fa5, fa0 -; RV32ID-NEXT: flt.s a1, fa4, fa0 -; RV32ID-NEXT: neg s0, a1 -; RV32ID-NEXT: neg s1, a0 +; RV32ID-NEXT: fmv.w.x fa5, a1 +; RV32ID-NEXT: flt.s a0, fa5, fa0 +; RV32ID-NEXT: fmv.w.x fa5, zero +; 
RV32ID-NEXT: fle.s a1, fa5, fa0 +; RV32ID-NEXT: neg s0, a0 +; RV32ID-NEXT: neg s1, a1 ; RV32ID-NEXT: call __fixunssfdi ; RV32ID-NEXT: and a0, s1, a0 ; RV32ID-NEXT: and a1, s1, a1 diff --git a/llvm/test/CodeGen/RISCV/bfloat-imm.ll b/llvm/test/CodeGen/RISCV/bfloat-imm.ll index 76ff720b1c268..61014891414d8 100644 --- a/llvm/test/CodeGen/RISCV/bfloat-imm.ll +++ b/llvm/test/CodeGen/RISCV/bfloat-imm.ll @@ -7,8 +7,9 @@ define bfloat @bfloat_imm() nounwind { ; CHECK-LABEL: bfloat_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa0, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: lui a0, 4 +; CHECK-NEXT: addi a0, a0, 64 +; CHECK-NEXT: fmv.h.x fa0, a0 ; CHECK-NEXT: ret ret bfloat 3.0 } diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll index d7957540d1b29..d8e6b7f3ede9a 100644 --- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll @@ -519,15 +519,16 @@ define i32 @caller_half_on_stack() nounwind { ; RV32-ILP32F: # %bb.0: ; RV32-ILP32F-NEXT: addi sp, sp, -16 ; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32-ILP32F-NEXT: lui a4, %hi(.LCPI3_0) +; RV32-ILP32F-NEXT: lui a7, 1048565 ; RV32-ILP32F-NEXT: li a0, 1 ; RV32-ILP32F-NEXT: li a1, 2 ; RV32-ILP32F-NEXT: li a2, 3 ; RV32-ILP32F-NEXT: li a3, 4 -; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI3_0)(a4) ; RV32-ILP32F-NEXT: li a4, 5 ; RV32-ILP32F-NEXT: li a5, 6 ; RV32-ILP32F-NEXT: li a6, 7 +; RV32-ILP32F-NEXT: addi a7, a7, -1792 +; RV32-ILP32F-NEXT: fmv.w.x fa0, a7 ; RV32-ILP32F-NEXT: li a7, 8 ; RV32-ILP32F-NEXT: call callee_half_on_stack ; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -538,15 +539,16 @@ define i32 @caller_half_on_stack() nounwind { ; RV64-LP64F: # %bb.0: ; RV64-LP64F-NEXT: addi sp, sp, -16 ; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-LP64F-NEXT: lui a4, %hi(.LCPI3_0) +; RV64-LP64F-NEXT: lui a7, 1048565 ; RV64-LP64F-NEXT: li a0, 1 ; RV64-LP64F-NEXT: li a1, 2 ; RV64-LP64F-NEXT: li a2, 3 ; RV64-LP64F-NEXT: li a3, 4 -; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI3_0)(a4) ; RV64-LP64F-NEXT: li a4, 5 ; RV64-LP64F-NEXT: li a5, 6 ; RV64-LP64F-NEXT: li a6, 7 +; RV64-LP64F-NEXT: addi a7, a7, -1792 +; RV64-LP64F-NEXT: fmv.w.x fa0, a7 ; RV64-LP64F-NEXT: li a7, 8 ; RV64-LP64F-NEXT: call callee_half_on_stack ; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -557,15 +559,16 @@ define i32 @caller_half_on_stack() nounwind { ; RV32-ILP32ZFHMIN: # %bb.0: ; RV32-ILP32ZFHMIN-NEXT: addi sp, sp, -16 ; RV32-ILP32ZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32-ILP32ZFHMIN-NEXT: lui a4, %hi(.LCPI3_0) +; RV32-ILP32ZFHMIN-NEXT: lui a7, 5 ; RV32-ILP32ZFHMIN-NEXT: li a0, 1 ; RV32-ILP32ZFHMIN-NEXT: li a1, 2 ; RV32-ILP32ZFHMIN-NEXT: li a2, 3 ; RV32-ILP32ZFHMIN-NEXT: li a3, 4 -; RV32-ILP32ZFHMIN-NEXT: flh fa0, %lo(.LCPI3_0)(a4) ; RV32-ILP32ZFHMIN-NEXT: li a4, 5 ; RV32-ILP32ZFHMIN-NEXT: li a5, 6 ; RV32-ILP32ZFHMIN-NEXT: li a6, 7 +; RV32-ILP32ZFHMIN-NEXT: addi a7, a7, -1792 +; RV32-ILP32ZFHMIN-NEXT: fmv.h.x fa0, a7 ; RV32-ILP32ZFHMIN-NEXT: li a7, 8 ; RV32-ILP32ZFHMIN-NEXT: call callee_half_on_stack ; RV32-ILP32ZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -576,15 +579,16 @@ define i32 @caller_half_on_stack() nounwind { ; RV64-LP64ZFHMIN: # %bb.0: ; RV64-LP64ZFHMIN-NEXT: addi sp, sp, -16 ; RV64-LP64ZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-LP64ZFHMIN-NEXT: lui a4, %hi(.LCPI3_0) +; RV64-LP64ZFHMIN-NEXT: lui a7, 5 ; RV64-LP64ZFHMIN-NEXT: li a0, 1 ; RV64-LP64ZFHMIN-NEXT: li a1, 2 ; 
RV64-LP64ZFHMIN-NEXT: li a2, 3 ; RV64-LP64ZFHMIN-NEXT: li a3, 4 -; RV64-LP64ZFHMIN-NEXT: flh fa0, %lo(.LCPI3_0)(a4) ; RV64-LP64ZFHMIN-NEXT: li a4, 5 ; RV64-LP64ZFHMIN-NEXT: li a5, 6 ; RV64-LP64ZFHMIN-NEXT: li a6, 7 +; RV64-LP64ZFHMIN-NEXT: addi a7, a7, -1792 +; RV64-LP64ZFHMIN-NEXT: fmv.h.x fa0, a7 ; RV64-LP64ZFHMIN-NEXT: li a7, 8 ; RV64-LP64ZFHMIN-NEXT: call callee_half_on_stack ; RV64-LP64ZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -595,15 +599,16 @@ define i32 @caller_half_on_stack() nounwind { ; RV32-ZFH-ILP32: # %bb.0: ; RV32-ZFH-ILP32-NEXT: addi sp, sp, -16 ; RV32-ZFH-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32-ZFH-ILP32-NEXT: lui a4, %hi(.LCPI3_0) +; RV32-ZFH-ILP32-NEXT: lui a7, 5 ; RV32-ZFH-ILP32-NEXT: li a0, 1 ; RV32-ZFH-ILP32-NEXT: li a1, 2 ; RV32-ZFH-ILP32-NEXT: li a2, 3 ; RV32-ZFH-ILP32-NEXT: li a3, 4 -; RV32-ZFH-ILP32-NEXT: flh fa5, %lo(.LCPI3_0)(a4) ; RV32-ZFH-ILP32-NEXT: li a4, 5 ; RV32-ZFH-ILP32-NEXT: li a5, 6 ; RV32-ZFH-ILP32-NEXT: li a6, 7 +; RV32-ZFH-ILP32-NEXT: addi a7, a7, -1792 +; RV32-ZFH-ILP32-NEXT: fmv.h.x fa5, a7 ; RV32-ZFH-ILP32-NEXT: li a7, 8 ; RV32-ZFH-ILP32-NEXT: fsh fa5, 0(sp) ; RV32-ZFH-ILP32-NEXT: call callee_half_on_stack @@ -615,15 +620,16 @@ define i32 @caller_half_on_stack() nounwind { ; RV32-ZFH-ILP32F: # %bb.0: ; RV32-ZFH-ILP32F-NEXT: addi sp, sp, -16 ; RV32-ZFH-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32-ZFH-ILP32F-NEXT: lui a4, %hi(.LCPI3_0) +; RV32-ZFH-ILP32F-NEXT: lui a7, 5 ; RV32-ZFH-ILP32F-NEXT: li a0, 1 ; RV32-ZFH-ILP32F-NEXT: li a1, 2 ; RV32-ZFH-ILP32F-NEXT: li a2, 3 ; RV32-ZFH-ILP32F-NEXT: li a3, 4 -; RV32-ZFH-ILP32F-NEXT: flh fa0, %lo(.LCPI3_0)(a4) ; RV32-ZFH-ILP32F-NEXT: li a4, 5 ; RV32-ZFH-ILP32F-NEXT: li a5, 6 ; RV32-ZFH-ILP32F-NEXT: li a6, 7 +; RV32-ZFH-ILP32F-NEXT: addi a7, a7, -1792 +; RV32-ZFH-ILP32F-NEXT: fmv.h.x fa0, a7 ; RV32-ZFH-ILP32F-NEXT: li a7, 8 ; RV32-ZFH-ILP32F-NEXT: call callee_half_on_stack ; RV32-ZFH-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -634,15 +640,16 @@ define i32 @caller_half_on_stack() nounwind { ; RV64-ZFH-LP64: # %bb.0: ; RV64-ZFH-LP64-NEXT: addi sp, sp, -16 ; RV64-ZFH-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-ZFH-LP64-NEXT: lui a4, %hi(.LCPI3_0) +; RV64-ZFH-LP64-NEXT: lui a7, 5 ; RV64-ZFH-LP64-NEXT: li a0, 1 ; RV64-ZFH-LP64-NEXT: li a1, 2 ; RV64-ZFH-LP64-NEXT: li a2, 3 ; RV64-ZFH-LP64-NEXT: li a3, 4 -; RV64-ZFH-LP64-NEXT: flh fa5, %lo(.LCPI3_0)(a4) ; RV64-ZFH-LP64-NEXT: li a4, 5 ; RV64-ZFH-LP64-NEXT: li a5, 6 ; RV64-ZFH-LP64-NEXT: li a6, 7 +; RV64-ZFH-LP64-NEXT: addi a7, a7, -1792 +; RV64-ZFH-LP64-NEXT: fmv.h.x fa5, a7 ; RV64-ZFH-LP64-NEXT: li a7, 8 ; RV64-ZFH-LP64-NEXT: fsh fa5, 0(sp) ; RV64-ZFH-LP64-NEXT: call callee_half_on_stack @@ -654,15 +661,16 @@ define i32 @caller_half_on_stack() nounwind { ; RV64-ZFH-LP64F: # %bb.0: ; RV64-ZFH-LP64F-NEXT: addi sp, sp, -16 ; RV64-ZFH-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-ZFH-LP64F-NEXT: lui a4, %hi(.LCPI3_0) +; RV64-ZFH-LP64F-NEXT: lui a7, 5 ; RV64-ZFH-LP64F-NEXT: li a0, 1 ; RV64-ZFH-LP64F-NEXT: li a1, 2 ; RV64-ZFH-LP64F-NEXT: li a2, 3 ; RV64-ZFH-LP64F-NEXT: li a3, 4 -; RV64-ZFH-LP64F-NEXT: flh fa0, %lo(.LCPI3_0)(a4) ; RV64-ZFH-LP64F-NEXT: li a4, 5 ; RV64-ZFH-LP64F-NEXT: li a5, 6 ; RV64-ZFH-LP64F-NEXT: li a6, 7 +; RV64-ZFH-LP64F-NEXT: addi a7, a7, -1792 +; RV64-ZFH-LP64F-NEXT: fmv.h.x fa0, a7 ; RV64-ZFH-LP64F-NEXT: li a7, 8 ; RV64-ZFH-LP64F-NEXT: call callee_half_on_stack ; RV64-ZFH-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -1038,31 +1046,32 @@ define i32 
@caller_half_on_stack_exhausted_gprs_fprs() nounwind { ; RV32-ILP32ZFHMIN: # %bb.0: ; RV32-ILP32ZFHMIN-NEXT: addi sp, sp, -16 ; RV32-ILP32ZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32-ILP32ZFHMIN-NEXT: lui a0, %hi(.LCPI5_0) -; RV32-ILP32ZFHMIN-NEXT: lui a1, 260096 -; RV32-ILP32ZFHMIN-NEXT: lui a2, 262144 -; RV32-ILP32ZFHMIN-NEXT: lui a3, 263168 -; RV32-ILP32ZFHMIN-NEXT: lui a4, 264192 -; RV32-ILP32ZFHMIN-NEXT: lui a5, 264704 -; RV32-ILP32ZFHMIN-NEXT: lui a6, 265216 -; RV32-ILP32ZFHMIN-NEXT: lui a7, 265728 -; RV32-ILP32ZFHMIN-NEXT: flh ft0, %lo(.LCPI5_0)(a0) +; RV32-ILP32ZFHMIN-NEXT: lui a7, 5 +; RV32-ILP32ZFHMIN-NEXT: lui a0, 260096 +; RV32-ILP32ZFHMIN-NEXT: lui a1, 262144 +; RV32-ILP32ZFHMIN-NEXT: lui a2, 263168 +; RV32-ILP32ZFHMIN-NEXT: lui a3, 264192 +; RV32-ILP32ZFHMIN-NEXT: lui a4, 264704 +; RV32-ILP32ZFHMIN-NEXT: lui a5, 265216 +; RV32-ILP32ZFHMIN-NEXT: lui a6, 265728 +; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa0, a0 ; RV32-ILP32ZFHMIN-NEXT: lui t0, 266240 -; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa0, a1 +; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa1, a1 ; RV32-ILP32ZFHMIN-NEXT: li a0, 1 -; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa1, a2 +; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa2, a2 ; RV32-ILP32ZFHMIN-NEXT: li a1, 2 -; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa2, a3 +; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa3, a3 ; RV32-ILP32ZFHMIN-NEXT: li a2, 3 -; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa3, a4 +; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa4, a4 ; RV32-ILP32ZFHMIN-NEXT: li a3, 4 -; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa4, a5 -; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa5, a6 -; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa6, a7 -; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa7, t0 +; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa5, a5 ; RV32-ILP32ZFHMIN-NEXT: li a4, 5 +; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa6, a6 ; RV32-ILP32ZFHMIN-NEXT: li a5, 6 +; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa7, t0 ; RV32-ILP32ZFHMIN-NEXT: li a6, 7 +; RV32-ILP32ZFHMIN-NEXT: addi a7, a7, -1792 +; RV32-ILP32ZFHMIN-NEXT: fmv.h.x ft0, a7 ; RV32-ILP32ZFHMIN-NEXT: li a7, 8 ; RV32-ILP32ZFHMIN-NEXT: fsh ft0, 0(sp) ; RV32-ILP32ZFHMIN-NEXT: call callee_half_on_stack @@ -1074,31 +1083,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind { ; RV64-LP64ZFHMIN: # %bb.0: ; RV64-LP64ZFHMIN-NEXT: addi sp, sp, -16 ; RV64-LP64ZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-LP64ZFHMIN-NEXT: lui a0, %hi(.LCPI5_0) -; RV64-LP64ZFHMIN-NEXT: lui a1, 260096 -; RV64-LP64ZFHMIN-NEXT: lui a2, 262144 -; RV64-LP64ZFHMIN-NEXT: lui a3, 263168 -; RV64-LP64ZFHMIN-NEXT: lui a4, 264192 -; RV64-LP64ZFHMIN-NEXT: lui a5, 264704 -; RV64-LP64ZFHMIN-NEXT: lui a6, 265216 -; RV64-LP64ZFHMIN-NEXT: lui a7, 265728 -; RV64-LP64ZFHMIN-NEXT: flh ft0, %lo(.LCPI5_0)(a0) +; RV64-LP64ZFHMIN-NEXT: lui a7, 5 +; RV64-LP64ZFHMIN-NEXT: lui a0, 260096 +; RV64-LP64ZFHMIN-NEXT: lui a1, 262144 +; RV64-LP64ZFHMIN-NEXT: lui a2, 263168 +; RV64-LP64ZFHMIN-NEXT: lui a3, 264192 +; RV64-LP64ZFHMIN-NEXT: lui a4, 264704 +; RV64-LP64ZFHMIN-NEXT: lui a5, 265216 +; RV64-LP64ZFHMIN-NEXT: lui a6, 265728 +; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa0, a0 ; RV64-LP64ZFHMIN-NEXT: lui t0, 266240 -; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa0, a1 +; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa1, a1 ; RV64-LP64ZFHMIN-NEXT: li a0, 1 -; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa1, a2 +; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa2, a2 ; RV64-LP64ZFHMIN-NEXT: li a1, 2 -; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa2, a3 +; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa3, a3 ; RV64-LP64ZFHMIN-NEXT: li a2, 3 -; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa3, a4 +; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa4, a4 ; RV64-LP64ZFHMIN-NEXT: li a3, 4 -; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa4, a5 
-; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa5, a6 -; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa6, a7 -; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa7, t0 +; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa5, a5 ; RV64-LP64ZFHMIN-NEXT: li a4, 5 +; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa6, a6 ; RV64-LP64ZFHMIN-NEXT: li a5, 6 +; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa7, t0 ; RV64-LP64ZFHMIN-NEXT: li a6, 7 +; RV64-LP64ZFHMIN-NEXT: addi a7, a7, -1792 +; RV64-LP64ZFHMIN-NEXT: fmv.h.x ft0, a7 ; RV64-LP64ZFHMIN-NEXT: li a7, 8 ; RV64-LP64ZFHMIN-NEXT: fsh ft0, 0(sp) ; RV64-LP64ZFHMIN-NEXT: call callee_half_on_stack @@ -1110,31 +1120,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind { ; RV32-ZFH-ILP32: # %bb.0: ; RV32-ZFH-ILP32-NEXT: addi sp, sp, -48 ; RV32-ZFH-ILP32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill -; RV32-ZFH-ILP32-NEXT: lui a2, %hi(.LCPI5_0) -; RV32-ZFH-ILP32-NEXT: lui a3, 266240 -; RV32-ZFH-ILP32-NEXT: li a4, 8 -; RV32-ZFH-ILP32-NEXT: lui a5, 265728 -; RV32-ZFH-ILP32-NEXT: li a6, 7 -; RV32-ZFH-ILP32-NEXT: lui a7, 265216 -; RV32-ZFH-ILP32-NEXT: li t0, 6 -; RV32-ZFH-ILP32-NEXT: lui t1, 264704 -; RV32-ZFH-ILP32-NEXT: li t2, 5 +; RV32-ZFH-ILP32-NEXT: lui a5, 266240 +; RV32-ZFH-ILP32-NEXT: li a6, 8 +; RV32-ZFH-ILP32-NEXT: lui a7, 265728 +; RV32-ZFH-ILP32-NEXT: li t0, 7 +; RV32-ZFH-ILP32-NEXT: lui t1, 265216 +; RV32-ZFH-ILP32-NEXT: li t2, 6 +; RV32-ZFH-ILP32-NEXT: lui t3, 264704 +; RV32-ZFH-ILP32-NEXT: li t4, 5 +; RV32-ZFH-ILP32-NEXT: lui t5, 5 ; RV32-ZFH-ILP32-NEXT: li a0, 1 ; RV32-ZFH-ILP32-NEXT: lui a1, 260096 -; RV32-ZFH-ILP32-NEXT: flh fa5, %lo(.LCPI5_0)(a2) ; RV32-ZFH-ILP32-NEXT: li a2, 2 -; RV32-ZFH-ILP32-NEXT: sw a6, 16(sp) -; RV32-ZFH-ILP32-NEXT: sw a5, 20(sp) -; RV32-ZFH-ILP32-NEXT: sw a4, 24(sp) -; RV32-ZFH-ILP32-NEXT: sw a3, 28(sp) ; RV32-ZFH-ILP32-NEXT: lui a3, 262144 -; RV32-ZFH-ILP32-NEXT: sw t2, 0(sp) -; RV32-ZFH-ILP32-NEXT: sw t1, 4(sp) -; RV32-ZFH-ILP32-NEXT: sw t0, 8(sp) -; RV32-ZFH-ILP32-NEXT: sw a7, 12(sp) ; RV32-ZFH-ILP32-NEXT: li a4, 3 +; RV32-ZFH-ILP32-NEXT: sw t0, 16(sp) +; RV32-ZFH-ILP32-NEXT: sw a7, 20(sp) +; RV32-ZFH-ILP32-NEXT: sw a6, 24(sp) +; RV32-ZFH-ILP32-NEXT: sw a5, 28(sp) ; RV32-ZFH-ILP32-NEXT: lui a5, 263168 +; RV32-ZFH-ILP32-NEXT: sw t4, 0(sp) +; RV32-ZFH-ILP32-NEXT: sw t3, 4(sp) +; RV32-ZFH-ILP32-NEXT: sw t2, 8(sp) +; RV32-ZFH-ILP32-NEXT: sw t1, 12(sp) ; RV32-ZFH-ILP32-NEXT: li a6, 4 +; RV32-ZFH-ILP32-NEXT: addi a7, t5, -1792 +; RV32-ZFH-ILP32-NEXT: fmv.h.x fa5, a7 ; RV32-ZFH-ILP32-NEXT: lui a7, 264192 ; RV32-ZFH-ILP32-NEXT: fsh fa5, 32(sp) ; RV32-ZFH-ILP32-NEXT: call callee_half_on_stack @@ -1146,31 +1157,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind { ; RV32-ZFH-ILP32F: # %bb.0: ; RV32-ZFH-ILP32F-NEXT: addi sp, sp, -16 ; RV32-ZFH-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32-ZFH-ILP32F-NEXT: lui a0, %hi(.LCPI5_0) -; RV32-ZFH-ILP32F-NEXT: lui a1, 260096 -; RV32-ZFH-ILP32F-NEXT: lui a2, 262144 -; RV32-ZFH-ILP32F-NEXT: lui a3, 263168 -; RV32-ZFH-ILP32F-NEXT: lui a4, 264192 -; RV32-ZFH-ILP32F-NEXT: lui a5, 264704 -; RV32-ZFH-ILP32F-NEXT: lui a6, 265216 -; RV32-ZFH-ILP32F-NEXT: lui a7, 265728 -; RV32-ZFH-ILP32F-NEXT: flh ft0, %lo(.LCPI5_0)(a0) +; RV32-ZFH-ILP32F-NEXT: lui a7, 5 +; RV32-ZFH-ILP32F-NEXT: lui a0, 260096 +; RV32-ZFH-ILP32F-NEXT: lui a1, 262144 +; RV32-ZFH-ILP32F-NEXT: lui a2, 263168 +; RV32-ZFH-ILP32F-NEXT: lui a3, 264192 +; RV32-ZFH-ILP32F-NEXT: lui a4, 264704 +; RV32-ZFH-ILP32F-NEXT: lui a5, 265216 +; RV32-ZFH-ILP32F-NEXT: lui a6, 265728 +; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa0, a0 ; RV32-ZFH-ILP32F-NEXT: lui t0, 266240 -; RV32-ZFH-ILP32F-NEXT: fmv.w.x 
fa0, a1 +; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa1, a1 ; RV32-ZFH-ILP32F-NEXT: li a0, 1 -; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa1, a2 +; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa2, a2 ; RV32-ZFH-ILP32F-NEXT: li a1, 2 -; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa2, a3 +; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa3, a3 ; RV32-ZFH-ILP32F-NEXT: li a2, 3 -; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa3, a4 +; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa4, a4 ; RV32-ZFH-ILP32F-NEXT: li a3, 4 -; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa4, a5 -; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa5, a6 -; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa6, a7 -; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa7, t0 +; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa5, a5 ; RV32-ZFH-ILP32F-NEXT: li a4, 5 +; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa6, a6 ; RV32-ZFH-ILP32F-NEXT: li a5, 6 +; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa7, t0 ; RV32-ZFH-ILP32F-NEXT: li a6, 7 +; RV32-ZFH-ILP32F-NEXT: addi a7, a7, -1792 +; RV32-ZFH-ILP32F-NEXT: fmv.h.x ft0, a7 ; RV32-ZFH-ILP32F-NEXT: li a7, 8 ; RV32-ZFH-ILP32F-NEXT: fsh ft0, 0(sp) ; RV32-ZFH-ILP32F-NEXT: call callee_half_on_stack @@ -1182,31 +1194,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind { ; RV64-ZFH-LP64: # %bb.0: ; RV64-ZFH-LP64-NEXT: addi sp, sp, -80 ; RV64-ZFH-LP64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill -; RV64-ZFH-LP64-NEXT: lui a2, %hi(.LCPI5_0) -; RV64-ZFH-LP64-NEXT: lui a3, 266240 -; RV64-ZFH-LP64-NEXT: li a4, 8 -; RV64-ZFH-LP64-NEXT: lui a5, 265728 -; RV64-ZFH-LP64-NEXT: li a6, 7 -; RV64-ZFH-LP64-NEXT: lui a7, 265216 -; RV64-ZFH-LP64-NEXT: li t0, 6 -; RV64-ZFH-LP64-NEXT: lui t1, 264704 -; RV64-ZFH-LP64-NEXT: li t2, 5 +; RV64-ZFH-LP64-NEXT: lui a5, 266240 +; RV64-ZFH-LP64-NEXT: li a6, 8 +; RV64-ZFH-LP64-NEXT: lui a7, 265728 +; RV64-ZFH-LP64-NEXT: li t0, 7 +; RV64-ZFH-LP64-NEXT: lui t1, 265216 +; RV64-ZFH-LP64-NEXT: li t2, 6 +; RV64-ZFH-LP64-NEXT: lui t3, 264704 +; RV64-ZFH-LP64-NEXT: li t4, 5 +; RV64-ZFH-LP64-NEXT: lui t5, 5 ; RV64-ZFH-LP64-NEXT: li a0, 1 ; RV64-ZFH-LP64-NEXT: lui a1, 260096 -; RV64-ZFH-LP64-NEXT: flh fa5, %lo(.LCPI5_0)(a2) ; RV64-ZFH-LP64-NEXT: li a2, 2 -; RV64-ZFH-LP64-NEXT: sd a6, 32(sp) -; RV64-ZFH-LP64-NEXT: sw a5, 40(sp) -; RV64-ZFH-LP64-NEXT: sd a4, 48(sp) -; RV64-ZFH-LP64-NEXT: sw a3, 56(sp) ; RV64-ZFH-LP64-NEXT: lui a3, 262144 -; RV64-ZFH-LP64-NEXT: sd t2, 0(sp) -; RV64-ZFH-LP64-NEXT: sw t1, 8(sp) -; RV64-ZFH-LP64-NEXT: sd t0, 16(sp) -; RV64-ZFH-LP64-NEXT: sw a7, 24(sp) ; RV64-ZFH-LP64-NEXT: li a4, 3 +; RV64-ZFH-LP64-NEXT: sd t0, 32(sp) +; RV64-ZFH-LP64-NEXT: sw a7, 40(sp) +; RV64-ZFH-LP64-NEXT: sd a6, 48(sp) +; RV64-ZFH-LP64-NEXT: sw a5, 56(sp) ; RV64-ZFH-LP64-NEXT: lui a5, 263168 +; RV64-ZFH-LP64-NEXT: sd t4, 0(sp) +; RV64-ZFH-LP64-NEXT: sw t3, 8(sp) +; RV64-ZFH-LP64-NEXT: sd t2, 16(sp) +; RV64-ZFH-LP64-NEXT: sw t1, 24(sp) ; RV64-ZFH-LP64-NEXT: li a6, 4 +; RV64-ZFH-LP64-NEXT: addi a7, t5, -1792 +; RV64-ZFH-LP64-NEXT: fmv.h.x fa5, a7 ; RV64-ZFH-LP64-NEXT: lui a7, 264192 ; RV64-ZFH-LP64-NEXT: fsh fa5, 64(sp) ; RV64-ZFH-LP64-NEXT: call callee_half_on_stack @@ -1218,31 +1231,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind { ; RV64-ZFH-LP64F: # %bb.0: ; RV64-ZFH-LP64F-NEXT: addi sp, sp, -16 ; RV64-ZFH-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-ZFH-LP64F-NEXT: lui a0, %hi(.LCPI5_0) -; RV64-ZFH-LP64F-NEXT: lui a1, 260096 -; RV64-ZFH-LP64F-NEXT: lui a2, 262144 -; RV64-ZFH-LP64F-NEXT: lui a3, 263168 -; RV64-ZFH-LP64F-NEXT: lui a4, 264192 -; RV64-ZFH-LP64F-NEXT: lui a5, 264704 -; RV64-ZFH-LP64F-NEXT: lui a6, 265216 -; RV64-ZFH-LP64F-NEXT: lui a7, 265728 -; RV64-ZFH-LP64F-NEXT: flh ft0, %lo(.LCPI5_0)(a0) +; 
RV64-ZFH-LP64F-NEXT: lui a7, 5 +; RV64-ZFH-LP64F-NEXT: lui a0, 260096 +; RV64-ZFH-LP64F-NEXT: lui a1, 262144 +; RV64-ZFH-LP64F-NEXT: lui a2, 263168 +; RV64-ZFH-LP64F-NEXT: lui a3, 264192 +; RV64-ZFH-LP64F-NEXT: lui a4, 264704 +; RV64-ZFH-LP64F-NEXT: lui a5, 265216 +; RV64-ZFH-LP64F-NEXT: lui a6, 265728 +; RV64-ZFH-LP64F-NEXT: fmv.w.x fa0, a0 ; RV64-ZFH-LP64F-NEXT: lui t0, 266240 -; RV64-ZFH-LP64F-NEXT: fmv.w.x fa0, a1 +; RV64-ZFH-LP64F-NEXT: fmv.w.x fa1, a1 ; RV64-ZFH-LP64F-NEXT: li a0, 1 -; RV64-ZFH-LP64F-NEXT: fmv.w.x fa1, a2 +; RV64-ZFH-LP64F-NEXT: fmv.w.x fa2, a2 ; RV64-ZFH-LP64F-NEXT: li a1, 2 -; RV64-ZFH-LP64F-NEXT: fmv.w.x fa2, a3 +; RV64-ZFH-LP64F-NEXT: fmv.w.x fa3, a3 ; RV64-ZFH-LP64F-NEXT: li a2, 3 -; RV64-ZFH-LP64F-NEXT: fmv.w.x fa3, a4 +; RV64-ZFH-LP64F-NEXT: fmv.w.x fa4, a4 ; RV64-ZFH-LP64F-NEXT: li a3, 4 -; RV64-ZFH-LP64F-NEXT: fmv.w.x fa4, a5 -; RV64-ZFH-LP64F-NEXT: fmv.w.x fa5, a6 -; RV64-ZFH-LP64F-NEXT: fmv.w.x fa6, a7 -; RV64-ZFH-LP64F-NEXT: fmv.w.x fa7, t0 +; RV64-ZFH-LP64F-NEXT: fmv.w.x fa5, a5 ; RV64-ZFH-LP64F-NEXT: li a4, 5 +; RV64-ZFH-LP64F-NEXT: fmv.w.x fa6, a6 ; RV64-ZFH-LP64F-NEXT: li a5, 6 +; RV64-ZFH-LP64F-NEXT: fmv.w.x fa7, t0 ; RV64-ZFH-LP64F-NEXT: li a6, 7 +; RV64-ZFH-LP64F-NEXT: addi a7, a7, -1792 +; RV64-ZFH-LP64F-NEXT: fmv.h.x ft0, a7 ; RV64-ZFH-LP64F-NEXT: li a7, 8 ; RV64-ZFH-LP64F-NEXT: fsh ft0, 0(sp) ; RV64-ZFH-LP64F-NEXT: call callee_half_on_stack @@ -1280,26 +1294,30 @@ define half @callee_half_ret() nounwind { ; ; RV32-ILP32F-LABEL: callee_half_ret: ; RV32-ILP32F: # %bb.0: -; RV32-ILP32F-NEXT: lui a0, %hi(.LCPI6_0) -; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI6_0)(a0) +; RV32-ILP32F-NEXT: lui a0, 1048564 +; RV32-ILP32F-NEXT: addi a0, a0, -1024 +; RV32-ILP32F-NEXT: fmv.w.x fa0, a0 ; RV32-ILP32F-NEXT: ret ; ; RV64-LP64F-LABEL: callee_half_ret: ; RV64-LP64F: # %bb.0: -; RV64-LP64F-NEXT: lui a0, %hi(.LCPI6_0) -; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI6_0)(a0) +; RV64-LP64F-NEXT: lui a0, 1048564 +; RV64-LP64F-NEXT: addi a0, a0, -1024 +; RV64-LP64F-NEXT: fmv.w.x fa0, a0 ; RV64-LP64F-NEXT: ret ; ; RV32-ILP32ZFHMIN-LABEL: callee_half_ret: ; RV32-ILP32ZFHMIN: # %bb.0: -; RV32-ILP32ZFHMIN-NEXT: lui a0, %hi(.LCPI6_0) -; RV32-ILP32ZFHMIN-NEXT: flh fa0, %lo(.LCPI6_0)(a0) +; RV32-ILP32ZFHMIN-NEXT: li a0, 15 +; RV32-ILP32ZFHMIN-NEXT: slli a0, a0, 10 +; RV32-ILP32ZFHMIN-NEXT: fmv.h.x fa0, a0 ; RV32-ILP32ZFHMIN-NEXT: ret ; ; RV64-LP64ZFHMIN-LABEL: callee_half_ret: ; RV64-LP64ZFHMIN: # %bb.0: -; RV64-LP64ZFHMIN-NEXT: lui a0, %hi(.LCPI6_0) -; RV64-LP64ZFHMIN-NEXT: flh fa0, %lo(.LCPI6_0)(a0) +; RV64-LP64ZFHMIN-NEXT: li a0, 15 +; RV64-LP64ZFHMIN-NEXT: slli a0, a0, 10 +; RV64-LP64ZFHMIN-NEXT: fmv.h.x fa0, a0 ; RV64-LP64ZFHMIN-NEXT: ret ; ; RV32-ZFH-ILP32-LABEL: callee_half_ret: @@ -1310,8 +1328,9 @@ define half @callee_half_ret() nounwind { ; ; RV32-ZFH-ILP32F-LABEL: callee_half_ret: ; RV32-ZFH-ILP32F: # %bb.0: -; RV32-ZFH-ILP32F-NEXT: lui a0, %hi(.LCPI6_0) -; RV32-ZFH-ILP32F-NEXT: flh fa0, %lo(.LCPI6_0)(a0) +; RV32-ZFH-ILP32F-NEXT: li a0, 15 +; RV32-ZFH-ILP32F-NEXT: slli a0, a0, 10 +; RV32-ZFH-ILP32F-NEXT: fmv.h.x fa0, a0 ; RV32-ZFH-ILP32F-NEXT: ret ; ; RV64-ZFH-LP64-LABEL: callee_half_ret: @@ -1322,8 +1341,9 @@ define half @callee_half_ret() nounwind { ; ; RV64-ZFH-LP64F-LABEL: callee_half_ret: ; RV64-ZFH-LP64F: # %bb.0: -; RV64-ZFH-LP64F-NEXT: lui a0, %hi(.LCPI6_0) -; RV64-ZFH-LP64F-NEXT: flh fa0, %lo(.LCPI6_0)(a0) +; RV64-ZFH-LP64F-NEXT: li a0, 15 +; RV64-ZFH-LP64F-NEXT: slli a0, a0, 10 +; RV64-ZFH-LP64F-NEXT: fmv.h.x fa0, a0 ; RV64-ZFH-LP64F-NEXT: ret ret half 1.0 } diff --git 
a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll index 6608874286e34..edec1d0b649ce 100644 --- a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll +++ b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll @@ -6,11 +6,13 @@ ; RUN: llc -mtriple=riscv64 -mattr=+conditional-cmv-fusion,+c,+zicond -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefixes=CMOV,CMOV-ZICOND %s ; RUN: llc -mtriple=riscv64 -mattr=+short-forward-branch-opt -verify-machineinstrs < %s \ -; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND %s +; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND,SFB-NOZICOND-NOC %s ; RUN: llc -mtriple=riscv64 -mattr=+short-forward-branch-opt,+c -verify-machineinstrs < %s \ -; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND %s +; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND,SFB-NOZICOND-C %s ; RUN: llc -mtriple=riscv64 -mattr=+short-forward-branch-opt,+zicond -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-ZICOND %s +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefixes=RV32IXQCI ; The conditional move optimization in sifive-p450 requires that only a ; single c.mv instruction appears in the branch shadow. @@ -42,6 +44,14 @@ define signext i32 @test1(i32 signext %x, i32 signext %y, i32 signext %z) { ; SHORT_FORWARD-NEXT: xor a0, a0, a1 ; SHORT_FORWARD-NEXT: .LBB0_2: ; SHORT_FORWARD-NEXT: ret +; +; RV32IXQCI-LABEL: test1: +; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: bnez a2, .LBB0_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB0_2: +; RV32IXQCI-NEXT: ret %c = icmp eq i32 %z, 0 %a = xor i32 %x, %y %b = select i1 %c, i32 %a, i32 %x @@ -73,6 +83,14 @@ define signext i32 @test2(i32 signext %x, i32 signext %y, i32 signext %z) { ; SHORT_FORWARD-NEXT: xor a0, a0, a1 ; SHORT_FORWARD-NEXT: .LBB1_2: ; SHORT_FORWARD-NEXT: ret +; +; RV32IXQCI-LABEL: test2: +; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: beqz a2, .LBB1_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB1_2: +; RV32IXQCI-NEXT: ret %c = icmp eq i32 %z, 0 %a = xor i32 %x, %y %b = select i1 %c, i32 %x, i32 %a @@ -120,6 +138,19 @@ define signext i32 @test3(i32 signext %v, i32 signext %w, i32 signext %x, i32 si ; SHORT_FORWARD-NEXT: .LBB2_4: ; SHORT_FORWARD-NEXT: addw a0, a0, a2 ; SHORT_FORWARD-NEXT: ret +; +; RV32IXQCI-LABEL: test3: +; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: beqz a4, .LBB2_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB2_2: +; RV32IXQCI-NEXT: beqz a4, .LBB2_4 +; RV32IXQCI-NEXT: # %bb.3: +; RV32IXQCI-NEXT: xor a2, a2, a3 +; RV32IXQCI-NEXT: .LBB2_4: +; RV32IXQCI-NEXT: add a0, a0, a2 +; RV32IXQCI-NEXT: ret %c = icmp eq i32 %z, 0 %a = xor i32 %v, %w %b = select i1 %c, i32 %v, i32 %a @@ -167,6 +198,12 @@ define signext i32 @test4(i32 signext %x, i32 signext %y, i32 signext %z) { ; SFB-ZICOND-NEXT: li a0, 3 ; SFB-ZICOND-NEXT: czero.nez a0, a0, a2 ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: test4: +; RV32IXQCI: # %bb.0: +; RV32IXQCI-NEXT: li a0, 0 +; RV32IXQCI-NEXT: qc.lieqi a0, a2, 0, 3 +; RV32IXQCI-NEXT: ret %c = icmp eq i32 %z, 0 %a = select i1 %c, i32 3, i32 0 ret i32 %a @@ -199,6 +236,15 @@ define i16 @select_xor_1(i16 %A, i8 %cond) { ; SHORT_FORWARD-NEXT: xori a0, a0, 43 ; SHORT_FORWARD-NEXT: .LBB4_2: # %entry ; SHORT_FORWARD-NEXT: ret +; +; 
RV32IXQCI-LABEL: select_xor_1: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a1, a1, 1 +; RV32IXQCI-NEXT: beqz a1, .LBB4_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB4_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp eq i8 %and, 0 @@ -236,6 +282,15 @@ define i16 @select_xor_1b(i16 %A, i8 %cond) { ; SHORT_FORWARD-NEXT: xori a0, a0, 43 ; SHORT_FORWARD-NEXT: .LBB5_2: # %entry ; SHORT_FORWARD-NEXT: ret +; +; RV32IXQCI-LABEL: select_xor_1b: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a1, a1, 1 +; RV32IXQCI-NEXT: beqz a1, .LBB5_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB5_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp ne i8 %and, 1 @@ -263,6 +318,24 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) { ; CMOV-NEXT: .LBB6_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_xor_2: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB6_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: xor a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB6_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_xor_2: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB6_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: xor a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB6_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_xor_2: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -271,6 +344,15 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) { ; SFB-ZICOND-NEXT: xor a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB6_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_xor_2: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB6_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB6_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp eq i8 %and, 0 @@ -300,6 +382,24 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) { ; CMOV-NEXT: .LBB7_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_xor_2b: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB7_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: xor a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB7_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_xor_2b: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB7_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: xor a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB7_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_xor_2b: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -308,6 +408,15 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) { ; SFB-ZICOND-NEXT: xor a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB7_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_xor_2b: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB7_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB7_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp ne i8 %and, 1 @@ -335,6 
+444,24 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) { ; CMOV-NEXT: .LBB8_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_or: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB8_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: or a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB8_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_or: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB8_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: or a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB8_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_or: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -343,6 +470,15 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) { ; SFB-ZICOND-NEXT: or a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB8_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_or: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB8_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB8_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp eq i8 %and, 0 @@ -372,6 +508,24 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) { ; CMOV-NEXT: .LBB9_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_or_b: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB9_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: or a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB9_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_or_b: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB9_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: or a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB9_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_or_b: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -380,6 +534,15 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) { ; SFB-ZICOND-NEXT: or a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB9_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_or_b: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB9_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB9_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 %cmp10 = icmp ne i8 %and, 1 @@ -407,6 +570,24 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) { ; CMOV-NEXT: .LBB10_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_or_1: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB10_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: or a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB10_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_or_1: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB10_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: or a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB10_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_or_1: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -415,6 +596,15 @@ 
define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) { ; SFB-ZICOND-NEXT: or a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB10_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_or_1: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB10_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB10_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 %cmp10 = icmp eq i32 %and, 0 @@ -444,6 +634,24 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) { ; CMOV-NEXT: .LBB11_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_or_1b: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB11_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: or a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB11_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_or_1b: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB11_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: or a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB11_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_or_1b: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -452,6 +660,15 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) { ; SFB-ZICOND-NEXT: or a0, a1, a0 ; SFB-ZICOND-NEXT: .LBB11_2: # %entry ; SFB-ZICOND-NEXT: ret +; +; RV32IXQCI-LABEL: select_or_1b: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB11_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB11_2: # %entry +; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 %cmp10 = icmp ne i32 %and, 1 diff --git a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll index 94f8d7cab9b95..220494a4c4ff8 100644 --- a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll +++ b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll @@ -287,8 +287,9 @@ indirectgoto: define float @lower_constantpool(float %a) nounwind { ; RV32F-SMALL-LABEL: lower_constantpool: ; RV32F-SMALL: # %bb.0: -; RV32F-SMALL-NEXT: lui a0, %hi(.LCPI3_0) -; RV32F-SMALL-NEXT: flw fa5, %lo(.LCPI3_0)(a0) +; RV32F-SMALL-NEXT: lui a0, 260097 +; RV32F-SMALL-NEXT: addi a0, a0, -2048 +; RV32F-SMALL-NEXT: fmv.w.x fa5, a0 ; RV32F-SMALL-NEXT: fadd.s fa0, fa0, fa5 ; RV32F-SMALL-NEXT: ret ; @@ -301,32 +302,33 @@ define float @lower_constantpool(float %a) nounwind { ; ; RV32F-MEDIUM-LABEL: lower_constantpool: ; RV32F-MEDIUM: # %bb.0: -; RV32F-MEDIUM-NEXT: .Lpcrel_hi3: -; RV32F-MEDIUM-NEXT: auipc a0, %pcrel_hi(.LCPI3_0) -; RV32F-MEDIUM-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0) +; RV32F-MEDIUM-NEXT: lui a0, 260097 +; RV32F-MEDIUM-NEXT: addi a0, a0, -2048 +; RV32F-MEDIUM-NEXT: fmv.w.x fa5, a0 ; RV32F-MEDIUM-NEXT: fadd.s fa0, fa0, fa5 ; RV32F-MEDIUM-NEXT: ret ; ; RV64F-SMALL-LABEL: lower_constantpool: ; RV64F-SMALL: # %bb.0: -; RV64F-SMALL-NEXT: lui a0, %hi(.LCPI3_0) -; RV64F-SMALL-NEXT: flw fa5, %lo(.LCPI3_0)(a0) +; RV64F-SMALL-NEXT: lui a0, 260097 +; RV64F-SMALL-NEXT: addi a0, a0, -2048 +; RV64F-SMALL-NEXT: fmv.w.x fa5, a0 ; RV64F-SMALL-NEXT: fadd.s fa0, fa0, fa5 ; RV64F-SMALL-NEXT: ret ; ; RV64F-MEDIUM-LABEL: lower_constantpool: ; RV64F-MEDIUM: # %bb.0: -; RV64F-MEDIUM-NEXT: .Lpcrel_hi3: -; RV64F-MEDIUM-NEXT: auipc a0, %pcrel_hi(.LCPI3_0) -; RV64F-MEDIUM-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0) +; 
RV64F-MEDIUM-NEXT: lui a0, 260097 +; RV64F-MEDIUM-NEXT: addi a0, a0, -2048 +; RV64F-MEDIUM-NEXT: fmv.w.x fa5, a0 ; RV64F-MEDIUM-NEXT: fadd.s fa0, fa0, fa5 ; RV64F-MEDIUM-NEXT: ret ; ; RV64F-LARGE-LABEL: lower_constantpool: ; RV64F-LARGE: # %bb.0: -; RV64F-LARGE-NEXT: .Lpcrel_hi3: -; RV64F-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI3_0) -; RV64F-LARGE-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0) +; RV64F-LARGE-NEXT: lui a0, 260097 +; RV64F-LARGE-NEXT: addi a0, a0, -2048 +; RV64F-LARGE-NEXT: fmv.w.x fa5, a0 ; RV64F-LARGE-NEXT: fadd.s fa0, fa0, fa5 ; RV64F-LARGE-NEXT: ret ; @@ -390,13 +392,13 @@ define i32 @lower_extern_weak(i32 %a) nounwind { ; RV32IXQCILI-SMALL-NEXT: lw a0, 0(a0) ; RV32IXQCILI-SMALL-NEXT: ret ; -; RV32F-MEDIUM-LABEL: lower_extern_weak: -; RV32F-MEDIUM: # %bb.0: -; RV32F-MEDIUM-NEXT: .Lpcrel_hi4: -; RV32F-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W) -; RV32F-MEDIUM-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi4)(a0) -; RV32F-MEDIUM-NEXT: lw a0, 0(a0) -; RV32F-MEDIUM-NEXT: ret +; RV32I-MEDIUM-LABEL: lower_extern_weak: +; RV32I-MEDIUM: # %bb.0: +; RV32I-MEDIUM-NEXT: .Lpcrel_hi3: +; RV32I-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W) +; RV32I-MEDIUM-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi3)(a0) +; RV32I-MEDIUM-NEXT: lw a0, 0(a0) +; RV32I-MEDIUM-NEXT: ret ; ; RV64I-SMALL-LABEL: lower_extern_weak: ; RV64I-SMALL: # %bb.0: @@ -404,45 +406,21 @@ define i32 @lower_extern_weak(i32 %a) nounwind { ; RV64I-SMALL-NEXT: lw a0, %lo(W)(a0) ; RV64I-SMALL-NEXT: ret ; -; RV64F-MEDIUM-LABEL: lower_extern_weak: -; RV64F-MEDIUM: # %bb.0: -; RV64F-MEDIUM-NEXT: .Lpcrel_hi4: -; RV64F-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W) -; RV64F-MEDIUM-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0) -; RV64F-MEDIUM-NEXT: lw a0, 0(a0) -; RV64F-MEDIUM-NEXT: ret -; -; RV64F-LARGE-LABEL: lower_extern_weak: -; RV64F-LARGE: # %bb.0: -; RV64F-LARGE-NEXT: .Lpcrel_hi4: -; RV64F-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI4_0) -; RV64F-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0) -; RV64F-LARGE-NEXT: lw a0, 0(a0) -; RV64F-LARGE-NEXT: ret -; -; RV32FINX-MEDIUM-LABEL: lower_extern_weak: -; RV32FINX-MEDIUM: # %bb.0: -; RV32FINX-MEDIUM-NEXT: .Lpcrel_hi3: -; RV32FINX-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W) -; RV32FINX-MEDIUM-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi3)(a0) -; RV32FINX-MEDIUM-NEXT: lw a0, 0(a0) -; RV32FINX-MEDIUM-NEXT: ret -; -; RV64FINX-MEDIUM-LABEL: lower_extern_weak: -; RV64FINX-MEDIUM: # %bb.0: -; RV64FINX-MEDIUM-NEXT: .Lpcrel_hi3: -; RV64FINX-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W) -; RV64FINX-MEDIUM-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0) -; RV64FINX-MEDIUM-NEXT: lw a0, 0(a0) -; RV64FINX-MEDIUM-NEXT: ret +; RV64I-MEDIUM-LABEL: lower_extern_weak: +; RV64I-MEDIUM: # %bb.0: +; RV64I-MEDIUM-NEXT: .Lpcrel_hi3: +; RV64I-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W) +; RV64I-MEDIUM-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0) +; RV64I-MEDIUM-NEXT: lw a0, 0(a0) +; RV64I-MEDIUM-NEXT: ret ; -; RV64FINX-LARGE-LABEL: lower_extern_weak: -; RV64FINX-LARGE: # %bb.0: -; RV64FINX-LARGE-NEXT: .Lpcrel_hi3: -; RV64FINX-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI4_0) -; RV64FINX-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0) -; RV64FINX-LARGE-NEXT: lw a0, 0(a0) -; RV64FINX-LARGE-NEXT: ret +; RV64I-LARGE-LABEL: lower_extern_weak: +; RV64I-LARGE: # %bb.0: +; RV64I-LARGE-NEXT: .Lpcrel_hi3: +; RV64I-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI4_0) +; RV64I-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0) +; RV64I-LARGE-NEXT: lw a0, 0(a0) +; RV64I-LARGE-NEXT: ret %1 = load volatile i32, ptr @W ret i32 %1 } @@ -466,9 +444,9 @@ define half @lower_global_half(half %a) nounwind { ; ; RV32F-MEDIUM-LABEL: lower_global_half: ; 
RV32F-MEDIUM: # %bb.0: -; RV32F-MEDIUM-NEXT: .Lpcrel_hi5: +; RV32F-MEDIUM-NEXT: .Lpcrel_hi4: ; RV32F-MEDIUM-NEXT: auipc a0, %pcrel_hi(X) -; RV32F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi5)(a0) +; RV32F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi4)(a0) ; RV32F-MEDIUM-NEXT: fadd.h fa0, fa0, fa5 ; RV32F-MEDIUM-NEXT: ret ; @@ -481,17 +459,17 @@ define half @lower_global_half(half %a) nounwind { ; ; RV64F-MEDIUM-LABEL: lower_global_half: ; RV64F-MEDIUM: # %bb.0: -; RV64F-MEDIUM-NEXT: .Lpcrel_hi5: +; RV64F-MEDIUM-NEXT: .Lpcrel_hi4: ; RV64F-MEDIUM-NEXT: auipc a0, %pcrel_hi(X) -; RV64F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi5)(a0) +; RV64F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi4)(a0) ; RV64F-MEDIUM-NEXT: fadd.h fa0, fa0, fa5 ; RV64F-MEDIUM-NEXT: ret ; ; RV64F-LARGE-LABEL: lower_global_half: ; RV64F-LARGE: # %bb.0: -; RV64F-LARGE-NEXT: .Lpcrel_hi5: +; RV64F-LARGE-NEXT: .Lpcrel_hi4: ; RV64F-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI5_0) -; RV64F-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi5)(a0) +; RV64F-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0) ; RV64F-LARGE-NEXT: flh fa5, 0(a0) ; RV64F-LARGE-NEXT: fadd.h fa0, fa0, fa5 ; RV64F-LARGE-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll index 8124d00e63fa7..c3e729800616d 100644 --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -1636,14 +1636,15 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind { ; ; RV64IFD-LABEL: fcvt_w_s_sat_i16: ; RV64IFD: # %bb.0: # %start -; RV64IFD-NEXT: lui a0, %hi(.LCPI26_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI26_0)(a0) -; RV64IFD-NEXT: lui a0, %hi(.LCPI26_1) -; RV64IFD-NEXT: fld fa4, %lo(.LCPI26_1)(a0) ; RV64IFD-NEXT: feq.d a0, fa0, fa0 -; RV64IFD-NEXT: fmax.d fa5, fa0, fa5 +; RV64IFD-NEXT: lui a1, %hi(.LCPI26_0) +; RV64IFD-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV64IFD-NEXT: li a1, -505 +; RV64IFD-NEXT: slli a1, a1, 53 +; RV64IFD-NEXT: fmv.d.x fa4, a1 +; RV64IFD-NEXT: fmax.d fa4, fa0, fa4 ; RV64IFD-NEXT: neg a0, a0 -; RV64IFD-NEXT: fmin.d fa5, fa5, fa4 +; RV64IFD-NEXT: fmin.d fa5, fa4, fa5 ; RV64IFD-NEXT: fcvt.l.d a1, fa5, rtz ; RV64IFD-NEXT: and a0, a0, a1 ; RV64IFD-NEXT: ret @@ -1668,16 +1669,17 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind { ; ; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i16: ; RV64IZFINXZDINX: # %bb.0: # %start -; RV64IZFINXZDINX-NEXT: li a1, -505 -; RV64IZFINXZDINX-NEXT: lui a2, %hi(.LCPI26_0) -; RV64IZFINXZDINX-NEXT: slli a1, a1, 53 -; RV64IZFINXZDINX-NEXT: ld a2, %lo(.LCPI26_0)(a2) -; RV64IZFINXZDINX-NEXT: fmax.d a1, a0, a1 -; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0 -; RV64IZFINXZDINX-NEXT: neg a0, a0 -; RV64IZFINXZDINX-NEXT: fmin.d a1, a1, a2 -; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a1, rtz -; RV64IZFINXZDINX-NEXT: and a0, a0, a1 +; RV64IZFINXZDINX-NEXT: feq.d a1, a0, a0 +; RV64IZFINXZDINX-NEXT: li a2, -505 +; RV64IZFINXZDINX-NEXT: slli a2, a2, 53 +; RV64IZFINXZDINX-NEXT: fmax.d a0, a0, a2 +; RV64IZFINXZDINX-NEXT: lui a2, 4152 +; RV64IZFINXZDINX-NEXT: neg a1, a1 +; RV64IZFINXZDINX-NEXT: addi a2, a2, -1 +; RV64IZFINXZDINX-NEXT: slli a2, a2, 38 +; RV64IZFINXZDINX-NEXT: fmin.d a0, a0, a2 +; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz +; RV64IZFINXZDINX-NEXT: and a0, a1, a0 ; RV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcvt_w_s_sat_i16: @@ -1859,9 +1861,10 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind { ; ; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i16: ; RV64IZFINXZDINX: # %bb.0: # %start -; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI28_0) -; RV64IZFINXZDINX-NEXT: ld a1, 
%lo(.LCPI28_0)(a1) ; RV64IZFINXZDINX-NEXT: fmax.d a0, a0, zero +; RV64IZFINXZDINX-NEXT: lui a1, 8312 +; RV64IZFINXZDINX-NEXT: addi a1, a1, -1 +; RV64IZFINXZDINX-NEXT: slli a1, a1, 37 ; RV64IZFINXZDINX-NEXT: fmin.d a0, a0, a1 ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz ; RV64IZFINXZDINX-NEXT: ret @@ -2012,13 +2015,15 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind { ; ; RV64IFD-LABEL: fcvt_w_s_sat_i8: ; RV64IFD: # %bb.0: # %start -; RV64IFD-NEXT: lui a0, %hi(.LCPI30_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI30_0)(a0) -; RV64IFD-NEXT: lui a0, %hi(.LCPI30_1) -; RV64IFD-NEXT: fld fa4, %lo(.LCPI30_1)(a0) ; RV64IFD-NEXT: feq.d a0, fa0, fa0 -; RV64IFD-NEXT: fmax.d fa5, fa0, fa5 +; RV64IFD-NEXT: li a1, -509 +; RV64IFD-NEXT: slli a1, a1, 53 +; RV64IFD-NEXT: fmv.d.x fa5, a1 +; RV64IFD-NEXT: lui a1, 65919 ; RV64IFD-NEXT: neg a0, a0 +; RV64IFD-NEXT: slli a1, a1, 34 +; RV64IFD-NEXT: fmax.d fa5, fa0, fa5 +; RV64IFD-NEXT: fmv.d.x fa4, a1 ; RV64IFD-NEXT: fmin.d fa5, fa5, fa4 ; RV64IFD-NEXT: fcvt.l.d a1, fa5, rtz ; RV64IFD-NEXT: and a0, a0, a1 @@ -2214,11 +2219,12 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind { ; ; RV64IFD-LABEL: fcvt_wu_s_sat_i8: ; RV64IFD: # %bb.0: # %start -; RV64IFD-NEXT: lui a0, %hi(.LCPI32_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI32_0)(a0) -; RV64IFD-NEXT: fmv.d.x fa4, zero -; RV64IFD-NEXT: fmax.d fa4, fa0, fa4 -; RV64IFD-NEXT: fmin.d fa5, fa4, fa5 +; RV64IFD-NEXT: fmv.d.x fa5, zero +; RV64IFD-NEXT: lui a0, 131967 +; RV64IFD-NEXT: fmax.d fa5, fa0, fa5 +; RV64IFD-NEXT: slli a0, a0, 33 +; RV64IFD-NEXT: fmv.d.x fa4, a0 +; RV64IFD-NEXT: fmin.d fa5, fa5, fa4 ; RV64IFD-NEXT: fcvt.lu.d a0, fa5, rtz ; RV64IFD-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll index 1119fd6d74a25..6f7c30edba3ea 100644 --- a/llvm/test/CodeGen/RISCV/double-imm.ll +++ b/llvm/test/CodeGen/RISCV/double-imm.ll @@ -47,8 +47,9 @@ define double @double_imm_op(double %a) nounwind { ; ; CHECK64D-LABEL: double_imm_op: ; CHECK64D: # %bb.0: -; CHECK64D-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK64D-NEXT: fld fa5, %lo(.LCPI1_0)(a0) +; CHECK64D-NEXT: li a0, 1023 +; CHECK64D-NEXT: slli a0, a0, 52 +; CHECK64D-NEXT: fmv.d.x fa5, a0 ; CHECK64D-NEXT: fadd.d fa0, fa0, fa5 ; CHECK64D-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll index bb57665fa1801..caeb6e6ce70af 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll @@ -866,8 +866,9 @@ define double @floor_f64(double %a) nounwind { ; ; RV64IFD-LABEL: floor_f64: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI18_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB18_2 @@ -931,8 +932,9 @@ define double @ceil_f64(double %a) nounwind { ; ; RV64IFD-LABEL: ceil_f64: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI19_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB19_2 @@ -996,8 +998,9 @@ define double @trunc_f64(double %a) nounwind { ; ; RV64IFD-LABEL: trunc_f64: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI20_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, 
a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB20_2 @@ -1061,8 +1064,9 @@ define double @rint_f64(double %a) nounwind { ; ; RV64IFD-LABEL: rint_f64: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI21_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI21_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB21_2 @@ -1167,8 +1171,9 @@ define double @round_f64(double %a) nounwind { ; ; RV64IFD-LABEL: round_f64: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI23_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI23_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB23_2 @@ -1232,8 +1237,9 @@ define double @roundeven_f64(double %a) nounwind { ; ; RV64IFD-LABEL: roundeven_f64: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI24_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB24_2 diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll index 3edbda3a4bf6b..6dd24c056e386 100644 --- a/llvm/test/CodeGen/RISCV/double-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll @@ -1145,8 +1145,9 @@ define double @test_floor_double(double %x) { ; ; RV64IFD-LABEL: test_floor_double: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI40_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB40_2 @@ -1194,8 +1195,9 @@ define double @test_ceil_double(double %x) { ; ; RV64IFD-LABEL: test_ceil_double: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI41_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI41_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB41_2 @@ -1243,8 +1245,9 @@ define double @test_trunc_double(double %x) { ; ; RV64IFD-LABEL: test_trunc_double: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI42_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB42_2 @@ -1292,8 +1295,9 @@ define double @test_round_double(double %x) { ; ; RV64IFD-LABEL: test_round_double: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI43_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI43_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB43_2 @@ -1341,8 +1345,9 @@ define double @test_roundeven_double(double %x) { ; ; RV64IFD-LABEL: test_roundeven_double: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a0, %hi(.LCPI44_0) -; RV64IFD-NEXT: fld fa5, %lo(.LCPI44_0)(a0) +; RV64IFD-NEXT: li a0, 1075 +; RV64IFD-NEXT: slli a0, a0, 52 +; RV64IFD-NEXT: fmv.d.x fa5, a0 ; RV64IFD-NEXT: 
fabs.d fa4, fa0 ; RV64IFD-NEXT: flt.d a0, fa4, fa5 ; RV64IFD-NEXT: beqz a0, .LBB44_2 diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll index 1deea55b083ce..cd3ff779d8cd3 100644 --- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ -; RUN: -target-abi=ilp32d | FileCheck %s +; RUN: -target-abi=ilp32d | FileCheck --check-prefixes=CHECK,RV32D %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ -; RUN: -target-abi=lp64d | FileCheck %s +; RUN: -target-abi=lp64d | FileCheck --check-prefixes=CHECK,RV64D %s ; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=ilp32 | FileCheck --check-prefix=CHECKRV32ZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ @@ -640,6 +640,39 @@ define signext i32 @select_fcmp_uge_1_2(double %a, double %b) nounwind { } define double @CascadedSelect(double noundef %a) { +; RV32D-LABEL: CascadedSelect: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: lui a0, %hi(.LCPI20_0) +; RV32D-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32D-NEXT: flt.d a0, fa5, fa0 +; RV32D-NEXT: bnez a0, .LBB20_3 +; RV32D-NEXT: # %bb.1: # %entry +; RV32D-NEXT: fcvt.d.w fa5, zero +; RV32D-NEXT: flt.d a0, fa0, fa5 +; RV32D-NEXT: bnez a0, .LBB20_3 +; RV32D-NEXT: # %bb.2: # %entry +; RV32D-NEXT: fmv.d fa5, fa0 +; RV32D-NEXT: .LBB20_3: # %entry +; RV32D-NEXT: fmv.d fa0, fa5 +; RV32D-NEXT: ret +; +; RV64D-LABEL: CascadedSelect: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: li a0, 1023 +; RV64D-NEXT: slli a0, a0, 52 +; RV64D-NEXT: fmv.d.x fa5, a0 +; RV64D-NEXT: flt.d a0, fa5, fa0 +; RV64D-NEXT: bnez a0, .LBB20_3 +; RV64D-NEXT: # %bb.1: # %entry +; RV64D-NEXT: fmv.d.x fa5, zero +; RV64D-NEXT: flt.d a0, fa0, fa5 +; RV64D-NEXT: bnez a0, .LBB20_3 +; RV64D-NEXT: # %bb.2: # %entry +; RV64D-NEXT: fmv.d fa5, fa0 +; RV64D-NEXT: .LBB20_3: # %entry +; RV64D-NEXT: fmv.d fa0, fa5 +; RV64D-NEXT: ret +; ; CHECKRV32ZDINX-LABEL: CascadedSelect: ; CHECKRV32ZDINX: # %bb.0: # %entry ; CHECKRV32ZDINX-NEXT: lui a3, %hi(.LCPI20_0) diff --git a/llvm/test/CodeGen/RISCV/double-zfa.ll b/llvm/test/CodeGen/RISCV/double-zfa.ll index 2f35496b9b32c..f17c63ddb6cae 100644 --- a/llvm/test/CodeGen/RISCV/double-zfa.ll +++ b/llvm/test/CodeGen/RISCV/double-zfa.ll @@ -69,21 +69,35 @@ define double @loadfpimm8() { } define double @loadfpimm9() { -; CHECK-LABEL: loadfpimm9: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI8_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI8_0)(a0) -; CHECK-NEXT: ret +; RV32IDZFA-LABEL: loadfpimm9: +; RV32IDZFA: # %bb.0: +; RV32IDZFA-NEXT: lui a0, %hi(.LCPI8_0) +; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI8_0)(a0) +; RV32IDZFA-NEXT: ret +; +; RV64DZFA-LABEL: loadfpimm9: +; RV64DZFA: # %bb.0: +; RV64DZFA-NEXT: lui a0, 131967 +; RV64DZFA-NEXT: slli a0, a0, 33 +; RV64DZFA-NEXT: fmv.d.x fa0, a0 +; RV64DZFA-NEXT: ret ret double 255.0 } ; Negative test. This is 1 * 2^256. 
define double @loadfpimm10() { -; CHECK-LABEL: loadfpimm10: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI9_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI9_0)(a0) -; CHECK-NEXT: ret +; RV32IDZFA-LABEL: loadfpimm10: +; RV32IDZFA: # %bb.0: +; RV32IDZFA-NEXT: lui a0, %hi(.LCPI9_0) +; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI9_0)(a0) +; RV32IDZFA-NEXT: ret +; +; RV64DZFA-LABEL: loadfpimm10: +; RV64DZFA: # %bb.0: +; RV64DZFA-NEXT: li a0, 1 +; RV64DZFA-NEXT: slli a0, a0, 60 +; RV64DZFA-NEXT: fmv.d.x fa0, a0 +; RV64DZFA-NEXT: ret ret double 0x1000000000000000 } @@ -125,11 +139,18 @@ define double @loadfpimm13() { ; Negative test. This is 2^-1023, a denormal. define double @loadfpimm15() { -; CHECK-LABEL: loadfpimm15: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: ret +; RV32IDZFA-LABEL: loadfpimm15: +; RV32IDZFA: # %bb.0: +; RV32IDZFA-NEXT: lui a0, %hi(.LCPI13_0) +; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI13_0)(a0) +; RV32IDZFA-NEXT: ret +; +; RV64DZFA-LABEL: loadfpimm15: +; RV64DZFA: # %bb.0: +; RV64DZFA-NEXT: li a0, 1 +; RV64DZFA-NEXT: slli a0, a0, 51 +; RV64DZFA-NEXT: fmv.d.x fa0, a0 +; RV64DZFA-NEXT: ret ret double 0x0008000000000000 } diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll index 72578193ee4bf..e6e4f6642f685 100644 --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -623,20 +623,21 @@ define i64 @fcvt_l_s_sat(float %a) nounwind { ; RV32IF-NEXT: # %bb.1: # %start ; RV32IF-NEXT: mv a2, a1 ; RV32IF-NEXT: .LBB12_2: # %start -; RV32IF-NEXT: lui a1, %hi(.LCPI12_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI12_0)(a1) +; RV32IF-NEXT: lui a1, 389120 +; RV32IF-NEXT: addi a1, a1, -1 +; RV32IF-NEXT: fmv.w.x fa5, a1 ; RV32IF-NEXT: flt.s a1, fa5, fs0 ; RV32IF-NEXT: beqz a1, .LBB12_4 ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: addi a2, a3, -1 ; RV32IF-NEXT: .LBB12_4: # %start ; RV32IF-NEXT: feq.s a3, fs0, fs0 -; RV32IF-NEXT: neg a4, a1 -; RV32IF-NEXT: neg a1, s0 +; RV32IF-NEXT: neg a4, s0 +; RV32IF-NEXT: neg a5, a1 ; RV32IF-NEXT: neg a3, a3 -; RV32IF-NEXT: and a0, a1, a0 +; RV32IF-NEXT: and a0, a4, a0 ; RV32IF-NEXT: and a1, a3, a2 -; RV32IF-NEXT: or a0, a4, a0 +; RV32IF-NEXT: or a0, a5, a0 ; RV32IF-NEXT: and a0, a3, a0 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -864,10 +865,11 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind { ; RV32IF-NEXT: fle.s a0, fa5, fa0 ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: lui a2, %hi(.LCPI14_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI14_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: lui a2, 391168 ; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: addi a2, a2, -1 +; RV32IF-NEXT: fmv.w.x fa5, a2 ; RV32IF-NEXT: flt.s a2, fa5, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: or a0, a2, a0 @@ -1405,13 +1407,14 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind { ; RV32IF-LABEL: fcvt_w_s_sat_i16: ; RV32IF: # %bb.0: # %start ; RV32IF-NEXT: feq.s a0, fa0, fa0 -; RV32IF-NEXT: lui a1, %hi(.LCPI24_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI24_0)(a1) ; RV32IF-NEXT: lui a1, 815104 -; RV32IF-NEXT: fmv.w.x fa4, a1 -; RV32IF-NEXT: fmax.s fa4, fa0, fa4 +; RV32IF-NEXT: fmv.w.x fa5, a1 +; RV32IF-NEXT: lui a1, 290816 ; RV32IF-NEXT: neg a0, a0 -; RV32IF-NEXT: fmin.s fa5, fa4, fa5 +; RV32IF-NEXT: addi a1, a1, -512 +; RV32IF-NEXT: fmax.s fa5, fa0, fa5 +; RV32IF-NEXT: fmv.w.x fa4, a1 +; RV32IF-NEXT: fmin.s fa5, fa5, fa4 ; RV32IF-NEXT: fcvt.w.s a1, fa5, rtz ; RV32IF-NEXT: and 
a0, a0, a1 ; RV32IF-NEXT: ret @@ -1419,13 +1422,14 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind { ; RV64IF-LABEL: fcvt_w_s_sat_i16: ; RV64IF: # %bb.0: # %start ; RV64IF-NEXT: feq.s a0, fa0, fa0 -; RV64IF-NEXT: lui a1, %hi(.LCPI24_0) -; RV64IF-NEXT: flw fa5, %lo(.LCPI24_0)(a1) ; RV64IF-NEXT: lui a1, 815104 -; RV64IF-NEXT: fmv.w.x fa4, a1 -; RV64IF-NEXT: fmax.s fa4, fa0, fa4 +; RV64IF-NEXT: fmv.w.x fa5, a1 +; RV64IF-NEXT: lui a1, 290816 ; RV64IF-NEXT: neg a0, a0 -; RV64IF-NEXT: fmin.s fa5, fa4, fa5 +; RV64IF-NEXT: addi a1, a1, -512 +; RV64IF-NEXT: fmax.s fa5, fa0, fa5 +; RV64IF-NEXT: fmv.w.x fa4, a1 +; RV64IF-NEXT: fmin.s fa5, fa5, fa4 ; RV64IF-NEXT: fcvt.l.s a1, fa5, rtz ; RV64IF-NEXT: and a0, a0, a1 ; RV64IF-NEXT: ret @@ -1590,21 +1594,23 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind { define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind { ; RV32IF-LABEL: fcvt_wu_s_sat_i16: ; RV32IF: # %bb.0: # %start -; RV32IF-NEXT: lui a0, %hi(.LCPI26_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI26_0)(a0) -; RV32IF-NEXT: fmv.w.x fa4, zero -; RV32IF-NEXT: fmax.s fa4, fa0, fa4 -; RV32IF-NEXT: fmin.s fa5, fa4, fa5 +; RV32IF-NEXT: fmv.w.x fa5, zero +; RV32IF-NEXT: lui a0, 292864 +; RV32IF-NEXT: fmax.s fa5, fa0, fa5 +; RV32IF-NEXT: addi a0, a0, -256 +; RV32IF-NEXT: fmv.w.x fa4, a0 +; RV32IF-NEXT: fmin.s fa5, fa5, fa4 ; RV32IF-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcvt_wu_s_sat_i16: ; RV64IF: # %bb.0: # %start -; RV64IF-NEXT: lui a0, %hi(.LCPI26_0) -; RV64IF-NEXT: flw fa5, %lo(.LCPI26_0)(a0) -; RV64IF-NEXT: fmv.w.x fa4, zero -; RV64IF-NEXT: fmax.s fa4, fa0, fa4 -; RV64IF-NEXT: fmin.s fa5, fa4, fa5 +; RV64IF-NEXT: fmv.w.x fa5, zero +; RV64IF-NEXT: lui a0, 292864 +; RV64IF-NEXT: fmax.s fa5, fa0, fa5 +; RV64IF-NEXT: addi a0, a0, -256 +; RV64IF-NEXT: fmv.w.x fa4, a0 +; RV64IF-NEXT: fmin.s fa5, fa5, fa4 ; RV64IF-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IF-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll index a010ab49b2827..e4e34543d6314 100644 --- a/llvm/test/CodeGen/RISCV/float-imm.ll +++ b/llvm/test/CodeGen/RISCV/float-imm.ll @@ -12,8 +12,9 @@ define float @float_imm() nounwind { ; CHECK-LABEL: float_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flw fa0, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: lui a0, 263313 +; CHECK-NEXT: addi a0, a0, -37 +; CHECK-NEXT: fmv.w.x fa0, a0 ; CHECK-NEXT: ret ; ; CHECKZFINX-LABEL: float_imm: diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll index 8b8a3257a0027..b1230ae9dd6bf 100644 --- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll @@ -16,7 +16,7 @@ ; RUN: | FileCheck -check-prefix=RV64IZFINX %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \ ; RUN: -verify-machineinstrs -target-abi=lp64d \ -; RUN: | FileCheck -check-prefix=RV64IF %s +; RUN: | FileCheck -check-prefixes=RV64IFD %s ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \ ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV32I %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \ @@ -45,6 +45,11 @@ define float @sqrt_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fsqrt.s a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: sqrt_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fsqrt.s fa0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sqrt_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -97,6 +102,16 @@ define float @powi_f32(float %a, i32 %b) nounwind { ; RV64IZFINX-NEXT: addi sp, sp, 16 
; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: powi_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: sext.w a0, a0 +; RV64IFD-NEXT: call __powisf2 +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: powi_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -138,6 +153,10 @@ define float @sin_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail sinf ; +; RV64IFD-LABEL: sin_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail sinf +; ; RV32I-LABEL: sin_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -178,6 +197,10 @@ define float @cos_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail cosf ; +; RV64IFD-LABEL: cos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail cosf +; ; RV32I-LABEL: cos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -237,6 +260,24 @@ define float @sincos_f32(float %a) nounwind { ; RV32IZFINX-NEXT: addi sp, sp, 16 ; RV32IZFINX-NEXT: ret ; +; RV64IF-LABEL: sincos_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill +; RV64IF-NEXT: fsw fs1, 0(sp) # 4-byte Folded Spill +; RV64IF-NEXT: fmv.s fs0, fa0 +; RV64IF-NEXT: call sinf +; RV64IF-NEXT: fmv.s fs1, fa0 +; RV64IF-NEXT: fmv.s fa0, fs0 +; RV64IF-NEXT: call cosf +; RV64IF-NEXT: fadd.s fa0, fs1, fa0 +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload +; RV64IF-NEXT: flw fs1, 0(sp) # 4-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; ; RV64IZFINX-LABEL: sincos_f32: ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: addi sp, sp, -32 @@ -255,6 +296,24 @@ define float @sincos_f32(float %a) nounwind { ; RV64IZFINX-NEXT: addi sp, sp, 32 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: sincos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -32 +; RV64IFD-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fmv.s fs0, fa0 +; RV64IFD-NEXT: call sinf +; RV64IFD-NEXT: fmv.s fs1, fa0 +; RV64IFD-NEXT: fmv.s fa0, fs0 +; RV64IFD-NEXT: call cosf +; RV64IFD-NEXT: fadd.s fa0, fs1, fa0 +; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 32 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sincos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -319,6 +378,10 @@ define float @pow_f32(float %a, float %b) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail powf ; +; RV64IFD-LABEL: pow_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail powf +; ; RV32I-LABEL: pow_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -359,6 +422,10 @@ define float @exp_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail expf ; +; RV64IFD-LABEL: exp_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail expf +; ; RV32I-LABEL: exp_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -399,6 +466,10 @@ define float @exp2_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail exp2f ; +; RV64IFD-LABEL: exp2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail exp2f +; ; RV32I-LABEL: exp2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -437,6 +508,10 @@ define float @exp10_f32(float %a) nounwind { ; 
RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail exp10f ; +; RV64IFD-LABEL: exp10_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail exp10f +; ; RV32I-LABEL: exp10_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -477,6 +552,10 @@ define float @log_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail logf ; +; RV64IFD-LABEL: log_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail logf +; ; RV32I-LABEL: log_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -517,6 +596,10 @@ define float @log10_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail log10f ; +; RV64IFD-LABEL: log10_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail log10f +; ; RV32I-LABEL: log10_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -557,6 +640,10 @@ define float @log2_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail log2f ; +; RV64IFD-LABEL: log2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail log2f +; ; RV32I-LABEL: log2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -601,6 +688,11 @@ define float @fma_f32(float %a, float %b, float %c) nounwind { ; RV64IZFINX-NEXT: fmadd.s a0, a0, a1, a2 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: fma_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmadd.s fa0, fa0, fa1, fa2 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fma_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -645,6 +737,11 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind { ; RV64IZFINX-NEXT: fmadd.s a0, a0, a1, a2 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: fmuladd_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmadd.s fa0, fa0, fa1, fa2 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fmuladd_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -699,6 +796,11 @@ define float @fabs_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fabs.s a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: fabs_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fabs.s fa0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fabs_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -737,6 +839,11 @@ define float @minnum_f32(float %a, float %b) nounwind { ; RV64IZFINX-NEXT: fmin.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: minnum_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmin.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: minnum_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -781,6 +888,11 @@ define float @maxnum_f32(float %a, float %b) nounwind { ; RV64IZFINX-NEXT: fmax.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: maxnum_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmax.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: maxnum_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -825,6 +937,11 @@ define float @copysign_f32(float %a, float %b) nounwind { ; RV64IZFINX-NEXT: fsgnj.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: copysign_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fsgnj.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: copysign_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 524288 @@ -903,6 +1020,20 @@ define float @floor_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB18_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: floor_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB18_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rdn +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rdn +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB18_2: +; RV64IFD-NEXT: ret +; ; 
RV32I-LABEL: floor_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -981,6 +1112,20 @@ define float @ceil_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB19_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: ceil_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB19_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rup +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rup +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB19_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: ceil_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1059,6 +1204,20 @@ define float @trunc_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB20_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: trunc_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB20_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rtz +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rtz +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB20_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: trunc_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1137,6 +1296,20 @@ define float @rint_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB21_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: rint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB21_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0 +; RV64IFD-NEXT: fcvt.s.w fa5, a0 +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB21_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: rint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1177,6 +1350,10 @@ define float @nearbyint_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail nearbyintf ; +; RV64IFD-LABEL: nearbyint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail nearbyintf +; ; RV32I-LABEL: nearbyint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1255,6 +1432,20 @@ define float @round_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB23_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: round_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB23_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rmm +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rmm +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB23_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: round_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1333,6 +1524,20 @@ define float @roundeven_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB24_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: roundeven_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB24_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rne +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rne +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB24_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: roundeven_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1377,6 +1582,11 @@ define iXLen @lrint_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.l.s a0, a0 ; RV64IZFINX-NEXT: ret ; +; 
RV64IFD-LABEL: lrint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.l.s a0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: lrint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1422,6 +1632,11 @@ define iXLen @lround_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: lround_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.l.s a0, fa0, rmm +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: lround_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1466,6 +1681,11 @@ define i32 @lround_i32_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rmm ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: lround_i32_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rmm +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: lround_i32_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1518,6 +1738,11 @@ define i64 @llrint_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.l.s a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: llrint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.l.s a0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: llrint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1570,6 +1795,11 @@ define i64 @llround_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: llround_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.l.s a0, fa0, rmm +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: llround_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1621,6 +1851,13 @@ define i1 @fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 927 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a1, a0, 1 @@ -1705,6 +1942,13 @@ define i1 @isnan_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 768 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1749,6 +1993,12 @@ define i1 @isqnan_fpclass(float %x) { ; RV64IZFINX-NEXT: srli a0, a0, 9 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isqnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: srli a0, a0, 9 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isqnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1799,6 +2049,13 @@ define i1 @issnan_fpclass(float %x) { ; RV64IZFINX-NEXT: srli a0, a0, 63 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: issnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: slli a0, a0, 55 +; RV64IFD-NEXT: srli a0, a0, 63 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: issnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1853,6 +2110,13 @@ define i1 @isinf_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isinf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 129 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isinf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1903,6 +2167,13 @@ define i1 @isposinf_fpclass(float %x) { ; RV64IZFINX-NEXT: srli a0, a0, 63 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isposinf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: slli a0, a0, 56 +; RV64IFD-NEXT: srli a0, 
a0, 63 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isposinf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1946,6 +2217,12 @@ define i1 @isneginf_fpclass(float %x) { ; RV64IZFINX-NEXT: andi a0, a0, 1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isneginf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isneginf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 1046528 @@ -1993,6 +2270,13 @@ define i1 @isfinite_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 126 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -2041,6 +2325,13 @@ define i1 @isposfinite_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isposfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 112 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isposfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 23 @@ -2085,6 +2376,13 @@ define i1 @isnegfinite_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isnegfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 14 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnegfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a1, a0, 1 @@ -2137,6 +2435,13 @@ define i1 @isnotfinite_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isnotfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 897 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnotfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -2175,6 +2480,10 @@ define float @tan_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail tanf ; +; RV64IFD-LABEL: tan_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail tanf +; ; RV32I-LABEL: tan_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2219,6 +2528,11 @@ define float @maximumnum_float(float %x, float %y) { ; RV64IZFINX-NEXT: fmax.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: maximumnum_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmax.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: maximumnum_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2271,6 +2585,11 @@ define float @minimumnum_float(float %x, float %y) { ; RV64IZFINX-NEXT: fmin.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: minimumnum_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmin.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: minimumnum_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2327,6 +2646,15 @@ define float @ldexp_float(float %x, i32 signext %y) nounwind { ; RV64IZFINX-NEXT: addi sp, sp, 16 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: ldexp_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call ldexpf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: ldexp_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2393,6 +2721,17 @@ define {float, i32} @frexp_float(float %x) nounwind { ; RV64IZFINX-NEXT: addi sp, sp, 
16 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: frexp_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: mv a0, sp +; RV64IFD-NEXT: call frexpf +; RV64IFD-NEXT: ld a0, 0(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: frexp_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2435,6 +2774,10 @@ define float @asin_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail asinf ; +; RV64IFD-LABEL: asin_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail asinf +; ; RV32I-LABEL: asin_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2473,6 +2816,10 @@ define float @acos_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail acosf ; +; RV64IFD-LABEL: acos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail acosf +; ; RV32I-LABEL: acos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2511,6 +2858,10 @@ define float @atan_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail atanf ; +; RV64IFD-LABEL: atan_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail atanf +; ; RV32I-LABEL: atan_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2549,6 +2900,10 @@ define float @atan2_f32(float %a, float %b) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail atan2f ; +; RV64IFD-LABEL: atan2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail atan2f +; ; RV32I-LABEL: atan2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2587,6 +2942,10 @@ define float @sinh_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail sinhf ; +; RV64IFD-LABEL: sinh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail sinhf +; ; RV32I-LABEL: sinh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2625,6 +2984,10 @@ define float @cosh_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail coshf ; +; RV64IFD-LABEL: cosh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail coshf +; ; RV32I-LABEL: cosh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2663,6 +3026,10 @@ define float @tanh_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail tanhf ; +; RV64IFD-LABEL: tanh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail tanhf +; ; RV32I-LABEL: tanh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll index 809cc31abe612..6871f29cb8b05 100644 --- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll @@ -60,8 +60,9 @@ define i64 @test_floor_si64(float %x) nounwind { ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: mv a2, a1 ; RV32IF-NEXT: .LBB1_4: -; RV32IF-NEXT: lui a1, %hi(.LCPI1_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI1_0)(a1) +; RV32IF-NEXT: lui a1, 389120 +; RV32IF-NEXT: addi a1, a1, -1 +; RV32IF-NEXT: fmv.w.x fa5, a1 ; RV32IF-NEXT: flt.s a1, fa5, fs0 ; RV32IF-NEXT: beqz a1, .LBB1_6 ; RV32IF-NEXT: # %bb.5: @@ -196,10 +197,11 @@ define i64 @test_floor_ui64(float %x) nounwind { ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: lui a2, %hi(.LCPI3_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI3_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: lui a2, 391168 ; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: addi a2, a2, -1 +; RV32IF-NEXT: fmv.w.x fa5, a2 ; RV32IF-NEXT: flt.s a2, fa5, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: or a0, a2, a0 @@ -318,8 +320,9 
@@ define i64 @test_ceil_si64(float %x) nounwind { ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: mv a2, a1 ; RV32IF-NEXT: .LBB5_4: -; RV32IF-NEXT: lui a1, %hi(.LCPI5_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI5_0)(a1) +; RV32IF-NEXT: lui a1, 389120 +; RV32IF-NEXT: addi a1, a1, -1 +; RV32IF-NEXT: fmv.w.x fa5, a1 ; RV32IF-NEXT: flt.s a1, fa5, fs0 ; RV32IF-NEXT: beqz a1, .LBB5_6 ; RV32IF-NEXT: # %bb.5: @@ -454,10 +457,11 @@ define i64 @test_ceil_ui64(float %x) nounwind { ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: lui a2, %hi(.LCPI7_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI7_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: lui a2, 391168 ; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: addi a2, a2, -1 +; RV32IF-NEXT: fmv.w.x fa5, a2 ; RV32IF-NEXT: flt.s a2, fa5, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: or a0, a2, a0 @@ -576,8 +580,9 @@ define i64 @test_trunc_si64(float %x) nounwind { ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: mv a2, a1 ; RV32IF-NEXT: .LBB9_4: -; RV32IF-NEXT: lui a1, %hi(.LCPI9_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI9_0)(a1) +; RV32IF-NEXT: lui a1, 389120 +; RV32IF-NEXT: addi a1, a1, -1 +; RV32IF-NEXT: fmv.w.x fa5, a1 ; RV32IF-NEXT: flt.s a1, fa5, fs0 ; RV32IF-NEXT: beqz a1, .LBB9_6 ; RV32IF-NEXT: # %bb.5: @@ -712,10 +717,11 @@ define i64 @test_trunc_ui64(float %x) nounwind { ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: lui a2, %hi(.LCPI11_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI11_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: lui a2, 391168 ; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: addi a2, a2, -1 +; RV32IF-NEXT: fmv.w.x fa5, a2 ; RV32IF-NEXT: flt.s a2, fa5, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: or a0, a2, a0 @@ -834,8 +840,9 @@ define i64 @test_round_si64(float %x) nounwind { ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: mv a2, a1 ; RV32IF-NEXT: .LBB13_4: -; RV32IF-NEXT: lui a1, %hi(.LCPI13_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI13_0)(a1) +; RV32IF-NEXT: lui a1, 389120 +; RV32IF-NEXT: addi a1, a1, -1 +; RV32IF-NEXT: fmv.w.x fa5, a1 ; RV32IF-NEXT: flt.s a1, fa5, fs0 ; RV32IF-NEXT: beqz a1, .LBB13_6 ; RV32IF-NEXT: # %bb.5: @@ -970,10 +977,11 @@ define i64 @test_round_ui64(float %x) nounwind { ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: lui a2, %hi(.LCPI15_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI15_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: lui a2, 391168 ; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: addi a2, a2, -1 +; RV32IF-NEXT: fmv.w.x fa5, a2 ; RV32IF-NEXT: flt.s a2, fa5, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: or a0, a2, a0 @@ -1092,8 +1100,9 @@ define i64 @test_roundeven_si64(float %x) nounwind { ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: mv a2, a1 ; RV32IF-NEXT: .LBB17_4: -; RV32IF-NEXT: lui a1, %hi(.LCPI17_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI17_0)(a1) +; RV32IF-NEXT: lui a1, 389120 +; RV32IF-NEXT: addi a1, a1, -1 +; RV32IF-NEXT: fmv.w.x fa5, a1 ; RV32IF-NEXT: flt.s a1, fa5, fs0 ; RV32IF-NEXT: beqz a1, .LBB17_6 ; RV32IF-NEXT: # %bb.5: @@ -1228,10 +1237,11 @@ define i64 @test_roundeven_ui64(float %x) nounwind { ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: lui a2, %hi(.LCPI19_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI19_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: lui a2, 391168 ; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: addi a2, a2, -1 +; RV32IF-NEXT: fmv.w.x fa5, a2 ; RV32IF-NEXT: flt.s a2, fa5, fs0 ; RV32IF-NEXT: neg a2, a2 ; 
RV32IF-NEXT: or a0, a2, a0 @@ -1350,8 +1360,9 @@ define i64 @test_rint_si64(float %x) nounwind { ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: mv a2, a1 ; RV32IF-NEXT: .LBB21_4: -; RV32IF-NEXT: lui a1, %hi(.LCPI21_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI21_0)(a1) +; RV32IF-NEXT: lui a1, 389120 +; RV32IF-NEXT: addi a1, a1, -1 +; RV32IF-NEXT: fmv.w.x fa5, a1 ; RV32IF-NEXT: flt.s a1, fa5, fs0 ; RV32IF-NEXT: beqz a1, .LBB21_6 ; RV32IF-NEXT: # %bb.5: @@ -1486,10 +1497,11 @@ define i64 @test_rint_ui64(float %x) nounwind { ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi -; RV32IF-NEXT: lui a2, %hi(.LCPI23_0) -; RV32IF-NEXT: flw fa5, %lo(.LCPI23_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 +; RV32IF-NEXT: lui a2, 391168 ; RV32IF-NEXT: and a1, s0, a1 +; RV32IF-NEXT: addi a2, a2, -1 +; RV32IF-NEXT: fmv.w.x fa5, a2 ; RV32IF-NEXT: flt.s a2, fa5, fs0 ; RV32IF-NEXT: neg a2, a2 ; RV32IF-NEXT: or a0, a2, a0 diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll index 84163b52bb98d..2ebb6e9b97a4d 100644 --- a/llvm/test/CodeGen/RISCV/half-arith.ll +++ b/llvm/test/CodeGen/RISCV/half-arith.ll @@ -2883,39 +2883,20 @@ define half @fsgnjx_f16(half %x, half %y) nounwind { ; RV64I-NEXT: addi sp, sp, 32 ; RV64I-NEXT: ret ; -; RV32IZFHMIN-LABEL: fsgnjx_f16: -; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI23_0) -; RV32IZFHMIN-NEXT: lhu a0, %lo(.LCPI23_0)(a0) -; RV32IZFHMIN-NEXT: fmv.x.h a1, fa0 -; RV32IZFHMIN-NEXT: lui a2, 1048568 -; RV32IZFHMIN-NEXT: and a1, a1, a2 -; RV32IZFHMIN-NEXT: slli a0, a0, 17 -; RV32IZFHMIN-NEXT: srli a0, a0, 17 -; RV32IZFHMIN-NEXT: or a0, a0, a1 -; RV32IZFHMIN-NEXT: fmv.h.x fa5, a0 -; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa5 -; RV32IZFHMIN-NEXT: fcvt.s.h fa4, fa1 -; RV32IZFHMIN-NEXT: fmul.s fa5, fa5, fa4 -; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5 -; RV32IZFHMIN-NEXT: ret -; -; RV64IZFHMIN-LABEL: fsgnjx_f16: -; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI23_0) -; RV64IZFHMIN-NEXT: lhu a0, %lo(.LCPI23_0)(a0) -; RV64IZFHMIN-NEXT: fmv.x.h a1, fa0 -; RV64IZFHMIN-NEXT: lui a2, 1048568 -; RV64IZFHMIN-NEXT: and a1, a1, a2 -; RV64IZFHMIN-NEXT: slli a0, a0, 49 -; RV64IZFHMIN-NEXT: srli a0, a0, 49 -; RV64IZFHMIN-NEXT: or a0, a0, a1 -; RV64IZFHMIN-NEXT: fmv.h.x fa5, a0 -; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 -; RV64IZFHMIN-NEXT: fcvt.s.h fa4, fa1 -; RV64IZFHMIN-NEXT: fmul.s fa5, fa5, fa4 -; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 -; RV64IZFHMIN-NEXT: ret +; CHECKIZFHMIN-LABEL: fsgnjx_f16: +; CHECKIZFHMIN: # %bb.0: +; CHECKIZFHMIN-NEXT: fmv.x.h a0, fa0 +; CHECKIZFHMIN-NEXT: lui a1, 1048568 +; CHECKIZFHMIN-NEXT: and a0, a0, a1 +; CHECKIZFHMIN-NEXT: li a1, 15 +; CHECKIZFHMIN-NEXT: slli a1, a1, 10 +; CHECKIZFHMIN-NEXT: or a0, a0, a1 +; CHECKIZFHMIN-NEXT: fmv.h.x fa5, a0 +; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 +; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1 +; CHECKIZFHMIN-NEXT: fmul.s fa5, fa5, fa4 +; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 +; CHECKIZFHMIN-NEXT: ret ; ; CHECKIZHINXMIN-LABEL: fsgnjx_f16: ; CHECKIZHINXMIN: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll index 6cebf8b2828bf..c3c06e192f76f 100644 --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -194,13 +194,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_si_h_sat: ; RV32IZFH: # %bb.0: # %start ; RV32IZFH-NEXT: fcvt.s.h fa5, fa0 -; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_0) -; RV32IZFH-NEXT: feq.s a1, fa5, fa5 -; RV32IZFH-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; 
RV32IZFH-NEXT: lui a0, 815104 -; RV32IZFH-NEXT: fmv.w.x fa3, a0 -; RV32IZFH-NEXT: fmax.s fa5, fa5, fa3 -; RV32IZFH-NEXT: neg a0, a1 +; RV32IZFH-NEXT: lui a1, 290816 +; RV32IZFH-NEXT: fmv.w.x fa4, a0 +; RV32IZFH-NEXT: feq.s a0, fa5, fa5 +; RV32IZFH-NEXT: addi a1, a1, -512 +; RV32IZFH-NEXT: neg a0, a0 +; RV32IZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV32IZFH-NEXT: fmv.w.x fa4, a1 ; RV32IZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV32IZFH-NEXT: fcvt.w.s a1, fa5, rtz ; RV32IZFH-NEXT: and a0, a0, a1 @@ -209,13 +210,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; RV64IZFH-LABEL: fcvt_si_h_sat: ; RV64IZFH: # %bb.0: # %start ; RV64IZFH-NEXT: fcvt.s.h fa5, fa0 -; RV64IZFH-NEXT: lui a0, %hi(.LCPI1_0) -; RV64IZFH-NEXT: feq.s a1, fa5, fa5 -; RV64IZFH-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; RV64IZFH-NEXT: lui a0, 815104 -; RV64IZFH-NEXT: fmv.w.x fa3, a0 -; RV64IZFH-NEXT: fmax.s fa5, fa5, fa3 -; RV64IZFH-NEXT: neg a0, a1 +; RV64IZFH-NEXT: lui a1, 290816 +; RV64IZFH-NEXT: fmv.w.x fa4, a0 +; RV64IZFH-NEXT: feq.s a0, fa5, fa5 +; RV64IZFH-NEXT: addi a1, a1, -512 +; RV64IZFH-NEXT: neg a0, a0 +; RV64IZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV64IZFH-NEXT: fmv.w.x fa4, a1 ; RV64IZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV64IZFH-NEXT: fcvt.l.s a1, fa5, rtz ; RV64IZFH-NEXT: and a0, a0, a1 @@ -224,13 +226,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; RV32IDZFH-LABEL: fcvt_si_h_sat: ; RV32IDZFH: # %bb.0: # %start ; RV32IDZFH-NEXT: fcvt.s.h fa5, fa0 -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI1_0) -; RV32IDZFH-NEXT: feq.s a1, fa5, fa5 -; RV32IDZFH-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; RV32IDZFH-NEXT: lui a0, 815104 -; RV32IDZFH-NEXT: fmv.w.x fa3, a0 -; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa3 -; RV32IDZFH-NEXT: neg a0, a1 +; RV32IDZFH-NEXT: lui a1, 290816 +; RV32IDZFH-NEXT: fmv.w.x fa4, a0 +; RV32IDZFH-NEXT: feq.s a0, fa5, fa5 +; RV32IDZFH-NEXT: addi a1, a1, -512 +; RV32IDZFH-NEXT: neg a0, a0 +; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV32IDZFH-NEXT: fmv.w.x fa4, a1 ; RV32IDZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV32IDZFH-NEXT: fcvt.w.s a1, fa5, rtz ; RV32IDZFH-NEXT: and a0, a0, a1 @@ -239,13 +242,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; RV64IDZFH-LABEL: fcvt_si_h_sat: ; RV64IDZFH: # %bb.0: # %start ; RV64IDZFH-NEXT: fcvt.s.h fa5, fa0 -; RV64IDZFH-NEXT: lui a0, %hi(.LCPI1_0) -; RV64IDZFH-NEXT: feq.s a1, fa5, fa5 -; RV64IDZFH-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; RV64IDZFH-NEXT: lui a0, 815104 -; RV64IDZFH-NEXT: fmv.w.x fa3, a0 -; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa3 -; RV64IDZFH-NEXT: neg a0, a1 +; RV64IDZFH-NEXT: lui a1, 290816 +; RV64IDZFH-NEXT: fmv.w.x fa4, a0 +; RV64IDZFH-NEXT: feq.s a0, fa5, fa5 +; RV64IDZFH-NEXT: addi a1, a1, -512 +; RV64IDZFH-NEXT: neg a0, a0 +; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV64IDZFH-NEXT: fmv.w.x fa4, a1 ; RV64IDZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV64IDZFH-NEXT: fcvt.l.s a1, fa5, rtz ; RV64IDZFH-NEXT: and a0, a0, a1 @@ -399,13 +403,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ID-ILP32-NEXT: call __extendhfsf2 ; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0 -; RV32ID-ILP32-NEXT: lui a0, %hi(.LCPI1_0) -; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5 -; RV32ID-ILP32-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; RV32ID-ILP32-NEXT: lui a0, 815104 -; RV32ID-ILP32-NEXT: fmv.w.x fa3, a0 -; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa3 -; RV32ID-ILP32-NEXT: neg a0, a1 +; RV32ID-ILP32-NEXT: lui a1, 290816 +; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0 +; RV32ID-ILP32-NEXT: feq.s a0, fa5, fa5 +; RV32ID-ILP32-NEXT: addi a1, a1, -512 +; RV32ID-ILP32-NEXT: neg a0, a0 +; RV32ID-ILP32-NEXT: fmax.s fa5, 
fa5, fa4 +; RV32ID-ILP32-NEXT: fmv.w.x fa4, a1 ; RV32ID-ILP32-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-ILP32-NEXT: fcvt.w.s a1, fa5, rtz ; RV32ID-ILP32-NEXT: and a0, a0, a1 @@ -419,13 +424,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64ID-LP64-NEXT: call __extendhfsf2 ; RV64ID-LP64-NEXT: fmv.w.x fa5, a0 -; RV64ID-LP64-NEXT: lui a0, %hi(.LCPI1_0) -; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5 -; RV64ID-LP64-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; RV64ID-LP64-NEXT: lui a0, 815104 -; RV64ID-LP64-NEXT: fmv.w.x fa3, a0 -; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa3 -; RV64ID-LP64-NEXT: neg a0, a1 +; RV64ID-LP64-NEXT: lui a1, 290816 +; RV64ID-LP64-NEXT: fmv.w.x fa4, a0 +; RV64ID-LP64-NEXT: feq.s a0, fa5, fa5 +; RV64ID-LP64-NEXT: addi a1, a1, -512 +; RV64ID-LP64-NEXT: neg a0, a0 +; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa4 +; RV64ID-LP64-NEXT: fmv.w.x fa4, a1 ; RV64ID-LP64-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-LP64-NEXT: fcvt.l.s a1, fa5, rtz ; RV64ID-LP64-NEXT: and a0, a0, a1 @@ -439,13 +445,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ID-NEXT: call __extendhfsf2 ; RV32ID-NEXT: feq.s a0, fa0, fa0 -; RV32ID-NEXT: lui a1, %hi(.LCPI1_0) -; RV32ID-NEXT: flw fa5, %lo(.LCPI1_0)(a1) ; RV32ID-NEXT: lui a1, 815104 -; RV32ID-NEXT: fmv.w.x fa4, a1 -; RV32ID-NEXT: fmax.s fa4, fa0, fa4 +; RV32ID-NEXT: fmv.w.x fa5, a1 +; RV32ID-NEXT: lui a1, 290816 ; RV32ID-NEXT: neg a0, a0 -; RV32ID-NEXT: fmin.s fa5, fa4, fa5 +; RV32ID-NEXT: addi a1, a1, -512 +; RV32ID-NEXT: fmax.s fa5, fa0, fa5 +; RV32ID-NEXT: fmv.w.x fa4, a1 +; RV32ID-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-NEXT: fcvt.w.s a1, fa5, rtz ; RV32ID-NEXT: and a0, a0, a1 ; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -458,13 +465,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64ID-NEXT: call __extendhfsf2 ; RV64ID-NEXT: feq.s a0, fa0, fa0 -; RV64ID-NEXT: lui a1, %hi(.LCPI1_0) -; RV64ID-NEXT: flw fa5, %lo(.LCPI1_0)(a1) ; RV64ID-NEXT: lui a1, 815104 -; RV64ID-NEXT: fmv.w.x fa4, a1 -; RV64ID-NEXT: fmax.s fa4, fa0, fa4 +; RV64ID-NEXT: fmv.w.x fa5, a1 +; RV64ID-NEXT: lui a1, 290816 ; RV64ID-NEXT: neg a0, a0 -; RV64ID-NEXT: fmin.s fa5, fa4, fa5 +; RV64ID-NEXT: addi a1, a1, -512 +; RV64ID-NEXT: fmax.s fa5, fa0, fa5 +; RV64ID-NEXT: fmv.w.x fa4, a1 +; RV64ID-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-NEXT: fcvt.l.s a1, fa5, rtz ; RV64ID-NEXT: and a0, a0, a1 ; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -474,13 +482,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; CHECK32-IZFHMIN-LABEL: fcvt_si_h_sat: ; CHECK32-IZFHMIN: # %bb.0: # %start ; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK32-IZFHMIN-NEXT: feq.s a1, fa5, fa5 -; CHECK32-IZFHMIN-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; CHECK32-IZFHMIN-NEXT: lui a0, 815104 -; CHECK32-IZFHMIN-NEXT: fmv.w.x fa3, a0 -; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa3 -; CHECK32-IZFHMIN-NEXT: neg a0, a1 +; CHECK32-IZFHMIN-NEXT: lui a1, 290816 +; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a0 +; CHECK32-IZFHMIN-NEXT: feq.s a0, fa5, fa5 +; CHECK32-IZFHMIN-NEXT: addi a1, a1, -512 +; CHECK32-IZFHMIN-NEXT: neg a0, a0 +; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a1 ; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK32-IZFHMIN-NEXT: fcvt.w.s a1, fa5, rtz ; CHECK32-IZFHMIN-NEXT: and a0, a0, a1 @@ -489,13 +498,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; CHECK64-IZFHMIN-LABEL: fcvt_si_h_sat: ; 
CHECK64-IZFHMIN: # %bb.0: # %start ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 -; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK64-IZFHMIN-NEXT: feq.s a1, fa5, fa5 -; CHECK64-IZFHMIN-NEXT: flw fa4, %lo(.LCPI1_0)(a0) ; CHECK64-IZFHMIN-NEXT: lui a0, 815104 -; CHECK64-IZFHMIN-NEXT: fmv.w.x fa3, a0 -; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa3 -; CHECK64-IZFHMIN-NEXT: neg a0, a1 +; CHECK64-IZFHMIN-NEXT: lui a1, 290816 +; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a0 +; CHECK64-IZFHMIN-NEXT: feq.s a0, fa5, fa5 +; CHECK64-IZFHMIN-NEXT: addi a1, a1, -512 +; CHECK64-IZFHMIN-NEXT: neg a0, a0 +; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a1 ; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a1, fa5, rtz ; CHECK64-IZFHMIN-NEXT: and a0, a0, a1 @@ -711,45 +721,49 @@ define i16 @fcvt_ui_h(half %a) nounwind { define i16 @fcvt_ui_h_sat(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_ui_h_sat: ; RV32IZFH: # %bb.0: # %start -; RV32IZFH-NEXT: lui a0, %hi(.LCPI3_0) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; RV32IZFH-NEXT: fcvt.s.h fa4, fa0 -; RV32IZFH-NEXT: fmv.w.x fa3, zero -; RV32IZFH-NEXT: fmax.s fa4, fa4, fa3 -; RV32IZFH-NEXT: fmin.s fa5, fa4, fa5 +; RV32IZFH-NEXT: fcvt.s.h fa5, fa0 +; RV32IZFH-NEXT: fmv.w.x fa4, zero +; RV32IZFH-NEXT: lui a0, 292864 +; RV32IZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV32IZFH-NEXT: addi a0, a0, -256 +; RV32IZFH-NEXT: fmv.w.x fa4, a0 +; RV32IZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV32IZFH-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32IZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcvt_ui_h_sat: ; RV64IZFH: # %bb.0: # %start -; RV64IZFH-NEXT: lui a0, %hi(.LCPI3_0) -; RV64IZFH-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; RV64IZFH-NEXT: fcvt.s.h fa4, fa0 -; RV64IZFH-NEXT: fmv.w.x fa3, zero -; RV64IZFH-NEXT: fmax.s fa4, fa4, fa3 -; RV64IZFH-NEXT: fmin.s fa5, fa4, fa5 +; RV64IZFH-NEXT: fcvt.s.h fa5, fa0 +; RV64IZFH-NEXT: fmv.w.x fa4, zero +; RV64IZFH-NEXT: lui a0, 292864 +; RV64IZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV64IZFH-NEXT: addi a0, a0, -256 +; RV64IZFH-NEXT: fmv.w.x fa4, a0 +; RV64IZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV64IZFH-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFH-NEXT: ret ; ; RV32IDZFH-LABEL: fcvt_ui_h_sat: ; RV32IDZFH: # %bb.0: # %start -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI3_0) -; RV32IDZFH-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; RV32IDZFH-NEXT: fcvt.s.h fa4, fa0 -; RV32IDZFH-NEXT: fmv.w.x fa3, zero -; RV32IDZFH-NEXT: fmax.s fa4, fa4, fa3 -; RV32IDZFH-NEXT: fmin.s fa5, fa4, fa5 +; RV32IDZFH-NEXT: fcvt.s.h fa5, fa0 +; RV32IDZFH-NEXT: fmv.w.x fa4, zero +; RV32IDZFH-NEXT: lui a0, 292864 +; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV32IDZFH-NEXT: addi a0, a0, -256 +; RV32IDZFH-NEXT: fmv.w.x fa4, a0 +; RV32IDZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV32IDZFH-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32IDZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_ui_h_sat: ; RV64IDZFH: # %bb.0: # %start -; RV64IDZFH-NEXT: lui a0, %hi(.LCPI3_0) -; RV64IDZFH-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; RV64IDZFH-NEXT: fcvt.s.h fa4, fa0 -; RV64IDZFH-NEXT: fmv.w.x fa3, zero -; RV64IDZFH-NEXT: fmax.s fa4, fa4, fa3 -; RV64IDZFH-NEXT: fmin.s fa5, fa4, fa5 +; RV64IDZFH-NEXT: fcvt.s.h fa5, fa0 +; RV64IDZFH-NEXT: fmv.w.x fa4, zero +; RV64IDZFH-NEXT: lui a0, 292864 +; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV64IDZFH-NEXT: addi a0, a0, -256 +; RV64IDZFH-NEXT: fmv.w.x fa4, a0 +; RV64IDZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV64IDZFH-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IDZFH-NEXT: ret ; @@ -874,12 +888,13 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind { ; RV32ID-ILP32-NEXT: addi sp, sp, -16 ; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte 
Folded Spill ; RV32ID-ILP32-NEXT: call __extendhfsf2 -; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI3_0) -; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI3_0)(a1) +; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0 +; RV32ID-ILP32-NEXT: fmv.w.x fa4, zero +; RV32ID-ILP32-NEXT: lui a0, 292864 +; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa4 +; RV32ID-ILP32-NEXT: addi a0, a0, -256 ; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0 -; RV32ID-ILP32-NEXT: fmv.w.x fa3, zero -; RV32ID-ILP32-NEXT: fmax.s fa4, fa4, fa3 -; RV32ID-ILP32-NEXT: fmin.s fa5, fa4, fa5 +; RV32ID-ILP32-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32ID-ILP32-NEXT: addi sp, sp, 16 @@ -890,12 +905,13 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind { ; RV64ID-LP64-NEXT: addi sp, sp, -16 ; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64ID-LP64-NEXT: call __extendhfsf2 -; RV64ID-LP64-NEXT: lui a1, %hi(.LCPI3_0) -; RV64ID-LP64-NEXT: flw fa5, %lo(.LCPI3_0)(a1) +; RV64ID-LP64-NEXT: fmv.w.x fa5, a0 +; RV64ID-LP64-NEXT: fmv.w.x fa4, zero +; RV64ID-LP64-NEXT: lui a0, 292864 +; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa4 +; RV64ID-LP64-NEXT: addi a0, a0, -256 ; RV64ID-LP64-NEXT: fmv.w.x fa4, a0 -; RV64ID-LP64-NEXT: fmv.w.x fa3, zero -; RV64ID-LP64-NEXT: fmax.s fa4, fa4, fa3 -; RV64ID-LP64-NEXT: fmin.s fa5, fa4, fa5 +; RV64ID-LP64-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64ID-LP64-NEXT: addi sp, sp, 16 @@ -906,11 +922,12 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind { ; RV32ID-NEXT: addi sp, sp, -16 ; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ID-NEXT: call __extendhfsf2 -; RV32ID-NEXT: lui a0, %hi(.LCPI3_0) -; RV32ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; RV32ID-NEXT: fmv.w.x fa4, zero -; RV32ID-NEXT: fmax.s fa4, fa0, fa4 -; RV32ID-NEXT: fmin.s fa5, fa4, fa5 +; RV32ID-NEXT: fmv.w.x fa5, zero +; RV32ID-NEXT: lui a0, 292864 +; RV32ID-NEXT: fmax.s fa5, fa0, fa5 +; RV32ID-NEXT: addi a0, a0, -256 +; RV32ID-NEXT: fmv.w.x fa4, a0 +; RV32ID-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32ID-NEXT: addi sp, sp, 16 @@ -921,11 +938,12 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind { ; RV64ID-NEXT: addi sp, sp, -16 ; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64ID-NEXT: call __extendhfsf2 -; RV64ID-NEXT: lui a0, %hi(.LCPI3_0) -; RV64ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; RV64ID-NEXT: fmv.w.x fa4, zero -; RV64ID-NEXT: fmax.s fa4, fa0, fa4 -; RV64ID-NEXT: fmin.s fa5, fa4, fa5 +; RV64ID-NEXT: fmv.w.x fa5, zero +; RV64ID-NEXT: lui a0, 292864 +; RV64ID-NEXT: fmax.s fa5, fa0, fa5 +; RV64ID-NEXT: addi a0, a0, -256 +; RV64ID-NEXT: fmv.w.x fa4, a0 +; RV64ID-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64ID-NEXT: addi sp, sp, 16 @@ -933,23 +951,25 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind { ; ; CHECK32-IZFHMIN-LABEL: fcvt_ui_h_sat: ; CHECK32-IZFHMIN: # %bb.0: # %start -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK32-IZFHMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa4, fa0 -; CHECK32-IZFHMIN-NEXT: fmv.w.x fa3, zero -; CHECK32-IZFHMIN-NEXT: fmax.s fa4, fa4, fa3 -; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5 +; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 +; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, zero +; CHECK32-IZFHMIN-NEXT: lui a0, 292864 +; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK32-IZFHMIN-NEXT: addi a0, a0, -256 +; 
CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a0 +; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK32-IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; CHECK32-IZFHMIN-NEXT: ret ; ; CHECK64-IZFHMIN-LABEL: fcvt_ui_h_sat: ; CHECK64-IZFHMIN: # %bb.0: # %start -; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK64-IZFHMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a0) -; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa4, fa0 -; CHECK64-IZFHMIN-NEXT: fmv.w.x fa3, zero -; CHECK64-IZFHMIN-NEXT: fmax.s fa4, fa4, fa3 -; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5 +; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 +; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, zero +; CHECK64-IZFHMIN-NEXT: lui a0, 292864 +; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK64-IZFHMIN-NEXT: addi a0, a0, -256 +; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a0 +; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret ; @@ -2159,20 +2179,21 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IZFH-NEXT: # %bb.1: # %start ; RV32IZFH-NEXT: mv a2, a1 ; RV32IZFH-NEXT: .LBB10_2: # %start -; RV32IZFH-NEXT: lui a1, %hi(.LCPI10_0) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; RV32IZFH-NEXT: lui a1, 389120 +; RV32IZFH-NEXT: addi a1, a1, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a1 ; RV32IZFH-NEXT: flt.s a1, fa5, fs0 ; RV32IZFH-NEXT: beqz a1, .LBB10_4 ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: addi a2, a3, -1 ; RV32IZFH-NEXT: .LBB10_4: # %start ; RV32IZFH-NEXT: feq.s a3, fs0, fs0 -; RV32IZFH-NEXT: neg a4, a1 -; RV32IZFH-NEXT: neg a1, s0 +; RV32IZFH-NEXT: neg a4, s0 +; RV32IZFH-NEXT: neg a5, a1 ; RV32IZFH-NEXT: neg a3, a3 -; RV32IZFH-NEXT: and a0, a1, a0 +; RV32IZFH-NEXT: and a0, a4, a0 ; RV32IZFH-NEXT: and a1, a3, a2 -; RV32IZFH-NEXT: or a0, a4, a0 +; RV32IZFH-NEXT: or a0, a5, a0 ; RV32IZFH-NEXT: and a0, a3, a0 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2207,20 +2228,21 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IDZFH-NEXT: # %bb.1: # %start ; RV32IDZFH-NEXT: mv a2, a1 ; RV32IDZFH-NEXT: .LBB10_2: # %start -; RV32IDZFH-NEXT: lui a1, %hi(.LCPI10_0) -; RV32IDZFH-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; RV32IDZFH-NEXT: lui a1, 389120 +; RV32IDZFH-NEXT: addi a1, a1, -1 +; RV32IDZFH-NEXT: fmv.w.x fa5, a1 ; RV32IDZFH-NEXT: flt.s a1, fa5, fs0 ; RV32IDZFH-NEXT: beqz a1, .LBB10_4 ; RV32IDZFH-NEXT: # %bb.3: ; RV32IDZFH-NEXT: addi a2, a3, -1 ; RV32IDZFH-NEXT: .LBB10_4: # %start ; RV32IDZFH-NEXT: feq.s a3, fs0, fs0 -; RV32IDZFH-NEXT: neg a4, a1 -; RV32IDZFH-NEXT: neg a1, s0 +; RV32IDZFH-NEXT: neg a4, s0 +; RV32IDZFH-NEXT: neg a5, a1 ; RV32IDZFH-NEXT: neg a3, a3 -; RV32IDZFH-NEXT: and a0, a1, a0 +; RV32IDZFH-NEXT: and a0, a4, a0 ; RV32IDZFH-NEXT: and a1, a3, a2 -; RV32IDZFH-NEXT: or a0, a4, a0 +; RV32IDZFH-NEXT: or a0, a5, a0 ; RV32IDZFH-NEXT: and a0, a3, a0 ; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IDZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2450,8 +2472,9 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32ID-ILP32-NEXT: # %bb.1: # %start ; RV32ID-ILP32-NEXT: mv a2, a1 ; RV32ID-ILP32-NEXT: .LBB10_2: # %start -; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI10_0) -; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; RV32ID-ILP32-NEXT: lui a1, 389120 +; RV32ID-ILP32-NEXT: addi a1, a1, -1 +; RV32ID-ILP32-NEXT: fmv.w.x fa5, a1 ; RV32ID-ILP32-NEXT: flw fa4, 4(sp) # 4-byte Folded Reload ; RV32ID-ILP32-NEXT: flt.s a1, fa5, fa4 ; RV32ID-ILP32-NEXT: fmv.s fa5, fa4 @@ -2505,8 +2528,9 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32ID-NEXT: # %bb.1: # %start ; 
RV32ID-NEXT: mv a2, a1 ; RV32ID-NEXT: .LBB10_2: # %start -; RV32ID-NEXT: lui a1, %hi(.LCPI10_0) -; RV32ID-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; RV32ID-NEXT: lui a1, 389120 +; RV32ID-NEXT: addi a1, a1, -1 +; RV32ID-NEXT: fmv.w.x fa5, a1 ; RV32ID-NEXT: flt.s a1, fa5, fs0 ; RV32ID-NEXT: beqz a1, .LBB10_4 ; RV32ID-NEXT: # %bb.3: @@ -2558,20 +2582,21 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IFZFHMIN-NEXT: # %bb.1: # %start ; RV32IFZFHMIN-NEXT: mv a2, a1 ; RV32IFZFHMIN-NEXT: .LBB10_2: # %start -; RV32IFZFHMIN-NEXT: lui a1, %hi(.LCPI10_0) -; RV32IFZFHMIN-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; RV32IFZFHMIN-NEXT: lui a1, 389120 +; RV32IFZFHMIN-NEXT: addi a1, a1, -1 +; RV32IFZFHMIN-NEXT: fmv.w.x fa5, a1 ; RV32IFZFHMIN-NEXT: flt.s a1, fa5, fs0 ; RV32IFZFHMIN-NEXT: beqz a1, .LBB10_4 ; RV32IFZFHMIN-NEXT: # %bb.3: ; RV32IFZFHMIN-NEXT: addi a2, a3, -1 ; RV32IFZFHMIN-NEXT: .LBB10_4: # %start ; RV32IFZFHMIN-NEXT: feq.s a3, fs0, fs0 -; RV32IFZFHMIN-NEXT: neg a4, a1 -; RV32IFZFHMIN-NEXT: neg a1, s0 +; RV32IFZFHMIN-NEXT: neg a4, s0 +; RV32IFZFHMIN-NEXT: neg a5, a1 ; RV32IFZFHMIN-NEXT: neg a3, a3 -; RV32IFZFHMIN-NEXT: and a0, a1, a0 +; RV32IFZFHMIN-NEXT: and a0, a4, a0 ; RV32IFZFHMIN-NEXT: and a1, a3, a2 -; RV32IFZFHMIN-NEXT: or a0, a4, a0 +; RV32IFZFHMIN-NEXT: or a0, a5, a0 ; RV32IFZFHMIN-NEXT: and a0, a3, a0 ; RV32IFZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IFZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2607,20 +2632,21 @@ define i64 @fcvt_l_h_sat(half %a) nounwind { ; RV32IDZFHMIN-NEXT: # %bb.1: # %start ; RV32IDZFHMIN-NEXT: mv a2, a1 ; RV32IDZFHMIN-NEXT: .LBB10_2: # %start -; RV32IDZFHMIN-NEXT: lui a1, %hi(.LCPI10_0) -; RV32IDZFHMIN-NEXT: flw fa5, %lo(.LCPI10_0)(a1) +; RV32IDZFHMIN-NEXT: lui a1, 389120 +; RV32IDZFHMIN-NEXT: addi a1, a1, -1 +; RV32IDZFHMIN-NEXT: fmv.w.x fa5, a1 ; RV32IDZFHMIN-NEXT: flt.s a1, fa5, fs0 ; RV32IDZFHMIN-NEXT: beqz a1, .LBB10_4 ; RV32IDZFHMIN-NEXT: # %bb.3: ; RV32IDZFHMIN-NEXT: addi a2, a3, -1 ; RV32IDZFHMIN-NEXT: .LBB10_4: # %start ; RV32IDZFHMIN-NEXT: feq.s a3, fs0, fs0 -; RV32IDZFHMIN-NEXT: neg a4, a1 -; RV32IDZFHMIN-NEXT: neg a1, s0 +; RV32IDZFHMIN-NEXT: neg a4, s0 +; RV32IDZFHMIN-NEXT: neg a5, a1 ; RV32IDZFHMIN-NEXT: neg a3, a3 -; RV32IDZFHMIN-NEXT: and a0, a1, a0 +; RV32IDZFHMIN-NEXT: and a0, a4, a0 ; RV32IDZFHMIN-NEXT: and a1, a3, a2 -; RV32IDZFHMIN-NEXT: or a0, a4, a0 +; RV32IDZFHMIN-NEXT: or a0, a5, a0 ; RV32IDZFHMIN-NEXT: and a0, a3, a0 ; RV32IDZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IDZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -2903,23 +2929,25 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IZFH-NEXT: addi sp, sp, -16 ; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFH-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; RV32IZFH-NEXT: lui a0, %hi(.LCPI12_0) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI12_0)(a0) -; RV32IZFH-NEXT: fcvt.s.h fa0, fa0 -; RV32IZFH-NEXT: fmv.w.x fa4, zero -; RV32IZFH-NEXT: fle.s a0, fa4, fa0 -; RV32IZFH-NEXT: flt.s a1, fa5, fa0 -; RV32IZFH-NEXT: neg s0, a1 -; RV32IZFH-NEXT: neg s1, a0 +; RV32IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: fmv.w.x fa5, zero +; RV32IZFH-NEXT: fle.s a0, fa5, fs0 +; RV32IZFH-NEXT: neg s0, a0 +; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: and a0, s1, a0 -; RV32IZFH-NEXT: and a1, s1, a1 -; RV32IZFH-NEXT: or a0, s0, a0 -; RV32IZFH-NEXT: or a1, s0, a1 +; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: lui a2, 
391168 +; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: addi a2, a2, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a2 +; RV32IZFH-NEXT: flt.s a2, fa5, fs0 +; RV32IZFH-NEXT: neg a2, a2 +; RV32IZFH-NEXT: or a0, a2, a0 +; RV32IZFH-NEXT: or a1, a2, a1 ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; @@ -2937,23 +2965,25 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32IDZFH-NEXT: addi sp, sp, -16 ; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IDZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IDZFH-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI12_0) -; RV32IDZFH-NEXT: flw fa5, %lo(.LCPI12_0)(a0) -; RV32IDZFH-NEXT: fcvt.s.h fa0, fa0 -; RV32IDZFH-NEXT: fmv.w.x fa4, zero -; RV32IDZFH-NEXT: fle.s a0, fa4, fa0 -; RV32IDZFH-NEXT: flt.s a1, fa5, fa0 -; RV32IDZFH-NEXT: neg s0, a1 -; RV32IDZFH-NEXT: neg s1, a0 +; RV32IDZFH-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill +; RV32IDZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IDZFH-NEXT: fmv.w.x fa5, zero +; RV32IDZFH-NEXT: fle.s a0, fa5, fs0 +; RV32IDZFH-NEXT: neg s0, a0 +; RV32IDZFH-NEXT: fmv.s fa0, fs0 ; RV32IDZFH-NEXT: call __fixunssfdi -; RV32IDZFH-NEXT: and a0, s1, a0 -; RV32IDZFH-NEXT: and a1, s1, a1 -; RV32IDZFH-NEXT: or a0, s0, a0 -; RV32IDZFH-NEXT: or a1, s0, a1 +; RV32IDZFH-NEXT: and a0, s0, a0 +; RV32IDZFH-NEXT: lui a2, 391168 +; RV32IDZFH-NEXT: and a1, s0, a1 +; RV32IDZFH-NEXT: addi a2, a2, -1 +; RV32IDZFH-NEXT: fmv.w.x fa5, a2 +; RV32IDZFH-NEXT: flt.s a2, fa5, fs0 +; RV32IDZFH-NEXT: neg a2, a2 +; RV32IDZFH-NEXT: or a0, a2, a0 +; RV32IDZFH-NEXT: or a1, a2, a1 ; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IDZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; RV32IDZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IDZFH-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; RV32IDZFH-NEXT: addi sp, sp, 16 ; RV32IDZFH-NEXT: ret ; @@ -3105,14 +3135,15 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32ID-ILP32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32ID-ILP32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill ; RV32ID-ILP32-NEXT: call __extendhfsf2 -; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI12_0) -; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI12_0)(a1) -; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0 -; RV32ID-ILP32-NEXT: fmv.w.x fa3, zero -; RV32ID-ILP32-NEXT: fle.s a1, fa3, fa4 -; RV32ID-ILP32-NEXT: flt.s a2, fa5, fa4 -; RV32ID-ILP32-NEXT: neg s0, a2 -; RV32ID-ILP32-NEXT: neg s1, a1 +; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0 +; RV32ID-ILP32-NEXT: lui a1, 391168 +; RV32ID-ILP32-NEXT: fmv.w.x fa4, zero +; RV32ID-ILP32-NEXT: addi a1, a1, -1 +; RV32ID-ILP32-NEXT: fle.s a2, fa4, fa5 +; RV32ID-ILP32-NEXT: fmv.w.x fa4, a1 +; RV32ID-ILP32-NEXT: flt.s a1, fa4, fa5 +; RV32ID-ILP32-NEXT: neg s0, a1 +; RV32ID-ILP32-NEXT: neg s1, a2 ; RV32ID-ILP32-NEXT: call __fixunssfdi ; RV32ID-ILP32-NEXT: and a0, s1, a0 ; RV32ID-ILP32-NEXT: and a1, s1, a1 @@ -3144,23 +3175,25 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV32ID-NEXT: addi sp, sp, -16 ; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32ID-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32ID-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill ; RV32ID-NEXT: call __extendhfsf2 -; RV32ID-NEXT: lui a0, %hi(.LCPI12_0) -; RV32ID-NEXT: flw fa5, %lo(.LCPI12_0)(a0) -; RV32ID-NEXT: fmv.w.x fa4, zero -; RV32ID-NEXT: fle.s a0, fa4, fa0 -; 
RV32ID-NEXT: flt.s a1, fa5, fa0 -; RV32ID-NEXT: neg s0, a1 -; RV32ID-NEXT: neg s1, a0 +; RV32ID-NEXT: fmv.s fs0, fa0 +; RV32ID-NEXT: fmv.w.x fa5, zero +; RV32ID-NEXT: fle.s a0, fa5, fa0 +; RV32ID-NEXT: neg s0, a0 ; RV32ID-NEXT: call __fixunssfdi -; RV32ID-NEXT: and a0, s1, a0 -; RV32ID-NEXT: and a1, s1, a1 -; RV32ID-NEXT: or a0, s0, a0 -; RV32ID-NEXT: or a1, s0, a1 +; RV32ID-NEXT: and a0, s0, a0 +; RV32ID-NEXT: lui a2, 391168 +; RV32ID-NEXT: and a1, s0, a1 +; RV32ID-NEXT: addi a2, a2, -1 +; RV32ID-NEXT: fmv.w.x fa5, a2 +; RV32ID-NEXT: flt.s a2, fa5, fs0 +; RV32ID-NEXT: neg a2, a2 +; RV32ID-NEXT: or a0, a2, a0 +; RV32ID-NEXT: or a1, a2, a1 ; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32ID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; RV32ID-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32ID-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; RV32ID-NEXT: addi sp, sp, 16 ; RV32ID-NEXT: ret ; @@ -3178,30 +3211,32 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; RV64ID-NEXT: addi sp, sp, 16 ; RV64ID-NEXT: ret ; -; CHECK32-IZFHMIN-LABEL: fcvt_lu_h_sat: -; CHECK32-IZFHMIN: # %bb.0: # %start -; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16 -; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; CHECK32-IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; CHECK32-IZFHMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK32-IZFHMIN-NEXT: flw fa5, %lo(.LCPI12_0)(a0) -; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa0, fa0 -; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, zero -; CHECK32-IZFHMIN-NEXT: fle.s a0, fa4, fa0 -; CHECK32-IZFHMIN-NEXT: flt.s a1, fa5, fa0 -; CHECK32-IZFHMIN-NEXT: neg s0, a1 -; CHECK32-IZFHMIN-NEXT: neg s1, a0 -; CHECK32-IZFHMIN-NEXT: call __fixunssfdi -; CHECK32-IZFHMIN-NEXT: and a0, s1, a0 -; CHECK32-IZFHMIN-NEXT: and a1, s1, a1 -; CHECK32-IZFHMIN-NEXT: or a0, s0, a0 -; CHECK32-IZFHMIN-NEXT: or a1, s0, a1 -; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; CHECK32-IZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; CHECK32-IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload -; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16 -; CHECK32-IZFHMIN-NEXT: ret +; RV32IFZFHMIN-LABEL: fcvt_lu_h_sat: +; RV32IFZFHMIN: # %bb.0: # %start +; RV32IFZFHMIN-NEXT: addi sp, sp, -16 +; RV32IFZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IFZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IFZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill +; RV32IFZFHMIN-NEXT: fcvt.s.h fs0, fa0 +; RV32IFZFHMIN-NEXT: fmv.w.x fa5, zero +; RV32IFZFHMIN-NEXT: fle.s a0, fa5, fs0 +; RV32IFZFHMIN-NEXT: neg s0, a0 +; RV32IFZFHMIN-NEXT: fmv.s fa0, fs0 +; RV32IFZFHMIN-NEXT: call __fixunssfdi +; RV32IFZFHMIN-NEXT: and a0, s0, a0 +; RV32IFZFHMIN-NEXT: lui a2, 391168 +; RV32IFZFHMIN-NEXT: and a1, s0, a1 +; RV32IFZFHMIN-NEXT: addi a2, a2, -1 +; RV32IFZFHMIN-NEXT: fmv.w.x fa5, a2 +; RV32IFZFHMIN-NEXT: flt.s a2, fa5, fs0 +; RV32IFZFHMIN-NEXT: neg a2, a2 +; RV32IFZFHMIN-NEXT: or a0, a2, a0 +; RV32IFZFHMIN-NEXT: or a1, a2, a1 +; RV32IFZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IFZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IFZFHMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload +; RV32IFZFHMIN-NEXT: addi sp, sp, 16 +; RV32IFZFHMIN-NEXT: ret ; ; CHECK64-IZFHMIN-LABEL: fcvt_lu_h_sat: ; CHECK64-IZFHMIN: # %bb.0: # %start @@ -3213,6 +3248,33 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind { ; CHECK64-IZFHMIN-NEXT: and a0, a1, a0 ; CHECK64-IZFHMIN-NEXT: ret ; +; RV32IDZFHMIN-LABEL: fcvt_lu_h_sat: +; RV32IDZFHMIN: # %bb.0: # %start +; RV32IDZFHMIN-NEXT: addi sp, sp, -16 
+; RV32IDZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IDZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IDZFHMIN-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill +; RV32IDZFHMIN-NEXT: fcvt.s.h fs0, fa0 +; RV32IDZFHMIN-NEXT: fmv.w.x fa5, zero +; RV32IDZFHMIN-NEXT: fle.s a0, fa5, fs0 +; RV32IDZFHMIN-NEXT: neg s0, a0 +; RV32IDZFHMIN-NEXT: fmv.s fa0, fs0 +; RV32IDZFHMIN-NEXT: call __fixunssfdi +; RV32IDZFHMIN-NEXT: and a0, s0, a0 +; RV32IDZFHMIN-NEXT: lui a2, 391168 +; RV32IDZFHMIN-NEXT: and a1, s0, a1 +; RV32IDZFHMIN-NEXT: addi a2, a2, -1 +; RV32IDZFHMIN-NEXT: fmv.w.x fa5, a2 +; RV32IDZFHMIN-NEXT: flt.s a2, fa5, fs0 +; RV32IDZFHMIN-NEXT: neg a2, a2 +; RV32IDZFHMIN-NEXT: or a0, a2, a0 +; RV32IDZFHMIN-NEXT: or a1, a2, a1 +; RV32IDZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IDZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IDZFHMIN-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload +; RV32IDZFHMIN-NEXT: addi sp, sp, 16 +; RV32IDZFHMIN-NEXT: ret +; ; CHECK32-IZHINXMIN-LABEL: fcvt_lu_h_sat: ; CHECK32-IZHINXMIN: # %bb.0: # %start ; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 @@ -6282,13 +6344,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_w_s_sat_i16: ; RV32IZFH: # %bb.0: # %start ; RV32IZFH-NEXT: fcvt.s.h fa5, fa0 -; RV32IZFH-NEXT: lui a0, %hi(.LCPI32_0) -; RV32IZFH-NEXT: feq.s a1, fa5, fa5 -; RV32IZFH-NEXT: flw fa4, %lo(.LCPI32_0)(a0) ; RV32IZFH-NEXT: lui a0, 815104 -; RV32IZFH-NEXT: fmv.w.x fa3, a0 -; RV32IZFH-NEXT: fmax.s fa5, fa5, fa3 -; RV32IZFH-NEXT: neg a0, a1 +; RV32IZFH-NEXT: lui a1, 290816 +; RV32IZFH-NEXT: fmv.w.x fa4, a0 +; RV32IZFH-NEXT: feq.s a0, fa5, fa5 +; RV32IZFH-NEXT: addi a1, a1, -512 +; RV32IZFH-NEXT: neg a0, a0 +; RV32IZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV32IZFH-NEXT: fmv.w.x fa4, a1 ; RV32IZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV32IZFH-NEXT: fcvt.w.s a1, fa5, rtz ; RV32IZFH-NEXT: and a0, a0, a1 @@ -6297,13 +6360,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; RV64IZFH-LABEL: fcvt_w_s_sat_i16: ; RV64IZFH: # %bb.0: # %start ; RV64IZFH-NEXT: fcvt.s.h fa5, fa0 -; RV64IZFH-NEXT: lui a0, %hi(.LCPI32_0) -; RV64IZFH-NEXT: feq.s a1, fa5, fa5 -; RV64IZFH-NEXT: flw fa4, %lo(.LCPI32_0)(a0) ; RV64IZFH-NEXT: lui a0, 815104 -; RV64IZFH-NEXT: fmv.w.x fa3, a0 -; RV64IZFH-NEXT: fmax.s fa5, fa5, fa3 -; RV64IZFH-NEXT: neg a0, a1 +; RV64IZFH-NEXT: lui a1, 290816 +; RV64IZFH-NEXT: fmv.w.x fa4, a0 +; RV64IZFH-NEXT: feq.s a0, fa5, fa5 +; RV64IZFH-NEXT: addi a1, a1, -512 +; RV64IZFH-NEXT: neg a0, a0 +; RV64IZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV64IZFH-NEXT: fmv.w.x fa4, a1 ; RV64IZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV64IZFH-NEXT: fcvt.l.s a1, fa5, rtz ; RV64IZFH-NEXT: and a0, a0, a1 @@ -6312,13 +6376,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; RV32IDZFH-LABEL: fcvt_w_s_sat_i16: ; RV32IDZFH: # %bb.0: # %start ; RV32IDZFH-NEXT: fcvt.s.h fa5, fa0 -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI32_0) -; RV32IDZFH-NEXT: feq.s a1, fa5, fa5 -; RV32IDZFH-NEXT: flw fa4, %lo(.LCPI32_0)(a0) ; RV32IDZFH-NEXT: lui a0, 815104 -; RV32IDZFH-NEXT: fmv.w.x fa3, a0 -; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa3 -; RV32IDZFH-NEXT: neg a0, a1 +; RV32IDZFH-NEXT: lui a1, 290816 +; RV32IDZFH-NEXT: fmv.w.x fa4, a0 +; RV32IDZFH-NEXT: feq.s a0, fa5, fa5 +; RV32IDZFH-NEXT: addi a1, a1, -512 +; RV32IDZFH-NEXT: neg a0, a0 +; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV32IDZFH-NEXT: fmv.w.x fa4, a1 ; RV32IDZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV32IDZFH-NEXT: fcvt.w.s a1, fa5, rtz ; RV32IDZFH-NEXT: and a0, a0, a1 @@ -6327,13 +6392,14 @@ define signext i16 
@fcvt_w_s_sat_i16(half %a) nounwind { ; RV64IDZFH-LABEL: fcvt_w_s_sat_i16: ; RV64IDZFH: # %bb.0: # %start ; RV64IDZFH-NEXT: fcvt.s.h fa5, fa0 -; RV64IDZFH-NEXT: lui a0, %hi(.LCPI32_0) -; RV64IDZFH-NEXT: feq.s a1, fa5, fa5 -; RV64IDZFH-NEXT: flw fa4, %lo(.LCPI32_0)(a0) ; RV64IDZFH-NEXT: lui a0, 815104 -; RV64IDZFH-NEXT: fmv.w.x fa3, a0 -; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa3 -; RV64IDZFH-NEXT: neg a0, a1 +; RV64IDZFH-NEXT: lui a1, 290816 +; RV64IDZFH-NEXT: fmv.w.x fa4, a0 +; RV64IDZFH-NEXT: feq.s a0, fa5, fa5 +; RV64IDZFH-NEXT: addi a1, a1, -512 +; RV64IDZFH-NEXT: neg a0, a0 +; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV64IDZFH-NEXT: fmv.w.x fa4, a1 ; RV64IDZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV64IDZFH-NEXT: fcvt.l.s a1, fa5, rtz ; RV64IDZFH-NEXT: and a0, a0, a1 @@ -6491,13 +6557,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ID-ILP32-NEXT: call __extendhfsf2 ; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0 -; RV32ID-ILP32-NEXT: lui a0, %hi(.LCPI32_0) -; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5 -; RV32ID-ILP32-NEXT: flw fa4, %lo(.LCPI32_0)(a0) ; RV32ID-ILP32-NEXT: lui a0, 815104 -; RV32ID-ILP32-NEXT: fmv.w.x fa3, a0 -; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa3 -; RV32ID-ILP32-NEXT: neg a0, a1 +; RV32ID-ILP32-NEXT: lui a1, 290816 +; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0 +; RV32ID-ILP32-NEXT: feq.s a0, fa5, fa5 +; RV32ID-ILP32-NEXT: addi a1, a1, -512 +; RV32ID-ILP32-NEXT: neg a0, a0 +; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa4 +; RV32ID-ILP32-NEXT: fmv.w.x fa4, a1 ; RV32ID-ILP32-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-ILP32-NEXT: fcvt.w.s a1, fa5, rtz ; RV32ID-ILP32-NEXT: and a0, a0, a1 @@ -6511,13 +6578,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64ID-LP64-NEXT: call __extendhfsf2 ; RV64ID-LP64-NEXT: fmv.w.x fa5, a0 -; RV64ID-LP64-NEXT: lui a0, %hi(.LCPI32_0) -; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5 -; RV64ID-LP64-NEXT: flw fa4, %lo(.LCPI32_0)(a0) ; RV64ID-LP64-NEXT: lui a0, 815104 -; RV64ID-LP64-NEXT: fmv.w.x fa3, a0 -; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa3 -; RV64ID-LP64-NEXT: neg a0, a1 +; RV64ID-LP64-NEXT: lui a1, 290816 +; RV64ID-LP64-NEXT: fmv.w.x fa4, a0 +; RV64ID-LP64-NEXT: feq.s a0, fa5, fa5 +; RV64ID-LP64-NEXT: addi a1, a1, -512 +; RV64ID-LP64-NEXT: neg a0, a0 +; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa4 +; RV64ID-LP64-NEXT: fmv.w.x fa4, a1 ; RV64ID-LP64-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-LP64-NEXT: fcvt.l.s a1, fa5, rtz ; RV64ID-LP64-NEXT: and a0, a0, a1 @@ -6531,13 +6599,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ID-NEXT: call __extendhfsf2 ; RV32ID-NEXT: feq.s a0, fa0, fa0 -; RV32ID-NEXT: lui a1, %hi(.LCPI32_0) -; RV32ID-NEXT: flw fa5, %lo(.LCPI32_0)(a1) ; RV32ID-NEXT: lui a1, 815104 -; RV32ID-NEXT: fmv.w.x fa4, a1 -; RV32ID-NEXT: fmax.s fa4, fa0, fa4 +; RV32ID-NEXT: fmv.w.x fa5, a1 +; RV32ID-NEXT: lui a1, 290816 ; RV32ID-NEXT: neg a0, a0 -; RV32ID-NEXT: fmin.s fa5, fa4, fa5 +; RV32ID-NEXT: addi a1, a1, -512 +; RV32ID-NEXT: fmax.s fa5, fa0, fa5 +; RV32ID-NEXT: fmv.w.x fa4, a1 +; RV32ID-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-NEXT: fcvt.w.s a1, fa5, rtz ; RV32ID-NEXT: and a0, a0, a1 ; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -6550,13 +6619,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64ID-NEXT: call __extendhfsf2 ; RV64ID-NEXT: feq.s a0, fa0, fa0 -; RV64ID-NEXT: lui a1, %hi(.LCPI32_0) -; RV64ID-NEXT: 
flw fa5, %lo(.LCPI32_0)(a1) ; RV64ID-NEXT: lui a1, 815104 -; RV64ID-NEXT: fmv.w.x fa4, a1 -; RV64ID-NEXT: fmax.s fa4, fa0, fa4 +; RV64ID-NEXT: fmv.w.x fa5, a1 +; RV64ID-NEXT: lui a1, 290816 ; RV64ID-NEXT: neg a0, a0 -; RV64ID-NEXT: fmin.s fa5, fa4, fa5 +; RV64ID-NEXT: addi a1, a1, -512 +; RV64ID-NEXT: fmax.s fa5, fa0, fa5 +; RV64ID-NEXT: fmv.w.x fa4, a1 +; RV64ID-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-NEXT: fcvt.l.s a1, fa5, rtz ; RV64ID-NEXT: and a0, a0, a1 ; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -6566,13 +6636,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; CHECK32-IZFHMIN-LABEL: fcvt_w_s_sat_i16: ; CHECK32-IZFHMIN: # %bb.0: # %start ; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI32_0) -; CHECK32-IZFHMIN-NEXT: feq.s a1, fa5, fa5 -; CHECK32-IZFHMIN-NEXT: flw fa4, %lo(.LCPI32_0)(a0) ; CHECK32-IZFHMIN-NEXT: lui a0, 815104 -; CHECK32-IZFHMIN-NEXT: fmv.w.x fa3, a0 -; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa3 -; CHECK32-IZFHMIN-NEXT: neg a0, a1 +; CHECK32-IZFHMIN-NEXT: lui a1, 290816 +; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a0 +; CHECK32-IZFHMIN-NEXT: feq.s a0, fa5, fa5 +; CHECK32-IZFHMIN-NEXT: addi a1, a1, -512 +; CHECK32-IZFHMIN-NEXT: neg a0, a0 +; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a1 ; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK32-IZFHMIN-NEXT: fcvt.w.s a1, fa5, rtz ; CHECK32-IZFHMIN-NEXT: and a0, a0, a1 @@ -6581,13 +6652,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; CHECK64-IZFHMIN-LABEL: fcvt_w_s_sat_i16: ; CHECK64-IZFHMIN: # %bb.0: # %start ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 -; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI32_0) -; CHECK64-IZFHMIN-NEXT: feq.s a1, fa5, fa5 -; CHECK64-IZFHMIN-NEXT: flw fa4, %lo(.LCPI32_0)(a0) ; CHECK64-IZFHMIN-NEXT: lui a0, 815104 -; CHECK64-IZFHMIN-NEXT: fmv.w.x fa3, a0 -; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa3 -; CHECK64-IZFHMIN-NEXT: neg a0, a1 +; CHECK64-IZFHMIN-NEXT: lui a1, 290816 +; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a0 +; CHECK64-IZFHMIN-NEXT: feq.s a0, fa5, fa5 +; CHECK64-IZFHMIN-NEXT: addi a1, a1, -512 +; CHECK64-IZFHMIN-NEXT: neg a0, a0 +; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a1 ; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a1, fa5, rtz ; CHECK64-IZFHMIN-NEXT: and a0, a0, a1 @@ -6802,45 +6874,49 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind { define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_wu_s_sat_i16: ; RV32IZFH: # %bb.0: # %start -; RV32IZFH-NEXT: lui a0, %hi(.LCPI34_0) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI34_0)(a0) -; RV32IZFH-NEXT: fcvt.s.h fa4, fa0 -; RV32IZFH-NEXT: fmv.w.x fa3, zero -; RV32IZFH-NEXT: fmax.s fa4, fa4, fa3 -; RV32IZFH-NEXT: fmin.s fa5, fa4, fa5 +; RV32IZFH-NEXT: fcvt.s.h fa5, fa0 +; RV32IZFH-NEXT: fmv.w.x fa4, zero +; RV32IZFH-NEXT: lui a0, 292864 +; RV32IZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV32IZFH-NEXT: addi a0, a0, -256 +; RV32IZFH-NEXT: fmv.w.x fa4, a0 +; RV32IZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV32IZFH-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32IZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcvt_wu_s_sat_i16: ; RV64IZFH: # %bb.0: # %start -; RV64IZFH-NEXT: lui a0, %hi(.LCPI34_0) -; RV64IZFH-NEXT: flw fa5, %lo(.LCPI34_0)(a0) -; RV64IZFH-NEXT: fcvt.s.h fa4, fa0 -; RV64IZFH-NEXT: fmv.w.x fa3, zero -; RV64IZFH-NEXT: fmax.s fa4, fa4, fa3 -; RV64IZFH-NEXT: fmin.s fa5, fa4, fa5 +; RV64IZFH-NEXT: fcvt.s.h fa5, fa0 +; RV64IZFH-NEXT: fmv.w.x fa4, zero +; RV64IZFH-NEXT: lui a0, 292864 +; 
RV64IZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV64IZFH-NEXT: addi a0, a0, -256 +; RV64IZFH-NEXT: fmv.w.x fa4, a0 +; RV64IZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV64IZFH-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFH-NEXT: ret ; ; RV32IDZFH-LABEL: fcvt_wu_s_sat_i16: ; RV32IDZFH: # %bb.0: # %start -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI34_0) -; RV32IDZFH-NEXT: flw fa5, %lo(.LCPI34_0)(a0) -; RV32IDZFH-NEXT: fcvt.s.h fa4, fa0 -; RV32IDZFH-NEXT: fmv.w.x fa3, zero -; RV32IDZFH-NEXT: fmax.s fa4, fa4, fa3 -; RV32IDZFH-NEXT: fmin.s fa5, fa4, fa5 +; RV32IDZFH-NEXT: fcvt.s.h fa5, fa0 +; RV32IDZFH-NEXT: fmv.w.x fa4, zero +; RV32IDZFH-NEXT: lui a0, 292864 +; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV32IDZFH-NEXT: addi a0, a0, -256 +; RV32IDZFH-NEXT: fmv.w.x fa4, a0 +; RV32IDZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV32IDZFH-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32IDZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_wu_s_sat_i16: ; RV64IDZFH: # %bb.0: # %start -; RV64IDZFH-NEXT: lui a0, %hi(.LCPI34_0) -; RV64IDZFH-NEXT: flw fa5, %lo(.LCPI34_0)(a0) -; RV64IDZFH-NEXT: fcvt.s.h fa4, fa0 -; RV64IDZFH-NEXT: fmv.w.x fa3, zero -; RV64IDZFH-NEXT: fmax.s fa4, fa4, fa3 -; RV64IDZFH-NEXT: fmin.s fa5, fa4, fa5 +; RV64IDZFH-NEXT: fcvt.s.h fa5, fa0 +; RV64IDZFH-NEXT: fmv.w.x fa4, zero +; RV64IDZFH-NEXT: lui a0, 292864 +; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa4 +; RV64IDZFH-NEXT: addi a0, a0, -256 +; RV64IDZFH-NEXT: fmv.w.x fa4, a0 +; RV64IDZFH-NEXT: fmin.s fa5, fa5, fa4 ; RV64IDZFH-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IDZFH-NEXT: ret ; @@ -6971,12 +7047,13 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind { ; RV32ID-ILP32-NEXT: addi sp, sp, -16 ; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ID-ILP32-NEXT: call __extendhfsf2 -; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI34_0) -; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI34_0)(a1) +; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0 +; RV32ID-ILP32-NEXT: fmv.w.x fa4, zero +; RV32ID-ILP32-NEXT: lui a0, 292864 +; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa4 +; RV32ID-ILP32-NEXT: addi a0, a0, -256 ; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0 -; RV32ID-ILP32-NEXT: fmv.w.x fa3, zero -; RV32ID-ILP32-NEXT: fmax.s fa4, fa4, fa3 -; RV32ID-ILP32-NEXT: fmin.s fa5, fa4, fa5 +; RV32ID-ILP32-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32ID-ILP32-NEXT: addi sp, sp, 16 @@ -6987,12 +7064,13 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind { ; RV64ID-LP64-NEXT: addi sp, sp, -16 ; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64ID-LP64-NEXT: call __extendhfsf2 -; RV64ID-LP64-NEXT: lui a1, %hi(.LCPI34_0) -; RV64ID-LP64-NEXT: flw fa5, %lo(.LCPI34_0)(a1) +; RV64ID-LP64-NEXT: fmv.w.x fa5, a0 +; RV64ID-LP64-NEXT: fmv.w.x fa4, zero +; RV64ID-LP64-NEXT: lui a0, 292864 +; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa4 +; RV64ID-LP64-NEXT: addi a0, a0, -256 ; RV64ID-LP64-NEXT: fmv.w.x fa4, a0 -; RV64ID-LP64-NEXT: fmv.w.x fa3, zero -; RV64ID-LP64-NEXT: fmax.s fa4, fa4, fa3 -; RV64ID-LP64-NEXT: fmin.s fa5, fa4, fa5 +; RV64ID-LP64-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64ID-LP64-NEXT: addi sp, sp, 16 @@ -7003,11 +7081,12 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind { ; RV32ID-NEXT: addi sp, sp, -16 ; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32ID-NEXT: call __extendhfsf2 -; RV32ID-NEXT: lui a0, %hi(.LCPI34_0) -; RV32ID-NEXT: flw fa5, %lo(.LCPI34_0)(a0) -; RV32ID-NEXT: fmv.w.x fa4, zero -; RV32ID-NEXT: fmax.s fa4, fa0, fa4 -; RV32ID-NEXT: fmin.s fa5, 
fa4, fa5 +; RV32ID-NEXT: fmv.w.x fa5, zero +; RV32ID-NEXT: lui a0, 292864 +; RV32ID-NEXT: fmax.s fa5, fa0, fa5 +; RV32ID-NEXT: addi a0, a0, -256 +; RV32ID-NEXT: fmv.w.x fa4, a0 +; RV32ID-NEXT: fmin.s fa5, fa5, fa4 ; RV32ID-NEXT: fcvt.wu.s a0, fa5, rtz ; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32ID-NEXT: addi sp, sp, 16 @@ -7018,11 +7097,12 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind { ; RV64ID-NEXT: addi sp, sp, -16 ; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64ID-NEXT: call __extendhfsf2 -; RV64ID-NEXT: lui a0, %hi(.LCPI34_0) -; RV64ID-NEXT: flw fa5, %lo(.LCPI34_0)(a0) -; RV64ID-NEXT: fmv.w.x fa4, zero -; RV64ID-NEXT: fmax.s fa4, fa0, fa4 -; RV64ID-NEXT: fmin.s fa5, fa4, fa5 +; RV64ID-NEXT: fmv.w.x fa5, zero +; RV64ID-NEXT: lui a0, 292864 +; RV64ID-NEXT: fmax.s fa5, fa0, fa5 +; RV64ID-NEXT: addi a0, a0, -256 +; RV64ID-NEXT: fmv.w.x fa4, a0 +; RV64ID-NEXT: fmin.s fa5, fa5, fa4 ; RV64ID-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64ID-NEXT: addi sp, sp, 16 @@ -7030,23 +7110,25 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind { ; ; CHECK32-IZFHMIN-LABEL: fcvt_wu_s_sat_i16: ; CHECK32-IZFHMIN: # %bb.0: # %start -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI34_0) -; CHECK32-IZFHMIN-NEXT: flw fa5, %lo(.LCPI34_0)(a0) -; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa4, fa0 -; CHECK32-IZFHMIN-NEXT: fmv.w.x fa3, zero -; CHECK32-IZFHMIN-NEXT: fmax.s fa4, fa4, fa3 -; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5 +; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 +; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, zero +; CHECK32-IZFHMIN-NEXT: lui a0, 292864 +; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK32-IZFHMIN-NEXT: addi a0, a0, -256 +; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a0 +; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK32-IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; CHECK32-IZFHMIN-NEXT: ret ; ; CHECK64-IZFHMIN-LABEL: fcvt_wu_s_sat_i16: ; CHECK64-IZFHMIN: # %bb.0: # %start -; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI34_0) -; CHECK64-IZFHMIN-NEXT: flw fa5, %lo(.LCPI34_0)(a0) -; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa4, fa0 -; CHECK64-IZFHMIN-NEXT: fmv.w.x fa3, zero -; CHECK64-IZFHMIN-NEXT: fmax.s fa4, fa4, fa3 -; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5 +; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 +; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, zero +; CHECK64-IZFHMIN-NEXT: lui a0, 292864 +; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4 +; CHECK64-IZFHMIN-NEXT: addi a0, a0, -256 +; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a0 +; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/half-imm.ll b/llvm/test/CodeGen/RISCV/half-imm.ll index d68e19d15b4bb..1dc0da8c04dba 100644 --- a/llvm/test/CodeGen/RISCV/half-imm.ll +++ b/llvm/test/CodeGen/RISCV/half-imm.ll @@ -24,8 +24,9 @@ define half @half_imm() nounwind { ; CHECK-LABEL: half_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa0, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: lui a0, 4 +; CHECK-NEXT: addi a0, a0, 512 +; CHECK-NEXT: fmv.h.x fa0, a0 ; CHECK-NEXT: ret ; ; RV32IZHINX-LABEL: half_imm: @@ -44,8 +45,9 @@ define half @half_imm() nounwind { ; ; CHECKIZFHMIN-LABEL: half_imm: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI0_0) -; CHECKIZFHMIN-NEXT: flh fa0, %lo(.LCPI0_0)(a0) +; CHECKIZFHMIN-NEXT: lui a0, 4 +; CHECKIZFHMIN-NEXT: addi a0, a0, 512 +; CHECKIZFHMIN-NEXT: fmv.h.x fa0, a0 ; CHECKIZFHMIN-NEXT: ret ; ; CHECKIZHINXMIN-LABEL: half_imm: @@ -60,8 +62,9 @@ define half @half_imm() 
nounwind { define half @half_imm_op(half %a) nounwind { ; CHECK-LABEL: half_imm_op: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 15 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: fadd.h fa0, fa0, fa5 ; CHECK-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll index 4f0026175e7c7..e16d788f66ede 100644 --- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll @@ -2222,8 +2222,9 @@ declare half @llvm.floor.f16(half) define half @floor_f16(half %a) nounwind { ; CHECKIZFH-LABEL: floor_f16: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI18_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB18_2 @@ -2313,8 +2314,9 @@ declare half @llvm.ceil.f16(half) define half @ceil_f16(half %a) nounwind { ; CHECKIZFH-LABEL: ceil_f16: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI19_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI19_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB19_2 @@ -2404,8 +2406,9 @@ declare half @llvm.trunc.f16(half) define half @trunc_f16(half %a) nounwind { ; CHECKIZFH-LABEL: trunc_f16: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI20_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB20_2 @@ -2495,8 +2498,9 @@ declare half @llvm.rint.f16(half) define half @rint_f16(half %a) nounwind { ; CHECKIZFH-LABEL: rint_f16: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI21_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI21_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB21_2 @@ -2706,8 +2710,9 @@ declare half @llvm.round.f16(half) define half @round_f16(half %a) nounwind { ; CHECKIZFH-LABEL: round_f16: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI23_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI23_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB23_2 @@ -2797,8 +2802,9 @@ declare half @llvm.roundeven.f16(half) define half @roundeven_f16(half %a) nounwind { ; CHECKIZFH-LABEL: roundeven_f16: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI24_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI24_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB24_2 diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll index 3b645bf8aef91..c815bc19e280c 100644 --- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll @@ -95,8 +95,9 @@ define signext i32 
@test_floor_si32(half %x) { define i64 @test_floor_si64(half %x) nounwind { ; RV32IZFH-LABEL: test_floor_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB1_2 @@ -121,8 +122,9 @@ define i64 @test_floor_si64(half %x) nounwind { ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: mv a2, a1 ; RV32IZFH-NEXT: .LBB1_4: -; RV32IZFH-NEXT: lui a1, %hi(.LCPI1_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI1_1)(a1) +; RV32IZFH-NEXT: lui a1, 389120 +; RV32IZFH-NEXT: addi a1, a1, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a1 ; RV32IZFH-NEXT: flt.s a1, fa5, fs0 ; RV32IZFH-NEXT: beqz a1, .LBB1_6 ; RV32IZFH-NEXT: # %bb.5: @@ -248,8 +250,9 @@ define i64 @test_floor_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: mv a2, a1 ; RV32IZFHMIN-NEXT: .LBB1_4: -; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI1_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI1_0)(a1) +; RV32IZFHMIN-NEXT: lui a1, 389120 +; RV32IZFHMIN-NEXT: addi a1, a1, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1 ; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0 ; RV32IZFHMIN-NEXT: beqz a1, .LBB1_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -506,8 +509,9 @@ define signext i32 @test_floor_ui32(half %x) { define i64 @test_floor_ui64(half %x) nounwind { ; RV32IZFH-LABEL: test_floor_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI3_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB3_2 @@ -526,10 +530,11 @@ define i64 @test_floor_ui64(half %x) nounwind { ; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: lui a2, %hi(.LCPI3_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI3_1)(a2) ; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: lui a2, 391168 ; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: addi a2, a2, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a2 ; RV32IZFH-NEXT: flt.s a2, fa5, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: or a0, a2, a0 @@ -627,10 +632,11 @@ define i64 @test_floor_ui64(half %x) nounwind { ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI3_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 +; RV32IZFHMIN-NEXT: lui a2, 391168 ; RV32IZFHMIN-NEXT: and a1, s0, a1 +; RV32IZFHMIN-NEXT: addi a2, a2, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2 ; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: or a0, a2, a0 @@ -803,8 +809,9 @@ define signext i32 @test_ceil_si32(half %x) { define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZFH-LABEL: test_ceil_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI5_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB5_2 @@ -829,8 +836,9 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: mv a2, a1 ; RV32IZFH-NEXT: .LBB5_4: -; RV32IZFH-NEXT: lui a1, %hi(.LCPI5_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI5_1)(a1) +; RV32IZFH-NEXT: lui a1, 389120 +; RV32IZFH-NEXT: 
addi a1, a1, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a1 ; RV32IZFH-NEXT: flt.s a1, fa5, fs0 ; RV32IZFH-NEXT: beqz a1, .LBB5_6 ; RV32IZFH-NEXT: # %bb.5: @@ -956,8 +964,9 @@ define i64 @test_ceil_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: mv a2, a1 ; RV32IZFHMIN-NEXT: .LBB5_4: -; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI5_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI5_0)(a1) +; RV32IZFHMIN-NEXT: lui a1, 389120 +; RV32IZFHMIN-NEXT: addi a1, a1, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1 ; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0 ; RV32IZFHMIN-NEXT: beqz a1, .LBB5_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -1214,8 +1223,9 @@ define signext i32 @test_ceil_ui32(half %x) { define i64 @test_ceil_ui64(half %x) nounwind { ; RV32IZFH-LABEL: test_ceil_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI7_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB7_2 @@ -1234,10 +1244,11 @@ define i64 @test_ceil_ui64(half %x) nounwind { ; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: lui a2, %hi(.LCPI7_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI7_1)(a2) ; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: lui a2, 391168 ; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: addi a2, a2, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a2 ; RV32IZFH-NEXT: flt.s a2, fa5, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: or a0, a2, a0 @@ -1335,10 +1346,11 @@ define i64 @test_ceil_ui64(half %x) nounwind { ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI7_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI7_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 +; RV32IZFHMIN-NEXT: lui a2, 391168 ; RV32IZFHMIN-NEXT: and a1, s0, a1 +; RV32IZFHMIN-NEXT: addi a2, a2, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2 ; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: or a0, a2, a0 @@ -1511,8 +1523,9 @@ define signext i32 @test_trunc_si32(half %x) { define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZFH-LABEL: test_trunc_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI9_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB9_2 @@ -1537,8 +1550,9 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: mv a2, a1 ; RV32IZFH-NEXT: .LBB9_4: -; RV32IZFH-NEXT: lui a1, %hi(.LCPI9_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI9_1)(a1) +; RV32IZFH-NEXT: lui a1, 389120 +; RV32IZFH-NEXT: addi a1, a1, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a1 ; RV32IZFH-NEXT: flt.s a1, fa5, fs0 ; RV32IZFH-NEXT: beqz a1, .LBB9_6 ; RV32IZFH-NEXT: # %bb.5: @@ -1664,8 +1678,9 @@ define i64 @test_trunc_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: mv a2, a1 ; RV32IZFHMIN-NEXT: .LBB9_4: -; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI9_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI9_0)(a1) +; RV32IZFHMIN-NEXT: lui a1, 389120 +; RV32IZFHMIN-NEXT: addi a1, a1, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1 ; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0 ; RV32IZFHMIN-NEXT: beqz a1, .LBB9_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -1922,8 +1937,9 @@ define signext i32 @test_trunc_ui32(half %x) { define i64 
@test_trunc_ui64(half %x) nounwind { ; RV32IZFH-LABEL: test_trunc_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI11_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB11_2 @@ -1942,10 +1958,11 @@ define i64 @test_trunc_ui64(half %x) nounwind { ; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: lui a2, %hi(.LCPI11_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI11_1)(a2) ; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: lui a2, 391168 ; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: addi a2, a2, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a2 ; RV32IZFH-NEXT: flt.s a2, fa5, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: or a0, a2, a0 @@ -2043,10 +2060,11 @@ define i64 @test_trunc_ui64(half %x) nounwind { ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI11_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI11_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 +; RV32IZFHMIN-NEXT: lui a2, 391168 ; RV32IZFHMIN-NEXT: and a1, s0, a1 +; RV32IZFHMIN-NEXT: addi a2, a2, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2 ; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: or a0, a2, a0 @@ -2219,8 +2237,9 @@ define signext i32 @test_round_si32(half %x) { define i64 @test_round_si64(half %x) nounwind { ; RV32IZFH-LABEL: test_round_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI13_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI13_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB13_2 @@ -2245,8 +2264,9 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: mv a2, a1 ; RV32IZFH-NEXT: .LBB13_4: -; RV32IZFH-NEXT: lui a1, %hi(.LCPI13_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI13_1)(a1) +; RV32IZFH-NEXT: lui a1, 389120 +; RV32IZFH-NEXT: addi a1, a1, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a1 ; RV32IZFH-NEXT: flt.s a1, fa5, fs0 ; RV32IZFH-NEXT: beqz a1, .LBB13_6 ; RV32IZFH-NEXT: # %bb.5: @@ -2372,8 +2392,9 @@ define i64 @test_round_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: mv a2, a1 ; RV32IZFHMIN-NEXT: .LBB13_4: -; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI13_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI13_0)(a1) +; RV32IZFHMIN-NEXT: lui a1, 389120 +; RV32IZFHMIN-NEXT: addi a1, a1, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1 ; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0 ; RV32IZFHMIN-NEXT: beqz a1, .LBB13_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -2630,8 +2651,9 @@ define signext i32 @test_round_ui32(half %x) { define i64 @test_round_ui64(half %x) nounwind { ; RV32IZFH-LABEL: test_round_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI15_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI15_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB15_2 @@ -2650,10 +2672,11 @@ define i64 @test_round_ui64(half %x) nounwind { ; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: lui a2, %hi(.LCPI15_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI15_1)(a2) ; RV32IZFH-NEXT: and a0, s0, 
a0 +; RV32IZFH-NEXT: lui a2, 391168 ; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: addi a2, a2, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a2 ; RV32IZFH-NEXT: flt.s a2, fa5, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: or a0, a2, a0 @@ -2751,10 +2774,11 @@ define i64 @test_round_ui64(half %x) nounwind { ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI15_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI15_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 +; RV32IZFHMIN-NEXT: lui a2, 391168 ; RV32IZFHMIN-NEXT: and a1, s0, a1 +; RV32IZFHMIN-NEXT: addi a2, a2, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2 ; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: or a0, a2, a0 @@ -2927,8 +2951,9 @@ define signext i32 @test_roundeven_si32(half %x) { define i64 @test_roundeven_si64(half %x) nounwind { ; RV32IZFH-LABEL: test_roundeven_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI17_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI17_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB17_2 @@ -2953,8 +2978,9 @@ define i64 @test_roundeven_si64(half %x) nounwind { ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: mv a2, a1 ; RV32IZFH-NEXT: .LBB17_4: -; RV32IZFH-NEXT: lui a1, %hi(.LCPI17_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI17_1)(a1) +; RV32IZFH-NEXT: lui a1, 389120 +; RV32IZFH-NEXT: addi a1, a1, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a1 ; RV32IZFH-NEXT: flt.s a1, fa5, fs0 ; RV32IZFH-NEXT: beqz a1, .LBB17_6 ; RV32IZFH-NEXT: # %bb.5: @@ -3080,8 +3106,9 @@ define i64 @test_roundeven_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: mv a2, a1 ; RV32IZFHMIN-NEXT: .LBB17_4: -; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI17_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI17_0)(a1) +; RV32IZFHMIN-NEXT: lui a1, 389120 +; RV32IZFHMIN-NEXT: addi a1, a1, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1 ; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0 ; RV32IZFHMIN-NEXT: beqz a1, .LBB17_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -3338,8 +3365,9 @@ define signext i32 @test_roundeven_ui32(half %x) { define i64 @test_roundeven_ui64(half %x) nounwind { ; RV32IZFH-LABEL: test_roundeven_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI19_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI19_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB19_2 @@ -3358,10 +3386,11 @@ define i64 @test_roundeven_ui64(half %x) nounwind { ; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: lui a2, %hi(.LCPI19_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI19_1)(a2) ; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: lui a2, 391168 ; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: addi a2, a2, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a2 ; RV32IZFH-NEXT: flt.s a2, fa5, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: or a0, a2, a0 @@ -3459,10 +3488,11 @@ define i64 @test_roundeven_ui64(half %x) nounwind { ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI19_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI19_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 +; RV32IZFHMIN-NEXT: lui a2, 391168 ; RV32IZFHMIN-NEXT: and a1, s0, a1 +; 
RV32IZFHMIN-NEXT: addi a2, a2, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2 ; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: or a0, a2, a0 @@ -3635,8 +3665,9 @@ define signext i32 @test_rint_si32(half %x) { define i64 @test_rint_si64(half %x) nounwind { ; RV32IZFH-LABEL: test_rint_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI21_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI21_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB21_2 @@ -3661,8 +3692,9 @@ define i64 @test_rint_si64(half %x) nounwind { ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: mv a2, a1 ; RV32IZFH-NEXT: .LBB21_4: -; RV32IZFH-NEXT: lui a1, %hi(.LCPI21_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI21_1)(a1) +; RV32IZFH-NEXT: lui a1, 389120 +; RV32IZFH-NEXT: addi a1, a1, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a1 ; RV32IZFH-NEXT: flt.s a1, fa5, fs0 ; RV32IZFH-NEXT: beqz a1, .LBB21_6 ; RV32IZFH-NEXT: # %bb.5: @@ -3788,8 +3820,9 @@ define i64 @test_rint_si64(half %x) nounwind { ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: mv a2, a1 ; RV32IZFHMIN-NEXT: .LBB21_4: -; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI21_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI21_0)(a1) +; RV32IZFHMIN-NEXT: lui a1, 389120 +; RV32IZFHMIN-NEXT: addi a1, a1, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1 ; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0 ; RV32IZFHMIN-NEXT: beqz a1, .LBB21_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -4046,8 +4079,9 @@ define signext i32 @test_rint_ui32(half %x) { define i64 @test_rint_ui64(half %x) nounwind { ; RV32IZFH-LABEL: test_rint_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI23_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI23_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB23_2 @@ -4066,10 +4100,11 @@ define i64 @test_rint_ui64(half %x) nounwind { ; RV32IZFH-NEXT: neg s0, a0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixunssfdi -; RV32IZFH-NEXT: lui a2, %hi(.LCPI23_1) -; RV32IZFH-NEXT: flw fa5, %lo(.LCPI23_1)(a2) ; RV32IZFH-NEXT: and a0, s0, a0 +; RV32IZFH-NEXT: lui a2, 391168 ; RV32IZFH-NEXT: and a1, s0, a1 +; RV32IZFH-NEXT: addi a2, a2, -1 +; RV32IZFH-NEXT: fmv.w.x fa5, a2 ; RV32IZFH-NEXT: flt.s a2, fa5, fs0 ; RV32IZFH-NEXT: neg a2, a2 ; RV32IZFH-NEXT: or a0, a2, a0 @@ -4167,10 +4202,11 @@ define i64 @test_rint_ui64(half %x) nounwind { ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI23_0) -; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI23_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 +; RV32IZFHMIN-NEXT: lui a2, 391168 ; RV32IZFHMIN-NEXT: and a1, s0, a1 +; RV32IZFHMIN-NEXT: addi a2, a2, -1 +; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2 ; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 ; RV32IZFHMIN-NEXT: or a0, a2, a0 diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll index 8a787ee578990..cfc997d66ec56 100644 --- a/llvm/test/CodeGen/RISCV/half-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll @@ -309,8 +309,9 @@ define signext i32 @test_floor_si32(half %x) { define i64 @test_floor_si64(half %x) { ; RV32IZFH-LABEL: test_floor_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI3_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; 
RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB3_2 @@ -754,8 +755,9 @@ define signext i32 @test_floor_ui32(half %x) { define i64 @test_floor_ui64(half %x) { ; RV32IZFH-LABEL: test_floor_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI7_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB7_2 @@ -1199,8 +1201,9 @@ define signext i32 @test_ceil_si32(half %x) { define i64 @test_ceil_si64(half %x) { ; RV32IZFH-LABEL: test_ceil_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI11_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB11_2 @@ -1644,8 +1647,9 @@ define signext i32 @test_ceil_ui32(half %x) { define i64 @test_ceil_ui64(half %x) { ; RV32IZFH-LABEL: test_ceil_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI15_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI15_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB15_2 @@ -2089,8 +2093,9 @@ define signext i32 @test_trunc_si32(half %x) { define i64 @test_trunc_si64(half %x) { ; RV32IZFH-LABEL: test_trunc_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI19_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI19_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB19_2 @@ -2534,8 +2539,9 @@ define signext i32 @test_trunc_ui32(half %x) { define i64 @test_trunc_ui64(half %x) { ; RV32IZFH-LABEL: test_trunc_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI23_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI23_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB23_2 @@ -2979,8 +2985,9 @@ define signext i32 @test_round_si32(half %x) { define i64 @test_round_si64(half %x) { ; RV32IZFH-LABEL: test_round_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI27_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI27_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB27_2 @@ -3424,8 +3431,9 @@ define signext i32 @test_round_ui32(half %x) { define i64 @test_round_ui64(half %x) { ; RV32IZFH-LABEL: test_round_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI31_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI31_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB31_2 @@ -3869,8 +3877,9 @@ define signext i32 @test_roundeven_si32(half %x) { define i64 @test_roundeven_si64(half %x) { ; RV32IZFH-LABEL: test_roundeven_si64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, 
%hi(.LCPI35_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI35_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB35_2 @@ -4314,8 +4323,9 @@ define signext i32 @test_roundeven_ui32(half %x) { define i64 @test_roundeven_ui64(half %x) { ; RV32IZFH-LABEL: test_roundeven_ui64: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI39_0) -; RV32IZFH-NEXT: flh fa5, %lo(.LCPI39_0)(a0) +; RV32IZFH-NEXT: li a0, 25 +; RV32IZFH-NEXT: slli a0, a0, 10 +; RV32IZFH-NEXT: fmv.h.x fa5, a0 ; RV32IZFH-NEXT: fabs.h fa4, fa0 ; RV32IZFH-NEXT: flt.h a0, fa4, fa5 ; RV32IZFH-NEXT: beqz a0, .LBB39_2 @@ -4490,8 +4500,9 @@ define half @test_floor_half(half %x) { ; RV64IFD-NEXT: ret ; CHECKIZFH-LABEL: test_floor_half: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI40_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI40_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB40_2 @@ -4574,8 +4585,9 @@ define half @test_ceil_half(half %x) { ; RV64IFD-NEXT: ret ; CHECKIZFH-LABEL: test_ceil_half: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI41_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI41_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB41_2 @@ -4658,8 +4670,9 @@ define half @test_trunc_half(half %x) { ; RV64IFD-NEXT: ret ; CHECKIZFH-LABEL: test_trunc_half: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI42_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI42_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB42_2 @@ -4742,8 +4755,9 @@ define half @test_round_half(half %x) { ; RV64IFD-NEXT: ret ; CHECKIZFH-LABEL: test_round_half: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI43_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI43_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB43_2 @@ -4826,8 +4840,9 @@ define half @test_roundeven_half(half %x) { ; RV64IFD-NEXT: ret ; CHECKIZFH-LABEL: test_roundeven_half: ; CHECKIZFH: # %bb.0: -; CHECKIZFH-NEXT: lui a0, %hi(.LCPI44_0) -; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI44_0)(a0) +; CHECKIZFH-NEXT: li a0, 25 +; CHECKIZFH-NEXT: slli a0, a0, 10 +; CHECKIZFH-NEXT: fmv.h.x fa5, a0 ; CHECKIZFH-NEXT: fabs.h fa4, fa0 ; CHECKIZFH-NEXT: flt.h a0, fa4, fa5 ; CHECKIZFH-NEXT: beqz a0, .LBB44_2 diff --git a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll index bf535b1cbd084..e9699502ed3a9 100644 --- a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll @@ -878,8 +878,9 @@ define signext i32 @select_fcmp_uge_1_2(half %a, half %b) nounwind { define half @CascadedSelect(half noundef %a) { ; CHECK-LABEL: CascadedSelect: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI20_0)(a0) +; CHECK-NEXT: li a0, 15 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: flt.h a0, fa5, fa0 ; 
CHECK-NEXT: bnez a0, .LBB20_3 ; CHECK-NEXT: # %bb.1: # %entry @@ -910,23 +911,24 @@ define half @CascadedSelect(half noundef %a) { ; ; CHECKIZFHMIN-LABEL: CascadedSelect: ; CHECKIZFHMIN: # %bb.0: # %entry -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI20_0) -; CHECKIZFHMIN-NEXT: flh fa5, %lo(.LCPI20_0)(a0) -; CHECKIZFHMIN-NEXT: fcvt.s.h fa3, fa5 -; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa0 -; CHECKIZFHMIN-NEXT: flt.s a0, fa3, fa4 -; CHECKIZFHMIN-NEXT: bnez a0, .LBB20_3 -; CHECKIZFHMIN-NEXT: # %bb.1: # %entry -; CHECKIZFHMIN-NEXT: fmv.w.x fa5, zero +; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 260096 +; CHECKIZFHMIN-NEXT: fmv.w.x fa4, zero +; CHECKIZFHMIN-NEXT: flt.s a1, fa5, fa4 +; CHECKIZFHMIN-NEXT: fmv.w.x fa4, a0 ; CHECKIZFHMIN-NEXT: flt.s a0, fa4, fa5 +; CHECKIZFHMIN-NEXT: bnez a1, .LBB20_3 +; CHECKIZFHMIN-NEXT: # %bb.1: # %entry ; CHECKIZFHMIN-NEXT: bnez a0, .LBB20_4 -; CHECKIZFHMIN-NEXT: # %bb.2: # %entry -; CHECKIZFHMIN-NEXT: fmv.s fa5, fa0 -; CHECKIZFHMIN-NEXT: .LBB20_3: # %entry -; CHECKIZFHMIN-NEXT: fmv.s fa0, fa5 +; CHECKIZFHMIN-NEXT: .LBB20_2: # %entry ; CHECKIZFHMIN-NEXT: ret -; CHECKIZFHMIN-NEXT: .LBB20_4: +; CHECKIZFHMIN-NEXT: .LBB20_3: ; CHECKIZFHMIN-NEXT: fmv.h.x fa0, zero +; CHECKIZFHMIN-NEXT: beqz a0, .LBB20_2 +; CHECKIZFHMIN-NEXT: .LBB20_4: +; CHECKIZFHMIN-NEXT: li a0, 15 +; CHECKIZFHMIN-NEXT: slli a0, a0, 10 +; CHECKIZFHMIN-NEXT: fmv.h.x fa0, a0 ; CHECKIZFHMIN-NEXT: ret ; ; CHECKIZHINXMIN-LABEL: CascadedSelect: diff --git a/llvm/test/CodeGen/RISCV/half-zfa-fli.ll b/llvm/test/CodeGen/RISCV/half-zfa-fli.ll index 281a873235623..928535d79f02c 100644 --- a/llvm/test/CodeGen/RISCV/half-zfa-fli.ll +++ b/llvm/test/CodeGen/RISCV/half-zfa-fli.ll @@ -16,8 +16,9 @@ define half @loadfpimm1() { ; ; ZFHMIN-LABEL: loadfpimm1: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI0_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI0_0)(a0) +; ZFHMIN-NEXT: li a0, 11 +; ZFHMIN-NEXT: slli a0, a0, 10 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 0.0625 } @@ -30,8 +31,9 @@ define half @loadfpimm2() { ; ; ZFHMIN-LABEL: loadfpimm2: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI1_0)(a0) +; ZFHMIN-NEXT: li a0, 29 +; ZFHMIN-NEXT: slli a0, a0, 9 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 0.75 } @@ -44,8 +46,9 @@ define half @loadfpimm3() { ; ; ZFHMIN-LABEL: loadfpimm3: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI2_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI2_0)(a0) +; ZFHMIN-NEXT: lui a0, 4 +; ZFHMIN-NEXT: addi a0, a0, -768 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 1.25 } @@ -58,8 +61,9 @@ define half @loadfpimm4() { ; ; ZFHMIN-LABEL: loadfpimm4: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI3_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI3_0)(a0) +; ZFHMIN-NEXT: lui a0, 4 +; ZFHMIN-NEXT: addi a0, a0, 512 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 3.0 } @@ -72,8 +76,9 @@ define half @loadfpimm5() { ; ; ZFHMIN-LABEL: loadfpimm5: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI4_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI4_0)(a0) +; ZFHMIN-NEXT: li a0, 23 +; ZFHMIN-NEXT: slli a0, a0, 10 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 256.0 } @@ -86,8 +91,9 @@ define half @loadfpimm6() { ; ; ZFHMIN-LABEL: loadfpimm6: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI5_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI5_0)(a0) +; ZFHMIN-NEXT: li a0, 31 +; ZFHMIN-NEXT: slli a0, a0, 10 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 0xH7C00 } @@ -100,8 +106,9 @@ define half @loadfpimm7() 
{ ; ; ZFHMIN-LABEL: loadfpimm7: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI6_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI6_0)(a0) +; ZFHMIN-NEXT: lui a0, 8 +; ZFHMIN-NEXT: addi a0, a0, -512 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 0xH7E00 } @@ -123,14 +130,16 @@ define half @loadfpimm8() { define half @loadfpimm9() { ; CHECK-LABEL: loadfpimm9: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI8_0) -; CHECK-NEXT: flh fa0, %lo(.LCPI8_0)(a0) +; CHECK-NEXT: lui a0, 6 +; CHECK-NEXT: addi a0, a0, -1032 +; CHECK-NEXT: fmv.h.x fa0, a0 ; CHECK-NEXT: ret ; ; ZFHMIN-LABEL: loadfpimm9: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI8_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI8_0)(a0) +; ZFHMIN-NEXT: lui a0, 6 +; ZFHMIN-NEXT: addi a0, a0, -1032 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 255.0 } @@ -169,14 +178,16 @@ define half @loadfpimm11() { define half @loadfpimm12() { ; CHECK-LABEL: loadfpimm12: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: flh fa0, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: addi a0, a0, -1023 +; CHECK-NEXT: fmv.h.x fa0, a0 ; CHECK-NEXT: ret ; ; ZFHMIN-LABEL: loadfpimm12: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI11_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI11_0)(a0) +; ZFHMIN-NEXT: lui a0, 8 +; ZFHMIN-NEXT: addi a0, a0, -1023 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 0xH7c01 } @@ -189,8 +200,9 @@ define half @loadfpimm13() { ; ; ZFHMIN-LABEL: loadfpimm13: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI12_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI12_0)(a0) +; ZFHMIN-NEXT: li a0, -17 +; ZFHMIN-NEXT: slli a0, a0, 10 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half -1.0 } @@ -222,8 +234,9 @@ define half @loadfpimm15() { ; ; ZFHMIN-LABEL: loadfpimm15: ; ZFHMIN: # %bb.0: -; ZFHMIN-NEXT: lui a0, %hi(.LCPI14_0) -; ZFHMIN-NEXT: flh fa0, %lo(.LCPI14_0)(a0) +; ZFHMIN-NEXT: li a0, -31 +; ZFHMIN-NEXT: slli a0, a0, 10 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret ret half 0xH8400 } diff --git a/llvm/test/CodeGen/RISCV/half-zfa.ll b/llvm/test/CodeGen/RISCV/half-zfa.ll index 960c7c4a73e4f..90c66e7fe2ca4 100644 --- a/llvm/test/CodeGen/RISCV/half-zfa.ll +++ b/llvm/test/CodeGen/RISCV/half-zfa.ll @@ -350,12 +350,15 @@ define half @select_loadfpimm(half %x) nounwind { ; ZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; ZFHMIN-NEXT: fmv.w.x fa4, zero ; ZFHMIN-NEXT: fle.s a0, fa4, fa5 -; ZFHMIN-NEXT: xori a0, a0, 1 -; ZFHMIN-NEXT: slli a0, a0, 1 -; ZFHMIN-NEXT: lui a1, %hi(.LCPI17_0) -; ZFHMIN-NEXT: addi a1, a1, %lo(.LCPI17_0) -; ZFHMIN-NEXT: add a0, a1, a0 -; ZFHMIN-NEXT: flh fa0, 0(a0) +; ZFHMIN-NEXT: beqz a0, .LBB17_2 +; ZFHMIN-NEXT: # %bb.1: # %entry +; ZFHMIN-NEXT: li a0, 7 +; ZFHMIN-NEXT: j .LBB17_3 +; ZFHMIN-NEXT: .LBB17_2: +; ZFHMIN-NEXT: li a0, -9 +; ZFHMIN-NEXT: .LBB17_3: # %entry +; ZFHMIN-NEXT: slli a0, a0, 11 +; ZFHMIN-NEXT: fmv.h.x fa0, a0 ; ZFHMIN-NEXT: ret entry: %cmp = fcmp ult half %x, 0.000000e+00 diff --git a/llvm/test/CodeGen/RISCV/idiv_large.ll b/llvm/test/CodeGen/RISCV/idiv_large.ll index fb7e4a4d103d0..9937627962208 100644 --- a/llvm/test/CodeGen/RISCV/idiv_large.ll +++ b/llvm/test/CodeGen/RISCV/idiv_large.ll @@ -1,4 +1,3 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 < %s | FileCheck %s diff --git a/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll b/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll index f183c936fc672..f3b4319ccc4fa 100644 --- 
a/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll +++ b/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll @@ -17,8 +17,9 @@ entry: define void @two_fdivs(double %a0, double %a1, double %a2, ptr %res) { ; CHECK-LABEL: two_fdivs: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lui a1, %hi(.LCPI1_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI1_0)(a1) +; CHECK-NEXT: li a1, 1023 +; CHECK-NEXT: slli a1, a1, 52 +; CHECK-NEXT: fmv.d.x fa5, a1 ; CHECK-NEXT: fdiv.d fa5, fa5, fa0 ; CHECK-NEXT: fmul.d fa4, fa1, fa5 ; CHECK-NEXT: fmul.d fa5, fa2, fa5 diff --git a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll index caa6c2f8ff96f..a919452389c43 100644 --- a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll @@ -122,9 +122,10 @@ define i128 @fptosi_sat_f64_to_i128(double %a) nounwind { ; RV64ID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64ID-NEXT: sd s0, 16(sp) # 8-byte Folded Spill ; RV64ID-NEXT: fsd fs0, 8(sp) # 8-byte Folded Spill -; RV64ID-NEXT: lui a0, %hi(.LCPI4_0) -; RV64ID-NEXT: fld fa5, %lo(.LCPI4_0)(a0) ; RV64ID-NEXT: fmv.d fs0, fa0 +; RV64ID-NEXT: li a0, -449 +; RV64ID-NEXT: slli a0, a0, 53 +; RV64ID-NEXT: fmv.d.x fa5, a0 ; RV64ID-NEXT: fle.d s0, fa5, fa0 ; RV64ID-NEXT: call __fixdfti ; RV64ID-NEXT: li a2, -1 @@ -132,8 +133,8 @@ define i128 @fptosi_sat_f64_to_i128(double %a) nounwind { ; RV64ID-NEXT: # %bb.1: ; RV64ID-NEXT: slli a1, a2, 63 ; RV64ID-NEXT: .LBB4_2: -; RV64ID-NEXT: lui a3, %hi(.LCPI4_1) -; RV64ID-NEXT: fld fa5, %lo(.LCPI4_1)(a3) +; RV64ID-NEXT: lui a3, %hi(.LCPI4_0) +; RV64ID-NEXT: fld fa5, %lo(.LCPI4_0)(a3) ; RV64ID-NEXT: flt.d a3, fa5, fs0 ; RV64ID-NEXT: beqz a3, .LBB4_4 ; RV64ID-NEXT: # %bb.3: @@ -170,16 +171,17 @@ define i128 @fptosi_sat_f64_to_i128(double %a) nounwind { ; RV64IDINX-NEXT: # %bb.1: ; RV64IDINX-NEXT: slli a1, a2, 63 ; RV64IDINX-NEXT: .LBB4_2: -; RV64IDINX-NEXT: lui a3, %hi(.LCPI4_0) -; RV64IDINX-NEXT: ld a3, %lo(.LCPI4_0)(a3) +; RV64IDINX-NEXT: li a3, 575 +; RV64IDINX-NEXT: slli a3, a3, 53 +; RV64IDINX-NEXT: addi a3, a3, -1 ; RV64IDINX-NEXT: flt.d a3, a3, s0 ; RV64IDINX-NEXT: beqz a3, .LBB4_4 ; RV64IDINX-NEXT: # %bb.3: ; RV64IDINX-NEXT: srli a1, a2, 1 ; RV64IDINX-NEXT: .LBB4_4: ; RV64IDINX-NEXT: feq.d a2, s0, s0 -; RV64IDINX-NEXT: neg a3, a3 ; RV64IDINX-NEXT: neg a4, s1 +; RV64IDINX-NEXT: neg a3, a3 ; RV64IDINX-NEXT: neg a2, a2 ; RV64IDINX-NEXT: and a0, a4, a0 ; RV64IDINX-NEXT: and a1, a2, a1 @@ -267,10 +269,11 @@ define i128 @fptoui_sat_f64_to_i128(double %a) nounwind { ; RV64IDINX-NEXT: neg s1, a0 ; RV64IDINX-NEXT: mv a0, s0 ; RV64IDINX-NEXT: call __fixunsdfti -; RV64IDINX-NEXT: lui a2, %hi(.LCPI5_0) -; RV64IDINX-NEXT: ld a2, %lo(.LCPI5_0)(a2) ; RV64IDINX-NEXT: and a0, s1, a0 +; RV64IDINX-NEXT: li a2, 1151 ; RV64IDINX-NEXT: and a1, s1, a1 +; RV64IDINX-NEXT: slli a2, a2, 52 +; RV64IDINX-NEXT: addi a2, a2, -1 ; RV64IDINX-NEXT: flt.d a2, a2, s0 ; RV64IDINX-NEXT: neg a2, a2 ; RV64IDINX-NEXT: or a0, a2, a0 diff --git a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll index ebda78528810f..0af75a789f7a2 100644 --- a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll @@ -130,16 +130,17 @@ define i128 @fptosi_sat_f32_to_i128(float %a) nounwind { ; RV64IF-NEXT: # %bb.1: ; RV64IF-NEXT: slli a1, a2, 63 ; RV64IF-NEXT: .LBB4_2: -; RV64IF-NEXT: lui a3, %hi(.LCPI4_0) -; RV64IF-NEXT: flw fa5, %lo(.LCPI4_0)(a3) +; RV64IF-NEXT: lui a3, 520192 +; RV64IF-NEXT: addi a3, a3, -1 +; RV64IF-NEXT: fmv.w.x fa5, a3 
; RV64IF-NEXT: flt.s a3, fa5, fs0 ; RV64IF-NEXT: beqz a3, .LBB4_4 ; RV64IF-NEXT: # %bb.3: ; RV64IF-NEXT: srli a1, a2, 1 ; RV64IF-NEXT: .LBB4_4: ; RV64IF-NEXT: feq.s a2, fs0, fs0 -; RV64IF-NEXT: neg a3, a3 ; RV64IF-NEXT: neg a4, s0 +; RV64IF-NEXT: neg a3, a3 ; RV64IF-NEXT: neg a2, a2 ; RV64IF-NEXT: and a0, a4, a0 ; RV64IF-NEXT: and a1, a2, a1 @@ -235,10 +236,11 @@ define i128 @fptoui_sat_f32_to_i128(float %a) nounwind { ; RV64IF-NEXT: fle.s a0, fa5, fa0 ; RV64IF-NEXT: neg s0, a0 ; RV64IF-NEXT: call __fixunssfti -; RV64IF-NEXT: lui a2, %hi(.LCPI5_0) -; RV64IF-NEXT: flw fa5, %lo(.LCPI5_0)(a2) ; RV64IF-NEXT: and a0, s0, a0 +; RV64IF-NEXT: lui a2, 522240 ; RV64IF-NEXT: and a1, s0, a1 +; RV64IF-NEXT: addi a2, a2, -1 +; RV64IF-NEXT: fmv.w.x fa5, a2 ; RV64IF-NEXT: flt.s a2, fa5, fs0 ; RV64IF-NEXT: neg a2, a2 ; RV64IF-NEXT: or a0, a2, a0 diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll index 648f3789953aa..d8f3816b85485 100644 --- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll @@ -208,16 +208,17 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind { ; RV64IZFH-NEXT: # %bb.1: ; RV64IZFH-NEXT: slli a1, a2, 63 ; RV64IZFH-NEXT: .LBB4_2: -; RV64IZFH-NEXT: lui a3, %hi(.LCPI4_0) -; RV64IZFH-NEXT: flw fa5, %lo(.LCPI4_0)(a3) +; RV64IZFH-NEXT: lui a3, 520192 +; RV64IZFH-NEXT: addi a3, a3, -1 +; RV64IZFH-NEXT: fmv.w.x fa5, a3 ; RV64IZFH-NEXT: flt.s a3, fa5, fs0 ; RV64IZFH-NEXT: beqz a3, .LBB4_4 ; RV64IZFH-NEXT: # %bb.3: ; RV64IZFH-NEXT: srli a1, a2, 1 ; RV64IZFH-NEXT: .LBB4_4: ; RV64IZFH-NEXT: feq.s a2, fs0, fs0 -; RV64IZFH-NEXT: neg a3, a3 ; RV64IZFH-NEXT: neg a4, s0 +; RV64IZFH-NEXT: neg a3, a3 ; RV64IZFH-NEXT: neg a2, a2 ; RV64IZFH-NEXT: and a0, a4, a0 ; RV64IZFH-NEXT: and a1, a2, a1 @@ -308,23 +309,25 @@ define i128 @fptoui_sat_f16_to_i128(half %a) nounwind { ; RV64IZFH-NEXT: addi sp, sp, -32 ; RV64IZFH-NEXT: sd ra, 24(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; RV64IZFH-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; RV64IZFH-NEXT: lui a0, %hi(.LCPI5_0) -; RV64IZFH-NEXT: flw fa5, %lo(.LCPI5_0)(a0) -; RV64IZFH-NEXT: fcvt.s.h fa0, fa0 -; RV64IZFH-NEXT: fmv.w.x fa4, zero -; RV64IZFH-NEXT: fle.s a0, fa4, fa0 -; RV64IZFH-NEXT: flt.s a1, fa5, fa0 -; RV64IZFH-NEXT: neg s0, a1 -; RV64IZFH-NEXT: neg s1, a0 +; RV64IZFH-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill +; RV64IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV64IZFH-NEXT: fmv.w.x fa5, zero +; RV64IZFH-NEXT: fle.s a0, fa5, fs0 +; RV64IZFH-NEXT: neg s0, a0 +; RV64IZFH-NEXT: fmv.s fa0, fs0 ; RV64IZFH-NEXT: call __fixunssfti -; RV64IZFH-NEXT: and a0, s1, a0 -; RV64IZFH-NEXT: and a1, s1, a1 -; RV64IZFH-NEXT: or a0, s0, a0 -; RV64IZFH-NEXT: or a1, s0, a1 +; RV64IZFH-NEXT: and a0, s0, a0 +; RV64IZFH-NEXT: lui a2, 522240 +; RV64IZFH-NEXT: and a1, s0, a1 +; RV64IZFH-NEXT: addi a2, a2, -1 +; RV64IZFH-NEXT: fmv.w.x fa5, a2 +; RV64IZFH-NEXT: flt.s a2, fa5, fs0 +; RV64IZFH-NEXT: neg a2, a2 +; RV64IZFH-NEXT: or a0, a2, a0 +; RV64IZFH-NEXT: or a1, a2, a1 ; RV64IZFH-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64IZFH-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 32 ; RV64IZFH-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll index 2fe8c8ce7975a..6507349f45a2f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll @@ -1,16 +1,16 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZFH declare @llvm.vp.ceil.nxv1bf16(, , i32) @@ -407,10 +407,11 @@ declare @llvm.vp.ceil.nxv1f16(, @vp_ceil_vv_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI12_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 3 @@ -453,10 +454,11 @@ define @vp_ceil_vv_nxv1f16( %va, @vp_ceil_vv_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv1f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI13_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -493,10 +495,11 @@ declare @llvm.vp.ceil.nxv2f16(, @vp_ceil_vv_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI14_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 3 @@ -539,10 +542,11 @@ define @vp_ceil_vv_nxv2f16( %va, @vp_ceil_vv_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI15_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -579,10 +583,11 @@ declare @llvm.vp.ceil.nxv4f16(, @vp_ceil_vv_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI16_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; 
ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 3 @@ -625,10 +630,11 @@ define @vp_ceil_vv_nxv4f16( %va, @vp_ceil_vv_nxv4f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI17_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -667,9 +673,10 @@ define @vp_ceil_vv_nxv8f16( %va, @vp_ceil_vv_nxv8f16( %va, @vp_ceil_vv_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI19_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -755,9 +763,10 @@ define @vp_ceil_vv_nxv16f16( %va, @vp_ceil_vv_nxv16f16( %va, @vp_ceil_vv_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI21_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -843,9 +853,10 @@ define @vp_ceil_vv_nxv32f16( %va, @vp_ceil_vv_nxv32f16( %va, @vp_ceil_vv_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_vv_nxv32f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI23_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -1210,41 +1222,75 @@ define @vp_ceil_vv_nxv16f32_unmasked( declare @llvm.vp.ceil.nxv1f64(, , i32) define @vp_ceil_vv_nxv1f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI34_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv1f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: 
vfcvt.f.x.v v9, v9, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv1f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv1f64( %va, %m, i32 %evl) ret %v } define @vp_ceil_vv_nxv1f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv1f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI35_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv1f64_unmasked: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZFH-NEXT: vfabs.v v9, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv1f64_unmasked: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZFH-NEXT: vfabs.v v9, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv1f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1252,43 +1298,79 @@ define @vp_ceil_vv_nxv1f64_unmasked( declare @llvm.vp.ceil.nxv2f64(, , i32) define @vp_ceil_vv_nxv2f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv2f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZFH-NEXT: vmv1r.v v10, v0 +; 
RV32ZFH-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vmv1r.v v0, v10 +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv2f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZFH-NEXT: vmv1r.v v10, v0 +; RV64ZFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vmv1r.v v0, v10 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv2f64( %va, %m, i32 %evl) ret %v } define @vp_ceil_vv_nxv2f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI37_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv2f64_unmasked: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZFH-NEXT: vfabs.v v10, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv2f64_unmasked: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZFH-NEXT: vfabs.v v10, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1296,43 +1378,79 @@ define @vp_ceil_vv_nxv2f64_unmasked( declare @llvm.vp.ceil.nxv4f64(, , i32) define @vp_ceil_vv_nxv4f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; 
CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv4f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZFH-NEXT: vmv1r.v v12, v0 +; RV32ZFH-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vmv1r.v v0, v12 +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv4f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZFH-NEXT: vmv1r.v v12, v0 +; RV64ZFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vmv1r.v v0, v12 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv4f64( %va, %m, i32 %evl) ret %v } define @vp_ceil_vv_nxv4f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI39_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv4f64_unmasked: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZFH-NEXT: vfabs.v v12, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv4f64_unmasked: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZFH-NEXT: vfabs.v v12, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv4f64( 
%va, splat (i1 true), i32 %evl) ret %v } @@ -1340,43 +1458,79 @@ define @vp_ceil_vv_nxv4f64_unmasked( declare @llvm.vp.ceil.nxv7f64(, , i32) define @vp_ceil_vv_nxv7f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv7f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv7f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZFH-NEXT: vmv1r.v v16, v0 +; RV32ZFH-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vmv1r.v v0, v16 +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv7f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZFH-NEXT: vmv1r.v v16, v0 +; RV64ZFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vmv1r.v v0, v16 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv7f64( %va, %m, i32 %evl) ret %v } define @vp_ceil_vv_nxv7f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv7f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI41_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv7f64_unmasked: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZFH-NEXT: vfabs.v v16, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv7f64_unmasked: +; RV64ZFH: # %bb.0: +; 
RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZFH-NEXT: vfabs.v v16, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv7f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1384,43 +1538,79 @@ define @vp_ceil_vv_nxv7f64_unmasked( declare @llvm.vp.ceil.nxv8f64(, , i32) define @vp_ceil_vv_nxv8f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv8f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZFH-NEXT: vmv1r.v v16, v0 +; RV32ZFH-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vmv1r.v v0, v16 +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv8f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZFH-NEXT: vmv1r.v v16, v0 +; RV64ZFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vmv1r.v v0, v16 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv8f64( %va, %m, i32 %evl) ret %v } define @vp_ceil_vv_nxv8f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI43_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv8f64_unmasked: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a1, 
%hi(.LCPI43_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZFH-NEXT: vfabs.v v16, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv8f64_unmasked: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZFH-NEXT: vfabs.v v16, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv8f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1429,87 +1619,167 @@ define @vp_ceil_vv_nxv8f64_unmasked( declare @llvm.vp.ceil.nxv16f64(, , i32) define @vp_ceil_vv_nxv16f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI44_0) -; CHECK-NEXT: srli a3, a1, 3 -; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) -; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vslidedown.vx v6, v0, a3 -; CHECK-NEXT: sltu a3, a0, a2 -; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a2, a3, a2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a2, 3 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB44_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB44_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv16f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZFH-NEXT: vmv1r.v v7, v0 +; RV32ZFH-NEXT: csrr a1, vlenb +; RV32ZFH-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZFH-NEXT: srli a3, a1, 3 +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZFH-NEXT: sub a2, a0, a1 +; RV32ZFH-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZFH-NEXT: sltu a3, a0, a2 +; RV32ZFH-NEXT: addi a3, a3, -1 +; RV32ZFH-NEXT: and a2, a3, a2 +; RV32ZFH-NEXT: vmv1r.v v0, v6 +; RV32ZFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZFH-NEXT: fsrmi a2, 3 +; 
RV32ZFH-NEXT: vmv1r.v v0, v6 +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZFH-NEXT: fsrm a2 +; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZFH-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZFH-NEXT: # %bb.1: +; RV32ZFH-NEXT: mv a0, a1 +; RV32ZFH-NEXT: .LBB44_2: +; RV32ZFH-NEXT: vmv1r.v v0, v7 +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vmv1r.v v0, v7 +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv16f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZFH-NEXT: vmv1r.v v7, v0 +; RV64ZFH-NEXT: csrr a1, vlenb +; RV64ZFH-NEXT: li a2, 1075 +; RV64ZFH-NEXT: srli a3, a1, 3 +; RV64ZFH-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZFH-NEXT: sub a3, a0, a1 +; RV64ZFH-NEXT: slli a2, a2, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a2 +; RV64ZFH-NEXT: sltu a2, a0, a3 +; RV64ZFH-NEXT: addi a2, a2, -1 +; RV64ZFH-NEXT: and a2, a2, a3 +; RV64ZFH-NEXT: vmv1r.v v0, v6 +; RV64ZFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZFH-NEXT: fsrmi a2, 3 +; RV64ZFH-NEXT: vmv1r.v v0, v6 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZFH-NEXT: fsrm a2 +; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZFH-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZFH-NEXT: # %bb.1: +; RV64ZFH-NEXT: mv a0, a1 +; RV64ZFH-NEXT: .LBB44_2: +; RV64ZFH-NEXT: vmv1r.v v0, v7 +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vmv1r.v v0, v7 +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv16f64( %va, %m, i32 %evl) ret %v } define @vp_ceil_vv_nxv16f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_vv_nxv16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI45_0) -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2) -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a2, 3 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB45_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; 
CHECK-NEXT: .LBB45_2: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: vp_ceil_vv_nxv16f64_unmasked: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: csrr a1, vlenb +; RV32ZFH-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZFH-NEXT: sub a3, a0, a1 +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZFH-NEXT: sltu a2, a0, a3 +; RV32ZFH-NEXT: addi a2, a2, -1 +; RV32ZFH-NEXT: and a2, a2, a3 +; RV32ZFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZFH-NEXT: vfabs.v v24, v16 +; RV32ZFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZFH-NEXT: fsrmi a2, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZFH-NEXT: fsrm a2 +; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZFH-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZFH-NEXT: # %bb.1: +; RV32ZFH-NEXT: mv a0, a1 +; RV32ZFH-NEXT: .LBB45_2: +; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZFH-NEXT: vfabs.v v24, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: vp_ceil_vv_nxv16f64_unmasked: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: csrr a1, vlenb +; RV64ZFH-NEXT: li a2, 1075 +; RV64ZFH-NEXT: sub a3, a0, a1 +; RV64ZFH-NEXT: slli a2, a2, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a2 +; RV64ZFH-NEXT: sltu a2, a0, a3 +; RV64ZFH-NEXT: addi a2, a2, -1 +; RV64ZFH-NEXT: and a2, a2, a3 +; RV64ZFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZFH-NEXT: vfabs.v v24, v16 +; RV64ZFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZFH-NEXT: fsrmi a2, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZFH-NEXT: fsrm a2 +; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZFH-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZFH-NEXT: # %bb.1: +; RV64ZFH-NEXT: mv a0, a1 +; RV64ZFH-NEXT: .LBB45_2: +; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZFH-NEXT: vfabs.v v24, v8 +; RV64ZFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZFH-NEXT: ret %v = call @llvm.vp.ceil.nxv16f64( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll index fba27e3d548cf..ee18a426c1b12 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll @@ -2025,7 +2025,8 @@ define @vp_ctpop_nxv16i64( %va, @vp_ctpop_nxv16i64( %va, @vp_ctpop_nxv16i64( %va, @vp_ctpop_nxv16i64_unmasked( %va, ; RV32-NEXT: addi a4, a4, 16 ; RV32-NEXT: vs8r.v v0, (a4) # vscale x 64-byte Folded Spill ; RV32-NEXT: vand.vv v24, v24, v0 -; RV32-NEXT: vsub.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v24, v16, v24 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v0, a3 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: vand.vv v24, 
v16, v0 -; RV32-NEXT: vsrl.vi v16, v16, 2 +; RV32-NEXT: vand.vv v16, v24, v0 +; RV32-NEXT: vsrl.vi v24, v24, 2 ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 4 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 ; RV32-NEXT: vs8r.v v0, (a3) # vscale x 64-byte Folded Spill -; RV32-NEXT: vand.vv v16, v16, v0 +; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: vadd.vv v24, v16, v24 +; RV32-NEXT: vsrl.vi v16, v24, 4 ; RV32-NEXT: vadd.vv v16, v24, v16 -; RV32-NEXT: vsrl.vi v24, v16, 4 -; RV32-NEXT: vadd.vv v16, v16, v24 ; RV32-NEXT: lui a3, 61681 ; RV32-NEXT: lui a4, 4112 ; RV32-NEXT: addi a3, a3, -241 @@ -2312,16 +2312,16 @@ define @vp_ctpop_nxv16i64_unmasked( %va, ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; RV32-NEXT: vand.vv v24, v24, v0 -; RV32-NEXT: vsub.vv v8, v8, v24 +; RV32-NEXT: vsub.vv v24, v8, v24 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; RV32-NEXT: vand.vv v24, v8, v0 -; RV32-NEXT: vsrl.vi v8, v8, 2 -; RV32-NEXT: vand.vv v8, v8, v0 -; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vand.vv v8, v24, v0 +; RV32-NEXT: vsrl.vi v24, v24, 2 +; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: vadd.vv v8, v8, v24 ; RV32-NEXT: vsrl.vi v24, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v24 ; RV32-NEXT: csrr a0, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll index 6bf882fe47fef..52eaa51051631 100644 --- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll @@ -2193,7 +2193,8 @@ define @vp_cttz_nxv16i64( %va, @vp_cttz_nxv16i64( %va, @vp_cttz_nxv16i64( %va, @vp_cttz_nxv16i64_unmasked( %va, i ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vadd.vi v24, v16, -1 ; RV32-NEXT: vnot.v v16, v16 -; RV32-NEXT: vand.vv v16, v16, v24 -; RV32-NEXT: vsrl.vi v24, v16, 1 +; RV32-NEXT: vand.vv v24, v16, v24 +; RV32-NEXT: vsrl.vi v16, v24, 1 ; RV32-NEXT: csrr a4, vlenb ; RV32-NEXT: li a5, 24 ; RV32-NEXT: mul a4, a4, a5 ; RV32-NEXT: add a4, sp, a4 ; RV32-NEXT: addi a4, a4, 16 ; RV32-NEXT: vs8r.v v0, (a4) # vscale x 64-byte Folded Spill -; RV32-NEXT: vand.vv v24, v24, v0 -; RV32-NEXT: vsub.vv v16, v16, v24 +; RV32-NEXT: vand.vv v16, v16, v0 +; RV32-NEXT: vsub.vv v24, v24, v16 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v0, a3 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: vand.vv v24, v16, v0 -; RV32-NEXT: vsrl.vi v16, v16, 2 +; RV32-NEXT: vand.vv v16, v24, v0 +; RV32-NEXT: vsrl.vi v24, v24, 2 ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 4 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 ; RV32-NEXT: vs8r.v v0, (a3) # vscale x 64-byte Folded Spill -; RV32-NEXT: vand.vv v16, v16, v0 +; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: vadd.vv v24, v16, v24 +; RV32-NEXT: vsrl.vi v16, v24, 4 ; RV32-NEXT: vadd.vv v16, v24, v16 -; RV32-NEXT: vsrl.vi v24, v16, 4 -; RV32-NEXT: vadd.vv v16, v16, v24 ; RV32-NEXT: lui a3, 61681 ; RV32-NEXT: lui a4, 4112 ; RV32-NEXT: addi a3, a3, -241 diff --git a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll index 8c63c2d4be8c1..51dc7b0714d7f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll @@ -32,10 +32,11 @@ define @trunc_nxv1f64_to_si8( %x) { ; ; RV64-LABEL: trunc_nxv1f64_to_si8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI0_0) -; RV64-NEXT: fld 
fa5, %lo(.LCPI0_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -75,10 +76,11 @@ define @trunc_nxv1f64_to_ui8( %x) { ; ; RV64-LABEL: trunc_nxv1f64_to_ui8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI1_0) -; RV64-NEXT: fld fa5, %lo(.LCPI1_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -116,10 +118,11 @@ define @trunc_nxv1f64_to_si16( %x) { ; ; RV64-LABEL: trunc_nxv1f64_to_si16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI2_0) -; RV64-NEXT: fld fa5, %lo(.LCPI2_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -155,10 +158,11 @@ define @trunc_nxv1f64_to_ui16( %x) { ; ; RV64-LABEL: trunc_nxv1f64_to_ui16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI3_0) -; RV64-NEXT: fld fa5, %lo(.LCPI3_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -274,10 +278,11 @@ define @trunc_nxv4f64_to_si8( %x) { ; ; RV64-LABEL: trunc_nxv4f64_to_si8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI8_0) -; RV64-NEXT: fld fa5, %lo(.LCPI8_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -317,10 +322,11 @@ define @trunc_nxv4f64_to_ui8( %x) { ; ; RV64-LABEL: trunc_nxv4f64_to_ui8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI9_0) -; RV64-NEXT: fld fa5, %lo(.LCPI9_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -358,10 +364,11 @@ define @trunc_nxv4f64_to_si16( %x) { ; ; RV64-LABEL: trunc_nxv4f64_to_si16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI10_0) -; RV64-NEXT: fld fa5, %lo(.LCPI10_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -397,10 +404,11 @@ define @trunc_nxv4f64_to_ui16( %x) { ; ; RV64-LABEL: trunc_nxv4f64_to_ui16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI11_0) -; RV64-NEXT: fld fa5, %lo(.LCPI11_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t ; 
RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -518,10 +526,11 @@ define @ceil_nxv1f64_to_si8( %x) { ; ; RV64-LABEL: ceil_nxv1f64_to_si8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI16_0) -; RV64-NEXT: fld fa5, %lo(.LCPI16_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: fsrmi a0, 3 ; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -565,10 +574,11 @@ define @ceil_nxv1f64_to_ui8( %x) { ; ; RV64-LABEL: ceil_nxv1f64_to_ui8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI17_0) -; RV64-NEXT: fld fa5, %lo(.LCPI17_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: fsrmi a0, 3 ; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -610,10 +620,11 @@ define @ceil_nxv1f64_to_si16( %x) { ; ; RV64-LABEL: ceil_nxv1f64_to_si16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI18_0) -; RV64-NEXT: fld fa5, %lo(.LCPI18_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: fsrmi a0, 3 ; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -653,10 +664,11 @@ define @ceil_nxv1f64_to_ui16( %x) { ; ; RV64-LABEL: ceil_nxv1f64_to_ui16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI19_0) -; RV64-NEXT: fld fa5, %lo(.LCPI19_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: fsrmi a0, 3 ; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -792,10 +804,11 @@ define @ceil_nxv4f64_to_si8( %x) { ; ; RV64-LABEL: ceil_nxv4f64_to_si8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI24_0) -; RV64-NEXT: fld fa5, %lo(.LCPI24_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: fsrmi a0, 3 ; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -839,10 +852,11 @@ define @ceil_nxv4f64_to_ui8( %x) { ; ; RV64-LABEL: ceil_nxv4f64_to_ui8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI25_0) -; RV64-NEXT: fld fa5, %lo(.LCPI25_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: fsrmi a0, 3 ; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -884,10 +898,11 @@ define @ceil_nxv4f64_to_si16( %x) { ; ; RV64-LABEL: ceil_nxv4f64_to_si16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI26_0) -; RV64-NEXT: fld fa5, %lo(.LCPI26_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: fsrmi a0, 3 ; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -927,10 +942,11 @@ define @ceil_nxv4f64_to_ui16( %x) { ; ; RV64-LABEL: ceil_nxv4f64_to_ui16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI27_0) -; RV64-NEXT: fld fa5, %lo(.LCPI27_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; 
RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: fsrmi a0, 3 ; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -1064,10 +1080,11 @@ define @rint_nxv1f64_to_si8( %x) { ; ; RV64-LABEL: rint_nxv1f64_to_si8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI32_0) -; RV64-NEXT: fld fa5, %lo(.LCPI32_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -1107,10 +1124,11 @@ define @rint_nxv1f64_to_ui8( %x) { ; ; RV64-LABEL: rint_nxv1f64_to_ui8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI33_0) -; RV64-NEXT: fld fa5, %lo(.LCPI33_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -1148,10 +1166,11 @@ define @rint_nxv1f64_to_si16( %x) { ; ; RV64-LABEL: rint_nxv1f64_to_si16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI34_0) -; RV64-NEXT: fld fa5, %lo(.LCPI34_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -1187,10 +1206,11 @@ define @rint_nxv1f64_to_ui16( %x) { ; ; RV64-LABEL: rint_nxv1f64_to_ui16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI35_0) -; RV64-NEXT: fld fa5, %lo(.LCPI35_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v9, fa5 ; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -1306,10 +1326,11 @@ define @rint_nxv4f64_to_si8( %x) { ; ; RV64-LABEL: rint_nxv4f64_to_si8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI40_0) -; RV64-NEXT: fld fa5, %lo(.LCPI40_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -1349,10 +1370,11 @@ define @rint_nxv4f64_to_ui8( %x) { ; ; RV64-LABEL: rint_nxv4f64_to_ui8: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI41_0) -; RV64-NEXT: fld fa5, %lo(.LCPI41_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -1390,10 +1412,11 @@ define @rint_nxv4f64_to_si16( %x) { ; ; RV64-LABEL: rint_nxv4f64_to_si16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI42_0) -; RV64-NEXT: fld fa5, %lo(.LCPI42_0)(a0) ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -1429,10 +1452,11 @@ define @rint_nxv4f64_to_ui16( %x) { ; ; RV64-LABEL: rint_nxv4f64_to_ui16: ; RV64: # %bb.0: -; RV64-NEXT: lui a0, %hi(.LCPI43_0) -; RV64-NEXT: fld fa5, %lo(.LCPI43_0)(a0) ; 
RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 ; RV64-NEXT: vmflt.vf v0, v12, fa5 ; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t ; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll index 9173fa4622487..cc1282a9119da 100644 --- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll @@ -1666,20 +1666,20 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, < ; CHECK-RV32-NEXT: .LBB61_32: # %else114 ; CHECK-RV32-NEXT: slli a2, a3, 1 ; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma -; CHECK-RV32-NEXT: vsrl.vx v16, v0, a1 +; CHECK-RV32-NEXT: vsrl.vx v24, v0, a1 ; CHECK-RV32-NEXT: bgez a2, .LBB61_34 ; CHECK-RV32-NEXT: # %bb.33: # %cond.load117 ; CHECK-RV32-NEXT: lbu a2, 0(a0) -; CHECK-RV32-NEXT: vmv8r.v v24, v8 +; CHECK-RV32-NEXT: vmv8r.v v16, v8 ; CHECK-RV32-NEXT: vmv.s.x v9, a2 ; CHECK-RV32-NEXT: vsetivli zero, 31, e8, m1, tu, ma ; CHECK-RV32-NEXT: vslideup.vi v8, v9, 30 ; CHECK-RV32-NEXT: addi a0, a0, 1 -; CHECK-RV32-NEXT: vmv1r.v v24, v8 -; CHECK-RV32-NEXT: vmv8r.v v8, v24 +; CHECK-RV32-NEXT: vmv1r.v v16, v8 +; CHECK-RV32-NEXT: vmv8r.v v8, v16 ; CHECK-RV32-NEXT: .LBB61_34: # %else118 ; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma -; CHECK-RV32-NEXT: vmv.x.s a2, v16 +; CHECK-RV32-NEXT: vmv.x.s a2, v24 ; CHECK-RV32-NEXT: bgez a3, .LBB61_35 ; CHECK-RV32-NEXT: j .LBB61_572 ; CHECK-RV32-NEXT: .LBB61_35: # %else122 diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll index 1626b362fed15..316a84f98be2b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll @@ -1,18 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s define @ceil_nxv1f16( %x) strictfp { ; CHECK-LABEL: ceil_nxv1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -32,10 +33,11 @@ define @ceil_nxv2f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -55,10 +57,11 @@ define @ceil_nxv4f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; 
CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -78,10 +81,11 @@ define @ceil_nxv8f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -101,10 +105,11 @@ define @ceil_nxv16f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma @@ -124,10 +129,11 @@ define @ceil_nxv32f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v16, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma @@ -258,92 +264,168 @@ define @ceil_nxv16f32( %x) strictfp { declare @llvm.experimental.constrained.ceil.nxv16f32(, metadata) define @ceil_nxv1f64( %x) strictfp { -; CHECK-LABEL: ceil_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: ceil_nxv1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 3 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: ceil_nxv1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, 
v9, fa5 +; RV64-NEXT: fsrmi a0, 3 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.ceil.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.ceil.nxv1f64(, metadata) define @ceil_nxv2f64( %x) strictfp { -; CHECK-LABEL: ceil_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: ceil_nxv2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: fsrmi a0, 3 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: ceil_nxv2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: fsrmi a0, 3 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.ceil.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.ceil.nxv2f64(, metadata) define @ceil_nxv4f64( %x) strictfp { -; CHECK-LABEL: ceil_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: ceil_nxv4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: fsrmi a0, 3 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: 
vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: ceil_nxv4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: fsrmi a0, 3 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.ceil.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.ceil.nxv4f64(, metadata) define @ceil_nxv8f64( %x) strictfp { -; CHECK-LABEL: ceil_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: ceil_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: fsrmi a0, 3 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: ceil_nxv8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: fsrmi a0, 3 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.ceil.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll index 4aca2d694dfbb..56edec1cc7a68 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZFH ; RUN: llc -mtriple=riscv64 
-mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZFHMIN define @ceil_nxv1bf16( %x) { ; CHECK-LABEL: ceil_nxv1bf16: @@ -167,10 +167,11 @@ define @ceil_nxv32bf16( %x) { define @ceil_nxv1f16( %x) { ; ZVFH-LABEL: ceil_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -206,10 +207,11 @@ declare @llvm.ceil.nxv1f16() define @ceil_nxv2f16( %x) { ; ZVFH-LABEL: ceil_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -245,10 +247,11 @@ declare @llvm.ceil.nxv2f16() define @ceil_nxv4f16( %x) { ; ZVFH-LABEL: ceil_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI8_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -284,10 +287,11 @@ declare @llvm.ceil.nxv4f16() define @ceil_nxv8f16( %x) { ; ZVFH-LABEL: ceil_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI9_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -323,10 +327,11 @@ declare @llvm.ceil.nxv8f16() define @ceil_nxv16f16( %x) { ; ZVFH-LABEL: ceil_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI10_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -362,10 +367,11 @@ declare @llvm.ceil.nxv16f16() define @ceil_nxv32f16( %x) { ; ZVFH-LABEL: ceil_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI11_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -513,80 +519,268 @@ define @ceil_nxv16f32( %x) { declare 
@llvm.ceil.nxv16f32() define @ceil_nxv1f64( %x) { -; CHECK-LABEL: ceil_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: ceil_nxv1f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZFH-NEXT: vfabs.v v9, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: ceil_nxv1f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZFH-NEXT: vfabs.v v9, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZFH-NEXT: ret +; +; RV32ZFHMIN-LABEL: ceil_nxv1f64: +; RV32ZFHMIN: # %bb.0: +; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZFHMIN-NEXT: fsrmi a0, 3 +; RV32ZFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZFHMIN-NEXT: fsrm a0 +; RV32ZFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZFHMIN-NEXT: ret +; +; RV64ZFHMIN-LABEL: ceil_nxv1f64: +; RV64ZFHMIN: # %bb.0: +; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZFHMIN-NEXT: li a0, 1075 +; RV64ZFHMIN-NEXT: slli a0, a0, 52 +; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZFHMIN-NEXT: fsrmi a0, 3 +; RV64ZFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZFHMIN-NEXT: fsrm a0 +; RV64ZFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZFHMIN-NEXT: ret %a = call @llvm.ceil.nxv1f64( %x) ret %a } declare @llvm.ceil.nxv1f64() define @ceil_nxv2f64( %x) { -; CHECK-LABEL: ceil_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: ceil_nxv2f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZFH-NEXT: vfabs.v v10, v8 +; RV32ZFH-NEXT: vmflt.vf 
v0, v10, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: ceil_nxv2f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZFH-NEXT: vfabs.v v10, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZFH-NEXT: ret +; +; RV32ZFHMIN-LABEL: ceil_nxv2f64: +; RV32ZFHMIN: # %bb.0: +; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZFHMIN-NEXT: fsrmi a0, 3 +; RV32ZFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZFHMIN-NEXT: fsrm a0 +; RV32ZFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZFHMIN-NEXT: ret +; +; RV64ZFHMIN-LABEL: ceil_nxv2f64: +; RV64ZFHMIN: # %bb.0: +; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZFHMIN-NEXT: li a0, 1075 +; RV64ZFHMIN-NEXT: slli a0, a0, 52 +; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZFHMIN-NEXT: fsrmi a0, 3 +; RV64ZFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZFHMIN-NEXT: fsrm a0 +; RV64ZFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZFHMIN-NEXT: ret %a = call @llvm.ceil.nxv2f64( %x) ret %a } declare @llvm.ceil.nxv2f64() define @ceil_nxv4f64( %x) { -; CHECK-LABEL: ceil_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: ceil_nxv4f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZFH-NEXT: vfabs.v v12, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: ceil_nxv4f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZFH-NEXT: vfabs.v v12, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZFH-NEXT: 
vfsgnj.vv v8, v12, v8, v0.t +; RV64ZFH-NEXT: ret +; +; RV32ZFHMIN-LABEL: ceil_nxv4f64: +; RV32ZFHMIN: # %bb.0: +; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZFHMIN-NEXT: fsrmi a0, 3 +; RV32ZFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZFHMIN-NEXT: fsrm a0 +; RV32ZFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZFHMIN-NEXT: ret +; +; RV64ZFHMIN-LABEL: ceil_nxv4f64: +; RV64ZFHMIN: # %bb.0: +; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZFHMIN-NEXT: li a0, 1075 +; RV64ZFHMIN-NEXT: slli a0, a0, 52 +; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZFHMIN-NEXT: fsrmi a0, 3 +; RV64ZFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZFHMIN-NEXT: fsrm a0 +; RV64ZFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZFHMIN-NEXT: ret %a = call @llvm.ceil.nxv4f64( %x) ret %a } declare @llvm.ceil.nxv4f64() define @ceil_nxv8f64( %x) { -; CHECK-LABEL: ceil_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: ceil_nxv8f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZFH-NEXT: vfabs.v v16, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZFH-NEXT: fsrmi a0, 3 +; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: ceil_nxv8f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZFH-NEXT: vfabs.v v16, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZFH-NEXT: fsrmi a0, 3 +; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZFH-NEXT: ret +; +; RV32ZFHMIN-LABEL: ceil_nxv8f64: +; RV32ZFHMIN: # %bb.0: +; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZFHMIN-NEXT: fsrmi a0, 3 +; RV32ZFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZFHMIN-NEXT: fsrm a0 +; RV32ZFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZFHMIN-NEXT: ret +; +; RV64ZFHMIN-LABEL: ceil_nxv8f64: +; RV64ZFHMIN: # %bb.0: +; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZFHMIN-NEXT: vfabs.v 
v16, v8 +; RV64ZFHMIN-NEXT: li a0, 1075 +; RV64ZFHMIN-NEXT: slli a0, a0, 52 +; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZFHMIN-NEXT: fsrmi a0, 3 +; RV64ZFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZFHMIN-NEXT: fsrm a0 +; RV64ZFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZFHMIN-NEXT: ret %a = call @llvm.ceil.nxv8f64( %x) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll index d93f15ec44053..7045fc7c50847 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll @@ -1,18 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s define @floor_nxv1f16( %x) strictfp { ; CHECK-LABEL: floor_nxv1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -32,10 +33,11 @@ define @floor_nxv2f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -55,10 +57,11 @@ define @floor_nxv4f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -78,10 +81,11 @@ define @floor_nxv8f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -101,10 +105,11 @@ define @floor_nxv16f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li 
a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma @@ -124,10 +129,11 @@ define @floor_nxv32f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v16, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma @@ -258,92 +264,168 @@ define @floor_nxv16f32( %x) strictfp declare @llvm.experimental.constrained.floor.nxv16f32(, metadata) define @floor_nxv1f64( %x) strictfp { -; CHECK-LABEL: floor_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: floor_nxv1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 2 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: floor_nxv1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 2 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.floor.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.floor.nxv1f64(, metadata) define @floor_nxv2f64( %x) strictfp { -; CHECK-LABEL: floor_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; 
CHECK-NEXT: ret +; RV32-LABEL: floor_nxv2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: fsrmi a0, 2 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: floor_nxv2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: fsrmi a0, 2 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.floor.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.floor.nxv2f64(, metadata) define @floor_nxv4f64( %x) strictfp { -; CHECK-LABEL: floor_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: floor_nxv4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: fsrmi a0, 2 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: floor_nxv4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: fsrmi a0, 2 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.floor.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.floor.nxv4f64(, metadata) define @floor_nxv8f64( %x) strictfp { -; CHECK-LABEL: floor_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e64, m8, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: floor_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: fsrmi a0, 2 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: floor_nxv8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: fsrmi a0, 2 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.floor.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll index 010d7786c8891..9adbca55bcd01 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZFHMIN define @floor_nxv1bf16( %x) { ; CHECK-LABEL: floor_nxv1bf16: @@ -173,10 +173,11 @@ declare @llvm.floor.nxv32bf16() define @floor_nxv1f16( %x) { ; ZVFH-LABEL: floor_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: 
vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -212,10 +213,11 @@ declare @llvm.floor.nxv1f16() define @floor_nxv2f16( %x) { ; ZVFH-LABEL: floor_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -251,10 +253,11 @@ declare @llvm.floor.nxv2f16() define @floor_nxv4f16( %x) { ; ZVFH-LABEL: floor_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI8_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -290,10 +293,11 @@ declare @llvm.floor.nxv4f16() define @floor_nxv8f16( %x) { ; ZVFH-LABEL: floor_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI9_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -329,10 +333,11 @@ declare @llvm.floor.nxv8f16() define @floor_nxv16f16( %x) { ; ZVFH-LABEL: floor_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI10_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -368,10 +373,11 @@ declare @llvm.floor.nxv16f16() define @floor_nxv32f16( %x) { ; ZVFH-LABEL: floor_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI11_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -519,80 +525,268 @@ define @floor_nxv16f32( %x) { declare @llvm.floor.nxv16f32() define @floor_nxv1f64( %x) { -; CHECK-LABEL: floor_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: floor_nxv1f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZFH-NEXT: vfabs.v v9, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZFH-NEXT: fsrmi a0, 2 +; RV32ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: 
floor_nxv1f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZFH-NEXT: vfabs.v v9, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZFH-NEXT: fsrmi a0, 2 +; RV64ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZFH-NEXT: ret +; +; RV32ZFHMIN-LABEL: floor_nxv1f64: +; RV32ZFHMIN: # %bb.0: +; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZFHMIN-NEXT: fsrmi a0, 2 +; RV32ZFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZFHMIN-NEXT: fsrm a0 +; RV32ZFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZFHMIN-NEXT: ret +; +; RV64ZFHMIN-LABEL: floor_nxv1f64: +; RV64ZFHMIN: # %bb.0: +; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZFHMIN-NEXT: li a0, 1075 +; RV64ZFHMIN-NEXT: slli a0, a0, 52 +; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZFHMIN-NEXT: fsrmi a0, 2 +; RV64ZFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZFHMIN-NEXT: fsrm a0 +; RV64ZFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZFHMIN-NEXT: ret %a = call @llvm.floor.nxv1f64( %x) ret %a } declare @llvm.floor.nxv1f64() define @floor_nxv2f64( %x) { -; CHECK-LABEL: floor_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: floor_nxv2f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZFH-NEXT: vfabs.v v10, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZFH-NEXT: fsrmi a0, 2 +; RV32ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: floor_nxv2f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZFH-NEXT: vfabs.v v10, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZFH-NEXT: fsrmi a0, 2 +; RV64ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZFH-NEXT: ret +; +; RV32ZFHMIN-LABEL: floor_nxv2f64: +; RV32ZFHMIN: # %bb.0: +; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZFHMIN-NEXT: vfabs.v v10, v8 +; 
RV32ZFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZFHMIN-NEXT: fsrmi a0, 2 +; RV32ZFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZFHMIN-NEXT: fsrm a0 +; RV32ZFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZFHMIN-NEXT: ret +; +; RV64ZFHMIN-LABEL: floor_nxv2f64: +; RV64ZFHMIN: # %bb.0: +; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZFHMIN-NEXT: li a0, 1075 +; RV64ZFHMIN-NEXT: slli a0, a0, 52 +; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZFHMIN-NEXT: fsrmi a0, 2 +; RV64ZFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZFHMIN-NEXT: fsrm a0 +; RV64ZFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZFHMIN-NEXT: ret %a = call @llvm.floor.nxv2f64( %x) ret %a } declare @llvm.floor.nxv2f64() define @floor_nxv4f64( %x) { -; CHECK-LABEL: floor_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: floor_nxv4f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZFH-NEXT: vfabs.v v12, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZFH-NEXT: fsrmi a0, 2 +; RV32ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: floor_nxv4f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZFH-NEXT: vfabs.v v12, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZFH-NEXT: fsrmi a0, 2 +; RV64ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZFH-NEXT: ret +; +; RV32ZFHMIN-LABEL: floor_nxv4f64: +; RV32ZFHMIN: # %bb.0: +; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZFHMIN-NEXT: fsrmi a0, 2 +; RV32ZFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZFHMIN-NEXT: fsrm a0 +; RV32ZFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZFHMIN-NEXT: ret +; +; RV64ZFHMIN-LABEL: floor_nxv4f64: +; RV64ZFHMIN: # %bb.0: +; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZFHMIN-NEXT: li a0, 1075 +; RV64ZFHMIN-NEXT: slli a0, a0, 52 +; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZFHMIN-NEXT: fsrmi a0, 2 +; RV64ZFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZFHMIN-NEXT: fsrm a0 +; RV64ZFHMIN-NEXT: 
vfcvt.f.x.v v12, v12, v0.t +; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZFHMIN-NEXT: ret %a = call @llvm.floor.nxv4f64( %x) ret %a } declare @llvm.floor.nxv4f64() define @floor_nxv8f64( %x) { -; CHECK-LABEL: floor_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZFH-LABEL: floor_nxv8f64: +; RV32ZFH: # %bb.0: +; RV32ZFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZFH-NEXT: vfabs.v v16, v8 +; RV32ZFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZFH-NEXT: fsrmi a0, 2 +; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZFH-NEXT: fsrm a0 +; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZFH-NEXT: ret +; +; RV64ZFH-LABEL: floor_nxv8f64: +; RV64ZFH: # %bb.0: +; RV64ZFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZFH-NEXT: vfabs.v v16, v8 +; RV64ZFH-NEXT: li a0, 1075 +; RV64ZFH-NEXT: slli a0, a0, 52 +; RV64ZFH-NEXT: fmv.d.x fa5, a0 +; RV64ZFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZFH-NEXT: fsrmi a0, 2 +; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZFH-NEXT: fsrm a0 +; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZFH-NEXT: ret +; +; RV32ZFHMIN-LABEL: floor_nxv8f64: +; RV32ZFHMIN: # %bb.0: +; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZFHMIN-NEXT: fsrmi a0, 2 +; RV32ZFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZFHMIN-NEXT: fsrm a0 +; RV32ZFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZFHMIN-NEXT: ret +; +; RV64ZFHMIN-LABEL: floor_nxv8f64: +; RV64ZFHMIN: # %bb.0: +; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZFHMIN-NEXT: li a0, 1075 +; RV64ZFHMIN-NEXT: slli a0, a0, 52 +; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZFHMIN-NEXT: fsrmi a0, 2 +; RV64ZFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZFHMIN-NEXT: fsrm a0 +; RV64ZFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZFHMIN-NEXT: ret %a = call @llvm.floor.nxv8f64( %x) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll index c6ff39ad10d6b..4b42c517379ad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll @@ -1,22 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < 
%s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare <2 x half> @llvm.vp.ceil.v2f16(<2 x half>, <2 x i1>, i32) define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI0_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 3 @@ -59,10 +60,11 @@ define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI1_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.ceil.v4f16(<4 x half>, <4 x i1>, i32) define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI2_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 3 @@ -145,10 +148,11 @@ define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI3_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -185,10 +189,11 @@ declare <8 x half> @llvm.vp.ceil.v8f16(<8 x half>, <8 x i1>, i32) define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI4_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; 
ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 3 @@ -231,10 +236,11 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI5_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -273,9 +279,10 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 3 @@ -319,10 +326,11 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_ceil_v16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 3 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -529,41 +537,141 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) declare <2 x double> @llvm.vp.ceil.v2f64(<2 x double>, <2 x i1>, i32) define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI16_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf 
v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.ceil.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl) ret <2 x double> %v } define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: 
vp_ceil_v2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.ceil.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl) ret <2 x double> %v } @@ -571,43 +679,149 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; 
RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.ceil.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x double> %v } define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v4f64_unmasked: +; 
RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.ceil.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl) ret <4 x double> %v } @@ -615,43 +829,149 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: 
vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.ceil.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl) ret <8 x double> %v } define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI21_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; 
RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.ceil.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl) ret <8 x double> %v } @@ -659,43 +979,149 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v15f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v15f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v15f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, 
zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v15f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v15f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.ceil.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl) ret <15 x double> %v } define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v15f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI23_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v15f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v15f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v15f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; 
RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v15f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.ceil.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl) ret <15 x double> %v } @@ -703,43 +1129,149 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli 
zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v16f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl) ret <16 x double> %v } define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI25_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v16f64_unmasked: +; RV32ZVFHMIN: # 
%bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl) ret <16 x double> %v } @@ -747,91 +1279,341 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev declare <32 x double> @llvm.vp.ceil.v32f64(<32 x double>, <32 x i1>, i32) define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v32f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vslidedown.vi v7, v0, 2 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: lui a1, %hi(.LCPI26_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1) -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: sltu a0, a0, a1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a1, 3 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v32f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v6, v0 +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB26_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, 
m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32ZVFH-NEXT: addi a1, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a1 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a1 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 3 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 3 +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v32f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v6, v0 +; RV64ZVFH-NEXT: li a2, 16 +; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB26_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: addi a1, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a1 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: and a0, a0, a1 +; RV64ZVFH-NEXT: fsrmi a1, 3 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 3 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v32f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB26_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, 
v8, v0.t +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32ZVFHMIN-NEXT: addi a1, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a1 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 3 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 3 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v32f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB26_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: addi a1, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: and a0, a0, a1 +; RV64ZVFHMIN-NEXT: fsrmi a1, 3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) ret <32 x double> %v } define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_ceil_v32f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB27_2 
-; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: lui a2, %hi(.LCPI27_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2) -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: sltu a0, a0, a2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a2 -; CHECK-NEXT: fsrmi a2, 3 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v7, v24, fa5 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: fsrmi a1, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_ceil_v32f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB27_2: +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFH-NEXT: addi a2, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a2 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a2 +; RV32ZVFH-NEXT: fsrmi a2, 3 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 3 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_ceil_v32f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: li a2, 16 +; RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB27_2: +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: addi a2, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a2 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; RV64ZVFH-NEXT: and a0, a0, a2 +; RV64ZVFH-NEXT: fsrmi a2, 3 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: fsrmi a1, 3 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t 
+; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_ceil_v32f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB27_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFHMIN-NEXT: addi a2, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a2 +; RV32ZVFHMIN-NEXT: fsrmi a2, 3 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 3 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_ceil_v32f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB27_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: addi a2, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: and a0, a0, a2 +; RV64ZVFHMIN-NEXT: fsrmi a2, 3 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a1, 3 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl) ret <32 x double> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll 
index 2250ab5bd0bbe..b1af4e685c58f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll @@ -1945,49 +1945,49 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl ; RV32-NEXT: li a2, 16 ; RV32-NEXT: .LBB34_2: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t ; RV32-NEXT: li a1, 32 ; RV32-NEXT: lui a3, 349525 -; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: addi a3, a3, 1365 -; RV32-NEXT: vsrl.vi v24, v8, 2, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t -; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t -; RV32-NEXT: vsrl.vi v24, v8, 8, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t -; RV32-NEXT: vsrl.vi v24, v8, 16, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t -; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v16, a3 +; RV32-NEXT: vmv.v.x v24, a3 ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 5 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 -; RV32-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; RV32-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vnot.v v8, v8, v0.t -; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t -; RV32-NEXT: vand.vv v24, v24, v16, v0.t -; RV32-NEXT: vsub.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t ; RV32-NEXT: lui a3, 209715 ; RV32-NEXT: addi a3, a3, 819 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v24, a3 +; RV32-NEXT: vmv.v.x v16, a3 ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 4 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 -; RV32-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill +; RV32-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: vand.vv v16, v8, v24, v0.t +; RV32-NEXT: vand.vv v24, v8, v16, v0.t ; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t -; RV32-NEXT: vand.vv v8, v8, v24, v0.t -; RV32-NEXT: vadd.vv v8, v16, v8, v0.t -; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t -; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v24, v0.t ; RV32-NEXT: lui a3, 61681 ; RV32-NEXT: addi a3, a3, -241 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma @@ -2025,12 +2025,12 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl ; RV32-NEXT: mul a3, a3, a4 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 -; RV32-NEXT: vl8r.v v8, (a3) # vscale x 64-byte Folded Reload +; RV32-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t -; RV32-NEXT: vor.vv v16, v8, v16, v0.t -; RV32-NEXT: vsrl.vi v8, v16, 2, v0.t +; RV32-NEXT: vsrl.vi v8, v16, 
1, v0.t ; RV32-NEXT: vor.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t @@ -2247,14 +2247,14 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { ; RV32-NEXT: vsrl.vx v8, v16, a2 ; RV32-NEXT: vor.vv v24, v16, v8 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v16, a3 -; RV32-NEXT: addi a2, sp, 16 -; RV32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill +; RV32-NEXT: vmv.v.x v8, a3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vand.vv v8, v0, v16 +; RV32-NEXT: vand.vv v16, v0, v8 ; RV32-NEXT: vsrl.vi v0, v0, 2 -; RV32-NEXT: vand.vv v0, v0, v16 -; RV32-NEXT: vadd.vv v8, v8, v0 +; RV32-NEXT: vand.vv v0, v0, v8 +; RV32-NEXT: vadd.vv v16, v16, v0 +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vnot.v v24, v24 ; RV32-NEXT: vsrl.vi v0, v24, 1 @@ -2265,39 +2265,39 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { ; RV32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; RV32-NEXT: vand.vv v0, v0, v16 ; RV32-NEXT: vsub.vv v24, v24, v0 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v0, v8, 4 -; RV32-NEXT: vadd.vv v8, v8, v0 ; RV32-NEXT: addi a2, sp, 16 ; RV32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v0, v16, 4 +; RV32-NEXT: vadd.vv v16, v16, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vand.vv v0, v24, v16 +; RV32-NEXT: vand.vv v0, v24, v8 ; RV32-NEXT: vsrl.vi v24, v24, 2 -; RV32-NEXT: vand.vv v16, v24, v16 +; RV32-NEXT: vand.vv v8, v24, v8 ; RV32-NEXT: lui a2, 61681 ; RV32-NEXT: lui a3, 4112 ; RV32-NEXT: addi a2, a2, -241 ; RV32-NEXT: addi a3, a3, 257 -; RV32-NEXT: vadd.vv v16, v0, v16 -; RV32-NEXT: vsrl.vi v24, v16, 4 -; RV32-NEXT: vadd.vv v16, v16, v24 +; RV32-NEXT: vadd.vv v8, v0, v8 +; RV32-NEXT: vsrl.vi v24, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v24 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a2 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vand.vv v8, v8, v24 -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vmul.vv v8, v8, v24 -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vmul.vv v16, v16, v24 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v24, v8, v24 ; RV32-NEXT: li a2, 56 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vsrl.vx v8, v8, a2 +; RV32-NEXT: vsrl.vx v8, v16, a2 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsrl.vx v16, v16, a2 +; RV32-NEXT: vsrl.vx v16, v24, a2 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add sp, sp, a0 @@ -4320,49 +4320,49 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z ; RV32-NEXT: li a2, 16 ; RV32-NEXT: .LBB70_2: ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t ; RV32-NEXT: li a1, 32 ; RV32-NEXT: lui a3, 349525 -; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vor.vv v8, 
v8, v16, v0.t ; RV32-NEXT: addi a3, a3, 1365 -; RV32-NEXT: vsrl.vi v24, v8, 2, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t -; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t -; RV32-NEXT: vsrl.vi v24, v8, 8, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t -; RV32-NEXT: vsrl.vi v24, v8, 16, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t -; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t -; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v16, a3 +; RV32-NEXT: vmv.v.x v24, a3 ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 5 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 -; RV32-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; RV32-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vnot.v v8, v8, v0.t -; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t -; RV32-NEXT: vand.vv v24, v24, v16, v0.t -; RV32-NEXT: vsub.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t ; RV32-NEXT: lui a3, 209715 ; RV32-NEXT: addi a3, a3, 819 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v24, a3 +; RV32-NEXT: vmv.v.x v16, a3 ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 4 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 -; RV32-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill +; RV32-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; RV32-NEXT: vand.vv v16, v8, v24, v0.t +; RV32-NEXT: vand.vv v24, v8, v16, v0.t ; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t -; RV32-NEXT: vand.vv v8, v8, v24, v0.t -; RV32-NEXT: vadd.vv v8, v16, v8, v0.t -; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t -; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v24, v0.t ; RV32-NEXT: lui a3, 61681 ; RV32-NEXT: addi a3, a3, -241 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma @@ -4400,12 +4400,12 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z ; RV32-NEXT: mul a3, a3, a4 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 -; RV32-NEXT: vl8r.v v8, (a3) # vscale x 64-byte Folded Reload +; RV32-NEXT: vl8r.v v16, (a3) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t -; RV32-NEXT: vor.vv v16, v8, v16, v0.t -; RV32-NEXT: vsrl.vi v8, v16, 2, v0.t +; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t ; RV32-NEXT: vor.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t @@ -4622,14 +4622,14 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex ; RV32-NEXT: vsrl.vx v8, v16, a2 ; RV32-NEXT: vor.vv v24, v16, v8 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v16, a3 -; RV32-NEXT: addi a2, sp, 16 -; RV32-NEXT: vs8r.v v16, (a2) # 
vscale x 64-byte Folded Spill +; RV32-NEXT: vmv.v.x v8, a3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vand.vv v8, v0, v16 +; RV32-NEXT: vand.vv v16, v0, v8 ; RV32-NEXT: vsrl.vi v0, v0, 2 -; RV32-NEXT: vand.vv v0, v0, v16 -; RV32-NEXT: vadd.vv v8, v8, v0 +; RV32-NEXT: vand.vv v0, v0, v8 +; RV32-NEXT: vadd.vv v16, v16, v0 +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vnot.v v24, v24 ; RV32-NEXT: vsrl.vi v0, v24, 1 @@ -4640,39 +4640,39 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex ; RV32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; RV32-NEXT: vand.vv v0, v0, v16 ; RV32-NEXT: vsub.vv v24, v24, v0 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v0, v8, 4 -; RV32-NEXT: vadd.vv v8, v8, v0 ; RV32-NEXT: addi a2, sp, 16 ; RV32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v0, v16, 4 +; RV32-NEXT: vadd.vv v16, v16, v0 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vand.vv v0, v24, v16 +; RV32-NEXT: vand.vv v0, v24, v8 ; RV32-NEXT: vsrl.vi v24, v24, 2 -; RV32-NEXT: vand.vv v16, v24, v16 +; RV32-NEXT: vand.vv v8, v24, v8 ; RV32-NEXT: lui a2, 61681 ; RV32-NEXT: lui a3, 4112 ; RV32-NEXT: addi a2, a2, -241 ; RV32-NEXT: addi a3, a3, 257 -; RV32-NEXT: vadd.vv v16, v0, v16 -; RV32-NEXT: vsrl.vi v24, v16, 4 -; RV32-NEXT: vadd.vv v16, v16, v24 +; RV32-NEXT: vadd.vv v8, v0, v8 +; RV32-NEXT: vsrl.vi v24, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v24 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a2 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vand.vv v8, v8, v24 -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vmul.vv v8, v8, v24 -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vmul.vv v16, v16, v24 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v24, v8, v24 ; RV32-NEXT: li a2, 56 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vsrl.vx v8, v8, a2 +; RV32-NEXT: vsrl.vx v8, v16, a2 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsrl.vx v16, v16, a2 +; RV32-NEXT: vsrl.vx v16, v24, a2 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll index 94fecbdfde18e..a993ed909d940 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll @@ -1449,27 +1449,24 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev ; RV32-NEXT: li a1, 16 ; RV32-NEXT: .LBB34_2: ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t ; RV32-NEXT: lui a2, 349525 ; RV32-NEXT: addi a2, a2, 1365 ; RV32-NEXT: vsetvli a3, zero, e32, m8, ta, ma -; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vmv.v.x v24, a2 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: slli a2, a2, 5 ; RV32-NEXT: add a2, sp, a2 ; RV32-NEXT: addi a2, a2, 16 -; RV32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill +; RV32-NEXT: vs8r.v v24, (a2) # 
vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; RV32-NEXT: vand.vv v24, v24, v16, v0.t -; RV32-NEXT: vsub.vv v8, v8, v24, v0.t +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t ; RV32-NEXT: lui a2, 209715 ; RV32-NEXT: addi a2, a2, 819 ; RV32-NEXT: vsetvli a3, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a2 -; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: slli a2, a2, 4 -; RV32-NEXT: add a2, sp, a2 -; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: addi a2, sp, 16 ; RV32-NEXT: vs8r.v v24, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vand.vv v16, v8, v24, v0.t @@ -1494,14 +1491,17 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev ; RV32-NEXT: addi a2, a2, 257 ; RV32-NEXT: vsetvli a3, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v16, a2 -; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 ; RV32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsrl.vx v8, v8, a1, v0.t ; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: slli a2, a2, 4 ; RV32-NEXT: add a2, sp, a2 ; RV32-NEXT: addi a2, a2, 16 ; RV32-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill @@ -1515,25 +1515,22 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev ; RV32-NEXT: mul a2, a2, a3 ; RV32-NEXT: add a2, sp, a2 ; RV32-NEXT: addi a2, a2, 16 -; RV32-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload +; RV32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vand.vv v8, v24, v8, v0.t +; RV32-NEXT: vsub.vv v8, v16, v8, v0.t +; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload -; RV32-NEXT: vand.vv v16, v16, v24, v0.t -; RV32-NEXT: vsub.vv v16, v8, v16, v0.t -; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 4 -; RV32-NEXT: add a0, sp, a0 -; RV32-NEXT: addi a0, a0, 16 -; RV32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload -; RV32-NEXT: vand.vv v8, v16, v24, v0.t -; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t -; RV32-NEXT: vand.vv v16, v16, v24, v0.t -; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vand.vv v16, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v24, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t ; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t ; RV32-NEXT: csrr a0, vlenb @@ -1543,12 +1540,15 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; RV32-NEXT: vand.vv v8, v8, v16, v0.t -; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t ; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t ; RV32-NEXT: csrr a0, vlenb -; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add 
a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll index bdce00b10e5a7..1922006b8a581 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll @@ -1810,10 +1810,10 @@ define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { ; RV32-NEXT: vadd.vi v0, v16, -1 ; RV32-NEXT: vnot.v v16, v16 ; RV32-NEXT: vand.vv v0, v16, v0 +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v0, (a3) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli a3, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v16, a2 -; RV32-NEXT: addi a2, sp, 16 -; RV32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vand.vv v8, v24, v16 ; RV32-NEXT: vsrl.vi v24, v24, 2 @@ -1825,14 +1825,14 @@ define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, sp, a2 ; RV32-NEXT: addi a2, a2, 16 -; RV32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload -; RV32-NEXT: vand.vv v24, v24, v16 +; RV32-NEXT: vl8r.v v0, (a2) # vscale x 64-byte Folded Reload +; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl8r.v v0, (a2) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsub.vv v24, v0, v24 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsrl.vi v0, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v0 -; RV32-NEXT: addi a2, sp, 16 -; RV32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v0, v24, v16 ; RV32-NEXT: vsrl.vi v24, v24, 2 @@ -3715,10 +3715,10 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex ; RV32-NEXT: vadd.vi v0, v16, -1 ; RV32-NEXT: vnot.v v16, v16 ; RV32-NEXT: vand.vv v0, v16, v0 +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v0, (a3) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli a3, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v16, a2 -; RV32-NEXT: addi a2, sp, 16 -; RV32-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vand.vv v8, v24, v16 ; RV32-NEXT: vsrl.vi v24, v24, 2 @@ -3730,14 +3730,14 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, sp, a2 ; RV32-NEXT: addi a2, a2, 16 -; RV32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload -; RV32-NEXT: vand.vv v24, v24, v16 +; RV32-NEXT: vl8r.v v0, (a2) # vscale x 64-byte Folded Reload +; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: addi a2, sp, 16 +; RV32-NEXT: vl8r.v v0, (a2) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsub.vv v24, v0, v24 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsrl.vi v0, v8, 4 ; RV32-NEXT: vadd.vv v8, v8, v0 -; RV32-NEXT: addi a2, sp, 16 -; RV32-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vand.vv v0, v24, v16 ; RV32-NEXT: vsrl.vi v24, v24, 2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll index ab2d00b9b9137..71b0624d91f22 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll @@ -1,18 
+1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s define <1 x half> @ceil_v1f16(<1 x half> %x) strictfp { ; CHECK-LABEL: ceil_v1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -32,10 +33,11 @@ define <2 x half> @ceil_v2f16(<2 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -55,10 +57,11 @@ define <4 x half> @ceil_v4f16(<4 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -78,10 +81,11 @@ define <8 x half> @ceil_v8f16(<8 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -101,10 +105,11 @@ define <16 x half> @ceil_v16f16(<16 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -123,11 +128,12 @@ define <32 x half> @ceil_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: ceil_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: lui a1, %hi(.LCPI5_0) +; CHECK-NEXT: li a1, 25 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1) +; CHECK-NEXT: slli a1, a1, 10 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: fmv.h.x fa5, a1 ; 
CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: fsrmi a0, 3 @@ -259,92 +265,168 @@ define <16 x float> @ceil_v16f32(<16 x float> %x) strictfp { declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, metadata) define <1 x double> @ceil_v1f64(<1 x double> %x) strictfp { -; CHECK-LABEL: ceil_v1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: ceil_v1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 3 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: ceil_v1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 3 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata) define <2 x double> @ceil_v2f64(<2 x double> %x) strictfp { -; CHECK-LABEL: ceil_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: ceil_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 3 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, 
ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: ceil_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 3 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata) define <4 x double> @ceil_v4f64(<4 x double> %x) strictfp { -; CHECK-LABEL: ceil_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: ceil_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: fsrmi a0, 3 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: ceil_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: fsrmi a0, 3 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata) define <8 x double> @ceil_v8f64(<8 x double> %x) strictfp { -; CHECK-LABEL: ceil_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 3 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v 
v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: ceil_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: fsrmi a0, 3 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: ceil_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: fsrmi a0, 3 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll index c6ce7c1bbe8b4..9eca66eea865c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll @@ -1,18 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s define <1 x half> @floor_v1f16(<1 x half> %x) strictfp { ; CHECK-LABEL: floor_v1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -32,10 +33,11 @@ define <2 x half> @floor_v2f16(<2 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -55,10 +57,11 @@ define <4 x half> @floor_v4f16(<4 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; 
CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -78,10 +81,11 @@ define <8 x half> @floor_v8f16(<8 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -101,10 +105,11 @@ define <16 x half> @floor_v16f16(<16 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: fsrmi a0, 2 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -123,11 +128,12 @@ define <32 x half> @floor_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: floor_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: lui a1, %hi(.LCPI5_0) +; CHECK-NEXT: li a1, 25 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1) +; CHECK-NEXT: slli a1, a1, 10 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: fmv.h.x fa5, a1 ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: fsrmi a0, 2 @@ -259,92 +265,168 @@ define <16 x float> @floor_v16f32(<16 x float> %x) strictfp { declare <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata) define <1 x double> @floor_v1f64(<1 x double> %x) strictfp { -; CHECK-LABEL: floor_v1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: floor_v1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 2 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: floor_v1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv 
v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 2 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata) define <2 x double> @floor_v2f64(<2 x double> %x) strictfp { -; CHECK-LABEL: floor_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: floor_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 2 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: floor_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 2 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata) define <4 x double> @floor_v4f64(<4 x double> %x) strictfp { -; CHECK-LABEL: floor_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: floor_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 
+; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: fsrmi a0, 2 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: floor_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: fsrmi a0, 2 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata) define <8 x double> @floor_v8f64(<8 x double> %x) strictfp { -; CHECK-LABEL: floor_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: floor_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: fsrmi a0, 2 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: floor_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: fsrmi a0, 2 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll index 6fc0165d7e77f..4494b97119403 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll @@ -1,22 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare <2 x half> @llvm.vp.floor.v2f16(<2 x half>, <2 x i1>, i32) define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI0_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 2 @@ -59,10 +60,11 @@ define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI1_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.floor.v4f16(<4 x half>, <4 x i1>, i32) define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI2_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 2 @@ -145,10 +148,11 @@ define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI3_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -185,10 +189,11 @@ declare <8 x half> @llvm.vp.floor.v8f16(<8 x half>, <8 x i1>, i32) define <8 x half> 
@vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI4_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 2 @@ -231,10 +236,11 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI5_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -273,9 +279,10 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext % ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 2 @@ -319,10 +326,11 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext % define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_v16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -529,41 +537,141 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl declare <2 x double> @llvm.vp.floor.v2f64(<2 x double>, <2 x i1>, i32) define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI16_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, 
zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl) ret <2 x double> %v } define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 
+; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl) ret <2 x double> %v } @@ -571,43 +679,149 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; 
RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x double> %v } define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 
1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v4f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl) ret <4 x double> %v } @@ -615,43 +829,149 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; 
RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl) ret <8 x double> %v } define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI21_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 
1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl) ret <8 x double> %v } @@ -659,43 +979,149 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v15f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v15f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v15f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t 
+; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v15f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v15f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl) ret <15 x double> %v } define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v15f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI23_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v15f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v15f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; 
RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v15f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v15f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl) ret <15 x double> %v } @@ -703,43 +1129,149 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; 
RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v16f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl) ret <16 x double> %v } define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI25_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; 
RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v16f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl) ret <16 x double> %v } @@ -747,91 +1279,341 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e declare <32 x double> @llvm.vp.floor.v32f64(<32 x double>, <32 x i1>, i32) define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v32f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vslidedown.vi v7, v0, 2 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: lui a1, %hi(.LCPI26_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1) -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: sltu a0, a0, a1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a1, 2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, 
ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v32f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v6, v0 +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB26_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32ZVFH-NEXT: addi a1, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a1 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a1 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v32f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v6, v0 +; RV64ZVFH-NEXT: li a2, 16 +; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB26_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: addi a1, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a1 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: and a0, a0, a1 +; RV64ZVFH-NEXT: fsrmi a1, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; 
RV32ZVFHMIN-LABEL: vp_floor_v32f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB26_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32ZVFHMIN-NEXT: addi a1, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a1 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v32f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB26_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: addi a1, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: and a0, a0, a1 +; RV64ZVFHMIN-NEXT: fsrmi a1, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; 
RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) ret <32 x double> %v } define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_v32f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: lui a2, %hi(.LCPI27_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2) -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: sltu a0, a0, a2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a2 -; CHECK-NEXT: fsrmi a2, 2 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v7, v24, fa5 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: fsrmi a1, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_v32f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB27_2: +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFH-NEXT: addi a2, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a2 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a2 +; RV32ZVFH-NEXT: fsrmi a2, 2 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 2 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_v32f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: li a2, 16 +; RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB27_2: +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: addi a2, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a2 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; 
RV64ZVFH-NEXT: and a0, a0, a2 +; RV64ZVFH-NEXT: fsrmi a2, 2 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: fsrmi a1, 2 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_v32f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB27_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFHMIN-NEXT: addi a2, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a2 +; RV32ZVFHMIN-NEXT: fsrmi a2, 2 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 2 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_v32f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB27_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: addi a2, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: and a0, a0, a2 +; RV64ZVFHMIN-NEXT: fsrmi a2, 2 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a1, 2 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, 
a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl) ret <32 x double> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll index 3a7ded1537ef6..dd1b99bee6d55 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s declare <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(<2 x half>, metadata, metadata) @@ -11,10 +11,11 @@ define <2 x half> @nearbyint_v2f16(<2 x half> %v) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -35,10 +36,11 @@ define <4 x half> @nearbyint_v4f16(<4 x half> %v) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -59,10 +61,11 @@ define <8 x half> @nearbyint_v8f16(<8 x half> %v) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -83,10 +86,11 @@ define <16 x half> @nearbyint_v16f16(<16 x half> %v) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -106,11 +110,12 @@ define <32 x half> @nearbyint_v32f16(<32 x half> %v) strictfp { 
; CHECK-LABEL: nearbyint_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: lui a1, %hi(.LCPI4_0) +; CHECK-NEXT: li a1, 25 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a1) +; CHECK-NEXT: slli a1, a1, 10 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: fmv.h.x fa5, a1 ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: frflags a0 @@ -224,23 +229,42 @@ define <16 x float> @nearbyint_v16f32(<16 x float> %v) strictfp { declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata) define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp { -; CHECK-LABEL: nearbyint_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI9_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI9_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: nearbyint_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI9_0) +; RV32-NEXT: fld fa5, %lo(.LCPI9_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: nearbyint_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %r = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") ret <2 x double> %r } @@ -248,23 +272,42 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp { declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata) define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp { -; CHECK-LABEL: nearbyint_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI10_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI10_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: 
fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: nearbyint_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI10_0) +; RV32-NEXT: fld fa5, %lo(.LCPI10_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: nearbyint_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %r = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") ret <4 x double> %r } @@ -272,23 +315,42 @@ define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp { declare <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double>, metadata, metadata) define <8 x double> @nearbyint_v8f64(<8 x double> %v) strictfp { -; CHECK-LABEL: nearbyint_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: nearbyint_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: nearbyint_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %r = call <8 x double> 
@llvm.experimental.constrained.nearbyint.v8f64(<8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict") ret <8 x double> %r } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll index abb929eaaf6e6..e256ba9dd5997 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN +; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN define void @fcmp_oeq_vv_v8f16(ptr %x, ptr %y, ptr %z) { ; ZVFH-LABEL: fcmp_oeq_vv_v8f16: @@ -437,6 +437,1036 @@ define void @fcmp_ugt_vv_v64f16(ptr %x, ptr %y, ptr %z) { ; ZVFH-NEXT: vmnot.m v8, v24 ; ZVFH-NEXT: vsm.v v8, (a2) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_vv_v64f16: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 512 +; RV32ZVFHMIN-NEXT: sw ra, 508(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 504(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a3, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a3, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v16, (a1) +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: addi a1, sp, 256 +; RV32ZVFHMIN-NEXT: vse16.v v16, (a0) +; RV32ZVFHMIN-NEXT: vse16.v v8, (a1) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: lh a1, 320(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: lh a1, 318(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: lh a1, 316(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: lh a1, 314(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: lh a1, 
312(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: lh a1, 310(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: lh a1, 308(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: lh a1, 306(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a1, 176(sp) +; RV32ZVFHMIN-NEXT: lh a4, 304(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a0, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: sb a1, 88(sp) +; RV32ZVFHMIN-NEXT: lh a4, 174(sp) +; RV32ZVFHMIN-NEXT: lh a5, 302(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v16, 7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 87(sp) +; RV32ZVFHMIN-NEXT: lh a4, 172(sp) +; RV32ZVFHMIN-NEXT: lh a5, 300(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 7 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v16, 6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 86(sp) +; RV32ZVFHMIN-NEXT: lh a4, 170(sp) +; RV32ZVFHMIN-NEXT: lh a5, 298(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 6 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v16, 5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 85(sp) +; RV32ZVFHMIN-NEXT: lh a4, 168(sp) +; RV32ZVFHMIN-NEXT: lh a5, 296(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v19, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v16, 4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 84(sp) +; RV32ZVFHMIN-NEXT: lh a4, 166(sp) +; RV32ZVFHMIN-NEXT: lh a5, 294(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v21, v8, 4 +; RV32ZVFHMIN-NEXT: vslidedown.vi v23, v16, 3 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 83(sp) +; RV32ZVFHMIN-NEXT: lh a4, 164(sp) +; RV32ZVFHMIN-NEXT: lh a5, 292(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 3 +; RV32ZVFHMIN-NEXT: vslidedown.vi v25, v16, 2 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 82(sp) +; RV32ZVFHMIN-NEXT: lh a4, 162(sp) +; RV32ZVFHMIN-NEXT: lh a5, 290(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v24, v8, 2 +; RV32ZVFHMIN-NEXT: 
vslidedown.vi v26, v16, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: xori a0, a4, 1 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 160(sp) +; RV32ZVFHMIN-NEXT: lh a1, 288(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a4, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: lh a1, 354(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v27, v8, 1 +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v16, 15 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a4, 224(sp) +; RV32ZVFHMIN-NEXT: lh a5, 352(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a1, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a0, v13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 112(sp) +; RV32ZVFHMIN-NEXT: lh a4, 222(sp) +; RV32ZVFHMIN-NEXT: lh a6, 350(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 15 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 111(sp) +; RV32ZVFHMIN-NEXT: lh a4, 220(sp) +; RV32ZVFHMIN-NEXT: lh a6, 348(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v16, 14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 110(sp) +; RV32ZVFHMIN-NEXT: lh t0, 218(sp) +; RV32ZVFHMIN-NEXT: lh t1, 346(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v19 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: sb t0, 109(sp) +; RV32ZVFHMIN-NEXT: lh t0, 216(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v16, 13 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v20 +; RV32ZVFHMIN-NEXT: lh t2, 344(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v21 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v16, 12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV32ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: xori a1, t2, 1 +; RV32ZVFHMIN-NEXT: sb a1, 108(sp) +; RV32ZVFHMIN-NEXT: lh a1, 214(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a0 +; RV32ZVFHMIN-NEXT: lh t3, 342(sp) +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v23 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: sb a1, 107(sp) +; RV32ZVFHMIN-NEXT: lh a5, 212(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh a7, 340(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s t3, v22 +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v16, 11 
+; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: sb a5, 106(sp) +; RV32ZVFHMIN-NEXT: lh a5, 210(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: lh a6, 338(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v25 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: sb a5, 105(sp) +; RV32ZVFHMIN-NEXT: lh a6, 208(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh t0, 336(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV32ZVFHMIN-NEXT: vslidedown.vi v24, v16, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: sb t0, 104(sp) +; RV32ZVFHMIN-NEXT: lh t0, 206(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: lh t1, 334(sp) +; RV32ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v26 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: xori a7, t1, 1 +; RV32ZVFHMIN-NEXT: sb a7, 103(sp) +; RV32ZVFHMIN-NEXT: lh a7, 204(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a6, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh t1, 332(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v27 +; RV32ZVFHMIN-NEXT: vslidedown.vi v26, v16, 9 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: xori a7, t1, 1 +; RV32ZVFHMIN-NEXT: sb a7, 102(sp) +; RV32ZVFHMIN-NEXT: lh a7, 202(sp) +; RV32ZVFHMIN-NEXT: lh t0, 330(sp) +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: sb a7, 101(sp) +; RV32ZVFHMIN-NEXT: lh a7, 200(sp) +; RV32ZVFHMIN-NEXT: lh t0, 328(sp) +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: sb a7, 100(sp) +; RV32ZVFHMIN-NEXT: lh a7, 198(sp) +; RV32ZVFHMIN-NEXT: lh t0, 326(sp) +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: xori t2, t2, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: sb a7, 99(sp) +; RV32ZVFHMIN-NEXT: lh a7, 196(sp) +; RV32ZVFHMIN-NEXT: lh t0, 324(sp) +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: xori t1, t1, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: sb a7, 98(sp) +; RV32ZVFHMIN-NEXT: lh a7, 194(sp) +; RV32ZVFHMIN-NEXT: lh t0, 322(sp) +; RV32ZVFHMIN-NEXT: sb t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb a6, 66(sp) +; RV32ZVFHMIN-NEXT: sb t2, 67(sp) +; RV32ZVFHMIN-NEXT: sb a5, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x 
fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: sb a4, 69(sp) +; RV32ZVFHMIN-NEXT: sb a1, 70(sp) +; RV32ZVFHMIN-NEXT: sb a0, 71(sp) +; RV32ZVFHMIN-NEXT: sb a5, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: lh a1, 382(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v16, 8 +; RV32ZVFHMIN-NEXT: vslidedown.vi v2, v8, 14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: lh a1, 380(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v0, v8, 13 +; RV32ZVFHMIN-NEXT: vslidedown.vi v4, v8, 12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) +; RV32ZVFHMIN-NEXT: lh a1, 378(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v6, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v30, v8, 10 +; RV32ZVFHMIN-NEXT: vslidedown.vi v28, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: lh a1, 376(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: lh a1, 374(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v2 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v18 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v0 +; RV32ZVFHMIN-NEXT: lh a1, 372(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v20 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: vmv.x.s t3, v4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a1, 242(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: lh a4, 370(sp) +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: sb a1, 121(sp) +; RV32ZVFHMIN-NEXT: lh a4, 240(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: lh a6, 368(sp) +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 120(sp) +; RV32ZVFHMIN-NEXT: lh a6, 238(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: lh t0, 366(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; 
RV32ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: sb t0, 119(sp) +; RV32ZVFHMIN-NEXT: lh t0, 236(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: lh t1, 364(sp) +; RV32ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v30 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a5, t1, 1 +; RV32ZVFHMIN-NEXT: sb a5, 118(sp) +; RV32ZVFHMIN-NEXT: lh a5, 234(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: lh a7, 362(sp) +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v26 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a6, a7, 1 +; RV32ZVFHMIN-NEXT: sb a6, 117(sp) +; RV32ZVFHMIN-NEXT: lh a6, 232(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: lh a7, 360(sp) +; RV32ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v28 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a5, a7, 1 +; RV32ZVFHMIN-NEXT: sb a5, 116(sp) +; RV32ZVFHMIN-NEXT: lh a5, 230(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: lh a6, 358(sp) +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a6, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, t2, 1 +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: sb a6, 115(sp) +; RV32ZVFHMIN-NEXT: lh a6, 228(sp) +; RV32ZVFHMIN-NEXT: lh t2, 356(sp) +; RV32ZVFHMIN-NEXT: sb a5, 76(sp) +; RV32ZVFHMIN-NEXT: sb a4, 77(sp) +; RV32ZVFHMIN-NEXT: sb a1, 78(sp) +; RV32ZVFHMIN-NEXT: sb a0, 79(sp) +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a1, t1, 1 +; RV32ZVFHMIN-NEXT: xori a4, t0, 1 +; RV32ZVFHMIN-NEXT: xori a5, a7, 1 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 72(sp) +; RV32ZVFHMIN-NEXT: sb a5, 73(sp) +; RV32ZVFHMIN-NEXT: sb a4, 74(sp) +; RV32ZVFHMIN-NEXT: sb a1, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a3, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a2) +; RV32ZVFHMIN-NEXT: addi sp, s0, -512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 512 +; RV32ZVFHMIN-NEXT: lw ra, 508(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 504(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_vv_v64f16: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 512 +; RV64ZVFHMIN-NEXT: sd ra, 504(sp) # 8-byte Folded 
Spill +; RV64ZVFHMIN-NEXT: sd s0, 496(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a3, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a3, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v16, (a1) +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: addi a1, sp, 256 +; RV64ZVFHMIN-NEXT: vse16.v v16, (a0) +; RV64ZVFHMIN-NEXT: vse16.v v8, (a1) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: lh a1, 320(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: lh a1, 318(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; RV64ZVFHMIN-NEXT: lh a1, 316(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; RV64ZVFHMIN-NEXT: lh a1, 314(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: lh a1, 312(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 182(sp) +; RV64ZVFHMIN-NEXT: lh a1, 310(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: lh a1, 308(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: lh a1, 306(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a1, 176(sp) +; RV64ZVFHMIN-NEXT: lh a4, 304(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a0, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: sb a1, 88(sp) +; RV64ZVFHMIN-NEXT: lh a4, 174(sp) +; RV64ZVFHMIN-NEXT: lh a5, 302(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v16, 7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 87(sp) +; RV64ZVFHMIN-NEXT: lh a4, 172(sp) +; RV64ZVFHMIN-NEXT: lh a5, 300(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 7 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v16, 6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x 
fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 86(sp) +; RV64ZVFHMIN-NEXT: lh a4, 170(sp) +; RV64ZVFHMIN-NEXT: lh a5, 298(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 6 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v16, 5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 85(sp) +; RV64ZVFHMIN-NEXT: lh a4, 168(sp) +; RV64ZVFHMIN-NEXT: lh a5, 296(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v19, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v16, 4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 84(sp) +; RV64ZVFHMIN-NEXT: lh a4, 166(sp) +; RV64ZVFHMIN-NEXT: lh a5, 294(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v21, v8, 4 +; RV64ZVFHMIN-NEXT: vslidedown.vi v23, v16, 3 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 83(sp) +; RV64ZVFHMIN-NEXT: lh a4, 164(sp) +; RV64ZVFHMIN-NEXT: lh a5, 292(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vi v25, v16, 2 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 82(sp) +; RV64ZVFHMIN-NEXT: lh a4, 162(sp) +; RV64ZVFHMIN-NEXT: lh a5, 290(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v24, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v26, v16, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: xori a0, a4, 1 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: lh a1, 288(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a4, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: lh a1, 354(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v27, v8, 1 +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v16, 15 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a4, 224(sp) +; RV64ZVFHMIN-NEXT: lh a5, 352(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a1, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a0, v13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 112(sp) +; RV64ZVFHMIN-NEXT: lh a4, 222(sp) +; RV64ZVFHMIN-NEXT: lh a6, 350(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 15 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 111(sp) +; RV64ZVFHMIN-NEXT: lh a4, 220(sp) +; RV64ZVFHMIN-NEXT: lh a6, 348(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v16, 14 +; 
RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 110(sp) +; RV64ZVFHMIN-NEXT: lh t0, 218(sp) +; RV64ZVFHMIN-NEXT: lh t1, 346(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v19 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; RV64ZVFHMIN-NEXT: sb t0, 109(sp) +; RV64ZVFHMIN-NEXT: lh t0, 216(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v16, 13 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v20 +; RV64ZVFHMIN-NEXT: lh t2, 344(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v21 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v16, 12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV64ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: xori a1, t2, 1 +; RV64ZVFHMIN-NEXT: sb a1, 108(sp) +; RV64ZVFHMIN-NEXT: lh a1, 214(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a0 +; RV64ZVFHMIN-NEXT: lh t3, 342(sp) +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v23 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: sb a1, 107(sp) +; RV64ZVFHMIN-NEXT: lh a5, 212(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh a7, 340(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s t3, v22 +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v16, 11 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: sb a5, 106(sp) +; RV64ZVFHMIN-NEXT: lh a5, 210(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: lh a6, 338(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v25 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: sb a5, 105(sp) +; RV64ZVFHMIN-NEXT: lh a6, 208(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh t0, 336(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV64ZVFHMIN-NEXT: vslidedown.vi v24, v16, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; RV64ZVFHMIN-NEXT: sb t0, 104(sp) +; RV64ZVFHMIN-NEXT: lh t0, 206(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: lh t1, 334(sp) +; RV64ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v26 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: xori a7, t1, 1 +; RV64ZVFHMIN-NEXT: sb a7, 103(sp) +; RV64ZVFHMIN-NEXT: lh a7, 204(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a6, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh t1, 332(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v27 +; RV64ZVFHMIN-NEXT: vslidedown.vi v26, v16, 9 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; 
RV64ZVFHMIN-NEXT: xori a7, t1, 1 +; RV64ZVFHMIN-NEXT: sb a7, 102(sp) +; RV64ZVFHMIN-NEXT: lh a7, 202(sp) +; RV64ZVFHMIN-NEXT: lh t0, 330(sp) +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: sb a7, 101(sp) +; RV64ZVFHMIN-NEXT: lh a7, 200(sp) +; RV64ZVFHMIN-NEXT: lh t0, 328(sp) +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: sb a7, 100(sp) +; RV64ZVFHMIN-NEXT: lh a7, 198(sp) +; RV64ZVFHMIN-NEXT: lh t0, 326(sp) +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori t2, t2, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: sb a7, 99(sp) +; RV64ZVFHMIN-NEXT: lh a7, 196(sp) +; RV64ZVFHMIN-NEXT: lh t0, 324(sp) +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: xori t1, t1, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: sb a7, 98(sp) +; RV64ZVFHMIN-NEXT: lh a7, 194(sp) +; RV64ZVFHMIN-NEXT: lh t0, 322(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb a6, 66(sp) +; RV64ZVFHMIN-NEXT: sb t2, 67(sp) +; RV64ZVFHMIN-NEXT: sb a5, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: sb a4, 69(sp) +; RV64ZVFHMIN-NEXT: sb a1, 70(sp) +; RV64ZVFHMIN-NEXT: sb a0, 71(sp) +; RV64ZVFHMIN-NEXT: sb a5, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: lh a1, 382(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v16, 8 +; RV64ZVFHMIN-NEXT: vslidedown.vi v2, v8, 14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: lh a1, 380(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v0, v8, 13 +; RV64ZVFHMIN-NEXT: vslidedown.vi v4, v8, 12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 250(sp) +; RV64ZVFHMIN-NEXT: lh a1, 378(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v6, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v30, v8, 10 +; RV64ZVFHMIN-NEXT: vslidedown.vi v28, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: lh a1, 376(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: lh a1, 374(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v2 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v18 +; 
RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v0 +; RV64ZVFHMIN-NEXT: lh a1, 372(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v20 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmv.x.s t3, v4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a1, 242(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: lh a4, 370(sp) +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: sb a1, 121(sp) +; RV64ZVFHMIN-NEXT: lh a4, 240(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: lh a6, 368(sp) +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 120(sp) +; RV64ZVFHMIN-NEXT: lh a6, 238(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: lh t0, 366(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; RV64ZVFHMIN-NEXT: sb t0, 119(sp) +; RV64ZVFHMIN-NEXT: lh t0, 236(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: lh t1, 364(sp) +; RV64ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v30 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a5, t1, 1 +; RV64ZVFHMIN-NEXT: sb a5, 118(sp) +; RV64ZVFHMIN-NEXT: lh a5, 234(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: lh a7, 362(sp) +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v26 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a6, a7, 1 +; RV64ZVFHMIN-NEXT: sb a6, 117(sp) +; RV64ZVFHMIN-NEXT: lh a6, 232(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: lh a7, 360(sp) +; RV64ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v28 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a5, a7, 1 +; RV64ZVFHMIN-NEXT: sb a5, 116(sp) +; RV64ZVFHMIN-NEXT: lh a5, 230(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: lh a6, 358(sp) +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a6, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: xori a4, 
a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, t2, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: sb a6, 115(sp) +; RV64ZVFHMIN-NEXT: lh a6, 228(sp) +; RV64ZVFHMIN-NEXT: lh t2, 356(sp) +; RV64ZVFHMIN-NEXT: sb a5, 76(sp) +; RV64ZVFHMIN-NEXT: sb a4, 77(sp) +; RV64ZVFHMIN-NEXT: sb a1, 78(sp) +; RV64ZVFHMIN-NEXT: sb a0, 79(sp) +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a1, t1, 1 +; RV64ZVFHMIN-NEXT: xori a4, t0, 1 +; RV64ZVFHMIN-NEXT: xori a5, a7, 1 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 72(sp) +; RV64ZVFHMIN-NEXT: sb a5, 73(sp) +; RV64ZVFHMIN-NEXT: sb a4, 74(sp) +; RV64ZVFHMIN-NEXT: sb a1, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a3, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a2) +; RV64ZVFHMIN-NEXT: addi sp, s0, -512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 512 +; RV64ZVFHMIN-NEXT: ld ra, 504(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 496(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = load <64 x half>, ptr %y %c = fcmp ugt <64 x half> %a, %b @@ -454,6 +1484,908 @@ define void @fcmp_ugt_vv_v64f16_nonans(ptr %x, ptr %y, ptr %z) { ; ZVFH-NEXT: vmflt.vv v24, v16, v8 ; ZVFH-NEXT: vsm.v v24, (a2) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_vv_v64f16_nonans: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 512 +; RV32ZVFHMIN-NEXT: sw ra, 508(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 504(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a3, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a3, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v16, (a0) +; RV32ZVFHMIN-NEXT: vle16.v v8, (a1) +; RV32ZVFHMIN-NEXT: addi a0, sp, 256 +; RV32ZVFHMIN-NEXT: addi a1, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v16, (a0) +; RV32ZVFHMIN-NEXT: vse16.v v8, (a1) +; RV32ZVFHMIN-NEXT: lh a0, 320(sp) +; RV32ZVFHMIN-NEXT: lh a1, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 318(sp) +; RV32ZVFHMIN-NEXT: lh a1, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 316(sp) +; RV32ZVFHMIN-NEXT: lh a1, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 314(sp) +; RV32ZVFHMIN-NEXT: lh a1, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 312(sp) +; RV32ZVFHMIN-NEXT: lh a1, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; 
RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 310(sp) +; RV32ZVFHMIN-NEXT: lh a1, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 308(sp) +; RV32ZVFHMIN-NEXT: lh a1, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 306(sp) +; RV32ZVFHMIN-NEXT: lh a1, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 304(sp) +; RV32ZVFHMIN-NEXT: lh a1, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 302(sp) +; RV32ZVFHMIN-NEXT: lh a1, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 300(sp) +; RV32ZVFHMIN-NEXT: lh a1, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a1, 298(sp) +; RV32ZVFHMIN-NEXT: lh a4, 170(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a0, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a1, 85(sp) +; RV32ZVFHMIN-NEXT: lh a4, 296(sp) +; RV32ZVFHMIN-NEXT: lh a5, 168(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v16, 7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 84(sp) +; RV32ZVFHMIN-NEXT: lh a4, 294(sp) +; RV32ZVFHMIN-NEXT: lh a5, 166(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 7 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v16, 6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 83(sp) +; RV32ZVFHMIN-NEXT: lh a4, 292(sp) +; RV32ZVFHMIN-NEXT: lh a5, 164(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 6 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v16, 5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 82(sp) +; RV32ZVFHMIN-NEXT: lh a4, 290(sp) +; RV32ZVFHMIN-NEXT: lh a5, 162(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v19, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v16, 4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: sb a4, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 288(sp) +; RV32ZVFHMIN-NEXT: lh a4, 160(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a1, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 354(sp) +; RV32ZVFHMIN-NEXT: lh a1, 226(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v21, v8, 4 +; RV32ZVFHMIN-NEXT: vslidedown.vi v23, v16, 3 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: 
fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 352(sp) +; RV32ZVFHMIN-NEXT: lh a1, 224(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 3 +; RV32ZVFHMIN-NEXT: vslidedown.vi v25, v16, 2 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 112(sp) +; RV32ZVFHMIN-NEXT: lh a0, 350(sp) +; RV32ZVFHMIN-NEXT: lh a1, 222(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v24, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v27, v16, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 348(sp) +; RV32ZVFHMIN-NEXT: lh a1, 220(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v26, v8, 1 +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v16, 15 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a4, 346(sp) +; RV32ZVFHMIN-NEXT: lh a5, 218(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a1, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a0, v13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 109(sp) +; RV32ZVFHMIN-NEXT: lh a4, 344(sp) +; RV32ZVFHMIN-NEXT: lh a6, 216(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 15 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 108(sp) +; RV32ZVFHMIN-NEXT: lh a4, 342(sp) +; RV32ZVFHMIN-NEXT: lh a6, 214(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v16, 14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 107(sp) +; RV32ZVFHMIN-NEXT: lh t0, 340(sp) +; RV32ZVFHMIN-NEXT: lh t1, 212(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v19 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb t0, 106(sp) +; RV32ZVFHMIN-NEXT: lh t1, 338(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v16, 13 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: lh t2, 210(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v21 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v16, 12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV32ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: sb t2, 105(sp) +; RV32ZVFHMIN-NEXT: lh a1, 336(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a0 +; RV32ZVFHMIN-NEXT: lh t3, 208(sp) +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v23 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: sb a1, 104(sp) +; RV32ZVFHMIN-NEXT: lh a5, 334(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh a7, 206(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s t3, v22 +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v16, 11 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a5, 103(sp) +; RV32ZVFHMIN-NEXT: lh a5, 
332(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: lh a6, 204(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v25 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a5, 102(sp) +; RV32ZVFHMIN-NEXT: lh a6, 330(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh t0, 202(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV32ZVFHMIN-NEXT: vslidedown.vi v24, v16, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: sb t0, 101(sp) +; RV32ZVFHMIN-NEXT: lh t0, 328(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: lh t1, 200(sp) +; RV32ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v27 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: sb t1, 100(sp) +; RV32ZVFHMIN-NEXT: lh a7, 326(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a6, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh t1, 198(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v26 +; RV32ZVFHMIN-NEXT: vslidedown.vi v26, v16, 9 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb t1, 99(sp) +; RV32ZVFHMIN-NEXT: lh t0, 324(sp) +; RV32ZVFHMIN-NEXT: lh t1, 196(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb t0, 98(sp) +; RV32ZVFHMIN-NEXT: lh t0, 322(sp) +; RV32ZVFHMIN-NEXT: lh t1, 194(sp) +; RV32ZVFHMIN-NEXT: sb a7, 65(sp) +; RV32ZVFHMIN-NEXT: sb a6, 66(sp) +; RV32ZVFHMIN-NEXT: sb t2, 67(sp) +; RV32ZVFHMIN-NEXT: sb a5, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 69(sp) +; RV32ZVFHMIN-NEXT: sb a1, 70(sp) +; RV32ZVFHMIN-NEXT: sb a0, 71(sp) +; RV32ZVFHMIN-NEXT: sb a5, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 382(sp) +; RV32ZVFHMIN-NEXT: lh a1, 254(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v16, 8 +; RV32ZVFHMIN-NEXT: vslidedown.vi v2, v8, 14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 380(sp) +; RV32ZVFHMIN-NEXT: lh a1, 252(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v0, v8, 13 +; RV32ZVFHMIN-NEXT: vslidedown.vi v4, v8, 12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 378(sp) +; RV32ZVFHMIN-NEXT: lh a1, 250(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v6, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v30, v8, 10 +; RV32ZVFHMIN-NEXT: vslidedown.vi v28, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 376(sp) +; RV32ZVFHMIN-NEXT: lh a1, 248(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v14 +; 
RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 374(sp) +; RV32ZVFHMIN-NEXT: lh a1, 246(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v2 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v18 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 372(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v0 +; RV32ZVFHMIN-NEXT: lh a1, 244(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v20 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: vmv.x.s t3, v4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a1, 370(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: lh a4, 242(sp) +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a1, 121(sp) +; RV32ZVFHMIN-NEXT: lh a4, 368(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: lh a6, 240(sp) +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a4, 120(sp) +; RV32ZVFHMIN-NEXT: lh a6, 366(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: lh t0, 238(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: sb t0, 119(sp) +; RV32ZVFHMIN-NEXT: lh t0, 364(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: lh t1, 236(sp) +; RV32ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v30 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: sb t1, 118(sp) +; RV32ZVFHMIN-NEXT: lh a5, 362(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: lh a7, 234(sp) +; RV32ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v26 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a7, 117(sp) +; RV32ZVFHMIN-NEXT: lh a6, 360(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: lh a7, 232(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v28 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: sb a7, 116(sp) +; RV32ZVFHMIN-NEXT: lh a5, 358(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: lh a6, 230(sp) +; RV32ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a6, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: sb a6, 115(sp) +; RV32ZVFHMIN-NEXT: lh a5, 356(sp) +; RV32ZVFHMIN-NEXT: lh 
a6, 228(sp) +; RV32ZVFHMIN-NEXT: sb t2, 76(sp) +; RV32ZVFHMIN-NEXT: sb a4, 77(sp) +; RV32ZVFHMIN-NEXT: sb a1, 78(sp) +; RV32ZVFHMIN-NEXT: sb a0, 79(sp) +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 72(sp) +; RV32ZVFHMIN-NEXT: sb a7, 73(sp) +; RV32ZVFHMIN-NEXT: sb t0, 74(sp) +; RV32ZVFHMIN-NEXT: sb t1, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a3, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a2) +; RV32ZVFHMIN-NEXT: addi sp, s0, -512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 512 +; RV32ZVFHMIN-NEXT: lw ra, 508(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 504(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_vv_v64f16_nonans: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 512 +; RV64ZVFHMIN-NEXT: sd ra, 504(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 496(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a3, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a3, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v16, (a0) +; RV64ZVFHMIN-NEXT: vle16.v v8, (a1) +; RV64ZVFHMIN-NEXT: addi a0, sp, 256 +; RV64ZVFHMIN-NEXT: addi a1, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v16, (a0) +; RV64ZVFHMIN-NEXT: vse16.v v8, (a1) +; RV64ZVFHMIN-NEXT: lh a0, 320(sp) +; RV64ZVFHMIN-NEXT: lh a1, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 318(sp) +; RV64ZVFHMIN-NEXT: lh a1, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 316(sp) +; RV64ZVFHMIN-NEXT: lh a1, 188(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 314(sp) +; RV64ZVFHMIN-NEXT: lh a1, 186(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 312(sp) +; RV64ZVFHMIN-NEXT: lh a1, 184(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 310(sp) +; RV64ZVFHMIN-NEXT: lh a1, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 308(sp) +; RV64ZVFHMIN-NEXT: lh a1, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 306(sp) +; RV64ZVFHMIN-NEXT: lh a1, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 
+; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 304(sp) +; RV64ZVFHMIN-NEXT: lh a1, 176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 302(sp) +; RV64ZVFHMIN-NEXT: lh a1, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 300(sp) +; RV64ZVFHMIN-NEXT: lh a1, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a1, 298(sp) +; RV64ZVFHMIN-NEXT: lh a4, 170(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a0, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a1, 85(sp) +; RV64ZVFHMIN-NEXT: lh a4, 296(sp) +; RV64ZVFHMIN-NEXT: lh a5, 168(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v16, 7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 84(sp) +; RV64ZVFHMIN-NEXT: lh a4, 294(sp) +; RV64ZVFHMIN-NEXT: lh a5, 166(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 7 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v16, 6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 83(sp) +; RV64ZVFHMIN-NEXT: lh a4, 292(sp) +; RV64ZVFHMIN-NEXT: lh a5, 164(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 6 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v16, 5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 82(sp) +; RV64ZVFHMIN-NEXT: lh a4, 290(sp) +; RV64ZVFHMIN-NEXT: lh a5, 162(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v19, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v16, 4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: sb a4, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 288(sp) +; RV64ZVFHMIN-NEXT: lh a4, 160(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a1, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 354(sp) +; RV64ZVFHMIN-NEXT: lh a1, 226(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v21, v8, 4 +; RV64ZVFHMIN-NEXT: vslidedown.vi v23, v16, 3 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 352(sp) +; RV64ZVFHMIN-NEXT: lh a1, 224(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vi v25, v16, 2 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 350(sp) +; RV64ZVFHMIN-NEXT: lh a1, 222(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v24, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v27, v16, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; 
RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 348(sp) +; RV64ZVFHMIN-NEXT: lh a1, 220(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v26, v8, 1 +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v16, 15 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a4, 346(sp) +; RV64ZVFHMIN-NEXT: lh a5, 218(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a1, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a0, v13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 109(sp) +; RV64ZVFHMIN-NEXT: lh a4, 344(sp) +; RV64ZVFHMIN-NEXT: lh a6, 216(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 15 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 108(sp) +; RV64ZVFHMIN-NEXT: lh a4, 342(sp) +; RV64ZVFHMIN-NEXT: lh a6, 214(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v16, 14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 107(sp) +; RV64ZVFHMIN-NEXT: lh t0, 340(sp) +; RV64ZVFHMIN-NEXT: lh t1, 212(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v19 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb t0, 106(sp) +; RV64ZVFHMIN-NEXT: lh t1, 338(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v16, 13 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: lh t2, 210(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v21 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v16, 12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV64ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: sb t2, 105(sp) +; RV64ZVFHMIN-NEXT: lh a1, 336(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a0 +; RV64ZVFHMIN-NEXT: lh t3, 208(sp) +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v23 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: sb a1, 104(sp) +; RV64ZVFHMIN-NEXT: lh a5, 334(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh a7, 206(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s t3, v22 +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v16, 11 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a5, 103(sp) +; RV64ZVFHMIN-NEXT: lh a5, 332(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: lh a6, 204(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v25 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a5, 102(sp) +; RV64ZVFHMIN-NEXT: lh a6, 330(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh t0, 202(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV64ZVFHMIN-NEXT: vslidedown.vi v24, v16, 10 
+; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: sb t0, 101(sp) +; RV64ZVFHMIN-NEXT: lh t0, 328(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: lh t1, 200(sp) +; RV64ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v27 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: sb t1, 100(sp) +; RV64ZVFHMIN-NEXT: lh a7, 326(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a6, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh t1, 198(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v26 +; RV64ZVFHMIN-NEXT: vslidedown.vi v26, v16, 9 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb t1, 99(sp) +; RV64ZVFHMIN-NEXT: lh t0, 324(sp) +; RV64ZVFHMIN-NEXT: lh t1, 196(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb t0, 98(sp) +; RV64ZVFHMIN-NEXT: lh t0, 322(sp) +; RV64ZVFHMIN-NEXT: lh t1, 194(sp) +; RV64ZVFHMIN-NEXT: sb a7, 65(sp) +; RV64ZVFHMIN-NEXT: sb a6, 66(sp) +; RV64ZVFHMIN-NEXT: sb t2, 67(sp) +; RV64ZVFHMIN-NEXT: sb a5, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 69(sp) +; RV64ZVFHMIN-NEXT: sb a1, 70(sp) +; RV64ZVFHMIN-NEXT: sb a0, 71(sp) +; RV64ZVFHMIN-NEXT: sb a5, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 382(sp) +; RV64ZVFHMIN-NEXT: lh a1, 254(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v16, 8 +; RV64ZVFHMIN-NEXT: vslidedown.vi v2, v8, 14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 380(sp) +; RV64ZVFHMIN-NEXT: lh a1, 252(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v0, v8, 13 +; RV64ZVFHMIN-NEXT: vslidedown.vi v4, v8, 12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 378(sp) +; RV64ZVFHMIN-NEXT: lh a1, 250(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v6, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v30, v8, 10 +; RV64ZVFHMIN-NEXT: vslidedown.vi v28, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 376(sp) +; RV64ZVFHMIN-NEXT: lh a1, 248(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 374(sp) +; RV64ZVFHMIN-NEXT: lh a1, 246(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v2 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v18 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 372(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v0 +; RV64ZVFHMIN-NEXT: lh a1, 244(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v20 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; 
RV64ZVFHMIN-NEXT: vmv.x.s t3, v4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a1, 370(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: lh a4, 242(sp) +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a1, 121(sp) +; RV64ZVFHMIN-NEXT: lh a4, 368(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: lh a6, 240(sp) +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a4, 120(sp) +; RV64ZVFHMIN-NEXT: lh a6, 366(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: lh t0, 238(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: sb t0, 119(sp) +; RV64ZVFHMIN-NEXT: lh t0, 364(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: lh t1, 236(sp) +; RV64ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v30 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: sb t1, 118(sp) +; RV64ZVFHMIN-NEXT: lh a5, 362(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: lh a7, 234(sp) +; RV64ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v26 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a7, 117(sp) +; RV64ZVFHMIN-NEXT: lh a6, 360(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: lh a7, 232(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v28 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: sb a7, 116(sp) +; RV64ZVFHMIN-NEXT: lh a5, 358(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: lh a6, 230(sp) +; RV64ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a6, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: sb a6, 115(sp) +; RV64ZVFHMIN-NEXT: lh a5, 356(sp) +; RV64ZVFHMIN-NEXT: lh a6, 228(sp) +; RV64ZVFHMIN-NEXT: sb t2, 76(sp) +; RV64ZVFHMIN-NEXT: sb a4, 77(sp) +; RV64ZVFHMIN-NEXT: sb a1, 78(sp) +; RV64ZVFHMIN-NEXT: sb a0, 79(sp) +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 72(sp) +; RV64ZVFHMIN-NEXT: sb a7, 73(sp) +; RV64ZVFHMIN-NEXT: sb t0, 74(sp) +; RV64ZVFHMIN-NEXT: sb t1, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a3, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; 
RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a2) +; RV64ZVFHMIN-NEXT: addi sp, s0, -512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 512 +; RV64ZVFHMIN-NEXT: ld ra, 504(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 496(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = load <64 x half>, ptr %y %c = fcmp nnan ugt <64 x half> %a, %b @@ -1069,6 +3001,744 @@ define void @fcmp_ugt_vf_v64f16(ptr %x, half %y, ptr %z) { ; ZVFH-NEXT: vmnot.m v8, v16 ; ZVFH-NEXT: vsm.v v8, (a1) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_vf_v64f16: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV32ZVFHMIN-NEXT: sw ra, 380(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a2, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a0, 170(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; 
RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 85(sp) +; RV32ZVFHMIN-NEXT: lh a0, 168(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 84(sp) +; RV32ZVFHMIN-NEXT: lh a0, 166(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 83(sp) +; RV32ZVFHMIN-NEXT: lh a0, 164(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 82(sp) +; RV32ZVFHMIN-NEXT: lh a0, 162(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 160(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a3, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 224(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 112(sp) +; RV32ZVFHMIN-NEXT: lh a0, 222(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 220(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a0, 218(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 109(sp) +; RV32ZVFHMIN-NEXT: lh a0, 216(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV32ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 108(sp) +; RV32ZVFHMIN-NEXT: lh a0, 214(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 107(sp) +; RV32ZVFHMIN-NEXT: lh a0, 212(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 106(sp) +; RV32ZVFHMIN-NEXT: lh a0, 210(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 105(sp) +; RV32ZVFHMIN-NEXT: lh a0, 208(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; 
RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 104(sp) +; RV32ZVFHMIN-NEXT: lh a0, 206(sp) +; RV32ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fle.h a4, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 103(sp) +; RV32ZVFHMIN-NEXT: lh a0, 204(sp) +; RV32ZVFHMIN-NEXT: fle.h a5, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: fle.h a6, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 102(sp) +; RV32ZVFHMIN-NEXT: lh a0, 202(sp) +; RV32ZVFHMIN-NEXT: fle.h a7, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fle.h t0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 101(sp) +; RV32ZVFHMIN-NEXT: lh a0, 200(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 100(sp) +; RV32ZVFHMIN-NEXT: lh a0, 198(sp) +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 99(sp) +; RV32ZVFHMIN-NEXT: lh a0, 196(sp) +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: xori t1, t1, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 98(sp) +; RV32ZVFHMIN-NEXT: lh a0, 194(sp) +; RV32ZVFHMIN-NEXT: sb t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb t0, 66(sp) +; RV32ZVFHMIN-NEXT: sb a7, 67(sp) +; RV32ZVFHMIN-NEXT: sb a6, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a5, 69(sp) +; RV32ZVFHMIN-NEXT: sb a4, 70(sp) +; RV32ZVFHMIN-NEXT: sb a3, 71(sp) +; RV32ZVFHMIN-NEXT: sb a0, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 
123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a0, 242(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 121(sp) +; RV32ZVFHMIN-NEXT: lh a0, 240(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 120(sp) +; RV32ZVFHMIN-NEXT: lh a0, 238(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 119(sp) +; RV32ZVFHMIN-NEXT: lh a0, 236(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 118(sp) +; RV32ZVFHMIN-NEXT: lh a0, 234(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: fle.h a5, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 117(sp) +; RV32ZVFHMIN-NEXT: lh a0, 232(sp) +; RV32ZVFHMIN-NEXT: fle.h a6, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fle.h a7, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 116(sp) +; RV32ZVFHMIN-NEXT: lh a0, 230(sp) +; RV32ZVFHMIN-NEXT: fle.h t0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 115(sp) +; RV32ZVFHMIN-NEXT: lh a0, 228(sp) +; RV32ZVFHMIN-NEXT: sb a6, 76(sp) +; RV32ZVFHMIN-NEXT: sb a5, 77(sp) +; RV32ZVFHMIN-NEXT: sb a4, 78(sp) +; RV32ZVFHMIN-NEXT: sb a3, 79(sp) +; RV32ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a4, a7, 1 +; RV32ZVFHMIN-NEXT: xori a5, t0, 1 +; RV32ZVFHMIN-NEXT: xori a6, t1, 1 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: sb a3, 72(sp) +; RV32ZVFHMIN-NEXT: sb a6, 73(sp) +; RV32ZVFHMIN-NEXT: sb a5, 74(sp) +; RV32ZVFHMIN-NEXT: sb a4, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; 
RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV32ZVFHMIN-NEXT: addi sp, s0, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV32ZVFHMIN-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_vf_v64f16: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV64ZVFHMIN-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a2, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a0, 170(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 85(sp) +; RV64ZVFHMIN-NEXT: lh a0, 168(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 84(sp) +; RV64ZVFHMIN-NEXT: lh a0, 166(sp) +; 
RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 83(sp) +; RV64ZVFHMIN-NEXT: lh a0, 164(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 82(sp) +; RV64ZVFHMIN-NEXT: lh a0, 162(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a3, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 224(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 222(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 220(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a0, 218(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 109(sp) +; RV64ZVFHMIN-NEXT: lh a0, 216(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV64ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 108(sp) +; RV64ZVFHMIN-NEXT: lh a0, 214(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 107(sp) +; RV64ZVFHMIN-NEXT: lh a0, 212(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 106(sp) +; RV64ZVFHMIN-NEXT: lh a0, 210(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 105(sp) +; RV64ZVFHMIN-NEXT: lh a0, 208(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 104(sp) +; RV64ZVFHMIN-NEXT: lh a0, 206(sp) +; RV64ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fle.h a4, fa5, 
fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 103(sp) +; RV64ZVFHMIN-NEXT: lh a0, 204(sp) +; RV64ZVFHMIN-NEXT: fle.h a5, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: fle.h a6, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 102(sp) +; RV64ZVFHMIN-NEXT: lh a0, 202(sp) +; RV64ZVFHMIN-NEXT: fle.h a7, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fle.h t0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 101(sp) +; RV64ZVFHMIN-NEXT: lh a0, 200(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 100(sp) +; RV64ZVFHMIN-NEXT: lh a0, 198(sp) +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 99(sp) +; RV64ZVFHMIN-NEXT: lh a0, 196(sp) +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; RV64ZVFHMIN-NEXT: xori t1, t1, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 98(sp) +; RV64ZVFHMIN-NEXT: lh a0, 194(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb t0, 66(sp) +; RV64ZVFHMIN-NEXT: sb a7, 67(sp) +; RV64ZVFHMIN-NEXT: sb a6, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a5, 69(sp) +; RV64ZVFHMIN-NEXT: sb a4, 70(sp) +; RV64ZVFHMIN-NEXT: sb a3, 71(sp) +; RV64ZVFHMIN-NEXT: sb a0, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 250(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 
+; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a0, 242(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 121(sp) +; RV64ZVFHMIN-NEXT: lh a0, 240(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 120(sp) +; RV64ZVFHMIN-NEXT: lh a0, 238(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 119(sp) +; RV64ZVFHMIN-NEXT: lh a0, 236(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 118(sp) +; RV64ZVFHMIN-NEXT: lh a0, 234(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: fle.h a5, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 117(sp) +; RV64ZVFHMIN-NEXT: lh a0, 232(sp) +; RV64ZVFHMIN-NEXT: fle.h a6, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fle.h a7, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 116(sp) +; RV64ZVFHMIN-NEXT: lh a0, 230(sp) +; RV64ZVFHMIN-NEXT: fle.h t0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 115(sp) +; RV64ZVFHMIN-NEXT: lh a0, 228(sp) +; RV64ZVFHMIN-NEXT: sb a6, 76(sp) +; RV64ZVFHMIN-NEXT: sb a5, 77(sp) +; RV64ZVFHMIN-NEXT: sb a4, 78(sp) +; RV64ZVFHMIN-NEXT: sb a3, 79(sp) +; RV64ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a4, a7, 1 +; RV64ZVFHMIN-NEXT: xori a5, t0, 1 +; RV64ZVFHMIN-NEXT: xori a6, t1, 1 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: sb a3, 72(sp) +; RV64ZVFHMIN-NEXT: sb a6, 73(sp) +; RV64ZVFHMIN-NEXT: sb a5, 74(sp) +; RV64ZVFHMIN-NEXT: sb a4, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV64ZVFHMIN-NEXT: addi sp, s0, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV64ZVFHMIN-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: 
.cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = insertelement <64 x half> poison, half %y, i32 0 %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer @@ -1086,6 +3756,616 @@ define void @fcmp_ugt_vf_v64f16_nonans(ptr %x, half %y, ptr %z) { ; ZVFH-NEXT: vmfgt.vf v16, v8, fa0 ; ZVFH-NEXT: vsm.v v16, (a1) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_vf_v64f16_nonans: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV32ZVFHMIN-NEXT: sw ra, 380(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a2, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a0, 170(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 85(sp) +; RV32ZVFHMIN-NEXT: lh a0, 168(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 84(sp) +; RV32ZVFHMIN-NEXT: lh a0, 166(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 83(sp) +; RV32ZVFHMIN-NEXT: lh a0, 164(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 82(sp) +; RV32ZVFHMIN-NEXT: lh a0, 162(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; 
RV32ZVFHMIN-NEXT: lh a0, 160(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a3, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 224(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 112(sp) +; RV32ZVFHMIN-NEXT: lh a0, 222(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 220(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a0, 218(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 109(sp) +; RV32ZVFHMIN-NEXT: lh a0, 216(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 108(sp) +; RV32ZVFHMIN-NEXT: lh a0, 214(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 107(sp) +; RV32ZVFHMIN-NEXT: lh a0, 212(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 106(sp) +; RV32ZVFHMIN-NEXT: lh a0, 210(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 105(sp) +; RV32ZVFHMIN-NEXT: lh a0, 208(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 104(sp) +; RV32ZVFHMIN-NEXT: lh a0, 206(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 103(sp) +; RV32ZVFHMIN-NEXT: lh a0, 204(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 102(sp) +; RV32ZVFHMIN-NEXT: lh a0, 202(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 101(sp) +; RV32ZVFHMIN-NEXT: lh a0, 200(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: flt.h a5, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a0, 100(sp) +; RV32ZVFHMIN-NEXT: lh a0, 198(sp) +; RV32ZVFHMIN-NEXT: flt.h a6, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; 
RV32ZVFHMIN-NEXT: sb a0, 99(sp) +; RV32ZVFHMIN-NEXT: lh a0, 196(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 98(sp) +; RV32ZVFHMIN-NEXT: lh a0, 194(sp) +; RV32ZVFHMIN-NEXT: sb t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb t0, 66(sp) +; RV32ZVFHMIN-NEXT: sb a7, 67(sp) +; RV32ZVFHMIN-NEXT: sb a6, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a5, 69(sp) +; RV32ZVFHMIN-NEXT: sb a4, 70(sp) +; RV32ZVFHMIN-NEXT: sb a3, 71(sp) +; RV32ZVFHMIN-NEXT: sb a0, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a0, 242(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 121(sp) +; RV32ZVFHMIN-NEXT: lh a0, 240(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 120(sp) +; RV32ZVFHMIN-NEXT: lh a0, 238(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 119(sp) +; RV32ZVFHMIN-NEXT: lh a0, 236(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 118(sp) +; RV32ZVFHMIN-NEXT: lh a0, 234(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: flt.h a5, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a0, 117(sp) +; RV32ZVFHMIN-NEXT: lh a0, 232(sp) +; RV32ZVFHMIN-NEXT: flt.h a6, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; 
RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a0, 116(sp) +; RV32ZVFHMIN-NEXT: lh a0, 230(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: sb a0, 115(sp) +; RV32ZVFHMIN-NEXT: lh a0, 228(sp) +; RV32ZVFHMIN-NEXT: sb a6, 76(sp) +; RV32ZVFHMIN-NEXT: sb a5, 77(sp) +; RV32ZVFHMIN-NEXT: sb a4, 78(sp) +; RV32ZVFHMIN-NEXT: sb a3, 79(sp) +; RV32ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a3, 72(sp) +; RV32ZVFHMIN-NEXT: sb t1, 73(sp) +; RV32ZVFHMIN-NEXT: sb t0, 74(sp) +; RV32ZVFHMIN-NEXT: sb a7, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV32ZVFHMIN-NEXT: addi sp, s0, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV32ZVFHMIN-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_vf_v64f16_nonans: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV64ZVFHMIN-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a2, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; 
RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a0, 170(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 85(sp) +; RV64ZVFHMIN-NEXT: lh a0, 168(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 84(sp) +; RV64ZVFHMIN-NEXT: lh a0, 166(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 83(sp) +; RV64ZVFHMIN-NEXT: lh a0, 164(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 82(sp) +; RV64ZVFHMIN-NEXT: lh a0, 162(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a3, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 224(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 222(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 220(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a0, 218(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 109(sp) +; RV64ZVFHMIN-NEXT: lh a0, 216(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 108(sp) +; RV64ZVFHMIN-NEXT: lh a0, 214(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 107(sp) +; RV64ZVFHMIN-NEXT: lh a0, 212(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 106(sp) +; RV64ZVFHMIN-NEXT: lh a0, 210(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 105(sp) +; RV64ZVFHMIN-NEXT: lh a0, 208(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 104(sp) +; RV64ZVFHMIN-NEXT: lh a0, 206(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 103(sp) +; RV64ZVFHMIN-NEXT: lh a0, 204(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV64ZVFHMIN-NEXT: 
fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 102(sp) +; RV64ZVFHMIN-NEXT: lh a0, 202(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 101(sp) +; RV64ZVFHMIN-NEXT: lh a0, 200(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: flt.h a5, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a0, 100(sp) +; RV64ZVFHMIN-NEXT: lh a0, 198(sp) +; RV64ZVFHMIN-NEXT: flt.h a6, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a0, 99(sp) +; RV64ZVFHMIN-NEXT: lh a0, 196(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 98(sp) +; RV64ZVFHMIN-NEXT: lh a0, 194(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb t0, 66(sp) +; RV64ZVFHMIN-NEXT: sb a7, 67(sp) +; RV64ZVFHMIN-NEXT: sb a6, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a5, 69(sp) +; RV64ZVFHMIN-NEXT: sb a4, 70(sp) +; RV64ZVFHMIN-NEXT: sb a3, 71(sp) +; RV64ZVFHMIN-NEXT: sb a0, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 250(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a0, 242(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 121(sp) +; RV64ZVFHMIN-NEXT: lh a0, 240(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 120(sp) +; RV64ZVFHMIN-NEXT: lh a0, 238(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: 
vmv.x.s t1, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 119(sp) +; RV64ZVFHMIN-NEXT: lh a0, 236(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 118(sp) +; RV64ZVFHMIN-NEXT: lh a0, 234(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: flt.h a5, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a0, 117(sp) +; RV64ZVFHMIN-NEXT: lh a0, 232(sp) +; RV64ZVFHMIN-NEXT: flt.h a6, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a0, 116(sp) +; RV64ZVFHMIN-NEXT: lh a0, 230(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: sb a0, 115(sp) +; RV64ZVFHMIN-NEXT: lh a0, 228(sp) +; RV64ZVFHMIN-NEXT: sb a6, 76(sp) +; RV64ZVFHMIN-NEXT: sb a5, 77(sp) +; RV64ZVFHMIN-NEXT: sb a4, 78(sp) +; RV64ZVFHMIN-NEXT: sb a3, 79(sp) +; RV64ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a3, 72(sp) +; RV64ZVFHMIN-NEXT: sb t1, 73(sp) +; RV64ZVFHMIN-NEXT: sb t0, 74(sp) +; RV64ZVFHMIN-NEXT: sb a7, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV64ZVFHMIN-NEXT: addi sp, s0, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV64ZVFHMIN-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = insertelement <64 x half> poison, half %y, i32 0 %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer @@ -1710,6 +4990,744 @@ define void @fcmp_ugt_fv_v64f16(ptr %x, half %y, ptr %z) { ; ZVFH-NEXT: vmnot.m v8, v16 ; ZVFH-NEXT: vsm.v v8, (a1) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_fv_v64f16: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV32ZVFHMIN-NEXT: sw ra, 380(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a2, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: 
xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a0, 170(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 85(sp) +; RV32ZVFHMIN-NEXT: lh a0, 168(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 84(sp) +; RV32ZVFHMIN-NEXT: lh a0, 166(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 83(sp) +; RV32ZVFHMIN-NEXT: lh a0, 164(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 82(sp) +; RV32ZVFHMIN-NEXT: lh a0, 162(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 160(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a3, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 224(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 112(sp) +; 
RV32ZVFHMIN-NEXT: lh a0, 222(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 220(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a0, 218(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 109(sp) +; RV32ZVFHMIN-NEXT: lh a0, 216(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV32ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 108(sp) +; RV32ZVFHMIN-NEXT: lh a0, 214(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 107(sp) +; RV32ZVFHMIN-NEXT: lh a0, 212(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 106(sp) +; RV32ZVFHMIN-NEXT: lh a0, 210(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 105(sp) +; RV32ZVFHMIN-NEXT: lh a0, 208(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 104(sp) +; RV32ZVFHMIN-NEXT: lh a0, 206(sp) +; RV32ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fle.h a4, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 103(sp) +; RV32ZVFHMIN-NEXT: lh a0, 204(sp) +; RV32ZVFHMIN-NEXT: fle.h a5, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: fle.h a6, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 102(sp) +; RV32ZVFHMIN-NEXT: lh a0, 202(sp) +; RV32ZVFHMIN-NEXT: fle.h a7, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fle.h t0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 101(sp) +; RV32ZVFHMIN-NEXT: lh a0, 200(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 100(sp) +; RV32ZVFHMIN-NEXT: lh a0, 198(sp) +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; 
RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 99(sp) +; RV32ZVFHMIN-NEXT: lh a0, 196(sp) +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: xori t1, t1, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 98(sp) +; RV32ZVFHMIN-NEXT: lh a0, 194(sp) +; RV32ZVFHMIN-NEXT: sb t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb t0, 66(sp) +; RV32ZVFHMIN-NEXT: sb a7, 67(sp) +; RV32ZVFHMIN-NEXT: sb a6, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a5, 69(sp) +; RV32ZVFHMIN-NEXT: sb a4, 70(sp) +; RV32ZVFHMIN-NEXT: sb a3, 71(sp) +; RV32ZVFHMIN-NEXT: sb a0, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a0, 242(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 121(sp) +; RV32ZVFHMIN-NEXT: lh a0, 240(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 120(sp) +; RV32ZVFHMIN-NEXT: lh a0, 238(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 119(sp) +; RV32ZVFHMIN-NEXT: lh a0, 236(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 
+; RV32ZVFHMIN-NEXT: sb a0, 118(sp) +; RV32ZVFHMIN-NEXT: lh a0, 234(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: fle.h a5, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 117(sp) +; RV32ZVFHMIN-NEXT: lh a0, 232(sp) +; RV32ZVFHMIN-NEXT: fle.h a6, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fle.h a7, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 116(sp) +; RV32ZVFHMIN-NEXT: lh a0, 230(sp) +; RV32ZVFHMIN-NEXT: fle.h t0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 115(sp) +; RV32ZVFHMIN-NEXT: lh a0, 228(sp) +; RV32ZVFHMIN-NEXT: sb a6, 76(sp) +; RV32ZVFHMIN-NEXT: sb a5, 77(sp) +; RV32ZVFHMIN-NEXT: sb a4, 78(sp) +; RV32ZVFHMIN-NEXT: sb a3, 79(sp) +; RV32ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a7, 1 +; RV32ZVFHMIN-NEXT: xori a5, t0, 1 +; RV32ZVFHMIN-NEXT: xori a6, t1, 1 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: sb a3, 72(sp) +; RV32ZVFHMIN-NEXT: sb a6, 73(sp) +; RV32ZVFHMIN-NEXT: sb a5, 74(sp) +; RV32ZVFHMIN-NEXT: sb a4, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV32ZVFHMIN-NEXT: addi sp, s0, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV32ZVFHMIN-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_fv_v64f16: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV64ZVFHMIN-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a2, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; 
RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a0, 170(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 85(sp) +; RV64ZVFHMIN-NEXT: lh a0, 168(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 84(sp) +; RV64ZVFHMIN-NEXT: lh a0, 166(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 83(sp) +; RV64ZVFHMIN-NEXT: lh a0, 164(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 82(sp) +; RV64ZVFHMIN-NEXT: lh a0, 162(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a3, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 224(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 222(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 220(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: 
fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a0, 218(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 109(sp) +; RV64ZVFHMIN-NEXT: lh a0, 216(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV64ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 108(sp) +; RV64ZVFHMIN-NEXT: lh a0, 214(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 107(sp) +; RV64ZVFHMIN-NEXT: lh a0, 212(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 106(sp) +; RV64ZVFHMIN-NEXT: lh a0, 210(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 105(sp) +; RV64ZVFHMIN-NEXT: lh a0, 208(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 104(sp) +; RV64ZVFHMIN-NEXT: lh a0, 206(sp) +; RV64ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fle.h a4, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 103(sp) +; RV64ZVFHMIN-NEXT: lh a0, 204(sp) +; RV64ZVFHMIN-NEXT: fle.h a5, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: fle.h a6, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 102(sp) +; RV64ZVFHMIN-NEXT: lh a0, 202(sp) +; RV64ZVFHMIN-NEXT: fle.h a7, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fle.h t0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 101(sp) +; RV64ZVFHMIN-NEXT: lh a0, 200(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 100(sp) +; RV64ZVFHMIN-NEXT: lh a0, 198(sp) +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 99(sp) +; RV64ZVFHMIN-NEXT: lh a0, 196(sp) +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; 
RV64ZVFHMIN-NEXT: xori t1, t1, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 98(sp) +; RV64ZVFHMIN-NEXT: lh a0, 194(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb t0, 66(sp) +; RV64ZVFHMIN-NEXT: sb a7, 67(sp) +; RV64ZVFHMIN-NEXT: sb a6, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a5, 69(sp) +; RV64ZVFHMIN-NEXT: sb a4, 70(sp) +; RV64ZVFHMIN-NEXT: sb a3, 71(sp) +; RV64ZVFHMIN-NEXT: sb a0, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 250(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a0, 242(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 121(sp) +; RV64ZVFHMIN-NEXT: lh a0, 240(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 120(sp) +; RV64ZVFHMIN-NEXT: lh a0, 238(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 119(sp) +; RV64ZVFHMIN-NEXT: lh a0, 236(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 118(sp) +; RV64ZVFHMIN-NEXT: lh a0, 234(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: fle.h a5, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: 
fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 117(sp) +; RV64ZVFHMIN-NEXT: lh a0, 232(sp) +; RV64ZVFHMIN-NEXT: fle.h a6, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fle.h a7, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 116(sp) +; RV64ZVFHMIN-NEXT: lh a0, 230(sp) +; RV64ZVFHMIN-NEXT: fle.h t0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 115(sp) +; RV64ZVFHMIN-NEXT: lh a0, 228(sp) +; RV64ZVFHMIN-NEXT: sb a6, 76(sp) +; RV64ZVFHMIN-NEXT: sb a5, 77(sp) +; RV64ZVFHMIN-NEXT: sb a4, 78(sp) +; RV64ZVFHMIN-NEXT: sb a3, 79(sp) +; RV64ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a7, 1 +; RV64ZVFHMIN-NEXT: xori a5, t0, 1 +; RV64ZVFHMIN-NEXT: xori a6, t1, 1 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: sb a3, 72(sp) +; RV64ZVFHMIN-NEXT: sb a6, 73(sp) +; RV64ZVFHMIN-NEXT: sb a5, 74(sp) +; RV64ZVFHMIN-NEXT: sb a4, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV64ZVFHMIN-NEXT: addi sp, s0, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV64ZVFHMIN-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = insertelement <64 x half> poison, half %y, i32 0 %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer @@ -1727,6 +5745,616 @@ define void @fcmp_ugt_fv_v64f16_nonans(ptr %x, half %y, ptr %z) { ; ZVFH-NEXT: vmflt.vf v16, v8, fa0 ; ZVFH-NEXT: vsm.v v16, (a1) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_fv_v64f16_nonans: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV32ZVFHMIN-NEXT: sw ra, 380(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a2, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x 
fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a0, 170(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 85(sp) +; RV32ZVFHMIN-NEXT: lh a0, 168(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 84(sp) +; RV32ZVFHMIN-NEXT: lh a0, 166(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 83(sp) +; RV32ZVFHMIN-NEXT: lh a0, 164(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 82(sp) +; RV32ZVFHMIN-NEXT: lh a0, 162(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 160(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a3, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 224(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 112(sp) +; RV32ZVFHMIN-NEXT: lh a0, 222(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 220(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a0, 218(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 109(sp) +; RV32ZVFHMIN-NEXT: lh a0, 216(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 108(sp) +; RV32ZVFHMIN-NEXT: lh a0, 214(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 107(sp) +; RV32ZVFHMIN-NEXT: lh a0, 212(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, 
fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 106(sp) +; RV32ZVFHMIN-NEXT: lh a0, 210(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 105(sp) +; RV32ZVFHMIN-NEXT: lh a0, 208(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 104(sp) +; RV32ZVFHMIN-NEXT: lh a0, 206(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 103(sp) +; RV32ZVFHMIN-NEXT: lh a0, 204(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 102(sp) +; RV32ZVFHMIN-NEXT: lh a0, 202(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 101(sp) +; RV32ZVFHMIN-NEXT: lh a0, 200(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: flt.h a5, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a0, 100(sp) +; RV32ZVFHMIN-NEXT: lh a0, 198(sp) +; RV32ZVFHMIN-NEXT: flt.h a6, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a0, 99(sp) +; RV32ZVFHMIN-NEXT: lh a0, 196(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 98(sp) +; RV32ZVFHMIN-NEXT: lh a0, 194(sp) +; RV32ZVFHMIN-NEXT: sb t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb t0, 66(sp) +; RV32ZVFHMIN-NEXT: sb a7, 67(sp) +; RV32ZVFHMIN-NEXT: sb a6, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a5, 69(sp) +; RV32ZVFHMIN-NEXT: sb a4, 70(sp) +; RV32ZVFHMIN-NEXT: sb a3, 71(sp) +; RV32ZVFHMIN-NEXT: sb a0, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV32ZVFHMIN-NEXT: 
fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a0, 242(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 121(sp) +; RV32ZVFHMIN-NEXT: lh a0, 240(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 120(sp) +; RV32ZVFHMIN-NEXT: lh a0, 238(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 119(sp) +; RV32ZVFHMIN-NEXT: lh a0, 236(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 118(sp) +; RV32ZVFHMIN-NEXT: lh a0, 234(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: flt.h a5, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a0, 117(sp) +; RV32ZVFHMIN-NEXT: lh a0, 232(sp) +; RV32ZVFHMIN-NEXT: flt.h a6, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a0, 116(sp) +; RV32ZVFHMIN-NEXT: lh a0, 230(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: sb a0, 115(sp) +; RV32ZVFHMIN-NEXT: lh a0, 228(sp) +; RV32ZVFHMIN-NEXT: sb a6, 76(sp) +; RV32ZVFHMIN-NEXT: sb a5, 77(sp) +; RV32ZVFHMIN-NEXT: sb a4, 78(sp) +; RV32ZVFHMIN-NEXT: sb a3, 79(sp) +; RV32ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a3, 72(sp) +; RV32ZVFHMIN-NEXT: sb t1, 73(sp) +; RV32ZVFHMIN-NEXT: sb t0, 74(sp) +; RV32ZVFHMIN-NEXT: sb a7, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV32ZVFHMIN-NEXT: addi sp, s0, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV32ZVFHMIN-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_fv_v64f16_nonans: +; RV64ZVFHMIN: # 
%bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV64ZVFHMIN-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a2, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a0, 170(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 85(sp) +; RV64ZVFHMIN-NEXT: lh a0, 168(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 84(sp) +; RV64ZVFHMIN-NEXT: lh a0, 166(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 83(sp) +; RV64ZVFHMIN-NEXT: lh a0, 164(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 82(sp) +; RV64ZVFHMIN-NEXT: lh a0, 162(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a3, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 224(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb 
a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 222(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 220(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a0, 218(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 109(sp) +; RV64ZVFHMIN-NEXT: lh a0, 216(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 108(sp) +; RV64ZVFHMIN-NEXT: lh a0, 214(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 107(sp) +; RV64ZVFHMIN-NEXT: lh a0, 212(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 106(sp) +; RV64ZVFHMIN-NEXT: lh a0, 210(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 105(sp) +; RV64ZVFHMIN-NEXT: lh a0, 208(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 104(sp) +; RV64ZVFHMIN-NEXT: lh a0, 206(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 103(sp) +; RV64ZVFHMIN-NEXT: lh a0, 204(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 102(sp) +; RV64ZVFHMIN-NEXT: lh a0, 202(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 101(sp) +; RV64ZVFHMIN-NEXT: lh a0, 200(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: flt.h a5, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a0, 100(sp) +; RV64ZVFHMIN-NEXT: lh a0, 198(sp) +; RV64ZVFHMIN-NEXT: flt.h a6, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a0, 99(sp) +; RV64ZVFHMIN-NEXT: lh a0, 196(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 98(sp) +; RV64ZVFHMIN-NEXT: lh a0, 194(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb t0, 66(sp) +; RV64ZVFHMIN-NEXT: sb a7, 67(sp) +; RV64ZVFHMIN-NEXT: sb a6, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a5, 
69(sp) +; RV64ZVFHMIN-NEXT: sb a4, 70(sp) +; RV64ZVFHMIN-NEXT: sb a3, 71(sp) +; RV64ZVFHMIN-NEXT: sb a0, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 250(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a0, 242(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 121(sp) +; RV64ZVFHMIN-NEXT: lh a0, 240(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 120(sp) +; RV64ZVFHMIN-NEXT: lh a0, 238(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 119(sp) +; RV64ZVFHMIN-NEXT: lh a0, 236(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 118(sp) +; RV64ZVFHMIN-NEXT: lh a0, 234(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: flt.h a5, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a0, 117(sp) +; RV64ZVFHMIN-NEXT: lh a0, 232(sp) +; RV64ZVFHMIN-NEXT: flt.h a6, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a0, 116(sp) +; RV64ZVFHMIN-NEXT: lh a0, 230(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: sb a0, 115(sp) +; RV64ZVFHMIN-NEXT: lh a0, 228(sp) +; RV64ZVFHMIN-NEXT: sb a6, 76(sp) +; RV64ZVFHMIN-NEXT: sb a5, 77(sp) +; RV64ZVFHMIN-NEXT: sb a4, 78(sp) +; RV64ZVFHMIN-NEXT: sb a3, 79(sp) +; RV64ZVFHMIN-NEXT: flt.h a3, 
fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a3, 72(sp) +; RV64ZVFHMIN-NEXT: sb t1, 73(sp) +; RV64ZVFHMIN-NEXT: sb t0, 74(sp) +; RV64ZVFHMIN-NEXT: sb a7, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV64ZVFHMIN-NEXT: addi sp, s0, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV64ZVFHMIN-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = insertelement <64 x half> poison, half %y, i32 0 %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll index 38df622998bf9..dd415116c2327 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH +; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH +; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN +; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN define void @fadd_v8bf16(ptr %x, ptr %y) { @@ -3925,8 +3925,9 @@ define void @trunc_v8f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; ZVFH-NEXT: lui a1, %hi(.LCPI171_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI171_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t @@ -3965,8 +3966,9 @@ define void @trunc_v6f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; ZVFH-NEXT: lui a1, %hi(.LCPI172_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI172_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t @@ -4022,20 +4024,67 @@ define void 
@trunc_v4f32(ptr %x) { } define void @trunc_v2f64(ptr %x) { -; CHECK-LABEL: trunc_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: lui a1, %hi(.LCPI174_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI174_0)(a1) -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: vse64.v v8, (a0) -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: trunc_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFH-NEXT: vle64.v v8, (a0) +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI174_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI174_0)(a1) +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: vse64.v v8, (a0) +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: trunc_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFH-NEXT: vle64.v v8, (a0) +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: vse64.v v8, (a0) +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: trunc_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI174_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI174_0)(a1) +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: trunc_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV64ZVFHMIN-NEXT: ret %a = load <2 x double>, ptr %x %b = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a) store <2 x double> %b, ptr %x @@ -4101,8 +4150,9 @@ define void @ceil_v8f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; ZVFH-NEXT: lui a1, %hi(.LCPI177_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI177_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a1, 3 @@ -4145,8 +4195,9 @@ define void @ceil_v6f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; 
ZVFH-NEXT: lui a1, %hi(.LCPI178_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI178_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a1, 3 @@ -4208,22 +4259,75 @@ define void @ceil_v4f32(ptr %x) { } define void @ceil_v2f64(ptr %x) { -; CHECK-LABEL: ceil_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: lui a1, %hi(.LCPI180_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI180_0)(a1) -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a1, 3 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: vse64.v v8, (a0) -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: ceil_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFH-NEXT: vle64.v v8, (a0) +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI180_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI180_0)(a1) +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a1, 3 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: vse64.v v8, (a0) +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: ceil_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFH-NEXT: vle64.v v8, (a0) +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a1, 3 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: vse64.v v8, (a0) +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: ceil_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI180_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI180_0)(a1) +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a1, 3 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: ceil_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a1, 3 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV64ZVFHMIN-NEXT: ret %a = load <2 x double>, ptr %x %b = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a) store <2 x double> %b, ptr %x @@ -4289,8 +4393,9 
@@ define void @floor_v8f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; ZVFH-NEXT: lui a1, %hi(.LCPI183_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI183_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a1, 2 @@ -4333,8 +4438,9 @@ define void @floor_v6f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; ZVFH-NEXT: lui a1, %hi(.LCPI184_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI184_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a1, 2 @@ -4396,22 +4502,75 @@ define void @floor_v4f32(ptr %x) { } define void @floor_v2f64(ptr %x) { -; CHECK-LABEL: floor_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: lui a1, %hi(.LCPI186_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI186_0)(a1) -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a1, 2 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: vse64.v v8, (a0) -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: floor_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFH-NEXT: vle64.v v8, (a0) +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI186_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI186_0)(a1) +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a1, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: vse64.v v8, (a0) +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: floor_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFH-NEXT: vle64.v v8, (a0) +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a1, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: vse64.v v8, (a0) +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: floor_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI186_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI186_0)(a1) +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a1, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: floor_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x 
fa5, a1 +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a1, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV64ZVFHMIN-NEXT: ret %a = load <2 x double>, ptr %x %b = call <2 x double> @llvm.floor.v2f64(<2 x double> %a) store <2 x double> %b, ptr %x @@ -4477,8 +4636,9 @@ define void @round_v8f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; ZVFH-NEXT: lui a1, %hi(.LCPI189_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI189_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a1, 4 @@ -4521,8 +4681,9 @@ define void @round_v6f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; ZVFH-NEXT: lui a1, %hi(.LCPI190_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI190_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a1, 4 @@ -4584,22 +4745,75 @@ define void @round_v4f32(ptr %x) { } define void @round_v2f64(ptr %x) { -; CHECK-LABEL: round_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: lui a1, %hi(.LCPI192_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI192_0)(a1) -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a1, 4 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: vse64.v v8, (a0) -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: round_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFH-NEXT: vle64.v v8, (a0) +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI192_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI192_0)(a1) +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a1, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: vse64.v v8, (a0) +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: round_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFH-NEXT: vle64.v v8, (a0) +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a1, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: vse64.v v8, (a0) +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: round_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI192_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI192_0)(a1) +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a1, 4 
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: round_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a1, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV64ZVFHMIN-NEXT: ret %a = load <2 x double>, ptr %x %b = call <2 x double> @llvm.round.v2f64(<2 x double> %a) store <2 x double> %b, ptr %x @@ -4636,8 +4850,9 @@ define void @rint_v8f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; ZVFH-NEXT: lui a1, %hi(.LCPI194_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI194_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -4693,20 +4908,67 @@ define void @rint_v4f32(ptr %x) { } define void @rint_v2f64(ptr %x) { -; CHECK-LABEL: rint_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: lui a1, %hi(.LCPI196_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI196_0)(a1) -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: vse64.v v8, (a0) -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: rint_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFH-NEXT: vle64.v v8, (a0) +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI196_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI196_0)(a1) +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: vse64.v v8, (a0) +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: rint_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFH-NEXT: vle64.v v8, (a0) +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: vse64.v v8, (a0) +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: rint_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI196_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI196_0)(a1) +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t 
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: rint_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV64ZVFHMIN-NEXT: ret %a = load <2 x double>, ptr %x %b = call <2 x double> @llvm.rint.v2f64(<2 x double> %a) store <2 x double> %b, ptr %x @@ -4745,8 +5007,9 @@ define void @nearbyint_v8f16(ptr %x) { ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; ZVFH-NEXT: vle16.v v8, (a0) -; ZVFH-NEXT: lui a1, %hi(.LCPI198_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI198_0)(a1) +; ZVFH-NEXT: li a1, 25 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vfabs.v v9, v8 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: frflags a1 @@ -4808,22 +5071,75 @@ define void @nearbyint_v4f32(ptr %x) { } define void @nearbyint_v2f64(ptr %x) { -; CHECK-LABEL: nearbyint_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: lui a1, %hi(.LCPI200_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI200_0)(a1) -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: frflags a1 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: fsflags a1 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: vse64.v v8, (a0) -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: nearbyint_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFH-NEXT: vle64.v v8, (a0) +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI200_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI200_0)(a1) +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: frflags a1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: fsflags a1 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: vse64.v v8, (a0) +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: nearbyint_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFH-NEXT: vle64.v v8, (a0) +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: frflags a1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: fsflags a1 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: vse64.v v8, (a0) +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: nearbyint_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI200_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI200_0)(a1) +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, 
fa5 +; RV32ZVFHMIN-NEXT: frflags a1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: fsflags a1 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: nearbyint_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vle64.v v8, (a0) +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: frflags a1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: fsflags a1 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vse64.v v8, (a0) +; RV64ZVFHMIN-NEXT: ret %a = load <2 x double>, ptr %x %b = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a) store <2 x double> %b, ptr %x diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll index be32c033fe373..c0b67dd603ebb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s ; This file tests the code generation for `llvm.experimental.constrained.round.*` on scalable vector type. 
@@ -11,10 +11,11 @@ define <1 x half> @round_v1f16(<1 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -34,10 +35,11 @@ define <2 x half> @round_v2f16(<2 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -57,10 +59,11 @@ define <4 x half> @round_v4f16(<4 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -80,10 +83,11 @@ define <8 x half> @round_v8f16(<8 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -103,10 +107,11 @@ define <16 x half> @round_v16f16(<16 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -125,11 +130,12 @@ define <32 x half> @round_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: round_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: lui a1, %hi(.LCPI5_0) +; CHECK-NEXT: li a1, 25 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1) +; CHECK-NEXT: slli a1, a1, 10 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: fmv.h.x fa5, a1 ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: fsrmi a0, 4 @@ -261,92 +267,168 @@ define <16 x float> @round_v16f32(<16 x float> %x) strictfp { declare <16 x float> @llvm.experimental.constrained.round.v16f32(<16 x float>, metadata) define <1 x double> @round_v1f64(<1 x double> %x) strictfp { -; CHECK-LABEL: round_v1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, 
%hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: round_v1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 4 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: round_v1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 4 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata) define <2 x double> @round_v2f64(<2 x double> %x) strictfp { -; CHECK-LABEL: round_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: round_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 4 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: round_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 4 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; 
RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata) define <4 x double> @round_v4f64(<4 x double> %x) strictfp { -; CHECK-LABEL: round_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: round_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: fsrmi a0, 4 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: round_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: fsrmi a0, 4 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata) define <8 x double> @round_v8f64(<8 x double> %x) strictfp { -; CHECK-LABEL: round_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: round_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: 
fsrmi a0, 4 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: round_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: fsrmi a0, 4 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call <8 x double> @llvm.experimental.constrained.round.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll index 774ce5c7859c9..455dc0b83c03d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll @@ -1,22 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN ; This file tests the code generation for `llvm.round.*` on fixed vector type. 
define <1 x half> @round_v1f16(<1 x half> %x) { ; ZVFH-LABEL: round_v1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI0_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0) ; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -52,10 +53,11 @@ declare <1 x half> @llvm.round.v1f16(<1 x half>) define <2 x half> @round_v2f16(<2 x half> %x) { ; ZVFH-LABEL: round_v2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI1_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0) ; ZVFH-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -91,10 +93,11 @@ declare <2 x half> @llvm.round.v2f16(<2 x half>) define <4 x half> @round_v4f16(<4 x half> %x) { ; ZVFH-LABEL: round_v4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI2_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0) ; ZVFH-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -130,10 +133,11 @@ declare <4 x half> @llvm.round.v4f16(<4 x half>) define <8 x half> @round_v8f16(<8 x half> %x) { ; ZVFH-LABEL: round_v8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI3_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0) ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -169,10 +173,11 @@ declare <8 x half> @llvm.round.v8f16(<8 x half>) define <16 x half> @round_v16f16(<16 x half> %x) { ; ZVFH-LABEL: round_v16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI4_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0) ; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -208,11 +213,12 @@ declare <16 x half> @llvm.round.v16f16(<16 x half>) define <32 x half> @round_v32f16(<32 x half> %x) { ; ZVFH-LABEL: round_v32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI5_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0) ; ZVFH-NEXT: li a0, 32 +; ZVFH-NEXT: li a1, 25 ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -347,80 +353,268 @@ define <16 x float> @round_v16f32(<16 x float> %x) { declare <16 x float> @llvm.round.v16f32(<16 x float>) define <1 x double> @round_v1f64(<1 x double> %x) { -; CHECK-LABEL: round_v1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; 
CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: round_v1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI11_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32ZVFH-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: round_v1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: round_v1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI11_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: round_v1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call <1 x double> @llvm.round.v1f64(<1 x double> %x) ret <1 x double> %a } declare <1 x double> @llvm.round.v1f64(<1 x double>) define <2 x double> @round_v2f64(<2 x double> %x) { -; CHECK-LABEL: round_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: round_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI12_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: round_v2f64: +; RV64ZVFH: # %bb.0: +; 
RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: round_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI12_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: round_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call <2 x double> @llvm.round.v2f64(<2 x double> %x) ret <2 x double> %a } declare <2 x double> @llvm.round.v2f64(<2 x double>) define <4 x double> @round_v4f64(<4 x double> %x) { -; CHECK-LABEL: round_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: round_v4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI13_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: round_v4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: round_v4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI13_0) +; RV32ZVFHMIN-NEXT: fld fa5, 
%lo(.LCPI13_0)(a0) +; RV32ZVFHMIN-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: round_v4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call <4 x double> @llvm.round.v4f64(<4 x double> %x) ret <4 x double> %a } declare <4 x double> @llvm.round.v4f64(<4 x double>) define <8 x double> @round_v8f64(<8 x double> %x) { -; CHECK-LABEL: round_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: round_v8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI14_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32ZVFH-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: round_v8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: round_v8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI14_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32ZVFHMIN-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: round_v8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li 
a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call <8 x double> @llvm.round.v8f64(<8 x double> %x) ret <8 x double> %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll index 5c0279e133dfa..b1d35d3bcdc1d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s ; This file tests the code generation for `llvm.experimental.constrained.roundeven.*` on scalable vector type. @@ -11,10 +11,11 @@ define <1 x half> @roundeven_v1f16(<1 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -34,10 +35,11 @@ define <2 x half> @roundeven_v2f16(<2 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -57,10 +59,11 @@ define <4 x half> @roundeven_v4f16(<4 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -80,10 +83,11 @@ define <8 x half> @roundeven_v8f16(<8 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, 
zero, e16, m1, ta, ma @@ -103,10 +107,11 @@ define <16 x half> @roundeven_v16f16(<16 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -125,11 +130,12 @@ define <32 x half> @roundeven_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: roundeven_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: lui a1, %hi(.LCPI5_0) +; CHECK-NEXT: li a1, 25 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1) +; CHECK-NEXT: slli a1, a1, 10 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: fmv.h.x fa5, a1 ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: fsrmi a0, 0 @@ -261,92 +267,168 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) strictfp { declare <16 x float> @llvm.experimental.constrained.roundeven.v16f32(<16 x float>, metadata) define <1 x double> @roundeven_v1f64(<1 x double> %x) strictfp { -; CHECK-LABEL: roundeven_v1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: roundeven_v1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 0 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: roundeven_v1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 0 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata) define <2 x double> @roundeven_v2f64(<2 x double> %x) strictfp { -; CHECK-LABEL: roundeven_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, 
e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: roundeven_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 0 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: roundeven_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 0 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata) define <4 x double> @roundeven_v4f64(<4 x double> %x) strictfp { -; CHECK-LABEL: roundeven_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: roundeven_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: fsrmi a0, 0 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: roundeven_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, 
a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: fsrmi a0, 0 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } declare <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double>, metadata) define <8 x double> @roundeven_v8f64(<8 x double> %x) strictfp { -; CHECK-LABEL: roundeven_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: roundeven_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: fsrmi a0, 0 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: roundeven_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: fsrmi a0, 0 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call <8 x double> @llvm.experimental.constrained.roundeven.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll index 0b6baad127643..f8b3cb5897dfa 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll @@ -1,22 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: 
-verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN ; This file tests the code generation for `llvm.roundeven.*` on fixed vector type. define <1 x half> @roundeven_v1f16(<1 x half> %x) { ; ZVFH-LABEL: roundeven_v1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI0_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0) ; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -52,10 +53,11 @@ declare <1 x half> @llvm.roundeven.v1f16(<1 x half>) define <2 x half> @roundeven_v2f16(<2 x half> %x) { ; ZVFH-LABEL: roundeven_v2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI1_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0) ; ZVFH-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -91,10 +93,11 @@ declare <2 x half> @llvm.roundeven.v2f16(<2 x half>) define <4 x half> @roundeven_v4f16(<4 x half> %x) { ; ZVFH-LABEL: roundeven_v4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI2_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0) ; ZVFH-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -130,10 +133,11 @@ declare <4 x half> @llvm.roundeven.v4f16(<4 x half>) define <8 x half> @roundeven_v8f16(<8 x half> %x) { ; ZVFH-LABEL: roundeven_v8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI3_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0) ; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -169,10 +173,11 @@ declare <8 x half> @llvm.roundeven.v8f16(<8 x half>) define <16 x half> @roundeven_v16f16(<16 x half> %x) { ; ZVFH-LABEL: roundeven_v16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI4_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0) ; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -208,11 +213,12 @@ declare <16 x half> @llvm.roundeven.v16f16(<16 x half>) define <32 x half> @roundeven_v32f16(<32 x half> %x) { ; ZVFH-LABEL: roundeven_v32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI5_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0) ; ZVFH-NEXT: li a0, 32 +; ZVFH-NEXT: li a1, 25 ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: slli a1, a1, 10 +; ZVFH-NEXT: fmv.h.x fa5, a1 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -347,80 +353,268 @@ define <16 x float> 
@roundeven_v16f32(<16 x float> %x) { declare <16 x float> @llvm.roundeven.v16f32(<16 x float>) define <1 x double> @roundeven_v1f64(<1 x double> %x) { -; CHECK-LABEL: roundeven_v1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: roundeven_v1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI11_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32ZVFH-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: roundeven_v1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: roundeven_v1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI11_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: roundeven_v1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %x) ret <1 x double> %a } declare <1 x double> @llvm.roundeven.v1f64(<1 x double>) define <2 x double> @roundeven_v2f64(<2 x double> %x) { -; CHECK-LABEL: roundeven_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t 
-; CHECK-NEXT: ret +; RV32ZVFH-LABEL: roundeven_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI12_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: roundeven_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: roundeven_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI12_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: roundeven_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x) ret <2 x double> %a } declare <2 x double> @llvm.roundeven.v2f64(<2 x double>) define <4 x double> @roundeven_v4f64(<4 x double> %x) { -; CHECK-LABEL: roundeven_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: roundeven_v4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI13_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: roundeven_v4f64: +; RV64ZVFH: # 
%bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: roundeven_v4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI13_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32ZVFHMIN-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: roundeven_v4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x) ret <4 x double> %a } declare <4 x double> @llvm.roundeven.v4f64(<4 x double>) define <8 x double> @roundeven_v8f64(<8 x double> %x) { -; CHECK-LABEL: roundeven_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: roundeven_v8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI14_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32ZVFH-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: roundeven_v8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: roundeven_v8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui 
a0, %hi(.LCPI14_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32ZVFHMIN-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: roundeven_v8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x) ret <8 x double> %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll index 2173887e85417..b7cf84fba4210 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll @@ -1,18 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s define <1 x half> @trunc_v1f16(<1 x half> %x) strictfp { ; CHECK-LABEL: trunc_v1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t @@ -30,10 +31,11 @@ define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t @@ -51,10 +53,11 @@ define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; 
CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t @@ -72,10 +75,11 @@ define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t @@ -93,10 +97,11 @@ define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t @@ -113,11 +118,12 @@ define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp { ; CHECK-LABEL: trunc_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: lui a1, %hi(.LCPI5_0) +; CHECK-NEXT: li a1, 25 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1) +; CHECK-NEXT: slli a1, a1, 10 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: fmv.h.x fa5, a1 ; CHECK-NEXT: vfabs.v v12, v8 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma @@ -237,84 +243,152 @@ define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp { declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata) define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp { -; CHECK-LABEL: trunc_v1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: trunc_v1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: trunc_v1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; 
RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") ret <1 x double> %a } declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata) define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp { -; CHECK-LABEL: trunc_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: trunc_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: trunc_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") ret <2 x double> %a } declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata) define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp { -; CHECK-LABEL: trunc_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: trunc_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: trunc_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: 
vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double> %x, metadata !"fpexcept.strict") ret <4 x double> %a } declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata) define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp { -; CHECK-LABEL: trunc_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: trunc_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: trunc_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %x, metadata !"fpexcept.strict") ret <8 x double> %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll index 4bec67d91847d..ca72905a0f39b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll @@ -3597,5 +3597,322 @@ define <4 x i32> @buildvec_vredmax_slideup(<8 x i32> %arg0, <8 x i32> %arg1, <8 ret <4 x i32> %255 } +define <16 x i16> @PR159294(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) { +; RV32-ONLY-LABEL: PR159294: +; RV32-ONLY: # %bb.0: # %entry +; RV32-ONLY-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-ONLY-NEXT: vmv.x.s a0, v8 +; RV32-ONLY-NEXT: vmv.x.s a1, v9 +; RV32-ONLY-NEXT: vmv.x.s a2, v10 +; RV32-ONLY-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; RV32-ONLY-NEXT: vmv.v.x v8, a2 +; RV32-ONLY-NEXT: vslide1down.vx v8, v8, a0 +; RV32-ONLY-NEXT: vslide1down.vx v8, v8, a1 +; RV32-ONLY-NEXT: vslidedown.vi v8, 
v8, 13 +; RV32-ONLY-NEXT: ret +; +; RV32VB-LABEL: PR159294: +; RV32VB: # %bb.0: # %entry +; RV32VB-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32VB-NEXT: vmv.x.s a0, v8 +; RV32VB-NEXT: vmv.x.s a1, v10 +; RV32VB-NEXT: slli a0, a0, 16 +; RV32VB-NEXT: zext.h a1, a1 +; RV32VB-NEXT: or a0, a1, a0 +; RV32VB-NEXT: vmv.x.s a1, v9 +; RV32VB-NEXT: vmv.v.i v8, 0 +; RV32VB-NEXT: zext.h a1, a1 +; RV32VB-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV32VB-NEXT: vmv.s.x v8, a0 +; RV32VB-NEXT: vmv.s.x v10, a1 +; RV32VB-NEXT: vsetivli zero, 2, e32, m1, tu, ma +; RV32VB-NEXT: vslideup.vi v8, v10, 1 +; RV32VB-NEXT: ret +; +; RV32VB-PACK-LABEL: PR159294: +; RV32VB-PACK: # %bb.0: # %entry +; RV32VB-PACK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32VB-PACK-NEXT: vmv.x.s a0, v8 +; RV32VB-PACK-NEXT: vmv.x.s a1, v10 +; RV32VB-PACK-NEXT: vmv.x.s a2, v9 +; RV32VB-PACK-NEXT: pack a0, a1, a0 +; RV32VB-PACK-NEXT: pack a1, a0, a0 +; RV32VB-PACK-NEXT: vmv.v.x v8, a1 +; RV32VB-PACK-NEXT: pack a1, a2, a0 +; RV32VB-PACK-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RV32VB-PACK-NEXT: vmv.s.x v8, a0 +; RV32VB-PACK-NEXT: vmv.s.x v10, a1 +; RV32VB-PACK-NEXT: vsetivli zero, 2, e32, m1, tu, ma +; RV32VB-PACK-NEXT: vslideup.vi v8, v10, 1 +; RV32VB-PACK-NEXT: ret +; +; RV64V-ONLY-LABEL: PR159294: +; RV64V-ONLY: # %bb.0: # %entry +; RV64V-ONLY-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64V-ONLY-NEXT: vmv.x.s a0, v8 +; RV64V-ONLY-NEXT: vmv.x.s a1, v9 +; RV64V-ONLY-NEXT: vmv.x.s a2, v10 +; RV64V-ONLY-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; RV64V-ONLY-NEXT: vmv.v.x v8, a2 +; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a0 +; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a1 +; RV64V-ONLY-NEXT: vslidedown.vi v8, v8, 13 +; RV64V-ONLY-NEXT: ret +; +; RVA22U64-LABEL: PR159294: +; RVA22U64: # %bb.0: # %entry +; RVA22U64-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RVA22U64-NEXT: vmv.x.s a0, v8 +; RVA22U64-NEXT: vmv.x.s a1, v10 +; RVA22U64-NEXT: slli a0, a0, 16 +; RVA22U64-NEXT: zext.h a1, a1 +; RVA22U64-NEXT: or a0, a0, a1 +; RVA22U64-NEXT: vmv.x.s a1, v9 +; RVA22U64-NEXT: vmv.v.i v8, 0 +; RVA22U64-NEXT: zext.h a1, a1 +; RVA22U64-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RVA22U64-NEXT: vmv.s.x v8, a0 +; RVA22U64-NEXT: vmv.s.x v10, a1 +; RVA22U64-NEXT: vsetivli zero, 2, e32, m1, tu, ma +; RVA22U64-NEXT: vslideup.vi v8, v10, 1 +; RVA22U64-NEXT: ret +; +; RVA22U64-PACK-LABEL: PR159294: +; RVA22U64-PACK: # %bb.0: # %entry +; RVA22U64-PACK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RVA22U64-PACK-NEXT: vmv.x.s a0, v8 +; RVA22U64-PACK-NEXT: vmv.x.s a1, v10 +; RVA22U64-PACK-NEXT: vmv.x.s a2, v9 +; RVA22U64-PACK-NEXT: packw a0, a1, a0 +; RVA22U64-PACK-NEXT: packw a1, a0, a0 +; RVA22U64-PACK-NEXT: vmv.v.x v8, a1 +; RVA22U64-PACK-NEXT: packw a1, a2, a0 +; RVA22U64-PACK-NEXT: vsetvli zero, zero, e32, m2, tu, ma +; RVA22U64-PACK-NEXT: vmv.s.x v8, a0 +; RVA22U64-PACK-NEXT: vmv.s.x v10, a1 +; RVA22U64-PACK-NEXT: vsetivli zero, 2, e32, m1, tu, ma +; RVA22U64-PACK-NEXT: vslideup.vi v8, v10, 1 +; RVA22U64-PACK-NEXT: ret +; +; RV64ZVE32-LABEL: PR159294: +; RV64ZVE32: # %bb.0: # %entry +; RV64ZVE32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64ZVE32-NEXT: vmv.x.s a0, v8 +; RV64ZVE32-NEXT: vmv.x.s a1, v9 +; RV64ZVE32-NEXT: vmv.x.s a2, v10 +; RV64ZVE32-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; RV64ZVE32-NEXT: vmv.v.x v8, a2 +; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a0 +; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a1 +; RV64ZVE32-NEXT: vslidedown.vi v8, v8, 13 +; RV64ZVE32-NEXT: ret +entry: + %vecext3 = extractelement <2 x i32> %a, i32 0 + %conv4 = trunc i32 %vecext3 
to i16 + %vecinit5 = insertelement <16 x i16> , i16 %conv4, i32 1 + %vecext7 = extractelement <2 x i32> %b, i32 0 + %conv8 = trunc i32 %vecext7 to i16 + %vecinit9 = insertelement <16 x i16> %vecinit5, i16 %conv8, i32 2 + %vecext59 = extractelement <2 x i32> %c, i32 0 + %conv60 = trunc i32 %vecext59 to i16 + %vecinit61 = insertelement <16 x i16> %vecinit9, i16 %conv60, i32 0 + ret <16 x i16> %vecinit61 +} + +define <16 x i32> @PR159294_zext(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) { +; RV32-LABEL: PR159294_zext: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: vmv.x.s a1, v9 +; RV32-NEXT: vmv.x.s a2, v10 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v8, a2 +; RV32-NEXT: lui a2, 16 +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: vslidedown.vi v8, v8, 13 +; RV32-NEXT: addi a2, a2, -1 +; RV32-NEXT: vand.vx v8, v8, a2 +; RV32-NEXT: ret +; +; RV64V-ONLY-LABEL: PR159294_zext: +; RV64V-ONLY: # %bb.0: # %entry +; RV64V-ONLY-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64V-ONLY-NEXT: vmv.x.s a0, v8 +; RV64V-ONLY-NEXT: lui a1, 16 +; RV64V-ONLY-NEXT: vmv.x.s a2, v9 +; RV64V-ONLY-NEXT: vmv.x.s a3, v10 +; RV64V-ONLY-NEXT: addi a1, a1, -1 +; RV64V-ONLY-NEXT: and a0, a0, a1 +; RV64V-ONLY-NEXT: and a2, a2, a1 +; RV64V-ONLY-NEXT: and a1, a3, a1 +; RV64V-ONLY-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV64V-ONLY-NEXT: vmv.v.x v8, a1 +; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a0 +; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a2 +; RV64V-ONLY-NEXT: vslidedown.vi v8, v8, 13 +; RV64V-ONLY-NEXT: ret +; +; RVA22U64-LABEL: PR159294_zext: +; RVA22U64: # %bb.0: # %entry +; RVA22U64-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RVA22U64-NEXT: vmv.x.s a0, v8 +; RVA22U64-NEXT: vmv.x.s a1, v10 +; RVA22U64-NEXT: slli a0, a0, 48 +; RVA22U64-NEXT: zext.h a1, a1 +; RVA22U64-NEXT: srli a0, a0, 16 +; RVA22U64-NEXT: or a0, a0, a1 +; RVA22U64-NEXT: vmv.x.s a1, v9 +; RVA22U64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RVA22U64-NEXT: vmv.v.i v8, 0 +; RVA22U64-NEXT: zext.h a1, a1 +; RVA22U64-NEXT: vsetvli zero, zero, e64, m4, tu, ma +; RVA22U64-NEXT: vmv.s.x v8, a0 +; RVA22U64-NEXT: vmv.s.x v12, a1 +; RVA22U64-NEXT: vsetivli zero, 2, e64, m1, tu, ma +; RVA22U64-NEXT: vslideup.vi v8, v12, 1 +; RVA22U64-NEXT: ret +; +; RVA22U64-PACK-LABEL: PR159294_zext: +; RVA22U64-PACK: # %bb.0: # %entry +; RVA22U64-PACK-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RVA22U64-PACK-NEXT: vmv1r.v v12, v9 +; RVA22U64-PACK-NEXT: vmv.x.s a0, v8 +; RVA22U64-PACK-NEXT: vmv.x.s a1, v10 +; RVA22U64-PACK-NEXT: pack a2, a0, a0 +; RVA22U64-PACK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RVA22U64-PACK-NEXT: vmv.v.x v8, a2 +; RVA22U64-PACK-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; RVA22U64-PACK-NEXT: vmv.x.s a2, v12 +; RVA22U64-PACK-NEXT: zext.h a0, a0 +; RVA22U64-PACK-NEXT: zext.h a1, a1 +; RVA22U64-PACK-NEXT: zext.h a2, a2 +; RVA22U64-PACK-NEXT: pack a0, a1, a0 +; RVA22U64-PACK-NEXT: pack a1, a2, a0 +; RVA22U64-PACK-NEXT: vsetvli zero, zero, e64, m4, tu, ma +; RVA22U64-PACK-NEXT: vmv.s.x v8, a0 +; RVA22U64-PACK-NEXT: vmv.s.x v12, a1 +; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, tu, ma +; RVA22U64-PACK-NEXT: vslideup.vi v8, v12, 1 +; RVA22U64-PACK-NEXT: ret +; +; RV64ZVE32-LABEL: PR159294_zext: +; RV64ZVE32: # %bb.0: # %entry +; RV64ZVE32-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVE32-NEXT: vmv.x.s a0, v8 +; RV64ZVE32-NEXT: lui a1, 16 +; RV64ZVE32-NEXT: vmv.x.s a2, v9 +; RV64ZVE32-NEXT: vmv.x.s a3, v10 +; 
RV64ZVE32-NEXT: addi a1, a1, -1 +; RV64ZVE32-NEXT: and a0, a0, a1 +; RV64ZVE32-NEXT: and a2, a2, a1 +; RV64ZVE32-NEXT: and a1, a3, a1 +; RV64ZVE32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV64ZVE32-NEXT: vmv.v.x v8, a1 +; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a0 +; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a2 +; RV64ZVE32-NEXT: vslidedown.vi v8, v8, 13 +; RV64ZVE32-NEXT: ret +entry: + %vecext3 = extractelement <2 x i16> %a, i32 0 + %conv4 = zext i16 %vecext3 to i32 + %vecinit5 = insertelement <16 x i32> , i32 %conv4, i32 1 + %vecext7 = extractelement <2 x i16> %b, i32 0 + %conv8 = zext i16 %vecext7 to i32 + %vecinit9 = insertelement <16 x i32> %vecinit5, i32 %conv8, i32 2 + %vecext59 = extractelement <2 x i16> %c, i32 0 + %conv60 = zext i16 %vecext59 to i32 + %vecinit61 = insertelement <16 x i32> %vecinit9, i32 %conv60, i32 0 + ret <16 x i32> %vecinit61 +} + +define <16 x i32> @PR159294_sext(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) { +; RV32-LABEL: PR159294_sext: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32-NEXT: vmv.x.s a0, v8 +; RV32-NEXT: vmv.x.s a1, v9 +; RV32-NEXT: vmv.x.s a2, v10 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v8, a2 +; RV32-NEXT: vslide1down.vx v8, v8, a0 +; RV32-NEXT: vslide1down.vx v8, v8, a1 +; RV32-NEXT: vslidedown.vi v8, v8, 13 +; RV32-NEXT: ret +; +; RV64V-ONLY-LABEL: PR159294_sext: +; RV64V-ONLY: # %bb.0: # %entry +; RV64V-ONLY-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64V-ONLY-NEXT: vmv.x.s a0, v8 +; RV64V-ONLY-NEXT: vmv.x.s a1, v9 +; RV64V-ONLY-NEXT: vmv.x.s a2, v10 +; RV64V-ONLY-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV64V-ONLY-NEXT: vmv.v.x v8, a2 +; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a0 +; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a1 +; RV64V-ONLY-NEXT: vslidedown.vi v8, v8, 13 +; RV64V-ONLY-NEXT: ret +; +; RVA22U64-LABEL: PR159294_sext: +; RVA22U64: # %bb.0: # %entry +; RVA22U64-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RVA22U64-NEXT: vmv.x.s a0, v8 +; RVA22U64-NEXT: vmv.x.s a1, v10 +; RVA22U64-NEXT: slli a0, a0, 32 +; RVA22U64-NEXT: add.uw a0, a1, a0 +; RVA22U64-NEXT: vmv.x.s a1, v9 +; RVA22U64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RVA22U64-NEXT: vmv.v.i v8, 0 +; RVA22U64-NEXT: zext.w a1, a1 +; RVA22U64-NEXT: vsetvli zero, zero, e64, m4, tu, ma +; RVA22U64-NEXT: vmv.s.x v8, a0 +; RVA22U64-NEXT: vmv.s.x v12, a1 +; RVA22U64-NEXT: vsetivli zero, 2, e64, m1, tu, ma +; RVA22U64-NEXT: vslideup.vi v8, v12, 1 +; RVA22U64-NEXT: ret +; +; RVA22U64-PACK-LABEL: PR159294_sext: +; RVA22U64-PACK: # %bb.0: # %entry +; RVA22U64-PACK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RVA22U64-PACK-NEXT: vmv.x.s a0, v8 +; RVA22U64-PACK-NEXT: vmv.x.s a1, v10 +; RVA22U64-PACK-NEXT: vmv.x.s a2, v9 +; RVA22U64-PACK-NEXT: pack a0, a1, a0 +; RVA22U64-PACK-NEXT: pack a1, a0, a0 +; RVA22U64-PACK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RVA22U64-PACK-NEXT: vmv.v.x v8, a1 +; RVA22U64-PACK-NEXT: pack a1, a2, a0 +; RVA22U64-PACK-NEXT: vsetvli zero, zero, e64, m4, tu, ma +; RVA22U64-PACK-NEXT: vmv.s.x v8, a0 +; RVA22U64-PACK-NEXT: vmv.s.x v12, a1 +; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, tu, ma +; RVA22U64-PACK-NEXT: vslideup.vi v8, v12, 1 +; RVA22U64-PACK-NEXT: ret +; +; RV64ZVE32-LABEL: PR159294_sext: +; RV64ZVE32: # %bb.0: # %entry +; RV64ZVE32-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVE32-NEXT: vmv.x.s a0, v8 +; RV64ZVE32-NEXT: vmv.x.s a1, v9 +; RV64ZVE32-NEXT: vmv.x.s a2, v10 +; RV64ZVE32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV64ZVE32-NEXT: vmv.v.x v8, a2 +; RV64ZVE32-NEXT: 
vslide1down.vx v8, v8, a0 +; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a1 +; RV64ZVE32-NEXT: vslidedown.vi v8, v8, 13 +; RV64ZVE32-NEXT: ret +entry: + %vecext3 = extractelement <2 x i16> %a, i32 0 + %conv4 = sext i16 %vecext3 to i32 + %vecinit5 = insertelement <16 x i32> , i32 %conv4, i32 1 + %vecext7 = extractelement <2 x i16> %b, i32 0 + %conv8 = sext i16 %vecext7 to i32 + %vecinit9 = insertelement <16 x i32> %vecinit5, i32 %conv8, i32 2 + %vecext59 = extractelement <2 x i16> %c, i32 0 + %conv60 = sext i16 %vecext59 to i32 + %vecinit61 = insertelement <16 x i32> %vecinit9, i32 %conv60, i32 0 + ret <16 x i32> %vecinit61 +} ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; RV64: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll index 1d691b130b3da..a2fcd7962b8b0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll @@ -661,8 +661,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v16, v8, v0 ; RV32-NEXT: csrr a7, vlenb -; RV32-NEXT: li t3, 36 -; RV32-NEXT: mul a7, a7, t3 +; RV32-NEXT: slli a7, a7, 5 ; RV32-NEXT: add a7, sp, a7 ; RV32-NEXT: addi a7, a7, 16 ; RV32-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill @@ -682,7 +681,11 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vl8r.v v8, (t1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vmerge.vvm v8, v24, v8, v0 -; RV32-NEXT: addi t1, sp, 16 +; RV32-NEXT: csrr t1, vlenb +; RV32-NEXT: li t2, 44 +; RV32-NEXT: mul t1, t1, t2 +; RV32-NEXT: add t1, sp, t1 +; RV32-NEXT: addi t1, t1, 16 ; RV32-NEXT: vs4r.v v8, (t1) # vscale x 32-byte Folded Spill ; RV32-NEXT: vmv.s.x v0, a7 ; RV32-NEXT: addi a3, a3, 12 @@ -694,8 +697,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 ; RV32-NEXT: csrr a7, vlenb -; RV32-NEXT: li t1, 20 -; RV32-NEXT: mul a7, a7, t1 +; RV32-NEXT: slli a7, a7, 4 ; RV32-NEXT: add a7, sp, a7 ; RV32-NEXT: addi a7, a7, 16 ; RV32-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill @@ -733,7 +735,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: csrr a7, vlenb -; RV32-NEXT: li t0, 28 +; RV32-NEXT: li t0, 24 ; RV32-NEXT: mul a7, a7, t0 ; RV32-NEXT: add a7, sp, a7 ; RV32-NEXT: addi a7, a7, 16 @@ -755,7 +757,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vmerge.vvm v8, v24, v8, v0 ; RV32-NEXT: csrr a6, vlenb -; RV32-NEXT: li a7, 44 +; RV32-NEXT: li a7, 40 ; RV32-NEXT: mul a6, a6, a7 ; RV32-NEXT: add a6, sp, a6 ; RV32-NEXT: addi a6, a6, 16 @@ -772,24 +774,19 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a4, 12 -; RV32-NEXT: mul a1, a1, a4 +; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded 
Spill ; RV32-NEXT: vmv.s.x v0, a3 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a3, 36 -; RV32-NEXT: mul a1, a1, a3 +; RV32-NEXT: slli a1, a1, 5 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vrgatherei16.vv v24, v8, v6 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 2 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a3, 92 @@ -812,8 +809,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a3, 20 -; RV32-NEXT: mul a1, a1, a3 +; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload @@ -835,12 +831,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 84 -; RV32-NEXT: mul a1, a1, a2 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill -; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a2, 72 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 @@ -860,30 +850,36 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs4r.v v28, (a1) # vscale x 32-byte Folded Spill +; RV32-NEXT: addi a1, sp, 16 +; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a2, 60 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: vmv.v.v v20, v16 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: slli a1, a1, 2 +; RV32-NEXT: li a2, 60 +; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload -; RV32-NEXT: vmv.v.v v16, v8 +; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 60 +; RV32-NEXT: li a2, 44 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vs4r.v v16, (a1) # vscale x 32-byte Folded Spill -; RV32-NEXT: addi a1, sp, 16 -; RV32-NEXT: vl4r.v v8, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; RV32-NEXT: vrgatherei16.vv v28, v8, v3 +; RV32-NEXT: vrgatherei16.vv v20, v16, v3 ; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma -; RV32-NEXT: vmv.v.v v28, v24 +; RV32-NEXT: vmv.v.v v20, v24 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 6 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill ; RV32-NEXT: lui a1, %hi(.LCPI27_4) ; RV32-NEXT: addi a1, a1, %lo(.LCPI27_4) ; RV32-NEXT: lui a2, %hi(.LCPI27_5) @@ -891,13 +887,25 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; RV32-NEXT: vle16.v 
v24, (a2) ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; RV32-NEXT: vle16.v v8, (a1) +; RV32-NEXT: vle16.v v16, (a1) +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 84 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs1r.v v16, (a1) # vscale x 8-byte Folded Spill ; RV32-NEXT: lui a1, %hi(.LCPI27_7) ; RV32-NEXT: addi a1, a1, %lo(.LCPI27_7) ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV32-NEXT: vle16.v v10, (a1) +; RV32-NEXT: vle16.v v16, (a1) ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 28 +; RV32-NEXT: li a2, 76 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs2r.v v16, (a1) # vscale x 16-byte Folded Spill +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 24 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 @@ -909,18 +917,29 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 84 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vl1r.v v7, (a1) # vscale x 8-byte Folded Reload ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; RV32-NEXT: vrgatherei16.vv v24, v20, v8 +; RV32-NEXT: vrgatherei16.vv v24, v20, v7 ; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma ; RV32-NEXT: vmv.v.v v24, v16 ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 12 -; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 76 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vl2r.v v28, (a1) # vscale x 16-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; RV32-NEXT: vrgatherei16.vv v16, v0, v10 +; RV32-NEXT: vrgatherei16.vv v16, v0, v28 ; RV32-NEXT: lui a1, %hi(.LCPI27_6) ; RV32-NEXT: addi a1, a1, %lo(.LCPI27_6) ; RV32-NEXT: lui a2, %hi(.LCPI27_8) @@ -934,7 +953,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle16.v v5, (a2) ; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 44 +; RV32-NEXT: li a2, 40 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 @@ -942,12 +961,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: vrgatherei16.vv v0, v20, v4 ; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma ; RV32-NEXT: vmv.v.v v0, v16 -; RV32-NEXT: csrr a1, vlenb -; RV32-NEXT: li a2, 84 -; RV32-NEXT: mul a1, a1, a2 -; RV32-NEXT: add a1, sp, a1 -; RV32-NEXT: addi a1, a1, 16 -; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vrgatherei16.vv v16, v8, v6 ; RV32-NEXT: csrr a1, vlenb @@ -968,7 +981,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_ ; RV32-NEXT: addi a1, a0, 192 ; RV32-NEXT: vse32.v v24, (a1) ; RV32-NEXT: addi a1, a0, 128 -; RV32-NEXT: vse32.v v28, (a1) +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 6 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: vl4r.v v8, (a2) # vscale x 32-byte Folded Reload +; RV32-NEXT: vse32.v v8, (a1) ; RV32-NEXT: addi a1, a0, 64 ; RV32-NEXT: 
csrr a2, vlenb ; RV32-NEXT: li a3, 60 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll index b6c441290ee45..08da7d6bc50f7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll @@ -1,18 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s declare <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half>, <2 x i1>, i32) define <2 x half> @vp_nearbyint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t ; CHECK-NEXT: frflags a0 @@ -30,10 +31,11 @@ define <2 x half> @vp_nearbyint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext % define <2 x half> @vp_nearbyint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -51,10 +53,11 @@ declare <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half>, <4 x i1>, i32) define <4 x half> @vp_nearbyint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t ; CHECK-NEXT: frflags a0 @@ -72,10 +75,11 @@ define <4 x half> @vp_nearbyint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext % define <4 x half> @vp_nearbyint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -93,10 +97,11 @@ declare <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half>, <8 x i1>, i32) define <8 x half> @vp_nearbyint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: 
vfabs.v v9, v8, v0.t +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t ; CHECK-NEXT: frflags a0 @@ -114,10 +119,11 @@ define <8 x half> @vp_nearbyint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext % define <8 x half> @vp_nearbyint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -137,9 +143,10 @@ define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI6_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t ; CHECK-NEXT: frflags a0 @@ -158,10 +165,11 @@ define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe define <16 x half> @vp_nearbyint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI7_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI7_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -349,41 +357,75 @@ define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext declare <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double>, <2 x i1>, i32) define <2 x double> @vp_nearbyint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI16_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI16_0) +; RV32-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vfabs.v v9, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32-NEXT: frflags a0 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vfabs.v v9, v8, v0.t +; RV64-NEXT: li 
a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64-NEXT: frflags a0 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl) ret <2 x double> %v } define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v2f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI17_0) +; RV32-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v2f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl) ret <2 x double> %v } @@ -391,43 +433,79 @@ define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext % declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: lui a0, %hi(.LCPI18_0) +; RV32-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32-NEXT: vfabs.v v12, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vmflt.vf v10, v12, fa5, v0.t +; 
RV32-NEXT: frflags a0 +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vfabs.v v12, v8, v0.t +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64-NEXT: frflags a0 +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x double> %v } define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v4f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI19_0) +; RV32-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v4f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl) ret <4 x double> %v } @@ -435,43 +513,79 @@ define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext % declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; 
CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmv1r.v v12, v0 +; RV32-NEXT: lui a0, %hi(.LCPI20_0) +; RV32-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32-NEXT: vfabs.v v16, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32-NEXT: frflags a0 +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vmv1r.v v12, v0 +; RV64-NEXT: vfabs.v v16, v8, v0.t +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64-NEXT: frflags a0 +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl) ret <8 x double> %v } define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI21_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v8f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI21_0) +; RV32-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v8f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl) ret <8 x double> %v } @@ -479,43 +593,79 @@ define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext % declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x 
i1>, i32) define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v15f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v15f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v16, v0 +; RV32-NEXT: lui a0, %hi(.LCPI22_0) +; RV32-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32-NEXT: vfabs.v v24, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32-NEXT: frflags a0 +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v15f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v16, v0 +; RV64-NEXT: vfabs.v v24, v8, v0.t +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64-NEXT: frflags a0 +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl) ret <15 x double> %v } define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v15f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI23_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v15f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI23_0) +; RV32-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v15f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x 
fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl) ret <15 x double> %v } @@ -523,43 +673,79 @@ define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroex declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v16f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v16, v0 +; RV32-NEXT: lui a0, %hi(.LCPI24_0) +; RV32-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32-NEXT: vfabs.v v24, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32-NEXT: frflags a0 +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v16f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v16, v0 +; RV64-NEXT: vfabs.v v24, v8, v0.t +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64-NEXT: frflags a0 +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl) ret <16 x double> %v } define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI25_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v16f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI25_0) +; RV32-NEXT: fld fa5, 
%lo(.LCPI25_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v16f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl) ret <16 x double> %v } @@ -567,91 +753,175 @@ define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroex declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32) define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v32f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vslidedown.vi v7, v0, 2 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: lui a1, %hi(.LCPI26_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1) -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: sltu a0, a0, a1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: frflags a1 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: fsflags a1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v32f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32-NEXT: vmv1r.v v6, v0 +; RV32-NEXT: li a2, 16 +; RV32-NEXT: vslidedown.vi v7, v0, 2 +; RV32-NEXT: mv a1, a0 +; RV32-NEXT: bltu a0, a2, .LBB26_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB26_2: +; RV32-NEXT: vmv1r.v v0, v6 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v24, v8, v0.t +; RV32-NEXT: lui a1, %hi(.LCPI26_0) +; RV32-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32-NEXT: addi a1, a0, -16 +; RV32-NEXT: sltu a0, a0, a1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a1 +; RV32-NEXT: vsetvli zero, 
zero, e64, m8, ta, mu +; RV32-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32-NEXT: frflags a1 +; RV32-NEXT: vmv1r.v v0, v6 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: fsflags a1 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v24, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32-NEXT: frflags a0 +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v32f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64-NEXT: vmv1r.v v6, v0 +; RV64-NEXT: li a2, 16 +; RV64-NEXT: vslidedown.vi v7, v0, 2 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB26_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB26_2: +; RV64-NEXT: vmv1r.v v0, v6 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v24, v8, v0.t +; RV64-NEXT: li a1, 1075 +; RV64-NEXT: slli a1, a1, 52 +; RV64-NEXT: fmv.d.x fa5, a1 +; RV64-NEXT: addi a1, a0, -16 +; RV64-NEXT: sltu a0, a0, a1 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64-NEXT: and a0, a0, a1 +; RV64-NEXT: frflags a1 +; RV64-NEXT: vmv1r.v v0, v6 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: fsflags a1 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v24, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64-NEXT: frflags a0 +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) ret <32 x double> %v } define <32 x double> @vp_nearbyint_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_v32f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: lui a2, %hi(.LCPI27_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2) -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: sltu a0, a0, a2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a2 -; CHECK-NEXT: frflags a2 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v7, v24, fa5 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: fsflags a2 -; CHECK-NEXT: vsetvli zero, zero, 
e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: frflags a1 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: fsflags a1 -; CHECK-NEXT: ret +; RV32-LABEL: vp_nearbyint_v32f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: mv a1, a0 +; RV32-NEXT: bltu a0, a2, .LBB27_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB27_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v24, v8 +; RV32-NEXT: lui a2, %hi(.LCPI27_0) +; RV32-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32-NEXT: addi a2, a0, -16 +; RV32-NEXT: sltu a0, a0, a2 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a2 +; RV32-NEXT: frflags a2 +; RV32-NEXT: vmflt.vf v0, v24, fa5 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v24, v16 +; RV32-NEXT: vmflt.vf v7, v24, fa5 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: fsflags a2 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32-NEXT: frflags a1 +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32-NEXT: fsflags a1 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_nearbyint_v32f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a2, 16 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB27_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB27_2: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v24, v8 +; RV64-NEXT: li a2, 1075 +; RV64-NEXT: slli a2, a2, 52 +; RV64-NEXT: fmv.d.x fa5, a2 +; RV64-NEXT: addi a2, a0, -16 +; RV64-NEXT: sltu a0, a0, a2 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a2 +; RV64-NEXT: frflags a2 +; RV64-NEXT: vmflt.vf v0, v24, fa5 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v24, v16 +; RV64-NEXT: vmflt.vf v7, v24, fa5 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: fsflags a2 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64-NEXT: frflags a1 +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64-NEXT: fsflags a1 +; RV64-NEXT: ret %v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl) ret <32 x double> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll index a426f8c619e99..eec12212d0d37 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=riscv64 
-target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s +; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>) @@ -2083,21 +2083,38 @@ define float @vreduce_fminimum_v128f32_nonans(ptr %x) { declare double @llvm.vector.reduce.fminimum.v2f64(<2 x double>) define double @vreduce_fminimum_v2f64(ptr %x) { -; CHECK-LABEL: vreduce_fminimum_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vmfne.vv v9, v8, v8 -; CHECK-NEXT: vcpop.m a0, v9 -; CHECK-NEXT: beqz a0, .LBB123_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI123_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI123_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB123_2: -; CHECK-NEXT: vfredmin.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fminimum_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: vmfne.vv v9, v8, v8 +; RV32-NEXT: vcpop.m a0, v9 +; RV32-NEXT: beqz a0, .LBB123_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI123_0) +; RV32-NEXT: fld fa0, %lo(.LCPI123_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB123_2: +; RV32-NEXT: vfredmin.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fminimum_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: vmfne.vv v9, v8, v8 +; RV64-NEXT: vcpop.m a0, v9 +; RV64-NEXT: beqz a0, .LBB123_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; RV64-NEXT: .LBB123_2: +; RV64-NEXT: vfredmin.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <2 x double>, ptr %x %red = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> %v) ret double %red @@ -2119,21 +2136,38 @@ define double @vreduce_fminimum_v2f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fminimum.v4f64(<4 x double>) define double @vreduce_fminimum_v4f64(ptr %x) { -; CHECK-LABEL: vreduce_fminimum_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vmfne.vv v10, v8, v8 -; CHECK-NEXT: vcpop.m a0, v10 -; CHECK-NEXT: beqz a0, .LBB125_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI125_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI125_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB125_2: -; CHECK-NEXT: vfredmin.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fminimum_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: vmfne.vv v10, v8, v8 +; RV32-NEXT: vcpop.m a0, v10 +; RV32-NEXT: beqz a0, .LBB125_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI125_0) +; RV32-NEXT: fld fa0, %lo(.LCPI125_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB125_2: +; RV32-NEXT: vfredmin.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fminimum_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: vmfne.vv v10, v8, v8 +; RV64-NEXT: vcpop.m a0, v10 +; RV64-NEXT: beqz a0, .LBB125_2 +; 
RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; RV64-NEXT: .LBB125_2: +; RV64-NEXT: vfredmin.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <4 x double>, ptr %x %red = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> %v) ret double %red @@ -2155,21 +2189,38 @@ define double @vreduce_fminimum_v4f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fminimum.v8f64(<8 x double>) define double @vreduce_fminimum_v8f64(ptr %x) { -; CHECK-LABEL: vreduce_fminimum_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vmfne.vv v12, v8, v8 -; CHECK-NEXT: vcpop.m a0, v12 -; CHECK-NEXT: beqz a0, .LBB127_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI127_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI127_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB127_2: -; CHECK-NEXT: vfredmin.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fminimum_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: vmfne.vv v12, v8, v8 +; RV32-NEXT: vcpop.m a0, v12 +; RV32-NEXT: beqz a0, .LBB127_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI127_0) +; RV32-NEXT: fld fa0, %lo(.LCPI127_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB127_2: +; RV32-NEXT: vfredmin.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fminimum_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: vmfne.vv v12, v8, v8 +; RV64-NEXT: vcpop.m a0, v12 +; RV64-NEXT: beqz a0, .LBB127_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; RV64-NEXT: .LBB127_2: +; RV64-NEXT: vfredmin.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <8 x double>, ptr %x %red = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> %v) ret double %red @@ -2191,21 +2242,38 @@ define double @vreduce_fminimum_v8f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fminimum.v16f64(<16 x double>) define double @vreduce_fminimum_v16f64(ptr %x) { -; CHECK-LABEL: vreduce_fminimum_v16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vmfne.vv v16, v8, v8 -; CHECK-NEXT: vcpop.m a0, v16 -; CHECK-NEXT: beqz a0, .LBB129_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI129_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI129_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB129_2: -; CHECK-NEXT: vfredmin.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fminimum_v16f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: vmfne.vv v16, v8, v8 +; RV32-NEXT: vcpop.m a0, v16 +; RV32-NEXT: beqz a0, .LBB129_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI129_0) +; RV32-NEXT: fld fa0, %lo(.LCPI129_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB129_2: +; RV32-NEXT: vfredmin.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fminimum_v16f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: vmfne.vv v16, v8, v8 +; RV64-NEXT: vcpop.m a0, v16 +; RV64-NEXT: beqz a0, .LBB129_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 
+; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; RV64-NEXT: .LBB129_2: +; RV64-NEXT: vfredmin.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <16 x double>, ptr %x %red = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> %v) ret double %red @@ -2227,29 +2295,54 @@ define double @vreduce_fminimum_v16f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fminimum.v32f64(<32 x double>) define double @vreduce_fminimum_v32f64(ptr %x) { -; CHECK-LABEL: vreduce_fminimum_v32f64: -; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v16, (a0) -; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: vmfeq.vv v0, v16, v16 -; CHECK-NEXT: vmfeq.vv v7, v24, v24 -; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0 -; CHECK-NEXT: vfmin.vv v8, v16, v8 -; CHECK-NEXT: vmfne.vv v16, v8, v8 -; CHECK-NEXT: vcpop.m a0, v16 -; CHECK-NEXT: beqz a0, .LBB131_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI131_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI131_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB131_2: -; CHECK-NEXT: vfredmin.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fminimum_v32f64: +; RV32: # %bb.0: +; RV32-NEXT: addi a1, a0, 128 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV32-NEXT: vle64.v v16, (a0) +; RV32-NEXT: vle64.v v24, (a1) +; RV32-NEXT: vmfeq.vv v0, v16, v16 +; RV32-NEXT: vmfeq.vv v7, v24, v24 +; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vmerge.vvm v16, v24, v16, v0 +; RV32-NEXT: vfmin.vv v8, v16, v8 +; RV32-NEXT: vmfne.vv v16, v8, v8 +; RV32-NEXT: vcpop.m a0, v16 +; RV32-NEXT: beqz a0, .LBB131_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI131_0) +; RV32-NEXT: fld fa0, %lo(.LCPI131_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB131_2: +; RV32-NEXT: vfredmin.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fminimum_v32f64: +; RV64: # %bb.0: +; RV64-NEXT: addi a1, a0, 128 +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vle64.v v16, (a0) +; RV64-NEXT: vle64.v v24, (a1) +; RV64-NEXT: vmfeq.vv v0, v16, v16 +; RV64-NEXT: vmfeq.vv v7, v24, v24 +; RV64-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vmerge.vvm v16, v24, v16, v0 +; RV64-NEXT: vfmin.vv v8, v16, v8 +; RV64-NEXT: vmfne.vv v16, v8, v8 +; RV64-NEXT: vcpop.m a0, v16 +; RV64-NEXT: beqz a0, .LBB131_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; RV64-NEXT: .LBB131_2: +; RV64-NEXT: vfredmin.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <32 x double>, ptr %x %red = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> %v) ret double %red @@ -2274,85 +2367,166 @@ define double @vreduce_fminimum_v32f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fminimum.v64f64(<64 x double>) define double @vreduce_fminimum_v64f64(ptr %x) { -; CHECK-LABEL: vreduce_fminimum_v64f64: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, 
(a1) -; CHECK-NEXT: addi a1, a0, 384 -; CHECK-NEXT: vle64.v v16, (a1) -; CHECK-NEXT: addi a1, a0, 256 -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vmfeq.vv v0, v24, v24 -; CHECK-NEXT: vmfeq.vv v7, v16, v16 -; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vle64.v v8, (a1) -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 -; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfmin.vv v24, v16, v24 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vmfeq.vv v0, v16, v16 -; CHECK-NEXT: vmfeq.vv v7, v8, v8 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfmin.vv v16, v8, v16 -; CHECK-NEXT: vmfeq.vv v0, v16, v16 -; CHECK-NEXT: vmfeq.vv v7, v24, v24 -; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0 -; CHECK-NEXT: vfmin.vv v8, v16, v8 -; CHECK-NEXT: vmfne.vv v16, v8, v8 -; CHECK-NEXT: vcpop.m a0, v16 -; CHECK-NEXT: beqz a0, .LBB133_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI133_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI133_0)(a0) -; CHECK-NEXT: j .LBB133_3 -; CHECK-NEXT: .LBB133_2: -; CHECK-NEXT: vfredmin.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: .LBB133_3: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fminimum_v64f64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 4 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV32-NEXT: addi a1, a0, 128 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV32-NEXT: vle64.v v24, (a1) +; RV32-NEXT: addi a1, a0, 384 +; RV32-NEXT: vle64.v v16, (a1) +; RV32-NEXT: addi a1, a0, 256 +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; RV32-NEXT: vmfeq.vv v0, v24, v24 +; RV32-NEXT: vmfeq.vv v7, v16, v16 +; RV32-NEXT: vmerge.vvm v8, v24, v16, v0 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; RV32-NEXT: vle64.v v8, (a1) +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: 
vmerge.vvm v16, v16, v24, v0 +; RV32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vfmin.vv v24, v16, v24 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vmfeq.vv v0, v16, v16 +; RV32-NEXT: vmfeq.vv v7, v8, v8 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vmerge.vvm v16, v16, v8, v0 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vfmin.vv v16, v8, v16 +; RV32-NEXT: vmfeq.vv v0, v16, v16 +; RV32-NEXT: vmfeq.vv v7, v24, v24 +; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vmerge.vvm v16, v24, v16, v0 +; RV32-NEXT: vfmin.vv v8, v16, v8 +; RV32-NEXT: vmfne.vv v16, v8, v8 +; RV32-NEXT: vcpop.m a0, v16 +; RV32-NEXT: beqz a0, .LBB133_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI133_0) +; RV32-NEXT: fld fa0, %lo(.LCPI133_0)(a0) +; RV32-NEXT: j .LBB133_3 +; RV32-NEXT: .LBB133_2: +; RV32-NEXT: vfredmin.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: .LBB133_3: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fminimum_v64f64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: addi a1, a0, 128 +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vle64.v v24, (a1) +; RV64-NEXT: addi a1, a0, 384 +; RV64-NEXT: vle64.v v16, (a1) +; RV64-NEXT: addi a1, a0, 256 +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; RV64-NEXT: vmfeq.vv v0, v24, v24 +; RV64-NEXT: vmfeq.vv v7, v16, v16 +; RV64-NEXT: vmerge.vvm v8, v24, v16, v0 +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; RV64-NEXT: vle64.v v8, (a1) +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vmerge.vvm v16, v16, v24, v0 +; RV64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vfmin.vv v24, v16, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vmfeq.vv v0, v16, v16 +; RV64-NEXT: vmfeq.vv v7, v8, v8 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vmerge.vvm v16, v16, v8, v0 +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vs8r.v 
v16, (a0) # vscale x 64-byte Folded Spill +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vmerge.vvm v8, v8, v16, v0 +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vfmin.vv v16, v8, v16 +; RV64-NEXT: vmfeq.vv v0, v16, v16 +; RV64-NEXT: vmfeq.vv v7, v24, v24 +; RV64-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vmerge.vvm v16, v24, v16, v0 +; RV64-NEXT: vfmin.vv v8, v16, v8 +; RV64-NEXT: vmfne.vv v16, v8, v8 +; RV64-NEXT: vcpop.m a0, v16 +; RV64-NEXT: beqz a0, .LBB133_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: j .LBB133_3 +; RV64-NEXT: .LBB133_2: +; RV64-NEXT: vfredmin.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: .LBB133_3: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 +; RV64-NEXT: ret %v = load <64 x double>, ptr %x %red = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> %v) ret double %red @@ -2765,21 +2939,38 @@ define float @vreduce_fmaximum_v128f32_nonans(ptr %x) { declare double @llvm.vector.reduce.fmaximum.v2f64(<2 x double>) define double @vreduce_fmaximum_v2f64(ptr %x) { -; CHECK-LABEL: vreduce_fmaximum_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vmfne.vv v9, v8, v8 -; CHECK-NEXT: vcpop.m a0, v9 -; CHECK-NEXT: beqz a0, .LBB151_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI151_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI151_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB151_2: -; CHECK-NEXT: vfredmax.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fmaximum_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: vmfne.vv v9, v8, v8 +; RV32-NEXT: vcpop.m a0, v9 +; RV32-NEXT: beqz a0, .LBB151_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI151_0) +; RV32-NEXT: fld fa0, %lo(.LCPI151_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB151_2: +; RV32-NEXT: vfredmax.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fmaximum_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: vmfne.vv v9, v8, v8 +; RV64-NEXT: vcpop.m a0, v9 +; RV64-NEXT: beqz a0, .LBB151_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; RV64-NEXT: .LBB151_2: +; RV64-NEXT: vfredmax.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <2 x double>, ptr %x %red = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> %v) ret double %red @@ -2801,21 +2992,38 @@ define double @vreduce_fmaximum_v2f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fmaximum.v4f64(<4 x double>) define double @vreduce_fmaximum_v4f64(ptr %x) { -; CHECK-LABEL: vreduce_fmaximum_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vmfne.vv v10, v8, v8 -; CHECK-NEXT: vcpop.m a0, v10 -; CHECK-NEXT: beqz a0, .LBB153_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI153_0) -; 
CHECK-NEXT: fld fa0, %lo(.LCPI153_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB153_2: -; CHECK-NEXT: vfredmax.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fmaximum_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: vmfne.vv v10, v8, v8 +; RV32-NEXT: vcpop.m a0, v10 +; RV32-NEXT: beqz a0, .LBB153_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI153_0) +; RV32-NEXT: fld fa0, %lo(.LCPI153_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB153_2: +; RV32-NEXT: vfredmax.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fmaximum_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: vmfne.vv v10, v8, v8 +; RV64-NEXT: vcpop.m a0, v10 +; RV64-NEXT: beqz a0, .LBB153_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; RV64-NEXT: .LBB153_2: +; RV64-NEXT: vfredmax.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <4 x double>, ptr %x %red = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> %v) ret double %red @@ -2837,21 +3045,38 @@ define double @vreduce_fmaximum_v4f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fmaximum.v8f64(<8 x double>) define double @vreduce_fmaximum_v8f64(ptr %x) { -; CHECK-LABEL: vreduce_fmaximum_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vmfne.vv v12, v8, v8 -; CHECK-NEXT: vcpop.m a0, v12 -; CHECK-NEXT: beqz a0, .LBB155_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI155_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI155_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB155_2: -; CHECK-NEXT: vfredmax.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fmaximum_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: vmfne.vv v12, v8, v8 +; RV32-NEXT: vcpop.m a0, v12 +; RV32-NEXT: beqz a0, .LBB155_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI155_0) +; RV32-NEXT: fld fa0, %lo(.LCPI155_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB155_2: +; RV32-NEXT: vfredmax.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fmaximum_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: vmfne.vv v12, v8, v8 +; RV64-NEXT: vcpop.m a0, v12 +; RV64-NEXT: beqz a0, .LBB155_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; RV64-NEXT: .LBB155_2: +; RV64-NEXT: vfredmax.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <8 x double>, ptr %x %red = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> %v) ret double %red @@ -2873,21 +3098,38 @@ define double @vreduce_fmaximum_v8f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fmaximum.v16f64(<16 x double>) define double @vreduce_fmaximum_v16f64(ptr %x) { -; CHECK-LABEL: vreduce_fmaximum_v16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vmfne.vv v16, v8, v8 -; CHECK-NEXT: vcpop.m a0, v16 -; CHECK-NEXT: beqz a0, .LBB157_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI157_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI157_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: 
.LBB157_2: -; CHECK-NEXT: vfredmax.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fmaximum_v16f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: vmfne.vv v16, v8, v8 +; RV32-NEXT: vcpop.m a0, v16 +; RV32-NEXT: beqz a0, .LBB157_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI157_0) +; RV32-NEXT: fld fa0, %lo(.LCPI157_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB157_2: +; RV32-NEXT: vfredmax.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fmaximum_v16f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: vmfne.vv v16, v8, v8 +; RV64-NEXT: vcpop.m a0, v16 +; RV64-NEXT: beqz a0, .LBB157_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; RV64-NEXT: .LBB157_2: +; RV64-NEXT: vfredmax.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <16 x double>, ptr %x %red = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> %v) ret double %red @@ -2909,29 +3151,54 @@ define double @vreduce_fmaximum_v16f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fmaximum.v32f64(<32 x double>) define double @vreduce_fmaximum_v32f64(ptr %x) { -; CHECK-LABEL: vreduce_fmaximum_v32f64: -; CHECK: # %bb.0: -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v16, (a0) -; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: vmfeq.vv v0, v16, v16 -; CHECK-NEXT: vmfeq.vv v7, v24, v24 -; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0 -; CHECK-NEXT: vfmax.vv v8, v16, v8 -; CHECK-NEXT: vmfne.vv v16, v8, v8 -; CHECK-NEXT: vcpop.m a0, v16 -; CHECK-NEXT: beqz a0, .LBB159_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI159_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI159_0)(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB159_2: -; CHECK-NEXT: vfredmax.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fmaximum_v32f64: +; RV32: # %bb.0: +; RV32-NEXT: addi a1, a0, 128 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV32-NEXT: vle64.v v16, (a0) +; RV32-NEXT: vle64.v v24, (a1) +; RV32-NEXT: vmfeq.vv v0, v16, v16 +; RV32-NEXT: vmfeq.vv v7, v24, v24 +; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vmerge.vvm v16, v24, v16, v0 +; RV32-NEXT: vfmax.vv v8, v16, v8 +; RV32-NEXT: vmfne.vv v16, v8, v8 +; RV32-NEXT: vcpop.m a0, v16 +; RV32-NEXT: beqz a0, .LBB159_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI159_0) +; RV32-NEXT: fld fa0, %lo(.LCPI159_0)(a0) +; RV32-NEXT: ret +; RV32-NEXT: .LBB159_2: +; RV32-NEXT: vfredmax.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fmaximum_v32f64: +; RV64: # %bb.0: +; RV64-NEXT: addi a1, a0, 128 +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vle64.v v16, (a0) +; RV64-NEXT: vle64.v v24, (a1) +; RV64-NEXT: vmfeq.vv v0, v16, v16 +; RV64-NEXT: vmfeq.vv v7, v24, v24 +; RV64-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vmerge.vvm v16, v24, v16, v0 +; RV64-NEXT: vfmax.vv v8, v16, v8 +; RV64-NEXT: vmfne.vv v16, v8, v8 +; RV64-NEXT: vcpop.m a0, v16 +; RV64-NEXT: beqz a0, .LBB159_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: ret +; 
RV64-NEXT: .LBB159_2: +; RV64-NEXT: vfredmax.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: ret %v = load <32 x double>, ptr %x %red = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> %v) ret double %red @@ -2956,85 +3223,166 @@ define double @vreduce_fmaximum_v32f64_nonans(ptr %x) { declare double @llvm.vector.reduce.fmaximum.v64f64(<64 x double>) define double @vreduce_fmaximum_v64f64(ptr %x) { -; CHECK-LABEL: vreduce_fmaximum_v64f64: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb -; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma -; CHECK-NEXT: vle64.v v24, (a1) -; CHECK-NEXT: addi a1, a0, 384 -; CHECK-NEXT: vle64.v v16, (a1) -; CHECK-NEXT: addi a1, a0, 256 -; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vmfeq.vv v0, v24, v24 -; CHECK-NEXT: vmfeq.vv v7, v16, v16 -; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vle64.v v8, (a1) -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 -; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfmax.vv v24, v16, v24 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vmfeq.vv v0, v16, v16 -; CHECK-NEXT: vmfeq.vv v7, v8, v8 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfmax.vv v16, v8, v16 -; CHECK-NEXT: vmfeq.vv v0, v16, v16 -; CHECK-NEXT: vmfeq.vv v7, v24, v24 -; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0 -; CHECK-NEXT: vfmax.vv v8, v16, v8 -; CHECK-NEXT: vmfne.vv v16, v8, v8 -; CHECK-NEXT: vcpop.m a0, v16 -; CHECK-NEXT: beqz a0, .LBB161_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: lui a0, %hi(.LCPI161_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI161_0)(a0) -; CHECK-NEXT: j .LBB161_3 -; CHECK-NEXT: .LBB161_2: -; CHECK-NEXT: vfredmax.vs v8, v8, v8 -; CHECK-NEXT: vfmv.f.s fa0, v8 -; CHECK-NEXT: .LBB161_3: -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; RV32-LABEL: vreduce_fmaximum_v64f64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, 
vlenb +; RV32-NEXT: slli a1, a1, 4 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV32-NEXT: addi a1, a0, 128 +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV32-NEXT: vle64.v v24, (a1) +; RV32-NEXT: addi a1, a0, 384 +; RV32-NEXT: vle64.v v16, (a1) +; RV32-NEXT: addi a1, a0, 256 +; RV32-NEXT: vle64.v v8, (a0) +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; RV32-NEXT: vmfeq.vv v0, v24, v24 +; RV32-NEXT: vmfeq.vv v7, v16, v16 +; RV32-NEXT: vmerge.vvm v8, v24, v16, v0 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; RV32-NEXT: vle64.v v8, (a1) +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vmerge.vvm v16, v16, v24, v0 +; RV32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vfmax.vv v24, v16, v24 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vmfeq.vv v0, v16, v16 +; RV32-NEXT: vmfeq.vv v7, v8, v8 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vmerge.vvm v16, v16, v8, v0 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV32-NEXT: vfmax.vv v16, v8, v16 +; RV32-NEXT: vmfeq.vv v0, v16, v16 +; RV32-NEXT: vmfeq.vv v7, v24, v24 +; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vmerge.vvm v16, v24, v16, v0 +; RV32-NEXT: vfmax.vv v8, v16, v8 +; RV32-NEXT: vmfne.vv v16, v8, v8 +; RV32-NEXT: vcpop.m a0, v16 +; RV32-NEXT: beqz a0, .LBB161_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: lui a0, %hi(.LCPI161_0) +; RV32-NEXT: fld fa0, %lo(.LCPI161_0)(a0) +; RV32-NEXT: j .LBB161_3 +; RV32-NEXT: .LBB161_2: +; RV32-NEXT: vfredmax.vs v8, v8, v8 +; RV32-NEXT: vfmv.f.s fa0, v8 +; RV32-NEXT: .LBB161_3: +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: .cfi_def_cfa sp, 16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 +; RV32-NEXT: ret +; +; RV64-LABEL: vreduce_fmaximum_v64f64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: addi a1, a0, 128 +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma +; RV64-NEXT: vle64.v v24, (a1) +; RV64-NEXT: addi a1, a0, 384 +; RV64-NEXT: vle64.v v16, (a1) +; RV64-NEXT: addi a1, a0, 256 +; RV64-NEXT: vle64.v v8, (a0) +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; RV64-NEXT: vmfeq.vv v0, v24, v24 
+; RV64-NEXT: vmfeq.vv v7, v16, v16 +; RV64-NEXT: vmerge.vvm v8, v24, v16, v0 +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; RV64-NEXT: vle64.v v8, (a1) +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vmerge.vvm v16, v16, v24, v0 +; RV64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vfmax.vv v24, v16, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vmfeq.vv v0, v16, v16 +; RV64-NEXT: vmfeq.vv v7, v8, v8 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vmerge.vvm v16, v16, v8, v0 +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vmerge.vvm v8, v8, v16, v0 +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; RV64-NEXT: vfmax.vv v16, v8, v16 +; RV64-NEXT: vmfeq.vv v0, v16, v16 +; RV64-NEXT: vmfeq.vv v7, v24, v24 +; RV64-NEXT: vmerge.vvm v8, v16, v24, v0 +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vmerge.vvm v16, v24, v16, v0 +; RV64-NEXT: vfmax.vv v8, v16, v8 +; RV64-NEXT: vmfne.vv v16, v8, v8 +; RV64-NEXT: vcpop.m a0, v16 +; RV64-NEXT: beqz a0, .LBB161_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: lui a0, 4095 +; RV64-NEXT: slli a0, a0, 39 +; RV64-NEXT: fmv.d.x fa0, a0 +; RV64-NEXT: j .LBB161_3 +; RV64-NEXT: .LBB161_2: +; RV64-NEXT: vfredmax.vs v8, v8, v8 +; RV64-NEXT: vfmv.f.s fa0, v8 +; RV64-NEXT: .LBB161_3: +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa sp, 16 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: .cfi_def_cfa_offset 0 +; RV64-NEXT: ret %v = load <64 x double>, ptr %x %red = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> %v) ret double %red diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll index 35cd789acfcc8..97cf7e6902e32 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll @@ -1,18 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s declare <2 x half> @llvm.vp.rint.v2f16(<2 x half>, <2 x i1>, i32) define <2 x half> @vp_rint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t ; CHECK-NEXT: vsetvli zero, 
zero, e16, mf4, ta, ma @@ -28,10 +29,11 @@ define <2 x half> @vp_rint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) define <2 x half> @vp_rint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -47,10 +49,11 @@ declare <4 x half> @llvm.vp.rint.v4f16(<4 x half>, <4 x i1>, i32) define <4 x half> @vp_rint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -66,10 +69,11 @@ define <4 x half> @vp_rint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) define <4 x half> @vp_rint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -85,10 +89,11 @@ declare <8 x half> @llvm.vp.rint.v8f16(<8 x half>, <8 x i1>, i32) define <8 x half> @vp_rint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8, v0.t +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -104,10 +109,11 @@ define <8 x half> @vp_rint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) define <8 x half> @vp_rint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -125,9 +131,10 @@ define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI6_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; CHECK-NEXT: vfabs.v v12, v8, v0.t +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t ; CHECK-NEXT: vmv1r.v v0, v10 @@ -144,10 
+151,11 @@ define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e define <16 x half> @vp_rint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_rint_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI7_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI7_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t @@ -317,37 +325,67 @@ define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) declare <2 x double> @llvm.vp.rint.v2f64(<2 x double>, <2 x i1>, i32) define <2 x double> @vp_rint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI16_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v2f64: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI16_0) +; RV32-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vfabs.v v9, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vfabs.v v9, v8, v0.t +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %v = call <2 x double> @llvm.vp.rint.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl) ret <2 x double> %v } define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v2f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI17_0) +; RV32-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: 
ret +; +; RV64-LABEL: vp_rint_v2f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %v = call <2 x double> @llvm.vp.rint.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl) ret <2 x double> %v } @@ -355,39 +393,71 @@ define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmv1r.v v10, v0 +; RV32-NEXT: lui a0, %hi(.LCPI18_0) +; RV32-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32-NEXT: vfabs.v v12, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32-NEXT: vmv1r.v v0, v10 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vmv1r.v v10, v0 +; RV64-NEXT: vfabs.v v12, v8, v0.t +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64-NEXT: vmv1r.v v0, v10 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %v = call <4 x double> @llvm.vp.rint.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x double> %v } define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v4f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI19_0) +; RV32-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, 
fa5 +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v4f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %v = call <4 x double> @llvm.vp.rint.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl) ret <4 x double> %v } @@ -395,39 +465,71 @@ define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmv1r.v v12, v0 +; RV32-NEXT: lui a0, %hi(.LCPI20_0) +; RV32-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32-NEXT: vfabs.v v16, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vmv1r.v v12, v0 +; RV64-NEXT: vfabs.v v16, v8, v0.t +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: ret %v = call <8 x double> @llvm.vp.rint.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl) ret <8 x double> %v } define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI21_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v8f64_unmasked: +; RV32: # 
%bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI21_0) +; RV32-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v8f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %v = call <8 x double> @llvm.vp.rint.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl) ret <8 x double> %v } @@ -435,39 +537,71 @@ define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v15f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v15f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v16, v0 +; RV32-NEXT: lui a0, %hi(.LCPI22_0) +; RV32-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32-NEXT: vfabs.v v24, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v15f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v16, v0 +; RV64-NEXT: vfabs.v v24, v8, v0.t +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64-NEXT: ret %v = call <15 x double> @llvm.vp.rint.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl) ret <15 x double> %v } define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v15f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI23_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: vfcvt.x.f.v 
v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v15f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI23_0) +; RV32-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v15f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: ret %v = call <15 x double> @llvm.vp.rint.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl) ret <15 x double> %v } @@ -475,39 +609,71 @@ define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v16f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v16, v0 +; RV32-NEXT: lui a0, %hi(.LCPI24_0) +; RV32-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32-NEXT: vfabs.v v24, v8, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v16f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v16, v0 +; RV64-NEXT: vfabs.v v24, v8, v0.t +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64-NEXT: ret %v = call <16 x double> @llvm.vp.rint.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl) ret <16 x double> %v } define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v16f64_unmasked: -; CHECK: # %bb.0: 
-; CHECK-NEXT: lui a1, %hi(.LCPI25_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v16f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI25_0) +; RV32-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v16f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: ret %v = call <16 x double> @llvm.vp.rint.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl) ret <16 x double> %v } @@ -515,83 +681,159 @@ define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32) define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v32f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vslidedown.vi v7, v0, 2 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: lui a1, %hi(.LCPI26_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1) -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: sltu a0, a0, a1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: and a0, a0, a1 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v32f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32-NEXT: vmv1r.v v6, v0 +; RV32-NEXT: li a2, 16 +; RV32-NEXT: vslidedown.vi v7, v0, 2 +; RV32-NEXT: mv a1, a0 +; RV32-NEXT: bltu a0, a2, .LBB26_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB26_2: +; RV32-NEXT: vmv1r.v v0, v6 +; RV32-NEXT: vsetvli zero, a1, 
e64, m8, ta, ma +; RV32-NEXT: vfabs.v v24, v8, v0.t +; RV32-NEXT: lui a1, %hi(.LCPI26_0) +; RV32-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32-NEXT: addi a1, a0, -16 +; RV32-NEXT: sltu a0, a0, a1 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32-NEXT: and a0, a0, a1 +; RV32-NEXT: vmv1r.v v0, v6 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v24, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v32f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64-NEXT: vmv1r.v v6, v0 +; RV64-NEXT: li a2, 16 +; RV64-NEXT: vslidedown.vi v7, v0, 2 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB26_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB26_2: +; RV64-NEXT: vmv1r.v v0, v6 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v24, v8, v0.t +; RV64-NEXT: li a1, 1075 +; RV64-NEXT: slli a1, a1, 52 +; RV64-NEXT: fmv.d.x fa5, a1 +; RV64-NEXT: addi a1, a0, -16 +; RV64-NEXT: sltu a0, a0, a1 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64-NEXT: and a0, a0, a1 +; RV64-NEXT: vmv1r.v v0, v6 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v24, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64-NEXT: ret %v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) ret <32 x double> %v } define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_v32f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: lui a2, %hi(.LCPI27_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2) -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: sltu a0, a0, a2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a2 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v7, v24, fa5 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli 
zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: vp_rint_v32f64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: mv a1, a0 +; RV32-NEXT: bltu a0, a2, .LBB27_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB27_2: +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v24, v8 +; RV32-NEXT: lui a2, %hi(.LCPI27_0) +; RV32-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32-NEXT: addi a2, a0, -16 +; RV32-NEXT: sltu a0, a0, a2 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a2 +; RV32-NEXT: vmflt.vf v0, v24, fa5 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfabs.v v24, v16 +; RV32-NEXT: vmflt.vf v7, v24, fa5 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v7 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_rint_v32f64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a2, 16 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB27_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB27_2: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v24, v8 +; RV64-NEXT: li a2, 1075 +; RV64-NEXT: slli a2, a2, 52 +; RV64-NEXT: fmv.d.x fa5, a2 +; RV64-NEXT: addi a2, a0, -16 +; RV64-NEXT: sltu a0, a0, a2 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: vmflt.vf v0, v24, fa5 +; RV64-NEXT: and a0, a0, a2 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfabs.v v24, v16 +; RV64-NEXT: vmflt.vf v7, v24, fa5 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64-NEXT: vmv1r.v v0, v7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64-NEXT: ret %v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl) ret <32 x double> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll index d8ff7062f033e..16c8b2b9da682 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll @@ -1,22 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck 
%s --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare <2 x half> @llvm.vp.round.v2f16(<2 x half>, <2 x i1>, i32) define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI0_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 4 @@ -59,10 +60,11 @@ define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI1_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.round.v4f16(<4 x half>, <4 x i1>, i32) define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI2_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 4 @@ -145,10 +148,11 @@ define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI3_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -185,10 +189,11 @@ declare <8 x half> @llvm.vp.round.v8f16(<8 x half>, <8 x i1>, i32) define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI4_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 4 @@ -231,10 +236,11 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext 
%evl) { ; ZVFH-LABEL: vp_round_v8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI5_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -273,9 +279,10 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext % ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 4 @@ -319,10 +326,11 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext % define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_v16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -529,41 +537,141 @@ define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl declare <2 x double> @llvm.vp.round.v2f64(<2 x double>, <2 x i1>, i32) define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI16_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; 
RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl) ret <2 x double> %v } define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; 
RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl) ret <2 x double> %v } @@ -571,43 +679,149 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v4f64: +; RV32ZVFHMIN: # %bb.0: +; 
RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x double> %v } define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v4f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: 
vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl) ret <4 x double> %v } @@ -615,43 +829,149 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v8f64: +; RV32ZVFHMIN: # %bb.0: +; 
RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl) ret <8 x double> %v } define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI21_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: 
vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl) ret <8 x double> %v } @@ -659,43 +979,149 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v15f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v15f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v15f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v15f64: +; RV32ZVFHMIN: # %bb.0: +; 
RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v15f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl) ret <15 x double> %v } define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v15f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI23_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v15f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v15f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v15f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; 
RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v15f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl) ret <15 x double> %v } @@ -703,43 +1129,149 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v16f64: +; 
RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl) ret <16 x double> %v } define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI25_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v16f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, 
a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl) ret <16 x double> %v } @@ -747,91 +1279,341 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e declare <32 x double> @llvm.vp.round.v32f64(<32 x double>, <32 x i1>, i32) define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v32f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vslidedown.vi v7, v0, 2 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: lui a1, %hi(.LCPI26_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1) -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: sltu a0, a0, a1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a1, 4 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v32f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v6, v0 +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB26_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFH-NEXT: fld fa5, 
%lo(.LCPI26_0)(a1) +; RV32ZVFH-NEXT: addi a1, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a1 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a1 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v32f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v6, v0 +; RV64ZVFH-NEXT: li a2, 16 +; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB26_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: addi a1, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a1 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: and a0, a0, a1 +; RV64ZVFH-NEXT: fsrmi a1, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v32f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB26_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; 
RV32ZVFHMIN-NEXT: addi a1, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a1 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v32f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB26_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: addi a1, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: and a0, a0, a1 +; RV64ZVFHMIN-NEXT: fsrmi a1, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) ret <32 x double> %v } define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_v32f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli 
zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: lui a2, %hi(.LCPI27_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2) -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: sltu a0, a0, a2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a2 -; CHECK-NEXT: fsrmi a2, 4 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v7, v24, fa5 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: fsrmi a1, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_v32f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB27_2: +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFH-NEXT: addi a2, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a2 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a2 +; RV32ZVFH-NEXT: fsrmi a2, 4 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 4 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_v32f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: li a2, 16 +; RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB27_2: +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: addi a2, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a2 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; RV64ZVFH-NEXT: and a0, a0, a2 +; RV64ZVFH-NEXT: fsrmi a2, 4 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: fsrmi a1, 4 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; 
RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_v32f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB27_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFHMIN-NEXT: addi a2, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a2 +; RV32ZVFHMIN-NEXT: fsrmi a2, 4 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 4 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_v32f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB27_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: addi a2, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: and a0, a0, a2 +; RV64ZVFHMIN-NEXT: fsrmi a2, 4 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a1, 4 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl) ret <32 x double> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll index 2649f234375d2..14c550d555cf7 100644 --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll @@ -1,22 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare <2 x half> @llvm.vp.roundeven.v2f16(<2 x half>, <2 x i1>, i32) define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI0_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 0 @@ -59,10 +60,11 @@ define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext % define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI1_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.roundeven.v4f16(<4 x half>, <4 x i1>, i32) define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI2_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 0 @@ -145,10 +148,11 @@ define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext % define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI3_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -185,10 +189,11 @@ declare <8 x half> 
@llvm.vp.roundeven.v8f16(<8 x half>, <8 x i1>, i32) define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI4_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 0 @@ -231,10 +236,11 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext % define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI5_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -273,9 +279,10 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 0 @@ -319,10 +326,11 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_v16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -529,41 +537,141 @@ define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext declare <2 x double> @llvm.vp.roundeven.v2f64(<2 x double>, <2 x i1>, i32) define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI16_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, 
v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl) ret <2 x double> %v } define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; 
RV64ZVFH-LABEL: vp_roundeven_v2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl) ret <2 x double> %v } @@ -571,43 +679,149 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext % declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; 
RV64ZVFH-LABEL: vp_roundeven_v4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x double> %v } define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, 
v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v4f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl) ret <4 x double> %v } @@ -615,43 +829,149 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext % declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv 
v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl) ret <8 x double> %v } define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI21_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, 
ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl) ret <8 x double> %v } @@ -659,43 +979,149 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext % declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v15f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v15f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: 
vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v15f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v15f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v15f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl) ret <15 x double> %v } define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v15f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI23_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v15f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; 
RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v15f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v15f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v15f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl) ret <15 x double> %v } @@ -703,43 +1129,149 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, 
v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v16f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl) ret <16 x double> %v } define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI25_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: 
fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v16f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl) ret <16 x double> %v } @@ -747,91 +1279,341 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex declare <32 x double> @llvm.vp.roundeven.v32f64(<32 x double>, <32 x i1>, i32) define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v32f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vslidedown.vi v7, v0, 2 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: lui a1, %hi(.LCPI26_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1) -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: sltu a0, a0, a1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a1, 0 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, 
m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v32f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v6, v0 +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB26_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32ZVFH-NEXT: addi a1, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a1 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a1 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v32f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v6, v0 +; RV64ZVFH-NEXT: li a2, 16 +; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB26_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: addi a1, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a1 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: and a0, a0, a1 +; RV64ZVFH-NEXT: fsrmi a1, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; 
RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v32f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB26_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32ZVFHMIN-NEXT: addi a1, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a1 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v32f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB26_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: addi a1, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: and a0, a0, a1 +; RV64ZVFHMIN-NEXT: fsrmi a1, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, 
v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) ret <32 x double> %v } define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_v32f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: lui a2, %hi(.LCPI27_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2) -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: sltu a0, a0, a2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a2 -; CHECK-NEXT: fsrmi a2, 0 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v7, v24, fa5 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: fsrmi a1, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_v32f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB27_2: +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFH-NEXT: addi a2, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a2 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a2 +; RV32ZVFH-NEXT: fsrmi a2, 0 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 0 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_v32f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: li a2, 16 +; 
RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB27_2: +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: addi a2, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a2 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; RV64ZVFH-NEXT: and a0, a0, a2 +; RV64ZVFH-NEXT: fsrmi a2, 0 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: fsrmi a1, 0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_v32f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB27_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFHMIN-NEXT: addi a2, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a2 +; RV32ZVFHMIN-NEXT: fsrmi a2, 0 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 0 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_v32f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB27_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: addi a2, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: and a0, a0, a2 +; RV64ZVFHMIN-NEXT: fsrmi a2, 0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: 
vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a1, 0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl) ret <32 x double> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll index 50e65b62e7848..16f04f14721d0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll @@ -1,22 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half>, <2 x i1>, i32) define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI0_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 1 @@ -59,10 +60,11 @@ define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI1_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half>, <4 x i1>, i32) define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) { ; 
ZVFH-LABEL: vp_roundtozero_v4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI2_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 1 @@ -145,10 +148,11 @@ define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI3_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -185,10 +189,11 @@ declare <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half>, <8 x i1>, i32) define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI4_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 1 @@ -231,10 +236,11 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI5_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -273,9 +279,10 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vmv1r.v v10, v0 -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vfabs.v v12, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 1 @@ -319,10 +326,11 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_v16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -529,41 +537,141 @@ define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroex declare <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double>, <2 x i1>, i32) define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) { -; 
CHECK-LABEL: vp_roundtozero_v2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI16_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl) ret <2 x double> %v } define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 
zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl) ret <2 x double> %v } @@ -571,43 +679,149 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32) define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf 
v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl) ret <4 x double> %v } define <4 x double> 
@vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v4f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl) ret <4 x double> %v } @@ -615,43 +829,149 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32) define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, 
v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x 
double> %va, <8 x i1> %m, i32 %evl) ret <8 x double> %v } define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI21_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl) ret <8 x double> %v } @@ -659,43 +979,149 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32) define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v15f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, 
%hi(.LCPI22_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v15f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v15f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v15f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v15f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, 
v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl) ret <15 x double> %v } define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v15f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI23_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v15f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v15f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v15f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v15f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl) ret <15 x double> %v } @@ -703,43 +1129,149 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32) define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v16f64: -; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI24_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v16f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: 
vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl) ret <16 x double> %v } define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI25_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v16f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl) ret <16 x double> %v } @@ -747,91 +1279,341 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero declare <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double>, <32 x i1>, i32) define <32 x double> 
@vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v32f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v6, v0 -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: vslidedown.vi v7, v0, 2 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB26_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: lui a1, %hi(.LCPI26_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1) -; CHECK-NEXT: addi a1, a0, -16 -; CHECK-NEXT: sltu a0, a0, a1 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a1, 1 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v32f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v6, v0 +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB26_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32ZVFH-NEXT: addi a1, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a1 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a1 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v32f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v 
v6, v0 +; RV64ZVFH-NEXT: li a2, 16 +; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB26_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a1, 1075 +; RV64ZVFH-NEXT: slli a1, a1, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a1 +; RV64ZVFH-NEXT: addi a1, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a1 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: and a0, a0, a1 +; RV64ZVFH-NEXT: fsrmi a1, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v32f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB26_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1) +; RV32ZVFHMIN-NEXT: addi a1, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a1 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v32f64: +; RV64ZVFHMIN: # %bb.0: +; 
RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0 +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB26_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a1, 1075 +; RV64ZVFHMIN-NEXT: slli a1, a1, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1 +; RV64ZVFHMIN-NEXT: addi a1, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a1 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: and a0, a0, a1 +; RV64ZVFHMIN-NEXT: fsrmi a1, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl) ret <32 x double> %v } define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_v32f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: li a2, 16 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: bltu a0, a2, .LBB27_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a1, 16 -; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: lui a2, %hi(.LCPI27_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2) -; CHECK-NEXT: addi a2, a0, -16 -; CHECK-NEXT: sltu a0, a0, a2 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: and a0, a0, a2 -; CHECK-NEXT: fsrmi a2, 1 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v7, v24, fa5 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: fsrmi a1, 1 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a1 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_v32f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: li a2, 16 +; RV32ZVFH-NEXT: mv a1, a0 +; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: li a1, 16 +; RV32ZVFH-NEXT: .LBB27_2: +; 
RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFH-NEXT: addi a2, a0, -16 +; RV32ZVFH-NEXT: sltu a0, a0, a2 +; RV32ZVFH-NEXT: addi a0, a0, -1 +; RV32ZVFH-NEXT: and a0, a0, a2 +; RV32ZVFH-NEXT: fsrmi a2, 1 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: fsrmi a1, 1 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a1 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_v32f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: li a2, 16 +; RV64ZVFH-NEXT: mv a1, a0 +; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: li a1, 16 +; RV64ZVFH-NEXT: .LBB27_2: +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: addi a2, a0, -16 +; RV64ZVFH-NEXT: sltu a0, a0, a2 +; RV64ZVFH-NEXT: addi a0, a0, -1 +; RV64ZVFH-NEXT: and a0, a0, a2 +; RV64ZVFH-NEXT: fsrmi a2, 1 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: fsrmi a1, 1 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a1 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_v32f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: li a2, 16 +; RV32ZVFHMIN-NEXT: mv a1, a0 +; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: li a1, 16 +; RV32ZVFHMIN-NEXT: .LBB27_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2) +; RV32ZVFHMIN-NEXT: addi a2, a0, -16 +; RV32ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV32ZVFHMIN-NEXT: addi a0, a0, -1 +; RV32ZVFHMIN-NEXT: and a0, a0, a2 +; RV32ZVFHMIN-NEXT: fsrmi a2, 1 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a1, 1 
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a1 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_v32f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: li a2, 16 +; RV64ZVFHMIN-NEXT: mv a1, a0 +; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: li a1, 16 +; RV64ZVFHMIN-NEXT: .LBB27_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: addi a2, a0, -16 +; RV64ZVFHMIN-NEXT: sltu a0, a0, a2 +; RV64ZVFHMIN-NEXT: addi a0, a0, -1 +; RV64ZVFHMIN-NEXT: and a0, a0, a2 +; RV64ZVFHMIN-NEXT: fsrmi a2, 1 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5 +; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a1, 1 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a1 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl) ret <32 x double> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll index af79ace04cf54..965d0b0fe0f9b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s define <4 x bfloat> @shuffle_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) { ; CHECK-LABEL: shuffle_v4bf16: @@ -39,29 +39,49 @@ define <8 x float> 
@shuffle_v8f32(<8 x float> %x, <8 x float> %y) { } define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) { -; CHECK-LABEL: shuffle_fv_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI3_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.v.i v0, 9 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 -; CHECK-NEXT: ret +; RV32-LABEL: shuffle_fv_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: lui a0, %hi(.LCPI3_0) +; RV32-NEXT: fld fa5, %lo(.LCPI3_0)(a0) +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV32-NEXT: vmv.v.i v0, 9 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: shuffle_fv_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV64-NEXT: vmv.v.i v0, 9 +; RV64-NEXT: li a0, 1 +; RV64-NEXT: slli a0, a0, 62 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret %s = shufflevector <4 x double> , <4 x double> %x, <4 x i32> ret <4 x double> %s } define <4 x double> @shuffle_vf_v4f64(<4 x double> %x) { -; CHECK-LABEL: shuffle_vf_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI4_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.v.i v0, 6 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 -; CHECK-NEXT: ret +; RV32-LABEL: shuffle_vf_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: lui a0, %hi(.LCPI4_0) +; RV32-NEXT: fld fa5, %lo(.LCPI4_0)(a0) +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV32-NEXT: vmv.v.i v0, 6 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: shuffle_vf_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV64-NEXT: vmv.v.i v0, 6 +; RV64-NEXT: li a0, 1 +; RV64-NEXT: slli a0, a0, 62 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> , <4 x i32> ret <4 x double> %s } @@ -79,15 +99,25 @@ define <4 x float> @vfmerge_constant_v4f32(<4 x float> %x) { } define <4 x double> @vfmerge_constant_v4f64(<4 x double> %x) { -; CHECK-LABEL: vfmerge_constant_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI6_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI6_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.v.i v0, 6 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 -; CHECK-NEXT: ret +; RV32-LABEL: vfmerge_constant_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: lui a0, %hi(.LCPI6_0) +; RV32-NEXT: fld fa5, %lo(.LCPI6_0)(a0) +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV32-NEXT: vmv.v.i v0, 6 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: vfmerge_constant_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV64-NEXT: vmv.v.i v0, 6 +; RV64-NEXT: lui a0, 4101 +; RV64-NEXT: slli a0, a0, 38 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> , <4 x i32> ret <4 x double> %s } @@ -161,40 +191,71 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y) } define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) { -; CHECK-LABEL: 
vrgather_shuffle_xv_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.v.i v0, 8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu -; CHECK-NEXT: vslideup.vi v10, v8, 2, v0.t -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.v.i v0, 12 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; CHECK-NEXT: vfmv.v.f v8, fa5 -; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret +; RV32-LABEL: vrgather_shuffle_xv_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV32-NEXT: vmv.v.i v0, 8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vmv2r.v v10, v8 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vslideup.vi v10, v8, 2, v0.t +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV32-NEXT: vmv.v.i v0, 12 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32-NEXT: vfmv.v.f v8, fa5 +; RV32-NEXT: vmerge.vvm v8, v8, v10, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: vrgather_shuffle_xv_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV64-NEXT: vmv.v.i v0, 8 +; RV64-NEXT: vmv2r.v v10, v8 +; RV64-NEXT: li a0, 1 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vslideup.vi v10, v8, 2, v0.t +; RV64-NEXT: slli a0, a0, 62 +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV64-NEXT: vmv.v.i v0, 12 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vmv.v.x v8, a0 +; RV64-NEXT: vmerge.vvm v8, v8, v10, v0 +; RV64-NEXT: ret %s = shufflevector <4 x double> , <4 x double> %x, <4 x i32> ret <4 x double> %s } define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) { -; CHECK-LABEL: vrgather_shuffle_vx_v4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.v.i v0, 2 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu -; CHECK-NEXT: vslidedown.vi v8, v8, 2, v0.t -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma -; CHECK-NEXT: vmv.v.i v0, 3 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma -; CHECK-NEXT: vfmv.v.f v10, fa5 -; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 -; CHECK-NEXT: ret +; RV32-LABEL: vrgather_shuffle_vx_v4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV32-NEXT: vmv.v.i v0, 2 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vslidedown.vi v8, v8, 2, v0.t +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV32-NEXT: vmv.v.i v0, 3 +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV32-NEXT: vfmv.v.f v10, fa5 +; RV32-NEXT: vmerge.vvm v8, v10, v8, v0 +; RV32-NEXT: ret +; +; RV64-LABEL: vrgather_shuffle_vx_v4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV64-NEXT: vmv.v.i v0, 2 +; RV64-NEXT: li a0, 1 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vslidedown.vi v8, v8, 2, v0.t +; RV64-NEXT: slli a0, a0, 62 +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma +; RV64-NEXT: vmv.v.i v0, 3 +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; RV64-NEXT: vmv.v.x v10, a0 +; RV64-NEXT: vmerge.vvm v8, v10, v8, v0 +; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> , <4 x i32> ret <4 x double> %s } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll index 
c76aa7c4d317d..5c17283cacd1b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll @@ -3,8 +3,8 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64 ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvkb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-V ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvkb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-V -; RUN: llc -mtriple=riscv32 -mattr=+zve32x,+zvfh,+zvkb,+zvl64b -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X -; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvfh,+zvkb,+zvl64b -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X +; RUN: llc -mtriple=riscv32 -mattr=+zve32x,+zvfh,+zvkb,+zvl64b -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X,RV32ZVKB-ZVE32X +; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvfh,+zvkb,+zvl64b -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X,RV64ZVKB-ZVE32X define <8 x i1> @shuffle_v8i1_as_i8_1(<8 x i1> %v) { ; CHECK-LABEL: shuffle_v8i1_as_i8_1: @@ -926,6 +926,136 @@ define <8 x i64> @shuffle_v8i64_as_i128(<8 x i64> %v) { ; ZVKB-V-NEXT: vslideup.vi v12, v8, 1, v0.t ; ZVKB-V-NEXT: vmv.v.v v8, v12 ; ZVKB-V-NEXT: ret +; +; RV32ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i128: +; RV32ZVKB-ZVE32X: # %bb.0: +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV32ZVKB-ZVE32X-NEXT: sw ra, 124(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s0, 120(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s2, 116(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s3, 112(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset ra, -4 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s0, -8 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s2, -12 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s3, -16 +; RV32ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV32ZVKB-ZVE32X-NEXT: lw a2, 0(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a3, 4(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a4, 8(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a5, 12(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a6, 16(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a7, 20(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t0, 24(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t1, 28(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t2, 48(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t3, 52(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t4, 56(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t5, 60(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t6, 32(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s2, 36(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s3, 40(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a1, 44(a1) +; RV32ZVKB-ZVE32X-NEXT: sw t4, 48(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t5, 52(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t2, 56(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t3, 60(sp) +; RV32ZVKB-ZVE32X-NEXT: sw s3, 32(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a1, 36(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t6, 40(sp) +; RV32ZVKB-ZVE32X-NEXT: sw s2, 44(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t0, 16(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t1, 20(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a6, 24(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a7, 28(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a4, 0(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a5, 4(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a2, 8(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a3, 12(sp) +; RV32ZVKB-ZVE32X-NEXT: mv a1, sp +; RV32ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV32ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV32ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV32ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; 
RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV32ZVKB-ZVE32X-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s2, 116(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s3, 112(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s2 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s3 +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVKB-ZVE32X-NEXT: ret +; +; RV64ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i128: +; RV64ZVKB-ZVE32X: # %bb.0: +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV64ZVKB-ZVE32X-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s2, 104(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s3, 96(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset ra, -8 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s0, -16 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s2, -24 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s3, -32 +; RV64ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV64ZVKB-ZVE32X-NEXT: ld a2, 0(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a3, 8(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a4, 16(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a5, 24(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a6, 32(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a7, 40(a1) +; RV64ZVKB-ZVE32X-NEXT: ld t0, 48(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a1, 56(a1) +; RV64ZVKB-ZVE32X-NEXT: srli t1, a3, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t2, a2, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t3, a5, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t4, a4, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t5, a7, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t6, a6, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s2, a1, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s3, t0, 32 +; RV64ZVKB-ZVE32X-NEXT: sw a1, 48(sp) +; RV64ZVKB-ZVE32X-NEXT: sw s2, 52(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t0, 56(sp) +; RV64ZVKB-ZVE32X-NEXT: sw s3, 60(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a7, 32(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t5, 36(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a6, 40(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t6, 44(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a5, 16(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t3, 20(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a4, 24(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t4, 28(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a3, 0(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t1, 4(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a2, 8(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t2, 12(sp) +; RV64ZVKB-ZVE32X-NEXT: mv a1, sp +; RV64ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV64ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV64ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV64ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV64ZVKB-ZVE32X-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s2, 104(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s3, 96(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s2 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s3 +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVKB-ZVE32X-NEXT: ret %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> ret <8 x i64> %shuffle } @@ -951,6 +1081,104 @@ define <8 x i64> @shuffle_v8i64_as_i128_2(<8 x i64> %v) { ; ZVKB-V-NEXT: 
vslideup.vi v12, v8, 1, v0.t ; ZVKB-V-NEXT: vmv.v.v v8, v12 ; ZVKB-V-NEXT: ret +; +; RV32ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i128_2: +; RV32ZVKB-ZVE32X: # %bb.0: +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV32ZVKB-ZVE32X-NEXT: sw ra, 124(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s0, 120(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset ra, -4 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s0, -8 +; RV32ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV32ZVKB-ZVE32X-NEXT: lw a2, 16(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a3, 20(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a4, 24(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a5, 28(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a6, 48(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a7, 52(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t0, 56(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t1, 60(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t2, 32(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t3, 36(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t4, 40(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a1, 44(a1) +; RV32ZVKB-ZVE32X-NEXT: sw t0, 48(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t1, 52(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a6, 56(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a7, 60(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t4, 32(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a1, 36(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t2, 40(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t3, 44(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a4, 16(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a5, 20(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a2, 24(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a3, 28(sp) +; RV32ZVKB-ZVE32X-NEXT: mv a1, sp +; RV32ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV32ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV32ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV32ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV32ZVKB-ZVE32X-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVKB-ZVE32X-NEXT: ret +; +; RV64ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i128_2: +; RV64ZVKB-ZVE32X: # %bb.0: +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV64ZVKB-ZVE32X-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset ra, -8 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s0, -16 +; RV64ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV64ZVKB-ZVE32X-NEXT: ld a2, 16(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a3, 24(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a4, 32(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a5, 40(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a6, 48(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a1, 56(a1) +; RV64ZVKB-ZVE32X-NEXT: srli a7, a3, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t0, a2, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t1, a5, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t2, a4, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t3, a1, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t4, a6, 32 +; RV64ZVKB-ZVE32X-NEXT: sw a1, 48(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t3, 52(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a6, 56(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t4, 60(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a5, 32(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t1, 36(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a4, 40(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t2, 44(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a3, 16(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a7, 20(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a2, 24(sp) +; 
RV64ZVKB-ZVE32X-NEXT: sw t0, 28(sp) +; RV64ZVKB-ZVE32X-NEXT: mv a1, sp +; RV64ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV64ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV64ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV64ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV64ZVKB-ZVE32X-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVKB-ZVE32X-NEXT: ret %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> ret <8 x i64> %shuffle } @@ -975,6 +1203,136 @@ define <8 x i64> @shuffle_v8i64_as_i256(<8 x i64> %v) { ; ZVKB-V-NEXT: vrgatherei16.vv v12, v8, v16 ; ZVKB-V-NEXT: vmv.v.v v8, v12 ; ZVKB-V-NEXT: ret +; +; RV32ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i256: +; RV32ZVKB-ZVE32X: # %bb.0: +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV32ZVKB-ZVE32X-NEXT: sw ra, 124(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s0, 120(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s2, 116(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s3, 112(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset ra, -4 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s0, -8 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s2, -12 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s3, -16 +; RV32ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV32ZVKB-ZVE32X-NEXT: lw a2, 0(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a3, 4(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a4, 8(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a5, 12(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a6, 16(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a7, 20(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t0, 24(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t1, 28(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t2, 32(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t3, 36(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t4, 40(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t5, 44(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t6, 48(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s2, 52(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s3, 56(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a1, 60(a1) +; RV32ZVKB-ZVE32X-NEXT: sw t2, 48(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t3, 52(sp) +; RV32ZVKB-ZVE32X-NEXT: sw s3, 56(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a1, 60(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t6, 32(sp) +; RV32ZVKB-ZVE32X-NEXT: sw s2, 36(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t4, 40(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t5, 44(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a2, 16(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a3, 20(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t0, 24(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t1, 28(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a6, 0(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a7, 4(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a4, 8(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a5, 12(sp) +; RV32ZVKB-ZVE32X-NEXT: mv a1, sp +; RV32ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV32ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV32ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV32ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV32ZVKB-ZVE32X-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s2, 116(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s3, 112(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s2 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s3 +; 
RV32ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVKB-ZVE32X-NEXT: ret +; +; RV64ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i256: +; RV64ZVKB-ZVE32X: # %bb.0: +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV64ZVKB-ZVE32X-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s2, 104(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s3, 96(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset ra, -8 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s0, -16 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s2, -24 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s3, -32 +; RV64ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV64ZVKB-ZVE32X-NEXT: ld a2, 0(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a3, 8(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a4, 16(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a5, 24(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a6, 32(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a7, 40(a1) +; RV64ZVKB-ZVE32X-NEXT: ld t0, 48(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a1, 56(a1) +; RV64ZVKB-ZVE32X-NEXT: srli t1, a4, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t2, a3, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t3, a2, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t4, a5, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t5, t0, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t6, a7, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s2, a6, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s3, a1, 32 +; RV64ZVKB-ZVE32X-NEXT: sw a6, 48(sp) +; RV64ZVKB-ZVE32X-NEXT: sw s2, 52(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a1, 56(sp) +; RV64ZVKB-ZVE32X-NEXT: sw s3, 60(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t0, 32(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t5, 36(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a7, 40(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t6, 44(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a2, 16(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t3, 20(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a5, 24(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t4, 28(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a4, 0(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t1, 4(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a3, 8(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t2, 12(sp) +; RV64ZVKB-ZVE32X-NEXT: mv a1, sp +; RV64ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV64ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV64ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV64ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV64ZVKB-ZVE32X-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s2, 104(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s3, 96(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s2 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s3 +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVKB-ZVE32X-NEXT: ret %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> ret <8 x i64> %shuffle } @@ -1003,6 +1361,106 @@ define <8 x i64> @shuffle_v8i64_as_i256_zvl256b(<8 x i64> %v) vscale_range(4,0) ; ZVKB-V-NEXT: vrgatherei16.vv v10, v8, v12 ; ZVKB-V-NEXT: vmv2r.v v8, v10 ; ZVKB-V-NEXT: ret +; +; RV32ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i256_zvl256b: +; RV32ZVKB-ZVE32X: # %bb.0: +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, -16 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 16 +; RV32ZVKB-ZVE32X-NEXT: sw s0, 12(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s1, 8(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s0, -4 +; 
RV32ZVKB-ZVE32X-NEXT: .cfi_offset s1, -8 +; RV32ZVKB-ZVE32X-NEXT: lw a2, 48(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a3, 52(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a4, 56(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a5, 60(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a6, 32(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a7, 36(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t0, 40(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t1, 44(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t2, 16(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t3, 20(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t4, 24(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t5, 28(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t6, 0(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s0, 4(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s1, 8(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a1, 12(a1) +; RV32ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m2, ta, ma +; RV32ZVKB-ZVE32X-NEXT: vmv.v.x v8, t2 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t3 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, s1 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a1 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t6 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, s0 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t4 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t5 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a2 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a3 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t0 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t1 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a6 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a7 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a4 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a5 +; RV32ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV32ZVKB-ZVE32X-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s1, 8(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s1 +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, 16 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVKB-ZVE32X-NEXT: ret +; +; RV64ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i256_zvl256b: +; RV64ZVKB-ZVE32X: # %bb.0: +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, -16 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 16 +; RV64ZVKB-ZVE32X-NEXT: sd s0, 8(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s1, 0(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s0, -8 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s1, -16 +; RV64ZVKB-ZVE32X-NEXT: ld a2, 32(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a3, 40(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a4, 48(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a5, 56(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a6, 0(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a7, 8(a1) +; RV64ZVKB-ZVE32X-NEXT: ld t0, 16(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a1, 24(a1) +; RV64ZVKB-ZVE32X-NEXT: srli t1, a5, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t2, a2, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t3, a3, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t4, a4, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t5, a1, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t6, a6, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s0, a7, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s1, t0, 32 +; RV64ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m2, ta, ma +; RV64ZVKB-ZVE32X-NEXT: vmv.v.x v8, t0 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, s1 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a7 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, s0 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a6 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t6 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a1 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t5 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a4 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t4 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a3 +; RV64ZVKB-ZVE32X-NEXT: 
vslide1down.vx v8, v8, t3 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a2 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t2 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a5 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t1 +; RV64ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV64ZVKB-ZVE32X-NEXT: ld s0, 8(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s1, 0(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s1 +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, 16 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVKB-ZVE32X-NEXT: ret %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> ret <8 x i64> %shuffle } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll index 5aa3a246d7616..0561ee9addc7b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll @@ -5,7 +5,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING,ZVFHMIN ; Check that the default value enables the web folding and ; that it is bigger than 3. -; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING +; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING,ZVFH define void @vfwmul_v2f116_multiple_users(ptr %x, ptr %y, ptr %z, <2 x half> %a, <2 x half> %b, <2 x half> %b2) { ; NO_FOLDING1-LABEL: vfwmul_v2f116_multiple_users: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll index 90e9ffdcb320a..eeb232ec1555c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s +; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s define <2 x i16> @vwmulu_v2i16(ptr %x, ptr %y) { ; CHECK-LABEL: vwmulu_v2i16: @@ -750,28 +750,25 @@ define <2 x i64> @vwmulu_vx_v2i64_i8(ptr %x, ptr %y) { ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: lb a1, 0(a1) -; RV32-NEXT: vle32.v v25, (a0) -; RV32-NEXT: srai a0, a1, 31 +; RV32-NEXT: lbu a1, 0(a1) +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: sw a1, 8(sp) -; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v26, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-NEXT: vzext.vf2 v27, v25 -; RV32-NEXT: vmul.vv v8, v26, v27 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vzext.vf2 v10, v8 +; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmulu_vx_v2i64_i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vle32.v v25, (a0) -; RV64-NEXT: lb a0, 0(a1) 
-; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV64-NEXT: vzext.vf2 v26, v25 -; RV64-NEXT: vmul.vx v8, v26, a0 +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV64-NEXT: vle32.v v9, (a0) +; RV64-NEXT: lbu a0, 0(a1) +; RV64-NEXT: vwmulu.vx v8, v9, a0 ; RV64-NEXT: ret %a = load <2 x i32>, ptr %x %b = load i8, ptr %y @@ -788,28 +785,25 @@ define <2 x i64> @vwmulu_vx_v2i64_i16(ptr %x, ptr %y) { ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: lh a1, 0(a1) -; RV32-NEXT: vle32.v v25, (a0) -; RV32-NEXT: srai a0, a1, 31 +; RV32-NEXT: lhu a1, 0(a1) +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: sw a1, 8(sp) -; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v26, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-NEXT: vzext.vf2 v27, v25 -; RV32-NEXT: vmul.vv v8, v26, v27 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vzext.vf2 v10, v8 +; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmulu_vx_v2i64_i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vle32.v v25, (a0) -; RV64-NEXT: lh a0, 0(a1) -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV64-NEXT: vzext.vf2 v26, v25 -; RV64-NEXT: vmul.vx v8, v26, a0 +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV64-NEXT: vle32.v v9, (a0) +; RV64-NEXT: lhu a0, 0(a1) +; RV64-NEXT: vwmulu.vx v8, v9, a0 ; RV64-NEXT: ret %a = load <2 x i32>, ptr %x %b = load i16, ptr %y @@ -826,28 +820,25 @@ define <2 x i64> @vwmulu_vx_v2i64_i32(ptr %x, ptr %y) { ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: lw a1, 0(a1) -; RV32-NEXT: vle32.v v25, (a0) -; RV32-NEXT: srai a0, a1, 31 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: sw a1, 8(sp) -; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v26, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-NEXT: vzext.vf2 v27, v25 -; RV32-NEXT: vmul.vv v8, v26, v27 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vzext.vf2 v10, v8 +; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmulu_vx_v2i64_i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vle32.v v25, (a0) +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lw a0, 0(a1) -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV64-NEXT: vzext.vf2 v26, v25 -; RV64-NEXT: vmul.vx v8, v26, a0 +; RV64-NEXT: vwmulu.vx v8, v9, a0 ; RV64-NEXT: ret %a = load <2 x i32>, ptr %x %b = load i32, ptr %y @@ -864,28 +855,27 @@ define <2 x i64> @vwmulu_vx_v2i64_i64(ptr %x, ptr %y) { ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: lw a2, 4(a1) -; RV32-NEXT: lw a1, 0(a1) -; RV32-NEXT: vle32.v v25, (a0) -; RV32-NEXT: sw a2, 12(sp) -; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lw a2, 0(a1) +; RV32-NEXT: lw a1, 4(a1) +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle32.v v8, (a0) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: 
vlse64.v v26, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-NEXT: vzext.vf2 v27, v25 -; RV32-NEXT: vmul.vv v8, v26, v27 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vzext.vf2 v10, v8 +; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmulu_vx_v2i64_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vle32.v v25, (a0) +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: ld a0, 0(a1) -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV64-NEXT: vzext.vf2 v26, v25 -; RV64-NEXT: vmul.vx v8, v26, a0 +; RV64-NEXT: vzext.vf2 v9, v8 +; RV64-NEXT: vmul.vx v8, v9, a0 ; RV64-NEXT: ret %a = load <2 x i32>, ptr %x %b = load i64, ptr %y diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll index 6ebb03ff0297e..8f2aec3140e9d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare @llvm.vp.floor.nxv1bf16(, , i32) @@ -407,10 +407,11 @@ declare @llvm.vp.floor.nxv1f16(, @vp_floor_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI12_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 2 @@ -453,10 +454,11 @@ define @vp_floor_nxv1f16( %va, @vp_floor_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv1f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI13_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -493,10 +495,11 @@ declare @llvm.vp.floor.nxv2f16(, @vp_floor_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI14_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, 
ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 2 @@ -539,10 +542,11 @@ define @vp_floor_nxv2f16( %va, @vp_floor_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI15_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -579,10 +583,11 @@ declare @llvm.vp.floor.nxv4f16(, @vp_floor_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI16_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 2 @@ -625,10 +630,11 @@ define @vp_floor_nxv4f16( %va, @vp_floor_nxv4f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI17_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -667,9 +673,10 @@ define @vp_floor_nxv8f16( %va, @vp_floor_nxv8f16( %va, @vp_floor_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI19_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -755,9 +763,10 @@ define @vp_floor_nxv16f16( %va, @vp_floor_nxv16f16( %va, @vp_floor_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI21_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -843,9 +853,10 @@ define @vp_floor_nxv32f16( %va, @vp_floor_nxv32f16( %va, @vp_floor_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_floor_nxv32f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI23_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: fsrmi a0, 2 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -1210,41 +1222,141 @@ define @vp_floor_nxv16f32_unmasked( % declare @llvm.vp.floor.nxv1f64(, , i32) define @vp_floor_nxv1f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI34_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; 
CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv1f64( %va, %m, i32 %evl) ret %v } define @vp_floor_nxv1f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv1f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI35_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: 
vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv1f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv1f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv1f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv1f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv1f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1252,43 +1364,149 @@ define @vp_floor_nxv1f64_unmasked( %v declare @llvm.vp.floor.nxv2f64(, , i32) define @vp_floor_nxv2f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; 
RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv2f64( %va, %m, i32 %evl) ret %v } define @vp_floor_nxv2f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI37_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: 
vp_floor_nxv2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1296,43 +1514,149 @@ define @vp_floor_nxv2f64_unmasked( %v declare @llvm.vp.floor.nxv4f64(, , i32) define @vp_floor_nxv4f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf 
v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv4f64( %va, %m, i32 %evl) ret %v } define @vp_floor_nxv4f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI39_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: 
vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv4f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv4f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1340,43 +1664,149 @@ define @vp_floor_nxv4f64_unmasked( %v declare @llvm.vp.floor.nxv7f64(, , i32) define @vp_floor_nxv7f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv7f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv7f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv7f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv7f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv7f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv7f64( %va, %m, i32 %evl) ret %v } define @vp_floor_nxv7f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv7f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI41_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv7f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; 
RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv7f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv7f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv7f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv7f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1384,43 +1814,149 @@ define @vp_floor_nxv7f64_unmasked( %v declare @llvm.vp.floor.nxv8f64(, , i32) define @vp_floor_nxv8f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: 
vp_floor_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv8f64( %va, %m, i32 %evl) ret %v } define @vp_floor_nxv8f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI43_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv8f64_unmasked: +; RV64ZVFH: # %bb.0: +; 
RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv8f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1429,87 +1965,325 @@ define @vp_floor_nxv8f64_unmasked( %v declare @llvm.vp.floor.nxv16f64(, , i32) define @vp_floor_nxv16f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI44_0) -; CHECK-NEXT: srli a3, a1, 3 -; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) -; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vslidedown.vx v6, v0, a3 -; CHECK-NEXT: sltu a3, a0, a2 -; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a2, a3, a2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a2, 2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB44_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB44_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: 
vp_floor_nxv16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v7, v0 +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFH-NEXT: srli a3, a1, 3 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFH-NEXT: sub a2, a0, a1 +; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFH-NEXT: sltu a3, a0, a2 +; RV32ZVFH-NEXT: addi a3, a3, -1 +; RV32ZVFH-NEXT: and a2, a3, a2 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a2, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB44_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v7, v0 +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: srli a3, a1, 3 +; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a2, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB44_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv16f64: +; RV32ZVFHMIN: # %bb.0: +; 
RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFHMIN-NEXT: srli a3, a1, 3 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFHMIN-NEXT: sub a2, a0, a1 +; RV32ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFHMIN-NEXT: sltu a3, a0, a2 +; RV32ZVFHMIN-NEXT: addi a3, a3, -1 +; RV32ZVFHMIN-NEXT: and a2, a3, a2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a2, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB44_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: srli a3, a1, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a2, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB44_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, 
v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv16f64( %va, %m, i32 %evl) ret %v } define @vp_floor_nxv16f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_floor_nxv16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI45_0) -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2) -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a2, 2 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB45_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB45_2: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a0, 2 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_floor_nxv16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFH-NEXT: sub a3, a0, a1 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFH-NEXT: sltu a2, a0, a3 +; RV32ZVFH-NEXT: addi a2, a2, -1 +; RV32ZVFH-NEXT: and a2, a2, a3 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: fsrmi a2, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB45_2: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_floor_nxv16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: fsrmi a2, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB45_2: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 
+; RV64ZVFH-NEXT: fsrmi a0, 2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_floor_nxv16f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFHMIN-NEXT: sub a3, a0, a1 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV32ZVFHMIN-NEXT: addi a2, a2, -1 +; RV32ZVFHMIN-NEXT: and a2, a2, a3 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a2, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB45_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_floor_nxv16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a2, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB45_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.floor.nxv16f64( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll index 7a4695d1c25c1..409235f7e1b2c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck 
--check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s declare @llvm.experimental.constrained.nearbyint.nxv1f16(, metadata, metadata) @@ -11,10 +11,11 @@ define @nearbyint_nxv1f16( %v) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -35,10 +36,11 @@ define @nearbyint_nxv2f16( %v) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -59,10 +61,11 @@ define @nearbyint_nxv4f16( %v) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -83,10 +86,11 @@ define @nearbyint_nxv8f16( %v) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -107,10 +111,11 @@ define @nearbyint_nxv16f16( %v) strictf ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma @@ -131,10 +136,11 @@ define @nearbyint_nxv32f16( %v) strictf ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v16, fa5 ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma @@ -271,23 +277,42 @@ define @nearbyint_nxv16f32( %v) stric declare @llvm.experimental.constrained.nearbyint.nxv1f64(, metadata, metadata) define @nearbyint_nxv1f64( %v) strictfp { -; CHECK-LABEL: 
nearbyint_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: nearbyint_nxv1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: nearbyint_nxv1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %r = call @llvm.experimental.constrained.nearbyint.nxv1f64( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") ret %r } @@ -295,23 +320,42 @@ define @nearbyint_nxv1f64( %v) strict declare @llvm.experimental.constrained.nearbyint.nxv2f64(, metadata, metadata) define @nearbyint_nxv2f64( %v) strictfp { -; CHECK-LABEL: nearbyint_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: nearbyint_nxv2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: nearbyint_nxv2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: 
vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %r = call @llvm.experimental.constrained.nearbyint.nxv2f64( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") ret %r } @@ -319,23 +363,42 @@ define @nearbyint_nxv2f64( %v) strict declare @llvm.experimental.constrained.nearbyint.nxv4f64(, metadata, metadata) define @nearbyint_nxv4f64( %v) strictfp { -; CHECK-LABEL: nearbyint_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32-LABEL: nearbyint_nxv4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: nearbyint_nxv4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %r = call @llvm.experimental.constrained.nearbyint.nxv4f64( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") ret %r } @@ -343,23 +406,42 @@ define @nearbyint_nxv4f64( %v) strict declare @llvm.experimental.constrained.nearbyint.nxv8f64(, metadata, metadata) define @nearbyint_nxv8f64( %v) strictfp { -; CHECK-LABEL: nearbyint_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; 
RV32-LABEL: nearbyint_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: frflags a0 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: fsflags a0 +; RV32-NEXT: ret +; +; RV64-LABEL: nearbyint_nxv8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: frflags a0 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: fsflags a0 +; RV64-NEXT: ret %r = call @llvm.experimental.constrained.nearbyint.nxv8f64( %v, metadata !"round.dynamic", metadata !"fpexcept.strict") ret %r } diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll index 4ea3269cec0b1..97e65f4e4b53a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN define @nearbyint_nxv1bf16( %x) { ; CHECK-LABEL: nearbyint_nxv1bf16: @@ -167,10 +167,11 @@ define @nearbyint_nxv32bf16( %x) { define @nearbyint_nxv1f16( %x) { ; ZVFH-LABEL: nearbyint_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -206,10 +207,11 @@ declare @llvm.nearbyint.nxv1f16() define @nearbyint_nxv2f16( %x) { ; ZVFH-LABEL: nearbyint_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; 
ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -245,10 +247,11 @@ declare @llvm.nearbyint.nxv2f16() define @nearbyint_nxv4f16( %x) { ; ZVFH-LABEL: nearbyint_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI8_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -284,10 +287,11 @@ declare @llvm.nearbyint.nxv4f16() define @nearbyint_nxv8f16( %x) { ; ZVFH-LABEL: nearbyint_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI9_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -323,10 +327,11 @@ declare @llvm.nearbyint.nxv8f16() define @nearbyint_nxv16f16( %x) { ; ZVFH-LABEL: nearbyint_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI10_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -362,10 +367,11 @@ declare @llvm.nearbyint.nxv16f16() define @nearbyint_nxv32f16( %x) { ; ZVFH-LABEL: nearbyint_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI11_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -513,80 +519,268 @@ define @nearbyint_nxv16f32( %x) { declare @llvm.nearbyint.nxv16f32() define @nearbyint_nxv1f64( %x) { -; CHECK-LABEL: nearbyint_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: nearbyint_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: nearbyint_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; 
RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: nearbyint_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: nearbyint_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.nearbyint.nxv1f64( %x) ret %a } declare @llvm.nearbyint.nxv1f64() define @nearbyint_nxv2f64( %x) { -; CHECK-LABEL: nearbyint_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: nearbyint_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: nearbyint_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: nearbyint_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, 
v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: nearbyint_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.nearbyint.nxv2f64( %x) ret %a } declare @llvm.nearbyint.nxv2f64() define @nearbyint_nxv4f64( %x) { -; CHECK-LABEL: nearbyint_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: nearbyint_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: nearbyint_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: nearbyint_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: nearbyint_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; 
RV64ZVFHMIN-NEXT: ret %a = call @llvm.nearbyint.nxv4f64( %x) ret %a } declare @llvm.nearbyint.nxv4f64() define @nearbyint_nxv8f64( %x) { -; CHECK-LABEL: nearbyint_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: nearbyint_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: nearbyint_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: nearbyint_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: nearbyint_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.nearbyint.nxv8f64( %x) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll index 5fe59f3b3933d..5ed921d39590d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc 
-mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN define @rint_nxv1bf16( %x) { ; CHECK-LABEL: rint_nxv1bf16: @@ -153,10 +153,11 @@ define @rint_nxv32bf16( %x) { define @rint_nxv1f16( %x) { ; ZVFH-LABEL: rint_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -188,10 +189,11 @@ declare @llvm.rint.nxv1f16() define @rint_nxv2f16( %x) { ; ZVFH-LABEL: rint_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -223,10 +225,11 @@ declare @llvm.rint.nxv2f16() define @rint_nxv4f16( %x) { ; ZVFH-LABEL: rint_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI8_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -258,10 +261,11 @@ declare @llvm.rint.nxv4f16() define @rint_nxv8f16( %x) { ; ZVFH-LABEL: rint_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI9_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t @@ -293,10 +297,11 @@ declare @llvm.rint.nxv8f16() define @rint_nxv16f16( %x) { ; ZVFH-LABEL: rint_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI10_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -328,10 +333,11 @@ declare @llvm.rint.nxv16f16() define @rint_nxv32f16( %x) { ; ZVFH-LABEL: rint_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI11_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t ; 
ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t @@ -463,72 +469,236 @@ define @rint_nxv16f32( %x) { declare @llvm.rint.nxv16f32() define @rint_nxv1f64( %x) { -; CHECK-LABEL: rint_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: rint_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: rint_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: rint_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: rint_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.rint.nxv1f64( %x) ret %a } declare @llvm.rint.nxv1f64() define @rint_nxv2f64( %x) { -; CHECK-LABEL: rint_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: rint_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: 
vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: rint_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: rint_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: rint_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.rint.nxv2f64( %x) ret %a } declare @llvm.rint.nxv2f64() define @rint_nxv4f64( %x) { -; CHECK-LABEL: rint_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: rint_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: rint_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: rint_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; 
RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: rint_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.rint.nxv4f64( %x) ret %a } declare @llvm.rint.nxv4f64() define @rint_nxv8f64( %x) { -; CHECK-LABEL: rint_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: rint_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: rint_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: rint_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: rint_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.rint.nxv8f64( %x) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll index 3d992aa13e379..295c264e7d924 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s ; This file tests the code generation for `llvm.experimental.constrained.round.*` on scalable vector type. @@ -11,10 +11,11 @@ define @round_nxv1f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -34,10 +35,11 @@ define @round_nxv2f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -57,10 +59,11 @@ define @round_nxv4f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -80,10 +83,11 @@ define @round_nxv8f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -103,10 +107,11 @@ define @round_nxv16f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma @@ -126,10 +131,11 @@ define @round_nxv32f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v16, fa5 ; CHECK-NEXT: fsrmi a0, 4 ; CHECK-NEXT: 
vsetvli zero, zero, e16, m8, ta, ma @@ -260,92 +266,168 @@ define @round_nxv16f32( %x) strictfp declare @llvm.experimental.constrained.round.nxv16f32(, metadata) define @round_nxv1f64( %x) strictfp { -; CHECK-LABEL: round_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: round_nxv1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 4 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: round_nxv1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 4 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.round.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.round.nxv1f64(, metadata) define @round_nxv2f64( %x) strictfp { -; CHECK-LABEL: round_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: round_nxv2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: fsrmi a0, 4 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: round_nxv2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; 
RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: fsrmi a0, 4 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.round.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.round.nxv2f64(, metadata) define @round_nxv4f64( %x) strictfp { -; CHECK-LABEL: round_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: round_nxv4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: fsrmi a0, 4 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: round_nxv4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: fsrmi a0, 4 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.round.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.round.nxv4f64(, metadata) define @round_nxv8f64( %x) strictfp { -; CHECK-LABEL: round_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: round_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: 
fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: fsrmi a0, 4 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: round_nxv8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: fsrmi a0, 4 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.round.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll index f7422b279149f..d420636a573fe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN ; This file tests the code generation for `llvm.round.*` on scalable vector type. 
@@ -169,10 +169,11 @@ define @round_nxv32bf16( %x) { define @round_nxv1f16( %x) { ; ZVFH-LABEL: round_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -208,10 +209,11 @@ declare @llvm.round.nxv1f16() define @round_nxv2f16( %x) { ; ZVFH-LABEL: round_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -247,10 +249,11 @@ declare @llvm.round.nxv2f16() define @round_nxv4f16( %x) { ; ZVFH-LABEL: round_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI8_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -286,10 +289,11 @@ declare @llvm.round.nxv4f16() define @round_nxv8f16( %x) { ; ZVFH-LABEL: round_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI9_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -325,10 +329,11 @@ declare @llvm.round.nxv8f16() define @round_nxv16f16( %x) { ; ZVFH-LABEL: round_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI10_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -364,10 +369,11 @@ declare @llvm.round.nxv16f16() define @round_nxv32f16( %x) { ; ZVFH-LABEL: round_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI11_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -515,80 +521,268 @@ define @round_nxv16f32( %x) { declare @llvm.round.nxv16f32() define @round_nxv1f64( %x) { -; CHECK-LABEL: round_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: round_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, 
zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: round_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: round_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: round_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.round.nxv1f64( %x) ret %a } declare @llvm.round.nxv1f64() define @round_nxv2f64( %x) { -; CHECK-LABEL: round_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: round_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: round_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t 
+; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: round_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: round_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.round.nxv2f64( %x) ret %a } declare @llvm.round.nxv2f64() define @round_nxv4f64( %x) { -; CHECK-LABEL: round_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: round_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: round_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: round_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, 
ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: round_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.round.nxv4f64( %x) ret %a } declare @llvm.round.nxv4f64() define @round_nxv8f64( %x) { -; CHECK-LABEL: round_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: round_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: round_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: round_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: round_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call 
@llvm.round.nxv8f64( %x) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll index c293ac91b63bf..de766895c734f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s ; This file tests the code generation for `llvm.experimental.constrained.roundeven.*` on scalable vector type. @@ -11,10 +11,11 @@ define @roundeven_nxv1f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -34,10 +35,11 @@ define @roundeven_nxv2f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -57,10 +59,11 @@ define @roundeven_nxv4f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -80,10 +83,11 @@ define @roundeven_nxv8f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma @@ -103,10 +107,11 @@ define @roundeven_nxv16f16( %x) strictf ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma @@ -126,10 +131,11 @@ define @roundeven_nxv32f16( %x) strictf ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v16, fa5 ; CHECK-NEXT: fsrmi a0, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma @@ -260,92 +266,168 @@ define @roundeven_nxv16f32( %x) stric declare @llvm.experimental.constrained.roundeven.nxv16f32(, metadata) define @roundeven_nxv1f64( %x) strictfp { -; CHECK-LABEL: roundeven_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: roundeven_nxv1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: fsrmi a0, 0 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: roundeven_nxv1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: fsrmi a0, 0 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.roundeven.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.roundeven.nxv1f64(, metadata) define @roundeven_nxv2f64( %x) strictfp { -; CHECK-LABEL: roundeven_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: roundeven_nxv2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; 
RV32-NEXT: vfabs.v v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: fsrmi a0, 0 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: roundeven_nxv2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: fsrmi a0, 0 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.roundeven.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.roundeven.nxv2f64(, metadata) define @roundeven_nxv4f64( %x) strictfp { -; CHECK-LABEL: roundeven_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: roundeven_nxv4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: fsrmi a0, 0 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: roundeven_nxv4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: fsrmi a0, 0 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.roundeven.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.roundeven.nxv4f64(, metadata) define @roundeven_nxv8f64( %x) strictfp { -; CHECK-LABEL: roundeven_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: 
vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: roundeven_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: fsrmi a0, 0 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32-NEXT: fsrm a0 +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: roundeven_nxv8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: fsrmi a0, 0 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64-NEXT: fsrm a0 +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.roundeven.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll index 865531b77eb29..b9121c55684ee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN ; This file tests the code generation for `llvm.roundeven.*` on scalable vector type. 
define @roundeven_nxv1bf16( %x) { @@ -168,10 +168,11 @@ define @roundeven_nxv32bf16( %x) { define @roundeven_nxv1f16( %x) { ; ZVFH-LABEL: roundeven_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -207,10 +208,11 @@ declare @llvm.roundeven.nxv1f16() define @roundeven_nxv2f16( %x) { ; ZVFH-LABEL: roundeven_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -246,10 +248,11 @@ declare @llvm.roundeven.nxv2f16() define @roundeven_nxv4f16( %x) { ; ZVFH-LABEL: roundeven_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI8_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -285,10 +288,11 @@ declare @llvm.roundeven.nxv4f16() define @roundeven_nxv8f16( %x) { ; ZVFH-LABEL: roundeven_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI9_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -324,10 +328,11 @@ declare @llvm.roundeven.nxv8f16() define @roundeven_nxv16f16( %x) { ; ZVFH-LABEL: roundeven_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI10_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -363,10 +368,11 @@ declare @llvm.roundeven.nxv16f16() define @roundeven_nxv32f16( %x) { ; ZVFH-LABEL: roundeven_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI11_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -514,80 +520,268 @@ define @roundeven_nxv16f32( %x) { declare @llvm.roundeven.nxv16f32() define @roundeven_nxv1f64( %x) { -; CHECK-LABEL: roundeven_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: roundeven_nxv1f64: +; RV32ZVFH: 
# %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: roundeven_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: roundeven_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: roundeven_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.roundeven.nxv1f64( %x) ret %a } declare @llvm.roundeven.nxv1f64() define @roundeven_nxv2f64( %x) { -; CHECK-LABEL: roundeven_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: roundeven_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: roundeven_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: 
slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: roundeven_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: roundeven_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.roundeven.nxv2f64( %x) ret %a } declare @llvm.roundeven.nxv2f64() define @roundeven_nxv4f64( %x) { -; CHECK-LABEL: roundeven_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: roundeven_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: roundeven_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: roundeven_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; 
RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: roundeven_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.roundeven.nxv4f64( %x) ret %a } declare @llvm.roundeven.nxv4f64() define @roundeven_nxv8f64( %x) { -; CHECK-LABEL: roundeven_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: roundeven_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: roundeven_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: roundeven_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: roundeven_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v 
v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.roundeven.nxv8f64( %x) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll index 8a5f118d8f6ac..63cb72e8795e1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll @@ -1,18 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s define @trunc_nxv1f16( %x) strictfp { ; CHECK-LABEL: trunc_nxv1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t @@ -30,10 +31,11 @@ define @trunc_nxv2f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t @@ -51,10 +53,11 @@ define @trunc_nxv4f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI2_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t @@ -72,10 +75,11 @@ define @trunc_nxv8f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI3_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v10, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v10, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t @@ -93,10 +97,11 @@ define @trunc_nxv16f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI4_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v12, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; 
CHECK-NEXT: vmflt.vf v0, v12, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t @@ -114,10 +119,11 @@ define @trunc_nxv32f16( %x) strictfp { ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI5_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0) +; CHECK-NEXT: li a0, 25 ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: slli a0, a0, 10 ; CHECK-NEXT: vfabs.v v16, v8 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v16, fa5 ; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t @@ -236,84 +242,152 @@ define @trunc_nxv16f32( %x) strictfp declare @llvm.experimental.constrained.trunc.nxv16f32(, metadata) define @trunc_nxv1f64( %x) strictfp { -; CHECK-LABEL: trunc_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: trunc_nxv1f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI11_0) +; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v9, v8 +; RV32-NEXT: vmflt.vf v0, v9, fa5 +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: trunc_nxv1f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v9, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v9, fa5 +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.trunc.nxv1f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.trunc.nxv1f64(, metadata) define @trunc_nxv2f64( %x) strictfp { -; CHECK-LABEL: trunc_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI12_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: trunc_nxv2f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI12_0) +; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v 
v10, v8 +; RV32-NEXT: vmflt.vf v0, v10, fa5 +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: trunc_nxv2f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v10, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v10, fa5 +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.trunc.nxv2f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.trunc.nxv2f64(, metadata) define @trunc_nxv4f64( %x) strictfp { -; CHECK-LABEL: trunc_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI13_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: trunc_nxv4f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI13_0) +; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v12, v8 +; RV32-NEXT: vmflt.vf v0, v12, fa5 +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: trunc_nxv4f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v12, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v12, fa5 +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.trunc.nxv4f64( %x, metadata !"fpexcept.strict") ret %a } declare @llvm.experimental.constrained.trunc.nxv4f64(, metadata) define @trunc_nxv8f64( %x) strictfp { -; CHECK-LABEL: trunc_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu -; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0) -; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; 
RV32-LABEL: trunc_nxv8f64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vmfne.vv v0, v8, v8 +; RV32-NEXT: lui a0, %hi(.LCPI14_0) +; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0) +; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV32-NEXT: vfabs.v v16, v8 +; RV32-NEXT: vmflt.vf v0, v16, fa5 +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t +; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: trunc_nxv8f64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vmfne.vv v0, v8, v8 +; RV64-NEXT: li a0, 1075 +; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t +; RV64-NEXT: slli a0, a0, 52 +; RV64-NEXT: vfabs.v v16, v8 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vmflt.vf v0, v16, fa5 +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t +; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64-NEXT: ret %a = call @llvm.experimental.constrained.trunc.nxv8f64( %x, metadata !"fpexcept.strict") ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll index d597e166be4ee..34b3e8d2849b7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN define @trunc_nxv1bf16( %x) { ; CHECK-LABEL: trunc_nxv1bf16: @@ -153,10 +153,11 @@ define @trunc_nxv32bf16( %x) { define @trunc_nxv1f16( %x) { ; ZVFH-LABEL: trunc_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI6_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -188,10 +189,11 @@ declare @llvm.trunc.nxv1f16() define @trunc_nxv2f16( %x) { ; ZVFH-LABEL: trunc_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI7_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -223,10 +225,11 @@ declare @llvm.trunc.nxv2f16() 
define @trunc_nxv4f16( %x) { ; ZVFH-LABEL: trunc_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI8_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -258,10 +261,11 @@ declare @llvm.trunc.nxv4f16() define @trunc_nxv8f16( %x) { ; ZVFH-LABEL: trunc_nxv8f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI9_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t @@ -293,10 +297,11 @@ declare @llvm.trunc.nxv8f16() define @trunc_nxv16f16( %x) { ; ZVFH-LABEL: trunc_nxv16f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI10_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -328,10 +333,11 @@ declare @llvm.trunc.nxv16f16() define @trunc_nxv32f16( %x) { ; ZVFH-LABEL: trunc_nxv32f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a0, %hi(.LCPI11_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0) ; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t @@ -463,72 +469,236 @@ define @trunc_nxv16f32( %x) { declare @llvm.trunc.nxv16f32() define @trunc_nxv1f64( %x) { -; CHECK-LABEL: trunc_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI17_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: trunc_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: trunc_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: trunc_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0) +; RV32ZVFHMIN-NEXT: fld fa5, 
%lo(.LCPI17_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: trunc_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.trunc.nxv1f64( %x) ret %a } declare @llvm.trunc.nxv1f64() define @trunc_nxv2f64( %x) { -; CHECK-LABEL: trunc_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI18_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: trunc_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: trunc_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: trunc_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: trunc_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.trunc.nxv2f64( %x) ret %a } declare 
@llvm.trunc.nxv2f64() define @trunc_nxv4f64( %x) { -; CHECK-LABEL: trunc_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI19_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: trunc_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: trunc_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: trunc_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: trunc_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.trunc.nxv4f64( %x) ret %a } declare @llvm.trunc.nxv4f64() define @trunc_nxv8f64( %x) { -; CHECK-LABEL: trunc_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI20_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: trunc_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t 
+; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: trunc_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: trunc_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: trunc_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %a = call @llvm.trunc.nxv8f64( %x) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll index ae0542fb5b74f..d7bf566b9b5f4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll @@ -83,10 +83,11 @@ define @trunc_nxv1f16_to_ui32( %x) { define @trunc_nxv1f16_to_si64( %x) { ; CHECK-LABEL: trunc_nxv1f16_to_si64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI6_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -104,10 +105,11 @@ define @trunc_nxv1f16_to_si64( %x) { define @trunc_nxv1f16_to_ui64( %x) { ; CHECK-LABEL: trunc_nxv1f16_to_ui64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI7_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI7_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -201,10 +203,11 @@ define @trunc_nxv4f16_to_ui32( %x) { define @trunc_nxv4f16_to_si64( %x) { ; CHECK-LABEL: trunc_nxv4f16_to_si64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI14_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI14_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -222,10 +225,11 @@ define @trunc_nxv4f16_to_si64( %x) { define @trunc_nxv4f16_to_ui64( %x) { ; CHECK-LABEL: trunc_nxv4f16_to_ui64: ; 
CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI15_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI15_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -331,10 +335,11 @@ define @ceil_nxv1f16_to_ui32( %x) { define @ceil_nxv1f16_to_si64( %x) { ; CHECK-LABEL: ceil_nxv1f16_to_si64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI22_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI22_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -388,10 +393,11 @@ define @ceil_nxv1f16_to_si64( %x) { define @ceil_nxv1f16_to_ui64( %x) { ; CHECK-LABEL: ceil_nxv1f16_to_ui64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI23_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI23_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -533,10 +539,11 @@ define @ceil_nxv4f16_to_ui32( %x) { define @ceil_nxv4f16_to_si64( %x) { ; CHECK-LABEL: ceil_nxv4f16_to_si64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI30_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI30_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -590,10 +597,11 @@ define @ceil_nxv4f16_to_si64( %x) { define @ceil_nxv4f16_to_ui64( %x) { ; CHECK-LABEL: ceil_nxv4f16_to_ui64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI31_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI31_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: fsrmi a0, 3 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -723,10 +731,11 @@ define @rint_nxv1f16_to_ui32( %x) { define @rint_nxv1f16_to_si64( %x) { ; CHECK-LABEL: rint_nxv1f16_to_si64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI38_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -778,10 +787,11 @@ define @rint_nxv1f16_to_si64( %x) { define @rint_nxv1f16_to_ui64( %x) { ; CHECK-LABEL: rint_nxv1f16_to_ui64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI39_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI39_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -909,10 +919,11 @@ define @rint_nxv4f16_to_ui32( %x) { define @rint_nxv4f16_to_si64( %x) { ; CHECK-LABEL: rint_nxv4f16_to_si64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 
%hi(.LCPI46_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI46_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -964,10 +975,11 @@ define @rint_nxv4f16_to_si64( %x) { define @rint_nxv4f16_to_ui64( %x) { ; CHECK-LABEL: rint_nxv4f16_to_ui64: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI47_0) -; CHECK-NEXT: flh fa5, %lo(.LCPI47_0)(a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v9, v8 +; CHECK-NEXT: li a0, 25 +; CHECK-NEXT: slli a0, a0, 10 +; CHECK-NEXT: fmv.h.x fa5, a0 ; CHECK-NEXT: vmflt.vf v0, v9, fa5 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll index d995a31f243d3..acc68491d5aee 100644 --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -416,14 +416,14 @@ define @reverse_nxv32i1( %a) { ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v12, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16 -; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv32i1: @@ -437,14 +437,14 @@ define @reverse_nxv32i1( %a) { ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0 +; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-256-NEXT: vrgather.vv v11, v12, v16 -; RV32-BITS-256-NEXT: vrgather.vv v10, v13, v16 -; RV32-BITS-256-NEXT: vrgather.vv v9, v14, v16 -; RV32-BITS-256-NEXT: vrgather.vv v8, v15, v16 +; RV32-BITS-256-NEXT: vrgather.vv v15, v8, v16 +; RV32-BITS-256-NEXT: vrgather.vv v14, v9, v16 +; RV32-BITS-256-NEXT: vrgather.vv v13, v10, v16 +; RV32-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-256-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv32i1: @@ -458,14 +458,14 @@ define @reverse_nxv32i1( %a) { ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0 +; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV32-BITS-512-NEXT: 
vrgather.vv v11, v12, v16 -; RV32-BITS-512-NEXT: vrgather.vv v10, v13, v16 -; RV32-BITS-512-NEXT: vrgather.vv v9, v14, v16 -; RV32-BITS-512-NEXT: vrgather.vv v8, v15, v16 +; RV32-BITS-512-NEXT: vrgather.vv v15, v8, v16 +; RV32-BITS-512-NEXT: vrgather.vv v14, v9, v16 +; RV32-BITS-512-NEXT: vrgather.vv v13, v10, v16 +; RV32-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-512-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i1: @@ -479,14 +479,14 @@ define @reverse_nxv32i1( %a) { ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v12, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16 -; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv32i1: @@ -500,14 +500,14 @@ define @reverse_nxv32i1( %a) { ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0 +; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-256-NEXT: vrgather.vv v11, v12, v16 -; RV64-BITS-256-NEXT: vrgather.vv v10, v13, v16 -; RV64-BITS-256-NEXT: vrgather.vv v9, v14, v16 -; RV64-BITS-256-NEXT: vrgather.vv v8, v15, v16 +; RV64-BITS-256-NEXT: vrgather.vv v15, v8, v16 +; RV64-BITS-256-NEXT: vrgather.vv v14, v9, v16 +; RV64-BITS-256-NEXT: vrgather.vv v13, v10, v16 +; RV64-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-256-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv32i1: @@ -521,14 +521,14 @@ define @reverse_nxv32i1( %a) { ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0 +; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma -; RV64-BITS-512-NEXT: vrgather.vv v11, v12, v16 -; RV64-BITS-512-NEXT: vrgather.vv v10, v13, v16 -; RV64-BITS-512-NEXT: vrgather.vv v9, v14, v16 -; RV64-BITS-512-NEXT: vrgather.vv v8, v15, v16 +; RV64-BITS-512-NEXT: vrgather.vv v15, v8, v16 +; RV64-BITS-512-NEXT: vrgather.vv v14, v9, v16 +; RV64-BITS-512-NEXT: vrgather.vv v13, v10, v16 +; RV64-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma -; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-512-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv32i1( %a) ret 
%res diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll index 9bb5717d6fc25..64e305f130dd7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare @llvm.vp.nearbyint.nxv1bf16(, , i32) @@ -407,10 +407,11 @@ declare @llvm.vp.nearbyint.nxv1f16(, @vp_nearbyint_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI12_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: frflags a0 @@ -453,10 +454,11 @@ define @vp_nearbyint_nxv1f16( %va, @vp_nearbyint_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv1f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI13_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -493,10 +495,11 @@ declare @llvm.vp.nearbyint.nxv2f16(, @vp_nearbyint_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI14_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: frflags a0 @@ -539,10 +542,11 @@ define @vp_nearbyint_nxv2f16( %va, @vp_nearbyint_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI15_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -579,10 +583,11 @@ declare @llvm.vp.nearbyint.nxv4f16(, @vp_nearbyint_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv4f16: ; ZVFH: # 
%bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI16_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: frflags a0 @@ -625,10 +630,11 @@ define @vp_nearbyint_nxv4f16( %va, @vp_nearbyint_nxv4f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI17_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -667,9 +673,10 @@ define @vp_nearbyint_nxv8f16( %va, @vp_nearbyint_nxv8f16( %va, @vp_nearbyint_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI19_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -755,9 +763,10 @@ define @vp_nearbyint_nxv16f16( %va, @vp_nearbyint_nxv16f16( %va, @vp_nearbyint_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI21_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -843,9 +853,10 @@ define @vp_nearbyint_nxv32f16( %va, @vp_nearbyint_nxv32f16( %va, @vp_nearbyint_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_nearbyint_nxv32f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI23_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: frflags a0 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -1210,41 +1222,141 @@ define @vp_nearbyint_nxv16f32_unmasked( @llvm.vp.nearbyint.nxv1f64(, , i32) define @vp_nearbyint_nxv1f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI34_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, 
e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv1f64( %va, %m, i32 %evl) ret %v } define @vp_nearbyint_nxv1f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv1f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI35_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv1f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, 
zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv1f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv1f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv1f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv1f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1252,43 +1364,149 @@ define @vp_nearbyint_nxv1f64_unmasked( @llvm.vp.nearbyint.nxv2f64(, , i32) define @vp_nearbyint_nxv2f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: 
vp_nearbyint_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv2f64( %va, %m, i32 %evl) ret %v } define @vp_nearbyint_nxv2f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI37_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: 
vp_nearbyint_nxv2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1296,43 +1514,149 @@ define @vp_nearbyint_nxv2f64_unmasked( @llvm.vp.nearbyint.nxv4f64(, , i32) define @vp_nearbyint_nxv4f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v 
v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv4f64( %va, %m, i32 %evl) ret %v } define @vp_nearbyint_nxv4f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI39_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, 
v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv4f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv4f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1340,43 +1664,149 @@ define @vp_nearbyint_nxv4f64_unmasked( @llvm.vp.nearbyint.nxv7f64(, , i32) define @vp_nearbyint_nxv7f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv7f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv7f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv7f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: 
fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv7f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv7f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv7f64( %va, %m, i32 %evl) ret %v } define @vp_nearbyint_nxv7f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv7f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI41_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv7f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv7f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf 
v0, v16, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv7f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv7f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv7f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1384,43 +1814,149 @@ define @vp_nearbyint_nxv7f64_unmasked( @llvm.vp.nearbyint.nxv8f64(, , i32) define @vp_nearbyint_nxv8f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: 
frflags a0 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv8f64( %va, %m, i32 %evl) ret %v } define @vp_nearbyint_nxv8f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI43_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; 
RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv8f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1429,87 +1965,325 @@ define @vp_nearbyint_nxv8f64_unmasked( @llvm.vp.nearbyint.nxv16f64(, , i32) define @vp_nearbyint_nxv16f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_nearbyint_nxv16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI44_0) -; CHECK-NEXT: srli a3, a1, 3 -; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) -; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vslidedown.vx v6, v0, a3 -; CHECK-NEXT: sltu a3, a0, a2 -; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a2, a3, a2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: frflags a2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: fsflags a2 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB44_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB44_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v7, v0 +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFH-NEXT: srli a3, a1, 3 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFH-NEXT: sub a2, a0, a1 +; 
RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFH-NEXT: sltu a3, a0, a2 +; RV32ZVFH-NEXT: addi a3, a3, -1 +; RV32ZVFH-NEXT: and a2, a3, a2 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: frflags a2 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: fsflags a2 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB44_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v7, v0 +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: srli a3, a1, 3 +; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: frflags a2 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: fsflags a2 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB44_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv16f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFHMIN-NEXT: srli a3, a1, 3 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFHMIN-NEXT: sub a2, a0, a1 +; RV32ZVFHMIN-NEXT: 
vslidedown.vx v6, v0, a3 +; RV32ZVFHMIN-NEXT: sltu a3, a0, a2 +; RV32ZVFHMIN-NEXT: addi a3, a3, -1 +; RV32ZVFHMIN-NEXT: and a2, a3, a2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: frflags a2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: fsflags a2 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB44_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: srli a3, a1, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: frflags a2 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: fsflags a2 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB44_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv16f64( %va, %m, i32 %evl) ret %v } define @vp_nearbyint_nxv16f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: 
vp_nearbyint_nxv16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI45_0) -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2) -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: frflags a2 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: fsflags a2 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB45_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB45_2: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: frflags a0 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_nearbyint_nxv16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFH-NEXT: sub a3, a0, a1 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFH-NEXT: sltu a2, a0, a3 +; RV32ZVFH-NEXT: addi a2, a2, -1 +; RV32ZVFH-NEXT: and a2, a2, a3 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: frflags a2 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: fsflags a2 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB45_2: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: frflags a0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: fsflags a0 +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_nearbyint_nxv16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: frflags a2 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: fsflags a2 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB45_2: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: frflags a0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: fsflags a0 +; 
RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv16f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFHMIN-NEXT: sub a3, a0, a1 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV32ZVFHMIN-NEXT: addi a2, a2, -1 +; RV32ZVFHMIN-NEXT: and a2, a2, a3 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: frflags a2 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: fsflags a2 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB45_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: frflags a0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsflags a0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: frflags a2 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: fsflags a2 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB45_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: frflags a0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsflags a0 +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.nearbyint.nxv16f64( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll index 4bc6313494d41..1ee7e138654b9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll +++ b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll @@ -37772,18 +37772,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_P1( %val, %val, %val, < ; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_PALL: ; CHECK-RV32VC: # %bb.0: ; CHECK-RV32VC-NEXT: csrr a1, vlenb -; CHECK-RV32VC-NEXT: slli a5, a1, 4 +; CHECK-RV32VC-NEXT: slli a6, a1, 4 ; CHECK-RV32VC-NEXT: slli a2, a1, 2 -; CHECK-RV32VC-NEXT: slli a6, a1, 3 +; CHECK-RV32VC-NEXT: slli a5, a1, 3 ; CHECK-RV32VC-NEXT: mv a4, a3 ; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB916_2 ; CHECK-RV32VC-NEXT: # 
%bb.1: ; CHECK-RV32VC-NEXT: mv a4, a2 ; CHECK-RV32VC-NEXT: .LBB916_2: ; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0) -; CHECK-RV32VC-NEXT: add a7, a0, a5 +; CHECK-RV32VC-NEXT: add a6, a6, a0 ; CHECK-RV32VC-NEXT: slli a1, a1, 1 -; CHECK-RV32VC-NEXT: add a0, a0, a6 +; CHECK-RV32VC-NEXT: add a0, a0, a5 ; CHECK-RV32VC-NEXT: mv a5, a4 ; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB916_4 ; CHECK-RV32VC-NEXT: # %bb.3: @@ -38416,11 +38416,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_PALL( %val, < ; CHECK-RV32VC-NEXT: .LBB916_4: ; CHECK-RV32VC-NEXT: addi sp, sp, -16 ; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32VC-NEXT: csrr a6, vlenb -; CHECK-RV32VC-NEXT: slli a6, a6, 3 -; CHECK-RV32VC-NEXT: sub sp, sp, a6 +; CHECK-RV32VC-NEXT: csrr a7, vlenb +; CHECK-RV32VC-NEXT: slli a7, a7, 3 +; CHECK-RV32VC-NEXT: sub sp, sp, a7 ; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7) +; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6) ; CHECK-RV32VC-NEXT: addi a6, sp, 16 ; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill ; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0) @@ -39022,18 +39022,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_S1( %val, %val, %val, %val, %val ; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_DEFAULT: ; CHECK-RV32VC: # %bb.0: ; CHECK-RV32VC-NEXT: csrr a1, vlenb -; CHECK-RV32VC-NEXT: slli a5, a1, 4 +; CHECK-RV32VC-NEXT: slli a6, a1, 4 ; CHECK-RV32VC-NEXT: slli a2, a1, 2 -; CHECK-RV32VC-NEXT: slli a6, a1, 3 +; CHECK-RV32VC-NEXT: slli a5, a1, 3 ; CHECK-RV32VC-NEXT: mv a4, a3 ; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB919_2 ; CHECK-RV32VC-NEXT: # %bb.1: ; CHECK-RV32VC-NEXT: mv a4, a2 ; CHECK-RV32VC-NEXT: .LBB919_2: ; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0) -; CHECK-RV32VC-NEXT: add a7, a0, a5 +; CHECK-RV32VC-NEXT: add a6, a6, a0 ; CHECK-RV32VC-NEXT: slli a1, a1, 1 -; CHECK-RV32VC-NEXT: add a0, a0, a6 +; CHECK-RV32VC-NEXT: add a0, a0, a5 ; CHECK-RV32VC-NEXT: mv a5, a4 ; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB919_4 ; CHECK-RV32VC-NEXT: # %bb.3: @@ -40290,11 +40290,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_DEFAULT( %val ; CHECK-RV32VC-NEXT: .LBB919_4: ; CHECK-RV32VC-NEXT: addi sp, sp, -16 ; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16 -; CHECK-RV32VC-NEXT: csrr a6, vlenb -; CHECK-RV32VC-NEXT: slli a6, a6, 3 -; CHECK-RV32VC-NEXT: sub sp, sp, a6 +; CHECK-RV32VC-NEXT: csrr a7, vlenb +; CHECK-RV32VC-NEXT: slli a7, a7, 3 +; CHECK-RV32VC-NEXT: sub sp, sp, a7 ; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7) +; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6) ; CHECK-RV32VC-NEXT: addi a6, sp, 16 ; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill ; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll index 306cacb31bdef..95bff27fe8ca6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/remat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll @@ -126,18 +126,18 @@ define void @vmv.v.x_needs_extended(ptr %p, i64 %x) { ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; CHECK-NEXT: vs8r.v v8, (a0) ; 
CHECK-NEXT: vl8re64.v v16, (a0) +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vl8re64.v v0, (a0) -; CHECK-NEXT: vl8re64.v v8, (a0) -; CHECK-NEXT: vs8r.v v8, (a0) +; CHECK-NEXT: vl8re64.v v16, (a0) +; CHECK-NEXT: vs8r.v v16, (a0) ; CHECK-NEXT: vs8r.v v0, (a0) ; CHECK-NEXT: vs8r.v v24, (a0) +; CHECK-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vs8r.v v16, (a0) -; CHECK-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -301,3 +301,135 @@ define void @vfmv.s.f(ptr %p, double %x) { store volatile double %x, ptr %p ret void } + +; This test is fairly fragile, but it's trying to cover the case which +; caused the revert of bba9172 due to interaction with how rematerialized +; instructions are pruned from the original live interval. In the result +; below, we remat the vmv.v.x into the loop, but fail to remat the vmv.v.x +; a second time after further splitting its live range. We shouldn't need +; to spill it to the stack at all. +define i64 @dual_remat(i64 %0, %1, %2, ptr %p) #0 { +; CHECK-LABEL: dual_remat: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a2, a1, 5 +; CHECK-NEXT: add a1, a2, a1 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x21, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 33 * vlenb +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: srli a1, a2, 3 +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: vmv.v.i v0, 0 +; CHECK-NEXT: .LBB8_1: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: mv a5, a4 +; CHECK-NEXT: slli a4, a4, 3 +; CHECK-NEXT: add a5, a5, a4 +; CHECK-NEXT: slli a4, a4, 1 +; CHECK-NEXT: add a4, a4, a5 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: slli a5, a4, 4 +; CHECK-NEXT: add a4, a5, a4 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: mv a5, a4 +; CHECK-NEXT: slli a4, a4, 3 +; CHECK-NEXT: add a5, a5, a4 +; CHECK-NEXT: slli a4, a4, 1 +; CHECK-NEXT: add a4, a4, a5 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: vmsne.vi v24, v16, 0 +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: slli a4, a4, 4 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vs1r.v v24, (a4) # vscale x 8-byte Folded Spill +; CHECK-NEXT: vand.vv v16, v0, v8 +; CHECK-NEXT: vmsne.vi v8, v16, 0 +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: mv a5, a4 +; CHECK-NEXT: slli a4, a4, 3 +; CHECK-NEXT: add a5, a5, a4 +; CHECK-NEXT: slli a4, a4, 1 +; CHECK-NEXT: add a4, a4, a5 +; CHECK-NEXT: add
a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: slli a4, a4, 4 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vl1r.v v9, (a4) # vscale x 8-byte Folded Reload +; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslideup.vx v9, v8, a1 +; CHECK-NEXT: vsetvli a4, zero, e8, m2, ta, ma +; CHECK-NEXT: vcpop.m a4, v9 +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: slli a6, a5, 4 +; CHECK-NEXT: add a5, a6, a5 +; CHECK-NEXT: add a5, sp, a5 +; CHECK-NEXT: addi a5, a5, 16 +; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vs8r.v v8, (a3) +; CHECK-NEXT: vs8r.v v8, (a2) +; CHECK-NEXT: addi a5, sp, 16 +; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; CHECK-NEXT: vor.vv v16, v16, v8 +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: slli a5, a5, 3 +; CHECK-NEXT: add a5, sp, a5 +; CHECK-NEXT: addi a5, a5, 16 +; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vor.vv v0, v0, v8 +; CHECK-NEXT: beqz a4, .LBB8_1 +; CHECK-NEXT: # %bb.2: # %middle.block +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a2, a1, 5 +; CHECK-NEXT: add a1, a2, a1 +; CHECK-NEXT: add sp, sp, a1 +; CHECK-NEXT: .cfi_def_cfa sp, 16 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 +; CHECK-NEXT: ret +entry: + %broadcast.splatinsert = insertelement zeroinitializer, i64 %0, i64 0 + %broadcast.splat = shufflevector %broadcast.splatinsert, zeroinitializer, zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %entry + %vec.ind = phi [ zeroinitializer, %entry ], [ %vec.ind.next, %vector.body ] + %3 = and %vec.ind, %broadcast.splat + %4 = icmp ne %3, zeroinitializer + store %broadcast.splat, ptr %p + %5 = tail call i1 @llvm.vector.reduce.or.nxv16i1( %4) + %vec.ind.next = or %vec.ind, %1 + br i1 %5, label %middle.block, label %vector.body + +middle.block: ; preds = %vector.body + %and.i = and i64 1, %0 + ret i64 %and.i +} diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll index a9505dca97529..091caa6c65fd2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare @llvm.vp.rint.nxv1bf16(, , i32) @@ -379,10 +379,11 @@ declare @llvm.vp.rint.nxv1f16(, @vp_rint_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vp_rint_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI12_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma @@ -421,10 +422,11 @@ define @vp_rint_nxv1f16( %va, @vp_rint_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv1f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI13_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -457,10 +459,11 @@ declare @llvm.vp.rint.nxv2f16(, @vp_rint_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI14_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma @@ -499,10 +502,11 @@ define @vp_rint_nxv2f16( %va, @vp_rint_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI15_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -535,10 +539,11 @@ declare @llvm.vp.rint.nxv4f16(, @vp_rint_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI16_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma @@ -577,10 +582,11 @@ define @vp_rint_nxv4f16( %va, @vp_rint_nxv4f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI17_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t @@ -615,9 +621,10 @@ define @vp_rint_nxv8f16( %va, @vp_rint_nxv8f16( %va, @vp_rint_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI19_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t @@ 
-695,9 +703,10 @@ define @vp_rint_nxv16f16( %va, @vp_rint_nxv16f16( %va, @vp_rint_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI21_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t @@ -775,9 +785,10 @@ define @vp_rint_nxv32f16( %va, @vp_rint_nxv32f16( %va, @vp_rint_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_rint_nxv32f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI23_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t ; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t @@ -1110,37 +1122,125 @@ define @vp_rint_nxv16f32_unmasked( %v declare @llvm.vp.rint.nxv1f64(, , i32) define @vp_rint_nxv1f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI34_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv1f64: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; 
RV32ZVFMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv1f64( %va, %m, i32 %evl) ret %v } define @vp_rint_nxv1f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv1f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI35_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv1f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv1f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv1f64_unmasked: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv1f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv1f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1148,39 +1248,133 @@ define @vp_rint_nxv1f64_unmasked( %va declare @llvm.vp.rint.nxv2f64(, , i32) define @vp_rint_nxv2f64( %va, %m, i32 zeroext %evl) 
{ -; CHECK-LABEL: vp_rint_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv2f64: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFMIN-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv2f64( %va, %m, i32 %evl) ret %v } define @vp_rint_nxv2f64_unmasked( %va, i32 zeroext %evl) { -; 
CHECK-LABEL: vp_rint_nxv2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI37_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv2f64_unmasked: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1188,39 +1382,133 @@ define @vp_rint_nxv2f64_unmasked( %va declare @llvm.vp.rint.nxv4f64(, , i32) define @vp_rint_nxv4f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0) +; 
RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv4f64: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFMIN-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv4f64( %va, %m, i32 %evl) ret %v } define @vp_rint_nxv4f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI39_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t 
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv4f64_unmasked: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv4f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1228,39 +1516,133 @@ define @vp_rint_nxv4f64_unmasked( %va declare @llvm.vp.rint.nxv7f64(, , i32) define @vp_rint_nxv7f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv7f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv7f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv7f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; 
RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv7f64: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFMIN-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv7f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv7f64( %va, %m, i32 %evl) ret %v } define @vp_rint_nxv7f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv7f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI41_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv7f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv7f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv7f64_unmasked: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFMIN-NEXT: vsetvli zero, 
a0, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv7f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv7f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1268,39 +1650,133 @@ define @vp_rint_nxv7f64_unmasked( %va declare @llvm.vp.rint.nxv8f64(, , i32) define @vp_rint_nxv8f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv8f64: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFMIN-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, 
ma +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv8f64( %va, %m, i32 %evl) ret %v } define @vp_rint_nxv8f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI43_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv8f64_unmasked: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, 
v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv8f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1309,79 +1785,293 @@ define @vp_rint_nxv8f64_unmasked( %va declare @llvm.vp.rint.nxv16f64(, , i32) define @vp_rint_nxv16f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI44_0) -; CHECK-NEXT: srli a3, a1, 3 -; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) -; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vslidedown.vx v6, v0, a3 -; CHECK-NEXT: sltu a3, a0, a2 -; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a2, a3, a2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB44_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB44_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v7, v0 +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFH-NEXT: srli a3, a1, 3 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFH-NEXT: sub a2, a0, a1 +; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFH-NEXT: sltu a3, a0, a2 +; RV32ZVFH-NEXT: addi a3, a3, -1 +; RV32ZVFH-NEXT: and a2, a3, a2 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB44_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v7, v0 +; 
RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: srli a3, a1, 3 +; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB44_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv16f64: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFMIN-NEXT: vmv1r.v v7, v0 +; RV32ZVFMIN-NEXT: csrr a1, vlenb +; RV32ZVFMIN-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFMIN-NEXT: srli a3, a1, 3 +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFMIN-NEXT: sub a2, a0, a1 +; RV32ZVFMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFMIN-NEXT: sltu a3, a0, a2 +; RV32ZVFMIN-NEXT: addi a3, a3, -1 +; RV32ZVFMIN-NEXT: and a2, a3, a2 +; RV32ZVFMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFMIN-NEXT: # %bb.1: +; RV32ZVFMIN-NEXT: mv a0, a1 +; RV32ZVFMIN-NEXT: .LBB44_2: +; RV32ZVFMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: srli a3, a1, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; 
RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB44_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv16f64( %va, %m, i32 %evl) ret %v } define @vp_rint_nxv16f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_rint_nxv16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI45_0) -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2) -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB45_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB45_2: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_rint_nxv16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFH-NEXT: sub a3, a0, a1 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFH-NEXT: sltu a2, a0, a3 +; RV32ZVFH-NEXT: addi a2, a2, -1 +; RV32ZVFH-NEXT: and a2, a2, a3 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB45_2: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: 
vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_rint_nxv16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB45_2: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFMIN-LABEL: vp_rint_nxv16f64_unmasked: +; RV32ZVFMIN: # %bb.0: +; RV32ZVFMIN-NEXT: csrr a1, vlenb +; RV32ZVFMIN-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFMIN-NEXT: sub a3, a0, a1 +; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFMIN-NEXT: sltu a2, a0, a3 +; RV32ZVFMIN-NEXT: addi a2, a2, -1 +; RV32ZVFMIN-NEXT: and a2, a2, a3 +; RV32ZVFMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFMIN-NEXT: # %bb.1: +; RV32ZVFMIN-NEXT: mv a0, a1 +; RV32ZVFMIN-NEXT: .LBB45_2: +; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_rint_nxv16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB45_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, 
v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.rint.nxv16f64( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll index ccbc0ebb3b73e..d1ea5aa76268a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare @llvm.vp.round.nxv1bf16(, , i32) @@ -407,10 +407,11 @@ declare @llvm.vp.round.nxv1f16(, @vp_round_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI12_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 4 @@ -453,10 +454,11 @@ define @vp_round_nxv1f16( %va, @vp_round_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv1f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI13_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -493,10 +495,11 @@ declare @llvm.vp.round.nxv2f16(, @vp_round_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI14_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 4 @@ -539,10 +542,11 @@ define @vp_round_nxv2f16( %va, @vp_round_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI15_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -579,10 +583,11 @@ declare @llvm.vp.round.nxv4f16(, @vp_round_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: 
vp_round_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI16_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 4 @@ -625,10 +630,11 @@ define @vp_round_nxv4f16( %va, @vp_round_nxv4f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI17_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -667,9 +673,10 @@ define @vp_round_nxv8f16( %va, @vp_round_nxv8f16( %va, @vp_round_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI19_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -755,9 +763,10 @@ define @vp_round_nxv16f16( %va, @vp_round_nxv16f16( %va, @vp_round_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI21_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -843,9 +853,10 @@ define @vp_round_nxv32f16( %va, @vp_round_nxv32f16( %va, @vp_round_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_round_nxv32f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI23_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: fsrmi a0, 4 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -1210,41 +1222,141 @@ define @vp_round_nxv16f32_unmasked( % declare @llvm.vp.round.nxv1f64(, , i32) define @vp_round_nxv1f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI34_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: 
vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv1f64( %va, %m, i32 %evl) ret %v } define @vp_round_nxv1f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv1f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI35_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv1f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: 
vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv1f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv1f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv1f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv1f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1252,43 +1364,149 @@ define @vp_round_nxv1f64_unmasked( %v declare @llvm.vp.round.nxv2f64(, , i32) define @vp_round_nxv2f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; 
RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv2f64( %va, %m, i32 %evl) ret %v } define @vp_round_nxv2f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI37_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; 
RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1296,43 +1514,149 @@ define @vp_round_nxv2f64_unmasked( %v declare @llvm.vp.round.nxv4f64(, , i32) define @vp_round_nxv4f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, 
zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv4f64( %va, %m, i32 %evl) ret %v } define @vp_round_nxv4f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI39_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t 
+; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv4f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv4f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1340,43 +1664,149 @@ define @vp_round_nxv4f64_unmasked( %v declare @llvm.vp.round.nxv7f64(, , i32) define @vp_round_nxv7f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv7f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv7f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv7f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: 
vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv7f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv7f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv7f64( %va, %m, i32 %evl) ret %v } define @vp_round_nxv7f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv7f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI41_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv7f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv7f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; 
RV32ZVFHMIN-LABEL: vp_round_nxv7f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv7f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv7f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1384,43 +1814,149 @@ define @vp_round_nxv7f64_unmasked( %v declare @llvm.vp.round.nxv8f64(, , i32) define @vp_round_nxv8f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; 
RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv8f64( %va, %m, i32 %evl) ret %v } define @vp_round_nxv8f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI43_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, 
m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv8f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1429,87 +1965,325 @@ define @vp_round_nxv8f64_unmasked( %v declare @llvm.vp.round.nxv16f64(, , i32) define @vp_round_nxv16f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI44_0) -; CHECK-NEXT: srli a3, a1, 3 -; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) -; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vslidedown.vx v6, v0, a3 -; CHECK-NEXT: sltu a3, a0, a2 -; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a2, a3, a2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a2, 4 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB44_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB44_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v7, v0 +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFH-NEXT: srli a3, a1, 3 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFH-NEXT: sub a2, a0, a1 +; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFH-NEXT: sltu a3, a0, a2 +; RV32ZVFH-NEXT: addi a3, a3, -1 +; RV32ZVFH-NEXT: and a2, a3, a2 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi 
a2, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB44_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v7, v0 +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: srli a3, a1, 3 +; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a2, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB44_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv16f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFHMIN-NEXT: srli a3, a1, 3 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFHMIN-NEXT: sub a2, a0, a1 +; RV32ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFHMIN-NEXT: sltu a3, a0, a2 +; RV32ZVFHMIN-NEXT: addi a3, a3, -1 +; RV32ZVFHMIN-NEXT: and a2, a3, a2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a2, 
4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB44_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: srli a3, a1, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a2, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB44_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv16f64( %va, %m, i32 %evl) ret %v } define @vp_round_nxv16f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_round_nxv16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI45_0) -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2) -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a2, 4 -; CHECK-NEXT: 
vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB45_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB45_2: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a0, 4 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_round_nxv16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFH-NEXT: sub a3, a0, a1 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFH-NEXT: sltu a2, a0, a3 +; RV32ZVFH-NEXT: addi a2, a2, -1 +; RV32ZVFH-NEXT: and a2, a2, a3 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: fsrmi a2, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB45_2: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 4 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_round_nxv16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: fsrmi a2, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB45_2: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 4 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_round_nxv16f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFHMIN-NEXT: sub a3, a0, a1 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV32ZVFHMIN-NEXT: addi a2, a2, -1 +; RV32ZVFHMIN-NEXT: and a2, a2, a3 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; 
RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a2, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB45_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 4 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_round_nxv16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a2, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB45_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 4 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.round.nxv16f64( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll index 3975423e6f985..23d0e97c1c82b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare 
@llvm.vp.roundeven.nxv1bf16(, , i32) @@ -407,10 +407,11 @@ declare @llvm.vp.roundeven.nxv1f16(, @vp_roundeven_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI12_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 0 @@ -453,10 +454,11 @@ define @vp_roundeven_nxv1f16( %va, @vp_roundeven_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv1f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI13_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -493,10 +495,11 @@ declare @llvm.vp.roundeven.nxv2f16(, @vp_roundeven_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI14_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 0 @@ -539,10 +542,11 @@ define @vp_roundeven_nxv2f16( %va, @vp_roundeven_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI15_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -579,10 +583,11 @@ declare @llvm.vp.roundeven.nxv4f16(, @vp_roundeven_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI16_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 0 @@ -625,10 +630,11 @@ define @vp_roundeven_nxv4f16( %va, @vp_roundeven_nxv4f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI17_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -667,9 +673,10 @@ define @vp_roundeven_nxv8f16( %va, @vp_roundeven_nxv8f16( %va, @vp_roundeven_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI19_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 
+; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -755,9 +763,10 @@ define @vp_roundeven_nxv16f16( %va, @vp_roundeven_nxv16f16( %va, @vp_roundeven_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI21_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -843,9 +853,10 @@ define @vp_roundeven_nxv32f16( %va, @vp_roundeven_nxv32f16( %va, @vp_roundeven_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundeven_nxv32f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI23_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: fsrmi a0, 0 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -1210,41 +1222,141 @@ define @vp_roundeven_nxv16f32_unmasked( @llvm.vp.roundeven.nxv1f64(, , i32) define @vp_roundeven_nxv1f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI34_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; 
RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv1f64( %va, %m, i32 %evl) ret %v } define @vp_roundeven_nxv1f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv1f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI35_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv1f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv1f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv1f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; 
RV64ZVFHMIN-LABEL: vp_roundeven_nxv1f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv1f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1252,43 +1364,149 @@ define @vp_roundeven_nxv1f64_unmasked( @llvm.vp.roundeven.nxv2f64(, , i32) define @vp_roundeven_nxv2f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: 
vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv2f64( %va, %m, i32 %evl) ret %v } define @vp_roundeven_nxv2f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI37_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: 
vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1296,43 +1514,149 @@ define @vp_roundeven_nxv2f64_unmasked( @llvm.vp.roundeven.nxv4f64(, , i32) define @vp_roundeven_nxv4f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, 
m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv4f64( %va, %m, i32 %evl) ret %v } define @vp_roundeven_nxv4f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI39_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv4f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf 
v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv4f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1340,43 +1664,149 @@ define @vp_roundeven_nxv4f64_unmasked( @llvm.vp.roundeven.nxv7f64(, , i32) define @vp_roundeven_nxv7f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv7f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv7f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv7f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv7f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv7f64: +; RV64ZVFHMIN: # 
%bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv7f64( %va, %m, i32 %evl) ret %v } define @vp_roundeven_nxv7f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv7f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI41_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv7f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv7f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv7f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv7f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: 
vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv7f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1384,43 +1814,149 @@ define @vp_roundeven_nxv7f64_unmasked( @llvm.vp.roundeven.nxv8f64(, , i32) define @vp_roundeven_nxv8f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; 
RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv8f64( %va, %m, i32 %evl) ret %v } define @vp_roundeven_nxv8f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI43_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: 
ret %v = call @llvm.vp.roundeven.nxv8f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1429,87 +1965,325 @@ define @vp_roundeven_nxv8f64_unmasked( @llvm.vp.roundeven.nxv16f64(, , i32) define @vp_roundeven_nxv16f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI44_0) -; CHECK-NEXT: srli a3, a1, 3 -; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) -; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vslidedown.vx v6, v0, a3 -; CHECK-NEXT: sltu a3, a0, a2 -; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a2, a3, a2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a2, 0 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB44_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB44_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v7, v0 +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFH-NEXT: srli a3, a1, 3 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFH-NEXT: sub a2, a0, a1 +; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFH-NEXT: sltu a3, a0, a2 +; RV32ZVFH-NEXT: addi a3, a3, -1 +; RV32ZVFH-NEXT: and a2, a3, a2 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a2, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB44_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: 
vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v7, v0 +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: srli a3, a1, 3 +; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a2, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB44_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv16f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFHMIN-NEXT: srli a3, a1, 3 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFHMIN-NEXT: sub a2, a0, a1 +; RV32ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFHMIN-NEXT: sltu a3, a0, a2 +; RV32ZVFHMIN-NEXT: addi a3, a3, -1 +; RV32ZVFHMIN-NEXT: and a2, a3, a2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a2, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB44_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; 
RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: srli a3, a1, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a2, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB44_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv16f64( %va, %m, i32 %evl) ret %v } define @vp_roundeven_nxv16f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundeven_nxv16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI45_0) -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2) -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a2, 0 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB45_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB45_2: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a0, 0 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundeven_nxv16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFH-NEXT: sub a3, a0, a1 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFH-NEXT: sltu a2, a0, a3 +; RV32ZVFH-NEXT: 
addi a2, a2, -1 +; RV32ZVFH-NEXT: and a2, a2, a3 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: fsrmi a2, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB45_2: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 0 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundeven_nxv16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: fsrmi a2, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB45_2: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 0 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundeven_nxv16f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFHMIN-NEXT: sub a3, a0, a1 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV32ZVFHMIN-NEXT: addi a2, a2, -1 +; RV32ZVFHMIN-NEXT: and a2, a2, a3 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a2, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB45_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 0 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundeven_nxv16f64_unmasked: +; 
RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a2, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB45_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 0 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundeven.nxv16f64( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll index 7f617f48862c4..4d8066d12c9ad 100644 --- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFH +; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \ ; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ -; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN declare @llvm.vp.roundtozero.nxv1bf16(, , i32) @@ -407,10 +407,11 @@ declare @llvm.vp.roundtozero.nxv1f16(, @vp_roundtozero_nxv1f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv1f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI12_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 1 @@ -453,10 +454,11 @@ define @vp_roundtozero_nxv1f16( %va, @vp_roundtozero_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv1f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI13_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; 
ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -493,10 +495,11 @@ declare @llvm.vp.roundtozero.nxv2f16(, @vp_roundtozero_nxv2f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv2f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI14_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 1 @@ -539,10 +542,11 @@ define @vp_roundtozero_nxv2f16( %va, @vp_roundtozero_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv2f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI15_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -579,10 +583,11 @@ declare @llvm.vp.roundtozero.nxv4f16(, @vp_roundtozero_nxv4f16( %va, %m, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv4f16: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI16_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 1 @@ -625,10 +630,11 @@ define @vp_roundtozero_nxv4f16( %va, @vp_roundtozero_nxv4f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv4f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI17_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; ZVFH-NEXT: vfabs.v v9, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v9, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t @@ -667,9 +673,10 @@ define @vp_roundtozero_nxv8f16( %va, @vp_roundtozero_nxv8f16( %va, @vp_roundtozero_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv8f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI19_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; ZVFH-NEXT: vfabs.v v10, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v10, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t @@ -755,9 +763,10 @@ define @vp_roundtozero_nxv16f16( %va, < ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vmv1r.v v12, v0 -; ZVFH-NEXT: lui a0, %hi(.LCPI20_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0) ; ZVFH-NEXT: vfabs.v v16, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu ; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 1 @@ -801,10 +810,11 @@ define @vp_roundtozero_nxv16f16( %va, < define @vp_roundtozero_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv16f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI21_0) -; ZVFH-NEXT: flh fa5, 
%lo(.LCPI21_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFH-NEXT: vfabs.v v12, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v12, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t @@ -843,9 +853,10 @@ define @vp_roundtozero_nxv32f16( %va, < ; ZVFH: # %bb.0: ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFH-NEXT: vmv1r.v v16, v0 -; ZVFH-NEXT: lui a0, %hi(.LCPI22_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0) ; ZVFH-NEXT: vfabs.v v24, v8, v0.t +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu ; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t ; ZVFH-NEXT: fsrmi a0, 1 @@ -922,10 +933,11 @@ define @vp_roundtozero_nxv32f16( %va, < define @vp_roundtozero_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; ZVFH-LABEL: vp_roundtozero_nxv32f16_unmasked: ; ZVFH: # %bb.0: -; ZVFH-NEXT: lui a1, %hi(.LCPI23_0) -; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1) ; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFH-NEXT: vfabs.v v16, v8 +; ZVFH-NEXT: li a0, 25 +; ZVFH-NEXT: slli a0, a0, 10 +; ZVFH-NEXT: fmv.h.x fa5, a0 ; ZVFH-NEXT: vmflt.vf v0, v16, fa5 ; ZVFH-NEXT: fsrmi a0, 1 ; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t @@ -1210,41 +1222,141 @@ define @vp_roundtozero_nxv16f32_unmasked( @llvm.vp.roundtozero.nxv1f64(, , i32) define @vp_roundtozero_nxv1f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv1f64: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI34_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv1f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv1f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv1f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1) 
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv1f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv1f64( %va, %m, i32 %evl) ret %v } define @vp_roundtozero_nxv1f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv1f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI35_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfabs.v v9, v8 -; CHECK-NEXT: vmflt.vf v0, v9, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv1f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFH-NEXT: vfabs.v v9, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv1f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFH-NEXT: vfabs.v v9, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv1f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; 
RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv1f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v9, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv1f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1252,43 +1364,149 @@ define @vp_roundtozero_nxv1f64_unmasked( @llvm.vp.roundtozero.nxv2f64(, , i32) define @vp_roundtozero_nxv2f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vmv1r.v v10, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI36_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0) -; CHECK-NEXT: vfabs.v v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv2f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v10, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v10 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv2f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v10, v0 +; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v10 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv2f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: 
vmv1r.v v0, v10 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv2f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv2f64( %va, %m, i32 %evl) ret %v } define @vp_roundtozero_nxv2f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv2f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI37_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma -; CHECK-NEXT: vfabs.v v10, v8 -; CHECK-NEXT: vmflt.vf v0, v10, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv2f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFH-NEXT: vfabs.v v10, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv2f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFH-NEXT: vfabs.v v10, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv2f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: 
vp_roundtozero_nxv2f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v10, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1296,43 +1514,149 @@ define @vp_roundtozero_nxv2f64_unmasked( @llvm.vp.roundtozero.nxv4f64(, , i32) define @vp_roundtozero_nxv4f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vmv1r.v v12, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0) -; CHECK-NEXT: vfabs.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v12 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv4f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v12, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v12 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv4f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v12, v0 +; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v12 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv4f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: 
vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv4f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv4f64( %va, %m, i32 %evl) ret %v } define @vp_roundtozero_nxv4f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv4f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI39_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma -; CHECK-NEXT: vfabs.v v12, v8 -; CHECK-NEXT: vmflt.vf v0, v12, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv4f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFH-NEXT: vfabs.v v12, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv4f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64ZVFH-NEXT: vfabs.v v12, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv4f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv4f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; 
RV64ZVFHMIN-NEXT: vfabs.v v12, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv4f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1340,43 +1664,149 @@ define @vp_roundtozero_nxv4f64_unmasked( @llvm.vp.roundtozero.nxv7f64(, , i32) define @vp_roundtozero_nxv7f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv7f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI40_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv7f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv7f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv7f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; 
RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv7f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv7f64( %va, %m, i32 %evl) ret %v } define @vp_roundtozero_nxv7f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv7f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI41_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv7f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv7f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv7f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv7f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; 
RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv7f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1384,43 +1814,149 @@ define @vp_roundtozero_nxv7f64_unmasked( @llvm.vp.roundtozero.nxv8f64(, , i32) define @vp_roundtozero_nxv8f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vmv1r.v v16, v0 -; CHECK-NEXT: lui a0, %hi(.LCPI42_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0) -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv8f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v16, v0 +; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v16 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv8f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v16, v0 +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v16 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv8f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0) +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; 
RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv8f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0 +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv8f64( %va, %m, i32 %evl) ret %v } define @vp_roundtozero_nxv8f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv8f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI43_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v16, v8 -; CHECK-NEXT: vmflt.vf v0, v16, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv8f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v16, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv8f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v16, v8 +; RV64ZVFH-NEXT: li a0, 1075 +; RV64ZVFH-NEXT: slli a0, a0, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a0 +; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv8f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0) +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1) +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv8f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v16, v8 +; RV64ZVFHMIN-NEXT: li a0, 1075 +; RV64ZVFHMIN-NEXT: slli a0, a0, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; 
RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv8f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -1429,87 +1965,325 @@ define @vp_roundtozero_nxv8f64_unmasked( @llvm.vp.roundtozero.nxv16f64(, , i32) define @vp_roundtozero_nxv16f64( %va, %m, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI44_0) -; CHECK-NEXT: srli a3, a1, 3 -; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2) -; CHECK-NEXT: sub a2, a0, a1 -; CHECK-NEXT: vslidedown.vx v6, v0, a3 -; CHECK-NEXT: sltu a3, a0, a2 -; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a2, a3, a2 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a2, 1 -; CHECK-NEXT: vmv1r.v v0, v6 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB44_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB44_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; RV32ZVFH-LABEL: vp_roundtozero_nxv16f64: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFH-NEXT: vmv1r.v v7, v0 +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFH-NEXT: srli a3, a1, 3 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFH-NEXT: sub a2, a0, a1 +; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFH-NEXT: sltu a3, a0, a2 +; RV32ZVFH-NEXT: addi a3, a3, -1 +; RV32ZVFH-NEXT: and a2, a3, a2 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a2, 1 +; RV32ZVFH-NEXT: vmv1r.v v0, v6 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB44_2: +; RV32ZVFH-NEXT: vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: 
vmv1r.v v0, v7 +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv16f64: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFH-NEXT: vmv1r.v v7, v0 +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: srli a3, a1, 3 +; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a2, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v6 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB44_2: +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vmv1r.v v0, v7 +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv16f64: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0) +; RV32ZVFHMIN-NEXT: srli a3, a1, 3 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2) +; RV32ZVFHMIN-NEXT: sub a2, a0, a1 +; RV32ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV32ZVFHMIN-NEXT: sltu a3, a0, a2 +; RV32ZVFHMIN-NEXT: addi a3, a3, -1 +; RV32ZVFHMIN-NEXT: and a2, a3, a2 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a2, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB44_2: +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, 
fa5, v0.t +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv16f64: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0 +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: srli a3, a1, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a2, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB44_2: +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7 +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv16f64( %va, %m, i32 %evl) ret %v } define @vp_roundtozero_nxv16f64_unmasked( %va, i32 zeroext %evl) { -; CHECK-LABEL: vp_roundtozero_nxv16f64_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: lui a2, %hi(.LCPI45_0) -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2) -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v16 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a2, 1 -; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t -; CHECK-NEXT: fsrm a2 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB45_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB45_2: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma -; CHECK-NEXT: vfabs.v v24, v8 -; CHECK-NEXT: vmflt.vf v0, v24, fa5 -; CHECK-NEXT: fsrmi a0, 1 -; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t -; CHECK-NEXT: fsrm a0 -; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu -; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t -; CHECK-NEXT: ret +; 
RV32ZVFH-LABEL: vp_roundtozero_nxv16f64_unmasked: +; RV32ZVFH: # %bb.0: +; RV32ZVFH-NEXT: csrr a1, vlenb +; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFH-NEXT: sub a3, a0, a1 +; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFH-NEXT: sltu a2, a0, a3 +; RV32ZVFH-NEXT: addi a2, a2, -1 +; RV32ZVFH-NEXT: and a2, a2, a3 +; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v16 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: fsrmi a2, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFH-NEXT: fsrm a2 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFH-NEXT: # %bb.1: +; RV32ZVFH-NEXT: mv a0, a1 +; RV32ZVFH-NEXT: .LBB45_2: +; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFH-NEXT: vfabs.v v24, v8 +; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFH-NEXT: fsrmi a0, 1 +; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV32ZVFH-NEXT: fsrm a0 +; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFH-NEXT: ret +; +; RV64ZVFH-LABEL: vp_roundtozero_nxv16f64_unmasked: +; RV64ZVFH: # %bb.0: +; RV64ZVFH-NEXT: csrr a1, vlenb +; RV64ZVFH-NEXT: li a2, 1075 +; RV64ZVFH-NEXT: sub a3, a0, a1 +; RV64ZVFH-NEXT: slli a2, a2, 52 +; RV64ZVFH-NEXT: fmv.d.x fa5, a2 +; RV64ZVFH-NEXT: sltu a2, a0, a3 +; RV64ZVFH-NEXT: addi a2, a2, -1 +; RV64ZVFH-NEXT: and a2, a2, a3 +; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v16 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: fsrmi a2, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFH-NEXT: fsrm a2 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFH-NEXT: # %bb.1: +; RV64ZVFH-NEXT: mv a0, a1 +; RV64ZVFH-NEXT: .LBB45_2: +; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFH-NEXT: vfabs.v v24, v8 +; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFH-NEXT: fsrmi a0, 1 +; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFH-NEXT: fsrm a0 +; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv16f64_unmasked: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: csrr a1, vlenb +; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0) +; RV32ZVFHMIN-NEXT: sub a3, a0, a1 +; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2) +; RV32ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV32ZVFHMIN-NEXT: addi a2, a2, -1 +; RV32ZVFHMIN-NEXT: and a2, a2, a3 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a2, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV32ZVFHMIN-NEXT: fsrm a2 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV32ZVFHMIN-NEXT: # %bb.1: +; RV32ZVFHMIN-NEXT: mv a0, a1 +; RV32ZVFHMIN-NEXT: .LBB45_2: +; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV32ZVFHMIN-NEXT: fsrmi a0, 1 +; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, 
v8, v0.t +; RV32ZVFHMIN-NEXT: fsrm a0 +; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv16f64_unmasked: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: csrr a1, vlenb +; RV64ZVFHMIN-NEXT: li a2, 1075 +; RV64ZVFHMIN-NEXT: sub a3, a0, a1 +; RV64ZVFHMIN-NEXT: slli a2, a2, 52 +; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2 +; RV64ZVFHMIN-NEXT: sltu a2, a0, a3 +; RV64ZVFHMIN-NEXT: addi a2, a2, -1 +; RV64ZVFHMIN-NEXT: and a2, a2, a3 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v16 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a2, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t +; RV64ZVFHMIN-NEXT: fsrm a2 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t +; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2 +; RV64ZVFHMIN-NEXT: # %bb.1: +; RV64ZVFHMIN-NEXT: mv a0, a1 +; RV64ZVFHMIN-NEXT: .LBB45_2: +; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64ZVFHMIN-NEXT: vfabs.v v24, v8 +; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5 +; RV64ZVFHMIN-NEXT: fsrmi a0, 1 +; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t +; RV64ZVFHMIN-NEXT: fsrm a0 +; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t +; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t +; RV64ZVFHMIN-NEXT: ret %v = call @llvm.vp.roundtozero.nxv16f64( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll index 02825b2bda484..19a184148c0b6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -6018,3 +6018,39 @@ vector.latch: ; preds = %for.body419 for.cond.cleanup: ; preds = %vector.latch ret void } + +;; This is exactly like sink_add_splat except that the splat has operands +;; which haven't been converted to undef. 
+define void @sink_non_canonical_splat(ptr nocapture %a, i32 signext %x) { +; CHECK-LABEL: sink_non_canonical_splat: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lui a2, 1 +; CHECK-NEXT: add a2, a0, a2 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: .LBB131_1: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: vadd.vx v8, v8, a1 +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: bne a0, a2, .LBB131_1 +; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: ret +entry: + %broadcast.splatinsert = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0 + %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %entry + %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i32, ptr %a, i64 %index + %wide.load = load <4 x i32>, ptr %0, align 4 + %1 = add <4 x i32> %wide.load, %broadcast.splat + store <4 x i32> %1, ptr %0, align 4 + %index.next = add nuw i64 %index, 4 + %2 = icmp eq i64 %index.next, 1024 + br i1 %2, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll index 818b882a402ac..bb121416ddec3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll @@ -249,42 +249,32 @@ define @vfdiv_vf_nxv32bf16( %va, bf ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; CHECK-NEXT: fmv.x.h a0, fa0 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vmv.v.x v16, a0 +; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v16 -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v24 +; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfdiv.vv v24, v16, v0 +; CHECK-NEXT: vfdiv.vv v24, v24, v0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v12 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfdiv.vv v16, v0, v8 +; CHECK-NEXT: vfdiv.vv v16, v0, v16 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma 
; CHECK-NEXT: vfncvtbf16.f.f.w v8, v24 ; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 @@ -617,42 +607,32 @@ define @vfdiv_vf_nxv32f16( %va, half %b ; ZVFHMIN-NEXT: addi sp, sp, -16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: slli a0, a0, 3 ; ZVFHMIN-NEXT: sub sp, sp, a0 -; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; ZVFHMIN-NEXT: fmv.x.h a0, fa0 ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8 ; ZVFHMIN-NEXT: addi a1, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; ZVFHMIN-NEXT: vmv.v.x v16, a0 +; ZVFHMIN-NEXT: vmv.v.x v24, a0 ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16 -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20 -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: add a0, sp, a0 -; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28 ; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfdiv.vv v24, v16, v0 +; ZVFHMIN-NEXT: vfdiv.vv v24, v24, v0 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12 -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: add a0, sp, a0 -; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfdiv.vv v16, v0, v8 +; ZVFHMIN-NEXT: vfdiv.vv v16, v0, v16 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 ; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: slli a0, a0, 3 ; ZVFHMIN-NEXT: add sp, sp, a0 ; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 ; ZVFHMIN-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll index 03e6e6b7a624d..7e580d1057525 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v,+m -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s declare @llvm.vp.fma.nxv1f64(, , , , i32) declare @llvm.vp.fneg.nxv1f64(, , i32) @@ -24,17 +24,30 @@ define @test1( %a, (fmul x, c1+c2) define @test2( %a, %m, i32 zeroext %evl) { -; CHECK-LABEL: test2: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, 
%hi(.LCPI1_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI1_0)(a1) -; CHECK-NEXT: lui a1, %hi(.LCPI1_1) -; CHECK-NEXT: fld fa4, %lo(.LCPI1_1)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfmv.v.f v9, fa5 -; CHECK-NEXT: vfadd.vf v9, v9, fa4, v0.t -; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t -; CHECK-NEXT: ret +; RV32-LABEL: test2: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI1_0) +; RV32-NEXT: fld fa5, %lo(.LCPI1_0)(a1) +; RV32-NEXT: lui a1, %hi(.LCPI1_1) +; RV32-NEXT: fld fa4, %lo(.LCPI1_1)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vfmv.v.f v9, fa5 +; RV32-NEXT: vfadd.vf v9, v9, fa4, v0.t +; RV32-NEXT: vfmul.vv v8, v8, v9, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: test2: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1025 +; RV64-NEXT: slli a1, a1, 52 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vmv.v.x v9, a1 +; RV64-NEXT: li a0, 1 +; RV64-NEXT: slli a0, a0, 62 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vfadd.vf v9, v9, fa5, v0.t +; RV64-NEXT: vfmul.vv v8, v8, v9, v0.t +; RV64-NEXT: ret %t = call @llvm.vp.fmul.nxv1f64( %a, splat (double 2.0), %m, i32 %evl) %v = call fast @llvm.vp.fma.nxv1f64( %a, splat (double 4.0), %t, %m, i32 %evl) ret %v @@ -42,18 +55,32 @@ define @test2( %a, ; (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y) define @test3( %a, %b, %m, i32 zeroext %evl) { -; CHECK-LABEL: test3: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI2_0) -; CHECK-NEXT: fld fa5, %lo(.LCPI2_0)(a1) -; CHECK-NEXT: lui a1, %hi(.LCPI2_1) -; CHECK-NEXT: fld fa4, %lo(.LCPI2_1)(a1) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma -; CHECK-NEXT: vfmv.v.f v10, fa5 -; CHECK-NEXT: vfmul.vf v10, v10, fa4, v0.t -; CHECK-NEXT: vfmadd.vv v10, v8, v9, v0.t -; CHECK-NEXT: vmv.v.v v8, v10 -; CHECK-NEXT: ret +; RV32-LABEL: test3: +; RV32: # %bb.0: +; RV32-NEXT: lui a1, %hi(.LCPI2_0) +; RV32-NEXT: fld fa5, %lo(.LCPI2_0)(a1) +; RV32-NEXT: lui a1, %hi(.LCPI2_1) +; RV32-NEXT: fld fa4, %lo(.LCPI2_1)(a1) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vfmv.v.f v10, fa5 +; RV32-NEXT: vfmul.vf v10, v10, fa4, v0.t +; RV32-NEXT: vfmadd.vv v10, v8, v9, v0.t +; RV32-NEXT: vmv.v.v v8, v10 +; RV32-NEXT: ret +; +; RV64-LABEL: test3: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1025 +; RV64-NEXT: slli a1, a1, 52 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vmv.v.x v10, a1 +; RV64-NEXT: li a0, 1 +; RV64-NEXT: slli a0, a0, 62 +; RV64-NEXT: fmv.d.x fa5, a0 +; RV64-NEXT: vfmul.vf v10, v10, fa5, v0.t +; RV64-NEXT: vfmadd.vv v10, v8, v9, v0.t +; RV64-NEXT: vmv.v.v v8, v10 +; RV64-NEXT: ret %t = call @llvm.vp.fmul.nxv1f64( %a, splat (double 2.0), %m, i32 %evl) %v = call fast @llvm.vp.fma.nxv1f64( %t, splat (double 4.0), %b, %m, i32 %evl) ret %v diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll index 0a11501905b81..728fa07a7d4e5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -820,6 +820,7 @@ define @vfma_vf_nxv32bf16( %va, bfl ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: fmv.x.h a2, fa0 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: slli a1, a3, 1 @@ -835,41 +836,33 @@ define @vfma_vf_nxv32bf16( %va, bfl ; CHECK-NEXT: addi a4, a4, 16 ; CHECK-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; CHECK-NEXT: 
vfwcvtbf16.f.f.v v24, v20, v0.t +; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v20, v0.t ; CHECK-NEXT: addi a4, sp, 16 -; CHECK-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill ; CHECK-NEXT: csrr a4, vlenb -; CHECK-NEXT: slli a4, a4, 4 +; CHECK-NEXT: slli a4, a4, 3 +; CHECK-NEXT: mv a5, a4 +; CHECK-NEXT: slli a4, a4, 1 +; CHECK-NEXT: add a4, a4, a5 ; CHECK-NEXT: add a4, sp, a4 ; CHECK-NEXT: addi a4, a4, 16 -; CHECK-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; CHECK-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t ; CHECK-NEXT: vsetvli a4, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a2 ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: add a2, a2, a4 +; CHECK-NEXT: slli a2, a2, 4 ; CHECK-NEXT: add a2, sp, a2 ; CHECK-NEXT: addi a2, a2, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: add a2, a2, a4 -; CHECK-NEXT: add a2, sp, a2 -; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v28, v0.t +; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t ; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfmadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: vfmadd.vv v24, v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v4, v8, v0.t +; CHECK-NEXT: vfncvtbf16.f.f.w v4, v24, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB32_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 @@ -879,27 +872,27 @@ define @vfma_vf_nxv32bf16( %va, bfl ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 -; CHECK-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t +; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: slli a0, a0, 1 -; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload @@ -1061,81 +1054,87 @@ define 
@vfma_vf_nxv32bf16_unmasked( ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 5 +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: slli a1, a1, 2 +; CHECK-NEXT: add a1, a1, a2 ; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; CHECK-NEXT: vmv8r.v v24, v8 ; CHECK-NEXT: fmv.x.h a2, fa0 ; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma -; CHECK-NEXT: vmset.m v24 +; CHECK-NEXT: vmset.m v8 ; CHECK-NEXT: slli a1, a3, 1 ; CHECK-NEXT: srli a3, a3, 2 ; CHECK-NEXT: sub a4, a0, a1 ; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v24, a3 +; CHECK-NEXT: vslidedown.vx v0, v8, a3 ; CHECK-NEXT: sltu a3, a0, a4 ; CHECK-NEXT: addi a3, a3, -1 ; CHECK-NEXT: and a3, a3, a4 ; CHECK-NEXT: csrr a4, vlenb -; CHECK-NEXT: slli a4, a4, 4 +; CHECK-NEXT: slli a4, a4, 5 ; CHECK-NEXT: add a4, sp, a4 ; CHECK-NEXT: addi a4, a4, 16 ; CHECK-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t -; CHECK-NEXT: addi a4, sp, 16 +; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v20, v0.t +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: slli a4, a4, 4 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: slli a4, a4, 3 +; CHECK-NEXT: mv a5, a4 +; CHECK-NEXT: slli a4, a4, 1 +; CHECK-NEXT: add a4, a4, a5 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 ; CHECK-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v28, v0.t ; CHECK-NEXT: csrr a4, vlenb ; CHECK-NEXT: slli a4, a4, 3 ; CHECK-NEXT: add a4, sp, a4 ; CHECK-NEXT: addi a4, a4, 16 ; CHECK-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t ; CHECK-NEXT: vsetvli a4, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a2 +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma +; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t ; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: add a2, a2, a4 +; CHECK-NEXT: slli a2, a2, 4 ; CHECK-NEXT: add a2, sp, a2 ; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 -; CHECK-NEXT: mv a4, a2 -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: add a2, a2, a4 ; CHECK-NEXT: add a2, sp, a2 ; CHECK-NEXT: addi a2, a2, 16 -; CHECK-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v28, v0.t -; CHECK-NEXT: addi a2, sp, 16 -; CHECK-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfmadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: vfmadd.vv v24, v16, v8, v0.t ; CHECK-NEXT: vsetvli zero, zero, 
e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v20, v8, v0.t +; CHECK-NEXT: vfncvtbf16.f.f.w v12, v24, v0.t ; CHECK-NEXT: bltu a0, a1, .LBB34_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: .LBB34_2: ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: slli a1, a1, 5 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v24 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v0 +; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: mv a1, a0 @@ -1143,17 +1142,25 @@ define @vfma_vf_nxv32bf16_unmasked( ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8 +; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v24 ; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfmadd.vv v0, v24, v8 +; CHECK-NEXT: vfmadd.vv v24, v0, v16 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v16, v0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vfncvtbf16.f.f.w v8, v24 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 5 +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2287,6 +2294,7 @@ define @vfma_vf_nxv32f16( %va, half %b, ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb ; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; ZVFHMIN-NEXT: vmv1r.v v3, v0 +; ZVFHMIN-NEXT: vmv8r.v v24, v8 ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: csrr a3, vlenb ; ZVFHMIN-NEXT: slli a1, a3, 1 @@ -2302,41 +2310,33 @@ define @vfma_vf_nxv32f16( %va, half %b, ; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t ; ZVFHMIN-NEXT: addi a4, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a4, vlenb -; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: mv a5, a4 +; ZVFHMIN-NEXT: slli a4, a4, 1 +; ZVFHMIN-NEXT: add a4, a4, a5 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: 
vfwcvt.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t ; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v8, a2 ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 +; ZVFHMIN-NEXT: slli a2, a2, 4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 -; ZVFHMIN-NEXT: add a2, sp, a2 -; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t ; ZVFHMIN-NEXT: addi a2, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8, v0.t +; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24, v0.t ; ZVFHMIN-NEXT: bltu a0, a1, .LBB68_2 ; ZVFHMIN-NEXT: # %bb.1: ; ZVFHMIN-NEXT: mv a0, a1 @@ -2346,27 +2346,27 @@ define @vfma_vf_nxv32f16( %va, half %b, ; ZVFHMIN-NEXT: slli a1, a1, 3 ; ZVFHMIN-NEXT: add a1, sp, a1 ; ZVFHMIN-NEXT: addi a1, a1, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t ; ZVFHMIN-NEXT: addi a0, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: slli a0, a0, 1 +; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 3 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: mv a1, a0 -; ZVFHMIN-NEXT: slli a0, a0, 1 -; ZVFHMIN-NEXT: add a0, a0, a1 +; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload @@ -2540,81 +2540,87 @@ define @vfma_vf_nxv32f16_unmasked( %va, ; ZVFHMIN-NEXT: addi sp, sp, -16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 ; ZVFHMIN-NEXT: csrr a1, vlenb -; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: mv a2, a1 +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: sub sp, sp, a1 -; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * 
vlenb +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmv8r.v v24, v8 ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: csrr a3, vlenb -; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma -; ZVFHMIN-NEXT: vmset.m v24 +; ZVFHMIN-NEXT: vmset.m v8 ; ZVFHMIN-NEXT: slli a1, a3, 1 ; ZVFHMIN-NEXT: srli a3, a3, 2 ; ZVFHMIN-NEXT: sub a4, a0, a1 ; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma -; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a3 +; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3 ; ZVFHMIN-NEXT: sltu a3, a0, a4 ; ZVFHMIN-NEXT: addi a3, a3, -1 ; ZVFHMIN-NEXT: and a3, a3, a4 ; ZVFHMIN-NEXT: csrr a4, vlenb -; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: slli a4, a4, 5 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t -; ZVFHMIN-NEXT: addi a4, sp, 16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: mv a5, a4 +; ZVFHMIN-NEXT: slli a4, a4, 1 +; ZVFHMIN-NEXT: add a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t ; ZVFHMIN-NEXT: csrr a4, vlenb ; ZVFHMIN-NEXT: slli a4, a4, 3 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t ; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v8, a2 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 +; ZVFHMIN-NEXT: slli a2, a2, 4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: csrr a2, vlenb ; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t -; ZVFHMIN-NEXT: addi a2, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8, v0.t +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24, v0.t ; ZVFHMIN-NEXT: bltu a0, a1, .LBB70_2 ; ZVFHMIN-NEXT: # %bb.1: ; ZVFHMIN-NEXT: mv a0, a1 ; ZVFHMIN-NEXT: .LBB70_2: ; ZVFHMIN-NEXT: csrr a1, vlenb -; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: 
slli a1, a1, 5 ; ZVFHMIN-NEXT: add a1, sp, a1 ; ZVFHMIN-NEXT: addi a1, a1, 16 ; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 -; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 3 ; ZVFHMIN-NEXT: mv a1, a0 @@ -2622,17 +2628,25 @@ define @vfma_vf_nxv32f16_unmasked( %va, ; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24 ; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8 +; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v16 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0 -; ZVFHMIN-NEXT: vmv8r.v v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add sp, sp, a0 ; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 ; ZVFHMIN-NEXT: addi sp, sp, 16 @@ -8266,7 +8280,8 @@ define @vfmsub_vf_nxv32f16( %va, half % ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: lui a1, 8 ; ZVFHMIN-NEXT: csrr a3, vlenb -; ZVFHMIN-NEXT: vxor.vx v16, v16, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v24, v16, a1, v0.t +; ZVFHMIN-NEXT: vmv8r.v v16, v8 ; ZVFHMIN-NEXT: slli a1, a3, 1 ; ZVFHMIN-NEXT: srli a3, a3, 2 ; ZVFHMIN-NEXT: sub a4, a0, a1 @@ -8277,45 +8292,37 @@ define @vfmsub_vf_nxv32f16( %va, half % ; ZVFHMIN-NEXT: and a3, a3, a4 ; ZVFHMIN-NEXT: csrr a4, vlenb ; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: mv a5, a4 +; ZVFHMIN-NEXT: slli a4, a4, 1 +; ZVFHMIN-NEXT: add a4, a4, a5 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t ; ZVFHMIN-NEXT: addi a4, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a4, vlenb -; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: slli a4, a4, 3 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 
64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t ; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma -; ZVFHMIN-NEXT: vmv.v.x v8, a2 -; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 -; ZVFHMIN-NEXT: add a2, sp, a2 -; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vmv.v.x v16, a2 ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 +; ZVFHMIN-NEXT: slli a2, a2, 4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t ; ZVFHMIN-NEXT: addi a2, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8, v0.t +; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24, v0.t ; ZVFHMIN-NEXT: bltu a0, a1, .LBB282_2 ; ZVFHMIN-NEXT: # %bb.1: ; ZVFHMIN-NEXT: mv a0, a1 @@ -8323,24 +8330,22 @@ define @vfmsub_vf_nxv32f16( %va, half % ; ZVFHMIN-NEXT: vmv1r.v v0, v3 ; ZVFHMIN-NEXT: csrr a1, vlenb ; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: mv a2, a1 +; ZVFHMIN-NEXT: slli a1, a1, 1 +; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: add a1, sp, a1 ; ZVFHMIN-NEXT: addi a1, a1, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24, v0.t ; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 4 -; ZVFHMIN-NEXT: add a0, sp, a0 -; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 3 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 3 ; ZVFHMIN-NEXT: mv a1, a0 @@ -8348,12 +8353,20 @@ define @vfmsub_vf_nxv32f16( %va, half % ; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24, v0.t ; ZVFHMIN-NEXT: addi a0, sp, 16 ; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: mv a1, a0 +; 
ZVFHMIN-NEXT: slli a0, a0, 1 +; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload @@ -8524,13 +8537,17 @@ define @vfmsub_vf_nxv32f16_unmasked( %v ; ZVFHMIN-NEXT: addi sp, sp, -16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 ; ZVFHMIN-NEXT: csrr a1, vlenb -; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: mv a2, a1 +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: sub sp, sp, a1 -; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmv8r.v v24, v8 ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: lui a1, 8 -; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma -; ZVFHMIN-NEXT: vmset.m v24 +; ZVFHMIN-NEXT: vmset.m v8 ; ZVFHMIN-NEXT: csrr a3, vlenb ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 @@ -8538,88 +8555,98 @@ define @vfmsub_vf_nxv32f16_unmasked( %v ; ZVFHMIN-NEXT: srli a3, a3, 2 ; ZVFHMIN-NEXT: sub a4, a0, a1 ; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma -; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a3 +; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3 ; ZVFHMIN-NEXT: sltu a3, a0, a4 ; ZVFHMIN-NEXT: addi a3, a3, -1 ; ZVFHMIN-NEXT: and a3, a3, a4 ; ZVFHMIN-NEXT: csrr a4, vlenb ; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: mv a5, a4 +; ZVFHMIN-NEXT: slli a4, a4, 1 +; ZVFHMIN-NEXT: add a4, a4, a5 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t -; ZVFHMIN-NEXT: addi a4, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t ; ZVFHMIN-NEXT: csrr a4, vlenb ; ZVFHMIN-NEXT: slli a4, a4, 4 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v8, a2 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 +; ZVFHMIN-NEXT: slli a2, a2, 4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: csrr a2, vlenb ; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 ; 
ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t -; ZVFHMIN-NEXT: addi a2, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8, v0.t +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24, v0.t ; ZVFHMIN-NEXT: bltu a0, a1, .LBB284_2 ; ZVFHMIN-NEXT: # %bb.1: ; ZVFHMIN-NEXT: mv a0, a1 ; ZVFHMIN-NEXT: .LBB284_2: ; ZVFHMIN-NEXT: csrr a1, vlenb ; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: mv a2, a1 +; ZVFHMIN-NEXT: slli a1, a1, 1 +; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: add a1, sp, a1 ; ZVFHMIN-NEXT: addi a1, a1, 16 ; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24 -; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24 ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: mv a1, a0 -; ZVFHMIN-NEXT: slli a0, a0, 1 -; ZVFHMIN-NEXT: add a0, a0, a1 +; ZVFHMIN-NEXT: slli a0, a0, 5 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24 ; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8 +; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v16 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0 -; ZVFHMIN-NEXT: vmv8r.v v8, v16 +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add sp, sp, a0 ; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 ; ZVFHMIN-NEXT: addi sp, sp, 16 @@ -9534,100 +9561,116 @@ define @vfnmadd_vf_nxv32f16_unmasked( % ; ZVFHMIN-NEXT: addi sp, sp, -16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 ; ZVFHMIN-NEXT: csrr a1, vlenb -; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: mv a2, a1 +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: sub sp, sp, a1 -; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: .cfi_escape 
0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: lui a1, 8 ; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma -; ZVFHMIN-NEXT: vmset.m v24 +; ZVFHMIN-NEXT: vmset.m v7 ; ZVFHMIN-NEXT: csrr a3, vlenb ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma -; ZVFHMIN-NEXT: vxor.vx v8, v8, a1 -; ZVFHMIN-NEXT: vxor.vx v16, v16, a1 +; ZVFHMIN-NEXT: vxor.vx v24, v8, a1 +; ZVFHMIN-NEXT: vxor.vx v8, v16, a1 ; ZVFHMIN-NEXT: slli a1, a3, 1 ; ZVFHMIN-NEXT: srli a3, a3, 2 ; ZVFHMIN-NEXT: sub a4, a0, a1 ; ZVFHMIN-NEXT: vsetvli a5, zero, e8, mf2, ta, ma -; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a3 +; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3 ; ZVFHMIN-NEXT: sltu a3, a0, a4 ; ZVFHMIN-NEXT: addi a3, a3, -1 ; ZVFHMIN-NEXT: and a3, a3, a4 ; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: mv a5, a4 +; ZVFHMIN-NEXT: slli a4, a4, 1 +; ZVFHMIN-NEXT: add a4, a4, a5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: csrr a4, vlenb ; ZVFHMIN-NEXT: slli a4, a4, 4 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t ; ZVFHMIN-NEXT: csrr a4, vlenb -; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: slli a4, a4, 5 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28, v0.t ; ZVFHMIN-NEXT: csrr a4, vlenb ; ZVFHMIN-NEXT: slli a4, a4, 3 -; ZVFHMIN-NEXT: mv a5, a4 -; ZVFHMIN-NEXT: slli a4, a4, 1 -; ZVFHMIN-NEXT: add a4, a4, a5 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t ; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v8, a2 ; ZVFHMIN-NEXT: addi a2, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: slli a2, a2, 4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 ; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a2, a2, 3 +; ZVFHMIN-NEXT: add a2, sp, a2 +; ZVFHMIN-NEXT: addi a2, a2, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24, v0.t ; ZVFHMIN-NEXT: bltu a0, a1, .LBB292_2 ; ZVFHMIN-NEXT: # %bb.1: ; ZVFHMIN-NEXT: mv a0, a1 ; ZVFHMIN-NEXT: .LBB292_2: ; ZVFHMIN-NEXT: csrr a1, vlenb -; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: mv a2, a1 +; ZVFHMIN-NEXT: slli a1, a1, 1 +; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: add a1, sp, a1 ; ZVFHMIN-NEXT: addi a1, a1, 16 ; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; 
ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: mv a1, a0 -; ZVFHMIN-NEXT: slli a0, a0, 1 -; ZVFHMIN-NEXT: add a0, a0, a1 +; ZVFHMIN-NEXT: slli a0, a0, 5 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24 ; ZVFHMIN-NEXT: addi a0, sp, 16 ; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16 +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16 +; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v16 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add sp, sp, a0 ; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 ; ZVFHMIN-NEXT: addi sp, sp, 16 @@ -9673,44 +9716,39 @@ define @vfnmadd_vf_nxv32f16_unmasked_commute( @vfnmadd_vf_nxv32f16_unmasked_commute( @vfnmadd_vf_nxv32f16_unmasked_commute( @vfnmsub_vf_nxv32f16( %va, half ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv1r.v v3, v0 +; ZVFHMIN-NEXT: vmv8r.v v24, v16 ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: lui a1, 8 ; ZVFHMIN-NEXT: csrr a3, vlenb -; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v8, a1, v0.t ; ZVFHMIN-NEXT: slli a1, a3, 1 ; ZVFHMIN-NEXT: srli a3, a3, 2 ; ZVFHMIN-NEXT: sub a4, a0, a1 @@ -10804,43 +10846,35 @@ define @vfnmsub_vf_nxv32f16( %va, half ; ZVFHMIN-NEXT: slli a4, a4, 3 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t ; ZVFHMIN-NEXT: addi a4, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a4, vlenb -; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: mv a5, a4 +; ZVFHMIN-NEXT: slli a4, a4, 1 +; ZVFHMIN-NEXT: add a4, a4, a5 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t ; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma -; ZVFHMIN-NEXT: vmv.v.x v16, 
a2 -; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 -; ZVFHMIN-NEXT: add a2, sp, a2 -; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vmv.v.x v8, a2 ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 +; ZVFHMIN-NEXT: slli a2, a2, 4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t ; ZVFHMIN-NEXT: addi a2, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16, v0.t +; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24, v0.t ; ZVFHMIN-NEXT: bltu a0, a1, .LBB302_2 ; ZVFHMIN-NEXT: # %bb.1: ; ZVFHMIN-NEXT: mv a0, a1 @@ -10850,40 +10884,40 @@ define @vfnmsub_vf_nxv32f16( %va, half ; ZVFHMIN-NEXT: slli a1, a1, 3 ; ZVFHMIN-NEXT: add a1, sp, a1 ; ZVFHMIN-NEXT: addi a1, a1, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t ; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: slli a0, a0, 1 +; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16, v0.t ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 3 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: mv a1, a0 -; ZVFHMIN-NEXT: slli a0, a0, 1 -; ZVFHMIN-NEXT: add a0, a0, a1 +; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16, v0.t +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24, v0.t ; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 3 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: 
vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v24, v0.t +; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16, v0.t ; ZVFHMIN-NEXT: vmv.v.v v16, v8 ; ZVFHMIN-NEXT: vmv4r.v v12, v4 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma @@ -10919,10 +10953,18 @@ define @vfnmsub_vf_nxv32f16_commute( %v ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv1r.v v3, v0 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: mv a2, a1 +; ZVFHMIN-NEXT: slli a1, a1, 1 +; ZVFHMIN-NEXT: add a1, a1, a2 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: lui a1, 8 ; ZVFHMIN-NEXT: csrr a3, vlenb -; ZVFHMIN-NEXT: vxor.vx v8, v8, a1, v0.t +; ZVFHMIN-NEXT: vxor.vx v16, v8, a1, v0.t ; ZVFHMIN-NEXT: slli a1, a3, 1 ; ZVFHMIN-NEXT: srli a3, a3, 2 ; ZVFHMIN-NEXT: sub a4, a0, a1 @@ -10935,41 +10977,33 @@ define @vfnmsub_vf_nxv32f16_commute( %v ; ZVFHMIN-NEXT: slli a4, a4, 3 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20, v0.t ; ZVFHMIN-NEXT: csrr a4, vlenb -; ZVFHMIN-NEXT: slli a4, a4, 4 +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: mv a5, a4 +; ZVFHMIN-NEXT: slli a4, a4, 1 +; ZVFHMIN-NEXT: add a4, a4, a5 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t +; ZVFHMIN-NEXT: vl8r.v v8, (a4) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t ; ZVFHMIN-NEXT: addi a4, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv.v.x v16, a2 ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 +; ZVFHMIN-NEXT: slli a2, a2, 4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 -; ZVFHMIN-NEXT: add a2, sp, a2 -; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t ; ZVFHMIN-NEXT: addi a2, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24, v0.t ; ZVFHMIN-NEXT: bltu a0, a1, .LBB303_2 @@ -10987,7 +11021,10 @@ define @vfnmsub_vf_nxv32f16_commute( %v ; 
ZVFHMIN-NEXT: addi a0, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: slli a0, a0, 1 +; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload @@ -10998,10 +11035,7 @@ define @vfnmsub_vf_nxv32f16_commute( %v ; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: mv a1, a0 -; ZVFHMIN-NEXT: slli a0, a0, 1 -; ZVFHMIN-NEXT: add a0, a0, a1 +; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload @@ -11044,9 +11078,12 @@ define @vfnmsub_vf_nxv32f16_unmasked( % ; ZVFHMIN-NEXT: addi sp, sp, -16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 ; ZVFHMIN-NEXT: csrr a1, vlenb -; ZVFHMIN-NEXT: slli a1, a1, 5 +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: mv a2, a1 +; ZVFHMIN-NEXT: slli a1, a1, 2 +; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: sub sp, sp, a1 -; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb ; ZVFHMIN-NEXT: fmv.x.h a2, fa0 ; ZVFHMIN-NEXT: lui a1, 8 ; ZVFHMIN-NEXT: vsetvli a3, zero, e8, m4, ta, ma @@ -11064,81 +11101,92 @@ define @vfnmsub_vf_nxv32f16_unmasked( % ; ZVFHMIN-NEXT: and a3, a3, a4 ; ZVFHMIN-NEXT: csrr a4, vlenb ; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: mv a5, a4 +; ZVFHMIN-NEXT: slli a4, a4, 1 +; ZVFHMIN-NEXT: add a4, a4, a5 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t -; ZVFHMIN-NEXT: addi a4, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a4, vlenb ; ZVFHMIN-NEXT: slli a4, a4, 4 ; ZVFHMIN-NEXT: add a4, sp, a4 ; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a4) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 5 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t +; ZVFHMIN-NEXT: csrr a4, vlenb +; ZVFHMIN-NEXT: slli a4, a4, 3 +; ZVFHMIN-NEXT: add a4, sp, a4 +; ZVFHMIN-NEXT: addi a4, a4, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m8, ta, ma -; ZVFHMIN-NEXT: vmv.v.x v16, a2 +; ZVFHMIN-NEXT: vmv.v.x v8, a2 +; ZVFHMIN-NEXT: addi a2, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t ; ZVFHMIN-NEXT: csrr a2, vlenb -; ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 +; ZVFHMIN-NEXT: slli a2, a2, 4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: csrr a2, vlenb ; 
ZVFHMIN-NEXT: slli a2, a2, 3 -; ZVFHMIN-NEXT: mv a4, a2 -; ZVFHMIN-NEXT: slli a2, a2, 1 -; ZVFHMIN-NEXT: add a2, a2, a4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28, v0.t -; ZVFHMIN-NEXT: addi a2, sp, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a2) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v16, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8, v0.t +; ZVFHMIN-NEXT: vfmadd.vv v24, v8, v16, v0.t ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24, v0.t ; ZVFHMIN-NEXT: bltu a0, a1, .LBB304_2 ; ZVFHMIN-NEXT: # %bb.1: ; ZVFHMIN-NEXT: mv a0, a1 ; ZVFHMIN-NEXT: .LBB304_2: ; ZVFHMIN-NEXT: csrr a1, vlenb ; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: mv a2, a1 +; ZVFHMIN-NEXT: slli a1, a1, 1 +; ZVFHMIN-NEXT: add a1, a1, a2 ; ZVFHMIN-NEXT: add a1, sp, a1 ; ZVFHMIN-NEXT: addi a1, a1, 16 ; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24 -; ZVFHMIN-NEXT: addi a0, sp, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 4 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 3 -; ZVFHMIN-NEXT: mv a1, a0 -; ZVFHMIN-NEXT: slli a0, a0, 1 -; ZVFHMIN-NEXT: add a0, a0, a1 +; ZVFHMIN-NEXT: slli a0, a0, 5 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16 +; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24 ; ZVFHMIN-NEXT: addi a0, sp, 16 ; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24 +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v0 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0 +; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: mv a1, a0 +; ZVFHMIN-NEXT: slli a0, a0, 2 +; ZVFHMIN-NEXT: add a0, a0, a1 ; ZVFHMIN-NEXT: add sp, sp, a0 ; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 ; ZVFHMIN-NEXT: addi sp, sp, 16 @@ -11182,42 +11230,37 @@ define @vfnmsub_vf_nxv32f16_unmasked_commute( @vfnmsub_vf_nxv32f16_unmasked_commute( @vfnmsub_vf_nxv32f16_unmasked_commute( @vfnmsub_vf_nxv32f16_neg_splat( ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; ZVFHMIN-NEXT: vmv1r.v v3, v0 ; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: add a1, sp, a1 +; ZVFHMIN-NEXT: addi a1, a1, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: csrr a1, vlenb ; ZVFHMIN-NEXT: slli a1, a1, 3 ; ZVFHMIN-NEXT: mv 
a2, a1 ; ZVFHMIN-NEXT: slli a1, a1, 1 @@ -11298,10 +11349,10 @@ define @vfnmsub_vf_nxv32f16_neg_splat( ; ZVFHMIN-NEXT: fmv.x.h a1, fa0 ; ZVFHMIN-NEXT: lui a2, 8 ; ZVFHMIN-NEXT: csrr a3, vlenb -; ZVFHMIN-NEXT: vmv.v.x v8, a1 +; ZVFHMIN-NEXT: vmv.v.x v16, a1 ; ZVFHMIN-NEXT: slli a1, a3, 1 ; ZVFHMIN-NEXT: srli a3, a3, 2 -; ZVFHMIN-NEXT: vxor.vx v8, v8, a2, v0.t +; ZVFHMIN-NEXT: vxor.vx v8, v16, a2, v0.t ; ZVFHMIN-NEXT: sub a2, a0, a1 ; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a3 @@ -11315,12 +11366,11 @@ define @vfnmsub_vf_nxv32f16_neg_splat( ; ZVFHMIN-NEXT: vs8r.v v8, (a3) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t -; ZVFHMIN-NEXT: vmv8r.v v8, v16 ; ZVFHMIN-NEXT: csrr a2, vlenb ; ZVFHMIN-NEXT: slli a2, a2, 4 ; ZVFHMIN-NEXT: add a2, sp, a2 ; ZVFHMIN-NEXT: addi a2, a2, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vl8r.v v8, (a2) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12, v0.t ; ZVFHMIN-NEXT: addi a2, sp, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill @@ -11563,21 +11613,22 @@ define @vfnmsub_vf_nxv32f16_neg_splat_unmasked( @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( @vfmadd_vf_nxv32bf16( %va, < ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: sub sp, sp, a0 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv8r.v v24, v16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: li a1, 24 +; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-NEXT: fmv.x.h a0, fa0 ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vmv.v.x v16, a0 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; 
CHECK-NEXT: vfwcvtbf16.f.f.v v24, v0 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfmadd.vv v24, v16, v0 +; CHECK-NEXT: vfmadd.vv v24, v0, v16 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: li a1, 24 +; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12 +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v4 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v4 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v16, v8, v0 ; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvtbf16.f.f.w v8, v24 ; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16 ; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: li a1, 24 -; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: slli a0, a0, 5 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 @@ -759,65 +771,77 @@ define @vfmadd_vf_nxv32f16( %va, @vfmadd_vf_nxv32bf16( %va, < ; ZVFH-NEXT: addi sp, sp, -16 ; ZVFH-NEXT: .cfi_def_cfa_offset 16 ; ZVFH-NEXT: csrr a0, vlenb -; ZVFH-NEXT: slli a0, a0, 3 -; ZVFH-NEXT: mv a1, a0 -; ZVFH-NEXT: slli a0, a0, 2 -; ZVFH-NEXT: add a0, a0, a1 +; ZVFH-NEXT: slli a0, a0, 5 ; ZVFH-NEXT: sub sp, sp, a0 -; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb -; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFH-NEXT: vmv8r.v v24, v16 +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 4 +; ZVFH-NEXT: add a0, sp, a0 +; ZVFH-NEXT: addi a0, a0, 16 ; ZVFH-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vmv8r.v v16, v8 +; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; ZVFH-NEXT: fmv.x.h a0, fa0 -; ZVFH-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v16 ; ZVFH-NEXT: csrr a1, vlenb -; ZVFH-NEXT: slli a1, a1, 4 +; ZVFH-NEXT: slli a1, a1, 3 +; ZVFH-NEXT: mv a2, a1 +; ZVFH-NEXT: slli a1, a1, 1 +; ZVFH-NEXT: add a1, a1, a2 ; ZVFH-NEXT: add a1, sp, a1 ; ZVFH-NEXT: addi a1, a1, 16 -; ZVFH-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill -; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16 +; ZVFH-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v24 ; ZVFH-NEXT: csrr a1, vlenb ; ZVFH-NEXT: slli a1, a1, 3 ; ZVFH-NEXT: add a1, sp, a1 ; ZVFH-NEXT: addi a1, a1, 16 -; ZVFH-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; ZVFH-NEXT: vsetvli 
a1, zero, e16, m8, ta, ma -; ZVFH-NEXT: vmv.v.x v24, a0 -; ZVFH-NEXT: csrr a0, vlenb -; ZVFH-NEXT: slli a0, a0, 3 -; ZVFH-NEXT: mv a1, a0 -; ZVFH-NEXT: slli a0, a0, 1 -; ZVFH-NEXT: add a0, a0, a1 -; ZVFH-NEXT: add a0, sp, a0 -; ZVFH-NEXT: addi a0, a0, 16 -; ZVFH-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vmv.v.x v8, a0 +; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8 ; ZVFH-NEXT: csrr a0, vlenb ; ZVFH-NEXT: slli a0, a0, 3 ; ZVFH-NEXT: mv a1, a0 @@ -433,40 +434,17 @@ define @vfmadd_vf_nxv32bf16( %va, < ; ZVFH-NEXT: add a0, sp, a0 ; ZVFH-NEXT: addi a0, a0, 16 ; ZVFH-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v0 -; ZVFH-NEXT: csrr a0, vlenb -; ZVFH-NEXT: slli a0, a0, 5 -; ZVFH-NEXT: add a0, sp, a0 -; ZVFH-NEXT: addi a0, a0, 16 -; ZVFH-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill -; ZVFH-NEXT: csrr a0, vlenb -; ZVFH-NEXT: slli a0, a0, 4 -; ZVFH-NEXT: add a0, sp, a0 -; ZVFH-NEXT: addi a0, a0, 16 -; ZVFH-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; ZVFH-NEXT: csrr a0, vlenb ; ZVFH-NEXT: slli a0, a0, 3 ; ZVFH-NEXT: add a0, sp, a0 ; ZVFH-NEXT: addi a0, a0, 16 -; ZVFH-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload -; ZVFH-NEXT: csrr a0, vlenb -; ZVFH-NEXT: slli a0, a0, 5 -; ZVFH-NEXT: add a0, sp, a0 -; ZVFH-NEXT: addi a0, a0, 16 ; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFH-NEXT: vfmadd.vv v16, v24, v0 -; ZVFH-NEXT: csrr a0, vlenb -; ZVFH-NEXT: slli a0, a0, 5 -; ZVFH-NEXT: add a0, sp, a0 -; ZVFH-NEXT: addi a0, a0, 16 -; ZVFH-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill -; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v12 +; ZVFH-NEXT: vfmadd.vv v24, v16, v0 ; ZVFH-NEXT: addi a0, sp, 16 ; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v20 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v0, v20 ; ZVFH-NEXT: csrr a0, vlenb ; ZVFH-NEXT: slli a0, a0, 3 ; ZVFH-NEXT: mv a1, a0 @@ -474,23 +452,29 @@ define @vfmadd_vf_nxv32bf16( %va, < ; ZVFH-NEXT: add a0, a0, a1 ; ZVFH-NEXT: add a0, sp, a0 ; ZVFH-NEXT: addi a0, a0, 16 +; ZVFH-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 4 +; ZVFH-NEXT: add a0, sp, a0 +; ZVFH-NEXT: addi a0, a0, 16 ; ZVFH-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v4 -; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFH-NEXT: vfmadd.vv v16, v8, v24 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v0, v12 ; ZVFH-NEXT: csrr a0, vlenb -; ZVFH-NEXT: slli a0, a0, 5 +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: mv a1, a0 +; ZVFH-NEXT: slli a0, a0, 1 +; ZVFH-NEXT: add a0, a0, a1 ; ZVFH-NEXT: add a0, sp, a0 ; ZVFH-NEXT: addi a0, a0, 16 -; ZVFH-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfmadd.vv v0, v16, v8 ; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v24 -; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16 +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v0 ; ZVFH-NEXT: csrr a0, vlenb -; ZVFH-NEXT: slli a0, a0, 3 -; ZVFH-NEXT: mv a1, a0 -; ZVFH-NEXT: slli a0, a0, 2 -; ZVFH-NEXT: add a0, a0, a1 +; ZVFH-NEXT: slli a0, a0, 5 ; ZVFH-NEXT: add sp, sp, a0 ; ZVFH-NEXT: 
.cfi_def_cfa sp, 16 ; ZVFH-NEXT: addi sp, sp, 16 @@ -502,94 +486,80 @@ define @vfmadd_vf_nxv32bf16( %va, < ; ZVFHMIN-NEXT: addi sp, sp, -16 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: li a1, 40 -; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: slli a0, a0, 5 ; ZVFHMIN-NEXT: sub sp, sp, a0 -; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb -; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv8r.v v24, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vmv8r.v v16, v8 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: fmv.x.h a0, fa0 -; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v16 ; ZVFHMIN-NEXT: csrr a1, vlenb -; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: li a2, 24 +; ZVFHMIN-NEXT: mul a1, a1, a2 ; ZVFHMIN-NEXT: add a1, sp, a1 ; ZVFHMIN-NEXT: addi a1, a1, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16 +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v24 ; ZVFHMIN-NEXT: csrr a1, vlenb ; ZVFHMIN-NEXT: slli a1, a1, 3 ; ZVFHMIN-NEXT: add a1, sp, a1 ; ZVFHMIN-NEXT: addi a1, a1, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill ; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; ZVFHMIN-NEXT: vmv.v.x v24, a0 -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: li a1, 24 -; ZVFHMIN-NEXT: mul a0, a0, a1 -; ZVFHMIN-NEXT: add a0, sp, a0 -; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vmv.v.x v8, a0 +; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8 ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: li a1, 24 ; ZVFHMIN-NEXT: mul a0, a0, a1 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v0 -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 5 -; ZVFHMIN-NEXT: add a0, sp, a0 -; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 4 -; ZVFHMIN-NEXT: add a0, sp, a0 -; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: slli a0, a0, 3 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 5 -; ZVFHMIN-NEXT: add a0, sp, a0 -; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v0 -; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 5 
-; ZVFHMIN-NEXT: add a0, sp, a0 -; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill -; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v12 +; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v0 ; ZVFHMIN-NEXT: addi a0, sp, 16 ; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v20 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v0, v20 ; ZVFHMIN-NEXT: csrr a0, vlenb ; ZVFHMIN-NEXT: li a1, 24 ; ZVFHMIN-NEXT: mul a0, a0, a1 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vs8r.v v0, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 ; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload ; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v4 -; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v0, v12 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: slli a0, a0, 5 +; ZVFHMIN-NEXT: li a1, 24 +; ZVFHMIN-NEXT: mul a0, a0, a1 ; ZVFHMIN-NEXT: add a0, sp, a0 ; ZVFHMIN-NEXT: addi a0, a0, 16 -; ZVFHMIN-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v8 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v24 -; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16 +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v0 ; ZVFHMIN-NEXT: csrr a0, vlenb -; ZVFHMIN-NEXT: li a1, 40 -; ZVFHMIN-NEXT: mul a0, a0, a1 +; ZVFHMIN-NEXT: slli a0, a0, 5 ; ZVFHMIN-NEXT: add sp, sp, a0 ; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 ; ZVFHMIN-NEXT: addi sp, sp, 16 @@ -973,94 +943,80 @@ define @vfmadd_vf_nxv32f16( %va, @vfmsub_vf_nxv32f16( %va, @llvm.riscv.vmerge.nxv1i8.nxv1i8( , @@ -972,6 +972,22 @@ declare @llvm.riscv.vmerge.nxv1i64.i64( iXLen); define @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmerge.nxv1i64.i64( poison, @@ -991,6 +1007,22 @@ declare @llvm.riscv.vmerge.nxv2i64.i64( iXLen); define @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmerge.nxv2i64.i64( poison, @@ -1010,6 +1042,22 @@ declare 
@llvm.riscv.vmerge.nxv4i64.i64( iXLen); define @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmerge.nxv4i64.i64( poison, @@ -1029,6 +1077,22 @@ declare @llvm.riscv.vmerge.nxv8i64.i64( iXLen); define @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmerge.nxv8i64.i64( poison, diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode-f16.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode-f16.ll index e269b13137d44..93b12ad14d7e1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode-f16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode-f16.ll @@ -100,8 +100,9 @@ define half @vreduce_fminimum_nxv4f16( %val) { ; ZVFH-NEXT: vcpop.m a0, v9 ; ZVFH-NEXT: beqz a0, .LBB4_2 ; ZVFH-NEXT: # %bb.1: -; ZVFH-NEXT: lui a0, %hi(.LCPI4_0) -; ZVFH-NEXT: flh fa0, %lo(.LCPI4_0)(a0) +; ZVFH-NEXT: lui a0, 8 +; ZVFH-NEXT: addi a0, a0, -512 +; ZVFH-NEXT: fmv.h.x fa0, a0 ; ZVFH-NEXT: ret ; ZVFH-NEXT: .LBB4_2: ; ZVFH-NEXT: vfredmin.vs v8, v8, v8 @@ -138,8 +139,9 @@ define half @vreduce_fmaximum_nxv4f16( %val) { ; ZVFH-NEXT: vcpop.m a0, v9 ; ZVFH-NEXT: beqz a0, .LBB5_2 ; ZVFH-NEXT: # %bb.1: -; ZVFH-NEXT: lui a0, %hi(.LCPI5_0) -; ZVFH-NEXT: flh fa0, %lo(.LCPI5_0)(a0) +; ZVFH-NEXT: lui a0, 8 +; ZVFH-NEXT: addi a0, a0, -512 +; ZVFH-NEXT: fmv.h.x fa0, a0 ; ZVFH-NEXT: ret ; ZVFH-NEXT: .LBB5_2: ; ZVFH-NEXT: vfredmax.vs v8, v8, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll index 78aae96242fd3..861998a2ba51a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll @@ -998,13 +998,13 @@ declare half @llvm.vector.reduce.fmin.nxv10f16() define half @vreduce_fmin_nxv10f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv10f16: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI73_0) -; CHECK-NEXT: addi a0, a0, %lo(.LCPI73_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma -; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a1, a0, 2 ; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: lui a1, 8 +; CHECK-NEXT: addi a1, a1, -512 +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.s.x v12, a1 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfredmin.vs v12, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp-f16.ll 
b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp-f16.ll index 8993bf8a767d8..7fb26fb6f6258 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp-f16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp-f16.ll @@ -124,8 +124,9 @@ define half @vpreduce_fminimum_nxv4f16(half %start, %val, %val, @test4(i64 %avl, i8 zeroext %cond, @test6(i64 %avl, i8 zeroext %cond, This Inner Loop Header: Depth=1 +; CHECK-NEXT: addi s0, s0, 4 +; CHECK-NEXT: bltu a0, s0, .LBB0_7 +; CHECK-NEXT: # %bb.8: # %exit +; CHECK-NEXT: mv a0, s0 +; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; CHECK-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; CHECK-NEXT: .cfi_restore ra +; CHECK-NEXT: .cfi_restore s0 +; CHECK-NEXT: .cfi_restore s1 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 +; CHECK-NEXT: ret +entry: + %sel_1 = select i1 %arg_1, i32 %arg_2, i32 1 + %div = udiv i32 %arg_2, 7 + %cond_1 = icmp ugt i32 %div, %sel_1 + %sel_2 = select i1 %arg_1, i32 %div, i32 3 + %sel = select i1 %arg_1, i32 %sel_1, i32 %sel_2 + br label %body + +body: + %res = phi i32 [ %sel, %entry ], [ %add_loop, %body ] + %add_loop = add i32 4, %res + %cond_2 = icmp ugt i32 %add_loop, 3 + br i1 %cond_2, label %body, label %exit + +exit: + ret i32 %add_loop +} diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll index 19fade67afc3d..1eb47e4c0ede2 100644 --- a/llvm/test/CodeGen/RISCV/select.ll +++ b/llvm/test/CodeGen/RISCV/select.ll @@ -4,7 +4,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+m,+xventanacondops -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64IMXVTCONDOPS %s ; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECKZICOND,RV32IMZICOND %s ; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECKZICOND,RV64IMZICOND %s -; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI define i16 @select_xor_1(i16 %A, i8 %cond) { @@ -44,10 +44,11 @@ define i16 @select_xor_1(i16 %A, i8 %cond) { ; ; RV32IXQCI-LABEL: select_xor_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a1, a1, 31 -; RV32IXQCI-NEXT: srai a1, a1, 31 -; RV32IXQCI-NEXT: andi a1, a1, 43 -; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: andi a1, a1, 1 +; RV32IXQCI-NEXT: beqz a1, .LBB0_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB0_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -102,10 +103,11 @@ define i16 @select_xor_1b(i16 %A, i8 %cond) { ; ; RV32IXQCI-LABEL: select_xor_1b: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a1, a1, 31 -; RV32IXQCI-NEXT: srai a1, a1, 31 -; RV32IXQCI-NEXT: andi a1, a1, 43 -; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: andi a1, a1, 1 +; RV32IXQCI-NEXT: beqz a1, .LBB1_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB1_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -148,10 +150,11 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) { ; ; RV32IXQCI-LABEL: select_xor_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 
31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB2_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB2_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -196,10 +199,11 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) { ; ; RV32IXQCI-LABEL: select_xor_2b: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB3_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB3_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -221,9 +225,10 @@ define i16 @select_xor_3(i16 %A, i8 %cond) { ; RV32IXQCI-LABEL: select_xor_3: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a1, a1, 1 -; RV32IXQCI-NEXT: addi a1, a1, -1 -; RV32IXQCI-NEXT: andi a1, a1, 43 -; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: bnez a1, .LBB4_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB4_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -247,9 +252,10 @@ define i16 @select_xor_3b(i16 %A, i8 %cond) { ; RV32IXQCI-LABEL: select_xor_3b: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a1, a1, 1 -; RV32IXQCI-NEXT: addi a1, a1, -1 -; RV32IXQCI-NEXT: andi a1, a1, 43 -; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: bnez a1, .LBB5_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: xori a0, a0, 43 +; RV32IXQCI-NEXT: .LBB5_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -293,9 +299,10 @@ define i32 @select_xor_4(i32 %A, i32 %B, i8 %cond) { ; RV32IXQCI-LABEL: select_xor_4: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB6_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB6_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -341,9 +348,10 @@ define i32 @select_xor_4b(i32 %A, i32 %B, i8 %cond) { ; RV32IXQCI-LABEL: select_xor_4b: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB7_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: xor a0, a0, a1 +; RV32IXQCI-NEXT: .LBB7_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -382,9 +390,12 @@ define i32 @select_xor_5(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_xor_5: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a1 -; RV32IXQCI-NEXT: xori a0, a0, 128 +; RV32IXQCI-NEXT: li a2, 128 +; RV32IXQCI-NEXT: bnez a0, .LBB8_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: xori a2, a1, 128 +; RV32IXQCI-NEXT: .LBB8_2: +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret %add = xor i32 %x, 128 %sel = select i1 %cond, i32 128, i32 %add @@ -424,10 +435,11 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) { ; ; RV32IXQCI-LABEL: select_or: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB9_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB9_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -472,10 +484,11 @@ define i32 
@select_or_b(i32 %A, i32 %B, i8 %cond) { ; ; RV32IXQCI-LABEL: select_or_b: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB10_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB10_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -518,10 +531,11 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) { ; ; RV32IXQCI-LABEL: select_or_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB11_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB11_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 @@ -566,10 +580,11 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) { ; ; RV32IXQCI-LABEL: select_or_1b: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: slli a2, a2, 31 -; RV32IXQCI-NEXT: srai a2, a2, 31 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: andi a2, a2, 1 +; RV32IXQCI-NEXT: beqz a2, .LBB12_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB12_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 @@ -613,9 +628,10 @@ define i32 @select_or_2(i32 %A, i32 %B, i8 %cond) { ; RV32IXQCI-LABEL: select_or_2: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB13_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB13_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -661,9 +677,10 @@ define i32 @select_or_2b(i32 %A, i32 %B, i8 %cond) { ; RV32IXQCI-LABEL: select_or_2b: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB14_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB14_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i8 %cond, 1 @@ -707,9 +724,10 @@ define i32 @select_or_3(i32 %A, i32 %B, i32 %cond) { ; RV32IXQCI-LABEL: select_or_3: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB15_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB15_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 @@ -755,9 +773,10 @@ define i32 @select_or_3b(i32 %A, i32 %B, i32 %cond) { ; RV32IXQCI-LABEL: select_or_3b: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a2, a2, 1 -; RV32IXQCI-NEXT: addi a2, a2, -1 -; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: bnez a2, .LBB16_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry ; RV32IXQCI-NEXT: or a0, a0, a1 +; RV32IXQCI-NEXT: .LBB16_2: # %entry ; RV32IXQCI-NEXT: ret entry: %and = and i32 %cond, 1 @@ -796,9 +815,12 @@ define i32 @select_or_4(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_or_4: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a1 -; RV32IXQCI-NEXT: ori a0, a0, 128 +; RV32IXQCI-NEXT: li a2, 128 +; RV32IXQCI-NEXT: bnez a0, .LBB17_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: ori a2, a1, 128 +; RV32IXQCI-NEXT: .LBB17_2: +; RV32IXQCI-NEXT: mv a0, 
a2 ; RV32IXQCI-NEXT: ret %add = or i32 %x, 128 %sel = select i1 %cond, i32 128, i32 %add @@ -840,9 +862,11 @@ define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_add_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: neg a0, a0 -; RV32IXQCI-NEXT: and a0, a0, a1 -; RV32IXQCI-NEXT: add a0, a0, a2 +; RV32IXQCI-NEXT: beqz a0, .LBB18_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: add a2, a2, a1 +; RV32IXQCI-NEXT: .LBB18_2: # %entry +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %c = add i32 %a, %b @@ -885,9 +909,11 @@ define i32 @select_add_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_add_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: add a0, a0, a1 +; RV32IXQCI-NEXT: bnez a0, .LBB19_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: add a1, a1, a2 +; RV32IXQCI-NEXT: .LBB19_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = add i32 %a, %b @@ -933,9 +959,11 @@ define i32 @select_add_3(i1 zeroext %cond, i32 %a) { ; ; RV32IXQCI-LABEL: select_add_3: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: andi a0, a0, 42 -; RV32IXQCI-NEXT: add a0, a0, a1 +; RV32IXQCI-NEXT: bnez a0, .LBB20_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: addi a1, a1, 42 +; RV32IXQCI-NEXT: .LBB20_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = add i32 %a, 42 @@ -978,9 +1006,12 @@ define i32 @select_add_4(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_add_4: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a1 -; RV32IXQCI-NEXT: addi a0, a0, 128 +; RV32IXQCI-NEXT: li a2, 128 +; RV32IXQCI-NEXT: bnez a0, .LBB21_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: addi a2, a1, 128 +; RV32IXQCI-NEXT: .LBB21_2: +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret %add = add i32 %x, 128 %sel = select i1 %cond, i32 128, i32 %add @@ -1029,12 +1060,14 @@ define i64 @select_add_5(i1 zeroext %cond, i64 %x) { ; ; RV32IXQCI-LABEL: select_add_5: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a3, a0, -1 -; RV32IXQCI-NEXT: and a1, a1, a3 -; RV32IXQCI-NEXT: addi a0, a1, 128 -; RV32IXQCI-NEXT: sltu a1, a0, a1 -; RV32IXQCI-NEXT: and a2, a2, a3 -; RV32IXQCI-NEXT: add a1, a1, a2 +; RV32IXQCI-NEXT: mv a3, a0 +; RV32IXQCI-NEXT: addi a4, a1, 128 +; RV32IXQCI-NEXT: sltu a0, a4, a1 +; RV32IXQCI-NEXT: add a2, a2, a0 +; RV32IXQCI-NEXT: li a0, 128 +; RV32IXQCI-NEXT: qc.mveqi a0, a3, 0, a4 +; RV32IXQCI-NEXT: qc.selectieqi a3, 0, a2, 0 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: ret %add = add i64 %x, 128 %sel = select i1 %cond, i64 128, i64 %add @@ -1093,14 +1126,15 @@ define i64 @select_add_6(i1 zeroext %cond, i64 %x) { ; ; RV32IXQCI-LABEL: select_add_6: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a3, a0, -1 +; RV32IXQCI-NEXT: mv a3, a0 ; RV32IXQCI-NEXT: lui a0, 14 -; RV32IXQCI-NEXT: and a1, a1, a3 -; RV32IXQCI-NEXT: addi a0, a0, 1005 -; RV32IXQCI-NEXT: add a0, a0, a1 +; RV32IXQCI-NEXT: addi a4, a0, 1005 +; RV32IXQCI-NEXT: add a0, a1, a4 ; RV32IXQCI-NEXT: sltu a1, a0, a1 -; RV32IXQCI-NEXT: and a2, a2, a3 ; RV32IXQCI-NEXT: add a1, a1, a2 +; RV32IXQCI-NEXT: qc.mvnei a0, a3, 0, a4 +; RV32IXQCI-NEXT: qc.selectieqi a3, 0, a1, 0 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: ret %add = add i64 %x, 58349 %sel = select i1 %cond, i64 58349, i64 %add @@ -1152,8 +1186,10 @@ define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: 
select_sub_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: sub a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB24_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sub a2, a1, a2 +; RV32IXQCI-NEXT: .LBB24_2: # %entry ; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: @@ -1197,9 +1233,11 @@ define i32 @select_sub_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_sub_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: sub a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB25_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sub a1, a1, a2 +; RV32IXQCI-NEXT: .LBB25_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = sub i32 %a, %b @@ -1245,9 +1283,11 @@ define i32 @select_sub_3(i1 zeroext %cond, i32 %a) { ; ; RV32IXQCI-LABEL: select_sub_3: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: andi a0, a0, 42 -; RV32IXQCI-NEXT: sub a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB26_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: addi a1, a1, -42 +; RV32IXQCI-NEXT: .LBB26_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = sub i32 %a, 42 @@ -1301,10 +1341,12 @@ define i32 @select_sub_4(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_sub_4: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a1, a1, -128 ; RV32IXQCI-NEXT: li a2, 128 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: bnez a0, .LBB27_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: addi a2, a1, -128 +; RV32IXQCI-NEXT: .LBB27_2: +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret %add = sub i32 %x, 128 %sel = select i1 %cond, i32 128, i32 %add @@ -1347,8 +1389,10 @@ define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_and_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: and a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB28_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: and a2, a2, a1 +; RV32IXQCI-NEXT: .LBB28_2: # %entry ; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: @@ -1392,9 +1436,11 @@ define i32 @select_and_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_and_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: and a2, a2, a1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: bnez a0, .LBB29_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: and a1, a1, a2 +; RV32IXQCI-NEXT: .LBB29_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = and i32 %a, %b @@ -1437,9 +1483,11 @@ define i32 @select_and_3(i1 zeroext %cond, i32 %a) { ; ; RV32IXQCI-LABEL: select_and_3: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: andi a2, a1, 42 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: bnez a0, .LBB30_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: andi a1, a1, 42 +; RV32IXQCI-NEXT: .LBB30_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = and i32 %a, 42 @@ -1493,8 +1541,8 @@ define i32 @select_udiv_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_udiv_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: divu a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = udiv i32 %a, %b @@ 
-1626,9 +1674,11 @@ define i32 @select_udiv_3(i1 zeroext %cond, i32 %a) { ; RV32IXQCI-NEXT: lui a3, 199729 ; RV32IXQCI-NEXT: addi a3, a3, -975 ; RV32IXQCI-NEXT: mulhu a2, a2, a3 -; RV32IXQCI-NEXT: srli a2, a2, 2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: bnez a0, .LBB33_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: srli a1, a2, 2 +; RV32IXQCI-NEXT: .LBB33_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = udiv i32 %a, 42 @@ -1681,8 +1731,10 @@ define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_shl_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: sll a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB34_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sll a2, a1, a2 +; RV32IXQCI-NEXT: .LBB34_2: # %entry ; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: @@ -1726,9 +1778,11 @@ define i32 @select_shl_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_shl_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: sll a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB35_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sll a1, a1, a2 +; RV32IXQCI-NEXT: .LBB35_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = shl i32 %a, %b @@ -1797,8 +1851,10 @@ define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_ashr_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: sra a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB37_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sra a2, a1, a2 +; RV32IXQCI-NEXT: .LBB37_2: # %entry ; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: @@ -1842,9 +1898,11 @@ define i32 @select_ashr_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_ashr_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: sra a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB38_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: sra a1, a1, a2 +; RV32IXQCI-NEXT: .LBB38_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = ashr i32 %a, %b @@ -1913,8 +1971,10 @@ define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_lshr_1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: srl a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 +; RV32IXQCI-NEXT: beqz a0, .LBB40_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: srl a2, a1, a2 +; RV32IXQCI-NEXT: .LBB40_2: # %entry ; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: @@ -1958,9 +2018,11 @@ define i32 @select_lshr_2(i1 zeroext %cond, i32 %a, i32 %b) { ; ; RV32IXQCI-LABEL: select_lshr_2: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: addi a0, a0, -1 -; RV32IXQCI-NEXT: and a0, a0, a2 -; RV32IXQCI-NEXT: srl a0, a1, a0 +; RV32IXQCI-NEXT: bnez a0, .LBB41_2 +; RV32IXQCI-NEXT: # %bb.1: # %entry +; RV32IXQCI-NEXT: srl a1, a1, a2 +; RV32IXQCI-NEXT: .LBB41_2: # %entry +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = lshr i32 %a, %b @@ -2304,11 +2366,13 @@ define i32 @select_cst3(i1 zeroext %cond) { ; ; RV32IXQCI-LABEL: select_cst3: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: lui a1, 7 -; RV32IXQCI-NEXT: lui a2, 5 -; RV32IXQCI-NEXT: addi a3, a1, 1328 -; RV32IXQCI-NEXT: addi a1, a2, -480 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a3 +; RV32IXQCI-NEXT: 
lui a2, 7 +; RV32IXQCI-NEXT: lui a1, 5 +; RV32IXQCI-NEXT: addi a1, a1, -480 +; RV32IXQCI-NEXT: beqz a0, .LBB51_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: addi a1, a2, 1328 +; RV32IXQCI-NEXT: .LBB51_2: ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %ret = select i1 %cond, i32 30000, i32 20000 @@ -2370,10 +2434,12 @@ define i32 @select_cst5(i1 zeroext %cond) { ; ; RV32IXQCI-LABEL: select_cst5: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: lui a1, 1 -; RV32IXQCI-NEXT: addi a1, a1, -2047 -; RV32IXQCI-NEXT: li a2, 2047 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 +; RV32IXQCI-NEXT: lui a2, 1 +; RV32IXQCI-NEXT: li a1, 2047 +; RV32IXQCI-NEXT: bnez a0, .LBB53_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: addi a1, a2, -2047 +; RV32IXQCI-NEXT: .LBB53_2: ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %ret = select i1 %cond, i32 2047, i32 2049 @@ -2417,10 +2483,12 @@ define i32 @select_cst5_invert(i1 zeroext %cond) { ; ; RV32IXQCI-LABEL: select_cst5_invert: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: lui a1, 1 -; RV32IXQCI-NEXT: addi a2, a1, -2047 +; RV32IXQCI-NEXT: lui a2, 1 ; RV32IXQCI-NEXT: li a1, 2047 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 +; RV32IXQCI-NEXT: beqz a0, .LBB54_2 +; RV32IXQCI-NEXT: # %bb.1: +; RV32IXQCI-NEXT: addi a1, a2, -2047 +; RV32IXQCI-NEXT: .LBB54_2: ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %ret = select i1 %cond, i32 2049, i32 2047 @@ -2870,8 +2938,8 @@ define void @select_redundant_czero_eqz1(ptr %0, ptr %1) { ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: lui a2, %hi(select_redundant_czero_eqz_data) ; RV32IXQCI-NEXT: addi a2, a2, %lo(select_redundant_czero_eqz_data) -; RV32IXQCI-NEXT: qc.mveqi a0, a0, 0, a2 -; RV32IXQCI-NEXT: sw a0, 0(a1) +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a0 +; RV32IXQCI-NEXT: sw a2, 0(a1) ; RV32IXQCI-NEXT: ret entry: %3 = icmp eq ptr %0, null diff --git a/llvm/test/CodeGen/RISCV/srodata.ll b/llvm/test/CodeGen/RISCV/srodata.ll index 1d5bd904f233f..71ced1743efcd 100644 --- a/llvm/test/CodeGen/RISCV/srodata.ll +++ b/llvm/test/CodeGen/RISCV/srodata.ll @@ -4,12 +4,6 @@ ; RUN: sed 's/SMALL_DATA_LIMIT/0/g' %s | \ ; RUN: llc -mtriple=riscv64 -mattr=+d | \ ; RUN: FileCheck -check-prefix=CHECK-SDL-0 %s -; RUN: sed 's/SMALL_DATA_LIMIT/4/g' %s | \ -; RUN: llc -mtriple=riscv32 -mattr=+d | \ -; RUN: FileCheck -check-prefix=CHECK-SDL-4 %s -; RUN: sed 's/SMALL_DATA_LIMIT/4/g' %s | \ -; RUN: llc -mtriple=riscv64 -mattr=+d | \ -; RUN: FileCheck -check-prefix=CHECK-SDL-4 %s ; RUN: sed 's/SMALL_DATA_LIMIT/8/g' %s | \ ; RUN: llc -mtriple=riscv32 -mattr=+d | \ ; RUN: FileCheck -check-prefix=CHECK-SDL-8 %s @@ -23,11 +17,6 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d | \ ; RUN: FileCheck -check-prefix=CHECK-SDL-16 %s -define dso_local float @foof() { -entry: - ret float 0x400A08ACA0000000 -} - define dso_local double @foo() { entry: ret double 0x400A08AC91C3E242 @@ -39,9 +28,5 @@ entry: ; CHECK-SDL-0-NOT: .section .srodata.cst4 ; CHECK-SDL-0-NOT: .section .srodata.cst8 -; CHECK-SDL-4: .section .srodata.cst4 -; CHECK-SDL-4-NOT: .section .srodata.cst8 -; CHECK-SDL-8: .section .srodata.cst4 ; CHECK-SDL-8: .section .srodata.cst8 -; CHECK-SDL-16: .section .srodata.cst4 ; CHECK-SDL-16: .section .srodata.cst8 diff --git a/llvm/test/CodeGen/RISCV/xqcicli.ll b/llvm/test/CodeGen/RISCV/xqcicli.ll index 8b976163351ae..8d4caa177513b 100644 --- a/llvm/test/CodeGen/RISCV/xqcicli.ll +++ b/llvm/test/CodeGen/RISCV/xqcicli.ll @@ -4,7 +4,7 @@ ; RUN: | FileCheck %s --check-prefixes=RV32I ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicli -verify-machineinstrs < %s \ ; RUN: | 
FileCheck %s --check-prefixes=RV32IXQCICLI -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) { diff --git a/llvm/test/CodeGen/RISCV/xqcicm.ll b/llvm/test/CodeGen/RISCV/xqcicm.ll index 1741be742323d..8e934963c258b 100644 --- a/llvm/test/CodeGen/RISCV/xqcicm.ll +++ b/llvm/test/CodeGen/RISCV/xqcicm.ll @@ -6,7 +6,7 @@ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI define i32 @select_example(i32 %cond, i32 %x, i32 %y) { @@ -23,15 +23,15 @@ define i32 @select_example(i32 %cond, i32 %x, i32 %y) { ; RV32IXQCICM-LABEL: select_example: ; RV32IXQCICM: # %bb.0: # %entry ; RV32IXQCICM-NEXT: andi a0, a0, 1 -; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCICM-NEXT: mv a0, a2 +; RV32IXQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCICM-NEXT: mv a0, a1 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_example: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %cond_trunc = trunc i32 %cond to i1 @@ -52,14 +52,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, 11 @@ -80,14 +80,14 @@ define i32 @select_cc_example_eq1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 11, %a @@ -108,14 +108,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: 
ret ; ; RV32IXQCI-LABEL: select_cc_example_ne: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, 11 @@ -136,14 +136,14 @@ define i32 @select_cc_example_ne1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 11, %a @@ -164,14 +164,14 @@ define i32 @select_cc_example_slt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 %a, 11 @@ -192,14 +192,14 @@ define i32 @select_cc_example_slt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 11, %a @@ -220,14 +220,14 @@ define i32 @select_cc_example_sle(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 %a, 11 @@ -248,14 +248,14 @@ define i32 @select_cc_example_sle1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 11, %a @@ -276,14 +276,14 @@ define i32 @select_cc_example_sgt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt: ; RV32IXQCICM: # %bb.0: # %entry -; 
RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 %a, 11 @@ -304,14 +304,14 @@ define i32 @select_cc_example_sgt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 11, %a @@ -332,14 +332,14 @@ define i32 @select_cc_example_sge(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sge: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 %a, 11 @@ -360,14 +360,14 @@ define i32 @select_cc_example_sge1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sge1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 11, %a @@ -388,14 +388,14 @@ define i32 @select_cc_example_ule(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ule: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 %a, 11 @@ -416,14 +416,14 @@ define i32 @select_cc_example_ule1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ule1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule 
i32 11, %a @@ -444,14 +444,14 @@ define i32 @select_cc_example_ugt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ugt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ugt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ugt i32 %a, 11 @@ -472,14 +472,14 @@ define i32 @select_cc_example_ugt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ugt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ugt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ugt i32 11, %a @@ -500,14 +500,14 @@ define i32 @select_cc_example_ult(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ult: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ult: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ult i32 %a, 11 @@ -528,14 +528,14 @@ define i32 @select_cc_example_ult1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ult1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ult1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ult i32 11, %a @@ -556,14 +556,14 @@ define i32 @select_cc_example_uge(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_uge: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_uge: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp uge i32 %a, 11 @@ -584,14 +584,14 @@ define i32 @select_cc_example_uge1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_uge1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_uge1: ; RV32IXQCI: # %bb.0: # 
%entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp uge i32 11, %a @@ -611,14 +611,14 @@ define i32 @select_cc_example_eq_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveq a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvne a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveq a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvne a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, %b @@ -638,14 +638,14 @@ define i32 @select_cc_example_ne_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvne a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveq a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvne a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveq a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, %b @@ -665,14 +665,14 @@ define i32 @select_cc_example_slt_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvge a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 %a, %b @@ -692,14 +692,14 @@ define i32 @select_cc_example_sge_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvge a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sge_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvge a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 %a, %b @@ -719,14 +719,14 @@ define i32 @select_cc_example_sgt_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvge a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 %a, %b @@ -746,14 +746,14 @@ define i32 @select_cc_example_sle_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvge a3, a1, 
a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvge a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 %a, %b @@ -773,14 +773,14 @@ define i32 @select_cc_example_ugt_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ugt_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ugt_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ugt i32 %a, %b @@ -800,14 +800,14 @@ define i32 @select_cc_example_ult_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ult_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ult_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ult i32 %a, %b @@ -827,14 +827,14 @@ define i32 @select_cc_example_uge_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_uge_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_uge_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp uge i32 %a, %b @@ -854,14 +854,14 @@ define i32 @select_cc_example_ule_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ule_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 %a, %b @@ -883,18 +883,263 @@ define i32 @select_cc_example_ule_neg(i32 %a, i32 %b, i32 %x, i32 %y) { ; RV32IXQCICM-LABEL: select_cc_example_ule_neg: ; RV32IXQCICM: # %bb.0: # %entry ; RV32IXQCICM-NEXT: li a1, -10 -; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule_neg: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: li a1, -10 -; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3 +; 
RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 %a, -11 %sel = select i1 %cmp, i32 %x, i32 %y ret i32 %sel } + +define i32 @select_cc_example_eq_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_eq_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: beq a2, a1, .LBB32_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB32_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_eq_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvne a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_eq_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvne a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp eq i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_lt_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_lt_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: blt a2, a1, .LBB33_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB33_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_lt_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvge a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_lt_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvge a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp slt i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ge_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ge_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bge a2, a1, .LBB34_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB34_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ge_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvlt a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ge_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvlt a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp sge i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ult_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ult_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bltu a2, a1, .LBB35_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB35_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ult_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvgeu a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ult_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvgeu a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp ult i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_uge_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_uge_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bgeu a2, a1, .LBB36_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB36_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_uge_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvltu a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_uge_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvltu a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp uge i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 
%sel +} + +define i32 @select_cc_example_eq_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_eq_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 11 +; RV32I-NEXT: beq a2, a1, .LBB37_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB37_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_eq_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvnei a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_eq_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvnei a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp eq i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_lt_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_lt_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 11 +; RV32I-NEXT: blt a2, a1, .LBB38_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB38_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_lt_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvgei a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_lt_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvgei a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp slt i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ge_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 10 +; RV32I-NEXT: blt a1, a2, .LBB39_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB39_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ge_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvlti a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ge_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvlti a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp sge i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ult_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ult_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 11 +; RV32I-NEXT: bltu a2, a1, .LBB40_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB40_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ult_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvgeui a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ult_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvgeui a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp ult i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_uge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_uge_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 10 +; RV32I-NEXT: bltu a1, a2, .LBB41_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB41_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_uge_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvltui a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_uge_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvltui a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret 
+entry: + %cmp = icmp uge i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} diff --git a/llvm/test/CodeGen/RISCV/xqcics.ll b/llvm/test/CodeGen/RISCV/xqcics.ll index 38de8fbd78b36..c0839c98c1348 100644 --- a/llvm/test/CodeGen/RISCV/xqcics.ll +++ b/llvm/test/CodeGen/RISCV/xqcics.ll @@ -6,7 +6,7 @@ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCICS ; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcics,+experimental-xqcicm -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM -; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \ ; RUN: | FileCheck %s --check-prefixes=RV32IXQCI define i32 @select_cc_example_eq_s1(i32 %a, i32 %b, i32 %x, i32 %y) { @@ -134,14 +134,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, 11 @@ -167,14 +167,14 @@ define i32 @select_cc_example_eq_c(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq_c: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq_c: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 11, %a @@ -200,14 +200,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, 11 @@ -233,14 +233,14 @@ define i32 @select_cc_example_ne_c(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne_c: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne_c: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 11, %a @@ -690,3 +690,127 @@ entry: ret i32 %sel } +define i32 @select_cc_example_eq1(i32 %a, i32 %b, i32 %x, i32 %y) { +; 
RV32I-LABEL: select_cc_example_eq1: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: beq a1, a0, .LBB21_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: li a2, 11 +; RV32I-NEXT: .LBB21_2: # %entry +; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: ret +; +; RV32IXQCICS-LABEL: select_cc_example_eq1: +; RV32IXQCICS: # %bb.0: # %entry +; RV32IXQCICS-NEXT: qc.selectieq a0, a1, a2, 11 +; RV32IXQCICS-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_eq1: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.selectieq a0, a1, a2, 11 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_eq1: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.line a2, a1, a0, 11 +; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp eq i32 %b, %a + %sel = select i1 %cmp, i32 %x, i32 11 + ret i32 %sel +} + +define i32 @select_cc_example_ne1(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ne1: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bne a1, a0, .LBB22_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: li a2, 11 +; RV32I-NEXT: .LBB22_2: # %entry +; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: ret +; +; RV32IXQCICS-LABEL: select_cc_example_ne1: +; RV32IXQCICS: # %bb.0: # %entry +; RV32IXQCICS-NEXT: qc.selectine a0, a1, a2, 11 +; RV32IXQCICS-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ne1: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.selectine a0, a1, a2, 11 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ne1: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.lieq a2, a1, a0, 11 +; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp ne i32 %b, %a + %sel = select i1 %cmp, i32 %x, i32 11 + ret i32 %sel +} + + +define i32 @select_cc_example_eq2(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_eq2: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: beq a1, a0, .LBB23_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: li a0, 11 +; RV32I-NEXT: ret +; RV32I-NEXT: .LBB23_2: +; RV32I-NEXT: li a0, 15 +; RV32I-NEXT: ret +; +; RV32IXQCICS-LABEL: select_cc_example_eq2: +; RV32IXQCICS: # %bb.0: # %entry +; RV32IXQCICS-NEXT: qc.selectiieq a0, a1, 15, 11 +; RV32IXQCICS-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_eq2: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.selectiieq a0, a1, 15, 11 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_eq2: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.selectiieq a0, a1, 15, 11 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp eq i32 %b, %a + %sel = select i1 %cmp, i32 15, i32 11 + ret i32 %sel +} + +define i32 @select_cc_example_ne2(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ne2: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bne a1, a0, .LBB24_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: li a0, 11 +; RV32I-NEXT: ret +; RV32I-NEXT: .LBB24_2: +; RV32I-NEXT: li a0, 15 +; RV32I-NEXT: ret +; +; RV32IXQCICS-LABEL: select_cc_example_ne2: +; RV32IXQCICS: # %bb.0: # %entry +; RV32IXQCICS-NEXT: qc.selectiine a0, a1, 15, 11 +; RV32IXQCICS-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ne2: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.selectiine a0, a1, 15, 11 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ne2: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.selectiine a0, a1, 15, 11 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp ne i32 %b, %a + %sel = select i1 %cmp, i32 15, i32 11 + ret i32 %sel +} diff --git a/llvm/test/CodeGen/SPARC/64abi.ll b/llvm/test/CodeGen/SPARC/64abi.ll index 
6485a7f13e8d5..dc8c9af4a5185 100644 --- a/llvm/test/CodeGen/SPARC/64abi.ll +++ b/llvm/test/CodeGen/SPARC/64abi.ll @@ -473,8 +473,8 @@ declare i64 @receive_fp128(i64 %a, ...) ; HARD-DAG: ldx [%sp+[[Offset0]]], %o2 ; HARD-DAG: ldx [%sp+[[Offset1]]], %o3 ; SOFT-DAG: mov %i0, %o0 -; SOFT-DAG: mov %i1, %o1 ; SOFT-DAG: mov %i2, %o2 +; SOFT-DAG: mov %i3, %o3 ; CHECK: call receive_fp128 define i64 @test_fp128_variable_args(i64 %a, fp128 %b) { entry: @@ -482,6 +482,19 @@ entry: ret i64 %0 } +declare i64 @receive_i128(i64 %a, i128 %b) + +; CHECK-LABEL: test_i128_args: +; CHECK: mov %i3, %o3 +; CHECK: mov %i2, %o2 +; CHECK: mov %i0, %o0 +; CHECK: call receive_i128 +define i64 @test_i128_args(i64 %a, i128 %b) { +entry: + %0 = call i64 @receive_i128(i64 %a, i128 %b) + ret i64 %0 +} + ; CHECK-LABEL: test_call_libfunc: ; HARD: st %f1, [%fp+[[Offset0:[0-9]+]]] ; HARD: fmovs %f3, %f1 diff --git a/llvm/test/CodeGen/SPIRV/capability-FloatControl2.ll b/llvm/test/CodeGen/SPIRV/capability-FloatControl2.ll index aa60e13232b46..b4e283e746125 100644 --- a/llvm/test/CodeGen/SPIRV/capability-FloatControl2.ll +++ b/llvm/test/CodeGen/SPIRV/capability-FloatControl2.ll @@ -8,7 +8,7 @@ ; CHECK-EXT: OpCapability FloatControls2 ; CHECK-EXT: OpExtension "SPV_KHR_float_controls2" -; CHECK-EXT: OpDecorate {{%[0-9]+}} FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|Fast +; CHECK-EXT: OpDecorate {{%[0-9]+}} FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform define hidden spir_func float @foo(float %0) local_unnamed_addr { %2 = fmul reassoc nnan ninf nsz arcp afn float %0, 2.000000e+00 diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll new file mode 100644 index 0000000000000..093d172c5c1b1 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll @@ -0,0 +1,24 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s +; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space" +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] printf + +; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space. + +@.str = private unnamed_addr addrspace(1) constant [4 x i8] c"%d\0A\00", align 1 + +declare spir_func i32 @printf(ptr addrspace(4), ...) + +define spir_kernel void @test_kernel() { +entry: + ; Format string in addrspace(1) → cast to addrspace(4) + %format = addrspacecast ptr addrspace(1) @.str to ptr addrspace(4) + %val = alloca i32, align 4 + store i32 123, ptr %val, align 4 + %loaded = load i32, ptr %val, align 4 + + ; Call printf with non-constant format string + %call = call spir_func i32 (ptr addrspace(4), ...) 
@printf(ptr addrspace(4) %format, i32 %loaded) + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll new file mode 100644 index 0000000000000..b54d59b30309f --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll @@ -0,0 +1,48 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s +; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space" +; CHECK: %[[#ExtInstSetId:]] = OpExtInstImport "OpenCL.std" +; CHECK-DAG: %[[#TypeInt32Id:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#TypeInt8Id:]] = OpTypeInt 8 0 +; CHECK-DAG: %[[#TypeInt64Id:]] = OpTypeInt 64 0 +; CHECK-DAG: %[[#TypeArrayId:]] = OpTypeArray %[[#TypeInt8Id]] %[[#]] +; CHECK-DAG: %[[#ConstantStorClassGlobalPtrTy:]] = OpTypePointer UniformConstant %[[#TypeArrayId]] +; CHECK-DAG: %[[#WGStorClassGlobalPtrTy:]] = OpTypePointer Workgroup %[[#TypeArrayId]] +; CHECK-DAG: %[[#CrossWFStorClassGlobalPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeArrayId]] +; CHECK-DAG: %[[#FunctionStorClassPtrTy:]] = OpTypePointer Function %[[#TypeInt8Id]] +; CHECK-DAG: %[[#WGStorClassPtrTy:]] = OpTypePointer Workgroup %[[#TypeInt8Id]] +; CHECK-DAG: %[[#CrossWFStorClassPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeInt8Id]] +; CHECK: %[[#ConstantCompositeId:]] = OpConstantComposite %[[#TypeArrayId]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpVariable %[[#ConstantStorClassGlobalPtrTy]] UniformConstant %[[#ConstantCompositeId]] +; CHECK: %[[#]] = OpVariable %[[#CrossWFStorClassGlobalPtrTy]] CrossWorkgroup %[[#ConstantCompositeId]] +; CHECK: %[[#]] = OpVariable %[[#WGStorClassGlobalPtrTy]] Workgroup %[[#ConstantCompositeId]] +; CHECK: %[[#GEP1:]] = OpInBoundsPtrAccessChain %[[#FunctionStorClassPtrTy]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP1]] +; CHECK: %[[#GEP2:]] = OpInBoundsPtrAccessChain %[[#CrossWFStorClassPtrTy]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP2]] +; CHECK: %[[#GEP3:]] = OpInBoundsPtrAccessChain %[[#WGStorClassPtrTy]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP3]] + +; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space. 
+ +@0 = internal unnamed_addr addrspace(2) constant [6 x i8] c"Test\0A\00", align 1 +@1 = internal unnamed_addr addrspace(1) constant [6 x i8] c"Test\0A\00", align 1 +@2 = internal unnamed_addr addrspace(3) constant [6 x i8] c"Test\0A\00", align 1 + +define spir_kernel void @test() { + %tmp1 = alloca [6 x i8], align 1 + call void @llvm.memcpy.p0.p2.i64(ptr align 1 %tmp1, ptr addrspace(2) align 1 @0, i64 6, i1 false) + %1 = getelementptr inbounds [6 x i8], ptr %tmp1, i32 0, i32 0 + %2 = call spir_func i32 @_Z18__spirv_ocl_printfPc(ptr %1) + %3 = getelementptr inbounds [6 x i8], ptr addrspace(1) @1, i32 0, i32 0 + %4 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1) %3) + %5 = getelementptr inbounds [6 x i8], ptr addrspace(3) @2, i32 0, i32 0 + %6 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3) %5) + ret void +} + +declare spir_func i32 @_Z18__spirv_ocl_printfPc(ptr) +declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1)) +declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3)) +declare void @llvm.memcpy.p0.p2.i64(ptr captures(none), ptr addrspace(2) captures(none) readonly, i64, i1) diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll new file mode 100644 index 0000000000000..3624f149cb491 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll @@ -0,0 +1,19 @@ +; RUN: not llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_bindless_images %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +; CHECK-ERROR: LLVM ERROR: Parameter value must be a 32-bit scalar in case of Physical32 addressing model or a 64-bit scalar in case of Physical64 addressing model + +target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64" +target triple = "spir64-unknown-unknown" + +define spir_func void @foo(i32 %in) { + %img = call spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32 %in) + %samp = call spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64 42) + %sampImage = call spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64 43) + ret void +} + +declare spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32) + +declare spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64) + +declare spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64) diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll new file mode 100644 index 0000000000000..d3fe9e43450cd --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll @@ -0,0 +1,148 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2 %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2 %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: Capability FloatControls2 +; CHECK: Extension "SPV_KHR_float_controls2" + +; CHECK: OpName %[[#addRes:]] "addRes" +; CHECK: OpName %[[#subRes:]] "subRes" +; CHECK: OpName 
%[[#mulRes:]] "mulRes" +; CHECK: OpName %[[#divRes:]] "divRes" +; CHECK: OpName %[[#remRes:]] "remRes" +; CHECK: OpName %[[#negRes:]] "negRes" +; CHECK: OpName %[[#oeqRes:]] "oeqRes" +; CHECK: OpName %[[#oneRes:]] "oneRes" +; CHECK: OpName %[[#oltRes:]] "oltRes" +; CHECK: OpName %[[#ogtRes:]] "ogtRes" +; CHECK: OpName %[[#oleRes:]] "oleRes" +; CHECK: OpName %[[#ogeRes:]] "ogeRes" +; CHECK: OpName %[[#ordRes:]] "ordRes" +; CHECK: OpName %[[#ueqRes:]] "ueqRes" +; CHECK: OpName %[[#uneRes:]] "uneRes" +; CHECK: OpName %[[#ultRes:]] "ultRes" +; CHECK: OpName %[[#ugtRes:]] "ugtRes" +; CHECK: OpName %[[#uleRes:]] "uleRes" +; CHECK: OpName %[[#ugeRes:]] "ugeRes" +; CHECK: OpName %[[#unoRes:]] "unoRes" +; CHECK: OpName %[[#modRes:]] "modRes" +; CHECK: OpName %[[#maxRes:]] "maxRes" +; CHECK: OpName %[[#maxCommonRes:]] "maxCommonRes" +; CHECK: OpName %[[#addResV:]] "addResV" +; CHECK: OpName %[[#subResV:]] "subResV" +; CHECK: OpName %[[#mulResV:]] "mulResV" +; CHECK: OpName %[[#divResV:]] "divResV" +; CHECK: OpName %[[#remResV:]] "remResV" +; CHECK: OpName %[[#negResV:]] "negResV" +; CHECK: OpName %[[#oeqResV:]] "oeqResV" +; CHECK: OpName %[[#oneResV:]] "oneResV" +; CHECK: OpName %[[#oltResV:]] "oltResV" +; CHECK: OpName %[[#ogtResV:]] "ogtResV" +; CHECK: OpName %[[#oleResV:]] "oleResV" +; CHECK: OpName %[[#ogeResV:]] "ogeResV" +; CHECK: OpName %[[#ordResV:]] "ordResV" +; CHECK: OpName %[[#ueqResV:]] "ueqResV" +; CHECK: OpName %[[#uneResV:]] "uneResV" +; CHECK: OpName %[[#ultResV:]] "ultResV" +; CHECK: OpName %[[#ugtResV:]] "ugtResV" +; CHECK: OpName %[[#uleResV:]] "uleResV" +; CHECK: OpName %[[#ugeResV:]] "ugeResV" +; CHECK: OpName %[[#unoResV:]] "unoResV" +; CHECK: OpName %[[#modResV:]] "modResV" +; CHECK: OpName %[[#maxResV:]] "maxResV" +; CHECK: OpName %[[#maxCommonResV:]] "maxCommonResV" +; CHECK: OpDecorate %[[#subRes]] FPFastMathMode NotNaN +; CHECK: OpDecorate %[[#mulRes]] FPFastMathMode NotInf +; CHECK: OpDecorate %[[#divRes]] FPFastMathMode NSZ +; CHECK: OpDecorate %[[#remRes]] FPFastMathMode AllowRecip +; CHECK: OpDecorate %[[#negRes]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#oeqRes]] FPFastMathMode NotNaN|NotInf +; CHECK: OpDecorate %[[#oltRes]] FPFastMathMode NotNaN +; CHECK: OpDecorate %[[#ogtRes]] FPFastMathMode NotInf +; CHECK: OpDecorate %[[#oleRes]] FPFastMathMode NSZ +; CHECK: OpDecorate %[[#ogeRes]] FPFastMathMode AllowRecip +; CHECK: OpDecorate %[[#ordRes]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#ueqRes]] FPFastMathMode NotNaN|NotInf +; CHECK: OpDecorate %[[#maxRes]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#maxCommonRes]] FPFastMathMode NotNaN|NotInf +; CHECK: OpDecorate %[[#subResV]] FPFastMathMode NotNaN +; CHECK: OpDecorate %[[#mulResV]] FPFastMathMode NotInf +; CHECK: OpDecorate %[[#divResV]] FPFastMathMode NSZ +; CHECK: OpDecorate %[[#remResV]] FPFastMathMode AllowRecip +; CHECK: OpDecorate %[[#negResV]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#oeqResV]] FPFastMathMode NotNaN|NotInf +; CHECK: OpDecorate %[[#oltResV]] FPFastMathMode NotNaN +; CHECK: OpDecorate %[[#ogtResV]] FPFastMathMode NotInf +; CHECK: OpDecorate %[[#oleResV]] FPFastMathMode NSZ +; CHECK: OpDecorate %[[#ogeResV]] FPFastMathMode AllowRecip +; CHECK: OpDecorate %[[#ordResV]] FPFastMathMode 
NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#ueqResV]] FPFastMathMode NotNaN|NotInf +; CHECK: OpDecorate %[[#maxResV]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#maxCommonResV]] FPFastMathMode NotNaN|NotInf + +; Function Attrs: convergent mustprogress nofree nounwind willreturn memory(none) +declare spir_func float @_Z4fmodff(float, float) +declare dso_local spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fmaxff(float noundef nofpclass(nan inf), float noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare dso_local spir_func noundef nofpclass(nan inf) float @_Z23__spirv_ocl_fmax_commonff(float noundef nofpclass(nan inf), float noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare spir_func <2 x float> @_Z4fmodDv2_fDv2_f(<2 x float>, <2 x float>) +declare dso_local spir_func noundef nofpclass(nan inf) <2 x float> @_Z16__spirv_ocl_fmaxDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf), <2 x float> noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare dso_local spir_func noundef nofpclass(nan inf) <2 x float> @_Z23__spirv_ocl_fmax_commonDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf), <2 x float> noundef nofpclass(nan inf)) local_unnamed_addr #1 + +; Function Attrs: convergent mustprogress norecurse nounwind +define weak_odr dso_local spir_kernel void @foo(float %1, float %2) { +entry: + %addRes = fadd float %1, %2 + %subRes = fsub nnan float %1, %2 + %mulRes = fmul ninf float %1, %2 + %divRes = fdiv nsz float %1, %2 + %remRes = frem arcp float %1, %2 + %negRes = fneg fast float %1 + %oeqRes = fcmp nnan ninf oeq float %1, %2 + %oneRes = fcmp one float %1, %2, !spirv.Decorations !3 + %oltRes = fcmp nnan olt float %1, %2, !spirv.Decorations !3 + %ogtRes = fcmp ninf ogt float %1, %2, !spirv.Decorations !3 + %oleRes = fcmp nsz ole float %1, %2, !spirv.Decorations !3 + %ogeRes = fcmp arcp oge float %1, %2, !spirv.Decorations !3 + %ordRes = fcmp fast ord float %1, %2, !spirv.Decorations !3 + %ueqRes = fcmp nnan ninf ueq float %1, %2, !spirv.Decorations !3 + %uneRes = fcmp une float %1, %2, !spirv.Decorations !3 + %ultRes = fcmp ult float %1, %2, !spirv.Decorations !3 + %ugtRes = fcmp ugt float %1, %2, !spirv.Decorations !3 + %uleRes = fcmp ule float %1, %2, !spirv.Decorations !3 + %ugeRes = fcmp uge float %1, %2, !spirv.Decorations !3 + %unoRes = fcmp uno float %1, %2, !spirv.Decorations !3 + %modRes = call spir_func float @_Z4fmodff(float %1, float %2) + %maxRes = tail call fast spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fmaxff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + %maxCommonRes = tail call spir_func noundef float @_Z23__spirv_ocl_fmax_commonff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + ret void +} + +define weak_odr dso_local spir_kernel void @fooV(<2 x float> %v1, <2 x float> %v2) { + %addResV = fadd <2 x float> %v1, %v2 + %subResV = fsub nnan <2 x float> %v1, %v2 + %mulResV = fmul ninf <2 x float> %v1, %v2 + %divResV = fdiv nsz <2 x float> %v1, %v2 + %remResV = frem arcp <2 x float> %v1, %v2 + %negResV = fneg fast <2 x float> %v1 + %oeqResV = fcmp nnan ninf oeq <2 x float> %v1, %v2 + %oneResV = fcmp one <2 x float> %v1, %v2, !spirv.Decorations !3 + %oltResV = fcmp nnan olt <2 x float> %v1, %v2, !spirv.Decorations !3 + %ogtResV = fcmp ninf ogt <2 x float> %v1, %v2, !spirv.Decorations !3 + %oleResV = fcmp nsz ole <2 x float> %v1, %v2, !spirv.Decorations !3 + %ogeResV 
= fcmp arcp oge <2 x float> %v1, %v2, !spirv.Decorations !3 + %ordResV = fcmp fast ord <2 x float> %v1, %v2, !spirv.Decorations !3 + %ueqResV = fcmp nnan ninf ueq <2 x float> %v1, %v2, !spirv.Decorations !3 + %uneResV = fcmp une <2 x float> %v1, %v2, !spirv.Decorations !3 + %ultResV = fcmp ult <2 x float> %v1, %v2, !spirv.Decorations !3 + %ugtResV = fcmp ugt <2 x float> %v1, %v2, !spirv.Decorations !3 + %uleResV = fcmp ule <2 x float> %v1, %v2, !spirv.Decorations !3 + %ugeResV = fcmp uge <2 x float> %v1, %v2, !spirv.Decorations !3 + %unoResV = fcmp uno <2 x float> %v1, %v2, !spirv.Decorations !3 + %modResV = call spir_func <2 x float> @_Z4fmodDv2_fDv2_f(<2 x float> %v1, <2 x float> %v2) + %maxResV = tail call fast spir_func noundef nofpclass(nan inf) <2 x float> @_Z16__spirv_ocl_fmaxDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + %maxCommonResV = tail call spir_func noundef <2 x float> @_Z23__spirv_ocl_fmax_commonDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + ret void +} + +!3 = !{!5, !4} +!4 = !{i32 42} ; 42 is NoContraction decoration +!5 = !{i32 40, i32 393216} ; 40 is FPFastMathMode diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/exec_mode.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/exec_mode.ll new file mode 100644 index 0000000000000..4b3c13c260c51 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/exec_mode.ll @@ -0,0 +1,81 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2,+SPV_KHR_bfloat16 %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2,+SPV_KHR_bfloat16 %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: Capability FloatControls2 +; CHECK: Extension "SPV_KHR_float_controls2" + +define dso_local dllexport spir_kernel void @k_float_controls_half(half %h) { +entry: + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_bfloat(bfloat %b) { +entry: + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_float(float %f) { +entry: + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_double(double %d) { +entry: + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_all(half %h, bfloat %b, float %f, double %d) { +entry: + ret void +} + +!spirv.ExecutionMode = !{!17, !18, !19, !20, !22, !23, !24, !25} + +; CHECK: OpEntryPoint Kernel %[[#KERNEL_HALF:]] "k_float_controls_half" +!0 = !{ptr @k_float_controls_half, !"k_float_controls_half", !6, i32 0, !6, !7, !8, i32 0, i32 0} + +; CHECK: OpEntryPoint Kernel %[[#KERNEL_BFLOAT:]] "k_float_controls_bfloat" +!1 = !{ptr @k_float_controls_bfloat, !"k_float_controls_bfloat", !6, i32 0, !6, !7, !8, i32 0, i32 0} + +; CHECK: OpEntryPoint Kernel %[[#KERNEL_FLOAT:]] "k_float_controls_float" +!2 = !{ptr @k_float_controls_float, !"k_float_controls_float", !6, i32 0, !6, !7, !8, i32 0, i32 0} + +; CHECK: OpEntryPoint Kernel %[[#KERNEL_DOUBLE:]] "k_float_controls_double" +!3 = !{ptr @k_float_controls_double, !"k_float_controls_double", !6, i32 0, !6, !7, !8, i32 0, i32 0} + +; CHECK: OpEntryPoint Kernel %[[#KERNEL_ALL:]] "k_float_controls_all" +!5 = !{ptr @k_float_controls_all, !"k_float_controls_all", !6, i32 0, !6, !7, !8, i32 0, i32 0} +!6 = !{i32 2, i32 2} +!7 = !{i32 32, i32 36} +!8 = !{i32 0, i32 0} + +; CHECK-DAG: OpExecutionModeId 
%[[#KERNEL_HALF]] FPFastMathDefault %[[#HALF_TYPE:]] %[[#CONST1:]] +!17 = !{ptr @k_float_controls_half, i32 6028, half poison, i32 1} + +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_BFLOAT]] FPFastMathDefault %[[#BFLOAT_TYPE:]] %[[#CONST2:]] +!18 = !{ptr @k_float_controls_bfloat, i32 6028, bfloat poison, i32 2} + +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_FLOAT]] FPFastMathDefault %[[#FLOAT_TYPE:]] %[[#CONST4:]] +!19 = !{ptr @k_float_controls_float, i32 6028, float poison, i32 4} + +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_DOUBLE]] FPFastMathDefault %[[#DOUBLE_TYPE:]] %[[#CONST7:]] +!20 = !{ptr @k_float_controls_double, i32 6028, double poison, i32 7} + +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL]] FPFastMathDefault %[[#HALF_TYPE]] %[[#CONST131072:]] +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL]] FPFastMathDefault %[[#FLOAT_TYPE]] %[[#CONST458752:]] +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL]] FPFastMathDefault %[[#DOUBLE_TYPE]] %[[#CONST458752:]] +!22 = !{ptr @k_float_controls_all, i32 6028, half poison, i32 131072} +!23 = !{ptr @k_float_controls_all, i32 6028, bfloat poison, i32 131072} +!24 = !{ptr @k_float_controls_all, i32 6028, float poison, i32 458752} +!25 = !{ptr @k_float_controls_all, i32 6028, double poison, i32 458752} + +; CHECK-DAG: %[[#INT32_TYPE:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#HALF_TYPE]] = OpTypeFloat 16 +; CHECK-DAG: %[[#FLOAT_TYPE]] = OpTypeFloat 32 +; CHECK-DAG: %[[#DOUBLE_TYPE]] = OpTypeFloat 64 +; CHECK-DAG: %[[#CONST1]] = OpConstant %[[#INT32_TYPE]] 1 +; CHECK-DAG: %[[#CONST2]] = OpConstant %[[#INT32_TYPE]] 2 +; CHECK-DAG: %[[#CONST4]] = OpConstant %[[#INT32_TYPE]] 4 +; CHECK-DAG: %[[#CONST7]] = OpConstant %[[#INT32_TYPE]] 7 +; CHECK-DAG: %[[#CONST131072]] = OpConstant %[[#INT32_TYPE]] 131072 +; CHECK-DAG: %[[#CONST458752]] = OpConstant %[[#INT32_TYPE]] 458752 diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/exec_mode2.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/exec_mode2.ll new file mode 100644 index 0000000000000..c0632725e38d9 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/exec_mode2.ll @@ -0,0 +1,73 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2 %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2 %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: Capability FloatControls2 +; CHECK: Extension "SPV_KHR_float_controls2" + +; CHECK: OpEntryPoint Kernel %[[#KERNEL_FLOAT:]] "k_float_controls_float" +; CHECK: OpEntryPoint Kernel %[[#KERNEL_ALL:]] "k_float_controls_all" +; CHECK: OpEntryPoint Kernel %[[#KERNEL_FLOAT_V:]] "k_float_controls_float_v" +; CHECK: OpEntryPoint Kernel %[[#KERNEL_ALL_V:]] "k_float_controls_all_v" + +define dso_local dllexport spir_kernel void @k_float_controls_float(float %f) { +entry: + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_all(half %h, float %f, double %d) { +entry: + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_float_v(<2 x float> %f) { +entry: + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_all_v(<2 x half> %h, <2 x float> %f, <2 x double> %d) { +entry: + ret void +} + +!spirv.ExecutionMode = !{!19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34} + +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_FLOAT]] FPFastMathDefault %[[#FLOAT_TYPE:]] %[[#CONST131079:]] +!19 = !{ptr @k_float_controls_float, 
i32 6028, float poison, i32 131079} +; We expect 131079 for float type. +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL]] FPFastMathDefault %[[#FLOAT_TYPE:]] %[[#CONST131079]] +; We expect 0 for the rest of types because it's SignedZeroInfNanPreserve. +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL]] FPFastMathDefault %[[#HALF_TYPE:]] %[[#CONST0:]] +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL]] FPFastMathDefault %[[#DOUBLE_TYPE:]] %[[#CONST0]] +!20 = !{ptr @k_float_controls_all, i32 6028, float poison, i32 131079} +; ContractionOff is now replaced with FPFastMathDefault with AllowContract bit set to false. +!21 = !{ptr @k_float_controls_float, i32 31} +!22 = !{ptr @k_float_controls_all, i32 31} +; SignedZeroInfNanPreserve is now replaced with FPFastMathDefault with flags 0. +!23 = !{ptr @k_float_controls_float, i32 4461, i32 32} +!24 = !{ptr @k_float_controls_all, i32 4461, i32 16} +!25 = !{ptr @k_float_controls_all, i32 4461, i32 32} +!26 = !{ptr @k_float_controls_all, i32 4461, i32 64} + +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_FLOAT_V]] FPFastMathDefault %[[#FLOAT_TYPE:]] %[[#CONST131079]] +!27 = !{ptr @k_float_controls_float_v, i32 6028, float poison, i32 131079} +; We expect 131079 for float type. +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL_V]] FPFastMathDefault %[[#FLOAT_TYPE:]] %[[#CONST131079]] +; We expect 0 for the rest of types because it's SignedZeroInfNanPreserve. +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL_V]] FPFastMathDefault %[[#HALF_TYPE:]] %[[#CONST0]] +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL_V]] FPFastMathDefault %[[#DOUBLE_TYPE:]] %[[#CONST0]] +!28 = !{ptr @k_float_controls_all_v, i32 6028, float poison, i32 131079} +; ContractionOff is now replaced with FPFastMathDefault with AllowContract bit set to false. +!29 = !{ptr @k_float_controls_float_v, i32 31} +!30 = !{ptr @k_float_controls_all_v, i32 31} +; SignedZeroInfNanPreserve is now replaced with FPFastMathDefault with flags 0. +!31 = !{ptr @k_float_controls_float_v, i32 4461, i32 32} +!32 = !{ptr @k_float_controls_all_v, i32 4461, i32 16} +!33 = !{ptr @k_float_controls_all_v, i32 4461, i32 32} +!34 = !{ptr @k_float_controls_all_v, i32 4461, i32 64} + +; CHECK-DAG: %[[#INT32_TYPE:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#HALF_TYPE]] = OpTypeFloat 16 +; CHECK-DAG: %[[#FLOAT_TYPE]] = OpTypeFloat 32 +; CHECK-DAG: %[[#DOUBLE_TYPE]] = OpTypeFloat 64 +; CHECK-DAG: %[[#CONST0]] = OpConstantNull %[[#INT32_TYPE]] +; CHECK-DAG: %[[#CONST131079]] = OpConstant %[[#INT32_TYPE]] 131079 diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/exec_mode3.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/exec_mode3.ll new file mode 100644 index 0000000000000..1d09187b7f6a1 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/exec_mode3.ll @@ -0,0 +1,103 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2 %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2 %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: Capability FloatControls2 +; CHECK: Extension "SPV_KHR_float_controls2" +; CHECK: OpEntryPoint Kernel %[[#KERNEL_FLOAT:]] "k_float_controls_float" +; CHECK: OpEntryPoint Kernel %[[#KERNEL_ALL:]] "k_float_controls_all" +; CHECK: OpEntryPoint Kernel %[[#KERNEL_FLOAT_V:]] "k_float_controls_float_v" +; CHECK: OpEntryPoint Kernel %[[#KERNEL_ALL_V:]] "k_float_controls_all_v" + +; We expect 131079 for float type. 
+; CHECK-DAG: OpExecutionModeId %[[#KERNEL_FLOAT]] FPFastMathDefault %[[#FLOAT_TYPE:]] %[[#CONST131079:]] +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL]] FPFastMathDefault %[[#FLOAT_TYPE:]] %[[#CONST131079]] +; We expect 0 for the rest of types because it's SignedZeroInfNanPreserve. +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL]] FPFastMathDefault %[[#HALF_TYPE:]] %[[#CONST0:]] +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL]] FPFastMathDefault %[[#DOUBLE_TYPE:]] %[[#CONST0]] + +; We expect 131079 for float type. +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_FLOAT_V]] FPFastMathDefault %[[#FLOAT_TYPE:]] %[[#CONST131079]] +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL_V]] FPFastMathDefault %[[#FLOAT_TYPE:]] %[[#CONST131079]] +; We expect 0 for the rest of types because it's SignedZeroInfNanPreserve. +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL_V]] FPFastMathDefault %[[#HALF_TYPE:]] %[[#CONST0]] +; CHECK-DAG: OpExecutionModeId %[[#KERNEL_ALL_V]] FPFastMathDefault %[[#DOUBLE_TYPE:]] %[[#CONST0]] + +; CHECK-DAG: OpDecorate %[[#addRes:]] FPFastMathMode NotNaN|NotInf|NSZ|AllowReassoc +; CHECK-DAG: OpDecorate %[[#addResH:]] FPFastMathMode None +; CHECK-DAG: OpDecorate %[[#addResF:]] FPFastMathMode NotNaN|NotInf|NSZ|AllowReassoc +; CHECK-DAG: OpDecorate %[[#addResD:]] FPFastMathMode None +; CHECK-DAG: OpDecorate %[[#addRes_V:]] FPFastMathMode NotNaN|NotInf|NSZ|AllowReassoc +; CHECK-DAG: OpDecorate %[[#addResH_V:]] FPFastMathMode None +; CHECK-DAG: OpDecorate %[[#addResF_V:]] FPFastMathMode NotNaN|NotInf|NSZ|AllowReassoc +; CHECK-DAG: OpDecorate %[[#addResD_V:]] FPFastMathMode None + +; CHECK-DAG: %[[#INT32_TYPE:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#HALF_TYPE]] = OpTypeFloat 16 +; CHECK-DAG: %[[#FLOAT_TYPE]] = OpTypeFloat 32 +; CHECK-DAG: %[[#DOUBLE_TYPE]] = OpTypeFloat 64 +; CHECK-DAG: %[[#CONST0]] = OpConstantNull %[[#INT32_TYPE]] +; CHECK-DAG: %[[#CONST131079]] = OpConstant %[[#INT32_TYPE]] 131079 + +; CHECK-DAG: %[[#HALF_V_TYPE:]] = OpTypeVector %[[#HALF_TYPE]] +; CHECK-DAG: %[[#FLOAT_V_TYPE:]] = OpTypeVector %[[#FLOAT_TYPE]] +; CHECK-DAG: %[[#DOUBLE_V_TYPE:]] = OpTypeVector %[[#DOUBLE_TYPE]] + +define dso_local dllexport spir_kernel void @k_float_controls_float(float %f) { +entry: +; CHECK-DAG: %[[#addRes]] = OpFAdd %[[#FLOAT_TYPE]] + %addRes = fadd float %f, %f + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_all(half %h, float %f, double %d) { +entry: +; CHECK-DAG: %[[#addResH]] = OpFAdd %[[#HALF_TYPE]] +; CHECK-DAG: %[[#addResF]] = OpFAdd %[[#FLOAT_TYPE]] +; CHECK-DAG: %[[#addResD]] = OpFAdd %[[#DOUBLE_TYPE]] + %addResH = fadd half %h, %h + %addResF = fadd float %f, %f + %addResD = fadd double %d, %d + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_float_v(<2 x float> %f) { +entry: +; CHECK-DAG: %[[#addRes_V]] = OpFAdd %[[#FLOAT_V_TYPE]] + %addRes = fadd <2 x float> %f, %f + ret void +} + +define dso_local dllexport spir_kernel void @k_float_controls_all_v(<2 x half> %h, <2 x float> %f, <2 x double> %d) { +entry: +; CHECK-DAG: %[[#addResH_V]] = OpFAdd %[[#HALF_V_TYPE]] +; CHECK-DAG: %[[#addResF_V]] = OpFAdd %[[#FLOAT_V_TYPE]] +; CHECK-DAG: %[[#addResD_V]] = OpFAdd %[[#DOUBLE_V_TYPE]] + %addResH = fadd <2 x half> %h, %h + %addResF = fadd <2 x float> %f, %f + %addResD = fadd <2 x double> %d, %d + ret void +} + +!spirv.ExecutionMode = !{!19, !20, !21, !22, !23, !24, !25, !26, !27, !28, !29, !30, !31, !32, !33, !34} + +!19 = !{ptr @k_float_controls_float, i32 6028, float poison, i32 131079} +!20 = !{ptr @k_float_controls_all, i32 6028, 
float poison, i32 131079} +; ContractionOff is now replaced with FPFastMathDefault with AllowContract bit set to false. +!21 = !{ptr @k_float_controls_float, i32 31} +!22 = !{ptr @k_float_controls_all, i32 31} +; SignedZeroInfNanPreserve is now replaced with FPFastMathDefault with flags 0. +!23 = !{ptr @k_float_controls_float, i32 4461, i32 32} +!24 = !{ptr @k_float_controls_all, i32 4461, i32 16} +!25 = !{ptr @k_float_controls_all, i32 4461, i32 32} +!26 = !{ptr @k_float_controls_all, i32 4461, i32 64} + +!27 = !{ptr @k_float_controls_float_v, i32 6028, float poison, i32 131079} +!28 = !{ptr @k_float_controls_all_v, i32 6028, float poison, i32 131079} +; ContractionOff is now replaced with FPFastMathDefault with AllowContract bit set to false. +!29 = !{ptr @k_float_controls_float_v, i32 31} +!30 = !{ptr @k_float_controls_all_v, i32 31} +; SignedZeroInfNanPreserve is now replaced with FPFastMathDefault with flags 0. +!31 = !{ptr @k_float_controls_float_v, i32 4461, i32 32} +!32 = !{ptr @k_float_controls_all_v, i32 4461, i32 16} +!33 = !{ptr @k_float_controls_all_v, i32 4461, i32 32} +!34 = !{ptr @k_float_controls_all_v, i32 4461, i32 64} diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/replacements.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/replacements.ll new file mode 100644 index 0000000000000..bba1c93a7e78d --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/replacements.ll @@ -0,0 +1,61 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2 %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_KHR_float_controls2 %s -o - -filetype=obj | spirv-val %} + +;; This test checks that the OpenCL.std instructions fmin_common, fmax_common are replaced with fmin, fmax with NInf and NNaN instead. 
+ +; CHECK-DAG: Capability FloatControls2 +; CHECK: Extension "SPV_KHR_float_controls2" + +; CHECK: OpName %[[#maxRes:]] "maxRes" +; CHECK: OpName %[[#maxCommonRes:]] "maxCommonRes" +; CHECK: OpName %[[#minRes:]] "minRes" +; CHECK: OpName %[[#minCommonRes:]] "minCommonRes" +; CHECK: OpName %[[#maxResV:]] "maxResV" +; CHECK: OpName %[[#maxCommonResV:]] "maxCommonResV" +; CHECK: OpName %[[#minResV:]] "minResV" +; CHECK: OpName %[[#minCommonResV:]] "minCommonResV" +; CHECK: OpDecorate %[[#maxRes]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#maxCommonRes]] FPFastMathMode NotNaN|NotInf +; CHECK: OpDecorate %[[#minRes]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#minCommonRes]] FPFastMathMode NotNaN|NotInf +; CHECK: OpDecorate %[[#maxResV]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#maxCommonResV]] FPFastMathMode NotNaN|NotInf +; CHECK: OpDecorate %[[#minResV]] FPFastMathMode NotNaN|NotInf|NSZ|AllowRecip|AllowContract|AllowReassoc|AllowTransform +; CHECK: OpDecorate %[[#minCommonResV]] FPFastMathMode NotNaN|NotInf +; CHECK: %[[#maxRes]] = OpExtInst {{.*}} fmax +; CHECK: %[[#maxCommonRes]] = OpExtInst {{.*}} fmax +; CHECK: %[[#minRes]] = OpExtInst {{.*}} fmin +; CHECK: %[[#minCommonRes]] = OpExtInst {{.*}} fmin +; CHECK: %[[#maxResV]] = OpExtInst {{.*}} fmax +; CHECK: %[[#maxCommonResV]] = OpExtInst {{.*}} fmax +; CHECK: %[[#minResV]] = OpExtInst {{.*}} fmin +; CHECK: %[[#minCommonResV]] = OpExtInst {{.*}} fmin + +; Function Attrs: convergent mustprogress nofree nounwind willreturn memory(none) +declare spir_func float @_Z4fmodff(float, float) +declare dso_local spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fmaxff(float noundef nofpclass(nan inf), float noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare dso_local spir_func noundef nofpclass(nan inf) float @_Z23__spirv_ocl_fmax_commonff(float noundef nofpclass(nan inf), float noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare dso_local spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fminff(float noundef nofpclass(nan inf), float noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare dso_local spir_func noundef nofpclass(nan inf) float @_Z23__spirv_ocl_fmin_commonff(float noundef nofpclass(nan inf), float noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare dso_local spir_func noundef nofpclass(nan inf) <2 x float> @_Z16__spirv_ocl_fmaxDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf), <2 x float> noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare dso_local spir_func noundef nofpclass(nan inf) <2 x float> @_Z23__spirv_ocl_fmax_commonDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf), <2 x float> noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare dso_local spir_func noundef nofpclass(nan inf) <2 x float> @_Z16__spirv_ocl_fminDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf), <2 x float> noundef nofpclass(nan inf)) local_unnamed_addr #1 +declare dso_local spir_func noundef nofpclass(nan inf) <2 x float> @_Z23__spirv_ocl_fmin_commonDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf), <2 x float> noundef nofpclass(nan inf)) local_unnamed_addr #1 + +; Function Attrs: convergent mustprogress norecurse nounwind +define weak_odr dso_local spir_kernel void @foo(float %1, float %2) { +entry: + %maxRes = tail call fast spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fmaxff(float 
noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + %maxCommonRes = tail call spir_func noundef float @_Z23__spirv_ocl_fmax_commonff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + %minRes = tail call fast spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fminff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + %minCommonRes = tail call spir_func noundef float @_Z23__spirv_ocl_fmin_commonff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2) + ret void +} + +define weak_odr dso_local spir_kernel void @fooV(<2 x float> %v1, <2 x float> %v2) { + %maxResV = tail call fast spir_func noundef nofpclass(nan inf) <2 x float> @_Z16__spirv_ocl_fmaxDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + %maxCommonResV = tail call spir_func noundef <2 x float> @_Z23__spirv_ocl_fmax_commonDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + %minResV = tail call fast spir_func noundef nofpclass(nan inf) <2 x float> @_Z16__spirv_ocl_fminDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + %minCommonResV = tail call spir_func noundef <2 x float> @_Z23__spirv_ocl_fmin_commonDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2) + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/isnan.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/isnan.ll new file mode 100644 index 0000000000000..67bb0cd8240f3 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/isnan.ll @@ -0,0 +1,45 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv-unknown-vulkan %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan %s -o - -filetype=obj | spirv-val --target-env spv1.4 %} + +; CHECK-DAG: %[[#float_16:]] = OpTypeFloat 16 +; CHECK-DAG: %[[#float_32:]] = OpTypeFloat 32 +; CHECK-DAG: %[[#vec4_float_16:]] = OpTypeVector %[[#float_16]] 4 +; CHECK-DAG: %[[#vec4_float_32:]] = OpTypeVector %[[#float_32]] 4 +; CHECK-DAG: %[[#bool:]] = OpTypeBool +; CHECK-DAG: %[[#vec4_bool:]] = OpTypeVector %[[#bool]] 4 + +define noundef i1 @isnan_half(half noundef %a) { +entry: + ; CHECK: %[[#]] = OpFunction %[[#bool]] None %[[#]] + ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#float_16]] + ; CHECK: %[[#]] = OpIsNan %[[#bool]] %[[#arg0]] + %hlsl.isnan = call i1 @llvm.spv.isnan.f16(half %a) + ret i1 %hlsl.isnan +} + +define noundef i1 @isnan_float(float noundef %a) { +entry: + ; CHECK: %[[#]] = OpFunction %[[#bool]] None %[[#]] + ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#float_32]] + ; CHECK: %[[#]] = OpIsNan %[[#bool]] %[[#arg0]] + %hlsl.isnan = call i1 @llvm.spv.isnan.f32(float %a) + ret i1 %hlsl.isnan +} + +define noundef <4 x i1> @isnan_half4(<4 x half> noundef %a) { +entry: + ; CHECK: %[[#]] = OpFunction %[[#vec4_bool]] None %[[#]] + ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#vec4_float_16]] + ; CHECK: %[[#]] = OpIsNan %[[#vec4_bool]] %[[#arg0]] + %hlsl.isnan = call <4 x i1> @llvm.spv.isnan.v4f16(<4 x half> %a) + ret <4 x i1> %hlsl.isnan +} + +define noundef <4 x i1> @isnan_float4(<4 x float> noundef %a) { +entry: + ; CHECK: %[[#]] = OpFunction %[[#vec4_bool]] None %[[#]] + ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#vec4_float_32]] + ; CHECK: %[[#]] = OpIsNan %[[#vec4_bool]] %[[#arg0]] + %hlsl.isnan = call <4 x i1> @llvm.spv.isnan.v4f32(<4 x float> %a) + ret <4 x i1> %hlsl.isnan +} diff --git 
a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll index cd524980ed275..2964da9058104 100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll @@ -32,6 +32,7 @@ ; CHECK-DAG: OpDecorate [[g]] Binding 0 ; CHECK-DAG: OpDecorate [[h]] DescriptorSet 10 ; CHECK-DAG: OpDecorate [[h]] Binding 3 +; CHECK-NOT: OpDecorate [[h]] Binding 4 ; CHECK-DAG: OpDecorate [[i]] DescriptorSet 10 ; CHECK-DAG: OpDecorate [[i]] Binding 2 @@ -44,30 +45,34 @@ entry: %3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 2, i32 1, i32 0, ptr nonnull @.str.6) %4 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 1, i32 1, i32 0, ptr nonnull @.str.8) %5 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 2, i32 10, i32 1, i32 0, ptr nonnull @.str.10) - %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 1, i32 0, ptr nonnull @.str.12) - %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, ptr nonnull @.str.14) - %8 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0) - %9 = load i32, ptr addrspace(11) %8, align 4 - %10 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0) - %11 = load i32, ptr addrspace(11) %10, align 4 - %add.i = add nsw i32 %11, %9 - %12 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0) - %13 = load i32, ptr addrspace(11) %12, align 4 - %add4.i = add nsw i32 %add.i, %13 - %14 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0) - %15 = load i32, ptr addrspace(11) %14, align 4 - %add6.i = add nsw i32 %add4.i, %15 - %16 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0) - %17 = load i32, ptr addrspace(11) %16, align 4 - %add8.i = add nsw i32 %add6.i, %17 - %18 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0) - %19 = load i32, ptr addrspace(11) %18, align 4 - %add10.i = add nsw i32 %add8.i, %19 - %20 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0) - %21 = load i32, ptr addrspace(11) %20, align 4 - %add12.i = add nsw i32 %add10.i, %21 - %22 = tail call 
noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0) - store i32 %add12.i, ptr addrspace(11) %22, align 4 + %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 2, i32 0, ptr nonnull @.str.12) + %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 2, i32 1, ptr nonnull @.str.12) + %8 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, ptr nonnull @.str.14) + %9 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0) + %10 = load i32, ptr addrspace(11) %9, align 4 + %11 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0) + %12 = load i32, ptr addrspace(11) %11, align 4 + %add.i = add nsw i32 %12, %10 + %13 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0) + %14 = load i32, ptr addrspace(11) %13, align 4 + %add4.i = add nsw i32 %add.i, %14 + %15 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0) + %16 = load i32, ptr addrspace(11) %15, align 4 + %add6.i = add nsw i32 %add4.i, %16 + %17 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0) + %18 = load i32, ptr addrspace(11) %17, align 4 + %add8.i = add nsw i32 %add6.i, %18 + %19 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0) + %20 = load i32, ptr addrspace(11) %19, align 4 + %add10.i = add nsw i32 %add8.i, %20 + %21 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0) + %22 = load i32, ptr addrspace(11) %21, align 4 + %add12.i = add nsw i32 %add10.i, %22 + %23 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %8, i32 0) + %24 = load i32, ptr addrspace(11) %23, align 4 + %add14.i = add nsw i32 %add12.i, %24 + %25 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0) + store i32 %add14.i, ptr addrspace(11) %25, align 4 ret void } diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageNonUniformIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageNonUniformIdx.ll index 08b2756fbab00..5e15aab7ddee0 
100644 --- a/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageNonUniformIdx.ll +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/StorageImageNonUniformIdx.ll @@ -1,8 +1,8 @@ ; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.5-vulkan-library %s -o - | FileCheck %s ; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.5-vulkan-library %s -o - -filetype=obj | spirv-val %} -; This test depends on NonUniform resource analysis -; https://github.com/llvm/llvm-project/issues/155701 +; This test depends on llvm.spv.resource.nonuniformindex support (not yet implemented) +; https://github.com/llvm/llvm-project/issues/160231 ; XFAIL: * @.str.b0 = private unnamed_addr constant [3 x i8] c"B0\00", align 1 diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll new file mode 100644 index 0000000000000..c968c99e4d58a --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll @@ -0,0 +1,19 @@ +; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR +; CHECK-ERROR: LLVM ERROR: Implicit binding calls with the same order ID must have the same descriptor set + +@.str = private unnamed_addr constant [2 x i8] c"b\00", align 1 +@.str.2 = private unnamed_addr constant [2 x i8] c"c\00", align 1 + +define void @main() local_unnamed_addr #0 { +entry: + %0 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str) + %1 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0) + %2 = load i32, ptr addrspace(11) %1, align 4 + %3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 1, i32 1, i32 0, ptr nonnull @.str.2) + %4 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0) + store i32 %2, ptr addrspace(11) %4, align 4 + ret void +} + + +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } diff --git a/llvm/test/CodeGen/SPIRV/image_store.ll b/llvm/test/CodeGen/SPIRV/image_store.ll new file mode 100644 index 0000000000000..a70651c974f36 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/image_store.ll @@ -0,0 +1,22 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; Image types may be represented in two ways while translating to SPIR-V: +; - OpenCL form, for example, '%opencl.image2d_ro_t', +; - SPIR-V form, for example, '%spirv.Image._void_1_0_0_0_0_0_0', +; but it is still one type which should be translated to one SPIR-V type. +; +; The test checks that the code below is successfully translated and only one +; SPIR-V type for images is generated (no duplicate OpTypeImage instructions). 
+ +; CHECK: %[[#]] = OpTypeImage %[[#]] 2D +; CHECK-NOT: %[[#]] = OpTypeImage %[[#]] 2D + +declare spir_func <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1), ptr addrspace(2), <2 x float>, float) + +define spir_kernel void @read_image(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler){ +entry: + %spirvimg.addr = alloca target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), align 8 + %val = call <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler, <2 x float> zeroinitializer, float 0.0) + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll new file mode 100644 index 0000000000000..b788f34bf7238 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll @@ -0,0 +1,28 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-LABEL: Begin function original_testcase +define fastcc void @original_testcase() { +top: + ; CHECK: OpCompositeInsert + %0 = insertvalue [1 x ptr] zeroinitializer, ptr poison, 0 + ret void +} + +; CHECK-LABEL: Begin function additional_testcases +define fastcc void @additional_testcases() { +top: + ; Test with different pointer types + ; CHECK: OpCompositeInsert + %1 = insertvalue [1 x ptr] zeroinitializer, ptr undef, 0 + ; CHECK-NEXT: OpCompositeInsert + %2 = insertvalue {ptr, i32} zeroinitializer, ptr poison, 0 + ; CHECK-NEXT: OpCompositeInsert + %3 = insertvalue {ptr, ptr} undef, ptr null, 0 + + ; Test with undef aggregate + ; CHECK-NEXT: OpCompositeInsert + %4 = insertvalue [1 x ptr] undef, ptr undef, 0 + + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/instructions/integer-casts.ll b/llvm/test/CodeGen/SPIRV/instructions/integer-casts.ll index 6a4b4f593bf3b..5fe2cc883ceb9 100644 --- a/llvm/test/CodeGen/SPIRV/instructions/integer-casts.ll +++ b/llvm/test/CodeGen/SPIRV/instructions/integer-casts.ll @@ -14,11 +14,11 @@ ; CHECK-DAG: OpName [[ZEXT8_16:%.*]] "u8tou16" ; CHECK-DAG: OpName [[ZEXT16_32:%.*]] "u16tou32" +; CHECK-DAG: OpName %[[#R16:]] "r16" ; CHECK-DAG: OpName %[[#R17:]] "r17" ; CHECK-DAG: OpName %[[#R18:]] "r18" ; CHECK-DAG: OpName %[[#R19:]] "r19" ; CHECK-DAG: OpName %[[#R20:]] "r20" -; CHECK-DAG: OpName %[[#R21:]] "r21" ; CHECK-DAG: OpName [[TRUNC32_16v4:%.*]] "i32toi16v4" ; CHECK-DAG: OpName [[TRUNC32_8v4:%.*]] "i32toi8v4" @@ -30,11 +30,11 @@ ; CHECK-DAG: OpName [[ZEXT8_16v4:%.*]] "u8tou16v4" ; CHECK-DAG: OpName [[ZEXT16_32v4:%.*]] "u16tou32v4" -; CHECK-DAG: OpDecorate %[[#R17]] FPRoundingMode RTZ -; CHECK-DAG: OpDecorate %[[#R18]] FPRoundingMode RTE -; CHECK-DAG: OpDecorate %[[#R19]] FPRoundingMode RTP -; CHECK-DAG: OpDecorate %[[#R20]] FPRoundingMode RTN -; CHECK-DAG: OpDecorate %[[#R21]] SaturatedConversion +; CHECK-DAG: OpDecorate %[[#R16]] FPRoundingMode RTZ +; CHECK-DAG: OpDecorate %[[#R17]] FPRoundingMode RTE +; CHECK-DAG: OpDecorate %[[#R18]] FPRoundingMode RTP +; CHECK-DAG: OpDecorate %[[#R19]] FPRoundingMode RTN +; CHECK-DAG: OpDecorate %[[#R20]] SaturatedConversion ; CHECK-DAG: [[F32:%.*]] = OpTypeFloat 32 ; CHECK-DAG: [[F16:%.*]] = OpTypeFloat 16 @@ -258,7 +258,6 @@ define <4 x i32> @u16tou32v4(<4 x i16> %a) { ; CHECK: %[[#]] = OpUConvert [[U32]] %[[#]] ; CHECK: %[[#]] = OpSConvert [[U32]] %[[#]] ; CHECK: %[[#]] = OpFConvert [[F16]] %[[#]] -; CHECK: %[[#]] = OpQuantizeToF16 
[[F32]] %[[#]] ; CHECK: %[[#]] = OpSatConvertSToU [[U64]] %[[#]] ; CHECK: %[[#]] = OpSatConvertUToS [[U64]] %[[#]] ; CHECK: %[[#]] = OpConvertPtrToU [[U64]] [[Arg1]] @@ -267,11 +266,11 @@ define <4 x i32> @u16tou32v4(<4 x i16> %a) { ; CHECK: %[[#]] = OpSConvert [[U32v4]] %[[#]] ; CHECK: %[[#]] = OpConvertUToF [[F32]] %[[#]] ; CHECK: %[[#]] = OpConvertUToF [[F32]] %[[#]] +; CHECK: %[[#R16]] = OpFConvert [[F32v2]] %[[#]] ; CHECK: %[[#R17]] = OpFConvert [[F32v2]] %[[#]] ; CHECK: %[[#R18]] = OpFConvert [[F32v2]] %[[#]] ; CHECK: %[[#R19]] = OpFConvert [[F32v2]] %[[#]] -; CHECK: %[[#R20]] = OpFConvert [[F32v2]] %[[#]] -; CHECK: %[[#R21]] = OpConvertFToU [[U8]] %[[#]] +; CHECK: %[[#R20]] = OpConvertFToU [[U8]] %[[#]] ; CHECK: OpFunctionEnd define dso_local spir_kernel void @test_wrappers(ptr addrspace(4) %arg, i64 %arg_ptr, <4 x i8> %arg_v2) { %r1 = call spir_func i32 @__spirv_ConvertFToU(float 0.000000e+00) @@ -281,20 +280,19 @@ define dso_local spir_kernel void @test_wrappers(ptr addrspace(4) %arg, i64 %arg %r5 = call spir_func i32 @__spirv_UConvert(i64 1) %r6 = call spir_func i32 @__spirv_SConvert(i64 1) %r7 = call spir_func half @__spirv_FConvert(float 0.000000e+00) - %r8 = call spir_func float @__spirv_QuantizeToF16(float 0.000000e+00) - %r9 = call spir_func i64 @__spirv_SatConvertSToU(i64 1) - %r10 = call spir_func i64 @__spirv_SatConvertUToS(i64 1) - %r11 = call spir_func i64 @__spirv_ConvertPtrToU(ptr addrspace(4) %arg) - %r12 = call spir_func ptr addrspace(4) @__spirv_ConvertUToPtr(i64 %arg_ptr) - %r13 = call spir_func <4 x i32> @_Z22__spirv_UConvert_Rint2Dv2_a(<4 x i8> %arg_v2) - %r14 = call spir_func <4 x i32> @_Z22__spirv_SConvert_Rint2Dv2_a(<4 x i8> %arg_v2) - %r15 = call spir_func float @_Z30__spirv_ConvertUToF_Rfloat_rtz(i64 %arg_ptr) - %r16 = call spir_func float @__spirv_ConvertUToF_Rfloat_rtz(i64 %arg_ptr) - %r17 = call spir_func <2 x float> @_Z28__spirv_FConvert_Rfloat2_rtzDv2_DF16_(<2 x half> noundef ) - %r18 = call spir_func <2 x float> @_Z28__spirv_FConvert_Rfloat2_rteDv2_DF16_(<2 x half> noundef ) - %r19 = call spir_func <2 x float> @_Z28__spirv_FConvert_Rfloat2_rtpDv2_DF16_(<2 x half> noundef ) - %r20 = call spir_func <2 x float> @_Z28__spirv_FConvert_Rfloat2_rtnDv2_DF16_(<2 x half> noundef ) - %r21 = call spir_func i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float noundef 42.0) + %r8 = call spir_func i64 @__spirv_SatConvertSToU(i64 1) + %r9 = call spir_func i64 @__spirv_SatConvertUToS(i64 1) + %r10 = call spir_func i64 @__spirv_ConvertPtrToU(ptr addrspace(4) %arg) + %r11 = call spir_func ptr addrspace(4) @__spirv_ConvertUToPtr(i64 %arg_ptr) + %r12 = call spir_func <4 x i32> @_Z22__spirv_UConvert_Rint2Dv2_a(<4 x i8> %arg_v2) + %r13 = call spir_func <4 x i32> @_Z22__spirv_SConvert_Rint2Dv2_a(<4 x i8> %arg_v2) + %r14 = call spir_func float @_Z30__spirv_ConvertUToF_Rfloat_rtz(i64 %arg_ptr) + %r15 = call spir_func float @__spirv_ConvertUToF_Rfloat_rtz(i64 %arg_ptr) + %r16 = call spir_func <2 x float> @_Z28__spirv_FConvert_Rfloat2_rtzDv2_DF16_(<2 x half> noundef ) + %r17 = call spir_func <2 x float> @_Z28__spirv_FConvert_Rfloat2_rteDv2_DF16_(<2 x half> noundef ) + %r18 = call spir_func <2 x float> @_Z28__spirv_FConvert_Rfloat2_rtpDv2_DF16_(<2 x half> noundef ) + %r19 = call spir_func <2 x float> @_Z28__spirv_FConvert_Rfloat2_rtnDv2_DF16_(<2 x half> noundef ) + %r20 = call spir_func i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float noundef 42.0) ret void } @@ -305,7 +303,6 @@ declare dso_local spir_func float @__spirv_ConvertUToF(i32) declare dso_local spir_func i32 
@__spirv_UConvert(i64) declare dso_local spir_func i32 @__spirv_SConvert(i64) declare dso_local spir_func half @__spirv_FConvert(float) -declare dso_local spir_func float @__spirv_QuantizeToF16(float) declare dso_local spir_func i64 @__spirv_SatConvertSToU(i64) declare dso_local spir_func i64 @__spirv_SatConvertUToS(i64) declare dso_local spir_func i64 @__spirv_ConvertPtrToU(ptr addrspace(4)) diff --git a/llvm/test/CodeGen/SPIRV/instructions/quantizeto16.ll b/llvm/test/CodeGen/SPIRV/instructions/quantizeto16.ll new file mode 100644 index 0000000000000..0b12ba465b289 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/instructions/quantizeto16.ll @@ -0,0 +1,15 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %} + +; TODO: Implement support for the SPIR-V QuantizeToF16 operation +; XFAIL: * + +; CHECK-DAG: [[F32:%.*]] = OpTypeFloat 32 +; CHECK: %[[#]] = OpQuantizeToF16 [[F32]] %[[#]] +define spir_func void @test_wrappers() { + entry: + %r8 = call spir_func float @__spirv_QuantizeToF16(float 0.000000e+00) + ret void +} + +declare dso_local spir_func float @__spirv_QuantizeToF16(float) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll new file mode 100644 index 0000000000000..49bb8eac10be8 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll @@ -0,0 +1,56 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: OpFOrdEqual +; CHECK-DAG: OpFOrdGreaterThan +; CHECK-DAG: OpFOrdGreaterThanEqual +; CHECK-DAG: OpFOrdLessThan +; CHECK-DAG: OpFOrdLessThanEqual +; CHECK-DAG: OpFOrdNotEqual +; CHECK-DAG: OpOrdered +; CHECK-DAG: OpFUnordEqual +; CHECK-DAG: OpFUnordGreaterThan +; CHECK-DAG: OpFUnordGreaterThanEqual +; CHECK-DAG: OpFUnordLessThan +; CHECK-DAG: OpFUnordLessThanEqual +; CHECK-DAG: OpFUnordNotEqual +; CHECK-DAG: OpUnordered + +define dso_local spir_kernel void @test(float %a){ +entry: + %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oeq", metadata !"fpexcept.strict") + %cmp1 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"ogt", metadata !"fpexcept.strict") + %cmp2 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oge", metadata !"fpexcept.strict") + %cmp3 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"olt", metadata !"fpexcept.strict") + %cmp4 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ole", metadata !"fpexcept.strict") + %cmp5 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"one", metadata !"fpexcept.strict") + %cmp6 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ord", metadata !"fpexcept.strict") + %cmp7 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ueq", metadata !"fpexcept.strict") + %cmp8 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ugt", metadata !"fpexcept.strict") + %cmp9 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uge", metadata !"fpexcept.strict") + 
%cmp10 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ult", metadata !"fpexcept.strict") + %cmp11 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ule", metadata !"fpexcept.strict") + %cmp12 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"une", metadata !"fpexcept.strict") + %cmp13 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uno", metadata !"fpexcept.strict") + + %or1 = or i1 %cmp, %cmp1 + %or2 = or i1 %or1, %cmp2 + %or3 = or i1 %or2, %cmp3 + %or4 = or i1 %or3, %cmp4 + %or5 = or i1 %or4, %cmp5 + %or6 = or i1 %or5, %cmp6 + %or7 = or i1 %or6, %cmp7 + %or8 = or i1 %or7, %cmp8 + %or9 = or i1 %or8, %cmp9 + %or10 = or i1 %or9, %cmp10 + %or11 = or i1 %or10, %cmp11 + %or12 = or i1 %or11, %cmp12 + %or13 = or i1 %or12, %cmp13 + br i1 %or13, label %true_block, label %false_block +true_block: + ret void +false_block: + ret void +} +declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata) +declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll new file mode 100644 index 0000000000000..fd8cb9d7ff6f0 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll @@ -0,0 +1,14 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s + +; CHECK: OpNop +; CHECK-NEXT: OpReturn + +declare void @llvm.debugtrap() + +define spir_kernel void @foo(ptr addrspace(1) %a){ +entry: + %a.addr = alloca ptr addrspace(1), align 4 + store ptr addrspace(1) %a, ptr %a.addr, align 4 + call void @llvm.debugtrap() + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll new file mode 100644 index 0000000000000..f6434e94a9d79 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll @@ -0,0 +1,114 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[#extinst_id:]] = OpExtInstImport "OpenCL.std" +; CHECK-DAG: %[[#float_32_type:]] = OpTypeFloat 32 +; CHECK-DAG: %[[#int_32_type:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#fn_ptr_type_i32:]] = OpTypePointer Function %[[#int_32_type]] +; CHECK-DAG: %[[#const_negzero:]] = OpConstant %[[#float_32_type]] -0 +; CHECK-DAG: %[[#vec2_float_type:]] = OpTypeVector %[[#float_32_type]] 2 +; CHECK-DAG: %[[#vec2_int_type:]] = OpTypeVector %[[#int_32_type]] 2 +; CHECK-DAG: %[[#fn_ptr_type_vec2_i32:]] = OpTypePointer Function %[[#vec2_int_type]] +; CHECK-DAG: %[[#vec2_null:]] = OpConstantNull %[[#vec2_float_type]] +; CHECK-DAG: %[[#scalar_null:]] = OpConstantNull %[[#float_32_type]] +; CHECK-DAG: %[[#const_composite1:]] = OpConstantComposite %[[#vec2_float_type]] %[[#scalar_null]] %[[#const_negzero]] +; CHECK-DAG: %[[#vec4_float_type:]] = OpTypeVector %[[#float_32_type]] 4 +; CHECK-DAG: %[[#vec4_int_type:]] = OpTypeVector %[[#int_32_type]] 4 +; CHECK-DAG: %[[#fn_ptr_type_vec4_i32:]] = OpTypePointer Function %[[#vec4_int_type]] +; CHECK-DAG: %[[#const_composite2:]] = OpConstantComposite %[[#vec4_float_type]] %[[#const_16:]] %[[#const_neg32:]] %[[#const_0:]] %[[#const_9999:]] +; CHECK-DAG: %[[#float_64_type:]] = OpTypeFloat 64 +; CHECK-DAG: %[[#vec2_double_type:]] = OpTypeVector 
%[[#float_64_type]] 2 + +; CHECK: %[[#]] = OpFunctionParameter %[[#float_32_type]] +; CHECK: %[[#var1:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#extinst1:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#const_negzero]] %[[#var1]] +; CHECK: %[[#exp_part_var:]] = OpLoad %[[#int_32_type]] %[[#var1]] +; CHECK: OpReturnValue %[[#exp_part_var]] +define i32 @frexp_negzero(float %x) { + %ret = call { float, i32 } @llvm.frexp.f32.i32(float -0.0) + %f_part = extractvalue { float, i32 } %ret, 0 + %exp_part = extractvalue { float, i32 } %ret, 1 + ret i32 %exp_part +} + +; CHECK: %[[#x_var4:]] = OpFunctionParameter %[[#float_32_type]] +; CHECK: %[[#var10:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#extinst10:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var4]] %[[#var10]] +; CHECK: %[[#exp_part_var2:]] = OpLoad %[[#int_32_type]] %[[#var10]] +; CHECK: OpReturnValue %[[#exp_part_var2]] +define i32 @frexp_frexp_get_int(float %x) { + %frexp0 = call { float, i32 } @llvm.frexp.f32.i32(float %x) + %f_part = extractvalue { float, i32 } %frexp0, 0 + %exp_part = extractvalue { float, i32 } %frexp0, 1 + ret i32 %exp_part +} + +; CHECK: %[[#var3:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function +; CHECK: %[[#extinst3:]] = OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#vec2_null]] %[[#var3]] +; CHECK: %[[#f_part_var2:]] = OpLoad %[[#vec2_int_type]] %[[#var3]] +; CHECK: OpReturnValue %[[#extinst3]] +define <2 x float> @frexp_zero_vector() { + %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> zeroinitializer) + %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0 + %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1 + ret <2 x float> %f_part +} + +; CHECK: %[[#var4:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function +; CHECK: %[[#extinst4:]] = OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#const_composite1]] %[[#var4]] +; CHECK: %[[#f_part_var3:]] = OpLoad %[[#vec2_int_type]] %[[#var4]] +; CHECK: OpReturnValue %[[#extinst4]] +define <2 x float> @frexp_zero_negzero_vector() { + %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> ) + %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0 + %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1 + ret <2 x float> %f_part +} + +; CHECK: %[[#var5:]] = OpVariable %[[#fn_ptr_type_vec4_i32]] Function +; CHECK: %[[#extinst5:]] = OpExtInst %[[#vec4_float_type]] %[[#extinst_id]] frexp %[[#const_composite2]] %[[#var5]] +; CHECK: %[[#f_part_var4:]] = OpLoad %[[#vec4_int_type]] %[[#var5]] +; CHECK: OpReturnValue %[[#extinst5]] +define <4 x float> @frexp_nonsplat_vector() { + %ret = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> ) + %f_part = extractvalue { <4 x float>, <4 x i32> } %ret, 0 + %exp_part = extractvalue { <4 x float>, <4 x i32> } %ret, 1 + ret <4 x float> %f_part +} + +; CHECK: %[[#x_var2:]] = OpFunctionParameter %[[#float_32_type]] +; CHECK: %[[#var6:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#var7:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#extinst6:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var2]] %[[#var6]] +; CHECK: %[[#load1:]] = OpLoad %[[#int_32_type]] %[[#var6]] +; CHECK: %[[#extinst7:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#extinst6]] %[[#var7]] +; CHECK: %[[#f_part_var5:]] = OpLoad %[[#int_32_type]] %[[#var7]] +; CHECK: OpReturnValue %[[#extinst7]] +define float @frexp_frexp(float %x) { + %frexp0 = call { 
float, i32 } @llvm.frexp.f32.i32(float %x) + %frexp0_f_part = extractvalue { float, i32 } %frexp0, 0 + %frexp0_exp_part = extractvalue { float, i32 } %frexp0, 1 + %frexp1 = call { float, i32 } @llvm.frexp.f32.i32(float %frexp0_f_part) + %frexp1_f_part = extractvalue { float, i32 } %frexp1, 0 + %frexp1_exp_part = extractvalue { float, i32 } %frexp1, 1 + ret float %frexp1_f_part +} + +; CHECK: %[[#x_var3:]] = OpFunctionParameter %[[#vec2_double_type]] +; CHECK: %[[#var9:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function +; CHECK: %[[#extinst9:]] = OpExtInst %[[#vec2_double_type]] %[[#extinst_id]] frexp %[[#x_var3]] %[[#var9]] +; CHECK: %[[#f_part_var6:]] = OpLoad %[[#vec2_int_type]] %[[#var9]] +; CHECK: OpReturnValue %[[#extinst9]] +define <2 x double> @frexp_frexp_vector(<2 x double> %x) { + %frexp0 = call { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double> %x) + %f_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 0 + %exp_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 1 + ret <2 x double> %f_part +} + +declare { float, i32 } @llvm.frexp.f32.i32(float) +declare { double, i32 } @llvm.frexp.f64.i32(double) +declare { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float>) +declare { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float>) +declare { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double>) +declare { float, i8 } @llvm.frexp.f32.i8(float) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll index a15a80754cd60..b3ef6d6bbced9 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll @@ -11,7 +11,6 @@ define spir_kernel void @foo(ptr %p) { entry: call void @llvm.trap() - call void @llvm.debugtrap() call void @llvm.ubsantrap(i8 100) %r1 = call ptr @llvm.invariant.start.p0(i64 1024, ptr %p) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llround.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llround.ll new file mode 100644 index 0000000000000..2695237508af0 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/llround.ll @@ -0,0 +1,87 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: [[opencl:%[0-9]+]] = OpExtInstImport "OpenCL.std" +; CHECK-DAG: [[f32:%[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: [[i32:%[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: [[f64:%[0-9]+]] = OpTypeFloat 64 +; CHECK-DAG: [[i64:%[0-9]+]] = OpTypeInt 64 0 +; CHECK-DAG: [[vecf32:%[0-9]+]] = OpTypeVector [[f32]] +; CHECK-DAG: [[veci32:%[0-9]+]] = OpTypeVector [[i32]] +; CHECK-DAG: [[vecf64:%[0-9]+]] = OpTypeVector [[f64]] +; CHECK-DAG: [[veci64:%[0-9]+]] = OpTypeVector [[i64]] + +; CHECK: [[rounded_i32_f32:%[0-9]+]] = OpExtInst [[f32]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[i32]] [[rounded_i32_f32]] +; CHECK: [[rounded_i32_f64:%[0-9]+]] = OpExtInst [[f64]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[i32]] [[rounded_i32_f64]] +; CHECK: [[rounded_i64_f32:%[0-9]+]] = OpExtInst [[f32]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[i64]] [[rounded_i64_f32]] +; CHECK: [[rounded_i64_f64:%[0-9]+]] = OpExtInst [[f64]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[i64]] [[rounded_i64_f64]] +; CHECK: [[rounded_v4i32_f32:%[0-9]+]] = OpExtInst [[vecf32]] 
[[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[veci32]] [[rounded_v4i32_f32]] +; CHECK: [[rounded_v4i32_f64:%[0-9]+]] = OpExtInst [[vecf64]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[veci32]] [[rounded_v4i32_f64]] +; CHECK: [[rounded_v4i64_f32:%[0-9]+]] = OpExtInst [[vecf32]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[veci64]] [[rounded_v4i64_f32]] +; CHECK: [[rounded_v4i64_f64:%[0-9]+]] = OpExtInst [[vecf64]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[veci64]] [[rounded_v4i64_f64]] + +define spir_func i32 @test_llround_i32_f32(float %arg0) { +entry: + %0 = call i32 @llvm.llround.i32.f32(float %arg0) + ret i32 %0 +} + +define spir_func i32 @test_llround_i32_f64(double %arg0) { +entry: + %0 = call i32 @llvm.llround.i32.f64(double %arg0) + ret i32 %0 +} + +define spir_func i64 @test_llround_i64_f32(float %arg0) { +entry: + %0 = call i64 @llvm.llround.i64.f32(float %arg0) + ret i64 %0 +} + +define spir_func i64 @test_llround_i64_f64(double %arg0) { +entry: + %0 = call i64 @llvm.llround.i64.f64(double %arg0) + ret i64 %0 +} + +define spir_func <4 x i32> @test_llround_v4i32_f32(<4 x float> %arg0) { +entry: + %0 = call <4 x i32> @llvm.llround.v4i32.f32(<4 x float> %arg0) + ret <4 x i32> %0 +} + +define spir_func <4 x i32> @test_llround_v4i32_f64(<4 x double> %arg0) { +entry: + %0 = call <4 x i32> @llvm.llround.v4i32.f64(<4 x double> %arg0) + ret <4 x i32> %0 +} + +define spir_func <4 x i64> @test_llround_v4i64_f32(<4 x float> %arg0) { +entry: + %0 = call <4 x i64> @llvm.llround.v4i64.f32(<4 x float> %arg0) + ret <4 x i64> %0 +} + +define spir_func <4 x i64> @test_llround_v4i64_f64(<4 x double> %arg0) { +entry: + %0 = call <4 x i64> @llvm.llround.v4i64.f64(<4 x double> %arg0) + ret <4 x i64> %0 +} + +declare i32 @llvm.llround.i32.f32(float) +declare i32 @llvm.llround.i32.f64(double) +declare i64 @llvm.llround.i64.f32(float) +declare i64 @llvm.llround.i64.f64(double) + +declare <4 x i32> @llvm.llround.v4i32.f32(<4 x float>) +declare <4 x i32> @llvm.llround.v4i32.f64(<4 x double>) +declare <4 x i64> @llvm.llround.v4i64.f32(<4 x float>) +declare <4 x i64> @llvm.llround.v4i64.f64(<4 x double>) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lround.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lround.ll new file mode 100644 index 0000000000000..891f1ceb5b238 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lround.ll @@ -0,0 +1,87 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: [[opencl:%[0-9]+]] = OpExtInstImport "OpenCL.std" +; CHECK-DAG: [[f32:%[0-9]+]] = OpTypeFloat 32 +; CHECK-DAG: [[i32:%[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: [[f64:%[0-9]+]] = OpTypeFloat 64 +; CHECK-DAG: [[i64:%[0-9]+]] = OpTypeInt 64 0 +; CHECK-DAG: [[vecf32:%[0-9]+]] = OpTypeVector [[f32]] +; CHECK-DAG: [[veci32:%[0-9]+]] = OpTypeVector [[i32]] +; CHECK-DAG: [[vecf64:%[0-9]+]] = OpTypeVector [[f64]] +; CHECK-DAG: [[veci64:%[0-9]+]] = OpTypeVector [[i64]] + +; CHECK: [[rounded_i32_f32:%[0-9]+]] = OpExtInst [[f32]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[i32]] [[rounded_i32_f32]] +; CHECK: [[rounded_i32_f64:%[0-9]+]] = OpExtInst [[f64]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[i32]] [[rounded_i32_f64]] +; CHECK: [[rounded_i64_f32:%[0-9]+]] = OpExtInst [[f32]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS 
[[i64]] [[rounded_i64_f32]] +; CHECK: [[rounded_i64_f64:%[0-9]+]] = OpExtInst [[f64]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[i64]] [[rounded_i64_f64]] +; CHECK: [[rounded_v4i32_f32:%[0-9]+]] = OpExtInst [[vecf32]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[veci32]] [[rounded_v4i32_f32]] +; CHECK: [[rounded_v4i32_f64:%[0-9]+]] = OpExtInst [[vecf64]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[veci32]] [[rounded_v4i32_f64]] +; CHECK: [[rounded_v4i64_f32:%[0-9]+]] = OpExtInst [[vecf32]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[veci64]] [[rounded_v4i64_f32]] +; CHECK: [[rounded_v4i64_f64:%[0-9]+]] = OpExtInst [[vecf64]] [[opencl]] round %[[#]] +; CHECK-NEXT: %[[#]] = OpConvertFToS [[veci64]] [[rounded_v4i64_f64]] + +define spir_func i32 @test_lround_i32_f32(float %arg0) { +entry: + %0 = call i32 @llvm.lround.i32.f32(float %arg0) + ret i32 %0 +} + +define spir_func i32 @test_lround_i32_f64(double %arg0) { +entry: + %0 = call i32 @llvm.lround.i32.f64(double %arg0) + ret i32 %0 +} + +define spir_func i64 @test_lround_i64_f32(float %arg0) { +entry: + %0 = call i64 @llvm.lround.i64.f32(float %arg0) + ret i64 %0 +} + +define spir_func i64 @test_lround_i64_f64(double %arg0) { +entry: + %0 = call i64 @llvm.lround.i64.f64(double %arg0) + ret i64 %0 +} + +define spir_func <4 x i32> @test_lround_v4i32_f32(<4 x float> %arg0) { +entry: + %0 = call <4 x i32> @llvm.lround.v4i32.f32(<4 x float> %arg0) + ret <4 x i32> %0 +} + +define spir_func <4 x i32> @test_lround_v4i32_f64(<4 x double> %arg0) { +entry: + %0 = call <4 x i32> @llvm.lround.v4i32.f64(<4 x double> %arg0) + ret <4 x i32> %0 +} + +define spir_func <4 x i64> @test_lround_v4i64_f32(<4 x float> %arg0) { +entry: + %0 = call <4 x i64> @llvm.lround.v4i64.f32(<4 x float> %arg0) + ret <4 x i64> %0 +} + +define spir_func <4 x i64> @test_lround_v4i64_f64(<4 x double> %arg0) { +entry: + %0 = call <4 x i64> @llvm.lround.v4i64.f64(<4 x double> %arg0) + ret <4 x i64> %0 +} + +declare i32 @llvm.lround.i32.f32(float) +declare i32 @llvm.lround.i32.f64(double) +declare i64 @llvm.lround.i64.f32(float) +declare i64 @llvm.lround.i64.f64(double) + +declare <4 x i32> @llvm.lround.v4i32.f32(<4 x float>) +declare <4 x i32> @llvm.lround.v4i32.f64(<4 x double>) +declare <4 x i64> @llvm.lround.v4i64.f32(<4 x float>) +declare <4 x i64> @llvm.lround.v4i64.f64(<4 x double>) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll new file mode 100644 index 0000000000000..51b76640cc056 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll @@ -0,0 +1,86 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV-NOT: llvm.memmove + +; CHECK-DAG: %[[#Int8:]] = OpTypeInt 8 0 +; CHECK-DAG: %[[#Int32:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#Int64:]] = OpTypeInt 64 0 +; CHECK-DAG: %[[#Ptr_CrossWG_8:]] = OpTypePointer CrossWorkgroup %[[#Int8]] +; CHECK-DAG: %[[#Ptr_Generic_32:]] = OpTypePointer Generic %[[#Int32]] +; CHECK-DAG: %[[#Const_64:]] = OpConstant %[[#Int32]] 64 +; CHECK-DAG: %[[#Const_36:]] = OpConstant %[[#Int32]] 36 +; CHECK-DAG: %[[#Const_30:]] = OpConstant %[[#Int32]] 30 +; CHECK-DAG: %[[#Const_32_64:]] = OpConstant %[[#Int64]] 32 + +; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Param2:]] = OpFunctionParameter 
%[[#Ptr_CrossWG_8]] +; CHECK: %[[#Size1:]] = OpUConvert %[[#Int64]] %[[#Const_64]] +; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size1]] Aligned 64 + +; CHECK: %[[#Src:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#CastDst2:]] = OpGenericCastToPtr %[[#Ptr_CrossWG_8]] %[[#GenPtr:]] +; CHECK: %[[#Size2:]] = OpUConvert %[[#Int64]] %[[#Const_36]] +; CHECK: OpCopyMemorySized %[[#CastDst2]] %[[#Src]] %[[#Size2]] Aligned 64 + +; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Param2:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Size3:]] = OpUConvert %[[#Int64]] %[[#Const_30]] +; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size3]] Aligned 1 + +; CHECK: %[[#Phi:]] = OpPhi %[[#Ptr_Generic_32]] %[[#Op1:]] %[[#Lbl1:]] %[[#Op2:]] %[[#Lbl2:]] +; CHECK: %[[#Cast:]] = OpPtrCastToGeneric %[[#]] %[[#]] +; CHECK: OpCopyMemorySized %[[#Cast]] %[[#Phi]] %[[#Const_32_64]] Aligned 8 + +%struct.SomeStruct = type { <16 x float>, i32, [60 x i8] } +%class.kfunc = type <{ i32, i32, i32, [4 x i8] }> + +@InvocIndex = external local_unnamed_addr addrspace(1) constant i64, align 8 +@"func_object1" = internal addrspace(3) global %class.kfunc zeroinitializer, align 8 + +define spir_kernel void @test_full_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(1)* captures(none) %out) { + %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)* + %2 = bitcast %struct.SomeStruct addrspace(1)* %out to i8 addrspace(1)* + call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %2, i8 addrspace(1)* align 64 %1, i32 64, i1 false) + ret void +} + +define spir_kernel void @test_partial_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(4)* captures(none) %out) { + %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)* + %2 = bitcast %struct.SomeStruct addrspace(4)* %out to i8 addrspace(4)* + %3 = addrspacecast i8 addrspace(4)* %2 to i8 addrspace(1)* + call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %3, i8 addrspace(1)* align 64 %1, i32 36, i1 false) + ret void +} + +define spir_kernel void @test_array(i8 addrspace(1)* %in, i8 addrspace(1)* %out) { + call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i32 30, i1 false) + ret void +} + +define weak_odr dso_local spir_kernel void @test_phi() local_unnamed_addr { +entry: + %0 = alloca i32, align 8 + %1 = addrspacecast i32* %0 to i32 addrspace(4)* + %2 = load i64, i64 addrspace(1)* @InvocIndex, align 8 + %cmp = icmp eq i64 %2, 0 + br i1 %cmp, label %leader, label %entry.merge_crit_edge + +entry.merge_crit_edge: ; preds = %entry + %3 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)* + br label %merge + +leader: ; preds = %entry + %4 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)* + br label %merge + +merge: ; preds = %entry.merge_crit_edge, %leader + %phi = phi i8 addrspace(4)* [ %3, %entry.merge_crit_edge ], [ %4, %leader ] + %5 = addrspacecast i8 addrspace(3)* bitcast (%class.kfunc addrspace(3)* @"func_object1" to i8 addrspace(3)*) to i8 addrspace(4)* + call void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* align 8 dereferenceable(32) %5, i8 addrspace(4)* align 8 dereferenceable(32) %phi, i64 32, i1 false) + ret void +} + +declare void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* captures(none) writeonly, i8 addrspace(4)* captures(none) readonly, i64, i1 immarg) + +declare void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* captures(none), i8 
addrspace(1)* captures(none) readonly, i32, i1) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll new file mode 100644 index 0000000000000..52f939faf0a9f --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll @@ -0,0 +1,30 @@ +; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -filetype=obj -o - | spirv-val %} +; XFAIL: * +; @llvm.sadd.with.overflow and @llvm.ssub.with.overflow have not been implemented. + +define spir_func void @test_sadd_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) { +entry: + %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) + %val = extractvalue { i32, i1 } %res, 0 + %ofl = extractvalue { i32, i1 } %res, 1 + store i32 %val, ptr %out_result + %zext_ofl = zext i1 %ofl to i8 + store i8 %zext_ofl, ptr %out_overflow + ret void +} + +declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) + +define spir_func void @test_ssub_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) { +entry: + %res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) + %val = extractvalue { i32, i1 } %res, 0 + %ofl = extractvalue { i32, i1 } %res, 1 + store i32 %val, ptr %out_result + %zext_ofl = zext i1 %ofl to i8 + store i8 %zext_ofl, ptr %out_overflow + ret void +} + +declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll index e405ef0ed58a5..5e66b8b639f17 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll @@ -7,10 +7,11 @@ ;; ;; Positive tests: ;; -; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV ;; ;; Negative tests: ;; +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV-NEGATIVE ;; Check that backend is able to skip nsw/nuw attributes if extension is ;; disabled implicitly or explicitly and if max SPIR-V version is lower then 1.4 diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll new file mode 100644 index 0000000000000..c8953c701d47d --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll @@ -0,0 +1,11 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV: [[#PtrT:]] = OpTypePointer Workgroup %[[#]] +; CHECK-SPIRV: %[[#]] = OpVariable %[[#PtrT]] Workgroup + +@test_atomic_fn.L = internal addrspace(3) global [64 x i32] zeroinitializer, align 4 + +define spir_kernel void @test_atomic_fn() { + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll new file mode 100644 index 0000000000000..607997d034f09 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll @@ -0,0 +1,140 @@ +; RUN: llc 
-verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpCapability Kernel +; CHECK: OpCapability Addresses +; CHECK: OpCapability Pipes +; CHECK: OpCapability Int8 +; CHECK: OpCapability GenericPointer + +; CHECK-DAG: %[[#PipeWriteTy:]] = OpTypePipe WriteOnly +; CHECK-DAG: %[[#PipeReadTy:]] = OpTypePipe ReadOnly +; CHECK-DAG: %[[#ReserveIdTy:]] = OpTypeReserveId +; CHECK-DAG: %[[#BoolTy:]] = OpTypeBool +; CHECK-DAG: %[[#Int32Ty:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#Uint1:]] = OpConstant %[[#Int32Ty]] 1 +; CHECK-DAG: %[[#Uint2:]] = OpConstant %[[#Int32Ty]] 2 +; CHECK-DAG: %[[#Uint3:]] = OpConstant %[[#Int32Ty]] 3 +; CHECK-DAG: %[[#Uint4:]] = OpConstant %[[#Int32Ty]] 4 +; CHECK-DAG: %[[#NullUint:]] = OpConstantNull %[[#Int32Ty]] + +; CHECK: OpFunction +; CHECK: %[[#FuncParam1:]] = OpFunctionParameter %[[#PipeWriteTy]] +; CHECK: %[[#FuncParam2:]] = OpFunctionParameter %[[#PipeReadTy]] + +; CHECK: %[[#BasicWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#BasicWriteReserve]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#BasicReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#BasicReadReserve]] %[[#Uint4]] %[[#Uint4]] + +; --- Reserved pipe operations --- +; CHECK: %[[#ReservedWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#ReservedWrite:]] = OpReservedWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#IsValidWrite:]] = OpIsValidReserveId %[[#BoolTy]] %[[#ReservedWriteReserve]] +; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#ReservedReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#ReservedRead:]] = OpReservedReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#IsValidRead:]] = OpIsValidReserveId %[[#BoolTy]] %[[#ReservedReadReserve]] +; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#Uint4]] %[[#Uint4]] + +; --- Pipe packet queries --- +; CHECK: %[[#MaxPacketsWO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#MaxPacketsWO]] Aligned 4 +; CHECK: %[[#NumPacketsWO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#NumPacketsWO]] Aligned 4 +; CHECK: %[[#MaxPacketsRO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam2]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#MaxPacketsRO]] Aligned 4 +; CHECK: %[[#NumPacketsRO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam2]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#NumPacketsRO]] Aligned 4 + +; --- Workgroup operations --- +; CHECK: %[[#WorkgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]] +; CHECK: OpGroupCommitWritePipe 
%[[#Uint2]] %[[#FuncParam1]] %[[#WorkgroupWriteReserve]] %[[#Uint1]] %[[#Uint1]] +; CHECK: %[[#WorkgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]] +; CHECK: OpGroupCommitReadPipe %[[#Uint2]] %[[#FuncParam2]] %[[#WorkgroupReadReserve]] %[[#Uint1]] %[[#Uint1]] + +; --- Subgroup operations --- +; CHECK: %[[#SubgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] %[[#Uint3]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpGroupCommitWritePipe %[[#Uint3]] %[[#FuncParam1]] %[[#SubgroupWriteReserve]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#SubgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint3]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpGroupCommitReadPipe %[[#Uint3]] %[[#FuncParam2]] %[[#SubgroupReadReserve]] %[[#Uint4]] %[[#Uint4]] + +define spir_kernel void @test_pipe_builtins( + target("spirv.Pipe", 1) %out_pipe, + target("spirv.Pipe", 0) %in_pipe, + ptr addrspace(4) %src, + ptr addrspace(4) %dst, + ptr addrspace(1) %max_packets_wo, + ptr addrspace(1) %num_packets_wo, + ptr addrspace(1) %max_packets_ro, + ptr addrspace(1) %num_packets_ro +) { +entry: + ; Basic pipe operations + %0 = call spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + %1 = call spir_func i32 @__write_pipe_2(target("spirv.Pipe", 1) %out_pipe, ptr addrspace(4) %src, i32 4, i32 4) + call spir_func void @__commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %0, i32 4, i32 4) + + %2 = call spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + %3 = call spir_func i32 @__read_pipe_2(target("spirv.Pipe", 0) %in_pipe, ptr addrspace(4) %dst, i32 4, i32 4) + call spir_func void @__commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %2, i32 4, i32 4) + + ; Reserved pipe operations + %4 = call spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + %5 = call spir_func i32 @__write_pipe_4(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %4, i32 0, ptr addrspace(4) %src, i32 4, i32 4) + %6 = call spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId") %4) + call spir_func void @__commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %4, i32 4, i32 4) + + %7 = call spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + %8 = call spir_func i32 @__read_pipe_4(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %7, i32 0, ptr addrspace(4) %dst, i32 4, i32 4) + %9 = call spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId") %7) + call spir_func void @__commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %7, i32 4, i32 4) + + ; Pipe packet queries + %10 = call spir_func i32 @__get_pipe_max_packets_wo(target("spirv.Pipe", 1) %out_pipe, i32 4, i32 4) + store i32 %10, ptr addrspace(1) %max_packets_wo, align 4 + %11 = call spir_func i32 @__get_pipe_num_packets_wo(target("spirv.Pipe", 1) %out_pipe, i32 4, i32 4) + store i32 %11, ptr addrspace(1) %num_packets_wo, align 4 + %12 = call spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4) + store i32 %12, ptr addrspace(1) %max_packets_ro, align 4 + %13 = call spir_func i32 
@__get_pipe_num_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4) + store i32 %13, ptr addrspace(1) %num_packets_ro, align 4 + + ; Workgroup operations + %14 = call spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 1, i32 1) + call spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %14, i32 1, i32 1) + %15 = call spir_func target("spirv.ReserveId") @__work_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 1, i32 1) + call spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %15, i32 1, i32 1) + + ; Subgroup operations + %16 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + call spir_func void @__sub_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %16, i32 4, i32 4) + %17 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + call spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %17, i32 4, i32 4) + + ret void +} + +declare spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func i32 @__write_pipe_2(target("spirv.Pipe", 1), ptr addrspace(4), i32, i32) +declare spir_func i32 @__read_pipe_2(target("spirv.Pipe", 0), ptr addrspace(4), i32, i32) +declare spir_func i32 @__write_pipe_4(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32) +declare spir_func i32 @__read_pipe_4(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32) +declare spir_func void @__commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func void @__commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) +declare spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId")) +declare spir_func i32 @__get_pipe_max_packets_wo(target("spirv.Pipe", 1), i32, i32) +declare spir_func i32 @__get_pipe_num_packets_wo(target("spirv.Pipe", 1), i32, i32) +declare spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0), i32, i32) +declare spir_func i32 @__get_pipe_num_packets_ro(target("spirv.Pipe", 0), i32, i32) +declare spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__work_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func void @__sub_group_commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) diff --git 
a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll new file mode 100644 index 0000000000000..4c64a127a7019 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll @@ -0,0 +1,16 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpDecorate %[[#Id:]] BuiltIn GlobalInvocationId +; CHECK: %[[#Id]] = OpVariable %[[#]] CrossWorkgroup + +@__spirv_BuiltInGlobalInvocationId = external dso_local local_unnamed_addr addrspace(1) constant <3 x i64>, align 32 + +define spir_kernel void @f() { +entry: + %0 = load i64, ptr addrspace(1) @__spirv_BuiltInGlobalInvocationId, align 32 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll new file mode 100644 index 0000000000000..74ce26bee9cf3 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll @@ -0,0 +1,30 @@ +; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; Check saturation conversion is translated when there is forward declaration +; of SPIRV entry. + +; CHECK: OpDecorate %[[#SAT:]] SaturatedConversion +; CHECK: %[[#SAT]] = OpConvertFToU %[[#]] %[[#]] + +declare spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float) + +define spir_func void @forward(float %val, i8 %initval, ptr addrspace(1) %dst) { +entry: + br label %for.cond + +for.cond: ; preds = %for.body, %entry + %new_val.0 = phi i8 [ %initval, %entry ], [ %call1, %for.body ] + %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %cmp = icmp ult i32 %i.0, 1 + br i1 %cmp, label %for.body, label %for.end + +for.body: ; preds = %for.cond + %call1 = call spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float noundef %val) + %inc = add i32 %i.0, 1 + br label %for.cond + +for.end: ; preds = %for.cond + store i8 %new_val.0, ptr addrspace(1) %dst, align 1 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/float16.ll b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll new file mode 100644 index 0000000000000..0018dba68d4ea --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll @@ -0,0 +1,25 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV: %[[#HALF:]] = OpTypeFloat 16 +; CHECK-SPIRV: %[[#HALFPTR:]] = OpTypePointer Function %[[#HALF]] +; CHECK-SPIRV: %[[#HALFV2:]] = OpTypeVector %[[#HALF]] 2 +; CHECK-SPIRV: %[[#HALFV2PTR:]] = OpTypePointer Function %[[#HALFV2]] +; CHECK-SPIRV: %[[#CONST:]] = OpConstant %[[#HALF]] 14788 +; CHECK-SPIRV: %[[#ADDR:]] = OpVariable %[[#HALFPTR]] Function +; CHECK-SPIRV: %[[#ADDR2:]] = OpVariable %[[#HALFV2PTR]] Function +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#HALF]] %[[#]] fract %[[#CONST]] %[[#ADDR]] +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#HALFV2]] %[[#]] fract %[[#]] %[[#ADDR2]] + +define spir_kernel void @test() { +entry: + %addr = alloca half + %addr2 = alloca <2 x half> + 
%res = call spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef 0xH39C4, ptr noundef %addr) + %res2 = call spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef , ptr noundef %addr2) + ret void +} + +declare spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef, ptr noundef) local_unnamed_addr + +declare spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef, ptr noundef) local_unnamed_addr diff --git a/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll b/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll index d3d641357ae58..eb7c1b632dba9 100644 --- a/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll +++ b/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll @@ -235,7 +235,7 @@ define half @f12_half(half %dummy, half %val, ptr %dest) { ; CHECK-NEXT: blah %f0 ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: brasl %r14, __extendhfsf2@PLT -; CHECK-NEXT: ltebr %f0, %f0 +; CHECK-NEXT: ltebr %f1, %f0 ; CHECK-NEXT: jl .LBB11_2 ; CHECK-NEXT:# %bb.1: ; CHECK-NEXT: lgdr %r0, %f8 @@ -344,7 +344,7 @@ define half @f15_half(half %val, half %dummy, ptr %dest) { ; CHECK-NEXT: blah %f2 ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: brasl %r14, __extendhfsf2@PLT -; CHECK-NEXT: ltebr %f0, %f0 +; CHECK-NEXT: ltebr %f1, %f0 ; CHECK-NEXT: jl .LBB15_2 ; CHECK-NEXT:# %bb.1: ; CHECK-NEXT: lgdr %r0, %f8 diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll index 9346098f0371b..696938c27b0f5 100644 --- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll +++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll @@ -173,8 +173,8 @@ define dso_local i32 @and_mul_reduce_add(ptr noalias nocapture readonly %a, ptr ; CHECK-LABEL: and_mul_reduce_add: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: push {r4, lr} -; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: ldr.w r12, [sp, #12] +; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: ldr.w r12, [sp, #16] ; CHECK-NEXT: cmp.w r12, #0 ; CHECK-NEXT: beq .LBB2_4 ; CHECK-NEXT: @ %bb.1: @ %vector.ph @@ -195,9 +195,14 @@ define dso_local i32 @and_mul_reduce_add(ptr noalias nocapture readonly %a, ptr ; CHECK-NEXT: vstr p0, [sp] @ 4-byte Spill ; CHECK-NEXT: sub.w r12, r12, #4 ; CHECK-NEXT: vsub.i32 q1, q2, q1 -; CHECK-NEXT: vpsttt +; CHECK-NEXT: vpst ; CHECK-NEXT: vcmpt.i32 eq, q1, zr +; CHECK-NEXT: vstr p0, [sp, #4] @ 4-byte Spill +; CHECK-NEXT: vldr p0, [sp, #4] @ 4-byte Reload +; CHECK-NEXT: vpst ; CHECK-NEXT: vldrwt.u32 q1, [r3], #16 +; CHECK-NEXT: vldr p0, [sp, #4] @ 4-byte Reload +; CHECK-NEXT: vpst ; CHECK-NEXT: vldrwt.u32 q2, [r2], #16 ; CHECK-NEXT: vmul.i32 q1, q2, q1 ; CHECK-NEXT: vadd.i32 q1, q1, q0 @@ -206,11 +211,11 @@ define dso_local i32 @and_mul_reduce_add(ptr noalias nocapture readonly %a, ptr ; CHECK-NEXT: vldr p0, [sp] @ 4-byte Reload ; CHECK-NEXT: vpsel q0, q1, q0 ; CHECK-NEXT: vaddv.u32 r0, q0 -; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: add sp, #8 ; CHECK-NEXT: pop {r4, pc} ; CHECK-NEXT: .LBB2_4: ; CHECK-NEXT: movs r0, #0 -; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: add sp, #8 ; CHECK-NEXT: pop {r4, pc} ptr noalias nocapture readonly %c, ptr noalias nocapture readonly %d, i32 %N) { entry: diff --git a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll index 41d2c02a73cd0..5a79659436f34 100644 --- a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll +++ b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll @@ -348,38 +348,35 @@ entry: define <4 x float> 
@vector_add_f32(<4 x float> %lhs, <4 x float> %rhs) { ; CHECK-MVE-LABEL: vector_add_f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, lr} -; CHECK-MVE-NEXT: push {r4, r5, r6, r7, lr} -; CHECK-MVE-NEXT: .pad #4 -; CHECK-MVE-NEXT: sub sp, #4 +; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-MVE-NEXT: push.w {r4, r5, r6, r7, r8, lr} ; CHECK-MVE-NEXT: .vsave {d8, d9} ; CHECK-MVE-NEXT: vpush {d8, d9} -; CHECK-MVE-NEXT: mov r4, r0 +; CHECK-MVE-NEXT: mov r8, r0 ; CHECK-MVE-NEXT: add r0, sp, #40 ; CHECK-MVE-NEXT: vldrw.u32 q4, [r0] -; CHECK-MVE-NEXT: mov r6, r1 +; CHECK-MVE-NEXT: mov r7, r1 ; CHECK-MVE-NEXT: mov r0, r3 -; CHECK-MVE-NEXT: mov r5, r2 -; CHECK-MVE-NEXT: vmov r7, r1, d9 +; CHECK-MVE-NEXT: mov r6, r2 +; CHECK-MVE-NEXT: vmov r4, r1, d9 ; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov s19, r0 -; CHECK-MVE-NEXT: mov r0, r5 -; CHECK-MVE-NEXT: mov r1, r7 -; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov r5, r1, d8 -; CHECK-MVE-NEXT: vmov s18, r0 +; CHECK-MVE-NEXT: mov r5, r0 ; CHECK-MVE-NEXT: mov r0, r6 +; CHECK-MVE-NEXT: mov r1, r4 ; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov s17, r0 -; CHECK-MVE-NEXT: mov r0, r4 -; CHECK-MVE-NEXT: mov r1, r5 +; CHECK-MVE-NEXT: vmov r6, r1, d8 +; CHECK-MVE-NEXT: mov r4, r0 +; CHECK-MVE-NEXT: mov r0, r7 ; CHECK-MVE-NEXT: bl __aeabi_fadd -; CHECK-MVE-NEXT: vmov s16, r0 -; CHECK-MVE-NEXT: vmov r2, r3, d9 -; CHECK-MVE-NEXT: vmov r0, r1, d8 +; CHECK-MVE-NEXT: mov r7, r0 +; CHECK-MVE-NEXT: mov r0, r8 +; CHECK-MVE-NEXT: mov r1, r6 +; CHECK-MVE-NEXT: bl __aeabi_fadd +; CHECK-MVE-NEXT: mov r1, r7 +; CHECK-MVE-NEXT: mov r2, r4 +; CHECK-MVE-NEXT: mov r3, r5 ; CHECK-MVE-NEXT: vpop {d8, d9} -; CHECK-MVE-NEXT: add sp, #4 -; CHECK-MVE-NEXT: pop {r4, r5, r6, r7, pc} +; CHECK-MVE-NEXT: pop.w {r4, r5, r6, r7, r8, pc} ; ; CHECK-BE-LABEL: vector_add_f32: ; CHECK-BE: @ %bb.0: @ %entry diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll index 4dd9173e2d418..93b5e3f266b0a 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll @@ -33,53 +33,29 @@ entry: } define void @vld3_v4i32(ptr %src, ptr %dst) { -; CHECK-LV-LABEL: vld3_v4i32: -; CHECK-LV: @ %bb.0: @ %entry -; CHECK-LV-NEXT: .vsave {d8, d9} -; CHECK-LV-NEXT: vpush {d8, d9} -; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-LV-NEXT: vldrw.u32 q1, [r0] -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #32] -; CHECK-LV-NEXT: vmov.f32 s10, s2 -; CHECK-LV-NEXT: vmov.f32 s13, s0 -; CHECK-LV-NEXT: vmov.f32 s14, s3 -; CHECK-LV-NEXT: vmov.f32 s8, s4 -; CHECK-LV-NEXT: vmov.f32 s9, s7 -; CHECK-LV-NEXT: vmov.f32 s12, s5 -; CHECK-LV-NEXT: vmov.f32 s15, s18 -; CHECK-LV-NEXT: vmov.f32 s11, s17 -; CHECK-LV-NEXT: vadd.i32 q2, q2, q3 -; CHECK-LV-NEXT: vmov.f32 s0, s6 -; CHECK-LV-NEXT: vmov.f32 s2, s16 -; CHECK-LV-NEXT: vmov.f32 s3, s19 -; CHECK-LV-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LV-NEXT: vstrw.32 q0, [r1] -; CHECK-LV-NEXT: vpop {d8, d9} -; CHECK-LV-NEXT: bx lr -; -; CHECK-LIS-LABEL: vld3_v4i32: -; CHECK-LIS: @ %bb.0: @ %entry -; CHECK-LIS-NEXT: .vsave {d8, d9} -; CHECK-LIS-NEXT: vpush {d8, d9} -; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0] -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LIS-NEXT: vmov.f32 s10, s2 -; CHECK-LIS-NEXT: vmov.f32 s17, s0 -; CHECK-LIS-NEXT: vmov.f32 s18, s3 -; CHECK-LIS-NEXT: vmov.f32 s8, s4 -; CHECK-LIS-NEXT: vmov.f32 s9, s7 -; CHECK-LIS-NEXT: vmov.f32 s16, s5 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s11, s13 -; 
CHECK-LIS-NEXT: vadd.i32 q2, q2, q4 -; CHECK-LIS-NEXT: vmov.f32 s0, s6 -; CHECK-LIS-NEXT: vmov.f32 s2, s12 -; CHECK-LIS-NEXT: vmov.f32 s3, s15 -; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LIS-NEXT: vstrw.32 q0, [r1] -; CHECK-LIS-NEXT: vpop {d8, d9} -; CHECK-LIS-NEXT: bx lr +; CHECK-LABEL: vld3_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vldrw.u32 q0, [r0, #16] +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: vldrw.u32 q4, [r0, #32] +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s0 +; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: vmov.f32 s8, s4 +; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.f32 s15, s18 +; CHECK-NEXT: vmov.f32 s11, s17 +; CHECK-NEXT: vadd.i32 q2, q2, q3 +; CHECK-NEXT: vmov.f32 s0, s6 +; CHECK-NEXT: vmov.f32 s2, s16 +; CHECK-NEXT: vmov.f32 s3, s19 +; CHECK-NEXT: vadd.i32 q0, q2, q0 +; CHECK-NEXT: vstrw.32 q0, [r1] +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr entry: %l1 = load <12 x i32>, ptr %src, align 4 @@ -93,87 +69,46 @@ entry: } define void @vld3_v8i32(ptr %src, ptr %dst) { -; CHECK-LV-LABEL: vld3_v8i32: -; CHECK-LV: @ %bb.0: @ %entry -; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-LV-NEXT: vpush {d8, d9, d10, d11} -; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80] -; CHECK-LV-NEXT: vmov.f32 s10, s2 -; CHECK-LV-NEXT: vmov.f32 s13, s0 -; CHECK-LV-NEXT: vmov.f32 s14, s3 -; CHECK-LV-NEXT: vmov.f32 s8, s4 -; CHECK-LV-NEXT: vmov.f32 s9, s7 -; CHECK-LV-NEXT: vmov.f32 s12, s5 -; CHECK-LV-NEXT: vmov.f32 s15, s18 -; CHECK-LV-NEXT: vmov.f32 s11, s17 -; CHECK-LV-NEXT: vadd.i32 q2, q2, q3 -; CHECK-LV-NEXT: vmov.f32 s0, s6 -; CHECK-LV-NEXT: vmov.f32 s2, s16 -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LV-NEXT: vmov.f32 s3, s19 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LV-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LV-NEXT: vldrw.u32 q2, [r0] -; CHECK-LV-NEXT: vmov.f32 s17, s4 -; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LV-NEXT: vmov.f32 s18, s7 -; CHECK-LV-NEXT: vmov.f32 s22, s6 -; CHECK-LV-NEXT: vmov.f32 s16, s9 -; CHECK-LV-NEXT: vmov.f32 s19, s14 -; CHECK-LV-NEXT: vmov.f32 s20, s8 -; CHECK-LV-NEXT: vmov.f32 s21, s11 -; CHECK-LV-NEXT: vmov.f32 s23, s13 -; CHECK-LV-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LV-NEXT: vmov.f32 s4, s10 -; CHECK-LV-NEXT: vmov.f32 s6, s12 -; CHECK-LV-NEXT: vmov.f32 s7, s15 -; CHECK-LV-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LV-NEXT: vstrw.32 q1, [r1] -; CHECK-LV-NEXT: vpop {d8, d9, d10, d11} -; CHECK-LV-NEXT: bx lr -; -; CHECK-LIS-LABEL: vld3_v8i32: -; CHECK-LIS: @ %bb.0: @ %entry -; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11} -; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80] -; CHECK-LIS-NEXT: vmov.f32 s10, s2 -; CHECK-LIS-NEXT: vmov.f32 s17, s0 -; CHECK-LIS-NEXT: vmov.f32 s18, s3 -; CHECK-LIS-NEXT: vmov.f32 s8, s4 -; CHECK-LIS-NEXT: vmov.f32 s9, s7 -; CHECK-LIS-NEXT: vmov.f32 s16, s5 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s11, s13 -; CHECK-LIS-NEXT: vmov.f32 s0, s6 -; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4 -; CHECK-LIS-NEXT: vmov.f32 s2, s12 -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LIS-NEXT: vmov.f32 s3, s15 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LIS-NEXT: vldrw.u32 q2, [r0] -; CHECK-LIS-NEXT: vmov.f32 s17, s4 -; CHECK-LIS-NEXT: vstrw.32 q0, [r1, 
#16] -; CHECK-LIS-NEXT: vmov.f32 s18, s7 -; CHECK-LIS-NEXT: vmov.f32 s22, s6 -; CHECK-LIS-NEXT: vmov.f32 s16, s9 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s20, s8 -; CHECK-LIS-NEXT: vmov.f32 s21, s11 -; CHECK-LIS-NEXT: vmov.f32 s23, s13 -; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LIS-NEXT: vmov.f32 s4, s10 -; CHECK-LIS-NEXT: vmov.f32 s6, s12 -; CHECK-LIS-NEXT: vmov.f32 s7, s15 -; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LIS-NEXT: vstrw.32 q1, [r1] -; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11} -; CHECK-LIS-NEXT: bx lr +; CHECK-LABEL: vld3_v8i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vldrw.u32 q0, [r0, #64] +; CHECK-NEXT: vldrw.u32 q1, [r0, #48] +; CHECK-NEXT: vldrw.u32 q4, [r0, #80] +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s0 +; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: vmov.f32 s8, s4 +; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.f32 s15, s18 +; CHECK-NEXT: vmov.f32 s11, s17 +; CHECK-NEXT: vadd.i32 q2, q2, q3 +; CHECK-NEXT: vmov.f32 s0, s6 +; CHECK-NEXT: vmov.f32 s2, s16 +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vmov.f32 s3, s19 +; CHECK-NEXT: vldrw.u32 q3, [r0, #32] +; CHECK-NEXT: vadd.i32 q0, q2, q0 +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vmov.f32 s17, s4 +; CHECK-NEXT: vstrw.32 q0, [r1, #16] +; CHECK-NEXT: vmov.f32 s18, s7 +; CHECK-NEXT: vmov.f32 s22, s6 +; CHECK-NEXT: vmov.f32 s16, s9 +; CHECK-NEXT: vmov.f32 s19, s14 +; CHECK-NEXT: vmov.f32 s20, s8 +; CHECK-NEXT: vmov.f32 s21, s11 +; CHECK-NEXT: vmov.f32 s23, s13 +; CHECK-NEXT: vadd.i32 q4, q5, q4 +; CHECK-NEXT: vmov.f32 s4, s10 +; CHECK-NEXT: vmov.f32 s6, s12 +; CHECK-NEXT: vmov.f32 s7, s15 +; CHECK-NEXT: vadd.i32 q1, q4, q1 +; CHECK-NEXT: vstrw.32 q1, [r1] +; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: bx lr entry: %l1 = load <24 x i32>, ptr %src, align 4 @@ -187,155 +122,80 @@ entry: } define void @vld3_v16i32(ptr %src, ptr %dst) { -; CHECK-LV-LABEL: vld3_v16i32: -; CHECK-LV: @ %bb.0: @ %entry -; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LV-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80] -; CHECK-LV-NEXT: vldrw.u32 q6, [r0, #176] -; CHECK-LV-NEXT: vmov.f32 s10, s2 -; CHECK-LV-NEXT: vmov.f32 s13, s0 -; CHECK-LV-NEXT: vmov.f32 s14, s3 -; CHECK-LV-NEXT: vmov.f32 s8, s4 -; CHECK-LV-NEXT: vmov.f32 s9, s7 -; CHECK-LV-NEXT: vmov.f32 s12, s5 -; CHECK-LV-NEXT: vmov.f32 s15, s18 -; CHECK-LV-NEXT: vmov.f32 s11, s17 -; CHECK-LV-NEXT: vadd.i32 q2, q2, q3 -; CHECK-LV-NEXT: vmov.f32 s0, s6 -; CHECK-LV-NEXT: vmov.f32 s2, s16 -; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LV-NEXT: vmov.f32 s3, s19 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LV-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LV-NEXT: vldrw.u32 q2, [r0] -; CHECK-LV-NEXT: vmov.f32 s17, s4 -; CHECK-LV-NEXT: vmov.f32 s18, s7 -; CHECK-LV-NEXT: vmov.f32 s22, s6 -; CHECK-LV-NEXT: vmov.f32 s16, s9 -; CHECK-LV-NEXT: vmov.f32 s19, s14 -; CHECK-LV-NEXT: vmov.f32 s20, s8 -; CHECK-LV-NEXT: vmov.f32 s21, s11 -; CHECK-LV-NEXT: vmov.f32 s23, s13 -; CHECK-LV-NEXT: vmov.f32 s4, s10 -; CHECK-LV-NEXT: vldrw.u32 q2, [r0, #160] -; CHECK-LV-NEXT: vmov.f32 s6, s12 -; CHECK-LV-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LV-NEXT: vmov.f32 s7, s15 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #144] -; CHECK-LV-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LV-NEXT: vmov.f32 s18, s10 -; 
CHECK-LV-NEXT: vmov.f32 s21, s8 -; CHECK-LV-NEXT: vmov.f32 s22, s11 -; CHECK-LV-NEXT: vmov.f32 s16, s12 -; CHECK-LV-NEXT: vmov.f32 s17, s15 -; CHECK-LV-NEXT: vmov.f32 s20, s13 -; CHECK-LV-NEXT: vmov.f32 s23, s26 -; CHECK-LV-NEXT: vmov.f32 s19, s25 -; CHECK-LV-NEXT: vadd.i32 q4, q4, q5 -; CHECK-LV-NEXT: vmov.f32 s8, s14 -; CHECK-LV-NEXT: vmov.f32 s10, s24 -; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #112] -; CHECK-LV-NEXT: vmov.f32 s11, s27 -; CHECK-LV-NEXT: vldrw.u32 q5, [r0, #128] -; CHECK-LV-NEXT: vadd.i32 q2, q4, q2 -; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #96] -; CHECK-LV-NEXT: vmov.f32 s25, s12 -; CHECK-LV-NEXT: vstrw.32 q2, [r1, #48] -; CHECK-LV-NEXT: vmov.f32 s26, s15 -; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LV-NEXT: vmov.f32 s30, s14 -; CHECK-LV-NEXT: vstrw.32 q1, [r1] -; CHECK-LV-NEXT: vmov.f32 s24, s17 -; CHECK-LV-NEXT: vmov.f32 s27, s22 -; CHECK-LV-NEXT: vmov.f32 s28, s16 -; CHECK-LV-NEXT: vmov.f32 s29, s19 -; CHECK-LV-NEXT: vmov.f32 s31, s21 -; CHECK-LV-NEXT: vadd.i32 q6, q7, q6 -; CHECK-LV-NEXT: vmov.f32 s12, s18 -; CHECK-LV-NEXT: vmov.f32 s14, s20 -; CHECK-LV-NEXT: vmov.f32 s15, s23 -; CHECK-LV-NEXT: vadd.i32 q3, q6, q3 -; CHECK-LV-NEXT: vstrw.32 q3, [r1, #32] -; CHECK-LV-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LV-NEXT: bx lr -; -; CHECK-LIS-LABEL: vld3_v16i32: -; CHECK-LIS: @ %bb.0: @ %entry -; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64] -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48] -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80] -; CHECK-LIS-NEXT: vmov.f32 s10, s2 -; CHECK-LIS-NEXT: vmov.f32 s17, s0 -; CHECK-LIS-NEXT: vmov.f32 s18, s3 -; CHECK-LIS-NEXT: vmov.f32 s8, s4 -; CHECK-LIS-NEXT: vmov.f32 s9, s7 -; CHECK-LIS-NEXT: vmov.f32 s16, s5 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s11, s13 -; CHECK-LIS-NEXT: vmov.f32 s0, s6 -; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4 -; CHECK-LIS-NEXT: vmov.f32 s2, s12 -; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-LIS-NEXT: vmov.f32 s3, s15 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32] -; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0 -; CHECK-LIS-NEXT: vldrw.u32 q2, [r0] -; CHECK-LIS-NEXT: vmov.f32 s17, s4 -; CHECK-LIS-NEXT: vmov.f32 s18, s7 -; CHECK-LIS-NEXT: vmov.f32 s22, s6 -; CHECK-LIS-NEXT: vmov.f32 s16, s9 -; CHECK-LIS-NEXT: vmov.f32 s19, s14 -; CHECK-LIS-NEXT: vmov.f32 s20, s8 -; CHECK-LIS-NEXT: vmov.f32 s21, s11 -; CHECK-LIS-NEXT: vmov.f32 s23, s13 -; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4 -; CHECK-LIS-NEXT: vmov.f32 s4, s10 -; CHECK-LIS-NEXT: vldrw.u32 q2, [r0, #160] -; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #176] -; CHECK-LIS-NEXT: vmov.f32 s6, s12 -; CHECK-LIS-NEXT: vmov.f32 s7, s15 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #144] -; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1 -; CHECK-LIS-NEXT: vmov.f32 s18, s10 -; CHECK-LIS-NEXT: vmov.f32 s25, s8 -; CHECK-LIS-NEXT: vmov.f32 s26, s11 -; CHECK-LIS-NEXT: vmov.f32 s16, s12 -; CHECK-LIS-NEXT: vmov.f32 s17, s15 -; CHECK-LIS-NEXT: vmov.f32 s24, s13 -; CHECK-LIS-NEXT: vmov.f32 s27, s22 -; CHECK-LIS-NEXT: vmov.f32 s19, s21 -; CHECK-LIS-NEXT: vmov.f32 s8, s14 -; CHECK-LIS-NEXT: vadd.i32 q4, q4, q6 -; CHECK-LIS-NEXT: vmov.f32 s10, s20 -; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #112] -; CHECK-LIS-NEXT: vmov.f32 s11, s23 -; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #128] -; CHECK-LIS-NEXT: vadd.i32 q2, q4, q2 -; CHECK-LIS-NEXT: vldrw.u32 q4, [r0, #96] -; CHECK-LIS-NEXT: vmov.f32 s25, s12 -; CHECK-LIS-NEXT: vstrw.32 q2, [r1, #48] -; CHECK-LIS-NEXT: vmov.f32 s26, s15 -; 
CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16] -; CHECK-LIS-NEXT: vmov.f32 s30, s14 -; CHECK-LIS-NEXT: vstrw.32 q1, [r1] -; CHECK-LIS-NEXT: vmov.f32 s24, s17 -; CHECK-LIS-NEXT: vmov.f32 s27, s22 -; CHECK-LIS-NEXT: vmov.f32 s28, s16 -; CHECK-LIS-NEXT: vmov.f32 s29, s19 -; CHECK-LIS-NEXT: vmov.f32 s31, s21 -; CHECK-LIS-NEXT: vadd.i32 q6, q7, q6 -; CHECK-LIS-NEXT: vmov.f32 s12, s18 -; CHECK-LIS-NEXT: vmov.f32 s14, s20 -; CHECK-LIS-NEXT: vmov.f32 s15, s23 -; CHECK-LIS-NEXT: vadd.i32 q3, q6, q3 -; CHECK-LIS-NEXT: vstrw.32 q3, [r1, #32] -; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-LIS-NEXT: bx lr +; CHECK-LABEL: vld3_v16i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vldrw.u32 q0, [r0, #64] +; CHECK-NEXT: vldrw.u32 q1, [r0, #48] +; CHECK-NEXT: vldrw.u32 q4, [r0, #80] +; CHECK-NEXT: vldrw.u32 q6, [r0, #176] +; CHECK-NEXT: vmov.f32 s10, s2 +; CHECK-NEXT: vmov.f32 s13, s0 +; CHECK-NEXT: vmov.f32 s14, s3 +; CHECK-NEXT: vmov.f32 s8, s4 +; CHECK-NEXT: vmov.f32 s9, s7 +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.f32 s15, s18 +; CHECK-NEXT: vmov.f32 s11, s17 +; CHECK-NEXT: vadd.i32 q2, q2, q3 +; CHECK-NEXT: vmov.f32 s0, s6 +; CHECK-NEXT: vmov.f32 s2, s16 +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vmov.f32 s3, s19 +; CHECK-NEXT: vldrw.u32 q3, [r0, #32] +; CHECK-NEXT: vadd.i32 q0, q2, q0 +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vmov.f32 s17, s4 +; CHECK-NEXT: vmov.f32 s18, s7 +; CHECK-NEXT: vmov.f32 s22, s6 +; CHECK-NEXT: vmov.f32 s16, s9 +; CHECK-NEXT: vmov.f32 s19, s14 +; CHECK-NEXT: vmov.f32 s20, s8 +; CHECK-NEXT: vmov.f32 s21, s11 +; CHECK-NEXT: vmov.f32 s23, s13 +; CHECK-NEXT: vmov.f32 s4, s10 +; CHECK-NEXT: vldrw.u32 q2, [r0, #160] +; CHECK-NEXT: vmov.f32 s6, s12 +; CHECK-NEXT: vadd.i32 q4, q5, q4 +; CHECK-NEXT: vmov.f32 s7, s15 +; CHECK-NEXT: vldrw.u32 q3, [r0, #144] +; CHECK-NEXT: vadd.i32 q1, q4, q1 +; CHECK-NEXT: vmov.f32 s18, s10 +; CHECK-NEXT: vmov.f32 s21, s8 +; CHECK-NEXT: vmov.f32 s22, s11 +; CHECK-NEXT: vmov.f32 s16, s12 +; CHECK-NEXT: vmov.f32 s17, s15 +; CHECK-NEXT: vmov.f32 s20, s13 +; CHECK-NEXT: vmov.f32 s23, s26 +; CHECK-NEXT: vmov.f32 s19, s25 +; CHECK-NEXT: vadd.i32 q4, q4, q5 +; CHECK-NEXT: vmov.f32 s8, s14 +; CHECK-NEXT: vmov.f32 s10, s24 +; CHECK-NEXT: vldrw.u32 q3, [r0, #112] +; CHECK-NEXT: vmov.f32 s11, s27 +; CHECK-NEXT: vldrw.u32 q5, [r0, #128] +; CHECK-NEXT: vadd.i32 q2, q4, q2 +; CHECK-NEXT: vldrw.u32 q4, [r0, #96] +; CHECK-NEXT: vmov.f32 s25, s12 +; CHECK-NEXT: vstrw.32 q2, [r1, #48] +; CHECK-NEXT: vmov.f32 s26, s15 +; CHECK-NEXT: vstrw.32 q0, [r1, #16] +; CHECK-NEXT: vmov.f32 s30, s14 +; CHECK-NEXT: vstrw.32 q1, [r1] +; CHECK-NEXT: vmov.f32 s24, s17 +; CHECK-NEXT: vmov.f32 s27, s22 +; CHECK-NEXT: vmov.f32 s28, s16 +; CHECK-NEXT: vmov.f32 s29, s19 +; CHECK-NEXT: vmov.f32 s31, s21 +; CHECK-NEXT: vadd.i32 q6, q7, q6 +; CHECK-NEXT: vmov.f32 s12, s18 +; CHECK-NEXT: vmov.f32 s14, s20 +; CHECK-NEXT: vmov.f32 s15, s23 +; CHECK-NEXT: vadd.i32 q3, q6, q3 +; CHECK-NEXT: vstrw.32 q3, [r1, #32] +; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: bx lr entry: %l1 = load <48 x i32>, ptr %src, align 4 diff --git a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll index eb1527ff3dc4a..32648b6b449a8 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll @@ -790,15 +790,250 @@ entry: ret i16 %result } -declare i16 
@llvm.vector.reduce.add.v16i16(<16 x i16>) - - -declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) -declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>) -declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) -declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32) -declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>) -declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>) -declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32) -declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32 immarg, <16 x i1>, <16 x i8>) -declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32 immarg, <16 x i1>) +define arm_aapcs_vfpcc <4 x i32> @vmulhs_kb_v4i32(<4 x i32> %s0, <4 x i64> %s1) { +; CHECK-LABEL: vmulhs_kb_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f32 s4, s2 +; CHECK-NEXT: vmov r1, s9 +; CHECK-NEXT: vmov r2, s5 +; CHECK-NEXT: vmov.f32 s6, s3 +; CHECK-NEXT: vmov.f32 s10, s1 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: smmul r0, r0, r1 +; CHECK-NEXT: vmov r1, s0 +; CHECK-NEXT: smmul r1, r1, r2 +; CHECK-NEXT: vmov r2, s7 +; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov r1, s11 +; CHECK-NEXT: smmul r0, r0, r1 +; CHECK-NEXT: vmov r1, s10 +; CHECK-NEXT: smmul r1, r1, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: bx lr +entry: + %s0s = sext <4 x i32> %s0 to <4 x i64> + %s1s = ashr <4 x i64> %s1, + %m = mul <4 x i64> %s0s, %s1s + %s = ashr <4 x i64> %m, + %s2 = trunc <4 x i64> %s to <4 x i32> + ret <4 x i32> %s2 +} + +define arm_aapcs_vfpcc <4 x i32> @vmulhu_kb_v4i32(<4 x i32> %s0, <4 x i64> %s1) { +; CHECK-LABEL: vmulhu_kb_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f32 s4, s2 +; CHECK-NEXT: vmov r1, s9 +; CHECK-NEXT: vmov r2, s5 +; CHECK-NEXT: vmov.f32 s6, s3 +; CHECK-NEXT: vmov.f32 s10, s1 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: umull r0, r1, r0, r1 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: umull r0, r2, r0, r2 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov q0[2], q0[0], r2, r1 +; CHECK-NEXT: vmov r1, s11 +; CHECK-NEXT: vmov r2, s7 +; CHECK-NEXT: umull r0, r1, r0, r1 +; CHECK-NEXT: vmov r0, s10 +; CHECK-NEXT: umull r0, r2, r0, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r2, r1 +; CHECK-NEXT: bx lr +entry: + %s0s = zext <4 x i32> %s0 to <4 x i64> + %s1s = lshr <4 x i64> %s1, + %m = mul <4 x i64> %s0s, %s1s + %s = lshr <4 x i64> %m, + %s2 = trunc <4 x i64> %s to <4 x i32> + ret <4 x i32> %s2 +} + +define arm_aapcs_vfpcc <4 x i32> @vmulhs_kbc_v4i32(<4 x i32> %s0, <4 x i64> %s1) { +; CHECK-LABEL: vmulhs_kbc_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f32 s4, s2 +; CHECK-NEXT: vmov r1, s9 +; CHECK-NEXT: vmov r2, s5 +; CHECK-NEXT: vmov.f32 s6, s3 +; CHECK-NEXT: vmov.f32 s10, s1 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: smmul r0, r1, r0 +; CHECK-NEXT: vmov r1, s0 +; CHECK-NEXT: smmul r1, r2, r1 +; CHECK-NEXT: vmov r2, s7 +; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov r1, s11 +; CHECK-NEXT: smmul r0, r1, r0 +; CHECK-NEXT: vmov r1, s10 +; CHECK-NEXT: smmul r1, r2, r1 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: bx lr +entry: + %s0s = sext <4 x i32> %s0 to <4 x i64> + %s1s = ashr <4 x i64> %s1, + %m = mul <4 x i64> %s1s, %s0s + %s = ashr <4 x i64> %m, + %s2 = trunc <4 x i64> %s to <4 x i32> + ret <4 x i32> %s2 +} + +define arm_aapcs_vfpcc <4 x i32> @vmulhu_kbc_v4i32(<4 x i32> %s0, <4 x i64> %s1) { +; CHECK-LABEL: vmulhu_kbc_v4i32: +; CHECK: @ %bb.0: 
@ %entry +; CHECK-NEXT: vmov.f32 s4, s2 +; CHECK-NEXT: vmov r1, s9 +; CHECK-NEXT: vmov r2, s5 +; CHECK-NEXT: vmov.f32 s6, s3 +; CHECK-NEXT: vmov.f32 s10, s1 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: umull r0, r1, r1, r0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: umull r0, r2, r2, r0 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov q0[2], q0[0], r2, r1 +; CHECK-NEXT: vmov r1, s11 +; CHECK-NEXT: vmov r2, s7 +; CHECK-NEXT: umull r0, r1, r1, r0 +; CHECK-NEXT: vmov r0, s10 +; CHECK-NEXT: umull r0, r2, r2, r0 +; CHECK-NEXT: vmov q0[3], q0[1], r2, r1 +; CHECK-NEXT: bx lr +entry: + %s0s = zext <4 x i32> %s0 to <4 x i64> + %s1s = lshr <4 x i64> %s1, + %m = mul <4 x i64> %s1s, %s0s + %s = lshr <4 x i64> %m, + %s2 = trunc <4 x i64> %s to <4 x i32> + ret <4 x i32> %s2 +} + +define arm_aapcs_vfpcc <8 x i16> @vmulhs_kb_v8i16(<8 x i16> %s0, <8 x i32> %s1) { +; CHECK-LABEL: vmulhs_kb_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmovlt.s16 q4, q0 +; CHECK-NEXT: vmov.f32 s13, s7 +; CHECK-NEXT: vmovlb.s16 q0, q0 +; CHECK-NEXT: vmov.f32 s5, s6 +; CHECK-NEXT: vmov.f32 s14, s9 +; CHECK-NEXT: vmov.f32 s15, s11 +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vshr.s32 q3, q3, #16 +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vmul.i32 q3, q4, q3 +; CHECK-NEXT: vshr.s32 q1, q1, #16 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmul.i32 q0, q0, q1 +; CHECK-NEXT: vshr.u32 q0, q0, #16 +; CHECK-NEXT: vmovnt.i32 q0, q3 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %s0s = sext <8 x i16> %s0 to <8 x i32> + %s1s = ashr <8 x i32> %s1, + %m = mul <8 x i32> %s0s, %s1s + %s = ashr <8 x i32> %m, + %s2 = trunc <8 x i32> %s to <8 x i16> + ret <8 x i16> %s2 +} + +define arm_aapcs_vfpcc <8 x i16> @vmulhu_kb_v8i16(<8 x i16> %s0, <8 x i32> %s1) { +; CHECK-LABEL: vmulhu_kb_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmovlt.u16 q4, q0 +; CHECK-NEXT: vmov.f32 s13, s7 +; CHECK-NEXT: vmovlb.u16 q0, q0 +; CHECK-NEXT: vmov.f32 s5, s6 +; CHECK-NEXT: vmov.f32 s14, s9 +; CHECK-NEXT: vmov.f32 s15, s11 +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vmul.i32 q3, q4, q3 +; CHECK-NEXT: vshr.u32 q1, q1, #16 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmul.i32 q0, q0, q1 +; CHECK-NEXT: vshr.u32 q0, q0, #16 +; CHECK-NEXT: vmovnt.i32 q0, q3 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %s0s = zext <8 x i16> %s0 to <8 x i32> + %s1s = lshr <8 x i32> %s1, + %m = mul <8 x i32> %s0s, %s1s + %s = lshr <8 x i32> %m, + %s2 = trunc <8 x i32> %s to <8 x i16> + ret <8 x i16> %s2 +} + +define arm_aapcs_vfpcc <8 x i16> @vmulhs_kbc_v8i16(<8 x i16> %s0, <8 x i32> %s1) { +; CHECK-LABEL: vmulhs_kbc_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmovlt.s16 q4, q0 +; CHECK-NEXT: vmov.f32 s13, s7 +; CHECK-NEXT: vmovlb.s16 q0, q0 +; CHECK-NEXT: vmov.f32 s5, s6 +; CHECK-NEXT: vmov.f32 s14, s9 +; CHECK-NEXT: vmov.f32 s15, s11 +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vshr.s32 q3, q3, #16 +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vmul.i32 q3, q3, q4 +; CHECK-NEXT: vshr.s32 q1, q1, #16 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmul.i32 q0, q1, q0 +; CHECK-NEXT: vshr.u32 q0, q0, #16 +; CHECK-NEXT: vmovnt.i32 q0, q3 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: 
+ %s0s = sext <8 x i16> %s0 to <8 x i32> + %s1s = ashr <8 x i32> %s1, + %m = mul <8 x i32> %s1s, %s0s + %s = ashr <8 x i32> %m, + %s2 = trunc <8 x i32> %s to <8 x i16> + ret <8 x i16> %s2 +} + +define arm_aapcs_vfpcc <8 x i16> @vmulhu_kbc_v8i16(<8 x i16> %s0, <8 x i32> %s1) { +; CHECK-LABEL: vmulhu_kbc_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmovlt.u16 q4, q0 +; CHECK-NEXT: vmov.f32 s13, s7 +; CHECK-NEXT: vmovlb.u16 q0, q0 +; CHECK-NEXT: vmov.f32 s5, s6 +; CHECK-NEXT: vmov.f32 s14, s9 +; CHECK-NEXT: vmov.f32 s15, s11 +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vmul.i32 q3, q3, q4 +; CHECK-NEXT: vshr.u32 q1, q1, #16 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmul.i32 q0, q1, q0 +; CHECK-NEXT: vshr.u32 q0, q0, #16 +; CHECK-NEXT: vmovnt.i32 q0, q3 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %s0s = zext <8 x i16> %s0 to <8 x i32> + %s1s = lshr <8 x i32> %s1, + %m = mul <8 x i32> %s1s, %s0s + %s = lshr <8 x i32> %m, + %s2 = trunc <8 x i32> %s to <8 x i16> + ret <8 x i16> %s2 +} diff --git a/llvm/test/CodeGen/VE/Scalar/max.ll b/llvm/test/CodeGen/VE/Scalar/max.ll index 51da557c6c49f..7950842670afb 100644 --- a/llvm/test/CodeGen/VE/Scalar/max.ll +++ b/llvm/test/CodeGen/VE/Scalar/max.ll @@ -1,7 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s -; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \ -; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT define double @maxf64(double, double) { ; CHECK-LABEL: maxf64: @@ -10,16 +8,21 @@ define double @maxf64(double, double) { ; CHECK-NEXT: cmov.d.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ogt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @maxf64_fast(double, double) { +; CHECK-LABEL: maxf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ogt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @max2f64(double, double) { ; CHECK-LABEL: max2f64: ; CHECK: # %bb.0: @@ -27,16 +30,21 @@ define double @max2f64(double, double) { ; CHECK-NEXT: cmov.d.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2f64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp oge double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @max2f64_fast(double, double) { +; CHECK-LABEL: max2f64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp oge double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + ; VE has no max for unordered comparison define double @maxuf64(double, double) { ; CHECK-LABEL: maxuf64: @@ -45,16 +53,21 @@ define double @maxuf64(double, double) { ; CHECK-NEXT: cmov.d.gtnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxuf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ugt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @maxuf64_fast(double, double) { 
+; CHECK-LABEL: maxuf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ugt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + ; VE has no max for unordered comparison define double @max2uf64(double, double) { ; CHECK-LABEL: max2uf64: @@ -63,16 +76,21 @@ define double @max2uf64(double, double) { ; CHECK-NEXT: cmov.d.genan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2uf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp uge double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @max2uf64_fast(double, double) { +; CHECK-LABEL: max2uf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp uge double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define float @maxf32(float, float) { ; CHECK-LABEL: maxf32: ; CHECK: # %bb.0: @@ -80,16 +98,21 @@ define float @maxf32(float, float) { ; CHECK-NEXT: cmov.s.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ogt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @maxf32_fast(float, float) { +; CHECK-LABEL: maxf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ogt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @max2f32(float, float) { ; CHECK-LABEL: max2f32: ; CHECK: # %bb.0: @@ -97,16 +120,21 @@ define float @max2f32(float, float) { ; CHECK-NEXT: cmov.s.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2f32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp oge float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @max2f32_fast(float, float) { +; CHECK-LABEL: max2f32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp oge float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @maxuf32(float, float) { ; CHECK-LABEL: maxuf32: ; CHECK: # %bb.0: @@ -114,16 +142,21 @@ define float @maxuf32(float, float) { ; CHECK-NEXT: cmov.s.gtnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxuf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ugt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @maxuf32_fast(float, float) { +; CHECK-LABEL: maxuf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ugt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @max2uf32(float, float) { ; CHECK-LABEL: max2uf32: ; CHECK: # %bb.0: @@ -131,26 +164,26 @@ define float @max2uf32(float, float) { ; CHECK-NEXT: cmov.s.genan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2uf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp uge float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @max2uf32_fast(float, float) { +; CHECK-LABEL: max2uf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, 
%s10) + %3 = fcmp uge float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define i64 @maxi64(i64, i64) { ; CHECK-LABEL: maxi64: ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi64: -; OPT: # %bb.0: -; OPT-NEXT: maxs.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sgt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -161,11 +194,6 @@ define i64 @max2i64(i64, i64) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2i64: -; OPT: # %bb.0: -; OPT-NEXT: maxs.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sge i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -178,13 +206,6 @@ define i64 @maxu64(i64, i64) { ; CHECK-NEXT: cmov.l.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxu64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.gt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ugt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -197,13 +218,6 @@ define i64 @max2u64(i64, i64) { ; CHECK-NEXT: cmov.l.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2u64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.ge %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp uge i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -214,11 +228,6 @@ define i32 @maxi32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi32: -; OPT: # %bb.0: -; OPT-NEXT: maxs.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sgt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -229,11 +238,6 @@ define i32 @max2i32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2i32: -; OPT: # %bb.0: -; OPT-NEXT: maxs.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sge i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -246,13 +250,6 @@ define i32 @maxu32(i32, i32) { ; CHECK-NEXT: cmov.w.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxu32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.gt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ugt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -265,13 +262,6 @@ define i32 @max2u32(i32, i32) { ; CHECK-NEXT: cmov.w.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2u32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.ge %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp uge i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -283,12 +273,6 @@ define zeroext i1 @maxi1(i1 zeroext, i1 zeroext) { ; CHECK-NEXT: or %s0, %s0, %s1 ; CHECK-NEXT: and %s0, 1, %s0 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi1: -; OPT: # %bb.0: -; OPT-NEXT: or %s0, %s0, %s1 -; OPT-NEXT: and %s0, 1, %s0 -; OPT-NEXT: b.l.t (, %s10) %3 = xor i1 %1, true %4 = and i1 %3, %0 %5 = select i1 %4, i1 %0, i1 %1 diff --git a/llvm/test/CodeGen/VE/Scalar/min.ll b/llvm/test/CodeGen/VE/Scalar/min.ll index e8f4939f9149e..36a2e06a2c9d4 100644 --- a/llvm/test/CodeGen/VE/Scalar/min.ll +++ b/llvm/test/CodeGen/VE/Scalar/min.ll @@ -1,7 +1,5 @@ ; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s -; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \ -; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT define double @minf64(double, double) { ; CHECK-LABEL: minf64: @@ -10,16 +8,21 @@ define double @minf64(double, double) { ; CHECK-NEXT: cmov.d.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp olt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @minf64_fast(double, double) { +; CHECK-LABEL: minf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp olt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @min2f64(double, double) { ; CHECK-LABEL: min2f64: ; CHECK: # %bb.0: @@ -27,16 +30,21 @@ define double @min2f64(double, double) { ; CHECK-NEXT: cmov.d.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2f64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ole double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @min2f64_fast(double, double) { +; CHECK-LABEL: min2f64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ole double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @minuf64(double, double) { ; CHECK-LABEL: minuf64: ; CHECK: # %bb.0: @@ -44,16 +52,21 @@ define double @minuf64(double, double) { ; CHECK-NEXT: cmov.d.ltnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minuf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ult double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @minuf64_fast(double, double) { +; CHECK-LABEL: minuf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ult double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @min2uf64(double, double) { ; CHECK-LABEL: min2uf64: ; CHECK: # %bb.0: @@ -61,16 +74,21 @@ define double @min2uf64(double, double) { ; CHECK-NEXT: cmov.d.lenan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2uf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ule double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @min2uf64_fast(double, double) { +; CHECK-LABEL: min2uf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ule double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define float @minf32(float, float) { ; CHECK-LABEL: minf32: ; CHECK: # %bb.0: @@ -78,16 +96,21 @@ define float @minf32(float, float) { ; CHECK-NEXT: cmov.s.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp olt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @minf32_fast(float, float) { +; CHECK-LABEL: minf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: 
b.l.t (, %s10) + %3 = fcmp olt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @min2f32(float, float) { ; CHECK-LABEL: min2f32: ; CHECK: # %bb.0: @@ -95,16 +118,21 @@ define float @min2f32(float, float) { ; CHECK-NEXT: cmov.s.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2f32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ole float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @min2f32_fast(float, float) { +; CHECK-LABEL: min2f32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ole float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @minuf32(float, float) { ; CHECK-LABEL: minuf32: ; CHECK: # %bb.0: @@ -112,16 +140,21 @@ define float @minuf32(float, float) { ; CHECK-NEXT: cmov.s.ltnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minuf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ult float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @minuf32_fast(float, float) { +; CHECK-LABEL: minuf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ult float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @min2uf32(float, float) { ; CHECK-LABEL: min2uf32: ; CHECK: # %bb.0: @@ -129,26 +162,26 @@ define float @min2uf32(float, float) { ; CHECK-NEXT: cmov.s.lenan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2uf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ule float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @min2uf32_fast(float, float) { +; CHECK-LABEL: min2uf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ule float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define i64 @mini64(i64, i64) { ; CHECK-LABEL: mini64: ; CHECK: # %bb.0: ; CHECK-NEXT: mins.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini64: -; OPT: # %bb.0: -; OPT-NEXT: mins.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp slt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -159,11 +192,6 @@ define i64 @min2i64(i64, i64) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2i64: -; OPT: # %bb.0: -; OPT-NEXT: mins.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sle i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -176,13 +204,6 @@ define i64 @minu64(i64, i64) { ; CHECK-NEXT: cmov.l.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minu64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.lt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ult i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -195,13 +216,6 @@ define i64 @min2u64(i64, i64) { ; CHECK-NEXT: cmov.l.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2u64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.le %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ule i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 
@@ -212,11 +226,6 @@ define i32 @mini32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini32: -; OPT: # %bb.0: -; OPT-NEXT: mins.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp slt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -227,11 +236,6 @@ define i32 @min2i32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2i32: -; OPT: # %bb.0: -; OPT-NEXT: mins.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sle i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -244,13 +248,6 @@ define i32 @minu32(i32, i32) { ; CHECK-NEXT: cmov.w.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minu32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.lt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ult i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -263,13 +260,6 @@ define i32 @min2u32(i32, i32) { ; CHECK-NEXT: cmov.w.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2u32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.le %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ule i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -283,14 +273,6 @@ define zeroext i1 @mini1(i1 zeroext, i1 zeroext) { ; CHECK-NEXT: cmov.w.ne %s0, %s1, %s2 ; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini1: -; OPT: # %bb.0: -; OPT-NEXT: and %s2, 1, %s0 -; OPT-NEXT: and %s0, %s1, %s0 -; OPT-NEXT: cmov.w.ne %s0, %s1, %s2 -; OPT-NEXT: adds.w.zx %s0, %s0, (0)1 -; OPT-NEXT: b.l.t (, %s10) %3 = xor i1 %0, true %4 = and i1 %3, %1 %5 = select i1 %4, i1 %0, i1 %1 diff --git a/llvm/test/CodeGen/VE/Vector/vec_divrem.ll b/llvm/test/CodeGen/VE/Vector/vec_divrem.ll index 3bc0aba8d4264..93e2889793ba5 100644 --- a/llvm/test/CodeGen/VE/Vector/vec_divrem.ll +++ b/llvm/test/CodeGen/VE/Vector/vec_divrem.ll @@ -7,19 +7,22 @@ define <4 x i8> @udiv_by_minus_one(<4 x i8> %x) { ; CHECK-LABEL: udiv_by_minus_one: ; CHECK: # %bb.0: -; CHECK-NEXT: and %s0, %s0, (56)0 -; CHECK-NEXT: lea %s4, 16843010 -; CHECK-NEXT: muls.l %s0, %s0, %s4 -; CHECK-NEXT: srl %s0, %s0, 32 +; CHECK-NEXT: and %s4, %s0, (56)0 ; CHECK-NEXT: and %s1, %s1, (56)0 -; CHECK-NEXT: muls.l %s1, %s1, %s4 -; CHECK-NEXT: srl %s1, %s1, 32 ; CHECK-NEXT: and %s2, %s2, (56)0 -; CHECK-NEXT: muls.l %s2, %s2, %s4 -; CHECK-NEXT: srl %s2, %s2, 32 ; CHECK-NEXT: and %s3, %s3, (56)0 -; CHECK-NEXT: muls.l %s3, %s3, %s4 -; CHECK-NEXT: srl %s3, %s3, 32 +; CHECK-NEXT: or %s0, 0, (0)1 +; CHECK-NEXT: cmpu.w %s5, %s3, (56)0 +; CHECK-NEXT: or %s3, 0, (0)1 +; CHECK-NEXT: cmov.w.eq %s3, (63)0, %s5 +; CHECK-NEXT: cmpu.w %s5, %s2, (56)0 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: cmov.w.eq %s2, (63)0, %s5 +; CHECK-NEXT: cmpu.w %s5, %s1, (56)0 +; CHECK-NEXT: or %s1, 0, (0)1 +; CHECK-NEXT: cmov.w.eq %s1, (63)0, %s5 +; CHECK-NEXT: cmpu.w %s4, %s4, (56)0 +; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s4 ; CHECK-NEXT: b.l.t (, %s10) %r = udiv <4 x i8> %x, ret <4 x i8> %r @@ -28,27 +31,18 @@ define <4 x i8> @udiv_by_minus_one(<4 x i8> %x) { define <4 x i8> @urem_by_minus_one(<4 x i8> %x) { ; CHECK-LABEL: urem_by_minus_one: ; CHECK: # %bb.0: -; CHECK-NEXT: and %s0, %s0, (56)0 -; CHECK-NEXT: and %s1, %s1, (56)0 -; CHECK-NEXT: and %s2, %s2, (56)0 -; CHECK-NEXT: and %s3, %s3, (56)0 -; CHECK-NEXT: lea %s4, 16843010 -; 
CHECK-NEXT: muls.l %s5, %s3, %s4 -; CHECK-NEXT: srl %s5, %s5, 32 -; CHECK-NEXT: muls.w.sx %s5, %s5, (56)0 -; CHECK-NEXT: subs.w.sx %s3, %s3, %s5 -; CHECK-NEXT: muls.l %s5, %s2, %s4 -; CHECK-NEXT: srl %s5, %s5, 32 -; CHECK-NEXT: muls.w.sx %s5, %s5, (56)0 -; CHECK-NEXT: subs.w.sx %s2, %s2, %s5 -; CHECK-NEXT: muls.l %s5, %s1, %s4 -; CHECK-NEXT: srl %s5, %s5, 32 -; CHECK-NEXT: muls.w.sx %s5, %s5, (56)0 -; CHECK-NEXT: subs.w.sx %s1, %s1, %s5 -; CHECK-NEXT: muls.l %s4, %s0, %s4 -; CHECK-NEXT: srl %s4, %s4, 32 -; CHECK-NEXT: muls.w.sx %s4, %s4, (56)0 -; CHECK-NEXT: subs.w.sx %s0, %s0, %s4 +; CHECK-NEXT: and %s4, %s0, (56)0 +; CHECK-NEXT: and %s5, %s1, (56)0 +; CHECK-NEXT: and %s6, %s2, (56)0 +; CHECK-NEXT: and %s7, %s3, (56)0 +; CHECK-NEXT: cmpu.w %s7, %s7, (56)0 +; CHECK-NEXT: cmov.w.eq %s3, (0)1, %s7 +; CHECK-NEXT: cmpu.w %s6, %s6, (56)0 +; CHECK-NEXT: cmov.w.eq %s2, (0)1, %s6 +; CHECK-NEXT: cmpu.w %s5, %s5, (56)0 +; CHECK-NEXT: cmov.w.eq %s1, (0)1, %s5 +; CHECK-NEXT: cmpu.w %s4, %s4, (56)0 +; CHECK-NEXT: cmov.w.eq %s0, (0)1, %s4 ; CHECK-NEXT: b.l.t (, %s10) %r = urem <4 x i8> %x, ret <4 x i8> %r diff --git a/llvm/test/CodeGen/WebAssembly/fake-use.ll b/llvm/test/CodeGen/WebAssembly/fake-use.ll new file mode 100644 index 0000000000000..a18ce33566df0 --- /dev/null +++ b/llvm/test/CodeGen/WebAssembly/fake-use.ll @@ -0,0 +1,25 @@ +; RUN: llc < %s | llvm-mc -triple=wasm32-unknown-unknown + +target triple = "wasm32-unknown-unknown" + +define void @fake_use() { + %t = call i32 @foo() + tail call void (...) @llvm.fake.use(i32 %t) + ret void +} + +; %t shouldn't be converted to TEE in RegStackify, because the FAKE_USE will be +; deleted in the beginning of ExplicitLocals. +define void @fake_use_no_tee() { + %t = call i32 @foo() + tail call void (...) @llvm.fake.use(i32 %t) + call void @use(i32 %t) + ret void +} + +declare i32 @foo() +declare void @use(i32 %t) +; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) +declare void @llvm.fake.use(...) 
#0 + +attributes #0 = { mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) } diff --git a/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll b/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll index 47ea762864cc2..a599f4653f323 100644 --- a/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll +++ b/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll @@ -19,11 +19,11 @@ define hidden i32 @accumulate_add_u8_u8(ptr noundef readonly %a, ptr noundef re ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_u ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u -; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_u ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u ; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 @@ -65,11 +65,11 @@ define hidden i32 @accumulate_add_s8_s8(ptr noundef readonly %a, ptr noundef re ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s -; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s ; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 br i1 %cmp8.not, label %for.cond.cleanup, label %for.body @@ -108,12 +108,11 @@ define hidden i32 @accumulate_add_s8_u8(ptr noundef readonly %a, ptr noundef re ; MAX-BANDWIDTH: loop ; MAX-BANDWIDTH: v128.load -; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s -; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s -; MAX-BANDWIDTH: i32x4.add -; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_u ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u +; MAX-BANDWIDTH: v128.load +; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s +; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s ; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 @@ -363,10 +362,10 @@ define hidden i32 @accumulate_add_u16_u16(ptr noundef readonly %a, ptr noundef ; MAX-BANDWIDTH: loop ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u -; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u ; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 br i1 %cmp8.not, label %for.cond.cleanup, label %for.body @@ -402,10 +401,10 @@ define hidden i32 @accumulate_add_s16_s16(ptr noundef readonly %a, ptr noundef ; MAX-BANDWIDTH: loop ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s -; MAX-BANDWIDTH: i32x4.add ; MAX-BANDWIDTH: v128.load ; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s ; MAX-BANDWIDTH: i32x4.add +; MAX-BANDWIDTH: i32x4.add entry: %cmp8.not = icmp eq i32 %N, 0 br i1 %cmp8.not, label %for.cond.cleanup, label %for.body diff --git a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll index 6ef7219cfebdb..9cf7aab0b3655 100644 --- a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll +++ b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll @@ -56,14 +56,9 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind { ; CHECK-LABEL: PR90954: ; CHECK: # %bb.0: ; CHECK-NEXT: pushq %rbp -; CHECK-NEXT: movq %rsp, %rbp -; CHECK-NEXT: pushq %r15 ; CHECK-NEXT: pushq %r14 -; CHECK-NEXT: pushq %r13 -; CHECK-NEXT: pushq %r12 ; CHECK-NEXT: pushq %rbx -; CHECK-NEXT: andq $-1024, %rsp # imm = 0xFC00 -; CHECK-NEXT: subq $5120, %rsp # imm = 0x1400 +; CHECK-NEXT: subq 
$2912, %rsp # imm = 0xB60 ; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movb $1, {{[0-9]+}}(%rsp) @@ -79,29 +74,26 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind { ; CHECK-NEXT: movw $64, %cx ; CHECK-NEXT: movw $16, %di ; CHECK-NEXT: movb $1, %r8b -; CHECK-NEXT: movl $64, %r9d -; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r10 -; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r11 -; CHECK-NEXT: xorl %ebx, %ebx -; CHECK-NEXT: xorl %r14d, %r14d +; CHECK-NEXT: xorl %r9d, %r9d +; CHECK-NEXT: xorl %r10d, %r10d ; CHECK-NEXT: jmp .LBB1_1 ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB1_5: # in Loop: Header=BB1_1 Depth=1 -; CHECK-NEXT: incq %r14 -; CHECK-NEXT: addl %edx, %ebx +; CHECK-NEXT: incq %r10 +; CHECK-NEXT: addl %edx, %r9d ; CHECK-NEXT: .LBB1_1: # =>This Loop Header: Depth=1 ; CHECK-NEXT: # Child Loop BB1_2 Depth 2 -; CHECK-NEXT: movslq %ebx, %r15 -; CHECK-NEXT: leaq (%rsi,%r15,4), %r15 -; CHECK-NEXT: xorl %r12d, %r12d -; CHECK-NEXT: xorl %r13d, %r13d +; CHECK-NEXT: movslq %r9d, %r11 +; CHECK-NEXT: leaq (%rsi,%r11,4), %r11 +; CHECK-NEXT: xorl %ebx, %ebx +; CHECK-NEXT: xorl %r14d, %r14d ; CHECK-NEXT: jmp .LBB1_2 ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB1_4: # in Loop: Header=BB1_2 Depth=2 -; CHECK-NEXT: tilestored %tmm1, (%r15,%rax) -; CHECK-NEXT: incq %r13 -; CHECK-NEXT: addq $64, %r15 -; CHECK-NEXT: decq %r12 +; CHECK-NEXT: tilestored %tmm1, (%r11,%rax) +; CHECK-NEXT: incq %r14 +; CHECK-NEXT: addq $64, %r11 +; CHECK-NEXT: decq %rbx ; CHECK-NEXT: je .LBB1_5 ; CHECK-NEXT: .LBB1_2: # Parent Loop BB1_1 Depth=1 ; CHECK-NEXT: # => This Inner Loop Header: Depth=2 @@ -110,46 +102,12 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind { ; CHECK-NEXT: testb %r8b, %r8b ; CHECK-NEXT: jne .LBB1_4 ; CHECK-NEXT: # %bb.3: # in Loop: Header=BB1_2 Depth=2 -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: tileloadd (%r10,%r9), %tmm1 -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: tileloadd (%r11,%r9), %tmm2 +; CHECK-NEXT: tilezero %tmm1 +; CHECK-NEXT: tilezero %tmm2 ; CHECK-NEXT: tdpbf16ps %tmm2, %tmm1, %tmm0 -; CHECK-NEXT: movq %rax, 
{{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; CHECK-NEXT: movabsq $64, %rax -; CHECK-NEXT: tilestored %tmm0, 3072(%rsp,%rax) # 1024-byte Folded Spill -; CHECK-NEXT: tileloadd 3072(%rsp,%rax), %tmm1 # 1024-byte Folded Reload -; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; CHECK-NEXT: movabsq $64, %rbp +; CHECK-NEXT: tilestored %tmm0, 896(%rsp,%rbp) # 1024-byte Folded Spill +; CHECK-NEXT: tileloadd 896(%rsp,%rbp), %tmm1 # 1024-byte Folded Reload ; CHECK-NEXT: jmp .LBB1_4 %4 = shl i32 %2, 4 %5 = icmp eq i64 0, 0 diff --git a/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll b/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll index 6679b5f58e8c1..41fa34667af86 100644 --- a/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll +++ b/llvm/test/CodeGen/X86/apx/ndd-neg-addr-index.ll @@ -8,7 +8,7 @@ define void @neg_8bit_1(i1 %cmp) { ; NDD-NEXT: andb $1, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x80,0xe7,0x01] ; NDD-NEXT: movzbl 0, %ecx # encoding: [0x0f,0xb6,0x0c,0x25,0x00,0x00,0x00,0x00] ; NDD-NEXT: negb %al, %al # encoding: [0x62,0xf4,0x7c,0x18,0xf6,0xd8] -; NDD-NEXT: leab 2(%rcx,%rax), %al # encoding: [0x66,0x8d,0x44,0x01,0x02] +; NDD-NEXT: leal 2(%rcx,%rax), %eax # encoding: [0x8d,0x44,0x01,0x02] ; NDD-NEXT: movb %al, 0 # encoding: [0x88,0x04,0x25,0x00,0x00,0x00,0x00] ; NDD-NEXT: retq # encoding: [0xc3] entry: @@ -25,7 +25,8 @@ define void @neg_8bit_2(i8 %int8) { ; NDD-NEXT: # kill: def $edi killed $edi def $rdi ; NDD-NEXT: addb %dil, %dil, %al # encoding: [0x62,0xf4,0x7c,0x18,0x00,0xff] ; NDD-NEXT: negb %al, %al # encoding: [0x62,0xf4,0x7c,0x18,0xf6,0xd8] -; NDD-NEXT: leab 1(%rdi,%rax), %al # encoding: [0x66,0x8d,0x44,0x07,0x01] +; NDD-NEXT: leal 1(%rdi,%rax), %eax # encoding: [0x8d,0x44,0x07,0x01] +; NDD-NEXT: # kill: def $al killed $al killed $eax ; NDD-NEXT: mulb %dil # encoding: [0x40,0xf6,0xe7] ; NDD-NEXT: testb %al, %al # encoding: [0x84,0xc0] ; NDD-NEXT: retq # encoding: [0xc3] @@ -55,7 +56,7 @@ define i32 @neg_16bit(i16 %0) { ; NDD-NEXT: cmovsl %ecx, %eax # EVEX TO LEGACY Compression encoding: [0x0f,0x48,0xc1] ; NDD-NEXT: andw $-256, %ax # EVEX TO LEGACY Compression encoding: [0x66,0x25,0x00,0xff] ; NDD-NEXT: negw %ax, %ax # encoding: [0x62,0xf4,0x7d,0x18,0xf7,0xd8] -; NDD-NEXT: leaw 1(%rdi,%rax), %ax # encoding: [0x66,0x8d,0x44,0x07,0x01] +; NDD-NEXT: leal 1(%rdi,%rax), %eax # encoding: [0x8d,0x44,0x07,0x01] ; NDD-NEXT: movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0] ; NDD-NEXT: movq %rax, 0 # encoding: [0x48,0x89,0x04,0x25,0x00,0x00,0x00,0x00] ; NDD-NEXT: xorl %eax, %eax # encoding: [0x31,0xc0] diff --git a/llvm/test/CodeGen/X86/atomic-bit-test.ll b/llvm/test/CodeGen/X86/atomic-bit-test.ll index 8f91f4120842b..b06bef44a5e9e 100644 --- a/llvm/test/CodeGen/X86/atomic-bit-test.ll +++ b/llvm/test/CodeGen/X86/atomic-bit-test.ll @@ -469,52 +469,56 @@ entry: define i16 @use_in_diff_bb() nounwind { ; X86-LABEL: use_in_diff_bb: ; X86: # %bb.0: # %entry -; X86-NEXT: pushl %esi -; X86-NEXT: movzwl v16, %esi +; X86-NEXT: movzwl v16, %eax ; X86-NEXT: .p2align 4 ; X86-NEXT: .LBB17_1: # %atomicrmw.start ; X86-NEXT: # =>This Inner Loop Header: Depth=1 -; X86-NEXT: movl %esi, %ecx +; X86-NEXT: movl %eax, %ecx ; X86-NEXT: orl $1, %ecx -; X86-NEXT: movl %esi, %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: lock cmpxchgw %cx, v16 -; X86-NEXT: movl %eax, %esi +; X86-NEXT: # kill: def $ax killed $ax def $eax ; X86-NEXT: jne .LBB17_1 ; X86-NEXT: # %bb.2: # %atomicrmw.end -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: testb %al, %al +; X86-NEXT: xorl %ecx, %ecx +; X86-NEXT: testb 
%cl, %cl ; X86-NEXT: jne .LBB17_4 ; X86-NEXT: # %bb.3: +; X86-NEXT: pushl %esi +; X86-NEXT: movl %eax, %esi ; X86-NEXT: calll foo@PLT -; X86-NEXT: .LBB17_4: -; X86-NEXT: andl $1, %esi ; X86-NEXT: movl %esi, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .LBB17_4: +; X86-NEXT: andl $1, %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: use_in_diff_bb: ; X64: # %bb.0: # %entry -; X64-NEXT: pushq %rbx -; X64-NEXT: movzwl v16(%rip), %ebx +; X64-NEXT: movzwl v16(%rip), %eax ; X64-NEXT: .p2align 4 ; X64-NEXT: .LBB17_1: # %atomicrmw.start ; X64-NEXT: # =>This Inner Loop Header: Depth=1 -; X64-NEXT: movl %ebx, %ecx +; X64-NEXT: movl %eax, %ecx ; X64-NEXT: orl $1, %ecx -; X64-NEXT: movl %ebx, %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: lock cmpxchgw %cx, v16(%rip) -; X64-NEXT: movl %eax, %ebx +; X64-NEXT: # kill: def $ax killed $ax def $eax ; X64-NEXT: jne .LBB17_1 ; X64-NEXT: # %bb.2: # %atomicrmw.end -; X64-NEXT: xorl %eax, %eax -; X64-NEXT: testb %al, %al +; X64-NEXT: xorl %ecx, %ecx +; X64-NEXT: testb %cl, %cl ; X64-NEXT: jne .LBB17_4 ; X64-NEXT: # %bb.3: +; X64-NEXT: pushq %rbx +; X64-NEXT: movl %eax, %ebx ; X64-NEXT: callq foo@PLT -; X64-NEXT: .LBB17_4: -; X64-NEXT: andl $1, %ebx ; X64-NEXT: movl %ebx, %eax ; X64-NEXT: popq %rbx +; X64-NEXT: .LBB17_4: +; X64-NEXT: andl $1, %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq entry: %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2 diff --git a/llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll b/llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll new file mode 100644 index 0000000000000..76d84c1159ee4 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll @@ -0,0 +1,99 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=X64 + +declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbssd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbssd_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbssd %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbssd_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbssd %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbssds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbssds_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbssds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbssds_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbssds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <16 x i32>, <16 
x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbsud_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbsud_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbsud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbsud_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbsud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbsuds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbsuds_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbsuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbsuds_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbsuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbuud_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbuud_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbuud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbuud_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbuud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} + +declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <16 x i32>, <16 x i32>) + +define <16 x i32>@test_int_x86_avx10_vpdpbuuds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) { +; X86-LABEL: test_int_x86_avx10_vpdpbuuds_512: +; X86: # %bb.0: +; X86-NEXT: vpdpbuuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx10_vpdpbuuds_512: +; X64: # %bb.0: +; X64-NEXT: vpdpbuuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) + ret <16 x i32> %res +} diff --git a/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll index 09eb53faaaada..a2aad604f19bc 100644 --- a/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll @@ -53,7 +53,7 @@ declare <16 x float> @llvm.x86.avx10.vdpphps.512(<16 x float>, <32 x half>, <32 ; VNNI INT8 -define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) { +define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) { ; X86-LABEL: test_mm512_dpbssd_epi32: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -64,12 +64,12 @@ define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; X64: # %bb.0: ; X64-NEXT: vpdpbssd (%rdi), %zmm1, %zmm0 
# encoding: [0x62,0xf2,0x77,0x48,0x50,0x07] ; X64-NEXT: retq # encoding: [0xc3] - %__B = load <16 x i32>, ptr %pB - %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %__B = load <64 x i8>, ptr %pB + %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) ret <16 x i32> %res } -define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_mask_dpbssds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -81,13 +81,13 @@ define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssds %zmm2, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x77,0x49,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W ret <16 x i32> %res } -define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_maskz_dpbssd_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -99,16 +99,16 @@ define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x77,0xc9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer ret <16 x i32> %res } -declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <16 x i32>, <16 x i32>) +declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <64 x i8>, <64 x i8>) +declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <64 x i8>, <64 x i8>) -define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) { +define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) { ; X86-LABEL: test_mm512_dpbsud_epi32: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -119,12 +119,12 @@ define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; X64: # %bb.0: ; X64-NEXT: vpdpbsud (%rdi), %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x50,0x07] ; X64-NEXT: retq # encoding: [0xc3] - %__B = load <16 x i32>, ptr %pB - %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %__B = load <64 x i8>, ptr %pB + %res = tail call <16 x i32> 
@llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) ret <16 x i32> %res } -define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_mask_dpbsuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -136,13 +136,13 @@ define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsuds %zmm2, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x76,0x49,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W ret <16 x i32> %res } -define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_maskz_dpbsud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -154,16 +154,16 @@ define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsud %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x76,0xc9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer ret <16 x i32> %res } -declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <16 x i32>, <16 x i32>) +declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <64 x i8>, <64 x i8>) +declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <64 x i8>, <64 x i8>) -define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) { +define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) { ; X86-LABEL: test_mm512_dpbuud_epi32: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -174,12 +174,12 @@ define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; X64: # %bb.0: ; X64-NEXT: vpdpbuud (%rdi), %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x50,0x07] ; X64-NEXT: retq # encoding: [0xc3] - %__B = load <16 x i32>, ptr %pB - %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %__B = load <64 x i8>, ptr %pB + %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) ret <16 x i32> %res } -define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x 
i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_mask_dpbuuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -191,13 +191,13 @@ define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuuds %zmm2, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x74,0x49,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W ret <16 x i32> %res } -define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) { +define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) { ; X86-LABEL: test_mm512_maskz_dpbuud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] @@ -209,14 +209,14 @@ define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %_ ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuud %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x74,0xc9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer ret <16 x i32> %res } -declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <16 x i32>, <16 x i32>) +declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <64 x i8>, <64 x i8>) +declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <64 x i8>, <64 x i8>) ; VNNI INT16 diff --git a/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll index 0c5fd3bf9d241..1f270d539cdb4 100644 --- a/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll @@ -101,7 +101,7 @@ declare <8 x float> @llvm.x86.avx10.vdpphps.256(<8 x float>, <16 x half>, <16 x ; VNNI INT8 -define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_mask_dpbssd_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -113,13 +113,13 @@ define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf2,0x77,0x09,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W ret <4 x i32> %res } -define <4 x i32> 
@test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_maskz_dpbssds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -131,13 +131,13 @@ define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x77,0x89,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer ret <4 x i32> %res } -define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_maskz_dpbssds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -149,13 +149,13 @@ define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf2,0x77,0x29,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W ret <8 x i32> %res } -define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_mask_dpbssd_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -167,18 +167,18 @@ define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x77,0xa9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x 
i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_mask_dpbsud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -190,13 +190,13 @@ define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf2,0x76,0x09,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W ret <4 x i32> %res } -define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_maskz_dpbsuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -208,13 +208,13 @@ define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x76,0x89,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer ret <4 x i32> %res } -define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_maskz_dpbsuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -226,13 +226,13 @@ define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf2,0x76,0x29,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W ret <8 x i32> %res } -define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_mask_dpbsud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -244,18 +244,18 @@ define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, ; 
X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x76,0xa9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_mask_dpbuud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -267,13 +267,13 @@ define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf2,0x74,0x09,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W ret <4 x i32> %res } -define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) { +define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) { ; X86-LABEL: test_mm_maskz_dpbuuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -285,13 +285,13 @@ define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x74,0x89,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer ret <4 x i32> %res } -define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_maskz_dpbuuds_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -303,13 
+303,13 @@ define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf2,0x74,0x29,0x51,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W ret <8 x i32> %res } -define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) { +define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) { ; X86-LABEL: test_mm256_mask_dpbuud_epi32: ; X86: # %bb.0: ; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] @@ -321,16 +321,16 @@ define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, ; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] ; X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x74,0xa9,0x50,0xc2] ; X64-NEXT: retq # encoding: [0xc3] - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>) ; VNNI INT16 diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll index 8aa898f3ec576..da0cef0e4e99b 100644 --- a/llvm/test/CodeGen/X86/avx512-mask-op.ll +++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll @@ -2119,8 +2119,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) { ; KNL-LABEL: ktest_1: ; KNL: ## %bb.0: ; KNL-NEXT: vcmpgtpd (%rdi), %zmm0, %k1 -; KNL-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} -; KNL-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} +; KNL-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1} ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb %al, %al ; KNL-NEXT: je LBB44_2 @@ -2152,8 +2151,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) { ; AVX512BW-LABEL: ktest_1: ; AVX512BW: ## %bb.0: ; AVX512BW-NEXT: vcmpgtpd (%rdi), %zmm0, %k1 -; AVX512BW-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} -; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} +; AVX512BW-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax ; AVX512BW-NEXT: testb %al, %al ; AVX512BW-NEXT: je LBB44_2 diff --git a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll new file mode 100644 index 0000000000000..ce9a0fb0d5336 --- /dev/null +++ b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll @@ -0,0 
+1,318 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avxvnniint8 --show-mc-encoding | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnniint8 --show-mc-encoding | FileCheck %s --check-prefixes=X64 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=AVX10-X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=AVX10-X64 + +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbssd_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbssd_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssd_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssd_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbssds_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbssds_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssds_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssds_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbssd_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbssd_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssd_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: 
[0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssd_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbssds_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbssds_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssds_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssds_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbsud_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbsud_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsud_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsud_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbsuds_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbsuds_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsuds_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsuds_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> 
@llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbsud_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbsud_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsud_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsud_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbsuds_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbsuds_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsuds_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsuds_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbuud(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbuud: +; X86: # %bb.0: +; X86-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbuud: +; X64: # %bb.0: +; X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuud: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuud: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) + +define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) { +; 
X86-LABEL: test_int_x86_avx2_vpdpbuuds_128: +; X86: # %bb.0: +; X86-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbuuds_128: +; X64: # %bb.0: +; X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuuds_128: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuuds_128: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) + ret <4 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbuud_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x50,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbuud_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x50,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuud_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x50,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuud_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x50,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) + +define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) { +; X86-LABEL: test_int_x86_avx2_vpdpbuuds_256: +; X86: # %bb.0: +; X86-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x51,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx2_vpdpbuuds_256: +; X64: # %bb.0: +; X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x51,0xc2] +; X64-NEXT: retq # encoding: [0xc3] +; +; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuuds_256: +; AVX10-X86: # %bb.0: +; AVX10-X86-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x51,0xc2] +; AVX10-X86-NEXT: retl # encoding: [0xc3] +; +; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuuds_256: +; AVX10-X64: # %bb.0: +; AVX10-X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x51,0xc2] +; AVX10-X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) + ret <8 x i32> %res +} + diff --git a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll index 0ddd0171a58a0..6c3d90aab77e8 100644 --- a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll @@ -5,9 +5,9 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=AVX10-X64 -declare <4 x i32> 
@llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbssd_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -41,16 +41,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbssds_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -84,16 +84,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbssd_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -127,16 +127,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = 
call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbssds_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -170,16 +170,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbsud_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -213,16 +213,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: 
test_int_x86_avx2_vpdpbsuds_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -256,16 +256,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbsud_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -299,16 +299,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbsuds_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -342,16 +342,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> 
@llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbuud_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -385,16 +385,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) { +define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbuuds_128: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -428,16 +428,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbuud_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -471,16 +471,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, pt ; AVX10-X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x50,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, 
%ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) { +define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) { ; X86-LABEL: test_int_x86_avx2_vpdpbuuds_256: ; X86: # %bb.0: ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] @@ -514,9 +514,9 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, p ; AVX10-X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x51,0xc2] ; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0] ; AVX10-X64-NEXT: retq # encoding: [0xc3] - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } diff --git a/llvm/test/CodeGen/X86/basic-block-sections-cfg.ll b/llvm/test/CodeGen/X86/basic-block-sections-cfg.ll new file mode 100644 index 0000000000000..b8eadc3cac36e --- /dev/null +++ b/llvm/test/CodeGen/X86/basic-block-sections-cfg.ll @@ -0,0 +1,40 @@ +; BB section test with CFG. 
+; +;; Profile for version 1: +; RUN: echo 'v1' > %t +; RUN: echo 'f foo' >> %t +; RUN: echo 'g 0:10,1:9,2:1 1:8,3:8 2:2,3:2 3:11' >> %t +; RUN: echo 'c 0 2 3' >> %t +; +; RUN: llc < %s -O0 -mtriple=x86_64-pc-linux -function-sections -basic-block-sections=%t | FileCheck %s +; +define void @foo(i1 zeroext) nounwind { + %2 = alloca i8, align 1 + %3 = zext i1 %0 to i8 + store i8 %3, ptr %2, align 1 + %4 = load i8, ptr %2, align 1 + %5 = trunc i8 %4 to i1 + br i1 %5, label %6, label %8 + +6: ; preds = %1 + %7 = call i32 @bar() + br label %10 + +8: ; preds = %1 + %9 = call i32 @baz() + br label %10 + +10: ; preds = %8, %6 + ret void +} + +declare i32 @bar() #1 + +declare i32 @baz() #1 + +; CHECK: .section .text.foo,"ax",@progbits +; CHECK: callq baz +; CHECK: retq +; CHECK: .section .text.split.foo,"ax",@progbits +; CHECK: callq bar + diff --git a/llvm/test/CodeGen/X86/basic-block-sections-clusters-error.ll b/llvm/test/CodeGen/X86/basic-block-sections-clusters-error.ll index d6f3d5010b556..751ab76722c07 100644 --- a/llvm/test/CodeGen/X86/basic-block-sections-clusters-error.ll +++ b/llvm/test/CodeGen/X86/basic-block-sections-clusters-error.ll @@ -57,6 +57,19 @@ ; RUN: echo 'p 1 2 3 2' >> %t13 ; RUN: not --crash llc < %s -O0 -mtriple=x86_64 -function-sections -basic-block-sections=%t13 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR13 ; CHECK-ERROR13: LLVM ERROR: invalid profile {{.*}} at line 4: duplicate cloned block in path: '2' +; RUN: echo 'v1' > %t14 +; RUN: echo 'f dummy1' >> %t14 +; RUN: echo 'c 0 1' >> %t14 +; RUN: echo 'g 0,1:2' >> %t14 +; RUN: not --crash llc < %s -O0 -mtriple=x86_64 -function-sections -basic-block-sections=%t14 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR14 +; CHECK-ERROR14: LLVM ERROR: invalid profile {{.*}} at line 4: unsigned integer expected: '' +; RUN: echo 'v1' > %t15 +; RUN: echo 'f dummy1' >> %t15 +; RUN: echo 'c 0 1' >> %t15 +; RUN: echo 'g 0:4,1:2:3' >> %t15 +; RUN: not --crash llc < %s -O0 -mtriple=x86_64 -function-sections -basic-block-sections=%t15 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR15 +; CHECK-ERROR15: LLVM ERROR: invalid profile {{.*}} at line 4: unsigned integer expected: '2:3' + define i32 @dummy1(i32 %x, i32 %y, i32 %z) { entry: diff --git a/llvm/test/CodeGen/X86/combine-add.ll b/llvm/test/CodeGen/X86/combine-add.ll index ff9f995c4765b..51a8bf5b48415 100644 --- a/llvm/test/CodeGen/X86/combine-add.ll +++ b/llvm/test/CodeGen/X86/combine-add.ll @@ -235,10 +235,10 @@ define void @PR52039(ptr %pa, ptr %pb) { ; SSE-NEXT: psubd %xmm1, %xmm3 ; SSE-NEXT: psubd %xmm0, %xmm2 ; SSE-NEXT: movdqa %xmm2, %xmm0 -; SSE-NEXT: paddd %xmm2, %xmm0 +; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: paddd %xmm2, %xmm0 ; SSE-NEXT: movdqa %xmm3, %xmm1 -; SSE-NEXT: paddd %xmm3, %xmm1 +; SSE-NEXT: paddd %xmm1, %xmm1 ; SSE-NEXT: paddd %xmm3, %xmm1 ; SSE-NEXT: movdqu %xmm3, 16(%rsi) ; SSE-NEXT: movdqu %xmm2, (%rsi) diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll index 8e4a50ea266c3..ae4d24f91ffc0 100644 --- a/llvm/test/CodeGen/X86/combine-mul.ll +++ b/llvm/test/CodeGen/X86/combine-mul.ll @@ -81,7 +81,7 @@ define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) { ; SSE-LABEL: combine_vec_mul_pow2c: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: paddq %xmm0, %xmm2 +; SSE-NEXT: paddq %xmm2, %xmm2 ; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] ; SSE-NEXT: movdqa %xmm1, %xmm2 ; SSE-NEXT: psllq $4, %xmm2 diff --git a/llvm/test/CodeGen/X86/combine-pack.ll b/llvm/test/CodeGen/X86/combine-pack.ll new file mode 
100644 index 0000000000000..2f5454dc2c3ec --- /dev/null +++ b/llvm/test/CodeGen/X86/combine-pack.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX + +declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) + +define <8 x i16> @combine_packss_v4i32_signsplat(<4 x i32> %a0, <4 x i32> %a1) { +; SSE-LABEL: combine_packss_v4i32_signsplat: +; SSE: # %bb.0: +; SSE-NEXT: pcmpgtd %xmm1, %xmm0 +; SSE-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE-NEXT: packssdw %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: combine_packss_v4i32_signsplat: +; AVX: # %bb.0: +; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %cmp = icmp sgt <4 x i32> %a0, %a1 + %ext = sext <4 x i1> %cmp to <4 x i32> + %pack = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %ext, <4 x i32> splat (i32 -1)) + %signsplat = ashr <8 x i16> %pack, splat (i16 15) + ret <8 x i16> %signsplat +} + +define <8 x i16> @combine_packss_v4i32_freeze_signsplat(<4 x i32> %a0, <4 x i32> %a1) { +; SSE-LABEL: combine_packss_v4i32_freeze_signsplat: +; SSE: # %bb.0: +; SSE-NEXT: pcmpgtd %xmm1, %xmm0 +; SSE-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE-NEXT: packssdw %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: combine_packss_v4i32_freeze_signsplat: +; AVX: # %bb.0: +; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %cmp = icmp sgt <4 x i32> %a0, %a1 + %ext = sext <4 x i1> %cmp to <4 x i32> + %pack = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %ext, <4 x i32> splat (i32 -1)) + %freeze = freeze <8 x i16> %pack + %signsplat = ashr <8 x i16> %freeze, splat (i16 15) + ret <8 x i16> %signsplat +} diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll index 98187d61c1f84..6bcbfe1808933 100644 --- a/llvm/test/CodeGen/X86/combine-sdiv.ll +++ b/llvm/test/CodeGen/X86/combine-sdiv.ll @@ -2187,13 +2187,13 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41-NEXT: pxor %xmm0, %xmm0 ; SSE41-NEXT: pxor %xmm3, %xmm3 ; SSE41-NEXT: pcmpgtb %xmm1, %xmm3 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero ; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] ; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,2,2,2,2,128,2,128] ; SSE41-NEXT: psrlw $8, %xmm3 -; SSE41-NEXT: paddw %xmm4, %xmm4 -; SSE41-NEXT: pmovsxbw %xmm1, %xmm2 -; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4,5],xmm4[6],xmm2[7] +; SSE41-NEXT: pmovsxbw %xmm1, %xmm0 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; SSE41-NEXT: paddw %xmm2, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2],xmm0[3,4,5],xmm2[6],xmm0[7] ; SSE41-NEXT: psrlw $8, %xmm2 ; SSE41-NEXT: packuswb %xmm3, %xmm2 ; SSE41-NEXT: paddb %xmm1, %xmm2 @@ -2201,15 +2201,14 @@ define <16 x i8> 
@non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] ; SSE41-NEXT: psraw $8, %xmm0 ; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: paddw %xmm0, %xmm3 -; SSE41-NEXT: psllw $7, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5],xmm0[6],xmm3[7] -; SSE41-NEXT: psrlw $8, %xmm0 +; SSE41-NEXT: psllw $7, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm0[5],xmm3[6],xmm0[7] +; SSE41-NEXT: psrlw $8, %xmm3 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE41-NEXT: psraw $8, %xmm2 ; SSE41-NEXT: psllw $7, %xmm2 ; SSE41-NEXT: psrlw $8, %xmm2 -; SSE41-NEXT: packuswb %xmm0, %xmm2 +; SSE41-NEXT: packuswb %xmm3, %xmm2 ; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255] ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,255,255,255,0,255,255,0,0,0,0,255,0,255] @@ -2225,18 +2224,17 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [256,2,2,2,2,128,2,128] ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4,5],xmm2[6],xmm3[7] +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5],xmm3[6],xmm2[7] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 -; AVX1-NEXT: vpsllw $7, %xmm2, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5],xmm2[6],xmm3[7] +; AVX1-NEXT: vpsllw $7, %xmm2, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5],xmm3[6],xmm2[7] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/debug-loclists.ll b/llvm/test/CodeGen/X86/debug-loclists.ll index 406bce587b904..62388a4c91e0c 100644 --- a/llvm/test/CodeGen/X86/debug-loclists.ll +++ b/llvm/test/CodeGen/X86/debug-loclists.ll @@ -2,6 +2,10 @@ ; RUN: llvm-dwarfdump -v -debug-info -debug-loclists %t | \ ; RUN: FileCheck %s --check-prefixes=CHECK,DWARF32 +; RUN: llc -mtriple=x86_64-pc-mingw -filetype=obj -function-sections -o %t -experimental-debug-variable-locations=true < %s +; RUN: llvm-dwarfdump -v -debug-info -debug-loclists %t | \ +; RUN: FileCheck %s --check-prefixes=CHECK,DWARF32 + ; RUN: llc -dwarf64 -mtriple=x86_64-pc-linux -filetype=obj -function-sections -o %t -experimental-debug-variable-locations=true < %s ; RUN: llvm-dwarfdump -v -debug-info -debug-loclists %t | \ ; RUN: 
FileCheck %s --check-prefixes=CHECK,DWARF64 @@ -10,6 +14,10 @@ ; RUN: llvm-dwarfdump -v -debug-info -debug-loclists %t | \ ; RUN: FileCheck %s --check-prefixes=DWO,DWO32 +; RUN: llc -dwarf-version=5 -split-dwarf-file=foo.dwo -mtriple=x86_64-pc-mingw -filetype=obj -function-sections -o %t -experimental-debug-variable-locations=true < %s +; RUN: llvm-dwarfdump -v -debug-info -debug-loclists %t | \ +; RUN: FileCheck %s --check-prefixes=DWO,DWO32 + ; RUN: llc -dwarf64 -dwarf-version=5 -split-dwarf-file=foo.dwo -mtriple=x86_64-pc-linux -filetype=obj -function-sections -o %t -experimental-debug-variable-locations=true < %s ; RUN: llvm-dwarfdump -v -debug-info -debug-loclists %t | \ ; RUN: FileCheck %s --check-prefixes=DWO,DWO64 @@ -17,20 +25,20 @@ ; CHECK: DW_TAG_variable ; DWARF32-NEXT: DW_AT_location [DW_FORM_loclistx] (indexed (0x0) loclist = 0x00000018: ; DWARF64-NEXT: DW_AT_location [DW_FORM_loclistx] (indexed (0x0) loclist = 0x0000002c: -; CHECK-NEXT: [0x0000000000000000, 0x0000000000000003) ".text._Z2f1ii": DW_OP_consts +5, DW_OP_stack_value) +; CHECK-NEXT: [0x0000000000000000, 0x0000000000000003) ".text{{[.$]}}_Z2f1ii": DW_OP_consts +5, DW_OP_stack_value) ; CHECK-NEXT: DW_AT_name {{.*}} "x" ; CHECK: DW_TAG_variable ; DWARF32-NEXT: DW_AT_location [DW_FORM_loclistx] (indexed (0x1) loclist = 0x00000020: ; DWARF64-NEXT: DW_AT_location [DW_FORM_loclistx] (indexed (0x1) loclist = 0x00000034: -; CHECK-NEXT: [0x0000000000000000, 0x0000000000000003) ".text._Z2f1ii": DW_OP_consts +3, DW_OP_stack_value -; CHECK-NEXT: [0x0000000000000003, 0x0000000000000004) ".text._Z2f1ii": DW_OP_consts +4, DW_OP_stack_value) +; CHECK-NEXT: [0x0000000000000000, 0x0000000000000003) ".text{{[.$]}}_Z2f1ii": DW_OP_consts +3, DW_OP_stack_value +; CHECK-NEXT: [0x0000000000000003, 0x0000000000000004) ".text{{[.$]}}_Z2f1ii": DW_OP_consts +4, DW_OP_stack_value) ; CHECK-NEXT: DW_AT_name {{.*}} "y" ; CHECK: DW_TAG_variable ; DWARF32-NEXT: DW_AT_location [DW_FORM_loclistx] (indexed (0x2) loclist = 0x00000031: ; DWARF64-NEXT: DW_AT_location [DW_FORM_loclistx] (indexed (0x2) loclist = 0x00000045: -; CHECK-NEXT: [0x0000000000000003, 0x0000000000000004) ".text._Z2f1ii": DW_OP_reg0 RAX) +; CHECK-NEXT: [0x0000000000000003, 0x0000000000000004) ".text{{[.$]}}_Z2f1ii": DW_OP_reg0 RAX) ; CHECK-NEXT: DW_AT_name {{.*}} "r" ; CHECK: .debug_loclists contents: diff --git a/llvm/test/CodeGen/X86/dpbusd.ll b/llvm/test/CodeGen/X86/dpbusd.ll index 3aa77c3955c63..7bd22d57347b3 100644 --- a/llvm/test/CodeGen/X86/dpbusd.ll +++ b/llvm/test/CodeGen/X86/dpbusd.ll @@ -1,40 +1,25 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=AVXVNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=AVX512,AVX512VNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI define i32 @no_dpbusd(ptr%a, 
ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: no_dpbusd: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq -; -; AVX512-LABEL: no_dpbusd: -; AVX512: # %bb.0: # %entry -; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVX512-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0 -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: vmovd %xmm0, %eax -; AVX512-NEXT: addl %edx, %eax -; AVX512-NEXT: vzeroupper -; AVX512-NEXT: retq +; CHECK-LABEL: no_dpbusd: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; CHECK-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: addl %edx, %eax +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq entry: %0 = load <16 x i8>, ptr %a, align 16 %1 = zext <16 x i8> %0 to <16 x i32> @@ -99,25 +84,44 @@ entry: } define i32 @mul_zext(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: mul_zext: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1 -; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, 
%xmm1 -; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_zext: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX-NEXT: addl %edx, %eax +; AVXVNNI-AVX-NEXT: vzeroupper +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_zext: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX512-NEXT: addl %edx, %eax +; AVXVNNI-AVX512-NEXT: vzeroupper +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512-LABEL: mul_zext: ; AVX512: # %bb.0: # %entry @@ -153,25 +157,44 @@ entry: } define i32 @mul_sext(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: mul_sext: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = 
mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero -; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1 -; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpmovsxwd %xmm1, %ymm1 -; AVXVNNI-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_sext: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm1, %ymm1 +; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX-NEXT: addl %edx, %eax +; AVXVNNI-AVX-NEXT: vzeroupper +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_sext: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1 +; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0 +; AVXVNNI-AVX512-NEXT: vpmovsxwd %ymm0, %zmm0 +; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX512-NEXT: addl %edx, %eax +; AVXVNNI-AVX512-NEXT: vzeroupper +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512-LABEL: mul_sext: ; AVX512: # %bb.0: # %entry @@ -312,17 +335,30 @@ entry: declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) define i32 @vpdpbusd_128(ptr%a, ptr%b, i32 %c, i32 %n) { -; AVXVNNI-LABEL: vpdpbusd_128: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; AVXVNNI-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVXVNNI-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3] -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3] -; AVXVNNI-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2 -; AVXVNNI-NEXT: vmovd 
%xmm2, %eax -; AVXVNNI-NEXT: addl %edx, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: vpdpbusd_128: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVXVNNI-AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3] +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2 +; AVXVNNI-AVX-NEXT: vmovd %xmm2, %eax +; AVXVNNI-AVX-NEXT: addl %edx, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: vpdpbusd_128: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3] +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3] +; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2 +; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax +; AVXVNNI-AVX512-NEXT: addl %edx, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: vpdpbusd_128: ; AVX512VNNI: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/dpbusd_const.ll b/llvm/test/CodeGen/X86/dpbusd_const.ll index 456e6e8f263aa..bb47df59eefad 100644 --- a/llvm/test/CodeGen/X86/dpbusd_const.ll +++ b/llvm/test/CodeGen/X86/dpbusd_const.ll @@ -1,20 +1,21 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=ALL,AVXVNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VNNI -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VLVNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI define i32 @mul_4xi8_zc_exceed(<4 x i8> %a, i32 %c) { -; ALL-LABEL: mul_4xi8_zc_exceed: -; ALL: # %bb.0: # %entry -; ALL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0] -; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] -; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0 -; ALL-NEXT: vmovd %xmm0, %eax -; ALL-NEXT: addl %edi, %eax -; ALL-NEXT: retq +; CHECK-LABEL: mul_4xi8_zc_exceed: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0] +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] +; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; CHECK-NEXT: 
vmovd %xmm0, %eax +; CHECK-NEXT: addl %edi, %eax +; CHECK-NEXT: retq entry: %0 = zext <4 x i8> %a to <4 x i32> %1 = mul nsw <4 x i32> %0, @@ -24,14 +25,24 @@ entry: } define i32 @mul_4xi8_zc(<4 x i8> %a, i32 %c) { -; AVXVNNI-LABEL: mul_4xi8_zc: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] -; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 -; AVXVNNI-NEXT: vmovd %xmm1, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_4xi8_zc: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_4xi8_zc: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: mul_4xi8_zc: ; AVX512VNNI: # %bb.0: # %entry @@ -62,16 +73,26 @@ entry: } define i32 @mul_4xi4_cz(<4 x i4> %a, i32 %c) { -; AVXVNNI-LABEL: mul_4xi4_cz: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] -; AVXVNNI-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] -; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 -; AVXVNNI-NEXT: vmovd %xmm1, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_4xi4_cz: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVXVNNI-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_4xi4_cz: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpmovdb %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: mul_4xi4_cz: ; AVX512VNNI: # %bb.0: # %entry @@ -104,15 +125,26 @@ entry: } define i32 @mul_4xi8_cs(<4 x i8> %a, i32 %c) { -; AVXVNNI-LABEL: mul_4xi8_cs: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] -; AVXVNNI-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0] -; AVXVNNI-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1 -; AVXVNNI-NEXT: vmovd %xmm1, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_4xi8_cs: +; AVXVNNI-AVX: # %bb.0: # %entry +; 
AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0] +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1 +; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_4xi8_cs: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3] +; AVXVNNI-AVX512-NEXT: vmovd {{.*#+}} xmm1 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0] +; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %xmm0, %xmm1, %xmm2 +; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512VNNI-LABEL: mul_4xi8_cs: ; AVX512VNNI: # %bb.0: # %entry @@ -145,17 +177,17 @@ entry: } define i32 @mul_4xi8_cs_exceed(<4 x i8> %a, i32 %c) { -; ALL-LABEL: mul_4xi8_cs_exceed: -; ALL: # %bb.0: # %entry -; ALL-NEXT: vpmovsxbd %xmm0, %xmm0 -; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0] -; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] -; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0 -; ALL-NEXT: vmovd %xmm0, %eax -; ALL-NEXT: addl %edi, %eax -; ALL-NEXT: retq +; CHECK-LABEL: mul_4xi8_cs_exceed: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vpmovsxbd %xmm0, %xmm0 +; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0] +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] +; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: addl %edi, %eax +; CHECK-NEXT: retq entry: %0 = sext <4 x i8> %a to <4 x i32> %1 = mul nsw <4 x i32> , %0 @@ -265,24 +297,44 @@ entry: } define i32 @mul_64xi8_zc(<64 x i8> %a, i32 %c) { -; AVXVNNI-LABEL: mul_64xi8_zc: -; AVXVNNI: # %bb.0: # %entry -; AVXVNNI-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64] -; AVXVNNI-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; AVXVNNI-NEXT: vpxor %xmm4, %xmm4, %xmm4 -; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4 -; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3 -; AVXVNNI-NEXT: vpaddd %ymm4, %ymm3, %ymm0 -; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] -; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0 -; AVXVNNI-NEXT: vmovd %xmm0, %eax -; AVXVNNI-NEXT: addl %edi, %eax -; AVXVNNI-NEXT: vzeroupper -; AVXVNNI-NEXT: retq +; AVXVNNI-AVX-LABEL: mul_64xi8_zc: +; AVXVNNI-AVX: # %bb.0: # %entry +; AVXVNNI-AVX-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64] +; AVXVNNI-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVXVNNI-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4 +; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3 +; AVXVNNI-AVX-NEXT: vpaddd %ymm4, %ymm3, %ymm0 +; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; 
AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX-NEXT: addl %edi, %eax +; AVXVNNI-AVX-NEXT: vzeroupper +; AVXVNNI-AVX-NEXT: retq +; +; AVXVNNI-AVX512-LABEL: mul_64xi8_zc: +; AVXVNNI-AVX512: # %bb.0: # %entry +; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; AVXVNNI-AVX512-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64] +; AVXVNNI-AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVXVNNI-AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4 +; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3 +; AVXVNNI-AVX512-NEXT: vpaddd %ymm4, %ymm3, %ymm0 +; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1] +; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 +; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax +; AVXVNNI-AVX512-NEXT: addl %edi, %eax +; AVXVNNI-AVX512-NEXT: vzeroupper +; AVXVNNI-AVX512-NEXT: retq ; ; AVX512-LABEL: mul_64xi8_zc: ; AVX512: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/fadd-combines.ll b/llvm/test/CodeGen/X86/fadd-combines.ll index 1082177e3da19..2c06c538ae10d 100644 --- a/llvm/test/CodeGen/X86/fadd-combines.ll +++ b/llvm/test/CodeGen/X86/fadd-combines.ll @@ -5,7 +5,7 @@ define float @fadd_zero_f32(float %x) #0 { ; CHECK-LABEL: fadd_zero_f32: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %y = fadd float %x, 0.0 + %y = fadd nsz float %x, 0.0 ret float %y } @@ -13,7 +13,7 @@ define <4 x float> @fadd_zero_4f32(<4 x float> %x) #0 { ; CHECK-LABEL: fadd_zero_4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %y = fadd <4 x float> %x, zeroinitializer + %y = fadd nsz <4 x float> %x, zeroinitializer ret <4 x float> %y } @@ -31,8 +31,8 @@ define float @fadd_2const_f32(float %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd float %x, 1.0 - %z = fadd float %y, 2.0 + %y = fadd reassoc nsz float %x, 1.0 + %z = fadd reassoc nsz float %y, 2.0 ret float %z } @@ -45,8 +45,8 @@ define <4 x float> @fadd_2const_4f32(<4 x float> %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: addps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd <4 x float> %x, - %z = fadd <4 x float> %y, + %y = fadd reassoc nsz <4 x float> %x, + %z = fadd reassoc nsz <4 x float> %y, ret <4 x float> %z } @@ -56,8 +56,8 @@ define float @fadd_x_fmul_x_c_f32(float %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fmul float %x, 2.0 - %z = fadd float %x, %y + %y = fmul reassoc nsz float %x, 2.0 + %z = fadd reassoc nsz float %x, %y ret float %z } @@ -70,8 +70,8 @@ define <4 x float> @fadd_x_fmul_x_c_4f32(<4 x float> %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fmul <4 x float> %x, - %z = fadd <4 x float> %x, %y + %y = fmul reassoc nsz <4 x float> %x, + %z = fadd reassoc nsz <4 x float> %x, %y ret <4 x float> %z } @@ -81,8 +81,8 @@ define float @fadd_fmul_x_c_x_f32(float %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fmul float %x, 2.0 - %z = fadd float %y, %x + %y = fmul reassoc nsz float %x, 2.0 + %z = fadd reassoc nsz float %y, %x ret float %z } 
@@ -95,8 +95,8 @@ define <4 x float> @fadd_fmul_x_c_x_4f32(<4 x float> %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fmul <4 x float> %x, - %z = fadd <4 x float> %y, %x + %y = fmul reassoc nsz <4 x float> %x, + %z = fadd reassoc nsz <4 x float> %y, %x ret <4 x float> %z } @@ -106,9 +106,9 @@ define float @fadd_fadd_x_x_fmul_x_c_f32(float %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd float %x, %x - %z = fmul float %x, 2.0 - %w = fadd float %y, %z + %y = fadd reassoc nsz float %x, %x + %z = fmul reassoc nsz float %x, 2.0 + %w = fadd reassoc nsz float %y, %z ret float %w } @@ -121,9 +121,9 @@ define <4 x float> @fadd_fadd_x_x_fmul_x_c_4f32(<4 x float> %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd <4 x float> %x, %x - %z = fmul <4 x float> %x, - %w = fadd <4 x float> %y, %z + %y = fadd reassoc nsz <4 x float> %x, %x + %z = fmul reassoc nsz <4 x float> %x, + %w = fadd reassoc nsz <4 x float> %y, %z ret <4 x float> %w } @@ -133,9 +133,9 @@ define float @fadd_fmul_x_c_fadd_x_x_f32(float %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd float %x, %x - %z = fmul float %x, 2.0 - %w = fadd float %z, %y + %y = fadd reassoc nsz float %x, %x + %z = fmul reassoc nsz float %x, 2.0 + %w = fadd reassoc nsz float %z, %y ret float %w } @@ -148,9 +148,9 @@ define <4 x float> @fadd_fmul_x_c_fadd_x_x_4f32(<4 x float> %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd <4 x float> %x, %x - %z = fmul <4 x float> %x, - %w = fadd <4 x float> %z, %y + %y = fadd reassoc nsz <4 x float> %x, %x + %z = fmul reassoc nsz <4 x float> %x, + %w = fadd reassoc nsz <4 x float> %z, %y ret <4 x float> %w } @@ -160,8 +160,8 @@ define float @fadd_x_fadd_x_x_f32(float %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd float %x, %x - %z = fadd float %x, %y + %y = fadd reassoc nsz float %x, %x + %z = fadd reassoc nsz float %x, %y ret float %z } @@ -174,8 +174,8 @@ define <4 x float> @fadd_x_fadd_x_x_4f32(<4 x float> %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd <4 x float> %x, %x - %z = fadd <4 x float> %x, %y + %y = fadd reassoc nsz <4 x float> %x, %x + %z = fadd reassoc nsz <4 x float> %x, %y ret <4 x float> %z } @@ -185,8 +185,8 @@ define float @fadd_fadd_x_x_x_f32(float %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd float %x, %x - %z = fadd float %y, %x + %y = fadd reassoc nsz float %x, %x + %z = fadd reassoc nsz float %y, %x ret float %z } @@ -199,8 +199,8 @@ define <4 x float> @fadd_fadd_x_x_x_4f32(<4 x float> %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd <4 x float> %x, %x - %z = fadd <4 x float> %y, %x + %y = fadd reassoc nsz <4 x float> %x, %x + %z = fadd reassoc nsz <4 x float> %y, %x ret <4 x float> %z } @@ -210,8 +210,8 @@ define float @fadd_fadd_x_x_fadd_x_x_f32(float %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd float %x, %x - %z = fadd float %y, %y + %y = fadd reassoc nsz float %x, %x + %z = fadd reassoc nsz float %y, %y ret float %z } @@ -224,8 +224,8 
@@ define <4 x float> @fadd_fadd_x_x_fadd_x_x_4f32(<4 x float> %x) #0 { ; CHECK: # %bb.0: ; CHECK-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %y = fadd <4 x float> %x, %x - %z = fadd <4 x float> %y, %y + %y = fadd reassoc nsz <4 x float> %x, %x + %z = fadd reassoc nsz <4 x float> %y, %y ret <4 x float> %z } @@ -241,9 +241,9 @@ define float @fadd_const_multiuse_attr(float %x) #0 { ; CHECK-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: addss %xmm1, %xmm0 ; CHECK-NEXT: retq - %a1 = fadd float %x, 42.0 - %a2 = fadd float %a1, 17.0 - %a3 = fadd float %a1, %a2 + %a1 = fadd reassoc nsz float %x, 42.0 + %a2 = fadd reassoc nsz float %a1, 17.0 + %a3 = fadd reassoc nsz float %a1, %a2 ret float %a3 } @@ -275,4 +275,4 @@ define <2 x double> @fmul2_negated_vec(<2 x double> %a, <2 x double> %b, <2 x do ret <2 x double> %sub } -attributes #0 = { "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" "no-signed-zeros-fp-math"="true" } +attributes #0 = { "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" } diff --git a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll index 54d82b0c1c929..c66473e9edd19 100644 --- a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll +++ b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll @@ -1756,263 +1756,131 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; ; AVX512-LABEL: test_fmaximumnum_v4f16: ; AVX512: # %bb.0: -; AVX512-NEXT: subq $56, %rsp -; AVX512-NEXT: vmovdqa %xmm1, %xmm5 -; AVX512-NEXT: vmovdqa %xmm0, %xmm6 -; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpsrldq {{.*#+}} xmm1 = xmm6[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm1 -; AVX512-NEXT: seta %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm2 -; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm5[3,3,3,3] -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm6[3,3,3,3] -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512-NEXT: 
vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm1 -; AVX512-NEXT: seta %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; AVX512-NEXT: vpsrldq {{.*#+}} xmm1 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpsrldq {{.*#+}} xmm2 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512-NEXT: vucomiss %xmm2, %xmm2 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm2 -; AVX512-NEXT: seta %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm3 -; AVX512-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vshufpd {{.*#+}} xmm1 = xmm5[1,0] -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vshufpd {{.*#+}} xmm2 = xmm6[1,0] +; AVX512-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512-NEXT: vucomiss %xmm2, %xmm2 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm13 -; AVX512-NEXT: vcvtph2ps %xmm13, %xmm2 -; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm12 -; AVX512-NEXT: vcvtph2ps %xmm12, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm2 -; AVX512-NEXT: seta %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vpsrlq $48, %xmm5, %xmm0 -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpsrlq $48, %xmm6, %xmm1 -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm11 -; AVX512-NEXT: vcvtph2ps %xmm11, %xmm1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm15 -; AVX512-NEXT: vcvtph2ps %xmm15, %xmm7 -; AVX512-NEXT: vucomiss %xmm7, %xmm1 -; AVX512-NEXT: seta %al +; 
AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm1, %xmm7, %xmm7 {%k1} -; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm5[1,1,3,3] -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al +; AVX512-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: vmovdqa %xmm2, %xmm4 +; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1} +; AVX512-NEXT: vmaxss %xmm4, %xmm3, %xmm2 +; AVX512-NEXT: vcmpordss %xmm3, %xmm3, %k1 +; AVX512-NEXT: vmovss %xmm3, %xmm2, %xmm2 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3,3,3] +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm3, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm6[1,1,3,3] -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm9 -; AVX512-NEXT: vcvtph2ps %xmm9, %xmm4 -; AVX512-NEXT: vmovss %xmm4, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm10 -; AVX512-NEXT: vcvtph2ps %xmm10, %xmm3 -; AVX512-NEXT: vucomiss %xmm3, %xmm4 -; AVX512-NEXT: seta %al +; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[3,3,3,3] +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovdqa %xmm3, %xmm5 +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3 +; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1 +; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; AVX512-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm3, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 +; AVX512-NEXT: vpsrldq {{.*#+}} xmm4 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovdqa %xmm3, %xmm5 +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3 +; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1 ; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1} -; AVX512-NEXT: vcvtph2ps %xmm5, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: vshufpd {{.*#+}} xmm4 = xmm0[1,0] +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vcvtph2ps %xmm6, %xmm4 -; AVX512-NEXT: vucomiss %xmm4, %xmm4 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm4, %xmm4 {%k2} +; AVX512-NEXT: vshufpd {{.*#+}} xmm5 = xmm1[1,0] +; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5 +; AVX512-NEXT: vmovdqa %xmm4, %xmm6 +; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1} +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4 
+; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1 +; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 -; AVX512-NEXT: vcvtph2ps %xmm4, %xmm1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm8 -; AVX512-NEXT: vcvtph2ps %xmm8, %xmm2 -; AVX512-NEXT: vucomiss %xmm2, %xmm1 -; AVX512-NEXT: seta %al +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; AVX512-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; AVX512-NEXT: vpsrlq $48, %xmm0, %xmm3 +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm3, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1} -; AVX512-NEXT: vpsrld $16, %xmm5, %xmm1 -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al +; AVX512-NEXT: vpsrlq $48, %xmm1, %xmm4 +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovdqa %xmm3, %xmm5 +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3 +; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1 +; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpsrld $16, %xmm6, %xmm5 +; AVX512-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3] ; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX512-NEXT: vucomiss %xmm5, %xmm5 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm1, %xmm5, %xmm5 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm5, %xmm6 -; AVX512-NEXT: vcvtph2ps %xmm6, %xmm5 -; AVX512-NEXT: vmovss %xmm5, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm5 -; AVX512-NEXT: seta %al +; AVX512-NEXT: vmovdqa %xmm4, %xmm6 +; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1} +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4 +; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1 +; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; AVX512-NEXT: vcvtph2ps %xmm0, %xmm4 +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm5, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm7, %xmm7 -; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 -; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm5 -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm2 -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm14[0],xmm0[0],xmm14[1],xmm0[1] -; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload -; AVX512-NEXT: # xmm0 = xmm0[0],mem[0] -; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload -; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload -; AVX512-NEXT: # xmm14 = 
xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3] -; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload -; AVX512-NEXT: # xmm13 = xmm13[0],mem[0],xmm13[1],mem[1],xmm13[2],mem[2],xmm13[3],mem[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm13[0],xmm14[0],xmm13[1],xmm14[1] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1] -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm13[0] -; AVX512-NEXT: vpxor %xmm6, %xmm6, %xmm6 -; AVX512-NEXT: vpcmpeqw %xmm6, %xmm4, %xmm9 -; AVX512-NEXT: vpblendvb %xmm9, %xmm4, %xmm0, %xmm4 -; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload -; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload -; AVX512-NEXT: # xmm9 = xmm9[0],mem[0],xmm9[1],mem[1],xmm9[2],mem[2],xmm9[3],mem[3] -; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm11 # 16-byte Folded Reload -; AVX512-NEXT: # xmm11 = xmm12[0],mem[0],xmm12[1],mem[1],xmm12[2],mem[2],xmm12[3],mem[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm11[0],xmm9[0],xmm11[1],xmm9[1] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm9[0] -; AVX512-NEXT: vpcmpeqw %xmm6, %xmm1, %xmm6 -; AVX512-NEXT: vpblendvb %xmm6, %xmm1, %xmm4, %xmm1 -; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512-NEXT: xorl %eax, %eax -; AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $65535, %ecx # imm = 0xFFFF -; AVX512-NEXT: movl $0, %edx -; AVX512-NEXT: cmovel %ecx, %edx -; AVX512-NEXT: vcvtph2ps %xmm5, %xmm2 -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %esi -; AVX512-NEXT: cmovel %ecx, %esi -; AVX512-NEXT: vcvtph2ps %xmm3, %xmm2 -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %edi -; AVX512-NEXT: cmovel %ecx, %edi -; AVX512-NEXT: vcvtph2ps %xmm7, %xmm2 -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %r8d -; AVX512-NEXT: cmovel %ecx, %r8d -; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %r9d -; AVX512-NEXT: cmovel %ecx, %r9d -; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %r10d -; AVX512-NEXT: cmovel %ecx, %r10d -; AVX512-NEXT: vcvtph2ps (%rsp), %xmm2 # 16-byte Folded Reload -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %r11d -; AVX512-NEXT: cmovel %ecx, %r11d -; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: vmovd %esi, %xmm2 -; AVX512-NEXT: vpinsrw $1, %edx, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $2, %edi, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $3, %r8d, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $4, %r9d, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $5, %r10d, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $6, %r11d, %xmm2, %xmm2 -; AVX512-NEXT: cmovel %ecx, %eax -; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2 -; AVX512-NEXT: vpblendvb 
%xmm2, %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: addq $56, %rsp +; AVX512-NEXT: vcvtph2ps %xmm1, %xmm5 +; AVX512-NEXT: vmovdqa %xmm4, %xmm6 +; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1} +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4 +; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1 +; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al +; AVX512-NEXT: kmovw %eax, %k1 +; AVX512-NEXT: vpsrld $16, %xmm1, %xmm1 +; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 +; AVX512-NEXT: vmovdqa %xmm0, %xmm5 +; AVX512-NEXT: vmovss %xmm1, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} +; AVX512-NEXT: vmaxss %xmm5, %xmm1, %xmm0 +; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1 +; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] +; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] ; AVX512-NEXT: retq ; ; AVX10_2-LABEL: test_fmaximumnum_v4f16: diff --git a/llvm/test/CodeGen/X86/fshl.ll b/llvm/test/CodeGen/X86/fshl.ll index ec1b8a3c8d6d9..f998128af95f8 100644 --- a/llvm/test/CodeGen/X86/fshl.ll +++ b/llvm/test/CodeGen/X86/fshl.ll @@ -335,84 +335,83 @@ define i128 @var_shift_i128(i128 %x, i128 %y, i128 %z) nounwind { ; X86-SLOW-NEXT: pushl %esi ; X86-SLOW-NEXT: andl $-16, %esp ; X86-SLOW-NEXT: subl $32, %esp -; X86-SLOW-NEXT: movl 24(%ebp), %esi +; X86-SLOW-NEXT: movl 24(%ebp), %edi ; X86-SLOW-NEXT: movl 28(%ebp), %eax ; X86-SLOW-NEXT: movl 48(%ebp), %edx ; X86-SLOW-NEXT: movl 56(%ebp), %ecx ; X86-SLOW-NEXT: testb $64, %cl -; X86-SLOW-NEXT: movl 52(%ebp), %edi +; X86-SLOW-NEXT: movl 52(%ebp), %ebx ; X86-SLOW-NEXT: jne .LBB6_1 ; X86-SLOW-NEXT: # %bb.2: ; X86-SLOW-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-SLOW-NEXT: movl %esi, %edx -; X86-SLOW-NEXT: movl 32(%ebp), %esi -; X86-SLOW-NEXT: movl %edi, %ecx -; X86-SLOW-NEXT: movl %eax, %edi +; X86-SLOW-NEXT: movl %edi, %edx +; X86-SLOW-NEXT: movl 32(%ebp), %edi +; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-SLOW-NEXT: movl %eax, %ebx ; X86-SLOW-NEXT: movl 36(%ebp), %eax ; X86-SLOW-NEXT: jmp .LBB6_3 ; X86-SLOW-NEXT: .LBB6_1: ; X86-SLOW-NEXT: movl 40(%ebp), %ecx ; X86-SLOW-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-SLOW-NEXT: movl 44(%ebp), %ecx +; X86-SLOW-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-SLOW-NEXT: .LBB6_3: -; X86-SLOW-NEXT: movl 56(%ebp), %ebx -; X86-SLOW-NEXT: testb $32, %bl +; X86-SLOW-NEXT: movl 56(%ebp), %ecx +; X86-SLOW-NEXT: testb $32, %cl ; X86-SLOW-NEXT: jne .LBB6_4 ; X86-SLOW-NEXT: # %bb.5: -; X86-SLOW-NEXT: movl %ecx, %ebx ; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-SLOW-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-SLOW-NEXT: movl %edx, %edi +; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload ; X86-SLOW-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-SLOW-NEXT: jmp .LBB6_6 ; X86-SLOW-NEXT: .LBB6_4: -; X86-SLOW-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-SLOW-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; 
X86-SLOW-NEXT: movl %ecx, %edx -; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload +; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-SLOW-NEXT: movl %edx, %ebx +; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload +; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload ; X86-SLOW-NEXT: .LBB6_6: -; X86-SLOW-NEXT: movl %edx, %esi +; X86-SLOW-NEXT: movl %edi, %eax +; X86-SLOW-NEXT: shll %cl, %eax +; X86-SLOW-NEXT: shrl %esi +; X86-SLOW-NEXT: movl %ecx, %edx +; X86-SLOW-NEXT: notb %dl +; X86-SLOW-NEXT: movl %edx, %ecx +; X86-SLOW-NEXT: shrl %cl, %esi +; X86-SLOW-NEXT: orl %eax, %esi +; X86-SLOW-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-SLOW-NEXT: movl %ebx, %eax ; X86-SLOW-NEXT: movl 56(%ebp), %ecx -; X86-SLOW-NEXT: shll %cl, %esi -; X86-SLOW-NEXT: movl %ebx, %edi +; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx +; X86-SLOW-NEXT: shll %cl, %eax ; X86-SLOW-NEXT: shrl %edi -; X86-SLOW-NEXT: movl %ecx, %ebx -; X86-SLOW-NEXT: notb %bl -; X86-SLOW-NEXT: movl %ebx, %ecx -; X86-SLOW-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill +; X86-SLOW-NEXT: movl %edx, %ecx ; X86-SLOW-NEXT: shrl %cl, %edi -; X86-SLOW-NEXT: orl %esi, %edi +; X86-SLOW-NEXT: orl %eax, %edi ; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload ; X86-SLOW-NEXT: movl %esi, %eax ; X86-SLOW-NEXT: movl 56(%ebp), %ecx ; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx ; X86-SLOW-NEXT: shll %cl, %eax -; X86-SLOW-NEXT: shrl %edx -; X86-SLOW-NEXT: movl %ebx, %ecx -; X86-SLOW-NEXT: shrl %cl, %edx -; X86-SLOW-NEXT: orl %eax, %edx -; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-SLOW-NEXT: movl %ebx, %eax +; X86-SLOW-NEXT: shrl %ebx +; X86-SLOW-NEXT: movl %edx, %ecx +; X86-SLOW-NEXT: shrl %cl, %ebx +; X86-SLOW-NEXT: orl %eax, %ebx ; X86-SLOW-NEXT: movl 56(%ebp), %ecx ; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx +; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload ; X86-SLOW-NEXT: shll %cl, %eax ; X86-SLOW-NEXT: shrl %esi -; X86-SLOW-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload +; X86-SLOW-NEXT: movl %edx, %ecx ; X86-SLOW-NEXT: shrl %cl, %esi ; X86-SLOW-NEXT: orl %eax, %esi -; X86-SLOW-NEXT: movl 56(%ebp), %ecx -; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; X86-SLOW-NEXT: shll %cl, %eax -; X86-SLOW-NEXT: shrl %ebx -; X86-SLOW-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload -; X86-SLOW-NEXT: shrl %cl, %ebx -; X86-SLOW-NEXT: orl %eax, %ebx ; X86-SLOW-NEXT: movl 8(%ebp), %eax -; X86-SLOW-NEXT: movl %ebx, 12(%eax) -; X86-SLOW-NEXT: movl %esi, 8(%eax) -; X86-SLOW-NEXT: movl %edx, 4(%eax) -; X86-SLOW-NEXT: movl %edi, (%eax) +; X86-SLOW-NEXT: movl %esi, 12(%eax) +; X86-SLOW-NEXT: movl %ebx, 8(%eax) +; X86-SLOW-NEXT: movl %edi, 4(%eax) +; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload +; X86-SLOW-NEXT: movl %ecx, (%eax) ; X86-SLOW-NEXT: leal -12(%ebp), %esp ; X86-SLOW-NEXT: popl %esi ; X86-SLOW-NEXT: popl %edi diff --git a/llvm/test/CodeGen/X86/fshr.ll b/llvm/test/CodeGen/X86/fshr.ll index 544ab7fc77374..c307833e488c9 100644 --- a/llvm/test/CodeGen/X86/fshr.ll +++ b/llvm/test/CodeGen/X86/fshr.ll @@ -322,79 +322,79 @@ define i128 @var_shift_i128(i128 %x, i128 %y, i128 %z) nounwind { ; X86-SLOW-NEXT: subl $16, %esp ; X86-SLOW-NEXT: movl 24(%ebp), %edx ; X86-SLOW-NEXT: movl 28(%ebp), %esi -; X86-SLOW-NEXT: movl 48(%ebp), %ebx +; 
X86-SLOW-NEXT: movl 48(%ebp), %edi ; X86-SLOW-NEXT: movl 56(%ebp), %eax ; X86-SLOW-NEXT: testb $64, %al -; X86-SLOW-NEXT: movl 52(%ebp), %edi +; X86-SLOW-NEXT: movl 52(%ebp), %eax ; X86-SLOW-NEXT: je .LBB6_1 ; X86-SLOW-NEXT: # %bb.2: -; X86-SLOW-NEXT: movl %ebx, (%esp) # 4-byte Spill -; X86-SLOW-NEXT: movl %edx, %ebx +; X86-SLOW-NEXT: movl %edi, (%esp) # 4-byte Spill +; X86-SLOW-NEXT: movl %edx, %edi ; X86-SLOW-NEXT: movl 32(%ebp), %edx -; X86-SLOW-NEXT: movl %edi, %eax -; X86-SLOW-NEXT: movl %esi, %edi +; X86-SLOW-NEXT: movl %eax, %ecx +; X86-SLOW-NEXT: movl %esi, %eax ; X86-SLOW-NEXT: movl 36(%ebp), %esi ; X86-SLOW-NEXT: jmp .LBB6_3 ; X86-SLOW-NEXT: .LBB6_1: -; X86-SLOW-NEXT: movl 40(%ebp), %eax -; X86-SLOW-NEXT: movl %eax, (%esp) # 4-byte Spill -; X86-SLOW-NEXT: movl 44(%ebp), %eax +; X86-SLOW-NEXT: movl 40(%ebp), %ecx +; X86-SLOW-NEXT: movl %ecx, (%esp) # 4-byte Spill +; X86-SLOW-NEXT: movl 44(%ebp), %ecx ; X86-SLOW-NEXT: .LBB6_3: -; X86-SLOW-NEXT: movl 56(%ebp), %ecx -; X86-SLOW-NEXT: testb $32, %cl +; X86-SLOW-NEXT: movl 56(%ebp), %ebx +; X86-SLOW-NEXT: testb $32, %bl ; X86-SLOW-NEXT: je .LBB6_4 ; X86-SLOW-NEXT: # %bb.5: -; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-SLOW-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-SLOW-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-SLOW-NEXT: movl %ecx, %ebx ; X86-SLOW-NEXT: jmp .LBB6_6 ; X86-SLOW-NEXT: .LBB6_4: ; X86-SLOW-NEXT: movl %edx, %esi +; X86-SLOW-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-SLOW-NEXT: movl %eax, %ebx -; X86-SLOW-NEXT: movl (%esp), %eax # 4-byte Reload +; X86-SLOW-NEXT: movl %ecx, %edi +; X86-SLOW-NEXT: movl (%esp), %ebx # 4-byte Reload ; X86-SLOW-NEXT: .LBB6_6: -; X86-SLOW-NEXT: shrl %cl, %eax -; X86-SLOW-NEXT: movl %eax, %edx -; X86-SLOW-NEXT: movl %ecx, %eax -; X86-SLOW-NEXT: notb %al -; X86-SLOW-NEXT: movl %ebx, %edi -; X86-SLOW-NEXT: addl %ebx, %ebx -; X86-SLOW-NEXT: movl %eax, %ecx -; X86-SLOW-NEXT: shll %cl, %ebx -; X86-SLOW-NEXT: orl %edx, %ebx -; X86-SLOW-NEXT: movl %ebx, (%esp) # 4-byte Spill ; X86-SLOW-NEXT: movl 56(%ebp), %ecx -; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-SLOW-NEXT: shrl %cl, %edi -; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload -; X86-SLOW-NEXT: leal (%ebx,%ebx), %edx -; X86-SLOW-NEXT: movl %eax, %ecx -; X86-SLOW-NEXT: shll %cl, %edx -; X86-SLOW-NEXT: orl %edi, %edx +; X86-SLOW-NEXT: shrl %cl, %ebx +; X86-SLOW-NEXT: movl %ecx, %edx +; X86-SLOW-NEXT: notb %dl +; X86-SLOW-NEXT: movl %edi, %eax +; X86-SLOW-NEXT: addl %edi, %edi +; X86-SLOW-NEXT: movl %edx, %ecx +; X86-SLOW-NEXT: shll %cl, %edi +; X86-SLOW-NEXT: orl %ebx, %edi +; X86-SLOW-NEXT: movl %edi, (%esp) # 4-byte Spill ; X86-SLOW-NEXT: movl 56(%ebp), %ecx ; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx -; X86-SLOW-NEXT: shrl %cl, %ebx -; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-SLOW-NEXT: shrl %cl, %eax ; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload ; X86-SLOW-NEXT: leal (%edi,%edi), %ebx -; X86-SLOW-NEXT: movl %eax, %ecx +; X86-SLOW-NEXT: movl %edx, %ecx ; X86-SLOW-NEXT: shll %cl, %ebx -; X86-SLOW-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload +; X86-SLOW-NEXT: orl %eax, %ebx ; X86-SLOW-NEXT: movl 56(%ebp), %ecx ; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx ; X86-SLOW-NEXT: shrl %cl, %edi +; 
X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill +; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload +; X86-SLOW-NEXT: leal (%eax,%eax), %edi +; X86-SLOW-NEXT: movl %edx, %ecx +; X86-SLOW-NEXT: shll %cl, %edi +; X86-SLOW-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload +; X86-SLOW-NEXT: movl 56(%ebp), %ecx +; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx +; X86-SLOW-NEXT: shrl %cl, %eax ; X86-SLOW-NEXT: addl %esi, %esi -; X86-SLOW-NEXT: movl %eax, %ecx +; X86-SLOW-NEXT: movl %edx, %ecx ; X86-SLOW-NEXT: shll %cl, %esi -; X86-SLOW-NEXT: orl %edi, %esi -; X86-SLOW-NEXT: movl 8(%ebp), %ecx -; X86-SLOW-NEXT: movl %esi, 12(%ecx) -; X86-SLOW-NEXT: movl %ebx, 8(%ecx) -; X86-SLOW-NEXT: movl %edx, 4(%ecx) -; X86-SLOW-NEXT: movl (%esp), %eax # 4-byte Reload -; X86-SLOW-NEXT: movl %eax, (%ecx) -; X86-SLOW-NEXT: movl %ecx, %eax +; X86-SLOW-NEXT: orl %eax, %esi +; X86-SLOW-NEXT: movl 8(%ebp), %eax +; X86-SLOW-NEXT: movl %esi, 12(%eax) +; X86-SLOW-NEXT: movl %edi, 8(%eax) +; X86-SLOW-NEXT: movl %ebx, 4(%eax) +; X86-SLOW-NEXT: movl (%esp), %ecx # 4-byte Reload +; X86-SLOW-NEXT: movl %ecx, (%eax) ; X86-SLOW-NEXT: leal -12(%ebp), %esp ; X86-SLOW-NEXT: popl %esi ; X86-SLOW-NEXT: popl %edi diff --git a/llvm/test/CodeGen/X86/ftrunc.ll b/llvm/test/CodeGen/X86/ftrunc.ll index 3ed98589767fb..9095fb1550e70 100644 --- a/llvm/test/CodeGen/X86/ftrunc.ll +++ b/llvm/test/CodeGen/X86/ftrunc.ll @@ -243,7 +243,7 @@ define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) #0 { ret <4 x double> %r } -define float @trunc_signed_f32_no_fast_math(float %x) { +define float @trunc_signed_f32_no_fast_math(float %x) nounwind { ; SSE-LABEL: trunc_signed_f32_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttps2dq %xmm0, %xmm0 @@ -259,14 +259,12 @@ define float @trunc_signed_f32_no_fast_math(float %x) { ; X86-AVX1-LABEL: trunc_signed_f32_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 ; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0 ; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovss %xmm0, (%esp) ; X86-AVX1-NEXT: flds (%esp) ; X86-AVX1-NEXT: popl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 4 ; X86-AVX1-NEXT: retl %i = fptosi float %x to i32 %r = sitofp i32 %i to float @@ -306,7 +304,7 @@ define float @trunc_signed_f32_nsz(float %x) #0 { ret float %r } -define double @trunc_signed32_f64_no_fast_math(double %x) { +define double @trunc_signed32_f64_no_fast_math(double %x) nounwind { ; SSE-LABEL: trunc_signed32_f64_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttpd2dq %xmm0, %xmm0 @@ -322,10 +320,7 @@ define double @trunc_signed32_f64_no_fast_math(double %x) { ; X86-AVX1-LABEL: trunc_signed32_f64_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 -; X86-AVX1-NEXT: .cfi_offset %ebp, -8 ; X86-AVX1-NEXT: movl %esp, %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp ; X86-AVX1-NEXT: andl $-8, %esp ; X86-AVX1-NEXT: subl $8, %esp ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero @@ -335,7 +330,6 @@ define double @trunc_signed32_f64_no_fast_math(double %x) { ; X86-AVX1-NEXT: fldl (%esp) ; X86-AVX1-NEXT: movl %ebp, %esp ; X86-AVX1-NEXT: popl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4 ; X86-AVX1-NEXT: retl %i = fptosi double %x to i32 %r = sitofp i32 %i to double @@ -377,7 +371,7 @@ define double @trunc_signed32_f64_nsz(double %x) #0 { ret double %r } -define double @trunc_f32_signed32_f64_no_fast_math(float %x) { +define 
double @trunc_f32_signed32_f64_no_fast_math(float %x) nounwind { ; SSE-LABEL: trunc_f32_signed32_f64_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttps2dq %xmm0, %xmm0 @@ -393,10 +387,7 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) { ; X86-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 -; X86-AVX1-NEXT: .cfi_offset %ebp, -8 ; X86-AVX1-NEXT: movl %esp, %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp ; X86-AVX1-NEXT: andl $-8, %esp ; X86-AVX1-NEXT: subl $8, %esp ; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -406,7 +397,6 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) { ; X86-AVX1-NEXT: fldl (%esp) ; X86-AVX1-NEXT: movl %ebp, %esp ; X86-AVX1-NEXT: popl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4 ; X86-AVX1-NEXT: retl %i = fptosi float %x to i32 %r = sitofp i32 %i to double @@ -445,7 +435,7 @@ define double @trunc_f32_signed32_f64_nsz(float %x) #0 { ret double %r } -define float @trunc_f64_signed32_f32_no_fast_math(double %x) { +define float @trunc_f64_signed32_f32_no_fast_math(double %x) nounwind { ; SSE-LABEL: trunc_f64_signed32_f32_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttpd2dq %xmm0, %xmm0 @@ -461,14 +451,12 @@ define float @trunc_f64_signed32_f32_no_fast_math(double %x) { ; X86-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0 ; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX1-NEXT: vmovss %xmm0, (%esp) ; X86-AVX1-NEXT: flds (%esp) ; X86-AVX1-NEXT: popl %eax -; X86-AVX1-NEXT: .cfi_def_cfa_offset 4 ; X86-AVX1-NEXT: retl %i = fptosi double %x to i32 %r = sitofp i32 %i to float @@ -503,7 +491,7 @@ define float @trunc_f64_signed32_f32_nsz(double %x) #0 { ret float %r } -define double @trunc_signed_f64_no_fast_math(double %x) { +define double @trunc_signed_f64_no_fast_math(double %x) nounwind { ; SSE-LABEL: trunc_signed_f64_no_fast_math: ; SSE: # %bb.0: ; SSE-NEXT: cvttsd2si %xmm0, %rax @@ -520,10 +508,7 @@ define double @trunc_signed_f64_no_fast_math(double %x) { ; X86-AVX1-LABEL: trunc_signed_f64_no_fast_math: ; X86-AVX1: # %bb.0: ; X86-AVX1-NEXT: pushl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_offset 8 -; X86-AVX1-NEXT: .cfi_offset %ebp, -8 ; X86-AVX1-NEXT: movl %esp, %ebp -; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp ; X86-AVX1-NEXT: andl $-8, %esp ; X86-AVX1-NEXT: subl $24, %esp ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero @@ -537,7 +522,6 @@ define double @trunc_signed_f64_no_fast_math(double %x) { ; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp) ; X86-AVX1-NEXT: movl %ebp, %esp ; X86-AVX1-NEXT: popl %ebp -; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4 ; X86-AVX1-NEXT: retl %i = fptosi double %x to i64 %r = sitofp i64 %i to double diff --git a/llvm/test/CodeGen/X86/ifma-combine-vpmadd52.ll b/llvm/test/CodeGen/X86/ifma-combine-vpmadd52.ll index aebfc7d483d6f..3ece4beb9c22e 100644 --- a/llvm/test/CodeGen/X86/ifma-combine-vpmadd52.ll +++ b/llvm/test/CodeGen/X86/ifma-combine-vpmadd52.ll @@ -1,25 +1,26 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-- -mattr=+avxifma | FileCheck %s --check-prefixes=X64,AVX +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avxifma | FileCheck %s --check-prefixes=X64,AVX,AVXIFMA ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512ifma | FileCheck %s 
--check-prefixes=X64,AVX512,AVX512-NOVL ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=X64,AVX512,AVX512VL +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avxifma,+avx512vl | FileCheck %s --check-prefixes=X64,AVX,AVX512-NOIFMA ; 67108863 == (1 << 26) - 1 ; 4503599627370496 == (1 << 52) ; 4503599627370495 == (1 << 52) - 1 define <8 x i64> @test_512_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; AVX-LABEL: test_512_combine: -; AVX: # %bb.0: -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm6, %ymm2, %ymm2 -; AVX-NEXT: vpand %ymm6, %ymm0, %ymm0 -; AVX-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 -; AVX-NEXT: vpand %ymm6, %ymm3, %ymm0 -; AVX-NEXT: vpand %ymm6, %ymm1, %ymm1 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5 -; AVX-NEXT: vmovdqa %ymm4, %ymm0 -; AVX-NEXT: vmovdqa %ymm5, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_512_combine: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm6 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVXIFMA-NEXT: vpand %ymm6, %ymm0, %ymm0 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 +; AVXIFMA-NEXT: vpand %ymm6, %ymm3, %ymm0 +; AVXIFMA-NEXT: vpand %ymm6, %ymm1, %ymm1 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5 +; AVXIFMA-NEXT: vmovdqa %ymm4, %ymm0 +; AVXIFMA-NEXT: vmovdqa %ymm5, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_512_combine: ; AVX512: # %bb.0: @@ -29,6 +30,19 @@ define <8 x i64> @test_512_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { ; AVX512-NEXT: vpmadd52luq %zmm1, %zmm0, %zmm2 ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_512_combine: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpbroadcastq {{.*#+}} zmm3 = [67108863,67108863,67108863,67108863,67108863,67108863,67108863,67108863] +; AVX512-NOIFMA-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpandq %zmm3, %zmm1, %zmm1 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm1, %ymm3 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm0, %ymm4 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm2, %ymm5 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm3, %ymm4, %ymm5 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm2 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <8 x i64> %x, splat (i64 67108863) %y_masked = and <8 x i64> %y, splat (i64 67108863) %mul = mul nuw nsw <8 x i64> %x_masked, %y_masked @@ -37,19 +51,19 @@ define <8 x i64> @test_512_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { } define <8 x i64> @test_512_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; AVX-LABEL: test_512_combine_v2: -; AVX: # %bb.0: -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [3,3,3,3] -; AVX-NEXT: vpand %ymm6, %ymm2, %ymm2 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1125899906842623,1125899906842623,1125899906842623,1125899906842623] -; AVX-NEXT: vpand %ymm7, %ymm0, %ymm0 -; AVX-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 -; AVX-NEXT: vpand %ymm6, %ymm3, %ymm0 -; AVX-NEXT: vpand %ymm7, %ymm1, %ymm1 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5 -; AVX-NEXT: vmovdqa %ymm4, %ymm0 -; AVX-NEXT: vmovdqa %ymm5, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_512_combine_v2: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm6 = [3,3,3,3] +; AVXIFMA-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1125899906842623,1125899906842623,1125899906842623,1125899906842623] +; AVXIFMA-NEXT: 
vpand %ymm7, %ymm0, %ymm0 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 +; AVXIFMA-NEXT: vpand %ymm6, %ymm3, %ymm0 +; AVXIFMA-NEXT: vpand %ymm7, %ymm1, %ymm1 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5 +; AVXIFMA-NEXT: vmovdqa %ymm4, %ymm0 +; AVXIFMA-NEXT: vmovdqa %ymm5, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_512_combine_v2: ; AVX512: # %bb.0: @@ -58,6 +72,18 @@ define <8 x i64> @test_512_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) ; AVX512-NEXT: vpmadd52luq %zmm1, %zmm0, %zmm2 ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_512_combine_v2: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm2, %ymm3 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm1, %ymm4 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm0, %ymm5 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm4, %ymm5, %ymm3 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm2 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <8 x i64> %x, splat (i64 1125899906842623) ; (1 << 50) - 1 %y_masked = and <8 x i64> %y, splat (i64 3) %mul = mul nuw nsw <8 x i64> %x_masked, %y_masked @@ -66,32 +92,32 @@ define <8 x i64> @test_512_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) } define <8 x i64> @test_512_no_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; AVX-LABEL: test_512_no_combine: -; AVX: # %bb.0: -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495] -; AVX-NEXT: vpand %ymm6, %ymm0, %ymm7 -; AVX-NEXT: vpand %ymm6, %ymm1, %ymm8 -; AVX-NEXT: vpand %ymm6, %ymm2, %ymm9 -; AVX-NEXT: vpand %ymm6, %ymm3, %ymm6 -; AVX-NEXT: vpsrlq $32, %ymm8, %ymm8 -; AVX-NEXT: vpmuludq %ymm3, %ymm8, %ymm8 -; AVX-NEXT: vpsrlq $32, %ymm6, %ymm6 -; AVX-NEXT: vpmuludq %ymm6, %ymm1, %ymm6 -; AVX-NEXT: vpaddq %ymm6, %ymm8, %ymm6 -; AVX-NEXT: vpsllq $32, %ymm6, %ymm6 -; AVX-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 -; AVX-NEXT: vpsrlq $32, %ymm7, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 -; AVX-NEXT: vpsrlq $32, %ymm9, %ymm7 -; AVX-NEXT: vpmuludq %ymm7, %ymm0, %ymm7 -; AVX-NEXT: vpaddq %ymm3, %ymm7, %ymm3 -; AVX-NEXT: vpsllq $32, %ymm3, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm4, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm3, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm5, %ymm1, %ymm1 -; AVX-NEXT: vpaddq %ymm6, %ymm1, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_512_no_combine: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm6 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495] +; AVXIFMA-NEXT: vpand %ymm6, %ymm0, %ymm7 +; AVXIFMA-NEXT: vpand %ymm6, %ymm1, %ymm8 +; AVXIFMA-NEXT: vpand %ymm6, %ymm2, %ymm9 +; AVXIFMA-NEXT: vpand %ymm6, %ymm3, %ymm6 +; AVXIFMA-NEXT: vpsrlq $32, %ymm8, %ymm8 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm8, %ymm8 +; AVXIFMA-NEXT: vpsrlq $32, %ymm6, %ymm6 +; AVXIFMA-NEXT: vpmuludq %ymm6, %ymm1, %ymm6 +; AVXIFMA-NEXT: vpaddq %ymm6, %ymm8, %ymm6 +; AVXIFMA-NEXT: vpsllq $32, %ymm6, %ymm6 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpsrlq $32, %ymm7, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpsrlq $32, %ymm9, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm7, %ymm0, %ymm7 +; AVXIFMA-NEXT: vpaddq %ymm3, %ymm7, %ymm3 +; AVXIFMA-NEXT: vpsllq $32, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, 
%ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm4, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm3, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm5, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpaddq %ymm6, %ymm1, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_512_no_combine: ; AVX512: # %bb.0: @@ -108,6 +134,22 @@ define <8 x i64> @test_512_no_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) ; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512-NEXT: vpaddq %zmm3, %zmm0, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_512_no_combine: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpbroadcastq {{.*#+}} zmm3 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495] +; AVX512-NOIFMA-NEXT: vpandq %zmm3, %zmm0, %zmm4 +; AVX512-NOIFMA-NEXT: vpandq %zmm3, %zmm1, %zmm3 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm4, %zmm4 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm4, %zmm4 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm3, %zmm0, %zmm3 +; AVX512-NOIFMA-NEXT: vpaddq %zmm4, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpsllq $32, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm3, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <8 x i64> %x, splat (i64 4503599627370495) %y_masked = and <8 x i64> %y, splat (i64 4503599627370495) %mul = mul nuw nsw <8 x i64> %x_masked, %y_masked @@ -116,27 +158,27 @@ define <8 x i64> @test_512_no_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) } define <8 x i64> @test_512_no_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; AVX-LABEL: test_512_no_combine_v2: -; AVX: # %bb.0: -; AVX-NEXT: vpsrlq $32, %ymm1, %ymm6 -; AVX-NEXT: vpmuludq %ymm3, %ymm6, %ymm6 -; AVX-NEXT: vpsrlq $32, %ymm3, %ymm7 -; AVX-NEXT: vpmuludq %ymm7, %ymm1, %ymm7 -; AVX-NEXT: vpaddq %ymm6, %ymm7, %ymm6 -; AVX-NEXT: vpsllq $32, %ymm6, %ymm6 -; AVX-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 -; AVX-NEXT: vpsrlq $32, %ymm0, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 -; AVX-NEXT: vpsrlq $32, %ymm2, %ymm7 -; AVX-NEXT: vpmuludq %ymm7, %ymm0, %ymm7 -; AVX-NEXT: vpaddq %ymm3, %ymm7, %ymm3 -; AVX-NEXT: vpsllq $32, %ymm3, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm4, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm3, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm5, %ymm1, %ymm1 -; AVX-NEXT: vpaddq %ymm6, %ymm1, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_512_no_combine_v2: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpsrlq $32, %ymm1, %ymm6 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm6, %ymm6 +; AVXIFMA-NEXT: vpsrlq $32, %ymm3, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm7, %ymm1, %ymm7 +; AVXIFMA-NEXT: vpaddq %ymm6, %ymm7, %ymm6 +; AVXIFMA-NEXT: vpsllq $32, %ymm6, %ymm6 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpsrlq $32, %ymm0, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpsrlq $32, %ymm2, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm7, %ymm0, %ymm7 +; AVXIFMA-NEXT: vpaddq %ymm3, %ymm7, %ymm3 +; AVXIFMA-NEXT: vpsllq $32, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm4, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm3, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm5, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpaddq %ymm6, %ymm1, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_512_no_combine_v2: ; AVX512: # %bb.0: @@ -150,6 +192,19 @@ define <8 x i64> @test_512_no_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> % ; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; 
AVX512-NEXT: vpaddq %zmm3, %zmm0, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_512_no_combine_v2: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm0, %zmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm1, %zmm4 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm4, %zmm0, %zmm4 +; AVX512-NOIFMA-NEXT: vpaddq %zmm3, %zmm4, %zmm3 +; AVX512-NOIFMA-NEXT: vpsllq $32, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm3, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: retq %mul = mul <8 x i64> %x, %y %res = add <8 x i64> %mul, %z ret <8 x i64> %res @@ -255,25 +310,25 @@ define <1 x i64> @test_scalar_no_ifma(<1 x i64> %x, <1 x i64> %y, <1 x i64> %z) ; 40-bit and 13-bit, too wide define <8 x i64> @test_mixed_width_too_wide(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; AVX-LABEL: test_mixed_width_too_wide: -; AVX: # %bb.0: -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [8191,8191,8191,8191] -; AVX-NEXT: vpand %ymm6, %ymm2, %ymm2 -; AVX-NEXT: vpand %ymm6, %ymm3, %ymm3 -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm6 = [2155905028,2155905036,2155905044,2155905052] -; AVX-NEXT: vpshufb %ymm6, %ymm1, %ymm7 -; AVX-NEXT: vpmuludq %ymm3, %ymm7, %ymm7 -; AVX-NEXT: vpsllq $32, %ymm7, %ymm7 -; AVX-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 -; AVX-NEXT: vpshufb %ymm6, %ymm0, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 -; AVX-NEXT: vpsllq $32, %ymm3, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm0, %ymm4, %ymm0 -; AVX-NEXT: vpaddq %ymm3, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm1, %ymm5, %ymm1 -; AVX-NEXT: vpaddq %ymm7, %ymm1, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_mixed_width_too_wide: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm6 = [8191,8191,8191,8191] +; AVXIFMA-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVXIFMA-NEXT: vpand %ymm6, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm6 = [2155905028,2155905036,2155905044,2155905052] +; AVXIFMA-NEXT: vpshufb %ymm6, %ymm1, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm7, %ymm7 +; AVXIFMA-NEXT: vpsllq $32, %ymm7, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpshufb %ymm6, %ymm0, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpsllq $32, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm0, %ymm4, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm3, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm1, %ymm5, %ymm1 +; AVXIFMA-NEXT: vpaddq %ymm7, %ymm1, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_mixed_width_too_wide: ; AVX512: # %bb.0: @@ -286,6 +341,18 @@ define <8 x i64> @test_mixed_width_too_wide(<8 x i64> %x, <8 x i64> %y, <8 x i64 ; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm1 ; AVX512-NEXT: vpaddq %zmm0, %zmm1, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_mixed_width_too_wide: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm3 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpsllq $32, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm3, %zmm2, %zmm1 +; AVX512-NOIFMA-NEXT: vpaddq %zmm0, %zmm1, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x40 = and <8 x i64> %x, splat (i64 1099511627775) %y13 = and <8 x i64> %y, splat (i64 8191) %mul = mul <8 x i64> %x40, %y13 @@ -294,19 +361,19 @@ 
define <8 x i64> @test_mixed_width_too_wide(<8 x i64> %x, <8 x i64> %y, <8 x i64 } define <8 x i64> @test_zext32_inputs_not_safe(<8 x i32> %xi32, <8 x i32> %yi32, <8 x i64> %z) { -; AVX-LABEL: test_zext32_inputs_not_safe: -; AVX: # %bb.0: -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX-NEXT: vpmuludq %ymm5, %ymm4, %ymm4 -; AVX-NEXT: vextracti128 $1, %ymm1, %xmm1 -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm1 -; AVX-NEXT: vpaddq %ymm4, %ymm2, %ymm0 -; AVX-NEXT: vpaddq %ymm1, %ymm3, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_zext32_inputs_not_safe: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVXIFMA-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVXIFMA-NEXT: vpmuludq %ymm5, %ymm4, %ymm4 +; AVXIFMA-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVXIFMA-NEXT: vpmuludq %ymm1, %ymm0, %ymm1 +; AVXIFMA-NEXT: vpaddq %ymm4, %ymm2, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm1, %ymm3, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_zext32_inputs_not_safe: ; AVX512: # %bb.0: @@ -315,6 +382,14 @@ define <8 x i64> @test_zext32_inputs_not_safe(<8 x i32> %xi32, <8 x i32> %yi32, ; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_zext32_inputs_not_safe: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero +; AVX512-NOIFMA-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm0, %zmm2, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x = zext <8 x i32> %xi32 to <8 x i64> %y = zext <8 x i32> %yi32 to <8 x i64> %mul = mul <8 x i64> %x, %y @@ -323,36 +398,36 @@ define <8 x i64> @test_zext32_inputs_not_safe(<8 x i32> %xi32, <8 x i32> %yi32, } define <16 x i64> @test_1024_combine_split(<16 x i64> %x, <16 x i64> %y, <16 x i64> %z) nounwind { -; AVX-LABEL: test_1024_combine_split: -; AVX: # %bb.0: -; AVX-NEXT: pushq %rbp -; AVX-NEXT: movq %rsp, %rbp -; AVX-NEXT: andq $-32, %rsp -; AVX-NEXT: subq $32, %rsp -; AVX-NEXT: vmovdqa 112(%rbp), %ymm8 -; AVX-NEXT: vmovdqa 80(%rbp), %ymm9 -; AVX-NEXT: vmovdqa 48(%rbp), %ymm10 -; AVX-NEXT: vmovdqa 16(%rbp), %ymm11 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm12 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm3, %ymm12, %ymm3 -; AVX-NEXT: vpand %ymm2, %ymm12, %ymm2 -; AVX-NEXT: vpand %ymm1, %ymm12, %ymm1 -; AVX-NEXT: vpand %ymm0, %ymm12, %ymm0 -; AVX-NEXT: vpand %ymm7, %ymm12, %ymm7 -; AVX-NEXT: {vex} vpmadd52luq %ymm7, %ymm3, %ymm8 -; AVX-NEXT: vpand %ymm6, %ymm12, %ymm3 -; AVX-NEXT: {vex} vpmadd52luq %ymm3, %ymm2, %ymm9 -; AVX-NEXT: vpand %ymm5, %ymm12, %ymm2 -; AVX-NEXT: {vex} vpmadd52luq %ymm2, %ymm1, %ymm10 -; 
AVX-NEXT: vpand %ymm4, %ymm12, %ymm1 -; AVX-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm11 -; AVX-NEXT: vmovdqa %ymm11, %ymm0 -; AVX-NEXT: vmovdqa %ymm10, %ymm1 -; AVX-NEXT: vmovdqa %ymm9, %ymm2 -; AVX-NEXT: vmovdqa %ymm8, %ymm3 -; AVX-NEXT: movq %rbp, %rsp -; AVX-NEXT: popq %rbp -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_1024_combine_split: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: pushq %rbp +; AVXIFMA-NEXT: movq %rsp, %rbp +; AVXIFMA-NEXT: andq $-32, %rsp +; AVXIFMA-NEXT: subq $32, %rsp +; AVXIFMA-NEXT: vmovdqa 112(%rbp), %ymm8 +; AVXIFMA-NEXT: vmovdqa 80(%rbp), %ymm9 +; AVXIFMA-NEXT: vmovdqa 48(%rbp), %ymm10 +; AVXIFMA-NEXT: vmovdqa 16(%rbp), %ymm11 +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm12 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm3, %ymm12, %ymm3 +; AVXIFMA-NEXT: vpand %ymm2, %ymm12, %ymm2 +; AVXIFMA-NEXT: vpand %ymm1, %ymm12, %ymm1 +; AVXIFMA-NEXT: vpand %ymm0, %ymm12, %ymm0 +; AVXIFMA-NEXT: vpand %ymm7, %ymm12, %ymm7 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm7, %ymm3, %ymm8 +; AVXIFMA-NEXT: vpand %ymm6, %ymm12, %ymm3 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm3, %ymm2, %ymm9 +; AVXIFMA-NEXT: vpand %ymm5, %ymm12, %ymm2 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm1, %ymm10 +; AVXIFMA-NEXT: vpand %ymm4, %ymm12, %ymm1 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm11 +; AVXIFMA-NEXT: vmovdqa %ymm11, %ymm0 +; AVXIFMA-NEXT: vmovdqa %ymm10, %ymm1 +; AVXIFMA-NEXT: vmovdqa %ymm9, %ymm2 +; AVXIFMA-NEXT: vmovdqa %ymm8, %ymm3 +; AVXIFMA-NEXT: movq %rbp, %rsp +; AVXIFMA-NEXT: popq %rbp +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_1024_combine_split: ; AVX512: # %bb.0: @@ -366,6 +441,27 @@ define <16 x i64> @test_1024_combine_split(<16 x i64> %x, <16 x i64> %y, <16 x i ; AVX512-NEXT: vmovdqa64 %zmm4, %zmm0 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm1 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_1024_combine_split: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpbroadcastq {{.*#+}} zmm6 = [67108863,67108863,67108863,67108863,67108863,67108863,67108863,67108863] +; AVX512-NOIFMA-NEXT: vpandq %zmm6, %zmm1, %zmm1 +; AVX512-NOIFMA-NEXT: vpandq %zmm6, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpandq %zmm6, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpandq %zmm6, %zmm2, %zmm2 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm2, %ymm6 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm0, %ymm7 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm4, %ymm8 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm6, %ymm7, %ymm8 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm8, %zmm4, %zmm0 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm3, %ymm2 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm1, %ymm4 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm5, %ymm6 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm4, %ymm6 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm3, %ymm1, %ymm5 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm1 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <16 x i64> %x, splat (i64 67108863) %y_masked = and <16 x i64> %y, splat (i64 67108863) %mul = mul <16 x i64> %x_masked, %y_masked @@ -388,13 +484,13 @@ define <1 x i64> @test_not_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %z) { } define <3 x i64> @test_v3i64(<3 x i64> %x, <3 x i64> %y, <3 x i64> %z) { -; AVX-LABEL: test_v3i64: -; AVX: # %bb.0: -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX-NEXT: vpmuludq %ymm0, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm2, %ymm0, %ymm0 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_v3i64: +; AVXIFMA: 
# %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm1 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpmuludq %ymm0, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; AVXIFMA-NEXT: retq ; ; AVX512-NOVL-LABEL: test_v3i64: ; AVX512-NOVL: # %bb.0: @@ -410,6 +506,13 @@ define <3 x i64> @test_v3i64(<3 x i64> %x, <3 x i64> %y, <3 x i64> %z) { ; AVX512VL-NEXT: vpmuludq %ymm0, %ymm0, %ymm0 ; AVX512VL-NEXT: vpaddq %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_v3i64: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0 +; AVX512-NOIFMA-NEXT: vpmuludq %ymm0, %ymm0, %ymm0 +; AVX512-NOIFMA-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <3 x i64> %x, splat (i64 67108863) %y_masked = and <3 x i64> %x, splat (i64 67108863) %mul = mul <3 x i64> %x_masked, %y_masked @@ -418,35 +521,35 @@ define <3 x i64> @test_v3i64(<3 x i64> %x, <3 x i64> %y, <3 x i64> %z) { } define <5 x i64> @test_v5i64(<5 x i64> %x, <5 x i64> %y, <5 x i64> %z) { -; AVX-LABEL: test_v5i64: -; AVX: # %bb.0: -; AVX-NEXT: movq %rdi, %rax -; AVX-NEXT: vmovq %r8, %xmm0 -; AVX-NEXT: vmovq %rcx, %xmm1 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX-NEXT: vmovq %rdx, %xmm1 -; AVX-NEXT: vmovq %rsi, %xmm2 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm2 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm3 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm3, %ymm0, %ymm0 -; AVX-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF -; AVX-NEXT: vmovq %rcx, %xmm3 -; AVX-NEXT: vmovq %r9, %xmm4 -; AVX-NEXT: vpand %xmm3, %xmm4, %xmm3 -; AVX-NEXT: vpsrlq $32, %xmm3, %xmm4 -; AVX-NEXT: vpmuludq %xmm4, %xmm3, %xmm4 -; AVX-NEXT: vpsllq $33, %xmm4, %xmm4 -; AVX-NEXT: vpmuludq %xmm3, %xmm3, %xmm3 -; AVX-NEXT: vpaddq %xmm1, %xmm3, %xmm1 -; AVX-NEXT: vpaddq %xmm4, %xmm1, %xmm1 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm2 -; AVX-NEXT: vmovdqa %ymm2, (%rdi) -; AVX-NEXT: vmovq %xmm1, 32(%rdi) -; AVX-NEXT: vzeroupper -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_v5i64: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: movq %rdi, %rax +; AVXIFMA-NEXT: vmovq %r8, %xmm0 +; AVXIFMA-NEXT: vmovq %rcx, %xmm1 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVXIFMA-NEXT: vmovq %rdx, %xmm1 +; AVXIFMA-NEXT: vmovq %rsi, %xmm2 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVXIFMA-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVXIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm2 +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm3 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm3, %ymm0, %ymm0 +; AVXIFMA-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF +; AVXIFMA-NEXT: vmovq %rcx, %xmm3 +; AVXIFMA-NEXT: vmovq %r9, %xmm4 +; AVXIFMA-NEXT: vpand %xmm3, %xmm4, %xmm3 +; AVXIFMA-NEXT: vpsrlq $32, %xmm3, %xmm4 +; AVXIFMA-NEXT: vpmuludq %xmm4, %xmm3, %xmm4 +; AVXIFMA-NEXT: vpsllq $33, %xmm4, %xmm4 +; AVXIFMA-NEXT: vpmuludq %xmm3, %xmm3, %xmm3 +; AVXIFMA-NEXT: vpaddq %xmm1, %xmm3, %xmm1 +; AVXIFMA-NEXT: vpaddq %xmm4, %xmm1, %xmm1 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm2 +; AVXIFMA-NEXT: vmovdqa %ymm2, (%rdi) +; AVXIFMA-NEXT: vmovq %xmm1, 32(%rdi) +; AVXIFMA-NEXT: vzeroupper +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_v5i64: ; AVX512: # %bb.0: @@ -454,6 +557,13 @@ define <5 x i64> 
@test_v5i64(<5 x i64> %x, <5 x i64> %y, <5 x i64> %z) { ; AVX512-NEXT: vpmuludq %zmm0, %zmm0, %zmm0 ; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_v5i64: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm0, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <5 x i64> %x, splat (i64 67108863) %y_masked = and <5 x i64> %x, splat (i64 67108863) %mul = mul <5 x i64> %x_masked, %y_masked @@ -462,30 +572,30 @@ define <5 x i64> @test_v5i64(<5 x i64> %x, <5 x i64> %y, <5 x i64> %z) { } define <6 x i64> @test_v6i64(<6 x i64> %x, <6 x i64> %y, <6 x i64> %z) { -; AVX-LABEL: test_v6i64: -; AVX: # %bb.0: -; AVX-NEXT: movq %rdi, %rax -; AVX-NEXT: vmovq %r8, %xmm0 -; AVX-NEXT: vmovq %rcx, %xmm1 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX-NEXT: vmovq %rdx, %xmm1 -; AVX-NEXT: vmovq %rsi, %xmm2 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm1 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm1 -; AVX-NEXT: vmovq %r9, %xmm0 -; AVX-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] -; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpmuldq %xmm0, %xmm0, %xmm0 -; AVX-NEXT: vpaddq {{[0-9]+}}(%rsp), %xmm0, %xmm0 -; AVX-NEXT: vmovdqa %xmm0, 32(%rdi) -; AVX-NEXT: vmovdqa %ymm1, (%rdi) -; AVX-NEXT: vzeroupper -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_v6i64: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: movq %rdi, %rax +; AVXIFMA-NEXT: vmovq %r8, %xmm0 +; AVXIFMA-NEXT: vmovq %rcx, %xmm1 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVXIFMA-NEXT: vmovq %rdx, %xmm1 +; AVXIFMA-NEXT: vmovq %rsi, %xmm2 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVXIFMA-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVXIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm1 +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm2 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm1 +; AVXIFMA-NEXT: vmovq %r9, %xmm0 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVXIFMA-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVXIFMA-NEXT: vpmuldq %xmm0, %xmm0, %xmm0 +; AVXIFMA-NEXT: vpaddq {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVXIFMA-NEXT: vmovdqa %xmm0, 32(%rdi) +; AVXIFMA-NEXT: vmovdqa %ymm1, (%rdi) +; AVXIFMA-NEXT: vzeroupper +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_v6i64: ; AVX512: # %bb.0: @@ -493,6 +603,13 @@ define <6 x i64> @test_v6i64(<6 x i64> %x, <6 x i64> %y, <6 x i64> %z) { ; AVX512-NEXT: vpmuludq %zmm0, %zmm0, %zmm0 ; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_v6i64: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm0, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <6 x i64> %x, splat (i64 67108863) %y_masked = and <6 x i64> %x, splat (i64 67108863) %mul = mul <6 x i64> %x_masked, %y_masked @@ -501,43 +618,43 @@ define <6 x i64> @test_v6i64(<6 x i64> %x, <6 x i64> %y, <6 x i64> %z) { } define <9 x i64> @test_v9i64(<9 x 
i64> %x, <9 x i64> %y, <9 x i64> %z) { -; AVX-LABEL: test_v9i64: -; AVX: # %bb.0: -; AVX-NEXT: movq %rdi, %rax -; AVX-NEXT: vmovq %r8, %xmm0 -; AVX-NEXT: vmovq %rcx, %xmm1 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX-NEXT: vmovq %rdx, %xmm1 -; AVX-NEXT: vmovq %rsi, %xmm2 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX-NEXT: vmovq %r9, %xmm1 -; AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; AVX-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 -; AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero -; AVX-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm3 -; AVX-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm4 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm5 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm5, %ymm0, %ymm0 -; AVX-NEXT: vpand %ymm5, %ymm1, %ymm1 -; AVX-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF -; AVX-NEXT: vmovq %rcx, %xmm5 -; AVX-NEXT: vmovq {{.*#+}} xmm6 = mem[0],zero -; AVX-NEXT: vpand %xmm5, %xmm6, %xmm5 -; AVX-NEXT: vpsrlq $32, %xmm5, %xmm6 -; AVX-NEXT: vpmuludq %xmm6, %xmm5, %xmm6 -; AVX-NEXT: vpsllq $33, %xmm6, %xmm6 -; AVX-NEXT: vpmuludq %xmm5, %xmm5, %xmm5 -; AVX-NEXT: vpaddq %xmm2, %xmm5, %xmm2 -; AVX-NEXT: vpaddq %xmm6, %xmm2, %xmm2 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm4 -; AVX-NEXT: {vex} vpmadd52luq %ymm1, %ymm1, %ymm3 -; AVX-NEXT: vmovdqa %ymm3, 32(%rdi) -; AVX-NEXT: vmovdqa %ymm4, (%rdi) -; AVX-NEXT: vmovq %xmm2, 64(%rdi) -; AVX-NEXT: vzeroupper -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_v9i64: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: movq %rdi, %rax +; AVXIFMA-NEXT: vmovq %r8, %xmm0 +; AVXIFMA-NEXT: vmovq %rcx, %xmm1 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVXIFMA-NEXT: vmovq %rdx, %xmm1 +; AVXIFMA-NEXT: vmovq %rsi, %xmm2 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVXIFMA-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVXIFMA-NEXT: vmovq %r9, %xmm1 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVXIFMA-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVXIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm3 +; AVXIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm4 +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm5 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm5, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpand %ymm5, %ymm1, %ymm1 +; AVXIFMA-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF +; AVXIFMA-NEXT: vmovq %rcx, %xmm5 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm6 = mem[0],zero +; AVXIFMA-NEXT: vpand %xmm5, %xmm6, %xmm5 +; AVXIFMA-NEXT: vpsrlq $32, %xmm5, %xmm6 +; AVXIFMA-NEXT: vpmuludq %xmm6, %xmm5, %xmm6 +; AVXIFMA-NEXT: vpsllq $33, %xmm6, %xmm6 +; AVXIFMA-NEXT: vpmuludq %xmm5, %xmm5, %xmm5 +; AVXIFMA-NEXT: vpaddq %xmm2, %xmm5, %xmm2 +; AVXIFMA-NEXT: vpaddq %xmm6, %xmm2, %xmm2 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm4 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm1, %ymm1, %ymm3 +; AVXIFMA-NEXT: vmovdqa %ymm3, 32(%rdi) +; AVXIFMA-NEXT: vmovdqa %ymm4, (%rdi) +; AVXIFMA-NEXT: vmovq %xmm2, 64(%rdi) +; AVXIFMA-NEXT: vzeroupper +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_v9i64: ; AVX512: # %bb.0: @@ -572,6 +689,44 @@ define <9 x i64> @test_v9i64(<9 x i64> %x, <9 x i64> %y, <9 x i64> %z) { ; AVX512-NEXT: vmovdqa64 %zmm2, (%rdi) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_v9i64: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: movq %rdi, %rax +; 
AVX512-NOIFMA-NEXT: vmovq %r8, %xmm0 +; AVX512-NOIFMA-NEXT: vmovq %rcx, %xmm1 +; AVX512-NOIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NOIFMA-NEXT: vmovq %rdx, %xmm1 +; AVX512-NOIFMA-NEXT: vmovq %rsi, %xmm2 +; AVX512-NOIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512-NOIFMA-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX512-NOIFMA-NEXT: vmovq %r9, %xmm1 +; AVX512-NOIFMA-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVX512-NOIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX512-NOIFMA-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF +; AVX512-NOIFMA-NEXT: vmovq %rcx, %xmm2 +; AVX512-NOIFMA-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero +; AVX512-NOIFMA-NEXT: vpand %xmm2, %xmm3, %xmm2 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %xmm2, %xmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %xmm3, %xmm2, %xmm3 +; AVX512-NOIFMA-NEXT: vpsllq $33, %xmm3, %xmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %xmm2, %xmm2, %xmm2 +; AVX512-NOIFMA-NEXT: vpaddq %xmm1, %xmm2, %xmm1 +; AVX512-NOIFMA-NEXT: vpaddq %xmm3, %xmm1, %xmm1 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; AVX512-NOIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm3 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm2, %ymm3 +; AVX512-NOIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm2 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm2 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm0 +; AVX512-NOIFMA-NEXT: vmovq %xmm1, 64(%rdi) +; AVX512-NOIFMA-NEXT: vmovdqa64 %zmm0, (%rdi) +; AVX512-NOIFMA-NEXT: vzeroupper +; AVX512-NOIFMA-NEXT: retq %x_masked = and <9 x i64> %x, splat (i64 67108863) %y_masked = and <9 x i64> %x, splat (i64 67108863) %mul = mul <9 x i64> %x_masked, %y_masked diff --git a/llvm/test/CodeGen/X86/isel-llvm.set.rounding.ll b/llvm/test/CodeGen/X86/isel-llvm.set.rounding.ll index 688add1e92ab1..d271e97d8832a 100644 --- a/llvm/test/CodeGen/X86/isel-llvm.set.rounding.ll +++ b/llvm/test/CodeGen/X86/isel-llvm.set.rounding.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s --check-prefixes=X86-NOSSE,SDAG-X86-NOSSE ; RUN: llc < %s -mtriple=i686-- -fast-isel -fast-isel-abort=1 -mattr=-sse | FileCheck %s --check-prefixes=X86-NOSSE,FASTISEL-X86-NOSSE -; RUN: llc < %s -mtriple=i686-- -global-isel -global-isel-abort=2 -mattr=-sse | FileCheck %s --check-prefixes=X86-NOSSE,GISEL-X86-NOSSE +; RUN: llc < %s -mtriple=i686-- -global-isel -global-isel-abort=1 -mattr=-sse | FileCheck %s --check-prefixes=GISEL-X86-NOSSE ; RUN: llc < %s -mtriple=x86_64-- -mattr=-sse | FileCheck %s --check-prefixes=X64-NOSSE,SDAG-X64-NOSSE ; RUN: llc < %s -mtriple=x86_64-- -fast-isel -fast-isel-abort=1 -mattr=-sse | FileCheck %s --check-prefixes=X64-NOSSE,FASTISEL-X64-NOSSE -; RUN: llc < %s -mtriple=x86_64-- -global-isel -global-isel-abort=2 -mattr=-sse | FileCheck %s --check-prefixes=X64-NOSSE,GISEL-X64-NOSSE +; RUN: llc < %s -mtriple=x86_64-- -global-isel -global-isel-abort=1 -mattr=-sse | FileCheck %s --check-prefixes=GISEL-X64-NOSSE ; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefixes=X86,SDAG-X86 ; RUN: llc < %s -mtriple=i686-- -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefixes=X86,FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-- -global-isel 
-global-isel-abort=2 | FileCheck %s --check-prefixes=X86,GISEL-X86 +; RUN: llc < %s -mtriple=i686-- -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 ; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --check-prefixes=X64,SDAG-X64 ; RUN: llc < %s -mtriple=x86_64-- -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefixes=X64,FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-- -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64,GISEL-X64 +; RUN: llc < %s -mtriple=x86_64-- -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 declare void @llvm.set.rounding(i32 %x) @@ -24,6 +24,18 @@ define void @func_01() nounwind { ; X86-NOSSE-NEXT: popl %eax ; X86-NOSSE-NEXT: retl ; +; GISEL-X86-NOSSE-LABEL: func_01: +; GISEL-X86-NOSSE: # %bb.0: +; GISEL-X86-NOSSE-NEXT: pushl %eax +; GISEL-X86-NOSSE-NEXT: fnstcw (%esp) +; GISEL-X86-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NOSSE-NEXT: andw (%esp), %ax +; GISEL-X86-NOSSE-NEXT: orw $24576, %ax # imm = 0x6000 +; GISEL-X86-NOSSE-NEXT: movw %ax, (%esp) +; GISEL-X86-NOSSE-NEXT: fldcw (%esp) +; GISEL-X86-NOSSE-NEXT: popl %eax +; GISEL-X86-NOSSE-NEXT: retl +; ; X64-NOSSE-LABEL: func_01: ; X64-NOSSE: # %bb.0: ; X64-NOSSE-NEXT: fnstcw -{{[0-9]+}}(%rsp) @@ -31,6 +43,16 @@ define void @func_01() nounwind { ; X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) ; X64-NOSSE-NEXT: retq ; +; GISEL-X64-NOSSE-LABEL: func_01: +; GISEL-X64-NOSSE: # %bb.0: +; GISEL-X64-NOSSE-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NOSSE-NEXT: andw -{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NOSSE-NEXT: orw $24576, %ax # imm = 0x6000 +; GISEL-X64-NOSSE-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: retq +; ; X86-LABEL: func_01: ; X86: # %bb.0: ; X86-NEXT: pushl %eax @@ -40,6 +62,18 @@ define void @func_01() nounwind { ; X86-NEXT: popl %eax ; X86-NEXT: retl ; +; GISEL-X86-LABEL: func_01: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %eax +; GISEL-X86-NEXT: fnstcw (%esp) +; GISEL-X86-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NEXT: andw (%esp), %ax +; GISEL-X86-NEXT: orw $24576, %ax # imm = 0x6000 +; GISEL-X86-NEXT: movw %ax, (%esp) +; GISEL-X86-NEXT: fldcw (%esp) +; GISEL-X86-NEXT: popl %eax +; GISEL-X86-NEXT: retl +; ; X64-LABEL: func_01: ; X64: # %bb.0: ; X64-NEXT: fnstcw -{{[0-9]+}}(%rsp) @@ -49,6 +83,22 @@ define void @func_01() nounwind { ; X64-NEXT: orb $96, -{{[0-9]+}}(%rsp) ; X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) ; X64-NEXT: retq +; +; GISEL-X64-LABEL: func_01: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NEXT: andw -{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NEXT: orw $24576, %ax # imm = 0x6000 +; GISEL-X64-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: stmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movl $-24577, %eax # imm = 0x9FFF +; GISEL-X64-NEXT: andl -{{[0-9]+}}(%rsp), %eax +; GISEL-X64-NEXT: orl $24576, %eax # imm = 0x6000 +; GISEL-X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: retq call void @llvm.set.rounding(i32 0) ; TowardZero (CW[11-10] = 11) ret void } @@ -63,6 +113,18 @@ define void @func_02() nounwind { ; X86-NOSSE-NEXT: popl %eax ; X86-NOSSE-NEXT: retl ; +; GISEL-X86-NOSSE-LABEL: func_02: +; GISEL-X86-NOSSE: # %bb.0: +; GISEL-X86-NOSSE-NEXT: pushl %eax +; GISEL-X86-NOSSE-NEXT: fnstcw (%esp) +; 
GISEL-X86-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NOSSE-NEXT: andw (%esp), %ax +; GISEL-X86-NOSSE-NEXT: orw $0, %ax +; GISEL-X86-NOSSE-NEXT: movw %ax, (%esp) +; GISEL-X86-NOSSE-NEXT: fldcw (%esp) +; GISEL-X86-NOSSE-NEXT: popl %eax +; GISEL-X86-NOSSE-NEXT: retl +; ; X64-NOSSE-LABEL: func_02: ; X64-NOSSE: # %bb.0: ; X64-NOSSE-NEXT: fnstcw -{{[0-9]+}}(%rsp) @@ -70,6 +132,16 @@ define void @func_02() nounwind { ; X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) ; X64-NOSSE-NEXT: retq ; +; GISEL-X64-NOSSE-LABEL: func_02: +; GISEL-X64-NOSSE: # %bb.0: +; GISEL-X64-NOSSE-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NOSSE-NEXT: andw -{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NOSSE-NEXT: orw $0, %ax +; GISEL-X64-NOSSE-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: retq +; ; X86-LABEL: func_02: ; X86: # %bb.0: ; X86-NEXT: pushl %eax @@ -79,6 +151,18 @@ define void @func_02() nounwind { ; X86-NEXT: popl %eax ; X86-NEXT: retl ; +; GISEL-X86-LABEL: func_02: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %eax +; GISEL-X86-NEXT: fnstcw (%esp) +; GISEL-X86-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NEXT: andw (%esp), %ax +; GISEL-X86-NEXT: orw $0, %ax +; GISEL-X86-NEXT: movw %ax, (%esp) +; GISEL-X86-NEXT: fldcw (%esp) +; GISEL-X86-NEXT: popl %eax +; GISEL-X86-NEXT: retl +; ; X64-LABEL: func_02: ; X64: # %bb.0: ; X64-NEXT: fnstcw -{{[0-9]+}}(%rsp) @@ -88,6 +172,22 @@ define void @func_02() nounwind { ; X64-NEXT: andb $-97, -{{[0-9]+}}(%rsp) ; X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) ; X64-NEXT: retq +; +; GISEL-X64-LABEL: func_02: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NEXT: andw -{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NEXT: orw $0, %ax +; GISEL-X64-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: stmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movl $-24577, %eax # imm = 0x9FFF +; GISEL-X64-NEXT: andl -{{[0-9]+}}(%rsp), %eax +; GISEL-X64-NEXT: orl $0, %eax +; GISEL-X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: retq call void @llvm.set.rounding(i32 1) ; ToNearestTiesToEven (CW[11-10] = 00) ret void } @@ -105,6 +205,18 @@ define void @func_03() nounwind { ; X86-NOSSE-NEXT: popl %eax ; X86-NOSSE-NEXT: retl ; +; GISEL-X86-NOSSE-LABEL: func_03: +; GISEL-X86-NOSSE: # %bb.0: +; GISEL-X86-NOSSE-NEXT: pushl %eax +; GISEL-X86-NOSSE-NEXT: fnstcw (%esp) +; GISEL-X86-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NOSSE-NEXT: andw (%esp), %ax +; GISEL-X86-NOSSE-NEXT: orw $16384, %ax # imm = 0x4000 +; GISEL-X86-NOSSE-NEXT: movw %ax, (%esp) +; GISEL-X86-NOSSE-NEXT: fldcw (%esp) +; GISEL-X86-NOSSE-NEXT: popl %eax +; GISEL-X86-NOSSE-NEXT: retl +; ; X64-NOSSE-LABEL: func_03: ; X64-NOSSE: # %bb.0: ; X64-NOSSE-NEXT: fnstcw -{{[0-9]+}}(%rsp) @@ -115,6 +227,16 @@ define void @func_03() nounwind { ; X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) ; X64-NOSSE-NEXT: retq ; +; GISEL-X64-NOSSE-LABEL: func_03: +; GISEL-X64-NOSSE: # %bb.0: +; GISEL-X64-NOSSE-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NOSSE-NEXT: andw -{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NOSSE-NEXT: orw $16384, %ax # imm = 0x4000 +; GISEL-X64-NOSSE-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: retq +; ; X86-LABEL: func_03: ; X86: # %bb.0: ; 
X86-NEXT: pushl %eax @@ -127,6 +249,18 @@ define void @func_03() nounwind { ; X86-NEXT: popl %eax ; X86-NEXT: retl ; +; GISEL-X86-LABEL: func_03: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %eax +; GISEL-X86-NEXT: fnstcw (%esp) +; GISEL-X86-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NEXT: andw (%esp), %ax +; GISEL-X86-NEXT: orw $16384, %ax # imm = 0x4000 +; GISEL-X86-NEXT: movw %ax, (%esp) +; GISEL-X86-NEXT: fldcw (%esp) +; GISEL-X86-NEXT: popl %eax +; GISEL-X86-NEXT: retl +; ; X64-LABEL: func_03: ; X64: # %bb.0: ; X64-NEXT: fnstcw -{{[0-9]+}}(%rsp) @@ -142,6 +276,22 @@ define void @func_03() nounwind { ; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp) ; X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) ; X64-NEXT: retq +; +; GISEL-X64-LABEL: func_03: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NEXT: andw -{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NEXT: orw $16384, %ax # imm = 0x4000 +; GISEL-X64-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: stmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movl $-24577, %eax # imm = 0x9FFF +; GISEL-X64-NEXT: andl -{{[0-9]+}}(%rsp), %eax +; GISEL-X64-NEXT: orl $16384, %eax # imm = 0x4000 +; GISEL-X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: retq call void @llvm.set.rounding(i32 2) ; Upward (CW[11-10] = 10) ret void } @@ -159,6 +309,18 @@ define void @func_04() nounwind { ; X86-NOSSE-NEXT: popl %eax ; X86-NOSSE-NEXT: retl ; +; GISEL-X86-NOSSE-LABEL: func_04: +; GISEL-X86-NOSSE: # %bb.0: +; GISEL-X86-NOSSE-NEXT: pushl %eax +; GISEL-X86-NOSSE-NEXT: fnstcw (%esp) +; GISEL-X86-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NOSSE-NEXT: andw (%esp), %ax +; GISEL-X86-NOSSE-NEXT: orw $8192, %ax # imm = 0x2000 +; GISEL-X86-NOSSE-NEXT: movw %ax, (%esp) +; GISEL-X86-NOSSE-NEXT: fldcw (%esp) +; GISEL-X86-NOSSE-NEXT: popl %eax +; GISEL-X86-NOSSE-NEXT: retl +; ; X64-NOSSE-LABEL: func_04: ; X64-NOSSE: # %bb.0: ; X64-NOSSE-NEXT: fnstcw -{{[0-9]+}}(%rsp) @@ -169,6 +331,16 @@ define void @func_04() nounwind { ; X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) ; X64-NOSSE-NEXT: retq ; +; GISEL-X64-NOSSE-LABEL: func_04: +; GISEL-X64-NOSSE: # %bb.0: +; GISEL-X64-NOSSE-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NOSSE-NEXT: andw -{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NOSSE-NEXT: orw $8192, %ax # imm = 0x2000 +; GISEL-X64-NOSSE-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: retq +; ; X86-LABEL: func_04: ; X86: # %bb.0: ; X86-NEXT: pushl %eax @@ -181,6 +353,18 @@ define void @func_04() nounwind { ; X86-NEXT: popl %eax ; X86-NEXT: retl ; +; GISEL-X86-LABEL: func_04: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %eax +; GISEL-X86-NEXT: fnstcw (%esp) +; GISEL-X86-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NEXT: andw (%esp), %ax +; GISEL-X86-NEXT: orw $8192, %ax # imm = 0x2000 +; GISEL-X86-NEXT: movw %ax, (%esp) +; GISEL-X86-NEXT: fldcw (%esp) +; GISEL-X86-NEXT: popl %eax +; GISEL-X86-NEXT: retl +; ; X64-LABEL: func_04: ; X64: # %bb.0: ; X64-NEXT: fnstcw -{{[0-9]+}}(%rsp) @@ -196,6 +380,22 @@ define void @func_04() nounwind { ; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp) ; X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) ; X64-NEXT: retq +; +; GISEL-X64-LABEL: func_04: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NEXT: andw 
-{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NEXT: orw $8192, %ax # imm = 0x2000 +; GISEL-X64-NEXT: movw %ax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: stmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movl $-24577, %eax # imm = 0x9FFF +; GISEL-X64-NEXT: andl -{{[0-9]+}}(%rsp), %eax +; GISEL-X64-NEXT: orl $8192, %eax # imm = 0x2000 +; GISEL-X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: retq call void @llvm.set.rounding(i32 3) ; Downward (CW[11-10] = 01) ret void } @@ -219,6 +419,25 @@ define void @func_05(i32 %x) nounwind { ; X86-NOSSE-NEXT: popl %eax ; X86-NOSSE-NEXT: retl ; +; GISEL-X86-NOSSE-LABEL: func_05: +; GISEL-X86-NOSSE: # %bb.0: +; GISEL-X86-NOSSE-NEXT: pushl %eax +; GISEL-X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NOSSE-NEXT: fnstcw (%esp) +; GISEL-X86-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NOSSE-NEXT: andw (%esp), %ax +; GISEL-X86-NOSSE-NEXT: addl %ecx, %ecx +; GISEL-X86-NOSSE-NEXT: addl $4, %ecx +; GISEL-X86-NOSSE-NEXT: movw $201, %dx +; GISEL-X86-NOSSE-NEXT: # kill: def $cl killed $cl killed $ecx +; GISEL-X86-NOSSE-NEXT: shlw %cl, %dx +; GISEL-X86-NOSSE-NEXT: andw $3072, %dx # imm = 0xC00 +; GISEL-X86-NOSSE-NEXT: orw %ax, %dx +; GISEL-X86-NOSSE-NEXT: movw %dx, (%esp) +; GISEL-X86-NOSSE-NEXT: fldcw (%esp) +; GISEL-X86-NOSSE-NEXT: popl %eax +; GISEL-X86-NOSSE-NEXT: retl +; ; X64-NOSSE-LABEL: func_05: ; X64-NOSSE: # %bb.0: ; X64-NOSSE-NEXT: # kill: def $edi killed $edi def $rdi @@ -235,6 +454,23 @@ define void @func_05(i32 %x) nounwind { ; X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) ; X64-NOSSE-NEXT: retq ; +; GISEL-X64-NOSSE-LABEL: func_05: +; GISEL-X64-NOSSE: # %bb.0: +; GISEL-X64-NOSSE-NEXT: # kill: def $edi killed $edi def $rdi +; GISEL-X64-NOSSE-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NOSSE-NEXT: andw -{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NOSSE-NEXT: leal (%rdi,%rdi), %ecx +; GISEL-X64-NOSSE-NEXT: addl $4, %ecx +; GISEL-X64-NOSSE-NEXT: movw $201, %dx +; GISEL-X64-NOSSE-NEXT: # kill: def $cl killed $cl killed $ecx +; GISEL-X64-NOSSE-NEXT: shlw %cl, %dx +; GISEL-X64-NOSSE-NEXT: andw $3072, %dx # imm = 0xC00 +; GISEL-X64-NOSSE-NEXT: orw %ax, %dx +; GISEL-X64-NOSSE-NEXT: movw %dx, -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NOSSE-NEXT: retq +; ; X86-LABEL: func_05: ; X86: # %bb.0: ; X86-NEXT: pushl %eax @@ -253,6 +489,25 @@ define void @func_05(i32 %x) nounwind { ; X86-NEXT: popl %eax ; X86-NEXT: retl ; +; GISEL-X86-LABEL: func_05: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: fnstcw (%esp) +; GISEL-X86-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X86-NEXT: andw (%esp), %ax +; GISEL-X86-NEXT: addl %ecx, %ecx +; GISEL-X86-NEXT: addl $4, %ecx +; GISEL-X86-NEXT: movw $201, %dx +; GISEL-X86-NEXT: # kill: def $cl killed $cl killed $ecx +; GISEL-X86-NEXT: shlw %cl, %dx +; GISEL-X86-NEXT: andw $3072, %dx # imm = 0xC00 +; GISEL-X86-NEXT: orw %ax, %dx +; GISEL-X86-NEXT: movw %dx, (%esp) +; GISEL-X86-NEXT: fldcw (%esp) +; GISEL-X86-NEXT: popl %eax +; GISEL-X86-NEXT: retl +; ; X64-LABEL: func_05: ; X64: # %bb.0: ; X64-NEXT: # kill: def $edi killed $edi def $rdi @@ -274,6 +529,31 @@ define void @func_05(i32 %x) nounwind { ; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp) ; X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) ; X64-NEXT: retq +; +; GISEL-X64-LABEL: func_05: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: # kill: def $edi killed 
$edi def $rdi +; GISEL-X64-NEXT: fnstcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movw $-3073, %ax # imm = 0xF3FF +; GISEL-X64-NEXT: andw -{{[0-9]+}}(%rsp), %ax +; GISEL-X64-NEXT: leal (%rdi,%rdi), %ecx +; GISEL-X64-NEXT: addl $4, %ecx +; GISEL-X64-NEXT: movw $201, %dx +; GISEL-X64-NEXT: # kill: def $cl killed $cl killed $ecx +; GISEL-X64-NEXT: shlw %cl, %dx +; GISEL-X64-NEXT: andw $3072, %dx # imm = 0xC00 +; GISEL-X64-NEXT: movzwl %dx, %ecx +; GISEL-X64-NEXT: leal (,%rcx,8), %edx +; GISEL-X64-NEXT: orw %ax, %cx +; GISEL-X64-NEXT: movw %cx, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: fldcw -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: stmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: movl $-24577, %eax # imm = 0x9FFF +; GISEL-X64-NEXT: andl -{{[0-9]+}}(%rsp), %eax +; GISEL-X64-NEXT: orl %edx, %eax +; GISEL-X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: ldmxcsr -{{[0-9]+}}(%rsp) +; GISEL-X64-NEXT: retq call void @llvm.set.rounding(i32 %x) ; Downward ret void } @@ -284,10 +564,6 @@ attributes #0 = { nounwind "use-soft-float"="true" } ; FASTISEL-X64-NOSSE: {{.*}} ; FASTISEL-X86: {{.*}} ; FASTISEL-X86-NOSSE: {{.*}} -; GISEL-X64: {{.*}} -; GISEL-X64-NOSSE: {{.*}} -; GISEL-X86: {{.*}} -; GISEL-X86-NOSSE: {{.*}} ; SDAG-X64: {{.*}} ; SDAG-X64-NOSSE: {{.*}} ; SDAG-X86: {{.*}} diff --git a/llvm/test/CodeGen/X86/isel-set-invalid-rounding.ll b/llvm/test/CodeGen/X86/isel-set-invalid-rounding.ll new file mode 100644 index 0000000000000..9fed9945532a0 --- /dev/null +++ b/llvm/test/CodeGen/X86/isel-set-invalid-rounding.ll @@ -0,0 +1,36 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: not llc < %s -mtriple=i686-- -fast-isel -filetype=null 2>&1 | FileCheck %s --check-prefixes=ERROR +; RUN: not llc < %s -mtriple=i686-- -global-isel=0 -fast-isel=0 -filetype=null 2>&1 | FileCheck %s --check-prefixes=ERROR +; RUN: not llc < %s -mtriple=i686-- -global-isel -global-isel-abort=1 -filetype=null 2>&1 | FileCheck %s --check-prefixes=ERROR +; RUN: not llc < %s -mtriple=x86_64-- -fast-isel -filetype=null 2>&1 | FileCheck %s --check-prefixes=ERROR +; RUN: not llc < %s -mtriple=x86_64-- -global-isel=0 -fast-isel=0 -filetype=null 2>&1 | FileCheck %s --check-prefixes=ERROR +; RUN: not llc < %s -mtriple=x86_64-- -global-isel -global-isel-abort=1 -filetype=null 2>&1 | FileCheck %s --check-prefixes=ERROR + +; ERROR: error: isel-set-invalid-rounding:3:3: in function foo void (): rounding mode is not supported by X86 hardware + +define void @foo() !dbg !9 { +entry: + tail call void @llvm.set.rounding(i32 99), !dbg !12 + ret void, !dbg !13 +} + +declare void @llvm.set.rounding(i32) + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!2, !3, !4, !5, !6, !7} +!llvm.ident = !{!8} + +!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None) +!1 = !DIFile(filename: "isel-set-invalid-rounding", directory: "/tmp") +!2 = !{i32 7, !"Dwarf Version", i32 5} +!3 = !{i32 2, !"Debug Info Version", i32 3} +!4 = !{i32 1, !"wchar_size", i32 4} +!5 = !{i32 8, !"PIC Level", i32 2} +!6 = !{i32 7, !"PIE Level", i32 2} +!7 = !{i32 7, !"uwtable", i32 2} +!8 = !{!"clang"} +!9 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 2, type: !10, scopeLine: 2, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, keyInstructions: true) +!10 = !DISubroutineType(types: !11) +!11 = !{null} +!12 = !DILocation(line: 3, column: 3, scope: 
!9) +!13 = !DILocation(line: 4, column: 1, scope: !9, atomGroup: 1, atomRank: 1) diff --git a/llvm/test/CodeGen/X86/isel-smax.ll b/llvm/test/CodeGen/X86/isel-smax.ll index 9c9a48e3a1b3e..1ce0a8006bb74 100644 --- a/llvm/test/CodeGen/X86/isel-smax.ll +++ b/llvm/test/CodeGen/X86/isel-smax.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: smax_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmovgl %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: smax_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmovgl %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: smax_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: smax_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: smax_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smax_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: jg .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smax_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: setg %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; 
GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.smax.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @smax_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smax_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmovgl %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smax_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smax_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: jg .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: smax_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: jg .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smax_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @smax_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smax_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: setg %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.smax.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @smax_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmovgl %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smax_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmovgl %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smax_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: smax_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @smax_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smax_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: jg .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smax_i32: +; 
GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: setg %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.smax.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @smax_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovgq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smax_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovgq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smax_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smax_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sbbl %edx, %edi -; X86-NEXT: jl .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: smax_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: cmpl %eax, %ecx +; DAG-X86-NEXT: movl %esi, %edi +; DAG-X86-NEXT: sbbl %edx, %edi +; DAG-X86-NEXT: jl .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smax_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @smax_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smax_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: seta %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: setg %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; 
GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.smax.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isel-smin.ll b/llvm/test/CodeGen/X86/isel-smin.ll index 7349a7c6a06f3..bbed3c356cb3b 100644 --- a/llvm/test/CodeGen/X86/isel-smin.ll +++ b/llvm/test/CodeGen/X86/isel-smin.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: smin_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmovll %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: smin_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmovll %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: smin_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: smin_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: smin_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smin_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: jl .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smin_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: setl %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; 
GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.smin.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @smin_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smin_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmovll %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smin_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smin_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: jl .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: smin_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: jl .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smin_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @smin_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smin_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: setl %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.smin.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @smin_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmovll %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smin_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmovll %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smin_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: smin_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @smin_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smin_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: jl .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; 
FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smin_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: setl %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.smin.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @smin_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovlq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smin_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovlq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smin_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smin_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: cmpl %ecx, %eax -; X86-NEXT: movl %edx, %edi -; X86-NEXT: sbbl %esi, %edi -; X86-NEXT: jl .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: smin_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: cmpl %ecx, %eax +; DAG-X86-NEXT: movl %edx, %edi +; DAG-X86-NEXT: sbbl %esi, %edi +; DAG-X86-NEXT: jl .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smin_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @smin_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smin_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: setb %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: setl %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; 
GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.smin.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isel-umax.ll b/llvm/test/CodeGen/X86/isel-umax.ll index a90456cdbebb1..990af262065af 100644 --- a/llvm/test/CodeGen/X86/isel-umax.ll +++ b/llvm/test/CodeGen/X86/isel-umax.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: umax_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmoval %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: umax_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmoval %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: umax_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: umax_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: umax_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umax_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: ja .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umax_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: seta %dl +; 
GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.umax.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @umax_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umax_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmoval %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umax_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umax_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: ja .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: umax_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: ja .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umax_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @umax_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umax_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: seta %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.umax.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @umax_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmoval %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umax_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmoval %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umax_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: umax_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @umax_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umax_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: ja .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; 
FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umax_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: seta %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.umax.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @umax_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovaq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umax_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovaq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umax_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umax_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sbbl %edx, %edi -; X86-NEXT: jb .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: umax_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: cmpl %eax, %ecx +; DAG-X86-NEXT: movl %esi, %edi +; DAG-X86-NEXT: sbbl %edx, %edi +; DAG-X86-NEXT: jb .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umax_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @umax_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umax_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: seta %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: seta %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; 
GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.umax.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isel-umin.ll b/llvm/test/CodeGen/X86/isel-umin.ll index 53a0b277e6d7b..1710b9fbfa059 100644 --- a/llvm/test/CodeGen/X86/isel-umin.ll +++ b/llvm/test/CodeGen/X86/isel-umin.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: umin_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmovbl %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: umin_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmovbl %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: umin_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: umin_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: umin_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umin_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: jb .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umin_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; 
GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: setb %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.umin.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @umin_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umin_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmovbl %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umin_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umin_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: jb .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: umin_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: jb .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umin_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @umin_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umin_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: setb %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.umin.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @umin_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmovbl %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umin_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmovbl %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umin_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: umin_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @umin_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umin_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; 
FASTISEL-X86-NEXT: jb .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umin_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: setb %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.umin.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @umin_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovbq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umin_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovbq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umin_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umin_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: cmpl %ecx, %eax -; X86-NEXT: movl %edx, %edi -; X86-NEXT: sbbl %esi, %edi -; X86-NEXT: jb .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: umin_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: cmpl %ecx, %eax +; DAG-X86-NEXT: movl %edx, %edi +; DAG-X86-NEXT: sbbl %esi, %edi +; DAG-X86-NEXT: jb .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umin_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @umin_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umin_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: setb %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: setb %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; 
GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.umin.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isint.ll b/llvm/test/CodeGen/X86/isint.ll index 8a56f49a6c755..8c11fe147f0d8 100644 --- a/llvm/test/CodeGen/X86/isint.ll +++ b/llvm/test/CodeGen/X86/isint.ll @@ -1,29 +1,29 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK64 %s -; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=CHECK32 %s +; RUN: llc < %s -mtriple=x86_64-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X64 %s +; RUN: llc < %s -mtriple=i686-pc-unknown -mattr=+sse2 | FileCheck -check-prefix=X86 %s ; PR19059 define i32 @isint_return(double %d) nounwind { -; CHECK64-LABEL: isint_return: -; CHECK64: # %bb.0: -; CHECK64-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK64-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK64-NEXT: cmpeqsd %xmm0, %xmm1 -; CHECK64-NEXT: movq %xmm1, %rax -; CHECK64-NEXT: andl $1, %eax -; CHECK64-NEXT: # kill: def $eax killed $eax killed $rax -; CHECK64-NEXT: retq +; X64-LABEL: isint_return: +; X64: # %bb.0: +; X64-NEXT: cvttpd2dq %xmm0, %xmm1 +; X64-NEXT: cvtdq2pd %xmm1, %xmm1 +; X64-NEXT: cmpeqsd %xmm0, %xmm1 +; X64-NEXT: movq %xmm1, %rax +; X64-NEXT: andl $1, %eax +; X64-NEXT: # kill: def $eax killed $eax killed $rax +; X64-NEXT: retq ; -; CHECK32-LABEL: isint_return: -; CHECK32: # %bb.0: -; CHECK32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK32-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK32-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK32-NEXT: cmpeqsd %xmm0, %xmm1 -; CHECK32-NEXT: movd %xmm1, %eax -; CHECK32-NEXT: andl $1, %eax -; CHECK32-NEXT: retl +; X86-LABEL: isint_return: +; X86: # %bb.0: +; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; X86-NEXT: cvttpd2dq %xmm0, %xmm1 +; X86-NEXT: cvtdq2pd %xmm1, %xmm1 +; X86-NEXT: cmpeqsd %xmm0, %xmm1 +; X86-NEXT: movd %xmm1, %eax +; X86-NEXT: andl $1, %eax +; X86-NEXT: retl %i = fptosi double %d to i32 %e = sitofp i32 %i to double %c = fcmp oeq double %d, %e @@ -32,24 +32,24 @@ define i32 @isint_return(double %d) nounwind { } define i32 @isint_float_return(float %f) nounwind { -; CHECK64-LABEL: isint_float_return: -; CHECK64: # %bb.0: -; CHECK64-NEXT: cvttps2dq %xmm0, %xmm1 -; CHECK64-NEXT: cvtdq2ps %xmm1, %xmm1 -; CHECK64-NEXT: cmpeqss %xmm0, %xmm1 -; CHECK64-NEXT: movd %xmm1, %eax -; CHECK64-NEXT: andl $1, %eax -; CHECK64-NEXT: retq +; X64-LABEL: isint_float_return: +; X64: # %bb.0: +; X64-NEXT: cvttps2dq %xmm0, %xmm1 +; X64-NEXT: cvtdq2ps %xmm1, %xmm1 +; X64-NEXT: cmpeqss %xmm0, %xmm1 +; X64-NEXT: movd %xmm1, %eax +; X64-NEXT: andl $1, %eax +; X64-NEXT: retq ; -; CHECK32-LABEL: isint_float_return: -; CHECK32: # %bb.0: -; CHECK32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; CHECK32-NEXT: cvttps2dq %xmm0, %xmm1 -; CHECK32-NEXT: cvtdq2ps %xmm1, %xmm1 -; CHECK32-NEXT: cmpeqss %xmm0, %xmm1 -; CHECK32-NEXT: movd %xmm1, %eax -; CHECK32-NEXT: andl $1, %eax -; CHECK32-NEXT: retl +; X86-LABEL: isint_float_return: +; X86: # %bb.0: +; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; X86-NEXT: cvttps2dq %xmm0, %xmm1 +; X86-NEXT: cvtdq2ps %xmm1, %xmm1 +; X86-NEXT: cmpeqss %xmm0, %xmm1 +; X86-NEXT: movd %xmm1, %eax +; X86-NEXT: andl $1, %eax +; X86-NEXT: retl %i = fptosi float %f to i32 %g = 
sitofp i32 %i to float %c = fcmp oeq float %f, %g @@ -60,32 +60,32 @@ define i32 @isint_float_return(float %f) nounwind { declare void @foo() define void @isint_branch(double %d) nounwind { -; CHECK64-LABEL: isint_branch: -; CHECK64: # %bb.0: -; CHECK64-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK64-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK64-NEXT: ucomisd %xmm1, %xmm0 -; CHECK64-NEXT: jne .LBB2_2 -; CHECK64-NEXT: jp .LBB2_2 -; CHECK64-NEXT: # %bb.1: # %true -; CHECK64-NEXT: pushq %rax -; CHECK64-NEXT: callq foo@PLT -; CHECK64-NEXT: popq %rax -; CHECK64-NEXT: .LBB2_2: # %false -; CHECK64-NEXT: retq +; X64-LABEL: isint_branch: +; X64: # %bb.0: +; X64-NEXT: cvttpd2dq %xmm0, %xmm1 +; X64-NEXT: cvtdq2pd %xmm1, %xmm1 +; X64-NEXT: ucomisd %xmm1, %xmm0 +; X64-NEXT: jne .LBB2_2 +; X64-NEXT: jp .LBB2_2 +; X64-NEXT: # %bb.1: # %true +; X64-NEXT: pushq %rax +; X64-NEXT: callq foo@PLT +; X64-NEXT: popq %rax +; X64-NEXT: .LBB2_2: # %false +; X64-NEXT: retq ; -; CHECK32-LABEL: isint_branch: -; CHECK32: # %bb.0: -; CHECK32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK32-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK32-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK32-NEXT: ucomisd %xmm1, %xmm0 -; CHECK32-NEXT: jne .LBB2_2 -; CHECK32-NEXT: jp .LBB2_2 -; CHECK32-NEXT: # %bb.1: # %true -; CHECK32-NEXT: calll foo@PLT -; CHECK32-NEXT: .LBB2_2: # %false -; CHECK32-NEXT: retl +; X86-LABEL: isint_branch: +; X86: # %bb.0: +; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; X86-NEXT: cvttpd2dq %xmm0, %xmm1 +; X86-NEXT: cvtdq2pd %xmm1, %xmm1 +; X86-NEXT: ucomisd %xmm1, %xmm0 +; X86-NEXT: jne .LBB2_2 +; X86-NEXT: jp .LBB2_2 +; X86-NEXT: # %bb.1: # %true +; X86-NEXT: calll foo@PLT +; X86-NEXT: .LBB2_2: # %false +; X86-NEXT: retl %i = fptosi double %d to i32 %e = sitofp i32 %i to double %c = fcmp oeq double %d, %e diff --git a/llvm/test/CodeGen/X86/known-signbits-shl.ll b/llvm/test/CodeGen/X86/known-signbits-shl.ll index 473fecc307ed4..57d557dec11b9 100644 --- a/llvm/test/CodeGen/X86/known-signbits-shl.ll +++ b/llvm/test/CodeGen/X86/known-signbits-shl.ll @@ -137,7 +137,7 @@ define void @computeNumSignBits_shl_zext_vec_3(<2 x i8> %x, ptr %p) nounwind { ; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; X64-NEXT: por %xmm2, %xmm1 ; X64-NEXT: movdqa %xmm0, %xmm2 -; X64-NEXT: paddw %xmm0, %xmm2 +; X64-NEXT: paddw %xmm2, %xmm2 ; X64-NEXT: movdqa %xmm2, %xmm3 ; X64-NEXT: psraw $1, %xmm3 ; X64-NEXT: pcmpeqw %xmm0, %xmm3 diff --git a/llvm/test/CodeGen/X86/lea-16bit.ll b/llvm/test/CodeGen/X86/lea-16bit.ll index cec29ab1da6ab..40da01d9ab8f3 100644 --- a/llvm/test/CodeGen/X86/lea-16bit.ll +++ b/llvm/test/CodeGen/X86/lea-16bit.ll @@ -13,7 +13,8 @@ define i16 @lea16bit(i16 %in) { ; NDD-LABEL: lea16bit: ; NDD: # %bb.0: ; NDD-NEXT: # kill: def $edi killed $edi def $rdi -; NDD-NEXT: leaw 1(%rdi,%rdi), %ax +; NDD-NEXT: leal 1(%rdi,%rdi), %eax +; NDD-NEXT: # kill: def $ax killed $ax killed $eax ; NDD-NEXT: retq %shl = shl i16 %in, 1 %or = or i16 %shl, 1 diff --git a/llvm/test/CodeGen/X86/lea-8bit.ll b/llvm/test/CodeGen/X86/lea-8bit.ll index 98222dfc0407c..fc295f75e23c7 100644 --- a/llvm/test/CodeGen/X86/lea-8bit.ll +++ b/llvm/test/CodeGen/X86/lea-8bit.ll @@ -14,7 +14,8 @@ define i8 @lea8bit(i8 %in) { ; NDD-LABEL: lea8bit: ; NDD: # %bb.0: ; NDD-NEXT: # kill: def $edi killed $edi def $rdi -; NDD-NEXT: leab 1(%rdi,%rdi), %al +; NDD-NEXT: leal 1(%rdi,%rdi), %eax +; NDD-NEXT: # kill: def $al killed $al killed $eax ; NDD-NEXT: retq %shl = shl i8 %in, 1 %or = or i8 %shl, 1 diff --git a/llvm/test/CodeGen/X86/llvm.frexp.ll b/llvm/test/CodeGen/X86/llvm.frexp.ll index 
83840dd85c533..e3a1b1b83b2e3 100644 --- a/llvm/test/CodeGen/X86/llvm.frexp.ll +++ b/llvm/test/CodeGen/X86/llvm.frexp.ll @@ -582,6 +582,22 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind { ret i32 %result.0 } +define { float, i32 } @pr160981() { +; X64-LABEL: pr160981: +; X64: # %bb.0: +; X64-NEXT: movss {{.*#+}} xmm0 = [9.9999988E-1,0.0E+0,0.0E+0,0.0E+0] +; X64-NEXT: movl $-126, %eax +; X64-NEXT: retq +; +; WIN32-LABEL: pr160981: +; WIN32: # %bb.0: +; WIN32-NEXT: flds __real@3f7ffffe +; WIN32-NEXT: movl $-126, %eax +; WIN32-NEXT: retl + %ret = call { float, i32 } @llvm.frexp.f32.i32(float bitcast (i32 8388607 to float)) + ret { float, i32 } %ret +} + ; FIXME: Widen vector result ; define { <2 x double>, <2 x i32> } @test_frexp_v2f64_v2i32(<2 x double> %a) nounwind { ; %result = call { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double> %a) diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll index 4e6f666fa05de..4cde581c10508 100644 --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -4806,9 +4806,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-KNL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: retq @@ -4830,9 +4829,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: retq @@ -4842,10 +4840,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax ; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: retq @@ -4875,9 +4872,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-KNL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: retq @@ -4899,9 +4895,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; 
X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: retq @@ -4911,10 +4906,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax ; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: retq @@ -4944,9 +4938,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 -; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 +; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2 ; X64-KNL-NEXT: kmovw %k1, %k2 ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} @@ -4972,9 +4965,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 -; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2 ; X64-SKX-SMALL-NEXT: kmovw %k1, %k2 ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} @@ -4986,10 +4978,9 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax -; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm2 ; X64-SKX-LARGE-NEXT: kmovw %k1, %k2 ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} diff --git a/llvm/test/CodeGen/X86/masked_store_trunc.ll b/llvm/test/CodeGen/X86/masked_store_trunc.ll index 2f0d419132492..ecf4fbb603a8f 100644 --- a/llvm/test/CodeGen/X86/masked_store_trunc.ll +++ b/llvm/test/CodeGen/X86/masked_store_trunc.ll @@ -1,11 +1,11 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck 
%s --check-prefix=SSE2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefix=SSE4 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefixes=SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefixes=SSE4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefixes=AVX,AVX2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX512F -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefix=AVX512VL -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefix=AVX512BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefixes=AVX512VL,AVX512FVL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512bw | FileCheck %s --check-prefixes=AVX512VL,AVX512BWVL define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %p, <8 x i32> %mask) { @@ -178,13 +178,13 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: truncstore_v8i64_v8i32: -; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1 -; AVX512F-NEXT: vpmovqd %zmm0, (%rdi) {%k1} -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq +; AVX512-LABEL: truncstore_v8i64_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k1 +; AVX512-NEXT: vpmovqd %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; ; AVX512VL-LABEL: truncstore_v8i64_v8i32: ; AVX512VL: # %bb.0: @@ -192,14 +192,6 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX512VL-NEXT: vpmovqd %zmm0, (%rdi) {%k1} ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: truncstore_v8i64_v8i32: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 -; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k1 -; AVX512BW-NEXT: vpmovqd %zmm0, (%rdi) {%k1} -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq %a = icmp ne <8 x i32> %mask, zeroinitializer %b = trunc <8 x i64> %x to <8 x i32> call void @llvm.masked.store.v8i32.p0(<8 x i32> %b, ptr %p, i32 1, <8 x i1> %a) @@ -573,6 +565,70 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i64_v8i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovqw %zmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB1_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB1_3 +; AVX512FVL-NEXT: .LBB1_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB1_5 +; AVX512FVL-NEXT: .LBB1_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB1_7 +; AVX512FVL-NEXT: .LBB1_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB1_9 +; AVX512FVL-NEXT: .LBB1_10: # 
%else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB1_11 +; AVX512FVL-NEXT: .LBB1_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB1_13 +; AVX512FVL-NEXT: .LBB1_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB1_15 +; AVX512FVL-NEXT: .LBB1_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB1_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB1_4 +; AVX512FVL-NEXT: .LBB1_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB1_6 +; AVX512FVL-NEXT: .LBB1_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB1_8 +; AVX512FVL-NEXT: .LBB1_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB1_10 +; AVX512FVL-NEXT: .LBB1_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB1_12 +; AVX512FVL-NEXT: .LBB1_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB1_14 +; AVX512FVL-NEXT: .LBB1_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB1_16 +; AVX512FVL-NEXT: .LBB1_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i64_v8i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -960,6 +1016,70 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i64_v8i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovqb %zmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB2_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB2_3 +; AVX512FVL-NEXT: .LBB2_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB2_5 +; AVX512FVL-NEXT: .LBB2_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB2_7 +; AVX512FVL-NEXT: .LBB2_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB2_9 +; AVX512FVL-NEXT: .LBB2_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB2_11 +; AVX512FVL-NEXT: .LBB2_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB2_13 +; AVX512FVL-NEXT: .LBB2_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB2_15 +; AVX512FVL-NEXT: .LBB2_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB2_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB2_4 +; AVX512FVL-NEXT: .LBB2_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB2_6 +; AVX512FVL-NEXT: .LBB2_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB2_8 +; AVX512FVL-NEXT: .LBB2_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB2_10 +; 
AVX512FVL-NEXT: .LBB2_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB2_12 +; AVX512FVL-NEXT: .LBB2_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB2_14 +; AVX512FVL-NEXT: .LBB2_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB2_16 +; AVX512FVL-NEXT: .LBB2_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i64_v8i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -1080,17 +1200,17 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: truncstore_v4i64_v4i32: -; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 -; AVX512F-NEXT: kshiftlw $12, %k0, %k0 -; AVX512F-NEXT: kshiftrw $12, %k0, %k1 -; AVX512F-NEXT: vpmovqd %zmm0, %ymm0 -; AVX512F-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq +; AVX512-LABEL: truncstore_v4i64_v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512-NEXT: kshiftlw $12, %k0, %k0 +; AVX512-NEXT: kshiftrw $12, %k0, %k1 +; AVX512-NEXT: vpmovqd %zmm0, %ymm0 +; AVX512-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; ; AVX512VL-LABEL: truncstore_v4i64_v4i32: ; AVX512VL: # %bb.0: @@ -1098,18 +1218,6 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX512VL-NEXT: vpmovqd %ymm0, (%rdi) {%k1} ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: truncstore_v4i64_v4i32: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 -; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k0 -; AVX512BW-NEXT: kshiftlw $12, %k0, %k0 -; AVX512BW-NEXT: kshiftrw $12, %k0, %k1 -; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0 -; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq %a = icmp ne <4 x i32> %mask, zeroinitializer %b = trunc <4 x i64> %x to <4 x i32> call void @llvm.masked.store.v4i32.p0(<4 x i32> %b, ptr %p, i32 1, <4 x i1> %a) @@ -1321,6 +1429,42 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i64_v4i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovqw %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB4_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB4_3 +; AVX512FVL-NEXT: .LBB4_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB4_5 +; AVX512FVL-NEXT: .LBB4_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB4_7 +; AVX512FVL-NEXT: .LBB4_8: # %else6 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB4_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je 
.LBB4_4 +; AVX512FVL-NEXT: .LBB4_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB4_6 +; AVX512FVL-NEXT: .LBB4_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB4_8 +; AVX512FVL-NEXT: .LBB4_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i64_v4i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -1552,6 +1696,42 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i64_v4i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovqb %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB5_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB5_3 +; AVX512FVL-NEXT: .LBB5_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB5_5 +; AVX512FVL-NEXT: .LBB5_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB5_7 +; AVX512FVL-NEXT: .LBB5_8: # %else6 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB5_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB5_4 +; AVX512FVL-NEXT: .LBB5_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB5_6 +; AVX512FVL-NEXT: .LBB5_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB5_8 +; AVX512FVL-NEXT: .LBB5_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i64_v4i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -1646,33 +1826,22 @@ define void @truncstore_v2i64_v2i32(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX2-NEXT: vpmaskmovd %xmm0, %xmm1, (%rdi) ; AVX2-NEXT: retq ; -; AVX512F-LABEL: truncstore_v2i64_v2i32: -; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k0 -; AVX512F-NEXT: kshiftlw $14, %k0, %k0 -; AVX512F-NEXT: kshiftrw $14, %k0, %k1 -; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX512F-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq +; AVX512-LABEL: truncstore_v2i64_v2i32: +; AVX512: # %bb.0: +; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: vptestmq %zmm1, %zmm1, %k0 +; AVX512-NEXT: kshiftlw $14, %k0, %k0 +; AVX512-NEXT: kshiftrw $14, %k0, %k1 +; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; AVX512-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; ; AVX512VL-LABEL: truncstore_v2i64_v2i32: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vptestmq %xmm1, %xmm1, %k1 ; AVX512VL-NEXT: vpmovqd %xmm0, (%rdi) {%k1} ; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: truncstore_v2i64_v2i32: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512BW-NEXT: vptestmq %zmm1, %zmm1, %k0 -; AVX512BW-NEXT: kshiftlw $14, %k0, %k0 -; AVX512BW-NEXT: kshiftrw $14, %k0, %k1 -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX512BW-NEXT: 
vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq %a = icmp ne <2 x i64> %mask, zeroinitializer %b = trunc <2 x i64> %x to <2 x i32> call void @llvm.masked.store.v2i32.p0(<2 x i32> %b, ptr %p, i32 1, <2 x i1> %a) @@ -1777,6 +1946,26 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v2i64_v2i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmq %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovqw %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB7_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB7_3 +; AVX512FVL-NEXT: .LBB7_4: # %else2 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB7_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB7_4 +; AVX512FVL-NEXT: .LBB7_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v2i64_v2i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -1896,6 +2085,26 @@ define void @truncstore_v2i64_v2i8(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v2i64_v2i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmq %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovqb %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB8_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB8_3 +; AVX512FVL-NEXT: .LBB8_4: # %else2 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB8_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB8_4 +; AVX512FVL-NEXT: .LBB8_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v2i64_v2i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -2581,6 +2790,126 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, ptr %p, <16 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v16i32_v16i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512FVL-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB9_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB9_3 +; AVX512FVL-NEXT: .LBB9_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB9_5 +; AVX512FVL-NEXT: .LBB9_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB9_7 +; AVX512FVL-NEXT: .LBB9_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB9_9 +; AVX512FVL-NEXT: .LBB9_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB9_11 +; AVX512FVL-NEXT: .LBB9_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB9_13 +; AVX512FVL-NEXT: .LBB9_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB9_16 +; AVX512FVL-NEXT: .LBB9_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: .LBB9_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512FVL-NEXT: jne .LBB9_17 +; AVX512FVL-NEXT: # 
%bb.18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB9_19 +; AVX512FVL-NEXT: .LBB9_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB9_21 +; AVX512FVL-NEXT: .LBB9_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB9_23 +; AVX512FVL-NEXT: .LBB9_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB9_25 +; AVX512FVL-NEXT: .LBB9_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB9_27 +; AVX512FVL-NEXT: .LBB9_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB9_29 +; AVX512FVL-NEXT: .LBB9_30: # %else28 +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: jne .LBB9_31 +; AVX512FVL-NEXT: .LBB9_32: # %else30 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB9_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB9_4 +; AVX512FVL-NEXT: .LBB9_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB9_6 +; AVX512FVL-NEXT: .LBB9_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB9_8 +; AVX512FVL-NEXT: .LBB9_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB9_10 +; AVX512FVL-NEXT: .LBB9_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB9_12 +; AVX512FVL-NEXT: .LBB9_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB9_14 +; AVX512FVL-NEXT: .LBB9_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB9_15 +; AVX512FVL-NEXT: jmp .LBB9_16 +; AVX512FVL-NEXT: .LBB9_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrw $0, %xmm0, 16(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB9_20 +; AVX512FVL-NEXT: .LBB9_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 18(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB9_22 +; AVX512FVL-NEXT: .LBB9_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 20(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB9_24 +; AVX512FVL-NEXT: .LBB9_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 22(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB9_26 +; AVX512FVL-NEXT: .LBB9_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 24(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB9_28 +; AVX512FVL-NEXT: .LBB9_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 26(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB9_30 +; AVX512FVL-NEXT: .LBB9_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 28(%rdi) +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: je .LBB9_32 +; AVX512FVL-NEXT: .LBB9_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 30(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v16i32_v16i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k1 @@ -3247,6 +3576,126 @@ define void 
@truncstore_v16i32_v16i8(<16 x i32> %x, ptr %p, <16 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v16i32_v16i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512FVL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB10_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB10_3 +; AVX512FVL-NEXT: .LBB10_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB10_5 +; AVX512FVL-NEXT: .LBB10_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB10_7 +; AVX512FVL-NEXT: .LBB10_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB10_9 +; AVX512FVL-NEXT: .LBB10_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB10_11 +; AVX512FVL-NEXT: .LBB10_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB10_13 +; AVX512FVL-NEXT: .LBB10_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB10_15 +; AVX512FVL-NEXT: .LBB10_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: jne .LBB10_17 +; AVX512FVL-NEXT: .LBB10_18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB10_19 +; AVX512FVL-NEXT: .LBB10_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB10_21 +; AVX512FVL-NEXT: .LBB10_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB10_23 +; AVX512FVL-NEXT: .LBB10_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB10_25 +; AVX512FVL-NEXT: .LBB10_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB10_27 +; AVX512FVL-NEXT: .LBB10_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB10_29 +; AVX512FVL-NEXT: .LBB10_30: # %else28 +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: jne .LBB10_31 +; AVX512FVL-NEXT: .LBB10_32: # %else30 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB10_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB10_4 +; AVX512FVL-NEXT: .LBB10_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB10_6 +; AVX512FVL-NEXT: .LBB10_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB10_8 +; AVX512FVL-NEXT: .LBB10_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB10_10 +; AVX512FVL-NEXT: .LBB10_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB10_12 +; AVX512FVL-NEXT: .LBB10_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB10_14 +; AVX512FVL-NEXT: .LBB10_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB10_16 +; AVX512FVL-NEXT: .LBB10_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: je .LBB10_18 +; AVX512FVL-NEXT: .LBB10_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 8(%rdi) +; 
AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB10_20 +; AVX512FVL-NEXT: .LBB10_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 9(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB10_22 +; AVX512FVL-NEXT: .LBB10_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB10_24 +; AVX512FVL-NEXT: .LBB10_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 11(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB10_26 +; AVX512FVL-NEXT: .LBB10_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB10_28 +; AVX512FVL-NEXT: .LBB10_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 13(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB10_30 +; AVX512FVL-NEXT: .LBB10_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: je .LBB10_32 +; AVX512FVL-NEXT: .LBB10_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 15(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v16i32_v16i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k1 @@ -3619,6 +4068,70 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i32_v8i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovdw %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB11_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB11_3 +; AVX512FVL-NEXT: .LBB11_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB11_5 +; AVX512FVL-NEXT: .LBB11_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB11_7 +; AVX512FVL-NEXT: .LBB11_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB11_9 +; AVX512FVL-NEXT: .LBB11_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB11_11 +; AVX512FVL-NEXT: .LBB11_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB11_13 +; AVX512FVL-NEXT: .LBB11_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB11_15 +; AVX512FVL-NEXT: .LBB11_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB11_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB11_4 +; AVX512FVL-NEXT: .LBB11_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB11_6 +; AVX512FVL-NEXT: .LBB11_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB11_8 +; AVX512FVL-NEXT: .LBB11_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB11_10 +; AVX512FVL-NEXT: .LBB11_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB11_12 +; AVX512FVL-NEXT: .LBB11_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je 
.LBB11_14 +; AVX512FVL-NEXT: .LBB11_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB11_16 +; AVX512FVL-NEXT: .LBB11_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i32_v8i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -3996,6 +4509,70 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i32_v8i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovdb %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB12_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB12_3 +; AVX512FVL-NEXT: .LBB12_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB12_5 +; AVX512FVL-NEXT: .LBB12_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB12_7 +; AVX512FVL-NEXT: .LBB12_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB12_9 +; AVX512FVL-NEXT: .LBB12_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB12_11 +; AVX512FVL-NEXT: .LBB12_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB12_13 +; AVX512FVL-NEXT: .LBB12_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB12_15 +; AVX512FVL-NEXT: .LBB12_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB12_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB12_4 +; AVX512FVL-NEXT: .LBB12_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB12_6 +; AVX512FVL-NEXT: .LBB12_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB12_8 +; AVX512FVL-NEXT: .LBB12_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB12_10 +; AVX512FVL-NEXT: .LBB12_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB12_12 +; AVX512FVL-NEXT: .LBB12_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB12_14 +; AVX512FVL-NEXT: .LBB12_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB12_16 +; AVX512FVL-NEXT: .LBB12_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i32_v8i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -4172,6 +4749,40 @@ define void @truncstore_v4i32_v4i16(<4 x i32> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i32_v4i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovdw %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB13_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB13_3 +; AVX512FVL-NEXT: 
.LBB13_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB13_5 +; AVX512FVL-NEXT: .LBB13_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB13_7 +; AVX512FVL-NEXT: .LBB13_8: # %else6 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB13_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB13_4 +; AVX512FVL-NEXT: .LBB13_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB13_6 +; AVX512FVL-NEXT: .LBB13_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB13_8 +; AVX512FVL-NEXT: .LBB13_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i32_v4i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -4346,6 +4957,40 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i32_v4i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovdb %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB14_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB14_3 +; AVX512FVL-NEXT: .LBB14_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB14_5 +; AVX512FVL-NEXT: .LBB14_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB14_7 +; AVX512FVL-NEXT: .LBB14_8: # %else6 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB14_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB14_4 +; AVX512FVL-NEXT: .LBB14_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB14_6 +; AVX512FVL-NEXT: .LBB14_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB14_8 +; AVX512FVL-NEXT: .LBB14_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i32_v4i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -5550,6 +6195,245 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v32i16_v32i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512FVL-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1 +; AVX512FVL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512FVL-NEXT: vpmovdb %zmm2, %xmm2 +; AVX512FVL-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; AVX512FVL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512FVL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512FVL-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0 +; AVX512FVL-NEXT: vpmovmskb %ymm1, %eax +; AVX512FVL-NEXT: notl %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: 
jne .LBB15_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB15_3 +; AVX512FVL-NEXT: .LBB15_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB15_5 +; AVX512FVL-NEXT: .LBB15_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB15_7 +; AVX512FVL-NEXT: .LBB15_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB15_9 +; AVX512FVL-NEXT: .LBB15_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB15_11 +; AVX512FVL-NEXT: .LBB15_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB15_13 +; AVX512FVL-NEXT: .LBB15_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB15_15 +; AVX512FVL-NEXT: .LBB15_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: jne .LBB15_17 +; AVX512FVL-NEXT: .LBB15_18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB15_19 +; AVX512FVL-NEXT: .LBB15_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB15_21 +; AVX512FVL-NEXT: .LBB15_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB15_23 +; AVX512FVL-NEXT: .LBB15_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB15_25 +; AVX512FVL-NEXT: .LBB15_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB15_27 +; AVX512FVL-NEXT: .LBB15_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB15_29 +; AVX512FVL-NEXT: .LBB15_30: # %else28 +; AVX512FVL-NEXT: testw %ax, %ax +; AVX512FVL-NEXT: jns .LBB15_32 +; AVX512FVL-NEXT: .LBB15_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 15(%rdi) +; AVX512FVL-NEXT: .LBB15_32: # %else30 +; AVX512FVL-NEXT: testl $65536, %eax # imm = 0x10000 +; AVX512FVL-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512FVL-NEXT: jne .LBB15_33 +; AVX512FVL-NEXT: # %bb.34: # %else32 +; AVX512FVL-NEXT: testl $131072, %eax # imm = 0x20000 +; AVX512FVL-NEXT: jne .LBB15_35 +; AVX512FVL-NEXT: .LBB15_36: # %else34 +; AVX512FVL-NEXT: testl $262144, %eax # imm = 0x40000 +; AVX512FVL-NEXT: jne .LBB15_37 +; AVX512FVL-NEXT: .LBB15_38: # %else36 +; AVX512FVL-NEXT: testl $524288, %eax # imm = 0x80000 +; AVX512FVL-NEXT: jne .LBB15_39 +; AVX512FVL-NEXT: .LBB15_40: # %else38 +; AVX512FVL-NEXT: testl $1048576, %eax # imm = 0x100000 +; AVX512FVL-NEXT: jne .LBB15_41 +; AVX512FVL-NEXT: .LBB15_42: # %else40 +; AVX512FVL-NEXT: testl $2097152, %eax # imm = 0x200000 +; AVX512FVL-NEXT: jne .LBB15_43 +; AVX512FVL-NEXT: .LBB15_44: # %else42 +; AVX512FVL-NEXT: testl $4194304, %eax # imm = 0x400000 +; AVX512FVL-NEXT: jne .LBB15_45 +; AVX512FVL-NEXT: .LBB15_46: # %else44 +; AVX512FVL-NEXT: testl $8388608, %eax # imm = 0x800000 +; AVX512FVL-NEXT: jne .LBB15_47 +; AVX512FVL-NEXT: .LBB15_48: # %else46 +; AVX512FVL-NEXT: testl $16777216, %eax # imm = 0x1000000 +; AVX512FVL-NEXT: jne .LBB15_49 +; AVX512FVL-NEXT: .LBB15_50: # %else48 +; AVX512FVL-NEXT: testl $33554432, %eax # imm = 0x2000000 +; AVX512FVL-NEXT: jne .LBB15_51 +; AVX512FVL-NEXT: .LBB15_52: # %else50 +; AVX512FVL-NEXT: testl $67108864, %eax # imm = 0x4000000 +; AVX512FVL-NEXT: jne .LBB15_53 +; AVX512FVL-NEXT: .LBB15_54: # %else52 +; AVX512FVL-NEXT: testl $134217728, %eax # imm = 0x8000000 +; AVX512FVL-NEXT: jne .LBB15_55 +; AVX512FVL-NEXT: .LBB15_56: # %else54 +; AVX512FVL-NEXT: testl $268435456, %eax # imm = 0x10000000 +; AVX512FVL-NEXT: jne 
.LBB15_57 +; AVX512FVL-NEXT: .LBB15_58: # %else56 +; AVX512FVL-NEXT: testl $536870912, %eax # imm = 0x20000000 +; AVX512FVL-NEXT: jne .LBB15_59 +; AVX512FVL-NEXT: .LBB15_60: # %else58 +; AVX512FVL-NEXT: testl $1073741824, %eax # imm = 0x40000000 +; AVX512FVL-NEXT: jne .LBB15_61 +; AVX512FVL-NEXT: .LBB15_62: # %else60 +; AVX512FVL-NEXT: testl $-2147483648, %eax # imm = 0x80000000 +; AVX512FVL-NEXT: jne .LBB15_63 +; AVX512FVL-NEXT: .LBB15_64: # %else62 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB15_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB15_4 +; AVX512FVL-NEXT: .LBB15_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB15_6 +; AVX512FVL-NEXT: .LBB15_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB15_8 +; AVX512FVL-NEXT: .LBB15_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB15_10 +; AVX512FVL-NEXT: .LBB15_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB15_12 +; AVX512FVL-NEXT: .LBB15_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB15_14 +; AVX512FVL-NEXT: .LBB15_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB15_16 +; AVX512FVL-NEXT: .LBB15_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: je .LBB15_18 +; AVX512FVL-NEXT: .LBB15_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB15_20 +; AVX512FVL-NEXT: .LBB15_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 9(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB15_22 +; AVX512FVL-NEXT: .LBB15_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB15_24 +; AVX512FVL-NEXT: .LBB15_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 11(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB15_26 +; AVX512FVL-NEXT: .LBB15_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB15_28 +; AVX512FVL-NEXT: .LBB15_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 13(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB15_30 +; AVX512FVL-NEXT: .LBB15_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: testw %ax, %ax +; AVX512FVL-NEXT: js .LBB15_31 +; AVX512FVL-NEXT: jmp .LBB15_32 +; AVX512FVL-NEXT: .LBB15_33: # %cond.store31 +; AVX512FVL-NEXT: vpextrb $0, %xmm0, 16(%rdi) +; AVX512FVL-NEXT: testl $131072, %eax # imm = 0x20000 +; AVX512FVL-NEXT: je .LBB15_36 +; AVX512FVL-NEXT: .LBB15_35: # %cond.store33 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 17(%rdi) +; AVX512FVL-NEXT: testl $262144, %eax # imm = 0x40000 +; AVX512FVL-NEXT: je .LBB15_38 +; AVX512FVL-NEXT: .LBB15_37: # %cond.store35 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 18(%rdi) +; AVX512FVL-NEXT: testl $524288, %eax # imm = 0x80000 +; AVX512FVL-NEXT: je 
.LBB15_40 +; AVX512FVL-NEXT: .LBB15_39: # %cond.store37 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 19(%rdi) +; AVX512FVL-NEXT: testl $1048576, %eax # imm = 0x100000 +; AVX512FVL-NEXT: je .LBB15_42 +; AVX512FVL-NEXT: .LBB15_41: # %cond.store39 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 20(%rdi) +; AVX512FVL-NEXT: testl $2097152, %eax # imm = 0x200000 +; AVX512FVL-NEXT: je .LBB15_44 +; AVX512FVL-NEXT: .LBB15_43: # %cond.store41 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 21(%rdi) +; AVX512FVL-NEXT: testl $4194304, %eax # imm = 0x400000 +; AVX512FVL-NEXT: je .LBB15_46 +; AVX512FVL-NEXT: .LBB15_45: # %cond.store43 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 22(%rdi) +; AVX512FVL-NEXT: testl $8388608, %eax # imm = 0x800000 +; AVX512FVL-NEXT: je .LBB15_48 +; AVX512FVL-NEXT: .LBB15_47: # %cond.store45 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 23(%rdi) +; AVX512FVL-NEXT: testl $16777216, %eax # imm = 0x1000000 +; AVX512FVL-NEXT: je .LBB15_50 +; AVX512FVL-NEXT: .LBB15_49: # %cond.store47 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 24(%rdi) +; AVX512FVL-NEXT: testl $33554432, %eax # imm = 0x2000000 +; AVX512FVL-NEXT: je .LBB15_52 +; AVX512FVL-NEXT: .LBB15_51: # %cond.store49 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 25(%rdi) +; AVX512FVL-NEXT: testl $67108864, %eax # imm = 0x4000000 +; AVX512FVL-NEXT: je .LBB15_54 +; AVX512FVL-NEXT: .LBB15_53: # %cond.store51 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 26(%rdi) +; AVX512FVL-NEXT: testl $134217728, %eax # imm = 0x8000000 +; AVX512FVL-NEXT: je .LBB15_56 +; AVX512FVL-NEXT: .LBB15_55: # %cond.store53 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 27(%rdi) +; AVX512FVL-NEXT: testl $268435456, %eax # imm = 0x10000000 +; AVX512FVL-NEXT: je .LBB15_58 +; AVX512FVL-NEXT: .LBB15_57: # %cond.store55 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 28(%rdi) +; AVX512FVL-NEXT: testl $536870912, %eax # imm = 0x20000000 +; AVX512FVL-NEXT: je .LBB15_60 +; AVX512FVL-NEXT: .LBB15_59: # %cond.store57 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 29(%rdi) +; AVX512FVL-NEXT: testl $1073741824, %eax # imm = 0x40000000 +; AVX512FVL-NEXT: je .LBB15_62 +; AVX512FVL-NEXT: .LBB15_61: # %cond.store59 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 30(%rdi) +; AVX512FVL-NEXT: testl $-2147483648, %eax # imm = 0x80000000 +; AVX512FVL-NEXT: je .LBB15_64 +; AVX512FVL-NEXT: .LBB15_63: # %cond.store61 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 31(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v32i16_v32i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -6177,6 +7061,129 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, ptr %p, <16 x i8> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v16i16_v16i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512FVL-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1 +; AVX512FVL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512FVL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512FVL-NEXT: vpmovmskb %xmm1, %eax +; AVX512FVL-NEXT: xorl $65535, %eax # imm = 0xFFFF +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB16_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB16_3 +; AVX512FVL-NEXT: .LBB16_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB16_5 +; AVX512FVL-NEXT: .LBB16_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; 
AVX512FVL-NEXT: jne .LBB16_7 +; AVX512FVL-NEXT: .LBB16_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB16_9 +; AVX512FVL-NEXT: .LBB16_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB16_11 +; AVX512FVL-NEXT: .LBB16_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB16_13 +; AVX512FVL-NEXT: .LBB16_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB16_15 +; AVX512FVL-NEXT: .LBB16_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: jne .LBB16_17 +; AVX512FVL-NEXT: .LBB16_18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB16_19 +; AVX512FVL-NEXT: .LBB16_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB16_21 +; AVX512FVL-NEXT: .LBB16_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB16_23 +; AVX512FVL-NEXT: .LBB16_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB16_25 +; AVX512FVL-NEXT: .LBB16_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB16_27 +; AVX512FVL-NEXT: .LBB16_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB16_29 +; AVX512FVL-NEXT: .LBB16_30: # %else28 +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: jne .LBB16_31 +; AVX512FVL-NEXT: .LBB16_32: # %else30 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB16_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB16_4 +; AVX512FVL-NEXT: .LBB16_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB16_6 +; AVX512FVL-NEXT: .LBB16_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB16_8 +; AVX512FVL-NEXT: .LBB16_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB16_10 +; AVX512FVL-NEXT: .LBB16_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB16_12 +; AVX512FVL-NEXT: .LBB16_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB16_14 +; AVX512FVL-NEXT: .LBB16_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB16_16 +; AVX512FVL-NEXT: .LBB16_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: je .LBB16_18 +; AVX512FVL-NEXT: .LBB16_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB16_20 +; AVX512FVL-NEXT: .LBB16_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 9(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB16_22 +; AVX512FVL-NEXT: .LBB16_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB16_24 +; AVX512FVL-NEXT: .LBB16_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 11(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB16_26 +; AVX512FVL-NEXT: .LBB16_25: # %cond.store23 
+; AVX512FVL-NEXT: vpextrb $12, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB16_28 +; AVX512FVL-NEXT: .LBB16_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 13(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB16_30 +; AVX512FVL-NEXT: .LBB16_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: je .LBB16_32 +; AVX512FVL-NEXT: .LBB16_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 15(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v16i16_v16i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -6466,6 +7473,74 @@ define void @truncstore_v8i16_v8i8(<8 x i16> %x, ptr %p, <8 x i16> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i16_v8i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512FVL-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm1 +; AVX512FVL-NEXT: vpternlogq {{.*#+}} xmm1 = ~xmm1 +; AVX512FVL-NEXT: vpmovsxwd %xmm1, %ymm1 +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB17_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB17_3 +; AVX512FVL-NEXT: .LBB17_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB17_5 +; AVX512FVL-NEXT: .LBB17_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB17_7 +; AVX512FVL-NEXT: .LBB17_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB17_9 +; AVX512FVL-NEXT: .LBB17_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB17_11 +; AVX512FVL-NEXT: .LBB17_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB17_13 +; AVX512FVL-NEXT: .LBB17_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB17_15 +; AVX512FVL-NEXT: .LBB17_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB17_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB17_4 +; AVX512FVL-NEXT: .LBB17_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB17_6 +; AVX512FVL-NEXT: .LBB17_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB17_8 +; AVX512FVL-NEXT: .LBB17_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB17_10 +; AVX512FVL-NEXT: .LBB17_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB17_12 +; AVX512FVL-NEXT: .LBB17_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB17_14 +; AVX512FVL-NEXT: .LBB17_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB17_16 +; AVX512FVL-NEXT: .LBB17_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i16_v8i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def 
$xmm1 killed $xmm1 def $zmm1 diff --git a/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll b/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll index c950ce64e8883..18d394e1281b4 100644 --- a/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll +++ b/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll @@ -1,11 +1,11 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefix=SSE4 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefixes=SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefixes=SSE4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefixes=AVX,AVX2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX512F -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefix=AVX512VL -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefix=AVX512BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefixes=AVX512VL,AVX512FVL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512bw | FileCheck %s --check-prefixes=AVX512VL,AVX512BWVL define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %p, <8 x i32> %mask) { @@ -340,15 +340,15 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: truncstore_v8i64_v8i32: -; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1 -; AVX512F-NEXT: vpminsq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 -; AVX512F-NEXT: vpmaxsq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 -; AVX512F-NEXT: vpmovqd %zmm0, (%rdi) {%k1} -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq +; AVX512-LABEL: truncstore_v8i64_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k1 +; AVX512-NEXT: vpminsq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NEXT: vpmaxsq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NEXT: vpmovqd %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; ; AVX512VL-LABEL: truncstore_v8i64_v8i32: ; AVX512VL: # %bb.0: @@ -358,16 +358,6 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX512VL-NEXT: vpmovqd %zmm0, (%rdi) {%k1} ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: truncstore_v8i64_v8i32: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 -; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k1 -; AVX512BW-NEXT: vpminsq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmaxsq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovqd %zmm0, (%rdi) {%k1} -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq %a = icmp ne <8 x i32> %mask, zeroinitializer %b = icmp slt <8 x i64> %x, %c = 
select <8 x i1> %b, <8 x i64> %x, <8 x i64> @@ -897,6 +887,70 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i64_v8i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovsqw %zmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB1_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB1_3 +; AVX512FVL-NEXT: .LBB1_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB1_5 +; AVX512FVL-NEXT: .LBB1_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB1_7 +; AVX512FVL-NEXT: .LBB1_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB1_9 +; AVX512FVL-NEXT: .LBB1_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB1_11 +; AVX512FVL-NEXT: .LBB1_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB1_13 +; AVX512FVL-NEXT: .LBB1_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB1_15 +; AVX512FVL-NEXT: .LBB1_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB1_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB1_4 +; AVX512FVL-NEXT: .LBB1_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB1_6 +; AVX512FVL-NEXT: .LBB1_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB1_8 +; AVX512FVL-NEXT: .LBB1_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB1_10 +; AVX512FVL-NEXT: .LBB1_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB1_12 +; AVX512FVL-NEXT: .LBB1_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB1_14 +; AVX512FVL-NEXT: .LBB1_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB1_16 +; AVX512FVL-NEXT: .LBB1_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i64_v8i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -1441,6 +1495,70 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i64_v8i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovsqb %zmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB2_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB2_3 +; AVX512FVL-NEXT: .LBB2_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB2_5 +; AVX512FVL-NEXT: .LBB2_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB2_7 +; AVX512FVL-NEXT: .LBB2_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB2_9 +; AVX512FVL-NEXT: .LBB2_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB2_11 +; AVX512FVL-NEXT: .LBB2_12: # %else10 
+; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB2_13 +; AVX512FVL-NEXT: .LBB2_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB2_15 +; AVX512FVL-NEXT: .LBB2_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB2_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB2_4 +; AVX512FVL-NEXT: .LBB2_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB2_6 +; AVX512FVL-NEXT: .LBB2_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB2_8 +; AVX512FVL-NEXT: .LBB2_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB2_10 +; AVX512FVL-NEXT: .LBB2_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB2_12 +; AVX512FVL-NEXT: .LBB2_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB2_14 +; AVX512FVL-NEXT: .LBB2_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB2_16 +; AVX512FVL-NEXT: .LBB2_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i64_v8i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -1658,17 +1776,17 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: truncstore_v4i64_v4i32: -; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 -; AVX512F-NEXT: kshiftlw $12, %k0, %k0 -; AVX512F-NEXT: kshiftrw $12, %k0, %k1 -; AVX512F-NEXT: vpmovsqd %zmm0, %ymm0 -; AVX512F-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq +; AVX512-LABEL: truncstore_v4i64_v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512-NEXT: kshiftlw $12, %k0, %k0 +; AVX512-NEXT: kshiftrw $12, %k0, %k1 +; AVX512-NEXT: vpmovsqd %zmm0, %ymm0 +; AVX512-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; ; AVX512VL-LABEL: truncstore_v4i64_v4i32: ; AVX512VL: # %bb.0: @@ -1678,18 +1796,6 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX512VL-NEXT: vpmovqd %ymm0, (%rdi) {%k1} ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: truncstore_v4i64_v4i32: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 -; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k0 -; AVX512BW-NEXT: kshiftlw $12, %k0, %k0 -; AVX512BW-NEXT: kshiftrw $12, %k0, %k1 -; AVX512BW-NEXT: vpmovsqd %zmm0, %ymm0 -; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq %a = icmp ne <4 x i32> %mask, zeroinitializer %b = icmp slt <4 x i64> %x, %c = select <4 x i1> %b, <4 x i64> %x, <4 x i64> @@ -1984,6 +2090,42 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, ptr %p, <4 x i32> %mask) 
{ ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i64_v4i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovsqw %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB4_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB4_3 +; AVX512FVL-NEXT: .LBB4_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB4_5 +; AVX512FVL-NEXT: .LBB4_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB4_7 +; AVX512FVL-NEXT: .LBB4_8: # %else6 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB4_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB4_4 +; AVX512FVL-NEXT: .LBB4_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB4_6 +; AVX512FVL-NEXT: .LBB4_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB4_8 +; AVX512FVL-NEXT: .LBB4_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i64_v4i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -2302,6 +2444,42 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i64_v4i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovsqb %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB5_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB5_3 +; AVX512FVL-NEXT: .LBB5_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB5_5 +; AVX512FVL-NEXT: .LBB5_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB5_7 +; AVX512FVL-NEXT: .LBB5_8: # %else6 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB5_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB5_4 +; AVX512FVL-NEXT: .LBB5_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB5_6 +; AVX512FVL-NEXT: .LBB5_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB5_8 +; AVX512FVL-NEXT: .LBB5_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i64_v4i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -2451,17 +2629,17 @@ define void @truncstore_v2i64_v2i32(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX2-NEXT: vpmaskmovd %xmm0, %xmm1, (%rdi) ; AVX2-NEXT: retq ; -; AVX512F-LABEL: truncstore_v2i64_v2i32: -; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k0 -; AVX512F-NEXT: kshiftlw $14, %k0, %k0 -; AVX512F-NEXT: kshiftrw $14, %k0, %k1 -; AVX512F-NEXT: vpmovsqd %zmm0, %ymm0 -; AVX512F-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq +; AVX512-LABEL: 
truncstore_v2i64_v2i32: +; AVX512: # %bb.0: +; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 +; AVX512-NEXT: vptestmq %zmm1, %zmm1, %k0 +; AVX512-NEXT: kshiftlw $14, %k0, %k0 +; AVX512-NEXT: kshiftrw $14, %k0, %k1 +; AVX512-NEXT: vpmovsqd %zmm0, %ymm0 +; AVX512-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; ; AVX512VL-LABEL: truncstore_v2i64_v2i32: ; AVX512VL: # %bb.0: @@ -2470,18 +2648,6 @@ define void @truncstore_v2i64_v2i32(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX512VL-NEXT: vpmaxsq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0 ; AVX512VL-NEXT: vpmovqd %xmm0, (%rdi) {%k1} ; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: truncstore_v2i64_v2i32: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512BW-NEXT: vptestmq %zmm1, %zmm1, %k0 -; AVX512BW-NEXT: kshiftlw $14, %k0, %k0 -; AVX512BW-NEXT: kshiftrw $14, %k0, %k1 -; AVX512BW-NEXT: vpmovsqd %zmm0, %ymm0 -; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq %a = icmp ne <2 x i64> %mask, zeroinitializer %b = icmp slt <2 x i64> %x, %c = select <2 x i1> %b, <2 x i64> %x, <2 x i64> @@ -2631,6 +2797,26 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v2i64_v2i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmq %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovsqw %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB7_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB7_3 +; AVX512FVL-NEXT: .LBB7_4: # %else2 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB7_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB7_4 +; AVX512FVL-NEXT: .LBB7_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v2i64_v2i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -2797,6 +2983,26 @@ define void @truncstore_v2i64_v2i8(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v2i64_v2i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmq %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovsqb %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB8_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB8_3 +; AVX512FVL-NEXT: .LBB8_4: # %else2 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB8_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB8_4 +; AVX512FVL-NEXT: .LBB8_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v2i64_v2i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -3478,6 +3684,126 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, ptr %p, <16 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v16i32_v16i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512FVL-NEXT: vpmovsdw %zmm0, %ymm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; 
AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB9_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB9_3 +; AVX512FVL-NEXT: .LBB9_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB9_5 +; AVX512FVL-NEXT: .LBB9_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB9_7 +; AVX512FVL-NEXT: .LBB9_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB9_9 +; AVX512FVL-NEXT: .LBB9_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB9_11 +; AVX512FVL-NEXT: .LBB9_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB9_13 +; AVX512FVL-NEXT: .LBB9_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB9_16 +; AVX512FVL-NEXT: .LBB9_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: .LBB9_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512FVL-NEXT: jne .LBB9_17 +; AVX512FVL-NEXT: # %bb.18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB9_19 +; AVX512FVL-NEXT: .LBB9_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB9_21 +; AVX512FVL-NEXT: .LBB9_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB9_23 +; AVX512FVL-NEXT: .LBB9_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB9_25 +; AVX512FVL-NEXT: .LBB9_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB9_27 +; AVX512FVL-NEXT: .LBB9_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB9_29 +; AVX512FVL-NEXT: .LBB9_30: # %else28 +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: jne .LBB9_31 +; AVX512FVL-NEXT: .LBB9_32: # %else30 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB9_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB9_4 +; AVX512FVL-NEXT: .LBB9_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB9_6 +; AVX512FVL-NEXT: .LBB9_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB9_8 +; AVX512FVL-NEXT: .LBB9_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB9_10 +; AVX512FVL-NEXT: .LBB9_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB9_12 +; AVX512FVL-NEXT: .LBB9_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB9_14 +; AVX512FVL-NEXT: .LBB9_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB9_15 +; AVX512FVL-NEXT: jmp .LBB9_16 +; AVX512FVL-NEXT: .LBB9_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrw $0, %xmm0, 16(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB9_20 +; AVX512FVL-NEXT: .LBB9_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 18(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB9_22 +; AVX512FVL-NEXT: .LBB9_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 20(%rdi) +; 
AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB9_24 +; AVX512FVL-NEXT: .LBB9_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 22(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB9_26 +; AVX512FVL-NEXT: .LBB9_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 24(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB9_28 +; AVX512FVL-NEXT: .LBB9_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 26(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB9_30 +; AVX512FVL-NEXT: .LBB9_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 28(%rdi) +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: je .LBB9_32 +; AVX512FVL-NEXT: .LBB9_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 30(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v16i32_v16i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k1 @@ -4136,6 +4462,126 @@ define void @truncstore_v16i32_v16i8(<16 x i32> %x, ptr %p, <16 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v16i32_v16i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512FVL-NEXT: vpmovsdb %zmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB10_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB10_3 +; AVX512FVL-NEXT: .LBB10_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB10_5 +; AVX512FVL-NEXT: .LBB10_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB10_7 +; AVX512FVL-NEXT: .LBB10_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB10_9 +; AVX512FVL-NEXT: .LBB10_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB10_11 +; AVX512FVL-NEXT: .LBB10_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB10_13 +; AVX512FVL-NEXT: .LBB10_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB10_15 +; AVX512FVL-NEXT: .LBB10_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: jne .LBB10_17 +; AVX512FVL-NEXT: .LBB10_18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB10_19 +; AVX512FVL-NEXT: .LBB10_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB10_21 +; AVX512FVL-NEXT: .LBB10_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB10_23 +; AVX512FVL-NEXT: .LBB10_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB10_25 +; AVX512FVL-NEXT: .LBB10_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB10_27 +; AVX512FVL-NEXT: .LBB10_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB10_29 +; AVX512FVL-NEXT: .LBB10_30: # %else28 +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: jne .LBB10_31 +; AVX512FVL-NEXT: .LBB10_32: # %else30 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB10_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB10_4 +; AVX512FVL-NEXT: .LBB10_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; 
AVX512FVL-NEXT: je .LBB10_6 +; AVX512FVL-NEXT: .LBB10_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB10_8 +; AVX512FVL-NEXT: .LBB10_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB10_10 +; AVX512FVL-NEXT: .LBB10_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB10_12 +; AVX512FVL-NEXT: .LBB10_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB10_14 +; AVX512FVL-NEXT: .LBB10_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB10_16 +; AVX512FVL-NEXT: .LBB10_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: je .LBB10_18 +; AVX512FVL-NEXT: .LBB10_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB10_20 +; AVX512FVL-NEXT: .LBB10_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 9(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB10_22 +; AVX512FVL-NEXT: .LBB10_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB10_24 +; AVX512FVL-NEXT: .LBB10_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 11(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB10_26 +; AVX512FVL-NEXT: .LBB10_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB10_28 +; AVX512FVL-NEXT: .LBB10_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 13(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB10_30 +; AVX512FVL-NEXT: .LBB10_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: je .LBB10_32 +; AVX512FVL-NEXT: .LBB10_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 15(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v16i32_v16i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k1 @@ -4509,6 +4955,70 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i32_v8i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovsdw %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB11_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB11_3 +; AVX512FVL-NEXT: .LBB11_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB11_5 +; AVX512FVL-NEXT: .LBB11_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB11_7 +; AVX512FVL-NEXT: .LBB11_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB11_9 +; AVX512FVL-NEXT: .LBB11_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB11_11 +; AVX512FVL-NEXT: .LBB11_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB11_13 +; AVX512FVL-NEXT: .LBB11_14: # %else12 +; 
AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB11_15 +; AVX512FVL-NEXT: .LBB11_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB11_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB11_4 +; AVX512FVL-NEXT: .LBB11_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB11_6 +; AVX512FVL-NEXT: .LBB11_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB11_8 +; AVX512FVL-NEXT: .LBB11_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB11_10 +; AVX512FVL-NEXT: .LBB11_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB11_12 +; AVX512FVL-NEXT: .LBB11_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB11_14 +; AVX512FVL-NEXT: .LBB11_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB11_16 +; AVX512FVL-NEXT: .LBB11_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i32_v8i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -4883,6 +5393,70 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i32_v8i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovsdb %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB12_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB12_3 +; AVX512FVL-NEXT: .LBB12_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB12_5 +; AVX512FVL-NEXT: .LBB12_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB12_7 +; AVX512FVL-NEXT: .LBB12_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB12_9 +; AVX512FVL-NEXT: .LBB12_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB12_11 +; AVX512FVL-NEXT: .LBB12_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB12_13 +; AVX512FVL-NEXT: .LBB12_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB12_15 +; AVX512FVL-NEXT: .LBB12_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB12_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB12_4 +; AVX512FVL-NEXT: .LBB12_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB12_6 +; AVX512FVL-NEXT: .LBB12_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB12_8 +; AVX512FVL-NEXT: .LBB12_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB12_10 +; AVX512FVL-NEXT: .LBB12_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB12_12 +; AVX512FVL-NEXT: .LBB12_11: # 
%cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB12_14 +; AVX512FVL-NEXT: .LBB12_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB12_16 +; AVX512FVL-NEXT: .LBB12_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i32_v8i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -5064,6 +5638,40 @@ define void @truncstore_v4i32_v4i16(<4 x i32> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i32_v4i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB13_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB13_3 +; AVX512FVL-NEXT: .LBB13_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB13_5 +; AVX512FVL-NEXT: .LBB13_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB13_7 +; AVX512FVL-NEXT: .LBB13_8: # %else6 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB13_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB13_4 +; AVX512FVL-NEXT: .LBB13_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB13_6 +; AVX512FVL-NEXT: .LBB13_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB13_8 +; AVX512FVL-NEXT: .LBB13_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i32_v4i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -5246,6 +5854,41 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i32_v4i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX512FVL-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB14_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB14_3 +; AVX512FVL-NEXT: .LBB14_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB14_5 +; AVX512FVL-NEXT: .LBB14_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB14_7 +; AVX512FVL-NEXT: .LBB14_8: # %else6 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB14_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB14_4 +; AVX512FVL-NEXT: .LBB14_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB14_6 +; AVX512FVL-NEXT: .LBB14_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB14_8 +; AVX512FVL-NEXT: .LBB14_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i32_v4i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def 
$zmm1 @@ -6440,6 +7083,242 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v32i16_v32i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512FVL-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1 +; AVX512FVL-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; AVX512FVL-NEXT: vpacksswb %ymm2, %ymm0, %ymm0 +; AVX512FVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512FVL-NEXT: vpmovmskb %ymm1, %eax +; AVX512FVL-NEXT: notl %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB15_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB15_3 +; AVX512FVL-NEXT: .LBB15_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB15_5 +; AVX512FVL-NEXT: .LBB15_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB15_7 +; AVX512FVL-NEXT: .LBB15_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB15_9 +; AVX512FVL-NEXT: .LBB15_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB15_11 +; AVX512FVL-NEXT: .LBB15_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB15_13 +; AVX512FVL-NEXT: .LBB15_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB15_15 +; AVX512FVL-NEXT: .LBB15_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: jne .LBB15_17 +; AVX512FVL-NEXT: .LBB15_18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB15_19 +; AVX512FVL-NEXT: .LBB15_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB15_21 +; AVX512FVL-NEXT: .LBB15_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB15_23 +; AVX512FVL-NEXT: .LBB15_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB15_25 +; AVX512FVL-NEXT: .LBB15_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB15_27 +; AVX512FVL-NEXT: .LBB15_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB15_29 +; AVX512FVL-NEXT: .LBB15_30: # %else28 +; AVX512FVL-NEXT: testw %ax, %ax +; AVX512FVL-NEXT: jns .LBB15_32 +; AVX512FVL-NEXT: .LBB15_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 15(%rdi) +; AVX512FVL-NEXT: .LBB15_32: # %else30 +; AVX512FVL-NEXT: testl $65536, %eax # imm = 0x10000 +; AVX512FVL-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512FVL-NEXT: jne .LBB15_33 +; AVX512FVL-NEXT: # %bb.34: # %else32 +; AVX512FVL-NEXT: testl $131072, %eax # imm = 0x20000 +; AVX512FVL-NEXT: jne .LBB15_35 +; AVX512FVL-NEXT: .LBB15_36: # %else34 +; AVX512FVL-NEXT: testl $262144, %eax # imm = 0x40000 +; AVX512FVL-NEXT: jne .LBB15_37 +; AVX512FVL-NEXT: .LBB15_38: # %else36 +; AVX512FVL-NEXT: testl $524288, %eax # imm = 0x80000 +; AVX512FVL-NEXT: jne .LBB15_39 +; AVX512FVL-NEXT: .LBB15_40: # %else38 +; AVX512FVL-NEXT: testl $1048576, %eax # imm = 0x100000 +; AVX512FVL-NEXT: jne .LBB15_41 +; AVX512FVL-NEXT: .LBB15_42: # %else40 +; AVX512FVL-NEXT: testl $2097152, %eax # imm = 0x200000 +; AVX512FVL-NEXT: jne .LBB15_43 +; AVX512FVL-NEXT: .LBB15_44: # %else42 +; AVX512FVL-NEXT: testl $4194304, %eax # imm = 0x400000 +; AVX512FVL-NEXT: jne .LBB15_45 +; AVX512FVL-NEXT: .LBB15_46: # %else44 +; AVX512FVL-NEXT: testl $8388608, %eax # imm = 0x800000 +; AVX512FVL-NEXT: jne .LBB15_47 +; AVX512FVL-NEXT: .LBB15_48: # %else46 +; AVX512FVL-NEXT: testl 
$16777216, %eax # imm = 0x1000000 +; AVX512FVL-NEXT: jne .LBB15_49 +; AVX512FVL-NEXT: .LBB15_50: # %else48 +; AVX512FVL-NEXT: testl $33554432, %eax # imm = 0x2000000 +; AVX512FVL-NEXT: jne .LBB15_51 +; AVX512FVL-NEXT: .LBB15_52: # %else50 +; AVX512FVL-NEXT: testl $67108864, %eax # imm = 0x4000000 +; AVX512FVL-NEXT: jne .LBB15_53 +; AVX512FVL-NEXT: .LBB15_54: # %else52 +; AVX512FVL-NEXT: testl $134217728, %eax # imm = 0x8000000 +; AVX512FVL-NEXT: jne .LBB15_55 +; AVX512FVL-NEXT: .LBB15_56: # %else54 +; AVX512FVL-NEXT: testl $268435456, %eax # imm = 0x10000000 +; AVX512FVL-NEXT: jne .LBB15_57 +; AVX512FVL-NEXT: .LBB15_58: # %else56 +; AVX512FVL-NEXT: testl $536870912, %eax # imm = 0x20000000 +; AVX512FVL-NEXT: jne .LBB15_59 +; AVX512FVL-NEXT: .LBB15_60: # %else58 +; AVX512FVL-NEXT: testl $1073741824, %eax # imm = 0x40000000 +; AVX512FVL-NEXT: jne .LBB15_61 +; AVX512FVL-NEXT: .LBB15_62: # %else60 +; AVX512FVL-NEXT: testl $-2147483648, %eax # imm = 0x80000000 +; AVX512FVL-NEXT: jne .LBB15_63 +; AVX512FVL-NEXT: .LBB15_64: # %else62 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB15_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB15_4 +; AVX512FVL-NEXT: .LBB15_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB15_6 +; AVX512FVL-NEXT: .LBB15_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB15_8 +; AVX512FVL-NEXT: .LBB15_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB15_10 +; AVX512FVL-NEXT: .LBB15_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB15_12 +; AVX512FVL-NEXT: .LBB15_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB15_14 +; AVX512FVL-NEXT: .LBB15_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB15_16 +; AVX512FVL-NEXT: .LBB15_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: je .LBB15_18 +; AVX512FVL-NEXT: .LBB15_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB15_20 +; AVX512FVL-NEXT: .LBB15_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 9(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB15_22 +; AVX512FVL-NEXT: .LBB15_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB15_24 +; AVX512FVL-NEXT: .LBB15_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 11(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB15_26 +; AVX512FVL-NEXT: .LBB15_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB15_28 +; AVX512FVL-NEXT: .LBB15_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 13(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB15_30 +; AVX512FVL-NEXT: .LBB15_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: testw %ax, %ax +; 
AVX512FVL-NEXT: js .LBB15_31 +; AVX512FVL-NEXT: jmp .LBB15_32 +; AVX512FVL-NEXT: .LBB15_33: # %cond.store31 +; AVX512FVL-NEXT: vpextrb $0, %xmm0, 16(%rdi) +; AVX512FVL-NEXT: testl $131072, %eax # imm = 0x20000 +; AVX512FVL-NEXT: je .LBB15_36 +; AVX512FVL-NEXT: .LBB15_35: # %cond.store33 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 17(%rdi) +; AVX512FVL-NEXT: testl $262144, %eax # imm = 0x40000 +; AVX512FVL-NEXT: je .LBB15_38 +; AVX512FVL-NEXT: .LBB15_37: # %cond.store35 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 18(%rdi) +; AVX512FVL-NEXT: testl $524288, %eax # imm = 0x80000 +; AVX512FVL-NEXT: je .LBB15_40 +; AVX512FVL-NEXT: .LBB15_39: # %cond.store37 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 19(%rdi) +; AVX512FVL-NEXT: testl $1048576, %eax # imm = 0x100000 +; AVX512FVL-NEXT: je .LBB15_42 +; AVX512FVL-NEXT: .LBB15_41: # %cond.store39 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 20(%rdi) +; AVX512FVL-NEXT: testl $2097152, %eax # imm = 0x200000 +; AVX512FVL-NEXT: je .LBB15_44 +; AVX512FVL-NEXT: .LBB15_43: # %cond.store41 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 21(%rdi) +; AVX512FVL-NEXT: testl $4194304, %eax # imm = 0x400000 +; AVX512FVL-NEXT: je .LBB15_46 +; AVX512FVL-NEXT: .LBB15_45: # %cond.store43 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 22(%rdi) +; AVX512FVL-NEXT: testl $8388608, %eax # imm = 0x800000 +; AVX512FVL-NEXT: je .LBB15_48 +; AVX512FVL-NEXT: .LBB15_47: # %cond.store45 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 23(%rdi) +; AVX512FVL-NEXT: testl $16777216, %eax # imm = 0x1000000 +; AVX512FVL-NEXT: je .LBB15_50 +; AVX512FVL-NEXT: .LBB15_49: # %cond.store47 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 24(%rdi) +; AVX512FVL-NEXT: testl $33554432, %eax # imm = 0x2000000 +; AVX512FVL-NEXT: je .LBB15_52 +; AVX512FVL-NEXT: .LBB15_51: # %cond.store49 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 25(%rdi) +; AVX512FVL-NEXT: testl $67108864, %eax # imm = 0x4000000 +; AVX512FVL-NEXT: je .LBB15_54 +; AVX512FVL-NEXT: .LBB15_53: # %cond.store51 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 26(%rdi) +; AVX512FVL-NEXT: testl $134217728, %eax # imm = 0x8000000 +; AVX512FVL-NEXT: je .LBB15_56 +; AVX512FVL-NEXT: .LBB15_55: # %cond.store53 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 27(%rdi) +; AVX512FVL-NEXT: testl $268435456, %eax # imm = 0x10000000 +; AVX512FVL-NEXT: je .LBB15_58 +; AVX512FVL-NEXT: .LBB15_57: # %cond.store55 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 28(%rdi) +; AVX512FVL-NEXT: testl $536870912, %eax # imm = 0x20000000 +; AVX512FVL-NEXT: je .LBB15_60 +; AVX512FVL-NEXT: .LBB15_59: # %cond.store57 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 29(%rdi) +; AVX512FVL-NEXT: testl $1073741824, %eax # imm = 0x40000000 +; AVX512FVL-NEXT: je .LBB15_62 +; AVX512FVL-NEXT: .LBB15_61: # %cond.store59 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 30(%rdi) +; AVX512FVL-NEXT: testl $-2147483648, %eax # imm = 0x80000000 +; AVX512FVL-NEXT: je .LBB15_64 +; AVX512FVL-NEXT: .LBB15_63: # %cond.store61 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 31(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v32i16_v32i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -7067,6 +7946,129 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, ptr %p, <16 x i8> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v16i16_v16i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512FVL-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1 +; AVX512FVL-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512FVL-NEXT: vpacksswb %xmm2, %xmm0, %xmm0 +; AVX512FVL-NEXT: vpmovmskb %xmm1, %eax +; 
AVX512FVL-NEXT: xorl $65535, %eax # imm = 0xFFFF +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB16_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB16_3 +; AVX512FVL-NEXT: .LBB16_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB16_5 +; AVX512FVL-NEXT: .LBB16_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB16_7 +; AVX512FVL-NEXT: .LBB16_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB16_9 +; AVX512FVL-NEXT: .LBB16_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB16_11 +; AVX512FVL-NEXT: .LBB16_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB16_13 +; AVX512FVL-NEXT: .LBB16_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB16_15 +; AVX512FVL-NEXT: .LBB16_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: jne .LBB16_17 +; AVX512FVL-NEXT: .LBB16_18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB16_19 +; AVX512FVL-NEXT: .LBB16_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB16_21 +; AVX512FVL-NEXT: .LBB16_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB16_23 +; AVX512FVL-NEXT: .LBB16_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB16_25 +; AVX512FVL-NEXT: .LBB16_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB16_27 +; AVX512FVL-NEXT: .LBB16_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB16_29 +; AVX512FVL-NEXT: .LBB16_30: # %else28 +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: jne .LBB16_31 +; AVX512FVL-NEXT: .LBB16_32: # %else30 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB16_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB16_4 +; AVX512FVL-NEXT: .LBB16_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB16_6 +; AVX512FVL-NEXT: .LBB16_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB16_8 +; AVX512FVL-NEXT: .LBB16_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB16_10 +; AVX512FVL-NEXT: .LBB16_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB16_12 +; AVX512FVL-NEXT: .LBB16_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB16_14 +; AVX512FVL-NEXT: .LBB16_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB16_16 +; AVX512FVL-NEXT: .LBB16_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: je .LBB16_18 +; AVX512FVL-NEXT: .LBB16_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB16_20 +; AVX512FVL-NEXT: .LBB16_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 9(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB16_22 +; 
AVX512FVL-NEXT: .LBB16_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB16_24 +; AVX512FVL-NEXT: .LBB16_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 11(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB16_26 +; AVX512FVL-NEXT: .LBB16_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB16_28 +; AVX512FVL-NEXT: .LBB16_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 13(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB16_30 +; AVX512FVL-NEXT: .LBB16_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: je .LBB16_32 +; AVX512FVL-NEXT: .LBB16_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 15(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v16i16_v16i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -7361,6 +8363,74 @@ define void @truncstore_v8i16_v8i8(<8 x i16> %x, ptr %p, <8 x i16> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i16_v8i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512FVL-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm1 +; AVX512FVL-NEXT: vpternlogq {{.*#+}} xmm1 = ~xmm1 +; AVX512FVL-NEXT: vpmovsxwd %xmm1, %ymm1 +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB17_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB17_3 +; AVX512FVL-NEXT: .LBB17_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB17_5 +; AVX512FVL-NEXT: .LBB17_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB17_7 +; AVX512FVL-NEXT: .LBB17_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB17_9 +; AVX512FVL-NEXT: .LBB17_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB17_11 +; AVX512FVL-NEXT: .LBB17_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB17_13 +; AVX512FVL-NEXT: .LBB17_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB17_15 +; AVX512FVL-NEXT: .LBB17_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB17_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB17_4 +; AVX512FVL-NEXT: .LBB17_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB17_6 +; AVX512FVL-NEXT: .LBB17_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB17_8 +; AVX512FVL-NEXT: .LBB17_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB17_10 +; AVX512FVL-NEXT: .LBB17_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB17_12 +; AVX512FVL-NEXT: .LBB17_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB17_14 +; AVX512FVL-NEXT: .LBB17_13: # 
%cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB17_16 +; AVX512FVL-NEXT: .LBB17_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i16_v8i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 diff --git a/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll b/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll index da057dd084b36..4c4b6e78d1f8c 100644 --- a/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll +++ b/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll @@ -1,11 +1,11 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefix=SSE4 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefixes=SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.2 | FileCheck %s --check-prefixes=SSE4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=AVX,AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefixes=AVX,AVX2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX512F -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefix=AVX512VL -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefixes=AVX512BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefixes=AVX512VL,AVX512FVL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512bw | FileCheck %s --check-prefixes=AVX512VL,AVX512BWVL define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %p, <8 x i32> %mask) { @@ -272,14 +272,14 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: truncstore_v8i64_v8i32: -; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1 -; AVX512F-NEXT: vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 -; AVX512F-NEXT: vpmovqd %zmm0, (%rdi) {%k1} -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq +; AVX512-LABEL: truncstore_v8i64_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k1 +; AVX512-NEXT: vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NEXT: vpmovqd %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; ; AVX512VL-LABEL: truncstore_v8i64_v8i32: ; AVX512VL: # %bb.0: @@ -288,15 +288,6 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX512VL-NEXT: vpmovqd %zmm0, (%rdi) {%k1} ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: truncstore_v8i64_v8i32: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 -; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k1 -; AVX512BW-NEXT: vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 -; AVX512BW-NEXT: vpmovqd %zmm0, 
(%rdi) {%k1} -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq %a = icmp ne <8 x i32> %mask, zeroinitializer %b = icmp ult <8 x i64> %x, %c = select <8 x i1> %b, <8 x i64> %x, <8 x i64> @@ -762,6 +753,70 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i64_v8i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovusqw %zmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB1_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB1_3 +; AVX512FVL-NEXT: .LBB1_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB1_5 +; AVX512FVL-NEXT: .LBB1_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB1_7 +; AVX512FVL-NEXT: .LBB1_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB1_9 +; AVX512FVL-NEXT: .LBB1_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB1_11 +; AVX512FVL-NEXT: .LBB1_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB1_13 +; AVX512FVL-NEXT: .LBB1_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB1_15 +; AVX512FVL-NEXT: .LBB1_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB1_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB1_4 +; AVX512FVL-NEXT: .LBB1_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB1_6 +; AVX512FVL-NEXT: .LBB1_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB1_8 +; AVX512FVL-NEXT: .LBB1_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB1_10 +; AVX512FVL-NEXT: .LBB1_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB1_12 +; AVX512FVL-NEXT: .LBB1_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB1_14 +; AVX512FVL-NEXT: .LBB1_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB1_16 +; AVX512FVL-NEXT: .LBB1_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i64_v8i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -1236,6 +1291,70 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i64_v8i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovusqb %zmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB2_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB2_3 +; AVX512FVL-NEXT: .LBB2_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB2_5 +; AVX512FVL-NEXT: .LBB2_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB2_7 +; AVX512FVL-NEXT: .LBB2_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne 
.LBB2_9 +; AVX512FVL-NEXT: .LBB2_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB2_11 +; AVX512FVL-NEXT: .LBB2_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB2_13 +; AVX512FVL-NEXT: .LBB2_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB2_15 +; AVX512FVL-NEXT: .LBB2_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB2_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB2_4 +; AVX512FVL-NEXT: .LBB2_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB2_6 +; AVX512FVL-NEXT: .LBB2_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB2_8 +; AVX512FVL-NEXT: .LBB2_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB2_10 +; AVX512FVL-NEXT: .LBB2_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB2_12 +; AVX512FVL-NEXT: .LBB2_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB2_14 +; AVX512FVL-NEXT: .LBB2_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB2_16 +; AVX512FVL-NEXT: .LBB2_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i64_v8i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -1416,17 +1535,17 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512F-LABEL: truncstore_v4i64_v4i32: -; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 -; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 -; AVX512F-NEXT: kshiftlw $12, %k0, %k0 -; AVX512F-NEXT: kshiftrw $12, %k0, %k1 -; AVX512F-NEXT: vpmovusqd %zmm0, %ymm0 -; AVX512F-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq +; AVX512-LABEL: truncstore_v4i64_v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512-NEXT: kshiftlw $12, %k0, %k0 +; AVX512-NEXT: kshiftrw $12, %k0, %k1 +; AVX512-NEXT: vpmovusqd %zmm0, %ymm0 +; AVX512-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; ; AVX512VL-LABEL: truncstore_v4i64_v4i32: ; AVX512VL: # %bb.0: @@ -1435,18 +1554,6 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX512VL-NEXT: vpmovqd %ymm0, (%rdi) {%k1} ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: truncstore_v4i64_v4i32: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 -; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k0 -; AVX512BW-NEXT: kshiftlw $12, %k0, %k0 -; AVX512BW-NEXT: kshiftrw $12, %k0, %k1 -; AVX512BW-NEXT: vpmovusqd %zmm0, %ymm0 -; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq %a = icmp ne <4 x i32> %mask, zeroinitializer %b = icmp ult <4 x 
i64> %x, %c = select <4 x i1> %b, <4 x i64> %x, <4 x i64> @@ -1710,6 +1817,42 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i64_v4i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovusqw %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB4_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB4_3 +; AVX512FVL-NEXT: .LBB4_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB4_5 +; AVX512FVL-NEXT: .LBB4_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB4_7 +; AVX512FVL-NEXT: .LBB4_8: # %else6 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB4_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB4_4 +; AVX512FVL-NEXT: .LBB4_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB4_6 +; AVX512FVL-NEXT: .LBB4_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB4_8 +; AVX512FVL-NEXT: .LBB4_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i64_v4i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -1994,6 +2137,42 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i64_v4i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovusqb %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB5_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB5_3 +; AVX512FVL-NEXT: .LBB5_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB5_5 +; AVX512FVL-NEXT: .LBB5_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB5_7 +; AVX512FVL-NEXT: .LBB5_8: # %else6 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB5_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB5_4 +; AVX512FVL-NEXT: .LBB5_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB5_6 +; AVX512FVL-NEXT: .LBB5_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB5_8 +; AVX512FVL-NEXT: .LBB5_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i64_v4i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -2113,17 +2292,17 @@ define void @truncstore_v2i64_v2i32(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX2-NEXT: vpmaskmovd %xmm0, %xmm1, (%rdi) ; AVX2-NEXT: retq ; -; AVX512F-LABEL: truncstore_v2i64_v2i32: -; AVX512F: # %bb.0: -; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k0 -; AVX512F-NEXT: kshiftlw $14, %k0, %k0 -; AVX512F-NEXT: kshiftrw $14, 
%k0, %k1 -; AVX512F-NEXT: vpmovusqd %zmm0, %ymm0 -; AVX512F-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq +; AVX512-LABEL: truncstore_v2i64_v2i32: +; AVX512: # %bb.0: +; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 +; AVX512-NEXT: vptestmq %zmm1, %zmm1, %k0 +; AVX512-NEXT: kshiftlw $14, %k0, %k0 +; AVX512-NEXT: kshiftrw $14, %k0, %k1 +; AVX512-NEXT: vpmovusqd %zmm0, %ymm0 +; AVX512-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq ; ; AVX512VL-LABEL: truncstore_v2i64_v2i32: ; AVX512VL: # %bb.0: @@ -2131,18 +2310,6 @@ define void @truncstore_v2i64_v2i32(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX512VL-NEXT: vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0 ; AVX512VL-NEXT: vpmovqd %xmm0, (%rdi) {%k1} ; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: truncstore_v2i64_v2i32: -; AVX512BW: # %bb.0: -; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 -; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512BW-NEXT: vptestmq %zmm1, %zmm1, %k0 -; AVX512BW-NEXT: kshiftlw $14, %k0, %k0 -; AVX512BW-NEXT: kshiftrw $14, %k0, %k1 -; AVX512BW-NEXT: vpmovusqd %zmm0, %ymm0 -; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1} -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq %a = icmp ne <2 x i64> %mask, zeroinitializer %b = icmp ult <2 x i64> %x, %c = select <2 x i1> %b, <2 x i64> %x, <2 x i64> @@ -2268,6 +2435,26 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v2i64_v2i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmq %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovusqw %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB7_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB7_3 +; AVX512FVL-NEXT: .LBB7_4: # %else2 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB7_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB7_4 +; AVX512FVL-NEXT: .LBB7_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v2i64_v2i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -2408,6 +2595,26 @@ define void @truncstore_v2i64_v2i8(<2 x i64> %x, ptr %p, <2 x i64> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v2i64_v2i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmq %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovusqb %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB8_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB8_3 +; AVX512FVL-NEXT: .LBB8_4: # %else2 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB8_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB8_4 +; AVX512FVL-NEXT: .LBB8_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v2i64_v2i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -3119,6 +3326,126 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, ptr %p, <16 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: 
truncstore_v16i32_v16i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512FVL-NEXT: vpmovusdw %zmm0, %ymm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB9_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB9_3 +; AVX512FVL-NEXT: .LBB9_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB9_5 +; AVX512FVL-NEXT: .LBB9_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB9_7 +; AVX512FVL-NEXT: .LBB9_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB9_9 +; AVX512FVL-NEXT: .LBB9_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB9_11 +; AVX512FVL-NEXT: .LBB9_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB9_13 +; AVX512FVL-NEXT: .LBB9_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB9_16 +; AVX512FVL-NEXT: .LBB9_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: .LBB9_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512FVL-NEXT: jne .LBB9_17 +; AVX512FVL-NEXT: # %bb.18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB9_19 +; AVX512FVL-NEXT: .LBB9_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB9_21 +; AVX512FVL-NEXT: .LBB9_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB9_23 +; AVX512FVL-NEXT: .LBB9_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB9_25 +; AVX512FVL-NEXT: .LBB9_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB9_27 +; AVX512FVL-NEXT: .LBB9_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB9_29 +; AVX512FVL-NEXT: .LBB9_30: # %else28 +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: jne .LBB9_31 +; AVX512FVL-NEXT: .LBB9_32: # %else30 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB9_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB9_4 +; AVX512FVL-NEXT: .LBB9_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB9_6 +; AVX512FVL-NEXT: .LBB9_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB9_8 +; AVX512FVL-NEXT: .LBB9_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB9_10 +; AVX512FVL-NEXT: .LBB9_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB9_12 +; AVX512FVL-NEXT: .LBB9_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB9_14 +; AVX512FVL-NEXT: .LBB9_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB9_15 +; AVX512FVL-NEXT: jmp .LBB9_16 +; AVX512FVL-NEXT: .LBB9_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrw $0, %xmm0, 16(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB9_20 +; AVX512FVL-NEXT: .LBB9_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 18(%rdi) +; 
AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB9_22 +; AVX512FVL-NEXT: .LBB9_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 20(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB9_24 +; AVX512FVL-NEXT: .LBB9_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 22(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB9_26 +; AVX512FVL-NEXT: .LBB9_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 24(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB9_28 +; AVX512FVL-NEXT: .LBB9_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 26(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB9_30 +; AVX512FVL-NEXT: .LBB9_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 28(%rdi) +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: je .LBB9_32 +; AVX512FVL-NEXT: .LBB9_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 30(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v16i32_v16i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k1 @@ -3815,6 +4142,126 @@ define void @truncstore_v16i32_v16i8(<16 x i32> %x, ptr %p, <16 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v16i32_v16i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %zmm1, %zmm1, %k0 +; AVX512FVL-NEXT: vpmovusdb %zmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB10_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB10_3 +; AVX512FVL-NEXT: .LBB10_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB10_5 +; AVX512FVL-NEXT: .LBB10_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB10_7 +; AVX512FVL-NEXT: .LBB10_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB10_9 +; AVX512FVL-NEXT: .LBB10_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB10_11 +; AVX512FVL-NEXT: .LBB10_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB10_13 +; AVX512FVL-NEXT: .LBB10_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB10_15 +; AVX512FVL-NEXT: .LBB10_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: jne .LBB10_17 +; AVX512FVL-NEXT: .LBB10_18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB10_19 +; AVX512FVL-NEXT: .LBB10_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB10_21 +; AVX512FVL-NEXT: .LBB10_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB10_23 +; AVX512FVL-NEXT: .LBB10_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB10_25 +; AVX512FVL-NEXT: .LBB10_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB10_27 +; AVX512FVL-NEXT: .LBB10_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB10_29 +; AVX512FVL-NEXT: .LBB10_30: # %else28 +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: jne .LBB10_31 +; AVX512FVL-NEXT: .LBB10_32: # %else30 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB10_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: 
testb $2, %al +; AVX512FVL-NEXT: je .LBB10_4 +; AVX512FVL-NEXT: .LBB10_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB10_6 +; AVX512FVL-NEXT: .LBB10_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB10_8 +; AVX512FVL-NEXT: .LBB10_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB10_10 +; AVX512FVL-NEXT: .LBB10_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB10_12 +; AVX512FVL-NEXT: .LBB10_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB10_14 +; AVX512FVL-NEXT: .LBB10_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB10_16 +; AVX512FVL-NEXT: .LBB10_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: je .LBB10_18 +; AVX512FVL-NEXT: .LBB10_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB10_20 +; AVX512FVL-NEXT: .LBB10_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 9(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB10_22 +; AVX512FVL-NEXT: .LBB10_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB10_24 +; AVX512FVL-NEXT: .LBB10_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 11(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB10_26 +; AVX512FVL-NEXT: .LBB10_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB10_28 +; AVX512FVL-NEXT: .LBB10_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 13(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB10_30 +; AVX512FVL-NEXT: .LBB10_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: je .LBB10_32 +; AVX512FVL-NEXT: .LBB10_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 15(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v16i32_v16i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vptestmd %zmm1, %zmm1, %k1 @@ -4211,6 +4658,70 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i32_v8i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovusdw %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB11_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB11_3 +; AVX512FVL-NEXT: .LBB11_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB11_5 +; AVX512FVL-NEXT: .LBB11_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB11_7 +; AVX512FVL-NEXT: .LBB11_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB11_9 +; AVX512FVL-NEXT: .LBB11_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; 
AVX512FVL-NEXT: jne .LBB11_11 +; AVX512FVL-NEXT: .LBB11_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB11_13 +; AVX512FVL-NEXT: .LBB11_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB11_15 +; AVX512FVL-NEXT: .LBB11_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB11_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB11_4 +; AVX512FVL-NEXT: .LBB11_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB11_6 +; AVX512FVL-NEXT: .LBB11_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB11_8 +; AVX512FVL-NEXT: .LBB11_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB11_10 +; AVX512FVL-NEXT: .LBB11_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrw $4, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB11_12 +; AVX512FVL-NEXT: .LBB11_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrw $5, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB11_14 +; AVX512FVL-NEXT: .LBB11_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrw $6, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB11_16 +; AVX512FVL-NEXT: .LBB11_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrw $7, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i32_v8i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -4604,6 +5115,70 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, ptr %p, <8 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i32_v8i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpmovusdb %ymm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB12_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB12_3 +; AVX512FVL-NEXT: .LBB12_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB12_5 +; AVX512FVL-NEXT: .LBB12_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB12_7 +; AVX512FVL-NEXT: .LBB12_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB12_9 +; AVX512FVL-NEXT: .LBB12_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB12_11 +; AVX512FVL-NEXT: .LBB12_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB12_13 +; AVX512FVL-NEXT: .LBB12_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB12_15 +; AVX512FVL-NEXT: .LBB12_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB12_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB12_4 +; AVX512FVL-NEXT: .LBB12_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB12_6 +; AVX512FVL-NEXT: .LBB12_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB12_8 +; AVX512FVL-NEXT: .LBB12_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB12_10 +; 
AVX512FVL-NEXT: .LBB12_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB12_12 +; AVX512FVL-NEXT: .LBB12_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB12_14 +; AVX512FVL-NEXT: .LBB12_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB12_16 +; AVX512FVL-NEXT: .LBB12_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i32_v8i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -4831,6 +5406,40 @@ define void @truncstore_v4i32_v4i16(<4 x i32> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i32_v4i16: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovusdw %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB13_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB13_3 +; AVX512FVL-NEXT: .LBB13_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB13_5 +; AVX512FVL-NEXT: .LBB13_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB13_7 +; AVX512FVL-NEXT: .LBB13_8: # %else6 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB13_1: # %cond.store +; AVX512FVL-NEXT: vpextrw $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB13_4 +; AVX512FVL-NEXT: .LBB13_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrw $1, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB13_6 +; AVX512FVL-NEXT: .LBB13_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrw $2, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB13_8 +; AVX512FVL-NEXT: .LBB13_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrw $3, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v4i32_v4i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -5059,6 +5668,40 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, ptr %p, <4 x i32> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v4i32_v4i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512FVL-NEXT: vpmovusdb %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB14_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB14_3 +; AVX512FVL-NEXT: .LBB14_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB14_5 +; AVX512FVL-NEXT: .LBB14_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB14_7 +; AVX512FVL-NEXT: .LBB14_8: # %else6 +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB14_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB14_4 +; AVX512FVL-NEXT: .LBB14_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB14_6 +; AVX512FVL-NEXT: .LBB14_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB14_8 +; AVX512FVL-NEXT: .LBB14_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: 
retq +; ; AVX512BW-LABEL: truncstore_v4i32_v4i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -6277,6 +6920,245 @@ define void @truncstore_v32i16_v32i8(<32 x i16> %x, ptr %p, <32 x i8> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v32i16_v32i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512FVL-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1 +; AVX512FVL-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; AVX512FVL-NEXT: vpbroadcastd {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512FVL-NEXT: vpminuw %ymm3, %ymm2, %ymm2 +; AVX512FVL-NEXT: vpminuw %ymm3, %ymm0, %ymm0 +; AVX512FVL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 +; AVX512FVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512FVL-NEXT: vpmovmskb %ymm1, %eax +; AVX512FVL-NEXT: notl %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB15_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB15_3 +; AVX512FVL-NEXT: .LBB15_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB15_5 +; AVX512FVL-NEXT: .LBB15_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB15_7 +; AVX512FVL-NEXT: .LBB15_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB15_9 +; AVX512FVL-NEXT: .LBB15_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB15_11 +; AVX512FVL-NEXT: .LBB15_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB15_13 +; AVX512FVL-NEXT: .LBB15_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB15_15 +; AVX512FVL-NEXT: .LBB15_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: jne .LBB15_17 +; AVX512FVL-NEXT: .LBB15_18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB15_19 +; AVX512FVL-NEXT: .LBB15_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB15_21 +; AVX512FVL-NEXT: .LBB15_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB15_23 +; AVX512FVL-NEXT: .LBB15_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB15_25 +; AVX512FVL-NEXT: .LBB15_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB15_27 +; AVX512FVL-NEXT: .LBB15_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB15_29 +; AVX512FVL-NEXT: .LBB15_30: # %else28 +; AVX512FVL-NEXT: testw %ax, %ax +; AVX512FVL-NEXT: jns .LBB15_32 +; AVX512FVL-NEXT: .LBB15_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 15(%rdi) +; AVX512FVL-NEXT: .LBB15_32: # %else30 +; AVX512FVL-NEXT: testl $65536, %eax # imm = 0x10000 +; AVX512FVL-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512FVL-NEXT: jne .LBB15_33 +; AVX512FVL-NEXT: # %bb.34: # %else32 +; AVX512FVL-NEXT: testl $131072, %eax # imm = 0x20000 +; AVX512FVL-NEXT: jne .LBB15_35 +; AVX512FVL-NEXT: .LBB15_36: # %else34 +; AVX512FVL-NEXT: testl $262144, %eax # imm = 0x40000 +; AVX512FVL-NEXT: jne .LBB15_37 +; AVX512FVL-NEXT: .LBB15_38: # %else36 +; AVX512FVL-NEXT: testl $524288, %eax # imm = 0x80000 +; AVX512FVL-NEXT: jne .LBB15_39 +; AVX512FVL-NEXT: .LBB15_40: # %else38 +; AVX512FVL-NEXT: testl $1048576, %eax # imm = 0x100000 +; AVX512FVL-NEXT: jne .LBB15_41 +; AVX512FVL-NEXT: .LBB15_42: # %else40 +; AVX512FVL-NEXT: testl $2097152, %eax # imm = 0x200000 +; AVX512FVL-NEXT: jne 
.LBB15_43 +; AVX512FVL-NEXT: .LBB15_44: # %else42 +; AVX512FVL-NEXT: testl $4194304, %eax # imm = 0x400000 +; AVX512FVL-NEXT: jne .LBB15_45 +; AVX512FVL-NEXT: .LBB15_46: # %else44 +; AVX512FVL-NEXT: testl $8388608, %eax # imm = 0x800000 +; AVX512FVL-NEXT: jne .LBB15_47 +; AVX512FVL-NEXT: .LBB15_48: # %else46 +; AVX512FVL-NEXT: testl $16777216, %eax # imm = 0x1000000 +; AVX512FVL-NEXT: jne .LBB15_49 +; AVX512FVL-NEXT: .LBB15_50: # %else48 +; AVX512FVL-NEXT: testl $33554432, %eax # imm = 0x2000000 +; AVX512FVL-NEXT: jne .LBB15_51 +; AVX512FVL-NEXT: .LBB15_52: # %else50 +; AVX512FVL-NEXT: testl $67108864, %eax # imm = 0x4000000 +; AVX512FVL-NEXT: jne .LBB15_53 +; AVX512FVL-NEXT: .LBB15_54: # %else52 +; AVX512FVL-NEXT: testl $134217728, %eax # imm = 0x8000000 +; AVX512FVL-NEXT: jne .LBB15_55 +; AVX512FVL-NEXT: .LBB15_56: # %else54 +; AVX512FVL-NEXT: testl $268435456, %eax # imm = 0x10000000 +; AVX512FVL-NEXT: jne .LBB15_57 +; AVX512FVL-NEXT: .LBB15_58: # %else56 +; AVX512FVL-NEXT: testl $536870912, %eax # imm = 0x20000000 +; AVX512FVL-NEXT: jne .LBB15_59 +; AVX512FVL-NEXT: .LBB15_60: # %else58 +; AVX512FVL-NEXT: testl $1073741824, %eax # imm = 0x40000000 +; AVX512FVL-NEXT: jne .LBB15_61 +; AVX512FVL-NEXT: .LBB15_62: # %else60 +; AVX512FVL-NEXT: testl $-2147483648, %eax # imm = 0x80000000 +; AVX512FVL-NEXT: jne .LBB15_63 +; AVX512FVL-NEXT: .LBB15_64: # %else62 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB15_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB15_4 +; AVX512FVL-NEXT: .LBB15_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB15_6 +; AVX512FVL-NEXT: .LBB15_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB15_8 +; AVX512FVL-NEXT: .LBB15_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB15_10 +; AVX512FVL-NEXT: .LBB15_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB15_12 +; AVX512FVL-NEXT: .LBB15_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB15_14 +; AVX512FVL-NEXT: .LBB15_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB15_16 +; AVX512FVL-NEXT: .LBB15_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: je .LBB15_18 +; AVX512FVL-NEXT: .LBB15_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB15_20 +; AVX512FVL-NEXT: .LBB15_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 9(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB15_22 +; AVX512FVL-NEXT: .LBB15_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB15_24 +; AVX512FVL-NEXT: .LBB15_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 11(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB15_26 +; AVX512FVL-NEXT: .LBB15_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; 
AVX512FVL-NEXT: je .LBB15_28 +; AVX512FVL-NEXT: .LBB15_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 13(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB15_30 +; AVX512FVL-NEXT: .LBB15_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: testw %ax, %ax +; AVX512FVL-NEXT: js .LBB15_31 +; AVX512FVL-NEXT: jmp .LBB15_32 +; AVX512FVL-NEXT: .LBB15_33: # %cond.store31 +; AVX512FVL-NEXT: vpextrb $0, %xmm0, 16(%rdi) +; AVX512FVL-NEXT: testl $131072, %eax # imm = 0x20000 +; AVX512FVL-NEXT: je .LBB15_36 +; AVX512FVL-NEXT: .LBB15_35: # %cond.store33 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 17(%rdi) +; AVX512FVL-NEXT: testl $262144, %eax # imm = 0x40000 +; AVX512FVL-NEXT: je .LBB15_38 +; AVX512FVL-NEXT: .LBB15_37: # %cond.store35 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 18(%rdi) +; AVX512FVL-NEXT: testl $524288, %eax # imm = 0x80000 +; AVX512FVL-NEXT: je .LBB15_40 +; AVX512FVL-NEXT: .LBB15_39: # %cond.store37 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 19(%rdi) +; AVX512FVL-NEXT: testl $1048576, %eax # imm = 0x100000 +; AVX512FVL-NEXT: je .LBB15_42 +; AVX512FVL-NEXT: .LBB15_41: # %cond.store39 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 20(%rdi) +; AVX512FVL-NEXT: testl $2097152, %eax # imm = 0x200000 +; AVX512FVL-NEXT: je .LBB15_44 +; AVX512FVL-NEXT: .LBB15_43: # %cond.store41 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 21(%rdi) +; AVX512FVL-NEXT: testl $4194304, %eax # imm = 0x400000 +; AVX512FVL-NEXT: je .LBB15_46 +; AVX512FVL-NEXT: .LBB15_45: # %cond.store43 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 22(%rdi) +; AVX512FVL-NEXT: testl $8388608, %eax # imm = 0x800000 +; AVX512FVL-NEXT: je .LBB15_48 +; AVX512FVL-NEXT: .LBB15_47: # %cond.store45 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 23(%rdi) +; AVX512FVL-NEXT: testl $16777216, %eax # imm = 0x1000000 +; AVX512FVL-NEXT: je .LBB15_50 +; AVX512FVL-NEXT: .LBB15_49: # %cond.store47 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 24(%rdi) +; AVX512FVL-NEXT: testl $33554432, %eax # imm = 0x2000000 +; AVX512FVL-NEXT: je .LBB15_52 +; AVX512FVL-NEXT: .LBB15_51: # %cond.store49 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 25(%rdi) +; AVX512FVL-NEXT: testl $67108864, %eax # imm = 0x4000000 +; AVX512FVL-NEXT: je .LBB15_54 +; AVX512FVL-NEXT: .LBB15_53: # %cond.store51 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 26(%rdi) +; AVX512FVL-NEXT: testl $134217728, %eax # imm = 0x8000000 +; AVX512FVL-NEXT: je .LBB15_56 +; AVX512FVL-NEXT: .LBB15_55: # %cond.store53 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 27(%rdi) +; AVX512FVL-NEXT: testl $268435456, %eax # imm = 0x10000000 +; AVX512FVL-NEXT: je .LBB15_58 +; AVX512FVL-NEXT: .LBB15_57: # %cond.store55 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 28(%rdi) +; AVX512FVL-NEXT: testl $536870912, %eax # imm = 0x20000000 +; AVX512FVL-NEXT: je .LBB15_60 +; AVX512FVL-NEXT: .LBB15_59: # %cond.store57 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 29(%rdi) +; AVX512FVL-NEXT: testl $1073741824, %eax # imm = 0x40000000 +; AVX512FVL-NEXT: je .LBB15_62 +; AVX512FVL-NEXT: .LBB15_61: # %cond.store59 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 30(%rdi) +; AVX512FVL-NEXT: testl $-2147483648, %eax # imm = 0x80000000 +; AVX512FVL-NEXT: je .LBB15_64 +; AVX512FVL-NEXT: .LBB15_63: # %cond.store61 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 31(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v32i16_v32i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 @@ -6915,6 +7797,130 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, ptr %p, <16 x i8> %mask) { ; AVX512F-NEXT: 
vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v16i16_v16i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512FVL-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1 +; AVX512FVL-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX512FVL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero +; AVX512FVL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512FVL-NEXT: vpmovmskb %xmm1, %eax +; AVX512FVL-NEXT: xorl $65535, %eax # imm = 0xFFFF +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB16_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB16_3 +; AVX512FVL-NEXT: .LBB16_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB16_5 +; AVX512FVL-NEXT: .LBB16_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB16_7 +; AVX512FVL-NEXT: .LBB16_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB16_9 +; AVX512FVL-NEXT: .LBB16_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB16_11 +; AVX512FVL-NEXT: .LBB16_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB16_13 +; AVX512FVL-NEXT: .LBB16_14: # %else12 +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: js .LBB16_15 +; AVX512FVL-NEXT: .LBB16_16: # %else14 +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: jne .LBB16_17 +; AVX512FVL-NEXT: .LBB16_18: # %else16 +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: jne .LBB16_19 +; AVX512FVL-NEXT: .LBB16_20: # %else18 +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: jne .LBB16_21 +; AVX512FVL-NEXT: .LBB16_22: # %else20 +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: jne .LBB16_23 +; AVX512FVL-NEXT: .LBB16_24: # %else22 +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: jne .LBB16_25 +; AVX512FVL-NEXT: .LBB16_26: # %else24 +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: jne .LBB16_27 +; AVX512FVL-NEXT: .LBB16_28: # %else26 +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: jne .LBB16_29 +; AVX512FVL-NEXT: .LBB16_30: # %else28 +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: jne .LBB16_31 +; AVX512FVL-NEXT: .LBB16_32: # %else30 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB16_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB16_4 +; AVX512FVL-NEXT: .LBB16_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: je .LBB16_6 +; AVX512FVL-NEXT: .LBB16_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB16_8 +; AVX512FVL-NEXT: .LBB16_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB16_10 +; AVX512FVL-NEXT: .LBB16_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB16_12 +; AVX512FVL-NEXT: .LBB16_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB16_14 +; AVX512FVL-NEXT: .LBB16_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, 
%xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb %al, %al +; AVX512FVL-NEXT: jns .LBB16_16 +; AVX512FVL-NEXT: .LBB16_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: testl $256, %eax # imm = 0x100 +; AVX512FVL-NEXT: je .LBB16_18 +; AVX512FVL-NEXT: .LBB16_17: # %cond.store15 +; AVX512FVL-NEXT: vpextrb $8, %xmm0, 8(%rdi) +; AVX512FVL-NEXT: testl $512, %eax # imm = 0x200 +; AVX512FVL-NEXT: je .LBB16_20 +; AVX512FVL-NEXT: .LBB16_19: # %cond.store17 +; AVX512FVL-NEXT: vpextrb $9, %xmm0, 9(%rdi) +; AVX512FVL-NEXT: testl $1024, %eax # imm = 0x400 +; AVX512FVL-NEXT: je .LBB16_22 +; AVX512FVL-NEXT: .LBB16_21: # %cond.store19 +; AVX512FVL-NEXT: vpextrb $10, %xmm0, 10(%rdi) +; AVX512FVL-NEXT: testl $2048, %eax # imm = 0x800 +; AVX512FVL-NEXT: je .LBB16_24 +; AVX512FVL-NEXT: .LBB16_23: # %cond.store21 +; AVX512FVL-NEXT: vpextrb $11, %xmm0, 11(%rdi) +; AVX512FVL-NEXT: testl $4096, %eax # imm = 0x1000 +; AVX512FVL-NEXT: je .LBB16_26 +; AVX512FVL-NEXT: .LBB16_25: # %cond.store23 +; AVX512FVL-NEXT: vpextrb $12, %xmm0, 12(%rdi) +; AVX512FVL-NEXT: testl $8192, %eax # imm = 0x2000 +; AVX512FVL-NEXT: je .LBB16_28 +; AVX512FVL-NEXT: .LBB16_27: # %cond.store25 +; AVX512FVL-NEXT: vpextrb $13, %xmm0, 13(%rdi) +; AVX512FVL-NEXT: testl $16384, %eax # imm = 0x4000 +; AVX512FVL-NEXT: je .LBB16_30 +; AVX512FVL-NEXT: .LBB16_29: # %cond.store27 +; AVX512FVL-NEXT: vpextrb $14, %xmm0, 14(%rdi) +; AVX512FVL-NEXT: testl $32768, %eax # imm = 0x8000 +; AVX512FVL-NEXT: je .LBB16_32 +; AVX512FVL-NEXT: .LBB16_31: # %cond.store29 +; AVX512FVL-NEXT: vpextrb $15, %xmm0, 15(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v16i16_v16i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 @@ -7212,6 +8218,75 @@ define void @truncstore_v8i16_v8i8(<8 x i16> %x, ptr %p, <8 x i16> %mask) { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512FVL-LABEL: truncstore_v8i16_v8i8: +; AVX512FVL: # %bb.0: +; AVX512FVL-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512FVL-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm1 +; AVX512FVL-NEXT: vpternlogq {{.*#+}} xmm1 = ~xmm1 +; AVX512FVL-NEXT: vpmovsxwd %xmm1, %ymm1 +; AVX512FVL-NEXT: vptestmd %ymm1, %ymm1, %k0 +; AVX512FVL-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX512FVL-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 +; AVX512FVL-NEXT: kmovw %k0, %eax +; AVX512FVL-NEXT: testb $1, %al +; AVX512FVL-NEXT: jne .LBB17_1 +; AVX512FVL-NEXT: # %bb.2: # %else +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: jne .LBB17_3 +; AVX512FVL-NEXT: .LBB17_4: # %else2 +; AVX512FVL-NEXT: testb $4, %al +; AVX512FVL-NEXT: jne .LBB17_5 +; AVX512FVL-NEXT: .LBB17_6: # %else4 +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: jne .LBB17_7 +; AVX512FVL-NEXT: .LBB17_8: # %else6 +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: jne .LBB17_9 +; AVX512FVL-NEXT: .LBB17_10: # %else8 +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: jne .LBB17_11 +; AVX512FVL-NEXT: .LBB17_12: # %else10 +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: jne .LBB17_13 +; AVX512FVL-NEXT: .LBB17_14: # %else12 +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: jne .LBB17_15 +; AVX512FVL-NEXT: .LBB17_16: # %else14 +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; AVX512FVL-NEXT: .LBB17_1: # %cond.store +; AVX512FVL-NEXT: vpextrb $0, %xmm0, (%rdi) +; AVX512FVL-NEXT: testb $2, %al +; AVX512FVL-NEXT: je .LBB17_4 +; AVX512FVL-NEXT: .LBB17_3: # %cond.store1 +; AVX512FVL-NEXT: vpextrb $1, %xmm0, 1(%rdi) +; AVX512FVL-NEXT: testb $4, %al +; 
AVX512FVL-NEXT: je .LBB17_6 +; AVX512FVL-NEXT: .LBB17_5: # %cond.store3 +; AVX512FVL-NEXT: vpextrb $2, %xmm0, 2(%rdi) +; AVX512FVL-NEXT: testb $8, %al +; AVX512FVL-NEXT: je .LBB17_8 +; AVX512FVL-NEXT: .LBB17_7: # %cond.store5 +; AVX512FVL-NEXT: vpextrb $3, %xmm0, 3(%rdi) +; AVX512FVL-NEXT: testb $16, %al +; AVX512FVL-NEXT: je .LBB17_10 +; AVX512FVL-NEXT: .LBB17_9: # %cond.store7 +; AVX512FVL-NEXT: vpextrb $4, %xmm0, 4(%rdi) +; AVX512FVL-NEXT: testb $32, %al +; AVX512FVL-NEXT: je .LBB17_12 +; AVX512FVL-NEXT: .LBB17_11: # %cond.store9 +; AVX512FVL-NEXT: vpextrb $5, %xmm0, 5(%rdi) +; AVX512FVL-NEXT: testb $64, %al +; AVX512FVL-NEXT: je .LBB17_14 +; AVX512FVL-NEXT: .LBB17_13: # %cond.store11 +; AVX512FVL-NEXT: vpextrb $6, %xmm0, 6(%rdi) +; AVX512FVL-NEXT: testb $-128, %al +; AVX512FVL-NEXT: je .LBB17_16 +; AVX512FVL-NEXT: .LBB17_15: # %cond.store13 +; AVX512FVL-NEXT: vpextrb $7, %xmm0, 7(%rdi) +; AVX512FVL-NEXT: vzeroupper +; AVX512FVL-NEXT: retq +; ; AVX512BW-LABEL: truncstore_v8i16_v8i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 diff --git a/llvm/test/CodeGen/X86/negative-sin.ll b/llvm/test/CodeGen/X86/negative-sin.ll index f24507d3a4f38..4836da2ad7797 100644 --- a/llvm/test/CodeGen/X86/negative-sin.ll +++ b/llvm/test/CodeGen/X86/negative-sin.ll @@ -82,18 +82,13 @@ define double @semi_strict2(double %e) nounwind { ret double %h } -; FIXME: -; Auto-upgrade function attribute to IR-level fast-math-flags. - -define double @fn_attr(double %e) nounwind #0 { -; CHECK-LABEL: fn_attr: +define double @nsz_flag(double %e) nounwind { +; CHECK-LABEL: nsz_flag: ; CHECK: # %bb.0: ; CHECK-NEXT: jmp sin@PLT # TAILCALL - %f = fsub double 0.0, %e - %g = call double @sin(double %f) readonly - %h = fsub double 0.0, %g + %f = fsub nsz double 0.0, %e + %g = call nsz double @sin(double %f) readonly + %h = fsub nsz double 0.0, %g ret double %h } -attributes #0 = { "unsafe-fp-math"="true" "no-signed-zeros-fp-math"="true" } - diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll index f53983036a016..5df1867f73c8e 100644 --- a/llvm/test/CodeGen/X86/oddsubvector.ll +++ b/llvm/test/CodeGen/X86/oddsubvector.ll @@ -155,10 +155,10 @@ define <16 x i32> @PR42819(ptr %a0) { define void @PR42833() { ; SSE2-LABEL: PR42833: ; SSE2: # %bb.0: +; SSE2-NEXT: movl b(%rip), %eax ; SSE2-NEXT: movdqa c+144(%rip), %xmm2 ; SSE2-NEXT: movdqa c+128(%rip), %xmm0 -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: addl b(%rip), %eax +; SSE2-NEXT: addl c+128(%rip), %eax ; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: paddd %xmm0, %xmm3 @@ -166,7 +166,7 @@ define void @PR42833() { ; SSE2-NEXT: psubd %xmm2, %xmm4 ; SSE2-NEXT: paddd %xmm2, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: paddd %xmm0, %xmm5 +; SSE2-NEXT: paddd %xmm5, %xmm5 ; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3] ; SSE2-NEXT: movdqa %xmm2, c+144(%rip) ; SSE2-NEXT: movaps %xmm5, c+128(%rip) @@ -191,17 +191,17 @@ define void @PR42833() { ; ; SSE42-LABEL: PR42833: ; SSE42: # %bb.0: +; SSE42-NEXT: movl b(%rip), %eax ; SSE42-NEXT: movdqa c+144(%rip), %xmm1 ; SSE42-NEXT: movdqa c+128(%rip), %xmm0 -; SSE42-NEXT: movd %xmm0, %eax -; SSE42-NEXT: addl b(%rip), %eax +; SSE42-NEXT: addl c+128(%rip), %eax ; SSE42-NEXT: movd %eax, %xmm2 ; SSE42-NEXT: paddd %xmm0, %xmm2 ; SSE42-NEXT: movdqa d+144(%rip), %xmm3 ; SSE42-NEXT: psubd %xmm1, %xmm3 ; SSE42-NEXT: paddd %xmm1, %xmm1 ; SSE42-NEXT: movdqa %xmm0, %xmm4 -; SSE42-NEXT: paddd %xmm0, %xmm4 +; SSE42-NEXT: paddd %xmm4, %xmm4 ; SSE42-NEXT: 
pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, c+144(%rip) ; SSE42-NEXT: movdqa %xmm4, c+128(%rip) diff --git a/llvm/test/CodeGen/X86/pr159723.ll b/llvm/test/CodeGen/X86/pr159723.ll index cab4abb043639..c66b101fff990 100644 --- a/llvm/test/CodeGen/X86/pr159723.ll +++ b/llvm/test/CodeGen/X86/pr159723.ll @@ -17,7 +17,7 @@ define <8 x i1> @test_cmp_v8half_ogt(<8 x half> %rhs, <8 x i1> %mask) nounwind { ; CHECK-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill ; CHECK-NEXT: callq test_call_8@PLT ; CHECK-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload -; CHECK-NEXT: vcmpltph {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %k0 {%k1} # 16-byte Folded Reload +; CHECK-NEXT: vcmpgtph {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %k0 {%k1} # 16-byte Folded Reload ; CHECK-NEXT: vpmovm2w %k0, %xmm0 ; CHECK-NEXT: addq $40, %rsp ; CHECK-NEXT: retq @@ -79,7 +79,7 @@ define <16 x i1> @test_cmp_v16half_olt_commute(<16 x half> %rhs, <16 x i1> %mask ; CHECK-NEXT: kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill ; CHECK-NEXT: callq test_call_16@PLT ; CHECK-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload -; CHECK-NEXT: vcmpltph {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %k0 {%k1} # 32-byte Folded Reload +; CHECK-NEXT: vcmpgtph {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %k0 {%k1} # 32-byte Folded Reload ; CHECK-NEXT: vpmovm2b %k0, %xmm0 ; CHECK-NEXT: addq $56, %rsp ; CHECK-NEXT: vzeroupper @@ -100,7 +100,7 @@ define <32 x i1> @test_cmp_v32half_oge(<32 x half> %rhs, <32 x i1> %mask) nounwi ; CHECK-NEXT: kmovd %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill ; CHECK-NEXT: callq test_call_32@PLT ; CHECK-NEXT: kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 4-byte Reload -; CHECK-NEXT: vcmpleph {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k0 {%k1} # 64-byte Folded Reload +; CHECK-NEXT: vcmpgeph {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k0 {%k1} # 64-byte Folded Reload ; CHECK-NEXT: vpmovm2b %k0, %ymm0 ; CHECK-NEXT: addq $88, %rsp ; CHECK-NEXT: retq diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll index ce03f8fad4a19..161e9651a9cf2 100644 --- a/llvm/test/CodeGen/X86/pr62286.ll +++ b/llvm/test/CodeGen/X86/pr62286.ll @@ -26,27 +26,33 @@ define i64 @PR62286(i32 %a) { ; AVX1-LABEL: PR62286: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovd %edi, %xmm0 -; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0] +; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7] ; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7] -; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1 -; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] ; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: PR62286: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovd %edi, %xmm0 -; AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm1 
-; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3] -; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0] +; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm1 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 @@ -59,12 +65,12 @@ define i64 @PR62286(i32 %a) { ; AVX512-LABEL: PR62286: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovd %edi, %xmm0 -; AVX512-NEXT: movb $8, %al +; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0] +; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm1 +; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512-NEXT: movw $4369, %ax # imm = 0x1111 ; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z} -; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0 -; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] +; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} ; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0 ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 diff --git a/llvm/test/CodeGen/X86/pr74736.ll b/llvm/test/CodeGen/X86/pr74736.ll index ceccee00c9457..58955265580bd 100644 --- a/llvm/test/CodeGen/X86/pr74736.ll +++ b/llvm/test/CodeGen/X86/pr74736.ll @@ -6,8 +6,8 @@ define void @main(<16 x i32> %0, i32 %1) { ; SSE-LABEL: main: ; SSE: # %bb.0: # %entry ; SSE-NEXT: movd %edi, %xmm4 -; SSE-NEXT: movss {{.*#+}} xmm0 = [1,0,0,0] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[1,0] +; SSE-NEXT: movsd {{.*#+}} xmm0 = [0,1,0,0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,0] ; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: paddd %xmm1, %xmm1 ; SSE-NEXT: paddd %xmm3, %xmm3 @@ -32,20 +32,20 @@ define void @main(<16 x i32> %0, i32 %1) { ; AVX-LABEL: main: ; AVX: # %bb.0: # %entry ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm0[1,2,3] ; AVX-NEXT: movl $1, %eax ; AVX-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 ; AVX-NEXT: vpinsrd $3, %edi, %xmm2, %xmm2 -; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] -; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 -; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm1 -; AVX-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,1,3,3,5,5,7] -; AVX-NEXT: vpermd %ymm0, %ymm2, %ymm2 +; AVX-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVX-NEXT: vpaddd %ymm2, %ymm2, %ymm2 +; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm3 ; AVX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] ; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7] -; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7] +; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 +; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,5,7] ; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] -; AVX-NEXT: vpxor %ymm0, %ymm2, %ymm0 +; AVX-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,1,1,3,3,5,5,7] +; AVX-NEXT: vpermd %ymm2, %ymm1, %ymm1 +; AVX-NEXT: vpxor %ymm0, %ymm1, %ymm0 ; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] diff --git a/llvm/test/CodeGen/X86/rematerialize-sub-super-reg.mir 
b/llvm/test/CodeGen/X86/rematerialize-sub-super-reg.mir index b99c5fc8df0cb..44a2aecdc3672 100644 --- a/llvm/test/CodeGen/X86/rematerialize-sub-super-reg.mir +++ b/llvm/test/CodeGen/X86/rematerialize-sub-super-reg.mir @@ -165,5 +165,25 @@ body: | bb.3: $rax = COPY %t3 RET 0, $rax - ... +--- +name: rematerialize_superregister_into_subregister_def_with_impdef_physreg +body: | + bb.0.entry: + ; CHECK-LABEL: name: rematerialize_superregister_into_subregister_def_with_impdef_physreg + ; CHECK: dead $esi = MOV32r0 implicit-def dead $eflags, implicit-def $rsi + ; CHECK-NEXT: dead $edx = MOV32r0 implicit-def dead $eflags, implicit-def $rdx + ; CHECK-NEXT: FAKE_USE implicit killed $rsi, implicit killed $rdx + ; CHECK-NEXT: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def dead $rax, implicit-def $al + ; CHECK-NEXT: FAKE_USE implicit killed $al + ; CHECK-NEXT: $eax = MOV32r0 implicit-def dead $eflags + ; CHECK-NEXT: RET 0, $eax + undef %1.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def %1 + $rsi = COPY %1 + $rdx = COPY %1 + FAKE_USE implicit killed $rsi, implicit killed $rdx + %4:gr8 = COPY killed %1.sub_8bit + $al = COPY killed %4 + FAKE_USE implicit killed $al + $eax = MOV32r0 implicit-def dead $eflags + RET 0, killed $eax diff --git a/llvm/test/CodeGen/X86/sbb.ll b/llvm/test/CodeGen/X86/sbb.ll index 78d609d3a17e6..f5a34688d67b5 100644 --- a/llvm/test/CodeGen/X86/sbb.ll +++ b/llvm/test/CodeGen/X86/sbb.ll @@ -365,3 +365,32 @@ define i32 @uge_sext_add(i32 %0, i32 %1, i32 %2) { %6 = add nsw i32 %5, %0 ret i32 %6 } + +define i32 @sub_sub_ugt(i32 %a, i32 %b) { +; CHECK-LABEL: sub_sub_ugt: +; CHECK: # %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: cmpl %edi, %esi +; CHECK-NEXT: sbbl %esi, %eax +; CHECK-NEXT: retq + %cmp = icmp ugt i32 %a, %b + %conv = zext i1 %cmp to i32 + %sub = sub i32 %a, %b + %res = sub i32 %sub, %conv + ret i32 %res +} + +define i32 @sub_sub_ult(i32 %a, i32 %b) { +; CHECK-LABEL: sub_sub_ult: +; CHECK: # %bb.0: +; CHECK-NEXT: movl %edi, %eax +; CHECK-NEXT: cmpl %edi, %esi +; CHECK-NEXT: sbbl %esi, %eax +; CHECK-NEXT: retq + %cmp = icmp ult i32 %b, %a + %conv = zext i1 %cmp to i32 + %sub = sub i32 %a, %b + %res = sub i32 %sub, %conv + ret i32 %res +} + diff --git a/llvm/test/CodeGen/X86/setoeq.ll b/llvm/test/CodeGen/X86/setoeq.ll index f0addf4b64599..131e279aa645c 100644 --- a/llvm/test/CodeGen/X86/setoeq.ll +++ b/llvm/test/CodeGen/X86/setoeq.ll @@ -1,40 +1,532 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s - -define zeroext i8 @t(double %x) nounwind readnone { -; CHECK-LABEL: t: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK-NEXT: cmpeqsd %xmm0, %xmm1 -; CHECK-NEXT: movd %xmm1, %eax -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: # kill: def $al killed $al killed $eax -; CHECK-NEXT: retl -entry: - %0 = fptosi double %x to i32 ; [#uses=1] - %1 = sitofp i32 %0 to double ; [#uses=1] - %2 = fcmp oeq double %1, %x ; [#uses=1] - %retval12 = zext i1 %2 to i8 ; [#uses=1] - ret i8 %retval12 -} - -define zeroext i8 @u(double %x) nounwind readnone { -; CHECK-LABEL: u: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero -; CHECK-NEXT: cvttpd2dq %xmm0, %xmm1 -; CHECK-NEXT: cvtdq2pd %xmm1, %xmm1 -; CHECK-NEXT: cmpneqsd %xmm0, %xmm1 -; CHECK-NEXT: movd %xmm1, %eax -; CHECK-NEXT: andl $1, %eax -; CHECK-NEXT: # kill: def $al 
killed $al killed $eax -; CHECK-NEXT: retl -entry: - %0 = fptosi double %x to i32 ; [#uses=1] - %1 = sitofp i32 %0 to double ; [#uses=1] - %2 = fcmp une double %1, %x ; [#uses=1] - %retval12 = zext i1 %2 to i8 ; [#uses=1] +; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=AVX +; RUN: llc < %s -mtriple=i686-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512 + +define zeroext i8 @oeq_f64_i32(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_i32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttpd2dq %xmm0, %xmm1 +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: cmpeqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_i32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_i32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX512-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i32 + %1 = sitofp i32 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @oeq_f64_u32(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_u32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttsd2si %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: movapd %xmm0, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cvttsd2si %xmm1, %edx +; SSE-NEXT: andl %ecx, %edx +; SSE-NEXT: orl %eax, %edx +; SSE-NEXT: movd %edx, %xmm1 +; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cmpeqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_u32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttsd2si %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarl $31, %ecx +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 +; AVX-NEXT: vcvttsd2si %xmm1, %edx +; AVX-NEXT: andl %ecx, %edx +; AVX-NEXT: orl %eax, %edx +; AVX-NEXT: vmovd %edx, %xmm1 +; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_u32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttsd2usi %xmm0, %eax +; AVX512-NEXT: vcvtusi2sd %eax, %xmm7, %xmm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i32 + %1 = uitofp i32 %0 to double + %2 = fcmp oeq double %1, %x + 
%retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @oeq_f64_i64(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_i64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $32, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp) +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: orl $3072, %eax # imm = 0xC00 +; SSE-NEXT: movw %ax, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: movlps %xmm1, {{[0-9]+}}(%esp) +; SSE-NEXT: fildll {{[0-9]+}}(%esp) +; SSE-NEXT: fstpl {{[0-9]+}}(%esp) +; SSE-NEXT: cmpeqsd {{[0-9]+}}(%esp), %xmm0 +; SSE-NEXT: movd %xmm0, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_i64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $24, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd %xmm0, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp) +; AVX-NEXT: fildll {{[0-9]+}}(%esp) +; AVX-NEXT: fstpl {{[0-9]+}}(%esp) +; AVX-NEXT: vcmpeqsd {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_i64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm1 +; AVX512-NEXT: vcvtqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i64 + %1 = sitofp i64 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @oeq_f64_u64(double %x) nounwind readnone { +; SSE-LABEL: oeq_f64_u64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $16, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; SSE-NEXT: ucomisd %xmm0, %xmm1 +; SSE-NEXT: jbe .LBB3_2 +; SSE-NEXT: # %bb.1: # %entry +; SSE-NEXT: xorpd %xmm1, %xmm1 +; SSE-NEXT: .LBB3_2: # %entry +; SSE-NEXT: movapd %xmm0, %xmm2 +; SSE-NEXT: subsd %xmm1, %xmm2 +; SSE-NEXT: movsd %xmm2, {{[0-9]+}}(%esp) +; SSE-NEXT: setbe %al +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; SSE-NEXT: orl $3072, %ecx # imm = 0xC00 +; SSE-NEXT: movw %cx, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzbl %al, %eax +; SSE-NEXT: shll $31, %eax +; SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: movd %eax, %xmm1 +; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1] +; 
SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; SSE-NEXT: movapd %xmm2, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; SSE-NEXT: addsd %xmm2, %xmm1 +; SSE-NEXT: cmpeqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: oeq_f64_u64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $8, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; AVX-NEXT: vucomisd %xmm0, %xmm1 +; AVX-NEXT: jbe .LBB3_2 +; AVX-NEXT: # %bb.1: # %entry +; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: .LBB3_2: # %entry +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vmovsd %xmm1, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: setbe %al +; AVX-NEXT: movzbl %al, %eax +; AVX-NEXT: shll $31, %eax +; AVX-NEXT: xorl {{[0-9]+}}(%esp), %eax +; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; AVX-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0] +; AVX-NEXT: vaddsd %xmm1, %xmm2, %xmm1 +; AVX-NEXT: vcmpeqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: oeq_f64_u64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm1 +; AVX512-NEXT: vcvtuqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpeqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i64 + %1 = uitofp i64 %0 to double + %2 = fcmp oeq double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_i32(double %x) nounwind readnone { +; SSE-LABEL: une_f64_i32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: cvttpd2dq %xmm0, %xmm1 +; SSE-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE-NEXT: cmpneqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_i32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_i32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2dq %xmm0, %xmm1 +; AVX512-NEXT: vcvtdq2pd %xmm1, %xmm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i32 + %1 = sitofp i32 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_u32(double %x) nounwind readnone { +; SSE-LABEL: une_f64_u32: +; SSE: # %bb.0: # %entry +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; 
SSE-NEXT: cvttsd2si %xmm0, %eax +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: sarl $31, %ecx +; SSE-NEXT: movapd %xmm0, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cvttsd2si %xmm1, %edx +; SSE-NEXT: andl %ecx, %edx +; SSE-NEXT: orl %eax, %edx +; SSE-NEXT: movd %edx, %xmm1 +; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; SSE-NEXT: cmpneqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_u32: +; AVX: # %bb.0: # %entry +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vcvttsd2si %xmm0, %eax +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: sarl $31, %ecx +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 +; AVX-NEXT: vcvttsd2si %xmm1, %edx +; AVX-NEXT: andl %ecx, %edx +; AVX-NEXT: orl %eax, %edx +; AVX-NEXT: vmovd %edx, %xmm1 +; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_u32: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttsd2usi %xmm0, %eax +; AVX512-NEXT: vcvtusi2sd %eax, %xmm7, %xmm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i32 + %1 = uitofp i32 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_i64(double %x) nounwind readnone { +; SSE-LABEL: une_f64_i64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $32, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp) +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: orl $3072, %eax # imm = 0xC00 +; SSE-NEXT: movw %ax, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: movlps %xmm1, {{[0-9]+}}(%esp) +; SSE-NEXT: fildll {{[0-9]+}}(%esp) +; SSE-NEXT: fstpl {{[0-9]+}}(%esp) +; SSE-NEXT: cmpneqsd {{[0-9]+}}(%esp), %xmm0 +; SSE-NEXT: movd %xmm0, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_i64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $24, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd %xmm0, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp) +; AVX-NEXT: fildll {{[0-9]+}}(%esp) +; AVX-NEXT: fstpl {{[0-9]+}}(%esp) +; AVX-NEXT: vcmpneqsd {{[0-9]+}}(%esp), %xmm0, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_i64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = 
mem[0],zero +; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm1 +; AVX512-NEXT: vcvtqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptosi double %x to i64 + %1 = sitofp i64 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 + ret i8 %retval12 +} + +define zeroext i8 @une_f64_u64(double %x) nounwind readnone { +; SSE-LABEL: une_f64_u64: +; SSE: # %bb.0: # %entry +; SSE-NEXT: pushl %ebp +; SSE-NEXT: movl %esp, %ebp +; SSE-NEXT: andl $-8, %esp +; SSE-NEXT: subl $16, %esp +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; SSE-NEXT: ucomisd %xmm0, %xmm1 +; SSE-NEXT: jbe .LBB7_2 +; SSE-NEXT: # %bb.1: # %entry +; SSE-NEXT: xorpd %xmm1, %xmm1 +; SSE-NEXT: .LBB7_2: # %entry +; SSE-NEXT: movapd %xmm0, %xmm2 +; SSE-NEXT: subsd %xmm1, %xmm2 +; SSE-NEXT: movsd %xmm2, {{[0-9]+}}(%esp) +; SSE-NEXT: setbe %al +; SSE-NEXT: fldl {{[0-9]+}}(%esp) +; SSE-NEXT: fnstcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; SSE-NEXT: orl $3072, %ecx # imm = 0xC00 +; SSE-NEXT: movw %cx, {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: fistpll {{[0-9]+}}(%esp) +; SSE-NEXT: fldcw {{[0-9]+}}(%esp) +; SSE-NEXT: movzbl %al, %eax +; SSE-NEXT: shll $31, %eax +; SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax +; SSE-NEXT: movd %eax, %xmm1 +; SSE-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1] +; SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2 +; SSE-NEXT: movapd %xmm2, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; SSE-NEXT: addsd %xmm2, %xmm1 +; SSE-NEXT: cmpneqsd %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, %eax +; SSE-NEXT: andl $1, %eax +; SSE-NEXT: # kill: def $al killed $al killed $eax +; SSE-NEXT: movl %ebp, %esp +; SSE-NEXT: popl %ebp +; SSE-NEXT: retl +; +; AVX-LABEL: une_f64_u64: +; AVX: # %bb.0: # %entry +; AVX-NEXT: pushl %ebp +; AVX-NEXT: movl %esp, %ebp +; AVX-NEXT: andl $-8, %esp +; AVX-NEXT: subl $8, %esp +; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0] +; AVX-NEXT: vucomisd %xmm0, %xmm1 +; AVX-NEXT: jbe .LBB7_2 +; AVX-NEXT: # %bb.1: # %entry +; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: .LBB7_2: # %entry +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vmovsd %xmm1, (%esp) +; AVX-NEXT: fldl (%esp) +; AVX-NEXT: fisttpll (%esp) +; AVX-NEXT: setbe %al +; AVX-NEXT: movzbl %al, %eax +; AVX-NEXT: shll $31, %eax +; AVX-NEXT: xorl {{[0-9]+}}(%esp), %eax +; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1 +; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; AVX-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 +; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0] +; AVX-NEXT: vaddsd %xmm1, %xmm2, %xmm1 +; AVX-NEXT: vcmpneqsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: andl $1, %eax +; AVX-NEXT: # kill: def $al killed $al killed $eax +; AVX-NEXT: movl %ebp, %esp +; AVX-NEXT: popl %ebp +; AVX-NEXT: retl +; +; AVX512-LABEL: une_f64_u64: +; AVX512: # %bb.0: # %entry +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm1 +; AVX512-NEXT: vcvtuqq2pd %ymm1, %ymm1 +; AVX512-NEXT: vcmpneqsd %xmm0, %xmm1, %k0 +; AVX512-NEXT: 
kmovd %k0, %eax +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retl +entry: + %0 = fptoui double %x to i64 + %1 = uitofp i64 %0 to double + %2 = fcmp une double %1, %x + %retval12 = zext i1 %2 to i8 ret i8 %retval12 } diff --git a/llvm/test/CodeGen/X86/shift-i128.ll b/llvm/test/CodeGen/X86/shift-i128.ll index 7462c77482827..049ee47af9681 100644 --- a/llvm/test/CodeGen/X86/shift-i128.ll +++ b/llvm/test/CodeGen/X86/shift-i128.ll @@ -613,8 +613,7 @@ define void @test_shl_v2i128(<2 x i128> %x, <2 x i128> %a, ptr nocapture %r) nou ; i686-NEXT: shldl %cl, %esi, %ebx ; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload ; i686-NEXT: movl %edi, %esi -; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload -; i686-NEXT: movl %eax, %ecx +; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload ; i686-NEXT: shll %cl, %esi ; i686-NEXT: shldl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill ; i686-NEXT: negl %edx diff --git a/llvm/test/CodeGen/X86/shift-i512.ll b/llvm/test/CodeGen/X86/shift-i512.ll index 756019d0e98a0..03b61d9235254 100644 --- a/llvm/test/CodeGen/X86/shift-i512.ll +++ b/llvm/test/CodeGen/X86/shift-i512.ll @@ -10,7 +10,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: valignq {{.*#+}} zmm1 = zmm0[3,4,5,6,7,0,1,2] ; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512VL-NEXT: vpsllq $1, %xmm0, %xmm3 +; AVX512VL-NEXT: vpaddq %xmm0, %xmm0, %xmm3 ; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3] ; AVX512VL-NEXT: vpsrlq $63, %xmm4, %xmm4 ; AVX512VL-NEXT: vpaddq %xmm2, %xmm2, %xmm2 @@ -34,7 +34,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2 ; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] ; AVX512VBMI-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3 -; AVX512VBMI-NEXT: vpsllq $1, %xmm0, %xmm4 +; AVX512VBMI-NEXT: vpaddq %xmm0, %xmm0, %xmm4 ; AVX512VBMI-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 ; AVX512VBMI-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX512VBMI-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7] @@ -51,7 +51,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; ZNVER4-NEXT: vextracti32x4 $2, %zmm0, %xmm1 ; ZNVER4-NEXT: vextracti128 $1, %ymm0, %xmm2 ; ZNVER4-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] -; ZNVER4-NEXT: vpsllq $1, %xmm0, %xmm4 +; ZNVER4-NEXT: vpaddq %xmm0, %xmm0, %xmm4 ; ZNVER4-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; ZNVER4-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3 ; ZNVER4-NEXT: vextracti64x4 $1, %zmm0, %ymm2 diff --git a/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll b/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll index 82c460fc55938..571915b47d297 100644 --- a/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll +++ b/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll @@ -1,9 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s 
--check-prefixes=AVX512,AVX512BW -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL,AVX512VL-FAST-ALL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL,AVX512VL-FAST-PERLANE +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW,AVX512BW-FAST-ALL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW,AVX512BW-FAST-PERLANE ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-FAST-ALL ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-FAST-PERLANE @@ -21,6 +21,31 @@ define void @shuffle_v64i8_to_v32i8_1(ptr %L, ptr %S) nounwind { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512VL-FAST-ALL-LABEL: shuffle_v64i8_to_v32i8_1: +; AVX512VL-FAST-ALL: # %bb.0: +; AVX512VL-FAST-ALL-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512VL-FAST-ALL-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX512VL-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31] +; AVX512VL-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u] +; AVX512VL-FAST-ALL-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,2,5,7] +; AVX512VL-FAST-ALL-NEXT: vpermi2q %ymm1, %ymm0, %ymm2 +; AVX512VL-FAST-ALL-NEXT: vmovdqa %ymm2, (%rsi) +; AVX512VL-FAST-ALL-NEXT: vzeroupper +; AVX512VL-FAST-ALL-NEXT: retq +; +; AVX512VL-FAST-PERLANE-LABEL: shuffle_v64i8_to_v32i8_1: +; AVX512VL-FAST-PERLANE: # %bb.0: +; AVX512VL-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512VL-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX512VL-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15] +; AVX512VL-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm1, %ymm1 +; AVX512VL-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm0, %ymm0 +; AVX512VL-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] +; AVX512VL-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512VL-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rsi) +; AVX512VL-FAST-PERLANE-NEXT: vzeroupper +; AVX512VL-FAST-PERLANE-NEXT: retq +; ; AVX512BW-LABEL: shuffle_v64i8_to_v32i8_1: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpsrlw $8, (%rdi), %zmm0 @@ -63,6 +88,40 @@ define void @shuffle_v16i32_to_v8i32_1(ptr %L, ptr %S) nounwind { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512VL-FAST-ALL-LABEL: shuffle_v16i32_to_v8i32_1: +; AVX512VL-FAST-ALL: # %bb.0: +; AVX512VL-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15] +; AVX512VL-FAST-ALL-NEXT: vpermps (%rdi), %zmm0, %zmm0 +; AVX512VL-FAST-ALL-NEXT: vmovaps %ymm0, (%rsi) +; AVX512VL-FAST-ALL-NEXT: vzeroupper +; AVX512VL-FAST-ALL-NEXT: retq +; +; AVX512VL-FAST-PERLANE-LABEL: shuffle_v16i32_to_v8i32_1: +; AVX512VL-FAST-PERLANE: # %bb.0: +; 
AVX512VL-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0 +; AVX512VL-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],mem[1,3],ymm0[5,7],mem[5,7] +; AVX512VL-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512VL-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rsi) +; AVX512VL-FAST-PERLANE-NEXT: vzeroupper +; AVX512VL-FAST-PERLANE-NEXT: retq +; +; AVX512BW-FAST-ALL-LABEL: shuffle_v16i32_to_v8i32_1: +; AVX512BW-FAST-ALL: # %bb.0: +; AVX512BW-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15] +; AVX512BW-FAST-ALL-NEXT: vpermps (%rdi), %zmm0, %zmm0 +; AVX512BW-FAST-ALL-NEXT: vmovaps %ymm0, (%rsi) +; AVX512BW-FAST-ALL-NEXT: vzeroupper +; AVX512BW-FAST-ALL-NEXT: retq +; +; AVX512BW-FAST-PERLANE-LABEL: shuffle_v16i32_to_v8i32_1: +; AVX512BW-FAST-PERLANE: # %bb.0: +; AVX512BW-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0 +; AVX512BW-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],mem[1,3],ymm0[5,7],mem[5,7] +; AVX512BW-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512BW-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rsi) +; AVX512BW-FAST-PERLANE-NEXT: vzeroupper +; AVX512BW-FAST-PERLANE-NEXT: retq +; ; AVX512BWVL-FAST-ALL-LABEL: shuffle_v16i32_to_v8i32_1: ; AVX512BWVL-FAST-ALL: # %bb.0: ; AVX512BWVL-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15] diff --git a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll index 3f48b22e2b9ff..a48be037ebebc 100644 --- a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll @@ -5791,20 +5791,20 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi16: ; SSE: # %bb.0: -; SSE-NEXT: psllw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x01] +; SSE-NEXT: psllw $2, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi16: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x01] +; AVX1-NEXT: vpsllw $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x01] +; AVX512-NEXT: vpsllw $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] %arg0 = bitcast <2 x i64> %a0 to <8 x i16> - %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 1) + %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 2) %bc = bitcast <8 x i16> %res to <2 x i64> ret <2 x i64> %bc } @@ -5813,20 +5813,20 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi32: ; SSE: # %bb.0: -; SSE-NEXT: pslld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x01] +; SSE-NEXT: pslld $2, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpslld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x01] +; AVX1-NEXT: vpslld $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpslld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x01] 
+; AVX512-NEXT: vpslld $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] %arg0 = bitcast <2 x i64> %a0 to <4 x i32> - %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 1) + %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 2) %bc = bitcast <4 x i32> %res to <2 x i64> ret <2 x i64> %bc } @@ -5835,19 +5835,19 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi64: ; SSE: # %bb.0: -; SSE-NEXT: psllq $1, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x01] +; SSE-NEXT: psllq $2, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi64: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x01] +; AVX1-NEXT: vpsllq $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi64: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsllq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x01] +; AVX512-NEXT: vpsllq $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] - %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1) + %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 2) ret <2 x i64> %res } declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll b/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll index fd988f7d318fe..a49d3a552f556 100644 --- a/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll +++ b/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll @@ -1,20 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avxvnniint8 < %s | FileCheck %s -declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> 
@llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <4 x i32> @stack_fold_vpdpbssd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbssd(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssd: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -24,11 +24,11 @@ define <4 x i32> @stack_fold_vpdpbssd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a ; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbssd_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbssd_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssd_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -38,11 +38,11 @@ define <4 x i32> @stack_fold_vpdpbssd_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbssd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbssd_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssd_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -52,11 +52,11 @@ define <8 x i32> @stack_fold_vpdpbssd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32 ; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbssd_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbssd_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssd_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -66,11 +66,11 @@ define <8 x i32> 
@stack_fold_vpdpbssd_256_commuted(<8 x i32> %a0, <8 x i32> %a1, ; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbssds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbssds(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssds: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -80,11 +80,11 @@ define <4 x i32> @stack_fold_vpdpbssds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> % ; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbssds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbssds_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssds_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -94,11 +94,11 @@ define <4 x i32> @stack_fold_vpdpbssds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbssds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbssds_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssds_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -108,11 +108,11 @@ define <8 x i32> @stack_fold_vpdpbssds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3 ; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbssds_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbssds_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbssds_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups 
%ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -122,11 +122,11 @@ define <8 x i32> @stack_fold_vpdpbssds_256_commuted(<8 x i32> %a0, <8 x i32> %a1 ; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbsud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbsud(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsud: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -136,11 +136,11 @@ define <4 x i32> @stack_fold_vpdpbsud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a ; CHECK-NEXT: vpdpbsud {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbsud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbsud_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsud_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -151,11 +151,11 @@ define <4 x i32> @stack_fold_vpdpbsud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbsud %xmm1, %xmm2, %xmm0 ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbsud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbsud_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsud_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -165,11 +165,11 @@ define <8 x i32> @stack_fold_vpdpbsud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32 ; CHECK-NEXT: vpdpbsud {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbsud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbsud_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsud_256_commuted: ; CHECK: # 
%bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -180,11 +180,11 @@ define <8 x i32> @stack_fold_vpdpbsud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, ; CHECK-NEXT: vpdpbsud %ymm1, %ymm2, %ymm0 ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbsuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbsuds(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsuds: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -194,11 +194,11 @@ define <4 x i32> @stack_fold_vpdpbsuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> % ; CHECK-NEXT: vpdpbsuds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbsuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbsuds_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsuds_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -209,11 +209,11 @@ define <4 x i32> @stack_fold_vpdpbsuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbsuds %xmm1, %xmm2, %xmm0 ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbsuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbsuds_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsuds_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -223,11 +223,11 @@ define <8 x i32> @stack_fold_vpdpbsuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3 ; CHECK-NEXT: vpdpbsuds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbsuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbsuds_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbsuds_256_commuted: ; CHECK: 
# %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -238,11 +238,11 @@ define <8 x i32> @stack_fold_vpdpbsuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1 ; CHECK-NEXT: vpdpbsuds %ymm1, %ymm2, %ymm0 ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbuud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbuud(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuud: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -252,11 +252,11 @@ define <4 x i32> @stack_fold_vpdpbuud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a ; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbuud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbuud_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuud_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -266,11 +266,11 @@ define <4 x i32> @stack_fold_vpdpbuud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbuud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbuud_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuud_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -280,11 +280,11 @@ define <8 x i32> @stack_fold_vpdpbuud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32 ; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbuud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbuud_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: 
stack_fold_vpdpbuud_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -294,11 +294,11 @@ define <8 x i32> @stack_fold_vpdpbuud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, ; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbuuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbuuds(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuuds: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -308,11 +308,11 @@ define <4 x i32> @stack_fold_vpdpbuuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> % ; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) ret <4 x i32> %2 } -define <4 x i32> @stack_fold_vpdpbuuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) { +define <4 x i32> @stack_fold_vpdpbuuds_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuuds_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill @@ -322,11 +322,11 @@ define <4 x i32> @stack_fold_vpdpbuuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 ; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1) ret <4 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbuuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> @stack_fold_vpdpbuuds_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuuds_256: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -336,11 +336,11 @@ define <8 x i32> @stack_fold_vpdpbuuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3 ; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) ret <8 x i32> %2 } -define <8 x i32> @stack_fold_vpdpbuuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) { +define <8 x i32> 
@stack_fold_vpdpbuuds_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) { ; CHECK-LABEL: stack_fold_vpdpbuuds_256_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill @@ -350,6 +350,6 @@ define <8 x i32> @stack_fold_vpdpbuuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1 ; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload ; CHECK-NEXT: retq %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"() - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1) ret <8 x i32> %2 } diff --git a/llvm/test/CodeGen/X86/usub_inc_iv.ll b/llvm/test/CodeGen/X86/usub_inc_iv.ll index 88bfddb51f2d4..ff06aaabd1b0c 100644 --- a/llvm/test/CodeGen/X86/usub_inc_iv.ll +++ b/llvm/test/CodeGen/X86/usub_inc_iv.ll @@ -303,14 +303,14 @@ define i32 @test_06(ptr %p, i64 %len, i32 %x) { ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[MATH:%.*]], [[BACKEDGE:%.*]] ], [ [[LEN:%.*]], [[ENTRY:%.*]] ] -; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[IV]], i64 1) -; CHECK-NEXT: [[MATH]] = extractvalue { i64, i1 } [[TMP0]], 0 -; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1 +; CHECK-NEXT: [[OV:%.*]] = icmp eq i64 [[IV]], 0 ; CHECK-NEXT: br i1 [[OV]], label [[EXIT:%.*]], label [[BACKEDGE]] ; CHECK: backedge: -; CHECK-NEXT: [[SUNKADDR:%.*]] = mul i64 [[MATH]], 4 +; CHECK-NEXT: [[SUNKADDR:%.*]] = mul i64 [[IV]], 4 ; CHECK-NEXT: [[SUNKADDR1:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[SUNKADDR]] -; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, ptr [[SUNKADDR1]] unordered, align 4 +; CHECK-NEXT: [[SUNKADDR2:%.*]] = getelementptr i8, ptr [[SUNKADDR1]], i64 -4 +; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, ptr [[SUNKADDR2]] unordered, align 4 +; CHECK-NEXT: [[MATH]] = add i64 [[IV]], -1 ; CHECK-NEXT: [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]] ; CHECK-NEXT: br i1 [[COND_2]], label [[FAILURE:%.*]], label [[LOOP]] ; CHECK: exit: diff --git a/llvm/test/CodeGen/X86/vec_shift6.ll b/llvm/test/CodeGen/X86/vec_shift6.ll index 71e659c681d17..219e32c86c848 100644 --- a/llvm/test/CodeGen/X86/vec_shift6.ll +++ b/llvm/test/CodeGen/X86/vec_shift6.ll @@ -28,14 +28,14 @@ define <8 x i16> @test2(<8 x i16> %a) { ; SSE2-LABEL: test2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddw %xmm0, %xmm1 +; SSE2-NEXT: paddw %xmm1, %xmm1 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; SSE2-NEXT: retq ; ; SSE41-LABEL: test2: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: paddw %xmm0, %xmm1 +; SSE41-NEXT: paddw %xmm1, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: retq ; @@ -56,7 +56,7 @@ define <4 x i32> @test3(<4 x i32> %a) { ; SSE2-LABEL: test3: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm1 ; SSE2-NEXT: pslld $2, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq @@ -81,14 +81,14 @@ define <4 x i32> @test4(<4 x i32> %a) { ; SSE2-LABEL: test4: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm1 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; SSE2-NEXT: retq ; ; SSE41-LABEL: test4: ; 
SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: paddd %xmm0, %xmm1 +; SSE41-NEXT: paddd %xmm1, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll b/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll index 23d22e75d1e9d..3f92d2b79c85d 100644 --- a/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll +++ b/llvm/test/CodeGen/X86/vec_unsafe-fp-math.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -enable-unsafe-fp-math -enable-no-signed-zeros-fp-math -mtriple=x86_64-unknown-unknown | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s ; Make sure that vectors get the same benefits as scalars when using unsafe-fp-math. @@ -18,7 +18,7 @@ define <4 x float> @vec_fneg(<4 x float> %x) { ; CHECK: # %bb.0: ; CHECK-NEXT: xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; CHECK-NEXT: retq - %sub = fsub <4 x float> zeroinitializer, %x + %sub = fsub nsz <4 x float> zeroinitializer, %x ret <4 x float> %sub } diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll index 0b98a9388adc1..445e572aff403 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll @@ -679,6 +679,19 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt) ; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512VLBW-NEXT: retq ; +; AVX512VLVBMI2-LABEL: var_funnnel_v32i8: +; AVX512VLVBMI2: # %bb.0: +; AVX512VLVBMI2-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512VLVBMI2-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,64,1,65,2,66,3,67,4,68,5,69,6,70,7,71,8,72,9,73,10,74,11,75,12,76,13,77,14,78,15,79,16,80,17,81,18,82,19,83,20,84,21,85,22,86,23,87,24,88,25,89,26,90,27,91,28,92,29,93,30,94,31,95] +; AVX512VLVBMI2-NEXT: vpermi2b %zmm0, %zmm1, %zmm3 +; AVX512VLVBMI2-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm2, %ymm0 +; AVX512VLVBMI2-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero +; AVX512VLVBMI2-NEXT: vpsllvw %zmm0, %zmm3, %zmm0 +; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0 +; AVX512VLVBMI2-NEXT: vpmovwb %zmm0, %ymm0 +; AVX512VLVBMI2-NEXT: retq +; ; XOPAVX1-LABEL: var_funnnel_v32i8: ; XOPAVX1: # %bb.0: ; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 @@ -1918,6 +1931,17 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512VLBW-NEXT: retq ; +; AVX512VLVBMI2-LABEL: constant_funnnel_v32i8: +; AVX512VLVBMI2: # %bb.0: +; AVX512VLVBMI2-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512VLVBMI2-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,64,1,65,2,66,3,67,4,68,5,69,6,70,7,71,8,72,9,73,10,74,11,75,12,76,13,77,14,78,15,79,16,80,17,81,18,82,19,83,20,84,21,85,22,86,23,87,24,88,25,89,26,90,27,91,28,92,29,93,30,94,31,95] +; AVX512VLVBMI2-NEXT: vpermi2b %zmm0, %zmm1, %zmm2 +; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; 
AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0 +; AVX512VLVBMI2-NEXT: vpmovwb %zmm0, %ymm0 +; AVX512VLVBMI2-NEXT: retq +; ; XOPAVX1-LABEL: constant_funnnel_v32i8: ; XOPAVX1: # %bb.0: ; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll index 5c485592295d3..b4cffcd171b33 100644 --- a/llvm/test/CodeGen/X86/vector-gep.ll +++ b/llvm/test/CodeGen/X86/vector-gep.ll @@ -122,91 +122,87 @@ define <64 x ptr> @AGEP9(ptr %param, <64 x i32> %off) nounwind { ; CHECK-NEXT: movl %esp, %ebp ; CHECK-NEXT: andl $-32, %esp ; CHECK-NEXT: subl $160, %esp -; CHECK-NEXT: vmovdqa %ymm2, %ymm5 -; CHECK-NEXT: vmovdqa %ymm1, %ymm3 -; CHECK-NEXT: vmovdqa %ymm0, %ymm1 -; CHECK-NEXT: vmovdqa 72(%ebp), %ymm0 -; CHECK-NEXT: vmovdqa 40(%ebp), %ymm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm4 -; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm7 -; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4 -; CHECK-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm3 +; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm5 +; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3 +; CHECK-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 104(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 136(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 168(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, (%esp) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vmovdqa 40(%ebp), %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm2 -; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 -; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1 -; CHECK-NEXT: 
vpaddd %xmm1, %xmm1, %xmm1 -; CHECK-NEXT: vpaddd %xmm1, %xmm7, %xmm1 -; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm6 -; CHECK-NEXT: vpaddd %xmm6, %xmm7, %xmm6 -; CHECK-NEXT: vextractf128 $1, %ymm3, %xmm3 -; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3 -; CHECK-NEXT: vpaddd %xmm3, %xmm7, %xmm3 -; CHECK-NEXT: vmovdqa %ymm5, %ymm4 -; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm5 -; CHECK-NEXT: vpaddd %xmm5, %xmm7, %xmm5 -; CHECK-NEXT: vextractf128 $1, %ymm4, %xmm4 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vmovdqa 56(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vmovdqa 72(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, (%esp) # 16-byte Spill +; CHECK-NEXT: vmovdqa 88(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm2 +; CHECK-NEXT: vmovdqa 104(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm1 +; CHECK-NEXT: vmovdqa 120(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa 136(%ebp), %xmm6 +; CHECK-NEXT: vpaddd %xmm6, %xmm6, %xmm6 +; CHECK-NEXT: vpaddd %xmm6, %xmm5, %xmm6 +; CHECK-NEXT: vmovdqa 152(%ebp), %xmm7 +; CHECK-NEXT: vpaddd %xmm7, %xmm7, %xmm7 +; CHECK-NEXT: vpaddd %xmm7, %xmm5, %xmm7 +; CHECK-NEXT: vmovdqa 168(%ebp), %xmm4 ; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4 +; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4 +; CHECK-NEXT: vmovdqa 184(%ebp), %xmm3 +; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3 +; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3 ; CHECK-NEXT: movl 8(%ebp), %eax -; CHECK-NEXT: vmovdqa %xmm4, 80(%eax) -; CHECK-NEXT: vmovdqa %xmm5, 64(%eax) -; CHECK-NEXT: vmovdqa %xmm3, 48(%eax) -; CHECK-NEXT: vmovdqa %xmm6, 32(%eax) -; CHECK-NEXT: vmovdqa %xmm1, 16(%eax) -; CHECK-NEXT: vmovdqa %xmm0, (%eax) -; CHECK-NEXT: vmovdqa %xmm2, 240(%eax) +; CHECK-NEXT: vmovdqa %xmm3, 240(%eax) +; CHECK-NEXT: vmovdqa %xmm4, 224(%eax) +; CHECK-NEXT: vmovdqa %xmm7, 208(%eax) +; CHECK-NEXT: vmovdqa %xmm6, 192(%eax) +; CHECK-NEXT: vmovdqa %xmm0, 176(%eax) +; CHECK-NEXT: vmovdqa %xmm1, 160(%eax) +; CHECK-NEXT: vmovdqa %xmm2, 144(%eax) ; CHECK-NEXT: vmovaps (%esp), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 224(%eax) +; CHECK-NEXT: vmovaps %xmm0, 128(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 208(%eax) +; CHECK-NEXT: vmovaps %xmm0, 112(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 192(%eax) +; CHECK-NEXT: vmovaps %xmm0, 96(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 176(%eax) +; CHECK-NEXT: vmovaps %xmm0, 80(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 160(%eax) +; CHECK-NEXT: vmovaps %xmm0, 64(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 144(%eax) +; CHECK-NEXT: vmovaps %xmm0, 48(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 128(%eax) +; CHECK-NEXT: vmovaps %xmm0, 32(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps 
%xmm0, 112(%eax) +; CHECK-NEXT: vmovaps %xmm0, 16(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 96(%eax) +; CHECK-NEXT: vmovaps %xmm0, (%eax) ; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: popl %ebp ; CHECK-NEXT: vzeroupper diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll index 13f7d68ccb893..33d80f63dbcc8 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -652,7 +652,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE2-NEXT: paddb %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psllw $1, %xmm2 +; SSE2-NEXT: paddw %xmm2, %xmm2 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -678,7 +678,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE41-NEXT: paddb %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psllw $1, %xmm2 +; SSE41-NEXT: paddw %xmm2, %xmm2 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE41-NEXT: psrlw $2, %xmm1 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -701,7 +701,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 @@ -720,7 +720,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX2NOBW-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX2NOBW-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 @@ -739,7 +739,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX512BW-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX512BW-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX512BW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll index 1a5c3730c1839..e43108fe7d784 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll @@ -590,7 +590,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5 ; AVX1-NEXT: vpaddb %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpsllw $1, %xmm3, %xmm5 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm5 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] ; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5 ; 
AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3 @@ -609,7 +609,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 ; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpsllw $1, %xmm2, %xmm3 +; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 ; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2 @@ -633,7 +633,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpsrlw $1, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX2NOBW-NEXT: vpsllw $1, %ymm1, %ymm2 +; AVX2NOBW-NEXT: vpaddw %ymm1, %ymm1, %ymm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 @@ -651,7 +651,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX512BW-NEXT: vpsllw $1, %ymm1, %ymm2 +; AVX512BW-NEXT: vpaddw %ymm1, %ymm1, %ymm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll index 9c56894f0c59c..bf98bcca59c04 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll @@ -485,7 +485,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5 ; AVX512F-NEXT: vpaddb %ymm3, %ymm5, %ymm3 -; AVX512F-NEXT: vpsllw $1, %ymm3, %ymm5 +; AVX512F-NEXT: vpaddw %ymm3, %ymm3, %ymm5 ; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] ; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5 ; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3 @@ -504,7 +504,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3 ; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3 ; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2 -; AVX512F-NEXT: vpsllw $1, %ymm2, %ymm3 +; AVX512F-NEXT: vpaddw %ymm2, %ymm2, %ymm3 ; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3 ; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2 ; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2 @@ -528,7 +528,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm2 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2 ; AVX512BW-NEXT: vpaddb %zmm1, %zmm2, %zmm1 -; AVX512BW-NEXT: vpsllw $1, %zmm1, %zmm2 +; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm2 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2 ; AVX512BW-NEXT: vpsrlw $2, %zmm1, %zmm1 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1 diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll index 13b21a747878b..6e1bf25908302 100644 --- a/llvm/test/CodeGen/X86/vector-mul.ll +++ b/llvm/test/CodeGen/X86/vector-mul.ll @@ -821,10 +821,10 @@ define <16 x i16> @madd_v16i16_3(<16 x 
i16> %a0, <16 x i16> %a1) nounwind { ; X86-SSE-NEXT: andl $-16, %esp ; X86-SSE-NEXT: subl $16, %esp ; X86-SSE-NEXT: movdqa %xmm1, %xmm3 -; X86-SSE-NEXT: paddw %xmm1, %xmm3 +; X86-SSE-NEXT: paddw %xmm3, %xmm3 ; X86-SSE-NEXT: paddw %xmm3, %xmm1 ; X86-SSE-NEXT: movdqa %xmm0, %xmm3 -; X86-SSE-NEXT: paddw %xmm0, %xmm3 +; X86-SSE-NEXT: paddw %xmm3, %xmm3 ; X86-SSE-NEXT: paddw %xmm2, %xmm0 ; X86-SSE-NEXT: paddw %xmm3, %xmm0 ; X86-SSE-NEXT: paddw 8(%ebp), %xmm1 @@ -835,9 +835,9 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind { ; X64-SSE-LABEL: madd_v16i16_3: ; X64-SSE: # %bb.0: ; X64-SSE-NEXT: movdqa %xmm1, %xmm4 -; X64-SSE-NEXT: paddw %xmm1, %xmm4 +; X64-SSE-NEXT: paddw %xmm4, %xmm4 ; X64-SSE-NEXT: movdqa %xmm0, %xmm5 -; X64-SSE-NEXT: paddw %xmm0, %xmm5 +; X64-SSE-NEXT: paddw %xmm5, %xmm5 ; X64-SSE-NEXT: paddw %xmm2, %xmm0 ; X64-SSE-NEXT: paddw %xmm5, %xmm0 ; X64-SSE-NEXT: paddw %xmm3, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll index 227e000c6be7f..ab1feba98b008 100644 --- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll +++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll @@ -907,7 +907,7 @@ define i1 @mask_v8i32_2(<8 x i32> %a0) { ; SSE2-LABEL: mask_v8i32_2: ; SSE2: # %bb.0: ; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: pslld $1, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm0 ; SSE2-NEXT: movmskps %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax ; SSE2-NEXT: sete %al diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll index 2b1cf5b671e53..99dac74d8127b 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll @@ -927,7 +927,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind { ; SSE2-LABEL: constant_shift_v2i64: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddq %xmm0, %xmm1 +; SSE2-NEXT: paddq %xmm1, %xmm1 ; SSE2-NEXT: psllq $7, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq @@ -975,7 +975,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind { ; X86-SSE-LABEL: constant_shift_v2i64: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE-NEXT: paddq %xmm0, %xmm1 +; X86-SSE-NEXT: paddq %xmm1, %xmm1 ; X86-SSE-NEXT: psllq $7, %xmm0 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; X86-SSE-NEXT: retl diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll index 4378ee604459e..89cc7a638fa01 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll @@ -1051,28 +1051,11 @@ define <16 x i8> @shuffle_v16i8_01_03_05_07_09_11_13_15_17_19_21_23_25_27_29_31( ; PR159670 define <16 x i8> @shuffle_v16i8_00_24_01_25_02_26_03_27_04_28_05_29_06_30_07_31(<16 x i8> %a, <16 x i8> %b) { -; SSE2-LABEL: shuffle_v16i8_00_24_01_25_02_26_03_27_04_28_05_29_06_30_07_31: -; SSE2: # %bb.0: -; SSE2-NEXT: pxor %xmm2, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = 
xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: retq -; -; SSSE3-LABEL: shuffle_v16i8_00_24_01_25_02_26_03_27_04_28_05_29_06_30_07_31: -; SSSE3: # %bb.0: -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSSE3-NEXT: retq -; -; SSE41-LABEL: shuffle_v16i8_00_24_01_25_02_26_03_27_04_28_05_29_06_30_07_31: -; SSE41: # %bb.0: -; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE41-NEXT: retq +; SSE-LABEL: shuffle_v16i8_00_24_01_25_02_26_03_27_04_28_05_29_06_30_07_31: +; SSE: # %bb.0: +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] +; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE-NEXT: retq ; ; AVX-LABEL: shuffle_v16i8_00_24_01_25_02_26_03_27_04_28_05_29_06_30_07_31: ; AVX: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll index 2df013d0ff3e3..3279a50a1265b 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll @@ -370,6 +370,16 @@ define <8 x float> @constant_fold_vpermilvar_ps_256() { ret <8 x float> %1 } +define <8 x float> @freeze_vpermilvar_ps_256(<8 x float> %a0) { +; CHECK-LABEL: freeze_vpermilvar_ps_256: +; CHECK: # %bb.0: +; CHECK-NEXT: ret{{[l|q]}} + %s0 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> ) + %f0 = freeze <8 x float> %s0 + %s1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %f0, <8 x i32> ) + ret <8 x float> %s1 +} + define void @PR39483() { ; X86-AVX1-LABEL: PR39483: ; X86-AVX1: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll index 298858a8fcc73..56c0b164b63d6 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll @@ -933,6 +933,16 @@ entry: ret i32 %tmp6 } +define <8 x float> @freeze_permps(<8 x float> %a0) { +; CHECK-LABEL: freeze_permps: +; CHECK: # %bb.0: +; CHECK-NEXT: ret{{[l|q]}} + %s0 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> ) + %f0 = freeze <8 x float> %s0 + %s1 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %f0, <8 x i32> ) + ret <8 x float> %s1 +} + define <32 x i8> @PR27320(<8 x i32> %a0) { ; CHECK-LABEL: PR27320: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll index d3e4906450e43..3590c4d027be7 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll @@ -7,6 +7,7 @@ ; Combine tests involving SSE41 target shuffles (BLEND,INSERTPS,MOVZX) declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) +declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) define <16 x i8> @combine_vpshufb_as_movzx(<16 x i8> %a0) { ; SSE-LABEL: combine_vpshufb_as_movzx: @@ -58,6 +59,22 @@ define <4 x i32> @combine_blend_of_permutes_v4i32(<2 x i64> %a0, <2 x i64> 
%a1) ret <4 x i32> %r } +define <4 x float> @freeze_insertps(<4 x float> %a0, <4 x float> %a1) { +; SSE-LABEL: freeze_insertps: +; SSE: # %bb.0: +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: freeze_insertps: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq + %s0 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 16) + %f0 = freeze <4 x float> %s0 + %s1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a1, <4 x float> %f0, i8 64) + ret <4 x float> %s1 +} + define <16 x i8> @PR50049(ptr %p1, ptr %p2) { ; SSE-LABEL: PR50049: ; SSE: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll index bd2710139d584..0e20b1813040a 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll @@ -896,6 +896,16 @@ define i32 @mask_z1z3_v16i8(<16 x i8> %a0) { ret i32 %4 } +define <16 x i8> @freeze_pshufb_v16i8(<16 x i8> %a0) { +; CHECK-LABEL: freeze_pshufb_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %s0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> ) + %f0 = freeze <16 x i8> %s0 + %s1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %f0, <16 x i8> ) + ret <16 x i8> %s1 +} + define i32 @PR22415(double %a0) { ; SSE-LABEL: PR22415: ; SSE: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll index 5b61de5a3b772..ee9d8a55aeb3e 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll @@ -3550,14 +3550,14 @@ define <8 x i16> @PR141475(i32 %in) { ; SSE-LABEL: PR141475: ; SSE: # %bb.0: ; SSE-NEXT: movd %edi, %xmm0 -; SSE-NEXT: pslld $1, %xmm0 +; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; SSE-NEXT: retq ; ; AVX-LABEL: PR141475: ; AVX: # %bb.0: ; AVX-NEXT: vmovd %edi, %xmm0 -; AVX-NEXT: vpslld $1, %xmm0, %xmm0 +; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; AVX-NEXT: retq %mul = shl i32 %in, 1 diff --git a/llvm/test/CodeGen/X86/vector-shuffle-sse4a.ll b/llvm/test/CodeGen/X86/vector-shuffle-sse4a.ll index b8db14c026bf8..3592ed8a84cb2 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-sse4a.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-sse4a.ll @@ -362,11 +362,9 @@ define <8 x i16> @shuf_089uuuuu(<8 x i16> %a0, <8 x i16> %a1) { define <16 x i8> @shuffle_8_18_uuuuuuuuuuuuuu(<16 x i8> %a, <16 x i8> %b) { ; AMD10H-LABEL: shuffle_8_18_uuuuuuuuuuuuuu: ; AMD10H: # %bb.0: -; AMD10H-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] -; AMD10H-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AMD10H-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; AMD10H-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7] -; AMD10H-NEXT: packuswb %xmm0, %xmm0 +; AMD10H-NEXT: psrld $16, %xmm1 +; AMD10H-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; AMD10H-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; AMD10H-NEXT: retq ; ; BTVER1-LABEL: shuffle_8_18_uuuuuuuuuuuuuu: diff --git a/llvm/test/CodeGen/X86/vector-trunc-usat.ll b/llvm/test/CodeGen/X86/vector-trunc-usat.ll index a5d83a86f295e..0806e4960e48a 100644 --- a/llvm/test/CodeGen/X86/vector-trunc-usat.ll +++ b/llvm/test/CodeGen/X86/vector-trunc-usat.ll @@ -10,7 +10,7 @@ ; RUN: llc < %s 
-mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefixes=SKX diff --git a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll index 54dc107fd0c10..3b93734c24deb 100644 --- a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll +++ b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll @@ -1438,26 +1438,26 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_10(<8 x i16> %a0) { define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddw %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <8 x i16> %a0, %t1 = shl <8 x i16> %t0, @@ -1656,26 +1656,26 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) { define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddw %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <8 x i16> %a0, %t1 = shl <8 x i16> %t0, @@ -2373,40 +2373,40 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_18(<4 x i32> %a0) { define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767] -; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534] +; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddd %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: retq ; ; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-AVX2: # %bb.0: -; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767] -; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534] +; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: retq %t0 = and <4 x i32> %a0, %t1 = shl <4 x i32> %t0, @@ -2675,40 +2675,40 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) { define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224] -; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152] +; 
X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddd %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: retq ; ; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-AVX2: # %bb.0: -; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224] -; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152] +; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: retq %t0 = and <4 x i32> %a0, %t1 = shl <4 x i32> %t0, @@ -3325,26 +3325,26 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_34(<2 x i64> % define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddq %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <2 x i64> %a0, %t1 = shl <2 x i64> %t0, @@ -3543,26 +3543,26 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) { define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddq %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, 
%xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <2 x i64> %a0, %t1 = shl <2 x i64> %t0, diff --git a/llvm/test/DebugInfo/AArch64/abstract-sp-unit.ll b/llvm/test/DebugInfo/AArch64/abstract-sp-unit.ll new file mode 100644 index 0000000000000..559f20122cc47 --- /dev/null +++ b/llvm/test/DebugInfo/AArch64/abstract-sp-unit.ll @@ -0,0 +1,43 @@ +; RUN: llc --filetype=obj -O0 -o - %s | llvm-dwarfdump --verify - + +; Check that abstract DIE for a subprogram referenced from another compile unit +; is emitted in the correct CU. + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64" + +define void @a() !dbg !10 { + br label %for.b.c.c, !dbg !13 + for.b.c.c: + br label %for.b.c.c +} + +!llvm.dbg.cu = !{!0, !6} +!llvm.module.flags = !{!8} + +!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_20, file: !1, emissionKind: FullDebug, globals: !2) +!1 = !DIFile(filename: "foo.cpp", directory: "") +!2 = !{!3} +!3 = !DIGlobalVariableExpression(var: !4, expr: !DIExpression()) +!4 = !DIGlobalVariable(type: !5) +!5 = !DICompositeType(tag: DW_TAG_class_type) +!6 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_20, file: !7, emissionKind: FullDebug) +!7 = !DIFile(filename: "bar.cpp", directory: "") +!8 = !{i32 2, !"Debug Info Version", i32 3} +!10 = distinct !DISubprogram(type: !11, unit: !6) +!11 = !DISubroutineType(types: !12) +!12 = !{} +!13 = !DILocation(scope: !14, inlinedAt: !15) +!14 = distinct !DISubprogram(unit: !6) +!15 = !DILocation(scope: !16, inlinedAt: !25) +!16 = distinct !DISubprogram(type: !11, unit: !6, declaration: !17) +!17 = !DISubprogram(scope: !5, type: !11, spFlags: DISPFlagOptimized, templateParams: !18) +!18 = !{!19} +!19 = !DITemplateTypeParameter(type: !20) +!20 = !DICompositeType(tag: DW_TAG_class_type, scope: !21) +!21 = distinct !DISubprogram(unit: !6, retainedNodes: !22) +!22 = !{!23} +!23 = !DILocalVariable(scope: !21, type: !24) +!24 = !DIBasicType() +!25 = !DILocation(scope: !21, inlinedAt: !26) +!26 = !DILocation(scope: !10) diff --git a/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir b/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir index 5d644c3e5416c..718fa6f4c4cb1 100644 --- a/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir +++ b/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir @@ -366,7 +366,8 @@ frameInfo: maxCallFrameSize: 0 localFrameSize: 144 machineFunctionInfo: - stackSizeSVE: 0 + stackSizeZPR: 0 + stackSizePPR: 0 stack: - { id: 0, name: StackGuardSlot, offset: -40, size: 8, alignment: 8, stack-id: default, local-offset: -8 } diff --git a/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir b/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir index 013d93378a204..b7a9892f13977 100644 --- a/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir +++ b/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir @@ -69,7 +69,8 @@ frameInfo: hasCalls: true maxCallFrameSize: 0 machineFunctionInfo: - stackSizeSVE: 0 + stackSizeZPR: 0 + stackSizePPR: 0 stack: - { id: 0, type: spill-slot, offset: -20, size: 4, alignment: 4, stack-id: default } - { id: 1, type: spill-slot, offset: -8, size: 8, alignment: 8, stack-id: default, diff --git a/llvm/test/DebugInfo/X86/convert-loclist.ll b/llvm/test/DebugInfo/X86/convert-loclist.ll index 720bc46896ced..0fb15d56da703 100644 --- a/llvm/test/DebugInfo/X86/convert-loclist.ll +++ b/llvm/test/DebugInfo/X86/convert-loclist.ll @@ -5,6 +5,13 @@ ; RUN: llc -mtriple=x86_64 
-split-dwarf-file=foo.dwo -filetype=asm -dwarf-op-convert=Enable < %s \ ; RUN: | FileCheck --check-prefix=ASM %s +; RUN: llc -mtriple=x86_64-mingw -filetype=obj < %s \ +; RUN: | llvm-dwarfdump -debug-info -debug-loclists - | FileCheck %s +; RUN: llc -mtriple=x86_64-mingw -split-dwarf-file=foo.dwo -filetype=obj -dwarf-op-convert=Enable < %s \ +; RUN: | llvm-dwarfdump -debug-info -debug-loclists - | FileCheck --check-prefix=SPLIT --check-prefix=CHECK %s +; RUN: llc -mtriple=x86_64-mingw -split-dwarf-file=foo.dwo -filetype=asm -dwarf-op-convert=Enable < %s \ +; RUN: | FileCheck --check-prefix=ASM %s + ; A bit of a brittle test - this is testing the specific DWO_id. The ; alternative would be to test two files with different DW_OP_convert values & ; ensuring the DWO IDs differ when the DW_OP_convert parameter differs. diff --git a/llvm/test/DebugInfo/X86/dynamic-bitfield.ll b/llvm/test/DebugInfo/X86/dynamic-bitfield.ll index c9148ca4582f6..f8935977c64e7 100644 --- a/llvm/test/DebugInfo/X86/dynamic-bitfield.ll +++ b/llvm/test/DebugInfo/X86/dynamic-bitfield.ll @@ -27,7 +27,7 @@ source_filename = "bitfield.c" !6 = !{} !7 = !{!0, !2} !8 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "PackedBits", file: !5, line: 3, size: 40, elements: !9) -!9 = !{!10, !12, !16} +!9 = !{!10, !12, !16, !21} !10 = !DIDerivedType(tag: DW_TAG_member, name: "a", scope: !8, file: !5, line: 5, baseType: !11, size: 8) ; CHECK: DW_TAG_member ; CHECK-NEXT: DW_AT_name{{.*}}"a" @@ -60,5 +60,14 @@ source_filename = "bitfield.c" ; CHECK: DW_AT_bit_size [DW_FORM_exprloc] (DW_OP_lit27) ; CHECK-NEXT: DW_AT_data_bit_offset [DW_FORM_exprloc] (DW_OP_lit13) ; CHECK-NOT: DW_AT_data_member_location -; CHECK: DW_TAG !20 = !{!"clang version 3.9.0 (trunk 267633)"} +!21 = !DIDerivedType(tag: DW_TAG_member, name: "d", scope: !8, file: !5, line: 7, baseType: !13, offset: !DIExpression(DW_OP_constu, 15), flags: DIFlagBitField) +; CHECK: DW_TAG_member +; CHECK-NEXT: DW_AT_name{{.*}}"d" +; CHECK-NOT: DW_TAG +; CHECK-NOT: DW_AT_bit_offset +; CHECK-NOT: DW_AT_byte_size +; CHECK-NOT: DW_AT_bit_size +; CHECK: DW_AT_data_bit_offset [DW_FORM_exprloc] (DW_OP_lit15) +; CHECK-NOT: DW_AT_data_member_location +; CHECK: DW_TAG diff --git a/llvm/test/DebugInfo/X86/ranges_always_default.ll b/llvm/test/DebugInfo/X86/ranges_always_default.ll index 0cb2004a57d9f..0759327f3a741 100644 --- a/llvm/test/DebugInfo/X86/ranges_always_default.ll +++ b/llvm/test/DebugInfo/X86/ranges_always_default.ll @@ -3,11 +3,21 @@ ; RUN: | llvm-dwarfdump -debug-info -debug-addr -debug-rnglists -v - \ ; RUN: | FileCheck --check-prefix=RANGE %s +; RUN: llc -O0 %s -mtriple=x86_64-unknown-win32-gnu -filetype=obj -o - -minimize-addr-in-v5=Default \ +; RUN: -split-dwarf-file=test.dwo \ +; RUN: | llvm-dwarfdump -debug-info -debug-addr -debug-rnglists -v - \ +; RUN: | FileCheck --check-prefix=RANGE %s + ; RUN: llc -O0 %s -mtriple=x86_64-unknown-linux-gnu -filetype=obj -o - -minimize-addr-in-v5=Disabled \ ; RUN: -split-dwarf-file=test.dwo \ ; RUN: | llvm-dwarfdump -debug-info -debug-addr -debug-rnglists -v - \ ; RUN: | FileCheck --check-prefix=NORANGE %s +; RUN: llc -O0 %s -mtriple=x86_64-unknown-win32-gnu -filetype=obj -o - -minimize-addr-in-v5=Disabled \ +; RUN: -split-dwarf-file=test.dwo \ +; RUN: | llvm-dwarfdump -debug-info -debug-addr -debug-rnglists -v - \ +; RUN: | FileCheck --check-prefix=NORANGE %s + ; A simpler example than used in ranges_always.ll, since this doesn't test all ; the nuances of where minimizing ranges are useful. 
This is only testing the ; defaulting behavior - specifically that the "ranges" version of the diff --git a/llvm/test/DebugInfo/X86/split-dwarf-v5-ranges.ll b/llvm/test/DebugInfo/X86/split-dwarf-v5-ranges.ll index 0174efea1e0d9..01b1d8fb65a09 100644 --- a/llvm/test/DebugInfo/X86/split-dwarf-v5-ranges.ll +++ b/llvm/test/DebugInfo/X86/split-dwarf-v5-ranges.ll @@ -1,16 +1,22 @@ ; RUN: llc -split-dwarf-file=foo.dwo -mtriple=x86_64-unknown-linux-gnu -filetype=obj %s -o %t32 ; RUN: llvm-dwarfdump -v -debug-info -debug-rnglists %t32 | \ -; RUN: FileCheck %s --check-prefixes=CHECK,DWARF32 +; RUN: FileCheck %s --check-prefixes=CHECK,DWARF32,CHECK-ELF + +; RUN: llc -split-dwarf-file=foo.dwo -mtriple=x86_64-unknown-win32-gnu -filetype=obj %s -o %t32 +; RUN: llvm-dwarfdump -v -debug-info -debug-rnglists %t32 | \ +; RUN: FileCheck %s --check-prefixes=CHECK,DWARF32,CHECK-COFF ; RUN: llc -dwarf64 -split-dwarf-file=foo.dwo -mtriple=x86_64-unknown-linux-gnu -filetype=obj %s -o %t64 ; RUN: llvm-dwarfdump -v -debug-info -debug-rnglists %t64 | \ -; RUN: FileCheck %s --check-prefixes=CHECK,DWARF64 +; RUN: FileCheck %s --check-prefixes=CHECK,DWARF64,CHECK-ELF ; CHECK: .debug_info contents: ; CHECK: .debug_info.dwo contents: ; CHECK: DW_AT_ranges [DW_FORM_rnglistx] (indexed (0x0) rangelist = 0x[[#%.8x,RNG_OFF:]] -; CHECK: [0x0000000000000001, 0x000000000000000c) ".text" -; CHECK: [0x000000000000000e, 0x0000000000000013) ".text") +; CHECK-ELF: [0x[[#%.16x,BEGIN1:0x01]], 0x[[#%.16x,END1:0x0c]]) ".text" +; CHECK-ELF: [0x[[#%.16x,BEGIN2:0x0e]], 0x[[#%.16x,END2:0x13]]) ".text") +; CHECK-COFF: [0x[[#%.16x,BEGIN1:0x04]], 0x[[#%.16x,END1:0x0f]]) ".text" +; CHECK-COFF: [0x[[#%.16x,BEGIN2:0x11]], 0x[[#%.16x,END2:0x17]]) ".text") ; CHECK: .debug_rnglists.dwo contents: ; DWARF32: 0x00000000: range list header: length = 0x00000015, format = DWARF32, version = 0x0005, addr_size = 0x08, seg_size = 0x00, offset_entry_count = 0x00000001 @@ -21,8 +27,8 @@ ; CHECK: ] ; CHECK: ranges: ; CHECK: 0x[[#RNG_OFF]]: [DW_RLE_base_addressx]: 0x0000000000000000 -; CHECK: 0x[[#RNG_OFF+2]]: [DW_RLE_offset_pair ]: 0x0000000000000001, 0x000000000000000c => [0x0000000000000001, 0x000000000000000c) -; CHECK: 0x[[#RNG_OFF+5]]: [DW_RLE_offset_pair ]: 0x000000000000000e, 0x0000000000000013 => [0x000000000000000e, 0x0000000000000013) +; CHECK: 0x[[#RNG_OFF+2]]: [DW_RLE_offset_pair ]: 0x[[#%.16x,BEGIN1]], 0x[[#%.16x,END1]] => [0x[[#%.16x,BEGIN1]], 0x[[#%.16x,END1]]) +; CHECK: 0x[[#RNG_OFF+5]]: [DW_RLE_offset_pair ]: 0x[[#%.16x,BEGIN2]], 0x[[#%.16x,END2]] => [0x[[#%.16x,BEGIN2]], 0x[[#%.16x,END2]]) ; CHECK: 0x[[#RNG_OFF+8]]: [DW_RLE_end_of_list ] ; Function Attrs: noinline optnone uwtable diff --git a/llvm/test/DebugInfo/X86/x86fixupsetcc-debug-instr-num.mir b/llvm/test/DebugInfo/X86/x86fixupsetcc-debug-instr-num.mir new file mode 100644 index 0000000000000..b7149f0155ae3 --- /dev/null +++ b/llvm/test/DebugInfo/X86/x86fixupsetcc-debug-instr-num.mir @@ -0,0 +1,54 @@ +# RUN: llc %s --run-pass=x86-fixup-setcc -o - | FileCheck %s + +## Check the debug-instr-number transfers from MOVZX32rr8 to the SETCC +## after the mov is replaced with an INSERT_SUBREG, updating the substitutions +## table.
+ +# CHECK: debugValueSubstitutions: +# CHECK: - { srcinst: 1, srcop: 0, dstinst: 2, dstop: 0, subreg: 0 } + +# CHECK: %[[#]]:gr8 = SETCCr 15, implicit $eflags, debug-instr-number 2 +# CHECK: INSERT_SUBREG +# CHECK-NOT: debug-instr-number +# CHECK-NEXT: DBG_INSTR_REF ![[#]], !DIExpression(DW_OP_LLVM_arg, 0), dbg-instr-ref(1, 0) + +--- | + source_filename = "reduced.ll" + target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + target triple = "x86_64-unknown-linux-gnu" + + define i32 @main(i32 %call2) { + entry: + %cmp12 = icmp sgt i32 %call2, 0 + %conv13 = zext i1 %cmp12 to i32 + #dbg_value(i32 %conv13, !4, !DIExpression(), !8) + ret i32 %conv13 + } + + !llvm.dbg.cu = !{!0} + !llvm.module.flags = !{!3} + + !0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 22.0.0git", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, globals: !2, splitDebugInlining: false, nameTableKind: None) + !1 = !DIFile(filename: "test.c", directory: "/") + !2 = !{} + !3 = !{i32 2, !"Debug Info Version", i32 3} + !4 = !DILocalVariable(name: "v_3", scope: !5, file: !1, line: 10, type: !7) + !5 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 5, type: !6, scopeLine: 6, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2, keyInstructions: true) + !6 = !DISubroutineType(types: !2) + !7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) + !8 = !DILocation(line: 0, scope: !5) +... +--- +name: main +body: | + bb.0.entry: + liveins: $edi + + %0:gr32 = COPY $edi + TEST32rr %0, %0, implicit-def $eflags + %1:gr8 = SETCCr 15, implicit $eflags + %2:gr32 = MOVZX32rr8 killed %1, debug-instr-number 1 + DBG_INSTR_REF !4, !DIExpression(DW_OP_LLVM_arg, 0), dbg-instr-ref(1, 0), debug-location !8 + $eax = COPY %2 + RET 0, $eax +... 
diff --git a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll index f3312ce0c5bd2..919f16b103090 100644 --- a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll +++ b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll @@ -1256,7 +1256,31 @@ define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vloxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0:%.*]], [[TMP1:%.*]], i64 [[TMP2:%.*]]) +; CHECK-NEXT: [[TMP4:%.*]] = zext [[TMP1:%.*]] to +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]] +; CHECK: 7: +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]] +; CHECK: 11: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP5]], i64 [[IV]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4) +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vloxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0]], [[TMP1]], i64 [[TMP2]]) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1281,7 +1305,31 @@ define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vloxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]], i64 1) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1305,7 
+1353,31 @@ define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16( @llvm.riscv.vloxei.nxv1f32.p0.nxv1i16.i64( poison, ptr [[TMP0:%.*]], [[TMP1:%.*]], i64 [[TMP2:%.*]]) +; CHECK-NEXT: [[TMP4:%.*]] = zext [[TMP1:%.*]] to +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]] +; CHECK: 7: +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]] +; CHECK: 11: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP5]], i64 [[IV]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4) +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vloxei.nxv1f32.p0.nxv1i16.i64( poison, ptr [[TMP0]], [[TMP1]], i64 [[TMP2]]) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1328,7 +1400,31 @@ define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vluxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0:%.*]], [[TMP1:%.*]], i64 [[TMP2:%.*]]) +; CHECK-NEXT: [[TMP4:%.*]] = zext [[TMP1:%.*]] to +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]] +; CHECK: 7: +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]] +; CHECK: 11: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP5]], i64 [[IV]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4) +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vluxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0]], [[TMP1]], i64 [[TMP2]]) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1353,7 +1449,31 @@ define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vluxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], 
label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]], i64 1) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1377,7 +1497,31 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, < ; CHECK-LABEL: @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], i64 [[TMP3:%.*]]) +; CHECK-NEXT: [[TMP5:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP3:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP16:%.*]] +; CHECK: 8: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP9]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP8]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ] +; CHECK-NEXT: [[TMP11:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP15]] +; CHECK: 12: +; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[TMP6]], i64 [[IV]] +; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP14]], i64 4) +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP10]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], i64 [[TMP3]]) ; CHECK-NEXT: ret void ; entry: @@ -1401,7 +1545,31 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-LABEL: @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]]) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; 
CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]]) ; CHECK-NEXT: ret void ; entry: @@ -1425,7 +1593,31 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, < ; CHECK-LABEL: @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], i64 [[TMP3:%.*]]) +; CHECK-NEXT: [[TMP5:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP3:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP16:%.*]] +; CHECK: 8: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP9]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP8]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ] +; CHECK-NEXT: [[TMP11:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP15]] +; CHECK: 12: +; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[TMP6]], i64 [[IV]] +; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP14]], i64 4) +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP10]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], i64 [[TMP3]]) ; CHECK-NEXT: ret void ; entry: @@ -1449,7 +1641,31 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-LABEL: @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]]) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp 
ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]]) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/Instrumentation/AddressSanitizer/coro-byval-param.ll b/llvm/test/Instrumentation/AddressSanitizer/coro-byval-param.ll index 290f1cbd38cdf..b0aec4e0426e6 100644 --- a/llvm/test/Instrumentation/AddressSanitizer/coro-byval-param.ll +++ b/llvm/test/Instrumentation/AddressSanitizer/coro-byval-param.ll @@ -58,7 +58,7 @@ coro.free: ; preds = %cleanup33 br label %coro.ret coro.ret: ; preds = %coro.free, %cleanup33, %init.ready, %coro.init - %10 = call i1 @llvm.coro.end(ptr null, i1 false, token none) #10 + call void @llvm.coro.end(ptr null, i1 false, token none) #10 ret ptr %call2 } @@ -105,7 +105,7 @@ declare i8 @llvm.coro.suspend(token, i1) #2 declare void @_ZN4task12promise_type13final_suspendEv(ptr nonnull dereferenceable(1)) local_unnamed_addr #7 align 2 ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 ; Function Attrs: nobuiltin nounwind declare void @_ZdlPv(ptr) local_unnamed_addr #8 diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/coro-byval-param.ll b/llvm/test/Instrumentation/HWAddressSanitizer/coro-byval-param.ll index 0289b33a45882..064565ca2f3b2 100644 --- a/llvm/test/Instrumentation/HWAddressSanitizer/coro-byval-param.ll +++ b/llvm/test/Instrumentation/HWAddressSanitizer/coro-byval-param.ll @@ -58,7 +58,7 @@ coro.free: ; preds = %cleanup33 br label %coro.ret coro.ret: ; preds = %coro.free, %cleanup33, %init.ready, %coro.init - %10 = call i1 @llvm.coro.end(ptr null, i1 false, token none) #10 + call void @llvm.coro.end(ptr null, i1 false, token none) #10 ret ptr %call2 } @@ -105,7 +105,7 @@ declare i8 @llvm.coro.suspend(token, i1) #2 declare void @_ZN4task12promise_type13final_suspendEv(ptr nonnull dereferenceable(1)) local_unnamed_addr #7 align 2 ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 ; Function Attrs: nobuiltin nounwind declare void @_ZdlPv(ptr) local_unnamed_addr #8 diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll index 99e9ab939847c..864f6a973334e 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-ld1.ll @@ 
-877,7 +877,7 @@ define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, ptr ; CHECK-LABEL: define %struct.__neon_int8x16x2_t @ld2lane_16b( ; CHECK-SAME: <16 x i8> [[L1:%.*]], <16 x i8> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -904,8 +904,8 @@ define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 ; CHECK-LABEL: define %struct.__neon_int8x16x3_t @ld3lane_16b( ; CHECK-SAME: <16 x i8> [[L1:%.*]], <16 x i8> [[L2:%.*]], <16 x i8> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -936,9 +936,9 @@ define %struct.__neon_int8x16x4_t @ld4lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 ; CHECK-LABEL: define %struct.__neon_int8x16x4_t @ld4lane_16b( ; CHECK-SAME: <16 x i8> [[L1:%.*]], <16 x i8> [[L2:%.*]], <16 x i8> [[L3:%.*]], <16 x i8> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -977,7 +977,7 @@ define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, ptr ; CHECK-LABEL: define %struct.__neon_int16x8x2_t @ld2lane_8h( ; CHECK-SAME: <8 x i16> [[L1:%.*]], <8 x i16> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -1004,8 +1004,8 @@ define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x ; CHECK-LABEL: define %struct.__neon_int16x8x3_t @ld3lane_8h( ; CHECK-SAME: <8 x i16> [[L1:%.*]], <8 x i16> [[L2:%.*]], <8 x i16> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1036,9 +1036,9 @@ define %struct.__neon_int16x8x4_t @ld4lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x ; CHECK-LABEL: define %struct.__neon_int16x8x4_t @ld4lane_8h( ; CHECK-SAME: <8 x i16> [[L1:%.*]], <8 x i16> [[L2:%.*]], <8 x i16> [[L3:%.*]], <8 x i16> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -1077,7 +1077,7 @@ define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, ptr ; CHECK-LABEL: define %struct.__neon_int32x4x2_t @ld2lane_4s( ; CHECK-SAME: <4 x i32> [[L1:%.*]], <4 x i32> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -1104,8 +1104,8 @@ define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x ; CHECK-LABEL: define %struct.__neon_int32x4x3_t @ld3lane_4s( ; CHECK-SAME: <4 x i32> [[L1:%.*]], <4 x i32> [[L2:%.*]], <4 x i32> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, 
align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1136,9 +1136,9 @@ define %struct.__neon_int32x4x4_t @ld4lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x ; CHECK-LABEL: define %struct.__neon_int32x4x4_t @ld4lane_4s( ; CHECK-SAME: <4 x i32> [[L1:%.*]], <4 x i32> [[L2:%.*]], <4 x i32> [[L3:%.*]], <4 x i32> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -1177,7 +1177,7 @@ define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, ptr ; CHECK-LABEL: define %struct.__neon_int64x2x2_t @ld2lane_2d( ; CHECK-SAME: <2 x i64> [[L1:%.*]], <2 x i64> [[L2:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -1204,8 +1204,8 @@ define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x ; CHECK-LABEL: define %struct.__neon_int64x2x3_t @ld3lane_2d( ; CHECK-SAME: <2 x i64> [[L1:%.*]], <2 x i64> [[L2:%.*]], <2 x i64> [[L3:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1236,9 +1236,9 @@ define %struct.__neon_int64x2x4_t @ld4lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x ; CHECK-LABEL: define %struct.__neon_int64x2x4_t @ld4lane_2d( ; CHECK-SAME: <2 x i64> [[L1:%.*]], <2 x i64> [[L2:%.*]], <2 x i64> [[L3:%.*]], <2 x i64> [[L4:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -2304,7 +2304,7 @@ define <16 x i8> @ld1_16b(<16 x i8> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <16 x i8> @ld1_16b( ; CHECK-SAME: <16 x i8> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2332,7 +2332,7 @@ define <8 x i16> @ld1_8h(<8 x i16> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <8 x i16> @ld1_8h( ; CHECK-SAME: <8 x i16> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2360,7 +2360,7 @@ define <4 x i32> @ld1_4s(<4 x i32> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <4 x i32> @ld1_4s( ; CHECK-SAME: <4 x i32> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2388,7 +2388,7 @@ define <4 x float> @ld1_4s_float(<4 x float> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <4 x float> @ld1_4s_float( ; 
CHECK-SAME: <4 x float> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2416,7 +2416,7 @@ define <2 x i64> @ld1_2d(<2 x i64> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <2 x i64> @ld1_2d( ; CHECK-SAME: <2 x i64> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2444,7 +2444,7 @@ define <2 x double> @ld1_2d_double(<2 x double> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <2 x double> @ld1_2d_double( ; CHECK-SAME: <2 x double> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2496,7 +2496,7 @@ define <8 x i8> @ld1_8b(<8 x i8> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <8 x i8> @ld1_8b( ; CHECK-SAME: <8 x i8> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2524,7 +2524,7 @@ define <4 x i16> @ld1_4h(<4 x i16> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <4 x i16> @ld1_4h( ; CHECK-SAME: <4 x i16> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2552,7 +2552,7 @@ define <2 x i32> @ld1_2s(<2 x i32> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <2 x i32> @ld1_2s( ; CHECK-SAME: <2 x i32> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 
8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2580,7 +2580,7 @@ define <2 x float> @ld1_2s_float(<2 x float> %V, ptr %bar) #0 { ; Make sure we are using the operands defined by the ABI ; CHECK-LABEL: define <2 x float> @ld1_2s_float( ; CHECK-SAME: <2 x float> [[V:%.*]], ptr [[BAR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2611,8 +2611,8 @@ define void @ld1r_2s_from_dup(ptr nocapture %a, ptr nocapture %b, ptr nocapture ; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]], ptr captures(none) [[DIFF:%.*]]) #[[ATTR2:[0-9]+]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP0]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll index 632268e08022c..1319544bf6662 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-smaxv.ll @@ -122,7 +122,7 @@ define <8 x i8> @test_vmaxv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { ; CHECK-LABEL: define <8 x i8> @test_vmaxv_s8_used_by_laneop( ; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]]) @@ -146,7 +146,7 @@ define <4 x i16> @test_vmaxv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0 ; CHECK-LABEL: define <4 x i16> @test_vmaxv_s16_used_by_laneop( ; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> 
[[TMP0]]) @@ -170,7 +170,7 @@ define <2 x i32> @test_vmaxv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0 ; CHECK-LABEL: define <2 x i32> @test_vmaxv_s32_used_by_laneop( ; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]]) @@ -190,7 +190,7 @@ define <16 x i8> @test_vmaxvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0 ; CHECK-LABEL: define <16 x i8> @test_vmaxvq_s8_used_by_laneop( ; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]]) @@ -214,7 +214,7 @@ define <8 x i16> @test_vmaxvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) # ; CHECK-LABEL: define <8 x i16> @test_vmaxvq_s16_used_by_laneop( ; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]]) @@ -238,7 +238,7 @@ define <4 x i32> @test_vmaxvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) # ; CHECK-LABEL: define <4 x i32> @test_vmaxvq_s32_used_by_laneop( ; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]]) diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll index 267061027cd52..272a910f2fc20 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-sminv.ll @@ -122,7 +122,7 @@ define <8 x i8> @test_vminv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { ; CHECK-LABEL: define <8 x i8> @test_vminv_s8_used_by_laneop( ; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]]) @@ -146,7 +146,7 @@ define <4 x i16> @test_vminv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0 ; CHECK-LABEL: define <4 x i16> @test_vminv_s16_used_by_laneop( ; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]]) @@ -170,7 +170,7 @@ define <2 x i32> @test_vminv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0 ; CHECK-LABEL: define <2 x i32> @test_vminv_s32_used_by_laneop( ; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]]) @@ -190,7 +190,7 @@ define <16 x i8> @test_vminvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0 ; CHECK-LABEL: define <16 x i8> @test_vminvq_s8_used_by_laneop( ; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]]) @@ -214,7 +214,7 @@ define <8 x i16> @test_vminvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) # ; CHECK-LABEL: define <8 x i16> @test_vminvq_s16_used_by_laneop( ; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]]) @@ -238,7 +238,7 @@ define <4 x i32> @test_vminvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) # ; CHECK-LABEL: define <4 x i32> @test_vminvq_s32_used_by_laneop( ; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 
add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]]) diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll index deeb1d4b6ff85..fedf45f0d9166 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1.ll @@ -15,9 +15,9 @@ define void @st2_8b(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory { ; ; CHECK-LABEL: define void @st2_8b( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -41,8 +41,8 @@ define void @st2_8b_undefA(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m ; ; CHECK-LABEL: define void @st2_8b_undefA( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -66,7 +66,7 @@ define void @st2_8b_undefB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m ; ; CHECK-LABEL: define void @st2_8b_undefB( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 @@ -91,7 +91,7 @@ define void @st2_8b_undefAB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_ ; ; CHECK-LABEL: define void @st2_8b_undefAB( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576 @@ -115,10 +115,10 @@ define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sani ; ; CHECK-LABEL: define void @st3_8b( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -142,9 +142,9 @@ define void @st3_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi ; ; CHECK-LABEL: define void @st3_8b_undefA( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -168,9 +168,9 @@ define void @st3_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi ; ; CHECK-LABEL: define void @st3_8b_undefB( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -194,9 +194,9 @@ define void 
@st3_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi ; ; CHECK-LABEL: define void @st3_8b_undefC( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -220,8 +220,8 @@ define void @st3_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw ; ; CHECK-LABEL: define void @st3_8b_undefAB( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -245,8 +245,8 @@ define void @st3_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw ; ; CHECK-LABEL: define void @st3_8b_undefAC( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -270,7 +270,7 @@ define void @st3_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw ; ; CHECK-LABEL: define void @st3_8b_undefBC( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 @@ -295,7 +295,7 @@ define void @st3_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) noun ; ; CHECK-LABEL: define void @st3_8b_undefABC( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x 
i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576 @@ -319,11 +319,11 @@ define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) ; ; CHECK-LABEL: define void @st4_8b( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 @@ -347,10 +347,10 @@ define void @st4_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p ; ; CHECK-LABEL: define void @st4_8b_undefA( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -374,10 +374,10 @@ define void @st4_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p ; ; CHECK-LABEL: define void @st4_8b_undefB( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 
x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -401,10 +401,10 @@ define void @st4_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p ; ; CHECK-LABEL: define void @st4_8b_undefC( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -428,10 +428,10 @@ define void @st4_8b_undefD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p ; ; CHECK-LABEL: define void @st4_8b_undefD( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -455,9 +455,9 @@ define void @st4_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> 
%C, <8 x i8> %D, ; ; CHECK-LABEL: define void @st4_8b_undefAB( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -481,9 +481,9 @@ define void @st4_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ; ; CHECK-LABEL: define void @st4_8b_undefAC( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -507,9 +507,9 @@ define void @st4_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ; ; CHECK-LABEL: define void @st4_8b_undefBC( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -533,9 +533,9 @@ define void @st4_8b_undefBD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ; ; CHECK-LABEL: define void @st4_8b_undefBD( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -559,8 +559,8 @@ define void @st4_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ; ; CHECK-LABEL: define void @st4_8b_undefABC( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -584,8 +584,8 @@ define void @st4_8b_undefABD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ; ; CHECK-LABEL: define void @st4_8b_undefABD( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -609,8 +609,8 @@ define void @st4_8b_undefACD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ; ; CHECK-LABEL: define void @st4_8b_undefACD( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -634,7 +634,7 @@ define void @st4_8b_undefBCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ; ; CHECK-LABEL: define void @st4_8b_undefBCD( ; CHECK-SAME: <8 x i8> 
[[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 @@ -659,7 +659,7 @@ define void @st4_8b_undefABCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D ; ; CHECK-LABEL: define void @st4_8b_undefABCD( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576 @@ -689,9 +689,9 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor ; ; CHECK-LABEL: define void @st2_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -715,10 +715,10 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind ; ; CHECK-LABEL: define void @st3_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -742,11 +742,11 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr ; ; CHECK-LABEL: define void @st4_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 
x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 @@ -776,9 +776,9 @@ define void @st2_4h(<4 x i16> %A, <4 x i16> %B, ptr %P) nounwind sanitize_memory ; ; CHECK-LABEL: define void @st2_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -802,10 +802,10 @@ define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P) nounwind s ; ; CHECK-LABEL: define void @st3_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -829,11 +829,11 @@ define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr ; ; CHECK-LABEL: define void @st4_4h( 
; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], <4 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 @@ -863,9 +863,9 @@ define void @st2_8h(<8 x i16> %A, <8 x i16> %B, ptr %P) nounwind sanitize_memory ; ; CHECK-LABEL: define void @st2_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -889,10 +889,10 @@ define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P) nounwind s ; ; CHECK-LABEL: define void @st3_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -916,11 +916,11 @@ define void @st4_8h(<8 x i16> %A, 
<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr ; ; CHECK-LABEL: define void @st4_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 @@ -950,9 +950,9 @@ define void @st2_2s(<2 x i32> %A, <2 x i32> %B, ptr %P) nounwind sanitize_memory ; ; CHECK-LABEL: define void @st2_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -976,10 +976,10 @@ define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P) nounwind s ; ; CHECK-LABEL: define void @st3_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 
[[TMP5]], 193514046488576 @@ -1003,11 +1003,11 @@ define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr ; ; CHECK-LABEL: define void @st4_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], <2 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 @@ -1035,9 +1035,9 @@ define void @st2_4s(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind sanitize_memory ; ; CHECK-LABEL: define void @st2_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1061,10 +1061,10 @@ define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P) nounwind s ; ; CHECK-LABEL: define void @st3_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -1088,11 +1088,11 @@ define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr ; ; CHECK-LABEL: define void @st4_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 @@ -1123,9 +1123,9 @@ define void @st2_1d(<1 x i64> %A, <1 x i64> %B, ptr %P) nounwind sanitize_memory ; ; CHECK-LABEL: define void @st2_1d( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1149,10 +1149,10 @@ define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P) nounwind s ; ; CHECK-LABEL: define void @st3_1d( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -1176,11 +1176,11 @@ define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr ; ; CHECK-LABEL: define void @st4_1d( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 @@ -1210,9 +1210,9 @@ define void @st2_2d(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory ; ; CHECK-LABEL: define void @st2_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1236,8 +1236,8 @@ define void @st2_2d_undefA(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize ; ; CHECK-LABEL: define void @st2_2d_undefA( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -1261,7 +1261,7 @@ define void 
@st2_2d_undefB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize ; ; CHECK-LABEL: define void @st2_2d_undefB( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 @@ -1286,7 +1286,7 @@ define void @st2_2d_undefAB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitiz ; ; CHECK-LABEL: define void @st2_2d_undefAB( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576 @@ -1310,10 +1310,10 @@ define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind s ; ; CHECK-LABEL: define void @st3_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -1337,9 +1337,9 @@ define void @st3_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou ; ; CHECK-LABEL: define void @st3_2d_undefA( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: 
[[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1363,9 +1363,9 @@ define void @st3_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou ; ; CHECK-LABEL: define void @st3_2d_undefB( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1389,9 +1389,9 @@ define void @st3_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou ; ; CHECK-LABEL: define void @st3_2d_undefC( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1415,8 +1415,8 @@ define void @st3_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no ; ; CHECK-LABEL: define void @st3_2d_undefAB( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -1440,8 +1440,8 @@ define void @st3_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no ; ; CHECK-LABEL: define void @st3_2d_undefAC( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = 
load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -1465,7 +1465,7 @@ define void @st3_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no ; ; CHECK-LABEL: define void @st3_2d_undefBC( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 @@ -1490,7 +1490,7 @@ define void @st3_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) n ; ; CHECK-LABEL: define void @st3_2d_undefABC( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576 @@ -1514,11 +1514,11 @@ define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr ; ; CHECK-LABEL: define void @st4_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 @@ -1546,10 +1546,10 @@ define void @st4_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> % ; ; CHECK-LABEL: define void @st4_2d_undefA( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -1573,10 +1573,10 @@ define void @st4_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> % ; ; CHECK-LABEL: define void @st4_2d_undefB( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -1600,10 +1600,10 @@ define void @st4_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> % ; ; CHECK-LABEL: define void @st4_2d_undefC( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -1627,10 +1627,10 @@ define void @st4_2d_undefD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> % ; ; CHECK-LABEL: define void @st4_2d_undefD( ; CHECK-SAME: <2 
x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 @@ -1654,9 +1654,9 @@ define void @st4_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void @st4_2d_undefAB( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1680,9 +1680,9 @@ define void @st4_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void @st4_2d_undefAC( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1706,9 +1706,9 @@ define void @st4_2d_undefAD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void 
@st4_2d_undefAD( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1732,9 +1732,9 @@ define void @st4_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void @st4_2d_undefBC( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1758,9 +1758,9 @@ define void @st4_2d_undefBD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void @st4_2d_undefBD( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1784,9 +1784,9 @@ define void @st4_2d_undefCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void @st4_2d_undefCD( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: 
[[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 @@ -1810,8 +1810,8 @@ define void @st4_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void @st4_2d_undefABC( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -1835,8 +1835,8 @@ define void @st4_2d_undefABD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void @st4_2d_undefABD( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -1860,8 +1860,8 @@ define void @st4_2d_undefACD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void @st4_2d_undefACD( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576 @@ -1885,7 +1885,7 @@ define void @st4_2d_undefBCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> ; ; CHECK-LABEL: define void @st4_2d_undefBCD( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 @@ -1910,7 +1910,7 @@ define void @st4_2d_undefABCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64 ; ; CHECK-LABEL: define void @st4_2d_undefABCD( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576 diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll index 9ed364df3e677..0617c8c7027fe 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_lane.ll @@ -13,7 +13,7 @@ target triple = "aarch64--linux-android9001" define void @st1lane_16b(<16 x i8> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -21,7 +21,7 @@ define void @st1lane_16b(<16 x i8> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0:![0-9]+]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]] ; CHECK-NEXT: unreachable @@ -42,7 +42,7 @@ define void @st1lane_16b(<16 x i8> %A, ptr %D) sanitize_memory { define void @st1lane0_16b(<16 x i8> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -50,7 +50,7 @@ define void @st1lane0_16b(<16 x i8> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; 
CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -71,7 +71,7 @@ define void @st1lane0_16b(<16 x i8> %A, ptr %D) sanitize_memory { define void @st1lane0u_16b(<16 x i8> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -79,7 +79,7 @@ define void @st1lane0u_16b(<16 x i8> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -100,8 +100,8 @@ define void @st1lane0u_16b(<16 x i8> %A, ptr %D) sanitize_memory { define void @st1lane_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -109,7 +109,7 @@ define void @st1lane_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -130,8 +130,8 @@ define void @st1lane_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane0_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -139,7 +139,7 @@ define void @st1lane0_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <16 x i8> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <16 x i8> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -160,7 +160,7 @@ define void @st1lane0_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) sanitize_memory define void @st1lane_8h(<8 x i16> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -168,7 +168,7 @@ define void @st1lane_8h(<8 x i16> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -189,7 +189,7 @@ define void @st1lane_8h(<8 x i16> %A, ptr %D) sanitize_memory { define void @st1lane0_8h(<8 x i16> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -197,7 +197,7 @@ define void @st1lane0_8h(<8 x i16> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -218,7 +218,7 @@ define void @st1lane0_8h(<8 x i16> %A, ptr %D) sanitize_memory { define 
void @st1lane0u_8h(<8 x i16> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -226,7 +226,7 @@ define void @st1lane0u_8h(<8 x i16> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -247,8 +247,8 @@ define void @st1lane0u_8h(<8 x i16> %A, ptr %D) sanitize_memory { define void @st1lane_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -256,7 +256,7 @@ define void @st1lane_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -277,8 +277,8 @@ define void @st1lane_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane0_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -286,7 +286,7 @@ define void @st1lane0_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i16> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i16> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -307,7 +307,7 @@ define void @st1lane0_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane_4s(<4 x i32> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -315,7 +315,7 @@ define void @st1lane_4s(<4 x i32> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -336,7 +336,7 @@ define void @st1lane_4s(<4 x i32> %A, ptr %D) sanitize_memory { define void @st1lane0_4s(<4 x i32> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -344,7 +344,7 @@ define void @st1lane0_4s(<4 x i32> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -365,7 +365,7 @@ define void @st1lane0_4s(<4 x i32> %A, ptr %D) sanitize_memory { define void @st1lane0u_4s(<4 x i32> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -373,7 +373,7 @@ define void @st1lane0u_4s(<4 x i32> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -394,8 +394,8 @@ define void @st1lane0u_4s(<4 x i32> %A, ptr %D) sanitize_memory { define void @st1lane_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -403,7 +403,7 @@ define void @st1lane_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -424,8 +424,8 @@ define void @st1lane_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane0_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -433,7 +433,7 @@ define void @st1lane0_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i32> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: 
br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -454,7 +454,7 @@ define void @st1lane0_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane_4s_float(<4 x float> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_4s_float( ; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -462,7 +462,7 @@ define void @st1lane_4s_float(<4 x float> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -483,7 +483,7 @@ define void @st1lane_4s_float(<4 x float> %A, ptr %D) sanitize_memory { define void @st1lane0_4s_float(<4 x float> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_4s_float( ; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -491,7 +491,7 @@ define void @st1lane0_4s_float(<4 x float> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -512,7 +512,7 @@ define void @st1lane0_4s_float(<4 x float> %A, ptr %D) sanitize_memory { define void @st1lane0u_4s_float(<4 x float> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_4s_float( ; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -520,7 +520,7 @@ define void @st1lane0u_4s_float(<4 x float> %A, ptr %D) sanitize_memory { 
; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -541,8 +541,8 @@ define void @st1lane0u_4s_float(<4 x float> %A, ptr %D) sanitize_memory { define void @st1lane_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_4s_float( ; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -550,7 +550,7 @@ define void @st1lane_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_m ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -571,8 +571,8 @@ define void @st1lane_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_m define void @st1lane0_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_4s_float( ; CHECK-SAME: <4 x float> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -580,7 +580,7 @@ define void @st1lane0_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_ ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i32> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x float> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; 
CHECK-NEXT: unreachable @@ -601,7 +601,7 @@ define void @st1lane0_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) sanitize_ define void @st1lane_2d(<2 x i64> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -609,7 +609,7 @@ define void @st1lane_2d(<2 x i64> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -630,7 +630,7 @@ define void @st1lane_2d(<2 x i64> %A, ptr %D) sanitize_memory { define void @st1lane0_2d(<2 x i64> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -638,7 +638,7 @@ define void @st1lane0_2d(<2 x i64> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -659,7 +659,7 @@ define void @st1lane0_2d(<2 x i64> %A, ptr %D) sanitize_memory { define void @st1lane0u_2d(<2 x i64> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -667,7 +667,7 @@ define void @st1lane0u_2d(<2 x i64> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label 
[[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -688,8 +688,8 @@ define void @st1lane0u_2d(<2 x i64> %A, ptr %D) sanitize_memory { define void @st1lane_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -697,7 +697,7 @@ define void @st1lane_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -718,8 +718,8 @@ define void @st1lane_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane0_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -727,7 +727,7 @@ define void @st1lane0_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i64> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -748,7 +748,7 @@ define void @st1lane0_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane_2d_double(<2 x double> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_2d_double( ; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -756,7 +756,7 @@ define void @st1lane_2d_double(<2 x double> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -777,7 +777,7 @@ define void @st1lane_2d_double(<2 x double> %A, ptr %D) sanitize_memory { define void @st1lane0_2d_double(<2 x double> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_2d_double( ; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -785,7 +785,7 @@ define void @st1lane0_2d_double(<2 x double> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -806,7 +806,7 @@ define void @st1lane0_2d_double(<2 x double> %A, ptr %D) sanitize_memory { define void @st1lane0u_2d_double(<2 x double> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_2d_double( ; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -814,7 +814,7 @@ define void @st1lane0u_2d_double(<2 x double> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -835,8 +835,8 @@ define void @st1lane0u_2d_double(<2 x double> %A, ptr %D) sanitize_memory { define void 
@st1lane_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_2d_double( ; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -844,7 +844,7 @@ define void @st1lane_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitize ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -865,8 +865,8 @@ define void @st1lane_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitize define void @st1lane0_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_2d_double( ; CHECK-SAME: <2 x double> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -874,7 +874,7 @@ define void @st1lane0_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitiz ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i64> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x double> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -895,7 +895,7 @@ define void @st1lane0_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) sanitiz define void @st1lane_8b(<8 x i8> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_8b( ; CHECK-SAME: <8 x i8> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr 
@__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -903,7 +903,7 @@ define void @st1lane_8b(<8 x i8> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i8> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i8> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -924,8 +924,8 @@ define void @st1lane_8b(<8 x i8> %A, ptr %D) sanitize_memory { define void @st1lane_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_8b( ; CHECK-SAME: <8 x i8> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -933,7 +933,7 @@ define void @st1lane_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i8> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i8> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -954,8 +954,8 @@ define void @st1lane_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane0_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_8b( ; CHECK-SAME: <8 x i8> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -963,7 +963,7 @@ define void @st1lane0_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <8 x i8> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <8 x i8> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], 
label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -984,7 +984,7 @@ define void @st1lane0_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane_4h(<4 x i16> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -992,7 +992,7 @@ define void @st1lane_4h(<4 x i16> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1013,7 +1013,7 @@ define void @st1lane_4h(<4 x i16> %A, ptr %D) sanitize_memory { define void @st1lane0_4h(<4 x i16> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1021,7 +1021,7 @@ define void @st1lane0_4h(<4 x i16> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1042,7 +1042,7 @@ define void @st1lane0_4h(<4 x i16> %A, ptr %D) sanitize_memory { define void @st1lane0u_4h(<4 x i16> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1050,7 +1050,7 @@ define void @st1lane0u_4h(<4 x i16> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1071,8 +1071,8 @@ define void @st1lane0u_4h(<4 x i16> %A, ptr %D) sanitize_memory { define void @st1lane_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -1080,7 +1080,7 @@ define void @st1lane_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1101,8 +1101,8 @@ define void @st1lane_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane0_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -1110,7 +1110,7 @@ define void @st1lane0_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <4 x i16> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <4 x i16> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1131,7 +1131,7 @@ define void @st1lane0_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane_2s(<2 x i32> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_2s( ; CHECK-SAME: <2 
x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1139,7 +1139,7 @@ define void @st1lane_2s(<2 x i32> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1160,7 +1160,7 @@ define void @st1lane_2s(<2 x i32> %A, ptr %D) sanitize_memory { define void @st1lane0_2s(<2 x i32> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1168,7 +1168,7 @@ define void @st1lane0_2s(<2 x i32> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1189,7 +1189,7 @@ define void @st1lane0_2s(<2 x i32> %A, ptr %D) sanitize_memory { define void @st1lane0u_2s(<2 x i32> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1197,7 +1197,7 @@ define void @st1lane0u_2s(<2 x i32> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1218,8 +1218,8 @@ define void @st1lane0u_2s(<2 x i32> %A, ptr %D) sanitize_memory { define 
void @st1lane_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -1227,7 +1227,7 @@ define void @st1lane_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1248,8 +1248,8 @@ define void @st1lane_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane0_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -1257,7 +1257,7 @@ define void @st1lane0_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x i32> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1278,7 +1278,7 @@ define void @st1lane0_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane_2s_float(<2 x float> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane_2s_float( ; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 
; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1286,7 +1286,7 @@ define void @st1lane_2s_float(<2 x float> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1307,7 +1307,7 @@ define void @st1lane_2s_float(<2 x float> %A, ptr %D) sanitize_memory { define void @st1lane0_2s_float(<2 x float> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_2s_float( ; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1315,7 +1315,7 @@ define void @st1lane0_2s_float(<2 x float> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1336,7 +1336,7 @@ define void @st1lane0_2s_float(<2 x float> %A, ptr %D) sanitize_memory { define void @st1lane0u_2s_float(<2 x float> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_2s_float( ; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1344,7 +1344,7 @@ define void @st1lane0u_2s_float(<2 x float> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1365,8 +1365,8 @@ define void @st1lane0u_2s_float(<2 x float> %A, ptr %D) sanitize_memory { define void @st1lane_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane_ro_2s_float( ; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 
add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -1374,7 +1374,7 @@ define void @st1lane_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_m ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 1 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1395,8 +1395,8 @@ define void @st1lane_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_m define void @st1lane0_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_2s_float( ; CHECK-SAME: <2 x float> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -1404,7 +1404,7 @@ define void @st1lane0_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_ ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <2 x float> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1425,7 +1425,7 @@ define void @st1lane0_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) sanitize_ define void @st1lane0_1d(<1 x i64> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_1d( ; CHECK-SAME: <1 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1433,7 +1433,7 @@ define void @st1lane0_1d(<1 x i64> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP2]], 
i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x i64> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1454,7 +1454,7 @@ define void @st1lane0_1d(<1 x i64> %A, ptr %D) sanitize_memory { define void @st1lane0u_1d(<1 x i64> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_1d( ; CHECK-SAME: <1 x i64> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1462,7 +1462,7 @@ define void @st1lane0u_1d(<1 x i64> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x i64> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1483,8 +1483,8 @@ define void @st1lane0u_1d(<1 x i64> %A, ptr %D) sanitize_memory { define void @st1lane0_ro_1d(<1 x i64> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_1d( ; CHECK-SAME: <1 x i64> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -1492,7 +1492,7 @@ define void @st1lane0_ro_1d(<1 x i64> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x i64> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1513,7 +1513,7 @@ define void @st1lane0_ro_1d(<1 x i64> %A, ptr %D, i64 %offset) sanitize_memory { define void @st1lane0_1d_double(<1 x double> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_1d_double( ; CHECK-SAME: <1 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), 
i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1521,7 +1521,7 @@ define void @st1lane0_1d_double(<1 x double> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x double> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1542,7 +1542,7 @@ define void @st1lane0_1d_double(<1 x double> %A, ptr %D) sanitize_memory { define void @st1lane0u_1d_double(<1 x double> %A, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st1lane0u_1d_double( ; CHECK-SAME: <1 x double> [[A:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], 0 @@ -1550,7 +1550,7 @@ define void @st1lane0u_1d_double(<1 x double> %A, ptr %D) sanitize_memory { ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x double> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1571,8 +1571,8 @@ define void @st1lane0u_1d_double(<1 x double> %A, ptr %D) sanitize_memory { define void @st1lane0_ro_1d_double(<1 x double> %A, ptr %D, i64 %offset) sanitize_memory { ; CHECK-LABEL: define void @st1lane0_ro_1d_double( ; CHECK-SAME: <1 x double> [[A:%.*]], ptr [[D:%.*]], i64 [[OFFSET:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] @@ -1580,7 +1580,7 @@ define void @st1lane0_ro_1d_double(<1 x double> %A, ptr %D, i64 %offset) sanitiz ; CHECK-NEXT: [[_MSPROP1:%.*]] = extractelement <1 x i64> [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP:%.*]] = extractelement <1 x double> [[A]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSPROP]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label 
[[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] ; CHECK: 4: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1601,16 +1601,16 @@ define void @st1lane0_ro_1d_double(<1 x double> %A, ptr %D, i64 %offset) sanitiz define void @st2lane_16b(<16 x i8> %A, <16 x i8> %B, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st2lane_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[D]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2lane.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], i64 1, ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1625,16 +1625,16 @@ define void @st2lane_16b(<16 x i8> %A, <16 x i8> %B, ptr %D) sanitize_memory { define void @st2lane_8h(<8 x i16> %A, <8 x i16> %B, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st2lane_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[D]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2lane.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], i64 1, ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1649,16 +1649,16 @@ define void @st2lane_8h(<8 x i16> %A, <8 x i16> %B, ptr %D) sanitize_memory { define void @st2lane_4s(<4 x i32> %A, <4 x i32> %B, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st2lane_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: 
[[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[D]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2lane.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], i64 1, ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1673,16 +1673,16 @@ define void @st2lane_4s(<4 x i32> %A, <4 x i32> %B, ptr %D) sanitize_memory { define void @st2lane_2d(<2 x i64> %A, <2 x i64> %B, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st2lane_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[D]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], i64 1, ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1702,17 +1702,17 @@ declare void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64>, <2 x i64>, i64, ptr) define void @st3lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st3lane_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; 
CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[D]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3lane.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], i64 1, ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1727,17 +1727,17 @@ define void @st3lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %D) sanit define void @st3lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st3lane_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[D]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3lane.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], i64 1, ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1752,17 +1752,17 @@ define void @st3lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %D) saniti define void @st3lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st3lane_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; 
CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[D]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3lane.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], i64 1, ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1777,17 +1777,17 @@ define void @st3lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %D) saniti define void @st3lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %D) sanitize_memory { ; CHECK-LABEL: define void @st3lane_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[D]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3lane.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], i64 1, ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1807,18 +1807,18 @@ declare void @llvm.aarch64.neon.st3lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64> define void @st4lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %E) sanitize_memory { ; CHECK-LABEL: define void @st4lane_16b( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[E:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: 
[[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[E]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4lane.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], i64 1, ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1833,18 +1833,18 @@ define void @st4lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, define void @st4lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %E) sanitize_memory { ; CHECK-LABEL: define void @st4lane_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[E:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[E]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4lane.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], i64 1, ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label 
[[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1859,18 +1859,18 @@ define void @st4lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, define void @st4lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %E) sanitize_memory { ; CHECK-LABEL: define void @st4lane_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[E:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[E]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4lane.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], i64 1, ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1885,18 +1885,18 @@ define void @st4lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, define void @st4lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %E) sanitize_memory { ; CHECK-LABEL: define void @st4lane_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[E:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[E]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4lane.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], i64 1, ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1913,5 +1913,5 @@ declare void @llvm.aarch64.neon.st4lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16> declare void @llvm.aarch64.neon.st4lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, ptr) nounwind readnone declare void @llvm.aarch64.neon.st4lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, ptr) nounwind readnone ;. -; CHECK: [[PROF0]] = !{!"branch_weights", i32 1, i32 1048575} +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575} ;. diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll index 52283811e3065..a121df9b195ac 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-st1_origins.ll @@ -17,12 +17,12 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor ; ; CHECK-LABEL: define void @st2_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576 @@ -50,7 +50,7 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP12]], i32 7 ; CHECK-NEXT: store i32 [[TMP15]], ptr [[TMP22]], align 4 ; 
CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP23:%.*]], label [[TMP24:%.*]], !prof [[PROF0:![0-9]+]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP23:%.*]], label [[TMP24:%.*]], !prof [[PROF1:![0-9]+]] ; CHECK: 23: ; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4:[0-9]+]] ; CHECK-NEXT: unreachable @@ -67,14 +67,14 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind ; ; CHECK-LABEL: define void @st3_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 48), align 4 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576 @@ -113,7 +113,7 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind ; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i32, ptr [[TMP14]], i32 11 ; CHECK-NEXT: store i32 [[TMP20]], ptr [[TMP31]], align 4 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP32:%.*]], label [[TMP33:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP32:%.*]], label [[TMP33:%.*]], !prof [[PROF1]] ; CHECK: 32: ; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -130,16 +130,16 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr ; ; CHECK-LABEL: define void @st4_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4 
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 64), align 4 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 -; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 48), align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 193514046488576 @@ -189,7 +189,7 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr ; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i32, ptr [[TMP16]], i32 15 ; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP40]], align 4 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP41:%.*]], label [[TMP42:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP41:%.*]], label [[TMP42:%.*]], !prof [[PROF1]] ; CHECK: 41: ; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]] ; CHECK-NEXT: unreachable diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-tbl.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-tbl.ll index b0c71dc8b0851..3d6e7fa9ed4b8 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-tbl.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-tbl.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool build/bin/opt --version 2 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 ; Test memory sanitizer instrumentation for Arm NEON tbl instructions. 
; ; RUN: opt < %s -passes=msan -S | FileCheck %s @@ -14,7 +14,7 @@ define <8 x i8> @tbl1_8b(<16 x i8> %A, <8 x i8> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @tbl1_8b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <8 x i8> [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl1.v8i8(<16 x i8> [[TMP1]], <8 x i8> [[B]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP2]], [[TMP3]] @@ -30,7 +30,7 @@ define <16 x i8> @tbl1_16b(<16 x i8> %A, <16 x i8> %B) nounwind sanitize_memory ; CHECK-LABEL: define <16 x i8> @tbl1_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[B]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP2]], [[TMP3]] @@ -46,8 +46,8 @@ define <8 x i8> @tbl2_8b(<16 x i8> %A, <16 x i8> %B, <8 x i8> %C) sanitize_memor ; CHECK-LABEL: define <8 x i8> @tbl2_8b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <8 x i8> [[C]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP3]], [[TMP4]] @@ -63,8 +63,8 @@ define <16 x i8> @tbl2_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) sanitize_me ; CHECK-LABEL: define <16 x i8> @tbl2_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x 
i8> [[C]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP3]], [[TMP4]] @@ -80,9 +80,9 @@ define <8 x i8> @tbl3_8b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i8> %D) ; CHECK-LABEL: define <8 x i8> @tbl3_8b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <8 x i8> [[D:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl3.v8i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <8 x i8> [[D]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP4]], [[TMP5]] @@ -98,9 +98,9 @@ define <16 x i8> @tbl3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> % ; CHECK-LABEL: define <16 x i8> @tbl3_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[D]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP4]], [[TMP5]] @@ -116,10 +116,10 @@ define <8 x i8> @tbl4_8b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ; CHECK-LABEL: define <8 x i8> @tbl4_8b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <8 x i8> [[E:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr 
(i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl4.v8i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <8 x i8> [[E]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP5]], [[TMP6]] @@ -135,10 +135,10 @@ define <16 x i8> @tbl4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> % ; CHECK-LABEL: define <16 x i8> @tbl4_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <16 x i8> [[E:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[E]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP5]], [[TMP6]] @@ -156,9 +156,9 @@ define <8 x i8> @shuffled_tbl2_to_tbl4_v8i8(<16 x i8> %a, <16 x i8> %b, <16 x i8 ; CHECK-LABEL: define <8 x i8> @shuffled_tbl2_to_tbl4_v8i8 ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[TMP5:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <8 x i8> ) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> zeroinitializer, [[TMP5]] @@ -183,9 +183,9 @@ define <16 x i8> @shuffled_tbl2_to_tbl4(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c ; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4 ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> ) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> zeroinitializer, [[TMP5]] @@ -208,11 +208,11 @@ define <16 x i8> @shuffled_tbl2_to_tbl4(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) sanitize_memory { ; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], i8 [[V:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i8> splat (i8 -1), i8 [[TMP1]], i32 0 ; CHECK-NEXT: [[INS_0:%.*]] = insertelement <16 x i8> poison, i8 [[V]], i32 0 @@ -283,11 +283,11 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask(<16 x i8> %a, <16 x define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask2(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) sanitize_memory { ; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask2 ; 
CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], i8 [[V:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[INS_0:%.*]] = insertelement <16 x i8> poison, i8 1, i32 0 ; CHECK-NEXT: [[INS_1:%.*]] = insertelement <16 x i8> [[INS_0]], i8 1, i32 1 @@ -347,11 +347,11 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask2(<16 x i8> %a, <16 x define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) sanitize_memory { ; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], i8 [[V:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i8> splat (i8 -1), i8 [[TMP1]], i32 0 ; CHECK-NEXT: [[INS_0:%.*]] = insertelement <16 x i8> poison, i8 [[V]], i32 0 @@ -423,11 +423,11 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask(<16 x i8> %a, <16 x define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask2(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) sanitize_memory { ; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask2 ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], i8 [[V:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: 
[[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i8> splat (i8 -1), i8 [[TMP1]], i32 0 ; CHECK-NEXT: [[INS_0:%.*]] = insertelement <16 x i8> poison, i8 [[V]], i32 0 @@ -500,9 +500,9 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_shuffle(<16 x i8> %a, <16 x i8> %b ; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_shuffle ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> ) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> zeroinitializer, [[TMP5]] @@ -527,9 +527,9 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_tbl2_mask1(<16 x i8> %a, <16 x i8> ; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_tbl2_mask1 ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> ) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> zeroinitializer, [[TMP5]] @@ -554,9 +554,9 @@ define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_tbl2_mask2(<16 x i8> %a, <16 x i8> ; CHECK-LABEL: define <16 x i8> @shuffled_tbl2_to_tbl4_mixed_tbl2_mask2 ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> ) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> zeroinitializer, [[TMP5]] @@ -588,8 +588,8 @@ define <8 x i8> @tbx1_8b(<8 x i8> %A, <16 x i8> %B, <8 x i8> %C) nounwind saniti ; CHECK-LABEL: define <8 x i8> @tbx1_8b ; CHECK-SAME: (<8 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx1.v8i8(<8 x i8> [[TMP1]], <16 x i8> [[TMP2]], <8 x i8> [[C]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP3]], [[TMP4]] @@ -605,8 +605,8 @@ define <16 x i8> @tbx1_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) nounwind sa ; CHECK-LABEL: define <16 x i8> @tbx1_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), 
align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[C]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP3]], [[TMP4]] @@ -622,9 +622,9 @@ define <8 x i8> @tbx2_8b(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <8 x i8> %D) s ; CHECK-LABEL: define <8 x i8> @tbx2_8b ; CHECK-SAME: (<8 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <8 x i8> [[D:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx2.v8i8(<8 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <8 x i8> [[D]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP4]], [[TMP5]] @@ -640,9 +640,9 @@ define <16 x i8> @tbx2_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> % ; CHECK-LABEL: define <16 x i8> @tbx2_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx2.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[D]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP4]], [[TMP5]] @@ -658,10 +658,10 @@ define <8 x i8> @tbx3_8b(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ; CHECK-LABEL: define <8 x i8> @tbx3_8b ; CHECK-SAME: (<8 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <8 x i8> [[E:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load 
<16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx3.v8i8(<8 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <8 x i8> [[E]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP5]], [[TMP6]] @@ -677,10 +677,10 @@ define <16 x i8> @tbx3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> % ; CHECK-LABEL: define <16 x i8> @tbx3_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <16 x i8> [[E:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx3.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[E]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP5]], [[TMP6]] @@ -696,11 +696,11 @@ define <8 x i8> @tbx4_8b(<8 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ; CHECK-LABEL: define <8 x i8> @tbx4_8b ; CHECK-SAME: (<8 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <16 x i8> [[E:%.*]], <8 x i8> [[F:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i8>, ptr inttoptr 
(i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = call <8 x i8> @llvm.aarch64.neon.tbx4.v8i8(<8 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], <8 x i8> [[F]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i8> [[TMP6]], [[TMP7]] @@ -716,11 +716,11 @@ define <16 x i8> @tbx4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> % ; CHECK-LABEL: define <16 x i8> @tbx4_16b ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], <16 x i8> [[E:%.*]], <16 x i8> [[F:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = call <16 x i8> @llvm.aarch64.neon.tbx4.v16i8(<16 x i8> [[TMP1]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], <16 x i8> [[F]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP6]], [[TMP7]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll index 95f11a05d9d2d..7f4213968b3f8 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-umaxv.ll @@ -216,7 +216,7 @@ define <8 x i8> @test_vmaxv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { ; CHECK-LABEL: define <8 x i8> @test_vmaxv_u8_used_by_laneop( ; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), 
align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]]) @@ -240,7 +240,7 @@ define <4 x i16> @test_vmaxv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0 ; CHECK-LABEL: define <4 x i16> @test_vmaxv_u16_used_by_laneop( ; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]]) @@ -264,7 +264,7 @@ define <2 x i32> @test_vmaxv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0 ; CHECK-LABEL: define <2 x i32> @test_vmaxv_u32_used_by_laneop( ; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]]) @@ -284,7 +284,7 @@ define <16 x i8> @test_vmaxvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0 ; CHECK-LABEL: define <16 x i8> @test_vmaxvq_u8_used_by_laneop( ; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]]) @@ -308,7 +308,7 @@ define <8 x i16> @test_vmaxvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) # ; CHECK-LABEL: define <8 x i16> @test_vmaxvq_u16_used_by_laneop( ; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]]) @@ -332,7 +332,7 @@ define <4 x i32> @test_vmaxvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) # ; CHECK-LABEL: define <4 x i32> @test_vmaxvq_u32_used_by_laneop( ; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]]) @@ -356,3 +356,6 @@ declare i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32>) nounwind readnone declare i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32>) nounwind readnone attributes #0 = { sanitize_memory } +;. +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575} +;. diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll index ad51395691050..441c21b1e5753 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-uminv.ll @@ -216,7 +216,7 @@ define <8 x i8> @test_vminv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { ; CHECK-LABEL: define <8 x i8> @test_vminv_u8_used_by_laneop( ; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]]) @@ -240,7 +240,7 @@ define <4 x i16> @test_vminv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0 ; CHECK-LABEL: define <4 x i16> @test_vminv_u16_used_by_laneop( ; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]]) @@ -264,7 +264,7 @@ define <2 x i32> @test_vminv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0 ; CHECK-LABEL: define <2 x i32> @test_vminv_u32_used_by_laneop( ; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]]) @@ -284,7 +284,7 @@ define <16 x i8> @test_vminvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0 ; CHECK-LABEL: define <16 x i8> @test_vminvq_u8_used_by_laneop( ; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]]) @@ -308,7 +308,7 @@ define <8 x i16> @test_vminvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) # ; CHECK-LABEL: define <8 x i16> @test_vminvq_u16_used_by_laneop( ; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]]) @@ -332,7 +332,7 @@ define <4 x i32> @test_vminvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) # ; CHECK-LABEL: define <4 x i32> @test_vminvq_u32_used_by_laneop( ; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]]) @@ -355,3 +355,6 @@ declare i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32>) nounwind readnone declare i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32>) nounwind readnone attributes #0 = { sanitize_memory } +;. +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575} +;. 
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll index ad0856d38c1e9..5338031383d2f 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vadd.ll @@ -17,7 +17,7 @@ define <8 x i8> @addhn8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @addhn8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]] @@ -65,7 +65,7 @@ define <4 x i16> @addhn4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @addhn4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -113,7 +113,7 @@ define <2 x i32> @addhn2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @addhn2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -161,7 +161,7 @@ define <16 x i8> @addhn2_16b(<8 x i16> %a, <8 x i16> %b) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @addhn2_16b( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -199,7 +199,7 @@ define <8 x i16> @addhn2_8h(<4 x i32> %a, <4 x i32> %b) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @addhn2_8h( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -237,7 +237,7 @@ define <4 x i32> @addhn2_4s(<2 x i64> %a, <2 x i64> %b) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @addhn2_4s( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -280,7 +280,7 @@ define <8 x i8> @raddhn8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @raddhn8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -328,7 +328,7 @@ define <4 x i16> @raddhn4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @raddhn4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -376,7 +376,7 @@ define <2 x i32> @raddhn2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @raddhn2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -424,7 +424,7 @@ define <16 x i8> @raddhn2_16b(<8 x i16> %a, <8 x i16> %b) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @raddhn2_16b( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -462,7 
+462,7 @@ define <8 x i16> @raddhn2_8h(<4 x i32> %a, <4 x i32> %b) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @raddhn2_8h( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -500,7 +500,7 @@ define <4 x i32> @raddhn2_4s(<2 x i64> %a, <2 x i64> %b) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @raddhn2_4s( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -542,7 +542,7 @@ define <8 x i16> @saddl8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @saddl8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -587,7 +587,7 @@ define <4 x i32> @saddl4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @saddl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -632,7 +632,7 @@ define <2 x i64> @saddl2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i64> @saddl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -677,9 +677,9 @@ define <8 x i16> @saddl2_8h(<16 x i8> %a, <16 x i8> %b, <2 x i64> %param1, <2 x ; CHECK-LABEL: define <8 x i16> @saddl2_8h( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> 
[[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <16 x i8> [[A]] to <2 x i64> @@ -718,9 +718,9 @@ define <4 x i32> @saddl2_4s(<8 x i16> %a, <8 x i16> %b, <2 x i64> %param1, <2 x ; CHECK-LABEL: define <4 x i32> @saddl2_4s( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <8 x i16> [[A]] to <2 x i64> @@ -759,9 +759,9 @@ define <2 x i64> @saddl2_2d(<4 x i32> %a, <4 x i32> %b, <2 x i64> %param1, <2 x ; CHECK-LABEL: define <2 x i64> @saddl2_2d( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] 
= bitcast <4 x i32> [[A]] to <2 x i64> @@ -800,7 +800,7 @@ define <8 x i16> @uaddl8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @uaddl8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -845,7 +845,7 @@ define <4 x i32> @uaddl4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @uaddl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -890,7 +890,7 @@ define <2 x i64> @uaddl2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i64> @uaddl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -936,9 +936,9 @@ define <8 x i16> @uaddl2_8h(<16 x i8> %a, <16 x i8> %b, <2 x i64> %param1, <2 x ; CHECK-LABEL: define <8 x i16> @uaddl2_8h( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <16 x i8> [[A]] to <2 x i64> @@ -977,9 +977,9 @@ define <4 x i32> @uaddl2_4s(<8 x i16> %a, <8 x i16> %b, <2 x i64> %param1, <2 x ; CHECK-LABEL: define <4 x i32> @uaddl2_4s( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] 
{ ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <8 x i16> [[A]] to <2 x i64> @@ -1018,9 +1018,9 @@ define <2 x i64> @uaddl2_2d(<4 x i32> %a, <4 x i32> %b, <2 x i64> %param1, <2 x ; CHECK-LABEL: define <2 x i64> @uaddl2_2d( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i64> [[PARAM1:%.*]], <2 x i64> [[PARAM2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <4 x i32> [[A]] to <2 x i64> @@ -1059,7 +1059,7 @@ define <8 x i16> @uaddw8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @uaddw8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -1101,7 +1101,7 @@ define <4 x i32> @uaddw4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @uaddw4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label 
[[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -1143,7 +1143,7 @@ define <2 x i64> @uaddw2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i64> @uaddw2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -1185,8 +1185,8 @@ define <8 x i16> @uaddw2_8h(ptr %A, ptr %B, <16 x i8> %param1) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @uaddw2_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <16 x i8> [[PARAM1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] @@ -1233,8 +1233,8 @@ define <4 x i32> @uaddw2_4s(ptr %A, ptr %B, <8 x i16> %param1) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @uaddw2_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <8 x i16> [[PARAM1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] @@ -1281,8 +1281,8 @@ define <2 x i64> @uaddw2_2d(ptr %A, ptr %B, <4 x i32> %param1) nounwind #0 { ; CHECK-LABEL: define <2 x i64> @uaddw2_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <4 x i32> [[PARAM1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], 
label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] @@ -1329,7 +1329,7 @@ define <8 x i16> @saddw8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @saddw8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -1371,7 +1371,7 @@ define <4 x i32> @saddw4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @saddw4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -1413,7 +1413,7 @@ define <2 x i64> @saddw2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i64> @saddw2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -1455,8 +1455,8 @@ define <8 x i16> @saddw2_8h(ptr %A, ptr %B, <16 x i8> %param1) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @saddw2_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <16 x i8> [[PARAM1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] @@ -1503,8 +1503,8 @@ define <4 x i32> @saddw2_4s(ptr %A, ptr %B, <8 x i16> %param1) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @saddw2_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <8 x i16> [[PARAM1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls 
to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] @@ -1551,8 +1551,8 @@ define <2 x i64> @saddw2_2d(ptr %A, ptr %B, <4 x i32> %param1) nounwind #0 { ; CHECK-LABEL: define <2 x i64> @saddw2_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], <4 x i32> [[PARAM1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] @@ -1963,7 +1963,7 @@ define <4 x i16> @sadalp4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @sadalp4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2008,7 +2008,7 @@ define <2 x i32> @sadalp2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @sadalp2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2053,7 +2053,7 @@ define <8 x i16> @sadalp8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @sadalp8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2098,7 +2098,7 @@ define <4 x i32> @sadalp4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @sadalp4s( ; CHECK-SAME: ptr [[A:%.*]], ptr 
[[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2143,7 +2143,7 @@ define <2 x i64> @sadalp2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i64> @sadalp2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2188,7 +2188,7 @@ define <4 x i16> @uadalp4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @uadalp4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2233,7 +2233,7 @@ define <2 x i32> @uadalp2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @uadalp2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2278,7 +2278,7 @@ define <8 x i16> @uadalp8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @uadalp8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2323,7 +2323,7 @@ define <4 x i32> @uadalp4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @uadalp4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) 
to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2368,7 +2368,7 @@ define <2 x i64> @uadalp2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i64> @uadalp2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP16:%.*]], !prof [[PROF1]] @@ -2413,7 +2413,7 @@ define <8 x i8> @addp_8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @addp_8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2454,7 +2454,7 @@ define <16 x i8> @addp_16b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @addp_16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2495,7 +2495,7 @@ define <4 x i16> @addp_4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @addp_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2536,7 +2536,7 @@ define <8 x i16> @addp_8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @addp_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; 
CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2577,7 +2577,7 @@ define <2 x i32> @addp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @addp_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2618,7 +2618,7 @@ define <4 x i32> @addp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @addp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2659,7 +2659,7 @@ define <2 x i64> @addp_2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i64> @addp_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2708,7 +2708,7 @@ define <2 x float> @faddp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x float> @faddp_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2749,7 +2749,7 @@ define <4 x float> @faddp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x float> @faddp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2790,7 +2790,7 @@ define <2 x double> @faddp_2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x double> 
@faddp_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2834,10 +2834,10 @@ declare <2 x double> @llvm.aarch64.neon.faddp.v2f64(<2 x double>, <2 x double>) define <2 x i64> @uaddl_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 { ; CHECK-LABEL: define <2 x i64> @uaddl_duprhs( ; CHECK-SAME: <4 x i32> [[LHS:%.*]], i32 [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2:[0-9]+]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0 ; CHECK-NEXT: [[RHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[RHS]], i32 0 @@ -2869,10 +2869,10 @@ define <2 x i64> @uaddl_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x define <2 x i64> @uaddl2_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 { ; CHECK-LABEL: define <2 x i64> @uaddl2_duprhs( ; CHECK-SAME: <4 x i32> [[LHS:%.*]], i32 [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0 ; CHECK-NEXT: [[RHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[RHS]], i32 0 @@ -2904,10 +2904,10 @@ define <2 x i64> @uaddl2_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 define <2 x i64> @saddl_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x i32> 
%param2) #0 { ; CHECK-LABEL: define <2 x i64> @saddl_duplhs( ; CHECK-SAME: i32 [[LHS:%.*]], <4 x i32> [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0 ; CHECK-NEXT: [[LHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[LHS]], i32 0 @@ -2939,10 +2939,10 @@ define <2 x i64> @saddl_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x define <2 x i64> @saddl2_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 { ; CHECK-LABEL: define <2 x i64> @saddl2_duplhs( ; CHECK-SAME: i32 [[LHS:%.*]], <4 x i32> [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0 ; CHECK-NEXT: [[LHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[LHS]], i32 0 @@ -2974,10 +2974,10 @@ define <2 x i64> @saddl2_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 define <2 x i64> @usubl_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 { ; CHECK-LABEL: define <2 x i64> @usubl_duprhs( ; CHECK-SAME: <4 x i32> [[LHS:%.*]], i32 [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; 
CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0 ; CHECK-NEXT: [[RHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[RHS]], i32 0 @@ -3009,10 +3009,10 @@ define <2 x i64> @usubl_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x define <2 x i64> @usubl2_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 { ; CHECK-LABEL: define <2 x i64> @usubl2_duprhs( ; CHECK-SAME: <4 x i32> [[LHS:%.*]], i32 [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0 ; CHECK-NEXT: [[RHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[RHS]], i32 0 @@ -3044,10 +3044,10 @@ define <2 x i64> @usubl2_duprhs(<4 x i32> %lhs, i32 %rhs, <2 x i32> %param1, <4 define <2 x i64> @ssubl_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 { ; CHECK-LABEL: define <2 x i64> @ssubl_duplhs( ; CHECK-SAME: i32 [[LHS:%.*]], <4 x i32> [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0 ; CHECK-NEXT: [[LHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[LHS]], i32 0 @@ -3079,10 +3079,10 @@ define <2 x i64> @ssubl_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs, <2 x i32> %param1, <4 x i32> %param2) #0 { ; CHECK-LABEL: define <2 x i64> @ssubl2_duplhs( ; CHECK-SAME: i32 [[LHS:%.*]], <4 x 
i32> [[RHS:%.*]], <2 x i32> [[PARAM1:%.*]], <4 x i32> [[PARAM2:%.*]]) #[[ATTR2]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP1]], i32 0 ; CHECK-NEXT: [[LHSVEC_TMP:%.*]] = insertelement <2 x i32> [[PARAM1]], i32 [[LHS]], i32 0 @@ -3115,7 +3115,7 @@ define <8 x i8> @addhn8b_natural(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @addhn8b_natural( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3161,7 +3161,7 @@ define <4 x i16> @addhn4h_natural(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @addhn4h_natural( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3207,7 +3207,7 @@ define <2 x i32> @addhn2s_natural(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @addhn2s_natural( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3252,8 +3252,8 @@ define <2 x i32> @addhn2s_natural(ptr %A, ptr %B) nounwind #0 { define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @addhn2_16b_natural( ; CHECK-SAME: <8 x i8> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3302,8 +3302,8 @@ define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind #0 define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @addhn2_8h_natural( ; CHECK-SAME: <4 x i16> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3352,8 +3352,8 @@ define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind #0 define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @addhn2_4s_natural( ; CHECK-SAME: <2 x i32> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3403,9 +3403,9 @@ define <4 x i32> @addhn_addhn2_4s(ptr %A, ptr %B, ptr %C, ptr %D) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @addhn_addhn2_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], ptr [[D:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -3488,7 +3488,7 @@ define <8 x i8> @subhn8b_natural(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @subhn8b_natural( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3534,7 +3534,7 @@ define <4 x i16> @subhn4h_natural(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @subhn4h_natural( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3580,7 +3580,7 @@ define <2 x i32> @subhn2s_natural(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @subhn2s_natural( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3625,8 +3625,8 @@ define <2 x i32> @subhn2s_natural(ptr %A, ptr %B) nounwind #0 { define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @subhn2_16b_natural( ; CHECK-SAME: <8 x i8> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3675,8 +3675,8 @@ define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind #0 define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @subhn2_8h_natural( ; CHECK-SAME: <4 x i16> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), 
align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3725,8 +3725,8 @@ define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind #0 define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @subhn2_4s_natural( ; CHECK-SAME: <2 x i32> [[LOW:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll index 3a2ecfefd209e..4ee7e4f20f0b3 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vaddv.ll @@ -30,7 +30,7 @@ define <8 x i8> @test_vaddv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { ; CHECK-LABEL: define <8 x i8> @test_vaddv_s8_used_by_laneop( ; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]]) @@ -74,7 +74,7 @@ define <4 x i16> @test_vaddv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0 ; CHECK-LABEL: define <4 x i16> @test_vaddv_s16_used_by_laneop( ; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]]) @@ -115,7 +115,7 @@ define <2 x i32> @test_vaddv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0 ; CHECK-LABEL: define <2 x i32> @test_vaddv_s32_used_by_laneop( ; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]]) @@ -151,7 +151,7 @@ define <2 x i64> @test_vaddv_s64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) #0 ; CHECK-LABEL: define <2 x i64> @test_vaddv_s64_used_by_laneop( ; CHECK-SAME: <2 x i64> [[A1:%.*]], <2 x i64> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP0]]) @@ -191,7 +191,7 @@ define <8 x i8> @test_vaddv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) #0 { ; CHECK-LABEL: define <8 x i8> @test_vaddv_u8_used_by_laneop( ; CHECK-SAME: <8 x i8> [[A1:%.*]], <8 x i8> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> [[TMP0]]) @@ -259,7 +259,7 @@ define <4 x i16> @test_vaddv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) #0 ; CHECK-LABEL: define <4 x i16> @test_vaddv_u16_used_by_laneop( ; CHECK-SAME: <4 x i16> [[A1:%.*]], <4 x i16> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP0]]) @@ -324,7 +324,7 @@ define <2 x i32> @test_vaddv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) #0 ; CHECK-LABEL: define <2 x i32> @test_vaddv_u32_used_by_laneop( ; CHECK-SAME: <2 x i32> [[A1:%.*]], <2 x i32> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]]) @@ -408,7 +408,7 @@ define <2 x i64> @test_vaddv_u64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) #0 ; CHECK-LABEL: define <2 x i64> @test_vaddv_u64_used_by_laneop( ; CHECK-SAME: <2 x i64> [[A1:%.*]], <2 x i64> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP0]]) @@ -429,7 +429,7 @@ define <1 x i64> @test_vaddv_u64_to_vec(<2 x i64> %a1, <1 x i64> %param1) #0 { ; CHECK-SAME: <2 x i64> [[A1:%.*]], <1 x i64> [[PARAM1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP0]]) ; CHECK-NEXT: [[VADDV_I:%.*]] = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> [[A1]]) @@ -468,7 +468,7 @@ define <16 x i8> @test_vaddvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0 ; CHECK-LABEL: define <16 x i8> @test_vaddvq_s8_used_by_laneop( ; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]]) @@ -512,7 +512,7 @@ define <8 x i16> @test_vaddvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) # ; CHECK-LABEL: define <8 x i16> @test_vaddvq_s16_used_by_laneop( ; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]]) @@ -552,7 +552,7 @@ define <4 x i32> @test_vaddvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) # ; CHECK-LABEL: define <4 x i32> @test_vaddvq_s32_used_by_laneop( ; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]]) @@ -592,7 +592,7 @@ define <16 x i8> @test_vaddvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) #0 ; CHECK-LABEL: define <16 x i8> @test_vaddvq_u8_used_by_laneop( ; CHECK-SAME: <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> [[TMP0]]) @@ -636,7 +636,7 @@ define <8 x i16> @test_vaddvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) # ; CHECK-LABEL: define <8 x i16> @test_vaddvq_u16_used_by_laneop( ; CHECK-SAME: <8 x i16> [[A1:%.*]], <8 x i16> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP0]]) @@ -676,7 +676,7 @@ define <4 x i32> @test_vaddvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) # ; CHECK-LABEL: define <4 x i32> @test_vaddvq_u32_used_by_laneop( ; CHECK-SAME: <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]]) diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll index 93a75df4b76cc..03f6113de0762 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vcvt.ll @@ -1083,7 +1083,7 @@ define <2 x float> @fcvtxn_2s(<2 x double> %A) nounwind #0 { define <4 x float> @fcvtxn_4s(<2 x float> %ret, <2 x double> %A) nounwind #0 { ; CHECK-LABEL: define <4 x float> @fcvtxn_4s( ; CHECK-SAME: <2 x float> [[RET:%.*]], <2 x double> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP1]], zeroinitializer @@ -1358,7 +1358,7 @@ define void @autogen_SD28458(<8 x double> %val.f64, ptr %addr.f32) #0 { ; CHECK-LABEL: define void @autogen_SD28458( ; CHECK-SAME: <8 x double> [[VAL_F64:%.*]], ptr [[ADDR_F32:%.*]]) #[[ATTR3:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32> ; CHECK-NEXT: [[TR53:%.*]] = fptrunc <8 x double> [[VAL_F64]] 
to <8 x float> @@ -1383,7 +1383,7 @@ define void @autogen_SD28458(<8 x double> %val.f64, ptr %addr.f32) #0 { define void @autogen_SD19225(ptr %addr.f64, ptr %addr.f32) #0 { ; CHECK-LABEL: define void @autogen_SD19225( ; CHECK-SAME: ptr [[ADDR_F64:%.*]], ptr [[ADDR_F32:%.*]]) #[[ATTR3]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll index e2457c0a51d46..d6d88956a4f68 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmax.ll @@ -29,7 +29,7 @@ define <8 x i8> @smax_8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @smax_8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1:![0-9]+]] @@ -68,7 +68,7 @@ define <16 x i8> @smax_16b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @smax_16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -107,7 +107,7 @@ define <4 x i16> @smax_4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @smax_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -146,7 +146,7 @@ define <8 x i16> @smax_8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @smax_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -185,7 +185,7 @@ define <2 x i32> @smax_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @smax_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -224,7 +224,7 @@ define <4 x i32> @smax_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @smax_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -270,7 +270,7 @@ define <8 x i8> @umax_8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @umax_8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -309,7 +309,7 @@ define <16 x i8> @umax_16b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @umax_16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -348,7 +348,7 @@ define <4 x i16> @umax_4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @umax_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -387,7 +387,7 @@ define <8 x i16> @umax_8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @umax_8h( 
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -426,7 +426,7 @@ define <2 x i32> @umax_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @umax_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -465,7 +465,7 @@ define <4 x i32> @umax_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @umax_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -511,7 +511,7 @@ define <8 x i8> @smin_8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @smin_8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -550,7 +550,7 @@ define <16 x i8> @smin_16b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @smin_16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -589,7 +589,7 @@ define <4 x i16> @smin_4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @smin_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to 
ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -628,7 +628,7 @@ define <8 x i16> @smin_8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @smin_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -667,7 +667,7 @@ define <2 x i32> @smin_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @smin_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -706,7 +706,7 @@ define <4 x i32> @smin_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @smin_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -752,7 +752,7 @@ define <8 x i8> @umin_8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @umin_8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -791,7 +791,7 @@ define <16 x i8> @umin_16b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @umin_16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], 
label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -830,7 +830,7 @@ define <4 x i16> @umin_4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @umin_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -869,7 +869,7 @@ define <8 x i16> @umin_8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @umin_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -908,7 +908,7 @@ define <2 x i32> @umin_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @umin_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -947,7 +947,7 @@ define <4 x i32> @umin_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @umin_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -994,7 +994,7 @@ define <8 x i8> @smaxp_8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @smaxp_8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1035,7 +1035,7 @@ define <16 x i8> @smaxp_16b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @smaxp_16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; 
CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1076,7 +1076,7 @@ define <4 x i16> @smaxp_4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @smaxp_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1117,7 +1117,7 @@ define <8 x i16> @smaxp_8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @smaxp_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1158,7 +1158,7 @@ define <2 x i32> @smaxp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @smaxp_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1199,7 +1199,7 @@ define <4 x i32> @smaxp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @smaxp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1247,7 +1247,7 @@ define <8 x i8> @umaxp_8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @umaxp_8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1288,7 +1288,7 @@ define <16 x i8> @umaxp_16b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @umaxp_16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1329,7 +1329,7 @@ define <4 x i16> @umaxp_4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @umaxp_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1370,7 +1370,7 @@ define <8 x i16> @umaxp_8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @umaxp_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1411,7 +1411,7 @@ define <2 x i32> @umaxp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @umaxp_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1452,7 +1452,7 @@ define <4 x i32> @umaxp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @umaxp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label 
%[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1501,7 +1501,7 @@ define <8 x i8> @sminp_8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @sminp_8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1542,7 +1542,7 @@ define <16 x i8> @sminp_16b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @sminp_16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1583,7 +1583,7 @@ define <4 x i16> @sminp_4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @sminp_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1624,7 +1624,7 @@ define <8 x i16> @sminp_8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @sminp_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1665,7 +1665,7 @@ define <2 x i32> @sminp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @sminp_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1706,7 +1706,7 @@ define <4 x i32> @sminp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @sminp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] 
{ ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1754,7 +1754,7 @@ define <8 x i8> @uminp_8b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i8> @uminp_8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1795,7 +1795,7 @@ define <16 x i8> @uminp_16b(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @uminp_16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1836,7 +1836,7 @@ define <4 x i16> @uminp_4h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i16> @uminp_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1877,7 +1877,7 @@ define <8 x i16> @uminp_8h(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @uminp_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1918,7 +1918,7 @@ define <2 x i32> @uminp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x i32> @uminp_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1959,7 +1959,7 @@ define <4 x i32> @uminp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @uminp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2007,7 +2007,7 @@ define <2 x float> @fmax_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x float> @fmax_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2046,7 +2046,7 @@ define <4 x float> @fmax_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x float> @fmax_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2085,7 +2085,7 @@ define <2 x double> @fmax_2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x double> @fmax_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2128,7 +2128,7 @@ define <2 x float> @fmaxp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x float> @fmaxp_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 
[[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2169,7 +2169,7 @@ define <4 x float> @fmaxp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x float> @fmaxp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2210,7 +2210,7 @@ define <2 x double> @fmaxp_2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x double> @fmaxp_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2255,7 +2255,7 @@ define <2 x float> @fmin_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x float> @fmin_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2294,7 +2294,7 @@ define <4 x float> @fmin_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x float> @fmin_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2333,7 +2333,7 @@ define <2 x double> @fmin_2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x double> @fmin_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2376,7 +2376,7 @@ define <2 x float> @fminp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x float> @fminp_2s( ; CHECK-SAME: 
ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2417,7 +2417,7 @@ define <4 x float> @fminp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x float> @fminp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2458,7 +2458,7 @@ define <2 x double> @fminp_2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x double> @fminp_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2503,7 +2503,7 @@ define <2 x float> @fminnmp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x float> @fminnmp_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2544,7 +2544,7 @@ define <4 x float> @fminnmp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x float> @fminnmp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2585,7 +2585,7 @@ define <2 x double> @fminnmp_2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x double> @fminnmp_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint 
(ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2630,7 +2630,7 @@ define <2 x float> @fmaxnmp_2s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x float> @fmaxnmp_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2671,7 +2671,7 @@ define <4 x float> @fmaxnmp_4s(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <4 x float> @fmaxnmp_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2712,7 +2712,7 @@ define <2 x double> @fmaxnmp_2d(ptr %A, ptr %B) nounwind #0 { ; CHECK-LABEL: define <2 x double> @fmaxnmp_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -2754,3 +2754,6 @@ declare <4 x float> @llvm.aarch64.neon.fmaxnmp.v4f32(<4 x float>, <4 x float>) n declare <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double>, <2 x double>) nounwind readnone attributes #0 = { sanitize_memory } +;. +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575} +;. 
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll index 8e9110fa836c7..ced0138ab747c 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmovn.ll @@ -51,7 +51,7 @@ define <2 x i32> @xtn2s(<2 x i64> %A) nounwind #0 { define <16 x i8> @xtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @xtn2_16b( ; CHECK-SAME: <8 x i8> [[RET:%.*]], <8 x i16> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i16> [[TMP1]] to <8 x i8> @@ -69,7 +69,7 @@ define <16 x i8> @xtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 { define <8 x i16> @xtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @xtn2_8h( ; CHECK-SAME: <4 x i16> [[RET:%.*]], <4 x i32> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <4 x i32> [[TMP1]] to <4 x i16> @@ -87,7 +87,7 @@ define <8 x i16> @xtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 { define <4 x i32> @xtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @xtn2_4s( ; CHECK-SAME: <2 x i32> [[RET:%.*]], <2 x i64> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i32> @@ -150,7 +150,7 @@ define <2 x i32> @sqxtn2s(<2 x i64> %A) nounwind #0 { define <16 x i8> @sqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @sqxtn2_16b( ; CHECK-SAME: <8 x i8> [[RET:%.*]], <8 x i16> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i16> [[TMP1]], zeroinitializer @@ -169,7 +169,7 @@ define <16 x i8> @sqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 { define <8 x i16> @sqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @sqxtn2_8h( ; CHECK-SAME: <4 x i16> [[RET:%.*]], <4 x i32> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: 
[[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer @@ -188,7 +188,7 @@ define <8 x i16> @sqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 { define <4 x i32> @sqxtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @sqxtn2_4s( ; CHECK-SAME: <2 x i32> [[RET:%.*]], <2 x i64> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer @@ -256,7 +256,7 @@ define <2 x i32> @uqxtn2s(<2 x i64> %A) nounwind #0 { define <16 x i8> @uqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @uqxtn2_16b( ; CHECK-SAME: <8 x i8> [[RET:%.*]], <8 x i16> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i16> [[TMP1]], zeroinitializer @@ -275,7 +275,7 @@ define <16 x i8> @uqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 { define <8 x i16> @uqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @uqxtn2_8h( ; CHECK-SAME: <4 x i16> [[RET:%.*]], <4 x i32> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer @@ -294,7 +294,7 @@ define <8 x i16> @uqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 { define <4 x i32> @uqxtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @uqxtn2_4s( ; CHECK-SAME: <2 x i32> [[RET:%.*]], <2 x i64> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer @@ -362,7 +362,7 @@ define <2 x i32> @sqxtun2s(<2 x i64> %A) nounwind #0 { define <16 x i8> @sqxtun2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @sqxtun2_16b( ; CHECK-SAME: <8 x i8> [[RET:%.*]], <8 x i16> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] 
= load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i16> [[TMP1]], zeroinitializer @@ -381,7 +381,7 @@ define <16 x i8> @sqxtun2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind #0 { define <8 x i16> @sqxtun2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 { ; CHECK-LABEL: define <8 x i16> @sqxtun2_8h( ; CHECK-SAME: <4 x i16> [[RET:%.*]], <4 x i32> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer @@ -400,7 +400,7 @@ define <8 x i16> @sqxtun2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind #0 { define <4 x i32> @sqxtun2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @sqxtun2_4s( ; CHECK-SAME: <2 x i32> [[RET:%.*]], <2 x i64> [[A:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll index 38d6669671509..e9bb743b189fe 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vmul.ll @@ -13,7 +13,7 @@ define <8 x i16> @smull8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @smull8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1:![0-9]+]] @@ -54,7 +54,7 @@ define <4 x i32> @smull4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @smull4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -95,7 +95,7 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @smull2d( ; CHECK-SAME: ptr [[A:%.*]], ptr 
[[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -140,7 +140,7 @@ define <8 x i16> @umull8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @umull8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -181,7 +181,7 @@ define <4 x i32> @umull4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @umull4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -222,7 +222,7 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @umull2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -267,7 +267,7 @@ define <4 x i32> @sqdmull4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmull4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -315,7 +315,7 @@ define <2 x i64> @sqdmull2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqdmull2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint 
(ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -363,7 +363,7 @@ define <4 x i32> @sqdmull2_4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmull2_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -417,7 +417,7 @@ define <2 x i64> @sqdmull2_2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqdmull2_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -475,7 +475,7 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @pmull8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -518,7 +518,7 @@ define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @sqdmulh_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -557,7 +557,7 @@ define <8 x i16> @sqdmulh_8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqdmulh_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), 
align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -596,7 +596,7 @@ define <2 x i32> @sqdmulh_2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @sqdmulh_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -635,7 +635,7 @@ define <4 x i32> @sqdmulh_4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmulh_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -674,7 +674,7 @@ define i32 @sqdmulh_1s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i32 @sqdmulh_1s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -719,7 +719,7 @@ define <4 x i16> @sqrdmulh_4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @sqrdmulh_4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -758,7 +758,7 @@ define <8 x i16> @sqrdmulh_8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqrdmulh_8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], 
label %[[BB4:.*]], !prof [[PROF1]] @@ -797,7 +797,7 @@ define <2 x i32> @sqrdmulh_2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @sqrdmulh_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -836,7 +836,7 @@ define <4 x i32> @sqrdmulh_4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqrdmulh_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -875,7 +875,7 @@ define i32 @sqrdmulh_1s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i32 @sqrdmulh_1s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -920,7 +920,7 @@ define <2 x float> @fmulx_2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x float> @fmulx_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -960,7 +960,7 @@ define <4 x float> @fmulx_4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x float> @fmulx_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1000,7 +1000,7 @@ define <2 x double> @fmulx_2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 
x double> @fmulx_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] @@ -1044,8 +1044,8 @@ define <4 x i32> @smlal4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @smlal4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1101,8 +1101,8 @@ define <2 x i64> @smlal2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @smlal2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1208,8 +1208,8 @@ define <4 x i32> @smlsl4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @smlsl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1265,8 +1265,8 @@ define <2 x i64> @smlsl2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @smlsl2d( ; CHECK-SAME: ptr [[A:%.*]], 
ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1377,8 +1377,8 @@ define <4 x i32> @sqdmlal4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmlal4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1441,8 +1441,8 @@ define <2 x i64> @sqdmlal2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqdmlal2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1505,8 +1505,8 @@ define <4 x i32> @sqdmlal2_4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmlal2_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], 
label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1575,8 +1575,8 @@ define <2 x i64> @sqdmlal2_2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqdmlal2_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1645,8 +1645,8 @@ define <4 x i32> @sqdmlsl4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmlsl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1709,8 +1709,8 @@ define <2 x i64> @sqdmlsl2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqdmlsl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1773,8 +1773,8 @@ define <4 x i32> @sqdmlsl2_4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmlsl2_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; 
CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1843,8 +1843,8 @@ define <2 x i64> @sqdmlsl2_2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqdmlsl2_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1913,8 +1913,8 @@ define <4 x i32> @umlal4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @umlal4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1970,8 +1970,8 @@ define <2 x i64> @umlal2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @umlal2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2077,8 +2077,8 @@ define <4 x i32> @umlsl4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @umlsl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: 
[[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2134,8 +2134,8 @@ define <2 x i64> @umlsl2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @umlsl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2241,8 +2241,8 @@ define <2 x float> @fmla_2s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x float> @fmla_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2294,8 +2294,8 @@ define <4 x float> @fmla_4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x float> @fmla_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2347,8 +2347,8 @@ define <2 x double> @fmla_2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x double> @fmla_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; 
CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2404,8 +2404,8 @@ define <2 x float> @fmls_2s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x float> @fmls_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2460,8 +2460,8 @@ define <4 x float> @fmls_4s(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x float> @fmls_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2516,8 +2516,8 @@ define <2 x double> @fmls_2d(ptr %A, ptr %B, ptr %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x double> @fmls_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof 
[[PROF1]] @@ -2572,8 +2572,8 @@ define <2 x float> @fmls_commuted_neg_2s(ptr %A, ptr %B, ptr %C) nounwind saniti ; CHECK-LABEL: define <2 x float> @fmls_commuted_neg_2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2628,8 +2628,8 @@ define <4 x float> @fmls_commuted_neg_4s(ptr %A, ptr %B, ptr %C) nounwind saniti ; CHECK-LABEL: define <4 x float> @fmls_commuted_neg_4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2684,8 +2684,8 @@ define <2 x double> @fmls_commuted_neg_2d(ptr %A, ptr %B, ptr %C) nounwind sanit ; CHECK-LABEL: define <2 x double> @fmls_commuted_neg_2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -2969,7 +2969,7 @@ declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x define <4 x i16> @mul_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @mul_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> @@ -2987,7 +2987,7 @@ define <4 x i16> @mul_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory { define <8 x i16> @mul_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @mul_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> @@ -3005,7 +3005,7 @@ define <8 x i16> @mul_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_memory { define <2 x i32> @mul_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @mul_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> @@ -3023,7 +3023,7 @@ define <2 x i32> @mul_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory { define <4 x i32> @mul_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @mul_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> @@ -3042,7 +3042,7 @@ define <2 x i64> @mul_2d(<2 x i64> %A, <2 x i64> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @mul_2d( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP1:%.*]] = mul <2 x i64> [[A]], [[B]] @@ -3056,7 +3056,7 @@ define <2 x i64> @mul_2d(<2 x i64> %A, <2 x i64> %B) nounwind sanitize_memory { define <2 x float> @fmul_lane_2s(<2 x float> %A, <2 x float> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x float> @fmul_lane_2s( ; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: 
[[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> @@ -3074,7 +3074,7 @@ define <2 x float> @fmul_lane_2s(<2 x float> %A, <2 x float> %B) nounwind saniti define <4 x float> @fmul_lane_4s(<4 x float> %A, <4 x float> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x float> @fmul_lane_4s( ; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> @@ -3092,7 +3092,7 @@ define <4 x float> @fmul_lane_4s(<4 x float> %A, <4 x float> %B) nounwind saniti define <2 x double> @fmul_lane_2d(<2 x double> %A, <2 x double> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x double> @fmul_lane_2d( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <2 x i32> @@ -3110,7 +3110,7 @@ define <2 x double> @fmul_lane_2d(<2 x double> %A, <2 x double> %B) nounwind san define float @fmul_lane_s(float %A, <4 x float> %vec) nounwind sanitize_memory { ; CHECK-LABEL: define float @fmul_lane_s( ; CHECK-SAME: float [[A:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3 @@ -3128,7 +3128,7 @@ define float @fmul_lane_s(float %A, <4 x float> %vec) nounwind sanitize_memory { define double @fmul_lane_d(double %A, <2 x double> %vec) nounwind sanitize_memory { ; CHECK-LABEL: define double @fmul_lane_d( ; CHECK-SAME: double [[A:%.*]], <2 x double> [[VEC:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1 @@ -3148,7 +3148,7 @@ define double @fmul_lane_d(double %A, <2 x double> %vec) nounwind sanitize_memor define <2 x float> @fmulx_lane_2s(<2 x float> %A, <2 x float> %B) nounwind sanitize_memory { ; 
CHECK-LABEL: define <2 x float> @fmulx_lane_2s( ; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> @@ -3167,7 +3167,7 @@ define <2 x float> @fmulx_lane_2s(<2 x float> %A, <2 x float> %B) nounwind sanit define <4 x float> @fmulx_lane_4s(<4 x float> %A, <4 x float> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x float> @fmulx_lane_4s( ; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> @@ -3186,7 +3186,7 @@ define <4 x float> @fmulx_lane_4s(<4 x float> %A, <4 x float> %B) nounwind sanit define <2 x double> @fmulx_lane_2d(<2 x double> %A, <2 x double> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x double> @fmulx_lane_2d( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <2 x i32> @@ -3205,7 +3205,7 @@ define <2 x double> @fmulx_lane_2d(<2 x double> %A, <2 x double> %B) nounwind sa define <4 x i16> @sqdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @sqdmulh_lane_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> @@ -3223,7 +3223,7 @@ define <4 x i16> @sqdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_ define <8 x i16> @sqdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqdmulh_lane_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr 
@__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> @@ -3241,7 +3241,7 @@ define <8 x i16> @sqdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_ define <2 x i32> @sqdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @sqdmulh_lane_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> @@ -3259,7 +3259,7 @@ define <2 x i32> @sqdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_ define <4 x i32> @sqdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmulh_lane_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> @@ -3277,7 +3277,7 @@ define <4 x i32> @sqdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize_ define i32 @sqdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define i32 @sqdmulh_lane_1s( ; CHECK-SAME: i32 [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 1 @@ -3295,7 +3295,7 @@ define i32 @sqdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind sanitize_memory { define <4 x i16> @sqrdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @sqrdmulh_lane_4h( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> @@ -3313,7 +3313,7 @@ define <4 x i16> @sqrdmulh_lane_4h(<4 x i16> %A, <4 x i16> %B) nounwind sanitize define <8 x i16> @sqrdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqrdmulh_lane_8h( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: 
[[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> @@ -3331,7 +3331,7 @@ define <8 x i16> @sqrdmulh_lane_8h(<8 x i16> %A, <8 x i16> %B) nounwind sanitize define <2 x i32> @sqrdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @sqrdmulh_lane_2s( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> @@ -3349,7 +3349,7 @@ define <2 x i32> @sqrdmulh_lane_2s(<2 x i32> %A, <2 x i32> %B) nounwind sanitize define <4 x i32> @sqrdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqrdmulh_lane_4s( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <4 x i32> @@ -3367,7 +3367,7 @@ define <4 x i32> @sqrdmulh_lane_4s(<4 x i32> %A, <4 x i32> %B) nounwind sanitize define i32 @sqrdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define i32 @sqrdmulh_lane_1s( ; CHECK-SAME: i32 [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 1 @@ -3385,7 +3385,7 @@ define i32 @sqrdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind sanitize_memory { define <4 x i32> @sqdmull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmull_lane_4s( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> @@ -3412,7 +3412,7 @@ define <4 x i32> 
@sqdmull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_ define <2 x i64> @sqdmull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqdmull_lane_2d( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> @@ -3440,7 +3440,7 @@ define <4 x i32> @sqdmull2_lane_4s(<8 x i16> %A, <8 x i16> %B) nounwind sanitize ; CHECK-LABEL: define <4 x i32> @sqdmull2_lane_4s( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP5]], <8 x i16> splat (i16 -1), <4 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> undef, <4 x i32> @@ -3470,7 +3470,7 @@ define <2 x i64> @sqdmull2_lane_2d(<4 x i32> %A, <4 x i32> %B) nounwind sanitize ; CHECK-LABEL: define <2 x i64> @sqdmull2_lane_2d( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> splat (i32 -1), <2 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> undef, <2 x i32> @@ -3499,7 +3499,7 @@ define <2 x i64> @sqdmull2_lane_2d(<4 x i32> %A, <4 x i32> %B) nounwind sanitize define <4 x i32> @umull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @umull_lane_4s( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> @@ -3519,7 +3519,7 @@ define <4 x i32> @umull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_me define <2 x i64> @umull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @umull_lane_2d( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x 
i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> @@ -3539,7 +3539,7 @@ define <2 x i64> @umull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_me define <4 x i32> @smull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @smull_lane_4s( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> @@ -3559,7 +3559,7 @@ define <4 x i32> @smull_lane_4s(<4 x i16> %A, <4 x i16> %B) nounwind sanitize_me define <2 x i64> @smull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @smull_lane_2d( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> @@ -3579,9 +3579,9 @@ define <2 x i64> @smull_lane_2d(<2 x i32> %A, <2 x i32> %B) nounwind sanitize_me define <4 x i32> @smlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @smlal_lane_4s( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> @@ -3603,9 +3603,9 @@ define <4 x i32> @smlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi define <2 x i64> @smlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @smlal_lane_2d( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 
8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> @@ -3627,9 +3627,9 @@ define <2 x i64> @smlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi define <4 x i32> @sqdmlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmlal_lane_4s( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> @@ -3658,9 +3658,9 @@ define <4 x i32> @sqdmlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) noun define <2 x i64> @sqdmlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqdmlal_lane_2d( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> @@ -3690,8 +3690,8 @@ define <4 x i32> @sqdmlal2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> %C) nou ; CHECK-LABEL: define <4 x i32> @sqdmlal2_lane_4s( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, 
i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP7]], <8 x i16> splat (i16 -1), <4 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> undef, <4 x i32> @@ -3724,8 +3724,8 @@ define <2 x i64> @sqdmlal2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nou ; CHECK-LABEL: define <2 x i64> @sqdmlal2_lane_2d( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> splat (i32 -1), <2 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> undef, <2 x i32> @@ -3757,8 +3757,8 @@ define <2 x i64> @sqdmlal2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nou define i32 @sqdmlal_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind sanitize_memory { ; CHECK-LABEL: define i32 @sqdmlal_lane_1s( ; CHECK-SAME: i32 [[A:%.*]], i16 [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> splat (i16 -1), i16 [[TMP1]], i32 0 @@ -3794,8 +3794,8 @@ declare i32 @llvm.aarch64.neon.sqadd.i32(i32, i32) define i32 @sqdmlsl_lane_1s(i32 %A, i16 %B, <4 x i16> %C) nounwind sanitize_memory { ; CHECK-LABEL: define i32 @sqdmlsl_lane_1s( ; CHECK-SAME: i32 [[A:%.*]], i16 [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> splat (i16 -1), i16 [[TMP1]], i32 0 @@ -3831,8 +3831,8 @@ declare i32 @llvm.aarch64.neon.sqsub.i32(i32, i32) define i32 @sqadd_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind sanitize_memory { ; CHECK-LABEL: 
define i32 @sqadd_lane1_sqdmull4s( ; CHECK-SAME: i32 [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP1]] to i64 @@ -3861,8 +3861,8 @@ define i32 @sqadd_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind s define i32 @sqsub_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind sanitize_memory { ; CHECK-LABEL: define i32 @sqsub_lane1_sqdmull4s( ; CHECK-SAME: i32 [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP1]] to i64 @@ -3891,8 +3891,8 @@ define i32 @sqsub_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind s define i64 @sqdmlal_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @sqdmlal_lane_1d( ; CHECK-SAME: i64 [[A:%.*]], i32 [[B:%.*]], <2 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1 @@ -3922,8 +3922,8 @@ declare i64 @llvm.aarch64.neon.sqadd.i64(i64, i64) define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @sqdmlsl_lane_1d( ; CHECK-SAME: i64 [[A:%.*]], i32 [[B:%.*]], <2 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: 
[[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1 @@ -3953,9 +3953,9 @@ declare i64 @llvm.aarch64.neon.sqsub.i64(i64, i64) define <4 x i32> @umlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @umlal_lane_4s( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> @@ -3977,9 +3977,9 @@ define <4 x i32> @umlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi define <2 x i64> @umlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @umlal_lane_2d( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> @@ -4002,9 +4002,9 @@ define <2 x i64> @umlal_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi define <4 x i32> @smlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @smlsl_lane_4s( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> 
poison, <4 x i32> @@ -4026,9 +4026,9 @@ define <4 x i32> @smlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi define <2 x i64> @smlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @smlsl_lane_2d( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> @@ -4050,9 +4050,9 @@ define <2 x i64> @smlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwi define <4 x i32> @sqdmlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqdmlsl_lane_4s( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> @@ -4081,9 +4081,9 @@ define <4 x i32> @sqdmlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) noun define <2 x i64> @sqdmlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqdmlsl_lane_2d( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> @@ -4113,8 +4113,8 @@ define <4 x i32> @sqdmlsl2_lane_4s(<8 x i16> %A, <8 x i16> %B, <4 x i32> 
%C) nou ; CHECK-LABEL: define <4 x i32> @sqdmlsl2_lane_4s( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP7]], <8 x i16> splat (i16 -1), <4 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> undef, <4 x i32> @@ -4147,8 +4147,8 @@ define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nou ; CHECK-LABEL: define <2 x i64> @sqdmlsl2_lane_2d( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> splat (i32 -1), <2 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> undef, <2 x i32> @@ -4180,9 +4180,9 @@ define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32> %A, <4 x i32> %B, <2 x i64> %C) nou define <4 x i32> @umlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @umlsl_lane_4s( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> splat (i16 -1), <4 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[B]], <4 x i16> poison, <4 x i32> @@ -4204,9 +4204,9 @@ define <4 x i32> @umlsl_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwi define <2 x i64> @umlsl_lane_2d(<2 x i32> %A, <2 x i32> %B, <2 x i64> %C) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @umlsl_lane_2d( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i64> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> splat (i32 -1), <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i32> [[B]], <2 x i32> poison, <2 x i32> @@ -4230,7 +4230,7 @@ define float @fmulxs(float %a, float %b) nounwind sanitize_memory { ; CHECK-LABEL: define float @fmulxs( ; CHECK-SAME: float [[A:%.*]], float [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0 @@ -4246,7 +4246,7 @@ define double @fmulxd(double %a, double %b) nounwind sanitize_memory { ; CHECK-LABEL: define double @fmulxd( ; CHECK-SAME: double [[A:%.*]], double [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0 @@ -4261,7 +4261,7 @@ define double @fmulxd(double %a, double %b) nounwind sanitize_memory { define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind sanitize_memory { ; CHECK-LABEL: define float @fmulxs_lane( ; CHECK-SAME: float [[A:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3 @@ -4280,7 +4280,7 @@ define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind sanitize_memory { define double @fmulxd_lane(double %a, <2 x double> %vec) nounwind sanitize_memory { ; CHECK-LABEL: define double @fmulxd_lane( ; CHECK-SAME: double [[A:%.*]], <2 x double> [[VEC:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1 @@ -4304,7 +4304,7 @@ define <8 x i16> @smull2_8h_simple(<16 x i8> %a, 
<16 x i8> %b) nounwind sanitize ; CHECK-LABEL: define <8 x i16> @smull2_8h_simple( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP4]], <16 x i8> splat (i8 -1), <8 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i8> [[A]], <16 x i8> undef, <8 x i32> @@ -4327,7 +4327,7 @@ define <8 x i16> @foo0(<16 x i8> %a, <16 x i8> %b) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @foo0( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <16 x i8> [[A]] to <2 x i64> @@ -4362,7 +4362,7 @@ define <4 x i32> @foo1(<8 x i16> %a, <8 x i16> %b) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @foo1( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP8]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <8 x i16> [[A]] to <2 x i64> @@ -4397,7 +4397,7 @@ define <2 x i64> @foo2(<4 x i32> %a, <4 x i32> %b) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @foo2( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP8]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <4 x i32> [[A]] to <2 x i64> @@ -4432,7 +4432,7 @@ define <8 x i16> @foo3(<16 x i8> %a, <16 x i8> %b) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @foo3( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x i8> [[TMP8]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <16 x i8> [[A]] to <2 x i64> @@ -4467,7 
+4467,7 @@ define <4 x i32> @foo4(<8 x i16> %a, <8 x i16> %b) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @foo4( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i16> [[TMP8]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <8 x i16> [[A]] to <2 x i64> @@ -4502,7 +4502,7 @@ define <2 x i64> @foo5(<4 x i32> %a, <4 x i32> %b) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @foo5( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x i32> [[TMP8]] to <2 x i64> ; CHECK-NEXT: [[TMP:%.*]] = bitcast <4 x i32> [[A]] to <2 x i64> @@ -4713,8 +4713,8 @@ entry: define <8 x i16> @bar0(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @bar0( ; CHECK-SAME: <8 x i16> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP9]] to <2 x i64> @@ -4752,8 +4752,8 @@ define <8 x i16> @bar0(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind saniti define <4 x i32> @bar1(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @bar1( ; CHECK-SAME: <4 x i32> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP9]] to <2 x i64> @@ -4791,8 +4791,8 @@ define <4 x i32> @bar1(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind saniti define <2 x i64> @bar2(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind sanitize_memory { ; 
CHECK-LABEL: define <2 x i64> @bar2( ; CHECK-SAME: <2 x i64> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP9]] to <2 x i64> @@ -4830,8 +4830,8 @@ define <2 x i64> @bar2(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind saniti define <8 x i16> @bar3(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @bar3( ; CHECK-SAME: <8 x i16> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP9]] to <2 x i64> @@ -4869,8 +4869,8 @@ define <8 x i16> @bar3(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) nounwind saniti define <4 x i32> @bar4(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @bar4( ; CHECK-SAME: <4 x i32> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP9]] to <2 x i64> @@ -4908,8 +4908,8 @@ define <4 x i32> @bar4(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) nounwind saniti define <2 x i64> @bar5(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @bar5( ; CHECK-SAME: <2 x i64> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; 
CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP9]] to <2 x i64> @@ -4947,8 +4947,8 @@ define <2 x i64> @bar5(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) nounwind saniti define <4 x i32> @mlal2_1(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @mlal2_1( ; CHECK-SAME: <4 x i32> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP9]], <4 x i16> splat (i16 -1), <8 x i32> @@ -4989,8 +4989,8 @@ define <4 x i32> @mlal2_1(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind san define <2 x i64> @mlal2_2(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @mlal2_2( ; CHECK-SAME: <2 x i64> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP9]], <2 x i32> splat (i32 -1), <4 x i32> @@ -5031,8 +5031,8 @@ define <2 x i64> @mlal2_2(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind san define <4 x i32> @mlal2_4(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @mlal2_4( ; CHECK-SAME: <4 x i32> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x i16> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i16> [[TMP9]], <4 x i16> splat (i16 -1), <8 x i32> @@ -5073,8 +5073,8 @@ define <4 x i32> @mlal2_4(<4 x i32> %a, <8 x i16> %b, <4 x i16> %c) nounwind san define <2 
x i64> @mlal2_5(<2 x i64> %a, <4 x i32> %b, <2 x i32> %c) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @mlal2_5( ; CHECK-SAME: <2 x i64> [[A:%.*]], <4 x i32> [[B:%.*]], <2 x i32> [[C:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP9]], <2 x i32> splat (i32 -1), <4 x i32> zeroinitializer @@ -5960,7 +5960,7 @@ define <1 x double> @test_fmul_v1f64(<1 x double> %L, <1 x double> %R) nounwind ; CHECK-LABEL: define <1 x double> @test_fmul_v1f64( ; CHECK-SAME: <1 x double> [[L:%.*]], <1 x double> [[R:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <1 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[PROD:%.*]] = fmul <1 x double> [[L]], [[R]] @@ -5975,7 +5975,7 @@ define <1 x double> @test_fdiv_v1f64(<1 x double> %L, <1 x double> %R) nounwind ; CHECK-LABEL: define <1 x double> @test_fdiv_v1f64( ; CHECK-SAME: <1 x double> [[L:%.*]], <1 x double> [[R:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <1 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[PROD:%.*]] = fdiv <1 x double> [[L]], [[R]] @@ -5990,8 +5990,8 @@ define i32 @sqdmlal_s(i16 %A, i16 %B, i32 %C) nounwind sanitize_memory { ; CHECK-LABEL: define i32 @sqdmlal_s( ; CHECK-SAME: i16 [[A:%.*]], i16 [[B:%.*]], i32 [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> splat (i16 -1), i16 [[TMP6]], i64 0 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> undef, i16 [[A]], i64 0 @@ -6026,8 +6026,8 @@ define i64 @sqdmlal_d(i32 %A, i32 %B, i64 %C) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @sqdmlal_d( ; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR0]] { ; 
CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0 ; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0 @@ -6052,8 +6052,8 @@ define i32 @sqdmlsl_s(i16 %A, i16 %B, i32 %C) nounwind sanitize_memory { ; CHECK-LABEL: define i32 @sqdmlsl_s( ; CHECK-SAME: i16 [[A:%.*]], i16 [[B:%.*]], i32 [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i16> splat (i16 -1), i16 [[TMP6]], i64 0 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> undef, i16 [[A]], i64 0 @@ -6088,8 +6088,8 @@ define i64 @sqdmlsl_d(i32 %A, i32 %B, i64 %C) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @sqdmlsl_d( ; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0 ; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i32 [[TMP2]], 0 @@ -6114,7 +6114,7 @@ define <16 x i8> @test_pmull_64(i64 %l, i64 %r) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @test_pmull_64( ; CHECK-SAME: i64 [[L:%.*]], i64 [[R:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0 @@ -6132,7 +6132,7 @@ define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind saniti ; CHECK-LABEL: define <16 x i8> @test_pmull_high_64( ; CHECK-SAME: <2 x i64> [[L:%.*]], <2 x i64> [[R:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, 
ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1 ; CHECK-NEXT: [[L_HI:%.*]] = extractelement <2 x i64> [[L]], i32 1 @@ -6158,7 +6158,7 @@ define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind saniti ; CHECK-LABEL: define <1 x i64> @test_mul_v1i64( ; CHECK-SAME: <1 x i64> [[LHS:%.*]], <1 x i64> [[RHS:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <1 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[PROD:%.*]] = mul <1 x i64> [[LHS]], [[RHS]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll index 7fa9b412b0f03..42d2351a88cc2 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/arm64-vshift.ll @@ -11,7 +11,7 @@ define <8 x i8> @sqshl8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @sqshl8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]] @@ -55,7 +55,7 @@ define <4 x i16> @sqshl4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @sqshl4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -99,7 +99,7 @@ define <2 x i32> @sqshl2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @sqshl2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -143,7 +143,7 @@ define <1 x i64> @sqshl1d(ptr %A, ptr %B) nounwind sanitize_memory 
{ ; CHECK-LABEL: define <1 x i64> @sqshl1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -214,7 +214,7 @@ define i64 @sqshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @sqshl_scalar( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -283,7 +283,7 @@ define <8 x i8> @uqshl8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @uqshl8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -327,7 +327,7 @@ define <4 x i16> @uqshl4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @uqshl4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -371,7 +371,7 @@ define <2 x i32> @uqshl2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @uqshl2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -415,7 +415,7 @@ define <16 x i8> @sqshl16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @sqshl16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -460,7 +460,7 @@ define <8 x i16> @sqshl8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqshl8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -505,7 +505,7 @@ define <4 x i32> @sqshl4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqshl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -550,7 +550,7 @@ define <2 x i64> @sqshl2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqshl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -595,7 +595,7 @@ define <16 x i8> @uqshl16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @uqshl16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -640,7 +640,7 @@ define <8 x i16> @uqshl8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @uqshl8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load 
i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -685,7 +685,7 @@ define <4 x i32> @uqshl4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @uqshl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -730,7 +730,7 @@ define <2 x i64> @uqshl2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @uqshl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -775,7 +775,7 @@ define <1 x i64> @uqshl1d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <1 x i64> @uqshl1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -846,7 +846,7 @@ define i64 @uqshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @uqshl_scalar( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -938,7 +938,7 @@ define <8 x i8> @srshl8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @srshl8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; 
CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -982,7 +982,7 @@ define <4 x i16> @srshl4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @srshl4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1026,7 +1026,7 @@ define <2 x i32> @srshl2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @srshl2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1070,7 +1070,7 @@ define <1 x i64> @srshl1d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <1 x i64> @srshl1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1141,7 +1141,7 @@ define i64 @srshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @srshl_scalar( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1210,7 +1210,7 @@ define <8 x i8> @urshl8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @urshl8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1254,7 +1254,7 @@ define <4 x i16> @urshl4h(ptr %A, ptr %B) nounwind 
sanitize_memory { ; CHECK-LABEL: define <4 x i16> @urshl4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1298,7 +1298,7 @@ define <2 x i32> @urshl2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @urshl2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1342,7 +1342,7 @@ define <1 x i64> @urshl1d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <1 x i64> @urshl1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1413,7 +1413,7 @@ define i64 @urshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @urshl_scalar( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1482,7 +1482,7 @@ define <16 x i8> @srshl16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @srshl16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1527,7 +1527,7 @@ define <8 x i16> @srshl8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @srshl8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1572,7 +1572,7 @@ define <4 x i32> @srshl4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @srshl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1617,7 +1617,7 @@ define <2 x i64> @srshl2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @srshl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1662,7 +1662,7 @@ define <16 x i8> @urshl16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @urshl16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1707,7 +1707,7 @@ define <8 x i16> @urshl8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @urshl8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1752,7 +1752,7 @@ define <4 x i32> @urshl4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @urshl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 
8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1797,7 +1797,7 @@ define <2 x i64> @urshl2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @urshl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1864,7 +1864,7 @@ define <8 x i8> @sqrshl8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @sqrshl8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1908,7 +1908,7 @@ define <4 x i16> @sqrshl4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @sqrshl4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1952,7 +1952,7 @@ define <2 x i32> @sqrshl2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @sqrshl2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1996,7 +1996,7 @@ define <8 x i8> @uqrshl8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @uqrshl8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2040,7 +2040,7 @@ define <4 x i16> @uqrshl4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @uqrshl4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2084,7 +2084,7 @@ define <2 x i32> @uqrshl2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @uqrshl2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2128,7 +2128,7 @@ define <16 x i8> @sqrshl16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @sqrshl16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2173,7 +2173,7 @@ define <8 x i16> @sqrshl8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqrshl8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2218,7 +2218,7 @@ define <4 x i32> @sqrshl4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqrshl4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ 
-2263,7 +2263,7 @@ define <2 x i64> @sqrshl2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sqrshl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2308,7 +2308,7 @@ define <1 x i64> @sqrshl1d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <1 x i64> @sqrshl1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2379,7 +2379,7 @@ define i64 @sqrshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @sqrshl_scalar( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2448,7 +2448,7 @@ define <16 x i8> @uqrshl16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @uqrshl16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2493,7 +2493,7 @@ define <8 x i16> @uqrshl8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @uqrshl8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2538,7 +2538,7 @@ define <4 x i32> @uqrshl4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @uqrshl4s( ; CHECK-SAME: ptr 
[[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2583,7 +2583,7 @@ define <2 x i64> @uqrshl2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @uqrshl2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2628,7 +2628,7 @@ define <1 x i64> @uqrshl1d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <1 x i64> @uqrshl1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -2699,7 +2699,7 @@ define i64 @uqrshl_scalar(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @uqrshl_scalar( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3639,7 +3639,7 @@ define <16 x i8> @rshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @rshrn16b( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3682,7 +3682,7 @@ define <8 x i16> @rshrn8h(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @rshrn8h( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load 
i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3725,7 +3725,7 @@ define <4 x i32> @rshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @rshrn4s( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3862,7 +3862,7 @@ define <16 x i8> @shrn16b(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @shrn16b( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3908,7 +3908,7 @@ define <8 x i16> @shrn8h(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @shrn8h( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -3954,7 +3954,7 @@ define <4 x i32> @shrn4s(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @shrn4s( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4101,7 +4101,7 @@ define <16 x i8> @sqshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @sqshrn16b( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, 
ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4144,7 +4144,7 @@ define <8 x i16> @sqshrn8h(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqshrn8h( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4187,7 +4187,7 @@ define <4 x i32> @sqshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqshrn4s( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4331,7 +4331,7 @@ define <16 x i8> @sqshrun16b(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @sqshrun16b( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4374,7 +4374,7 @@ define <8 x i16> @sqshrun8h(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqshrun8h( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4417,7 +4417,7 @@ define <4 x i32> @sqshrun4s(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqshrun4s( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4561,7 +4561,7 @@ define <16 x i8> @sqrshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @sqrshrn16b( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4604,7 +4604,7 @@ define <8 x i16> @sqrshrn8h(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqrshrn8h( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4647,7 +4647,7 @@ define <4 x i32> @sqrshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqrshrn4s( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4791,7 +4791,7 @@ define <16 x i8> @sqrshrun16b(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @sqrshrun16b( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -4834,7 +4834,7 @@ define <8 x i16> @sqrshrun8h(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sqrshrun8h( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label 
[[TMP4:%.*]], !prof [[PROF1]] @@ -4877,7 +4877,7 @@ define <4 x i32> @sqrshrun4s(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sqrshrun4s( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -5021,7 +5021,7 @@ define <16 x i8> @uqrshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @uqrshrn16b( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -5064,7 +5064,7 @@ define <8 x i16> @uqrshrn8h(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @uqrshrn8h( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -5107,7 +5107,7 @@ define <4 x i32> @uqrshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @uqrshrn4s( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -5251,7 +5251,7 @@ define <16 x i8> @uqshrn16b(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @uqshrn16b( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -5294,7 +5294,7 @@ define <8 x i16> @uqshrn8h(ptr %ret, ptr %A) nounwind 
sanitize_memory { ; CHECK-LABEL: define <8 x i16> @uqshrn8h( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -5337,7 +5337,7 @@ define <4 x i32> @uqshrn4s(ptr %ret, ptr %A) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @uqshrn4s( ; CHECK-SAME: ptr [[RET:%.*]], ptr [[A:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -6845,7 +6845,7 @@ define <8 x i8> @ursra8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @ursra8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -6888,7 +6888,7 @@ define <4 x i16> @ursra4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @ursra4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -6931,7 +6931,7 @@ define <2 x i32> @ursra2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @ursra2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -6974,7 +6974,7 @@ define <16 x i8> @ursra16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @ursra16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load 
i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7017,7 +7017,7 @@ define <8 x i16> @ursra8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @ursra8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7060,7 +7060,7 @@ define <4 x i32> @ursra4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @ursra4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7103,7 +7103,7 @@ define <2 x i64> @ursra2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @ursra2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7146,7 +7146,7 @@ define <1 x i64> @ursra1d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <1 x i64> @ursra1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7189,7 +7189,7 @@ define i64 @ursra_scalar(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @ursra_scalar( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), 
align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7232,7 +7232,7 @@ define <8 x i8> @srsra8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @srsra8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7275,7 +7275,7 @@ define <4 x i16> @srsra4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @srsra4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7318,7 +7318,7 @@ define <2 x i32> @srsra2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @srsra2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7361,7 +7361,7 @@ define <16 x i8> @srsra16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @srsra16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7404,7 +7404,7 @@ define <8 x i16> @srsra8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @srsra8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7447,7 +7447,7 @@ define <4 x i32> @srsra4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @srsra4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7490,7 +7490,7 @@ define <2 x i64> @srsra2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @srsra2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7533,7 +7533,7 @@ define <1 x i64> @srsra1d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <1 x i64> @srsra1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7576,7 +7576,7 @@ define i64 @srsra_scalar(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define i64 @srsra_scalar( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7619,7 +7619,7 @@ define <8 x i8> @usra8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @usra8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7662,7 +7662,7 @@ 
define <4 x i16> @usra4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @usra4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7705,7 +7705,7 @@ define <2 x i32> @usra2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @usra2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7748,7 +7748,7 @@ define <16 x i8> @usra16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @usra16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7791,7 +7791,7 @@ define <8 x i16> @usra8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @usra8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7834,7 +7834,7 @@ define <4 x i32> @usra4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @usra4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7877,7 +7877,7 @@ define <2 x i64> @usra2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @usra2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; 
CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7920,7 +7920,7 @@ define <1 x i64> @usra1d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <1 x i64> @usra1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -7963,7 +7963,7 @@ define <8 x i8> @ssra8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @ssra8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8006,7 +8006,7 @@ define <4 x i16> @ssra4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @ssra4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8049,7 +8049,7 @@ define <2 x i32> @ssra2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @ssra2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8092,7 +8092,7 @@ define <16 x i8> @ssra16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @ssra16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8135,7 +8135,7 @@ define <8 x i16> @ssra8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @ssra8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8178,7 +8178,7 @@ define <4 x i32> @ssra4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @ssra4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8221,7 +8221,7 @@ define <2 x i64> @ssra2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @ssra2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8264,7 +8264,7 @@ define <8 x i8> @shr_orr8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @shr_orr8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8313,7 +8313,7 @@ define <4 x i16> @shr_orr4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @shr_orr4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8362,7 +8362,7 @@ define <2 x i32> @shr_orr2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @shr_orr2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8411,7 +8411,7 @@ define <16 x i8> @shr_orr16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @shr_orr16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8460,7 +8460,7 @@ define <8 x i16> @shr_orr8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @shr_orr8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8509,7 +8509,7 @@ define <4 x i32> @shr_orr4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @shr_orr4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8558,7 +8558,7 @@ define <2 x i64> @shr_orr2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @shr_orr2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label 
[[TMP4:%.*]], !prof [[PROF1]] @@ -8607,7 +8607,7 @@ define <8 x i8> @shl_orr8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @shl_orr8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8656,7 +8656,7 @@ define <4 x i16> @shl_orr4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @shl_orr4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8705,7 +8705,7 @@ define <2 x i32> @shl_orr2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @shl_orr2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8754,7 +8754,7 @@ define <16 x i8> @shl_orr16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @shl_orr16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8803,7 +8803,7 @@ define <8 x i16> @shl_orr8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @shl_orr8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8852,7 +8852,7 @@ define <4 x i32> @shl_orr4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: 
define <4 x i32> @shl_orr4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8901,7 +8901,7 @@ define <2 x i64> @shl_orr2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @shl_orr2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -8989,7 +8989,7 @@ define <8 x i8> @sli8b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i8> @sli8b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -9037,7 +9037,7 @@ define <4 x i16> @sli4h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i16> @sli4h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -9085,7 +9085,7 @@ define <2 x i32> @sli2s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i32> @sli2s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -9133,7 +9133,7 @@ define <1 x i64> @sli1d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <1 x i64> @sli1d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: 
[[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -9181,7 +9181,7 @@ define <16 x i8> @sli16b(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <16 x i8> @sli16b( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -9229,7 +9229,7 @@ define <8 x i16> @sli8h(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <8 x i16> @sli8h( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -9277,7 +9277,7 @@ define <4 x i32> @sli4s(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <4 x i32> @sli4s( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -9325,7 +9325,7 @@ define <2 x i64> @sli2d(ptr %A, ptr %B) nounwind sanitize_memory { ; CHECK-LABEL: define <2 x i64> @sli2d( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -9383,7 +9383,7 @@ define <1 x i64> @ashr_v1i64(<1 x i64> %a, <1 x i64> %b) sanitize_memory { ; CHECK-LABEL: define <1 x i64> @ashr_v1i64( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <1 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <1 x i1> [[TMP3]] to <1 x i64> @@ -9402,8 +9402,8 @@ define void @sqshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sanit ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> @@ -9437,8 +9437,8 @@ define void @uqshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sanit ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> @@ -9472,8 +9472,8 @@ define void @srshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sanit ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> @@ -9507,8 +9507,8 @@ define void @urshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sanit ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 
8 -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> @@ -9542,8 +9542,8 @@ define void @sqshlu_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) sani ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> @@ -9577,8 +9577,8 @@ define void @sshl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) saniti ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> @@ -9612,8 +9612,8 @@ define void @ushl_zero_shift_amount(<2 x i64> %a, <2 x i64> %b, ptr %dst) saniti ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[DST:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = 
shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP0]], <2 x i64> [[TMP1]], <2 x i32> diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll index 8fed5a78d6b79..ef200402fa15b 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst_float.ll @@ -37,16 +37,16 @@ target triple = "aarch64--linux-android9001" define void @st1x2_v1f64(<1 x double> %A, <1 x double> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x2_v1f64( ; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0:![0-9]+]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1:![0-9]+]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]] ; CHECK-NEXT: unreachable @@ -61,16 +61,16 @@ define void @st1x2_v1f64(<1 x double> %A, <1 x double> %B, ptr %p) sanitize_memo define void @st1x2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x2_v1i64( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: 
unreachable @@ -85,16 +85,16 @@ define void @st1x2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %p) sanitize_memory { define void @st1x2_v2f64(<2 x double> %A, <2 x double> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x2_v2f64( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -109,16 +109,16 @@ define void @st1x2_v2f64(<2 x double> %A, <2 x double> %B, ptr %p) sanitize_memo define void @st1x2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x2_v2i64( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -133,17 +133,17 @@ define void @st1x2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %p) sanitize_memory { define void @st1x3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x3_v1f64( ; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], <1 x double> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) 
to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -158,17 +158,17 @@ define void @st1x3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr define void @st1x3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x3_v1i64( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -183,17 +183,17 @@ define void @st1x3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %p) sanit define void @st1x3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x3_v2f64( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], ptr [[P:%.*]]) 
#[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -208,17 +208,17 @@ define void @st1x3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr define void @st1x3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x3_v2i64( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -233,18 +233,18 @@ define void @st1x3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %p) sanit define void @st1x4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %p) 
sanitize_memory { ; CHECK-LABEL: define void @st1x4_v1f64( ; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], <1 x double> [[C:%.*]], <1 x double> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -259,18 +259,18 @@ define void @st1x4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x define void @st1x4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x4_v1i64( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; 
CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -285,18 +285,18 @@ define void @st1x4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, define void @st1x4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x4_v2f64( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], <2 x double> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -311,18 +311,18 @@ define void @st1x4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x define void @st1x4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st1x4_v2i64( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: 
[[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -337,16 +337,16 @@ define void @st1x4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, define void @st2_v16i8(<16 x i8> %A, <16 x i8> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v16i8( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -361,16 +361,16 @@ define void @st2_v16i8(<16 x i8> %A, <16 x i8> %B, ptr %p) sanitize_memory { define void @st2_v1f64(<1 x double> %A, <1 x double> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v1f64( ; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -385,16 +385,16 @@ define void @st2_v1f64(<1 x double> %A, <1 x double> %B, ptr %p) sanitize_memory define void @st2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v1i64( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -409,16 +409,16 @@ define void @st2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %p) sanitize_memory { define void @st2_v2f32(<2 x float> %A, <2 x float> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v2f32( ; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne 
i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -433,16 +433,16 @@ define void @st2_v2f32(<2 x float> %A, <2 x float> %B, ptr %p) sanitize_memory { define void @st2_v2f64(<2 x double> %A, <2 x double> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v2f64( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -457,16 +457,16 @@ define void @st2_v2f64(<2 x double> %A, <2 x double> %B, ptr %p) sanitize_memory define void @st2_v2i32(<2 x i32> %A, <2 x i32> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v2i32( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -481,16 +481,16 @@ define void @st2_v2i32(<2 x i32> %A, <2 x i32> %B, ptr %p) sanitize_memory { define void @st2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void 
@st2_v2i64( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -505,16 +505,16 @@ define void @st2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %p) sanitize_memory { define void @st2_v4f16(<4 x half> %A, <4 x half> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v4f16( ; CHECK-SAME: <4 x half> [[A:%.*]], <4 x half> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -529,16 +529,16 @@ define void @st2_v4f16(<4 x half> %A, <4 x half> %B, ptr %p) sanitize_memory { define void @st2_v4f32(<4 x float> %A, <4 x float> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v4f32( ; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -553,16 +553,16 @@ define void @st2_v4f32(<4 x float> %A, <4 x float> %B, ptr %p) sanitize_memory { define void @st2_v4i16(<4 x i16> %A, <4 x i16> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v4i16( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -577,16 +577,16 @@ define void @st2_v4i16(<4 x i16> %A, <4 x i16> %B, ptr %p) sanitize_memory { define void @st2_v4i32(<4 x i32> %A, <4 x i32> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v4i32( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: 
[[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -601,16 +601,16 @@ define void @st2_v4i32(<4 x i32> %A, <4 x i32> %B, ptr %p) sanitize_memory { define void @st2_v8f16(<8 x half> %A, <8 x half> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v8f16( ; CHECK-SAME: <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -625,16 +625,16 @@ define void @st2_v8f16(<8 x half> %A, <8 x half> %B, ptr %p) sanitize_memory { define void @st2_v8i16(<8 x i16> %A, <8 x i16> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st2_v8i16( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -649,16 +649,16 @@ define void @st2_v8i16(<8 x i16> %A, <8 x i16> %B, ptr %p) sanitize_memory { define void @st2_v8i8(<8 x i8> %A, <8 x i8> %B, ptr %p) sanitize_memory { ; CHECK-LABEL: 
define void @st2_v8i8( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576 ; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], ptr [[TMP6]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -673,17 +673,17 @@ define void @st2_v8i8(<8 x i8> %A, <8 x i8> %B, ptr %p) sanitize_memory { define void @st3_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v16i8( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -698,17 +698,17 @@ define void @st3_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %p) sanitiz define void @st3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v1f64( ; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], <1 x double> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 
add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -723,17 +723,17 @@ define void @st3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %p define void @st3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v1i64( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -748,17 +748,17 @@ define void @st3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %p) sanitiz define void @st3_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v2f32( ; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], 
<2 x float> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -773,17 +773,17 @@ define void @st3_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, ptr %p) s define void @st3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v2f64( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -798,17 +798,17 @@ define void @st3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %p define void @st3_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr 
%p) sanitize_memory { ; CHECK-LABEL: define void @st3_v2i32( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -823,17 +823,17 @@ define void @st3_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %p) sanitiz define void @st3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v2i64( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -848,17 +848,17 @@ define void @st3_v2i64(<2 x i64> %A, <2 x i64> %B, 
<2 x i64> %C, ptr %p) sanitiz define void @st3_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v4f16( ; CHECK-SAME: <4 x half> [[A:%.*]], <4 x half> [[B:%.*]], <4 x half> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i16> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -873,17 +873,17 @@ define void @st3_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr %p) sani define void @st3_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v4f32( ; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void 
@__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -898,17 +898,17 @@ define void @st3_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, ptr %p) s define void @st3_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v4i16( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i16> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -923,17 +923,17 @@ define void @st3_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %p) sanitiz define void @st3_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v4i32( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 
[[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -948,17 +948,17 @@ define void @st3_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %p) sanitiz define void @st3_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v8f16( ; CHECK-SAME: <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -973,17 +973,17 @@ define void @st3_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, ptr %p) sani define void @st3_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v8i16( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; 
CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -998,17 +998,17 @@ define void @st3_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %p) sanitiz define void @st3_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st3_v8i8( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], ptr [[TMP7]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1023,18 +1023,18 @@ define void @st3_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %p) sanitize_me define void @st4_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v16i8( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP2]], <16 x i8> [[TMP3]], <16 x i8> [[TMP4]], <16 x i8> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1049,18 +1049,18 @@ define void @st4_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, p define void @st4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v1f64( ; CHECK-SAME: <1 x double> [[A:%.*]], <1 x double> [[B:%.*]], <1 x double> [[C:%.*]], <1 x double> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1075,18 +1075,18 @@ define void @st4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x d define void @st4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v1i64( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = 
load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i64> [[TMP4]], <1 x i64> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1101,18 +1101,18 @@ define void @st4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, p define void @st4_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v2f32( ; CHECK-SAME: <2 x float> [[A:%.*]], <2 x float> [[B:%.*]], <2 x float> [[C:%.*]], <2 x float> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], <2 x i32> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: 
unreachable @@ -1127,18 +1127,18 @@ define void @st4_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x floa define void @st4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v2f64( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], <2 x double> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1153,18 +1153,18 @@ define void @st4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x d define void @st4_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v2i32( ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], <2 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i32>, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], <2 x i32> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1179,18 +1179,18 @@ define void @st4_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, p define void @st4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v2i64( ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i64> [[TMP4]], <2 x i64> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1205,18 +1205,18 @@ define void @st4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, p define void @st4_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, <4 x half> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v4f16( ; CHECK-SAME: <4 x half> [[A:%.*]], <4 x half> [[B:%.*]], <4 x half> [[C:%.*]], <4 x half> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1231,18 +1231,18 @@ define void @st4_v4f16(<4 x half> %A, <4 x half> %B, <4 x half> %C, <4 x half> % define void @st4_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v4f32( ; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], <4 x float> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call 
void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1257,18 +1257,18 @@ define void @st4_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x floa define void @st4_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v4i16( ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], <4 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i16> [[TMP4]], <4 x i16> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1283,18 +1283,18 @@ define void @st4_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, p define void @st4_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v4i32( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = 
load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> [[TMP4]], <4 x i32> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1309,18 +1309,18 @@ define void @st4_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, p define void @st4_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, <8 x half> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v8f16( ; CHECK-SAME: <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]], <8 x half> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1335,18 +1335,18 @@ define void @st4_v8f16(<8 x half> %A, <8 x half> %B, <8 x half> %C, <8 x half> % define void @st4_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v8i16( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: 
[[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i16> [[TMP4]], <8 x i16> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1361,18 +1361,18 @@ define void @st4_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, p define void @st4_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %p) sanitize_memory { ; CHECK-LABEL: define void @st4_v8i8( ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr ; CHECK-NEXT: call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP2]], <8 x i8> [[TMP3]], <8 x i8> [[TMP4]], <8 x i8> [[TMP5]], ptr [[TMP8]]) ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] ; CHECK: 9: ; CHECK-NEXT: call void 
@__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable @@ -1384,5 +1384,5 @@ define void @st4_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %p ret void } ;. -; CHECK: [[PROF0]] = !{!"branch_weights", i32 1, i32 1048575} +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575} ;. diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll index f3cceb7c075b2..b8e54a700149c 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll @@ -61,17 +61,17 @@ define i32 @bar() { ; array. General purpose registers are saved at positions from 0 to 64, Floating ; point and SIMD are saved from 64 to 192, and the remaining from 192. ; CHECK-LABEL: @bar -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 8 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 16 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 64 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 80 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 24 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 32 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 96 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 40 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 48 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 56 -; CHECK: store {{.*}} @__msan_va_arg_tls {{.*}} 192 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 8 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 16 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 64 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 80 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 24 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 32 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 96 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 40 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 48 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 56 +; CHECK: store {{.*}} @__msan_va_arg_tls, i64 192 ; CHECK: store {{.*}} 8, {{.*}} @__msan_va_arg_overflow_size_tls ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are @@ -97,6 +97,6 @@ entry: } ; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed. -; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) -; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800) +; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792) +; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800) declare i64 @sum(i64 %n, ...) 
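The vararg.ll comments above describe the layout these checks encode: on AArch64, MSan mirrors the va_list register save area in @__msan_va_arg_tls, with general-purpose register shadows at byte offsets 0-64, FP/SIMD register shadows at offsets 64-192, and shadows for stack-passed (overflow) arguments from offset 192 onward. The hand-written sketch below is illustrative only and is not part of the patch; the function name @example, the particular offsets, and the zero shadow values are assumptions chosen to show the shape of the new constant-GEP addressing, not output copied from the tests.

; Sketch: vararg shadow stores addressed as byte offsets into the TLS array.
@__msan_va_arg_tls = external thread_local global [100 x i64]

define void @example() {
  ; a GP-register vararg's shadow lands in bytes 0..64
  store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8
  ; an FP/SIMD-register vararg's shadow lands in bytes 64..192
  store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8
  ; a stack-passed vararg's shadow lands at byte 192 and beyond
  store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8
  ret void
}

The getelementptr (i8, ptr @__msan_va_arg_tls, i64 N) constants express the same byte offsets as the old inttoptr (i64 add (i64 ptrtoint ...)) expressions but keep the address computation as a pointer-typed GEP instead of an int/ptr round-trip, which appears to be the point of the change reflected in the updated CHECK lines throughout these tests.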
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll index 06a34ac469e8c..d246e969f2522 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll @@ -39,9 +39,9 @@ define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef %arg) sanitize_memo ; CHECK-NEXT: [[_MSPROP:%.*]] = zext i8 [[_MSLD]] to i32 ; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP7]] to i32 ; CHECK-NEXT: store i8 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i8, i32, ...) @_Z5test2IcEvT_iz(i8 noundef [[TMP7]], i32 noundef 1, i32 noundef [[CONV]]) ; CHECK-NEXT: ret void @@ -80,9 +80,9 @@ define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_mem ; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4 ; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i32, i32, ...) 
@_Z5test2IiEvT_iz(i32 noundef [[TMP7]], i32 noundef 1, i32 noundef [[TMP7]]) ; CHECK-NEXT: ret void @@ -122,9 +122,9 @@ define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_m ; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[_MSLD]] to i64 ; CHECK-NEXT: [[CONV:%.*]] = fpext float [[TMP7]] to double ; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef [[TMP7]], i32 noundef 1, double noundef [[CONV]]) ; CHECK-NEXT: ret void @@ -163,9 +163,9 @@ define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_ ; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (double, i32, ...) @_Z5test2IdEvT_iz(double noundef [[TMP7]], i32 noundef 1, double noundef [[TMP7]]) ; CHECK-NEXT: ret void @@ -203,9 +203,9 @@ define linkonce_odr dso_local void @_Z4testIeEvT_(fp128 noundef %arg) sanitize_m ; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i128, ptr [[TMP10]], align 16 ; CHECK-NEXT: store i128 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i128 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i128 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i128 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i128 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (fp128, i32, ...) 
@_Z5test2IeEvT_iz(fp128 noundef [[TMP7]], i32 noundef 1, fp128 noundef [[TMP7]]) ; CHECK-NEXT: ret void @@ -243,9 +243,9 @@ define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitiz ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i64, i32, ...) @_Z5test2I6IntIntEvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]]) ; CHECK-NEXT: ret void @@ -302,9 +302,9 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_([2 x i64] %arg.coer ; CHECK-NEXT: [[TMP19:%.*]] = insertvalue [2 x i64] [[TMP18]], i64 [[_MSLD1]], 1 ; CHECK-NEXT: [[DOTFCA_1_INSERT3:%.*]] = insertvalue [2 x i64] [[DOTFCA_0_INSERT2]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1 ; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void ([2 x i64], i32, ...) 
@_Z5test2I10Int64Int64EvT_iz([2 x i64] [[DOTFCA_1_INSERT3]], i32 noundef 1, [2 x i64] [[DOTFCA_1_INSERT3]]) ; CHECK-NEXT: ret void @@ -368,9 +368,9 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_([2 x double] alig ; CHECK-NEXT: [[TMP19:%.*]] = insertvalue [2 x i64] [[TMP18]], i64 [[_MSLD1]], 1 ; CHECK-NEXT: [[DOTFCA_1_INSERT3:%.*]] = insertvalue [2 x double] [[DOTFCA_0_INSERT2]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], 1 ; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void ([2 x double], i32, ...) @_Z5test2I12DoubleDoubleEvT_iz([2 x double] alignstack(8) [[DOTFCA_1_INSERT3]], i32 noundef 1, [2 x double] alignstack(8) [[DOTFCA_1_INSERT3]]) ; CHECK-NEXT: ret void @@ -464,9 +464,9 @@ define linkonce_odr dso_local void @_Z4testI7Double4EvT_([4 x double] alignstack ; CHECK-NEXT: [[TMP35:%.*]] = insertvalue [4 x i64] [[TMP34]], i64 [[_MSLD3]], 3 ; CHECK-NEXT: [[DOTFCA_3_INSERT7:%.*]] = insertvalue [4 x double] [[DOTFCA_2_INSERT6]], double [[AGG_TMP_SROA_4_0_COPYLOAD]], 3 ; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: store [4 x i64] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void ([4 x double], i32, ...) 
@_Z5test2I7Double4EvT_iz([4 x double] alignstack(8) [[DOTFCA_3_INSERT7]], i32 noundef 1, [4 x double] alignstack(8) [[DOTFCA_3_INSERT7]]) ; CHECK-NEXT: ret void @@ -540,9 +540,9 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_([2 x i64] %arg.coe ; CHECK-NEXT: [[TMP19:%.*]] = insertvalue [2 x i64] [[TMP18]], i64 [[_MSLD1]], 1 ; CHECK-NEXT: [[DOTFCA_1_INSERT3:%.*]] = insertvalue [2 x i64] [[DOTFCA_0_INSERT2]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1 ; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store [2 x i64] [[TMP19]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void ([2 x i64], i32, ...) @_Z5test2I11DoubleFloatEvT_iz([2 x i64] [[DOTFCA_1_INSERT3]], i32 noundef 1, [2 x i64] [[DOTFCA_1_INSERT3]]) ; CHECK-NEXT: ret void @@ -606,9 +606,9 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_([2 x fp128] aligns ; CHECK-NEXT: [[TMP19:%.*]] = insertvalue [2 x i128] [[TMP18]], i128 [[_MSLD1]], 1 ; CHECK-NEXT: [[DOTFCA_1_INSERT5:%.*]] = insertvalue [2 x fp128] [[DOTFCA_0_INSERT4]], fp128 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1 ; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: store [2 x i128] [[TMP19]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void ([2 x fp128], i32, ...) 
@_Z5test2I11LongDouble2EvT_iz([2 x fp128] alignstack(16) [[DOTFCA_1_INSERT5]], i32 noundef 1, [2 x fp128] alignstack(16) [[DOTFCA_1_INSERT5]]) ; CHECK-NEXT: ret void @@ -702,9 +702,9 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_([4 x fp128] aligns ; CHECK-NEXT: [[TMP35:%.*]] = insertvalue [4 x i128] [[TMP34]], i128 [[_MSLD3]], 3 ; CHECK-NEXT: [[DOTFCA_3_INSERT7:%.*]] = insertvalue [4 x fp128] [[DOTFCA_2_INSERT6]], fp128 [[AGG_TMP_SROA_4_0_COPYLOAD]], 3 ; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void ([4 x fp128], i32, ...) @_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) [[DOTFCA_3_INSERT7]], i32 noundef 1, [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT7]]) ; CHECK-NEXT: ret void @@ -759,29 +759,19 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; 
CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -852,29 +842,19 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -937,29 +917,19 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: 
[[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -1022,29 +992,19 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -1107,29 +1067,19 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: 
[[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -1192,29 +1142,19 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr 
i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -1277,29 +1217,19 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -1362,29 +1292,19 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 
[[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -1447,29 +1367,19 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], 
[[TMP34]] @@ -1532,29 +1442,19 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -1617,29 +1517,19 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to 
i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -1702,29 +1592,19 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 0 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 0 ; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 8 -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8 -; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 24 -; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[ARGS]], i64 24 ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = sext i32 [[TMP22]] to i64 ; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]] ; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], 16 -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 28 -; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i8, ptr [[ARGS]], i64 28 ; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 ; CHECK-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]] @@ -1838,29 +1718,29 @@ define linkonce_odr dso_local void @_Z4test2I11LongDouble4EvT_([4 x fp128] align ; CHECK-NEXT: [[TMP35:%.*]] = insertvalue [4 x i128] [[TMP34]], i128 [[_MSLD3]], 3 ; CHECK-NEXT: [[DOTFCA_3_INSERT121:%.*]] = insertvalue [4 x fp128] [[DOTFCA_2_INSERT120]], fp128 [[AGG_TMP_SROA_4_0_COPYLOAD]], 3 ; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), 
align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), i8 0, i32 32, i1 false) +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 264), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8 +; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8 +; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), i8 0, i32 32, i1 false) ; CHECK-NEXT: store i64 1216, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void ([4 x fp128], i32, ...) 
@_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], i32 noundef 20, [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]]) ; CHECK-NEXT: ret void diff --git a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll index e05018c2d5372..cbdae2526eb38 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll @@ -59,12 +59,12 @@ define i32 @bar() { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) 
@foo(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -87,11 +87,11 @@ define i32 @bar2() { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -112,205 +112,205 @@ define dso_local i64 @many_args() { ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 
8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8 +; CHECK-NEXT: store 
i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 
8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to 
ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), 
i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), 
align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8 +; CHECK-NEXT: 
store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr 
getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8 ; CHECK-NEXT: store i64 960, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1) diff --git a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll index e6d3a4b2994ad..a0dcefd498c25 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128" @@ -9,12 +10,36 @@ declare void @llvm.va_start(ptr) #2 declare void @llvm.va_end(ptr) #2 declare void @llvm.lifetime.end.p0(ptr nocapture) #1 define i32 @foo(i32 %guard, ...) 
{ -; CHECK-LABEL: @foo -; CHECK: [[TMP1:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls -; CHECK: [[TMP3:%.*]] = alloca {{.*}} [[TMP1]] -; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 0, i64 [[TMP1]], i1 false) -; CHECK: [[TMP4:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP4]], i1 false) +; CHECK-LABEL: define i32 @foo( +; CHECK-SAME: i32 [[GUARD:%.*]], ...) { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8 +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false) +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 +; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8 +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080 +; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false) +; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]]) +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 0 ; %vl = alloca ptr, align 8 call void @llvm.lifetime.start.p0(ptr %vl) @@ -27,11 +52,22 @@ define i32 @foo(i32 %guard, ...) { ;; Save the incoming shadow value from the arguments in the __msan_va_arg_tls ;; array. 
define i32 @bar() { -; CHECK-LABEL: @bar -; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls +; CHECK-LABEL: define i32 @bar() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] ; %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) ret i32 %1 @@ -40,23 +76,36 @@ define i32 @bar() { ;; Check multiple fixed arguments. declare i32 @foo2(i32 %g1, i32 %g2, ...) define i32 @bar2() { -; CHECK-LABEL: @bar2 -; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls +; CHECK-LABEL: define i32 @bar2() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] ; %1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00) ret i32 %1 } +; UTC_ARGS: --disable + ;; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are ;; passed to a variadic function. declare i64 @sum(i64 %n, ...) define dso_local i64 @many_args() { ;; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed. 
; CHECK-LABEL: @many_args -; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) -; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800) +; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792) +; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800) ; entry: %ret = call i64 (i64, ...) @sum(i64 120, diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll index 69a74a37a1f04..1187531e9a25c 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll @@ -1,9 +1,41 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128" target triple = "mips64--linux" define i32 @foo(i32 %guard, ...) { +; CHECK-LABEL: define i32 @foo( +; CHECK-SAME: i32 [[GUARD:%.*]], ...) { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8 +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false) +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 549755813888 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 549755813888 +; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8 +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 549755813888 +; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false) +; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]]) +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 0 +; %vl = alloca ptr, align 8 call void @llvm.lifetime.start.p0(ptr %vl) call void @llvm.va_start(ptr %vl) @@ -12,23 +44,29 @@ define i32 @foo(i32 %guard, ...) { ret i32 0 } -; First, check allocation of the save area. 
- -; CHECK-LABEL: @foo -; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls -; CHECK: [[C:%.*]] = alloca {{.*}} [[A]] - -; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[A]], i1 false) - -; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false) - declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.va_start(ptr) #2 declare void @llvm.va_end(ptr) #2 declare void @llvm.lifetime.end.p0(ptr nocapture) #1 define i32 @bar() { +; CHECK-LABEL: define i32 @bar() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) ret i32 %1 } @@ -36,23 +74,32 @@ define i32 @bar() { ; Save the incoming shadow value from the arguments in the __msan_va_arg_tls ; array. The first argument is stored at position 4, since it's right ; justified. -; CHECK-LABEL: @bar -; CHECK: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls ; Check multiple fixed arguments. declare i32 @foo2(i32 %g1, i32 %g2, ...) define i32 @bar2() { +; CHECK-LABEL: define i32 @bar2() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, i32, ...) 
@foo2(i32 0, i32 1, i64 2, double 3.000000e+00) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00) ret i32 %1 } -; CHECK-LABEL: @bar2 -; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls + +; UTC_ARGS: --disable ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are ; passed to a variadic function. @@ -77,8 +124,8 @@ entry: ; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed. ; CHECK-LABEL: @many_args -; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) -; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800) +; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792) +; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800) declare i64 @sum(i64 %n, ...) ; CHECK: declare void @__msan_maybe_warning_1(i8 signext, i32 signext) diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll index b19da8e9ff14b..a78285a191c8c 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll @@ -1,9 +1,41 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s target datalayout = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128" target triple = "mips64el--linux" define i32 @foo(i32 %guard, ...) { +; CHECK-LABEL: define i32 @foo( +; CHECK-SAME: i32 [[GUARD:%.*]], ...) 
{ +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8 +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false) +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 549755813888 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 549755813888 +; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8 +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 549755813888 +; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false) +; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]]) +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 0 +; %vl = alloca ptr, align 8 call void @llvm.lifetime.start.p0(ptr %vl) call void @llvm.va_start(ptr %vl) @@ -12,46 +44,60 @@ define i32 @foo(i32 %guard, ...) { ret i32 0 } -; First, check allocation of the save area. 
- -; CHECK-LABEL: @foo -; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls -; CHECK: [[C:%.*]] = alloca {{.*}} [[A]] - -; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[A]], i1 false) - -; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false) - declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.va_start(ptr) #2 declare void @llvm.va_end(ptr) #2 declare void @llvm.lifetime.end.p0(ptr nocapture) #1 define i32 @bar() { +; CHECK-LABEL: define i32 @bar() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) ret i32 %1 } ; Save the incoming shadow value from the arguments in the __msan_va_arg_tls ; array. -; CHECK-LABEL: @bar -; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls ; Check multiple fixed arguments. declare i32 @foo2(i32 %g1, i32 %g2, ...) define i32 @bar2() { +; CHECK-LABEL: define i32 @bar2() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, i32, ...) 
@foo2(i32 0, i32 1, i64 2, double 3.000000e+00) ret i32 %1 } -; CHECK-LABEL: @bar2 -; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls +; UTC_ARGS: --disable ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are ; passed to a variadic function. @@ -76,6 +122,6 @@ entry: ; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed. ; CHECK-LABEL: @many_args -; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) -; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800) +; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792) +; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800) declare i64 @sum(i64 %n, ...) diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll index 4d47b02bb2713..9257622f86a2c 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll @@ -59,12 +59,12 @@ define i32 @bar() { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) 
@foo(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -87,11 +87,11 @@ define i32 @bar2() { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -112,205 +112,205 @@ define dso_local i64 @many_args() { ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 
8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8 +; CHECK-NEXT: store 
i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 
8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to 
ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), 
i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), 
align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8 +; CHECK-NEXT: 
store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr 
getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8 ; CHECK-NEXT: store i64 960, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) 
@sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1) diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll index 98294e7c0383c..690dc2a22bd1b 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll @@ -59,12 +59,12 @@ define i32 @bar() { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) 
@foo(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -86,11 +86,11 @@ define i32 @bar2() { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -111,205 +111,205 @@ define dso_local i64 @many_args() { ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 
8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8 +; CHECK-NEXT: store 
i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 
8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to 
ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), 
i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), 
align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8 +; CHECK-NEXT: 
store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr 
getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8 ; CHECK-NEXT: store i64 960, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1) diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll index 9351067969050..6dc896f2fc84f 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll @@ -1,9 +1,47 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s target datalayout = "E-m:e-i64:64-n32:64" target triple = "powerpc64--linux" define i32 @foo(i32 %guard, ...) { +; CHECK-LABEL: define i32 @foo( +; CHECK-SAME: i32 [[GUARD:%.*]], ...) 
{ +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8 +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false) +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -246290604621825 +; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 17592186044416 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], 8796093022208 +; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP8]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -246290604621825 +; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP10]], 17592186044416 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 8796093022208 +; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP13]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr +; CHECK-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8 +; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64 +; CHECK-NEXT: [[TMP18:%.*]] = and i64 [[TMP17]], -246290604621825 +; CHECK-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], 17592186044416 +; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 8796093022208 +; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP21]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false) +; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]]) +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 0 +; %vl = alloca ptr, align 8 call void @llvm.lifetime.start.p0(ptr %vl) call void @llvm.va_start(ptr %vl) @@ -12,23 +50,29 @@ define i32 @foo(i32 %guard, ...) { ret i32 0 } -; First, check allocation of the save area. 
- -; CHECK-LABEL: @foo -; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls -; CHECK: [[C:%.*]] = alloca {{.*}} [[A]] - -; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[A]], i1 false) - -; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false) - declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.va_start(ptr) #2 declare void @llvm.va_end(ptr) #2 declare void @llvm.lifetime.end.p0(ptr nocapture) #1 define i32 @bar() { +; CHECK-LABEL: define i32 @bar() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) ret i32 %1 } @@ -36,14 +80,22 @@ define i32 @bar() { ; Save the incoming shadow value from the arguments in the __msan_va_arg_tls ; array. The first argument is stored at position 4, since it's right ; justified. -; CHECK-LABEL: @bar -; CHECK: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls ; Check vector argument. define i32 @bar2() { +; CHECK-LABEL: define i32 @bar2() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, <2 x i64> ) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) 
@foo(i32 0, <2 x i64> ) ret i32 %1 } @@ -51,50 +103,110 @@ define i32 @bar2() { ; The vector is at offset 16 of parameter save area, but __msan_va_arg_tls ; corresponds to offset 8+ of parameter save area - so the offset from ; __msan_va_arg_tls is actually misaligned. -; CHECK-LABEL: @bar2 -; CHECK: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls ; Check i64 array. define i32 @bar4() { +; CHECK-LABEL: define i32 @bar4() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8 +; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2]) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2]) ret i32 %1 } -; CHECK-LABEL: @bar4 -; CHECK: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8 -; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls ; Check i128 array. define i32 @bar5() { +; CHECK-LABEL: define i32 @bar5() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2]) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2]) ret i32 %1 } -; CHECK-LABEL: @bar5 -; CHECK: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls ; Check 8-aligned byval. 
define i32 @bar6(ptr %arg) { +; CHECK-LABEL: define i32 @bar6( +; CHECK-SAME: ptr [[ARG:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -246290604621825 +; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 8796093022208 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i8 0, i64 16, i1 false) +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -246290604621825 +; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 17592186044416 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 8796093022208 +; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP11]], i64 16, i1 false) +; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP12:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]]) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP12]] +; %1 = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 %arg) ret i32 %1 } -; CHECK-LABEL: @bar6 -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 {{.*}}, i64 16, i1 false) -; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls ; Check 16-aligned byval. define i32 @bar7(ptr %arg) { +; CHECK-LABEL: define i32 @bar7( +; CHECK-SAME: ptr [[ARG:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -246290604621825 +; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 8796093022208 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i8 0, i64 32, i1 false) +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -246290604621825 +; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 17592186044416 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 8796093022208 +; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), ptr align 8 [[TMP11]], i64 32, i1 false) +; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP12:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]]) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP12]] +; %1 = call i32 (i32, ...) 
@foo(i32 0, ptr byval([4 x i64]) align 16 %arg) ret i32 %1 } -; CHECK-LABEL: @bar7 -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), ptr align 8 {{.*}}, i64 32, i1 false) -; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls +; UTC_ARGS: --disable ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are ; passed to a variadic function. @@ -119,6 +231,6 @@ entry: ; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed. ; CHECK-LABEL: @many_args -; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) -; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800) +; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792) +; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800) declare i64 @sum(i64 %n, ...) diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll index 4151f3b223b3a..e3db97cf8ba87 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll @@ -1,9 +1,47 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s target datalayout = "e-m:e-i64:64-n32:64" target triple = "powerpc64le--linux" define i32 @foo(i32 %guard, ...) { +; CHECK-LABEL: define i32 @foo( +; CHECK-SAME: i32 [[GUARD:%.*]], ...) { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8 +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false) +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -246290604621825 +; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 17592186044416 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], 8796093022208 +; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP8]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -246290604621825 +; CHECK-NEXT: [[TMP11:%.*]] = xor i64 [[TMP10]], 17592186044416 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 8796093022208 +; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP13]], i8 0, i64 8, i1 false) +; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]]) +; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[VL]] to i64 +; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr +; CHECK-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8 +; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64 +; CHECK-NEXT: [[TMP18:%.*]] = and i64 [[TMP17]], -246290604621825 +; CHECK-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], 17592186044416 +; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP19]], 8796093022208 +; CHECK-NEXT: [[TMP21:%.*]] 
= inttoptr i64 [[TMP20]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP21]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false) +; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]]) +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]]) +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 0 +; %vl = alloca ptr, align 8 call void @llvm.lifetime.start.p0(ptr %vl) call void @llvm.va_start(ptr %vl) @@ -12,37 +50,51 @@ define i32 @foo(i32 %guard, ...) { ret i32 0 } -; First, check allocation of the save area. - -; CHECK-LABEL: @foo -; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls -; CHECK: [[C:%.*]] = alloca {{.*}} [[A]] - -; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[A]], i1 false) - -; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false) - declare void @llvm.lifetime.start.p0(ptr nocapture) #1 declare void @llvm.va_start(ptr) #2 declare void @llvm.va_end(ptr) #2 declare void @llvm.lifetime.end.p0(ptr nocapture) #1 define i32 @bar() { +; CHECK-LABEL: define i32 @bar() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) ret i32 %1 } ; Save the incoming shadow value from the arguments in the __msan_va_arg_tls ; array. -; CHECK-LABEL: @bar -; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls ; Check vector argument. define i32 @bar2() { +; CHECK-LABEL: define i32 @bar2() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) 
@foo(i32 0, <2 x i64> ) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) @foo(i32 0, <2 x i64> ) ret i32 %1 } @@ -50,49 +102,109 @@ define i32 @bar2() { ; The vector is at offset 16 of parameter save area, but __msan_va_arg_tls ; corresponds to offset 8+ of parameter save area - so the offset from ; __msan_va_arg_tls is actually misaligned. -; CHECK-LABEL: @bar2 -; CHECK: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls ; Check i64 array. define i32 @bar4() { +; CHECK-LABEL: define i32 @bar4() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8 +; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2]) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2]) ret i32 %1 } -; CHECK-LABEL: @bar4 -; CHECK: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8 -; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls ; Check i128 array. define i32 @bar5() { +; CHECK-LABEL: define i32 @bar5() { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2]) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP2]] +; %1 = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2]) ret i32 %1 } -; CHECK-LABEL: @bar5 -; CHECK: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls ; Check 8-aligned byval. 
define i32 @bar6(ptr %arg) { +; CHECK-LABEL: define i32 @bar6( +; CHECK-SAME: ptr [[ARG:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -246290604621825 +; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 8796093022208 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i8 0, i64 16, i1 false) +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -246290604621825 +; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 17592186044416 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 8796093022208 +; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP11]], i64 16, i1 false) +; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP12:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]]) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP12]] +; %1 = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 %arg) ret i32 %1 } -; CHECK-LABEL: @bar6 -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 {{.*}}, i64 16, i1 false) -; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls ; Check 16-aligned byval. define i32 @bar7(ptr %arg) { +; CHECK-LABEL: define i32 @bar7( +; CHECK-SAME: ptr [[ARG:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -246290604621825 +; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 8796093022208 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i8 0, i64 32, i1 false) +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -246290604621825 +; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 17592186044416 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 8796093022208 +; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), ptr align 8 [[TMP11]], i64 32, i1 false) +; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[TMP12:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]]) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[TMP12]] +; %1 = call i32 (i32, ...) 
@foo(i32 0, ptr byval([4 x i64]) align 16 %arg) ret i32 %1 } -; CHECK-LABEL: @bar7 -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), ptr align 8 {{.*}}, i64 32, i1 false) -; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls +; UTC_ARGS: --disable ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are ; passed to a variadic function. @@ -117,6 +229,6 @@ entry: ; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed. ; CHECK-LABEL: @many_args -; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) -; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800) +; CHECK: ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792) +; CHECK-NOT: ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 800) declare i64 @sum(i64 %n, ...) diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll index 1c74431e96c01..8ba033061defe 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll @@ -16,39 +16,33 @@ define void @Store1(ptr %p, i8 %x) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8 -; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8 ; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[_MSARG1]], align 8 -; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8 -; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8 ; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1:![0-9]+]] -; CHECK: [[BB12]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2:[0-9]+]] -; CHECK-NEXT: br label %[[BB13]] -; CHECK: [[BB13]]: +; CHECK-NEXT: br i1 
[[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]] +; CHECK: [[BB6]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2:[0-9]+]] +; CHECK-NEXT: br label %[[BB7]] +; CHECK: [[BB7]]: ; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[P]]) ; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0 ; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 1 ; CHECK-NEXT: store i8 [[TMP9]], ptr [[TMP16]], align 1 ; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i8 [[TMP9]], 0 -; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label %[[BB19:.*]], !prof [[PROF1]] -; CHECK: [[BB17]]: +; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB11:.*]], label %[[BB13:.*]], !prof [[PROF1]] +; CHECK: [[BB11]]: ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]]) ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 4 -; CHECK-NEXT: br label %[[BB19]] -; CHECK: [[BB19]]: +; CHECK-NEXT: br label %[[BB13]] +; CHECK: [[BB13]]: ; CHECK-NEXT: store i8 [[X]], ptr [[P]], align 1 ; CHECK-NEXT: ret void ; @@ -70,39 +64,33 @@ define void @Store2(ptr %p, i16 %x) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8 -; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8 ; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[_MSARG1]], align 8 -; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8 -; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8 ; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1]] -; CHECK: [[BB12]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]] -; CHECK-NEXT: br label %[[BB13]] -; CHECK: [[BB13]]: +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] +; CHECK: [[BB6]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]] +; CHECK-NEXT: br label %[[BB7]] +; CHECK: 
[[BB7]]: ; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_2(ptr [[P]]) ; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0 ; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 1 ; CHECK-NEXT: store i16 [[TMP9]], ptr [[TMP16]], align 2 ; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i16 [[TMP9]], 0 -; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label %[[BB19:.*]], !prof [[PROF1]] -; CHECK: [[BB17]]: +; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB11:.*]], label %[[BB13:.*]], !prof [[PROF1]] +; CHECK: [[BB11]]: ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]]) ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 4 -; CHECK-NEXT: br label %[[BB19]] -; CHECK: [[BB19]]: +; CHECK-NEXT: br label %[[BB13]] +; CHECK: [[BB13]]: ; CHECK-NEXT: store i16 [[X]], ptr [[P]], align 2 ; CHECK-NEXT: ret void ; @@ -124,39 +112,33 @@ define void @Store4(ptr %p, i32 %x) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8 -; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8 ; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[_MSARG1]], align 8 -; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8 -; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8 ; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1]] -; CHECK: [[BB12]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]] -; CHECK-NEXT: br label %[[BB13]] -; CHECK: [[BB13]]: +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] +; CHECK: [[BB6]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]] +; CHECK-NEXT: br label %[[BB7]] +; CHECK: [[BB7]]: ; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_4(ptr [[P]]) ; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0 ; CHECK-NEXT: [[TMP17:%.*]] = 
extractvalue { ptr, ptr } [[TMP15]], 1 ; CHECK-NEXT: store i32 [[TMP9]], ptr [[TMP16]], align 4 ; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i32 [[TMP9]], 0 -; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label %[[BB19:.*]], !prof [[PROF1]] -; CHECK: [[BB17]]: +; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB11:.*]], label %[[BB13:.*]], !prof [[PROF1]] +; CHECK: [[BB11]]: ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]]) ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 4 -; CHECK-NEXT: br label %[[BB19]] -; CHECK: [[BB19]]: +; CHECK-NEXT: br label %[[BB13]] +; CHECK: [[BB13]]: ; CHECK-NEXT: store i32 [[X]], ptr [[P]], align 4 ; CHECK-NEXT: ret void ; @@ -178,41 +160,35 @@ define void @Store8(ptr %p, i64 %x) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8 -; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8 ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[_MSARG1]], align 8 -; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8 -; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8 ; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1]] -; CHECK: [[BB12]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]] -; CHECK-NEXT: br label %[[BB13]] -; CHECK: [[BB13]]: +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] +; CHECK: [[BB6]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]] +; CHECK-NEXT: br label %[[BB7]] +; CHECK: [[BB7]]: ; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_8(ptr [[P]]) ; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0 ; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 1 ; CHECK-NEXT: store i64 [[TMP9]], ptr [[TMP16]], align 8 ; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i64 [[TMP9]], 0 -; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label 
%[[BB20:.*]], !prof [[PROF1]] -; CHECK: [[BB17]]: +; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB11:.*]], label %[[BB14:.*]], !prof [[PROF1]] +; CHECK: [[BB11]]: ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]]) ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 8 ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP17]], i32 1 ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP20]], align 4 -; CHECK-NEXT: br label %[[BB20]] -; CHECK: [[BB20]]: +; CHECK-NEXT: br label %[[BB14]] +; CHECK: [[BB14]]: ; CHECK-NEXT: store i64 [[X]], ptr [[P]], align 8 ; CHECK-NEXT: ret void ; @@ -234,35 +210,29 @@ define void @Store16(ptr %p, i128 %x) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 8 -; CHECK-NEXT: [[_MSARG1:%.*]] = inttoptr i32 [[TMP7]] to ptr +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 8 ; CHECK-NEXT: [[TMP9:%.*]] = load i128, ptr [[_MSARG1]], align 8 -; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[TMP11]], 8 -; CHECK-NEXT: [[_MSARG_O2:%.*]] = inttoptr i32 [[TMP10]] to ptr +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 8 ; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB12:.*]], label %[[BB13:.*]], !prof [[PROF1]] -; CHECK: [[BB12]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]] -; CHECK-NEXT: br label %[[BB13]] -; CHECK: [[BB13]]: +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] +; CHECK: [[BB6]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]] +; CHECK-NEXT: br label %[[BB7]] +; CHECK: [[BB7]]: ; CHECK-NEXT: [[TMP15:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_n(ptr [[P]], i32 16) ; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 0 ; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { ptr, ptr } [[TMP15]], 1 ; CHECK-NEXT: store i128 [[TMP9]], ptr [[TMP16]], align 8 ; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i128 [[TMP9]], 0 -; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB17:.*]], label %[[BB22:.*]], !prof [[PROF1]] -; CHECK: [[BB17]]: +; CHECK-NEXT: br i1 
[[_MSCMP3]], label %[[BB11:.*]], label %[[BB16:.*]], !prof [[PROF1]] +; CHECK: [[BB11]]: ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP12]]) ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP17]], align 8 ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP17]], i32 1 @@ -271,8 +241,8 @@ define void @Store16(ptr %p, i128 %x) sanitize_memory { ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP20]], align 4 ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP17]], i32 3 ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP21]], align 4 -; CHECK-NEXT: br label %[[BB22]] -; CHECK: [[BB22]]: +; CHECK-NEXT: br label %[[BB16]] +; CHECK: [[BB16]]: ; CHECK-NEXT: store i128 [[X]], ptr [[P]], align 8 ; CHECK-NEXT: ret void ; @@ -294,20 +264,18 @@ define i8 @Load1(ptr %p) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]] -; CHECK-NEXT: br label %[[BB7]] -; CHECK: [[BB7]]: +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] +; CHECK: [[BB4]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]] +; CHECK-NEXT: br label %[[BB5]] +; CHECK: [[BB5]]: ; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[P]], align 1 ; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_1(ptr [[P]]) ; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0 @@ -336,20 +304,18 @@ define i16 @Load2(ptr %p) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, 
ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]] -; CHECK-NEXT: br label %[[BB7]] -; CHECK: [[BB7]]: +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] +; CHECK: [[BB4]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]] +; CHECK-NEXT: br label %[[BB5]] +; CHECK: [[BB5]]: ; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[P]], align 2 ; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_2(ptr [[P]]) ; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0 @@ -378,20 +344,18 @@ define i32 @Load4(ptr %p) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]] -; CHECK-NEXT: br label %[[BB7]] -; CHECK: [[BB7]]: +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] +; CHECK: [[BB4]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]] +; CHECK-NEXT: br label %[[BB5]] +; CHECK: [[BB5]]: ; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[P]], align 4 ; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_4(ptr [[P]]) ; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0 @@ -420,20 +384,18 @@ define i64 @Load8(ptr %p) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x 
i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]] -; CHECK-NEXT: br label %[[BB7]] -; CHECK: [[BB7]]: +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] +; CHECK: [[BB4]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]] +; CHECK-NEXT: br label %[[BB5]] +; CHECK: [[BB5]]: ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[P]], align 8 ; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_8(ptr [[P]]) ; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0 @@ -462,20 +424,18 @@ define i128 @Load16(ptr %p) sanitize_memory { ; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 ; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 ; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PARAM_SHADOW]] to i32 -; CHECK-NEXT: [[_MSARG:%.*]] = inttoptr i32 [[TMP1]] to ptr +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i32 0 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[_MSARG]], align 8 -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PARAM_ORIGIN]] to i32 -; CHECK-NEXT: [[_MSARG_O:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[_MSARG_O]], align 4 ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: -; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR2]] -; CHECK-NEXT: br label %[[BB7]] -; CHECK: [[BB7]]: +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] +; CHECK: [[BB4]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP3]]) #[[ATTR2]] +; CHECK-NEXT: br label %[[BB5]] +; CHECK: [[BB5]]: ; CHECK-NEXT: [[TMP9:%.*]] = load i128, ptr [[P]], align 8 ; CHECK-NEXT: 
[[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_n(ptr [[P]], i32 16) ; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll index 29d1fbd053ecb..26aaa1e985e7f 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll @@ -76,12 +76,12 @@ define i32 @bar() { ; CHECK-LABEL: define i32 @bar() { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 24) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 4) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 24), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 4), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 ; CHECK-NEXT: store i32 16, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -102,9 +102,9 @@ define i32 @bar2() { ; CHECK-LABEL: define i32 @bar2() { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 ; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) 
@foo(i32 0, <2 x i64> ) @@ -125,9 +125,9 @@ define i32 @bar4() { ; CHECK-LABEL: define i32 @bar4() { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 ; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2]) @@ -145,9 +145,9 @@ define i32 @bar5() { ; CHECK-LABEL: define i32 @bar5() { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 ; CHECK-NEXT: store i32 40, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) 
@foo(i32 0, [2 x i128] [i128 1, i128 2]) @@ -166,15 +166,15 @@ define i32 @bar6(ptr %arg) { ; CHECK-SAME: ptr [[ARG:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], 2147483647 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), i8 0, i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i32 8), i8 0, i64 16, i1 false) ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i32 ; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 2147483647 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), ptr align 8 [[TMP7]], i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), ptr align 8 [[TMP7]], i64 16, i1 false) ; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]]) @@ -193,15 +193,15 @@ define i32 @bar7(ptr %arg) { ; CHECK-SAME: ptr [[ARG:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], 2147483647 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), i8 0, i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i32 8), i8 0, i64 32, i1 false) ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i32 ; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 2147483647 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), ptr align 8 [[TMP7]], i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), ptr align 8 [[TMP7]], i64 32, i1 false) ; CHECK-NEXT: store i32 40, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) 
@foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]]) @@ -222,205 +222,205 @@ define dso_local i64 @many_args() { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 208) to ptr), align 8 -; CHECK-NEXT: store i64 
0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr 
(i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 
ptrtoint (ptr @__msan_param_tls to i32), i32 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 792) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr 
@__msan_va_arg_tls to i32), i32 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 
ptrtoint (ptr @__msan_va_arg_tls to i32), i32 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr 
(i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 16), align 8 +; CHECK-NEXT: store i64 0, ptr 
getelementptr (i8, ptr @__msan_param_tls, i32 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 40), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 328), align 8 +; 
CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr 
@__msan_param_tls, i32 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 792), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 40), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 152), align 8 +; CHECK-NEXT: 
store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 456), align 8 +; CHECK-NEXT: store i64 0, ptr 
getelementptr (i8, ptr @__msan_va_arg_tls, i32 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, 
ptr @__msan_va_arg_tls, i32 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 792), align 8 ; CHECK-NEXT: store i32 968, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1) diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll index a4d2e165dd3a8..24f9dc3bd18c9 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll @@ -76,12 +76,12 @@ define i32 @bar() { ; CHECK-LABEL: define i32 @bar() { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 24) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 4) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 24), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 4), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 ; CHECK-NEXT: store i32 16, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) 
@foo(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -101,9 +101,9 @@ define i32 @bar2() { ; CHECK-LABEL: define i32 @bar2() { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 ; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, <2 x i64> ) @@ -124,9 +124,9 @@ define i32 @bar4() { ; CHECK-LABEL: define i32 @bar4() { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store [2 x i64] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 ; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2]) @@ -144,9 +144,9 @@ define i32 @bar5() { ; CHECK-LABEL: define i32 @bar5() { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 ; CHECK-NEXT: store i32 40, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) 
@foo(i32 0, [2 x i128] [i128 1, i128 2]) @@ -165,15 +165,15 @@ define i32 @bar6(ptr %arg) { ; CHECK-SAME: ptr [[ARG:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], 2147483647 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), i8 0, i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i32 8), i8 0, i64 16, i1 false) ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i32 ; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 2147483647 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), ptr align 8 [[TMP7]], i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), ptr align 8 [[TMP7]], i64 16, i1 false) ; CHECK-NEXT: store i32 24, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]]) @@ -192,15 +192,15 @@ define i32 @bar7(ptr %arg) { ; CHECK-SAME: ptr [[ARG:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i32 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], 2147483647 ; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), i8 0, i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i32 8), i8 0, i64 32, i1 false) ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i32 ; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP5]], 2147483647 ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), ptr align 8 [[TMP7]], i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), ptr align 8 [[TMP7]], i64 32, i1 false) ; CHECK-NEXT: store i32 40, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) 
@foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]]) @@ -220,205 +220,205 @@ define dso_local i64 @many_args() { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 ptrtoint (ptr @__msan_param_tls to i32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 208) to ptr), align 8 -; CHECK-NEXT: store i64 
0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr 
(i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 
ptrtoint (ptr @__msan_param_tls to i32), i32 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_param_tls to i32), i32 792) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr 
@__msan_va_arg_tls to i32), i32 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 
ptrtoint (ptr @__msan_va_arg_tls to i32), i32 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr 
(i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i32 add (i32 ptrtoint (ptr @__msan_va_arg_tls to i32), i32 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 16), align 8 +; CHECK-NEXT: store i64 0, ptr 
getelementptr (i8, ptr @__msan_param_tls, i32 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 40), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 328), align 8 +; 
CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr 
@__msan_param_tls, i32 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i32 792), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 40), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 152), align 8 +; CHECK-NEXT: 
store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 456), align 8 +; CHECK-NEXT: store i64 0, ptr 
getelementptr (i8, ptr @__msan_va_arg_tls, i32 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, 
ptr @__msan_va_arg_tls, i32 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i32 792), align 8 ; CHECK-NEXT: store i32 968, ptr @__msan_va_arg_overflow_size_tls, align 4 ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1) diff --git a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll index 0c6e75c331012..f707135261e3f 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll @@ -59,12 +59,12 @@ define i32 @bar() { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 4), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) 
@foo(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -87,11 +87,11 @@ define i32 @bar2() { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00) @@ -112,205 +112,205 @@ define dso_local i64 @many_args() { ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 
8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8 +; CHECK-NEXT: store 
i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 
8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to 
ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), 
i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), 
align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8 +; CHECK-NEXT: 
store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr 
getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8 ; CHECK-NEXT: store i64 960, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) 
@sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1) diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll index 44545685b5121..af8533c18acdc 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx-intrinsics-x86.ll @@ -13,7 +13,7 @@ target triple = "x86_64-unknown-linux-gnu" define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_addsub_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]]) @@ -29,7 +29,7 @@ declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nou define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_addsub_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]]) @@ -44,8 +44,8 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_blendv_pd_256( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double> [[A2:%.*]] to <4 x i64> @@ -72,8 +72,8 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_blendv_ps_256( -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x float> [[A2:%.*]] to <8 x i32> @@ -101,7 +101,7 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_cmp_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer @@ -119,7 +119,7 @@ declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) no define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_cmp_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer @@ -135,7 +135,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_cmp_ps_256_pseudo_op( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer @@ -388,7 +388,7 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>) nounwind readnone define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_dp_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: 
[[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> , <8 x i32> [[TMP3]], <8 x i32> zeroinitializer @@ -414,7 +414,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_hadd_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> @@ -432,7 +432,7 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_hadd_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> @@ -450,7 +450,7 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_hsub_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> @@ -468,7 +468,7 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_hsub_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> @@ 
-509,7 +509,7 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(ptr) nounwind readonly define <2 x double> @test_x86_avx_maskload_pd(ptr %a0, <2 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx_maskload_pd( -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP7]], 87960930222080 @@ -535,7 +535,7 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(ptr, <2 x i64>) nounwind readonly define <4 x double> @test_x86_avx_maskload_pd_256(ptr %a0, <4 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx_maskload_pd_256( -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP7]], 87960930222080 @@ -561,7 +561,7 @@ declare <4 x double> @llvm.x86.avx.maskload.pd.256(ptr, <4 x i64>) nounwind read define <4 x float> @test_x86_avx_maskload_ps(ptr %a0, <4 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx_maskload_ps( -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP7]], 87960930222080 @@ -587,7 +587,7 @@ declare <4 x float> @llvm.x86.avx.maskload.ps(ptr, <4 x i32>) nounwind readonly define <8 x float> @test_x86_avx_maskload_ps_256(ptr %a0, <8 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx_maskload_ps_256( -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP7]], 87960930222080 @@ -613,9 +613,9 @@ declare <8 x float> @llvm.x86.avx.maskload.ps.256(ptr, <8 x i32>) nounwind reado define void @test_x86_avx_maskstore_pd(ptr %a0, <2 x i64> %mask, <2 x double> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_maskstore_pd( -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP4]], 87960930222080 @@ -642,9 +642,9 @@ declare void @llvm.x86.avx.maskstore.pd(ptr, <2 x i64>, 
<2 x double>) nounwind define void @test_x86_avx_maskstore_pd_256(ptr %a0, <4 x i64> %mask, <4 x double> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_maskstore_pd_256( -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP4]], 87960930222080 @@ -671,9 +671,9 @@ declare void @llvm.x86.avx.maskstore.pd.256(ptr, <4 x i64>, <4 x double>) nounwi define void @test_x86_avx_maskstore_ps(ptr %a0, <4 x i32> %mask, <4 x float> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_maskstore_ps( -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP4]], 87960930222080 @@ -700,9 +700,9 @@ declare void @llvm.x86.avx.maskstore.ps(ptr, <4 x i32>, <4 x float>) nounwind define void @test_x86_avx_maskstore_ps_256(ptr %a0, <8 x i32> %mask, <8 x float> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_maskstore_ps_256( -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP4]], 87960930222080 @@ -730,7 +730,7 @@ declare void @llvm.x86.avx.maskstore.ps.256(ptr, <8 x i32>, <8 x float>) nounwin define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_max_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> 
[[A0:%.*]], <4 x double> [[A1:%.*]]) @@ -746,7 +746,7 @@ declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwi define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_max_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]]) @@ -762,7 +762,7 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_min_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]]) @@ -778,7 +778,7 @@ declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwi define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_min_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]]) @@ -836,7 +836,7 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_ptestc_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer @@ -855,7 +855,7 @@ declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_ptestnzc_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer @@ -874,7 +874,7 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_ptestz_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer @@ -948,7 +948,7 @@ declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vpermilvar_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[A1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[A1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = trunc <2 x i64> [[A1]] to <2 x i1> ; CHECK-NEXT: [[A0:%.*]] = bitcast <2 x i64> [[TMP1]] to <2 x double> @@ -974,7 +974,7 @@ declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwi define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vpermilvar_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[A1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[A1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i64> [[A1]] to <4 x i2> ; CHECK-NEXT: [[A0:%.*]] = bitcast <4 x i64> [[TMP1]] to <4 x double> @@ -1014,7 +1014,7 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) #0 { define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vpermilvar_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[A1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[A1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = trunc <4 x i32> [[A1]] to <4 x i2> ; CHECK-NEXT: [[A0:%.*]] = bitcast <4 x i32> [[TMP1]] to <4 x float> @@ -1036,7 +1036,7 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) # } define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, ptr %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -1075,7 +1075,7 @@ declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[A1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[A1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = trunc <8 x i32> [[A1]] to <8 x i3> ; CHECK-NEXT: [[A0:%.*]] = bitcast <8 x i32> [[TMP1]] to <8 x float> @@ -1101,7 +1101,7 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestc_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer @@ -1120,7 +1120,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestc_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer @@ -1139,7 +1139,7 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestc_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer @@ -1158,7 +1158,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestc_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer @@ -1177,7 +1177,7 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestnzc_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer @@ -1196,7 +1196,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestnzc_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer @@ -1215,7 +1215,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind r define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestnzc_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer @@ -1234,7 +1234,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnon define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestnzc_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer @@ -1253,7 +1253,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind rea define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestz_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer @@ -1272,7 +1272,7 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnon define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestz_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer @@ -1291,7 +1291,7 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind rea define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestz_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer @@ -1310,7 +1310,7 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestz_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer @@ -1351,7 +1351,7 @@ declare void @llvm.x86.avx.vzeroupper() nounwind define void @movnt_dq(ptr %p, <2 x i64> %a1) nounwind #0 { ; CHECK-LABEL: @movnt_dq( -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer @@ -1381,7 +1381,7 @@ declare void @llvm.x86.avx.movnt.dq.256(ptr, <4 x i64>) nounwind define void @movnt_ps(ptr %p, <8 x float> %a) nounwind #0 { ; CHECK-LABEL: @movnt_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1404,7 +1404,7 @@ declare void @llvm.x86.avx.movnt.ps.256(ptr, <8 x float>) nounwind define void @movnt_pd(ptr %p, <4 x double> %a1) nounwind #0 { ; add operation forces the execution domain. ; CHECK-LABEL: @movnt_pd( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer @@ -1432,7 +1432,7 @@ declare void @llvm.x86.avx.movnt.pd.256(ptr, <4 x double>) nounwind define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_pclmulqdq( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <2 x i32> zeroinitializer diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll index 93006ae30f926..8900085af030d 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll @@ -16,8 +16,8 @@ define <16 x float> @test_mm512_dpph_ps(<16 x float> %__W, <32 x half> %__A, <32 ; CHECK-LABEL: define <16 x float> @test_mm512_dpph_ps( ; CHECK-SAME: <16 x float> [[__W:%.*]], <32 x half> [[__A:%.*]], <32 x half> [[__B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -44,9 +44,9 @@ define <16 x float> @test_mm512_mask_dpph_ps(<16 x float> %__W, i16 zeroext %__U ; CHECK-LABEL: define <16 x float> @test_mm512_mask_dpph_ps( ; CHECK-SAME: <16 x float> [[__W:%.*]], i16 zeroext [[__U:%.*]], <32 x half> [[__A:%.*]], <32 x half> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: 
[[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -84,9 +84,9 @@ define <16 x float> @test_mm512_mask_dpph_ps(<16 x float> %__W, i16 zeroext %__U define <16 x float> @test_mm512_maskz_dpph_ps(i16 zeroext %__U, <16 x float> %__W, <32 x half> %__A, <32 x half> %__B) sanitize_memory { ; CHECK-LABEL: define <16 x float> @test_mm512_maskz_dpph_ps( ; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x float> [[__W:%.*]], <32 x half> [[__A:%.*]], <32 x half> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 @@ -124,11 +124,11 @@ define <16 x float> @test_mm512_maskz_dpph_ps(i16 zeroext %__U, <16 x float> %__ declare <16 x float> @llvm.x86.avx10.vdpphps.512(<16 x float>, <32 x half>, <32 x half>) -define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory { +define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_dpbssd_epi32( -; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-SAME: <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -137,22 +137,18 @@ define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; CHECK-NEXT: call void 
@__msan_warning_noreturn() #[[ATTR5]] ; CHECK-NEXT: unreachable ; CHECK: [[BB5]]: -; CHECK-NEXT: [[__B:%.*]] = load <16 x i32>, ptr [[PB]], align 64 +; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, ptr [[PB]], align 64 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PB]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP8]], align 64 -; CHECK-NEXT: [[TMP9:%.*]] = bitcast <16 x i32> [[__A]] to <64 x i8> -; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x i32> [[__B]] to <64 x i8> -; CHECK-NEXT: [[TMP11:%.*]] = bitcast <16 x i32> [[TMP3]] to <64 x i8> -; CHECK-NEXT: [[TMP12:%.*]] = bitcast <16 x i32> [[_MSLD]] to <64 x i8> -; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <64 x i8> [[TMP11]], zeroinitializer +; CHECK-NEXT: [[TMP9:%.*]] = load <64 x i8>, ptr [[TMP8]], align 64 ; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <64 x i8> [[TMP12]], zeroinitializer ; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <64 x i8> [[TMP9]], zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <64 x i8> [[TMP10]], zeroinitializer -; CHECK-NEXT: [[TMP17:%.*]] = and <64 x i1> [[TMP13]], [[TMP14]] -; CHECK-NEXT: [[TMP18:%.*]] = and <64 x i1> [[TMP15]], [[TMP14]] -; CHECK-NEXT: [[TMP19:%.*]] = and <64 x i1> [[TMP13]], [[TMP16]] +; CHECK-NEXT: [[TMP17:%.*]] = and <64 x i1> [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = and <64 x i1> [[TMP11]], [[TMP15]] +; CHECK-NEXT: [[TMP19:%.*]] = and <64 x i1> [[TMP14]], [[TMP16]] ; CHECK-NEXT: [[TMP20:%.*]] = or <64 x i1> [[TMP17]], [[TMP18]] ; CHECK-NEXT: [[TMP21:%.*]] = or <64 x i1> [[TMP20]], [[TMP19]] ; CHECK-NEXT: [[TMP22:%.*]] = sext <64 x i1> [[TMP21]] to <64 x i8> @@ -160,34 +156,30 @@ define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i32> [[TMP23]], zeroinitializer ; CHECK-NEXT: [[TMP27:%.*]] = sext <16 x i1> [[TMP24]] to <16 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[TMP27]], [[TMP4]] -; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]]) +; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[TMP10]]) ; CHECK-NEXT: store <16 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x i32> [[RES]] ; - %__B = load <16 x i32>, ptr %pB - %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %__B = load <64 x i8>, ptr %pB + %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) ret <16 x i32> %res } -define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory { +define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpbssds_epi32( -; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; 
CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP25:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP24:%.*]] = bitcast <16 x i32> [[__A]] to <64 x i8> -; CHECK-NEXT: [[TMP25:%.*]] = bitcast <16 x i32> [[__B]] to <64 x i8> -; CHECK-NEXT: [[TMP26:%.*]] = bitcast <16 x i32> [[TMP2]] to <64 x i8> -; CHECK-NEXT: [[TMP27:%.*]] = bitcast <16 x i32> [[TMP3]] to <64 x i8> -; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <64 x i8> [[TMP26]], zeroinitializer -; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[TMP27]], zeroinitializer ; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[TMP24]], zeroinitializer ; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <64 x i8> [[TMP25]], zeroinitializer -; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP28]], [[TMP10]] -; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP28]], [[TMP12]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP11]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP10]], [[TMP12]] +; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP11]], [[TMP21]] ; CHECK-NEXT: [[TMP16:%.*]] = or <64 x i1> [[TMP13]], [[TMP14]] ; CHECK-NEXT: [[TMP17:%.*]] = or <64 x i1> [[TMP16]], [[TMP15]] ; CHECK-NEXT: [[TMP18:%.*]] = sext <64 x i1> [[TMP17]] to <64 x i8> @@ -195,7 +187,7 @@ define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %_ ; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i32> [[TMP19]], zeroinitializer ; CHECK-NEXT: [[TMP23:%.*]] = sext <16 x i1> [[TMP20]] to <16 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[TMP23]], [[TMP1]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]]) +; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> [[TMP1]] @@ -207,31 +199,27 @@ define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %_ ; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x i32> [[RES]] ; - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W ret <16 x i32> %res } -define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory { +define <16 
x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpbssd_epi32( -; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP25:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP26:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP25:%.*]] = bitcast <16 x i32> [[__A]] to <64 x i8> -; CHECK-NEXT: [[TMP26:%.*]] = bitcast <16 x i32> [[__B]] to <64 x i8> -; CHECK-NEXT: [[TMP27:%.*]] = bitcast <16 x i32> [[TMP2]] to <64 x i8> -; CHECK-NEXT: [[TMP28:%.*]] = bitcast <16 x i32> [[TMP3]] to <64 x i8> -; CHECK-NEXT: [[TMP29:%.*]] = icmp ne <64 x i8> [[TMP27]], zeroinitializer -; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[TMP28]], zeroinitializer ; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[TMP25]], zeroinitializer ; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <64 x i8> [[TMP26]], zeroinitializer -; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP29]], [[TMP10]] -; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP29]], [[TMP12]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP11]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP10]], [[TMP12]] +; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP11]], [[TMP21]] ; CHECK-NEXT: [[TMP16:%.*]] = or <64 x i1> [[TMP13]], [[TMP14]] ; CHECK-NEXT: [[TMP17:%.*]] = or <64 x i1> [[TMP16]], [[TMP15]] ; CHECK-NEXT: [[TMP18:%.*]] = sext <64 x i1> [[TMP17]] to <64 x i8> @@ -239,7 +227,7 @@ define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %_ ; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i32> [[TMP19]], zeroinitializer ; CHECK-NEXT: [[TMP23:%.*]] = sext <16 x i1> [[TMP20]] to <16 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[TMP23]], [[TMP24]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]]) +; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> zeroinitializer @@ -251,21 +239,21 @@ define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %_ ; 
CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x i32> [[RES]] ; - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer ret <16 x i32> %res } -declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <16 x i32>, <16 x i32>) +declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <64 x i8>, <64 x i8>) +declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <64 x i8>, <64 x i8>) -define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory { +define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_dpbsud_epi32( -; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-SAME: <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -273,87 +261,123 @@ define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]] ; CHECK-NEXT: unreachable ; CHECK: [[BB5]]: -; CHECK-NEXT: [[__B:%.*]] = load <16 x i32>, ptr [[PB]], align 64 +; CHECK-NEXT: [[__B:%.*]] = load <64 x i8>, ptr [[PB]], align 64 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PB]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP8]], align 64 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]]) -; CHECK-NEXT: store <16 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSLD:%.*]] = load <64 x i8>, ptr [[TMP8]], align 64 +; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP9]], [[TMP10]] +; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP11]], [[TMP10]] +; CHECK-NEXT: 
[[TMP15:%.*]] = and <64 x i1> [[TMP9]], [[TMP12]] +; CHECK-NEXT: [[TMP16:%.*]] = or <64 x i1> [[TMP13]], [[TMP14]] +; CHECK-NEXT: [[TMP17:%.*]] = or <64 x i1> [[TMP16]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = sext <64 x i1> [[TMP17]] to <64 x i8> +; CHECK-NEXT: [[TMP19:%.*]] = bitcast <64 x i8> [[TMP18]] to <16 x i32> +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i32> [[TMP19]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = sext <16 x i1> [[TMP20]] to <16 x i32> +; CHECK-NEXT: [[TMP22:%.*]] = or <16 x i32> [[TMP21]], [[TMP4]] +; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]]) +; CHECK-NEXT: store <16 x i32> [[TMP22]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x i32> [[RES]] ; - %__B = load <16 x i32>, ptr %pB - %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %__B = load <64 x i8>, ptr %pB + %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) ret <16 x i32> %res } -define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory { +define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpbsuds_epi32( -; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]]) +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP23:%.*]] = and <64 x i1> [[TMP19]], [[TMP20]] +; CHECK-NEXT: [[TMP10:%.*]] = and <64 x i1> [[TMP21]], [[TMP20]] +; CHECK-NEXT: [[TMP11:%.*]] = and <64 x i1> [[TMP19]], [[TMP22]] +; CHECK-NEXT: [[TMP12:%.*]] = or <64 x i1> [[TMP23]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <64 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <64 x i1> [[TMP13]] to <64 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <64 x i8> 
[[TMP14]] to <16 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP16]] to <16 x i32> +; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1> -; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> [[TMP1]] +; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[TMP18]], <16 x i32> [[TMP1]] ; CHECK-NEXT: [[TMP7:%.*]] = xor <16 x i32> [[DPI]], [[__W]] -; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[_MSPROP1]] +; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[TMP18]] ; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], [[TMP1]] ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP9]], <16 x i32> [[TMP6]] ; CHECK-NEXT: [[RES:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[DPI]], <16 x i32> [[__W]] ; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x i32> [[RES]] ; - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W ret <16 x i32> %res } -define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory { +define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpbsud_epi32( -; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP19:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]]) +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = icmp ne <64 x i8> [[__A]], 
zeroinitializer +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = and <64 x i1> [[TMP20]], [[TMP21]] +; CHECK-NEXT: [[TMP10:%.*]] = and <64 x i1> [[TMP22]], [[TMP21]] +; CHECK-NEXT: [[TMP11:%.*]] = and <64 x i1> [[TMP20]], [[TMP23]] +; CHECK-NEXT: [[TMP12:%.*]] = or <64 x i1> [[TMP24]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <64 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <64 x i1> [[TMP13]] to <64 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <64 x i8> [[TMP14]] to <16 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP16]] to <16 x i32> +; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i32> [[TMP17]], [[TMP19]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1> -; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[TMP18]], <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = xor <16 x i32> [[DPI]], zeroinitializer -; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[_MSPROP1]] +; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[TMP18]] ; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], zeroinitializer ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP9]], <16 x i32> [[TMP6]] ; CHECK-NEXT: [[RES:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[DPI]], <16 x i32> zeroinitializer ; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x i32> [[RES]] ; - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer ret <16 x i32> %res } -declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <16 x i32>, <16 x i32>) +declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <64 x i8>, <64 x i8>) +declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <64 x i8>, <64 x i8>) -define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory { +define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_dpbuud_epi32( -; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-SAME: <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -361,88 +385,124 @@ define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]] ; CHECK-NEXT: unreachable ; CHECK: [[BB5]]: -; CHECK-NEXT: [[__B:%.*]] = load <16 x i32>, ptr [[PB]], align 64 +; CHECK-NEXT: [[__B:%.*]] = load <64 x i8>, ptr [[PB]], align 64 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PB]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP8]], align 64 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]]) -; CHECK-NEXT: store <16 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: [[_MSLD:%.*]] = load <64 x i8>, ptr [[TMP8]], align 64 +; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP9]], [[TMP10]] +; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP11]], [[TMP10]] +; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP9]], [[TMP12]] +; CHECK-NEXT: [[TMP16:%.*]] = or <64 x i1> [[TMP13]], [[TMP14]] +; CHECK-NEXT: [[TMP17:%.*]] = or <64 x i1> [[TMP16]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = sext <64 x i1> [[TMP17]] to <64 x i8> +; CHECK-NEXT: [[TMP19:%.*]] = bitcast <64 x i8> [[TMP18]] to <16 x i32> +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i32> [[TMP19]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = sext <16 x i1> [[TMP20]] to <16 x i32> +; CHECK-NEXT: [[TMP22:%.*]] = or <16 x i32> [[TMP21]], [[TMP4]] +; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]]) +; CHECK-NEXT: store <16 x i32> [[TMP22]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x i32> [[RES]] ; - %__B = load <16 x i32>, ptr %pB - %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %__B = load <64 x i8>, ptr %pB + %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) ret <16 x i32> %res } -define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory { +define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpbuuds_epi32( -; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]]) +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP23:%.*]] = and <64 x i1> [[TMP19]], [[TMP20]] +; CHECK-NEXT: [[TMP10:%.*]] = and <64 x i1> [[TMP21]], [[TMP20]] +; CHECK-NEXT: [[TMP11:%.*]] = and <64 x i1> [[TMP19]], [[TMP22]] +; CHECK-NEXT: [[TMP12:%.*]] = or <64 x i1> [[TMP23]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <64 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <64 x i1> [[TMP13]] to <64 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <64 x i8> [[TMP14]] to <16 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP16]] to <16 x i32> +; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1> -; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> [[TMP1]] +; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[TMP18]], <16 x i32> [[TMP1]] ; CHECK-NEXT: [[TMP7:%.*]] = xor <16 x i32> [[DPI]], [[__W]] -; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[_MSPROP1]] +; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[TMP18]] ; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], [[TMP1]] ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP9]], <16 x i32> [[TMP6]] ; CHECK-NEXT: [[RES:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[DPI]], <16 x i32> [[__W]] ; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x i32> [[RES]] ; - %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W ret <16 x i32> %res } -define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory { +define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, 
<64 x i8> %__A, <64 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpbuud_epi32( -; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP19:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]]) +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = and <64 x i1> [[TMP20]], [[TMP21]] +; CHECK-NEXT: [[TMP10:%.*]] = and <64 x i1> [[TMP22]], [[TMP21]] +; CHECK-NEXT: [[TMP11:%.*]] = and <64 x i1> [[TMP20]], [[TMP23]] +; CHECK-NEXT: [[TMP12:%.*]] = or <64 x i1> [[TMP24]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <64 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <64 x i1> [[TMP13]] to <64 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <64 x i8> [[TMP14]] to <16 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP16]] to <16 x i32> +; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i32> [[TMP17]], [[TMP19]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1> -; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[TMP18]], <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = xor <16 x i32> [[DPI]], zeroinitializer -; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[_MSPROP1]] +; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[TMP18]] ; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], zeroinitializer ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP9]], <16 x i32> [[TMP6]] ; CHECK-NEXT: [[RES:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[DPI]], <16 x i32> zeroinitializer ; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x i32> [[RES]] ; - 
%dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) + %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) %bst = bitcast i16 %__U to <16 x i1> %res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer ret <16 x i32> %res } -declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <16 x i32>, <16 x i32>) -declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <16 x i32>, <16 x i32>) +declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <64 x i8>, <64 x i8>) +declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <64 x i8>, <64 x i8>) define <16 x i32> @test_mm512_dpwsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_dpwsud_epi32( ; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -470,9 +530,9 @@ define <16 x i32> @test_mm512_mask_dpwsuds_epi32(<16 x i32> %__W, i16 zeroext %_ ; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpwsuds_epi32( ; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]] @@ -497,9 +557,9 @@ define <16 x i32> @test_mm512_mask_dpwsuds_epi32(<16 x i32> %__W, i16 zeroext %_ define <16 x i32> @test_mm512_maskz_dpwsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpwsud_epi32( ; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: 
[[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] @@ -528,9 +588,9 @@ declare <16 x i32> @llvm.x86.avx10.vpdpwsuds.512(<16 x i32>, <16 x i32>, <16 x i define <16 x i32> @test_mm512_dpwusd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_dpwusd_epi32( ; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -558,9 +618,9 @@ define <16 x i32> @test_mm512_mask_dpwusds_epi32(<16 x i32> %__W, i16 zeroext %_ ; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpwusds_epi32( ; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]] @@ -585,9 +645,9 @@ define <16 x i32> @test_mm512_mask_dpwusds_epi32(<16 x i32> %__W, i16 zeroext %_ define <16 x i32> @test_mm512_maskz_dpwusd_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpwusd_epi32( ; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x 
i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] @@ -616,9 +676,9 @@ declare <16 x i32> @llvm.x86.avx10.vpdpwusds.512(<16 x i32>, <16 x i32>, <16 x i define <16 x i32> @test_mm512_dpwuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_dpwuud_epi32( ; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -646,9 +706,9 @@ define <16 x i32> @test_mm512_mask_dpwuuds_epi32(<16 x i32> %__W, i16 zeroext %_ ; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpwuuds_epi32( ; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]] @@ -673,9 +733,9 @@ define <16 x i32> @test_mm512_mask_dpwuuds_epi32(<16 x i32> %__W, i16 zeroext %_ define <16 x i32> @test_mm512_maskz_dpwuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> 
%__B) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpwuud_epi32( ; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] @@ -705,10 +765,10 @@ declare <16 x i32> @llvm.x86.avx10.vpdpwuuds.512(<16 x i32>, <16 x i32>, <16 x i define { <32 x i16>, <32 x i16>, <32 x i16> } @test_mm512_mask_mpsadbw(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x3, i32 %x4) sanitize_memory { ; CHECK-LABEL: define { <32 x i16>, <32 x i16>, <32 x i16> } @test_mm512_mask_mpsadbw( ; CHECK-SAME: <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <32 x i16> [[X3:%.*]], i32 [[X4:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i32 [[X4]] to <32 x i1> @@ -784,7 +844,7 @@ define <8 x float> @avx_dp_ps(<8 x float> %a, <8 x float> %b) sanitize_memory { ; CHECK-LABEL: define <8 x float> @avx_dp_ps( ; CHECK-SAME: <8 x float> [[A:%.*]], <8 x float> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> , <8 x i32> [[TMP3]], <8 x i32> zeroinitializer diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll index e121c3b6ea177..def7ba3f10770 100644 --- 
a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll @@ -19,8 +19,8 @@ define <4 x float> @test_mm_dpph_ps(<4 x float> %__W, <8 x half> %__A, <8 x half ; CHECK-LABEL: define <4 x float> @test_mm_dpph_ps( ; CHECK-SAME: <4 x float> [[__W:%.*]], <8 x half> [[__A:%.*]], <8 x half> [[__B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -47,9 +47,9 @@ define <4 x float> @test_mm_mask_dpph_ps(<4 x float> %__W, i8 zeroext %__U, <8 x ; CHECK-LABEL: define <4 x float> @test_mm_mask_dpph_ps( ; CHECK-SAME: <4 x float> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x half> [[__A:%.*]], <8 x half> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -90,9 +90,9 @@ define <4 x float> @test_mm_mask_dpph_ps(<4 x float> %__W, i8 zeroext %__U, <8 x define <4 x float> @test_mm_maskz_dpph_ps(i8 zeroext %__U, <4 x float> %__W, <8 x half> %__A, <8 x half> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x float> @test_mm_maskz_dpph_ps( ; CHECK-SAME: i8 zeroext [[__U:%.*]], <4 x float> [[__W:%.*]], <8 x half> [[__A:%.*]], <8 x half> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: 
[[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 @@ -134,8 +134,8 @@ define <8 x float> @test_mm256_dpph_ps(<8 x float> %__W, <16 x half> %__A, <16 x ; CHECK-LABEL: define <8 x float> @test_mm256_dpph_ps( ; CHECK-SAME: <8 x float> [[__W:%.*]], <16 x half> [[__A:%.*]], <16 x half> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -162,9 +162,9 @@ define <8 x float> @test_mm256_mask_dpph_ps(<8 x float> %__W, i8 zeroext %__U, < ; CHECK-LABEL: define <8 x float> @test_mm256_mask_dpph_ps( ; CHECK-SAME: <8 x float> [[__W:%.*]], i8 zeroext [[__U:%.*]], <16 x half> [[__A:%.*]], <16 x half> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -202,9 +202,9 @@ define <8 x float> @test_mm256_mask_dpph_ps(<8 x float> %__W, i8 zeroext %__U, < define <8 x float> @test_mm256_maskz_dpph_ps(i8 zeroext %__U, <8 x float> %__W, <16 x half> %__A, <16 x half> %__B) sanitize_memory { ; CHECK-LABEL: define <8 x float> @test_mm256_maskz_dpph_ps( ; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x float> [[__W:%.*]], <16 x half> [[__A:%.*]], <16 x half> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 @@ -243,25 +243,21 @@ declare <4 x float> @llvm.x86.avx10.vdpphps.128(<4 x float>, <8 x half>, <8 x ha declare <8 x float> @llvm.x86.avx10.vdpphps.256(<8 x float>, <16 x half>, <16 x half>) -define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { +define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpbssd_epi32( -; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP25:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x i32> [[__A]] to <16 x i8> -; CHECK-NEXT: [[TMP25:%.*]] = bitcast <4 x i32> [[__B]] to <16 x i8> -; CHECK-NEXT: [[TMP26:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> -; CHECK-NEXT: [[TMP27:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> -; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[TMP26]], zeroinitializer -; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP27]], zeroinitializer ; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <16 x i8> [[TMP24]], zeroinitializer ; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[TMP25]], zeroinitializer -; CHECK-NEXT: [[TMP13:%.*]] = and <16 x i1> [[TMP28]], [[TMP10]] -; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP28]], [[TMP12]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = and <16 x i1> [[TMP11]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP10]], [[TMP12]] +; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP11]], [[TMP21]] ; CHECK-NEXT: [[TMP16:%.*]] = or <16 x i1> [[TMP13]], [[TMP14]] ; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP16]], [[TMP15]] ; CHECK-NEXT: [[TMP18:%.*]] = sext <16 x i1> [[TMP17]] to <16 x i8> @@ -269,7 +265,7 @@ define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <4 x i32> [[TMP19]], zeroinitializer ; CHECK-NEXT: [[TMP23:%.*]] = sext <4 x i1> [[TMP20]] to <4 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP23]], [[TMP1]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[__W]], <4 x i32> 
[[__A]], <4 x i32> [[__B]]) +; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> [[TMP1]] @@ -281,31 +277,27 @@ define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W ret <4 x i32> %res } -define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { +define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpbssds_epi32( -; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP24:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP25:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP26:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP24:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP25:%.*]] = bitcast <4 x i32> [[__A]] to <16 x i8> -; CHECK-NEXT: [[TMP26:%.*]] = bitcast <4 x i32> [[__B]] to <16 x i8> -; CHECK-NEXT: [[TMP27:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> -; CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> -; CHECK-NEXT: [[TMP29:%.*]] = icmp ne <16 x i8> [[TMP27]], zeroinitializer -; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP28]], zeroinitializer ; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <16 x i8> [[TMP25]], zeroinitializer ; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[TMP26]], zeroinitializer -; CHECK-NEXT: [[TMP13:%.*]] = and <16 x i1> [[TMP29]], [[TMP10]] -; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP29]], [[TMP12]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = and <16 x i1> [[TMP11]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP10]], [[TMP12]] +; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP11]], [[TMP21]] ; CHECK-NEXT: [[TMP16:%.*]] = or <16 x i1> [[TMP13]], [[TMP14]] ; CHECK-NEXT: 
[[TMP17:%.*]] = or <16 x i1> [[TMP16]], [[TMP15]] ; CHECK-NEXT: [[TMP18:%.*]] = sext <16 x i1> [[TMP17]] to <16 x i8> @@ -313,7 +305,7 @@ define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <4 x i32> [[TMP19]], zeroinitializer ; CHECK-NEXT: [[TMP23:%.*]] = sext <4 x i1> [[TMP20]] to <4 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP23]], [[TMP24]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]]) +; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> zeroinitializer @@ -325,31 +317,27 @@ define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer ret <4 x i32> %res } -define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory { +define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpbssds_epi32( -; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP25:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x i32> [[__A]] to <32 x i8> -; CHECK-NEXT: [[TMP25:%.*]] = bitcast <8 x i32> [[__B]] to <32 x i8> -; CHECK-NEXT: [[TMP26:%.*]] = bitcast <8 x i32> [[TMP2]] to <32 x i8> -; CHECK-NEXT: [[TMP27:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8> -; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[TMP26]], zeroinitializer -; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP27]], zeroinitializer ; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <32 x i8> [[TMP24]], zeroinitializer ; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[TMP25]], zeroinitializer -; CHECK-NEXT: [[TMP13:%.*]] = and <32 x i1> [[TMP28]], 
[[TMP10]] -; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP28]], [[TMP12]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = and <32 x i1> [[TMP11]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP10]], [[TMP12]] +; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP11]], [[TMP21]] ; CHECK-NEXT: [[TMP16:%.*]] = or <32 x i1> [[TMP13]], [[TMP14]] ; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP16]], [[TMP15]] ; CHECK-NEXT: [[TMP18:%.*]] = sext <32 x i1> [[TMP17]] to <32 x i8> @@ -357,7 +345,7 @@ define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U ; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <8 x i32> [[TMP19]], zeroinitializer ; CHECK-NEXT: [[TMP23:%.*]] = sext <8 x i1> [[TMP20]] to <8 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP23]], [[TMP1]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]]) +; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> [[TMP1]] @@ -369,31 +357,27 @@ define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U ; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W ret <8 x i32> %res } -define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory { +define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpbssd_epi32( -; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP24:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP25:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP26:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP24:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP25:%.*]] = bitcast <8 x i32> [[__A]] to <32 x i8> -; CHECK-NEXT: [[TMP26:%.*]] 
= bitcast <8 x i32> [[__B]] to <32 x i8> -; CHECK-NEXT: [[TMP27:%.*]] = bitcast <8 x i32> [[TMP2]] to <32 x i8> -; CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8> -; CHECK-NEXT: [[TMP29:%.*]] = icmp ne <32 x i8> [[TMP27]], zeroinitializer -; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP28]], zeroinitializer ; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <32 x i8> [[TMP25]], zeroinitializer ; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[TMP26]], zeroinitializer -; CHECK-NEXT: [[TMP13:%.*]] = and <32 x i1> [[TMP29]], [[TMP10]] -; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP29]], [[TMP12]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = and <32 x i1> [[TMP11]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP10]], [[TMP12]] +; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP11]], [[TMP21]] ; CHECK-NEXT: [[TMP16:%.*]] = or <32 x i1> [[TMP13]], [[TMP14]] ; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP16]], [[TMP15]] ; CHECK-NEXT: [[TMP18:%.*]] = sext <32 x i1> [[TMP17]] to <32 x i8> @@ -401,7 +385,7 @@ define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, ; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <8 x i32> [[TMP19]], zeroinitializer ; CHECK-NEXT: [[TMP23:%.*]] = sext <8 x i1> [[TMP20]] to <8 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP23]], [[TMP24]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]]) +; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> zeroinitializer @@ -413,28 +397,40 @@ define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, ; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { +define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpbsud_epi32( -; 
CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]]) +; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = and <16 x i1> [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP10:%.*]] = and <16 x i1> [[TMP20]], [[TMP19]] +; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i1> [[TMP18]], [[TMP21]] +; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i1> [[TMP22]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <16 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <16 x i1> [[TMP13]] to <16 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP14]] to <4 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <4 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <4 x i1> [[TMP16]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> [[TMP1]] @@ -446,23 +442,35 @@ define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W ret <4 x i32> %res } -define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { +define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpbsuds_epi32( 
-; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]]) +; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = and <16 x i1> [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP10:%.*]] = and <16 x i1> [[TMP20]], [[TMP19]] +; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i1> [[TMP18]], [[TMP21]] +; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i1> [[TMP22]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <16 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <16 x i1> [[TMP13]] to <16 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP14]] to <4 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <4 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <4 x i1> [[TMP16]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> zeroinitializer @@ -474,23 +482,35 @@ define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer ret <4 x i32> %res } -define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory { +define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <8 
x i32> @test_mm256_maskz_dpbsuds_epi32( -; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]]) +; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = and <32 x i1> [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP10:%.*]] = and <32 x i1> [[TMP20]], [[TMP19]] +; CHECK-NEXT: [[TMP11:%.*]] = and <32 x i1> [[TMP18]], [[TMP21]] +; CHECK-NEXT: [[TMP12:%.*]] = or <32 x i1> [[TMP22]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <32 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <32 x i1> [[TMP13]] to <32 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <32 x i8> [[TMP14]] to <8 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <8 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <8 x i1> [[TMP16]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> [[TMP1]] @@ -502,23 +522,35 @@ define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U ; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W ret <8 x i32> %res } -define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory { +define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory { ; 
CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpbsud_epi32( -; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]]) +; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = and <32 x i1> [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP10:%.*]] = and <32 x i1> [[TMP20]], [[TMP19]] +; CHECK-NEXT: [[TMP11:%.*]] = and <32 x i1> [[TMP18]], [[TMP21]] +; CHECK-NEXT: [[TMP12:%.*]] = or <32 x i1> [[TMP22]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <32 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <32 x i1> [[TMP13]] to <32 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <32 x i8> [[TMP14]] to <8 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <8 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <8 x i1> [[TMP16]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> zeroinitializer @@ -530,28 +562,40 @@ define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, ; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x 
i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { +define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpbuud_epi32( -; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]]) +; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = and <16 x i1> [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP10:%.*]] = and <16 x i1> [[TMP20]], [[TMP19]] +; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i1> [[TMP18]], [[TMP21]] +; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i1> [[TMP22]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <16 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <16 x i1> [[TMP13]] to <16 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP14]] to <4 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <4 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <4 x i1> [[TMP16]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> [[TMP1]] @@ -563,23 +607,35 @@ define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; CHECK-NEXT: store <4 x i32> 
[[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W ret <4 x i32> %res } -define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { +define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpbuuds_epi32( -; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]]) +; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = and <16 x i1> [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP10:%.*]] = and <16 x i1> [[TMP20]], [[TMP19]] +; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i1> [[TMP18]], [[TMP21]] +; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i1> [[TMP22]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <16 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <16 x i1> [[TMP13]] to <16 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP14]] to <4 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <4 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <4 x i1> [[TMP16]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> zeroinitializer @@ -591,23 +647,35 @@ define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, < ; 
CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) + %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) %bst = bitcast i4 %__U to <4 x i1> %res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer ret <4 x i32> %res } -define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory { +define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpbuuds_epi32( -; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]]) +; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = and <32 x i1> [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP10:%.*]] = and <32 x i1> [[TMP20]], [[TMP19]] +; CHECK-NEXT: [[TMP11:%.*]] = and <32 x i1> [[TMP18]], [[TMP21]] +; CHECK-NEXT: [[TMP12:%.*]] = or <32 x i1> [[TMP22]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <32 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <32 x i1> [[TMP13]] to <32 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <32 x i8> [[TMP14]] to <8 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <8 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <8 x i1> [[TMP16]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> [[TMP1]] @@ -619,23 +687,35 @@ define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x 
i32> %__W, i8 zeroext %__U ; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W ret <8 x i32> %res } -define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory { +define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpbuud_epi32( -; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] -; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]]) +; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = and <32 x i1> [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP10:%.*]] = and <32 x i1> [[TMP20]], [[TMP19]] +; CHECK-NEXT: [[TMP11:%.*]] = and <32 x i1> [[TMP18]], [[TMP21]] +; CHECK-NEXT: [[TMP12:%.*]] = or <32 x i1> [[TMP22]], [[TMP10]] +; CHECK-NEXT: [[TMP13:%.*]] = or <32 x i1> [[TMP12]], [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = sext <32 x i1> [[TMP13]] to <32 x i8> +; CHECK-NEXT: [[TMP15:%.*]] = bitcast <32 x i8> [[TMP14]] to <8 x i32> +; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <8 x i32> [[TMP15]], zeroinitializer +; CHECK-NEXT: [[TMP17:%.*]] = sext <8 x i1> [[TMP16]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP17]], [[TMP1]] +; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> ; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> zeroinitializer @@ -647,25 +727,25 @@ define <8 x i32> 
@test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, ; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) + %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) %bst = bitcast i8 %__U to <8 x i1> %res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) -declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>) define <4 x i32> @test_mm_mask_dpwsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpwsud_epi32( ; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -690,9 +770,9 @@ define <4 x i32> @test_mm_mask_dpwsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 define <4 x i32> @test_mm_maskz_dpwsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpwsuds_epi32( ; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -719,9 +799,9 @@ define <8 x i32> @test_mm256_maskz_dpwsuds_epi32(<8 x i32> %__W, i8 zeroext %__U ; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpwsuds_epi32( ; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -746,9 +826,9 @@ define <8 x i32> @test_mm256_maskz_dpwsuds_epi32(<8 x i32> %__W, i8 zeroext %__U define <8 x i32> @test_mm256_mask_dpwsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpwsud_epi32( ; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -780,9 +860,9 @@ define <4 x i32> @test_mm_mask_dpwusd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpwusd_epi32( ; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -807,9 +887,9 @@ define <4 x i32> @test_mm_mask_dpwusd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 define <4 x i32> @test_mm_maskz_dpwusds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpwusds_epi32( ; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -836,9 +916,9 @@ define <8 x i32> @test_mm256_maskz_dpwusds_epi32(<8 x i32> %__W, i8 zeroext %__U ; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpwusds_epi32( ; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -863,9 +943,9 @@ define <8 x i32> @test_mm256_maskz_dpwusds_epi32(<8 x i32> %__W, i8 zeroext %__U define <8 x i32> @test_mm256_mask_dpwusd_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpwusd_epi32( ; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { 
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -897,9 +977,9 @@ define <4 x i32> @test_mm_mask_dpwuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 ; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpwuud_epi32( ; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -924,9 +1004,9 @@ define <4 x i32> @test_mm_mask_dpwuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 define <4 x i32> @test_mm_maskz_dpwuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpwuuds_epi32( ; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -953,9 +1033,9 @@ define <8 x i32> 
@test_mm256_maskz_dpwuuds_epi32(<8 x i32> %__W, i8 zeroext %__U ; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpwuuds_epi32( ; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -980,9 +1060,9 @@ define <8 x i32> @test_mm256_maskz_dpwuuds_epi32(<8 x i32> %__W, i8 zeroext %__U define <8 x i32> @test_mm256_mask_dpwuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpwuud_epi32( ; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -1014,10 +1094,10 @@ declare <8 x i32> @llvm.x86.avx2.vpdpwuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) define { <8 x i16>, <8 x i16>, <8 x i16> } @test_mask_mpsadbw_128(<16 x i8> %x0, <16 x i8> %x1, <8 x i16> %x3, i8 %x4) sanitize_memory { ; CHECK-LABEL: define { <8 x i16>, <8 x i16>, <8 x i16> } @test_mask_mpsadbw_128( ; CHECK-SAME: <16 x i8> [[X0:%.*]], <16 x i8> [[X1:%.*]], <8 x i16> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), 
i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[X4]] to <8 x i1> @@ -1089,10 +1169,10 @@ define { <8 x i16>, <8 x i16>, <8 x i16> } @test_mask_mpsadbw_128(<16 x i8> %x0, define { <16 x i16>, <16 x i16>, <16 x i16> } @test_mask_mpsadbw_256(<32 x i8> %x0, <32 x i8> %x1, <16 x i16> %x3, i16 %x4) sanitize_memory { ; CHECK-LABEL: define { <16 x i16>, <16 x i16>, <16 x i16> } @test_mask_mpsadbw_256( ; CHECK-SAME: <32 x i8> [[X0:%.*]], <32 x i8> [[X1:%.*]], <16 x i16> [[X3:%.*]], i16 [[X4:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[X4]] to <16 x i1> diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll index 29269ff333771..e447cabc60e9f 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx2-intrinsics-x86.ll @@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu" define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_packssdw( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32> @@ -40,7 +40,7 @@ define <16 x i16> @test_x86_avx2_packssdw_fold() #0 { define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_packsswb( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i16> @@ -73,7 
+73,7 @@ define <32 x i8> @test_x86_avx2_packsswb_fold() #0 { define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_packuswb( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i16> @@ -106,7 +106,7 @@ define <32 x i8> @test_x86_avx2_packuswb_fold() #0 { define <32 x i8> @test_x86_avx2_pavg_b(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pavg_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]]) @@ -122,7 +122,7 @@ declare <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8>, <32 x i8>) nounwind readnone define <16 x i16> @test_x86_avx2_pavg_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pavg_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]]) @@ -138,7 +138,7 @@ declare <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16>, <16 x i16>) nounwind readno define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmadd_wd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i16> [[TMP2]], zeroinitializer @@ -187,7 +187,7 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmulh_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; 
CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]]) @@ -203,7 +203,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readn define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmulhu_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]]) @@ -219,7 +219,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind read define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psad_bw( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i8> [[TMP3]] to <4 x i64> @@ -239,7 +239,7 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psll_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -261,7 +261,7 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psll_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -283,7 +283,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psll_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -353,7 +353,7 @@ declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) nounwind readnone define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psra_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -375,7 +375,7 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psra_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -429,7 +429,7 @@ declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32) nounwind readnone define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrl_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -451,7 +451,7 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrl_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -473,7 +473,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrl_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> 
[[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -494,7 +494,7 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnon define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, ptr %p) #0 { ; CHECK-LABEL: @test_x86_avx2_psrl_w_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -576,7 +576,7 @@ declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phadd_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> @@ -594,7 +594,7 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phadd_sw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> @@ -612,7 +612,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind read define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phadd_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> @@ -630,7 +630,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readn define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phsub_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 
32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> @@ -648,7 +648,7 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phsub_sw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> @@ -666,7 +666,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind read define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phsub_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> @@ -684,7 +684,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readn define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i8> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer @@ -711,7 +711,7 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind rea define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw_load_op0( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -749,7 +749,7 @@ define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) # define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmul_hr_sw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]]) @@ -765,7 +765,7 @@ declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind re define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pshuf_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> [[TMP1]], <32 x i8> [[A1:%.*]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP2]], [[TMP3]] @@ -782,7 +782,7 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psign_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]]) @@ -798,7 +798,7 @@ declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psign_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]]) @@ -814,7 +814,7 @@ declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psign_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]]) @@ -830,7 +830,7 @@ 
declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readn define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_mpsadbw( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <32 x i8> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -854,7 +854,7 @@ declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind rea define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_mpsadbw_load_op0( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -889,7 +889,7 @@ define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 { define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_packusdw( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32> @@ -921,8 +921,8 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() #0 { define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_pblendvb( -; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ashr <32 x i8> [[A2:%.*]], splat (i8 7) @@ -947,7 +947,7 @@ declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounw define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pblendw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 
32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]], <16 x i32> @@ -963,7 +963,7 @@ declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind r define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pblendd_128( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]], <4 x i32> @@ -979,7 +979,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pblendd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]], <8 x i32> @@ -995,7 +995,7 @@ declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_permd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]]) @@ -1011,7 +1011,7 @@ declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_permps( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1035,7 +1035,7 @@ declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind reado define <2 x i64> @test_x86_avx2_maskload_q(ptr %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_maskload_q( -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP6]], 87960930222080 @@ -1060,7 +1060,7 @@ declare <2 x i64> @llvm.x86.avx2.maskload.q(ptr, <2 x i64>) nounwind readonly define <4 x i64> @test_x86_avx2_maskload_q_256(ptr %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_maskload_q_256( -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP6]], 87960930222080 @@ -1085,7 +1085,7 @@ declare <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr, <4 x i64>) nounwind readonl define <4 x i32> @test_x86_avx2_maskload_d(ptr %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_maskload_d( -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP6]], 87960930222080 @@ -1110,7 +1110,7 @@ declare <4 x i32> @llvm.x86.avx2.maskload.d(ptr, <4 x i32>) nounwind readonly define <8 x i32> @test_x86_avx2_maskload_d_256(ptr %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_maskload_d_256( -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP6]], 87960930222080 @@ -1135,9 +1135,9 @@ declare <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr, <8 x i32>) nounwind readonl define void @test_x86_avx2_maskstore_q(ptr %a0, <2 x i64> %a1, <2 x i64> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_maskstore_q( -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP4]], 87960930222080 @@ -1163,9 +1163,9 @@ declare void @llvm.x86.avx2.maskstore.q(ptr, <2 x i64>, <2 x i64>) nounwind define void @test_x86_avx2_maskstore_q_256(ptr %a0, <4 x i64> %a1, <4 x i64> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_maskstore_q_256( -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), 
align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP4]], 87960930222080 @@ -1191,9 +1191,9 @@ declare void @llvm.x86.avx2.maskstore.q.256(ptr, <4 x i64>, <4 x i64>) nounwind define void @test_x86_avx2_maskstore_d(ptr %a0, <4 x i32> %a1, <4 x i32> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_maskstore_d( -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP4]], 87960930222080 @@ -1219,9 +1219,9 @@ declare void @llvm.x86.avx2.maskstore.d(ptr, <4 x i32>, <4 x i32>) nounwind define void @test_x86_avx2_maskstore_d_256(ptr %a0, <8 x i32> %a1, <8 x i32> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_maskstore_d_256( -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP4]], 87960930222080 @@ -1248,7 +1248,7 @@ declare void @llvm.x86.avx2.maskstore.d.256(ptr, <8 x i32>, <8 x i32>) nounwind define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psllv_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32> @@ -1287,7 +1287,7 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psllv_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: 
[[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32> @@ -1326,7 +1326,7 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind read define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psllv_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64> @@ -1357,7 +1357,7 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psllv_q_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64> @@ -1389,7 +1389,7 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind read define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrlv_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32> @@ -1428,7 +1428,7 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrlv_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32> @@ -1467,7 +1467,7 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind read define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrlv_q( ; CHECK-NEXT: 
[[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64> @@ -1499,7 +1499,7 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrlv_q_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64> @@ -1532,7 +1532,7 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind read define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrav_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32> @@ -1563,7 +1563,7 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrav_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32> @@ -1594,9 +1594,9 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind read define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, ptr %a1, <4 x i32> %idx, <2 x double> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load 
<4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1627,9 +1627,9 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, ptr, define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, ptr %a1, <4 x i32> %idx, <4 x double> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -1660,9 +1660,9 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, ptr, define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, ptr %a1, <2 x i64> %idx, <2 x double> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1693,9 +1693,9 @@ declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, ptr, define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, ptr %a1, <4 x i64> %idx, <4 x double> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -1726,9 +1726,9 @@ declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, ptr, define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, ptr %a1, <4 x i32> %idx, <4 x float> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1759,9 +1759,9 @@ declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, ptr, define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, ptr %a1, <8 x i32> %idx, <8 x float> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -1792,9 +1792,9 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, ptr, define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, ptr %a1, <2 x i64> %idx, <4 x float> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1825,9 +1825,9 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, ptr, define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, ptr %a1, <4 x i64> %idx, <4 x float> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1858,9 +1858,9 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, ptr, define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, ptr %a1, <4 x i32> %idx, <2 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1891,9 +1891,9 @@ declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, ptr, define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, ptr %a1, <4 x i32> %idx, <4 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_q_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -1924,9 +1924,9 @@ declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, ptr, define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1957,9 +1957,9 @@ declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, ptr, define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, ptr %a1, <4 x i64> %idx, <4 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_q_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -1990,9 +1990,9 @@ declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, ptr, define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, ptr %a1, <4 x i32> %idx, <4 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -2023,9 +2023,9 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, ptr, define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, ptr %a1, <8 x i32> %idx, <8 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -2056,9 +2056,9 @@ declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, ptr, define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, ptr %a1, <2 x i64> %idx, <4 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -2089,9 +2089,9 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, ptr, define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, ptr %a1, <4 x i64> %idx, <4 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to 
ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -2122,10 +2122,10 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, ptr, define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, <8 x float> %mask, ptr nocapture %out) #0 { ; CHECK-LABEL: @test_gather_mask( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP6]], 0 @@ -2167,10 +2167,10 @@ define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, < define <2 x i64> @test_mask_demanded_bits(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i1> %mask) #0 { ; CHECK-LABEL: @test_mask_demanded_bits( -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64> ; CHECK-NEXT: [[MASK1:%.*]] = sext <2 x i1> [[MASK:%.*]] to <2 x i64> diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll 
b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll index 43da02d19693c..17bef29a05220 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-gfni-intrinsics.ll @@ -19,10 +19,10 @@ target triple = "x86_64-unknown-linux-gnu" declare <16 x i8> @llvm.x86.vgf2p8affineinvqb.128(<16 x i8>, <16 x i8>, i8) define { <16 x i8>, <16 x i8>, <16 x i8> } @test_vgf2p8affineinvqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8affineinvqb_128( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -94,10 +94,10 @@ define { <16 x i8>, <16 x i8>, <16 x i8> } @test_vgf2p8affineinvqb_128(<16 x i8> declare <32 x i8> @llvm.x86.vgf2p8affineinvqb.256(<32 x i8>, <32 x i8>, i8) define { <32 x i8>, <32 x i8>, <32 x i8> } @test_vgf2p8affineinvqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8affineinvqb_256( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> @@ -169,10 +169,10 @@ define { <32 x i8>, <32 x i8>, <32 x i8> } @test_vgf2p8affineinvqb_256(<32 x i8> declare <64 x i8> @llvm.x86.vgf2p8affineinvqb.512(<64 x i8>, <64 x i8>, i8) define { <64 x i8>, <64 x i8>, <64 x i8> } @test_vgf2p8affineinvqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8affineinvqb_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = 
load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP1]] to <64 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1> @@ -244,10 +244,10 @@ define { <64 x i8>, <64 x i8>, <64 x i8> } @test_vgf2p8affineinvqb_512(<64 x i8> declare <16 x i8> @llvm.x86.vgf2p8affineqb.128(<16 x i8>, <16 x i8>, i8) define { <16 x i8>, <16 x i8>, <16 x i8> } @test_vgf2p8affineqb_128(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8affineqb_128( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -308,10 +308,10 @@ define { <16 x i8>, <16 x i8>, <16 x i8> } @test_vgf2p8affineqb_128(<16 x i8> %s declare <32 x i8> @llvm.x86.vgf2p8affineqb.256(<32 x i8>, <32 x i8>, i8) define { <32 x i8>, <32 x i8>, <32 x i8> } @test_vgf2p8affineqb_256(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8affineqb_256( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> @@ -372,10 +372,10 @@ define { <32 x i8>, <32 x i8>, <32 x i8> } @test_vgf2p8affineqb_256(<32 x i8> %s declare <64 x i8> @llvm.x86.vgf2p8affineqb.512(<64 x i8>, <64 
x i8>, i8) define { <64 x i8>, <64 x i8>, <64 x i8> } @test_vgf2p8affineqb_512(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8affineqb_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP1]] to <64 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1> @@ -437,7 +437,7 @@ declare <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8>, <16 x i8>) define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) #0 { ; CHECK-LABEL: @test_vgf2p8mulb_128( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i8> @llvm.x86.vgf2p8mulb.128(<16 x i8> [[SRC1:%.*]], <16 x i8> [[SRC2:%.*]]) @@ -450,10 +450,10 @@ define <16 x i8> @test_vgf2p8mulb_128(<16 x i8> %src1, <16 x i8> %src2) #0 { define <16 x i8> @test_vgf2p8mulb_128_mask(<16 x i8> %src1, <16 x i8> %src2, <16 x i8> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8mulb_128_mask( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -476,9 +476,9 @@ define <16 x i8> @test_vgf2p8mulb_128_mask(<16 x i8> %src1, <16 x i8> %src2, <16 define <16 x i8> @test_vgf2p8mulb_128_maskz(<16 x i8> %src1, <16 x i8> %src2, i16 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8mulb_128_maskz( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] 
= load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -503,7 +503,7 @@ declare <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8>, <32 x i8>) define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2) #0 { ; CHECK-LABEL: @test_vgf2p8mulb_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i8> @llvm.x86.vgf2p8mulb.256(<32 x i8> [[SRC1:%.*]], <32 x i8> [[SRC2:%.*]]) @@ -516,10 +516,10 @@ define <32 x i8> @test_vgf2p8mulb_256(<32 x i8> %src1, <32 x i8> %src2) #0 { define <32 x i8> @test_vgf2p8mulb_256_mask(<32 x i8> %src1, <32 x i8> %src2, <32 x i8> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8mulb_256_mask( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> @@ -542,9 +542,9 @@ define <32 x i8> @test_vgf2p8mulb_256_mask(<32 x i8> %src1, <32 x i8> %src2, <32 define <32 x i8> @test_vgf2p8mulb_256_maskz(<32 x i8> %src1, <32 x i8> %src2, i32 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8mulb_256_maskz( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[MASK:%.*]] to <32 x i1> @@ -569,7 +569,7 @@ declare <64 
x i8> @llvm.x86.vgf2p8mulb.512(<64 x i8>, <64 x i8>) define <64 x i8> @test_vgf2p8mulb_512(<64 x i8> %src1, <64 x i8> %src2) #0 { ; CHECK-LABEL: @test_vgf2p8mulb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.x86.vgf2p8mulb.512(<64 x i8> [[SRC1:%.*]], <64 x i8> [[SRC2:%.*]]) @@ -582,10 +582,10 @@ define <64 x i8> @test_vgf2p8mulb_512(<64 x i8> %src1, <64 x i8> %src2) #0 { define <64 x i8> @test_vgf2p8mulb_512_mask(<64 x i8> %src1, <64 x i8> %src2, <64 x i8> %passthru, i64 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8mulb_512_mask( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP1]] to <64 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1> @@ -608,9 +608,9 @@ define <64 x i8> @test_vgf2p8mulb_512_mask(<64 x i8> %src1, <64 x i8> %src2, <64 define <64 x i8> @test_vgf2p8mulb_512_maskz(<64 x i8> %src1, <64 x i8> %src2, i64 %mask) #0 { ; CHECK-LABEL: @test_vgf2p8mulb_512_maskz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i64 [[TMP1]] to <64 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[MASK:%.*]] to <64 x i1> diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll index 74cb49b0f602a..25a4a9af6f5a7 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics-upgrade.ll @@ -12,7 +12,7 @@ define i16 @unpckbw_test(i16 %a0, i16 %a1) #0 { ; ; CHECK-LABEL: @unpckbw_test( ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1> @@ -37,8 +37,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_pbroadca ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcastd_gpr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <16 x i32> splat (i32 -1), i32 [[TMP1]], i64 0 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[X0:%.*]], i64 0 @@ -92,8 +92,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_pbroadcastq ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcastq_gpr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <8 x i64> splat (i64 -1), i64 [[TMP1]], i64 0 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[X0:%.*]], i64 0 @@ -162,8 +162,8 @@ define <16 x float> @test_x86_mask_vbroadcast_ss_ps_512(<4 x float> %a0, <16 x f ; ; CHECK-LABEL: @test_x86_mask_vbroadcast_ss_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[A0:%.*]], <4 x float> poison, <16 x i32> zeroinitializer @@ -188,7 +188,7 @@ define <16 x float> @test_x86_maskz_vbroadcast_ss_ps_512(<4 x float> %a0, i16 %m ; ; CHECK-LABEL: @test_x86_maskz_vbroadcast_ss_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to 
ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A0:%.*]], <4 x float> poison, <16 x i32> zeroinitializer @@ -227,8 +227,8 @@ define <8 x double> @test_x86_mask_vbroadcast_sd_pd_512(<2 x double> %a0, <8 x d ; ; CHECK-LABEL: @test_x86_mask_vbroadcast_sd_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x double> [[A0:%.*]], <2 x double> poison, <8 x i32> zeroinitializer @@ -253,7 +253,7 @@ define <8 x double> @test_x86_maskz_vbroadcast_sd_pd_512(<2 x double> %a0, i8 %m ; ; CHECK-LABEL: @test_x86_maskz_vbroadcast_sd_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[A0:%.*]], <2 x double> poison, <8 x i32> zeroinitializer @@ -292,8 +292,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pbroadcastd_512(<4 x i32> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcastd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[X0:%.*]], <4 x i32> poison, <16 x i32> zeroinitializer @@ -316,7 +316,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pbroadcastd_512(<4 x i32> %x0, i16 % ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pbroadcastd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[X0:%.*]], <4 x i32> poison, <16 x i32> zeroinitializer @@ -354,8 +354,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pbroadcastq_512(<2 x i64> %x0, <8 x i6 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcastq_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[X0:%.*]], <2 x i64> poison, <8 x i32> zeroinitializer @@ -378,7 +378,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pbroadcastq_512(<2 x i64> %x0, i8 %ma ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pbroadcastq_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> splat (i64 -1), <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[X0:%.*]], <2 x i64> poison, <8 x i32> zeroinitializer @@ -416,8 +416,8 @@ define <16 x float>@test_int_x86_avx512_mask_movsldup_512(<16 x float> %x0, <16 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_movsldup_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> @@ -442,7 +442,7 @@ define <16 x float>@test_int_x86_avx512_maskz_movsldup_512(<16 x float> %x0, i16 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_movsldup_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = 
shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> @@ -481,8 +481,8 @@ define <16 x float>@test_int_x86_avx512_mask_movshdup_512(<16 x float> %x0, <16 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_movshdup_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> @@ -507,7 +507,7 @@ define <16 x float>@test_int_x86_avx512_maskz_movshdup_512(<16 x float> %x0, i16 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_movshdup_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> @@ -546,8 +546,8 @@ define <8 x double>@test_int_x86_avx512_mask_movddup_512(<8 x double> %x0, <8 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_movddup_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> @@ -572,7 +572,7 @@ define <8 x double>@test_int_x86_avx512_maskz_movddup_512(<8 x double> %x0, i8 % ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_movddup_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> @@ -611,8 +611,8 @@ define <8 x 
double>@test_int_x86_avx512_mask_perm_df_512(<8 x double> %x0, <8 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_perm_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> @@ -637,7 +637,7 @@ define <8 x double>@test_int_x86_avx512_maskz_perm_df_512(<8 x double> %x0, i8 % ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_perm_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> @@ -676,8 +676,8 @@ define <8 x i64>@test_int_x86_avx512_mask_perm_di_512(<8 x i64> %x0, i32 %x1, <8 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_perm_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X0]], <8 x i32> @@ -700,7 +700,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_perm_di_512(<8 x i64> %x0, i32 %x1, i ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_perm_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X0]], <8 x i32> @@ -722,10 +722,10 @@ define <8 x i64>@test_int_x86_avx512_maskz_perm_di_512(<8 x i64> %x0, i32 %x1, i define void @test_store1(<16 x float> %data, ptr %ptr, ptr %ptr2, i16 %mask) #0 { ; ; CHECK-LABEL: @test_store1( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -766,10 +766,10 @@ declare void @llvm.x86.avx512.mask.storeu.ps.512(ptr, <16 x float>, i16 ) define void @test_store2(<8 x double> %data, ptr %ptr, ptr %ptr2, i8 %mask) #0 { ; ; CHECK-LABEL: @test_store2( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -810,10 +810,10 @@ declare void @llvm.x86.avx512.mask.storeu.pd.512(ptr, <8 x double>, i8) define void @test_mask_store_aligned_ps(<16 x float> %data, ptr %ptr, ptr %ptr2, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_store_aligned_ps( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -854,10 +854,10 @@ declare void @llvm.x86.avx512.mask.store.ps.512(ptr, <16 x float>, i16 ) define void @test_mask_store_aligned_pd(<8 x double> %data, ptr %ptr, ptr %ptr2, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_store_aligned_pd( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -898,10 +898,10 @@ declare void @llvm.x86.avx512.mask.store.pd.512(ptr, <8 x double>, i8) define void@test_int_x86_avx512_mask_storeu_q_512(ptr %ptr1, ptr %ptr2, <8 x i64> %x1, i8 %x2) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_storeu_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[X2:%.*]] to <8 x i1> @@ -942,10 +942,10 @@ declare void @llvm.x86.avx512.mask.storeu.q.512(ptr, <8 x i64>, i8) define void@test_int_x86_avx512_mask_storeu_d_512(ptr %ptr1, ptr %ptr2, <16 x i32> %x1, i16 %x2) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_storeu_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[X2:%.*]] to <16 x i1> @@ -986,10 +986,10 @@ declare void @llvm.x86.avx512.mask.storeu.d.512(ptr, <16 x i32>, i16) define void@test_int_x86_avx512_mask_store_q_512(ptr %ptr1, ptr %ptr2, <8 x i64> %x1, i8 %x2) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_store_q_512( -; 
CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[X2:%.*]] to <8 x i1> @@ -1030,10 +1030,10 @@ declare void @llvm.x86.avx512.mask.store.q.512(ptr, <8 x i64>, i8) define void@test_int_x86_avx512_mask_store_d_512(ptr %ptr1, ptr %ptr2, <16 x i32> %x1, i16 %x2) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_store_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[X2:%.*]] to <16 x i1> @@ -1074,8 +1074,8 @@ declare void @llvm.x86.avx512.mask.store.d.512(ptr, <16 x i32>, i16) define <16 x float> @test_mask_load_aligned_ps(<16 x float> %data, ptr %ptr, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_load_aligned_ps( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1137,8 +1137,8 @@ declare <16 x float> @llvm.x86.avx512.mask.load.ps.512(ptr, <16 x float>, i16) define <16 x float> @test_mask_load_unaligned_ps(<16 x float> %data, ptr %ptr, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_load_unaligned_ps( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; 
CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1200,8 +1200,8 @@ declare <16 x float> @llvm.x86.avx512.mask.loadu.ps.512(ptr, <16 x float>, i16) define <8 x double> @test_mask_load_aligned_pd(<8 x double> %data, ptr %ptr, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_load_aligned_pd( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1263,8 +1263,8 @@ declare <8 x double> @llvm.x86.avx512.mask.load.pd.512(ptr, <8 x double>, i8) define <8 x double> @test_mask_load_unaligned_pd(<8 x double> %data, ptr %ptr, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_load_unaligned_pd( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1329,8 +1329,8 @@ define <16 x i32> @test_mask_load_unaligned_d(ptr %ptr, ptr %ptr2, <16 x i32> %d ; ; CHECK-LABEL: @test_mask_load_unaligned_d( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -1393,8 +1393,8 @@ define <8 x i64> @test_mask_load_unaligned_q(ptr %ptr, ptr %ptr2, <8 x i64> %dat ; ; CHECK-LABEL: @test_mask_load_unaligned_q( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -1456,8 +1456,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.load.d.512(ptr, <16 x i32>, i16) define <16 x i32> @test_mask_load_aligned_d(<16 x i32> %data, ptr %ptr, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_load_aligned_d( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1519,8 +1519,8 @@ declare <8 x i64> @llvm.x86.avx512.mask.load.q.512(ptr, <8 x i64>, i8) define <8 x i64> @test_mask_load_aligned_q(<8 x i64> %data, ptr %ptr, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_load_aligned_q( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -1596,8 +1596,8 @@ define <8 x double>@test_int_x86_avx512_mask_vpermil_pd_512(<8 x double> %x0, <8 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermil_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> @@ -1622,7 +1622,7 @@ define <8 x double>@test_int_x86_avx512_maskz_vpermil_pd_512(<8 x double> %x0, i ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermil_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 
64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X0]], <8 x i32> @@ -1661,8 +1661,8 @@ define <16 x float>@test_int_x86_avx512_mask_vpermil_ps_512(<16 x float> %x0, <1 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermil_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> @@ -1687,7 +1687,7 @@ define <16 x float>@test_int_x86_avx512_maskz_vpermil_ps_512(<16 x float> %x0, i ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermil_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X0]], <16 x i32> @@ -1726,8 +1726,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pshuf_d_512(<16 x i32> %x0, i32 %x1, ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pshuf_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X0]], <16 x i32> @@ -1750,7 +1750,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pshuf_d_512(<16 x i32> %x0, i32 %x1, ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pshuf_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector 
<16 x i32> [[X0:%.*]], <16 x i32> [[X0]], <16 x i32> @@ -1772,7 +1772,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pshuf_d_512(<16 x i32> %x0, i32 %x1, define i16 @test_pcmpeq_d(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_pcmpeq_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] @@ -1795,8 +1795,8 @@ define i16 @test_mask_pcmpeq_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_pcmpeq_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] @@ -1828,7 +1828,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32>, <16 x i32>, i16) define i8 @test_pcmpeq_q(<8 x i64> %a, <8 x i64> %b) #0 { ; CHECK-LABEL: @test_pcmpeq_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] @@ -1851,8 +1851,8 @@ define i8 @test_mask_pcmpeq_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_pcmpeq_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] @@ -1884,7 +1884,7 @@ declare i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64>, <8 x i64>, i8) define i16 @test_pcmpgt_d(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_pcmpgt_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 
8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A:%.*]], splat (i32 -2147483648) ; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[TMP1]], splat (i32 -1) @@ -1911,8 +1911,8 @@ define i16 @test_mask_pcmpgt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_pcmpgt_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[A:%.*]], splat (i32 -2147483648) ; CHECK-NEXT: [[TMP5:%.*]] = xor <16 x i32> [[TMP1]], splat (i32 -1) @@ -1948,7 +1948,7 @@ declare i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32>, <16 x i32>, i16) define i8 @test_pcmpgt_q(<8 x i64> %a, <8 x i64> %b) #0 { ; CHECK-LABEL: @test_pcmpgt_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A:%.*]], splat (i64 -9223372036854775808) ; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 -1) @@ -1975,8 +1975,8 @@ define i8 @test_mask_pcmpgt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_pcmpgt_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[A:%.*]], splat (i64 -9223372036854775808) ; CHECK-NEXT: [[TMP5:%.*]] = xor <8 x i64> [[TMP1]], splat (i64 -1) @@ -2014,7 +2014,7 @@ declare <8 x double> @llvm.x86.avx512.mask.unpckh.pd.512(<8 x double>, <8 x doub define <8 x double>@test_int_x86_avx512_unpckh_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_unpckh_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = 
shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -2029,9 +2029,9 @@ define <8 x double>@test_int_x86_avx512_mask_unpckh_pd_512(<8 x double> %x0, <8 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_unpckh_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -2057,7 +2057,7 @@ declare <16 x float> @llvm.x86.avx512.mask.unpckh.ps.512(<16 x float>, <16 x flo define <16 x float>@test_int_x86_avx512_unpckh_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_unpckh_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> @@ -2072,9 +2072,9 @@ define <16 x float>@test_int_x86_avx512_mask_unpckh_ps_512(<16 x float> %x0, <16 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_unpckh_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> @@ -2100,7 +2100,7 @@ declare <8 x double> @llvm.x86.avx512.mask.unpckl.pd.512(<8 x double>, <8 x doub define <8 x double>@test_int_x86_avx512_unpckl_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> 
%x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_unpckl_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -2115,9 +2115,9 @@ define <8 x double>@test_int_x86_avx512_mask_unpckl_pd_512(<8 x double> %x0, <8 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_unpckl_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -2143,7 +2143,7 @@ declare <16 x float> @llvm.x86.avx512.mask.unpckl.ps.512(<16 x float>, <16 x flo define <16 x float>@test_int_x86_avx512_unpckl_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_unpckl_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> @@ -2158,9 +2158,9 @@ define <16 x float>@test_int_x86_avx512_mask_unpckl_ps_512(<16 x float> %x0, <16 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_unpckl_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> @@ -2186,7 +2186,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64>, <8 x i64>, <8 define <8 x i64>@test_int_x86_avx512_punpcklqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_punpcklqd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> @@ -2201,9 +2201,9 @@ define <8 x i64>@test_int_x86_avx512_mask_punpcklqd_q_512(<8 x i64> %x0, <8 x i6 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_punpcklqd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> @@ -2226,8 +2226,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_punpcklqd_q_512(<8 x i64> %x0, <8 x i ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_punpcklqd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> @@ -2251,7 +2251,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.punpckhqd.q.512(<8 x i64>, <8 x i64>, <8 define <8 x i64>@test_int_x86_avx512_punpckhqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; 
CHECK-LABEL: @test_int_x86_avx512_punpckhqd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> @@ -2266,9 +2266,9 @@ define <8 x i64>@test_int_x86_avx512_mask_punpckhqd_q_512(<8 x i64> %x0, <8 x i6 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_punpckhqd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> @@ -2292,7 +2292,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.punpckhd.q.512(<16 x i32>, <16 x i32>, define <16 x i32>@test_int_x86_avx512_punpckhd_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_punpckhd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> @@ -2307,9 +2307,9 @@ define <16 x i32>@test_int_x86_avx512_mask_punpckhd_q_512(<16 x i32> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_punpckhd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x 
i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> @@ -2333,7 +2333,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.punpckld.q.512(<16 x i32>, <16 x i32>, define <16 x i32>@test_int_x86_avx512_punpckld_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_punpckld_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> @@ -2348,9 +2348,9 @@ define <16 x i32>@test_int_x86_avx512_mask_punpckld_q_512(<16 x i32> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_punpckld_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> @@ -2387,8 +2387,8 @@ define <16 x i32> @test_x86_avx512_mask_pslli_d(<16 x i32> %a0, <16 x i32> %a1, ; ; CHECK-LABEL: @test_x86_avx512_mask_pslli_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -2412,7 +2412,7 @@ define <16 x i32> @test_x86_avx512_maskz_pslli_d(<16 x i32> %a0, i16 %mask) #0 ; ; CHECK-LABEL: @test_x86_avx512_maskz_pslli_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, 
ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer @@ -2452,8 +2452,8 @@ define <8 x i64> @test_x86_avx512_mask_pslli_q(<8 x i64> %a0, <8 x i64> %a1, i8 ; ; CHECK-LABEL: @test_x86_avx512_mask_pslli_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -2477,7 +2477,7 @@ define <8 x i64> @test_x86_avx512_maskz_pslli_q(<8 x i64> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: @test_x86_avx512_maskz_pslli_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer @@ -2517,8 +2517,8 @@ define <16 x i32> @test_x86_avx512_mask_psrli_d(<16 x i32> %a0, <16 x i32> %a1, ; ; CHECK-LABEL: @test_x86_avx512_mask_psrli_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -2542,7 +2542,7 @@ define <16 x i32> @test_x86_avx512_maskz_psrli_d(<16 x i32> %a0, i16 %mask) #0 ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrli_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: 
[[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer @@ -2582,8 +2582,8 @@ define <8 x i64> @test_x86_avx512_mask_psrli_q(<8 x i64> %a0, <8 x i64> %a1, i8 ; ; CHECK-LABEL: @test_x86_avx512_mask_psrli_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -2607,7 +2607,7 @@ define <8 x i64> @test_x86_avx512_maskz_psrli_q(<8 x i64> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrli_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer @@ -2647,8 +2647,8 @@ define <16 x i32> @test_x86_avx512_mask_psrai_d(<16 x i32> %a0, <16 x i32> %a1, ; ; CHECK-LABEL: @test_x86_avx512_mask_psrai_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -2672,7 +2672,7 @@ define <16 x i32> @test_x86_avx512_maskz_psrai_d(<16 x i32> %a0, i16 %mask) #0 ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrai_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer @@ -2712,8 +2712,8 @@ define <8 x i64> @test_x86_avx512_mask_psrai_q(<8 x i64> %a0, <8 x i64> %a1, i8 ; ; CHECK-LABEL: @test_x86_avx512_mask_psrai_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -2737,7 +2737,7 @@ define <8 x i64> @test_x86_avx512_maskz_psrai_q(<8 x i64> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrai_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer @@ -2764,7 +2764,7 @@ declare void @llvm.x86.avx512.storent.q.512(ptr, <8 x i64>) define void@test_storent_q_512(<8 x i64> %data, ptr %ptr) #0 { ; ; CHECK-LABEL: @test_storent_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2789,7 +2789,7 @@ declare void @llvm.x86.avx512.storent.pd.512(ptr, <8 x double>) define void @test_storent_pd_512(<8 x double> %data, ptr %ptr) #0 { ; ; CHECK-LABEL: @test_storent_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2814,7 +2814,7 @@ declare void @llvm.x86.avx512.storent.ps.512(ptr, <16 x float>) define void @test_storent_ps_512(<16 x float> %data, ptr %ptr) #0 { ; ; CHECK-LABEL: @test_storent_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2837,7 +2837,7 @@ define void @test_storent_ps_512(<16 x float> %data, ptr %ptr) #0 { define <16 x i32> @test_xor_epi32(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_xor_epi32( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A:%.*]], [[B:%.*]] @@ -2852,9 +2852,9 @@ define <16 x i32> @test_mask_xor_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> % ; ; CHECK-LABEL: @test_mask_xor_epi32( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = xor <16 x i32> [[A:%.*]], [[B:%.*]] @@ -2878,7 +2878,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32>, <16 x i32>, <16 define <16 x i32> @test_or_epi32(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_or_epi32( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A:%.*]], splat (i32 -1) ; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[B:%.*]], splat (i32 -1) @@ -2899,9 +2899,9 @@ define <16 x i32> @test_mask_or_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %p ; ; CHECK-LABEL: @test_mask_or_epi32( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = xor <16 x i32> [[A:%.*]], splat (i32 -1) ; CHECK-NEXT: [[TMP6:%.*]] = xor <16 x i32> [[B:%.*]], splat (i32 -1) @@ -2931,7 +2931,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32>, <16 x i32>, <16 x define <16 x i32> @test_and_epi32(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_and_epi32( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = and <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = and <16 x i32> [[A:%.*]], [[TMP2]] @@ -2950,9 +2950,9 @@ define <16 x i32> @test_mask_and_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> % ; ; CHECK-LABEL: @test_mask_and_epi32( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP6:%.*]] = and <16 x i32> [[A:%.*]], [[TMP2]] @@ -2980,7 +2980,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32>, <16 x i32>, <16 define <8 x i64> @test_xor_epi64(<8 x i64> %a, <8 x i64> %b) #0 { ; CHECK-LABEL: @test_xor_epi64( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A:%.*]], [[B:%.*]] @@ -2995,9 +2995,9 @@ define <8 x i64> @test_mask_xor_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %pass ; ; CHECK-LABEL: @test_mask_xor_epi64( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = xor <8 x i64> [[A:%.*]], [[B:%.*]] @@ -3021,7 +3021,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64>, <8 x i64>, <8 x i6 define <8 x i64> @test_or_epi64(<8 x i64> %a, <8 x i64> %b) #0 { ; CHECK-LABEL: @test_or_epi64( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A:%.*]], splat (i64 -1) ; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[B:%.*]], splat (i64 -1) @@ -3042,9 +3042,9 @@ define <8 x i64> @test_mask_or_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passT ; ; CHECK-LABEL: @test_mask_or_epi64( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = xor <8 x i64> [[A:%.*]], splat (i64 -1) ; CHECK-NEXT: [[TMP6:%.*]] = xor <8 x i64> [[B:%.*]], splat (i64 -1) @@ -3074,7 +3074,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64>, <8 x i64>, <8 x i64 define <8 x i64> @test_and_epi64(<8 x i64> %a, <8 x i64> %b) #0 { ; CHECK-LABEL: @test_and_epi64( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = and <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = and <8 x i64> [[A:%.*]], [[TMP2]] @@ -3093,9 +3093,9 @@ define <8 x i64> @test_mask_and_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %pass ; ; CHECK-LABEL: @test_mask_and_epi64( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP6:%.*]] = and <8 x i64> [[A:%.*]], [[TMP2]] @@ -3123,7 +3123,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64>, <8 x i64>, <8 x i6 define <16 x i32> @test_mask_add_epi32_rr(<16 x i32> 
%a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_mask_add_epi32_rr( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = add <16 x i32> [[A:%.*]], [[B:%.*]] @@ -3138,9 +3138,9 @@ define <16 x i32> @test_mask_add_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i ; ; CHECK-LABEL: @test_mask_add_epi32_rrk( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i32> [[A:%.*]], [[B:%.*]] @@ -3163,8 +3163,8 @@ define <16 x i32> @test_mask_add_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m ; ; CHECK-LABEL: @test_mask_add_epi32_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = add <16 x i32> [[A:%.*]], [[B:%.*]] @@ -3186,7 +3186,7 @@ define <16 x i32> @test_mask_add_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m define <16 x i32> @test_mask_add_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 { ; ; CHECK-LABEL: @test_mask_add_epi32_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3213,10 +3213,10 @@ define <16 x i32> @test_mask_add_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 { define <16 x i32> @test_mask_add_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_add_epi32_rmk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -3250,9 +3250,9 @@ define <16 x i32> @test_mask_add_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <16 x i32> define <16 x i32> @test_mask_add_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_add_epi32_rmkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -3286,9 +3286,9 @@ define <16 x i32> @test_mask_add_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i16 %mask define <16 x i32> @test_mask_add_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_add_epi32_rmb( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3321,12 +3321,12 @@ define <16 x i32> @test_mask_add_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <16 x i32> define <16 x i32> @test_mask_add_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_add_epi32_rmbk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]] @@ -3366,11 +3366,11 @@ define <16 x i32> @test_mask_add_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <16 x i32 define <16 x i32> @test_mask_add_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_add_epi32_rmbkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF1]] @@ -3412,7 +3412,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32>, <16 x i32>, <16 define <16 x i32> @test_mask_sub_epi32_rr(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_mask_sub_epi32_rr( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = sub <16 x i32> [[A:%.*]], [[B:%.*]] 
@@ -3427,9 +3427,9 @@ define <16 x i32> @test_mask_sub_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i ; ; CHECK-LABEL: @test_mask_sub_epi32_rrk( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = sub <16 x i32> [[A:%.*]], [[B:%.*]] @@ -3452,8 +3452,8 @@ define <16 x i32> @test_mask_sub_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m ; ; CHECK-LABEL: @test_mask_sub_epi32_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = sub <16 x i32> [[A:%.*]], [[B:%.*]] @@ -3475,7 +3475,7 @@ define <16 x i32> @test_mask_sub_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %m define <16 x i32> @test_mask_sub_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi32_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3502,10 +3502,10 @@ define <16 x i32> @test_mask_sub_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 { define <16 x i32> @test_mask_sub_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi32_rmk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -3539,9 +3539,9 @@ define <16 x i32> @test_mask_sub_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <16 x i32> define <16 x i32> @test_mask_sub_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi32_rmkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -3575,9 +3575,9 @@ define <16 x i32> @test_mask_sub_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i16 %mask define <16 x i32> @test_mask_sub_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi32_rmb( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3610,12 +3610,12 @@ define <16 x i32> @test_mask_sub_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <16 x i32> define <16 x i32> @test_mask_sub_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi32_rmbk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 ; CHECK-NEXT: 
[[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]] @@ -3655,10 +3655,10 @@ define <16 x i32> @test_mask_sub_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <16 x i32 define <16 x i32> @test_mask_sub_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i16 %mask, <16 x i32> %extra_param) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi32_rmbkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP17:%.*]], !prof [[PROF1]] @@ -3700,7 +3700,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32>, <16 x i32>, <16 define <8 x i64> @test_mask_add_epi64_rr(<8 x i64> %a, <8 x i64> %b) #0 { ; CHECK-LABEL: @test_mask_add_epi64_rr( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = add <8 x i64> [[A:%.*]], [[B:%.*]] @@ -3715,9 +3715,9 @@ define <8 x i64> @test_mask_add_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> ; ; CHECK-LABEL: @test_mask_add_epi64_rrk( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = add <8 x i64> [[A:%.*]], [[B:%.*]] @@ -3740,8 +3740,8 @@ define <8 x i64> @test_mask_add_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) ; ; CHECK-LABEL: @test_mask_add_epi64_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = add <8 x i64> [[A:%.*]], [[B:%.*]] @@ -3763,7 +3763,7 @@ define <8 x i64> @test_mask_add_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) define <8 x i64> @test_mask_add_epi64_rm(<8 x i64> %a, ptr %ptr_b) #0 { ; ; CHECK-LABEL: @test_mask_add_epi64_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3790,10 +3790,10 @@ define <8 x i64> @test_mask_add_epi64_rm(<8 x i64> %a, ptr %ptr_b) #0 { define <8 x i64> @test_mask_add_epi64_rmk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_add_epi64_rmk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -3827,9 +3827,9 @@ define <8 x i64> @test_mask_add_epi64_rmk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %p define <8 x i64> @test_mask_add_epi64_rmkz(<8 x i64> %a, ptr %ptr_b, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_add_epi64_rmkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; 
CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -3863,9 +3863,9 @@ define <8 x i64> @test_mask_add_epi64_rmkz(<8 x i64> %a, ptr %ptr_b, i8 %mask) define <8 x i64> @test_mask_add_epi64_rmb(<8 x i64> %a, ptr %ptr_b, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_add_epi64_rmb( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3898,12 +3898,12 @@ define <8 x i64> @test_mask_add_epi64_rmb(<8 x i64> %a, ptr %ptr_b, <8 x i64> %e define <8 x i64> @test_mask_add_epi64_rmbk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_add_epi64_rmbk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]] @@ -3943,11 +3943,11 @@ define <8 x i64> @test_mask_add_epi64_rmbk(<8 x i64> %a, ptr %ptr_b, <8 x i64> % define <8 x i64> @test_mask_add_epi64_rmbkz(<8 x i64> %a, ptr %ptr_b, i8 
%mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_add_epi64_rmbkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF1]] @@ -3989,7 +3989,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64>, <8 x i64>, <8 x i6 define <8 x i64> @test_mask_sub_epi64_rr(<8 x i64> %a, <8 x i64> %b) #0 { ; CHECK-LABEL: @test_mask_sub_epi64_rr( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = sub <8 x i64> [[A:%.*]], [[B:%.*]] @@ -4004,9 +4004,9 @@ define <8 x i64> @test_mask_sub_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> ; ; CHECK-LABEL: @test_mask_sub_epi64_rrk( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = sub <8 x i64> [[A:%.*]], [[B:%.*]] @@ -4029,8 +4029,8 @@ define <8 x i64> @test_mask_sub_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) ; ; CHECK-LABEL: @test_mask_sub_epi64_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: 
[[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = sub <8 x i64> [[A:%.*]], [[B:%.*]] @@ -4052,7 +4052,7 @@ define <8 x i64> @test_mask_sub_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) define <8 x i64> @test_mask_sub_epi64_rm(<8 x i64> %a, ptr %ptr_b) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi64_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -4079,10 +4079,10 @@ define <8 x i64> @test_mask_sub_epi64_rm(<8 x i64> %a, ptr %ptr_b) #0 { define <8 x i64> @test_mask_sub_epi64_rmk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi64_rmk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -4116,9 +4116,9 @@ define <8 x i64> @test_mask_sub_epi64_rmk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %p define <8 x i64> @test_mask_sub_epi64_rmkz(<8 x i64> %a, ptr %ptr_b, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi64_rmkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -4152,9 +4152,9 @@ define <8 x i64> @test_mask_sub_epi64_rmkz(<8 x i64> %a, ptr %ptr_b, i8 %mask) define <8 x i64> @test_mask_sub_epi64_rmb(<8 x i64> %a, ptr %ptr_b, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: 
@test_mask_sub_epi64_rmb( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -4187,12 +4187,12 @@ define <8 x i64> @test_mask_sub_epi64_rmb(<8 x i64> %a, ptr %ptr_b, <8 x i64> %e define <8 x i64> @test_mask_sub_epi64_rmbk(<8 x i64> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi64_rmbk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]] @@ -4232,11 +4232,11 @@ define <8 x i64> @test_mask_sub_epi64_rmbk(<8 x i64> %a, ptr %ptr_b, <8 x i64> % define <8 x i64> @test_mask_sub_epi64_rmbkz(<8 x i64> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_sub_epi64_rmbkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load 
<8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF1]] @@ -4278,7 +4278,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64>, <8 x i64>, <8 x i6 define <16 x i32> @test_mask_mullo_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_mask_mullo_epi32_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = mul <16 x i32> [[A:%.*]], [[B:%.*]] @@ -4293,9 +4293,9 @@ define <16 x i32> @test_mask_mullo_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, < ; ; CHECK-LABEL: @test_mask_mullo_epi32_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = mul <16 x i32> [[A:%.*]], [[B:%.*]] @@ -4318,8 +4318,8 @@ define <16 x i32> @test_mask_mullo_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, ; ; CHECK-LABEL: @test_mask_mullo_epi32_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[A:%.*]], [[B:%.*]] @@ -4341,7 +4341,7 @@ define <16 x i32> @test_mask_mullo_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, define <16 
x i32> @test_mask_mullo_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 { ; ; CHECK-LABEL: @test_mask_mullo_epi32_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -4368,10 +4368,10 @@ define <16 x i32> @test_mask_mullo_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 { define <16 x i32> @test_mask_mullo_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_mullo_epi32_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -4405,9 +4405,9 @@ define <16 x i32> @test_mask_mullo_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <16 define <16 x i32> @test_mask_mullo_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_mullo_epi32_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -4441,9 +4441,9 @@ define <16 x i32> @test_mask_mullo_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i16 define <16 x i32> @test_mask_mullo_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_mullo_epi32_rmb_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), 
align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -4476,12 +4476,12 @@ define <16 x i32> @test_mask_mullo_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b, <16 define <16 x i32> @test_mask_mullo_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <16 x i32> %passThru, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_mullo_epi32_rmbk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF1]] @@ -4521,11 +4521,11 @@ define <16 x i32> @test_mask_mullo_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <16 define <16 x i32> @test_mask_mullo_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i16 %mask, <16 x i32> %extra_param, <16 x i32> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_mullo_epi32_rmbkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 
72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF1]] @@ -4570,7 +4570,7 @@ declare <16 x float> @llvm.x86.avx512.mask.shuf.f32x4(<16 x float>, <16 x float> define <16 x float>@test_int_x86_avx512_shuf_f32x4(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_shuf_f32x4( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> @@ -4585,9 +4585,9 @@ define <16 x float>@test_int_x86_avx512_mask_shuf_f32x4(<16 x float> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_f32x4( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> @@ -4613,7 +4613,7 @@ declare <8 x double> @llvm.x86.avx512.mask.shuf.f64x2(<8 x double>, <8 x double> define <8 x double>@test_int_x86_avx512_shuf_f64x2(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_shuf_f64x2( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -4628,9 +4628,9 @@ define <8 x double>@test_int_x86_avx512_mask_shuf_f64x2(<8 x double> %x0, <8 x d ; ; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_f64x2( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -4655,8 +4655,8 @@ define <8 x double>@test_int_x86_avx512_maskz_shuf_f64x2(<8 x double> %x0, <8 x ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_shuf_f64x2( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -4681,7 +4681,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.shuf.i32x4(<16 x i32>, <16 x i32>, i32, define <16 x i32>@test_int_x86_avx512_shuf_i32x4(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_shuf_i32x4( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> @@ -4696,9 +4696,9 @@ define <16 x i32>@test_int_x86_avx512_mask_shuf_i32x4(<16 x i32> %x0, <16 x i32> ; ; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_i32x4( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> @@ -4722,7 +4722,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.shuf.i64x2(<8 x i64>, <8 x i64>, i32, <8 define <8 x i64>@test_int_x86_avx512_shuf_i64x2(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_shuf_i64x2( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> @@ -4737,9 +4737,9 @@ define <8 x i64>@test_int_x86_avx512_mask_shuf_i64x2(<8 x i64> %x0, <8 x i64> %x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_i64x2( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i32> @@ -4763,7 +4763,7 @@ declare <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double>, <8 x double define <8 x double>@test_int_x86_avx512_shuf_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_shuf_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -4778,9 +4778,9 @@ define <8 x double>@test_int_x86_avx512_mask_shuf_pd_512(<8 x double> %x0, <8 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: 
[[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -4805,8 +4805,8 @@ define <8 x double>@test_int_x86_avx512_maskz_shuf_pd_512(<8 x double> %x0, <8 x ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_shuf_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[X0:%.*]], <8 x double> [[X1:%.*]], <8 x i32> @@ -4831,7 +4831,7 @@ declare <16 x float> @llvm.x86.avx512.mask.shuf.ps.512(<16 x float>, <16 x float define <16 x float>@test_int_x86_avx512_shuf_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_shuf_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> @@ -4846,9 +4846,9 @@ define <16 x float>@test_int_x86_avx512_mask_shuf_ps_512(<16 x float> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_shuf_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x 
i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <16 x float> [[X0:%.*]], <16 x float> [[X1:%.*]], <16 x i32> @@ -4874,7 +4874,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32>, <16 x i32>, <16 define <16 x i32>@test_int_x86_avx512_pmaxs_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaxs_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.smax.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -4889,9 +4889,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pmaxs_d_512(<16 x i32> %x0, <16 x i32 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.smax.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -4915,7 +4915,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64>, <8 x i64>, <8 x i define <8 x i64>@test_int_x86_avx512_pmaxs_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaxs_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.smax.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -4930,9 +4930,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pmaxs_q_512(<8 x i64> %x0, <8 x i64> % ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.smax.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -4956,7 +4956,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaxu.d.512(<16 x i32>, <16 x i32>, <16 define <16 x i32>@test_int_x86_avx512_pmaxu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaxu_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.umax.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -4971,9 +4971,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pmaxu_d_512(<16 x i32> %x0, <16 x i32 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.umax.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -4997,7 +4997,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmaxu.q.512(<8 x i64>, <8 x i64>, <8 x i define <8 x i64>@test_int_x86_avx512_pmaxu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaxu_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.umax.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -5012,9 +5012,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pmaxu_q_512(<8 x i64> %x0, <8 x i64> % ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.umax.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -5038,7 +5038,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmins.d.512(<16 x i32>, <16 x i32>, <16 define <16 x i32>@test_int_x86_avx512_pmins_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmins_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.smin.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -5053,9 +5053,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pmins_d_512(<16 x i32> %x0, <16 x i32 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.smin.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -5079,7 +5079,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmins.q.512(<8 x i64>, <8 x i64>, <8 x i define <8 x i64>@test_int_x86_avx512_pmins_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmins_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: 
call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.smin.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -5094,9 +5094,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pmins_q_512(<8 x i64> %x0, <8 x i64> % ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.smin.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -5120,7 +5120,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32>, <16 x i32>, <16 define <16 x i32>@test_int_x86_avx512_pminu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pminu_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -5135,9 +5135,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pminu_d_512(<16 x i32> %x0, <16 x i32 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.umin.v16i32(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -5161,7 +5161,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pminu.q.512(<8 x i64>, <8 x i64>, <8 x i define <8 x i64>@test_int_x86_avx512_pminu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x 
i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pminu_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.umin.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -5176,9 +5176,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pminu_q_512(<8 x i64> %x0, <8 x i64> % ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.umin.v8i64(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -5201,10 +5201,10 @@ define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x ; ; CHECK-LABEL: @test_mm_mask_move_ss( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP0]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[__U:%.*]], 0 @@ -5248,8 +5248,8 @@ define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4 ; CHECK-LABEL: @test_mm_maskz_move_ss( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, 
ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP0]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[__U:%.*]], 0 @@ -5288,10 +5288,10 @@ define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2 ; ; CHECK-LABEL: @test_mm_mask_move_sd( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP0]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = and i8 [[__U:%.*]], 0 @@ -5334,8 +5334,8 @@ define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, < ; CHECK-LABEL: @test_mm_maskz_move_sd( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP0]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[__U:%.*]], 0 @@ -5394,8 +5394,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pmovzxb_d_512(<16 x i8> %x0, <16 x i3 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxb_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <16 x i32> @@ -5420,7 +5420,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pmovzxb_d_512(<16 x i8> %x0, i16 %x2 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxb_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, 
ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <16 x i32> @@ -5462,8 +5462,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovzxb_q_512(<16 x i8> %x0, <8 x i64> ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxb_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <8 x i32> @@ -5488,7 +5488,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovzxb_q_512(<16 x i8> %x0, i8 %x2) ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxb_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <8 x i32> @@ -5530,8 +5530,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovzxd_q_512(<8 x i32> %x0, <8 x i64> ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> splat (i32 -1), <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[X0:%.*]], <8 x i32> poison, <8 x i32> @@ -5556,7 +5556,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovzxd_q_512(<8 x i32> %x0, i8 %x2) ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> splat (i32 -1), <8 x i32> ; CHECK-NEXT: 
[[TMP3:%.*]] = shufflevector <8 x i32> [[X0:%.*]], <8 x i32> poison, <8 x i32> @@ -5598,8 +5598,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pmovzxw_d_512(<16 x i16> %x0, <16 x i ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> splat (i16 -1), <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[X0:%.*]], <16 x i16> poison, <16 x i32> @@ -5624,7 +5624,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pmovzxw_d_512(<16 x i16> %x0, i16 %x ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> splat (i16 -1), <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i16> [[X0:%.*]], <16 x i16> poison, <16 x i32> @@ -5666,8 +5666,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovzxw_q_512(<8 x i16> %x0, <8 x i64> ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxw_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[X0:%.*]], <8 x i16> poison, <8 x i32> @@ -5692,7 +5692,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovzxw_q_512(<8 x i16> %x0, i8 %x2) ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxw_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[X0:%.*]], <8 x i16> poison, <8 x i32> @@ -5734,8 +5734,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pmovsxb_d_512(<16 x i8> %x0, <16 x i3 ; ; CHECK-LABEL: 
@test_int_x86_avx512_mask_pmovsxb_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <16 x i32> @@ -5760,7 +5760,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pmovsxb_d_512(<16 x i8> %x0, i16 %x2 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxb_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <16 x i32> @@ -5802,8 +5802,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovsxb_q_512(<16 x i8> %x0, <8 x i64> ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxb_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <8 x i32> @@ -5828,7 +5828,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovsxb_q_512(<16 x i8> %x0, i8 %x2) ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxb_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> splat (i8 -1), <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[X0:%.*]], <16 x i8> poison, <8 x i32> @@ -5870,8 +5870,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovsxd_q_512(<8 x i32> %x0, <8 x i64> ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> splat (i32 -1), <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[X0:%.*]], <8 x i32> poison, <8 x i32> @@ -5896,7 +5896,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovsxd_q_512(<8 x i32> %x0, i8 %x2) ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxd_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> splat (i32 -1), <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[X0:%.*]], <8 x i32> poison, <8 x i32> @@ -5938,8 +5938,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pmovsxw_d_512(<16 x i16> %x0, <16 x i ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> splat (i16 -1), <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x i16> [[X0:%.*]], <16 x i16> poison, <16 x i32> @@ -5964,7 +5964,7 @@ define <16 x i32>@test_int_x86_avx512_maskz_pmovsxw_d_512(<16 x i16> %x0, i16 %x ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> splat (i16 -1), <16 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i16> [[X0:%.*]], <16 x i16> poison, <16 x i32> @@ -6006,8 +6006,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pmovsxw_q_512(<8 x i16> %x0, <8 x i64> ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxw_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load 
i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i16> [[X0:%.*]], <8 x i16> poison, <8 x i32> @@ -6032,7 +6032,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pmovsxw_q_512(<8 x i16> %x0, i8 %x2) ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxw_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> splat (i16 -1), <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i16> [[X0:%.*]], <8 x i16> poison, <8 x i32> @@ -6058,7 +6058,7 @@ declare <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32>, <16 x i32>) define <16 x i32>@test_int_x86_avx512_prolv_d_512(<16 x i32> %x0, <16 x i32> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_prolv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -6076,9 +6076,9 @@ define <16 x i32>@test_int_x86_avx512_mask_prolv_d_512(<16 x i32> %x0, <16 x i32 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prolv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -6106,8 +6106,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_prolv_d_512(<16 x i32> %x0, <16 x i3 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_prolv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -6136,7 +6136,7 @@ declare <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64>, <8 x i64>) define <8 x i64>@test_int_x86_avx512_prolv_q_512(<8 x i64> %x0, <8 x i64> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_prolv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -6154,9 +6154,9 @@ define <8 x i64>@test_int_x86_avx512_mask_prolv_q_512(<8 x i64> %x0, <8 x i64> % ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prolv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -6184,8 +6184,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_prolv_q_512(<8 x i64> %x0, <8 x i64> ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_prolv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -6214,7 +6214,7 @@ declare <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32>, <16 x i32>) define <16 x i32>@test_int_x86_avx512_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_prorv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -6232,9 +6232,9 @@ define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prorv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -6262,8 +6262,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_prorv_d_512(<16 x i32> %x0, <16 x i3 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_prorv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -6292,7 +6292,7 @@ declare <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64>, <8 x i64>) define <8 x i64>@test_int_x86_avx512_prorv_q_512(<8 x i64> %x0, <8 x i64> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_prorv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -6310,9 +6310,9 @@ define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512(<8 x i64> %x0, <8 x i64> % ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prorv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, 
ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -6340,8 +6340,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_prorv_q_512(<8 x i64> %x0, <8 x i64> ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_prorv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -6371,8 +6371,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_prol_d_512(<1 ; ; CHECK-LABEL: @test_int_x86_avx512_prol_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> splat (i32 3)) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -6427,8 +6427,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_prol_q_512(<8 x ; ; CHECK-LABEL: @test_int_x86_avx512_prol_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> splat (i64 3)) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -6483,8 
+6483,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_pror_d_512(<1 ; ; CHECK-LABEL: @test_int_x86_avx512_pror_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> splat (i32 3)) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -6539,8 +6539,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_pror_q_512(<8 x ; ; CHECK-LABEL: @test_int_x86_avx512_pror_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> splat (i64 3)) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -6595,8 +6595,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_psrl_qi_512 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_psrl_qi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 4) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -6647,8 +6647,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_psrl_di_ ; ; CHECK-LABEL: @test_int_x86_avx512_mask_psrl_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 4) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -6699,8 +6699,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_psra_di_ ; ; CHECK-LABEL: @test_int_x86_avx512_mask_psra_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 3) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -6751,8 +6751,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_psra_qi_512 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_psra_qi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 3) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -6803,8 +6803,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_psll_di_ ; ; CHECK-LABEL: @test_int_x86_avx512_mask_psll_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 3) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -6855,8 +6855,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_psll_qi_512 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_psll_qi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = 
load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 3) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -6904,7 +6904,7 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_psll_qi_512 define <16 x i32> @test_x86_avx512_psll_d(<16 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psll_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -6925,9 +6925,9 @@ define <16 x i32> @test_x86_avx512_mask_psll_d(<16 x i32> %a0, <4 x i32> %a1, <1 ; ; CHECK-LABEL: @test_x86_avx512_mask_psll_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -6956,8 +6956,8 @@ define <16 x i32> @test_x86_avx512_maskz_psll_d(<16 x i32> %a0, <4 x i32> %a1, i ; ; CHECK-LABEL: @test_x86_avx512_maskz_psll_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -6987,7 +6987,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32>, <4 x i32>, <16 x i32 define <8 x i64> @test_x86_avx512_psll_q(<8 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psll_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -7008,9 +7008,9 @@ define <8 x i64> @test_x86_avx512_mask_psll_q(<8 x i64> %a0, <2 x i64> %a1, <8 x ; ; CHECK-LABEL: @test_x86_avx512_mask_psll_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -7039,8 +7039,8 @@ define <8 x i64> @test_x86_avx512_maskz_psll_q(<8 x i64> %a0, <2 x i64> %a1, i8 ; ; CHECK-LABEL: @test_x86_avx512_maskz_psll_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -7070,7 +7070,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64>, <2 x i64>, <8 x i64>, define <16 x i32> @test_x86_avx512_psrl_d(<16 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrl_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -7091,9 +7091,9 @@ define <16 x i32> @test_x86_avx512_mask_psrl_d(<16 x i32> %a0, <4 x i32> %a1, <1 ; ; CHECK-LABEL: @test_x86_avx512_mask_psrl_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -7122,8 +7122,8 @@ define <16 x i32> @test_x86_avx512_maskz_psrl_d(<16 x i32> %a0, <4 x i32> %a1, i ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrl_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -7153,7 +7153,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32>, <4 x i32>, <16 x i32 define <8 x i64> @test_x86_avx512_psrl_q(<8 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrl_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -7174,9 +7174,9 @@ define <8 x i64> @test_x86_avx512_mask_psrl_q(<8 x i64> %a0, <2 x i64> %a1, <8 x ; ; CHECK-LABEL: @test_x86_avx512_mask_psrl_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -7205,8 +7205,8 @@ define <8 x i64> @test_x86_avx512_maskz_psrl_q(<8 x i64> %a0, <2 x i64> %a1, i8 ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrl_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -7236,7 +7236,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64>, <2 x i64>, <8 x i64>, define <16 x i32> @test_x86_avx512_psra_d(<16 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psra_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -7257,9 +7257,9 @@ define <16 x i32> @test_x86_avx512_mask_psra_d(<16 x i32> %a0, <4 x i32> %a1, <1 ; ; CHECK-LABEL: @test_x86_avx512_mask_psra_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -7288,8 +7288,8 @@ define <16 x i32> @test_x86_avx512_maskz_psra_d(<16 x i32> %a0, <4 x i32> %a1, i ; ; CHECK-LABEL: @test_x86_avx512_maskz_psra_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -7319,7 +7319,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32>, <4 x i32>, <16 x i32 define <8 x i64> @test_x86_avx512_psra_q(<8 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psra_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, 
align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -7340,9 +7340,9 @@ define <8 x i64> @test_x86_avx512_mask_psra_q(<8 x i64> %a0, <2 x i64> %a1, <8 x ; ; CHECK-LABEL: @test_x86_avx512_mask_psra_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -7371,8 +7371,8 @@ define <8 x i64> @test_x86_avx512_maskz_psra_q(<8 x i64> %a0, <2 x i64> %a1, i8 ; ; CHECK-LABEL: @test_x86_avx512_maskz_psra_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -7402,7 +7402,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64>, <2 x i64>, <8 x i64>, define <16 x i32> @test_x86_avx512_psllv_d(<16 x i32> %a0, <16 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psllv_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -7420,9 +7420,9 @@ define <16 x i32> @test_x86_avx512_mask_psllv_d(<16 x i32> %a0, <16 x i32> %a1, ; ; CHECK-LABEL: @test_x86_avx512_mask_psllv_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -7448,8 +7448,8 @@ define <16 x i32> @test_x86_avx512_maskz_psllv_d(<16 x i32> %a0, <16 x i32> %a1, ; ; CHECK-LABEL: @test_x86_avx512_maskz_psllv_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -7476,7 +7476,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32>, <16 x i32>, <16 x i define <8 x i64> @test_x86_avx512_psllv_q(<8 x i64> %a0, <8 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psllv_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -7494,9 +7494,9 @@ define <8 x i64> @test_x86_avx512_mask_psllv_q(<8 x i64> %a0, <8 x i64> %a1, <8 ; ; CHECK-LABEL: @test_x86_avx512_mask_psllv_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -7522,8 +7522,8 @@ define <8 x i64> 
@test_x86_avx512_maskz_psllv_q(<8 x i64> %a0, <8 x i64> %a1, i8 ; ; CHECK-LABEL: @test_x86_avx512_maskz_psllv_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -7551,7 +7551,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64>, <8 x i64>, <8 x i64>, define <16 x i32> @test_x86_avx512_psrav_d(<16 x i32> %a0, <16 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrav_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -7569,9 +7569,9 @@ define <16 x i32> @test_x86_avx512_mask_psrav_d(<16 x i32> %a0, <16 x i32> %a1, ; ; CHECK-LABEL: @test_x86_avx512_mask_psrav_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -7597,8 +7597,8 @@ define <16 x i32> @test_x86_avx512_maskz_psrav_d(<16 x i32> %a0, <16 x i32> %a1, ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrav_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], 
zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -7625,7 +7625,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32>, <16 x i32>, <16 x i define <8 x i64> @test_x86_avx512_psrav_q(<8 x i64> %a0, <8 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrav_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -7643,9 +7643,9 @@ define <8 x i64> @test_x86_avx512_mask_psrav_q(<8 x i64> %a0, <8 x i64> %a1, <8 ; ; CHECK-LABEL: @test_x86_avx512_mask_psrav_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -7671,8 +7671,8 @@ define <8 x i64> @test_x86_avx512_maskz_psrav_q(<8 x i64> %a0, <8 x i64> %a1, i8 ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrav_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -7699,7 +7699,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64>, <8 x i64>, <8 x i64>, define <16 x i32> @test_x86_avx512_psrlv_d(<16 x i32> %a0, <16 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrlv_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> 
[[TMP3]] to <16 x i32> @@ -7717,9 +7717,9 @@ define <16 x i32> @test_x86_avx512_mask_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, ; ; CHECK-LABEL: @test_x86_avx512_mask_psrlv_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -7745,8 +7745,8 @@ define <16 x i32> @test_x86_avx512_maskz_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrlv_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -7773,7 +7773,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32>, <16 x i32>, <16 x i define <8 x i64> @test_x86_avx512_psrlv_q(<8 x i64> %a0, <8 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrlv_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -7791,9 +7791,9 @@ define <8 x i64> @test_x86_avx512_mask_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, <8 ; ; CHECK-LABEL: @test_x86_avx512_mask_psrlv_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; 
CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -7819,8 +7819,8 @@ define <8 x i64> @test_x86_avx512_maskz_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, i8 ; ; CHECK-LABEL: @test_x86_avx512_maskz_psrlv_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -7847,7 +7847,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64>, <8 x i64>, <8 x i64>, define <8 x i64> @test_x86_avx512_psrlv_q_memop(<8 x i64> %a0, ptr %ptr) #0 { ; ; CHECK-LABEL: @test_x86_avx512_psrlv_q_memop( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -7893,8 +7893,8 @@ define <8 x double>@test_int_x86_avx512_mask_cvt_dq2pd_512(<8 x i32> %x0, <8 x d ; ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_dq2pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[CVT:%.*]] = sitofp <8 x i32> [[X0:%.*]] to <8 x double> @@ -7934,8 +7934,8 @@ define <8 x double>@test_int_x86_avx512_mask_cvt_udq2pd_512(<8 x i32> %x0, <8 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_udq2pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: 
call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = zext <8 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[CVT:%.*]] = uitofp <8 x i32> [[X0:%.*]] to <8 x double> @@ -7998,8 +7998,8 @@ define <16 x float> @test_x86_vcvtph2ps_512_rrk(<16 x i16> %a0,<16 x float> %a1, ; ; CHECK-LABEL: @test_x86_vcvtph2ps_512_rrk( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -8025,7 +8025,7 @@ define <16 x float> @test_x86_vcvtph2ps_512_sae_rrkz(<16 x i16> %a0, i16 %mask) ; ; CHECK-LABEL: @test_x86_vcvtph2ps_512_sae_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -8048,7 +8048,7 @@ define <16 x float> @test_x86_vcvtph2ps_512_rrkz(<16 x i16> %a0, i16 %mask) #0 ; ; CHECK-LABEL: @test_x86_vcvtph2ps_512_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -8071,7 +8071,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16>, <16 x float define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) #0 { ; CHECK-LABEL: @test_valign_q( -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> @@ -8086,10 +8086,10 @@ define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) #0 { define <8 x i64> @test_mask_valign_q(<8 x i64> %a, <8 x i64> %b, <8 x i64> %src, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_valign_q( -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr 
(i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP2]], <8 x i32> ; CHECK-NEXT: [[PALIGNR:%.*]] = shufflevector <8 x i64> [[B:%.*]], <8 x i64> [[A:%.*]], <8 x i32> @@ -8113,9 +8113,9 @@ declare <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64>, <8 x i64>, i32, define <16 x i32> @test_maskz_valign_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_valign_d( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP2]], <16 x i32> ; CHECK-NEXT: [[PALIGNR:%.*]] = shufflevector <16 x i32> [[B:%.*]], <16 x i32> [[A:%.*]], <16 x i32> @@ -8141,7 +8141,7 @@ declare <8 x double> @llvm.x86.avx512.mask.vpermilvar.pd.512(<8 x double>, <8 x define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3> ; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -8166,9 +8166,9 @@ define <8 x double>@test_int_x86_avx512_mask_vpermilvar_pd_512(<8 x double> %x0, ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermilvar_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3> ; 
CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -8203,8 +8203,8 @@ define <8 x double>@test_int_x86_avx512_maskz_vpermilvar_pd_512(<8 x double> %x0 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermilvar_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3> ; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -8239,7 +8239,7 @@ declare <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float>, <16 x define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4> ; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -8264,9 +8264,9 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512(<16 x float> %x0, ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermilvar_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4> ; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -8302,8 +8302,8 @@ define <16 x float>@test_int_x86_avx512_maskz_vpermilvar_ps_512(<16 x float> %x0 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermilvar_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = 
load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4> ; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -8338,8 +8338,8 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> ; CHECK-NEXT: [[TMP7:%.*]] = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> [[X0]], <16 x i32> ) @@ -8390,7 +8390,7 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16 define <8 x i64> @test_mask_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_mask_mul_epi32_rr( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -8421,9 +8421,9 @@ define <8 x i64> @test_mask_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64 ; ; CHECK-LABEL: @test_mask_mul_epi32_rrk( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -8462,8 +8462,8 @@ define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas ; ; CHECK-LABEL: @test_mask_mul_epi32_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -8501,7 +8501,7 @@ define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 { ; ; CHECK-LABEL: @test_mask_mul_epi32_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -8544,10 +8544,10 @@ define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 { define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_mul_epi32_rmk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -8597,9 +8597,9 @@ define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> % define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_mul_epi32_rmkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -8649,9 +8649,9 @@ define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_mul_epi32_rmb( -; CHECK-NEXT: 
[[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -8703,12 +8703,12 @@ define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> % define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_mul_epi32_rmbk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP35:%.*]], label [[TMP36:%.*]], !prof [[PROF1]] @@ -8767,11 +8767,11 @@ define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> define <8 x i64> @test_mask_mul_epi32_rmbk_buildvector(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param) #0 { ; ; CHECK-LABEL: @test_mask_mul_epi32_rmbk_buildvector( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; 
CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP35:%.*]], !prof [[PROF1]] @@ -8848,11 +8848,11 @@ define <8 x i64> @test_mask_mul_epi32_rmbk_buildvector(<16 x i32> %a, ptr %ptr_b define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_mul_epi32_rmbkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP34:%.*]], label [[TMP35:%.*]], !prof [[PROF1]] @@ -8911,10 +8911,10 @@ define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask, define <8 x i64> @test_mask_mul_epi32_rmbkz_buildvector(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param) #0 { ; ; CHECK-LABEL: @test_mask_mul_epi32_rmbkz_buildvector( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP34:%.*]], !prof [[PROF1]] @@ -8993,7 +8993,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32>, <16 x i32>, <8 x 
define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_mask_mul_epu32_rr( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -9024,9 +9024,9 @@ define <8 x i64> @test_mask_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64 ; ; CHECK-LABEL: @test_mask_mul_epu32_rrk( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -9065,8 +9065,8 @@ define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas ; ; CHECK-LABEL: @test_mask_mul_epu32_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -9104,7 +9104,7 @@ define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mas define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, ptr %ptr_b) #0 { ; ; CHECK-LABEL: @test_mask_mul_epu32_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -9147,10 +9147,10 @@ define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, ptr %ptr_b) #0 { define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_mul_epu32_rmk( -; CHECK-NEXT: 
[[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -9200,9 +9200,9 @@ define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> % define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_mul_epu32_rmkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -9252,9 +9252,9 @@ define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_mul_epu32_rmb( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -9306,12 +9306,12 @@ define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> % define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_mul_epu32_rmbk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; 
CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP35:%.*]], label [[TMP36:%.*]], !prof [[PROF1]] @@ -9370,11 +9370,11 @@ define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> define <8 x i64> @test_mask_mul_epu32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param, <8 x i64> %extra_param2) #0 { ; ; CHECK-LABEL: @test_mask_mul_epu32_rmbkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP34:%.*]], label [[TMP35:%.*]], !prof [[PROF1]] @@ -9435,8 +9435,8 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32>, <16 x i32>, <8 define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_vextractf32x4( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <4 x i32> @@ -9465,8 +9465,8 @@ declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float>, i32, < define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_vextracti64x4( -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <4 x i32> @@ -9494,7 +9494,7 @@ define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_vextracti32x4( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <4 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> [[A:%.*]], <16 x i32> [[A]], <4 x i32> @@ -9536,7 +9536,7 @@ declare <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float>, <4 x fl define <16 x float>@test_int_x86_avx512_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_insertf32x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> @@ -9553,10 +9553,10 @@ define <16 x float>@test_int_x86_avx512_insertf32x4_512(<16 x float> %x0, <4 x f define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_insertf32x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x float> [[X1:%.*]], <4 x float> poison, <16 x i32> @@ -9582,9 +9582,9 @@ define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, < define <16 x float>@test_int_x86_avx512_maskz_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, i16 %x4) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_insertf32x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[X1:%.*]], <4 x float> poison, <16 x i32> @@ -9610,7 +9610,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32>, <4 x i32>, define <16 x i32>@test_int_x86_avx512_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_inserti32x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> @@ -9627,10 +9627,10 @@ define <16 x i32>@test_int_x86_avx512_inserti32x4_512(<16 x i32> %x0, <4 x i32> define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_inserti32x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[X1:%.*]], <4 x i32> poison, <16 x i32> 
@@ -9654,9 +9654,9 @@ define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x define <16 x i32>@test_int_x86_avx512_maskz_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, i16 %x4) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_inserti32x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> splat (i32 -1), <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[X1:%.*]], <4 x i32> poison, <16 x i32> @@ -9681,7 +9681,7 @@ declare <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double>, <4 x do define <8 x double>@test_int_x86_avx512_insertf64x4_512(<8 x double> %x0, <4 x double> %x1, <8 x double> %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_insertf64x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> @@ -9698,10 +9698,10 @@ define <8 x double>@test_int_x86_avx512_insertf64x4_512(<8 x double> %x0, <4 x d define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, <4 x double> %x1, <8 x double> %x3, i8 %x4) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_insertf64x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x double> [[X1:%.*]], <4 x double> poison, <8 x i32> @@ -9727,9 +9727,9 @@ define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, < define <8 x double>@test_int_x86_avx512_maskz_insertf64x4_512(<8 x double> %x0, <4 x double> %x1, i8 %x4) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_insertf64x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to 
ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x double> [[X1:%.*]], <4 x double> poison, <8 x i32> @@ -9755,7 +9755,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64>, <4 x i64>, i3 define <8 x i64>@test_int_x86_avx512_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x1, <8 x i64> %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_inserti64x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> @@ -9772,10 +9772,10 @@ define <8 x i64>@test_int_x86_avx512_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x1, <8 x i64> %x3, i8 %x4) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_inserti64x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i64> [[X1:%.*]], <4 x i64> poison, <8 x i32> @@ -9799,9 +9799,9 @@ define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i6 define <8 x i64>@test_int_x86_avx512_maskz_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x1, i8 %x4) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_inserti64x4_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> splat (i64 -1), <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[X1:%.*]], <4 x i64> poison, <8 x i32> @@ -9850,8 +9850,8 @@ declare <8 x i64> @llvm.x86.avx512.movntdqa(ptr) nounwind readonly define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, <8 x i16> %extra_param) #0 { ; CHECK-LABEL: @test_cmp_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP77:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP77:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] @@ -9971,9 +9971,9 @@ define <8 x i16> @test_mask_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask, ; ; CHECK-LABEL: @test_mask_cmp_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP146:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP146:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] @@ -10162,8 +10162,8 @@ declare i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32>, <16 x i32>, i32, i16) no define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, <8 x i16> %extra_param) #0 { ; CHECK-LABEL: @test_ucmp_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP69:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP69:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] @@ -10275,9 +10275,9 @@ define <8 x i16> @test_mask_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask ; ; CHECK-LABEL: @test_mask_ucmp_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: 
[[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP138:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP138:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i32> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] @@ -10458,8 +10458,8 @@ declare i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32>, <16 x i32>, i32, i16) n define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i8> %extra_param) #0 { ; CHECK-LABEL: @test_cmp_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP77:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP77:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] @@ -10579,9 +10579,9 @@ define <8 x i8> @test_mask_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask, <8 ; ; CHECK-LABEL: @test_mask_cmp_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP146:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP146:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] @@ -10770,8 +10770,8 @@ declare i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwi define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i8> %extra_param) #0 { ; CHECK-LABEL: @test_ucmp_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP69:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP69:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i64> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] @@ -10883,9 +10883,9 @@ define <8 x i8> @test_mask_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask, <8 ; ; CHECK-LABEL: @test_mask_ucmp_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP138:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP138:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i64> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] @@ -11069,8 +11069,8 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0, ; ; CHECK-LABEL: @test_int_x86_avx512_mask_broadcastf32x4_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[X0:%.*]], <4 x float> [[X0]], <16 x i32> @@ -11116,8 +11116,8 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512_load(ptr %x0ptr, ; ; CHECK-LABEL: @test_int_x86_avx512_mask_broadcastf32x4_512_load( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -11169,8 +11169,8 @@ define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0 ; ; CHECK-LABEL: 
@test_int_x86_avx512_mask_broadcastf64x4_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x double> [[X0:%.*]], <4 x double> [[X0]], <8 x i32> @@ -11195,7 +11195,7 @@ define <8 x double>@test_int_x86_avx512_maskz_broadcastf64x4_512(<4 x double> %x ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_broadcastf64x4_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x double> [[X0:%.*]], <4 x double> [[X0]], <8 x i32> @@ -11219,8 +11219,8 @@ define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512_load(ptr %x0ptr, ; ; CHECK-LABEL: @test_int_x86_avx512_mask_broadcastf64x4_512_load( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -11259,8 +11259,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_broadcas ; ; CHECK-LABEL: @test_int_x86_avx512_mask_broadcasti32x4_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP1]], <16 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[X0:%.*]], <4 x i32> [[X0]], <16 x i32> @@ -11306,8 +11306,8 @@ define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512_load(ptr %x0ptr, < ; ; CHECK-LABEL: 
@test_int_x86_avx512_mask_broadcasti32x4_512_load( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -11357,8 +11357,8 @@ define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_broadcasti64x4_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[X0:%.*]], <4 x i64> [[X0]], <8 x i32> @@ -11381,7 +11381,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_broadcasti64x4_512(<4 x i64> %x0, i8 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_broadcasti64x4_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP1]], <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[X0:%.*]], <4 x i64> [[X0]], <8 x i32> @@ -11404,8 +11404,8 @@ define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512_load(ptr %x0ptr, <8 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_broadcasti64x4_512_load( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -11457,8 +11457,8 @@ define <16 x i32>@test_int_x86_avx512_mask_pabs_d_512(<16 x i32> %x0, <16 x i32> ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pabs_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = 
load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <16 x i32> [[X0:%.*]], splat (i32 -2147483648) ; CHECK-NEXT: [[TMP13:%.*]] = select <16 x i1> [[TMP12]], <16 x i32> splat (i32 -1), <16 x i32> [[TMP1]] @@ -11500,8 +11500,8 @@ define <8 x i64>@test_int_x86_avx512_mask_pabs_q_512(<8 x i64> %x0, <8 x i64> %x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pabs_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <8 x i64> [[X0:%.*]], splat (i64 -9223372036854775808) ; CHECK-NEXT: [[TMP13:%.*]] = select <8 x i1> [[TMP12]], <8 x i64> splat (i64 -1), <8 x i64> [[TMP1]] @@ -11526,8 +11526,8 @@ define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1, i8 %m) #0 { ; ; CHECK-LABEL: @test_vptestmq( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i64> [[A0:%.*]], [[TMP2]] @@ -11585,8 +11585,8 @@ define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1, i16 %m) #0 { ; ; CHECK-LABEL: @test_vptestmd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[A0:%.*]], [[TMP2]] @@ -11646,8 +11646,8 @@ define i16@test_int_x86_avx512_ptestnm_d_512(<16 x i32> %x0, <16 x i32> %x1, i16 ; ; 
CHECK-LABEL: @test_int_x86_avx512_ptestnm_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[X0:%.*]], [[TMP2]] @@ -11706,8 +11706,8 @@ define i8@test_int_x86_avx512_ptestnm_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2 ; ; CHECK-LABEL: @test_int_x86_avx512_ptestnm_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i64> [[X0:%.*]], [[TMP2]] @@ -11765,7 +11765,7 @@ define i16 @test_kand(i16 %a0, i16 %a1) #0 { ; ; CHECK-LABEL: @test_kand( ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1> @@ -11802,7 +11802,7 @@ define i16 @test_kandn(i16 %a0, i16 %a1) #0 { ; ; CHECK-LABEL: @test_kandn( ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1> @@ -11862,7 +11862,7 @@ define i16 @test_kor(i16 %a0, i16 %a1) #0 { ; ; CHECK-LABEL: @test_kor( ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1> @@ -11904,7 +11904,7 @@ define i16 @test_kxnor(i16 %a0, i16 %a1) #0 { ; ; CHECK-LABEL: @test_kxnor( ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 
8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1> @@ -11937,7 +11937,7 @@ define i16 @test_kxor(i16 %a0, i16 %a1) #0 { ; ; CHECK-LABEL: @test_kxor( ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[A0:%.*]] to <16 x i1> @@ -11966,9 +11966,9 @@ define i32 @test_kortestz(<8 x i64> %A, <8 x i64> %B, <8 x i64> %C, <8 x i64> %D ; CHECK-LABEL: @test_kortestz( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP0]] to <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[A:%.*]] to <16 x i32> @@ -12043,9 +12043,9 @@ define i32 @test_kortestc(<8 x i64> %A, <8 x i64> %B, <8 x i64> %C, <8 x i64> %D ; CHECK-LABEL: @test_kortestc( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP0]] to <16 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[A:%.*]] to <16 x i32> @@ -12118,7 +12118,7 @@ entry: define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) #0 { ; CHECK-LABEL: @test_cmpps( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -12143,7 +12143,7 @@ declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> , <16 x float> , i32, define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) #0 { ; CHECK-LABEL: @test_cmppd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -12168,7 +12168,7 @@ declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> , <8 x double> , i32, i define <8 x i64> @test_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_mul_epi32_rr( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -12199,9 +12199,9 @@ define <8 x i64> @test_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %pa ; ; CHECK-LABEL: @test_mul_epi32_rrk( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -12242,8 +12242,8 @@ define <8 x i64> @test_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) # ; ; CHECK-LABEL: @test_mul_epi32_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -12283,7 +12283,7 @@ define <8 x i64> @test_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) # define <8 x i64> @test_mul_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 { ; ; CHECK-LABEL: @test_mul_epi32_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -12326,10 +12326,10 @@ define <8 x i64> @test_mul_epi32_rm(<16 x i32> %a, ptr %ptr_b) #0 { define <8 x i64> @test_mul_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mul_epi32_rmk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -12381,9 +12381,9 @@ define <8 x i64> @test_mul_epi32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passT define <8 x i64> @test_mul_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mul_epi32_rmkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -12435,8 +12435,8 @@ define <8 x i64> @test_mul_epi32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 { define <8 x i64> @test_mul_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra_param) #0 { ; ; CHECK-LABEL: @test_mul_epi32_rmb( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: 
[[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -12488,11 +12488,11 @@ define <8 x i64> @test_mul_epi32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra define <8 x i64> @test_mul_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param) #0 { ; ; CHECK-LABEL: @test_mul_epi32_rmbk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP33:%.*]], !prof [[PROF1]] @@ -12553,10 +12553,10 @@ define <8 x i64> @test_mul_epi32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %pass define <8 x i64> @test_mul_epi32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param) #0 { ; ; CHECK-LABEL: @test_mul_epi32_rmbkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP32:%.*]], !prof [[PROF1]] @@ -12619,7 +12619,7 @@ declare <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32>, <16 x i32>) define <8 x i64> @test_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_mul_epu32_rr( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] 
= load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -12650,9 +12650,9 @@ define <8 x i64> @test_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %pa ; ; CHECK-LABEL: @test_mul_epu32_rrk( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -12693,8 +12693,8 @@ define <8 x i64> @test_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) # ; ; CHECK-LABEL: @test_mul_epu32_rrkz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <8 x i64> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[A:%.*]] to <8 x i64> @@ -12734,7 +12734,7 @@ define <8 x i64> @test_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) # define <8 x i64> @test_mul_epu32_rm(<16 x i32> %a, ptr %ptr_b) #0 { ; ; CHECK-LABEL: @test_mul_epu32_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -12777,10 +12777,10 @@ define <8 x i64> @test_mul_epu32_rm(<16 x i32> %a, ptr %ptr_b) #0 { define <8 x i64> @test_mul_epu32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mul_epu32_rmk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls 
to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -12832,9 +12832,9 @@ define <8 x i64> @test_mul_epu32_rmk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passT define <8 x i64> @test_mul_epu32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mul_epu32_rmkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -12886,8 +12886,8 @@ define <8 x i64> @test_mul_epu32_rmkz(<16 x i32> %a, ptr %ptr_b, i8 %mask) #0 { define <8 x i64> @test_mul_epu32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra_param) #0 { ; ; CHECK-LABEL: @test_mul_epu32_rmb( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -12939,11 +12939,11 @@ define <8 x i64> @test_mul_epu32_rmb(<16 x i32> %a, ptr %ptr_b, <8 x i64> %extra define <8 x i64> @test_mul_epu32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %passThru, i8 %mask, <8 x i64> %extra_param) #0 { ; ; CHECK-LABEL: @test_mul_epu32_rmbk( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] 
= load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP33:%.*]], !prof [[PROF1]] @@ -13004,10 +13004,10 @@ define <8 x i64> @test_mul_epu32_rmbk(<16 x i32> %a, ptr %ptr_b, <8 x i64> %pass define <8 x i64> @test_mul_epu32_rmbkz(<16 x i32> %a, ptr %ptr_b, i8 %mask, <8 x i64> %extra_param) #0 { ; ; CHECK-LABEL: @test_mul_epu32_rmbkz( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP32:%.*]], !prof [[PROF1]] @@ -13070,7 +13070,7 @@ declare <8 x i64> @llvm.x86.avx512.pmulu.dq.512(<16 x i32>, <16 x i32>) define <2 x double> @test_x86_avx512_mm_cvtu32_sd(<2 x double> %a, i32 %b) ; ; CHECK-LABEL: @test_x86_avx512_mm_cvtu32_sd( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP1]] to i64 @@ -13187,7 +13187,7 @@ declare <8 x double> @llvm.x86.avx512.mask.permvar.df.512(<8 x double>, <8 x i64 define <8 x double>@test_int_x86_avx512_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -13211,9 +13211,9 @@ define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr 
(i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -13247,8 +13247,8 @@ define <8 x double>@test_int_x86_avx512_maskz_permvar_df_512(<8 x double> %x0, < ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -13282,7 +13282,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.permvar.di.512(<8 x i64>, <8 x i64>, <8 define <8 x i64>@test_int_x86_avx512_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -13297,9 +13297,9 @@ define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -13322,8 +13322,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_permvar_di_512(<8 x i64> 
%x0, <8 x i6 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -13347,7 +13347,7 @@ declare <16 x float> @llvm.x86.avx512.mask.permvar.sf.512(<16 x float>, <16 x i3 define <16 x float>@test_int_x86_avx512_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_sf_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -13371,9 +13371,9 @@ define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <1 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_sf_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -13407,8 +13407,8 @@ define <16 x float>@test_int_x86_avx512_maskz_permvar_sf_512(<16 x float> %x0, < ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_sf_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -13442,7 +13442,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.permvar.si.512(<16 x i32>, <16 x i32>, define <16 x i32>@test_int_x86_avx512_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_si_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -13457,9 +13457,9 @@ define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_si_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -13482,8 +13482,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_permvar_si_512(<16 x i32> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_si_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -13507,8 +13507,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32>, <16 x i32>, define <16 x i32>@test_int_x86_avx512_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pternlog_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -13535,9 +13535,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pternlog_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -13574,9 +13574,9 @@ define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pternlog_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -13612,8 +13612,8 @@ declare <8 x i64> @llvm.x86.avx512.mask.pternlog.q.512(<8 x i64>, <8 x i64>, <8 define <8 x i64>@test_int_x86_avx512_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pternlog_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 
8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -13640,9 +13640,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pternlog_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -13679,9 +13679,9 @@ define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i6 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pternlog_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -13716,10 +13716,10 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpermi2var.d.512(<16 x i32>, <16 x i32> define <16 x i32>@test_int_x86_avx512_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: 
[[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -13753,10 +13753,10 @@ define <16 x i32>@test_int_x86_avx512_vpermi2var_d_512(<16 x i32> %x0, <16 x i32 define <16 x i32>@test_int_x86_avx512_mask_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -13800,8 +13800,8 @@ declare <8 x double> @llvm.x86.avx512.mask.vpermi2var.pd.512(<8 x double>, <8 x define <8 x double>@test_int_x86_avx512_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = trunc <8 x i64> [[TMP8]] to <8 x i3> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -13828,9 +13828,9 @@ define <8 x double>@test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0, ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, 
i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = trunc <8 x i64> [[TMP2]] to <8 x i3> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -13868,8 +13868,8 @@ declare <16 x float> @llvm.x86.avx512.mask.vpermi2var.ps.512(<16 x float>, <16 x define <16 x float>@test_int_x86_avx512_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = trunc <16 x i32> [[TMP8]] to <16 x i4> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -13896,9 +13896,9 @@ define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0, ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = trunc <16 x i32> [[TMP2]] to <16 x i4> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -13936,8 +13936,8 @@ declare <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64>, <8 x i64>, < define <8 x i64>@test_int_x86_avx512_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3> ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X3:%.*]], <8 x i64> [[TMP3]]) @@ -13960,9 +13960,9 @@ define <8 x 
i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = trunc <8 x i64> [[TMP2]] to <8 x i3> ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X1:%.*]], <8 x i64> [[TMP3]]) @@ -13994,10 +13994,10 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32>, <16 x i32 define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, i16 %x3) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -14041,11 +14041,11 @@ declare <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64>, <8 x do define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, ptr %x2ptr, i8 %x3, <8 x double> %extra_param) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <8 
x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP12:%.*]], !prof [[PROF1]] @@ -14099,10 +14099,10 @@ declare <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32>, <16 x define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -14139,10 +14139,10 @@ declare <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64>, <8 x i64>, define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = trunc <8 x i64> [[X0]] to <8 x i3> ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X4:%.*]], <8 x i64> [[TMP3]]) @@ -14173,8 +14173,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.vpermt2var.d.512(<16 x i32>, <16 x i32> define <16 x i32>@test_int_x86_avx512_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermt2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, 
ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4> @@ -14197,10 +14197,10 @@ define <16 x i32>@test_int_x86_avx512_vpermt2var_d_512(<16 x i32> %x0, <16 x i32 define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermt2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4> ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> [[TMP1]], <16 x i32> [[X4:%.*]], <16 x i32> [[TMP3]]) @@ -14234,7 +14234,7 @@ declare <8 x double> @llvm.x86.avx512.mask.mul.pd.512(<8 x double>, <8 x double> define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vsubps_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -14258,7 +14258,7 @@ define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vsubps_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -14282,7 +14282,7 @@ define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vsubps_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; 
CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -14306,7 +14306,7 @@ define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vsubps_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -14330,7 +14330,7 @@ define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vmulps_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -14354,7 +14354,7 @@ define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vmulps_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -14378,7 +14378,7 @@ define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vmulps_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -14402,7 +14402,7 @@ define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vmulps_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -14428,8 +14428,8 @@ define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 ; ; CHECK-LABEL: @test_vmulps_mask_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14463,8 +14463,8 @@ define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 ; ; CHECK-LABEL: @test_vmulps_mask_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14498,8 +14498,8 @@ define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 ; ; CHECK-LABEL: @test_vmulps_mask_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14533,8 +14533,8 @@ define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 ; ; CHECK-LABEL: @test_vmulps_mask_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14569,9 +14569,9 @@ define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> ; ; CHECK-LABEL: @test_vmulps_mask_passthru_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -14606,9 +14606,9 @@ define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> ; ; CHECK-LABEL: @test_vmulps_mask_passthru_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -14643,9 +14643,9 @@ define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> ; ; CHECK-LABEL: @test_vmulps_mask_passthru_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: 
call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -14680,9 +14680,9 @@ define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> ; ; CHECK-LABEL: @test_vmulps_mask_passthru_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -14718,8 +14718,8 @@ define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 ; ; CHECK-LABEL: @test_vmulpd_mask_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14753,8 +14753,8 @@ define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 ; ; CHECK-LABEL: @test_vmulpd_mask_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14788,8 +14788,8 @@ define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 ; ; CHECK-LABEL: @test_vmulpd_mask_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14823,8 +14823,8 @@ define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 ; ; CHECK-LABEL: @test_vmulpd_mask_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14858,8 +14858,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14891,8 +14891,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14924,8 +14924,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14958,8 +14958,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -14993,8 +14993,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -15027,9 +15027,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_add_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15062,9 +15062,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_add_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15097,9 +15097,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_add_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15133,9 +15133,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_add_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15170,9 +15170,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_mask_add_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15206,7 +15206,7 @@ define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15228,7 +15228,7 @@ define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15250,7 +15250,7 @@ define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15273,7 +15273,7 @@ define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15296,7 +15296,7 @@ define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15321,9 +15321,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15356,9 +15356,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15391,9 +15391,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = 
load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15427,9 +15427,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15464,9 +15464,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15499,7 +15499,7 @@ define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15521,7 +15521,7 @@ define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15543,7 +15543,7 @@ define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15566,7 +15566,7 @@ define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15589,7 +15589,7 @@ define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15613,8 +15613,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -15646,8 +15646,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -15679,8 +15679,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -15713,8 +15713,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -15748,8 +15748,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -15782,9 +15782,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_div_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15817,9 +15817,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_div_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15852,9 +15852,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_div_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15888,9 +15888,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x ; ; CHECK-LABEL: @test_mm512_mask_div_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15925,9 +15925,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_mask_div_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -15961,7 +15961,7 @@ define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_div_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -15983,7 +15983,7 @@ define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x floa define 
<16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_div_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -16005,7 +16005,7 @@ define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_div_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -16028,7 +16028,7 @@ define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_div_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -16051,7 +16051,7 @@ define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_div_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -16075,9 +16075,9 @@ declare <16 x float> @llvm.x86.avx512.mask.div.ps.512(<16 x float>, <16 x float> define void @test_mask_compress_store_pd_512(ptr %addr, <8 x double> %data, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_compress_store_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to 
ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -16107,7 +16107,7 @@ define void @test_compress_store_pd_512(ptr %addr, <8 x double> %data) #0 { ; ; CHECK-LABEL: @test_compress_store_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -16129,9 +16129,9 @@ define void @test_compress_store_pd_512(ptr %addr, <8 x double> %data) #0 { define void @test_mask_compress_store_ps_512(ptr %addr, <16 x float> %data, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_compress_store_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -16161,7 +16161,7 @@ define void @test_compress_store_ps_512(ptr %addr, <16 x float> %data) #0 { ; ; CHECK-LABEL: @test_compress_store_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -16183,9 +16183,9 @@ define void @test_compress_store_ps_512(ptr %addr, <16 x float> %data) #0 { define void @test_mask_compress_store_q_512(ptr %addr, <8 x i64> %data, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_compress_store_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -16215,7 +16215,7 @@ define void @test_compress_store_q_512(ptr %addr, <8 x 
i64> %data) #0 { ; ; CHECK-LABEL: @test_compress_store_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -16237,9 +16237,9 @@ define void @test_compress_store_q_512(ptr %addr, <8 x i64> %data) #0 { define void @test_mask_compress_store_d_512(ptr %addr, <16 x i32> %data, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_compress_store_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -16269,7 +16269,7 @@ define void @test_compress_store_d_512(ptr %addr, <16 x i32> %data) #0 { ; ; CHECK-LABEL: @test_compress_store_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -16291,9 +16291,9 @@ define void @test_compress_store_d_512(ptr %addr, <16 x i32> %data) #0 { define <8 x double> @test_mask_expand_load_pd_512(ptr %addr, <8 x double> %data, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_expand_load_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -16321,7 +16321,7 @@ define <8 x double> @test_mask_expand_load_pd_512(ptr %addr, <8 x double> %data, define <8 x double> @test_maskz_expand_load_pd_512(ptr %addr, i8 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_expand_load_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; 
CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -16353,7 +16353,7 @@ define <8 x double> @test_expand_load_pd_512(ptr %addr, <8 x double> %data) #0 ; ; CHECK-LABEL: @test_expand_load_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -16377,7 +16377,7 @@ define <8 x double> @test_expand_load_pd_512(ptr %addr, <8 x double> %data) #0 define <8 x double> @test_zero_mask_expand_load_pd_512(ptr %addr, <8 x double> %data, i8 %mask) #0 { ; CHECK-LABEL: @test_zero_mask_expand_load_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -16400,9 +16400,9 @@ define <8 x double> @test_zero_mask_expand_load_pd_512(ptr %addr, <8 x double> % define <16 x float> @test_mask_expand_load_ps_512(ptr %addr, <16 x float> %data, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_expand_load_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -16430,7 +16430,7 @@ define <16 x float> @test_mask_expand_load_ps_512(ptr %addr, <16 x float> %data, define <16 x float> @test_maskz_expand_load_ps_512(ptr %addr, i16 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_expand_load_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -16462,7 +16462,7 @@ define <16 x float> @test_expand_load_ps_512(ptr %addr, <16 x float> %data) #0 ; ; CHECK-LABEL: @test_expand_load_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, 
ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -16485,9 +16485,9 @@ define <16 x float> @test_expand_load_ps_512(ptr %addr, <16 x float> %data) #0 define <8 x i64> @test_mask_expand_load_q_512(ptr %addr, <8 x i64> %data, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_expand_load_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -16515,7 +16515,7 @@ define <8 x i64> @test_mask_expand_load_q_512(ptr %addr, <8 x i64> %data, i8 %ma define <8 x i64> @test_maskz_expand_load_q_512(ptr %addr, i8 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_expand_load_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -16547,7 +16547,7 @@ define <8 x i64> @test_expand_load_q_512(ptr %addr, <8 x i64> %data) #0 { ; ; CHECK-LABEL: @test_expand_load_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -16570,9 +16570,9 @@ define <8 x i64> @test_expand_load_q_512(ptr %addr, <8 x i64> %data) #0 { define <16 x i32> @test_mask_expand_load_d_512(ptr %addr, <16 x i32> %data, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_expand_load_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -16600,7 +16600,7 @@ define <16 x i32> @test_mask_expand_load_d_512(ptr %addr, <16 x i32> %data, i16 define <16 x i32> 
@test_maskz_expand_load_d_512(ptr %addr, i16 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_expand_load_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -16632,7 +16632,7 @@ define <16 x i32> @test_expand_load_d_512(ptr %addr, <16 x i32> %data) #0 { ; ; CHECK-LABEL: @test_expand_load_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ADDR:%.*]] to i64 ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -16656,8 +16656,8 @@ define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x f ; ; CHECK-LABEL: @test_mm512_maskz_min_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16682,8 +16682,8 @@ define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_min_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16708,9 +16708,9 @@ define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x fl ; ; CHECK-LABEL: @test_mm512_mask_min_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16736,9 +16736,9 @@ define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_mask_min_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16763,7 +16763,7 @@ define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_min_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16778,7 +16778,7 @@ define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> define <16 x float> @test_mm512_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_min_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16795,8 +16795,8 @@ define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x f ; ; CHECK-LABEL: @test_mm512_maskz_max_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16821,8 +16821,8 @@ define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_maskz_max_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16847,9 +16847,9 @@ define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x fl ; ; CHECK-LABEL: @test_mm512_mask_max_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16875,9 +16875,9 @@ define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 ; ; CHECK-LABEL: @test_mm512_mask_max_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: 
[[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16902,7 +16902,7 @@ define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_max_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16917,7 +16917,7 @@ define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> define <16 x float> @test_mm512_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_max_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -16945,8 +16945,8 @@ define <8 x double> @test_mask_sqrt_pd_512(<8 x double> %a0, <8 x double> %passt ; ; CHECK-LABEL: @test_mask_sqrt_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x double> @llvm.sqrt.v8f64(<8 x double> [[A0:%.*]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP2]] to <8 x i1> @@ -16969,7 +16969,7 @@ define <8 x double> @test_maskz_sqrt_pd_512(<8 x double> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_sqrt_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x double> @llvm.sqrt.v8f64(<8 x double> [[A0:%.*]]) ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP2]] to <8 x i1> @@ -17009,8 +17009,8 @@ define <8 x double> @test_mask_sqrt_round_pd_512(<8 x double> %a0, <8 x double> ; ; CHECK-LABEL: @test_mask_sqrt_round_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -17040,7 +17040,7 @@ define <8 x double> @test_maskz_sqrt_round_pd_512(<8 x double> %a0, i8 %mask) # ; ; CHECK-LABEL: @test_maskz_sqrt_round_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -17082,8 +17082,8 @@ define <16 x float> @test_mask_sqrt_ps_512(<16 x float> %a0, <16 x float> %passt ; ; CHECK-LABEL: @test_mask_sqrt_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[A0:%.*]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP2]] to <16 x i1> @@ -17106,7 +17106,7 @@ define <16 x float> @test_maskz_sqrt_ps_512(<16 x float> %a0, i16 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_sqrt_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[A0:%.*]]) ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1> @@ -17146,8 +17146,8 @@ define <16 x float> @test_mask_sqrt_round_ps_512(<16 x float> %a0, <16 x float> ; ; CHECK-LABEL: @test_mask_sqrt_round_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, 
i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -17177,7 +17177,7 @@ define <16 x float> @test_maskz_sqrt_round_ps_512(<16 x float> %a0, i16 %mask) ; ; CHECK-LABEL: @test_maskz_sqrt_round_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -17209,7 +17209,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32>, <16 x i32>, <16 define <16 x i32>@test_int_x86_avx512_prolv_d_512_old(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_prolv_d_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -17227,9 +17227,9 @@ define <16 x i32>@test_int_x86_avx512_mask_prolv_d_512_old(<16 x i32> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prolv_d_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -17255,8 +17255,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_prolv_d_512_old(<16 x i32> %x0, <16 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_prolv_d_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp 
ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -17283,7 +17283,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64>, <8 x i64>, <8 x i define <8 x i64>@test_int_x86_avx512_prolv_q_512_old(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_prolv_q_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -17301,9 +17301,9 @@ define <8 x i64>@test_int_x86_avx512_mask_prolv_q_512_old(<8 x i64> %x0, <8 x i6 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prolv_q_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -17329,8 +17329,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_prolv_q_512_old(<8 x i64> %x0, <8 x i ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_prolv_q_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -17357,7 +17357,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32>, <16 x i32>, <16 define <16 x i32>@test_int_x86_avx512_prorv_d_512_old(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_prorv_d_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -17375,9 +17375,9 @@ define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512_old(<16 x i32> %x0, <16 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prorv_d_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -17403,8 +17403,8 @@ define <16 x i32>@test_int_x86_avx512_maskz_prorv_d_512_old(<16 x i32> %x0, <16 ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_prorv_d_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -17431,7 +17431,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64>, <8 x i64>, <8 x i define <8 x i64>@test_int_x86_avx512_prorv_q_512_old(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_prorv_q_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -17449,9 +17449,9 @@ define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512_old(<8 x i64> %x0, <8 x i6 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prorv_q_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x 
i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -17477,8 +17477,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_prorv_q_512_old(<8 x i64> %x0, <8 x i ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_prorv_q_512_old( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -17506,8 +17506,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_prol_d_5 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prol_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> splat (i32 3)) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -17558,8 +17558,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_prol_q_512( ; ; CHECK-LABEL: @test_int_x86_avx512_mask_prol_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.fshl.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> splat (i64 3)) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -17610,8 +17610,8 @@ define { <16 x i32>, <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_pror_d_5 ; ; CHECK-LABEL: 
@test_int_x86_avx512_mask_pror_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.fshr.v16i32(<16 x i32> [[TMP1]], <16 x i32> [[TMP1]], <16 x i32> splat (i32 3)) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -17662,8 +17662,8 @@ define { <8 x i64>, <8 x i64>, <8 x i64> } @test_int_x86_avx512_mask_pror_q_512( ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pror_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.fshr.v8i64(<8 x i64> [[TMP1]], <8 x i64> [[TMP1]], <8 x i64> splat (i64 3)) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -17714,9 +17714,9 @@ define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x do ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0 @@ -17812,9 +17812,9 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x floa ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0 @@ -17910,9 +17910,9 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x d ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0 @@ -17983,9 +17983,9 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x flo ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0 @@ -18055,9 +18055,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x d ; ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load 
<2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0 @@ -18153,9 +18153,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x flo ; ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0 @@ -18249,9 +18249,9 @@ define void @fmadd_ss_mask_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_par ; ; CHECK-LABEL: @fmadd_ss_mask_memfold( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] @@ -18352,9 +18352,9 @@ define void @fmadd_ss_maskz_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_pa ; ; CHECK-LABEL: @fmadd_ss_maskz_memfold( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] @@ -18454,9 +18454,9 @@ define void @fmadd_sd_mask_memfold(ptr %a, ptr %b, i8 %c, <2 x double> %extra_pa ; ; CHECK-LABEL: @fmadd_sd_mask_memfold( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] @@ -18545,9 +18545,9 @@ define void @fmadd_sd_maskz_memfold(ptr %a, ptr %b, i8 %c, <2 x double> %extra_p ; ; CHECK-LABEL: @fmadd_sd_maskz_memfold( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] @@ -18636,10 +18636,10 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double> define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmsub_sd( -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 
+; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = fneg <2 x double> [[X2:%.*]] ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP2]], i64 0 @@ -18743,10 +18743,10 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, < define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmsub_ss( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = fneg <4 x float> [[X2:%.*]] ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP2]], i64 0 @@ -18851,9 +18851,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x ; ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfnmsub_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = fneg <2 x double> [[X0:%.*]] ; CHECK-NEXT: [[TMP6:%.*]] = fneg <2 x double> [[X2:%.*]] @@ -18961,9 +18961,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x fl ; ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfnmsub_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: 
[[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = fneg <4 x float> [[X0:%.*]] ; CHECK-NEXT: [[TMP6:%.*]] = fneg <4 x float> [[X2:%.*]] @@ -19068,11 +19068,11 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x fl define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_ss_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP25:%.*]], !prof [[PROF1]] @@ -19122,11 +19122,11 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 { ; ; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_ss_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP25:%.*]], !prof [[PROF1]] @@ -19176,10 +19176,10 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x f define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,ptr%ptr_b ,i8 %x3,i32 %x4, <4 x 
float> %extra_param) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_ss_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP20:%.*]], !prof [[PROF1]] @@ -19240,8 +19240,8 @@ define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> % ; ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = trunc <8 x i64> [[X0:%.*]] to <8 x i32> @@ -19264,7 +19264,7 @@ define <8 x i32>@test_int_x86_avx512_maskz_pmov_qd_512(<8 x i64> %x0, i8 %x2) # ; ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmov_qd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[X0:%.*]] to <8 x i32> @@ -19289,8 +19289,8 @@ define <16 x float> @test_int_x86_avx512_mask_cvt_dq2ps_512(<16 x i32> %x0, <16 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_dq2ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[CVT:%.*]] = sitofp <16 x i32> [[X0:%.*]] to <16 x float> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1> @@ -19328,8 +19328,8 
@@ define <16 x float> @test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16 ; ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_udq2ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[CVT:%.*]] = uitofp <16 x i32> [[X0:%.*]] to <16 x float> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1> @@ -19364,9 +19364,9 @@ define <16 x float> @test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16 define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_compress_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -19394,7 +19394,7 @@ define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double> define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_compress_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -19420,7 +19420,7 @@ define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) # define <8 x double> @test_compress_pd_512(<8 x double> %data, <8 x double> %extra_param) #0 { ; CHECK-LABEL: @test_compress_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -19445,9 +19445,9 @@ declare <8 x double> @llvm.x86.avx512.mask.compress.pd.512(<8 x double> %data, < define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_compress_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 
add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -19475,7 +19475,7 @@ define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float> define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_compress_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -19501,7 +19501,7 @@ define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask) define <16 x float> @test_compress_ps_512(<16 x float> %data, <16 x float> %extra_param) #0 { ; CHECK-LABEL: @test_compress_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -19526,9 +19526,9 @@ declare <16 x float> @llvm.x86.avx512.mask.compress.ps.512(<16 x float> %data, < define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_compress_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -19556,7 +19556,7 @@ define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru, define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_compress_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr 
@__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -19582,7 +19582,7 @@ define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) #0 { define <8 x i64> @test_compress_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 { ; CHECK-LABEL: @test_compress_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -19607,9 +19607,9 @@ declare <8 x i64> @llvm.x86.avx512.mask.compress.q.512(<8 x i64> %data, <8 x i64 define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_compress_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -19637,7 +19637,7 @@ define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passth define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_compress_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -19663,7 +19663,7 @@ define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) #0 { define <16 x i32> @test_compress_d_512(<16 x i32> %data, <16 x i32> %extra_param) #0 { ; CHECK-LABEL: @test_compress_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -19688,7 +19688,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.compress.d.512(<16 x i32> %data, <16 x define <8 x double> @test_expand_pd_512(<8 x double> %data, <8 x double> %extra_param) #0 { ; CHECK-LABEL: @test_expand_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr 
(i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -19711,9 +19711,9 @@ define <8 x double> @test_expand_pd_512(<8 x double> %data, <8 x double> %extra_ define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_expand_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -19741,7 +19741,7 @@ define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %p define <8 x double> @test_maskz_expand_pd_512(<8 x double> %data, i8 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_expand_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -19769,7 +19769,7 @@ declare <8 x double> @llvm.x86.avx512.mask.expand.pd.512(<8 x double> %data, <8 define <16 x float> @test_expand_ps_512(<16 x float> %data, <16 x float> %extra_param) #0 { ; CHECK-LABEL: @test_expand_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -19792,9 +19792,9 @@ define <16 x float> @test_expand_ps_512(<16 x float> %data, <16 x float> %extra_ define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_expand_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -19822,7 +19822,7 @@ define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %p define <16 x float> @test_maskz_expand_ps_512(<16 x float> %data, i16 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_expand_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -19850,7 +19850,7 @@ declare <16 x float> @llvm.x86.avx512.mask.expand.ps.512(<16 x float> %data, <16 define <8 x i64> @test_expand_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 { ; CHECK-LABEL: @test_expand_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -19873,9 +19873,9 @@ define <8 x i64> @test_expand_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) #0 { ; ; CHECK-LABEL: @test_mask_expand_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -19903,7 +19903,7 @@ define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i define <8 x i64> @test_maskz_expand_q_512(<8 x i64> %data, i8 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_expand_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -19931,7 +19931,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.expand.q.512(<8 x i64> %data, <8 x i64> define <16 x i32> @test_expand_d_512(<16 x i32> %data, <16 x i32> %extra_param) #0 { ; CHECK-LABEL: @test_expand_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; 
CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -19954,9 +19954,9 @@ define <16 x i32> @test_expand_d_512(<16 x i32> %data, <16 x i32> %extra_param) define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) #0 { ; ; CHECK-LABEL: @test_mask_expand_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -19984,7 +19984,7 @@ define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru define <16 x i32> @test_maskz_expand_d_512(<16 x i32> %data, i16 %mask) #0 { ; ; CHECK-LABEL: @test_maskz_expand_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -20014,10 +20014,10 @@ define <16 x float> @test_cmp_512(<16 x float> %a, <16 x float> %b, <16 x float> ; CHECK-LABEL: @test_cmp_512( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP0]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll index b2a4f0e582f9e..cc022e93bb7c0 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll +++ 
b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512-intrinsics.ll @@ -21,7 +21,7 @@ ; - llvm.x86.avx512.mask.pmov.db.mem.512, llvm.x86.avx512.mask.pmov.dw.mem.512, llvm.x86.avx512.mask.pmov.qb.mem.512, llvm.x86.avx512.mask.pmov.qd.mem.512llvm.x86.avx512.mask.pmov.qw.mem.512 ; - llvm.x86.avx512.mask.pmovs.db.mem.512, llvm.x86.avx512.mask.pmovs.dw.mem.512, llvm.x86.avx512.mask.pmovs.qb.mem.512, llvm.x86.avx512.mask.pmovs.qd.mem.512, llvm.x86.avx512.mask.pmovs.qw.mem.512 ; - llvm.x86.avx512.mask.pmovus.db.mem.512, llvm.x86.avx512.mask.pmovus.dw.mem.512, llvm.x86.avx512.mask.pmovus.qb.mem.512, llvm.x86.avx512.mask.pmovus.qd.mem.512, llvm.x86.avx512.mask.pmovus.qw.mem.512 -; - llvm.x86.avx512.mask.rndscale.pd.512, llvm.x86.avx512.mask.rndscale.ps.512, llvm.x86.avx512.mask.rndscale.sd, llvm.x86.avx512.mask.rndscale.ss +; - llvm.x86.avx512.mask.rndscale.sd, llvm.x86.avx512.mask.rndscale.ss ; - llvm.x86.avx512.mask.scalef.pd.512, llvm.x86.avx512.mask.scalef.ps.512 ; - llvm.x86.avx512.mask.sqrt.sd, llvm.x86.avx512.mask.sqrt.ss ; - llvm.x86.avx512.maskz.fixupimm.pd.512, llvm.x86.avx512.maskz.fixupimm.ps.512, llvm.x86.avx512.maskz.fixupimm.sd, llvm.x86.avx512.maskz.fixupimm.ss @@ -46,9 +46,9 @@ target triple = "x86_64-unknown-linux-gnu" define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_compress_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -76,7 +76,7 @@ define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double> define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_compress_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -103,7 +103,7 @@ define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) #0 define <8 x double> @test_compress_pd_512(<8 x double> %data, <8 x double> %extra_param) #0 { ; CHECK-LABEL: @test_compress_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -125,9 +125,9 @@ define <8 x double> @test_compress_pd_512(<8 x 
double> %data, <8 x double> %extr define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_mask_compress_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -155,7 +155,7 @@ define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float> define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask) #0 { ; CHECK-LABEL: @test_maskz_compress_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -182,7 +182,7 @@ define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask) # define <16 x float> @test_compress_ps_512(<16 x float> %data, <16 x float> %extra_param) #0 { ; CHECK-LABEL: @test_compress_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -204,9 +204,9 @@ define <16 x float> @test_compress_ps_512(<16 x float> %data, <16 x float> %extr define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_compress_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -234,7 +234,7 @@ define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru, define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_compress_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint 
(ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -261,7 +261,7 @@ define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) #0 { define <8 x i64> @test_compress_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 { ; CHECK-LABEL: @test_compress_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -283,9 +283,9 @@ define <8 x i64> @test_compress_q_512(<8 x i64> %data, <8 x i64> %extra_param) # define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_mask_compress_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -313,7 +313,7 @@ define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passth define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) #0 { ; CHECK-LABEL: @test_maskz_compress_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -340,7 +340,7 @@ define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) #0 { define <16 x i32> @test_compress_d_512(<16 x i32> %data, <16 x i32> %extra_param) #0 { ; CHECK-LABEL: @test_compress_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -363,7 +363,7 @@ define <16 x i32> @test_compress_d_512(<16 x i32> %data, <16 x i32> %extra_param define <8 x double> @test_expand_pd_512(<8 x double> %data, <8 x double> 
%extra_param) #0 { ; CHECK-LABEL: @test_expand_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -385,9 +385,9 @@ define <8 x double> @test_expand_pd_512(<8 x double> %data, <8 x double> %extra_ define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_expand_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -415,7 +415,7 @@ define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %p define <8 x double> @test_maskz_expand_pd_512(<8 x double> %data, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_expand_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -442,7 +442,7 @@ define <8 x double> @test_maskz_expand_pd_512(<8 x double> %data, i8 %mask) #0 { define <16 x float> @test_expand_ps_512(<16 x float> %data, <16 x float> %extra_param) #0 { ; CHECK-LABEL: @test_expand_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -464,9 +464,9 @@ define <16 x float> @test_expand_ps_512(<16 x float> %data, <16 x float> %extra_ define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_mask_expand_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -494,7 +494,7 @@ define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %p define <16 x float> @test_maskz_expand_ps_512(<16 x float> %data, i16 %mask) #0 { ; CHECK-LABEL: @test_maskz_expand_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -521,7 +521,7 @@ define <16 x float> @test_maskz_expand_ps_512(<16 x float> %data, i16 %mask) #0 define <8 x i64> @test_expand_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 { ; CHECK-LABEL: @test_expand_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -543,9 +543,9 @@ define <8 x i64> @test_expand_q_512(<8 x i64> %data, <8 x i64> %extra_param) #0 define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_expand_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[MASK:%.*]] to <8 x i1> @@ -573,7 +573,7 @@ define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i define <8 x i64> @test_maskz_expand_q_512(<8 x i64> %data, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_expand_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -600,7 +600,7 @@ define <8 x i64> @test_maskz_expand_q_512(<8 x i64> %data, i8 %mask) #0 { define <16 x i32> @test_expand_d_512(<16 x i32> %data, <16 x i32> %extra_param) #0 { ; CHECK-LABEL: @test_expand_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: 
[[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -622,9 +622,9 @@ define <16 x i32> @test_expand_d_512(<16 x i32> %data, <16 x i32> %extra_param) define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_mask_expand_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[MASK:%.*]] to <16 x i1> @@ -652,7 +652,7 @@ define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru define <16 x i32> @test_maskz_expand_d_512(<16 x i32> %data, i16 %mask) #0 { ; CHECK-LABEL: @test_maskz_expand_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -713,8 +713,8 @@ declare <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double>, <2 x double define <2 x double> @test_rndscale_sd(<2 x double> %a, <2 x double> %b, <2 x double> %extra_param) #0 { ; CHECK-LABEL: @test_rndscale_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -740,9 +740,9 @@ define <2 x double> @test_rndscale_sd(<2 x double> %a, <2 x double> %b, <2 x dou define <2 x double> @test_rndscale_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) #0 { ; CHECK-LABEL: @test_rndscale_sd_mask( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -769,10 +769,10 @@ define <2 x double> @test_rndscale_sd_mask(<2 x double> %a, <2 x double> %b, <2 define <2 x double> @test_rndscale_sd_mask_load(<2 x double> %a, ptr %bptr, <2 x double> %c, i8 %mask) #0 { ; CHECK-LABEL: @test_rndscale_sd_mask_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -812,8 +812,8 @@ define <2 x double> @test_rndscale_sd_mask_load(<2 x double> %a, ptr %bptr, <2 x define <2 x double> @test_rndscale_sd_maskz(<2 x double> %a, <2 x double> %b, i8 %mask) #0 { ; CHECK-LABEL: @test_rndscale_sd_maskz( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -840,8 +840,8 @@ declare <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float>, <4 x float>, define <4 x float> @test_rndscale_ss(<4 x float> %a, <4 x float> %b, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_rndscale_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: 
[[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -866,9 +866,9 @@ define <4 x float> @test_rndscale_ss(<4 x float> %a, <4 x float> %b, <4 x float> define <4 x float> @test_rndscale_ss_load(<4 x float> %a, ptr %bptr, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_rndscale_ss_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] @@ -906,9 +906,9 @@ define <4 x float> @test_rndscale_ss_load(<4 x float> %a, ptr %bptr, <4 x float> define <4 x float> @test_rndscale_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) #0 { ; CHECK-LABEL: @test_rndscale_ss_mask( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -936,8 +936,8 @@ define <4 x float> @test_rndscale_ss_mask(<4 x float> %a, <4 x float> %b, <4 x f define <4 x float> @test_rndscale_ss_maskz(<4 x float> %a, <4 x float> %b, i8 %mask) #0 { ; CHECK-LABEL: @test_rndscale_ss_maskz( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -965,18 +965,11 @@ define <8 x double> @test7(<8 x double> %a) #0 { ; CHECK-LABEL: @test7( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr 
@__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 -; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP2]], 0 -; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 -; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i512 [[TMP3]], 0 -; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] -; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] -; CHECK: 4: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR10]] -; CHECK-NEXT: unreachable -; CHECK: 5: +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i64> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i1> [[TMP2]] to <8 x i64> +; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> splat (i1 true), <8 x i64> [[TMP3]], <8 x i64> [[TMP1]] ; CHECK-NEXT: [[RES:%.*]] = call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double> [[A:%.*]], i32 11, <8 x double> [[A]], i8 -1, i32 4) -; CHECK-NEXT: store <8 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store <8 x i64> [[TMP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x double> [[RES]] ; %res = call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double> %a, i32 11, <8 x double> %a, i8 -1, i32 4) @@ -989,18 +982,11 @@ define <16 x float> @test8(<16 x float> %a) #0 { ; CHECK-LABEL: @test8( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 -; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP2]], 0 -; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 -; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i512 [[TMP3]], 0 -; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] -; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] -; CHECK: 4: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR10]] -; CHECK-NEXT: unreachable -; CHECK: 5: +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i1> [[TMP2]] to <16 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = select <16 x i1> splat (i1 true), <16 x i32> [[TMP3]], <16 x i32> [[TMP1]] ; CHECK-NEXT: [[RES:%.*]] = call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float> [[A:%.*]], i32 11, <16 x float> [[A]], i16 -1, i32 4) -; CHECK-NEXT: store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store <16 x i32> [[TMP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <16 x float> [[RES]] ; %res = call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float> %a, i32 11, <16 x float> %a, i16 -1, i32 4) @@ -1038,8 +1024,8 @@ define <8 x double> @test_sqrt_pd_512(<8 x double> %a0) #0 { define <8 x double> @test_mask_sqrt_pd_512(<8 x double> %a0, <8 x double> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_sqrt_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[TMP4:%.*]] = call <8 x double> @llvm.sqrt.v8f64(<8 x double> [[A0:%.*]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP2]] to <8 x i1> @@ -1064,7 +1050,7 @@ define <8 x double> @test_mask_sqrt_pd_512(<8 x double> %a0, <8 x double> %passt define <8 x double> @test_maskz_sqrt_pd_512(<8 x double> %a0, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_sqrt_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x double> @llvm.sqrt.v8f64(<8 x double> [[A0:%.*]]) ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP2]] to <8 x i1> @@ -1108,8 +1094,8 @@ define <8 x double> @test_sqrt_round_pd_512(<8 x double> %a0) #0 { define <8 x double> @test_mask_sqrt_round_pd_512(<8 x double> %a0, <8 x double> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_sqrt_round_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -1141,7 +1127,7 @@ define <8 x double> @test_mask_sqrt_round_pd_512(<8 x double> %a0, <8 x double> define <8 x double> @test_maskz_sqrt_round_pd_512(<8 x double> %a0, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_sqrt_round_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -1185,8 +1171,8 @@ define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) #0 { define <16 x float> @test_mask_sqrt_ps_512(<16 x float> %a0, <16 x float> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_mask_sqrt_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[A0:%.*]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP2]] to <16 x i1> @@ -1211,7 +1197,7 @@ define <16 x 
float> @test_mask_sqrt_ps_512(<16 x float> %a0, <16 x float> %passt define <16 x float> @test_maskz_sqrt_ps_512(<16 x float> %a0, i16 %mask) #0 { ; CHECK-LABEL: @test_maskz_sqrt_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[A0:%.*]]) ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1> @@ -1255,8 +1241,8 @@ define <16 x float> @test_sqrt_round_ps_512(<16 x float> %a0) #0 { define <16 x float> @test_mask_sqrt_round_ps_512(<16 x float> %a0, <16 x float> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_mask_sqrt_round_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -1288,7 +1274,7 @@ define <16 x float> @test_mask_sqrt_round_ps_512(<16 x float> %a0, <16 x float> define <16 x float> @test_maskz_sqrt_round_ps_512(<16 x float> %a0, i16 %mask) #0 { ; CHECK-LABEL: @test_maskz_sqrt_round_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -1399,9 +1385,9 @@ declare <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>, <4 x float>, <4 x define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_sqrt_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne 
i128 [[TMP5]], 0 @@ -1481,9 +1467,9 @@ declare <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>, <2 x double>, < define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_sqrt_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1884,9 +1870,9 @@ declare i32 @llvm.x86.avx512.vcvtss2si32(<4 x float>, i32) nounwind readnone define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0, <16 x i16> %src, i16 %mask, ptr %dst) #0 { ; CHECK-LABEL: @test_x86_vcvtps2ph_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = sext <16 x i1> [[TMP6]] to <16 x i16> @@ -1943,7 +1929,7 @@ declare <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float>, i32, <16 x define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) #0 { ; CHECK-LABEL: @test_cmpps( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -1969,7 +1955,7 @@ declare <16 x i1> @llvm.x86.avx512.mask.cmp.ps.512(<16 x float>, <16 x float>, i define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) #0 { ; CHECK-LABEL: @test_cmppd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -1997,7 +1983,7 @@ declare <8 x i1> @llvm.x86.avx512.mask.cmp.pd.512(<8 x double>, <8 x double>, i3 define <8 x double> @test_vmaxpd(<8 x double> %a0, <8 x double> %a1) #0 { ; CHECK-LABEL: @test_vmaxpd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i64> [[_MSPROP]], zeroinitializer @@ -2013,7 +1999,7 @@ declare <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double>, <8 x double>, i32 define <8 x double> @test_vminpd(<8 x double> %a0, <8 x double> %a1) #0 { ; CHECK-LABEL: @test_vminpd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i64> [[_MSPROP]], zeroinitializer @@ -2028,8 +2014,8 @@ declare <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double>, <8 x double>, i32 define void @test_mask_store_ss(ptr %ptr, <4 x float> %data, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_store_ss( -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and i8 [[TMP1]], 0 @@ -2074,7 +2060,7 @@ declare <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double>, <8 x double>, i32 define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vsubps_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -2097,7 +2083,7 @@ define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vsubps_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load 
<16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -2120,7 +2106,7 @@ define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vsubps_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -2143,7 +2129,7 @@ define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vsubps_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -2166,7 +2152,7 @@ define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vmulps_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -2189,7 +2175,7 @@ define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vmulps_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -2212,7 +2198,7 @@ define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vmulps_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -2235,7 +2221,7 @@ define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: @test_vmulps_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -2258,8 +2244,8 @@ define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) #0 { define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_vmulps_mask_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2293,8 +2279,8 @@ define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_vmulps_mask_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2328,8 +2314,8 @@ define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_vmulps_mask_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = 
load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2363,8 +2349,8 @@ define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_vmulps_mask_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2398,9 +2384,9 @@ define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_vmulps_mask_passthru_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -2435,9 +2421,9 @@ define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_vmulps_mask_passthru_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -2472,9 +2458,9 @@ define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_vmulps_mask_passthru_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -2509,9 +2495,9 @@ define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_vmulps_mask_passthru_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -2546,8 +2532,8 @@ define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_vmulpd_mask_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, 
i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2581,8 +2567,8 @@ define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_vmulpd_mask_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2616,8 +2602,8 @@ define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_vmulpd_mask_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2651,8 +2637,8 @@ define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_vmulpd_mask_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2686,8 +2672,8 @@ define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = 
load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2721,8 +2707,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2756,8 +2742,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16 define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2791,8 +2777,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16 define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2826,8 +2812,8 @@ define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x 
float> %a0, <16 define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_add_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -2861,9 +2847,9 @@ define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_add_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -2898,9 +2884,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_add_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -2935,9 +2921,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x define <16 x 
float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_add_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -2972,9 +2958,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_add_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3009,9 +2995,9 @@ define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_add_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3046,7 +3032,7 @@ define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3069,7 +3055,7 @@ define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3092,7 +3078,7 @@ define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3115,7 +3101,7 @@ define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3138,7 +3124,7 @@ define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_add_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3162,9 +3148,9 @@ declare <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float>, <16 x float>, i32 define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3199,9 +3185,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3236,9 +3222,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 
add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3273,9 +3259,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3310,9 +3296,9 @@ define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_sub_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3347,7 +3333,7 @@ define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to 
ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3370,7 +3356,7 @@ define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3393,7 +3379,7 @@ define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3416,7 +3402,7 @@ define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3439,7 +3425,7 @@ define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_sub_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3462,8 +3448,8 @@ define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x flo define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rn_sae( ; CHECK-NEXT: 
[[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -3497,8 +3483,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16 define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -3532,8 +3518,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16 define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -3567,8 +3553,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16 define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call 
void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -3602,8 +3588,8 @@ define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16 define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_div_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -3637,9 +3623,9 @@ define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_div_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3674,9 +3660,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_div_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3711,9 +3697,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_div_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3748,9 +3734,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_div_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3785,9 +3771,9 @@ define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_div_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: 
[[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -3822,7 +3808,7 @@ define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_div_round_ps_rn_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3845,7 +3831,7 @@ define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_div_round_ps_rd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3868,7 +3854,7 @@ define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_div_round_ps_ru_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3891,7 +3877,7 @@ define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_div_round_ps_rz_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3914,7 +3900,7 @@ define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x floa define <16 x float> @test_mm512_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; 
CHECK-LABEL: @test_mm512_div_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -3938,8 +3924,8 @@ declare <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float>, <16 x float>, i32 define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_min_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -3965,8 +3951,8 @@ define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x f define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_min_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -3992,9 +3978,9 @@ define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_min_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4021,9 +4007,9 @@ define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x fl define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_min_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4050,7 +4036,7 @@ define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_min_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4065,7 +4051,7 @@ define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> define <16 x float> @test_mm512_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_min_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4081,8 +4067,8 @@ declare <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float>, <16 x float>, i32 define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_max_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), 
i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4108,8 +4094,8 @@ define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x f define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_maskz_max_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4135,9 +4121,9 @@ define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_max_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4164,9 +4150,9 @@ define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x fl define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_mask_max_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 
add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4193,7 +4179,7 @@ define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_max_round_ps_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4208,7 +4194,7 @@ define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> define <16 x float> @test_mm512_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_mm512_max_round_ps_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -4226,9 +4212,9 @@ declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>, define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_add_ss_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4256,9 +4242,9 @@ define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x f define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 { ; CHECK-LABEL: 
@test_mask_add_ss_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4286,9 +4272,9 @@ define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x f define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_add_ss_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4316,9 +4302,9 @@ define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x f define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_add_ss_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4346,9 +4332,9 @@ define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x f define <4 x float> 
@test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_add_ss_current( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4376,8 +4362,8 @@ define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, < define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_add_ss_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -4402,7 +4388,7 @@ define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %m define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_add_ss_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -4424,11 +4410,11 @@ define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) #0 { define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, ptr %a1, <4 x float> %a2, i8 %mask, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_mask_add_ss_current_memfold( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 
x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -4479,10 +4465,10 @@ define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, ptr %a1, < define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, ptr %a1, i8 %mask, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_maskz_add_ss_current_memfold( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP11:%.*]], !prof [[PROF1]] @@ -4533,9 +4519,9 @@ declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x doubl define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_add_sd_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4563,9 +4549,9 @@ define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_add_sd_rd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, 
ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4593,9 +4579,9 @@ define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_add_sd_ru( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4623,9 +4609,9 @@ define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_add_sd_rz( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4653,9 +4639,9 @@ define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_add_sd_current( ; CHECK-NEXT: 
[[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4683,8 +4669,8 @@ define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1 define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_add_sd_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -4709,7 +4695,7 @@ define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_add_sd_rn( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -4731,11 +4717,11 @@ define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) #0 { define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, ptr %a1, <2 x double> %a2, i8 %mask, <2 x double> %extra_param) #0 { ; CHECK-LABEL: @test_mask_add_sd_current_memfold( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -4780,10 +4766,10 @@ define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, ptr %a1, define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, ptr %a1, i8 %mask, <2 x double> %extra_param) #0 { ; CHECK-LABEL: @test_maskz_add_sd_current_memfold( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP11:%.*]], !prof [[PROF1]] @@ -4828,9 +4814,9 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>, define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_max_ss_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4858,8 +4844,8 @@ define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_max_ss_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -4884,7 +4870,7 @@ define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 % define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_max_ss_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -4907,9 +4893,9 @@ define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) #0 { define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_max_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -4937,8 +4923,8 @@ define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x floa define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_max_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -4963,7 +4949,7 @@ define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_max_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, 
ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -4985,11 +4971,11 @@ define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) #0 { define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, ptr %a1, <4 x float> %a2, i8 %mask, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_mask_max_ss_memfold( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -5040,10 +5026,10 @@ define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, ptr %a1, <4 x floa define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, ptr %a1, i8 %mask, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_maskz_max_ss_memfold( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP11:%.*]], !prof [[PROF1]] @@ -5093,9 +5079,9 @@ declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x doubl define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_max_sd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -5123,8 +5109,8 @@ define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_max_sd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -5149,7 +5135,7 @@ define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_max_sd_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -5172,9 +5158,9 @@ define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) #0 { define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_max_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -5202,8 +5188,8 @@ define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x d define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_max_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -5228,7 +5214,7 @@ define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %m define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_max_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -5250,11 +5236,11 @@ define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) #0 { define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, ptr %a1, <2 x double> %a2, i8 %mask, <2 x double> %extra_param) #0 { ; CHECK-LABEL: @test_mask_max_sd_memfold( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP13:%.*]], !prof [[PROF1]] @@ -5299,10 +5285,10 @@ define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, ptr %a1, <2 x do define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, ptr %a1, i8 %mask, <2 x double> %extra_param) #0 { ; CHECK-LABEL: 
@test_maskz_max_sd_memfold( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP11:%.*]], !prof [[PROF1]] @@ -5345,7 +5331,7 @@ define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, ptr %a1, i8 %ma define <4 x float> @test_x86_avx512_cvtsi2ss32(<4 x float> %a, i32 %b) #0 { ; CHECK-LABEL: @test_x86_avx512_cvtsi2ss32( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -5367,7 +5353,7 @@ declare <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float>, i32, i32) nounwind define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b) #0 { ; CHECK-LABEL: @test_x86_avx512__mm_cvt_roundu32_ss( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0 @@ -5387,7 +5373,7 @@ define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b) define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, ptr %ptr) #0 { ; CHECK-LABEL: @test_x86_avx512__mm_cvt_roundu32_ss_mem( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -5419,7 +5405,7 @@ define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, ptr define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b) #0 { ; CHECK-LABEL: @test_x86_avx512__mm_cvtu32_ss( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr 
@__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0 @@ -5439,7 +5425,7 @@ define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b) #0 { define <4 x float> @test_x86_avx512__mm_cvtu32_ss_mem(<4 x float> %a, ptr %ptr) #0 { ; CHECK-LABEL: @test_x86_avx512__mm_cvtu32_ss_mem( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -5474,9 +5460,9 @@ declare <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32>, <16 x i32>, <16 define <16 x i32>@test_int_x86_avx512_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -5509,10 +5495,10 @@ define <16 x i32>@test_int_x86_avx512_vpermi2var_d_512(<16 x i32> %x0, <16 x i32 define <16 x i32>@test_int_x86_avx512_mask_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -5558,8 +5544,8 @@ declare <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double>, <8 x i64>, define <8 x double>@test_int_x86_avx512_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; 
CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -5584,9 +5570,9 @@ define <8 x double>@test_int_x86_avx512_vpermi2var_pd_512(<8 x double> %x0, <8 x define <8 x double>@test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = trunc <8 x i64> [[TMP2]] to <8 x i3> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -5627,8 +5613,8 @@ declare <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float>, <16 x i32> define <16 x float>@test_int_x86_avx512_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -5653,9 +5639,9 @@ define <16 x float>@test_int_x86_avx512_vpermi2var_ps_512(<16 x float> %x0, <16 define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = trunc <16 x i32> [[TMP2]] to <16 x i4> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -5696,8 +5682,8 @@ declare <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64>, <8 x i64>, <8 x i define <8 x i64>@test_int_x86_avx512_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3> ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X3:%.*]], <8 x i64> [[TMP3]]) @@ -5719,9 +5705,9 @@ define <8 x i64>@test_int_x86_avx512_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> % define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = trunc <8 x i64> [[TMP2]] to <8 x i3> ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X1:%.*]], <8 x i64> [[TMP3]]) @@ -5752,10 +5738,10 @@ define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -5798,12 +5784,12 @@ define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, ptr %x2ptr, i8 %x3, <8 x double> %extra_param, <8 x double> %extra_param2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_pd_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP12:%.*]], !prof [[PROF1]] @@ -5856,10 +5842,10 @@ define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, < define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_ps_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4> ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -5894,10 +5880,10 @@ define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_q_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = trunc <8 x i64> [[X0]] to <8 x i3> ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> [[X4:%.*]], <8 x i64> [[TMP3]]) @@ -5928,8 +5914,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x define <16 x i32>@test_int_x86_avx512_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermt2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4> @@ -5951,10 +5937,10 @@ define <16 x i32>@test_int_x86_avx512_vpermt2var_d_512(<16 x i32> %x0, <16 x i32 define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermt2var_d_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to 
ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = trunc <16 x i32> [[X0]] to <16 x i4> ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> [[TMP1]], <16 x i32> [[X4:%.*]], <16 x i32> [[TMP3]]) @@ -5987,9 +5973,9 @@ declare <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double>, <8 x doub define <8 x double>@test_int_x86_avx512_mask_scalef_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_scalef_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -6035,9 +6021,9 @@ declare <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float>, <16 x flo define <16 x float>@test_int_x86_avx512_mask_scalef_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_scalef_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -6084,8 +6070,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64>, <16 x i8>, i8) define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> [[TMP1]], <16 x i8> [[TMP2]], i8 -1) ; CHECK-NEXT: [[_MSPROP2:%.*]] = or <16 x i8> zeroinitializer, [[TMP4]] @@ -6120,8 +6106,8 @@ declare void @llvm.x86.avx512.mask.pmov.qb.mem.512(ptr %ptr, <8 x i64>, i8) define void @test_int_x86_avx512_mask_pmov_qb_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qb_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512 @@ -6157,8 +6143,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64>, <16 x i8>, i8) define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> [[TMP1]], <16 x i8> [[TMP2]], i8 -1) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i8> zeroinitializer, [[TMP4]] @@ -6193,8 +6179,8 @@ declare void @llvm.x86.avx512.mask.pmovs.qb.mem.512(ptr %ptr, <8 x i64>, i8) define void @test_int_x86_avx512_mask_pmovs_qb_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qb_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512 @@ -6230,8 +6216,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64>, <16 x i8>, i8) define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_512(<8 x i64> %x0, <16 x i8> 
%x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> [[TMP1]], <16 x i8> [[TMP2]], i8 -1) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i8> zeroinitializer, [[TMP4]] @@ -6266,8 +6252,8 @@ declare void @llvm.x86.avx512.mask.pmovus.qb.mem.512(ptr %ptr, <8 x i64>, i8) define void @test_int_x86_avx512_mask_pmovus_qb_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qb_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512 @@ -6303,8 +6289,8 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64>, <8 x i16>, i8) define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> [[TMP1]], <8 x i16> [[TMP2]], i8 -1) ; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i16> zeroinitializer, [[TMP8]] @@ -6339,8 +6325,8 @@ declare void @llvm.x86.avx512.mask.pmov.qw.mem.512(ptr %ptr, <8 x i64>, i8) define void @test_int_x86_avx512_mask_pmov_qw_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qw_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512 @@ -6376,8 +6362,8 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64>, <8 x i16>, i8) define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> [[TMP1]], <8 x i16> [[TMP2]], i8 -1) ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i16> zeroinitializer, [[TMP11]] @@ -6412,8 +6398,8 @@ declare void @llvm.x86.avx512.mask.pmovs.qw.mem.512(ptr %ptr, <8 x i64>, i8) define void @test_int_x86_avx512_mask_pmovs_qw_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qw_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512 @@ -6449,8 +6435,8 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64>, <8 x i16>, i8) define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> [[TMP1]], <8 x i16> [[TMP2]], i8 -1) ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i16> zeroinitializer, [[TMP11]] @@ -6485,8 +6471,8 @@ declare void @llvm.x86.avx512.mask.pmovus.qw.mem.512(ptr %ptr, <8 x i64>, i8) define void @test_int_x86_avx512_mask_pmovus_qw_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 { 
; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qw_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512 @@ -6533,8 +6519,8 @@ define <8 x i32>@test_int_x86_avx512_pmov_qd_512(<8 x i64> %x0, <8 x i32> %x1) # define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = trunc <8 x i64> [[X0:%.*]] to <8 x i32> @@ -6558,7 +6544,7 @@ define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> % define <8 x i32>@test_int_x86_avx512_maskz_pmov_qd_512(<8 x i64> %x0, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmov_qd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[X0:%.*]] to <8 x i32> @@ -6584,8 +6570,8 @@ declare void @llvm.x86.avx512.mask.pmov.qd.mem.512(ptr %ptr, <8 x i64>, i8) define void @test_int_x86_avx512_mask_pmov_qd_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_qd_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512 @@ -6621,7 +6607,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64>, <8 x i32>, 
i8) define <8 x i32>@test_int_x86_avx512_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmovs_qd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> splat (i1 true), <8 x i32> [[TMP3]], <8 x i32> [[TMP2]] @@ -6635,9 +6621,9 @@ define <8 x i32>@test_int_x86_avx512_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1) define <8 x i32>@test_int_x86_avx512_mask_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qd_512( -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32> @@ -6658,7 +6644,7 @@ define <8 x i32>@test_int_x86_avx512_mask_pmovs_qd_512(<8 x i64> %x0, <8 x i32> define <8 x i32>@test_int_x86_avx512_maskz_pmovs_qd_512(<8 x i64> %x0, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovs_qd_512( -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP3:%.*]] to <8 x i1> @@ -6683,8 +6669,8 @@ declare void @llvm.x86.avx512.mask.pmovs.qd.mem.512(ptr %ptr, <8 x i64>, i8) define void @test_int_x86_avx512_mask_pmovs_qd_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_qd_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512 @@ -6720,7 +6706,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64>, <8 x i32>, i8) define <8 x i32>@test_int_x86_avx512_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmovus_qd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load 
<8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> splat (i1 true), <8 x i32> [[TMP3]], <8 x i32> [[TMP2]] @@ -6734,9 +6720,9 @@ define <8 x i32>@test_int_x86_avx512_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1) define <8 x i32>@test_int_x86_avx512_mask_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qd_512( -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4:%.*]] to <8 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = trunc <8 x i64> [[TMP1]] to <8 x i32> @@ -6757,7 +6743,7 @@ define <8 x i32>@test_int_x86_avx512_mask_pmovus_qd_512(<8 x i64> %x0, <8 x i32> define <8 x i32>@test_int_x86_avx512_maskz_pmovus_qd_512(<8 x i64> %x0, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovus_qd_512( -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP3:%.*]] to <8 x i1> @@ -6782,8 +6768,8 @@ declare void @llvm.x86.avx512.mask.pmovus.qd.mem.512(ptr %ptr, <8 x i64>, i8) define void @test_int_x86_avx512_mask_pmovus_qd_mem_512(ptr %ptr, <8 x i64> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_qd_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP2]] to i512 @@ -6819,8 +6805,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32>, <16 x i8>, i16) define <16 x i8>@test_int_x86_avx512_mask_pmov_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_db_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls 
to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> [[TMP1]], <16 x i8> [[TMP2]], i16 -1) ; CHECK-NEXT: [[_MSPROP2:%.*]] = or <16 x i8> zeroinitializer, [[TMP8]] @@ -6855,8 +6841,8 @@ declare void @llvm.x86.avx512.mask.pmov.db.mem.512(ptr %ptr, <16 x i32>, i16) define void @test_int_x86_avx512_mask_pmov_db_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_db_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512 @@ -6892,8 +6878,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32>, <16 x i8>, i16) define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_db_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> [[TMP1]], <16 x i8> [[TMP2]], i16 -1) ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i8> zeroinitializer, [[TMP11]] @@ -6928,8 +6914,8 @@ declare void @llvm.x86.avx512.mask.pmovs.db.mem.512(ptr %ptr, <16 x i32>, i16) define void @test_int_x86_avx512_mask_pmovs_db_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_db_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; 
CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512 @@ -6965,8 +6951,8 @@ declare <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32>, <16 x i8>, i16 define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_db_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> [[TMP1]], <16 x i8> [[TMP2]], i16 -1) ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i8> zeroinitializer, [[TMP11]] @@ -7001,8 +6987,8 @@ declare void @llvm.x86.avx512.mask.pmovus.db.mem.512(ptr %ptr, <16 x i32>, i16) define void @test_int_x86_avx512_mask_pmovus_db_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_db_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512 @@ -7038,8 +7024,8 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32>, <16 x i16>, i16 define <16 x i16>@test_int_x86_avx512_mask_pmov_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_dw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> [[TMP1]], <16 x i16> [[TMP2]], i16 -1) ; CHECK-NEXT: [[_MSPROP2:%.*]] = or <16 x i16> zeroinitializer, [[TMP8]] @@ -7074,8 +7060,8 @@ declare void @llvm.x86.avx512.mask.pmov.dw.mem.512(ptr %ptr, <16 x i32>, i16) define void @test_int_x86_avx512_mask_pmov_dw_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_dw_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512 @@ -7111,8 +7097,8 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32>, <16 x i16>, i1 define <16 x i16>@test_int_x86_avx512_mask_pmovs_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_dw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> [[TMP1]], <16 x i16> [[TMP2]], i16 -1) ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i16> zeroinitializer, [[TMP11]] @@ -7147,8 +7133,8 @@ declare void @llvm.x86.avx512.mask.pmovs.dw.mem.512(ptr %ptr, <16 x i32>, i16) define void @test_int_x86_avx512_mask_pmovs_dw_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_dw_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512 @@ -7184,8 +7170,8 @@ declare <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32>, <16 x i16>, i define <16 x i16>@test_int_x86_avx512_mask_pmovus_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_dw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = call <16 x i16> 
@llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> [[TMP1]], <16 x i16> [[TMP2]], i16 -1) ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i16> zeroinitializer, [[TMP11]] @@ -7220,8 +7206,8 @@ declare void @llvm.x86.avx512.mask.pmovus.dw.mem.512(ptr %ptr, <16 x i32>, i16) define void @test_int_x86_avx512_mask_pmovus_dw_mem_512(ptr %ptr, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_dw_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP2]] to i512 @@ -7257,8 +7243,8 @@ declare <16 x float> @llvm.x86.avx512.sitofp.round.v16f32.v16i32(<16 x i32>, i32 define <16 x float>@test_int_x86_avx512_mask_cvt_dq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_dq2ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[CVT:%.*]] = sitofp <16 x i32> [[X0:%.*]] to <16 x float> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1> @@ -7297,8 +7283,8 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(<8 x double>, <8 x i32>, i8 define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_pd2dq_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -7339,8 +7325,8 @@ declare <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double>, <8 x float> define <8 x float>@test_int_x86_avx512_mask_cvt_pd2ps_512(<8 x double> %x0, <8 x float> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_pd2ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -7381,8 +7367,8 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double>, <8 x i32>, i define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_pd2udq_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -7422,9 +7408,9 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512(<16 x float>, <16 x i32>, define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_ps2dq_512( -; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[X2:%.*]] to <16 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer @@ -7457,8 +7443,8 @@ declare <8 x double> @llvm.x86.avx512.mask.cvtps2pd.512(<8 x float>, <8 x double define <8 x double>@test_int_x86_avx512_mask_cvt_ps2pd_512(<8 x float> %x0, <8 x double> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_ps2pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -7499,8 
+7485,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float>, <16 x i32>, define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_ps2udq_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -7541,8 +7527,8 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2dq.512(<8 x double>, <8 x i32>, i define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvtt_pd2dq_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -7583,8 +7569,8 @@ declare <16 x float> @llvm.x86.avx512.uitofp.round.v16f32.v16i32(<16 x i32>, i32 define <16 x float>@test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_udq2ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[CVT:%.*]] = uitofp <16 x i32> [[X0:%.*]] to <16 x float> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP2]] to <16 x i1> @@ -7623,8 +7609,8 @@ declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2udq.512(<8 x double>, <8 x i32>, define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvtt_pd2udq_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls 
to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -7665,8 +7651,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvttps2dq.512(<16 x float>, <16 x i32>, define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvtt_ps2dq_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -7707,8 +7693,8 @@ declare <16 x i32> @llvm.x86.avx512.mask.cvttps2udq.512(<16 x float>, <16 x i32> define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvtt_ps2udq_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -7749,7 +7735,7 @@ declare <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>, <4 x float>, <4 define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_getexp_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -7772,9 +7758,9 @@ define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1) #0 { define <4 x float> @test_mask_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_getexp_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; 
CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -7821,8 +7807,8 @@ define <4 x float> @test_mask_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x f define <4 x float> @test_maskz_getexp_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_getexp_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -7849,7 +7835,7 @@ declare <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>, <2 x double>, define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_getexp_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -7872,9 +7858,9 @@ define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1) #0 { define <2 x double> @test_mask_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_mask_getexp_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to 
i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -7921,8 +7907,8 @@ define <2 x double> @test_mask_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 define <2 x double> @test_maskz_getexp_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_maskz_getexp_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -7949,8 +7935,8 @@ declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32 define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cmp_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -7975,8 +7961,8 @@ define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cmp_sd_all( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -8067,8 +8053,8 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32) define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cmp_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), 
align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -8094,8 +8080,8 @@ define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 % define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cmp_ss_all( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -8180,8 +8166,8 @@ declare <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double>, i32, <8 define <8 x double>@test_int_x86_avx512_mask_getmant_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -8222,8 +8208,8 @@ declare <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float>, i32, <16 define <16 x float>@test_int_x86_avx512_mask_getmant_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -8264,9 +8250,9 @@ declare <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double>, <2 x double> define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> 
%x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -8348,9 +8334,9 @@ declare <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float>, <4 x float>, i define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -8427,9 +8413,9 @@ define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x flo define <4 x float> @test_int_x86_avx512_mask_getmant_ss_load(<4 x float> %x0, ptr %x1p, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_getmant_ss_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] @@ -8469,7 +8455,7 @@ declare <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double>, <8 x i64>) define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1) #0 { ; CHECK-LABEL: 
@test_int_x86_avx512_vpermilvar_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3> ; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -8493,9 +8479,9 @@ define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_mask(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %mask) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_pd_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3> ; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -8531,8 +8517,8 @@ define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_mask(<8 x double> %x0, define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_maskz(<8 x double> %x0, <8 x i64> %x1, i8 %mask) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_pd_512_maskz( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc <8 x i64> [[X1]] to <8 x i3> ; CHECK-NEXT: [[X0:%.*]] = bitcast <8 x i64> [[TMP1]] to <8 x double> @@ -8569,7 +8555,7 @@ declare <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float>, <16 x i32> define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4> ; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> 
[[TMP1]] to <16 x float> @@ -8593,9 +8579,9 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4> ; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -8631,8 +8617,8 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_mask(<16 x float> %x0, define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512_maskz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc <16 x i32> [[X1]] to <16 x i4> ; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> @@ -8682,8 +8668,8 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool(<16 x fl define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> ; CHECK-NEXT: [[RES:%.*]] = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> [[X0]], <16 x i32> ) @@ -8711,7 +8697,7 @@ define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(<16 define 
<16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[X0:%.*]] = bitcast <16 x i32> [[TMP1]] to <16 x float> ; CHECK-NEXT: [[RES:%.*]] = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> [[X0]], <16 x i32> ) @@ -8740,9 +8726,9 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double>, <4 x flo define <2 x double>@test_int_x86_avx512_mask_cvt_ss2sd_round(<2 x double> %x0,<4 x float> %x1, <2 x double> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_ss2sd_round( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -8789,9 +8775,9 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float>, <2 x doubl define <4 x float>@test_int_x86_avx512_mask_cvt_sd2ss_round(<4 x float> %x0,<2 x double> %x1, <4 x float> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_cvt_sd2ss_round( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -8838,8 +8824,8 @@ declare <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x define <16 x i32>@test_int_x86_avx512_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pternlog_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, 
ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -8865,9 +8851,9 @@ define <16 x i32>@test_int_x86_avx512_pternlog_d_512(<16 x i32> %x0, <16 x i32> define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pternlog_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -8903,9 +8889,9 @@ define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pternlog_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -8943,8 +8929,8 @@ declare <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64>, <8 x i64>, <8 x i64 define <8 x i64>@test_int_x86_avx512_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pternlog_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; 
CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -8970,9 +8956,9 @@ define <8 x i64>@test_int_x86_avx512_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1 define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pternlog_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -9008,9 +8994,9 @@ define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64 define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pternlog_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -9046,7 +9032,7 @@ define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i6 define i32 @test_x86_avx512_comi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_comi_sd_eq_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -9069,7 +9055,7 @@ define i32 @test_x86_avx512_comi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) # define i32 @test_x86_avx512_ucomi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_ucomi_sd_eq_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -9092,7 +9078,7 @@ define i32 @test_x86_avx512_ucomi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) define i32 @test_x86_avx512_comi_sd_eq(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_comi_sd_eq( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -9115,7 +9101,7 @@ define i32 @test_x86_avx512_comi_sd_eq(<2 x double> %a0, <2 x double> %a1) #0 { define i32 @test_x86_avx512_ucomi_sd_eq(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_ucomi_sd_eq( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -9138,7 +9124,7 @@ define i32 @test_x86_avx512_ucomi_sd_eq(<2 x double> %a0, <2 x double> %a1) #0 { define i32 @test_x86_avx512_comi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_comi_sd_lt_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -9161,7 +9147,7 @@ define i32 @test_x86_avx512_comi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) # define i32 @test_x86_avx512_ucomi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_ucomi_sd_lt_sae( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, 
ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -9184,7 +9170,7 @@ define i32 @test_x86_avx512_ucomi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) define i32 @test_x86_avx512_comi_sd_lt(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_comi_sd_lt( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -9207,7 +9193,7 @@ define i32 @test_x86_avx512_comi_sd_lt(<2 x double> %a0, <2 x double> %a1) #0 { define i32 @test_x86_avx512_ucomi_sd_lt(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_ucomi_sd_lt( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -9232,7 +9218,7 @@ declare i32 @llvm.x86.avx512.vcomi.sd(<2 x double>, <2 x double>, i32, i32) define i32 @test_x86_avx512_ucomi_ss_lt(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_ucomi_ss_lt( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -9259,7 +9245,7 @@ declare <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double>, <8 x i64>) define <8 x double>@test_int_x86_avx512_permvar_df_512(<8 x double> %x0, <8 x i64> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -9282,9 +9268,9 @@ define <8 x double>@test_int_x86_avx512_permvar_df_512(<8 x double> %x0, <8 x i6 define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x 
i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -9319,8 +9305,8 @@ define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8 define <8 x double>@test_int_x86_avx512_maskz_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_df_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -9356,7 +9342,7 @@ declare <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64>, <8 x i64>) define <8 x i64>@test_int_x86_avx512_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -9370,9 +9356,9 @@ define <8 x i64>@test_int_x86_avx512_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1 define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -9396,8 +9382,8 @@ define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64 define <8 x i64>@test_int_x86_avx512_maskz_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_di_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) @@ -9423,7 +9409,7 @@ declare <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float>, <16 x i32>) define <16 x float>@test_int_x86_avx512_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_sf_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -9446,9 +9432,9 @@ define <16 x float>@test_int_x86_avx512_permvar_sf_512(<16 x float> %x0, <16 x i define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_sf_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -9483,8 +9469,8 @@ define 
<16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <1 define <16 x float>@test_int_x86_avx512_maskz_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_sf_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -9520,7 +9506,7 @@ declare <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32>, <16 x i32>) define <16 x i32>@test_int_x86_avx512_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_si_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -9534,9 +9520,9 @@ define <16 x i32>@test_int_x86_avx512_permvar_si_512(<16 x i32> %x0, <16 x i32> define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_si_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -9560,8 +9546,8 @@ define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x define <16 x i32>@test_int_x86_avx512_maskz_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_si_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) @@ -9587,9 +9573,9 @@ declare <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double>, <8 x do define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -9649,9 +9635,9 @@ define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, < define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512_load(<8 x double> %x0, <8 x double> %x1, ptr %x2ptr) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_pd_512_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -9691,9 +9677,9 @@ declare <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double>, <8 x d define <8 x double>@test_int_x86_avx512_maskz_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_fixupimm_pd_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: 
[[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -9756,9 +9742,9 @@ declare <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float>, <4 x float>, define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -9821,9 +9807,9 @@ declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float>, <4 x float>, define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_fixupimm_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -9886,9 +9872,9 @@ declare <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float>, <16 x f define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -9948,9 +9934,9 @@ define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, < define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512_load(<16 x float> %x0, <16 x float> %x1, ptr %x2ptr) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_ps_512_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -9990,9 +9976,9 @@ declare <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float>, <16 x define <16 x float>@test_int_x86_avx512_maskz_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_fixupimm_ps_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -10055,9 +10041,9 @@ declare <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double>, <2 x double define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_fixupimm_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -10120,9 +10106,9 @@ declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double>, <2 x doubl define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_fixupimm_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -10188,9 +10174,9 @@ declare double @llvm.x86.avx512.vfmadd.f64(double, double, double, i32) #0 define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0 @@ -10301,9 +10287,9 @@ define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x d define <4 x float> @test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> 
%x2, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0 @@ -10526,9 +10512,9 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x flo define <4 x float> @test_int_x86_avx512_maskz_vfmadd_ss_load0(i8 zeroext %0, ptr nocapture readonly %1, float %2, float %3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_ss_load0( -; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0 @@ -10576,9 +10562,9 @@ define <4 x float> @test_int_x86_avx512_maskz_vfmadd_ss_load0(i8 zeroext %0, ptr define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[X0:%.*]], i64 0 @@ -10689,9 +10675,9 @@ 
define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x float> [[X0:%.*]], i64 0 @@ -10802,10 +10788,10 @@ define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x fl define void @fmadd_ss_mask_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_param, <4 x float> %extra_param2) #0 { ; CHECK-LABEL: @fmadd_ss_mask_memfold( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] @@ -10910,10 +10896,10 @@ define void @fmadd_ss_mask_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_par define void @fmadd_ss_maskz_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_param, <4 x float> %extra_param2) #0 { ; CHECK-LABEL: @fmadd_ss_maskz_memfold( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] @@ -11017,10 +11003,10 @@ define void @fmadd_ss_maskz_memfold(ptr %a, ptr %b, i8 %c, <4 x float> %extra_pa define void @fmadd_sd_mask_memfold(ptr %a, ptr %b, i8 %c, <2 x double> %extra_param, <2 x double> %extra_param2) #0 { ; CHECK-LABEL: @fmadd_sd_mask_memfold( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] @@ -11113,10 +11099,10 @@ define void @fmadd_sd_mask_memfold(ptr %a, ptr %b, i8 %c, <2 x double> %extra_pa define void @fmadd_sd_maskz_memfold(ptr %a, ptr %b, i8 %c, <2x double> %extra_param, <2x double> %extra_param2) #0 { ; CHECK-LABEL: @fmadd_sd_maskz_memfold( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; 
CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]] @@ -11207,10 +11193,10 @@ define void @fmadd_sd_maskz_memfold(ptr %a, ptr %b, i8 %c, <2x double> %extra_pa define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmsub_sd( -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = fneg <2 x double> [[X2:%.*]] ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <2 x i64> [[TMP2]], i64 0 @@ -11335,10 +11321,10 @@ define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmsub_ss( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = fneg <4 x float> [[X2:%.*]] ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP2]], i64 0 @@ -11464,9 +11450,9 @@ define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x fl define <2 x double> @test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfnmsub_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x 
i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = fneg <2 x double> [[X0:%.*]] ; CHECK-NEXT: [[TMP6:%.*]] = fneg <2 x double> [[X2:%.*]] @@ -11598,9 +11584,9 @@ define <2 x double> @test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x define <4 x float> @test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfnmsub_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = fneg <4 x float> [[X0:%.*]] ; CHECK-NEXT: [[TMP6:%.*]] = fneg <4 x float> [[X2:%.*]] @@ -11731,11 +11717,11 @@ define <4 x float> @test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x f define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask3_vfmadd_ss_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP25:%.*]], !prof [[PROF1]] @@ -11791,11 +11777,11 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vfmadd_ss_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), 
align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP25:%.*]], !prof [[PROF1]] @@ -11852,10 +11838,10 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x f define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,ptr%ptr_b ,i8 %x3,i32 %x4, <4 x float> %extra_param) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_vfmadd_ss_rm( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP20:%.*]], !prof [[PROF1]] @@ -11905,7 +11891,7 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x define <16 x i32> @test_x86_avx512_psll_d_512(<16 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psll_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -11924,9 +11910,9 @@ define <16 x i32> @test_x86_avx512_psll_d_512(<16 x i32> %a0, <4 x i32> %a1) #0 define <16 x i32> @test_x86_avx512_mask_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psll_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -11955,8 +11941,8 @@ define <16 x i32> @test_x86_avx512_mask_psll_d_512(<16 x i32> %a0, <4 x i32> %a1 define <16 x i32> @test_x86_avx512_maskz_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psll_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -11988,7 +11974,7 @@ declare <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32>, <4 x i32>) nounwind r define <8 x i64> @test_x86_avx512_psll_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psll_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -12007,9 +11993,9 @@ define <8 x i64> @test_x86_avx512_psll_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 { define <8 x i64> @test_x86_avx512_mask_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psll_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, 
ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -12038,8 +12024,8 @@ define <8 x i64> @test_x86_avx512_mask_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, define <8 x i64> @test_x86_avx512_maskz_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psll_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -12084,8 +12070,8 @@ define <16 x i32> @test_x86_avx512_pslli_d_512(<16 x i32> %a0) #0 { define <16 x i32> @test_x86_avx512_mask_pslli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_pslli_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -12109,7 +12095,7 @@ define <16 x i32> @test_x86_avx512_mask_pslli_d_512(<16 x i32> %a0, <16 x i32> % define <16 x i32> @test_x86_avx512_maskz_pslli_d_512(<16 x i32> %a0, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_pslli_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer @@ -12149,8 +12135,8 @@ define <8 x i64> @test_x86_avx512_pslli_q_512(<8 x i64> %a0) #0 { define <8 x i64> @test_x86_avx512_mask_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_pslli_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -12174,7 +12160,7 @@ define <8 x i64> @test_x86_avx512_mask_pslli_q_512(<8 x i64> %a0, <8 x i64> %pas define <8 x i64> @test_x86_avx512_maskz_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_pslli_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer @@ -12201,7 +12187,7 @@ declare <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64>, i32) nounwind readnone define <8 x i64> @test_x86_avx512_psra_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psra_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -12220,9 +12206,9 @@ define <8 x i64> @test_x86_avx512_psra_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 { define <8 x i64> @test_x86_avx512_mask_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psra_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -12251,8 +12237,8 @@ define <8 x i64> @test_x86_avx512_mask_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, define <8 x i64> @test_x86_avx512_maskz_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psra_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -12284,7 +12270,7 @@ declare <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64>, <2 x i64>) nounwind rea define <16 x i32> @test_x86_avx512_psra_d_512(<16 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psra_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -12303,9 +12289,9 @@ define <16 x i32> @test_x86_avx512_psra_d_512(<16 x i32> %a0, <4 x i32> %a1) #0 define <16 x i32> @test_x86_avx512_mask_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psra_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -12334,8 +12320,8 @@ define <16 x i32> @test_x86_avx512_mask_psra_d_512(<16 x i32> %a0, <4 x i32> %a1 define <16 x i32> @test_x86_avx512_maskz_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psra_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -12381,8 +12367,8 @@ define <8 x 
i64> @test_x86_avx512_psrai_q_512(<8 x i64> %a0) #0 { define <8 x i64> @test_x86_avx512_mask_psrai_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrai_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -12406,7 +12392,7 @@ define <8 x i64> @test_x86_avx512_mask_psrai_q_512(<8 x i64> %a0, <8 x i64> %pas define <8 x i64> @test_x86_avx512_maskz_psrai_q_512(<8 x i64> %a0, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrai_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer @@ -12446,8 +12432,8 @@ define <16 x i32> @test_x86_avx512_psrai_d_512(<16 x i32> %a0) #0 { define <16 x i32> @test_x86_avx512_mask_psrai_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrai_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -12471,7 +12457,7 @@ define <16 x i32> @test_x86_avx512_mask_psrai_d_512(<16 x i32> %a0, <16 x i32> % define <16 x i32> @test_x86_avx512_maskz_psrai_d_512(<16 x i32> %a0, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrai_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer @@ -12499,7 +12485,7 @@ 
declare <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32>, i32) nounwind readno define <16 x i32> @test_x86_avx512_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrl_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -12518,9 +12504,9 @@ define <16 x i32> @test_x86_avx512_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1) #0 define <16 x i32> @test_x86_avx512_mask_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrl_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -12549,8 +12535,8 @@ define <16 x i32> @test_x86_avx512_mask_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1 define <16 x i32> @test_x86_avx512_maskz_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrl_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -12582,7 +12568,7 @@ declare <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32>, <4 x i32>) nounwind r define <8 x i64> @test_x86_avx512_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrl_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 
x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -12601,9 +12587,9 @@ define <8 x i64> @test_x86_avx512_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1) #0 { define <8 x i64> @test_x86_avx512_mask_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrl_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -12632,8 +12618,8 @@ define <8 x i64> @test_x86_avx512_mask_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, define <8 x i64> @test_x86_avx512_maskz_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrl_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -12678,8 +12664,8 @@ define <16 x i32> @test_x86_avx512_psrli_d_512(<16 x i32> %a0) #0 { define <16 x i32> @test_x86_avx512_mask_psrli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrli_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <16 x i32> [[TMP4]], zeroinitializer @@ -12703,7 +12689,7 @@ define <16 x i32> @test_x86_avx512_mask_psrli_d_512(<16 x i32> %a0, <16 x i32> % define <16 x i32> @test_x86_avx512_maskz_psrli_d_512(<16 x i32> %a0, i16 %mask) #0 { ; CHECK-LABEL: 
@test_x86_avx512_maskz_psrli_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <16 x i32> [[TMP3]], zeroinitializer @@ -12743,8 +12729,8 @@ define <8 x i64> @test_x86_avx512_psrli_q_512(<8 x i64> %a0) #0 { define <8 x i64> @test_x86_avx512_mask_psrli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrli_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <8 x i64> [[TMP4]], zeroinitializer @@ -12768,7 +12754,7 @@ define <8 x i64> @test_x86_avx512_mask_psrli_q_512(<8 x i64> %a0, <8 x i64> %pas define <8 x i64> @test_x86_avx512_maskz_psrli_q_512(<8 x i64> %a0, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrli_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <8 x i64> [[TMP3]], zeroinitializer @@ -12794,7 +12780,7 @@ declare <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64>, i32) nounwind readnone define <16 x i32> @test_x86_avx512_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psllv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -12831,9 +12817,9 @@ define <16 x i32> @test_x86_avx512_psllv_d_512_const() #0 { define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psllv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; 
CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -12860,8 +12846,8 @@ define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> % define <16 x i32> @test_x86_avx512_maskz_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psllv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -12890,7 +12876,7 @@ declare <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32>, <16 x i32>) nounwind define <8 x i64> @test_x86_avx512_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psllv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -12927,9 +12913,9 @@ define <8 x i64> @test_x86_avx512_psllv_q_512_const() #0 { define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psllv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -12956,8 +12942,8 @@ define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, define <8 x i64> @test_x86_avx512_maskz_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psllv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -12986,7 +12972,7 @@ declare <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64>, <8 x i64>) nounwind re define <16 x i32> @test_x86_avx512_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrav_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -13003,9 +12989,9 @@ define <16 x i32> @test_x86_avx512_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1) # define <16 x i32> @test_x86_avx512_mask_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrav_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -13032,8 +13018,8 @@ define <16 x i32> @test_x86_avx512_mask_psrav_d_512(<16 x i32> %a0, <16 x i32> % define <16 x i32> @test_x86_avx512_maskz_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrav_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, 
align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -13062,7 +13048,7 @@ declare <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32>, <16 x i32>) nounwind define <8 x i64> @test_x86_avx512_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrav_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -13079,9 +13065,9 @@ define <8 x i64> @test_x86_avx512_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1) #0 { define <8 x i64> @test_x86_avx512_mask_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrav_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -13108,8 +13094,8 @@ define <8 x i64> @test_x86_avx512_mask_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, define <8 x i64> @test_x86_avx512_maskz_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrav_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = 
icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -13138,7 +13124,7 @@ declare <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64>, <8 x i64>) nounwind re define <16 x i32> @test_x86_avx512_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrlv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -13175,9 +13161,9 @@ define <16 x i32> @test_x86_avx512_psrlv_d_512_const() #0 { define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrlv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -13204,8 +13190,8 @@ define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> % define <16 x i32> @test_x86_avx512_maskz_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrlv_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -13234,7 +13220,7 @@ declare <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32>, <16 x i32>) nounwind define <8 x i64> @test_x86_avx512_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrlv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i64> @@ -13271,9 +13257,9 @@ define <8 x i64> @test_x86_avx512_psrlv_q_512_const() #0 { define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrlv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i64> @@ -13300,8 +13286,8 @@ define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, define <8 x i64> @test_x86_avx512_maskz_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrlv_q_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i64> @@ -13428,13 +13414,13 @@ define <16 x float> @bad_mask_transition(<8 x double> %a, <8 x double> %b, <8 x ; CHECK-LABEL: @bad_mask_transition( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i64> [[TMP0]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP6]], 0 @@ -13501,9 +13487,9 @@ define <16 x float> @bad_mask_transition_2(<8 x double> %a, <8 x double> %b, <8 ; CHECK-LABEL: @bad_mask_transition_2( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP0]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll index 7bd35182d5c90..dbef575b30cc4 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics-upgrade.ll @@ -28,7 +28,7 @@ declare i32 @llvm.x86.avx512.kunpck.wd(i32, i32) define i32 @test_int_x86_avx512_kunpck_wd(i32 %x0, i32 %x1) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_kunpck_wd( ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[X0:%.*]] to <32 x i1> @@ -54,7 +54,7 @@ declare i64 @llvm.x86.avx512.kunpck.dq(i64, i64) define i64 @test_int_x86_avx512_kunpck_qd(i64 %x0, i64 %x1) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_kunpck_qd( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64 [[TMP1]] to <64 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i64 [[X0:%.*]] to <64 x i1> @@ -80,8 +80,8 @@ declare <64 x i8> @llvm.x86.avx512.mask.pbroadcast.b.gpr.512(i8, <64 x i8>, i64) define { <64 x i8>, <64 x i8>, <64 x i8> } @test_int_x86_avx512_mask_pbroadcast_b_gpr_512(i8 %x0, <64 x i8> %x1, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcast_b_gpr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <64 x i8> splat (i8 -1), i8 [[TMP1]], i64 0 ; CHECK-NEXT: [[DOTSPLATINSERT3:%.*]] = insertelement <64 x i8> poison, i8 [[X0:%.*]], i64 0 @@ -134,8 +134,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.pbroadcast.w.gpr.512(i16, <32 x i16>, i define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_pbroadcast_w_gpr_512(i16 %x0, <32 x i16> %x1, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pbroadcast_w_gpr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <32 x i16> splat (i16 -1), i16 [[TMP1]], i64 0 ; CHECK-NEXT: [[DOTSPLATINSERT3:%.*]] = insertelement <32 x i16> poison, i16 [[X0:%.*]], i64 0 @@ -187,10 +187,10 @@ declare void @llvm.x86.avx512.mask.storeu.b.512(ptr, <64 x i8>, i64) define void @test_int_x86_avx512_mask_storeu_b_512(ptr %ptr1, ptr %ptr2, <64 x i8> %x1, i64 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_storeu_b_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i64 [[TMP1]] to <64 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i64 [[X2:%.*]] to <64 x i1> @@ -230,10 +230,10 @@ declare void @llvm.x86.avx512.mask.storeu.w.512(ptr, <32 x i16>, i32) define void @test_int_x86_avx512_mask_storeu_w_512(ptr %ptr1, ptr %ptr2, <32 x i16> %x1, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_storeu_w_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1> @@ -274,8 +274,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.loadu.w.512(ptr, <32 x i16>, i32) define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_loadu_w_512(ptr %ptr, ptr %ptr2, <32 x i16> %x1, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_loadu_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -343,8 +343,8 @@ declare <64 x i8> @llvm.x86.avx512.mask.loadu.b.512(ptr, <64 x i8>, i64) define { <64 x i8>, <64 x i8>, <64 x i8> } @test_int_x86_avx512_mask_loadu_b_512(ptr %ptr, ptr %ptr2, <64 x i8> %x1, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_loadu_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -533,7 +533,7 @@ declare <64 
x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8>, <64 x i8>, i32, < define <64 x i8> @test_int_x86_avx512_palignr_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_palignr_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> @@ -547,10 +547,10 @@ define <64 x i8> @test_int_x86_avx512_palignr_512(<64 x i8> %x0, <64 x i8> %x1, define <64 x i8> @test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x3, i64 %x4) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_palignr_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> ; CHECK-NEXT: [[PALIGNR:%.*]] = shufflevector <64 x i8> [[X1:%.*]], <64 x i8> [[X0:%.*]], <64 x i32> @@ -571,9 +571,9 @@ define <64 x i8> @test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8> define <64 x i8> @test_int_x86_avx512_maskz_palignr_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x4) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_palignr_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> ; CHECK-NEXT: [[PALIGNR:%.*]] = shufflevector <64 x i8> [[X1:%.*]], <64 x i8> [[X0:%.*]], <64 x i32> @@ -610,8 +610,8 @@ define <32 x i16> @test_int_x86_avx512_pshufh_w_512(<32 x i16> %x0, i32 %x1, <32 define <32 x i16> @test_int_x86_avx512_mask_pshufh_w_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pshufh_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X0]], <32 x i32> @@ -633,7 +633,7 @@ define <32 x i16> @test_int_x86_avx512_mask_pshufh_w_512(<32 x i16> %x0, i32 %x1 define <32 x i16> @test_int_x86_avx512_maskz_pshufh_w_512(<32 x i16> %x0, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pshufh_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X0]], <32 x i32> @@ -670,8 +670,8 @@ define <32 x i16> @test_int_x86_avx512_pshufl_w_512(<32 x i16> %x0, i32 %x1, <32 define <32 x i16> @test_int_x86_avx512_mask_pshufl_w_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pshufl_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X0]], <32 x i32> @@ -693,7 +693,7 @@ define <32 x i16> @test_int_x86_avx512_mask_pshufl_w_512(<32 x i16> %x0, i32 %x1 define <32 x i16> @test_int_x86_avx512_maskz_pshufl_w_512(<32 x i16> %x0, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pshufl_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP1]], <32 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X0]], <32 x i32> @@ -715,7 +715,7 @@ define <32 x i16> @test_int_x86_avx512_maskz_pshufl_w_512(<32 x i16> %x0, i32 %x define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) nounwind #0 { ; CHECK-LABEL: @test_pcmpeq_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] @@ -737,8 +737,8 @@ define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) nounwind #0 { define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_pcmpeq_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] @@ -770,7 +770,7 @@ declare i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8>, <64 x i8>, i64) define i32 @test_pcmpeq_w(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: @test_pcmpeq_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] @@ -792,8 +792,8 @@ define i32 @test_pcmpeq_w(<32 x i16> %a, <32 x i16> %b) nounwind #0 { define i32 @test_mask_pcmpeq_w(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_pcmpeq_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] @@ -825,7 +825,7 @@ declare i32 @llvm.x86.avx512.mask.pcmpeq.w.512(<32 x i16>, <32 x i16>, i32) define i64 @test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) nounwind #0 { ; CHECK-LABEL: @test_pcmpgt_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call 
void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A:%.*]], splat (i8 -128) ; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[TMP1]], splat (i8 -1) @@ -851,8 +851,8 @@ define i64 @test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) nounwind #0 { define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_pcmpgt_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[A:%.*]], splat (i8 -128) ; CHECK-NEXT: [[TMP5:%.*]] = xor <64 x i8> [[TMP1]], splat (i8 -1) @@ -888,7 +888,7 @@ declare i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8>, <64 x i8>, i64) define i32 @test_pcmpgt_w(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: @test_pcmpgt_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A:%.*]], splat (i16 -32768) ; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[TMP1]], splat (i16 -1) @@ -914,8 +914,8 @@ define i32 @test_pcmpgt_w(<32 x i16> %a, <32 x i16> %b) nounwind #0 { define i32 @test_mask_pcmpgt_w(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_pcmpgt_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[A:%.*]], splat (i16 -32768) ; CHECK-NEXT: [[TMP5:%.*]] = xor <32 x i16> [[TMP1]], splat (i16 -1) @@ -953,7 +953,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.punpckhb.w.512(<64 x i8>, <64 x i8>, <64 define <64 x i8> @test_int_x86_avx512_punpckhb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_punpckhb_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <64 x 
i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i32> @@ -967,9 +967,9 @@ define <64 x i8> @test_int_x86_avx512_punpckhb_w_512(<64 x i8> %x0, <64 x i8> %x define <64 x i8> @test_int_x86_avx512_mask_punpckhb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_punpckhb_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i32> @@ -993,7 +993,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.punpcklb.w.512(<64 x i8>, <64 x i8>, <64 define <64 x i8> @test_int_x86_avx512_punpcklb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_punpcklb_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i32> @@ -1007,9 +1007,9 @@ define <64 x i8> @test_int_x86_avx512_punpcklb_w_512(<64 x i8> %x0, <64 x i8> %x define <64 x i8> @test_int_x86_avx512_mask_punpcklb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_punpcklb_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <64 x i8> [[TMP1]], <64 x i8> [[TMP2]], <64 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i32> @@ -1033,7 
+1033,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.punpckhw.d.512(<32 x i16>, <32 x i16>, define <32 x i16> @test_int_x86_avx512_punpckhw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_punpckhw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i32> @@ -1047,9 +1047,9 @@ define <32 x i16> @test_int_x86_avx512_punpckhw_d_512(<32 x i16> %x0, <32 x i16> define <32 x i16> @test_int_x86_avx512_mask_punpckhw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_punpckhw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i32> @@ -1073,7 +1073,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.punpcklw.d.512(<32 x i16>, <32 x i16>, define <32 x i16> @test_int_x86_avx512_punpcklw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_punpcklw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i32> @@ -1087,9 +1087,9 @@ define <32 x i16> @test_int_x86_avx512_punpcklw_d_512(<32 x i16> %x0, <32 x i16> define <32 x i16> @test_int_x86_avx512_mask_punpcklw_d_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_punpcklw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load 
i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i16> [[TMP1]], <32 x i16> [[TMP2]], <32 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i32> @@ -1113,7 +1113,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmaxs.b.512(<64 x i8>, <64 x i8>, <64 x define <64 x i8> @test_int_x86_avx512_pmaxs_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaxs_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.smax.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -1127,9 +1127,9 @@ define <64 x i8> @test_int_x86_avx512_pmaxs_b_512(<64 x i8> %x0, <64 x i8> %x1, define <64 x i8> @test_int_x86_avx512_mask_pmaxs_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.smax.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -1153,7 +1153,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmaxs.w.512(<32 x i16>, <32 x i16>, <32 define <32 x i16> @test_int_x86_avx512_pmaxs_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaxs_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.smax.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1167,9 +1167,9 @@ define <32 x i16> @test_int_x86_avx512_pmaxs_w_512(<32 x i16> %x0, <32 x i16> %x define <32 x i16> @test_int_x86_avx512_mask_pmaxs_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.smax.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1193,7 +1193,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmaxu.b.512(<64 x i8>, <64 x i8>, <64 x define <64 x i8> @test_int_x86_avx512_pmaxu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaxu_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.umax.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -1207,9 +1207,9 @@ define <64 x i8> @test_int_x86_avx512_pmaxu_b_512(<64 x i8> %x0, <64 x i8> %x1, define <64 x i8> @test_int_x86_avx512_mask_pmaxu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.umax.v64i8(<64 x i8> [[X0:%.*]], 
<64 x i8> [[X1:%.*]]) @@ -1233,7 +1233,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmaxu.w.512(<32 x i16>, <32 x i16>, <32 define <32 x i16> @test_int_x86_avx512_pmaxu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaxu_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.umax.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1247,9 +1247,9 @@ define <32 x i16> @test_int_x86_avx512_pmaxu_w_512(<32 x i16> %x0, <32 x i16> %x define <32 x i16> @test_int_x86_avx512_mask_pmaxu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.umax.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1273,7 +1273,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pmins.b.512(<64 x i8>, <64 x i8>, <64 x define <64 x i8> @test_int_x86_avx512_pmins_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmins_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.smin.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -1287,9 +1287,9 @@ define <64 x i8> @test_int_x86_avx512_pmins_b_512(<64 x i8> %x0, <64 x i8> %x1, define <64 x i8> @test_int_x86_avx512_mask_pmins_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) 
to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.smin.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -1313,7 +1313,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmins.w.512(<32 x i16>, <32 x i16>, <32 define <32 x i16> @test_int_x86_avx512_pmins_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmins_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.smin.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1327,9 +1327,9 @@ define <32 x i16> @test_int_x86_avx512_pmins_w_512(<32 x i16> %x0, <32 x i16> %x define <32 x i16> @test_int_x86_avx512_mask_pmins_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.smin.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1353,7 +1353,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pminu.b.512(<64 x i8>, <64 x i8>, <64 x define <64 x i8> @test_int_x86_avx512_pminu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pminu_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> 
@llvm.umin.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -1367,9 +1367,9 @@ define <64 x i8> @test_int_x86_avx512_pminu_b_512(<64 x i8> %x0, <64 x i8> %x1, define <64 x i8> @test_int_x86_avx512_mask_pminu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.umin.v64i8(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -1393,7 +1393,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pminu.w.512(<32 x i16>, <32 x i16>, <32 define <32 x i16> @test_int_x86_avx512_pminu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pminu_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.umin.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1407,9 +1407,9 @@ define <32 x i16> @test_int_x86_avx512_pminu_w_512(<32 x i16> %x0, <32 x i16> %x define <32 x i16> @test_int_x86_avx512_mask_pminu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.umin.v32i16(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1448,8 +1448,8 @@ define <32 x i16> 
@test_int_x86_avx512_pmovzxb_w_512(<32 x i8> %x0, <32 x i16> % define <32 x i16> @test_int_x86_avx512_mask_pmovzxb_w_512(<32 x i8> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovzxb_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i8> [[TMP1]], <32 x i8> splat (i8 -1), <32 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i8> [[X0:%.*]], <32 x i8> poison, <32 x i32> @@ -1473,7 +1473,7 @@ define <32 x i16> @test_int_x86_avx512_mask_pmovzxb_w_512(<32 x i8> %x0, <32 x i define <32 x i16> @test_int_x86_avx512_maskz_pmovzxb_w_512(<32 x i8> %x0, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovzxb_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i8> [[TMP1]], <32 x i8> splat (i8 -1), <32 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i8> [[X0:%.*]], <32 x i8> poison, <32 x i32> @@ -1514,8 +1514,8 @@ define <32 x i16> @test_int_x86_avx512_pmovsxb_w_512(<32 x i8> %x0, <32 x i16> % define <32 x i16> @test_int_x86_avx512_mask_pmovsxb_w_512(<32 x i8> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovsxb_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <32 x i8> [[TMP1]], <32 x i8> splat (i8 -1), <32 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <32 x i8> [[X0:%.*]], <32 x i8> poison, <32 x i32> @@ -1539,7 +1539,7 @@ define <32 x i16> @test_int_x86_avx512_mask_pmovsxb_w_512(<32 x i8> %x0, <32 x i define <32 x i16> @test_int_x86_avx512_maskz_pmovsxb_w_512(<32 x i8> %x0, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovsxb_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = 
shufflevector <32 x i8> [[TMP1]], <32 x i8> splat (i8 -1), <32 x i32> ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <32 x i8> [[X0:%.*]], <32 x i8> poison, <32 x i32> @@ -1565,7 +1565,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16>, <8 x i16>, <32 x define <32 x i16> @test_int_x86_avx512_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_psrl_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -1585,9 +1585,9 @@ define <32 x i16> @test_int_x86_avx512_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1, define <32 x i16> @test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psrl_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -1615,8 +1615,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16> define <32 x i16> @test_int_x86_avx512_maskz_psrl_w_512(<32 x i16> %x0, <8 x i16> %x1, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_psrl_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -1646,8 +1646,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16>, i32, <32 x i16> define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psrl_wi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; 
CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> [[TMP1]], i32 3) ; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer @@ -1697,7 +1697,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psra.w.512(<32 x i16>, <8 x i16>, <32 x define <32 x i16> @test_int_x86_avx512_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_psra_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -1717,9 +1717,9 @@ define <32 x i16> @test_int_x86_avx512_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, define <32 x i16> @test_int_x86_avx512_mask_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psra_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -1747,8 +1747,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psra_w_512(<32 x i16> %x0, <8 x i16> define <32 x i16> @test_int_x86_avx512_maskz_psra_w_512(<32 x i16> %x0, <8 x i16> %x1, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_psra_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -1778,8 +1778,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.psra.wi.512(<32 x i16>, i32, <32 x i16> define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_psra_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psra_wi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16> [[TMP1]], i32 3) ; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer @@ -1829,7 +1829,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psll.w.512(<32 x i16>, <8 x i16>, <32 x define <32 x i16> @test_int_x86_avx512_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_psll_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -1849,9 +1849,9 @@ define <32 x i16> @test_int_x86_avx512_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, define <32 x i16> @test_int_x86_avx512_mask_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psll_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -1879,8 +1879,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psll_w_512(<32 x i16> %x0, <8 x i16> define <32 x i16> @test_int_x86_avx512_maskz_psll_w_512(<32 x i16> %x0, <8 x i16> %x1, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_psll_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -1910,8 +1910,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.psll.wi.512(<32 x i16>, i32, <32 x i16> define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_psll_wi_512(<32 x i16> %x0, i32 %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psll_wi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16> [[TMP1]], i32 3) ; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer @@ -1961,7 +1961,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8>, <64 x i8>, <64 x define <64 x i8> @test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pshuf_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP4]] @@ -1976,9 +1976,9 @@ define <64 x i8> @test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1, define <64 x i8> @test_int_x86_avx512_mask_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pshuf_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP13]] @@ -2035,7 +2035,7 @@ define <32 x i16> @test_int_x86_avx512_cvtmask2w_512(i32 %x0) nounwind #0 { define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -2053,9 +2053,9 @@ define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) no define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -2081,8 +2081,8 @@ define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, < define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -2107,7 +2107,7 @@ define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: 
@test_mask_packs_epi32_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2137,10 +2137,10 @@ define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) nounw define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -2177,9 +2177,9 @@ define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -2216,7 +2216,7 @@ define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmb_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2252,10 +2252,10 @@ define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) noun define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) 
nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmbk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -2298,9 +2298,9 @@ define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 define <32 x i16> @test_mask_packs_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmbkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -2346,7 +2346,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.packssdw.512(<16 x i32>, <16 x i32>, <3 define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP3]] to <32 x i16> @@ -2364,9 +2364,9 @@ define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nou define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; 
CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP5]] to <32 x i16> @@ -2392,8 +2392,8 @@ define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <6 define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP4]] to <32 x i16> @@ -2418,7 +2418,7 @@ define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2448,10 +2448,10 @@ define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -2488,9 +2488,9 @@ define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x define <64 x i8> 
@test_mask_packs_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -2531,7 +2531,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.packsswb.512(<32 x i16>, <32 x i16>, <64 define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP8:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -2549,9 +2549,9 @@ define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) n define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -2577,8 +2577,8 @@ define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, 
ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -2603,7 +2603,7 @@ define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2633,10 +2633,10 @@ define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) noun define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -2673,9 +2673,9 @@ define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -2712,7 +2712,7 @@ define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i3 define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rmb_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 
add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2748,10 +2748,10 @@ define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) nou define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rmbk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -2794,9 +2794,9 @@ define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <3 define <32 x i16> @test_mask_packus_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rmbkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -2842,7 +2842,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.packusdw.512(<16 x i32>, <16 x i32>, <3 define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP8:%.*]] = sext <32 x i1> [[TMP3]] to <32 x i16> @@ -2860,9 +2860,9 @@ define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) no define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> 
%passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = sext <32 x i1> [[TMP5]] to <32 x i16> @@ -2888,8 +2888,8 @@ define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, < define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = sext <32 x i1> [[TMP4]] to <32 x i16> @@ -2914,7 +2914,7 @@ define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -2944,10 +2944,10 @@ define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounw define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -2984,9 +2984,9 @@ define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 define <64 x i8> @test_mask_packus_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -3026,7 +3026,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.packuswb.512(<32 x i16>, <32 x i16>, <64 define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) nounwind #0 { ; CHECK-LABEL: @test_cmp_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] @@ -3142,8 +3142,8 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) nounwind #0 { define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_cmp_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] @@ -3329,7 +3329,7 @@ declare i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8>, <64 x i8>, i32, i64) noun define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) nounwind #0 { ; CHECK-LABEL: @test_ucmp_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, 
ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] @@ -3437,8 +3437,8 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) nounwind #0 { define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_x86_avx512_ucmp_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <64 x i8> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] @@ -3616,7 +3616,7 @@ declare i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8>, <64 x i8>, i32, i64) nou define i32 @test_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1) nounwind #0 { ; CHECK-LABEL: @test_cmp_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] @@ -3732,8 +3732,8 @@ define i32 @test_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1) nounwind #0 { define i32 @test_mask_cmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_cmp_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] @@ -3919,7 +3919,7 @@ declare i32 @llvm.x86.avx512.mask.cmp.w.512(<32 x i16>, <32 x i16>, i32, i32) no define i32 @test_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1) nounwind #0 { ; CHECK-LABEL: @test_ucmp_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = or <32 x 
i16> [[TMP1]], [[TMP2]] @@ -4027,8 +4027,8 @@ define i32 @test_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1) nounwind #0 { define i32 @test_mask_ucmp_w_512(<32 x i16> %a0, <32 x i16> %a1, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_ucmp_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = xor <32 x i16> [[A0:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] @@ -4209,7 +4209,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8>, <64 x i8>, <64 x i define <64 x i8> @mm512_avg_epu8(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 { ; CHECK-LABEL: @mm512_avg_epu8( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -4223,9 +4223,9 @@ define <64 x i8> @mm512_avg_epu8(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i6 define <64 x i8> @mm512_mask_avg_epu8(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) nounwind #0 { ; CHECK-LABEL: @mm512_mask_avg_epu8( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -4249,7 +4249,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16>, <32 x i16>, <32 define <32 x i16> @mm512_avg_epu16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @mm512_avg_epu16( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pavg.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4263,9 +4263,9 @@ define <32 x i16> @mm512_avg_epu16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x define <32 x i16> @mm512_mask_avg_epu16(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @mm512_mask_avg_epu16( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pavg.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4304,8 +4304,8 @@ define <32 x i16> @test_int_x86_avx512_pabs_w_512(<32 x i16> %x0, <32 x i16> %x1 define <32 x i16> @test_int_x86_avx512_mask_pabs_w_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pabs_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <32 x i16> [[X0:%.*]], splat (i16 -32768) ; CHECK-NEXT: [[TMP13:%.*]] = select <32 x i1> [[TMP12]], <32 x i16> splat (i16 -1), <32 x i16> [[TMP1]] @@ -4346,8 +4346,8 @@ define <64 x i8> @test_int_x86_avx512_pabs_b_512(<64 x i8> %x0, <64 x i8> %x1) n define <64 x i8> @test_int_x86_avx512_mask_pabs_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pabs_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <64 x i8> 
[[X0:%.*]], splat (i8 -128) ; CHECK-NEXT: [[TMP13:%.*]] = select <64 x i1> [[TMP12]], <64 x i8> splat (i8 -1), <64 x i8> [[TMP1]] @@ -4373,8 +4373,8 @@ declare i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8>, <64 x i8>, i64) define i64 @test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_ptestm_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = and <64 x i8> [[X0:%.*]], [[TMP2]] @@ -4432,8 +4432,8 @@ declare i32 @llvm.x86.avx512.ptestm.w.512(<32 x i16>, <32 x i16>, i32) define i32 @test_int_x86_avx512_ptestm_w_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_ptestm_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = and <32 x i16> [[X0:%.*]], [[TMP2]] @@ -4491,8 +4491,8 @@ declare i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8>, <64 x i8>, i64 %x2) define i64 @test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_ptestnm_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = and <64 x i8> [[X0:%.*]], [[TMP2]] @@ -4550,8 +4550,8 @@ declare i32 @llvm.x86.avx512.ptestnm.w.512(<32 x i16>, <32 x i16>, i32 %x2) define i32 @test_int_x86_avx512_ptestnm_w_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_ptestnm_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load 
i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = and <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = and <32 x i16> [[X0:%.*]], [[TMP2]] @@ -4655,7 +4655,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmulhu.w.512(<32 x i16>, <32 x i16>, <3 define <32 x i16> @test_int_x86_avx512_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmulhu_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4669,9 +4669,9 @@ define <32 x i16> @test_int_x86_avx512_pmulhu_w_512(<32 x i16> %x0, <32 x i16> % define <32 x i16> @test_int_x86_avx512_mask_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmulhu_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4695,7 +4695,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmulh.w.512(<32 x i16>, <32 x i16>, <32 define <32 x i16> @test_int_x86_avx512_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmulh_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4709,9 +4709,9 @@ define <32 x i16> @test_int_x86_avx512_pmulh_w_512(<32 x i16> %x0, <32 x i16> 
%x define <32 x i16> @test_int_x86_avx512_mask_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmulh_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4735,7 +4735,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmul.hr.sw.512(<32 x i16>, <32 x i16>, define <32 x i16> @test_int_x86_avx512_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmulhr_sw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4749,9 +4749,9 @@ define <32 x i16> @test_int_x86_avx512_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> define <32 x i16> @test_int_x86_avx512_mask_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmulhr_sw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4775,7 +4775,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.pmaddubs.w.512(<64 x i8>, <64 x i8>, <3 define <32 x i16> @test_int_x86_avx512_pmaddubs_w_512(<64 x i8> 
%x0, <64 x i8> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaddubs_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <64 x i8> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer @@ -4801,9 +4801,9 @@ define <32 x i16> @test_int_x86_avx512_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> % define <32 x i16> @test_int_x86_avx512_mask_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaddubs_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <64 x i8> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer @@ -4839,7 +4839,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.pmaddw.d.512(<32 x i16>, <32 x i16>, <1 define <16 x i32> @test_int_x86_avx512_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1, <16 x i32> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaddw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP2]], zeroinitializer @@ -4865,9 +4865,9 @@ define <16 x i32> @test_int_x86_avx512_pmaddw_d_512(<32 x i16> %x0, <32 x i16> % define <16 x i32> @test_int_x86_avx512_mask_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1, <16 x i32> %x2, i16 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaddw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <32 x i16> [[TMP2]], zeroinitializer @@ -4903,7 +4903,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16>, <32 x i16>, define <32 x i16> @test_int_x86_avx512_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4917,9 +4917,9 @@ define <32 x i16> @test_int_x86_avx512_permvar_hi_512(<32 x i16> %x0, <32 x i16> define <32 x i16> @test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4941,8 +4941,8 @@ define <32 x i16> @test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x define <32 x i16> @test_int_x86_avx512_maskz_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; 
CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -4965,8 +4965,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.vpermt2var.hi.512(<32 x i16>, <32 x i16 define <32 x i16> @test_int_x86_avx512_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermt2var_hi_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5> @@ -4988,10 +4988,10 @@ define <32 x i16> @test_int_x86_avx512_vpermt2var_hi_512(<32 x i16> %x0, <32 x i define <32 x i16> @test_int_x86_avx512_mask_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermt2var_hi_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5> ; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X4:%.*]], <32 x i16> [[TMP2]]) @@ -5022,10 +5022,10 @@ declare <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16>, <32 x i1 define <32 x i16> @test_int_x86_avx512_maskz_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_hi_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 
192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5> ; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X4:%.*]], <32 x i16> [[TMP2]]) @@ -5057,8 +5057,8 @@ declare <32 x i16> @llvm.x86.avx512.mask.vpermi2var.hi.512(<32 x i16>, <32 x i16 define <32 x i16> @test_int_x86_avx512_vpermi2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[X1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X1]] to <32 x i5> ; CHECK-NEXT: [[TMP100:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X3:%.*]], <32 x i16> [[TMP2]]) @@ -5080,9 +5080,9 @@ define <32 x i16> @test_int_x86_avx512_vpermi2var_hi_512(<32 x i16> %x0, <32 x i define <32 x i16> @test_int_x86_avx512_mask_vpermi2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[TMP3]] to <32 x i5> ; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X1:%.*]], <32 x i16> [[TMP2]]) @@ -5114,9 +5114,9 @@ declare <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8>, <64 x i8>, i32, define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x3, i32 %x4) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_dbpsadbw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; 
CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <64 x i8> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -5188,7 +5188,7 @@ define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_dbpsadbw define <32 x i16> @test_mask_adds_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -5202,9 +5202,9 @@ define <32 x i16> @test_mask_adds_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) nou define <32 x i16> @test_mask_adds_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -5226,8 +5226,8 @@ define <32 x i16> @test_mask_adds_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3 define <32 x i16> @test_mask_adds_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> 
@llvm.uadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -5248,7 +5248,7 @@ define <32 x i16> @test_mask_adds_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i define <32 x i16> @test_mask_adds_epu16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -5274,10 +5274,10 @@ define <32 x i16> @test_mask_adds_epu16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi define <32 x i16> @test_mask_adds_epu16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -5310,9 +5310,9 @@ define <32 x i16> @test_mask_adds_epu16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x define <32 x i16> @test_mask_adds_epu16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -5348,7 +5348,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.paddus.w.512(<32 x i16>, <32 x i16>, <3 define <32 x i16> @test_mask_subs_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -5362,9 +5362,9 @@ define <32 x i16> @test_mask_subs_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) nou define <32 x i16> @test_mask_subs_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -5386,8 +5386,8 @@ define <32 x i16> @test_mask_subs_epu16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3 define <32 x i16> @test_mask_subs_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -5408,7 +5408,7 @@ define <32 x i16> @test_mask_subs_epu16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i define <32 x i16> @test_mask_subs_epu16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -5434,10 +5434,10 @@ define <32 x i16> @test_mask_subs_epu16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi define <32 x i16> @test_mask_subs_epu16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -5470,9 +5470,9 @@ define <32 x i16> @test_mask_subs_epu16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x define <32 x i16> @test_mask_subs_epu16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -5508,7 +5508,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psubus.w.512(<32 x i16>, <32 x i16>, <3 define <64 x i8> @test_mask_adds_epu8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu8_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -5522,9 +5522,9 @@ define <64 x i8> @test_mask_adds_epu8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwin define <64 x i8> @test_mask_adds_epu8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu8_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] 
= load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -5546,8 +5546,8 @@ define <64 x i8> @test_mask_adds_epu8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x define <64 x i8> @test_mask_adds_epu8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu8_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -5568,7 +5568,7 @@ define <64 x i8> @test_mask_adds_epu8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 % define <64 x i8> @test_mask_adds_epu8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu8_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -5594,10 +5594,10 @@ define <64 x i8> @test_mask_adds_epu8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind define <64 x i8> @test_mask_adds_epu8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epu8_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -5630,9 +5630,9 @@ define <64 x i8> @test_mask_adds_epu8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8 define <64 x i8> @test_mask_adds_epu8_rmkz_512(<64 x i8> %a, ptr %ptr_b, i64 %mask) nounwind #0 { ; CHECK-LABEL: 
@test_mask_adds_epu8_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -5668,7 +5668,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.paddus.b.512(<64 x i8>, <64 x i8>, <64 x define <64 x i8> @test_mask_subs_epu8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu8_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -5682,9 +5682,9 @@ define <64 x i8> @test_mask_subs_epu8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwin define <64 x i8> @test_mask_subs_epu8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu8_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -5706,8 +5706,8 @@ define <64 x i8> @test_mask_subs_epu8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x define <64 x i8> @test_mask_subs_epu8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu8_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: 
[[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -5728,7 +5728,7 @@ define <64 x i8> @test_mask_subs_epu8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 % define <64 x i8> @test_mask_subs_epu8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu8_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -5754,10 +5754,10 @@ define <64 x i8> @test_mask_subs_epu8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind define <64 x i8> @test_mask_subs_epu8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu8_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -5790,9 +5790,9 @@ define <64 x i8> @test_mask_subs_epu8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8 define <64 x i8> @test_mask_subs_epu8_rmkz_512(<64 x i8> %a, ptr %ptr_b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epu8_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -5828,7 +5828,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.psubus.b.512(<64 x i8>, <64 x i8>, <64 x define <32 x i16> @test_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: @test_adds_epi16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -5842,9 +5842,9 @@ define <32 x i16> @test_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind define <32 x i16> @test_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_adds_epi16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -5868,8 +5868,8 @@ define <32 x i16> @test_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i define <32 x i16> @test_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_adds_epi16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -5892,7 +5892,7 @@ define <32 x i16> @test_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %m define <32 x i16> @test_adds_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_adds_epi16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -5918,10 +5918,10 @@ define <32 x i16> @test_adds_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 define <32 x i16> @test_adds_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, 
i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_adds_epi16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -5956,9 +5956,9 @@ define <32 x i16> @test_adds_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> define <32 x i16> @test_adds_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_adds_epi16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -5996,7 +5996,7 @@ declare <32 x i16> @llvm.x86.avx512.padds.w.512(<32 x i16>, <32 x i16>) define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -6010,9 +6010,9 @@ define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nou define <32 x i16> @test_mask_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to 
ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -6034,8 +6034,8 @@ define <32 x i16> @test_mask_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3 define <32 x i16> @test_mask_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -6056,7 +6056,7 @@ define <32 x i16> @test_mask_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i define <32 x i16> @test_mask_adds_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -6082,10 +6082,10 @@ define <32 x i16> @test_mask_adds_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi define <32 x i16> @test_mask_adds_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -6118,9 +6118,9 @@ define <32 x i16> 
@test_mask_adds_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x define <32 x i16> @test_mask_adds_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -6156,7 +6156,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.padds.w.512(<32 x i16>, <32 x i16>, <32 define <32 x i16> @test_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: @test_subs_epi16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -6170,9 +6170,9 @@ define <32 x i16> @test_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind define <32 x i16> @test_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_subs_epi16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -6196,8 +6196,8 @@ define <32 x i16> @test_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i define <32 x i16> @test_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_subs_epi16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint 
(ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -6220,7 +6220,7 @@ define <32 x i16> @test_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %m define <32 x i16> @test_subs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_subs_epi16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -6246,10 +6246,10 @@ define <32 x i16> @test_subs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 define <32 x i16> @test_subs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_subs_epi16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -6284,9 +6284,9 @@ define <32 x i16> @test_subs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> define <32 x i16> @test_subs_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_subs_epi16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -6324,7 +6324,7 @@ declare <32 x i16> @llvm.x86.avx512.psubs.w.512(<32 x i16>, <32 x i16>) define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nounwind #0 { ; CHECK-LABEL: 
@test_mask_subs_epi16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -6338,9 +6338,9 @@ define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) nou define <32 x i16> @test_mask_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -6362,8 +6362,8 @@ define <32 x i16> @test_mask_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <3 define <32 x i16> @test_mask_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> [[A:%.*]], <32 x i16> [[B:%.*]]) @@ -6384,7 +6384,7 @@ define <32 x i16> @test_mask_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i define <32 x i16> @test_mask_subs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 
[[TMP1]], 0 @@ -6410,10 +6410,10 @@ define <32 x i16> @test_mask_subs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) nounwi define <32 x i16> @test_mask_subs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -6446,9 +6446,9 @@ define <32 x i16> @test_mask_subs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <32 x define <32 x i16> @test_mask_subs_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i32 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -6484,7 +6484,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psubs.w.512(<32 x i16>, <32 x i16>, <32 define <64 x i8> @test_mask_adds_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi8_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -6498,9 +6498,9 @@ define <64 x i8> @test_mask_adds_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwin define <64 x i8> @test_mask_adds_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi8_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -6522,8 +6522,8 @@ define <64 x i8> @test_mask_adds_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x define <64 x i8> @test_mask_adds_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi8_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -6544,7 +6544,7 @@ define <64 x i8> @test_mask_adds_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 % define <64 x i8> @test_mask_adds_epi8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi8_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -6570,10 +6570,10 @@ define <64 x i8> @test_mask_adds_epi8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind define <64 x i8> @test_mask_adds_epi8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi8_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -6606,9 +6606,9 @@ define <64 x i8> @test_mask_adds_epi8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8 define <64 x i8> @test_mask_adds_epi8_rmkz_512(<64 x i8> %a, ptr %ptr_b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_adds_epi8_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -6644,7 +6644,7 @@ declare <64 x i8> @llvm.x86.avx512.mask.padds.b.512(<64 x i8>, <64 x i8>, <64 x define <64 x i8> @test_mask_subs_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi8_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -6658,9 +6658,9 @@ define <64 x i8> @test_mask_subs_epi8_rr_512(<64 x i8> %a, <64 x i8> %b) nounwin define <64 x i8> @test_mask_subs_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi8_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -6682,8 +6682,8 @@ define <64 x i8> @test_mask_subs_epi8_rrk_512(<64 x i8> %a, <64 x i8> %b, <64 x define <64 x i8> @test_mask_subs_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi8_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr 
(i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]]) @@ -6704,7 +6704,7 @@ define <64 x i8> @test_mask_subs_epi8_rrkz_512(<64 x i8> %a, <64 x i8> %b, i64 % define <64 x i8> @test_mask_subs_epi8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi8_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -6730,10 +6730,10 @@ define <64 x i8> @test_mask_subs_epi8_rm_512(<64 x i8> %a, ptr %ptr_b) nounwind define <64 x i8> @test_mask_subs_epi8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi8_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -6766,9 +6766,9 @@ define <64 x i8> @test_mask_subs_epi8_rmk_512(<64 x i8> %a, ptr %ptr_b, <64 x i8 define <64 x i8> @test_mask_subs_epi8_rmkz_512(<64 x i8> %a, ptr %ptr_b, i64 %mask) nounwind #0 { ; CHECK-LABEL: @test_mask_subs_epi8_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -6806,7 +6806,7 @@ declare <32 x i16> 
@llvm.x86.avx512.mask.psrlv32hi(<32 x i16>, <32 x i16>, <32 x define <32 x i16> @test_int_x86_avx512_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_psrlv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -6820,9 +6820,9 @@ define <32 x i16> @test_int_x86_avx512_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, define <32 x i16> @test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psrlv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -6844,8 +6844,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> define <32 x i16> @test_int_x86_avx512_maskz_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_psrlv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -6869,7 +6869,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psrav32.hi(<32 x i16>, <32 x i16>, <32 define <32 x i16> @test_int_x86_avx512_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_psrav32_hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr 
(i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -6883,9 +6883,9 @@ define <32 x i16> @test_int_x86_avx512_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1 define <32 x i16> @test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psrav32_hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -6907,8 +6907,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16 define <32 x i16> @test_int_x86_avx512_maskz_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_psrav32_hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -6932,7 +6932,7 @@ declare <32 x i16> @llvm.x86.avx512.mask.psllv32hi(<32 x i16>, <32 x i16>, <32 x define <32 x i16> @test_int_x86_avx512_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_psllv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> 
@llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -6946,9 +6946,9 @@ define <32 x i16> @test_int_x86_avx512_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, define <32 x i16> @test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psllv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -6970,8 +6970,8 @@ define <32 x i16> @test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> define <32 x i16> @test_int_x86_avx512_maskz_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_psllv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -7008,8 +7008,8 @@ define <32 x i8> @test_int_x86_avx512_pmov_wb_512(<32 x i16> %x0, <32 x i8> %x1) define <32 x i8> @test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_wb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8> ; CHECK-NEXT: [[TMP4:%.*]] = trunc <32 x i16> [[X0:%.*]] to <32 x i8> @@ -7031,7 +7031,7 @@ define <32 x i8> @test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> 
%x0, <32 x i8> define <32 x i8> @test_int_x86_avx512_maskz_pmov_wb_512(<32 x i16> %x0, i32 %x2) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmov_wb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8> ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X0:%.*]] to <32 x i8> diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll index 8bf6d5acc21ba..481751b25eda1 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512bw-intrinsics.ll @@ -24,7 +24,7 @@ define i32 @test_int_x86_avx512_kadd_d(<32 x i16> %A, <32 x i16> %B) nounwind #0 ; CHECK-LABEL: @test_int_x86_avx512_kadd_d( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = xor <32 x i16> [[A:%.*]], zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = or <32 x i16> [[TMP0]], zeroinitializer @@ -74,7 +74,7 @@ define i32 @test_int_x86_avx512_kadd_q(<64 x i8> %A, <64 x i8> %B) nounwind #0 { ; CHECK-LABEL: @test_int_x86_avx512_kadd_q( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = xor <64 x i8> [[A:%.*]], zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = or <64 x i8> [[TMP0]], zeroinitializer @@ -123,7 +123,7 @@ declare <64 x i1> @llvm.x86.avx512.kadd.q(<64 x i1>, <64 x i1>) define i32 @test_x86_avx512_ktestc_d(<32 x i16> %A, <32 x i16> %B) #0 { ; CHECK-LABEL: @test_x86_avx512_ktestc_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A:%.*]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP1]], zeroinitializer @@ -165,7 +165,7 @@ declare i32 @llvm.x86.avx512.ktestc.d(<32 x i1>, <32 x i1>) nounwind readnone define i32 @test_x86_avx512_ktestz_d(<32 x i16> %A, <32 x i16> %B) #0 { ; CHECK-LABEL: @test_x86_avx512_ktestz_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, 
ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i16> [[A:%.*]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP1]], zeroinitializer @@ -207,7 +207,7 @@ declare i32 @llvm.x86.avx512.ktestz.d(<32 x i1>, <32 x i1>) nounwind readnone define i32 @test_x86_avx512_ktestc_q(<64 x i8> %A, <64 x i8> %B) #0 { ; CHECK-LABEL: @test_x86_avx512_ktestc_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A:%.*]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], zeroinitializer @@ -249,7 +249,7 @@ declare i32 @llvm.x86.avx512.ktestc.q(<64 x i1>, <64 x i1>) nounwind readnone define i32 @test_x86_avx512_ktestz_q(<64 x i8> %A, <64 x i8> %B) #0 { ; CHECK-LABEL: @test_x86_avx512_ktestz_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor <64 x i8> [[A:%.*]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = or <64 x i8> [[TMP1]], zeroinitializer @@ -291,7 +291,7 @@ declare i32 @llvm.x86.avx512.ktestz.q(<64 x i1>, <64 x i1>) nounwind readnone define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -309,9 +309,9 @@ define <32 x i16> @test_mask_packs_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) #0 define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x 
i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -339,8 +339,8 @@ define <32 x i16> @test_mask_packs_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, < define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -367,7 +367,7 @@ define <32 x i16> @test_mask_packs_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -397,10 +397,10 @@ define <32 x i16> @test_mask_packs_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 { define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -439,9 +439,9 @@ define <32 x i16> @test_mask_packs_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = 
load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -480,7 +480,7 @@ define <32 x i16> @test_mask_packs_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmb_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -516,10 +516,10 @@ define <32 x i16> @test_mask_packs_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) #0 { define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmbk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -564,9 +564,9 @@ define <32 x i16> @test_mask_packs_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 define <32 x i16> @test_mask_packs_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packs_epi32_rmbkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -614,7 +614,7 @@ declare <32 x i16> @llvm.x86.avx512.packssdw.512(<16 x i32>, <16 x i32>) define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, 
align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP3]] to <32 x i16> @@ -632,9 +632,9 @@ define <64 x i8> @test_mask_packs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) #0 define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP5]] to <32 x i16> @@ -662,8 +662,8 @@ define <64 x i8> @test_mask_packs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <6 define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[A:%.*]] = sext <32 x i1> [[TMP4]] to <32 x i16> @@ -690,7 +690,7 @@ define <64 x i8> @test_mask_packs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -720,10 +720,10 @@ define <64 x i8> @test_mask_packs_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) #0 { define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) #0 { ; CHECK-LABEL: 
@test_mask_packs_epi16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -762,9 +762,9 @@ define <64 x i8> @test_mask_packs_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x define <64 x i8> @test_mask_packs_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i64 %mask) #0 { ; CHECK-LABEL: @test_mask_packs_epi16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -807,7 +807,7 @@ declare <64 x i8> @llvm.x86.avx512.packsswb.512(<32 x i16>, <32 x i16>) define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP8:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i32> @@ -825,9 +825,9 @@ define <32 x i16> @test_mask_packus_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) # define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <32 x i16> %passThru, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, 
ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i32> @@ -855,8 +855,8 @@ define <32 x i16> @test_mask_packus_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <16 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = sext <16 x i1> [[TMP4]] to <16 x i32> @@ -883,7 +883,7 @@ define <32 x i16> @test_mask_packus_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -913,10 +913,10 @@ define <32 x i16> @test_mask_packus_epi32_rm_512(<16 x i32> %a, ptr %ptr_b) #0 { define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -955,9 +955,9 @@ define <32 x i16> @test_mask_packus_epi32_rmk_512(<16 x i32> %a, ptr %ptr_b, <32 define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) #0 { ; CHECK-LABEL: 
@test_mask_packus_epi32_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -996,7 +996,7 @@ define <32 x i16> @test_mask_packus_epi32_rmkz_512(<16 x i32> %a, ptr %ptr_b, i3 define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rmb_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -1032,10 +1032,10 @@ define <32 x i16> @test_mask_packus_epi32_rmb_512(<16 x i32> %a, ptr %ptr_b) #0 define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <32 x i16> %passThru, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rmbk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -1080,9 +1080,9 @@ define <32 x i16> @test_mask_packus_epi32_rmbk_512(<16 x i32> %a, ptr %ptr_b, <3 define <32 x i16> @test_mask_packus_epi32_rmbkz_512(<16 x i32> %a, ptr %ptr_b, i32 %mask) #0 { ; CHECK-LABEL: @test_mask_packus_epi32_rmbkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 
[[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -1130,7 +1130,7 @@ declare <32 x i16> @llvm.x86.avx512.packusdw.512(<16 x i32>, <16 x i32>) define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rr_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP8:%.*]] = sext <32 x i1> [[TMP3]] to <32 x i16> @@ -1148,9 +1148,9 @@ define <64 x i8> @test_mask_packus_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) #0 define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <64 x i8> %passThru, i64 %mask) #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rrk_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = sext <32 x i1> [[TMP5]] to <32 x i16> @@ -1178,8 +1178,8 @@ define <64 x i8> @test_mask_packus_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, < define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i64 %mask) #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rrkz_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = sext <32 x i1> [[TMP4]] to <32 x i16> @@ -1206,7 +1206,7 @@ define <64 x i8> @test_mask_packus_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rm_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), 
align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -1236,10 +1236,10 @@ define <64 x i8> @test_mask_packus_epi16_rm_512(<32 x i16> %a, ptr %ptr_b) #0 { define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 x i8> %passThru, i64 %mask) #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rmk_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] @@ -1278,9 +1278,9 @@ define <64 x i8> @test_mask_packus_epi16_rmk_512(<32 x i16> %a, ptr %ptr_b, <64 define <64 x i8> @test_mask_packus_epi16_rmkz_512(<32 x i16> %a, ptr %ptr_b, i64 %mask) #0 { ; CHECK-LABEL: @test_mask_packus_epi16_rmkz_512( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]] @@ -1321,8 +1321,8 @@ declare <64 x i8> @llvm.x86.avx512.packuswb.512(<32 x i16>, <32 x i16>) define <32 x i16>@test_int_x86_avx512_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermt2var_hi_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5> @@ -1344,10 +1344,10 @@ define <32 x i16>@test_int_x86_avx512_vpermt2var_hi_512(<32 x i16> %x0, <32 x i1 define <32 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 
x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermt2var_hi_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5> ; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X4:%.*]], <32 x i16> [[TMP2]]) @@ -1378,10 +1378,10 @@ define <32 x i16>@test_int_x86_avx512_mask_vpermt2var_hi_512(<32 x i16> %x0, <32 define <32 x i16>@test_int_x86_avx512_maskz_vpermt2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_vpermt2var_hi_512( -; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[X0:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[X0]] to <32 x i5> ; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X4:%.*]], <32 x i16> [[TMP2]]) @@ -1415,8 +1415,8 @@ declare <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16>, <32 x i16>, <3 define <32 x i16>@test_int_x86_avx512_vpermi2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_vpermi2var_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[X1:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[X1:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X1]] to <32 x i5> ; CHECK-NEXT: [[TMP100:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x 
i16> [[TMP1]], <32 x i16> [[X3:%.*]], <32 x i16> [[TMP2]]) @@ -1438,9 +1438,9 @@ define <32 x i16>@test_int_x86_avx512_vpermi2var_hi_512(<32 x i16> %x0, <32 x i1 define <32 x i16>@test_int_x86_avx512_mask_vpermi2var_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_vpermi2var_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i16> [[TMP3]] to <32 x i5> ; CHECK-NEXT: [[TMP101:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> [[X1:%.*]], <32 x i16> [[TMP2]]) @@ -1474,7 +1474,7 @@ declare <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8>, <64 x i8>) define <64 x i8> @test_int_x86_avx512_pavg_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pavg_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -1488,9 +1488,9 @@ define <64 x i8> @test_int_x86_avx512_pavg_b_512(<64 x i8> %x0, <64 x i8> %x1, < define <64 x i8> @test_int_x86_avx512_mask_pavg_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <64 x i8> @llvm.x86.avx512.pavg.b.512(<64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) @@ -1516,7 +1516,7 @@ declare <32 x i16> 
@llvm.x86.avx512.pavg.w.512(<32 x i16>, <32 x i16>) define <32 x i16> @test_int_x86_avx512_pavg_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pavg_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pavg.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1530,9 +1530,9 @@ define <32 x i16> @test_int_x86_avx512_pavg_w_512(<32 x i16> %x0, <32 x i16> %x1 define <32 x i16> @test_int_x86_avx512_mask_pavg_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pavg.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1558,7 +1558,7 @@ declare <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8>, <64 x i8>) define <64 x i8>@test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pshuf_b_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP3]] @@ -1573,9 +1573,9 @@ define <64 x i8>@test_int_x86_avx512_pshuf_b_512(<64 x i8> %x0, <64 x i8> %x1) # define <64 x i8>@test_int_x86_avx512_pshuf_b_512_mask(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %mask) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pshuf_b_512_mask( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP10]] @@ -1600,8 +1600,8 @@ define <64 x i8>@test_int_x86_avx512_pshuf_b_512_mask(<64 x i8> %x0, <64 x i8> % define <64 x i8>@test_int_x86_avx512_pshuf_b_512_maskz(<64 x i8> %x0, <64 x i8> %x1, i64 %mask) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pshuf_b_512_maskz( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = call <64 x i8> @llvm.x86.avx512.pshuf.b.512(<64 x i8> [[TMP1]], <64 x i8> [[X1:%.*]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <64 x i8> [[TMP2]], [[TMP9]] @@ -1628,7 +1628,7 @@ declare <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16>, <32 x i16>) define <32 x i16> @test_int_x86_avx512_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmulhu_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1642,9 +1642,9 @@ define <32 x i16> @test_int_x86_avx512_pmulhu_w_512(<32 x i16> %x0, <32 x i16> % define <32 x i16> @test_int_x86_avx512_mask_pmulhu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmulhu_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: 
call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulhu.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1670,7 +1670,7 @@ declare <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16>, <32 x i16>) define <32 x i16> @test_int_x86_avx512_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmulh_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1684,9 +1684,9 @@ define <32 x i16> @test_int_x86_avx512_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x define <32 x i16> @test_int_x86_avx512_mask_pmulh_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmulh_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmulh.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1712,7 +1712,7 @@ declare <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16>, <32 x i16>) define <32 x i16> @test_int_x86_avx512_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmulhr_sw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1726,9 +1726,9 @@ define <32 x i16> @test_int_x86_avx512_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> define <32 x i16> @test_int_x86_avx512_mask_pmulhr_sw_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmulhr_sw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.pmul.hr.sw.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -1765,8 +1765,8 @@ define <32 x i8>@test_int_x86_avx512_pmov_wb_512(<32 x i16> %x0) #0 { define <32 x i8>@test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_wb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8> ; CHECK-NEXT: [[TMP4:%.*]] = trunc <32 x i16> [[X0:%.*]] to <32 x i8> @@ -1790,7 +1790,7 @@ define <32 x i8>@test_int_x86_avx512_mask_pmov_wb_512(<32 x i16> %x0, <32 x i8> define <32 x i8>@test_int_x86_avx512_maskz_pmov_wb_512(<32 x i16> %x0, i32 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmov_wb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8> ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[X0:%.*]] to <32 x i8> @@ -1816,8 +1816,8 @@ declare void @llvm.x86.avx512.mask.pmov.wb.mem.512(ptr %ptr, <32 x i16>, i32) define void @test_int_x86_avx512_mask_pmov_wb_mem_512(ptr %ptr, <32 x i16> %x1, i32 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmov_wb_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; 
CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP2]] to i512 @@ -1853,7 +1853,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmovs.wb.512(<32 x i16>, <32 x i8>, i32) define <32 x i8>@test_int_x86_avx512_pmovs_wb_512(<32 x i16> %x0, <32 x i8> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmovs_wb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8> ; CHECK-NEXT: [[TMP4:%.*]] = select <32 x i1> splat (i1 true), <32 x i8> [[TMP3]], <32 x i8> [[TMP2]] @@ -1867,9 +1867,9 @@ define <32 x i8>@test_int_x86_avx512_pmovs_wb_512(<32 x i16> %x0, <32 x i8> %x1) define <32 x i8>@test_int_x86_avx512_mask_pmovs_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_wb_512( -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8> @@ -1890,7 +1890,7 @@ define <32 x i8>@test_int_x86_avx512_mask_pmovs_wb_512(<32 x i16> %x0, <32 x i8> define <32 x i8>@test_int_x86_avx512_maskz_pmovs_wb_512(<32 x i16> %x0, i32 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovs_wb_512( -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1> @@ -1915,8 +1915,8 @@ declare void @llvm.x86.avx512.mask.pmovs.wb.mem.512(ptr %ptr, <32 x i16>, i32) define void @test_int_x86_avx512_mask_pmovs_wb_mem_512(ptr %ptr, <32 x i16> %x1, i32 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovs_wb_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP2]] to i512 @@ -1952,7 +1952,7 @@ declare <32 x i8> @llvm.x86.avx512.mask.pmovus.wb.512(<32 x 
i16>, <32 x i8>, i32 define <32 x i8>@test_int_x86_avx512_pmovus_wb_512(<32 x i16> %x0, <32 x i8> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmovus_wb_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8> ; CHECK-NEXT: [[TMP4:%.*]] = select <32 x i1> splat (i1 true), <32 x i8> [[TMP3]], <32 x i8> [[TMP2]] @@ -1966,9 +1966,9 @@ define <32 x i8>@test_int_x86_avx512_pmovus_wb_512(<32 x i16> %x0, <32 x i8> %x1 define <32 x i8>@test_int_x86_avx512_mask_pmovus_wb_512(<32 x i16> %x0, <32 x i8> %x1, i32 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_wb_512( -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = trunc <32 x i16> [[TMP1]] to <32 x i8> @@ -1989,7 +1989,7 @@ define <32 x i8>@test_int_x86_avx512_mask_pmovus_wb_512(<32 x i16> %x0, <32 x i8 define <32 x i8>@test_int_x86_avx512_maskz_pmovus_wb_512(<32 x i16> %x0, i32 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_pmovus_wb_512( -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[X2:%.*]] to <32 x i1> @@ -2014,8 +2014,8 @@ declare void @llvm.x86.avx512.mask.pmovus.wb.mem.512(ptr %ptr, <32 x i16>, i32) define void @test_int_x86_avx512_mask_pmovus_wb_mem_512(ptr %ptr, <32 x i16> %x1, i32 %x2) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmovus_wb_mem_512( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP2]] to i512 @@ -2051,7 +2051,7 @@ declare <32 x i16> @llvm.x86.avx512.pmaddubs.w.512(<64 x i8>, <64 x i8>) define <32 x i16> @test_int_x86_avx512_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %x1) #0 { ; CHECK-LABEL: 
@test_int_x86_avx512_pmaddubs_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <64 x i8> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer @@ -2077,9 +2077,9 @@ define <32 x i16> @test_int_x86_avx512_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> % define <32 x i16> @test_int_x86_avx512_mask_pmaddubs_w_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaddubs_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <64 x i8> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer @@ -2117,7 +2117,7 @@ declare <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16>, <32 x i16>) define <16 x i32> @test_int_x86_avx512_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_pmaddw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <32 x i16> [[TMP2]], zeroinitializer @@ -2143,9 +2143,9 @@ define <16 x i32> @test_int_x86_avx512_pmaddw_d_512(<32 x i16> %x0, <32 x i16> % define <16 x i32> @test_int_x86_avx512_mask_pmaddw_d_512(<32 x i16> %x0, <32 x i16> %x1, <16 x i32> %x2, i16 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_pmaddw_d_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 
192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <32 x i16> [[TMP2]], zeroinitializer @@ -2183,9 +2183,9 @@ declare <32 x i16> @llvm.x86.avx512.dbpsadbw.512(<64 x i8>, <64 x i8>, i32) define { <32 x i16>, <32 x i16>, <32 x i16> } @test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8> %x1, <32 x i16> %x3, i32 %x4) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_dbpsadbw_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <64 x i8> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP5]], 0 @@ -2293,7 +2293,7 @@ define <32 x i16> @test_x86_avx512_psrlv_w_512_const() optsize #0 { define <32 x i16>@test_int_x86_avx512_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_psrlv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2307,9 +2307,9 @@ define <32 x i16>@test_int_x86_avx512_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1) define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psrlv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], 
[[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2333,8 +2333,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psrlv32hi(<32 x i16> %x0, <32 x i16> define <32 x i16>@test_int_x86_avx512_maskz_psrlv32hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_psrlv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2360,7 +2360,7 @@ declare <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16>, <32 x i16>) define <32 x i16>@test_int_x86_avx512_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_psrav32_hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2374,9 +2374,9 @@ define <32 x i16>@test_int_x86_avx512_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1) define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psrav32_hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2400,8 +2400,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi(<32 x i16> %x0, <32 x i16> define <32 x i16>@test_int_x86_avx512_maskz_psrav32_hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_psrav32_hi( ; CHECK-NEXT: 
[[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2436,7 +2436,7 @@ define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi_const(<32 x i16> %x0, <32 define <32 x i16>@test_int_x86_avx512_psllv32hi(<32 x i16> %x0, <32 x i16> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_psllv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2450,9 +2450,9 @@ define <32 x i16>@test_int_x86_avx512_psllv32hi(<32 x i16> %x0, <32 x i16> %x1) define <32 x i16>@test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_psllv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2476,8 +2476,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psllv32hi(<32 x i16> %x0, <32 x i16> define <32 x i16>@test_int_x86_avx512_maskz_psllv32hi(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_psllv32hi( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, 
ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2503,7 +2503,7 @@ declare <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16>, <32 x i16>) define <32 x i16>@test_int_x86_avx512_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1) #0 { ; CHECK-LABEL: @test_int_x86_avx512_permvar_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2517,9 +2517,9 @@ define <32 x i16>@test_int_x86_avx512_permvar_hi_512(<32 x i16> %x0, <32 x i16> define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_mask_permvar_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) @@ -2543,8 +2543,8 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x define <32 x i16>@test_int_x86_avx512_maskz_permvar_hi_512(<32 x i16> %x0, <32 x i16> %x1, i32 %x3) #0 { ; CHECK-LABEL: @test_int_x86_avx512_maskz_permvar_hi_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.permvar.hi.512(<32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) 
@@ -2568,7 +2568,7 @@ define <32 x i16>@test_int_x86_avx512_maskz_permvar_hi_512(<32 x i16> %x0, <32 x define <32 x i16> @test_x86_avx512_psll_w_512(<32 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psll_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -2587,9 +2587,9 @@ define <32 x i16> @test_x86_avx512_psll_w_512(<32 x i16> %a0, <8 x i16> %a1) #0 define <32 x i16> @test_x86_avx512_mask_psll_w_512(<32 x i16> %a0, <8 x i16> %a1, <32 x i16> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psll_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -2618,8 +2618,8 @@ define <32 x i16> @test_x86_avx512_mask_psll_w_512(<32 x i16> %a0, <8 x i16> %a1 define <32 x i16> @test_x86_avx512_maskz_psll_w_512(<32 x i16> %a0, <8 x i16> %a1, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psll_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -2676,8 +2676,8 @@ define <32 x i16> @test_x86_avx512_pslli_w_512(<32 x i16> %a0) #0 { define <32 x i16> @test_x86_avx512_mask_pslli_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_pslli_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer @@ -2701,7 +2701,7 @@ define <32 x i16> @test_x86_avx512_mask_pslli_w_512(<32 x i16> %a0, <32 x i16> % define <32 x i16> @test_x86_avx512_maskz_pslli_w_512(<32 x i16> %a0, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_pslli_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP3]], zeroinitializer @@ -2728,7 +2728,7 @@ declare <32 x i16> @llvm.x86.avx512.pslli.w.512(<32 x i16>, i32) nounwind readno define <32 x i16> @test_x86_avx512_psra_w_512(<32 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psra_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -2747,9 +2747,9 @@ define <32 x i16> @test_x86_avx512_psra_w_512(<32 x i16> %a0, <8 x i16> %a1) #0 define <32 x i16> @test_x86_avx512_mask_psra_w_512(<32 x i16> %a0, <8 x i16> %a1, <32 x i16> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psra_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -2778,8 +2778,8 @@ define <32 x i16> @test_x86_avx512_mask_psra_w_512(<32 x i16> %a0, <8 x i16> %a1 define <32 x i16> @test_x86_avx512_maskz_psra_w_512(<32 x i16> %a0, <8 x i16> %a1, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psra_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -2824,8 +2824,8 @@ define <32 x i16> @test_x86_avx512_psrai_w_512(<32 x i16> %a0) #0 { define <32 x i16> @test_x86_avx512_mask_psrai_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrai_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer @@ -2849,7 +2849,7 @@ define <32 x i16> @test_x86_avx512_mask_psrai_w_512(<32 x i16> %a0, <32 x i16> % define <32 x i16> @test_x86_avx512_maskz_psrai_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrai_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP3]], zeroinitializer @@ -2876,7 +2876,7 @@ declare <32 x i16> @llvm.x86.avx512.psrai.w.512(<32 x i16>, i32) nounwind readno define <32 x i16> @test_x86_avx512_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx512_psrl_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -2895,9 +2895,9 @@ define <32 x i16> @test_x86_avx512_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1) #0 define <32 x i16> @test_x86_avx512_mask_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1, <32 x i16> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrl_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP6:%.*]] = trunc i128 [[TMP5]] to i64 @@ -2926,8 +2926,8 @@ define <32 x i16> @test_x86_avx512_mask_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1 define <32 x i16> @test_x86_avx512_maskz_psrl_w_512(<32 x i16> %a0, <8 x i16> %a1, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrl_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i128 [[TMP4]] to i64 @@ -2957,7 +2957,7 @@ declare <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16>, <8 x i16>) nounwind r define <32 x i16> @test_x86_avx512_psrl_w_512_load(<32 x i16> %a0, ptr %p) #0 { ; CHECK-LABEL: @test_x86_avx512_psrl_w_512_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -3003,8 +3003,8 @@ define <32 x i16> @test_x86_avx512_psrli_w_512(<32 x i16> %a0) #0 { define <32 x i16> @test_x86_avx512_mask_psrli_w_512(<32 x i16> %a0, <32 x i16> %passthru, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_mask_psrli_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP5:%.*]] = or <32 x i16> [[TMP4]], zeroinitializer @@ -3028,7 +3028,7 @@ define <32 x i16> @test_x86_avx512_mask_psrli_w_512(<32 x i16> %a0, <32 x i16> % define <32 x i16> 
@test_x86_avx512_maskz_psrli_w_512(<32 x i16> %a0, i32 %mask) #0 { ; CHECK-LABEL: @test_x86_avx512_maskz_psrli_w_512( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> [[TMP1]], i32 7) ; CHECK-NEXT: [[TMP4:%.*]] = or <32 x i16> [[TMP3]], zeroinitializer diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll index 69d49008e1b78..a79e293f54034 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-intrinsics.ll @@ -22,7 +22,7 @@ define <32 x half> @test_int_x86_avx512fp16_add_ph_512(<32 x half> %x1, <32 x ha ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_add_ph_512( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer @@ -37,9 +37,9 @@ define <32 x half> @test_int_x86_avx512fp16_add_ph_512(<32 x half> %x1, <32 x ha define <32 x half> @test_int_x86_avx512fp16_mask_add_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mask_add_ph_512( ; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> @@ -67,10 +67,10 @@ define <32 x half> @test_int_x86_avx512fp16_mask_add_ph_512(<32 x half> %src, <3 define <32 x half> @test_int_x86_avx512fp16_maskz_add_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_add_ph_512( ; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], 
i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -123,10 +123,10 @@ define <32 x half> @test_int_x86_avx512fp16_maskz_add_ph_512(<32 x half> %src, < define <32 x half> @test_int_x86_avx512fp16_add_ph_512_round(<32 x half> %x1, <32 x half> %x2, <32 x half> %src, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_add_ph_512_round( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], <32 x half> [[SRC:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -156,7 +156,7 @@ define <32 x half> @test_int_x86_avx512fp16_sub_ph_512(<32 x half> %x1, <32 x ha ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_sub_ph_512( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer @@ -171,9 +171,9 @@ define <32 x half> @test_int_x86_avx512fp16_sub_ph_512(<32 x half> %x1, <32 x ha define <32 x half> @test_int_x86_avx512fp16_mask_sub_ph_512(<32 x 
half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mask_sub_ph_512( ; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> @@ -201,10 +201,10 @@ define <32 x half> @test_int_x86_avx512fp16_mask_sub_ph_512(<32 x half> %src, <3 define <32 x half> @test_int_x86_avx512fp16_maskz_sub_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_sub_ph_512( ; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -257,10 +257,10 @@ define <32 x half> @test_int_x86_avx512fp16_maskz_sub_ph_512(<32 x half> %src, < define <32 x half> @test_int_x86_avx512fp16_sub_ph_512_round(<32 x half> %x1, <32 x half> %x2, <32 x half> %src, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_sub_ph_512_round( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], <32 x half> [[SRC:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -290,7 +290,7 @@ define <32 x half> @test_int_x86_avx512fp16_mul_ph_512(<32 x half> %x1, <32 x ha ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mul_ph_512( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer @@ -305,9 +305,9 @@ define <32 x half> @test_int_x86_avx512fp16_mul_ph_512(<32 x half> %x1, <32 x ha define <32 x half> @test_int_x86_avx512fp16_mask_mul_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mask_mul_ph_512( ; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> @@ -335,10 +335,10 @@ define <32 x half> @test_int_x86_avx512fp16_mask_mul_ph_512(<32 x half> %src, <3 define <32 x half> @test_int_x86_avx512fp16_maskz_mul_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_mul_ph_512( ; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; 
CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -391,10 +391,10 @@ define <32 x half> @test_int_x86_avx512fp16_maskz_mul_ph_512(<32 x half> %src, < define <32 x half> @test_int_x86_avx512fp16_mul_ph_512_round(<32 x half> %x1, <32 x half> %x2, <32 x half> %src, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mul_ph_512_round( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], <32 x half> [[SRC:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -424,7 +424,7 @@ define <32 x half> @test_int_x86_avx512fp16_div_ph_512(<32 x half> %x1, <32 x ha ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_div_ph_512( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer @@ -439,9 +439,9 @@ define <32 x half> @test_int_x86_avx512fp16_div_ph_512(<32 x half> %x1, <32 x ha define <32 x half> @test_int_x86_avx512fp16_mask_div_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_mask_div_ph_512( ; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, 
ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> @@ -469,10 +469,10 @@ define <32 x half> @test_int_x86_avx512fp16_mask_div_ph_512(<32 x half> %src, <3 define <32 x half> @test_int_x86_avx512fp16_maskz_div_ph_512(<32 x half> %src, <32 x half> %x1, <32 x half> %x2, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_div_ph_512( ; CHECK-SAME: <32 x half> [[SRC:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -525,10 +525,10 @@ define <32 x half> @test_int_x86_avx512fp16_maskz_div_ph_512(<32 x half> %src, < define <32 x half> @test_int_x86_avx512fp16_div_ph_512_round(<32 x half> %x1, <32 x half> %x2, <32 x half> %src, i32 %msk, ptr %ptr) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_div_ph_512_round( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], <32 x half> [[SRC:%.*]], i32 [[MSK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] 
= load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -558,7 +558,7 @@ define <32 x half> @test_min_ph(<32 x half> %x1, <32 x half> %x2) #0 { ; CHECK-LABEL: define <32 x half> @test_min_ph( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[_MSPROP]] to <32 x i1> @@ -583,7 +583,7 @@ define <32 x half> @test_int_x86_avx512fp16_min_ph_512_sae(<32 x half> %x1, <32 ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_min_ph_512_sae( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer @@ -598,9 +598,9 @@ define <32 x half> @test_int_x86_avx512fp16_min_ph_512_sae(<32 x half> %x1, <32 define <32 x half> @test_int_x86_avx512fp16_maskz_min_ph_512_sae(<32 x half> %x1, <32 x half> %x2, i32 %msk) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_min_ph_512_sae( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -629,7 +629,7 @@ define <32 x half> @test_max_ph(<32 x half> %x1, <32 x half> %x2) #0 { ; CHECK-LABEL: define <32 x half> @test_max_ph( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = trunc <32 x i16> [[_MSPROP]] to <32 x i1> @@ -654,7 +654,7 @@ define <32 x half> @test_int_x86_avx512fp16_max_ph_512_sae(<32 x half> %x1, <32 ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_max_ph_512_sae( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <32 x i16> [[_MSPROP]], zeroinitializer @@ -669,9 +669,9 @@ define <32 x half> @test_int_x86_avx512fp16_max_ph_512_sae(<32 x half> %x1, <32 define <32 x half> @test_int_x86_avx512fp16_maskz_max_ph_512_sae(<32 x half> %x1, <32 x half> %x2, i32 %msk) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512fp16_maskz_max_ph_512_sae( ; CHECK-SAME: <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[MSK:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[MSK]] to <32 x i1> @@ -700,8 +700,8 @@ define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd(<8 x half> %x0, <8 x do ; CHECK-LABEL: define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -727,8 +727,8 @@ define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_sae(<8 x half> %x0, <8 ; CHECK-LABEL: define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_sae( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -754,7 +754,7 @@ define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_nomask(<8 x half> %x0, ; CHECK-LABEL: define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x double> [[X1:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -778,8 +778,8 @@ define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_load(ptr %px0, <8 x dou ; CHECK-LABEL: define <8 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_load( ; CHECK-SAME: ptr [[PX0:%.*]], <8 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -819,8 +819,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph(<8 x double> %x0, <8 x ha ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph( ; CHECK-SAME: <8 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -846,8 +846,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_r(<8 x double> %x0, <8 x ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_r( ; CHECK-SAME: <8 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -873,8 +873,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_load(ptr %px0, <8 x half> ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_load( ; CHECK-SAME: ptr [[PX0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -914,9 +914,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round(<8 x half> %x0, ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -945,9 +945,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_r(<8 x half> %x0 ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_r( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint 
(ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -976,8 +976,8 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_nomask(<8 x half ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1004,8 +1004,8 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_z(<8 x half> %x0 ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_ss2sh_round_z( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1033,9 +1033,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round(<8 x half> %x0, ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round( ; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1064,9 +1064,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_r(<8 x half> %x0 ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_r( ; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1095,8 +1095,8 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_nomask(<8 x half ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1123,8 +1123,8 @@ define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_z(<8 x half> %x0 ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_cvt_sd2sh_round_z( ; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1152,9 
+1152,9 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round(<4 x float> %x0 ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round( ; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1183,9 +1183,9 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_r(<4 x float> % ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_r( ; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1214,8 +1214,8 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_nomask(<4 x flo ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_nomask( ; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], <4 x float> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1242,8 
+1242,8 @@ define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_z(<4 x float> % ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512fp16_mask_cvt_sh2ss_round_z( ; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1271,9 +1271,9 @@ define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round(<2 x double> % ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round( ; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1302,9 +1302,9 @@ define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_r(<2 x double> ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_r( ; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1333,8 +1333,8 @@ define <2 
x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_nomask(<2 x do ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_nomask( ; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], <2 x double> [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1361,8 +1361,8 @@ define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_z(<2 x double> ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512fp16_mask_cvt_sh2sd_round_z( ; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1404,8 +1404,8 @@ define <16 x float> @test_int_x86_avx512_mask_cvt_ph2psx_512(<16 x half> %x0, <1 ; CHECK-LABEL: define <16 x float> @test_int_x86_avx512_mask_cvt_ph2psx_512( ; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x float> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -1431,7 +1431,7 @@ define <16 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_512(<16 x half> %x0, i ; CHECK-LABEL: define <16 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_512( ; CHECK-SAME: <16 x half> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1468,8 +1468,8 @@ define <16 x float> @test_int_x86_avx512_mask_cvt_ph2psx_512r(<16 x half> %x0, < ; CHECK-LABEL: define <16 x float> @test_int_x86_avx512_mask_cvt_ph2psx_512r( ; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x float> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -1495,7 +1495,7 @@ define <16 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_512r(<16 x half> %x0, ; CHECK-LABEL: define <16 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_512r( ; CHECK-SAME: <16 x half> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1534,8 +1534,8 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512(<16 x float> %x0, <1 ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512( ; CHECK-SAME: <16 x float> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -1561,7 +1561,7 @@ define <16 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_512(<16 x float> %x0, i ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_512( ; CHECK-SAME: <16 x float> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; 
CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -1584,8 +1584,8 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512r(<16 x float> %x0, < ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512r( ; CHECK-SAME: <16 x float> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -1622,3 +1622,6 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_ps2phx_512r(<16 x float> %x0, < } attributes #0 = { sanitize_memory } +;. +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575} +;. diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll index e67e5e73134e9..c0ba3d599807f 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-arith-vl-intrinsics.ll @@ -32,7 +32,7 @@ define <16 x half> @test_int_x86_avx512fp16_add_ph_256(<16 x half> %x1, <16 x ha ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_add_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fadd <16 x half> [[X1]], [[X2]] @@ -46,11 +46,11 @@ define <16 x half> @test_int_x86_avx512fp16_add_ph_256(<16 x half> %x1, <16 x ha define <16 x half> @test_int_x86_avx512fp16_mask_add_ph_256(<16 x half> %x1, <16 x half> %x2, <16 x half> %src, i16 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mask_add_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], <16 x half> [[SRC:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: 
[[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1> @@ -103,9 +103,9 @@ define <16 x half> @test_int_x86_avx512fp16_mask_add_ph_256(<16 x half> %x1, <16 define <16 x half> @test_int_x86_avx512fp16_maskz_add_ph_256(<16 x half> %x1, <16 x half> %x2, i16 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_maskz_add_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1> @@ -131,7 +131,7 @@ define <8 x half> @test_int_x86_avx512fp16_add_ph_128(<8 x half> %x1, <8 x half> ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_add_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fadd <8 x half> [[X1]], [[X2]] @@ -145,11 +145,11 @@ define <8 x half> @test_int_x86_avx512fp16_add_ph_128(<8 x half> %x1, <8 x half> define <8 x half> @test_int_x86_avx512fp16_mask_add_ph_128(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_add_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x 
i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -202,9 +202,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_add_ph_128(<8 x half> %x1, <8 x define <8 x half> @test_int_x86_avx512fp16_maskz_add_ph_128(<8 x half> %x1, <8 x half> %x2, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_maskz_add_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -230,7 +230,7 @@ define <16 x half> @test_int_x86_avx512fp16_sub_ph_256(<16 x half> %x1, <16 x ha ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_sub_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fsub <16 x half> [[X1]], [[X2]] @@ -244,11 +244,11 @@ define <16 x half> @test_int_x86_avx512fp16_sub_ph_256(<16 x half> %x1, <16 x ha define <16 x half> @test_int_x86_avx512fp16_mask_sub_ph_256(<16 x half> %x1, <16 x half> %x2, <16 x half> %src, i16 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mask_sub_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], <16 x half> [[SRC:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1> @@ -301,9 +301,9 @@ define <16 x half> @test_int_x86_avx512fp16_mask_sub_ph_256(<16 x half> %x1, <16 define <16 x half> @test_int_x86_avx512fp16_maskz_sub_ph_256(<16 x half> %x1, <16 x half> %x2, i16 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_maskz_sub_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1> @@ -329,7 +329,7 @@ define <8 x half> @test_int_x86_avx512fp16_sub_ph_128(<8 x half> %x1, <8 x half> ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_sub_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fsub <8 x half> [[X1]], [[X2]] @@ -343,11 +343,11 @@ define <8 x half> @test_int_x86_avx512fp16_sub_ph_128(<8 x half> %x1, <8 x half> define <8 x half> @test_int_x86_avx512fp16_mask_sub_ph_128(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_sub_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -400,9 +400,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_sub_ph_128(<8 x half> %x1, <8 x define <8 x half> @test_int_x86_avx512fp16_maskz_sub_ph_128(<8 x half> %x1, <8 x half> %x2, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_maskz_sub_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -428,7 +428,7 @@ define <16 x half> @test_int_x86_avx512fp16_mul_ph_256(<16 x half> %x1, <16 x ha ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mul_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fmul <16 x half> [[X1]], [[X2]] @@ -442,11 +442,11 @@ define <16 x half> @test_int_x86_avx512fp16_mul_ph_256(<16 x half> %x1, <16 x ha define <16 x half> @test_int_x86_avx512fp16_mask_mul_ph_256(<16 x half> %x1, <16 x half> %x2, <16 x half> %src, i16 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mask_mul_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], <16 x half> [[SRC:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), 
i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1> @@ -499,9 +499,9 @@ define <16 x half> @test_int_x86_avx512fp16_mask_mul_ph_256(<16 x half> %x1, <16 define <16 x half> @test_int_x86_avx512fp16_maskz_mul_ph_256(<16 x half> %x1, <16 x half> %x2, i16 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_maskz_mul_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1> @@ -527,7 +527,7 @@ define <8 x half> @test_int_x86_avx512fp16_mul_ph_128(<8 x half> %x1, <8 x half> ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mul_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fmul <8 x half> [[X1]], [[X2]] @@ -541,11 +541,11 @@ define <8 x half> @test_int_x86_avx512fp16_mul_ph_128(<8 x half> %x1, <8 x half> define <8 x half> @test_int_x86_avx512fp16_mask_mul_ph_128(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_mul_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; 
CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -598,9 +598,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_mul_ph_128(<8 x half> %x1, <8 x define <8 x half> @test_int_x86_avx512fp16_maskz_mul_ph_128(<8 x half> %x1, <8 x half> %x2, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_maskz_mul_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -626,7 +626,7 @@ define <16 x half> @test_int_x86_avx512fp16_div_ph_256(<16 x half> %x1, <16 x ha ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_div_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fdiv <16 x half> [[X1]], [[X2]] @@ -641,7 +641,7 @@ define <16 x half> @test_int_x86_avx512fp16_div_ph_256_fast(<16 x half> %x1, <16 ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_div_ph_256_fast( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fdiv fast <16 x half> [[X1]], [[X2]] @@ -655,11 +655,11 @@ define <16 x half> @test_int_x86_avx512fp16_div_ph_256_fast(<16 x half> %x1, <16 define <16 x half> @test_int_x86_avx512fp16_mask_div_ph_256(<16 x half> %x1, <16 x half> %x2, <16 x half> %src, i16 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_mask_div_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], <16 x half> [[SRC:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
96) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1> @@ -712,9 +712,9 @@ define <16 x half> @test_int_x86_avx512fp16_mask_div_ph_256(<16 x half> %x1, <16 define <16 x half> @test_int_x86_avx512fp16_maskz_div_ph_256(<16 x half> %x1, <16 x half> %x2, i16 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512fp16_maskz_div_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]], i16 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i16 [[MASK]] to <16 x i1> @@ -740,7 +740,7 @@ define <8 x half> @test_int_x86_avx512fp16_div_ph_128(<8 x half> %x1, <8 x half> ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_div_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fdiv <8 x half> [[X1]], [[X2]] @@ -755,7 +755,7 @@ define <8 x half> @test_int_x86_avx512fp16_div_ph_128_fast(<8 x half> %x1, <8 x ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_div_ph_128_fast( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = fdiv fast <8 x half> [[X1]], [[X2]] @@ -769,11 +769,11 @@ define <8 x half> @test_int_x86_avx512fp16_div_ph_128_fast(<8 x half> %x1, <8 x define <8 x half> @test_int_x86_avx512fp16_mask_div_ph_128(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_div_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -826,9 +826,9 @@ define <8 x half> @test_int_x86_avx512fp16_mask_div_ph_128(<8 x half> %x1, <8 x define <8 x half> @test_int_x86_avx512fp16_maskz_div_ph_128(<8 x half> %x1, <8 x half> %x2, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_maskz_div_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MSK:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -854,7 +854,7 @@ define <16 x half> @test_min_ph_256(<16 x half> %x1, <16 x half> %x2) #0 { ; CHECK-LABEL: define <16 x half> @test_min_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = trunc <16 x i16> [[_MSPROP]] to <16 x i1> @@ -879,7 +879,7 @@ define <16 x half> @test_max_ph_256(<16 x half> %x1, <16 x half> %x2) #0 { ; CHECK-LABEL: define <16 x half> @test_max_ph_256( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = trunc <16 x i16> [[_MSPROP]] to <16 x i1> @@ -904,7 +904,7 @@ define <8 x half> @test_min_ph_128(<8 x half> %x1, <8 x half> %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_min_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i16> [[_MSPROP]] to <8 x i1> @@ -929,7 +929,7 @@ define <8 x half> @test_max_ph_128(<8 x half> %x1, <8 x half> %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_max_ph_128( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i16> [[_MSPROP]] to <8 x i1> @@ -957,7 +957,7 @@ define <8 x half> @test_max_ph_128_2(<8 x half> %x1, <8 x half> %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_max_ph_128_2( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES0:%.*]] = call <8 x half> @llvm.x86.avx512fp16.max.ph.128(<8 x half> [[X1]], <8 x half> [[X2]]) @@ -972,7 +972,7 @@ define <16 x half> @test_max_ph_256_2(<16 x half> %x1, <16 x half> %x2) #0 { ; CHECK-LABEL: define <16 x half> @test_max_ph_256_2( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES0:%.*]] = call <16 x half> @llvm.x86.avx512fp16.max.ph.256(<16 x half> [[X1]], <16 x half> [[X2]]) @@ -990,7 +990,7 @@ define <8 x half> @test_min_ph_128_2(<8 x half> %x1, <8 x half> %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_min_ph_128_2( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES0:%.*]] = call <8 x half> @llvm.x86.avx512fp16.min.ph.128(<8 x half> [[X1]], <8 x half> [[X2]]) @@ -1005,7 +1005,7 @@ define <16 x half> @test_min_ph_256_2(<16 x half> %x1, <16 x half> %x2) #0 { ; CHECK-LABEL: define <16 x half> @test_min_ph_256_2( ; CHECK-SAME: <16 x half> [[X1:%.*]], <16 x half> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES0:%.*]] = call <16 x half> @llvm.x86.avx512fp16.min.ph.256(<16 x half> [[X1]], <16 x half> [[X2]]) @@ -1022,8 +1022,8 @@ define <4 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_256(<8 x half> %x0, <4 ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_256( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1049,7 +1049,7 @@ define <4 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_256_nomask(<8 x half> % ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_256_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x double> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ 
-1075,8 +1075,8 @@ define <2 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_128(<8 x half> %x0, <2 ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_128( ; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1102,7 +1102,7 @@ define <2 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_128_nomask(<8 x half> % ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_vcvt_ph2pd_128_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <2 x double> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1128,8 +1128,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_256(<4 x double> %x0, <8 ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -1155,8 +1155,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_256_load(ptr %px0, <8 x h ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_256_load( ; CHECK-SAME: ptr [[PX0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call 
void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1196,8 +1196,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_128(<2 x double> %x0, <8 ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1223,8 +1223,8 @@ define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_128_load(ptr %px0, <8 x h ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_vcvt_pd2ph_128_load( ; CHECK-SAME: ptr [[PX0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -1278,8 +1278,8 @@ define <4 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_128(<8 x half> %x0, <4 x i ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_128( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1305,7 +1305,7 @@ define <4 x i32> @test_int_x86_avx512_maskz_cvt_ph2udq_128(<8 x half> %x0, i8 %x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_cvt_ph2udq_128( ; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1344,8 +1344,8 @@ define <8 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_256(<8 x half> %x0, <8 x i ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_256( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1371,7 +1371,7 @@ define <8 x i32> @test_int_x86_avx512_maskz_cvt_ph2udq_256(<8 x half> %x0, i8 %x ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_cvt_ph2udq_256( ; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1410,8 +1410,8 @@ define <4 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_128(<8 x half> %x0, <4 x i ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_128( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1437,7 +1437,7 @@ define <4 x i32> @test_int_x86_avx512_maskz_cvtt_ph2dq_128(<8 x half> %x0, i8 %x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_cvtt_ph2dq_128( ; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1476,8 +1476,8 @@ define <8 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_256(<8 x half> %x0, <8 x i ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_256( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1503,7 +1503,7 @@ define <8 x i32> @test_int_x86_avx512_maskz_cvtt_ph2dq_256(<8 x half> %x0, i8 %x ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_cvtt_ph2dq_256( ; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1542,8 +1542,8 @@ define <4 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_128(<8 x half> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_128( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1569,7 +1569,7 @@ define <4 x i32> @test_int_x86_avx512_maskz_cvtt_ph2udq_128(<8 x half> %x0, i8 % ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_cvtt_ph2udq_128( ; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1608,8 
+1608,8 @@ define <8 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_256(<8 x half> %x0, <8 x ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_256( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1635,7 +1635,7 @@ define <8 x i32> @test_int_x86_avx512_maskz_cvtt_ph2udq_256(<8 x half> %x0, i8 % ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_cvtt_ph2udq_256( ; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1674,8 +1674,8 @@ define <4 x float> @test_int_x86_avx512_mask_cvt_ph2psx_128(<8 x half> %x0, <4 x ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_cvt_ph2psx_128( ; CHECK-SAME: <8 x half> [[X0:%.*]], <4 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1701,7 +1701,7 @@ define <4 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_128(<8 x half> %x0, i8 ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_128( ; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1740,8 +1740,8 @@ define <8 x float> @test_int_x86_avx512_mask_cvt_ph2psx_256(<8 x half> %x0, <8 x ; CHECK-LABEL: define <8 x float> 
@test_int_x86_avx512_mask_cvt_ph2psx_256( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1767,7 +1767,7 @@ define <8 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_256(<8 x half> %x0, i8 ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_maskz_cvt_ph2psx_256( ; CHECK-SAME: <8 x half> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1792,8 +1792,8 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_ps2phx_128(<4 x float> %x0, <8 x ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_ps2phx_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1849,8 +1849,8 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_ps2phx_256(<8 x float> %x0, <8 x ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_ps2phx_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -1876,7 
+1876,7 @@ define <8 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_256(<8 x float> %x0, i8 ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1896,3 +1896,6 @@ define <8 x half> @test_int_x86_avx512_maskz_cvt_ps2phx_256(<8 x float> %x0, i8 } attributes #0 = { sanitize_memory } +;. +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575} +;. diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll index e5cbe8c132238..e5d1af3841f10 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512fp16-intrinsics.ll @@ -17,7 +17,6 @@ ; - llvm.x86.avx512fp16.mask.rcp.sh ; - llvm.x86.avx512fp16.mask.reduce.ph.512 ; - llvm.x86.avx512fp16.mask.reduce.sh -; - llvm.x86.avx512fp16.mask.rndscale.ph.512 ; - llvm.x86.avx512fp16.mask.rndscale.sh ; - llvm.x86.avx512fp16.mask.rsqrt.sh ; - llvm.x86.avx512fp16.mask.scalef.ph.512 @@ -62,7 +61,7 @@ define i32 @test_x86_avx512fp16_ucomi_sh_lt(<8 x half> %a0, <8 x half> %a1) #0 { ; CHECK-LABEL: define i32 @test_x86_avx512fp16_ucomi_sh_lt( ; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -101,7 +100,7 @@ define <32 x half> @test_sqrt_ph_512_fast(<32 x half> %a0, <32 x half> %a1) #0 { ; CHECK-LABEL: define <32 x half> @test_sqrt_ph_512_fast( ; CHECK-SAME: <32 x half> [[A0:%.*]], <32 x half> [[A1:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call fast <32 x half> @llvm.sqrt.v32f16(<32 x half> [[A0]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP2]], [[TMP1]] @@ -146,8 +145,8 @@ define <32 x half> @test_mask_sqrt_ph_512(<32 x half> %a0, <32 x half> %passthru ; CHECK-LABEL: define <32 x half> @test_mask_sqrt_ph_512( ; CHECK-SAME: <32 x half> [[A0:%.*]], <32 x half> [[PASSTHRU:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = 
load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x half> @llvm.sqrt.v32f16(<32 x half> [[A0]]) ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP2]] to <32 x i1> @@ -173,7 +172,7 @@ define <32 x half> @test_maskz_sqrt_ph_512(<32 x half> %a0, i32 %mask) #0 { ; CHECK-LABEL: define <32 x half> @test_maskz_sqrt_ph_512( ; CHECK-SAME: <32 x half> [[A0:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call <32 x half> @llvm.sqrt.v32f16(<32 x half> [[A0]]) ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[TMP2]] to <32 x i1> @@ -220,8 +219,8 @@ define <32 x half> @test_mask_sqrt_round_ph_512(<32 x half> %a0, <32 x half> %pa ; CHECK-LABEL: define <32 x half> @test_mask_sqrt_round_ph_512( ; CHECK-SAME: <32 x half> [[A0:%.*]], <32 x half> [[PASSTHRU:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -254,7 +253,7 @@ define <32 x half> @test_maskz_sqrt_round_ph_512(<32 x half> %a0, i32 %mask) #0 ; CHECK-LABEL: define <32 x half> @test_maskz_sqrt_round_ph_512( ; CHECK-SAME: <32 x half> [[A0:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <32 x i16> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP3]], 0 @@ -288,9 +287,9 @@ define <8 x half> @test_sqrt_sh(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, ; CHECK-LABEL: define <8 x half> @test_sqrt_sh( ; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 
8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -319,7 +318,7 @@ define half @test_sqrt_sh2(half %a0, half %a1) #0 { ; CHECK-LABEL: define half @test_sqrt_sh2( ; CHECK-SAME: half [[A0:%.*]], half [[A1:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call fast half @llvm.sqrt.f16(half [[A0]]) ; CHECK-NEXT: [[_MSPROP:%.*]] = or i16 [[TMP2]], [[TMP1]] @@ -351,9 +350,9 @@ define <8 x half> @test_sqrt_sh_r(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2 ; CHECK-LABEL: define <8 x half> @test_sqrt_sh_r( ; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -382,8 +381,8 @@ define <8 x half> @test_sqrt_sh_nomask(<8 x half> %a0, <8 x half> %a1, <8 x half ; CHECK-LABEL: define <8 x half> @test_sqrt_sh_nomask( ; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -410,8 +409,8 @@ define <8 x half> @test_sqrt_sh_z(<8 x half> 
%a0, <8 x half> %a1, <8 x half> %a2 ; CHECK-LABEL: define <8 x half> @test_sqrt_sh_z( ; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -456,7 +455,7 @@ define <8 x half> @test_rsqrt_sh(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) ; CHECK-LABEL: define <8 x half> @test_rsqrt_sh( ; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[A1:%.*]], <8 x half> [[A2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -482,7 +481,7 @@ define <8 x half> @test_rsqrt_sh(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) define <8 x half> @test_rsqrt_sh_load(<8 x half> %a0, ptr %a1ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_rsqrt_sh_load( ; CHECK-SAME: <8 x half> [[A0:%.*]], ptr [[A1PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -515,7 +514,7 @@ define <8 x half> @test_rsqrt_sh_maskz(<8 x half> %a0, i8 %mask) #0 { ; CHECK-LABEL: define <8 x half> @test_rsqrt_sh_maskz( ; CHECK-SAME: <8 x half> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -541,9 +540,9 @@ define <8 x half> @test_rsqrt_sh_mask(<8 x half> %a0, <8 x half> %b0, <8 x half> ; CHECK-LABEL: define <8 x half> @test_rsqrt_sh_mask( ; CHECK-SAME: <8 x half> [[A0:%.*]], <8 x half> [[B0:%.*]], <8 x half> [[C0:%.*]], i8 [[MASK:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = 
load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -676,9 +675,9 @@ declare <32 x half> @llvm.x86.avx512fp16.mask.rcp.ph.512(<32 x half>, <32 x half define <32 x half> @test_rcp_ph_512(<32 x half> %a0, <32 x half> %a1, i32 %mask) #0 { ; CHECK-LABEL: define <32 x half> @test_rcp_ph_512( ; CHECK-SAME: <32 x half> [[A0:%.*]], <32 x half> [[A1:%.*]], i32 [[MASK:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[MASK]] to <32 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer @@ -726,7 +725,7 @@ define <8 x half> @test_rcp_sh(<8 x half> %a0) #0 { define <8 x half> @test_rcp_sh_load(<8 x half> %a0, ptr %a1ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_rcp_sh_load( ; CHECK-SAME: <8 x half> [[A0:%.*]], ptr [[A1PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -765,8 +764,8 @@ define <32 x half>@test_int_x86_avx512_mask_reduce_ph_512(<32 x half> %x0, <32 x ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_reduce_ph_512( ; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X2:%.*]], i32 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -808,9 +807,9 @@ define <8 x half>@test_int_x86_avx512_mask_reduce_sh(<8 x half> %x0, 
<8 x half> ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_reduce_sh( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -839,8 +838,8 @@ define <8 x half>@test_int_x86_avx512_mask_reduce_sh_nomask(<8 x half> %x0, <8 x ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_reduce_sh_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -868,36 +867,28 @@ declare <32 x half> @llvm.x86.avx512fp16.mask.rndscale.ph.512(<32 x half>, i32, define <32 x half>@test_int_x86_avx512_mask_rndscale_ph_512(<32 x half> %x0, <32 x half> %x2, i32 %x3) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_rndscale_ph_512( ; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X2:%.*]], i32 [[X3:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP1]] to i512 -; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = bitcast <32 x i16> [[TMP2]] to i512 -; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i512 [[TMP5]], 0 -; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32 [[X3]] to <32 x i1> +; CHECK-NEXT: [[TMP5:%.*]] = 
icmp ne <32 x i16> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = sext <32 x i1> [[TMP5]] to <32 x i16> +; CHECK-NEXT: [[TMP7:%.*]] = select <32 x i1> [[TMP4]], <32 x i16> [[TMP6]], <32 x i16> [[TMP2]] ; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i32 [[TMP3]], 0 -; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]] -; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: +; CHECK-NEXT: br i1 [[_MSCMP2]], label %[[BB8:.*]], label %[[BB9:.*]], !prof [[PROF1]] +; CHECK: [[BB8]]: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]] ; CHECK-NEXT: unreachable -; CHECK: [[BB7]]: +; CHECK: [[BB9]]: ; CHECK-NEXT: [[RES:%.*]] = call <32 x half> @llvm.x86.avx512fp16.mask.rndscale.ph.512(<32 x half> [[X0]], i32 8, <32 x half> [[X2]], i32 [[X3]], i32 4) -; CHECK-NEXT: [[TMP8:%.*]] = bitcast <32 x i16> [[TMP1]] to i512 -; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i512 [[TMP8]], 0 -; CHECK-NEXT: [[TMP9:%.*]] = bitcast <32 x i16> [[TMP2]] to i512 -; CHECK-NEXT: [[_MSCMP5:%.*]] = icmp ne i512 [[TMP9]], 0 -; CHECK-NEXT: [[_MSOR6:%.*]] = or i1 [[_MSCMP4]], [[_MSCMP5]] -; CHECK-NEXT: br i1 [[_MSOR6]], label %[[BB10:.*]], label %[[BB11:.*]], !prof [[PROF1]] -; CHECK: [[BB10]]: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR8]] -; CHECK-NEXT: unreachable -; CHECK: [[BB11]]: +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i16> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = sext <32 x i1> [[TMP10]] to <32 x i16> +; CHECK-NEXT: [[TMP12:%.*]] = select <32 x i1> splat (i1 true), <32 x i16> [[TMP11]], <32 x i16> [[TMP2]] ; CHECK-NEXT: [[RES1:%.*]] = call <32 x half> @llvm.x86.avx512fp16.mask.rndscale.ph.512(<32 x half> [[X0]], i32 4, <32 x half> [[X2]], i32 -1, i32 8) +; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i16> [[TMP7]], [[TMP12]] ; CHECK-NEXT: [[RES2:%.*]] = fadd <32 x half> [[RES]], [[RES1]] -; CHECK-NEXT: store <32 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store <32 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <32 x half> [[RES2]] ; %res = call <32 x half> @llvm.x86.avx512fp16.mask.rndscale.ph.512(<32 x half> %x0, i32 8, <32 x half> %x2, i32 %x3, i32 4) @@ -912,9 +903,9 @@ define <8 x half>@test_int_x86_avx512_mask_rndscale_sh(<8 x half> %x0, <8 x half ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_rndscale_sh( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -943,8 +934,8 @@ define <8 x half>@test_int_x86_avx512_mask_rndscale_sh_nomask(<8 x half> 
%x0, <8 ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_rndscale_sh_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -973,8 +964,8 @@ define <32 x half>@test_int_x86_avx512_mask_getexp_ph_512(<32 x half> %x0, <32 x ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_getexp_ph_512( ; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X1:%.*]], i32 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -1013,9 +1004,9 @@ define <8 x half>@test_int_x86_avx512_mask_getexp_sh(<8 x half> %x0, <8 x half> ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getexp_sh( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1044,8 +1035,8 @@ define <8 x half>@test_int_x86_avx512_mask_getexp_sh_nomask(<8 x half> %x0, <8 x ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getexp_sh_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x 
i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1071,7 +1062,7 @@ define <8 x half>@test_int_x86_avx512_mask_getexp_sh_nomask(<8 x half> %x0, <8 x define <8 x half>@test_int_x86_avx512_mask_getexp_sh_load(<8 x half> %x0, ptr %x1ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getexp_sh_load( ; CHECK-SAME: <8 x half> [[X0:%.*]], ptr [[X1PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -1106,8 +1097,8 @@ define <32 x half>@test_int_x86_avx512_mask_getmant_ph_512(<32 x half> %x0, <32 ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_getmant_ph_512( ; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X2:%.*]], i32 [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i16> [[TMP1]] to i512 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0 @@ -1149,9 +1140,9 @@ define <8 x half>@test_int_x86_avx512_mask_getmant_sh(<8 x half> %x0, <8 x half> ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getmant_sh( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to 
i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1180,8 +1171,8 @@ define <8 x half>@test_int_x86_avx512_mask_getmant_sh_nomask(<8 x half> %x0, <8 ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getmant_sh_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1208,8 +1199,8 @@ define <8 x half>@test_int_x86_avx512_mask_getmant_sh_z(<8 x half> %x0, <8 x hal ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_getmant_sh_z( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1236,10 +1227,10 @@ declare <32 x half> @llvm.x86.avx512fp16.mask.scalef.ph.512(<32 x half>, <32 x h define <32 x half>@test_int_x86_avx512_mask_scalef_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3) #0 { ; CHECK-LABEL: define <32 x half> @test_int_x86_avx512_mask_scalef_ph_512( ; CHECK-SAME: <32 x half> [[X0:%.*]], <32 x half> [[X1:%.*]], <32 x half> [[X2:%.*]], i32 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP1]] to <32 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i32 [[X3]] to <32 x i1> @@ -1287,9 +1278,9 @@ define <8 x half>@test_int_x86_avx512_mask_scalef_sh(<8 x half> %x0, <8 x 
half> ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_scalef_sh( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]], i8 [[X4:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -1318,8 +1309,8 @@ define <8 x half>@test_int_x86_avx512_mask_scalef_sh_nomask(<8 x half> %x0, <8 x ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_scalef_sh_nomask( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], <8 x half> [[X3:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1345,7 +1336,7 @@ define <8 x half>@test_int_x86_avx512_mask_scalef_sh_nomask(<8 x half> %x0, <8 x define <8 x half>@test_int_x86_avx512_mask_scalef_sh_load(<8 x half> %x0, ptr %x1ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_scalef_sh_load( ; CHECK-SAME: <8 x half> [[X0:%.*]], ptr [[X1PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -1379,11 +1370,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.add.sh.round(<8 x half>, <8 x half> define <8 x half> @test_int_x86_avx512fp16_mask_add_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_add_sh( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -1465,11 +1456,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.sub.sh.round(<8 x half>, <8 x half> define <8 x half> @test_int_x86_avx512fp16_mask_sub_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_sub_sh( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -1551,11 +1542,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.mul.sh.round(<8 x half>, <8 x half> define <8 x half> @test_int_x86_avx512fp16_mask_mul_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_mul_sh( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -1637,11 +1628,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.div.sh.round(<8 x half>, <8 x half> define <8 x half> @test_int_x86_avx512fp16_mask_div_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_div_sh( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -1723,11 +1714,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.min.sh.round(<8 x half>, <8 x half> define <8 x half> @test_int_x86_avx512fp16_mask_min_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_min_sh( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -1809,11 +1800,11 @@ declare <8 x half> @llvm.x86.avx512fp16.mask.max.sh.round(<8 x half>, <8 x half> define <8 x half> @test_int_x86_avx512fp16_mask_max_sh(<8 x half> %x1, <8 x half> %x2, <8 x half> %src, i8 %mask, ptr %ptr) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512fp16_mask_max_sh( ; CHECK-SAME: <8 x half> [[X1:%.*]], <8 x half> [[X2:%.*]], <8 x half> [[SRC:%.*]], i8 [[MASK:%.*]], ptr [[PTR:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -1896,8 +1887,8 @@ define i8 @test_int_x86_avx512_mask_cmp_sh(<8 x half> %x0, <8 x half> %x1, i8 %x ; CHECK-LABEL: define i8 @test_int_x86_avx512_mask_cmp_sh( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X3:%.*]], i32 [[X4:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -1924,8 +1915,8 @@ define i8 @test_int_x86_avx512_mask_cmp_sh_all(<8 x half> %x0, <8 x half> %x1, i ; CHECK-LABEL: define i8 @test_int_x86_avx512_mask_cmp_sh_all( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X3:%.*]], i32 [[X4:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, 
align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -2010,9 +2001,9 @@ declare <16 x half> @llvm.x86.avx512.sitofp.round.v16f16.v16i32(<16 x i32>, i32) define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512(<16 x i32> %x0, <16 x half> %x1, i16 %x2) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i16 [[X2]] to <16 x i1> @@ -2044,9 +2035,9 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512(<16 x i32> %x0, <16 x define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_r(<16 x i32> %x0, <16 x half> %x1, i16 %x2) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_r( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i16 [[X2]] to <16 x i1> @@ -2098,7 +2089,7 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_nomask(<16 x i32> %x0 define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_z(<16 x i32> %x0, i16 %x2) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_dq2ph_512_z( ; CHECK-SAME: <16 x i32> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() 
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -2146,9 +2137,9 @@ declare <16 x half> @llvm.x86.avx512.uitofp.round.v16f16.v16i32(<16 x i32>, i32) define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_r(<16 x i32> %x0, <16 x half> %x1, i16 %x2) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_r( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x half> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i16 [[X2]] to <16 x i1> @@ -2200,7 +2191,7 @@ define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_nomask(<16 x i32> %x define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_z(<16 x i32> %x0, i16 %x2) #0 { ; CHECK-LABEL: define <16 x half> @test_int_x86_avx512_mask_cvt_udq2ph_512_z( ; CHECK-SAME: <16 x i32> [[X0:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i16 [[TMP1]] to <16 x i1> @@ -2249,8 +2240,8 @@ define <16 x i32> @test_int_x86_avx512_mask_cvt_ph2dq_512(<16 x half> %x0, <16 x ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_mask_cvt_ph2dq_512( ; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x i32> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -2292,8 +2283,8 @@ define <16 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_512(<16 x half> %x0, <16 ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_mask_cvt_ph2udq_512( ; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x i32> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -2335,8 +2326,8 @@ define <16 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_512(<16 x half> %x0, <16 ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_mask_cvtt_ph2dq_512( ; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x i32> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -2378,8 +2369,8 @@ define <16 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_512(<16 x half> %x0, <16 ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_mask_cvtt_ph2udq_512( ; CHECK-SAME: <16 x half> [[X0:%.*]], <16 x i32> [[X1:%.*]], i16 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -2420,9 +2411,9 @@ declare <8 x half> @llvm.x86.avx512.sitofp.round.v8f16.v8i64(<8 x i64>, i32) define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512(<8 x i64> %x0, <8 x half> %x1, i8 %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512( ; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i8 [[X2]] to <8 x i1> @@ -2454,9 +2445,9 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512(<8 x i64> %x0, <8 x ha define <8 x half> 
@test_int_x86_avx512_mask_cvt_qq2ph_512_r(<8 x i64> %x0, <8 x half> %x1, i8 %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512_r( ; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i8 [[X2]] to <8 x i1> @@ -2508,7 +2499,7 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512_nomask(<8 x i64> %x0, define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512_z(<8 x i64> %x0, i8 %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_qq2ph_512_z( ; CHECK-SAME: <8 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -2542,9 +2533,9 @@ declare <8 x half> @llvm.x86.avx512.uitofp.round.v8f16.v8i64(<8 x i64>, i32) define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512(<8 x i64> %x0, <8 x half> %x1, i8 %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512( ; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i8 [[X2]] to <8 x i1> @@ -2576,9 +2567,9 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512(<8 x i64> %x0, <8 x h define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_r(<8 x i64> %x0, <8 x half> %x1, i8 %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_r( ; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x half> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> ; CHECK-NEXT: [[MASK:%.*]] = bitcast i8 [[X2]] to <8 x i1> @@ -2630,7 +2621,7 @@ define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_nomask(<8 x i64> %x0, define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_z(<8 x i64> %x0, i8 %x2) #0 { ; CHECK-LABEL: define <8 x half> @test_int_x86_avx512_mask_cvt_uqq2ph_512_z( ; CHECK-SAME: <8 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP1]] to <8 x i1> @@ -2665,8 +2656,8 @@ define <8 x i64> @test_int_x86_avx512_mask_cvt_ph2qq_512(<8 x half> %x0, <8 x i6 ; CHECK-LABEL: define <8 x i64> @test_int_x86_avx512_mask_cvt_ph2qq_512( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -2708,8 +2699,8 @@ define <8 x i64> @test_int_x86_avx512_mask_cvt_ph2uqq_512(<8 x half> %x0, <8 x i ; CHECK-LABEL: define <8 x i64> @test_int_x86_avx512_mask_cvt_ph2uqq_512( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -2751,8 +2742,8 @@ define <8 x i64> @test_int_x86_avx512_mask_cvtt_ph2uqq_512(<8 x half> %x0, <8 x ; CHECK-LABEL: define <8 x i64> @test_int_x86_avx512_mask_cvtt_ph2uqq_512( ; CHECK-SAME: <8 x half> [[X0:%.*]], <8 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: 
[[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -3060,7 +3051,7 @@ define <8 x half> @test_x86_avx512fp16_vcvtsi2sh(<8 x half> %arg0, i32 %arg1) #0 ; CHECK-LABEL: define <8 x half> @test_x86_avx512fp16_vcvtsi2sh( ; CHECK-SAME: <8 x half> [[ARG0:%.*]], i32 [[ARG1:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -3098,7 +3089,7 @@ define <8 x half> @test_x86_avx512fp16_vcvtsi642sh(<8 x half> %arg0, i64 %arg1) ; CHECK-LABEL: define <8 x half> @test_x86_avx512fp16_vcvtsi642sh( ; CHECK-SAME: <8 x half> [[ARG0:%.*]], i64 [[ARG1:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -3136,7 +3127,7 @@ define <8 x half> @test_x86_avx512fp16_vcvtusi2sh(<8 x half> %arg0, i32 %arg1) # ; CHECK-LABEL: define <8 x half> @test_x86_avx512fp16_vcvtusi2sh( ; CHECK-SAME: <8 x half> [[ARG0:%.*]], i32 [[ARG1:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -3174,7 +3165,7 @@ define <8 x half> @test_x86_avx512fp16_vcvtusi642sh(<8 x half> %arg0, i64 %arg1) ; CHECK-LABEL: define <8 x half> @test_x86_avx512fp16_vcvtusi642sh( ; CHECK-SAME: <8 x half> [[ARG0:%.*]], i64 [[ARG1:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll index 20114fe7d3151..f20d368e9abbc 100644 --- 
a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl-intrinsics.ll @@ -54,8 +54,6 @@ ; - llvm.x86.avx512.mask.pmovus.qd.mem.128, llvm.x86.avx512.mask.pmovus.qd.mem.256 ; - llvm.x86.avx512.mask.pmovus.qw.128, llvm.x86.avx512.mask.pmovus.qw.256 ; - llvm.x86.avx512.mask.pmovus.qw.mem.128, llvm.x86.avx512.mask.pmovus.qw.mem.256 -; - llvm.x86.avx512.mask.rndscale.pd.128, llvm.x86.avx512.mask.rndscale.pd.256 -; - llvm.x86.avx512.mask.rndscale.ps.128, llvm.x86.avx512.mask.rndscale.ps.256 ; - llvm.x86.avx512.mask.scalef.pd.128, llvm.x86.avx512.mask.scalef.pd.256 ; - llvm.x86.avx512.mask.scalef.ps.128, llvm.x86.avx512.mask.scalef.ps.256 ; - llvm.x86.avx512.maskz.fixupimm.pd.128, llvm.x86.avx512.maskz.fixupimm.pd.256 @@ -73,9 +71,9 @@ define <2 x double> @test_mask_compress_pd_128(<2 x double> %data, <2 x double> ; ; CHECK-LABEL: define <2 x double> @test_mask_compress_pd_128( ; CHECK-SAME: <2 x double> [[DATA:%.*]], <2 x double> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -108,7 +106,7 @@ define <2 x double> @test_maskz_compress_pd_128(<2 x double> %data, i8 %mask) #0 ; ; CHECK-LABEL: define <2 x double> @test_maskz_compress_pd_128( ; CHECK-SAME: <2 x double> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -139,7 +137,7 @@ define <2 x double> @test_compress_pd_128(<2 x double> %data, <2 x double> %data ; CHECK-LABEL: define <2 x double> @test_compress_pd_128( ; CHECK-SAME: <2 x double> [[DATA:%.*]], <2 x double> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -163,9 +161,9 @@ define <4 x float> @test_mask_compress_ps_128(<4 x float> %data, <4 x float> %pa ; ; CHECK-LABEL: define <4 x float> @test_mask_compress_ps_128( ; CHECK-SAME: <4 x float> [[DATA:%.*]], <4 x float> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to 
ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -198,7 +196,7 @@ define <4 x float> @test_maskz_compress_ps_128(<4 x float> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x float> @test_maskz_compress_ps_128( ; CHECK-SAME: <4 x float> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -229,7 +227,7 @@ define <4 x float> @test_compress_ps_128(<4 x float> %data, <4 x float> %data2) ; CHECK-LABEL: define <4 x float> @test_compress_ps_128( ; CHECK-SAME: <4 x float> [[DATA:%.*]], <4 x float> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -253,9 +251,9 @@ define <2 x i64> @test_mask_compress_q_128(<2 x i64> %data, <2 x i64> %passthru, ; ; CHECK-LABEL: define <2 x i64> @test_mask_compress_q_128( ; CHECK-SAME: <2 x i64> [[DATA:%.*]], <2 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -288,7 +286,7 @@ define <2 x i64> @test_maskz_compress_q_128(<2 x i64> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <2 x i64> @test_maskz_compress_q_128( ; CHECK-SAME: <2 x i64> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -319,7 +317,7 @@ define <2 x i64> @test_compress_q_128(<2 x i64> %data, <2 x i64> %data2) #0 { ; CHECK-LABEL: define <2 x i64> @test_compress_q_128( ; CHECK-SAME: <2 x i64> [[DATA:%.*]], <2 x i64> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -343,9 +341,9 @@ define <4 x i32> @test_mask_compress_d_128(<4 x i32> %data, <4 x i32> %passthru, ; ; CHECK-LABEL: define <4 x i32> @test_mask_compress_d_128( ; CHECK-SAME: <4 x i32> [[DATA:%.*]], <4 x i32> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -378,7 +376,7 @@ define <4 x i32> @test_maskz_compress_d_128(<4 x i32> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x i32> @test_maskz_compress_d_128( ; CHECK-SAME: <4 x i32> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -409,7 +407,7 @@ define <4 x i32> @test_compress_d_128(<4 x i32> %data, <4 x i32> %data2) #0 { ; CHECK-LABEL: define <4 x i32> @test_compress_d_128( ; CHECK-SAME: <4 x i32> [[DATA:%.*]], <4 x i32> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -433,7 +431,7 @@ define <2 x double> @test_expand_pd_128(<2 x double> %data, <2 x double> %data2) ; CHECK-LABEL: define <2 x double> @test_expand_pd_128( ; CHECK-SAME: <2 x double> [[DATA:%.*]], <2 x double> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; 
CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -457,9 +455,9 @@ define <2 x double> @test_mask_expand_pd_128(<2 x double> %data, <2 x double> %p ; ; CHECK-LABEL: define <2 x double> @test_mask_expand_pd_128( ; CHECK-SAME: <2 x double> [[DATA:%.*]], <2 x double> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -492,7 +490,7 @@ define <2 x double> @test_maskz_expand_pd_128(<2 x double> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <2 x double> @test_maskz_expand_pd_128( ; CHECK-SAME: <2 x double> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -523,7 +521,7 @@ define <4 x float> @test_expand_ps_128(<4 x float> %data, <4 x float> %data2) #0 ; CHECK-LABEL: define <4 x float> @test_expand_ps_128( ; CHECK-SAME: <4 x float> [[DATA:%.*]], <4 x float> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -547,9 +545,9 @@ define <4 x float> @test_mask_expand_ps_128(<4 x float> %data, <4 x float> %pass ; ; CHECK-LABEL: define <4 x float> @test_mask_expand_ps_128( ; CHECK-SAME: <4 x float> [[DATA:%.*]], <4 x float> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; 
CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -582,7 +580,7 @@ define <4 x float> @test_maskz_expand_ps_128(<4 x float> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x float> @test_maskz_expand_ps_128( ; CHECK-SAME: <4 x float> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -613,7 +611,7 @@ define <2 x i64> @test_expand_q_128(<2 x i64> %data, <2 x i64> %data2) #0 { ; CHECK-LABEL: define <2 x i64> @test_expand_q_128( ; CHECK-SAME: <2 x i64> [[DATA:%.*]], <2 x i64> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -637,9 +635,9 @@ define <2 x i64> @test_mask_expand_q_128(<2 x i64> %data, <2 x i64> %passthru, i ; ; CHECK-LABEL: define <2 x i64> @test_mask_expand_q_128( ; CHECK-SAME: <2 x i64> [[DATA:%.*]], <2 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -672,7 +670,7 @@ define <2 x i64> @test_maskz_expand_q_128(<2 x i64> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <2 x i64> @test_maskz_expand_q_128( ; CHECK-SAME: <2 x i64> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -703,7 +701,7 @@ define <4 x i32> @test_expand_d_128(<4 x i32> %data, <4 x i32> %data2) #0 { ; CHECK-LABEL: define <4 x i32> @test_expand_d_128( ; CHECK-SAME: <4 x i32> [[DATA:%.*]], <4 x i32> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -727,9 +725,9 @@ define <4 x i32> @test_mask_expand_d_128(<4 x i32> %data, <4 x i32> %passthru, i ; ; CHECK-LABEL: define <4 x i32> @test_mask_expand_d_128( ; CHECK-SAME: <4 x i32> [[DATA:%.*]], <4 x i32> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -762,7 +760,7 @@ define <4 x i32> @test_maskz_expand_d_128(<4 x i32> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x i32> @test_maskz_expand_d_128( ; CHECK-SAME: <4 x i32> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -793,9 +791,9 @@ define <4 x double> @test_mask_compress_pd_256(<4 x double> %data, <4 x double> ; ; CHECK-LABEL: define <4 x double> @test_mask_compress_pd_256( ; CHECK-SAME: <4 x double> [[DATA:%.*]], <4 x double> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -828,7 +826,7 @@ define <4 x double> @test_maskz_compress_pd_256(<4 x double> %data, i8 %mask) #0 ; ; CHECK-LABEL: define <4 x double> @test_maskz_compress_pd_256( ; CHECK-SAME: <4 x double> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -859,7 +857,7 @@ define <4 x double> @test_compress_pd_256(<4 x double> %data, <4 x double> %data ; 
CHECK-LABEL: define <4 x double> @test_compress_pd_256( ; CHECK-SAME: <4 x double> [[DATA:%.*]], <4 x double> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -883,9 +881,9 @@ define <8 x float> @test_mask_compress_ps_256(<8 x float> %data, <8 x float> %pa ; ; CHECK-LABEL: define <8 x float> @test_mask_compress_ps_256( ; CHECK-SAME: <8 x float> [[DATA:%.*]], <8 x float> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -915,7 +913,7 @@ define <8 x float> @test_maskz_compress_ps_256(<8 x float> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <8 x float> @test_maskz_compress_ps_256( ; CHECK-SAME: <8 x float> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -943,7 +941,7 @@ define <8 x float> @test_compress_ps_256(<8 x float> %data, <8 x float> %data2) ; CHECK-LABEL: define <8 x float> @test_compress_ps_256( ; CHECK-SAME: <8 x float> [[DATA:%.*]], <8 x float> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -967,9 +965,9 @@ define <4 x i64> @test_mask_compress_q_256(<4 x i64> %data, <4 x i64> %passthru, ; ; CHECK-LABEL: define <4 x i64> @test_mask_compress_q_256( ; CHECK-SAME: <4 x i64> [[DATA:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; 
CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -1002,7 +1000,7 @@ define <4 x i64> @test_maskz_compress_q_256(<4 x i64> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x i64> @test_maskz_compress_q_256( ; CHECK-SAME: <4 x i64> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -1033,7 +1031,7 @@ define <4 x i64> @test_compress_q_256(<4 x i64> %data, <4 x i64> %data2) #0 { ; CHECK-LABEL: define <4 x i64> @test_compress_q_256( ; CHECK-SAME: <4 x i64> [[DATA:%.*]], <4 x i64> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1057,9 +1055,9 @@ define <8 x i32> @test_mask_compress_d_256(<8 x i32> %data, <8 x i32> %passthru, ; ; CHECK-LABEL: define <8 x i32> @test_mask_compress_d_256( ; CHECK-SAME: <8 x i32> [[DATA:%.*]], <8 x i32> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -1089,7 +1087,7 @@ define <8 x i32> @test_maskz_compress_d_256(<8 x i32> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <8 x i32> @test_maskz_compress_d_256( ; CHECK-SAME: <8 x i32> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -1117,7 +1115,7 @@ define <8 x i32> @test_compress_d_256(<8 x i32> %data, <8 x i32> %data2) #0 { ; CHECK-LABEL: define <8 x i32> @test_compress_d_256( ; 
CHECK-SAME: <8 x i32> [[DATA:%.*]], <8 x i32> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1141,7 +1139,7 @@ define <4 x double> @test_expand_pd_256(<4 x double> %data, <4 x double> %data2) ; CHECK-LABEL: define <4 x double> @test_expand_pd_256( ; CHECK-SAME: <4 x double> [[DATA:%.*]], <4 x double> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1165,9 +1163,9 @@ define <4 x double> @test_mask_expand_pd_256(<4 x double> %data, <4 x double> %p ; ; CHECK-LABEL: define <4 x double> @test_mask_expand_pd_256( ; CHECK-SAME: <4 x double> [[DATA:%.*]], <4 x double> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -1200,7 +1198,7 @@ define <4 x double> @test_maskz_expand_pd_256(<4 x double> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x double> @test_maskz_expand_pd_256( ; CHECK-SAME: <4 x double> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -1231,7 +1229,7 @@ define <8 x float> @test_expand_ps_256(<8 x float> %data, <8 x float> %data2) #0 ; CHECK-LABEL: define <8 x float> @test_expand_ps_256( ; CHECK-SAME: <8 x float> [[DATA:%.*]], <8 x float> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1255,9 +1253,9 @@ define <8 x float> @test_mask_expand_ps_256(<8 x float> %data, <8 x float> %pass ; ; CHECK-LABEL: define <8 x float> @test_mask_expand_ps_256( ; CHECK-SAME: <8 x float> [[DATA:%.*]], <8 x float> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -1287,7 +1285,7 @@ define <8 x float> @test_maskz_expand_ps_256(<8 x float> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <8 x float> @test_maskz_expand_ps_256( ; CHECK-SAME: <8 x float> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -1315,7 +1313,7 @@ define <4 x i64> @test_expand_q_256(<4 x i64> %data, <4 x i64> %data2) #0 { ; CHECK-LABEL: define <4 x i64> @test_expand_q_256( ; CHECK-SAME: <4 x i64> [[DATA:%.*]], <4 x i64> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1339,9 +1337,9 @@ define <4 x i64> @test_mask_expand_q_256(<4 x i64> %data, <4 x i64> %passthru, i ; ; CHECK-LABEL: define <4 x i64> @test_mask_expand_q_256( ; CHECK-SAME: <4 x i64> [[DATA:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -1374,7 +1372,7 @@ define <4 x i64> @test_maskz_expand_q_256(<4 x i64> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 
x i64> @test_maskz_expand_q_256( ; CHECK-SAME: <4 x i64> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -1405,7 +1403,7 @@ define <8 x i32> @test_expand_d_256(<8 x i32> %data, <8 x i32> %data2) #0 { ; CHECK-LABEL: define <8 x i32> @test_expand_d_256( ; CHECK-SAME: <8 x i32> [[DATA:%.*]], <8 x i32> [[DATA2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1429,9 +1427,9 @@ define <8 x i32> @test_mask_expand_d_256(<8 x i32> %data, <8 x i32> %passthru, i ; ; CHECK-LABEL: define <8 x i32> @test_mask_expand_d_256( ; CHECK-SAME: <8 x i32> [[DATA:%.*]], <8 x i32> [[PASSTHRU:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[TMP5]] to <8 x i1> ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -1461,7 +1459,7 @@ define <8 x i32> @test_maskz_expand_d_256(<8 x i32> %data, i8 %mask) #0 { ; ; CHECK-LABEL: define <8 x i32> @test_maskz_expand_d_256( ; CHECK-SAME: <8 x i32> [[DATA:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[TMP4]] to <8 x i1> @@ -1489,7 +1487,7 @@ define i8 @test_cmpps_256(<8 x float> %a, <8 x float> %b) #0 { ; CHECK-LABEL: define i8 @test_cmpps_256( ; CHECK-SAME: <8 x float> [[A:%.*]], <8 x float> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1516,7 +1514,7 @@ 
define i8 @test_cmpps_128(<4 x float> %a, <4 x float> %b) #0 { ; CHECK-LABEL: define i8 @test_cmpps_128( ; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1545,7 +1543,7 @@ define i8 @test_cmppd_256(<4 x double> %a, <4 x double> %b) #0 { ; CHECK-LABEL: define i8 @test_cmppd_256( ; CHECK-SAME: <4 x double> [[A:%.*]], <4 x double> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -1574,7 +1572,7 @@ define i8 @test_cmppd_128(<2 x double> %a, <2 x double> %b) #0 { ; CHECK-LABEL: define i8 @test_cmppd_128( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP5]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -1604,8 +1602,8 @@ define <8 x float> @test_mm512_maskz_max_ps_256(<8 x float> %a0, <8 x float> %a1 ; CHECK-LABEL: define <8 x float> @test_mm512_maskz_max_ps_256( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP9]], [[TMP10]] ; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0]], <8 x float> [[A1]]) @@ -1632,9 +1630,9 @@ define <8 x float> @test_mm512_mask_max_ps_256(<8 x float> %a0, <8 x float> %a1, ; CHECK-LABEL: define <8 x float> @test_mm512_mask_max_ps_256( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[SRC:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), 
i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0]], <8 x float> [[A1]]) @@ -1661,7 +1659,7 @@ define <8 x float> @test_mm512_max_ps_256(<8 x float> %a0, <8 x float> %a1, i8 % ; CHECK-LABEL: define <8 x float> @test_mm512_max_ps_256( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0]], <8 x float> [[A1]]) @@ -1678,8 +1676,8 @@ define <4 x float> @test_mm512_maskz_max_ps_128(<4 x float> %a0, <4 x float> %a1 ; CHECK-LABEL: define <4 x float> @test_mm512_maskz_max_ps_128( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP9]], [[TMP10]] ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0]], <4 x float> [[A1]]) @@ -1709,9 +1707,9 @@ define <4 x float> @test_mm512_mask_max_ps_128(<4 x float> %a0, <4 x float> %a1, ; CHECK-LABEL: define <4 x float> @test_mm512_mask_max_ps_128( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[SRC:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; 
CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0]], <4 x float> [[A1]]) @@ -1741,7 +1739,7 @@ define <4 x float> @test_mm512_max_ps_128(<4 x float> %a0, <4 x float> %a1, i8 % ; CHECK-LABEL: define <4 x float> @test_mm512_max_ps_128( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0]], <4 x float> [[A1]]) @@ -1758,8 +1756,8 @@ define <8 x float> @test_mm512_maskz_min_ps_256(<8 x float> %a0, <8 x float> %a1 ; CHECK-LABEL: define <8 x float> @test_mm512_maskz_min_ps_256( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP9]], [[TMP10]] ; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0]], <8 x float> [[A1]]) @@ -1786,9 +1784,9 @@ define <8 x float> @test_mm512_mask_min_ps_256(<8 x float> %a0, <8 x float> %a1, ; CHECK-LABEL: define <8 x float> @test_mm512_mask_min_ps_256( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[SRC:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0]], <8 x float> 
[[A1]]) @@ -1815,7 +1813,7 @@ define <8 x float> @test_mm512_min_ps_256(<8 x float> %a0, <8 x float> %a1, i8 % ; CHECK-LABEL: define <8 x float> @test_mm512_min_ps_256( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP1:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0]], <8 x float> [[A1]]) @@ -1832,9 +1830,9 @@ define <4 x float> @test_mm512_maskz_min_ps_128(<4 x float> %a0, <4 x float> %a1 ; CHECK-LABEL: define <4 x float> @test_mm512_maskz_min_ps_128( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP11]], [[TMP12]] ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0]], <4 x float> [[A1]]) @@ -1867,10 +1865,10 @@ define <4 x float> @test_mm512_mask_min_ps_128(<4 x float> %a0, <4 x float> %a1, ; CHECK-LABEL: define <4 x float> @test_mm512_mask_min_ps_128( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[SRC:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP8]], [[TMP12]] ; 
CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0]], <4 x float> [[A1]]) @@ -1903,7 +1901,7 @@ define <4 x float> @test_mm512_min_ps_128(<4 x float> %a0, <4 x float> %a1, i8 % ; CHECK-LABEL: define <4 x float> @test_mm512_min_ps_128( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0]], <4 x float> [[A1]]) @@ -1964,8 +1962,8 @@ define <4 x i32>@test_int_x86_avx512_vpermi2var_d_128(<4 x i32> %x0, <4 x i32> % ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpermi2var_d_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i32> [[TMP8]] to <4 x i2> ; CHECK-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP6]], <4 x i32> [[X1]], <4 x i32> [[TMP5]]) @@ -1989,9 +1987,9 @@ define <4 x i32>@test_int_x86_avx512_mask_vpermi2var_d_128(<4 x i32> %x0, <4 x i ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_vpermi2var_d_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = trunc <4 x i32> [[TMP3]] to <4 x i2> ; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP8]], <4 x i32> [[X1]], <4 x i32> [[TMP6]]) @@ -2026,8 +2024,8 @@ define <4 x i32>@test_int_x86_avx512_mask_vpermi2var_d_128(<4 x i32> %x0, <4 x i define <4 x i32>@test_int_x86_avx512_vpermt2var_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) #0 { ; CHECK-LABEL: define <4 x 
i32> @test_int_x86_avx512_vpermt2var_d_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i32> [[TMP8]] to <4 x i2> @@ -2051,10 +2049,10 @@ define <4 x i32>@test_int_x86_avx512_mask_vpermt2var_d_128(<4 x i32> %x0, <4 x i ; ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_vpermt2var_d_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = trunc <4 x i32> [[TMP3]] to <4 x i2> ; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP8]], <4 x i32> [[X0]], <4 x i32> [[TMP6]]) @@ -2090,10 +2088,10 @@ define <4 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_128(<4 x i32> %x0, <4 x ; ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_vpermt2var_d_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP14:%.*]] = trunc <4 x i32> [[TMP3]] to <4 x i2> ; CHECK-NEXT: [[TMP13:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP8]], <4 x i32> [[X0]], <4 x i32> [[TMP9]]) @@ -2131,8 +2129,8 @@ define <8 x 
i32>@test_int_x86_avx512_vpermi2var_d_256(<8 x i32> %x0, <8 x i32> % ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpermi2var_d_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i32> [[TMP8]] to <8 x i3> ; CHECK-NEXT: [[TMP4:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP6]], <8 x i32> [[X1]], <8 x i32> [[TMP5]]) @@ -2156,9 +2154,9 @@ define <8 x i32>@test_int_x86_avx512_mask_vpermi2var_d_256(<8 x i32> %x0, <8 x i ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_vpermi2var_d_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = trunc <8 x i32> [[TMP3]] to <8 x i3> ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP8]], <8 x i32> [[X1]], <8 x i32> [[TMP6]]) @@ -2190,8 +2188,8 @@ define <8 x i32>@test_int_x86_avx512_mask_vpermi2var_d_256(<8 x i32> %x0, <8 x i define <8 x i32>@test_int_x86_avx512_ask_vpermt2var_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) #0 { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_ask_vpermt2var_d_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i32> [[TMP8]] to <8 x i3> @@ -2215,10 +2213,10 @@ define <8 x i32>@test_int_x86_avx512_mask_vpermt2var_d_256(<8 x i32> %x0, <8 x i ; 
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_vpermt2var_d_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = trunc <8 x i32> [[TMP3]] to <8 x i3> ; CHECK-NEXT: [[TMP5:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP8]], <8 x i32> [[X0]], <8 x i32> [[TMP6]]) @@ -2251,10 +2249,10 @@ define <8 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_256(<8 x i32> %x0, <8 x ; ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_vpermt2var_d_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP14:%.*]] = trunc <8 x i32> [[TMP3]] to <8 x i3> ; CHECK-NEXT: [[TMP13:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP8]], <8 x i32> [[X0]], <8 x i32> [[TMP9]]) @@ -2289,8 +2287,8 @@ define <2 x double>@test_int_x86_avx512_vpermi2var_pd_128(<2 x double> %x0, <2 x ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_vpermi2var_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x double> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i64> [[TMP6]] to <2 x i1> ; CHECK-NEXT: 
[[TMP8:%.*]] = bitcast <2 x i64> [[TMP9]] to <2 x double> @@ -2317,9 +2315,9 @@ define <2 x double>@test_int_x86_avx512_mask_vpermi2var_pd_128(<2 x double> %x0, ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_vpermi2var_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP15:%.*]] = trunc <2 x i64> [[TMP13]] to <2 x i1> ; CHECK-NEXT: [[TMP9:%.*]] = bitcast <2 x i64> [[TMP11]] to <2 x double> @@ -2364,8 +2362,8 @@ define <4 x double>@test_int_x86_avx512_vpermi2var_pd_256(<4 x double> %x0, <4 x ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_vpermi2var_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x double> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i64> [[TMP6]] to <4 x i2> ; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i64> [[TMP9]] to <4 x double> @@ -2392,9 +2390,9 @@ define <4 x double>@test_int_x86_avx512_mask_vpermi2var_pd_256(<4 x double> %x0, ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_vpermi2var_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP15:%.*]] = 
trunc <4 x i64> [[TMP13]] to <4 x i2> ; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i64> [[TMP11]] to <4 x double> @@ -2439,8 +2437,8 @@ define <4 x float>@test_int_x86_avx512_vpermi2var_ps_128(<4 x float> %x0, <4 x i ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_vpermi2var_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x float> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i32> [[TMP6]] to <4 x i2> ; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP9]] to <4 x float> @@ -2467,9 +2465,9 @@ define <4 x float>@test_int_x86_avx512_mask_vpermi2var_ps_128(<4 x float> %x0, < ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_vpermi2var_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP15:%.*]] = trunc <4 x i32> [[TMP13]] to <4 x i2> ; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP11]] to <4 x float> @@ -2512,10 +2510,10 @@ define <4 x float>@test_int_x86_avx512_mask_vpermi2var_ps_128_cast(<4 x float> % ; ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_vpermi2var_ps_128_cast( ; CHECK-SAME: <4 x float> [[X0:%.*]], <2 x i64> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call 
void @llvm.donothing() ; CHECK-NEXT: [[TMP14:%.*]] = bitcast <2 x i64> [[TMP11]] to <4 x i32> ; CHECK-NEXT: [[X1CAST:%.*]] = bitcast <2 x i64> [[X1]] to <4 x i32> @@ -2563,8 +2561,8 @@ define <8 x float>@test_int_x86_avx512_vpermi2var_ps_256(<8 x float> %x0, <8 x i ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_vpermi2var_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x float> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i32> [[TMP6]] to <8 x i3> ; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i32> [[TMP9]] to <8 x float> @@ -2591,9 +2589,9 @@ define <8 x float>@test_int_x86_avx512_mask_vpermi2var_ps_256(<8 x float> %x0, < ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_vpermi2var_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP15:%.*]] = trunc <8 x i32> [[TMP13]] to <8 x i3> ; CHECK-NEXT: [[TMP9:%.*]] = bitcast <8 x i32> [[TMP11]] to <8 x float> @@ -2635,8 +2633,8 @@ define <2 x i64>@test_int_x86_avx512_vpermi2var_q_128(<2 x i64> %x0, <2 x i64> % ; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_vpermi2var_q_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i64> [[TMP8]] to <2 x i1> ; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP6]], <2 x i64> [[X1]], <2 x i64> 
[[TMP5]]) @@ -2660,9 +2658,9 @@ define <2 x i64>@test_int_x86_avx512_mask_vpermi2var_q_128(<2 x i64> %x0, <2 x i ; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_mask_vpermi2var_q_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = trunc <2 x i64> [[TMP3]] to <2 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP8]], <2 x i64> [[X1]], <2 x i64> [[TMP6]]) @@ -2697,8 +2695,8 @@ define <2 x i64>@test_int_x86_avx512_mask_vpermi2var_q_128(<2 x i64> %x0, <2 x i define <2 x i64>@test_int_x86_avx512_vpermt2var_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) #0 { ; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_vpermt2var_q_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i64> [[TMP8]] to <2 x i1> @@ -2722,10 +2720,10 @@ define <2 x i64>@test_int_x86_avx512_mask_vpermt2var_q_128(<2 x i64> %x0, <2 x i ; ; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_mask_vpermt2var_q_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[TMP9:%.*]] = trunc <2 x i64> [[TMP3]] to <2 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP8]], <2 x i64> [[X0]], <2 x i64> [[TMP6]]) @@ -2761,10 +2759,10 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_128(<2 x i64> %x0, <2 x ; ; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_maskz_vpermt2var_q_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP14:%.*]] = trunc <2 x i64> [[TMP3]] to <2 x i1> ; CHECK-NEXT: [[TMP13:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP8]], <2 x i64> [[X0]], <2 x i64> [[TMP9]]) @@ -2802,8 +2800,8 @@ define <4 x i64>@test_int_x86_avx512_vpermi2var_q_256(<4 x i64> %x0, <4 x i64> % ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_vpermi2var_q_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i64> [[TMP8]] to <4 x i2> ; CHECK-NEXT: [[TMP4:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP6]], <4 x i64> [[X1]], <4 x i64> [[TMP5]]) @@ -2827,9 +2825,9 @@ define <4 x i64>@test_int_x86_avx512_mask_vpermi2var_q_256(<4 x i64> %x0, <4 x i ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_mask_vpermi2var_q_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i2> ; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP8]], <4 x i64> [[X1]], <4 x i64> [[TMP6]]) @@ -2864,8 +2862,8 @@ define <4 x i64>@test_int_x86_avx512_mask_vpermi2var_q_256(<4 x i64> %x0, <4 x i define <4 x i64>@test_int_x86_avx512_vpermt2var_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) #0 { ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_vpermt2var_q_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i64> [[TMP8]] to <4 x i2> @@ -2889,10 +2887,10 @@ define <4 x i64>@test_int_x86_avx512_mask_vpermt2var_q_256(<4 x i64> %x0, <4 x i ; ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_mask_vpermt2var_q_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i2> ; CHECK-NEXT: [[TMP5:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP8]], <4 x i64> [[X0]], <4 x i64> [[TMP6]]) @@ -2928,10 +2926,10 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_256(<4 x i64> %x0, <4 x ; ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_maskz_vpermt2var_q_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP14:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i2> ; CHECK-NEXT: [[TMP13:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP8]], <4 x i64> [[X0]], <4 x i64> [[TMP9]]) @@ -2969,8 +2967,8 @@ define <2 x double>@test_int_x86_avx512_scalef_pd_128(<2 x double> %x0, <2 x dou ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_scalef_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -2998,9 +2996,9 @@ define <2 x double>@test_int_x86_avx512_mask_scalef_pd_128(<2 x double> %x0, <2 ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_scalef_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -3031,8 +3029,8 @@ define <4 x double>@test_int_x86_avx512_scalef_pd_256(<4 x double> %x0, <4 x dou ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_scalef_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -3060,9 +3058,9 @@ define <4 x double>@test_int_x86_avx512_mask_scalef_pd_256(<4 x double> %x0, <4 ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_scalef_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -3093,8 +3091,8 @@ define <4 x float>@test_int_x86_avx512_scalef_ps_128(<4 x float> %x0, <4 x float ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_scalef_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -3122,9 +3120,9 @@ define <4 x float>@test_int_x86_avx512_mask_scalef_ps_128(<4 x float> %x0, <4 x ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_scalef_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, 
ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -3155,8 +3153,8 @@ define <8 x float>@test_int_x86_avx512_scalef_ps_256(<8 x float> %x0, <8 x float ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_scalef_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -3184,9 +3182,9 @@ define <8 x float>@test_int_x86_avx512_mask_scalef_ps_256(<8 x float> %x0, <8 x ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_scalef_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -3218,8 +3216,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_128(<2 x i64> %x0, <16 x i8> % ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmov_qb_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ 
-3275,8 +3273,8 @@ define void @test_int_x86_avx512_mask_pmov_qb_mem_128(ptr %ptr, <2 x i64> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qb_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -3314,8 +3312,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_128(<2 x i64> %x0, <16 x i8> ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovs_qb_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -3371,8 +3369,8 @@ define void @test_int_x86_avx512_mask_pmovs_qb_mem_128(ptr %ptr, <2 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qb_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -3410,8 +3408,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_128(<2 x i64> %x0, <16 x i8> ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovus_qb_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 
x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -3467,8 +3465,8 @@ define void @test_int_x86_avx512_mask_pmovus_qb_mem_128(ptr %ptr, <2 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qb_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -3506,8 +3504,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_256(<4 x i64> %x0, <16 x i8> % ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmov_qb_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -3563,8 +3561,8 @@ define void @test_int_x86_avx512_mask_pmov_qb_mem_256(ptr %ptr, <4 x i64> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qb_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 @@ -3602,8 +3600,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_256(<4 x i64> %x0, <16 x i8> ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovs_qb_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -3659,8 +3657,8 @@ define void @test_int_x86_avx512_mask_pmovs_qb_mem_256(ptr %ptr, <4 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qb_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 @@ -3698,8 +3696,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_256(<4 x i64> %x0, <16 x i8> ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovus_qb_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -3755,8 +3753,8 @@ define void @test_int_x86_avx512_mask_pmovus_qb_mem_256(ptr %ptr, <4 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qb_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = 
bitcast <4 x i64> [[TMP2]] to i256 @@ -3794,8 +3792,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_128(<2 x i64> %x0, <8 x i16> % ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmov_qw_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -3851,8 +3849,8 @@ define void @test_int_x86_avx512_mask_pmov_qw_mem_128(ptr %ptr, <2 x i64> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qw_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -3890,8 +3888,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_128(<2 x i64> %x0, <8 x i16> ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovs_qw_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -3947,8 +3945,8 @@ define void @test_int_x86_avx512_mask_pmovs_qw_mem_128(ptr %ptr, <2 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qw_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -3986,8 +3984,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_128(<2 x i64> %x0, <8 x i16> ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovus_qw_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -4043,8 +4041,8 @@ define void @test_int_x86_avx512_mask_pmovus_qw_mem_128(ptr %ptr, <2 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qw_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -4082,8 +4080,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_256(<4 x i64> %x0, <8 x i16> % ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmov_qw_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -4139,8 +4137,8 @@ define void @test_int_x86_avx512_mask_pmov_qw_mem_256(ptr %ptr, <4 x i64> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qw_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { 
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 @@ -4178,8 +4176,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_256(<4 x i64> %x0, <8 x i16> ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovs_qw_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -4235,8 +4233,8 @@ define void @test_int_x86_avx512_mask_pmovs_qw_mem_256(ptr %ptr, <4 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qw_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 @@ -4274,8 +4272,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_256(<4 x i64> %x0, <8 x i16> ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovus_qw_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast 
<4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -4331,8 +4329,8 @@ define void @test_int_x86_avx512_mask_pmovus_qw_mem_256(ptr %ptr, <4 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qw_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 @@ -4370,8 +4368,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmov_qd_128(<2 x i64> %x0, <4 x i32> % ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmov_qd_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -4427,8 +4425,8 @@ define void @test_int_x86_avx512_mask_pmov_qd_mem_128(ptr %ptr, <2 x i64> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qd_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -4466,8 +4464,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovs_qd_128(<2 x i64> %x0, <4 x i32> ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmovs_qd_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -4523,8 +4521,8 @@ define void @test_int_x86_avx512_mask_pmovs_qd_mem_128(ptr %ptr, <2 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qd_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -4562,8 +4560,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovus_qd_128(<2 x i64> %x0, <4 x i32> ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmovus_qd_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -4619,8 +4617,8 @@ define void @test_int_x86_avx512_mask_pmovus_qd_mem_128(ptr %ptr, <2 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qd_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <2 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -4670,8 +4668,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmov_qd_256(<4 x i64> %x0, <4 x i32> % ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmov_qd_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x 
i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <4 x i64> [[TMP6]] to <4 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i64> [[X0]] to <4 x i32> @@ -4700,7 +4698,7 @@ define <4 x i32>@test_int_x86_avx512_maskz_pmov_qd_256(<4 x i64> %x0, i8 %x2) #0 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_pmov_qd_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i32> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i64> [[X0]] to <4 x i32> @@ -4731,8 +4729,8 @@ define void @test_int_x86_avx512_mask_pmov_qd_mem_256(ptr %ptr, <4 x i64> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_qd_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 @@ -4769,7 +4767,7 @@ define <4 x i32>@test_int_x86_avx512_pmovs_qd_256(<4 x i64> %x0, <4 x i32> %x1) ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_pmovs_qd_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -4794,8 +4792,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovs_qd_256(<4 x i64> %x0, <4 x i32> ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmovs_qd_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: 
[[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -4822,7 +4820,7 @@ define <4 x i32>@test_int_x86_avx512_maskz_pmovs_qd_256(<4 x i64> %x0, i8 %x2) # ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_pmovs_qd_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -4848,8 +4846,8 @@ define void @test_int_x86_avx512_mask_pmovs_qd_mem_256(ptr %ptr, <4 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_qd_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 @@ -4886,7 +4884,7 @@ define <4 x i32>@test_int_x86_avx512_pmovus_qd_256(<4 x i64> %x0, <4 x i32> %x1) ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_pmovus_qd_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -4911,8 +4909,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pmovus_qd_256(<4 x i64> %x0, <4 x i32> ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pmovus_qd_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -4939,7 +4937,7 @@ define <4 x i32>@test_int_x86_avx512_maskz_pmovus_qd_256(<4 x i64> %x0, i8 %x2) ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_pmovus_qd_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -4965,8 +4963,8 @@ define void @test_int_x86_avx512_mask_pmovus_qd_mem_256(ptr %ptr, <4 x i64> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_qd_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i64> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 @@ -5004,8 +5002,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_db_128(<4 x i32> %x0, <16 x i8> % ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmov_db_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -5061,8 +5059,8 @@ define void @test_int_x86_avx512_mask_pmov_db_mem_128(ptr %ptr, <4 x i32> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_db_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -5100,8 +5098,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_128(<4 x i32> %x0, <16 x i8> ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovs_db_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -5157,8 +5155,8 @@ define void @test_int_x86_avx512_mask_pmovs_db_mem_128(ptr %ptr, <4 x i32> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_db_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -5196,8 +5194,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_128(<4 x i32> %x0, <16 x i8> ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovus_db_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -5253,8 +5251,8 @@ define void @test_int_x86_avx512_mask_pmovus_db_mem_128(ptr %ptr, <4 x 
i32> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_db_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -5292,8 +5290,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmov_db_256(<8 x i32> %x0, <16 x i8> % ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmov_db_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -5349,8 +5347,8 @@ define void @test_int_x86_avx512_mask_pmov_db_mem_256(ptr %ptr, <8 x i32> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_db_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256 @@ -5388,8 +5386,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_256(<8 x i32> %x0, <16 x i8> ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovs_db_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: 
[[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -5445,8 +5443,8 @@ define void @test_int_x86_avx512_mask_pmovs_db_mem_256(ptr %ptr, <8 x i32> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_db_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256 @@ -5484,8 +5482,8 @@ define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_256(<8 x i32> %x0, <16 x i8> ; CHECK-LABEL: define <16 x i8> @test_int_x86_avx512_mask_pmovus_db_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -5541,8 +5539,8 @@ define void @test_int_x86_avx512_mask_pmovus_db_mem_256(ptr %ptr, <8 x i32> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_db_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256 @@ -5580,8 +5578,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_dw_128(<4 x i32> %x0, <8 x i16> % ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmov_dw_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -5637,8 +5635,8 @@ define void @test_int_x86_avx512_mask_pmov_dw_mem_128(ptr %ptr, <4 x i32> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_dw_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -5676,8 +5674,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_dw_128(<4 x i32> %x0, <8 x i16> ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovs_dw_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -5733,8 +5731,8 @@ define void @test_int_x86_avx512_mask_pmovs_dw_mem_128(ptr %ptr, <4 x i32> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_dw_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -5772,8 +5770,8 @@ define <8 x 
i16>@test_int_x86_avx512_mask_pmovus_dw_128(<4 x i32> %x0, <8 x i16> ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovus_dw_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -5829,8 +5827,8 @@ define void @test_int_x86_avx512_mask_pmovus_dw_mem_128(ptr %ptr, <4 x i32> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_dw_mem_128( ; CHECK-SAME: ptr [[PTR:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -5868,8 +5866,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmov_dw_256(<8 x i32> %x0, <8 x i16> % ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmov_dw_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -5925,8 +5923,8 @@ define void @test_int_x86_avx512_mask_pmov_dw_mem_256(ptr %ptr, <8 x i32> %x1, i ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmov_dw_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256 @@ -5964,8 +5962,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovs_dw_256(<8 x i32> %x0, <8 x i16> ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovs_dw_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -6021,8 +6019,8 @@ define void @test_int_x86_avx512_mask_pmovs_dw_mem_256(ptr %ptr, <8 x i32> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovs_dw_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256 @@ -6060,8 +6058,8 @@ define <8 x i16>@test_int_x86_avx512_mask_pmovus_dw_256(<8 x i32> %x0, <8 x i16> ; CHECK-LABEL: define <8 x i16> @test_int_x86_avx512_mask_pmovus_dw_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i16> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -6117,8 +6115,8 @@ define void @test_int_x86_avx512_mask_pmovus_dw_mem_256(ptr %ptr, <8 x i32> %x1, ; CHECK-LABEL: define void @test_int_x86_avx512_mask_pmovus_dw_mem_256( ; CHECK-SAME: ptr [[PTR:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, 
align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256 @@ -6156,8 +6154,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_128(<2 x double> %x0, <4 x i ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2dq_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6197,7 +6195,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_pd2dq_128_zext(<2 x double> %x0, <4 x i ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_pd2dq_128_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6224,8 +6222,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_128_zext(<2 x double> %x0, < ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2dq_128_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6255,7 +6253,7 @@ define <4 x float>@test_int_x86_avx512_cvt_pd2ps(<2 x double> %x0, <4 x float> % ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_cvt_pd2ps( ; CHECK-SAME: <2 x 
double> [[X0:%.*]], <4 x float> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6280,8 +6278,8 @@ define <4 x float>@test_int_x86_avx512_mask_cvt_pd2ps(<2 x double> %x0, <4 x flo ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_cvt_pd2ps( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6307,7 +6305,7 @@ define <4 x float>@test_int_x86_avx512_cvt_pd2ps_zext(<2 x double> %x0, <4 x flo ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_cvt_pd2ps_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x float> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6334,8 +6332,8 @@ define <4 x float>@test_int_x86_avx512_mask_cvt_pd2ps_zext(<2 x double> %x0, <4 ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_cvt_pd2ps_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x float> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6365,7 +6363,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_pd2udq_128(<2 x double> %x0, <4 x i32> ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_pd2udq_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6390,8 +6388,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_128(<2 x double> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2udq_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6417,7 +6415,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_pd2udq_128_zext(<2 x double> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_pd2udq_128_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6444,8 +6442,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_128_zext(<2 x double> %x0, ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2udq_128_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6475,7 +6473,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_pd2udq_256(<4 x double> %x0, <4 x i32> ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_pd2udq_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint 
(ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -6500,8 +6498,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_256(<4 x double> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_pd2udq_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -6529,7 +6527,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_ps2dq_128(<4 x float> %x0, <4 x i32> %x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_ps2dq_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6554,8 +6552,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_128(<4 x float> %x0, <4 x i3 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_ps2dq_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6583,7 +6581,7 @@ define <8 x i32>@test_int_x86_avx512_cvt_ps2dq_256(<8 x float> %x0, <8 x i32> %x ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_cvt_ps2dq_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, 
ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -6608,8 +6606,8 @@ define <8 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_256(<8 x float> %x0, <8 x i3 ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvt_ps2dq_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -6637,7 +6635,7 @@ define <4 x i32>@test_int_x86_avx512_cvt_ps2udq_128(<4 x float> %x0, <4 x i32> % ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvt_ps2udq_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6662,8 +6660,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_128(<4 x float> %x0, <4 x i ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvt_ps2udq_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6691,7 +6689,7 @@ define <8 x i32>@test_int_x86_avx512_cvt_ps2udq_256(<8 x float> %x0, <8 x i32> % ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_cvt_ps2udq_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x 
i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -6716,8 +6714,8 @@ define <8 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_256(<8 x float> %x0, <8 x i ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvt_ps2udq_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -6745,7 +6743,7 @@ define <4 x i32>@test_int_x86_avx512_ask_cvtt_pd2dq_128(<2 x double> %x0, <4 x i ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_ask_cvtt_pd2dq_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6770,8 +6768,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_128(<2 x double> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_pd2dq_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6797,7 +6795,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_pd2dq_128_zext(<2 x double> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_pd2dq_128_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6824,8 +6822,8 @@ define <4 x 
i32>@test_int_x86_avx512_mask_cvtt_pd2dq_128_zext(<2 x double> %x0, ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_pd2dq_128_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6855,7 +6853,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_pd2udq_128(<2 x double> %x0, <4 x i32> ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_pd2udq_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6880,8 +6878,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_128(<2 x double> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_pd2udq_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6907,7 +6905,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_pd2udq_128_zext(<2 x double> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_pd2udq_128_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -6934,8 +6932,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_128_zext(<2 x double> %x0, ; CHECK-LABEL: define <4 x i32> 
@test_int_x86_avx512_mask_cvtt_pd2udq_128_zext( ; CHECK-SAME: <2 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -6965,7 +6963,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_pd2udq_256(<4 x double> %x0, <4 x i32> ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_pd2udq_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -6990,8 +6988,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_256(<4 x double> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_pd2udq_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -7019,7 +7017,7 @@ define <4 x i32>@test_int_x86_avx512_cvtt_ps2udq_128(<4 x float> %x0, <4 x i32> ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_cvtt_ps2udq_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -7044,8 +7042,8 @@ define <4 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_128(<4 x float> %x0, <4 x ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_cvtt_ps2udq_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x i32> [[X1:%.*]], i8 
[[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -7073,7 +7071,7 @@ define <8 x i32>@test_int_x86_avx512_cvtt_ps2udq_256(<8 x float> %x0, <8 x i32> ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_cvtt_ps2udq_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -7098,8 +7096,8 @@ define <8 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_256(<8 x float> %x0, <8 x ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_cvtt_ps2udq_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x i32> [[X1:%.*]], i8 [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -7127,36 +7125,29 @@ define <2 x double>@test_int_x86_avx512_mask_rndscale_pd_128(<2 x double> %x0, < ; ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_rndscale_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 -; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 -; CHECK-NEXT: 
[[_MSCMP1:%.*]] = icmp ne i128 [[TMP5]], 0 -; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[X3]] to i2 +; CHECK-NEXT: [[TMP5:%.*]] = bitcast i2 [[TMP4]] to <2 x i1> +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <2 x i64> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = sext <2 x i1> [[TMP6]] to <2 x i64> +; CHECK-NEXT: [[TMP8:%.*]] = select <2 x i1> [[TMP5]], <2 x i64> [[TMP7]], <2 x i64> [[TMP2]] ; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i8 [[TMP3]], 0 -; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]] -; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: +; CHECK-NEXT: br i1 [[_MSCMP2]], label %[[BB9:.*]], label %[[BB10:.*]], !prof [[PROF1]] +; CHECK: [[BB9]]: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]] ; CHECK-NEXT: unreachable -; CHECK: [[BB7]]: -; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.avx512.mask.rndscale.pd.128(<2 x double> [[X0]], i32 4, <2 x double> [[X2]], i8 [[X3]]) -; CHECK-NEXT: [[TMP8:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 -; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP8]], 0 -; CHECK-NEXT: [[TMP9:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 -; CHECK-NEXT: [[_MSCMP5:%.*]] = icmp ne i128 [[TMP9]], 0 -; CHECK-NEXT: [[_MSOR6:%.*]] = or i1 [[_MSCMP4]], [[_MSCMP5]] -; CHECK-NEXT: br i1 [[_MSOR6]], label %[[BB10:.*]], label %[[BB11:.*]], !prof [[PROF1]] ; CHECK: [[BB10]]: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]] -; CHECK-NEXT: unreachable -; CHECK: [[BB11]]: +; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.avx512.mask.rndscale.pd.128(<2 x double> [[X0]], i32 4, <2 x double> [[X2]], i8 [[X3]]) +; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <2 x i64> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = sext <2 x i1> [[TMP11]] to <2 x i64> +; CHECK-NEXT: [[TMP13:%.*]] = select <2 x i1> splat (i1 true), <2 x i64> [[TMP12]], <2 x i64> [[TMP2]] ; CHECK-NEXT: [[RES1:%.*]] = call <2 x double> @llvm.x86.avx512.mask.rndscale.pd.128(<2 x double> [[X0]], i32 88, <2 x double> [[X2]], i8 -1) +; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP8]], [[TMP13]] ; CHECK-NEXT: [[RES2:%.*]] = fadd <2 x double> [[RES]], [[RES1]] -; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store <2 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <2 x double> [[RES2]] ; %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.pd.128(<2 x double> %x0, i32 4, <2 x double> %x2, i8 %x3) @@ -7171,36 +7162,29 @@ define <4 x double>@test_int_x86_avx512_mask_rndscale_pd_256(<4 x double> %x0, < ; ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_rndscale_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 -; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 -; CHECK-NEXT: 
[[TMP5:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 -; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP5]], 0 -; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[X3]] to i4 +; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <4 x i64> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = sext <4 x i1> [[TMP6]] to <4 x i64> +; CHECK-NEXT: [[TMP8:%.*]] = select <4 x i1> [[TMP5]], <4 x i64> [[TMP7]], <4 x i64> [[TMP2]] ; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i8 [[TMP3]], 0 -; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]] -; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: +; CHECK-NEXT: br i1 [[_MSCMP2]], label %[[BB9:.*]], label %[[BB10:.*]], !prof [[PROF1]] +; CHECK: [[BB9]]: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]] ; CHECK-NEXT: unreachable -; CHECK: [[BB7]]: -; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx512.mask.rndscale.pd.256(<4 x double> [[X0]], i32 4, <4 x double> [[X2]], i8 [[X3]]) -; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 -; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP8]], 0 -; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i64> [[TMP2]] to i256 -; CHECK-NEXT: [[_MSCMP5:%.*]] = icmp ne i256 [[TMP9]], 0 -; CHECK-NEXT: [[_MSOR6:%.*]] = or i1 [[_MSCMP4]], [[_MSCMP5]] -; CHECK-NEXT: br i1 [[_MSOR6]], label %[[BB10:.*]], label %[[BB11:.*]], !prof [[PROF1]] ; CHECK: [[BB10]]: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]] -; CHECK-NEXT: unreachable -; CHECK: [[BB11]]: +; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx512.mask.rndscale.pd.256(<4 x double> [[X0]], i32 4, <4 x double> [[X2]], i8 [[X3]]) +; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <4 x i64> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = sext <4 x i1> [[TMP11]] to <4 x i64> +; CHECK-NEXT: [[TMP13:%.*]] = select <4 x i1> splat (i1 true), <4 x i64> [[TMP12]], <4 x i64> [[TMP2]] ; CHECK-NEXT: [[RES1:%.*]] = call <4 x double> @llvm.x86.avx512.mask.rndscale.pd.256(<4 x double> [[X0]], i32 88, <4 x double> [[X2]], i8 -1) +; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP8]], [[TMP13]] ; CHECK-NEXT: [[RES2:%.*]] = fadd <4 x double> [[RES]], [[RES1]] -; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store <4 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x double> [[RES2]] ; %res = call <4 x double> @llvm.x86.avx512.mask.rndscale.pd.256(<4 x double> %x0, i32 4, <4 x double> %x2, i8 %x3) @@ -7215,36 +7199,29 @@ define <4 x float>@test_int_x86_avx512_mask_rndscale_ps_128(<4 x float> %x0, <4 ; ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_rndscale_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 -; 
CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 -; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP5]], 0 -; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[X3]] to i4 +; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = sext <4 x i1> [[TMP6]] to <4 x i32> +; CHECK-NEXT: [[TMP8:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[TMP7]], <4 x i32> [[TMP2]] ; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i8 [[TMP3]], 0 -; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]] -; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: +; CHECK-NEXT: br i1 [[_MSCMP2]], label %[[BB9:.*]], label %[[BB10:.*]], !prof [[PROF1]] +; CHECK: [[BB9]]: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]] ; CHECK-NEXT: unreachable -; CHECK: [[BB7]]: -; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx512.mask.rndscale.ps.128(<4 x float> [[X0]], i32 88, <4 x float> [[X2]], i8 [[X3]]) -; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 -; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP8]], 0 -; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 -; CHECK-NEXT: [[_MSCMP5:%.*]] = icmp ne i128 [[TMP9]], 0 -; CHECK-NEXT: [[_MSOR6:%.*]] = or i1 [[_MSCMP4]], [[_MSCMP5]] -; CHECK-NEXT: br i1 [[_MSOR6]], label %[[BB10:.*]], label %[[BB11:.*]], !prof [[PROF1]] ; CHECK: [[BB10]]: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]] -; CHECK-NEXT: unreachable -; CHECK: [[BB11]]: +; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx512.mask.rndscale.ps.128(<4 x float> [[X0]], i32 88, <4 x float> [[X2]], i8 [[X3]]) +; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = sext <4 x i1> [[TMP11]] to <4 x i32> +; CHECK-NEXT: [[TMP13:%.*]] = select <4 x i1> splat (i1 true), <4 x i32> [[TMP12]], <4 x i32> [[TMP2]] ; CHECK-NEXT: [[RES1:%.*]] = call <4 x float> @llvm.x86.avx512.mask.rndscale.ps.128(<4 x float> [[X0]], i32 4, <4 x float> [[X2]], i8 -1) +; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP8]], [[TMP13]] ; CHECK-NEXT: [[RES2:%.*]] = fadd <4 x float> [[RES]], [[RES1]] -; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x float> [[RES2]] ; %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ps.128(<4 x float> %x0, i32 88, <4 x float> %x2, i8 %x3) @@ -7259,36 +7236,28 @@ define <8 x float>@test_int_x86_avx512_mask_rndscale_ps_256(<8 x float> %x0, <8 ; ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_rndscale_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() -; CHECK-NEXT: 
[[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 -; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP2]] to i256 -; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP5]], 0 -; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[X3]] to <8 x i1> +; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i32> +; CHECK-NEXT: [[TMP7:%.*]] = select <8 x i1> [[TMP4]], <8 x i32> [[TMP6]], <8 x i32> [[TMP2]] ; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i8 [[TMP3]], 0 -; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]] -; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] -; CHECK: [[BB6]]: +; CHECK-NEXT: br i1 [[_MSCMP2]], label %[[BB8:.*]], label %[[BB9:.*]], !prof [[PROF1]] +; CHECK: [[BB8]]: ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]] ; CHECK-NEXT: unreachable -; CHECK: [[BB7]]: +; CHECK: [[BB9]]: ; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx512.mask.rndscale.ps.256(<8 x float> [[X0]], i32 5, <8 x float> [[X2]], i8 [[X3]]) -; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 -; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP8]], 0 -; CHECK-NEXT: [[TMP9:%.*]] = bitcast <8 x i32> [[TMP2]] to i256 -; CHECK-NEXT: [[_MSCMP5:%.*]] = icmp ne i256 [[TMP9]], 0 -; CHECK-NEXT: [[_MSOR6:%.*]] = or i1 [[_MSCMP4]], [[_MSCMP5]] -; CHECK-NEXT: br i1 [[_MSOR6]], label %[[BB10:.*]], label %[[BB11:.*]], !prof [[PROF1]] -; CHECK: [[BB10]]: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]] -; CHECK-NEXT: unreachable -; CHECK: [[BB11]]: +; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[TMP11:%.*]] = sext <8 x i1> [[TMP10]] to <8 x i32> +; CHECK-NEXT: [[TMP12:%.*]] = select <8 x i1> splat (i1 true), <8 x i32> [[TMP11]], <8 x i32> [[TMP2]] ; CHECK-NEXT: [[RES1:%.*]] = call <8 x float> @llvm.x86.avx512.mask.rndscale.ps.256(<8 x float> [[X0]], i32 66, <8 x float> [[X2]], i8 -1) +; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP7]], [[TMP12]] ; CHECK-NEXT: [[RES2:%.*]] = fadd <8 x float> [[RES]], [[RES1]] -; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x float> [[RES2]] ; %res = call <8 x float> @llvm.x86.avx512.mask.rndscale.ps.256(<8 x float> %x0, i32 5, <8 x float> %x2, i8 %x3) @@ -7304,8 +7273,8 @@ define <2 x double>@test_int_x86_avx512_mask_getmant_pd_128(<2 x double> %x0, <2 ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_getmant_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -7361,8 +7330,8 @@ define 
<4 x double>@test_int_x86_avx512_mask_getmant_pd_256(<4 x double> %x0, <4 ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_getmant_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -7405,8 +7374,8 @@ define <4 x float>@test_int_x86_avx512_mask_getmant_ps_128(<4 x float> %x0, <4 x ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_getmant_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -7449,8 +7418,8 @@ define <8 x float>@test_int_x86_avx512_mask_getmant_ps_256(<8 x float> %x0, <8 x ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_getmant_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -7492,8 +7461,8 @@ define <4 x i32>@test_int_x86_avx512_pternlog_d_128(<4 x i32> %x0, <4 x i32> %x1 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_pternlog_d_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), 
align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP7]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -7521,9 +7490,9 @@ define <4 x i32>@test_int_x86_avx512_mask_pternlog_d_128(<4 x i32> %x0, <4 x i32 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_mask_pternlog_d_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i32> [[TMP8]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP12]], 0 @@ -7566,9 +7535,9 @@ define <4 x i32>@test_int_x86_avx512_maskz_pternlog_d_128(<4 x i32> %x0, <4 x i3 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_maskz_pternlog_d_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i32> [[TMP8]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP13]], 0 @@ -7610,8 +7579,8 @@ define <8 x i32>@test_int_x86_avx512_pternlog_d_256(<8 x i32> %x0, <8 x i32> %x1 ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_pternlog_d_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = 
load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP7]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -7639,9 +7608,9 @@ define <8 x i32>@test_int_x86_avx512_mask_pternlog_d_256(<8 x i32> %x0, <8 x i32 ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_mask_pternlog_d_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i32> [[TMP8]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP12]], 0 @@ -7681,9 +7650,9 @@ define <8 x i32>@test_int_x86_avx512_maskz_pternlog_d_256(<8 x i32> %x0, <8 x i3 ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_maskz_pternlog_d_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = bitcast <8 x i32> [[TMP8]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP13]], 0 @@ -7722,8 +7691,8 @@ define <2 x i64>@test_int_x86_avx512_pternlog_q_128(<2 x i64> %x0, <2 x i64> %x1 ; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_pternlog_q_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, 
ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP7]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0 @@ -7751,9 +7720,9 @@ define <2 x i64>@test_int_x86_avx512_mask_pternlog_q_128(<2 x i64> %x0, <2 x i64 ; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_mask_pternlog_q_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <2 x i64> [[TMP8]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP12]], 0 @@ -7794,9 +7763,9 @@ define <2 x i64>@test_int_x86_avx512_maskz_pternlog_q_128(<2 x i64> %x0, <2 x i6 ; CHECK-LABEL: define <2 x i64> @test_int_x86_avx512_maskz_pternlog_q_128( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = bitcast <2 x i64> [[TMP8]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP13]], 0 @@ -7838,8 +7807,8 @@ define <4 x i64>@test_int_x86_avx512_pternlog_q_256(<4 x i64> %x0, <4 x i64> %x1 ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_pternlog_q_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 
8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP7]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0 @@ -7867,9 +7836,9 @@ define <4 x i64>@test_int_x86_avx512_mask_pternlog_q_256(<4 x i64> %x0, <4 x i64 ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_mask_pternlog_q_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i64> [[TMP8]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP12]], 0 @@ -7910,9 +7879,9 @@ define <4 x i64>@test_int_x86_avx512_maskz_pternlog_q_256(<4 x i64> %x0, <4 x i6 ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_maskz_pternlog_q_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i64> [[TMP8]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP13]], 0 @@ -7953,8 +7922,8 @@ define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0, i8 %mask, <8 x i16> %s ; CHECK-LABEL: define <8 x i16> @test_x86_vcvtps2ph_128( ; CHECK-SAME: <4 x float> [[A0:%.*]], i8 [[MASK:%.*]], <8 x i16> [[SRC:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> zeroinitializer, <8 x i32> ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP4]], zeroinitializer @@ -8011,8 +7980,8 @@ define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0, i8 %mask, <8 x i16> %s ; CHECK-LABEL: define <8 x i16> @test_x86_vcvtps2ph_256( ; CHECK-SAME: <8 x float> [[A0:%.*]], i8 [[MASK:%.*]], <8 x i16> [[SRC:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i16> @@ -8077,7 +8046,7 @@ define <8 x float> @test_rsqrt_ps_256_rrkz(<8 x float> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: define <8 x float> @test_rsqrt_ps_256_rrkz( ; CHECK-SAME: <8 x float> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -8102,9 +8071,9 @@ define <8 x float> @test_rsqrt_ps_256_rrk(<8 x float> %a0, <8 x float> %a1, i8 % ; ; CHECK-LABEL: define <8 x float> @test_rsqrt_ps_256_rrk( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer @@ -8144,7 +8113,7 @@ define <4 x float> @test_rsqrt_ps_128_rrkz(<4 x float> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x float> @test_rsqrt_ps_128_rrkz( ; CHECK-SAME: <4 x float> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i4 @@ -8170,9 +8139,9 @@ define <4 x float> 
@test_rsqrt_ps_128_rrk(<4 x float> %a0, <4 x float> %a1, i8 % ; ; CHECK-LABEL: define <4 x float> @test_rsqrt_ps_128_rrk( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i4 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> @@ -8216,7 +8185,7 @@ define <8 x float> @test_rcp_ps_256_rrkz(<8 x float> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: define <8 x float> @test_rcp_ps_256_rrkz( ; CHECK-SAME: <8 x float> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 [[MASK]] to <8 x i1> @@ -8241,9 +8210,9 @@ define <8 x float> @test_rcp_ps_256_rrk(<8 x float> %a0, <8 x float> %a1, i8 %ma ; ; CHECK-LABEL: define <8 x float> @test_rcp_ps_256_rrk( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8 [[MASK]] to <8 x i1> ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer @@ -8283,7 +8252,7 @@ define <4 x float> @test_rcp_ps_128_rrkz(<4 x float> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x float> @test_rcp_ps_128_rrkz( ; CHECK-SAME: <4 x float> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i4 @@ -8309,9 +8278,9 @@ define <4 x float> @test_rcp_ps_128_rrk(<4 x float> %a0, <4 x float> %a1, i8 %ma ; ; CHECK-LABEL: define <4 x float> @test_rcp_ps_128_rrk( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), 
align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i4 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> @@ -8355,7 +8324,7 @@ define <4 x double> @test_rsqrt_pd_256_rrkz(<4 x double> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x double> @test_rsqrt_pd_256_rrkz( ; CHECK-SAME: <4 x double> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i4 @@ -8381,9 +8350,9 @@ define <4 x double> @test_rsqrt_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i ; ; CHECK-LABEL: define <4 x double> @test_rsqrt_pd_256_rrk( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i4 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> @@ -8424,7 +8393,7 @@ define <2 x double> @test_rsqrt_pd_128_rrkz(<2 x double> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: define <2 x double> @test_rsqrt_pd_128_rrkz( ; CHECK-SAME: <2 x double> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i2 @@ -8450,9 +8419,9 @@ define <2 x double> @test_rsqrt_pd_128_rrk(<2 x double> %a0, <2 x double> %a1, i ; ; CHECK-LABEL: define <2 x double> @test_rsqrt_pd_128_rrk( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i2 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i2 [[TMP4]] to <2 x i1> @@ -8496,7 +8465,7 @@ define <4 x double> @test_rcp_pd_256_rrkz(<4 x double> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: define <4 x double> @test_rcp_pd_256_rrkz( ; CHECK-SAME: <4 x double> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i4 @@ -8522,9 +8491,9 @@ define <4 x double> @test_rcp_pd_256_rrk(<4 x double> %a0, <4 x double> %a1, i8 ; ; CHECK-LABEL: define <4 x double> @test_rcp_pd_256_rrk( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i4 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1> @@ -8565,7 +8534,7 @@ define <2 x double> @test_rcp_pd_128_rrkz(<2 x double> %a0, i8 %mask) #0 { ; ; CHECK-LABEL: define <2 x double> @test_rcp_pd_128_rrkz( ; CHECK-SAME: <2 x double> [[A0:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc i8 [[MASK]] to i2 @@ -8591,9 +8560,9 @@ define <2 x double> @test_rcp_pd_128_rrk(<2 x double> %a0, <2 x double> %a1, i8 ; ; CHECK-LABEL: define <2 x double> @test_rcp_pd_128_rrk( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = trunc i8 [[MASK]] to i2 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i2 [[TMP4]] to <2 x i1> @@ -8623,7 +8592,7 @@ define <4 x double>@test_int_x86_avx512_permvar_df_256(<4 x double> %x0, <4 x i6 ; 
CHECK-LABEL: define <4 x double> @test_int_x86_avx512_permvar_df_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x double> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP5]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0 @@ -8648,9 +8617,9 @@ define <4 x double>@test_int_x86_avx512_mask_permvar_df_256(<4 x double> %x0, <4 ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_permvar_df_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP14:%.*]] = bitcast <4 x i64> [[TMP8]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP14]], 0 @@ -8690,8 +8659,8 @@ define <4 x double>@test_int_x86_avx512_maskz_permvar_df_256(<4 x double> %x0, < ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_maskz_permvar_df_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x i64> [[X1:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i64> [[TMP10]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP13]], 0 @@ -8731,7 +8700,7 @@ define <4 x i64>@test_int_x86_avx512_permvar_di_256(<4 x i64> %x0, <4 x i64> %x1 ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_permvar_di_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64> [[X0]], <4 x i64> [[X1]]) @@ -8747,9 +8716,9 @@ define <4 x i64>@test_int_x86_avx512_mask_permvar_di_256(<4 x i64> %x0, <4 x i64 ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_mask_permvar_di_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP5]], [[TMP9]] ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64> [[X0]], <4 x i64> [[X1]]) @@ -8778,8 +8747,8 @@ define <4 x i64>@test_int_x86_avx512_maskz_permvar_di_256(<4 x i64> %x0, <4 x i6 ; CHECK-LABEL: define <4 x i64> @test_int_x86_avx512_maskz_permvar_di_256( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP8]], [[TMP9]] ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.permvar.di.256(<4 x i64> [[X0]], <4 x i64> [[X1]]) @@ -8810,9 +8779,9 @@ define <2 x double>@test_int_x86_avx512_mask_fixupimm_pd_128(<2 x double> %x0, < ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask_fixupimm_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = 
load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -8877,9 +8846,9 @@ define <2 x double>@test_int_x86_avx512_maskz_fixupimm_pd_128(<2 x double> %x0, ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_maskz_fixupimm_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -8929,9 +8898,9 @@ define <4 x double>@test_int_x86_avx512_mask_fixupimm_pd_256(<4 x double> %x0, < ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask_fixupimm_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -8996,9 +8965,9 @@ define <4 x double>@test_int_x86_avx512_maskz_fixupimm_pd_256(<4 x double> %x0, ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_maskz_fixupimm_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x i64> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; 
CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -9063,9 +9032,9 @@ define <4 x float>@test_int_x86_avx512_mask_fixupimm_ps_128(<4 x float> %x0, <4 ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask_fixupimm_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -9130,9 +9099,9 @@ define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ps_128(<4 x float> %x0, <4 ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_maskz_fixupimm_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0 @@ -9197,9 +9166,9 @@ define <8 x float>@test_int_x86_avx512_mask_fixupimm_ps_256(<8 x float> %x0, <8 ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask_fixupimm_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -9264,9 +9233,9 @@ define <8 x float>@test_int_x86_avx512_maskz_fixupimm_ps_256(<8 x float> %x0, <8 ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_maskz_fixupimm_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x i32> [[X2:%.*]], i8 [[X4:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0 @@ -9328,7 +9297,7 @@ define <2 x i64> @test_x86_avx512_psra_q_128(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: define <2 x i64> @test_x86_avx512_psra_q_128( ; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -9349,10 +9318,10 @@ define <2 x i64> @test_x86_avx512_mask_psra_q_128(<2 x i64> %a0, <2 x i64> %a1, ; CHECK-LABEL: define <2 x i64> @test_x86_avx512_mask_psra_q_128( ; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], <2 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP7:%.*]] = trunc i128 [[TMP6]] to i64 @@ -9389,9 +9358,9 @@ define <2 x i64> @test_x86_avx512_maskz_psra_q_128(<2 x i64> %a0, <2 x i64> %a1, ; CHECK-LABEL: define <2 x i64> @test_x86_avx512_maskz_psra_q_128( ; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP17:%.*]] = trunc i128 [[TMP16]] to i64 @@ -9430,7 +9399,7 @@ define <4 x i64> @test_x86_avx512_psra_q_256(<4 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: define <4 x i64> @test_x86_avx512_psra_q_256( ; CHECK-SAME: <4 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -9451,10 +9420,10 @@ define <4 x i64> @test_x86_avx512_mask_psra_q_256(<4 x i64> %a0, <2 x i64> %a1, ; CHECK-LABEL: define <4 x i64> @test_x86_avx512_mask_psra_q_256( ; CHECK-SAME: <4 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x 
i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP7:%.*]] = trunc i128 [[TMP6]] to i64 @@ -9491,9 +9460,9 @@ define <4 x i64> @test_x86_avx512_maskz_psra_q_256(<4 x i64> %a0, <2 x i64> %a1, ; CHECK-LABEL: define <4 x i64> @test_x86_avx512_maskz_psra_q_256( ; CHECK-SAME: <4 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP16:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP17:%.*]] = trunc i128 [[TMP16]] to i64 @@ -9547,9 +9516,9 @@ define <2 x i64> @test_x86_avx512_mask_psrai_q_128(<2 x i64> %a0, <2 x i64> %pas ; CHECK-LABEL: define <2 x i64> @test_x86_avx512_mask_psrai_q_128( ; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.avx512.psrai.q.128(<2 x i64> [[TMP5]], i32 7) ; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer @@ -9581,8 +9550,8 @@ define <2 x i64> @test_x86_avx512_maskz_psrai_q_128(<2 x i64> %a0, i8 %mask, i8 ; CHECK-LABEL: define <2 x i64> @test_x86_avx512_maskz_psrai_q_128( ; CHECK-SAME: <2 x i64> [[A0:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 
8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.avx512.psrai.q.128(<2 x i64> [[TMP9]], i32 7) ; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer @@ -9631,9 +9600,9 @@ define <4 x i64> @test_x86_avx512_mask_psrai_q_256(<4 x i64> %a0, <4 x i64> %pas ; CHECK-LABEL: define <4 x i64> @test_x86_avx512_mask_psrai_q_256( ; CHECK-SAME: <4 x i64> [[A0:%.*]], <4 x i64> [[PASSTHRU:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.psrai.q.256(<4 x i64> [[TMP5]], i32 7) ; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer @@ -9665,8 +9634,8 @@ define <4 x i64> @test_x86_avx512_maskz_psrai_q_256(<4 x i64> %a0, i8 %mask, i8 ; CHECK-LABEL: define <4 x i64> @test_x86_avx512_maskz_psrai_q_256( ; CHECK-SAME: <4 x i64> [[A0:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx512.psrai.q.256(<4 x i64> [[TMP9]], i32 7) ; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer @@ -9699,7 +9668,7 @@ define <2 x i64> @test_x86_avx512_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: define <2 x i64> @test_x86_avx512_psrav_q_128( ; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext 
<2 x i1> [[TMP3]] to <2 x i64> @@ -9718,10 +9687,10 @@ define <2 x i64> @test_x86_avx512_mask_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1, ; CHECK-LABEL: define <2 x i64> @test_x86_avx512_mask_psrav_q_128( ; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], <2 x i64> [[A2:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = sext <2 x i1> [[TMP6]] to <2 x i64> @@ -9756,9 +9725,9 @@ define <2 x i64> @test_x86_avx512_maskz_psrav_q_128(<2 x i64> %a0, <2 x i64> %a1 ; CHECK-LABEL: define <2 x i64> @test_x86_avx512_maskz_psrav_q_128( ; CHECK-SAME: <2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP14:%.*]] = sext <2 x i1> [[TMP13]] to <2 x i64> @@ -9794,7 +9763,7 @@ define <4 x i64> @test_x86_avx512_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: define <4 x i64> @test_x86_avx512_psrav_q_256( ; CHECK-SAME: <4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64> @@ 
-9813,10 +9782,10 @@ define <4 x i64> @test_x86_avx512_mask_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1, ; CHECK-LABEL: define <4 x i64> @test_x86_avx512_mask_psrav_q_256( ; CHECK-SAME: <4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]], <4 x i64> [[A2:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = sext <4 x i1> [[TMP6]] to <4 x i64> @@ -9851,9 +9820,9 @@ define <4 x i64> @test_x86_avx512_maskz_psrav_q_256(<4 x i64> %a0, <4 x i64> %a1 ; CHECK-LABEL: define <4 x i64> @test_x86_avx512_maskz_psrav_q_256( ; CHECK-SAME: <4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]], i8 [[MASK:%.*]], i8 [[MASK2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[TMP14:%.*]] = sext <4 x i1> [[TMP13]] to <4 x i64> @@ -9889,8 +9858,8 @@ define <8 x float> @test_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x flo ; CHECK-LABEL: define <8 x float> @test_vfmadd256_ps( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP4]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -9907,9 +9876,9 @@ define <8 x float> @test_mask_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 ; CHECK-LABEL: define <8 x float> @test_mask_vfmadd256_ps( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -9937,8 +9906,8 @@ define <4 x float> @test_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x flo ; CHECK-LABEL: define <4 x float> @test_vfmadd128_ps( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP4]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -9955,9 +9924,9 @@ define <4 x float> @test_mask_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 ; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -9988,8 +9957,8 @@ define <4 x double> @test_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 x dou ; CHECK-LABEL: define <4 x double> @test_fmadd256_pd( ; CHECK-SAME: <4 x double> [[A:%.*]], <4 x double> [[B:%.*]], <4 x double> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP4]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP3]] @@ -10006,9 +9975,9 @@ define <4 x double> @test_mask_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 ; CHECK-LABEL: define <4 x double> @test_mask_fmadd256_pd( ; CHECK-SAME: <4 x double> [[A:%.*]], <4 x double> [[B:%.*]], <4 x double> [[C:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP3]] @@ -10039,8 +10008,8 @@ define <2 x double> @test_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2 x dou ; CHECK-LABEL: define <2 x double> @test_fmadd128_pd( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP4]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP3]] @@ -10057,9 +10026,9 @@ define <2 x double> @test_mask_fmadd128_pd(<2 x 
double> %a, <2 x double> %b, <2 ; CHECK-LABEL: define <2 x double> @test_mask_fmadd128_pd( ; CHECK-SAME: <2 x double> [[A:%.*]], <2 x double> [[B:%.*]], <2 x double> [[C:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP3]] @@ -10091,9 +10060,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmadd_pd_128(<2 x double> %x0, <2 ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfmadd_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP3]] @@ -10125,9 +10094,9 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmadd_pd_128(<2 x double> %x0, <2 ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_maskz_vfmadd_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP9]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP3]] @@ -10158,9 +10127,9 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmadd_pd_256(<4 x double> %x0, <4 ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfmadd_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP3]] @@ -10192,9 +10161,9 @@ define <4 x double>@test_int_x86_avx512_maskz_vfmadd_pd_256(<4 x double> %x0, <4 ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_maskz_vfmadd_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP9]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP3]] @@ -10225,9 +10194,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ps_128(<4 x float> %x0, <4 x ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), 
align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -10259,9 +10228,9 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ps_128(<4 x float> %x0, <4 x ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_maskz_vfmadd_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP9]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -10292,9 +10261,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmadd_ps_256(<8 x float> %x0, <8 x ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfmadd_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP7]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -10323,9 +10292,9 @@ define <8 x float>@test_int_x86_avx512_maskz_vfmadd_ps_256(<8 x float> %x0, <8 x ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_maskz_vfmadd_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP9]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -10352,10 +10321,10 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmsub_pd_128(<2 x double> %x0, <2 ; ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfmsub_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP8]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[X2]] @@ -10389,10 +10358,10 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmsub_pd_256(<4 x double> %x0, <4 ; ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfmsub_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP8]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[X2]] @@ -10426,10 +10395,10 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ps_128(<4 x float> %x0, <4 x ; ; CHECK-LABEL: define <4 x float> 
@test_int_x86_avx512_mask3_vfmsub_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP8]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[X2]] @@ -10463,10 +10432,10 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmsub_ps_256(<8 x float> %x0, <8 x ; ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfmsub_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP8]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat (float -0.000000e+00), [[X2]] @@ -10496,9 +10465,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmsub_ps_256(<8 x float> %x0, <8 x define <8 x float> @test_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 { ; CHECK-LABEL: define <8 x float> @test_vfnmadd256_ps( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP4]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat 
(float -0.000000e+00), [[A1]] @@ -10517,10 +10486,10 @@ define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 ; ; CHECK-LABEL: define <8 x float> @test_mask_vfnmadd256_ps( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP8]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat (float -0.000000e+00), [[A1]] @@ -10550,9 +10519,9 @@ define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 define <4 x float> @test_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 { ; CHECK-LABEL: define <4 x float> @test_vfnmadd128_ps( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP4]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[A1]] @@ -10571,10 +10540,10 @@ define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 ; ; CHECK-LABEL: define <4 x float> @test_mask_vfnmadd128_ps( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call 
void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP8]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[A1]] @@ -10607,9 +10576,9 @@ define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 define <4 x double> @test_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 { ; CHECK-LABEL: define <4 x double> @test_vfnmadd256_pd( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP4]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[A1]] @@ -10628,10 +10597,10 @@ define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, ; ; CHECK-LABEL: define <4 x double> @test_mask_vfnmadd256_pd( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP8]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[A1]] @@ -10664,9 +10633,9 @@ define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, define <2 x double> @test_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 { ; CHECK-LABEL: define <2 x double> @test_vfnmadd128_pd( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call 
void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP4]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[A1]] @@ -10685,10 +10654,10 @@ define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, ; ; CHECK-LABEL: define <2 x double> @test_mask_vfnmadd128_pd( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP8]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[A1]] @@ -10721,8 +10690,8 @@ define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, define <8 x float> @test_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 { ; CHECK-LABEL: define <8 x float> @test_vfnmsub256_ps( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP4]] @@ -10745,10 +10714,10 @@ define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 ; ; CHECK-LABEL: define <8 x float> @test_mask_vfnmsub256_ps( ; CHECK-SAME: <8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP5]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat (float -0.000000e+00), [[A1]] @@ -10781,8 +10750,8 @@ define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 define <4 x float> @test_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 { ; CHECK-LABEL: define <4 x float> @test_vfnmsub128_ps( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP4]] @@ -10805,10 +10774,10 @@ define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 ; ; CHECK-LABEL: define <4 x float> @test_mask_vfnmsub128_ps( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP5]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[A1]] @@ -10844,8 +10813,8 @@ define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 define <4 x double> @test_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 { ; CHECK-LABEL: define <4 x double> @test_vfnmsub256_pd( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP4]] @@ -10868,10 +10837,10 @@ define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, ; ; CHECK-LABEL: define <4 x double> @test_mask_vfnmsub256_pd( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP5]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[A1]] @@ -10907,8 +10876,8 @@ define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, define <2 x double> @test_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 { ; CHECK-LABEL: define <2 x double> @test_vfnmsub128_pd( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP4]] @@ -10931,10 +10900,10 @@ define <2 x double> @test_mask_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1, ; ; CHECK-LABEL: define <2 x double> @test_mask_vfnmsub128_pd( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP5]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[A1]] @@ -10972,9 +10941,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_128(<2 x double> %x0, < ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfnmsub_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> zeroinitializer, [[TMP5]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> splat (double -0.000000e+00), [[X0]] @@ -11012,9 +10981,9 @@ define <4 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_256(<4 x double> %x0, < ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfnmsub_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> zeroinitializer, [[TMP5]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x double> splat (double -0.000000e+00), [[X0]] @@ -11052,9 +11021,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_128(<4 x float> %x0, <4 ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask3_vfnmsub_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) 
to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP5]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <4 x float> splat (float -0.000000e+00), [[X0]] @@ -11092,9 +11061,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_256(<8 x float> %x0, <8 ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfnmsub_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> zeroinitializer, [[TMP5]] ; CHECK-NEXT: [[TMP1:%.*]] = fsub <8 x float> splat (float -0.000000e+00), [[X0]] @@ -11128,8 +11097,8 @@ define <8 x float> @test_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8 x flo ; CHECK-LABEL: define <8 x float> @test_fmaddsub256_ps( ; CHECK-SAME: <8 x float> [[A:%.*]], <8 x float> [[B:%.*]], <8 x float> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP5]], [[TMP6]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP7]] @@ -11156,9 +11125,9 @@ define <8 x float> @test_mask_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8 ; CHECK-LABEL: define <8 x float> @test_mask_fmaddsub256_ps( ; CHECK-SAME: <8 x float> [[A:%.*]], <8 x float> [[B:%.*]], <8 x float> [[C:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP13]] @@ -11196,8 +11165,8 @@ define <4 x float> @test_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4 x flo ; CHECK-LABEL: define <4 x float> @test_fmaddsub128_ps( ; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP5]], [[TMP6]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP7]] @@ -11224,9 +11193,9 @@ define <4 x float> @test_mask_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4 ; CHECK-LABEL: define <4 x float> @test_mask_fmaddsub128_ps( ; CHECK-SAME: <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x float> [[C:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP13]] @@ -11267,8 +11236,8 @@ define <4 x double> @test_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 ; CHECK-LABEL: define <4 x double> @test_vfmaddsub256_pd( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; 
CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP5]], [[TMP6]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP7]] @@ -11295,9 +11264,9 @@ define <4 x double> @test_mask_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a ; CHECK-LABEL: define <4 x double> @test_mask_vfmaddsub256_pd( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP13]] @@ -11338,8 +11307,8 @@ define <2 x double> @test_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 ; CHECK-LABEL: define <2 x double> @test_vfmaddsub128_pd( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP5]], [[TMP6]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP7]] @@ -11366,9 +11335,9 @@ define <2 x double> @test_mask_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a ; CHECK-LABEL: define <2 x double> @test_mask_vfmaddsub128_pd( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), 
align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP13]] @@ -11410,9 +11379,9 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_128(<2 x double> %x0, ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfmaddsub_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP13]] @@ -11454,9 +11423,9 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_128(<2 x double> %x0, ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_maskz_vfmaddsub_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP6]], [[TMP12]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP13]] @@ -11497,9 +11466,9 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_256(<4 x double> %x0, ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfmaddsub_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] 
= load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP13]] @@ -11541,9 +11510,9 @@ define <4 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_256(<4 x double> %x0, ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_maskz_vfmaddsub_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP6]], [[TMP12]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP13]] @@ -11584,9 +11553,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_128(<4 x float> %x0, < ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask3_vfmaddsub_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP13]] @@ -11628,9 +11597,9 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_128(<4 x float> %x0, < ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_maskz_vfmaddsub_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP6]], [[TMP12]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP13]] @@ -11671,9 +11640,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_256(<8 x float> %x0, < ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfmaddsub_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP13]] @@ -11712,9 +11681,9 @@ define <8 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_256(<8 x float> %x0, < ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_maskz_vfmaddsub_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP6]], [[TMP12]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP13]] @@ -11752,9 +11721,9 @@ define <2 x 
double>@test_int_x86_avx512_mask3_vfmsubadd_pd_128(<2 x double> %x0, ; CHECK-LABEL: define <2 x double> @test_int_x86_avx512_mask3_vfmsubadd_pd_128( ; CHECK-SAME: <2 x double> [[X0:%.*]], <2 x double> [[X1:%.*]], <2 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP13]] @@ -11796,9 +11765,9 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_256(<4 x double> %x0, ; CHECK-LABEL: define <4 x double> @test_int_x86_avx512_mask3_vfmsubadd_pd_256( ; CHECK-SAME: <4 x double> [[X0:%.*]], <4 x double> [[X1:%.*]], <4 x double> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP13]] @@ -11840,9 +11809,9 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_128(<4 x float> %x0, < ; CHECK-LABEL: define <4 x float> @test_int_x86_avx512_mask3_vfmsubadd_ps_128( ; CHECK-SAME: <4 x float> [[X0:%.*]], <4 x float> [[X1:%.*]], <4 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; 
CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP13]] @@ -11884,9 +11853,9 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_256(<8 x float> %x0, < ; CHECK-LABEL: define <8 x float> @test_int_x86_avx512_mask3_vfmsubadd_ps_256( ; CHECK-SAME: <8 x float> [[X0:%.*]], <8 x float> [[X1:%.*]], <8 x float> [[X2:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP6]], [[TMP10]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP13]] @@ -11924,10 +11893,10 @@ define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1, ; ; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmk( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -11970,10 +11939,10 @@ define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1 ; ; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmka( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to 
ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -12016,9 +11985,9 @@ define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1 ; ; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmkz( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -12046,9 +12015,9 @@ define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a ; ; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmkza( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -12076,10 +12045,10 @@ define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1, ; ; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmb( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; 
CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -12134,10 +12103,10 @@ define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1 ; ; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmba( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -12192,9 +12161,9 @@ define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1 ; ; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmbz( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -12234,9 +12203,9 @@ define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a ; ; CHECK-LABEL: define <4 x float> @test_mask_vfmadd128_ps_rmbza( ; CHECK-SAME: <4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], 
!prof [[PROF1]] @@ -12276,10 +12245,10 @@ define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> % ; ; CHECK-LABEL: define <2 x double> @test_mask_vfmadd128_pd_rmk( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -12322,9 +12291,9 @@ define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double> ; ; CHECK-LABEL: define <2 x double> @test_mask_vfmadd128_pd_rmkz( ; CHECK-SAME: <2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] @@ -12352,10 +12321,10 @@ define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> % ; ; CHECK-LABEL: define <4 x double> @test_mask_vfmadd256_pd_rmk( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], ptr [[PTR_A2:%.*]], i8 [[MASK:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP10]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], 
!prof [[PROF1]] @@ -12398,9 +12367,9 @@ define <4 x double> @test_mask_vfmadd256_pd_rmkz(<4 x double> %a0, <4 x double> ; ; CHECK-LABEL: define <4 x double> @test_mask_vfmadd256_pd_rmkz( ; CHECK-SAME: <4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], ptr [[PTR_A2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll index 216096199fd06..5e937485ff282 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics-upgrade.ll @@ -16,8 +16,8 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpbusd.256(<8 x i32>, <8 x i32>, <8 x define <8 x i32>@test_int_x86_avx512_vpdpbusd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpbusd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP2]] to <32 x i8> @@ -49,11 +49,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpbusd_256(<8 x i32> %x0, <8 x i32> %x1, define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]] @@ -141,8 +141,8 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpbusd.128(<4 x i32>, <4 x i32>, <4 x define <4 x i32>@test_int_x86_avx512_vpdpbusd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpbusd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> @@ -174,11 +174,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpbusd_128(<4 x i32> %x0, <4 x i32> %x1, define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -270,8 +270,8 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpbusds.256(<8 x i32>, <8 x i32>, <8 define <8 x i32>@test_int_x86_avx512_vpdpbusds_256(<8 x i32> %x0, <8 x 
i32> %x1, <8 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpbusds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP2]] to <32 x i8> @@ -303,11 +303,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpbusds_256(<8 x i32> %x0, <8 x i32> %x1, define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -395,8 +395,8 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpbusds.128(<4 x i32>, <4 x i32>, <4 define <4 x i32>@test_int_x86_avx512_vpdpbusds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpbusds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call 
void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8> @@ -428,11 +428,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpbusds_128(<4 x i32> %x0, <4 x i32> %x1, define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -524,8 +524,8 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpwssd.256(<8 x i32>, <8 x i32>, <8 x define <8 x i32>@test_int_x86_avx512_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpwssd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16> @@ -557,11 +557,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -649,8 +649,8 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpwssd.128(<4 x i32>, <4 x i32>, <4 x define <4 x i32>@test_int_x86_avx512_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpwssd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16> @@ -682,11 +682,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -779,8 +779,8 @@ declare <8 x i32> @llvm.x86.avx512.maskz.vpdpwssds.256(<8 x i32>, <8 x i32>, <8 define <8 x i32>@test_int_x86_avx512_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpwssds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16> @@ -812,11 +812,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -904,8 +904,8 @@ declare <4 x i32> @llvm.x86.avx512.maskz.vpdpwssds.128(<4 x i32>, <4 x i32>, <4 define <4 x i32>@test_int_x86_avx512_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpwssds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls 
to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16> @@ -937,11 +937,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll index 26b1306e03894..1d3046804b74f 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vl_vnni-intrinsics.ll @@ -15,8 +15,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpbusd.256(<8 x i32>, <32 x i8>, <32 x i8>) define <8 x i32>@test_int_x86_avx512_vpdpbusd_256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpbusd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[X2:%.*]]) #[[ATTR1:[0-9]+]] { -; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP24]], 
zeroinitializer @@ -44,11 +44,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpbusd_256(<8 x i32> %x0, <32 x i8> %x1, define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusd_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP33:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP33:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: [[TMP40:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: [[TMP40:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]] @@ -131,8 +131,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32>, <16 x i8>, <16 x i8>) define <4 x i32>@test_int_x86_avx512_vpdpbusd_128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpbusd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP24]], zeroinitializer @@ -160,11 +160,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpbusd_128(<4 x i32> %x0, <16 x i8> %x1, define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusd_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP33:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = 
load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP33:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP40:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP40:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -253,8 +253,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpbusds.256(<8 x i32>, <32 x i8>, <32 x i8> define <8 x i32>@test_int_x86_avx512_vpdpbusds_256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpbusds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP24]], zeroinitializer @@ -282,11 +282,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpbusds_256(<8 x i32> %x0, <32 x i8> %x1, define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpbusds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP33:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP33:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: [[TMP40:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: [[TMP40:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -369,8 +369,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpbusds.128(<4 x i32>, <16 x i8>, <16 x i8> define <4 x i32>@test_int_x86_avx512_vpdpbusds_128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpbusds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP24]], zeroinitializer @@ -398,11 +398,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpbusds_128(<4 x i32> %x0, <16 x i8> %x1, define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpbusds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP33:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP33:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP40:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP40:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -491,8 +491,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32>, <8 x i32>, <8 x i32>) define <8 x i32>@test_int_x86_avx512_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpwssd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 
8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16> @@ -524,11 +524,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -619,8 +619,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32>, <4 x i32>, <4 x i32>) define <4 x i32>@test_int_x86_avx512_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpwssd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16> @@ -652,11 +652,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> 
[[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -753,8 +753,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpwssds.256(<8 x i32>, <8 x i32>, <8 x i32> define <8 x i32>@test_int_x86_avx512_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx512_vpdpwssds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16> @@ -786,11 +786,11 @@ define <8 x i32>@test_int_x86_avx512_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <8 x i32>, <8 x i32> } @test_int_x86_avx512_mask_vpdpwssds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] 
= load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -881,8 +881,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpwssds.128(<4 x i32>, <4 x i32>, <4 x i32> define <4 x i32>@test_int_x86_avx512_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx512_vpdpwssds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -926,11 +926,11 @@ define <4 x i32>@test_int_x86_avx512_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4, i8 %x3) sanitize_memory { ; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @test_int_x86_avx512_mask_vpdpwssds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]], i8 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll index f6410c6799a55..5c99f8a3a1fb6 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll +++ 
b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics-upgrade.ll @@ -16,8 +16,8 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpbusd.512(<16 x i32>, <16 x i32>, < define <16 x i32>@test_int_x86_avx512_vpdpbusd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpbusd_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[TMP2]] to <64 x i8> @@ -49,11 +49,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpbusd_512(<16 x i32> %x0, <16 x i32> %x define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusd_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory { ; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusd_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]] @@ -141,8 +141,8 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpbusds.512(<16 x i32>, <16 x i32>, define <16 x i32>@test_int_x86_avx512_vpdpbusds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpbusds_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; 
CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i32> [[TMP2]] to <64 x i8> @@ -174,11 +174,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpbusds_512(<16 x i32> %x0, <16 x i32> % define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusds_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory { ; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusds_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -266,8 +266,8 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpwssd.512(<16 x i32>, <16 x i32>, < define <16 x i32>@test_int_x86_avx512_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpwssd_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <16 x i32> [[X1]] to <32 x i16> @@ -299,11 +299,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory { ; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssd_512( ; 
CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -391,8 +391,8 @@ declare <16 x i32> @llvm.x86.avx512.maskz.vpdpwssds.512(<16 x i32>, <16 x i32>, define <16 x i32>@test_int_x86_avx512_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpwssds_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <16 x i32> [[X1]] to <32 x i16> @@ -424,11 +424,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpwssds_512(<16 x i32> %x0, <16 x i32> % define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory { ; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssds_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll index 6d4ce6dec5198..236ff45c6cd08 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx512vnni-intrinsics.ll @@ -15,8 +15,8 @@ declare <16 x i32> @llvm.x86.avx512.vpdpbusd.512(<16 x i32>, <64 x i8>, <64 x i8 define <16 x i32> @test_int_x86_avx512_ask_vpdpbusd_512(<16 x i32> %x0, <64 x i8> %x1, <64 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_ask_vpdpbusd_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i8> [[X2:%.*]]) #[[ATTR1:[0-9]+]] { -; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[TMP24]], zeroinitializer @@ -44,11 +44,11 @@ define <16 x i32> @test_int_x86_avx512_ask_vpdpbusd_512(<16 x i32> %x0, <64 x i8 define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusd_512(<16 x i32> %x0, <64 x i8> %x1, ptr %x2p, <64 x i8> %x4, i16 %x3) sanitize_memory { ; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusd_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <64 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <64 x i8> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP33:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP33:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP40:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP40:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]] @@ -131,8 +131,8 @@ declare <16 x i32> @llvm.x86.avx512.vpdpbusds.512(<16 x i32>, <64 x i8>, <64 x i define <16 x i32>@test_int_x86_avx512_vpdpbusds_512(<16 x i32> %x0, <64 x i8> %x1, <64 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpbusds_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i8> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[TMP24]], zeroinitializer @@ -160,11 +160,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpbusds_512(<16 x i32> %x0, <64 x i8> %x define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusds_512(<16 x i32> %x0, <64 x i8> %x1, ptr %x2p, <64 x i8> %x4, i16 %x3) sanitize_memory { ; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpbusds_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <64 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <64 x i8> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP33:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP33:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP40:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP40:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -247,8 +247,8 @@ declare <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32>, <16 x i32>, <16 x i define <16 x i32>@test_int_x86_avx512_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_vpdpwssd_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: 
[[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <16 x i32> [[X1]] to <32 x i16> @@ -280,11 +280,11 @@ define <16 x i32>@test_int_x86_avx512_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssd_512(<16 x i32> %x0, <16 x i32> %x1, ptr %x2p, <16 x i32> %x4, i16 %x3) sanitize_memory { ; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssd_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] @@ -375,8 +375,8 @@ declare <16 x i32> @llvm.x86.avx512.vpdpwssds.512(<16 x i32>, <16 x i32>, <16 x define <16 x i32>@test_int_x86_avx512_ask_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <16 x i32> @test_int_x86_avx512_ask_vpdpwssds_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP22:%.*]] = bitcast <16 x i32> [[X1]] to <32 x i16> @@ -408,11 +408,11 @@ define <16 x i32>@test_int_x86_avx512_ask_vpdpwssds_512(<16 x i32> %x0, <16 x i3 define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssds_512(<16 x i32> %x0, <16 x i32> %x1, ptr 
%x2p, <16 x i32> %x4, i16 %x3) sanitize_memory { ; CHECK-LABEL: define { <16 x i32>, <16 x i32> } @test_int_x86_avx512_mask_vpdpwssds_512( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i32> [[X4:%.*]], i16 [[X3:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll index 1de2a54486e58..0344fbd5ee2a9 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx_vnni-intrinsics.ll @@ -15,8 +15,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpbusd.256(<8 x i32>, <32 x i8>, <32 x i8>) define <8 x i32>@test_int_x86_avx_vpdpbusd_256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx_vpdpbusd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[X2:%.*]]) #[[ATTR1:[0-9]+]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer @@ -46,8 +46,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32>, <16 x i8>, <16 x i8>) define <4 x i32>@test_int_x86_avx_vpdpbusd_128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx_vpdpbusd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 
8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer @@ -77,8 +77,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpbusds.256(<8 x i32>, <32 x i8>, <32 x i8> define <8 x i32>@test_int_x86_avx_vpdpbusds_256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx_vpdpbusds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer @@ -108,8 +108,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpbusds.128(<4 x i32>, <16 x i8>, <16 x i8> define <4 x i32>@test_int_x86_avx_vpdpbusds_128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx_vpdpbusds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer @@ -139,8 +139,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32>, <8 x i32>, <8 x i32>) define <8 x i32>@test_int_x86_avx_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx_vpdpwssd_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x 
i32> [[X1]] to <16 x i16> @@ -174,8 +174,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32>, <4 x i32>, <4 x i32>) define <4 x i32>@test_int_x86_avx_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx_vpdpwssd_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16> @@ -209,8 +209,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpwssds.256(<8 x i32>, <8 x i32>, <8 x i32> define <8 x i32>@test_int_x86_avx_vpdpwssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx_vpdpwssds_256( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[X1]] to <16 x i16> @@ -244,8 +244,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpwssds.128(<4 x i32>, <4 x i32>, <4 x i32> define <4 x i32>@test_int_x86_avx_vpdpwssds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx_vpdpwssds_128( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[X2:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[X1]] to <8 x i16> diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll index 66cbebee80dc3..707b46bb8686e 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint16-intrinsics.ll @@ -26,8 
+26,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwsud_128(<4 x i32> %A, <4 x i32> %B, <4 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwsud_128( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -44,8 +44,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwsud_256(<8 x i32> %A, <8 x i32> %B, <8 ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwsud_256( ; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -62,8 +62,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwsuds_128(<4 x i32> %A, <4 x i32> %B, <4 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwsuds_128( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -80,8 +80,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwsuds_256(<8 x i32> %A, <8 x i32> %B, <8 ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwsuds_256( ; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -98,8 +98,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwusd_128(<4 x i32> %A, <4 x i32> %B, <4 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwusd_128( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -116,8 +116,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwusd_256(<8 x i32> %A, <8 x i32> %B, <8 ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwusd_256( ; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -134,8 +134,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwusds_128(<4 x i32> %A, <4 x i32> %B, <4 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwusds_128( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -152,8 +152,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwusds_256(<8 x i32> %A, <8 x i32> %B, <8 ; 
CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwusds_256( ; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -170,8 +170,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwuud_128(<4 x i32> %A, <4 x i32> %B, <4 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwuud_128( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -188,8 +188,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwuud_256(<8 x i32> %A, <8 x i32> %B, <8 ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwuud_256( ; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] @@ -206,8 +206,8 @@ define <4 x i32> @test_int_x86_avx2_vpdpwuuds_128(<4 x i32> %A, <4 x i32> %B, <4 ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpwuuds_128( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x 
i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]] @@ -224,8 +224,8 @@ define <8 x i32> @test_int_x86_avx2_vpdpwuuds_256(<8 x i32> %A, <8 x i32> %B, <8 ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpwuuds_256( ; CHECK-SAME: <8 x i32> [[A:%.*]], <8 x i32> [[B:%.*]], <8 x i32> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll index 3df0f1df153c5..4a7050790007b 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll @@ -10,15 +10,15 @@ target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" -declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory { +define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbssd_128( -; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1:[0-9]+]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP32:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], 
!prof [[PROF1:![0-9]+]] @@ -26,22 +26,18 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, pt ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16 +; CHECK-NEXT: [[TMP30:%.*]] = load <16 x i8>, ptr [[X2P]], align 16 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16 -; CHECK-NEXT: [[TMP29:%.*]] = bitcast <4 x i32> [[X1]] to <16 x i8> -; CHECK-NEXT: [[TMP30:%.*]] = bitcast <4 x i32> [[X2]] to <16 x i8> -; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> -; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i32> [[_MSLD]] to <16 x i8> -; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <16 x i8> [[TMP12]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16 ; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <16 x i8> [[TMP13]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i8> [[TMP29]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <16 x i8> [[TMP30]], zeroinitializer -; CHECK-NEXT: [[TMP18:%.*]] = and <16 x i1> [[TMP14]], [[TMP15]] -; CHECK-NEXT: [[TMP19:%.*]] = and <16 x i1> [[TMP16]], [[TMP15]] -; CHECK-NEXT: [[TMP20:%.*]] = and <16 x i1> [[TMP14]], [[TMP17]] +; CHECK-NEXT: [[TMP18:%.*]] = and <16 x i1> [[TMP15]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = and <16 x i1> [[TMP12]], [[TMP16]] +; CHECK-NEXT: [[TMP20:%.*]] = and <16 x i1> [[TMP15]], [[TMP17]] ; CHECK-NEXT: [[TMP21:%.*]] = or <16 x i1> [[TMP18]], [[TMP19]] ; CHECK-NEXT: [[TMP22:%.*]] = or <16 x i1> [[TMP21]], [[TMP20]] ; CHECK-NEXT: [[TMP23:%.*]] = sext <16 x i1> [[TMP22]] to <16 x i8> @@ -49,18 +45,14 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, pt ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <4 x i32> [[TMP24]], zeroinitializer ; CHECK-NEXT: [[TMP28:%.*]] = sext <4 x i1> [[TMP25]] to <4 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP28]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]]) -; CHECK-NEXT: [[TMP31:%.*]] = bitcast <4 x i32> [[X1]] to <16 x i8> -; CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x i32> [[X4]] to <16 x i8> -; CHECK-NEXT: [[TMP33:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> -; CHECK-NEXT: [[TMP34:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8> -; CHECK-NEXT: [[TMP35:%.*]] = icmp ne <16 x i8> [[TMP33]], zeroinitializer -; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <16 x i8> [[TMP34]], zeroinitializer -; CHECK-NEXT: [[TMP37:%.*]] = icmp ne <16 x i8> [[TMP31]], zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[TMP30]]) +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP13]], zeroinitializer ; CHECK-NEXT: [[TMP38:%.*]] = icmp ne <16 x i8> [[TMP32]], zeroinitializer -; CHECK-NEXT: [[TMP39:%.*]] = and <16 x i1> [[TMP35]], [[TMP36]] -; CHECK-NEXT: [[TMP40:%.*]] = and <16 x i1> [[TMP37]], [[TMP36]] -; CHECK-NEXT: [[TMP41:%.*]] = and <16 x i1> [[TMP35]], [[TMP38]] +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP39:%.*]] = and <16 x i1> 
[[TMP26]], [[TMP38]] +; CHECK-NEXT: [[TMP40:%.*]] = and <16 x i1> [[TMP27]], [[TMP38]] +; CHECK-NEXT: [[TMP41:%.*]] = and <16 x i1> [[TMP26]], [[TMP31]] ; CHECK-NEXT: [[TMP42:%.*]] = or <16 x i1> [[TMP39]], [[TMP40]] ; CHECK-NEXT: [[TMP43:%.*]] = or <16 x i1> [[TMP42]], [[TMP41]] ; CHECK-NEXT: [[TMP44:%.*]] = sext <16 x i1> [[TMP43]] to <16 x i8> @@ -68,28 +60,28 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, pt ; CHECK-NEXT: [[TMP46:%.*]] = icmp ne <4 x i32> [[TMP45]], zeroinitializer ; CHECK-NEXT: [[TMP49:%.*]] = sext <4 x i1> [[TMP46]] to <4 x i32> ; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP49]], [[TMP5]] -; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]]) +; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory { +define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbssds_128( -; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP32:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -97,22 +89,18 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, p ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: 
-; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16 +; CHECK-NEXT: [[TMP30:%.*]] = load <16 x i8>, ptr [[X2P]], align 16 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16 -; CHECK-NEXT: [[TMP29:%.*]] = bitcast <4 x i32> [[X1]] to <16 x i8> -; CHECK-NEXT: [[TMP30:%.*]] = bitcast <4 x i32> [[X2]] to <16 x i8> -; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> -; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i32> [[_MSLD]] to <16 x i8> -; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <16 x i8> [[TMP12]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16 ; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <16 x i8> [[TMP13]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i8> [[TMP29]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <16 x i8> [[TMP30]], zeroinitializer -; CHECK-NEXT: [[TMP18:%.*]] = and <16 x i1> [[TMP14]], [[TMP15]] -; CHECK-NEXT: [[TMP19:%.*]] = and <16 x i1> [[TMP16]], [[TMP15]] -; CHECK-NEXT: [[TMP20:%.*]] = and <16 x i1> [[TMP14]], [[TMP17]] +; CHECK-NEXT: [[TMP18:%.*]] = and <16 x i1> [[TMP15]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = and <16 x i1> [[TMP12]], [[TMP16]] +; CHECK-NEXT: [[TMP20:%.*]] = and <16 x i1> [[TMP15]], [[TMP17]] ; CHECK-NEXT: [[TMP21:%.*]] = or <16 x i1> [[TMP18]], [[TMP19]] ; CHECK-NEXT: [[TMP22:%.*]] = or <16 x i1> [[TMP21]], [[TMP20]] ; CHECK-NEXT: [[TMP23:%.*]] = sext <16 x i1> [[TMP22]] to <16 x i8> @@ -120,18 +108,14 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, p ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <4 x i32> [[TMP24]], zeroinitializer ; CHECK-NEXT: [[TMP28:%.*]] = sext <4 x i1> [[TMP25]] to <4 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP28]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]]) -; CHECK-NEXT: [[TMP31:%.*]] = bitcast <4 x i32> [[X1]] to <16 x i8> -; CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x i32> [[X4]] to <16 x i8> -; CHECK-NEXT: [[TMP33:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8> -; CHECK-NEXT: [[TMP34:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8> -; CHECK-NEXT: [[TMP35:%.*]] = icmp ne <16 x i8> [[TMP33]], zeroinitializer -; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <16 x i8> [[TMP34]], zeroinitializer -; CHECK-NEXT: [[TMP37:%.*]] = icmp ne <16 x i8> [[TMP31]], zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[TMP30]]) +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP13]], zeroinitializer ; CHECK-NEXT: [[TMP38:%.*]] = icmp ne <16 x i8> [[TMP32]], zeroinitializer -; CHECK-NEXT: [[TMP39:%.*]] = and <16 x i1> [[TMP35]], [[TMP36]] -; CHECK-NEXT: [[TMP40:%.*]] = and <16 x i1> [[TMP37]], [[TMP36]] -; CHECK-NEXT: [[TMP41:%.*]] = and <16 x i1> [[TMP35]], [[TMP38]] +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP39:%.*]] = and <16 x i1> [[TMP26]], [[TMP38]] +; CHECK-NEXT: [[TMP40:%.*]] = and <16 x i1> [[TMP27]], [[TMP38]] +; CHECK-NEXT: [[TMP41:%.*]] = and <16 x i1> [[TMP26]], [[TMP31]] ; CHECK-NEXT: [[TMP42:%.*]] = or <16 x i1> [[TMP39]], [[TMP40]] ; CHECK-NEXT: 
[[TMP43:%.*]] = or <16 x i1> [[TMP42]], [[TMP41]] ; CHECK-NEXT: [[TMP44:%.*]] = sext <16 x i1> [[TMP43]] to <16 x i8> @@ -139,28 +123,28 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, p ; CHECK-NEXT: [[TMP46:%.*]] = icmp ne <4 x i32> [[TMP45]], zeroinitializer ; CHECK-NEXT: [[TMP49:%.*]] = sext <4 x i1> [[TMP46]] to <4 x i32> ; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP49]], [[TMP5]] -; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]]) +; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory { +define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbssd_256( -; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP32:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -168,22 +152,18 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, pt ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32 +; CHECK-NEXT: [[TMP30:%.*]] = load <32 x i8>, ptr [[X2P]], align 32 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor 
i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32 -; CHECK-NEXT: [[TMP29:%.*]] = bitcast <8 x i32> [[X1]] to <32 x i8> -; CHECK-NEXT: [[TMP30:%.*]] = bitcast <8 x i32> [[X2]] to <32 x i8> -; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8> -; CHECK-NEXT: [[TMP13:%.*]] = bitcast <8 x i32> [[_MSLD]] to <32 x i8> -; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <32 x i8> [[TMP12]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32 ; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <32 x i8> [[TMP13]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <32 x i8> [[TMP29]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <32 x i8> [[TMP30]], zeroinitializer -; CHECK-NEXT: [[TMP18:%.*]] = and <32 x i1> [[TMP14]], [[TMP15]] -; CHECK-NEXT: [[TMP19:%.*]] = and <32 x i1> [[TMP16]], [[TMP15]] -; CHECK-NEXT: [[TMP20:%.*]] = and <32 x i1> [[TMP14]], [[TMP17]] +; CHECK-NEXT: [[TMP18:%.*]] = and <32 x i1> [[TMP15]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = and <32 x i1> [[TMP12]], [[TMP16]] +; CHECK-NEXT: [[TMP20:%.*]] = and <32 x i1> [[TMP15]], [[TMP17]] ; CHECK-NEXT: [[TMP21:%.*]] = or <32 x i1> [[TMP18]], [[TMP19]] ; CHECK-NEXT: [[TMP22:%.*]] = or <32 x i1> [[TMP21]], [[TMP20]] ; CHECK-NEXT: [[TMP23:%.*]] = sext <32 x i1> [[TMP22]] to <32 x i8> @@ -191,18 +171,14 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, pt ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <8 x i32> [[TMP24]], zeroinitializer ; CHECK-NEXT: [[TMP28:%.*]] = sext <8 x i1> [[TMP25]] to <8 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP28]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]]) -; CHECK-NEXT: [[TMP31:%.*]] = bitcast <8 x i32> [[X1]] to <32 x i8> -; CHECK-NEXT: [[TMP32:%.*]] = bitcast <8 x i32> [[X4]] to <32 x i8> -; CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8> -; CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x i32> [[TMP4]] to <32 x i8> -; CHECK-NEXT: [[TMP35:%.*]] = icmp ne <32 x i8> [[TMP33]], zeroinitializer -; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <32 x i8> [[TMP34]], zeroinitializer -; CHECK-NEXT: [[TMP37:%.*]] = icmp ne <32 x i8> [[TMP31]], zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[TMP30]]) +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP13]], zeroinitializer ; CHECK-NEXT: [[TMP38:%.*]] = icmp ne <32 x i8> [[TMP32]], zeroinitializer -; CHECK-NEXT: [[TMP39:%.*]] = and <32 x i1> [[TMP35]], [[TMP36]] -; CHECK-NEXT: [[TMP40:%.*]] = and <32 x i1> [[TMP37]], [[TMP36]] -; CHECK-NEXT: [[TMP41:%.*]] = and <32 x i1> [[TMP35]], [[TMP38]] +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP39:%.*]] = and <32 x i1> [[TMP26]], [[TMP38]] +; CHECK-NEXT: [[TMP40:%.*]] = and <32 x i1> [[TMP27]], [[TMP38]] +; CHECK-NEXT: [[TMP41:%.*]] = and <32 x i1> [[TMP26]], [[TMP31]] ; CHECK-NEXT: [[TMP42:%.*]] = or <32 x i1> [[TMP39]], [[TMP40]] ; CHECK-NEXT: [[TMP43:%.*]] = or <32 x i1> [[TMP42]], [[TMP41]] ; CHECK-NEXT: [[TMP44:%.*]] = sext <32 x i1> [[TMP43]] to <32 x i8> @@ -210,28 +186,28 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, pt ; 
CHECK-NEXT: [[TMP46:%.*]] = icmp ne <8 x i32> [[TMP45]], zeroinitializer ; CHECK-NEXT: [[TMP49:%.*]] = sext <8 x i1> [[TMP46]] to <8 x i32> ; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP49]], [[TMP5]] -; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]]) +; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory { +define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbssds_256( -; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP32:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -239,22 +215,18 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, p ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32 +; CHECK-NEXT: [[TMP30:%.*]] = load <32 x i8>, ptr [[X2P]], align 32 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32 -; CHECK-NEXT: [[TMP29:%.*]] = bitcast <8 x i32> [[X1]] to <32 x 
i8> -; CHECK-NEXT: [[TMP30:%.*]] = bitcast <8 x i32> [[X2]] to <32 x i8> -; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8> -; CHECK-NEXT: [[TMP13:%.*]] = bitcast <8 x i32> [[_MSLD]] to <32 x i8> -; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <32 x i8> [[TMP12]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32 ; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <32 x i8> [[TMP13]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <32 x i8> [[TMP29]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <32 x i8> [[TMP30]], zeroinitializer -; CHECK-NEXT: [[TMP18:%.*]] = and <32 x i1> [[TMP14]], [[TMP15]] -; CHECK-NEXT: [[TMP19:%.*]] = and <32 x i1> [[TMP16]], [[TMP15]] -; CHECK-NEXT: [[TMP20:%.*]] = and <32 x i1> [[TMP14]], [[TMP17]] +; CHECK-NEXT: [[TMP18:%.*]] = and <32 x i1> [[TMP15]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = and <32 x i1> [[TMP12]], [[TMP16]] +; CHECK-NEXT: [[TMP20:%.*]] = and <32 x i1> [[TMP15]], [[TMP17]] ; CHECK-NEXT: [[TMP21:%.*]] = or <32 x i1> [[TMP18]], [[TMP19]] ; CHECK-NEXT: [[TMP22:%.*]] = or <32 x i1> [[TMP21]], [[TMP20]] ; CHECK-NEXT: [[TMP23:%.*]] = sext <32 x i1> [[TMP22]] to <32 x i8> @@ -262,18 +234,14 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, p ; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <8 x i32> [[TMP24]], zeroinitializer ; CHECK-NEXT: [[TMP28:%.*]] = sext <8 x i1> [[TMP25]] to <8 x i32> ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP28]], [[TMP5]] -; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]]) -; CHECK-NEXT: [[TMP31:%.*]] = bitcast <8 x i32> [[X1]] to <32 x i8> -; CHECK-NEXT: [[TMP32:%.*]] = bitcast <8 x i32> [[X4]] to <32 x i8> -; CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8> -; CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x i32> [[TMP4]] to <32 x i8> -; CHECK-NEXT: [[TMP35:%.*]] = icmp ne <32 x i8> [[TMP33]], zeroinitializer -; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <32 x i8> [[TMP34]], zeroinitializer -; CHECK-NEXT: [[TMP37:%.*]] = icmp ne <32 x i8> [[TMP31]], zeroinitializer +; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[TMP30]]) +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP13]], zeroinitializer ; CHECK-NEXT: [[TMP38:%.*]] = icmp ne <32 x i8> [[TMP32]], zeroinitializer -; CHECK-NEXT: [[TMP39:%.*]] = and <32 x i1> [[TMP35]], [[TMP36]] -; CHECK-NEXT: [[TMP40:%.*]] = and <32 x i1> [[TMP37]], [[TMP36]] -; CHECK-NEXT: [[TMP41:%.*]] = and <32 x i1> [[TMP35]], [[TMP38]] +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP39:%.*]] = and <32 x i1> [[TMP26]], [[TMP38]] +; CHECK-NEXT: [[TMP40:%.*]] = and <32 x i1> [[TMP27]], [[TMP38]] +; CHECK-NEXT: [[TMP41:%.*]] = and <32 x i1> [[TMP26]], [[TMP31]] ; CHECK-NEXT: [[TMP42:%.*]] = or <32 x i1> [[TMP39]], [[TMP40]] ; CHECK-NEXT: [[TMP43:%.*]] = or <32 x i1> [[TMP42]], [[TMP41]] ; CHECK-NEXT: [[TMP44:%.*]] = sext <32 x i1> [[TMP43]] to <32 x i8> @@ -281,28 +249,28 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, p ; CHECK-NEXT: [[TMP46:%.*]] = icmp ne <8 x i32> [[TMP45]], zeroinitializer ; CHECK-NEXT: [[TMP49:%.*]] = sext <8 x i1> [[TMP46]] to <8 x i32> ; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP49]], [[TMP5]] -; CHECK-NEXT: 
[[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]]) +; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory { +define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbsud_128( -; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -310,38 +278,62 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, pt ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16 +; CHECK-NEXT: [[X2:%.*]] = load <16 x i8>, ptr [[X2P]], align 16 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]]) -; CHECK-NEXT: 
[[_MSPROP2:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]]) +; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16 +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <16 x i8> [[X2]], zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP12]], [[TMP24]] +; CHECK-NEXT: [[TMP16:%.*]] = and <16 x i1> [[TMP23]], [[TMP13]] +; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i1> [[TMP17]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = sext <16 x i1> [[TMP18]] to <16 x i8> +; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <4 x i32> +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <4 x i32> [[TMP20]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = sext <4 x i1> [[TMP21]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP22]], [[TMP2]] +; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X2]]) +; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = and <16 x i1> [[TMP25]], [[TMP26]] +; CHECK-NEXT: [[TMP30:%.*]] = and <16 x i1> [[TMP27]], [[TMP26]] +; CHECK-NEXT: [[TMP31:%.*]] = and <16 x i1> [[TMP25]], [[TMP28]] +; CHECK-NEXT: [[TMP32:%.*]] = or <16 x i1> [[TMP29]], [[TMP30]] +; CHECK-NEXT: [[TMP33:%.*]] = or <16 x i1> [[TMP32]], [[TMP31]] +; CHECK-NEXT: [[TMP34:%.*]] = sext <16 x i1> [[TMP33]] to <16 x i8> +; CHECK-NEXT: [[TMP35:%.*]] = bitcast <16 x i8> [[TMP34]] to <4 x i32> +; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <4 x i32> [[TMP35]], zeroinitializer +; CHECK-NEXT: [[TMP37:%.*]] = sext <4 x i1> [[TMP36]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP37]], [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) 
sanitize_memory { +define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbsuds_128( -; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -349,38 +341,62 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, p ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16 +; CHECK-NEXT: [[X2:%.*]] = load <16 x i8>, ptr [[X2P]], align 16 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]]) -; CHECK-NEXT: [[_MSPROP2:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]]) +; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16 +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <16 x i8> [[X2]], zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP12]], [[TMP24]] +; CHECK-NEXT: [[TMP16:%.*]] = and <16 x i1> [[TMP23]], [[TMP13]] +; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i1> [[TMP17]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = sext <16 x i1> [[TMP18]] to <16 x i8> +; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <4 x i32> +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <4 x i32> [[TMP20]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = sext <4 x i1> [[TMP21]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or 
<4 x i32> [[TMP22]], [[TMP2]] +; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X2]]) +; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = and <16 x i1> [[TMP25]], [[TMP26]] +; CHECK-NEXT: [[TMP30:%.*]] = and <16 x i1> [[TMP27]], [[TMP26]] +; CHECK-NEXT: [[TMP31:%.*]] = and <16 x i1> [[TMP25]], [[TMP28]] +; CHECK-NEXT: [[TMP32:%.*]] = or <16 x i1> [[TMP29]], [[TMP30]] +; CHECK-NEXT: [[TMP33:%.*]] = or <16 x i1> [[TMP32]], [[TMP31]] +; CHECK-NEXT: [[TMP34:%.*]] = sext <16 x i1> [[TMP33]] to <16 x i8> +; CHECK-NEXT: [[TMP35:%.*]] = bitcast <16 x i8> [[TMP34]] to <4 x i32> +; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <4 x i32> [[TMP35]], zeroinitializer +; CHECK-NEXT: [[TMP37:%.*]] = sext <4 x i1> [[TMP36]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP37]], [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory { +define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbsud_256( -; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() 
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -388,38 +404,62 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, pt ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32 +; CHECK-NEXT: [[X2:%.*]] = load <32 x i8>, ptr [[X2P]], align 32 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]]) -; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[_MSPROP2]], [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]]) +; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32 +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <32 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <32 x i8> [[X2]], zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP12]], [[TMP24]] +; CHECK-NEXT: [[TMP16:%.*]] = and <32 x i1> [[TMP23]], [[TMP13]] +; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = or <32 x i1> [[TMP17]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = sext <32 x i1> [[TMP18]] to <32 x i8> +; CHECK-NEXT: [[TMP20:%.*]] = bitcast <32 x i8> [[TMP19]] to <8 x i32> +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i32> [[TMP20]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP22]], [[TMP2]] +; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X2]]) +; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = and <32 x i1> [[TMP25]], [[TMP26]] +; CHECK-NEXT: [[TMP30:%.*]] = and <32 x i1> [[TMP27]], [[TMP26]] +; CHECK-NEXT: [[TMP31:%.*]] = and <32 x i1> [[TMP25]], [[TMP28]] +; CHECK-NEXT: [[TMP32:%.*]] = or <32 x i1> [[TMP29]], [[TMP30]] +; CHECK-NEXT: [[TMP33:%.*]] = or <32 x i1> [[TMP32]], [[TMP31]] +; CHECK-NEXT: [[TMP34:%.*]] = sext <32 x i1> [[TMP33]] to <32 x i8> +; CHECK-NEXT: [[TMP35:%.*]] = bitcast <32 x i8> [[TMP34]] to <8 x i32> +; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <8 x i32> [[TMP35]], zeroinitializer +; CHECK-NEXT: [[TMP37:%.*]] = sext <8 x i1> [[TMP36]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP37]], [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]]) ; CHECK-NEXT: 
[[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory { +define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbsuds_256( -; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -427,38 +467,62 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, p ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32 +; CHECK-NEXT: [[X2:%.*]] = load <32 x i8>, ptr [[X2P]], align 32 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]]) -; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[_MSPROP2]], [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]]) +; CHECK-NEXT: 
[[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32 +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <32 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <32 x i8> [[X2]], zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP12]], [[TMP24]] +; CHECK-NEXT: [[TMP16:%.*]] = and <32 x i1> [[TMP23]], [[TMP13]] +; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = or <32 x i1> [[TMP17]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = sext <32 x i1> [[TMP18]] to <32 x i8> +; CHECK-NEXT: [[TMP20:%.*]] = bitcast <32 x i8> [[TMP19]] to <8 x i32> +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i32> [[TMP20]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP22]], [[TMP2]] +; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X2]]) +; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = and <32 x i1> [[TMP25]], [[TMP26]] +; CHECK-NEXT: [[TMP30:%.*]] = and <32 x i1> [[TMP27]], [[TMP26]] +; CHECK-NEXT: [[TMP31:%.*]] = and <32 x i1> [[TMP25]], [[TMP28]] +; CHECK-NEXT: [[TMP32:%.*]] = or <32 x i1> [[TMP29]], [[TMP30]] +; CHECK-NEXT: [[TMP33:%.*]] = or <32 x i1> [[TMP32]], [[TMP31]] +; CHECK-NEXT: [[TMP34:%.*]] = sext <32 x i1> [[TMP33]] to <32 x i8> +; CHECK-NEXT: [[TMP35:%.*]] = bitcast <32 x i8> [[TMP34]] to <8 x i32> +; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <8 x i32> [[TMP35]], zeroinitializer +; CHECK-NEXT: [[TMP37:%.*]] = sext <8 x i1> [[TMP36]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP37]], [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory { +define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbuud_128( -; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr 
[[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -466,38 +530,62 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, pt ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16 +; CHECK-NEXT: [[X2:%.*]] = load <16 x i8>, ptr [[X2P]], align 16 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]]) -; CHECK-NEXT: [[_MSPROP2:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]]) +; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16 +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <16 x i8> [[X2]], zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP12]], [[TMP24]] +; CHECK-NEXT: [[TMP16:%.*]] = and <16 x i1> [[TMP23]], [[TMP13]] +; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i1> [[TMP17]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = sext <16 x i1> [[TMP18]] to <16 x i8> +; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <4 x i32> +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <4 x i32> [[TMP20]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = sext <4 x i1> [[TMP21]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP22]], [[TMP2]] +; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X2]]) +; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne 
<16 x i8> [[TMP4]], zeroinitializer +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = and <16 x i1> [[TMP25]], [[TMP26]] +; CHECK-NEXT: [[TMP30:%.*]] = and <16 x i1> [[TMP27]], [[TMP26]] +; CHECK-NEXT: [[TMP31:%.*]] = and <16 x i1> [[TMP25]], [[TMP28]] +; CHECK-NEXT: [[TMP32:%.*]] = or <16 x i1> [[TMP29]], [[TMP30]] +; CHECK-NEXT: [[TMP33:%.*]] = or <16 x i1> [[TMP32]], [[TMP31]] +; CHECK-NEXT: [[TMP34:%.*]] = sext <16 x i1> [[TMP33]] to <16 x i8> +; CHECK-NEXT: [[TMP35:%.*]] = bitcast <16 x i8> [[TMP34]] to <4 x i32> +; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <4 x i32> [[TMP35]], zeroinitializer +; CHECK-NEXT: [[TMP37:%.*]] = sext <4 x i1> [[TMP36]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP37]], [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %x2 = load <4 x i32>, ptr %x2p - %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>) +declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>) -define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory { +define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbuuds_128( -; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -505,38 +593,62 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, p ; CHECK-NEXT: call void 
@__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16 +; CHECK-NEXT: [[X2:%.*]] = load <16 x i8>, ptr [[X2P]], align 16 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]]) -; CHECK-NEXT: [[_MSPROP2:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]]) +; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16 +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <16 x i8> [[X2]], zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP12]], [[TMP24]] +; CHECK-NEXT: [[TMP16:%.*]] = and <16 x i1> [[TMP23]], [[TMP13]] +; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i1> [[TMP17]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = sext <16 x i1> [[TMP18]] to <16 x i8> +; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <4 x i32> +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <4 x i32> [[TMP20]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = sext <4 x i1> [[TMP21]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP22]], [[TMP2]] +; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X2]]) +; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = and <16 x i1> [[TMP25]], [[TMP26]] +; CHECK-NEXT: [[TMP30:%.*]] = and <16 x i1> [[TMP27]], [[TMP26]] +; CHECK-NEXT: [[TMP31:%.*]] = and <16 x i1> [[TMP25]], [[TMP28]] +; CHECK-NEXT: [[TMP32:%.*]] = or <16 x i1> [[TMP29]], [[TMP30]] +; CHECK-NEXT: [[TMP33:%.*]] = or <16 x i1> [[TMP32]], [[TMP31]] +; CHECK-NEXT: [[TMP34:%.*]] = sext <16 x i1> [[TMP33]] to <16 x i8> +; CHECK-NEXT: [[TMP35:%.*]] = bitcast <16 x i8> [[TMP34]] to <4 x i32> +; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <4 x i32> [[TMP35]], zeroinitializer +; CHECK-NEXT: [[TMP37:%.*]] = sext <4 x i1> [[TMP36]] to <4 x i32> +; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP37]], [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <4 x i32> [[RES]] ; - %x2 = load <4 x i32>, ptr %x2p 
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) - %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4) + %x2 = load <16 x i8>, ptr %x2p + %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2) + %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4) %res = add <4 x i32> %1, %2 ret <4 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory { +define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbuud_256( -; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -544,38 +656,62 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, pt ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32 +; CHECK-NEXT: [[X2:%.*]] = load <32 x i8>, ptr [[X2P]], align 32 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]]) -; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[_MSPROP2]], [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]]) +; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32 +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <32 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], 
zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <32 x i8> [[X2]], zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP12]], [[TMP24]] +; CHECK-NEXT: [[TMP16:%.*]] = and <32 x i1> [[TMP23]], [[TMP13]] +; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = or <32 x i1> [[TMP17]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = sext <32 x i1> [[TMP18]] to <32 x i8> +; CHECK-NEXT: [[TMP20:%.*]] = bitcast <32 x i8> [[TMP19]] to <8 x i32> +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i32> [[TMP20]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP22]], [[TMP2]] +; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X2]]) +; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = and <32 x i1> [[TMP25]], [[TMP26]] +; CHECK-NEXT: [[TMP30:%.*]] = and <32 x i1> [[TMP27]], [[TMP26]] +; CHECK-NEXT: [[TMP31:%.*]] = and <32 x i1> [[TMP25]], [[TMP28]] +; CHECK-NEXT: [[TMP32:%.*]] = or <32 x i1> [[TMP29]], [[TMP30]] +; CHECK-NEXT: [[TMP33:%.*]] = or <32 x i1> [[TMP32]], [[TMP31]] +; CHECK-NEXT: [[TMP34:%.*]] = sext <32 x i1> [[TMP33]] to <32 x i8> +; CHECK-NEXT: [[TMP35:%.*]] = bitcast <32 x i8> [[TMP34]] to <8 x i32> +; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <8 x i32> [[TMP35]], zeroinitializer +; CHECK-NEXT: [[TMP37:%.*]] = sext <8 x i1> [[TMP36]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP37]], [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } -declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>) +declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>) -define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory { +define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory { ; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbuuds_256( -; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> 
[[X4:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] @@ -583,25 +719,49 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, p ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: [[BB6]]: -; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32 +; CHECK-NEXT: [[X2:%.*]] = load <32 x i8>, ptr [[X2P]], align 32 ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr -; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32 -; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[_MSLD]] -; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]]) -; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]] -; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[_MSPROP2]], [[TMP4]] -; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]]) +; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32 +; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <32 x i8> [[_MSLD]], zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <32 x i8> [[X2]], zeroinitializer +; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP12]], [[TMP24]] +; CHECK-NEXT: [[TMP16:%.*]] = and <32 x i1> [[TMP23]], [[TMP13]] +; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = or <32 x i1> [[TMP17]], [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = sext <32 x i1> [[TMP18]] to <32 x i8> +; CHECK-NEXT: [[TMP20:%.*]] = bitcast <32 x i8> [[TMP19]] to <8 x i32> +; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i32> [[TMP20]], zeroinitializer +; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP22]], [[TMP2]] +; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X2]]) +; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer +; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer +; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer +; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer +; CHECK-NEXT: [[TMP29:%.*]] = and <32 x i1> [[TMP25]], [[TMP26]] +; CHECK-NEXT: 
[[TMP30:%.*]] = and <32 x i1> [[TMP27]], [[TMP26]] +; CHECK-NEXT: [[TMP31:%.*]] = and <32 x i1> [[TMP25]], [[TMP28]] +; CHECK-NEXT: [[TMP32:%.*]] = or <32 x i1> [[TMP29]], [[TMP30]] +; CHECK-NEXT: [[TMP33:%.*]] = or <32 x i1> [[TMP32]], [[TMP31]] +; CHECK-NEXT: [[TMP34:%.*]] = sext <32 x i1> [[TMP33]] to <32 x i8> +; CHECK-NEXT: [[TMP35:%.*]] = bitcast <32 x i8> [[TMP34]] to <8 x i32> +; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <8 x i32> [[TMP35]], zeroinitializer +; CHECK-NEXT: [[TMP37:%.*]] = sext <8 x i1> [[TMP36]] to <8 x i32> +; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP37]], [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]]) ; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]] ; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <8 x i32> [[RES]] ; - %x2 = load <8 x i32>, ptr %x2p - %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) - %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4) + %x2 = load <32 x i8>, ptr %x2p + %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2) + %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4) %res = add <8 x i32> %1, %2 ret <8 x i32> %res } diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll index e663a7bfeef24..cd2ccaf32e946 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll @@ -47,7 +47,7 @@ define void @test_x86_vcvtps2ph_256_m(ptr nocapture %d, <8 x float> %a) nounwind ; CHECK-LABEL: define void @test_x86_vcvtps2ph_256_m( ; CHECK-SAME: ptr captures(none) [[D:%.*]], <8 x float> [[A:%.*]]) #[[ATTR2:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP17:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP17:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <8 x i32> [[TMP17]], zeroinitializer @@ -76,7 +76,7 @@ define void @test_x86_vcvtps2ph_128_m(ptr nocapture %d, <4 x float> %a) nounwind ; CHECK-LABEL: define void @test_x86_vcvtps2ph_128_m( ; CHECK-SAME: ptr captures(none) [[D:%.*]], <4 x float> [[A:%.*]]) #[[ATTR2]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <4 x i32> [[TMP9]], zeroinitializer @@ -109,7 +109,7 @@ define void @test_x86_vcvtps2ph_128_m2(ptr nocapture %hf4x16, <4 x float> %f4X86 ; CHECK-LABEL: define void @test_x86_vcvtps2ph_128_m2( ; CHECK-SAME: ptr captures(none) [[HF4X16:%.*]], <4 x float> [[F4X86:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[TMP0]], zeroinitializer @@ -145,7 +145,7 @@ define void @test_x86_vcvtps2ph_128_m3(ptr nocapture %hf4x16, <4 x float> %f4X86 ; CHECK-LABEL: define void @test_x86_vcvtps2ph_128_m3( ; CHECK-SAME: ptr captures(none) [[HF4X16:%.*]], <4 x float> [[F4X86:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[TMP0]], zeroinitializer @@ -178,3 +178,6 @@ entry: } attributes #0 = { sanitize_memory } +;. +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575} +;. diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll index 3d98f60a8242a..d62fd7e8d1a89 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/mmx-intrinsics.ll @@ -22,7 +22,7 @@ define i64 @test1(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test1( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> @@ -68,7 +68,7 @@ define i64 @test88(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test88( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -108,7 +108,7 @@ define i64 @test87(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test87( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -148,7 +148,7 @@ define i64 @test86(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test86( ; CHECK-SAME: <1 x i64> 
[[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -188,7 +188,7 @@ define i64 @test85(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test85( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -228,7 +228,7 @@ define i64 @test84(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test84( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -268,7 +268,7 @@ define i64 @test83(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test83( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -308,7 +308,7 @@ define i64 @test82(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test82( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -348,7 +348,7 @@ define i64 @test81(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test81( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -388,7 +388,7 @@ define i64 @test80(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test80( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -428,7 +428,7 @@ define i64 @test79(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test79( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -468,7 +468,7 @@ define i64 @test78(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test78( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -508,7 +508,7 @@ define i64 @test77(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test77( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -548,7 +548,7 @@ define i64 @test76(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test76( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP19:%.*]] = bitcast <1 x i64> [[TMP16]] to <4 x i16> @@ -596,7 +596,7 @@ define i64 @test75(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test75( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint 
(ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP19:%.*]] = bitcast <1 x i64> [[TMP16]] to <2 x i32> @@ -644,7 +644,7 @@ define i64 @test74(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test74( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP19:%.*]] = bitcast <1 x i64> [[TMP16]] to <4 x i16> @@ -1049,7 +1049,7 @@ define i64 @test65(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32> ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32> @@ -1094,7 +1094,7 @@ define i64 @test64(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16> @@ -1139,7 +1139,7 @@ define i64 @test63(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0 ; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0 @@ -1178,7 +1178,7 @@ define i64 @test62(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = 
bitcast <1 x i64> [[TMP9]] to <2 x i32> ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32> @@ -1223,7 +1223,7 @@ define i64 @test61(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16> @@ -1268,7 +1268,7 @@ define i64 @test60(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0 ; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0 @@ -1307,7 +1307,7 @@ define i64 @test59(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32> ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32> @@ -1352,7 +1352,7 @@ define i64 @test58(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16> @@ -1396,7 +1396,7 @@ define i64 @test56(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test56( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -1436,7 +1436,7 @@ define i64 @test55(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test55( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x 
i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -1476,7 +1476,7 @@ define i64 @test54(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test54( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -1516,7 +1516,7 @@ define i64 @test53(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test53( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -1556,7 +1556,7 @@ define i64 @test52(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test52( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -1594,7 +1594,7 @@ define i64 @test51(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test51( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -1634,7 +1634,7 @@ define i64 @test50(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test50( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -1674,7 +1674,7 @@ define i64 @test49(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test49( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP13:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP16:%.*]] = bitcast <1 x i64> [[TMP13]] to <4 x i16> @@ -1732,7 +1732,7 @@ define i64 @test48(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test48( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -1772,7 +1772,7 @@ define i64 @test47(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test47( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -1812,7 +1812,7 @@ define i64 @test46(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test46( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -1852,7 +1852,7 @@ define i64 @test45(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test45( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -1891,7 +1891,7 @@ define i64 @test44(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = 
load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0 ; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0 @@ -1926,7 +1926,7 @@ define i64 @test43(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test43( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -1966,7 +1966,7 @@ define i64 @test42(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test42( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -2006,7 +2006,7 @@ define i64 @test41(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test41( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -2046,7 +2046,7 @@ define i64 @test40(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test40( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -2086,7 +2086,7 @@ define i64 @test39(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test39( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] 
to <8 x i8> @@ -2126,7 +2126,7 @@ define i64 @test38(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test38( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -2166,7 +2166,7 @@ define i64 @test37(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test37( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -2207,7 +2207,7 @@ define i64 @test36(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0 ; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0 @@ -2240,7 +2240,7 @@ define i64 @test35(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test35( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32> @@ -2280,7 +2280,7 @@ define i64 @test34(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test34( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -2320,7 +2320,7 @@ define i64 @test33(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test33( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; 
CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -2360,7 +2360,7 @@ define i64 @test32(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test32( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8> @@ -2399,7 +2399,7 @@ define i64 @test31(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test31( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -2439,7 +2439,7 @@ define i64 @test30(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test30( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -2479,7 +2479,7 @@ define i64 @test29(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test29( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -2519,7 +2519,7 @@ define i64 @test28(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test28( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -2559,7 +2559,7 @@ define i64 @test27(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: 
define i64 @test27( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -2599,7 +2599,7 @@ define i64 @test26(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test26( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8> @@ -2639,7 +2639,7 @@ define void @test25(ptr %p, <1 x i64> %a) nounwind optsize ssp #0 { ; CHECK-LABEL: define void @test25( ; CHECK-SAME: ptr [[P:%.*]], <1 x i64> [[A:%.*]]) #[[ATTR3:[0-9]+]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0 @@ -2702,9 +2702,9 @@ define void @test23(<1 x i64> %d, <1 x i64> %n, ptr %p) nounwind optsize ssp #0 ; CHECK-LABEL: define void @test23( ; CHECK-SAME: <1 x i64> [[D:%.*]], <1 x i64> [[N:%.*]], ptr [[P:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP4]] to <8 x i8> ; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[N]] to <8 x i8> @@ -2744,7 +2744,7 @@ define i64 @test22(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test22( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16> @@ -2850,7 +2850,7 @@ define i64 @test20(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test20( ; 
CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = bitcast <1 x i64> [[TMP5]] to <2 x i32> @@ -2975,7 +2975,7 @@ define i64 @test16(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP6]], i32 0 ; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0 @@ -3112,7 +3112,7 @@ define i64 @test12(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test12( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32> @@ -3152,7 +3152,7 @@ define i64 @test11(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test11( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> @@ -3192,7 +3192,7 @@ define i64 @test10(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test10( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8> @@ -3232,7 +3232,7 @@ define i64 @test9(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test9( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: 
[[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8> @@ -3273,7 +3273,7 @@ define i64 @test8(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test8( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> @@ -3313,7 +3313,7 @@ define i64 @test7(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test7( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP17:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8> @@ -3371,7 +3371,7 @@ define i64 @test6(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test6( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> @@ -3417,7 +3417,7 @@ define i64 @test5(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test5( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32> @@ -3463,7 +3463,7 @@ define i64 @test4(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test4( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> @@ -3509,7 +3509,7 @@ define i64 @test3(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test3( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: 
[[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> @@ -3555,7 +3555,7 @@ define i64 @test2(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test2( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32> @@ -3603,7 +3603,7 @@ define <4 x float> @test89(<4 x float> %a, <1 x i64> %b) nounwind #0 { ; CHECK-LABEL: define <4 x float> @test89( ; CHECK-SAME: <4 x float> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR4:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -3647,7 +3647,7 @@ define <1 x i64> @test_mm_insert_pi16(<1 x i64> %a.coerce, i32 %d) nounwind #0 { ; CHECK-SAME: <1 x i64> [[A_COERCE:%.*]], i32 [[D:%.*]]) #[[ATTR4]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = bitcast <1 x i64> [[TMP3]] to i64 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll index 9d7763a6ef589..46e814806f383 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/sse-intrinsics-x86.ll @@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu" define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_cmp_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer @@ -25,7 +25,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind define <4 x float> 
@test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_cmp_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -45,7 +45,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comieq_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -64,7 +64,7 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comige_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -83,7 +83,7 @@ declare i32 @llvm.x86.sse.comige.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comigt_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -102,7 +102,7 @@ declare i32 @llvm.x86.sse.comigt.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comile_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -121,7 +121,7 @@ declare i32 @llvm.x86.sse.comile.ss(<4 x float>, <4 x float>) nounwind readnone define i32 
@test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comilt_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -140,7 +140,7 @@ declare i32 @llvm.x86.sse.comilt.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comineq_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -162,9 +162,9 @@ define i32 @test_x86_sse_cvtss2si(<4 x float> %a0) #0 { ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0:![0-9]+]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]] ; CHECK: 3: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5:[0-9]+]] +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]] ; CHECK-NEXT: unreachable ; CHECK: 4: ; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse.cvtss2si(<4 x float> [[A0:%.*]]) @@ -183,9 +183,9 @@ define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) #0 { ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]] +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: 4: ; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[A0:%.*]]) @@ -209,9 +209,9 @@ define void @test_x86_sse_ldmxcsr(ptr %a0) #0 { ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i32 [[_LDMXCSR]], 0 ; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]] -; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] ; CHECK: 5: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]] +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: 6: ; CHECK-NEXT: call void @llvm.x86.sse.ldmxcsr(ptr [[A0]]) @@ -227,7 +227,7 @@ declare void @llvm.x86.sse.ldmxcsr(ptr) nounwind define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_max_ps( ; 
CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]]) @@ -243,7 +243,7 @@ declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind read define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_max_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> @@ -260,7 +260,7 @@ declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind read define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_min_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]]) @@ -276,7 +276,7 @@ declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind read define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_min_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> @@ -296,9 +296,9 @@ define i32 @test_x86_sse_movmsk_ps(<4 x float> %a0) #0 { ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] ; CHECK: 3: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]] +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: 4: ; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> [[A0:%.*]]) @@ -377,9 +377,9 @@ define void @test_x86_sse_stmxcsr(ptr %a0) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = 
inttoptr i64 [[TMP3]] to ptr ; CHECK-NEXT: store i32 0, ptr [[TMP4]], align 4 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]] ; CHECK: 5: -; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]] +; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]] ; CHECK-NEXT: unreachable ; CHECK: 6: ; CHECK-NEXT: call void @llvm.x86.sse.stmxcsr(ptr [[A0]]) @@ -394,7 +394,7 @@ declare void @llvm.x86.sse.stmxcsr(ptr) nounwind define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomieq_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -413,7 +413,7 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomige_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -432,7 +432,7 @@ declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomigt_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -451,7 +451,7 @@ declare i32 @llvm.x86.sse.ucomigt.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomile_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -470,7 +470,7 @@ declare i32 @llvm.x86.sse.ucomile.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; 
CHECK-LABEL: @test_x86_sse_ucomilt_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 @@ -489,7 +489,7 @@ declare i32 @llvm.x86.sse.ucomilt.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomineq_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll index 7048050180792..fc7b01b034f33 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/sse2-intrinsics-x86.ll @@ -15,7 +15,7 @@ target triple = "x86_64-unknown-linux-gnu" define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_cmp_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer @@ -33,7 +33,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounw define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_cmp_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -53,7 +53,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounw define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comieq_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: 
call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -72,7 +72,7 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comige_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -91,7 +91,7 @@ declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comigt_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -110,7 +110,7 @@ declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comile_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -129,7 +129,7 @@ declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comilt_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comilt_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -148,7 +148,7 @@ declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comineq_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 
16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -340,7 +340,7 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_cvtsd2ss( -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0 @@ -363,7 +363,7 @@ declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 { ; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -397,7 +397,7 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 { define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, ptr %p1) optsize #0 { ; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load_optsize( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -542,7 +542,7 @@ declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone define <2 x double> @test_x86_sse2_max_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_max_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]]) @@ -558,7 +558,7 @@ declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_max_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] 
= shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <2 x i32> @@ -575,7 +575,7 @@ declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind define <2 x double> @test_x86_sse2_min_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_min_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]]) @@ -591,7 +591,7 @@ declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_min_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <2 x i32> @@ -629,7 +629,7 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_packssdw_128( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32> @@ -662,7 +662,7 @@ define <8 x i16> @test_x86_sse2_packssdw_128_fold() #0 { define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_packsswb_128( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16> @@ -695,7 +695,7 @@ define <16 x i8> @test_x86_sse2_packsswb_128_fold() #0 { define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_packuswb_128( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16> @@ -728,7 +728,7 @@ define <16 x i8> @test_x86_sse2_packuswb_128_fold() #0 { define <16 x i8> @test_x86_sse2_pavg_b(<16 x i8> %a0, <16 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pavg_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> [[A0:%.*]], <16 x i8> [[A1:%.*]]) @@ -744,7 +744,7 @@ declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone define <8 x i16> @test_x86_sse2_pavg_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pavg_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]]) @@ -760,7 +760,7 @@ declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pmadd_wd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i16> [[TMP2]], zeroinitializer @@ -809,7 +809,7 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone define <8 x i16> @test_x86_sse2_pmulh_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pmulh_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]]) @@ -825,7 +825,7 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone define <8 x i16> @test_x86_sse2_pmulhu_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pmulhu_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]]) @@ -841,7 +841,7 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnon define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psad_bw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64> @@ -861,7 +861,7 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psll_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -883,7 +883,7 @@ declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psll_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -905,7 +905,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psll_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -975,7 +975,7 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psra_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -997,7 +997,7 @@ declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psra_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -1051,7 +1051,7 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psrl_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -1073,7 +1073,7 @@ declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psrl_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -1095,7 +1095,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psrl_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64 @@ -1116,7 +1116,7 @@ declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, ptr %p) #0 { ; CHECK-LABEL: @test_x86_sse2_psrl_w_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr 
@__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -1198,7 +1198,7 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomieq_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -1217,7 +1217,7 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomige_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -1236,7 +1236,7 @@ declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomigt_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -1255,7 +1255,7 @@ declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomile_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -1274,7 +1274,7 @@ declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomilt_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; 
CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 @@ -1293,7 +1293,7 @@ declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomineq_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll index 1fcab72d571ea..618dde9b3dac6 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/sse41-intrinsics-x86.ll @@ -6,8 +6,8 @@ target triple = "x86_64-unknown-linux-gnu" define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 { ; CHECK-LABEL: @test_x86_sse41_blendvpd( -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x double> [[A2:%.*]] to <2 x i64> @@ -34,8 +34,8 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 { ; CHECK-LABEL: @test_x86_sse41_blendvps( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x float> [[A2:%.*]] to <4 x i32> @@ -63,7 +63,7 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x floa define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_dppd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = select <2 x i1> , <2 x i64> [[TMP3]], <2 x i64> zeroinitializer @@ -84,7 +84,7 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwi define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_dpps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> , <4 x i32> [[TMP3]], <4 x i32> zeroinitializer @@ -105,7 +105,7 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_insertps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -131,7 +131,7 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounw define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_mpsadbw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to i128 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0 @@ -155,7 +155,7 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind rea define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_mpsadbw_load_op0( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]] @@ -190,7 +190,7 @@ define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 { define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_packusdw( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to 
ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32> @@ -222,8 +222,8 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() #0 { define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) #0 { ; CHECK-LABEL: @test_x86_sse41_pblendvb( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = ashr <16 x i8> [[A2:%.*]], splat (i8 7) @@ -262,7 +262,7 @@ declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_ptestc( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer @@ -281,7 +281,7 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_ptestnzc( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer @@ -300,7 +300,7 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_ptestz( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer @@ -347,7 +347,7 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_round_sd( ; CHECK-NEXT: 
[[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP2]], <2 x i32> ; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i32 7) @@ -362,7 +362,7 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_round_sd_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -389,7 +389,7 @@ define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0 define <4 x float> @test_x86_sse41_round_ss_load(<4 x float> %a0, ptr %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_round_ss_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll index 9a7f4b985293c..bd9661295a210 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg-too-large.ll @@ -26,8 +26,8 @@ entry: ret i64 %ret } -; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed. +; If the size of __msan_va_arg_tls changes the second argument of `getelementptr` must also be changed. ; CHECK-LABEL: @many_args -; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) -; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800) +; CHECK: getelementptr (i8, ptr @__msan_va_arg_tls, i64 792) +; CHECK-NOT: getelementptr (i8, ptr @__msan_va_arg_tls, i64 800) declare i64 @sum(i64 %n, ...) 
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll index b61cb6aebb3ea..bec2ba9ea62f9 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll @@ -16,15 +16,15 @@ entry: ret i32 %call } -; CHECK: store i32 0, {{.*}} @__msan_param_tls {{.*}} i64 8 -; CHECK: store i32 0, {{.*}} @__msan_param_tls {{.*}} i64 16 -; CHECK: store i32 0, {{.*}} @__msan_param_tls {{.*}} i64 24 -; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls {{.*}} i64 8 -; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls {{.*}} i64 8 -; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls {{.*}} i64 16 -; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls {{.*}} i64 16 -; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls {{.*}} i64 24 -; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls {{.*}} i64 24 +; CHECK: store i32 0, {{.*}} @__msan_param_tls, i64 8 +; CHECK: store i32 0, {{.*}} @__msan_param_tls, i64 16 +; CHECK: store i32 0, {{.*}} @__msan_param_tls, i64 24 +; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls, i64 8 +; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls, i64 8 +; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls, i64 16 +; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls, i64 16 +; CHECK: store i32 0, {{.*}} @__msan_va_arg_tls, i64 24 +; CHECK-ORIGIN: store i32 0, {{.*}} @__msan_va_arg_origin_tls, i64 24 define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 { entry: diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll index 4bc14daaca427..c549c165ee966 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll @@ -39,9 +39,9 @@ define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef signext %arg) sanit ; CHECK-NEXT: [[_MSPROP:%.*]] = sext i8 [[_MSLD]] to i32 ; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 ; CHECK-NEXT: store i8 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i8, i32, ...) 
@_Z5test2IcEvT_iz(i8 noundef signext [[TMP7]], i32 noundef 1, i32 noundef [[CONV]]) ; CHECK-NEXT: ret void @@ -80,9 +80,9 @@ define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_mem ; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4 ; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef [[TMP7]], i32 noundef 1, i32 noundef [[TMP7]]) ; CHECK-NEXT: ret void @@ -122,9 +122,9 @@ define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_m ; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[_MSLD]] to i64 ; CHECK-NEXT: [[CONV:%.*]] = fpext float [[TMP7]] to double ; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef [[TMP7]], i32 noundef 1, double noundef [[CONV]]) ; CHECK-NEXT: ret void @@ -163,9 +163,9 @@ define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_ ; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (double, i32, ...) 
@_Z5test2IdEvT_iz(double noundef [[TMP7]], i32 noundef 1, double noundef [[TMP7]]) ; CHECK-NEXT: ret void @@ -203,9 +203,9 @@ define linkonce_odr dso_local void @_Z4testIeEvT_(x86_fp80 noundef %arg) sanitiz ; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i80, ptr [[TMP10]], align 16 ; CHECK-NEXT: store i80 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i80 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i80 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8 ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (x86_fp80, i32, ...) @_Z5test2IeEvT_iz(x86_fp80 noundef [[TMP7]], i32 noundef 1, x86_fp80 noundef [[TMP7]]) ; CHECK-NEXT: ret void @@ -243,9 +243,9 @@ define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitiz ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i64, i32, ...) 
@_Z5test2I6IntIntEvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]]) ; CHECK-NEXT: ret void @@ -264,7 +264,7 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i ; CHECK-SAME: i64 [[ARG_COERCE0:%.*]], i64 [[ARG_COERCE1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_INT64INT64:%.*]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 @@ -295,12 +295,12 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i ; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr ; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i64, i64, i32, ...) 
@_Z5test2I10Int64Int64EvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]]) ; CHECK-NEXT: ret void @@ -322,7 +322,7 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc ; CHECK-SAME: double [[ARG_COERCE0:%.*]], double [[ARG_COERCE1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEDOUBLE:%.*]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 @@ -353,12 +353,12 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc ; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr ; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (double, double, i32, ...) 
@_Z5test2I12DoubleDoubleEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]]) ; CHECK-NEXT: ret void @@ -390,15 +390,15 @@ define linkonce_odr dso_local void @_Z4testI7Double4EvT_(ptr noundef byval(%stru ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 ; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false) -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP8]], i64 32, i1 false) ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP11]], i64 32, i1 false) ; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (ptr, i32, ...) 
@_Z5test2I7Double4EvT_iz(ptr noundef nonnull byval([[STRUCT_DOUBLE4]]) align 8 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_DOUBLE4]]) align 8 [[ARG]]) ; CHECK-NEXT: ret void @@ -416,7 +416,7 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce ; CHECK-SAME: double [[ARG_COERCE0:%.*]], float [[ARG_COERCE1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEFLOAT:%.*]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 @@ -447,12 +447,12 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce ; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr ; CHECK-NEXT: [[_MSLD1:%.*]] = load i32, ptr [[TMP17]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8 +; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 +; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (double, float, i32, ...) 
@_Z5test2I11DoubleFloatEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]]) ; CHECK-NEXT: ret void @@ -484,15 +484,15 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_(ptr noundef byval( ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 ; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false) -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP8]], i64 32, i1 false) ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP11]], i64 32, i1 false) ; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (ptr, i32, ...) 
@_Z5test2I11LongDouble2EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE2]]) align 16 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE2]]) align 16 [[ARG]]) ; CHECK-NEXT: ret void @@ -518,15 +518,15 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_(ptr noundef byval( ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 ; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false) -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 72), ptr align 8 [[TMP8]], i64 64, i1 false) ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP11]], i64 64, i1 false) ; CHECK-NEXT: store i64 64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (ptr, i32, ...) 
@_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]]) ; CHECK-NEXT: ret void @@ -561,17 +561,13 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32 ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -624,17 +620,13 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -679,17 +671,13 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = 
ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -734,17 +722,13 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -789,17 +773,13 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -844,17 +824,13 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; 
CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -899,17 +875,13 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0, ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -954,17 +926,13 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 
[[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -1009,17 +977,13 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -1064,17 +1028,13 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -1119,17 +1079,13 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = 
getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -1174,17 +1130,13 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) -; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16 -; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 ; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 @@ -1222,88 +1174,88 @@ define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(ptr noundef byval ; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 ; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false) -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 72), ptr align 8 [[TMP8]], i64 64, i1 false) ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) 
to ptr), ptr align 8 [[TMP11]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 136), ptr align 8 [[TMP11]], i64 64, i1 false) ; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), ptr align 8 [[TMP14]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 200), ptr align 8 [[TMP14]], i64 64, i1 false) ; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080 ; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), ptr align 8 [[TMP17]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 264), ptr align 8 [[TMP17]], i64 64, i1 false) ; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], 87960930222080 ; CHECK-NEXT: [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), ptr align 8 [[TMP20]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 328), ptr align 8 [[TMP20]], i64 64, i1 false) ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), ptr align 8 [[TMP23]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 392), ptr align 8 [[TMP23]], i64 64, i1 false) ; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], 87960930222080 ; CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), ptr align 8 [[TMP26]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 456), ptr align 8 [[TMP26]], i64 64, i1 false) ; CHECK-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP28:%.*]] = xor i64 [[TMP27]], 87960930222080 ; CHECK-NEXT: [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), ptr align 8 [[TMP29]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 520), ptr align 8 [[TMP29]], i64 64, i1 false) ; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], 87960930222080 ; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), ptr align 8 [[TMP32]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 584), ptr align 8 [[TMP32]], i64 64, i1 false) ; CHECK-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP34:%.*]] = xor i64 [[TMP33]], 87960930222080 ; CHECK-NEXT: [[TMP35:%.*]] = inttoptr i64 [[TMP34]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), ptr align 8 [[TMP35]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 648), ptr align 8 [[TMP35]], i64 64, i1 false) ; CHECK-NEXT: [[TMP36:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP37:%.*]] = xor i64 [[TMP36]], 87960930222080 ; CHECK-NEXT: [[TMP38:%.*]] = inttoptr i64 [[TMP37]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), ptr align 8 [[TMP38]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 712), ptr align 8 [[TMP38]], i64 64, i1 false) ; CHECK-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP40:%.*]] = xor i64 [[TMP39]], 87960930222080 ; CHECK-NEXT: [[TMP41:%.*]] = inttoptr i64 [[TMP40]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP41]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP41]], i64 64, i1 false) ; CHECK-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP43:%.*]] = xor i64 [[TMP42]], 87960930222080 ; CHECK-NEXT: [[TMP44:%.*]] = inttoptr i64 [[TMP43]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), ptr align 8 [[TMP44]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), ptr align 8 [[TMP44]], i64 64, i1 false) ; CHECK-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP46:%.*]] = xor i64 [[TMP45]], 87960930222080 ; CHECK-NEXT: [[TMP47:%.*]] = inttoptr i64 [[TMP46]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), ptr align 8 [[TMP47]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), ptr align 8 [[TMP47]], i64 64, i1 false) ; CHECK-NEXT: [[TMP48:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP49:%.*]] = xor i64 [[TMP48]], 87960930222080 ; CHECK-NEXT: [[TMP50:%.*]] = inttoptr i64 [[TMP49]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), ptr align 8 [[TMP50]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), ptr align 8 [[TMP50]], i64 64, i1 false) ; CHECK-NEXT: [[TMP51:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP52:%.*]] = xor i64 [[TMP51]], 87960930222080 ; CHECK-NEXT: [[TMP53:%.*]] = inttoptr 
i64 [[TMP52]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), ptr align 8 [[TMP53]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), ptr align 8 [[TMP53]], i64 64, i1 false) ; CHECK-NEXT: [[TMP54:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP55:%.*]] = xor i64 [[TMP54]], 87960930222080 ; CHECK-NEXT: [[TMP56:%.*]] = inttoptr i64 [[TMP55]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), ptr align 8 [[TMP56]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), ptr align 8 [[TMP56]], i64 64, i1 false) ; CHECK-NEXT: [[TMP57:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP58:%.*]] = xor i64 [[TMP57]], 87960930222080 ; CHECK-NEXT: [[TMP59:%.*]] = inttoptr i64 [[TMP58]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), ptr align 8 [[TMP59]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), ptr align 8 [[TMP59]], i64 64, i1 false) ; CHECK-NEXT: [[TMP60:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP61:%.*]] = xor i64 [[TMP60]], 87960930222080 ; CHECK-NEXT: [[TMP62:%.*]] = inttoptr i64 [[TMP61]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), ptr align 8 [[TMP62]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), ptr align 8 [[TMP62]], i64 64, i1 false) ; CHECK-NEXT: [[TMP63:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP64:%.*]] = xor i64 [[TMP63]], 87960930222080 ; CHECK-NEXT: [[TMP65:%.*]] = inttoptr i64 [[TMP64]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), ptr align 8 [[TMP65]], i64 64, i1 false) -; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), i8 0, i32 48, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), ptr align 8 [[TMP65]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), i8 0, i32 48, i1 false) ; CHECK-NEXT: store i64 1280, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (ptr, i32, ...) 
@_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 20, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]]) ; CHECK-NEXT: ret void diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll index 429829ef39ab9..8a9cf6081d7dd 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/X86/x86-vpermi2.ll @@ -14,7 +14,7 @@ define <2 x i64> @shuffle_vpermv3_v2i64(<2 x i64> %x0, <2 x i64> %x1) #0 { ; CHECK-LABEL: define <2 x i64> @shuffle_vpermv3_v2i64( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[TMP1]], <2 x i64> , <2 x i64> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <2 x i64> @llvm.x86.avx512.vpermi2var.q.128(<2 x i64> [[X0]], <2 x i64> , <2 x i64> [[X1]]) @@ -42,9 +42,9 @@ define <2 x i64> @shuffle_vpermv3_v2i64_unary(<2 x i64> %x0) #0 { define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %m) #0 { ; CHECK-LABEL: define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i64> [[M]], splat (i64 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <2 x i64> [[TMP4]], zeroinitializer @@ -74,9 +74,9 @@ define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits(<2 x i64> %x0, <2 x i64> %x define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits_negative(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %m) #0 { ; CHECK-LABEL: define <2 x i64> @shuffle_vpermv3_v2i64_demandedbits_negative( ; CHECK-SAME: <2 x i64> [[X0:%.*]], <2 x i64> [[X1:%.*]], <2 x i64> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i64> [[M]], splat (i64 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <2 x i64> [[TMP4]], zeroinitializer @@ -107,7 +107,7 @@ define <4 x i64> @shuffle_vpermv3_v4i64(<4 x i64> %x0, <4 x i64> %x1) #0 { ; CHECK-LABEL: define <4 x i64> @shuffle_vpermv3_v4i64( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[TMP1]], <4 x i64> , <4 x i64> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <4 x i64> @llvm.x86.avx512.vpermi2var.q.256(<4 x i64> [[X0]], <4 x i64> , <4 x i64> [[X1]]) @@ -135,9 +135,9 @@ define <4 x i64> @shuffle_vpermv3_v4i64_unary(<4 x i64> %x0) #0 { define <4 x i64> @shuffle_vpermv3_v4i64_demandedbits(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %m) #0 { ; CHECK-LABEL: define <4 x i64> @shuffle_vpermv3_v4i64_demandedbits( ; CHECK-SAME: <4 x i64> [[X0:%.*]], <4 x i64> [[X1:%.*]], <4 x i64> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i64> [[M]], splat (i64 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <4 x i64> [[TMP4]], zeroinitializer @@ -168,7 +168,7 @@ define <8 x i64> @shuffle_vpermv3_v8i64(<8 x i64> %x0, <8 x i64> %x1) #0 { ; CHECK-LABEL: define <8 x i64> @shuffle_vpermv3_v8i64( ; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i64>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[TMP1]], <8 x i64> , <8 x i64> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> [[X0]], <8 x i64> , <8 x i64> [[X1]]) @@ -196,9 +196,9 @@ define <8 x i64> @shuffle_vpermv3_v8i64_unary(<8 x i64> %x0) #0 { define <8 x i64> @shuffle_vpermv3_v8i64_demandedbits(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %m) #0 { ; CHECK-LABEL: define <8 x i64> @shuffle_vpermv3_v8i64_demandedbits( ; CHECK-SAME: <8 x i64> [[X0:%.*]], <8 x i64> [[X1:%.*]], <8 x i64> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <8 x i64> [[M]], splat (i64 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i64> [[TMP4]], zeroinitializer @@ -233,7 +233,7 @@ define <4 x i32> @shuffle_vpermv3_v4i32(<4 x i32> %x0, <4 x i32> %x1) #0 { ; CHECK-LABEL: define <4 x i32> @shuffle_vpermv3_v4i32( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[TMP1]], <4 x i32> , <4 x i32> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <4 x i32> @llvm.x86.avx512.vpermi2var.d.128(<4 x i32> [[X0]], <4 x i32> , <4 x i32> [[X1]]) @@ -261,9 +261,9 @@ define <4 x i32> @shuffle_vpermv3_v4i32_unary(<4 x i32> %x0) #0 { define <4 x i32> @shuffle_vpermv3_v4i32_demandedbits(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %m) #0 { ; CHECK-LABEL: define <4 x i32> @shuffle_vpermv3_v4i32_demandedbits( ; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], <4 x i32> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[M]], splat (i32 -1) ; 
CHECK-NEXT: [[TMP5:%.*]] = and <4 x i32> [[TMP4]], zeroinitializer @@ -294,7 +294,7 @@ define <8 x i32> @shuffle_vpermv3_v8i32(<8 x i32> %x0, <8 x i32> %x1) #0 { ; CHECK-LABEL: define <8 x i32> @shuffle_vpermv3_v8i32( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[TMP1]], <8 x i32> , <8 x i32> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <8 x i32> @llvm.x86.avx512.vpermi2var.d.256(<8 x i32> [[X0]], <8 x i32> , <8 x i32> [[X1]]) @@ -322,9 +322,9 @@ define <8 x i32> @shuffle_vpermv3_v8i32_unary(<8 x i32> %x0) #0 { define <8 x i32> @shuffle_vpermv3_v8i32_demandedbits(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %m) #0 { ; CHECK-LABEL: define <8 x i32> @shuffle_vpermv3_v8i32_demandedbits( ; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], <8 x i32> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <8 x i32> [[M]], splat (i32 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i32> [[TMP4]], zeroinitializer @@ -355,7 +355,7 @@ define <16 x i32> @shuffle_vpermv3_v16i32(<16 x i32> %x0, <16 x i32> %x1) #0 { ; CHECK-LABEL: define <16 x i32> @shuffle_vpermv3_v16i32( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> [[TMP1]], <16 x i32> , <16 x i32> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> [[X0]], <16 x i32> , <16 x i32> [[X1]]) @@ -383,9 +383,9 @@ define <16 x i32> @shuffle_vpermv3_v16i32_unary(<16 x i32> %x0) #0 { define <16 x i32> @shuffle_vpermv3_v16i32_demandedbits(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %m) #0 { ; CHECK-LABEL: define <16 x i32> @shuffle_vpermv3_v16i32_demandedbits( ; CHECK-SAME: <16 x i32> [[X0:%.*]], <16 x i32> [[X1:%.*]], <16 x i32> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <16 x i32> [[M]], splat (i32 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i32> [[TMP4]], zeroinitializer @@ -420,7 +420,7 @@ define <8 x i16> @shuffle_vpermv3_v8i16(<8 x i16> %x0, <8 x i16> %x1) #0 { ; CHECK-LABEL: define <8 x i16> @shuffle_vpermv3_v8i16( ; CHECK-SAME: <8 x i16> [[X0:%.*]], <8 x i16> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <8 x i16> @llvm.x86.avx512.vpermi2var.hi.128(<8 x i16> [[TMP1]], <8 x i16> , <8 x i16> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <8 x i16> @llvm.x86.avx512.vpermi2var.hi.128(<8 x i16> [[X0]], <8 x i16> , <8 x i16> [[X1]]) @@ -448,9 +448,9 @@ define <8 x i16> @shuffle_vpermv3_v8i16_unary(<8 x i16> %x0) #0 { define <8 x i16> @shuffle_vpermv3_v8i16_demandedbits(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %m) #0 { ; CHECK-LABEL: define <8 x i16> @shuffle_vpermv3_v8i16_demandedbits( ; CHECK-SAME: <8 x i16> [[X0:%.*]], <8 x i16> [[X1:%.*]], <8 x i16> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <8 x i16> [[M]], splat (i16 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i16> [[TMP4]], zeroinitializer @@ -481,7 +481,7 @@ define <16 x i16> @shuffle_vpermv3_v16i16(<16 x i16> %x0, <16 x i16> %x1) #0 { ; CHECK-LABEL: define <16 x i16> @shuffle_vpermv3_v16i16( ; CHECK-SAME: <16 x i16> [[X0:%.*]], <16 x i16> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i16> @llvm.x86.avx512.vpermi2var.hi.256(<16 x i16> [[TMP1]], <16 x i16> , <16 x i16> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <16 x i16> @llvm.x86.avx512.vpermi2var.hi.256(<16 x i16> [[X0]], <16 x i16> , <16 x i16> [[X1]]) @@ -509,9 +509,9 @@ define <16 x i16> @shuffle_vpermv3_v16i16_unary(<16 x i16> %x0) #0 { define <16 x i16> @shuffle_vpermv3_v16i16_demandedbits(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %m) #0 { ; CHECK-LABEL: define <16 x i16> @shuffle_vpermv3_v16i16_demandedbits( ; CHECK-SAME: <16 x 
i16> [[X0:%.*]], <16 x i16> [[X1:%.*]], <16 x i16> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <16 x i16> [[M]], splat (i16 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i16> [[TMP4]], zeroinitializer @@ -542,7 +542,7 @@ define <32 x i16> @shuffle_vpermv3_v32i16(<32 x i16> %x0, <32 x i16> %x1) #0 { ; CHECK-LABEL: define <32 x i16> @shuffle_vpermv3_v32i16( ; CHECK-SAME: <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[TMP1]], <32 x i16> , <32 x i16> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <32 x i16> @llvm.x86.avx512.vpermi2var.hi.512(<32 x i16> [[X0]], <32 x i16> , <32 x i16> [[X1]]) @@ -570,9 +570,9 @@ define <32 x i16> @shuffle_vpermv3_v32i16_unary(<32 x i16> %x0) #0 { define <32 x i16> @shuffle_vpermv3_v32i16_demandedbits(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %m) #0 { ; CHECK-LABEL: define <32 x i16> @shuffle_vpermv3_v32i16_demandedbits( ; CHECK-SAME: <32 x i16> [[X0:%.*]], <32 x i16> [[X1:%.*]], <32 x i16> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <32 x i16> [[M]], splat (i16 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <32 x i16> [[TMP4]], zeroinitializer @@ -607,7 +607,7 @@ define <16 x i8> @shuffle_vpermv3_v16i8(<16 x i8> %x0, <16 x i8> %x1) #0 { ; CHECK-LABEL: define <16 x i8> @shuffle_vpermv3_v16i8( ; CHECK-SAME: <16 x i8> [[X0:%.*]], <16 x i8> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> [[TMP1]], <16 x i8> , <16 x i8> [[TMP2]]) ; CHECK-NEXT: 
[[R:%.*]] = call <16 x i8> @llvm.x86.avx512.vpermi2var.qi.128(<16 x i8> [[X0]], <16 x i8> , <16 x i8> [[X1]]) @@ -635,9 +635,9 @@ define <16 x i8> @shuffle_vpermv3_v16i8_unary(<16 x i8> %x0) #0 { define <16 x i8> @shuffle_vpermv3_v16i8_demandedbits(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %m) #0 { ; CHECK-LABEL: define <16 x i8> @shuffle_vpermv3_v16i8_demandedbits( ; CHECK-SAME: <16 x i8> [[X0:%.*]], <16 x i8> [[X1:%.*]], <16 x i8> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <16 x i8> [[M]], splat (i8 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <16 x i8> [[TMP4]], zeroinitializer @@ -668,7 +668,7 @@ define <32 x i8> @shuffle_vpermv3_v32i8(<32 x i8> %x0, <32 x i8> %x1) #0 { ; CHECK-LABEL: define <32 x i8> @shuffle_vpermv3_v32i8( ; CHECK-SAME: <32 x i8> [[X0:%.*]], <32 x i8> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> [[TMP1]], <32 x i8> , <32 x i8> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <32 x i8> @llvm.x86.avx512.vpermi2var.qi.256(<32 x i8> [[X0]], <32 x i8> , <32 x i8> [[X1]]) @@ -696,9 +696,9 @@ define <32 x i8> @shuffle_vpermv3_v32i8_unary(<32 x i8> %x0) #0 { define <32 x i8> @shuffle_vpermv3_v32i8_demandedbits(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %m) #0 { ; CHECK-LABEL: define <32 x i8> @shuffle_vpermv3_v32i8_demandedbits( ; CHECK-SAME: <32 x i8> [[X0:%.*]], <32 x i8> [[X1:%.*]], <32 x i8> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <32 x i8> [[M]], splat (i8 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <32 x i8> [[TMP4]], zeroinitializer @@ -729,7 +729,7 @@ define <64 x i8> @shuffle_vpermv3_v64i8(<64 x i8> %x0, <64 x i8> %x1) #0 { ; CHECK-LABEL: define <64 x i8> @shuffle_vpermv3_v64i8( ; CHECK-SAME: <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to 
i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP1:%.*]] = call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> [[TMP1]], <64 x i8> , <64 x i8> [[TMP2]]) ; CHECK-NEXT: [[R:%.*]] = call <64 x i8> @llvm.x86.avx512.vpermi2var.qi.512(<64 x i8> [[X0]], <64 x i8> , <64 x i8> [[X1]]) @@ -757,9 +757,9 @@ define <64 x i8> @shuffle_vpermv3_v64i8_unary(<64 x i8> %x0) #0 { define <64 x i8> @shuffle_vpermv3_v64i8_demandedbits(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %m) #0 { ; CHECK-LABEL: define <64 x i8> @shuffle_vpermv3_v64i8_demandedbits( ; CHECK-SAME: <64 x i8> [[X0:%.*]], <64 x i8> [[X1:%.*]], <64 x i8> [[M:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <64 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = xor <64 x i8> [[M]], splat (i8 -1) ; CHECK-NEXT: [[TMP5:%.*]] = and <64 x i8> [[TMP4]], zeroinitializer diff --git a/llvm/test/Instrumentation/MemorySanitizer/array_types.ll b/llvm/test/Instrumentation/MemorySanitizer/array_types.ll index 236b019147036..399c0fec78ab9 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/array_types.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/array_types.ll @@ -1,89 +1,194 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s -; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck -check-prefix=CHECK %s --allow-empty +; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck -check-prefix=CHECK-ORIGIN %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" define [2 x i32] @InsertValue(i32 %x, i32 %y) sanitize_memory { +; CHECK-LABEL: define [2 x i32] @InsertValue( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[TMP0]], 0 +; CHECK-NEXT: [[A:%.*]] = insertvalue [2 x i32] undef, i32 [[X]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = insertvalue [2 x i32] [[TMP2]], i32 [[TMP1]], 1 +; CHECK-NEXT: [[B:%.*]] = insertvalue [2 x i32] [[A]], i32 [[Y]], 1 +; CHECK-NEXT: store [2 x i32] [[TMP3]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret [2 x i32] [[B]] +; +; CHECK-ORIGIN-LABEL: define [2 x i32] @InsertValue( +; CHECK-ORIGIN-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-ORIGIN-NEXT: [[ENTRY:.*:]] +; CHECK-ORIGIN-NEXT: [[TMP0:%.*]] = load 
i32, ptr @__msan_param_tls, align 8 +; CHECK-ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; CHECK-ORIGIN-NEXT: call void @llvm.donothing() +; CHECK-ORIGIN-NEXT: [[TMP4:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[TMP0]], 0 +; CHECK-ORIGIN-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP0]], 0 +; CHECK-ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP1]], i32 0 +; CHECK-ORIGIN-NEXT: [[A:%.*]] = insertvalue [2 x i32] undef, i32 [[X]], 0 +; CHECK-ORIGIN-NEXT: [[TMP7:%.*]] = insertvalue [2 x i32] [[TMP4]], i32 [[TMP2]], 1 +; CHECK-ORIGIN-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP2]], 0 +; CHECK-ORIGIN-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i32 [[TMP3]], i32 [[TMP6]] +; CHECK-ORIGIN-NEXT: [[B:%.*]] = insertvalue [2 x i32] [[A]], i32 [[Y]], 1 +; CHECK-ORIGIN-NEXT: store [2 x i32] [[TMP7]], ptr @__msan_retval_tls, align 8 +; CHECK-ORIGIN-NEXT: store i32 [[TMP9]], ptr @__msan_retval_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: ret [2 x i32] [[B]] +; entry: %a = insertvalue [2 x i32] undef, i32 %x, 0 %b = insertvalue [2 x i32] %a, i32 %y, 1 ret [2 x i32] %b } -; CHECK-LABEL: @InsertValue( -; CHECK-DAG: [[Sx:%.*]] = load i32, ptr @__msan_param_tls -; CHECK-DAG: [[Sy:%.*]] = load i32, ptr {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK: [[A:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[Sx]], 0 -; CHECK: [[B:%.*]] = insertvalue [2 x i32] [[A]], i32 [[Sy]], 1 -; CHECK: store [2 x i32] [[B]], ptr {{.*}}@__msan_retval_tls -; CHECK: ret [2 x i32] - - define [2 x double] @InsertValueDouble(double %x, double %y) sanitize_memory { +; CHECK-LABEL: define [2 x double] @InsertValueDouble( +; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[TMP0]], 0 +; CHECK-NEXT: [[A:%.*]] = insertvalue [2 x double] undef, double [[X]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = insertvalue [2 x i64] [[TMP2]], i64 [[TMP1]], 1 +; CHECK-NEXT: [[B:%.*]] = insertvalue [2 x double] [[A]], double [[Y]], 1 +; CHECK-NEXT: store [2 x i64] [[TMP3]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret [2 x double] [[B]] +; +; CHECK-ORIGIN-LABEL: define [2 x double] @InsertValueDouble( +; CHECK-ORIGIN-SAME: double [[X:%.*]], double [[Y:%.*]]) #[[ATTR0]] { +; CHECK-ORIGIN-NEXT: [[ENTRY:.*:]] +; CHECK-ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 +; CHECK-ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; CHECK-ORIGIN-NEXT: call void @llvm.donothing() +; CHECK-ORIGIN-NEXT: [[TMP4:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[TMP0]], 0 +; CHECK-ORIGIN-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP0]], 0 +; CHECK-ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP1]], i32 0 +; CHECK-ORIGIN-NEXT: [[A:%.*]] = insertvalue [2 x double] undef, double [[X]], 0 +; CHECK-ORIGIN-NEXT: [[TMP7:%.*]] 
= insertvalue [2 x i64] [[TMP4]], i64 [[TMP2]], 1 +; CHECK-ORIGIN-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-ORIGIN-NEXT: [[TMP9:%.*]] = select i1 [[TMP8]], i32 [[TMP3]], i32 [[TMP6]] +; CHECK-ORIGIN-NEXT: [[B:%.*]] = insertvalue [2 x double] [[A]], double [[Y]], 1 +; CHECK-ORIGIN-NEXT: store [2 x i64] [[TMP7]], ptr @__msan_retval_tls, align 8 +; CHECK-ORIGIN-NEXT: store i32 [[TMP9]], ptr @__msan_retval_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: ret [2 x double] [[B]] +; entry: %a = insertvalue [2 x double] undef, double %x, 0 %b = insertvalue [2 x double] %a, double %y, 1 ret [2 x double] %b } -; CHECK-LABEL: @InsertValueDouble( -; CHECK-DAG: [[Sx:%.*]] = load i64, ptr @__msan_param_tls -; CHECK-DAG: [[Sy:%.*]] = load i64, ptr {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK: [[A:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[Sx]], 0 -; CHECK: [[B:%.*]] = insertvalue [2 x i64] [[A]], i64 [[Sy]], 1 -; CHECK: store [2 x i64] [[B]], ptr {{.*}}@__msan_retval_tls -; CHECK: ret [2 x double] - - define i32 @ExtractValue([2 x i32] %a) sanitize_memory { +; CHECK-LABEL: define i32 @ExtractValue( +; CHECK-SAME: [2 x i32] [[A:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load [2 x i32], ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP1:%.*]] = extractvalue [2 x i32] [[TMP0]], 1 +; CHECK-NEXT: [[X:%.*]] = extractvalue [2 x i32] [[A]], 1 +; CHECK-NEXT: store i32 [[TMP1]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[X]] +; +; CHECK-ORIGIN-LABEL: define i32 @ExtractValue( +; CHECK-ORIGIN-SAME: [2 x i32] [[A:%.*]]) #[[ATTR0]] { +; CHECK-ORIGIN-NEXT: [[ENTRY:.*:]] +; CHECK-ORIGIN-NEXT: [[TMP0:%.*]] = load [2 x i32], ptr @__msan_param_tls, align 8 +; CHECK-ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: call void @llvm.donothing() +; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = extractvalue [2 x i32] [[TMP0]], 1 +; CHECK-ORIGIN-NEXT: [[X:%.*]] = extractvalue [2 x i32] [[A]], 1 +; CHECK-ORIGIN-NEXT: store i32 [[TMP2]], ptr @__msan_retval_tls, align 8 +; CHECK-ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: ret i32 [[X]] +; entry: %x = extractvalue [2 x i32] %a, 1 ret i32 %x } -; CHECK-LABEL: @ExtractValue( -; CHECK: [[Sa:%.*]] = load [2 x i32], ptr @__msan_param_tls -; CHECK: [[Sx:%.*]] = extractvalue [2 x i32] [[Sa]], 1 -; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls -; CHECK: ret i32 - - ; Regression test for PR20493. 
%MyStruct = type { i32, i32, [3 x i32] } define i32 @ArrayInStruct(%MyStruct %s) sanitize_memory { +; CHECK-LABEL: define i32 @ArrayInStruct( +; CHECK-SAME: [[MYSTRUCT:%.*]] [[S:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP1:%.*]] = load { i32, i32, [3 x i32] }, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i32, [3 x i32] } [[TMP1]], 2, 1 +; CHECK-NEXT: [[X:%.*]] = extractvalue [[MYSTRUCT]] [[S]], 2, 1 +; CHECK-NEXT: store i32 [[TMP2]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[X]] +; +; CHECK-ORIGIN-LABEL: define i32 @ArrayInStruct( +; CHECK-ORIGIN-SAME: [[MYSTRUCT:%.*]] [[S:%.*]]) #[[ATTR0]] { +; CHECK-ORIGIN-NEXT: [[TMP1:%.*]] = load { i32, i32, [3 x i32] }, ptr @__msan_param_tls, align 8 +; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: call void @llvm.donothing() +; CHECK-ORIGIN-NEXT: [[TMP3:%.*]] = extractvalue { i32, i32, [3 x i32] } [[TMP1]], 2, 1 +; CHECK-ORIGIN-NEXT: [[X:%.*]] = extractvalue [[MYSTRUCT]] [[S]], 2, 1 +; CHECK-ORIGIN-NEXT: store i32 [[TMP3]], ptr @__msan_retval_tls, align 8 +; CHECK-ORIGIN-NEXT: store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: ret i32 [[X]] +; %x = extractvalue %MyStruct %s, 2, 1 ret i32 %x } -; CHECK-LABEL: @ArrayInStruct( -; CHECK: [[Ss:%.*]] = load { i32, i32, [3 x i32] }, ptr @__msan_param_tls -; CHECK: [[Sx:%.*]] = extractvalue { i32, i32, [3 x i32] } [[Ss]], 2, 1 -; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls -; CHECK: ret i32 - - define i32 @ArrayOfStructs([3 x { i32, i32 }] %a) sanitize_memory { +; CHECK-LABEL: define i32 @ArrayOfStructs( +; CHECK-SAME: [3 x { i32, i32 }] [[A:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP1:%.*]] = load [3 x { i32, i32 }], ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = extractvalue [3 x { i32, i32 }] [[TMP1]], 2, 1 +; CHECK-NEXT: [[X:%.*]] = extractvalue [3 x { i32, i32 }] [[A]], 2, 1 +; CHECK-NEXT: store i32 [[TMP2]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[X]] +; +; CHECK-ORIGIN-LABEL: define i32 @ArrayOfStructs( +; CHECK-ORIGIN-SAME: [3 x { i32, i32 }] [[A:%.*]]) #[[ATTR0]] { +; CHECK-ORIGIN-NEXT: [[TMP1:%.*]] = load [3 x { i32, i32 }], ptr @__msan_param_tls, align 8 +; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: call void @llvm.donothing() +; CHECK-ORIGIN-NEXT: [[TMP3:%.*]] = extractvalue [3 x { i32, i32 }] [[TMP1]], 2, 1 +; CHECK-ORIGIN-NEXT: [[X:%.*]] = extractvalue [3 x { i32, i32 }] [[A]], 2, 1 +; CHECK-ORIGIN-NEXT: store i32 [[TMP3]], ptr @__msan_retval_tls, align 8 +; CHECK-ORIGIN-NEXT: store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: ret i32 [[X]] +; %x = extractvalue [3 x { i32, i32 }] %a, 2, 1 ret i32 %x } -; CHECK-LABEL: @ArrayOfStructs( -; CHECK: [[Ss:%.*]] = load [3 x { i32, i32 }], ptr @__msan_param_tls -; CHECK: [[Sx:%.*]] = extractvalue [3 x { i32, i32 }] [[Ss]], 2, 1 -; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls -; CHECK: ret i32 - - define <8 x i16> @ArrayOfVectors([3 x <8 x i16>] %a) sanitize_memory { +; CHECK-LABEL: define <8 x i16> @ArrayOfVectors( +; CHECK-SAME: [3 x <8 x i16>] [[A:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[TMP1:%.*]] = load [3 x <8 x i16>], ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = extractvalue [3 x <8 x i16>] [[TMP1]], 1 +; CHECK-NEXT: [[X:%.*]] = extractvalue [3 x <8 x 
i16>] [[A]], 1 +; CHECK-NEXT: store <8 x i16> [[TMP2]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret <8 x i16> [[X]] +; +; CHECK-ORIGIN-LABEL: define <8 x i16> @ArrayOfVectors( +; CHECK-ORIGIN-SAME: [3 x <8 x i16>] [[A:%.*]]) #[[ATTR0]] { +; CHECK-ORIGIN-NEXT: [[TMP1:%.*]] = load [3 x <8 x i16>], ptr @__msan_param_tls, align 8 +; CHECK-ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: call void @llvm.donothing() +; CHECK-ORIGIN-NEXT: [[TMP3:%.*]] = extractvalue [3 x <8 x i16>] [[TMP1]], 1 +; CHECK-ORIGIN-NEXT: [[X:%.*]] = extractvalue [3 x <8 x i16>] [[A]], 1 +; CHECK-ORIGIN-NEXT: store <8 x i16> [[TMP3]], ptr @__msan_retval_tls, align 8 +; CHECK-ORIGIN-NEXT: store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4 +; CHECK-ORIGIN-NEXT: ret <8 x i16> [[X]] +; %x = extractvalue [3 x <8 x i16>] %a, 1 ret <8 x i16> %x } -; CHECK-LABEL: @ArrayOfVectors( -; CHECK: [[Ss:%.*]] = load [3 x <8 x i16>], ptr @__msan_param_tls -; CHECK: [[Sx:%.*]] = extractvalue [3 x <8 x i16>] [[Ss]], 1 -; CHECK: store <8 x i16> [[Sx]], ptr @__msan_retval_tls -; CHECK: ret <8 x i16> diff --git a/llvm/test/Instrumentation/MemorySanitizer/bmi.ll b/llvm/test/Instrumentation/MemorySanitizer/bmi.ll index 2f60bd8b357b8..46bec2956c73c 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/bmi.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/bmi.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s ; REQUIRES: x86-registered-target @@ -15,131 +16,171 @@ declare i64 @llvm.x86.bmi.pdep.64(i64, i64) declare i64 @llvm.x86.bmi.pext.64(i64, i64) define i32 @Test_bzhi_32(i32 %a, i32 %b) sanitize_memory { +; CHECK-LABEL: define i32 @Test_bzhi_32( +; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.x86.bmi.bzhi.32(i32 [[TMP1]], i32 [[B]]) +; CHECK-NEXT: [[TMP5:%.*]] = or i32 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[C:%.*]] = tail call i32 @llvm.x86.bmi.bzhi.32(i32 [[A]], i32 [[B]]) +; CHECK-NEXT: store i32 [[TMP5]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[C]] +; entry: %c = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a, i32 %b) ret i32 %c } -; CHECK-LABEL: @Test_bzhi_32( -; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls -; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0 -; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32 -; CHECK-DAG: %[[X:.*]] = call i32 @llvm.x86.bmi.bzhi.32(i32 %[[SA]], i32 %b) -; CHECK-DAG: %[[S:.*]] = or i32 %[[SB1]], %[[X]] -; CHECK-DAG: store i32 %[[S]], {{.*}}@__msan_retval_tls -; CHECK: ret i32 define i64 @Test_bzhi_64(i64 %a, i64 %b) sanitize_memory { +; CHECK-LABEL: define i64 @Test_bzhi_64( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: 
[[TMP2:%.*]] = icmp ne i64 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.x86.bmi.bzhi.64(i64 [[TMP1]], i64 [[B]]) +; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[C:%.*]] = tail call i64 @llvm.x86.bmi.bzhi.64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: store i64 [[TMP5]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i64 [[C]] +; entry: %c = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a, i64 %b) ret i64 %c } -; CHECK-LABEL: @Test_bzhi_64( -; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls -; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0 -; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64 -; CHECK-DAG: %[[X:.*]] = call i64 @llvm.x86.bmi.bzhi.64(i64 %[[SA]], i64 %b) -; CHECK-DAG: %[[S:.*]] = or i64 %[[SB1]], %[[X]] -; CHECK-DAG: store i64 %[[S]], {{.*}}@__msan_retval_tls -; CHECK: ret i64 define i32 @Test_bextr_32(i32 %a, i32 %b) sanitize_memory { +; CHECK-LABEL: define i32 @Test_bextr_32( +; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.x86.bmi.bextr.32(i32 [[TMP1]], i32 [[B]]) +; CHECK-NEXT: [[TMP5:%.*]] = or i32 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[C:%.*]] = tail call i32 @llvm.x86.bmi.bextr.32(i32 [[A]], i32 [[B]]) +; CHECK-NEXT: store i32 [[TMP5]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[C]] +; entry: %c = tail call i32 @llvm.x86.bmi.bextr.32(i32 %a, i32 %b) ret i32 %c } -; CHECK-LABEL: @Test_bextr_32( -; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls -; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0 -; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32 -; CHECK-DAG: %[[X:.*]] = call i32 @llvm.x86.bmi.bextr.32(i32 %[[SA]], i32 %b) -; CHECK-DAG: %[[S:.*]] = or i32 %[[SB1]], %[[X]] -; CHECK-DAG: store i32 %[[S]], {{.*}}@__msan_retval_tls -; CHECK: ret i32 define i64 @Test_bextr_64(i64 %a, i64 %b) sanitize_memory { +; CHECK-LABEL: define i64 @Test_bextr_64( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.x86.bmi.bextr.64(i64 [[TMP1]], i64 [[B]]) +; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[C:%.*]] = tail call i64 @llvm.x86.bmi.bextr.64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: store i64 [[TMP5]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i64 [[C]] +; entry: %c = tail call i64 @llvm.x86.bmi.bextr.64(i64 %a, i64 %b) ret i64 %c } -; CHECK-LABEL: @Test_bextr_64( -; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls -; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0 -; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64 
-; CHECK-DAG: %[[X:.*]] = call i64 @llvm.x86.bmi.bextr.64(i64 %[[SA]], i64 %b) -; CHECK-DAG: %[[S:.*]] = or i64 %[[SB1]], %[[X]] -; CHECK-DAG: store i64 %[[S]], {{.*}}@__msan_retval_tls -; CHECK: ret i64 define i32 @Test_pdep_32(i32 %a, i32 %b) sanitize_memory { +; CHECK-LABEL: define i32 @Test_pdep_32( +; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.x86.bmi.pdep.32(i32 [[TMP1]], i32 [[B]]) +; CHECK-NEXT: [[TMP5:%.*]] = or i32 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[C:%.*]] = tail call i32 @llvm.x86.bmi.pdep.32(i32 [[A]], i32 [[B]]) +; CHECK-NEXT: store i32 [[TMP5]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[C]] +; entry: %c = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a, i32 %b) ret i32 %c } -; CHECK-LABEL: @Test_pdep_32( -; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls -; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0 -; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32 -; CHECK-DAG: %[[X:.*]] = call i32 @llvm.x86.bmi.pdep.32(i32 %[[SA]], i32 %b) -; CHECK-DAG: %[[S:.*]] = or i32 %[[SB1]], %[[X]] -; CHECK-DAG: store i32 %[[S]], {{.*}}@__msan_retval_tls -; CHECK: ret i32 define i64 @Test_pdep_64(i64 %a, i64 %b) sanitize_memory { +; CHECK-LABEL: define i64 @Test_pdep_64( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.x86.bmi.pdep.64(i64 [[TMP1]], i64 [[B]]) +; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[C:%.*]] = tail call i64 @llvm.x86.bmi.pdep.64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: store i64 [[TMP5]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i64 [[C]] +; entry: %c = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a, i64 %b) ret i64 %c } -; CHECK-LABEL: @Test_pdep_64( -; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls -; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0 -; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64 -; CHECK-DAG: %[[X:.*]] = call i64 @llvm.x86.bmi.pdep.64(i64 %[[SA]], i64 %b) -; CHECK-DAG: %[[S:.*]] = or i64 %[[SB1]], %[[X]] -; CHECK-DAG: store i64 %[[S]], {{.*}}@__msan_retval_tls -; CHECK: ret i64 define i32 @Test_pext_32(i32 %a, i32 %b) sanitize_memory { +; CHECK-LABEL: define i32 @Test_pext_32( +; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i32 +; CHECK-NEXT: [[TMP4:%.*]] = call i32 
@llvm.x86.bmi.pext.32(i32 [[TMP1]], i32 [[B]]) +; CHECK-NEXT: [[TMP5:%.*]] = or i32 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[C:%.*]] = tail call i32 @llvm.x86.bmi.pext.32(i32 [[A]], i32 [[B]]) +; CHECK-NEXT: store i32 [[TMP5]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i32 [[C]] +; entry: %c = tail call i32 @llvm.x86.bmi.pext.32(i32 %a, i32 %b) ret i32 %c } -; CHECK-LABEL: @Test_pext_32( -; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls -; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0 -; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32 -; CHECK-DAG: %[[X:.*]] = call i32 @llvm.x86.bmi.pext.32(i32 %[[SA]], i32 %b) -; CHECK-DAG: %[[S:.*]] = or i32 %[[SB1]], %[[X]] -; CHECK-DAG: store i32 %[[S]], {{.*}}@__msan_retval_tls -; CHECK: ret i32 define i64 @Test_pext_64(i64 %a, i64 %b) sanitize_memory { +; CHECK-LABEL: define i64 @Test_pext_64( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = sext i1 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.x86.bmi.pext.64(i64 [[TMP1]], i64 [[B]]) +; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[TMP3]], [[TMP4]] +; CHECK-NEXT: [[C:%.*]] = tail call i64 @llvm.x86.bmi.pext.64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: store i64 [[TMP5]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: ret i64 [[C]] +; entry: %c = tail call i64 @llvm.x86.bmi.pext.64(i64 %a, i64 %b) ret i64 %c } -; CHECK-LABEL: @Test_pext_64( -; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls -; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8) -; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0 -; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64 -; CHECK-DAG: %[[X:.*]] = call i64 @llvm.x86.bmi.pext.64(i64 %[[SA]], i64 %b) -; CHECK-DAG: %[[S:.*]] = or i64 %[[SB1]], %[[X]] -; CHECK-DAG: store i64 %[[S]], {{.*}}@__msan_retval_tls -; CHECK: ret i64 diff --git a/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll b/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll index e06576e2fead6..0acdf71361000 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll @@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu" %struct.S = type { i64, i64, i64, [8 x i8] } -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{.*}} add {{.*}} ptrtoint {{.*}} @__msan_param_tls {{.*}} i64 8) {{.*}}, ptr align 8 {{.*}}, i64 32, i1 false) +; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), ptr align 8 {{.*}}, i64 32, i1 false) define void @Caller() sanitize_memory { entry: diff --git a/llvm/test/Instrumentation/MemorySanitizer/byval.ll b/llvm/test/Instrumentation/MemorySanitizer/byval.ll index 258cec866d6a8..9f6a7cb189547 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/byval.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/byval.ll @@ -1,4 +1,5 @@ -; RUN: opt < %s -S -passes="msan" 2>&1 | FileCheck %s --implicit-check-not "call void @llvm.mem" --implicit-check-not " load" --implicit-check-not " store" +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt 
< %s -S -passes="msan" 2>&1 | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" @@ -7,16 +8,28 @@ declare void @FnByVal(ptr byval(i128) %p); declare void @Fn(ptr %p); define i128 @ByValArgument(i32, ptr byval(i128) %p) sanitize_memory { -; CHECK-LABEL: @ByValArgument( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[#]], ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 16, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 16, i1 false) -; CHECK: [[X:%.*]] = load i128, ptr %p, align 8 -; CHECK: [[_MSLD:%.*]] = load i128, ptr %[[#]], align 8 -; CHECK: %[[#]] = load i32, ptr %[[#]], align 8 -; CHECK: store i128 [[_MSLD]], ptr @__msan_retval_tls, align 8 -; CHECK: store i32 %[[#]], ptr @__msan_retval_origin_tls, align 4 -; CHECK: ret i128 [[X]] +; CHECK-LABEL: define i128 @ByValArgument( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i128) [[P:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP5]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 16, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[X:%.*]] = load i128, ptr [[P]], align 8 +; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 +; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP7]], 17592186044416 +; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr +; CHECK-NEXT: [[_MSLD:%.*]] = load i128, ptr [[TMP8]], align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 8 +; CHECK-NEXT: store i128 [[_MSLD]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 [[TMP11]], ptr @__msan_retval_origin_tls, align 4 +; CHECK-NEXT: ret i128 [[X]] ; entry: %x = load i128, ptr %p @@ -24,13 +37,20 @@ entry: } define i128 @ByValArgumentNoSanitize(i32, ptr byval(i128) %p) { -; CHECK-LABEL: @ByValArgumentNoSanitize( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memset.p0.i64(ptr align 8 %[[#]], i8 0, i64 16, i1 false) -; CHECK: [[X:%.*]] = load i128, ptr %p, align 8 -; CHECK: store i128 0, ptr @__msan_retval_tls, align 8 -; CHECK: store i32 0, ptr @__msan_retval_origin_tls, align 4 -; CHECK: ret i128 [[X]] +; CHECK-LABEL: define i128 @ByValArgumentNoSanitize( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i128) [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr 
align 8 [[TMP3]], i8 0, i64 16, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[X:%.*]] = load i128, ptr [[P]], align 8 +; CHECK-NEXT: store i128 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4 +; CHECK-NEXT: ret i128 [[X]] ; entry: %x = load i128, ptr %p @@ -38,13 +58,20 @@ entry: } define void @ByValForward(i32, ptr byval(i128) %p) sanitize_memory { -; CHECK-LABEL: @ByValForward( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[#]], ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 16, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 16, i1 false) -; CHECK: store i64 0, ptr @__msan_param_tls, align 8 -; CHECK: call void @Fn(ptr %p) -; CHECK: ret void +; CHECK-LABEL: define void @ByValForward( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i128) [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP5]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 16, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @Fn(ptr [[P]]) +; CHECK-NEXT: ret void ; entry: call void @Fn(ptr %p) @@ -52,12 +79,19 @@ entry: } define void @ByValForwardNoSanitize(i32, ptr byval(i128) %p) { -; CHECK-LABEL: @ByValForwardNoSanitize( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memset.p0.i64(ptr align 8 %[[#]], i8 0, i64 16, i1 false) -; CHECK: store i64 0, ptr @__msan_param_tls, align 8 -; CHECK: call void @Fn(ptr %p) -; CHECK: ret void +; CHECK-LABEL: define void @ByValForwardNoSanitize( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i128) [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 0, i64 16, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @Fn(ptr [[P]]) +; CHECK-NEXT: ret void ; entry: call void @Fn(ptr %p) @@ -65,14 +99,27 @@ entry: } define void @ByValForwardByVal(i32, ptr byval(i128) %p) sanitize_memory { -; CHECK-LABEL: @ByValForwardByVal( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[#]], ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 16, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 16, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr @__msan_param_tls, ptr 
%[[#]], i64 16, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 @__msan_param_origin_tls, ptr align 4 %[[#]], i64 16, i1 false) -; CHECK: call void @FnByVal(ptr byval(i128) %p) -; CHECK: ret void +; CHECK-LABEL: define void @ByValForwardByVal( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i128) [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP5]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 16, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 +; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP7]], 17592186044416 +; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -4 +; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr @__msan_param_tls, ptr [[TMP8]], i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 @__msan_param_origin_tls, ptr align 4 [[TMP11]], i64 16, i1 false) +; CHECK-NEXT: call void @FnByVal(ptr byval(i128) [[P]]) +; CHECK-NEXT: ret void ; entry: call void @FnByVal(ptr byval(i128) %p) @@ -80,12 +127,25 @@ entry: } define void @ByValForwardByValNoSanitize(i32, ptr byval(i128) %p) { -; CHECK-LABEL: @ByValForwardByValNoSanitize( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memset.p0.i64(ptr align 8 %[[#]], i8 0, i64 16, i1 false) -; CHECK: call void @llvm.memset.p0.i64(ptr @__msan_param_tls, i8 0, i64 16, i1 false) -; CHECK: call void @FnByVal(ptr byval(i128) %p) -; CHECK: ret void +; CHECK-LABEL: define void @ByValForwardByValNoSanitize( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i128) [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 0, i64 16, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080 +; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP7]], 17592186044416 +; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -4 +; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr @__msan_param_tls, i8 0, i64 16, i1 false) +; CHECK-NEXT: call void @FnByVal(ptr byval(i128) [[P]]) +; CHECK-NEXT: ret void ; entry: call void @FnByVal(ptr byval(i128) %p) @@ -96,16 +156,30 @@ declare void @FnByVal8(ptr byval(i8) %p); declare void @Fn8(ptr %p); define i8 @ByValArgument8(i32, ptr byval(i8) %p) sanitize_memory { -; CHECK-LABEL: @ByValArgument8( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 
1 %[[#]], ptr align 1 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 1, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 4, i1 false) -; CHECK: [[X:%.*]] = load i8, ptr %p, align 1 -; CHECK: [[_MSLD:%.*]] = load i8, ptr %[[#]], align 1 -; CHECK: %[[#]] = load i32, ptr %[[#]], align 4 -; CHECK: store i8 [[_MSLD]], ptr @__msan_retval_tls, align 8 -; CHECK: store i32 %[[#]], ptr @__msan_retval_origin_tls, align 4 -; CHECK: ret i8 [[X]] +; CHECK-LABEL: define i8 @ByValArgument8( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i8) [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 1, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP6]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 4, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[P]], align 1 +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 +; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP8]], 17592186044416 +; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], -4 +; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: [[_MSLD:%.*]] = load i8, ptr [[TMP9]], align 1 +; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +; CHECK-NEXT: store i8 [[_MSLD]], ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 [[TMP13]], ptr @__msan_retval_origin_tls, align 4 +; CHECK-NEXT: ret i8 [[X]] ; entry: %x = load i8, ptr %p @@ -113,13 +187,21 @@ entry: } define i8 @ByValArgumentNoSanitize8(i32, ptr byval(i8) %p) { -; CHECK-LABEL: @ByValArgumentNoSanitize8( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memset.p0.i64(ptr align 1 %[[#]], i8 0, i64 1, i1 false) -; CHECK: [[X:%.*]] = load i8, ptr %p, align 1 -; CHECK: store i8 0, ptr @__msan_retval_tls, align 8 -; CHECK: store i32 0, ptr @__msan_retval_origin_tls, align 4 -; CHECK: ret i8 [[X]] +; CHECK-LABEL: define i8 @ByValArgumentNoSanitize8( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i8) [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP3]], i8 0, i64 1, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[P]], align 1 +; CHECK-NEXT: store i8 0, ptr @__msan_retval_tls, align 8 +; CHECK-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4 +; CHECK-NEXT: ret i8 [[X]] ; entry: %x = load i8, ptr %p @@ -127,13 +209,21 @@ entry: } define void @ByValForward8(i32, ptr byval(i8) %p) 
sanitize_memory { -; CHECK-LABEL: @ByValForward8( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 1 %[[#]], ptr align 1 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 1, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 4, i1 false) -; CHECK: store i64 0, ptr @__msan_param_tls, align 8 -; CHECK: call void @Fn8(ptr %p) -; CHECK: ret void +; CHECK-LABEL: define void @ByValForward8( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i8) [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 1, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP6]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 4, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @Fn8(ptr [[P]]) +; CHECK-NEXT: ret void ; entry: call void @Fn8(ptr %p) @@ -141,12 +231,20 @@ entry: } define void @ByValForwardNoSanitize8(i32, ptr byval(i8) %p) { -; CHECK-LABEL: @ByValForwardNoSanitize8( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memset.p0.i64(ptr align 1 %[[#]], i8 0, i64 1, i1 false) -; CHECK: store i64 0, ptr @__msan_param_tls, align 8 -; CHECK: call void @Fn8(ptr %p) -; CHECK: ret void +; CHECK-LABEL: define void @ByValForwardNoSanitize8( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i8) [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP3]], i8 0, i64 1, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 +; CHECK-NEXT: call void @Fn8(ptr [[P]]) +; CHECK-NEXT: ret void ; entry: call void @Fn8(ptr %p) @@ -154,14 +252,28 @@ entry: } define void @ByValForwardByVal8(i32, ptr byval(i8) %p) sanitize_memory { -; CHECK-LABEL: @ByValForwardByVal8( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 1 %[[#]], ptr align 1 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 1, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 4, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr @__msan_param_tls, ptr %[[#]], i64 1, i1 false) -; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 4 @__msan_param_origin_tls, ptr align 4 %[[#]], i64 4, i1 false) -; CHECK: call void @FnByVal8(ptr byval(i8) %p) -; CHECK: ret void +; CHECK-LABEL: define void @ByValForwardByVal8( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i8) 
[[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 1, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP6]], ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), i64 4, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 +; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP8]], 17592186044416 +; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], -4 +; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr @__msan_param_tls, ptr [[TMP9]], i64 1, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 @__msan_param_origin_tls, ptr align 4 [[TMP12]], i64 4, i1 false) +; CHECK-NEXT: call void @FnByVal8(ptr byval(i8) [[P]]) +; CHECK-NEXT: ret void ; entry: call void @FnByVal8(ptr byval(i8) %p) @@ -169,12 +281,26 @@ entry: } define void @ByValForwardByValNoSanitize8(i32, ptr byval(i8) %p) { -; CHECK-LABEL: @ByValForwardByValNoSanitize8( -; CHECK-NEXT: entry: -; CHECK: call void @llvm.memset.p0.i64(ptr align 1 %[[#]], i8 0, i64 1, i1 false) -; CHECK: call void @llvm.memset.p0.i64(ptr @__msan_param_tls, i8 0, i64 1, i1 false) -; CHECK: call void @FnByVal8(ptr byval(i8) %p) -; CHECK: ret void +; CHECK-LABEL: define void @ByValForwardByValNoSanitize8( +; CHECK-SAME: i32 [[TMP0:%.*]], ptr byval(i8) [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416 +; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4 +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP3]], i8 0, i64 1, i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080 +; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP8]], 17592186044416 +; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], -4 +; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr @__msan_param_tls, i8 0, i64 1, i1 false) +; CHECK-NEXT: call void @FnByVal8(ptr byval(i8) [[P]]) +; CHECK-NEXT: ret void ; entry: call void @FnByVal8(ptr byval(i8) %p) diff --git a/llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll b/llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll index 0696ac92e59b8..582d75330f6d6 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/expand-experimental-reductions.ll @@ -134,7 +134,7 @@ define float @fadd_f32_accum(float %accum, <4 x float> %vec) #0 { ; CHECK-SAME: float 
[[ACCUM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP1]]) ; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]] @@ -152,7 +152,7 @@ define float @fadd_f32_strict(float %param, <4 x float> %vec) #0 { ; CHECK-SAME: float [[PARAM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]]) ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP3]], [[TMP1]] @@ -170,7 +170,7 @@ define float @fadd_f32_strict_accum(float %accum, <4 x float> %vec) #0 { ; CHECK-SAME: float [[ACCUM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP1]]) ; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]] @@ -205,7 +205,7 @@ define float @fmul_f32_accum(float %accum, <4 x float> %vec) #0 { ; CHECK-SAME: float [[ACCUM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP1]]) ; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]] @@ -223,7 +223,7 @@ define float @fmul_f32_strict(float %param, <4 x float> %vec) #0 { ; CHECK-SAME: float [[PARAM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP0]]) ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP3]], [[TMP1]] @@ -241,7 +241,7 @@ define float @fmul_f32_strict_accum(float %accum, <4 x float> %vec) #0 { ; CHECK-SAME: float [[ACCUM:%.*]], <4 x float> [[VEC:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: 
[[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP1]]) ; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll b/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll index 5ea407b3fda7a..a96046b9ed62d 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/funnel_shift.ll @@ -7,17 +7,17 @@ target triple = "x86_64-unknown-linux-gnu" define void @var_funnel_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %b64, <2 x i64> %b128, <4 x i64> %b256, <8 x i64> %b512, i64 %c64, <2 x i64> %c128, <4 x i64> %c256, <8 x i64> %c512) sanitize_memory { ; CHECK-LABEL: @var_funnel_i64( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, 
i64 264), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0 ; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i64 @@ -51,17 +51,17 @@ define void @var_funnel_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64 define void @var_funnel_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %b32, <4 x i32> %b128, <8 x i32> %b256, <16 x i32> %b512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory { ; CHECK-LABEL: @var_funnel_i32( ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 
+; CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP3]], 0 ; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i32 @@ -95,17 +95,17 @@ define void @var_funnel_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i3 define void @var_funnel_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %b16, <8 x i16> %b128, <16 x i16> %b256, <32 x i16> %b512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory { ; CHECK-LABEL: @var_funnel_i16( ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i16 [[TMP3]], 0 ; CHECK-NEXT: 
[[TMP14:%.*]] = sext i1 [[TMP13]] to i16 @@ -139,17 +139,17 @@ define void @var_funnel_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i define void @var_funnel_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %b8, <16 x i8> %b128, <32 x i8> %b256, <64 x i8> %b512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory { ; CHECK-LABEL: @var_funnel_i8( ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP11:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP3]], 0 ; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i8 @@ -183,13 +183,13 @@ define void @var_funnel_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> % define void @var_rotate_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %c64, <2 x i64> %c128, 
<4 x i64> %c256, <8 x i64> %c512) sanitize_memory { ; CHECK-LABEL: @var_rotate_i64( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i64 [[TMP2]], 0 ; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i64 @@ -223,13 +223,13 @@ define void @var_rotate_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64 define void @var_rotate_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory { ; CHECK-LABEL: @var_rotate_i32( ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, 
ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP2]], 0 ; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i32 @@ -263,13 +263,13 @@ define void @var_rotate_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i3 define void @var_rotate_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory { ; CHECK-LABEL: @var_rotate_i16( ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i16 [[TMP2]], 0 ; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i16 @@ -303,13 +303,13 @@ define void @var_rotate_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i define void @var_rotate_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory { ; CHECK-LABEL: @var_rotate_i8( ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <64 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP2]], 0 ; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i8 diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll index cbc556f8a8ee2..0d94357d169f0 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll @@ -13,7 +13,7 @@ target triple = "i386-unknown-linux-gnu" define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_addsub_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -30,7 +30,7 @@ declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nou define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_addsub_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -46,8 +46,8 @@ declare <8 x float> 
@llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_blendv_pd_256( -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -75,8 +75,8 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_blendv_ps_256( -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -105,7 +105,7 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_cmp_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -124,7 +124,7 @@ declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) no define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_cmp_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -141,7 +141,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: 
@test_x86_avx_cmp_ps_256_pseudo_op( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP99:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -400,7 +400,7 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>) nounwind readnone define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_dp_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -427,7 +427,7 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_hadd_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> @@ -446,7 +446,7 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_hadd_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> @@ -465,7 +465,7 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_hsub_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 
x i64> [[TMP1]], <4 x i64> [[TMP2]], <4 x i32> @@ -484,7 +484,7 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_hsub_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> @@ -527,7 +527,7 @@ declare <32 x i8> @llvm.x86.avx.ldu.dq.256(ptr) nounwind readonly define <2 x double> @test_x86_avx_maskload_pd(ptr %a0, <2 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx_maskload_pd( -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -554,7 +554,7 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(ptr, <2 x i64>) nounwind readonly define <4 x double> @test_x86_avx_maskload_pd_256(ptr %a0, <4 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx_maskload_pd_256( -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -581,7 +581,7 @@ declare <4 x double> @llvm.x86.avx.maskload.pd.256(ptr, <4 x i64>) nounwind read define <4 x float> @test_x86_avx_maskload_ps(ptr %a0, <4 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx_maskload_ps( -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -608,7 +608,7 @@ declare <4 x float> @llvm.x86.avx.maskload.ps(ptr, <4 x i32>) nounwind readonly define <8 x float> @test_x86_avx_maskload_ps_256(ptr %a0, <8 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx_maskload_ps_256( -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -635,9 +635,9 @@ declare <8 x float> @llvm.x86.avx.maskload.ps.256(ptr, <8 x i32>) nounwind reado define void 
@test_x86_avx_maskstore_pd(ptr %a0, <2 x i64> %mask, <2 x double> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_maskstore_pd( -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -665,9 +665,9 @@ declare void @llvm.x86.avx.maskstore.pd(ptr, <2 x i64>, <2 x double>) nounwind define void @test_x86_avx_maskstore_pd_256(ptr %a0, <4 x i64> %mask, <4 x double> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_maskstore_pd_256( -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -695,9 +695,9 @@ declare void @llvm.x86.avx.maskstore.pd.256(ptr, <4 x i64>, <4 x double>) nounwi define void @test_x86_avx_maskstore_ps(ptr %a0, <4 x i32> %mask, <4 x float> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_maskstore_ps( -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -725,9 +725,9 @@ declare void @llvm.x86.avx.maskstore.ps(ptr, <4 x i32>, <4 x float>) nounwind define void @test_x86_avx_maskstore_ps_256(ptr %a0, <8 x i32> %mask, <8 x float> %a2) #0 { ; CHECK-LABEL: @test_x86_avx_maskstore_ps_256( -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -756,7 +756,7 @@ declare void @llvm.x86.avx.maskstore.ps.256(ptr, <8 x i32>, <8 x float>) nounwin define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_max_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -773,7 +773,7 @@ declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwi define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_max_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -790,7 +790,7 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_min_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -807,7 +807,7 @@ declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwi define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_min_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -868,7 +868,7 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_ptestc_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = 
load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -888,7 +888,7 @@ declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_ptestnzc_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -908,7 +908,7 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_ptestz_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -987,7 +987,7 @@ declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vpermilvar_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[A1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[A1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i64> [[A1]] to <2 x i1> @@ -1014,7 +1014,7 @@ declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwi define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vpermilvar_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[A1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[A1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i64> [[A1]] to <4 x i2> @@ -1056,7 +1056,7 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) #0 { define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: 
@test_x86_avx_vpermilvar_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[A1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[A1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i32> [[A1]] to <4 x i2> @@ -1079,7 +1079,7 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) # } define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, ptr %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1119,7 +1119,7 @@ declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[A1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[A1:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = trunc <8 x i32> [[A1]] to <8 x i3> @@ -1146,7 +1146,7 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestc_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -1166,7 +1166,7 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestc_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -1186,7 +1186,7 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea define i32 
@test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestc_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -1206,7 +1206,7 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestc_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -1226,7 +1226,7 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestnzc_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -1246,7 +1246,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestnzc_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -1266,7 +1266,7 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind r define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestnzc_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], 
[[TMP2]] @@ -1286,7 +1286,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnon define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestnzc_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -1306,7 +1306,7 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind rea define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestz_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -1326,7 +1326,7 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnon define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestz_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]] @@ -1346,7 +1346,7 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind rea define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestz_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -1366,7 +1366,7 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_avx_vtestz_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr 
@__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -1410,7 +1410,7 @@ declare void @llvm.x86.avx.vzeroupper() nounwind define void @movnt_dq(ptr %p, <2 x i64> %a1) nounwind #0 { ; CHECK-LABEL: @movnt_dq( -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1442,7 +1442,7 @@ define void @movnt_ps(ptr %p, <8 x float> %a) nounwind #0 { ; CHECK-LABEL: @movnt_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] @@ -1465,7 +1465,7 @@ declare void @llvm.x86.avx.movnt.ps.256(ptr, <8 x float>) nounwind define void @movnt_pd(ptr %p, <4 x double> %a1) nounwind #0 { ; add operation forces the execution domain. ; CHECK-LABEL: @movnt_pd( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1494,7 +1494,7 @@ declare void @llvm.x86.avx.movnt.pd.256(ptr, <4 x double>) nounwind define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_pclmulqdq( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <2 x i32> zeroinitializer diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll index cd79bcb2233fe..6471e09fc467a 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll @@ -7,7 +7,7 @@ target triple = "i386-unknown-linux-gnu" define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_packssdw( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer @@ -42,7 +42,7 @@ define <16 x i16> @test_x86_avx2_packssdw_fold() #0 { define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_packsswb( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer @@ -77,7 +77,7 @@ define <32 x i8> @test_x86_avx2_packsswb_fold() #0 { define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_packuswb( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer @@ -112,7 +112,7 @@ define <32 x i8> @test_x86_avx2_packuswb_fold() #0 { define <32 x i8> @test_x86_avx2_pavg_b(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pavg_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]] @@ -129,7 +129,7 @@ declare <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8>, <32 x i8>) nounwind readnone define <16 x i16> @test_x86_avx2_pavg_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pavg_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] @@ -146,7 +146,7 @@ declare <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16>, <16 x i16>) nounwind readno define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmadd_wd( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] 
= load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer @@ -197,7 +197,7 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmulh_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] @@ -214,7 +214,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readn define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmulhu_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] @@ -231,7 +231,7 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind read define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psad_bw( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]] @@ -252,7 +252,7 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psll_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -275,7 +275,7 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psll_q( ; 
CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -298,7 +298,7 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psll_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 @@ -372,7 +372,7 @@ declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) nounwind readnone define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psra_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -395,7 +395,7 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psra_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 @@ -452,7 +452,7 @@ declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32) nounwind readnone define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrl_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -475,7 +475,7 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone define <4 x i64> 
@test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrl_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -498,7 +498,7 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrl_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 @@ -520,7 +520,7 @@ declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnon define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, ptr %p) #0 { ; CHECK-LABEL: @test_x86_avx2_psrl_w_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -606,7 +606,7 @@ declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phadd_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> @@ -625,7 +625,7 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phadd_sw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> @@ -644,7 +644,7 @@ declare 
<16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind read define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phadd_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> @@ -663,7 +663,7 @@ declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readn define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phsub_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> @@ -682,7 +682,7 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phsub_sw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> @@ -701,7 +701,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind read define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_phsub_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> @@ -720,7 +720,7 @@ declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readn define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <32 x i8> [[TMP1]], zeroinitializer @@ -748,7 +748,7 @@ declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind rea define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw_load_op0( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -787,7 +787,7 @@ define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) # define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pmul_hr_sw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] @@ -804,7 +804,7 @@ declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind re define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pshuf_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> [[TMP1]], <32 x i8> [[A1:%.*]]) @@ -822,7 +822,7 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psign_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]] @@ -839,7 +839,7 @@ declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psign_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x 
i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -856,7 +856,7 @@ declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psign_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]] @@ -873,7 +873,7 @@ declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readn define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_mpsadbw( ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <32 x i8> [[TMP1]] to i256 @@ -898,7 +898,7 @@ declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind rea define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_mpsadbw_load_op0( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -934,7 +934,7 @@ define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 { define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_packusdw( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer @@ -968,8 +968,8 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() #0 { define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_pblendvb( -; CHECK-NEXT: 
[[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -995,7 +995,7 @@ declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounw define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pblendw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> @@ -1012,7 +1012,7 @@ declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind r define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pblendd_128( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> @@ -1029,7 +1029,7 @@ declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_pblendd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> @@ -1046,7 +1046,7 @@ declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_permd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: 
[[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]] @@ -1063,7 +1063,7 @@ declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_permps( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 @@ -1088,7 +1088,7 @@ declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind reado define <2 x i64> @test_x86_avx2_maskload_q(ptr %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_maskload_q( -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -1114,7 +1114,7 @@ declare <2 x i64> @llvm.x86.avx2.maskload.q(ptr, <2 x i64>) nounwind readonly define <4 x i64> @test_x86_avx2_maskload_q_256(ptr %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_maskload_q_256( -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -1140,7 +1140,7 @@ declare <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr, <4 x i64>) nounwind readonl define <4 x i32> @test_x86_avx2_maskload_d(ptr %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_maskload_d( -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -1166,7 +1166,7 @@ declare <4 x i32> @llvm.x86.avx2.maskload.d(ptr, <4 x i32>) nounwind readonly define <8 x i32> @test_x86_avx2_maskload_d_256(ptr %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_maskload_d_256( -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -1192,9 +1192,9 @@ declare <8 x 
i32> @llvm.x86.avx2.maskload.d.256(ptr, <8 x i32>) nounwind readonl define void @test_x86_avx2_maskstore_q(ptr %a0, <2 x i64> %a1, <2 x i64> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_maskstore_q( -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -1221,9 +1221,9 @@ declare void @llvm.x86.avx2.maskstore.q(ptr, <2 x i64>, <2 x i64>) nounwind define void @test_x86_avx2_maskstore_q_256(ptr %a0, <4 x i64> %a1, <4 x i64> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_maskstore_q_256( -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -1250,9 +1250,9 @@ declare void @llvm.x86.avx2.maskstore.q.256(ptr, <4 x i64>, <4 x i64>) nounwind define void @test_x86_avx2_maskstore_d(ptr %a0, <4 x i32> %a1, <4 x i32> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_maskstore_d( -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -1279,9 +1279,9 @@ declare void @llvm.x86.avx2.maskstore.d(ptr, <4 x i32>, <4 x i32>) nounwind define void @test_x86_avx2_maskstore_d_256(ptr %a0, <8 x i32> %a1, <8 x i32> %a2) #0 { ; CHECK-LABEL: @test_x86_avx2_maskstore_d_256( -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[A0:%.*]] to i64 @@ -1309,7 +1309,7 @@ declare void @llvm.x86.avx2.maskstore.d.256(ptr, <8 x i32>, <8 x i32>) nounwind define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psllv_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer @@ -1350,7 +1350,7 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psllv_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer @@ -1391,7 +1391,7 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind read define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psllv_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer @@ -1424,7 +1424,7 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psllv_q_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer @@ -1458,7 +1458,7 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind read define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrlv_d( ; 
CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer @@ -1499,7 +1499,7 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrlv_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer @@ -1540,7 +1540,7 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind read define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrlv_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer @@ -1574,7 +1574,7 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrlv_q_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer @@ -1609,7 +1609,7 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind read define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrav_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer @@ -1642,7 +1642,7 @@ declare <4 x i32> 
@llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_avx2_psrav_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer @@ -1675,9 +1675,9 @@ declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind read define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, ptr %a1, <4 x i32> %idx, <2 x double> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 @@ -1709,9 +1709,9 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, ptr, define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, ptr %a1, <4 x i32> %idx, <4 x double> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 @@ -1743,9 +1743,9 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, ptr, define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, ptr %a1, <2 x i64> %idx, <2 x double> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; 
CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 @@ -1777,9 +1777,9 @@ declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, ptr, define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, ptr %a1, <4 x i64> %idx, <4 x double> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_pd_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 @@ -1811,9 +1811,9 @@ declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, ptr, define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, ptr %a1, <4 x i32> %idx, <4 x float> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 @@ -1845,9 +1845,9 @@ declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, ptr, define <8 x float> 
@test_x86_avx2_gather_d_ps_256(<8 x float> %a0, ptr %a1, <8 x i32> %idx, <8 x float> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 @@ -1879,9 +1879,9 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, ptr, define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, ptr %a1, <2 x i64> %idx, <4 x float> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 @@ -1913,9 +1913,9 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, ptr, define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, ptr %a1, <4 x i64> %idx, <4 x float> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_ps_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 @@ -1947,9 +1947,9 @@ declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, ptr, define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, ptr %a1, <4 x i32> %idx, <2 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 @@ -1981,9 +1981,9 @@ declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, ptr, define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, ptr %a1, <4 x i32> %idx, <4 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_q_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 @@ -2015,9 +2015,9 @@ declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, ptr, define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128 @@ -2049,9 +2049,9 @@ declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, ptr, define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, ptr %a1, <4 x i64> %idx, <4 x i64> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_q_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256 @@ -2083,9 +2083,9 @@ declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, ptr, define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, ptr %a1, <4 x i32> %idx, <4 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 @@ -2117,9 +2117,9 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, ptr, define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, ptr %a1, <8 x i32> %idx, <8 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_d_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: 
[[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 @@ -2151,9 +2151,9 @@ declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, ptr, define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, ptr %a1, <2 x i64> %idx, <4 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 @@ -2185,9 +2185,9 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, ptr, define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, ptr %a1, <4 x i64> %idx, <4 x i32> %mask) #0 { ; CHECK-LABEL: @test_x86_avx2_gather_q_d_256( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 @@ -2219,10 +2219,10 @@ declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, ptr, define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, <8 x float> %mask, ptr nocapture %out) #0 { ; CHECK-LABEL: @test_gather_mask( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP1]] to i256 @@ -2265,10 +2265,10 @@ define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, < define <2 x i64> @test_mask_demanded_bits(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i1> %mask) #0 { ; CHECK-LABEL: @test_mask_demanded_bits( -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64> diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll index 8052b5e345265..1b7e3d780a32e 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll @@ -22,7 +22,7 @@ define i64 @test1(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test1( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -69,7 +69,7 @@ define i64 @test88(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test88( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; 
CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -110,7 +110,7 @@ define i64 @test87(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test87( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -151,7 +151,7 @@ define i64 @test86(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test86( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -192,7 +192,7 @@ define i64 @test85(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test85( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -233,7 +233,7 @@ define i64 @test84(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test84( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -274,7 +274,7 @@ define i64 @test83(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test83( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -315,7 +315,7 @@ define i64 @test82(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test82( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: 
entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -356,7 +356,7 @@ define i64 @test81(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test81( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -397,7 +397,7 @@ define i64 @test80(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test80( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -438,7 +438,7 @@ define i64 @test79(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test79( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -479,7 +479,7 @@ define i64 @test78(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test78( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -520,7 +520,7 @@ define i64 @test77(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test77( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr 
@__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -561,7 +561,7 @@ define i64 @test76(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test76( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -610,7 +610,7 @@ define i64 @test75(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test75( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -659,7 +659,7 @@ define i64 @test74(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test74( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1076,7 +1076,7 @@ define i64 @test65(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32> @@ -1122,7 +1122,7 @@ define i64 @test64(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> @@ -1168,7 +1168,7 @@ define i64 @test63(<1 x i64> %a, <1 x i64> %b) #0 { ; 
CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0 @@ -1208,7 +1208,7 @@ define i64 @test62(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32> @@ -1254,7 +1254,7 @@ define i64 @test61(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> @@ -1300,7 +1300,7 @@ define i64 @test60(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0 @@ -1340,7 +1340,7 @@ define i64 @test59(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32> @@ -1386,7 +1386,7 @@ define i64 @test58(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP9:%.*]] = load <1 x 
i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16> @@ -1431,7 +1431,7 @@ define i64 @test56(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test56( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1472,7 +1472,7 @@ define i64 @test55(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test55( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1513,7 +1513,7 @@ define i64 @test54(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test54( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1554,7 +1554,7 @@ define i64 @test53(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test53( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1595,7 +1595,7 @@ define i64 @test52(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test52( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = 
load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1634,7 +1634,7 @@ define i64 @test51(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test51( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1675,7 +1675,7 @@ define i64 @test50(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test50( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1716,7 +1716,7 @@ define i64 @test49(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test49( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP13:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP13:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1775,7 +1775,7 @@ define i64 @test48(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test48( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1816,7 +1816,7 @@ define i64 @test47(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test47( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1857,7 +1857,7 @@ define i64 @test46(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test46( ; CHECK-SAME: <1 x i64> 
[[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1898,7 +1898,7 @@ define i64 @test45(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test45( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1938,7 +1938,7 @@ define i64 @test44(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0 @@ -1974,7 +1974,7 @@ define i64 @test43(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test43( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2015,7 +2015,7 @@ define i64 @test42(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test42( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2056,7 +2056,7 @@ define i64 @test41(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test41( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, 
ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2097,7 +2097,7 @@ define i64 @test40(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test40( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2138,7 +2138,7 @@ define i64 @test39(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test39( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2179,7 +2179,7 @@ define i64 @test38(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test38( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2220,7 +2220,7 @@ define i64 @test37(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test37( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2262,7 +2262,7 @@ define i64 @test36(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0 @@ -2296,7 +2296,7 @@ define 
i64 @test35(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test35( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2337,7 +2337,7 @@ define i64 @test34(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test34( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2378,7 +2378,7 @@ define i64 @test33(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test33( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2419,7 +2419,7 @@ define i64 @test32(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test32( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2459,7 +2459,7 @@ define i64 @test31(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test31( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2500,7 +2500,7 @@ define i64 @test30(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test30( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 
+; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2541,7 +2541,7 @@ define i64 @test29(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test29( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2582,7 +2582,7 @@ define i64 @test28(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test28( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2623,7 +2623,7 @@ define i64 @test27(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test27( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2664,7 +2664,7 @@ define i64 @test26(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test26( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2705,7 +2705,7 @@ define void @test25(ptr %p, <1 x i64> %a) nounwind optsize ssp #0 { ; CHECK-LABEL: define void @test25( ; CHECK-SAME: ptr [[P:%.*]], <1 x i64> [[A:%.*]]) #[[ATTR3:[0-9]+]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void 
@llvm.donothing() @@ -2770,9 +2770,9 @@ define void @test23(<1 x i64> %d, <1 x i64> %n, ptr %p) nounwind optsize ssp #0 ; CHECK-LABEL: define void @test23( ; CHECK-SAME: <1 x i64> [[D:%.*]], <1 x i64> [[N:%.*]], ptr [[P:%.*]]) #[[ATTR3]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP4]] to <8 x i8> @@ -2813,7 +2813,7 @@ define i64 @test22(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test22( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -2922,7 +2922,7 @@ define i64 @test20(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test20( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3051,7 +3051,7 @@ define i64 @test16(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP6]], i32 0 @@ -3192,7 +3192,7 @@ define i64 @test12(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test12( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load 
i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3233,7 +3233,7 @@ define i64 @test11(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test11( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3274,7 +3274,7 @@ define i64 @test10(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test10( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3315,7 +3315,7 @@ define i64 @test9(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test9( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3357,7 +3357,7 @@ define i64 @test8(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test8( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3398,7 +3398,7 @@ define i64 @test7(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test7( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3457,7 +3457,7 @@ define i64 @test6(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test6( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: 
[[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3504,7 +3504,7 @@ define i64 @test5(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test5( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3551,7 +3551,7 @@ define i64 @test4(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test4( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3598,7 +3598,7 @@ define i64 @test3(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test3( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3645,7 +3645,7 @@ define i64 @test2(<1 x i64> %a, <1 x i64> %b) #0 { ; CHECK-LABEL: define i64 @test2( ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -3694,7 +3694,7 @@ define <4 x float> @test89(<4 x float> %a, <1 x i64> %b) nounwind #0 { ; CHECK-LABEL: define <4 x float> @test89( ; CHECK-SAME: <4 x float> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR4:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), 
align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 @@ -3740,7 +3740,7 @@ define <1 x i64> @test_mm_insert_pi16(<1 x i64> %a.coerce, i32 %d) nounwind #0 { ; CHECK-SAME: <1 x i64> [[A_COERCE:%.*]], i32 [[D:%.*]]) #[[ATTR4]] { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP7:%.*]] = bitcast <1 x i64> [[TMP3]] to i64 diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll index 017bbcf4f3689..e37894192276a 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll @@ -13,7 +13,7 @@ define void @StoreIntrinsic(ptr %p, <4 x float> %x) nounwind uwtable sanitize_me ; CHECK-LABEL: define void @StoreIntrinsic( ; CHECK-SAME: ptr [[P:%.*]], <4 x float> [[X:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -2147483649 @@ -25,8 +25,8 @@ define void @StoreIntrinsic(ptr %p, <4 x float> %x) nounwind uwtable sanitize_me ; ORIGINS-LABEL: define void @StoreIntrinsic( ; ORIGINS-SAME: ptr [[P:%.*]], <4 x float> [[X:%.*]]) #[[ATTR0:[0-9]+]] { ; ORIGINS-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 -; ORIGINS-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGINS-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGINS-NEXT: call void @llvm.donothing() ; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 ; ORIGINS-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649 @@ -107,7 +107,7 @@ define <8 x i16> @Pmulhuw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable saniti ; CHECK-LABEL: define <8 x i16> @Pmulhuw128( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: 
call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] @@ -119,8 +119,8 @@ define <8 x i16> @Pmulhuw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable saniti ; ORIGINS-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] { ; ORIGINS-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGINS-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGINS-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; ORIGINS-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGINS-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; ORIGINS-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGINS-NEXT: call void @llvm.donothing() ; ORIGINS-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP3]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll index ffad6fb5a2b68..6b7f813336f78 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll @@ -7,7 +7,7 @@ target triple = "i386-unknown-linux-gnu" define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_cmp_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -26,7 +26,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_cmp_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -47,7 +47,7 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comieq_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -67,7 +67,7 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comige_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -87,7 +87,7 @@ declare i32 @llvm.x86.sse.comige.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comigt_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -107,7 +107,7 @@ declare i32 @llvm.x86.sse.comigt.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comile_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -127,7 +127,7 @@ declare i32 @llvm.x86.sse.comile.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comilt_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -147,7 +147,7 @@ declare i32 @llvm.x86.sse.comilt.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_comineq_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr 
@__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -238,7 +238,7 @@ declare void @llvm.x86.sse.ldmxcsr(ptr) nounwind define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_max_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -255,7 +255,7 @@ declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind read define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_max_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -273,7 +273,7 @@ declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind read define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_min_ps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -290,7 +290,7 @@ declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind read define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_min_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -415,7 +415,7 @@ declare void @llvm.x86.sse.stmxcsr(ptr) nounwind define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomieq_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; 
CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -435,7 +435,7 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomige_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -455,7 +455,7 @@ declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomigt_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -475,7 +475,7 @@ declare i32 @llvm.x86.sse.ucomigt.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomile_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -495,7 +495,7 @@ declare i32 @llvm.x86.sse.ucomile.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomilt_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -515,7 +515,7 @@ declare i32 @llvm.x86.sse.ucomilt.ss(<4 x float>, <4 x float>) nounwind readnone define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse_ucomineq_ss( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll index 3a37eafd78ecb..806eac09c695e 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll @@ -15,7 +15,7 @@ target triple = "i386-unknown-linux-gnu" define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_cmp_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -34,7 +34,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounw define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_cmp_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -55,7 +55,7 @@ declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounw define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comieq_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -75,7 +75,7 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comige_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -95,7 +95,7 @@ declare i32 
@llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comigt_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -115,7 +115,7 @@ declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comile_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -135,7 +135,7 @@ declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comilt_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comilt_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -155,7 +155,7 @@ declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readno define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_comineq_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -356,7 +356,7 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_cvtsd2ss( -; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void 
@llvm.donothing() @@ -380,7 +380,7 @@ declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 { ; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -415,7 +415,7 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 { define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, ptr %p1) optsize #0 { ; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load_optsize( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -566,7 +566,7 @@ declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone define <2 x double> @test_x86_sse2_max_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_max_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -583,7 +583,7 @@ declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_max_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -601,7 +601,7 @@ declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind define <2 x double> @test_x86_sse2_min_pd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_min_pd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], 
[[TMP2]] @@ -618,7 +618,7 @@ declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_min_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -658,7 +658,7 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_packssdw_128( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer @@ -693,7 +693,7 @@ define <8 x i16> @test_x86_sse2_packssdw_128_fold() #0 { define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_packsswb_128( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer @@ -728,7 +728,7 @@ define <16 x i8> @test_x86_sse2_packsswb_128_fold() #0 { define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_packuswb_128( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer @@ -763,7 +763,7 @@ define <16 x i8> @test_x86_sse2_packuswb_128_fold() #0 { define <16 x i8> @test_x86_sse2_pavg_b(<16 x i8> %a0, <16 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pavg_b( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void 
@llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]] @@ -780,7 +780,7 @@ declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone define <8 x i16> @test_x86_sse2_pavg_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pavg_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] @@ -797,7 +797,7 @@ declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pmadd_wd( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP5:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer @@ -848,7 +848,7 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone define <8 x i16> @test_x86_sse2_pmulh_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pmulh_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] @@ -865,7 +865,7 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone define <8 x i16> @test_x86_sse2_pmulhu_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_pmulhu_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]] @@ -882,7 +882,7 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnon define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psad_bw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: 
[[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]] @@ -903,7 +903,7 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psll_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -926,7 +926,7 @@ declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psll_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -949,7 +949,7 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psll_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 @@ -1023,7 +1023,7 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psra_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -1046,7 +1046,7 @@ declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psra_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x 
i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 @@ -1103,7 +1103,7 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psrl_d( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128 @@ -1126,7 +1126,7 @@ declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psrl_q( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128 @@ -1149,7 +1149,7 @@ declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_psrl_w( ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128 @@ -1171,7 +1171,7 @@ declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, ptr %p) #0 { ; CHECK-LABEL: @test_x86_sse2_psrl_w_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -1257,7 +1257,7 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomieq_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: 
[[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -1277,7 +1277,7 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomige_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -1297,7 +1297,7 @@ declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomigt_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -1317,7 +1317,7 @@ declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomile_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -1337,7 +1337,7 @@ declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomilt_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -1357,7 +1357,7 @@ declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readn define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse2_ucomineq_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = 
load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll index e51c53375d2b5..24f22bd56a64f 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll @@ -6,8 +6,8 @@ target triple = "i386-unknown-linux-gnu" define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 { ; CHECK-LABEL: @test_x86_sse41_blendvpd( -; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -35,8 +35,8 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 { ; CHECK-LABEL: @test_x86_sse41_blendvps( -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -65,7 +65,7 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x floa define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_dppd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -87,7 +87,7 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwi define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_dpps( ; 
CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] @@ -109,7 +109,7 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_insertps( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128 @@ -136,7 +136,7 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounw define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_mpsadbw( ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to i128 @@ -161,7 +161,7 @@ declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind rea define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_mpsadbw_load_op0( ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0 @@ -197,7 +197,7 @@ define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 { define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_packusdw( ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer @@ -231,8 +231,8 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() #0 { define <16 x i8> 
@test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) #0 { ; CHECK-LABEL: @test_x86_sse41_pblendvb( -; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -273,7 +273,7 @@ declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_ptestc( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -293,7 +293,7 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_ptestnzc( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -313,7 +313,7 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_ptestz( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]] @@ -363,7 +363,7 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_round_sd( ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP5:%.*]] = load 
i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP2]], <2 x i32> @@ -379,7 +379,7 @@ declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) n define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_round_sd_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() @@ -407,7 +407,7 @@ define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0 define <4 x float> @test_x86_sse41_round_ss_load(<4 x float> %a0, ptr %a1) #0 { ; CHECK-LABEL: @test_x86_sse41_round_ss_load( -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll index 7bc9cf3b8c582..436a3b31221d8 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll @@ -13,205 +13,205 @@ define dso_local i64 @many_args() { ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; 
CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: 
store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr 
inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr 
(i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 312), align 8 +; CHECK-NEXT: store i64 
0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 624), 
align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 792), align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to 
i64), i64 104) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 200) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 216) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 224) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 232) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 248) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 264) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 272) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 280) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 288) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 296) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 312) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 328) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_va_arg_tls to i64), i64 336) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 344) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 352) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 360) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 376) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 392) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 400) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 408) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 416) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 424) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 440) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 456) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 464) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 472) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 480) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 488) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 504) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 520) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 528) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 536) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 544) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 552) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_va_arg_tls to i64), i64 568) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 584) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 592) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 600) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 608) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 616) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 632) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 648) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 656) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 664) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 672) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 680) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 696) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 712) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 720) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 728) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 736) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 744) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 760) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8 -; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8 +; CHECK-NEXT: store i64 0, ptr 
getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 40), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 56), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 72), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 80), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 88), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 96), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 104), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 112), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 120), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 136), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 144), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 152), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 160), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 168), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 184), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 200), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 208), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 216), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 224), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 232), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 240), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 248), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 264), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 272), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 280), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 288), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 296), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 304), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr 
@__msan_va_arg_tls, i64 312), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 328), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 336), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 344), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 352), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 360), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 368), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 376), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 392), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 400), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 408), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 416), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 424), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 432), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 440), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 456), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 464), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 472), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 480), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 488), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 496), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 504), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 520), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 528), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 536), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 544), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 552), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 560), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 568), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 584), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 592), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 600), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 608), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, 
i64 616), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 624), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 632), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 648), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 656), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 664), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 672), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 680), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 688), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 696), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 712), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 720), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 728), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 736), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 744), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 752), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 760), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 768), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 776), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 784), align 8 +; CHECK-NEXT: store i64 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 792), align 8 ; CHECK-NEXT: store i64 1040, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) 
@sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1) diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll index 27459397b34a3..cc2d94c5f867b 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll @@ -18,12 +18,12 @@ define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr { ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (i32, ...) 
@sum(i32 3, i32 [[A]], i32 [[B]], i32 [[C]]) @@ -37,12 +37,12 @@ define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr { ; ORIGIN-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; ORIGIN-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8 -; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; ORIGIN-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; ORIGIN-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; ORIGIN-NEXT: [[CALL:%.*]] = tail call i32 (i32, ...) @sum(i32 3, i32 [[A]], i32 [[B]], i32 [[C]]) @@ -58,12 +58,12 @@ define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr { ; ORIGIN2-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN2-NEXT: call void @llvm.donothing() ; ORIGIN2-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; ORIGIN2-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8 -; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; ORIGIN2-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN2-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 ; ORIGIN2-NEXT: [[CALL:%.*]] = tail call i32 (i32, ...) 
@sum(i32 3, i32 [[A]], i32 [[B]], i32 [[C]]) @@ -446,12 +446,12 @@ define dso_local i80 @test_i80(i80 %a, i80 %b, i80 %c) local_unnamed_addr { ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; CHECK-NEXT: store i80 0, ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; CHECK-NEXT: store i64 48, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: store i80 0, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[CALL:%.*]] = tail call i80 (i32, ...) @sum_i80(i32 3, i80 [[A]], i80 [[B]], i80 [[C]]) @@ -465,12 +465,12 @@ define dso_local i80 @test_i80(i80 %a, i80 %b, i80 %c) local_unnamed_addr { ; ORIGIN-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; ORIGIN-NEXT: store i80 0, ptr @__msan_va_arg_tls, align 8 -; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; ORIGIN-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; ORIGIN-NEXT: store i64 48, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN-NEXT: store i80 0, ptr @__msan_retval_tls, align 8 ; ORIGIN-NEXT: [[CALL:%.*]] = tail call i80 (i32, ...) 
@sum_i80(i32 3, i80 [[A]], i80 [[B]], i80 [[C]]) @@ -486,12 +486,12 @@ define dso_local i80 @test_i80(i80 %a, i80 %b, i80 %c) local_unnamed_addr { ; ORIGIN2-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN2-NEXT: call void @llvm.donothing() ; ORIGIN2-NEXT: store i32 0, ptr @__msan_param_tls, align 8 -; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; ORIGIN2-NEXT: store i80 0, ptr @__msan_va_arg_tls, align 8 -; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; ORIGIN2-NEXT: store i80 0, ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; ORIGIN2-NEXT: store i64 48, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN2-NEXT: store i80 0, ptr @__msan_retval_tls, align 8 ; ORIGIN2-NEXT: [[CALL:%.*]] = tail call i80 (i32, ...) @sum_i80(i32 3, i80 [[A]], i80 [[B]], i80 [[C]]) diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll index 74a62762fc184..681b331fcb137 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll @@ -40,8 +40,8 @@ define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef signext %arg) sanit ; CHECK-NEXT: [[_MSPROP:%.*]] = sext i8 [[_MSLD]] to i32 ; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 ; CHECK-NEXT: store i8 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 [[_MSPROP]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: store i32 [[_MSPROP]], ptr @__msan_va_arg_tls, align 8 ; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i8, i32, ...) 
@_Z5test2IcEvT_iz(i8 noundef signext [[TMP7]], i32 noundef 1, i32 noundef [[CONV]]) @@ -82,8 +82,8 @@ define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_mem ; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4 ; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_va_arg_tls, align 8 ; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef [[TMP7]], i32 noundef 1, i32 noundef [[TMP7]]) @@ -125,8 +125,8 @@ define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_m ; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[_MSLD]] to i64 ; CHECK-NEXT: [[CONV:%.*]] = fpext float [[TMP7]] to double ; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[TMP11]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: store i64 [[TMP11]], ptr @__msan_va_arg_tls, align 8 ; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef [[TMP7]], i32 noundef 1, double noundef [[CONV]]) @@ -167,8 +167,8 @@ define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_ ; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8 ; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (double, i32, ...) 
@_Z5test2IdEvT_iz(double noundef [[TMP7]], i32 noundef 1, double noundef [[TMP7]]) @@ -208,8 +208,8 @@ define linkonce_odr dso_local void @_Z4testIeEvT_(x86_fp80 noundef %arg) sanitiz ; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i80, ptr [[TMP10]], align 16 ; CHECK-NEXT: store i80 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i80 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: store i80 [[_MSLD]], ptr @__msan_va_arg_tls, align 8 ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (x86_fp80, i32, ...) @_Z5test2IeEvT_iz(x86_fp80 noundef [[TMP7]], i32 noundef 1, x86_fp80 noundef [[TMP7]]) @@ -249,8 +249,8 @@ define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitiz ; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr ; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8 ; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i64, i32, ...) 
@_Z5test2I6IntIntEvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]]) @@ -271,7 +271,7 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_INT64INT64:%.*]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 @@ -302,12 +302,12 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i ; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr ; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i64, i64, i32, ...) 
@_Z5test2I10Int64Int64EvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]]) ; CHECK-NEXT: ret void @@ -330,7 +330,7 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEDOUBLE:%.*]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 @@ -361,12 +361,12 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc ; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr ; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: store i64 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (double, double, i32, ...) 
@_Z5test2I12DoubleDoubleEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]]) ; CHECK-NEXT: ret void @@ -399,11 +399,11 @@ define linkonce_odr dso_local void @_Z4testI7Double4EvT_(ptr noundef byval(%stru ; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649 ; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false) -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP8]], i64 32, i1 false) ; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP11]], -2147483649 ; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr @@ -426,7 +426,7 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEFLOAT:%.*]], align 8 ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64 @@ -457,12 +457,12 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce ; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr ; CHECK-NEXT: [[_MSLD1:%.*]] = load i32, ptr [[TMP17]], align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i32 [[_MSLD1]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8 -; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: store i32 
[[_MSLD1]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (double, float, i32, ...) @_Z5test2I11DoubleFloatEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]]) ; CHECK-NEXT: ret void @@ -495,11 +495,11 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_(ptr noundef byval( ; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649 ; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false) -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP8]], i64 32, i1 false) ; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP11]], -2147483649 ; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr @@ -530,11 +530,11 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_(ptr noundef byval( ; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649 ; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false) -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 72), ptr align 8 [[TMP8]], i64 64, i1 false) ; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP11]], -2147483649 ; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr @@ -1103,51 +1103,51 @@ define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(ptr noundef byval ; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649 ; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false) -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649 
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 72), ptr align 8 [[TMP8]], i64 64, i1 false) ; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649 ; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), ptr align 8 [[TMP11]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 136), ptr align 8 [[TMP11]], i64 64, i1 false) ; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], -2147483649 ; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), ptr align 8 [[TMP14]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 200), ptr align 8 [[TMP14]], i64 64, i1 false) ; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], -2147483649 ; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), ptr align 8 [[TMP17]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 264), ptr align 8 [[TMP17]], i64 64, i1 false) ; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP18]], -2147483649 ; CHECK-NEXT: [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), ptr align 8 [[TMP20]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 328), ptr align 8 [[TMP20]], i64 64, i1 false) ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = and i64 [[TMP21]], -2147483649 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), ptr align 8 [[TMP23]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 392), ptr align 8 [[TMP23]], i64 64, i1 false) ; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP25:%.*]] = and i64 [[TMP24]], -2147483649 ; CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), ptr align 8 [[TMP26]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 456), ptr align 8 [[TMP26]], i64 64, i1 false) ; CHECK-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: 
[[TMP28:%.*]] = and i64 [[TMP27]], -2147483649 ; CHECK-NEXT: [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), ptr align 8 [[TMP29]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 520), ptr align 8 [[TMP29]], i64 64, i1 false) ; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP31:%.*]] = and i64 [[TMP30]], -2147483649 ; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), ptr align 8 [[TMP32]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 584), ptr align 8 [[TMP32]], i64 64, i1 false) ; CHECK-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP34:%.*]] = and i64 [[TMP33]], -2147483649 ; CHECK-NEXT: [[TMP35:%.*]] = inttoptr i64 [[TMP34]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), ptr align 8 [[TMP35]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 648), ptr align 8 [[TMP35]], i64 64, i1 false) ; CHECK-NEXT: [[TMP36:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP37:%.*]] = and i64 [[TMP36]], -2147483649 ; CHECK-NEXT: [[TMP38:%.*]] = inttoptr i64 [[TMP37]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), ptr align 8 [[TMP38]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 712), ptr align 8 [[TMP38]], i64 64, i1 false) ; CHECK-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP42:%.*]] = and i64 [[TMP41]], -2147483649 ; CHECK-NEXT: [[TMP43:%.*]] = inttoptr i64 [[TMP42]] to ptr @@ -1155,47 +1155,47 @@ define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(ptr noundef byval ; CHECK-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP45:%.*]] = and i64 [[TMP44]], -2147483649 ; CHECK-NEXT: [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), ptr align 8 [[TMP46]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 64), ptr align 8 [[TMP46]], i64 64, i1 false) ; CHECK-NEXT: [[TMP47:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP48:%.*]] = and i64 [[TMP47]], -2147483649 ; CHECK-NEXT: [[TMP49:%.*]] = inttoptr i64 [[TMP48]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), ptr align 8 [[TMP49]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 128), ptr align 8 [[TMP49]], i64 64, i1 false) ; CHECK-NEXT: [[TMP50:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP51:%.*]] = and i64 [[TMP50]], -2147483649 ; CHECK-NEXT: [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr 
(i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), ptr align 8 [[TMP52]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 192), ptr align 8 [[TMP52]], i64 64, i1 false) ; CHECK-NEXT: [[TMP53:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP54:%.*]] = and i64 [[TMP53]], -2147483649 ; CHECK-NEXT: [[TMP55:%.*]] = inttoptr i64 [[TMP54]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), ptr align 8 [[TMP55]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 256), ptr align 8 [[TMP55]], i64 64, i1 false) ; CHECK-NEXT: [[TMP56:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP57:%.*]] = and i64 [[TMP56]], -2147483649 ; CHECK-NEXT: [[TMP58:%.*]] = inttoptr i64 [[TMP57]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), ptr align 8 [[TMP58]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 320), ptr align 8 [[TMP58]], i64 64, i1 false) ; CHECK-NEXT: [[TMP59:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP60:%.*]] = and i64 [[TMP59]], -2147483649 ; CHECK-NEXT: [[TMP61:%.*]] = inttoptr i64 [[TMP60]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), ptr align 8 [[TMP61]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 384), ptr align 8 [[TMP61]], i64 64, i1 false) ; CHECK-NEXT: [[TMP62:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP63:%.*]] = and i64 [[TMP62]], -2147483649 ; CHECK-NEXT: [[TMP64:%.*]] = inttoptr i64 [[TMP63]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), ptr align 8 [[TMP64]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 448), ptr align 8 [[TMP64]], i64 64, i1 false) ; CHECK-NEXT: [[TMP65:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP66:%.*]] = and i64 [[TMP65]], -2147483649 ; CHECK-NEXT: [[TMP67:%.*]] = inttoptr i64 [[TMP66]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), ptr align 8 [[TMP67]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 512), ptr align 8 [[TMP67]], i64 64, i1 false) ; CHECK-NEXT: [[TMP68:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP69:%.*]] = and i64 [[TMP68]], -2147483649 ; CHECK-NEXT: [[TMP70:%.*]] = inttoptr i64 [[TMP69]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), ptr align 8 [[TMP70]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 576), ptr align 8 [[TMP70]], i64 64, i1 false) ; CHECK-NEXT: [[TMP71:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP72:%.*]] = and i64 [[TMP71]], -2147483649 ; CHECK-NEXT: [[TMP73:%.*]] = inttoptr i64 [[TMP72]] to ptr -; 
CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), ptr align 8 [[TMP73]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 640), ptr align 8 [[TMP73]], i64 64, i1 false) ; CHECK-NEXT: [[TMP74:%.*]] = ptrtoint ptr [[ARG]] to i64 ; CHECK-NEXT: [[TMP75:%.*]] = and i64 [[TMP74]], -2147483649 ; CHECK-NEXT: [[TMP76:%.*]] = inttoptr i64 [[TMP75]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), ptr align 8 [[TMP76]], i64 64, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 704), ptr align 8 [[TMP76]], i64 64, i1 false) ; CHECK-NEXT: store i64 1280, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 20, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]]) ; CHECK-NEXT: ret void diff --git a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll index ff37605acaddd..3ac6844b3ffe8 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll @@ -19,7 +19,7 @@ declare void @llvm.masked.compressstore.v16f32(<16 x float>, ptr, <16 x i1>) define void @Store(ptr %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory { ; CHECK-LABEL: @Store( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 @@ -30,9 +30,9 @@ define void @Store(ptr %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory { ; ; ADDR-LABEL: @Store( ; ADDR-NEXT: entry: -; 
ADDR-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; ADDR-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 +; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 ; ADDR-NEXT: call void @llvm.donothing() ; ADDR-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64 ; ADDR-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -52,8 +52,8 @@ define void @Store(ptr %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory { ; ; ORIGINS-LABEL: @Store( ; ORIGINS-NEXT: entry: -; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGINS-NEXT: call void @llvm.donothing() ; ORIGINS-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64 ; ORIGINS-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 @@ -88,7 +88,7 @@ entry: define <4 x double> @Load(ptr %p, <4 x double> %v, <4 x i1> %mask) sanitize_memory { ; CHECK-LABEL: @Load( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 @@ -101,8 +101,8 @@ define <4 x double> @Load(ptr %p, <4 x double> %v, <4 x i1> %mask) sanitize_memo ; ADDR-LABEL: @Load( ; ADDR-NEXT: entry: ; ADDR-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; ADDR-NEXT: [[TMP1:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8 -; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; ADDR-NEXT: [[TMP1:%.*]] = load <4 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 40), align 8 +; ADDR-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; ADDR-NEXT: call void @llvm.donothing() ; ADDR-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64 ; ADDR-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -123,8 +123,8 @@ define <4 x double> @Load(ptr %p, <4 x double> %v, <4 x i1> %mask) sanitize_memo ; ; ORIGINS-LABEL: @Load( ; ORIGINS-NEXT: entry: -; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGINS-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGINS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, 
ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGINS-NEXT: call void @llvm.donothing() ; ORIGINS-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64 ; ORIGINS-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 @@ -235,7 +235,7 @@ entry: ; FIXME: Provide real implementation. define <16 x float> @Gather(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory { ; CHECK-LABEL: @Gather( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64> ; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i64> [[TMP2]], splat (i64 87960930222080) @@ -246,9 +246,9 @@ define <16 x float> @Gather(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %pas ; CHECK-NEXT: ret <16 x float> [[RET]] ; ; ADDR-LABEL: @Gather( -; ADDR-NEXT: [[TMP1:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; ADDR-NEXT: [[TMP1:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr @__msan_param_tls, align 8 -; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 +; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 ; ADDR-NEXT: call void @llvm.donothing() ; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> [[TMP2]], <16 x i64> zeroinitializer ; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64> @@ -270,8 +270,8 @@ define <16 x float> @Gather(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %pas ; ADDR-NEXT: ret <16 x float> [[RET]] ; ; ORIGINS-LABEL: @Gather( -; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8 -; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 136) to ptr), align 4 +; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 136), align 8 +; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 136), align 4 ; ORIGINS-NEXT: call void @llvm.donothing() ; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64> ; ORIGINS-NEXT: [[TMP4:%.*]] = xor <16 x i64> [[TMP3]], splat (i64 87960930222080) @@ -326,8 +326,8 @@ define void @Scatter(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask) sanitize ; CHECK-NEXT: ret void ; ; ADDR-LABEL: @Scatter( -; ADDR-NEXT: [[TMP1:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8 -; ADDR-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; ADDR-NEXT: [[TMP1:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 96), align 8 +; ADDR-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; ADDR-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8 ; ADDR-NEXT: call void @llvm.donothing() ; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> 
[[TMP2]], <8 x i64> zeroinitializer @@ -403,7 +403,7 @@ define void @ScatterNoSanitize(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask ; FIXME: Provide real implementation. define <16 x float> @ExpandLoad(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory { ; CHECK-LABEL: @ExpandLoad( -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 ; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 @@ -415,8 +415,8 @@ define <16 x float> @ExpandLoad(ptr %ptr, <16 x i1> %mask, <16 x float> %passthr ; ; ADDR-LABEL: @ExpandLoad( ; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; ADDR-NEXT: call void @llvm.donothing() ; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 ; ADDR-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080 @@ -436,8 +436,8 @@ define <16 x float> @ExpandLoad(ptr %ptr, <16 x i1> %mask, <16 x float> %passthr ; ADDR-NEXT: ret <16 x float> [[RET]] ; ; ORIGINS-LABEL: @ExpandLoad( -; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; ORIGINS-NEXT: call void @llvm.donothing() ; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 ; ORIGINS-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080 @@ -492,8 +492,8 @@ define void @CompressStore(<16 x float> %value, ptr %ptr, <16 x i1> %mask) sanit ; CHECK-NEXT: ret void ; ; ADDR-LABEL: @CompressStore( -; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 -; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8 +; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 +; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 72), align 8 ; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8 ; ADDR-NEXT: call void @llvm.donothing() ; ADDR-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64 diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll index b4feb1ec57224..0ad9e4dd32adf 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll @@ -18,7 +18,7 @@ define 
void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory { ; CHECK-LABEL: define void @Store( ; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 @@ -30,8 +30,8 @@ define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory { ; ORIGIN-LABEL: define void @Store( ; ORIGIN-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 @@ -53,8 +53,8 @@ define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory { ; CALLS-NEXT: [[ENTRY:.*:]] ; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]) ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 @@ -80,7 +80,7 @@ define void @AlignedStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_me ; CHECK-LABEL: define void @AlignedStore( ; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 @@ -92,8 +92,8 @@ define void @AlignedStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_me ; ORIGIN-LABEL: define void @AlignedStore( ; ORIGIN-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr 
@__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 @@ -118,8 +118,8 @@ define void @AlignedStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_me ; CALLS-NEXT: [[ENTRY:.*:]] ; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]) ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 @@ -353,7 +353,7 @@ define void @FuncWithPhi(ptr nocapture %a, ptr %b, ptr nocapture %c) nounwind uw ; CHECK-LABEL: define void @FuncWithPhi( ; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr [[B:%.*]], ptr captures(none) [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[B]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 0 @@ -397,8 +397,8 @@ define void @FuncWithPhi(ptr nocapture %a, ptr %b, ptr nocapture %c) nounwind uw ; ORIGIN-LABEL: define void @FuncWithPhi( ; ORIGIN-SAME: ptr captures(none) [[A:%.*]], ptr [[B:%.*]], ptr captures(none) [[C:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[B]] to i64 ; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 0 @@ -457,10 +457,10 @@ define void @FuncWithPhi(ptr nocapture %a, ptr %b, ptr nocapture %c) nounwind uw ; CALLS-LABEL: define void @FuncWithPhi( ; CALLS-SAME: ptr captures(none) [[A:%.*]], ptr [[B:%.*]], ptr captures(none) [[C:%.*]]) #[[ATTR0]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 -; CALLS-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; CALLS-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; CALLS-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 ; CALLS-NEXT: call void @llvm.donothing() @@ -770,8 +770,8 @@ define void @SExt(ptr nocapture %a, ptr nocapture %b) nounwind uwtable sanitize_ ; CALLS-LABEL: define void @SExt( ; CALLS-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) #[[ATTR0]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 ; CALLS-NEXT: call void @llvm.donothing() @@ -844,7 +844,7 @@ define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitiz ; CHECK-LABEL: define void @MemCpy( ; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10) ; CHECK-NEXT: ret void @@ -852,8 +852,8 @@ define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitiz ; ORIGIN-LABEL: define void @MemCpy( ; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10) ; ORIGIN-NEXT: ret void @@ -861,8 +861,8 @@ define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitiz ; CALLS-LABEL: define void @MemCpy( ; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load 
i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10) ; CALLS-NEXT: ret void @@ -911,7 +911,7 @@ define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable s ; CHECK-LABEL: define void @MemCpyInline( ; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10) ; CHECK-NEXT: ret void @@ -919,8 +919,8 @@ define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable s ; ORIGIN-LABEL: define void @MemCpyInline( ; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10) ; ORIGIN-NEXT: ret void @@ -928,8 +928,8 @@ define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable s ; CALLS-LABEL: define void @MemCpyInline( ; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10) ; CALLS-NEXT: ret void @@ -947,7 +947,7 @@ define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable saniti ; CHECK-LABEL: define void @MemMove( ; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; 
CHECK-NEXT: [[TMP1:%.*]] = call ptr @__msan_memmove(ptr [[X]], ptr [[Y]], i64 10) ; CHECK-NEXT: ret void @@ -955,8 +955,8 @@ define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable saniti ; ORIGIN-LABEL: define void @MemMove( ; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X]], ptr [[Y]], i64 10) ; ORIGIN-NEXT: ret void @@ -964,8 +964,8 @@ define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable saniti ; CALLS-LABEL: define void @MemMove( ; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X]], ptr [[Y]], i64 10) ; CALLS-NEXT: ret void @@ -1065,9 +1065,9 @@ define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_mem ; CHECK-LABEL: define i32 @Select( ; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[C]], i32 [[TMP1]], i32 [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[A]], [[B]] @@ -1081,12 +1081,12 @@ define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_mem ; ORIGIN-LABEL: define i32 @Select( ; ORIGIN-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; 
ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[C]], i32 [[TMP2]], i32 [[TMP4]] ; ORIGIN-NEXT: [[TMP7:%.*]] = xor i32 [[A]], [[B]] @@ -1103,12 +1103,12 @@ define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_mem ; CALLS-LABEL: define i32 @Select( ; CALLS-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[TMP6:%.*]] = select i1 [[C]], i32 [[TMP2]], i32 [[TMP4]] ; CALLS-NEXT: [[TMP7:%.*]] = xor i32 [[A]], [[B]] @@ -1135,9 +1135,9 @@ define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind ; CHECK-LABEL: define <8 x i16> @SelectVector( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[C]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i16> [[A]], [[B]] @@ -1151,12 +1151,12 @@ define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind ; ORIGIN-LABEL: define <8 x i16> @SelectVector( ; ORIGIN-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], 
<8 x i1> [[C:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; ORIGIN-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; ORIGIN-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP6:%.*]] = select <8 x i1> [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]] ; ORIGIN-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]] @@ -1177,12 +1177,12 @@ define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind ; CALLS-LABEL: define <8 x i16> @SelectVector( ; CALLS-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[C:%.*]]) #[[ATTR0]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; CALLS-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; CALLS-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[TMP6:%.*]] = select <8 x i1> [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]] ; CALLS-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]] @@ -1213,9 +1213,9 @@ define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwta ; CHECK-LABEL: define <8 x i16> @SelectVector2( ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x 
i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[C]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i16> [[A]], [[B]] @@ -1229,12 +1229,12 @@ define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwta ; ORIGIN-LABEL: define <8 x i16> @SelectVector2( ; ORIGIN-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; ORIGIN-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; ORIGIN-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]] ; ORIGIN-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]] @@ -1251,12 +1251,12 @@ define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwta ; CALLS-LABEL: define <8 x i16> @SelectVector2( ; CALLS-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; CALLS-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; CALLS-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[TMP6:%.*]] = select i1 [[C]], 
<8 x i16> [[TMP2]], <8 x i16> [[TMP4]] ; CALLS-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]] @@ -1280,8 +1280,8 @@ define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } % ; CHECK-SAME: i1 zeroext [[X:%.*]], { i64, i64 } [[A:%.*]], { i64, i64 } [[B:%.*]]) #[[ATTR6:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[X]], { i64, i64 } [[TMP1]], { i64, i64 } [[TMP2]] ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP3]] @@ -1294,10 +1294,10 @@ define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } % ; ORIGIN-NEXT: [[ENTRY:.*:]] ; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 -; ORIGIN-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 +; ORIGIN-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; ORIGIN-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]] ; ORIGIN-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]] @@ -1313,10 +1313,10 @@ define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } % ; CALLS-NEXT: [[ENTRY:.*:]] ; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 -; CALLS-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 +; CALLS-NEXT: 
[[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; CALLS-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]] ; CALLS-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]] @@ -1337,8 +1337,8 @@ define { ptr, double } @SelectStruct2(i1 zeroext %x, { ptr, double } %a, { ptr, ; CHECK-SAME: i1 zeroext [[X:%.*]], { ptr, double } [[A:%.*]], { ptr, double } [[B:%.*]]) #[[ATTR6]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[X]], { i64, i64 } [[TMP1]], { i64, i64 } [[TMP2]] ; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP3]] @@ -1351,10 +1351,10 @@ define { ptr, double } @SelectStruct2(i1 zeroext %x, { ptr, double } %a, { ptr, ; ORIGIN-NEXT: [[ENTRY:.*:]] ; ORIGIN-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 -; ORIGIN-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 +; ORIGIN-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; ORIGIN-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]] ; ORIGIN-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]] @@ -1370,10 +1370,10 @@ define { ptr, double } @SelectStruct2(i1 zeroext %x, { ptr, double } %a, { ptr, ; CALLS-NEXT: [[ENTRY:.*:]] ; CALLS-NEXT: [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; 
CALLS-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 -; CALLS-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 +; CALLS-NEXT: [[TMP2:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; CALLS-NEXT: [[TMP4:%.*]] = load { i64, i64 }, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]] ; CALLS-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]] @@ -1475,7 +1475,7 @@ define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory { ; CHECK-LABEL: define i32 @Div( ; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP0]], 0 @@ -1491,8 +1491,8 @@ define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory { ; ORIGIN-LABEL: define i32 @Div( ; ORIGIN-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 ; ORIGIN-NEXT: call void @llvm.donothing() @@ -1510,8 +1510,8 @@ define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory { ; CALLS-LABEL: define i32 @Div( ; CALLS-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP3:%.*]] 
= load i32, ptr @__msan_param_origin_tls, align 4 ; CALLS-NEXT: call void @llvm.donothing() @@ -1533,7 +1533,7 @@ define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory ; CHECK-SAME: float [[A:%.*]], float [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP0]], [[TMP1]] ; CHECK-NEXT: [[C:%.*]] = fdiv float [[A]], [[B]] @@ -1545,8 +1545,8 @@ define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory ; ORIGIN-NEXT: [[ENTRY:.*:]] ; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP0]], [[TMP2]] ; ORIGIN-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], 0 @@ -1561,8 +1561,8 @@ define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory ; CALLS-NEXT: [[ENTRY:.*:]] ; CALLS-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP0]], [[TMP2]] ; CALLS-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], 0 @@ -2416,7 +2416,7 @@ define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory { define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory { ; CHECK-LABEL: define i32 @ExtractElement( ; CHECK-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]]) #[[ATTR6]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP2]], i32 [[IDX]] @@ -2432,8 +2432,8 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory { ; ; ORIGIN-LABEL: define i32 @ExtractElement( ; ORIGIN-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]]) #[[ATTR6]] { -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add 
(i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; ORIGIN-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 ; ORIGIN-NEXT: call void @llvm.donothing() @@ -2451,8 +2451,8 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory { ; ; CALLS-LABEL: define i32 @ExtractElement( ; CALLS-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]]) #[[ATTR6]] { -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; CALLS-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 ; CALLS-NEXT: call void @llvm.donothing() @@ -2470,9 +2470,9 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory { define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory { ; CHECK-LABEL: define <4 x i32> @InsertElement( ; CHECK-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]], i32 [[X:%.*]]) #[[ATTR6]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[TMP3]], i32 [[IDX]] ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0 @@ -2487,12 +2487,12 @@ define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memor ; ; ORIGIN-LABEL: define <4 x i32> @InsertElement( ; ORIGIN-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]], i32 [[X:%.*]]) #[[ATTR6]] { -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; ORIGIN-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 
24) to ptr), align 8 -; ORIGIN-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 +; ORIGIN-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; ORIGIN-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX]] ; ORIGIN-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0 @@ -2512,12 +2512,12 @@ define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memor ; ; CALLS-LABEL: define <4 x i32> @InsertElement( ; CALLS-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]], i32 [[X:%.*]]) #[[ATTR6]] { -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; CALLS-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CALLS-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 +; CALLS-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CALLS-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX]] ; CALLS-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0 @@ -2538,7 +2538,7 @@ define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory ; CHECK-LABEL: define <4 x i32> @ShuffleVector( ; CHECK-SAME: <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]]) #[[ATTR6]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> ; CHECK-NEXT: [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC]], <4 x i32> [[VEC1]], <4 x i32> @@ -2549,8 +2549,8 @@ define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory ; ORIGIN-SAME: <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]]) #[[ATTR6]] { ; ORIGIN-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; ORIGIN-NEXT: [[TMP3:%.*]] = load <4 x 
i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> ; ORIGIN-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128 @@ -2565,8 +2565,8 @@ define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory ; CALLS-SAME: <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]]) #[[ATTR6]] { ; CALLS-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 +; CALLS-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CALLS-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> ; CALLS-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128 @@ -2761,17 +2761,13 @@ define void @VAStart(i32 %x, ...) sanitize_memory { ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr ; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP16]], i8 0, i64 24, i1 false) ; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]]) -; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[VA]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 16 -; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[VA]], i64 16 ; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64 ; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 ; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP2]], i64 176, i1 false) -; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[VA]] to i64 -; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[TMP24]], 8 -; CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[VA]], i64 8 ; CHECK-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 ; CHECK-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[TMP27]] to i64 ; CHECK-NEXT: [[TMP29:%.*]] = xor i64 [[TMP28]], 87960930222080 @@ -2832,9 +2828,7 @@ define void @VAStart(i32 %x, ...) sanitize_memory { ; ORIGIN-NEXT: [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr ; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 24, i1 false) ; ORIGIN-NEXT: call void @llvm.va_start.p0(ptr [[VA]]) -; ORIGIN-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[VA]] to i64 -; ORIGIN-NEXT: [[TMP32:%.*]] = add i64 [[TMP31]], 16 -; ORIGIN-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr +; ORIGIN-NEXT: [[TMP33:%.*]] = getelementptr i8, ptr [[VA]], i64 16 ; ORIGIN-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8 ; ORIGIN-NEXT: [[TMP35:%.*]] = ptrtoint ptr [[TMP34]] to i64 ; ORIGIN-NEXT: [[TMP36:%.*]] = xor i64 [[TMP35]], 87960930222080 @@ -2843,9 +2837,7 @@ define void @VAStart(i32 %x, ...) 
sanitize_memory { ; ORIGIN-NEXT: [[TMP39:%.*]] = inttoptr i64 [[TMP38]] to ptr ; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 16 [[TMP2]], i64 176, i1 false) ; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP39]], ptr align 16 [[TMP4]], i64 176, i1 false) -; ORIGIN-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[VA]] to i64 -; ORIGIN-NEXT: [[TMP41:%.*]] = add i64 [[TMP40]], 8 -; ORIGIN-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr +; ORIGIN-NEXT: [[TMP42:%.*]] = getelementptr i8, ptr [[VA]], i64 8 ; ORIGIN-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 ; ORIGIN-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 ; ORIGIN-NEXT: [[TMP45:%.*]] = xor i64 [[TMP44]], 87960930222080 @@ -2905,9 +2897,7 @@ define void @VAStart(i32 %x, ...) sanitize_memory { ; CALLS-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr ; CALLS-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP26]], i8 0, i64 24, i1 false) ; CALLS-NEXT: call void @llvm.va_start.p0(ptr [[VA]]) -; CALLS-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[VA]] to i64 -; CALLS-NEXT: [[TMP30:%.*]] = add i64 [[TMP29]], 16 -; CALLS-NEXT: [[TMP31:%.*]] = inttoptr i64 [[TMP30]] to ptr +; CALLS-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[VA]], i64 16 ; CALLS-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8 ; CALLS-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[TMP32]] to i64 ; CALLS-NEXT: [[TMP34:%.*]] = xor i64 [[TMP33]], 87960930222080 @@ -2916,9 +2906,7 @@ define void @VAStart(i32 %x, ...) sanitize_memory { ; CALLS-NEXT: [[TMP37:%.*]] = inttoptr i64 [[TMP36]] to ptr ; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP35]], ptr align 16 [[TMP2]], i64 176, i1 false) ; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 16 [[TMP4]], i64 176, i1 false) -; CALLS-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[VA]] to i64 -; CALLS-NEXT: [[TMP39:%.*]] = add i64 [[TMP38]], 8 -; CALLS-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr +; CALLS-NEXT: [[TMP40:%.*]] = getelementptr i8, ptr [[VA]], i64 8 ; CALLS-NEXT: [[TMP41:%.*]] = load ptr, ptr [[TMP40]], align 8 ; CALLS-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[TMP41]] to i64 ; CALLS-NEXT: [[TMP43:%.*]] = xor i64 [[TMP42]], 87960930222080 @@ -2948,7 +2936,7 @@ define void @VolatileStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_m ; CHECK-LABEL: define void @VolatileStore( ; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 @@ -2960,8 +2948,8 @@ define void @VolatileStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_m ; ORIGIN-LABEL: define void @VolatileStore( ; ORIGIN-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr 
(i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64 ; ORIGIN-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080 @@ -2983,8 +2971,8 @@ define void @VolatileStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_m ; CALLS-NEXT: [[ENTRY:.*:]] ; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]) ; CALLS-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64 @@ -3333,7 +3321,7 @@ define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory ; CHECK-LABEL: define <2 x i64> @ArgumentShadowAlignment( ; CHECK-SAME: i64 [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR6]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: store <2 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: ret <2 x i64> [[B]] @@ -3341,8 +3329,8 @@ define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory ; ORIGIN-LABEL: define <2 x i64> @ArgumentShadowAlignment( ; ORIGIN-SAME: i64 [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR6]] { ; ORIGIN-NEXT: [[ENTRY:.*:]] -; ORIGIN-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: store <2 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8 ; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4 @@ -3351,8 +3339,8 @@ define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory ; CALLS-LABEL: define <2 x i64> @ArgumentShadowAlignment( ; CALLS-SAME: i64 [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR6]] { ; CALLS-NEXT: [[ENTRY:.*:]] -; CALLS-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: 
store <2 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8 ; CALLS-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4 @@ -3371,7 +3359,7 @@ define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory { ; CHECK-SAME: i64 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR6]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 [[TMP0]], 0 ; CHECK-NEXT: [[A:%.*]] = insertvalue { i64, i32 } undef, i64 [[X]], 0 @@ -3385,8 +3373,8 @@ define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory { ; ORIGIN-NEXT: [[ENTRY:.*:]] ; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP4:%.*]] = insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 [[TMP0]], 0 ; ORIGIN-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP0]], 0 @@ -3405,8 +3393,8 @@ define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory { ; CALLS-NEXT: [[ENTRY:.*:]] ; CALLS-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CALLS-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CALLS-NEXT: call void @llvm.donothing() ; CALLS-NEXT: [[TMP4:%.*]] = insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 [[TMP0]], 0 ; CALLS-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP0]], 0 @@ -3458,22 +3446,22 @@ define void @VAArgStruct(ptr nocapture %s) sanitize_memory { ; CHECK-NEXT: [[_MSLD2:%.*]] = load i64, ptr [[TMP9]], align 4 ; CHECK-NEXT: [[TMP10:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16) ; CHECK-NEXT: store i32 -1, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr 
getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; CHECK-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080 ; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP13]], i64 16, i1 false) -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP13]], i64 16, i1 false) +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 +; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP16]], i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP16]], i64 16, i1 false) ; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i32, ...) 
@VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]]) ; CHECK-NEXT: ret void @@ -3515,48 +3503,48 @@ define void @VAArgStruct(ptr nocapture %s) sanitize_memory { ; ORIGIN-NEXT: [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16) ; ORIGIN-NEXT: store i32 -1, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: store i32 0, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 -; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 -; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; ORIGIN-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 -; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; ORIGIN-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 +; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; ORIGIN-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 +; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; ORIGIN-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; ORIGIN-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; ORIGIN-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 ; ORIGIN-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr ; ORIGIN-NEXT: [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416 ; ORIGIN-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false) -; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false) -; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP23]], i64 16, i1 false) +; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 40), ptr align 4 
[[TMP25]], i64 16, i1 false) +; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; ORIGIN-NEXT: [[TMP26:%.*]] = zext i32 [[TMP13]] to i64 ; ORIGIN-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 32 ; ORIGIN-NEXT: [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]] -; ORIGIN-NEXT: store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; ORIGIN-NEXT: store i64 [[TMP28]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 8), align 8 +; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; ORIGIN-NEXT: [[TMP29:%.*]] = zext i32 [[TMP19]] to i64 ; ORIGIN-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 32 ; ORIGIN-NEXT: [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]] -; ORIGIN-NEXT: store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 +; ORIGIN-NEXT: store i64 [[TMP31]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 16), align 8 +; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 ; ORIGIN-NEXT: [[TMP32:%.*]] = zext i32 [[TMP13]] to i64 ; ORIGIN-NEXT: [[TMP33:%.*]] = shl i64 [[TMP32]], 32 ; ORIGIN-NEXT: [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]] -; ORIGIN-NEXT: store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8 -; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; ORIGIN-NEXT: store i64 [[TMP34]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 24), align 8 +; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; ORIGIN-NEXT: [[TMP35:%.*]] = zext i32 [[TMP19]] to i64 ; ORIGIN-NEXT: [[TMP36:%.*]] = shl i64 [[TMP35]], 32 ; ORIGIN-NEXT: [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]] -; ORIGIN-NEXT: store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8 +; ORIGIN-NEXT: store i64 [[TMP37]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 32), align 8 ; ORIGIN-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; ORIGIN-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080 ; ORIGIN-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr ; ORIGIN-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416 ; ORIGIN-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr -; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false) -; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false) +; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP40]], i64 16, i1 false) +; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 176), ptr align 8 [[TMP42]], i64 16, i1 false) ; ORIGIN-NEXT: store i64 16, ptr 
@__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]]) ; ORIGIN-NEXT: ret void @@ -3600,48 +3588,48 @@ define void @VAArgStruct(ptr nocapture %s) sanitize_memory { ; CALLS-NEXT: [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16) ; CALLS-NEXT: store i32 -1, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: store i32 0, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 -; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 -; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CALLS-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 -; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CALLS-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CALLS-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 +; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CALLS-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 +; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CALLS-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; CALLS-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; CALLS-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 ; CALLS-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr ; CALLS-NEXT: [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416 ; CALLS-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false) -; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false) -; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP23]], i64 16, i1 false) +; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 getelementptr (i8, ptr 
@__msan_param_origin_tls, i64 40), ptr align 4 [[TMP25]], i64 16, i1 false) +; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; CALLS-NEXT: [[TMP26:%.*]] = zext i32 [[TMP13]] to i64 ; CALLS-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 32 ; CALLS-NEXT: [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]] -; CALLS-NEXT: store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CALLS-NEXT: store i64 [[TMP28]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 8), align 8 +; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CALLS-NEXT: [[TMP29:%.*]] = zext i32 [[TMP19]] to i64 ; CALLS-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 32 ; CALLS-NEXT: [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]] -; CALLS-NEXT: store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 +; CALLS-NEXT: store i64 [[TMP31]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 16), align 8 +; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 ; CALLS-NEXT: [[TMP32:%.*]] = zext i32 [[TMP13]] to i64 ; CALLS-NEXT: [[TMP33:%.*]] = shl i64 [[TMP32]], 32 ; CALLS-NEXT: [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]] -; CALLS-NEXT: store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8 -; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; CALLS-NEXT: store i64 [[TMP34]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 24), align 8 +; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; CALLS-NEXT: [[TMP35:%.*]] = zext i32 [[TMP19]] to i64 ; CALLS-NEXT: [[TMP36:%.*]] = shl i64 [[TMP35]], 32 ; CALLS-NEXT: [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]] -; CALLS-NEXT: store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8 +; CALLS-NEXT: store i64 [[TMP37]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 32), align 8 ; CALLS-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; CALLS-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080 ; CALLS-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr ; CALLS-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416 ; CALLS-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr -; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false) -; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false) +; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 176), ptr align 8 [[TMP40]], i64 16, i1 false) +; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 176), ptr align 8 [[TMP42]], i64 16, i1 false) ; CALLS-NEXT: store i64 16, ptr 
@__msan_va_arg_overflow_size_tls, align 8 ; CALLS-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]]) ; CALLS-NEXT: ret void @@ -3685,22 +3673,22 @@ define void @VAArgStructNoSSE(ptr nocapture %s) sanitize_memory #0 { ; CHECK-NEXT: [[_MSLD2:%.*]] = load i64, ptr [[TMP9]], align 4 ; CHECK-NEXT: [[TMP10:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16) ; CHECK-NEXT: store i32 -1, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; CHECK-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080 ; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP13]], i64 16, i1 false) -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 -; CHECK-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP13]], i64 16, i1 false) +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 +; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 +; CHECK-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080 ; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), ptr align 8 [[TMP16]], i64 16, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), ptr align 8 [[TMP16]], i64 16, i1 false) ; CHECK-NEXT: store i64 16, ptr 
@__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]]) ; CHECK-NEXT: ret void @@ -3742,48 +3730,48 @@ define void @VAArgStructNoSSE(ptr nocapture %s) sanitize_memory #0 { ; ORIGIN-NEXT: [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16) ; ORIGIN-NEXT: store i32 -1, ptr @__msan_param_tls, align 8 ; ORIGIN-NEXT: store i32 0, ptr @__msan_param_origin_tls, align 4 -; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 -; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 -; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; ORIGIN-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 -; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; ORIGIN-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; ORIGIN-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 +; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; ORIGIN-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 +; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; ORIGIN-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; ORIGIN-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; ORIGIN-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 ; ORIGIN-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr ; ORIGIN-NEXT: [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416 ; ORIGIN-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false) -; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false) -; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP23]], i64 16, i1 false) +; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr 
align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 40), ptr align 4 [[TMP25]], i64 16, i1 false) +; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; ORIGIN-NEXT: [[TMP26:%.*]] = zext i32 [[TMP13]] to i64 ; ORIGIN-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 32 ; ORIGIN-NEXT: [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]] -; ORIGIN-NEXT: store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; ORIGIN-NEXT: store i64 [[TMP28]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 8), align 8 +; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; ORIGIN-NEXT: [[TMP29:%.*]] = zext i32 [[TMP19]] to i64 ; ORIGIN-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 32 ; ORIGIN-NEXT: [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]] -; ORIGIN-NEXT: store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8 -; ORIGIN-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 +; ORIGIN-NEXT: store i64 [[TMP31]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 16), align 8 +; ORIGIN-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 ; ORIGIN-NEXT: [[TMP32:%.*]] = zext i32 [[TMP13]] to i64 ; ORIGIN-NEXT: [[TMP33:%.*]] = shl i64 [[TMP32]], 32 ; ORIGIN-NEXT: [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]] -; ORIGIN-NEXT: store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8 -; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; ORIGIN-NEXT: store i64 [[TMP34]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 24), align 8 +; ORIGIN-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; ORIGIN-NEXT: [[TMP35:%.*]] = zext i32 [[TMP19]] to i64 ; ORIGIN-NEXT: [[TMP36:%.*]] = shl i64 [[TMP35]], 32 ; ORIGIN-NEXT: [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]] -; ORIGIN-NEXT: store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8 +; ORIGIN-NEXT: store i64 [[TMP37]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 32), align 8 ; ORIGIN-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; ORIGIN-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080 ; ORIGIN-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr ; ORIGIN-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416 ; ORIGIN-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr -; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false) -; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 48) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false) +; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), ptr align 8 [[TMP40]], i64 16, i1 false) +; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 48), ptr align 8 [[TMP42]], 
i64 16, i1 false) ; ORIGIN-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; ORIGIN-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]]) ; ORIGIN-NEXT: ret void @@ -3827,48 +3815,48 @@ define void @VAArgStructNoSSE(ptr nocapture %s) sanitize_memory #0 { ; CALLS-NEXT: [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16) ; CALLS-NEXT: store i32 -1, ptr @__msan_param_tls, align 8 ; CALLS-NEXT: store i32 0, ptr @__msan_param_origin_tls, align 4 -; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 -; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4 -; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8 -; CALLS-NEXT: store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4 -; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8 -; CALLS-NEXT: store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4 +; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CALLS-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 +; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 +; CALLS-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4 +; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8 +; CALLS-NEXT: store i32 [[TMP13]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4 +; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8 +; CALLS-NEXT: store i32 [[TMP19]], ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4 ; CALLS-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; CALLS-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080 ; CALLS-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr ; CALLS-NEXT: [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416 ; CALLS-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr -; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false) -; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false) -; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8 +; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_param_tls, i64 40), ptr align 8 [[TMP23]], i64 16, i1 false) +; CALLS-NEXT: call void 
@llvm.memcpy.p0.p0.i64(ptr align 4 getelementptr (i8, ptr @__msan_param_origin_tls, i64 40), ptr align 4 [[TMP25]], i64 16, i1 false) +; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 8), align 8 ; CALLS-NEXT: [[TMP26:%.*]] = zext i32 [[TMP13]] to i64 ; CALLS-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 32 ; CALLS-NEXT: [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]] -; CALLS-NEXT: store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8 -; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8 +; CALLS-NEXT: store i64 [[TMP28]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 8), align 8 +; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 16), align 8 ; CALLS-NEXT: [[TMP29:%.*]] = zext i32 [[TMP19]] to i64 ; CALLS-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 32 ; CALLS-NEXT: [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]] -; CALLS-NEXT: store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8 -; CALLS-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8 +; CALLS-NEXT: store i64 [[TMP31]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 16), align 8 +; CALLS-NEXT: store i64 [[_MSLD]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 24), align 8 ; CALLS-NEXT: [[TMP32:%.*]] = zext i32 [[TMP13]] to i64 ; CALLS-NEXT: [[TMP33:%.*]] = shl i64 [[TMP32]], 32 ; CALLS-NEXT: [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]] -; CALLS-NEXT: store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8 -; CALLS-NEXT: store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8 +; CALLS-NEXT: store i64 [[TMP34]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 24), align 8 +; CALLS-NEXT: store i64 [[_MSLD2]], ptr getelementptr (i8, ptr @__msan_va_arg_tls, i64 32), align 8 ; CALLS-NEXT: [[TMP35:%.*]] = zext i32 [[TMP19]] to i64 ; CALLS-NEXT: [[TMP36:%.*]] = shl i64 [[TMP35]], 32 ; CALLS-NEXT: [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]] -; CALLS-NEXT: store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8 +; CALLS-NEXT: store i64 [[TMP37]], ptr getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 32), align 8 ; CALLS-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64 ; CALLS-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080 ; CALLS-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr ; CALLS-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416 ; CALLS-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr -; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false) -; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 48) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false) +; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_tls, i64 48), ptr align 8 [[TMP40]], i64 16, i1 false) +; CALLS-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 getelementptr (i8, ptr @__msan_va_arg_origin_tls, i64 48), ptr align 8 [[TMP42]], i64 16, i1 
false) ; CALLS-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CALLS-NEXT: call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]]) ; CALLS-NEXT: ret void diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll index 04fdd23aa5a88..846912ebef54a 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll @@ -22,20 +22,20 @@ target triple = "x86_64-unknown-linux-gnu" define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory { ; CHECK-LABEL: @Store( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1:![0-9]+]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP6]], align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP2]], ptr [[P]], i32 zeroext [[TMP3]]), !dbg [[DBG1]] -; CHECK-NEXT: store i32 [[X:%.*]], ptr [[P]], align 4, !dbg [[DBG1]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2:![0-9]+]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP6]], align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP2]], ptr [[P]], i32 zeroext [[TMP3]]), !dbg [[DBG2]] +; CHECK-NEXT: store i32 [[X:%.*]], ptr [[P]], align 4, !dbg [[DBG2]] ; CHECK-NEXT: ret void ; entry: @@ -46,29 +46,29 @@ entry: define void @LoadAndCmp(ptr nocapture %a) nounwind uwtable sanitize_memory { ; CHECK-LABEL: 
@LoadAndCmp( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A:%.*]], align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP5]], align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP9:%.*]] = xor i32 [[TMP2]], 0, !dbg [[DBG7:![0-9]+]] -; CHECK-NEXT: [[TMP10:%.*]] = or i32 [[_MSLD]], 0, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP12:%.*]] = xor i32 [[TMP10]], -1, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP13:%.*]] = and i32 [[TMP12]], [[TMP9]], !dbg [[DBG7]] -; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[TMP13]], 0, !dbg [[DBG7]] -; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 [[TMP11]], [[TMP14]], !dbg [[DBG7]] -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP2]], 0, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP15:%.*]] = zext i1 [[_MSPROP_ICMP]] to i8, !dbg [[DBG8:![0-9]+]] -; CHECK-NEXT: call void @__msan_maybe_warning_1(i8 zeroext [[TMP15]], i32 zeroext [[TMP8]]), !dbg [[DBG8]] -; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]], !dbg [[DBG8]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A:%.*]], align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP5]], align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP9:%.*]] = xor i32 [[TMP2]], 0, !dbg [[DBG8:![0-9]+]] +; CHECK-NEXT: [[TMP10:%.*]] = or i32 [[_MSLD]], 0, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP12:%.*]] = xor i32 [[TMP10]], -1, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP13:%.*]] = and i32 [[TMP12]], [[TMP9]], !dbg [[DBG8]] +; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[TMP13]], 0, !dbg [[DBG8]] +; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 [[TMP11]], [[TMP14]], !dbg [[DBG8]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP2]], 0, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP15:%.*]] = zext i1 [[_MSPROP_ICMP]] to i8, !dbg [[DBG9:![0-9]+]] +; CHECK-NEXT: call void @__msan_maybe_warning_1(i8 zeroext [[TMP15]], i32 zeroext [[TMP8]]), 
!dbg [[DBG9]] +; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]], !dbg [[DBG9]] ; CHECK: if.then: ; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8 ; CHECK-NEXT: tail call void (...) @foo() #[[ATTR5:[0-9]+]] @@ -92,10 +92,10 @@ declare void @foo(...) define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory { ; CHECK-LABEL: @ReturnInt( ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: ret i32 123, !dbg [[DBG1]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: ret i32 123, !dbg [[DBG2]] ; entry: ret i32 123, !dbg !10 @@ -104,22 +104,22 @@ entry: define void @CopyRetVal(ptr nocapture %a) nounwind uwtable sanitize_memory { ; CHECK-LABEL: @CopyRetVal( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @ReturnInt() #[[ATTR5]], !dbg [[DBG1]] -; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG7]] -; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG7]] -; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG7]] -; CHECK-NEXT: store i32 [[_MSRET]], ptr [[TMP5]], align 4, !dbg [[DBG7]] -; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSRET]], ptr [[A]], i32 zeroext [[TMP2]]), !dbg [[DBG7]] -; CHECK-NEXT: store i32 [[CALL]], ptr [[A]], align 4, !dbg [[DBG7]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @ReturnInt() #[[ATTR5]], !dbg [[DBG2]] +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG8]] +; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG8]] +; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG8]] +; CHECK-NEXT: store i32 [[_MSRET]], 
ptr [[TMP5]], align 4, !dbg [[DBG8]] +; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSRET]], ptr [[A]], i32 zeroext [[TMP2]]), !dbg [[DBG8]] +; CHECK-NEXT: store i32 [[CALL]], ptr [[A]], align 4, !dbg [[DBG8]] ; CHECK-NEXT: ret void ; entry: @@ -133,32 +133,32 @@ entry: define void @SExt(ptr nocapture %a, ptr nocapture %b) nounwind uwtable sanitize_memory { ; CHECK-LABEL: @SExt( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[B:%.*]], align 2, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: [[_MSLD:%.*]] = load i16, ptr [[TMP7]], align 2, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[_MSPROP:%.*]] = sext i16 [[_MSLD]] to i32, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP12:%.*]] = sext i16 [[TMP4]] to i32, !dbg [[DBG7]] -; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP2]], i32 zeroext [[TMP3]]), !dbg [[DBG8]] -; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG8]] -; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG8]] -; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG8]] -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG8]] -; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr, !dbg [[DBG8]] -; CHECK-NEXT: store i32 [[_MSPROP]], ptr [[TMP15]], align 4, !dbg [[DBG8]] -; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSPROP]], ptr [[A]], i32 zeroext [[TMP11]]), !dbg [[DBG8]] -; CHECK-NEXT: store i32 [[TMP12]], ptr [[A]], align 4, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[B:%.*]], align 2, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG2]] +; 
CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: [[_MSLD:%.*]] = load i16, ptr [[TMP7]], align 2, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[_MSPROP:%.*]] = sext i16 [[_MSLD]] to i32, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP12:%.*]] = sext i16 [[TMP4]] to i32, !dbg [[DBG8]] +; CHECK-NEXT: call void @__msan_maybe_warning_8(i64 zeroext [[TMP2]], i32 zeroext [[TMP3]]), !dbg [[DBG9]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG9]] +; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG9]] +; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG9]] +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG9]] +; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr, !dbg [[DBG9]] +; CHECK-NEXT: store i32 [[_MSPROP]], ptr [[TMP15]], align 4, !dbg [[DBG9]] +; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSPROP]], ptr [[A]], i32 zeroext [[TMP11]]), !dbg [[DBG9]] +; CHECK-NEXT: store i32 [[TMP12]], ptr [[A]], align 4, !dbg [[DBG9]] ; CHECK-NEXT: ret void ; entry: @@ -171,8 +171,8 @@ entry: define void @MemSet(ptr nocapture %x) nounwind uwtable sanitize_memory { ; CHECK-LABEL: @MemSet( ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG1]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG2]] ; CHECK-NEXT: ret void ; entry: @@ -187,10 +187,10 @@ declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory { ; CHECK-LABEL: @MemCpy( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG1]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG2]] ; CHECK-NEXT: ret void ; entry: @@ -204,8 +204,8 @@ declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounw define void @MemSetInline(ptr nocapture %x) nounwind uwtable sanitize_memory { ; CHECK-LABEL: @MemSetInline( ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG1]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG2]] ; CHECK-NEXT: ret void ; entry: @@ -219,10 +219,10 @@ declare 
void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory { ; CHECK-LABEL: @MemCpyInline( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG1]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG2]] ; CHECK-NEXT: ret void ; entry: @@ -236,10 +236,10 @@ declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1 define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory { ; CHECK-LABEL: @MemMove( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG1]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG2]] ; CHECK-NEXT: ret void ; entry: @@ -256,8 +256,8 @@ declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture write define void @atomic_memcpy(ptr nocapture %x, ptr nocapture %y) nounwind { ; CHECK-LABEL: @atomic_memcpy( -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG1]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG2]] ; CHECK-NEXT: ret void ; call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1), !dbg !10 @@ -266,8 +266,8 @@ define void @atomic_memcpy(ptr nocapture %x, ptr nocapture %y) nounwind { define void @atomic_memmove(ptr nocapture %x, ptr nocapture %y) nounwind { ; CHECK-LABEL: @atomic_memmove( -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG1]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: call void 
@llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG2]] ; CHECK-NEXT: ret void ; call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1), !dbg !10 @@ -276,8 +276,8 @@ define void @atomic_memmove(ptr nocapture %x, ptr nocapture %y) nounwind { define void @atomic_memset(ptr nocapture %x) nounwind { ; CHECK-LABEL: @atomic_memset( -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 [[X:%.*]], i8 88, i64 16, i32 1), !dbg [[DBG1]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 [[X:%.*]], i8 88, i64 16, i32 1), !dbg [[DBG2]] ; CHECK-NEXT: ret void ; call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %x, i8 88, i64 16, i32 1), !dbg !10 @@ -290,21 +290,21 @@ define void @atomic_memset(ptr nocapture %x) nounwind { define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory { ; CHECK-LABEL: @Select( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[C:%.*]], i32 [[TMP2]], i32 [[TMP4]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP7:%.*]] = xor i32 [[A:%.*]], [[B:%.*]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP2]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP9:%.*]] = or i32 [[TMP8]], [[TMP4]], !dbg [[DBG1]] -; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], i32 [[TMP9]], i32 [[TMP6]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[C]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP10]], !dbg [[DBG1]] -; CHECK-NEXT: [[COND:%.*]] = select i1 [[C]], i32 [[A]], i32 [[B]], !dbg [[DBG1]] +; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[C:%.*]], i32 [[TMP2]], i32 [[TMP4]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP7:%.*]] = xor i32 [[A:%.*]], [[B:%.*]], 
!dbg [[DBG2]] +; CHECK-NEXT: [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP2]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP9:%.*]] = or i32 [[TMP8]], [[TMP4]], !dbg [[DBG2]] +; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], i32 [[TMP9]], i32 [[TMP6]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[C]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP10]], !dbg [[DBG2]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[C]], i32 [[A]], i32 [[B]], !dbg [[DBG2]] ; CHECK-NEXT: store i32 [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: store i32 [[TMP11]], ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: ret i32 [[COND]] @@ -320,25 +320,25 @@ entry: define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory { ; CHECK-LABEL: @SelectVector( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[C:%.*]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A:%.*]], [[B:%.*]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP8:%.*]] = or <8 x i16> [[TMP7]], [[TMP2]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP4]], !dbg [[DBG1]] -; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP9]], <8 x i16> [[TMP6]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i1> [[C]] to i8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP11:%.*]] = icmp ne i8 [[TMP10]], 0, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i1> [[TMP0]] to i8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP11]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP13]], i32 [[TMP1]], i32 [[TMP14]], !dbg [[DBG1]] -; CHECK-NEXT: [[COND:%.*]] = select <8 x i1> [[C]], <8 x i16> [[A]], <8 x i16> [[B]], !dbg [[DBG1]] +; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 32), align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; 
CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[C:%.*]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP7:%.*]] = xor <8 x i16> [[A:%.*]], [[B:%.*]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP8:%.*]] = or <8 x i16> [[TMP7]], [[TMP2]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP4]], !dbg [[DBG2]] +; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP9]], <8 x i16> [[TMP6]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x i1> [[C]] to i8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp ne i8 [[TMP10]], 0, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i1> [[TMP0]] to i8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP11]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP15:%.*]] = select i1 [[TMP13]], i32 [[TMP1]], i32 [[TMP14]], !dbg [[DBG2]] +; CHECK-NEXT: [[COND:%.*]] = select <8 x i1> [[C]], <8 x i16> [[A]], <8 x i16> [[B]], !dbg [[DBG2]] ; CHECK-NEXT: store <8 x i16> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: store i32 [[TMP15]], ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: ret <8 x i16> [[COND]] @@ -354,10 +354,10 @@ entry: define ptr @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory { ; CHECK-LABEL: @IntToPtr( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[X:%.*]] to ptr, !dbg [[DBG1]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[X:%.*]] to ptr, !dbg [[DBG2]] ; CHECK-NEXT: store i64 [[TMP0]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: ret ptr [[TMP2]] @@ -374,13 +374,13 @@ entry: define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory { ; CHECK-LABEL: @Div( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]] -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[A:%.*]], [[B:%.*]], !dbg [[DBG1]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: call void 
@__msan_maybe_warning_4(i32 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG2]] +; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[A:%.*]], [[B:%.*]], !dbg [[DBG2]] ; CHECK-NEXT: store i32 [[TMP2]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: store i32 [[TMP3]], ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: ret i32 [[DIV]] @@ -398,24 +398,24 @@ entry: define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory { ; CHECK-LABEL: @ShadowLoadAlignmentLarge( -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[Y:%.*]] = alloca i32, align 64, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 64 [[TMP3]], i8 -1, i64 4, i1 false), !dbg [[DBG1]] -; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[Y]], i64 4, ptr @[[GLOB0:[0-9]+]], ptr @[[GLOB1:[0-9]+]]), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP8:%.*]] = load volatile i32, ptr [[Y]], align 64, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP10]], 17592186044416, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr, !dbg [[DBG7]] -; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP11]], align 64, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 64, !dbg [[DBG7]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[Y:%.*]] = alloca i32, align 64, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 64 [[TMP3]], i8 -1, i64 4, i1 false), !dbg [[DBG2]] +; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[Y]], i64 4, ptr @[[GLOB0:[0-9]+]], ptr @[[GLOB1:[0-9]+]]), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP8:%.*]] = load volatile i32, ptr [[Y]], align 64, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr, !dbg [[DBG8]] +; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 64, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP12]], align 64, !dbg [[DBG8]] ; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: store i32 [[TMP14]], ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: ret i32 [[TMP8]] @@ -429,14 +429,14 @@ define i32 
@ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory { define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory { ; CHECK-LABEL: @ExtractElement( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 [[IDX:%.*]], !dbg [[DBG1]] -; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), !dbg [[DBG1]] -; CHECK-NEXT: [[X:%.*]] = extractelement <4 x i32> [[VEC:%.*]], i32 [[IDX]], !dbg [[DBG1]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 [[IDX:%.*]], !dbg [[DBG2]] +; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), !dbg [[DBG2]] +; CHECK-NEXT: [[X:%.*]] = extractelement <4 x i32> [[VEC:%.*]], i32 [[IDX]], !dbg [[DBG2]] ; CHECK-NEXT: store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: store i32 [[TMP4]], ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: ret i32 [[X]] @@ -448,20 +448,20 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory { define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory { ; CHECK-LABEL: @InsertElement( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX:%.*]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 [[TMP4]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 [[TMP8]], !dbg [[DBG1]] -; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), 
!dbg [[DBG1]] -; CHECK-NEXT: [[VEC1:%.*]] = insertelement <4 x i32> [[VEC:%.*]], i32 [[X:%.*]], i32 [[IDX]], !dbg [[DBG1]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 24), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 24), align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX:%.*]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 [[TMP4]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 [[TMP8]], !dbg [[DBG2]] +; CHECK-NEXT: call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), !dbg [[DBG2]] +; CHECK-NEXT: [[VEC1:%.*]] = insertelement <4 x i32> [[VEC:%.*]], i32 [[X:%.*]], i32 [[IDX]], !dbg [[DBG2]] ; CHECK-NEXT: store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: store i32 [[TMP10]], ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: ret <4 x i32> [[VEC1]] @@ -473,16 +473,16 @@ define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memor define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory { ; CHECK-LABEL: @ShuffleVector( -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> , !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i128 [[TMP5]], 0, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP4]], i32 [[TMP2]], !dbg [[DBG1]] -; CHECK-NEXT: [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]], <4 x i32> , !dbg [[DBG1]] +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 16), align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> , !dbg [[DBG2]] +; CHECK-NEXT: 
[[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i128 [[TMP5]], 0, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP4]], i32 [[TMP2]], !dbg [[DBG2]] +; CHECK-NEXT: [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]], <4 x i32> , !dbg [[DBG2]] ; CHECK-NEXT: store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: store i32 [[TMP7]], ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: ret <4 x i32> [[VEC2]] @@ -499,74 +499,70 @@ declare void @llvm.va_start(ptr) nounwind define void @VAStart(i32 %x, ...) sanitize_memory { ; CHECK-LABEL: @VAStart( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false), !dbg [[DBG1]] -; CHECK-NEXT: [[SRCSZ:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800), !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[SRCSZ]], i1 false), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP3:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[SRCSZ]], i1 false), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP7]], 17592186044416, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP8]], i8 -1, i64 4, i1 false), !dbg [[DBG1]] -; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[X_ADDR]], i64 4, ptr @[[GLOB2:[0-9]+]], ptr @[[GLOB3:[0-9]+]]), !dbg [[DBG1]] -; CHECK-NEXT: [[VA:%.*]] = alloca [1 x %struct.__va_list_tag], align 16, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP17:%.*]] = and i64 [[TMP16]], -4, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr, !dbg [[DBG7]] -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP15]], i8 -1, i64 24, i1 false), !dbg [[DBG7]] -; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[VA]], i64 24, ptr @[[GLOB4:[0-9]+]], ptr @[[GLOB5:[0-9]+]]), !dbg [[DBG7]] -; CHECK-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG8]] -; CHECK-NEXT: [[TMP21:%.*]] = xor i64 [[TMP20]], 87960930222080, !dbg [[DBG8]] -; CHECK-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr, !dbg [[DBG8]] -; 
CHECK-NEXT: [[TMP23:%.*]] = add i64 [[TMP21]], 17592186044416, !dbg [[DBG8]] -; CHECK-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr, !dbg [[DBG8]] -; CHECK-NEXT: store i32 [[TMP4]], ptr [[TMP22]], align 4, !dbg [[DBG8]] -; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP4]], ptr [[X_ADDR]], i32 zeroext [[TMP5]]), !dbg [[DBG8]] -; CHECK-NEXT: store i32 [[X:%.*]], ptr [[X_ADDR]], align 4, !dbg [[DBG8]] -; CHECK-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11:![0-9]+]] -; CHECK-NEXT: [[TMP27:%.*]] = xor i64 [[TMP26]], 87960930222080, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP29:%.*]] = add i64 [[TMP27]], 17592186044416, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr, !dbg [[DBG11]] -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 24, i1 false), !dbg [[DBG11]] -; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]]), !dbg [[DBG11]] -; CHECK-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP32:%.*]] = add i64 [[TMP31]], 16, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP35:%.*]] = ptrtoint ptr [[TMP34]] to i64, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP36:%.*]] = xor i64 [[TMP35]], 87960930222080, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP37:%.*]] = inttoptr i64 [[TMP36]] to ptr, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[TMP36]], 17592186044416, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP39:%.*]] = inttoptr i64 [[TMP38]] to ptr, !dbg [[DBG11]] -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 16 [[TMP2]], i64 176, i1 false), !dbg [[DBG11]] -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP39]], ptr align 16 [[TMP3]], i64 176, i1 false), !dbg [[DBG11]] -; CHECK-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP42:%.*]] = add i64 [[TMP41]], 8, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP43:%.*]] = inttoptr i64 [[TMP42]] to ptr, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP43]], align 8, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP44]] to i64, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP46:%.*]] = xor i64 [[TMP45]], 87960930222080, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP47:%.*]] = inttoptr i64 [[TMP46]] to ptr, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP48:%.*]] = add i64 [[TMP46]], 17592186044416, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP49:%.*]] = inttoptr i64 [[TMP48]] to ptr, !dbg [[DBG11]] -; CHECK-NEXT: [[TMP50:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176, !dbg [[DBG11]] -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP47]], ptr align 16 [[TMP50]], i64 [[TMP0]], i1 false), !dbg [[DBG11]] -; CHECK-NEXT: [[TMP51:%.*]] = getelementptr i8, ptr [[TMP3]], i32 176, !dbg [[DBG11]] -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP49]], ptr align 16 [[TMP51]], i64 [[TMP0]], i1 false), !dbg [[DBG11]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800), !dbg [[DBG2]] +; 
CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[TMP3]], i1 false), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP8]], 17592186044416, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], -4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP9]], i8 -1, i64 4, i1 false), !dbg [[DBG2]] +; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[X_ADDR]], i64 4, ptr @[[GLOB2:[0-9]+]], ptr @[[GLOB3:[0-9]+]]), !dbg [[DBG2]] +; CHECK-NEXT: [[VA:%.*]] = alloca [1 x %struct.__va_list_tag], align 16, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP17:%.*]] = and i64 [[TMP16]], -4, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr, !dbg [[DBG8]] +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP15]], i8 -1, i64 24, i1 false), !dbg [[DBG8]] +; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[VA]], i64 24, ptr @[[GLOB4:[0-9]+]], ptr @[[GLOB5:[0-9]+]]), !dbg [[DBG8]] +; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG9]] +; CHECK-NEXT: [[TMP20:%.*]] = xor i64 [[TMP19]], 87960930222080, !dbg [[DBG9]] +; CHECK-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr, !dbg [[DBG9]] +; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[TMP20]], 17592186044416, !dbg [[DBG9]] +; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr, !dbg [[DBG9]] +; CHECK-NEXT: store i32 [[TMP5]], ptr [[TMP21]], align 4, !dbg [[DBG9]] +; CHECK-NEXT: call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP5]], ptr [[X_ADDR]], i32 zeroext [[TMP6]]), !dbg [[DBG9]] +; CHECK-NEXT: store i32 [[X:%.*]], ptr [[X_ADDR]], align 4, !dbg [[DBG9]] +; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG10:![0-9]+]] +; CHECK-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], 87960930222080, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP25]], 17592186044416, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr, !dbg [[DBG10]] +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP26]], i8 0, i64 24, i1 false), !dbg [[DBG10]] +; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]]), !dbg [[DBG10]] +; CHECK-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[VA]], i64 16, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP30:%.*]] = load ptr, ptr [[TMP29]], align 
8, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[TMP30]] to i64, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP32:%.*]] = xor i64 [[TMP31]], 87960930222080, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[TMP32]], 17592186044416, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP35:%.*]] = inttoptr i64 [[TMP34]] to ptr, !dbg [[DBG10]] +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP33]], ptr align 16 [[TMP2]], i64 176, i1 false), !dbg [[DBG10]] +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP35]], ptr align 16 [[TMP4]], i64 176, i1 false), !dbg [[DBG10]] +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[VA]], i64 8, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[TMP37]] to i64, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr, !dbg [[DBG10]] +; CHECK-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176, !dbg [[DBG10]] +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP40]], ptr align 16 [[TMP43]], i64 [[TMP0]], i1 false), !dbg [[DBG10]] +; CHECK-NEXT: [[TMP44:%.*]] = getelementptr i8, ptr [[TMP4]], i32 176, !dbg [[DBG10]] +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP42]], ptr align 16 [[TMP44]], i64 [[TMP0]], i1 false), !dbg [[DBG10]] ; CHECK-NEXT: ret void ; entry: @@ -582,15 +578,15 @@ entry: define i32 @NoSanitizeMemory(i32 %x) uwtable { ; CHECK-LABEL: @NoSanitizeMemory( ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[X:%.*]], 0, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = and i32 -1, [[TMP0]], !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0, !dbg [[DBG1]] -; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]], !dbg [[DBG1]] -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[X]], 0, !dbg [[DBG1]] -; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]], !dbg [[DBG7]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[X:%.*]], 0, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 -1, [[TMP0]], !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0, !dbg [[DBG2]] +; CHECK-NEXT: [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]], !dbg [[DBG2]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[X]], 0, !dbg [[DBG2]] +; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]], !dbg [[DBG8]] ; CHECK: if.then: -; CHECK-NEXT: tail call void @bar(), !dbg [[DBG8]] +; CHECK-NEXT: tail call void @bar(), !dbg [[DBG9]] ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -615,18 +611,18 @@ declare void @bar() define i32 @NoSanitizeMemoryAlloca() { ; CHECK-LABEL: @NoSanitizeMemoryAlloca( ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[P:%.*]] = alloca i32, align 4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P]] to i64, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG1]] -; 
CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG1]] -; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP2]], i8 0, i64 4, i1 false), !dbg [[DBG1]] -; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG7]] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG7]] -; CHECK-NEXT: [[X:%.*]] = call i32 @NoSanitizeMemoryAllocaHelper(ptr [[P]]), !dbg [[DBG7]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[P:%.*]] = alloca i32, align 4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P]] to i64, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG2]] +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP2]], i8 0, i64 4, i1 false), !dbg [[DBG2]] +; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG8]] +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG8]] +; CHECK-NEXT: [[X:%.*]] = call i32 @NoSanitizeMemoryAllocaHelper(ptr [[P]]), !dbg [[DBG8]] ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -647,10 +643,10 @@ declare i32 @NoSanitizeMemoryAllocaHelper(ptr %p) define i32 @NoSanitizeMemoryUndef() { ; CHECK-LABEL: @NoSanitizeMemoryUndef( ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG1]] -; CHECK-NEXT: [[X:%.*]] = call i32 @NoSanitizeMemoryUndefHelper(i32 undef), !dbg [[DBG1]] +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG2]] +; CHECK-NEXT: [[X:%.*]] = call i32 @NoSanitizeMemoryUndefHelper(i32 undef), !dbg [[DBG2]] ; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4 ; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8 @@ -672,21 +668,21 @@ declare void @foo8(ptr nocapture) define void @msan() sanitize_memory { ; CHECK-LABEL: @msan( ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]] -; CHECK-NEXT: [[TEXT:%.*]] = alloca i8, align 1, !dbg [[DBG1]] -; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TEXT]]), !dbg [[DBG7]] -; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[TEXT]] to i64, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG7]] -; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG7]] -; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP2]], i8 -1, i64 1, i1 false), !dbg [[DBG7]] -; 
CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[TEXT]], i64 1, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG7]] -; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG8]] -; CHECK-NEXT: call void @foo8(ptr [[TEXT]]), !dbg [[DBG8]] -; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TEXT]]), !dbg -; CHECK-NEXT: ret void, !dbg +; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG2]] +; CHECK-NEXT: [[TEXT:%.*]] = alloca i8, align 1, !dbg [[DBG2]] +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TEXT]]), !dbg [[DBG8]] +; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[TEXT]] to i64, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG8]] +; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG8]] +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP2]], i8 -1, i64 1, i1 false), !dbg [[DBG8]] +; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[TEXT]], i64 1, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG8]] +; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG9]] +; CHECK-NEXT: call void @foo8(ptr [[TEXT]]), !dbg [[DBG9]] +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TEXT]]), !dbg [[DBG11:![0-9]+]] +; CHECK-NEXT: ret void, !dbg [[DBG12:![0-9]+]] ; entry: %text = alloca i8, align 1, !dbg !10 diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll index 946c95b072ea9..13a50c28aa286 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll @@ -36,7 +36,7 @@ define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory { ; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0:![0-9]+]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1:![0-9]+]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR3:[0-9]+]] ; CHECK-NEXT: unreachable @@ -69,8 +69,8 @@ define void @NormalArg(i32 noundef %a) nounwind uwtable sanitize_memory { define void @NormalArgAfterNoUndef(i32 noundef %a, i32 %b) nounwind uwtable sanitize_memory { ; CHECK-LABEL: @NormalArgAfterNoUndef( -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 0 to ptr ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64 @@ -80,7 +80,7 @@ define void @NormalArgAfterNoUndef(i32 noundef %a, i32 %b) nounwind uwtable sani ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4 ; CHECK-NEXT: [[_MSCMP:%.*]] 
= icmp ne i32 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP7]], align 4 ; CHECK-NEXT: br label [[TMP9]] @@ -106,7 +106,7 @@ define void @PartialArg(i32 %a) nounwind uwtable sanitize_memory { ; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr ; CHECK-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]] ; CHECK: 8: ; CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP7]], align 4 ; CHECK-NEXT: br label [[TMP9]] @@ -135,7 +135,7 @@ define void @CallNormalArgAfterNoUndef() nounwind uwtable sanitize_memory { ; CHECK-LABEL: @CallNormalArgAfterNoUndef( ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[R:%.*]] = call i32 @NormalRet() #[[ATTR0]] -; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: store i32 0, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @NormalArgAfterNoUndef(i32 [[R]], i32 [[R]]) #[[ATTR0]] ; CHECK-NEXT: ret void ; @@ -157,7 +157,7 @@ define void @CallWithLoaded() nounwind uwtable sanitize_memory { ; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0 -; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]] +; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]] ; CHECK: 7: ; CHECK-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR3]] ; CHECK-NEXT: unreachable diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll index 4b7a910af08bf..5d63367919d1a 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; KMSAN instrumentation tests ; RUN: opt < %s -msan-kernel=1 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CHECK @@ -6,309 +7,455 @@ target triple = "x86_64-unknown-linux-gnu" ; Check the instrumentation prologue. 
define void @Empty() nounwind uwtable sanitize_memory { +; CHECK-LABEL: define void @Empty( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: ret void +; entry: ret void } -; CHECK-LABEL: @Empty -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; %param_shadow: -; CHECK: getelementptr {{.*}} i32 0, i32 0 -; %retval_shadow: -; CHECK: getelementptr {{.*}} i32 0, i32 1 -; %va_arg_shadow: -; CHECK: getelementptr {{.*}} i32 0, i32 2 -; %va_arg_origin: -; CHECK: getelementptr {{.*}} i32 0, i32 3 -; %va_arg_overflow_size: -; CHECK: getelementptr {{.*}} i32 0, i32 4 -; %param_origin: -; CHECK: getelementptr {{.*}} i32 0, i32 5 -; %retval_origin: -; CHECK: getelementptr {{.*}} i32 0, i32 6 - ; Check instrumentation of stores - define void @Store1(ptr nocapture %p, i8 %x) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define void @Store1( +; CHECK-SAME: ptr captures(none) [[P:%.*]], i8 [[X:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; 
CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[_MSARG1]], align 8 +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1:![0-9]+]] +; CHECK: [[BB5]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8:[0-9]+]] +; CHECK-NEXT: br label %[[BB6]] +; CHECK: [[BB6]]: +; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[P]]) +; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0 +; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1 +; CHECK-NEXT: store i8 [[TMP7]], ptr [[TMP14]], align 1 +; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i8 [[TMP7]], 0 +; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB12:.*]], !prof [[PROF1]] +; CHECK: [[BB10]]: +; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]]) +; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP15]], align 4 +; CHECK-NEXT: br label %[[BB12]] +; CHECK: [[BB12]]: +; CHECK-NEXT: store i8 [[X]], ptr [[P]], align 1 +; CHECK-NEXT: ret void +; entry: store i8 %x, ptr %p ret void } -; CHECK-LABEL: @Store1 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: [[BASE:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]] -; CHECK: [[SHADOW_PTR:%[a-z0-9_]+]] = inttoptr {{.*}} [[BASE]] -; CHECK: [[SHADOW:%[a-z0-9]+]] = load i64, ptr [[SHADOW_PTR]] -; CHECK: [[BASE2:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: icmp ne i64 [[SHADOW]] -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_metadata_ptr_for_store_1(ptr %p) -; CHECK: store i8 -; If the new shadow is non-zero, jump to __msan_chain_origin() -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_chain_origin -; Storing origin here: -; CHECK: store i32 -; CHECK: br label -; CHECK: {{^[0-9]+}}: -; CHECK: store i8 -; CHECK: ret void - define void @Store2(ptr nocapture %p, i16 %x) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define void @Store2( +; CHECK-SAME: ptr captures(none) [[P:%.*]], i16 [[X:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr 
[[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[_MSARG1]], align 8 +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] +; CHECK: [[BB5]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]] +; CHECK-NEXT: br label %[[BB6]] +; CHECK: [[BB6]]: +; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_2(ptr [[P]]) +; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0 +; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1 +; CHECK-NEXT: store i16 [[TMP7]], ptr [[TMP14]], align 2 +; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i16 [[TMP7]], 0 +; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB12:.*]], !prof [[PROF1]] +; CHECK: [[BB10]]: +; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]]) +; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP15]], align 4 +; CHECK-NEXT: br label %[[BB12]] +; CHECK: [[BB12]]: +; CHECK-NEXT: store i16 [[X]], ptr [[P]], align 2 +; CHECK-NEXT: ret void +; entry: store i16 %x, ptr %p ret void } -; CHECK-LABEL: @Store2 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: load i64 -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_metadata_ptr_for_store_2(ptr %p) -; CHECK: store i16 -; If the new shadow is non-zero, jump to __msan_chain_origin() -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_chain_origin -; Storing origin here: -; CHECK: store i32 -; CHECK: br label -; CHECK: {{^[0-9]+}}: -; CHECK: store i16 -; CHECK: ret void - - define void @Store4(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define void @Store4( +; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x 
i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[_MSARG1]], align 8 +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] +; CHECK: [[BB5]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]] +; CHECK-NEXT: br label %[[BB6]] +; CHECK: [[BB6]]: +; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_4(ptr [[P]]) +; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0 +; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1 +; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP14]], align 4 +; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i32 [[TMP7]], 0 +; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB12:.*]], !prof [[PROF1]] +; CHECK: [[BB10]]: +; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]]) +; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP15]], align 4 +; CHECK-NEXT: br label %[[BB12]] +; CHECK: [[BB12]]: +; CHECK-NEXT: store i32 [[X]], ptr [[P]], align 4 +; CHECK-NEXT: ret void +; entry: store i32 %x, ptr %p ret void } -; CHECK-LABEL: @Store4 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: load i32 -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_metadata_ptr_for_store_4(ptr %p) -; CHECK: store i32 -; If the new shadow is non-zero, jump to __msan_chain_origin() -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_chain_origin -; Storing origin here: -; CHECK: store i32 -; CHECK: br label -; CHECK: {{^[0-9]+}}: -; CHECK: store i32 -; CHECK: ret void - define void @Store8(ptr nocapture %p, i64 %x) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define void @Store8( +; CHECK-SAME: ptr captures(none) [[P:%.*]], i64 [[X:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { 
[100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[_MSARG1]], align 8 +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] +; CHECK: [[BB5]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]] +; CHECK-NEXT: br label %[[BB6]] +; CHECK: [[BB6]]: +; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_8(ptr [[P]]) +; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0 +; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1 +; CHECK-NEXT: store i64 [[TMP7]], ptr [[TMP14]], align 8 +; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i64 [[TMP7]], 0 +; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB15:.*]], !prof [[PROF1]] +; CHECK: [[BB10]]: +; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]]) +; CHECK-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64 +; CHECK-NEXT: [[TMP19:%.*]] = shl i64 [[TMP18]], 32 +; CHECK-NEXT: [[TMP20:%.*]] = or i64 [[TMP18]], [[TMP19]] +; CHECK-NEXT: store i64 [[TMP20]], ptr [[TMP15]], align 8 +; CHECK-NEXT: br label %[[BB15]] +; CHECK: [[BB15]]: +; CHECK-NEXT: store i64 [[X]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; entry: store i64 %x, ptr %p ret void } -; CHECK-LABEL: @Store8 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: load i64 -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_metadata_ptr_for_store_8(ptr %p) -; CHECK: store i64 -; If the new shadow is non-zero, jump to __msan_chain_origin() -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_chain_origin -; Storing origin here: -; CHECK: store i64 -; CHECK: br label -; CHECK: {{^[0-9]+}}: -; CHECK: store i64 -; CHECK: ret void - define void @Store16(ptr nocapture %p, i128 %x) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define void @Store16( +; CHECK-SAME: ptr captures(none) [[P:%.*]], i128 [[X:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() 
+; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8 +; CHECK-NEXT: [[TMP7:%.*]] = load i128, ptr [[_MSARG1]], align 8 +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[_MSARG_O2]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]] +; CHECK: [[BB5]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]] +; CHECK-NEXT: br label %[[BB6]] +; CHECK: [[BB6]]: +; CHECK-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_n(ptr [[P]], i64 16) +; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0 +; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1 +; CHECK-NEXT: store i128 [[TMP7]], ptr [[TMP14]], align 8 +; CHECK-NEXT: [[_MSCMP3:%.*]] = icmp ne i128 [[TMP7]], 0 +; CHECK-NEXT: br i1 [[_MSCMP3]], label %[[BB10:.*]], label %[[BB16:.*]], !prof [[PROF1]] +; CHECK: [[BB10]]: +; CHECK-NEXT: [[TMP17:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP10]]) +; CHECK-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64 +; CHECK-NEXT: [[TMP19:%.*]] = shl i64 [[TMP18]], 32 +; CHECK-NEXT: [[TMP20:%.*]] = or i64 [[TMP18]], [[TMP19]] +; CHECK-NEXT: store i64 [[TMP20]], ptr [[TMP15]], align 8 +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[TMP15]], i32 1 +; CHECK-NEXT: store i64 [[TMP20]], ptr [[TMP21]], align 8 +; CHECK-NEXT: br label %[[BB16]] +; CHECK: [[BB16]]: +; CHECK-NEXT: store i128 [[X]], ptr [[P]], align 8 +; CHECK-NEXT: ret void +; entry: store i128 %x, ptr %p ret void } -; CHECK-LABEL: @Store16 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: load i64 -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_metadata_ptr_for_store_n(ptr %p, i64 16) -; CHECK: store i128 -; If the new shadow is 
non-zero, jump to __msan_chain_origin() -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; CHECK: @__msan_chain_origin -; Storing origin here: -; CHECK: store i64 -; CHECK: br label -; CHECK: {{^[0-9]+}}: -; CHECK: store i128 -; CHECK: ret void - - ; Check instrumentation of loads define i8 @Load1(ptr nocapture %p) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define i8 @Load1( +; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]] +; CHECK-NEXT: br label %[[BB4]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[P]], align 1 +; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_1(ptr [[P]]) +; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1 +; CHECK-NEXT: [[_MSLD:%.*]] = load i8, ptr [[TMP9]], align 1 +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: store i8 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4 +; CHECK-NEXT: ret i8 [[TMP7]] +; entry: %0 = load i8, ptr %p ret i8 %0 } -; CHECK-LABEL: @Load1 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: load i64 -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; Load the value from %p. This is done before accessing the shadow -; to ease atomic handling. -; CHECK: load i8 -; CHECK: @__msan_metadata_ptr_for_load_1(ptr %p) -; Load the shadow and origin. 
-; CHECK: load i8 -; CHECK: load i32 - - define i16 @Load2(ptr nocapture %p) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define i16 @Load2( +; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]] +; CHECK-NEXT: br label %[[BB4]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[P]], align 2 +; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_2(ptr [[P]]) +; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1 +; CHECK-NEXT: [[_MSLD:%.*]] = load i16, ptr [[TMP9]], align 2 +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: store i16 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4 +; CHECK-NEXT: ret i16 [[TMP7]] +; entry: %0 = load i16, ptr %p ret i16 %0 } -; CHECK-LABEL: @Load2 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: load i64 -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; Load the value from %p. This is done before accessing the shadow -; to ease atomic handling. -; CHECK: load i16 -; CHECK: @__msan_metadata_ptr_for_load_2(ptr %p) -; Load the shadow and origin. 
-; CHECK: load i16 -; CHECK: load i32 - - define i32 @Load4(ptr nocapture %p) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define i32 @Load4( +; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]] +; CHECK-NEXT: br label %[[BB4]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_4(ptr [[P]]) +; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1 +; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP9]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: store i32 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4 +; CHECK-NEXT: ret i32 [[TMP7]] +; entry: %0 = load i32, ptr %p ret i32 %0 } -; CHECK-LABEL: @Load4 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: load i64 -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; Load the value from %p. This is done before accessing the shadow -; to ease atomic handling. -; CHECK: load i32 -; CHECK: @__msan_metadata_ptr_for_load_4(ptr %p) -; Load the shadow and origin. 
-; CHECK: load i32 -; CHECK: load i32 - define i64 @Load8(ptr nocapture %p) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define i64 @Load8( +; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]] +; CHECK-NEXT: br label %[[BB4]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[P]], align 8 +; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_8(ptr [[P]]) +; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1 +; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 8 +; CHECK-NEXT: store i64 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4 +; CHECK-NEXT: ret i64 [[TMP7]] +; entry: %0 = load i64, ptr %p ret i64 %0 } -; CHECK-LABEL: @Load8 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: load i64 -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; Load the value from %p. This is done before accessing the shadow -; to ease atomic handling. -; CHECK: load i64 -; CHECK: @__msan_metadata_ptr_for_load_8(ptr %p) -; Load the shadow and origin. 
-; CHECK: load i64 -; CHECK: load i32 - define i128 @Load16(ptr nocapture %p) nounwind uwtable sanitize_memory { +; CHECK-LABEL: define i128 @Load16( +; CHECK-SAME: ptr captures(none) [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @__msan_warning(i32 [[TMP4]]) #[[ATTR8]] +; CHECK-NEXT: br label %[[BB4]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP7:%.*]] = load i128, ptr [[P]], align 8 +; CHECK-NEXT: [[TMP8:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_n(ptr [[P]], i64 16) +; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { ptr, ptr } [[TMP8]], 1 +; CHECK-NEXT: [[_MSLD:%.*]] = load i128, ptr [[TMP9]], align 8 +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 8 +; CHECK-NEXT: store i128 [[_MSLD]], ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: store i32 [[TMP11]], ptr [[RETVAL_ORIGIN]], align 4 +; CHECK-NEXT: ret i128 [[TMP7]] +; entry: %0 = load i128, ptr %p ret i128 %0 } -; CHECK-LABEL: @Load16 -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: ptrtoint {{.*}} [[PARAM_SHADOW]] -; Load the shadow of %p and check it -; CHECK: load i64 -; CHECK: icmp -; CHECK: br i1 -; CHECK: {{^[0-9]+}}: -; Load the value from %p. This is done before accessing the shadow -; to ease atomic handling. -; CHECK: load i128 -; CHECK: @__msan_metadata_ptr_for_load_n(ptr %p, i64 16) -; Load the shadow and origin. -; CHECK: load i128 -; CHECK: load i32 - - ; Test kernel-specific va_list instrumentation %struct.__va_list_tag = type { i32, i32, ptr, ptr } @@ -319,6 +466,68 @@ declare dso_local i32 @VAListFn(ptr, ptr) local_unnamed_addr ; Function Attrs: nounwind uwtable define dso_local i32 @VarArgFn(ptr %fmt, ...) 
local_unnamed_addr sanitize_memory #0 { +; CHECK-LABEL: define dso_local i32 @VarArgFn( +; CHECK-SAME: ptr [[FMT:%.*]], ...) local_unnamed_addr #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG_O:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[_MSARG_O]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[VA_ARG_OVERFLOW_SIZE]], align 8 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 48, [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = alloca i8, i64 [[TMP6]], align 8 +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 [[TMP6]], i1 false) +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP6]], i64 800) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP7]], ptr align 8 [[VA_ARG_SHADOW]], i64 [[TMP8]], i1 false) +; CHECK-NEXT: [[TMP9:%.*]] = alloca i8, i64 [[TMP6]], align 8 +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[VA_ARG_ORIGIN]], i64 [[TMP8]], i1 false) +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16 +; CHECK-NEXT: call void @__msan_poison_alloca(ptr [[ARGS]], i64 24, ptr @[[GLOB0:[0-9]+]]) +; CHECK-NEXT: [[TMP10:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[ARGS]]) +; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 0 +; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { ptr, ptr } [[TMP10]], 1 +; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP11]], i8 0, i64 24, i1 false) +; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]]) +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[ARGS]], i64 16 +; CHECK-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8 +; CHECK-NEXT: [[TMP17:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[TMP16]]) +; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { ptr, ptr } [[TMP17]], 0 +; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { ptr, ptr } [[TMP17]], 1 +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP18]], ptr align 16 [[TMP7]], i64 48, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP19]], ptr align 16 
[[TMP9]], i64 48, i1 false) +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[ARGS]], i64 8 +; CHECK-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8 +; CHECK-NEXT: [[TMP24:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[TMP23]]) +; CHECK-NEXT: [[TMP25:%.*]] = extractvalue { ptr, ptr } [[TMP24]], 0 +; CHECK-NEXT: [[TMP26:%.*]] = extractvalue { ptr, ptr } [[TMP24]], 1 +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP7]], i32 48 +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP25]], ptr align 16 [[TMP27]], i64 [[TMP5]], i1 false) +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[TMP9]], i32 48 +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP26]], ptr align 16 [[TMP28]], i64 [[TMP5]], i1 false) +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: store i64 [[TMP2]], ptr [[_MSARG1]], align 8 +; CHECK-NEXT: [[_MSARG_O2:%.*]] = getelementptr i8, ptr [[PARAM_ORIGIN]], i64 0 +; CHECK-NEXT: store i32 [[TMP4]], ptr [[_MSARG_O2]], align 4 +; CHECK-NEXT: [[_MSARG3:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8 +; CHECK-NEXT: store i64 0, ptr [[_MSARG3]], align 8 +; CHECK-NEXT: store i32 0, ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: [[CALL:%.*]] = call i32 @VAListFn(ptr [[FMT]], ptr nonnull [[ARGS]]) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[RETVAL_ORIGIN]], align 4 +; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]]) +; CHECK-NEXT: store i32 [[_MSRET]], ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: store i32 [[TMP33]], ptr [[RETVAL_ORIGIN]], align 4 +; CHECK-NEXT: ret i32 [[CALL]] +; entry: %args = alloca [1 x %struct.__va_list_tag], align 16 call void @llvm.va_start(ptr nonnull %args) @@ -330,52 +539,45 @@ entry: ; Kernel is built without SSE support. attributes #0 = { "target-features"="+fxsr,+x87,-sse" } -; CHECK-LABEL: @VarArgFn -; CHECK: @__msan_get_context_state() -; CHECK: [[VA_ARG_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 2 -; CHECK: [[VA_ARG_ORIGIN:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 3 -; CHECK: [[VA_ARG_OVERFLOW_SIZE:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 4 -; CHECK: [[OSIZE:%[0-9]+]] = load i64, ptr [[VA_ARG_OVERFLOW_SIZE]] ; Register save area is 48 bytes for non-SSE builds. 
-; CHECK: [[SIZE:%[0-9]+]] = add i64 48, [[OSIZE]] -; CHECK: [[SHADOWS:%[0-9]+]] = alloca i8, i64 [[SIZE]] -; CHECK: call void @llvm.memset{{.*}}(ptr align 8 [[SHADOWS]], i8 0, i64 [[SIZE]], i1 false) -; CHECK: [[COPYSZ:%[0-9]+]] = call i64 @llvm.umin.i64(i64 [[SIZE]], i64 800) -; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[SHADOWS]], ptr align 8 [[VA_ARG_SHADOW]], i64 [[COPYSZ]] -; CHECK: [[ORIGINS:%[0-9]+]] = alloca i8, i64 [[SIZE]] -; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[ORIGINS]], ptr align 8 [[VA_ARG_ORIGIN]], i64 [[COPYSZ]] -; CHECK: call i32 @VAListFn ; Function Attrs: nounwind uwtable define dso_local void @VarArgCaller() local_unnamed_addr sanitize_memory { +; CHECK-LABEL: define dso_local void @VarArgCaller( +; CHECK-SAME: ) local_unnamed_addr #[[ATTR2:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state() +; CHECK-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0 +; CHECK-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1 +; CHECK-NEXT: [[VA_ARG_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 2 +; CHECK-NEXT: [[VA_ARG_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 3 +; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4 +; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5 +; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6 +; CHECK-NEXT: call void @llvm.donothing() +; CHECK-NEXT: [[_MSARG:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 0 +; CHECK-NEXT: store i64 0, ptr [[_MSARG]], align 8 +; CHECK-NEXT: [[_MSARG1:%.*]] = getelementptr i8, ptr [[PARAM_SHADOW]], i64 8 +; CHECK-NEXT: store i32 0, ptr [[_MSARG1]], align 8 +; CHECK-NEXT: [[_MSARG_VA_S:%.*]] = getelementptr i8, ptr [[VA_ARG_SHADOW]], i64 0 +; CHECK-NEXT: [[_MSARG_VA_O:%.*]] = getelementptr i8, ptr [[VA_ARG_ORIGIN]], i64 0 +; CHECK-NEXT: [[_MSARG_VA_S2:%.*]] = getelementptr i8, ptr [[VA_ARG_SHADOW]], i64 8 +; CHECK-NEXT: [[_MSARG_VA_O3:%.*]] = getelementptr i8, ptr [[VA_ARG_ORIGIN]], i64 8 +; CHECK-NEXT: store i32 0, ptr [[_MSARG_VA_S2]], align 8 +; CHECK-NEXT: store i32 0, ptr [[_MSARG_VA_O3]], align 8 +; CHECK-NEXT: store i64 0, ptr [[VA_ARG_OVERFLOW_SIZE]], align 8 +; CHECK-NEXT: store i32 0, ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (ptr, ...) @VarArgFn(ptr @.str, i32 123) +; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr [[RETVAL_SHADOW]], align 8 +; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL_ORIGIN]], align 4 +; CHECK-NEXT: ret void +; entry: %call = tail call i32 (ptr, ...) 
@VarArgFn(ptr @.str, i32 123) ret void } -; CHECK-LABEL: @VarArgCaller - -; CHECK: entry: -; CHECK: @__msan_get_context_state() -; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0 -; CHECK: [[VA_ARG_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 2 -; CHECK: [[VA_ARG_OVERFLOW_SIZE:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 4 - -; CHECK: [[PARAM_SI:%[_a-z0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]] -; CHECK: [[ARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[PARAM_SI]] to ptr -; First argument is initialized -; CHECK: store i64 0, ptr [[ARG1_S]] - -; Dangling cast of va_arg_shadow[0], unused because the first argument is fixed. -; CHECK: [[VA_CAST0:%[_a-z0-9]+]] = ptrtoint {{.*}} [[VA_ARG_SHADOW]] to i64 - -; CHECK: [[VA_CAST1:%[_a-z0-9]+]] = ptrtoint {{.*}} [[VA_ARG_SHADOW]] to i64 -; CHECK: [[ARG1_SI:%[_a-z0-9]+]] = add i64 [[VA_CAST1]], 8 -; CHECK: [[PARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[ARG1_SI]] to ptr - -; Shadow for 123 is 0. -; CHECK: store i32 0, ptr [[ARG1_S]] - -; CHECK: store i64 0, ptr [[VA_ARG_OVERFLOW_SIZE]] -; CHECK: call i32 (ptr, ...) @VarArgFn({{.*}} @.str{{.*}} i32 123) +;. +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575} +;. diff --git a/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll b/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll index 24276a28fdd70..e88341663fa3a 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll @@ -8,7 +8,7 @@ define void @test_memcpy(ptr %p, ptr byval(i32) %p2) sanitize_memory { ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P2:%.*]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP3]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 4, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP3]], ptr align 4 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 4, i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call ptr @__msan_memcpy(ptr [[P:%.*]], ptr [[P2]], i64 4) ; CHECK-NEXT: ret void @@ -22,7 +22,7 @@ define void @test_memmove(ptr %p, ptr byval(i32) %p2) sanitize_memory { ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P2:%.*]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080 ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr -; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP3]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 4, i1 false) +; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP3]], ptr align 4 getelementptr (i8, ptr @__msan_param_tls, i64 8), i64 4, i1 false) ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP4:%.*]] = call ptr @__msan_memmove(ptr [[P:%.*]], ptr [[P2]], i64 4) ; CHECK-NEXT: ret void diff --git a/llvm/test/Instrumentation/MemorySanitizer/or.ll b/llvm/test/Instrumentation/MemorySanitizer/or.ll index 20993a54187ac..ce33022e46652 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/or.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/or.ll @@ -11,7 +11,7 @@ define i8 @test_or(i8 %a, i8 %b) sanitize_memory { ; CHECK-LABEL: define i8 @test_or( ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 
ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = xor i8 [[A]], -1 ; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[B]], -1 @@ -32,7 +32,7 @@ define i8 @test_disjoint_or(i8 %a, i8 %b) sanitize_memory { ; CHECK-IMPRECISE-LABEL: define i8 @test_disjoint_or( ; CHECK-IMPRECISE-SAME: i8 [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] { ; CHECK-IMPRECISE-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-IMPRECISE-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-IMPRECISE-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-IMPRECISE-NEXT: call void @llvm.donothing() ; CHECK-IMPRECISE-NEXT: [[TMP3:%.*]] = xor i8 [[A]], -1 ; CHECK-IMPRECISE-NEXT: [[TMP4:%.*]] = xor i8 [[B]], -1 @@ -48,7 +48,7 @@ define i8 @test_disjoint_or(i8 %a, i8 %b) sanitize_memory { ; CHECK-PRECISE-LABEL: define i8 @test_disjoint_or( ; CHECK-PRECISE-SAME: i8 [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] { ; CHECK-PRECISE-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-PRECISE-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-PRECISE-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-PRECISE-NEXT: call void @llvm.donothing() ; CHECK-PRECISE-NEXT: [[TMP3:%.*]] = xor i8 [[A]], -1 ; CHECK-PRECISE-NEXT: [[TMP4:%.*]] = xor i8 [[B]], -1 diff --git a/llvm/test/Instrumentation/MemorySanitizer/overflow.ll b/llvm/test/Instrumentation/MemorySanitizer/overflow.ll index 0cfae0008263f..9c9efcb72def3 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/overflow.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/overflow.ll @@ -8,7 +8,7 @@ define {i64, i1} @test_sadd_with_overflow(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define { i64, i1 } @test_sadd_with_overflow( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0 @@ -26,7 +26,7 @@ define {i64, i1} @test_uadd_with_overflow(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define { i64, i1 } @test_uadd_with_overflow( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0 @@ -44,7 +44,7 @@ define {i64, i1} @test_smul_with_overflow(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define { i64, i1 } @test_smul_with_overflow( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: 
[[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0 @@ -61,7 +61,7 @@ define {i64, i1} @test_umul_with_overflow(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define { i64, i1 } @test_umul_with_overflow( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0 @@ -78,7 +78,7 @@ define {i64, i1} @test_ssub_with_overflow(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define { i64, i1 } @test_ssub_with_overflow( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0 @@ -95,7 +95,7 @@ define {i64, i1} @test_usub_with_overflow(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define { i64, i1 } @test_usub_with_overflow( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0 @@ -113,7 +113,7 @@ define {<4 x i32>, <4 x i1>} @test_sadd_with_overflow_vec(<4 x i32> %a, <4 x i32 ; CHECK-LABEL: define { <4 x i32>, <4 x i1> } @test_sadd_with_overflow_vec( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer diff --git a/llvm/test/Instrumentation/MemorySanitizer/pr32842.ll b/llvm/test/Instrumentation/MemorySanitizer/pr32842.ll index 6d275b3e2d383..87ff4e6200b69 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/pr32842.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/pr32842.ll @@ -13,7 +13,7 @@ define zeroext i1 @_Z1fii(i32 %x, i32 %y) sanitize_memory { ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = 
load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP10:%.*]] = xor i32 [[X]], -2147483648 ; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP0]], -1 diff --git a/llvm/test/Instrumentation/MemorySanitizer/saturating.ll b/llvm/test/Instrumentation/MemorySanitizer/saturating.ll index dcd8a080144ba..9473523c5f19e 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/saturating.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/saturating.ll @@ -8,7 +8,7 @@ define i64 @test_sadd_sat(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define i64 @test_sadd_sat( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.sadd.sat.i64(i64 [[A]], i64 [[B]]) @@ -23,7 +23,7 @@ define i64 @test_uadd_sat(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define i64 @test_uadd_sat( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[A]], i64 [[B]]) @@ -38,7 +38,7 @@ define i64 @test_ssub_sat(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define i64 @test_ssub_sat( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.ssub.sat.i64(i64 [[A]], i64 [[B]]) @@ -53,7 +53,7 @@ define i64 @test_usub_sat(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define i64 @test_usub_sat( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[A]], i64 [[B]]) @@ -68,7 +68,7 @@ define i64 @test_sshl_sat(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define i64 @test_sshl_sat( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: 
[[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.sshl.sat.i64(i64 [[A]], i64 [[B]]) @@ -83,7 +83,7 @@ define i64 @test_ushl_sat(i64 %a, i64 %b) #0 { ; CHECK-LABEL: define i64 @test_ushl_sat( ; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.ushl.sat.i64(i64 [[A]], i64 [[B]]) @@ -98,7 +98,7 @@ define <4 x i32> @test_sadd_sat_vec(<4 x i32> %a, <4 x i32> %b) #0 { ; CHECK-LABEL: define <4 x i32> @test_sadd_sat_vec( ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> [[A]], <4 x i32> [[B]]) diff --git a/llvm/test/Instrumentation/MemorySanitizer/scmp.ll b/llvm/test/Instrumentation/MemorySanitizer/scmp.ll index 5c94c216106a2..0d4799fbe6f60 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/scmp.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/scmp.ll @@ -10,7 +10,7 @@ define i8 @scmp.8.8(i8 %x, i8 %y) nounwind #0 { ; CHECK-LABEL: define i8 @scmp.8.8( ; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i8 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i8 [[_MSPROP]], 0 @@ -26,7 +26,7 @@ define i8 @scmp.8.16(i16 %x, i16 %y) nounwind #0 { ; CHECK-LABEL: define i8 @scmp.8.16( ; CHECK-SAME: i16 [[X:%.*]], i16 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i16 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i16 [[_MSPROP]], 0 @@ -43,7 +43,7 @@ define i8 @scmp.8.32(i32 %x, i32 %y) nounwind #0 { ; CHECK-LABEL: define i8 @scmp.8.32( ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = 
load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0 @@ -60,7 +60,7 @@ define i8 @scmp.8.64(i64 %x, i64 %y) nounwind #0 { ; CHECK-LABEL: define i8 @scmp.8.64( ; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0 @@ -77,7 +77,7 @@ define i8 @scmp.8.128(i128 %x, i128 %y) nounwind #0 { ; CHECK-LABEL: define i8 @scmp.8.128( ; CHECK-SAME: i128 [[X:%.*]], i128 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i128 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i128 [[_MSPROP]], 0 @@ -94,7 +94,7 @@ define i32 @scmp.32.32(i32 %x, i32 %y) nounwind #0 { ; CHECK-LABEL: define i32 @scmp.32.32( ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0 @@ -110,7 +110,7 @@ define i32 @scmp.32.64(i64 %x, i64 %y) nounwind #0 { ; CHECK-LABEL: define i32 @scmp.32.64( ; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0 @@ -127,7 +127,7 @@ define i64 @scmp.64.64(i64 %x, i64 %y) nounwind #0 { ; CHECK-LABEL: define i64 @scmp.64.64( ; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0 @@ -143,7 +143,7 @@ define i4 @scmp_narrow_result(i32 
%x, i32 %y) nounwind #0 { ; CHECK-LABEL: define i4 @scmp_narrow_result( ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0 @@ -160,7 +160,7 @@ define i8 @scmp_narrow_op(i62 %x, i62 %y) nounwind #0 { ; CHECK-LABEL: define i8 @scmp_narrow_op( ; CHECK-SAME: i62 [[X:%.*]], i62 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i62, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i62, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i62, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i62 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i62 [[_MSPROP]], 0 @@ -177,7 +177,7 @@ define i141 @scmp_wide_result(i32 %x, i32 %y) nounwind #0 { ; CHECK-LABEL: define i141 @scmp_wide_result( ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0 @@ -194,7 +194,7 @@ define i8 @scmp_wide_op(i109 %x, i109 %y) nounwind #0 { ; CHECK-LABEL: define i8 @scmp_wide_op( ; CHECK-SAME: i109 [[X:%.*]], i109 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i109, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i109, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i109, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i109 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i109 [[_MSPROP]], 0 @@ -211,7 +211,7 @@ define i41 @scmp_uncommon_types(i7 %x, i7 %y) nounwind #0 { ; CHECK-LABEL: define i41 @scmp_uncommon_types( ; CHECK-SAME: i7 [[X:%.*]], i7 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i7, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i7, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i7, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i7 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i7 [[_MSPROP]], 0 @@ -228,7 +228,7 @@ define <4 x i32> @scmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @scmp_normal_vectors( ; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), 
i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer @@ -244,7 +244,7 @@ define <4 x i8> @scmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind #0 ; CHECK-LABEL: define <4 x i8> @scmp_narrow_vec_result( ; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer @@ -261,7 +261,7 @@ define <4 x i32> @scmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @scmp_narrow_vec_op( ; CHECK-SAME: <4 x i8> [[X:%.*]], <4 x i8> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i8> [[_MSPROP]], zeroinitializer @@ -278,7 +278,7 @@ define <16 x i32> @scmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind #0 ; CHECK-LABEL: define <16 x i32> @scmp_wide_vec_result( ; CHECK-SAME: <16 x i8> [[X:%.*]], <16 x i8> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i8> [[_MSPROP]], zeroinitializer @@ -295,7 +295,7 @@ define <16 x i8> @scmp_wide_vec_op(<16 x i64> %x, <16 x i64> %y) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @scmp_wide_vec_op( ; CHECK-SAME: <16 x i64> [[X:%.*]], <16 x i64> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 128), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i64> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i64> [[_MSPROP]], zeroinitializer @@ -312,7 +312,7 @@ define <7 x i117> @scmp_uncommon_vectors(<7 x i7> %x, <7 x i7> %y) nounwind #0 { ; CHECK-LABEL: define <7 x i117> @scmp_uncommon_vectors( ; CHECK-SAME: <7 x i7> [[X:%.*]], <7 x i7> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <7 x i7>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = 
load <7 x i7>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <7 x i7>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <7 x i7> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <7 x i7> [[_MSPROP]], zeroinitializer @@ -329,7 +329,7 @@ define <1 x i3> @scmp_scalarize(<1 x i33> %x, <1 x i33> %y) nounwind #0 { ; CHECK-LABEL: define <1 x i3> @scmp_scalarize( ; CHECK-SAME: <1 x i33> [[X:%.*]], <1 x i33> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i33>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i33>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i33>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <1 x i33> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <1 x i33> [[_MSPROP]], zeroinitializer @@ -346,7 +346,7 @@ define <2 x i8> @scmp_bool_operands(<2 x i1> %x, <2 x i1> %y) nounwind #0 { ; CHECK-LABEL: define <2 x i8> @scmp_bool_operands( ; CHECK-SAME: <2 x i1> [[X:%.*]], <2 x i1> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i1>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i1>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i1> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i1> [[_MSPROP]], zeroinitializer @@ -363,7 +363,7 @@ define <2 x i16> @scmp_ret_wider_than_operands(<2 x i8> %x, <2 x i8> %y) nounwin ; CHECK-LABEL: define <2 x i16> @scmp_ret_wider_than_operands( ; CHECK-SAME: <2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <2 x i8> [[_MSPROP]], zeroinitializer diff --git a/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll b/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll index 1b70242dae2b5..3c9d6d8b91b3e 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/ucmp.ll @@ -10,7 +10,7 @@ define i8 @ucmp.8.8(i8 %x, i8 %y) nounwind #0 { ; CHECK-LABEL: define i8 @ucmp.8.8( ; CHECK-SAME: i8 [[X:%.*]], i8 [[Y:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i8 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i8 [[_MSPROP]], 0 @@ -26,7 +26,7 @@ define i8 @ucmp.8.16(i16 %x, i16 %y) nounwind #0 { ; CHECK-LABEL: define i8 @ucmp.8.16( ; 
CHECK-SAME: i16 [[X:%.*]], i16 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i16 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i16 [[_MSPROP]], 0 @@ -43,7 +43,7 @@ define i8 @ucmp.8.32(i32 %x, i32 %y) nounwind #0 { ; CHECK-LABEL: define i8 @ucmp.8.32( ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0 @@ -60,7 +60,7 @@ define i8 @ucmp.8.64(i64 %x, i64 %y) nounwind #0 { ; CHECK-LABEL: define i8 @ucmp.8.64( ; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0 @@ -77,7 +77,7 @@ define i8 @ucmp.8.128(i128 %x, i128 %y) nounwind #0 { ; CHECK-LABEL: define i8 @ucmp.8.128( ; CHECK-SAME: i128 [[X:%.*]], i128 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i128, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i128 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i128 [[_MSPROP]], 0 @@ -94,7 +94,7 @@ define i32 @ucmp.32.32(i32 %x, i32 %y) nounwind #0 { ; CHECK-LABEL: define i32 @ucmp.32.32( ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0 @@ -110,7 +110,7 @@ define i32 @ucmp.32.64(i64 %x, i64 %y) nounwind #0 { ; CHECK-LABEL: define i32 @ucmp.32.64( ; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: 
[[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0 @@ -127,7 +127,7 @@ define i64 @ucmp.64.64(i64 %x, i64 %y) nounwind #0 { ; CHECK-LABEL: define i64 @ucmp.64.64( ; CHECK-SAME: i64 [[X:%.*]], i64 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i64 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0 @@ -143,7 +143,7 @@ define i4 @ucmp_narrow_result(i32 %x, i32 %y) nounwind #0 { ; CHECK-LABEL: define i4 @ucmp_narrow_result( ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0 @@ -160,7 +160,7 @@ define i8 @ucmp_narrow_op(i62 %x, i62 %y) nounwind #0 { ; CHECK-LABEL: define i8 @ucmp_narrow_op( ; CHECK-SAME: i62 [[X:%.*]], i62 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i62, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i62, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i62, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i62 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i62 [[_MSPROP]], 0 @@ -177,7 +177,7 @@ define i141 @ucmp_wide_result(i32 %x, i32 %y) nounwind #0 { ; CHECK-LABEL: define i141 @ucmp_wide_result( ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i32 [[_MSPROP]], 0 @@ -194,7 +194,7 @@ define i8 @ucmp_wide_op(i109 %x, i109 %y) nounwind #0 { ; CHECK-LABEL: define i8 @ucmp_wide_op( ; CHECK-SAME: i109 [[X:%.*]], i109 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i109, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load i109, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i109, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i109 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i109 [[_MSPROP]], 0 @@ -211,7 +211,7 @@ define i41 @ucmp_uncommon_types(i7 %x, i7 %y) nounwind #0 { ; CHECK-LABEL: define i41 @ucmp_uncommon_types( ; CHECK-SAME: i7 [[X:%.*]], i7 [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i7, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: 
[[TMP2:%.*]] = load i7, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i7, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or i7 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or i7 [[_MSPROP]], 0 @@ -228,7 +228,7 @@ define <4 x i32> @ucmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @ucmp_normal_vectors( ; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer @@ -244,7 +244,7 @@ define <4 x i8> @ucmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind #0 ; CHECK-LABEL: define <4 x i8> @ucmp_narrow_vec_result( ; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], zeroinitializer @@ -261,7 +261,7 @@ define <4 x i32> @ucmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind #0 { ; CHECK-LABEL: define <4 x i32> @ucmp_narrow_vec_op( ; CHECK-SAME: <4 x i8> [[X:%.*]], <4 x i8> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i8> [[_MSPROP]], zeroinitializer @@ -278,7 +278,7 @@ define <16 x i32> @ucmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind #0 ; CHECK-LABEL: define <16 x i32> @ucmp_wide_vec_result( ; CHECK-SAME: <16 x i8> [[X:%.*]], <16 x i8> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i8> [[_MSPROP]], zeroinitializer @@ -295,7 +295,7 @@ define <16 x i8> @ucmp_wide_vec_op(<16 x i32> %x, <16 x i32> %y) nounwind #0 { ; CHECK-LABEL: define <16 x i8> @ucmp_wide_vec_op( ; CHECK-SAME: <16 x i32> [[X:%.*]], <16 x i32> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr 
@__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 64), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], zeroinitializer @@ -312,7 +312,7 @@ define <17 x i2> @ucmp_uncommon_vectors(<17 x i71> %x, <17 x i71> %y) nounwind # ; CHECK-LABEL: define <17 x i2> @ucmp_uncommon_vectors( ; CHECK-SAME: <17 x i71> [[X:%.*]], <17 x i71> [[Y:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <17 x i71>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <17 x i71>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 256) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <17 x i71>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 256), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[_MSPROP:%.*]] = or <17 x i71> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[_MSPROP1:%.*]] = or <17 x i71> [[_MSPROP]], zeroinitializer diff --git a/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll b/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll index 5da4c7357b6ad..bfc47dc6bdc2b 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fadd.ll @@ -13,7 +13,7 @@ define float @test_v2f32(float %a0, <2 x float> %a1) #0 { ; CHECK-LABEL: define float @test_v2f32( ; CHECK-SAME: float [[A0:%.*]], <2 x float> [[A1:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]] @@ -29,7 +29,7 @@ define float @test_v4f32(float %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: define float @test_v4f32( ; CHECK-SAME: float [[A0:%.*]], <4 x float> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]] @@ -45,7 +45,7 @@ define float @test_v8f32(float %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: define float @test_v8f32( ; CHECK-SAME: float [[A0:%.*]], <8 x float> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[TMP2]]) ; CHECK-NEXT: 
[[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]] @@ -61,7 +61,7 @@ define float @test_v16f32(float %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: define float @test_v16f32( ; CHECK-SAME: float [[A0:%.*]], <16 x float> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]] @@ -138,7 +138,7 @@ define double @test_v2f64(double %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: define double @test_v2f64( ; CHECK-SAME: double [[A0:%.*]], <2 x double> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]] @@ -154,7 +154,7 @@ define double @test_v4f64(double %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: define double @test_v4f64( ; CHECK-SAME: double [[A0:%.*]], <4 x double> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]] @@ -170,7 +170,7 @@ define double @test_v8f64(double %a0, <8 x double> %a1) #0 { ; CHECK-LABEL: define double @test_v8f64( ; CHECK-SAME: double [[A0:%.*]], <8 x double> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]] @@ -186,7 +186,7 @@ define double @test_v16f64(double %a0, <16 x double> %a1) #0 { ; CHECK-LABEL: define double @test_v16f64( ; CHECK-SAME: double [[A0:%.*]], <16 x double> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]] diff --git 
a/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll b/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll index 0c1c4edc4367f..db86d55616c62 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/vector-reduce-fmul.ll @@ -13,7 +13,7 @@ define float @test_v2f32(float %a0, <2 x float> %a1) #0 { ; CHECK-LABEL: define float @test_v2f32( ; CHECK-SAME: float [[A0:%.*]], <2 x float> [[A1:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]] @@ -29,7 +29,7 @@ define float @test_v4f32(float %a0, <4 x float> %a1) #0 { ; CHECK-LABEL: define float @test_v4f32( ; CHECK-SAME: float [[A0:%.*]], <4 x float> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]] @@ -45,7 +45,7 @@ define float @test_v8f32(float %a0, <8 x float> %a1) #0 { ; CHECK-LABEL: define float @test_v8f32( ; CHECK-SAME: float [[A0:%.*]], <8 x float> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]] @@ -61,7 +61,7 @@ define float @test_v16f32(float %a0, <16 x float> %a1) #0 { ; CHECK-LABEL: define float @test_v16f32( ; CHECK-SAME: float [[A0:%.*]], <16 x float> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP1]], [[TMP3]] @@ -138,7 +138,7 @@ define double @test_v2f64(double %a0, <2 x double> %a1) #0 { ; CHECK-LABEL: define double @test_v2f64( ; CHECK-SAME: double [[A0:%.*]], <2 x double> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr getelementptr (i8, ptr 
@__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]] @@ -154,7 +154,7 @@ define double @test_v4f64(double %a0, <4 x double> %a1) #0 { ; CHECK-LABEL: define double @test_v4f64( ; CHECK-SAME: double [[A0:%.*]], <4 x double> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]] @@ -170,7 +170,7 @@ define double @test_v8f64(double %a0, <8 x double> %a1) #0 { ; CHECK-LABEL: define double @test_v8f64( ; CHECK-SAME: double [[A0:%.*]], <8 x double> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v8i64(<8 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]] @@ -186,7 +186,7 @@ define double @test_v16f64(double %a0, <16 x double> %a1) #0 { ; CHECK-LABEL: define double @test_v16f64( ; CHECK-SAME: double [[A0:%.*]], <16 x double> [[A1:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = or i64 [[TMP1]], [[TMP3]] diff --git a/llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll b/llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll index d1060fb33e1bc..1146131465883 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/vector_arith.ll @@ -15,7 +15,7 @@ define <4 x i32> @Test_sse2_pmadd_wd(<8 x i16> %a, <8 x i16> %b) sanitize_memory ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[TMP0]], zeroinitializer ; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer @@ -46,7 +46,7 @@ define <1 x i64> @Test_ssse3_pmadd_ub_sw(<1 x i64> %a, <1 x i64> %b) sanitize_me ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = 
load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8> @@ -82,7 +82,7 @@ define <2 x i64> @Test_x86_sse2_psad_bw(<16 x i8> %a, <16 x i8> %b) sanitize_mem ; CHECK-LABEL: define <2 x i64> @Test_x86_sse2_psad_bw( ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64> @@ -104,7 +104,7 @@ define <1 x i64> @Test_x86_mmx_psad_bw(<1 x i64> %a, <1 x i64> %b) sanitize_memo ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR1]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8 -; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load <1 x i64>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = or <1 x i64> [[TMP0]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP2]] to i64 diff --git a/llvm/test/Instrumentation/MemorySanitizer/vscale.ll b/llvm/test/Instrumentation/MemorySanitizer/vscale.ll index 0c0b393667bf0..514abedf8fe1a 100644 --- a/llvm/test/Instrumentation/MemorySanitizer/vscale.ll +++ b/llvm/test/Instrumentation/MemorySanitizer/vscale.ll @@ -435,7 +435,7 @@ define void @fn_param( %a, ptr %b) sanitize_memory { define void @test_param(ptr %a, ptr %b) sanitize_memory { ; CHECK-LABEL: define void @test_param( ; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 ; CHECK-NEXT: call void @llvm.donothing() ; CHECK-NEXT: [[TMP2:%.*]] = load , ptr [[A]], align 8 ; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64 @@ -455,8 +455,8 @@ define void @test_param(ptr %a, ptr %b) sanitize_memory { ; ; ORIGIN-LABEL: define void @test_param( ; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { -; ORIGIN-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8 -; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4 +; ORIGIN-NEXT: [[TMP1:%.*]] = load i64, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8 +; ORIGIN-NEXT: [[TMP2:%.*]] = load i32, ptr getelementptr (i8, ptr @__msan_param_origin_tls, i64 8), align 4 ; ORIGIN-NEXT: call void @llvm.donothing() ; ORIGIN-NEXT: [[TMP3:%.*]] = load , ptr [[A]], align 8 ; ORIGIN-NEXT: [[TMP4:%.*]] 
= ptrtoint ptr [[A]] to i64 diff --git a/llvm/test/Linker/Inputs/errno-tbaa-cxx-metadata.ll b/llvm/test/Linker/Inputs/errno-tbaa-cxx-metadata.ll new file mode 100644 index 0000000000000..eefb6d833f636 --- /dev/null +++ b/llvm/test/Linker/Inputs/errno-tbaa-cxx-metadata.ll @@ -0,0 +1,5 @@ +!llvm.errno.tbaa = !{!0} +!0 = !{!1, !1, i64 0} +!1 = !{!"int", !2, i64 0} +!2 = !{!"omnipotent char", !3, i64 0} +!3 = !{!"Simple C++ TBAA"} diff --git a/llvm/test/Linker/Inputs/errno-tbaa-metadata.ll b/llvm/test/Linker/Inputs/errno-tbaa-metadata.ll new file mode 100644 index 0000000000000..5dd468776bdee --- /dev/null +++ b/llvm/test/Linker/Inputs/errno-tbaa-metadata.ll @@ -0,0 +1,5 @@ +!llvm.errno.tbaa = !{!0} +!0 = !{!1, !1, i64 0} +!1 = !{!"int", !2, i64 0} +!2 = !{!"omnipotent char", !3, i64 0} +!3 = !{!"Simple C/C++ TBAA"} diff --git a/llvm/test/Linker/link-errno-tbaa-metadata.ll b/llvm/test/Linker/link-errno-tbaa-metadata.ll new file mode 100644 index 0000000000000..b58373d3acbef --- /dev/null +++ b/llvm/test/Linker/link-errno-tbaa-metadata.ll @@ -0,0 +1,8 @@ +; RUN: llvm-link %S/Inputs/errno-tbaa-metadata.ll %S/Inputs/errno-tbaa-cxx-metadata.ll -S -o - | FileCheck %s --check-prefix=CHECK-MERGE +; RUN: llvm-link %S/Inputs/errno-tbaa-metadata.ll %S/Inputs/errno-tbaa-metadata.ll -S -o - | FileCheck %s --check-prefix=CHECK-DEDUP + +; Ensure merging when linking modules w/ different errno TBAA hierarchies. +; CHECK-MERGE: !llvm.errno.tbaa = !{![[NODE0:[0-9]+]], ![[NODE1:[0-9]+]]} + +; Ensure deduplication when linking modules w/ identical errno TBAA nodes. +; CHECK-DEDUP: !llvm.errno.tbaa = !{![[NODE:[0-9]+]]} diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s index a313741ffe22d..40fcd6f4f6955 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s @@ -73,98 +73,98 @@ v_tanh_f32 v5, src_scc v_tanh_f32 v255, 0xaf123456 // GFX1250: v_tanh_f32_e32 v255, 0xaf123456 ; encoding: [0xff,0x3c,0xfe,0x7f,0x56,0x34,0x12,0xaf] -v_tanh_f16 v5, v1 -// GFX1250: v_tanh_f16_e32 v5, v1 ; encoding: [0x01,0x3f,0x0a,0x7e] +v_tanh_f16 v5.l, v1.l +// GFX1250: v_tanh_f16_e32 v5.l, v1.l ; encoding: [0x01,0x3f,0x0a,0x7e] -v_tanh_f16 v5, v127 -// GFX1250: v_tanh_f16_e32 v5, v127 ; encoding: [0x7f,0x3f,0x0a,0x7e] +v_tanh_f16 v5.l, v127.l +// GFX1250: v_tanh_f16_e32 v5.l, v127.l ; encoding: [0x7f,0x3f,0x0a,0x7e] -v_tanh_f16 v5, s1 -// GFX1250: v_tanh_f16_e32 v5, s1 ; encoding: [0x01,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, s1 +// GFX1250: v_tanh_f16_e32 v5.l, s1 ; encoding: [0x01,0x3e,0x0a,0x7e] -v_tanh_f16 v5, s105 -// GFX1250: v_tanh_f16_e32 v5, s105 ; encoding: [0x69,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, s105 +// GFX1250: v_tanh_f16_e32 v5.l, s105 ; encoding: [0x69,0x3e,0x0a,0x7e] -v_tanh_f16 v5, vcc_lo -// GFX1250: v_tanh_f16_e32 v5, vcc_lo ; encoding: [0x6a,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, vcc_lo +// GFX1250: v_tanh_f16_e32 v5.l, vcc_lo ; encoding: [0x6a,0x3e,0x0a,0x7e] -v_tanh_f16 v5, vcc_hi -// GFX1250: v_tanh_f16_e32 v5, vcc_hi ; encoding: [0x6b,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, vcc_hi +// GFX1250: v_tanh_f16_e32 v5.l, vcc_hi ; encoding: [0x6b,0x3e,0x0a,0x7e] -v_tanh_f16 v5, ttmp15 -// GFX1250: v_tanh_f16_e32 v5, ttmp15 ; encoding: [0x7b,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, ttmp15 +// GFX1250: v_tanh_f16_e32 v5.l, ttmp15 ; encoding: [0x7b,0x3e,0x0a,0x7e] -v_tanh_f16 v5, m0 -// GFX1250: v_tanh_f16_e32 v5, m0 ; encoding: [0x7d,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, m0 +// GFX1250: v_tanh_f16_e32 v5.l, m0 ; encoding: [0x7d,0x3e,0x0a,0x7e] -v_tanh_f16 v5, exec_lo 
-// GFX1250: v_tanh_f16_e32 v5, exec_lo ; encoding: [0x7e,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, exec_lo +// GFX1250: v_tanh_f16_e32 v5.l, exec_lo ; encoding: [0x7e,0x3e,0x0a,0x7e] -v_tanh_f16 v5, exec_hi -// GFX1250: v_tanh_f16_e32 v5, exec_hi ; encoding: [0x7f,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, exec_hi +// GFX1250: v_tanh_f16_e32 v5.l, exec_hi ; encoding: [0x7f,0x3e,0x0a,0x7e] -v_tanh_f16 v5, null -// GFX1250: v_tanh_f16_e32 v5, null ; encoding: [0x7c,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, null +// GFX1250: v_tanh_f16_e32 v5.l, null ; encoding: [0x7c,0x3e,0x0a,0x7e] -v_tanh_f16 v5, -1 -// GFX1250: v_tanh_f16_e32 v5, -1 ; encoding: [0xc1,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, -1 +// GFX1250: v_tanh_f16_e32 v5.l, -1 ; encoding: [0xc1,0x3e,0x0a,0x7e] -v_tanh_f16 v5, 0.5 -// GFX1250: v_tanh_f16_e32 v5, 0.5 ; encoding: [0xf0,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, 0.5 +// GFX1250: v_tanh_f16_e32 v5.l, 0.5 ; encoding: [0xf0,0x3e,0x0a,0x7e] -v_tanh_f16 v5, src_scc -// GFX1250: v_tanh_f16_e32 v5, src_scc ; encoding: [0xfd,0x3e,0x0a,0x7e] +v_tanh_f16 v5.l, src_scc +// GFX1250: v_tanh_f16_e32 v5.l, src_scc ; encoding: [0xfd,0x3e,0x0a,0x7e] -v_tanh_f16 v127, 0x8000 -// GFX1250: v_tanh_f16_e32 v127, 0x8000 ; encoding: [0xff,0x3e,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_tanh_f16 v127.l, 0x8000 +// GFX1250: v_tanh_f16_e32 v127.l, 0x8000 ; encoding: [0xff,0x3e,0xfe,0x7e,0x00,0x80,0x00,0x00] v_tanh_f16 v5.h, v1.h // GFX1250: v_tanh_f16_e32 v5.h, v1.h ; encoding: [0x81,0x3f,0x0a,0x7f] -v_tanh_bf16 v5, v1 -// GFX1250: v_tanh_bf16_e32 v5, v1 ; encoding: [0x01,0x95,0x0a,0x7e] +v_tanh_bf16 v5.l, v1.l +// GFX1250: v_tanh_bf16_e32 v5.l, v1.l ; encoding: [0x01,0x95,0x0a,0x7e] -v_tanh_bf16 v5, v127 -// GFX1250: v_tanh_bf16_e32 v5, v127 ; encoding: [0x7f,0x95,0x0a,0x7e] +v_tanh_bf16 v5.l, v127.l +// GFX1250: v_tanh_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0x95,0x0a,0x7e] -v_tanh_bf16 v5, s1 -// GFX1250: v_tanh_bf16_e32 v5, s1 ; encoding: [0x01,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, s1 +// GFX1250: v_tanh_bf16_e32 v5.l, s1 ; encoding: [0x01,0x94,0x0a,0x7e] -v_tanh_bf16 v5, s105 -// GFX1250: v_tanh_bf16_e32 v5, s105 ; encoding: [0x69,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, s105 +// GFX1250: v_tanh_bf16_e32 v5.l, s105 ; encoding: [0x69,0x94,0x0a,0x7e] -v_tanh_bf16 v5, vcc_lo -// GFX1250: v_tanh_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, vcc_lo +// GFX1250: v_tanh_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0x94,0x0a,0x7e] -v_tanh_bf16 v5, vcc_hi -// GFX1250: v_tanh_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, vcc_hi +// GFX1250: v_tanh_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0x94,0x0a,0x7e] -v_tanh_bf16 v5, ttmp15 -// GFX1250: v_tanh_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, ttmp15 +// GFX1250: v_tanh_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0x94,0x0a,0x7e] -v_tanh_bf16 v5, m0 -// GFX1250: v_tanh_bf16_e32 v5, m0 ; encoding: [0x7d,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, m0 +// GFX1250: v_tanh_bf16_e32 v5.l, m0 ; encoding: [0x7d,0x94,0x0a,0x7e] -v_tanh_bf16 v5, exec_lo -// GFX1250: v_tanh_bf16_e32 v5, exec_lo ; encoding: [0x7e,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, exec_lo +// GFX1250: v_tanh_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0x94,0x0a,0x7e] -v_tanh_bf16 v5, exec_hi -// GFX1250: v_tanh_bf16_e32 v5, exec_hi ; encoding: [0x7f,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, exec_hi +// GFX1250: v_tanh_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0x94,0x0a,0x7e] -v_tanh_bf16 v5, null -// GFX1250: v_tanh_bf16_e32 v5, null ; encoding: [0x7c,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, null +// GFX1250: 
v_tanh_bf16_e32 v5.l, null ; encoding: [0x7c,0x94,0x0a,0x7e] -v_tanh_bf16 v5, -1 -// GFX1250: v_tanh_bf16_e32 v5, -1 ; encoding: [0xc1,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, -1 +// GFX1250: v_tanh_bf16_e32 v5.l, -1 ; encoding: [0xc1,0x94,0x0a,0x7e] -v_tanh_bf16 v5, 0.5 -// GFX1250: v_tanh_bf16_e32 v5, 0.5 ; encoding: [0xf0,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, 0.5 +// GFX1250: v_tanh_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0x94,0x0a,0x7e] -v_tanh_bf16 v5, src_scc -// GFX1250: v_tanh_bf16_e32 v5, src_scc ; encoding: [0xfd,0x94,0x0a,0x7e] +v_tanh_bf16 v5.l, src_scc +// GFX1250: v_tanh_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0x94,0x0a,0x7e] -v_tanh_bf16 v127, 0x8000 -// GFX1250: v_tanh_bf16_e32 v127, 0x8000 ; encoding: [0xff,0x94,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_tanh_bf16 v127.l, 0x8000 +// GFX1250: v_tanh_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0x94,0xfe,0x7e,0x00,0x80,0x00,0x00] v_tanh_bf16 v5.h, v1.h // GFX1250: v_tanh_bf16_e32 v5.h, v1.h ; encoding: [0x81,0x95,0x0a,0x7f] @@ -214,347 +214,347 @@ v_prng_b32 v5, src_scc v_prng_b32 v255, 0xaf123456 // GFX1250: v_prng_b32_e32 v255, 0xaf123456 ; encoding: [0xff,0x96,0xfe,0x7f,0x56,0x34,0x12,0xaf] -v_rcp_bf16 v5, v1 -// GFX1250: v_rcp_bf16_e32 v5, v1 ; encoding: [0x01,0xf3,0x0a,0x7e] +v_rcp_bf16 v5.l, v1.l +// GFX1250: v_rcp_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf3,0x0a,0x7e] -v_rcp_bf16 v5, v127 -// GFX1250: v_rcp_bf16_e32 v5, v127 ; encoding: [0x7f,0xf3,0x0a,0x7e] +v_rcp_bf16 v5.l, v127.l +// GFX1250: v_rcp_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf3,0x0a,0x7e] -v_rcp_bf16 v5, s1 -// GFX1250: v_rcp_bf16_e32 v5, s1 ; encoding: [0x01,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, s1 +// GFX1250: v_rcp_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, s105 -// GFX1250: v_rcp_bf16_e32 v5, s105 ; encoding: [0x69,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, s105 +// GFX1250: v_rcp_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, vcc_lo -// GFX1250: v_rcp_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, vcc_lo +// GFX1250: v_rcp_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, vcc_hi -// GFX1250: v_rcp_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, vcc_hi +// GFX1250: v_rcp_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, ttmp15 -// GFX1250: v_rcp_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, ttmp15 +// GFX1250: v_rcp_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, m0 -// GFX1250: v_rcp_bf16_e32 v5, m0 ; encoding: [0x7d,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, m0 +// GFX1250: v_rcp_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, exec_lo -// GFX1250: v_rcp_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, exec_lo +// GFX1250: v_rcp_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, exec_hi -// GFX1250: v_rcp_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, exec_hi +// GFX1250: v_rcp_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, null -// GFX1250: v_rcp_bf16_e32 v5, null ; encoding: [0x7c,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, null +// GFX1250: v_rcp_bf16_e32 v5.l, null ; encoding: [0x7c,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, -1 -// GFX1250: v_rcp_bf16_e32 v5, -1 ; encoding: [0xc1,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, -1 +// GFX1250: v_rcp_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, 0.5 -// GFX1250: v_rcp_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, 0.5 +// 
GFX1250: v_rcp_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf2,0x0a,0x7e] -v_rcp_bf16 v5, src_scc -// GFX1250: v_rcp_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf2,0x0a,0x7e] +v_rcp_bf16 v5.l, src_scc +// GFX1250: v_rcp_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf2,0x0a,0x7e] -v_rcp_bf16 v127, 0x8000 -// GFX1250: v_rcp_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf2,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_rcp_bf16 v127.l, 0x8000 +// GFX1250: v_rcp_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf2,0xfe,0x7e,0x00,0x80,0x00,0x00] v_rcp_bf16 v5.h, v1.h // GFX1250: v_rcp_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf3,0x0a,0x7f] -v_sqrt_bf16 v5, v1 -// GFX1250: v_sqrt_bf16_e32 v5, v1 ; encoding: [0x01,0xf5,0x0a,0x7e] +v_sqrt_bf16 v5.l, v1.l +// GFX1250: v_sqrt_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf5,0x0a,0x7e] -v_sqrt_bf16 v5, v127 -// GFX1250: v_sqrt_bf16_e32 v5, v127 ; encoding: [0x7f,0xf5,0x0a,0x7e] +v_sqrt_bf16 v5.l, v127.l +// GFX1250: v_sqrt_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf5,0x0a,0x7e] -v_sqrt_bf16 v5, s1 -// GFX1250: v_sqrt_bf16_e32 v5, s1 ; encoding: [0x01,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, s1 +// GFX1250: v_sqrt_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, s105 -// GFX1250: v_sqrt_bf16_e32 v5, s105 ; encoding: [0x69,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, s105 +// GFX1250: v_sqrt_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, vcc_lo -// GFX1250: v_sqrt_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, vcc_lo +// GFX1250: v_sqrt_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, vcc_hi -// GFX1250: v_sqrt_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, vcc_hi +// GFX1250: v_sqrt_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, ttmp15 -// GFX1250: v_sqrt_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, ttmp15 +// GFX1250: v_sqrt_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, m0 -// GFX1250: v_sqrt_bf16_e32 v5, m0 ; encoding: [0x7d,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, m0 +// GFX1250: v_sqrt_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, exec_lo -// GFX1250: v_sqrt_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, exec_lo +// GFX1250: v_sqrt_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, exec_hi -// GFX1250: v_sqrt_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, exec_hi +// GFX1250: v_sqrt_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, null -// GFX1250: v_sqrt_bf16_e32 v5, null ; encoding: [0x7c,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, null +// GFX1250: v_sqrt_bf16_e32 v5.l, null ; encoding: [0x7c,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, -1 -// GFX1250: v_sqrt_bf16_e32 v5, -1 ; encoding: [0xc1,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, -1 +// GFX1250: v_sqrt_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, 0.5 -// GFX1250: v_sqrt_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, 0.5 +// GFX1250: v_sqrt_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf4,0x0a,0x7e] -v_sqrt_bf16 v5, src_scc -// GFX1250: v_sqrt_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf4,0x0a,0x7e] +v_sqrt_bf16 v5.l, src_scc +// GFX1250: v_sqrt_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf4,0x0a,0x7e] -v_sqrt_bf16 v127, 0x8000 -// GFX1250: v_sqrt_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf4,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_sqrt_bf16 v127.l, 0x8000 +// GFX1250: v_sqrt_bf16_e32 v127.l, 0x8000 ; encoding: 
[0xff,0xf4,0xfe,0x7e,0x00,0x80,0x00,0x00] v_sqrt_bf16 v5.h, v1.h // GFX1250: v_sqrt_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf5,0x0a,0x7f] -v_rsq_bf16 v5, v1 -// GFX1250: v_rsq_bf16_e32 v5, v1 ; encoding: [0x01,0xf7,0x0a,0x7e] +v_rsq_bf16 v5.l, v1.l +// GFX1250: v_rsq_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf7,0x0a,0x7e] -v_rsq_bf16 v5, v127 -// GFX1250: v_rsq_bf16_e32 v5, v127 ; encoding: [0x7f,0xf7,0x0a,0x7e] +v_rsq_bf16 v5.l, v127.l +// GFX1250: v_rsq_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf7,0x0a,0x7e] -v_rsq_bf16 v5, s1 -// GFX1250: v_rsq_bf16_e32 v5, s1 ; encoding: [0x01,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, s1 +// GFX1250: v_rsq_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, s105 -// GFX1250: v_rsq_bf16_e32 v5, s105 ; encoding: [0x69,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, s105 +// GFX1250: v_rsq_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, vcc_lo -// GFX1250: v_rsq_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, vcc_lo +// GFX1250: v_rsq_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, vcc_hi -// GFX1250: v_rsq_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, vcc_hi +// GFX1250: v_rsq_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, ttmp15 -// GFX1250: v_rsq_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, ttmp15 +// GFX1250: v_rsq_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, m0 -// GFX1250: v_rsq_bf16_e32 v5, m0 ; encoding: [0x7d,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, m0 +// GFX1250: v_rsq_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, exec_lo -// GFX1250: v_rsq_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, exec_lo +// GFX1250: v_rsq_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, exec_hi -// GFX1250: v_rsq_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, exec_hi +// GFX1250: v_rsq_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, null -// GFX1250: v_rsq_bf16_e32 v5, null ; encoding: [0x7c,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, null +// GFX1250: v_rsq_bf16_e32 v5.l, null ; encoding: [0x7c,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, -1 -// GFX1250: v_rsq_bf16_e32 v5, -1 ; encoding: [0xc1,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, -1 +// GFX1250: v_rsq_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, 0.5 -// GFX1250: v_rsq_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, 0.5 +// GFX1250: v_rsq_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf6,0x0a,0x7e] -v_rsq_bf16 v5, src_scc -// GFX1250: v_rsq_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf6,0x0a,0x7e] +v_rsq_bf16 v5.l, src_scc +// GFX1250: v_rsq_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf6,0x0a,0x7e] -v_rsq_bf16 v127, 0x8000 -// GFX1250: v_rsq_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf6,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_rsq_bf16 v127.l, 0x8000 +// GFX1250: v_rsq_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf6,0xfe,0x7e,0x00,0x80,0x00,0x00] v_rsq_bf16 v5.h, v1.h // GFX1250: v_rsq_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf7,0x0a,0x7f] -v_log_bf16 v5, v1 -// GFX1250: v_log_bf16_e32 v5, v1 ; encoding: [0x01,0xf9,0x0a,0x7e] +v_log_bf16 v5.l, v1.l +// GFX1250: v_log_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xf9,0x0a,0x7e] -v_log_bf16 v5, v127 -// GFX1250: v_log_bf16_e32 v5, v127 ; encoding: [0x7f,0xf9,0x0a,0x7e] +v_log_bf16 v5.l, v127.l +// GFX1250: v_log_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xf9,0x0a,0x7e] -v_log_bf16 v5, s1 -// GFX1250: 
v_log_bf16_e32 v5, s1 ; encoding: [0x01,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, s1 +// GFX1250: v_log_bf16_e32 v5.l, s1 ; encoding: [0x01,0xf8,0x0a,0x7e] -v_log_bf16 v5, s105 -// GFX1250: v_log_bf16_e32 v5, s105 ; encoding: [0x69,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, s105 +// GFX1250: v_log_bf16_e32 v5.l, s105 ; encoding: [0x69,0xf8,0x0a,0x7e] -v_log_bf16 v5, vcc_lo -// GFX1250: v_log_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, vcc_lo +// GFX1250: v_log_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xf8,0x0a,0x7e] -v_log_bf16 v5, vcc_hi -// GFX1250: v_log_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, vcc_hi +// GFX1250: v_log_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xf8,0x0a,0x7e] -v_log_bf16 v5, ttmp15 -// GFX1250: v_log_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, ttmp15 +// GFX1250: v_log_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xf8,0x0a,0x7e] -v_log_bf16 v5, m0 -// GFX1250: v_log_bf16_e32 v5, m0 ; encoding: [0x7d,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, m0 +// GFX1250: v_log_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xf8,0x0a,0x7e] -v_log_bf16 v5, exec_lo -// GFX1250: v_log_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, exec_lo +// GFX1250: v_log_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xf8,0x0a,0x7e] -v_log_bf16 v5, exec_hi -// GFX1250: v_log_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, exec_hi +// GFX1250: v_log_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xf8,0x0a,0x7e] -v_log_bf16 v5, null -// GFX1250: v_log_bf16_e32 v5, null ; encoding: [0x7c,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, null +// GFX1250: v_log_bf16_e32 v5.l, null ; encoding: [0x7c,0xf8,0x0a,0x7e] -v_log_bf16 v5, -1 -// GFX1250: v_log_bf16_e32 v5, -1 ; encoding: [0xc1,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, -1 +// GFX1250: v_log_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xf8,0x0a,0x7e] -v_log_bf16 v5, 0.5 -// GFX1250: v_log_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, 0.5 +// GFX1250: v_log_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xf8,0x0a,0x7e] -v_log_bf16 v5, src_scc -// GFX1250: v_log_bf16_e32 v5, src_scc ; encoding: [0xfd,0xf8,0x0a,0x7e] +v_log_bf16 v5.l, src_scc +// GFX1250: v_log_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xf8,0x0a,0x7e] -v_log_bf16 v127, 0x8000 -// GFX1250: v_log_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xf8,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_log_bf16 v127.l, 0x8000 +// GFX1250: v_log_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xf8,0xfe,0x7e,0x00,0x80,0x00,0x00] v_log_bf16 v5.h, v1.h // GFX1250: v_log_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xf9,0x0a,0x7f] -v_exp_bf16 v5, v1 -// GFX1250: v_exp_bf16_e32 v5, v1 ; encoding: [0x01,0xfb,0x0a,0x7e] +v_exp_bf16 v5.l, v1.l +// GFX1250: v_exp_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xfb,0x0a,0x7e] -v_exp_bf16 v5, v127 -// GFX1250: v_exp_bf16_e32 v5, v127 ; encoding: [0x7f,0xfb,0x0a,0x7e] +v_exp_bf16 v5.l, v127.l +// GFX1250: v_exp_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xfb,0x0a,0x7e] -v_exp_bf16 v5, s1 -// GFX1250: v_exp_bf16_e32 v5, s1 ; encoding: [0x01,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, s1 +// GFX1250: v_exp_bf16_e32 v5.l, s1 ; encoding: [0x01,0xfa,0x0a,0x7e] -v_exp_bf16 v5, s105 -// GFX1250: v_exp_bf16_e32 v5, s105 ; encoding: [0x69,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, s105 +// GFX1250: v_exp_bf16_e32 v5.l, s105 ; encoding: [0x69,0xfa,0x0a,0x7e] -v_exp_bf16 v5, vcc_lo -// GFX1250: v_exp_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, vcc_lo +// GFX1250: v_exp_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xfa,0x0a,0x7e] -v_exp_bf16 v5, 
vcc_hi -// GFX1250: v_exp_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, vcc_hi +// GFX1250: v_exp_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xfa,0x0a,0x7e] -v_exp_bf16 v5, ttmp15 -// GFX1250: v_exp_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, ttmp15 +// GFX1250: v_exp_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xfa,0x0a,0x7e] -v_exp_bf16 v5, m0 -// GFX1250: v_exp_bf16_e32 v5, m0 ; encoding: [0x7d,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, m0 +// GFX1250: v_exp_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xfa,0x0a,0x7e] -v_exp_bf16 v5, exec_lo -// GFX1250: v_exp_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, exec_lo +// GFX1250: v_exp_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xfa,0x0a,0x7e] -v_exp_bf16 v5, exec_hi -// GFX1250: v_exp_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, exec_hi +// GFX1250: v_exp_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xfa,0x0a,0x7e] -v_exp_bf16 v5, null -// GFX1250: v_exp_bf16_e32 v5, null ; encoding: [0x7c,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, null +// GFX1250: v_exp_bf16_e32 v5.l, null ; encoding: [0x7c,0xfa,0x0a,0x7e] -v_exp_bf16 v5, -1 -// GFX1250: v_exp_bf16_e32 v5, -1 ; encoding: [0xc1,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, -1 +// GFX1250: v_exp_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xfa,0x0a,0x7e] -v_exp_bf16 v5, 0.5 -// GFX1250: v_exp_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, 0.5 +// GFX1250: v_exp_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xfa,0x0a,0x7e] -v_exp_bf16 v5, src_scc -// GFX1250: v_exp_bf16_e32 v5, src_scc ; encoding: [0xfd,0xfa,0x0a,0x7e] +v_exp_bf16 v5.l, src_scc +// GFX1250: v_exp_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xfa,0x0a,0x7e] -v_exp_bf16 v127, 0x8000 -// GFX1250: v_exp_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xfa,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_exp_bf16 v127.l, 0x8000 +// GFX1250: v_exp_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xfa,0xfe,0x7e,0x00,0x80,0x00,0x00] v_exp_bf16 v5.h, v1.h // GFX1250: v_exp_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xfb,0x0a,0x7f] -v_sin_bf16 v5, v1 -// GFX1250: v_sin_bf16_e32 v5, v1 ; encoding: [0x01,0xfd,0x0a,0x7e] +v_sin_bf16 v5.l, v1.l +// GFX1250: v_sin_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xfd,0x0a,0x7e] -v_sin_bf16 v5, v127 -// GFX1250: v_sin_bf16_e32 v5, v127 ; encoding: [0x7f,0xfd,0x0a,0x7e] +v_sin_bf16 v5.l, v127.l +// GFX1250: v_sin_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xfd,0x0a,0x7e] -v_sin_bf16 v5, s1 -// GFX1250: v_sin_bf16_e32 v5, s1 ; encoding: [0x01,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, s1 +// GFX1250: v_sin_bf16_e32 v5.l, s1 ; encoding: [0x01,0xfc,0x0a,0x7e] -v_sin_bf16 v5, s105 -// GFX1250: v_sin_bf16_e32 v5, s105 ; encoding: [0x69,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, s105 +// GFX1250: v_sin_bf16_e32 v5.l, s105 ; encoding: [0x69,0xfc,0x0a,0x7e] -v_sin_bf16 v5, vcc_lo -// GFX1250: v_sin_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, vcc_lo +// GFX1250: v_sin_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xfc,0x0a,0x7e] -v_sin_bf16 v5, vcc_hi -// GFX1250: v_sin_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, vcc_hi +// GFX1250: v_sin_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xfc,0x0a,0x7e] -v_sin_bf16 v5, ttmp15 -// GFX1250: v_sin_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, ttmp15 +// GFX1250: v_sin_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xfc,0x0a,0x7e] -v_sin_bf16 v5, m0 -// GFX1250: v_sin_bf16_e32 v5, m0 ; encoding: [0x7d,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, m0 +// GFX1250: v_sin_bf16_e32 v5.l, m0 ; encoding: 
[0x7d,0xfc,0x0a,0x7e] -v_sin_bf16 v5, exec_lo -// GFX1250: v_sin_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, exec_lo +// GFX1250: v_sin_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xfc,0x0a,0x7e] -v_sin_bf16 v5, exec_hi -// GFX1250: v_sin_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, exec_hi +// GFX1250: v_sin_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xfc,0x0a,0x7e] -v_sin_bf16 v5, null -// GFX1250: v_sin_bf16_e32 v5, null ; encoding: [0x7c,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, null +// GFX1250: v_sin_bf16_e32 v5.l, null ; encoding: [0x7c,0xfc,0x0a,0x7e] -v_sin_bf16 v5, -1 -// GFX1250: v_sin_bf16_e32 v5, -1 ; encoding: [0xc1,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, -1 +// GFX1250: v_sin_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xfc,0x0a,0x7e] -v_sin_bf16 v5, 0.5 -// GFX1250: v_sin_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, 0.5 +// GFX1250: v_sin_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xfc,0x0a,0x7e] -v_sin_bf16 v5, src_scc -// GFX1250: v_sin_bf16_e32 v5, src_scc ; encoding: [0xfd,0xfc,0x0a,0x7e] +v_sin_bf16 v5.l, src_scc +// GFX1250: v_sin_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xfc,0x0a,0x7e] -v_sin_bf16 v127, 0x8000 -// GFX1250: v_sin_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xfc,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_sin_bf16 v127.l, 0x8000 +// GFX1250: v_sin_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xfc,0xfe,0x7e,0x00,0x80,0x00,0x00] v_sin_bf16 v5.h, v1.h // GFX1250: v_sin_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xfd,0x0a,0x7f] -v_cos_bf16 v5, v1 -// GFX1250: v_cos_bf16_e32 v5, v1 ; encoding: [0x01,0xff,0x0a,0x7e] +v_cos_bf16 v5.l, v1.l +// GFX1250: v_cos_bf16_e32 v5.l, v1.l ; encoding: [0x01,0xff,0x0a,0x7e] -v_cos_bf16 v5, v127 -// GFX1250: v_cos_bf16_e32 v5, v127 ; encoding: [0x7f,0xff,0x0a,0x7e] +v_cos_bf16 v5.l, v127.l +// GFX1250: v_cos_bf16_e32 v5.l, v127.l ; encoding: [0x7f,0xff,0x0a,0x7e] -v_cos_bf16 v5, s1 -// GFX1250: v_cos_bf16_e32 v5, s1 ; encoding: [0x01,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, s1 +// GFX1250: v_cos_bf16_e32 v5.l, s1 ; encoding: [0x01,0xfe,0x0a,0x7e] -v_cos_bf16 v5, s105 -// GFX1250: v_cos_bf16_e32 v5, s105 ; encoding: [0x69,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, s105 +// GFX1250: v_cos_bf16_e32 v5.l, s105 ; encoding: [0x69,0xfe,0x0a,0x7e] -v_cos_bf16 v5, vcc_lo -// GFX1250: v_cos_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, vcc_lo +// GFX1250: v_cos_bf16_e32 v5.l, vcc_lo ; encoding: [0x6a,0xfe,0x0a,0x7e] -v_cos_bf16 v5, vcc_hi -// GFX1250: v_cos_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, vcc_hi +// GFX1250: v_cos_bf16_e32 v5.l, vcc_hi ; encoding: [0x6b,0xfe,0x0a,0x7e] -v_cos_bf16 v5, ttmp15 -// GFX1250: v_cos_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, ttmp15 +// GFX1250: v_cos_bf16_e32 v5.l, ttmp15 ; encoding: [0x7b,0xfe,0x0a,0x7e] -v_cos_bf16 v5, m0 -// GFX1250: v_cos_bf16_e32 v5, m0 ; encoding: [0x7d,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, m0 +// GFX1250: v_cos_bf16_e32 v5.l, m0 ; encoding: [0x7d,0xfe,0x0a,0x7e] -v_cos_bf16 v5, exec_lo -// GFX1250: v_cos_bf16_e32 v5, exec_lo ; encoding: [0x7e,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, exec_lo +// GFX1250: v_cos_bf16_e32 v5.l, exec_lo ; encoding: [0x7e,0xfe,0x0a,0x7e] -v_cos_bf16 v5, exec_hi -// GFX1250: v_cos_bf16_e32 v5, exec_hi ; encoding: [0x7f,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, exec_hi +// GFX1250: v_cos_bf16_e32 v5.l, exec_hi ; encoding: [0x7f,0xfe,0x0a,0x7e] -v_cos_bf16 v5, null -// GFX1250: v_cos_bf16_e32 v5, null ; encoding: [0x7c,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, null +// GFX1250: 
v_cos_bf16_e32 v5.l, null ; encoding: [0x7c,0xfe,0x0a,0x7e] -v_cos_bf16 v5, -1 -// GFX1250: v_cos_bf16_e32 v5, -1 ; encoding: [0xc1,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, -1 +// GFX1250: v_cos_bf16_e32 v5.l, -1 ; encoding: [0xc1,0xfe,0x0a,0x7e] -v_cos_bf16 v5, 0.5 -// GFX1250: v_cos_bf16_e32 v5, 0.5 ; encoding: [0xf0,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, 0.5 +// GFX1250: v_cos_bf16_e32 v5.l, 0.5 ; encoding: [0xf0,0xfe,0x0a,0x7e] -v_cos_bf16 v5, src_scc -// GFX1250: v_cos_bf16_e32 v5, src_scc ; encoding: [0xfd,0xfe,0x0a,0x7e] +v_cos_bf16 v5.l, src_scc +// GFX1250: v_cos_bf16_e32 v5.l, src_scc ; encoding: [0xfd,0xfe,0x0a,0x7e] -v_cos_bf16 v127, 0x8000 -// GFX1250: v_cos_bf16_e32 v127, 0x8000 ; encoding: [0xff,0xfe,0xfe,0x7e,0x00,0x80,0x00,0x00] +v_cos_bf16 v127.l, 0x8000 +// GFX1250: v_cos_bf16_e32 v127.l, 0x8000 ; encoding: [0xff,0xfe,0xfe,0x7e,0x00,0x80,0x00,0x00] v_cos_bf16 v5.h, v1.h // GFX1250: v_cos_bf16_e32 v5.h, v1.h ; encoding: [0x81,0xff,0x0a,0x7f] -v_cvt_f32_bf16 v5, v1 -// GFX1250: v_cvt_f32_bf16_e32 v5, v1 ; encoding: [0x01,0xe5,0x0a,0x7e] +v_cvt_f32_bf16 v5, v1.l +// GFX1250: v_cvt_f32_bf16_e32 v5, v1.l ; encoding: [0x01,0xe5,0x0a,0x7e] -v_cvt_f32_bf16 v5, v127 -// GFX1250: v_cvt_f32_bf16_e32 v5, v127 ; encoding: [0x7f,0xe5,0x0a,0x7e] +v_cvt_f32_bf16 v5, v127.l +// GFX1250: v_cvt_f32_bf16_e32 v5, v127.l ; encoding: [0x7f,0xe5,0x0a,0x7e] v_cvt_f32_bf16 v5, s1 // GFX1250: v_cvt_f32_bf16_e32 v5, s1 ; encoding: [0x01,0xe4,0x0a,0x7e] @@ -676,11 +676,11 @@ v_cvt_pk_f32_bf8_e32 v[2:3], 3 v_cvt_pk_f32_bf8_e32 v[4:5], 3 // GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], 3 ; encoding: [0x83,0xde,0x08,0x7e] -v_cvt_pk_f32_bf8_e32 v[2:3], v3 -// GFX1250: v_cvt_pk_f32_bf8_e32 v[2:3], v3 ; encoding: [0x03,0xdf,0x04,0x7e] +v_cvt_pk_f32_bf8_e32 v[2:3], v3.l +// GFX1250: v_cvt_pk_f32_bf8_e32 v[2:3], v3.l ; encoding: [0x03,0xdf,0x04,0x7e] -v_cvt_pk_f32_bf8_e32 v[4:5], v3 -// GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], v3 ; encoding: [0x03,0xdf,0x08,0x7e] +v_cvt_pk_f32_bf8_e32 v[4:5], v3.l +// GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], v3.l ; encoding: [0x03,0xdf,0x08,0x7e] v_cvt_pk_f32_bf8_e32 v[4:5], v127.h // GFX1250: v_cvt_pk_f32_bf8_e32 v[4:5], v127.h ; encoding: [0xff,0xdf,0x08,0x7e] @@ -703,32 +703,32 @@ v_cvt_pk_f32_fp8_e32 v[4:5], v127.h v_cvt_pk_f32_fp8_e32 v[4:5], v127.l // GFX1250: v_cvt_pk_f32_fp8_e32 v[4:5], v127.l ; encoding: [0x7f,0xdd,0x08,0x7e] -v_sat_pk4_i4_i8 v1, v2 -// GFX1250: v_sat_pk4_i4_i8_e32 v1, v2 ; encoding: [0x02,0xe7,0x02,0x7e] +v_sat_pk4_i4_i8 v1.l, v2 +// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, v2 ; encoding: [0x02,0xe7,0x02,0x7e] -v_sat_pk4_i4_i8 v1, s2 -// GFX1250: v_sat_pk4_i4_i8_e32 v1, s2 ; encoding: [0x02,0xe6,0x02,0x7e] +v_sat_pk4_i4_i8 v1.l, s2 +// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, s2 ; encoding: [0x02,0xe6,0x02,0x7e] -v_sat_pk4_i4_i8 v1, 2 -// GFX1250: v_sat_pk4_i4_i8_e32 v1, 2 ; encoding: [0x82,0xe6,0x02,0x7e] +v_sat_pk4_i4_i8 v1.l, 2 +// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, 2 ; encoding: [0x82,0xe6,0x02,0x7e] -v_sat_pk4_i4_i8 v1, 0x1234 -// GFX1250: v_sat_pk4_i4_i8_e32 v1, 0x1234 ; encoding: [0xff,0xe6,0x02,0x7e,0x34,0x12,0x00,0x00] +v_sat_pk4_i4_i8 v1.l, 0x1234 +// GFX1250: v_sat_pk4_i4_i8_e32 v1.l, 0x1234 ; encoding: [0xff,0xe6,0x02,0x7e,0x34,0x12,0x00,0x00] v_sat_pk4_i4_i8 v1.h, v2 // GFX1250: v_sat_pk4_i4_i8_e32 v1.h, v2 ; encoding: [0x02,0xe7,0x02,0x7f] -v_sat_pk4_u4_u8 v1, v2 -// GFX1250: v_sat_pk4_u4_u8_e32 v1, v2 ; encoding: [0x02,0xe9,0x02,0x7e] +v_sat_pk4_u4_u8 v1.l, v2 +// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, v2 ; encoding: [0x02,0xe9,0x02,0x7e] -v_sat_pk4_u4_u8 v1, s2 -// 
GFX1250: v_sat_pk4_u4_u8_e32 v1, s2 ; encoding: [0x02,0xe8,0x02,0x7e] +v_sat_pk4_u4_u8 v1.l, s2 +// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, s2 ; encoding: [0x02,0xe8,0x02,0x7e] -v_sat_pk4_u4_u8 v1, 2 -// GFX1250: v_sat_pk4_u4_u8_e32 v1, 2 ; encoding: [0x82,0xe8,0x02,0x7e] +v_sat_pk4_u4_u8 v1.l, 2 +// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, 2 ; encoding: [0x82,0xe8,0x02,0x7e] -v_sat_pk4_u4_u8 v1, 0x1234 -// GFX1250: v_sat_pk4_u4_u8_e32 v1, 0x1234 ; encoding: [0xff,0xe8,0x02,0x7e,0x34,0x12,0x00,0x00] +v_sat_pk4_u4_u8 v1.l, 0x1234 +// GFX1250: v_sat_pk4_u4_u8_e32 v1.l, 0x1234 ; encoding: [0xff,0xe8,0x02,0x7e,0x34,0x12,0x00,0x00] v_sat_pk4_u4_u8 v1.h, v2 // GFX1250: v_sat_pk4_u4_u8_e32 v1.h, v2 ; encoding: [0x02,0xe9,0x02,0x7f] diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s index 0a46f2f074e10..592619f41b7b5 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp16.s @@ -58,120 +58,120 @@ v_tanh_f32 v255, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi // GFX1250: v_tanh_f32_dpp v255, -|v255| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x3c,0xfe,0x7f,0xff,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_tanh_f16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_tanh_f16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_tanh_f16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_tanh_f16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_tanh_f16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_tanh_f16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_mirror -// GFX1250: v_tanh_f16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_mirror +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_half_mirror -// GFX1250: v_tanh_f16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_half_mirror +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_shl:1 -// GFX1250: v_tanh_f16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_shl:1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_shl:15 -// GFX1250: v_tanh_f16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_shl:15 
+// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_shr:1 -// GFX1250: v_tanh_f16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_shr:1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_shr:15 -// GFX1250: v_tanh_f16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_shr:15 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_ror:1 -// GFX1250: v_tanh_f16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_ror:1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_ror:15 -// GFX1250: v_tanh_f16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_ror:15 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_tanh_f16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_tanh_f16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_tanh_f16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_tanh_f16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_tanh_f16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_tanh_f16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x3e,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_tanh_f16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x3e,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_tanh_f16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: 
v_tanh_f16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x3e,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_f16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_tanh_f16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x3e,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_tanh_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_tanh_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_tanh_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_tanh_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_mirror -// GFX1250: v_tanh_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_mirror +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_half_mirror -// GFX1250: v_tanh_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_shl:1 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_shl:15 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_shr:1 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_shr:15 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_shr:15 +// 
GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_ror:1 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_ror:15 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_tanh_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_tanh_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_tanh_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_tanh_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_tanh_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x94,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_tanh_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x94,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_tanh_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_tanh_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0x94,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_bf16 v5.h, v1.h quad_perm:[3,2,1,0] @@ -230,480 +230,480 @@ v_prng_b32 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 // GFX1250: v_prng_b32_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0x96,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_rcp_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_rcp_bf16 v5.l, v1.l 
quad_perm:[3,2,1,0] +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_rcp_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_rcp_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_mirror -// GFX1250: v_rcp_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_mirror +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_half_mirror -// GFX1250: v_rcp_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_shl:1 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_shl:15 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_shr:1 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_shr:15 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_ror:1 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_ror:15 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_ror:15 row_mask:0xf 
bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_rcp_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_rcp_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_rcp_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_rcp_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_rcp_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf2,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_rcp_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf2,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_rcp_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_rcp_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf2,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rcp_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_rcp_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf2,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_sqrt_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_sqrt_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_sqrt_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_sqrt_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 row_mirror -// GFX1250: v_sqrt_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xf4,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_half_mirror
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_shl:1
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x01,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_shl:1
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x01,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_shl:15
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x0f,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_shl:15
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x0f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_shr:1
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x11,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_shr:1
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x11,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_shr:15
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1f,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_shr:15
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x1f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_ror:1
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x21,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_ror:1
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x21,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_ror:15
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x2f,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_ror:15
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x2f,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x50,0x01,0xff]
+v_sqrt_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x5f,0x01,0x01]
+v_sqrt_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x5f,0x01,0x01]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sqrt_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x60,0x09,0x13]
+v_sqrt_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf4,0x0a,0x7e,0x01,0x60,0x09,0x13]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sqrt_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf4,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
+v_sqrt_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_sqrt_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf4,0xfe,0x7e,0x7f,0x6f,0x35,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
v_sqrt_bf16 v5.h, v1.h quad_perm:[3,2,1,0]
// GFX1250: v_sqrt_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf4,0x0a,0x7f,0x81,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 quad_perm:[3,2,1,0]
-// GFX1250: v_rsq_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1b,0x00,0xff]
+v_rsq_bf16 v5.l, v1.l quad_perm:[3,2,1,0]
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 quad_perm:[0,1,2,3]
-// GFX1250: v_rsq_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0xe4,0x00,0xff]
+v_rsq_bf16 v5.l, v1.l quad_perm:[0,1,2,3]
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_mirror
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x40,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_mirror
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x40,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_half_mirror
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x41,0x01,0xff]
+v_rsq_bf16 v5.l, v1.l row_half_mirror
+// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x41,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16 v5, v1 row_shl:1
-// GFX1250: v_rsq_bf16_dpp v5, v1 row_shl:1 row_mask:0xf
bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_shl:15 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_shr:1 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_shr:15 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_ror:1 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_ror:15 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_rsq_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_rsq_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_rsq_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_rsq_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_rsq_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l row_xmask:0 
row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf6,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_rsq_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf6,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_rsq_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_rsq_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf6,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rsq_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_rsq_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf6,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_log_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_log_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_log_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_log_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_log_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_log_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_mirror -// GFX1250: v_log_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_log_bf16 v5.l, v1.l row_mirror +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_half_mirror -// GFX1250: v_log_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_log_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_shl:1 -// GFX1250: v_log_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_log_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_shl:15 -// GFX1250: v_log_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_log_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_shr:1 -// GFX1250: v_log_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xf8,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_log_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_shr:15 -// GFX1250: v_log_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_log_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_ror:1 -// GFX1250: v_log_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_log_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_ror:15 -// GFX1250: v_log_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_log_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_log_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_log_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_log_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_log_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_log_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_log_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_log_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xf8,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_log_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf8,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_log_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_log_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xf8,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_log_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_log_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf 
bank_mask:0xf ; encoding: [0xfa,0xf8,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_exp_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_exp_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_exp_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_exp_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_exp_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_exp_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_mirror -// GFX1250: v_exp_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_mirror +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_half_mirror -// GFX1250: v_exp_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_shl:1 -// GFX1250: v_exp_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_shl:15 -// GFX1250: v_exp_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_shr:1 -// GFX1250: v_exp_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_shr:15 -// GFX1250: v_exp_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_ror:1 -// GFX1250: v_exp_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xfa,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_ror:15 -// GFX1250: v_exp_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_exp_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_exp_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_exp_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_exp_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_exp_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_exp_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfa,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_exp_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfa,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_exp_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_exp_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfa,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_exp_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_exp_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfa,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_sin_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_sin_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_sin_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_sin_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_sin_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// 
GFX1250: v_sin_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_mirror -// GFX1250: v_sin_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_mirror +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_half_mirror -// GFX1250: v_sin_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_half_mirror +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_shl:1 -// GFX1250: v_sin_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_shl:15 -// GFX1250: v_sin_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_shr:1 -// GFX1250: v_sin_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_shr:15 -// GFX1250: v_sin_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_ror:1 -// GFX1250: v_sin_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_ror:15 -// GFX1250: v_sin_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_sin_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: 
[0xfa,0xfc,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_sin_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_sin_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_sin_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_sin_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_sin_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfc,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_sin_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfc,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_sin_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_sin_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfc,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sin_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_sin_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfc,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_cos_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_cos_bf16 v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_cos_bf16_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_cos_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_cos_bf16 v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_cos_bf16_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_mirror -// GFX1250: v_cos_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_mirror +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_half_mirror -// GFX1250: v_cos_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_half_mirror +// 
GFX1250: v_cos_bf16_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_shl:1 -// GFX1250: v_cos_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_shl:1 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_shl:15 -// GFX1250: v_cos_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x0f,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_shl:15 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_shr:1 -// GFX1250: v_cos_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_shr:1 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_shr:15 -// GFX1250: v_cos_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_shr:15 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_ror:1 -// GFX1250: v_cos_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_ror:1 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_ror:15 -// GFX1250: v_cos_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_ror:15 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_cos_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_cos_bf16 v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_cos_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_cos_bf16 v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 
bound_ctrl:1 fi:0 -// GFX1250: v_cos_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_cos_bf16 v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xfe,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_cos_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfe,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_cos_bf16 v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_cos_bf16_dpp v127.l, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xfe,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cos_bf16 v5.h, v1.h quad_perm:[3,2,1,0] // GFX1250: v_cos_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7f,0x81,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] +v_cvt_f32_bf16 v5, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] +v_cvt_f32_bf16 v5, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_mirror -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_mirror +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_half_mirror -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_half_mirror +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_shl:1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_shl:1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_shl:15 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] 
+v_cvt_f32_bf16 v5, v1.l row_shl:15 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_shr:1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_shr:1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_shr:15 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_shr:15 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_ror:1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_ror:1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_ror:15 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_ror:15 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] +v_cvt_f32_bf16 v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] +v_cvt_f32_bf16 v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] +v_cvt_f32_bf16 v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_cvt_f32_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: 
[0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +v_cvt_f32_bf16 v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_cvt_f32_bf16_dpp v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f32_bf16 v5, v1.h quad_perm:[3,2,1,0] @@ -750,24 +750,24 @@ v_cvt_pk_f16_fp8 v1, v2.h quad_perm:[0,1,2,3] // GFX1250: v_cvt_pk_f16_fp8_dpp v1, v2.h quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xea,0x02,0x7e,0x82,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf -// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x00,0xff] +v_sat_pk4_i4_i8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf +// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 -// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x04,0xff] +v_sat_pk4_i4_i8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 +// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe6,0x02,0x7e,0x02,0x39,0x04,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_i4_i8 v1.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf // GFX1250: v_sat_pk4_i4_i8_dpp v1.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe6,0x02,0x7f,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf -// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x00,0xff] +v_sat_pk4_u4_u8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf +// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 -// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x04,0xff] +v_sat_pk4_u4_u8 v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 +// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xe8,0x02,0x7e,0x02,0x39,0x04,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_u4_u8 v1.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s index 359aadc49ccc4..2aabe39383d12 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_dpp8.s @@ -14,32 +14,32 @@ v_tanh_f32 v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX1250: v_tanh_f32_dpp v255, v255 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x3c,0xfe,0x7f,0xff,0x00,0x00,0x00] // GFX12-ERR: 
:[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_f16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_tanh_f16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_f16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_tanh_f16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_tanh_f16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_tanh_f16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x3e,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_tanh_f16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x3e,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_tanh_f16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_tanh_f16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x3e,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_f16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_tanh_f16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x3e,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_tanh_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_tanh_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_tanh_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_tanh_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0x94,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_tanh_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x94,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_tanh_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_tanh_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x94,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] @@ -58,152 +58,152 @@ v_prng_b32 v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX1250: v_prng_b32_dpp v255, v255 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0x96,0xfe,0x7f,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rcp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_rcp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_rcp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 
; encoding: [0xea,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_rcp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_rcp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf2,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_rcp_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf2,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_rcp_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_rcp_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf2,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rcp_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_rcp_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf2,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sqrt_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_sqrt_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sqrt_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_sqrt_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sqrt_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf4,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_sqrt_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf4,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_sqrt_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_sqrt_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf4,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sqrt_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sqrt_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf4,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rsq_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_rsq_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_rsq_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_rsq_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_rsq_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf6,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_rsq_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf6,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_rsq_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_rsq_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: 
[0xe9,0xf6,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rsq_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_rsq_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf6,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_log_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_log_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_log_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_log_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_log_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_log_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf8,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_log_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf8,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_log_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_log_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xf8,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_log_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_log_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf8,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_exp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_exp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_exp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_exp_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_exp_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_exp_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfa,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_exp_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfa,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_exp_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_exp_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfa,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_exp_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_exp_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfa,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sin_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_sin_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sin_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: 
[0xe9,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sin_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_sin_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sin_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfc,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_sin_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfc,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_sin_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_sin_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfc,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sin_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sin_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfc,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cos_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_cos_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cos_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cos_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_cos_bf16 v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cos_bf16_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xfe,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_cos_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfe,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_cos_bf16 v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_cos_bf16_dpp v127.l, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xfe,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cos_bf16 v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cos_bf16_dpp v5.h, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xfe,0x0a,0x7f,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_cvt_f32_bf16 v5, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +v_cvt_f32_bf16 v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16 v127, v127 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_cvt_f32_bf16_dpp v127, 
v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] +v_cvt_f32_bf16 v127, v127.l dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_cvt_f32_bf16_dpp v127, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f32_bf16 v5, v1.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_f32_bf16_dpp v5, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f16_bf8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cvt_f16_bf8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] +v_cvt_f16_bf8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cvt_f16_bf8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f16_bf8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cvt_f16_bf8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] +v_cvt_f16_bf8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cvt_f16_bf8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f16_bf8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_f16_bf8_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7f,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f16_fp8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cvt_f16_fp8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xee,0x02,0x7e,0x02,0x77,0x39,0x05] +v_cvt_f16_fp8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cvt_f16_fp8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xee,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f16_fp8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cvt_f16_fp8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xee,0x02,0x7e,0x02,0x77,0x39,0x05] +v_cvt_f16_fp8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cvt_f16_fp8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xee,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f16_fp8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] @@ -226,24 +226,24 @@ v_cvt_pk_f16_fp8 v1, v2.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_pk_f16_fp8_dpp v1, v2.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xea,0x02,0x7e,0x82,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05] +v_sat_pk4_i4_i8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sat_pk4_i4_i8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05] +v_sat_pk4_i4_i8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sat_pk4_i4_i8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe6,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: 
instruction not supported on this GPU v_sat_pk4_i4_i8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sat_pk4_i4_i8_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe6,0x02,0x7f,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05] +v_sat_pk4_u4_u8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sat_pk4_u4_u8_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05] +v_sat_pk4_u4_u8 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sat_pk4_u4_u8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe8,0x02,0x7e,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_u4_u8 v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s index b4d4e365d0453..98d07ac1ece27 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s @@ -52,32 +52,32 @@ v_bitop3_b32 v255, 0xaf123456, vcc_hi, null bitop3:103 v_bitop3_b16 v5.l, v1.l, v2.l, s3 // GFX1250: v_bitop3_b16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x33,0xd6,0x01,0x05,0x0e,0x00] -v_bitop3_b16 v5, v1, v2, s3 bitop3:161 -// GFX1250: v_bitop3_b16 v5, v1, v2, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30] +v_bitop3_b16 v5.l, v1.l, v2.l, s3 bitop3:161 +// GFX1250: v_bitop3_b16 v5.l, v1.l, v2.l, s3 bitop3:0xa1 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0x05,0x0e,0x30] -v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27 -// GFX1250: v_bitop3_b16 v5, v255, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1] +v_bitop3_b16 v5.l, v255.l, s2, s105 bitop3:0x27 +// GFX1250: v_bitop3_b16 v5.l, v255.l, s2, s105 bitop3:0x27 ; encoding: [0x05,0x04,0x33,0xd6,0xff,0x05,0xa4,0xe1] -v_bitop3_b16 v5, s1, v255, exec_hi bitop3:100 -// GFX1250: v_bitop3_b16 v5, s1, v255, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89] +v_bitop3_b16 v5.l, s1, v255.l, exec_hi bitop3:100 +// GFX1250: v_bitop3_b16 v5.l, s1, v255.l, exec_hi bitop3:0x64 ; encoding: [0x05,0x04,0x33,0xd6,0x01,0xfe,0xff,0x89] -v_bitop3_b16 v5, s105, s105, exec_lo bitop3:0 -// GFX1250: v_bitop3_b16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01] +v_bitop3_b16 v5.l, s105, s105, exec_lo bitop3:0 +// GFX1250: v_bitop3_b16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x33,0xd6,0x69,0xd2,0xf8,0x01] -v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15 -// GFX1250: v_bitop3_b16 v5, vcc_lo, ttmp15, v3 bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4] +v_bitop3_b16 v5.l, vcc_lo, ttmp15, v3.l bitop3:0x15 +// GFX1250: v_bitop3_b16 v5.l, vcc_lo, ttmp15, v3.l bitop3:0x15 ; encoding: [0x05,0x02,0x33,0xd6,0x6a,0xf6,0x0c,0xa4] -v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:63 -// GFX1250: v_bitop3_b16 v5, vcc_hi, 0xfe0b, v255 bitop3:0x3f ; encoding: [0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00] +v_bitop3_b16 v5.l, vcc_hi, 0xfe0b, v255.l bitop3:63 +// GFX1250: v_bitop3_b16 v5.l, vcc_hi, 0xfe0b, v255.l bitop3:0x3f ; encoding: 
[0x05,0x07,0x33,0xd6,0x6b,0xfe,0xfd,0xe7,0x0b,0xfe,0x00,0x00] -v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 -// GFX1250: v_bitop3_b16 v5, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81] +v_bitop3_b16 v5.l, ttmp15, src_scc, ttmp15 bitop3:0x24 +// GFX1250: v_bitop3_b16 v5.l, ttmp15, src_scc, ttmp15 bitop3:0x24 ; encoding: [0x05,0x04,0x33,0xd6,0x7b,0xfa,0xed,0x81] -v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5 -// GFX1250: v_bitop3_b16 v5, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xe0,0xf5,0xa1] +v_bitop3_b16 v5.l, m0, 0.5, m0 bitop3:5 +// GFX1250: v_bitop3_b16 v5.l, m0, 0.5, m0 bitop3:5 ; encoding: [0x05,0x00,0x33,0xd6,0x7d,0xe0,0xf5,0xa1] -v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6 -// GFX1250: v_bitop3_b16 v5, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1] +v_bitop3_b16 v5.l, exec_lo, -1, vcc_hi bitop3:6 +// GFX1250: v_bitop3_b16 v5.l, exec_lo, -1, vcc_hi bitop3:6 ; encoding: [0x05,0x00,0x33,0xd6,0x7e,0x82,0xad,0xc1] v_bitop3_b16 v5.h, exec_hi, null, vcc_lo op_sel:[1,1,1,1] // GFX1250: v_bitop3_b16 v5.h, exec_hi, null, vcc_lo op_sel:[1,1,1,1] ; encoding: [0x05,0x78,0x33,0xd6,0x7f,0xf8,0xa8,0x01] @@ -563,17 +563,17 @@ v_cvt_sr_bf8_f16 v1, v2.l, v3 v_cvt_sr_bf8_f16 v1, v2.h, v3 // GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00] -v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:0 -// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00] +v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:0 +// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00] -v_cvt_sr_bf8_f16 v1, v2, s3 -// GFX1250: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00] +v_cvt_sr_bf8_f16 v1, v2.l, s3 +// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00] -v_cvt_sr_bf8_f16 v1, v2, 0x1234 -// GFX1250: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00] +v_cvt_sr_bf8_f16 v1, v2.l, 0x1234 +// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00] -v_cvt_sr_bf8_f16 v1, -v2, v3 -// GFX1250: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20] +v_cvt_sr_bf8_f16 v1, -v2.l, v3 +// GFX1250: v_cvt_sr_bf8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20] v_cvt_sr_bf8_f16 v1, |v2.l|, v3 // GFX1250: v_cvt_sr_bf8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00] @@ -605,14 +605,14 @@ v_cvt_sr_fp8_f16 v1, v2.l, v3 v_cvt_sr_fp8_f16 v1, v2.h, v3 // GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00] -v_cvt_sr_fp8_f16 v1, v2, s3 -// GFX1250: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00] +v_cvt_sr_fp8_f16 v1, v2.l, s3 +// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00] -v_cvt_sr_fp8_f16 v1, v2, 0x1234 -// GFX1250: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00] +v_cvt_sr_fp8_f16 v1, v2.l, 0x1234 +// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00] -v_cvt_sr_fp8_f16 v1, -v2, v3 -// GFX1250: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20] +v_cvt_sr_fp8_f16 v1, -v2.l, v3 +// GFX1250: 
v_cvt_sr_fp8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20] v_cvt_sr_fp8_f16 v1, |v2.l|, v3 // GFX1250: v_cvt_sr_fp8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00] @@ -644,11 +644,11 @@ v_cvt_pk_fp8_f32 v1.l, v2, v3 v_cvt_pk_fp8_f32 v1.h, v2, v3 // GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00] -v_cvt_pk_fp8_f32 v1, -v2, |v3| -// GFX1250: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20] +v_cvt_pk_fp8_f32 v1.l, -v2, |v3| +// GFX1250: v_cvt_pk_fp8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20] -v_cvt_pk_fp8_f32 v1, s2, 3 -// GFX1250: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00] +v_cvt_pk_fp8_f32 v1.l, s2, 3 +// GFX1250: v_cvt_pk_fp8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00] v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp // GFX1250: v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00] @@ -656,14 +656,14 @@ v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp v_cvt_pk_fp8_f32 v1.h, v2, v3 clamp // GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00] -v_cvt_pk_bf8_f32 v1, v2, v3 -// GFX1250: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00] +v_cvt_pk_bf8_f32 v1.l, v2, v3 +// GFX1250: v_cvt_pk_bf8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00] -v_cvt_pk_bf8_f32 v1, -v2, |v3| -// GFX1250: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20] +v_cvt_pk_bf8_f32 v1.l, -v2, |v3| +// GFX1250: v_cvt_pk_bf8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20] -v_cvt_pk_bf8_f32 v1, s2, 3 -// GFX1250: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00] +v_cvt_pk_bf8_f32 v1.l, s2, 3 +// GFX1250: v_cvt_pk_bf8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00] v_cvt_sr_fp8_f32 v1, v2, v3 // GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00] diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s index f766e52b39f2c..fc0ea8b2d927f 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s @@ -62,60 +62,60 @@ v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] // GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:161 quad_perm:[0,1,2,3] -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:161 quad_perm:[0,1,2,3] +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0xa1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x34,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: 
[0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x27 row_mirror +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x27 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0xe4,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:100 row_half_mirror -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:100 row_half_mirror +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v3.l bitop3:0x64 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x0e,0x8c,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, v255 bitop3:0 row_shl:1 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v255 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v255.l bitop3:0 row_shl:1 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, v255.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, s105 bitop3:0x16 row_shl:15 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, s105 bitop3:0x16 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x33,0xd6,0xfa,0x04,0xa6,0xc1,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:63 row_shr:1 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi bitop3:63 row_shr:1 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_hi bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x33,0xd6,0xfa,0x04,0xae,0xe1,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo bitop3:0x24 row_shr:15 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, vcc_lo bitop3:0x24 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0xaa,0x81,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, ttmp15 bitop3:5 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, ttmp15 bitop3:5 row_ror:1 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, ttmp15 bitop3:5 
row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xee,0xa1,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_hi bitop3:6 row_ror:15 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_hi bitop3:6 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfe,0xc1,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x33,0xd6,0xfa,0x04,0xfa,0x01,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo bitop3:77 row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, exec_lo bitop3:0x4d row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x33,0xd6,0xfa,0x04,0xfa,0xa9,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, null bitop3:88 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, null bitop3:0x58 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x03,0x33,0xd6,0xfa,0x04,0xf2,0x09,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13] +v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:99 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_bitop3_b16_e64_dpp v5.l, v1.l, v2.l, -1 bitop3:0x63 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x04,0x33,0xd6,0xfa,0x04,0x06,0x6b,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_bitop3_b16_e64_dpp v255, v255, v255, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30] 
+v_bitop3_b16_e64_dpp v255.l, v255.l, v255.l, src_scc bitop3:101 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_bitop3_b16_e64_dpp v255.l, v255.l, v255.l, src_scc bitop3:0x65 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x04,0x33,0xd6,0xfa,0xfe,0xf7,0xab,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, exec_hi op_sel:[1,1,1,1] row_ror:15 row_mask:0xf bank_mask:0xf @@ -470,12 +470,12 @@ v_cvt_sr_bf8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1 // GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] +v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] +// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] +v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] +// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] @@ -494,12 +494,12 @@ v_cvt_sr_fp8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1 // GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] +v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] +// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] +v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] +// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] diff 
--git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s index cce8e1ef24f5f..4f7df62659f68 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s @@ -1,4 +1,5 @@ // RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefixes=GFX125X-ERR,GFX1250-ERR --implicit-check-not=error: --strict-whitespace %s +// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1251 -show-encoding %s 2>&1 | FileCheck --check-prefixes=GFX125X-ERR,GFX1251-ERR --implicit-check-not=error: --strict-whitespace %s v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] dpp8:[7,6,5,4,3,2,1,0] // GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand. diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s index 8e73ecb4232e0..5ac9eb47381d6 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s @@ -46,50 +46,50 @@ v_bfrev_b32_e64 v5, src_scc v_bfrev_b32_e64 v255, 0xaf123456 // GFX1250: v_bfrev_b32_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xb8,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_ceil_f16_e64 v5, v1 -// GFX1250: v_ceil_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x01,0x00,0x00] +v_ceil_f16_e64 v5.l, v1.l +// GFX1250: v_ceil_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x01,0x00,0x00] -v_ceil_f16_e64 v5, v255 -// GFX1250: v_ceil_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdc,0xd5,0xff,0x01,0x00,0x00] +v_ceil_f16_e64 v5.l, v255.l +// GFX1250: v_ceil_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdc,0xd5,0xff,0x01,0x00,0x00] -v_ceil_f16_e64 v5, s1 -// GFX1250: v_ceil_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, s1 +// GFX1250: v_ceil_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdc,0xd5,0x01,0x00,0x00,0x00] -v_ceil_f16_e64 v5, s105 -// GFX1250: v_ceil_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdc,0xd5,0x69,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, s105 +// GFX1250: v_ceil_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdc,0xd5,0x69,0x00,0x00,0x00] -v_ceil_f16_e64 v5, vcc_lo -// GFX1250: v_ceil_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x6a,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, vcc_lo +// GFX1250: v_ceil_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x6a,0x00,0x00,0x00] -v_ceil_f16_e64 v5, vcc_hi -// GFX1250: v_ceil_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x6b,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, vcc_hi +// GFX1250: v_ceil_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x6b,0x00,0x00,0x00] -v_ceil_f16_e64 v5, ttmp15 -// GFX1250: v_ceil_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdc,0xd5,0x7b,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, ttmp15 +// GFX1250: v_ceil_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdc,0xd5,0x7b,0x00,0x00,0x00] -v_ceil_f16_e64 v5, m0 -// GFX1250: v_ceil_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdc,0xd5,0x7d,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, m0 +// GFX1250: v_ceil_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdc,0xd5,0x7d,0x00,0x00,0x00] -v_ceil_f16_e64 v5, exec_lo -// GFX1250: v_ceil_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x7e,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, exec_lo +// GFX1250: v_ceil_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdc,0xd5,0x7e,0x00,0x00,0x00] -v_ceil_f16_e64 v5, exec_hi -// GFX1250: v_ceil_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdc,0xd5,0x7f,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, exec_hi +// GFX1250: v_ceil_f16_e64 v5.l, exec_hi ; encoding: 
[0x05,0x00,0xdc,0xd5,0x7f,0x00,0x00,0x00] -v_ceil_f16_e64 v5, null -// GFX1250: v_ceil_f16_e64 v5, null ; encoding: [0x05,0x00,0xdc,0xd5,0x7c,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, null +// GFX1250: v_ceil_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdc,0xd5,0x7c,0x00,0x00,0x00] -v_ceil_f16_e64 v5, -1 -// GFX1250: v_ceil_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdc,0xd5,0xc1,0x00,0x00,0x00] +v_ceil_f16_e64 v5.l, -1 +// GFX1250: v_ceil_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdc,0xd5,0xc1,0x00,0x00,0x00] -v_ceil_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_ceil_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdc,0xd5,0xf0,0x00,0x00,0x08] +v_ceil_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_ceil_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdc,0xd5,0xf0,0x00,0x00,0x08] -v_ceil_f16_e64 v5, src_scc mul:4 -// GFX1250: v_ceil_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdc,0xd5,0xfd,0x00,0x00,0x10] +v_ceil_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_ceil_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdc,0xd5,0xfd,0x00,0x00,0x10] -v_ceil_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_ceil_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdc,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_ceil_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_ceil_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdc,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_ceil_f16 v5.l, v128.l // GFX1250: v_ceil_f16_e64 v5.l, v128.l ; encoding: [0x05,0x00,0xdc,0xd5,0x80,0x01,0x00,0x00] @@ -268,50 +268,50 @@ v_clz_i32_u32_e64 v5, src_scc v_clz_i32_u32_e64 v255, 0xaf123456 // GFX1250: v_clz_i32_u32_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xb9,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_cos_f16_e64 v5, v1 -// GFX1250: v_cos_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x01,0x00,0x00] +v_cos_f16_e64 v5.l, v1.l +// GFX1250: v_cos_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x01,0x00,0x00] -v_cos_f16_e64 v5, v255 -// GFX1250: v_cos_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe1,0xd5,0xff,0x01,0x00,0x00] +v_cos_f16_e64 v5.l, v255.l +// GFX1250: v_cos_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe1,0xd5,0xff,0x01,0x00,0x00] -v_cos_f16_e64 v5, s1 -// GFX1250: v_cos_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, s1 +// GFX1250: v_cos_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe1,0xd5,0x01,0x00,0x00,0x00] -v_cos_f16_e64 v5, s105 -// GFX1250: v_cos_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe1,0xd5,0x69,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, s105 +// GFX1250: v_cos_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe1,0xd5,0x69,0x00,0x00,0x00] -v_cos_f16_e64 v5, vcc_lo -// GFX1250: v_cos_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x6a,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, vcc_lo +// GFX1250: v_cos_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x6a,0x00,0x00,0x00] -v_cos_f16_e64 v5, vcc_hi -// GFX1250: v_cos_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x6b,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, vcc_hi +// GFX1250: v_cos_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x6b,0x00,0x00,0x00] -v_cos_f16_e64 v5, ttmp15 -// GFX1250: v_cos_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe1,0xd5,0x7b,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, ttmp15 +// GFX1250: v_cos_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe1,0xd5,0x7b,0x00,0x00,0x00] -v_cos_f16_e64 v5, m0 -// GFX1250: v_cos_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe1,0xd5,0x7d,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, m0 +// GFX1250: v_cos_f16_e64 v5.l, m0 ; encoding: 
[0x05,0x00,0xe1,0xd5,0x7d,0x00,0x00,0x00] -v_cos_f16_e64 v5, exec_lo -// GFX1250: v_cos_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x7e,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, exec_lo +// GFX1250: v_cos_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe1,0xd5,0x7e,0x00,0x00,0x00] -v_cos_f16_e64 v5, exec_hi -// GFX1250: v_cos_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x7f,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, exec_hi +// GFX1250: v_cos_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe1,0xd5,0x7f,0x00,0x00,0x00] -v_cos_f16_e64 v5, null -// GFX1250: v_cos_f16_e64 v5, null ; encoding: [0x05,0x00,0xe1,0xd5,0x7c,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, null +// GFX1250: v_cos_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe1,0xd5,0x7c,0x00,0x00,0x00] -v_cos_f16_e64 v5, -1 -// GFX1250: v_cos_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe1,0xd5,0xc1,0x00,0x00,0x00] +v_cos_f16_e64 v5.l, -1 +// GFX1250: v_cos_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe1,0xd5,0xc1,0x00,0x00,0x00] -v_cos_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_cos_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xe1,0xd5,0xf0,0x00,0x00,0x08] +v_cos_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cos_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xe1,0xd5,0xf0,0x00,0x00,0x08] -v_cos_f16_e64 v5, src_scc mul:4 -// GFX1250: v_cos_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xe1,0xd5,0xfd,0x00,0x00,0x10] +v_cos_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_cos_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xe1,0xd5,0xfd,0x00,0x00,0x10] -v_cos_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_cos_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe1,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_cos_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_cos_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe1,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_cos_f16 v5.l, v128.l // GFX1250: v_cos_f16_e64 v5.l, v128.l ; encoding: [0x05,0x00,0xe1,0xd5,0x80,0x01,0x00,0x00] @@ -502,11 +502,11 @@ v_cvt_pk_f32_bf8_e64 v[2:3], 3 v_cvt_pk_f32_bf8_e64 v[2:3], 3 op_sel:[1,0] // GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], 3 op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x83,0x00,0x00,0x00] -v_cvt_pk_f32_bf8_e64 v[2:3], v3 -// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3 ; encoding: [0x02,0x00,0xef,0xd5,0x03,0x01,0x00,0x00] +v_cvt_pk_f32_bf8_e64 v[2:3], v3.l +// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3.l ; encoding: [0x02,0x00,0xef,0xd5,0x03,0x01,0x00,0x00] -v_cvt_pk_f32_bf8_e64 v[2:3], v3 op_sel:[1,0] -// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3 op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x03,0x01,0x00,0x00] +v_cvt_pk_f32_bf8_e64 v[2:3], v3.h op_sel:[1,0] +// GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v3.h op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x03,0x01,0x00,0x00] v_cvt_pk_f32_bf8 v[2:3], v128.h // GFX1250: v_cvt_pk_f32_bf8_e64 v[2:3], v128.h op_sel:[1,0] ; encoding: [0x02,0x08,0xef,0xd5,0x80,0x01,0x00,0x00] @@ -526,11 +526,11 @@ v_cvt_pk_f32_fp8_e64 v[2:3], 3 v_cvt_pk_f32_fp8_e64 v[2:3], 3 op_sel:[1,0] // GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], 3 op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x83,0x00,0x00,0x00] -v_cvt_pk_f32_fp8_e64 v[2:3], v3 -// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3 ; encoding: [0x02,0x00,0xee,0xd5,0x03,0x01,0x00,0x00] +v_cvt_pk_f32_fp8_e64 v[2:3], v3.l +// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3.l ; encoding: [0x02,0x00,0xee,0xd5,0x03,0x01,0x00,0x00] -v_cvt_pk_f32_fp8_e64 v[2:3], v3 op_sel:[1,0] -// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3 op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x03,0x01,0x00,0x00] 
+v_cvt_pk_f32_fp8_e64 v[2:3], v3.h op_sel:[1,0] +// GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v3.h op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x03,0x01,0x00,0x00] v_cvt_pk_f32_fp8 v[2:3], v128.h // GFX1250: v_cvt_pk_f32_fp8_e64 v[2:3], v128.h op_sel:[1,0] ; encoding: [0x02,0x08,0xee,0xd5,0x80,0x01,0x00,0x00] @@ -568,50 +568,50 @@ v_cvt_pk_f32_fp8_e64 v[4:5], v3 v_cvt_pk_f32_fp8_e64 v[4:5], v3 op_sel:[1,0] // GFX1250: v_cvt_pk_f32_fp8_e64 v[4:5], v3 op_sel:[1,0] ; encoding: [0x04,0x08,0xee,0xd5,0x03,0x01,0x00,0x00] -v_cvt_f16_f32_e64 v5, v1 -// GFX1250: v_cvt_f16_f32_e64 v5, v1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, v1 +// GFX1250: v_cvt_f16_f32_e64 v5.l, v1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f16_f32_e64 v5, v255 -// GFX1250: v_cvt_f16_f32_e64 v5, v255 ; encoding: [0x05,0x00,0x8a,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, v255 +// GFX1250: v_cvt_f16_f32_e64 v5.l, v255 ; encoding: [0x05,0x00,0x8a,0xd5,0xff,0x01,0x00,0x00] -v_cvt_f16_f32_e64 v5, s1 -// GFX1250: v_cvt_f16_f32_e64 v5, s1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, s1 +// GFX1250: v_cvt_f16_f32_e64 v5.l, s1 ; encoding: [0x05,0x00,0x8a,0xd5,0x01,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, s105 -// GFX1250: v_cvt_f16_f32_e64 v5, s105 ; encoding: [0x05,0x00,0x8a,0xd5,0x69,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, s105 +// GFX1250: v_cvt_f16_f32_e64 v5.l, s105 ; encoding: [0x05,0x00,0x8a,0xd5,0x69,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, vcc_lo -// GFX1250: v_cvt_f16_f32_e64 v5, vcc_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, vcc_lo +// GFX1250: v_cvt_f16_f32_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, vcc_hi -// GFX1250: v_cvt_f16_f32_e64 v5, vcc_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, vcc_hi +// GFX1250: v_cvt_f16_f32_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, ttmp15 -// GFX1250: v_cvt_f16_f32_e64 v5, ttmp15 ; encoding: [0x05,0x00,0x8a,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, ttmp15 +// GFX1250: v_cvt_f16_f32_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0x8a,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, m0 -// GFX1250: v_cvt_f16_f32_e64 v5, m0 ; encoding: [0x05,0x00,0x8a,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, m0 +// GFX1250: v_cvt_f16_f32_e64 v5.l, m0 ; encoding: [0x05,0x00,0x8a,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, exec_lo -// GFX1250: v_cvt_f16_f32_e64 v5, exec_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, exec_lo +// GFX1250: v_cvt_f16_f32_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0x8a,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, exec_hi -// GFX1250: v_cvt_f16_f32_e64 v5, exec_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, exec_hi +// GFX1250: v_cvt_f16_f32_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0x8a,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, null -// GFX1250: v_cvt_f16_f32_e64 v5, null ; encoding: [0x05,0x00,0x8a,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, null +// GFX1250: v_cvt_f16_f32_e64 v5.l, null ; encoding: [0x05,0x00,0x8a,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, -1 -// GFX1250: v_cvt_f16_f32_e64 v5, -1 ; encoding: [0x05,0x00,0x8a,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_f16_f32_e64 v5.l, -1 +// GFX1250: v_cvt_f16_f32_e64 v5.l, -1 ; encoding: [0x05,0x00,0x8a,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_f16_f32_e64 v5, 0.5 mul:2 -// 
GFX1250: v_cvt_f16_f32_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0x8a,0xd5,0xf0,0x00,0x00,0x08] +v_cvt_f16_f32_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cvt_f16_f32_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0x8a,0xd5,0xf0,0x00,0x00,0x08] -v_cvt_f16_f32_e64 v5, src_scc mul:4 -// GFX1250: v_cvt_f16_f32_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0x8a,0xd5,0xfd,0x00,0x00,0x10] +v_cvt_f16_f32_e64 v5.l, src_scc mul:4 +// GFX1250: v_cvt_f16_f32_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0x8a,0xd5,0xfd,0x00,0x00,0x10] -v_cvt_f16_f32_e64 v255, -|0xaf123456| clamp div:2 -// GFX1250: v_cvt_f16_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0x8a,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] +v_cvt_f16_f32_e64 v255.l, -|0xaf123456| clamp div:2 +// GFX1250: v_cvt_f16_f32_e64 v255.l, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0x8a,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] v_cvt_f16_f32 v128.l, v15 // GFX1250: v_cvt_f16_f32_e64 v128.l, v15 ; encoding: [0x80,0x00,0x8a,0xd5,0x0f,0x01,0x00,0x00] @@ -619,50 +619,50 @@ v_cvt_f16_f32 v128.l, v15 v_cvt_f16_f32 v128.h, v15 // GFX1250: v_cvt_f16_f32_e64 v128.h, v15 op_sel:[0,1] ; encoding: [0x80,0x40,0x8a,0xd5,0x0f,0x01,0x00,0x00] -v_cvt_f16_i16_e64 v5, v1 -// GFX1250: v_cvt_f16_i16_e64 v5, v1 ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, v1.l +// GFX1250: v_cvt_f16_i16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f16_i16_e64 v5, v255 -// GFX1250: v_cvt_f16_i16_e64 v5, v255 ; encoding: [0x05,0x00,0xd1,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, v255.l +// GFX1250: v_cvt_f16_i16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd1,0xd5,0xff,0x01,0x00,0x00] -v_cvt_f16_i16_e64 v5, s1 -// GFX1250: v_cvt_f16_i16_e64 v5, s1 ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, s1 +// GFX1250: v_cvt_f16_i16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd1,0xd5,0x01,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, s105 -// GFX1250: v_cvt_f16_i16_e64 v5, s105 ; encoding: [0x05,0x00,0xd1,0xd5,0x69,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, s105 +// GFX1250: v_cvt_f16_i16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd1,0xd5,0x69,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, vcc_lo -// GFX1250: v_cvt_f16_i16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_f16_i16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, vcc_hi -// GFX1250: v_cvt_f16_i16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_f16_i16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, ttmp15 -// GFX1250: v_cvt_f16_i16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd1,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_f16_i16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd1,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, m0 -// GFX1250: v_cvt_f16_i16_e64 v5, m0 ; encoding: [0x05,0x00,0xd1,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, m0 +// GFX1250: v_cvt_f16_i16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd1,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, exec_lo -// GFX1250: v_cvt_f16_i16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, exec_lo +// GFX1250: v_cvt_f16_i16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd1,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, exec_hi -// GFX1250: v_cvt_f16_i16_e64 v5, exec_hi ; encoding: 
[0x05,0x00,0xd1,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, exec_hi +// GFX1250: v_cvt_f16_i16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd1,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, null -// GFX1250: v_cvt_f16_i16_e64 v5, null ; encoding: [0x05,0x00,0xd1,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, null +// GFX1250: v_cvt_f16_i16_e64 v5.l, null ; encoding: [0x05,0x00,0xd1,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, -1 -// GFX1250: v_cvt_f16_i16_e64 v5, -1 ; encoding: [0x05,0x00,0xd1,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_f16_i16_e64 v5.l, -1 +// GFX1250: v_cvt_f16_i16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd1,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_f16_i16_e64 v5, 0.5 mul:2 -// GFX1250: v_cvt_f16_i16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd1,0xd5,0xf0,0x00,0x00,0x08] +v_cvt_f16_i16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cvt_f16_i16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd1,0xd5,0xf0,0x00,0x00,0x08] -v_cvt_f16_i16_e64 v5, src_scc mul:4 -// GFX1250: v_cvt_f16_i16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd1,0xd5,0xfd,0x00,0x00,0x10] +v_cvt_f16_i16_e64 v5.l, src_scc mul:4 +// GFX1250: v_cvt_f16_i16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd1,0xd5,0xfd,0x00,0x00,0x10] -v_cvt_f16_i16_e64 v255, 0xfe0b clamp div:2 -// GFX1250: v_cvt_f16_i16_e64 v255, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd1,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00] +v_cvt_f16_i16_e64 v255.l, 0xfe0b clamp div:2 +// GFX1250: v_cvt_f16_i16_e64 v255.l, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd1,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00] v_cvt_f16_i16 v128.l, v15.l // GFX1250: v_cvt_f16_i16_e64 v128.l, v15.l ; encoding: [0x80,0x00,0xd1,0xd5,0x0f,0x01,0x00,0x00] @@ -670,50 +670,50 @@ v_cvt_f16_i16 v128.l, v15.l v_cvt_f16_i16 v128.h, v15.h // GFX1250: v_cvt_f16_i16_e64 v128.h, v15.h op_sel:[1,1] ; encoding: [0x80,0x48,0xd1,0xd5,0x0f,0x01,0x00,0x00] -v_cvt_f16_u16_e64 v5, v1 -// GFX1250: v_cvt_f16_u16_e64 v5, v1 ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, v1.l +// GFX1250: v_cvt_f16_u16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f16_u16_e64 v5, v255 -// GFX1250: v_cvt_f16_u16_e64 v5, v255 ; encoding: [0x05,0x00,0xd0,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, v255.l +// GFX1250: v_cvt_f16_u16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd0,0xd5,0xff,0x01,0x00,0x00] -v_cvt_f16_u16_e64 v5, s1 -// GFX1250: v_cvt_f16_u16_e64 v5, s1 ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, s1 +// GFX1250: v_cvt_f16_u16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd0,0xd5,0x01,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, s105 -// GFX1250: v_cvt_f16_u16_e64 v5, s105 ; encoding: [0x05,0x00,0xd0,0xd5,0x69,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, s105 +// GFX1250: v_cvt_f16_u16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd0,0xd5,0x69,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, vcc_lo -// GFX1250: v_cvt_f16_u16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_f16_u16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, vcc_hi -// GFX1250: v_cvt_f16_u16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_f16_u16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, ttmp15 -// GFX1250: v_cvt_f16_u16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd0,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, ttmp15 +// GFX1250: 
v_cvt_f16_u16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd0,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, m0 -// GFX1250: v_cvt_f16_u16_e64 v5, m0 ; encoding: [0x05,0x00,0xd0,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, m0 +// GFX1250: v_cvt_f16_u16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd0,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, exec_lo -// GFX1250: v_cvt_f16_u16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, exec_lo +// GFX1250: v_cvt_f16_u16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd0,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, exec_hi -// GFX1250: v_cvt_f16_u16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, exec_hi +// GFX1250: v_cvt_f16_u16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd0,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, null -// GFX1250: v_cvt_f16_u16_e64 v5, null ; encoding: [0x05,0x00,0xd0,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, null +// GFX1250: v_cvt_f16_u16_e64 v5.l, null ; encoding: [0x05,0x00,0xd0,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, -1 -// GFX1250: v_cvt_f16_u16_e64 v5, -1 ; encoding: [0x05,0x00,0xd0,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_f16_u16_e64 v5.l, -1 +// GFX1250: v_cvt_f16_u16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd0,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_f16_u16_e64 v5, 0.5 mul:2 -// GFX1250: v_cvt_f16_u16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd0,0xd5,0xf0,0x00,0x00,0x08] +v_cvt_f16_u16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cvt_f16_u16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd0,0xd5,0xf0,0x00,0x00,0x08] -v_cvt_f16_u16_e64 v5, src_scc mul:4 -// GFX1250: v_cvt_f16_u16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd0,0xd5,0xfd,0x00,0x00,0x10] +v_cvt_f16_u16_e64 v5.l, src_scc mul:4 +// GFX1250: v_cvt_f16_u16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd0,0xd5,0xfd,0x00,0x00,0x10] -v_cvt_f16_u16_e64 v255, 0xfe0b clamp div:2 -// GFX1250: v_cvt_f16_u16_e64 v255, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd0,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00] +v_cvt_f16_u16_e64 v255.l, 0xfe0b clamp div:2 +// GFX1250: v_cvt_f16_u16_e64 v255.l, 0xfe0b clamp div:2 ; encoding: [0xff,0x80,0xd0,0xd5,0xff,0x00,0x00,0x18,0x0b,0xfe,0x00,0x00] v_cvt_f16_u16 v128.l, v15.l // GFX1250: v_cvt_f16_u16_e64 v128.l, v15.l ; encoding: [0x80,0x00,0xd0,0xd5,0x0f,0x01,0x00,0x00] @@ -721,11 +721,11 @@ v_cvt_f16_u16 v128.l, v15.l v_cvt_f16_u16 v128.h, v15.h // GFX1250: v_cvt_f16_u16_e64 v128.h, v15.h op_sel:[1,1] ; encoding: [0x80,0x48,0xd0,0xd5,0x0f,0x01,0x00,0x00] -v_cvt_f32_f16_e64 v5, v1 -// GFX1250: v_cvt_f32_f16_e64 v5, v1 ; encoding: [0x05,0x00,0x8b,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f32_f16_e64 v5, v1.l +// GFX1250: v_cvt_f32_f16_e64 v5, v1.l ; encoding: [0x05,0x00,0x8b,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f32_f16_e64 v5, v255 -// GFX1250: v_cvt_f32_f16_e64 v5, v255 ; encoding: [0x05,0x00,0x8b,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f32_f16_e64 v5, v255.l +// GFX1250: v_cvt_f32_f16_e64 v5, v255.l ; encoding: [0x05,0x00,0x8b,0xd5,0xff,0x01,0x00,0x00] v_cvt_f32_f16_e64 v5, s1 // GFX1250: v_cvt_f32_f16_e64 v5, s1 ; encoding: [0x05,0x00,0x8b,0xd5,0x01,0x00,0x00,0x00] @@ -1303,50 +1303,50 @@ v_cvt_flr_i32_f32_e64 v5, src_scc v_cvt_flr_i32_f32_e64 v255, -|0xaf123456| // GFX1250: v_cvt_floor_i32_f32_e64 v255, -|0xaf123456| ; encoding: [0xff,0x01,0x8d,0xd5,0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf] -v_cvt_i16_f16_e64 v5, v1 -// GFX1250: v_cvt_i16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x01,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, v1.l +// GFX1250: 
v_cvt_i16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x01,0x00,0x00] -v_cvt_i16_f16_e64 v5, v255 -// GFX1250: v_cvt_i16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd3,0xd5,0xff,0x01,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, v255.l +// GFX1250: v_cvt_i16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd3,0xd5,0xff,0x01,0x00,0x00] -v_cvt_i16_f16_e64 v5, s1 -// GFX1250: v_cvt_i16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, s1 +// GFX1250: v_cvt_i16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd3,0xd5,0x01,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, s105 -// GFX1250: v_cvt_i16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd3,0xd5,0x69,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, s105 +// GFX1250: v_cvt_i16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd3,0xd5,0x69,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, vcc_lo -// GFX1250: v_cvt_i16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_i16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, vcc_hi -// GFX1250: v_cvt_i16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_i16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, ttmp15 -// GFX1250: v_cvt_i16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd3,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_i16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd3,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, m0 -// GFX1250: v_cvt_i16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd3,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, m0 +// GFX1250: v_cvt_i16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd3,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, exec_lo -// GFX1250: v_cvt_i16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, exec_lo +// GFX1250: v_cvt_i16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd3,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, exec_hi -// GFX1250: v_cvt_i16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, exec_hi +// GFX1250: v_cvt_i16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd3,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, null -// GFX1250: v_cvt_i16_f16_e64 v5, null ; encoding: [0x05,0x00,0xd3,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, null +// GFX1250: v_cvt_i16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd3,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, -1 -// GFX1250: v_cvt_i16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd3,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, -1 +// GFX1250: v_cvt_i16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd3,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, 0.5 -// GFX1250: v_cvt_i16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xd3,0xd5,0xf0,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, 0.5 +// GFX1250: v_cvt_i16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xd3,0xd5,0xf0,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v5, src_scc -// GFX1250: v_cvt_i16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xd3,0xd5,0xfd,0x00,0x00,0x00] +v_cvt_i16_f16_e64 v5.l, src_scc +// GFX1250: v_cvt_i16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xd3,0xd5,0xfd,0x00,0x00,0x00] -v_cvt_i16_f16_e64 v255, -|0xfe0b| clamp -// GFX1250: v_cvt_i16_f16_e64 v255, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_cvt_i16_f16_e64 v255.l, -|0xfe0b| clamp 
+// GFX1250: v_cvt_i16_f16_e64 v255.l, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_cvt_i16_f16 v1.l, v128.l // GFX1250: v_cvt_i16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xd3,0xd5,0x80,0x01,0x00,0x00] @@ -1435,11 +1435,11 @@ v_cvt_i32_f64_e64 v5, -|src_scc| v_cvt_i32_f64_e64 v255, 0xaf123456 clamp // GFX1250: v_cvt_i32_f64_e64 v255, 0xaf123456 clamp ; encoding: [0xff,0x80,0x83,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_cvt_i32_i16_e64 v5, v1 -// GFX1250: v_cvt_i32_i16_e64 v5, v1 ; encoding: [0x05,0x00,0xea,0xd5,0x01,0x01,0x00,0x00] +v_cvt_i32_i16_e64 v5, v1.l +// GFX1250: v_cvt_i32_i16_e64 v5, v1.l ; encoding: [0x05,0x00,0xea,0xd5,0x01,0x01,0x00,0x00] -v_cvt_i32_i16_e64 v5, v255 -// GFX1250: v_cvt_i32_i16_e64 v5, v255 ; encoding: [0x05,0x00,0xea,0xd5,0xff,0x01,0x00,0x00] +v_cvt_i32_i16_e64 v5, v255.l +// GFX1250: v_cvt_i32_i16_e64 v5, v255.l ; encoding: [0x05,0x00,0xea,0xd5,0xff,0x01,0x00,0x00] v_cvt_i32_i16_e64 v5, s1 // GFX1250: v_cvt_i32_i16_e64 v5, s1 ; encoding: [0x05,0x00,0xea,0xd5,0x01,0x00,0x00,0x00] @@ -1531,50 +1531,50 @@ v_cvt_nearest_i32_f32_e64 v5, src_scc v_cvt_nearest_i32_f32_e64 v255, -|0xaf123456| // GFX1250: v_cvt_nearest_i32_f32_e64 v255, -|0xaf123456| ; encoding: [0xff,0x01,0x8c,0xd5,0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf] -v_cvt_norm_i16_f16_e64 v5, v1 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x01,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, v1.l +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x01,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, v255 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe3,0xd5,0xff,0x01,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, v255.l +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe3,0xd5,0xff,0x01,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, s1 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, s1 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe3,0xd5,0x01,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, s105 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe3,0xd5,0x69,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, s105 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe3,0xd5,0x69,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, vcc_lo -// GFX1250: v_cvt_norm_i16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, vcc_hi -// GFX1250: v_cvt_norm_i16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, ttmp15 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe3,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe3,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, m0 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe3,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, m0 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe3,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, exec_lo -// GFX1250: v_cvt_norm_i16_f16_e64 v5, 
exec_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, exec_lo +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe3,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, exec_hi -// GFX1250: v_cvt_norm_i16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, exec_hi +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe3,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, null -// GFX1250: v_cvt_norm_i16_f16_e64 v5, null ; encoding: [0x05,0x00,0xe3,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, null +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe3,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, -1 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe3,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, -1 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe3,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, 0.5 -// GFX1250: v_cvt_norm_i16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe3,0xd5,0xf0,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, 0.5 +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe3,0xd5,0xf0,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v5, src_scc -// GFX1250: v_cvt_norm_i16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe3,0xd5,0xfd,0x00,0x00,0x00] +v_cvt_norm_i16_f16_e64 v5.l, src_scc +// GFX1250: v_cvt_norm_i16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe3,0xd5,0xfd,0x00,0x00,0x00] -v_cvt_norm_i16_f16_e64 v255, -|0xfe0b| -// GFX1250: v_cvt_norm_i16_f16_e64 v255, -|0xfe0b| ; encoding: [0xff,0x01,0xe3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_cvt_norm_i16_f16_e64 v255.l, -|0xfe0b| +// GFX1250: v_cvt_norm_i16_f16_e64 v255.l, -|0xfe0b| ; encoding: [0xff,0x01,0xe3,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_cvt_norm_i16_f16 v1.l, v128.l // GFX1250: v_cvt_norm_i16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xe3,0xd5,0x80,0x01,0x00,0x00] @@ -1582,50 +1582,50 @@ v_cvt_norm_i16_f16 v1.l, v128.l v_cvt_norm_i16_f16 v1.l, v128.h // GFX1250: v_cvt_norm_i16_f16_e64 v1.l, v128.h op_sel:[1,0] ; encoding: [0x01,0x08,0xe3,0xd5,0x80,0x01,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, v1 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x01,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, v1.l +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x01,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, v255 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe4,0xd5,0xff,0x01,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, v255.l +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe4,0xd5,0xff,0x01,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, s1 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, s1 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe4,0xd5,0x01,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, s105 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe4,0xd5,0x69,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, s105 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe4,0xd5,0x69,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, vcc_lo -// GFX1250: v_cvt_norm_u16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, vcc_lo ; encoding: 
[0x05,0x00,0xe4,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, vcc_hi -// GFX1250: v_cvt_norm_u16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, ttmp15 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe4,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe4,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, m0 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe4,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, m0 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe4,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, exec_lo -// GFX1250: v_cvt_norm_u16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, exec_lo +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe4,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, exec_hi -// GFX1250: v_cvt_norm_u16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, exec_hi +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe4,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, null -// GFX1250: v_cvt_norm_u16_f16_e64 v5, null ; encoding: [0x05,0x00,0xe4,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, null +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe4,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, -1 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe4,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, -1 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe4,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, 0.5 -// GFX1250: v_cvt_norm_u16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe4,0xd5,0xf0,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, 0.5 +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe4,0xd5,0xf0,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v5, src_scc -// GFX1250: v_cvt_norm_u16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe4,0xd5,0xfd,0x00,0x00,0x00] +v_cvt_norm_u16_f16_e64 v5.l, src_scc +// GFX1250: v_cvt_norm_u16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe4,0xd5,0xfd,0x00,0x00,0x00] -v_cvt_norm_u16_f16_e64 v255, -|0xfe0b| -// GFX1250: v_cvt_norm_u16_f16_e64 v255, -|0xfe0b| ; encoding: [0xff,0x01,0xe4,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_cvt_norm_u16_f16_e64 v255.l, -|0xfe0b| +// GFX1250: v_cvt_norm_u16_f16_e64 v255.l, -|0xfe0b| ; encoding: [0xff,0x01,0xe4,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_cvt_norm_u16_f16 v1.l, v128.l // GFX1250: v_cvt_norm_u16_f16_e64 v1.l, v128.l ; encoding: [0x01,0x00,0xe4,0xd5,0x80,0x01,0x00,0x00] @@ -1723,50 +1723,50 @@ v_cvt_rpi_i32_f32_e64 v5, src_scc v_cvt_rpi_i32_f32_e64 v255, -|0xaf123456| // GFX1250: v_cvt_nearest_i32_f32_e64 v255, -|0xaf123456| ; encoding: [0xff,0x01,0x8c,0xd5,0xff,0x00,0x00,0x20,0x56,0x34,0x12,0xaf] -v_cvt_u16_f16_e64 v5, v1 -// GFX1250: v_cvt_u16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x01,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, v1.l +// GFX1250: v_cvt_u16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x01,0x00,0x00] -v_cvt_u16_f16_e64 v5, v255 -// GFX1250: v_cvt_u16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd2,0xd5,0xff,0x01,0x00,0x00] 
+v_cvt_u16_f16_e64 v5.l, v255.l +// GFX1250: v_cvt_u16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd2,0xd5,0xff,0x01,0x00,0x00] -v_cvt_u16_f16_e64 v5, s1 -// GFX1250: v_cvt_u16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, s1 +// GFX1250: v_cvt_u16_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd2,0xd5,0x01,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, s105 -// GFX1250: v_cvt_u16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd2,0xd5,0x69,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, s105 +// GFX1250: v_cvt_u16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd2,0xd5,0x69,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, vcc_lo -// GFX1250: v_cvt_u16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x6a,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, vcc_lo +// GFX1250: v_cvt_u16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x6a,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, vcc_hi -// GFX1250: v_cvt_u16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x6b,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, vcc_hi +// GFX1250: v_cvt_u16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x6b,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, ttmp15 -// GFX1250: v_cvt_u16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd2,0xd5,0x7b,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, ttmp15 +// GFX1250: v_cvt_u16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd2,0xd5,0x7b,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, m0 -// GFX1250: v_cvt_u16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd2,0xd5,0x7d,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, m0 +// GFX1250: v_cvt_u16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd2,0xd5,0x7d,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, exec_lo -// GFX1250: v_cvt_u16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x7e,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, exec_lo +// GFX1250: v_cvt_u16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd2,0xd5,0x7e,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, exec_hi -// GFX1250: v_cvt_u16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x7f,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, exec_hi +// GFX1250: v_cvt_u16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd2,0xd5,0x7f,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, null -// GFX1250: v_cvt_u16_f16_e64 v5, null ; encoding: [0x05,0x00,0xd2,0xd5,0x7c,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, null +// GFX1250: v_cvt_u16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd2,0xd5,0x7c,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, -1 -// GFX1250: v_cvt_u16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd2,0xd5,0xc1,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, -1 +// GFX1250: v_cvt_u16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd2,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, 0.5 -// GFX1250: v_cvt_u16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xd2,0xd5,0xf0,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, 0.5 +// GFX1250: v_cvt_u16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xd2,0xd5,0xf0,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v5, src_scc -// GFX1250: v_cvt_u16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xd2,0xd5,0xfd,0x00,0x00,0x00] +v_cvt_u16_f16_e64 v5.l, src_scc +// GFX1250: v_cvt_u16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xd2,0xd5,0xfd,0x00,0x00,0x00] -v_cvt_u16_f16_e64 v255, -|0xfe0b| clamp -// GFX1250: v_cvt_u16_f16_e64 v255, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd2,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_cvt_u16_f16_e64 v255.l, -|0xfe0b| clamp +// GFX1250: v_cvt_u16_f16_e64 v255.l, -|0xfe0b| clamp ; encoding: [0xff,0x81,0xd2,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_cvt_u16_f16 v1.l, v128.l // GFX1250: v_cvt_u16_f16_e64 v1.l, v128.l ; 
encoding: [0x01,0x00,0xd2,0xd5,0x80,0x01,0x00,0x00] @@ -1855,11 +1855,11 @@ v_cvt_u32_f64_e64 v5, -|src_scc| v_cvt_u32_f64_e64 v255, 0xaf123456 clamp // GFX1250: v_cvt_u32_f64_e64 v255, 0xaf123456 clamp ; encoding: [0xff,0x80,0x95,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_cvt_u32_u16_e64 v5, v1 -// GFX1250: v_cvt_u32_u16_e64 v5, v1 ; encoding: [0x05,0x00,0xeb,0xd5,0x01,0x01,0x00,0x00] +v_cvt_u32_u16_e64 v5, v1.l +// GFX1250: v_cvt_u32_u16_e64 v5, v1.l ; encoding: [0x05,0x00,0xeb,0xd5,0x01,0x01,0x00,0x00] -v_cvt_u32_u16_e64 v5, v255 -// GFX1250: v_cvt_u32_u16_e64 v5, v255 ; encoding: [0x05,0x00,0xeb,0xd5,0xff,0x01,0x00,0x00] +v_cvt_u32_u16_e64 v5, v255.l +// GFX1250: v_cvt_u32_u16_e64 v5, v255.l ; encoding: [0x05,0x00,0xeb,0xd5,0xff,0x01,0x00,0x00] v_cvt_u32_u16_e64 v5, s1 // GFX1250: v_cvt_u32_u16_e64 v5, s1 ; encoding: [0x05,0x00,0xeb,0xd5,0x01,0x00,0x00,0x00] @@ -1906,50 +1906,50 @@ v_cvt_u32_u16 v1, v128.l v_cvt_u32_u16 v1, v128.h // GFX1250: v_cvt_u32_u16_e64 v1, v128.h op_sel:[1,0] ; encoding: [0x01,0x08,0xeb,0xd5,0x80,0x01,0x00,0x00] -v_exp_f16_e64 v5, v1 -// GFX1250: v_exp_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x01,0x00,0x00] +v_exp_f16_e64 v5.l, v1.l +// GFX1250: v_exp_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x01,0x00,0x00] -v_exp_f16_e64 v5, v255 -// GFX1250: v_exp_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd8,0xd5,0xff,0x01,0x00,0x00] +v_exp_f16_e64 v5.l, v255.l +// GFX1250: v_exp_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd8,0xd5,0xff,0x01,0x00,0x00] -v_exp_f16_e64 v5, s1 -// GFX1250: v_exp_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, s1 +// GFX1250: v_exp_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd8,0xd5,0x01,0x00,0x00,0x00] -v_exp_f16_e64 v5, s105 -// GFX1250: v_exp_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd8,0xd5,0x69,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, s105 +// GFX1250: v_exp_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd8,0xd5,0x69,0x00,0x00,0x00] -v_exp_f16_e64 v5, vcc_lo -// GFX1250: v_exp_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x6a,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, vcc_lo +// GFX1250: v_exp_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x6a,0x00,0x00,0x00] -v_exp_f16_e64 v5, vcc_hi -// GFX1250: v_exp_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x6b,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, vcc_hi +// GFX1250: v_exp_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x6b,0x00,0x00,0x00] -v_exp_f16_e64 v5, ttmp15 -// GFX1250: v_exp_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd8,0xd5,0x7b,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, ttmp15 +// GFX1250: v_exp_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd8,0xd5,0x7b,0x00,0x00,0x00] -v_exp_f16_e64 v5, m0 -// GFX1250: v_exp_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd8,0xd5,0x7d,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, m0 +// GFX1250: v_exp_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd8,0xd5,0x7d,0x00,0x00,0x00] -v_exp_f16_e64 v5, exec_lo -// GFX1250: v_exp_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x7e,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, exec_lo +// GFX1250: v_exp_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd8,0xd5,0x7e,0x00,0x00,0x00] -v_exp_f16_e64 v5, exec_hi -// GFX1250: v_exp_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x7f,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, exec_hi +// GFX1250: v_exp_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd8,0xd5,0x7f,0x00,0x00,0x00] -v_exp_f16_e64 v5, null -// GFX1250: v_exp_f16_e64 v5, null ; encoding: [0x05,0x00,0xd8,0xd5,0x7c,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, null 
+// GFX1250: v_exp_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd8,0xd5,0x7c,0x00,0x00,0x00] -v_exp_f16_e64 v5, -1 -// GFX1250: v_exp_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd8,0xd5,0xc1,0x00,0x00,0x00] +v_exp_f16_e64 v5.l, -1 +// GFX1250: v_exp_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd8,0xd5,0xc1,0x00,0x00,0x00] -v_exp_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_exp_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd8,0xd5,0xf0,0x00,0x00,0x08] +v_exp_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_exp_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd8,0xd5,0xf0,0x00,0x00,0x08] -v_exp_f16_e64 v5, src_scc mul:4 -// GFX1250: v_exp_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd8,0xd5,0xfd,0x00,0x00,0x10] +v_exp_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_exp_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd8,0xd5,0xfd,0x00,0x00,0x10] -v_exp_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_exp_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd8,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_exp_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_exp_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd8,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_exp_f16 v1.h, v128.l // GFX1250: v_exp_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd8,0xd5,0x80,0x01,0x00,0x00] @@ -2137,50 +2137,50 @@ v_ffbl_b32_e64 v5, src_scc v_ffbl_b32_e64 v255, 0xaf123456 // GFX1250: v_ctz_i32_b32_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xba,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_floor_f16_e64 v5, v1 -// GFX1250: v_floor_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x01,0x00,0x00] +v_floor_f16_e64 v5.l, v1.l +// GFX1250: v_floor_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x01,0x00,0x00] -v_floor_f16_e64 v5, v255 -// GFX1250: v_floor_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdb,0xd5,0xff,0x01,0x00,0x00] +v_floor_f16_e64 v5.l, v255.l +// GFX1250: v_floor_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdb,0xd5,0xff,0x01,0x00,0x00] -v_floor_f16_e64 v5, s1 -// GFX1250: v_floor_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, s1 +// GFX1250: v_floor_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdb,0xd5,0x01,0x00,0x00,0x00] -v_floor_f16_e64 v5, s105 -// GFX1250: v_floor_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdb,0xd5,0x69,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, s105 +// GFX1250: v_floor_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdb,0xd5,0x69,0x00,0x00,0x00] -v_floor_f16_e64 v5, vcc_lo -// GFX1250: v_floor_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x6a,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, vcc_lo +// GFX1250: v_floor_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x6a,0x00,0x00,0x00] -v_floor_f16_e64 v5, vcc_hi -// GFX1250: v_floor_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x6b,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, vcc_hi +// GFX1250: v_floor_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x6b,0x00,0x00,0x00] -v_floor_f16_e64 v5, ttmp15 -// GFX1250: v_floor_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdb,0xd5,0x7b,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, ttmp15 +// GFX1250: v_floor_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdb,0xd5,0x7b,0x00,0x00,0x00] -v_floor_f16_e64 v5, m0 -// GFX1250: v_floor_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdb,0xd5,0x7d,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, m0 +// GFX1250: v_floor_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdb,0xd5,0x7d,0x00,0x00,0x00] -v_floor_f16_e64 v5, exec_lo -// GFX1250: v_floor_f16_e64 v5, exec_lo ; encoding: 
[0x05,0x00,0xdb,0xd5,0x7e,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, exec_lo +// GFX1250: v_floor_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdb,0xd5,0x7e,0x00,0x00,0x00] -v_floor_f16_e64 v5, exec_hi -// GFX1250: v_floor_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x7f,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, exec_hi +// GFX1250: v_floor_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdb,0xd5,0x7f,0x00,0x00,0x00] -v_floor_f16_e64 v5, null -// GFX1250: v_floor_f16_e64 v5, null ; encoding: [0x05,0x00,0xdb,0xd5,0x7c,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, null +// GFX1250: v_floor_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdb,0xd5,0x7c,0x00,0x00,0x00] -v_floor_f16_e64 v5, -1 -// GFX1250: v_floor_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdb,0xd5,0xc1,0x00,0x00,0x00] +v_floor_f16_e64 v5.l, -1 +// GFX1250: v_floor_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdb,0xd5,0xc1,0x00,0x00,0x00] -v_floor_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_floor_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdb,0xd5,0xf0,0x00,0x00,0x08] +v_floor_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_floor_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdb,0xd5,0xf0,0x00,0x00,0x08] -v_floor_f16_e64 v5, src_scc mul:4 -// GFX1250: v_floor_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdb,0xd5,0xfd,0x00,0x00,0x10] +v_floor_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_floor_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdb,0xd5,0xfd,0x00,0x00,0x10] -v_floor_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_floor_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdb,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_floor_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_floor_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdb,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_floor_f16 v1.h, v128.l // GFX1250: v_floor_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xdb,0xd5,0x80,0x01,0x00,0x00] @@ -2269,50 +2269,50 @@ v_floor_f64_e64 v[6:7], -|src_scc| mul:4 v_floor_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_floor_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0x9a,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_fract_f16_e64 v5, v1 -// GFX1250: v_fract_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x01,0x00,0x00] +v_fract_f16_e64 v5.l, v1.l +// GFX1250: v_fract_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x01,0x00,0x00] -v_fract_f16_e64 v5, v255 -// GFX1250: v_fract_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdf,0xd5,0xff,0x01,0x00,0x00] +v_fract_f16_e64 v5.l, v255.l +// GFX1250: v_fract_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdf,0xd5,0xff,0x01,0x00,0x00] -v_fract_f16_e64 v5, s1 -// GFX1250: v_fract_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, s1 +// GFX1250: v_fract_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdf,0xd5,0x01,0x00,0x00,0x00] -v_fract_f16_e64 v5, s105 -// GFX1250: v_fract_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdf,0xd5,0x69,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, s105 +// GFX1250: v_fract_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdf,0xd5,0x69,0x00,0x00,0x00] -v_fract_f16_e64 v5, vcc_lo -// GFX1250: v_fract_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x6a,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, vcc_lo +// GFX1250: v_fract_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x6a,0x00,0x00,0x00] -v_fract_f16_e64 v5, vcc_hi -// GFX1250: v_fract_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x6b,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, vcc_hi +// GFX1250: v_fract_f16_e64 
v5.l, vcc_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x6b,0x00,0x00,0x00] -v_fract_f16_e64 v5, ttmp15 -// GFX1250: v_fract_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdf,0xd5,0x7b,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, ttmp15 +// GFX1250: v_fract_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdf,0xd5,0x7b,0x00,0x00,0x00] -v_fract_f16_e64 v5, m0 -// GFX1250: v_fract_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xdf,0xd5,0x7d,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, m0 +// GFX1250: v_fract_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdf,0xd5,0x7d,0x00,0x00,0x00] -v_fract_f16_e64 v5, exec_lo -// GFX1250: v_fract_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x7e,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, exec_lo +// GFX1250: v_fract_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdf,0xd5,0x7e,0x00,0x00,0x00] -v_fract_f16_e64 v5, exec_hi -// GFX1250: v_fract_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x7f,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, exec_hi +// GFX1250: v_fract_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdf,0xd5,0x7f,0x00,0x00,0x00] -v_fract_f16_e64 v5, null -// GFX1250: v_fract_f16_e64 v5, null ; encoding: [0x05,0x00,0xdf,0xd5,0x7c,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, null +// GFX1250: v_fract_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdf,0xd5,0x7c,0x00,0x00,0x00] -v_fract_f16_e64 v5, -1 -// GFX1250: v_fract_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdf,0xd5,0xc1,0x00,0x00,0x00] +v_fract_f16_e64 v5.l, -1 +// GFX1250: v_fract_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdf,0xd5,0xc1,0x00,0x00,0x00] -v_fract_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_fract_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdf,0xd5,0xf0,0x00,0x00,0x08] +v_fract_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_fract_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdf,0xd5,0xf0,0x00,0x00,0x08] -v_fract_f16_e64 v5, src_scc mul:4 -// GFX1250: v_fract_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdf,0xd5,0xfd,0x00,0x00,0x10] +v_fract_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_fract_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdf,0xd5,0xfd,0x00,0x00,0x10] -v_fract_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_fract_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdf,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_fract_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_fract_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdf,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_fract_f16 v1.h, v128.l // GFX1250: v_fract_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xdf,0xd5,0x80,0x01,0x00,0x00] @@ -2401,50 +2401,50 @@ v_fract_f64_e64 v[6:7], -|src_scc| mul:4 v_fract_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_fract_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xbe,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_frexp_exp_i16_f16_e64 v5, v1 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x01,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, v1.l +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x01,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, v255 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xda,0xd5,0xff,0x01,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, v255.l +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xda,0xd5,0xff,0x01,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, s1 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xda,0xd5,0x01,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, s1 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, s1 
; encoding: [0x05,0x00,0xda,0xd5,0x01,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, s105 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xda,0xd5,0x69,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, s105 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xda,0xd5,0x69,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, vcc_lo -// GFX1250: v_frexp_exp_i16_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xda,0xd5,0x6a,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, vcc_lo +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xda,0xd5,0x6a,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, vcc_hi -// GFX1250: v_frexp_exp_i16_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xda,0xd5,0x6b,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, vcc_hi +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xda,0xd5,0x6b,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, ttmp15 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xda,0xd5,0x7b,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, ttmp15 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xda,0xd5,0x7b,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, m0 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xda,0xd5,0x7d,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, m0 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xda,0xd5,0x7d,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, exec_lo -// GFX1250: v_frexp_exp_i16_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xda,0xd5,0x7e,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, exec_lo +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xda,0xd5,0x7e,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, exec_hi -// GFX1250: v_frexp_exp_i16_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xda,0xd5,0x7f,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, exec_hi +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xda,0xd5,0x7f,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, null -// GFX1250: v_frexp_exp_i16_f16_e64 v5, null ; encoding: [0x05,0x00,0xda,0xd5,0x7c,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, null +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xda,0xd5,0x7c,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, -1 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xda,0xd5,0xc1,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, -1 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xda,0xd5,0xc1,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, 0.5 -// GFX1250: v_frexp_exp_i16_f16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xda,0xd5,0xf0,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, 0.5 +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xda,0xd5,0xf0,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v5, src_scc -// GFX1250: v_frexp_exp_i16_f16_e64 v5, src_scc ; encoding: [0x05,0x00,0xda,0xd5,0xfd,0x00,0x00,0x00] +v_frexp_exp_i16_f16_e64 v5.l, src_scc +// GFX1250: v_frexp_exp_i16_f16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xda,0xd5,0xfd,0x00,0x00,0x00] -v_frexp_exp_i16_f16_e64 v255, -|0xfe0b| -// GFX1250: v_frexp_exp_i16_f16_e64 v255, -|0xfe0b| ; encoding: [0xff,0x01,0xda,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] +v_frexp_exp_i16_f16_e64 v255.l, -|0xfe0b| +// GFX1250: v_frexp_exp_i16_f16_e64 v255.l, -|0xfe0b| ; encoding: [0xff,0x01,0xda,0xd5,0xff,0x00,0x00,0x20,0x0b,0xfe,0x00,0x00] v_frexp_exp_i16_f16 v1.h, v128.l // GFX1250: v_frexp_exp_i16_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: 
[0x01,0x40,0xda,0xd5,0x80,0x01,0x00,0x00] @@ -2533,50 +2533,50 @@ v_frexp_exp_i32_f64_e64 v5, -|src_scc| v_frexp_exp_i32_f64_e64 v255, 0xaf123456 // GFX1250: v_frexp_exp_i32_f64_e64 v255, 0xaf123456 ; encoding: [0xff,0x00,0xbc,0xd5,0xff,0x00,0x00,0x00,0x56,0x34,0x12,0xaf] -v_frexp_mant_f16_e64 v5, v1 -// GFX1250: v_frexp_mant_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x01,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, v1.l +// GFX1250: v_frexp_mant_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x01,0x00,0x00] -v_frexp_mant_f16_e64 v5, v255 -// GFX1250: v_frexp_mant_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd9,0xd5,0xff,0x01,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, v255.l +// GFX1250: v_frexp_mant_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd9,0xd5,0xff,0x01,0x00,0x00] -v_frexp_mant_f16_e64 v5, s1 -// GFX1250: v_frexp_mant_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, s1 +// GFX1250: v_frexp_mant_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd9,0xd5,0x01,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, s105 -// GFX1250: v_frexp_mant_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd9,0xd5,0x69,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, s105 +// GFX1250: v_frexp_mant_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd9,0xd5,0x69,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, vcc_lo -// GFX1250: v_frexp_mant_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x6a,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, vcc_lo +// GFX1250: v_frexp_mant_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x6a,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, vcc_hi -// GFX1250: v_frexp_mant_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x6b,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, vcc_hi +// GFX1250: v_frexp_mant_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x6b,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, ttmp15 -// GFX1250: v_frexp_mant_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd9,0xd5,0x7b,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, ttmp15 +// GFX1250: v_frexp_mant_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd9,0xd5,0x7b,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, m0 -// GFX1250: v_frexp_mant_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd9,0xd5,0x7d,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, m0 +// GFX1250: v_frexp_mant_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd9,0xd5,0x7d,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, exec_lo -// GFX1250: v_frexp_mant_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x7e,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, exec_lo +// GFX1250: v_frexp_mant_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd9,0xd5,0x7e,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, exec_hi -// GFX1250: v_frexp_mant_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x7f,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, exec_hi +// GFX1250: v_frexp_mant_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd9,0xd5,0x7f,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, null -// GFX1250: v_frexp_mant_f16_e64 v5, null ; encoding: [0x05,0x00,0xd9,0xd5,0x7c,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, null +// GFX1250: v_frexp_mant_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd9,0xd5,0x7c,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, -1 -// GFX1250: v_frexp_mant_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd9,0xd5,0xc1,0x00,0x00,0x00] +v_frexp_mant_f16_e64 v5.l, -1 +// GFX1250: v_frexp_mant_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd9,0xd5,0xc1,0x00,0x00,0x00] -v_frexp_mant_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_frexp_mant_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd9,0xd5,0xf0,0x00,0x00,0x08] 
+v_frexp_mant_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_frexp_mant_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd9,0xd5,0xf0,0x00,0x00,0x08] -v_frexp_mant_f16_e64 v5, src_scc mul:4 -// GFX1250: v_frexp_mant_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd9,0xd5,0xfd,0x00,0x00,0x10] +v_frexp_mant_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_frexp_mant_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd9,0xd5,0xfd,0x00,0x00,0x10] -v_frexp_mant_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_frexp_mant_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd9,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_frexp_mant_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_frexp_mant_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd9,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_frexp_mant_f16 v1.h, v128.l // GFX1250: v_frexp_mant_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd9,0xd5,0x80,0x01,0x00,0x00] @@ -2665,50 +2665,50 @@ v_frexp_mant_f64_e64 v[6:7], -|src_scc| mul:4 v_frexp_mant_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_frexp_mant_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xbd,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_log_f16_e64 v5, v1 -// GFX1250: v_log_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x01,0x00,0x00] +v_log_f16_e64 v5.l, v1.l +// GFX1250: v_log_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x01,0x00,0x00] -v_log_f16_e64 v5, v255 -// GFX1250: v_log_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd7,0xd5,0xff,0x01,0x00,0x00] +v_log_f16_e64 v5.l, v255.l +// GFX1250: v_log_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd7,0xd5,0xff,0x01,0x00,0x00] -v_log_f16_e64 v5, s1 -// GFX1250: v_log_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x00,0x00,0x00] +v_log_f16_e64 v5.l, s1 +// GFX1250: v_log_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd7,0xd5,0x01,0x00,0x00,0x00] -v_log_f16_e64 v5, s105 -// GFX1250: v_log_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd7,0xd5,0x69,0x00,0x00,0x00] +v_log_f16_e64 v5.l, s105 +// GFX1250: v_log_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd7,0xd5,0x69,0x00,0x00,0x00] -v_log_f16_e64 v5, vcc_lo -// GFX1250: v_log_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x6a,0x00,0x00,0x00] +v_log_f16_e64 v5.l, vcc_lo +// GFX1250: v_log_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x6a,0x00,0x00,0x00] -v_log_f16_e64 v5, vcc_hi -// GFX1250: v_log_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x6b,0x00,0x00,0x00] +v_log_f16_e64 v5.l, vcc_hi +// GFX1250: v_log_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x6b,0x00,0x00,0x00] -v_log_f16_e64 v5, ttmp15 -// GFX1250: v_log_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd7,0xd5,0x7b,0x00,0x00,0x00] +v_log_f16_e64 v5.l, ttmp15 +// GFX1250: v_log_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd7,0xd5,0x7b,0x00,0x00,0x00] -v_log_f16_e64 v5, m0 -// GFX1250: v_log_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd7,0xd5,0x7d,0x00,0x00,0x00] +v_log_f16_e64 v5.l, m0 +// GFX1250: v_log_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd7,0xd5,0x7d,0x00,0x00,0x00] -v_log_f16_e64 v5, exec_lo -// GFX1250: v_log_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x7e,0x00,0x00,0x00] +v_log_f16_e64 v5.l, exec_lo +// GFX1250: v_log_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd7,0xd5,0x7e,0x00,0x00,0x00] -v_log_f16_e64 v5, exec_hi -// GFX1250: v_log_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd7,0xd5,0x7f,0x00,0x00,0x00] +v_log_f16_e64 v5.l, exec_hi +// GFX1250: v_log_f16_e64 v5.l, exec_hi ; encoding: 
[0x05,0x00,0xd7,0xd5,0x7f,0x00,0x00,0x00] -v_log_f16_e64 v5, null -// GFX1250: v_log_f16_e64 v5, null ; encoding: [0x05,0x00,0xd7,0xd5,0x7c,0x00,0x00,0x00] +v_log_f16_e64 v5.l, null +// GFX1250: v_log_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd7,0xd5,0x7c,0x00,0x00,0x00] -v_log_f16_e64 v5, -1 -// GFX1250: v_log_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd7,0xd5,0xc1,0x00,0x00,0x00] +v_log_f16_e64 v5.l, -1 +// GFX1250: v_log_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd7,0xd5,0xc1,0x00,0x00,0x00] -v_log_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_log_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd7,0xd5,0xf0,0x00,0x00,0x08] +v_log_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_log_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd7,0xd5,0xf0,0x00,0x00,0x08] -v_log_f16_e64 v5, src_scc mul:4 -// GFX1250: v_log_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd7,0xd5,0xfd,0x00,0x00,0x10] +v_log_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_log_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd7,0xd5,0xfd,0x00,0x00,0x10] -v_log_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_log_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd7,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_log_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_log_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd7,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_log_f16 v1.h, v128.l // GFX1250: v_log_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd7,0xd5,0x80,0x01,0x00,0x00] @@ -2872,50 +2872,50 @@ v_movrelsd_b32_e64 v255, v255 v_nop_e64 // GFX1250: v_nop ; encoding: [0x00,0x00,0x80,0xd5,0x00,0x00,0x00,0x00] -v_not_b16_e64 v5, v1 -// GFX1250: v_not_b16_e64 v5, v1 ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x01,0x00,0x00] +v_not_b16_e64 v5.l, v1.l +// GFX1250: v_not_b16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x01,0x00,0x00] -v_not_b16_e64 v5, v255 -// GFX1250: v_not_b16_e64 v5, v255 ; encoding: [0x05,0x00,0xe9,0xd5,0xff,0x01,0x00,0x00] +v_not_b16_e64 v5.l, v255.l +// GFX1250: v_not_b16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe9,0xd5,0xff,0x01,0x00,0x00] -v_not_b16_e64 v5, s1 -// GFX1250: v_not_b16_e64 v5, s1 ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x00,0x00,0x00] +v_not_b16_e64 v5.l, s1 +// GFX1250: v_not_b16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe9,0xd5,0x01,0x00,0x00,0x00] -v_not_b16_e64 v5, s105 -// GFX1250: v_not_b16_e64 v5, s105 ; encoding: [0x05,0x00,0xe9,0xd5,0x69,0x00,0x00,0x00] +v_not_b16_e64 v5.l, s105 +// GFX1250: v_not_b16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe9,0xd5,0x69,0x00,0x00,0x00] -v_not_b16_e64 v5, vcc_lo -// GFX1250: v_not_b16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x6a,0x00,0x00,0x00] +v_not_b16_e64 v5.l, vcc_lo +// GFX1250: v_not_b16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x6a,0x00,0x00,0x00] -v_not_b16_e64 v5, vcc_hi -// GFX1250: v_not_b16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x6b,0x00,0x00,0x00] +v_not_b16_e64 v5.l, vcc_hi +// GFX1250: v_not_b16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x6b,0x00,0x00,0x00] -v_not_b16_e64 v5, ttmp15 -// GFX1250: v_not_b16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe9,0xd5,0x7b,0x00,0x00,0x00] +v_not_b16_e64 v5.l, ttmp15 +// GFX1250: v_not_b16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe9,0xd5,0x7b,0x00,0x00,0x00] -v_not_b16_e64 v5, m0 -// GFX1250: v_not_b16_e64 v5, m0 ; encoding: [0x05,0x00,0xe9,0xd5,0x7d,0x00,0x00,0x00] +v_not_b16_e64 v5.l, m0 +// GFX1250: v_not_b16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe9,0xd5,0x7d,0x00,0x00,0x00] -v_not_b16_e64 v5, exec_lo -// GFX1250: v_not_b16_e64 v5, 
exec_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x7e,0x00,0x00,0x00] +v_not_b16_e64 v5.l, exec_lo +// GFX1250: v_not_b16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe9,0xd5,0x7e,0x00,0x00,0x00] -v_not_b16_e64 v5, exec_hi -// GFX1250: v_not_b16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x7f,0x00,0x00,0x00] +v_not_b16_e64 v5.l, exec_hi +// GFX1250: v_not_b16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe9,0xd5,0x7f,0x00,0x00,0x00] -v_not_b16_e64 v5, null -// GFX1250: v_not_b16_e64 v5, null ; encoding: [0x05,0x00,0xe9,0xd5,0x7c,0x00,0x00,0x00] +v_not_b16_e64 v5.l, null +// GFX1250: v_not_b16_e64 v5.l, null ; encoding: [0x05,0x00,0xe9,0xd5,0x7c,0x00,0x00,0x00] -v_not_b16_e64 v5, -1 -// GFX1250: v_not_b16_e64 v5, -1 ; encoding: [0x05,0x00,0xe9,0xd5,0xc1,0x00,0x00,0x00] +v_not_b16_e64 v5.l, -1 +// GFX1250: v_not_b16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe9,0xd5,0xc1,0x00,0x00,0x00] -v_not_b16_e64 v5, 0.5 -// GFX1250: v_not_b16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe9,0xd5,0xf0,0x00,0x00,0x00] +v_not_b16_e64 v5.l, 0.5 +// GFX1250: v_not_b16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe9,0xd5,0xf0,0x00,0x00,0x00] -v_not_b16_e64 v5, src_scc -// GFX1250: v_not_b16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe9,0xd5,0xfd,0x00,0x00,0x00] +v_not_b16_e64 v5.l, src_scc +// GFX1250: v_not_b16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe9,0xd5,0xfd,0x00,0x00,0x00] -v_not_b16_e64 v255, 0xfe0b -// GFX1250: v_not_b16_e64 v255, 0xfe0b ; encoding: [0xff,0x00,0xe9,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00] +v_not_b16_e64 v255.l, 0xfe0b +// GFX1250: v_not_b16_e64 v255.l, 0xfe0b ; encoding: [0xff,0x00,0xe9,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00] v_not_b16 v1.h, v128.l // GFX1250: v_not_b16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xe9,0xd5,0x80,0x01,0x00,0x00] @@ -2971,50 +2971,50 @@ v_not_b32_e64 v255, 0xaf123456 v_pipeflush_e64 // GFX1250: v_pipeflush ; encoding: [0x00,0x00,0x9b,0xd5,0x00,0x00,0x00,0x00] -v_rcp_f16_e64 v5, v1 -// GFX1250: v_rcp_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x01,0x00,0x00] +v_rcp_f16_e64 v5.l, v1.l +// GFX1250: v_rcp_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x01,0x00,0x00] -v_rcp_f16_e64 v5, v255 -// GFX1250: v_rcp_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd4,0xd5,0xff,0x01,0x00,0x00] +v_rcp_f16_e64 v5.l, v255.l +// GFX1250: v_rcp_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd4,0xd5,0xff,0x01,0x00,0x00] -v_rcp_f16_e64 v5, s1 -// GFX1250: v_rcp_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, s1 +// GFX1250: v_rcp_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd4,0xd5,0x01,0x00,0x00,0x00] -v_rcp_f16_e64 v5, s105 -// GFX1250: v_rcp_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd4,0xd5,0x69,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, s105 +// GFX1250: v_rcp_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd4,0xd5,0x69,0x00,0x00,0x00] -v_rcp_f16_e64 v5, vcc_lo -// GFX1250: v_rcp_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x6a,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, vcc_lo +// GFX1250: v_rcp_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x6a,0x00,0x00,0x00] -v_rcp_f16_e64 v5, vcc_hi -// GFX1250: v_rcp_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x6b,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, vcc_hi +// GFX1250: v_rcp_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x6b,0x00,0x00,0x00] -v_rcp_f16_e64 v5, ttmp15 -// GFX1250: v_rcp_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd4,0xd5,0x7b,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, ttmp15 +// GFX1250: v_rcp_f16_e64 v5.l, ttmp15 ; encoding: 
[0x05,0x00,0xd4,0xd5,0x7b,0x00,0x00,0x00] -v_rcp_f16_e64 v5, m0 -// GFX1250: v_rcp_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd4,0xd5,0x7d,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, m0 +// GFX1250: v_rcp_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd4,0xd5,0x7d,0x00,0x00,0x00] -v_rcp_f16_e64 v5, exec_lo -// GFX1250: v_rcp_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x7e,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, exec_lo +// GFX1250: v_rcp_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd4,0xd5,0x7e,0x00,0x00,0x00] -v_rcp_f16_e64 v5, exec_hi -// GFX1250: v_rcp_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x7f,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, exec_hi +// GFX1250: v_rcp_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd4,0xd5,0x7f,0x00,0x00,0x00] -v_rcp_f16_e64 v5, null -// GFX1250: v_rcp_f16_e64 v5, null ; encoding: [0x05,0x00,0xd4,0xd5,0x7c,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, null +// GFX1250: v_rcp_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd4,0xd5,0x7c,0x00,0x00,0x00] -v_rcp_f16_e64 v5, -1 -// GFX1250: v_rcp_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd4,0xd5,0xc1,0x00,0x00,0x00] +v_rcp_f16_e64 v5.l, -1 +// GFX1250: v_rcp_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd4,0xd5,0xc1,0x00,0x00,0x00] -v_rcp_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_rcp_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd4,0xd5,0xf0,0x00,0x00,0x08] +v_rcp_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rcp_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd4,0xd5,0xf0,0x00,0x00,0x08] -v_rcp_f16_e64 v5, src_scc mul:4 -// GFX1250: v_rcp_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd4,0xd5,0xfd,0x00,0x00,0x10] +v_rcp_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rcp_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd4,0xd5,0xfd,0x00,0x00,0x10] -v_rcp_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_rcp_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd4,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_rcp_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_rcp_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd4,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_rcp_f16 v1.h, v128.l // GFX1250: v_rcp_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd4,0xd5,0x80,0x01,0x00,0x00] @@ -3148,50 +3148,50 @@ v_rcp_iflag_f32_e64 v5, src_scc mul:4 v_rcp_iflag_f32_e64 v255, -|0xaf123456| clamp div:2 // GFX1250: v_rcp_iflag_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0xab,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] -v_rndne_f16_e64 v5, v1 -// GFX1250: v_rndne_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x01,0x00,0x00] +v_rndne_f16_e64 v5.l, v1.l +// GFX1250: v_rndne_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x01,0x00,0x00] -v_rndne_f16_e64 v5, v255 -// GFX1250: v_rndne_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xde,0xd5,0xff,0x01,0x00,0x00] +v_rndne_f16_e64 v5.l, v255.l +// GFX1250: v_rndne_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xde,0xd5,0xff,0x01,0x00,0x00] -v_rndne_f16_e64 v5, s1 -// GFX1250: v_rndne_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, s1 +// GFX1250: v_rndne_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xde,0xd5,0x01,0x00,0x00,0x00] -v_rndne_f16_e64 v5, s105 -// GFX1250: v_rndne_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xde,0xd5,0x69,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, s105 +// GFX1250: v_rndne_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xde,0xd5,0x69,0x00,0x00,0x00] -v_rndne_f16_e64 v5, vcc_lo -// GFX1250: v_rndne_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xde,0xd5,0x6a,0x00,0x00,0x00] +v_rndne_f16_e64 
v5.l, vcc_lo +// GFX1250: v_rndne_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xde,0xd5,0x6a,0x00,0x00,0x00] -v_rndne_f16_e64 v5, vcc_hi -// GFX1250: v_rndne_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xde,0xd5,0x6b,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, vcc_hi +// GFX1250: v_rndne_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xde,0xd5,0x6b,0x00,0x00,0x00] -v_rndne_f16_e64 v5, ttmp15 -// GFX1250: v_rndne_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xde,0xd5,0x7b,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, ttmp15 +// GFX1250: v_rndne_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xde,0xd5,0x7b,0x00,0x00,0x00] -v_rndne_f16_e64 v5, m0 -// GFX1250: v_rndne_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xde,0xd5,0x7d,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, m0 +// GFX1250: v_rndne_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xde,0xd5,0x7d,0x00,0x00,0x00] -v_rndne_f16_e64 v5, exec_lo -// GFX1250: v_rndne_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xde,0xd5,0x7e,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, exec_lo +// GFX1250: v_rndne_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xde,0xd5,0x7e,0x00,0x00,0x00] -v_rndne_f16_e64 v5, exec_hi -// GFX1250: v_rndne_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xde,0xd5,0x7f,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, exec_hi +// GFX1250: v_rndne_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xde,0xd5,0x7f,0x00,0x00,0x00] -v_rndne_f16_e64 v5, null -// GFX1250: v_rndne_f16_e64 v5, null ; encoding: [0x05,0x00,0xde,0xd5,0x7c,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, null +// GFX1250: v_rndne_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xde,0xd5,0x7c,0x00,0x00,0x00] -v_rndne_f16_e64 v5, -1 -// GFX1250: v_rndne_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xde,0xd5,0xc1,0x00,0x00,0x00] +v_rndne_f16_e64 v5.l, -1 +// GFX1250: v_rndne_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xde,0xd5,0xc1,0x00,0x00,0x00] -v_rndne_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_rndne_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xde,0xd5,0xf0,0x00,0x00,0x08] +v_rndne_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rndne_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xde,0xd5,0xf0,0x00,0x00,0x08] -v_rndne_f16_e64 v5, src_scc mul:4 -// GFX1250: v_rndne_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xde,0xd5,0xfd,0x00,0x00,0x10] +v_rndne_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rndne_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xde,0xd5,0xfd,0x00,0x00,0x10] -v_rndne_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_rndne_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xde,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_rndne_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_rndne_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xde,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_rndne_f16 v1.h, v128.l // GFX1250: v_rndne_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xde,0xd5,0x80,0x01,0x00,0x00] @@ -3280,50 +3280,50 @@ v_rndne_f64_e64 v[6:7], -|src_scc| mul:4 v_rndne_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_rndne_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0x99,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_rsq_f16_e64 v5, v1 -// GFX1250: v_rsq_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x01,0x00,0x00] +v_rsq_f16_e64 v5.l, v1.l +// GFX1250: v_rsq_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x01,0x00,0x00] -v_rsq_f16_e64 v5, v255 -// GFX1250: v_rsq_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd6,0xd5,0xff,0x01,0x00,0x00] +v_rsq_f16_e64 v5.l, v255.l +// GFX1250: v_rsq_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd6,0xd5,0xff,0x01,0x00,0x00] 
-v_rsq_f16_e64 v5, s1 -// GFX1250: v_rsq_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, s1 +// GFX1250: v_rsq_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd6,0xd5,0x01,0x00,0x00,0x00] -v_rsq_f16_e64 v5, s105 -// GFX1250: v_rsq_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd6,0xd5,0x69,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, s105 +// GFX1250: v_rsq_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd6,0xd5,0x69,0x00,0x00,0x00] -v_rsq_f16_e64 v5, vcc_lo -// GFX1250: v_rsq_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x6a,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, vcc_lo +// GFX1250: v_rsq_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x6a,0x00,0x00,0x00] -v_rsq_f16_e64 v5, vcc_hi -// GFX1250: v_rsq_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x6b,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, vcc_hi +// GFX1250: v_rsq_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x6b,0x00,0x00,0x00] -v_rsq_f16_e64 v5, ttmp15 -// GFX1250: v_rsq_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd6,0xd5,0x7b,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, ttmp15 +// GFX1250: v_rsq_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd6,0xd5,0x7b,0x00,0x00,0x00] -v_rsq_f16_e64 v5, m0 -// GFX1250: v_rsq_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd6,0xd5,0x7d,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, m0 +// GFX1250: v_rsq_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd6,0xd5,0x7d,0x00,0x00,0x00] -v_rsq_f16_e64 v5, exec_lo -// GFX1250: v_rsq_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x7e,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, exec_lo +// GFX1250: v_rsq_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd6,0xd5,0x7e,0x00,0x00,0x00] -v_rsq_f16_e64 v5, exec_hi -// GFX1250: v_rsq_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x7f,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, exec_hi +// GFX1250: v_rsq_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xd6,0xd5,0x7f,0x00,0x00,0x00] -v_rsq_f16_e64 v5, null -// GFX1250: v_rsq_f16_e64 v5, null ; encoding: [0x05,0x00,0xd6,0xd5,0x7c,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, null +// GFX1250: v_rsq_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd6,0xd5,0x7c,0x00,0x00,0x00] -v_rsq_f16_e64 v5, -1 -// GFX1250: v_rsq_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd6,0xd5,0xc1,0x00,0x00,0x00] +v_rsq_f16_e64 v5.l, -1 +// GFX1250: v_rsq_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd6,0xd5,0xc1,0x00,0x00,0x00] -v_rsq_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_rsq_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd6,0xd5,0xf0,0x00,0x00,0x08] +v_rsq_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rsq_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd6,0xd5,0xf0,0x00,0x00,0x08] -v_rsq_f16_e64 v5, src_scc mul:4 -// GFX1250: v_rsq_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd6,0xd5,0xfd,0x00,0x00,0x10] +v_rsq_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rsq_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd6,0xd5,0xfd,0x00,0x00,0x10] -v_rsq_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_rsq_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd6,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_rsq_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_rsq_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd6,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_rsq_f16 v1.h, v128.l // GFX1250: v_rsq_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd6,0xd5,0x80,0x01,0x00,0x00] @@ -3412,50 +3412,50 @@ v_rsq_f64_e64 v[6:7], -|src_scc| mul:4 v_rsq_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_rsq_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: 
[0xfe,0x80,0xb1,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_sat_pk_u8_i16_e64 v5, v1 -// GFX1250: v_sat_pk_u8_i16_e64 v5, v1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x01,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, v1 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, v1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x01,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, v255 -// GFX1250: v_sat_pk_u8_i16_e64 v5, v255 ; encoding: [0x05,0x00,0xe2,0xd5,0xff,0x01,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, v255 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, v255 ; encoding: [0x05,0x00,0xe2,0xd5,0xff,0x01,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, s1 -// GFX1250: v_sat_pk_u8_i16_e64 v5, s1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, s1 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe2,0xd5,0x01,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, s105 -// GFX1250: v_sat_pk_u8_i16_e64 v5, s105 ; encoding: [0x05,0x00,0xe2,0xd5,0x69,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, s105 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe2,0xd5,0x69,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, vcc_lo -// GFX1250: v_sat_pk_u8_i16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x6a,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, vcc_lo +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x6a,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, vcc_hi -// GFX1250: v_sat_pk_u8_i16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x6b,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, vcc_hi +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x6b,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, ttmp15 -// GFX1250: v_sat_pk_u8_i16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe2,0xd5,0x7b,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, ttmp15 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe2,0xd5,0x7b,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, m0 -// GFX1250: v_sat_pk_u8_i16_e64 v5, m0 ; encoding: [0x05,0x00,0xe2,0xd5,0x7d,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, m0 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe2,0xd5,0x7d,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, exec_lo -// GFX1250: v_sat_pk_u8_i16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x7e,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, exec_lo +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe2,0xd5,0x7e,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, exec_hi -// GFX1250: v_sat_pk_u8_i16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x7f,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, exec_hi +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe2,0xd5,0x7f,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, null -// GFX1250: v_sat_pk_u8_i16_e64 v5, null ; encoding: [0x05,0x00,0xe2,0xd5,0x7c,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, null +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, null ; encoding: [0x05,0x00,0xe2,0xd5,0x7c,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, -1 -// GFX1250: v_sat_pk_u8_i16_e64 v5, -1 ; encoding: [0x05,0x00,0xe2,0xd5,0xc1,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, -1 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe2,0xd5,0xc1,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, 0.5 -// GFX1250: v_sat_pk_u8_i16_e64 v5, 0.5 ; encoding: [0x05,0x00,0xe2,0xd5,0xf0,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 v5.l, 0.5 +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, 0.5 ; encoding: [0x05,0x00,0xe2,0xd5,0xf0,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v5, src_scc -// GFX1250: v_sat_pk_u8_i16_e64 v5, src_scc ; encoding: [0x05,0x00,0xe2,0xd5,0xfd,0x00,0x00,0x00] +v_sat_pk_u8_i16_e64 
v5.l, src_scc +// GFX1250: v_sat_pk_u8_i16_e64 v5.l, src_scc ; encoding: [0x05,0x00,0xe2,0xd5,0xfd,0x00,0x00,0x00] -v_sat_pk_u8_i16_e64 v255, 0xfe0b -// GFX1250: v_sat_pk_u8_i16_e64 v255, 0xfe0b ; encoding: [0xff,0x00,0xe2,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00] +v_sat_pk_u8_i16_e64 v255.l, 0xfe0b +// GFX1250: v_sat_pk_u8_i16_e64 v255.l, 0xfe0b ; encoding: [0xff,0x00,0xe2,0xd5,0xff,0x00,0x00,0x00,0x0b,0xfe,0x00,0x00] v_sat_pk_u8_i16 v128.l, v1 // GFX1250: v_sat_pk_u8_i16_e64 v128.l, v1 ; encoding: [0x80,0x00,0xe2,0xd5,0x01,0x01,0x00,0x00] @@ -3463,50 +3463,50 @@ v_sat_pk_u8_i16 v128.l, v1 v_sat_pk_u8_i16 v128.h, v1 // GFX1250: v_sat_pk_u8_i16_e64 v128.h, v1 op_sel:[0,1] ; encoding: [0x80,0x40,0xe2,0xd5,0x01,0x01,0x00,0x00] -v_sin_f16_e64 v5, v1 -// GFX1250: v_sin_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x01,0x00,0x00] +v_sin_f16_e64 v5.l, v1.l +// GFX1250: v_sin_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x01,0x00,0x00] -v_sin_f16_e64 v5, v255 -// GFX1250: v_sin_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xe0,0xd5,0xff,0x01,0x00,0x00] +v_sin_f16_e64 v5.l, v255.l +// GFX1250: v_sin_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xe0,0xd5,0xff,0x01,0x00,0x00] -v_sin_f16_e64 v5, s1 -// GFX1250: v_sin_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, s1 +// GFX1250: v_sin_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xe0,0xd5,0x01,0x00,0x00,0x00] -v_sin_f16_e64 v5, s105 -// GFX1250: v_sin_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xe0,0xd5,0x69,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, s105 +// GFX1250: v_sin_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xe0,0xd5,0x69,0x00,0x00,0x00] -v_sin_f16_e64 v5, vcc_lo -// GFX1250: v_sin_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x6a,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, vcc_lo +// GFX1250: v_sin_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x6a,0x00,0x00,0x00] -v_sin_f16_e64 v5, vcc_hi -// GFX1250: v_sin_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x6b,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, vcc_hi +// GFX1250: v_sin_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x6b,0x00,0x00,0x00] -v_sin_f16_e64 v5, ttmp15 -// GFX1250: v_sin_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xe0,0xd5,0x7b,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, ttmp15 +// GFX1250: v_sin_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xe0,0xd5,0x7b,0x00,0x00,0x00] -v_sin_f16_e64 v5, m0 -// GFX1250: v_sin_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xe0,0xd5,0x7d,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, m0 +// GFX1250: v_sin_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xe0,0xd5,0x7d,0x00,0x00,0x00] -v_sin_f16_e64 v5, exec_lo -// GFX1250: v_sin_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x7e,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, exec_lo +// GFX1250: v_sin_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xe0,0xd5,0x7e,0x00,0x00,0x00] -v_sin_f16_e64 v5, exec_hi -// GFX1250: v_sin_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x7f,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, exec_hi +// GFX1250: v_sin_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xe0,0xd5,0x7f,0x00,0x00,0x00] -v_sin_f16_e64 v5, null -// GFX1250: v_sin_f16_e64 v5, null ; encoding: [0x05,0x00,0xe0,0xd5,0x7c,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, null +// GFX1250: v_sin_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xe0,0xd5,0x7c,0x00,0x00,0x00] -v_sin_f16_e64 v5, -1 -// GFX1250: v_sin_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xe0,0xd5,0xc1,0x00,0x00,0x00] +v_sin_f16_e64 v5.l, -1 +// GFX1250: v_sin_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xe0,0xd5,0xc1,0x00,0x00,0x00] 
-v_sin_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_sin_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xe0,0xd5,0xf0,0x00,0x00,0x08] +v_sin_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_sin_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xe0,0xd5,0xf0,0x00,0x00,0x08] -v_sin_f16_e64 v5, src_scc mul:4 -// GFX1250: v_sin_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xe0,0xd5,0xfd,0x00,0x00,0x10] +v_sin_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_sin_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xe0,0xd5,0xfd,0x00,0x00,0x10] -v_sin_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_sin_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe0,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_sin_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_sin_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xe0,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_sin_f16 v1.h, v128.l // GFX1250: v_sin_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xe0,0xd5,0x80,0x01,0x00,0x00] @@ -3559,50 +3559,50 @@ v_sin_f32_e64 v5, src_scc mul:4 v_sin_f32_e64 v255, -|0xaf123456| clamp div:2 // GFX1250: v_sin_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0xb5,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] -v_sqrt_f16_e64 v5, v1 -// GFX1250: v_sqrt_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x01,0x00,0x00] +v_sqrt_f16_e64 v5.l, v1.l +// GFX1250: v_sqrt_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x01,0x00,0x00] -v_sqrt_f16_e64 v5, v255 -// GFX1250: v_sqrt_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xd5,0xd5,0xff,0x01,0x00,0x00] +v_sqrt_f16_e64 v5.l, v255.l +// GFX1250: v_sqrt_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xd5,0xd5,0xff,0x01,0x00,0x00] -v_sqrt_f16_e64 v5, s1 -// GFX1250: v_sqrt_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, s1 +// GFX1250: v_sqrt_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xd5,0xd5,0x01,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, s105 -// GFX1250: v_sqrt_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xd5,0xd5,0x69,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, s105 +// GFX1250: v_sqrt_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xd5,0xd5,0x69,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, vcc_lo -// GFX1250: v_sqrt_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x6a,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, vcc_lo +// GFX1250: v_sqrt_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x6a,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, vcc_hi -// GFX1250: v_sqrt_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x6b,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, vcc_hi +// GFX1250: v_sqrt_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x6b,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, ttmp15 -// GFX1250: v_sqrt_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xd5,0xd5,0x7b,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, ttmp15 +// GFX1250: v_sqrt_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xd5,0xd5,0x7b,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, m0 -// GFX1250: v_sqrt_f16_e64 v5, m0 ; encoding: [0x05,0x00,0xd5,0xd5,0x7d,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, m0 +// GFX1250: v_sqrt_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xd5,0xd5,0x7d,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, exec_lo -// GFX1250: v_sqrt_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x7e,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, exec_lo +// GFX1250: v_sqrt_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xd5,0xd5,0x7e,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, exec_hi -// GFX1250: v_sqrt_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x7f,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, exec_hi +// GFX1250: v_sqrt_f16_e64 
v5.l, exec_hi ; encoding: [0x05,0x00,0xd5,0xd5,0x7f,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, null -// GFX1250: v_sqrt_f16_e64 v5, null ; encoding: [0x05,0x00,0xd5,0xd5,0x7c,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, null +// GFX1250: v_sqrt_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xd5,0xd5,0x7c,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, -1 -// GFX1250: v_sqrt_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xd5,0xd5,0xc1,0x00,0x00,0x00] +v_sqrt_f16_e64 v5.l, -1 +// GFX1250: v_sqrt_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xd5,0xd5,0xc1,0x00,0x00,0x00] -v_sqrt_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_sqrt_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xd5,0xd5,0xf0,0x00,0x00,0x08] +v_sqrt_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_sqrt_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xd5,0xd5,0xf0,0x00,0x00,0x08] -v_sqrt_f16_e64 v5, src_scc mul:4 -// GFX1250: v_sqrt_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xd5,0xd5,0xfd,0x00,0x00,0x10] +v_sqrt_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_sqrt_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xd5,0xd5,0xfd,0x00,0x00,0x10] -v_sqrt_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_sqrt_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd5,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_sqrt_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_sqrt_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xd5,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_sqrt_f16 v1.h, v128.l // GFX1250: v_sqrt_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xd5,0xd5,0x80,0x01,0x00,0x00] @@ -3691,50 +3691,50 @@ v_sqrt_f64_e64 v[6:7], -|src_scc| mul:4 v_sqrt_f64_e64 v[254:255], 0xaf123456 clamp div:2 // GFX1250: v_sqrt_f64_e64 v[254:255], 0xaf123456 clamp div:2 ; encoding: [0xfe,0x80,0xb4,0xd5,0xff,0x00,0x00,0x18,0x56,0x34,0x12,0xaf] -v_trunc_f16_e64 v5, v1 -// GFX1250: v_trunc_f16_e64 v5, v1 ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x01,0x00,0x00] +v_trunc_f16_e64 v5.l, v1.l +// GFX1250: v_trunc_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x01,0x00,0x00] -v_trunc_f16_e64 v5, v255 -// GFX1250: v_trunc_f16_e64 v5, v255 ; encoding: [0x05,0x00,0xdd,0xd5,0xff,0x01,0x00,0x00] +v_trunc_f16_e64 v5.l, v255.l +// GFX1250: v_trunc_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xdd,0xd5,0xff,0x01,0x00,0x00] -v_trunc_f16_e64 v5, s1 -// GFX1250: v_trunc_f16_e64 v5, s1 ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, s1 +// GFX1250: v_trunc_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xdd,0xd5,0x01,0x00,0x00,0x00] -v_trunc_f16_e64 v5, s105 -// GFX1250: v_trunc_f16_e64 v5, s105 ; encoding: [0x05,0x00,0xdd,0xd5,0x69,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, s105 +// GFX1250: v_trunc_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xdd,0xd5,0x69,0x00,0x00,0x00] -v_trunc_f16_e64 v5, vcc_lo -// GFX1250: v_trunc_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x6a,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, vcc_lo +// GFX1250: v_trunc_f16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x6a,0x00,0x00,0x00] -v_trunc_f16_e64 v5, vcc_hi -// GFX1250: v_trunc_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x6b,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, vcc_hi +// GFX1250: v_trunc_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x6b,0x00,0x00,0x00] -v_trunc_f16_e64 v5, ttmp15 -// GFX1250: v_trunc_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xdd,0xd5,0x7b,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, ttmp15 +// GFX1250: v_trunc_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xdd,0xd5,0x7b,0x00,0x00,0x00] -v_trunc_f16_e64 v5, m0 -// GFX1250: v_trunc_f16_e64 v5, m0 ; 
encoding: [0x05,0x00,0xdd,0xd5,0x7d,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, m0 +// GFX1250: v_trunc_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xdd,0xd5,0x7d,0x00,0x00,0x00] -v_trunc_f16_e64 v5, exec_lo -// GFX1250: v_trunc_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x7e,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, exec_lo +// GFX1250: v_trunc_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xdd,0xd5,0x7e,0x00,0x00,0x00] -v_trunc_f16_e64 v5, exec_hi -// GFX1250: v_trunc_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x7f,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, exec_hi +// GFX1250: v_trunc_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xdd,0xd5,0x7f,0x00,0x00,0x00] -v_trunc_f16_e64 v5, null -// GFX1250: v_trunc_f16_e64 v5, null ; encoding: [0x05,0x00,0xdd,0xd5,0x7c,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, null +// GFX1250: v_trunc_f16_e64 v5.l, null ; encoding: [0x05,0x00,0xdd,0xd5,0x7c,0x00,0x00,0x00] -v_trunc_f16_e64 v5, -1 -// GFX1250: v_trunc_f16_e64 v5, -1 ; encoding: [0x05,0x00,0xdd,0xd5,0xc1,0x00,0x00,0x00] +v_trunc_f16_e64 v5.l, -1 +// GFX1250: v_trunc_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xdd,0xd5,0xc1,0x00,0x00,0x00] -v_trunc_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_trunc_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xdd,0xd5,0xf0,0x00,0x00,0x08] +v_trunc_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_trunc_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xdd,0xd5,0xf0,0x00,0x00,0x08] -v_trunc_f16_e64 v5, src_scc mul:4 -// GFX1250: v_trunc_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xdd,0xd5,0xfd,0x00,0x00,0x10] +v_trunc_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_trunc_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xdd,0xd5,0xfd,0x00,0x00,0x10] -v_trunc_f16_e64 v255, -|0xfe0b| clamp div:2 -// GFX1250: v_trunc_f16_e64 v255, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdd,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] +v_trunc_f16_e64 v255.l, -|0xfe0b| clamp div:2 +// GFX1250: v_trunc_f16_e64 v255.l, -|0xfe0b| clamp div:2 ; encoding: [0xff,0x81,0xdd,0xd5,0xff,0x00,0x00,0x38,0x0b,0xfe,0x00,0x00] v_trunc_f16 v1.h, v128.l // GFX1250: v_trunc_f16_e64 v1.h, v128.l op_sel:[0,1] ; encoding: [0x01,0x40,0xdd,0xd5,0x80,0x01,0x00,0x00] @@ -3868,98 +3868,98 @@ v_tanh_f32_e64 v5, src_scc mul:4 v_tanh_f32_e64 v255, -|0xaf123456| clamp div:2 // GFX1250: v_tanh_f32_e64 v255, -|0xaf123456| clamp div:2 ; encoding: [0xff,0x81,0x9e,0xd5,0xff,0x00,0x00,0x38,0x56,0x34,0x12,0xaf] -v_tanh_f16_e64 v5, v1 -// GFX1250: v_tanh_f16_e64 v5, v1 ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x01,0x00,0x00] +v_tanh_f16_e64 v5.l, v1.l +// GFX1250: v_tanh_f16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x01,0x00,0x00] -v_tanh_f16_e64 v5, v255 -// GFX1250: v_tanh_f16_e64 v5, v255 ; encoding: [0x05,0x00,0x9f,0xd5,0xff,0x01,0x00,0x00] +v_tanh_f16_e64 v5.l, v255.l +// GFX1250: v_tanh_f16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0x9f,0xd5,0xff,0x01,0x00,0x00] -v_tanh_f16_e64 v5, s1 -// GFX1250: v_tanh_f16_e64 v5, s1 ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, s1 +// GFX1250: v_tanh_f16_e64 v5.l, s1 ; encoding: [0x05,0x00,0x9f,0xd5,0x01,0x00,0x00,0x00] -v_tanh_f16_e64 v5, s105 -// GFX1250: v_tanh_f16_e64 v5, s105 ; encoding: [0x05,0x00,0x9f,0xd5,0x69,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, s105 +// GFX1250: v_tanh_f16_e64 v5.l, s105 ; encoding: [0x05,0x00,0x9f,0xd5,0x69,0x00,0x00,0x00] -v_tanh_f16_e64 v5, vcc_lo -// GFX1250: v_tanh_f16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x6a,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, vcc_lo +// GFX1250: v_tanh_f16_e64 v5.l, vcc_lo ; encoding: 
[0x05,0x00,0x9f,0xd5,0x6a,0x00,0x00,0x00] -v_tanh_f16_e64 v5, vcc_hi -// GFX1250: v_tanh_f16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x6b,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, vcc_hi +// GFX1250: v_tanh_f16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x6b,0x00,0x00,0x00] -v_tanh_f16_e64 v5, ttmp15 -// GFX1250: v_tanh_f16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0x9f,0xd5,0x7b,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, ttmp15 +// GFX1250: v_tanh_f16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0x9f,0xd5,0x7b,0x00,0x00,0x00] -v_tanh_f16_e64 v5, m0 -// GFX1250: v_tanh_f16_e64 v5, m0 ; encoding: [0x05,0x00,0x9f,0xd5,0x7d,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, m0 +// GFX1250: v_tanh_f16_e64 v5.l, m0 ; encoding: [0x05,0x00,0x9f,0xd5,0x7d,0x00,0x00,0x00] -v_tanh_f16_e64 v5, exec_lo -// GFX1250: v_tanh_f16_e64 v5, exec_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x7e,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, exec_lo +// GFX1250: v_tanh_f16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0x9f,0xd5,0x7e,0x00,0x00,0x00] -v_tanh_f16_e64 v5, exec_hi -// GFX1250: v_tanh_f16_e64 v5, exec_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x7f,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, exec_hi +// GFX1250: v_tanh_f16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0x9f,0xd5,0x7f,0x00,0x00,0x00] -v_tanh_f16_e64 v5, null -// GFX1250: v_tanh_f16_e64 v5, null ; encoding: [0x05,0x00,0x9f,0xd5,0x7c,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, null +// GFX1250: v_tanh_f16_e64 v5.l, null ; encoding: [0x05,0x00,0x9f,0xd5,0x7c,0x00,0x00,0x00] -v_tanh_f16_e64 v5, -1 -// GFX1250: v_tanh_f16_e64 v5, -1 ; encoding: [0x05,0x00,0x9f,0xd5,0xc1,0x00,0x00,0x00] +v_tanh_f16_e64 v5.l, -1 +// GFX1250: v_tanh_f16_e64 v5.l, -1 ; encoding: [0x05,0x00,0x9f,0xd5,0xc1,0x00,0x00,0x00] -v_tanh_f16_e64 v5, 0.5 mul:2 -// GFX1250: v_tanh_f16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0x9f,0xd5,0xf0,0x00,0x00,0x08] +v_tanh_f16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_tanh_f16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0x9f,0xd5,0xf0,0x00,0x00,0x08] -v_tanh_f16_e64 v5, src_scc mul:4 -// GFX1250: v_tanh_f16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0x9f,0xd5,0xfd,0x00,0x00,0x10] +v_tanh_f16_e64 v5.l, src_scc mul:4 +// GFX1250: v_tanh_f16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0x9f,0xd5,0xfd,0x00,0x00,0x10] -v_tanh_f16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_tanh_f16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0x9f,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_tanh_f16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_tanh_f16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0x9f,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_tanh_f16 v5.l, v128.h // GFX1250: v_tanh_f16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0x9f,0xd5,0x80,0x01,0x00,0x00] -v_tanh_bf16_e64 v5, v1 -// GFX1250: v_tanh_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x01,0x00,0x00] +v_tanh_bf16_e64 v5.l, v1.l +// GFX1250: v_tanh_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x01,0x00,0x00] -v_tanh_bf16_e64 v5, v255 -// GFX1250: v_tanh_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xca,0xd5,0xff,0x01,0x00,0x00] +v_tanh_bf16_e64 v5.l, v255.l +// GFX1250: v_tanh_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xca,0xd5,0xff,0x01,0x00,0x00] -v_tanh_bf16_e64 v5, s1 -// GFX1250: v_tanh_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, s1 +// GFX1250: v_tanh_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xca,0xd5,0x01,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, s105 -// GFX1250: v_tanh_bf16_e64 v5, s105 ; encoding: 
[0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, s105 +// GFX1250: v_tanh_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, vcc_lo -// GFX1250: v_tanh_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xca,0xd5,0x6a,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, vcc_lo +// GFX1250: v_tanh_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xca,0xd5,0x6a,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, vcc_hi -// GFX1250: v_tanh_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xca,0xd5,0x6b,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, vcc_hi +// GFX1250: v_tanh_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xca,0xd5,0x6b,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, ttmp15 -// GFX1250: v_tanh_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, ttmp15 +// GFX1250: v_tanh_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, m0 -// GFX1250: v_tanh_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xca,0xd5,0x7d,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, m0 +// GFX1250: v_tanh_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xca,0xd5,0x7d,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, exec_lo -// GFX1250: v_tanh_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xca,0xd5,0x7e,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, exec_lo +// GFX1250: v_tanh_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xca,0xd5,0x7e,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, exec_hi -// GFX1250: v_tanh_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, exec_hi +// GFX1250: v_tanh_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, null -// GFX1250: v_tanh_bf16_e64 v5, null ; encoding: [0x05,0x00,0xca,0xd5,0x7c,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, null +// GFX1250: v_tanh_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xca,0xd5,0x7c,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, -1 -// GFX1250: v_tanh_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00] +v_tanh_bf16_e64 v5.l, -1 +// GFX1250: v_tanh_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00] -v_tanh_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_tanh_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08] +v_tanh_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_tanh_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08] -v_tanh_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_tanh_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10] +v_tanh_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_tanh_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10] -v_tanh_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_tanh_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_tanh_bf16 v5.l, v128.h // GFX1250: v_tanh_bf16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0xca,0xd5,0x80,0x01,0x00,0x00] @@ -4000,347 +4000,347 @@ v_prng_b32_e64 v5, null v_prng_b32_e64 v5, -1 // GFX1250: v_prng_b32_e64 v5, -1 ; encoding: [0x05,0x00,0xcb,0xd5,0xc1,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, v1 -// GFX1250: v_rcp_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x01,0x00,0x00] +v_rcp_bf16_e64 v5.l, v1.l +// GFX1250: v_rcp_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x01,0x00,0x00] -v_rcp_bf16_e64 v5, 
v255 -// GFX1250: v_rcp_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xf9,0xd5,0xff,0x01,0x00,0x00] +v_rcp_bf16_e64 v5.l, v255.l +// GFX1250: v_rcp_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xf9,0xd5,0xff,0x01,0x00,0x00] -v_rcp_bf16_e64 v5, s1 -// GFX1250: v_rcp_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, s1 +// GFX1250: v_rcp_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xf9,0xd5,0x01,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, s105 -// GFX1250: v_rcp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, s105 +// GFX1250: v_rcp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, vcc_lo -// GFX1250: v_rcp_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x6a,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, vcc_lo +// GFX1250: v_rcp_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x6a,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, vcc_hi -// GFX1250: v_rcp_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x6b,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, vcc_hi +// GFX1250: v_rcp_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x6b,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, ttmp15 -// GFX1250: v_rcp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, ttmp15 +// GFX1250: v_rcp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, m0 -// GFX1250: v_rcp_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xf9,0xd5,0x7d,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, m0 +// GFX1250: v_rcp_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xf9,0xd5,0x7d,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, exec_lo -// GFX1250: v_rcp_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x7e,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, exec_lo +// GFX1250: v_rcp_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xf9,0xd5,0x7e,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, exec_hi -// GFX1250: v_rcp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, exec_hi +// GFX1250: v_rcp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, null -// GFX1250: v_rcp_bf16_e64 v5, null ; encoding: [0x05,0x00,0xf9,0xd5,0x7c,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, null +// GFX1250: v_rcp_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xf9,0xd5,0x7c,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, -1 -// GFX1250: v_rcp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00] +v_rcp_bf16_e64 v5.l, -1 +// GFX1250: v_rcp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00] -v_rcp_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_rcp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08] +v_rcp_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rcp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08] -v_rcp_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_rcp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10] +v_rcp_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rcp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10] -v_rcp_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_rcp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_rcp_bf16 v5.h, v128.h // GFX1250: v_rcp_bf16_e64 v5.h, v128.h 
op_sel:[1,1] ; encoding: [0x05,0x48,0xf9,0xd5,0x80,0x01,0x00,0x00] -v_sqrt_bf16_e64 v5, v1 -// GFX1250: v_sqrt_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x01,0x00,0x00] +v_sqrt_bf16_e64 v5.l, v1.l +// GFX1250: v_sqrt_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x01,0x00,0x00] -v_sqrt_bf16_e64 v5, v255 -// GFX1250: v_sqrt_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfa,0xd5,0xff,0x01,0x00,0x00] +v_sqrt_bf16_e64 v5.l, v255.l +// GFX1250: v_sqrt_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfa,0xd5,0xff,0x01,0x00,0x00] -v_sqrt_bf16_e64 v5, s1 -// GFX1250: v_sqrt_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, s1 +// GFX1250: v_sqrt_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, s105 -// GFX1250: v_sqrt_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, s105 +// GFX1250: v_sqrt_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, vcc_lo -// GFX1250: v_sqrt_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x6a,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, vcc_lo +// GFX1250: v_sqrt_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x6a,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, vcc_hi -// GFX1250: v_sqrt_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x6b,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, vcc_hi +// GFX1250: v_sqrt_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x6b,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, ttmp15 -// GFX1250: v_sqrt_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, ttmp15 +// GFX1250: v_sqrt_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, m0 -// GFX1250: v_sqrt_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfa,0xd5,0x7d,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, m0 +// GFX1250: v_sqrt_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfa,0xd5,0x7d,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, exec_lo -// GFX1250: v_sqrt_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x7e,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, exec_lo +// GFX1250: v_sqrt_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfa,0xd5,0x7e,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, exec_hi -// GFX1250: v_sqrt_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, exec_hi +// GFX1250: v_sqrt_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, null -// GFX1250: v_sqrt_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfa,0xd5,0x7c,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, null +// GFX1250: v_sqrt_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfa,0xd5,0x7c,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, -1 -// GFX1250: v_sqrt_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00] +v_sqrt_bf16_e64 v5.l, -1 +// GFX1250: v_sqrt_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00] -v_sqrt_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_sqrt_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08] +v_sqrt_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_sqrt_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08] -v_sqrt_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_sqrt_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10] +v_sqrt_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_sqrt_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10] -v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2 
-// GFX1250: v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_sqrt_bf16 v5.h, v128.h // GFX1250: v_sqrt_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfa,0xd5,0x80,0x01,0x00,0x00] -v_rsq_bf16_e64 v5, v1 -// GFX1250: v_rsq_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x01,0x00,0x00] +v_rsq_bf16_e64 v5.l, v1.l +// GFX1250: v_rsq_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x01,0x00,0x00] -v_rsq_bf16_e64 v5, v255 -// GFX1250: v_rsq_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfb,0xd5,0xff,0x01,0x00,0x00] +v_rsq_bf16_e64 v5.l, v255.l +// GFX1250: v_rsq_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfb,0xd5,0xff,0x01,0x00,0x00] -v_rsq_bf16_e64 v5, s1 -// GFX1250: v_rsq_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, s1 +// GFX1250: v_rsq_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, s105 -// GFX1250: v_rsq_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, s105 +// GFX1250: v_rsq_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, vcc_lo -// GFX1250: v_rsq_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x6a,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, vcc_lo +// GFX1250: v_rsq_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x6a,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, vcc_hi -// GFX1250: v_rsq_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x6b,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, vcc_hi +// GFX1250: v_rsq_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x6b,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, ttmp15 -// GFX1250: v_rsq_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, ttmp15 +// GFX1250: v_rsq_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, m0 -// GFX1250: v_rsq_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfb,0xd5,0x7d,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, m0 +// GFX1250: v_rsq_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfb,0xd5,0x7d,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, exec_lo -// GFX1250: v_rsq_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x7e,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, exec_lo +// GFX1250: v_rsq_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfb,0xd5,0x7e,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, exec_hi -// GFX1250: v_rsq_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, exec_hi +// GFX1250: v_rsq_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, null -// GFX1250: v_rsq_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfb,0xd5,0x7c,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, null +// GFX1250: v_rsq_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfb,0xd5,0x7c,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, -1 -// GFX1250: v_rsq_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00] +v_rsq_bf16_e64 v5.l, -1 +// GFX1250: v_rsq_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00] -v_rsq_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_rsq_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08] +v_rsq_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_rsq_bf16_e64 v5.l, 0.5 mul:2 ; encoding: 
[0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08] -v_rsq_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_rsq_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10] +v_rsq_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_rsq_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10] -v_rsq_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_rsq_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_rsq_bf16 v5.h, v128.h // GFX1250: v_rsq_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfb,0xd5,0x80,0x01,0x00,0x00] -v_log_bf16_e64 v5, v1 -// GFX1250: v_log_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x01,0x00,0x00] +v_log_bf16_e64 v5.l, v1.l +// GFX1250: v_log_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x01,0x00,0x00] -v_log_bf16_e64 v5, v255 -// GFX1250: v_log_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfc,0xd5,0xff,0x01,0x00,0x00] +v_log_bf16_e64 v5.l, v255.l +// GFX1250: v_log_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfc,0xd5,0xff,0x01,0x00,0x00] -v_log_bf16_e64 v5, s1 -// GFX1250: v_log_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, s1 +// GFX1250: v_log_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x00,0x00,0x00] -v_log_bf16_e64 v5, s105 -// GFX1250: v_log_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, s105 +// GFX1250: v_log_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00] -v_log_bf16_e64 v5, vcc_lo -// GFX1250: v_log_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x6a,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, vcc_lo +// GFX1250: v_log_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x6a,0x00,0x00,0x00] -v_log_bf16_e64 v5, vcc_hi -// GFX1250: v_log_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x6b,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, vcc_hi +// GFX1250: v_log_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x6b,0x00,0x00,0x00] -v_log_bf16_e64 v5, ttmp15 -// GFX1250: v_log_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, ttmp15 +// GFX1250: v_log_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00] -v_log_bf16_e64 v5, m0 -// GFX1250: v_log_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfc,0xd5,0x7d,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, m0 +// GFX1250: v_log_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfc,0xd5,0x7d,0x00,0x00,0x00] -v_log_bf16_e64 v5, exec_lo -// GFX1250: v_log_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x7e,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, exec_lo +// GFX1250: v_log_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfc,0xd5,0x7e,0x00,0x00,0x00] -v_log_bf16_e64 v5, exec_hi -// GFX1250: v_log_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, exec_hi +// GFX1250: v_log_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00] -v_log_bf16_e64 v5, null -// GFX1250: v_log_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfc,0xd5,0x7c,0x00,0x00,0x00] +v_log_bf16_e64 v5.l, null +// GFX1250: v_log_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfc,0xd5,0x7c,0x00,0x00,0x00] -v_log_bf16_e64 v5, -1 -// GFX1250: v_log_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00] +v_log_bf16_e64 
v5.l, -1 +// GFX1250: v_log_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00] -v_log_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_log_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08] +v_log_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_log_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08] -v_log_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_log_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10] +v_log_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_log_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10] -v_log_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_log_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_log_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_log_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_log_bf16 v5.h, v128.h // GFX1250: v_log_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfc,0xd5,0x80,0x01,0x00,0x00] -v_exp_bf16_e64 v5, v1 -// GFX1250: v_exp_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x01,0x00,0x00] +v_exp_bf16_e64 v5.l, v1.l +// GFX1250: v_exp_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x01,0x00,0x00] -v_exp_bf16_e64 v5, v255 -// GFX1250: v_exp_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfd,0xd5,0xff,0x01,0x00,0x00] +v_exp_bf16_e64 v5.l, v255.l +// GFX1250: v_exp_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfd,0xd5,0xff,0x01,0x00,0x00] -v_exp_bf16_e64 v5, s1 -// GFX1250: v_exp_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, s1 +// GFX1250: v_exp_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x00,0x00,0x00] -v_exp_bf16_e64 v5, s105 -// GFX1250: v_exp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, s105 +// GFX1250: v_exp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00] -v_exp_bf16_e64 v5, vcc_lo -// GFX1250: v_exp_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x6a,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, vcc_lo +// GFX1250: v_exp_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x6a,0x00,0x00,0x00] -v_exp_bf16_e64 v5, vcc_hi -// GFX1250: v_exp_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x6b,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, vcc_hi +// GFX1250: v_exp_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x6b,0x00,0x00,0x00] -v_exp_bf16_e64 v5, ttmp15 -// GFX1250: v_exp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, ttmp15 +// GFX1250: v_exp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00] -v_exp_bf16_e64 v5, m0 -// GFX1250: v_exp_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfd,0xd5,0x7d,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, m0 +// GFX1250: v_exp_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfd,0xd5,0x7d,0x00,0x00,0x00] -v_exp_bf16_e64 v5, exec_lo -// GFX1250: v_exp_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x7e,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, exec_lo +// GFX1250: v_exp_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xfd,0xd5,0x7e,0x00,0x00,0x00] -v_exp_bf16_e64 v5, exec_hi -// GFX1250: v_exp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, exec_hi +// GFX1250: v_exp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00] -v_exp_bf16_e64 v5, null -// GFX1250: 
v_exp_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfd,0xd5,0x7c,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, null +// GFX1250: v_exp_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfd,0xd5,0x7c,0x00,0x00,0x00] -v_exp_bf16_e64 v5, -1 -// GFX1250: v_exp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00] +v_exp_bf16_e64 v5.l, -1 +// GFX1250: v_exp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00] -v_exp_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_exp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08] +v_exp_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_exp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08] -v_exp_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_exp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10] +v_exp_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_exp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10] -v_exp_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_exp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_exp_bf16 v5.h, v128.h // GFX1250: v_exp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfd,0xd5,0x80,0x01,0x00,0x00] -v_sin_bf16_e64 v5, v1 -// GFX1250: v_sin_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x01,0x00,0x00] +v_sin_bf16_e64 v5.l, v1.l +// GFX1250: v_sin_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x01,0x00,0x00] -v_sin_bf16_e64 v5, v255 -// GFX1250: v_sin_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xfe,0xd5,0xff,0x01,0x00,0x00] +v_sin_bf16_e64 v5.l, v255.l +// GFX1250: v_sin_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xfe,0xd5,0xff,0x01,0x00,0x00] -v_sin_bf16_e64 v5, s1 -// GFX1250: v_sin_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, s1 +// GFX1250: v_sin_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x00,0x00,0x00] -v_sin_bf16_e64 v5, s105 -// GFX1250: v_sin_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, s105 +// GFX1250: v_sin_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00] -v_sin_bf16_e64 v5, vcc_lo -// GFX1250: v_sin_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x6a,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, vcc_lo +// GFX1250: v_sin_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x6a,0x00,0x00,0x00] -v_sin_bf16_e64 v5, vcc_hi -// GFX1250: v_sin_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x6b,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, vcc_hi +// GFX1250: v_sin_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x6b,0x00,0x00,0x00] -v_sin_bf16_e64 v5, ttmp15 -// GFX1250: v_sin_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, ttmp15 +// GFX1250: v_sin_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00] -v_sin_bf16_e64 v5, m0 -// GFX1250: v_sin_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xfe,0xd5,0x7d,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, m0 +// GFX1250: v_sin_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xfe,0xd5,0x7d,0x00,0x00,0x00] -v_sin_bf16_e64 v5, exec_lo -// GFX1250: v_sin_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xfe,0xd5,0x7e,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, exec_lo +// GFX1250: v_sin_bf16_e64 v5.l, exec_lo ; encoding: 
[0x05,0x00,0xfe,0xd5,0x7e,0x00,0x00,0x00] -v_sin_bf16_e64 v5, exec_hi -// GFX1250: v_sin_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, exec_hi +// GFX1250: v_sin_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00] -v_sin_bf16_e64 v5, null -// GFX1250: v_sin_bf16_e64 v5, null ; encoding: [0x05,0x00,0xfe,0xd5,0x7c,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, null +// GFX1250: v_sin_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xfe,0xd5,0x7c,0x00,0x00,0x00] -v_sin_bf16_e64 v5, -1 -// GFX1250: v_sin_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00] +v_sin_bf16_e64 v5.l, -1 +// GFX1250: v_sin_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00] -v_sin_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_sin_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08] +v_sin_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_sin_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08] -v_sin_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_sin_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10] +v_sin_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_sin_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10] -v_sin_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_sin_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_sin_bf16 v5.h, v128.h // GFX1250: v_sin_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfe,0xd5,0x80,0x01,0x00,0x00] -v_cos_bf16_e64 v5, v1 -// GFX1250: v_cos_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x01,0x00,0x00] +v_cos_bf16_e64 v5.l, v1.l +// GFX1250: v_cos_bf16_e64 v5.l, v1.l ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x01,0x00,0x00] -v_cos_bf16_e64 v5, v255 -// GFX1250: v_cos_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xff,0xd5,0xff,0x01,0x00,0x00] +v_cos_bf16_e64 v5.l, v255.l +// GFX1250: v_cos_bf16_e64 v5.l, v255.l ; encoding: [0x05,0x00,0xff,0xd5,0xff,0x01,0x00,0x00] -v_cos_bf16_e64 v5, s1 -// GFX1250: v_cos_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, s1 +// GFX1250: v_cos_bf16_e64 v5.l, s1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x00,0x00,0x00] -v_cos_bf16_e64 v5, s105 -// GFX1250: v_cos_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, s105 +// GFX1250: v_cos_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00] -v_cos_bf16_e64 v5, vcc_lo -// GFX1250: v_cos_bf16_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xff,0xd5,0x6a,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, vcc_lo +// GFX1250: v_cos_bf16_e64 v5.l, vcc_lo ; encoding: [0x05,0x00,0xff,0xd5,0x6a,0x00,0x00,0x00] -v_cos_bf16_e64 v5, vcc_hi -// GFX1250: v_cos_bf16_e64 v5, vcc_hi ; encoding: [0x05,0x00,0xff,0xd5,0x6b,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, vcc_hi +// GFX1250: v_cos_bf16_e64 v5.l, vcc_hi ; encoding: [0x05,0x00,0xff,0xd5,0x6b,0x00,0x00,0x00] -v_cos_bf16_e64 v5, ttmp15 -// GFX1250: v_cos_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, ttmp15 +// GFX1250: v_cos_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00] -v_cos_bf16_e64 v5, m0 -// GFX1250: v_cos_bf16_e64 v5, m0 ; encoding: [0x05,0x00,0xff,0xd5,0x7d,0x00,0x00,0x00] 
+v_cos_bf16_e64 v5.l, m0 +// GFX1250: v_cos_bf16_e64 v5.l, m0 ; encoding: [0x05,0x00,0xff,0xd5,0x7d,0x00,0x00,0x00] -v_cos_bf16_e64 v5, exec_lo -// GFX1250: v_cos_bf16_e64 v5, exec_lo ; encoding: [0x05,0x00,0xff,0xd5,0x7e,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, exec_lo +// GFX1250: v_cos_bf16_e64 v5.l, exec_lo ; encoding: [0x05,0x00,0xff,0xd5,0x7e,0x00,0x00,0x00] -v_cos_bf16_e64 v5, exec_hi -// GFX1250: v_cos_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, exec_hi +// GFX1250: v_cos_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00] -v_cos_bf16_e64 v5, null -// GFX1250: v_cos_bf16_e64 v5, null ; encoding: [0x05,0x00,0xff,0xd5,0x7c,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, null +// GFX1250: v_cos_bf16_e64 v5.l, null ; encoding: [0x05,0x00,0xff,0xd5,0x7c,0x00,0x00,0x00] -v_cos_bf16_e64 v5, -1 -// GFX1250: v_cos_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00] +v_cos_bf16_e64 v5.l, -1 +// GFX1250: v_cos_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00] -v_cos_bf16_e64 v5, 0.5 mul:2 -// GFX1250: v_cos_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08] +v_cos_bf16_e64 v5.l, 0.5 mul:2 +// GFX1250: v_cos_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08] -v_cos_bf16_e64 v5, src_scc mul:4 -// GFX1250: v_cos_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10] +v_cos_bf16_e64 v5.l, src_scc mul:4 +// GFX1250: v_cos_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10] -v_cos_bf16_e64 v255, -|0x8000| clamp div:2 -// GFX1250: v_cos_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] +v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2 +// GFX1250: v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00] v_cos_bf16_e64 v5.h, v128.h // GFX1250: v_cos_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xff,0xd5,0x80,0x01,0x00,0x00] -v_cvt_f32_bf16_e64 v5, v1 -// GFX1250: v_cvt_f32_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f32_bf16_e64 v5, v1.l +// GFX1250: v_cvt_f32_bf16_e64 v5, v1.l ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f32_bf16_e64 v5, v255 -// GFX1250: v_cvt_f32_bf16_e64 v5, v255 ; encoding: [0x05,0x00,0xf2,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f32_bf16_e64 v5, v255.l +// GFX1250: v_cvt_f32_bf16_e64 v5, v255.l ; encoding: [0x05,0x00,0xf2,0xd5,0xff,0x01,0x00,0x00] v_cvt_f32_bf16_e64 v5, s1 // GFX1250: v_cvt_f32_bf16_e64 v5, s1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x00,0x00,0x00] @@ -4372,11 +4372,11 @@ v_cvt_f32_bf16_e64 v5, null v_cvt_f32_bf16_e64 v5, -1 // GFX1250: v_cvt_f32_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf2,0xd5,0xc1,0x00,0x00,0x00] -v_cvt_f32_bf16_e64 v5, v1 op_sel:[1] -// GFX1250: v_cvt_f32_bf16_e64 v5, v1 op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0x01,0x01,0x00,0x00] +v_cvt_f32_bf16_e64 v5, v1.h op_sel:[1,0] +// GFX1250: v_cvt_f32_bf16_e64 v5, v1.h op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0x01,0x01,0x00,0x00] -v_cvt_f32_bf16_e64 v5, v255 op_sel:[1] -// GFX1250: v_cvt_f32_bf16_e64 v5, v255 op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0xff,0x01,0x00,0x00] +v_cvt_f32_bf16_e64 v5, v255.h op_sel:[1,0] +// GFX1250: v_cvt_f32_bf16_e64 v5, v255.h op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0xff,0x01,0x00,0x00] v_cvt_f32_bf16_e64 v5, s1 op_sel:[1] // GFX1250: v_cvt_f32_bf16_e64 v5, s1 
op_sel:[1,0] ; encoding: [0x05,0x08,0xf2,0xd5,0x01,0x00,0x00,0x00] @@ -4492,32 +4492,32 @@ v_cvt_pk_f16_fp8 v1, v150 op_sel:[1] v_cvt_pk_f16_fp8 v1, s2 op_sel:[1] // GFX1250: v_cvt_pk_f16_fp8 v1, s2 op_sel:[1,0] ; encoding: [0x01,0x08,0xf5,0xd5,0x02,0x00,0x00,0x00] -v_sat_pk4_i4_i8 v150, v2 -// GFX1250: v_sat_pk4_i4_i8_e64 v150, v2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x01,0x00,0x00] +v_sat_pk4_i4_i8 v150.l, v2 +// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, v2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x01,0x00,0x00] -v_sat_pk4_i4_i8 v150, s2 -// GFX1250: v_sat_pk4_i4_i8_e64 v150, s2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x00,0x00,0x00] +v_sat_pk4_i4_i8 v150.l, s2 +// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, s2 ; encoding: [0x96,0x00,0xf3,0xd5,0x02,0x00,0x00,0x00] -v_sat_pk4_i4_i8 v150, 2 -// GFX1250: v_sat_pk4_i4_i8_e64 v150, 2 ; encoding: [0x96,0x00,0xf3,0xd5,0x82,0x00,0x00,0x00] +v_sat_pk4_i4_i8 v150.l, 2 +// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, 2 ; encoding: [0x96,0x00,0xf3,0xd5,0x82,0x00,0x00,0x00] -v_sat_pk4_i4_i8 v150, 0x1234 -// GFX1250: v_sat_pk4_i4_i8_e64 v150, 0x1234 ; encoding: [0x96,0x00,0xf3,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00] +v_sat_pk4_i4_i8 v150.l, 0x1234 +// GFX1250: v_sat_pk4_i4_i8_e64 v150.l, 0x1234 ; encoding: [0x96,0x00,0xf3,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00] v_sat_pk4_i4_i8 v150.h, v2 // GFX1250: v_sat_pk4_i4_i8_e64 v150.h, v2 op_sel:[0,1] ; encoding: [0x96,0x40,0xf3,0xd5,0x02,0x01,0x00,0x00] -v_sat_pk4_u4_u8 v150, v2 -// GFX1250: v_sat_pk4_u4_u8_e64 v150, v2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x01,0x00,0x00] +v_sat_pk4_u4_u8 v150.l, v2 +// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, v2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x01,0x00,0x00] -v_sat_pk4_u4_u8 v150, s2 -// GFX1250: v_sat_pk4_u4_u8_e64 v150, s2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x00,0x00,0x00] +v_sat_pk4_u4_u8 v150.l, s2 +// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, s2 ; encoding: [0x96,0x00,0xf4,0xd5,0x02,0x00,0x00,0x00] -v_sat_pk4_u4_u8 v150, 2 -// GFX1250: v_sat_pk4_u4_u8_e64 v150, 2 ; encoding: [0x96,0x00,0xf4,0xd5,0x82,0x00,0x00,0x00] +v_sat_pk4_u4_u8 v150.l, 2 +// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, 2 ; encoding: [0x96,0x00,0xf4,0xd5,0x82,0x00,0x00,0x00] -v_sat_pk4_u4_u8 v150, 0x1234 -// GFX1250: v_sat_pk4_u4_u8_e64 v150, 0x1234 ; encoding: [0x96,0x00,0xf4,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00] +v_sat_pk4_u4_u8 v150.l, 0x1234 +// GFX1250: v_sat_pk4_u4_u8_e64 v150.l, 0x1234 ; encoding: [0x96,0x00,0xf4,0xd5,0xff,0x00,0x00,0x00,0x34,0x12,0x00,0x00] v_sat_pk4_u4_u8 v150.h, v2 // GFX1250: v_sat_pk4_u4_u8_e64 v150.h, v2 op_sel:[0,1] ; encoding: [0x96,0x40,0xf4,0xd5,0x02,0x01,0x00,0x00] diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s index f14705fa9143c..d1638565a386a 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s @@ -58,120 +58,120 @@ v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask // GFX1250: v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x9e,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_tanh_f16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: 
v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_tanh_f16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_mirror -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_ror:1 -// GFX1250: 
v_tanh_f16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_tanh_f16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_tanh_f16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_tanh_f16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_tanh_f16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x9f,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x9f,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_f16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: 
v_tanh_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, 
v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] @@ -222,468 +222,468 @@ v_prng_b32_e64_dpp v5, v1 
row_share:0 row_mask:0xf bank_mask:0xf // GFX1250: v_prng_b32_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xcb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not 
supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 
bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rcp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] 
+v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 
; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sqrt_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: 
:[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not 
supported on this GPU -v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rsq_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_log_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] 
+v_log_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: 
v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_log_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_exp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction 
not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 
bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_exp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_sin_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l 
row_shl:1 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: 
:[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sin_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_cos_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_mirror +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_half_mirror +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU 
-v_cos_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_shl:1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_shl:15 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_shr:1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_shr:15 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_ror:1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_ror:15 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] +v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 
row_mask:0x0 bank_mask:0x1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] +v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 -// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] +v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1 +// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cos_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0] // GFX1250: v_cos_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xff,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[3,2,1,0] +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[0,1,2,3] +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_mirror -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_mirror +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x40,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_half_mirror -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l 
row_half_mirror +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x41,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:1 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:1 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x01,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:15 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:15 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x0f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:1 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:1 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x11,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:15 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:15 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:1 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:1 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x21,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:15 -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:15 +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x2f,0x01,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] +v_cvt_f32_bf16_e64_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff] // GFX12-ERR: 
:[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f32_bf16_e64_dpp v5, v128.h quad_perm:[3,2,1,0] @@ -766,24 +766,24 @@ v_cvt_pk_f16_fp8 v1, v128.h quad_perm:[0,1,2,3] // GFX1250: v_cvt_pk_f16_fp8_e64_dpp v1, v128.h op_sel:[1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x08,0xf5,0xd5,0xfa,0x00,0x00,0x00,0x80,0xe4,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf -// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] +v_sat_pk4_i4_i8 v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf +// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v150, v2 row_share:1 fi:1 -// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff] +v_sat_pk4_i4_i8 v150.l, v2 row_share:1 fi:1 +// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_i4_i8 v150.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf // GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.h, v2 op_sel:[0,1] quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x40,0xf3,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf -// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] +v_sat_pk4_u4_u8 v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf +// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v150, v2 row_share:1 fi:1 -// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff] +v_sat_pk4_u4_u8 v150.l, v2 row_share:1 fi:1 +// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 row_share:1 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xfa,0x00,0x00,0x00,0x02,0x51,0x05,0xff] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_u4_u8 v150.h, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s index 0414421f0a906..6ec4d5f48f8b1 100644 --- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s @@ -18,40 +18,40 @@ v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 // GFX1250: v_tanh_f32_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x9e,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this 
GPU -v_tanh_f16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_f16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_tanh_f16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_tanh_f16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_tanh_f16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_tanh_f16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_tanh_f16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x9f,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_tanh_f16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x9f,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_tanh_f16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x9f,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_f16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] 
fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_tanh_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] @@ -62,140 +62,140 @@ v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xcb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rcp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: 
v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sqrt_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: 
v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_rsq_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_log_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 
dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_exp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: 
[0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sin_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] +v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] +v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 -// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] +v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0 +// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cos_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0] @@ -262,8 +262,8 @@ v_cvt_f16_fp8 v128.l, v2 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_f16_fp8_e64_dpp v128.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x80,0x00,0xf7,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf2,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] +v_cvt_f32_bf16_e64_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf2,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_cvt_f32_bf16_e64_dpp v5, v128.h dpp8:[7,6,5,4,3,2,1,0] @@ -298,24 +298,24 @@ v_cvt_pk_f16_fp8 v1, v128.h dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_cvt_pk_f16_fp8_e64_dpp v1, v128.h op_sel:[1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x08,0xf5,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported 
on this GPU -v_sat_pk4_i4_i8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf3,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] +v_sat_pk4_i4_i8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf3,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_i4_i8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05] +v_sat_pk4_i4_i8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf3,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_i4_i8 v150.h, v2 dpp8:[7,6,5,4,3,2,1,0] // GFX1250: v_sat_pk4_i4_i8_e64_dpp v150.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x40,0xf3,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] -// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf4,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] +v_sat_pk4_u4_u8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] +// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x96,0x00,0xf4,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU -v_sat_pk4_u4_u8 v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 -// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05] +v_sat_pk4_u4_u8 v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 +// GFX1250: v_sat_pk4_u4_u8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf4,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05] // GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU v_sat_pk4_u4_u8 v150.h, v2 dpp8:[7,6,5,4,3,2,1,0] diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_sopk.s b/llvm/test/MC/AMDGPU/gfx12_asm_sopk.s index 4e3e725a00556..819ecb866c5ae 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_sopk.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_sopk.s @@ -193,19 +193,19 @@ s_call_b64 vcc, 0x1234 s_call_b64 null, 0x1234 // GFX12: encoding: [0x34,0x12,0x7c,0xba] -s_getreg_b32 s0, hwreg(HW_REG_MODE) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_MODE) // GFX12: encoding: [0x01,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_STATUS) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_STATUS) // GFX12: encoding: [0x02,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_STATE_PRIV) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_STATE_PRIV) // GFX12: encoding: [0x04,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_GPR_ALLOC) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_GPR_ALLOC) // GFX12: encoding: [0x05,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_LDS_ALLOC) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_LDS_ALLOC) // GFX12: encoding: [0x06,0xf8,0x80,0xb8] s_getreg_b32 s0, hwreg(HW_REG_IB_STS) @@ -226,31 +226,31 @@ s_getreg_b32 s0, hwreg(HW_REG_PERF_SNAPSHOT_DATA1) s_getreg_b32 s0, hwreg(HW_REG_PERF_SNAPSHOT_DATA2) // GFX12: encoding: [0x10,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_EXCP_FLAG_PRIV) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV) // GFX12: encoding: [0x11,0xf8,0x80,0xb8] 
-s_getreg_b32 s0, hwreg(HW_REG_EXCP_FLAG_USER) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER) // GFX12: encoding: [0x12,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_TRAP_CTRL) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_TRAP_CTRL) // GFX12: encoding: [0x13,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_SCRATCH_BASE_LO) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO) // GFX12: encoding: [0x14,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_SCRATCH_BASE_HI) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI) // GFX12: encoding: [0x15,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_HW_ID1) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID1) // GFX12: encoding: [0x17,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_HW_ID2) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2) // GFX12: encoding: [0x18,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_DVGPR_ALLOC_LO) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_DVGPR_ALLOC_LO) // GFX12: encoding: [0x1f,0xf8,0x80,0xb8] -s_getreg_b32 s0, hwreg(HW_REG_DVGPR_ALLOC_HI) +s_getreg_b32 s0, hwreg(HW_REG_WAVE_DVGPR_ALLOC_HI) // GFX12: encoding: [0x20,0xf8,0x80,0xb8] s_getreg_b32 s0, hwreg(HW_REG_SHADER_CYCLES_LO) diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_sopk_alias.s b/llvm/test/MC/AMDGPU/gfx12_asm_sopk_alias.s index 4a25922f956d3..bd265938170f1 100644 --- a/llvm/test/MC/AMDGPU/gfx12_asm_sopk_alias.s +++ b/llvm/test/MC/AMDGPU/gfx12_asm_sopk_alias.s @@ -1,4 +1,46 @@ // RUN: llvm-mc -triple=amdgcn -show-encoding -mcpu=gfx1200 %s | FileCheck --check-prefix=GFX12 %s s_addk_i32 s0, 0x1234 -// GFX12: s_addk_co_i32 s0, 0x1234 ; encoding: [0x34,0x12,0x80,0xb7] +// GFX12: s_addk_co_i32 s0, 0x1234 ; encoding: [0x34,0x12,0x80,0xb7] + +s_getreg_b32 s0, hwreg(HW_REG_MODE) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_MODE) ; encoding: [0x01,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_STATUS) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_STATUS) ; encoding: [0x02,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_STATE_PRIV) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_STATE_PRIV) ; encoding: [0x04,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_GPR_ALLOC) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_GPR_ALLOC) ; encoding: [0x05,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_LDS_ALLOC) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_LDS_ALLOC) ; encoding: [0x06,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_EXCP_FLAG_PRIV) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV) ; encoding: [0x11,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_EXCP_FLAG_USER) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER) ; encoding: [0x12,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_TRAP_CTRL) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_TRAP_CTRL) ; encoding: [0x13,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_SCRATCH_BASE_LO) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO) ; encoding: [0x14,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_SCRATCH_BASE_HI) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI) ; encoding: [0x15,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_HW_ID1) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID1) ; encoding: [0x17,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_HW_ID2) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2) ; encoding: [0x18,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_DVGPR_ALLOC_LO) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_DVGPR_ALLOC_LO) ; encoding: [0x1f,0xf8,0x80,0xb8] + +s_getreg_b32 s0, hwreg(HW_REG_DVGPR_ALLOC_HI) +// GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_DVGPR_ALLOC_HI) ; 
encoding: [0x20,0xf8,0x80,0xb8] diff --git a/llvm/test/MC/AMDGPU/wave_any.s b/llvm/test/MC/AMDGPU/wave_any.s index 27502eff89bfc..3c265db30a324 100644 --- a/llvm/test/MC/AMDGPU/wave_any.s +++ b/llvm/test/MC/AMDGPU/wave_any.s @@ -1,13 +1,14 @@ +// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 6 // RUN: llvm-mc -triple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,+wavefrontsize64 -show-encoding %s | FileCheck --check-prefix=GFX10 %s v_cmp_ge_i32_e32 s0, v0 -// GFX10: v_cmp_ge_i32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x0c,0x7d] +// GFX10: v_cmp_ge_i32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x0c,0x7d] v_cmp_ge_i32_e32 vcc_lo, s0, v1 -// GFX10: v_cmp_ge_i32_e32 vcc_lo, s0, v1 ; encoding: [0x00,0x02,0x0c,0x7d] +// GFX10: v_cmp_ge_i32_e32 vcc_lo, s0, v1 ; encoding: [0x00,0x02,0x0c,0x7d] v_cmp_ge_i32_e32 vcc, s0, v2 -// GFX10: v_cmp_ge_i32_e32 vcc_lo, s0, v2 ; encoding: [0x00,0x04,0x0c,0x7d] +// GFX10: v_cmp_ge_i32_e32 vcc_lo, s0, v2 ; encoding: [0x00,0x04,0x0c,0x7d] v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD // GFX10: v_cmp_le_f16_sdwa s0, v3, v4 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06] @@ -16,10 +17,10 @@ v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD // GFX10: v_cmp_le_f16_sdwa s[0:1], v3, v4 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x08,0x96,0x7d,0x03,0x80,0x05,0x06] v_cmp_class_f32_e32 vcc_lo, s0, v0 -// GFX10: v_cmp_class_f32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d] +// GFX10: v_cmp_class_f32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d] v_cmp_class_f32_e32 vcc, s0, v0 -// GFX10: v_cmp_class_f32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d] +// GFX10: v_cmp_class_f32_e32 vcc_lo, s0, v0 ; encoding: [0x00,0x00,0x10,0x7d] v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD // GFX10: v_cmp_class_f16_sdwa vcc_lo, v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x00,0x06,0x06] @@ -34,13 +35,13 @@ v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD // GFX10: v_cmp_class_f16_sdwa s[0:1], v1, v2 src0_sel:DWORD src1_sel:DWORD ; encoding: [0xf9,0x04,0x1e,0x7d,0x01,0x80,0x06,0x06] v_cndmask_b32_e32 v1, v2, v3, -// GFX10: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] +// GFX10: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] v_cndmask_b32_e32 v1, v2, v3, vcc_lo -// GFX10: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] +// GFX10: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] v_cndmask_b32_e32 v1, v2, v3, vcc -// GFX10: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] +// GFX10: v_cndmask_b32_e32 v1, v2, v3, vcc_lo ; encoding: [0x02,0x07,0x02,0x02] v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo // GFX10: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v4, vcc_lo ; encoding: [0x03,0x09,0x06,0x50] @@ -127,61 +128,61 @@ v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 ban // GFX10: v_subrev_co_ci_u32_dpp v5, vcc, v1, v2, vcc quad_perm:[0,1,2,3] row_mask:0x0 bank_mask:0x0 ; encoding: [0xfa,0x04,0x0a,0x54,0x01,0xe4,0x00,0x00] v_add_co_u32 v0, s0, v0, v2 -// GFX10: v_add_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_add_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] v_add_co_u32_e64 v0, s0, v0, v2 -// GFX10: v_add_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] 
+// GFX10: v_add_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 -// GFX10: v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00] +// GFX10: v_add_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00] v_sub_co_u32 v0, s0, v0, v2 -// GFX10: v_sub_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_sub_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] v_sub_co_u32_e64 v0, s0, v0, v2 -// GFX10: v_sub_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_sub_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 -// GFX10: v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00] +// GFX10: v_sub_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00] v_subrev_co_u32 v0, s0, v0, v2 -// GFX10: v_subrev_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_subrev_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] v_subrev_co_u32_e64 v0, s0, v0, v2 -// GFX10: v_subrev_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_subrev_co_u32 v0, s0, v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2 // GFX10: v_subrev_co_ci_u32_e64 v4, s0, v1, v5, s2 ; encoding: [0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00] v_add_co_u32 v0, s[0:1], v0, v2 -// GFX10: v_add_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_add_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] v_add_co_u32 v0, exec, v0, v2 -// GFX10: v_add_co_u32 v0, exec, v0, v2 ; encoding: [0x00,0x7e,0x0f,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_add_co_u32 v0, exec, v0, v2 ; encoding: [0x00,0x7e,0x0f,0xd7,0x00,0x05,0x02,0x00] v_add_co_u32 v0, exec_lo, v0, v2 -// GFX10: v_add_co_u32 v0, exec_lo, v0, v2 ; encoding: [0x00,0x7e,0x0f,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_add_co_u32 v0, exec_lo, v0, v2 ; encoding: [0x00,0x7e,0x0f,0xd7,0x00,0x05,0x02,0x00] v_add_co_u32_e64 v0, s[0:1], v0, v2 -// GFX10: v_add_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_add_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x0f,0xd7,0x00,0x05,0x02,0x00] v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] // GFX10: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0x0a,0x00] v_sub_co_u32 v0, s[0:1], v0, v2 -// GFX10: v_sub_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_sub_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] v_sub_co_u32_e64 v0, s[0:1], v0, v2 -// GFX10: v_sub_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_sub_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x10,0xd7,0x00,0x05,0x02,0x00] v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] // GFX10: v_sub_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x29,0xd5,0x01,0x0b,0x0a,0x00] v_subrev_co_u32 v0, s[0:1], v0, v2 -// GFX10: v_subrev_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_subrev_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] v_subrev_co_u32_e64 v0, 
s[0:1], v0, v2 -// GFX10: v_subrev_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] +// GFX10: v_subrev_co_u32 v0, s[0:1], v0, v2 ; encoding: [0x00,0x00,0x19,0xd7,0x00,0x05,0x02,0x00] v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] // GFX10: v_subrev_co_ci_u32_e64 v4, s[0:1], v1, v5, s[2:3] ; encoding: [0x04,0x00,0x2a,0xd5,0x01,0x0b,0x0a,0x00] @@ -199,10 +200,10 @@ v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc // GFX10: v_add_co_ci_u32_e64 v4, s[0:1], v1, v5, vcc ; encoding: [0x04,0x00,0x28,0xd5,0x01,0x0b,0xaa,0x01] v_div_scale_f32 v2, s2, v0, v0, v2 -// GFX10: v_div_scale_f32 v2, s2, v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04] +// GFX10: v_div_scale_f32 v2, s2, v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04] v_div_scale_f32 v2, s[2:3], v0, v0, v2 -// GFX10: v_div_scale_f32 v2, s[2:3], v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04] +// GFX10: v_div_scale_f32 v2, s[2:3], v0, v0, v2 ; encoding: [0x02,0x02,0x6d,0xd5,0x00,0x01,0x0a,0x04] v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3] // GFX10: v_div_scale_f64 v[2:3], s2, v[0:1], v[0:1], v[2:3] ; encoding: [0x02,0x02,0x6e,0xd5,0x00,0x01,0x0a,0x04] @@ -223,7 +224,7 @@ v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3] // GFX10: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3] ; encoding: [0x00,0x06,0x76,0xd5,0x00,0x03,0x0a,0x04] v_cmpx_neq_f32_e32 v0, v1 -// GFX10: v_cmpx_neq_f32_e32 v0, v1 ; encoding: [0x00,0x03,0x3a,0x7c] +// GFX10: v_cmpx_neq_f32_e32 v0, v1 ; encoding: [0x00,0x03,0x3a,0x7c] v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD // GFX10: v_cmpx_neq_f32_sdwa v0, v1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x3a,0x7c,0x00,0x00,0x05,0x06] @@ -232,7 +233,7 @@ v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD // GFX10: v_cmpx_eq_u32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0xa5,0x7d,0x00,0x00,0x05,0x86] v_cmpx_class_f32_e64 v0, 1 -// GFX10: v_cmpx_class_f32_e64 v0, 1 ; encoding: [0x7e,0x00,0x98,0xd4,0x00,0x03,0x01,0x00] +// GFX10: v_cmpx_class_f32_e64 v0, 1 ; encoding: [0x7e,0x00,0x98,0xd4,0x00,0x03,0x01,0x00] v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD // GFX10: v_cmpx_class_f32_sdwa v0, 1 src0_sel:WORD_1 src1_sel:DWORD ; encoding: [0xf9,0x02,0x31,0x7d,0x00,0x00,0x05,0x86] diff --git a/llvm/test/MC/AMDGPU/wavesize-feature-unsupported-target.s b/llvm/test/MC/AMDGPU/wavesize-feature-unsupported-target.s new file mode 100644 index 0000000000000..3a8656c392ff5 --- /dev/null +++ b/llvm/test/MC/AMDGPU/wavesize-feature-unsupported-target.s @@ -0,0 +1,23 @@ +// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+wavefrontsize64 -o - %s | FileCheck -check-prefix=GFX1250 %s +// RUN: llvm-mc -triple=amdgcn -mcpu=gfx900 -mattr=+wavefrontsize32 -o - %s | FileCheck -check-prefix=GFX900 %s + +// Make sure setting both modes is supported at the same time. +// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,+wavefrontsize64 %s | FileCheck -check-prefixes=GFX10 %s + +// Test that there is no assertion when using an explicit +// wavefrontsize attribute on a target which does not support it. 
+ +// GFX1250: v_add_f64_e32 v[0:1], 1.0, v[0:1] +// GFX900: v_add_f64 v[0:1], 1.0, v[0:1] +// GFX10: v_add_f64 v[0:1], 1.0, v[0:1] +v_add_f64 v[0:1], 1.0, v[0:1] + +// GFX1250: v_cmp_eq_u32_e64 s[0:1], 1.0, s1 +// GFX900: v_cmp_eq_u32_e64 s[0:1], 1.0, s1 +// GFX10: v_cmp_eq_u32_e64 s[0:1], 1.0, s1 +v_cmp_eq_u32_e64 s[0:1], 1.0, s1 + +// GFX1250: v_cndmask_b32_e64 v1, v2, v3, s[0:1] +// GFX900: v_cndmask_b32_e64 v1, v2, v3, s[0:1] +// GFX10: v_cndmask_b32_e64 v1, v2, v3, s[0:1] +v_cndmask_b32 v1, v2, v3, s[0:1] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx10_vopc.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx10_vopc.txt index 2156a682337e8..336f4b2e88f47 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx10_vopc.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx10_vopc.txt @@ -1,6 +1,6 @@ # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32 -disassemble -show-encoding < %s | FileCheck -strict-whitespace -check-prefix=W32 %s # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -strict-whitespace -check-prefix=W64 %s - +# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -strict-whitespace -check-prefix=W32 %s # W32: v_cmp_class_f32_e32 vcc_lo, -1, v2 ; encoding: [0xc1,0x04,0x10,0x7d] # W64: v_cmp_class_f32_e32 vcc, -1, v2 ; encoding: [0xc1,0x04,0x10,0x7d] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_salu_lit64.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_salu_lit64.txt index d2ec2133b1b88..7064479082b7a 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_salu_lit64.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_salu_lit64.txt @@ -1,55 +1,56 @@ +# NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5 # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s -# GFX1250: s_mov_b64 s[2:3], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x01,0x82,0xbe,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x01,0x82,0xbe,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_mov_b64 s[2:3], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x01,0x82,0xbe,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_add_nc_u64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0xa9,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0xa9,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_add_nc_u64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0xa9,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_and_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x8b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x8b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_and_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x8b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_and_not1_b64 s[2:3], lit64(0x10abcdef12345678), lit64(0x10abcdef12345678) ; encoding: [0xfe,0xfe,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfe,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_and_not1_b64 s[2:3], lit64(0x10abcdef12345678), lit64(0x10abcdef12345678) ; encoding: [0xfe,0xfe,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_and_not1_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 
+# GFX1250: s_and_not1_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x91,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_ashr_i64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x86,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x86,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_ashr_i64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x86,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_bfe_i64 s[2:3], lit64(0x80abcdef12345678), 5 ; encoding: [0xfe,0x85,0x82,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x80] 0xfe,0x85,0x82,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x80 +# GFX1250: s_bfe_i64 s[2:3], lit64(0x80abcdef12345678), 5 ; encoding: [0xfe,0x85,0x82,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x80] -# GFX1250: s_bfe_u64 s[2:3], lit64(0x10abcdef12345678), 5 ; encoding: [0xfe,0x85,0x02,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x85,0x02,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_bfe_u64 s[2:3], lit64(0x10abcdef12345678), 5 ; encoding: [0xfe,0x85,0x02,0x94,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_cselect_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x98,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x98,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_cselect_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x98,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_lshl_b64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x84,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x84,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_lshl_b64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x84,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_lshr_b64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x85,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x85,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_lshr_b64 s[2:3], lit64(0x10abcdef12345678), s4 ; encoding: [0xfe,0x04,0x82,0x85,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_mul_u64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0xaa,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0xaa,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_mul_u64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0xaa,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_nand_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8e,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x8e,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_nand_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8e,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_nor_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x8f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_nor_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_or_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x8c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_or_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x8c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_or_not1_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: 
[0x04,0xfe,0x82,0x92,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x92,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_or_not1_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x92,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_xnor_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0x04,0xfe,0x82,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_xnor_b64 s[2:3], s[4:5], lit64(0x10abcdef12345678) ; encoding: [0x04,0xfe,0x82,0x90,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: s_xor_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x8d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x04,0x82,0x8d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: s_xor_b64 s[2:3], lit64(0x10abcdef12345678), s[4:5] ; encoding: [0xfe,0x04,0x82,0x8d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt index 963e69370a3ba..227e1c47b3d05 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt @@ -1,34 +1,35 @@ +# NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5 # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s -# GFX1250: s_add_pc_i64 lit64(0x12345678abcd0) ; encoding: [0xfe,0x4b,0x80,0xbe,0xd0,0xbc,0x8a,0x67,0x45,0x23,0x01,0x00] 0xfe,0x4b,0x80,0xbe,0xd0,0xbc,0x8a,0x67,0x45,0x23,0x01,0x00 +# GFX1250: s_add_pc_i64 lit64(0x12345678abcd0) ; encoding: [0xfe,0x4b,0x80,0xbe,0xd0,0xbc,0x8a,0x67,0x45,0x23,0x01,0x00] -# GFX1250: s_add_pc_i64 0x64 ; encoding: [0xff,0x4b,0x80,0xbe,0x64,0x00,0x00,0x00] 0xff,0x4b,0x80,0xbe,0x64,0x00,0x00,0x00 +# GFX1250: s_add_pc_i64 0x64 ; encoding: [0xff,0x4b,0x80,0xbe,0x64,0x00,0x00,0x00] -# GFX1250: s_add_pc_i64 4 ; encoding: [0x84,0x4b,0x80,0xbe] 0x84,0x4b,0x80,0xbe +# GFX1250: s_add_pc_i64 4 ; encoding: [0x84,0x4b,0x80,0xbe] -# GFX1250: s_add_pc_i64 s[2:3] ; encoding: [0x02,0x4b,0x80,0xbe] 0x02,0x4b,0x80,0xbe +# GFX1250: s_add_pc_i64 s[2:3] ; encoding: [0x02,0x4b,0x80,0xbe] -# GFX1250: s_sendmsg_rtn_b32 s2, sendmsg(MSG_RTN_GET_CLUSTER_BARRIER_STATE) ; encoding: [0x88,0x4c,0x82,0xbe] 0x88,0x4c,0x82,0xbe +# GFX1250: s_sendmsg_rtn_b32 s2, sendmsg(MSG_RTN_GET_CLUSTER_BARRIER_STATE) ; encoding: [0x88,0x4c,0x82,0xbe] -# GFX1250: s_sendmsg_rtn_b64 s[2:3], sendmsg(MSG_RTN_GET_CLUSTER_BARRIER_STATE) ; encoding: [0x88,0x4d,0x82,0xbe] 0x88,0x4d,0x82,0xbe +# GFX1250: s_sendmsg_rtn_b64 s[2:3], sendmsg(MSG_RTN_GET_CLUSTER_BARRIER_STATE) ; encoding: [0x88,0x4d,0x82,0xbe] -# GFX1250: s_get_shader_cycles_u64 s[2:3] ; encoding: [0x00,0x06,0x82,0xbe] 0x00,0x06,0x82,0xbe +# GFX1250: s_get_shader_cycles_u64 s[2:3] ; encoding: [0x00,0x06,0x82,0xbe] -# GFX1250: s_barrier_signal -3 ; encoding: [0xc3,0x4e,0x80,0xbe] 0xc3,0x4e,0x80,0xbe +# GFX1250: s_barrier_signal -3 ; encoding: [0xc3,0x4e,0x80,0xbe] -# GFX1250: s_get_barrier_state s3, -3 ; encoding: [0xc3,0x50,0x83,0xbe] 0xc3,0x50,0x83,0xbe +# GFX1250: s_get_barrier_state s3, -3 ; encoding: [0xc3,0x50,0x83,0xbe] -# GFX1250: s_get_barrier_state s3, -4 ; encoding: [0xc4,0x50,0x83,0xbe] 0xc4,0x50,0x83,0xbe +# GFX1250: s_get_barrier_state s3, -4 ; encoding: [0xc4,0x50,0x83,0xbe] -# GFX1250: s_get_barrier_state s3, m0 ; encoding: [0x7d,0x50,0x83,0xbe] 0x7d,0x50,0x83,0xbe +# GFX1250: s_get_barrier_state s3, m0 ; 
encoding: [0x7d,0x50,0x83,0xbe] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_valu_lit64.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_valu_lit64.txt index 30650b4fa227f..1571fb96dcf49 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_valu_lit64.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_valu_lit64.txt @@ -1,232 +1,233 @@ +# NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5 # RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s -# GFX1250: v_add_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x05,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xfd,0x05,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_add_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x05,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x30,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x30,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x30,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_class_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v255 ; encoding: [0xfe,0xfe,0xff,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfe,0xff,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_class_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v255 ; encoding: [0xfe,0xfe,0xff,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_eq_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x45,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x45,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_eq_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x45,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_ge_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_ge_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_gt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x49,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x49,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_gt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x49,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_gt_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_gt_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_gt_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_gt_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb9,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_le_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: 
[0xfe,0xfc,0x47,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x47,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_le_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x47,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_le_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_le_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_le_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_le_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb7,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_lg_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_lg_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_lt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x43,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x43,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_lt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x43,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_lt_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_lt_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_lt_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_lt_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb3,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_ne_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xab,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xab,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_ne_i64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xab,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_ne_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbb,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xbb,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_ne_u64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbb,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_neq_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x5b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_neq_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5b,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_nge_f64_e32 vcc_lo, 
lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x53,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x53,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_nge_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x53,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_ngt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x57,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x57,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_ngt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x57,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_nle_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x59,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x59,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_nle_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x59,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_nlg_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x55,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x55,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_nlg_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x55,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_nlt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x5d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_nlt_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5d,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_o_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4f,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4f,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_o_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4f,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmp_u_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x51,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x51,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmp_u_f64_e32 vcc_lo, lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x51,0x7c,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_class_f64_e32 lit64(0x10abcdef12345678), v255 ; encoding: [0xfe,0xfe,0xff,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfe,0xff,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_class_f64_e32 lit64(0x10abcdef12345678), v255 ; encoding: [0xfe,0xfe,0xff,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_eq_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x45,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x45,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_eq_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x45,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_eq_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_eq_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_eq_u64_e32 
lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_eq_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb5,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ge_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ge_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ge_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xad,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xad,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ge_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xad,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ge_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbd,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xbd,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ge_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbd,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_gt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x49,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x49,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_gt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x49,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_gt_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_gt_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_gt_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_gt_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb9,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_le_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x47,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x47,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_le_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x47,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_le_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_le_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_le_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_le_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb7,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_lg_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 
0xfe,0xfc,0x4b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_lg_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_lt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x43,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x43,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_lt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x43,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_lt_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xa3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_lt_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xa3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_lt_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xb3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_lt_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xb3,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ne_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xab,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xab,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ne_i64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xab,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ne_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbb,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xbb,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ne_u64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xbb,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_neq_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x5b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_neq_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5b,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_nge_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x53,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x53,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_nge_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x53,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_ngt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x57,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x57,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_ngt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x57,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_nle_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x59,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x59,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_nle_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x59,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_nlg_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x55,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x55,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_nlg_f64_e32 
lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x55,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_nlt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x5d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_nlt_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x5d,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_o_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4f,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x4f,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_o_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x4f,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cmpx_u_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x51,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0x51,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cmpx_u_f64_e32 lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0x51,0x7d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cvt_f32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x1e,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x1e,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cvt_f32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x1e,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cvt_i32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x06,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x06,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cvt_i32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x06,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_cvt_u32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x2a,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x2a,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_cvt_u32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x2a,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_floor_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x34,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x34,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_floor_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x34,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_fract_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x7c,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x7c,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_fract_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x7c,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_frexp_exp_i32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x78,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x78,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_frexp_exp_i32_f64_e32 v255, lit64(0x10abcdef12345678) ; encoding: [0xfe,0x78,0xfe,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_frexp_mant_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x7a,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x7a,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_frexp_mant_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x7a,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_max_num_f64_e32 v[254:255], 
lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x1d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xfd,0x1d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_max_num_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x1d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_min_num_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x1b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xfd,0x1b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_min_num_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x1b,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_mul_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x0d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0xfc,0xfd,0x0d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_mul_f64_e32 v[254:255], lit64(0x10abcdef12345678), v[254:255] ; encoding: [0xfe,0xfc,0xfd,0x0d,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_rcp_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x5e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x5e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_rcp_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x5e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_rndne_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x32,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x32,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_rndne_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x32,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_rsq_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x62,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x62,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_rsq_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x62,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_sqrt_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x68,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x68,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_sqrt_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x68,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_trunc_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x2e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] 0xfe,0x2e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10 +# GFX1250: v_trunc_f64_e32 v[254:255], lit64(0x10abcdef12345678) ; encoding: [0xfe,0x2e,0xfc,0x7f,0x78,0x56,0x34,0x12,0xef,0xcd,0xab,0x10] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x4063233333333333) ; encoding: [0xfe,0x30,0xfc,0x7f,0x33,0x33,0x33,0x33,0x33,0x23,0x63,0x40] 0xfe,0x30,0xfc,0x7f,0x33,0x33,0x33,0x33,0x33,0x23,0x63,0x40 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x4063233333333333) ; encoding: [0xfe,0x30,0xfc,0x7f,0x33,0x33,0x33,0x33,0x33,0x23,0x63,0x40] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x448969368974c05b) ; encoding: [0xfe,0x30,0xfc,0x7f,0x5b,0xc0,0x74,0x89,0x36,0x69,0x89,0x44] 0xfe,0x30,0xfc,0x7f,0x5b,0xc0,0x74,0x89,0x36,0x69,0x89,0x44 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x448969368974c05b) ; encoding: [0xfe,0x30,0xfc,0x7f,0x5b,0xc0,0x74,0x89,0x36,0x69,0x89,0x44] -# GFX1250: v_ceil_f64_e32 v[254:255], 0x40632000 ; encoding: [0xff,0x30,0xfc,0x7f,0x00,0x20,0x63,0x40] 
0xff,0x30,0xfc,0x7f,0x00,0x20,0x63,0x40 +# GFX1250: v_ceil_f64_e32 v[254:255], 0x40632000 ; encoding: [0xff,0x30,0xfc,0x7f,0x00,0x20,0x63,0x40] -# GFX1250: v_mov_b64_e32 v[0:1], 0x12345678 ; encoding: [0xff,0x3a,0x00,0x7e,0x78,0x56,0x34,0x12] 0xff,0x3a,0x00,0x7e,0x78,0x56,0x34,0x12 +# GFX1250: v_mov_b64_e32 v[0:1], 0x12345678 ; encoding: [0xff,0x3a,0x00,0x7e,0x78,0x56,0x34,0x12] -# GFX1250: v_ceil_f64_e32 v[254:255], 0.15915494309189532 ; encoding: [0xf8,0x30,0xfc,0x7f] 0xf8,0x30,0xfc,0x7f +# GFX1250: v_ceil_f64_e32 v[254:255], 0.15915494309189532 ; encoding: [0xf8,0x30,0xfc,0x7f] -# GFX1250: v_ceil_f64_e32 v[254:255], -4.0 ; encoding: [0xf7,0x30,0xfc,0x7f] 0xf7,0x30,0xfc,0x7f +# GFX1250: v_ceil_f64_e32 v[254:255], -4.0 ; encoding: [0xf7,0x30,0xfc,0x7f] -# GFX1250: v_ceil_f64_e32 v[254:255], 2.0 ; encoding: [0xf4,0x30,0xfc,0x7f] 0xf4,0x30,0xfc,0x7f +# GFX1250: v_ceil_f64_e32 v[254:255], 2.0 ; encoding: [0xf4,0x30,0xfc,0x7f] -# GFX1250: v_ceil_f64_e32 v[254:255], 0 ; encoding: [0x80,0x30,0xfc,0x7f] 0x80,0x30,0xfc,0x7f +# GFX1250: v_ceil_f64_e32 v[254:255], 0 ; encoding: [0x80,0x30,0xfc,0x7f] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x7b) ; encoding: [0xfe,0x30,0xfc,0x7f,0x7b,0x00,0x00,0x00,0x00,0x00,0x00,0x00] 0xfe,0x30,0xfc,0x7f,0x7b,0x00,0x00,0x00,0x00,0x00,0x00,0x00 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x7b) ; encoding: [0xfe,0x30,0xfc,0x7f,0x7b,0x00,0x00,0x00,0x00,0x00,0x00,0x00] -# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x109a) ; encoding: [0xfe,0x30,0xfc,0x7f,0x9a,0x10,0x00,0x00,0x00,0x00,0x00,0x00] 0xfe,0x30,0xfc,0x7f,0x9a,0x10,0x00,0x00,0x00,0x00,0x00,0x00 +# GFX1250: v_ceil_f64_e32 v[254:255], lit64(0x109a) ; encoding: [0xfe,0x30,0xfc,0x7f,0x9a,0x10,0x00,0x00,0x00,0x00,0x00,0x00] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt index 07dbbddcdc2f9..94edf22e36acf 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt @@ -720,10 +720,12 @@ # GFX1250: v_cvt_f32_bf16_e32 v5, ttmp15 ; encoding: [0x7b,0xe4,0x0a,0x7e] 0x01,0xe5,0x0a,0x7e -# GFX1250: v_cvt_f32_bf16_e32 v5, v1.l ; encoding: [0x01,0xe5,0x0a,0x7e] +# GFX1250-REAL16: v_cvt_f32_bf16_e32 v5, v1.l ; encoding: [0x01,0xe5,0x0a,0x7e] +# GFX1250-FAKE16: v_cvt_f32_bf16_e32 v5, v1 ; encoding: [0x01,0xe5,0x0a,0x7e] 0x7f,0xe5,0x0a,0x7e -# GFX1250: v_cvt_f32_bf16_e32 v5, v127.l ; encoding: [0x7f,0xe5,0x0a,0x7e] +# GFX1250-REAL16: v_cvt_f32_bf16_e32 v5, v127.l ; encoding: [0x7f,0xe5,0x0a,0x7e] +# GFX1250-FAKE16: v_cvt_f32_bf16_e32 v5, v127 ; encoding: [0x7f,0xe5,0x0a,0x7e] 0x6b,0xe4,0x0a,0x7e # GFX1250: v_cvt_f32_bf16_e32 v5, vcc_hi ; encoding: [0x6b,0xe4,0x0a,0x7e] @@ -732,7 +734,8 @@ # GFX1250: v_cvt_f32_bf16_e32 v5, vcc_lo ; encoding: [0x6a,0xe4,0x0a,0x7e] 0x81,0xe5,0x0a,0x7e -# GFX1250: v_cvt_f32_bf16_e32 v5, v1.h ; encoding: [0x81,0xe5,0x0a,0x7e] +# GFX1250-REAL16: v_cvt_f32_bf16_e32 v5, v1.h ; encoding: [0x81,0xe5,0x0a,0x7e] +# GFX1250-FAKE16: v_cvt_f32_bf16_e32 v5, v129/*Invalid register, operand has 'VS_32_Lo128' register class*/ ; encoding: [0x81,0xe5,0x0a,0x7e] 0xff,0xf0,0x02,0x7e,0x34,0x12,0x00,0x00 # GFX1250-REAL16: v_cvt_f16_bf8_e32 v1.l, 0x1234 ; encoding: [0xff,0xf0,0x02,0x7e,0x34,0x12,0x00,0x00] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt index c12ecb8d868aa..93286caa4fa2c 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt +++ 
b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt @@ -615,49 +615,64 @@ # GFX1250-REAL16: v_cos_bf16_dpp v5.h, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xfe,0x0a,0x7f,0x81,0x1b,0x00,0xff] 0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30 -# GFX1250: v_cvt_f32_bf16_dpp v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v127, -|v127.l| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v127, -|v127| row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xfa,0xe4,0xfe,0x7e,0x7f,0x6f,0x35,0x30] 0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0xe4,0x00,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1b,0x00,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x41,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x40,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x21,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x2f,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l 
row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x50,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x5f,0x01,0x01] 0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x01,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x0f,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x11,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x1f,0x01,0xff] 0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0xfa,0xe4,0x0a,0x7e,0x01,0x60,0x09,0x13] 0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.h quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v129/*Invalid register, operand has 'VGPR_32_Lo128' register class*/ 
quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xe4,0x0a,0x7e,0x81,0x1b,0x00,0xff] 0xfa,0xf0,0x02,0x7e,0x02,0x39,0x00,0xff # GFX1250-REAL16: v_cvt_f16_bf8_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xf0,0x02,0x7e,0x02,0x39,0x00,0xff] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt index fa7b940132f0c..fb3f1b25c6c7f 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp8.txt @@ -165,16 +165,20 @@ # GFX1250-FAKE16: v_add_f64_e32 v[156:157], v[129:130], v[187:188] ; encoding: [0x81,0x77,0x39,0x05] 0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00 -# GFX1250: v_cvt_f32_bf16_dpp v127, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v127, v127.l dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v127, v127 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xe9,0xe4,0xfe,0x7e,0x7f,0x00,0x00,0x00] 0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] 0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0xea,0xe4,0x0a,0x7e,0x01,0x77,0x39,0x05] 0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05 -# GFX1250: v_cvt_f32_bf16_dpp v5, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05] +# GFX1250-REAL16: v_cvt_f32_bf16_dpp v5, v1.h dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05] +# GFX1250-FAKE16: v_cvt_f32_bf16_dpp v5, v129/*Invalid register, operand has 'VGPR_32_Lo128' register class*/ dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xe4,0x0a,0x7e,0x81,0x77,0x39,0x05] 0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05 # GFX1250-REAL16: v_cvt_f16_bf8_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0xe9,0xf0,0x02,0x7e,0x02,0x77,0x39,0x05] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_wave64_feature.s b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_wave64_feature.s new file mode 100644 index 0000000000000..bdea636a9efe3 --- /dev/null +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_wave64_feature.s @@ -0,0 +1,13 @@ +# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+wavefrontsize64 -disassemble -o - %s | FileCheck %s + +# Make sure there's no assertion when trying to use an unsupported +# wave64 on a wave32-only target + +# CHECK: v_add_f64_e32 v[0:1], 1.0, v[0:1] +0xf2,0x00,0x00,0x04 + +# CHECK: v_cmp_eq_u32_e64 s[0:1], 1.0, s1 +0x00,0x00,0x4a,0xd4,0xf2,0x02,0x00,0x00 + +# CHECK: v_cndmask_b32_e64 v1, v2, v3, s[0:1] +0x01,0x00,0x01,0xd5,0x02,0x07,0x02,0x00 diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopk.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopk.txt index 49fa263f6bbf8..41c5724a596f9 100644 --- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopk.txt +++ 
b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_sopk.txt @@ -82,7 +82,7 @@ # GFX12: s_getreg_b32 s0, hwreg(52, 8, 3) ; encoding: [0x34,0x12,0x80,0xb8] 0x34,0x12,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_EXCP_FLAG_PRIV, 7, 25) ; encoding: [0xd1,0xc1,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, 7, 25) ; encoding: [0xd1,0xc1,0x80,0xb8] 0xd1,0xc1,0x80,0xb8 # GFX12: s_getreg_b32 s105, hwreg(52, 8, 3) ; encoding: [0x34,0x12,0xe9,0xb8] @@ -163,7 +163,7 @@ # GFX12: s_setreg_b32 hwreg(52, 8, 3), vcc_lo ; encoding: [0x34,0x12,0x6a,0xb9] 0x34,0x12,0x6a,0xb9 -# GFX12: s_setreg_b32 hwreg(HW_REG_EXCP_FLAG_PRIV, 7, 25), s0 ; encoding: [0xd1,0xc1,0x00,0xb9] +# GFX12: s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, 7, 25), s0 ; encoding: [0xd1,0xc1,0x00,0xb9] 0xd1,0xc1,0x00,0xb9 # GFX12: s_version 0x1234 ; encoding: [0x34,0x12,0x80,0xb0] @@ -187,43 +187,43 @@ # GFX12: s_version ((128|UC_VERSION_W64_BIT)|UC_VERSION_W32_BIT)|UC_VERSION_MDP_BIT ; encoding: [0x80,0xe0,0x80,0xb0] 0x80,0xe0,0x80,0xb0 -# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_MODE), 0xaf123456 ; encoding: [0x01,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] +# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE), 0xaf123456 ; encoding: [0x01,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] 0x01,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf -# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 31, 1), 0xaf123456 ; encoding: [0xc1,0x07,0x80,0xb9,0x56,0x34,0x12,0xaf] +# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 31, 1), 0xaf123456 ; encoding: [0xc1,0x07,0x80,0xb9,0x56,0x34,0x12,0xaf] 0xc1,0x07,0x80,0xb9,0x56,0x34,0x12,0xaf -# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_STATUS), 0xaf123456 ; encoding: [0x02,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] +# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_STATUS), 0xaf123456 ; encoding: [0x02,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] 0x02,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf -# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_GPR_ALLOC), 0xaf123456 ; encoding: [0x05,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] +# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_GPR_ALLOC), 0xaf123456 ; encoding: [0x05,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] 0x05,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf -# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_LDS_ALLOC), 0xaf123456 ; encoding: [0x06,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] +# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_LDS_ALLOC), 0xaf123456 ; encoding: [0x06,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] 0x06,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf # GFX12: s_setreg_imm32_b32 hwreg(HW_REG_IB_STS), 0xaf123456 ; encoding: [0x07,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] 0x07,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf -# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_HW_ID1), 0xaf123456 ; encoding: [0x17,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] +# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_HW_ID1), 0xaf123456 ; encoding: [0x17,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] 0x17,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf -# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_HW_ID2), 0xaf123456 ; encoding: [0x18,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] +# GFX12: s_setreg_imm32_b32 hwreg(HW_REG_WAVE_HW_ID2), 0xaf123456 ; encoding: [0x18,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf] 0x18,0xf8,0x80,0xb9,0x56,0x34,0x12,0xaf -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_MODE) ; encoding: [0x01,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_MODE) ; encoding: [0x01,0xf8,0x80,0xb8] 0x01,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_STATUS) ; encoding: [0x02,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_STATUS) ; encoding: [0x02,0xf8,0x80,0xb8] 0x02,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, 
hwreg(HW_REG_STATE_PRIV) ; encoding: [0x04,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_STATE_PRIV) ; encoding: [0x04,0xf8,0x80,0xb8] 0x04,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_GPR_ALLOC) ; encoding: [0x05,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_GPR_ALLOC) ; encoding: [0x05,0xf8,0x80,0xb8] 0x05,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_LDS_ALLOC) ; encoding: [0x06,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_LDS_ALLOC) ; encoding: [0x06,0xf8,0x80,0xb8] 0x06,0xf8,0x80,0xb8 # GFX12: s_getreg_b32 s0, hwreg(HW_REG_IB_STS) ; encoding: [0x07,0xf8,0x80,0xb8] @@ -244,31 +244,31 @@ # GFX12: s_getreg_b32 s0, hwreg(HW_REG_PERF_SNAPSHOT_DATA2) ; encoding: [0x10,0xf8,0x80,0xb8] 0x10,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_EXCP_FLAG_PRIV) ; encoding: [0x11,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV) ; encoding: [0x11,0xf8,0x80,0xb8] 0x11,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_EXCP_FLAG_USER) ; encoding: [0x12,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER) ; encoding: [0x12,0xf8,0x80,0xb8] 0x12,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_TRAP_CTRL) ; encoding: [0x13,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_TRAP_CTRL) ; encoding: [0x13,0xf8,0x80,0xb8] 0x13,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_SCRATCH_BASE_LO) ; encoding: [0x14,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO) ; encoding: [0x14,0xf8,0x80,0xb8] 0x14,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_SCRATCH_BASE_HI) ; encoding: [0x15,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI) ; encoding: [0x15,0xf8,0x80,0xb8] 0x15,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_HW_ID1) ; encoding: [0x17,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID1) ; encoding: [0x17,0xf8,0x80,0xb8] 0x17,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_HW_ID2) ; encoding: [0x18,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_HW_ID2) ; encoding: [0x18,0xf8,0x80,0xb8] 0x18,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_DVGPR_ALLOC_LO) ; encoding: [0x1f,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_DVGPR_ALLOC_LO) ; encoding: [0x1f,0xf8,0x80,0xb8] 0x1f,0xf8,0x80,0xb8 -# GFX12: s_getreg_b32 s0, hwreg(HW_REG_DVGPR_ALLOC_HI) ; encoding: [0x20,0xf8,0x80,0xb8] +# GFX12: s_getreg_b32 s0, hwreg(HW_REG_WAVE_DVGPR_ALLOC_HI) ; encoding: [0x20,0xf8,0x80,0xb8] 0x20,0xf8,0x80,0xb8 # GFX12: s_getreg_b32 s0, hwreg(HW_REG_SHADER_CYCLES_LO) ; encoding: [0x1d,0xf8,0x80,0xb8] diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx9_wave32_feature.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx9_wave32_feature.txt new file mode 100644 index 0000000000000..40494b3dfa1ea --- /dev/null +++ b/llvm/test/MC/Disassembler/AMDGPU/gfx9_wave32_feature.txt @@ -0,0 +1,13 @@ +# RUN: llvm-mc -triple=amdgcn -mcpu=gfx900 -mattr=+wavefrontsize32 -disassemble -o - %s | FileCheck %s + +# Make sure there's no assertion when trying to use an unsupported +# wave32 on a wave64-only target + +# CHECK: v_add_f64 v[0:1], 1.0, v[0:1] +0x00,0x00,0x80,0xd2,0xf2,0x00,0x02,0x00 + +# CHECK: v_cmp_eq_u32_e64 s[0:1], 1.0, s1 +0x00,0x00,0xca,0xd0,0xf2,0x02,0x00,0x00 + +# CHECK: v_cndmask_b32_e64 v1, v2, v3, s[0:1] +0x01,0x00,0x00,0xd1,0x02,0x07,0x02,0x00 diff --git a/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt 
b/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt index da3601b00b199..cdfc8ce9e0ca5 100644 --- a/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt +++ b/llvm/test/MC/Disassembler/PowerPC/ppc-encoding-ISAFuture.txt @@ -231,3 +231,108 @@ #CHECK: vucmprhh 1, 3, 6 0x10,0x23,0x31,0x03 + +#CHECK: xxaes192encp 8, 10, 14 +0xf1,0x0b,0x76,0x10 + +#CHECK: xxaes256decp 14, 10, 6 +0xf1,0xca,0x3e,0x50 + +#CHECK: xxaes128genlkp 4, 8 +0xf0,0x80,0x46,0x90 + +#CHECK: xxgfmul128gcm 7, 5, 4 +0xf0,0xe5,0x26,0xd0 + +#CHECK: xvadduwm 4, 5, 7 +0xf0,0x85,0x3c,0x18 + +#CHECK: xvadduhm 4, 5, 7 +0xf0,0x85,0x3c,0x58 + +#CHECK: xvsubuwm 4, 5, 7 +0xf0,0x85,0x3c,0x98 + +#CHECK: xvsubuhm 4, 5, 7 +0xf0,0x85,0x3c,0xd8 + +#CHECK: xvmuluwm 4, 5, 7 +0xf0,0x85,0x3d,0x18 + +#CHECK: xvmuluhm 4, 5, 7 +0xf0,0x85,0x3d,0x58 + +#CHECK: xvmulhsw 4, 5, 7 +0xf0,0x85,0x3d,0x98 + +#CHECK: xvmulhsh 4, 5, 7 +0xf0,0x85,0x3d,0xd8 + +#CHECK: xvmulhuw 4, 5, 7 +0xf0,0x85,0x3b,0x90 + +#CHECK: xvmulhuh 4, 5, 7 +0xf0,0x85,0x3b,0xd0 + +#CHECK: xxmulmul 8, 3, 4, 2 +0xed,0x03,0x22,0x08 + +#CHECK: xxmulmulhiadd 8, 3, 4, 1, 0, 1 +0xed,0x03,0x25,0x48 + +#CHECK: xxmulmulloadd 8, 3, 4, 1, 0 +0xed,0x03,0x22,0x88 + +#CHECK: xxssumudm 8, 3, 4, 1 +0xed,0x03,0x24,0xc8 + +#CHECK: xxssumudmc 8, 3, 4, 1 +0xed,0x03,0x25,0xc8 + +#CHECK: xxssumudmcext 8, 3, 4, 6, 0 +0x05,0x00,0x00,0x00,0x89,0x03,0x21,0xa0 + +#CHECK: xsaddadduqm 4, 5, 7 +0xec,0x85,0x3b,0x00 + +#CHECK: xsaddaddsuqm 4, 5, 7 +0xec,0x85,0x3b,0x40 + +#CHECK: xsaddsubuqm 4, 5, 7 +0xec,0x85,0x3b,0x80 + +#CHECK: xsaddsubsuqm 4, 5, 7 +0xec,0x85,0x3f,0x00 + +#CHECK: xsrebase2t1uqm 4, 5, 7 +0xec,0x85,0x3c,0x88 + +#CHECK: xsrebase2t2uqm 4, 5, 7 +0xec,0x85,0x3d,0x88 + +#CHECK: xsrebase2t3uqm 4, 5, 7 +0xec,0x85,0x3e,0x88 + +#CHECK: xsrebase2t4uqm 4, 5, 7 +0xec,0x85,0x3e,0xc8 + +#CHECK: xsrebase3t1uqm 4, 5, 7 +0xec,0x85,0x3f,0x88 + +#CHECK: xsrebase3t2uqm 4, 5, 7 +0xec,0x85,0x3f,0xc8 + +#CHECK: xsrebase3t3uqm 4, 5, 7 +0xec,0x85,0x3e,0x18 + +#CHECK: xsmerge2t1uqm 4, 5, 7 +0xec,0x85,0x3f,0x40 + +#CHECK: xsmerge2t2uqm 4, 5, 7 +0xec,0x85,0x3f,0x80 + +#CHECK: xsmerge2t3uqm 4, 5, 7 +0xec,0x85,0x3a,0xc8 + +#CHECK: xsmerge3t1uqm 4, 5, 7 +0xec,0x85,0x3b,0xc8 diff --git a/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt b/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt index 66d05043301b6..f7e314fc819e4 100644 --- a/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt +++ b/llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding-ISAFuture.txt @@ -225,3 +225,108 @@ #CHECK: vucmprhh 1, 3, 6 0x03,0x31,0x23,0x10 + +#CHECK: xxaes192encp 8, 10, 14 +0x10,0x76,0x0b,0xf1 + +#CHECK: xxaes256decp 14, 10, 6 +0x50,0x3e,0xca,0xf1 + +#CHECK: xxaes128genlkp 4, 8 +0x90,0x46,0x80,0xf0 + +#CHECK: xxgfmul128gcm 7, 5, 4 +0xd0,0x26,0xe5,0xf0 + +#CHECK: xvadduwm 4, 5, 7 +0x18,0x3c,0x85,0xf0 + +#CHECK: xvadduhm 4, 5, 7 +0x58,0x3c,0x85,0xf0 + +#CHECK: xvsubuwm 4, 5, 7 +0x98,0x3c,0x85,0xf0 + +#CHECK: xvsubuhm 4, 5, 7 +0xd8,0x3c,0x85,0xf0 + +#CHECK: xvmuluwm 4, 5, 7 +0x18,0x3d,0x85,0xf0 + +#CHECK: xvmuluhm 4, 5, 7 +0x58,0x3d,0x85,0xf0 + +#CHECK: xvmulhsw 4, 5, 7 +0x98,0x3d,0x85,0xf0 + +#CHECK: xvmulhsh 4, 5, 7 +0xd8,0x3d,0x85,0xf0 + +#CHECK: xvmulhuw 4, 5, 7 +0x90,0x3b,0x85,0xf0 + +#CHECK: xvmulhuh 4, 5, 7 +0xd0,0x3b,0x85,0xf0 + +#CHECK: xxmulmul 8, 3, 4, 2 +0x08,0x22,0x03,0xed + +#CHECK: xxmulmulhiadd 8, 3, 4, 1, 0, 1 +0x48,0x25,0x03,0xed + +#CHECK: xxmulmulloadd 8, 3, 4, 1, 0 +0x88,0x22,0x03,0xed + +#CHECK: xxssumudm 8, 3, 4, 1 +0xc8,0x24,0x03,0xed + +#CHECK: xxssumudmc 8, 3, 4, 1 
+0xc8,0x25,0x03,0xed + +#CHECK: xxssumudmcext 8, 3, 4, 6, 0 +0x00,0x00,0x00,0x05,0xa0,0x21,0x03,0x89 + +#CHECK: xsaddadduqm 4, 5, 7 +0x00,0x3b,0x85,0xec + +#CHECK: xsaddaddsuqm 4, 5, 7 +0x40,0x3b,0x85,0xec + +#CHECK: xsaddsubuqm 4, 5, 7 +0x80,0x3b,0x85,0xec + +#CHECK: xsaddsubsuqm 4, 5, 7 +0x00,0x3f,0x85,0xec + +#CHECK: xsrebase2t1uqm 4, 5, 7 +0x88,0x3c,0x85,0xec + +#CHECK: xsrebase2t2uqm 4, 5, 7 +0x88,0x3d,0x85,0xec + +#CHECK: xsrebase2t3uqm 4, 5, 7 +0x88,0x3e,0x85,0xec + +#CHECK: xsrebase2t4uqm 4, 5, 7 +0xc8,0x3e,0x85,0xec + +#CHECK: xsrebase3t1uqm 4, 5, 7 +0x88,0x3f,0x85,0xec + +#CHECK: xsrebase3t2uqm 4, 5, 7 +0xc8,0x3f,0x85,0xec + +#CHECK: xsrebase3t3uqm 4, 5, 7 +0x18,0x3e,0x85,0xec + +#CHECK: xsmerge2t1uqm 4, 5, 7 +0x40,0x3f,0x85,0xec + +#CHECK: xsmerge2t2uqm 4, 5, 7 +0x80,0x3f,0x85,0xec + +#CHECK: xsmerge2t3uqm 4, 5, 7 +0xc8,0x3a,0x85,0xec + +#CHECK: xsmerge3t1uqm 4, 5, 7 +0xc8,0x3b,0x85,0xec diff --git a/llvm/test/MC/ELF/cfi-sframe-fre-cases.s b/llvm/test/MC/ELF/cfi-sframe-fre-cases.s index 6d9e8c1b6480f..eeaa4021ceefd 100644 --- a/llvm/test/MC/ELF/cfi-sframe-fre-cases.s +++ b/llvm/test/MC/ELF/cfi-sframe-fre-cases.s @@ -17,7 +17,7 @@ fde4_fre_offset_sizes: # CHECK: FuncDescEntry [0] { # CHECK: Start FRE Offset: 0 # CHECK: FRE Type: Addr1 (0x0) - .cfi_startproc + .cfi_startproc # CHECK: Frame Row Entry { # CHECK-NEXT: Start Address: 0x0 # CHECK-NEXT: Return Address Signed: No @@ -27,9 +27,9 @@ fde4_fre_offset_sizes: # CHECK-NEXT: RA Offset: -8 .long 0 # Uninteresting register no new fre, no effect on cfa - .cfi_offset 0, 8 + .cfi_offset 0, 8 .long 0 - .cfi_def_cfa_offset 0x78 + .cfi_def_cfa_offset 0x78 # CHECK: Frame Row Entry { # CHECK-NEXT: Start Address: 0x8 # CHECK-NEXT: Return Address Signed: No @@ -37,11 +37,11 @@ fde4_fre_offset_sizes: # CHECK-NEXT: Base Register: SP (0x1) # CHECK-NEXT: CFA Offset: 120 # CHECK-NEXT: RA Offset: -8 - .long 0 + .long 0 # Uninteresting register no new fre, no effect on cfa .cfi_rel_offset 1, 8 .long 0 - .cfi_def_cfa_offset 0x80 + .cfi_def_cfa_offset 0x80 # CHECK: Frame Row Entry { # CHECK-NEXT: Start Address: 0x10 # CHECK-NEXT: Return Address Signed: No @@ -49,11 +49,11 @@ fde4_fre_offset_sizes: # CHECK-NEXT: Base Register: SP (0x1) # CHECK-NEXT: CFA Offset: 128 # CHECK-NEXT: RA Offset: -8 - .long 0 + .long 0 # Uninteresting register no new fre, no effect on cfa .cfi_val_offset 1, 8 .long 0 - .cfi_def_cfa_offset 0x7FFF + .cfi_def_cfa_offset 0x7FFF # CHECK: Frame Row Entry { # CHECK-NEXT: Start Address: 0x18 # CHECK-NEXT: Return Address Signed: No @@ -61,8 +61,8 @@ fde4_fre_offset_sizes: # CHECK-NEXT: Base Register: SP (0x1) # CHECK-NEXT: CFA Offset: 32767 # CHECK-NEXT: RA Offset: -8 - .long 0 - .cfi_def_cfa_offset 0x8000 + .long 0 + .cfi_def_cfa_offset 0x8000 # CHECK: Frame Row Entry { # CHECK-NEXT: Start Address: 0x1C # CHECK-NEXT: Return Address Signed: No @@ -70,8 +70,8 @@ fde4_fre_offset_sizes: # CHECK-NEXT: Base Register: SP (0x1) # CHECK-NEXT: CFA Offset: 32768 # CHECK-NEXT: RA Offset: -8 - .long 0 - .cfi_def_cfa_offset 0x8 + .long 0 + .cfi_def_cfa_offset 0x8 # CHECK: Frame Row Entry { # CHECK-NEXT: Start Address: 0x20 # CHECK-NEXT: Return Address Signed: No @@ -79,8 +79,8 @@ fde4_fre_offset_sizes: # CHECK-NEXT: Base Register: SP (0x1) # CHECK-NEXT: CFA Offset: 8 # CHECK-NEXT: RA Offset: -8 - .long 0 - .cfi_adjust_cfa_offset 0x8 + .long 0 + .cfi_adjust_cfa_offset 0x8 # CHECK: Frame Row Entry { # CHECK-NEXT: Start Address: 0x24 # CHECK-NEXT: Return Address Signed: No @@ -88,8 +88,8 @@ fde4_fre_offset_sizes: # CHECK-NEXT: Base Register: SP 
(0x1) # CHECK-NEXT: CFA Offset: 16 # CHECK-NEXT: RA Offset: -8 - .long 0 - .cfi_def_cfa_register 6 # switch to fp + .long 0 + .cfi_def_cfa_register 6 # switch to fp # CHECK: Frame Row Entry { # CHECK-NEXT: Start Address: 0x28 # CHECK-NEXT: Return Address Signed: No @@ -97,10 +97,10 @@ fde4_fre_offset_sizes: # CHECK-NEXT: Base Register: FP (0x0) # CHECK-NEXT: CFA Offset: 16 # CHECK-NEXT: RA Offset: -8 - .long 0 - .cfi_offset 7, 32 - # sp not the cfa but with large offset still changes encoding. - .cfi_offset 6, 0x7FF8 + .long 0 + .cfi_offset 7, 32 + # sp not the cfa but with large offset still changes encoding. + .cfi_offset 6, 0x7FF8 # CHECK: Frame Row Entry { # CHECK-NEXT: Start Address: 0x2C # CHECK-NEXT: Return Address Signed: No @@ -109,5 +109,75 @@ fde4_fre_offset_sizes: # CHECK-NEXT: CFA Offset: 16 # CHECK-NEXT: RA Offset: -8 # CHECK-NEXT: FP Offset: 32760 - .long 0 + .long 0 + .cfi_endproc + + .align 1024 +restore_reg: +# CHECK: FuncDescEntry [1] { +# CHECK: Start FRE Offset: 0x23 +# CHECK-NEXT: Num FREs: 3 + .cfi_startproc +# CHECK: Frame Row Entry { +# CHECK-NEXT: Start Address: 0x400 +# CHECK-NOT FP Offset{{.*}} +# CHECK: } + .long 0 + .cfi_offset 6, 32 +# CHECK Frame Row Entry { +# CHECK-NEXT Start Address: 0x404 +# CHECK: FP Offset: 32 + .long 0 + .cfi_restore 6 +# CHECK: Frame Row Entry { +# CHECK-NEXT: Start Address: 0x408 +# CHECK-NOT FP Offset{{.*}} +# CHECK: } + .long 0 + .cfi_endproc + + .align 1024 +remember_restore_state: +# CHECK: FuncDescEntry [2] { +# CHECK: Start FRE Offset: 0x2D +# CHECK-NEXT: Num FREs: 4 + .cfi_startproc +# CHECK: Frame Row Entry { +# CHECK-NEXT: Start Address: 0x800 +# CHECK-NOT FP Offset{{.*}} +# CHECK: } + .long 0 + .cfi_offset 6, 8 + .cfi_offset 7, 16 + .cfi_offset 8, 24 +# CHECK: Frame Row Entry { +# CHECK-NEXT: Start Address: 0x804 +# CHECK: Base Register: SP (0x1) +# CHECK-NEXT: CFA Offset: 8 +# CHECK-NEXT: RA Offset: -8 +# CHECK-NEXT: FP Offset: 8 +# CHECK-NEXT: } + .long 0 + .cfi_remember_state +# CHECK: Frame Row Entry { +# CHECK-NEXT: Start Address: 0x808 +# CHECK: Base Register: SP (0x1) +# CHECK-NEXT: CFA Offset: 8 +# CHECK-NEXT: RA Offset: -8 +# CHECK-NEXT: FP Offset: 32 +# CHECK-NEXT: } + .cfi_offset 6, 32 + .cfi_offset 7, 40 + .cfi_offset 8, 48 + .long 0 +# CHECK: Frame Row Entry { +# CHECK-NEXT: Start Address: 0x80C +# CHECK: Base Register: SP (0x1) +# CHECK-NEXT: CFA Offset: 8 +# CHECK-NEXT: RA Offset: -8 +# CHECK-NEXT: FP Offset: 8 +# CHECK-NEXT: } + .cfi_restore_state + .long 0 + .cfi_endproc diff --git a/llvm/test/MC/LoongArch/Macros/macros-la.s b/llvm/test/MC/LoongArch/Macros/macros-la.s index a732988ef1f1a..8022d5b038880 100644 --- a/llvm/test/MC/LoongArch/Macros/macros-la.s +++ b/llvm/test/MC/LoongArch/Macros/macros-la.s @@ -26,6 +26,7 @@ la.abs $a0, sym_abs # ABS-NEXT: lu32i.d $a0, %abs64_lo20(sym_abs) # ABS-NEXT: lu52i.d $a0, $a0, %abs64_hi12(sym_abs) # ABS-EMPTY: +# RELOC-NEXT: R_LARCH_MARK_LA - 0x0 # RELOC-NEXT: R_LARCH_ABS_HI20 sym_abs 0x0 # RELOC-NEXT: R_LARCH_ABS_LO12 sym_abs 0x0 # RELOC-NEXT: R_LARCH_ABS64_LO20 sym_abs 0x0 diff --git a/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s b/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s index 6ae7bd785773d..29fedd7c20646 100644 --- a/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s +++ b/llvm/test/MC/PowerPC/ppc-encoding-ISAFuture.s @@ -330,3 +330,145 @@ vucmprhh 1, 3, 6 #CHECK-BE: vucmprhh 1, 3, 6 # encoding: [0x10,0x23,0x31,0x03] #CHECK-LE: vucmprhh 1, 3, 6 # encoding: [0x03,0x31,0x23,0x10] + + xxaes192encp 8, 10, 14 +#CHECK-BE: xxaes192encp 8, 10, 14 # encoding: 
[0xf1,0x0b,0x76,0x10] +#CHECK-LE: xxaes192encp 8, 10, 14 # encoding: [0x10,0x76,0x0b,0xf1] + + xxaes256decp 14, 10, 6 +#CHECK-BE: xxaes256decp 14, 10, 6 # encoding: [0xf1,0xca,0x3e,0x50] +#CHECK-LE: xxaes256decp 14, 10, 6 # encoding: [0x50,0x3e,0xca,0xf1] + + xxaes128genlkp 4, 8 +#CHECK-BE: xxaes128genlkp 4, 8 # encoding: [0xf0,0x80,0x46,0x90] +#CHECK-LE: xxaes128genlkp 4, 8 # encoding: [0x90,0x46,0x80,0xf0] + + xxgfmul128gcm 7, 5, 4 +#CHECK-BE: xxgfmul128gcm 7, 5, 4 # encoding: [0xf0,0xe5,0x26,0xd0] +#CHECK-LE: xxgfmul128gcm 7, 5, 4 # encoding: [0xd0,0x26,0xe5,0xf0] + + xvadduwm 4, 5, 7 +#CHECK-BE: xvadduwm 4, 5, 7 # encoding: [0xf0,0x85,0x3c,0x18] +#CHECK-LE: xvadduwm 4, 5, 7 # encoding: [0x18,0x3c,0x85,0xf0] + + xvadduhm 4, 5, 7 +#CHECK-BE: xvadduhm 4, 5, 7 # encoding: [0xf0,0x85,0x3c,0x58] +#CHECK-LE: xvadduhm 4, 5, 7 # encoding: [0x58,0x3c,0x85,0xf0] + + xvsubuwm 4, 5, 7 +#CHECK-BE: xvsubuwm 4, 5, 7 # encoding: [0xf0,0x85,0x3c,0x98] +#CHECK-LE: xvsubuwm 4, 5, 7 # encoding: [0x98,0x3c,0x85,0xf0] + + xvsubuhm 4, 5, 7 +#CHECK-BE: xvsubuhm 4, 5, 7 # encoding: [0xf0,0x85,0x3c,0xd8] +#CHECK-LE: xvsubuhm 4, 5, 7 # encoding: [0xd8,0x3c,0x85,0xf0] + + xvmuluwm 4, 5, 7 +#CHECK-BE: xvmuluwm 4, 5, 7 # encoding: [0xf0,0x85,0x3d,0x18] +#CHECK-LE: xvmuluwm 4, 5, 7 # encoding: [0x18,0x3d,0x85,0xf0] + + xvmuluhm 4, 5, 7 +#CHECK-BE: xvmuluhm 4, 5, 7 # encoding: [0xf0,0x85,0x3d,0x58] +#CHECK-LE: xvmuluhm 4, 5, 7 # encoding: [0x58,0x3d,0x85,0xf0] + + xvmulhsw 4, 5, 7 +#CHECK-BE: xvmulhsw 4, 5, 7 # encoding: [0xf0,0x85,0x3d,0x98] +#CHECK-LE: xvmulhsw 4, 5, 7 # encoding: [0x98,0x3d,0x85,0xf0] + + xvmulhsh 4, 5, 7 +#CHECK-BE: xvmulhsh 4, 5, 7 # encoding: [0xf0,0x85,0x3d,0xd8] +#CHECK-LE: xvmulhsh 4, 5, 7 # encoding: [0xd8,0x3d,0x85,0xf0] + + xvmulhuw 4, 5, 7 +#CHECK-BE: xvmulhuw 4, 5, 7 # encoding: [0xf0,0x85,0x3b,0x90] +#CHECK-LE: xvmulhuw 4, 5, 7 # encoding: [0x90,0x3b,0x85,0xf0] + + xvmulhuh 4, 5, 7 +#CHECK-BE: xvmulhuh 4, 5, 7 # encoding: [0xf0,0x85,0x3b,0xd0] +#CHECK-LE: xvmulhuh 4, 5, 7 # encoding: [0xd0,0x3b,0x85,0xf0] + + xxmulmul 8, 3, 4, 2 +#CHECK-BE: xxmulmul 8, 3, 4, 2 # encoding: [0xed,0x03,0x22,0x08] +#CHECK-LE: xxmulmul 8, 3, 4, 2 # encoding: [0x08,0x22,0x03,0xed] + + xxmulmulhiadd 8, 3, 4, 1, 0, 1 +#CHECK-BE: xxmulmulhiadd 8, 3, 4, 1, 0, 1 # encoding: [0xed,0x03,0x25,0x48] +#CHECK-LE: xxmulmulhiadd 8, 3, 4, 1, 0, 1 # encoding: [0x48,0x25,0x03,0xed] + + xxmulmulloadd 8, 3, 4, 1, 0 +#CHECK-BE: xxmulmulloadd 8, 3, 4, 1, 0 # encoding: [0xed,0x03,0x22,0x88] +#CHECK-LE: xxmulmulloadd 8, 3, 4, 1, 0 # encoding: [0x88,0x22,0x03,0xed] + + xxssumudm 8, 3, 4, 1 +#CHECK-BE: xxssumudm 8, 3, 4, 1 # encoding: [0xed,0x03,0x24,0xc8] +#CHECK-LE: xxssumudm 8, 3, 4, 1 # encoding: [0xc8,0x24,0x03,0xed] + + xxssumudmc 8, 3, 4, 1 +#CHECK-BE: xxssumudmc 8, 3, 4, 1 # encoding: [0xed,0x03,0x25,0xc8] +#CHECK-LE: xxssumudmc 8, 3, 4, 1 # encoding: [0xc8,0x25,0x03,0xed] + + xxssumudmcext 8, 3, 4, 6, 0 +# CHECK-BE: xxssumudmcext 8, 3, 4, 6, 0 # encoding: [0x05,0x00,0x00,0x00, +# CHECK-BE-SAME: 0x89,0x03,0x21,0xa0] +# CHECK-LE: xxssumudmcext 8, 3, 4, 6, 0 # encoding: [0x00,0x00,0x00,0x05, +# CHECK-LE-SAME: 0xa0,0x21,0x03,0x89] + + xsaddadduqm 4, 5, 7 +#CHECK-BE: xsaddadduqm 4, 5, 7 # encoding: [0xec,0x85,0x3b,0x00] +#CHECK-LE: xsaddadduqm 4, 5, 7 # encoding: [0x00,0x3b,0x85,0xec] + + xsaddaddsuqm 4, 5, 7 +#CHECK-BE: xsaddaddsuqm 4, 5, 7 # encoding: [0xec,0x85,0x3b,0x40] +#CHECK-LE: xsaddaddsuqm 4, 5, 7 # encoding: [0x40,0x3b,0x85,0xec] + + xsaddsubuqm 4, 5, 7 +#CHECK-BE: xsaddsubuqm 4, 5, 7 # encoding: [0xec,0x85,0x3b,0x80] 
+#CHECK-LE: xsaddsubuqm 4, 5, 7 # encoding: [0x80,0x3b,0x85,0xec] + + xsaddsubsuqm 4, 5, 7 +#CHECK-BE: xsaddsubsuqm 4, 5, 7 # encoding: [0xec,0x85,0x3f,0x00] +#CHECK-LE: xsaddsubsuqm 4, 5, 7 # encoding: [0x00,0x3f,0x85,0xec] + + xsrebase2t1uqm 4, 5, 7 +#CHECK-BE: xsrebase2t1uqm 4, 5, 7 # encoding: [0xec,0x85,0x3c,0x88] +#CHECK-LE: xsrebase2t1uqm 4, 5, 7 # encoding: [0x88,0x3c,0x85,0xec] + + xsrebase2t2uqm 4, 5, 7 +#CHECK-BE: xsrebase2t2uqm 4, 5, 7 # encoding: [0xec,0x85,0x3d,0x88] +#CHECK-LE: xsrebase2t2uqm 4, 5, 7 # encoding: [0x88,0x3d,0x85,0xec] + + xsrebase2t3uqm 4, 5, 7 +#CHECK-BE: xsrebase2t3uqm 4, 5, 7 # encoding: [0xec,0x85,0x3e,0x88] +#CHECK-LE: xsrebase2t3uqm 4, 5, 7 # encoding: [0x88,0x3e,0x85,0xec] + + xsrebase2t4uqm 4, 5, 7 +#CHECK-BE: xsrebase2t4uqm 4, 5, 7 # encoding: [0xec,0x85,0x3e,0xc8] +#CHECK-LE: xsrebase2t4uqm 4, 5, 7 # encoding: [0xc8,0x3e,0x85,0xec] + + xsrebase3t1uqm 4, 5, 7 +#CHECK-BE: xsrebase3t1uqm 4, 5, 7 # encoding: [0xec,0x85,0x3f,0x88] +#CHECK-LE: xsrebase3t1uqm 4, 5, 7 # encoding: [0x88,0x3f,0x85,0xec] + + xsrebase3t2uqm 4, 5, 7 +#CHECK-BE: xsrebase3t2uqm 4, 5, 7 # encoding: [0xec,0x85,0x3f,0xc8] +#CHECK-LE: xsrebase3t2uqm 4, 5, 7 # encoding: [0xc8,0x3f,0x85,0xec] + + xsrebase3t3uqm 4, 5, 7 +#CHECK-BE: xsrebase3t3uqm 4, 5, 7 # encoding: [0xec,0x85,0x3e,0x18] +#CHECK-LE: xsrebase3t3uqm 4, 5, 7 # encoding: [0x18,0x3e,0x85,0xec] + + xsmerge2t1uqm 4, 5, 7 +#CHECK-BE: xsmerge2t1uqm 4, 5, 7 # encoding: [0xec,0x85,0x3f,0x40] +#CHECK-LE: xsmerge2t1uqm 4, 5, 7 # encoding: [0x40,0x3f,0x85,0xec] + + xsmerge2t2uqm 4, 5, 7 +#CHECK-BE: xsmerge2t2uqm 4, 5, 7 # encoding: [0xec,0x85,0x3f,0x80] +#CHECK-LE: xsmerge2t2uqm 4, 5, 7 # encoding: [0x80,0x3f,0x85,0xec] + + xsmerge2t3uqm 4, 5, 7 +#CHECK-BE: xsmerge2t3uqm 4, 5, 7 # encoding: [0xec,0x85,0x3a,0xc8] +#CHECK-LE: xsmerge2t3uqm 4, 5, 7 # encoding: [0xc8,0x3a,0x85,0xec] + + xsmerge3t1uqm 4, 5, 7 +#CHECK-BE: xsmerge3t1uqm 4, 5, 7 # encoding: [0xec,0x85,0x3b,0xc8] +#CHECK-LE: xsmerge3t1uqm 4, 5, 7 # encoding: [0xc8,0x3b,0x85,0xec] diff --git a/llvm/test/MC/X86/encoder-fail.s b/llvm/test/MC/X86/encoder-fail.s index a8b9f48c8fb70..f5718e14d138f 100644 --- a/llvm/test/MC/X86/encoder-fail.s +++ b/llvm/test/MC/X86/encoder-fail.s @@ -1,16 +1,38 @@ // RUN: not llvm-mc -triple x86_64-unknown-unknown --show-encoding %s 2>&1 | FileCheck %s +// RUN: not llvm-mc -triple x86_64-unknown-unknown --show-encoding -x86-asm-syntax=intel %s 2>&1 | FileCheck %s --check-prefix=CHECK-INTEL -// CHECK: error: can't encode 'dh' in an instruction requiring REX prefix +// CHECK: error: can't encode 'dh' in an instruction requiring EVEX/REX2/REX prefix movzx %dh, %rsi -// CHECK: error: can't encode 'ah' in an instruction requiring REX prefix +// CHECK: error: can't encode 'ah' in an instruction requiring EVEX/REX2/REX prefix movzx %ah, %r8d -// CHECK: error: can't encode 'bh' in an instruction requiring REX prefix +// CHECK: error: can't encode 'bh' in an instruction requiring EVEX/REX2/REX prefix add %bh, %sil -// CHECK: error: can't encode 'ch' in an instruction requiring REX prefix +// CHECK: error: can't encode 'ch' in an instruction requiring EVEX/REX2/REX prefix mov %ch, (%r8) -// CHECK: error: can't encode 'dh' in an instruction requiring REX prefix +// CHECK: error: can't encode 'dh' in an instruction requiring EVEX/REX2/REX prefix mov %dh, (%rax,%r8) + +// CHECK-INTEL: error: can't encode 'ah' in an instruction requiring EVEX/REX2/REX prefix +add ah, ah, ah + +// CHECK-INTEL: error: can't encode 'ah' in an instruction requiring EVEX/REX2/REX 
prefix +and ah, byte ptr [-13426159], ah + +// CHECK-INTEL: error: can't encode 'ah' in an instruction requiring EVEX/REX2/REX prefix +ccmpa {dfv=of,cf} byte ptr [r8 + 4*rax + 291], ah + +// CHECK-INTEL: error: can't encode 'ah' in an instruction requiring EVEX/REX2/REX prefix +ccmpae {dfv=of,cf} byte ptr [r8 + 4*rax + 291], ah + +// CHECK-INTEL: error: can't encode 'ah' in an instruction requiring EVEX/REX2/REX prefix +sar ah, byte ptr [-13426159] + +// CHECK-INTEL: error: can't encode 'ah' in an instruction requiring EVEX/REX2/REX prefix +{rex2} add ah, al + +// CHECK-INTEL: error: can't encode 'ah' in an instruction requiring EVEX/REX2/REX prefix +{rex} add ah, al diff --git a/llvm/test/MachineVerifier/test_g_build_vector.mir b/llvm/test/MachineVerifier/test_g_build_vector.mir index 50b98017a49a7..9857306737108 100644 --- a/llvm/test/MachineVerifier/test_g_build_vector.mir +++ b/llvm/test/MachineVerifier/test_g_build_vector.mir @@ -16,17 +16,17 @@ body: | ; CHECK: Bad machine code: G_BUILD_VECTOR must produce a vector from scalar operands %3:_(<2 x s32>) = G_BUILD_VECTOR %2 - ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each elemement + ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each element %4:_(<2 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0 ; CHECK: Bad machine code: G_BUILD_VECTOR result element type must match source type - ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each elemement + ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each element %5:_(<4 x s16>) = G_BUILD_VECTOR %0, %0 %6:_(s16) = IMPLICIT_DEF ; CHECK: Bad machine code: G_BUILD_VECTOR result element type must match source type - ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each elemement + ; CHECK: Bad machine code: G_BUILD_VECTOR must have an operand for each element %7:_(<2 x s32>) = G_BUILD_VECTOR %6, %6, %6, %6 %8:_(p0) = IMPLICIT_DEF diff --git a/llvm/test/ObjectYAML/DXContainer/RootSignature-StaticSamplers1.3.yaml b/llvm/test/ObjectYAML/DXContainer/RootSignature-StaticSamplers1.3.yaml new file mode 100644 index 0000000000000..1623b05def009 --- /dev/null +++ b/llvm/test/ObjectYAML/DXContainer/RootSignature-StaticSamplers1.3.yaml @@ -0,0 +1,65 @@ +# RUN: yaml2obj %s | obj2yaml | FileCheck %s + +--- !dxcontainer +Header: + Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ] + Version: + Major: 1 + Minor: 0 + PartCount: 1 + PartOffsets: [ 60 ] +Parts: + - Name: RTS0 + Size: 80 + RootSignature: + Version: 3 + NumRootParameters: 0 + RootParametersOffset: 24 + NumStaticSamplers: 1 + StaticSamplersOffset: 24 + Parameters: [] + Samplers: + - Filter: MinLinearMagMipPoint + AddressU: Wrap + AddressV: Mirror + AddressW: MirrorOnce + MipLODBias: 1.23 + MaxAnisotropy: 20 + ComparisonFunc: LessEqual + BorderColor: TransparentBlack + MinLOD: 4.56 + MaxLOD: 8.90 + ShaderRegister: 31 + RegisterSpace: 32 + ShaderVisibility: Mesh + SAMPLER_FLAG_UINT_BORDER_COLOR: true + AllowInputAssemblerInputLayout: true + DenyGeometryShaderRootAccess: true + +#CHECK: - Name: RTS0 +#CHECK-NEXT: Size: 80 +#CHECK-NEXT: RootSignature: +#CHECK-NEXT: Version: 3 +#CHECK-NEXT: NumRootParameters: 0 +#CHECK-NEXT: RootParametersOffset: 24 +#CHECK-NEXT: NumStaticSamplers: 1 +#CHECK-NEXT: StaticSamplersOffset: 24 +#CHECK-NEXT: Parameters: [] +#CHECK-NEXT: Samplers: +#CHECK-NEXT: - Filter: MinLinearMagMipPoint +#CHECK-NEXT: AddressU: Wrap +#CHECK-NEXT: AddressV: Mirror +#CHECK-NEXT: AddressW: MirrorOnce +#CHECK-NEXT: 
MipLODBias: 1.23 +#CHECK-NEXT: MaxAnisotropy: 20 +#CHECK-NEXT: ComparisonFunc: LessEqual +#CHECK-NEXT: BorderColor: TransparentBlack +#CHECK-NEXT: MinLOD: 4.56 +#CHECK-NEXT: MaxLOD: 8.9 +#CHECK-NEXT: ShaderRegister: 31 +#CHECK-NEXT: RegisterSpace: 32 +#CHECK-NEXT: ShaderVisibility: Mesh +#CHECK-NEXT: SAMPLER_FLAG_UINT_BORDER_COLOR: true +#CHECK-NEXT: AllowInputAssemblerInputLayout: true +#CHECK-NEXT: DenyGeometryShaderRootAccess: true diff --git a/llvm/test/Other/new-pm-O0-defaults.ll b/llvm/test/Other/new-pm-O0-defaults.ll index 81d1ee0df2c5b..278a89261691a 100644 --- a/llvm/test/Other/new-pm-O0-defaults.ll +++ b/llvm/test/Other/new-pm-O0-defaults.ll @@ -44,6 +44,7 @@ ; CHECK-PRE-LINK: Running pass: CanonicalizeAliasesPass ; CHECK-PRE-LINK-NEXT: Running pass: NameAnonGlobalPass ; CHECK-THINLTO: Running pass: LowerTypeTestsPass +; CHECK-THINLTO-NEXT: Running pass: CoroConditionalWrapper ; CHECK-THINLTO-NEXT: Running pass: EliminateAvailableExternallyPass ; CHECK-THINLTO-NEXT: Running pass: GlobalDCEPass ; CHECK-LTO: Running pass: CrossDSOCFIPass on [module] diff --git a/llvm/test/TableGen/CPtrWildcard.td b/llvm/test/TableGen/CPtrWildcard.td new file mode 100644 index 0000000000000..230a6730c610a --- /dev/null +++ b/llvm/test/TableGen/CPtrWildcard.td @@ -0,0 +1,74 @@ +// RUN: llvm-tblgen -gen-dag-isel -I %p/../../include %s -o - | FileCheck %s + +// Create an intrinsic that uses cPTR to overload on capability pointer types, +// and verify that we can match it correct in SelectionDAG. + +// CHECK: static const unsigned char MatcherTable[] = { +// CHECK-NEXT: /* 0*/ OPC_CheckOpcode, TARGET_VAL(ISD::INTRINSIC_WO_CHAIN), +// CHECK-NEXT:/* 3*/ OPC_CheckChild0Integer, [[#]], +// CHECK-NEXT:/* 5*/ OPC_RecordChild1, // #0 = $src +// CHECK-NEXT:/* 6*/ OPC_Scope, 9, /*->17*/ // 2 children in Scope +// CHECK-NEXT:/* 8*/ OPC_CheckChild1Type, /*MVT::c64*/126|128,1/*254*/, +// CHECK-NEXT:/* 11*/ OPC_MorphNodeTo1None, TARGET_VAL(MyTarget::C64_TO_I64), +// CHECK-NEXT: /*MVT::i64*/8, 1/*#Ops*/, 0, +// CHECK-NEXT: // Src: (intrinsic_wo_chain:{ *:[i64] } [[#]]:{ *:[iPTR] }, c64:{ *:[c64] }:$src) - Complexity = 8 +// CHECK-NEXT: // Dst: (C64_TO_I64:{ *:[i64] } ?:{ *:[c64] }:$src) +// CHECK-NEXT:/* 17*/ /*Scope*/ 9, /*->27*/ +// CHECK-NEXT:/* 18*/ OPC_CheckChild1Type, /*MVT::c128*/127|128,1/*255*/, +// CHECK-NEXT:/* 21*/ OPC_MorphNodeTo1None, TARGET_VAL(MyTarget::C128_TO_I64), +// CHECK-NEXT: /*MVT::i64*/8, 1/*#Ops*/, 0, +// CHECK-NEXT: // Src: (intrinsic_wo_chain:{ *:[i64] } [[#]]:{ *:[iPTR] }, c128:{ *:[c128] }:$src) - Complexity = 8 +// CHECK-NEXT: // Dst: (C128_TO_I64:{ *:[i64] } ?:{ *:[c128] }:$src) +// CHECK-NEXT:/* 27*/ 0, /*End of Scope*/ +// CHECK-NEXT: 0 +// CHECK-NEXT: }; // Total Array size is 29 bytes + +include "llvm/Target/Target.td" + +def my_cap_ty : LLVMQualPointerType<200> { + let VT = cPTR; +} + +def int_cap_get_length : + Intrinsic<[llvm_i64_ty], + [my_cap_ty], + [IntrNoMem, IntrWillReturn]>; + +class CapReg : Register { + let Namespace = "MyTarget"; +} + +def C64 : CapReg<"c0">; +def C64s + : RegisterClass<"MyTarget", [i64, c64], 64, + (add C64)>; + +def C128 : CapReg<"c0">; +def C128s + : RegisterClass<"MyTarget", [c128], 64, + (add C128)>; + +def C64_TO_I64 : Instruction { + let Namespace = "MyTarget"; + let OutOperandList = (outs C64s:$dst); + let InOperandList = (ins C64s:$src); +} + +def C128_TO_I64 : Instruction { + let Namespace = "MyTarget"; + let OutOperandList = (outs C64s:$dst); + let InOperandList = (ins C128s:$src); +} + +def : Pat< + (int_cap_get_length c64:$src), + 
(C64_TO_I64 $src) +>; + +def : Pat< + (int_cap_get_length c128:$src), + (C128_TO_I64 $src) +>; + +def MyTargetISA : InstrInfo; +def MyTarget : Target { let InstructionSet = MyTargetISA; } diff --git a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td index ce4f0108b4843..18960b43ab97d 100644 --- a/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td +++ b/llvm/test/TableGen/GlobalISelCombinerEmitter/match-table-cxx.td @@ -96,71 +96,71 @@ def MyCombiner: GICombiner<"GenMyCombiner", [ // CHECK: const uint8_t *GenMyCombiner::getMatchTable() const { // CHECK-NEXT: constexpr static uint8_t MatchTable0[] = { -// CHECK-NEXT: /* 0 */ GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(99), GIMT_Encode2(210), /*)*//*default:*//*Label 5*/ GIMT_Encode4(520), -// CHECK-NEXT: /* 10 */ /*TargetOpcode::G_STORE*//*Label 0*/ GIMT_Encode4(454), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), -// CHECK-NEXT: /* 182 */ /*TargetOpcode::G_SEXT*//*Label 1*/ GIMT_Encode4(472), GIMT_Encode4(0), -// CHECK-NEXT: /* 190 */ /*TargetOpcode::G_ZEXT*//*Label 2*/ GIMT_Encode4(484), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), -// CHECK-NEXT: /* 414 */ /*TargetOpcode::G_FNEG*//*Label 3*/ GIMT_Encode4(496), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), -// CHECK-NEXT: /* 450 */ /*TargetOpcode::G_FABS*//*Label 4*/ GIMT_Encode4(508), -// CHECK-NEXT: /* 454 */ // Label 0: @454 -// CHECK-NEXT: /* 454 */ GIM_Try, /*On fail goto*//*Label 6*/ GIMT_Encode4(471), // Rule ID 2 // -// CHECK-NEXT: /* 459 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule2Enabled), -// CHECK-NEXT: /* 462 */ // MIs[0] x -// CHECK-NEXT: /* 462 */ // No operand predicates -// CHECK-NEXT: /* 462 */ // MIs[0] y -// CHECK-NEXT: /* 462 */ // No operand predicates -// CHECK-NEXT: /* 462 */ GIM_CheckCxxInsnPredicate, /*MI*/0, 
/*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_GICombiner0), -// CHECK-NEXT: /* 466 */ GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_GICombiner1), -// CHECK-NEXT: /* 470 */ // Combiner Rule #2: TwoMatchNoApply -// CHECK-NEXT: /* 470 */ GIR_EraseRootFromParent_Done, -// CHECK-NEXT: /* 471 */ // Label 6: @471 -// CHECK-NEXT: /* 471 */ GIM_Reject, -// CHECK-NEXT: /* 472 */ // Label 1: @472 -// CHECK-NEXT: /* 472 */ GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(483), // Rule ID 3 // -// CHECK-NEXT: /* 477 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule3Enabled), -// CHECK-NEXT: /* 480 */ // MIs[0] a -// CHECK-NEXT: /* 480 */ // No operand predicates -// CHECK-NEXT: /* 480 */ // MIs[0] y -// CHECK-NEXT: /* 480 */ // No operand predicates -// CHECK-NEXT: /* 480 */ // Combiner Rule #3: NoMatchTwoApply -// CHECK-NEXT: /* 480 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner2), -// CHECK-NEXT: /* 483 */ // Label 7: @483 -// CHECK-NEXT: /* 483 */ GIM_Reject, -// CHECK-NEXT: /* 484 */ // Label 2: @484 -// CHECK-NEXT: /* 484 */ GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(495), // Rule ID 4 // -// CHECK-NEXT: /* 489 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule4Enabled), -// CHECK-NEXT: /* 492 */ // MIs[0] a -// CHECK-NEXT: /* 492 */ // No operand predicates -// CHECK-NEXT: /* 492 */ // MIs[0] y -// CHECK-NEXT: /* 492 */ // No operand predicates -// CHECK-NEXT: /* 492 */ // Combiner Rule #4: CombineCXXOrder -// CHECK-NEXT: /* 492 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner3), -// CHECK-NEXT: /* 495 */ // Label 8: @495 -// CHECK-NEXT: /* 495 */ GIM_Reject, -// CHECK-NEXT: /* 496 */ // Label 3: @496 -// CHECK-NEXT: /* 496 */ GIM_Try, /*On fail goto*//*Label 9*/ GIMT_Encode4(507), // Rule ID 1 // -// CHECK-NEXT: /* 501 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled), -// CHECK-NEXT: /* 504 */ // MIs[0] a -// CHECK-NEXT: /* 504 */ // No operand predicates -// CHECK-NEXT: /* 504 */ // MIs[0] b -// CHECK-NEXT: /* 504 */ // No operand predicates -// CHECK-NEXT: /* 504 */ // Combiner Rule #1: TwoMatchTwoApply -// CHECK-NEXT: /* 504 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner1), -// CHECK-NEXT: /* 507 */ // Label 9: @507 -// CHECK-NEXT: /* 507 */ GIM_Reject, -// CHECK-NEXT: /* 508 */ // Label 4: @508 -// CHECK-NEXT: /* 508 */ GIM_Try, /*On fail goto*//*Label 10*/ GIMT_Encode4(519), // Rule ID 0 // -// CHECK-NEXT: /* 513 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule0Enabled), -// CHECK-NEXT: /* 516 */ // MIs[0] a -// CHECK-NEXT: /* 516 */ // No operand predicates -// CHECK-NEXT: /* 516 */ // MIs[0] b -// CHECK-NEXT: /* 516 */ // No operand predicates -// CHECK-NEXT: /* 516 */ // Combiner Rule #0: OneMatchOneApply -// CHECK-NEXT: /* 516 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner0), -// CHECK-NEXT: /* 519 */ // Label 10: @519 -// CHECK-NEXT: /* 519 */ GIM_Reject, -// CHECK-NEXT: /* 520 */ // Label 5: @520 -// CHECK-NEXT: /* 520 */ GIM_Reject, -// CHECK-NEXT: /* 521 */ }; // Size: 521 bytes +// CHECK-NEXT: /* 0 */ GIM_SwitchOpcode, /*MI*/0, /*[*/GIMT_Encode2(99), GIMT_Encode2(211), /*)*//*default:*//*Label 5*/ GIMT_Encode4(524), +// CHECK-NEXT: /* 10 */ /*TargetOpcode::G_STORE*//*Label 0*/ GIMT_Encode4(458), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), 
GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), +// CHECK-NEXT: /* 182 */ /*TargetOpcode::G_SEXT*//*Label 1*/ GIMT_Encode4(476), GIMT_Encode4(0), +// CHECK-NEXT: /* 190 */ /*TargetOpcode::G_ZEXT*//*Label 2*/ GIMT_Encode4(488), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), +// CHECK-NEXT: /* 418 */ /*TargetOpcode::G_FNEG*//*Label 3*/ GIMT_Encode4(500), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), GIMT_Encode4(0), +// CHECK-NEXT: /* 454 */ /*TargetOpcode::G_FABS*//*Label 4*/ GIMT_Encode4(512), +// CHECK-NEXT: /* 458 */ // Label 0: @458 +// CHECK-NEXT: /* 458 */ GIM_Try, /*On fail goto*//*Label 6*/ GIMT_Encode4(475), // Rule ID 2 // +// CHECK-NEXT: /* 463 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule2Enabled), +// CHECK-NEXT: /* 466 */ // MIs[0] x +// CHECK-NEXT: /* 466 */ // No operand predicates +// CHECK-NEXT: /* 466 */ // MIs[0] y +// CHECK-NEXT: /* 466 */ // No operand predicates +// CHECK-NEXT: /* 466 */ GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_GICombiner0), +// CHECK-NEXT: /* 470 */ GIM_CheckCxxInsnPredicate, /*MI*/0, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_GICombiner1), +// CHECK-NEXT: /* 474 */ // Combiner Rule #2: TwoMatchNoApply +// CHECK-NEXT: /* 474 */ GIR_EraseRootFromParent_Done, +// CHECK-NEXT: /* 475 */ // Label 6: @475 +// CHECK-NEXT: /* 475 */ GIM_Reject, +// CHECK-NEXT: /* 476 */ // Label 1: @476 +// CHECK-NEXT: /* 476 */ GIM_Try, /*On fail goto*//*Label 7*/ GIMT_Encode4(487), // Rule ID 3 // +// CHECK-NEXT: /* 481 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule3Enabled), +// CHECK-NEXT: /* 484 */ // MIs[0] a +// CHECK-NEXT: /* 484 */ // No operand predicates +// CHECK-NEXT: /* 484 */ // MIs[0] y +// CHECK-NEXT: /* 484 */ // No operand predicates +// CHECK-NEXT: /* 484 */ // Combiner Rule #3: NoMatchTwoApply +// CHECK-NEXT: /* 484 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner2), +// CHECK-NEXT: /* 487 */ // Label 7: @487 +// CHECK-NEXT: /* 487 */ GIM_Reject, +// CHECK-NEXT: /* 488 */ // Label 2: @488 +// CHECK-NEXT: 
/* 488 */ GIM_Try, /*On fail goto*//*Label 8*/ GIMT_Encode4(499), // Rule ID 4 // +// CHECK-NEXT: /* 493 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule4Enabled), +// CHECK-NEXT: /* 496 */ // MIs[0] a +// CHECK-NEXT: /* 496 */ // No operand predicates +// CHECK-NEXT: /* 496 */ // MIs[0] y +// CHECK-NEXT: /* 496 */ // No operand predicates +// CHECK-NEXT: /* 496 */ // Combiner Rule #4: CombineCXXOrder +// CHECK-NEXT: /* 496 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner3), +// CHECK-NEXT: /* 499 */ // Label 8: @499 +// CHECK-NEXT: /* 499 */ GIM_Reject, +// CHECK-NEXT: /* 500 */ // Label 3: @500 +// CHECK-NEXT: /* 500 */ GIM_Try, /*On fail goto*//*Label 9*/ GIMT_Encode4(511), // Rule ID 1 // +// CHECK-NEXT: /* 505 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule1Enabled), +// CHECK-NEXT: /* 508 */ // MIs[0] a +// CHECK-NEXT: /* 508 */ // No operand predicates +// CHECK-NEXT: /* 508 */ // MIs[0] b +// CHECK-NEXT: /* 508 */ // No operand predicates +// CHECK-NEXT: /* 508 */ // Combiner Rule #1: TwoMatchTwoApply +// CHECK-NEXT: /* 508 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner1), +// CHECK-NEXT: /* 511 */ // Label 9: @511 +// CHECK-NEXT: /* 511 */ GIM_Reject, +// CHECK-NEXT: /* 512 */ // Label 4: @512 +// CHECK-NEXT: /* 512 */ GIM_Try, /*On fail goto*//*Label 10*/ GIMT_Encode4(523), // Rule ID 0 // +// CHECK-NEXT: /* 517 */ GIM_CheckSimplePredicate, GIMT_Encode2(GICXXPred_Simple_IsRule0Enabled), +// CHECK-NEXT: /* 520 */ // MIs[0] a +// CHECK-NEXT: /* 520 */ // No operand predicates +// CHECK-NEXT: /* 520 */ // MIs[0] b +// CHECK-NEXT: /* 520 */ // No operand predicates +// CHECK-NEXT: /* 520 */ // Combiner Rule #0: OneMatchOneApply +// CHECK-NEXT: /* 520 */ GIR_DoneWithCustomAction, /*Fn*/GIMT_Encode2(GICXXCustomAction_GICombiner0), +// CHECK-NEXT: /* 523 */ // Label 10: @523 +// CHECK-NEXT: /* 523 */ GIM_Reject, +// CHECK-NEXT: /* 524 */ // Label 5: @524 +// CHECK-NEXT: /* 524 */ GIM_Reject, +// CHECK-NEXT: /* 525 */ }; // Size: 525 bytes // CHECK-NEXT: return MatchTable0; // CHECK-NEXT: } diff --git a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td index 7a86b5b726a82..fdabc53a3ff3b 100644 --- a/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td +++ b/llvm/test/TableGen/GlobalISelEmitter/GlobalISelEmitter.td @@ -535,7 +535,7 @@ def : Pat<(frag GPR32:$src1, complex:$src2, complex:$src3), // R00O-NEXT: GIM_Reject, // R00O: // Label [[DEFAULT_NUM]]: @[[DEFAULT]] // R00O-NEXT: GIM_Reject, -// R00O-NEXT: }; // Size: 1894 bytes +// R00O-NEXT: }; // Size: 1902 bytes def INSNBOB : I<(outs GPR32:$dst), (ins GPR32:$src1, GPR32:$src2, GPR32:$src3, GPR32:$src4), [(set GPR32:$dst, diff --git a/llvm/test/TableGen/RegClassByHwMode.td b/llvm/test/TableGen/RegClassByHwMode.td index 5d813d2bfc83a..ca72cfbd403bf 100644 --- a/llvm/test/TableGen/RegClassByHwMode.td +++ b/llvm/test/TableGen/RegClassByHwMode.td @@ -50,7 +50,7 @@ include "llvm/Target/Target.td" // INSTRINFO-NEXT: }; // INSTRINFO: static inline void InitMyTargetMCInstrInfo( -// INSTRINFO-NEXT: II->InitMCInstrInfo(MyTargetDescs.Insts, MyTargetInstrNameIndices, MyTargetInstrNameData, nullptr, nullptr, 321, &MyTargetRegClassByHwModeTables[0][0], 3); +// INSTRINFO-NEXT: II->InitMCInstrInfo(MyTargetDescs.Insts, MyTargetInstrNameIndices, MyTargetInstrNameData, nullptr, nullptr, {{[0-9]+}}, &MyTargetRegClassByHwModeTables[0][0], 3); diff --git a/llvm/test/TableGen/intrinsic-struct.td 
b/llvm/test/TableGen/intrinsic-struct.td index 467fd9057c183..032cdc10e74ed 100644 --- a/llvm/test/TableGen/intrinsic-struct.td +++ b/llvm/test/TableGen/intrinsic-struct.td @@ -1,22 +1,58 @@ // RUN: llvm-tblgen -gen-intrinsic-enums -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS | FileCheck %s --check-prefix=CHECK-ENUM -// RUN: llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS > /dev/null 2>&1 +// RUN: llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS | FileCheck %s --check-prefix=CHECK-IMPL // RUN: not llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS -DENABLE_ERROR 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR // XFAIL: vg_leak include "llvm/IR/Intrinsics.td" -// Make sure we can return up to 9 values. -// CHECK-ENUM: returns_9_results = {{[0-9]+}}, // llvm.returns.9.results -def int_returns_9_results : Intrinsic< - !listsplat(llvm_anyint_ty, 9), - [], [], "llvm.returns.9.results">; +// Make sure we can return up to 257 values. Intrinsics are in alphabetical order. +// CHECK-ENUM: returns_a0_results = {{[0-9]+}}, // llvm.returns.a0.results +// CHECK-ENUM: returns_b1_results, // llvm.returns.b1.results +// CHECK-ENUM: returns_c2_results, // llvm.returns.c2.results +// CHECK-ENUM: returns_d9_results, // llvm.returns.d9.results +// CHECK-ENUM: returns_e10_results, // llvm.returns.e10.results +// CHECK-ENUM: returns_f257_results, // llvm.returns.f257.results -#ifdef ENABLE_ERROR -// CHECK-ERROR: error: intrinsics can only return upto 9 values, 'int_returns_10_results' returns 10 values -// CHECK-ERROR-NEXT: def int_returns_10_results : Intrinsic< -def int_returns_10_results : Intrinsic< +// Make sure the encoding table is correctly generated. 
+// CHECK-IMPL: IIT_LongEncodingTable +// CHECK-IMPL-NEXT: 21, 255 +// CHECK-IMPL-SAME: 15, 1, 15, 9, 15, 17, 15, 25, 15, 33, 15, 41, 15, 49, 15, 57, 15, 65, 15, 73, 15, 81, +// CHECK-IMPL-NEXT: 21, 0 +// CHECK-IMPL-SAME: 15, 1, 15, 9, 0 +// CHECK-IMPL-NEXT: 21, 7 +// CHECK-IMPL-SAME: 15, 1, 15, 9, 15, 17, 15, 25, 15, 33, 15, 41, 15, 49, 15, 57, 15, 65, 0 +// CHECK-IMPL-NEXT: 21, 8 +// CHECK-IMPL-SAME: 15, 1, 15, 9, 15, 17, 15, 25, 15, 33, 15, 41, 15, 49, 15, 57, 15, 65, 15, 73, 0 +def int_returns_a0_results : Intrinsic< + [], + [], [], "llvm.returns.a0.results">; + +def int_returns_b1_results : Intrinsic< + [llvm_anyint_ty], + [], [], "llvm.returns.b1.results">; + +def int_returns_c2_results : Intrinsic< + !listsplat(llvm_anyint_ty, 2), + [], [], "llvm.returns.c2.results">; + +def int_returns_d9_results : Intrinsic< + !listsplat(llvm_anyint_ty, 9), + [], [], "llvm.returns.d9.results">; + +def int_returns_e10_results : Intrinsic< !listsplat(llvm_anyint_ty, 10), - [], [], "llvm.returns.10.results">; + [], [], "llvm.returns.e10.results">; + +def int_returns_f257_results : Intrinsic< + !listsplat(llvm_anyint_ty, 257), + [], [], "llvm.returns.f257.results">; + +#ifdef ENABLE_ERROR +// CHECK-ERROR: error: intrinsics can only return upto 257 values, 'int_returns_g258_results' returns 258 values +// CHECK-ERROR-NEXT: def int_returns_g258_results : Intrinsic< +def int_returns_g258_results : Intrinsic< + !listsplat(llvm_anyint_ty, 258), + [], [], "llvm.returns.g258.results">; #endif diff --git a/llvm/test/TableGen/intrinsic-varargs.td b/llvm/test/TableGen/intrinsic-varargs.td index 3634e16e20565..f94e1d0d6750e 100644 --- a/llvm/test/TableGen/intrinsic-varargs.td +++ b/llvm/test/TableGen/intrinsic-varargs.td @@ -3,5 +3,5 @@ include "llvm/IR/Intrinsics.td" -// CHECK: /* 0 */ 0, 29, 0, +// CHECK: /* 0 */ 0, 26, 0, def int_foo : Intrinsic<[], [llvm_vararg_ty]>; diff --git a/llvm/test/ThinLTO/X86/memprof-funcassigncloning2.ll b/llvm/test/ThinLTO/X86/memprof-funcassigncloning2.ll new file mode 100644 index 0000000000000..bcd3cea5b7ff1 --- /dev/null +++ b/llvm/test/ThinLTO/X86/memprof-funcassigncloning2.ll @@ -0,0 +1,142 @@ +;; Similar to funcassigncloning.ll but hand modified to add another allocation +;; whose pruned cold context only includes an immediate caller node that itself +;; doesn't need cloning, but calls a cloned allocating function, and is in a +;; function that gets cloned multiple times for a different callsite. This test +;; makes sure the non-cloned callsite is correctly updated in all function +;; clones. This case was missed because, due to context pruning, we don't have +;; any caller edges for the first callsite, so the handling that kicks in to +;; "reclone" other callsites in cloned functions was being missed. 
+ +; RUN: opt -thinlto-bc %s >%t.o +; RUN: llvm-lto2 run %t.o -enable-memprof-context-disambiguation \ +; RUN: -supports-hot-cold-new \ +; RUN: -r=%t.o,main,plx \ +; RUN: -r=%t.o,_Znam, \ +; RUN: -memprof-verify-ccg -memprof-verify-nodes \ +; RUN: -pass-remarks=memprof-context-disambiguation -save-temps \ +; RUN: -o %t.out 2>&1 | FileCheck %s --check-prefix=REMARKS + +; RUN: llvm-dis %t.out.1.4.opt.bc -o - | FileCheck %s --check-prefix=IR + + +;; Try again but with distributed ThinLTO +; RUN: llvm-lto2 run %t.o -enable-memprof-context-disambiguation \ +; RUN: -supports-hot-cold-new \ +; RUN: -thinlto-distributed-indexes \ +; RUN: -r=%t.o,main,plx \ +; RUN: -r=%t.o,_Znam, \ +; RUN: -memprof-verify-ccg -memprof-verify-nodes \ +; RUN: -pass-remarks=memprof-context-disambiguation \ +; RUN: -o %t2.out + +;; Run ThinLTO backend +; RUN: opt -passes=memprof-context-disambiguation \ +; RUN: -memprof-import-summary=%t.o.thinlto.bc \ +; RUN: -pass-remarks=memprof-context-disambiguation \ +; RUN: %t.o -S 2>&1 | FileCheck %s --check-prefix=IR \ +; RUN: --check-prefix=REMARKS + + +source_filename = "funcassigncloning.ll" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +;; Eventually this function will be cloned several times (for the calls to new +;; for the various callers). However, function blah() includes an allocation +;; whose cold context was trimmed above here. We therefore should assume that +;; every caller of this function should call the same version of blah (which +;; will be the cloned ".memprof.1" version. +; Function Attrs: noinline optnone +define internal void @_Z1EPPcS0_(ptr %buf1, ptr %buf2) #0 { +entry: + call void @blah(), !callsite !19 + %call = call ptr @_Znam(i64 noundef 10), !memprof !0, !callsite !7 + %call1 = call ptr @_Znam(i64 noundef 10), !memprof !8, !callsite !15 + ret void +} + +; REMARKS: call in clone _Z1EPPcS0_ assigned to call function clone blah.memprof.1 +; REMARKS: call in clone _Z1EPPcS0_.memprof.1 assigned to call function clone blah.memprof.1 +; REMARKS: call in clone _Z1EPPcS0_.memprof.2 assigned to call function clone blah.memprof.1 +; REMARKS: call in clone _Z1EPPcS0_.memprof.3 assigned to call function clone blah.memprof.1 + +; IR: define {{.*}} @_Z1EPPcS0_ +; IR: call {{.*}} @blah.memprof.1() +; IR: define {{.*}} @_Z1EPPcS0_.memprof.2 +; IR: call {{.*}} @blah.memprof.1() +; IR: define {{.*}} @_Z1EPPcS0_.memprof.3 +; IR: call {{.*}} @blah.memprof.1() + +declare ptr @_Znam(i64) + +define internal void @_Z1BPPcS0_() { +entry: + call void @_Z1EPPcS0_(ptr null, ptr null), !callsite !16 + ret void +} + +define internal void @_Z1CPPcS0_() { +entry: + call void @_Z1EPPcS0_(ptr null, ptr null), !callsite !17 + ret void +} + +define internal void @_Z1DPPcS0_() { +entry: + call void @_Z1EPPcS0_(ptr null, ptr null), !callsite !18 + ret void +} + +; Function Attrs: noinline optnone +define i32 @main() #0 { +entry: + call void @_Z1BPPcS0_() + call void @_Z1CPPcS0_() + call void @_Z1DPPcS0_() + ret i32 0 +} + +define internal void @blah() #0 { +entry: + %call = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #6, !memprof !22, !callsite !21 + ret void +} + +define internal void @foo() #0 { +entry: + call void @blah(), !callsite !20 + ret void +} + +; uselistorder directives +uselistorder ptr @_Znam, { 1, 0, 2 } + +attributes #0 = { noinline optnone } + +!0 = !{!1, !3, !5} +!1 = !{!2, !"cold"} +!2 = !{i64 -3461278137325233666, i64 -7799663586031895603} +!3 = !{!4, 
!"notcold"} +!4 = !{i64 -3461278137325233666, i64 -3483158674395044949} +!5 = !{!6, !"notcold"} +!6 = !{i64 -3461278137325233666, i64 -2441057035866683071} +!7 = !{i64 -3461278137325233666} +!8 = !{!9, !11, !13} +!9 = !{!10, !"notcold"} +!10 = !{i64 -1415475215210681400, i64 -2441057035866683071} +!11 = !{!12, !"cold"} +!12 = !{i64 -1415475215210681400, i64 -3483158674395044949} +!13 = !{!14, !"notcold"} +!14 = !{i64 -1415475215210681400, i64 -7799663586031895603} +!15 = !{i64 -1415475215210681400} +!16 = !{i64 -2441057035866683071} +!17 = !{i64 -3483158674395044949} +!18 = !{i64 -7799663586031895603} +!19 = !{i64 123} +!20 = !{i64 234} +!21 = !{i64 345} +!22 = !{!23, !25} +!23 = !{!24, !"cold"} +!24 = !{i64 345, i64 123} +!25 = !{!26, !"notcold"} +!26 = !{i64 345, i64 234} diff --git a/llvm/test/Transforms/AggressiveInstCombine/inline-strcmp-debugloc.ll b/llvm/test/Transforms/AggressiveInstCombine/inline-strcmp-debugloc.ll index 94c912876d7b9..6015607c05df4 100644 --- a/llvm/test/Transforms/AggressiveInstCombine/inline-strcmp-debugloc.ll +++ b/llvm/test/Transforms/AggressiveInstCombine/inline-strcmp-debugloc.ll @@ -5,8 +5,9 @@ @.str = constant [3 x i8] c"-h\00" -define i32 @main() { -; CHECK-LABEL: define i32 @main() { +define i32 @main() !prof !8 { +; CHECK-LABEL: define i32 @main() +; CHECK: !prof [[PROF_0:![0-9]+]] ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br label %[[SUB_0:.*]], !dbg [[DBG4:![0-9]+]] ; CHECK: [[SUB_0]]: @@ -14,13 +15,13 @@ define i32 @main() { ; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[TMP0]] to i32, !dbg [[DBG4]] ; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[TMP1]], 45, !dbg [[DBG4]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0, !dbg [[DBG4]] -; CHECK-NEXT: br i1 [[TMP3]], label %[[NE:.*]], label %[[SUB_1:.*]], !dbg [[DBG4]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[NE:.*]], label %[[SUB_1:.*]], !dbg [[DBG4]], !prof [[PROF_1:![0-9]+]] ; CHECK: [[SUB_1]]: ; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr null, i64 1), align 1, !dbg [[DBG4]] ; CHECK-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32, !dbg [[DBG4]] ; CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], 104, !dbg [[DBG4]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0, !dbg [[DBG4]] -; CHECK-NEXT: br i1 [[TMP7]], label %[[NE]], label %[[SUB_2:.*]], !dbg [[DBG4]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[NE]], label %[[SUB_2:.*]], !dbg [[DBG4]], !prof [[PROF_1]] ; CHECK: [[SUB_2]]: ; CHECK-NEXT: br label %[[NE]], !dbg [[DBG4]] ; CHECK: [[NE]]: @@ -46,11 +47,14 @@ declare i32 @strcmp(ptr, ptr) !4 = !DILocation(line: 258, column: 10, scope: !5) !5 = distinct !DISubprogram(name: "streq", scope: !1, file: !1, line: 257, type: !7, scopeLine: 257, unit: !0, retainedNodes: !2) !7 = !DISubroutineType(types: !2) +!8 = !{!"function_entry_count", i64 1000} ;. 
; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], producer: "{{.*}}clang version {{.*}}", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: [[META2:![0-9]+]], retainedTypes: [[META2]], globals: [[META2]]) ; CHECK: [[META1]] = !DIFile(filename: "test.c", directory: {{.*}}) ; CHECK: [[META2]] = !{} +; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000} ; CHECK: [[DBG4]] = !DILocation(line: 258, column: 10, scope: [[META5:![0-9]+]]) ; CHECK: [[META5]] = distinct !DISubprogram(name: "streq", scope: [[META1]], file: [[META1]], line: 257, type: [[META6:![0-9]+]], scopeLine: 257, spFlags: DISPFlagDefinition, unit: [[META0]], retainedNodes: [[META2]]) ; CHECK: [[META6]] = !DISubroutineType(types: [[META2]]) +; CHECK: [[PROF_1]] = !{!"unknown", !"aggressive-instcombine"} ;. diff --git a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll index b26320be634b8..6fbe960109098 100644 --- a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll +++ b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll @@ -6,9 +6,10 @@ declare ptr @memchr(ptr, i32, i64) -define i1 @test_memchr_null(i32 %x) { +define i1 @test_memchr_null(i32 %x) !prof !0 { ; CHECK-LABEL: define i1 @test_memchr_null( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0:![0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ @@ -40,9 +41,10 @@ entry: ret i1 %isnull } -define ptr @test_memchr(i32 %x) { +define ptr @test_memchr(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ @@ -72,16 +74,17 @@ entry: ret ptr %memchr } -define ptr @test_memchr_smaller_n(i32 %x) { +define ptr @test_memchr_smaller_n(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_smaller_n( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ ; CHECK-NEXT: i8 48, label %[[MEMCHR_CASE:.*]] ; CHECK-NEXT: i8 49, label %[[MEMCHR_CASE1:.*]] ; CHECK-NEXT: i8 0, label %[[MEMCHR_CASE2:.*]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF_1:![0-9]+]] ; CHECK: [[MEMCHR_CASE]]: ; CHECK-NEXT: br label %[[MEMCHR_SUCCESS:.*]] ; CHECK: [[MEMCHR_CASE1]]: @@ -103,9 +106,10 @@ entry: ; negative tests -define ptr @test_memchr_larger_n(i32 %x) { +define ptr @test_memchr_larger_n(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_larger_n( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 6) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -115,9 +119,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_non_constant(i32 %x, ptr %str) { +define ptr @test_memchr_non_constant(i32 %x, ptr %str) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_non_constant( -; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr [[STR]], i32 [[X]], i64 5) ; 
CHECK-NEXT: ret ptr [[MEMCHR]] @@ -127,8 +132,9 @@ entry: ret ptr %memchr } -define ptr @test_memchr_constant_ch() { -; CHECK-LABEL: define ptr @test_memchr_constant_ch() { +define ptr @test_memchr_constant_ch() !prof !0 { +; CHECK-LABEL: define ptr @test_memchr_constant_ch() +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 49, i64 5) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -138,9 +144,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) { +define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_dynamic_n( -; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i32 [[Y]]) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -150,9 +157,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_long(i32 %x) { +define ptr @test_memchr_long(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_long( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str_long, i32 [[X]], i64 8) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -163,9 +171,10 @@ entry: } ; We want to check that the compiler still calls memchr if the length is non-constant: -define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) { +define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_non_constant_length2( -; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 [[LEN]]) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -174,3 +183,7 @@ entry: %memchr = call ptr @memchr(ptr @str, i32 %x, i64 %len) ret ptr %memchr } + +!0 = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF_1]] = !{!"unknown", !"aggressive-instcombine"} \ No newline at end of file diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/domtree-crash.ll b/llvm/test/Transforms/AlignmentFromAssumptions/domtree-crash.ll index c7fc1dc699671..f9b9dd13b0d0c 100644 --- a/llvm/test/Transforms/AlignmentFromAssumptions/domtree-crash.ll +++ b/llvm/test/Transforms/AlignmentFromAssumptions/domtree-crash.ll @@ -9,10 +9,10 @@ define void @fn1() { ; CHECK-LABEL: define void @fn1() { -; CHECK-NEXT: call void @llvm.assume(i1 false) [ "align"(ptr @global, i64 1) ] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @global, i64 1) ] ; CHECK-NEXT: ret void ; - call void @llvm.assume(i1 false) [ "align"(ptr @global, i64 1) ] + call void @llvm.assume(i1 true) [ "align"(ptr @global, i64 1) ] ret void } diff --git a/llvm/test/Transforms/Attributor/AMDGPU/tag-invariant-loads.ll b/llvm/test/Transforms/Attributor/AMDGPU/tag-invariant-loads.ll index d5aa6b10b5add..1ab607465dbbb 100644 --- a/llvm/test/Transforms/Attributor/AMDGPU/tag-invariant-loads.ll +++ b/llvm/test/Transforms/Attributor/AMDGPU/tag-invariant-loads.ll @@ -306,12 +306,12 @@ define amdgpu_kernel void @test_call_untouched_ptr() { define amdgpu_kernel void @test_make_buffer(ptr addrspace(1) %ptr) { ; AMDGCN-LABEL: define amdgpu_kernel void @test_make_buffer( ; AMDGCN-SAME: ptr addrspace(1) nofree readonly captures(none) [[PTR:%.*]]) #[[ATTR2]] { -; 
AMDGCN-NEXT: [[RSRC:%.*]] = call align 4 ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[PTR]], i16 noundef 0, i32 noundef 0, i32 noundef 0) #[[ATTR11:[0-9]+]] +; AMDGCN-NEXT: [[RSRC:%.*]] = call align 4 ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[PTR]], i16 noundef 0, i64 noundef 0, i32 noundef 0) #[[ATTR11:[0-9]+]] ; AMDGCN-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(7) [[RSRC]], align 4 ; AMDGCN-NEXT: call void @clobber(i32 [[VAL]]) #[[ATTR7]] ; AMDGCN-NEXT: ret void ; - %rsrc = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %ptr, i16 0, i32 0, i32 0) + %rsrc = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %ptr, i16 0, i64 0, i32 0) %val = load i32, ptr addrspace(7) %rsrc, align 4 ;; original %ptr may alias call void @clobber(i32 %val) @@ -321,12 +321,12 @@ define amdgpu_kernel void @test_make_buffer(ptr addrspace(1) %ptr) { define amdgpu_kernel void @test_make_buffer_noalias(ptr addrspace(1) noalias %ptr) { ; AMDGCN-LABEL: define amdgpu_kernel void @test_make_buffer_noalias( ; AMDGCN-SAME: ptr addrspace(1) noalias nofree readonly captures(none) [[PTR:%.*]]) #[[ATTR2]] { -; AMDGCN-NEXT: [[RSRC:%.*]] = call align 4 ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[PTR]], i16 noundef 0, i32 noundef 0, i32 noundef 0) #[[ATTR11]] +; AMDGCN-NEXT: [[RSRC:%.*]] = call align 4 ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[PTR]], i16 noundef 0, i64 noundef 0, i32 noundef 0) #[[ATTR11]] ; AMDGCN-NEXT: [[VAL:%.*]] = load i32, ptr addrspace(7) [[RSRC]], align 4, !invariant.load [[META0]] ; AMDGCN-NEXT: call void @clobber(i32 [[VAL]]) #[[ATTR7]] ; AMDGCN-NEXT: ret void ; - %rsrc = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %ptr, i16 0, i32 0, i32 0) + %rsrc = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %ptr, i16 0, i64 0, i32 0) %val = load i32, ptr addrspace(7) %rsrc, align 4 call void @clobber(i32 %val) ret void diff --git a/llvm/test/Transforms/Coroutines/ArgAddr.ll b/llvm/test/Transforms/Coroutines/ArgAddr.ll index ab70836508101..9328c67459077 100644 --- a/llvm/test/Transforms/Coroutines/ArgAddr.ll +++ b/llvm/test/Transforms/Coroutines/ArgAddr.ll @@ -45,7 +45,7 @@ coro_Cleanup: br label %coro_Suspend coro_Suspend: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret ptr %1 } @@ -69,7 +69,7 @@ declare i32 @llvm.coro.size.i32() declare ptr @llvm.coro.begin(token, ptr) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.coro.resume(ptr) declare void @llvm.coro.destroy(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-align16.ll b/llvm/test/Transforms/Coroutines/coro-align16.ll index 39902be9149e8..afdca77e8af3e 100644 --- a/llvm/test/Transforms/Coroutines/coro-align16.ll +++ b/llvm/test/Transforms/Coroutines/coro-align16.ll @@ -24,7 +24,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -44,7 +44,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void 
@capture_call(ptr) declare void @nocapture_call(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-align32.ll b/llvm/test/Transforms/Coroutines/coro-align32.ll index 3d910e951259b..9e82ec83011f5 100644 --- a/llvm/test/Transforms/Coroutines/coro-align32.ll +++ b/llvm/test/Transforms/Coroutines/coro-align32.ll @@ -28,7 +28,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -48,7 +48,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @capture_call(ptr) declare void @nocapture_call(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-align64-02.ll b/llvm/test/Transforms/Coroutines/coro-align64-02.ll index 3e2e33d2da260..13c0cbe0e24da 100644 --- a/llvm/test/Transforms/Coroutines/coro-align64-02.ll +++ b/llvm/test/Transforms/Coroutines/coro-align64-02.ll @@ -24,7 +24,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -44,7 +44,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @capture_call(ptr) declare void @nocapture_call(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-align64.ll b/llvm/test/Transforms/Coroutines/coro-align64.ll index 9623a99a8b27e..f6cf8f40b2b41 100644 --- a/llvm/test/Transforms/Coroutines/coro-align64.ll +++ b/llvm/test/Transforms/Coroutines/coro-align64.ll @@ -24,7 +24,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -44,7 +44,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @capture_call(ptr) declare void @nocapture_call(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-align8-02.ll b/llvm/test/Transforms/Coroutines/coro-align8-02.ll index 758d4ce3e21b2..0a6723a41256e 100644 --- a/llvm/test/Transforms/Coroutines/coro-align8-02.ll +++ b/llvm/test/Transforms/Coroutines/coro-align8-02.ll @@ -20,7 +20,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -40,7 +40,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @aligned_alloc(i32, i32) declare void @free(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-align8.ll b/llvm/test/Transforms/Coroutines/coro-align8.ll index 48a2687cc4799..ac083378803ec 100644 --- a/llvm/test/Transforms/Coroutines/coro-align8.ll +++ b/llvm/test/Transforms/Coroutines/coro-align8.ll @@ -24,7 +24,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, 
token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -44,7 +44,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @capture_call(ptr) declare void @nocapture_call(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-alloc-with-param-O0.ll b/llvm/test/Transforms/Coroutines/coro-alloc-with-param-O0.ll index bdd49413cf15b..851f8a7e4e293 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloc-with-param-O0.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloc-with-param-O0.ll @@ -24,7 +24,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -54,7 +54,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @myAlloc(i64, i32) declare double @print(double) diff --git a/llvm/test/Transforms/Coroutines/coro-alloc-with-param-O2.ll b/llvm/test/Transforms/Coroutines/coro-alloc-with-param-O2.ll index a0ab5b733fdf0..ee2215efd1cd9 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloc-with-param-O2.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloc-with-param-O2.ll @@ -21,7 +21,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -49,7 +49,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @myAlloc(i64, i32) declare double @print(double) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-01.ll b/llvm/test/Transforms/Coroutines/coro-alloca-01.ll index 5208c055c4fdf..f0c0bb31d40f5 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-01.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-01.ll @@ -33,7 +33,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -55,7 +55,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @print(ptr) declare noalias ptr @malloc(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-02.ll b/llvm/test/Transforms/Coroutines/coro-alloca-02.ll index 83f56009f00e3..832132d451776 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-02.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-02.ll @@ -25,7 +25,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -44,7 +44,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, 
token) +declare void @llvm.coro.end(ptr, i1, token) declare void @print(ptr) declare noalias ptr @malloc(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-03.ll b/llvm/test/Transforms/Coroutines/coro-alloca-03.ll index 7740ed440a0d5..5148d87bbc2b2 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-03.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-03.ll @@ -23,7 +23,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -44,7 +44,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @capture_call(ptr) declare void @nocapture_call(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-04.ll b/llvm/test/Transforms/Coroutines/coro-alloca-04.ll index c19cd253a9179..9df1fd4326899 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-04.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-04.ll @@ -32,7 +32,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -53,7 +53,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @print(ptr) declare noalias ptr @malloc(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-05.ll b/llvm/test/Transforms/Coroutines/coro-alloca-05.ll index 96769e51fb80f..a096bb1beea21 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-05.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-05.ll @@ -23,7 +23,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -44,7 +44,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @print(i32) declare noalias ptr @malloc(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-06.ll b/llvm/test/Transforms/Coroutines/coro-alloca-06.ll index bf75196047aff..22997fbbcdfd7 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-06.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-06.ll @@ -37,7 +37,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -63,7 +63,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-07.ll b/llvm/test/Transforms/Coroutines/coro-alloca-07.ll index 8bfb8cfabbd27..ac07dc33707c7 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-07.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-07.ll @@ -36,7 +36,7 @@ cleanup: br 
label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -49,7 +49,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.lifetime.start.p0(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-08.ll b/llvm/test/Transforms/Coroutines/coro-alloca-08.ll index 80be62ac64c8c..dab55c5f0cd41 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-08.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-08.ll @@ -32,7 +32,7 @@ await.ready: %StrayCoroSave = call token @llvm.coro.save(ptr null) br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -59,7 +59,7 @@ await.ready: br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -75,6 +75,6 @@ declare token @llvm.coro.save(ptr) #3 declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-09.ll b/llvm/test/Transforms/Coroutines/coro-alloca-09.ll index 2539811f46b7c..4736790dfe324 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-09.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-09.ll @@ -52,7 +52,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll b/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll index 8b8dbacbfc5c7..baec3f1a0c869 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll @@ -68,7 +68,7 @@ loop: ] exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -80,6 +80,6 @@ declare i64 @llvm.coro.size.i64() declare ptr @llvm.coro.begin(token, ptr writeonly) declare token @llvm.coro.save(ptr) declare i8 @llvm.coro.suspend(token, i1) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.lifetime.start(ptr nocapture) declare void @llvm.lifetime.end(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-outside-frame.ll b/llvm/test/Transforms/Coroutines/coro-alloca-outside-frame.ll index ac6a5752438ce..e93e97fb06643 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-outside-frame.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-outside-frame.ll @@ -33,7 +33,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -54,7 +54,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, 
i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @print(ptr) declare noalias ptr @malloc(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-with-addrspace.ll b/llvm/test/Transforms/Coroutines/coro-alloca-with-addrspace.ll index 410d3e35e1c93..12057a953701c 100644 --- a/llvm/test/Transforms/Coroutines/coro-alloca-with-addrspace.ll +++ b/llvm/test/Transforms/Coroutines/coro-alloca-with-addrspace.ll @@ -31,7 +31,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0) + call void @llvm.coro.end(ptr %hdl, i1 0) ret ptr %hdl } @@ -50,7 +50,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1) +declare void @llvm.coro.end(ptr, i1) declare void @print(ptr) declare noalias ptr @malloc(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll index d662638d2dd9a..6562ac2e9e430 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll @@ -72,8 +72,8 @@ declare { ptr, ptr, ptr, ptr } @llvm.coro.suspend.async.sl_p0i8p0i8p0i8p0i8s(i32 declare ptr @llvm.coro.prepare.async(ptr) declare token @llvm.coro.id.async(i32, i32, i32, ptr) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end.async(ptr, i1, ...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end.async(ptr, i1, ...) +declare void @llvm.coro.end(ptr, i1, token) declare {ptr, ptr, ptr} @llvm.coro.suspend.async(i32, ptr, ptr, ...) declare ptr @context_alloc() declare void @llvm.coro.async.context.dealloc(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll index 49c4207887340..efe6403941463 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll @@ -82,7 +82,7 @@ loop: loop_exit: call void @llvm.lifetime.end.p0(ptr %escaped_addr) - call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 false) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 false) unreachable } @@ -96,8 +96,8 @@ declare { ptr, ptr, ptr, ptr } @llvm.coro.suspend.async.sl_p0i8p0i8p0i8p0i8s(i32 declare ptr @llvm.coro.prepare.async(ptr) declare token @llvm.coro.id.async(i32, i32, i32, ptr) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end.async(ptr, i1, ...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end.async(ptr, i1, ...) +declare void @llvm.coro.end(ptr, i1, token) declare {ptr, ptr, ptr} @llvm.coro.suspend.async(i32, ptr, ptr, ...) declare ptr @context_alloc() declare void @llvm.coro.async.context.dealloc(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-async-coro-id-async-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-coro-id-async-bug.ll index 3a2201f4d30c0..2405b40326eea 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-coro-id-async-bug.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-coro-id-async-bug.ll @@ -16,7 +16,7 @@ entry: %5 = getelementptr inbounds <{ ptr, ptr }>, ptr %4, i32 0, i32 1 %6 = load ptr, ptr %5, align 8 %7 = load ptr, ptr %1, align 8 - %8 = call i1 (ptr, i1, ...) 
@llvm.coro.end.async(ptr %3, i1 false, ptr @repo.0, ptr %6, ptr %7) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %3, i1 false, ptr @repo.0, ptr %6, ptr %7) unreachable } @@ -35,6 +35,6 @@ declare token @llvm.coro.id.async(i32, i32, i32, ptr) #1 declare ptr @llvm.coro.begin(token, ptr writeonly) #1 ; Function Attrs: nounwind -declare i1 @llvm.coro.end.async(ptr, i1, ...) #1 +declare void @llvm.coro.end.async(ptr, i1, ...) #1 attributes #1 = { nounwind } diff --git a/llvm/test/Transforms/Coroutines/coro-async-declaration.ll b/llvm/test/Transforms/Coroutines/coro-async-declaration.ll index aee6aa4f78a83..2cbe5135d7c47 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-declaration.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-declaration.ll @@ -43,7 +43,7 @@ entry: %15 = getelementptr inbounds <{ ptr, ptr }>, ptr %14, i32 0, i32 1, !dbg !11 %16 = load ptr, ptr %15, align 8, !dbg !11 %17 = load ptr, ptr %1, align 8, !dbg !11 - %18 = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %3, i1 false, ptr @"$s3foo3FooO3baryyYaF.0.1", ptr %16, ptr %17), !dbg !11 + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %3, i1 false, ptr @"$s3foo3FooO3baryyYaF.0.1", ptr %16, ptr %17), !dbg !11 unreachable, !dbg !11 } @@ -89,7 +89,7 @@ entry: } ; Function Attrs: nounwind -declare i1 @llvm.coro.end.async(ptr, i1, ...) #0 +declare void @llvm.coro.end.async(ptr, i1, ...) #0 attributes #0 = { nounwind } attributes #1 = { nomerge nounwind } diff --git a/llvm/test/Transforms/Coroutines/coro-async-dyn-align.ll b/llvm/test/Transforms/Coroutines/coro-async-dyn-align.ll index 040c9881c1ab3..ffcafca891199 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-dyn-align.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-dyn-align.ll @@ -27,8 +27,8 @@ declare void @llvm.coro.async.context.dealloc(ptr) declare ptr @llvm.coro.async.resume() declare token @llvm.coro.id.async(i32, i32, i32, ptr) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end.async(ptr, i1, ...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end.async(ptr, i1, ...) +declare void @llvm.coro.end(ptr, i1, token) declare swiftcc void @asyncReturn(ptr) declare swiftcc void @asyncSuspend(ptr) declare {ptr} @llvm.coro.suspend.async(i32, ptr, ptr, ...) @@ -91,6 +91,6 @@ entry: call void @opaque(ptr %tmp4) call void @llvm.coro.async.context.dealloc(ptr %callee_context) tail call swiftcc void @asyncReturn(ptr %async.ctxt) - call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) unreachable } diff --git a/llvm/test/Transforms/Coroutines/coro-async-end-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-end-bug.ll index 0daa4b0c3da64..c5ce27c1328f2 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-end-bug.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-end-bug.ll @@ -14,7 +14,7 @@ declare token @llvm.coro.id.async(i32, i32, i32, ptr) #0 declare ptr @llvm.coro.begin(token, ptr writeonly) #0 -declare i1 @llvm.coro.end.async(ptr, i1, ...) #0 +declare void @llvm.coro.end.async(ptr, i1, ...) #0 define swifttailcc void @repo(ptr swiftasync %0, ptr noalias nocapture %1, ptr noalias nocapture %2, ptr %3, ptr %4, ptr %Self, ptr %Self.AsyncSequence, ptr %Self.Element.Comparable) #1 { entry: @@ -27,7 +27,7 @@ entry: %10 = getelementptr inbounds <{ ptr, ptr }>, ptr %9, i32 0, i32 1 %11 = load ptr, ptr %10, align 8 %12 = load ptr, ptr %5, align 8 - %13 = call i1 (ptr, i1, ...) 
@llvm.coro.end.async(ptr %7, i1 false, ptr @repo.0, ptr %11, ptr %12, i1 %8, ptr null) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %7, i1 false, ptr @repo.0, ptr %11, ptr %12, i1 %8, ptr null) unreachable } diff --git a/llvm/test/Transforms/Coroutines/coro-async-no-cse-swift-async-context-addr.ll b/llvm/test/Transforms/Coroutines/coro-async-no-cse-swift-async-context-addr.ll index c898a1b0c2983..e745177e9cb28 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-no-cse-swift-async-context-addr.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-no-cse-swift-async-context-addr.ll @@ -41,7 +41,7 @@ entry: %11 = call ptr @llvm.swift.async.context.addr() store ptr %9, ptr %11, align 8 - %12 = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %3, i1 false, ptr @repo.0, ptr %9, ptr %10) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %3, i1 false, ptr @repo.0, ptr %9, ptr %10) unreachable } @@ -66,7 +66,7 @@ entry: declare { ptr } @llvm.coro.suspend.async.sl_p0i8s(i32, ptr, ptr, ...) #1 declare token @llvm.coro.id.async(i32, i32, i32, ptr) #1 declare ptr @llvm.coro.begin(token, ptr writeonly) #1 -declare i1 @llvm.coro.end.async(ptr, i1, ...) #1 +declare void @llvm.coro.end.async(ptr, i1, ...) #1 declare ptr @llvm.coro.async.resume() #1 declare ptr @llvm.swift.async.context.addr() #1 diff --git a/llvm/test/Transforms/Coroutines/coro-async-nomerge.ll b/llvm/test/Transforms/Coroutines/coro-async-nomerge.ll index ac39704b93da5..42652bd88bc58 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-nomerge.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-nomerge.ll @@ -11,7 +11,7 @@ declare { ptr } @llvm.coro.suspend.async.sl_p0i8s(i32, ptr, ptr, ...) declare ptr @llvm.coro.begin(token, ptr writeonly) declare token @llvm.coro.id.async(i32, i32, i32, ptr) -declare i1 @llvm.coro.end.async(ptr, i1, ...) +declare void @llvm.coro.end.async(ptr, i1, ...) define linkonce_odr hidden ptr @__swift_async_resume_get_context(ptr %0) { entry: @@ -53,7 +53,7 @@ bb2: br label %tailblock tailblock: - %t = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %id, i1 false, ptr @repo.0, ptr @return, ptr %0) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %id, i1 false, ptr @repo.0, ptr @return, ptr %0) unreachable } @@ -115,6 +115,6 @@ bb2: br label %tailblock tailblock: - %t = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %id, i1 false, ptr @repo.0, ptr @return, ptr %0) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %id, i1 false, ptr @repo.0, ptr @return, ptr %0) unreachable } diff --git a/llvm/test/Transforms/Coroutines/coro-async-phi.ll b/llvm/test/Transforms/Coroutines/coro-async-phi.ll index 25be1eaa059eb..7aa6857aa04eb 100644 --- a/llvm/test/Transforms/Coroutines/coro-async-phi.ll +++ b/llvm/test/Transforms/Coroutines/coro-async-phi.ll @@ -83,7 +83,7 @@ bb68: ; preds = %bb30 br label %bb126 bb126: - %i162 = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %i12, i1 false, ptr @__swift_suspend_dispatch_2, ptr @doIt, ptr null, ptr null) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %i12, i1 false, ptr @__swift_suspend_dispatch_2, ptr @doIt, ptr null, ptr null) unreachable } @@ -119,7 +119,7 @@ entry: declare { ptr } @llvm.coro.suspend.async.sl_p0i8s(i32, ptr, ptr, ...) #1 ; Function Attrs: nounwind -declare i1 @llvm.coro.end.async(ptr, i1, ...) #1 +declare void @llvm.coro.end.async(ptr, i1, ...) 
#1 ; Function Attrs: argmemonly nounwind declare extern_weak swiftcc ptr @swift_task_alloc(i64) #5 diff --git a/llvm/test/Transforms/Coroutines/coro-async.ll b/llvm/test/Transforms/Coroutines/coro-async.ll index 331d6a60bed6b..f94c6c11aa8b1 100644 --- a/llvm/test/Transforms/Coroutines/coro-async.ll +++ b/llvm/test/Transforms/Coroutines/coro-async.ll @@ -101,7 +101,7 @@ entry: call void @some_user(i64 %val.2) store <4 x double> %vector_spill, ptr %vector, align 16 tail call swiftcc void @asyncReturn(ptr %async.ctxt, ptr %continuation_task_arg, ptr %actor) - call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) unreachable } @@ -211,7 +211,7 @@ entry: %continuation_actor_arg = extractvalue {ptr, ptr, ptr} %res.2, 1 tail call swiftcc void @asyncReturn(ptr %async.ctxt, ptr %continuation_task_arg, ptr %continuation_actor_arg) - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -297,7 +297,7 @@ entry: call void @llvm.coro.async.context.dealloc(ptr %callee_context) %continuation_task_arg = extractvalue {ptr, ptr, ptr} %res, 1 tail call swiftcc void @asyncReturn(ptr %async.ctxt, ptr %continuation_task_arg, ptr %actor) - call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) unreachable } @@ -339,11 +339,11 @@ entry: is_equal: tail call swiftcc void @asyncReturn(ptr %async.ctxt, ptr %continuation_task_arg, ptr %actor) - call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) unreachable is_not_equal: - call i1 (ptr, i1, ...) @llvm.coro.end.async( + call void (ptr, i1, ...) @llvm.coro.end.async( ptr %hdl, i1 0, ptr @must_tail_call_return, ptr %async.ctxt, ptr %continuation_task_arg, ptr null) @@ -406,7 +406,7 @@ entry: call void @some_user(i64 %val.2) tail call swiftcc void @asyncReturn(ptr %async.ctxt, ptr %continuation_task_arg, ptr %actor) - call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) unreachable } @@ -431,7 +431,7 @@ entry: ptr @no_coro_suspend_fp) %hdl = call ptr @llvm.coro.begin(token %id, ptr null) call void @some_may_write(ptr %some_alloca) - call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) unreachable } @@ -459,7 +459,7 @@ entry: %hdl = call ptr @llvm.coro.begin(token %id, ptr null) store ptr null, ptr %some_alloca, align 8 call void @do_with_swifterror(ptr swifterror %some_alloca) - call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 0) unreachable } @@ -488,7 +488,7 @@ entry: %undefined_resume_pointer = call ptr @llvm.coro.async.resume() call void @use(ptr %undefined_resume_pointer) call void @crash() - %unused = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 false) + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 false) unreachable } ; CHECK-LABEL: define swiftcc void @undefined_coro_async_resume @@ -510,7 +510,7 @@ entry: %5 = getelementptr inbounds <{ ptr, ptr }>, ptr %4, i32 0, i32 1 %6 = load ptr, ptr %5, align 8 %7 = load ptr, ptr %1, align 8 - %8 = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %3, i1 false, ptr @simpleFunc.0, ptr %6, ptr %7) + call void (ptr, i1, ...) 
@llvm.coro.end.async(ptr %3, i1 false, ptr @simpleFunc.0, ptr %6, ptr %7) unreachable } @@ -529,8 +529,8 @@ declare { ptr, ptr, ptr, ptr } @llvm.coro.suspend.async.sl_p0i8p0i8p0i8p0i8s(i32 declare ptr @llvm.coro.prepare.async(ptr) declare token @llvm.coro.id.async(i32, i32, i32, ptr) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end.async(ptr, i1, ...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end.async(ptr, i1, ...) +declare void @llvm.coro.end(ptr, i1, token) declare {ptr, ptr, ptr} @llvm.coro.suspend.async(i32, ptr, ptr, ...) declare ptr @llvm.coro.async.context.alloc(ptr, ptr) declare void @llvm.coro.async.context.dealloc(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-await-suspend-handle-in-ramp.ll b/llvm/test/Transforms/Coroutines/coro-await-suspend-handle-in-ramp.ll index ee64ce6e4482b..4aef572f47a35 100644 --- a/llvm/test/Transforms/Coroutines/coro-await-suspend-handle-in-ramp.ll +++ b/llvm/test/Transforms/Coroutines/coro-await-suspend-handle-in-ramp.ll @@ -32,7 +32,7 @@ cleanup: br label %ret ret: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret void } @@ -53,7 +53,7 @@ declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) declare void @llvm.coro.await.suspend.handle(ptr, ptr, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @free(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-await-suspend-lower-invoke.ll b/llvm/test/Transforms/Coroutines/coro-await-suspend-lower-invoke.ll index fd3b7bd815300..67d179a8f9b04 100644 --- a/llvm/test/Transforms/Coroutines/coro-await-suspend-lower-invoke.ll +++ b/llvm/test/Transforms/Coroutines/coro-await-suspend-lower-invoke.ll @@ -88,7 +88,7 @@ cleanup: br label %ret ret: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret void } @@ -112,7 +112,7 @@ declare ptr @llvm.coro.begin(token, ptr) declare void @llvm.coro.await.suspend.void(ptr, ptr, ptr) declare i1 @llvm.coro.await.suspend.bool(ptr, ptr, ptr) declare void @llvm.coro.await.suspend.handle(ptr, ptr, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @__cxa_begin_catch(ptr) declare void @use_val(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-await-suspend-lower.ll b/llvm/test/Transforms/Coroutines/coro-await-suspend-lower.ll index 8d019e6954628..72a158abffc6b 100644 --- a/llvm/test/Transforms/Coroutines/coro-await-suspend-lower.ll +++ b/llvm/test/Transforms/Coroutines/coro-await-suspend-lower.ll @@ -65,7 +65,7 @@ cleanup: br label %ret ret: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret void } @@ -89,7 +89,7 @@ declare ptr @llvm.coro.begin(token, ptr) declare void @llvm.coro.await.suspend.void(ptr, ptr, ptr) declare i1 @llvm.coro.await.suspend.bool(ptr, ptr, ptr) declare void @llvm.coro.await.suspend.handle(ptr, ptr, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @free(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-byval-param.ll b/llvm/test/Transforms/Coroutines/coro-byval-param.ll index 864b7cae9ca5e..95bb7be73b04f 100644 --- a/llvm/test/Transforms/Coroutines/coro-byval-param.ll +++ 
b/llvm/test/Transforms/Coroutines/coro-byval-param.ll @@ -52,7 +52,7 @@ coro.free: ; preds = %cleanup33 br label %coro.ret coro.ret: ; preds = %coro.free, %cleanup33, %init.ready, %coro.init - %10 = call i1 @llvm.coro.end(ptr null, i1 false, token none) #10 + call void @llvm.coro.end(ptr null, i1 false, token none) #10 ret ptr %call2 } @@ -103,7 +103,7 @@ declare i8 @llvm.coro.suspend(token, i1) #2 declare void @_ZN4task12promise_type13final_suspendEv(ptr nonnull dereferenceable(1)) local_unnamed_addr #7 align 2 ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 ; Function Attrs: nobuiltin nounwind declare void @_ZdlPv(ptr) local_unnamed_addr #8 diff --git a/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll b/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll index 2f6d23da82692..d0e7c1c29eb32 100644 --- a/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll +++ b/llvm/test/Transforms/Coroutines/coro-catchswitch-cleanuppad.ll @@ -37,7 +37,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl catch.dispatch.1: @@ -106,7 +106,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-catchswitch.ll b/llvm/test/Transforms/Coroutines/coro-catchswitch.ll index 3cf6dc86f2c6d..4332f2df03d15 100644 --- a/llvm/test/Transforms/Coroutines/coro-catchswitch.ll +++ b/llvm/test/Transforms/Coroutines/coro-catchswitch.ll @@ -54,7 +54,7 @@ resume: ; preds = %await2.suspend br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret void cleanuppad: @@ -80,7 +80,7 @@ declare void @print(i32) declare noalias ptr @malloc(i32) declare void @free(ptr) -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 ; Function Attrs: nobuiltin nounwind diff --git a/llvm/test/Transforms/Coroutines/coro-debug-O2.ll b/llvm/test/Transforms/Coroutines/coro-debug-O2.ll index cc1dbcd1c80fd..4daaa5960f0ae 100644 --- a/llvm/test/Transforms/Coroutines/coro-debug-O2.ll +++ b/llvm/test/Transforms/Coroutines/coro-debug-O2.ll @@ -112,7 +112,7 @@ cleanup.cont: ; preds = %after.coro.free br label %coro.ret coro.ret: ; preds = %cleanup.cont, %after.coro.free, %final.suspend, %await.suspend, %init.suspend - %end = call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void unreachable: ; preds = %after.coro.free @@ -128,7 +128,7 @@ declare token @llvm.coro.save(ptr) declare ptr @llvm.coro.begin(token, ptr writeonly) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr nocapture readonly) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @new(i64) declare void @delete(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-debug-coro-frame.ll b/llvm/test/Transforms/Coroutines/coro-debug-coro-frame.ll index dff064ec084c9..f2aedefcfd381 100644 --- a/llvm/test/Transforms/Coroutines/coro-debug-coro-frame.ll +++ b/llvm/test/Transforms/Coroutines/coro-debug-coro-frame.ll @@ -205,7 +205,7 
@@ cleanup.cont: ; preds = %after.coro.free br label %coro.ret coro.ret: ; preds = %cleanup.cont, %after.coro.free, %final.suspend, %await.suspend, %init.suspend - %end = call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void unreachable: ; preds = %after.coro.free @@ -334,7 +334,7 @@ cleanup.cont: ; preds = %after.coro.free br label %coro.ret coro.ret: ; preds = %cleanup.cont, %after.coro.free, %final.suspend, %await.suspend, %init.suspend - %end = call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void unreachable: ; preds = %after.coro.free @@ -350,7 +350,7 @@ declare token @llvm.coro.save(ptr) declare ptr @llvm.coro.begin(token, ptr writeonly) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr nocapture readonly) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @new(i64) declare void @delete(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-debug-dbg.values-not_used_in_frame.ll b/llvm/test/Transforms/Coroutines/coro-debug-dbg.values-not_used_in_frame.ll index deaec7b8d7f89..483c1a8e8608a 100644 --- a/llvm/test/Transforms/Coroutines/coro-debug-dbg.values-not_used_in_frame.ll +++ b/llvm/test/Transforms/Coroutines/coro-debug-dbg.values-not_used_in_frame.ll @@ -123,7 +123,7 @@ cleanup.cont: ; preds = %after.coro.free br label %coro.ret coro.ret: ; preds = %cleanup.cont, %after.coro.free, %final.suspend, %await.suspend, %init.suspend - %end = call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void unreachable: ; preds = %after.coro.free @@ -155,7 +155,7 @@ declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @new(i64) diff --git a/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll b/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll index 0934393a667ee..c524f38432ed1 100644 --- a/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll +++ b/llvm/test/Transforms/Coroutines/coro-debug-dbg.values.ll @@ -162,7 +162,7 @@ cleanup.cont: ; preds = %after.coro.free br label %coro.ret coro.ret: ; preds = %cleanup.cont, %after.coro.free, %final.suspend, %await.suspend, %init.suspend - %end = call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void unreachable: ; preds = %after.coro.free @@ -194,7 +194,7 @@ declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @new(i64) diff --git a/llvm/test/Transforms/Coroutines/coro-debug-frame-variable.ll b/llvm/test/Transforms/Coroutines/coro-debug-frame-variable.ll index 125ec752c8345..0ef24a6b1e2a6 100644 --- a/llvm/test/Transforms/Coroutines/coro-debug-frame-variable.ll +++ b/llvm/test/Transforms/Coroutines/coro-debug-frame-variable.ll @@ -186,7 +186,7 @@ cleanup.cont: ; preds = %after.coro.free br label %coro.ret coro.ret: ; preds = %cleanup.cont, %after.coro.free, %final.suspend, %await.suspend, %init.suspend - %end = call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret 
void unreachable: ; preds = %after.coro.free @@ -201,7 +201,7 @@ declare token @llvm.coro.save(ptr) declare ptr @llvm.coro.begin(token, ptr writeonly) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr nocapture readonly) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @new(i64) declare void @delete(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-debug-spill-dbg.declare.ll b/llvm/test/Transforms/Coroutines/coro-debug-spill-dbg.declare.ll index 59a4b5b2dfbc8..bd9eb2036e6fb 100644 --- a/llvm/test/Transforms/Coroutines/coro-debug-spill-dbg.declare.ll +++ b/llvm/test/Transforms/Coroutines/coro-debug-spill-dbg.declare.ll @@ -74,7 +74,7 @@ cleanup: ; preds = %resume, %coro.begin br label %suspend suspend: ; preds = %cleanup, %coro.begin - %2 = call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret ptr %hdl } @@ -104,7 +104,7 @@ declare i1 @llvm.coro.alloc(token) #4 declare ptr @llvm.coro.begin(token, ptr writeonly) #4 ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) #4 +declare void @llvm.coro.end(ptr, i1, token) #4 declare noalias ptr @malloc(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-debug.ll b/llvm/test/Transforms/Coroutines/coro-debug.ll index 5f8e9c9c1d16d..d1f1922c2a92f 100644 --- a/llvm/test/Transforms/Coroutines/coro-debug.ll +++ b/llvm/test/Transforms/Coroutines/coro-debug.ll @@ -69,10 +69,10 @@ coro_Cleanup: ; preds = %sw.epilog, %sw.bb1 br label %coro_Suspend, !dbg !24 coro_Suspend: ; preds = %coro_Cleanup, %sw.default - %7 = call i1 @llvm.coro.end(ptr null, i1 false, token none) #7, !dbg !24 - %8 = load ptr, ptr %coro_hdl, align 8, !dbg !24 + call void @llvm.coro.end(ptr null, i1 false, token none) #7, !dbg !24 + %7 = load ptr, ptr %coro_hdl, align 8, !dbg !24 store i32 0, ptr %late_local, !dbg !24 - ret ptr %8, !dbg !24 + ret ptr %7, !dbg !24 ehcleanup: %ex = landingpad { ptr, i32 } @@ -110,7 +110,7 @@ declare void @free(ptr) #3 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) #5 +declare void @llvm.coro.end(ptr, i1, token) #5 ; Function Attrs: argmemonly nounwind readonly declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #2 diff --git a/llvm/test/Transforms/Coroutines/coro-early-twice.ll b/llvm/test/Transforms/Coroutines/coro-early-twice.ll index 39ec0ccc6fdb8..e4df0071bcc93 100644 --- a/llvm/test/Transforms/Coroutines/coro-early-twice.ll +++ b/llvm/test/Transforms/Coroutines/coro-early-twice.ll @@ -22,7 +22,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -33,4 +33,4 @@ declare ptr @llvm.coro.begin(token, ptr) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr) declare void @free(ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) diff --git a/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-00.ll b/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-00.ll index 2f5b989a620e0..ad84f7b33dc65 100644 --- a/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-00.ll +++ b/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-00.ll @@ -65,7 +65,7 @@ cleanup: ; preds = %invoke.cont15, %if.el br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call 
void @llvm.coro.end(ptr null, i1 false, token none) ret void unreach: @@ -92,6 +92,6 @@ declare void @use_val(i32) declare void @__cxa_end_catch() ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @free(ptr) declare ptr @llvm.coro.free(token, ptr nocapture readonly) diff --git a/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-01.ll b/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-01.ll index d896c6a18b233..0b9bce5f9ad77 100644 --- a/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-01.ll +++ b/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-01.ll @@ -59,7 +59,7 @@ cleanup: ; preds = %invoke.cont15, %if.el br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void unreach: @@ -86,6 +86,6 @@ declare void @use_val(i32) declare void @__cxa_end_catch() ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @free(ptr) declare ptr @llvm.coro.free(token, ptr nocapture readonly) diff --git a/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-02.ll b/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-02.ll index 79aa58b85eda8..6202df1fe00e6 100644 --- a/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-02.ll +++ b/llvm/test/Transforms/Coroutines/coro-eh-aware-edge-split-02.ll @@ -59,7 +59,7 @@ cleanup: ; preds = %invoke.cont15, %if.el br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -83,6 +83,6 @@ declare void @use_val(i32) declare void @__cxa_end_catch() ; Function Attrs: nounwind -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @free(ptr) declare ptr @llvm.coro.free(token, ptr nocapture readonly) diff --git a/llvm/test/Transforms/Coroutines/coro-frame-arrayalloca.ll b/llvm/test/Transforms/Coroutines/coro-frame-arrayalloca.ll index 7d5ddabf7ea8e..722ff81bc0cad 100644 --- a/llvm/test/Transforms/Coroutines/coro-frame-arrayalloca.ll +++ b/llvm/test/Transforms/Coroutines/coro-frame-arrayalloca.ll @@ -30,7 +30,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -65,7 +65,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare double @print(double) diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll index bf08d6ff0b205..a2c9d58469427 100644 --- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll +++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll @@ -50,7 +50,7 @@ cleanup: call void @free(ptr %mem) br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -66,7 +66,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) 
+declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare double @print(double) diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll index 78c6f0cacd695..e7c28d92674ba 100644 --- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll +++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll @@ -53,7 +53,7 @@ cleanup: call ptr @llvm.coro.free(token %0, ptr %1) br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -68,6 +68,6 @@ declare token @llvm.coro.save(ptr) #3 declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll index 82657318d7785..b75995f3eaa8a 100644 --- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll +++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll @@ -55,7 +55,7 @@ cleanup: call ptr @llvm.coro.free(token %0, ptr %1) br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } ; CHECK: %a.Frame = type { ptr, ptr, %"struct.task::promise_type", %struct.big_structure, i1 } @@ -69,6 +69,6 @@ declare token @llvm.coro.save(ptr) #3 declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll index 66d41372cd9e7..427d8984e126c 100644 --- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll +++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll @@ -50,7 +50,7 @@ cleanup: call void @free(ptr %mem) br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -66,7 +66,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare double @print(double) diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll index 6ff31e566283b..81a5dcc1d3858 100644 --- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll +++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll @@ -55,7 +55,7 @@ cleanup: call ptr @llvm.coro.free(token %0, ptr %1) br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } ; CHECK: %a.Frame = type { ptr, ptr, %"struct.task::promise_type", 
%struct.big_structure, i1, [26 x i8], %struct.big_structure.2 } @@ -69,6 +69,6 @@ declare token @llvm.coro.save(ptr) #3 declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll index c3da8e872dc07..6caa41f32f26e 100644 --- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll +++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll @@ -55,7 +55,7 @@ cleanup: call ptr @llvm.coro.free(token %0, ptr %1) br label %coro.ret coro.ret: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } ; CHECK: %a.Frame = type { ptr, ptr, %"struct.task::promise_type", i1, [14 x i8], %struct.big_structure } @@ -69,6 +69,6 @@ declare token @llvm.coro.save(ptr) #3 declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-frame-unreachable.ll b/llvm/test/Transforms/Coroutines/coro-frame-unreachable.ll index b81f7d0ed7eac..3d290554e22c9 100644 --- a/llvm/test/Transforms/Coroutines/coro-frame-unreachable.ll +++ b/llvm/test/Transforms/Coroutines/coro-frame-unreachable.ll @@ -24,7 +24,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl no.predecessors: @@ -43,7 +43,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i1) diff --git a/llvm/test/Transforms/Coroutines/coro-frame.ll b/llvm/test/Transforms/Coroutines/coro-frame.ll index c20be8ce2ff68..d25d335fe63c6 100644 --- a/llvm/test/Transforms/Coroutines/coro-frame.ll +++ b/llvm/test/Transforms/Coroutines/coro-frame.ll @@ -26,7 +26,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl pad: %tok = cleanuppad within none [] @@ -58,7 +58,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare double @print(double) diff --git a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll index df2ed7e4bcead..503b93ea76a02 100644 --- a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll +++ b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll @@ -43,7 +43,7 @@ entry: await.ready: br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call 
void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -80,7 +80,7 @@ entry: await.ready: br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) call void @llvm.lifetime.end.p0(ptr %testval) ret void } @@ -128,7 +128,7 @@ if.end: await.ready: br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -137,6 +137,6 @@ declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) declare ptr @llvm.coro.begin(token, ptr writeonly) #3 declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-materialize.ll b/llvm/test/Transforms/Coroutines/coro-materialize.ll index 3bf1360001dc7..f55db35edb3ea 100644 --- a/llvm/test/Transforms/Coroutines/coro-materialize.ll +++ b/llvm/test/Transforms/Coroutines/coro-materialize.ll @@ -41,7 +41,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -73,7 +73,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -109,7 +109,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -145,7 +145,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -186,7 +186,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -200,7 +200,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-noalias-param.ll b/llvm/test/Transforms/Coroutines/coro-noalias-param.ll index e48ad8fddd5fe..77068b8d75367 100644 --- a/llvm/test/Transforms/Coroutines/coro-noalias-param.ll +++ b/llvm/test/Transforms/Coroutines/coro-noalias-param.ll @@ -19,7 +19,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret void } @@ -33,7 +33,7 @@ declare i32 @llvm.coro.size.i32() declare i8 @llvm.coro.suspend(token, i1) declare void @llvm.coro.resume(ptr) declare void @llvm.coro.destroy(ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll b/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll index c3d0fb1a18dd2..e40ac4e0ec162 100644 --- 
a/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll +++ b/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll @@ -90,7 +90,7 @@ coro.free: ; preds = %cleanup62 br label %coro.ret coro.ret: ; preds = %coro.free, %cleanup62, %final.suspend, %await2.suspend, %await.suspend, %init.suspend - %19 = call i1 @llvm.coro.end(ptr null, i1 false, token none) #12 + call void @llvm.coro.end(ptr null, i1 false, token none) #12 ret ptr %__promise } @@ -106,7 +106,7 @@ declare i8 @llvm.coro.suspend(token, i1) #3 declare ptr @_Z5Innerv() local_unnamed_addr declare dso_local void @_ZdlPv(ptr noundef) local_unnamed_addr #8 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @exit(i32 noundef) declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #10 declare void @dtor1() diff --git a/llvm/test/Transforms/Coroutines/coro-padding.ll b/llvm/test/Transforms/Coroutines/coro-padding.ll index 452b83bad388a..1de12a4f44a57 100644 --- a/llvm/test/Transforms/Coroutines/coro-padding.ll +++ b/llvm/test/Transforms/Coroutines/coro-padding.ll @@ -26,7 +26,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -54,7 +54,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare double @print(double) diff --git a/llvm/test/Transforms/Coroutines/coro-param-copy.ll b/llvm/test/Transforms/Coroutines/coro-param-copy.ll index 717ca46651414..f0e816bba9584 100644 --- a/llvm/test/Transforms/Coroutines/coro-param-copy.ll +++ b/llvm/test/Transforms/Coroutines/coro-param-copy.ll @@ -41,7 +41,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -83,7 +83,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.memset.p0.i32(ptr, i8, i32, i1) diff --git a/llvm/test/Transforms/Coroutines/coro-readnone-02.ll b/llvm/test/Transforms/Coroutines/coro-readnone-02.ll index 4ed962816154b..3ada99070967c 100644 --- a/llvm/test/Transforms/Coroutines/coro-readnone-02.ll +++ b/llvm/test/Transforms/Coroutines/coro-readnone-02.ll @@ -39,7 +39,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -75,7 +75,7 @@ declare i8 @llvm.coro.suspend(token, i1) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @free(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-readnone.ll b/llvm/test/Transforms/Coroutines/coro-readnone.ll index 1fc91cefaf975..321ddab3ae4d7 100644 --- a/llvm/test/Transforms/Coroutines/coro-readnone.ll +++ b/llvm/test/Transforms/Coroutines/coro-readnone.ll 
@@ -33,7 +33,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -83,7 +83,7 @@ declare i8 @llvm.coro.suspend(token, i1) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @free(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-alloca-opaque-ptr.ll b/llvm/test/Transforms/Coroutines/coro-retcon-alloca-opaque-ptr.ll index b23c5222a3deb..94ed43ee43f2e 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-alloca-opaque-ptr.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-alloca-opaque-ptr.ll @@ -34,7 +34,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -69,7 +69,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -77,7 +77,7 @@ declare token @llvm.coro.id.retcon(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.suspend.retcon.i1(...) declare void @llvm.coro.suspend.retcon.isVoid(...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare token @llvm.coro.alloca.alloc.i32(i32, i32) declare ptr @llvm.coro.alloca.get(token) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-alloca.ll b/llvm/test/Transforms/Coroutines/coro-retcon-alloca.ll index aeb959e5ce711..6caa571cf1bbc 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-alloca.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-alloca.ll @@ -33,7 +33,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -69,7 +69,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -102,7 +102,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -170,7 +170,7 @@ forward: br label %back end: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -217,7 +217,7 @@ non_alloca_block: br label %suspend cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -225,7 +225,7 @@ declare token @llvm.coro.id.retcon(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.suspend.retcon.i1(...) declare void @llvm.coro.suspend.retcon.isVoid(...) 
-declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare token @llvm.coro.alloca.alloc.i32(i32, i32) declare ptr @llvm.coro.alloca.get(token) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-frame.ll b/llvm/test/Transforms/Coroutines/coro-retcon-frame.ll index a81cdf475ae31..780f24e124a51 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-frame.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-frame.ll @@ -32,7 +32,7 @@ resume: br label %end end: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } ; Make sure we don't lose writes to the frame. @@ -52,5 +52,5 @@ end: declare token @llvm.coro.id.retcon.once(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.suspend.retcon.i1(...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-once-private.ll b/llvm/test/Transforms/Coroutines/coro-retcon-once-private.ll index e7593cc8c6f81..35eb2e4df705b 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-once-private.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-once-private.ll @@ -30,14 +30,14 @@ neg.cont: br label %cleanup cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } declare token @llvm.coro.id.retcon.once(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.suspend.retcon.i1(...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare void @prototype(ptr, i1 zeroext) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll b/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll index fa10ddefee00e..026e23913d647 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-once-value.ll @@ -29,7 +29,7 @@ neg.cont: br label %cleanup cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -72,7 +72,7 @@ neg.cont: cleanup: %new.val = add i32 %val, 123 %tok = call token (...) @llvm.coro.end.results(ptr null, i32 %new.val, ptr @deallocate) - call i1 @llvm.coro.end(ptr %hdl, i1 0, token %tok) + call void @llvm.coro.end(ptr %hdl, i1 0, token %tok) unreachable } @@ -96,7 +96,7 @@ entry: declare token @llvm.coro.id.retcon.once(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.suspend.retcon.i1(...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare token @llvm.coro.end.results(...) declare ptr @llvm.coro.prepare.retcon(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-once-value2.ll b/llvm/test/Transforms/Coroutines/coro-retcon-once-value2.ll index c33e60e98cd8b..aad762e2c9335 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-once-value2.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-once-value2.ll @@ -19,7 +19,7 @@ cont: br label %cleanup cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -41,7 +41,7 @@ cont: cleanup: %tok = call token (...) 
@llvm.coro.end.results(i8 %val) - call i1 @llvm.coro.end(ptr %hdl, i1 0, token %tok) + call void @llvm.coro.end(ptr %hdl, i1 0, token %tok) unreachable } @@ -63,7 +63,7 @@ cont: cleanup: %tok = call token (...) @llvm.coro.end.results(ptr null, i32 123, ptr @deallocate) - call i1 @llvm.coro.end(ptr %hdl, i1 0, token %tok) + call void @llvm.coro.end(ptr %hdl, i1 0, token %tok) unreachable } @@ -71,7 +71,7 @@ cleanup: declare token @llvm.coro.id.retcon.once(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.suspend.retcon.i1(...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare token @llvm.coro.end.results(...) declare void @prototype(ptr, i1 zeroext) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-opaque-ptr.ll b/llvm/test/Transforms/Coroutines/coro-retcon-opaque-ptr.ll index 1908b31f52db3..5484fec1b3ce4 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-opaque-ptr.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-opaque-ptr.ll @@ -26,7 +26,7 @@ resume: ; preds = %loop br label %loop cleanup: ; preds = %loop - %0 = call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) unreachable } @@ -72,14 +72,14 @@ resume: ; preds = %loop br label %loop cleanup: ; preds = %loop - %0 = call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) unreachable } declare token @llvm.coro.id.retcon(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.suspend.retcon.i1(...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare ptr @prototype(ptr, i1 zeroext) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-remat.ll b/llvm/test/Transforms/Coroutines/coro-retcon-remat.ll index fd16ba96181b9..160754e7d11c3 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-remat.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-remat.ll @@ -31,14 +31,14 @@ resume1: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } declare token @llvm.coro.id.retcon(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.suspend.retcon.i1(...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare { ptr, i32 } @f_prototype(ptr, i1 zeroext) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll index 907d7e588ffe0..2f04453d69c4b 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values.ll @@ -27,7 +27,7 @@ resume: cleanup: call void @print(i32 %n.val) - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -66,7 +66,7 @@ entry: declare token @llvm.coro.id.retcon(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare { i32, i1 } @llvm.coro.suspend.retcon.sl_i32i1s(...) 
-declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare ptr @prototype(ptr, i32, i1 zeroext) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values2.ll b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values2.ll index 2caa6430ca012..a19c1ca0e7f3a 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values2.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values2.ll @@ -15,14 +15,14 @@ entry: %sum4 = call i32 @add(i32 %sum3, i32 %value1) %sum5 = call i32 @add(i32 %sum4, i32 %value2) call void @print(i32 %sum5) - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } declare token @llvm.coro.id.retcon(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i32 @llvm.coro.suspend.retcon.i32(...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare ptr @prototype(ptr, i32) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll b/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll index 31839aa2a2b5f..6e4a287e53b0a 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll @@ -23,7 +23,7 @@ define hidden swiftcc { ptr, ptr } @no_suspends(ptr %buffer, i64 %arg) #1 { bb1: call void @print(i64 %arg) - call i1 @llvm.coro.end(ptr %begin, i1 false, token none) + call void @llvm.coro.end(ptr %begin, i1 false, token none) unreachable } @@ -41,7 +41,7 @@ declare void @llvm.lifetime.start.p0(ptr nocapture) #6 declare i1 @llvm.coro.suspend.retcon.i1(...) #5 declare void @llvm.lifetime.end.p0(ptr nocapture) #6 declare void @llvm.coro.alloca.free(token) #5 -declare i1 @llvm.coro.end(ptr, i1, token) #5 +declare void @llvm.coro.end(ptr, i1, token) #5 declare void @llvm.trap() diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-value.ll b/llvm/test/Transforms/Coroutines/coro-retcon-value.ll index 6a150c6a79807..d456c3b1cb2a3 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon-value.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-value.ll @@ -25,7 +25,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -64,7 +64,7 @@ entry: declare token @llvm.coro.id.retcon(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i8 @llvm.coro.suspend.retcon.i8(...) 
-declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare {ptr, i32} @prototype(ptr, i8 zeroext) diff --git a/llvm/test/Transforms/Coroutines/coro-retcon.ll b/llvm/test/Transforms/Coroutines/coro-retcon.ll index e0484c6d66941..86eba3b5d134f 100644 --- a/llvm/test/Transforms/Coroutines/coro-retcon.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon.ll @@ -33,7 +33,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -105,7 +105,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -139,14 +139,14 @@ cleanup: call void @use_var_ptr(ptr %a) %al = load i32, ptr %a call void @use_var(i32 %al) - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } declare token @llvm.coro.id.retcon(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare i1 @llvm.coro.suspend.retcon.i1(...) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare void @use_var(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-spill-after-phi.ll b/llvm/test/Transforms/Coroutines/coro-spill-after-phi.ll index bb43d8f4b3399..273ac7c5293d9 100644 --- a/llvm/test/Transforms/Coroutines/coro-spill-after-phi.ll +++ b/llvm/test/Transforms/Coroutines/coro-spill-after-phi.ll @@ -50,7 +50,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -63,7 +63,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare i32 @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-spill-corobegin.ll b/llvm/test/Transforms/Coroutines/coro-spill-corobegin.ll index f238955d1c3e9..bfc48adfed22a 100644 --- a/llvm/test/Transforms/Coroutines/coro-spill-corobegin.ll +++ b/llvm/test/Transforms/Coroutines/coro-spill-corobegin.ll @@ -33,7 +33,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -62,7 +62,7 @@ declare i8 @llvm.coro.suspend(token, i1) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print.i32(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-spill-defs-before-corobegin.ll b/llvm/test/Transforms/Coroutines/coro-spill-defs-before-corobegin.ll index 801c4a1776135..16df22b19fd11 100644 --- a/llvm/test/Transforms/Coroutines/coro-spill-defs-before-corobegin.ll +++ b/llvm/test/Transforms/Coroutines/coro-spill-defs-before-corobegin.ll @@ -36,7 +36,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl lpad: @@ -70,7 +70,7 @@ declare void 
@llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare i32 @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-spill-promise-02.ll b/llvm/test/Transforms/Coroutines/coro-spill-promise-02.ll index 3293e5c84b987..a3888f0eff562 100644 --- a/llvm/test/Transforms/Coroutines/coro-spill-promise-02.ll +++ b/llvm/test/Transforms/Coroutines/coro-spill-promise-02.ll @@ -29,7 +29,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -57,7 +57,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare double @print(double) diff --git a/llvm/test/Transforms/Coroutines/coro-spill-promise.ll b/llvm/test/Transforms/Coroutines/coro-spill-promise.ll index 47e891a57d222..344d77bd54930 100644 --- a/llvm/test/Transforms/Coroutines/coro-spill-promise.ll +++ b/llvm/test/Transforms/Coroutines/coro-spill-promise.ll @@ -28,7 +28,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -48,7 +48,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare double @print(double) diff --git a/llvm/test/Transforms/Coroutines/coro-spill-suspend.ll b/llvm/test/Transforms/Coroutines/coro-spill-suspend.ll index 8de02c8b7de23..6a80f3637fd10 100644 --- a/llvm/test/Transforms/Coroutines/coro-spill-suspend.ll +++ b/llvm/test/Transforms/Coroutines/coro-spill-suspend.ll @@ -49,7 +49,7 @@ cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } diff --git a/llvm/test/Transforms/Coroutines/coro-split-00.ll b/llvm/test/Transforms/Coroutines/coro-split-00.ll index 9909627e60597..06f71c848e250 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-00.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-00.ll @@ -28,7 +28,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -86,7 +86,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) allockind("alloc,uninitialized") "alloc-family"="malloc" declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-split-01.ll b/llvm/test/Transforms/Coroutines/coro-split-01.ll index 7a03495e75d8d..e74e927839dff 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-01.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-01.ll @@ -26,7 +26,7 @@ cleanup: call void 
@free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } define i32 @main() { @@ -49,7 +49,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-split-02.ll b/llvm/test/Transforms/Coroutines/coro-split-02.ll index c487ab1e42ff6..a11ea466af29c 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-02.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-02.ll @@ -34,7 +34,7 @@ await.ready: call void @print(i32 %val) br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -60,6 +60,6 @@ declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-split-alloc.ll b/llvm/test/Transforms/Coroutines/coro-split-alloc.ll index f6f50e2f3c76c..9a5e97fedd97d 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-alloc.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-alloc.ll @@ -33,7 +33,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -57,7 +57,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @custom_alloctor(i32, i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-split-dbg-labels-inlined.ll b/llvm/test/Transforms/Coroutines/coro-split-dbg-labels-inlined.ll index e9737b62b0b8f..995795b8de1fa 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-dbg-labels-inlined.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-dbg-labels-inlined.ll @@ -28,7 +28,7 @@ coro_Cleanup: br label %coro_Suspend, !dbg !37 coro_Suspend: - tail call i1 @llvm.coro.end(ptr null, i1 false, token none) #3, !dbg !40 + tail call void @llvm.coro.end(ptr null, i1 false, token none) #3, !dbg !40 ret ptr %2, !dbg !41 } @@ -60,7 +60,7 @@ declare token @llvm.coro.save(ptr) #0 declare i8 @llvm.coro.suspend(token, i1) #0 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #4 declare void @free(ptr nocapture) local_unnamed_addr #0 -declare i1 @llvm.coro.end(ptr, i1, token) #0 +declare void @llvm.coro.end(ptr, i1, token) #0 attributes #0 = { nounwind } attributes #1 = { nounwind readnone } diff --git a/llvm/test/Transforms/Coroutines/coro-split-dbg-labels.ll b/llvm/test/Transforms/Coroutines/coro-split-dbg-labels.ll index 490e4fc102349..4c5d9fb81c272 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-dbg-labels.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-dbg-labels.ll @@ -64,7 +64,7 @@ coro_Cleanup: ; preds = %for.cond br label 
%coro_Suspend, !dbg !37 coro_Suspend: ; preds = %for.cond, %if.then, %coro_Cleanup - tail call i1 @llvm.coro.end(ptr null, i1 false, token none) #3, !dbg !40 + tail call void @llvm.coro.end(ptr null, i1 false, token none) #3, !dbg !40 ret ptr %2, !dbg !41 } @@ -115,7 +115,7 @@ declare token @llvm.coro.save(ptr) #0 declare i8 @llvm.coro.suspend(token, i1) #0 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #4 declare void @free(ptr nocapture) local_unnamed_addr #0 -declare i1 @llvm.coro.end(ptr, i1, token) #0 +declare void @llvm.coro.end(ptr, i1, token) #0 attributes #0 = { nounwind } attributes #1 = { nounwind readnone } diff --git a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll index 02bd2b2d0d65f..c53bea899ee51 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll @@ -38,7 +38,7 @@ coro_Cleanup: ; preds = %for.cond br label %coro_Suspend, !dbg !36 coro_Suspend: ; preds = %for.cond, %if.then, %coro_Cleanup - tail call i1 @llvm.coro.end(ptr null, i1 false, token none) #9, !dbg !38 + tail call void @llvm.coro.end(ptr null, i1 false, token none) #9, !dbg !38 ret ptr %2, !dbg !39 } @@ -57,7 +57,7 @@ declare i8 @llvm.coro.suspend(token, i1) #7 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #5 declare void @free(ptr nocapture) local_unnamed_addr #6 -declare i1 @llvm.coro.end(ptr, i1, token) #7 +declare void @llvm.coro.end(ptr, i1, token) #7 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #5 declare void @llvm.dbg.value(metadata, metadata, metadata) #1 diff --git a/llvm/test/Transforms/Coroutines/coro-split-eh-00.ll b/llvm/test/Transforms/Coroutines/coro-split-eh-00.ll index d7d60bb2bfa22..0695071306d8d 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-eh-00.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-eh-00.ll @@ -17,7 +17,7 @@ resume: invoke void @print(i32 1) to label %suspend unwind label %lpad suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) call void @print(i32 0) ; should not be present in f.resume ret ptr %hdl @@ -26,8 +26,9 @@ lpad: cleanup call void @print(i32 2) - %need.resume = call i1 @llvm.coro.end(ptr null, i1 true, token none) - br i1 %need.resume, label %eh.resume, label %cleanup.cont + call void @llvm.coro.end(ptr null, i1 true, token none) + %in.ramp = call i1 @llvm.coro.is_in_ramp() + br i1 %in.ramp, label %cleanup.cont, label %eh.resume cleanup.cont: call void @print(i32 3) ; should not be present in f.resume @@ -80,7 +81,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare ptr @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-split-eh-01.ll b/llvm/test/Transforms/Coroutines/coro-split-eh-01.ll index b25c4b9f5a700..093fd85b80cdd 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-eh-01.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-eh-01.ll @@ -17,14 +17,14 @@ resume: invoke void @print(i32 1) to label %suspend unwind label %lpad suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) call void @print(i32 0) ; should not be present in f.resume ret ptr %hdl lpad: %tok = 
cleanuppad within none [] call void @print(i32 2) - %unused = call i1 @llvm.coro.end(ptr null, i1 true, token none) [ "funclet"(token %tok) ] + call void @llvm.coro.end(ptr null, i1 true, token none) [ "funclet"(token %tok) ] cleanupret from %tok unwind label %cleanup.cont cleanup.cont: @@ -74,7 +74,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare ptr @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-split-final-suspend.ll b/llvm/test/Transforms/Coroutines/coro-split-final-suspend.ll index fbefd43f73c36..b620b2d7fa4be 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-final-suspend.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-final-suspend.ll @@ -28,7 +28,7 @@ resume: invoke void @print(i32 1) to label %suspend unwind label %lpad suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) call void @print(i32 0) ret ptr %hdl @@ -37,8 +37,9 @@ lpad: cleanup call void @print(i32 2) - %need.resume = call i1 @llvm.coro.end(ptr null, i1 true, token none) - br i1 %need.resume, label %eh.resume, label %cleanup.cont + call void @llvm.coro.end(ptr null, i1 true, token none) + %in.ramp = call i1 @llvm.coro.is_in_ramp() + br i1 %in.ramp, label %cleanup.cont, label %eh.resume cleanup.cont: call void @print(i32 3) @@ -97,7 +98,7 @@ resume: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) call void @print(i32 0) ret ptr %hdl } @@ -122,7 +123,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare ptr @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-split-hidden.ll b/llvm/test/Transforms/Coroutines/coro-split-hidden.ll index fa4f0ab13bebc..2c1bf35c2fafc 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-hidden.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-hidden.ll @@ -30,7 +30,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -74,7 +74,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) allockind("alloc,uninitialized") declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll index e661932bf020e..70f15f6129d8e 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll @@ -24,7 +24,7 @@ declare void @llvm.assume(i1 noundef) declare i64 @llvm.coro.align.i64() declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr writeonly) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void 
@llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.free(token, ptr nocapture readonly) declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) declare token @llvm.coro.save(ptr) @@ -162,7 +162,7 @@ define ptr @f(i32 %0) presplitcoroutine align 32 { 61: ; preds = %60, %57, %54, %47, %12 %62 = getelementptr inbounds i8, ptr %3, i64 -16 - %63 = call i1 @llvm.coro.end(ptr null, i1 false, token none) #28 + call void @llvm.coro.end(ptr null, i1 false, token none) #28 ret ptr %62 } diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail-ppc64le.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail-ppc64le.ll index e8596b78460a5..cb3a12952d7e5 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail-ppc64le.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail-ppc64le.ll @@ -36,7 +36,7 @@ await.ready: i8 1, label %exit ] exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -58,7 +58,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail.ll index 70f29f4a9a4dc..d224d17fb52ea 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail.ll @@ -27,7 +27,7 @@ await.ready: i8 1, label %exit ] exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -50,7 +50,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl) diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail1.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail1.ll index 3edb8728d8550..4228a9db64866 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail1.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail1.ll @@ -45,7 +45,7 @@ final.suspend: pre.exit: br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void unreach: unreachable @@ -83,7 +83,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare i8 @switch_result() diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail10.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail10.ll index a55b3d16e2ded..7bf0d72facb28 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail10.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail10.ll @@ -33,7 +33,7 @@ await.ready: i8 1, label 
%exit ] exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -47,7 +47,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl) diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail12.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail12.ll index 5baec378876bb..c818d1c2b144c 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail12.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail12.ll @@ -56,7 +56,7 @@ coro.free: br label %coro.end coro.end: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -73,7 +73,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare void @delete(ptr nonnull) #2 diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail13.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail13.ll index 0290e42339e2a..c726810e25d13 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail13.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail13.ll @@ -39,7 +39,7 @@ lpad: %lpval = landingpad { ptr, i32 } cleanup - %need.resume = call i1 @llvm.coro.end(ptr null, i1 true, token none) + call void @llvm.coro.end(ptr null, i1 true, token none) resume { ptr, i32 } %lpval coro.free: @@ -47,7 +47,7 @@ coro.free: br label %coro.end coro.end: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -64,7 +64,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare void @delete(ptr nonnull) #2 diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail2.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail2.ll index ca1611e19b9f9..04d2352107041 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail2.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail2.ll @@ -31,7 +31,7 @@ await.ready: i8 1, label %exit ] exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -54,7 +54,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl) diff --git 
a/llvm/test/Transforms/Coroutines/coro-split-musttail3.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail3.ll index 84cdac17beebb..558e38b3919fb 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail3.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail3.ll @@ -41,7 +41,7 @@ final.suspend: pre.exit: br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void unreach: unreachable @@ -78,7 +78,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare i8 @switch_result() diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail4.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail4.ll index b647bd2e4a207..97cd6fb4375f1 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail4.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail4.ll @@ -38,7 +38,7 @@ coro.free: br label %coro.end coro.end: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -54,7 +54,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare void @delete(ptr nonnull) #2 diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll index b2561751e6377..9a2697efa1f2b 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll @@ -32,7 +32,7 @@ await.ready: call void @llvm.lifetime.end.p0(ptr %alloc.var) br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -49,7 +49,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare void @consume(ptr) diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll index 99174ff283120..36ae55e9e69e3 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll @@ -36,7 +36,7 @@ await.ready: call void @llvm.lifetime.end.p0(ptr %alloc.var) br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -84,7 +84,7 @@ coro.free: br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -101,7 +101,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare void @delete(ptr nonnull) #2 diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll index 91f8543dffe93..8b67ccb8b8718 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll @@ -40,7 +40,7 @@ await.ready: br label %exit exit: %result = phi i64 [0, %entry], [0, %entry], [%foo, %await.suspend], [%foo, %await.suspend], [%foo, %await.ready] - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret i64 %result } @@ -90,7 +90,7 @@ coro.free: br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -109,7 +109,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare void @delete(ptr nonnull) #2 diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail8.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail8.ll index 31b18d746be5f..5eeaf9db83118 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail8.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail8.ll @@ -32,7 +32,7 @@ await.ready: i8 1, label %exit ] exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -46,7 +46,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare void @print() diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail9.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail9.ll index 76376dbbbe3d8..2906877df924b 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-musttail9.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-musttail9.ll @@ -32,7 +32,7 @@ await.ready: i8 1, label %exit ] exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -46,7 +46,7 @@ declare token @llvm.coro.save(ptr) #2 declare ptr @llvm.coro.frame() #3 declare i8 @llvm.coro.suspend(token, i1) #2 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #1 -declare i1 @llvm.coro.end(ptr, i1, token) #2 +declare void @llvm.coro.end(ptr, i1, token) #2 declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1 declare ptr @malloc(i64) declare void @print() diff --git a/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll b/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll index 12d65647c8b01..8c081f324e0cc 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll @@ -37,7 +37,7 @@ 
cleanup: br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -52,7 +52,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.lifetime.start.p0(ptr nocapture) declare void @llvm.lifetime.end.p0(ptr nocapture) diff --git a/llvm/test/Transforms/Coroutines/coro-split-noinline.ll b/llvm/test/Transforms/Coroutines/coro-split-noinline.ll index c53771570a079..498bb4745d43c 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-noinline.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-noinline.ll @@ -29,7 +29,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -53,7 +53,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) allockind("alloc,uninitialized") "alloc-family"="malloc" declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll index a5a2bcf2ecb81..848cf8b3e461f 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll @@ -37,7 +37,7 @@ await.ready: call void @print(i32 %val) br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -76,7 +76,7 @@ await.ready: call void @print(i32 %val) br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -91,6 +91,6 @@ declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll index abc91c3b11c6b..26037043a26ed 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll @@ -48,7 +48,7 @@ after.await: br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -68,6 +68,6 @@ declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll 
b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll index efd1adfc54b53..26c4c72ef0726 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll @@ -36,7 +36,7 @@ await.ready: call void @print(i32 %val) br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } ; CHECK-LABEL: @a.gep.resume( @@ -59,6 +59,6 @@ declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll index af5aa8ade0b65..be4bf4c14737c 100644 --- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll +++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll @@ -35,7 +35,7 @@ await.ready: call void @print(i32 %val) br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -61,6 +61,6 @@ declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1, token) #3 +declare void @llvm.coro.end(ptr, i1, token) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Coroutines/coro-swifterror.ll b/llvm/test/Transforms/Coroutines/coro-swifterror.ll index 899be4a010326..76a4816219ffd 100644 --- a/llvm/test/Transforms/Coroutines/coro-swifterror.ll +++ b/llvm/test/Transforms/Coroutines/coro-swifterror.ll @@ -34,7 +34,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -76,7 +76,7 @@ resume: br label %loop cleanup: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) unreachable } @@ -86,7 +86,7 @@ declare token @llvm.coro.id.retcon(i32, i32, ptr, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) declare { i1, ptr } @llvm.coro.suspend.retcon.i1p0p0i8(...) declare i1 @llvm.coro.suspend.retcon.i1(...) 
-declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.prepare.retcon(ptr) declare ptr @f_prototype(ptr, i1 zeroext, ptr swifterror) diff --git a/llvm/test/Transforms/Coroutines/coro-zero-alloca.ll b/llvm/test/Transforms/Coroutines/coro-zero-alloca.ll index e3f09ba29cbf7..d1d826c7f1009 100644 --- a/llvm/test/Transforms/Coroutines/coro-zero-alloca.ll +++ b/llvm/test/Transforms/Coroutines/coro-zero-alloca.ll @@ -9,7 +9,7 @@ declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) declare i64 @llvm.coro.size.i64() declare ptr @llvm.coro.begin(token, ptr writeonly) declare i8 @llvm.coro.suspend(token, i1) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.free(token, ptr nocapture readonly) declare token @llvm.coro.save(ptr) @@ -42,7 +42,7 @@ wakeup: ; preds = %entry br label %cleanup suspend: ; preds = %cleanup, %entry - %unused = call i1 @llvm.coro.end(ptr %coro.state, i1 false, token none) + call void @llvm.coro.end(ptr %coro.state, i1 false, token none) ret void cleanup: ; preds = %wakeup, %entry diff --git a/llvm/test/Transforms/Coroutines/ex0.ll b/llvm/test/Transforms/Coroutines/ex0.ll index 9809488c85b37..420379ed5620a 100644 --- a/llvm/test/Transforms/Coroutines/ex0.ll +++ b/llvm/test/Transforms/Coroutines/ex0.ll @@ -24,7 +24,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -52,7 +52,7 @@ declare void @llvm.coro.resume(ptr) declare void @llvm.coro.destroy(ptr) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/ex1.ll b/llvm/test/Transforms/Coroutines/ex1.ll index 2db5ef6067cad..0a9b15641f2e6 100644 --- a/llvm/test/Transforms/Coroutines/ex1.ll +++ b/llvm/test/Transforms/Coroutines/ex1.ll @@ -20,7 +20,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret ptr %hdl } @@ -48,7 +48,7 @@ declare i32 @llvm.coro.size.i32() declare ptr @llvm.coro.begin(token, ptr) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.coro.resume(ptr) declare void @llvm.coro.destroy(ptr) diff --git a/llvm/test/Transforms/Coroutines/ex2.ll b/llvm/test/Transforms/Coroutines/ex2.ll index d9999d46b38cf..fb4eeb5ee8bbc 100644 --- a/llvm/test/Transforms/Coroutines/ex2.ll +++ b/llvm/test/Transforms/Coroutines/ex2.ll @@ -29,7 +29,7 @@ dyn.free: call void @CustomFree(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret ptr %hdl } @@ -63,7 +63,7 @@ declare i32 @llvm.coro.size.i32() declare ptr @llvm.coro.begin(token, ptr) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.coro.resume(ptr) declare void @llvm.coro.destroy(ptr) diff --git a/llvm/test/Transforms/Coroutines/ex3.ll b/llvm/test/Transforms/Coroutines/ex3.ll index e7fbc97d8f14f..3b3c579625df8 100644 --- 
a/llvm/test/Transforms/Coroutines/ex3.ll +++ b/llvm/test/Transforms/Coroutines/ex3.ll @@ -32,7 +32,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret ptr %hdl } @@ -67,7 +67,7 @@ declare i32 @llvm.coro.size.i32() declare ptr @llvm.coro.begin(token, ptr) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.coro.resume(ptr) declare void @llvm.coro.destroy(ptr) diff --git a/llvm/test/Transforms/Coroutines/ex4.ll b/llvm/test/Transforms/Coroutines/ex4.ll index 7c7a869e4a500..fa7b64b1379f6 100644 --- a/llvm/test/Transforms/Coroutines/ex4.ll +++ b/llvm/test/Transforms/Coroutines/ex4.ll @@ -27,7 +27,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret ptr %hdl } @@ -63,7 +63,7 @@ declare i32 @llvm.coro.size.i32() declare ptr @llvm.coro.begin(token, ptr) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare void @llvm.coro.resume(ptr) declare void @llvm.coro.destroy(ptr) diff --git a/llvm/test/Transforms/Coroutines/ex5.ll b/llvm/test/Transforms/Coroutines/ex5.ll index bf5cbec266c91..3640b83ceb28b 100644 --- a/llvm/test/Transforms/Coroutines/ex5.ll +++ b/llvm/test/Transforms/Coroutines/ex5.ll @@ -31,7 +31,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret ptr %hdl } @@ -46,7 +46,7 @@ declare ptr @llvm.coro.begin(token, ptr) declare token @llvm.coro.save(ptr) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) ; CHECK-LABEL: @main define i32 @main() { diff --git a/llvm/test/Transforms/Coroutines/no-suspend.ll b/llvm/test/Transforms/Coroutines/no-suspend.ll index fd8c5ac990958..c08423d6053fc 100644 --- a/llvm/test/Transforms/Coroutines/no-suspend.ll +++ b/llvm/test/Transforms/Coroutines/no-suspend.ll @@ -32,7 +32,7 @@ dyn.free: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret void } @@ -81,7 +81,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret void } @@ -129,7 +129,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret void lpad: %lpval = landingpad { ptr, i32 } @@ -190,7 +190,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret void } @@ -244,7 +244,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret void } @@ -291,7 +291,7 @@ cleanup: call void @free(ptr %mem) br label %suspend 
suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret void lpad: %lpval = landingpad { ptr, i32 } @@ -343,7 +343,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret void } @@ -388,7 +388,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) ret void lpad: %lpval = landingpad { ptr, i32 } @@ -410,7 +410,7 @@ declare ptr @llvm.coro.begin(token, ptr) declare token @llvm.coro.save(ptr %hdl) declare i8 @llvm.coro.suspend(token, i1) declare ptr @llvm.coro.free(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.subfn.addr(ptr, i8) diff --git a/llvm/test/Transforms/Coroutines/phi-coro-end.ll b/llvm/test/Transforms/Coroutines/phi-coro-end.ll index aab76faed3f1a..adfcba01d6119 100644 --- a/llvm/test/Transforms/Coroutines/phi-coro-end.ll +++ b/llvm/test/Transforms/Coroutines/phi-coro-end.ll @@ -17,7 +17,7 @@ cleanup: suspend: %r = phi i32 [%n, %entry], [1, %cleanup] - call i1 @llvm.coro.end(ptr %hdl, i1 false, token none) + call void @llvm.coro.end(ptr %hdl, i1 false, token none) call void @print(i32 %r) ret ptr %hdl } @@ -41,7 +41,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/remarks.ll b/llvm/test/Transforms/Coroutines/remarks.ll index 5eaddbfc3d107..418a75cde49c9 100644 --- a/llvm/test/Transforms/Coroutines/remarks.ll +++ b/llvm/test/Transforms/Coroutines/remarks.ll @@ -33,7 +33,7 @@ cleanup: call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0, token none) + call void @llvm.coro.end(ptr %hdl, i1 0, token none) ret ptr %hdl } @@ -60,7 +60,7 @@ declare void @llvm.coro.destroy(ptr) declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare noalias ptr @malloc(i32) declare void @print(i32) diff --git a/llvm/test/Transforms/Coroutines/swift-async-dbg.ll b/llvm/test/Transforms/Coroutines/swift-async-dbg.ll index 5090274ea3ad4..00138c091890f 100644 --- a/llvm/test/Transforms/Coroutines/swift-async-dbg.ll +++ b/llvm/test/Transforms/Coroutines/swift-async-dbg.ll @@ -73,7 +73,7 @@ define swifttailcc void @coroutineA(ptr swiftasync %arg) !dbg !48 { %i33 = call { ptr } (i32, ptr, ptr, ...) @llvm.coro.suspend.async.sl_p0s(i32 0, ptr %i31, ptr nonnull @__swift_async_resume_get_context, ptr nonnull @coroutineA.1, ptr %i31, i64 0, i64 0, ptr %i29), !dbg !54 %i34 = extractvalue { ptr } %i33, 0, !dbg !54 %i35 = call ptr @__swift_async_resume_get_context(ptr %i34), !dbg !54 - %i45 = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %i3, i1 false, ptr nonnull @coroutineA.0.1, ptr undef, ptr undef), !dbg !54 + call void (ptr, i1, ...) 
@llvm.coro.end.async(ptr %i3, i1 false, ptr nonnull @coroutineA.0.1, ptr undef, ptr undef), !dbg !54 unreachable, !dbg !54 ; CHECK-NOT: define ; CHECK-LABEL: define {{.*}} @coroutineATY2_( @@ -116,7 +116,7 @@ define swifttailcc void @coroutineB(ptr swiftasync %arg) !dbg !37 { %i3 = call ptr @llvm.coro.begin(token %i2, ptr null) %i6 = getelementptr inbounds <{ ptr, ptr }>, ptr %arg, i64 0, i32 1, !dbg !42 %i712 = load ptr, ptr %i6, align 8, !dbg !42 - %i10 = call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %i3, i1 false, ptr nonnull @coroutineB.0, ptr %i712, ptr %arg), !dbg !42 + call void (ptr, i1, ...) @llvm.coro.end.async(ptr %i3, i1 false, ptr nonnull @coroutineB.0, ptr %i712, ptr %arg), !dbg !42 unreachable, !dbg !42 } define hidden swifttailcc void @coroutineB.0(ptr %arg, ptr %arg1) !dbg !44 { @@ -124,7 +124,7 @@ define hidden swifttailcc void @coroutineB.0(ptr %arg, ptr %arg1) !dbg !44 { ret void, !dbg !47 } -declare i1 @llvm.coro.end.async(ptr, i1, ...) +declare void @llvm.coro.end.async(ptr, i1, ...) declare ptr @llvm.coro.async.resume() declare ptr @llvm.coro.begin(token, ptr writeonly) declare ptr @llvm.swift.async.context.addr() diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/pr161367.ll b/llvm/test/Transforms/CorrelatedValuePropagation/pr161367.ll new file mode 100644 index 0000000000000..346eaeaec72c1 --- /dev/null +++ b/llvm/test/Transforms/CorrelatedValuePropagation/pr161367.ll @@ -0,0 +1,31 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s + +; Make sure that we apply trunc to the edge value of %x. +@g = external global i8 + +define i16 @pr161367(i64 %x) { +; CHECK-LABEL: define i16 @pr161367( +; CHECK-SAME: i64 [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[X]] to i16 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[X]], sub (i64 0, i64 ptrtoint (ptr @g to i64)) +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT:.*]], label %[[ELSE:.*]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RET:%.*]] = phi i16 [ trunc (i64 sub (i64 0, i64 ptrtoint (ptr @g to i64)) to i16), %[[ENTRY]] ], [ 0, %[[ELSE]] ] +; CHECK-NEXT: ret i16 [[RET]] +; +entry: + %trunc = trunc i64 %x to i16 + %exitcond = icmp eq i64 %x, sub (i64 0, i64 ptrtoint (ptr @g to i64)) + br i1 %exitcond, label %exit, label %else + +else: + br label %exit + +exit: + %ret = phi i16 [ %trunc, %entry ], [ 0, %else ] + ret i16 %ret +} diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/track-predecessor-ranges.ll b/llvm/test/Transforms/CorrelatedValuePropagation/track-predecessor-ranges.ll new file mode 100644 index 0000000000000..b5f688420d9c9 --- /dev/null +++ b/llvm/test/Transforms/CorrelatedValuePropagation/track-predecessor-ranges.ll @@ -0,0 +1,98 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes="correlated-propagation" -S 2>&1 | FileCheck %s +; RUN: opt < %s -passes="correlated-propagation" -lvi-per-pred-ranges -S 2>&1 | FileCheck %s -check-prefix=LVI-PRED-RANGES + +@global = external local_unnamed_addr global [4338 x i32], align 16 + +define dso_local noundef zeroext i1 @bar(i64 noundef %arg, ptr noundef writeonly captures(none) %arg1) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef zeroext i1 @bar( +; CHECK-SAME: i64 noundef [[ARG:%.*]], ptr noundef writeonly captures(none) [[ARG1:%.*]]) local_unnamed_addr { +; 
CHECK-NEXT: [[BB:.*]]: +; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[ARG]], 1025 +; CHECK-NEXT: br i1 [[ICMP]], label %[[BB4:.*]], label %[[BB2:.*]] +; CHECK: [[BB2]]: +; CHECK-NEXT: [[ICMP3:%.*]] = icmp ult i64 [[ARG]], 262145 +; CHECK-NEXT: br i1 [[ICMP3]], label %[[BB4]], label %[[BB9:.*]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ 7, %[[BB]] ], [ 15487, %[[BB2]] ] +; CHECK-NEXT: [[PHI5:%.*]] = phi i64 [ 3, %[[BB]] ], [ 7, %[[BB2]] ] +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i64 [[PHI]], [[ARG]] +; CHECK-NEXT: [[LSHR:%.*]] = lshr i64 [[ADD]], [[PHI5]] +; CHECK-NEXT: [[ICMP6:%.*]] = icmp samesign ult i64 [[LSHR]], 4338 +; CHECK-NEXT: br i1 [[ICMP6]], label %[[BB8:.*]], label %[[BB7:.*]] +; CHECK: [[BB7]]: +; CHECK-NEXT: tail call void @llvm.ubsantrap(i8 18) +; CHECK-NEXT: unreachable +; CHECK: [[BB8]]: +; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr inbounds nuw [4338 x i32], ptr @global, i64 0, i64 [[LSHR]] +; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[GETELEMENTPTR]], align 4 +; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[LOAD]] to i64 +; CHECK-NEXT: store i64 [[SEXT]], ptr [[ARG1]], align 8 +; CHECK-NEXT: br label %[[BB9]] +; CHECK: [[BB9]]: +; CHECK-NEXT: [[PHI10:%.*]] = phi i1 [ true, %[[BB8]] ], [ false, %[[BB2]] ] +; CHECK-NEXT: ret i1 [[PHI10]] +; +; LVI-PRED-RANGES-LABEL: define dso_local noundef zeroext i1 @bar( +; LVI-PRED-RANGES-SAME: i64 noundef [[ARG:%.*]], ptr noundef writeonly captures(none) [[ARG1:%.*]]) local_unnamed_addr { +; LVI-PRED-RANGES-NEXT: [[BB:.*]]: +; LVI-PRED-RANGES-NEXT: [[ICMP:%.*]] = icmp ult i64 [[ARG]], 1025 +; LVI-PRED-RANGES-NEXT: br i1 [[ICMP]], label %[[BB4:.*]], label %[[BB2:.*]] +; LVI-PRED-RANGES: [[BB2]]: +; LVI-PRED-RANGES-NEXT: [[ICMP3:%.*]] = icmp ult i64 [[ARG]], 262145 +; LVI-PRED-RANGES-NEXT: br i1 [[ICMP3]], label %[[BB4]], label %[[BB9:.*]] +; LVI-PRED-RANGES: [[BB4]]: +; LVI-PRED-RANGES-NEXT: [[PHI:%.*]] = phi i64 [ 7, %[[BB]] ], [ 15487, %[[BB2]] ] +; LVI-PRED-RANGES-NEXT: [[PHI5:%.*]] = phi i64 [ 3, %[[BB]] ], [ 7, %[[BB2]] ] +; LVI-PRED-RANGES-NEXT: [[ADD:%.*]] = add nuw nsw i64 [[PHI]], [[ARG]] +; LVI-PRED-RANGES-NEXT: [[LSHR:%.*]] = lshr i64 [[ADD]], [[PHI5]] +; LVI-PRED-RANGES-NEXT: br i1 true, label %[[BB8:.*]], label %[[BB7:.*]] +; LVI-PRED-RANGES: [[BB7]]: +; LVI-PRED-RANGES-NEXT: tail call void @llvm.ubsantrap(i8 18) +; LVI-PRED-RANGES-NEXT: unreachable +; LVI-PRED-RANGES: [[BB8]]: +; LVI-PRED-RANGES-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr inbounds nuw [4338 x i32], ptr @global, i64 0, i64 [[LSHR]] +; LVI-PRED-RANGES-NEXT: [[LOAD:%.*]] = load i32, ptr [[GETELEMENTPTR]], align 4 +; LVI-PRED-RANGES-NEXT: [[SEXT:%.*]] = sext i32 [[LOAD]] to i64 +; LVI-PRED-RANGES-NEXT: store i64 [[SEXT]], ptr [[ARG1]], align 8 +; LVI-PRED-RANGES-NEXT: br label %[[BB9]] +; LVI-PRED-RANGES: [[BB9]]: +; LVI-PRED-RANGES-NEXT: [[PHI10:%.*]] = phi i1 [ true, %[[BB8]] ], [ false, %[[BB2]] ] +; LVI-PRED-RANGES-NEXT: ret i1 [[PHI10]] +; +bb: + %icmp = icmp ult i64 %arg, 1025 + br i1 %icmp, label %bb4, label %bb2 + +bb2: ; preds = %bb + %icmp3 = icmp ult i64 %arg, 262145 + br i1 %icmp3, label %bb4, label %bb9 + +bb4: ; preds = %bb2, %bb + %phi = phi i64 [ 7, %bb ], [ 15487, %bb2 ] + %phi5 = phi i64 [ 3, %bb ], [ 7, %bb2 ] + %add = add nuw nsw i64 %phi, %arg + %lshr = lshr i64 %add, %phi5 + %icmp6 = icmp samesign ult i64 %lshr, 4338 + br i1 %icmp6, label %bb8, label %bb7 + +bb7: ; preds = %bb4 + tail call void @llvm.ubsantrap(i8 18) + unreachable + +bb8: ; preds = %bb4 + %getelementptr = getelementptr inbounds nuw [4338 x i32], ptr @global, 
i64 0, i64 %lshr + %load = load i32, ptr %getelementptr, align 4 + %sext = sext i32 %load to i64 + store i64 %sext, ptr %arg1, align 8 + br label %bb9 + +bb9: ; preds = %bb8, %bb2 + %phi10 = phi i1 [ true, %bb8 ], [ false, %bb2 ] + ret i1 %phi10 +} + +; Function Attrs: cold noreturn nounwind +declare void @llvm.ubsantrap(i8 immarg) #0 + +attributes #0 = { cold noreturn nounwind } diff --git a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll index cba1ba8dde768..ad0568486396f 100644 --- a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll +++ b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-transform.ll @@ -304,32 +304,43 @@ end: define void @pr106083_invalidBBarg_fold(i1 %cmp1, i1 %cmp2, i1 %not, ptr %d) { ; CHECK-LABEL: @pr106083_invalidBBarg_fold( ; CHECK-NEXT: bb: -; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[SEL_SI_UNFOLD_FALSE:%.*]] -; CHECK: sel.si.unfold.false: -; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 1, [[BB:%.*]] ] -; CHECK-NEXT: br label [[BB1]] +; CHECK-NEXT: br label [[BB1:%.*]] ; CHECK: BB1: -; CHECK-NEXT: [[I:%.*]] = phi i16 [ 0, [[BB1_BACKEDGE:%.*]] ], [ 0, [[BB]] ], [ 1, [[BB7:%.*]] ], [ 0, [[SEL_SI_UNFOLD_FALSE]] ], [ 1, [[BB7_JT0:%.*]] ] -; CHECK-NEXT: [[SEL_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[SEL_SI_UNFOLD_PHI]], [[BB1_BACKEDGE]] ], [ [[SEL_SI_UNFOLD_PHI]], [[BB7]] ], [ 0, [[BB]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SEL_SI_UNFOLD_FALSE]] ], [ [[SEL_SI_UNFOLD_PHI]], [[BB7_JT0]] ] +; CHECK-NEXT: [[I:%.*]] = phi i16 [ 0, [[BB1_BACKEDGE:%.*]] ], [ 0, [[BB:%.*]] ], [ 1, [[BB9:%.*]] ], [ 1, [[BB7_JT0:%.*]] ] ; CHECK-NEXT: br i1 [[NOT:%.*]], label [[BB7_JT0]], label [[BB2:%.*]] ; CHECK: BB2: ; CHECK-NEXT: store i16 0, ptr [[D:%.*]], align 2 -; CHECK-NEXT: br i1 [[CMP2:%.*]], label [[BB7]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK-NEXT: br i1 [[CMP2:%.*]], label [[BB7:%.*]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0:%.*]] ; CHECK: spec.select.si.unfold.false: -; CHECK-NEXT: br label [[BB7]] +; CHECK-NEXT: br label [[BB9]] ; CHECK: spec.select.si.unfold.false.jt0: ; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[BB2]] ] ; CHECK-NEXT: br label [[BB7_JT0]] +; CHECK: sel.si.unfold.true: +; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB9]], label [[SEL_SI_UNFOLD_FALSE_JT1:%.*]] +; CHECK: sel.si.unfold.true.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 0, [[BB2]] ] +; CHECK-NEXT: br i1 [[CMP1]], label [[BB7_JT0]], label [[SEL_SI_UNFOLD_FALSE:%.*]] +; CHECK: sel.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2:%.*]] = phi i32 [ 1, [[BB7]] ] +; CHECK-NEXT: br label [[BB9]] +; CHECK: sel.si.unfold.false.jt1: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2_JT1:%.*]] = phi i32 [ 1, [[SEL_SI_UNFOLD_TRUE:%.*]] ] +; CHECK-NEXT: br label [[BB7_JT1:%.*]] ; CHECK: BB7: -; CHECK-NEXT: [[D_PROMOTED4:%.*]] = phi i16 [ 1, [[BB2]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] ] -; CHECK-NEXT: [[_3:%.*]] = phi i32 [ [[SEL_SI_UNFOLD_PHI]], [[BB2]] ], [ poison, [[SPEC_SELECT_SI_UNFOLD_FALSE]] ] +; CHECK-NEXT: [[D_PROMOTED4:%.*]] = phi i16 [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] ], [ 1, [[SEL_SI_UNFOLD_TRUE]] ], [ 1, [[SEL_SI_UNFOLD_FALSE]] ] +; CHECK-NEXT: [[_3:%.*]] = phi i32 [ poison, [[SPEC_SELECT_SI_UNFOLD_FALSE]] ], [ poison, [[SEL_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI2]], [[SEL_SI_UNFOLD_FALSE]] ] ; CHECK-NEXT: switch i32 [[_3]], label [[BB1_BACKEDGE]] [ ; CHECK-NEXT: i32 0, label [[BB1]] ; CHECK-NEXT: i32 1, label [[BB8:%.*]] ; 
CHECK-NEXT: ] +; CHECK: BB7.jt1: +; CHECK-NEXT: [[D_PROMOTED4_JT1:%.*]] = phi i16 [ 1, [[SEL_SI_UNFOLD_FALSE_JT1]] ] +; CHECK-NEXT: [[_3_JT1:%.*]] = phi i32 [ [[DOTSI_UNFOLD_PHI2_JT1]], [[SEL_SI_UNFOLD_FALSE_JT1]] ] +; CHECK-NEXT: br label [[BB8]] ; CHECK: BB7.jt0: -; CHECK-NEXT: [[D_PROMOTED4_JT0:%.*]] = phi i16 [ 0, [[BB1]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ] -; CHECK-NEXT: [[_3_JT0:%.*]] = phi i32 [ 0, [[BB1]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: [[D_PROMOTED4_JT0:%.*]] = phi i16 [ 0, [[BB1]] ], [ 1, [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ], [ 1, [[BB7]] ] +; CHECK-NEXT: [[_3_JT0:%.*]] = phi i32 [ 0, [[BB1]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT0]] ], [ [[DOTSI_UNFOLD_PHI1]], [[BB7]] ] ; CHECK-NEXT: br label [[BB1]] ; CHECK: BB1.backedge: ; CHECK-NEXT: br label [[BB1]] @@ -367,30 +378,40 @@ BB8: ; preds = %BB7 define void @pr106083_select_dead_uses(i1 %cmp1, i1 %not, ptr %p) { ; CHECK-LABEL: @pr106083_select_dead_uses( ; CHECK-NEXT: bb: -; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[DOTLOOPEXIT6:%.*]], label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] -; CHECK: spec.select.si.unfold.false: -; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 1, [[BB:%.*]] ] -; CHECK-NEXT: br label [[DOTLOOPEXIT6]] +; CHECK-NEXT: br label [[DOTLOOPEXIT6:%.*]] ; CHECK: .loopexit6: -; CHECK-NEXT: [[SPEC_SELECT_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[SPEC_SELECT_SI_UNFOLD_PHI]], [[SELECT_UNFOLD:%.*]] ], [ 0, [[BB]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ] ; CHECK-NEXT: br i1 [[NOT:%.*]], label [[SELECT_UNFOLD_JT0:%.*]], label [[BB1:%.*]] ; CHECK: bb1: ; CHECK-NEXT: [[I:%.*]] = load i32, ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[NOT2:%.*]] = icmp eq i32 0, 0 -; CHECK-NEXT: br i1 [[NOT2]], label [[SELECT_UNFOLD]], label [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK-NEXT: br i1 [[NOT2]], label [[SELECT_UNFOLD:%.*]], label [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0:%.*]] ; CHECK: spec.select7.si.unfold.false: -; CHECK-NEXT: br label [[SELECT_UNFOLD]] +; CHECK-NEXT: br label [[SELECT_UNFOLD1:%.*]] ; CHECK: spec.select7.si.unfold.false.jt0: ; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[BB1]] ] ; CHECK-NEXT: br label [[SELECT_UNFOLD_JT0]] +; CHECK: spec.select.si.unfold.true: +; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[SELECT_UNFOLD1]], label [[SPEC_SELECT_SI_UNFOLD_FALSE_JT1:%.*]] +; CHECK: spec.select.si.unfold.true.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 0, [[BB1]] ] +; CHECK-NEXT: br i1 [[CMP1]], label [[SELECT_UNFOLD_JT0]], label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]] +; CHECK: spec.select.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2:%.*]] = phi i32 [ 1, [[SELECT_UNFOLD]] ] +; CHECK-NEXT: br label [[SELECT_UNFOLD1]] +; CHECK: spec.select.si.unfold.false.jt1: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2_JT1:%.*]] = phi i32 [ 1, [[SPEC_SELECT_SI_UNFOLD_TRUE:%.*]] ] +; CHECK-NEXT: br label [[SELECT_UNFOLD_JT1:%.*]] ; CHECK: select.unfold: -; CHECK-NEXT: [[_2:%.*]] = phi i32 [ [[SPEC_SELECT_SI_UNFOLD_PHI]], [[BB1]] ], [ poison, [[SPEC_SELECT7_SI_UNFOLD_FALSE:%.*]] ] +; CHECK-NEXT: [[_2:%.*]] = phi i32 [ poison, [[SPEC_SELECT7_SI_UNFOLD_FALSE:%.*]] ], [ poison, [[SPEC_SELECT_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI2]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ] ; CHECK-NEXT: switch i32 [[_2]], label [[BB2:%.*]] [ ; CHECK-NEXT: i32 0, label [[DOTPREHEADER_PREHEADER:%.*]] ; CHECK-NEXT: i32 1, label [[DOTLOOPEXIT6]] ; CHECK-NEXT: ] +; CHECK: select.unfold.jt1: +; CHECK-NEXT: [[_2_JT1:%.*]] = phi i32 [ 
[[DOTSI_UNFOLD_PHI2_JT1]], [[SPEC_SELECT_SI_UNFOLD_FALSE_JT1]] ] +; CHECK-NEXT: br label [[DOTLOOPEXIT6]] ; CHECK: select.unfold.jt0: -; CHECK-NEXT: [[_2_JT0:%.*]] = phi i32 [ 0, [[DOTLOOPEXIT6]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: [[_2_JT0:%.*]] = phi i32 [ 0, [[DOTLOOPEXIT6]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT7_SI_UNFOLD_FALSE_JT0]] ], [ [[DOTSI_UNFOLD_PHI1]], [[SELECT_UNFOLD]] ] ; CHECK-NEXT: br label [[DOTPREHEADER_PREHEADER]] ; CHECK: .preheader.preheader: ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll b/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll index 93872c3938768..663f459e23084 100644 --- a/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll +++ b/llvm/test/Transforms/DFAJumpThreading/dfa-unfold-select.ll @@ -463,3 +463,87 @@ unreachable: sw.bb: ; preds = %if.end br label %while.cond } + +define i16 @pr160250() { +; CHECK-LABEL: @pr160250( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_COND48:%.*]] +; CHECK: for.cond48: +; CHECK-NEXT: br i1 false, label [[CLEANUP87_JT0:%.*]], label [[IF_ELSE:%.*]] +; CHECK: if.else: +; CHECK-NEXT: br i1 false, label [[DOT6_SI_UNFOLD_TRUE:%.*]], label [[DOT5_SI_UNFOLD_TRUE:%.*]] +; CHECK: .5.si.unfold.true: +; CHECK-NEXT: br i1 false, label [[SPEC_SELECT1_SI_UNFOLD_TRUE1:%.*]], label [[DOT5_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK: .5.si.unfold.true.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI1:%.*]] = phi i32 [ 0, [[IF_ELSE]] ] +; CHECK-NEXT: br i1 false, label [[SPEC_SELECT1_SI_UNFOLD_TRUE:%.*]], label [[DOT5_SI_UNFOLD_FALSE:%.*]] +; CHECK: .5.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2:%.*]] = phi i32 [ 0, [[DOT5_SI_UNFOLD_TRUE]] ] +; CHECK-NEXT: br label [[SPEC_SELECT1_SI_UNFOLD_TRUE1]] +; CHECK: .5.si.unfold.false.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI2_JT0:%.*]] = phi i32 [ 0, [[DOT5_SI_UNFOLD_TRUE1:%.*]] ] +; CHECK-NEXT: br label [[SPEC_SELECT1_SI_UNFOLD_TRUE]] +; CHECK: spec.select1.si.unfold.true: +; CHECK-NEXT: [[DOT5_SI_UNFOLD_PHI:%.*]] = phi i32 [ poison, [[DOT5_SI_UNFOLD_TRUE1]] ], [ [[DOTSI_UNFOLD_PHI2]], [[DOT5_SI_UNFOLD_FALSE]] ] +; CHECK-NEXT: br i1 false, label [[SPEC_SELECT_SI_UNFOLD_FALSE1:%.*]], label [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT1:%.*]] +; CHECK: spec.select1.si.unfold.true.jt0: +; CHECK-NEXT: [[DOT5_SI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ [[DOTSI_UNFOLD_PHI1]], [[DOT5_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI2_JT0]], [[DOT5_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: br i1 false, label [[SPEC_SELECT_SI_UNFOLD_FALSE:%.*]], label [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK: spec.select1.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI:%.*]] = phi i32 [ 0, [[SPEC_SELECT1_SI_UNFOLD_TRUE]] ] +; CHECK-NEXT: br label [[SPEC_SELECT_SI_UNFOLD_FALSE1]] +; CHECK: spec.select1.si.unfold.false.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ 0, [[SPEC_SELECT1_SI_UNFOLD_TRUE1]] ] +; CHECK-NEXT: br label [[SPEC_SELECT_SI_UNFOLD_FALSE]] +; CHECK: spec.select.si.unfold.false: +; CHECK-NEXT: [[SPEC_SELECT1_SI_UNFOLD_PHI:%.*]] = phi i32 [ [[DOT5_SI_UNFOLD_PHI]], [[SPEC_SELECT1_SI_UNFOLD_TRUE1]] ], [ [[DOTSI_UNFOLD_PHI]], [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: br label [[CLEANUP87:%.*]] +; CHECK: spec.select.si.unfold.false.jt0: +; CHECK-NEXT: [[SPEC_SELECT1_SI_UNFOLD_PHI_JT0:%.*]] = phi i32 [ [[DOT5_SI_UNFOLD_PHI_JT0]], [[SPEC_SELECT1_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI_JT0]], [[SPEC_SELECT1_SI_UNFOLD_FALSE_JT1]] ] +; CHECK-NEXT: br label [[CLEANUP87_JT0]] +; CHECK: 
.6.si.unfold.true: +; CHECK-NEXT: br i1 false, label [[CLEANUP87]], label [[DOT6_SI_UNFOLD_FALSE_JT0:%.*]] +; CHECK: .6.si.unfold.true.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI3:%.*]] = phi i32 [ 0, [[IF_ELSE]] ] +; CHECK-NEXT: br i1 false, label [[CLEANUP87_JT0]], label [[DOT6_SI_UNFOLD_FALSE:%.*]] +; CHECK: .6.si.unfold.false: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI4:%.*]] = phi i32 [ 0, [[DOT6_SI_UNFOLD_TRUE]] ] +; CHECK-NEXT: br label [[CLEANUP87]] +; CHECK: .6.si.unfold.false.jt0: +; CHECK-NEXT: [[DOTSI_UNFOLD_PHI4_JT0:%.*]] = phi i32 [ 0, [[DOT6_SI_UNFOLD_TRUE1:%.*]] ] +; CHECK-NEXT: br label [[CLEANUP87_JT0]] +; CHECK: cleanup87: +; CHECK-NEXT: [[CLEANUP_DEST_SLOT_3:%.*]] = phi i32 [ [[SPEC_SELECT1_SI_UNFOLD_PHI]], [[SPEC_SELECT_SI_UNFOLD_FALSE1]] ], [ poison, [[DOT6_SI_UNFOLD_TRUE1]] ], [ [[DOTSI_UNFOLD_PHI4]], [[DOT6_SI_UNFOLD_FALSE]] ] +; CHECK-NEXT: switch i32 [[CLEANUP_DEST_SLOT_3]], label [[FOR_COND48_BACKEDGE:%.*]] [ +; CHECK-NEXT: i32 0, label [[FOR_COND48_BACKEDGE]] +; CHECK-NEXT: i32 1, label [[FOR_COND48_BACKEDGE]] +; CHECK-NEXT: ] +; CHECK: cleanup87.jt0: +; CHECK-NEXT: [[CLEANUP_DEST_SLOT_3_JT0:%.*]] = phi i32 [ 0, [[FOR_COND48]] ], [ [[SPEC_SELECT1_SI_UNFOLD_PHI_JT0]], [[SPEC_SELECT_SI_UNFOLD_FALSE]] ], [ [[DOTSI_UNFOLD_PHI3]], [[DOT6_SI_UNFOLD_TRUE]] ], [ [[DOTSI_UNFOLD_PHI4_JT0]], [[DOT6_SI_UNFOLD_FALSE_JT0]] ] +; CHECK-NEXT: br label [[FOR_COND48_BACKEDGE]] +; CHECK: for.cond48.backedge: +; CHECK-NEXT: br label [[FOR_COND48]] +; +entry: + %.5 = select i1 false, i32 0, i32 0 + %.6 = select i1 false, i32 0, i32 0 + br label %for.cond48 + +for.cond48: ; preds = %for.cond48.backedge, %entry + br i1 false, label %cleanup87, label %if.else + +if.else: ; preds = %for.cond48 + %spec.select1 = select i1 false, i32 %.5, i32 0 + %spec.select = select i1 false, i32 %.6, i32 %spec.select1 + br label %cleanup87 + +cleanup87: ; preds = %if.else, %for.cond48 + %cleanup.dest.slot.3 = phi i32 [ 0, %for.cond48 ], [ %spec.select, %if.else ] + switch i32 %cleanup.dest.slot.3, label %for.cond48.backedge [ + i32 0, label %for.cond48.backedge + i32 1, label %for.cond48.backedge + ] + +for.cond48.backedge: ; preds = %cleanup87, %cleanup87, %cleanup87 + br label %for.cond48 +} diff --git a/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll b/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll index ea0d5d3fca8ff..8a6f60ba7a204 100644 --- a/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll +++ b/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll @@ -1,6 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -S -passes=drop-unnecessary-assumes < %s | FileCheck %s +declare void @use(i32 %x) +declare i32 @get() + define void @basic_dead(i32 %x) { ; CHECK-LABEL: define void @basic_dead( ; CHECK-SAME: i32 [[X:%.*]]) { @@ -63,18 +66,17 @@ define i32 @multiple_live2(i32 %x, i32 %y) { ret i32 %y } -define void @operand_bundle_dead(ptr %x) { -; CHECK-LABEL: define void @operand_bundle_dead( +define void @operand_bundle_one_dead(ptr %x) { +; CHECK-LABEL: define void @operand_bundle_one_dead( ; CHECK-SAME: ptr [[X:%.*]]) { -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[X]], i64 8) ] ; CHECK-NEXT: ret void ; call void @llvm.assume(i1 true) ["align"(ptr %x, i64 8)] ret void } -define ptr @operand_bundle_live(ptr %x) { -; CHECK-LABEL: define ptr @operand_bundle_live( +define ptr @operand_bundle_one_live(ptr %x) { +; CHECK-LABEL: define ptr @operand_bundle_one_live( ; CHECK-SAME: ptr [[X:%.*]]) { ; CHECK-NEXT: call void 
@llvm.assume(i1 true) [ "align"(ptr [[X]], i64 8) ] ; CHECK-NEXT: ret ptr [[X]] @@ -83,6 +85,93 @@ define ptr @operand_bundle_live(ptr %x) { ret ptr %x } +define void @operand_bundle_multiple_dead(ptr %x, ptr %y) { +; CHECK-LABEL: define void @operand_bundle_multiple_dead( +; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) { +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr %x, i64 8), "align"(ptr %y, i64 8)] + ret void +} + +define ptr @operand_bundle_one_live_one_dead(ptr %x, ptr %y) { +; CHECK-LABEL: define ptr @operand_bundle_one_live_one_dead( +; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[Y]], i64 8) ] +; CHECK-NEXT: ret ptr [[Y]] +; + call void @llvm.assume(i1 true) ["align"(ptr %x, i64 8), "align"(ptr %y, i64 8)] + ret ptr %y +} + +define i64 @operand_bundle_ignore_unaffected_operands(ptr %x, i64 %align) { +; CHECK-LABEL: define i64 @operand_bundle_ignore_unaffected_operands( +; CHECK-SAME: ptr [[X:%.*]], i64 [[ALIGN:%.*]]) { +; CHECK-NEXT: ret i64 [[ALIGN]] +; + call void @llvm.assume(i1 true) ["align"(ptr %x, i64 %align)] + ret i64 %align +} + +define void @operand_bundle_remove_dead_insts(ptr %x) { +; CHECK-LABEL: define void @operand_bundle_remove_dead_insts( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: ret void +; + %gep = getelementptr i8, ptr %x, i64 8 + call void @llvm.assume(i1 true) ["align"(ptr %gep, i64 8)] + ret void +} + +define void @operand_bundle_no_args() { +; CHECK-LABEL: define void @operand_bundle_no_args() { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "cold"() ] +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["cold"()] + ret void +} + +; Can always drop ignore bundles, regardless of uses. +define ptr @operand_bundle_ignore(ptr %x) { +; CHECK-LABEL: define ptr @operand_bundle_ignore( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(ptr [[X]]) ] +; CHECK-NEXT: ret ptr [[X]] +; + call void @llvm.assume(i1 true) ["ignore"(), "ignore"(ptr %x), "nonnull"(ptr %x)] + ret ptr %x +} + +define void @operand_bundle_separate_storage_both_dead(ptr %x, ptr %y) { +; CHECK-LABEL: define void @operand_bundle_separate_storage_both_dead( +; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) { +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["separate_storage"(ptr %x, ptr %y)] + ret void +} + +define ptr @operand_bundle_separate_storage_one_live1(ptr %x, ptr %y) { +; CHECK-LABEL: define ptr @operand_bundle_separate_storage_one_live1( +; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "separate_storage"(ptr [[X]], ptr [[Y]]) ] +; CHECK-NEXT: ret ptr [[Y]] +; + call void @llvm.assume(i1 true) ["separate_storage"(ptr %x, ptr %y)] + ret ptr %y +} + +define ptr @operand_bundle_separate_storage_one_live2(ptr %x, ptr %y) { +; CHECK-LABEL: define ptr @operand_bundle_separate_storage_one_live2( +; CHECK-SAME: ptr [[X:%.*]], ptr [[Y:%.*]]) { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "separate_storage"(ptr [[X]], ptr [[Y]]) ] +; CHECK-NEXT: ret ptr [[X]] +; + call void @llvm.assume(i1 true) ["separate_storage"(ptr %x, ptr %y)] + ret ptr %x +} + define void @type_test(ptr %x) { ; CHECK-LABEL: define void @type_test( ; CHECK-SAME: ptr [[X:%.*]]) { @@ -94,3 +183,136 @@ define void @type_test(ptr %x) { call void @llvm.assume(i1 %test) ret void } + +define void @multiple_dead_conds(i32 %x) { +; CHECK-LABEL: define void @multiple_dead_conds( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: ret void +; + 
%cond1 = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond1) + %cond2 = icmp ne i32 %x, 64 + call void @llvm.assume(i1 %cond2) + ret void +} + +define void @multiple_dead_bundles(ptr %x) { +; CHECK-LABEL: define void @multiple_dead_bundles( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr %x, i64 8), "nonnull"(ptr %x)] + ret void +} + +; The assume is eliminated, but currently leaves behind a dead cycle. +define void @dead_cycle(i1 %loop.cond) { +; CHECK-LABEL: define void @dead_cycle( +; CHECK-SAME: i1 [[LOOP_COND:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: br i1 [[LOOP_COND]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %cond = icmp ne i32 %iv, 64 + call void @llvm.assume(i1 %cond) + %iv.next = add i32 %iv, 1 + br i1 %loop.cond, label %loop, label %exit + +exit: + ret void +} + +define void @use_in_side_effect(i32 %x) { +; CHECK-LABEL: define void @use_in_side_effect( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[COND:%.*]] = icmp sge i32 [[X]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[COND]]) +; CHECK-NEXT: call void @use(i32 [[X]]) +; CHECK-NEXT: ret void +; + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + call void @use(i32 %x) + ret void +} + +define void @indirect_use_in_side_effect(i32 %x) { +; CHECK-LABEL: define void @indirect_use_in_side_effect( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[COND:%.*]] = icmp sge i32 [[X]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[COND]]) +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[X]], 1 +; CHECK-NEXT: call void @use(i32 [[ADD]]) +; CHECK-NEXT: ret void +; + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + %add = add i32 %x, 1 + call void @use(i32 %add) + ret void +} + +; The affected value itself has a side effect, but we can still drop the +; assume. +define void @affected_value_has_side_effect() { +; CHECK-LABEL: define void @affected_value_has_side_effect() { +; CHECK-NEXT: [[X:%.*]] = call i32 @get() +; CHECK-NEXT: ret void +; + %x = call i32 @get() + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + ret void +} + +define i32 @affected_value_has_side_effect_and_is_used() { +; CHECK-LABEL: define i32 @affected_value_has_side_effect_and_is_used() { +; CHECK-NEXT: [[X:%.*]] = call i32 @get() +; CHECK-NEXT: [[COND:%.*]] = icmp sge i32 [[X]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[COND]]) +; CHECK-NEXT: ret i32 [[X]] +; + %x = call i32 @get() + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + ret i32 %x +} + +@g = external global i8 +@g2 = external global i8 + +; Assumes on globals are currently not supported. 
+define void @assume_on_global() { +; CHECK-LABEL: define void @assume_on_global() { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @g, i64 8) ] +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr @g, i64 8)] + ret void +} + +define void @assume_on_global_used_in_other_func() { +; CHECK-LABEL: define void @assume_on_global_used_in_other_func() { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @g2, i64 8) ] +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr @g2, i64 8)] + ret void +} + +define ptr @other_func() { +; CHECK-LABEL: define ptr @other_func() { +; CHECK-NEXT: ret ptr @g2 +; + ret ptr @g2 +} diff --git a/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll b/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll index f09a51c48a52f..922413a13cdf8 100644 --- a/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll +++ b/llvm/test/Transforms/FunctionAttrs/make-buffer-rsrc.ll @@ -9,8 +9,8 @@ define amdgpu_kernel void @test_make_buffer_rsrc(ptr %p, ptr %q) { ; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) ; FNATTRS-LABEL: define {{[^@]+}}@test_make_buffer_rsrc ; FNATTRS-SAME: (ptr readonly captures(none) [[P:%.*]], ptr writeonly captures(none) [[Q:%.*]]) #[[ATTR0:[0-9]+]] { -; FNATTRS-NEXT: [[P_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P]], i16 0, i32 4, i32 822243328) -; FNATTRS-NEXT: [[Q_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[Q]], i16 0, i32 4, i32 822243328) +; FNATTRS-NEXT: [[P_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P]], i16 0, i64 4, i32 822243328) +; FNATTRS-NEXT: [[Q_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[Q]], i16 0, i64 4, i32 822243328) ; FNATTRS-NEXT: [[V:%.*]] = call i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) [[P_RSRC]], i32 0, i32 0, i32 0) ; FNATTRS-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.i8(i8 [[V]], ptr addrspace(8) [[Q_RSRC]], i32 0, i32 0, i32 0) ; FNATTRS-NEXT: ret void @@ -18,21 +18,21 @@ define amdgpu_kernel void @test_make_buffer_rsrc(ptr %p, ptr %q) { ; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) ; ATTRIBUTOR-LABEL: define {{[^@]+}}@test_make_buffer_rsrc ; ATTRIBUTOR-SAME: (ptr nofree readonly captures(none) [[P:%.*]], ptr nofree writeonly captures(none) [[Q:%.*]]) #[[ATTR0:[0-9]+]] { -; ATTRIBUTOR-NEXT: [[P_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P]], i16 0, i32 4, i32 822243328) #[[ATTR4:[0-9]+]] -; ATTRIBUTOR-NEXT: [[Q_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[Q]], i16 0, i32 4, i32 822243328) #[[ATTR4]] +; ATTRIBUTOR-NEXT: [[P_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[P]], i16 0, i64 4, i32 822243328) #[[ATTR4:[0-9]+]] +; ATTRIBUTOR-NEXT: [[Q_RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr [[Q]], i16 0, i64 4, i32 822243328) #[[ATTR4]] ; ATTRIBUTOR-NEXT: [[V:%.*]] = call i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) readonly captures(none) [[P_RSRC]], i32 0, i32 0, i32 0) #[[ATTR5:[0-9]+]] ; ATTRIBUTOR-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.i8(i8 [[V]], ptr addrspace(8) writeonly captures(none) [[Q_RSRC]], i32 0, i32 0, i32 0) #[[ATTR6:[0-9]+]] ; ATTRIBUTOR-NEXT: ret void ; - %p.rsrc = call ptr addrspace(8) 
@llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i32 4, i32 822243328) - %q.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %q, i16 0, i32 4, i32 822243328) + %p.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 0, i64 4, i32 822243328) + %q.rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %q, i16 0, i64 4, i32 822243328) %v = call i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) %p.rsrc, i32 0, i32 0, i32 0) call void @llvm.amdgcn.raw.ptr.buffer.store.i8(i8 %v, ptr addrspace(8) %q.rsrc, i32 0, i32 0, i32 0) ret void } ; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none) -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr readnone, i16, i32, i32) #0 +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr readnone, i16, i64, i32) #0 ; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: read) declare i8 @llvm.amdgcn.raw.ptr.buffer.load.i8(ptr addrspace(8) nocapture readonly, i32, i32, i32 immarg) #1 diff --git a/llvm/test/Transforms/FunctionAttrs/nocapture.ll b/llvm/test/Transforms/FunctionAttrs/nocapture.ll index 60a4214548a72..8113ba65fe422 100644 --- a/llvm/test/Transforms/FunctionAttrs/nocapture.ll +++ b/llvm/test/Transforms/FunctionAttrs/nocapture.ll @@ -1398,5 +1398,73 @@ define void @assume_nonnull(ptr %p) { ret void } +define void @captures_metadata_address_is_null(ptr %x, ptr %y) { +; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) +; FNATTRS-LABEL: define void @captures_metadata_address_is_null +; FNATTRS-SAME: (ptr captures(address_is_null) [[X:%.*]], ptr writeonly captures(none) initializes((0, 8)) [[Y:%.*]]) #[[ATTR17]] { +; FNATTRS-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META0:![0-9]+]] +; FNATTRS-NEXT: ret void +; +; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) +; ATTRIBUTOR-LABEL: define void @captures_metadata_address_is_null +; ATTRIBUTOR-SAME: (ptr nofree writeonly [[X:%.*]], ptr nofree nonnull writeonly captures(none) [[Y:%.*]]) #[[ATTR13]] { +; ATTRIBUTOR-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META0:![0-9]+]] +; ATTRIBUTOR-NEXT: ret void +; + store ptr %x, ptr %y, !captures !{!"address_is_null"} + ret void +} + +define void @captures_metadata_address(ptr %x, ptr %y) { +; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) +; FNATTRS-LABEL: define void @captures_metadata_address +; FNATTRS-SAME: (ptr captures(address) [[X:%.*]], ptr writeonly captures(none) initializes((0, 8)) [[Y:%.*]]) #[[ATTR17]] { +; FNATTRS-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META1:![0-9]+]] +; FNATTRS-NEXT: ret void +; +; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) +; ATTRIBUTOR-LABEL: define void @captures_metadata_address +; ATTRIBUTOR-SAME: (ptr nofree writeonly [[X:%.*]], ptr nofree nonnull writeonly captures(none) [[Y:%.*]]) #[[ATTR13]] { +; ATTRIBUTOR-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META1:![0-9]+]] +; ATTRIBUTOR-NEXT: ret void +; + store ptr %x, ptr %y, !captures !{!"address"} + ret void +} + +define void @captures_metadata_address_read_provenance(ptr %x, ptr %y) { +; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) +; FNATTRS-LABEL: define void 
@captures_metadata_address_read_provenance +; FNATTRS-SAME: (ptr captures(address, read_provenance) [[X:%.*]], ptr writeonly captures(none) initializes((0, 8)) [[Y:%.*]]) #[[ATTR17]] { +; FNATTRS-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META2:![0-9]+]] +; FNATTRS-NEXT: ret void +; +; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) +; ATTRIBUTOR-LABEL: define void @captures_metadata_address_read_provenance +; ATTRIBUTOR-SAME: (ptr nofree writeonly [[X:%.*]], ptr nofree nonnull writeonly captures(none) [[Y:%.*]]) #[[ATTR13]] { +; ATTRIBUTOR-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META2:![0-9]+]] +; ATTRIBUTOR-NEXT: ret void +; + store ptr %x, ptr %y, !captures !{!"address", !"read_provenance"} + ret void +} + +define void @captures_metadata_provenance(ptr %x, ptr %y) { +; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) +; FNATTRS-LABEL: define void @captures_metadata_provenance +; FNATTRS-SAME: (ptr captures(provenance) [[X:%.*]], ptr writeonly captures(none) initializes((0, 8)) [[Y:%.*]]) #[[ATTR17]] { +; FNATTRS-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META3:![0-9]+]] +; FNATTRS-NEXT: ret void +; +; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: write) +; ATTRIBUTOR-LABEL: define void @captures_metadata_provenance +; ATTRIBUTOR-SAME: (ptr nofree writeonly [[X:%.*]], ptr nofree nonnull writeonly captures(none) [[Y:%.*]]) #[[ATTR13]] { +; ATTRIBUTOR-NEXT: store ptr [[X]], ptr [[Y]], align 8, !captures [[META3:![0-9]+]] +; ATTRIBUTOR-NEXT: ret void +; + store ptr %x, ptr %y, !captures !{!"provenance"} + ret void +} + declare ptr @llvm.launder.invariant.group.p0(ptr) declare ptr @llvm.strip.invariant.group.p0(ptr) diff --git a/llvm/test/Transforms/FunctionAttrs/noreturn.ll b/llvm/test/Transforms/FunctionAttrs/noreturn.ll index fa80f6c2eced4..ae2ccb02733f3 100644 --- a/llvm/test/Transforms/FunctionAttrs/noreturn.ll +++ b/llvm/test/Transforms/FunctionAttrs/noreturn.ll @@ -81,9 +81,9 @@ define void @unreachable() { ; CHECK: @coro define void @coro() presplitcoroutine { call token @llvm.coro.id.retcon.once(i32 0, i32 0, ptr null, ptr @coro, ptr null, ptr null) - call i1 (ptr, i1, ...) @llvm.coro.end(ptr null, i1 false) + call void (ptr, i1, ...) @llvm.coro.end(ptr null, i1 false) unreachable } declare token @llvm.coro.id.retcon.once(i32 %size, i32 %align, ptr %buffer, ptr %prototype, ptr %alloc, ptr %free) -declare i1 @llvm.coro.end(ptr, i1, ...) +declare void @llvm.coro.end(ptr, i1, ...) 
diff --git a/llvm/test/Transforms/GVN/condprop.ll b/llvm/test/Transforms/GVN/condprop.ll index 15ffcbff1e157..eb2a9f1e847c4 100644 --- a/llvm/test/Transforms/GVN/condprop.ll +++ b/llvm/test/Transforms/GVN/condprop.ll @@ -321,6 +321,66 @@ different: ret i1 %cmp3 } +define i1 @test6_phi1(i1 %c, i32 %x, i32 %y) { +; CHECK-LABEL: @test6_phi1( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ne i32 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]] +; CHECK: bb1: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[Y]] +; CHECK-NEXT: br i1 [[CMP]], label [[BB2]], label [[BB3:%.*]] +; CHECK: bb2: +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ false, [[BB1]] ], [ true, [[ENTRY:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] +; CHECK: bb3: +; CHECK-NEXT: ret i1 false +; +entry: + %cmp.not = icmp ne i32 %x, %y + br i1 %c, label %bb1, label %bb2 + +bb1: + %cmp = icmp eq i32 %x, %y + br i1 %cmp, label %bb2, label %bb3 + +bb2: + %phi = phi i1 [ %cmp.not, %bb1 ], [ true, %entry ] + ret i1 %phi + +bb3: + ret i1 false +} + +define i1 @test6_phi2(i1 %c, i32 %x, i32 %y) { +; CHECK-LABEL: @test6_phi2( +; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 [[C:%.*]], label [[BB1:%.*]], label [[BB2:%.*]] +; CHECK: bb1: +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ne i32 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[Y]] +; CHECK-NEXT: br i1 [[CMP]], label [[BB2]], label [[BB3:%.*]] +; CHECK: bb2: +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ [[CMP_NOT]], [[BB1]] ], [ true, [[ENTRY:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] +; CHECK: bb3: +; CHECK-NEXT: ret i1 false +; +entry: + br i1 %c, label %bb1, label %bb2 + +bb1: + %cmp.not = icmp ne i32 %x, %y + %cmp = icmp eq i32 %x, %y + br i1 %cmp, label %bb2, label %bb3 + +bb2: + %phi = phi i1 [ %cmp.not, %bb1 ], [ true, %entry ] + ret i1 %phi + +bb3: + ret i1 false +} + define i1 @test7(i32 %x, i32 %y) { ; CHECK-LABEL: @test7( ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]] diff --git a/llvm/test/Transforms/GlobalOpt/fastcc.ll b/llvm/test/Transforms/GlobalOpt/fastcc.ll index 854357e6fad97..edbd602a97f3b 100644 --- a/llvm/test/Transforms/GlobalOpt/fastcc.ll +++ b/llvm/test/Transforms/GlobalOpt/fastcc.ll @@ -1,16 +1,25 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=globalopt -S | FileCheck %s declare token @llvm.call.preallocated.setup(i32) declare ptr @llvm.call.preallocated.arg(token, i32) define internal i32 @f(ptr %m) { -; CHECK-LABEL: define internal fastcc i32 @f +; CHECK-LABEL: define internal fastcc i32 @f( +; CHECK-SAME: ptr [[M:%.*]]) unnamed_addr { +; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[M]], align 4 +; CHECK-NEXT: ret i32 [[V]] +; %v = load i32, ptr %m ret i32 %v } define internal x86_thiscallcc i32 @g(ptr %m) { -; CHECK-LABEL: define internal fastcc i32 @g +; CHECK-LABEL: define internal fastcc i32 @g( +; CHECK-SAME: ptr [[M:%.*]]) unnamed_addr { +; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[M]], align 4 +; CHECK-NEXT: ret i32 [[V]] +; %v = load i32, ptr %m ret i32 %v } @@ -18,41 +27,80 @@ define internal x86_thiscallcc i32 @g(ptr %m) { ; Leave this one alone, because the user went out of their way to request this ; convention. 
define internal coldcc i32 @h(ptr %m) { -; CHECK-LABEL: define internal coldcc i32 @h +; CHECK-LABEL: define internal coldcc i32 @h( +; CHECK-SAME: ptr [[M:%.*]]) unnamed_addr { +; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[M]], align 4 +; CHECK-NEXT: ret i32 [[V]] +; %v = load i32, ptr %m ret i32 %v } define internal i32 @j(ptr %m) { -; CHECK-LABEL: define internal i32 @j +; CHECK-LABEL: define internal i32 @j( +; CHECK-SAME: ptr [[M:%.*]]) { +; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[M]], align 4 +; CHECK-NEXT: ret i32 [[V]] +; %v = load i32, ptr %m ret i32 %v } define internal i32 @inalloca(ptr inalloca(i32) %p) { -; CHECK-LABEL: define internal fastcc i32 @inalloca(ptr %p) +; CHECK-LABEL: define internal fastcc i32 @inalloca( +; CHECK-SAME: ptr [[P:%.*]]) unnamed_addr { +; CHECK-NEXT: [[RV:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: ret i32 [[RV]] +; %rv = load i32, ptr %p ret i32 %rv } define i32 @inalloca2_caller(ptr inalloca(i32) %p) { +; CHECK-LABEL: define i32 @inalloca2_caller( +; CHECK-SAME: ptr inalloca(i32) [[P:%.*]]) local_unnamed_addr { +; CHECK-NEXT: [[RV:%.*]] = musttail call i32 @inalloca2(ptr inalloca(i32) [[P]]) +; CHECK-NEXT: ret i32 [[RV]] +; %rv = musttail call i32 @inalloca2(ptr inalloca(i32) %p) ret i32 %rv } define internal i32 @inalloca2(ptr inalloca(i32) %p) { ; Because of the musttail caller, this inalloca cannot be dropped. -; CHECK-LABEL: define internal i32 @inalloca2(ptr inalloca(i32) %p) +; CHECK-LABEL: define internal i32 @inalloca2( +; CHECK-SAME: ptr inalloca(i32) [[P:%.*]]) unnamed_addr { +; CHECK-NEXT: [[RV:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: ret i32 [[RV]] +; %rv = load i32, ptr %p ret i32 %rv } define internal i32 @preallocated(ptr preallocated(i32) %p) { -; CHECK-LABEL: define internal fastcc i32 @preallocated(ptr %p) +; CHECK-LABEL: define internal fastcc i32 @preallocated( +; CHECK-SAME: ptr [[P:%.*]]) unnamed_addr { +; CHECK-NEXT: [[RV:%.*]] = load i32, ptr [[P]], align 4 +; CHECK-NEXT: ret i32 [[RV]] +; %rv = load i32, ptr %p ret i32 %rv } define void @call_things() { +; CHECK-LABEL: define void @call_things() local_unnamed_addr { +; CHECK-NEXT: [[M:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[TMP1:%.*]] = call fastcc i32 @f(ptr [[M]]) +; CHECK-NEXT: [[TMP2:%.*]] = call fastcc i32 @g(ptr [[M]]) +; CHECK-NEXT: [[TMP3:%.*]] = call coldcc i32 @h(ptr [[M]]) +; CHECK-NEXT: [[TMP4:%.*]] = call i32 @j(ptr [[M]]) +; CHECK-NEXT: [[ARGS:%.*]] = alloca inalloca i32, align 4 +; CHECK-NEXT: [[TMP5:%.*]] = call fastcc i32 @inalloca(ptr [[ARGS]]) +; CHECK-NEXT: [[TMP6:%.*]] = call ptr @llvm.stacksave.p0() +; CHECK-NEXT: [[PAARG:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[TMP7:%.*]] = call fastcc i32 @preallocated(ptr [[PAARG]]) +; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP6]]) +; CHECK-NEXT: ret void +; %m = alloca i32 call i32 @f(ptr %m) call x86_thiscallcc i32 @g(ptr %m) @@ -65,15 +113,25 @@ define void @call_things() { call i32 @preallocated(ptr preallocated(i32) %N) ["preallocated"(token %c)] ret void } -; CHECK-LABEL: define void @call_things() -; CHECK: call fastcc i32 @f -; CHECK: call fastcc i32 @g -; CHECK: call coldcc i32 @h -; CHECK: call i32 @j -; CHECK: call fastcc i32 @inalloca(ptr %args) -; CHECK-NOT: llvm.call.preallocated -; CHECK: call fastcc i32 @preallocated(ptr %paarg) @llvm.used = appending global [1 x ptr] [ - ptr @j + ptr @j ], section "llvm.metadata" + +define internal i32 @assume_fastcc() { +; CHECK-LABEL: define internal fastcc i32 @assume_fastcc() { +; CHECK-NEXT: [[OBJSIZE:%.*]] = call i32 
@llvm.objectsize.i32.p0(ptr @assume_fastcc, i1 false, i1 false, i1 false) +; CHECK-NEXT: ret i32 [[OBJSIZE]] +; + %objsize = call i32 @llvm.objectsize.i32.p0(ptr @assume_fastcc, i1 false, i1 false, i1 false) + ret i32 %objsize +} + +define internal i32 @constexpr_self_user() addrspace(1) { +; CHECK-LABEL: define internal fastcc i32 @constexpr_self_user() addrspace(1) { +; CHECK-NEXT: [[OBJSIZE:%.*]] = call i32 @llvm.objectsize.i32.p0(ptr addrspacecast (ptr addrspace(1) @constexpr_self_user to ptr), i1 false, i1 false, i1 false) +; CHECK-NEXT: ret i32 [[OBJSIZE]] +; + %objsize = call i32 @llvm.objectsize.i32.p0(ptr addrspacecast (ptr addrspace(1) @constexpr_self_user to ptr), i1 false, i1 false, i1 false) + ret i32 %objsize +} diff --git a/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll b/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll index 4a59e419369af..cb4e07ef3e26b 100644 --- a/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll +++ b/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll @@ -1,10 +1,29 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -S -passes=indvars < %s | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" define void @f_sadd(ptr %a) { -; CHECK-LABEL: @f_sadd( +; CHECK-LABEL: define void @f_sadd( +; CHECK-SAME: ptr [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 +; CHECK-NEXT: br i1 false, label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0:![0-9]+]] +; CHECK: [[TRAP]]: +; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] +; CHECK-NEXT: unreachable, !nosanitize [[META0]] +; CHECK: [[CONT]]: +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], 16 +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] +; entry: br label %for.body @@ -18,9 +37,6 @@ for.body: ; preds = %entry, %cont store i8 0, ptr %arrayidx, align 1 %0 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i.04, i32 1) %1 = extractvalue { i32, i1 } %0, 1 -; CHECK: for.body: -; CHECK-NOT: @llvm.sadd.with.overflow -; CHECK: br i1 false, label %trap, label %cont, !nosanitize !0 br i1 %1, label %trap, label %cont, !nosanitize !{} trap: ; preds = %for.body @@ -33,8 +49,71 @@ cont: ; preds = %for.body br i1 %cmp, label %for.body, label %for.cond.cleanup } +define void @f_sadd_overflow(ptr %a) { +; CHECK-LABEL: define void @f_sadd_overflow( +; CHECK-SAME: ptr [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ 2147483645, %[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 2147483647 +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; 
CHECK: [[TRAP]]: +; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] +; CHECK-NEXT: unreachable, !nosanitize [[META0]] +; CHECK: [[CONT]]: +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 +; CHECK-NEXT: br i1 true, label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] +; +entry: + br label %for.body + +for.cond.cleanup: ; preds = %cont + ret void + +for.body: ; preds = %entry, %cont + %i.04 = phi i32 [ 2147483645, %entry ], [ %2, %cont ] + %idxprom = sext i32 %i.04 to i64 + %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom + store i8 0, ptr %arrayidx, align 1 + %0 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %i.04, i32 1) + %1 = extractvalue { i32, i1 } %0, 1 + br i1 %1, label %trap, label %cont, !nosanitize !{} + +trap: ; preds = %for.body + tail call void @llvm.trap() #2, !nosanitize !{} + unreachable, !nosanitize !{} + +cont: ; preds = %for.body + %2 = extractvalue { i32, i1 } %0, 0 + %cmp = icmp sle i32 %2, 2147483647 + br i1 %cmp, label %for.body, label %for.cond.cleanup +} + define void @f_uadd(ptr %a) { -; CHECK-LABEL: @f_uadd( +; CHECK-LABEL: define void @f_uadd( +; CHECK-SAME: ptr [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 +; CHECK-NEXT: br i1 false, label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK: [[TRAP]]: +; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] +; CHECK-NEXT: unreachable, !nosanitize [[META0]] +; CHECK: [[CONT]]: +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], 16 +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] +; entry: br label %for.body @@ -48,9 +127,6 @@ for.body: ; preds = %entry, %cont store i8 0, ptr %arrayidx, align 1 %0 = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %i.04, i32 1) %1 = extractvalue { i32, i1 } %0, 1 -; CHECK: for.body: -; CHECK-NOT: @llvm.uadd.with.overflow -; CHECK: br i1 false, label %trap, label %cont, !nosanitize !0 br i1 %1, label %trap, label %cont, !nosanitize !{} trap: ; preds = %for.body @@ -63,8 +139,71 @@ cont: ; preds = %for.body br i1 %cmp, label %for.body, label %for.cond.cleanup } +define void @f_uadd_overflow(ptr %a) { +; CHECK-LABEL: define void @f_uadd_overflow( +; CHECK-SAME: ptr [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ -6, %[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], -1 +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK: [[TRAP]]: +; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] +; CHECK-NEXT: unreachable, !nosanitize [[META0]] +; CHECK: [[CONT]]: +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], 1 +; CHECK-NEXT: br i1 true, label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] +; 
+entry: + br label %for.body + +for.cond.cleanup: ; preds = %cont + ret void + +for.body: ; preds = %entry, %cont + %i.04 = phi i32 [ 4294967290, %entry ], [ %2, %cont ] + %idxprom = sext i32 %i.04 to i64 + %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom + store i8 0, ptr %arrayidx, align 1 + %0 = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %i.04, i32 1) + %1 = extractvalue { i32, i1 } %0, 1 + br i1 %1, label %trap, label %cont, !nosanitize !{} + +trap: ; preds = %for.body + tail call void @llvm.trap(), !nosanitize !{} + unreachable, !nosanitize !{} + +cont: ; preds = %for.body + %2 = extractvalue { i32, i1 } %0, 0 + %cmp = icmp ule i32 %2, 4294967295 + br i1 %cmp, label %for.body, label %for.cond.cleanup +} + define void @f_ssub(ptr nocapture %a) { -; CHECK-LABEL: @f_ssub( +; CHECK-LABEL: define void @f_ssub( +; CHECK-SAME: ptr captures(none) [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ 15, %[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1 +; CHECK-NEXT: br i1 false, label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK: [[TRAP]]: +; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] +; CHECK-NEXT: unreachable, !nosanitize [[META0]] +; CHECK: [[CONT]]: +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV_NEXT]], -1 +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] +; entry: br label %for.body @@ -78,9 +217,6 @@ for.body: ; preds = %entry, %cont store i8 0, ptr %arrayidx, align 1 %0 = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %i.04, i32 1) %1 = extractvalue { i32, i1 } %0, 1 -; CHECK: for.body: -; CHECK-NOT: @llvm.ssub.with.overflow.i32 -; CHECK: br i1 false, label %trap, label %cont, !nosanitize !0 br i1 %1, label %trap, label %cont, !nosanitize !{} trap: ; preds = %for.body @@ -93,8 +229,76 @@ cont: ; preds = %for.body br i1 %cmp, label %for.body, label %for.cond.cleanup } +; It is theoretically possible to replace the `ssub.with.overflow` with a +; condition on the IV, but SCEV cannot represent non-unsigned-wrapping +; subtraction operations. 
+define void @f_ssub_overflow(ptr nocapture %a) { +; CHECK-LABEL: define void @f_ssub_overflow( +; CHECK-SAME: ptr captures(none) [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ -2147483642, %[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP0:%.*]] = trunc nsw i64 [[INDVARS_IV]] to i32 +; CHECK-NEXT: [[TMP1:%.*]] = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[TMP0]], i32 1) +; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1 +; CHECK-NEXT: br i1 [[TMP2]], label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK: [[TRAP]]: +; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] +; CHECK-NEXT: unreachable, !nosanitize [[META0]] +; CHECK: [[CONT]]: +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1 +; CHECK-NEXT: br i1 true, label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] +; +entry: + br label %for.body + +for.cond.cleanup: ; preds = %cont + ret void + +for.body: ; preds = %entry, %cont + %i.04 = phi i32 [ -2147483642, %entry ], [ %2, %cont ] + %idxprom = sext i32 %i.04 to i64 + %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom + store i8 0, ptr %arrayidx, align 1 + %0 = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %i.04, i32 1) + %1 = extractvalue { i32, i1 } %0, 1 + br i1 %1, label %trap, label %cont, !nosanitize !{} + +trap: ; preds = %for.body + tail call void @llvm.trap(), !nosanitize !{} + unreachable, !nosanitize !{} + +cont: ; preds = %for.body + %2 = extractvalue { i32, i1 } %0, 0 + %cmp = icmp sge i32 %2, -2147483648 + br i1 %cmp, label %for.body, label %for.cond.cleanup +} + define void @f_usub(ptr nocapture %a) { -; CHECK-LABEL: @f_usub( +; CHECK-LABEL: define void @f_usub( +; CHECK-SAME: ptr captures(none) [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ 15, %[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1 +; CHECK-NEXT: br i1 false, label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK: [[TRAP]]: +; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] +; CHECK-NEXT: unreachable, !nosanitize [[META0]] +; CHECK: [[CONT]]: +; CHECK-NEXT: [[CMP:%.*]] = icmp samesign ugt i64 [[INDVARS_IV_NEXT]], 0 +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] +; entry: br label %for.body @@ -109,9 +313,6 @@ for.body: ; preds = %entry, %cont %0 = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %i.04, i32 1) %1 = extractvalue { i32, i1 } %0, 1 -; CHECK: for.body: -; CHECK-NOT: @llvm.usub.with.overflow.i32 -; CHECK: br i1 false, label %trap, label %cont, !nosanitize !0 br i1 %1, label %trap, label %cont, !nosanitize !{} trap: ; preds = %for.body @@ -124,8 +325,31 @@ cont: ; preds = %for.body br i1 %cmp, label %for.body, label %for.cond.cleanup } +; It is theoretically possible to replace the `usub.with.overflow` with a +; condition on the IV, but SCEV 
cannot represent non-unsigned-wrapping +; subtraction operations. define void @f_usub_overflow(ptr nocapture %a) { -; CHECK-LABEL: @f_usub_overflow( +; CHECK-LABEL: define void @f_usub_overflow( +; CHECK-SAME: ptr captures(none) [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ 15, %[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] +; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP0:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i32 +; CHECK-NEXT: [[TMP1:%.*]] = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[TMP0]], i32 1) +; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1 +; CHECK-NEXT: br i1 [[TMP2]], label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK: [[TRAP]]: +; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] +; CHECK-NEXT: unreachable, !nosanitize [[META0]] +; CHECK: [[CONT]]: +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1 +; CHECK-NEXT: br i1 true, label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] +; entry: br label %for.body @@ -139,13 +363,6 @@ for.body: ; preds = %entry, %cont store i8 0, ptr %arrayidx, align 1 %0 = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %i.04, i32 1) %1 = extractvalue { i32, i1 } %0, 1 - -; It is theoretically possible to prove this, but SCEV cannot -; represent non-unsigned-wrapping subtraction operations. - -; CHECK: for.body: -; CHECK: [[COND:%[^ ]+]] = extractvalue { i32, i1 } %1, 1 -; CHECK-NEXT: br i1 [[COND]], label %trap, label %cont, !nosanitize !0 br i1 %1, label %trap, label %cont, !nosanitize !{} trap: ; preds = %for.body @@ -166,3 +383,6 @@ declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone declare void @llvm.trap() #2 +;. +; CHECK: [[META0]] = !{} +;. 
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll index ebc5c0d717c6d..678d462b0c1b7 100644 --- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll +++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll @@ -200,17 +200,17 @@ define amdgpu_kernel void @load_to_lds_fat_pointer_as_flat(ptr addrspace(7) %buf ret void } -define amdgpu_kernel void @make_buffer_rsrc_global_as_flat(ptr addrspace(1) %global, i32 %extent) { +define amdgpu_kernel void @make_buffer_rsrc_global_as_flat(ptr addrspace(1) %global, i64 %extent) { ;; NOTE: flags value not representative of real input ; CHECK-LABEL: define amdgpu_kernel void @make_buffer_rsrc_global_as_flat( -; CHECK-SAME: ptr addrspace(1) [[GLOBAL:%.*]], i32 [[EXTENT:%.*]]) { -; CHECK-NEXT: [[BUFFER_FAT_PTR:%.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[GLOBAL]], i16 0, i32 [[EXTENT]], i32 0) -; CHECK-NEXT: store i32 [[EXTENT]], ptr addrspace(7) [[BUFFER_FAT_PTR]], align 4 +; CHECK-SAME: ptr addrspace(1) [[GLOBAL:%.*]], i64 [[EXTENT:%.*]]) { +; CHECK-NEXT: [[BUFFER_FAT_PTR:%.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) [[GLOBAL]], i16 0, i64 [[EXTENT]], i32 0) +; CHECK-NEXT: store i64 [[EXTENT]], ptr addrspace(7) [[BUFFER_FAT_PTR]], align 8 ; CHECK-NEXT: ret void ; %cast = addrspacecast ptr addrspace(1) %global to ptr - %buffer.fat.ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %cast, i16 0, i32 %extent, i32 0) - store i32 %extent, ptr addrspace(7) %buffer.fat.ptr + %buffer.fat.ptr = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr %cast, i16 0, i64 %extent, i32 0) + store i64 %extent, ptr addrspace(7) %buffer.fat.ptr ret void } @@ -221,7 +221,7 @@ declare void @llvm.memcpy.p0.p3.i32(ptr nocapture writeonly, ptr addrspace(3) no declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1 declare void @llvm.amdgcn.load.to.lds.p0(ptr nocapture readonly, ptr addrspace(3) nocapture writeonly, i32 immarg, i32 immarg, i32 immarg) #1 -declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr readnone, i16, i32, i32) #0 +declare ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p0(ptr readnone, i16, i64, i32) #0 attributes #0 = { nounwind } attributes #1 = { argmemonly nounwind } diff --git a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll index c3f6dd700b451..466499a07cb1b 100644 --- a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll +++ b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll @@ -38,7 +38,7 @@ await.ready: call void @print(i32 %val) br label %exit exit: - call i1 @llvm.coro.end(ptr null, i1 false) + call void @llvm.coro.end(ptr null, i1 false) ret void } @@ -53,6 +53,6 @@ declare ptr @llvm.coro.frame() #5 declare i8 @llvm.coro.suspend(token, i1) #3 declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10 declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2 -declare i1 @llvm.coro.end(ptr, i1) #3 +declare void @llvm.coro.end(ptr, i1) #3 declare void @llvm.lifetime.start.p0(ptr nocapture) #4 declare void @llvm.lifetime.end.p0(ptr nocapture) #4 diff --git a/llvm/test/Transforms/Inline/dilocation-loop-metadata-update.ll b/llvm/test/Transforms/Inline/dilocation-loop-metadata-update.ll new file mode 100644 index 0000000000000..1bc132663331b --- /dev/null +++ 
b/llvm/test/Transforms/Inline/dilocation-loop-metadata-update.ll @@ -0,0 +1,83 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=inline -S | FileCheck %s + +; When inlining we need to update DILocation recursively for the followup +; metadata when updating llvm.loop metadata. + +define void @a() !dbg !3 { +; CHECK-LABEL: define void @a( +; CHECK-SAME: ) !dbg [[DBG3:![0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: br label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + br label %for.body, !llvm.loop !6 +} + +define void @f() !dbg !17 { +; CHECK-LABEL: define void @f( +; CHECK-SAME: ) !dbg [[DBG17:![0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[A_EXIT:.*]] +; CHECK: [[A_EXIT]]: +; CHECK-NEXT: br label %[[A_EXIT]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK: [[A_EXIT1:.*:]] +; CHECK-NEXT: ret void +; +entry: + call void @a(), !dbg !18 + ret void +} + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!2} + +!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang", isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug, splitDebugInlining: false, nameTableKind: None) +!1 = !DIFile(filename: "foo.c", directory: "/") +!2 = !{i32 2, !"Debug Info Version", i32 3} +!3 = distinct !DISubprogram(name: "a", scope: !1, file: !1, line: 3, type: !4, scopeLine: 3, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0) +!4 = !DISubroutineType(types: !5) +!5 = !{} +!6 = distinct !{!6, !7, !8, !9, !10, !11} +!7 = !DILocation(line: 6, column: 3, scope: !3) +!8 = !DILocation(line: 7, column: 22, scope: !3) +!9 = !{!"llvm.loop.mustprogress"} +!10 = !{!"llvm.loop.distribute.enable", i1 true} +!11 = !{!"llvm.loop.distribute.followup_all", !7, !8, !9, !12, !13, !14} +!12 = !{!"llvm.loop.vectorize.width", i32 8} +!13 = !{!"llvm.loop.vectorize.enable", i1 true} +!14 = !{!"llvm.loop.vectorize.followup_all", !7, !8, !9, !15, !16} +!15 = !{!"llvm.loop.isvectorized"} +!16 = !{!"llvm.loop.unroll.count", i32 1} +!17 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 9, type: !4, scopeLine: 9, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0) +!18 = !DILocation(line: 9, column: 12, scope: !17) +;. 
+; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], producer: "clang", isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug, splitDebugInlining: false, nameTableKind: None) +; CHECK: [[META1]] = !DIFile(filename: "{{.*}}foo.c", directory: {{.*}}) +; CHECK: [[DBG3]] = distinct !DISubprogram(name: "a", scope: [[META1]], file: [[META1]], line: 3, type: [[META4:![0-9]+]], scopeLine: 3, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]]) +; CHECK: [[META4]] = !DISubroutineType(types: [[META5:![0-9]+]]) +; CHECK: [[META5]] = !{} +; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]], [[META9:![0-9]+]], [[META10:![0-9]+]], [[META11:![0-9]+]]} +; CHECK: [[META7]] = !DILocation(line: 6, column: 3, scope: [[DBG3]]) +; CHECK: [[META8]] = !DILocation(line: 7, column: 22, scope: [[DBG3]]) +; CHECK: [[META9]] = !{!"llvm.loop.mustprogress"} +; CHECK: [[META10]] = !{!"llvm.loop.distribute.enable", i1 true} +; CHECK: [[META11]] = !{!"llvm.loop.distribute.followup_all", [[META7]], [[META8]], [[META9]], [[META12:![0-9]+]], [[META13:![0-9]+]], [[META14:![0-9]+]]} +; CHECK: [[META12]] = !{!"llvm.loop.vectorize.width", i32 8} +; CHECK: [[META13]] = !{!"llvm.loop.vectorize.enable", i1 true} +; CHECK: [[META14]] = !{!"llvm.loop.vectorize.followup_all", [[META7]], [[META8]], [[META9]], [[META15:![0-9]+]], [[META16:![0-9]+]]} +; CHECK: [[META15]] = !{!"llvm.loop.isvectorized"} +; CHECK: [[META16]] = !{!"llvm.loop.unroll.count", i32 1} +; CHECK: [[DBG17]] = distinct !DISubprogram(name: "f", scope: [[META1]], file: [[META1]], line: 9, type: [[META4]], scopeLine: 9, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]]) +; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META19:![0-9]+]], [[META21:![0-9]+]], [[META9]], [[META10]], [[META22:![0-9]+]]} +; CHECK: [[META19]] = !DILocation(line: 6, column: 3, scope: [[DBG3]], inlinedAt: [[META20:![0-9]+]]) +; CHECK: [[META20]] = distinct !DILocation(line: 9, column: 12, scope: [[DBG17]]) +; CHECK: [[META21]] = !DILocation(line: 7, column: 22, scope: [[DBG3]], inlinedAt: [[META20]]) +; CHECK: [[META22]] = !{!"llvm.loop.distribute.followup_all", [[META19]], [[META21]], [[META9]], [[META12]], [[META13]], [[META23:![0-9]+]]} +; CHECK: [[META23]] = !{!"llvm.loop.vectorize.followup_all", [[META19]], [[META21]], [[META9]], [[META15]], [[META16]]} +;. 
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll index 077da9cda6523..3ff9439040438 100644 --- a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll +++ b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll @@ -6527,15 +6527,15 @@ define ptr addrspace(8) @make_buffer_rsrc_poison() { ; CHECK-LABEL: @make_buffer_rsrc_poison( ; CHECK-NEXT: ret ptr addrspace(8) poison ; - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) poison, i16 0, i32 1234, i32 5678) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) poison, i16 0, i64 1234, i32 5678) ret ptr addrspace(8) %rsrc } define ptr addrspace(8) @make_buffer_rsrc_undef() { ; CHECK-LABEL: @make_buffer_rsrc_undef( -; CHECK-NEXT: [[RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) undef, i16 0, i32 1234, i32 5678) +; CHECK-NEXT: [[RSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) undef, i16 0, i64 1234, i32 5678) ; CHECK-NEXT: ret ptr addrspace(8) [[RSRC]] ; - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) undef, i16 0, i32 1234, i32 5678) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) undef, i16 0, i64 1234, i32 5678) ret ptr addrspace(8) %rsrc } diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/ptr-replace-alloca.ll b/llvm/test/Transforms/InstCombine/AMDGPU/ptr-replace-alloca.ll index beb84362b7f92..90877be255e0f 100644 --- a/llvm/test/Transforms/InstCombine/AMDGPU/ptr-replace-alloca.ll +++ b/llvm/test/Transforms/InstCombine/AMDGPU/ptr-replace-alloca.ll @@ -109,4 +109,23 @@ bb: ret void } +@global = external addrspace(1) constant [16 x float], align 64 + +define float @issue160302(i1 %cond, ptr addrspace(5) %arg) { +; CHECK-LABEL: define float @issue160302( +; CHECK-SAME: i1 [[COND:%.*]], ptr addrspace(5) [[ARG:%.*]]) { +; CHECK-NEXT: [[AGG_TMP2_I4:%.*]] = alloca [16 x float], align 64, addrspace(5) +; CHECK-NEXT: [[SELECT_PTR:%.*]] = select i1 [[COND]], ptr addrspace(5) [[AGG_TMP2_I4]], ptr addrspace(5) [[ARG]] +; CHECK-NEXT: [[COND_I:%.*]] = load float, ptr addrspace(5) [[SELECT_PTR]], align 4 +; CHECK-NEXT: ret float [[COND_I]] +; + %agg.tmp2.i4 = alloca [16 x float], align 64, addrspace(5) + call void @llvm.memcpy.p5.p1.i64(ptr addrspace(5) %agg.tmp2.i4, ptr addrspace(1) @global, i64 0, i1 false) + %m_Data.i14.i = getelementptr [16 x float], ptr addrspace(5) %agg.tmp2.i4, i32 0, i32 0 + %gep = getelementptr [16 x float], ptr addrspace(5) %arg, i32 0, i32 0 + %select.ptr = select i1 %cond, ptr addrspace(5) %m_Data.i14.i, ptr addrspace(5) %gep + %cond.i = load float, ptr addrspace(5) %select.ptr, align 4 + ret float %cond.i +} + declare void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noalias writeonly captures(none), ptr addrspace(4) noalias readonly captures(none), i64, i1 immarg) #0 diff --git a/llvm/test/Transforms/InstCombine/NVPTX/nvvm-intrins.ll b/llvm/test/Transforms/InstCombine/NVPTX/nvvm-intrins.ll index 1819f4ed181c0..4d856699b2d24 100644 --- a/llvm/test/Transforms/InstCombine/NVPTX/nvvm-intrins.ll +++ b/llvm/test/Transforms/InstCombine/NVPTX/nvvm-intrins.ll @@ -185,52 +185,63 @@ define float @trunc_float_ftz(float %a) #0 { } ; Check NVVM intrinsics that correspond to LLVM cast operations. +; fp -> integer casts should not be converted, as the semantics +; for NaN/Inf/Overflow inputs are different. 
+; Only integer -> fp casts should be converted. ; CHECK-LABEL: @test_d2i define i32 @test_d2i(double %a) #0 { -; CHECK: fptosi double %a to i32 +; CHECK: call i32 @llvm.nvvm.d2i.rz(double %a) +; CHECK-NOT: fptosi double %a to i32 %ret = call i32 @llvm.nvvm.d2i.rz(double %a) ret i32 %ret } ; CHECK-LABEL: @test_f2i define i32 @test_f2i(float %a) #0 { -; CHECK: fptosi float %a to i32 +; CHECK: call i32 @llvm.nvvm.f2i.rz(float %a) +; CHECK-NOT: fptosi float %a to i32 %ret = call i32 @llvm.nvvm.f2i.rz(float %a) ret i32 %ret } ; CHECK-LABEL: @test_d2ll define i64 @test_d2ll(double %a) #0 { -; CHECK: fptosi double %a to i64 +; CHECK: call i64 @llvm.nvvm.d2ll.rz(double %a) +; CHECK-NOT: fptosi double %a to i64 %ret = call i64 @llvm.nvvm.d2ll.rz(double %a) ret i64 %ret } ; CHECK-LABEL: @test_f2ll define i64 @test_f2ll(float %a) #0 { -; CHECK: fptosi float %a to i64 +; CHECK: call i64 @llvm.nvvm.f2ll.rz(float %a) +; CHECK-NOT: fptosi float %a to i64 %ret = call i64 @llvm.nvvm.f2ll.rz(float %a) ret i64 %ret } ; CHECK-LABEL: @test_d2ui define i32 @test_d2ui(double %a) #0 { -; CHECK: fptoui double %a to i32 +; CHECK: call i32 @llvm.nvvm.d2ui.rz(double %a) +; CHECK-NOT: fptoui double %a to i32 %ret = call i32 @llvm.nvvm.d2ui.rz(double %a) ret i32 %ret } ; CHECK-LABEL: @test_f2ui define i32 @test_f2ui(float %a) #0 { -; CHECK: fptoui float %a to i32 +; CHECK: call i32 @llvm.nvvm.f2ui.rz(float %a) +; CHECK-NOT: fptoui float %a to i32 %ret = call i32 @llvm.nvvm.f2ui.rz(float %a) ret i32 %ret } ; CHECK-LABEL: @test_d2ull define i64 @test_d2ull(double %a) #0 { -; CHECK: fptoui double %a to i64 +; CHECK: call i64 @llvm.nvvm.d2ull.rz(double %a) +; CHECK-NOT: fptoui double %a to i64 %ret = call i64 @llvm.nvvm.d2ull.rz(double %a) ret i64 %ret } ; CHECK-LABEL: @test_f2ull define i64 @test_f2ull(float %a) #0 { -; CHECK: fptoui float %a to i64 +; CHECK: call i64 @llvm.nvvm.f2ull.rz(float %a) +; CHECK-NOT: fptoui float %a to i64 %ret = call i64 @llvm.nvvm.f2ull.rz(float %a) ret i64 %ret } @@ -497,4 +508,4 @@ declare float @llvm.nvvm.ui2f.rn(i32) declare double @llvm.nvvm.ull2d.rn(i64) declare float @llvm.nvvm.ull2f.rn(i64) declare i32 @llvm.nvvm.fshr.clamp.i32(i32, i32, i32) -declare i32 @llvm.nvvm.fshl.clamp.i32(i32, i32, i32) \ No newline at end of file +declare i32 @llvm.nvvm.fshl.clamp.i32(i32, i32, i32) diff --git a/llvm/test/Transforms/InstCombine/assume-align.ll b/llvm/test/Transforms/InstCombine/assume-align.ll index 274632658496b..4185b10eeca95 100644 --- a/llvm/test/Transforms/InstCombine/assume-align.ll +++ b/llvm/test/Transforms/InstCombine/assume-align.ll @@ -247,6 +247,16 @@ define ptr @redundant_assume_align_8_via_asume(ptr %p) { ret ptr %p } +define ptr @assume_align_1(ptr %p) { +; CHECK-LABEL: @assume_align_1( +; CHECK-NEXT: call void @foo(ptr [[P:%.*]]) +; CHECK-NEXT: ret ptr [[P]] +; + call void @llvm.assume(i1 true) [ "align"(ptr %p, i32 1) ] + call void @foo(ptr %p) + ret ptr %p +} + declare void @foo(ptr) ; !align must have a constant integer alignment. 
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll index e87a61a57ea47..7b0b871513513 100644 --- a/llvm/test/Transforms/InstCombine/assume.ll +++ b/llvm/test/Transforms/InstCombine/assume.ll @@ -498,13 +498,13 @@ not_taken: define i1 @nonnull3B(ptr %a, i1 %control) { ; CHECK-LABEL: @nonnull3B( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8 ; CHECK-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]] ; CHECK: taken: -; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8 -; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[LOAD]], null -; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(ptr [[LOAD]]) ] -; CHECK-NEXT: ret i1 [[CMP]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(ptr [[LOAD]]) ] +; CHECK-NEXT: ret i1 true ; CHECK: not_taken: +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(ptr [[LOAD]]) ] ; CHECK-NEXT: ret i1 false ; entry: @@ -512,10 +512,10 @@ entry: %cmp = icmp ne ptr %load, null br i1 %control, label %taken, label %not_taken taken: - call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)] + call void @llvm.assume(i1 true) ["nonnull"(ptr %load)] ret i1 %cmp not_taken: - call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)] + call void @llvm.assume(i1 true) ["nonnull"(ptr %load)] ret i1 %control } @@ -544,7 +544,7 @@ taken: br label %exit exit: ; FIXME: this shouldn't be dropped because it is still dominated by the new position of %load - call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)] + call void @llvm.assume(i1 %cmp) ret i1 %cmp2 not_taken: call void @llvm.assume(i1 %cmp) @@ -575,7 +575,7 @@ taken: exit: ret i1 %cmp2 not_taken: - call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)] + call void @llvm.assume(i1 %cmp) ret i1 %control } diff --git a/llvm/test/Transforms/InstCombine/fcmp.ll b/llvm/test/Transforms/InstCombine/fcmp.ll index 119cffd73c662..d94e78c55a375 100644 --- a/llvm/test/Transforms/InstCombine/fcmp.ll +++ b/llvm/test/Transforms/InstCombine/fcmp.ll @@ -1812,6 +1812,46 @@ define i1 @fcmp_ule_fsub_const(float %x, float %y) { ret i1 %cmp } +define i1 @fcmp_ninf_ule_fsub_const(float %x, float %y) { +; CHECK-LABEL: @fcmp_ninf_ule_fsub_const( +; CHECK-NEXT: [[CMP:%.*]] = fcmp ule float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %fs = fsub float %x, %y + %cmp = fcmp ninf ule float %fs, 0.000000e+00 + ret i1 %cmp +} + +define i1 @fcmp_nnan_ule_fsub_const(float %x, float %y) { +; CHECK-LABEL: @fcmp_nnan_ule_fsub_const( +; CHECK-NEXT: [[CMP:%.*]] = fcmp nnan ule float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %fs = fsub float %x, %y + %cmp = fcmp nnan ule float %fs, 0.000000e+00 + ret i1 %cmp +} + +define i1 @fcmp_ule_fsub_ninf_const(float %x, float %y) { +; CHECK-LABEL: @fcmp_ule_fsub_ninf_const( +; CHECK-NEXT: [[CMP:%.*]] = fcmp ninf ule float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %fs = fsub ninf float %x, %y + %cmp = fcmp ule float %fs, 0.000000e+00 + ret i1 %cmp +} + +define i1 @fcmp_ule_fsub_nnan_const(float %x, float %y) { +; CHECK-LABEL: @fcmp_ule_fsub_nnan_const( +; CHECK-NEXT: [[CMP:%.*]] = fcmp nnan ule float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %fs = fsub nnan float %x, %y + %cmp = fcmp ule float %fs, 0.000000e+00 + ret i1 %cmp +} + define i1 @fcmp_ugt_fsub_const(float %x, float %y) { ; CHECK-LABEL: @fcmp_ugt_fsub_const( ; CHECK-NEXT: [[FS:%.*]] = fsub float [[X:%.*]], [[Y:%.*]] diff --git a/llvm/test/Transforms/InstCombine/freeze-phi.ll 
b/llvm/test/Transforms/InstCombine/freeze-phi.ll index cdc9a5efe5933..62bb9dc31b76b 100644 --- a/llvm/test/Transforms/InstCombine/freeze-phi.ll +++ b/llvm/test/Transforms/InstCombine/freeze-phi.ll @@ -212,3 +212,31 @@ D: %y.fr = freeze i32 %y ret i32 %y.fr } + +; Make sure that fmf in phi node is dropped when freeze get folded. + +define float @pr161524(float noundef %arg) { +; CHECK-LABEL: @pr161524( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[COND:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[ARG:%.*]], i32 144) +; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_EXIT:%.*]] +; CHECK: if.then: +; CHECK-NEXT: [[FADD:%.*]] = fadd float [[ARG]], 1.000000e+00 +; CHECK-NEXT: br label [[IF_EXIT]] +; CHECK: if.exit: +; CHECK-NEXT: [[RET:%.*]] = phi float [ [[FADD]], [[IF_THEN]] ], [ [[ARG]], [[ENTRY:%.*]] ] +; CHECK-NEXT: ret float [[RET]] +; +entry: + %cond = tail call i1 @llvm.is.fpclass.f32(float %arg, i32 144) + br i1 %cond, label %if.then, label %if.exit + +if.then: + %fadd = fadd float %arg, 1.0 + br label %if.exit + +if.exit: + %ret = phi ninf float [ %fadd, %if.then ], [ %arg, %entry ] + %ret.fr = freeze float %ret + ret float %ret.fr +} diff --git a/llvm/test/Transforms/InstCombine/freeze.ll b/llvm/test/Transforms/InstCombine/freeze.ll index af5cb0c75537b..ac7d65c2a3c6a 100644 --- a/llvm/test/Transforms/InstCombine/freeze.ll +++ b/llvm/test/Transforms/InstCombine/freeze.ll @@ -1464,6 +1464,27 @@ define ptr @freeze_ptrmask_nonnull(ptr %p, i64 noundef %m) { ret ptr %fr } +define i64 @pr161492_1(i1 %cond) { +; CHECK-LABEL: define i64 @pr161492_1( +; CHECK-SAME: i1 [[COND:%.*]]) { +; CHECK-NEXT: ret i64 0 +; + %fr1 = freeze i64 poison + %fr2 = freeze i64 poison + %ret = select i1 %cond, i64 %fr1, i64 %fr2 + ret i64 %ret +} + +define i64 @pr161492_2(i1 %cond) { +; CHECK-LABEL: define i64 @pr161492_2( +; CHECK-SAME: i1 [[COND:%.*]]) { +; CHECK-NEXT: ret i64 0 +; + %fr = freeze i64 poison + %ret = select i1 %cond, i64 %fr, i64 %fr + ret i64 %ret +} + !0 = !{} !1 = !{i64 4} !2 = !{i32 0, i32 100} diff --git a/llvm/test/Transforms/InstCombine/fsh.ll b/llvm/test/Transforms/InstCombine/fsh.ll index 0325c60997dfd..28c541e1a9eb2 100644 --- a/llvm/test/Transforms/InstCombine/fsh.ll +++ b/llvm/test/Transforms/InstCombine/fsh.ll @@ -1214,3 +1214,75 @@ define i31 @fshr_neg_amount_non_power_two(i31 %x, i31 %y) { %r = call i31 @llvm.fshr.i31(i31 %x, i31 %x, i31 %n) ret i31 %r } + +define i32 @rot_const_consecutive(i32 %x) { +; CHECK-LABEL: @rot_const_consecutive( +; CHECK-NEXT: [[R2:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 8) +; CHECK-NEXT: ret i32 [[R2]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 13) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %r, i32 27) + ret i32 %r2 +} + +define i32 @rot_const_consecutive_multi_use(i32 %x) { +; CHECK-LABEL: @rot_const_consecutive_multi_use( +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 7) +; CHECK-NEXT: [[R3:%.*]] = call i32 @llvm.fshl.i32(i32 [[X]], i32 [[X]], i32 11) +; CHECK-NEXT: [[R2:%.*]] = and i32 [[R]], [[R3]] +; CHECK-NEXT: ret i32 [[R2]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %r, i32 4) + %and = and i32 %r, %r2 + ret i32 %and +} + +define i32 @rot_const_consecutive_cancel_out(i32 %x) { +; CHECK-LABEL: @rot_const_consecutive_cancel_out( +; CHECK-NEXT: ret i32 [[X:%.*]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %r, i32 25) + ret i32 %r2 +} + +;; negative test, 
consecutive rotates only fold if shift amounts are const + +define i32 @rot_nonconst_shift(i32 %x, i32 %amt) { +; CHECK-LABEL: @rot_nonconst_shift( +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 7) +; CHECK-NEXT: [[R2:%.*]] = call i32 @llvm.fshl.i32(i32 [[R]], i32 [[R]], i32 [[AMT:%.*]]) +; CHECK-NEXT: ret i32 [[R2]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %r, i32 %amt) + ret i32 %r2 +} + +;; negative test, 1st funnel shift isn't a rotate. + +define i32 @fsh_rot(i32 %x, i32 %y) { +; CHECK-LABEL: @fsh_rot( +; CHECK-NEXT: [[FSH:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[Y:%.*]], i32 7) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[FSH]], i32 [[FSH]], i32 4) +; CHECK-NEXT: ret i32 [[R]] +; + %fsh = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 7) + %r = call i32 @llvm.fshl.i32(i32 %fsh, i32 %fsh, i32 4) + ret i32 %r +} + +;; negative test, 2nd funnel shift isn't a rotate. + +define i32 @rot_fsh(i32 %x, i32 %y) { +; CHECK-LABEL: @rot_fsh( +; CHECK-NEXT: [[Y:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 7) +; CHECK-NEXT: [[R2:%.*]] = call i32 @llvm.fshl.i32(i32 [[Y]], i32 [[R:%.*]], i32 4) +; CHECK-NEXT: ret i32 [[R2]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %y, i32 4) + ret i32 %r2 +} + diff --git a/llvm/test/Transforms/InstCombine/funnel.ll b/llvm/test/Transforms/InstCombine/funnel.ll index 0e5f0469264c7..e5731080fba44 100644 --- a/llvm/test/Transforms/InstCombine/funnel.ll +++ b/llvm/test/Transforms/InstCombine/funnel.ll @@ -635,3 +635,29 @@ define i32 @test_rotl_and_neg_wrong_mask(i32 %x, i32 %shamt) { %or = or i32 %shl, %shr ret i32 %or } + +declare void @use(i16) + +; Make sure the reused result does not produce poison. 
+ +define i16 @fshl_concat_vector_may_produce_poison(i4 %x, i12 %y) { +; CHECK-LABEL: @fshl_concat_vector_may_produce_poison( +; CHECK-NEXT: [[X_FR:%.*]] = freeze i4 [[X:%.*]] +; CHECK-NEXT: [[ZEXT_X:%.*]] = zext i4 [[X_FR]] to i16 +; CHECK-NEXT: [[SLX:%.*]] = shl nuw i16 [[ZEXT_X]], 12 +; CHECK-NEXT: [[ZEXT_Y:%.*]] = zext i12 [[Y:%.*]] to i16 +; CHECK-NEXT: [[XY:%.*]] = or disjoint i16 [[SLX]], [[ZEXT_Y]] +; CHECK-NEXT: call void @use(i16 [[XY]]) +; CHECK-NEXT: [[YX:%.*]] = call i16 @llvm.fshl.i16(i16 [[XY]], i16 [[XY]], i16 4) +; CHECK-NEXT: ret i16 [[YX]] +; + %x.fr = freeze i4 %x + %zext.x = zext i4 %x.fr to i16 + %slx = shl nuw nsw i16 %zext.x, 12 + %zext.y = zext i12 %y to i16 + %xy = or disjoint i16 %slx, %zext.y + call void @use(i16 %xy) + %sly = shl nuw i16 %zext.y, 4 + %yx = or disjoint i16 %sly, %zext.x + ret i16 %yx +} diff --git a/llvm/test/Transforms/InstCombine/icmp-clamp.ll b/llvm/test/Transforms/InstCombine/icmp-clamp.ll new file mode 100644 index 0000000000000..4866dbffb567a --- /dev/null +++ b/llvm/test/Transforms/InstCombine/icmp-clamp.ll @@ -0,0 +1,295 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -passes=instcombine -S | FileCheck %s + +declare void @use(i32) + +define i1 @test_i32_eq(i32 %x) { +; CHECK-LABEL: define i1 @test_i32_eq( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], 95 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], 256 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_i32_ne(i32 %x) { +; CHECK-LABEL: define i1 @test_i32_ne( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], -161 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], -256 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp ne i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_i32_eq_no_add(i32 %x) { +; CHECK-LABEL: define i1 @test_i32_eq_no_add( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X]], 161 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 0) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_i32_ne_no_add(i32 %x) { +; CHECK-LABEL: define i1 @test_i32_ne_no_add( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X]], 160 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 0) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp ne i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_unsigned_eq(i32 %x) { +; CHECK-LABEL: define i1 @test_unsigned_eq( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], -10 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], 91 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.umax.i32(i32 %x, i32 10) + %v2 = tail call i32 @llvm.umin.i32(i32 %v1, i32 100) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_unsigned_ne(i32 %x) { +; CHECK-LABEL: define i1 @test_unsigned_ne( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], -101 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], -91 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.umax.i32(i32 %x, i32 10) + %v2 = tail call i32 
@llvm.umin.i32(i32 %v1, i32 100) + %cmp = icmp ne i32 %v2, %x + ret i1 %cmp +} + + +; Different bit widths +define i1 @test_i8_eq(i8 %x) { +; CHECK-LABEL: define i1 @test_i8_eq( +; CHECK-SAME: i8 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], 50 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[TMP1]], 101 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i8 @llvm.smax.i8(i8 %x, i8 -50) + %v2 = tail call i8 @llvm.smin.i8(i8 %v1, i8 50) + %cmp = icmp eq i8 %v2, %x + ret i1 %cmp +} + +define i1 @test_i16_eq(i16 %x) { +; CHECK-LABEL: define i1 @test_i16_eq( +; CHECK-SAME: i16 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i16 [[X]], 1000 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i16 [[TMP1]], 2001 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i16 @llvm.smax.i16(i16 %x, i16 -1000) + %v2 = tail call i16 @llvm.smin.i16(i16 %v1, i16 1000) + %cmp = icmp eq i16 %v2, %x + ret i1 %cmp +} + +define i1 @test_i64_eq(i64 %x) { +; CHECK-LABEL: define i1 @test_i64_eq( +; CHECK-SAME: i64 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[X]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP1]], -1 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i64 @llvm.smax.i64(i64 %x, i64 -1) + %v2 = tail call i64 @llvm.smin.i64(i64 %v1, i64 9223372036854775806) + %cmp = icmp eq i64 %v2, %x + ret i1 %cmp +} + +; Negative tests - wrong predicate +define i1 @test_wrong_pred_slt(i32 %x) { +; CHECK-LABEL: define i1 @test_wrong_pred_slt( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 160 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp slt i32 %v2, %x + ret i1 %cmp +} + + +; Negative tests - not a clamp pattern +define i1 @test_not_clamp_pattern(i32 %x, i32 %y) { +; CHECK-LABEL: define i1 @test_not_clamp_pattern( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[Y]], i32 -95) +; CHECK-NEXT: [[V2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[V1]], i32 160) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V2]], [[X]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %y, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Negative tests - Lo >= Hi +define i1 @test_invalid_range(i32 %x) { +; CHECK-LABEL: define i1 @test_invalid_range( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 50 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 100) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 50) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Negative tests - Lo is minimum signed value +define i1 @test_lo_min_signed(i32 %x) { +; CHECK-LABEL: define i1 @test_lo_min_signed( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X]], 161 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -2147483648) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Negative tests - Hi is maximum signed value +define i1 @test_hi_max_signed(i32 %x) { +; CHECK-LABEL: define i1 @test_hi_max_signed( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], -96 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 2147483647) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Negative tests - Hi is 
maximum unsigned value +define i1 @test_hi_max_unsigned(i32 %x) { +; CHECK-LABEL: define i1 @test_hi_max_unsigned( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X]], 9 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.umax.i32(i32 %x, i32 10) + %v2 = tail call i32 @llvm.umin.i32(i32 %v1, i32 4294967295) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Multi-use tests - multiple uses of max +define i1 @test_multi_use_max(i32 %x) { +; CHECK-LABEL: define i1 @test_multi_use_max( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[X]], i32 -95) +; CHECK-NEXT: call void @use(i32 [[V1]]) +; CHECK-NEXT: [[V2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[V1]], i32 160) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V2]], [[X]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + call void @use(i32 %v1) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Multi-use tests - multiple uses of min +define i1 @test_multi_use_min(i32 %x) { +; CHECK-LABEL: define i1 @test_multi_use_min( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[X]], i32 -95) +; CHECK-NEXT: [[V2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[V1]], i32 160) +; CHECK-NEXT: call void @use(i32 [[V2]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V2]], [[X]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + call void @use(i32 %v2) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Commuted tests +define i1 @test_commuted_eq(i32 %x) { +; CHECK-LABEL: define i1 @test_commuted_eq( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], 95 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], 256 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %x, %v2 + ret i1 %cmp +} + + +; Vector tests - splat constants +define <2 x i1> @test_vec_splat_eq(<2 x i32> %x) { +; CHECK-LABEL: define <2 x i1> @test_vec_splat_eq( +; CHECK-SAME: <2 x i32> [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[X]], splat (i32 50) +; CHECK-NEXT: [[CMP:%.*]] = icmp ult <2 x i32> [[TMP1]], splat (i32 101) +; CHECK-NEXT: ret <2 x i1> [[CMP]] +; + %v1 = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> ) + %v2 = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %v1, <2 x i32> ) + %cmp = icmp eq <2 x i32> %v2, %x + ret <2 x i1> %cmp +} + +; Vector tests - poison elements +define <2 x i1> @test_vec_poison_eq(<2 x i32> %x) { +; CHECK-LABEL: define <2 x i1> @test_vec_poison_eq( +; CHECK-SAME: <2 x i32> [[X:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> [[X]], <2 x i32> ) +; CHECK-NEXT: [[V2:%.*]] = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[V1]], <2 x i32> ) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[V2]], [[X]] +; CHECK-NEXT: ret <2 x i1> [[CMP]] +; + %v1 = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> ) + %v2 = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %v1, <2 x i32> ) + %cmp = icmp eq <2 x i32> %v2, %x + ret <2 x i1> %cmp +} + +; Vector tests - non-splat +define <2 x i1> @test_vec_non_splat_eq(<2 x i32> %x) { +; CHECK-LABEL: define <2 x i1> @test_vec_non_splat_eq( +; CHECK-SAME: <2 x i32> [[X:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call <2 x i32> 
@llvm.smax.v2i32(<2 x i32> [[X]], <2 x i32> ) +; CHECK-NEXT: [[V2:%.*]] = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[V1]], <2 x i32> ) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[V2]], [[X]] +; CHECK-NEXT: ret <2 x i1> [[CMP]] +; + %v1 = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> ) + %v2 = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %v1, <2 x i32> ) + %cmp = icmp eq <2 x i32> %v2, %x + ret <2 x i1> %cmp +} diff --git a/llvm/test/Transforms/InstCombine/in-freeze-phi.ll b/llvm/test/Transforms/InstCombine/in-freeze-phi.ll new file mode 100644 index 0000000000000..917d81b499c49 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/in-freeze-phi.ll @@ -0,0 +1,274 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes=instcombine -S < %s | FileCheck %s + +define i32 @phi_freeze_same_consts(i1 %c0, i1 %c1) { +; CHECK-LABEL: define i32 @phi_freeze_same_consts( +; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]] +; CHECK: [[BB_FREEZE]]: +; CHECK-NEXT: br label %[[FINAL:.*]] +; CHECK: [[BB_OTHER]]: +; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]] +; CHECK: [[CA]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[CB]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[FINAL]]: +; CHECK-NEXT: ret i32 42 +; +entry: + br i1 %c0, label %bb_freeze, label %bb_other + +bb_freeze: + %f = freeze i32 undef + br label %final + +bb_other: + br i1 %c1, label %cA, label %cB +cA: + br label %final +cB: + br label %final + +final: + %phi = phi i32 [ %f, %bb_freeze ], [ 42, %cA ], [ 42, %cB ] + ret i32 %phi +} + +define i32 @phi_freeze_mixed_consts(i1 %c0, i1 %c1) { +; CHECK-LABEL: define i32 @phi_freeze_mixed_consts( +; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]] +; CHECK: [[BB_FREEZE]]: +; CHECK-NEXT: br label %[[FINAL:.*]] +; CHECK: [[BB_OTHER]]: +; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]] +; CHECK: [[CA]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[CB]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[FINAL]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 0, %[[BB_FREEZE]] ], [ 42, %[[CA]] ], [ 7, %[[CB]] ] +; CHECK-NEXT: ret i32 [[PHI]] +; +entry: + br i1 %c0, label %bb_freeze, label %bb_other + +bb_freeze: + %f = freeze i32 undef + br label %final + +bb_other: + br i1 %c1, label %cA, label %cB +cA: + br label %final +cB: + br label %final + +final: + %phi = phi i32 [ %f, %bb_freeze ], [ 42, %cA ], [ 7, %cB ] + ret i32 %phi +} + +define i32 @phi_freeze_with_nonconst_incoming(i32 %x, i1 %c0, i1 %c1) { +; CHECK-LABEL: define i32 @phi_freeze_with_nonconst_incoming( +; CHECK-SAME: i32 [[X:%.*]], i1 [[C0:%.*]], i1 [[C1:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]] +; CHECK: [[BB_FREEZE]]: +; CHECK-NEXT: br label %[[FINAL:.*]] +; CHECK: [[BB_OTHER]]: +; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]] +; CHECK: [[CA]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[CB]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[FINAL]]: +; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 0, %[[BB_FREEZE]] ], [ [[X]], %[[CA]] ], [ 13, %[[CB]] ] +; CHECK-NEXT: ret i32 [[PHI]] +; +entry: + br i1 %c0, label %bb_freeze, label %bb_other + +bb_freeze: + %f = freeze i32 undef + br label %final + +bb_other: + br i1 %c1, label %cA, label %cB +cA: + 
br label %final +cB: + br label %final + +final: + %phi = phi i32 [ %f, %bb_freeze ], [ %x, %cA ], [ 13, %cB ] + ret i32 %phi +} + +define <4 x i8> @phi_freeze_vector(i1 %c0, i1 %c1) { +; CHECK-LABEL: define <4 x i8> @phi_freeze_vector( +; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]] +; CHECK: [[BB_FREEZE]]: +; CHECK-NEXT: br label %[[FINAL:.*]] +; CHECK: [[BB_OTHER]]: +; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]] +; CHECK: [[CA]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[CB]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[FINAL]]: +; CHECK-NEXT: ret <4 x i8> splat (i8 9) +; +entry: + br i1 %c0, label %bb_freeze, label %bb_other + +bb_freeze: + %f = freeze <4 x i8> undef + br label %final + +bb_other: + br i1 %c1, label %cA, label %cB + +cA: + br label %final + +cB: + br label %final + +final: + %phi = phi <4 x i8> [ %f, %bb_freeze ], + [, %cA ], + [, %cB ] + ret <4 x i8> %phi +} + +define i32 @multi_use_one_folds_one_not_zero(i1 %c0, i1 %c1, i1 %c2) { +; CHECK-LABEL: define i32 @multi_use_one_folds_one_not_zero( +; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]], i1 [[C2:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[C0]], label %[[BB_OTHER3:.*]], label %[[CC1:.*]] +; CHECK: [[BB_OTHER3]]: +; CHECK-NEXT: br label %[[MID:.*]] +; CHECK: [[CC1]]: +; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]] +; CHECK: [[CA]]: +; CHECK-NEXT: br label %[[MID]] +; CHECK: [[CB]]: +; CHECK-NEXT: br label %[[MID]] +; CHECK: [[MID]]: +; CHECK-NEXT: [[PHI_FOLD:%.*]] = phi i32 [ 0, %[[BB_OTHER3]] ], [ 1, %[[CA]] ], [ 1, %[[CB]] ] +; CHECK-NEXT: br i1 [[C2]], label %[[BB_FREEZE2:.*]], label %[[CD:.*]] +; CHECK: [[BB_FREEZE2]]: +; CHECK-NEXT: br label %[[FINAL:.*]] +; CHECK: [[BB_OTHER2:.*:]] +; CHECK-NEXT: br i1 true, label %[[CA]], label %[[CB]] +; CHECK: [[CC:.*:]] +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[CD]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[FINAL]]: +; CHECK-NEXT: ret i32 [[PHI_FOLD]] +; +entry: + %f = freeze i32 undef + br i1 %c0, label %bb_freeze, label %bb_other +bb_freeze: + br label %mid +bb_other: + br i1 %c1, label %cA, label %cB +cA: + br label %mid +cB: + br label %mid +mid: + %phi_no_fold = phi i32 [ %f, %bb_freeze ], [ 1, %cA ], [ 1, %cB ] + br i1 %c2, label %bb_freeze2, label %cD +bb_freeze2: + br label %final +bb_other2: + br i1 %c1, label %cA, label %cB +cC: + br label %final +cD: + br label %final +final: + %phi_fold = phi i32 [ %f, %bb_freeze2 ], [ 0, %cC ], [ 0, %cD ] + %a = add i32 %phi_fold, %phi_no_fold + ret i32 %a +} + +define i32 @phi_freeze_poison(i1 %c0, i1 %c1) { +; CHECK-LABEL: define i32 @phi_freeze_poison( +; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]] +; CHECK: [[BB_FREEZE]]: +; CHECK-NEXT: br label %[[FINAL:.*]] +; CHECK: [[BB_OTHER]]: +; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]] +; CHECK: [[CA]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[CB]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[FINAL]]: +; CHECK-NEXT: ret i32 0 +; +entry: + br i1 %c0, label %bb_freeze, label %bb_other + +bb_freeze: + %f = freeze i32 undef + br label %final + +bb_other: + br i1 %c1, label %cA, label %cB +cA: + br label %final +cB: + br label %final + +final: + %phi = phi i32 [ %f, %bb_freeze ], [ poison, %cA ], [ poison, %cB ] + ret i32 %phi +} + +define <2 x i32> @phi_freeze_poison_vec(i1 %c0, i1 %c1) { 
+; CHECK-LABEL: define <2 x i32> @phi_freeze_poison_vec( +; CHECK-SAME: i1 [[C0:%.*]], i1 [[C1:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[C0]], label %[[BB_FREEZE:.*]], label %[[BB_OTHER:.*]] +; CHECK: [[BB_FREEZE]]: +; CHECK-NEXT: br label %[[FINAL:.*]] +; CHECK: [[BB_OTHER]]: +; CHECK-NEXT: br i1 [[C1]], label %[[CA:.*]], label %[[CB:.*]] +; CHECK: [[CA]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[CB]]: +; CHECK-NEXT: br label %[[FINAL]] +; CHECK: [[FINAL]]: +; CHECK-NEXT: [[PHI:%.*]] = phi <2 x i32> [ zeroinitializer, %[[BB_FREEZE]] ], [ , %[[CA]] ], [ , %[[CB]] ] +; CHECK-NEXT: ret <2 x i32> [[PHI]] +; +entry: + br i1 %c0, label %bb_freeze, label %bb_other + +bb_freeze: + %f = freeze <2 x i32> undef + br label %final + +bb_other: + br i1 %c1, label %cA, label %cB +cA: + br label %final +cB: + br label %final + +final: + %phi = phi <2 x i32> [ %f, %bb_freeze ], [ , %cA ], [ , %cB ] + ret <2 x i32> %phi +} diff --git a/llvm/test/Transforms/InstCombine/insert-extract-shuffle.ll b/llvm/test/Transforms/InstCombine/insert-extract-shuffle.ll index f51e444a815c8..470d6be88672b 100644 --- a/llvm/test/Transforms/InstCombine/insert-extract-shuffle.ll +++ b/llvm/test/Transforms/InstCombine/insert-extract-shuffle.ll @@ -804,3 +804,49 @@ define <4 x i32> @infloop_D151807(<4 x float> %arg) { %i4 = insertelement <4 x i32> zeroinitializer, i32 %i3, i64 0 ret <4 x i32> %i4 } + +; Make sure we don't crash in this case. + +define i64 @pr160507(ptr %arg, i32 %arg1, i1 %arg2, i8 %arg3, i64 %arg4) { +; CHECK-LABEL: @pr160507( +; CHECK-NEXT: bb: +; CHECK-NEXT: br label [[BB5:%.*]] +; CHECK: bb5: +; CHECK-NEXT: br i1 [[ARG2:%.*]], label [[BB6:%.*]], label [[BB8:%.*]] +; CHECK: bb6: +; CHECK-NEXT: br label [[BB5]] +; CHECK: bb8: +; CHECK-NEXT: br label [[BB10:%.*]] +; CHECK: bb10: +; CHECK-NEXT: br label [[BB12:%.*]] +; CHECK: bb12: +; CHECK-NEXT: store i64 0, ptr [[ARG:%.*]], align 8 +; CHECK-NEXT: br label [[BB5]] +; +bb: + br label %bb5 + +bb5: + %phi = phi i8 [ 0, %bb ], [ %extractelement, %bb6 ], [ 0, %bb12 ] + br i1 %arg2, label %bb6, label %bb8 + +bb6: + %extractelement = extractelement <2 x i8> zeroinitializer, i64 %arg4 + br label %bb5 + +bb8: + %insertelement9 = insertelement <2 x i8> , i8 %phi, i64 0 + %zext = zext <2 x i8> %insertelement9 to <2 x i64> + %shufflevector = shufflevector <2 x i64> %zext, <2 x i64> poison, <4 x i32> + br label %bb10 + +bb10: + br label %bb12 + +bb12: + %extractelement11 = extractelement <2 x i64> %zext, i64 1 + %insertelement13 = insertelement <4 x i64> %shufflevector, i64 %extractelement11, i64 0 + %extractelement14 = extractelement <4 x i64> %insertelement13, i32 %arg1 + store i64 %extractelement14, ptr %arg, align 8 + br label %bb5 +} diff --git a/llvm/test/Transforms/InstCombine/preserve-profile.ll b/llvm/test/Transforms/InstCombine/preserve-profile.ll index dd83805ed3397..8cb3e685ae302 100644 --- a/llvm/test/Transforms/InstCombine/preserve-profile.ll +++ b/llvm/test/Transforms/InstCombine/preserve-profile.ll @@ -46,9 +46,59 @@ define i32 @NegBin(i1 %C) !prof !0 { ret i32 %V } +define i32 @select_C_minus_1_or_C_from_bool(i1 %x) !prof !0 { +; CHECK-LABEL: define i32 @select_C_minus_1_or_C_from_bool( +; CHECK-SAME: i1 [[X:%.*]]) !prof [[PROF0]] { +; CHECK-NEXT: [[ADD:%.*]] = select i1 [[X]], i32 41, i32 42, !prof [[PROF2:![0-9]+]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %ext = sext i1 %x to i32 + %add = add i32 %ext, 42 + ret i32 %add +} + +define i5 @and_add(i1 %x, i1 %y) !prof !0 { +; CHECK-LABEL: define i5 @and_add( +; CHECK-SAME: i1 [[X:%.*]], i1 
[[Y:%.*]]) !prof [[PROF0]] { +; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X]], true +; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[Y]], [[TMP1]] +; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP2]], i5 -2, i5 0, !prof [[PROF2]] +; CHECK-NEXT: ret i5 [[R]] +; + %xz = zext i1 %x to i5 + %ys = sext i1 %y to i5 + %sub = add i5 %xz, %ys + %r = and i5 %sub, 30 + ret i5 %r +} + +define i32 @add_zext_zext_i1(i1 %a) !prof !0 { +; CHECK-LABEL: define i32 @add_zext_zext_i1( +; CHECK-SAME: i1 [[A:%.*]]) !prof [[PROF0]] { +; CHECK-NEXT: [[ADD:%.*]] = select i1 [[A]], i32 2, i32 0, !prof [[PROF2]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %zext = zext i1 %a to i32 + %add = add i32 %zext, %zext + ret i32 %add +} + +define i32 @no_count_no_branch_weights(i1 %a) { +; CHECK-LABEL: define i32 @no_count_no_branch_weights( +; CHECK-SAME: i1 [[A:%.*]]) { +; CHECK-NEXT: [[ADD:%.*]] = select i1 [[A]], i32 2, i32 0 +; CHECK-NEXT: ret i32 [[ADD]] +; + %zext = zext i1 %a to i32 + %add = add i32 %zext, %zext + ret i32 %add +} + + !0 = !{!"function_entry_count", i64 1000} !1 = !{!"branch_weights", i32 2, i32 3} ;. ; CHECK: [[PROF0]] = !{!"function_entry_count", i64 1000} ; CHECK: [[PROF1]] = !{!"branch_weights", i32 2, i32 3} +; CHECK: [[PROF2]] = !{!"unknown", !"instcombine"} ;. diff --git a/llvm/test/Transforms/InstCombine/ptrtoaddr.ll b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll new file mode 100644 index 0000000000000..61b13312521d2 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll @@ -0,0 +1,65 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -passes=instcombine -S | FileCheck %s +target datalayout = "p1:64:64:64:32" + +define i32 @ptrtoaddr_inttoptr_arg(i32 %a) { +; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr_arg( +; CHECK-SAME: i32 [[A:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[A]] to i64 +; CHECK-NEXT: [[TOPTR:%.*]] = inttoptr i64 [[TMP1]] to ptr addrspace(1) +; CHECK-NEXT: [[TOADDR:%.*]] = ptrtoaddr ptr addrspace(1) [[TOPTR]] to i32 +; CHECK-NEXT: ret i32 [[TOADDR]] +; + %toptr = inttoptr i32 %a to ptr addrspace(1) + %toaddr = ptrtoaddr ptr addrspace(1) %toptr to i32 + ret i32 %toaddr +} + +define i32 @ptrtoaddr_inttoptr() { +; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr() { +; CHECK-NEXT: ret i32 -1 +; + ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i32 -1 to ptr addrspace(1)) to i32) +} + +define i32 @ptrtoaddr_inttoptr_diff_size1() { +; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr_diff_size1() { +; CHECK-NEXT: ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i64 -1 to ptr addrspace(1)) to i32) +; + ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i64 -1 to ptr addrspace(1)) to i32) +} + +define i32 @ptrtoaddr_inttoptr_diff_size2() { +; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr_diff_size2() { +; CHECK-NEXT: ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i16 -1 to ptr addrspace(1)) to i32) +; + ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i16 -1 to ptr addrspace(1)) to i32) +} + +define i64 @ptrtoaddr_inttoptr_noas1() { +; CHECK-LABEL: define i64 @ptrtoaddr_inttoptr_noas1() { +; CHECK-NEXT: ret i64 1 +; + ret i64 ptrtoaddr (ptr getelementptr (i8, ptr null, i64 1) to i64) +} + +define i64 @ptr2addr2_inttoptr_noas2() { +; CHECK-LABEL: define i64 @ptr2addr2_inttoptr_noas2() { +; CHECK-NEXT: ret i64 123 +; + ret i64 ptrtoaddr (ptr inttoptr (i64 123 to ptr) to i64) +} + +define i64 @ptrtoaddr_inttoptr_noas_diff_size1() { +; CHECK-LABEL: define i64 @ptrtoaddr_inttoptr_noas_diff_size1() { +; CHECK-NEXT: ret i64 ptrtoaddr (ptr inttoptr (i32 -1 to ptr) to 
i64) +; + ret i64 ptrtoaddr (ptr inttoptr (i32 -1 to ptr) to i64) +} + +define i64 @ptrtoaddr_inttoptr_noas_diff_size2() { +; CHECK-LABEL: define i64 @ptrtoaddr_inttoptr_noas_diff_size2() { +; CHECK-NEXT: ret i64 ptrtoaddr (ptr inttoptr (i128 -1 to ptr) to i64) +; + ret i64 ptrtoaddr (ptr inttoptr (i128 -1 to ptr) to i64) +} diff --git a/llvm/test/Transforms/InstCombine/select-masked_load.ll b/llvm/test/Transforms/InstCombine/select-masked_load.ll index b6bac612d6f9b..22e30ac019a5d 100644 --- a/llvm/test/Transforms/InstCombine/select-masked_load.ll +++ b/llvm/test/Transforms/InstCombine/select-masked_load.ll @@ -26,8 +26,7 @@ define <4 x i32> @masked_load_and_zero_inactive_2(ptr %ptr, <4 x i1> %mask) { ; No transform when the load's passthrough cannot be reused or altered. define <4 x i32> @masked_load_and_zero_inactive_3(ptr %ptr, <4 x i1> %mask, <4 x i32> %passthrough) { ; CHECK-LABEL: @masked_load_and_zero_inactive_3( -; CHECK-NEXT: [[LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK:%.*]], <4 x i32> [[PASSTHROUGH:%.*]]) -; CHECK-NEXT: [[MASKED:%.*]] = select <4 x i1> [[MASK]], <4 x i32> [[LOAD]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[MASKED:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK:%.*]], <4 x i32> zeroinitializer) ; CHECK-NEXT: ret <4 x i32> [[MASKED]] ; %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough) @@ -116,6 +115,40 @@ entry: ret <8 x float> %1 } +define @fold_sel_into_masked_load_scalable(ptr %loc, %mask, %passthrough) { +; CHECK-LABEL: @fold_sel_into_masked_load_scalable( +; CHECK-NEXT: [[SEL:%.*]] = call @llvm.masked.load.nxv4f32.p0(ptr [[LOC:%.*]], i32 1, [[MASK:%.*]], [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: ret [[SEL]] +; + %load = call @llvm.masked.load.nxv4f32.p0(ptr %loc, i32 1, %mask, zeroinitializer) + %sel = select %mask, %load, %passthrough + ret %sel +} + +define @neg_fold_sel_into_masked_load_mask_mismatch(ptr %loc, %mask, %mask2, %passthrough) { +; CHECK-LABEL: @neg_fold_sel_into_masked_load_mask_mismatch( +; CHECK-NEXT: [[LOAD:%.*]] = call @llvm.masked.load.nxv4f32.p0(ptr [[LOC:%.*]], i32 1, [[MASK:%.*]], [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: [[SEL:%.*]] = select [[MASK2:%.*]], [[LOAD]], [[PASSTHROUGH]] +; CHECK-NEXT: ret [[SEL]] +; + %load = call @llvm.masked.load.nxv4f32.p0(ptr %loc, i32 1, %mask, %passthrough) + %sel = select %mask2, %load, %passthrough + ret %sel +} + +define @fold_sel_into_masked_load_scalable_one_use_check(ptr %loc1, %mask, %passthrough, ptr %loc2) { +; CHECK-LABEL: @fold_sel_into_masked_load_scalable_one_use_check( +; CHECK-NEXT: [[LOAD:%.*]] = call @llvm.masked.load.nxv4f32.p0(ptr [[LOC:%.*]], i32 1, [[MASK:%.*]], zeroinitializer) +; CHECK-NEXT: [[SEL:%.*]] = select [[MASK]], [[LOAD]], [[PASSTHROUGH:%.*]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0( [[LOAD]], ptr [[LOC2:%.*]], i32 1, [[MASK]]) +; CHECK-NEXT: ret [[SEL]] +; + %load = call @llvm.masked.load.nxv4f32.p0(ptr %loc1, i32 1, %mask, zeroinitializer) + %sel = select %mask, %load, %passthrough + call void @llvm.masked.store.nxv4f32.p0( %load, ptr %loc2, i32 1, %mask) + ret %sel +} + declare <8 x float> @llvm.masked.load.v8f32.p0(ptr, i32 immarg, <8 x i1>, <8 x float>) declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>) declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>) diff --git a/llvm/test/Transforms/InstCombine/vector-reductions.ll 
b/llvm/test/Transforms/InstCombine/vector-reductions.ll index 10f4aca72dbc7..f1e0dd9bd06d7 100644 --- a/llvm/test/Transforms/InstCombine/vector-reductions.ll +++ b/llvm/test/Transforms/InstCombine/vector-reductions.ll @@ -308,3 +308,174 @@ define i32 @diff_of_sums_type_mismatch2(<8 x i32> %v0, <4 x i32> %v1) { %r = sub i32 %r0, %r1 ret i32 %r } + +define i32 @constant_multiplied_4xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_4xi32( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 2 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %3) + ret i32 %4 +} + +define i32 @constant_multiplied_3xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_3xi32( +; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP0:%.*]], 3 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <3 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <3 x i32> %2, <3 x i32> poison, <3 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %3) + ret i32 %4 +} + +define i64 @constant_multiplied_4xi64(i64 %0) { +; CHECK-LABEL: @constant_multiplied_4xi64( +; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP0:%.*]], 2 +; CHECK-NEXT: ret i64 [[TMP2]] +; + %2 = insertelement <4 x i64> poison, i64 %0, i64 0 + %3 = shufflevector <4 x i64> %2, <4 x i64> poison, <4 x i32> zeroinitializer + %4 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %3) + ret i64 %4 +} + +define i32 @constant_multiplied_8xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_8xi32( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 3 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <8 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %3) + ret i32 %4 +} + + +define i32 @constant_multiplied_16xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_16xi32( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 4 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <16 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3) + ret i32 %4 +} + + +define i32 @constant_multiplied_4xi32_at_idx1(i32 %0) { +; CHECK-LABEL: @constant_multiplied_4xi32_at_idx1( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 2 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 1 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, + <4 x i32> + %4 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %3) + ret i32 %4 +} + +define i32 @negative_constant_multiplied_4xi32(i32 %0) { +; CHECK-LABEL: @negative_constant_multiplied_4xi32( +; CHECK-NEXT: ret i32 poison +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 1 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %3) + ret i32 %4 +} + +define i32 @constant_multiplied_6xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_6xi32( +; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP0:%.*]], 6 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <6 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v6i32(<6 x i32> %3) + ret i32 %4 +} + +define i64 @constant_multiplied_6xi64(i64 %0) { +; 
CHECK-LABEL: @constant_multiplied_6xi64( +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP0:%.*]], 6 +; CHECK-NEXT: ret i64 [[TMP2]] +; + %2 = insertelement <4 x i64> poison, i64 %0, i64 0 + %3 = shufflevector <4 x i64> %2, <4 x i64> poison, <6 x i32> zeroinitializer + %4 = tail call i64 @llvm.vector.reduce.add.v6i64(<6 x i64> %3) + ret i64 %4 +} + +define i1 @constant_multiplied_8xi1(i1 %0) { +; CHECK-LABEL: @constant_multiplied_8xi1( +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i1> poison, i1 [[TMP0:%.*]], i64 0 +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <8 x i32> zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i1> [[TMP3]] to i8 +; CHECK-NEXT: [[TMP5:%.*]] = call range(i8 0, 9) i8 @llvm.ctpop.i8(i8 [[TMP4]]) +; CHECK-NEXT: [[TMP6:%.*]] = trunc i8 [[TMP5]] to i1 +; CHECK-NEXT: ret i1 [[TMP6]] +; + %2 = insertelement <8 x i1> poison, i1 %0, i32 0 + %3 = shufflevector <8 x i1> %2, <8 x i1> poison, <8 x i32> zeroinitializer + %4 = tail call i1 @llvm.vector.reduce.add.v8i1(<8 x i1> %3) + ret i1 %4 +} + +define i2 @constant_multiplied_4xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_4xi2( +; CHECK-NEXT: ret i2 0 +; + %2 = insertelement <4 x i2> poison, i2 %0, i32 0 + %3 = shufflevector <4 x i2> %2, <4 x i2> poison, <4 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v4i2(<4 x i2> %3) + ret i2 %4 +} + +define i2 @constant_multiplied_5xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_5xi2( +; CHECK-NEXT: ret i2 [[TMP0:%.*]] +; + %2 = insertelement <5 x i2> poison, i2 %0, i64 0 + %3 = shufflevector <5 x i2> %2, <5 x i2> poison, <5 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v5i2(<5 x i2> %3) + ret i2 %4 +} + +define i2 @constant_multiplied_6xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_6xi2( +; CHECK-NEXT: [[TMP2:%.*]] = shl i2 [[TMP0:%.*]], 1 +; CHECK-NEXT: ret i2 [[TMP2]] +; + %2 = insertelement <6 x i2> poison, i2 %0, i64 0 + %3 = shufflevector <6 x i2> %2, <6 x i2> poison, <6 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v6i2(<6 x i2> %3) + ret i2 %4 +} + +define i2 @constant_multiplied_7xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_7xi2( +; CHECK-NEXT: [[TMP2:%.*]] = sub i2 0, [[TMP0:%.*]] +; CHECK-NEXT: ret i2 [[TMP2]] +; + %2 = insertelement <7 x i2> poison, i2 %0, i64 0 + %3 = shufflevector <7 x i2> %2, <7 x i2> poison, <7 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v7i2(<7 x i2> %3) + ret i2 %4 +} + +define i32 @negative_scalable_vector(i32 %0) { +; CHECK-LABEL: @negative_scalable_vector( +; CHECK-NEXT: [[TMP2:%.*]] = insertelement poison, i32 [[TMP0:%.*]], i64 0 +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector [[TMP2]], poison, zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP3]]) +; CHECK-NEXT: ret i32 [[TMP4]] +; + %2 = insertelement poison, i32 %0, i64 0 + %3 = shufflevector %2, poison, zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.nxv4i32( %3) + ret i32 %4 +} diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/inttoptr-gep-index-width.ll b/llvm/test/Transforms/InstSimplify/ConstProp/inttoptr-gep-index-width.ll deleted file mode 100644 index 03056e8361e21..0000000000000 --- a/llvm/test/Transforms/InstSimplify/ConstProp/inttoptr-gep-index-width.ll +++ /dev/null @@ -1,14 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -S -passes=instsimplify < %s | FileCheck %s - -target datalayout = "p:16:16:16:8" - -; The GEP 
should only modify the low 8 bits of the pointer. -define ptr @test() { -; CHECK-LABEL: define ptr @test() { -; CHECK-NEXT: ret ptr inttoptr (i16 -256 to ptr) -; - %base = inttoptr i16 -1 to ptr - %gep = getelementptr i8, ptr %base, i8 1 - ret ptr %gep -} diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/inttoptr-gep-nonintegral.ll b/llvm/test/Transforms/InstSimplify/ConstProp/inttoptr-gep-nonintegral.ll new file mode 100644 index 0000000000000..f66825767bd0b --- /dev/null +++ b/llvm/test/Transforms/InstSimplify/ConstProp/inttoptr-gep-nonintegral.ll @@ -0,0 +1,145 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -passes=instsimplify < %s | FileCheck %s +;; Check that we do not create new inttoptr instructions for unstable pointers +;; or pointers with external state (even if the values are all constants). +;; NOTE: for all but the zero address space, the GEP should only modify the +;; low 8 bits of the pointer. +target datalayout = "p:16:16:16:16-p1:16:16:16:8-pu2:16:16:16:8-pe3:16:16:16:8" + +define ptr @test_null_base_normal() { +; CHECK-LABEL: define ptr @test_null_base_normal() { +; CHECK-NEXT: ret ptr inttoptr (i16 -2 to ptr) +; + %gep = getelementptr i8, ptr null, i8 -2 + ret ptr %gep +} +define ptr @test_inttoptr_base_normal() { +; CHECK-LABEL: define ptr @test_inttoptr_base_normal() { +; CHECK-NEXT: ret ptr null +; + %base = inttoptr i16 -1 to ptr + %gep = getelementptr i8, ptr %base, i8 1 + ret ptr %gep +} + +;; Transformation is fine for non-integral address space, but we can only change +;; the index bits: (i8 -2 == i16 254) +define ptr addrspace(1) @test_null_base_nonintegral() { +; CHECK-LABEL: define ptr addrspace(1) @test_null_base_nonintegral() { +; CHECK-NEXT: ret ptr addrspace(1) inttoptr (i16 254 to ptr addrspace(1)) +; + %gep = getelementptr i8, ptr addrspace(1) null, i8 -2 + ret ptr addrspace(1) %gep +} +define ptr addrspace(1) @test_inttoptr_base_nonintegral() { +; CHECK-LABEL: define ptr addrspace(1) @test_inttoptr_base_nonintegral() { +; CHECK-NEXT: ret ptr addrspace(1) inttoptr (i16 -256 to ptr addrspace(1)) +; + %base = inttoptr i16 -1 to ptr addrspace(1) + %gep = getelementptr i8, ptr addrspace(1) %base, i8 1 + ret ptr addrspace(1) %gep +} + +;; For unstable pointers we should avoid any introduction of inttoptr +define ptr addrspace(2) @test_null_base_unstable() { +; CHECK-LABEL: define ptr addrspace(2) @test_null_base_unstable() { +; CHECK-NEXT: ret ptr addrspace(2) getelementptr (i8, ptr addrspace(2) null, i8 -2) +; + %gep = getelementptr i8, ptr addrspace(2) null, i8 -2 + ret ptr addrspace(2) %gep +} +define ptr addrspace(2) @test_inttoptr_base_unstable() { +; CHECK-LABEL: define ptr addrspace(2) @test_inttoptr_base_unstable() { +; CHECK-NEXT: ret ptr addrspace(2) getelementptr (i8, ptr addrspace(2) inttoptr (i16 -1 to ptr addrspace(2)), i8 1) +; + %base = inttoptr i16 -1 to ptr addrspace(2) + %gep = getelementptr i8, ptr addrspace(2) %base, i8 1 + ret ptr addrspace(2) %gep +} + +;; The same is true for pointers with external state: no new inttoptr +define ptr addrspace(3) @test_null_base_external() { +; CHECK-LABEL: define ptr addrspace(3) @test_null_base_external() { +; CHECK-NEXT: ret ptr addrspace(3) getelementptr (i8, ptr addrspace(3) null, i8 -2) +; + %gep = getelementptr i8, ptr addrspace(3) null, i8 -2 + ret ptr addrspace(3) %gep +} + +define ptr addrspace(3) @test_inttoptr_base_external() { +; CHECK-LABEL: define ptr addrspace(3) @test_inttoptr_base_external() { +; CHECK-NEXT: 
ret ptr addrspace(3) getelementptr (i8, ptr addrspace(3) inttoptr (i16 -1 to ptr addrspace(3)), i8 1) +; + %base = inttoptr i16 -1 to ptr addrspace(3) + %gep = getelementptr i8, ptr addrspace(3) %base, i8 1 + ret ptr addrspace(3) %gep +} + +define <2 x ptr> @test_vec_null_base_normal() { +; CHECK-LABEL: define <2 x ptr> @test_vec_null_base_normal() { +; CHECK-NEXT: ret <2 x ptr> getelementptr (i8, <2 x ptr> zeroinitializer, <2 x i16> ) +; + %gep = getelementptr i8, <2 x ptr> , <2 x i8> + ret <2 x ptr> %gep +} +define <2 x ptr> @test_vec_inttoptr_base_normal() { +; CHECK-LABEL: define <2 x ptr> @test_vec_inttoptr_base_normal() { +; CHECK-NEXT: ret <2 x ptr> getelementptr (i8, <2 x ptr> , <2 x i16> ) +; + %base = inttoptr <2 x i16> to <2 x ptr> + %gep = getelementptr i8, <2 x ptr> %base, <2 x i8> + ret <2 x ptr> %gep +} + +;; Transformation is fine for non-integral address space, but we can only change +;; the index bits: (i8 -2 == i16 254) +define <2 x ptr addrspace(1)> @test_vec_null_base_nonintegral() { +; CHECK-LABEL: define <2 x ptr addrspace(1)> @test_vec_null_base_nonintegral() { +; CHECK-NEXT: ret <2 x ptr addrspace(1)> getelementptr (i8, <2 x ptr addrspace(1)> zeroinitializer, <2 x i8> ) +; + %gep = getelementptr i8, <2 x ptr addrspace(1)> , <2 x i8> + ret <2 x ptr addrspace(1)> %gep +} +define <2 x ptr addrspace(1)> @test_vec_inttoptr_base_nonintegral() { +; CHECK-LABEL: define <2 x ptr addrspace(1)> @test_vec_inttoptr_base_nonintegral() { +; CHECK-NEXT: ret <2 x ptr addrspace(1)> getelementptr (i8, <2 x ptr addrspace(1)> , <2 x i8> ) +; + %base = inttoptr <2 x i16> to <2 x ptr addrspace(1)> + %gep = getelementptr i8, <2 x ptr addrspace(1)> %base, <2 x i8> + ret <2 x ptr addrspace(1)> %gep +} + +;; For unstable pointers we should avoid any introduction of inttoptr +define <2 x ptr addrspace(2)> @test_vec_null_base_unstable() { +; CHECK-LABEL: define <2 x ptr addrspace(2)> @test_vec_null_base_unstable() { +; CHECK-NEXT: ret <2 x ptr addrspace(2)> getelementptr (i8, <2 x ptr addrspace(2)> zeroinitializer, <2 x i8> ) +; + %gep = getelementptr i8, <2 x ptr addrspace(2)> , <2 x i8> + ret <2 x ptr addrspace(2)> %gep +} +define <2 x ptr addrspace(2)> @test_vec_inttoptr_base_unstable() { +; CHECK-LABEL: define <2 x ptr addrspace(2)> @test_vec_inttoptr_base_unstable() { +; CHECK-NEXT: ret <2 x ptr addrspace(2)> getelementptr (i8, <2 x ptr addrspace(2)> , <2 x i8> ) +; + %base = inttoptr <2 x i16> to <2 x ptr addrspace(2)> + %gep = getelementptr i8, <2 x ptr addrspace(2)> %base, <2 x i8> + ret <2 x ptr addrspace(2)> %gep +} + +;; The same is true for pointers with external state: no new inttoptr +define <2 x ptr addrspace(3)> @test_vec_null_base_external() { +; CHECK-LABEL: define <2 x ptr addrspace(3)> @test_vec_null_base_external() { +; CHECK-NEXT: ret <2 x ptr addrspace(3)> getelementptr (i8, <2 x ptr addrspace(3)> zeroinitializer, <2 x i8> ) +; + %gep = getelementptr i8, <2 x ptr addrspace(3)> , <2 x i8> + ret <2 x ptr addrspace(3)> %gep +} + +define <2 x ptr addrspace(3)> @test_vec_inttoptr_base_external() { +; CHECK-LABEL: define <2 x ptr addrspace(3)> @test_vec_inttoptr_base_external() { +; CHECK-NEXT: ret <2 x ptr addrspace(3)> getelementptr (i8, <2 x ptr addrspace(3)> , <2 x i8> ) +; + %base = inttoptr <2 x i16> to <2 x ptr addrspace(3)> + %gep = getelementptr i8, <2 x ptr addrspace(3)> %base, <2 x i8> + ret <2 x ptr addrspace(3)> %gep +} diff --git a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2i-d2i.ll b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2i-d2i.ll index 
543c73137c1b6..b1a1e6b86c293 100644 --- a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2i-d2i.ll +++ b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2i-d2i.ll @@ -334,8 +334,7 @@ define i32 @test_neg_1_5_d2i_rz() { ;+-------------------------------------------------------------+ define i32 @test_neg_1_5_f2ui_rm() { ; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rm() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rm(float -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.f2ui.rm(float -1.5) ret i32 %res @@ -343,8 +342,7 @@ define i32 @test_neg_1_5_f2ui_rm() { define i32 @test_neg_1_5_f2ui_rn() { ; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rn() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rn(float -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.f2ui.rn(float -1.5) ret i32 %res @@ -353,8 +351,7 @@ define i32 @test_neg_1_5_f2ui_rn() { define i32 @test_neg_1_5_f2ui_rp() { ; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rp() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rp(float -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.f2ui.rp(float -1.5) ret i32 %res @@ -362,8 +359,7 @@ define i32 @test_neg_1_5_f2ui_rp() { define i32 @test_neg_1_5_f2ui_rz() { ; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rz() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rz(float -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.f2ui.rz(float -1.5) ret i32 %res @@ -374,8 +370,7 @@ define i32 @test_neg_1_5_f2ui_rz() { ;+-------------------------------------------------------------+ define i32 @test_neg_1_5_f2ui_rm_ftz() { ; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rm_ftz() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rm.ftz(float -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.f2ui.rm.ftz(float -1.5) ret i32 %res @@ -383,8 +378,7 @@ define i32 @test_neg_1_5_f2ui_rm_ftz() { define i32 @test_neg_1_5_f2ui_rn_ftz() { ; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rn_ftz() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rn.ftz(float -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.f2ui.rn.ftz(float -1.5) ret i32 %res @@ -392,8 +386,7 @@ define i32 @test_neg_1_5_f2ui_rn_ftz() { define i32 @test_neg_1_5_f2ui_rp_ftz() { ; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rp_ftz() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rp.ftz(float -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.f2ui.rp.ftz(float -1.5) ret i32 %res @@ -401,8 +394,7 @@ define i32 @test_neg_1_5_f2ui_rp_ftz() { define i32 @test_neg_1_5_f2ui_rz_ftz() { ; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rz_ftz() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rz.ftz(float -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.f2ui.rz.ftz(float -1.5) ret i32 %res @@ -412,8 +404,7 @@ define i32 @test_neg_1_5_f2ui_rz_ftz() { ;+-------------------------------------------------------------+ define i32 @test_neg_1_5_d2ui_rm() { ; CHECK-LABEL: define i32 @test_neg_1_5_d2ui_rm() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rm(double -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.d2ui.rm(double -1.5) ret i32 %res @@ -421,8 +412,7 @@ define i32 
@test_neg_1_5_d2ui_rm() { define i32 @test_neg_1_5_d2ui_rn() { ; CHECK-LABEL: define i32 @test_neg_1_5_d2ui_rn() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rn(double -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.d2ui.rn(double -1.5) ret i32 %res @@ -431,8 +421,7 @@ define i32 @test_neg_1_5_d2ui_rn() { define i32 @test_neg_1_5_d2ui_rp() { ; CHECK-LABEL: define i32 @test_neg_1_5_d2ui_rp() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rp(double -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.d2ui.rp(double -1.5) ret i32 %res @@ -440,8 +429,7 @@ define i32 @test_neg_1_5_d2ui_rp() { define i32 @test_neg_1_5_d2ui_rz() { ; CHECK-LABEL: define i32 @test_neg_1_5_d2ui_rz() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rz(double -1.500000e+00) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.d2ui.rz(double -1.5) ret i32 %res @@ -526,7 +514,7 @@ define i32 @test_nan_f2i_rz_ftz() { ;+-------------------------------------------------------------+ define i32 @test_nan_d2i_rm() { ; CHECK-LABEL: define i32 @test_nan_d2i_rm() { -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: ret i32 -2147483648 ; %res = call i32 @llvm.nvvm.d2i.rm(double 0xFFF8000000000000) ret i32 %res @@ -534,7 +522,7 @@ define i32 @test_nan_d2i_rm() { define i32 @test_nan_d2i_rn() { ; CHECK-LABEL: define i32 @test_nan_d2i_rn() { -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: ret i32 -2147483648 ; %res = call i32 @llvm.nvvm.d2i.rn(double 0xFFF8000000000000) ret i32 %res @@ -543,7 +531,7 @@ define i32 @test_nan_d2i_rn() { define i32 @test_nan_d2i_rp() { ; CHECK-LABEL: define i32 @test_nan_d2i_rp() { -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: ret i32 -2147483648 ; %res = call i32 @llvm.nvvm.d2i.rp(double 0xFFF8000000000000) ret i32 %res @@ -551,7 +539,7 @@ define i32 @test_nan_d2i_rp() { define i32 @test_nan_d2i_rz() { ; CHECK-LABEL: define i32 @test_nan_d2i_rz() { -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: ret i32 -2147483648 ; %res = call i32 @llvm.nvvm.d2i.rz(double 0xFFF8000000000000) ret i32 %res @@ -632,7 +620,7 @@ define i32 @test_nan_f2ui_rz_ftz() { ;+-------------------------------------------------------------+ define i32 @test_nan_d2ui_rm() { ; CHECK-LABEL: define i32 @test_nan_d2ui_rm() { -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: ret i32 -2147483648 ; %res = call i32 @llvm.nvvm.d2ui.rm(double 0xFFF8000000000000) ret i32 %res @@ -640,7 +628,7 @@ define i32 @test_nan_d2ui_rm() { define i32 @test_nan_d2ui_rn() { ; CHECK-LABEL: define i32 @test_nan_d2ui_rn() { -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: ret i32 -2147483648 ; %res = call i32 @llvm.nvvm.d2ui.rn(double 0xFFF8000000000000) ret i32 %res @@ -649,7 +637,7 @@ define i32 @test_nan_d2ui_rn() { define i32 @test_nan_d2ui_rp() { ; CHECK-LABEL: define i32 @test_nan_d2ui_rp() { -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: ret i32 -2147483648 ; %res = call i32 @llvm.nvvm.d2ui.rp(double 0xFFF8000000000000) ret i32 %res @@ -657,7 +645,7 @@ define i32 @test_nan_d2ui_rp() { define i32 @test_nan_d2ui_rz() { ; CHECK-LABEL: define i32 @test_nan_d2ui_rz() { -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: ret i32 -2147483648 ; %res = call i32 @llvm.nvvm.d2ui.rz(double 0xFFF8000000000000) ret i32 %res @@ -994,8 +982,7 @@ define i32 @test_neg_subnormal_d2i_rz() { ;+-------------------------------------------------------------+ define i32 @test_neg_subnormal_f2ui_rm() { ; CHECK-LABEL: define i32 @test_neg_subnormal_f2ui_rm() { -; CHECK-NEXT: [[RES:%.*]] = call 
i32 @llvm.nvvm.f2ui.rm(float 0xB80FFFFFC0000000) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.f2ui.rm(float 0xB80FFFFFC0000000) ret i32 %res @@ -1065,8 +1052,7 @@ define i32 @test_neg_subnormal_f2ui_rz_ftz() { ;+-------------------------------------------------------------+ define i32 @test_neg_subnormal_d2ui_rm() { ; CHECK-LABEL: define i32 @test_neg_subnormal_d2ui_rm() { -; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rm(double 0x800FFFFFFFFFFFFF) -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 0 ; %res = call i32 @llvm.nvvm.d2ui.rm(double 0x800fffffffffffff) ret i32 %res diff --git a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2ll-d2ll.ll b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2ll-d2ll.ll index be38177dce2c3..ffadf26f3c5b5 100644 --- a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2ll-d2ll.ll +++ b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2ll-d2ll.ll @@ -334,8 +334,7 @@ define i64 @test_neg_1_5_d2ll_rz() { ;+-------------------------------------------------------------+ define i64 @test_neg_1_5_f2ull_rm() { ; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rm() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rm(float -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.f2ull.rm(float -1.5) ret i64 %res @@ -343,8 +342,7 @@ define i64 @test_neg_1_5_f2ull_rm() { define i64 @test_neg_1_5_f2ull_rn() { ; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rn() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rn(float -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.f2ull.rn(float -1.5) ret i64 %res @@ -353,8 +351,7 @@ define i64 @test_neg_1_5_f2ull_rn() { define i64 @test_neg_1_5_f2ull_rp() { ; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rp() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rp(float -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.f2ull.rp(float -1.5) ret i64 %res @@ -362,8 +359,7 @@ define i64 @test_neg_1_5_f2ull_rp() { define i64 @test_neg_1_5_f2ull_rz() { ; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rz() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rz(float -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.f2ull.rz(float -1.5) ret i64 %res @@ -374,8 +370,7 @@ define i64 @test_neg_1_5_f2ull_rz() { ;+-------------------------------------------------------------+ define i64 @test_neg_1_5_f2ull_rm_ftz() { ; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rm_ftz() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rm.ftz(float -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.f2ull.rm.ftz(float -1.5) ret i64 %res @@ -383,8 +378,7 @@ define i64 @test_neg_1_5_f2ull_rm_ftz() { define i64 @test_neg_1_5_f2ull_rn_ftz() { ; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rn_ftz() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rn.ftz(float -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.f2ull.rn.ftz(float -1.5) ret i64 %res @@ -392,8 +386,7 @@ define i64 @test_neg_1_5_f2ull_rn_ftz() { define i64 @test_neg_1_5_f2ull_rp_ftz() { ; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rp_ftz() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rp.ftz(float -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.f2ull.rp.ftz(float -1.5) 
ret i64 %res @@ -401,8 +394,7 @@ define i64 @test_neg_1_5_f2ull_rp_ftz() { define i64 @test_neg_1_5_f2ull_rz_ftz() { ; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rz_ftz() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rz.ftz(float -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.f2ull.rz.ftz(float -1.5) ret i64 %res @@ -412,8 +404,7 @@ define i64 @test_neg_1_5_f2ull_rz_ftz() { ;+-------------------------------------------------------------+ define i64 @test_neg_1_5_d2ull_rm() { ; CHECK-LABEL: define i64 @test_neg_1_5_d2ull_rm() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rm(double -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.d2ull.rm(double -1.5) ret i64 %res @@ -421,8 +412,7 @@ define i64 @test_neg_1_5_d2ull_rm() { define i64 @test_neg_1_5_d2ull_rn() { ; CHECK-LABEL: define i64 @test_neg_1_5_d2ull_rn() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rn(double -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.d2ull.rn(double -1.5) ret i64 %res @@ -431,8 +421,7 @@ define i64 @test_neg_1_5_d2ull_rn() { define i64 @test_neg_1_5_d2ull_rp() { ; CHECK-LABEL: define i64 @test_neg_1_5_d2ull_rp() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rp(double -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.d2ull.rp(double -1.5) ret i64 %res @@ -440,8 +429,7 @@ define i64 @test_neg_1_5_d2ull_rp() { define i64 @test_neg_1_5_d2ull_rz() { ; CHECK-LABEL: define i64 @test_neg_1_5_d2ull_rz() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rz(double -1.500000e+00) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.d2ull.rz(double -1.5) ret i64 %res @@ -456,7 +444,7 @@ define i64 @test_neg_1_5_d2ull_rz() { ;+-------------------------------------------------------------+ define i64 @test_nan_f2ll_rm() { ; CHECK-LABEL: define i64 @test_nan_f2ll_rm() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ll.rm(float 0x7FFFFF0000000000) ret i64 %res @@ -464,7 +452,7 @@ define i64 @test_nan_f2ll_rm() { define i64 @test_nan_f2ll_rn() { ; CHECK-LABEL: define i64 @test_nan_f2ll_rn() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ll.rn(float 0x7FFFFF0000000000) ret i64 %res @@ -473,7 +461,7 @@ define i64 @test_nan_f2ll_rn() { define i64 @test_nan_f2ll_rp() { ; CHECK-LABEL: define i64 @test_nan_f2ll_rp() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ll.rp(float 0x7FFFFF0000000000) ret i64 %res @@ -481,7 +469,7 @@ define i64 @test_nan_f2ll_rp() { define i64 @test_nan_f2ll_rz() { ; CHECK-LABEL: define i64 @test_nan_f2ll_rz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ll.rz(float 0x7FFFFF0000000000) ret i64 %res @@ -492,7 +480,7 @@ define i64 @test_nan_f2ll_rz() { ;+-------------------------------------------------------------+ define i64 @test_nan_f2ll_rm_ftz() { ; CHECK-LABEL: define i64 @test_nan_f2ll_rm_ftz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ll.rm.ftz(float 0x7FFFFF0000000000) ret i64 %res @@ -500,7 +488,7 @@ define i64 @test_nan_f2ll_rm_ftz() { define i64 @test_nan_f2ll_rn_ftz() { ; CHECK-LABEL: define i64 @test_nan_f2ll_rn_ftz() { -; CHECK-NEXT: ret 
i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ll.rn.ftz(float 0x7FFFFF0000000000) ret i64 %res @@ -508,7 +496,7 @@ define i64 @test_nan_f2ll_rn_ftz() { define i64 @test_nan_f2ll_rp_ftz() { ; CHECK-LABEL: define i64 @test_nan_f2ll_rp_ftz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ll.rp.ftz(float 0x7FFFFF0000000000) ret i64 %res @@ -516,7 +504,7 @@ define i64 @test_nan_f2ll_rp_ftz() { define i64 @test_nan_f2ll_rz_ftz() { ; CHECK-LABEL: define i64 @test_nan_f2ll_rz_ftz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ll.rz.ftz(float 0x7FFFFF0000000000) ret i64 %res @@ -526,7 +514,7 @@ define i64 @test_nan_f2ll_rz_ftz() { ;+-------------------------------------------------------------+ define i64 @test_nan_d2ll_rm() { ; CHECK-LABEL: define i64 @test_nan_d2ll_rm() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.d2ll.rm(double 0xFFF8000000000000) ret i64 %res @@ -534,7 +522,7 @@ define i64 @test_nan_d2ll_rm() { define i64 @test_nan_d2ll_rn() { ; CHECK-LABEL: define i64 @test_nan_d2ll_rn() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.d2ll.rn(double 0xFFF8000000000000) ret i64 %res @@ -543,7 +531,7 @@ define i64 @test_nan_d2ll_rn() { define i64 @test_nan_d2ll_rp() { ; CHECK-LABEL: define i64 @test_nan_d2ll_rp() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.d2ll.rp(double 0xFFF8000000000000) ret i64 %res @@ -551,7 +539,7 @@ define i64 @test_nan_d2ll_rp() { define i64 @test_nan_d2ll_rz() { ; CHECK-LABEL: define i64 @test_nan_d2ll_rz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.d2ll.rz(double 0xFFF8000000000000) ret i64 %res @@ -562,7 +550,7 @@ define i64 @test_nan_d2ll_rz() { ;+-------------------------------------------------------------+ define i64 @test_nan_f2ull_rm() { ; CHECK-LABEL: define i64 @test_nan_f2ull_rm() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ull.rm(float 0x7FFFFF0000000000) ret i64 %res @@ -570,7 +558,7 @@ define i64 @test_nan_f2ull_rm() { define i64 @test_nan_f2ull_rn() { ; CHECK-LABEL: define i64 @test_nan_f2ull_rn() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ull.rn(float 0x7FFFFF0000000000) ret i64 %res @@ -579,7 +567,7 @@ define i64 @test_nan_f2ull_rn() { define i64 @test_nan_f2ull_rp() { ; CHECK-LABEL: define i64 @test_nan_f2ull_rp() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ull.rp(float 0x7FFFFF0000000000) ret i64 %res @@ -587,7 +575,7 @@ define i64 @test_nan_f2ull_rp() { define i64 @test_nan_f2ull_rz() { ; CHECK-LABEL: define i64 @test_nan_f2ull_rz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ull.rz(float 0x7FFFFF0000000000) ret i64 %res @@ -598,7 +586,7 @@ define i64 @test_nan_f2ull_rz() { ;+-------------------------------------------------------------+ define i64 @test_nan_f2ull_rm_ftz() { ; CHECK-LABEL: define i64 @test_nan_f2ull_rm_ftz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ull.rm.ftz(float 0x7FFFFF0000000000) ret i64 %res @@ -606,7 +594,7 @@ define i64 @test_nan_f2ull_rm_ftz() { define i64 
@test_nan_f2ull_rn_ftz() { ; CHECK-LABEL: define i64 @test_nan_f2ull_rn_ftz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ull.rn.ftz(float 0x7FFFFF0000000000) ret i64 %res @@ -614,7 +602,7 @@ define i64 @test_nan_f2ull_rn_ftz() { define i64 @test_nan_f2ull_rp_ftz() { ; CHECK-LABEL: define i64 @test_nan_f2ull_rp_ftz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ull.rp.ftz(float 0x7FFFFF0000000000) ret i64 %res @@ -622,7 +610,7 @@ define i64 @test_nan_f2ull_rp_ftz() { define i64 @test_nan_f2ull_rz_ftz() { ; CHECK-LABEL: define i64 @test_nan_f2ull_rz_ftz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.f2ull.rz.ftz(float 0x7FFFFF0000000000) ret i64 %res @@ -632,7 +620,7 @@ define i64 @test_nan_f2ull_rz_ftz() { ;+-------------------------------------------------------------+ define i64 @test_nan_d2ull_rm() { ; CHECK-LABEL: define i64 @test_nan_d2ull_rm() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.d2ull.rm(double 0xFFF8000000000000) ret i64 %res @@ -640,7 +628,7 @@ define i64 @test_nan_d2ull_rm() { define i64 @test_nan_d2ull_rn() { ; CHECK-LABEL: define i64 @test_nan_d2ull_rn() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.d2ull.rn(double 0xFFF8000000000000) ret i64 %res @@ -649,7 +637,7 @@ define i64 @test_nan_d2ull_rn() { define i64 @test_nan_d2ull_rp() { ; CHECK-LABEL: define i64 @test_nan_d2ull_rp() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.d2ull.rp(double 0xFFF8000000000000) ret i64 %res @@ -657,7 +645,7 @@ define i64 @test_nan_d2ull_rp() { define i64 @test_nan_d2ull_rz() { ; CHECK-LABEL: define i64 @test_nan_d2ull_rz() { -; CHECK-NEXT: ret i64 0 +; CHECK-NEXT: ret i64 -9223372036854775808 ; %res = call i64 @llvm.nvvm.d2ull.rz(double 0xFFF8000000000000) ret i64 %res @@ -994,8 +982,7 @@ define i64 @test_neg_subnormal_d2ll_rz() { ;+-------------------------------------------------------------+ define i64 @test_neg_subnormal_f2ull_rm() { ; CHECK-LABEL: define i64 @test_neg_subnormal_f2ull_rm() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rm(float 0xB80FFFFFC0000000) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.f2ull.rm(float 0xB80FFFFFC0000000) ret i64 %res @@ -1065,8 +1052,7 @@ define i64 @test_neg_subnormal_f2ull_rz_ftz() { ;+-------------------------------------------------------------+ define i64 @test_neg_subnormal_d2ull_rm() { ; CHECK-LABEL: define i64 @test_neg_subnormal_d2ull_rm() { -; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rm(double 0x800FFFFFFFFFFFFF) -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 0 ; %res = call i64 @llvm.nvvm.d2ull.rm(double 0x800fffffffffffff) ret i64 %res diff --git a/llvm/test/Transforms/InstSimplify/fminmax-folds.ll b/llvm/test/Transforms/InstSimplify/fminmax-folds.ll index fff6cfd8a3b4b..26b51146057e9 100644 --- a/llvm/test/Transforms/InstSimplify/fminmax-folds.ll +++ b/llvm/test/Transforms/InstSimplify/fminmax-folds.ll @@ -1,1388 +1,854 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -passes=instsimplify -S | FileCheck %s -declare half @llvm.minimum.f16(half, half) -declare half @llvm.maximum.f16(half, half) - -declare float @llvm.minnum.f32(float, float) -declare float @llvm.maxnum.f32(float, float) -declare 
float @llvm.minimum.f32(float, float) -declare float @llvm.maximum.f32(float, float) -declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>) -declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>) -declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>) -declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>) - -declare double @llvm.minnum.f64(double, double) -declare double @llvm.maxnum.f64(double, double) -declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>) -declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>) -declare double @llvm.minimum.f64(double, double) -declare double @llvm.maximum.f64(double, double) -declare <2 x double> @llvm.minimum.v2f64(<2 x double>, <2 x double>) -declare <2 x double> @llvm.maximum.v2f64(<2 x double>, <2 x double>) - -define float @test_minnum_const_nan(float %x) { -; CHECK-LABEL: @test_minnum_const_nan( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call float @llvm.minnum.f32(float %x, float 0x7fff000000000000) - ret float %r -} - -define float @test_maxnum_const_nan(float %x) { -; CHECK-LABEL: @test_maxnum_const_nan( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call float @llvm.maxnum.f32(float %x, float 0x7fff000000000000) - ret float %r -} - -define float @test_maximum_const_nan(float %x) { -; CHECK-LABEL: @test_maximum_const_nan( -; CHECK-NEXT: ret float 0x7FFF000000000000 -; - %r = call float @llvm.maximum.f32(float %x, float 0x7fff000000000000) - ret float %r -} - -define float @test_minimum_const_nan(float %x) { -; CHECK-LABEL: @test_minimum_const_nan( -; CHECK-NEXT: ret float 0x7FFF000000000000 -; - %r = call float @llvm.minimum.f32(float %x, float 0x7fff000000000000) - ret float %r -} - -define float @test_minnum_const_inf(float %x) { -; CHECK-LABEL: @test_minnum_const_inf( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float 0x7FF0000000000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.minnum.f32(float %x, float 0x7ff0000000000000) - ret float %r -} - -define float @test_maxnum_const_inf(float %x) { -; CHECK-LABEL: @test_maxnum_const_inf( -; CHECK-NEXT: ret float 0x7FF0000000000000 -; - %r = call float @llvm.maxnum.f32(float %x, float 0x7ff0000000000000) - ret float %r -} - -define float @test_maximum_const_inf(float %x) { -; CHECK-LABEL: @test_maximum_const_inf( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float 0x7FF0000000000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.maximum.f32(float %x, float 0x7ff0000000000000) - ret float %r -} - -define float @test_minimum_const_inf(float %x) { -; CHECK-LABEL: @test_minimum_const_inf( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call float @llvm.minimum.f32(float %x, float 0x7ff0000000000000) - ret float %r -} - -define float @test_minnum_const_neg_inf(float %x) { -; CHECK-LABEL: @test_minnum_const_neg_inf( -; CHECK-NEXT: ret float 0xFFF0000000000000 -; - %r = call float @llvm.minnum.f32(float %x, float 0xfff0000000000000) - ret float %r -} - -define float @test_maxnum_const_neg_inf(float %x) { -; CHECK-LABEL: @test_maxnum_const_neg_inf( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float 0xFFF0000000000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.maxnum.f32(float %x, float 0xfff0000000000000) - ret float %r -} - -define float @test_maximum_const_neg_inf(float %x) { -; CHECK-LABEL: @test_maximum_const_neg_inf( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call float @llvm.maximum.f32(float %x, float 
0xfff0000000000000) - ret float %r -} - -define float @test_minimum_const_neg_inf(float %x) { -; CHECK-LABEL: @test_minimum_const_neg_inf( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float 0xFFF0000000000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.minimum.f32(float %x, float 0xfff0000000000000) - ret float %r -} - -define float @test_minnum_const_inf_nnan(float %x) { -; CHECK-LABEL: @test_minnum_const_inf_nnan( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan float @llvm.minnum.f32(float %x, float 0x7ff0000000000000) - ret float %r -} - -define float @test_maxnum_const_inf_nnan(float %x) { -; CHECK-LABEL: @test_maxnum_const_inf_nnan( -; CHECK-NEXT: ret float 0x7FF0000000000000 -; - %r = call nnan float @llvm.maxnum.f32(float %x, float 0x7ff0000000000000) - ret float %r -} - -define float @test_maximum_const_inf_nnan(float %x) { -; CHECK-LABEL: @test_maximum_const_inf_nnan( -; CHECK-NEXT: ret float 0x7FF0000000000000 -; - %r = call nnan float @llvm.maximum.f32(float %x, float 0x7ff0000000000000) - ret float %r -} - -define float @test_minimum_const_inf_nnan(float %x) { -; CHECK-LABEL: @test_minimum_const_inf_nnan( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan float @llvm.minimum.f32(float %x, float 0x7ff0000000000000) - ret float %r -} - -define float @test_minnum_const_inf_nnan_comm(float %x) { -; CHECK-LABEL: @test_minnum_const_inf_nnan_comm( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan float @llvm.minnum.f32(float 0x7ff0000000000000, float %x) - ret float %r -} - -define float @test_maxnum_const_inf_nnan_comm(float %x) { -; CHECK-LABEL: @test_maxnum_const_inf_nnan_comm( -; CHECK-NEXT: ret float 0x7FF0000000000000 -; - %r = call nnan float @llvm.maxnum.f32(float 0x7ff0000000000000, float %x) - ret float %r -} - -define float @test_maximum_const_inf_nnan_comm(float %x) { -; CHECK-LABEL: @test_maximum_const_inf_nnan_comm( -; CHECK-NEXT: ret float 0x7FF0000000000000 -; - %r = call nnan float @llvm.maximum.f32(float 0x7ff0000000000000, float %x) - ret float %r -} - -define float @test_minimum_const_inf_nnan_comm(float %x) { -; CHECK-LABEL: @test_minimum_const_inf_nnan_comm( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan float @llvm.minimum.f32(float 0x7ff0000000000000, float %x) - ret float %r -} - -define <2 x float> @test_minnum_const_inf_nnan_comm_vec(<2 x float> %x) { -; CHECK-LABEL: @test_minnum_const_inf_nnan_comm_vec( -; CHECK-NEXT: ret <2 x float> [[X:%.*]] -; - %r = call nnan <2 x float> @llvm.minnum.v2f32(<2 x float> , <2 x float> %x) - ret <2 x float> %r -} - -define <2 x float> @test_maxnum_const_inf_nnan_comm_vec(<2 x float> %x) { -; CHECK-LABEL: @test_maxnum_const_inf_nnan_comm_vec( -; CHECK-NEXT: ret <2 x float> splat (float 0x7FF0000000000000) -; - %r = call nnan <2 x float> @llvm.maxnum.v2f32(<2 x float> , <2 x float> %x) - ret <2 x float> %r -} - -define <2 x float> @test_maximum_const_inf_nnan_comm_vec(<2 x float> %x) { -; CHECK-LABEL: @test_maximum_const_inf_nnan_comm_vec( -; CHECK-NEXT: ret <2 x float> splat (float 0x7FF0000000000000) -; - %r = call nnan <2 x float> @llvm.maximum.v2f32(<2 x float> , <2 x float> %x) - ret <2 x float> %r -} - -define <2 x float> @test_minimum_const_inf_nnan_comm_vec(<2 x float> %x) { -; CHECK-LABEL: @test_minimum_const_inf_nnan_comm_vec( -; CHECK-NEXT: ret <2 x float> [[X:%.*]] -; - %r = call nnan <2 x float> @llvm.minimum.v2f32(<2 x float> , <2 x float> %x) - ret <2 x float> %r -} - -define float @test_minnum_const_neg_inf_nnan(float %x) { -; CHECK-LABEL: 
@test_minnum_const_neg_inf_nnan( -; CHECK-NEXT: ret float 0xFFF0000000000000 -; - %r = call nnan float @llvm.minnum.f32(float %x, float 0xfff0000000000000) - ret float %r -} - -define float @test_maxnum_const_neg_inf_nnan(float %x) { -; CHECK-LABEL: @test_maxnum_const_neg_inf_nnan( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan float @llvm.maxnum.f32(float %x, float 0xfff0000000000000) - ret float %r -} - -define float @test_maximum_const_neg_inf_nnan(float %x) { -; CHECK-LABEL: @test_maximum_const_neg_inf_nnan( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan float @llvm.maximum.f32(float %x, float 0xfff0000000000000) - ret float %r -} - -define float @test_minimum_const_neg_inf_nnan(float %x) { -; CHECK-LABEL: @test_minimum_const_neg_inf_nnan( -; CHECK-NEXT: ret float 0xFFF0000000000000 -; - %r = call nnan float @llvm.minimum.f32(float %x, float 0xfff0000000000000) - ret float %r -} - -define float @test_minnum_const_max(float %x) { -; CHECK-LABEL: @test_minnum_const_max( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.minnum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_maxnum_const_max(float %x) { -; CHECK-LABEL: @test_maxnum_const_max( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.maxnum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_maximum_const_max(float %x) { -; CHECK-LABEL: @test_maximum_const_max( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.maximum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_minimum_const_max(float %x) { -; CHECK-LABEL: @test_minimum_const_max( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.minimum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_minnum_const_neg_max(float %x) { -; CHECK-LABEL: @test_minnum_const_neg_max( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.minnum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_maxnum_const_neg_max(float %x) { -; CHECK-LABEL: @test_maxnum_const_neg_max( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.maxnum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_maximum_const_neg_max(float %x) { -; CHECK-LABEL: @test_maximum_const_neg_max( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.maximum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_minimum_const_neg_max(float %x) { -; CHECK-LABEL: @test_minimum_const_neg_max( -; CHECK-NEXT: [[R:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call float @llvm.minimum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_minnum_const_max_ninf(float %x) { -; CHECK-LABEL: 
@test_minnum_const_max_ninf( -; CHECK-NEXT: [[R:%.*]] = call ninf float @llvm.minnum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call ninf float @llvm.minnum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_maxnum_const_max_ninf(float %x) { -; CHECK-LABEL: @test_maxnum_const_max_ninf( -; CHECK-NEXT: ret float 0x47EFFFFFE0000000 -; - %r = call ninf float @llvm.maxnum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_maximum_const_max_ninf(float %x) { -; CHECK-LABEL: @test_maximum_const_max_ninf( -; CHECK-NEXT: [[R:%.*]] = call ninf float @llvm.maximum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call ninf float @llvm.maximum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_minimum_const_max_ninf(float %x) { -; CHECK-LABEL: @test_minimum_const_max_ninf( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call ninf float @llvm.minimum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_minnum_const_neg_max_ninf(float %x) { -; CHECK-LABEL: @test_minnum_const_neg_max_ninf( -; CHECK-NEXT: ret float 0xC7EFFFFFE0000000 -; - %r = call ninf float @llvm.minnum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_maxnum_const_neg_max_ninf(float %x) { -; CHECK-LABEL: @test_maxnum_const_neg_max_ninf( -; CHECK-NEXT: [[R:%.*]] = call ninf float @llvm.maxnum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call ninf float @llvm.maxnum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_maximum_const_neg_max_ninf(float %x) { -; CHECK-LABEL: @test_maximum_const_neg_max_ninf( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call ninf float @llvm.maximum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_minimum_const_neg_max_ninf(float %x) { -; CHECK-LABEL: @test_minimum_const_neg_max_ninf( -; CHECK-NEXT: [[R:%.*]] = call ninf float @llvm.minimum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000) -; CHECK-NEXT: ret float [[R]] -; - %r = call ninf float @llvm.minimum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_minnum_const_max_nnan_ninf(float %x) { -; CHECK-LABEL: @test_minnum_const_max_nnan_ninf( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan ninf float @llvm.minnum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_maxnum_const_max_nnan_ninf(float %x) { -; CHECK-LABEL: @test_maxnum_const_max_nnan_ninf( -; CHECK-NEXT: ret float 0x47EFFFFFE0000000 -; - %r = call nnan ninf float @llvm.maxnum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_maximum_const_max_nnan_ninf(float %x) { -; CHECK-LABEL: @test_maximum_const_max_nnan_ninf( -; CHECK-NEXT: ret float 0x47EFFFFFE0000000 -; - %r = call nnan ninf float @llvm.maximum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_minimum_const_max_nnan_ninf(float %x) { -; CHECK-LABEL: @test_minimum_const_max_nnan_ninf( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan ninf float @llvm.minimum.f32(float %x, float 0x47efffffe0000000) - ret float %r -} - -define float @test_minnum_const_neg_max_nnan_ninf(float %x) { -; CHECK-LABEL: @test_minnum_const_neg_max_nnan_ninf( -; CHECK-NEXT: ret float 0xC7EFFFFFE0000000 -; - %r = call nnan ninf float @llvm.minnum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float 
@test_maxnum_const_neg_max_nnan_ninf(float %x) { -; CHECK-LABEL: @test_maxnum_const_neg_max_nnan_ninf( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan ninf float @llvm.maxnum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_maximum_const_neg_max_nnan_ninf(float %x) { -; CHECK-LABEL: @test_maximum_const_neg_max_nnan_ninf( -; CHECK-NEXT: ret float [[X:%.*]] -; - %r = call nnan ninf float @llvm.maximum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -define float @test_minimum_const_neg_max_nnan_ninf(float %x) { -; CHECK-LABEL: @test_minimum_const_neg_max_nnan_ninf( -; CHECK-NEXT: ret float 0xC7EFFFFFE0000000 -; - %r = call nnan ninf float @llvm.minimum.f32(float %x, float 0xc7efffffe0000000) - ret float %r -} - -; From the LangRef for minnum/maxnum: -; "If either operand is a NaN, returns the other non-NaN operand." - -define double @maxnum_nan_op0(double %x) { -; CHECK-LABEL: @maxnum_nan_op0( -; CHECK-NEXT: ret double [[X:%.*]] -; - %r = call double @llvm.maxnum.f64(double 0x7ff8000000000000, double %x) - ret double %r -} - -define double @maxnum_nan_op1(double %x) { -; CHECK-LABEL: @maxnum_nan_op1( -; CHECK-NEXT: ret double [[X:%.*]] -; - %r = call double @llvm.maxnum.f64(double %x, double 0x7ff800000000dead) - ret double %r -} - -define double @minnum_nan_op0(double %x) { -; CHECK-LABEL: @minnum_nan_op0( -; CHECK-NEXT: ret double [[X:%.*]] -; - %r = call double @llvm.minnum.f64(double 0x7ff8000dead00000, double %x) - ret double %r -} - -define double @minnum_nan_op1(double %x) { -; CHECK-LABEL: @minnum_nan_op1( -; CHECK-NEXT: ret double [[X:%.*]] -; - %r = call double @llvm.minnum.f64(double %x, double 0x7ff800dead00dead) - ret double %r -} - -define <2 x double> @maxnum_nan_op0_vec(<2 x double> %x) { -; CHECK-LABEL: @maxnum_nan_op0_vec( -; CHECK-NEXT: ret <2 x double> [[X:%.*]] -; - %r = call <2 x double> @llvm.maxnum.v2f64(<2 x double> , <2 x double> %x) - ret <2 x double> %r -} - -define <2 x double> @maxnum_nan_op1_vec(<2 x double> %x) { -; CHECK-LABEL: @maxnum_nan_op1_vec( -; CHECK-NEXT: ret <2 x double> [[X:%.*]] -; - %r = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double> ) - ret <2 x double> %r -} - -define <2 x double> @minnum_nan_op0_vec(<2 x double> %x) { -; CHECK-LABEL: @minnum_nan_op0_vec( -; CHECK-NEXT: ret <2 x double> [[X:%.*]] -; - %r = call <2 x double> @llvm.minnum.v2f64(<2 x double> , <2 x double> %x) - ret <2 x double> %r -} - -define <2 x double> @minnum_nan_op1_vec(<2 x double> %x) { -; CHECK-LABEL: @minnum_nan_op1_vec( -; CHECK-NEXT: ret <2 x double> [[X:%.*]] -; - %r = call <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> ) - ret <2 x double> %r -} - -define float @maxnum_undef_op1(float %x) { -; CHECK-LABEL: @maxnum_undef_op1( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.maxnum.f32(float %x, float undef) - ret float %val -} - -define float @maxnum_poison_op1(float %x) { -; CHECK-LABEL: @maxnum_poison_op1( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.maxnum.f32(float %x, float poison) - ret float %val -} - -define float @maxnum_undef_op0(float %x) { -; CHECK-LABEL: @maxnum_undef_op0( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.maxnum.f32(float undef, float %x) - ret float %val -} - -define float @maxnum_poison_op0(float %x) { -; CHECK-LABEL: @maxnum_poison_op0( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.maxnum.f32(float poison, float %x) - ret float %val -} - -define float @minnum_undef_op1(float %x) { -; 
CHECK-LABEL: @minnum_undef_op1( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.minnum.f32(float %x, float undef) - ret float %val -} - -define float @minnum_poison_op1(float %x) { -; CHECK-LABEL: @minnum_poison_op1( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.minnum.f32(float %x, float poison) - ret float %val -} - -define float @minnum_undef_op0(float %x) { -; CHECK-LABEL: @minnum_undef_op0( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.minnum.f32(float undef, float %x) - ret float %val -} - -define float @minnum_poison_op0(float %x) { -; CHECK-LABEL: @minnum_poison_op0( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.minnum.f32(float poison, float %x) - ret float %val -} - -define float @minnum_undef_undef(float %x) { -; CHECK-LABEL: @minnum_undef_undef( -; CHECK-NEXT: ret float undef -; - %val = call float @llvm.minnum.f32(float undef, float undef) - ret float %val -} - -define float @minnum_poison_undef(float %x) { -; CHECK-LABEL: @minnum_poison_undef( -; CHECK-NEXT: ret float undef -; - %val = call float @llvm.minnum.f32(float poison, float undef) - ret float %val -} - -define float @minnum_undef_poison(float %x) { -; CHECK-LABEL: @minnum_undef_poison( -; CHECK-NEXT: ret float poison -; - %val = call float @llvm.minnum.f32(float undef, float poison) - ret float %val -} - -define float @maxnum_undef_undef(float %x) { -; CHECK-LABEL: @maxnum_undef_undef( -; CHECK-NEXT: ret float undef -; - %val = call float @llvm.maxnum.f32(float undef, float undef) - ret float %val -} - -define float @maxnum_poison_undef(float %x) { -; CHECK-LABEL: @maxnum_poison_undef( -; CHECK-NEXT: ret float undef -; - %val = call float @llvm.maxnum.f32(float poison, float undef) - ret float %val -} - -define float @maxnum_undef_poison(float %x) { -; CHECK-LABEL: @maxnum_undef_poison( -; CHECK-NEXT: ret float poison -; - %val = call float @llvm.maxnum.f32(float undef, float poison) - ret float %val -} - -define float @minnum_same_args(float %x) { -; CHECK-LABEL: @minnum_same_args( -; CHECK-NEXT: ret float [[X:%.*]] -; - %y = call float @llvm.minnum.f32(float %x, float %x) - ret float %y -} - -define float @maxnum_same_args(float %x) { -; CHECK-LABEL: @maxnum_same_args( -; CHECK-NEXT: ret float [[X:%.*]] -; - %y = call float @llvm.maxnum.f32(float %x, float %x) - ret float %y -} - -define float @minnum_x_minnum_x_y(float %x, float %y) { -; CHECK-LABEL: @minnum_x_minnum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.minnum.f32(float %x, float %y) - %b = call float @llvm.minnum.f32(float %x, float %a) - ret float %b -} - -define float @minnum_y_minnum_x_y(float %x, float %y) { -; CHECK-LABEL: @minnum_y_minnum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.minnum.f32(float %x, float %y) - %b = call float @llvm.minnum.f32(float %y, float %a) - ret float %b -} - -define float @minnum_x_y_minnum_x(float %x, float %y) { -; CHECK-LABEL: @minnum_x_y_minnum_x( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.minnum.f32(float %x, float %y) - %b = call float @llvm.minnum.f32(float %a, float %x) - ret float %b -} - -define float @minnum_x_y_minnum_y(float %x, float %y) { -; CHECK-LABEL: @minnum_x_y_minnum_y( -; CHECK-NEXT: [[A:%.*]] = call float 
@llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.minnum.f32(float %x, float %y) - %b = call float @llvm.minnum.f32(float %a, float %y) - ret float %b -} - -; negative test - -define float @minnum_z_minnum_x_y(float %x, float %y, float %z) { -; CHECK-LABEL: @minnum_z_minnum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[B:%.*]] = call float @llvm.minnum.f32(float [[Z:%.*]], float [[A]]) -; CHECK-NEXT: ret float [[B]] -; - %a = call float @llvm.minnum.f32(float %x, float %y) - %b = call float @llvm.minnum.f32(float %z, float %a) - ret float %b -} - -; negative test - -define float @minnum_x_y_minnum_z(float %x, float %y, float %z) { -; CHECK-LABEL: @minnum_x_y_minnum_z( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[B:%.*]] = call float @llvm.minnum.f32(float [[A]], float [[Z:%.*]]) -; CHECK-NEXT: ret float [[B]] -; - %a = call float @llvm.minnum.f32(float %x, float %y) - %b = call float @llvm.minnum.f32(float %a, float %z) - ret float %b -} - -; minnum(X, -INF) --> -INF - -define float @minnum_neginf(float %x) { -; CHECK-LABEL: @minnum_neginf( -; CHECK-NEXT: ret float 0xFFF0000000000000 -; - %val = call float @llvm.minnum.f32(float %x, float 0xFFF0000000000000) - ret float %val -} - -define <2 x double> @minnum_neginf_commute_vec(<2 x double> %x) { -; CHECK-LABEL: @minnum_neginf_commute_vec( -; CHECK-NEXT: ret <2 x double> splat (double 0xFFF0000000000000) -; - %r = call <2 x double> @llvm.minnum.v2f64(<2 x double> , <2 x double> %x) - ret <2 x double> %r -} - -; negative test - -define float @minnum_inf(float %x) { -; CHECK-LABEL: @minnum_inf( -; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.minnum.f32(float 0x7FF0000000000000, float [[X:%.*]]) -; CHECK-NEXT: ret float [[VAL]] -; - %val = call float @llvm.minnum.f32(float 0x7FF0000000000000, float %x) - ret float %val -} -define float @maxnum_x_maxnum_x_y(float %x, float %y) { -; CHECK-LABEL: @maxnum_x_maxnum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.maxnum.f32(float %x, float %y) - %b = call float @llvm.maxnum.f32(float %x, float %a) - ret float %b -} - -define float @maxnum_y_maxnum_x_y(float %x, float %y) { -; CHECK-LABEL: @maxnum_y_maxnum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.maxnum.f32(float %x, float %y) - %b = call float @llvm.maxnum.f32(float %y, float %a) - ret float %b -} - -define float @maxnum_x_y_maxnum_x(float %x, float %y) { -; CHECK-LABEL: @maxnum_x_y_maxnum_x( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.maxnum.f32(float %x, float %y) - %b = call float @llvm.maxnum.f32(float %a, float %x) - ret float %b -} - -define float @maxnum_x_y_maxnum_y(float %x, float %y) { -; CHECK-LABEL: @maxnum_x_y_maxnum_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.maxnum.f32(float %x, float %y) - %b = call float @llvm.maxnum.f32(float %a, float %y) - ret float %b -} - -; negative test - -define float @maxnum_z_maxnum_x_y(float %x, float %y, float %z) { -; CHECK-LABEL: @maxnum_z_maxnum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float 
@llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[B:%.*]] = call float @llvm.maxnum.f32(float [[Z:%.*]], float [[A]]) -; CHECK-NEXT: ret float [[B]] -; - %a = call float @llvm.maxnum.f32(float %x, float %y) - %b = call float @llvm.maxnum.f32(float %z, float %a) - ret float %b -} - -; negative test - -define float @maxnum_x_y_maxnum_z(float %x, float %y, float %z) { -; CHECK-LABEL: @maxnum_x_y_maxnum_z( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[B:%.*]] = call float @llvm.maxnum.f32(float [[A]], float [[Z:%.*]]) -; CHECK-NEXT: ret float [[B]] -; - %a = call float @llvm.maxnum.f32(float %x, float %y) - %b = call float @llvm.maxnum.f32(float %a, float %z) - ret float %b -} - -; maxnum(X, INF) --> INF - -define <2 x double> @maxnum_inf(<2 x double> %x) { -; CHECK-LABEL: @maxnum_inf( -; CHECK-NEXT: ret <2 x double> splat (double 0x7FF0000000000000) -; - %val = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double>) - ret <2 x double> %val -} - -define float @maxnum_inf_commute(float %x) { -; CHECK-LABEL: @maxnum_inf_commute( -; CHECK-NEXT: ret float 0x7FF0000000000000 -; - %val = call float @llvm.maxnum.f32(float 0x7FF0000000000000, float %x) - ret float %val -} - -; negative test - -define float @maxnum_neginf(float %x) { -; CHECK-LABEL: @maxnum_neginf( -; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.maxnum.f32(float 0xFFF0000000000000, float [[X:%.*]]) -; CHECK-NEXT: ret float [[VAL]] -; - %val = call float @llvm.maxnum.f32(float 0xFFF0000000000000, float %x) - ret float %val -} - -; From the LangRef for minimum/maximum: -; "If either operand is a NaN, returns NaN." - -define double @maximum_nan_op0(double %x) { -; CHECK-LABEL: @maximum_nan_op0( -; CHECK-NEXT: ret double 0x7FF8000000000000 -; - %r = call double @llvm.maximum.f64(double 0x7ff8000000000000, double %x) - ret double %r -} - -define double @maximum_nan_op1(double %x) { -; CHECK-LABEL: @maximum_nan_op1( -; CHECK-NEXT: ret double 0x7FF800000000DEAD -; - %r = call double @llvm.maximum.f64(double %x, double 0x7ff800000000dead) - ret double %r -} - -define double @minimum_nan_op0(double %x) { -; CHECK-LABEL: @minimum_nan_op0( -; CHECK-NEXT: ret double 0x7FF8000DEAD00000 -; - %r = call double @llvm.minimum.f64(double 0x7ff8000dead00000, double %x) - ret double %r -} - -define double @minimum_nan_op1(double %x) { -; CHECK-LABEL: @minimum_nan_op1( -; CHECK-NEXT: ret double 0x7FF800DEAD00DEAD -; - %r = call double @llvm.minimum.f64(double %x, double 0x7ff800dead00dead) - ret double %r -} - -define <2 x double> @maximum_nan_op0_vec_partial_poison(<2 x double> %x) { -; CHECK-LABEL: @maximum_nan_op0_vec_partial_poison( -; CHECK-NEXT: ret <2 x double> -; - %r = call <2 x double> @llvm.maximum.v2f64(<2 x double> , <2 x double> %x) - ret <2 x double> %r -} - -define <2 x double> @maximum_nan_op1_vec_partial_poison(<2 x double> %x) { -; CHECK-LABEL: @maximum_nan_op1_vec_partial_poison( -; CHECK-NEXT: ret <2 x double> -; - %r = call <2 x double> @llvm.maximum.v2f64(<2 x double> %x, <2 x double> ) - ret <2 x double> %r -} - -define <2 x double> @maximum_nan_op1_vec(<2 x double> %x) { -; CHECK-LABEL: @maximum_nan_op1_vec( -; CHECK-NEXT: ret <2 x double> -; - %r = call <2 x double> @llvm.maximum.v2f64(<2 x double> %x, <2 x double> ) - ret <2 x double> %r -} - -define <2 x double> @minimum_nan_op0_vec_partial_poison(<2 x double> %x) { -; CHECK-LABEL: @minimum_nan_op0_vec_partial_poison( -; CHECK-NEXT: ret <2 x double> -; - %r = call <2 x double> 
@llvm.minimum.v2f64(<2 x double> , <2 x double> %x) - ret <2 x double> %r -} - -define <2 x double> @minimum_nan_op1_vec_partial_poison(<2 x double> %x) { -; CHECK-LABEL: @minimum_nan_op1_vec_partial_poison( -; CHECK-NEXT: ret <2 x double> -; - %r = call <2 x double> @llvm.minimum.v2f64(<2 x double> %x, <2 x double> ) - ret <2 x double> %r -} - -define <2 x double> @minimum_nan_op1_vec(<2 x double> %x) { -; CHECK-LABEL: @minimum_nan_op1_vec( -; CHECK-NEXT: ret <2 x double> splat (double 0x7FF800DEAD00DEAD) -; - %r = call <2 x double> @llvm.minimum.v2f64(<2 x double> %x, <2 x double> ) - ret <2 x double> %r -} - -define float @maximum_undef_op1(float %x) { -; CHECK-LABEL: @maximum_undef_op1( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.maximum.f32(float %x, float undef) - ret float %val -} - -define float @maximum_poison_op1(float %x) { -; CHECK-LABEL: @maximum_poison_op1( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.maximum.f32(float %x, float poison) - ret float %val -} - -define float @maximum_undef_op0(float %x) { -; CHECK-LABEL: @maximum_undef_op0( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.maximum.f32(float undef, float %x) - ret float %val -} - -define float @maximum_poison_op0(float %x) { -; CHECK-LABEL: @maximum_poison_op0( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.maximum.f32(float poison, float %x) - ret float %val -} - -define float @minimum_undef_op1(float %x) { -; CHECK-LABEL: @minimum_undef_op1( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.minimum.f32(float %x, float undef) - ret float %val -} - -define float @minimum_poison_op1(float %x) { -; CHECK-LABEL: @minimum_poison_op1( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.minimum.f32(float %x, float poison) - ret float %val -} - -define float @minimum_undef_op0(float %x) { -; CHECK-LABEL: @minimum_undef_op0( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.minimum.f32(float undef, float %x) - ret float %val -} - -define float @minimum_poison_op0(float %x) { -; CHECK-LABEL: @minimum_poison_op0( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.minimum.f32(float poison, float %x) - ret float %val -} - -define float @minimum_undef_undef(float %x) { -; CHECK-LABEL: @minimum_undef_undef( -; CHECK-NEXT: ret float undef -; - %val = call float @llvm.minimum.f32(float undef, float undef) - ret float %val -} - -define float @maximum_undef_undef(float %x) { -; CHECK-LABEL: @maximum_undef_undef( -; CHECK-NEXT: ret float undef -; - %val = call float @llvm.maximum.f32(float undef, float undef) - ret float %val -} - -define float @minimum_same_args(float %x) { -; CHECK-LABEL: @minimum_same_args( -; CHECK-NEXT: ret float [[X:%.*]] -; - %y = call float @llvm.minimum.f32(float %x, float %x) - ret float %y -} - -define float @maximum_same_args(float %x) { -; CHECK-LABEL: @maximum_same_args( -; CHECK-NEXT: ret float [[X:%.*]] -; - %y = call float @llvm.maximum.f32(float %x, float %x) - ret float %y -} - -define float @minimum_x_minimum_x_y(float %x, float %y) { -; CHECK-LABEL: @minimum_x_minimum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.minimum.f32(float %x, float %y) - %b = call float @llvm.minimum.f32(float %x, float %a) - ret float %b -} - -define float @minimum_y_minimum_x_y(float %x, float %y) { -; CHECK-LABEL: @minimum_y_minimum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float 
@llvm.minimum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.minimum.f32(float %x, float %y) - %b = call float @llvm.minimum.f32(float %y, float %a) - ret float %b -} - -define float @minimum_x_y_minimum_x(float %x, float %y) { -; CHECK-LABEL: @minimum_x_y_minimum_x( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.minimum.f32(float %x, float %y) - %b = call float @llvm.minimum.f32(float %a, float %x) - ret float %b -} - -define float @minimum_x_y_minimum_y(float %x, float %y) { -; CHECK-LABEL: @minimum_x_y_minimum_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.minimum.f32(float %x, float %y) - %b = call float @llvm.minimum.f32(float %a, float %y) - ret float %b -} - -; negative test - -define float @minimum_z_minimum_x_y(float %x, float %y, float %z) { -; CHECK-LABEL: @minimum_z_minimum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[B:%.*]] = call float @llvm.minimum.f32(float [[Z:%.*]], float [[A]]) -; CHECK-NEXT: ret float [[B]] -; - %a = call float @llvm.minimum.f32(float %x, float %y) - %b = call float @llvm.minimum.f32(float %z, float %a) - ret float %b -} - -; negative test - -define float @minimum_x_y_minimum_z(float %x, float %y, float %z) { -; CHECK-LABEL: @minimum_x_y_minimum_z( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[B:%.*]] = call float @llvm.minimum.f32(float [[A]], float [[Z:%.*]]) -; CHECK-NEXT: ret float [[B]] -; - %a = call float @llvm.minimum.f32(float %x, float %y) - %b = call float @llvm.minimum.f32(float %a, float %z) - ret float %b -} - -define float @maximum_x_maximum_x_y(float %x, float %y) { -; CHECK-LABEL: @maximum_x_maximum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.maximum.f32(float %x, float %y) - %b = call float @llvm.maximum.f32(float %x, float %a) - ret float %b -} - -define float @maximum_y_maximum_x_y(float %x, float %y) { -; CHECK-LABEL: @maximum_y_maximum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.maximum.f32(float %x, float %y) - %b = call float @llvm.maximum.f32(float %y, float %a) - ret float %b -} - -define float @maximum_x_y_maximum_x(float %x, float %y) { -; CHECK-LABEL: @maximum_x_y_maximum_x( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.maximum.f32(float %x, float %y) - %b = call float @llvm.maximum.f32(float %a, float %x) - ret float %b -} - -define float @maximum_x_y_maximum_y(float %x, float %y) { -; CHECK-LABEL: @maximum_x_y_maximum_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[A]] -; - %a = call float @llvm.maximum.f32(float %x, float %y) - %b = call float @llvm.maximum.f32(float %a, float %y) - ret float %b -} - -; negative test - -define float @maximum_z_maximum_x_y(float %x, float %y, float %z) { -; CHECK-LABEL: @maximum_z_maximum_x_y( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[B:%.*]] = call float 
@llvm.maximum.f32(float [[Z:%.*]], float [[A]]) -; CHECK-NEXT: ret float [[B]] -; - %a = call float @llvm.maximum.f32(float %x, float %y) - %b = call float @llvm.maximum.f32(float %z, float %a) - ret float %b -} - -; negative test - -define float @maximum_x_y_maximum_z(float %x, float %y, float %z) { -; CHECK-LABEL: @maximum_x_y_maximum_z( -; CHECK-NEXT: [[A:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[B:%.*]] = call float @llvm.maximum.f32(float [[A]], float [[Z:%.*]]) -; CHECK-NEXT: ret float [[B]] -; - %a = call float @llvm.maximum.f32(float %x, float %y) - %b = call float @llvm.maximum.f32(float %a, float %z) - ret float %b -} - -; negative test - minimum(X, -INF) != -INF because X could be NaN - -define float @minimum_neginf(float %x) { -; CHECK-LABEL: @minimum_neginf( -; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float 0xFFF0000000000000) -; CHECK-NEXT: ret float [[VAL]] -; - %val = call float @llvm.minimum.f32(float %x, float 0xFFF0000000000000) - ret float %val -} - -; negative test - minimum(-INF, X) != -INF because X could be NaN - -define <2 x double> @minimum_neginf_commute_vec(<2 x double> %x) { -; CHECK-LABEL: @minimum_neginf_commute_vec( -; CHECK-NEXT: [[R:%.*]] = call <2 x double> @llvm.minimum.v2f64(<2 x double> splat (double 0xFFF0000000000000), <2 x double> [[X:%.*]]) -; CHECK-NEXT: ret <2 x double> [[R]] -; - %r = call <2 x double> @llvm.minimum.v2f64(<2 x double> , <2 x double> %x) - ret <2 x double> %r -} - -; TODO: minimum(INF, X) --> X - -define float @minimum_inf(float %x) { -; CHECK-LABEL: @minimum_inf( -; CHECK-NEXT: ret float [[X:%.*]] -; - %val = call float @llvm.minimum.f32(float 0x7FF0000000000000, float %x) - ret float %val -} - -; negative test - maximum(X, INF) != INF because X could be NaN - -define <2 x double> @maximum_inf(<2 x double> %x) { -; CHECK-LABEL: @maximum_inf( -; CHECK-NEXT: [[VAL:%.*]] = call <2 x double> @llvm.maximum.v2f64(<2 x double> [[X:%.*]], <2 x double> splat (double 0x7FF0000000000000)) -; CHECK-NEXT: ret <2 x double> [[VAL]] -; - %val = call <2 x double> @llvm.maximum.v2f64(<2 x double> %x, <2 x double>) - ret <2 x double> %val -} - -; negative test - maximum(INF, X) != INF because X could be NaN - -define float @maximum_inf_commute(float %x) { -; CHECK-LABEL: @maximum_inf_commute( -; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.maximum.f32(float 0x7FF0000000000000, float [[X:%.*]]) -; CHECK-NEXT: ret float [[VAL]] -; - %val = call float @llvm.maximum.f32(float 0x7FF0000000000000, float %x) - ret float %val -} - -define float @maximum_maximum_minimum(float %x, float %y) { -; CHECK-LABEL: @maximum_maximum_minimum( -; CHECK-NEXT: [[MAX:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[MAX]] -; - %max = call float @llvm.maximum.f32(float %x, float %y) - %min = call float @llvm.minimum.f32(float %x, float %y) - %val = call float @llvm.maximum.f32(float %max, float %min) - ret float %val -} - -define double @maximum_minimum_maximum(double %x, double %y) { -; CHECK-LABEL: @maximum_minimum_maximum( -; CHECK-NEXT: [[MAX:%.*]] = call double @llvm.maximum.f64(double [[X:%.*]], double [[Y:%.*]]) -; CHECK-NEXT: ret double [[MAX]] -; - %max = call double @llvm.maximum.f64(double %x, double %y) - %min = call double @llvm.minimum.f64(double %x, double %y) - %val = call double @llvm.maximum.f64(double %min, double %max) - ret double %val -} - -define float @maximum_minimum_minimum(float %x, float %y) { -; CHECK-LABEL: 
@maximum_minimum_minimum( -; CHECK-NEXT: [[MIN1:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[MIN2:%.*]] = call float @llvm.minimum.f32(float [[X]], float [[Y]]) -; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.maximum.f32(float [[MIN1]], float [[MIN2]]) -; CHECK-NEXT: ret float [[VAL]] -; - %min1 = call float @llvm.minimum.f32(float %x, float %y) - %min2 = call float @llvm.minimum.f32(float %x, float %y) - %val = call float @llvm.maximum.f32(float %min1, float %min2) - ret float %val -} - -define half @maximum_maximum_maximum(half %x, half %y) { -; CHECK-LABEL: @maximum_maximum_maximum( -; CHECK-NEXT: [[MAX1:%.*]] = call half @llvm.maximum.f16(half [[X:%.*]], half [[Y:%.*]]) -; CHECK-NEXT: ret half [[MAX1]] -; - %max1 = call half @llvm.maximum.f16(half %x, half %y) - %max2 = call half @llvm.maximum.f16(half %x, half %y) - %val = call half @llvm.maximum.f16(half %max1, half %max2) - ret half %val -} - -define <2 x float> @minimum_maximum_minimum(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @minimum_maximum_minimum( -; CHECK-NEXT: [[MIN:%.*]] = call <2 x float> @llvm.minimum.v2f32(<2 x float> [[X:%.*]], <2 x float> [[Y:%.*]]) -; CHECK-NEXT: ret <2 x float> [[MIN]] -; - %max = call <2 x float> @llvm.maximum.v2f32(<2 x float> %x, <2 x float> %y) - %min = call <2 x float> @llvm.minimum.v2f32(<2 x float> %x, <2 x float> %y) - %val = call <2 x float> @llvm.minimum.v2f32(<2 x float> %max, <2 x float> %min) - ret <2 x float> %val -} - -define <2 x double> @minimum_minimum_maximum(<2 x double> %x, <2 x double> %y) { -; CHECK-LABEL: @minimum_minimum_maximum( -; CHECK-NEXT: [[MIN:%.*]] = call <2 x double> @llvm.minimum.v2f64(<2 x double> [[X:%.*]], <2 x double> [[Y:%.*]]) -; CHECK-NEXT: ret <2 x double> [[MIN]] -; - %max = call <2 x double> @llvm.maximum.v2f64(<2 x double> %x, <2 x double> %y) - %min = call <2 x double> @llvm.minimum.v2f64(<2 x double> %x, <2 x double> %y) - %val = call <2 x double> @llvm.minimum.v2f64(<2 x double> %min, <2 x double> %max) - ret <2 x double> %val -} - -define float @minimum_maximum_maximum(float %x, float %y) { -; CHECK-LABEL: @minimum_maximum_maximum( -; CHECK-NEXT: [[MAX1:%.*]] = call float @llvm.maximum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: [[MAX2:%.*]] = call float @llvm.maximum.f32(float [[X]], float [[Y]]) -; CHECK-NEXT: [[VAL:%.*]] = call float @llvm.minimum.f32(float [[MAX1]], float [[MAX2]]) -; CHECK-NEXT: ret float [[VAL]] -; - %max1 = call float @llvm.maximum.f32(float %x, float %y) - %max2 = call float @llvm.maximum.f32(float %x, float %y) - %val = call float @llvm.minimum.f32(float %max1, float %max2) - ret float %val -} - -define float @minimum_minimum_minimum(float %x, float %y) { -; CHECK-LABEL: @minimum_minimum_minimum( -; CHECK-NEXT: [[MIN1:%.*]] = call float @llvm.minimum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[MIN1]] -; - %min1 = call float @llvm.minimum.f32(float %x, float %y) - %min2 = call float @llvm.minimum.f32(float %x, float %y) - %val = call float @llvm.minimum.f32(float %min1, float %min2) - ret float %val -} - -define double @maxnum_maxnum_minnum(double %x, double %y) { -; CHECK-LABEL: @maxnum_maxnum_minnum( -; CHECK-NEXT: [[MAX:%.*]] = call double @llvm.maxnum.f64(double [[X:%.*]], double [[Y:%.*]]) -; CHECK-NEXT: ret double [[MAX]] -; - %max = call double @llvm.maxnum.f64(double %x, double %y) - %min = call double @llvm.minnum.f64(double %x, double %y) - %val = call double @llvm.maxnum.f64(double %max, double %min) - ret double %val -} - -define <2 x 
float> @maxnum_minnum_maxnum(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @maxnum_minnum_maxnum( -; CHECK-NEXT: [[MAX:%.*]] = call <2 x float> @llvm.maxnum.v2f32(<2 x float> [[X:%.*]], <2 x float> [[Y:%.*]]) -; CHECK-NEXT: ret <2 x float> [[MAX]] -; - %max = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) - %min = call <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> %y) - %val = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %min, <2 x float> %max) - ret <2 x float> %val -} - -define <2 x double> @maxnum_minnum_minmum(<2 x double> %x, <2 x double> %y) { -; CHECK-LABEL: @maxnum_minnum_minmum( -; CHECK-NEXT: [[MIN1:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[X:%.*]], <2 x double> [[Y:%.*]]) -; CHECK-NEXT: [[MIN2:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[X]], <2 x double> [[Y]]) -; CHECK-NEXT: [[VAL:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> [[MIN1]], <2 x double> [[MIN2]]) -; CHECK-NEXT: ret <2 x double> [[VAL]] -; - %min1 = call <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y) - %min2 = call <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y) - %val = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %min1, <2 x double> %min2) - ret <2 x double> %val -} - -define float @maxnum_maxnum_maxnum(float %x, float %y) { -; CHECK-LABEL: @maxnum_maxnum_maxnum( -; CHECK-NEXT: [[MAX1:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[MAX1]] -; - %max1 = call float @llvm.maxnum.f32(float %x, float %y) - %max2 = call float @llvm.maxnum.f32(float %x, float %y) - %val = call float @llvm.maxnum.f32(float %max1, float %max2) - ret float %val -} - -define double @minnum_maxnum_minnum(double %x, double %y) { -; CHECK-LABEL: @minnum_maxnum_minnum( -; CHECK-NEXT: [[MIN:%.*]] = call double @llvm.minnum.f64(double [[X:%.*]], double [[Y:%.*]]) -; CHECK-NEXT: ret double [[MIN]] -; - %max = call double @llvm.maxnum.f64(double %x, double %y) - %min = call double @llvm.minnum.f64(double %x, double %y) - %val = call double @llvm.minnum.f64(double %max, double %min) - ret double %val -} - -define float @minnum_minnum_maxnum(float %x, float %y) { -; CHECK-LABEL: @minnum_minnum_maxnum( -; CHECK-NEXT: [[MIN:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]]) -; CHECK-NEXT: ret float [[MIN]] -; - %max = call float @llvm.maxnum.f32(float %x, float %y) - %min = call float @llvm.minnum.f32(float %x, float %y) - %val = call float @llvm.minnum.f32(float %min, float %max) - ret float %val -} - -define <2 x float> @minnum_maxnum_maxnum(<2 x float> %x, <2 x float> %y) { -; CHECK-LABEL: @minnum_maxnum_maxnum( -; CHECK-NEXT: [[MAX1:%.*]] = call <2 x float> @llvm.maxnum.v2f32(<2 x float> [[X:%.*]], <2 x float> [[Y:%.*]]) -; CHECK-NEXT: [[MAX2:%.*]] = call <2 x float> @llvm.maxnum.v2f32(<2 x float> [[X]], <2 x float> [[Y]]) -; CHECK-NEXT: [[VAL:%.*]] = call <2 x float> @llvm.minnum.v2f32(<2 x float> [[MAX1]], <2 x float> [[MAX2]]) -; CHECK-NEXT: ret <2 x float> [[VAL]] -; - %max1 = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) - %max2 = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) - %val = call <2 x float> @llvm.minnum.v2f32(<2 x float> %max1, <2 x float> %max2) - ret <2 x float> %val -} - -define <2 x double> @minnum_minnum_minmum(<2 x double> %x, <2 x double> %y) { -; CHECK-LABEL: @minnum_minnum_minmum( -; CHECK-NEXT: [[MIN1:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> [[X:%.*]], <2 x double> 
[[Y:%.*]]) -; CHECK-NEXT: ret <2 x double> [[MIN1]] -; - %min1 = call <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y) - %min2 = call <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y) - %val = call <2 x double> @llvm.minnum.v2f64(<2 x double> %min1, <2 x double> %min2) - ret <2 x double> %val +;############################################################### +;# NaN Tests # +;############################################################### +; minnum(X, qnan) -> X +; maxnum(X, qnan) -> X +; TODO: minnum(X, snan) -> qnan (currently we treat SNaN the same as QNaN) +; TODO: maxnum(X, snan) -> qnan (currently we treat SNaN the same as QNaN) +; minimum(X, nan) -> qnan +; maximum(X, nan) -> qnan +; TODO: minimumnum(X, nan) -> X +; TODO: maximumnum(X, nan) -> X + +define void @minmax_qnan_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_qnan_f32( +; CHECK-NEXT: store float [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x7FFF000000000000, ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x7FFF000000000000, ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FFF000000000000) +; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0x7FFF000000000000) +; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum = call float @llvm.minnum.f32(float %x, float 0x7FFF000000000000) + store float %minnum, ptr %minnum_res + %maxnum = call float @llvm.maxnum.f32(float %x, float 0x7FFF000000000000) + store float %maxnum, ptr %maxnum_res + + %minimum = call float @llvm.minimum.f32(float %x, float 0x7FFF000000000000) + store float %minimum, ptr %minimum_res + %maximum = call float @llvm.maximum.f32(float %x, float 0x7FFF000000000000) + store float %maximum, ptr %maximum_res + + %minimumnum = call float @llvm.minimumnum.f32(float %x, float 0x7FFF000000000000) + store float %minimumnum, ptr %minimumnum_res + %maximumnum = call float @llvm.maximumnum.f32(float %x, float 0x7FFF000000000000) + store float %maximumnum, ptr %maximumnum_res + ret void +} + +; TODO currently snan is treated the same as qnan, but maxnum/minnum should really return qnan for these cases, not X +define void @minmax_snan_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_snan_f32( +; CHECK-NEXT: store float [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x7FFC000000000000, ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x7FFC000000000000, ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF4000000000000) +; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0x7FF4000000000000) +; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum = call float @llvm.minnum.f32(float %x, float 0x7FF4000000000000) + store float %minnum, ptr %minnum_res + 
%maxnum = call float @llvm.maxnum.f32(float %x, float 0x7FF4000000000000) + store float %maxnum, ptr %maxnum_res + + %minimum = call float @llvm.minimum.f32(float %x, float 0x7FF4000000000000) + store float %minimum, ptr %minimum_res + %maximum = call float @llvm.maximum.f32(float %x, float 0x7FF4000000000000) + store float %maximum, ptr %maximum_res + + %minimumnum = call float @llvm.minimumnum.f32(float %x, float 0x7FF4000000000000) + store float %minimumnum, ptr %minimumnum_res + %maximumnum = call float @llvm.maximumnum.f32(float %x, float 0x7FF4000000000000) + store float %maximumnum, ptr %maximumnum_res + ret void +} + +define void @minmax_qnan_nxv2f64_op0( %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_qnan_nxv2f64_op0( +; CHECK-NEXT: store [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: store [[X]], ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: store splat (double 0x7FF8000DEAD00000), ptr [[MINIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store splat (double 0x7FF8000DEAD00000), ptr [[MAXIMUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call @llvm.minimumnum.nxv2f64( splat (double 0x7FF8000DEAD00000), [[X]]) +; CHECK-NEXT: store [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call @llvm.maximumnum.nxv2f64( splat (double 0x7FF8000DEAD00000), [[X]]) +; CHECK-NEXT: store [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: ret void +; + %minnum = call @llvm.minnum.nxv2f64( splat (double 0x7FF8000DEAD00000), %x) + store %minnum, ptr %minnum_res + %maxnum = call @llvm.maxnum.nxv2f64( splat (double 0x7FF8000DEAD00000), %x) + store %maxnum, ptr %maxnum_res + + %minimum = call @llvm.minimum.nxv2f64( splat (double 0x7FF8000DEAD00000), %x) + store %minimum, ptr %minimum_res + %maximum = call @llvm.maximum.nxv2f64( splat (double 0x7FF8000DEAD00000), %x) + store %maximum, ptr %maximum_res + + %minimumnum = call @llvm.minimumnum.nxv2f64( splat (double 0x7FF8000DEAD00000), %x) + store %minimumnum, ptr %minimumnum_res + %maximumnum = call @llvm.maximumnum.nxv2f64( splat (double 0x7FF8000DEAD00000), %x) + store %maximumnum, ptr %maximumnum_res + ret void +} + +; TODO currently snan is treated the same as qnan, but maxnum/minnum should really return qnan for these cases, not X +define void @minmax_snan_nxv2f64_op1( %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_snan_nxv2f64_op1( +; CHECK-NEXT: store [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: store [[X]], ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: store splat (double 0x7FFC00DEAD00DEAD), ptr [[MINIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store splat (double 0x7FFC00DEAD00DEAD), ptr [[MAXIMUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call @llvm.minimumnum.nxv2f64( splat (double 0x7FF400DEAD00DEAD), [[X]]) +; CHECK-NEXT: store [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call @llvm.maximumnum.nxv2f64( splat (double 0x7FF400DEAD00DEAD), [[X]]) +; CHECK-NEXT: store [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: ret void +; + %minnum = call @llvm.minnum.nxv2f64( splat (double 0x7FF400DEAD00DEAD), %x) + store %minnum, ptr %minnum_res + %maxnum = call @llvm.maxnum.nxv2f64( splat (double 0x7FF400DEAD00DEAD), %x) + store %maxnum, ptr %maxnum_res + + %minimum = call @llvm.minimum.nxv2f64( splat (double 
0x7FF400DEAD00DEAD), %x) + store %minimum, ptr %minimum_res + %maximum = call @llvm.maximum.nxv2f64( splat (double 0x7FF400DEAD00DEAD), %x) + store %maximum, ptr %maximum_res + + %minimumnum = call @llvm.minimumnum.nxv2f64( splat (double 0x7FF400DEAD00DEAD), %x) + store %minimumnum, ptr %minimumnum_res + %maximumnum = call @llvm.maximumnum.nxv2f64( splat (double 0x7FF400DEAD00DEAD), %x) + store %maximumnum, ptr %maximumnum_res + ret void +} + +; TODO Currently, we treat SNaN and QNaN the same. However, for maxnum and minnum, we should not optimize this, as we should return <%x0, QNaN> instead of <%x0, %x1> +define void @minmax_mixed_snan_qnan_v2f64(<2 x double> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_mixed_snan_qnan_v2f64( +; CHECK-NEXT: store <2 x double> [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> , ptr [[MINIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> , ptr [[MAXIMUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> , <2 x double> [[X]]) +; CHECK-NEXT: store <2 x double> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> , <2 x double> [[X]]) +; CHECK-NEXT: store <2 x double> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: ret void +; + %minnum = call <2 x double> @llvm.minnum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %minnum, ptr %minnum_res + %maxnum = call <2 x double> @llvm.maxnum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %maxnum, ptr %maxnum_res + + %minimum = call <2 x double> @llvm.minimum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %minimum, ptr %minimum_res + %maximum = call <2 x double> @llvm.maximum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %maximum, ptr %maximum_res + + %minimumnum = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %minimumnum, ptr %minimumnum_res + %maximumnum = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %maximumnum, ptr %maximumnum_res + ret void +} + +; Test with vector variants (v2f64) with NaN and poison +; Use the poison element for flexibility to choose to return either the constant arg or the other arg X +define void @minmax_mixed_qnan_poison_v2f64(<2 x double> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_mixed_qnan_poison_v2f64( +; CHECK-NEXT: store <2 x double> [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> , ptr [[MINIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> , ptr [[MAXIMUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> , <2 x double> [[X]]) +; CHECK-NEXT: store <2 x double> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> , <2 x double> [[X]]) +; CHECK-NEXT: store <2 x double> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: ret void +; + %minnum = call <2 x double> @llvm.minnum.v2f64(<2 x double> , <2 x double> 
%x) + store <2 x double> %minnum, ptr %minnum_res + %maxnum = call <2 x double> @llvm.maxnum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %maxnum, ptr %maxnum_res + + %minimum = call <2 x double> @llvm.minimum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %minimum, ptr %minimum_res + %maximum = call <2 x double> @llvm.maximum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %maximum, ptr %maximum_res + + %minimumnum = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %minimumnum, ptr %minimumnum_res + %maximumnum = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> , <2 x double> %x) + store <2 x double> %maximumnum, ptr %maximumnum_res + ret void +} + +;############################################################### +;# Poison Tests # +;############################################################### +define void @minmax_poison_op0_f16(half %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_poison_op0_f16( +; CHECK-NEXT: store half [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 2 +; CHECK-NEXT: store half [[X]], ptr [[MAXNUM_RES:%.*]], align 2 +; CHECK-NEXT: store half [[X]], ptr [[MINIMUM_RES:%.*]], align 2 +; CHECK-NEXT: store half [[X]], ptr [[MAXIMUM_RES:%.*]], align 2 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call half @llvm.minimumnum.f16(half poison, half [[X]]) +; CHECK-NEXT: store half [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 2 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call half @llvm.maximumnum.f16(half poison, half [[X]]) +; CHECK-NEXT: store half [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 2 +; CHECK-NEXT: ret void +; + %minnum = call half @llvm.minnum.f16(half poison, half %x) + store half %minnum, ptr %minnum_res + %maxnum = call half @llvm.maxnum.f16(half poison, half %x) + store half %maxnum, ptr %maxnum_res + + %minimum = call half @llvm.minimum.f16(half poison, half %x) + store half %minimum, ptr %minimum_res + %maximum = call half @llvm.maximum.f16(half poison, half %x) + store half %maximum, ptr %maximum_res + + %minimumnum = call half @llvm.minimumnum.f16(half poison, half %x) + store half %minimumnum, ptr %minimumnum_res + %maximumnum = call half @llvm.maximumnum.f16(half poison, half %x) + store half %maximumnum, ptr %maximumnum_res + ret void +} + +define void @minmax_poison_op1_nxv2f64( %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_poison_op1_nxv2f64( +; CHECK-NEXT: store [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: store [[X]], ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: store [[X]], ptr [[MINIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store [[X]], ptr [[MAXIMUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call nnan @llvm.minimumnum.nxv2f64( [[X]], poison) +; CHECK-NEXT: store [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan @llvm.maximumnum.nxv2f64( [[X]], poison) +; CHECK-NEXT: store [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: ret void +; + %minnum = call nnan @llvm.minnum.nxv2f64( %x, poison) + store %minnum, ptr %minnum_res + %maxnum = call nnan @llvm.maxnum.nxv2f64( %x, poison) + store %maxnum, ptr %maxnum_res + + %minimum = call nnan @llvm.minimum.nxv2f64( %x, poison) + store %minimum, ptr %minimum_res + %maximum = call nnan @llvm.maximum.nxv2f64( %x, poison) + store %maximum, ptr %maximum_res + + 
%minimumnum = call nnan @llvm.minimumnum.nxv2f64( %x, poison) + store %minimumnum, ptr %minimumnum_res + %maximumnum = call nnan @llvm.maximumnum.nxv2f64( %x, poison) + store %maximumnum, ptr %maximumnum_res + ret void +} + +;############################################################### +;# Positive Infinity Tests # +;############################################################### +; maxnum(X, +inf) -> +inf (ignoring SNaN -> QNaN propagation) +; minnum(X, +inf) -> X if nnan (ignoring NaN quieting) +; maximum(X, +inf) -> +inf if nnan +; minimum(X, +inf) -> X (ignoring NaN quieting) +; TODO: maximumnum(X, +inf) -> +inf +; TODO: minimumnum(X, +inf) -> X if nnan (ignoring NaN quieting) + +; Can only optimize maxnum and minimum without the nnan flag +define void @minmax_pos_inf_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_pos_inf_f32( +; CHECK-NEXT: [[MINNUM:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float 0x7FF0000000000000) +; CHECK-NEXT: store float [[MINNUM]], ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x7FF0000000000000, ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUM:%.*]] = call float @llvm.maximum.f32(float [[X]], float 0x7FF0000000000000) +; CHECK-NEXT: store float [[MAXIMUM]], ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF0000000000000) +; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0x7FF0000000000000) +; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum = call float @llvm.minnum.f32(float %x, float 0x7FF0000000000000) + store float %minnum, ptr %minnum_res + %maxnum = call float @llvm.maxnum.f32(float %x, float 0x7FF0000000000000) + store float %maxnum, ptr %maxnum_res + + %minimum = call float @llvm.minimum.f32(float %x, float 0x7FF0000000000000) + store float %minimum, ptr %minimum_res + %maximum = call float @llvm.maximum.f32(float %x, float 0x7FF0000000000000) + store float %maximum, ptr %maximum_res + + %minimumnum = call float @llvm.minimumnum.f32(float %x, float 0x7FF0000000000000) + store float %minimumnum, ptr %minimumnum_res + %maximumnum = call float @llvm.maximumnum.f32(float %x, float 0x7FF0000000000000) + store float %maximumnum, ptr %maximumnum_res + ret void +} + +; Can optimize all minmax variants if the nnan flag is set +; TODO maximumnum/minimumnum +define void @minmax_pos_inf_nnan_v2f32(<2 x float> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_pos_inf_nnan_v2f32( +; CHECK-NEXT: store <2 x float> [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> splat (float 0x7FF0000000000000), ptr [[MAXNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> [[X]], ptr [[MINIMUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> splat (float 0x7FF0000000000000), ptr [[MAXIMUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call nnan <2 x float> @llvm.minimumnum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> [[X]]) +; CHECK-NEXT: store <2 x float> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan <2 x float> 
@llvm.maximumnum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> [[X]]) +; CHECK-NEXT: store <2 x float> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: ret void +; + %minnum = call nnan <2 x float> @llvm.minnum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> %x) + store <2 x float> %minnum, ptr %minnum_res + %maxnum = call nnan <2 x float> @llvm.maxnum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> %x) + store <2 x float> %maxnum, ptr %maxnum_res + + %minimum = call nnan <2 x float> @llvm.minimum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> %x) + store <2 x float> %minimum, ptr %minimum_res + %maximum = call nnan <2 x float> @llvm.maximum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> %x) + store <2 x float> %maximum, ptr %maximum_res + + %minimumnum = call nnan <2 x float> @llvm.minimumnum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> %x) + store <2 x float> %minimumnum, ptr %minimumnum_res + %maximumnum = call nnan <2 x float> @llvm.maximumnum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> %x) + store <2 x float> %maximumnum, ptr %maximumnum_res + ret void +} + +;############################################################### +;# Negative Infinity Tests # +;############################################################### +; minnum(X, -inf) -> -inf (Ignoring SNaN -> QNaN propagation) +; maxnum(X, -inf) -> X if nnan +; minimum(X, -inf) -> -inf if nnan +; maximum(X, -inf) -> X (Ignoring NaN quieting) +; TODO: minimumnum(X, -inf) -> -inf +; TODO: maximumnum(X, -inf) -> X if nnan + +; Can only optimize minnum and maximum without the nnan flag +define void @minmax_neg_inf_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_neg_inf_f32( +; CHECK-NEXT: store float 0xFFF0000000000000, ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXNUM:%.*]] = call float @llvm.maxnum.f32(float [[X:%.*]], float 0xFFF0000000000000) +; CHECK-NEXT: store float [[MAXNUM]], ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUM:%.*]] = call float @llvm.minimum.f32(float [[X]], float 0xFFF0000000000000) +; CHECK-NEXT: store float [[MINIMUM]], ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0xFFF0000000000000) +; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0xFFF0000000000000) +; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum = call float @llvm.minnum.f32(float %x, float 0xFFF0000000000000) + store float %minnum, ptr %minnum_res + %maxnum = call float @llvm.maxnum.f32(float %x, float 0xFFF0000000000000) + store float %maxnum, ptr %maxnum_res + + %minimum = call float @llvm.minimum.f32(float %x, float 0xFFF0000000000000) + store float %minimum, ptr %minimum_res + %maximum = call float @llvm.maximum.f32(float %x, float 0xFFF0000000000000) + store float %maximum, ptr %maximum_res + + %minimumnum = call float @llvm.minimumnum.f32(float %x, float 0xFFF0000000000000) + store float %minimumnum, ptr %minimumnum_res + %maximumnum = call float @llvm.maximumnum.f32(float %x, float 0xFFF0000000000000) + store float %maximumnum, ptr %maximumnum_res + ret void +} + +; Can 
optimize all minmax variants if the nnan flag is set +; TODO maximumnum/minimumnum +define void @minmax_neg_inf_nnan_v2f64(<2 x double> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_neg_inf_nnan_v2f64( +; CHECK-NEXT: store <2 x double> splat (double 0xFFF0000000000000), ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X:%.*]], ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> splat (double 0xFFF0000000000000), ptr [[MINIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MAXIMUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call nnan <2 x double> @llvm.minimumnum.v2f64(<2 x double> [[X]], <2 x double> splat (double 0xFFF0000000000000)) +; CHECK-NEXT: store <2 x double> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan <2 x double> @llvm.maximumnum.v2f64(<2 x double> [[X]], <2 x double> splat (double 0xFFF0000000000000)) +; CHECK-NEXT: store <2 x double> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: ret void +; + %minnum = call nnan <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> splat (double 0xFFF0000000000000)) + store <2 x double> %minnum, ptr %minnum_res + %maxnum = call nnan <2 x double> @llvm.maxnum.v2f64(<2 x double> %x, <2 x double> splat (double 0xFFF0000000000000)) + store <2 x double> %maxnum, ptr %maxnum_res + + %minimum = call nnan <2 x double> @llvm.minimum.v2f64(<2 x double> %x, <2 x double> splat (double 0xFFF0000000000000)) + store <2 x double> %minimum, ptr %minimum_res + %maximum = call nnan <2 x double> @llvm.maximum.v2f64(<2 x double> %x, <2 x double> splat (double 0xFFF0000000000000)) + store <2 x double> %maximum, ptr %maximum_res + + %minimumnum = call nnan <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> splat (double 0xFFF0000000000000)) + store <2 x double> %minimumnum, ptr %minimumnum_res + %maximumnum = call nnan <2 x double> @llvm.maximumnum.v2f64(<2 x double> %x, <2 x double> splat (double 0xFFF0000000000000)) + store <2 x double> %maximumnum, ptr %maximumnum_res + ret void +} + +;############################################################### +;# Largest Positive Float Constant Tests # +;############################################################### +; maxnum(X, +largest) -> +largest if ninf (ignoring SNaN -> QNaN propagation) +; minnum(X, +largest) -> X if ninf && nnan +; maximum(X, +largest) -> +largest if ninf && nnan +; minimum(X, +largest) -> X if ninf (ignoring quieting of sNaNs) +; TODO: maximumnum(X, +largest) -> +largest if ninf && nnan +; TODO: minimumnum(X, +largest) -> X if ninf && nnan + +; None of these should be optimized away without the nnan/ninf flags +define void @minmax_largest_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_largest_f32( +; CHECK-NEXT: [[MINNUM:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MINNUM]], ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXNUM:%.*]] = call float @llvm.maxnum.f32(float [[X]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXNUM]], ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUM:%.*]] = call float @llvm.minimum.f32(float [[X]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MINIMUM]], ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUM:%.*]] 
= call float @llvm.maximum.f32(float [[X]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXIMUM]], ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum = call float @llvm.minnum.f32(float %x, float 0x47EFFFFFE0000000) + store float %minnum, ptr %minnum_res + %maxnum = call float @llvm.maxnum.f32(float %x, float 0x47EFFFFFE0000000) + store float %maxnum, ptr %maxnum_res + + %minimum = call float @llvm.minimum.f32(float %x, float 0x47EFFFFFE0000000) + store float %minimum, ptr %minimum_res + %maximum = call float @llvm.maximum.f32(float %x, float 0x47EFFFFFE0000000) + store float %maximum, ptr %maximum_res + + %minimumnum = call float @llvm.minimumnum.f32(float %x, float 0x47EFFFFFE0000000) + store float %minimumnum, ptr %minimumnum_res + %maximumnum = call float @llvm.maximumnum.f32(float %x, float 0x47EFFFFFE0000000) + store float %maximumnum, ptr %maximumnum_res + ret void +} + +; We can optimize maxnum and minimum if we know ninf is set +define void @minmax_largest_f32_ninf(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_largest_f32_ninf( +; CHECK-NEXT: [[MINNUM:%.*]] = call ninf float @llvm.minnum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MINNUM]], ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x47EFFFFFE0000000, ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUM:%.*]] = call ninf float @llvm.maximum.f32(float [[X]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXIMUM]], ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call ninf float @llvm.minimumnum.f32(float [[X]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call ninf float @llvm.maximumnum.f32(float [[X]], float 0x47EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum = call ninf float @llvm.minnum.f32(float %x, float 0x47EFFFFFE0000000) + store float %minnum, ptr %minnum_res + %maxnum = call ninf float @llvm.maxnum.f32(float %x, float 0x47EFFFFFE0000000) + store float %maxnum, ptr %maxnum_res + + %minimum = call ninf float @llvm.minimum.f32(float %x, float 0x47EFFFFFE0000000) + store float %minimum, ptr %minimum_res + %maximum = call ninf float @llvm.maximum.f32(float %x, float 0x47EFFFFFE0000000) + store float %maximum, ptr %maximum_res + + %minimumnum = call ninf float @llvm.minimumnum.f32(float %x, float 0x47EFFFFFE0000000) + store float %minimumnum, ptr %minimumnum_res + %maximumnum = call ninf float @llvm.maximumnum.f32(float %x, float 0x47EFFFFFE0000000) + store float %maximumnum, ptr %maximumnum_res + ret void +} + +; All can be optimized if both the ninf and nnan flags are set (ignoring SNaN propagation in minnum/maxnum) +; TODO maximumnum/minimumnum +define void @minmax_largest_v2f32_ninf_nnan(<2 x float> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr 
%maximumnum_res) { +; CHECK-LABEL: @minmax_largest_v2f32_ninf_nnan( +; CHECK-NEXT: store <2 x float> [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> splat (float 0x47EFFFFFE0000000), ptr [[MAXNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> [[X]], ptr [[MINIMUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> splat (float 0x47EFFFFFE0000000), ptr [[MAXIMUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call nnan ninf <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[X]], <2 x float> splat (float 0x47EFFFFFE0000000)) +; CHECK-NEXT: store <2 x float> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan ninf <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[X]], <2 x float> splat (float 0x47EFFFFFE0000000)) +; CHECK-NEXT: store <2 x float> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: ret void +; + %minnum = call ninf nnan <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> splat (float 0x47EFFFFFE0000000)) + store <2 x float> %minnum, ptr %minnum_res + %maxnum = call ninf nnan <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> splat (float 0x47EFFFFFE0000000)) + store <2 x float> %maxnum, ptr %maxnum_res + + %minimum = call ninf nnan <2 x float> @llvm.minimum.v2f32(<2 x float> %x, <2 x float> splat (float 0x47EFFFFFE0000000)) + store <2 x float> %minimum, ptr %minimum_res + %maximum = call ninf nnan <2 x float> @llvm.maximum.v2f32(<2 x float> %x, <2 x float> splat (float 0x47EFFFFFE0000000)) + store <2 x float> %maximum, ptr %maximum_res + + %minimumnum = call ninf nnan <2 x float> @llvm.minimumnum.v2f32(<2 x float> %x, <2 x float> splat (float 0x47EFFFFFE0000000)) + store <2 x float> %minimumnum, ptr %minimumnum_res + %maximumnum = call ninf nnan <2 x float> @llvm.maximumnum.v2f32(<2 x float> %x, <2 x float> splat (float 0x47EFFFFFE0000000)) + store <2 x float> %maximumnum, ptr %maximumnum_res + ret void +} + +;############################################################### +;# Largest Negative Float Constant Tests # +;############################################################### +; maxnum(X, -largest) -> X if ninf && nnan +; minnum(X, -largest) -> -largest if ninf (ignoring SNaN -> QNaN propagation) +; maximum(X, -largest) -> X if ninf (ignoring quieting of sNaNs) +; minimum(X, -largest) -> -largest if ninf && nnan +; TODO: maximumnum(X, -largest) -> X if ninf && nnan +; TODO: minimumnum(X, -largest) -> -largest if ninf + +; None of these should be optimized away without the nnan/ninf flags +define void @minmax_neg_largest_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_neg_largest_f32( +; CHECK-NEXT: [[MINNUM:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MINNUM]], ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXNUM:%.*]] = call float @llvm.maxnum.f32(float [[X]], float 0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXNUM]], ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUM:%.*]] = call float @llvm.minimum.f32(float [[X]], float 0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MINIMUM]], ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUM:%.*]] = call float @llvm.maximum.f32(float [[X]], float 0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXIMUM]], ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 
0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum = call float @llvm.minnum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %minnum, ptr %minnum_res + %maxnum = call float @llvm.maxnum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %maxnum, ptr %maxnum_res + + %minimum = call float @llvm.minimum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %minimum, ptr %minimum_res + %maximum = call float @llvm.maximum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %maximum, ptr %maximum_res + + %minimumnum = call float @llvm.minimumnum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %minimumnum, ptr %minimumnum_res + %maximumnum = call float @llvm.maximumnum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %maximumnum, ptr %maximumnum_res + ret void +} + +; We can optimize minnum and maximum if we know ninf is set +define void @minmax_neg_largest_f32_ninf(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_neg_largest_f32_ninf( +; CHECK-NEXT: store float 0xC7EFFFFFE0000000, ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXNUM:%.*]] = call ninf float @llvm.maxnum.f32(float [[X:%.*]], float 0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXNUM]], ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUM:%.*]] = call ninf float @llvm.minimum.f32(float [[X]], float 0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MINIMUM]], ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call ninf float @llvm.minimumnum.f32(float [[X]], float 0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call ninf float @llvm.maximumnum.f32(float [[X]], float 0xC7EFFFFFE0000000) +; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum = call ninf float @llvm.minnum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %minnum, ptr %minnum_res + %maxnum = call ninf float @llvm.maxnum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %maxnum, ptr %maxnum_res + + %minimum = call ninf float @llvm.minimum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %minimum, ptr %minimum_res + %maximum = call ninf float @llvm.maximum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %maximum, ptr %maximum_res + + %minimumnum = call ninf float @llvm.minimumnum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %minimumnum, ptr %minimumnum_res + %maximumnum = call ninf float @llvm.maximumnum.f32(float %x, float 0xC7EFFFFFE0000000) + store float %maximumnum, ptr %maximumnum_res + ret void +} + +; All can be optimized if both the ninf and nnan flags are set (ignoring SNaN propagation in minnum/maxnum) +; TODO maximumnum/minimumnum +define void @minmax_neg_largest_nxv2f32_nnan_ninf( %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_neg_largest_nxv2f32_nnan_ninf( +; CHECK-NEXT: store splat (float 0xC7EFFFFFE0000000), ptr [[MINNUM_RES:%.*]], align 8 +; CHECK-NEXT: store [[X:%.*]], ptr [[MAXNUM_RES:%.*]], align 8 +; 
CHECK-NEXT: store splat (float 0xC7EFFFFFE0000000), ptr [[MINIMUM_RES:%.*]], align 8 +; CHECK-NEXT: store [[X]], ptr [[MAXIMUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call nnan ninf @llvm.minimumnum.nxv2f32( [[X]], splat (float 0xC7EFFFFFE0000000)) +; CHECK-NEXT: store [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan ninf @llvm.maximumnum.nxv2f32( [[X]], splat (float 0xC7EFFFFFE0000000)) +; CHECK-NEXT: store [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: ret void +; + %minnum = call nnan ninf @llvm.minnum.nxv2f32( %x, splat (float 0xC7EFFFFFE0000000)) + store %minnum, ptr %minnum_res + %maxnum = call nnan ninf @llvm.maxnum.nxv2f32( %x, splat (float 0xC7EFFFFFE0000000)) + store %maxnum, ptr %maxnum_res + + %minimum = call nnan ninf @llvm.minimum.nxv2f32( %x, splat (float 0xC7EFFFFFE0000000)) + store %minimum, ptr %minimum_res + %maximum = call nnan ninf @llvm.maximum.nxv2f32( %x, splat (float 0xC7EFFFFFE0000000)) + store %maximum, ptr %maximum_res + + %minimumnum = call nnan ninf @llvm.minimumnum.nxv2f32( %x, splat (float 0xC7EFFFFFE0000000)) + store %minimumnum, ptr %minimumnum_res + %maximumnum = call nnan ninf @llvm.maximumnum.nxv2f32( %x, splat (float 0xC7EFFFFFE0000000)) + store %maximumnum, ptr %maximumnum_res + ret void +} + +;############################################################### +;# Min(x, x) / Max(x, x) # +;############################################################### +; min(x, x) -> x and max(x, x) -> x for all variants (ignoring SNaN quieting) +define void @minmax_same_args(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_same_args( +; CHECK-NEXT: store float [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[X]]) +; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float [[X]]) +; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum = call float @llvm.minnum.f32(float %x, float %x) + store float %minnum, ptr %minnum_res + %maxnum = call float @llvm.maxnum.f32(float %x, float %x) + store float %maxnum, ptr %maxnum_res + + %minimum = call float @llvm.minimum.f32(float %x, float %x) + store float %minimum, ptr %minimum_res + %maximum = call float @llvm.maximum.f32(float %x, float %x) + store float %maximum, ptr %maximum_res + + %minimumnum = call float @llvm.minimumnum.f32(float %x, float %x) + store float %minimumnum, ptr %minimumnum_res + %maximumnum = call float @llvm.maximumnum.f32(float %x, float %x) + store float %maximumnum, ptr %maximumnum_res + ret void +} + +;############################################################### +;# Nested calls: M(x, M(x, y)) -> M(x, y) # +;############################################################### +define void @minmax_x_minmax_xy(<2 x float> %x, <2 x float> %y, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_x_minmax_xy( +; CHECK-NEXT: [[MINNUM_XY:%.*]] = call <2 x float> @llvm.minnum.v2f32(<2 x float> [[X:%.*]], <2 x 
float> [[Y:%.*]]) +; CHECK-NEXT: store <2 x float> [[MINNUM_XY]], ptr [[MINNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXNUM_XY:%.*]] = call <2 x float> @llvm.maxnum.v2f32(<2 x float> [[X]], <2 x float> [[Y]]) +; CHECK-NEXT: store <2 x float> [[MAXNUM_XY]], ptr [[MAXNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MINIMUM_XY:%.*]] = call <2 x float> @llvm.minimum.v2f32(<2 x float> [[X]], <2 x float> [[Y]]) +; CHECK-NEXT: store <2 x float> [[MINIMUM_XY]], ptr [[MINIMUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXIMUM_XY:%.*]] = call <2 x float> @llvm.maximum.v2f32(<2 x float> [[X]], <2 x float> [[Y]]) +; CHECK-NEXT: store <2 x float> [[MAXIMUM_XY]], ptr [[MAXIMUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MINIMUMNUM_XY:%.*]] = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[X]], <2 x float> [[Y]]) +; CHECK-NEXT: [[MINIMUMNUM_NESTED:%.*]] = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[X]], <2 x float> [[MINIMUMNUM_XY]]) +; CHECK-NEXT: store <2 x float> [[MINIMUMNUM_NESTED]], ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXIMUMNUM_XY:%.*]] = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[X]], <2 x float> [[Y]]) +; CHECK-NEXT: [[MAXIMUMNUM_NESTED:%.*]] = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[X]], <2 x float> [[MAXIMUMNUM_XY]]) +; CHECK-NEXT: store <2 x float> [[MAXIMUMNUM_NESTED]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: ret void +; + %minnum_xy = call <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> %y) + %minnum_nested = call <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> %minnum_xy) + store <2 x float> %minnum_nested, ptr %minnum_res + + %maxnum_xy = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %y) + %maxnum_nested = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %x, <2 x float> %maxnum_xy) + store <2 x float> %maxnum_nested, ptr %maxnum_res + + %minimum_xy = call <2 x float> @llvm.minimum.v2f32(<2 x float> %x, <2 x float> %y) + %minimum_nested = call <2 x float> @llvm.minimum.v2f32(<2 x float> %x, <2 x float> %minimum_xy) + store <2 x float> %minimum_nested, ptr %minimum_res + + %maximum_xy = call <2 x float> @llvm.maximum.v2f32(<2 x float> %x, <2 x float> %y) + %maximum_nested = call <2 x float> @llvm.maximum.v2f32(<2 x float> %x, <2 x float> %maximum_xy) + store <2 x float> %maximum_nested, ptr %maximum_res + + %minimumnum_xy = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> %x, <2 x float> %y) + %minimumnum_nested = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> %x, <2 x float> %minimumnum_xy) + store <2 x float> %minimumnum_nested, ptr %minimumnum_res + + %maximumnum_xy = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> %x, <2 x float> %y) + %maximumnum_nested = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> %x, <2 x float> %maximumnum_xy) + store <2 x float> %maximumnum_nested, ptr %maximumnum_res + ret void +} + +; Negative test: m(Z, m(X,Y)) cannot be optimized to m(x, y) +define void @minmax_z_minmax_xy(float %x, float %y, float %z, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_z_minmax_xy( +; CHECK-NEXT: [[MINNUM_XY:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]]) +; CHECK-NEXT: [[MINNUM_NESTED:%.*]] = call float @llvm.minnum.f32(float [[Z:%.*]], float [[MINNUM_XY]]) +; CHECK-NEXT: store float [[MINNUM_NESTED]], ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXNUM_XY:%.*]] = call float @llvm.maxnum.f32(float [[X]], float [[Y]]) +; CHECK-NEXT: [[MAXNUM_NESTED:%.*]] = call float 
@llvm.maxnum.f32(float [[Z]], float [[MAXNUM_XY]]) +; CHECK-NEXT: store float [[MAXNUM_NESTED]], ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUM_XY:%.*]] = call float @llvm.minimum.f32(float [[X]], float [[Y]]) +; CHECK-NEXT: [[MINIMUM_NESTED:%.*]] = call float @llvm.minimum.f32(float [[Z]], float [[MINIMUM_XY]]) +; CHECK-NEXT: store float [[MINIMUM_NESTED]], ptr [[MINIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUM_XY:%.*]] = call float @llvm.maximum.f32(float [[X]], float [[Y]]) +; CHECK-NEXT: [[MAXIMUM_NESTED:%.*]] = call float @llvm.maximum.f32(float [[Z]], float [[MAXIMUM_XY]]) +; CHECK-NEXT: store float [[MAXIMUM_NESTED]], ptr [[MAXIMUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MINIMUMNUM_XY:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[Y]]) +; CHECK-NEXT: [[MINIMUMNUM_NESTED:%.*]] = call float @llvm.minimumnum.f32(float [[Z]], float [[MINIMUMNUM_XY]]) +; CHECK-NEXT: store float [[MINIMUMNUM_NESTED]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: [[MAXIMUMNUM_XY:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float [[Y]]) +; CHECK-NEXT: [[MAXIMUMNUM_NESTED:%.*]] = call float @llvm.maximumnum.f32(float [[Z]], float [[MAXIMUMNUM_XY]]) +; CHECK-NEXT: store float [[MAXIMUMNUM_NESTED]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: ret void +; + %minnum_xy = call float @llvm.minnum.f32(float %x, float %y) + %minnum_nested = call float @llvm.minnum.f32(float %z, float %minnum_xy) + store float %minnum_nested, ptr %minnum_res + + %maxnum_xy = call float @llvm.maxnum.f32(float %x, float %y) + %maxnum_nested = call float @llvm.maxnum.f32(float %z, float %maxnum_xy) + store float %maxnum_nested, ptr %maxnum_res + + %minimum_xy = call float @llvm.minimum.f32(float %x, float %y) + %minimum_nested = call float @llvm.minimum.f32(float %z, float %minimum_xy) + store float %minimum_nested, ptr %minimum_res + + %maximum_xy = call float @llvm.maximum.f32(float %x, float %y) + %maximum_nested = call float @llvm.maximum.f32(float %z, float %maximum_xy) + store float %maximum_nested, ptr %maximum_res + + %minimumnum_xy = call float @llvm.minimumnum.f32(float %x, float %y) + %minimumnum_nested = call float @llvm.minimumnum.f32(float %z, float %minimumnum_xy) + store float %minimumnum_nested, ptr %minimumnum_res + + %maximumnum_xy = call float @llvm.maximumnum.f32(float %x, float %y) + %maximumnum_nested = call float @llvm.maximumnum.f32(float %z, float %maximumnum_xy) + store float %maximumnum_nested, ptr %maximumnum_res + ret void +} + +;############################################################### +;# Nested calls: M(M(x, y), M'(x, y)) -> M(x, y) # +;############################################################### +; m(m(X,Y), m'(Y,X)) -> m(X, Y) +; Test where m' is the same op as m +define void @minmax_minmax_xy_minmax_yx(half %x, half %y, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_minmax_xy_minmax_yx( +; CHECK-NEXT: [[MINNUM_XY:%.*]] = call half @llvm.minnum.f16(half [[X:%.*]], half [[Y:%.*]]) +; CHECK-NEXT: store half [[MINNUM_XY]], ptr [[MINNUM_RES:%.*]], align 2 +; CHECK-NEXT: [[MAXNUM_XY:%.*]] = call half @llvm.maxnum.f16(half [[X]], half [[Y]]) +; CHECK-NEXT: store half [[MAXNUM_XY]], ptr [[MAXNUM_RES:%.*]], align 2 +; CHECK-NEXT: [[MINIMUM_XY:%.*]] = call half @llvm.minimum.f16(half [[X]], half [[Y]]) +; CHECK-NEXT: store half [[MINIMUM_XY]], ptr [[MINIMUM_RES:%.*]], align 2 +; CHECK-NEXT: [[MAXIMUM_XY:%.*]] = call half @llvm.maximum.f16(half [[X]], half [[Y]]) +; 
CHECK-NEXT: store half [[MAXIMUM_XY]], ptr [[MAXIMUM_RES:%.*]], align 2 +; CHECK-NEXT: [[MINIMUMNUM_XY:%.*]] = call half @llvm.minimumnum.f16(half [[X]], half [[Y]]) +; CHECK-NEXT: [[MINIMUMNUM_YX:%.*]] = call half @llvm.minimumnum.f16(half [[Y]], half [[X]]) +; CHECK-NEXT: [[FINAL_MINIMUMNUM:%.*]] = call half @llvm.minimumnum.f16(half [[MINIMUMNUM_XY]], half [[MINIMUMNUM_YX]]) +; CHECK-NEXT: store half [[FINAL_MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 2 +; CHECK-NEXT: [[MAXIMUMNUM_XY:%.*]] = call half @llvm.maximumnum.f16(half [[X]], half [[Y]]) +; CHECK-NEXT: [[MAXIMUMNUM_YX:%.*]] = call half @llvm.maximumnum.f16(half [[Y]], half [[X]]) +; CHECK-NEXT: [[FINAL_MAXIMUMNUM:%.*]] = call half @llvm.maximumnum.f16(half [[MAXIMUMNUM_XY]], half [[MAXIMUMNUM_YX]]) +; CHECK-NEXT: store half [[FINAL_MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 2 +; CHECK-NEXT: ret void +; + %minnum_xy = call half @llvm.minnum.f16(half %x, half %y) + %minnum_yx = call half @llvm.minnum.f16(half %y, half %x) + %final_minnum = call half @llvm.minnum.f16(half %minnum_xy, half %minnum_yx) + store half %final_minnum, ptr %minnum_res + + %maxnum_xy = call half @llvm.maxnum.f16(half %x, half %y) + %maxnum_yx = call half @llvm.maxnum.f16(half %y, half %x) + %final_maxnum = call half @llvm.maxnum.f16(half %maxnum_xy, half %maxnum_yx) + store half %final_maxnum, ptr %maxnum_res + + %minimum_xy = call half @llvm.minimum.f16(half %x, half %y) + %minimum_yx = call half @llvm.minimum.f16(half %y, half %x) + %final_minimum = call half @llvm.minimum.f16(half %minimum_xy, half %minimum_yx) + store half %final_minimum, ptr %minimum_res + + %maximum_xy = call half @llvm.maximum.f16(half %x, half %y) + %maximum_yx = call half @llvm.maximum.f16(half %y, half %x) + %final_maximum = call half @llvm.maximum.f16(half %maximum_xy, half %maximum_yx) + store half %final_maximum, ptr %maximum_res + + %minimumnum_xy = call half @llvm.minimumnum.f16(half %x, half %y) + %minimumnum_yx = call half @llvm.minimumnum.f16(half %y, half %x) + %final_minimumnum = call half @llvm.minimumnum.f16(half %minimumnum_xy, half %minimumnum_yx) + store half %final_minimumnum, ptr %minimumnum_res + + %maximumnum_xy = call half @llvm.maximumnum.f16(half %x, half %y) + %maximumnum_yx = call half @llvm.maximumnum.f16(half %y, half %x) + %final_maximumnum = call half @llvm.maximumnum.f16(half %maximumnum_xy, half %maximumnum_yx) + store half %final_maximumnum, ptr %maximumnum_res + ret void +} + +; m(m(X,Y), m'(Y,X)) -> m(X, Y) +; Test where m' is the opposite op from m +define void @minmax_minmax_xy_maxmin_yx(double %x, double %y, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_minmax_xy_maxmin_yx( +; CHECK-NEXT: [[MINNUM_XY:%.*]] = call double @llvm.minnum.f64(double [[Y:%.*]], double [[X:%.*]]) +; CHECK-NEXT: store double [[MINNUM_XY]], ptr [[MINNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXNUM_XY:%.*]] = call double @llvm.maxnum.f64(double [[Y]], double [[X]]) +; CHECK-NEXT: store double [[MAXNUM_XY]], ptr [[MAXNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MINIMUM_XY:%.*]] = call double @llvm.minimum.f64(double [[Y]], double [[X]]) +; CHECK-NEXT: store double [[MINIMUM_XY]], ptr [[MINIMUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXIMUM_XY:%.*]] = call double @llvm.maximum.f64(double [[Y]], double [[X]]) +; CHECK-NEXT: store double [[MAXIMUM_XY]], ptr [[MAXIMUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MINIMUMNUM_XY:%.*]] = call double @llvm.minimumnum.f64(double [[Y]], double [[X]]) +; 
CHECK-NEXT: [[MAXIMUMNUM_XY:%.*]] = call double @llvm.maximumnum.f64(double [[X]], double [[Y]]) +; CHECK-NEXT: [[FINAL_MINIMUMNUM:%.*]] = call double @llvm.minimumnum.f64(double [[MINIMUMNUM_XY]], double [[MAXIMUMNUM_XY]]) +; CHECK-NEXT: store double [[FINAL_MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXIMUMNUM_XY1:%.*]] = call double @llvm.maximumnum.f64(double [[Y]], double [[X]]) +; CHECK-NEXT: [[MINIMUMNUM_YX:%.*]] = call double @llvm.minimumnum.f64(double [[X]], double [[Y]]) +; CHECK-NEXT: [[FINAL_MAXIMUMNUM:%.*]] = call double @llvm.maximumnum.f64(double [[MAXIMUMNUM_XY1]], double [[MINIMUMNUM_YX]]) +; CHECK-NEXT: store double [[FINAL_MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: ret void +; + %minnum_xy = call double @llvm.minnum.f64(double %x, double %y) + %maxnum_yx = call double @llvm.maxnum.f64(double %y, double %x) + %final_minnum = call double @llvm.minnum.f64(double %minnum_xy, double %maxnum_yx) + store double %final_minnum, ptr %minnum_res + + %maxnum_xy = call double @llvm.maxnum.f64(double %x, double %y) + %minnum_yx = call double @llvm.minnum.f64(double %y, double %x) + %final_maxnum = call double @llvm.maxnum.f64(double %maxnum_xy, double %minnum_yx) + store double %final_maxnum, ptr %maxnum_res + + %minimum_xy = call double @llvm.minimum.f64(double %x, double %y) + %maximum_yx = call double @llvm.maximum.f64(double %y, double %x) + %final_minimum = call double @llvm.minimum.f64(double %minimum_xy, double %maximum_yx) + store double %final_minimum, ptr %minimum_res + + %maximum_xy = call double @llvm.maximum.f64(double %x, double %y) + %minimum_yx = call double @llvm.minimum.f64(double %y, double %x) + %final_maximum = call double @llvm.maximum.f64(double %maximum_xy, double %minimum_yx) + store double %final_maximum, ptr %maximum_res + + %minimumnum_xy = call double @llvm.minimumnum.f64(double %x, double %y) + %maximumnum_yx = call double @llvm.maximumnum.f64(double %y, double %x) + %final_minimumnum = call double @llvm.minimumnum.f64(double %minimumnum_xy, double %maximumnum_yx) + store double %final_minimumnum, ptr %minimumnum_res + + %maximumnum_xy = call double @llvm.maximumnum.f64(double %x, double %y) + %minimumnum_yx = call double @llvm.minimumnum.f64(double %y, double %x) + %final_maximumnum = call double @llvm.maximumnum.f64(double %maximumnum_xy, double %minimumnum_yx) + store double %final_maximumnum, ptr %maximumnum_res + ret void } diff --git a/llvm/test/Transforms/InstSimplify/get_active_lane_mask.ll b/llvm/test/Transforms/InstSimplify/get_active_lane_mask.ll index a3b8e4efbe939..180012a4e8211 100644 --- a/llvm/test/Transforms/InstSimplify/get_active_lane_mask.ll +++ b/llvm/test/Transforms/InstSimplify/get_active_lane_mask.ll @@ -18,3 +18,51 @@ define @foo_nxv8i1(i32 %a) { %mask = call @llvm.get.active.lane.mask.nxv8i1(i32 %a, i32 0) ret %mask } + +define @foo_vscale_max_255() vscale_range(1,16) { +; CHECK-LABEL: define @foo_vscale_max_255( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[MASK:%.*]] = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 255) +; CHECK-NEXT: ret [[MASK]] +; + %mask = call @llvm.get.active.lane.mask.nxv16i1(i32 0, i32 255) + ret %mask +} + +define @foo_vscale_max_256() vscale_range(1,16) { +; CHECK-LABEL: define @foo_vscale_max_256( +; CHECK-SAME: ) #[[ATTR0]] { +; CHECK-NEXT: ret splat (i1 true) +; + %mask = call @llvm.get.active.lane.mask.nxv16i1(i32 0, i32 256) + ret %mask +} + +define @foo_vscale_max_nxv2i1_1_1_2() vscale_range(1,1) { +; CHECK-LABEL: define 
@foo_vscale_max_nxv2i1_1_1_2( +; CHECK-SAME: ) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: ret splat (i1 true) +; + %mask = call @llvm.get.active.lane.mask.nxv2i1(i32 0, i32 2) + ret %mask +} + +define @foo_vscale_max_nxv4i1_2_4_16() vscale_range(2,4) { +; CHECK-LABEL: define @foo_vscale_max_nxv4i1_2_4_16( +; CHECK-SAME: ) #[[ATTR2:[0-9]+]] { +; CHECK-NEXT: ret splat (i1 true) +; + %mask = call @llvm.get.active.lane.mask.nxv4i1(i128 0, i128 16) + ret %mask +} + +define @foo_vscale_max_nxv4i1_2_4_1_16() vscale_range(2,4) { +; CHECK-LABEL: define @foo_vscale_max_nxv4i1_2_4_1_16( +; CHECK-SAME: ) #[[ATTR2]] { +; CHECK-NEXT: [[MASK:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i128(i128 1, i128 16) +; CHECK-NEXT: ret [[MASK]] +; + %mask = call @llvm.get.active.lane.mask.nxv4i1(i128 1, i128 16) + ret %mask +} + diff --git a/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll b/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll index e69da434c0caf..1d3a13bede799 100644 --- a/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll +++ b/llvm/test/Transforms/LICM/AMDGPU/buffer-rsrc-ptrs.ll @@ -75,13 +75,13 @@ define void @hoistable_alias_scope(ptr addrspace(8) %p, ptr addrspace(8) %q, i32 ; CHECK-LABEL: define void @hoistable_alias_scope ; CHECK-SAME: (ptr addrspace(8) [[P:%.*]], ptr addrspace(8) [[Q:%.*]], i32 [[BOUND:%.*]]) { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[HOISTABLE:%.*]] = call i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) [[Q]], i32 0, i32 0, i32 0, i32 0), !alias.scope !0, !noalias !3 +; CHECK-NEXT: [[HOISTABLE:%.*]] = call i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) [[Q]], i32 0, i32 0, i32 0, i32 0), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[ORIG:%.*]] = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) [[P]], i32 [[I]], i32 0, i32 0), !alias.scope !3, !noalias !0 +; CHECK-NEXT: [[ORIG:%.*]] = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) [[P]], i32 [[I]], i32 0, i32 0), !alias.scope [[META3]], !noalias [[META0]] ; CHECK-NEXT: [[INC:%.*]] = add i32 [[HOISTABLE]], [[ORIG]] -; CHECK-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 [[INC]], ptr addrspace(8) [[P]], i32 [[I]], i32 0, i32 0), !alias.scope !3, !noalias !0 +; CHECK-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 [[INC]], ptr addrspace(8) [[P]], i32 [[I]], i32 0, i32 0), !alias.scope [[META3]], !noalias [[META0]] ; CHECK-NEXT: [[NEXT]] = add i32 [[I]], 1 ; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[NEXT]], [[BOUND]] ; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[TAIL:%.*]] @@ -165,8 +165,8 @@ define void @hoistable_buffer_construction_intrinsic(ptr addrspace(1) noalias %p ; CHECK-LABEL: define void @hoistable_buffer_construction_intrinsic ; CHECK-SAME: (ptr addrspace(1) noalias [[P_GLOBAL:%.*]], ptr addrspace(1) noalias [[Q_GLOBAL:%.*]], i32 [[BOUND:%.*]]) { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[P:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P_GLOBAL]], i16 0, i32 0, i32 0) -; CHECK-NEXT: [[Q:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[Q_GLOBAL]], i16 0, i32 0, i32 0) +; CHECK-NEXT: [[P:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[P_GLOBAL]], i16 0, i64 0, i32 0) +; CHECK-NEXT: [[Q:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) 
[[Q_GLOBAL]], i16 0, i64 0, i32 0) ; CHECK-NEXT: [[HOISTABLE:%.*]] = call i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) [[Q]], i32 0, i32 0, i32 0, i32 0) ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: @@ -181,8 +181,8 @@ define void @hoistable_buffer_construction_intrinsic(ptr addrspace(1) noalias %p ; CHECK-NEXT: ret void ; entry: - %p = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %p.global, i16 0, i32 0, i32 0) - %q = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %q.global, i16 0, i32 0, i32 0) + %p = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %p.global, i16 0, i64 0, i32 0) + %q = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) %q.global, i16 0, i64 0, i32 0) br label %loop loop: %i = phi i32 [0, %entry], [%next, %loop] @@ -212,13 +212,13 @@ define void @hoistable_buffer_construction_alias_scope(ptr addrspace(1) %p.globa ; CHECK-NEXT: [[Q_EXT:%.*]] = zext i48 [[Q_TRUNC]] to i128 ; CHECK-NEXT: [[P:%.*]] = inttoptr i128 [[P_EXT]] to ptr addrspace(8) ; CHECK-NEXT: [[Q:%.*]] = inttoptr i128 [[Q_EXT]] to ptr addrspace(8) -; CHECK-NEXT: [[HOISTABLE:%.*]] = call i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) [[Q]], i32 0, i32 0, i32 0, i32 0), !alias.scope !0, !noalias !3 +; CHECK-NEXT: [[HOISTABLE:%.*]] = call i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) [[Q]], i32 0, i32 0, i32 0, i32 0), !alias.scope [[META0]], !noalias [[META3]] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[ORIG:%.*]] = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) [[P]], i32 [[I]], i32 0, i32 0), !alias.scope !3, !noalias !0 +; CHECK-NEXT: [[ORIG:%.*]] = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) [[P]], i32 [[I]], i32 0, i32 0), !alias.scope [[META3]], !noalias [[META0]] ; CHECK-NEXT: [[INC:%.*]] = add i32 [[HOISTABLE]], [[ORIG]] -; CHECK-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 [[INC]], ptr addrspace(8) [[P]], i32 [[I]], i32 0, i32 0), !alias.scope !3, !noalias !0 +; CHECK-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 [[INC]], ptr addrspace(8) [[P]], i32 [[I]], i32 0, i32 0), !alias.scope [[META3]], !noalias [[META0]] ; CHECK-NEXT: [[NEXT]] = add i32 [[I]], 1 ; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[NEXT]], [[BOUND]] ; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[TAIL:%.*]] @@ -257,7 +257,7 @@ declare i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) nocapture r ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: write) declare void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32, ptr addrspace(8) nocapture writeonly, i32, i32, i32 immarg) #1 ; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) nocapture readnone, i16, i32, i32) #2 -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) readnone nocapture, i16, i32, i32) +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) readnone nocapture, i16, i64, i32) attributes #0 = { nocallback nofree nosync nounwind willreturn memory(argmem: read) } attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: write) } attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } diff --git 
a/llvm/test/Transforms/LICM/sink-with-coroutine.ll b/llvm/test/Transforms/LICM/sink-with-coroutine.ll index 2013df11d9c44..33ec28e40c0f3 100644 --- a/llvm/test/Transforms/LICM/sink-with-coroutine.ll +++ b/llvm/test/Transforms/LICM/sink-with-coroutine.ll @@ -22,7 +22,7 @@ define i64 @licm(i64 %n) #0 { ; CHECK-NEXT: [[T6:%.*]] = icmp ult i64 [[T5]], [[N]] ; CHECK-NEXT: br i1 [[T6]], label [[LOOP]], label [[BB2]] ; CHECK: bb2: -; CHECK-NEXT: [[RES:%.*]] = call i1 @llvm.coro.end(ptr null, i1 false, token none) +; CHECK-NEXT: call void @llvm.coro.end(ptr null, i1 false, token none) ; CHECK-NEXT: ret i64 0 ; entry: @@ -46,7 +46,7 @@ await.ready: br i1 %t6, label %loop, label %bb2 bb2: - %res = call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret i64 0 } @@ -82,7 +82,7 @@ define i64 @hoist_threadlocal() presplitcoroutine { ; CHECK: loop.end: ; CHECK-NEXT: br i1 [[CMP]], label [[EXIT]], label [[FOR_BODY]] ; CHECK: exit: -; CHECK-NEXT: [[RES:%.*]] = call i1 @llvm.coro.end(ptr null, i1 false, token none) +; CHECK-NEXT: call void @llvm.coro.end(ptr null, i1 false, token none) ; CHECK-NEXT: ret i64 0 ; entry: @@ -119,12 +119,11 @@ loop.end: br i1 %cmp, label %exit, label %for.body exit: - %res = call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret i64 0 } declare i8 @llvm.coro.suspend(token, i1) -declare i1 @llvm.coro.end(ptr, i1, token) declare nonnull ptr @readonly_funcs() readonly declare nonnull ptr @llvm.threadlocal.address(ptr nonnull) nounwind readnone willreturn declare void @not.reachable() diff --git a/llvm/test/Transforms/LoopFusion/da_separate_loops.ll b/llvm/test/Transforms/LoopFusion/da_separate_loops.ll new file mode 100644 index 0000000000000..6359f48199290 --- /dev/null +++ b/llvm/test/Transforms/LoopFusion/da_separate_loops.ll @@ -0,0 +1,182 @@ +; REQUIRES: asserts + +; RUN: opt -passes=loop-fusion -da-disable-delinearization-checks -disable-output -stats < %s 2>&1 | FileCheck -check-prefix=STAT %s +; STAT: 2 loop-fusion - DA checks passed + +; The two inner loops have no dependence and are allowed to be fused, since +; different levels are accessed in the outer loop dimensions.
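+; As an illustrative sketch (not part of this test), fusing the two inner
+; k-loops of the C code below is expected to be legal: for a fixed (i, j),
+; the store writes A[i][j][k] while the load reads A[i + 3][j + 2][k + 1],
+; so the two loop bodies never touch the same element. A hypothetical fused
+; form (the function name is made up; the array type is inferred from the
+; [100 x [100 x i64]] GEPs in the IR below):
+;
+;; void fused_inner(long n, long i, long j, long A[][100][100]) {
+;;   long temp = 0;
+;;   for (long k = 0; k < n; k++) {
+;;     A[i][j][k] = i;                  /* store from the first k-loop */
+;;     temp = A[i + 3][j + 2][k + 1];   /* load from the second k-loop */
+;;   }
+;;   (void)temp;                        /* silence the unused-variable warning */
+;; }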
+ +; C Code +; +;; for (long int i = 0; i < n; i++) { +;; for (long int j = 0; j < n; j++) { +;; for (long int k = 0; k < n; k++) +;; A[i][j][k] = i; +;; for (long int k = 0; k < n; k++) +;; temp = A[i + 3][j + 2][k + 1]; +;; } +;; } + +define void @nonequal_outer_access(i64 %n, ptr %A) nounwind uwtable ssp { +entry: + %cmp10 = icmp sgt i64 %n, 0 + br i1 %cmp10, label %for.cond1.preheader.preheader, label %for.end26 + +for.cond1.preheader.preheader: ; preds = %entry + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc24 + %i.011 = phi i64 [ %inc25, %for.inc24 ], [ 0, %for.cond1.preheader.preheader ] + %cmp26 = icmp sgt i64 %n, 0 + br i1 %cmp26, label %for.cond4.preheader.preheader, label %for.inc24 + +for.cond4.preheader.preheader: ; preds = %for.cond1.preheader + br label %for.cond4.preheader + +for.cond4.preheader: ; preds = %for.cond4.preheader.preheader, %for.inc21 + %j.07 = phi i64 [ %inc22, %for.inc21 ], [ 0, %for.cond4.preheader.preheader ] + %cmp51 = icmp sgt i64 %n, 0 + br i1 %cmp51, label %for.body6.preheader, label %for.cond10.loopexit + +for.body6.preheader: ; preds = %for.cond4.preheader + br label %for.body6 + +for.body6: ; preds = %for.body6.preheader, %for.body6 + %k.02 = phi i64 [ %inc, %for.body6 ], [ 0, %for.body6.preheader ] + %arrayidx8 = getelementptr inbounds [100 x [100 x i64]], ptr %A, i64 %i.011, i64 %j.07, i64 %k.02 + store i64 %i.011, ptr %arrayidx8, align 8 + %inc = add nsw i64 %k.02, 1 + %exitcond13 = icmp ne i64 %inc, %n + br i1 %exitcond13, label %for.body6, label %for.cond10.loopexit.loopexit + +for.cond10.loopexit.loopexit: ; preds = %for.body6 + br label %for.cond10.loopexit + +for.cond10.loopexit: ; preds = %for.cond10.loopexit.loopexit, %for.cond4.preheader + %cmp113 = icmp sgt i64 %n, 0 + br i1 %cmp113, label %for.body12.preheader, label %for.inc21 + +for.body12.preheader: ; preds = %for.cond10.loopexit + br label %for.body12 + +for.body12: ; preds = %for.body12.preheader, %for.body12 + %k9.05 = phi i64 [ %inc19, %for.body12 ], [ 0, %for.body12.preheader ] + %add = add nsw i64 %k9.05, 1 + %add13 = add nsw i64 %j.07, 2 + %add14 = add nsw i64 %i.011, 3 + %arrayidx17 = getelementptr inbounds [100 x [100 x i64]], ptr %A, i64 %add14, i64 %add13, i64 %add + %0 = load i64, ptr %arrayidx17, align 8 + %inc19 = add nsw i64 %k9.05, 1 + %exitcond = icmp ne i64 %inc19, %n + br i1 %exitcond, label %for.body12, label %for.inc21.loopexit + +for.inc21.loopexit: ; preds = %for.body12 + br label %for.inc21 + +for.inc21: ; preds = %for.inc21.loopexit, %for.cond10.loopexit + %inc22 = add nsw i64 %j.07, 1 + %exitcond14 = icmp ne i64 %inc22, %n + br i1 %exitcond14, label %for.cond4.preheader, label %for.inc24.loopexit + +for.inc24.loopexit: ; preds = %for.inc21 + br label %for.inc24 + +for.inc24: ; preds = %for.inc24.loopexit, %for.cond1.preheader + %inc25 = add nsw i64 %i.011, 1 + %exitcond15 = icmp ne i64 %inc25, %n + br i1 %exitcond15, label %for.cond1.preheader, label %for.end26.loopexit + +for.end26.loopexit: ; preds = %for.inc24 + br label %for.end26 + +for.end26: ; preds = %for.end26.loopexit, %entry + ret void +} + +; The two inner loops have a forward loop-carried dependency, allowing them +; to be fused. 
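+; As an illustrative sketch (not part of this test), the second k-loop in the
+; C code below reads A[i][j][k - 1], the value the first k-loop stored one
+; iteration earlier; that is the forward loop-carried dependence mentioned
+; above. After fusion the store at iteration k - 1 still executes before the
+; load at iteration k, so the producer still runs before the consumer. A
+; hypothetical fused form (the function name is made up; the array type is
+; inferred from the GEPs in the IR below):
+;
+;; void fused_inner_fwd(long n, long i, long j, long A[][100][100]) {
+;;   long temp = 0;
+;;   for (long k = 0; k < n; k++) {
+;;     A[i][j][k] = i;          /* producer from the first k-loop  */
+;;     temp = A[i][j][k - 1];   /* consumer from the second k-loop */
+;;   }
+;;   (void)temp;                /* silence the unused-variable warning */
+;; }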
+ +; C Code +; +;; for (long int i = 0; i < n; i++) { +;; for (long int j = 0; j < n; j++) { +;; for (long int k = 0; k < n; k++) +;; A[i][j][k] = i; +;; for (long int k = 0; k < n; k++) +;; temp = A[i][j][k - 1]; +;; } +;; } + +define void @forward_dep(i64 %n, ptr %A) nounwind uwtable ssp { +entry: + %cmp10 = icmp sgt i64 %n, 0 + br i1 %cmp10, label %for.cond1.preheader.preheader, label %for.end26 + +for.cond1.preheader.preheader: ; preds = %entry + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc24 + %i.011 = phi i64 [ %inc25, %for.inc24 ], [ 0, %for.cond1.preheader.preheader ] + %cmp26 = icmp sgt i64 %n, 0 + br i1 %cmp26, label %for.cond4.preheader.preheader, label %for.inc24 + +for.cond4.preheader.preheader: ; preds = %for.cond1.preheader + br label %for.cond4.preheader + +for.cond4.preheader: ; preds = %for.cond4.preheader.preheader, %for.inc21 + %j.07 = phi i64 [ %inc22, %for.inc21 ], [ 0, %for.cond4.preheader.preheader ] + %cmp51 = icmp sgt i64 %n, 0 + br i1 %cmp51, label %for.body6.preheader, label %for.cond10.loopexit + +for.body6.preheader: ; preds = %for.cond4.preheader + br label %for.body6 + +for.body6: ; preds = %for.body6.preheader, %for.body6 + %k.02 = phi i64 [ %inc, %for.body6 ], [ 0, %for.body6.preheader ] + %arrayidx8 = getelementptr inbounds [100 x [100 x i64]], ptr %A, i64 %i.011, i64 %j.07, i64 %k.02 + store i64 %i.011, ptr %arrayidx8, align 8 + %inc = add nsw i64 %k.02, 1 + %exitcond13 = icmp ne i64 %inc, %n + br i1 %exitcond13, label %for.body6, label %for.cond10.loopexit.loopexit + +for.cond10.loopexit.loopexit: ; preds = %for.body6 + br label %for.cond10.loopexit + +for.cond10.loopexit: ; preds = %for.cond10.loopexit.loopexit, %for.cond4.preheader + %cmp113 = icmp sgt i64 %n, 0 + br i1 %cmp113, label %for.body12.preheader, label %for.inc21 + +for.body12.preheader: ; preds = %for.cond10.loopexit + br label %for.body12 + +for.body12: ; preds = %for.body12.preheader, %for.body12 + %k9.05 = phi i64 [ %inc19, %for.body12 ], [ 0, %for.body12.preheader ] + %add = add nsw i64 %k9.05, -1 + %arrayidx17 = getelementptr inbounds [100 x [100 x i64]], ptr %A, i64 %i.011, i64 %j.07, i64 %add + %0 = load i64, ptr %arrayidx17, align 8 + %inc19 = add nsw i64 %k9.05, 1 + %exitcond = icmp ne i64 %inc19, %n + br i1 %exitcond, label %for.body12, label %for.inc21.loopexit + +for.inc21.loopexit: ; preds = %for.body12 + br label %for.inc21 + +for.inc21: ; preds = %for.inc21.loopexit, %for.cond10.loopexit + %inc22 = add nsw i64 %j.07, 1 + %exitcond14 = icmp ne i64 %inc22, %n + br i1 %exitcond14, label %for.cond4.preheader, label %for.inc24.loopexit + +for.inc24.loopexit: ; preds = %for.inc21 + br label %for.inc24 + +for.inc24: ; preds = %for.inc24.loopexit, %for.cond1.preheader + %inc25 = add nsw i64 %i.011, 1 + %exitcond15 = icmp ne i64 %inc25, %n + br i1 %exitcond15, label %for.cond1.preheader, label %for.end26.loopexit + +for.end26.loopexit: ; preds = %for.inc24 + br label %for.end26 + +for.end26: ; preds = %for.end26.loopexit, %entry + ret void +} \ No newline at end of file diff --git a/llvm/test/Transforms/LoopFusion/simple.ll b/llvm/test/Transforms/LoopFusion/simple.ll index d63890df14461..f3cd5877bd4aa 100644 --- a/llvm/test/Transforms/LoopFusion/simple.ll +++ b/llvm/test/Transforms/LoopFusion/simple.ll @@ -298,42 +298,55 @@ bb23: ; preds = %bb17, %bb ret void } +; The following IR is a representation of the provided code below. 
With PR +; #146383, loop fusion is able to utilize the information from dependence +; analysis, enabling the loops in the function to be fused. +; +; void forward_dep(int *arg) { +; for (int i = 0; i < 100; i++) { +; int tmp = i - 3; +; int val = tmp * (i + 3) % i; +; arg[i] = val; +; } +; +; for (int j = 0; j < 100; j++) { +; int val = arg[j - 3]; +; arg[j] = val * 3; +; } +; } +; define void @forward_dep(ptr noalias %arg) { ; CHECK-LABEL: @forward_dep( -; CHECK-NEXT: bb: -; CHECK-NEXT: br label [[BB7:%.*]] +; CHECK-NEXT: [[BB:.*]]: +; CHECK-NEXT: br label %[[BB7:.*]] ; CHECK: bb7: -; CHECK-NEXT: [[DOT013:%.*]] = phi i32 [ 0, [[BB:%.*]] ], [ [[TMP15:%.*]], [[BB14:%.*]] ] -; CHECK-NEXT: [[INDVARS_IV22:%.*]] = phi i64 [ 0, [[BB]] ], [ [[INDVARS_IV_NEXT3:%.*]], [[BB14]] ] +; CHECK-NEXT: [[DOT013:%.*]] = phi i32 [ 0, %[[BB]] ], [ [[TMP15:%.*]], %[[BB25:.*]] ] +; CHECK-NEXT: [[INDVARS_IV22:%.*]] = phi i64 [ 0, %[[BB]] ], [ [[INDVARS_IV_NEXT3:%.*]], %[[BB25]] ] +; CHECK-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[BB25]] ], [ 0, %[[BB]] ] ; CHECK-NEXT: [[TMP:%.*]] = add nsw i32 [[DOT013]], -3 ; CHECK-NEXT: [[TMP8:%.*]] = add nuw nsw i64 [[INDVARS_IV22]], 3 ; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i32 ; CHECK-NEXT: [[TMP10:%.*]] = mul nsw i32 [[TMP]], [[TMP9]] ; CHECK-NEXT: [[TMP11:%.*]] = trunc i64 [[INDVARS_IV22]] to i32 ; CHECK-NEXT: [[TMP12:%.*]] = srem i32 [[TMP10]], [[TMP11]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARG:%.*]], i64 [[INDVARS_IV22]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[INDVARS_IV22]] ; CHECK-NEXT: store i32 [[TMP12]], ptr [[TMP13]], align 4 -; CHECK-NEXT: br label [[BB14]] +; CHECK-NEXT: br label %[[BB14:.*]] ; CHECK: bb14: -; CHECK-NEXT: [[INDVARS_IV_NEXT3]] = add nuw nsw i64 [[INDVARS_IV22]], 1 -; CHECK-NEXT: [[TMP15]] = add nuw nsw i32 [[DOT013]], 1 -; CHECK-NEXT: [[EXITCOND4:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT3]], 100 -; CHECK-NEXT: br i1 [[EXITCOND4]], label [[BB7]], label [[BB19_PREHEADER:%.*]] -; CHECK: bb19.preheader: -; CHECK-NEXT: br label [[BB19:%.*]] -; CHECK: bb19: -; CHECK-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[BB25:%.*]] ], [ 0, [[BB19_PREHEADER]] ] ; CHECK-NEXT: [[TMP20:%.*]] = add nsw i64 [[INDVARS_IV1]], -3 ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[TMP20]] ; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 ; CHECK-NEXT: [[TMP23:%.*]] = mul nsw i32 [[TMP22]], 3 ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[ARG]], i64 [[INDVARS_IV1]] ; CHECK-NEXT: store i32 [[TMP23]], ptr [[TMP24]], align 4 -; CHECK-NEXT: br label [[BB25]] +; CHECK-NEXT: br label %[[BB25]] ; CHECK: bb25: +; CHECK-NEXT: [[INDVARS_IV_NEXT3]] = add nuw nsw i64 [[INDVARS_IV22]], 1 +; CHECK-NEXT: [[TMP15]] = add nuw nsw i32 [[DOT013]], 1 +; CHECK-NEXT: [[EXITCOND4:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT3]], 100 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV1]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[BB19]], label [[BB26:%.*]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[BB7]], label %[[BB26:.*]] ; CHECK: bb26: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll new file mode 100644 index 0000000000000..14a4c952d3510 --- /dev/null +++ b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check-dl.ll @@ -0,0 +1,50 @@ +; 
NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 6 +; RUN: opt -passes=loop-idiom -S %s | FileCheck %s + +target datalayout = "p:16:16" + +;. +; CHECK: @.crctable = private constant [256 x i32] zeroinitializer +;. +define void @test_with_dl() { +; CHECK-LABEL: define void @test_with_dl() { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[PH:.*]] +; CHECK: [[PH_LOOPEXIT:.*]]: +; CHECK-NEXT: [[CRC_NEXT_LCSSA:%.*]] = phi i32 [ [[CRC_NEXT3:%.*]], %[[LOOP:.*]] ] +; CHECK-NEXT: br label %[[PH]] +; CHECK: [[PH]]: +; CHECK-NEXT: [[CRC_USE:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[CRC_NEXT_LCSSA]], %[[PH_LOOPEXIT]] ] +; CHECK-NEXT: br label %[[LOOP]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[CRC2:%.*]] = phi i32 [ 0, %[[PH]] ], [ [[CRC_NEXT3]], %[[LOOP]] ] +; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i32 [[CRC2]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i16 +; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i32, ptr @.crctable, i16 [[INDEXER_EXT]] +; CHECK-NEXT: [[TBL_LD:%.*]] = load i32, ptr [[TBL_PTRADD]], align 4 +; CHECK-NEXT: [[CRC_LE_SHIFT:%.*]] = lshr i32 [[CRC2]], 8 +; CHECK-NEXT: [[CRC_NEXT3]] = xor i32 [[CRC_LE_SHIFT]], [[TBL_LD]] +; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 +; CHECK-NEXT: [[EXIT_COND1:%.*]] = icmp ne i16 [[IV]], 0 +; CHECK-NEXT: br i1 [[EXIT_COND1]], label %[[LOOP]], label %[[PH_LOOPEXIT]] +; +entry: + br label %ph + +ph: + %crc.use = phi i32 [ 0, %entry ], [ %crc.next, %loop ] + br label %loop + +loop: + %iv = phi i16 [ 0, %ph ], [ %iv.next, %loop ] + %crc = phi i32 [ 0, %ph ], [ %crc.next, %loop ] + %lshr.crc.1 = lshr i32 %crc, 1 + %crc.and.1 = and i32 %crc, 1 + %sb.check = icmp eq i32 %crc.and.1, 0 + %xor = xor i32 %lshr.crc.1, 0 + %crc.next = select i1 %sb.check, i32 %lshr.crc.1, i32 %xor + %iv.next = add i16 %iv, 1 + %exit.cond = icmp ult i16 %iv, 7 + br i1 %exit.cond, label %loop, label %ph +} diff --git a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll index 51dc142200d78..b2ec53ca405d4 100644 --- a/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll +++ b/llvm/test/Transforms/LoopIdiom/cyclic-redundancy-check.ll @@ -118,8 +118,8 @@ define i16 @crc16.le.tc16(i16 %msg, i16 %checksum) { ; CHECK-NEXT: [[IV_INDEXER:%.*]] = zext i8 [[IV_BITS]] to i16 ; CHECK-NEXT: [[DATA_INDEXER:%.*]] = lshr i16 [[MSG]], [[IV_INDEXER]] ; CHECK-NEXT: [[CRC_DATA_INDEXER:%.*]] = xor i16 [[DATA_INDEXER]], [[CRC2]] -; CHECK-NEXT: [[INDEXER_LO:%.*]] = and i16 [[CRC_DATA_INDEXER]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_LO]] to i64 +; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i16 [[CRC_DATA_INDEXER]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.2, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2 ; CHECK-NEXT: [[CRC_LE_SHIFT:%.*]] = lshr i16 [[CRC2]], 8 @@ -166,8 +166,8 @@ define i8 @crc8.le.tc16(i16 %msg, i8 %checksum) { ; CHECK-NEXT: [[DATA_INDEXER:%.*]] = lshr i16 [[MSG]], [[IV_INDEXER]] ; CHECK-NEXT: [[CRC_INDEXER_CAST:%.*]] = zext i8 [[CRC2]] to i16 ; CHECK-NEXT: [[CRC_DATA_INDEXER:%.*]] = xor i16 [[DATA_INDEXER]], [[CRC_INDEXER_CAST]] -; CHECK-NEXT: [[INDEXER_LO:%.*]] = and i16 [[CRC_DATA_INDEXER]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_LO]] to i64 +; 
CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i16 [[CRC_DATA_INDEXER]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i8, ptr @.crctable.3, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD]] = load i8, ptr [[TBL_PTRADD]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i8 [[IV]], 1 @@ -212,8 +212,8 @@ define i16 @crc16.be.tc8.crc.init.li(i16 %checksum, i8 %msg) { ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[CRC2:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT3:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[INDEXER_HI:%.*]] = lshr i16 [[CRC2]], 8 -; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = and i16 [[INDEXER_HI]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_HI_LO_BYTE]] to i64 +; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = trunc i16 [[INDEXER_HI]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_HI_LO_BYTE]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.4, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2 ; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = shl i16 [[CRC2]], 8 @@ -255,8 +255,8 @@ define i16 @crc16.be.tc8.crc.init.arg(i16 %crc.init) { ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[CRC2:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT3:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[INDEXER_HI:%.*]] = lshr i16 [[CRC2]], 8 -; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = and i16 [[INDEXER_HI]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_HI_LO_BYTE]] to i64 +; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = trunc i16 [[INDEXER_HI]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_HI_LO_BYTE]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.5, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2 ; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = shl i16 [[CRC2]], 8 @@ -295,8 +295,8 @@ define i16 @crc16.be.tc8.crc.init.arg.flipped.sb.check(i16 %crc.init) { ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[CRC2:%.*]] = phi i16 [ [[CRC_INIT]], %[[ENTRY]] ], [ [[CRC_NEXT3:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[INDEXER_HI:%.*]] = lshr i16 [[CRC2]], 8 -; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = and i16 [[INDEXER_HI]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i16 [[INDEXER_HI_LO_BYTE]] to i64 +; CHECK-NEXT: [[INDEXER_HI_LO_BYTE:%.*]] = trunc i16 [[INDEXER_HI]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_HI_LO_BYTE]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i16, ptr @.crctable.6, i64 [[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i16, ptr [[TBL_PTRADD]], align 2 ; CHECK-NEXT: [[CRC_BE_SHIFT:%.*]] = shl i16 [[CRC2]], 8 @@ -406,8 +406,8 @@ define i32 @crc32.le.tc8.data32(i32 %checksum, i32 %msg) { ; CHECK-NEXT: [[IV_INDEXER:%.*]] = zext i8 [[IV_BITS]] to i32 ; CHECK-NEXT: [[DATA_INDEXER:%.*]] = lshr i32 [[MSG]], [[IV_INDEXER]] ; CHECK-NEXT: [[CRC_DATA_INDEXER:%.*]] = xor i32 [[DATA_INDEXER]], [[CRC2]] -; CHECK-NEXT: [[INDEXER_LO:%.*]] = and i32 [[CRC_DATA_INDEXER]], 255 -; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i32 [[INDEXER_LO]] to i64 +; CHECK-NEXT: [[INDEXER_LO:%.*]] = trunc i32 [[CRC_DATA_INDEXER]] to i8 +; CHECK-NEXT: [[INDEXER_EXT:%.*]] = zext i8 [[INDEXER_LO]] to i64 ; CHECK-NEXT: [[TBL_PTRADD:%.*]] = getelementptr inbounds i32, ptr @.crctable.8, i64 
[[INDEXER_EXT]] ; CHECK-NEXT: [[TBL_LD:%.*]] = load i32, ptr [[TBL_PTRADD]], align 4 ; CHECK-NEXT: [[CRC_LE_SHIFT:%.*]] = lshr i32 [[CRC2]], 8 diff --git a/llvm/test/Transforms/LoopInterchange/bail-out-all-deps.ll b/llvm/test/Transforms/LoopInterchange/bail-out-all-deps.ll new file mode 100644 index 0000000000000..83cfd91c4da4c --- /dev/null +++ b/llvm/test/Transforms/LoopInterchange/bail-out-all-deps.ll @@ -0,0 +1,44 @@ +; RUN: opt < %s -passes=loop-interchange -pass-remarks-output=%t \ +; RUN: -disable-output +; RUN: FileCheck -input-file %t %s + +; Check that loop interchange bails out early when finding a direction vector +; with all '*' elements. +; +; for (int i = 0; i < 4; i++) +; for (int j = 0; j < 4; j++) +; A[i & val][j & val] = 0; + +; CHECK: --- !Missed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Dependence +; CHECK-NEXT: Function: f +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: All loops have dependencies in all directions. +; CHECK-NEXT: ... +define void @f(ptr %A, i64 %val) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i64 [ 0, %for.i.header ], [ %j.next, %for.j ] + %subscript.0 = and i64 %i, %val + %subscript.1 = and i64 %j, %val + %idx = getelementptr inbounds [4 x i8], ptr %A, i64 %subscript.0, i64 %subscript.1 + store i8 0, ptr %idx + %j.next = add nuw nsw i64 %j, 1 + %exit.j = icmp eq i64 %j.next, 4 + br i1 %exit.j, label %for.i.latch, label %for.j + +for.i.latch: + %i.next = add nuw nsw i64 %i, 1 + %exit.i = icmp eq i64 %i.next, 4 + br i1 %exit.i, label %exit, label %for.i.header + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopInterchange/confused-dependence.ll b/llvm/test/Transforms/LoopInterchange/confused-dependence.ll index 49b7b0e4797b8..94080949f0af8 100644 --- a/llvm/test/Transforms/LoopInterchange/confused-dependence.ll +++ b/llvm/test/Transforms/LoopInterchange/confused-dependence.ll @@ -1,6 +1,6 @@ -; REQUIRES: asserts -; RUN: opt < %s -passes=loop-interchange -verify-dom-info -verify-loop-info \ -; RUN: -disable-output -debug 2>&1 | FileCheck %s +; RUN: opt < %s -passes=loop-interchange -pass-remarks-output=%t \ +; RUN: -disable-output +; RUN: FileCheck -input-file %t %s ;; In the following case, p0 and p1 may alias, so the direction vector must be [* *]. ;; @@ -10,9 +10,13 @@ ;; p0[4 * i + j] = p1[4 * j + i]; ;; } -; CHECK: Dependency matrix before interchange: -; CHECK-NEXT: * * -; CHECK-NEXT: Processing InnerLoopId = 1 and OuterLoopId = 0 +; CHECK: --- !Missed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Dependence +; CHECK-NEXT: Function: may_alias +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: All loops have dependencies in all directions. +; CHECK-NEXT: ... define void @may_alias(ptr %p0, ptr %p1) { entry: br label %for.i.header diff --git a/llvm/test/Transforms/LoopInterchange/legality-for-scalar-deps.ll b/llvm/test/Transforms/LoopInterchange/legality-for-scalar-deps.ll index c30f9a399fed8..5f4a8486d9ad7 100644 --- a/llvm/test/Transforms/LoopInterchange/legality-for-scalar-deps.ll +++ b/llvm/test/Transforms/LoopInterchange/legality-for-scalar-deps.ll @@ -21,13 +21,13 @@ ; CHECK-NEXT: Name: Dependence ; CHECK-NEXT: Function: issue46867 ; CHECK-NEXT: Args: -; CHECK-NEXT: - String: Cannot interchange loops due to dependences. +; CHECK-NEXT: - String: All loops have dependencies in all directions. 
; CHECK: --- !Missed ; CHECK-NEXT: Pass: loop-interchange ; CHECK-NEXT: Name: Dependence ; CHECK-NEXT: Function: issue46867 ; CHECK-NEXT: Args: -; CHECK-NEXT: - String: Cannot interchange loops due to dependences. +; CHECK-NEXT: - String: All loops have dependencies in all directions. define void @issue46867(ptr noundef captures(none) %s, i32 noundef %c, ptr noundef readonly captures(none) %ff) { entry: %tobool7.not = icmp eq i32 %c, 0 @@ -121,7 +121,7 @@ land.end: ; CHECK-NEXT: Name: Dependence ; CHECK-NEXT: Function: issue47401 ; CHECK-NEXT: Args: -; CHECK-NEXT: - String: Cannot interchange loops due to dependences. +; CHECK-NEXT: - String: All loops have dependencies in all directions. define void @issue47401(ptr noundef writeonly captures(none) %e, ptr noundef readonly captures(none) %bb) { entry: br label %for.cond1.preheader @@ -175,7 +175,7 @@ land.end: ; CHECK-NEXT: Name: Dependence ; CHECK-NEXT: Function: issue47295 ; CHECK-NEXT: Args: -; CHECK-NEXT: - String: Cannot interchange loops due to dependences. +; CHECK-NEXT: - String: All loops have dependencies in all directions. define void @issue47295(ptr noundef captures(none) %f, ptr noundef writeonly captures(none) %cc) { entry: br label %for.cond1.preheader @@ -221,7 +221,7 @@ for.body4: ; CHECK-NEXT: Name: Dependence ; CHECK-NEXT: Function: issue54176 ; CHECK-NEXT: Args: -; CHECK-NEXT: - String: Cannot interchange loops due to dependences. +; CHECK-NEXT: - String: All loops have dependencies in all directions. define void @issue54176(i32 noundef %n, i32 noundef %m, ptr noundef captures(none) %aa, ptr noundef readonly captures(none) %bb, ptr noundef writeonly captures(none) %cc) { entry: diff --git a/llvm/test/Transforms/LoopInterchange/loop-interchange-optimization-remarks.ll b/llvm/test/Transforms/LoopInterchange/loop-interchange-optimization-remarks.ll index 73a566a310157..14836ba73433d 100644 --- a/llvm/test/Transforms/LoopInterchange/loop-interchange-optimization-remarks.ll +++ b/llvm/test/Transforms/LoopInterchange/loop-interchange-optimization-remarks.ll @@ -71,7 +71,7 @@ for.end19: ; CHECK-NEXT: Name: Dependence ; CHECK-NEXT: Function: test01 ; CHECK-NEXT: Args: -; CHECK-NEXT: - String: Cannot interchange loops due to dependences. +; CHECK-NEXT: - String: All loops have dependencies in all directions. ; CHECK-NEXT: ... ; DELIN: --- !Analysis @@ -147,7 +147,7 @@ define void @test02(i32 %k, i32 %N) { ; CHECK-NEXT: Name: Dependence ; CHECK-NEXT: Function: test02 ; CHECK-NEXT: Args: -; CHECK-NEXT: - String: Cannot interchange loops due to dependences. +; CHECK-NEXT: - String: All loops have dependencies in all directions. ; CHECK-NEXT: ... ; DELIN: --- !Analysis @@ -290,7 +290,7 @@ for.end17: ; CHECK-NEXT: Name: Dependence ; CHECK-NEXT: Function: test04 ; CHECK-NEXT: Args: -; CHECK-NEXT: - String: Cannot interchange loops due to dependences. +; CHECK-NEXT: - String: All loops have dependencies in all directions. ; CHECK-NEXT: ... 
; DELIN: --- !Missed diff --git a/llvm/test/Transforms/LoopInterchange/unique-dep-matrix.ll b/llvm/test/Transforms/LoopInterchange/unique-dep-matrix.ll index 68089b43121c5..3af9e7304e3be 100644 --- a/llvm/test/Transforms/LoopInterchange/unique-dep-matrix.ll +++ b/llvm/test/Transforms/LoopInterchange/unique-dep-matrix.ll @@ -2,14 +2,13 @@ ; RUN: opt < %s -passes=loop-interchange -S -debug 2>&1 | FileCheck %s ; CHECK: Dependency matrix before interchange: -; CHECK-NEXT: * * ; CHECK-NEXT: = * ; CHECK-NEXT: < * ; CHECK-NEXT: Processing InnerLoopId ; This example is taken from github issue #54176 ; -define void @foo(i32 noundef %n, i32 noundef %m, ptr nocapture noundef %aa, ptr nocapture noundef readonly %bb, ptr nocapture noundef writeonly %cc) { +define void @foo(i32 noundef %n, i32 noundef %m, ptr nocapture noundef noalias %aa, ptr nocapture noundef readonly noalias %bb, ptr nocapture noundef writeonly noalias %cc) { entry: %arrayidx7 = getelementptr inbounds i8, ptr %aa, i64 512 br label %for.cond1.preheader diff --git a/llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll b/llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll new file mode 100644 index 0000000000000..1339afe146f21 --- /dev/null +++ b/llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll @@ -0,0 +1,75 @@ +; Test branch weight metadata, estimated trip count metadata, and block +; frequencies after loop peeling. + +; RUN: opt < %s -S -passes='print' 2>&1 | \ +; RUN: FileCheck -check-prefix=CHECK %s + +; The -implicit-check-not options make sure that no additional labels or calls +; to @f show up. +; RUN: opt < %s -S -passes='loop-unroll,print' \ +; RUN: -unroll-force-peel-count=2 2>&1 | \ +; RUN: FileCheck %s -check-prefix=CHECK-UR \ +; RUN: -implicit-check-not='{{^[^ ;]*:}}' \ +; RUN: -implicit-check-not='call void @f' + +; CHECK: block-frequency-info: test +; CHECK: do.body: float = 10.0, + +; The sum should still be ~10. 
+; +; CHECK-UR: block-frequency-info: test +; CHECK-UR: - [[DO_BODY_PEEL:.*]]: float = 1.0, +; CHECK-UR: - [[DO_BODY_PEEL2:.*]]: float = 0.9, +; CHECK-UR: - [[DO_BODY:.*]]: float = 8.1, + +declare void @f(i32) + +define void @test(i32 %n) { +; CHECK-UR-LABEL: define void @test( +; CHECK-UR: [[ENTRY:.*]]: +; CHECK-UR: br label %[[DO_BODY_PEEL_BEGIN:.*]] +; CHECK-UR: [[DO_BODY_PEEL_BEGIN]]: +; CHECK-UR: br label %[[DO_BODY_PEEL:.*]] +; CHECK-UR: [[DO_BODY_PEEL]]: +; CHECK-UR: call void @f +; CHECK-UR: br i1 %{{.*}}, label %[[DO_END:.*]], label %[[DO_BODY_PEEL_NEXT:.*]], !prof ![[#PROF:]] +; CHECK-UR: [[DO_BODY_PEEL_NEXT]]: +; CHECK-UR: br label %[[DO_BODY_PEEL2:.*]] +; CHECK-UR: [[DO_BODY_PEEL2]]: +; CHECK-UR: call void @f +; CHECK-UR: br i1 %{{.*}}, label %[[DO_END]], label %[[DO_BODY_PEEL_NEXT1:.*]], !prof ![[#PROF]] +; CHECK-UR: [[DO_BODY_PEEL_NEXT1]]: +; CHECK-UR: br label %[[DO_BODY_PEEL_NEXT5:.*]] +; CHECK-UR: [[DO_BODY_PEEL_NEXT5]]: +; CHECK-UR: br label %[[ENTRY_PEEL_NEWPH:.*]] +; CHECK-UR: [[ENTRY_PEEL_NEWPH]]: +; CHECK-UR: br label %[[DO_BODY]] +; CHECK-UR: [[DO_BODY]]: +; CHECK-UR: call void @f +; CHECK-UR: br i1 %{{.*}}, label %[[DO_END_LOOPEXIT:.*]], label %[[DO_BODY]], !prof ![[#PROF]], !llvm.loop ![[#LOOP_UR_LATCH:]] +; CHECK-UR: [[DO_END_LOOPEXIT]]: +; CHECK-UR: br label %[[DO_END]] +; CHECK-UR: [[DO_END]]: +; CHECK-UR: ret void + +entry: + br label %do.body + +do.body: + %i = phi i32 [ 0, %entry ], [ %inc, %do.body ] + %inc = add i32 %i, 1 + call void @f(i32 %i) + %c = icmp sge i32 %inc, %n + br i1 %c, label %do.end, label %do.body, !prof !0 + +do.end: + ret void +} + +!0 = !{!"branch_weights", i32 1, i32 9} + +; CHECK-UR: ![[#PROF]] = !{!"branch_weights", i32 1, i32 9} +; CHECK-UR: ![[#LOOP_UR_LATCH]] = distinct !{![[#LOOP_UR_LATCH]], ![[#LOOP_UR_PC:]], ![[#LOOP_UR_TC:]], ![[#DISABLE:]]} +; CHECK-UR: ![[#LOOP_UR_PC]] = !{!"llvm.loop.peeled.count", i32 2} +; CHECK-UR: ![[#LOOP_UR_TC]] = !{!"llvm.loop.estimated_trip_count", i32 8} +; CHECK-UR: ![[#DISABLE]] = !{!"llvm.loop.unroll.disable"} diff --git a/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll b/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll index c58f8f1f4e4ee..63a0dd4b4b4f9 100644 --- a/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll +++ b/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll @@ -15,9 +15,9 @@ define void @test() { ; CHECK: loop.peel: ; CHECK-NEXT: [[X_PEEL:%.*]] = call i32 @get.x() ; CHECK-NEXT: switch i32 [[X_PEEL]], label [[LOOP_LATCH_PEEL:%.*]] [ -; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL]] -; CHECK-NEXT: i32 1, label [[LOOP_EXIT:%.*]] -; CHECK-NEXT: i32 2, label [[LOOP_EXIT]] +; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL]] +; CHECK-NEXT: i32 1, label [[LOOP_EXIT:%.*]] +; CHECK-NEXT: i32 2, label [[LOOP_EXIT]] ; CHECK-NEXT: ], !prof [[PROF0:![0-9]+]] ; CHECK: loop.latch.peel: ; CHECK-NEXT: br label [[LOOP_PEEL_NEXT:%.*]] @@ -26,10 +26,10 @@ define void @test() { ; CHECK: loop.peel2: ; CHECK-NEXT: [[X_PEEL3:%.*]] = call i32 @get.x() ; CHECK-NEXT: switch i32 [[X_PEEL3]], label [[LOOP_LATCH_PEEL4:%.*]] [ -; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL4]] -; CHECK-NEXT: i32 1, label [[LOOP_EXIT]] -; CHECK-NEXT: i32 2, label [[LOOP_EXIT]] -; CHECK-NEXT: ], !prof [[PROF1:![0-9]+]] +; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL4]] +; CHECK-NEXT: i32 1, label [[LOOP_EXIT]] +; CHECK-NEXT: i32 2, label [[LOOP_EXIT]] +; CHECK-NEXT: ], !prof [[PROF0]] ; CHECK: loop.latch.peel4: ; CHECK-NEXT: br label [[LOOP_PEEL_NEXT1:%.*]] ; CHECK: loop.peel.next1: @@ -41,31 +41,33 @@ define void 
@test() { ; CHECK: loop: ; CHECK-NEXT: [[X:%.*]] = call i32 @get.x() ; CHECK-NEXT: switch i32 [[X]], label [[LOOP_LATCH:%.*]] [ -; CHECK-NEXT: i32 0, label [[LOOP_LATCH]] -; CHECK-NEXT: i32 1, label [[LOOP_EXIT_LOOPEXIT:%.*]] -; CHECK-NEXT: i32 2, label [[LOOP_EXIT_LOOPEXIT]] -; CHECK-NEXT: ], !prof [[PROF2:![0-9]+]] +; CHECK-NEXT: i32 0, label [[LOOP_LATCH]] +; CHECK-NEXT: i32 1, label [[LOOP_EXIT_LOOPEXIT:%.*]] +; CHECK-NEXT: i32 2, label [[LOOP_EXIT_LOOPEXIT]] +; CHECK-NEXT: ], !prof [[PROF0]] ; CHECK: loop.latch: -; CHECK-NEXT: br label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br label [[LOOP]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: loop.exit.loopexit: ; CHECK-NEXT: br label [[LOOP_EXIT]] ; CHECK: loop.exit: ; CHECK-NEXT: ret void +; +; DISABLEADV-LABEL: @test( +; DISABLEADV-NEXT: entry: +; DISABLEADV-NEXT: br label [[LOOP:%.*]] +; DISABLEADV: loop: +; DISABLEADV-NEXT: [[X:%.*]] = call i32 @get.x() +; DISABLEADV-NEXT: switch i32 [[X]], label [[LOOP_LATCH:%.*]] [ +; DISABLEADV-NEXT: i32 0, label [[LOOP_LATCH]] +; DISABLEADV-NEXT: i32 1, label [[LOOP_EXIT:%.*]] +; DISABLEADV-NEXT: i32 2, label [[LOOP_EXIT]] +; DISABLEADV-NEXT: ], !prof [[PROF0:![0-9]+]] +; DISABLEADV: loop.latch: +; DISABLEADV-NEXT: br label [[LOOP]] +; DISABLEADV: loop.exit: +; DISABLEADV-NEXT: ret void +; -; DISABLEADV-LABEL: @test() -; DISABLEADV-NEXT: entry: -; DISABLEADV-NEXT: br label %loop -; DISABLEADV: loop -; DISABLEADV-NEXT: %x = call i32 @get.x() -; DISABLEADV-NEXT: switch i32 %x, label %loop.latch [ -; DISABLEADV-NEXT: i32 0, label %loop.latch -; DISABLEADV-NEXT: i32 1, label %loop.exit -; DISABLEADV-NEXT: i32 2, label %loop.exit -; DISABLEADV-NEXT: ], !prof !0 -; DISABLEADV: loop.latch: -; DISABLEADV-NEXT: br label %loop -; DISABLEADV: loop.exit: -; DISABLEADV-NEXT: ret void entry: br label %loop @@ -89,9 +91,9 @@ loop.exit: ;. ; CHECK: [[PROF0]] = !{!"branch_weights", i32 100, i32 200, i32 20, i32 10} -; CHECK: [[PROF1]] = !{!"branch_weights", i32 90, i32 180, i32 20, i32 10} -; CHECK: [[PROF2]] = !{!"branch_weights", i32 80, i32 160, i32 20, i32 10} -; CHECK: [[LOOP3]] = distinct !{!3, !4, !5} -; CHECK: [[META4:![0-9]+]] = !{!"llvm.loop.peeled.count", i32 2} -; CHECK: [[META5:![0-9]+]] = !{!"llvm.loop.unroll.disable"} +; CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]], [[META3:![0-9]+]]} +; CHECK: [[META2]] = !{!"llvm.loop.peeled.count", i32 2} +; CHECK: [[META3]] = !{!"llvm.loop.unroll.disable"} +;. +; DISABLEADV: [[PROF0]] = !{!"branch_weights", i32 100, i32 200, i32 20, i32 10} ;. 
diff --git a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll index d91cb5bab3827..e95121593e4f7 100644 --- a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll +++ b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll @@ -15,13 +15,13 @@ ; CHECK: br i1 %{{.*}}, label %[[NEXT0:.*]], label %for.cond.for.end_crit_edge, !prof !16 ; CHECK: [[NEXT0]]: ; CHECK: br i1 %c, label %{{.*}}, label %side_exit, !prof !15 -; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !17 +; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !16 ; CHECK: [[NEXT1]]: ; CHECK: br i1 %c, label %{{.*}}, label %side_exit, !prof !15 -; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !18 +; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !16 ; CHECK: [[NEXT2]]: ; CHECK: br i1 %c, label %{{.*}}, label %side_exit.loopexit, !prof !15 -; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !18 +; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !16, !llvm.loop !17 define i32 @basic(ptr %p, i32 %k, i1 %c) #0 !prof !15 { entry: @@ -84,6 +84,7 @@ attributes #1 = { nounwind optsize } ;CHECK: !15 = !{!"branch_weights", i32 1, i32 0} ; This is a weights of latch and its copies. ;CHECK: !16 = !{!"branch_weights", i32 3001, i32 1001} -;CHECK: !17 = !{!"branch_weights", i32 2000, i32 1001} -;CHECK: !18 = !{!"branch_weights", i32 1001, i32 1001} +;CHECK: !17 = distinct !{!17, !18, !19, {{.*}}} +;CHECK: !18 = !{!"llvm.loop.peeled.count", i32 4} +;CHECK: !19 = !{!"llvm.loop.estimated_trip_count", i32 0} diff --git a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll index 15dce234baee9..dec126f289d32 100644 --- a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll +++ b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll @@ -5,7 +5,7 @@ ; RUN: opt < %s -S -profile-summary-huge-working-set-size-threshold=9 -debug-only=loop-unroll -passes='require,function(require,loop-unroll)' 2>&1 | FileCheck %s --check-prefix=NOPEEL ; REQUIRES: asserts -; Make sure we use the profile information correctly to peel-off 3 iterations +; Make sure we use the profile information correctly to peel-off 4 iterations ; from the loop, and update the branch weights for the peeled loop properly. 
; CHECK: Loop Unroll: F[basic] @@ -20,11 +20,11 @@ ; CHECK-LABEL: @basic ; CHECK: br i1 %{{.*}}, label %[[NEXT0:.*]], label %for.cond.for.end_crit_edge, !prof !15 ; CHECK: [[NEXT0]]: -; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !16 +; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !15 ; CHECK: [[NEXT1]]: -; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !17 +; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !15 ; CHECK: [[NEXT2]]: -; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !17 +; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !15, !llvm.loop !16 define void @basic(ptr %p, i32 %k) #0 !prof !15 { entry: @@ -104,6 +104,7 @@ attributes #1 = { nounwind optsize } !16 = !{!"branch_weights", i32 3001, i32 1001} ;CHECK: !15 = !{!"branch_weights", i32 3001, i32 1001} -;CHECK: !16 = !{!"branch_weights", i32 2000, i32 1001} -;CHECK: !17 = !{!"branch_weights", i32 1001, i32 1001} +;CHECK: !16 = distinct !{!16, !17, !18, {{.*}}} +;CHECK: !17 = !{!"llvm.loop.peeled.count", i32 4} +;CHECK: !18 = !{!"llvm.loop.estimated_trip_count", i32 0} diff --git a/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll b/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll index ec71c67d250b4..0a3d201e617de 100644 --- a/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll +++ b/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll @@ -3,7 +3,7 @@ define i32 @f(i1 %cond1) #0 !prof !0 { ; CHECK-LABEL: define i32 @f -; CHECK-SAME: (i1 [[COND1:%.*]]) !prof [[PROF0:![0-9]+]] { +; CHECK-SAME: (i1 [[COND1:%.*]]) {{.*}}{ ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[LOOP1_PEEL_BEGIN:%.*]] ; CHECK: loop1.peel.begin: @@ -19,7 +19,7 @@ define i32 @f(i1 %cond1) #0 !prof !0 { ; CHECK-NEXT: br label [[LOOP1:%.*]] ; CHECK: loop1: ; CHECK-NEXT: [[LD:%.*]] = load i64, ptr null, align 8 -; CHECK-NEXT: br i1 [[COND1]], label [[LOOP1]], label [[EXIT1_LOOPEXIT:%.*]], !prof [[PROF2:![0-9]+]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[COND1]], label [[LOOP1]], label [[EXIT1_LOOPEXIT:%.*]], !prof [[PROF1]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: exit1.loopexit: ; CHECK-NEXT: [[LD_LCSSA_PH:%.*]] = phi i64 [ [[LD]], [[LOOP1]] ] ; CHECK-NEXT: br label [[EXIT1]] diff --git a/llvm/test/Transforms/LoopUnroll/scevunroll.ll b/llvm/test/Transforms/LoopUnroll/scevunroll.ll index b6b14e365cc1d..fa55eab062198 100644 --- a/llvm/test/Transforms/LoopUnroll/scevunroll.ll +++ b/llvm/test/Transforms/LoopUnroll/scevunroll.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -S -passes='loop(indvars),loop-unroll' -verify-loop-info | FileCheck %s ; +target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-Fn32" + ; Unit tests for loop unrolling using ScalarEvolution to compute trip counts. ; ; Indvars is run first to generate an "old" SCEV result. 
Some unit @@ -66,14 +68,14 @@ define i64 @earlyLoopTest(ptr %base) nounwind { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[BASE:%.*]], align 4 +; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[BASE:%.*]], align 8 ; CHECK-NEXT: br label [[TAIL:%.*]] ; CHECK: tail: ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[VAL]], 0 ; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_1:%.*]], label [[EXIT2:%.*]] ; CHECK: loop.1: ; CHECK-NEXT: [[ADR_1:%.*]] = getelementptr i64, ptr [[BASE]], i64 1 -; CHECK-NEXT: [[VAL_1:%.*]] = load i64, ptr [[ADR_1]], align 4 +; CHECK-NEXT: [[VAL_1:%.*]] = load i64, ptr [[ADR_1]], align 8 ; CHECK-NEXT: [[S_NEXT_1:%.*]] = add i64 [[VAL]], [[VAL_1]] ; CHECK-NEXT: br label [[TAIL_1:%.*]] ; CHECK: tail.1: @@ -81,7 +83,7 @@ define i64 @earlyLoopTest(ptr %base) nounwind { ; CHECK-NEXT: br i1 [[CMP2_1]], label [[LOOP_2:%.*]], label [[EXIT2]] ; CHECK: loop.2: ; CHECK-NEXT: [[ADR_2:%.*]] = getelementptr i64, ptr [[BASE]], i64 2 -; CHECK-NEXT: [[VAL_2:%.*]] = load i64, ptr [[ADR_2]], align 4 +; CHECK-NEXT: [[VAL_2:%.*]] = load i64, ptr [[ADR_2]], align 8 ; CHECK-NEXT: [[S_NEXT_2:%.*]] = add i64 [[S_NEXT_1]], [[VAL_2]] ; CHECK-NEXT: br label [[TAIL_2:%.*]] ; CHECK: tail.2: @@ -89,7 +91,7 @@ define i64 @earlyLoopTest(ptr %base) nounwind { ; CHECK-NEXT: br i1 [[CMP2_2]], label [[LOOP_3:%.*]], label [[EXIT2]] ; CHECK: loop.3: ; CHECK-NEXT: [[ADR_3:%.*]] = getelementptr i64, ptr [[BASE]], i64 3 -; CHECK-NEXT: [[VAL_3:%.*]] = load i64, ptr [[ADR_3]], align 4 +; CHECK-NEXT: [[VAL_3:%.*]] = load i64, ptr [[ADR_3]], align 8 ; CHECK-NEXT: [[S_NEXT_3:%.*]] = add i64 [[S_NEXT_2]], [[VAL_3]] ; CHECK-NEXT: br i1 false, label [[TAIL_3:%.*]], label [[EXIT1:%.*]] ; CHECK: tail.3: @@ -381,7 +383,7 @@ define i32 @test_pr56044(ptr %src, i32 %a) { ; CHECK: loop.2.peel: ; CHECK-NEXT: [[IV_2_NEXT_PEEL:%.*]] = add i32 0, [[ADD_2]] ; CHECK-NEXT: [[IV_1_NEXT_PEEL:%.*]] = add nuw nsw i32 0, 1 -; CHECK-NEXT: [[EC_2_PEEL:%.*]] = icmp ult i32 [[IV_1_NEXT_PEEL]], 12345 +; CHECK-NEXT: [[EC_2_PEEL:%.*]] = icmp ne i32 [[IV_1_NEXT_PEEL]], 12345 ; CHECK-NEXT: br i1 [[EC_2_PEEL]], label [[LOOP_2_PEEL_NEXT:%.*]], label [[EXIT:%.*]] ; CHECK: loop.2.peel.next: ; CHECK-NEXT: br label [[LOOP_2_PEEL_NEXT2:%.*]] @@ -394,8 +396,8 @@ define i32 @test_pr56044(ptr %src, i32 %a) { ; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ [[IV_2_NEXT_PEEL]], [[MID_PEEL_NEWPH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ] ; CHECK-NEXT: [[IV_2_NEXT]] = add i32 2, [[IV_2]] ; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i32 [[IV_1]], 1 -; CHECK-NEXT: [[EC_2:%.*]] = icmp ult i32 [[IV_1_NEXT]], 12345 -; CHECK-NEXT: br i1 [[EC_2]], label [[LOOP_2]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP2:![0-9]+]] +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_1_NEXT]], 12345 +; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP_2]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: exit.loopexit: ; CHECK-NEXT: [[LCSSA_2_PH:%.*]] = phi i32 [ [[IV_2_NEXT]], [[LOOP_2]] ] ; CHECK-NEXT: br label [[EXIT]] @@ -435,3 +437,65 @@ exit: } declare void @fn(i32) + + +define void @peel_int_eq_condition(i32 %start) { +; CHECK-LABEL: @peel_int_eq_condition( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[START:%.*]], i32 100) +; CHECK-NEXT: [[TMP0:%.*]] = add nuw i32 [[SMAX]], 1 +; CHECK-NEXT: br label [[LOOP_PEEL_BEGIN:%.*]] +; CHECK: loop.peel.begin: +; CHECK-NEXT: br label [[LOOP_PEEL:%.*]] +; CHECK: loop.peel: +; CHECK-NEXT: [[C_0_PEEL:%.*]] = icmp eq i32 [[START]], [[START]] +; 
CHECK-NEXT: br i1 [[C_0_PEEL]], label [[IF_THEN_PEEL:%.*]], label [[LOOP_LATCH_PEEL:%.*]] +; CHECK: if.then.peel: +; CHECK-NEXT: call void @fn(i32 [[START]]) +; CHECK-NEXT: br label [[LOOP_LATCH_PEEL]] +; CHECK: loop.latch.peel: +; CHECK-NEXT: [[IV_NEXT_PEEL:%.*]] = add i32 [[START]], 1 +; CHECK-NEXT: [[EXITCOND_PEEL:%.*]] = icmp ne i32 [[IV_NEXT_PEEL]], [[TMP0]] +; CHECK-NEXT: br i1 [[EXITCOND_PEEL]], label [[LOOP_PEEL_NEXT:%.*]], label [[EXIT:%.*]] +; CHECK: loop.peel.next: +; CHECK-NEXT: br label [[LOOP_PEEL_NEXT1:%.*]] +; CHECK: loop.peel.next1: +; CHECK-NEXT: br label [[ENTRY_PEEL_NEWPH:%.*]] +; CHECK: entry.peel.newph: +; CHECK-NEXT: br label [[LOOP:%.*]] +; CHECK: loop: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT_PEEL]], [[ENTRY_PEEL_NEWPH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] +; CHECK-NEXT: [[C_0:%.*]] = icmp eq i32 [[IV]], [[START]] +; CHECK-NEXT: br i1 [[C_0]], label [[IF_THEN:%.*]], label [[LOOP_LATCH]] +; CHECK: if.then: +; CHECK-NEXT: call void @fn(i32 [[IV]]) +; CHECK-NEXT: br label [[LOOP_LATCH]] +; CHECK: loop.latch: +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_NEXT]], [[TMP0]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: exit.loopexit: +; CHECK-NEXT: br label [[EXIT]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop.latch ] + %c.0 = icmp eq i32 %iv, %start + br i1 %c.0, label %if.then, label %loop.latch + +if.then: + call void @fn(i32 %iv) + br label %loop.latch + +loop.latch: + %iv.next = add i32 %iv, 1 + %ec = icmp slt i32 %iv, 100 + br i1 %ec, label %loop, label %exit + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll index 387bb4302de60..23918427e7003 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/call-costs.ll @@ -81,17 +81,6 @@ define void @powi_call(ptr %P) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load double, ptr [[GEP]], align 8 -; CHECK-NEXT: [[POWI:%.*]] = tail call double @llvm.powi.f64.i32(double [[L]], i32 3) -; CHECK-NEXT: store double [[POWI]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll index 56a4683298e3d..6e3d257e531ba 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll @@ -33,20 +33,7 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; 
CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3 -; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]] -; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8 -; CHECK-NEXT: store i8 [[CONV4]], ptr [[P_OUT_TAIL_09]], align 1 -; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[P_OUT_TAIL_09]], i64 1 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; @@ -108,20 +95,7 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3 -; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]] -; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8 -; CHECK-NEXT: store i8 [[CONV4]], ptr [[P_OUT_TAIL_09]], align 1 -; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[P_OUT_TAIL_09]], i64 1 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll index e4ee6776ae24c..6cf11be0e11f7 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll @@ -362,8 +362,9 @@ define void @latch_branch_cost(ptr %dst) { ; PRED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 104 ; PRED-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; PRED: [[MIDDLE_BLOCK]]: -; PRED-NEXT: br [[EXIT:label %.*]] -; PRED: [[SCALAR_PH:.*:]] +; PRED-NEXT: br label %[[EXIT:.*]] +; PRED: [[EXIT]]: +; PRED-NEXT: ret void ; entry: br label %loop @@ -585,8 +586,9 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 { ; PRED-NEXT: [[TMP16:%.*]] = xor i1 [[TMP15]], true ; PRED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; PRED: [[MIDDLE_BLOCK]]: -; PRED-NEXT: br [[EXIT:label %.*]] -; PRED: [[SCALAR_PH:.*:]] +; PRED-NEXT: br label %[[EXIT:.*]] +; PRED: [[EXIT]]: +; PRED-NEXT: ret void ; entry: br label %loop @@ -609,7 +611,6 @@ exit: } define void 
@low_trip_count_fold_tail_scalarized_store(ptr %dst) { -; ; COMMON-LABEL: define void @low_trip_count_fold_tail_scalarized_store( ; COMMON-SAME: ptr [[DST:%.*]]) { ; COMMON-NEXT: [[ENTRY:.*:]] @@ -659,16 +660,16 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) { ; COMMON-NEXT: store i8 6, ptr [[TMP6]], align 1 ; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE12]] ; COMMON: [[PRED_STORE_CONTINUE12]]: -; COMMON-NEXT: br i1 false, label %[[PRED_STORE_IF13:.*]], label %[[EXIT:.*]] +; COMMON-NEXT: br i1 false, label %[[PRED_STORE_IF13:.*]], label %[[EXIT1:.*]] ; COMMON: [[PRED_STORE_IF13]]: ; COMMON-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], i64 7 ; COMMON-NEXT: store i8 7, ptr [[TMP7]], align 1 -; COMMON-NEXT: br label %[[EXIT]] -; COMMON: [[EXIT]]: -; COMMON-NEXT: br label %[[SCALAR_PH:.*]] -; COMMON: [[SCALAR_PH]]: -; COMMON-NEXT: br [[EXIT1:label %.*]] -; COMMON: [[SCALAR_PH1:.*:]] +; COMMON-NEXT: br label %[[EXIT1]] +; COMMON: [[EXIT1]]: +; COMMON-NEXT: br label %[[SCALAR_PH1:.*]] +; COMMON: [[SCALAR_PH1]]: +; COMMON-NEXT: br [[EXIT:label %.*]] +; COMMON: [[SCALAR_PH:.*:]] ; entry: br label %loop @@ -1160,8 +1161,9 @@ define void @redundant_branch_and_tail_folding(ptr %dst, i1 %c) { ; PRED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24 ; PRED-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; PRED: [[MIDDLE_BLOCK]]: -; PRED-NEXT: br [[EXIT:label %.*]] -; PRED: [[SCALAR_PH:.*:]] +; PRED-NEXT: br label %[[EXIT:.*]] +; PRED: [[EXIT]]: +; PRED-NEXT: ret void ; entry: br label %loop.header diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll index b3780013559a5..0a433ec76acf4 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll @@ -50,8 +50,7 @@ define void @test_pr25490(i32 %n, ptr noalias nocapture %a, ptr noalias nocaptur ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll index 1af55e91e861a..71acac25e4efe 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll @@ -65,36 +65,6 @@ define void @check_widen_intrinsic_with_nnan(ptr noalias %dst.0, ptr noalias %ds ; CHECK-NEXT: br i1 [[TMP34]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 
[ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds double, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[L_1:%.*]] = load double, ptr [[GEP_SRC_1]], align 8 -; CHECK-NEXT: [[ABS:%.*]] = tail call nnan double @llvm.fabs.f64(double [[L_1]]) -; CHECK-NEXT: [[C_0:%.*]] = fcmp olt double [[ABS]], 1.000000e+00 -; CHECK-NEXT: br i1 [[C_0]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[L_2:%.*]] = load double, ptr [[SRC_2]], align 8 -; CHECK-NEXT: [[IV_SUB_1:%.*]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[GEP_IV_SUB_1:%.*]] = getelementptr double, ptr [[DST_0]], i64 [[IV_SUB_1]] -; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_IV_SUB_1]], align 8 -; CHECK-NEXT: [[C_1:%.*]] = fcmp oeq double [[L_2]], 0.000000e+00 -; CHECK-NEXT: br i1 [[C_1]], label %[[MERGE:.*]], label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[IV_SUB_2:%.*]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[GEP_IV_SUB_2:%.*]] = getelementptr double, ptr [[DST_0]], i64 [[IV_SUB_2]] -; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_IV_SUB_2]], align 8 -; CHECK-NEXT: br label %[[MERGE]] -; CHECK: [[MERGE]]: -; CHECK-NEXT: [[MERGE_IV:%.*]] = phi i64 [ [[IV_SUB_2]], %[[ELSE]] ], [ [[IV_SUB_1]], %[[THEN]] ] -; CHECK-NEXT: [[GEP_DST_1:%.*]] = getelementptr inbounds i32, ptr [[DST_1]], i64 [[MERGE_IV]] -; CHECK-NEXT: store i32 10, ptr [[GEP_DST_1]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll index 3a46944712567..dc52e644742e2 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll @@ -47,8 +47,7 @@ define i8 @select_icmp_var_start(ptr %a, i8 %n, i8 %start) { ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[IND_END:%.*]] = trunc i32 [[N_VEC]] to i8 -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i32 [[TMP2]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i32 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i32 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -169,8 +168,7 @@ define i32 @select_icmp_var_start_iv_trunc(i32 %N, i32 %start) #0 { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], 
%[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll index b83d3af3a0d65..a3b7392dd280f 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll @@ -53,8 +53,7 @@ define void @add_i8(ptr noalias nocapture noundef writeonly %A, ptr nocapture no ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[ITERATIONS]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -163,8 +162,7 @@ define void @add_i16(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[ITERATIONS]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -273,8 +271,7 @@ define void @add_i32(ptr noalias nocapture noundef writeonly %A, ptr nocapture n ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[ITERATIONS]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[ITERATIONS]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -466,8 +463,7 @@ define void @trip_count_based_on_ptrtoint(i64 %x) "target-cpu"="apple-m1" { ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[N_VEC]], 4 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[TMP12]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP2]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] 
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll index 87b8c4af1e0c7..307d4c43198af 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll @@ -135,8 +135,7 @@ define void @test_widen_induction(ptr %A, i64 %N) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -221,8 +220,7 @@ define void @test_widen_induction_variable_start(ptr %A, i64 %N, i64 %start) { ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[IND_END5:%.*]] = add i64 [[START]], [[N_VEC]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -305,8 +303,7 @@ define void @test_widen_induction_step_2(ptr %A, i64 %N, i32 %step) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[IND_END4]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[IND_END4]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[IND_END4]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll b/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll new file mode 100644 index 0000000000000..cb4e99332c04b --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/AArch64/epilogue-vectorization-fix-scalar-resume-values.ll @@ -0,0 +1,75 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 +; RUN: opt -passes=loop-vectorize -S %s | FileCheck %s + +; This test exercises the out-of-band fix-up of scalar resume values. 
+ +target triple = "aarch64" + +define void @epilogue_vectorization_fix_scalar_resume_values(ptr %dst, i64 %n) { +; CHECK-LABEL: define void @epilogue_vectorization_fix_scalar_resume_values( +; CHECK-SAME: ptr [[DST:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ITER_CHECK:.*]]: +; CHECK-NEXT: [[REM:%.*]] = urem i64 [[N]], 3 +; CHECK-NEXT: br i1 true, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; CHECK-NEXT: br i1 true, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[REM]], 32 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[REM]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP0]], align 1 +; CHECK-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP1]], align 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[REM]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] +; CHECK: [[VEC_EPILOG_ITER_CHECK]]: +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] +; CHECK: [[VEC_EPILOG_PH]]: +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX1]] +; CHECK-NEXT: store <8 x i8> zeroinitializer, ptr [[TMP3]], align 1 +; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 0 +; CHECK-NEXT: br i1 [[TMP4]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N3:%.*]] = icmp eq i64 [[REM]], 0 +; CHECK-NEXT: br i1 [[CMP_N3]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK: [[VEC_EPILOG_SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_DST_IV:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]] +; CHECK-NEXT: store i8 0, ptr [[GEP_DST_IV]], align 1 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[REM]] +; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + 
%rem = urem i64 %n, 3 + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %gep.dst.iv = getelementptr i8, ptr %dst, i64 %iv + store i8 0, ptr %gep.dst.iv, align 1 + %iv.next = add i64 %iv, 1 + %exit.cond = icmp eq i64 %iv.next, %rem + br i1 %exit.cond, label %exit, label %loop + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll index 91ec9da11928c..35d7e2cc8c586 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll @@ -58,8 +58,7 @@ define double @fp128_fmuladd_reduction(ptr %start0, ptr %start1, ptr %end0, ptr ; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[START0]], i64 [[TMP6]] ; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 8 ; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[START1]], i64 [[TMP7]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll index 890ff1dc05e4f..4bb8a0e72acb7 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll @@ -69,20 +69,7 @@ define i32 @test_phi_iterator_invalidation(ptr %A, ptr noalias %B) { ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SEXT:%.*]] = sext i16 [[SCALAR_RECUR]] to i32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV_NEXT]] -; CHECK-NEXT: [[FOR_NEXT]] = load i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i32, ptr [[B]], i64 [[IV_NEXT]] -; CHECK-NEXT: store i32 [[SEXT]], ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1001 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret i32 0 ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll index 32fdc5cd6fc4f..56a1abd2384c8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll @@ -113,3 +113,49 @@ loop: exit: ret float %max.next } + +define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { +; CHECK-LABEL: define float 
@test_fmax_and_fmin( +; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] +; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4 +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]]) +; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[SUB]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %min = phi float [ 0.000000e+00, %entry ], [ %min.next, %loop ] + %max = phi float [ 0.000000e+00, %entry ], [ %max.next, %loop ] + %gep.src.0 = getelementptr inbounds nuw float, ptr %src.0, i64 %iv + %gep.src.1 = getelementptr inbounds nuw float, ptr %src.1, i64 %iv + %l.0 = load float, ptr %gep.src.0, align 4 + %l.1 = load float, ptr %gep.src.1, align 4 + %max.next = tail call noundef float @llvm.maxnum.f32(float %max, float %l.0) + %min.next = tail call noundef float @llvm.minnum.f32(float %min, float %l.1) + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop + +exit: + %sub = fsub float %max.next, %min.next + ret float %sub +} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll index 939eaaa34c514..bfee39eac0ae2 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll @@ -18,21 +18,8 @@ define double @test_reduction_costs() { ; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_1:.*]] -; CHECK: [[LOOP_1]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_1]] ] -; CHECK-NEXT: [[R_1:%.*]] = phi double [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[R_1_NEXT:%.*]], %[[LOOP_1]] ] -; CHECK-NEXT: [[R_2:%.*]] = phi double [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[R_2_NEXT:%.*]], %[[LOOP_1]] ] -; CHECK-NEXT: [[R_1_NEXT]] = fadd double [[R_1]], 3.000000e+00 -; CHECK-NEXT: [[R_2_NEXT]] = fadd double [[R_2]], 9.000000e+00 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_1]] ; CHECK: [[EXIT]]: -; 
CHECK-NEXT: [[R_1_NEXT_LCSSA:%.*]] = phi double [ [[R_1_NEXT]], %[[LOOP_1]] ], [ [[TMP0]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[R_2_NEXT_LCSSA:%.*]] = phi double [ [[R_2_NEXT]], %[[LOOP_1]] ], [ [[TMP1]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[DIV:%.*]] = fmul double [[R_1_NEXT_LCSSA]], [[R_2_NEXT_LCSSA]] +; CHECK-NEXT: [[DIV:%.*]] = fmul double [[TMP0]], [[TMP1]] ; CHECK-NEXT: ret double [[DIV]] ; entry: @@ -84,8 +71,7 @@ define void @test_iv_cost(ptr %ptr.start, i8 %a, i64 %b) { ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[IND_END:%.*]] = sub i64 [[START]], [[N_VEC]] ; CHECK-NEXT: [[IND_END2:%.*]] = getelementptr i8, ptr [[PTR_START]], i64 [[N_VEC]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[START]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF4:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll index cc7b4aecc3642..fd6e275d098ca 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll @@ -68,10 +68,7 @@ define void @iv_casts(ptr %dst, ptr %src, i32 %x, i64 %N) #0 { ; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; DEFAULT-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; DEFAULT: [[VEC_EPILOG_ITER_CHECK]]: -; DEFAULT-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; DEFAULT-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64() -; DEFAULT-NEXT: [[TMP32:%.*]] = shl nuw i64 [[TMP31]], 2 -; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP32]] +; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP2]] ; DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; DEFAULT: [[VEC_EPILOG_PH]]: ; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -812,7 +809,7 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) { ; PRED-NEXT: [[TMP3:%.*]] = icmp ult i32 [[TMP2]], 1 ; PRED-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[TMP0]], 4294967295 ; PRED-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]] -; PRED-NEXT: br i1 [[TMP5]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; PRED-NEXT: br i1 [[TMP5]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; PRED: [[VECTOR_PH]]: ; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[UMAX1]], 1 ; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll index a74c33f26e58a..42a1940925968 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs.ll @@ -169,22 +169,9 @@ define i64 @int_and_pointer_iv(ptr %start, i32 %N) { ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: 
[[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i64> [[TMP5]], i32 2 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[RECUR_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[PTR_IV]], align 4 -; CHECK-NEXT: [[RECUR_NEXT]] = zext i32 [[L]] to i64 -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 4 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RECUR_LCSSA:%.*]] = phi i64 [ [[SCALAR_RECUR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RECUR_LCSSA]] +; CHECK-NEXT: ret i64 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll index be8c8cd6480e4..93e71af74f4ac 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll @@ -41,9 +41,7 @@ define i64 @vector_loop_with_remaining_iterations(ptr %src, ptr noalias %dst, i3 ; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> [[TMP11]]) ; CHECK-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP15:%.*]] = shl nuw i64 [[TMP14]], 1 -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i64 1, [[TMP15]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i64 1, [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -174,9 +172,7 @@ define i64 @main_vector_loop_fixed_with_no_remaining_iterations(ptr %src, ptr no ; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> [[TMP11]]) ; CHECK-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP15:%.*]] = shl nuw i64 [[TMP14]], 1 -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i64 1, [[TMP15]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i64 1, [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll index 9bd3d309c0ad9..9b4151f30d640 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-load-store.ll @@ -69,8 +69,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-4-NEXT: [[CMP_N:%.*]] = icmp eq i64 
[[N]], [[N_VEC]] ; INTERLEAVE-4-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; INTERLEAVE-4: vec.epilog.iter.check: -; INTERLEAVE-4-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; INTERLEAVE-4-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; INTERLEAVE-4-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; INTERLEAVE-4-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; INTERLEAVE-4: vec.epilog.ph: ; INTERLEAVE-4-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -158,8 +157,7 @@ define void @interleave_single_load_store(ptr %src, ptr %dst, i64 %N, i8 %a, i8 ; INTERLEAVE-2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; INTERLEAVE-2-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; INTERLEAVE-2: vec.epilog.iter.check: -; INTERLEAVE-2-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; INTERLEAVE-2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; INTERLEAVE-2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; INTERLEAVE-2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; INTERLEAVE-2: vec.epilog.ph: ; INTERLEAVE-2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll index a869cf647b5ce..aa94763b44a30 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleaving-reduction.ll @@ -51,8 +51,7 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) { ; INTERLEAVE-4-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; INTERLEAVE-4-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; INTERLEAVE-4: vec.epilog.iter.check: -; INTERLEAVE-4-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; INTERLEAVE-4-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; INTERLEAVE-4-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; INTERLEAVE-4-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; INTERLEAVE-4: vec.epilog.ph: ; INTERLEAVE-4-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -184,8 +183,7 @@ define i32 @interleave_integer_reduction(ptr %src, i64 %N) { ; INTERLEAVE-4-VLA-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; INTERLEAVE-4-VLA-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; INTERLEAVE-4-VLA: vec.epilog.iter.check: -; INTERLEAVE-4-VLA-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; INTERLEAVE-4-VLA-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; INTERLEAVE-4-VLA-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; INTERLEAVE-4-VLA-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; INTERLEAVE-4-VLA: vec.epilog.ph: ; INTERLEAVE-4-VLA-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, 
[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll index 58ebc7ce1f8f4..ee3a4a04566c9 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll @@ -57,8 +57,7 @@ define void @saddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca ; CHECK-NEXT: [[IND_END10:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[TMP6]] ; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 2 ; CHECK-NEXT: [[IND_END13:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[TMP7]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -180,8 +179,7 @@ define void @umin(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur ; CHECK-NEXT: [[IND_END7:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST6]] ; CHECK-NEXT: [[IND_END9:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC]] ; CHECK-NEXT: [[IND_END12:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF7:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll index f1571e67e5849..d80fdd1ce7270 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll @@ -51,22 +51,8 @@ define i32 @test_invariant_replicate_region(i32 %x, i1 %c) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[REM_1:%.*]] = urem i32 10, [[X]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[RES:%.*]] = phi i32 [ 0, %[[LOOP_HEADER]] ], [ [[REM_1]], %[[THEN]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 99 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES_LCSSA:%.*]] = phi i32 [ [[RES]], %[[LOOP_LATCH]] ], [ [[TMP17]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RES_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP17]] ; entry: br label %loop.header diff --git 
a/llvm/test/Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll b/llvm/test/Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll index 5066a9b8337bd..bd33af286b05d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll @@ -35,8 +35,7 @@ define void @add_a(ptr noalias nocapture readonly %p, ptr noalias nocapture %q, ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -132,8 +131,7 @@ define void @add_a1(ptr noalias nocapture readonly %p, ptr noalias nocapture %q, ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -294,8 +292,7 @@ define void @add_c(ptr noalias nocapture readonly %p, ptr noalias nocapture %q, ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -472,8 +469,7 @@ define void @add_e(ptr noalias nocapture readonly %p, ptr noalias nocapture %q, ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -612,8 +608,7 @@ 
define void @add_f(ptr noalias nocapture readonly %p, ptr noalias nocapture %q, ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll index 9b62525370210..e424649cf50c6 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll @@ -88,8 +88,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef ; CHECK-VS1-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK-VS1: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-VS1-NEXT: [[IND_END4:%.*]] = add i64 [[TMP0]], [[N_VEC]] -; CHECK-VS1-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]] -; CHECK-VS1-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-VS1-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-VS1-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK-VS1: [[VEC_EPILOG_PH]]: ; CHECK-VS1-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -182,8 +181,7 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef ; CHECK-VS2-NEXT: br i1 [[CMP_N]], label %[[WHILE_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK-VS2: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-VS2-NEXT: [[IND_END4:%.*]] = add i64 [[TMP0]], [[N_VEC]] -; CHECK-VS2-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]] -; CHECK-VS2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-VS2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-VS2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK-VS2: [[VEC_EPILOG_PH]]: ; CHECK-VS2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -476,19 +474,8 @@ define i32 @tc4(ptr noundef readonly captures(none) %tmp) vscale_range(1,16) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_0179:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr 
[[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD]] = add i32 [[SUM_0179]], [[TMP5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; entry: br label %for.body @@ -522,6 +509,7 @@ define i32 @tc4_from_profile(ptr noundef readonly captures(none) %tmp, i64 %N) v ; CHECK-NEXT: [[ADD]] = add i32 [[SUM_0179]], [[TMP0]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]], !prof [[PROF9:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[ADD_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call-scalarize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call-scalarize.ll index b157a2818e676..157b78704234a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call-scalarize.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call-scalarize.ll @@ -109,36 +109,35 @@ define void @test_widen_exp_v2(ptr noalias %p2, ptr noalias %p, i64 %n) #5 { ; TFA_INTERLEAVE-NEXT: [[TMP11:%.*]] = insertelement <2 x double> poison, double [[TMP9]], i32 0 ; TFA_INTERLEAVE-NEXT: [[TMP12:%.*]] = insertelement <2 x double> [[TMP11]], double [[TMP9]], i32 1 ; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = fcmp ogt <2 x double> [[TMP12]], zeroinitializer -; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP14]], <2 x double> zeroinitializer, <2 x double> splat (double 1.000000e+00) ; TFA_INTERLEAVE-NEXT: [[PREDPHI3:%.*]] = select <2 x i1> [[TMP14]], <2 x double> zeroinitializer, <2 x double> splat (double 1.000000e+00) ; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] ; TFA_INTERLEAVE: pred.store.if: -; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 0 +; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 0 ; TFA_INTERLEAVE-NEXT: store double [[TMP20]], ptr [[P:%.*]], align 8 ; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE]] ; TFA_INTERLEAVE: pred.store.continue: ; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP29]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]] -; TFA_INTERLEAVE: pred.store.if4: -; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 1 +; TFA_INTERLEAVE: pred.store.if3: +; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 1 ; TFA_INTERLEAVE-NEXT: store double [[TMP22]], ptr [[P]], align 8 ; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE5]] -; TFA_INTERLEAVE: pred.store.continue5: +; TFA_INTERLEAVE: pred.store.continue4: ; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK2]], i32 0 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP31]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]] -; TFA_INTERLEAVE: pred.store.if6: +; TFA_INTERLEAVE: pred.store.if5: ; TFA_INTERLEAVE-NEXT: [[TMP32:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 0 ; 
TFA_INTERLEAVE-NEXT: store double [[TMP32]], ptr [[P]], align 8 ; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE7]] -; TFA_INTERLEAVE: pred.store.continue7: +; TFA_INTERLEAVE: pred.store.continue6: ; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK2]], i32 1 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP25]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9]] -; TFA_INTERLEAVE: pred.store.if8: +; TFA_INTERLEAVE: pred.store.if7: ; TFA_INTERLEAVE-NEXT: [[TMP34:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 1 ; TFA_INTERLEAVE-NEXT: store double [[TMP34]], ptr [[P]], align 8 ; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE9]] -; TFA_INTERLEAVE: pred.store.continue9: +; TFA_INTERLEAVE: pred.store.continue8: ; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = add i64 [[INDEX]], 2 ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX]], i64 [[TMP3]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll index fd02300232a84..d8a81f9316e4b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll @@ -51,11 +51,10 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 { ; TFCOMMON-NEXT: [[ENTRY:.*]]: ; TFCOMMON-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFCOMMON-NEXT: br label %[[VECTOR_BODY:.*]] ; TFCOMMON: [[VECTOR_BODY]]: ; TFCOMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFCOMMON-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] ; TFCOMMON-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFCOMMON-NEXT: [[TMP6:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) @@ -76,12 +75,11 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 { ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1 -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025) ; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFA_INTERLEAVE: [[VECTOR_BODY]]: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], %[[VECTOR_BODY]] ] 
; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() @@ -179,11 +177,10 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 { ; TFCOMMON-NEXT: [[ENTRY:.*]]: ; TFCOMMON-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFCOMMON-NEXT: br label %[[VECTOR_BODY:.*]] ; TFCOMMON: [[VECTOR_BODY]]: ; TFCOMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFCOMMON-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TFCOMMON-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFCOMMON-NEXT: [[TMP6:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], splat (i64 50) @@ -207,12 +204,11 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 { ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1 -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025) ; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFA_INTERLEAVE: [[VECTOR_BODY]]: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() @@ -332,11 +328,10 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 { ; TFCOMMON-NEXT: [[ENTRY:.*]]: ; TFCOMMON-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFCOMMON-NEXT: br label %[[VECTOR_BODY:.*]] ; TFCOMMON: [[VECTOR_BODY]]: ; TFCOMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFCOMMON-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TFCOMMON-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, 
[[ACTIVE_LANE_MASK]], poison) ; TFCOMMON-NEXT: [[TMP6:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], splat (i64 50) @@ -363,12 +358,11 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 { ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1 -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025) ; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFA_INTERLEAVE: [[VECTOR_BODY]]: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() @@ -608,11 +602,10 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 { ; TFALWAYS-NEXT: [[ENTRY:.*]]: ; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TFALWAYS-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFALWAYS-NEXT: br label %[[VECTOR_BODY:.*]] ; TFALWAYS: [[VECTOR_BODY]]: ; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFALWAYS-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] ; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFALWAYS-NEXT: [[TMP6:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) @@ -631,11 +624,10 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 { ; TFFALLBACK-NEXT: [[ENTRY:.*]]: ; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFFALLBACK-NEXT: br label %[[VECTOR_BODY:.*]] ; TFFALLBACK: [[VECTOR_BODY]]: ; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] ; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, 
[[ACTIVE_LANE_MASK]], poison) ; TFFALLBACK-NEXT: [[TMP6:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) @@ -656,12 +648,11 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 { ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1 -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025) ; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFA_INTERLEAVE: [[VECTOR_BODY]]: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() @@ -763,13 +754,12 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub ; TFALWAYS-NEXT: [[ENTRY:.*]]: ; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TFALWAYS-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFALWAYS-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, double [[M]], i64 0 ; TFALWAYS-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TFALWAYS-NEXT: br label %[[VECTOR_BODY:.*]] ; TFALWAYS: [[VECTOR_BODY]]: ; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFALWAYS-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ] ; TFALWAYS-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] ; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2f64.p0(ptr [[TMP5]], i32 8, [[ACTIVE_LANE_MASK]], poison) @@ -793,13 +783,12 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub ; TFFALLBACK-NEXT: [[ENTRY:.*]]: ; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFFALLBACK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, double [[M]], i64 0 ; TFFALLBACK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TFFALLBACK-NEXT: br label %[[VECTOR_BODY:.*]] ; TFFALLBACK: [[VECTOR_BODY]]: ; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFFALLBACK-NEXT: 
[[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] ; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2f64.p0(ptr [[TMP5]], i32 8, [[ACTIVE_LANE_MASK]], poison) @@ -825,14 +814,13 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1 -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, double [[M]], i64 0 ; TFA_INTERLEAVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]] ; TFA_INTERLEAVE: [[VECTOR_BODY]]: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP24:%.*]], %[[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] @@ -972,13 +960,11 @@ define void @test_widen_exp_v2(ptr noalias %p2, ptr noalias %p, i64 %n) #5 { ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = load double, ptr [[P2]], align 8 ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7:[0-9]+]] ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = fcmp ogt double [[TMP6]], 0.000000e+00 -; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP8]], double 0.000000e+00, double 1.000000e+00 ; TFA_INTERLEAVE-NEXT: [[PREDPHI3:%.*]] = select i1 [[TMP8]], double 0.000000e+00, double 1.000000e+00 -; TFA_INTERLEAVE-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[ACTIVE_LANE_MASK2]], double [[PREDPHI3]], double [[PREDPHI]] ; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = or i1 [[ACTIVE_LANE_MASK]], [[ACTIVE_LANE_MASK2]] ; TFA_INTERLEAVE-NEXT: br i1 [[TMP14]], label %[[BB8:.*]], label %[[TMP9]] ; TFA_INTERLEAVE: [[BB8]]: -; TFA_INTERLEAVE-NEXT: store double [[SPEC_SELECT]], ptr [[P]], align 8 +; TFA_INTERLEAVE-NEXT: store double [[PREDPHI3]], ptr [[P]], align 8 ; TFA_INTERLEAVE-NEXT: br label %[[TMP9]] ; TFA_INTERLEAVE: [[TMP9]]: ; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll b/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll index 80bf956927c77..9f518e448eb19 100644 --- 
a/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/mul-simplification.ll @@ -62,18 +62,8 @@ define i32 @add_reduction_select_operand_constant_but_non_uniform() { ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD2_REASS:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 42, %[[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[ADD2_REASS]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[RDX_NEXT]] = add i32 0, [[RDX]] -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD2_REASS]], 64 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP3]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll index 544ef5c82c7ac..a6e0f8a2a1c3a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll @@ -32,14 +32,7 @@ define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @sincos_f32( @@ -112,14 +105,7 @@ define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @sincos_f64( @@ -209,15 +195,6 @@ define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly ; CHECK-ARMPL: [[TMP17:%.*]] = extractvalue { , } [[TMP15]], 1 ; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0( [[TMP16]], ptr [[TMP19:%.*]], i32 4, [[TMP14:%.*]]) ; CHECK-ARMPL: call void @llvm.masked.store.nxv4f32.p0( [[TMP17]], ptr [[TMP21:%.*]], i32 4, [[TMP14]]) -; 
CHECK-ARMPL: [[MIDDLE_BLOCK:.*:]] -; CHECK-ARMPL: [[SCALAR_PH:.*:]] -; CHECK-ARMPL: [[FOR_BODY:.*:]] -; CHECK-ARMPL: [[IF_THEN:.*:]] -; CHECK-ARMPL: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]]) -; CHECK-ARMPL: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK-ARMPL: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK-ARMPL: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK-ARMPL: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK-ARMPL: [[IF_MERGE:.*:]] ; CHECK-ARMPL: [[FOR_END:.*:]] ; @@ -277,14 +254,7 @@ define void @modf_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.modf.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @modf_f32( @@ -357,14 +327,7 @@ define void @modf_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.modf.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @modf_f64( @@ -441,14 +404,7 @@ define void @sincospi_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincospi.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @sincospi_f32( @@ -521,14 +477,7 @@ define void @sincospi_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } 
@llvm.sincospi.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; ; CHECK-ARMPL-LABEL: define void @sincospi_f64( diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll index ff3f6e906e82c..56ace5497b996 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll @@ -30,17 +30,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; DEFAULT-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; DEFAULT-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -59,17 +48,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; OPTSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; OPTSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -88,17 +66,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; MINSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; MINSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; MINSIZE-NEXT: br i1 
[[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -390,23 +357,6 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) ; DEFAULT-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8 -; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]] -; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1 -; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP72]], 2 -; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; DEFAULT-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDVARS_IV]] -; DEFAULT-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -531,23 +481,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; DEFAULT-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8 -; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]] -; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1 -; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP26]], 2 -; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; DEFAULT-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IV]] -; DEFAULT-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; DEFAULT-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -598,23 +531,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; OPTSIZE-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]] -; OPTSIZE-NEXT: [[SHR:%.*]] = lshr i8 
[[TMP26]], 1 -; OPTSIZE-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; OPTSIZE-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; OPTSIZE-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP26]], 2 -; OPTSIZE-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; OPTSIZE-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IV]] -; OPTSIZE-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; OPTSIZE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -665,23 +581,6 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; MINSIZE-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8 -; MINSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]] -; MINSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1 -; MINSIZE-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; MINSIZE-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; MINSIZE-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP26]], 2 -; MINSIZE-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; MINSIZE-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IV]] -; MINSIZE-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; MINSIZE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -746,23 +645,6 @@ define void @dont_vectorize_with_minsize() { ; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label 
%[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -789,23 +671,6 @@ define void @dont_vectorize_with_minsize() { ; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -832,23 +697,6 @@ define void @dont_vectorize_with_minsize() { ; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -913,23 +761,6 @@ define void @vectorization_forced_minsize_reduce_width() { ; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; 
DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -956,23 +787,6 @@ define void @vectorization_forced_minsize_reduce_width() { ; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -999,23 +813,6 @@ define void @vectorization_forced_minsize_reduce_width() { ; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x 
i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll index f4784b6259ce1..229209e98af78 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll @@ -1381,8 +1381,8 @@ for.body: ; preds = %for.body.preheader, br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !loop !1 } -define i32 @red_extended_add_chain(ptr %start, ptr %end, i32 %offset) { -; CHECK-NEON-LABEL: define i32 @red_extended_add_chain( +define i32 @red_extended_add_incomplete_chain(ptr %start, ptr %end, i32 %offset) { +; CHECK-NEON-LABEL: define i32 @red_extended_add_incomplete_chain( ; CHECK-NEON-SAME: ptr [[START:%.*]], ptr [[END:%.*]], i32 [[OFFSET:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-NEON-NEXT: entry: ; CHECK-NEON-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 @@ -1404,7 +1404,7 @@ define i32 @red_extended_add_chain(ptr %start, ptr %end, i32 %offset) { ; CHECK-NEON-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] ; CHECK-NEON-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 ; CHECK-NEON-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <16 x i32> @llvm.vector.partial.reduce.add.v16i32.v16i32(<16 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) +; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = add <16 x i32> [[VEC_PHI]], [[TMP3]] ; CHECK-NEON-NEXT: [[TMP4]] = add <16 x i32> [[PARTIAL_REDUCE]], [[BROADCAST_SPLAT]] ; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEON-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] @@ -1415,7 +1415,7 @@ define i32 @red_extended_add_chain(ptr %start, ptr %end, i32 %offset) { ; CHECK-NEON-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-NEON: scalar.ph: ; -; CHECK-SVE-LABEL: define i32 @red_extended_add_chain( +; CHECK-SVE-LABEL: define i32 @red_extended_add_incomplete_chain( ; CHECK-SVE-SAME: ptr [[START:%.*]], ptr [[END:%.*]], i32 [[OFFSET:%.*]]) #[[ATTR1:[0-9]+]] { ; CHECK-SVE-NEXT: entry: ; CHECK-SVE-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 @@ -1452,7 +1452,7 @@ define i32 @red_extended_add_chain(ptr %start, ptr %end, i32 %offset) { ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: ; -; CHECK-SVE-MAXBW-LABEL: define i32 @red_extended_add_chain( +; CHECK-SVE-MAXBW-LABEL: define i32 @red_extended_add_incomplete_chain( ; CHECK-SVE-MAXBW-SAME: ptr [[START:%.*]], ptr [[END:%.*]], i32 [[OFFSET:%.*]]) #[[ATTR1:[0-9]+]] { ; 
CHECK-SVE-MAXBW-NEXT: entry: ; CHECK-SVE-MAXBW-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 @@ -1478,7 +1478,7 @@ define i32 @red_extended_add_chain(ptr %start, ptr %end, i32 %offset) { ; CHECK-SVE-MAXBW-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] ; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP]], align 1 ; CHECK-SVE-MAXBW-NEXT: [[TMP7:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32> -; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 8 x i32> @llvm.vector.partial.reduce.add.nxv8i32.nxv8i32(<vscale x 8 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP7]]) +; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = add <vscale x 8 x i32> [[VEC_PHI]], [[TMP7]] ; CHECK-SVE-MAXBW-NEXT: [[TMP8]] = add <vscale x 8 x i32> [[PARTIAL_REDUCE]], [[BROADCAST_SPLAT]] ; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-SVE-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll new file mode 100644 index 0000000000000..b033f6051f812 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll @@ -0,0 +1,469 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 +; RUN: opt --mattr=+neon,+dotprod -passes=loop-vectorize -force-vector-interleave=1 -enable-epilogue-vectorization=false -S %s | FileCheck %s + +target triple = "arm64-apple-macosx" + +define i32 @red_zext_mul_by_63(ptr %start, ptr %end) { +; CHECK-LABEL: define i32 @red_zext_mul_by_63( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 +; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[END1]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 16 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 63) +; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK:
[[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[PTR_IV]], align 1 +; CHECK-NEXT: [[L_EXT:%.*]] = zext i8 [[L]] to i32 +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[L_EXT]], 63 +; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[MUL]] +; CHECK-NEXT: [[GEP_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] + %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] + %l = load i8, ptr %ptr.iv, align 1 + %l.ext = zext i8 %l to i32 + %mul = mul i32 %l.ext, 63 + %red.next = add i32 %red, %mul + %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + %ec = icmp eq ptr %ptr.iv, %end + br i1 %ec, label %exit, label %loop + +exit: + ret i32 %red.next +} + +define i32 @red_zext_mul_by_255(ptr %start, ptr %end) { +; CHECK-LABEL: define i32 @red_zext_mul_by_255( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 +; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[END1]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 16 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 255) +; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] +; CHECK-NEXT: br i1 
[[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[PTR_IV]], align 1 +; CHECK-NEXT: [[L_EXT:%.*]] = zext i8 [[L]] to i32 +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[L_EXT]], 255 +; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[MUL]] +; CHECK-NEXT: [[GEP_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] + %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] + %l = load i8, ptr %ptr.iv, align 1 + %l.ext = zext i8 %l to i32 + %mul = mul i32 %l.ext, 255 + %red.next = add i32 %red, %mul + %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + %ec = icmp eq ptr %ptr.iv, %end + br i1 %ec, label %exit, label %loop + +exit: + ret i32 %red.next +} + +define i32 @red_zext_mul_by_256(ptr %start, ptr %end) { +; CHECK-LABEL: define i32 @red_zext_mul_by_256( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 +; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[END1]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 16 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 256) +; CHECK-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], 
label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[PTR_IV]], align 1 +; CHECK-NEXT: [[L_EXT:%.*]] = zext i8 [[L]] to i32 +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[L_EXT]], 256 +; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[MUL]] +; CHECK-NEXT: [[GEP_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] + %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] + %l = load i8, ptr %ptr.iv, align 1 + %l.ext = zext i8 %l to i32 + %mul = mul i32 %l.ext, 256 + %red.next = add i32 %red, %mul + %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + %ec = icmp eq ptr %ptr.iv, %end + br i1 %ec, label %exit, label %loop + +exit: + ret i32 %red.next +} + +define i32 @red_sext_mul_by_63(ptr %start, ptr %end) { +; CHECK-LABEL: define i32 @red_sext_mul_by_63( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 +; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[END1]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 16 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 63) +; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], 
[[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[PTR_IV]], align 1 +; CHECK-NEXT: [[L_EXT:%.*]] = sext i8 [[L]] to i32 +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[L_EXT]], 63 +; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[MUL]] +; CHECK-NEXT: [[GEP_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %ptr_iv = phi ptr [ %start, %entry ], [ %gep_iv_next, %loop ] + %red = phi i32 [ 0, %entry ], [ %red_next, %loop ] + %l = load i8, ptr %ptr_iv, align 1 + %l_ext = sext i8 %l to i32 + %mul = mul i32 %l_ext, 63 + %red_next = add i32 %red, %mul + %gep_iv_next = getelementptr i8, ptr %ptr_iv, i64 1 + %ec = icmp eq ptr %ptr_iv, %end + br i1 %ec, label %exit, label %loop + +exit: + %red_next_lcssa = phi i32 [ %red_next, %loop ] + ret i32 %red_next_lcssa +} + +; Constants >= 128 cannot be treated as sign-extended. +define i32 @red_sext_mul_by_128(ptr %start, ptr %end) { +; CHECK-LABEL: define i32 @red_sext_mul_by_128( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 +; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[END1]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 16 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 128) +; CHECK-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP7:%.*]] = call i32 
@llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[PTR_IV]], align 1 +; CHECK-NEXT: [[L_EXT:%.*]] = sext i8 [[L]] to i32 +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[L_EXT]], 128 +; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[MUL]] +; CHECK-NEXT: [[GEP_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %ptr_iv = phi ptr [ %start, %entry ], [ %gep_iv_next, %loop ] + %red = phi i32 [ 0, %entry ], [ %red_next, %loop ] + %l = load i8, ptr %ptr_iv, align 1 + %l_ext = sext i8 %l to i32 + %mul = mul i32 %l_ext, 128 + %red_next = add i32 %red, %mul + %gep_iv_next = getelementptr i8, ptr %ptr_iv, i64 1 + %ec = icmp eq ptr %ptr_iv, %end + br i1 %ec, label %exit, label %loop + +exit: + %red_next_lcssa = phi i32 [ %red_next, %loop ] + ret i32 %red_next_lcssa +} + +define i32 @red_sext_mul_by_255(ptr %start, ptr %end) { +; CHECK-LABEL: define i32 @red_sext_mul_by_255( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 +; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[END1]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 16 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 255) +; CHECK-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: 
[[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[PTR_IV]], align 1 +; CHECK-NEXT: [[L_EXT:%.*]] = sext i8 [[L]] to i32 +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[L_EXT]], 255 +; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[MUL]] +; CHECK-NEXT: [[GEP_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] + %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] + %l = load i8, ptr %ptr.iv, align 1 + %l.ext = sext i8 %l to i32 + %mul = mul i32 %l.ext, 255 + %red.next = add i32 %red, %mul + %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + %ec = icmp eq ptr %ptr.iv, %end + br i1 %ec, label %exit, label %loop + +exit: + ret i32 %red.next +} + +define i32 @red_sext_mul_by_256(ptr %start, ptr %end) { +; CHECK-LABEL: define i32 @red_sext_mul_by_256( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64 +; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[END1]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 16 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], splat (i32 256) +; CHECK-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[TMP4]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP7:%.*]] = call i32 
@llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[PTR_IV]], align 1 +; CHECK-NEXT: [[L_EXT:%.*]] = sext i8 [[L]] to i32 +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[L_EXT]], 256 +; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[MUL]] +; CHECK-NEXT: [[GEP_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ] + %red = phi i32 [ 0, %entry ], [ %red.next, %loop ] + %l = load i8, ptr %ptr.iv, align 1 + %l.ext = sext i8 %l to i32 + %mul = mul i32 %l.ext, 256 + %red.next = add i32 %red, %mul + %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1 + %ec = icmp eq ptr %ptr.iv, %end + br i1 %ec, label %exit, label %loop + +exit: + ret i32 %red.next +} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll index 766b60a79520b..dd239c023c686 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll @@ -28,7 +28,8 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK: middle.block: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.exit: +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -80,15 +81,14 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[IV_NEXT]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[IV_NEXT]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END_LOOPEXIT:%.*]], label [[WHILE_BODY:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[IND_END6:%.*]] = add i64 [[IDX_NEG]], [[IV_NEXT]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[IV_NEXT]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 
[[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT]], [[WHILE_BODY]] ], [ 0, [[ENTRY]] ] @@ -112,7 +112,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[TMP13]] = add <4 x i32> [[TMP14]], [[VEC_PHI9]] ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX9]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC5]] -; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP13]]) ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC5]] @@ -136,7 +136,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 { ; CHECK-NEXT: [[CMP_IV_NEG:%.*]] = icmp ugt i64 [[IV_NEG]], 0 ; CHECK-NEXT: [[CMP_IV:%.*]] = icmp ne i64 [[ACCUM1]], -1 ; CHECK-NEXT: [[EXITCOND:%.*]] = and i1 [[CMP_IV_NEG]], [[CMP_IV]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: while.end.loopexit: ; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ [[ADD]], [[WHILE_BODY1]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[TMP15]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret void @@ -495,11 +495,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret i32 [[TMP182]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll index 43fccdc5a0706..49e9989b65d2f 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll @@ -261,7 +261,8 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 { ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.exit: +; CHECK-NEXT: ret i32 [[TMP13]] ; ; CHECK-NOI8MM-LABEL: define i32 @sudot_neon( ; CHECK-NOI8MM-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR1:[0-9]+]] { @@ -296,7 +297,8 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 { ; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP13]], [[TMP12]] ; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i32 
@llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) ; CHECK-NOI8MM-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-NOI8MM: scalar.ph: +; CHECK-NOI8MM: for.exit: +; CHECK-NOI8MM-NEXT: ret i32 [[TMP15]] ; entry: br label %for.body @@ -349,12 +351,13 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 { ; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.exit: +; CHECK-NEXT: ret i32 [[TMP13]] ; ; CHECK-NOI8MM-LABEL: define i32 @usdot_neon( ; CHECK-NOI8MM-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR1]] { @@ -384,12 +387,13 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 { ; CHECK-NOI8MM-NEXT: [[TMP13]] = add <16 x i32> [[TMP11]], [[VEC_PHI1]] ; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NOI8MM-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NOI8MM-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-NOI8MM: middle.block: ; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP13]], [[TMP12]] ; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) ; CHECK-NOI8MM-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-NOI8MM: scalar.ph: +; CHECK-NOI8MM: for.exit: +; CHECK-NOI8MM-NEXT: ret i32 [[TMP15]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll index 410993b4f4776..801eb810d8625 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll @@ -30,7 +30,8 @@ define i32 @dotp(ptr %a, ptr %b) { ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP9]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -65,7 +66,8 @@ define i32 @dotp(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP14]] ; ; CHECK-MAXBW-LABEL: define i32 @dotp( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -90,7 +92,8 @@ define i32 @dotp(ptr %a, ptr %b) { ; CHECK-MAXBW: 
middle.block: ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP9]] ; entry: br label %for.body @@ -196,11 +199,12 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP69]] = add <16 x i32> [[TMP68]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]]) ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP71]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_different_types( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -354,12 +358,13 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP138]] = add <16 x i32> [[TMP136]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP139:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP139]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP139]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP138]], [[TMP137]] ; CHECK-INTERLEAVED-NEXT: [[TMP140:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP140]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_different_types( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -442,11 +447,12 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP69]] = add <16 x i32> [[TMP68]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]]) ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP71]] ; entry: br label %for.body @@ -491,11 +497,12 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = add <16 x i32> [[TMP7]], [[TMP8]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP6:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP11]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_not_loop_carried( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -517,11 +524,12 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = add <16 x i32> [[TMP7]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15 ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP11]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_not_loop_carried( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -543,11 +551,12 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = add <16 x i32> [[TMP7]], [[TMP8]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP9]], i32 15 ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -594,11 +603,12 @@ define i32 @not_dotp_not_phi(ptr %a, ptr noalias %b, ptr noalias %c) { ; CHECK-INTERLEAVE1-NEXT: store <16 x i32> [[TMP8]], ptr [[TMP13]], align 4 ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP12]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_not_phi( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] { @@ -622,11 +632,12 @@ define i32 @not_dotp_not_phi(ptr %a, ptr noalias %b, ptr noalias %c) { ; CHECK-INTERLEAVED-NEXT: store <16 x i32> [[TMP8]], ptr [[TMP13]], align 4 ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; 
CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15 ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP12]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_not_phi( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] { @@ -650,11 +661,12 @@ define i32 @not_dotp_not_phi(ptr %a, ptr noalias %b, ptr noalias %c) { ; CHECK-MAXBW-NEXT: store <16 x i32> [[TMP8]], ptr [[TMP13]], align 4 ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = extractelement <16 x i32> [[TMP7]], i32 15 ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP12]] ; entry: br label %for.body @@ -733,7 +745,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP31]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]]) @@ -831,7 +843,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP50]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP51:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE10]], [[PARTIAL_REDUCE13]] ; CHECK-INTERLEAVED-NEXT: [[TMP52:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) @@ -897,7 +909,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], 
<16 x i32> [[TMP31]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]]) ; CHECK-MAXBW-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]]) @@ -1292,11 +1304,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-INTERLEAVE1-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP182]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp_predicated( ; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1627,11 +1640,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-INTERLEAVED-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP182]] ; ; CHECK-MAXBW-LABEL: define i32 @dotp_predicated( ; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1962,11 +1976,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-MAXBW-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br label [[EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP182]] ; entry: br label %for.body @@ -2010,12 +2025,14 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; 
CHECK-INTERLEAVE1-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15 ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: [[RESULT:%.*]] = add i32 [[TMP10]], [[TMP11]] +; CHECK-INTERLEAVE1-NEXT: ret i32 [[RESULT]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_extend_user( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -2045,13 +2062,15 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP14]] = add <16 x i32> [[TMP12]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP14]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = extractelement <16 x i32> [[TMP10]], i32 15 ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: [[RESULT:%.*]] = add i32 [[TMP16]], [[TMP17]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_extend_user( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -2072,12 +2091,14 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) { ; CHECK-MAXBW-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]]) ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15 ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: [[RESULT:%.*]] = add i32 [[TMP10]], [[TMP11]] +; CHECK-MAXBW-NEXT: ret i32 [[RESULT]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll index 09917fcab80c4..6e11e559151da 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll @@ -501,7 +501,8 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]]) ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP71]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @not_dotp_different_types( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -660,7 +661,8 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP138]], [[TMP137]] ; CHECK-INTERLEAVED-NEXT: [[TMP142:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP142]] ; ; CHECK-MAXBW-LABEL: define i32 @not_dotp_different_types( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -747,7 +749,8 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 { ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP138]]) ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP71]] ; entry: br label %for.body @@ -800,7 +803,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add [[TMP16]], [[TMP17]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8 @@ -848,7 +851,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = add [[TMP25]], [[TMP26]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul nuw i32 [[TMP29]], 8 @@ -890,7 +893,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = add [[TMP25]], [[TMP26]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = call i32 
@llvm.vscale.i32() ; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8 @@ -949,7 +952,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = add [[TMP16]], [[TMP15]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8 @@ -987,7 +990,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = add [[TMP30]], [[TMP22]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = mul nuw i32 [[TMP27]], 8 @@ -1019,7 +1022,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = add [[TMP20]], [[TMP19]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8 @@ -1108,7 +1111,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP41]] = add [[TMP40]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] ; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP41]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP35]]) @@ -1226,7 +1229,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP81]] = add [[TMP79]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; 
CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP81]], [[TMP80]] ; CHECK-INTERLEAVED-NEXT: [[TMP83:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) @@ -1296,7 +1299,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32( [[VEC_PHI4]], [[TMP73]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP74:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32( [[PARTIAL_REDUCE16]]) ; CHECK-MAXBW-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32( [[PARTIAL_REDUCE17]]) @@ -1393,11 +1396,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP22]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp_predicated( ; CHECK-INTERLEAVED-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1430,11 +1434,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) ; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP22]] ; ; CHECK-MAXBW-LABEL: define i32 @dotp_predicated( ; CHECK-MAXBW-SAME: i64 [[N:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -1467,11 +1472,12 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = xor i1 [[TMP19]], true -; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] 
+; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br label [[EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP21]] ; entry: br label %for.body @@ -1519,7 +1525,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add [[TMP13]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] ; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP14]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -1566,7 +1572,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add [[TMP22]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP24]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[BIN_RDX]]) @@ -1601,7 +1607,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP24]] = add [[TMP22]], [[VEC_PHI1]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[TMP24]]) ; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -1660,7 +1666,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add [[VEC_PHI]], [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[TMP15]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] @@ -1707,7 +1713,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add [[VEC_PHI1]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 
[[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP25]], [[TMP24]] ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) @@ -1742,7 +1748,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP14]] = add [[VEC_PHI]], [[TMP13]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64( [[TMP14]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] @@ -1860,7 +1866,7 @@ define void @not_dotp_not_phi2(ptr %matrix, i32 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add i32 [[TMP21]], [[TMP15]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP23]], [[TMP22]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -1972,7 +1978,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2010,7 +2016,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -2047,7 +2053,7 @@ define i64 
@not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64( [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2105,7 +2111,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2143,7 +2149,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -2180,7 +2186,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64( [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2247,7 +2253,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP18]] = add [[TMP17]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; 
CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[TMP18]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -2301,7 +2307,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP28]] = add [[TMP26]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add [[TMP28]], [[TMP27]] ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[BIN_RDX]]) @@ -2343,7 +2349,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-MAXBW-NEXT: [[TMP20]] = add [[TMP17]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64( [[TMP20]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -2465,7 +2471,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-INTERLEAVE1-NEXT: [[TMP36]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP36]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP33]]) @@ -2565,7 +2571,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]]) ; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]]) @@ -2665,7 +2671,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, 
ptr %b, ptr %sum, ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]]) ; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll index 1ef5b208a7b32..db3166cc0ec8d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll @@ -499,7 +499,8 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP10]]) ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP13]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @zext_add_reduc_i8_i32_predicated( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]]) #[[ATTR0]] { @@ -527,7 +528,8 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP10]]) ; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP13]] ; ; CHECK-MAXBW-LABEL: define i32 @zext_add_reduc_i8_i32_predicated( ; CHECK-MAXBW-SAME: ptr [[A:%.*]]) #[[ATTR0]] { @@ -555,7 +557,8 @@ define i32 @zext_add_reduc_i8_i32_predicated(ptr %a) #0 { ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK-MAXBW: scalar.ph: +; CHECK-MAXBW: for.exit: +; CHECK-MAXBW-NEXT: ret i32 [[TMP12]] ; entry: br label %for.body @@ -674,7 +677,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = sub <16 x i32> [[VEC_PHI]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] @@ -700,7 +703,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-INTERLEAVED-NEXT: [[TMP7]] = sub <16 x i32> [[VEC_PHI1]], [[TMP5]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -726,7 +729,7 @@ define i32 @zext_sub_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 { ; CHECK-MAXBW-NEXT: [[TMP10]] = sub [[VEC_PHI]], [[TMP9]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32( [[TMP10]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] @@ -768,7 +771,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = add <16 x i32> [[TMP3]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] @@ -794,7 +797,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[TMP5]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -820,7 +823,7 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP9]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] @@ -871,7 +874,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <16 x i32> [[VEC_PHI]], [[BROADCAST_SPLAT]] ; 
CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] @@ -906,7 +909,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[VEC_PHI2]], [[BROADCAST_SPLAT]] ; CHECK-INTERLEAVED-NEXT: [[TMP21]] = add nuw i32 [[VEC_PHI1]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP21]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -942,7 +945,7 @@ define i32 @add_of_zext_outside_loop(i32 %a, ptr noalias %b, i8 %c, i32 %d) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add [[VEC_PHI]], [[BROADCAST_SPLAT]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv16i32( [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] @@ -993,7 +996,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP6]] = add <16 x i32> [[VEC_PHI]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP6]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] @@ -1028,7 +1031,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP8]] = add <16 x i32> [[VEC_PHI2]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP22]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> 
[[TMP8]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) @@ -1064,7 +1067,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32( [[VEC_PHI]], [[TMP9]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PARTIAL_REDUCE]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll index c4feabe960a67..edf7e280d7416 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll @@ -50,22 +50,9 @@ define i32 @pr70988(ptr %src, i32 %n) { ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP17]], i32 [[TMP18]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDUC:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[MAX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[INDUC]] -; CHECK-NEXT: [[TMP22:%.*]] = load ptr, ptr [[GEP]], align 8 -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 -; CHECK-NEXT: [[TMP24]] = tail call i32 @llvm.smax.i32(i32 [[TMP23]], i32 [[MAX]]) -; CHECK-NEXT: [[INDUC_NEXT]] = add nuw nsw i64 [[INDUC]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDUC_NEXT]], [[UMAX]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[TMP24]], [[LOOP]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; entry: %1 = and i32 %n, 15 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll index efce4bdf712a0..1dcd665817196 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll @@ -7,7 +7,7 @@ target triple = "aarch64-unknown-linux" ; Test case from https://github.com/llvm/llvm-project/issues/148431. 
define void @test_predicated_load_cast_hint(ptr %dst.1, ptr %dst.2, ptr %src, i8 %n, i64 %off) #0 { ; CHECK-LABEL: define void @test_predicated_load_cast_hint( -; CHECK-SAME: ptr [[DST_1:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]], i8 [[N:%.*]], i64 [[OFF:%.*]]) { +; CHECK-SAME: ptr [[DST_1:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]], i8 [[N:%.*]], i64 [[OFF:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[N_EXT:%.*]] = sext i8 [[N]] to i32 ; CHECK-NEXT: [[N_SUB:%.*]] = add i32 [[N_EXT]], -15 @@ -66,205 +66,64 @@ define void @test_predicated_load_cast_hint(ptr %dst.1, ptr %dst.2, ptr %src, i8 ; CHECK-NEXT: [[CONFLICT_RDX15:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT14]] ; CHECK-NEXT: br i1 [[CONFLICT_RDX15]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[TMP2]], 15 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], 16 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[TMP2]], 1 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer +; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 0, i32 [[TMP2]]) ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE50:.*]] ] -; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i32 [[INDEX]] to i8 -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i8 [[DOTCAST]], 4 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT17:%.*]] = insertelement <16 x i32> poison, i32 [[INDEX]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT18:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT17]], <16 x i32> poison, <16 x i32> zeroinitializer -; CHECK-NEXT: [[VEC_IV:%.*]] = add <16 x i32> [[BROADCAST_SPLAT18]], -; CHECK-NEXT: [[TMP25:%.*]] = icmp ule <16 x i32> [[VEC_IV]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[TMP26:%.*]] = load i8, ptr [[SRC]], align 1, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT19:%.*]] = insertelement <16 x i8> poison, i8 [[TMP26]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT20:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT19]], <16 x i8> poison, <16 x i32> zeroinitializer -; CHECK-NEXT: [[TMP27:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT20]] to <16 x i64> -; CHECK-NEXT: [[TMP28:%.*]] = extractelement <16 x i1> [[TMP25]], i32 0 -; CHECK-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE22:.*]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[PRED_STORE_CONTINUE22]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i8> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE22]] ] +; CHECK-NEXT: [[TMP28:%.*]] = load i8, ptr [[SRC]], align 1, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i8> poison, i8 [[TMP28]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i8> [[BROADCAST_SPLATINSERT]], <4 x i8> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP25:%.*]] = zext <4 x i8> 
[[BROADCAST_SPLAT]] to <4 x i64> +; CHECK-NEXT: [[TMP26:%.*]] = zext <4 x i8> [[VEC_IND]] to <4 x i64> +; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 0 +; CHECK-NEXT: br i1 [[TMP27]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] ; CHECK: [[PRED_STORE_IF]]: -; CHECK-NEXT: [[TMP29:%.*]] = add i8 [[OFFSET_IDX]], 0 -; CHECK-NEXT: [[TMP30:%.*]] = zext i8 [[TMP29]] to i64 -; CHECK-NEXT: [[TMP31:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP30]], i64 [[OFF]] -; CHECK-NEXT: [[TMP32:%.*]] = extractelement <16 x i64> [[TMP27]], i32 0 -; CHECK-NEXT: [[TMP33:%.*]] = or i64 [[TMP32]], 1 -; CHECK-NEXT: store i64 [[TMP33]], ptr [[TMP31]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] -; CHECK: [[PRED_STORE_CONTINUE]]: -; CHECK-NEXT: [[TMP34:%.*]] = extractelement <16 x i1> [[TMP25]], i32 1 -; CHECK-NEXT: br i1 [[TMP34]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]] -; CHECK: [[PRED_STORE_IF21]]: -; CHECK-NEXT: [[TMP35:%.*]] = add i8 [[OFFSET_IDX]], 4 -; CHECK-NEXT: [[TMP36:%.*]] = zext i8 [[TMP35]] to i64 -; CHECK-NEXT: [[TMP37:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP36]], i64 [[OFF]] -; CHECK-NEXT: [[TMP38:%.*]] = extractelement <16 x i64> [[TMP27]], i32 1 -; CHECK-NEXT: [[TMP39:%.*]] = or i64 [[TMP38]], 1 -; CHECK-NEXT: store i64 [[TMP39]], ptr [[TMP37]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] -; CHECK: [[PRED_STORE_CONTINUE22]]: -; CHECK-NEXT: [[TMP40:%.*]] = extractelement <16 x i1> [[TMP25]], i32 2 -; CHECK-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]] -; CHECK: [[PRED_STORE_IF23]]: -; CHECK-NEXT: [[TMP41:%.*]] = add i8 [[OFFSET_IDX]], 8 -; CHECK-NEXT: [[TMP42:%.*]] = zext i8 [[TMP41]] to i64 -; CHECK-NEXT: [[TMP43:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP42]], i64 [[OFF]] -; CHECK-NEXT: [[TMP44:%.*]] = extractelement <16 x i64> [[TMP27]], i32 2 -; CHECK-NEXT: [[TMP45:%.*]] = or i64 [[TMP44]], 1 -; CHECK-NEXT: store i64 [[TMP45]], ptr [[TMP43]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE24]] -; CHECK: [[PRED_STORE_CONTINUE24]]: -; CHECK-NEXT: [[TMP46:%.*]] = extractelement <16 x i1> [[TMP25]], i32 3 -; CHECK-NEXT: br i1 [[TMP46]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]] -; CHECK: [[PRED_STORE_IF25]]: -; CHECK-NEXT: [[TMP47:%.*]] = add i8 [[OFFSET_IDX]], 12 -; CHECK-NEXT: [[TMP48:%.*]] = zext i8 [[TMP47]] to i64 -; CHECK-NEXT: [[TMP49:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP48]], i64 [[OFF]] -; CHECK-NEXT: [[TMP50:%.*]] = extractelement <16 x i64> [[TMP27]], i32 3 -; CHECK-NEXT: [[TMP51:%.*]] = or i64 [[TMP50]], 1 -; CHECK-NEXT: store i64 [[TMP51]], ptr [[TMP49]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE26]] -; CHECK: [[PRED_STORE_CONTINUE26]]: -; CHECK-NEXT: [[TMP52:%.*]] = extractelement <16 x i1> [[TMP25]], i32 4 -; CHECK-NEXT: br i1 [[TMP52]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28:.*]] -; CHECK: [[PRED_STORE_IF27]]: -; CHECK-NEXT: [[TMP53:%.*]] = add i8 [[OFFSET_IDX]], 16 -; CHECK-NEXT: [[TMP54:%.*]] = zext i8 [[TMP53]] to i64 -; CHECK-NEXT: [[TMP55:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP54]], i64 [[OFF]] -; CHECK-NEXT: [[TMP56:%.*]] = extractelement <16 x i64> [[TMP27]], i32 4 -; CHECK-NEXT: [[TMP57:%.*]] = or i64 [[TMP56]], 1 -; CHECK-NEXT: store i64 [[TMP57]], ptr [[TMP55]], align 8, 
!alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE28]] -; CHECK: [[PRED_STORE_CONTINUE28]]: -; CHECK-NEXT: [[TMP58:%.*]] = extractelement <16 x i1> [[TMP25]], i32 5 -; CHECK-NEXT: br i1 [[TMP58]], label %[[PRED_STORE_IF29:.*]], label %[[PRED_STORE_CONTINUE30:.*]] -; CHECK: [[PRED_STORE_IF29]]: -; CHECK-NEXT: [[TMP59:%.*]] = add i8 [[OFFSET_IDX]], 20 -; CHECK-NEXT: [[TMP60:%.*]] = zext i8 [[TMP59]] to i64 -; CHECK-NEXT: [[TMP61:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP60]], i64 [[OFF]] -; CHECK-NEXT: [[TMP62:%.*]] = extractelement <16 x i64> [[TMP27]], i32 5 -; CHECK-NEXT: [[TMP63:%.*]] = or i64 [[TMP62]], 1 -; CHECK-NEXT: store i64 [[TMP63]], ptr [[TMP61]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE30]] -; CHECK: [[PRED_STORE_CONTINUE30]]: -; CHECK-NEXT: [[TMP64:%.*]] = extractelement <16 x i1> [[TMP25]], i32 6 -; CHECK-NEXT: br i1 [[TMP64]], label %[[PRED_STORE_IF31:.*]], label %[[PRED_STORE_CONTINUE32:.*]] -; CHECK: [[PRED_STORE_IF31]]: -; CHECK-NEXT: [[TMP65:%.*]] = add i8 [[OFFSET_IDX]], 24 -; CHECK-NEXT: [[TMP66:%.*]] = zext i8 [[TMP65]] to i64 -; CHECK-NEXT: [[TMP67:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP66]], i64 [[OFF]] -; CHECK-NEXT: [[TMP68:%.*]] = extractelement <16 x i64> [[TMP27]], i32 6 -; CHECK-NEXT: [[TMP69:%.*]] = or i64 [[TMP68]], 1 -; CHECK-NEXT: store i64 [[TMP69]], ptr [[TMP67]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE32]] -; CHECK: [[PRED_STORE_CONTINUE32]]: -; CHECK-NEXT: [[TMP70:%.*]] = extractelement <16 x i1> [[TMP25]], i32 7 -; CHECK-NEXT: br i1 [[TMP70]], label %[[PRED_STORE_IF33:.*]], label %[[PRED_STORE_CONTINUE34:.*]] -; CHECK: [[PRED_STORE_IF33]]: -; CHECK-NEXT: [[TMP71:%.*]] = add i8 [[OFFSET_IDX]], 28 -; CHECK-NEXT: [[TMP72:%.*]] = zext i8 [[TMP71]] to i64 -; CHECK-NEXT: [[TMP73:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP72]], i64 [[OFF]] -; CHECK-NEXT: [[TMP74:%.*]] = extractelement <16 x i64> [[TMP27]], i32 7 -; CHECK-NEXT: [[TMP75:%.*]] = or i64 [[TMP74]], 1 -; CHECK-NEXT: store i64 [[TMP75]], ptr [[TMP73]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE34]] -; CHECK: [[PRED_STORE_CONTINUE34]]: -; CHECK-NEXT: [[TMP76:%.*]] = extractelement <16 x i1> [[TMP25]], i32 8 -; CHECK-NEXT: br i1 [[TMP76]], label %[[PRED_STORE_IF35:.*]], label %[[PRED_STORE_CONTINUE36:.*]] -; CHECK: [[PRED_STORE_IF35]]: -; CHECK-NEXT: [[TMP77:%.*]] = add i8 [[OFFSET_IDX]], 32 -; CHECK-NEXT: [[TMP78:%.*]] = zext i8 [[TMP77]] to i64 -; CHECK-NEXT: [[TMP79:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP78]], i64 [[OFF]] -; CHECK-NEXT: [[TMP80:%.*]] = extractelement <16 x i64> [[TMP27]], i32 8 -; CHECK-NEXT: [[TMP81:%.*]] = or i64 [[TMP80]], 1 -; CHECK-NEXT: store i64 [[TMP81]], ptr [[TMP79]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE36]] -; CHECK: [[PRED_STORE_CONTINUE36]]: -; CHECK-NEXT: [[TMP82:%.*]] = extractelement <16 x i1> [[TMP25]], i32 9 -; CHECK-NEXT: br i1 [[TMP82]], label %[[PRED_STORE_IF37:.*]], label %[[PRED_STORE_CONTINUE38:.*]] -; CHECK: [[PRED_STORE_IF37]]: -; CHECK-NEXT: [[TMP83:%.*]] = add i8 [[OFFSET_IDX]], 36 -; CHECK-NEXT: [[TMP84:%.*]] = zext i8 [[TMP83]] to i64 -; CHECK-NEXT: [[TMP85:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP84]], i64 [[OFF]] -; CHECK-NEXT: [[TMP86:%.*]] = extractelement <16 x i64> [[TMP27]], i32 9 -; CHECK-NEXT: [[TMP87:%.*]] = or i64 [[TMP86]], 1 -; CHECK-NEXT: store i64 [[TMP87]], ptr [[TMP85]], align 8, 
!alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE38]] -; CHECK: [[PRED_STORE_CONTINUE38]]: -; CHECK-NEXT: [[TMP88:%.*]] = extractelement <16 x i1> [[TMP25]], i32 10 -; CHECK-NEXT: br i1 [[TMP88]], label %[[PRED_STORE_IF39:.*]], label %[[PRED_STORE_CONTINUE40:.*]] -; CHECK: [[PRED_STORE_IF39]]: -; CHECK-NEXT: [[TMP89:%.*]] = add i8 [[OFFSET_IDX]], 40 -; CHECK-NEXT: [[TMP90:%.*]] = zext i8 [[TMP89]] to i64 -; CHECK-NEXT: [[TMP91:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP90]], i64 [[OFF]] -; CHECK-NEXT: [[TMP92:%.*]] = extractelement <16 x i64> [[TMP27]], i32 10 -; CHECK-NEXT: [[TMP93:%.*]] = or i64 [[TMP92]], 1 -; CHECK-NEXT: store i64 [[TMP93]], ptr [[TMP91]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE40]] -; CHECK: [[PRED_STORE_CONTINUE40]]: -; CHECK-NEXT: [[TMP94:%.*]] = extractelement <16 x i1> [[TMP25]], i32 11 -; CHECK-NEXT: br i1 [[TMP94]], label %[[PRED_STORE_IF41:.*]], label %[[PRED_STORE_CONTINUE42:.*]] -; CHECK: [[PRED_STORE_IF41]]: -; CHECK-NEXT: [[TMP95:%.*]] = add i8 [[OFFSET_IDX]], 44 -; CHECK-NEXT: [[TMP96:%.*]] = zext i8 [[TMP95]] to i64 -; CHECK-NEXT: [[TMP97:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP96]], i64 [[OFF]] -; CHECK-NEXT: [[TMP98:%.*]] = extractelement <16 x i64> [[TMP27]], i32 11 -; CHECK-NEXT: [[TMP99:%.*]] = or i64 [[TMP98]], 1 -; CHECK-NEXT: store i64 [[TMP99]], ptr [[TMP97]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE42]] -; CHECK: [[PRED_STORE_CONTINUE42]]: -; CHECK-NEXT: [[TMP100:%.*]] = extractelement <16 x i1> [[TMP25]], i32 12 -; CHECK-NEXT: br i1 [[TMP100]], label %[[PRED_STORE_IF43:.*]], label %[[PRED_STORE_CONTINUE44:.*]] -; CHECK: [[PRED_STORE_IF43]]: -; CHECK-NEXT: [[TMP101:%.*]] = add i8 [[OFFSET_IDX]], 48 -; CHECK-NEXT: [[TMP102:%.*]] = zext i8 [[TMP101]] to i64 +; CHECK-NEXT: [[TMP102:%.*]] = extractelement <4 x i64> [[TMP26]], i32 0 ; CHECK-NEXT: [[TMP103:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP102]], i64 [[OFF]] -; CHECK-NEXT: [[TMP104:%.*]] = extractelement <16 x i64> [[TMP27]], i32 12 +; CHECK-NEXT: [[TMP104:%.*]] = extractelement <4 x i64> [[TMP25]], i32 0 ; CHECK-NEXT: [[TMP105:%.*]] = or i64 [[TMP104]], 1 ; CHECK-NEXT: store i64 [[TMP105]], ptr [[TMP103]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE44]] -; CHECK: [[PRED_STORE_CONTINUE44]]: -; CHECK-NEXT: [[TMP106:%.*]] = extractelement <16 x i1> [[TMP25]], i32 13 -; CHECK-NEXT: br i1 [[TMP106]], label %[[PRED_STORE_IF45:.*]], label %[[PRED_STORE_CONTINUE46:.*]] -; CHECK: [[PRED_STORE_IF45]]: -; CHECK-NEXT: [[TMP107:%.*]] = add i8 [[OFFSET_IDX]], 52 -; CHECK-NEXT: [[TMP108:%.*]] = zext i8 [[TMP107]] to i64 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] +; CHECK: [[PRED_STORE_CONTINUE]]: +; CHECK-NEXT: [[TMP32:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 1 +; CHECK-NEXT: br i1 [[TMP32]], label %[[PRED_STORE_IF17:.*]], label %[[PRED_STORE_CONTINUE18:.*]] +; CHECK: [[PRED_STORE_IF17]]: +; CHECK-NEXT: [[TMP108:%.*]] = extractelement <4 x i64> [[TMP26]], i32 1 ; CHECK-NEXT: [[TMP109:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP108]], i64 [[OFF]] -; CHECK-NEXT: [[TMP110:%.*]] = extractelement <16 x i64> [[TMP27]], i32 13 +; CHECK-NEXT: [[TMP110:%.*]] = extractelement <4 x i64> [[TMP25]], i32 1 ; CHECK-NEXT: [[TMP111:%.*]] = or i64 [[TMP110]], 1 ; CHECK-NEXT: store i64 [[TMP111]], ptr [[TMP109]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE46]] -; CHECK: 
[[PRED_STORE_CONTINUE46]]: -; CHECK-NEXT: [[TMP112:%.*]] = extractelement <16 x i1> [[TMP25]], i32 14 -; CHECK-NEXT: br i1 [[TMP112]], label %[[PRED_STORE_IF47:.*]], label %[[PRED_STORE_CONTINUE48:.*]] -; CHECK: [[PRED_STORE_IF47]]: -; CHECK-NEXT: [[TMP113:%.*]] = add i8 [[OFFSET_IDX]], 56 -; CHECK-NEXT: [[TMP114:%.*]] = zext i8 [[TMP113]] to i64 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE18]] +; CHECK: [[PRED_STORE_CONTINUE18]]: +; CHECK-NEXT: [[TMP37:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 2 +; CHECK-NEXT: br i1 [[TMP37]], label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]] +; CHECK: [[PRED_STORE_IF19]]: +; CHECK-NEXT: [[TMP114:%.*]] = extractelement <4 x i64> [[TMP26]], i32 2 ; CHECK-NEXT: [[TMP115:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP114]], i64 [[OFF]] -; CHECK-NEXT: [[TMP116:%.*]] = extractelement <16 x i64> [[TMP27]], i32 14 +; CHECK-NEXT: [[TMP116:%.*]] = extractelement <4 x i64> [[TMP25]], i32 2 ; CHECK-NEXT: [[TMP117:%.*]] = or i64 [[TMP116]], 1 ; CHECK-NEXT: store i64 [[TMP117]], ptr [[TMP115]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE48]] -; CHECK: [[PRED_STORE_CONTINUE48]]: -; CHECK-NEXT: [[TMP118:%.*]] = extractelement <16 x i1> [[TMP25]], i32 15 -; CHECK-NEXT: br i1 [[TMP118]], label %[[PRED_STORE_IF49:.*]], label %[[PRED_STORE_CONTINUE50]] -; CHECK: [[PRED_STORE_IF49]]: -; CHECK-NEXT: [[TMP119:%.*]] = add i8 [[OFFSET_IDX]], 60 -; CHECK-NEXT: [[TMP120:%.*]] = zext i8 [[TMP119]] to i64 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE20]] +; CHECK: [[PRED_STORE_CONTINUE20]]: +; CHECK-NEXT: [[TMP42:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 3 +; CHECK-NEXT: br i1 [[TMP42]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22]] +; CHECK: [[PRED_STORE_IF21]]: +; CHECK-NEXT: [[TMP120:%.*]] = extractelement <4 x i64> [[TMP26]], i32 3 ; CHECK-NEXT: [[TMP121:%.*]] = getelementptr [16 x i64], ptr [[DST_1]], i64 [[TMP120]], i64 [[OFF]] -; CHECK-NEXT: [[TMP122:%.*]] = extractelement <16 x i64> [[TMP27]], i32 15 +; CHECK-NEXT: [[TMP122:%.*]] = extractelement <4 x i64> [[TMP25]], i32 3 ; CHECK-NEXT: [[TMP123:%.*]] = or i64 [[TMP122]], 1 ; CHECK-NEXT: store i64 [[TMP123]], ptr [[TMP121]], align 8, !alias.scope [[META3]] -; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE50]] -; CHECK: [[PRED_STORE_CONTINUE50]]: +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] +; CHECK: [[PRED_STORE_CONTINUE22]]: ; CHECK-NEXT: store i8 0, ptr [[DST_2]], align 1, !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 -; CHECK-NEXT: [[TMP124:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP124]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX_NEXT]], i32 [[TMP2]]) +; CHECK-NEXT: [[TMP47:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-NEXT: [[TMP48:%.*]] = xor i1 [[TMP47]], true +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i8> [[VEC_IND]], splat (i8 16) +; CHECK-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[SCALAR_PH]]: @@ -309,6 +168,219 @@ exit: ret void } +; Check computing costs for sdiv/udiv with invariant divisor and tail folding. 
+; From https://github.com/llvm/llvm-project/issues/160354. +define void @srem_sdiv_with_tail_folding(i32 %d.0, i32 %d.1, ptr %dst, i32 %end) #0 { +; CHECK-LABEL: define void @srem_sdiv_with_tail_folding( +; CHECK-SAME: i32 [[D_0:%.*]], i32 [[D_1:%.*]], ptr [[DST:%.*]], i32 [[END:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] +; CHECK: [[LOOP_HEADER]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[IV_SUB:%.*]] = add nsw i32 [[IV]], -1 +; CHECK-NEXT: [[REM:%.*]] = srem i32 [[IV_SUB]], [[D_0]] +; CHECK-NEXT: [[REM_1:%.*]] = add nsw i32 [[REM]], 1 +; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[REM_1]], [[D_0]] +; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] +; CHECK: [[THEN]]: +; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[IV_SUB]], [[D_1]] +; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[DIV]], 1 +; CHECK-NEXT: [[ADD_1_EXT:%.*]] = sext i32 [[ADD_1]] to i64 +; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 [[ADD_1_EXT]] +; CHECK-NEXT: store i32 [[IV]], ptr [[GEP_DST]], align 4 +; CHECK-NEXT: br label %[[LOOP_LATCH]] +; CHECK: [[LOOP_LATCH]]: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp ne i32 [[IV_NEXT]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop.header + +loop.header: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.latch ] + %iv.sub = add nsw i32 %iv, -1 + %rem = srem i32 %iv.sub, %d.0 + %rem.1 = add nsw i32 %rem, 1 + %c = icmp eq i32 %rem.1, %d.0 + br i1 %c, label %then, label %loop.latch + +then: + %div = sdiv i32 %iv.sub, %d.1 + %add.1 = add i32 %div, 1 + %add.1.ext = sext i32 %add.1 to i64 + %gep.dst = getelementptr i32, ptr %dst, i64 %add.1.ext + store i32 %iv, ptr %gep.dst, align 4 + br label %loop.latch + +loop.latch: + %iv.next = add nuw nsw i32 %iv, 1 + %ec = icmp ne i32 %iv.next, %end + br i1 %ec, label %loop.header, label %exit + +exit: + ret void +} + +; Check computing costs for predicated sdiv/udiv with invariant divisor without tail folding. +; From https://github.com/llvm/llvm-project/issues/160356. 
+define void @srem_sdiv_without_tail_folding(i32 %d.0, i32 %d.1, ptr %dst, i32 %end) #1 { +; CHECK-LABEL: define void @srem_sdiv_without_tail_folding( +; CHECK-SAME: i32 [[D_0:%.*]], i32 [[D_1:%.*]], ptr [[DST:%.*]], i32 [[END:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[END]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[END]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[END]], [[N_MOD_VF]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[D_0]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE12]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[VEC_IND]], splat (i32 -1) +; CHECK-NEXT: [[TMP1:%.*]] = srem <4 x i32> [[TMP0]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP1]], splat (i32 1) +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[TMP2]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i32 0 +; CHECK-NEXT: br i1 [[TMP4]], label %[[PRED_SDIV_IF:.*]], label %[[PRED_SDIV_CONTINUE:.*]] +; CHECK: [[PRED_SDIV_IF]]: +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0 +; CHECK-NEXT: [[TMP6:%.*]] = sdiv i32 [[TMP5]], [[D_1]] +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP6]], i32 0 +; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE]] +; CHECK: [[PRED_SDIV_CONTINUE]]: +; CHECK-NEXT: [[TMP8:%.*]] = phi <4 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_SDIV_IF]] ] +; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP3]], i32 1 +; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_SDIV_IF1:.*]], label %[[PRED_SDIV_CONTINUE2:.*]] +; CHECK: [[PRED_SDIV_IF1]]: +; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1 +; CHECK-NEXT: [[TMP11:%.*]] = sdiv i32 [[TMP10]], [[D_1]] +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP11]], i32 1 +; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE2]] +; CHECK: [[PRED_SDIV_CONTINUE2]]: +; CHECK-NEXT: [[TMP13:%.*]] = phi <4 x i32> [ [[TMP8]], %[[PRED_SDIV_CONTINUE]] ], [ [[TMP12]], %[[PRED_SDIV_IF1]] ] +; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP3]], i32 2 +; CHECK-NEXT: br i1 [[TMP14]], label %[[PRED_SDIV_IF3:.*]], label %[[PRED_SDIV_CONTINUE4:.*]] +; CHECK: [[PRED_SDIV_IF3]]: +; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2 +; CHECK-NEXT: [[TMP16:%.*]] = sdiv i32 [[TMP15]], [[D_1]] +; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[TMP16]], i32 2 +; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE4]] +; CHECK: [[PRED_SDIV_CONTINUE4]]: +; CHECK-NEXT: [[TMP18:%.*]] = phi <4 x i32> [ [[TMP13]], %[[PRED_SDIV_CONTINUE2]] ], [ [[TMP17]], %[[PRED_SDIV_IF3]] ] +; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP3]], i32 3 +; CHECK-NEXT: br i1 [[TMP19]], label %[[PRED_SDIV_IF5:.*]], label %[[PRED_SDIV_CONTINUE6:.*]] +; CHECK: [[PRED_SDIV_IF5]]: +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3 +; CHECK-NEXT: [[TMP21:%.*]] = sdiv i32 
[[TMP20]], [[D_1]] +; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP21]], i32 3 +; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE6]] +; CHECK: [[PRED_SDIV_CONTINUE6]]: +; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i32> [ [[TMP18]], %[[PRED_SDIV_CONTINUE4]] ], [ [[TMP22]], %[[PRED_SDIV_IF5]] ] +; CHECK-NEXT: [[TMP24:%.*]] = add <4 x i32> [[TMP23]], splat (i32 1) +; CHECK-NEXT: [[TMP25:%.*]] = sext <4 x i32> [[TMP24]] to <4 x i64> +; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[TMP3]], i32 0 +; CHECK-NEXT: br i1 [[TMP26]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; CHECK: [[PRED_STORE_IF]]: +; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i64> [[TMP25]], i32 0 +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP27]] +; CHECK-NEXT: [[TMP29:%.*]] = add i32 [[INDEX]], 0 +; CHECK-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] +; CHECK: [[PRED_STORE_CONTINUE]]: +; CHECK-NEXT: [[TMP30:%.*]] = extractelement <4 x i1> [[TMP3]], i32 1 +; CHECK-NEXT: br i1 [[TMP30]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]] +; CHECK: [[PRED_STORE_IF7]]: +; CHECK-NEXT: [[TMP31:%.*]] = extractelement <4 x i64> [[TMP25]], i32 1 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP31]] +; CHECK-NEXT: [[TMP33:%.*]] = add i32 [[INDEX]], 1 +; CHECK-NEXT: store i32 [[TMP33]], ptr [[TMP32]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; CHECK: [[PRED_STORE_CONTINUE8]]: +; CHECK-NEXT: [[TMP34:%.*]] = extractelement <4 x i1> [[TMP3]], i32 2 +; CHECK-NEXT: br i1 [[TMP34]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; CHECK: [[PRED_STORE_IF9]]: +; CHECK-NEXT: [[TMP35:%.*]] = extractelement <4 x i64> [[TMP25]], i32 2 +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP35]] +; CHECK-NEXT: [[TMP37:%.*]] = add i32 [[INDEX]], 2 +; CHECK-NEXT: store i32 [[TMP37]], ptr [[TMP36]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE10]] +; CHECK: [[PRED_STORE_CONTINUE10]]: +; CHECK-NEXT: [[TMP38:%.*]] = extractelement <4 x i1> [[TMP3]], i32 3 +; CHECK-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; CHECK: [[PRED_STORE_IF11]]: +; CHECK-NEXT: [[TMP39:%.*]] = extractelement <4 x i64> [[TMP25]], i32 3 +; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP39]] +; CHECK-NEXT: [[TMP41:%.*]] = add i32 [[INDEX]], 3 +; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP40]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; CHECK: [[PRED_STORE_CONTINUE12]]: +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP42]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[END]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] +; CHECK: [[LOOP_HEADER]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[IV_SUB:%.*]] = add nsw i32 [[IV]], -1 +; CHECK-NEXT: [[REM:%.*]] = srem i32 [[IV_SUB]], [[D_0]] +; CHECK-NEXT: 
[[REM_1:%.*]] = add nsw i32 [[REM]], 1 +; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[REM_1]], [[D_0]] +; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] +; CHECK: [[THEN]]: +; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[IV_SUB]], [[D_1]] +; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[DIV]], 1 +; CHECK-NEXT: [[ADD_1_EXT:%.*]] = sext i32 [[ADD_1]] to i64 +; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 [[ADD_1_EXT]] +; CHECK-NEXT: store i32 [[IV]], ptr [[GEP_DST]], align 4 +; CHECK-NEXT: br label %[[LOOP_LATCH]] +; CHECK: [[LOOP_LATCH]]: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp ne i32 [[IV_NEXT]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop.header + +loop.header: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.latch ] + %iv.sub = add nsw i32 %iv, -1 + %rem = srem i32 %iv.sub, %d.0 + %rem.1 = add nsw i32 %rem, 1 + %c = icmp eq i32 %rem.1, %d.0 + br i1 %c, label %then, label %loop.latch + +then: + %div = sdiv i32 %iv.sub, %d.1 + %add.1 = add i32 %div, 1 + %add.1.ext = sext i32 %add.1 to i64 + %gep.dst = getelementptr i32, ptr %dst, i64 %add.1.ext + store i32 %iv, ptr %gep.dst, align 4 + br label %loop.latch + +loop.latch: + %iv.next = add nuw nsw i32 %iv, 1 + %ec = icmp ne i32 %iv.next, %end + br i1 %ec, label %loop.header, label %exit + +exit: + ret void +} + +attributes #0 = { "target-cpu"="neoverse-v1" } +attributes #1 = { "target-cpu"="neoverse-v2" } + !0 = distinct !{!0, !1, !2, !3} !1 = !{!"llvm.loop.mustprogress"} !2 = !{!"llvm.loop.vectorize.predicate.enable", i1 true} @@ -327,4 +399,6 @@ exit: ; CHECK: [[META10]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META11]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META9]], [[META10]]} +; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META10]], [[META11]]} +; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META11]], [[META10]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll index 496bfbb18a106..0f82de629afa9 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll @@ -241,42 +241,8 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; PRED: [[MIDDLE_BLOCK]]: ; PRED-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[TMP41]]) ; PRED-NEXT: br label %[[EXIT:.*]] -; PRED: [[SCALAR_PH:.*]]: -; PRED-NEXT: br label %[[LOOP:.*]] -; PRED: [[LOOP]]: -; PRED-NEXT: [[TMP45:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP53:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP45]], %[[LOOP]] ] -; PRED-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[TMP52:%.*]] = add i64 [[Y]], 1 -; PRED-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP52]] -; PRED-NEXT: [[TMP53]] = load i32, ptr [[GEP_1]], align 4 -; PRED-NEXT: [[OR3:%.*]] = or i32 [[SCALAR_RECUR10]], [[X]] -; PRED-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1 -; PRED-NEXT: [[SHR:%.*]] = lshr i32 [[X]], 1 -; PRED-NEXT: [[TMP54:%.*]] = shl i32 [[OR3]], 1 -; PRED-NEXT: [[TMP55:%.*]] = or i32 [[TMP54]], 2 -; PRED-NEXT: [[SHL19:%.*]] = shl i32 [[X]], 1 -; PRED-NEXT: [[TMP56:%.*]] = or i32 [[SHR]], [[SHL19]] -; PRED-NEXT: [[TMP57:%.*]] = or i32 [[TMP56]], [[TMP55]] -; PRED-NEXT: [[TMP58:%.*]] = or i32 [[TMP57]], [[X]] -; PRED-NEXT: [[OR20:%.*]] = or i32 [[Z]], [[X]] -; PRED-NEXT: [[NOT:%.*]] = and i32 [[OR20]], 1 -; PRED-NEXT: [[AND:%.*]] = xor i32 [[NOT]], 1 -; PRED-NEXT: [[IDX_EXT_1:%.*]] = zext i32 [[AND]] to i64 -; PRED-NEXT: [[GEP_2:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[IDX_EXT_1]] -; PRED-NEXT: [[TMP59:%.*]] = load i32, ptr [[GEP_2]], align 4 -; PRED-NEXT: [[SHR24:%.*]] = lshr i32 [[TMP58]], 1 -; PRED-NEXT: [[IDX_EXT_2:%.*]] = zext i32 [[SHR24]] to i64 -; PRED-NEXT: [[GEP_3:%.*]] = getelementptr i32, ptr [[SRC_2]], i64 [[IDX_EXT_2]] -; PRED-NEXT: [[TMP60:%.*]] = load i32, ptr [[GEP_3]], align 4 -; PRED-NEXT: [[RED_1:%.*]] = or i32 [[TMP59]], [[SUM_RED]] -; PRED-NEXT: [[RED_2]] = or i32 [[RED_1]], [[TMP60]] -; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], [[Y]] -; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; PRED: [[EXIT]]: -; PRED-NEXT: [[RED_2_LCSSA:%.*]] = phi i32 [ [[RED_2]], %[[LOOP]] ], [ [[TMP44]], %[[MIDDLE_BLOCK]] ] -; PRED-NEXT: ret i32 [[RED_2_LCSSA]] +; PRED-NEXT: ret i32 [[TMP44]] ; entry: br label %loop @@ -418,11 +384,8 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; VSCALEFORTUNING2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; VSCALEFORTUNING2-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; VSCALEFORTUNING2: [[VEC_EPILOG_ITER_CHECK]]: -; VSCALEFORTUNING2-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; VSCALEFORTUNING2-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64() -; VSCALEFORTUNING2-NEXT: [[TMP27:%.*]] = shl nuw i64 [[TMP22]], 1 -; VSCALEFORTUNING2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP27]] -; VSCALEFORTUNING2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; VSCALEFORTUNING2-NEXT: 
[[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP5]] +; VSCALEFORTUNING2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF5:![0-9]+]] ; VSCALEFORTUNING2: [[VEC_EPILOG_PH]]: ; VSCALEFORTUNING2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; VSCALEFORTUNING2-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ [[TMP18]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -443,7 +406,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; VSCALEFORTUNING2-NEXT: [[TMP24]] = or [[TMP23]], [[VEC_PHI9]] ; VSCALEFORTUNING2-NEXT: [[INDEX_NEXT11]] = add nuw i64 [[IV]], [[TMP20]] ; VSCALEFORTUNING2-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT11]], [[N_VEC5]] -; VSCALEFORTUNING2-NEXT: br i1 [[TMP25]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; VSCALEFORTUNING2-NEXT: br i1 [[TMP25]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VSCALEFORTUNING2: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; VSCALEFORTUNING2-NEXT: [[TMP26:%.*]] = call i16 @llvm.vector.reduce.or.nxv2i16( [[TMP24]]) ; VSCALEFORTUNING2-NEXT: [[CMP_N12:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC5]] @@ -461,7 +424,7 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; VSCALEFORTUNING2-NEXT: [[RED_NEXT]] = or i16 [[DIV]], [[RED]] ; VSCALEFORTUNING2-NEXT: [[IV_NEXT]] = add i64 [[IV1]], 1 ; VSCALEFORTUNING2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], [[N]] -; VSCALEFORTUNING2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] +; VSCALEFORTUNING2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] ; VSCALEFORTUNING2: [[EXIT]]: ; VSCALEFORTUNING2-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i16 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP18]], %[[MIDDLE_BLOCK]] ], [ [[TMP26]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; VSCALEFORTUNING2-NEXT: ret i16 [[RED_NEXT_LCSSA]] @@ -500,21 +463,8 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; PRED: [[MIDDLE_BLOCK]]: ; PRED-NEXT: [[TMP19:%.*]] = call i16 @llvm.vector.reduce.or.nxv8i16( [[TMP16]]) ; PRED-NEXT: br label %[[EXIT:.*]] -; PRED: [[SCALAR_PH:.*]]: -; PRED-NEXT: br label %[[LOOP:.*]] -; PRED: [[LOOP]]: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[RED:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; PRED-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; PRED-NEXT: [[DIV:%.*]] = udiv i16 [[L]], [[X]] -; PRED-NEXT: [[RED_NEXT]] = or i16 [[DIV]], [[RED]] -; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] -; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; PRED: [[EXIT]]: -; PRED-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i16 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP19]], %[[MIDDLE_BLOCK]] ] -; PRED-NEXT: ret i16 [[RED_NEXT_LCSSA]] +; PRED-NEXT: ret i16 [[TMP19]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll index c15e8d4252fba..ab9b48fb68f6b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll @@ -616,6 +616,45 @@ exit: ret 
double %red.next } +define i32 @test_ptr_iv_load_used_by_other_load(ptr %start, ptr %end) { +; CHECK-LABEL: define i32 @test_ptr_iv_load_used_by_other_load( +; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ null, %[[ENTRY]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[IV]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 8 +; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[TMP1]], 0 +; CHECK-NEXT: [[C_EXT:%.*]] = zext i1 [[C]] to i32 +; CHECK-NEXT: [[RED_NEXT]] = or i32 [[RED]], [[C_EXT]] +; CHECK-NEXT: [[IV_NEXT]] = getelementptr nusw i8, ptr [[IV]], i64 32 +; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV]], [[END]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi i32 [ [[RED]], %[[LOOP]] ] +; CHECK-NEXT: ret i32 [[RED_LCSSA]] +; +entry: + br label %loop + +loop: ; preds = %loop, %entry + %iv = phi ptr [ %iv.next, %loop ], [ null, %entry ] + %red = phi i32 [ %red.next, %loop ], [ 0, %entry ] + %0 = load ptr, ptr %iv, align 8 + %1 = load i8, ptr %0, align 8 + %c = icmp ne i8 %1, 0 + %c.ext = zext i1 %c to i32 + %red.next = or i32 %red, %c.ext + %iv.next = getelementptr nusw i8, ptr %iv, i64 32 + %ec = icmp eq ptr %iv, %end + br i1 %ec, label %exit, label %loop + +exit: + ret i32 %red +} + attributes #0 = { "target-cpu"="neoverse-512tvb" } !0 = !{!1, !2, i64 0} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll index 885c79048aaf7..5072058ed5b8f 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll @@ -144,20 +144,8 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP12]], [[SUM_07]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]] +; CHECK-ORDERED-TF-NEXT: ret float [[TMP9]] ; @@ -390,23 +378,11 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT14]] = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP39]], 
i64 [[TMP6]]) ; CHECK-ORDERED-TF-NEXT: [[TMP40:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP41:%.*]] = xor i1 [[TMP40]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP42:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP42]], [[SUM_07]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP30]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[ADD_LCSSA]] +; CHECK-ORDERED-TF-NEXT: ret float [[TMP30]] ; @@ -630,30 +606,12 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-ORDERED-TF-NEXT: [[TMP17:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[A2]], [[SCALAR_PH:%.*]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[A1]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP19]], [[ADD_PHI2]] -; CHECK-ORDERED-TF-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[OR]] -; CHECK-ORDERED-TF-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDXB2]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD2]] = fadd float [[TMP20]], [[ADD_PHI1]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 2 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], 
!llvm.loop [[LOOP7:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[ADD1_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: [[ADD2_LCSSA:%.*]] = phi float [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: store float [[ADD1_LCSSA]], ptr [[A]], align 4 -; CHECK-ORDERED-TF-NEXT: store float [[ADD2_LCSSA]], ptr [[ARRAYIDXA]], align 4 +; CHECK-ORDERED-TF-NEXT: store float [[TMP16]], ptr [[A]], align 4 +; CHECK-ORDERED-TF-NEXT: store float [[TMP14]], ptr [[ARRAYIDXA]], align 4 ; CHECK-ORDERED-TF-NEXT: ret void ; @@ -863,28 +821,13 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP7]]) ; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END_LOOPEXIT:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ADD:%.*]] = fadd float [[TMP15]], [[TMP16]] -; CHECK-ORDERED-TF-NEXT: [[RDX]] = fadd float [[RES_014]], [[ADD]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-ORDERED-TF: for.end.loopexit: -; CHECK-ORDERED-TF-NEXT: [[RDX_LCSSA:%.*]] = phi float [ [[RDX]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ] ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[RDX_LCSSA]], [[FOR_END_LOOPEXIT]] ] +; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP12]], [[FOR_END_LOOPEXIT]] ] ; CHECK-ORDERED-TF-NEXT: ret float [[RES]] ; @@ -1081,31 +1024,11 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP6]]) ; CHECK-ORDERED-TF-NEXT: [[TMP13:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 1.000000e+00, [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP15]], 0.000000e+00 -; CHECK-ORDERED-TF-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; CHECK-ORDERED-TF: if.then: -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-ORDERED-TF-NEXT: br label [[FOR_INC]] -; CHECK-ORDERED-TF: for.inc: -; CHECK-ORDERED-TF-NEXT: [[PHI:%.*]] = phi float [ [[TMP16]], [[IF_THEN]] ], [ 3.000000e+00, [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[FADD]] = fadd float [[RES]], [[PHI]] -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[FADD]], [[FOR_INC]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[RDX]] +; CHECK-ORDERED-TF-NEXT: ret float [[TMP12]] ; @@ -1245,7 +1168,7 @@ define float @fadd_multiple(ptr noalias nocapture %a, ptr noalias nocapture %b, ; CHECK-ORDERED-TF-NEXT: [[ADD3]] = fadd float [[ADD]], [[TMP1]] ; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-ORDERED-TF: for.end: ; CHECK-ORDERED-TF-NEXT: [[RDX:%.*]] = phi float [ [[ADD3]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: ret float [[RDX]] @@ -1542,25 +1465,11 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]]) ; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = xor i1 [[TMP54]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]]) -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]] +; CHECK-ORDERED-TF-NEXT: ret float [[TMP44]] ; @@ -1852,25 +1761,11 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[ACTIVE_LANE_MASK_NEXT18]] = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP53]], i64 [[TMP6]]) ; CHECK-ORDERED-TF-NEXT: [[TMP54:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-ORDERED-TF-NEXT: [[TMP55:%.*]] = xor i1 [[TMP54]], true -; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-ORDERED-TF-NEXT: br i1 [[TMP55]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-ORDERED-TF: middle.block: ; CHECK-ORDERED-TF-NEXT: br label [[FOR_END:%.*]] -; CHECK-ORDERED-TF: scalar.ph: -; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-ORDERED-TF-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-ORDERED-TF-NEXT: [[MULADD]] = tail call nnan float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[SUM_07]]) -; CHECK-ORDERED-TF-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-ORDERED-TF-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-ORDERED-TF-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-ORDERED-TF: for.end: -; CHECK-ORDERED-TF-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP44]], [[MIDDLE_BLOCK]] ] -; CHECK-ORDERED-TF-NEXT: ret float [[MULADD_LCSSA]] +; CHECK-ORDERED-TF-NEXT: ret float [[TMP44]] ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-struct-return.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-struct-return.ll index 2941b3677af81..8830ce33aecff 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-struct-return.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-struct-return.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph:" --version 6 ; RUN: opt < %s -mattr=+sve 
-passes=loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s target triple = "aarch64-unknown-linux-gnu" @@ -5,14 +6,41 @@ target triple = "aarch64-unknown-linux-gnu" ; Tests basic vectorization of scalable homogeneous struct literal returns. define void @struct_return_f32_widen(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @struct_return_f32_widen -; CHECK-SAME: (ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) -; CHECK: vector.body: -; CHECK: [[WIDE_CALL:%.*]] = call { , } @scalable_vec_masked_foo( [[WIDE_MASKED_LOAD:%.*]], [[ACTIVE_LANE_MASK:%.*]]) -; CHECK: [[WIDE_A:%.*]] = extractvalue { , } [[WIDE_CALL]], 0 -; CHECK: [[WIDE_B:%.*]] = extractvalue { , } [[WIDE_CALL]], 1 -; CHECK: call void @llvm.masked.store.nxv4f32.p0( [[WIDE_A]], ptr {{%.*}}, i32 4, [[ACTIVE_LANE_MASK]]) -; CHECK: call void @llvm.masked.store.nxv4f32.p0( [[WIDE_B]], ptr {{%.*}}, i32 4, [[ACTIVE_LANE_MASK]]) +; CHECK-LABEL: define void @struct_return_f32_widen( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2 +; CHECK-NEXT: [[TMP4:%.*]] = sub i64 1024, [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i64 1024, [[TMP3]] +; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0 +; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1024) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4f32.p0(ptr [[TMP7]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[TMP8:%.*]] = call { , } @scalable_vec_masked_foo( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { , } [[TMP8]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { , } [[TMP8]], 1 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[INDEX]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0( [[TMP9]], ptr [[TMP11]], i32 4, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[INDEX]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0( [[TMP10]], ptr [[TMP12]], i32 4, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP6]]) +; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true +; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; 
entry: br label %for.body @@ -36,14 +64,41 @@ exit: } define void @struct_return_f64_widen(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @struct_return_f64_widen -; CHECK-SAME: (ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) -; CHECK: vector.body: -; CHECK: [[WIDE_CALL:%.*]] = call { , } @scalable_vec_masked_bar( [[WIDE_MASKED_LOAD:%.*]], [[ACTIVE_LANE_MASK:%.*]]) -; CHECK: [[WIDE_A:%.*]] = extractvalue { , } [[WIDE_CALL]], 0 -; CHECK: [[WIDE_B:%.*]] = extractvalue { , } [[WIDE_CALL]], 1 -; CHECK: call void @llvm.masked.store.nxv2f64.p0( [[WIDE_A]], ptr {{%.*}}, i32 8, [[ACTIVE_LANE_MASK]]) -; CHECK: call void @llvm.masked.store.nxv2f64.p0( [[WIDE_B]], ptr {{%.*}}, i32 8, [[ACTIVE_LANE_MASK]]) +; CHECK-LABEL: define void @struct_return_f64_widen( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = sub i64 1024, [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = icmp ugt i64 1024, [[TMP3]] +; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i64 [[TMP4]], i64 0 +; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1024) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2f64.p0(ptr [[TMP7]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[TMP8:%.*]] = call { , } @scalable_vec_masked_bar( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { , } [[TMP8]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { , } [[TMP8]], 1 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[OUT_A]], i64 [[INDEX]] +; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0( [[TMP9]], ptr [[TMP11]], i32 8, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[OUT_B]], i64 [[INDEX]] +; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0( [[TMP10]], ptr [[TMP12]], i32 8, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP6]]) +; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true +; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; entry: br label %for.body @@ -67,15 +122,59 @@ exit: } define void @struct_return_f32_widen_rt_checks(ptr %in, ptr writeonly %out_a, ptr writeonly %out_b) { -; CHECK-LABEL: define void @struct_return_f32_widen_rt_checks -; CHECK-SAME: (ptr [[IN:%.*]], ptr 
writeonly [[OUT_A:%.*]], ptr writeonly [[OUT_B:%.*]]) -; CHECK: entry: -; CHECK: br label %vector.memcheck -; CHECK: vector.memcheck: -; CHECK: vector.body: -; CHECK: call { , } @scalable_vec_masked_foo( [[WIDE_MASKED_LOAD:%.*]], [[ACTIVE_LANE_MASK:%.*]]) -; CHECK: for.body: -; CHECK: call { float, float } @foo(float [[LOAD:%.*]]) +; CHECK-LABEL: define void @struct_return_f32_widen_rt_checks( +; CHECK-SAME: ptr [[IN:%.*]], ptr writeonly [[OUT_A:%.*]], ptr writeonly [[OUT_B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[IN3:%.*]] = ptrtoint ptr [[IN]] to i64 +; CHECK-NEXT: [[OUT_A2:%.*]] = ptrtoint ptr [[OUT_A]] to i64 +; CHECK-NEXT: [[OUT_B1:%.*]] = ptrtoint ptr [[OUT_B]] to i64 +; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]] +; CHECK: [[VECTOR_MEMCHECK]]: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4 +; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[OUT_B1]], [[OUT_A2]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP1]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[OUT_A2]], [[IN3]] +; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP5]], [[TMP4]] +; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP1]], 4 +; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[OUT_B1]], [[IN3]] +; CHECK-NEXT: [[DIFF_CHECK5:%.*]] = icmp ult i64 [[TMP7]], [[TMP6]] +; CHECK-NEXT: [[CONFLICT_RDX6:%.*]] = or i1 [[CONFLICT_RDX]], [[DIFF_CHECK5]] +; CHECK-NEXT: br i1 [[CONFLICT_RDX6]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 2 +; CHECK-NEXT: [[TMP12:%.*]] = sub i64 1024, [[TMP11]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 1024, [[TMP11]] +; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0 +; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1024) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4f32.p0(ptr [[TMP15]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[TMP16:%.*]] = call { , } @scalable_vec_masked_foo( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { , } [[TMP16]], 0 +; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { , } [[TMP16]], 1 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[INDEX]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0( [[TMP17]], ptr [[TMP19]], i32 4, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[INDEX]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0( [[TMP18]], ptr [[TMP20]], i32 4, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP9]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 
[[INDEX]], i64 [[TMP14]]) +; CHECK-NEXT: [[TMP21:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 +; CHECK-NEXT: [[TMP22:%.*]] = xor i1 [[TMP21]], true +; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH]]: +; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll index 4e989c5d3eca8..3b016f8d0a9ff 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll @@ -129,20 +129,8 @@ define i64 @same_exit_block_pre_inc_use4() { ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP8]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[INDEX]], [[LD1]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -203,21 +191,8 @@ define i64 @loop_contains_safe_call() #1 { ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP9]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load float, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[SQRT:%.*]] = tail call fast float @llvm.sqrt.f32(float [[LD1]]) -; CHECK-NEXT: [[CMP:%.*]] = fcmp fast ult float [[SQRT]], 3.000000e+00 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -365,22 +340,8 @@ define i64 @loop_contains_load_after_early_exit(ptr dereferenceable(1024) align( ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]] ; CHECK-NEXT: br label 
[[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LD1]], 1 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[LD2]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll index f9eb9eb2a5a96..c775b44bd1ba6 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll @@ -36,8 +36,7 @@ define void @cost_store_i8(ptr %dst) #0 { ; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 101, [[N_VEC]] ; DEFAULT-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; DEFAULT: vec.epilog.iter.check: -; DEFAULT-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 101, [[N_VEC]] -; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; DEFAULT: vec.epilog.ph: ; DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -89,16 +88,7 @@ define void @cost_store_i8(ptr %dst) #0 { ; PRED-NEXT: [[TMP12:%.*]] = xor i1 [[TMP14]], true ; PRED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; PRED: middle.block: -; PRED-NEXT: br label [[EXIT:%.*]] -; PRED: scalar.ph: ; PRED-NEXT: br label [[LOOP:%.*]] -; PRED: loop: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]] -; PRED-NEXT: store i8 0, ptr [[GEP]], align 1 -; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 100 -; PRED-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; PRED: exit: ; PRED-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll index 3f230b7b9c3c4..e084307c0c2ae 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll @@ -490,8 +490,7 @@ define float @fadd_predicated(ptr noalias nocapture %a, i64 %n) { ; CHECK-ORDERED: %[[MASK:.*]] = select <2 x i1> %0, <2 x float> %[[PHI]], <2 x float> splat (float -0.000000e+00) ; CHECK-ORDERED: %[[RDX]] = 
call float @llvm.vector.reduce.fadd.v2f32(float %[[RDX_PHI]], <2 x float> %[[MASK]]) ; CHECK-ORDERED: for.end: -; CHECK-ORDERED: %[[RES_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ %[[RDX]], %middle.block ] -; CHECK-ORDERED: ret float %[[RES_PHI]] +; CHECK-ORDERED: ret float %[[RDX]] ; CHECK-UNORDERED-LABEL: @fadd_predicated ; CHECK-UNORDERED: vector.ph @@ -507,12 +506,8 @@ define float @fadd_predicated(ptr noalias nocapture %a, i64 %n) { ; CHECK-UNORDERED-NOT: call float @llvm.vector.reduce.fadd ; CHECK-UNORDERED: middle.block ; CHECK-UNORDERED: %[[RDX:.*]] = call float @llvm.vector.reduce.fadd.v2f32(float -0.000000e+00, <2 x float> %[[MASK]]) -; CHECK-UNORDERED: for.body -; CHECK-UNORDERED: %[[LOAD:.*]] = load float, ptr -; CHECK-UNORDERED: %[[FADD2:.*]] = fadd float {{.*}}, %[[LOAD]] ; CHECK-UNORDERED: for.end -; CHECK-UNORDERED: %[[SUM:.*]] = phi float [ %[[FADD2]], %for.body ], [ %[[RDX]], %middle.block ] -; CHECK-UNORDERED: ret float %[[SUM]] +; CHECK-UNORDERED: ret float %[[RDX]] ; CHECK-NOT-VECTORIZED-LABEL: @fadd_predicated ; CHECK-NOT-VECTORIZED-NOT: vector.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll index bdbbfdfa97427..9526a848f8eab 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/struct-return-cost.ll @@ -31,10 +31,7 @@ define void @struct_return_widen(ptr noalias %in, ptr noalias writeonly %out_a, ; CHECK: [[VECTOR_BODY:.*:]] ; CHECK: [[TMP2:%.*]] = call { <2 x half>, <2 x half> } @fixed_vec_foo(<2 x half> [[WIDE_LOAD:%.*]]) ; CHECK: [[TMP3:%.*]] = call { <2 x half>, <2 x half> } @fixed_vec_foo(<2 x half> [[WIDE_LOAD1:%.*]]) -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR2:[0-9]+]] ; CHECK: [[EXIT:.*:]] ; entry: @@ -82,12 +79,9 @@ define void @struct_return_replicate(ptr noalias %in, ptr noalias writeonly %out ; CHECK: [[ENTRY:.*:]] ; CHECK: [[VECTOR_PH:.*:]] ; CHECK: [[VECTOR_BODY:.*:]] -; CHECK: [[TMP2:%.*]] = tail call { half, half } @foo(half [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] -; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR3]] +; CHECK: [[TMP2:%.*]] = tail call { half, half } @foo(half [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] +; CHECK: [[TMP4:%.*]] = tail call { half, half } @foo(half [[TMP3:%.*]]) #[[ATTR2]] ; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] -; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR3]] ; CHECK: [[EXIT:.*:]] ; entry: @@ -162,7 +156,7 @@ define void @struct_return_scalable(ptr noalias %in, ptr noalias writeonly %out_ ; CHECK: [[MIDDLE_BLOCK:.*:]] ; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR3]] +; CHECK: [[CALL:%.*]] = tail call { half, half } @foo(half [[IN_VAL:%.*]]) #[[ATTR2]] ; CHECK: [[EXIT:.*:]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll index 793813e55409e..b78ada07db1b3 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll @@ -42,8 +42,7 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, 
i64 %N) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll index 44a8eba84a1d0..27779d5ceb0ac 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll @@ -41,8 +41,7 @@ define i64 @int_reduction_add(ptr %a, i64 %N) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll index 4c4c9e57b4ffb..ebc1c1ef1e773 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll @@ -38,8 +38,7 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll index 612aad5c665cd..bbc0e33af8c84 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll @@ -48,9 +48,8 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 1024, [[N_VEC]] -; 
CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] @@ -60,7 +59,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-NEXT: store <8 x i8> splat (i8 1), ptr [[TMP9]], align 1 ; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024 -; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK: vec.epilog.scalar.ph: @@ -97,9 +96,8 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-VF8: vec.epilog.iter.check: -; CHECK-VF8-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 1024, [[N_VEC]] -; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 -; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK-VF8: vec.epilog.ph: ; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] @@ -109,7 +107,7 @@ define void @main_vf_vscale_x_16(ptr %A) #0 { ; CHECK-VF8-NEXT: store <8 x i8> splat (i8 1), ptr [[TMP9]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 ; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024 -; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: ; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK-VF8: vec.epilog.scalar.ph: @@ -152,7 +150,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; CHECK-NEXT: store splat (i64 1), ptr [[TMP5]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], 
label [[SCALAR_PH:%.*]] @@ -184,14 +182,13 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: store splat (i64 1), ptr [[TMP7]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-VF8: vec.epilog.iter.check: -; CHECK-VF8-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 1024, [[N_VEC]] -; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 -; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK-VF8: vec.epilog.ph: ; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] @@ -201,7 +198,7 @@ define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP9]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 ; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024 -; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: ; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK-VF8: vec.epilog.scalar.ph: @@ -264,14 +261,13 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-NEXT: store splat (i64 1), ptr [[TMP7]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem 
i64 [[N]], 8 @@ -283,7 +279,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP9]], align 1 ; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX4]], 8 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT5]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] ; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -317,14 +313,13 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: store splat (i64 1), ptr [[TMP7]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-VF8: vec.epilog.iter.check: -; CHECK-VF8-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 -; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK-VF8: vec.epilog.ph: ; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-VF8-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], 8 @@ -336,7 +331,7 @@ define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP9]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX4]], 8 ; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT5]], [[N_VEC3]] -; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: ; CHECK-VF8-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] ; CHECK-VF8-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -387,15 +382,14 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-NEXT: store zeroinitializer, ptr [[TMP6]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], 
label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 10000, [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[START]], i64 10000 @@ -406,7 +400,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-NEXT: store <8 x i8> zeroinitializer, ptr [[NEXT_GEP2]], align 1 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 8 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 10000 -; CHECK-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK: vec.epilog.scalar.ph: @@ -439,15 +433,14 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-VF8-NEXT: store zeroinitializer, ptr [[TMP6]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-VF8-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-VF8: vec.epilog.iter.check: ; CHECK-VF8-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] -; CHECK-VF8-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 10000, [[N_VEC]] -; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 -; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 +; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK-VF8: vec.epilog.ph: ; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-VF8-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[START]], i64 10000 @@ -458,7 +451,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-VF8-NEXT: store <8 x i8> zeroinitializer, ptr [[NEXT_GEP2]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 8 ; CHECK-VF8-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 10000 -; CHECK-VF8-NEXT: br i1 [[TMP9]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP9]], label 
[[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: ; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK-VF8: vec.epilog.scalar.ph: @@ -521,14 +514,13 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-NEXT: store [[TMP13]], ptr [[TMP11]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF15:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF4:%.*]] = urem i64 [[N]], 2 @@ -544,7 +536,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-NEXT: store <2 x float> [[TMP20]], ptr [[TMP19]], align 4 ; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX6]], 2 ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC5]] -; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N10:%.*]] = icmp eq i64 [[N]], [[N_VEC5]] ; CHECK-NEXT: br i1 [[CMP_N10]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -584,7 +576,7 @@ define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 1 ; CHECK-VF8-NEXT: store [[TMP11]], ptr [[TMP9]], align 4 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; CHECK-VF8-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]] @@ -614,18 +606,12 @@ exit: } ; Loop with vscale-based trip count vscale x 1024. -; TODO: No epilogue vectorizations should remain when choosing VF = vscale x 4. 
define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalias %b) vscale_range(1, 16) #0 { ; CHECK-LABEL: @trip_count_vscale_no_epilogue_iterations( -; CHECK-NEXT: iter.check: +; CHECK-NEXT: entry: ; CHECK-NEXT: [[V:%.*]] = tail call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[N:%.*]] = mul nuw nsw i64 [[V]], 1024 -; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] -; CHECK: vector.main.loop.iter.check: -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 3 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: br label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 @@ -633,7 +619,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 2 @@ -652,32 +638,11 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-NEXT: store [[TMP13]], ptr [[TMP11]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] -; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] -; CHECK: vec.epilog.ph: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] -; CHECK: vec.epilog.vector.body: -; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT7:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX4]] -; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x float>, ptr [[TMP18]], align 4 -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX4]] -; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x float>, ptr [[TMP19]], align 4 -; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x float> [[WIDE_LOAD5]], [[WIDE_LOAD6]] -; CHECK-NEXT: store <2 x float> [[TMP20]], ptr [[TMP19]], align 4 -; CHECK-NEXT: [[INDEX_NEXT7]] = add nuw i64 [[INDEX4]], 2 -; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT7]], 
[[N]] -; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] -; CHECK: vec.epilog.middle.block: -; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] -; CHECK: vec.epilog.scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]] +; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: ; @@ -712,7 +677,7 @@ define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalia ; CHECK-VF8-NEXT: store [[TMP11]], ptr [[TMP9]], align 4 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] ; CHECK-VF8-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vscale-fixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vscale-fixed.ll index 5742b3ad45749..4706798c525bd 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vscale-fixed.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vscale-fixed.ll @@ -55,8 +55,7 @@ define void @main_vf_vscale_x_16(ptr %A, i64 %n) #0 { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -110,10 +109,7 @@ define void @main_vf_vscale_x_16(ptr %A, i64 %n) #0 { ; CHECK-EPILOG-PREFER-SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-EPILOG-PREFER-SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-EPILOG-PREFER-SCALABLE: vec.epilog.iter.check: -; CHECK-EPILOG-PREFER-SCALABLE-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-EPILOG-PREFER-SCALABLE-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-EPILOG-PREFER-SCALABLE-NEXT: [[TMP12:%.*]] = shl nuw i64 [[TMP11]], 3 -; CHECK-EPILOG-PREFER-SCALABLE-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP12]] +; CHECK-EPILOG-PREFER-SCALABLE-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP1]] ; CHECK-EPILOG-PREFER-SCALABLE-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK-EPILOG-PREFER-SCALABLE: vec.epilog.ph: ; CHECK-EPILOG-PREFER-SCALABLE-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, 
[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll index 33b3629337e8b..3b0bd87587cc0 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll @@ -116,7 +116,8 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no ; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP19]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]] ; PREDICATED_TAIL_FOLDING: middle.block: ; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_TAIL_FOLDING: scalar.ph: +; PREDICATED_TAIL_FOLDING: for.end: +; PREDICATED_TAIL_FOLDING-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 @@ -243,10 +244,11 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no ; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP16:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i64 0 ; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] -; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]] +; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP3:![0-9]+]] ; PREDICATED_TAIL_FOLDING: middle.block: ; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_TAIL_FOLDING: scalar.ph: +; PREDICATED_TAIL_FOLDING: for.end: +; PREDICATED_TAIL_FOLDING-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 @@ -377,10 +379,11 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no ; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP18:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i64 0 ; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT4]] -; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP6:![0-9]+]] +; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP18]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]] ; PREDICATED_TAIL_FOLDING: middle.block: ; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_TAIL_FOLDING: scalar.ph: +; PREDICATED_TAIL_FOLDING: for.end: +; PREDICATED_TAIL_FOLDING-NEXT: ret void ; entry: %conv = zext i8 %guard1 to i32 @@ -537,10 +540,11 @@ define dso_local void @masked_strided_factor4(ptr noalias nocapture readonly %p, ; PREDICATED_TAIL_FOLDING-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 [[INDEX]], i32 [[TMP6]]) ; PREDICATED_TAIL_FOLDING-NEXT: [[TMP23:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i64 0 ; PREDICATED_TAIL_FOLDING-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] -; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP8:![0-9]+]] +; PREDICATED_TAIL_FOLDING-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP5:![0-9]+]] ; PREDICATED_TAIL_FOLDING: middle.block: ; PREDICATED_TAIL_FOLDING-NEXT: br label [[FOR_END:%.*]] -; 
PREDICATED_TAIL_FOLDING: scalar.ph: +; PREDICATED_TAIL_FOLDING: for.end: +; PREDICATED_TAIL_FOLDING-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll index 16acd3f5ccdbd..b8b4fbd3140de 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll @@ -69,7 +69,8 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll index 069d369a4cdf0..cb2c003872573 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-optsize.ll @@ -29,7 +29,8 @@ define void @trip1025_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapt ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: +; CHECK: for.end: +; CHECK-NEXT: ret void ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll index 61448bdbbc651..33ee0d6e2ae2f 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-overflow-checks.ll @@ -33,7 +33,10 @@ define void @cannot_overflow_i32_induction_var(ptr noalias %dst, ptr readonly %s ; CHECK-NEXT: br i1 [[TMP5]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.cond.cleanup.loopexit: +; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void ; entry: %cmp6.not = icmp eq i32 %N, 0 @@ -87,10 +90,13 @@ define void @can_overflow_i64_induction_var(ptr noalias %dst, ptr readonly %src, ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP7]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP2]]) ; CHECK-NEXT: [[TMP8:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i64 0 -; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.cond.cleanup.loopexit: +; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void ; entry: %cmp6.not = icmp eq i64 %N, 0 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll index b725669f78c30..b5544dc3310c9 100644 --- 
a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll @@ -36,21 +36,9 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP14]]) -; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[WHILE_BODY:%.*]] -; CHECK: while.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: while.end.loopexit: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP19]] ; ; CHECK-IN-LOOP-LABEL: @add_reduction_i32( ; CHECK-IN-LOOP-NEXT: entry: @@ -81,21 +69,9 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 { ; CHECK-IN-LOOP-NEXT: [[TMP19:%.*]] = xor i1 [[TMP18]], true ; CHECK-IN-LOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-IN-LOOP: middle.block: -; CHECK-IN-LOOP-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK-IN-LOOP: scalar.ph: ; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]] -; CHECK-IN-LOOP: while.body: -; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]] -; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; CHECK-IN-LOOP-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; CHECK-IN-LOOP-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-IN-LOOP: while.end.loopexit: -; CHECK-IN-LOOP-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; CHECK-IN-LOOP-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; CHECK-IN-LOOP-NEXT: ret i32 [[TMP15]] ; entry: br label %while.body @@ -141,23 +117,11 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP17:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true -; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: 
scalar.ph: ; CHECK-NEXT: br label [[WHILE_BODY:%.*]] -; CHECK: while.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]] -; CHECK-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: while.end.loopexit: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP14]] ; ; CHECK-IN-LOOP-LABEL: @add_reduction_f32( ; CHECK-IN-LOOP-NEXT: entry: @@ -185,23 +149,11 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 { ; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-IN-LOOP-NEXT: [[TMP17:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-IN-LOOP-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true -; CHECK-IN-LOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-IN-LOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-IN-LOOP: middle.block: -; CHECK-IN-LOOP-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK-IN-LOOP: scalar.ph: ; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]] -; CHECK-IN-LOOP: while.body: -; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]] -; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4 -; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]] -; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; CHECK-IN-LOOP-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; CHECK-IN-LOOP-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-IN-LOOP: while.end.loopexit: -; CHECK-IN-LOOP-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], [[WHILE_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-IN-LOOP-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-IN-LOOP-NEXT: ret float [[TMP14]] ; entry: br label %while.body @@ -251,32 +203,12 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP16:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP16]], true -; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32( [[TMP20]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: 
br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]] -; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP26]], 5 -; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; CHECK: if.then: -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[RDX]], [[TMP27]] -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[RES]] = phi i32 [ [[RDX]], [[FOR_BODY]] ], [ [[XOR]], [[IF_THEN]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: -; CHECK-NEXT: [[RES_LCSSA:%.*]] = phi i32 [ [[RES]], [[FOR_INC]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RES_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP25]] ; ; CHECK-IN-LOOP-LABEL: @cond_xor_reduction( ; CHECK-IN-LOOP-NEXT: entry: @@ -308,31 +240,11 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 { ; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-IN-LOOP-NEXT: [[TMP22:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-IN-LOOP-NEXT: [[TMP23:%.*]] = xor i1 [[TMP22]], true -; CHECK-IN-LOOP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-IN-LOOP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-IN-LOOP: middle.block: -; CHECK-IN-LOOP-NEXT: br label [[FOR_END:%.*]] -; CHECK-IN-LOOP: scalar.ph: -; CHECK-IN-LOOP-NEXT: br label [[FOR_BODY:%.*]] -; CHECK-IN-LOOP: for.body: -; CHECK-IN-LOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-IN-LOOP-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ] -; CHECK-IN-LOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]] -; CHECK-IN-LOOP-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-IN-LOOP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP24]], 5 -; CHECK-IN-LOOP-NEXT: br i1 [[TOBOOL]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; CHECK-IN-LOOP: if.then: -; CHECK-IN-LOOP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-IN-LOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-IN-LOOP-NEXT: [[XOR:%.*]] = xor i32 [[RDX]], [[TMP25]] -; CHECK-IN-LOOP-NEXT: br label [[FOR_INC]] -; CHECK-IN-LOOP: for.inc: -; CHECK-IN-LOOP-NEXT: [[RES]] = phi i32 [ [[RDX]], [[FOR_BODY]] ], [ [[XOR]], [[IF_THEN]] ] -; CHECK-IN-LOOP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-IN-LOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-IN-LOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-IN-LOOP-NEXT: br label [[FOR_INC:%.*]] ; CHECK-IN-LOOP: for.end: -; CHECK-IN-LOOP-NEXT: [[RES_LCSSA:%.*]] = phi i32 [ [[RES]], [[FOR_INC]] ], [ 
[[TMP19]], [[MIDDLE_BLOCK]] ] -; CHECK-IN-LOOP-NEXT: ret i32 [[RES_LCSSA]] +; CHECK-IN-LOOP-NEXT: ret i32 [[TMP19]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll index 18793864531a9..5531b3ca51140 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll @@ -72,7 +72,8 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -176,10 +177,11 @@ define void @cond_memset(i32 %val, ptr noalias readonly %cond_ptr, ptr noalias % ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT16]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[TMP93]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP66:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP67:%.*]] = xor i1 [[TMP66]], true -; CHECK-NEXT: br i1 [[TMP67]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP67]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll index ec178727ce73b..9ebe79096adc4 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll @@ -33,7 +33,8 @@ define void @simple_memset(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -73,10 +74,11 @@ define void @simple_memset_v4i32(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX1]], i64 [[TMP2]]) ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP5:%.*]] = xor i1 [[TMP6]], true -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -120,10 +122,11 @@ define void @simple_memcpy(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP14:%.*]] = xor i1 [[TMP12]], true -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -180,10 +183,11 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[TMP21:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP22:%.*]] = xor i1 [[TMP21]], true ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] -; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -231,10 +235,11 @@ define void @simple_gather_scatter(ptr noalias %dst, ptr noalias %src, ptr noali ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP15:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP16:%.*]] = xor i1 [[TMP15]], true -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -284,10 +289,11 @@ define void @uniform_load(ptr noalias %dst, ptr noalias readonly %src, i64 %n) # ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP13:%.*]] = xor i1 [[TMP14]], true -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: +; CHECK: for.end: +; CHECK-NEXT: ret void ; entry: @@ -342,10 +348,11 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP17:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP18:%.*]] = xor i1 [[TMP17]], true -; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: +; CHECK: for.end: +; CHECK-NEXT: ret void ; entry: @@ -403,10 +410,11 @@ define void @uniform_store(ptr noalias %dst, ptr noalias readonly %src, i64 %n) ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP12:%.*]] = xor i1 [[TMP13]], true -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: +; CHECK: for.end: +; CHECK-NEXT: ret void ; entry: @@ -454,10 +462,11 @@ define void @simple_fdiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -509,10 +518,11 @@ define void @simple_idiv(ptr noalias %dst, ptr noalias %src, i64 %n) #0 { ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]]) ; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-NEXT: [[TMP17:%.*]] = xor i1 [[TMP14]], true -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: while.end.loopexit: +; CHECK-NEXT: ret void ; entry: br label %while.body @@ -551,7 +561,7 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: store [[BROADCAST_SPLAT]], ptr [[TMP7]], align 4 ; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], [[TMP3]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll index c3ace983fd911..7628b39cf4eb7 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll @@ -7,21 +7,20 @@ define void @test_big_little_params(ptr readonly %a, ptr readonly %b, ptr noalia ; CHECK-LABEL: define void @test_big_little_params ; CHECK-SAME: (ptr readonly [[A:%.*]], ptr readonly [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2 -; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025) +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = 
phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i32.p0(ptr [[TMP0]], i32 4, [[ACTIVE_LANE_MASK]], poison) -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP1]], i32 1, [[ACTIVE_LANE_MASK]], poison) -; CHECK-NEXT: [[TMP2:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD1]], [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] -; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0( [[TMP2]], ptr [[TMP3]], i32 4, [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i32.p0(ptr [[TMP2]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[TMP4:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD1]], [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0( [[TMP4]], ptr [[TMP5]], i32 4, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; CHECK-NEXT: [[TMP6:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i64 0 ; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_BODY]], label [[EXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]] @@ -52,21 +51,20 @@ define void @test_little_big_params(ptr readonly %a, ptr readonly %b, ptr noalia ; CHECK-LABEL: define void @test_little_big_params ; CHECK-SAME: (ptr readonly [[A:%.*]], ptr readonly [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 1 -; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2f32.p0(ptr [[TMP0]], i32 4, [[ACTIVE_LANE_MASK]], poison) -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv2f64.p0(ptr [[TMP1]], i32 8, [[ACTIVE_LANE_MASK]], poison) -; CHECK-NEXT: [[TMP2:%.*]] = call @bar_vector( [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD1]], [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds 
double, ptr [[C]], i64 [[INDEX]] -; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0( [[TMP2]], ptr [[TMP3]], i32 8, [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ splat (i1 true), [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2f32.p0(ptr [[TMP2]], i32 4, [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call @llvm.masked.load.nxv2f64.p0(ptr [[TMP3]], i32 8, [[ACTIVE_LANE_MASK]], poison) +; CHECK-NEXT: [[TMP4:%.*]] = call @bar_vector( [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD1]], [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0( [[TMP4]], ptr [[TMP5]], i32 8, [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; CHECK-NEXT: [[TMP6:%.*]] = extractelement [[ACTIVE_LANE_MASK_NEXT]], i64 0 ; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP3:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll index c54511e957ef8..209fa60b260aa 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll @@ -37,9 +37,7 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 % ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP23:%.*]] = shl nuw i64 [[TMP22]], 1 -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP23]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_PH]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll index e7d25a0446a70..742097bdae890 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll @@ -562,7 +562,8 @@ define void @simple_histogram_tailfold(ptr noalias %buckets, ptr readonly %indic ; CHECK-NEXT: br i1 [[TMP11]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK:%.*]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: for.exit: +; CHECK-NEXT: ret void ; entry: br label %for.body @@ -626,7 +627,7 @@ define void @simple_histogram_rtdepcheck(ptr noalias %buckets, ptr %array, ptr % ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP16]], 
label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]] @@ -719,7 +720,7 @@ define void @simple_histogram_64b(ptr noalias %buckets, ptr readonly %indices, i ; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv2p0.i64( [[TMP6]], i64 1, splat (i1 true)) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll index e6ff39bebeda3..6da3c77cd35c1 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/synthesize-mask-for-call.ll @@ -317,19 +317,7 @@ define void @test_v4_v4m(ptr noalias %a, ptr readonly %b) #3 { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; @@ -369,19 +357,7 @@ define void @test_v2_v4m(ptr noalias %a, ptr readonly %b) #3 { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR2:[0-9]+]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; 
CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; @@ -421,19 +397,7 @@ define void @test_v2_v4(ptr noalias %a, ptr readonly %b) #3 { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR3:[0-9]+]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll index c44db7db673fe..1607755e624a3 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll @@ -71,16 +71,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC]] ; DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA: middle.block: -; DATA-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; DATA: scalar.ph: ; DATA-NEXT: br label [[WHILE_BODY:%.*]] -; DATA: while.body: -; DATA-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; DATA-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; DATA-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 -; DATA-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; DATA-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; DATA-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; DATA: while.end.loopexit: ; DATA-NEXT: ret void ; @@ -115,16 +106,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_NO_LANEMASK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT6]], [[N_VEC]] ; DATA_NO_LANEMASK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA_NO_LANEMASK: middle.block: -; DATA_NO_LANEMASK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; DATA_NO_LANEMASK: scalar.ph: ; DATA_NO_LANEMASK-NEXT: br label [[WHILE_BODY:%.*]] -; DATA_NO_LANEMASK: while.body: -; DATA_NO_LANEMASK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; DATA_NO_LANEMASK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; DATA_NO_LANEMASK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 -; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; DATA_NO_LANEMASK-NEXT: [[CMP10:%.*]] = icmp ult 
i64 [[INDEX_NEXT]], [[N]] -; DATA_NO_LANEMASK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; DATA_NO_LANEMASK: while.end.loopexit: ; DATA_NO_LANEMASK-NEXT: ret void ; @@ -150,16 +132,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL-NEXT: [[TMP7:%.*]] = xor i1 [[TMP6]], true ; DATA_AND_CONTROL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA_AND_CONTROL: middle.block: -; DATA_AND_CONTROL-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; DATA_AND_CONTROL: scalar.ph: ; DATA_AND_CONTROL-NEXT: br label [[WHILE_BODY:%.*]] -; DATA_AND_CONTROL: while.body: -; DATA_AND_CONTROL-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; DATA_AND_CONTROL-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; DATA_AND_CONTROL-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 -; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; DATA_AND_CONTROL-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; DATA_AND_CONTROL-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; DATA_AND_CONTROL: while.end.loopexit: ; DATA_AND_CONTROL-NEXT: ret void ; @@ -190,16 +163,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[TMP12:%.*]] = xor i1 [[TMP15]], true ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; DATA_AND_CONTROL_NO_RT_CHECK: middle.block: -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br label [[WHILE_END_LOOPEXIT:%.*]] -; DATA_AND_CONTROL_NO_RT_CHECK: scalar.ph: ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br label [[WHILE_BODY:%.*]] -; DATA_AND_CONTROL_NO_RT_CHECK: while.body: -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[CMP10:%.*]] = icmp ult i64 [[INDEX_NEXT]], [[N]] -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br i1 [[CMP10]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; DATA_AND_CONTROL_NO_RT_CHECK: while.end.loopexit: ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll index a5f47e7275f65..2a19402347e40 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll @@ -212,8 +212,7 @@ define void @test_interleave_store_one_constant(ptr noalias %src, ptr noalias %d ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: 
[[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -366,8 +365,7 @@ define void @single_fmul_used_by_each_member(ptr noalias %A, ptr noalias %B, ptr ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF7]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll index 038330b99b0f5..c26176028626b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll @@ -22,21 +22,6 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) { ; VF2-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF2: [[MIDDLE_BLOCK]]: ; VF2-NEXT: br label %[[EXIT:.*]] -; VF2: [[SCALAR_PH:.*]]: -; VF2-NEXT: br label %[[LOOP:.*]] -; VF2: [[LOOP]]: -; VF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1 -; VF2-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]] -; VF2-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8 -; VF2-NEXT: store i64 [[L_0]], ptr [[DATA_0]], align 8 -; VF2-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[MUL_2]], 1 -; VF2-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[ADD_1]] -; VF2-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8 -; VF2-NEXT: store i64 [[L_1]], ptr [[DATA_1]], align 8 -; VF2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 2 -; VF2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF2: [[EXIT]]: ; VF2-NEXT: ret void ; @@ -86,33 +71,18 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) { ; VF4-NEXT: br i1 false, label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]] ; VF4: [[PRED_STORE_IF5]]: ; VF4-NEXT: [[TMP27:%.*]] = shl nsw i64 3, 1 -; VF4-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP27]] -; VF4-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8 -; VF4-NEXT: store i64 [[TMP29]], ptr [[TMP28]], align 8 -; VF4-NEXT: [[TMP30:%.*]] = or disjoint i64 [[TMP27]], 1 -; VF4-NEXT: [[TMP31:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP30]] -; VF4-NEXT: [[TMP32:%.*]] = load i64, ptr [[TMP31]], align 8 -; VF4-NEXT: store i64 [[TMP32]], ptr [[TMP31]], align 8 -; VF4-NEXT: br label %[[PRED_STORE_CONTINUE6]] -; VF4: [[PRED_STORE_CONTINUE6]]: -; VF4-NEXT: br 
label %[[MIDDLE_BLOCK:.*]] -; VF4: [[MIDDLE_BLOCK]]: -; VF4-NEXT: br label %[[EXIT:.*]] -; VF4: [[SCALAR_PH:.*]]: -; VF4-NEXT: br label %[[LOOP:.*]] -; VF4: [[LOOP]]: -; VF4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF4-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1 -; VF4-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]] +; VF4-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP27]] ; VF4-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8 ; VF4-NEXT: store i64 [[L_0]], ptr [[DATA_0]], align 8 -; VF4-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[MUL_2]], 1 +; VF4-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[TMP27]], 1 ; VF4-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[ADD_1]] ; VF4-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8 ; VF4-NEXT: store i64 [[L_1]], ptr [[DATA_1]], align 8 -; VF4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF4-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 2 -; VF4-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] +; VF4-NEXT: br label %[[PRED_STORE_CONTINUE6]] +; VF4: [[PRED_STORE_CONTINUE6]]: +; VF4-NEXT: br label %[[MIDDLE_BLOCK:.*]] +; VF4: [[MIDDLE_BLOCK]]: +; VF4-NEXT: br label %[[EXIT:.*]] ; VF4: [[EXIT]]: ; VF4-NEXT: ret void ; @@ -237,27 +207,6 @@ define void @test_complex_add_float_tc_4(ptr %res, ptr noalias %A, ptr noalias % ; VF2-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF2: [[MIDDLE_BLOCK]]: ; VF2-NEXT: br label %[[EXIT:.*]] -; VF2: [[SCALAR_PH:.*]]: -; VF2-NEXT: br label %[[LOOP:.*]] -; VF2: [[LOOP]]: -; VF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF2-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i64 [[IV]] -; VF2-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i64 [[IV]] -; VF2-NEXT: [[L_A_0:%.*]] = load float, ptr [[GEP_A_0]], align 4 -; VF2-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_A_0]], i64 4 -; VF2-NEXT: [[L_A_1:%.*]] = load float, ptr [[GEP_A_1]], align 4 -; VF2-NEXT: [[L_B_0:%.*]] = load float, ptr [[GEP_B_0]], align 4 -; VF2-NEXT: [[ADD_0:%.*]] = fadd float [[L_A_0]], [[L_B_0]] -; VF2-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_B_0]], i64 4 -; VF2-NEXT: [[L_B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4 -; VF2-NEXT: [[ADD_1:%.*]] = fadd float [[L_A_1]], [[L_B_1]] -; VF2-NEXT: [[GEP_RES_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RES]], i64 [[IV]] -; VF2-NEXT: store float [[ADD_0]], ptr [[GEP_RES_0]], align 4 -; VF2-NEXT: [[GEP_RES_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_RES_0]], i64 4 -; VF2-NEXT: store float [[ADD_1]], ptr [[GEP_RES_1]], align 4 -; VF2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4 -; VF2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF2: [[EXIT]]: ; VF2-NEXT: ret void ; @@ -282,27 +231,6 @@ define void @test_complex_add_float_tc_4(ptr %res, ptr noalias %A, ptr noalias % ; VF4-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF4: [[MIDDLE_BLOCK]]: ; VF4-NEXT: br label %[[EXIT:.*]] -; VF4: [[SCALAR_PH:.*]]: -; VF4-NEXT: br label %[[LOOP:.*]] -; VF4: [[LOOP]]: -; VF4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF4-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i64 [[IV]] -; VF4-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds nuw { float, float 
}, ptr [[B]], i64 [[IV]] -; VF4-NEXT: [[L_A_0:%.*]] = load float, ptr [[GEP_A_0]], align 4 -; VF4-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_A_0]], i64 4 -; VF4-NEXT: [[L_A_1:%.*]] = load float, ptr [[GEP_A_1]], align 4 -; VF4-NEXT: [[L_B_0:%.*]] = load float, ptr [[GEP_B_0]], align 4 -; VF4-NEXT: [[ADD_0:%.*]] = fadd float [[L_A_0]], [[L_B_0]] -; VF4-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_B_0]], i64 4 -; VF4-NEXT: [[L_B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4 -; VF4-NEXT: [[ADD_1:%.*]] = fadd float [[L_A_1]], [[L_B_1]] -; VF4-NEXT: [[GEP_RES_0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RES]], i64 [[IV]] -; VF4-NEXT: store float [[ADD_0]], ptr [[GEP_RES_0]], align 4 -; VF4-NEXT: [[GEP_RES_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_RES_0]], i64 4 -; VF4-NEXT: store float [[ADD_1]], ptr [[GEP_RES_1]], align 4 -; VF4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF4-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4 -; VF4-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF4: [[EXIT]]: ; VF4-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll index 46ba7f645a03e..6c36dfb81311b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll @@ -16,7 +16,6 @@ define void @load_store_interleave_group(ptr noalias %data) { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -24,7 +23,7 @@ define void @load_store_interleave_group(ptr noalias %data) { ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP0]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 8 ; CHECK-NEXT: store [[WIDE_LOAD]], ptr [[TMP1]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -66,7 +65,6 @@ define void @test_2xi64_unary_op_load_interleave_group(ptr noalias %data, ptr no ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1111, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1111, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -75,7 +73,7 @@ define void @test_2xi64_unary_op_load_interleave_group(ptr noalias %data, ptr no ; CHECK-NEXT: [[TMP7:%.*]] = load , ptr [[TMP1]], align 8 ; CHECK-NEXT: [[TMP9:%.*]] = fneg [[TMP7]] ; CHECK-NEXT: store [[TMP9]], ptr [[TMP1]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] ; CHECK-NEXT: 
[[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll index a044ae8f5d90e..d290f2d4f5bc3 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll @@ -27,21 +27,6 @@ define void @load_store_interleave_group(ptr noalias %data) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1 -; CHECK-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]] -; CHECK-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8 -; CHECK-NEXT: store i64 [[L_0]], ptr [[DATA_0]], align 8 -; CHECK-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[MUL_2]], 1 -; CHECK-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[ADD_1]] -; CHECK-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8 -; CHECK-NEXT: store i64 [[L_1]], ptr [[DATA_1]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -105,25 +90,6 @@ define void @test_2xi64_with_wide_load(ptr noalias %data, ptr noalias %factor) { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[IV]] -; CHECK-NEXT: [[L_FACTOR:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[TMP13:%.*]] = shl nsw i64 [[IV]], 1 -; CHECK-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP13]] -; CHECK-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8 -; CHECK-NEXT: [[MUL_0:%.*]] = mul i64 [[L_FACTOR]], [[L_0]] -; CHECK-NEXT: store i64 [[MUL_0]], ptr [[DATA_0]], align 8 -; CHECK-NEXT: [[TMP14:%.*]] = or disjoint i64 [[TMP13]], 1 -; CHECK-NEXT: [[DATA_1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP14]] -; CHECK-NEXT: [[L_1:%.*]] = load i64, ptr [[DATA_1]], align 8 -; CHECK-NEXT: [[MUL_1:%.*]] = mul i64 [[L_FACTOR]], [[L_1]] -; CHECK-NEXT: store i64 [[MUL_1]], ptr [[DATA_1]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll index edb951946d873..187edb580f8e2 100644 --- 
a/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/type-shrinkage-insertelt.ll @@ -49,23 +49,6 @@ define void @test0(ptr noalias %M3, ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_INC1286_LOOPEXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[IF_THEN1165_US:%.*]] -; CHECK: if.then1165.us: -; CHECK-NEXT: [[INDVARS_IV1783:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT1784:%.*]], [[IF_THEN1165_US]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[INDVARS_IV1783]] -; CHECK-NEXT: [[L_A:%.*]] = load i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[CONV1177_US:%.*]] = zext i16 [[L_A]] to i32 -; CHECK-NEXT: [[ADD1178_US:%.*]] = add nsw i32 [[CONV1177_US]], 10 -; CHECK-NEXT: [[CONV1179_US:%.*]] = trunc i32 [[ADD1178_US]] to i16 -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV1783]] -; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 8 -; CHECK-NEXT: [[IDXPROM1181_US:%.*]] = ashr exact i64 [[L_B]], 32 -; CHECK-NEXT: [[ARRAYIDX1185_US:%.*]] = getelementptr inbounds i16, ptr [[M3]], i64 [[IDXPROM1181_US]] -; CHECK-NEXT: store i16 [[CONV1179_US]], ptr [[ARRAYIDX1185_US]], align 2 -; CHECK-NEXT: [[INDVARS_IV_NEXT1784]] = add nuw nsw i64 [[INDVARS_IV1783]], 1 -; CHECK-NEXT: [[EXITCOND1785:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1784]], 16 -; CHECK-NEXT: br i1 [[EXITCOND1785]], label [[FOR_INC1286_LOOPEXIT]], label [[IF_THEN1165_US]] ; CHECK: for.inc1286.loopexit: ; CHECK-NEXT: ret void ; @@ -141,24 +124,6 @@ define void @test1(ptr noalias %M3, ptr noalias %A, ptr noalias %B, ptr noalias ; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_INC1286_LOOPEXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[IF_THEN1165_US:%.*]] -; CHECK: if.then1165.us: -; CHECK-NEXT: [[INDVARS_IV1783:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT1784:%.*]], [[IF_THEN1165_US]] ] -; CHECK-NEXT: [[FPTR:%.*]] = load i32, ptr [[C]], align 4 -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 [[INDVARS_IV1783]] -; CHECK-NEXT: [[L_A:%.*]] = load i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[CONV1177_US:%.*]] = zext i16 [[L_A]] to i32 -; CHECK-NEXT: [[ADD1178_US:%.*]] = add nsw i32 [[CONV1177_US]], [[FPTR]] -; CHECK-NEXT: [[CONV1179_US:%.*]] = trunc i32 [[ADD1178_US]] to i16 -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV1783]] -; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 8 -; CHECK-NEXT: [[IDXPROM1181_US:%.*]] = ashr exact i64 [[L_B]], 32 -; CHECK-NEXT: [[ARRAYIDX1185_US:%.*]] = getelementptr inbounds i16, ptr [[M3]], i64 [[IDXPROM1181_US]] -; CHECK-NEXT: store i16 [[CONV1179_US]], ptr [[ARRAYIDX1185_US]], align 2 -; CHECK-NEXT: [[INDVARS_IV_NEXT1784]] = add nuw nsw i64 [[INDVARS_IV1783]], 1 -; CHECK-NEXT: [[EXITCOND1785:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1784]], 16 -; CHECK-NEXT: br i1 [[EXITCOND1785]], label [[FOR_INC1286_LOOPEXIT]], label [[IF_THEN1165_US]] ; CHECK: for.inc1286.loopexit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll index 
21928ce715007..44b4e5a8c2bc7 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-loop-backedge-elimination-epilogue.ll @@ -29,8 +29,7 @@ define void @test_remove_vector_loop_region_epilogue(ptr %dst, i1 %c) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TC]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TC]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll b/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll index c8eecd7283f1e..96a25a853f880 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/widen-call-with-intrinsic-or-libfunc.ll @@ -127,7 +127,8 @@ define void @test(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/AMDGPU/buffer-fat-pointer.ll b/llvm/test/Transforms/LoopVectorize/AMDGPU/buffer-fat-pointer.ll index b7a697831e117..26b80967c336a 100644 --- a/llvm/test/Transforms/LoopVectorize/AMDGPU/buffer-fat-pointer.ll +++ b/llvm/test/Transforms/LoopVectorize/AMDGPU/buffer-fat-pointer.ll @@ -7,21 +7,21 @@ define amdgpu_kernel void @_dynamic_pack_simple_dispatch_0_pack_i32(ptr addrspace(1) %.ptr, i64 %v) { ; CHECK-LABEL: define amdgpu_kernel void @_dynamic_pack_simple_dispatch_0_pack_i32( ; CHECK-SAME: ptr addrspace(1) [[DOTPTR:%.*]], i64 [[V:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[_LR_PH5:.*]]: -; CHECK-NEXT: [[DOTRSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[DOTPTR]], i16 0, i32 -2147483648, i32 159744) +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[DOTRSRC:%.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p1(ptr addrspace(1) [[DOTPTR]], i16 0, i64 2147483648, i32 159744) ; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(8) [[DOTRSRC]] to ptr addrspace(7) ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[_LR_PH5]] ], [ [[TMP5:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[TMP5:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr addrspace(7) [[TMP1]], i32 0 ; CHECK-NEXT: [[TMP5]] = add i64 [[TMP3]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[TMP3]], [[V]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[__CRIT_EDGE_LOOPEXIT:.*]], label %[[LOOP]] -; CHECK: [[__CRIT_EDGE_LOOPEXIT]]: +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; entry: - %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr 
addrspace(1) %.ptr, i16 0, i32 2147483648, i32 159744) + %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) %.ptr, i16 0, i64 2147483648, i32 159744) %fat = addrspacecast ptr addrspace(8) %rsrc to ptr addrspace(7) br label %loop @@ -36,4 +36,4 @@ exit: ; preds = %exit ret void } -declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) readnone, i16, i32, i32) +declare ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p1(ptr addrspace(1) readnone, i16, i64, i32) diff --git a/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll b/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll index d5d0c14cf2c82..bc9cf4fe93622 100644 --- a/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll +++ b/llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll @@ -23,11 +23,7 @@ define half @vectorize_v2f16_loop(ptr addrspace(1) noalias %s) { ; GFX9-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; GFX9-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; GFX9: middle.block: -; GFX9-NEXT: br label [[FOR_END:%.*]] -; GFX9: scalar.ph: ; GFX9-NEXT: br label [[FOR_BODY:%.*]] -; GFX9: for.body: -; GFX9-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; GFX9: for.end: ; GFX9-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP3]], [[TMP2]] ; GFX9-NEXT: [[ADD_LCSSA:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> [[BIN_RDX]]) @@ -52,11 +48,7 @@ define half @vectorize_v2f16_loop(ptr addrspace(1) noalias %s) { ; VI-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; VI-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VI: middle.block: -; VI-NEXT: br label [[FOR_END:%.*]] -; VI: scalar.ph: ; VI-NEXT: br label [[FOR_BODY:%.*]] -; VI: for.body: -; VI-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; VI: for.end: ; VI-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP3]], [[TMP2]] ; VI-NEXT: [[ADD_LCSSA:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH0000, <2 x half> [[BIN_RDX]]) diff --git a/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll b/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll index e83ac2eed2d49..58a24ee7c4677 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/active-lane-mask.ll @@ -36,18 +36,6 @@ define void @f0(ptr noalias %dst, ptr readonly %src, i64 %n) #0 { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[TMP10]], 3 -; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i8 [[MUL]], ptr [[ARRAYIDX3]], align 1 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[FOR_END_LOOPEXIT]]: ; 
CHECK-NEXT: br label %[[FOR_END]] ; CHECK: [[FOR_END]]: @@ -81,7 +69,4 @@ attributes #0 = { nofree norecurse nounwind "target-features"="+armv8.1-m.main,+ ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]], [[META5:![0-9]+]]} -; CHECK: [[META4]] = !{!"llvm.loop.vectorize.width", i32 16} -; CHECK: [[META5]] = !{!"llvm.loop.interleave.count", i32 2} ;. diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll index e52d85c51ab76..9a76019ec5f46 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll @@ -25,21 +25,7 @@ define void @test_stride1_4i32(ptr readonly %data, ptr noalias nocapture %dst, i ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 1 -; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2 -; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]] -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4 -; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP8]] -; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[I_023]] -; CHECK-NEXT: store i32 [[ADD7]], ptr [[ARRAYIDX9]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END]], label [[FOR_BODY]] ; CHECK: end: ; CHECK-NEXT: ret void ; @@ -212,21 +198,7 @@ define void @test_stride3_4i32(ptr readonly %data, ptr noalias nocapture %dst, i ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 3 -; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2 -; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]] -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4 -; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP8]] -; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[I_023]] -; CHECK-NEXT: store i32 [[ADD7]], ptr [[ARRAYIDX9]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END]], label [[FOR_BODY]] ; CHECK: end: ; CHECK-NEXT: ret void ; @@ -273,21 +245,7 @@ define void @test_stride4_4i32(ptr readonly %data, ptr noalias nocapture %dst, i ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP7]], 
label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 4 -; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2 -; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]] -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4 -; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP8]] -; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[I_023]] -; CHECK-NEXT: store i32 [[ADD7]], ptr [[ARRAYIDX9]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END]], label [[FOR_BODY]] ; CHECK: end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll index 4cdfcf2c87b97..0a4ed7ff2eb38 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll @@ -22,11 +22,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP2]] ; @@ -75,11 +71,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP8]] ; @@ -126,11 +118,7 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]] @@ -177,11 +165,7 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; 
CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -228,11 +212,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -279,11 +259,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -330,11 +306,7 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP4]]) ; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] @@ -381,11 +353,7 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]]) ; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll index fc7922762a0b4..029d8bd64fe50 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll @@ -34,28 +34,11 @@ define i32 @mla_i32(ptr noalias nocapture readonly %A, ptr noalias nocapture rea ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, 
[[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]] -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011]] -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[TMP13]] to i32 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]] -; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[RES_010]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_011]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP10]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[RES_0_LCSSA]] ; entry: @@ -112,28 +95,11 @@ define i32 @mla_i8(ptr noalias nocapture readonly %A, ptr noalias nocapture read ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]] -; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011]] -; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[TMP13]] to i32 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]] -; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[RES_010]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_011]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[RES_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP10]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[RES_0_LCSSA]] ; entry: @@ -183,25 +149,13 @@ define i32 @add_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP5]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -245,26 +199,14 @@ define i32 @mul_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = mul nsw i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -308,26 +250,14 @@ define i32 @and_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ -1, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = and i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ -1, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ -1, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -371,26 +301,14 @@ define i32 @or_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = or i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -434,26 +352,14 @@ define i32 @xor_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP3]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; 
CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = xor i32 [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -497,26 +403,14 @@ define float @fadd_f32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select fast <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> [[TMP3]], <4 x float> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret float [[R_0_LCSSA]] ; entry: @@ -560,26 +454,14 @@ define 
float @fmul_f32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select fast <4 x i1> [[ACTIVE_LANE_MASK]], <4 x float> [[TMP3]], <4 x float> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fmul fast float [[TMP7]], [[R_07]] -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 1.000000e+00, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi float [ 1.000000e+00, [[ENTRY:%.*]] ], [ [[TMP6]], [[FOR_BODY]] ] ; CHECK-NEXT: ret float [[R_0_LCSSA]] ; entry: @@ -622,7 +504,7 @@ define i32 @smin_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] @@ -640,7 +522,7 @@ define i32 @smin_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] @@ -689,7 +571,7 @@ define i32 @smax_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp 
eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] @@ -707,7 +589,7 @@ define i32 @smax_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] @@ -756,7 +638,7 @@ define i32 @umin_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] @@ -774,7 +656,7 @@ define i32 @umin_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] @@ -823,7 +705,7 @@ define i32 @umax_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4]] = select <4 x i1> [[TMP3]], <4 x i32> [[VEC_PHI]], <4 x i32> [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] @@ -841,7 +723,7 @@ define i32 @umax_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[ADD]] = select i1 [[C]], i32 [[R_07]], i32 [[TMP7]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], 
!llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: for.cond.cleanup.loopexit: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll index 658b9a4569191..1540baab53719 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll @@ -1679,8 +1679,7 @@ define i64 @test_std_q31(ptr %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64> ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4]] = add i64 [[VEC_PHI]], [[TMP3]] -; CHECK-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64> -; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <4 x i64> [[TMP5]], [[TMP5]] +; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <4 x i64> [[TMP2]], [[TMP2]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]]) ; CHECK-NEXT: [[TMP8]] = add i64 [[VEC_PHI1]], [[TMP7]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll index 3426fb16841c5..6ea075f76aed4 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll @@ -30,17 +30,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; DEFAULT-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; DEFAULT-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -59,17 +48,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; OPTSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; OPTSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; 
OPTSIZE-NEXT: ret void ; @@ -88,17 +66,6 @@ define void @always_vectorize(ptr %p, i32 %x) { ; MINSIZE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[X]] -; MINSIZE-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -386,23 +353,6 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) ; DEFAULT-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8 -; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]] -; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1 -; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]] -; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]] -; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP72]], 2 -; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]] -; DEFAULT-NEXT: [[ADD10:%.*]] = add i8 [[ADD]], [[MUL9]] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[INDVARS_IV]] -; DEFAULT-NEXT: store i8 [[ADD10]], ptr [[ARRAYIDX]], align 1 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -502,23 +452,6 @@ define void @dont_vectorize_with_minsize() { ; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; 
DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -545,23 +478,6 @@ define void @dont_vectorize_with_minsize() { ; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -588,23 +504,6 @@ define void @dont_vectorize_with_minsize() { ; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; @@ -659,23 +558,6 @@ define void 
@vectorization_forced() { ; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; DEFAULT: [[MIDDLE_BLOCK]]: ; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; DEFAULT: [[SCALAR_PH:.*]]: -; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] -; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; DEFAULT-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; DEFAULT-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; DEFAULT-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; DEFAULT-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; DEFAULT-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; DEFAULT: [[FOR_COND_CLEANUP]]: ; DEFAULT-NEXT: ret void ; @@ -702,23 +584,6 @@ define void @vectorization_forced() { ; OPTSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; OPTSIZE: [[MIDDLE_BLOCK]]: ; OPTSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; OPTSIZE: [[SCALAR_PH:.*]]: -; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] -; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; OPTSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OPTSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; OPTSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; OPTSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; OPTSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; OPTSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; OPTSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; OPTSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OPTSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OPTSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; OPTSIZE: [[FOR_COND_CLEANUP]]: ; OPTSIZE-NEXT: ret void ; @@ -745,23 +610,6 @@ define void @vectorization_forced() { ; MINSIZE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; MINSIZE: [[MIDDLE_BLOCK]]: ; MINSIZE-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MINSIZE: [[SCALAR_PH:.*]]: -; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] -; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ 
[[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MINSIZE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[BVAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; MINSIZE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [1000 x i32], ptr @C, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[CVAL:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; MINSIZE-NEXT: [[MUL:%.*]] = mul nsw i32 [[BVAL]], [[CVAL]] -; MINSIZE-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw [1000 x i16], ptr @A, i64 0, i64 [[INDVARS_IV]] -; MINSIZE-NEXT: [[AVAL:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[TRUNC:%.*]] = trunc i32 [[MUL]] to i16 -; MINSIZE-NEXT: [[ADD:%.*]] = add i16 [[TRUNC]], [[AVAL]] -; MINSIZE-NEXT: store i16 [[ADD]], ptr [[ARRAYIDX4]], align 2 -; MINSIZE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; MINSIZE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; MINSIZE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; MINSIZE: [[FOR_COND_CLEANUP]]: ; MINSIZE-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll b/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll index 625f7a643a3ac..1ae71c8695401 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll @@ -52,7 +52,7 @@ define dso_local void @predicate_loop_hint(ptr noalias nocapture %A, ptr noalias ; CHECK: %index.next = add nuw i64 %index, 4 ; CHECK: br i1 %{{.*}}, label %{{.*}}, label %vector.body, !llvm.loop [[VEC_LOOP2:![0-9]+]] ; -; CHECK: br i1 %{{.*}}, label %{{.*}}, label %for.body, !llvm.loop [[SCALAR_LOOP2:![0-9]+]] +; CHECK-NOT: br i1 %{{.*}}, label %{{.*}}, label %for.body, !llvm.loop entry: br label %for.body @@ -78,9 +78,6 @@ for.body: ; CHECK-NEXT: [[MD_RT_UNROLL_DIS]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK-NEXT: [[SCALAR_LOOP1]] = distinct !{[[SCALAR_LOOP1]], [[MD_RT_UNROLL_DIS]], [[MD_IS_VEC]]} ; CHECK-NEXT: [[VEC_LOOP2]] = distinct !{[[VEC_LOOP2]], [[MD_IS_VEC]], [[MD_RT_UNROLL_DIS]]} -; CHECK-NEXT: [[SCALAR_LOOP2]] = distinct !{[[SCALAR_LOOP2]], [[ORIG_PRED_ENABLED:!.+]], [[ORIG_VEC_ENABLED:!.+]]} -; CHECK-NEXT: [[ORIG_PRED_ENABLED]] = !{!"llvm.loop.vectorize.predicate.enable", i1 true} -; CHECK-NEXT: [[ORIG_VEC_ENABLED]] = !{!"llvm.loop.vectorize.enable", i1 true} !6 = distinct !{!6, !7, !8} !7 = !{!"llvm.loop.vectorize.predicate.enable", i1 true} diff --git a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll index 0b13343f6ff86..7afa8ce998121 100644 --- a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll +++ b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll @@ -33,18 +33,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 
[[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll index 3938b7a4c7ff6..abbd176a1df6e 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll @@ -143,8 +143,7 @@ define i1 @select_exit_cond(ptr %start, ptr %end, i64 %N) { ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP2]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/large-loop-rdx.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/large-loop-rdx.ll index 43cce8005bbf6..231175f362888 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/large-loop-rdx.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/large-loop-rdx.ll @@ -238,9 +238,8 @@ define void @QLA_F3_r_veq_norm2_V(ptr noalias %r, ptr noalias %a, i32 %n) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_FOR_END13_CRIT_EDGE:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ [[TMP158]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -276,7 +275,7 @@ define void @QLA_F3_r_veq_norm2_V(ptr noalias %r, ptr noalias %a, i32 %n) { ; CHECK-NEXT: [[TMP176]] = fadd fast <2 x double> [[TMP184]], [[TMP182]] ; CHECK-NEXT: [[INDEX_NEXT80]] = add nuw i64 [[INDVARS_IV]], 2 ; CHECK-NEXT: [[TMP185:%.*]] = icmp eq i64 [[INDEX_NEXT80]], [[N_VEC70]] -; CHECK-NEXT: br i1 [[TMP185]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP185]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP186:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double 
0.000000e+00, <2 x double> [[TMP176]]) ; CHECK-NEXT: [[CMP_N81:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC70]] @@ -318,7 +317,7 @@ define void @QLA_F3_r_veq_norm2_V(ptr noalias %r, ptr noalias %a, i32 %n) { ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV1]], 1 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_FOR_END13_CRIT_EDGE]], label %[[FOR_COND1_PREHEADER]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_FOR_END13_CRIT_EDGE]], label %[[FOR_COND1_PREHEADER]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[FOR_COND_FOR_END13_CRIT_EDGE]]: ; CHECK-NEXT: [[ADD10_2_LCSSA:%.*]] = phi double [ [[ADD10_2]], %[[FOR_COND1_PREHEADER]] ], [ [[TMP158]], %[[MIDDLE_BLOCK]] ], [ [[TMP186]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[PHITMP:%.*]] = fptrunc double [[ADD10_2_LCSSA]] to float @@ -385,6 +384,7 @@ for.end13: ; preds = %for.cond.for.end13_ ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]} +; CHECK: [[PROF3]] = !{!"branch_weights", i32 2, i32 14} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll index 1b0a38689603d..7677c9666455a 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll @@ -84,8 +84,7 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-TWO-CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; VF-TWO-CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; VF-TWO-CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; VF-TWO-CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; VF-TWO-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; VF-TWO-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; VF-TWO-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; VF-TWO-CHECK: [[VEC_EPILOG_PH]]: ; VF-TWO-CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -187,8 +186,7 @@ define void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 %N) { ; VF-FOUR-CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; VF-FOUR-CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; VF-FOUR-CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; VF-FOUR-CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; VF-FOUR-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; VF-FOUR-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; VF-FOUR-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; 
VF-FOUR-CHECK: [[VEC_EPILOG_PH]]: ; VF-FOUR-CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -327,8 +325,7 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-TWO-CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; VF-TWO-CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; VF-TWO-CHECK-NEXT: [[IND_END18:%.*]] = trunc i64 [[N_VEC]] to i32 -; VF-TWO-CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; VF-TWO-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; VF-TWO-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; VF-TWO-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; VF-TWO-CHECK: [[VEC_EPILOG_PH]]: ; VF-TWO-CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -451,8 +448,7 @@ define void @f2(ptr noalias %A, ptr noalias %B, i32 %n) { ; VF-FOUR-CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; VF-FOUR-CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; VF-FOUR-CHECK-NEXT: [[IND_END18:%.*]] = trunc i64 [[N_VEC]] to i32 -; VF-FOUR-CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; VF-FOUR-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; VF-FOUR-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; VF-FOUR-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; VF-FOUR-CHECK: [[VEC_EPILOG_PH]]: ; VF-FOUR-CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll index ca39b35aeae1c..d82a3cde4639a 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll @@ -72,8 +72,7 @@ define void @test(ptr %arr, i32 %len) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll index a7f0206089abf..024194db39332 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll @@ -46,19 +46,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; ZVFBFMIN-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; ZVFBFMIN: [[MIDDLE_BLOCK]]: ; ZVFBFMIN-NEXT: br label %[[EXIT:.*]] -; ZVFBFMIN: [[SCALAR_PH:.*]]: -; 
ZVFBFMIN-NEXT: br label %[[LOOP:.*]] -; ZVFBFMIN: [[LOOP]]: -; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] -; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]] -; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]] -; ZVFBFMIN-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2 -; ZVFBFMIN-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2 -; ZVFBFMIN-NEXT: [[Z:%.*]] = fadd bfloat [[X]], [[Y]] -; ZVFBFMIN-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2 -; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] -; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; ZVFBFMIN: [[EXIT]]: ; ZVFBFMIN-NEXT: ret void ; @@ -155,23 +142,6 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 ; ZVFBFMIN-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; ZVFBFMIN: [[MIDDLE_BLOCK]]: ; ZVFBFMIN-NEXT: br label %[[EXIT:.*]] -; ZVFBFMIN: [[SCALAR_PH:.*]]: -; ZVFBFMIN-NEXT: br label %[[LOOP:.*]] -; ZVFBFMIN: [[LOOP]]: -; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] -; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]] -; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]] -; ZVFBFMIN-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]] -; ZVFBFMIN-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2 -; ZVFBFMIN-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2 -; ZVFBFMIN-NEXT: [[Z:%.*]] = load float, ptr [[C_GEP]], align 4 -; ZVFBFMIN-NEXT: [[X_EXT:%.*]] = fpext bfloat [[X]] to float -; ZVFBFMIN-NEXT: [[Y_EXT:%.*]] = fpext bfloat [[Y]] to float -; ZVFBFMIN-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X_EXT]], float [[Y_EXT]], float [[Z]]) -; ZVFBFMIN-NEXT: store float [[FMULADD]], ptr [[C_GEP]], align 4 -; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] -; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; ZVFBFMIN: [[EXIT]]: ; ZVFBFMIN-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll index fde18785ce2c4..2087218bf3ea3 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll @@ -27,31 +27,12 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 { ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP20]], splat (i1 true), i32 [[TMP12]]) -; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 
0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[DEAD_GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -101,31 +82,12 @@ define void @block_with_dead_inst_2(ptr %src) #0 { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP10]], splat (i1 true), i32 [[TMP9]]) -; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[DEAD_GEP:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -175,34 +137,12 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP10]], splat (i1 true), i32 [[TMP9]]) -; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -259,36 +199,12 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 { ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP20]], splat (i1 true), i32 [[TMP12]]) -; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: br label %[[THEN_1:.*]] -; CHECK: [[THEN_1]]: -; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -345,38 +261,12 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] 
= getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP10]], splat (i1 true), i32 [[TMP9]]) -; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: br label %[[THEN_1:.*]] -; CHECK: [[THEN_1]]: -; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[ELSE_2:.*]] -; CHECK: [[ELSE_2]]: -; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -449,38 +339,12 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 % ; CHECK-NEXT: [[TMP23:%.*]] = select [[TMP17]], [[BROADCAST_SPLAT]], zeroinitializer ; CHECK-NEXT: [[TMP24:%.*]] = or [[TMP22]], [[TMP23]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP20]], [[TMP24]], i32 [[TMP27]]) -; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[TMP27]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i16 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[ELSE:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: br i1 [[IC]], label %[[THEN_1:.*]], label %[[ELSE]] -; CHECK: [[THEN_1]]: -; CHECK-NEXT: [[DEAD_GEP_1:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br 
label %[[LOOP_LATCH]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[ELSE_2:.*]] -; CHECK: [[ELSE_2]]: -; CHECK-NEXT: [[DEAD_GEP_2:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -543,24 +407,6 @@ define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i32 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[P:%.*]] = phi i16 [ [[L]], %[[LOOP_HEADER]] ], [ 99, %[[THEN]] ] -; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -613,24 +459,6 @@ define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 { ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ] -; CHECK-NEXT: [[XOR]] = xor i32 0, 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[C:%.*]] = icmp eq i16 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[P:%.*]] = phi i16 [ [[L]], %[[LOOP_HEADER]] ], [ 99, %[[ELSE]] ] -; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -708,8 +536,7 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[DST]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( zeroinitializer, align 4 [[TMP21]], splat (i1 true), i32 [[TMP18]]), !alias.scope [[META10:![0-9]+]], !noalias 
[[META13:![0-9]+]] -; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP18]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll index 43fef428372dd..10f8f742bb1e2 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll @@ -36,23 +36,12 @@ define void @dead_load(ptr %p, i16 %start) { ; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector [[DOTSPLATINSERT1]], poison, zeroinitializer ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[P]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP21]], splat (i1 true), i32 [[TMP16]]) -; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP16]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT2]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[START_EXT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2 -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV]], 111 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -321,28 +310,12 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) { ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[DST]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP19]], align 4 [[TMP16]], splat (i1 true), i32 [[TMP8]]) -; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP8]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 false, label %[[LOOP_LATCH]], label %[[THEN:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[NOT_A:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[NOT_A]], %[[THEN]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr 
[[DST]], i64 [[IV]] -; CHECK-NEXT: store i32 [[P]], ptr [[GEP]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 9 -; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], 322 -; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -403,29 +376,13 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s ; CHECK-NEXT: [[TMP18:%.*]] = zext [[TMP17]] to ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[DST]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP18]], align 4 [[TMP19]], splat (i1 true), i32 [[TMP10]]) -; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]] -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L_DEAD:%.*]] = load i8, ptr [[GEP_SRC_0]], align 1 -; CHECK-NEXT: [[IV_1:%.*]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_1]] -; CHECK-NEXT: [[L_1:%.*]] = load i8, ptr [[GEP_SRC_1]], align 1 -; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[L_1]] to i32 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i32 [[EXT]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2 -; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll index b6230dc1e09ab..3fd90b2848848 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll @@ -32,18 +32,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -86,21 +75,9 @@ define i64 @vector_add_reduce(ptr noalias nocapture %a) { ; CHECK-NEXT: br i1 [[TMP10]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[TMP9]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]] -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: -; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP11]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll index d20dd0587f44e..01b4502308c95 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll @@ -29,18 +29,7 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -61,18 +50,7 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] -; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -113,20 +91,9 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 
[[TMP8]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -147,18 +114,7 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] -; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -199,20 +155,9 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[DIVREM:%.*]] = urem i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -233,18 +178,7 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], 
label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[DIVREM:%.*]] = urem i64 [[ELEM]], [[V]] -; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -285,20 +219,9 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[DIVREM:%.*]] = srem i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -319,18 +242,7 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[DIVREM:%.*]] = srem i64 [[ELEM]], [[V]] -; FIXED-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -379,26 +291,9 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -422,24 +317,7 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; FIXED-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]] -; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -494,26 +372,9 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] -; CHECK-NEXT: br label [[LATCH]] -; 
CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -537,24 +398,7 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) { ; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]] -; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -601,26 +445,9 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], 27 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -641,24 +468,7 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; FIXED-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], 27 -; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -705,26 +515,9 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], 27 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -745,24 +538,7 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) { ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], 27 
-; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i64 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -815,26 +591,9 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128 -; CHECK-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; CHECK: do_op: -; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i8 [[ELEM]], -1 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[PHI:%.*]] = phi i8 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; CHECK-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -856,24 +615,7 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) { ; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[FOR_END:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[FOR_BODY:%.*]] -; FIXED: for.body: -; FIXED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; FIXED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; FIXED-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; FIXED-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128 -; FIXED-NEXT: br i1 [[C]], label [[DO_OP:%.*]], label [[LATCH]] -; FIXED: do_op: -; FIXED-NEXT: [[DIVREM:%.*]] = sdiv i8 [[ELEM]], -1 -; FIXED-NEXT: br label [[LATCH]] -; FIXED: latch: -; FIXED-NEXT: [[PHI:%.*]] = phi i8 [ [[ELEM]], [[FOR_BODY]] ], [ [[DIVREM]], [[DO_OP]] ] -; FIXED-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1 -; FIXED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; FIXED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; FIXED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; FIXED-NEXT: br label [[LATCH:%.*]] ; FIXED: for.end: ; FIXED-NEXT: ret void ; @@ -945,7 +687,7 @@ define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]] ; CHECK-NEXT: 
[[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT7]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i32 [[TMP16]], 2 @@ -972,7 +714,7 @@ define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) { ; CHECK-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 0 ; CHECK-NEXT: [[IV_NEXT_TRUNC]] = trunc i16 [[IV_NEXT]] to i8 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: [[MERGE_LCSSA:%.*]] = phi i32 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[MERGE_LCSSA]] @@ -1004,28 +746,9 @@ define i32 @udiv_sdiv_with_invariant_divisors(i8 %x, i16 %y, i1 %c) { ; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; FIXED: middle.block: ; FIXED-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3 -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: -; FIXED-NEXT: br label [[LOOP_HEADER:%.*]] -; FIXED: loop.header: -; FIXED-NEXT: [[IV:%.*]] = phi i16 [ -12, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; FIXED-NEXT: [[NARROW_IV:%.*]] = phi i8 [ -12, [[SCALAR_PH]] ], [ [[IV_NEXT_TRUNC:%.*]], [[LOOP_LATCH]] ] -; FIXED-NEXT: br i1 [[C]], label [[LOOP_LATCH]], label [[THEN:%.*]] -; FIXED: then: -; FIXED-NEXT: [[UD:%.*]] = udiv i8 [[NARROW_IV]], [[X]] -; FIXED-NEXT: [[UD_EXT:%.*]] = zext i8 [[UD]] to i16 -; FIXED-NEXT: [[SD:%.*]] = sdiv i16 [[UD_EXT]], [[Y]] -; FIXED-NEXT: [[SD_EXT:%.*]] = sext i16 [[SD]] to i32 -; FIXED-NEXT: br label [[LOOP_LATCH]] -; FIXED: loop.latch: -; FIXED-NEXT: [[MERGE:%.*]] = phi i32 [ 0, [[LOOP_HEADER]] ], [ [[SD_EXT]], [[THEN]] ] -; FIXED-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], 1 -; FIXED-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; FIXED-NEXT: [[IV_NEXT_TRUNC]] = trunc i16 [[IV_NEXT]] to i8 -; FIXED-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] +; FIXED-NEXT: br label [[LOOP_LATCH:%.*]] ; FIXED: exit: -; FIXED-NEXT: [[MERGE_LCSSA:%.*]] = phi i32 [ [[MERGE]], [[LOOP_LATCH]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] -; FIXED-NEXT: ret i32 [[MERGE_LCSSA]] +; FIXED-NEXT: ret i32 [[TMP7]] ; entry: br label %loop.header diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll index 663ead8c13cbd..21272cb72f4d6 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll @@ -24,23 +24,13 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]] ; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[VEC_IND]], ptr align 8 [[TMP14]], splat (i1 true), i32 [[TMP11]]) -; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP11]] to i64 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] -; 
CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP12]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY1:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] -; CHECK-NEXT: store i64 [[IV1]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY1]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; @@ -80,24 +70,12 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) { ; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP7]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = mul i64 8, [[TMP10]] +; CHECK-NEXT: [[TMP11:%.*]] = mul i64 8, [[TMP9]] ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[B]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[ADDR]], i64 8 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store ptr [[ADDR]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-iv-simplify.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-iv-simplify.ll deleted file mode 100644 index 4de0e666149f3..0000000000000 --- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-iv-simplify.ll +++ /dev/null @@ -1,333 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv64 -mattr='+v' --passes='loop(evl-iv-simplify)' < %s | FileCheck %s -; RUN: opt -S -mtriple=riscv64 -mattr='+v' --passes='loop(evl-iv-simplify),function(simplifycfg,dce)' < %s | FileCheck %s --check-prefix=LOOP-DEL - -define void @simple(ptr noalias %a, ptr noalias %b, %c, i64 %N) vscale_range(2, 1024) { -; CHECK-LABEL: define void @simple( -; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = sub i64 -1, 
[[N]] -; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4 -; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] -; CHECK-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] -; CHECK: vector.ph: -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4 -; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1 -; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]] -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] -; CHECK: vector.body: -; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]] -; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true) -; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[EVL_BASED_IV]], 0 -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP13]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP14]], i32 0 -; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], splat (i1 true), i32 [[TMP12]]) -; CHECK-NEXT: [[TMP18:%.*]] = add nsw [[C]], [[VP_OP_LOAD1]] -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP13]] -; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i32 0 -; CHECK-NEXT: call void @llvm.vp.store.nxv4i32.p0( [[TMP18]], ptr align 4 [[TMP20]], splat (i1 true), i32 [[TMP12]]) -; CHECK-NEXT: [[TMP21:%.*]] = zext i32 [[TMP12]] to i64 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]] -; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; CHECK: middle.block: -; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] -; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[ADD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] -; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] -; CHECK: for.cond.cleanup: -; CHECK-NEXT: ret void -; -; LOOP-DEL-LABEL: define void @simple( -; LOOP-DEL-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { -; LOOP-DEL-NEXT: entry: -; LOOP-DEL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N]] -; LOOP-DEL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; LOOP-DEL-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4 -; LOOP-DEL-NEXT: [[TMP3:%.*]] = icmp ult i64 
[[TMP0]], [[TMP2]] -; LOOP-DEL-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_PH:%.*]] -; LOOP-DEL: vector.ph: -; LOOP-DEL-NEXT: br label [[VECTOR_BODY:%.*]] -; LOOP-DEL: vector.body: -; LOOP-DEL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; LOOP-DEL-NEXT: [[TMP4:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]] -; LOOP-DEL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP4]], i32 4, i1 true) -; LOOP-DEL-NEXT: [[TMP6:%.*]] = add i64 [[EVL_BASED_IV]], 0 -; LOOP-DEL-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP6]] -; LOOP-DEL-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0 -; LOOP-DEL-NEXT: [[VP_OP_LOAD1:%.*]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP10]], splat (i1 true), i32 [[TMP5]]) -; LOOP-DEL-NEXT: [[TMP11:%.*]] = add nsw [[C]], [[VP_OP_LOAD1]] -; LOOP-DEL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP6]] -; LOOP-DEL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 0 -; LOOP-DEL-NEXT: call void @llvm.vp.store.nxv4i32.p0( [[TMP11]], ptr align 4 [[TMP13]], splat (i1 true), i32 [[TMP5]]) -; LOOP-DEL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64 -; LOOP-DEL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]] -; LOOP-DEL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] -; LOOP-DEL-NEXT: br i1 [[TMP15]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; LOOP-DEL: for.body: -; LOOP-DEL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] -; LOOP-DEL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; LOOP-DEL-NEXT: [[ADD:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; LOOP-DEL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; LOOP-DEL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; LOOP-DEL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; LOOP-DEL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; LOOP-DEL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] -; LOOP-DEL: for.cond.cleanup: -; LOOP-DEL-NEXT: ret void -; -entry: - %0 = sub i64 -1, %N - %1 = call i64 @llvm.vscale.i64() - %2 = mul i64 %1, 4 - %3 = icmp ult i64 %0, %2 - br i1 %3, label %scalar.ph, label %vector.ph - -vector.ph: ; preds = %entry - %4 = call i64 @llvm.vscale.i64() - %5 = mul i64 %4, 4 - %6 = call i64 @llvm.vscale.i64() - %7 = mul i64 %6, 4 - %8 = sub i64 %7, 1 - %n.rnd.up = add i64 %N, %8 - %n.mod.vf = urem i64 %n.rnd.up, %5 - %n.vec = sub i64 %n.rnd.up, %n.mod.vf - %9 = call i64 @llvm.vscale.i64() - %10 = mul i64 %9, 4 - br label %vector.body - -vector.body: ; preds = %vector.body, %vector.ph - %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] - %evl.based.iv = phi i64 [ 0, %vector.ph ], [ %index.evl.next, %vector.body ] - %11 = sub i64 %N, %evl.based.iv - %12 = call i32 @llvm.experimental.get.vector.length.i64(i64 %11, i32 4, i1 true) - %13 = add i64 %evl.based.iv, 0 - %14 = getelementptr inbounds i32, ptr %b, i64 %13 - %15 = getelementptr inbounds i32, ptr %14, i32 0 - %vp.op.load = call @llvm.vp.load.nxv4i32.p0(ptr align 4 %15, splat (i1 true), i32 %12) - %18 = add nsw %c, %vp.op.load - %19 = getelementptr inbounds i32, ptr %a, i64 %13 - %20 = getelementptr inbounds i32, ptr %19, i32 0 - call void @llvm.vp.store.nxv4i32.p0( %18, ptr align 4 %20, splat (i1 true), i32 %12) - %21 = zext i32 %12 to 
i64 - %index.evl.next = add i64 %21, %evl.based.iv - %index.next = add nuw i64 %index, %10 - %22 = icmp eq i64 %index.next, %n.vec - br i1 %22, label %middle.block, label %vector.body, !llvm.loop !0 - -middle.block: ; preds = %vector.body - br i1 true, label %for.cond.cleanup, label %scalar.ph - -scalar.ph: ; preds = %entry, %middle.block - %bc.resume.val = phi i64 [ %n.vec, %middle.block ], [ 0, %entry ] - br label %for.body - -for.body: ; preds = %for.body, %scalar.ph - %iv = phi i64 [ %bc.resume.val, %scalar.ph ], [ %iv.next, %for.body ] - %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv - %23 = load i32, ptr %arrayidx, align 4 - %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %23, ptr %arrayidx4, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !3 - -for.cond.cleanup: ; preds = %middle.block, %for.body - ret void -} - -; Fixed IV steps resulting from vscale_range with a single element - -define void @fixed_iv_step(ptr %arg0, ptr %arg1, i64 %N) #0 { -; CHECK-LABEL: define void @fixed_iv_step( -; CHECK-SAME: ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] { -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[VECTOR_PH:%.*]] -; CHECK: vector.ph: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[ARG0]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] -; CHECK: vector.body: -; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]] -; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP0]], i32 2, i1 true) -; CHECK-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[ARG1]], i64 [[EVL_BASED_IV]] -; CHECK-NEXT: tail call void @llvm.vp.store.nxv2p0.p0( [[BROADCAST_SPLAT]], ptr align 8 [[GEP]], splat (i1 true), i32 [[TMP1]]) -; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[EVL_BASED_IV]], [[TMP2]] -; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_END_LOOPEXIT5:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4]] -; CHECK: for.end.loopexit5: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: for.end: -; CHECK-NEXT: ret void -; -; LOOP-DEL-LABEL: define void @fixed_iv_step( -; LOOP-DEL-SAME: ptr [[ARG0:%.*]], ptr [[ARG1:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] { -; LOOP-DEL-NEXT: entry: -; LOOP-DEL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[ARG0]], i64 0 -; LOOP-DEL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; LOOP-DEL-NEXT: br label [[VECTOR_BODY:%.*]] -; LOOP-DEL: vector.body: -; LOOP-DEL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; LOOP-DEL-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]] -; LOOP-DEL-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP0]], i32 2, i1 true) -; LOOP-DEL-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[ARG1]], i64 [[EVL_BASED_IV]] -; LOOP-DEL-NEXT: tail call void @llvm.vp.store.nxv2p0.p0( [[BROADCAST_SPLAT]], ptr align 8 [[GEP]], splat (i1 true), i32 [[TMP1]]) -; LOOP-DEL-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -; LOOP-DEL-NEXT: [[INDEX_EVL_NEXT]] = 
add i64 [[EVL_BASED_IV]], [[TMP2]] -; LOOP-DEL-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] -; LOOP-DEL-NEXT: br i1 [[TMP3]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4]] -; LOOP-DEL: for.end: -; LOOP-DEL-NEXT: ret void -; -entry: - br label %vector.ph - -vector.ph: - %n.rnd.up = add nsw i64 %N, 15 - %n.vec = and i64 %n.rnd.up, -16 - %broadcast.splatinsert = insertelement poison, ptr %arg0, i64 0 - %broadcast.splat = shufflevector %broadcast.splatinsert, poison, zeroinitializer - br label %vector.body - -vector.body: - %lsr.iv32 = phi i64 [ %lsr.iv.next33, %vector.body ], [ %n.vec, %vector.ph ] - %evl.based.iv = phi i64 [ 0, %vector.ph ], [ %index.evl.next, %vector.body ] - %41 = sub i64 %N, %evl.based.iv - %42 = tail call i32 @llvm.experimental.get.vector.length.i64(i64 %41, i32 2, i1 true) - %gep = getelementptr ptr, ptr %arg1, i64 %evl.based.iv - tail call void @llvm.vp.store.nxv2p0.p0( %broadcast.splat, ptr align 8 %gep, splat (i1 true), i32 %42) - %43 = zext i32 %42 to i64 - %index.evl.next = add i64 %evl.based.iv, %43 - %lsr.iv.next33 = add i64 %lsr.iv32, -16 - %44 = icmp eq i64 %lsr.iv.next33, 0 - br i1 %44, label %for.end.loopexit5, label %vector.body, !llvm.loop !3 - -for.end.loopexit5: - br label %for.end - -for.end: - ret void -} - -; Fixed IV step and trip count -define void @fixed_iv_step_tc(ptr %arg0, ptr %arg1) #0 { -; CHECK-LABEL: define void @fixed_iv_step_tc( -; CHECK-SAME: ptr [[ARG0:%.*]], ptr [[ARG1:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[VECTOR_PH:%.*]] -; CHECK: vector.ph: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[ARG0]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] -; CHECK: vector.body: -; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = sub i64 87, [[EVL_BASED_IV]] -; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP0]], i32 2, i1 true) -; CHECK-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[ARG1]], i64 [[EVL_BASED_IV]] -; CHECK-NEXT: tail call void @llvm.vp.store.nxv2p0.p0( [[BROADCAST_SPLAT]], ptr align 8 [[GEP]], splat (i1 true), i32 [[TMP1]]) -; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[EVL_BASED_IV]], [[TMP2]] -; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 87 -; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_END_LOOPEXIT5:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4]] -; CHECK: for.end.loopexit5: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: for.end: -; CHECK-NEXT: ret void -; -; LOOP-DEL-LABEL: define void @fixed_iv_step_tc( -; LOOP-DEL-SAME: ptr [[ARG0:%.*]], ptr [[ARG1:%.*]]) #[[ATTR1]] { -; LOOP-DEL-NEXT: entry: -; LOOP-DEL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[ARG0]], i64 0 -; LOOP-DEL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; LOOP-DEL-NEXT: br label [[VECTOR_BODY:%.*]] -; LOOP-DEL: vector.body: -; LOOP-DEL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; LOOP-DEL-NEXT: [[TMP0:%.*]] = sub i64 87, [[EVL_BASED_IV]] -; LOOP-DEL-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP0]], i32 2, i1 true) -; LOOP-DEL-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[ARG1]], i64 
[[EVL_BASED_IV]] -; LOOP-DEL-NEXT: tail call void @llvm.vp.store.nxv2p0.p0( [[BROADCAST_SPLAT]], ptr align 8 [[GEP]], splat (i1 true), i32 [[TMP1]]) -; LOOP-DEL-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 -; LOOP-DEL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[EVL_BASED_IV]], [[TMP2]] -; LOOP-DEL-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 87 -; LOOP-DEL-NEXT: br i1 [[TMP3]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4]] -; LOOP-DEL: for.end: -; LOOP-DEL-NEXT: ret void -; -entry: - br label %vector.ph - -vector.ph: - %n.rnd.up = add nsw i64 87, 15 - %n.vec = and i64 %n.rnd.up, -16 - %broadcast.splatinsert = insertelement poison, ptr %arg0, i64 0 - %broadcast.splat = shufflevector %broadcast.splatinsert, poison, zeroinitializer - br label %vector.body - -vector.body: - %lsr.iv32 = phi i64 [ %lsr.iv.next33, %vector.body ], [ %n.vec, %vector.ph ] - %evl.based.iv = phi i64 [ 0, %vector.ph ], [ %index.evl.next, %vector.body ] - %41 = sub i64 87, %evl.based.iv - %42 = tail call i32 @llvm.experimental.get.vector.length.i64(i64 %41, i32 2, i1 true) - %gep = getelementptr ptr, ptr %arg1, i64 %evl.based.iv - tail call void @llvm.vp.store.nxv2p0.p0( %broadcast.splat, ptr align 8 %gep, splat (i1 true), i32 %42) - %43 = zext i32 %42 to i64 - %index.evl.next = add i64 %evl.based.iv, %43 - %lsr.iv.next33 = add i64 %lsr.iv32, -16 - %44 = icmp eq i64 %lsr.iv.next33, 0 - br i1 %44, label %for.end.loopexit5, label %vector.body, !llvm.loop !3 - -for.end.loopexit5: - br label %for.end - -for.end: - ret void -} - -declare i64 @llvm.vscale.i64() - -declare i32 @llvm.experimental.get.vector.length.i64(i64, i32 immarg, i1 immarg) - -declare @llvm.vp.load.nxv4i32.p0(ptr nocapture, , i32) - -declare void @llvm.vp.store.nxv4i32.p0(, ptr nocapture, , i32) - -attributes #0 = { vscale_range(8,8) } - -!0 = distinct !{!0, !1, !2, !4} -!1 = !{!"llvm.loop.isvectorized", i32 1} -!2 = !{!"llvm.loop.unroll.runtime.disable"} -!3 = distinct !{!3, !2, !1, !4} -!4 = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} -;. -; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]} -; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[META3]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]], [[META3]]} -;. -; LOOP-DEL: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]} -; LOOP-DEL: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} -; LOOP-DEL: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; LOOP-DEL: [[META3]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} -; LOOP-DEL: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]], [[META3]]} -;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll index a2ab7c4cc52ad..143a51dc811f1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll @@ -46,19 +46,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; ZVFHMIN-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; ZVFHMIN: [[MIDDLE_BLOCK]]: ; ZVFHMIN-NEXT: br label %[[EXIT:.*]] -; ZVFHMIN: [[SCALAR_PH:.*]]: -; ZVFHMIN-NEXT: br label %[[LOOP:.*]] -; ZVFHMIN: [[LOOP]]: -; ZVFHMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] -; ZVFHMIN-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]] -; ZVFHMIN-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]] -; ZVFHMIN-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2 -; ZVFHMIN-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2 -; ZVFHMIN-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]] -; ZVFHMIN-NEXT: store half [[Z]], ptr [[A_GEP]], align 2 -; ZVFHMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; ZVFHMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] -; ZVFHMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; ZVFHMIN: [[EXIT]]: ; ZVFHMIN-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll index ea2ccb07b388b..1c6954c187e5f 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll @@ -111,23 +111,12 @@ define void @predicated_strided_store(ptr %start) { ; RVA23-NEXT: [[TMP4:%.*]] = mul [[VEC_IND]], splat (i64 7) ; RVA23-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[START:%.*]], [[TMP4]] ; RVA23-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0( zeroinitializer, align 1 [[TMP5]], splat (i1 true), i32 [[TMP2]]) -; RVA23-NEXT: [[TMP6:%.*]] = zext i32 [[TMP2]] to i64 -; RVA23-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] +; RVA23-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; RVA23-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; RVA23-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RVA23-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; RVA23: middle.block: -; RVA23-NEXT: br label [[EXIT:%.*]] -; RVA23: scalar.ph: ; RVA23-NEXT: br label [[LOOP:%.*]] -; RVA23: loop: -; RVA23-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; RVA23-NEXT: [[TMP8:%.*]] = mul i64 [[IV]], 7 -; RVA23-NEXT: [[ADD_PTR:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP8]] -; RVA23-NEXT: store i8 0, ptr [[ADD_PTR]], align 1 -; RVA23-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; RVA23-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 585 -; RVA23-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; RVA23: exit: ; RVA23-NEXT: ret void ; @@ -149,23 +138,12 @@ define void @predicated_strided_store(ptr %start) { ; RVA23ZVL1024B-NEXT: [[TMP4:%.*]] = mul [[VEC_IND]], splat (i64 7) ; RVA23ZVL1024B-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[START:%.*]], [[TMP4]] ; RVA23ZVL1024B-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0( zeroinitializer, align 1 [[TMP5]], splat (i1 true), i32 [[TMP2]]) -; RVA23ZVL1024B-NEXT: [[TMP6:%.*]] = zext i32 [[TMP2]] to i64 -; RVA23ZVL1024B-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] +; RVA23ZVL1024B-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; 
RVA23ZVL1024B-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; RVA23ZVL1024B-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RVA23ZVL1024B-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; RVA23ZVL1024B: middle.block: -; RVA23ZVL1024B-NEXT: br label [[EXIT:%.*]] -; RVA23ZVL1024B: scalar.ph: ; RVA23ZVL1024B-NEXT: br label [[LOOP:%.*]] -; RVA23ZVL1024B: loop: -; RVA23ZVL1024B-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; RVA23ZVL1024B-NEXT: [[TMP8:%.*]] = mul i64 [[IV]], 7 -; RVA23ZVL1024B-NEXT: [[ADD_PTR:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP8]] -; RVA23ZVL1024B-NEXT: store i8 0, ptr [[ADD_PTR]], align 1 -; RVA23ZVL1024B-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; RVA23ZVL1024B-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 585 -; RVA23ZVL1024B-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; RVA23ZVL1024B: exit: ; RVA23ZVL1024B-NEXT: ret void ; @@ -213,27 +191,12 @@ define void @store_to_addr_generated_from_invariant_addr(ptr noalias %p0, ptr no ; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i32.nxv2p0( zeroinitializer, align 4 [[TMP7]], splat (i1 true), i32 [[TMP3]]) ; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i32.nxv2p0( zeroinitializer, align 4 [[TMP7]], splat (i1 true), i32 [[TMP3]]) ; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0( zeroinitializer, align 1 [[TMP7]], splat (i1 true), i32 [[TMP3]]) -; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP3]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP4]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr i32, ptr [[P1]], i64 [[IV]] -; CHECK-NEXT: store ptr [[P0]], ptr [[ARRAYIDX11]], align 8 -; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[P2]], align 4 -; CHECK-NEXT: [[BITS_TO_GO:%.*]] = getelementptr i8, ptr [[P3]], i64 [[TMP10]] -; CHECK-NEXT: store i32 0, ptr [[BITS_TO_GO]], align 4 -; CHECK-NEXT: store i32 0, ptr [[BITS_TO_GO]], align 4 -; CHECK-NEXT: store i8 0, ptr [[BITS_TO_GO]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll index d3db55e251870..4ccec2ca61778 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll @@ -69,8 +69,7 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 { ; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector [[DOTSPLATINSERT24]], poison, zeroinitializer ; CHECK-NEXT: [[TMP59:%.*]] = getelementptr i16, ptr [[A]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP59]], splat (i1 true), i32 [[TMP27]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]] -; CHECK-NEXT: [[TMP47:%.*]] = zext i32 [[TMP27]] to 
i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP47]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT25]] ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] @@ -120,7 +119,136 @@ exit: ret void } +define void @test_3_inductions(ptr noalias %dst, ptr noalias %src, i64 %n) #1 { +; CHECK-LABEL: define void @test_3_inductions( +; CHECK-SAME: ptr noalias [[DST:%.*]], ptr noalias [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1 +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[DST]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP2:%.*]] = mul [[TMP1]], splat (i32 2) +; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i32 1), [[TMP2]] +; CHECK-NEXT: [[INDUCTION1:%.*]] = add zeroinitializer, [[TMP2]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND2:%.*]] = phi [ [[INDUCTION1]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP4:%.*]] = mul i32 2, [[TMP3]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, i32 [[TMP4]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector [[BROADCAST_SPLATINSERT3]], poison, zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = or [[VEC_IND2]], [[VEC_IND]] +; CHECK-NEXT: [[TMP6:%.*]] = sext [[TMP5]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], [[TMP6]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv2p0.nxv2p0( [[TMP7]], align 8 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP3]] to i64 +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT4]] +; CHECK-NEXT: [[VEC_IND_NEXT5]] = add [[VEC_IND2]], [[BROADCAST_SPLAT4]] +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv.0 = phi i32 [ 1, %entry ], [ %iv.0.next, %loop ] + %iv.1 = phi i64 [ 0, %entry ], [ %iv.1.next, %loop ] + %iv.2 = phi i32 [ 0, %entry ], [ %iv.2.next, %loop ] + %iv.or = or i32 %iv.2, %iv.0 + %iv.or.ext = sext i32 %iv.or to i64 + %gep.src = getelementptr i8, ptr %src, i64 %iv.or.ext + store ptr %gep.src, ptr %dst, align 8 + %iv.0.next = add i32 %iv.0, 2 + %iv.1.next = add i64 %iv.1, 1 + %iv.2.next = add i32 %iv.2, 2 + %ec = icmp eq i64 %iv.1, %n + br i1 %ec, label %exit, label %loop + +exit: + ret void +} + +define void @redundant_iv_trunc_for_cse(ptr noalias %src, ptr noalias %dst, i64 %n) #0 { +; CHECK-LABEL: define void @redundant_iv_trunc_for_cse( +; CHECK-SAME: ptr 
noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1 +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv4i32() +; CHECK-NEXT: [[TMP2:%.*]] = mul [[TMP1]], splat (i32 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP2]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND1:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP3]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP4]], splat (i1 true), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq [[VP_OP_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = shl [[VEC_IND1]], splat (i32 16) +; CHECK-NEXT: [[PREDPHI:%.*]] = select [[TMP5]], [[TMP6]], [[VEC_IND]] +; CHECK-NEXT: [[TMP7:%.*]] = trunc [[PREDPHI]] to +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: call void @llvm.vp.store.nxv4i8.p0( [[TMP7]], ptr align 1 [[TMP8]], splat (i1 true), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP3]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[VEC_IND_NEXT2]] = add [[VEC_IND1]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv + %l = load i32, ptr %gep.src + %c.0 = icmp eq i32 %l, 0 + %trunc.iv = trunc i64 %iv to i32 + br i1 %c.0, label %then, label %loop.latch + +then: + %trunc.iv.2 = trunc i64 %iv to i32 + %shl.iv = shl i32 %trunc.iv.2, 16 + br label %loop.latch + +loop.latch: + %p = phi i32 [ %shl.iv, %then ], [ %trunc.iv, %loop.header ] + %trunc.p = trunc i32 %p to i8 + %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv + store i8 %trunc.p, ptr %gep.dst, align 1 + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, %n + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + + + attributes #0 = { "target-features"="+64bit,+v,+zvl256b" } +attributes #1 = { "target-cpu"="sifive-p670" } ;. 
; CHECK: [[META0]] = !{[[META1:![0-9]+]]} ; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]} @@ -132,4 +260,6 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl256b" } ; CHECK: [[META7]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]]} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]], [[META8]]} +; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META7]], [[META8]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll index 63d1af38e93f0..7e6e45feaa834 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll @@ -133,24 +133,11 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP10]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; IF-EVL-OUTLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]] -; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2 -; IF-EVL-OUTLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32 -; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[R_07]], [[CONV]] -; IF-EVL-OUTLOOP-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; IF-EVL-OUTLOOP: for.cond.cleanup.loopexit: -; IF-EVL-OUTLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ] ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_COND_CLEANUP]] ; IF-EVL-OUTLOOP: for.cond.cleanup: -; IF-EVL-OUTLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; IF-EVL-OUTLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP12]], [[FOR_BODY]] ] ; IF-EVL-OUTLOOP-NEXT: ret i32 [[R_0_LCSSA]] ; ; IF-EVL-INLOOP-LABEL: @add_i16_i32( @@ -176,24 +163,11 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 ; IF-EVL-INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] -; IF-EVL-INLOOP: scalar.ph: ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; IF-EVL-INLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]] -; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2 -; IF-EVL-INLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32 -; IF-EVL-INLOOP-NEXT: [[ADD]] = add nsw i32 [[R_07]], 
[[CONV]] -; IF-EVL-INLOOP-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]] ; IF-EVL-INLOOP: for.cond.cleanup.loopexit: -; IF-EVL-INLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; IF-EVL-INLOOP-NEXT: br label [[FOR_COND_CLEANUP]] ; IF-EVL-INLOOP: for.cond.cleanup: -; IF-EVL-INLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; IF-EVL-INLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP11]], [[FOR_BODY]] ] ; IF-EVL-INLOOP-NEXT: ret i32 [[R_0_LCSSA]] ; entry: @@ -330,22 +304,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32( [[TMP15]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-OUTLOOP-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]] -; IF-EVL-OUTLOOP-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[SMIN_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP18]] ; ; IF-EVL-INLOOP-LABEL: @smin( ; IF-EVL-INLOOP-NEXT: entry: @@ -367,22 +328,9 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-INLOOP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-INLOOP-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-INLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP16]], [[RDX]] -; IF-EVL-INLOOP-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP16]], i32 [[RDX]] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ 
[[SMIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[SMIN_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[RDX_MINMAX]] ; ; IF-EVL-LABEL: @smin( ; IF-EVL-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll index 43560d25f8ce2..31c8b74194062 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll @@ -31,24 +31,7 @@ define void @load_store_factor2_i32(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; CHECK-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; CHECK-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -73,24 +56,7 @@ define void @load_store_factor2_i32(ptr %p) { ; FIXED-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; FIXED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; FIXED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; FIXED-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; FIXED-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -121,24 +87,7 @@ define void @load_store_factor2_i32(ptr %p) { ; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; SCALABLE-NEXT: [[Q0:%.*]] = 
getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; SCALABLE-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; SCALABLE-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; SCALABLE-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -194,24 +143,7 @@ define void @load_store_factor2_i64(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -236,24 +168,7 @@ define void @load_store_factor2_i64(ptr %p) { ; FIXED-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -284,24 +199,7 @@ define void @load_store_factor2_i64(ptr %p) { ; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label 
[[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -359,29 +257,7 @@ define void @load_store_factor3_i32(ptr %p) { ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; CHECK-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; CHECK-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i32, ptr [[Q2]], align 4 -; CHECK-NEXT: [[Y2:%.*]] = add i32 [[X2]], 3 -; CHECK-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -410,29 +286,7 @@ define void @load_store_factor3_i32(ptr %p) { ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; FIXED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; FIXED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; FIXED-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; FIXED-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; 
FIXED-NEXT: [[Q2:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i32, ptr [[Q2]], align 4 -; FIXED-NEXT: [[Y2:%.*]] = add i32 [[X2]], 3 -; FIXED-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -465,29 +319,7 @@ define void @load_store_factor3_i32(ptr %p) { ; SCALABLE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; SCALABLE-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; SCALABLE-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[Y1:%.*]] = add i32 [[X1]], 2 -; SCALABLE-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i32, ptr [[Q2]], align 4 -; SCALABLE-NEXT: [[Y2:%.*]] = add i32 [[X2]], 3 -; SCALABLE-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -551,29 +383,7 @@ define void @load_store_factor3_i64(ptr %p) { ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ 
-602,29 +412,7 @@ define void @load_store_factor3_i64(ptr %p) { ; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -657,29 +445,7 @@ define void @load_store_factor3_i64(ptr %p) { ; SCALABLE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -745,34 +511,7 @@ define void @load_store_factor4(ptr %p) { ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ 
[[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -803,34 +542,7 @@ define void @load_store_factor4(ptr %p) { ; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -865,34 +577,7 @@ define void @load_store_factor4(ptr %p) { ; SCALABLE-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: 
loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -966,39 +651,7 @@ define void @load_store_factor5(ptr %p) { ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], 
label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1033,39 +686,7 @@ define void @load_store_factor5(ptr %p) { ; FIXED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -1102,39 +723,7 @@ define void @load_store_factor5(ptr %p) { ; SCALABLE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: 
[[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -1216,44 +805,7 @@ define void @load_store_factor6(ptr %p) { ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; CHECK-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; CHECK-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; CHECK-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; CHECK-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1291,44 +843,7 @@ define void @load_store_factor6(ptr %p) { ; FIXED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; 
FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; FIXED-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; FIXED-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; FIXED-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; FIXED-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; FIXED-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -1367,44 +882,7 @@ define void @load_store_factor6(ptr %p) { ; SCALABLE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = 
load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; SCALABLE-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; SCALABLE-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -1494,49 +972,7 @@ define void @load_store_factor7(ptr %p) { ; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; CHECK-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; CHECK-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; CHECK-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; CHECK-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; CHECK-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; CHECK-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; CHECK-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; CHECK-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; CHECK-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], 
label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1578,49 +1014,7 @@ define void @load_store_factor7(ptr %p) { ; FIXED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; FIXED-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; FIXED-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; FIXED-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; FIXED-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; FIXED-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; FIXED-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; FIXED-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; FIXED-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; FIXED-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; FIXED-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -1661,49 +1055,7 @@ define void @load_store_factor7(ptr %p) { ; SCALABLE-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = 
add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; SCALABLE-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; SCALABLE-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; SCALABLE-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; SCALABLE-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; SCALABLE-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; SCALABLE-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -1801,54 +1153,7 @@ define void @load_store_factor8(ptr %p) { ; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; CHECK-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; CHECK-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; CHECK-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; CHECK-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; CHECK-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; CHECK-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; CHECK-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; CHECK-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; CHECK-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; 
CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; CHECK-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; CHECK-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; CHECK-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; CHECK-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; CHECK-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; CHECK-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; CHECK-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; CHECK-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; CHECK-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; CHECK-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; CHECK-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; CHECK-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; CHECK-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; CHECK-NEXT: [[OFFSET7:%.*]] = add i64 [[OFFSET6]], 1 -; CHECK-NEXT: [[Q7:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET7]] -; CHECK-NEXT: [[X7:%.*]] = load i64, ptr [[Q7]], align 8 -; CHECK-NEXT: [[Y7:%.*]] = add i64 [[X7]], 8 -; CHECK-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -1891,54 +1196,7 @@ define void @load_store_factor8(ptr %p) { ; FIXED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; FIXED-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; FIXED-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; FIXED-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; FIXED-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; FIXED-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; FIXED-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; FIXED-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; FIXED-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; FIXED-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; FIXED-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; FIXED-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; FIXED-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; FIXED-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; FIXED-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; FIXED-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; FIXED-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; FIXED-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; FIXED-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; FIXED-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; FIXED-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; FIXED-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; FIXED-NEXT: store i64 
[[Y5]], ptr [[Q5]], align 8 -; FIXED-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; FIXED-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; FIXED-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; FIXED-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; FIXED-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; FIXED-NEXT: [[OFFSET7:%.*]] = add i64 [[OFFSET6]], 1 -; FIXED-NEXT: [[Q7:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET7]] -; FIXED-NEXT: [[X7:%.*]] = load i64, ptr [[Q7]], align 8 -; FIXED-NEXT: [[Y7:%.*]] = add i64 [[X7]], 8 -; FIXED-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -1981,54 +1239,7 @@ define void @load_store_factor8(ptr %p) { ; SCALABLE-NEXT: [[TMP25:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[Y0:%.*]] = add i64 [[X0]], 1 -; SCALABLE-NEXT: store i64 [[Y0]], ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[Y1:%.*]] = add i64 [[X1]], 2 -; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[OFFSET2:%.*]] = add i64 [[OFFSET1]], 1 -; SCALABLE-NEXT: [[Q2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET2]] -; SCALABLE-NEXT: [[X2:%.*]] = load i64, ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[Y2:%.*]] = add i64 [[X2]], 3 -; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8 -; SCALABLE-NEXT: [[OFFSET3:%.*]] = add i64 [[OFFSET2]], 1 -; SCALABLE-NEXT: [[Q3:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET3]] -; SCALABLE-NEXT: [[X3:%.*]] = load i64, ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[Y3:%.*]] = add i64 [[X3]], 4 -; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8 -; SCALABLE-NEXT: [[OFFSET4:%.*]] = add i64 [[OFFSET3]], 1 -; SCALABLE-NEXT: [[Q4:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET4]] -; SCALABLE-NEXT: [[X4:%.*]] = load i64, ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[Y4:%.*]] = add i64 [[X4]], 5 -; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8 -; SCALABLE-NEXT: [[OFFSET5:%.*]] = add i64 [[OFFSET4]], 1 -; SCALABLE-NEXT: [[Q5:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET5]] -; SCALABLE-NEXT: [[X5:%.*]] = load i64, ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[Y5:%.*]] = add i64 [[X5]], 6 -; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8 -; SCALABLE-NEXT: [[OFFSET6:%.*]] = add i64 [[OFFSET5]], 1 -; SCALABLE-NEXT: [[Q6:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET6]] -; SCALABLE-NEXT: [[X6:%.*]] = load i64, ptr [[Q6]], align 8 -; SCALABLE-NEXT: [[Y6:%.*]] = add i64 [[X6]], 7 -; SCALABLE-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8 -; SCALABLE-NEXT: [[OFFSET7:%.*]] = add i64 [[OFFSET6]], 1 -; SCALABLE-NEXT: [[Q7:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET7]] -; SCALABLE-NEXT: 
[[X7:%.*]] = load i64, ptr [[Q7]], align 8 -; SCALABLE-NEXT: [[Y7:%.*]] = add i64 [[X7]], 8 -; SCALABLE-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -2118,23 +1329,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; CHECK-NEXT: [[RES:%.*]] = add i32 [[X0]], [[X1]] -; CHECK-NEXT: [[DST:%.*]] = getelementptr i32, ptr [[Q]], i64 [[I]] -; CHECK-NEXT: store i32 [[RES]], ptr [[DST]], align 4 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -2157,23 +1352,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; FIXED-NEXT: [[RES:%.*]] = add i32 [[X0]], [[X1]] -; FIXED-NEXT: [[DST:%.*]] = getelementptr i32, ptr [[Q]], i64 [[I]] -; FIXED-NEXT: store i32 [[RES]], ptr [[DST]], align 4 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -2202,23 +1381,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) { ; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; 
SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i32, ptr [[Q1]], align 4 -; SCALABLE-NEXT: [[RES:%.*]] = add i32 [[X0]], [[X1]] -; SCALABLE-NEXT: [[DST:%.*]] = getelementptr i32, ptr [[Q]], i64 [[I]] -; SCALABLE-NEXT: store i32 [[RES]], ptr [[DST]], align 4 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; @@ -2273,23 +1436,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; CHECK-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; CHECK-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; CHECK-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; CHECK-NEXT: [[RES:%.*]] = add i64 [[X0]], [[X1]] -; CHECK-NEXT: [[DST:%.*]] = getelementptr i64, ptr [[Q]], i64 [[I]] -; CHECK-NEXT: store i64 [[RES]], ptr [[DST]], align 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -2312,23 +1459,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; FIXED: middle.block: -; FIXED-NEXT: br label [[EXIT:%.*]] -; FIXED: scalar.ph: ; FIXED-NEXT: br label [[LOOP:%.*]] -; FIXED: loop: -; FIXED-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; FIXED-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; FIXED-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; FIXED-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; FIXED-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; FIXED-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; FIXED-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; FIXED-NEXT: [[RES:%.*]] = add i64 [[X0]], [[X1]] -; FIXED-NEXT: [[DST:%.*]] = getelementptr i64, ptr [[Q]], i64 [[I]] -; FIXED-NEXT: store i64 [[RES]], ptr [[DST]], align 8 -; FIXED-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; FIXED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; FIXED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; FIXED: exit: ; FIXED-NEXT: ret void ; @@ -2357,23 +1488,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) { ; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; SCALABLE: middle.block: -; SCALABLE-NEXT: br label [[EXIT:%.*]] -; SCALABLE: scalar.ph: ; SCALABLE-NEXT: br label [[LOOP:%.*]] -; SCALABLE: loop: -; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, 
[[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1 -; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]] -; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8 -; SCALABLE-NEXT: [[OFFSET1:%.*]] = add i64 [[OFFSET0]], 1 -; SCALABLE-NEXT: [[Q1:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET1]] -; SCALABLE-NEXT: [[X1:%.*]] = load i64, ptr [[Q1]], align 8 -; SCALABLE-NEXT: [[RES:%.*]] = add i64 [[X0]], [[X1]] -; SCALABLE-NEXT: [[DST:%.*]] = getelementptr i64, ptr [[Q]], i64 [[I]] -; SCALABLE-NEXT: store i64 [[RES]], ptr [[DST]], align 8 -; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; SCALABLE: exit: ; SCALABLE-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll index a30aebb16a8c1..ef0f0cf8777e7 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll @@ -96,7 +96,8 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; PREDICATED_DATA: middle.block: ; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_DATA: scalar.ph: +; PREDICATED_DATA: for.end: +; PREDICATED_DATA-NEXT: ret void ; ; PREDICATED_DATA-WITH-EVL-LABEL: define void @masked_strided_factor2 ; PREDICATED_DATA-WITH-EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0:[0-9]+]] { @@ -135,9 +136,13 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.store.nxv32i8.p0( [[INTERLEAVED_VEC]], ptr align 1 [[TMP10]], [[INTERLEAVED_MASK4]], i32 [[INTERLEAVE_EVL3]]) ; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]] ; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]] +; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] +; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP12:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; PREDICATED_DATA-WITH-EVL: middle.block: ; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_DATA-WITH-EVL: scalar.ph: +; PREDICATED_DATA-WITH-EVL: for.end: +; PREDICATED_DATA-WITH-EVL-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 @@ -270,10 +275,11 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP1]] ; PREDICATED_DATA-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; PREDICATED_DATA-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; PREDICATED_DATA-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; PREDICATED_DATA-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; PREDICATED_DATA: middle.block: ; PREDICATED_DATA-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_DATA: scalar.ph: +; PREDICATED_DATA: for.end: +; 
PREDICATED_DATA-NEXT: ret void ; ; PREDICATED_DATA-WITH-EVL-LABEL: define void @masked_strided_factor4 ; PREDICATED_DATA-WITH-EVL-SAME: (ptr noalias readonly captures(none) [[P:%.*]], ptr noalias captures(none) [[Q:%.*]], i8 zeroext [[GUARD:%.*]]) #[[ATTR0]] { @@ -316,9 +322,13 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali ; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.store.nxv64i8.p0( [[INTERLEAVED_VEC]], ptr align 1 [[TMP15]], [[INTERLEAVED_MASK4]], i32 [[INTERLEAVE_EVL3]]) ; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]] ; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]] +; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] +; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP16:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 +; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; PREDICATED_DATA-WITH-EVL: middle.block: ; PREDICATED_DATA-WITH-EVL-NEXT: br label [[FOR_END:%.*]] -; PREDICATED_DATA-WITH-EVL: scalar.ph: +; PREDICATED_DATA-WITH-EVL: for.end: +; PREDICATED_DATA-WITH-EVL-NEXT: ret void ; entry: %conv = zext i8 %guard to i32 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll index cf2f78b578981..328ee16a92db4 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll @@ -62,18 +62,7 @@ define void @load_store(ptr %p) { ; LMUL2-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; LMUL2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; LMUL2: middle.block: -; LMUL2-NEXT: br label [[FOR_END:%.*]] -; LMUL2: scalar.ph: ; LMUL2-NEXT: br label [[FOR_BODY:%.*]] -; LMUL2: for.body: -; LMUL2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; LMUL2-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]] -; LMUL2-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8 -; LMUL2-NEXT: [[W:%.*]] = add i64 [[V]], 1 -; LMUL2-NEXT: store i64 [[W]], ptr [[Q]], align 8 -; LMUL2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; LMUL2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; LMUL2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; LMUL2: for.end: ; LMUL2-NEXT: ret void ; @@ -96,18 +85,7 @@ define void @load_store(ptr %p) { ; LMUL4-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; LMUL4-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; LMUL4: middle.block: -; LMUL4-NEXT: br label [[FOR_END:%.*]] -; LMUL4: scalar.ph: ; LMUL4-NEXT: br label [[FOR_BODY:%.*]] -; LMUL4: for.body: -; LMUL4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; LMUL4-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]] -; LMUL4-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8 -; LMUL4-NEXT: [[W:%.*]] = add i64 [[V]], 1 -; LMUL4-NEXT: store i64 [[W]], ptr [[Q]], align 8 -; LMUL4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; LMUL4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; LMUL4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; LMUL4: for.end: ; LMUL4-NEXT: ret void ; @@ -130,18 +108,7 @@ define void @load_store(ptr %p) { ; LMUL8-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; LMUL8-NEXT: br i1 [[TMP11]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; LMUL8: middle.block: -; LMUL8-NEXT: br label [[FOR_END:%.*]] -; LMUL8: scalar.ph: ; LMUL8-NEXT: br label [[FOR_BODY:%.*]] -; LMUL8: for.body: -; LMUL8-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; LMUL8-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]] -; LMUL8-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8 -; LMUL8-NEXT: [[W:%.*]] = add i64 [[V]], 1 -; LMUL8-NEXT: store i64 [[W]], ptr [[Q]], align 8 -; LMUL8-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; LMUL8-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; LMUL8-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; LMUL8: for.end: ; LMUL8-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll index 53907fadf8187..8ef53cade01ac 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll @@ -133,21 +133,7 @@ define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: call void @llvm.vp.store.nxv4i8.p0( [[TMP7]], ptr align 1 [[TMP12]], splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[I_08]] -; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP15]], 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 [[I_08]] -; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP16]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 8 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -186,21 +172,7 @@ define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0( [[TMP11]], ptr align 1 [[TMP4]], splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[I_08]] -; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP8]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 16 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -240,21 +212,7 @@ define void @trip32_i8(ptr noalias 
nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0( [[TMP11]], ptr align 1 [[TMP4]], splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[I_08]] -; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP8]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 32 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -293,21 +251,7 @@ define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0( [[TMP7]], ptr align 1 [[DST]], splat (i1 true), i32 [[TMP5]]) ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]] -; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP8]], 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[I_08]] -; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP9]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 24 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll index 86b3a7e32c852..06b47aa6551a0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll @@ -34,32 +34,13 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) { ; VLENUNK-NEXT: [[TMP17:%.*]] = add [[PREDPHI]], [[BROADCAST_SPLAT]] ; VLENUNK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] ; VLENUNK-NEXT: call void @llvm.vp.store.nxv4i32.p0( [[TMP17]], ptr align 4 [[TMP18]], splat (i1 true), i32 [[TMP7]]) -; VLENUNK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64 -; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]] -; VLENUNK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] +; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] +; VLENUNK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; VLENUNK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; VLENUNK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VLENUNK: middle.block: -; VLENUNK-NEXT: br label [[FOR_END:%.*]] -; VLENUNK: scalar.ph: -; VLENUNK-NEXT: br label [[FOR_BODY:%.*]] -; VLENUNK: for.body: -; VLENUNK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; VLENUNK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 512 -; VLENUNK-NEXT: br i1 [[ICMP]], label [[DO_LOAD:%.*]], label [[LATCH]] -; VLENUNK: do_load: -; VLENUNK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; VLENUNK-NEXT: [[ELEM:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; VLENUNK-NEXT: br label [[LATCH]] -; VLENUNK: latch: -; VLENUNK-NEXT: [[PHI:%.*]] = phi i32 [ [[ELEM]], [[DO_LOAD]] ], [ 0, [[FOR_BODY]] ] -; VLENUNK-NEXT: [[ADD:%.*]] = add i32 [[PHI]], [[V]] -; VLENUNK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; VLENUNK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; VLENUNK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VLENUNK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; VLENUNK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] +; VLENUNK-NEXT: br label [[LATCH:%.*]] ; VLENUNK: for.end: ; VLENUNK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll index 7d32302abfe24..89819f2be4967 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll @@ -52,8 +52,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; RV32-NEXT: [[TMP18:%.*]] = fadd [[WIDE_MASKED_GATHER6]], [[TMP17]] ; RV32-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], [[VEC_IND]] ; RV32-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0( [[TMP18]], align 8 [[TMP19]], [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] -; RV32-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64 -; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] +; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; RV32-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; RV32-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV32-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] @@ -121,8 +120,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; RV64-NEXT: [[TMP18:%.*]] = fadd [[WIDE_MASKED_GATHER6]], [[TMP17]] ; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], [[VEC_IND]] ; RV64-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0( [[TMP18]], align 8 [[TMP19]], [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] -; RV64-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64 -; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] +; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; RV64-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; RV64-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV64-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll index e0bd8aa3a7a2a..0a9b1e0af48bc 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll +++ 
b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll @@ -108,7 +108,8 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]] ; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]]) ; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-V: scalar.ph: +; FIXED-V: for.exit: +; FIXED-V-NEXT: ret i32 [[TMP15]] ; ; FIXED-ZVQDOTQ-LABEL: define i32 @vqdot( ; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -143,7 +144,8 @@ define i32 @vqdot(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]]) ; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-ZVQDOTQ: scalar.ph: +; FIXED-ZVQDOTQ: for.exit: +; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -263,12 +265,13 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[TMP13]] = add <8 x i32> [[TMP11]], [[VEC_PHI1]] ; FIXED-V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FIXED-V: middle.block: ; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]] ; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]]) ; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-V: scalar.ph: +; FIXED-V: for.exit: +; FIXED-V-NEXT: ret i32 [[TMP15]] ; ; FIXED-ZVQDOTQ-LABEL: define i32 @vqdotu( ; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -298,12 +301,13 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]]) ; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FIXED-ZVQDOTQ: middle.block: ; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]]) ; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-ZVQDOTQ: scalar.ph: +; FIXED-ZVQDOTQ: for.exit: +; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -423,12 +427,13 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[TMP13]] = add <8 x i32> [[TMP11]], [[VEC_PHI1]] ; FIXED-V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FIXED-V: middle.block: ; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]] ; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]]) ; FIXED-V-NEXT: br label 
[[FOR_EXIT:%.*]] -; FIXED-V: scalar.ph: +; FIXED-V: for.exit: +; FIXED-V-NEXT: ret i32 [[TMP15]] ; ; FIXED-ZVQDOTQ-LABEL: define i32 @vqdotsu( ; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -458,12 +463,13 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]]) ; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FIXED-ZVQDOTQ: middle.block: ; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]]) ; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-ZVQDOTQ: scalar.ph: +; FIXED-ZVQDOTQ: for.exit: +; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -582,12 +588,13 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; FIXED-V-NEXT: [[TMP13]] = add <8 x i32> [[TMP11]], [[VEC_PHI1]] ; FIXED-V-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-V-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; FIXED-V-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FIXED-V: middle.block: ; FIXED-V-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP13]], [[TMP12]] ; FIXED-V-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]]) ; FIXED-V-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-V: scalar.ph: +; FIXED-V: for.exit: +; FIXED-V-NEXT: ret i32 [[TMP15]] ; ; FIXED-ZVQDOTQ-LABEL: define i32 @vqdotsu2( ; FIXED-ZVQDOTQ-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { @@ -617,12 +624,13 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 { ; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]]) ; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FIXED-ZVQDOTQ: middle.block: ; FIXED-ZVQDOTQ-NEXT: [[BIN_RDX:%.*]] = add <2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] ; FIXED-ZVQDOTQ-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[BIN_RDX]]) ; FIXED-ZVQDOTQ-NEXT: br label [[FOR_EXIT:%.*]] -; FIXED-ZVQDOTQ: scalar.ph: +; FIXED-ZVQDOTQ: for.exit: +; FIXED-ZVQDOTQ-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll index d0068e134fd32..65928f80a76f6 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll @@ 
-43,37 +43,13 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64 ; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP23]], i64 poison, i64 [[INDEX]] ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[PREDPHI]] ; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0( zeroinitializer, ptr align 2 [[TMP24]], [[TMP22]], i32 [[TMP25]]) -; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[TMP25]] to i64 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP26]], [[INDEX]] -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP26]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[C_1:%.*]] = icmp ule i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[C_1]], label [[THEN_1:%.*]], label [[ELSE_1:%.*]] -; CHECK: then.1: -; CHECK-NEXT: [[C_2:%.*]] = icmp ule i64 [[IV]], [[B]] -; CHECK-NEXT: br i1 [[C_2]], label [[ELSE_1]], label [[MERGE:%.*]] -; CHECK: else.1: -; CHECK-NEXT: [[C_3:%.*]] = icmp ule i64 [[IV]], [[C]] -; CHECK-NEXT: br i1 [[C_3]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] -; CHECK: then.2: -; CHECK-NEXT: br label [[MERGE]] -; CHECK: merge: -; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ poison, [[THEN_1]] ], [ [[IV]], [[THEN_2]] ] -; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[IDX]] -; CHECK-NEXT: store i16 0, ptr [[GETELEMENTPTR]], align 2 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV]], 1000 -; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll index 3739f85afe740..8d4d282a5236d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll @@ -37,27 +37,7 @@ define void @test(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_COND]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT1:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_COND1:%.*]] -; CHECK: for.cond: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH1:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ] -; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2 -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48 -; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52 -; CHECK-NEXT: [[TRUNC_I32:%.*]] = trunc i64 [[ASHR]] to i32 -; CHECK-NEXT: br i1 [[CMP_SLT]], label [[COND_FALSE:%.*]], label [[FOR_BODY]] -; CHECK: cond.false: -; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32 -; CHECK-NEXT: br label [[FOR_BODY]] -; CHECK: for.body: -; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], [[FOR_COND1]] ], [ [[ZEXT]], [[COND_FALSE]] ] -; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 
8 -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8 -; CHECK-NEXT: store i8 [[TRUNC]], ptr [[P]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV]], 8 -; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND1]], label [[EXIT1]] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll index 9b6bc684249f1..735fb769de8b9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll @@ -29,20 +29,8 @@ define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ADD_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -85,20 +73,8 @@ define i32 @sub(ptr %a, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP3]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 1024, %[[SCALAR_PH]] ], [ [[SUB:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[SUB]] = sub i32 [[RDX]], [[X]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[SUB_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUB_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; entry: br label %loop @@ -144,23 +120,8 @@ define i32 @addsub(ptr %a, ptr %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP5]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[SUB:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RDX]], [[X]] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[Y:%.*]] = load i32, ptr 
[[GEP_B]], align 4 -; CHECK-NEXT: [[SUB]] = sub i32 [[ADD]], [[Y]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[SUB_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[LOOP]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUB_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP8]] ; entry: br label %loop @@ -209,20 +170,8 @@ define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[OR]] = or i32 [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[OR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -267,20 +216,8 @@ define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[AND]] = and i32 [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[AND_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -325,20 +262,8 @@ define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32( [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[XOR]] = xor i32 [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: 
[[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[XOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP11]] ; entry: br label %for.body @@ -384,21 +309,8 @@ define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP11]], [[SUM_010]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP12]] ; entry: br label %for.body @@ -445,21 +357,8 @@ define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP11]], [[SUM_010]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP12]] ; entry: br label %for.body @@ -505,20 +404,8 @@ define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[ADD:%.*]], 
%[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[ADD_LCSSA]] +; CHECK-NEXT: ret float [[TMP11]] ; entry: br label %for.body @@ -561,20 +448,8 @@ define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "tar ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP11:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, [[TMP8]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP10]], [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret half [[ADD_LCSSA]] +; CHECK-NEXT: ret half [[TMP11]] ; entry: br label %for.body @@ -744,21 +619,8 @@ define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt float [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret float [[TMP12]] ; entry: br label %for.body @@ -803,21 +665,8 @@ define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) # ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: 
[[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt half [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret half [[TMP12]] ; entry: br label %for.body @@ -862,21 +711,8 @@ define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt bfloat [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret bfloat [[TMP12]] ; entry: br label %for.body @@ -923,21 +759,8 @@ define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt float [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ 
[[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret float [[TMP12]] ; entry: br label %for.body @@ -982,21 +805,8 @@ define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) # ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt half [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret half [[TMP12]] ; entry: br label %for.body @@ -1041,21 +851,8 @@ define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP12:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16( [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt bfloat [[TMP11]], [[SUM_07]] -; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]] +; CHECK-NEXT: ret bfloat [[TMP12]] ; entry: br label %for.body @@ -1243,22 +1040,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP16:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; 
CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]]) -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[MULADD_LCSSA]] +; CHECK-NEXT: ret float [[TMP16]] ; entry: br label %for.body @@ -1305,22 +1088,8 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP16:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, [[TMP9]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[TMP12:%.*]] = load half, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP11]], half [[TMP12]], half [[SUM_07]]) -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret half [[MULADD_LCSSA]] +; CHECK-NEXT: ret half [[TMP16]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll index 5876a6bf32848..850a6cb7ddb0d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll @@ -12,11 +12,9 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; CHECK-NEXT: [[TMP0:%.*]] = call @llvm.stepvector.nxv4i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul [[TMP0]], splat (i64 2) ; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP1]] -; CHECK-NEXT: [[TMP2:%.*]] = call @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul [[TMP2]], splat (i64 3) +; CHECK-NEXT: [[TMP3:%.*]] = mul [[TMP0]], splat (i64 3) ; CHECK-NEXT: [[INDUCTION1:%.*]] = add zeroinitializer, [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = call @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul [[TMP4]], splat (i64 4) +; CHECK-NEXT: [[TMP5:%.*]] = mul [[TMP0]], splat (i64 4) ; CHECK-NEXT: [[INDUCTION2:%.*]] = add zeroinitializer, [[TMP5]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: @@ -30,12 +28,10 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; CHECK-NEXT: [[TMP8:%.*]] = mul i64 4, [[TMP7]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = 
insertelement poison, i64 [[TMP8]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP6]] to i64 -; CHECK-NEXT: [[TMP10:%.*]] = mul i64 3, [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = mul i64 3, [[TMP7]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement poison, i64 [[TMP10]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector [[BROADCAST_SPLATINSERT5]], poison, zeroinitializer -; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP6]] to i64 -; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP11]] +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP7]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement poison, i64 [[TMP12]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector [[BROADCAST_SPLATINSERT7]], poison, zeroinitializer ; CHECK-NEXT: [[TMP13:%.*]] = sub [[VEC_IND]], splat (i64 1) @@ -53,9 +49,8 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; CHECK-NEXT: [[INTERLEAVE_EVL:%.*]] = mul nuw nsw i32 [[TMP6]], 3 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call @llvm.vector.interleave3.nxv12i8( [[WIDE_MASKED_GATHER]], [[WIDE_MASKED_GATHER9]], [[WIDE_MASKED_GATHER10]]) ; CHECK-NEXT: call void @llvm.vp.store.nxv12i8.p0( [[INTERLEAVED_VEC]], ptr align 1 [[TMP21]], splat (i1 true), i32 [[INTERLEAVE_EVL]]) -; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP6]] to i64 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]] -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]] +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT8]] ; CHECK-NEXT: [[VEC_IND_NEXT11]] = add [[VEC_IND3]], [[BROADCAST_SPLAT6]] ; CHECK-NEXT: [[VEC_IND_NEXT12]] = add [[VEC_IND4]], [[BROADCAST_SPLAT]] @@ -63,36 +58,6 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[WIDE_IV_0:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_0_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[WIDE_IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_1_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[WIDE_IV_2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_2_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[WIDE_IV_0_SUB:%.*]] = sub i64 [[WIDE_IV_0]], 1 -; CHECK-NEXT: [[A_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_0_SUB]] -; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[A_GEP0]], align 1 -; CHECK-NEXT: [[WIDE_IV_1_SUB:%.*]] = sub i64 [[WIDE_IV_1]], 1 -; CHECK-NEXT: [[B_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_1_SUB]] -; CHECK-NEXT: [[B:%.*]] = load i8, ptr [[B_GEP0]], align 1 -; CHECK-NEXT: [[WIDE_IV_2_SUB:%.*]] = sub i64 [[WIDE_IV_2]], 1 -; CHECK-NEXT: [[C_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_2_SUB]] -; CHECK-NEXT: [[C:%.*]] = load i8, ptr [[C_GEP0]], align 1 -; CHECK-NEXT: [[IV_MUL:%.*]] = mul i64 [[IV]], 3 -; CHECK-NEXT: [[BASE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IV_MUL]] -; CHECK-NEXT: [[A_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 0 -; CHECK-NEXT: store i8 [[A]], ptr [[A_GEP1]], align 1 -; CHECK-NEXT: 
[[B_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 1 -; CHECK-NEXT: store i8 [[B]], ptr [[B_GEP1]], align 1 -; CHECK-NEXT: [[C_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 2 -; CHECK-NEXT: store i8 [[C]], ptr [[C_GEP1]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[WIDE_IV_0_NEXT]] = add i64 [[WIDE_IV_0]], 2 -; CHECK-NEXT: [[WIDE_IV_1_NEXT]] = add i64 [[WIDE_IV_1]], 3 -; CHECK-NEXT: [[WIDE_IV_2_NEXT]] = add i64 [[WIDE_IV_2]], 4 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -104,11 +69,9 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; NO-REG-PRESSURE-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.stepvector.nxv8i64() ; NO-REG-PRESSURE-CHECK-NEXT: [[TMP1:%.*]] = mul [[TMP0]], splat (i64 2) ; NO-REG-PRESSURE-CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP1]] -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.stepvector.nxv8i64() -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP3:%.*]] = mul [[TMP2]], splat (i64 3) +; NO-REG-PRESSURE-CHECK-NEXT: [[TMP3:%.*]] = mul [[TMP0]], splat (i64 3) ; NO-REG-PRESSURE-CHECK-NEXT: [[INDUCTION1:%.*]] = add zeroinitializer, [[TMP3]] -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.stepvector.nxv8i64() -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP5:%.*]] = mul [[TMP4]], splat (i64 4) +; NO-REG-PRESSURE-CHECK-NEXT: [[TMP5:%.*]] = mul [[TMP0]], splat (i64 4) ; NO-REG-PRESSURE-CHECK-NEXT: [[INDUCTION2:%.*]] = add zeroinitializer, [[TMP5]] ; NO-REG-PRESSURE-CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; NO-REG-PRESSURE-CHECK: [[VECTOR_BODY]]: @@ -122,12 +85,10 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; NO-REG-PRESSURE-CHECK-NEXT: [[TMP8:%.*]] = mul i64 4, [[TMP7]] ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[TMP8]], i64 0 ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP6]] to i64 -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP10:%.*]] = mul i64 3, [[TMP9]] +; NO-REG-PRESSURE-CHECK-NEXT: [[TMP10:%.*]] = mul i64 3, [[TMP7]] ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement poison, i64 [[TMP10]], i64 0 ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector [[BROADCAST_SPLATINSERT5]], poison, zeroinitializer -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP6]] to i64 -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP11]] +; NO-REG-PRESSURE-CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP7]] ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement poison, i64 [[TMP12]], i64 0 ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector [[BROADCAST_SPLATINSERT7]], poison, zeroinitializer ; NO-REG-PRESSURE-CHECK-NEXT: [[TMP13:%.*]] = sub [[VEC_IND]], splat (i64 1) @@ -145,9 +106,8 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; NO-REG-PRESSURE-CHECK-NEXT: [[INTERLEAVE_EVL:%.*]] = mul nuw nsw i32 [[TMP6]], 3 ; NO-REG-PRESSURE-CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call @llvm.vector.interleave3.nxv24i8( [[WIDE_MASKED_GATHER]], [[WIDE_MASKED_GATHER9]], [[WIDE_MASKED_GATHER10]]) ; NO-REG-PRESSURE-CHECK-NEXT: call void @llvm.vp.store.nxv24i8.p0( [[INTERLEAVED_VEC]], ptr align 1 [[TMP21]], splat (i1 true), i32 [[INTERLEAVE_EVL]]) -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP6]] to i64 -; 
NO-REG-PRESSURE-CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]] -; NO-REG-PRESSURE-CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]] +; NO-REG-PRESSURE-CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]] +; NO-REG-PRESSURE-CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT8]] ; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND_NEXT11]] = add [[VEC_IND3]], [[BROADCAST_SPLAT6]] ; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND_NEXT12]] = add [[VEC_IND4]], [[BROADCAST_SPLAT]] @@ -155,36 +115,6 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; NO-REG-PRESSURE-CHECK-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-REG-PRESSURE-CHECK: [[MIDDLE_BLOCK]]: ; NO-REG-PRESSURE-CHECK-NEXT: br label %[[EXIT:.*]] -; NO-REG-PRESSURE-CHECK: [[SCALAR_PH:.*]]: -; NO-REG-PRESSURE-CHECK-NEXT: br label %[[LOOP:.*]] -; NO-REG-PRESSURE-CHECK: [[LOOP]]: -; NO-REG-PRESSURE-CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_0:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_0_NEXT:%.*]], %[[LOOP]] ] -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_1_NEXT:%.*]], %[[LOOP]] ] -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[WIDE_IV_2_NEXT:%.*]], %[[LOOP]] ] -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_0_SUB:%.*]] = sub i64 [[WIDE_IV_0]], 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[A_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_0_SUB]] -; NO-REG-PRESSURE-CHECK-NEXT: [[A:%.*]] = load i8, ptr [[A_GEP0]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_1_SUB:%.*]] = sub i64 [[WIDE_IV_1]], 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[B_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_1_SUB]] -; NO-REG-PRESSURE-CHECK-NEXT: [[B:%.*]] = load i8, ptr [[B_GEP0]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_2_SUB:%.*]] = sub i64 [[WIDE_IV_2]], 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[C_GEP0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[WIDE_IV_2_SUB]] -; NO-REG-PRESSURE-CHECK-NEXT: [[C:%.*]] = load i8, ptr [[C_GEP0]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[IV_MUL:%.*]] = mul i64 [[IV]], 3 -; NO-REG-PRESSURE-CHECK-NEXT: [[BASE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IV_MUL]] -; NO-REG-PRESSURE-CHECK-NEXT: [[A_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 0 -; NO-REG-PRESSURE-CHECK-NEXT: store i8 [[A]], ptr [[A_GEP1]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[B_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 1 -; NO-REG-PRESSURE-CHECK-NEXT: store i8 [[B]], ptr [[B_GEP1]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[C_GEP1:%.*]] = getelementptr i8, ptr [[BASE]], i8 2 -; NO-REG-PRESSURE-CHECK-NEXT: store i8 [[C]], ptr [[C_GEP1]], align 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_0_NEXT]] = add i64 [[WIDE_IV_0]], 2 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_1_NEXT]] = add i64 [[WIDE_IV_1]], 3 -; NO-REG-PRESSURE-CHECK-NEXT: [[WIDE_IV_2_NEXT]] = add i64 [[WIDE_IV_2]], 4 -; NO-REG-PRESSURE-CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV]], 1024 -; NO-REG-PRESSURE-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; NO-REG-PRESSURE-CHECK: [[EXIT]]: ; NO-REG-PRESSURE-CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll 
b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll index 7b8404abdc54b..b80368df96089 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll @@ -21,18 +21,8 @@ define float @s311(float %a_0, float %s311_sum) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ [[S311_SUM]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED_NEXT]] = fadd float [[A_0]], [[RED]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1200 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll index a165dde0d217e..5ca9bfdb29c2c 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll @@ -53,10 +53,9 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) { ; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; RV64: [[MIDDLE_BLOCK]]: -; RV64-NEXT: br [[EXIT:label %.*]] -; RV64: [[SCALAR_PH:.*:]] -; RV64-NEXT: br label %[[FOR_BODY:.*]] -; RV64: [[FOR_BODY]]: +; RV64-NEXT: br label %[[EXIT:.*]] +; RV64: [[EXIT]]: +; RV64-NEXT: ret void ; ; RV32-LABEL: define void @vector_reverse_i32( ; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -93,10 +92,9 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) { ; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; RV32: [[MIDDLE_BLOCK]]: -; RV32-NEXT: br [[EXIT:label %.*]] -; RV32: [[SCALAR_PH:.*:]] -; RV32-NEXT: br label %[[FOR_BODY:.*]] -; RV32: [[FOR_BODY]]: +; RV32-NEXT: br label %[[EXIT:.*]] +; RV32: [[EXIT]]: +; RV32-NEXT: ret void ; ; RV64-UF2-LABEL: define void @vector_reverse_i32( ; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] { @@ -718,10 +716,9 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) { ; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; RV64: [[MIDDLE_BLOCK]]: -; RV64-NEXT: br [[EXIT:label %.*]] -; RV64: [[SCALAR_PH:.*:]] -; RV64-NEXT: br label %[[FOR_BODY:.*]] -; RV64: [[FOR_BODY]]: +; RV64-NEXT: br label %[[EXIT:.*]] +; RV64: [[EXIT]]: +; RV64-NEXT: ret void ; ; RV32-LABEL: define void @vector_reverse_f32_simplify( ; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { @@ -758,10 +755,9 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) { ; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 
[[AVL_NEXT]], 0 ; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; RV32: [[MIDDLE_BLOCK]]: -; RV32-NEXT: br [[EXIT:label %.*]] -; RV32: [[SCALAR_PH:.*:]] -; RV32-NEXT: br label %[[FOR_BODY:.*]] -; RV32: [[FOR_BODY]]: +; RV32-NEXT: br label %[[EXIT:.*]] +; RV32: [[EXIT]]: +; RV32-NEXT: ret void ; ; RV64-UF2-LABEL: define void @vector_reverse_f32_simplify( ; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll index ecde1646ab2b4..e046816b694c0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll @@ -28,19 +28,7 @@ define void @test(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200 -; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -81,19 +69,7 @@ define void @test_may_clobber(ptr %p) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 100 -; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -137,19 +113,7 @@ define void @trivial_due_max_vscale(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192 -; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: 
[[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -193,19 +157,7 @@ define void @no_high_lmul_or_interleave(ptr %p) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024 -; CHECK-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll index 544ddc539c832..7330ce61515d9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll @@ -27,18 +27,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -84,18 +73,7 @@ define void @vector_add_i32(ptr noalias nocapture %a, i32 %v, i64 %n) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ELEM]], [[V]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -179,18 +157,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[TMP7:%.*]] = 
icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8 -; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] -; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -235,23 +202,9 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[TMP9]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8 -; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[AADDR]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]] -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: -; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP11]] ; entry: br label %for.body @@ -292,16 +245,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -340,16 +284,7 @@ define void @splat_ptr(ptr noalias nocapture %a, ptr %v, i64 %n) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label 
[[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store ptr [[V]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll index a596c639d08d1..3c90908b0a08f 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll @@ -28,18 +28,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -84,18 +73,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8 -; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] -; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -140,23 +118,9 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64( [[TMP11]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds 
i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8 -; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[AADDR]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]] -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: -; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP14]] ; entry: br label %for.body @@ -197,16 +161,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -246,17 +201,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -356,18 +301,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] -; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: 
ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll index 4bfe9a4487604..8971b0cadfa48 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll @@ -29,21 +29,8 @@ define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) { ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0 ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP12]], [[X]] -; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[COND_LCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -91,21 +78,8 @@ define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) { ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0 ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt float [[TMP12]], [[X]] -; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[COND_LCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -151,21 +125,8 @@ define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) { ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 7, i32 3 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 3, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr 
[[V]], i64 [[TMP12]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4 -; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3 -; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 7 -; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]] -; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[DOTLCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -211,21 +172,8 @@ define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64 ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[B]], i32 [[A]] ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[A]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4 -; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3 -; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 [[B]] -; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]] -; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[DOTLCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -271,21 +219,8 @@ define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) { ; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 1, i32 2 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[TMP12]] -; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4 -; CHECK-NEXT: [[TMP16:%.*]] = fcmp fast ueq float [[TMP15]], 3.000000e+00 -; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 1 -; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1 -; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]] -; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[DOTLCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -373,29 +308,8 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1 ; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 1, i32 0 ; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ 
0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[I_013]] -; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP14]], 35 -; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[FOR_INC]] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[I_013]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP15]], 2 -; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[CMP3]], i32 1, i32 [[R_012]] -; CHECK-NEXT: br label %[[FOR_INC]] -; CHECK: [[FOR_INC]]: -; CHECK-NEXT: [[R_1]] = phi i32 [ [[R_012]], %[[FOR_BODY]] ], [ [[SPEC_SELECT]], %[[IF_THEN]] ] -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]] ; CHECK: [[FOR_END_LOOPEXIT]]: -; CHECK-NEXT: [[R_1_LCSSA:%.*]] = phi i32 [ [[R_1]], %[[FOR_INC]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[R_1_LCSSA]] +; CHECK-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll index 4c84913eea23d..2fbc73ef74d16 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll @@ -26,25 +26,12 @@ define void @single_constant_stride_int_scaled(ptr %p) { ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP15]], splat (i1 true), i32 [[TMP11]]) ; CHECK-NEXT: [[TMP16:%.*]] = add [[WIDE_MASKED_GATHER]], splat (i32 1) ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP16]], align 4 [[TMP15]], splat (i1 true), i32 [[TMP11]]) -; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[SCALAR_PH:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], 8 -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -143,26 +130,12 @@ define void @single_constant_stride_int_iv(ptr %p) { ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP12]], splat (i1 true), i32 [[TMP7]]) ; CHECK-NEXT: [[TMP13:%.*]] = add [[WIDE_MASKED_GATHER]], splat (i32 1) ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP13]], align 4 [[TMP12]], splat (i1 true), i32 
[[TMP7]]) -; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP7]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4 -; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], 64 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -261,25 +234,12 @@ define void @single_constant_stride_ptr_iv(ptr %p) { ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP20]], align 4 [[VECTOR_GEP]], splat (i1 true), i32 [[TMP11]]) ; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64 ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP11]] to i64 -; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP9]] ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP12]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[SCALAR_PH:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[P]], [[SCALAR_PH1]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4 -; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1 -; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4 -; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 8 -; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -834,8 +794,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: [[TMP20:%.*]] = add [[WIDE_MASKED_GATHER]], splat (i32 1) ; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], [[TMP18]] ; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP20]], align 4 [[TMP21]], splat (i1 true), i32 [[TMP43]]), !alias.scope [[META9:![0-9]+]], !noalias [[META6]] -; STRIDED-NEXT: [[TMP46:%.*]] = zext i32 [[TMP43]] to i64 -; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP46]] +; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP44]] ; STRIDED-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; STRIDED-NEXT: [[TMP41:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; STRIDED-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP11:![0-9]+]] @@ -1184,21 +1143,16 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector [[DOTSPLATINSERT9]], poison, zeroinitializer ; STRIDED-NEXT: [[TMP18:%.*]] = mul [[TMP19]], [[DOTSPLAT10]] ; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI11]], [[TMP18]] -; STRIDED-NEXT: [[TMP27:%.*]] = call @llvm.stepvector.nxv4i64() -; STRIDED-NEXT: [[TMP21:%.*]] = mul [[TMP27]], [[DOTSPLAT10]] -; STRIDED-NEXT: [[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], [[TMP21]] +; STRIDED-NEXT: [[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], [[TMP18]] ; STRIDED-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[VECTOR_GEP7]], splat (i1 true), i32 [[TMP14]]), !alias.scope [[META13:![0-9]+]] ; STRIDED-NEXT: [[TMP30:%.*]] = add [[WIDE_MASKED_GATHER]], splat (i32 1) ; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP30]], align 4 [[VECTOR_GEP]], splat (i1 true), i32 [[TMP14]]), !alias.scope [[META16:![0-9]+]], !noalias [[META13]] ; STRIDED-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 ; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] -; STRIDED-NEXT: [[TMP20:%.*]] = zext i32 [[TMP14]] to i64 -; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP20]] +; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP16]] ; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP25]] -; STRIDED-NEXT: [[TMP22:%.*]] = zext i32 [[TMP14]] to i64 -; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP22]] -; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP17]] +; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP25]] ; STRIDED-NEXT: [[TMP23:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; STRIDED-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; STRIDED: middle.block: @@ -1270,9 +1224,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-UF2-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector [[BROADCAST_SPLATINSERT10]], poison, zeroinitializer ; STRIDED-UF2-NEXT: [[TMP16:%.*]] = mul [[TMP15]], [[BROADCAST_SPLAT11]] ; STRIDED-UF2-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI9]], [[TMP16]] -; STRIDED-UF2-NEXT: [[TMP17:%.*]] = call @llvm.stepvector.nxv4i64() -; STRIDED-UF2-NEXT: [[TMP18:%.*]] = mul [[TMP17]], [[BROADCAST_SPLAT11]] -; STRIDED-UF2-NEXT: [[VECTOR_GEP12:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], [[TMP18]] +; STRIDED-UF2-NEXT: [[VECTOR_GEP12:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], [[TMP16]] ; STRIDED-UF2-NEXT: [[STEP_ADD:%.*]] = getelementptr i8, [[VECTOR_GEP12]], [[TMP14]] ; STRIDED-UF2-NEXT: [[STEP_ADD13:%.*]] = getelementptr i8, [[VECTOR_GEP]], [[TMP14]] ; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[VECTOR_GEP12]], i32 4, splat (i1 true), poison), !alias.scope [[META15:![0-9]+]] @@ -1284,8 +1236,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] ; STRIDED-UF2-NEXT: [[TMP21:%.*]] = mul i64 [[STRIDE]], [[TMP9]] ; STRIDED-UF2-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP21]] -; STRIDED-UF2-NEXT: [[TMP22:%.*]] = mul i64 [[STRIDE]], [[TMP9]] -; STRIDED-UF2-NEXT: [[PTR_IND15]] = getelementptr i8, ptr 
[[POINTER_PHI9]], i64 [[TMP22]] +; STRIDED-UF2-NEXT: [[PTR_IND15]] = getelementptr i8, ptr [[POINTER_PHI9]], i64 [[TMP21]] ; STRIDED-UF2-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; STRIDED-UF2-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; STRIDED-UF2: middle.block: @@ -1363,25 +1314,13 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) { ; NOSTRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv2i64.nxv2p0( align 8 [[TMP4]], splat (i1 true), i32 [[TMP2]]) ; NOSTRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]] ; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], splat (i1 true), i32 [[TMP2]]) -; NOSTRIDED-NEXT: [[TMP6:%.*]] = zext i32 [[TMP2]] to i64 -; NOSTRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP6]], [[EVL_BASED_IV]] -; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] +; NOSTRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP3]], [[EVL_BASED_IV]] +; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; NOSTRIDED-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; NOSTRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; NOSTRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; NOSTRIDED: middle.block: -; NOSTRIDED-NEXT: br label [[EXIT:%.*]] -; NOSTRIDED: scalar.ph: ; NOSTRIDED-NEXT: br label [[LOOP:%.*]] -; NOSTRIDED: loop: -; NOSTRIDED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; NOSTRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[IN]], i64 [[IV]] -; NOSTRIDED-NEXT: [[TMP8:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; NOSTRIDED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT]], i64 [[IV]] -; NOSTRIDED-NEXT: store i64 [[TMP8]], ptr [[ARRAYIDX2]], align 8 -; NOSTRIDED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; NOSTRIDED: exit: ; NOSTRIDED-NEXT: ret void ; @@ -1459,25 +1398,13 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) { ; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv2i64.nxv2p0( align 8 [[TMP4]], splat (i1 true), i32 [[TMP2]]) ; STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]] ; STRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], splat (i1 true), i32 [[TMP2]]) -; STRIDED-NEXT: [[TMP6:%.*]] = zext i32 [[TMP2]] to i64 -; STRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP6]], [[EVL_BASED_IV]] -; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] +; STRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP3]], [[EVL_BASED_IV]] +; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; STRIDED-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; STRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; STRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; STRIDED: middle.block: -; STRIDED-NEXT: br label [[EXIT:%.*]] -; STRIDED: scalar.ph: ; STRIDED-NEXT: br label [[LOOP:%.*]] -; STRIDED: loop: -; STRIDED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; STRIDED-NEXT: [[ARRAYIDX:%.*]] = getelementptr 
inbounds nuw i32, ptr [[IN]], i64 [[IV]] -; STRIDED-NEXT: [[TMP8:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; STRIDED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT]], i64 [[IV]] -; STRIDED-NEXT: store i64 [[TMP8]], ptr [[ARRAYIDX2]], align 8 -; STRIDED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]] ; STRIDED: exit: ; STRIDED-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll index c10bfc32e6e29..8ab0f6f4c14f1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll @@ -1199,25 +1199,13 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) { ; IF-EVL-NEXT: [[TMP15:%.*]] = ptrtoint [[TMP14]] to ; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[TMP15]], ptr align 8 [[TMP16]], splat (i1 true), i32 [[TMP11]]) -; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64 -; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]] -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]] +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP12]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64 -; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP0]], ptr [[GEP2]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll index 61f97aa0a47ed..34a82757eccc0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll @@ -43,23 +43,9 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP20]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], 
[[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP27]], 3 -; IF-EVL-OUTLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP27]], i32 0 -; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[SELECT]], [[RDX]] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP24]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP24]] ; ; IF-EVL-INLOOP-LABEL: define i32 @cond_add( ; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0:[0-9]+]] { @@ -84,23 +70,9 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3 -; IF-EVL-INLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP25]], i32 0 -; IF-EVL-INLOOP-NEXT: [[ADD]] = add nsw i32 [[SELECT]], [[RDX]] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[TMP22]] ; ; NO-VP-OUTLOOP-LABEL: define i32 @cond_add( ; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0:[0-9]+]] { @@ -239,30 +211,12 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]] ; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP23]] ; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[PREDPHI]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ 
[[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-OUTLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 3 -; IF-EVL-OUTLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; IF-EVL-OUTLOOP: if.then: -; IF-EVL-OUTLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[RDX]], [[TMP28]] -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_INC]] -; IF-EVL-OUTLOOP: for.inc: -; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[RDX]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] +; IF-EVL-OUTLOOP-NEXT: br label [[FOR_INC:%.*]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[FOR_INC]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP27]] ; ; IF-EVL-INLOOP-LABEL: define i32 @cond_add_pred( ; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -284,29 +238,11 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]] ; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP23]] ; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: -; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3 -; IF-EVL-INLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; IF-EVL-INLOOP: if.then: -; IF-EVL-INLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[RDX]], [[TMP25]] -; IF-EVL-INLOOP-NEXT: br label [[FOR_INC]] -; IF-EVL-INLOOP: for.inc: -; IF-EVL-INLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[RDX]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] +; IF-EVL-INLOOP-NEXT: br label [[FOR_INC:%.*]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[FOR_INC]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[TMP22]] ; ; NO-VP-OUTLOOP-LABEL: define i32 @cond_add_pred( ; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], 
i32 [[START:%.*]]) #[[ATTR0]] { @@ -466,27 +402,12 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] ; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP19]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-OUTLOOP-NEXT: [[TMP37:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 -; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP37]], [[IV_TRUNC]] -; IF-EVL-OUTLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP37]], i32 0 -; IF-EVL-OUTLOOP-NEXT: [[ADD]] = add nsw i32 [[SELECT]], [[RDX]] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP22]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP22]] ; ; IF-EVL-INLOOP-LABEL: define i32 @step_cond_add( ; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -516,26 +437,11 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] ; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-INLOOP-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-INLOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-INLOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[RDX1:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-INLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 -; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], [[IV_TRUNC]] -; IF-EVL-INLOOP-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[TMP28]], i32 0 -; IF-EVL-INLOOP-NEXT: [[ADD1]] = add nsw i32 [[SELECT]], [[RDX1]] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw 
nsw i64 [[IV]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD1]], [[FOR_BODY]] ], [ [[ADD]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[ADD]] ; ; NO-VP-OUTLOOP-LABEL: define i32 @step_cond_add( ; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -700,31 +606,12 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]] ; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT7]] = add [[VEC_IND2]], [[BROADCAST_SPLAT2]] ; IF-EVL-OUTLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL-OUTLOOP: middle.block: ; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP24]]) -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-OUTLOOP: scalar.ph: -; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ] -; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ] -; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] -; IF-EVL-OUTLOOP-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 -; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32 -; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP38]], [[IV_TRUNC]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[MIDDLE_BLOCK]] -; IF-EVL-OUTLOOP: if.then: -; IF-EVL-OUTLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[BC_MERGE_RDX]], [[TMP38]] -; IF-EVL-OUTLOOP-NEXT: br label [[MIDDLE_BLOCK]] -; IF-EVL-OUTLOOP: for.inc: -; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[BC_MERGE_RDX]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-OUTLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-OUTLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] +; IF-EVL-OUTLOOP-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; IF-EVL-OUTLOOP: for.end: -; IF-EVL-OUTLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[MIDDLE_BLOCK]] ], [ [[TMP27]], [[MIDDLE_BLOCK1]] ] -; IF-EVL-OUTLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]] +; IF-EVL-OUTLOOP-NEXT: ret i32 [[TMP27]] ; ; IF-EVL-INLOOP-LABEL: define i32 @step_cond_add_pred( ; IF-EVL-INLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -753,30 +640,11 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]] ; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-INLOOP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-INLOOP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; 
IF-EVL-INLOOP: middle.block: -; IF-EVL-INLOOP-NEXT: br label [[FOR_END:%.*]] -; IF-EVL-INLOOP: scalar.ph: -; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ] -; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ] -; IF-EVL-INLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] -; IF-EVL-INLOOP-NEXT: [[TMP35:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 -; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32 -; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP35]], [[IV_TRUNC]] -; IF-EVL-INLOOP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[MIDDLE_BLOCK]] -; IF-EVL-INLOOP: if.then: -; IF-EVL-INLOOP-NEXT: [[ADD_PRED:%.*]] = add nsw i32 [[BC_MERGE_RDX]], [[TMP35]] -; IF-EVL-INLOOP-NEXT: br label [[MIDDLE_BLOCK]] -; IF-EVL-INLOOP: for.inc: -; IF-EVL-INLOOP-NEXT: [[RDX_ADD]] = phi i32 [ [[ADD_PRED]], [[IF_THEN]] ], [ [[BC_MERGE_RDX]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-INLOOP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-INLOOP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3]] +; IF-EVL-INLOOP-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; IF-EVL-INLOOP: for.end: -; IF-EVL-INLOOP-NEXT: [[RDX_ADD_LCSSA:%.*]] = phi i32 [ [[RDX_ADD]], [[MIDDLE_BLOCK]] ], [ [[TMP17]], [[MIDDLE_BLOCK1]] ] -; IF-EVL-INLOOP-NEXT: ret i32 [[RDX_ADD_LCSSA]] +; IF-EVL-INLOOP-NEXT: ret i32 [[TMP17]] ; ; NO-VP-OUTLOOP-LABEL: define i32 @step_cond_add_pred( ; NO-VP-OUTLOOP-SAME: ptr [[A:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) #[[ATTR0]] { @@ -931,20 +799,16 @@ for.end: ; IF-EVL-OUTLOOP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; IF-EVL-OUTLOOP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; IF-EVL-OUTLOOP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; IF-EVL-OUTLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; IF-EVL-OUTLOOP: [[META4]] = !{!"llvm.loop.vectorize.enable", i1 true} +; IF-EVL-OUTLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} +; IF-EVL-OUTLOOP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; IF-EVL-OUTLOOP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; IF-EVL-OUTLOOP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} -; IF-EVL-OUTLOOP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} ;. ; IF-EVL-INLOOP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; IF-EVL-INLOOP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; IF-EVL-INLOOP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; IF-EVL-INLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; IF-EVL-INLOOP: [[META4]] = !{!"llvm.loop.vectorize.enable", i1 true} +; IF-EVL-INLOOP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} +; IF-EVL-INLOOP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; IF-EVL-INLOOP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; IF-EVL-INLOOP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} -; IF-EVL-INLOOP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} ;. 
; NO-VP-OUTLOOP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; NO-VP-OUTLOOP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll index 22d216e059af3..8cd540c3888db 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll @@ -33,20 +33,6 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 -; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8 -; IF-EVL-NEXT: [[TMP18:%.*]] = sdiv i64 [[TMP16]], [[TMP17]] -; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; @@ -143,20 +129,6 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 -; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8 -; IF-EVL-NEXT: [[TMP18:%.*]] = udiv i64 [[TMP16]], [[TMP17]] -; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; @@ -252,20 +224,6 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 -; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8 -; IF-EVL-NEXT: [[TMP18:%.*]] = srem i64 [[TMP16]], [[TMP17]] -; IF-EVL-NEXT: 
[[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; @@ -361,20 +319,6 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[EXIT:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[LOOP:.*]] -; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 -; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i64, ptr [[B_GEP]], align 8 -; IF-EVL-NEXT: [[TMP18:%.*]] = urem i64 [[TMP16]], [[TMP17]] -; IF-EVL-NEXT: [[C_GEP:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i64 [[TMP18]], ptr [[C_GEP]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; IF-EVL-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; IF-EVL: [[EXIT]]: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll index 55facaa96631e..c7ba826295de8 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll @@ -42,19 +42,6 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP24:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[TMP24]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP24]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: ret void ; @@ -167,23 +154,9 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]] ; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 
[[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP31:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[TMP31]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[FOR2]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: ret void ; @@ -316,25 +289,9 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP27]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]] ; IF-EVL-NEXT: [[TMP26:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP38:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR3:%.*]] = phi i32 [ 11, %[[SCALAR_PH]] ], [ [[FOR2]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[TMP38]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR2]], [[FOR3]] -; IF-EVL-NEXT: [[ADD1:%.*]] = add i32 [[ADD]], [[FOR1]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] -; IF-EVL-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: ret void ; @@ -469,7 +426,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: store [[TMP11]], ptr [[TMP12]], align 4 ; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP3]] ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; 
IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4 @@ -495,7 +452,7 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 ; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[FOR1_LCSSA]] @@ -609,25 +566,13 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; IF-EVL-NEXT: [[TMP15:%.*]] = call @llvm.experimental.vp.splice.nxv2i64( [[VECTOR_RECUR]], [[TMP20]], i32 -1, splat (i1 true), i32 [[PREV_EVL]], i32 [[TMP11]]) ; IF-EVL-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[TMP15]], ptr align 8 [[TMP9]], splat (i1 true), i32 [[TMP11]]) -; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP11]] to i64 -; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]] -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]] +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP7]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: [[MIDDLE_BLOCK]]: ; IF-EVL-NEXT: br label %[[FOR_END:.*]] -; IF-EVL: [[SCALAR_PH:.*]]: -; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] -; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV1_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i64 [ 33, %[[SCALAR_PH]] ], [ [[TMP14:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[TMP14]] = add i64 [[IV1]], 42 -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[IV1]] -; IF-EVL-NEXT: store i64 [[FOR1]], ptr [[ARRAYIDX]], align 8 -; IF-EVL-NEXT: [[IV1_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV1_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3]] ; IF-EVL: [[FOR_END]]: ; IF-EVL-NEXT: ret void ; @@ -714,13 +659,11 @@ for.end: ; IF-EVL: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; IF-EVL: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; IF-EVL: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; IF-EVL: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; IF-EVL: [[META4]] = !{!"llvm.loop.vectorize.enable", i1 true} +; IF-EVL: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} +; IF-EVL: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; IF-EVL: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} +; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} ; IF-EVL: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} 
-; IF-EVL: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]} -; IF-EVL: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} ;. ; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll index df550ecac561e..b9a4e97cd9f24 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll @@ -30,21 +30,9 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @add( ; NO-VP-NEXT: entry: @@ -129,7 +117,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP5]] = mul i32 [[VEC_PHI1]], [[TMP4]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP5]], [[MUL]] ; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] @@ -146,7 +134,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[MUL1]] = mul nsw i32 [[TMP0]], [[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] @@ -231,23 +219,11 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[OR_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @or( ; NO-VP-NEXT: entry: @@ -327,23 +303,11 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[AND_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @and( ; NO-VP-NEXT: entry: @@ -423,23 +387,11 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, 
ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[XOR_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @xor( ; NO-VP-NEXT: entry: @@ -519,24 +471,11 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[SMIN_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]] ; ; NO-VP-LABEL: @smin( ; NO-VP-NEXT: entry: @@ -618,24 +557,11 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[SMAX]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: 
[[SMAX_LCSSA:%.*]] = phi i32 [ [[SMAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[SMAX_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]] ; ; NO-VP-LABEL: @smax( ; NO-VP-NEXT: entry: @@ -717,24 +643,11 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[UMIN]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[UMIN_LCSSA:%.*]] = phi i32 [ [[UMIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[UMIN_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]] ; ; NO-VP-LABEL: @umin( ; NO-VP-NEXT: entry: @@ -816,24 +729,11 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[UMAX]] = select i1 [[CMP_I]], i32 [[TMP17]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[UMAX_LCSSA:%.*]] = phi i32 [ [[UMAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[UMAX_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_MINMAX]] ; ; NO-VP-LABEL: @umax( ; NO-VP-NEXT: entry: @@ -915,23 +815,11 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 
[[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[ADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP15]] ; ; NO-VP-LABEL: @fadd( ; NO-VP-NEXT: entry: @@ -1016,7 +904,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP5]] = fmul reassoc float [[VEC_PHI1]], [[TMP4]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP5]], [[MUL]] ; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] @@ -1033,7 +921,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MUL1]] = fmul reassoc float [[TMP0]], [[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] @@ -1119,24 +1007,11 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] 
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[MIN]] = select i1 [[CMP]], float [[TMP17]], float [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MIN_LCSSA]] +; IF-EVL-NEXT: ret float [[RDX_MINMAX_SELECT]] ; ; NO-VP-LABEL: @fmin( ; NO-VP-NEXT: entry: @@ -1220,24 +1095,11 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP17]], [[RDX]] -; IF-EVL-NEXT: [[MAX]] = select i1 [[CMP]], float [[TMP17]], float [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ], [ [[RDX_MINMAX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MAX_LCSSA]] +; IF-EVL-NEXT: ret float [[RDX_MINMAX_SELECT]] ; ; NO-VP-LABEL: @fmax( ; NO-VP-NEXT: entry: @@ -1324,7 +1186,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) @@ -1342,7 +1204,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 
[[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP30:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1432,7 +1294,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) @@ -1450,7 +1312,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP32:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1539,25 +1401,11 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP22]], float [[RDX]]) -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MULADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP18]] ; ; 
NO-VP-LABEL: @fmuladd( ; NO-VP-NEXT: entry: @@ -1644,27 +1492,14 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] ; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP20]], i32 [[INV:%.*]], i32 [[START:%.*]] -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP21]], 3 -; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_SELECT]] ; ; NO-VP-LABEL: @anyof_icmp( ; NO-VP-NEXT: entry: @@ -1749,27 +1584,14 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP16]]) ; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]] ; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP20]], i32 [[INV:%.*]], i32 [[START:%.*]] -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP21]], 3.000000e+00 -; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: 
[[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_SELECT]] ; ; NO-VP-LABEL: @anyof_fcmp( ; NO-VP-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll index a23933c7fb005..0c22a9eb2acab 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll @@ -32,21 +32,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) { ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 0 -; IF-EVL-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 1 -; IF-EVL-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP12]] -; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.cond.cleanup: ; IF-EVL-NEXT: ret void ; @@ -153,34 +139,15 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP10]], splat (i1 true), i32 [[TMP4]]) ; IF-EVL-NEXT: [[TMP11:%.*]] = add [[TMP9]], [[WIDE_MASKED_GATHER2]] ; IF-EVL-NEXT: [[TMP12]] = call @llvm.vp.merge.nxv4i32( splat (i1 true), [[TMP11]], [[VEC_PHI]], i32 [[TMP4]]) -; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP4]] to i64 -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP12]]) -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 0 -; IF-EVL-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[RDX]], [[TMP16]] -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds 
[4 x i32], ptr [[A]], i64 [[IV]], i32 1 -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP17]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 3 -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD2]] = add nsw i32 [[ADD1]], [[TMP18]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] ; IF-EVL: exit: -; IF-EVL-NEXT: [[ADD2_LCSSA:%.*]] = phi i32 [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD2_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @load_factor_4_with_gap( ; NO-VP-NEXT: entry: @@ -300,22 +267,9 @@ define void @store_factor_4_with_gap(i32 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]] ; IF-EVL-NEXT: [[VEC_IND_NEXT5]] = add [[VEC_IND2]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[TMP15:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 0 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 1 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX1]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 3 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[TMP15]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -425,34 +379,15 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP10]], splat (i1 true), i32 [[TMP4]]) ; IF-EVL-NEXT: [[TMP11:%.*]] = add [[TMP9]], [[WIDE_MASKED_GATHER2]] ; IF-EVL-NEXT: [[TMP12]] = call @llvm.vp.merge.nxv4i32( splat (i1 true), [[TMP11]], [[VEC_PHI]], i32 [[TMP4]]) -; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP4]] to i64 -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP12]]) -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, 
[[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 0 -; IF-EVL-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[RDX]], [[TMP16]] -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 1 -; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP17]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 2 -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD2]] = add nsw i32 [[ADD1]], [[TMP18]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] ; IF-EVL: exit: -; IF-EVL-NEXT: [[ADD2_LCSSA:%.*]] = phi i32 [ [[ADD2]], [[FOR_BODY]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD2_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP15]] ; ; NO-VP-LABEL: @load_factor_4_with_tail_gap( ; NO-VP-NEXT: entry: @@ -573,22 +508,9 @@ define void @store_factor_4_with_tail_gap(i32 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]] ; IF-EVL-NEXT: [[VEC_IND_NEXT5]] = add [[VEC_IND2]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP8:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[TMP15:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 0 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 1 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX1]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i32 [[TMP15]], i32 2 -; IF-EVL-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[TMP15]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -696,37 +618,15 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP14]], splat (i1 true), i32 [[TMP6]]) ; IF-EVL-NEXT: [[TMP15:%.*]] = add [[TMP13]], [[WIDE_MASKED_GATHER5]] ; IF-EVL-NEXT: [[TMP16]] = call @llvm.vp.merge.nxv4i32( splat (i1 true), [[TMP15]], [[VEC_PHI]], i32 [[TMP6]]) -; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP6]] to i64 -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], 
label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP16]]) -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[N]], [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ADD3:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 0 -; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[RDX]], [[TMP20]] -; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 1 -; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP21]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 2 -; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD2:%.*]] = add nsw i32 [[ADD1]], [[TMP22]] -; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [4 x i32], ptr [[A]], i64 [[IV]], i32 3 -; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4 -; IF-EVL-NEXT: [[ADD3]] = add nsw i32 [[ADD2]], [[TMP23]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[IV_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[EXIT]] ; IF-EVL: exit: -; IF-EVL-NEXT: [[ADD3_LCSSA:%.*]] = phi i32 [ [[ADD3]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD3_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP19]] ; ; NO-VP-LABEL: @load_factor_4_reverse( ; NO-VP-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll index 00c88a46c3a0a..1aea6aaff66a3 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll @@ -26,18 +26,7 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) { ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV1]] -; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV1]] -; IF-EVL-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX4]], align 4 -; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i32 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT1]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY1]] ; IF-EVL: for.cond.cleanup: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll index a03b4306bad66..e94e64fe11d2f 100644 --- 
a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll @@ -32,17 +32,6 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]] -; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1 -; CHECK-NEXT: store i64 [[Y]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp uge i64 [[I_NEXT]], [[TC]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]] ; CHECK: [[EXIT_LOOPEXIT]]: ; CHECK-NEXT: br label %[[EXIT]] ; CHECK: [[EXIT]]: @@ -92,17 +81,6 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]] -; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1 -; CHECK-NEXT: store i64 [[Y]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[TC]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]] ; CHECK: [[EXIT_LOOPEXIT]]: ; CHECK-NEXT: br label %[[EXIT]] ; CHECK: [[EXIT]]: @@ -152,17 +130,6 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]] -; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8 -; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1 -; CHECK-NEXT: store i64 [[Y]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[TC_ADD]] -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT_LOOPEXIT]], label %[[LOOP]] ; CHECK: [[EXIT_LOOPEXIT]]: ; CHECK-NEXT: br label %[[EXIT]] ; CHECK: [[EXIT]]: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll index 58b4c5311dbec..b13c671ae3d56 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll @@ -30,25 +30,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: -; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_011]] -; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP23]], 0 -; IF-EVL-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; IF-EVL: if.then: -; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_011]] -; IF-EVL-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add i32 [[TMP23]], [[TMP24]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4 -; IF-EVL-NEXT: br label [[FOR_INC]] -; IF-EVL: for.inc: -; IF-EVL-NEXT: [[INC]] = add nuw nsw i64 [[I_011]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]] +; IF-EVL-NEXT: br label [[FOR_INC:%.*]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll index 6c487ab8090d6..dcb7bf484f4ae 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll @@ -29,21 +29,9 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) { ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = fadd float [[TMP17]], [[SUM_07]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[ADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP14]] ; ; NO-VP-LABEL: @fadd( ; NO-VP-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll index e14ff7ce29a10..7179e7dc48c8d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll @@ -30,21 +30,9 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; 
IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ADD_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP17]] ; ; NO-VP-LABEL: @add( ; NO-VP-NEXT: entry: @@ -129,7 +117,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP4]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP6:%.*]] = mul <8 x i32> [[TMP4]], [[TMP5]] ; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP6]]) @@ -147,7 +135,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[MUL]] = mul nsw i32 [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP6:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] @@ -233,24 +221,12 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32( [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop 
[[LOOP8:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[OR_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP17]] ; ; NO-VP-LABEL: @or( ; NO-VP-NEXT: entry: @@ -332,24 +308,12 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32( [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[AND_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP17]] ; ; NO-VP-LABEL: @and( ; NO-VP-NEXT: entry: @@ -431,24 +395,12 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32( [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[XOR_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP17]] ; ; NO-VP-LABEL: @xor( ; NO-VP-NEXT: entry: @@ -532,25 +484,12 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 
[[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32( [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[SMIN]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[SMIN_LCSSA:%.*]] = phi i32 [ [[SMIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[SMIN_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP18]] ; ; NO-VP-LABEL: @smin( ; NO-VP-NEXT: entry: @@ -638,25 +577,12 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.smax.nxv4i32( [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[SMAX]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[SMAX_LCSSA:%.*]] = phi i32 [ [[SMAX]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[SMAX_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP18]] ; ; NO-VP-LABEL: @smax( ; NO-VP-NEXT: entry: @@ -744,25 +670,12 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 
[[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umin.nxv4i32( [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[UMIN]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[UMIN_LCSSA:%.*]] = phi i32 [ [[UMIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[UMIN_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP18]] ; ; NO-VP-LABEL: @umin( ; NO-VP-NEXT: entry: @@ -850,25 +763,12 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32( [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[UMAX]] = select i1 [[CMP_I]], i32 [[TMP19]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[UMAX_LCSSA:%.*]] = phi i32 [ [[UMAX]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[UMAX_LCSSA]] +; IF-EVL-NEXT: ret i32 [[TMP18]] ; ; NO-VP-LABEL: @umax( ; NO-VP-NEXT: entry: @@ -954,24 +854,12 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP14]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[ADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP17]] ; ; NO-VP-LABEL: @fadd( ; NO-VP-NEXT: entry: @@ -1056,7 +944,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP4]] = fmul reassoc <8 x float> [[WIDE_LOAD2]], [[VEC_PHI1]] ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP6:%.*]] = fmul reassoc <8 x float> [[TMP4]], [[TMP5]] ; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v8f32(float 1.000000e+00, <8 x float> [[TMP6]]) @@ -1074,7 +962,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP24:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP15:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] @@ -1162,25 +1050,12 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmin.nxv4f32( [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr 
inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[MIN]] = select i1 [[CMP]], float [[TMP19]], float [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MIN_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP18]] ; ; NO-VP-LABEL: @fmin( ; NO-VP-NEXT: entry: @@ -1268,25 +1143,12 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32( [[TMP15]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP19]], [[RDX]] -; IF-EVL-NEXT: [[MAX]] = select i1 [[CMP]], float [[TMP19]], float [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MAX_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP18]] ; ; NO-VP-LABEL: @fmax( ; NO-VP-NEXT: entry: @@ -1375,7 +1237,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) @@ -1393,7 +1255,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 
[[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP30:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP19:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MIN_LCSSA]] @@ -1483,7 +1345,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) ; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) @@ -1501,7 +1363,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP32:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP21:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MAX_LCSSA]] @@ -1590,26 +1452,12 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]] ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP20:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, [[TMP17]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP22]], float [[RDX]]) -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], [[FOR_BODY]] ], [ 
[[TMP20]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret float [[MULADD_LCSSA]] +; IF-EVL-NEXT: ret float [[TMP20]] ; ; NO-VP-LABEL: @fmuladd( ; NO-VP-NEXT: entry: @@ -1696,27 +1544,14 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] ; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP19]], i32 [[INV:%.*]], i32 [[START:%.*]] -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP20]], 3 -; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_SELECT]] ; ; NO-VP-LABEL: @anyof_icmp( ; NO-VP-NEXT: entry: @@ -1801,27 +1636,14 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] ; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] +; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; IF-EVL: middle.block: ; IF-EVL-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[TMP15]]) ; IF-EVL-NEXT: [[TMP19:%.*]] = freeze i1 [[TMP18]] ; IF-EVL-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP19]], i32 [[INV:%.*]], i32 [[START:%.*]] -; IF-EVL-NEXT: br label [[FOR_END:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP20]], 3.000000e+00 -; IF-EVL-NEXT: [[ANYOF]] = select i1 [[CMP_I]], i32 [[INV]], i32 [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 
[[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[ANYOF_LCSSA:%.*]] = phi i32 [ [[ANYOF]], [[FOR_BODY]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; IF-EVL-NEXT: ret i32 [[ANYOF_LCSSA]] +; IF-EVL-NEXT: ret i32 [[RDX_SELECT]] ; ; NO-VP-LABEL: @anyof_fcmp( ; NO-VP-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll index 5b9bc501afff4..e70894b981dff 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll @@ -43,20 +43,7 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt ; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[LOOPEND:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 -; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]] -; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 -; IF-EVL-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] -; IF-EVL-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4 -; IF-EVL-NEXT: [[INC]] = add i32 [[I]], 1 -; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 -; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND]] ; IF-EVL: loopend: ; IF-EVL-NEXT: ret void ; @@ -179,27 +166,7 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal ; IF-EVL-NEXT: [[TMP29:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[LOOPEND:%.*]] -; IF-EVL: scalar.ph: -; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ] -; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ] -; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 -; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]] -; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 -; IF-EVL-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP]], 100 -; IF-EVL-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; IF-EVL: if.then: -; IF-EVL-NEXT: [[GEPL1:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i64 [[ADD]] -; IF-EVL-NEXT: [[V:%.*]] = load i32, ptr [[GEPL1]], align 4 -; IF-EVL-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] -; IF-EVL-NEXT: store i32 [[V]], ptr [[GEPS]], align 4 -; IF-EVL-NEXT: br label [[FOR_INC]] -; IF-EVL: for.inc: -; IF-EVL-NEXT: [[INC]] = add i32 [[I]], 1 -; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 -; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND]] +; IF-EVL-NEXT: br label [[FOR_INC:%.*]] ; IF-EVL: loopend: ; IF-EVL-NEXT: ret void ; @@ -351,22 +318,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, 
ptr noalias %b, ptr ; IF-EVL-NEXT: [[TMP32:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1 -; IF-EVL-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i8 [[X]] -; IF-EVL-NEXT: [[Y:%.*]] = load i8, ptr [[GEP_B]], align 1 -; IF-EVL-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: store i8 [[Y]], ptr [[GEP_C]], align 1 -; IF-EVL-NEXT: [[GEP_D:%.*]] = getelementptr i8, ptr [[D]], i64 [[IV]] -; IF-EVL-NEXT: store i8 [[Y]], ptr [[GEP_D]], align 1 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], -1 -; IF-EVL-NEXT: [[CMP_NOT:%.*]] = icmp eq i64 [[IV]], 0 -; IF-EVL-NEXT: br i1 [[CMP_NOT]], label [[EXIT]], label [[LOOP]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll index b13f97d41862e..e1c62fe2d043d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll @@ -31,19 +31,7 @@ define void @test(ptr %p) { ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 8 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 8 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -125,19 +113,7 @@ define void @test_may_clobber1(ptr %p) { ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 100 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -157,19 +133,7 @@ define void @test_may_clobber1(ptr %p) { ; NO-VP-NEXT: 
[[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; NO-VP: middle.block: -; NO-VP-NEXT: br label [[EXIT:%.*]] -; NO-VP: scalar.ph: ; NO-VP-NEXT: br label [[LOOP:%.*]] -; NO-VP: loop: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 100 -; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; @@ -259,19 +223,7 @@ define void @test_may_clobber3(ptr %p) { ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 10 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -291,19 +243,7 @@ define void @test_may_clobber3(ptr %p) { ; NO-VP-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; NO-VP: middle.block: -; NO-VP-NEXT: br label [[EXIT:%.*]] -; NO-VP: scalar.ph: ; NO-VP-NEXT: br label [[LOOP:%.*]] -; NO-VP: loop: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 10 -; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; @@ -347,19 +287,7 @@ define void @trivial_due_max_vscale(ptr %p) { ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 
[[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; @@ -446,19 +374,7 @@ define void @no_high_lmul_or_interleave(ptr %p) { ; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[EXIT:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[LOOP:%.*]] -; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] -; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024 -; IF-EVL-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; IF-EVL-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IF-EVL-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 3001 -; IF-EVL-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; IF-EVL: exit: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll index 0bb7ad0d57055..f804329169fe0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll @@ -38,16 +38,6 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) { ; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[TMP22:%.*]] = sub nuw nsw i64 1, [[IV1]] -; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP22]] -; CHECK-NEXT: store i64 0, ptr [[ARRAYIDX14]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll index 5c89f218fdf7d..c5319c6165f89 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll @@ -31,20 +31,6 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8 -; CHECK-NEXT: [[EXT_L:%.*]] = zext i16 [[L]] 
to i64 -; CHECK-NEXT: [[AND:%.*]] = and i64 [[X]], [[EXT_L]] -; CHECK-NEXT: [[TRUNC_AND:%.*]] = trunc i64 [[AND]] to i8 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -95,20 +81,6 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8 -; CHECK-NEXT: [[EXT_L:%.*]] = sext i16 [[L]] to i64 -; CHECK-NEXT: [[AND:%.*]] = and i64 [[X]], [[EXT_L]] -; CHECK-NEXT: [[TRUNC_AND:%.*]] = trunc i64 [[AND]] to i8 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -151,21 +123,6 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[F_039:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[TMP4:%.*]] = or i8 23, [[X]] -; CHECK-NEXT: [[EXTRACT_T:%.*]] = trunc i8 [[TMP4]] to i1 -; CHECK-NEXT: br i1 [[EXTRACT_T]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: store i8 0, ptr [[DST]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[ADD]] = add i8 [[F_039]], 1 -; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[F_039]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], 8 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -260,23 +217,6 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64 ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[T1]], [[T]] -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[X]] to i64 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr double, ptr [[SRC]], i64 [[IDXPROM]] -; CHECK-NEXT: [[RETVAL:%.*]] = load double, ptr [[ARRAYIDX]], align 8 -; 
CHECK-NEXT: store double [[RETVAL]], ptr [[DST]], align 8 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[V]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll index 6efb0358242c7..000dc4a13f63a 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll @@ -22,20 +22,6 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) { ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV1]] -; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[GEP_SRC1]], align 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32 -; CHECK-NEXT: [[MUL16:%.*]] = mul i32 0, [[CONV]] -; CHECK-NEXT: [[SHR35:%.*]] = lshr i32 [[MUL16]], 1 -; CHECK-NEXT: [[CONV36:%.*]] = trunc i32 [[SHR35]] to i8 -; CHECK-NEXT: store i8 [[CONV36]], ptr null, align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], 8 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index adfecdff8fcc8..bae97e53a1ff9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -29,16 +29,6 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -97,16 +87,6 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ 
[[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -285,30 +265,13 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; SCALABLE-NEXT: [[PREDPHI:%.*]] = select [[TMP10]], [[WIDE_MASKED_GATHER]], zeroinitializer ; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0( [[PREDPHI]], ptr align 8 [[TMP12]], splat (i1 true), i32 [[TMP17]]) -; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP17]] to i64 -; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] -; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]] -; SCALABLE: [[DO_LOAD]]: -; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; SCALABLE-NEXT: br label %[[LATCH]] -; SCALABLE: [[LATCH]]: -; SCALABLE-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[V]], %[[DO_LOAD]] ] -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -383,30 +346,13 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: [[PREDPHI:%.*]] = select [[TMP10]], [[WIDE_MASKED_GATHER]], zeroinitializer ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0( [[PREDPHI]], ptr align 8 [[TMP12]], splat (i1 true), i32 [[TMP7]]) -; TF-SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64 -; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] -; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; TF-SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label 
%[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]] -; TF-SCALABLE: [[DO_LOAD]]: -; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 -; TF-SCALABLE-NEXT: br label %[[LATCH]] -; TF-SCALABLE: [[LATCH]]: -; TF-SCALABLE-NEXT: [[PHI:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[V]], %[[DO_LOAD]] ] -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -453,19 +399,9 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; SCALABLE-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -521,19 +457,9 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 
[[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -573,19 +499,9 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -641,19 +557,9 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -698,24 +604,13 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[VEC_IND]], align 8 [[BROADCAST_SPLAT1]], splat (i1 true), i32 [[TMP7]]) ; SCALABLE-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP10]] ; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[TMP16]], splat (i1 true), i32 [[TMP7]]) -; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 -; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[TMP10]] -; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[TMP10]] +; SCALABLE-NEXT: 
[[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -780,24 +675,13 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[VEC_IND]], align 8 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP9]]) ; TF-SCALABLE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT3]], ptr align 8 [[TMP10]], splat (i1 true), i32 [[TMP9]]) -; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 -; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] -; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] +; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -843,29 +727,13 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[BROADCAST_SPLAT1]], align 8 [[BROADCAST_SPLAT2]], [[TMP10]], i32 [[TMP7]]) ; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], splat (i1 true), i32 
[[TMP7]]) -; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64 -; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] -; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; SCALABLE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]] -; SCALABLE: [[DO_STORE]]: -; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 -; SCALABLE-NEXT: br label %[[LATCH]] -; SCALABLE: [[LATCH]]: -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -940,29 +808,13 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[BROADCAST_SPLAT1]], align 8 [[BROADCAST_SPLAT2]], [[TMP10]], i32 [[TMP9]]) ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], splat (i1 true), i32 [[TMP9]]) -; TF-SCALABLE-NEXT: [[TMP14:%.*]] = zext i32 [[TMP9]] to i64 -; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]] -; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 -; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]] -; TF-SCALABLE: [[DO_STORE]]: -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 -; TF-SCALABLE-NEXT: br label %[[LATCH]] -; TF-SCALABLE: [[LATCH]]: -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store 
i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -1008,19 +860,9 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]] ; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]] ; SCALABLE-NEXT: [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; SCALABLE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; SCALABLE: [[MIDDLE_BLOCK]]: ; SCALABLE-NEXT: br label %[[FOR_END:.*]] -; SCALABLE: [[SCALAR_PH:.*]]: -; SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; SCALABLE: [[FOR_BODY]]: -; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1 -; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; SCALABLE: [[FOR_END]]: ; SCALABLE-NEXT: ret void ; @@ -1076,19 +918,9 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] -; TF-SCALABLE: [[SCALAR_PH:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1 -; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll index 8c67b4cb7996e..1676461863583 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll @@ -15,15 +15,6 @@ define void @foo(ptr %arg) #0 { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: 
[[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr [3 x i64], ptr [[ARG]], i64 0, i64 [[IV]] -; CHECK-NEXT: store i64 0, ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[COND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -61,18 +52,8 @@ define i32 @test_remove_iv(i32 %start) #0 { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32( [[TMP5]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[START]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED_NEXT]] = xor i32 [[RED]], 3 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 5 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll index 649ce601c66d1..0a64723b6ff9d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll @@ -30,21 +30,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], [[TMP22]] -; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; IF-EVL: for.cond.cleanup: ; IF-EVL-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll index b0f0c39711274..b106f99130785 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/addressing.ll @@ -25,11 +25,7 @@ define i32 @foo(ptr nocapture %A) { ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: 
middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 poison ; @@ -76,11 +72,7 @@ define i32 @foo1(ptr nocapture noalias %A, ptr nocapture %PtrPtr) { ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 poison ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll index 1d4cbc3cebcde..78c71fd3beb89 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll @@ -38,15 +38,6 @@ define void @test_scalar_steps_target_instruction_cost(ptr %dst) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i64 [[IV]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 3 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], 22 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll index a423f06ae9892..02e82b43fdd80 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll @@ -91,23 +91,7 @@ define void @test(ptr %p, i40 %a) { ; CHECK: pred.store.continue30: ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SHL:%.*]] = shl i40 [[A]], 24 -; CHECK-NEXT: [[ASHR:%.*]] = ashr i40 [[SHL]], 28 -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i40 [[ASHR]] to i32 -; CHECK-NEXT: [[ICMP_EQ:%.*]] = icmp eq i32 [[TRUNC]], 0 -; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[ICMP_EQ]] to i32 -; CHECK-NEXT: [[ICMP_ULT:%.*]] = icmp ult i32 0, [[ZEXT]] -; CHECK-NEXT: [[OR:%.*]] = or i1 [[ICMP_ULT]], true -; CHECK-NEXT: [[ICMP_SGT:%.*]] = icmp sgt i1 [[OR]], false -; CHECK-NEXT: store i1 [[ICMP_SGT]], ptr [[P]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[IV_NEXT]], 10 -; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll index 3c788b2ef539a..ee84ef243570a 100644 --- 
a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll @@ -63,19 +63,7 @@ define void @func_21() { ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 6 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[LV:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @A, i64 0, i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LV]] = load i32, ptr [[A_PTR]], align 4 -; CHECK-NEXT: [[B_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[SCALAR_RECUR]], ptr [[B_PTR]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 5 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll index d40cb6ea2f60e..cfb180594b0ec 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll @@ -66,25 +66,6 @@ define void @test_scalar_iv_steps_used_by_replicate_and_first_lane_only_vpinst(p ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[MUL_IV:%.*]] = mul nsw i64 [[IV]], 4 -; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds i8, ptr [[SRC_1]], i64 [[MUL_IV]] -; CHECK-NEXT: [[L_1:%.*]] = load i8, ptr [[GEP_SRC_1]], align 1 -; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[L_1]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[IV_OR:%.*]] = or disjoint i64 [[IV]], 4 -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds [8 x i32], ptr @src, i64 0, i64 [[IV_OR]] -; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[GEP_SRC]], align 4 -; CHECK-NEXT: store i32 [[L_2]], ptr [[DST]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/cleanup-runtime-checks.ll b/llvm/test/Transforms/LoopVectorize/X86/cleanup-runtime-checks.ll new file mode 100644 index 0000000000000..41753f7e4f27a --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/X86/cleanup-runtime-checks.ll @@ -0,0 +1,79 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -p loop-vectorize -S %s | 
FileCheck %s + +target triple = "x86_64-unknown-linux-gnu" + +declare ptr @get() +declare i1 @cond() + +; Make sure we can clean up the created runtime checks, if vectorization isn't +; profitable. +define void @widget(i32 %arg, i64 %arg1, ptr %src) #0 { +; CHECK-LABEL: define void @widget( +; CHECK-SAME: i32 [[ARG:%.*]], i64 [[ARG1:%.*]], ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[LOOP_1_HEADER:.*]] +; CHECK: [[LOOP_1_HEADER]]: +; CHECK-NEXT: br label %[[INNER_1:.*]] +; CHECK: [[INNER_1]]: +; CHECK-NEXT: [[C_1:%.*]] = call i1 @cond() +; CHECK-NEXT: br i1 [[C_1]], label %[[INNER_2:.*]], label %[[INNER_1]] +; CHECK: [[INNER_2]]: +; CHECK-NEXT: [[LOAD:%.*]] = call ptr @get() +; CHECK-NEXT: [[C_2:%.*]] = call i1 @cond() +; CHECK-NEXT: br i1 [[C_2]], label %[[LOOP_2_PREHEADER:.*]], label %[[LOOP_1_LATCH:.*]] +; CHECK: [[LOOP_2_PREHEADER]]: +; CHECK-NEXT: br label %[[LOOP_2:.*]] +; CHECK: [[LOOP_1_LATCH]]: +; CHECK-NEXT: br label %[[LOOP_1_HEADER]] +; CHECK: [[LOOP_2]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP_2]] ], [ [[ARG]], %[[LOOP_2_PREHEADER]] ] +; CHECK-NEXT: [[PHI8:%.*]] = phi i32 [ [[OR:%.*]], %[[LOOP_2]] ], [ 99, %[[LOOP_2_PREHEADER]] ] +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i32 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 +; CHECK-NEXT: [[OR]] = or i32 [[PHI8]], [[L]] +; CHECK-NEXT: store i32 [[OR]], ptr [[LOAD]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 100 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_2]], !prof [[PROF0:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop.1.header + +loop.1.header: + br label %inner.1 + +inner.1: + %c.1 = call i1 @cond() + br i1 %c.1, label %inner.2, label %inner.1 + +inner.2: + %load = call ptr @get() + %c.2 = call i1 @cond() + br i1 %c.2, label %loop.2, label %loop.1.latch + +loop.1.latch: + br label %loop.1.header + +loop.2: + %iv = phi i32 [ %arg, %inner.2 ], [ %iv.next, %loop.2 ] + %phi8 = phi i32 [ 99, %inner.2 ], [ %or, %loop.2 ] + %gep.src = getelementptr i32, ptr %src, i32 %iv + %l = load i32, ptr %gep.src, align 4 + %or = or i32 %phi8, %l + store i32 %or, ptr %load, align 4 + %iv.next = add i32 %iv, 1 + %ec = icmp eq i32 %iv, 100 + br i1 %ec, label %exit, label %loop.2, !prof !0 + +exit: + ret void +} + +attributes #0 = { "target-features"="+avx2" } +!0 = !{!"branch_weights", i32 89478484, i32 1879048192} +;. +; CHECK: [[PROF0]] = !{!"branch_weights", i32 89478484, i32 1879048192} +;. 
diff --git a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll index 9dd7e9f0e97d5..f65a9d7d45ed8 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll @@ -22,19 +22,7 @@ define void @f1() { ; CHECK-NEXT: store <2 x ptr> , ptr [[TMP1]], align 8 ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[BB3:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[BB2:%.*]] -; CHECK: bb2: -; CHECK-NEXT: [[C_1_0:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[_TMP9:%.*]], [[BB2]] ] -; CHECK-NEXT: [[_TMP1:%.*]] = zext i16 0 to i64 -; CHECK-NEXT: [[_TMP2:%.*]] = getelementptr [1 x %rec8], ptr @a, i16 0, i64 [[_TMP1]] -; CHECK-NEXT: [[_TMP6:%.*]] = sext i16 [[C_1_0]] to i64 -; CHECK-NEXT: [[_TMP7:%.*]] = getelementptr [2 x ptr], ptr @b, i16 0, i64 [[_TMP6]] -; CHECK-NEXT: store ptr [[_TMP2]], ptr [[_TMP7]], align 8 -; CHECK-NEXT: [[_TMP9]] = add nsw i16 [[C_1_0]], 1 -; CHECK-NEXT: [[_TMP11:%.*]] = icmp slt i16 [[_TMP9]], 2 -; CHECK-NEXT: br i1 [[_TMP11]], label [[BB2]], label [[BB3]] ; CHECK: bb3: ; CHECK-NEXT: ret void ; @@ -102,25 +90,7 @@ define void @redundant_or_1(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK: pred.store.continue8: ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] -; CHECK: then.1: -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 -; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], true -; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_0]], i1 false -; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] -; CHECK: then.2: -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -195,25 +165,7 @@ define void @redundant_or_2(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK: pred.store.continue8: ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] -; CHECK: then.1: -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 -; CHECK-NEXT: [[OR:%.*]] = or i1 true, [[CMP]] -; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_1]], i1 false -; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] -; CHECK: then.2: -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label 
[[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -289,25 +241,7 @@ define void @redundant_and_1(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK: pred.store.continue8: ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] -; CHECK: then.1: -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 -; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], false -; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_1]], i1 false -; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] -; CHECK: then.2: -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -341,6 +275,23 @@ exit: define void @redundant_and_2(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK-LABEL: @redundant_and_2( ; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] +; CHECK: loop.header: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] +; CHECK-NEXT: br i1 [[C_0:%.*]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] +; CHECK: then.1: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 +; CHECK-NEXT: [[OR:%.*]] = and i1 false, [[CMP]] +; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR]], i1 [[C_1:%.*]], i1 false +; CHECK-NEXT: br i1 [[COND]], label [[THEN_2:%.*]], label [[LOOP_LATCH]] +; CHECK: then.2: +; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i32 [[IV]] +; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 +; CHECK-NEXT: br label [[LOOP_LATCH]] +; CHECK: loop.latch: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 3 +; CHECK-NEXT: br i1 [[EC]], label [[EXIT:%.*]], label [[LOOP_HEADER]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll index 9506ad30c788b..6d2cda48f90ca 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll @@ -36,8 +36,7 @@ define i32 @conversion_cost1(i32 %n, ptr nocapture %A, ptr nocapture %B) nounwin ; CHECK-NEXT: br i1 [[CMP_N]], label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[IND_END5:%.*]] = add i64 3, [[N_VEC]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-conditional-branches.ll 
b/llvm/test/Transforms/LoopVectorize/X86/cost-conditional-branches.ll index 21fa6ceb2cc12..590b2691c3238 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-conditional-branches.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-conditional-branches.ll @@ -580,8 +580,7 @@ define void @cost_duplicate_recipe_for_sinking(ptr %A, i64 %N) #2 { ; CHECK: middle.block: ; CHECK-NEXT: br label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF7:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll index ee88abbe4d1c0..e0dd3768ec111 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll @@ -92,24 +92,8 @@ define i64 @second_lshr_operand_zero_via_scev() { ; CHECK-NEXT: [[BIN_RDX:%.*]] = or <2 x i64> [[TMP11]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> [[BIN_RDX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOPS:.*]] -; CHECK: [[LOOPS]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOPS]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOPS]] ] -; CHECK-NEXT: [[C:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 0 -; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[TMP14]], [[EXT_0]] -; CHECK-NEXT: [[CONV_1:%.*]] = zext i32 [[SHR]] to i64 -; CHECK-NEXT: [[RED_NEXT_V:%.*]] = select i1 [[C]], i64 [[AND]], i64 [[CONV_1]] -; CHECK-NEXT: [[RED_NEXT]] = or i64 [[RED_NEXT_V]], [[RED]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOPS]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOPS]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 [[TMP13]] ; entry: %ext.0 = sext i8 0 to i32 diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll index 0078d00de28f8..9453ad7c61f68 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll @@ -222,9 +222,8 @@ define float @PR27826(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 ; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[IND_END9:%.*]] = mul i64 [[N_VEC]], 32 -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP2]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; 
CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP124]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -269,7 +268,7 @@ define float @PR27826(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 ; CHECK-NEXT: [[TMP155]] = fadd fast <4 x float> [[TMP154]], [[TMP153]] ; CHECK-NEXT: [[INDEX_NEXT13]] = add nuw i64 [[INDEX10]], 4 ; CHECK-NEXT: [[TMP156:%.*]] = icmp eq i64 [[INDEX_NEXT13]], [[N_VEC8]] -; CHECK-NEXT: br i1 [[TMP156]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP156]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP157:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP155]]) ; CHECK-NEXT: [[CMP_N14:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC8]] @@ -289,7 +288,7 @@ define float @PR27826(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 ; CHECK-NEXT: [[ADD4]] = fadd fast float [[ADD]], [[T2]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 32 ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT]], [[T0]] -; CHECK-NEXT: br i1 [[CMP1]], label [[FOR]], label [[LOOPEXIT]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP1]], label [[FOR]], label [[LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: loopexit: ; CHECK-NEXT: [[ADD4_LCSSA:%.*]] = phi float [ [[ADD4]], [[FOR]] ], [ [[TMP124]], [[MIDDLE_BLOCK]] ], [ [[TMP157]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_END]] @@ -369,10 +368,10 @@ define void @multi_exit(ptr %dst, ptr %src.1, ptr %src.2, i64 %A, i64 %B) #0 { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[SRC_1]], align 8, !alias.scope [[META5:![0-9]+]] +; CHECK-NEXT: [[TMP13:%.*]] = load i64, ptr [[SRC_1]], align 8, !alias.scope [[META6:![0-9]+]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP13]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr [[SRC_2]], align 8, !alias.scope [[META8:![0-9]+]] +; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr [[SRC_2]], align 8, !alias.scope [[META9:![0-9]+]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <2 x i64> poison, i64 [[TMP14]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT9]], <2 x i64> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq <2 x i64> [[BROADCAST_SPLAT]], zeroinitializer @@ -380,10 +379,10 @@ define void @multi_exit(ptr %dst, ptr %src.1, ptr %src.2, i64 %A, i64 %B) #0 { ; CHECK-NEXT: [[TMP17:%.*]] = and <2 x i1> [[TMP16]], [[TMP15]] ; CHECK-NEXT: [[TMP18:%.*]] = zext <2 x i1> [[TMP17]] to <2 x i8> ; CHECK-NEXT: [[TMP19:%.*]] = extractelement <2 x i8> [[TMP18]], i32 1 -; CHECK-NEXT: store i8 [[TMP19]], ptr [[DST]], align 1, !alias.scope [[META10:![0-9]+]], !noalias [[META12:![0-9]+]] +; CHECK-NEXT: 
store i8 [[TMP19]], ptr [[DST]], align 1, !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH]] ; CHECK: scalar.ph: @@ -406,7 +405,7 @@ define void @multi_exit(ptr %dst, ptr %src.1, ptr %src.2, i64 %A, i64 %B) #0 { ; CHECK-NEXT: [[IV_1_NEXT]] = add i32 [[IV_1]], 1 ; CHECK-NEXT: [[IV_1_NEXT_WIDE]] = zext i32 [[IV_1_NEXT]] to i64 ; CHECK-NEXT: [[EC_2:%.*]] = icmp ult i64 [[IV_1_NEXT_WIDE]], [[B]] -; CHECK-NEXT: br i1 [[EC_2]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[EC_2]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -485,7 +484,7 @@ define i1 @any_of_cost(ptr %start, ptr %end) #0 { ; CHECK-NEXT: [[TMP27]] = or <2 x i1> [[VEC_PHI3]], [[TMP25]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = or <2 x i1> [[TMP27]], [[TMP26]] ; CHECK-NEXT: [[TMP29:%.*]] = call i1 @llvm.vector.reduce.or.v2i1(<2 x i1> [[BIN_RDX]]) @@ -505,7 +504,7 @@ define i1 @any_of_cost(ptr %start, ptr %end) #0 { ; CHECK-NEXT: [[ANY_OF_NEXT]] = select i1 [[CMP13_NOT_NOT]], i1 [[ANY_OF]], i1 false ; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr inbounds i8, ptr [[PTR_IV]], i64 40 ; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[PTR_IV]], [[END]] -; CHECK-NEXT: br i1 [[CMP_NOT]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: [[ANY_OF_NEXT_LCSSA:%.*]] = phi i1 [ [[ANY_OF_NEXT]], [[LOOP]] ] ; CHECK-NEXT: ret i1 [[ANY_OF_NEXT_LCSSA]] @@ -562,7 +561,7 @@ define i64 @cost_assume(ptr %end, i64 %N) { ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP11]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[TMP8]], [[TMP7]] ; CHECK-NEXT: [[BIN_RDX5:%.*]] = add <2 x i64> [[TMP9]], [[BIN_RDX]] @@ -583,7 +582,7 @@ define i64 @cost_assume(ptr %end, i64 %N) { ; CHECK-NEXT: tail call void @llvm.assume(i1 [[C]]) ; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw [9 x i8], ptr null, i64 [[IV_NEXT]] ; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[GEP]], [[END]] -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i64 [ [[TMP12]], [[LOOP]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[DOTLCSSA]] @@ 
-627,7 +626,7 @@ define void @reduction_store(ptr noalias %src, ptr %dst, i1 %x) #2 { ; CHECK-NEXT: [[TMP12]] = and <4 x i32> [[VEC_PHI1]], [[TMP2]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 24 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = and <4 x i32> [[TMP12]], [[TMP11]] ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[BIN_RDX]]) @@ -649,7 +648,7 @@ define void @reduction_store(ptr noalias %src, ptr %dst, i1 %x) #2 { ; CHECK-NEXT: store i32 [[RED_NEXT]], ptr [[DST]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 29 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -694,22 +693,12 @@ define i64 @live_in_known_1_via_scev() { ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_PHI]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 8 -; CHECK-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> [[VEC_PHI]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 3, [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED_MUL]] = mul nsw i64 [[RED]], [[P_EXT]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[RED_MUL]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 [[TMP3]] ; entry: %sel = select i1 false, i32 3, i32 0 @@ -751,25 +740,12 @@ define i64 @cost_loop_invariant_recipes(i1 %x, i64 %y) { ; CHECK: vector.body: ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ splat (i64 1), [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3]] = mul <2 x i64> [[TMP2]], [[VEC_PHI]] -; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> [[TMP3]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT_I_I_I:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 1, [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[NOT_X:%.*]] = xor i1 [[X]], true -; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[NOT_X]] to i64 -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[Y]], 
[[EXT]] -; CHECK-NEXT: [[RED_MUL]] = mul i64 [[SHL]], [[RED]] -; CHECK-NEXT: [[IV_NEXT_I_I_I]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RED_MUL_LCSSA:%.*]] = phi i64 [ [[RED_MUL]], [[LOOP]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RED_MUL_LCSSA]] +; CHECK-NEXT: ret i64 [[TMP4]] ; entry: br label %loop @@ -805,24 +781,13 @@ define i32 @narrowed_reduction(ptr %a, i1 %cmp) #0 { ; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i32> [[TMP1]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP5:%.*]] = trunc <16 x i32> [[TMP3]] to <16 x i1> ; CHECK-NEXT: [[TMP7]] = zext <16 x i1> [[TMP5]] to <16 x i32> -; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP20:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP5]]) ; CHECK-NEXT: [[TMP21:%.*]] = zext i1 [[TMP20]] to i32 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP1:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[VEC_EPILOG_PH:%.*]] ], [ [[INC:%.*]], [[LOOP1]] ] -; CHECK-NEXT: [[OR13:%.*]] = phi i32 [ 0, [[VEC_EPILOG_PH]] ], [ [[OR:%.*]], [[LOOP1]] ] -; CHECK-NEXT: [[AND:%.*]] = and i32 [[OR13]], 1 -; CHECK-NEXT: [[OR]] = or i32 [[AND]], [[CONV]] -; CHECK-NEXT: [[INC]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 16 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP1]] ; CHECK: exit: -; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[LOOP1]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[OR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP21]] ; entry: %conv = zext i1 %cmp to i32 @@ -891,7 +856,7 @@ define i32 @g(i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD_3]], splat (i32 4) ; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = or <4 x i32> [[TMP16]], [[TMP15]] ; CHECK-NEXT: [[BIN_RDX5:%.*]] = or <4 x i32> [[TMP17]], [[BIN_RDX]] @@ -900,9 +865,8 @@ define i32 @g(i64 %n) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i32 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i32 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i32 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP20]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -926,7 +890,7 @@ define i32 @g(i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT15]] = add nuw i32 
[[INDEX9]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT11]] = add <4 x i32> [[VEC_IND10]], splat (i32 4) ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT15]], [[N_VEC8]] -; CHECK-NEXT: br i1 [[TMP26]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP26]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP25]]) ; CHECK-NEXT: [[CMP_N16:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC8]] @@ -943,7 +907,7 @@ define i32 @g(i64 %n) { ; CHECK-NEXT: [[SELECT_I:%.*]] = select i1 [[EXITCOND]], i32 0, i32 2 ; CHECK-NEXT: [[SELECT_NEXT]] = or i32 [[SELECT_I]], [[SELECT]] ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: [[SELECT_NEXT_LCSSA:%.*]] = phi i32 [ [[SELECT_NEXT]], [[LOOP]] ], [ [[TMP20]], [[MIDDLE_BLOCK]] ], [ [[TMP27]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[SELECT_NEXT_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll index 3d07eca646380..249efe1706e0f 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll @@ -39,30 +39,9 @@ define i1 @fn(ptr %nno) #0 { ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP12]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY20:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 10, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC35:%.*]] ] -; CHECK-NEXT: [[SUM_01:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[SUM_1:%.*]], [[FOR_INC35]] ] -; CHECK-NEXT: [[REM4:%.*]] = and i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i64 [[REM4]], 0 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[NNO]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: br i1 [[CMP21]], label [[IF_THEN22:%.*]], label [[FOR_INC35]] -; CHECK: if.then: -; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[TMP15]], 1 -; CHECK-NEXT: [[REM27:%.*]] = urem i32 [[MUL]], 10 -; CHECK-NEXT: br label [[FOR_INC35]] -; CHECK: loop.latch: -; CHECK-NEXT: [[REM27_PN:%.*]] = phi i32 [ [[REM27]], [[IF_THEN22]] ], [ [[TMP15]], [[FOR_BODY20]] ] -; CHECK-NEXT: [[SUM_1]] = or i32 [[REM27_PN]], [[SUM_01]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1 -; CHECK-NEXT: [[CMP19_NOT:%.*]] = icmp eq i64 [[INDVARS_IV]], 0 -; CHECK-NEXT: br i1 [[CMP19_NOT]], label [[EXIT]], label [[FOR_BODY20]] +; CHECK-NEXT: br label [[FOR_INC35:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_1]], [[FOR_INC35]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[CMP41:%.*]] = icmp eq i32 [[SUM_1_LCSSA]], 0 +; CHECK-NEXT: [[CMP41:%.*]] = icmp eq i32 [[TMP14]], 0 ; CHECK-NEXT: ret i1 [[CMP41]] ; entry: diff --git 
a/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll index 90d261b78c27c..ed288d2f99a0b 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll @@ -54,8 +54,7 @@ define void @test_pr59459(i64 %iv.start, ptr %arr) { ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[IND_END6:%.*]] = add i64 [[IV_START]], [[N_VEC]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -146,11 +145,9 @@ define void @test_induction_step_needs_expansion(ptr noalias %j, ptr %k, i64 %l, ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16 ; CHECK-NEXT: [[IND_END:%.*]] = mul i16 [[DOTCAST]], [[TMP0]] ; CHECK-NEXT: [[TMP1:%.*]] = mul <16 x i16> splat (i16 16), [[TMP2]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <16 x i16> poison, i16 [[OFF]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <16 x i16> [[BROADCAST_SPLATINSERT2]], <16 x i16> poison, <16 x i32> zeroinitializer -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[TMP0]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[OFF]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i16> [[DOTSPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer -; CHECK-NEXT: [[TMP11:%.*]] = mul <16 x i16> , [[DOTSPLAT]] +; CHECK-NEXT: [[TMP11:%.*]] = mul <16 x i16> , [[TMP2]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <16 x i16> zeroinitializer, [[TMP11]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -159,10 +156,10 @@ define void @test_induction_step_needs_expansion(ptr noalias %j, ptr %k, i64 %l, ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <16 x i16> [[VEC_IND]], [[TMP1]] ; CHECK-NEXT: [[STEP_ADD_2:%.*]] = add <16 x i16> [[STEP_ADD]], [[TMP1]] ; CHECK-NEXT: [[STEP_ADD_3:%.*]] = add <16 x i16> [[STEP_ADD_2]], [[TMP1]] -; CHECK-NEXT: [[TMP4:%.*]] = sub <16 x i16> [[VEC_IND]], [[BROADCAST_SPLAT3]] -; CHECK-NEXT: [[TMP5:%.*]] = sub <16 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT3]] -; CHECK-NEXT: [[TMP6:%.*]] = sub <16 x i16> [[STEP_ADD_2]], [[BROADCAST_SPLAT3]] -; CHECK-NEXT: [[TMP7:%.*]] = sub <16 x i16> [[STEP_ADD_3]], [[BROADCAST_SPLAT3]] +; CHECK-NEXT: [[TMP4:%.*]] = sub <16 x i16> [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP5:%.*]] = sub <16 x i16> [[STEP_ADD]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP6:%.*]] = sub <16 x i16> [[STEP_ADD_2]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP7:%.*]] = sub <16 x i16> [[STEP_ADD_3]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[K:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 16 ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 32 @@ -174,15 +171,14 @@ define void @test_induction_step_needs_expansion(ptr noalias %j, ptr %k, i64 %l, ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64 ; 
CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i16> [[STEP_ADD_3]], [[TMP1]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[L]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[DOTCAST9:%.*]] = trunc i64 [[N_VEC]] to i16 ; CHECK-NEXT: [[IND_END10:%.*]] = mul i16 [[DOTCAST9]], [[TMP0]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[L]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll index d0c311eb4521f..cc84fabd00ecc 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll @@ -495,18 +495,7 @@ define void @test_first_order_recurrence_tried_to_scalarized(ptr %dst, i1 %c, i3 ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 4, [[SCALAR_PH]] ], [ [[IV]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[FOR]] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[IV]] -; CHECK-NEXT: store i32 [[SUB]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll index c0ff8816c2543..3b0ad73d91338 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll @@ -59,8 +59,7 @@ define void @fp_iv_loop1(ptr noalias nocapture %A, i32 %N) #0 { ; AUTO_VEC-NEXT: [[DOTCAST12:%.*]] = sitofp i64 [[N_VEC]] to float ; AUTO_VEC-NEXT: [[TMP11:%.*]] = fmul fast float 5.000000e-01, [[DOTCAST12]] ; AUTO_VEC-NEXT: [[IND_END1:%.*]] = fadd fast float 1.000000e+00, [[TMP11]] -; AUTO_VEC-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; AUTO_VEC-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; AUTO_VEC-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; AUTO_VEC-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; AUTO_VEC: [[VEC_EPILOG_PH]]: ; AUTO_VEC-NEXT: 
[[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -353,8 +352,7 @@ define void @fadd_reassoc_FMF(ptr nocapture %p, i32 %N) { ; AUTO_VEC-NEXT: [[DOTCAST16:%.*]] = sitofp i64 [[N_VEC]] to float ; AUTO_VEC-NEXT: [[TMP12:%.*]] = fmul reassoc float 4.200000e+01, [[DOTCAST16]] ; AUTO_VEC-NEXT: [[IND_END1:%.*]] = fadd reassoc float 1.000000e+00, [[TMP12]] -; AUTO_VEC-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; AUTO_VEC-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; AUTO_VEC-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; AUTO_VEC-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; AUTO_VEC: [[VEC_EPILOG_PH]]: ; AUTO_VEC-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll index be546a1e79f0a..2f33e111d8ca7 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/gather_scatter.ll @@ -45,7 +45,8 @@ define void @foo1(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n ; AVX512-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo1( ; FVW2-NEXT: entry: @@ -70,7 +71,8 @@ define void @foo1(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n ; FVW2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -137,7 +139,8 @@ define void @foo2(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo2( ; FVW2-NEXT: entry: @@ -182,7 +185,8 @@ define void @foo2(ptr noalias %in, ptr noalias %out, ptr noalias %trigger, ptr n ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -250,7 +254,8 @@ define void @foo3(ptr noalias %in, ptr noalias %out, ptr noalias %trigger) { ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo3( ; FVW2-NEXT: entry: @@ -295,7 +300,8 @@ define void @foo3(ptr noalias %in, ptr noalias %out, ptr noalias %trigger) { ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -350,7 +356,8 @@ define void @foo2_addrspace(ptr addrspace(1) noalias %in, 
ptr addrspace(1) noali ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo2_addrspace( ; FVW2-NEXT: entry: @@ -395,7 +402,8 @@ define void @foo2_addrspace(ptr addrspace(1) noalias %in, ptr addrspace(1) noali ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -449,7 +457,8 @@ define void @foo2_addrspace2(ptr addrspace(1) noalias %in, ptr addrspace(0) noal ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo2_addrspace2( ; FVW2-NEXT: entry: @@ -494,7 +503,8 @@ define void @foo2_addrspace2(ptr addrspace(1) noalias %in, ptr addrspace(0) noal ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -548,7 +558,8 @@ define void @foo2_addrspace3(ptr addrspace(0) noalias %in, ptr addrspace(1) noal ; AVX512-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; AVX512: middle.block: ; AVX512-NEXT: br label [[FOR_END:%.*]] -; AVX512: scalar.ph: +; AVX512: for.end: +; AVX512-NEXT: ret void ; ; FVW2-LABEL: @foo2_addrspace3( ; FVW2-NEXT: entry: @@ -593,7 +604,8 @@ define void @foo2_addrspace3(ptr addrspace(0) noalias %in, ptr addrspace(1) noal ; FVW2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; FVW2: middle.block: ; FVW2-NEXT: br label [[FOR_END:%.*]] -; FVW2: scalar.ph: +; FVW2: for.end: +; FVW2-NEXT: ret void ; entry: br label %for.body @@ -689,8 +701,7 @@ define void @test_gather_not_profitable_pr48429(i32 %d, ptr readonly noalias %pt ; AVX512-NEXT: [[IND_END12:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP23]] ; AVX512-NEXT: [[TMP38:%.*]] = mul i64 [[N_VEC]], 64 ; AVX512-NEXT: [[IND_END15:%.*]] = getelementptr i8, ptr [[DEST]], i64 [[TMP38]] -; AVX512-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]] -; AVX512-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; AVX512-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; AVX512-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF17:![0-9]+]] ; AVX512: vec.epilog.ph: ; AVX512-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll index b2d587cbb1df9..877fcd4d638eb 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll @@ -90,29 +90,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; SSE: middle.block: ; SSE-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x double> [[PREDPHI3]], [[PREDPHI]] ; SSE-NEXT: 
[[TMP11:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> [[BIN_RDX]]) -; SSE-NEXT: br label [[DONE:%.*]] -; SSE: scalar.ph: -; SSE-NEXT: br label [[LOOP:%.*]] -; SSE: loop: -; SSE-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ] -; SSE-NEXT: [[TOT:%.*]] = phi double [ 0.000000e+00, [[SCALAR_PH]] ], [ [[TOT_NEXT:%.*]], [[NEXT_ITER]] ] -; SSE-NEXT: [[ADDR:%.*]] = getelementptr double, ptr [[ARR]], i32 [[I]] -; SSE-NEXT: [[NEXTVAL:%.*]] = load double, ptr [[ADDR]], align 8 -; SSE-NEXT: [[TST:%.*]] = fcmp fast une double [[NEXTVAL]], 4.200000e+01 -; SSE-NEXT: br i1 [[TST]], label [[DO_ADD:%.*]], label [[NO_ADD:%.*]] -; SSE: do.add: -; SSE-NEXT: [[TOT_NEW:%.*]] = fadd fast double [[TOT]], [[NEXTVAL]] -; SSE-NEXT: br label [[NEXT_ITER]] -; SSE: no.add: -; SSE-NEXT: br label [[NEXT_ITER]] -; SSE: next.iter: -; SSE-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ] -; SSE-NEXT: [[I_NEXT]] = add i32 [[I]], 1 -; SSE-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32 -; SSE-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]] +; SSE-NEXT: br label [[NEXT_ITER:%.*]] ; SSE: done: -; SSE-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] -; SSE-NEXT: ret double [[TOT_NEXT_LCSSA]] +; SSE-NEXT: ret double [[TMP11]] ; ; AVX-LABEL: @sumIfVector( ; AVX-NEXT: entry: @@ -153,29 +133,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; AVX-NEXT: [[BIN_RDX10:%.*]] = fadd fast <4 x double> [[PREDPHI8]], [[BIN_RDX]] ; AVX-NEXT: [[BIN_RDX11:%.*]] = fadd fast <4 x double> [[PREDPHI9]], [[BIN_RDX10]] ; AVX-NEXT: [[TMP21:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[BIN_RDX11]]) -; AVX-NEXT: br label [[DONE:%.*]] -; AVX: scalar.ph: -; AVX-NEXT: br label [[LOOP:%.*]] -; AVX: loop: -; AVX-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ] -; AVX-NEXT: [[TOT:%.*]] = phi double [ 0.000000e+00, [[SCALAR_PH]] ], [ [[TOT_NEXT:%.*]], [[NEXT_ITER]] ] -; AVX-NEXT: [[ADDR:%.*]] = getelementptr double, ptr [[ARR]], i32 [[I]] -; AVX-NEXT: [[NEXTVAL:%.*]] = load double, ptr [[ADDR]], align 8 -; AVX-NEXT: [[TST:%.*]] = fcmp fast une double [[NEXTVAL]], 4.200000e+01 -; AVX-NEXT: br i1 [[TST]], label [[DO_ADD:%.*]], label [[NO_ADD:%.*]] -; AVX: do.add: -; AVX-NEXT: [[TOT_NEW:%.*]] = fadd fast double [[TOT]], [[NEXTVAL]] -; AVX-NEXT: br label [[NEXT_ITER]] -; AVX: no.add: -; AVX-NEXT: br label [[NEXT_ITER]] -; AVX: next.iter: -; AVX-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ] -; AVX-NEXT: [[I_NEXT]] = add i32 [[I]], 1 -; AVX-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32 -; AVX-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]] +; AVX-NEXT: br label [[NEXT_ITER:%.*]] ; AVX: done: -; AVX-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ] -; AVX-NEXT: ret double [[TOT_NEXT_LCSSA]] +; AVX-NEXT: ret double [[TMP21]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll index e94e0789c42cb..a19b294541172 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll @@ -44,8 +44,7 @@ define i32 @iv_used_widened_and_truncated(ptr %dst, i64 %N) #0 { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], 
[[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[SCALAR_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] @@ -410,21 +409,9 @@ define i16 @iv_and_step_trunc() { ; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <2 x i16> [[TMP2]], i32 0 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[REC_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[IV_NEXT]] to i16 -; CHECK-NEXT: [[REC_NEXT]] = mul i16 [[TMP3]], [[TMP4]] -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[REC_LCSSA:%.*]] = phi i16 [ [[SCALAR_RECUR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[REC_LCSSA]] +; CHECK-NEXT: ret i16 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; entry: br label %loop @@ -613,16 +600,7 @@ define void @wide_iv_trunc(ptr %dst, i64 %N) { ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: store i32 [[IV_TRUNC]], ptr [[DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label [[EXIT_LOOPEXIT]], label [[LOOP]] ; CHECK: exit.loopexit: ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll index 4b4103e9806b9..61f07eff768c1 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll @@ -17,9 +17,7 @@ define i16 @wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16 ; CHECK-NEXT: [[TMP0:%.*]] = mul i16 [[DOTCAST]], [[O_1]] ; CHECK-NEXT: [[TMP1:%.*]] = mul <4 x i16> splat (i16 4), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[O_1]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i16> [[DOTSPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i16> , [[DOTSPLAT]] +; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i16> , [[BROADCAST_SPLAT]] ; CHECK-NEXT: 
[[INDUCTION:%.*]] = add <4 x i16> zeroinitializer, [[TMP2]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -89,19 +87,17 @@ define i16 @wide_sub_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16 ; CHECK-NEXT: [[TMP1:%.*]] = mul i16 [[DOTCAST]], [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i16> splat (i16 4), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i16> poison, i16 [[O_1]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT1]], <4 x i16> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[TMP0]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[O_1]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i16> [[DOTSPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i16> , [[DOTSPLAT]] +; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i16> , [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i16> zeroinitializer, [[TMP3]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i16> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i16> [[VEC_IND]], [[TMP2]] -; CHECK-NEXT: [[TMP5:%.*]] = sub <4 x i16> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; CHECK-NEXT: [[TMP10:%.*]] = sub <4 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT2]] +; CHECK-NEXT: [[TMP5:%.*]] = sub <4 x i16> [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP10:%.*]] = sub <4 x i16> [[STEP_ADD]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[DST:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 4 ; CHECK-NEXT: store <4 x i16> [[TMP5]], ptr [[TMP6]], align 2 diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll index 91c7e7a37eb93..2f9627855a2c9 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/interleave-cost.ll @@ -38,36 +38,6 @@ define void @test_free_instructions_feeding_geps_for_interleave_groups(ptr noali ; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[P_INVAR]], align 4 -; CHECK-NEXT: [[IV_MUL:%.*]] = shl i64 [[IV]], 2 -; CHECK-NEXT: [[GEP_DST_19:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[IV_MUL]] -; CHECK-NEXT: store float [[L_0]], ptr [[GEP_DST_19]], align 4 -; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[P_INVAR]], align 4 -; CHECK-NEXT: [[ADD_1:%.*]] = or disjoint i64 [[IV_MUL]], 1 -; CHECK-NEXT: [[GEP_DST_119:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[ADD_1]] -; CHECK-NEXT: store float [[L_1]], ptr [[GEP_DST_119]], align 4 -; CHECK-NEXT: [[ADD_2:%.*]] = or disjoint i64 [[IV_MUL]], 2 -; CHECK-NEXT: [[GEP_DST_129:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[ADD_2]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_129]], align 4 
-; CHECK-NEXT: [[ADD_3:%.*]] = or disjoint i64 [[IV_MUL]], 3 -; CHECK-NEXT: [[GEP_DST_140:%.*]] = getelementptr float, ptr [[DST_1]], i64 [[ADD_3]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_140]], align 4 -; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[P_INVAR]], align 4 -; CHECK-NEXT: [[GEP_DST_247:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[IV_MUL]] -; CHECK-NEXT: store float [[L_2]], ptr [[GEP_DST_247]], align 4 -; CHECK-NEXT: [[GEP_DST_255:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[ADD_1]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_255]], align 4 -; CHECK-NEXT: [[GEP_DST_265:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[ADD_2]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_265]], align 4 -; CHECK-NEXT: [[GEP_DST_276:%.*]] = getelementptr float, ptr [[DST_2]], i64 [[ADD_3]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[GEP_DST_276]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -504,17 +474,6 @@ define void @interleave_store_double_i64(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]], i32 1 -; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_1]], align 8 -; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { double, i64 }, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_0]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -616,17 +575,6 @@ define void @interleave_store_i64_double_2(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i64 [[IV]], ptr [[GEP_0]], align 8 -; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr { i64, double }, ptr [[DST]], i64 [[IV]], i32 1 -; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_1]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll index 228bc80cef9d1..e2329fe31cd56 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/interleaving.ll @@ -34,13 +34,9 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; SSE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; SSE-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SSE: middle.block: -; SSE-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; SSE: scalar.ph: ; SSE-NEXT: br label [[FOR_BODY:%.*]] ; SSE: for.cond.cleanup: ; 
SSE-NEXT: ret void -; SSE: for.body: -; SSE-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; ; AVX1-LABEL: @foo( ; AVX1-NEXT: entry: @@ -88,13 +84,9 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; AVX1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; AVX1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AVX1: middle.block: -; AVX1-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; AVX1: scalar.ph: ; AVX1-NEXT: br label [[FOR_BODY:%.*]] ; AVX1: for.cond.cleanup: ; AVX1-NEXT: ret void -; AVX1: for.body: -; AVX1-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; ; AVX2-LABEL: @foo( ; AVX2-NEXT: entry: @@ -142,13 +134,9 @@ define void @foo(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) { ; AVX2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; AVX2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AVX2: middle.block: -; AVX2-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; AVX2: scalar.ph: ; AVX2-NEXT: br label [[FOR_BODY:%.*]] ; AVX2: for.cond.cleanup: ; AVX2-NEXT: ret void -; AVX2: for.body: -; AVX2-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; ; ATOM-LABEL: @foo( ; ATOM-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll b/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll index b480eaf7502a8..d75fd0e0023f7 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll @@ -63,8 +63,7 @@ define void @uaddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca ; CHECK-NEXT: [[IND_END12:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[TMP12]] ; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 2 ; CHECK-NEXT: [[IND_END15:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[TMP13]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -190,8 +189,7 @@ define void @fshl(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur ; CHECK-NEXT: [[IND_END9:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST8]] ; CHECK-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC]] ; CHECK-NEXT: [[IND_END14:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF7:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll index 42d3019cc0ba2..9a3616a4340ff 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll +++ 
b/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll @@ -46,8 +46,7 @@ define i32 @inv_load_conditional(ptr %a, i64 %n, ptr %b, i32 %k) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[SMAX2]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF8:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll index 199f1c15fbc3d..5d40e6ab954fd 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll @@ -61,8 +61,7 @@ define i32 @inv_val_store_to_inv_address_with_reduction(ptr %a, i64 %n, ptr %b) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[SMAX2]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF8:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -169,8 +168,7 @@ define void @inv_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b, i3 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[SMAX2]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF17:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -317,8 +315,7 @@ define void @variant_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX10]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[SMAX10]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF29:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: 
[[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -398,16 +395,20 @@ for.end: ; preds = %for.body define void @test_store_of_final_reduction_value(i64 %x, ptr %dst) { ; CHECK-LABEL: define void @test_store_of_final_reduction_value( ; CHECK-SAME: i64 [[X:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV4:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED_NEXT]] = mul i64 [[RED]], [[X]] -; CHECK-NEXT: store i64 [[RED_NEXT]], ptr [[DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV4]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV4]], 1 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[X]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ , %[[VECTOR_PH]] ], [ [[TMP0:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0]] = mul <2 x i64> [[VEC_PHI]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> [[TMP0]]) +; CHECK-NEXT: store i64 [[TMP1]], ptr [[DST]], align 8 +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll index 9e0ef737eb59f..2a8c698f3f7fa 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll @@ -63,27 +63,9 @@ define i32 @test_explicit_pred(i64 %len) { ; CHECK-NEXT: [[BIN_RDX13:%.*]] = add <4 x i32> [[TMP18]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX14:%.*]] = add <4 x i32> [[TMP19]], [[BIN_RDX13]] ; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX14]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EARLYCND:%.*]] = icmp slt i64 [[IV]], [[LEN]] -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ 
[[TMP21]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP21]] ; entry: %alloca = alloca [4096 x i32] @@ -212,28 +194,9 @@ define i32 @test_explicit_pred_generic(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [4096 x i32] @@ -390,27 +353,9 @@ define i32 @test_invariant_address(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP98]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP99]], [[BIN_RDX7]] ; CHECK-NEXT: [[TMP101:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX8]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ALLOCA]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP101]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP101]] ; entry: %alloca = alloca [4096 x i32] @@ -659,28 +604,9 @@ define i32 @test_step_narrower_than_access(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX37:%.*]] = add <4 x 
i32> [[TMP146]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX38:%.*]] = add <4 x i32> [[TMP147]], [[BIN_RDX37]] ; CHECK-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX38]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR_I16P:%.*]] = getelementptr inbounds i16, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR_I16P]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP149]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP149]] ; entry: %alloca = alloca [4096 x i32] @@ -974,28 +900,9 @@ define i32 @test_non_zero_start(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [4096 x i32] @@ -1216,28 +1123,9 @@ define i32 @test_non_unit_stride(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP114]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP115]], [[BIN_RDX7]] ; CHECK-NEXT: [[TMP117:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> 
[[BIN_RDX8]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 2 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4093 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP117]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP117]] ; entry: %alloca = alloca [4096 x i32] @@ -1366,28 +1254,9 @@ define i32 @neg_off_by_many(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [1024 x i32] @@ -1516,28 +1385,9 @@ define i32 @neg_off_by_one_iteration(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ 
[[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [4095 x i32] @@ -1666,28 +1516,9 @@ define i32 @neg_off_by_one_byte(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %alloca = alloca [16383 x i8] @@ -1985,28 +1816,9 @@ define i32 @test_allocsize(i64 %len, ptr %test_base) nofree nosync { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: 
[[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCATION]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %allocation = call nonnull ptr @my_alloc(i32 16384) @@ -2136,28 +1948,9 @@ define i32 @test_allocsize_array(i64 %len, ptr %test_base) nofree nosync { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCATION]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %allocation = call nonnull ptr @my_array_alloc(i32 4096, i32 4) @@ -2297,28 +2090,9 @@ define i32 @test_allocsize_cond_deref(i1 %allzero, ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX10:%.*]] = add <4 x i32> [[TMP74]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add <4 x i32> [[TMP75]], [[BIN_RDX10]] ; CHECK-NEXT: [[TMP77:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX11]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load 
i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[ALLOCATION]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP77]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP77]] ; entry: %allocation = call nonnull ptr @my_alloc(i32 16384) diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll index cf04cd21c16be..b907e7e2fbfbf 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll @@ -1378,8 +1378,7 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; AVX1-NEXT: br i1 [[CMP_N]], [[FOR_END_LOOPEXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; AVX1: [[VEC_EPILOG_ITER_CHECK]]: -; AVX1-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; AVX1-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; AVX1-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; AVX1-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF19:![0-9]+]] ; AVX1: [[VEC_EPILOG_PH]]: ; AVX1-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -1471,8 +1470,7 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; AVX2-NEXT: br i1 [[CMP_N]], [[FOR_END_LOOPEXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; AVX2: [[VEC_EPILOG_ITER_CHECK]]: -; AVX2-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; AVX2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; AVX2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; AVX2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF33:![0-9]+]] ; AVX2: [[VEC_EPILOG_PH]]: ; AVX2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -1564,8 +1562,7 @@ define void @foo7(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; AVX512-NEXT: br i1 [[CMP_N]], [[FOR_END_LOOPEXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; AVX512: [[VEC_EPILOG_ITER_CHECK]]: -; AVX512-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; AVX512-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; AVX512-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; AVX512-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof 
[[PROF21]] ; AVX512: [[VEC_EPILOG_PH]]: ; AVX512-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -1702,8 +1699,7 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; AVX1-NEXT: br i1 [[CMP_N]], [[FOR_END_LOOPEXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; AVX1: [[VEC_EPILOG_ITER_CHECK]]: -; AVX1-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; AVX1-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; AVX1-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; AVX1-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF19]] ; AVX1: [[VEC_EPILOG_PH]]: ; AVX1-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -1795,8 +1791,7 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; AVX2-NEXT: br i1 [[CMP_N]], [[FOR_END_LOOPEXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; AVX2: [[VEC_EPILOG_ITER_CHECK]]: -; AVX2-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; AVX2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; AVX2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; AVX2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF33]] ; AVX2: [[VEC_EPILOG_PH]]: ; AVX2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -1888,8 +1883,7 @@ define void @foo8(ptr noalias nocapture %out, ptr noalias nocapture readonly %in ; AVX512-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; AVX512-NEXT: br i1 [[CMP_N]], [[FOR_END_LOOPEXIT:label %.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; AVX512: [[VEC_EPILOG_ITER_CHECK]]: -; AVX512-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; AVX512-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; AVX512-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; AVX512-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF21]] ; AVX512: [[VEC_EPILOG_PH]]: ; AVX512-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll index d0991a5c52fd2..e23f8a9b63ef0 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll @@ -1199,19 +1199,7 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O1VEC2-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 ; O1VEC2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; O1VEC2: middle.block: -; O1VEC2-NEXT: br label [[FOR_END:%.*]] -; O1VEC2: scalar.ph: ; O1VEC2-NEXT: br label [[FOR_BODY:%.*]] -; O1VEC2: for.body: -; O1VEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; O1VEC2-NEXT: 
[[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV]] -; O1VEC2-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; O1VEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[N]] -; O1VEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV]] -; O1VEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; O1VEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; O1VEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; O1VEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]] ; O1VEC2: for.end: ; O1VEC2-NEXT: [[TMP11:%.*]] = load i32, ptr [[A]], align 4 ; O1VEC2-NEXT: ret i32 [[TMP11]] @@ -1239,19 +1227,7 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; OzVEC2-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 ; OzVEC2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; OzVEC2: middle.block: -; OzVEC2-NEXT: br label [[FOR_END:%.*]] -; OzVEC2: scalar.ph: ; OzVEC2-NEXT: br label [[FOR_BODY:%.*]] -; OzVEC2: for.body: -; OzVEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; OzVEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS_IV]] -; OzVEC2-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OzVEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[N]] -; OzVEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS_IV]] -; OzVEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; OzVEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; OzVEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 -; OzVEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]] ; OzVEC2: for.end: ; OzVEC2-NEXT: [[TMP11:%.*]] = load i32, ptr [[A]], align 4 ; OzVEC2-NEXT: ret i32 [[TMP11]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll index fc37e5f96c309..e1140b59681fe 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll @@ -32,18 +32,6 @@ define i32 @foo_optsize() #0 { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 -; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1 -; CHECK-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[INC]] = add nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret i32 0 ; @@ -69,18 +57,6 @@ define i32 @foo_optsize() #0 { ; AUTOVF-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AUTOVF: [[MIDDLE_BLOCK]]: ; AUTOVF-NEXT: br label %[[FOR_END:.*]] -; AUTOVF: [[SCALAR_PH:.*]]: -; AUTOVF-NEXT: br label %[[FOR_BODY:.*]] -; AUTOVF: [[FOR_BODY]]: -; AUTOVF-NEXT: [[I_08:%.*]] = 
phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] -; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 -; AUTOVF-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1 -; AUTOVF-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1 -; AUTOVF-NEXT: [[INC]] = add nsw i32 [[I_08]], 1 -; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202 -; AUTOVF-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; AUTOVF: [[FOR_END]]: ; AUTOVF-NEXT: ret i32 0 ; @@ -128,18 +104,6 @@ define i32 @foo_minsize() #1 { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] -; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 -; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1 -; CHECK-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[INC]] = add nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret i32 0 ; @@ -165,18 +129,6 @@ define i32 @foo_minsize() #1 { ; AUTOVF-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; AUTOVF: [[MIDDLE_BLOCK]]: ; AUTOVF-NEXT: br label %[[FOR_END:.*]] -; AUTOVF: [[SCALAR_PH:.*]]: -; AUTOVF-NEXT: br label %[[FOR_BODY:.*]] -; AUTOVF: [[FOR_BODY]]: -; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] -; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 -; AUTOVF-NEXT: [[DOT:%.*]] = select i1 [[CMP1]], i8 2, i8 1 -; AUTOVF-NEXT: store i8 [[DOT]], ptr [[ARRAYIDX]], align 1 -; AUTOVF-NEXT: [[INC]] = add nsw i32 [[I_08]], 1 -; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[I_08]], 202 -; AUTOVF-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; AUTOVF: [[FOR_END]]: ; AUTOVF-NEXT: ret i32 0 ; @@ -226,18 +178,6 @@ define void @scev4stride1(ptr noalias nocapture %a, ptr noalias nocapture readon ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_07]], [[K]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[MUL]] -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_07]] -; CHECK-NEXT: store i32 [[TMP6]], ptr [[ARRAYIDX1]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 
[[INC]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]] ; CHECK: [[FOR_END_LOOPEXIT]]: ; CHECK-NEXT: ret void ; @@ -263,18 +203,6 @@ define void @scev4stride1(ptr noalias nocapture %a, ptr noalias nocapture readon ; AUTOVF-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; AUTOVF: [[MIDDLE_BLOCK]]: ; AUTOVF-NEXT: br label %[[FOR_END_LOOPEXIT:.*]] -; AUTOVF: [[SCALAR_PH:.*]]: -; AUTOVF-NEXT: br label %[[FOR_BODY:.*]] -; AUTOVF: [[FOR_BODY]]: -; AUTOVF-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; AUTOVF-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_07]], [[K]] -; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[MUL]] -; AUTOVF-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; AUTOVF-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_07]] -; AUTOVF-NEXT: store i32 [[TMP6]], ptr [[ARRAYIDX1]], align 4 -; AUTOVF-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1 -; AUTOVF-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 256 -; AUTOVF-NEXT: br i1 [[EXITCOND]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]] ; AUTOVF: [[FOR_END_LOOPEXIT]]: ; AUTOVF-NEXT: ret void ; @@ -431,14 +359,6 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72 -; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8 -; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -475,14 +395,6 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 { ; AUTOVF-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; AUTOVF: [[MIDDLE_BLOCK]]: ; AUTOVF-NEXT: br label %[[EXIT:.*]] -; AUTOVF: [[SCALAR_PH:.*]]: -; AUTOVF-NEXT: br label %[[LOOP:.*]] -; AUTOVF: [[LOOP]]: -; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; AUTOVF-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72 -; AUTOVF-NEXT: store ptr null, ptr [[PTR_IV]], align 8 -; AUTOVF-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]] -; AUTOVF-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; AUTOVF: [[EXIT]]: ; AUTOVF-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll index 65f84871e9b34..5d76dfb781636 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll @@ -108,11 +108,7 @@ define void @parallel_loop(ptr nocapture %a, ptr nocapture %b) nounwind uwtable ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 ; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label 
[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll b/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll index 62eacf6ab5953..619693abf51e4 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll @@ -104,23 +104,8 @@ define i8 @pr141968(i1 %cond, i8 %v) { ; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: br i1 [[COND]], label %[[LOOP_LATCH]], label %[[COND_FALSE:.*]] -; CHECK: [[COND_FALSE]]: -; CHECK-NEXT: [[SDIV:%.*]] = sdiv i16 [[SEXT]], [[ZEXT_TRUE]] -; CHECK-NEXT: [[SDIV_TRUNC:%.*]] = trunc i16 [[SDIV]] to i8 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[RET:%.*]] = phi i8 [ [[SDIV_TRUNC]], %[[COND_FALSE]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i8 [[IV_NEXT]], 0 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RET_LCSSA:%.*]] = phi i8 [ [[RET]], %[[LOOP_LATCH]] ], [ [[PREDPHI]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i8 [[RET_LCSSA]] +; CHECK-NEXT: ret i8 [[PREDPHI]] ; entry: %zext.true = zext i1 true to i16 diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll index c1adffde07510..31269b1b8c221 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll @@ -57,8 +57,7 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[UMAX2]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF8:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll b/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll index 972164fe49624..47db49c72766a 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr34438.ll @@ -16,26 +16,13 @@ define void @small_tc(ptr noalias nocapture %A, ptr noalias nocapture readonly % ; CHECK: vector.ph: ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[TMP0:%.*]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]] -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x float>, ptr [[TMP2:%.*]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, ptr [[B:%.*]], align 4, !llvm.access.group 
[[ACC_GRP0:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x float>, ptr [[A:%.*]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[TMP4:%.*]] = fadd fast <8 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] -; CHECK-NEXT: store <8 x float> [[TMP4]], ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <8 x float> [[TMP4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP6]], [[TMP7]] -; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll b/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll index 5d16ce5346bbf..737bcf35fbd2c 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr47437.ll @@ -264,9 +264,8 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon ; AVX1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; AVX1-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; AVX1: vec.epilog.iter.check: -; AVX1-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; AVX1-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; AVX1-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[SCALAR_PH]], label [[VEC_EPILOG_PH]] +; AVX1-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; AVX1-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; AVX1: vec.epilog.ph: ; AVX1-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_PH]] ] ; AVX1-NEXT: [[N_MOD_VF24:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 4 @@ -294,7 +293,7 @@ define void @test_muladd(ptr noalias nocapture %d1, ptr noalias nocapture readon ; AVX1-NEXT: store <4 x i32> [[TMP57]], ptr [[TMP58]], align 4 ; AVX1-NEXT: [[INDEX_NEXT33]] = add nuw i64 [[INDEX26]], 4 ; AVX1-NEXT: [[TMP59:%.*]] = icmp eq i64 [[INDEX_NEXT33]], [[N_VEC25]] -; AVX1-NEXT: br i1 [[TMP59]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]] +; AVX1-NEXT: br i1 [[TMP59]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[FOR_BODY1]], !llvm.loop [[LOOP4:![0-9]+]] ; AVX1: vec.epilog.middle.block: ; AVX1-NEXT: [[CMP_N34:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC25]] ; AVX1-NEXT: br i1 [[CMP_N34]], label [[FOR_END_LOOPEXIT]], label [[SCALAR_PH]] @@ -324,7 +323,7 @@ define void @test_muladd(ptr noalias 
nocapture %d1, ptr noalias nocapture readon ; AVX1-NEXT: store i32 [[ADD18]], ptr [[ARRAYIDX20]], align 4 ; AVX1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; AVX1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]] -; AVX1-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; AVX1-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; AVX1: for.end.loopexit: ; AVX1-NEXT: br label [[FOR_END]] ; AVX1: for.end: diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll b/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll index 00980655b61ed..e7f56a45ebdc6 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll @@ -43,26 +43,8 @@ define ptr @test(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[CMP_1:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: br i1 [[CMP_1]], label %[[LOOP_LATCH]], label %[[THEN:.*]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[M:%.*]] = phi i32 [ [[L]], %[[THEN]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i32 [[M]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP_2:%.*]] = icmp slt i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[CMP_2]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[GEP_LCSSA:%.*]] = phi ptr [ [[GEP_SRC]], %[[LOOP_LATCH]] ], [ [[TMP2]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret ptr [[GEP_LCSSA]] +; CHECK-NEXT: ret ptr [[TMP2]] ; entry: br label %loop.header diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll b/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll index 8a48f997052f0..286da4d31c799 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll @@ -22,10 +22,10 @@ define ptr addrspace(10) @japi1_vect_42283(ptr nocapture readonly %0, i32 %1) lo ; CHECK-NEXT: [[DOTUNPACK2:%.*]] = load i64, ptr addrspace(10) [[DOTELT1]], align 8, !tbaa [[JTBAA_IMMUT_TBAA8]] ; CHECK-NEXT: [[TMP8:%.*]] = add nsw i64 [[TMP2]], 1 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP8]], 4 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[TOP:.*]] -; CHECK: [[TOP]]: +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: ; CHECK-NEXT: [[TMP17:%.*]] = icmp ult i64 [[TMP8]], 16 -; CHECK-NEXT: br i1 [[TMP17]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 [[TMP17]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: 
[[VECTOR_PH]]: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP8]], 16 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP8]], [[N_MOD_VF]] @@ -59,16 +59,15 @@ define ptr addrspace(10) @japi1_vect_42283(ptr nocapture readonly %0, i32 %1) lo ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[STEP_ADD5]], splat (i64 4) ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK1:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] -; CHECK: [[MIDDLE_BLOCK1]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[L44:.*]], label %[[MIDDLE_BLOCK:.*]] +; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP8]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[SCALAR_PH]], !prof [[PROF15:![0-9]+]] -; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[TOP]] ] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[L44:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] +; CHECK: [[VEC_EPILOG_ITER_CHECK]]: +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF15:![0-9]+]] +; CHECK: [[VEC_EPILOG_PH]]: +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[N_MOD_VF4:%.*]] = urem i64 [[TMP8]], 4 ; CHECK-NEXT: [[N_VEC5:%.*]] = sub i64 [[TMP8]], [[N_MOD_VF4]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <4 x ptr addrspace(10)> poison, ptr addrspace(10) [[DOTUNPACK]], i64 0 @@ -78,10 +77,10 @@ define ptr addrspace(10) @japi1_vect_42283(ptr nocapture readonly %0, i32 %1) lo ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[VEC_EPILOG_RESUME_VAL]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i64> [[DOTSPLAT]], -; CHECK-NEXT: br label %[[L26:.*]] -; CHECK: [[L26]]: -; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDEX_NEXT14:%.*]], %[[L26]] ] -; CHECK-NEXT: [[VEC_IND8:%.*]] = phi <4 x i64> [ [[INDUCTION]], %[[SCALAR_PH]] ], [ [[VEC_IND_NEXT9:%.*]], %[[L26]] ] +; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT14:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND8:%.*]] = phi <4 x i64> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds { ptr addrspace(10), i64 }, ptr addrspace(13) [[TMP7]], <4 x i64> [[VEC_IND8]], i32 0 ; CHECK-NEXT: call void @llvm.masked.scatter.v4p10.v4p13(<4 x ptr addrspace(10)> [[BROADCAST_SPLAT11]], <4 x ptr addrspace(13)> [[TMP28]], i32 8, <4 x i1> splat (i1 true)), !tbaa [[JTBAA_ARRAYBUF_TBAA10]] ; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds { 
ptr addrspace(10), i64 }, ptr addrspace(13) [[TMP7]], <4 x i64> [[VEC_IND8]], i32 1 @@ -89,22 +88,22 @@ define ptr addrspace(10) @japi1_vect_42283(ptr nocapture readonly %0, i32 %1) lo ; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX7]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT9]] = add <4 x i64> [[VEC_IND8]], splat (i64 4) ; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC5]] -; CHECK-NEXT: br i1 [[TMP30]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[L26]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP30]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC5]] ; CHECK-NEXT: br i1 [[CMP_N15]], label %[[L44]], label %[[VEC_EPILOG_SCALAR_PH]] ; CHECK: [[VEC_EPILOG_SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi i64 [ [[N_VEC5]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ITER_CHECK]] ] -; CHECK-NEXT: br label %[[L27:.*]] -; CHECK: [[L27]]: -; CHECK-NEXT: [[VALUE_PHI5:%.*]] = phi i64 [ [[BC_RESUME_VAL6]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[TMP27:%.*]], %[[L27]] ] +; CHECK-NEXT: [[BC_RESUME_VAL17:%.*]] = phi i64 [ [[N_VEC5]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-NEXT: br label %[[L26:.*]] +; CHECK: [[L26]]: +; CHECK-NEXT: [[VALUE_PHI5:%.*]] = phi i64 [ [[BC_RESUME_VAL17]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[TMP27:%.*]], %[[L26]] ] ; CHECK-NEXT: [[DOTREPACK:%.*]] = getelementptr inbounds { ptr addrspace(10), i64 }, ptr addrspace(13) [[TMP7]], i64 [[VALUE_PHI5]], i32 0 ; CHECK-NEXT: store ptr addrspace(10) [[DOTUNPACK]], ptr addrspace(13) [[DOTREPACK]], align 8, !tbaa [[JTBAA_ARRAYBUF_TBAA10]] ; CHECK-NEXT: [[DOTREPACK4:%.*]] = getelementptr inbounds { ptr addrspace(10), i64 }, ptr addrspace(13) [[TMP7]], i64 [[VALUE_PHI5]], i32 1 ; CHECK-NEXT: store i64 [[DOTUNPACK2]], ptr addrspace(13) [[DOTREPACK4]], align 8, !tbaa [[JTBAA_ARRAYBUF_TBAA10]] ; CHECK-NEXT: [[TMP27]] = add i64 [[VALUE_PHI5]], 1 ; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[VALUE_PHI5]], [[TMP2]] -; CHECK-NEXT: br i1 [[DOTNOT]], label %[[L44]], label %[[L27]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-NEXT: br i1 [[DOTNOT]], label %[[L44]], label %[[L26]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[L44]]: ; CHECK-NEXT: ret ptr addrspace(10) null ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll index 3922796a1a4b8..36163790706ed 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll @@ -39,23 +39,7 @@ define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF0:![0-9]+]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[BB6:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 99, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 1 -; CHECK-NEXT: [[ICMP17:%.*]] = icmp eq i64 [[AND]], 0 -; CHECK-NEXT: br i1 [[ICMP17]], label [[BB18:%.*]], label [[LOOP_LATCH]], !prof [[PROF5:![0-9]+]] -; CHECK: bb18: -; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[IV]], 1 -; CHECK-NEXT: [[GETELEMENTPTR19:%.*]] = 
getelementptr inbounds i64, ptr [[ARR]], i64 [[OR]] -; CHECK-NEXT: store i64 1, ptr [[GETELEMENTPTR19]], align 8 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[ICMP22:%.*]] = icmp eq i64 [[IV_NEXT]], 90 -; CHECK-NEXT: br i1 [[ICMP22]], label [[BB6]], label [[LOOP_HEADER]], !prof [[PROF6:![0-9]+]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: bb6: ; CHECK-NEXT: ret void ; @@ -99,6 +83,4 @@ attributes #0 = {"target-cpu"="haswell" "target-features"="+avx2" } ; CHECK: [[META2]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[META4]] = !{!"llvm.loop.estimated_trip_count", i32 24} -; CHECK: [[PROF5]] = !{!"branch_weights", i32 1, i32 1} -; CHECK: [[PROF6]] = !{!"branch_weights", i32 1, i32 95} ;. diff --git a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll index 2bc3a97d162f0..f066000fe9f66 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll @@ -71,23 +71,11 @@ define float @reduction_sum_float_fastmath(i32 %n, ptr %array) { ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP7]], [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[BIN_RDX]]) -; CHECK-NEXT: br label [[LOOP_EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_INC:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[SUM_INC:%.*]], [[LOOP]] ], [ 0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ADDRESS:%.*]] = getelementptr float, ptr [[ARRAY]], i32 [[IDX]] -; CHECK-NEXT: [[VALUE:%.*]] = load float, ptr [[ADDRESS]], align 4 -; CHECK-NEXT: [[SUM_INC]] = fadd fast float [[SUM]], [[VALUE]] -; CHECK-NEXT: [[IDX_INC]] = add i32 [[IDX]], 1 -; CHECK-NEXT: [[BE_COND:%.*]] = icmp ne i32 [[IDX_INC]], 4096 -; CHECK-NEXT: br i1 [[BE_COND]], label [[LOOP]], label [[LOOP_EXIT_LOOPEXIT]] ; CHECK: loop.exit.loopexit: -; CHECK-NEXT: [[SUM_INC_LCSSA:%.*]] = phi float [ [[SUM_INC]], [[LOOP]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP_EXIT]] ; CHECK: loop.exit: -; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[SUM_INC_LCSSA]], [[LOOP_EXIT_LOOPEXIT]] ] +; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP9]], [[LOOP]] ] ; CHECK-NEXT: ret float [[SUM_LCSSA]] ; entry: @@ -134,23 +122,11 @@ define float @reduction_sum_float_only_reassoc(i32 %n, ptr %array) { ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <4 x float> [[TMP7]], [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = call reassoc float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[BIN_RDX]]) -; CHECK-NEXT: br label [[LOOP_EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_INC:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[SUM_INC:%.*]], [[LOOP]] ], [ -0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ADDRESS:%.*]] = getelementptr float, ptr [[ARRAY]], i32 [[IDX]] -; CHECK-NEXT: [[VALUE:%.*]] = load float, ptr [[ADDRESS]], align 4 -; CHECK-NEXT: [[SUM_INC]] = fadd reassoc float [[SUM]], [[VALUE]] -; CHECK-NEXT: [[IDX_INC]] = add i32 [[IDX]], 1 -; 
CHECK-NEXT: [[BE_COND:%.*]] = icmp ne i32 [[IDX_INC]], 4096 -; CHECK-NEXT: br i1 [[BE_COND]], label [[LOOP]], label [[LOOP_EXIT_LOOPEXIT]] ; CHECK: loop.exit.loopexit: -; CHECK-NEXT: [[SUM_INC_LCSSA:%.*]] = phi float [ [[SUM_INC]], [[LOOP]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP_EXIT]] ; CHECK: loop.exit: -; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[SUM_INC_LCSSA]], [[LOOP_EXIT_LOOPEXIT]] ] +; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP9]], [[LOOP]] ] ; CHECK-NEXT: ret float [[SUM_LCSSA]] ; entry: @@ -197,23 +173,11 @@ define float @reduction_sum_float_only_reassoc_and_contract(i32 %n, ptr %array) ; CHECK: middle.block: ; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc contract <4 x float> [[TMP7]], [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = call reassoc contract float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[BIN_RDX]]) -; CHECK-NEXT: br label [[LOOP_EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IDX:%.*]] = phi i32 [ [[IDX_INC:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[SUM_INC:%.*]], [[LOOP]] ], [ -0.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ADDRESS:%.*]] = getelementptr float, ptr [[ARRAY]], i32 [[IDX]] -; CHECK-NEXT: [[VALUE:%.*]] = load float, ptr [[ADDRESS]], align 4 -; CHECK-NEXT: [[SUM_INC]] = fadd reassoc contract float [[SUM]], [[VALUE]] -; CHECK-NEXT: [[IDX_INC]] = add i32 [[IDX]], 1 -; CHECK-NEXT: [[BE_COND:%.*]] = icmp ne i32 [[IDX_INC]], 4096 -; CHECK-NEXT: br i1 [[BE_COND]], label [[LOOP]], label [[LOOP_EXIT_LOOPEXIT]] ; CHECK: loop.exit.loopexit: -; CHECK-NEXT: [[SUM_INC_LCSSA:%.*]] = phi float [ [[SUM_INC]], [[LOOP]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP_EXIT]] ; CHECK: loop.exit: -; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[SUM_INC_LCSSA]], [[LOOP_EXIT_LOOPEXIT]] ] +; CHECK-NEXT: [[SUM_LCSSA:%.*]] = phi float [ -0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP9]], [[LOOP]] ] ; CHECK-NEXT: ret float [[SUM_LCSSA]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll index bdef894794850..70b05ac34559e 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll @@ -40,36 +40,16 @@ define void @smax_call_uniform(ptr %dst, i64 %x) { ; CHECK-NEXT: br label %[[PRED_UREM_CONTINUE6]] ; CHECK: [[PRED_UREM_CONTINUE6]]: ; CHECK-NEXT: [[TMP13:%.*]] = tail call i64 @llvm.smax.i64(i64 0, i64 0) -; CHECK-NEXT: [[P:%.*]] = select i1 [[C]], i64 1, i64 [[TMP13]] ; CHECK-NEXT: [[PREDPHI7:%.*]] = select i1 [[C]], i64 1, i64 [[TMP13]] -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[P]], 1 ; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[PREDPHI7]], 1 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[DST]], i64 [[ADD]] ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP17]] -; CHECK-NEXT: store i64 0, ptr [[GEP]], align 8 +; CHECK-NEXT: store i64 0, ptr [[TMP19]], align 8 ; CHECK-NEXT: store i64 0, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: 
[[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[REM1:%.*]] = urem i64 [[MUL]], [[X]] -; CHECK-NEXT: [[SMAX:%.*]] = tail call i64 @llvm.smax.i64(i64 [[REM1]], i64 0) -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1, %[[LOOP_HEADER]] ], [ [[SMAX]], %[[ELSE]] ] -; CHECK-NEXT: [[IV_NEXT:%.*]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IV_NEXT]] -; CHECK-NEXT: store i64 0, ptr [[GEP1]], align 8 -; CHECK-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT1]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll new file mode 100644 index 0000000000000..87848730c8f01 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll @@ -0,0 +1,460 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph:" --version 6 +; RUN: opt -p loop-vectorize -mtriple=x86_64-linux-gnu -S %s | FileCheck --check-prefix=I64 %s +; RUN: opt -p loop-vectorize -mtriple=i386-pc-linux-gnu -S %s | FileCheck --check-prefix=I32 %s + + +define void @test_store_initially_interleave(i32 %n, ptr noalias %src) #0 { +; I64-LABEL: define void @test_store_initially_interleave( +; I64-SAME: i32 [[N:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0:[0-9]+]] { +; I64-NEXT: [[ITER_CHECK:.*:]] +; I64-NEXT: [[TMP4:%.*]] = add i32 [[N]], 1 +; I64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP4]], 4 +; I64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] +; I64: [[VECTOR_SCEVCHECK]]: +; I64-NEXT: [[TMP1:%.*]] = icmp slt i32 [[N]], 0 +; I64-NEXT: br i1 [[TMP1]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; I64: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; I64-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ule i32 [[TMP4]], 16 +; I64-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; I64: [[VECTOR_PH]]: +; I64-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP4]], 16 +; I64-NEXT: [[TMP2:%.*]] = icmp eq i32 [[N_MOD_VF]], 0 +; I64-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 16, i32 [[N_MOD_VF]] +; I64-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP4]], [[TMP3]] +; I64-NEXT: br label %[[VECTOR_BODY:.*]] +; I64: [[VECTOR_BODY]]: +; I64-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I64-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I64-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; I64-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i32> [[STEP_ADD]], splat (i32 4) +; I64-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i32> [[STEP_ADD_2]], splat (i32 4) +; I64-NEXT: [[IV:%.*]] = add i32 [[INDEX]], 0 +; I64-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 1 +; I64-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 2 +; I64-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 3 +; I64-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 4 +; I64-NEXT: [[TMP9:%.*]] = add i32 [[INDEX]], 5 +; I64-NEXT:
[[TMP10:%.*]] = add i32 [[INDEX]], 6 +; I64-NEXT: [[TMP11:%.*]] = add i32 [[INDEX]], 7 +; I64-NEXT: [[TMP12:%.*]] = add i32 [[INDEX]], 8 +; I64-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 9 +; I64-NEXT: [[TMP14:%.*]] = add i32 [[INDEX]], 10 +; I64-NEXT: [[TMP15:%.*]] = add i32 [[INDEX]], 11 +; I64-NEXT: [[TMP16:%.*]] = add i32 [[INDEX]], 12 +; I64-NEXT: [[TMP17:%.*]] = add i32 [[INDEX]], 13 +; I64-NEXT: [[TMP18:%.*]] = add i32 [[INDEX]], 14 +; I64-NEXT: [[TMP19:%.*]] = add i32 [[INDEX]], 15 +; I64-NEXT: [[TMP20:%.*]] = uitofp <4 x i32> [[VEC_IND]] to <4 x double> +; I64-NEXT: [[TMP21:%.*]] = uitofp <4 x i32> [[STEP_ADD]] to <4 x double> +; I64-NEXT: [[TMP22:%.*]] = uitofp <4 x i32> [[STEP_ADD_2]] to <4 x double> +; I64-NEXT: [[TMP23:%.*]] = uitofp <4 x i32> [[STEP_ADD_3]] to <4 x double> +; I64-NEXT: [[ADD_PTR_I:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[IV]] +; I64-NEXT: [[TMP25:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP5]] +; I64-NEXT: [[TMP26:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP6]] +; I64-NEXT: [[TMP27:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP7]] +; I64-NEXT: [[TMP28:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP8]] +; I64-NEXT: [[TMP29:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP9]] +; I64-NEXT: [[TMP30:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP10]] +; I64-NEXT: [[TMP31:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP11]] +; I64-NEXT: [[TMP32:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP12]] +; I64-NEXT: [[TMP33:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP13]] +; I64-NEXT: [[TMP34:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP14]] +; I64-NEXT: [[TMP35:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP15]] +; I64-NEXT: [[TMP36:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP16]] +; I64-NEXT: [[TMP37:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP17]] +; I64-NEXT: [[TMP38:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP18]] +; I64-NEXT: [[TMP39:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP19]] +; I64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADD_PTR_I]], align 4 +; I64-NEXT: [[TMP41:%.*]] = load ptr, ptr [[TMP25]], align 4 +; I64-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP26]], align 4 +; I64-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP27]], align 4 +; I64-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP28]], align 4 +; I64-NEXT: [[TMP45:%.*]] = load ptr, ptr [[TMP29]], align 4 +; I64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP30]], align 4 +; I64-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP31]], align 4 +; I64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP32]], align 4 +; I64-NEXT: [[TMP49:%.*]] = load ptr, ptr [[TMP33]], align 4 +; I64-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP34]], align 4 +; I64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP35]], align 4 +; I64-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP36]], align 4 +; I64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP37]], align 4 +; I64-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP38]], align 4 +; I64-NEXT: [[TMP55:%.*]] = load ptr, ptr [[TMP39]], align 4 +; I64-NEXT: [[CONV:%.*]] = extractelement <4 x double> [[TMP20]], i32 0 +; I64-NEXT: store double [[CONV]], ptr [[TMP0]], align 4 +; I64-NEXT: [[TMP57:%.*]] = extractelement <4 x double> [[TMP20]], i32 1 +; I64-NEXT: store double [[TMP57]], ptr [[TMP41]], align 4 +; I64-NEXT: [[TMP58:%.*]] = extractelement <4 
x double> [[TMP20]], i32 2 +; I64-NEXT: store double [[TMP58]], ptr [[TMP42]], align 4 +; I64-NEXT: [[TMP59:%.*]] = extractelement <4 x double> [[TMP20]], i32 3 +; I64-NEXT: store double [[TMP59]], ptr [[TMP43]], align 4 +; I64-NEXT: [[TMP60:%.*]] = extractelement <4 x double> [[TMP21]], i32 0 +; I64-NEXT: store double [[TMP60]], ptr [[TMP44]], align 4 +; I64-NEXT: [[TMP61:%.*]] = extractelement <4 x double> [[TMP21]], i32 1 +; I64-NEXT: store double [[TMP61]], ptr [[TMP45]], align 4 +; I64-NEXT: [[TMP62:%.*]] = extractelement <4 x double> [[TMP21]], i32 2 +; I64-NEXT: store double [[TMP62]], ptr [[TMP46]], align 4 +; I64-NEXT: [[TMP63:%.*]] = extractelement <4 x double> [[TMP21]], i32 3 +; I64-NEXT: store double [[TMP63]], ptr [[TMP47]], align 4 +; I64-NEXT: [[TMP64:%.*]] = extractelement <4 x double> [[TMP22]], i32 0 +; I64-NEXT: store double [[TMP64]], ptr [[TMP48]], align 4 +; I64-NEXT: [[TMP65:%.*]] = extractelement <4 x double> [[TMP22]], i32 1 +; I64-NEXT: store double [[TMP65]], ptr [[TMP49]], align 4 +; I64-NEXT: [[TMP66:%.*]] = extractelement <4 x double> [[TMP22]], i32 2 +; I64-NEXT: store double [[TMP66]], ptr [[TMP50]], align 4 +; I64-NEXT: [[TMP67:%.*]] = extractelement <4 x double> [[TMP22]], i32 3 +; I64-NEXT: store double [[TMP67]], ptr [[TMP51]], align 4 +; I64-NEXT: [[TMP68:%.*]] = extractelement <4 x double> [[TMP23]], i32 0 +; I64-NEXT: store double [[TMP68]], ptr [[TMP52]], align 4 +; I64-NEXT: [[TMP69:%.*]] = extractelement <4 x double> [[TMP23]], i32 1 +; I64-NEXT: store double [[TMP69]], ptr [[TMP53]], align 4 +; I64-NEXT: [[TMP70:%.*]] = extractelement <4 x double> [[TMP23]], i32 2 +; I64-NEXT: store double [[TMP70]], ptr [[TMP54]], align 4 +; I64-NEXT: [[TMP71:%.*]] = extractelement <4 x double> [[TMP23]], i32 3 +; I64-NEXT: store double [[TMP71]], ptr [[TMP55]], align 4 +; I64-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 +; I64-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD_3]], splat (i32 4) +; I64-NEXT: [[TMP72:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; I64-NEXT: br i1 [[TMP72]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; I64: [[MIDDLE_BLOCK]]: +; I64-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]] +; I64: [[VEC_EPILOG_ITER_CHECK]]: +; I64-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP3]], 4 +; I64-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] +; I64: [[VEC_EPILOG_PH]]: +; I64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; I64-NEXT: [[N_MOD_VF2:%.*]] = urem i32 [[TMP4]], 4 +; I64-NEXT: [[TMP73:%.*]] = icmp eq i32 [[N_MOD_VF2]], 0 +; I64-NEXT: [[TMP74:%.*]] = select i1 [[TMP73]], i32 4, i32 [[N_MOD_VF2]] +; I64-NEXT: [[N_VEC3:%.*]] = sub i32 [[TMP4]], [[TMP74]] +; I64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[BC_RESUME_VAL]], i64 0 +; I64-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; I64-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3> +; I64-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; I64: [[VEC_EPILOG_VECTOR_BODY]]: +; I64-NEXT: [[INDEX4:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; I64-NEXT: [[VEC_IND5:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; I64-NEXT: [[TMP75:%.*]] =
add i32 [[INDEX4]], 0 +; I64-NEXT: [[TMP76:%.*]] = add i32 [[INDEX4]], 1 +; I64-NEXT: [[TMP77:%.*]] = add i32 [[INDEX4]], 2 +; I64-NEXT: [[TMP78:%.*]] = add i32 [[INDEX4]], 3 +; I64-NEXT: [[TMP79:%.*]] = uitofp <4 x i32> [[VEC_IND5]] to <4 x double> +; I64-NEXT: [[TMP80:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP75]] +; I64-NEXT: [[TMP81:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP76]] +; I64-NEXT: [[TMP82:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP77]] +; I64-NEXT: [[TMP83:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP78]] +; I64-NEXT: [[TMP84:%.*]] = load ptr, ptr [[TMP80]], align 4 +; I64-NEXT: [[TMP85:%.*]] = load ptr, ptr [[TMP81]], align 4 +; I64-NEXT: [[TMP86:%.*]] = load ptr, ptr [[TMP82]], align 4 +; I64-NEXT: [[TMP87:%.*]] = load ptr, ptr [[TMP83]], align 4 +; I64-NEXT: [[TMP88:%.*]] = extractelement <4 x double> [[TMP79]], i32 0 +; I64-NEXT: store double [[TMP88]], ptr [[TMP84]], align 4 +; I64-NEXT: [[TMP89:%.*]] = extractelement <4 x double> [[TMP79]], i32 1 +; I64-NEXT: store double [[TMP89]], ptr [[TMP85]], align 4 +; I64-NEXT: [[TMP90:%.*]] = extractelement <4 x double> [[TMP79]], i32 2 +; I64-NEXT: store double [[TMP90]], ptr [[TMP86]], align 4 +; I64-NEXT: [[TMP91:%.*]] = extractelement <4 x double> [[TMP79]], i32 3 +; I64-NEXT: store double [[TMP91]], ptr [[TMP87]], align 4 +; I64-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX4]], 4 +; I64-NEXT: [[VEC_IND_NEXT7]] = add <4 x i32> [[VEC_IND5]], splat (i32 4) +; I64-NEXT: [[TMP92:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC3]] +; I64-NEXT: br i1 [[TMP92]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; I64: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; I64-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]] +; I64: [[VEC_EPILOG_SCALAR_PH]]: +; +; I32-LABEL: define void @test_store_initially_interleave( +; I32-SAME: i32 [[N:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0:[0-9]+]] { +; I32-NEXT: [[ENTRY:.*:]] +; I32-NEXT: [[TMP0:%.*]] = add i32 [[N]], 1 +; I32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP0]], 4 +; I32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; I32: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; I32-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ule i32 [[TMP0]], 16 +; I32-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] +; I32: [[VECTOR_PH]]: +; I32-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16 +; I32-NEXT: [[TMP1:%.*]] = icmp eq i32 [[N_MOD_VF]], 0 +; I32-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 16, i32 [[N_MOD_VF]] +; I32-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[TMP2]] +; I32-NEXT: br label %[[VECTOR_BODY:.*]] +; I32: [[VECTOR_BODY]]: +; I32-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I32-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I32-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], splat (i32 4) +; I32-NEXT: [[STEP_ADD_2:%.*]] = add <4 x i32> [[STEP_ADD]], splat (i32 4) +; I32-NEXT: [[STEP_ADD_3:%.*]] = add <4 x i32> [[STEP_ADD_2]], splat (i32 4) +; I32-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0 +; I32-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1 +; I32-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 2 +; I32-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 3 +; I32-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 4 +; I32-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 5 +; I32-NEXT: [[TMP9:%.*]] = add i32 [[INDEX]], 6 +;
I32-NEXT: [[TMP10:%.*]] = add i32 [[INDEX]], 7 +; I32-NEXT: [[TMP11:%.*]] = add i32 [[INDEX]], 8 +; I32-NEXT: [[TMP12:%.*]] = add i32 [[INDEX]], 9 +; I32-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 10 +; I32-NEXT: [[TMP14:%.*]] = add i32 [[INDEX]], 11 +; I32-NEXT: [[TMP40:%.*]] = add i32 [[INDEX]], 12 +; I32-NEXT: [[TMP41:%.*]] = add i32 [[INDEX]], 13 +; I32-NEXT: [[TMP42:%.*]] = add i32 [[INDEX]], 14 +; I32-NEXT: [[TMP43:%.*]] = add i32 [[INDEX]], 15 +; I32-NEXT: [[TMP44:%.*]] = uitofp <4 x i32> [[VEC_IND]] to <4 x double> +; I32-NEXT: [[TMP45:%.*]] = uitofp <4 x i32> [[STEP_ADD]] to <4 x double> +; I32-NEXT: [[TMP46:%.*]] = uitofp <4 x i32> [[STEP_ADD_2]] to <4 x double> +; I32-NEXT: [[TMP55:%.*]] = uitofp <4 x i32> [[STEP_ADD_3]] to <4 x double> +; I32-NEXT: [[TMP15:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP3]] +; I32-NEXT: [[TMP16:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP4]] +; I32-NEXT: [[TMP17:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP5]] +; I32-NEXT: [[TMP18:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP6]] +; I32-NEXT: [[TMP19:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP7]] +; I32-NEXT: [[TMP20:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP8]] +; I32-NEXT: [[TMP21:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP9]] +; I32-NEXT: [[TMP22:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP10]] +; I32-NEXT: [[TMP56:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP11]] +; I32-NEXT: [[TMP57:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP12]] +; I32-NEXT: [[TMP58:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP13]] +; I32-NEXT: [[TMP59:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP14]] +; I32-NEXT: [[TMP60:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP40]] +; I32-NEXT: [[TMP61:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP41]] +; I32-NEXT: [[TMP62:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP42]] +; I32-NEXT: [[TMP71:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP43]] +; I32-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP15]], align 4 +; I32-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP16]], align 4 +; I32-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP17]], align 4 +; I32-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP18]], align 4 +; I32-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP19]], align 4 +; I32-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP20]], align 4 +; I32-NEXT: [[TMP29:%.*]] = load ptr, ptr [[TMP21]], align 4 +; I32-NEXT: [[TMP30:%.*]] = load ptr, ptr [[TMP22]], align 4 +; I32-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP56]], align 4 +; I32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP57]], align 4 +; I32-NEXT: [[TMP49:%.*]] = load ptr, ptr [[TMP58]], align 4 +; I32-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP59]], align 4 +; I32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP60]], align 4 +; I32-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP61]], align 4 +; I32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP62]], align 4 +; I32-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP71]], align 4 +; I32-NEXT: [[TMP31:%.*]] = extractelement <4 x double> [[TMP44]], i32 0 +; I32-NEXT: store double [[TMP31]], ptr [[TMP23]], align 4 +; I32-NEXT: [[TMP32:%.*]] = extractelement <4 x double> [[TMP44]], i32 1 +; I32-NEXT: store double [[TMP32]], ptr [[TMP24]], align 4 +; I32-NEXT: [[TMP33:%.*]] = extractelement <4 x double> [[TMP44]], i32 2 +; I32-NEXT: 
store double [[TMP33]], ptr [[TMP25]], align 4 +; I32-NEXT: [[TMP34:%.*]] = extractelement <4 x double> [[TMP44]], i32 3 +; I32-NEXT: store double [[TMP34]], ptr [[TMP26]], align 4 +; I32-NEXT: [[TMP35:%.*]] = extractelement <4 x double> [[TMP45]], i32 0 +; I32-NEXT: store double [[TMP35]], ptr [[TMP27]], align 4 +; I32-NEXT: [[TMP36:%.*]] = extractelement <4 x double> [[TMP45]], i32 1 +; I32-NEXT: store double [[TMP36]], ptr [[TMP28]], align 4 +; I32-NEXT: [[TMP37:%.*]] = extractelement <4 x double> [[TMP45]], i32 2 +; I32-NEXT: store double [[TMP37]], ptr [[TMP29]], align 4 +; I32-NEXT: [[TMP38:%.*]] = extractelement <4 x double> [[TMP45]], i32 3 +; I32-NEXT: store double [[TMP38]], ptr [[TMP30]], align 4 +; I32-NEXT: [[TMP63:%.*]] = extractelement <4 x double> [[TMP46]], i32 0 +; I32-NEXT: store double [[TMP63]], ptr [[TMP47]], align 4 +; I32-NEXT: [[TMP64:%.*]] = extractelement <4 x double> [[TMP46]], i32 1 +; I32-NEXT: store double [[TMP64]], ptr [[TMP48]], align 4 +; I32-NEXT: [[TMP65:%.*]] = extractelement <4 x double> [[TMP46]], i32 2 +; I32-NEXT: store double [[TMP65]], ptr [[TMP49]], align 4 +; I32-NEXT: [[TMP66:%.*]] = extractelement <4 x double> [[TMP46]], i32 3 +; I32-NEXT: store double [[TMP66]], ptr [[TMP50]], align 4 +; I32-NEXT: [[TMP67:%.*]] = extractelement <4 x double> [[TMP55]], i32 0 +; I32-NEXT: store double [[TMP67]], ptr [[TMP51]], align 4 +; I32-NEXT: [[TMP68:%.*]] = extractelement <4 x double> [[TMP55]], i32 1 +; I32-NEXT: store double [[TMP68]], ptr [[TMP52]], align 4 +; I32-NEXT: [[TMP69:%.*]] = extractelement <4 x double> [[TMP55]], i32 2 +; I32-NEXT: store double [[TMP69]], ptr [[TMP53]], align 4 +; I32-NEXT: [[TMP70:%.*]] = extractelement <4 x double> [[TMP55]], i32 3 +; I32-NEXT: store double [[TMP70]], ptr [[TMP54]], align 4 +; I32-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 +; I32-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD_3]], splat (i32 4) +; I32-NEXT: [[TMP39:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; I32-NEXT: br i1 [[TMP39]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; I32: [[MIDDLE_BLOCK]]: +; I32-NEXT: br label %[[VEC_EPILOG_ITER_CHECK:.*]] +; I32: [[VEC_EPILOG_ITER_CHECK]]: +; I32-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 4 +; I32-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] +; I32: [[VEC_EPILOG_PH]]: +; I32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; I32-NEXT: [[N_MOD_VF2:%.*]] = urem i32 [[TMP0]], 4 +; I32-NEXT: [[TMP72:%.*]] = icmp eq i32 [[N_MOD_VF2]], 0 +; I32-NEXT: [[TMP73:%.*]] = select i1 [[TMP72]], i32 4, i32 [[N_MOD_VF2]] +; I32-NEXT: [[N_VEC3:%.*]] = sub i32 [[TMP0]], [[TMP73]] +; I32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[BC_RESUME_VAL]], i64 0 +; I32-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; I32-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3> +; I32-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; I32: [[VEC_EPILOG_VECTOR_BODY]]: +; I32-NEXT: [[INDEX4:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; I32-NEXT: [[VEC_IND5:%.*]] = phi <4 x i32> [ [[INDUCTION]], %[[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT7:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; I32-NEXT: [[TMP74:%.*]] = add i32 [[INDEX4]], 0 +; I32-NEXT:
[[TMP75:%.*]] = add i32 [[INDEX4]], 1 +; I32-NEXT: [[TMP76:%.*]] = add i32 [[INDEX4]], 2 +; I32-NEXT: [[TMP77:%.*]] = add i32 [[INDEX4]], 3 +; I32-NEXT: [[TMP78:%.*]] = uitofp <4 x i32> [[VEC_IND5]] to <4 x double> +; I32-NEXT: [[TMP79:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP74]] +; I32-NEXT: [[TMP80:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP75]] +; I32-NEXT: [[TMP81:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP76]] +; I32-NEXT: [[TMP82:%.*]] = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 [[TMP77]] +; I32-NEXT: [[TMP83:%.*]] = load ptr, ptr [[TMP79]], align 4 +; I32-NEXT: [[TMP84:%.*]] = load ptr, ptr [[TMP80]], align 4 +; I32-NEXT: [[TMP85:%.*]] = load ptr, ptr [[TMP81]], align 4 +; I32-NEXT: [[TMP86:%.*]] = load ptr, ptr [[TMP82]], align 4 +; I32-NEXT: [[TMP87:%.*]] = extractelement <4 x double> [[TMP78]], i32 0 +; I32-NEXT: store double [[TMP87]], ptr [[TMP83]], align 4 +; I32-NEXT: [[TMP88:%.*]] = extractelement <4 x double> [[TMP78]], i32 1 +; I32-NEXT: store double [[TMP88]], ptr [[TMP84]], align 4 +; I32-NEXT: [[TMP89:%.*]] = extractelement <4 x double> [[TMP78]], i32 2 +; I32-NEXT: store double [[TMP89]], ptr [[TMP85]], align 4 +; I32-NEXT: [[TMP90:%.*]] = extractelement <4 x double> [[TMP78]], i32 3 +; I32-NEXT: store double [[TMP90]], ptr [[TMP86]], align 4 +; I32-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX4]], 4 +; I32-NEXT: [[VEC_IND_NEXT7]] = add <4 x i32> [[VEC_IND5]], splat (i32 4) +; I32-NEXT: [[TMP91:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC3]] +; I32-NEXT: br i1 [[TMP91]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; I32: [[VEC_EPILOG_MIDDLE_BLOCK]]: +; I32-NEXT: br label %[[VEC_EPILOG_SCALAR_PH]] +; I32: [[VEC_EPILOG_SCALAR_PH]]: +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %inc, %loop ] + %conv = uitofp i32 %iv to double + %add.ptr.i = getelementptr nusw { ptr, ptr, ptr }, ptr null, i32 %iv + %0 = load ptr, ptr %add.ptr.i, align 4 + store double %conv, ptr %0, align 4 + %inc = add i32 %iv, 1 + %ec = icmp eq i32 %iv, %n + br i1 %ec, label %exit, label %loop + +exit: ; preds = %loop + ret void +} + +define void @test_store_loaded_value(ptr noalias %src, ptr noalias %dst, i32 %n) #0 { +; I64-LABEL: define void @test_store_loaded_value( +; I64-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; I64-NEXT: [[BB:.*:]] +; I64-NEXT: [[PRE:%.*]] = icmp slt i32 [[N]], 1 +; I64-NEXT: br i1 [[PRE]], [[EXIT:label %.*]], label %[[PH:.*]] +; I64: [[PH]]: +; I64-NEXT: [[N_EXT:%.*]] = zext i32 [[N]] to i64 +; I64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_EXT]], 4 +; I64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; I64: [[VECTOR_PH]]: +; I64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_EXT]], 4 +; I64-NEXT: [[N_VEC:%.*]] = sub i64 [[N_EXT]], [[N_MOD_VF]] +; I64-NEXT: br label %[[VECTOR_BODY:.*]] +; I64: [[VECTOR_BODY]]: +; I64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I64-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; I64-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; I64-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; I64-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 +; I64-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP0]] +; I64-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]] +; I64-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP2]] +; I64-NEXT: [[TMP7:%.*]] = 
getelementptr i8, ptr [[SRC]], i64 [[TMP3]] +; I64-NEXT: [[TMP8:%.*]] = load double, ptr [[TMP4]], align 8 +; I64-NEXT: [[TMP9:%.*]] = load double, ptr [[TMP5]], align 8 +; I64-NEXT: [[TMP10:%.*]] = load double, ptr [[TMP6]], align 8 +; I64-NEXT: [[TMP11:%.*]] = load double, ptr [[TMP7]], align 8 +; I64-NEXT: [[TMP12:%.*]] = shl i64 [[TMP0]], 1 +; I64-NEXT: [[TMP13:%.*]] = shl i64 [[TMP1]], 1 +; I64-NEXT: [[TMP14:%.*]] = shl i64 [[TMP2]], 1 +; I64-NEXT: [[TMP15:%.*]] = shl i64 [[TMP3]], 1 +; I64-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP12]] +; I64-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]] +; I64-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP14]] +; I64-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP15]] +; I64-NEXT: store double [[TMP8]], ptr [[TMP16]], align 8 +; I64-NEXT: store double [[TMP9]], ptr [[TMP17]], align 8 +; I64-NEXT: store double [[TMP10]], ptr [[TMP18]], align 8 +; I64-NEXT: store double [[TMP11]], ptr [[TMP19]], align 8 +; I64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; I64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; I64-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; I64: [[MIDDLE_BLOCK]]: +; I64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_EXT]], [[N_VEC]] +; I64-NEXT: br i1 [[CMP_N]], [[EXIT_LOOPEXIT:label %.*]], label %[[SCALAR_PH]] +; I64: [[SCALAR_PH]]: +; +; I32-LABEL: define void @test_store_loaded_value( +; I32-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { +; I32-NEXT: [[BB:.*:]] +; I32-NEXT: [[PRE:%.*]] = icmp slt i32 [[N]], 1 +; I32-NEXT: br i1 [[PRE]], [[EXIT:label %.*]], label %[[PH:.*]] +; I32: [[PH]]: +; I32-NEXT: [[N_EXT:%.*]] = zext i32 [[N]] to i64 +; I32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_EXT]], 4 +; I32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; I32: [[VECTOR_PH]]: +; I32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_EXT]], 4 +; I32-NEXT: [[N_VEC:%.*]] = sub i64 [[N_EXT]], [[N_MOD_VF]] +; I32-NEXT: br label %[[VECTOR_BODY:.*]] +; I32: [[VECTOR_BODY]]: +; I32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; I32-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; I32-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; I32-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; I32-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 +; I32-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP0]] +; I32-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]] +; I32-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP2]] +; I32-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]] +; I32-NEXT: [[TMP8:%.*]] = load double, ptr [[TMP4]], align 8 +; I32-NEXT: [[TMP9:%.*]] = load double, ptr [[TMP5]], align 8 +; I32-NEXT: [[TMP10:%.*]] = load double, ptr [[TMP6]], align 8 +; I32-NEXT: [[TMP11:%.*]] = load double, ptr [[TMP7]], align 8 +; I32-NEXT: [[TMP12:%.*]] = shl i64 [[TMP0]], 1 +; I32-NEXT: [[TMP13:%.*]] = shl i64 [[TMP1]], 1 +; I32-NEXT: [[TMP14:%.*]] = shl i64 [[TMP2]], 1 +; I32-NEXT: [[TMP15:%.*]] = shl i64 [[TMP3]], 1 +; I32-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP12]] +; I32-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]] +; I32-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP14]] +; I32-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP15]] +; I32-NEXT: store double [[TMP8]], ptr [[TMP16]], align 8 +; 
I32-NEXT: store double [[TMP9]], ptr [[TMP17]], align 8 +; I32-NEXT: store double [[TMP10]], ptr [[TMP18]], align 8 +; I32-NEXT: store double [[TMP11]], ptr [[TMP19]], align 8 +; I32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; I32-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; I32-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; I32: [[MIDDLE_BLOCK]]: +; I32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_EXT]], [[N_VEC]] +; I32-NEXT: br i1 [[CMP_N]], [[EXIT_LOOPEXIT:label %.*]], label %[[SCALAR_PH]] +; I32: [[SCALAR_PH]]: +; +bb: + %pre = icmp slt i32 %n, 1 + br i1 %pre, label %exit, label %ph + +ph: + %n.ext = zext i32 %n to i64 + br label %loop + +loop: + %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr i8, ptr %src, i64 %iv + %l = load double, ptr %gep.src, align 8 + %sext = shl i64 %iv, 1 + %gep.dst = getelementptr i8, ptr %dst, i64 %sext + store double %l, ptr %gep.dst, align 8 + %ec = icmp eq i64 %iv.next, %n.ext + br i1 %ec, label %exit, label %loop, !llvm.loop !0 + +exit: + ret void +} + +attributes #0 = { "target-cpu"="znver2" } + +!0 = distinct !{!0, !1} +!1 = !{!"llvm.loop.vectorize.enable", i1 true} diff --git a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll index c2dfce0aa70b8..bdbac7c1a9931 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll @@ -70,8 +70,7 @@ define void @_Z3fn1v() #0 { ; CHECK-NEXT: [[TMP64:%.*]] = mul i64 [[N_VEC]], 2 ; CHECK-NEXT: [[IND_END9:%.*]] = add i64 8, [[TMP64]] ; CHECK-NEXT: [[IND_END12:%.*]] = mul i64 [[N_VEC]], 2 -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP6]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -160,8 +159,7 @@ define void @_Z3fn1v() #0 { ; CHECK-NEXT: [[TMP42:%.*]] = mul i64 [[N_VEC32]], 2 ; CHECK-NEXT: [[IND_END55:%.*]] = add i64 8, [[TMP42]] ; CHECK-NEXT: [[IND_END58:%.*]] = mul i64 [[N_VEC32]], 2 -; CHECK-NEXT: [[N_VEC_REMAINING49:%.*]] = sub i64 [[TMP28]], [[N_VEC32]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK50:%.*]] = icmp ult i64 [[N_VEC_REMAINING49]], 8 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK50:%.*]] = icmp ult i64 [[N_MOD_VF31]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK50]], label %[[VEC_EPILOG_SCALAR_PH40]], label %[[VEC_EPILOG_PH42]], !prof [[PROF3]] ; CHECK: [[VEC_EPILOG_PH42]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL51:%.*]] = phi i64 [ [[N_VEC32]], %[[VEC_EPILOG_ITER_CHECK43]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK24]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll index b713a39c078d5..272b62bdbd5aa 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll @@ -33,8 +33,6 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; 
CHECK-NEXT: br label %[[EXIT_1_LOOPEXIT1:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_3:.*]] ; CHECK: [[LOOP_2_PREHEADER]]: ; CHECK-NEXT: br label %[[LOOP_2:.*]] ; CHECK: [[LOOP_2]]: @@ -48,13 +46,6 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr ; CHECK-NEXT: store i16 0, ptr [[GEP_DST]], align 2 ; CHECK-NEXT: [[EC_2:%.*]] = icmp ult i64 [[IV_2]], [[IV_1_LCSSA]] ; CHECK-NEXT: br i1 [[EC_2]], label %[[LOOP_2]], label %[[EXIT_1_LOOPEXIT:.*]] -; CHECK: [[LOOP_3]]: -; CHECK-NEXT: [[IV_4:%.*]] = phi i64 [ [[IV_4_NEXT:%.*]], %[[LOOP_3]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP_DST_2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_4]] -; CHECK-NEXT: store i8 0, ptr [[GEP_DST_2]], align 1 -; CHECK-NEXT: [[IV_4_NEXT]] = add i64 [[IV_4]], 1 -; CHECK-NEXT: [[EC_3:%.*]] = icmp ult i64 [[IV_4_NEXT]], [[IV_1_LCSSA]] -; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP_3]], label %[[EXIT_1_LOOPEXIT1]] ; CHECK: [[EXIT_1_LOOPEXIT]]: ; CHECK-NEXT: br label %[[EXIT_1:.*]] ; CHECK: [[EXIT_1_LOOPEXIT1]]: diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll index f877e1b311cea..e99ffda9e4043 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll @@ -39,12 +39,8 @@ define void @example1() optsize { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[TMP7:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[TMP6:%.*]] ; CHECK: 6: -; CHECK-NEXT: br i1 poison, label [[TMP7]], label [[TMP6]] -; CHECK: 7: ; CHECK-NEXT: ret void ; br label %1 @@ -123,8 +119,6 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[DOT_PREHEADER_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[DOTLR_PH5:%.*]] ; CHECK: ..preheader_crit_edge: ; CHECK-NEXT: [[PHITMP:%.*]] = zext nneg i32 [[N]] to i64 ; CHECK-NEXT: br label [[DOTPREHEADER]] @@ -134,7 +128,7 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP16]], label [[DOT_CRIT_EDGE:%.*]], label [[DOTLR_PH_PREHEADER:%.*]] ; CHECK: .lr.ph.preheader: ; CHECK-NEXT: br label [[VECTOR_PH8:%.*]] -; CHECK: vector.ph8: +; CHECK: vector.ph7: ; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[N]] to i64 ; CHECK-NEXT: [[N_RND_UP10:%.*]] = add nuw nsw i64 [[TMP17]], 3 ; CHECK-NEXT: [[N_VEC12:%.*]] = and i64 [[N_RND_UP10]], 8589934588 @@ -142,7 +136,7 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[BROADCAST_SPLATINSERT19:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_114]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT20:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT19]], <4 x i64> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY13:%.*]] -; CHECK: vector.body15: +; CHECK: vector.body14: ; CHECK-NEXT: [[INDEX16:%.*]] = phi i64 [ 0, [[VECTOR_PH8]] ], [ [[INDEX_NEXT29:%.*]], [[PRED_STORE_CONTINUE26:%.*]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[I_0_LCSSA]], [[INDEX16]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT17:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX16]], i64 0 @@ -151,7 +145,7 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP18:%.*]] = icmp ule <4 x i64> [[VEC_IV]], 
[[BROADCAST_SPLAT20]] ; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP18]], i64 0 ; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF19:%.*]], label [[PRED_STORE_CONTINUE20:%.*]] -; CHECK: pred.store.if19: +; CHECK: pred.store.if18: ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4 ; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr @c, i64 [[OFFSET_IDX]] @@ -160,10 +154,10 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP25:%.*]] = and i32 [[TMP23]], [[TMP21]] ; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE20]] -; CHECK: pred.store.continue20: +; CHECK: pred.store.continue19: ; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[TMP18]], i64 1 ; CHECK-NEXT: br i1 [[TMP26]], label [[PRED_STORE_IF21:%.*]], label [[PRED_STORE_CONTINUE22:%.*]] -; CHECK: pred.store.if21: +; CHECK: pred.store.if20: ; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP27]] ; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4 @@ -173,10 +167,10 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP33:%.*]] = and i32 [[TMP31]], [[TMP29]] ; CHECK-NEXT: store i32 [[TMP33]], ptr [[TMP32]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE22]] -; CHECK: pred.store.continue22: +; CHECK: pred.store.continue21: ; CHECK-NEXT: [[TMP34:%.*]] = extractelement <4 x i1> [[TMP18]], i64 2 ; CHECK-NEXT: br i1 [[TMP34]], label [[PRED_STORE_IF23:%.*]], label [[PRED_STORE_CONTINUE24:%.*]] -; CHECK: pred.store.if23: +; CHECK: pred.store.if22: ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[OFFSET_IDX]], 2 ; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP35]] ; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4 @@ -186,10 +180,10 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP41:%.*]] = and i32 [[TMP39]], [[TMP37]] ; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP40]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE24]] -; CHECK: pred.store.continue24: +; CHECK: pred.store.continue23: ; CHECK-NEXT: [[TMP42:%.*]] = extractelement <4 x i1> [[TMP18]], i64 3 ; CHECK-NEXT: br i1 [[TMP42]], label [[PRED_STORE_IF25:%.*]], label [[PRED_STORE_CONTINUE26]] -; CHECK: pred.store.if25: +; CHECK: pred.store.if24: ; CHECK-NEXT: [[TMP43:%.*]] = add i64 [[OFFSET_IDX]], 3 ; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr @b, i64 [[TMP43]] ; CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP44]], align 4 @@ -199,18 +193,12 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP49:%.*]] = and i32 [[TMP47]], [[TMP45]] ; CHECK-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE26]] -; CHECK: pred.store.continue26: +; CHECK: pred.store.continue25: ; CHECK-NEXT: [[INDEX_NEXT29]] = add nuw i64 [[INDEX16]], 4 ; CHECK-NEXT: [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT29]], [[N_VEC12]] -; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK28:%.*]], label [[VECTOR_BODY13]], !llvm.loop [[LOOP4:![0-9]+]] -; CHECK: middle.block28: -; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: scalar.ph7: +; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK27:%.*]], label [[VECTOR_BODY13]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: middle.block27: ; CHECK-NEXT: br label [[DOTLR_PH1:%.*]] -; CHECK: .lr.ph5: -; CHECK-NEXT: br i1 poison, label [[DOT_PREHEADER_CRIT_EDGE]], 
label [[DOTLR_PH5]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOTLR_PH]], label [[DOTLR_PH1]] ; CHECK: ._crit_edge.loopexit: ; CHECK-NEXT: br label [[DOT_CRIT_EDGE]] ; CHECK: ._crit_edge: @@ -328,11 +316,7 @@ define void @example3(i32 %n, ptr noalias nocapture %p, ptr noalias nocapture %q ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]] ; CHECK: ._crit_edge.loopexit: ; CHECK-NEXT: br label [[DOT_CRIT_EDGE]] ; CHECK: ._crit_edge: @@ -418,12 +402,8 @@ define void @example23b(ptr noalias nocapture %src, ptr noalias nocapture %dst) ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[TMP5:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[TMP4:%.*]] ; CHECK: 4: -; CHECK-NEXT: br i1 poison, label [[TMP5]], label [[TMP4]] -; CHECK: 5: ; CHECK-NEXT: ret void ; br label %1 @@ -516,12 +496,8 @@ define void @example23c(ptr noalias nocapture %src, ptr noalias nocapture %dst) ; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[TMP26:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[TMP25:%.*]] ; CHECK: 25: -; CHECK-NEXT: br i1 poison, label [[TMP26]], label [[TMP25]] -; CHECK: 26: ; CHECK-NEXT: ret void ; br label %1 diff --git a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll index 931c927d304ed..15e26782f8e66 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll @@ -15,17 +15,17 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[ITER_CHECK:.*]]: ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64 ; CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[J]] to i64 -; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]] -; CHECK: [[VECTOR_PH1]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, 
%[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 @@ -184,15 +184,15 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX8]]) ; CHECK-NEXT: br i1 false, label %[[FOR_COND_CLEANUP:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: br i1 false, label %[[SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF8:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ] +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[TMP171:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0 -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; CHECK: [[VEC_EPILOG_VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP172:%.*]] = add i64 [[INDEX9]], 0 ; CHECK-NEXT: [[TMP173:%.*]] = add i64 [[INDEX9]], 1 ; CHECK-NEXT: [[TMP174:%.*]] = add i64 [[INDEX9]], 2 @@ -216,20 +216,20 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[TMP168]] = add <4 x i32> [[TMP167]], [[TMP166]] ; CHECK-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX9]], 4 ; CHECK-NEXT: [[TMP169:%.*]] = icmp eq i64 [[INDEX_NEXT12]], 100 -; CHECK-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP170:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP168]]) -; CHECK-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[SCALAR_PH]] -; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[VEC_EPILOG_SCALAR_PH]] +; CHECK: 
[[VEC_EPILOG_SCALAR_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX13:%.*]] = phi i32 [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] -; CHECK-NEXT: br label %[[FOR_BODY1:.*]] +; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_COND_CLEANUP]]: -; CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY1]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ADD7_LCSSA]] -; CHECK: [[FOR_BODY1]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY1]] ] -; CHECK-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY1]] ] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[TMP150:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[INT_TBAA1]] ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[INDVARS_IV]], i64 [[IDXPROM5]] @@ -239,24 +239,24 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[ADD7]] = add i32 [[ADD]], [[MUL]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY1]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; ; MAX-BW-LABEL: define i32 @matrix_row_col( ; MAX-BW-SAME: ptr readonly captures(none) [[DATA:%.*]], i32 [[I:%.*]], i32 [[J:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] { ; MAX-BW-NEXT: [[ITER_CHECK:.*]]: ; MAX-BW-NEXT: [[IDXPROM:%.*]] = sext i32 [[I]] to i64 ; MAX-BW-NEXT: [[IDXPROM5:%.*]] = sext i32 [[J]] to i64 -; MAX-BW-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; MAX-BW: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: +; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] ; MAX-BW: [[VECTOR_PH]]: -; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]] -; MAX-BW: [[VECTOR_PH1]]: ; MAX-BW-NEXT: br label %[[VECTOR_BODY:.*]] ; MAX-BW: [[VECTOR_BODY]]: -; MAX-BW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ 
zeroinitializer, %[[VECTOR_PH1]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP144:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP145:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP146:%.*]], %[[VECTOR_BODY]] ] +; MAX-BW-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP147:%.*]], %[[VECTOR_BODY]] ] ; MAX-BW-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; MAX-BW-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 ; MAX-BW-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 @@ -415,15 +415,15 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; MAX-BW-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX8]]) ; MAX-BW-NEXT: br i1 false, label %[[FOR_COND_CLEANUP:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; MAX-BW: [[VEC_EPILOG_ITER_CHECK]]: -; MAX-BW-NEXT: br i1 false, label %[[SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; MAX-BW-NEXT: br i1 false, label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF8:![0-9]+]] ; MAX-BW: [[VEC_EPILOG_PH]]: -; MAX-BW-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ] -; MAX-BW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ] +; MAX-BW-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; MAX-BW-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; MAX-BW-NEXT: [[TMP171:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0 -; MAX-BW-NEXT: br label %[[FOR_BODY:.*]] -; MAX-BW: [[FOR_BODY]]: -; MAX-BW-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[FOR_BODY]] ] -; MAX-BW-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[FOR_BODY]] ] +; MAX-BW-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]] +; MAX-BW: [[VEC_EPILOG_VECTOR_BODY]]: +; MAX-BW-NEXT: [[INDEX9:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] +; MAX-BW-NEXT: [[VEC_PHI10:%.*]] = phi <4 x i32> [ [[TMP171]], %[[VEC_EPILOG_PH]] ], [ [[TMP168:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ] ; MAX-BW-NEXT: [[TMP172:%.*]] = add i64 [[INDEX9]], 0 ; MAX-BW-NEXT: [[TMP173:%.*]] = add i64 [[INDEX9]], 1 ; MAX-BW-NEXT: [[TMP174:%.*]] = add i64 [[INDEX9]], 2 @@ -447,20 +447,20 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; MAX-BW-NEXT: [[TMP168]] = add <4 x i32> [[TMP167]], [[TMP166]] ; MAX-BW-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX9]], 4 ; MAX-BW-NEXT: [[TMP169:%.*]] = icmp eq i64 [[INDEX_NEXT12]], 100 -; MAX-BW-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; MAX-BW-NEXT: br i1 [[TMP169]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; MAX-BW: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; MAX-BW-NEXT: [[TMP170:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP168]]) -; MAX-BW-NEXT: br i1 true, label 
%[[FOR_COND_CLEANUP]], label %[[SCALAR_PH]] -; MAX-BW: [[SCALAR_PH]]: +; MAX-BW-NEXT: br i1 true, label %[[FOR_COND_CLEANUP]], label %[[VEC_EPILOG_SCALAR_PH]] +; MAX-BW: [[VEC_EPILOG_SCALAR_PH]]: ; MAX-BW-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 96, %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; MAX-BW-NEXT: [[BC_MERGE_RDX13:%.*]] = phi i32 [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP149]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] -; MAX-BW-NEXT: br label %[[FOR_BODY1:.*]] +; MAX-BW-NEXT: br label %[[FOR_BODY:.*]] ; MAX-BW: [[FOR_COND_CLEANUP]]: -; MAX-BW-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY1]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] +; MAX-BW-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], %[[FOR_BODY]] ], [ [[TMP149]], %[[MIDDLE_BLOCK]] ], [ [[TMP170]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; MAX-BW-NEXT: ret i32 [[ADD7_LCSSA]] -; MAX-BW: [[FOR_BODY1]]: -; MAX-BW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY1]] ] -; MAX-BW-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY1]] ] +; MAX-BW: [[FOR_BODY]]: +; MAX-BW-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; MAX-BW-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX13]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD7]], %[[FOR_BODY]] ] ; MAX-BW-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[INDVARS_IV]] ; MAX-BW-NEXT: [[TMP150:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[INT_TBAA1]] ; MAX-BW-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[INDVARS_IV]], i64 [[IDXPROM5]] @@ -470,7 +470,7 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; MAX-BW-NEXT: [[ADD7]] = add i32 [[ADD]], [[MUL]] ; MAX-BW-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; MAX-BW-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100 -; MAX-BW-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY1]], !llvm.loop [[LOOP9:![0-9]+]] +; MAX-BW-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; entry: %idxprom = sext i32 %i to i64 @@ -555,26 +555,9 @@ define void @test(ptr %A, ptr noalias %B) #0 { ; CHECK-NEXT: store i8 [[TMP35]], ptr [[TMP27]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 -; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[IV_0:%.*]] = add nuw nsw i64 [[IV]], 0 -; CHECK-NEXT: [[IV_1:%.*]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[IN0:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_0]] -; CHECK-NEXT: [[IN1:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_1]] -; CHECK-NEXT: [[V0:%.*]] = load i32, ptr [[IN0]], align 4 -; CHECK-NEXT: [[V1:%.*]] = load 
i32, ptr [[IN1]], align 4 -; CHECK-NEXT: [[REDUCE_ADD_0:%.*]] = add i32 [[V0]], [[V1]] -; CHECK-NEXT: [[REDUCE_ADD_0_NARROW:%.*]] = trunc i32 [[REDUCE_ADD_0]] to i8 -; CHECK-NEXT: [[OUT:%.*]] = getelementptr inbounds [1024 x i8], ptr [[B]], i64 0, i64 [[IV_0]] -; CHECK-NEXT: store i8 [[REDUCE_ADD_0_NARROW]], ptr [[OUT]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV_0]], 2 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] ; CHECK: [[FOR_COND_CLEANUP]]: ; CHECK-NEXT: ret void ; @@ -675,26 +658,9 @@ define void @test(ptr %A, ptr noalias %B) #0 { ; MAX-BW-NEXT: store i8 [[TMP67]], ptr [[TMP51]], align 1 ; MAX-BW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; MAX-BW-NEXT: [[TMP68:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 -; MAX-BW-NEXT: br i1 [[TMP68]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; MAX-BW-NEXT: br i1 [[TMP68]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; MAX-BW: [[MIDDLE_BLOCK]]: ; MAX-BW-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; MAX-BW: [[SCALAR_PH:.*]]: -; MAX-BW-NEXT: br label %[[FOR_BODY:.*]] -; MAX-BW: [[FOR_BODY]]: -; MAX-BW-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] -; MAX-BW-NEXT: [[IV_0:%.*]] = add nuw nsw i64 [[IV]], 0 -; MAX-BW-NEXT: [[IV_1:%.*]] = add nuw nsw i64 [[IV]], 1 -; MAX-BW-NEXT: [[IN0:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_0]] -; MAX-BW-NEXT: [[IN1:%.*]] = getelementptr inbounds [1024 x i32], ptr [[A]], i64 0, i64 [[IV_1]] -; MAX-BW-NEXT: [[V0:%.*]] = load i32, ptr [[IN0]], align 4 -; MAX-BW-NEXT: [[V1:%.*]] = load i32, ptr [[IN1]], align 4 -; MAX-BW-NEXT: [[REDUCE_ADD_0:%.*]] = add i32 [[V0]], [[V1]] -; MAX-BW-NEXT: [[REDUCE_ADD_0_NARROW:%.*]] = trunc i32 [[REDUCE_ADD_0]] to i8 -; MAX-BW-NEXT: [[OUT:%.*]] = getelementptr inbounds [1024 x i8], ptr [[B]], i64 0, i64 [[IV_0]] -; MAX-BW-NEXT: store i8 [[REDUCE_ADD_0_NARROW]], ptr [[OUT]], align 1 -; MAX-BW-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV_0]], 2 -; MAX-BW-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 1024 -; MAX-BW-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP]] ; MAX-BW: [[FOR_COND_CLEANUP]]: ; MAX-BW-NEXT: ret void ; @@ -745,9 +711,10 @@ attributes #0 = { "target-cpu"="core-avx2" "target-features"="+avx,+avx2,+sse,+s ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]} ; CHECK: [[META6]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]], [[META7]]} -; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]], [[META6]]} -; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META6]], [[META7]]} +; CHECK: [[PROF8]] = !{!"branch_weights", i32 4, i32 28} +; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]], [[META7]]} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]], [[META6]]} +; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META6]], [[META7]]} ;. 
; MAX-BW: [[INT_TBAA1]] = !{[[META2:![0-9]+]], [[META2]], i64 0} ; MAX-BW: [[META2]] = !{!"int", [[META3:![0-9]+]], i64 0} @@ -756,7 +723,8 @@ attributes #0 = { "target-cpu"="core-avx2" "target-features"="+avx,+avx2,+sse,+s ; MAX-BW: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]} ; MAX-BW: [[META6]] = !{!"llvm.loop.isvectorized", i32 1} ; MAX-BW: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"} -; MAX-BW: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]], [[META7]]} -; MAX-BW: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]], [[META6]]} -; MAX-BW: [[LOOP10]] = distinct !{[[LOOP10]], [[META6]], [[META7]]} +; MAX-BW: [[PROF8]] = !{!"branch_weights", i32 4, i32 28} +; MAX-BW: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]], [[META7]]} +; MAX-BW: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]], [[META6]]} +; MAX-BW: [[LOOP11]] = distinct !{[[LOOP11]], [[META6]], [[META7]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll index 669e9252256de..7069534f3b683 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll @@ -28,23 +28,9 @@ define dso_local void @tail_folding_enabled(ptr noalias nocapture %A, ptr noalia ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 432 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 430 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; entry: br label %for.body @@ -89,25 +75,11 @@ define dso_local void @tail_folding_disabled(ptr noalias nocapture %A, ptr noali ; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP6]], ptr [[TMP7]], i32 4, <8 x i1> [[TMP1]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 432 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 
[[INDVARS_IV]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]] -; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 430 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; entry: br label %for.body @@ -170,28 +142,12 @@ define i32 @reduction_i32(ptr nocapture readonly %A, ptr nocapture readonly %B, ; CHECK-NEXT: [[TMP11:%.*]] = select <8 x i1> [[TMP4]], <8 x i32> [[TMP10]], <8 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP11]]) -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4 -; CHECK-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDXB]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP14]] -; CHECK-NEXT: [[SUM_1]] = add nuw nsw i32 [[ADD]], [[SUM_0]] -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_1]], [[FOR_BODY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_1_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll index 27150cb6cca0d..63f9a1310d15a 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll @@ -398,27 +398,9 @@ define i32 @test_count_bits(ptr %test_base) { ; CHECK-NEXT: [[BIN_RDX13:%.*]] = add <4 x i32> [[TMP38]], [[BIN_RDX]] ; CHECK-NEXT: [[BIN_RDX14:%.*]] = add <4 x i32> [[TMP39]], [[BIN_RDX13]] ; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX14]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; 
CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[BYTE:%.*]] = udiv i64 [[IV]], 8 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[BYTE]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i8, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: [[BIT:%.*]] = urem i64 [[IV]], 8 -; CHECK-NEXT: [[BIT_TRUNC:%.*]] = trunc i64 [[BIT]] to i8 -; CHECK-NEXT: [[MASK:%.*]] = lshr i8 [[EARLYCND]], [[BIT_TRUNC]] -; CHECK-NEXT: [[TEST:%.*]] = and i8 [[MASK]], 1 -; CHECK-NEXT: [[VAL:%.*]] = zext i8 [[TEST]] to i32 -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LOOP]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP41]] ; entry: %alloca = alloca [4096 x i32] diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll index 3ae8001f9e439..28de5c7915a84 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll @@ -141,20 +141,7 @@ define void @vectorized1(ptr noalias nocapture %A, ptr noalias nocapture readonl ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP8]], [[TMP9]] -; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 20 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -200,22 +187,9 @@ define void @vectorized2(ptr noalias nocapture %A, ptr noalias nocapture readonl ; CHECK-NEXT: store <8 x float> [[TMP5]], ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP7]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; 
CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP7]], [[TMP8]] -; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP7]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll index 3618affdf1880..1e94f83a24d0a 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll @@ -38,21 +38,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; IF-EVL: scalar.ph: ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]] -; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; IF-EVL: for.cond.cleanup: ; IF-EVL-NEXT: ret void ; @@ -104,8 +90,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; NO-VP: vec.epilog.iter.check: -; NO-VP-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; NO-VP-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; NO-VP-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 8 ; NO-VP-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; NO-VP: vec.epilog.ph: ; NO-VP-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll index 69cdd655f9dc6..455fe83dbb6df 100644 --- 
a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll @@ -74,22 +74,7 @@ define void @test_pr59090(ptr %l_out, ptr noalias %b) #0 { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10008 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_MUL:%.*]] = mul nuw i64 [[IV]], 6 -; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: store i8 [[L]], ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[ARRAYIDX77:%.*]] = getelementptr i8, ptr [[L_OUT]], i64 [[IV_MUL]] -; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX77]], align 1, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[ADD_2:%.*]] = add i64 [[IV_MUL]], 2 -; CHECK-NEXT: [[ARRAYIDX97:%.*]] = getelementptr i8, ptr [[L_OUT]], i64 [[ADD_2]] -; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX97]], align 1, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 10000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll index bdedcca391a19..9ea9e1193f956 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/widened-value-used-as-scalar-and-first-lane.ll @@ -48,25 +48,7 @@ define void @iv.4_used_as_vector_and_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[G_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[G_SRC]], align 8 -; CHECK-NEXT: [[IV_4:%.*]] = add nuw nsw i64 [[IV]], 4 -; CHECK-NEXT: [[C:%.*]] = icmp ule i64 [[L]], 128 -; CHECK-NEXT: br i1 [[C]], label [[LOOP_THEN:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.then: -; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[IV_4]], 1 -; CHECK-NEXT: [[G_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[OR]] -; CHECK-NEXT: store i64 [[IV_4]], ptr [[G_DST]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 32 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -131,25 +113,7 @@ define void @iv.4_used_as_first_lane(ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[G_SRC:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[G_SRC]], align 8 -; CHECK-NEXT: [[IV_4:%.*]] = add nuw nsw i64 [[IV]], 4 -; CHECK-NEXT: [[C:%.*]] = icmp ule i64 [[L]], 128 -; CHECK-NEXT: br i1 [[C]], label [[LOOP_THEN:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.then: -; CHECK-NEXT: [[OR:%.*]] = or disjoint i64 [[IV_4]], 1 -; CHECK-NEXT: [[G_DST:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[OR]] -; CHECK-NEXT: store i64 [[L]], ptr [[G_DST]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 32 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll index f9403b8e3fb4a..774f0dba47224 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll @@ -134,30 +134,9 @@ define i32 @predicated_sdiv_masked_load(ptr %a, ptr %b, i32 %x, i1 %c) { ; SINK-GATHER-NEXT: br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SINK-GATHER: middle.block: ; SINK-GATHER-NEXT: [[TMP49:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP47]]) -; SINK-GATHER-NEXT: br label [[FOR_END:%.*]] -; SINK-GATHER: scalar.ph: -; SINK-GATHER-NEXT: br label [[FOR_BODY:%.*]] -; SINK-GATHER: for.body: -; SINK-GATHER-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ] -; SINK-GATHER-NEXT: [[R:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[T7:%.*]], [[FOR_INC]] ] -; SINK-GATHER-NEXT: [[T0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]] -; SINK-GATHER-NEXT: [[T1:%.*]] = load i32, ptr [[T0]], align 4 -; SINK-GATHER-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; SINK-GATHER: if.then: -; SINK-GATHER-NEXT: [[T2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]] -; SINK-GATHER-NEXT: [[T3:%.*]] = load i32, ptr [[T2]], align 4 -; SINK-GATHER-NEXT: [[T4:%.*]] = sdiv i32 [[T3]], [[X]] -; SINK-GATHER-NEXT: [[T5:%.*]] = add nsw i32 [[T4]], [[T1]] -; SINK-GATHER-NEXT: br label [[FOR_INC]] -; SINK-GATHER: for.inc: -; SINK-GATHER-NEXT: [[T6:%.*]] = phi i32 [ [[T1]], [[FOR_BODY]] ], [ [[T5]], [[IF_THEN]] ] -; SINK-GATHER-NEXT: [[T7]] = add i32 [[R]], [[T6]] -; SINK-GATHER-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 -; SINK-GATHER-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 10000 -; SINK-GATHER-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]] +; SINK-GATHER-NEXT: br label [[FOR_INC:%.*]] ; SINK-GATHER: for.end: -; SINK-GATHER-NEXT: [[T8:%.*]] = phi i32 [ [[T7]], [[FOR_INC]] ], [ [[TMP49]], [[MIDDLE_BLOCK]] ] -; SINK-GATHER-NEXT: ret i32 [[T8]] +; SINK-GATHER-NEXT: ret i32 [[TMP49]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/blend-in-header.ll b/llvm/test/Transforms/LoopVectorize/blend-in-header.ll index 85f72d283a0e4..6f262109f95be 100644 --- 
a/llvm/test/Transforms/LoopVectorize/blend-in-header.ll +++ b/llvm/test/Transforms/LoopVectorize/blend-in-header.ll @@ -111,8 +111,6 @@ define i64 @invar_cond(i1 %c) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -164,8 +162,6 @@ define i64 @invar_cond_incoming_ops_reordered(i1 %c) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] diff --git a/llvm/test/Transforms/LoopVectorize/bsd_regex.ll b/llvm/test/Transforms/LoopVectorize/bsd_regex.ll index afdbfaa92835b..f64255f29d335 100644 --- a/llvm/test/Transforms/LoopVectorize/bsd_regex.ll +++ b/llvm/test/Transforms/LoopVectorize/bsd_regex.ll @@ -37,11 +37,7 @@ define i32 @foo(ptr nocapture %A) { ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 undef ; diff --git a/llvm/test/Transforms/LoopVectorize/check-prof-info.ll b/llvm/test/Transforms/LoopVectorize/check-prof-info.ll index ce9d1f24ac883..b59ad8481597c 100644 --- a/llvm/test/Transforms/LoopVectorize/check-prof-info.ll +++ b/llvm/test/Transforms/LoopVectorize/check-prof-info.ll @@ -19,12 +19,8 @@ define void @_Z3foov() { ; CHECK: vector.body: ; CHECK: br i1 [[TMP6:%.*]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: middle.block: -; CHECK: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: -; CHECK: for.body: -; CHECK: br i1 [[EXITCOND:%.*]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !prof [[PROF5:![0-9]+]] ; ; CHECK-MASKED-LABEL: @_Z3foov( ; CHECK-MASKED: entry: @@ -34,12 +30,8 @@ define void @_Z3foov() { ; CHECK-MASKED: vector.body: ; CHECK-MASKED: br i1 [[TMP18:%.*]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK-MASKED: middle.block: -; CHECK-MASKED: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK-MASKED: scalar.ph: ; CHECK-MASKED: br label [[FOR_BODY:%.*]] ; CHECK-MASKED: for.cond.cleanup: -; CHECK-MASKED: for.body: -; CHECK-MASKED: br i1 [[EXITCOND:%.*]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !prof [[PROF5:![0-9]+]] ; ; CHECK-SCALABLE-LABEL: @_Z3foov( ; CHECK-SCALABLE: entry: diff --git a/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll b/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll index 
bd0655ddff379..143a0afd77195 100644 --- a/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll +++ b/llvm/test/Transforms/LoopVectorize/constantfolder-infer-correct-gepty.ll @@ -19,19 +19,6 @@ define void @test(ptr %data) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[OR_IV_1:%.*]] = or disjoint i64 [[IV]], 1 -; CHECK-NEXT: [[GEP_POSTSCALE:%.*]] = getelementptr [64 x float], ptr @postscale, i64 0, i64 [[OR_IV_1]] -; CHECK-NEXT: [[LOAD_POSTSCALE:%.*]] = load float, ptr [[GEP_POSTSCALE]], align 4, !tbaa [[FLOAT_TBAA0]] -; CHECK-NEXT: [[LRINT:%.*]] = tail call i64 @llvm.lrint.i64.f32(float [[LOAD_POSTSCALE]]) -; CHECK-NEXT: [[LRINT_TRUNC:%.*]] = trunc i64 [[LRINT]] to i16 -; CHECK-NEXT: store i16 [[LRINT_TRUNC]], ptr [[DATA]], align 2, !tbaa [[SHORT_TBAA4]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], 8 -; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/constantfolder.ll b/llvm/test/Transforms/LoopVectorize/constantfolder.ll index 37f2e73b0cf9f..66592b0ccf677 100644 --- a/llvm/test/Transforms/LoopVectorize/constantfolder.ll +++ b/llvm/test/Transforms/LoopVectorize/constantfolder.ll @@ -16,20 +16,6 @@ define void @const_fold_ptradd(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_0:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i64 [[CONST_0]] -; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -70,20 +56,6 @@ define void @const_fold_inbounds_ptradd(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_0:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[CONST_0]] -; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label 
%[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -125,20 +97,6 @@ define void @const_fold_select(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[OR:%.*]] = or i64 [[D]], [[CONST_1]] -; CHECK-NEXT: store i64 [[OR]], ptr [[DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -179,24 +137,6 @@ define void @const_fold_add_sub_mul_ashr_lshr(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[ADD:%.*]] = add i64 2, [[CONST_1]] -; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[ADD]], [[CONST_1]] -; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SUB]], [[CONST_1]] -; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[ASHR]], 3 -; CHECK-NEXT: [[LSHR:%.*]] = lshr i64 [[MUL]], [[CONST_1]] -; CHECK-NEXT: store i64 [[LSHR]], ptr [[DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -241,22 +181,6 @@ define void @const_fold_and_or_xor(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[OR:%.*]] = or i64 2, [[CONST_1]] -; CHECK-NEXT: [[AND:%.*]] = and i64 [[OR]], [[CONST_1]] -; CHECK-NEXT: [[XOR:%.*]] = and i64 [[AND]], [[CONST_1]] -; CHECK-NEXT: store i64 [[XOR]], ptr [[DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -299,21 +223,6 @@ define void @const_fold_cmp_zext(ptr %dst, i64 %d) { ; 
CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_1:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 1, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[VAL:%.*]] = icmp ugt i64 2, [[CONST_1]] -; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[VAL]] to i8 -; CHECK-NEXT: store i8 [[ZEXT]], ptr [[DST]], align 1 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -355,20 +264,6 @@ define void @const_fold_trunc(ptr %dst, i64 %d) { ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[CONST_0:%.*]] = phi i64 [ [[D]], %[[ELSE]] ], [ 0, %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[CONST_0]] to i16 -; CHECK-NEXT: store i16 [[TRUNC]], ptr [[DST]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll b/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll index 33e688c418d0e..62399c5d4b4ee 100644 --- a/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll +++ b/llvm/test/Transforms/LoopVectorize/create-induction-resume.ll @@ -67,19 +67,7 @@ define void @test(i32 %arg, i32 %L1.limit, i32 %L2.switch, i1 %c, ptr %dst) { ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12 ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[L2_HEADER_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[L2_INNER_HEADER:%.*]] -; CHECK: L2.Inner.header: -; CHECK-NEXT: [[L2_ACCUM:%.*]] = phi i32 [ [[L2_ACCUM_NEXT:%.*]], [[L2_INNER_HEADER]] ], [ 1, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[L2_IV:%.*]] = phi i64 [ [[L2_IV_NEXT:%.*]], [[L2_INNER_HEADER]] ], [ 1, [[SCALAR_PH]] ] -; CHECK-NEXT: [[L2_ACCUM_NEXT]] = sub i32 [[L2_ACCUM]], [[L1_EXIT_VAL]] -; CHECK-NEXT: [[L2_DUMMY_BUT_NEED_IT:%.*]] = sext i32 [[L2_ACCUM_NEXT]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[L2_IV]] -; CHECK-NEXT: store i64 [[L2_DUMMY_BUT_NEED_IT]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[L2_IV_NEXT]] = add nuw nsw i64 [[L2_IV]], 1 -; CHECK-NEXT: [[L2_EXIT_COND:%.*]] = icmp ugt i64 [[L2_IV]], 11 -; CHECK-NEXT: br i1 [[L2_EXIT_COND]], label [[L2_HEADER_LOOPEXIT]], label [[L2_INNER_HEADER]] ; CHECK: L2.exit: ; CHECK-NEXT: 
ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/cse-casts.ll b/llvm/test/Transforms/LoopVectorize/cse-casts.ll new file mode 100644 index 0000000000000..e923560bb77e8 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/cse-casts.ll @@ -0,0 +1,351 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph:" --version 6 +; RUN: opt -p loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -S %s | FileCheck %s + +define i8 @preserve_flags_when_cloning_trunc(i8 %start, ptr noalias %src, ptr noalias %dst) { +; CHECK-LABEL: define i8 @preserve_flags_when_cloning_trunc( +; CHECK-SAME: i8 [[START:%.*]], ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i8> splat (i8 1), i8 [[START]], i32 0 +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i8> [ [[TMP0]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i8> [ splat (i8 1), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[SRC]], align 4 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i1> [[TMP2]] to <4 x i16> +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16, ptr [[DST]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i16, ptr [[TMP4]], i32 4 +; CHECK-NEXT: store <4 x i16> [[TMP3]], ptr [[TMP4]], align 2 +; CHECK-NEXT: store <4 x i16> [[TMP3]], ptr [[TMP5]], align 2 +; CHECK-NEXT: [[TMP6]] = mul <4 x i8> [[VEC_PHI]], splat (i8 3) +; CHECK-NEXT: [[TMP7]] = mul <4 x i8> [[VEC_PHI1]], splat (i8 3) +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 416 +; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <4 x i8> [[TMP7]], [[TMP6]] +; CHECK-NEXT: [[TMP9:%.*]] = call i8 @llvm.vector.reduce.mul.v4i8(<4 x i8> [[BIN_RDX]]) +; CHECK-NEXT: br label %[[SCALAR_PH:.*]] +; CHECK: [[SCALAR_PH]]: +; +entry: + br label %loop + +loop: + %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] + %red = phi i8 [ %red.next, %loop ], [ %start, %entry ] + %l = load i32, ptr %src, align 4 + %cmp = icmp ne i32 %l, 0 + %cmp.ext = zext i1 %cmp to i64 + %cmp.trunc = trunc i64 %cmp.ext to i16 + %gep.dst = getelementptr i16, ptr %dst, i64 %iv + store i16 %cmp.trunc, ptr %gep.dst, align 2 + %red.next = mul i8 %red, 3 + %iv.next = add i64 %iv, 1 + %ec = icmp ult i64 %iv, 416 + br i1 %ec, label %loop, label %exit + +exit: + ret i8 %red.next +} + + +define void @preserve_flags_narrowing_extends_and_truncs(ptr noalias %A, ptr noalias %B, ptr noalias %C) { +; CHECK-LABEL: define void @preserve_flags_narrowing_extends_and_truncs( +; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; 
CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: br i1 true, label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1 +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i8> poison, i8 [[TMP1]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP3:%.*]] = phi <4 x i8> [ poison, %[[VECTOR_BODY]] ], [ [[TMP2]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: br i1 true, label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2:.*]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 1 +; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i8> [[TMP3]], i8 [[TMP5]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[TMP7:%.*]] = phi <4 x i8> [ [[TMP3]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP6]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF3:.*]], label %[[PRED_LOAD_CONTINUE4:.*]] +; CHECK: [[PRED_LOAD_IF3]]: +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 2 +; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1 +; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i8> [[TMP7]], i8 [[TMP9]], i32 2 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE4]] +; CHECK: [[PRED_LOAD_CONTINUE4]]: +; CHECK-NEXT: [[TMP11:%.*]] = phi <4 x i8> [ [[TMP7]], %[[PRED_LOAD_CONTINUE2]] ], [ [[TMP10]], %[[PRED_LOAD_IF3]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF5:.*]], label %[[PRED_LOAD_CONTINUE6:.*]] +; CHECK: [[PRED_LOAD_IF5]]: +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 3 +; CHECK-NEXT: [[TMP13:%.*]] = load i8, ptr [[TMP12]], align 1 +; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i8> [[TMP11]], i8 [[TMP13]], i32 3 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE6]] +; CHECK: [[PRED_LOAD_CONTINUE6]]: +; CHECK-NEXT: [[TMP15:%.*]] = phi <4 x i8> [ [[TMP11]], %[[PRED_LOAD_CONTINUE4]] ], [ [[TMP14]], %[[PRED_LOAD_IF5]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF7:.*]], label %[[PRED_LOAD_CONTINUE8:.*]] +; CHECK: [[PRED_LOAD_IF7]]: +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 4 +; CHECK-NEXT: [[TMP17:%.*]] = load i8, ptr [[TMP16]], align 1 +; CHECK-NEXT: [[TMP18:%.*]] = insertelement <4 x i8> poison, i8 [[TMP17]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE8]] +; CHECK: [[PRED_LOAD_CONTINUE8]]: +; CHECK-NEXT: [[TMP19:%.*]] = phi <4 x i8> [ poison, %[[PRED_LOAD_CONTINUE6]] ], [ [[TMP18]], %[[PRED_LOAD_IF7]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF9:.*]], label %[[PRED_LOAD_CONTINUE10:.*]] +; CHECK: [[PRED_LOAD_IF9]]: +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 5 +; CHECK-NEXT: [[TMP21:%.*]] = load i8, ptr [[TMP20]], align 1 +; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i8> [[TMP19]], i8 [[TMP21]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE10]] +; CHECK: [[PRED_LOAD_CONTINUE10]]: +; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i8> [ [[TMP19]], %[[PRED_LOAD_CONTINUE8]] ], [ [[TMP22]], %[[PRED_LOAD_IF9]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF11:.*]], label %[[PRED_LOAD_CONTINUE12:.*]] +; CHECK: [[PRED_LOAD_IF11]]: +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 6 +; CHECK-NEXT: [[TMP25:%.*]] = load 
i8, ptr [[TMP24]], align 1 +; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i8> [[TMP23]], i8 [[TMP25]], i32 2 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE12]] +; CHECK: [[PRED_LOAD_CONTINUE12]]: +; CHECK-NEXT: [[TMP27:%.*]] = phi <4 x i8> [ [[TMP23]], %[[PRED_LOAD_CONTINUE10]] ], [ [[TMP26]], %[[PRED_LOAD_IF11]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF13:.*]], label %[[PRED_LOAD_CONTINUE14:.*]] +; CHECK: [[PRED_LOAD_IF13]]: +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 7 +; CHECK-NEXT: [[TMP29:%.*]] = load i8, ptr [[TMP28]], align 1 +; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i8> [[TMP27]], i8 [[TMP29]], i32 3 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE14]] +; CHECK: [[PRED_LOAD_CONTINUE14]]: +; CHECK-NEXT: [[TMP31:%.*]] = phi <4 x i8> [ [[TMP27]], %[[PRED_LOAD_CONTINUE12]] ], [ [[TMP30]], %[[PRED_LOAD_IF13]] ] +; CHECK-NEXT: [[TMP32:%.*]] = zext <4 x i8> [[TMP15]] to <4 x i64> +; CHECK-NEXT: [[TMP33:%.*]] = zext <4 x i8> [[TMP31]] to <4 x i64> +; CHECK-NEXT: br i1 true, label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; CHECK: [[PRED_STORE_IF]]: +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[C]], i64 0 +; CHECK-NEXT: [[TMP35:%.*]] = extractelement <4 x i64> [[TMP32]], i32 0 +; CHECK-NEXT: store i64 [[TMP35]], ptr [[TMP34]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] +; CHECK: [[PRED_STORE_CONTINUE]]: +; CHECK-NEXT: br i1 true, label %[[PRED_STORE_IF15:.*]], label %[[PRED_STORE_CONTINUE16:.*]] +; CHECK: [[PRED_STORE_IF15]]: +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[C]], i64 1 +; CHECK-NEXT: [[TMP37:%.*]] = extractelement <4 x i64> [[TMP32]], i32 1 +; CHECK-NEXT: store i64 [[TMP37]], ptr [[TMP36]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE16]] +; CHECK: [[PRED_STORE_CONTINUE16]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF17:.*]], label %[[PRED_STORE_CONTINUE18:.*]] +; CHECK: [[PRED_STORE_IF17]]: +; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[C]], i64 2 +; CHECK-NEXT: [[TMP39:%.*]] = extractelement <4 x i64> [[TMP32]], i32 2 +; CHECK-NEXT: store i64 [[TMP39]], ptr [[TMP38]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE18]] +; CHECK: [[PRED_STORE_CONTINUE18]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]] +; CHECK: [[PRED_STORE_IF19]]: +; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[C]], i64 3 +; CHECK-NEXT: [[TMP41:%.*]] = extractelement <4 x i64> [[TMP32]], i32 3 +; CHECK-NEXT: store i64 [[TMP41]], ptr [[TMP40]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE20]] +; CHECK: [[PRED_STORE_CONTINUE20]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]] +; CHECK: [[PRED_STORE_IF21]]: +; CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[C]], i64 4 +; CHECK-NEXT: [[TMP43:%.*]] = extractelement <4 x i64> [[TMP33]], i32 0 +; CHECK-NEXT: store i64 [[TMP43]], ptr [[TMP42]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE22]] +; CHECK: [[PRED_STORE_CONTINUE22]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]] +; CHECK: [[PRED_STORE_IF23]]: +; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[C]], i64 5 +; CHECK-NEXT: [[TMP45:%.*]] = extractelement <4 x i64> [[TMP33]], i32 1 +; CHECK-NEXT: store i64 [[TMP45]], ptr [[TMP44]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE24]] +; CHECK: [[PRED_STORE_CONTINUE24]]: +; CHECK-NEXT: br i1 false, 
label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]] +; CHECK: [[PRED_STORE_IF25]]: +; CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[C]], i64 6 +; CHECK-NEXT: [[TMP47:%.*]] = extractelement <4 x i64> [[TMP33]], i32 2 +; CHECK-NEXT: store i64 [[TMP47]], ptr [[TMP46]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE26]] +; CHECK: [[PRED_STORE_CONTINUE26]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28:.*]] +; CHECK: [[PRED_STORE_IF27]]: +; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[C]], i64 7 +; CHECK-NEXT: [[TMP49:%.*]] = extractelement <4 x i64> [[TMP33]], i32 3 +; CHECK-NEXT: store i64 [[TMP49]], ptr [[TMP48]], align 4 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE28]] +; CHECK: [[PRED_STORE_CONTINUE28]]: +; CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 0 +; CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 1 +; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 2 +; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 3 +; CHECK-NEXT: [[TMP54:%.*]] = insertelement <4 x ptr> poison, ptr [[TMP50]], i32 0 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x ptr> [[TMP54]], ptr [[TMP51]], i32 1 +; CHECK-NEXT: [[TMP56:%.*]] = insertelement <4 x ptr> [[TMP55]], ptr [[TMP52]], i32 2 +; CHECK-NEXT: [[TMP57:%.*]] = insertelement <4 x ptr> [[TMP56]], ptr [[TMP53]], i32 3 +; CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 4 +; CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 5 +; CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 6 +; CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 7 +; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x ptr> poison, ptr [[TMP58]], i32 0 +; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x ptr> [[TMP62]], ptr [[TMP59]], i32 1 +; CHECK-NEXT: [[TMP64:%.*]] = insertelement <4 x ptr> [[TMP63]], ptr [[TMP60]], i32 2 +; CHECK-NEXT: [[TMP65:%.*]] = insertelement <4 x ptr> [[TMP64]], ptr [[TMP61]], i32 3 +; CHECK-NEXT: br i1 true, label %[[PRED_LOAD_IF29:.*]], label %[[PRED_LOAD_CONTINUE30:.*]] +; CHECK: [[PRED_LOAD_IF29]]: +; CHECK-NEXT: [[TMP66:%.*]] = load i8, ptr [[TMP50]], align 1 +; CHECK-NEXT: [[TMP67:%.*]] = insertelement <4 x i8> poison, i8 [[TMP66]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE30]] +; CHECK: [[PRED_LOAD_CONTINUE30]]: +; CHECK-NEXT: [[TMP68:%.*]] = phi <4 x i8> [ poison, %[[PRED_STORE_CONTINUE28]] ], [ [[TMP67]], %[[PRED_LOAD_IF29]] ] +; CHECK-NEXT: br i1 true, label %[[PRED_LOAD_IF31:.*]], label %[[PRED_LOAD_CONTINUE32:.*]] +; CHECK: [[PRED_LOAD_IF31]]: +; CHECK-NEXT: [[TMP69:%.*]] = load i8, ptr [[TMP51]], align 1 +; CHECK-NEXT: [[TMP70:%.*]] = insertelement <4 x i8> [[TMP68]], i8 [[TMP69]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE32]] +; CHECK: [[PRED_LOAD_CONTINUE32]]: +; CHECK-NEXT: [[TMP71:%.*]] = phi <4 x i8> [ [[TMP68]], %[[PRED_LOAD_CONTINUE30]] ], [ [[TMP70]], %[[PRED_LOAD_IF31]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF33:.*]], label %[[PRED_LOAD_CONTINUE34:.*]] +; CHECK: [[PRED_LOAD_IF33]]: +; CHECK-NEXT: [[TMP72:%.*]] = load i8, ptr [[TMP52]], align 1 +; CHECK-NEXT: [[TMP73:%.*]] = insertelement <4 x i8> [[TMP71]], i8 [[TMP72]], i32 2 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE34]] +; CHECK: [[PRED_LOAD_CONTINUE34]]: +; CHECK-NEXT: [[TMP74:%.*]] = phi <4 x i8> [ [[TMP71]], %[[PRED_LOAD_CONTINUE32]] ], [ [[TMP73]], %[[PRED_LOAD_IF33]] ] +; CHECK-NEXT: br i1 
false, label %[[PRED_LOAD_IF35:.*]], label %[[PRED_LOAD_CONTINUE36:.*]] +; CHECK: [[PRED_LOAD_IF35]]: +; CHECK-NEXT: [[TMP75:%.*]] = load i8, ptr [[TMP53]], align 1 +; CHECK-NEXT: [[TMP76:%.*]] = insertelement <4 x i8> [[TMP74]], i8 [[TMP75]], i32 3 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE36]] +; CHECK: [[PRED_LOAD_CONTINUE36]]: +; CHECK-NEXT: [[TMP77:%.*]] = phi <4 x i8> [ [[TMP74]], %[[PRED_LOAD_CONTINUE34]] ], [ [[TMP76]], %[[PRED_LOAD_IF35]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF37:.*]], label %[[PRED_LOAD_CONTINUE38:.*]] +; CHECK: [[PRED_LOAD_IF37]]: +; CHECK-NEXT: [[TMP78:%.*]] = load i8, ptr [[TMP58]], align 1 +; CHECK-NEXT: [[TMP79:%.*]] = insertelement <4 x i8> poison, i8 [[TMP78]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE38]] +; CHECK: [[PRED_LOAD_CONTINUE38]]: +; CHECK-NEXT: [[TMP80:%.*]] = phi <4 x i8> [ poison, %[[PRED_LOAD_CONTINUE36]] ], [ [[TMP79]], %[[PRED_LOAD_IF37]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF39:.*]], label %[[PRED_LOAD_CONTINUE40:.*]] +; CHECK: [[PRED_LOAD_IF39]]: +; CHECK-NEXT: [[TMP81:%.*]] = load i8, ptr [[TMP59]], align 1 +; CHECK-NEXT: [[TMP82:%.*]] = insertelement <4 x i8> [[TMP80]], i8 [[TMP81]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE40]] +; CHECK: [[PRED_LOAD_CONTINUE40]]: +; CHECK-NEXT: [[TMP83:%.*]] = phi <4 x i8> [ [[TMP80]], %[[PRED_LOAD_CONTINUE38]] ], [ [[TMP82]], %[[PRED_LOAD_IF39]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF41:.*]], label %[[PRED_LOAD_CONTINUE42:.*]] +; CHECK: [[PRED_LOAD_IF41]]: +; CHECK-NEXT: [[TMP84:%.*]] = load i8, ptr [[TMP60]], align 1 +; CHECK-NEXT: [[TMP85:%.*]] = insertelement <4 x i8> [[TMP83]], i8 [[TMP84]], i32 2 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE42]] +; CHECK: [[PRED_LOAD_CONTINUE42]]: +; CHECK-NEXT: [[TMP86:%.*]] = phi <4 x i8> [ [[TMP83]], %[[PRED_LOAD_CONTINUE40]] ], [ [[TMP85]], %[[PRED_LOAD_IF41]] ] +; CHECK-NEXT: br i1 false, label %[[PRED_LOAD_IF43:.*]], label %[[PRED_LOAD_CONTINUE44:.*]] +; CHECK: [[PRED_LOAD_IF43]]: +; CHECK-NEXT: [[TMP87:%.*]] = load i8, ptr [[TMP61]], align 1 +; CHECK-NEXT: [[TMP88:%.*]] = insertelement <4 x i8> [[TMP86]], i8 [[TMP87]], i32 3 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE44]] +; CHECK: [[PRED_LOAD_CONTINUE44]]: +; CHECK-NEXT: [[TMP89:%.*]] = phi <4 x i8> [ [[TMP86]], %[[PRED_LOAD_CONTINUE42]] ], [ [[TMP88]], %[[PRED_LOAD_IF43]] ] +; CHECK-NEXT: [[TMP90:%.*]] = trunc <4 x i8> [[TMP77]] to <4 x i1> +; CHECK-NEXT: [[TMP91:%.*]] = trunc <4 x i8> [[TMP89]] to <4 x i1> +; CHECK-NEXT: [[TMP92:%.*]] = and <4 x i1> [[TMP90]], splat (i1 true) +; CHECK-NEXT: [[TMP93:%.*]] = and <4 x i1> [[TMP91]], splat (i1 true) +; CHECK-NEXT: [[TMP94:%.*]] = select <4 x i1> [[TMP90]], <4 x float> splat (float 1.000000e+00), <4 x float> zeroinitializer +; CHECK-NEXT: [[TMP95:%.*]] = select <4 x i1> [[TMP91]], <4 x float> splat (float 1.000000e+00), <4 x float> zeroinitializer +; CHECK-NEXT: [[TMP96:%.*]] = select <4 x i1> [[TMP92]], <4 x float> splat (float 3.000000e+00), <4 x float> [[TMP94]] +; CHECK-NEXT: [[TMP97:%.*]] = select <4 x i1> [[TMP93]], <4 x float> splat (float 3.000000e+00), <4 x float> [[TMP95]] +; CHECK-NEXT: [[TMP98:%.*]] = bitcast <4 x float> [[TMP96]] to <4 x i32> +; CHECK-NEXT: [[TMP99:%.*]] = bitcast <4 x float> [[TMP97]] to <4 x i32> +; CHECK-NEXT: [[TMP100:%.*]] = trunc <4 x i32> [[TMP98]] to <4 x i8> +; CHECK-NEXT: [[TMP101:%.*]] = trunc <4 x i32> [[TMP99]] to <4 x i8> +; CHECK-NEXT: br i1 true, label %[[PRED_STORE_IF45:.*]], label %[[PRED_STORE_CONTINUE46:.*]] +; CHECK: [[PRED_STORE_IF45]]: +; 
CHECK-NEXT: [[TMP102:%.*]] = extractelement <4 x i8> [[TMP100]], i32 0 +; CHECK-NEXT: store i8 [[TMP102]], ptr [[TMP50]], align 1 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE46]] +; CHECK: [[PRED_STORE_CONTINUE46]]: +; CHECK-NEXT: br i1 true, label %[[PRED_STORE_IF47:.*]], label %[[PRED_STORE_CONTINUE48:.*]] +; CHECK: [[PRED_STORE_IF47]]: +; CHECK-NEXT: [[TMP103:%.*]] = extractelement <4 x i8> [[TMP100]], i32 1 +; CHECK-NEXT: store i8 [[TMP103]], ptr [[TMP51]], align 1 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE48]] +; CHECK: [[PRED_STORE_CONTINUE48]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF49:.*]], label %[[PRED_STORE_CONTINUE50:.*]] +; CHECK: [[PRED_STORE_IF49]]: +; CHECK-NEXT: [[TMP104:%.*]] = extractelement <4 x i8> [[TMP100]], i32 2 +; CHECK-NEXT: store i8 [[TMP104]], ptr [[TMP52]], align 1 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE50]] +; CHECK: [[PRED_STORE_CONTINUE50]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF51:.*]], label %[[PRED_STORE_CONTINUE52:.*]] +; CHECK: [[PRED_STORE_IF51]]: +; CHECK-NEXT: [[TMP105:%.*]] = extractelement <4 x i8> [[TMP100]], i32 3 +; CHECK-NEXT: store i8 [[TMP105]], ptr [[TMP53]], align 1 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE52]] +; CHECK: [[PRED_STORE_CONTINUE52]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF53:.*]], label %[[PRED_STORE_CONTINUE54:.*]] +; CHECK: [[PRED_STORE_IF53]]: +; CHECK-NEXT: [[TMP106:%.*]] = extractelement <4 x i8> [[TMP101]], i32 0 +; CHECK-NEXT: store i8 [[TMP106]], ptr [[TMP58]], align 1 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE54]] +; CHECK: [[PRED_STORE_CONTINUE54]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF55:.*]], label %[[PRED_STORE_CONTINUE56:.*]] +; CHECK: [[PRED_STORE_IF55]]: +; CHECK-NEXT: [[TMP107:%.*]] = extractelement <4 x i8> [[TMP101]], i32 1 +; CHECK-NEXT: store i8 [[TMP107]], ptr [[TMP59]], align 1 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE56]] +; CHECK: [[PRED_STORE_CONTINUE56]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF57:.*]], label %[[PRED_STORE_CONTINUE58:.*]] +; CHECK: [[PRED_STORE_IF57]]: +; CHECK-NEXT: [[TMP108:%.*]] = extractelement <4 x i8> [[TMP101]], i32 2 +; CHECK-NEXT: store i8 [[TMP108]], ptr [[TMP60]], align 1 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE58]] +; CHECK: [[PRED_STORE_CONTINUE58]]: +; CHECK-NEXT: br i1 false, label %[[PRED_STORE_IF59:.*]], label %[[PRED_STORE_CONTINUE60:.*]] +; CHECK: [[PRED_STORE_IF59]]: +; CHECK-NEXT: [[TMP109:%.*]] = extractelement <4 x i8> [[TMP101]], i32 3 +; CHECK-NEXT: store i8 [[TMP109]], ptr [[TMP61]], align 1 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE60]] +; CHECK: [[PRED_STORE_CONTINUE60]]: +; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %gep.A = getelementptr inbounds i8, ptr %A, i64 %iv + %l = load i8, ptr %gep.A + %l.ext = zext i8 %l to i64 + %gep.C = getelementptr inbounds i8, ptr %C, i64 %iv + store i64 %l.ext, ptr %gep.C + %gep.B = getelementptr inbounds i8, ptr %B, i64 %iv + %l.1 = load i8, ptr %gep.B, align 1 + %masked = and i8 %l.1, 1 + %l.1.trunc = trunc i8 %l.1 to i1 + %sel.0 = select i1 %l.1.trunc, float 1.000000e+00, float 0.000000e+00 + %masked.trunc = trunc i8 %masked to i1 + %sel.1 = select i1 %masked.trunc, float 3.000000e+00, float %sel.0 + %bc = bitcast float %sel.1 to i32 + %bc.trunc = trunc i32 %bc to i8 + store i8 %bc.trunc, ptr %gep.B, align 1 + %iv.next = add i64 %iv, 1 + %ec = 
icmp eq i64 %iv, 1 + br i1 %ec, label %exit, label %loop + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll index 0a8e9dc0b4093..02e1d0e9e7004 100644 --- a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll @@ -94,20 +94,8 @@ define void @pr47390(ptr %a) { ; CHECK-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PRIMARY:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[PRIMARY_ADD:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[USE_PRIMARY:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[PRIMARY]], %[[LOOP]] ] -; CHECK-NEXT: [[SECONDARY:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[SECONDARY_ADD:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[PRIMARY_ADD]] = add i32 [[PRIMARY]], 1 -; CHECK-NEXT: [[SECONDARY_ADD]] = add i32 [[SECONDARY]], 1 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[SECONDARY]] -; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[GEP]], align 8 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[SECONDARY]], 5 -; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll b/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll index f61478bfc8856..b31b73274e1cc 100644 --- a/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll +++ b/llvm/test/Transforms/LoopVectorize/debugloc-optimize-vfuf-term.ll @@ -15,15 +15,6 @@ define i32 @foo(ptr %p) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]], !dbg [[DBG3]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ], !dbg [[DBG7:![0-9]+]] -; CHECK-NEXT: [[CONV:%.*]] = trunc i64 0 to i8, !dbg [[DBG8:![0-9]+]] -; CHECK-NEXT: store i8 [[CONV]], ptr [[P]], align 1, !dbg [[DBG3]] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !dbg [[DBG9:![0-9]+]] -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 1, !dbg [[DBG10:![0-9]+]] -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG11:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret i32 0 ; @@ -64,9 +55,4 @@ exit: ; preds = %loop ; CHECK: [[META4]] = distinct !DISubprogram(name: "foo", scope: [[META1]], file: [[META1]], line: 11, type: [[META5:![0-9]+]], spFlags: DISPFlagDefinition, unit: [[META0]], retainedNodes: [[META6:![0-9]+]]) ; CHECK: [[META5]] = distinct !DISubroutineType(types: [[META6]]) ; CHECK: [[META6]] = !{} -; CHECK: [[DBG7]] = !DILocation(line: 4, scope: [[META4]]) -; CHECK: [[DBG8]] = !DILocation(line: 5, scope: [[META4]]) -; CHECK: [[DBG9]] = !DILocation(line: 7, scope: [[META4]]) -; CHECK: [[DBG10]] = !DILocation(line: 8, scope: [[META4]]) -; CHECK: [[DBG11]] = !DILocation(line: 9, scope: [[META4]]) ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll index b3338f475ca1d..75420d40f2aad 100644 --- a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll +++ b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph:" --version 6 ; RUN: opt -p loop-vectorize -force-vector-width=2 -S %s | FileCheck %s declare void @llvm.assume(i1) @@ -47,29 +47,8 @@ define void @deref_assumption_in_header_constant_trip_count(ptr noalias noundef ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -123,27 +102,8 @@ define void @align_deref_assumption_in_header_constant_trip_count_loop_invariant ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], 
%[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4) ] @@ -216,29 +176,8 @@ define void @deref_assumption_too_small_in_header_constant_trip_count(ptr noalia ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 2) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -312,29 +251,8 @@ define void @deref_assumption_in_header_constant_trip_count_align_1(ptr noalias ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, 
ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -408,29 +326,8 @@ define void @deref_assumption_in_header_constant_trip_count_align_via_arg_attrib ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -504,29 +401,8 @@ define void @deref_assumption_in_header_constant_trip_count_align_not_known(ptr ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 
[[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -596,29 +472,8 @@ define void @deref_assumption_in_then_constant_trip_count(ptr noalias noundef %a ; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -692,29 +547,8 @@ define void @deref_assumption_in_latch_constant_trip_count(ptr noalias noundef % ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; 
CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -747,7 +581,7 @@ exit: define void @deref_assumption_in_header_variable_trip_count(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c, i64 %N) nofree nosync{ ; CHECK-LABEL: define void @deref_assumption_in_header_variable_trip_count( ; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 2 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -792,30 +626,8 @@ define void @deref_assumption_in_header_variable_trip_count(ptr noalias noundef ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP11:![0-9]+]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void ; entry: br label %loop.header @@ -867,28 +679,8 @@ define void @deref_assumption_in_preheader_constant_trip_count_align_1(ptr noali ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = 
load i32, ptr [[GEP_A]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 4000) ] @@ -958,28 +750,8 @@ define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_1 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 3999) ] @@ -1031,28 +803,8 @@ define void @align_and_deref_assumption_in_preheader_constant_trip_count_align_4 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr 
[[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] @@ -1105,28 +857,8 @@ define void @deref_assumption_in_preheader_constant_trip_count_align_4_known_via ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 4000) ] @@ -1196,28 +928,8 @@ define void @deref_assumption_in_preheader_constant_trip_count_align_4_not_known ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label 
%[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 4000) ] @@ -1287,28 +999,8 @@ define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_4 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 3999) ] @@ -1376,27 +1068,8 @@ define void @may_free_align_deref_assumption_in_header_constant_trip_count_loop_ ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4) ] @@ -1465,27 +1138,8 @@ define void 
@may_free_local_ptr_align_deref_assumption_in_header_constant_trip_c ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: %a = call ptr @get_ptr() @@ -1519,25 +1173,306 @@ exit: declare ptr @get_ptr() declare void @may_free() -;. -; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} -; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} -; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} -; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} -; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} -; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} -; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]} -; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} -; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]]} -; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]} -; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]]} -; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]} -; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]]} -; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]} -; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]]} -;. 
+define void @deref_assumption_in_header_constant_trip_count_nofree_via_context(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c) nosync { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_nofree_via_context( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR2:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; CHECK-NEXT: br i1 [[TMP13]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP11]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[WIDE_LOAD1]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.a = getelementptr i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %gep.a, align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, 
ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +define void @deref_assumption_in_header_constant_trip_count_may_free(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c) nosync { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_may_free( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR2]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: call void @may_free() +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP17]] +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i32> poison, i32 [[TMP7]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP9:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP8]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; CHECK-NEXT: br i1 [[TMP10]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP18]] +; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> [[TMP9]], i32 [[TMP12]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[TMP14:%.*]] = phi <2 x i32> [ [[TMP9]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP13]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP4]], <2 x i32> [[TMP14]], <2 x i32> [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP15]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] + call void @may_free() + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.a = getelementptr i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %gep.a, 
align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +define void @deref_assumption_in_header_constant_trip_count_nofree_via_context_but_missing_nosync(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c) { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_nofree_via_context_but_missing_nosync( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 +; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1 +; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP11]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> [[TMP13]], <2 x i32> [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.a = getelementptr i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + 
%l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %gep.a, align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +define void @deref_assumption_in_header_constant_trip_count_multiple_loop_predecessors(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c, i1 %pre) nosync { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_multiple_loop_predecessors( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i1 [[PRE:%.*]]) #[[ATTR2]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: br i1 [[PRE]], label %[[THEN:.*]], label %[[ELSE:.*]] +; CHECK: [[THEN]]: +; CHECK-NEXT: store i32 0, ptr [[A]], align 4 +; CHECK-NEXT: br label %[[LOOP_HEADER_PREHEADER:.*]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: store i32 0, ptr [[B]], align 4 +; CHECK-NEXT: br label %[[LOOP_HEADER_PREHEADER]] +; CHECK: [[LOOP_HEADER_PREHEADER]]: +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 +; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1 +; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP11]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> [[TMP13]], <2 x i32> [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 
[[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] + br i1 %pre, label %then, label %else + +then: + store i32 0, ptr %a + br label %loop.header + +else: + store i32 0, ptr %b + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %then ], [ 0, %else ], [ %iv.next, %loop.latch ] + %gep.a = getelementptr i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %gep.a, align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + + diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll index d97624fa6eace..274bd043cd86b 100644 --- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll +++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-const-TC.ll @@ -24,16 +24,7 @@ define dso_local void @constTC(ptr noalias nocapture %A) optsize { ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1800 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 1800 -; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll index 4f5a26e9c89cb..156c2bdca7b0e 100644 --- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll +++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll @@ -198,16 +198,7 @@ define dso_local void @cannotProveAlignedTC(ptr noalias nocapture %A, i32 %p, i3 ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[RIVPLUS1:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], [[N]] -; CHECK-NEXT: br i1 [[COND]], label [[EXIT_LOOPEXIT]], label 
[[LOOP]] ; CHECK: exit.loopexit: ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: diff --git a/llvm/test/Transforms/LoopVectorize/epilog-iv-select-cmp.ll b/llvm/test/Transforms/LoopVectorize/epilog-iv-select-cmp.ll index 2d75576bc36ee..5e3a70222d7bb 100644 --- a/llvm/test/Transforms/LoopVectorize/epilog-iv-select-cmp.ll +++ b/llvm/test/Transforms/LoopVectorize/epilog-iv-select-cmp.ll @@ -33,9 +33,8 @@ define i64 @select_icmp_const(ptr %a, i64 %n) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX1:%.*]] = phi i64 [ [[RDX_SELECT]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 3, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -60,7 +59,7 @@ define i64 @select_icmp_const(ptr %a, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[TMP7]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT6]] = add <4 x i64> [[VEC_IND5]], splat (i64 4) ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP12]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> [[TMP11]]) ; CHECK-NEXT: [[RDX_SELECT_CMP10:%.*]] = icmp ne i64 [[TMP13]], -9223372036854775808 @@ -80,7 +79,7 @@ define i64 @select_icmp_const(ptr %a, i64 %n) { ; CHECK-NEXT: [[SEL]] = select i1 [[C]], i64 [[IV]], i64 [[RDX]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[SEL_LCSSA:%.*]] = phi i64 [ [[SEL]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ [[RDX_SELECT11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[SEL_LCSSA]] @@ -127,7 +126,7 @@ define i64 @select_fcmp_const_fast(ptr %a, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> [[TMP4]]) ; CHECK-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP6]], -9223372036854775808 @@ -135,9 +134,8 @@ define i64 @select_fcmp_const_fast(ptr %a, i64 %n) { ; 
CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX1:%.*]] = phi i64 [ [[RDX_SELECT]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 2, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -162,7 +160,7 @@ define i64 @select_fcmp_const_fast(ptr %a, i64 %n) { ; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[TMP7]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT6]] = add <4 x i64> [[VEC_IND5]], splat (i64 4) ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP12]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> [[TMP11]]) ; CHECK-NEXT: [[RDX_SELECT_CMP10:%.*]] = icmp ne i64 [[TMP13]], -9223372036854775808 @@ -182,7 +180,7 @@ define i64 @select_fcmp_const_fast(ptr %a, i64 %n) { ; CHECK-NEXT: [[SEL]] = select i1 [[C]], i64 [[IV]], i64 [[RDX]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[SEL_LCSSA:%.*]] = phi i64 [ [[SEL]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ [[RDX_SELECT11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[SEL_LCSSA]] @@ -235,7 +233,7 @@ define i8 @select_icmp_var_start(ptr %a, i8 %n, i8 %start) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i8> [[VEC_IND]], splat (i8 4) ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP10:%.*]] = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> [[TMP8]]) ; CHECK-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i8 [[TMP10]], -128 @@ -244,9 +242,8 @@ define i8 @select_icmp_var_start(ptr %a, i8 %n, i8 %start) { ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[IND_END:%.*]] = trunc i32 [[N_VEC]] to i8 -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i32 [[TMP2]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i32 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]] +; CHECK-NEXT: 
[[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i32 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ [[TMP3]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -274,7 +271,7 @@ define i8 @select_icmp_var_start(ptr %a, i8 %n, i8 %start) { ; CHECK-NEXT: [[INDEX_NEXT11]] = add nuw i32 [[INDEX4]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT6]] = add <4 x i8> [[VEC_IND5]], splat (i8 4) ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_NEXT11]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP18]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP18]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP19:%.*]] = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> [[TMP17]]) ; CHECK-NEXT: [[RDX_SELECT_CMP12:%.*]] = icmp ne i8 [[TMP19]], -128 @@ -294,7 +291,7 @@ define i8 @select_icmp_var_start(ptr %a, i8 %n, i8 %start) { ; CHECK-NEXT: [[SEL]] = select i1 [[C]], i8 [[IV]], i8 [[RDX]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i8 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[SEL_LCSSA:%.*]] = phi i8 [ [[SEL]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ [[RDX_SELECT13]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i8 [[SEL_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/epilog-vectorization-any-of-reductions.ll b/llvm/test/Transforms/LoopVectorize/epilog-vectorization-any-of-reductions.ll index 1b822011990ba..1a99c47aa351d 100644 --- a/llvm/test/Transforms/LoopVectorize/epilog-vectorization-any-of-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/epilog-vectorization-any-of-reductions.ll @@ -34,8 +34,7 @@ define i32 @any_of_reduction_epilog(ptr %src, i64 %N) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -129,8 +128,7 @@ define i32 @any_of_reduction_epilog_arg_as_start_value(ptr %src, i64 %N, i32 %st ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: 
br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -228,8 +226,7 @@ define i1 @any_of_reduction_i1_epilog(i64 %N, i32 %a) { ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[IND_END6:%.*]] = trunc i64 [[N_VEC]] to i32 -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -362,8 +359,7 @@ define i1 @any_of_reduction_i1_epilog2(ptr %start, ptr %end, i64 %x) { ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[N_VEC]], 16 ; CHECK-NEXT: [[IND_END9:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP24]] -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/epilog-vectorization-reductions.ll b/llvm/test/Transforms/LoopVectorize/epilog-vectorization-reductions.ll index 5e97cedb452b4..15daf90ad770c 100644 --- a/llvm/test/Transforms/LoopVectorize/epilog-vectorization-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/epilog-vectorization-reductions.ll @@ -30,9 +30,8 @@ define i64 @int_reduction_add(ptr %a, i64 %N) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP5]], [[VEC_EPILOG_ITER_CHECK]] ], [ 5, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -48,7 +47,7 @@ define i64 @int_reduction_add(ptr %a, i64 %N) { ; CHECK-NEXT: [[TMP10]] = add <4 x i64> [[WIDE_LOAD6]], [[VEC_PHI5]] ; CHECK-NEXT: [[INDEX_NEXT7]] = add nuw i64 [[INDEX4]], 4 ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT7]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP11]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br 
i1 [[TMP11]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP10]]) ; CHECK-NEXT: [[CMP_N8:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] @@ -65,7 +64,7 @@ define i64 @int_reduction_add(ptr %a, i64 %N) { ; CHECK-NEXT: [[ADD]] = add i64 [[TMP13]], [[SUM]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ [[TMP12]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[ADD_LCSSA]] @@ -111,15 +110,14 @@ define float @fp_reduction_max(ptr noalias %a, i64 %N) { ; CHECK-NEXT: [[TMP4]] = select fast <4 x i1> [[TMP3]], <4 x float> [[VEC_PHI]], <4 x float> [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP6]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0.000000e+00, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -137,7 +135,7 @@ define float @fp_reduction_max(ptr noalias %a, i64 %N) { ; CHECK-NEXT: [[TMP11]] = select fast <4 x i1> [[TMP10]], <4 x float> [[VEC_PHI5]], <4 x float> [[WIDE_LOAD6]] ; CHECK-NEXT: [[INDEX_NEXT7]] = add nuw i64 [[INDEX4]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT7]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fmax.v4f32(<4 x float> [[TMP11]]) ; CHECK-NEXT: [[CMP_N8:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] @@ -155,7 +153,7 @@ define float @fp_reduction_max(ptr noalias %a, i64 %N) { ; CHECK-NEXT: [[V0]] = select fast i1 [[C0]], float [[RESULT_08]], float [[L0]] ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: 
[[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: [[V0_LCSSA:%.*]] = phi float [ [[V0]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[TMP13]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret float [[V0_LCSSA]] @@ -201,13 +199,13 @@ define i16 @reduction_or_trunc(ptr noalias nocapture %ptr) { ; CHECK-NEXT: [[TMP7]] = zext <4 x i16> [[TMP6]] to <4 x i32> ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP10:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP6]]) ; CHECK-NEXT: [[TMP11:%.*]] = zext i16 [[TMP10]] to i32 ; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: br i1 true, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: br i1 true, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i32 [ 256, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP11]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -225,7 +223,7 @@ define i16 @reduction_or_trunc(ptr noalias nocapture %ptr) { ; CHECK-NEXT: [[TMP20]] = zext <4 x i16> [[TMP19]] to <4 x i32> ; CHECK-NEXT: [[INDEX_NEXT4]] = add nuw i32 [[INDEX1]], 4 ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT4]], 256 -; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP21]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP23:%.*]] = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> [[TMP19]]) ; CHECK-NEXT: [[TMP24:%.*]] = zext i16 [[TMP23]] to i32 @@ -244,7 +242,7 @@ define i16 @reduction_or_trunc(ptr noalias nocapture %ptr) { ; CHECK-NEXT: [[XOR]] = or i32 [[SUM_02]], [[EXT]] ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ [[TMP24]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[XOR_LCSSA]] to i16 @@ -295,16 +293,15 @@ define float @multiple_fp_rdx(ptr %A, i64 %N) { ; CHECK-NEXT: [[TMP4]] = fmul fast <4 x float> [[VEC_PHI]], [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP12:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]]) ; CHECK-NEXT: [[TMP7:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP6]], [[VEC_EPILOG_ITER_CHECK]] ], [ 1.500000e+01, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -324,7 +321,7 @@ define float @multiple_fp_rdx(ptr %A, i64 %N) { ; CHECK-NEXT: [[TMP14]] = fmul fast <4 x float> [[VEC_PHI7]], [[WIDE_LOAD9]] ; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX6]], 4 ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC5]] -; CHECK-NEXT: br i1 [[TMP15]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP15]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP16:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP14]]) ; CHECK-NEXT: [[TMP17:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP13]]) @@ -345,7 +342,7 @@ define float @multiple_fp_rdx(ptr %A, i64 %N) { ; CHECK-NEXT: [[MUL]] = fmul fast float [[PROD]], [[TMP18]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[TMP17]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[TMP16]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] @@ -400,15 +397,14 @@ define i32 @reduction_phi_start_val(ptr %A, i64 %N) { ; CHECK-NEXT: [[TMP4]] = sub <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; 
CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], [[VEC_EPILOG_ITER_CHECK]] ], [ [[START_SUM]], [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -424,7 +420,7 @@ define i32 @reduction_phi_start_val(ptr %A, i64 %N) { ; CHECK-NEXT: [[TMP11]] = sub <4 x i32> [[VEC_PHI5]], [[WIDE_LOAD6]] ; CHECK-NEXT: [[INDEX_NEXT7]] = add nuw i64 [[INDEX4]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT7]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP11]]) ; CHECK-NEXT: [[CMP_N8:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] @@ -441,7 +437,7 @@ define i32 @reduction_phi_start_val(ptr %A, i64 %N) { ; CHECK-NEXT: [[SUB]] = sub nsw i32 [[SUM]], [[LOAD]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: for.cond: ; CHECK-NEXT: [[SUB_LCSSA]] = phi i32 [ [[SUB]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[TMP13]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: [[OUTER_IV_NEXT]] = add nuw nsw i64 [[OUTER_IV]], 1 @@ -501,15 +497,14 @@ define i64 @test_reduction_with_widen_induction_order_1(ptr %A, i64 %N) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], 
[[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP3]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -531,7 +526,7 @@ define i64 @test_reduction_with_widen_induction_order_1(ptr %A, i64 %N) { ; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX4]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT9]] = add <4 x i64> [[VEC_IND5]], splat (i64 4) ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP7]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]]) ; CHECK-NEXT: [[CMP_N10:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] @@ -549,7 +544,7 @@ define i64 @test_reduction_with_widen_induction_order_1(ptr %A, i64 %N) { ; CHECK-NEXT: store i64 [[IV_1]], ptr [[GEP_A]], align 4 ; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[RED_NEXT_LCSSA]] @@ -596,15 +591,14 @@ define i64 @test_reduction_with_widen_induction_order_2(ptr %A, i64 %N) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 -; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP3]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -626,7 +620,7 @@ define i64 @test_reduction_with_widen_induction_order_2(ptr %A, i64 %N) { ; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX4]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT9]] = add <4 x i64> [[VEC_IND6]], splat (i64 4) ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP7]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], 
!llvm.loop [[LOOP21:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]]) ; CHECK-NEXT: [[CMP_N10:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] @@ -644,7 +638,7 @@ define i64 @test_reduction_with_widen_induction_order_2(ptr %A, i64 %N) { ; CHECK-NEXT: store i64 [[IV_1]], ptr [[GEP_A]], align 4 ; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[RED_NEXT_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/epilog-vectorization-trunc-induction-steps.ll b/llvm/test/Transforms/LoopVectorize/epilog-vectorization-trunc-induction-steps.ll index 09bfad56923ab..f79deac2a45b0 100644 --- a/llvm/test/Transforms/LoopVectorize/epilog-vectorization-trunc-induction-steps.ll +++ b/llvm/test/Transforms/LoopVectorize/epilog-vectorization-trunc-induction-steps.ll @@ -37,8 +37,7 @@ define void @trunc_iv_steps_with_epilogue(ptr %A, i64 %N) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP0]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: vec.epilog.ph: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll b/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll index ff550da1ae0e1..4af9f4a13b62b 100644 --- a/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll +++ b/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll @@ -18,9 +18,7 @@ define void @test(ptr %dst) personality ptr null { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP0:%.*]] = mul i32 160, [[STEP]] ; CHECK-NEXT: [[TMP1:%.*]] = mul <4 x i32> splat (i32 4), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STEP]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i32> , [[DOTSPLAT]] +; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i32> , [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> zeroinitializer, [[TMP2]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll index ff2baec8c912e..eca39e6f0b6ba 100644 --- 
a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-dead-instructions.ll @@ -108,25 +108,8 @@ define i32 @sink_after_dead_inst(ptr %A.ptr) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2 ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15 -; CHECK-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true -; CHECK-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]] -; CHECK-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]] -; CHECK-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32 -; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32 -; CHECK-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[A_GEP]], align 4 -; CHECK-NEXT: br i1 [[VEC_DEAD]], label %[[FOR_END]], label %[[LOOP]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[FOR_LCSSA]] +; CHECK-NEXT: ret i32 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll index fd19760159e68..ebfe16bf78abd 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-interleave-only.ll @@ -22,21 +22,8 @@ define float @for_load_interleave_only(ptr %src) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[SRC]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[L:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 16 -; CHECK-NEXT: [[L]] = load float, ptr [[PTR_IV]], align 4 -; CHECK-NEXT: store float 0.000000e+00, ptr [[PTR_IV]], align 4 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[FOR_LCSSA:%.*]] = phi float [ [[FOR]], %[[LOOP]] ], [ [[TMP2]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[FOR_LCSSA]] +; CHECK-NEXT: ret float [[TMP2]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll index 149157aaa4b55..74129806ad6fb 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-multiply-recurrences.ll @@ -119,22 +119,7 @@ define void 
@test_pr54223_sink_after_insertion_order(ptr noalias %a, ptr noalias ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[FOR_1_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR6:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[FOR_2_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[NEG:%.*]] = fneg float [[SCALAR_RECUR6]] -; CHECK-NEXT: [[MULADD:%.*]] = call float @llvm.fmuladd.f32(float [[SCALAR_RECUR]], float [[NEG]], float 0.000000e+00) -; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds float, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[FOR_1_NEXT]] = load float, ptr [[A]], align 4 -; CHECK-NEXT: [[FOR_2_NEXT]] = load float, ptr [[B]], align 4 -; CHECK-NEXT: store float [[MULADD]], ptr [[DST_GEP]], align 4 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 10000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll new file mode 100644 index 0000000000000..e97d6e66d9d7a --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll @@ -0,0 +1,244 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 +; RUN: opt -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck --check-prefix=VF2IC1 %s +; RUN: opt -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck --check-prefix=VF2IC2 %s +; RUN: opt -passes=loop-vectorize -force-vector-width=1 -force-vector-interleave=2 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck --check-prefix=VF1IC2 %s + +define i32 @FOR_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { +; VF2IC1-LABEL: define i32 @FOR_used_outside( +; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC1-NEXT: [[ENTRY:.*]]: +; VF2IC1-NEXT: br label %[[LOOP:.*]] +; VF2IC1: [[LOOP]]: +; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 +; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 +; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1: [[FOR_END]]: +; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF2IC1-NEXT: ret i32 [[TMP32]] +; +; VF2IC2-LABEL: define i32 
@FOR_used_outside( +; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC2-NEXT: [[ENTRY:.*]]: +; VF2IC2-NEXT: br label %[[LOOP:.*]] +; VF2IC2: [[LOOP]]: +; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 +; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 +; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2: [[FOR_END]]: +; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF2IC2-NEXT: ret i32 [[TMP66]] +; +; VF1IC2-LABEL: define i32 @FOR_used_outside( +; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF1IC2-NEXT: [[ENTRY:.*]]: +; VF1IC2-NEXT: br label %[[LOOP:.*]] +; VF1IC2: [[LOOP]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 +; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2: [[FOR_END]]: +; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF1IC2-NEXT: ret i32 [[TMP30]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %for = phi i32 [ 33, %entry ], [ %for.next, %loop ] + %gep.A = getelementptr inbounds nuw i32, ptr %A, i64 %iv + %for.next = load i32, ptr %gep.A, align 4 + %add = add nsw i32 %for, %for.next + %gep.B = getelementptr inbounds nuw i32, ptr %B, i64 %iv + store i32 %add, ptr %gep.B, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %for.end, label %loop + +for.end: + ret i32 %for +} + +define i32 @FOR_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { +; VF2IC1-LABEL: define i32 @FOR_next_used_outside( +; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC1-NEXT: [[ENTRY:.*]]: +; VF2IC1-NEXT: br label %[[LOOP:.*]] +; VF2IC1: [[LOOP]]: +; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 +; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 +; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 
[[N]] +; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1: [[FOR_END]]: +; VF2IC1-NEXT: [[TMP28:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ] +; VF2IC1-NEXT: ret i32 [[TMP28]] +; +; VF2IC2-LABEL: define i32 @FOR_next_used_outside( +; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC2-NEXT: [[ENTRY:.*]]: +; VF2IC2-NEXT: br label %[[LOOP:.*]] +; VF2IC2: [[LOOP]]: +; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 +; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 +; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2: [[FOR_END]]: +; VF2IC2-NEXT: [[TMP62:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] +; VF2IC2-NEXT: ret i32 [[TMP62]] +; +; VF1IC2-LABEL: define i32 @FOR_next_used_outside( +; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF1IC2-NEXT: [[ENTRY:.*]]: +; VF1IC2-NEXT: br label %[[LOOP:.*]] +; VF1IC2: [[LOOP]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 +; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2: [[FOR_END]]: +; VF1IC2-NEXT: [[TMP27:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ] +; VF1IC2-NEXT: ret i32 [[TMP27]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %for = phi i32 [ 33, %entry ], [ %for.next, %loop ] + %gep.A = getelementptr inbounds nuw i32, ptr %A, i64 %iv + %for.next = load i32, ptr %gep.A, align 4 + %add = add nsw i32 %for, %for.next + %gep.B = getelementptr inbounds nuw i32, ptr %B, i64 %iv + store i32 %add, ptr %gep.B, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %for.end, label %loop + +for.end: + ret i32 %for.next +} + +define i32 @FOR_and_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { +; VF2IC1-LABEL: define i32 @FOR_and_next_used_outside( +; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC1-NEXT: [[ENTRY:.*]]: +; VF2IC1-NEXT: br label %[[LOOP:.*]] +; VF2IC1: [[LOOP]]: +; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] +; 
VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 +; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 +; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1: [[FOR_END]]: +; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF2IC1-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ] +; VF2IC1-NEXT: [[RES:%.*]] = add i32 [[TMP32]], [[TMP33]] +; VF2IC1-NEXT: ret i32 [[RES]] +; +; VF2IC2-LABEL: define i32 @FOR_and_next_used_outside( +; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC2-NEXT: [[ENTRY:.*]]: +; VF2IC2-NEXT: br label %[[LOOP:.*]] +; VF2IC2: [[LOOP]]: +; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 +; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 +; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2: [[FOR_END]]: +; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF2IC2-NEXT: [[TMP71:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] +; VF2IC2-NEXT: [[RES:%.*]] = add i32 [[TMP66]], [[TMP71]] +; VF2IC2-NEXT: ret i32 [[RES]] +; +; VF1IC2-LABEL: define i32 @FOR_and_next_used_outside( +; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF1IC2-NEXT: [[ENTRY:.*]]: +; VF1IC2-NEXT: br label %[[LOOP:.*]] +; VF1IC2: [[LOOP]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 +; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2: [[FOR_END]]: +; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF1IC2-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ] +; VF1IC2-NEXT: [[RES:%.*]] = add i32 [[TMP30]], [[TMP33]] +; VF1IC2-NEXT: ret i32 [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %for = phi i32 [ 33, %entry ], [ %for.next, %loop ] + %gep.A = getelementptr inbounds nuw i32, ptr %A, i64 %iv + %for.next = load i32, ptr %gep.A, align 4 + %add = add nsw i32 %for, %for.next + %gep.B = getelementptr inbounds nuw i32, ptr %B, i64 %iv + store i32 %add, ptr %gep.B, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %for.end, label %loop + +for.end: + %res = add i32 %for, %for.next + ret i32 %res +} + + diff --git 
a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll index 443e44b6de944..bd0c098d335a2 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll @@ -1193,19 +1193,9 @@ define i64 @constant_folded_previous_value() { ; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; UNROLL-NO-IC-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; UNROLL-NO-IC: middle.block: -; UNROLL-NO-IC-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[SCALAR_BODY:%.*]] -; UNROLL-NO-IC: scalar.body: -; UNROLL-NO-IC-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[SCALAR_BODY]] ] -; UNROLL-NO-IC-NEXT: [[VAR2:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[VAR3:%.*]], [[SCALAR_BODY]] ] -; UNROLL-NO-IC-NEXT: [[VAR3]] = add i64 0, 1 -; UNROLL-NO-IC-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 -; UNROLL-NO-IC-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 1000 -; UNROLL-NO-IC-NEXT: br i1 [[COND]], label [[FOR_END]], label [[SCALAR_BODY]] ; UNROLL-NO-IC: for.end: -; UNROLL-NO-IC-NEXT: [[VAR2_LCSSA:%.*]] = phi i64 [ [[VAR2]], [[SCALAR_BODY]] ], [ 1, [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i64 [[VAR2_LCSSA]] +; UNROLL-NO-IC-NEXT: ret i64 1 ; ; UNROLL-NO-VF-LABEL: @constant_folded_previous_value( ; UNROLL-NO-VF-NEXT: entry: @@ -1218,19 +1208,9 @@ define i64 @constant_folded_previous_value() { ; UNROLL-NO-VF-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; UNROLL-NO-VF-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; UNROLL-NO-VF: middle.block: -; UNROLL-NO-VF-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-VF: scalar.ph: ; UNROLL-NO-VF-NEXT: br label [[SCALAR_BODY:%.*]] -; UNROLL-NO-VF: scalar.body: -; UNROLL-NO-VF-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[SCALAR_BODY]] ] -; UNROLL-NO-VF-NEXT: [[VAR2:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[VAR3:%.*]], [[SCALAR_BODY]] ] -; UNROLL-NO-VF-NEXT: [[VAR3]] = add i64 0, 1 -; UNROLL-NO-VF-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 -; UNROLL-NO-VF-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 1000 -; UNROLL-NO-VF-NEXT: br i1 [[COND]], label [[FOR_END]], label [[SCALAR_BODY]] ; UNROLL-NO-VF: for.end: -; UNROLL-NO-VF-NEXT: [[VAR2_LCSSA:%.*]] = phi i64 [ [[VAR2]], [[SCALAR_BODY]] ], [ 1, [[MIDDLE_BLOCK]] ] -; UNROLL-NO-VF-NEXT: ret i64 [[VAR2_LCSSA]] +; UNROLL-NO-VF-NEXT: ret i64 1 ; ; SINK-AFTER-LABEL: @constant_folded_previous_value( ; SINK-AFTER-NEXT: entry: @@ -1243,19 +1223,9 @@ define i64 @constant_folded_previous_value() { ; SINK-AFTER-NEXT: [[TMP0:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; SINK-AFTER-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; SINK-AFTER: middle.block: -; SINK-AFTER-NEXT: br label [[FOR_END:%.*]] -; SINK-AFTER: scalar.ph: ; SINK-AFTER-NEXT: br label [[SCALAR_BODY:%.*]] -; SINK-AFTER: scalar.body: -; SINK-AFTER-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[SCALAR_BODY]] ] -; SINK-AFTER-NEXT: [[VAR2:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[VAR3:%.*]], [[SCALAR_BODY]] ] -; SINK-AFTER-NEXT: [[VAR3]] = add i64 0, 1 -; SINK-AFTER-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 -; SINK-AFTER-NEXT: [[COND:%.*]] = icmp eq i64 [[I_NEXT]], 1000 -; SINK-AFTER-NEXT: br i1 [[COND]], label [[FOR_END]], label [[SCALAR_BODY]] ; 
SINK-AFTER: for.end: -; SINK-AFTER-NEXT: [[VAR2_LCSSA:%.*]] = phi i64 [ [[VAR2]], [[SCALAR_BODY]] ], [ 1, [[MIDDLE_BLOCK]] ] -; SINK-AFTER-NEXT: ret i64 [[VAR2_LCSSA]] +; SINK-AFTER-NEXT: ret i64 1 ; entry: br label %scalar.body @@ -2725,21 +2695,9 @@ define i32 @sink_into_replication_region(i32 %y) { ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP49]], [[TMP48]] ; UNROLL-NO-IC-NEXT: [[TMP51:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[BB1:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[BB2:%.*]] ; UNROLL-NO-IC: bb1: -; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[VAR]] -; UNROLL-NO-IC: bb2: -; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; UNROLL-NO-IC-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; UNROLL-NO-IC-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; UNROLL-NO-IC-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27:![0-9]+]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP51]] ; ; UNROLL-NO-VF-LABEL: @sink_into_replication_region( ; UNROLL-NO-VF-NEXT: bb: @@ -2785,21 +2743,9 @@ define i32 @sink_into_replication_region(i32 %y) { ; UNROLL-NO-VF-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25:![0-9]+]], !llvm.loop [[LOOP26:![0-9]+]] ; UNROLL-NO-VF: middle.block: ; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP12]] -; UNROLL-NO-VF-NEXT: br label [[BB1:%.*]] -; UNROLL-NO-VF: scalar.ph: ; UNROLL-NO-VF-NEXT: br label [[BB2:%.*]] ; UNROLL-NO-VF: bb1: -; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-VF-NEXT: ret i32 [[VAR]] -; UNROLL-NO-VF: bb2: -; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; UNROLL-NO-VF-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; UNROLL-NO-VF-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; UNROLL-NO-VF-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27:![0-9]+]] +; UNROLL-NO-VF-NEXT: ret i32 [[BIN_RDX]] ; ; SINK-AFTER-LABEL: @sink_into_replication_region( ; SINK-AFTER-NEXT: bb: @@ -2868,21 +2814,9 @@ define i32 @sink_into_replication_region(i32 %y) { ; SINK-AFTER-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25:![0-9]+]], !llvm.loop [[LOOP26:![0-9]+]] ; SINK-AFTER: middle.block: ; SINK-AFTER-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP25]]) -; SINK-AFTER-NEXT: br label [[BB1:%.*]] -; SINK-AFTER: scalar.ph: ; SINK-AFTER-NEXT: br label [[BB2:%.*]] ; SINK-AFTER: bb1: -; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ] -; SINK-AFTER-NEXT: ret i32 [[VAR]] -; SINK-AFTER: bb2: -; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi 
i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; SINK-AFTER-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; SINK-AFTER-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; SINK-AFTER-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27:![0-9]+]] +; SINK-AFTER-NEXT: ret i32 [[TMP27]] ; bb: br label %bb2 @@ -3078,25 +3012,9 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) { ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP73]], [[TMP72]] ; UNROLL-NO-IC-NEXT: [[TMP75:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[BB1:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[BB2:%.*]] ; UNROLL-NO-IC: bb1: -; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP75]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[VAR]] -; UNROLL-NO-IC: bb2: -; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]] -; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; UNROLL-NO-IC-NEXT: store i32 [[VAR3]], ptr [[G]], align 4 -; UNROLL-NO-IC-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; UNROLL-NO-IC-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; UNROLL-NO-IC-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP75]] ; ; UNROLL-NO-VF-LABEL: @sink_into_replication_region_multiple( ; UNROLL-NO-VF-NEXT: bb: @@ -3155,25 +3073,9 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) { ; UNROLL-NO-VF-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25]], !llvm.loop [[LOOP28:![0-9]+]] ; UNROLL-NO-VF: middle.block: ; UNROLL-NO-VF-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP17]], [[TMP16]] -; UNROLL-NO-VF-NEXT: br label [[BB1:%.*]] -; UNROLL-NO-VF: scalar.ph: ; UNROLL-NO-VF-NEXT: br label [[BB2:%.*]] ; UNROLL-NO-VF: bb1: -; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-VF-NEXT: ret i32 [[VAR]] -; UNROLL-NO-VF: bb2: -; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]] -; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; UNROLL-NO-VF-NEXT: store i32 [[VAR3]], ptr [[G]], align 4 -; UNROLL-NO-VF-NEXT: [[VAR8]] = add 
nsw i32 [[VAR3]], -1 -; UNROLL-NO-VF-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; UNROLL-NO-VF-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; UNROLL-NO-VF-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27]] +; UNROLL-NO-VF-NEXT: ret i32 [[BIN_RDX]] ; ; SINK-AFTER-LABEL: @sink_into_replication_region_multiple( ; SINK-AFTER-NEXT: bb: @@ -3273,25 +3175,9 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) { ; SINK-AFTER-NEXT: br i1 [[TMP38]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !prof [[PROF25]], !llvm.loop [[LOOP28:![0-9]+]] ; SINK-AFTER: middle.block: ; SINK-AFTER-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP37]]) -; SINK-AFTER-NEXT: br label [[BB1:%.*]] -; SINK-AFTER: scalar.ph: ; SINK-AFTER-NEXT: br label [[BB2:%.*]] ; SINK-AFTER: bb1: -; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ] -; SINK-AFTER-NEXT: ret i32 [[VAR]] -; SINK-AFTER: bb2: -; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH:%.*]] ] -; SINK-AFTER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]] -; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] -; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] -; SINK-AFTER-NEXT: store i32 [[VAR3]], ptr [[G]], align 4 -; SINK-AFTER-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 -; SINK-AFTER-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; SINK-AFTER-NEXT: [[VAR9:%.*]] = icmp slt i32 [[VAR3]], 2 -; SINK-AFTER-NEXT: br i1 [[VAR9]], label [[BB1]], label [[BB2]], !prof [[PROF27]] +; SINK-AFTER-NEXT: ret i32 [[TMP39]] ; bb: br label %bb2 @@ -3341,26 +3227,9 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) { ; UNROLL-NO-IC-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[TMP3]], i32 2 -; UNROLL-NO-IC-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-IC: loop: -; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[FOR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15 -; UNROLL-NO-IC-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true -; UNROLL-NO-IC-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true -; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; UNROLL-NO-IC-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]] -; UNROLL-NO-IC-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]] -; UNROLL-NO-IC-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32 -; UNROLL-NO-IC-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32 -; UNROLL-NO-IC-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]] -; UNROLL-NO-IC-NEXT: store i32 0, ptr [[A_GEP]], align 4 -; UNROLL-NO-IC-NEXT: br i1 [[VEC_DEAD]], label [[FOR_END]], label [[LOOP]] ; UNROLL-NO-IC: for.end: -; UNROLL-NO-IC-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[FOR_LCSSA]] +; UNROLL-NO-IC-NEXT: ret i32 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; ; 
UNROLL-NO-VF-LABEL: @sink_after_dead_inst( ; UNROLL-NO-VF-NEXT: entry: @@ -3382,26 +3251,9 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) { ; UNROLL-NO-VF-NEXT: [[TMP11:%.*]] = icmp eq i32 [[TMP7]], 16 ; UNROLL-NO-VF-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; UNROLL-NO-VF: middle.block: -; UNROLL-NO-VF-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-VF: scalar.ph: ; UNROLL-NO-VF-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-VF: loop: -; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-VF-NEXT: [[FOR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], [[LOOP]] ] -; UNROLL-NO-VF-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15 -; UNROLL-NO-VF-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true -; UNROLL-NO-VF-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true -; UNROLL-NO-VF-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; UNROLL-NO-VF-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]] -; UNROLL-NO-VF-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]] -; UNROLL-NO-VF-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32 -; UNROLL-NO-VF-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32 -; UNROLL-NO-VF-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]] -; UNROLL-NO-VF-NEXT: store i32 0, ptr [[A_GEP]], align 4 -; UNROLL-NO-VF-NEXT: br i1 [[VEC_DEAD]], label [[FOR_END]], label [[LOOP]] ; UNROLL-NO-VF: for.end: -; UNROLL-NO-VF-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], [[LOOP]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-VF-NEXT: ret i32 [[FOR_LCSSA]] +; UNROLL-NO-VF-NEXT: ret i32 [[TMP10]] ; ; SINK-AFTER-LABEL: @sink_after_dead_inst( ; SINK-AFTER-NEXT: entry: @@ -3423,26 +3275,9 @@ define i32 @sink_after_dead_inst(ptr %A.ptr, i32 %n) { ; SINK-AFTER-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; SINK-AFTER: middle.block: ; SINK-AFTER-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i32> [[TMP3]], i32 2 -; SINK-AFTER-NEXT: br label [[FOR_END:%.*]] -; SINK-AFTER: scalar.ph: ; SINK-AFTER-NEXT: br label [[LOOP:%.*]] -; SINK-AFTER: loop: -; SINK-AFTER-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; SINK-AFTER-NEXT: [[FOR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[FOR_PREV:%.*]], [[LOOP]] ] -; SINK-AFTER-NEXT: [[CMP:%.*]] = icmp eq i32 [[FOR]], 15 -; SINK-AFTER-NEXT: [[C:%.*]] = icmp eq i1 [[CMP]], true -; SINK-AFTER-NEXT: [[VEC_DEAD:%.*]] = and i1 [[C]], true -; SINK-AFTER-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; SINK-AFTER-NEXT: [[B1:%.*]] = or i16 [[IV_NEXT]], [[IV_NEXT]] -; SINK-AFTER-NEXT: [[B3:%.*]] = and i1 [[CMP]], [[C]] -; SINK-AFTER-NEXT: [[FOR_PREV]] = zext i16 [[B1]] to i32 -; SINK-AFTER-NEXT: [[EXT:%.*]] = zext i1 [[B3]] to i32 -; SINK-AFTER-NEXT: [[A_GEP:%.*]] = getelementptr i32, ptr [[A_PTR]], i16 [[IV]] -; SINK-AFTER-NEXT: store i32 0, ptr [[A_GEP]], align 4 -; SINK-AFTER-NEXT: br i1 [[VEC_DEAD]], label [[FOR_END]], label [[LOOP]] ; SINK-AFTER: for.end: -; SINK-AFTER-NEXT: [[FOR_LCSSA:%.*]] = phi i32 [ [[FOR]], [[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], [[MIDDLE_BLOCK]] ] -; SINK-AFTER-NEXT: ret i32 [[FOR_LCSSA]] +; SINK-AFTER-NEXT: ret i32 [[VECTOR_RECUR_EXTRACT_FOR_PHI]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/flags.ll b/llvm/test/Transforms/LoopVectorize/flags.ll index cb86f5f190b54..2268085e5fe73 100644 --- a/llvm/test/Transforms/LoopVectorize/flags.ll +++ b/llvm/test/Transforms/LoopVectorize/flags.ll @@ -129,20 +129,8 @@ define float 
@fast_math(ptr noalias %s) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP3:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP1]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[S]], i64 [[IV]] -; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD]] = fadd fast float [[RED]], [[TMP4]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 256 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[ADD_LCSSA]] +; CHECK-NEXT: ret float [[TMP3]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/float-induction.ll b/llvm/test/Transforms/LoopVectorize/float-induction.ll index 2b15aae628274..f56699a45320d 100644 --- a/llvm/test/Transforms/LoopVectorize/float-induction.ll +++ b/llvm/test/Transforms/LoopVectorize/float-induction.ll @@ -85,17 +85,15 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N) ; VEC4_INTERL2: vector.ph: ; VEC4_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483640 ; VEC4_INTERL2-NEXT: [[FPINC_INS:%.*]] = insertelement <4 x float> poison, float [[FPINC]], i64 0 +; VEC4_INTERL2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[FPINC_INS]], <4 x float> poison, <4 x i32> zeroinitializer ; VEC4_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = fmul fast float [[FPINC]], [[DOTCAST]] ; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fsub fast float [[INIT:%.*]], [[TMP1]] -; VEC4_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[FPINC_INS]], -; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 -; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[FPINC]], i64 0 +; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLAT]], splat (float 4.000000e+00) +; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 ; VEC4_INTERL2-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = fmul fast <4 x float> [[DOTSPLAT3]], -; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fsub fast <4 x float> [[DOTSPLAT]], [[TMP2]] +; VEC4_INTERL2-NEXT: [[TMP7:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLAT]], +; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fsub fast <4 x float> [[DOTSPLAT3]], [[TMP7]] ; VEC4_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]] ; VEC4_INTERL2: vector.body: ; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -332,17 +330,15 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32 ; VEC4_INTERL2: 
vector.ph: ; VEC4_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483640 ; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[FPINC]], i64 0 +; VEC4_INTERL2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer ; VEC4_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = fmul reassoc float [[FPINC]], [[DOTCAST]] ; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fsub reassoc float [[INIT:%.*]], [[TMP1]] -; VEC4_INTERL2-NEXT: [[MUL:%.*]] = fmul reassoc <4 x float> [[DOTSPLATINSERT2]], -; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = shufflevector <4 x float> [[MUL]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 -; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <4 x float> poison, float [[FPINC]], i64 0 +; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = fmul reassoc <4 x float> [[BROADCAST_SPLAT]], splat (float 4.000000e+00) +; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 ; VEC4_INTERL2-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT1]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = fmul reassoc <4 x float> [[DOTSPLAT3]], -; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fsub reassoc <4 x float> [[DOTSPLAT]], [[TMP2]] +; VEC4_INTERL2-NEXT: [[TMP7:%.*]] = fmul reassoc <4 x float> [[BROADCAST_SPLAT]], +; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fsub reassoc <4 x float> [[DOTSPLAT3]], [[TMP7]] ; VEC4_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]] ; VEC4_INTERL2: vector.body: ; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -834,22 +830,20 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC4_INTERL2: vector.ph: ; VEC4_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP1]], 2147483640 ; VEC4_INTERL2-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 +; VEC4_INTERL2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer ; VEC4_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = fmul fast float [[DOTCAST]], -5.000000e-01 ; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000 ; VEC4_INTERL2-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC4_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]] ; VEC4_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]] -; VEC4_INTERL2-NEXT: [[TMP19:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLATINSERT2]], -; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = shufflevector <4 x float> [[TMP19]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 -; VEC4_INTERL2-NEXT: [[BROADCAST:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 +; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLAT]], splat (float 
4.000000e+00) +; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 ; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT6:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 +; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT6:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 ; VEC4_INTERL2-NEXT: [[DOTSPLAT7:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT6]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[TMP4:%.*]] = fmul fast <4 x float> [[DOTSPLAT7]], -; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fadd fast <4 x float> [[DOTSPLAT]], [[TMP4]] +; VEC4_INTERL2-NEXT: [[TMP19:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLAT]], +; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fadd fast <4 x float> [[DOTSPLAT7]], [[TMP19]] ; VEC4_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]] ; VEC4_INTERL2: vector.body: ; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -860,8 +854,8 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC4_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 16 ; VEC4_INTERL2-NEXT: store <4 x float> [[VEC_IND10]], ptr [[TMP6]], align 4 ; VEC4_INTERL2-NEXT: store <4 x float> [[STEP_ADD11]], ptr [[TMP7]], align 4 -; VEC4_INTERL2-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[VEC_IND10]], [[BROADCAST]] -; VEC4_INTERL2-NEXT: [[TMP9:%.*]] = fadd fast <4 x float> [[STEP_ADD11]], [[BROADCAST]] +; VEC4_INTERL2-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[VEC_IND10]], [[DOTSPLAT]] +; VEC4_INTERL2-NEXT: [[TMP9:%.*]] = fadd fast <4 x float> [[STEP_ADD11]], [[DOTSPLAT]] ; VEC4_INTERL2-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[VEC_IND]], splat (float -5.000000e-01) ; VEC4_INTERL2-NEXT: [[TMP11:%.*]] = fadd fast <4 x float> [[VEC_IND]], splat (float -2.500000e+00) ; VEC4_INTERL2-NEXT: [[TMP12:%.*]] = fadd fast <4 x float> [[TMP10]], [[TMP8]] @@ -1655,11 +1649,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) { ; VEC4_INTERL1-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; VEC4_INTERL1-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; VEC4_INTERL1: middle.block: -; VEC4_INTERL1-NEXT: br label [[EXIT:%.*]] -; VEC4_INTERL1: scalar.ph: ; VEC4_INTERL1-NEXT: br label [[LOOP:%.*]] -; VEC4_INTERL1: loop: -; VEC4_INTERL1-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; VEC4_INTERL1: exit: ; VEC4_INTERL1-NEXT: ret i32 0 ; @@ -1678,11 +1668,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) { ; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; VEC4_INTERL2-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; VEC4_INTERL2: middle.block: -; VEC4_INTERL2-NEXT: br label [[EXIT:%.*]] -; VEC4_INTERL2: scalar.ph: ; VEC4_INTERL2-NEXT: br label [[LOOP:%.*]] -; VEC4_INTERL2: loop: -; VEC4_INTERL2-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; VEC4_INTERL2: exit: ; VEC4_INTERL2-NEXT: ret i32 0 ; @@ -1705,11 +1691,7 @@ define i32 @float_induction_with_dbg_on_fadd(ptr %dst) { ; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 ; VEC1_INTERL2-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; VEC1_INTERL2: middle.block: -; VEC1_INTERL2-NEXT: br label [[EXIT:%.*]] -; VEC1_INTERL2: 
scalar.ph: ; VEC1_INTERL2-NEXT: br label [[LOOP:%.*]] -; VEC1_INTERL2: loop: -; VEC1_INTERL2-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; VEC1_INTERL2: exit: ; VEC1_INTERL2-NEXT: ret i32 0 ; diff --git a/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll b/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll index 93031c757582a..555e695cfa935 100644 --- a/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll +++ b/llvm/test/Transforms/LoopVectorize/float-minmax-instruction-flag.ll @@ -66,22 +66,9 @@ define float @minloopattr(ptr nocapture readonly %arg) #0 { ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> [[TMP4]]) -; CHECK-NEXT: br label [[OUT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[T1:%.*]] = phi i64 [ [[T7:%.*]], [[LOOP]] ], [ 1, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[T2:%.*]] = phi float [ [[T6:%.*]], [[LOOP]] ], [ [[T]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[T3:%.*]] = getelementptr float, ptr [[ARG]], i64 [[T1]] -; CHECK-NEXT: [[T4:%.*]] = load float, ptr [[T3]], align 4 -; CHECK-NEXT: [[T5:%.*]] = fcmp olt float [[T2]], [[T4]] -; CHECK-NEXT: [[T6]] = select i1 [[T5]], float [[T2]], float [[T4]] -; CHECK-NEXT: [[T7]] = add i64 [[T1]], 1 -; CHECK-NEXT: [[T8:%.*]] = icmp eq i64 [[T7]], 65537 -; CHECK-NEXT: br i1 [[T8]], label [[OUT]], label [[LOOP]] ; CHECK: out: -; CHECK-NEXT: [[T6_LCSSA:%.*]] = phi float [ [[T6]], [[LOOP]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[T6_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; top: %t = load float, ptr %arg diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll index 616f1566c207c..5b7c27a0b5f1b 100644 --- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll @@ -113,3 +113,49 @@ loop: exit: ret float %max.next } + +define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { +; CHECK-LABEL: define float @test_fmax_and_fmin( +; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] +; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4 +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]]) +; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], 
%[[LOOP]] ] +; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[SUB]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %min = phi float [ 0.000000e+00, %entry ], [ %min.next, %loop ] + %max = phi float [ 0.000000e+00, %entry ], [ %max.next, %loop ] + %gep.src.0 = getelementptr inbounds nuw float, ptr %src.0, i64 %iv + %gep.src.1 = getelementptr inbounds nuw float, ptr %src.1, i64 %iv + %l.0 = load float, ptr %gep.src.0, align 4 + %l.1 = load float, ptr %gep.src.1, align 4 + %max.next = tail call noundef float @llvm.maxnum.f32(float %max, float %l.0) + %min.next = tail call noundef float @llvm.minnum.f32(float %min, float %l.1) + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop + +exit: + %sub = fsub float %max.next, %min.next + ret float %sub +} diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll index 1a2b233d1079b..8b6a6e1e46101 100644 --- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll @@ -683,3 +683,49 @@ loop: exit: ret float %max.next } + +define float @test_fmax_and_fmax(ptr %src.0, ptr %src.1, i64 %n) { +; CHECK-LABEL: define float @test_fmax_and_fmax( +; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] +; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4 +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]]) +; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[SUB]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %min = phi float [ 0.000000e+00, %entry ], [ %min.next, %loop ] + %max = phi float [ 0.000000e+00, %entry ], [ %max.next, %loop ] + %gep.src.0 = getelementptr inbounds nuw float, ptr %src.0, i64 %iv + %gep.src.1 = getelementptr inbounds nuw float, ptr %src.1, i64 %iv + %l.0 = load float, ptr %gep.src.0, align 4 + %l.1 = load float, ptr %gep.src.1, align 4 + %max.next = tail call noundef float @llvm.maxnum.f32(float %max, float %l.0) + %min.next = tail call noundef float @llvm.minnum.f32(float %min, float %l.1) + %iv.next = 
add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop + +exit: + %sub = fsub float %max.next, %min.next + ret float %sub +} diff --git a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll index c86e27173bffa..f7376a0f8e205 100644 --- a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll +++ b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll @@ -67,23 +67,7 @@ define i32 @test(ptr nocapture %f) #0 { ; UNROLL-NOSIMPLIFY-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; UNROLL-NOSIMPLIFY: middle.block: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NOSIMPLIFY: scalar.ph: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL-NOSIMPLIFY: for.body: -; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[INDVARS_IV]] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; UNROLL-NOSIMPLIFY-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP11]], 100 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: if.then: -; UNROLL-NOSIMPLIFY-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], 20 -; UNROLL-NOSIMPLIFY-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4 -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: for.inc: -; UNROLL-NOSIMPLIFY-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; UNROLL-NOSIMPLIFY-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC:%.*]] ; UNROLL-NOSIMPLIFY: for.end: ; UNROLL-NOSIMPLIFY-NEXT: ret i32 0 ; @@ -449,25 +433,7 @@ define void @minimal_bit_widths(i1 %c) { ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; UNROLL-NOSIMPLIFY: middle.block: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NOSIMPLIFY: scalar.ph: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL-NOSIMPLIFY: for.body: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP9:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 1000, [[SCALAR_PH]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr undef, i64 [[TMP1]] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: if.then: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i8 -; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP6]], ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: for.inc: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP9]] = add nuw nsw i64 [[TMP1]], 1 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP7]] = add i64 [[TMP2]], -1 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC:%.*]] ; 
UNROLL-NOSIMPLIFY: for.end: ; UNROLL-NOSIMPLIFY-NEXT: ret void ; @@ -575,26 +541,7 @@ define void @minimal_bit_widths_with_aliasing_store(i1 %c, ptr %ptr) { ; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; UNROLL-NOSIMPLIFY: middle.block: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NOSIMPLIFY: scalar.ph: -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL-NOSIMPLIFY: for.body: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP1:%.*]] = phi i64 [ [[TMP9:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ 1000, [[SCALAR_PH]] ] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP1]] -; UNROLL-NOSIMPLIFY-NEXT: [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: store i8 0, ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: if.then: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP5:%.*]] = zext i8 [[TMP4]] to i32 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i8 -; UNROLL-NOSIMPLIFY-NEXT: store i8 [[TMP6]], ptr [[TMP3]], align 1 -; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC]] -; UNROLL-NOSIMPLIFY: for.inc: -; UNROLL-NOSIMPLIFY-NEXT: [[TMP9]] = add nuw nsw i64 [[TMP1]], 1 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP7]] = add i64 [[TMP2]], -1 -; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0 -; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NOSIMPLIFY-NEXT: br label [[FOR_INC:%.*]] ; UNROLL-NOSIMPLIFY: for.end: ; UNROLL-NOSIMPLIFY-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll b/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll index f0b32c618947a..ccf05d73945ff 100644 --- a/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll +++ b/llvm/test/Transforms/LoopVectorize/induction-multiple-uses-in-same-instruction.ll @@ -24,17 +24,7 @@ define void @multiple_iv_uses_in_same_instruction(ptr %ptr) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [100 x [100 x i32]], ptr [[PTR]], i64 0, i64 [[IV]], i64 [[IV]] -; CHECK-NEXT: [[T:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: store i32 [[T]], ptr [[GEP]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/induction-step.ll b/llvm/test/Transforms/LoopVectorize/induction-step.ll index b3cb3a77467ee..53d5ac472c892 100644 --- a/llvm/test/Transforms/LoopVectorize/induction-step.ll +++ b/llvm/test/Transforms/LoopVectorize/induction-step.ll @@ -291,18 +291,6 @@ define void @iv_no_binary_op_in_descriptor(i1 %c, ptr %dst) { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT_P:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i64 [[IV]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT:%.*]] = add i64 [[IV]], 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT_P]] = phi i64 [ [[IV_NEXT]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT_P]], 1000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -337,8 +325,6 @@ define void @wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16 ; CHECK-NEXT: [[TMP0:%.*]] = mul i16 [[DOTCAST]], [[O_1]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[O_1]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer ; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <8 x i16> poison, i16 [[O_1]], i64 0 ; CHECK-NEXT: [[DOTSPLAT1:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT1]], <8 x i16> poison, <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP1:%.*]] = mul <8 x i16> , [[DOTSPLAT1]] @@ -350,7 +336,7 @@ define void @wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <8 x i16> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP4:%.*]] = add <8 x i16> [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP4:%.*]] = add <8 x i16> [[VEC_IND]], [[DOTSPLAT1]] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP3]] ; CHECK-NEXT: store <8 x i16> [[TMP4]], ptr [[TMP5]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP3]], 8 @@ -362,11 +348,11 @@ define void @wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i16 [ [[TMP0]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i16 [ [[TMP0]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_2:%.*]] = phi i16 [ [[BC_RESUME_VAL5]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_2:%.*]] = phi i16 [ [[BC_RESUME_VAL3]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[ADD]] = add i16 [[IV_2]], [[O_1]] ; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]] ; CHECK-NEXT: store i16 [[ADD]], ptr [[GEP_DST]], align 2 diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll index 261c336b329fa..cc55a51e134a6 100644 --- a/llvm/test/Transforms/LoopVectorize/induction.ll +++ b/llvm/test/Transforms/LoopVectorize/induction.ll 
@@ -2764,19 +2764,9 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[TMP0]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[B_0:%.*]] = phi i8 [ 0, [[SCALAR_PH]] ], [ [[B_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4 -; CHECK-NEXT: [[B_NEXT]] = add i8 [[B_0]], -1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[B_NEXT]], 0 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP2]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[A_0_AND_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP2]] ; ; IND-LABEL: @i8_loop( ; IND-NEXT: entry: @@ -2789,11 +2779,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; IND-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; IND-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; IND: middle.block: -; IND-NEXT: br label [[EXIT:%.*]] -; IND: scalar.ph: ; IND-NEXT: br label [[LOOP:%.*]] -; IND: loop: -; IND-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; IND: exit: ; IND-NEXT: ret i32 0 ; @@ -2808,11 +2794,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; UNROLL-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; UNROLL-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: br label [[EXIT:%.*]] -; UNROLL: scalar.ph: ; UNROLL-NEXT: br label [[LOOP:%.*]] -; UNROLL: loop: -; UNROLL-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; UNROLL: exit: ; UNROLL-NEXT: ret i32 0 ; @@ -2833,19 +2815,9 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = and <2 x i32> [[TMP1]], [[TMP0]] ; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[EXIT:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-IC: loop: -; UNROLL-NO-IC-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[B_0:%.*]] = phi i8 [ 0, [[SCALAR_PH]] ], [ [[B_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4 -; UNROLL-NO-IC-NEXT: [[B_NEXT]] = add i8 [[B_0]], -1 -; UNROLL-NO-IC-NEXT: [[EC:%.*]] = icmp eq i8 [[B_NEXT]], 0 -; UNROLL-NO-IC-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; UNROLL-NO-IC: exit: -; UNROLL-NO-IC-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[A_0_AND_LCSSA]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP3]] ; ; INTERLEAVE-LABEL: @i8_loop( ; INTERLEAVE-NEXT: entry: @@ -2858,11 +2830,7 @@ define i32 @i8_loop() nounwind readnone ssp uwtable { ; INTERLEAVE-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; INTERLEAVE-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; INTERLEAVE: middle.block: -; INTERLEAVE-NEXT: br label [[EXIT:%.*]] -; INTERLEAVE: scalar.ph: ; INTERLEAVE-NEXT: br label [[LOOP:%.*]] -; INTERLEAVE: loop: -; INTERLEAVE-NEXT: br 
i1 poison, label [[EXIT]], label [[LOOP]] ; INTERLEAVE: exit: ; INTERLEAVE-NEXT: ret i32 0 ; @@ -2897,19 +2865,9 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[TMP0]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[B_0:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[B_0_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4 -; CHECK-NEXT: [[B_0_NEXT]] = add i16 [[B_0]], -1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[B_0_NEXT]], 0 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP2]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[A_0_AND_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP2]] ; ; IND-LABEL: @i16_loop( ; IND-NEXT: entry: @@ -2922,11 +2880,7 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; IND-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 65536 ; IND-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; IND: middle.block: -; IND-NEXT: br label [[EXIT:%.*]] -; IND: scalar.ph: ; IND-NEXT: br label [[LOOP:%.*]] -; IND: loop: -; IND-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; IND: exit: ; IND-NEXT: ret i32 0 ; @@ -2941,11 +2895,7 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; UNROLL-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 65536 ; UNROLL-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: br label [[EXIT:%.*]] -; UNROLL: scalar.ph: ; UNROLL-NEXT: br label [[LOOP:%.*]] -; UNROLL: loop: -; UNROLL-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; UNROLL: exit: ; UNROLL-NEXT: ret i32 0 ; @@ -2966,19 +2916,9 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = and <2 x i32> [[TMP1]], [[TMP0]] ; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[EXIT:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-IC: loop: -; UNROLL-NO-IC-NEXT: [[A_0:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[A_0_AND:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[B_0:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[B_0_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[A_0_AND]] = and i32 [[A_0]], 4 -; UNROLL-NO-IC-NEXT: [[B_0_NEXT]] = add i16 [[B_0]], -1 -; UNROLL-NO-IC-NEXT: [[EC:%.*]] = icmp eq i16 [[B_0_NEXT]], 0 -; UNROLL-NO-IC-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; UNROLL-NO-IC: exit: -; UNROLL-NO-IC-NEXT: [[A_0_AND_LCSSA:%.*]] = phi i32 [ [[A_0_AND]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[A_0_AND_LCSSA]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP3]] ; ; INTERLEAVE-LABEL: @i16_loop( ; INTERLEAVE-NEXT: entry: @@ -2991,11 +2931,7 @@ define i32 @i16_loop() nounwind readnone ssp uwtable { ; INTERLEAVE-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 65536 ; INTERLEAVE-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; INTERLEAVE: middle.block: -; INTERLEAVE-NEXT: br label 
[[EXIT:%.*]] -; INTERLEAVE: scalar.ph: ; INTERLEAVE-NEXT: br label [[LOOP:%.*]] -; INTERLEAVE: loop: -; INTERLEAVE-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; INTERLEAVE: exit: ; INTERLEAVE-NEXT: ret i32 0 ; @@ -5025,28 +4961,9 @@ define i32 @PR32419(i32 %a, i16 %b) { ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP15]]) -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I:%.*]] = phi i32 [ -20, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[VAR0:%.*]] = phi i32 [ [[A]], [[SCALAR_PH]] ], [ [[VAR6:%.*]], [[FOR_INC]] ] -; CHECK-NEXT: [[VAR1:%.*]] = trunc i32 [[I]] to i16 -; CHECK-NEXT: [[VAR2:%.*]] = icmp eq i16 [[VAR1]], 0 -; CHECK-NEXT: br i1 [[VAR2]], label [[FOR_INC]], label [[FOR_COND:%.*]] -; CHECK: for.cond: -; CHECK-NEXT: [[VAR3:%.*]] = urem i16 [[B]], [[VAR1]] -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[VAR4:%.*]] = phi i16 [ [[VAR3]], [[FOR_COND]] ], [ 0, [[FOR_BODY]] ] -; CHECK-NEXT: [[VAR5:%.*]] = sext i16 [[VAR4]] to i32 -; CHECK-NEXT: [[VAR6]] = or i32 [[VAR0]], [[VAR5]] -; CHECK-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[I_NEXT]], 0 -; CHECK-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: -; CHECK-NEXT: [[VAR7:%.*]] = phi i32 [ [[VAR6]], [[FOR_INC]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[VAR7]] +; CHECK-NEXT: ret i32 [[TMP17]] ; ; IND-LABEL: @PR32419( ; IND-NEXT: entry: @@ -5086,15 +5003,7 @@ define i32 @PR32419(i32 %a, i16 %b) { ; IND-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], 20 ; IND-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] ; IND: middle.block: -; IND-NEXT: br label [[FOR_END:%.*]] -; IND: scalar.ph: -; IND-NEXT: br label [[FOR_BODY:%.*]] -; IND: for.body: -; IND-NEXT: br i1 poison, label [[FOR_INC:%.*]], label [[FOR_COND:%.*]] -; IND: for.cond: -; IND-NEXT: br label [[FOR_INC]] -; IND: for.inc: -; IND-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] +; IND-NEXT: br label [[FOR_INC:%.*]] ; IND: for.end: ; IND-NEXT: [[VAR7:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP14]]) ; IND-NEXT: ret i32 [[VAR7]] @@ -5160,15 +5069,7 @@ define i32 @PR32419(i32 %a, i16 %b) { ; UNROLL-NEXT: [[TMP28:%.*]] = icmp eq i32 [[INDEX_NEXT]], 20 ; UNROLL-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: br label [[FOR_END:%.*]] -; UNROLL: scalar.ph: -; UNROLL-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL: for.body: -; UNROLL-NEXT: br i1 poison, label [[FOR_INC:%.*]], label [[FOR_COND:%.*]] -; UNROLL: for.cond: -; UNROLL-NEXT: br label [[FOR_INC]] -; UNROLL: for.inc: -; UNROLL-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NEXT: br label [[FOR_INC:%.*]] ; UNROLL: for.end: ; UNROLL-NEXT: [[BIN_RDX:%.*]] = or <2 x i32> [[TMP27]], [[TMP26]] ; UNROLL-NEXT: [[VAR7:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[BIN_RDX]]) @@ -5239,28 +5140,9 @@ define i32 @PR32419(i32 %a, i16 %b) { ; UNROLL-NO-IC: middle.block: ; UNROLL-NO-IC-NEXT: [[BIN_RDX:%.*]] = or <2 x i32> [[TMP29]], [[TMP28]] ; UNROLL-NO-IC-NEXT: [[TMP31:%.*]] = call i32 
@llvm.vector.reduce.or.v2i32(<2 x i32> [[BIN_RDX]]) -; UNROLL-NO-IC-NEXT: br label [[FOR_END:%.*]] -; UNROLL-NO-IC: scalar.ph: -; UNROLL-NO-IC-NEXT: br label [[FOR_BODY:%.*]] -; UNROLL-NO-IC: for.body: -; UNROLL-NO-IC-NEXT: [[I:%.*]] = phi i32 [ -20, [[SCALAR_PH:%.*]] ], [ [[I_NEXT:%.*]], [[FOR_INC:%.*]] ] -; UNROLL-NO-IC-NEXT: [[VAR0:%.*]] = phi i32 [ [[A]], [[SCALAR_PH]] ], [ [[VAR6:%.*]], [[FOR_INC]] ] -; UNROLL-NO-IC-NEXT: [[VAR1:%.*]] = trunc i32 [[I]] to i16 -; UNROLL-NO-IC-NEXT: [[VAR2:%.*]] = icmp eq i16 [[VAR1]], 0 -; UNROLL-NO-IC-NEXT: br i1 [[VAR2]], label [[FOR_INC]], label [[FOR_COND:%.*]] -; UNROLL-NO-IC: for.cond: -; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = urem i16 [[B]], [[VAR1]] -; UNROLL-NO-IC-NEXT: br label [[FOR_INC]] -; UNROLL-NO-IC: for.inc: -; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i16 [ [[VAR3]], [[FOR_COND]] ], [ 0, [[FOR_BODY]] ] -; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = sext i16 [[VAR4]] to i32 -; UNROLL-NO-IC-NEXT: [[VAR6]] = or i32 [[VAR0]], [[VAR5]] -; UNROLL-NO-IC-NEXT: [[I_NEXT]] = add nsw i32 [[I]], 1 -; UNROLL-NO-IC-NEXT: [[COND:%.*]] = icmp eq i32 [[I_NEXT]], 0 -; UNROLL-NO-IC-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]] +; UNROLL-NO-IC-NEXT: br label [[FOR_INC:%.*]] ; UNROLL-NO-IC: for.end: -; UNROLL-NO-IC-NEXT: [[VAR7:%.*]] = phi i32 [ [[VAR6]], [[FOR_INC]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] -; UNROLL-NO-IC-NEXT: ret i32 [[VAR7]] +; UNROLL-NO-IC-NEXT: ret i32 [[TMP31]] ; ; INTERLEAVE-LABEL: @PR32419( ; INTERLEAVE-NEXT: entry: @@ -5818,23 +5700,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr % ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]] -; CHECK-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr i32, ptr [[DST]], i32 [[IV_TRUNC]] -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[IV_TRUNC]], [[MUL]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[DST_GEP]], align 4 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[TRUNC_IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -5862,11 +5728,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr % ; IND-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; IND-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] ; IND: middle.block: -; IND-NEXT: br label [[EXIT:%.*]] -; IND: scalar.ph: ; IND-NEXT: br label [[LOOP:%.*]] -; IND: loop: -; IND-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; IND: exit: ; IND-NEXT: ret void ; @@ -5900,11 +5762,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr % ; UNROLL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; UNROLL-NEXT: br i1 [[TMP10]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: br label [[EXIT:%.*]] -; UNROLL: scalar.ph: ; UNROLL-NEXT: br label [[LOOP:%.*]] -; UNROLL: loop: -; UNROLL-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; UNROLL: exit: ; UNROLL-NEXT: ret void ; @@ -5937,23 +5795,7 @@ define void @pr52460_first_order_recurrence_truncated_iv(ptr noalias %src, ptr % ; UNROLL-NO-IC-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; UNROLL-NO-IC-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] ; UNROLL-NO-IC: middle.block: -; UNROLL-NO-IC-NEXT: br label [[EXIT:%.*]] -; UNROLL-NO-IC: scalar.ph: ; UNROLL-NO-IC-NEXT: br label [[LOOP:%.*]] -; UNROLL-NO-IC: loop: -; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[TRUNC_IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TRUNC_IV_NEXT:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_TRUNC:%.*]], [[LOOP]] ] -; UNROLL-NO-IC-NEXT: [[LV:%.*]] = load i32, ptr [[SRC]], align 4 -; UNROLL-NO-IC-NEXT: [[MUL:%.*]] = mul nsw i32 [[LV]], [[RECUR]] -; UNROLL-NO-IC-NEXT: [[TRUNC_IV_NEXT]] = add i32 [[TRUNC_IV]], 1 -; UNROLL-NO-IC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; UNROLL-NO-IC-NEXT: [[IV_TRUNC]] = trunc i64 [[IV]] to i32 -; UNROLL-NO-IC-NEXT: [[DST_GEP:%.*]] = getelementptr i32, ptr [[DST]], i32 [[IV_TRUNC]] -; UNROLL-NO-IC-NEXT: [[ADD:%.*]] = add i32 [[IV_TRUNC]], [[MUL]] -; UNROLL-NO-IC-NEXT: store i32 [[ADD]], ptr [[DST_GEP]], align 4 -; UNROLL-NO-IC-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[TRUNC_IV_NEXT]], 100 -; UNROLL-NO-IC-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]] ; UNROLL-NO-IC: exit: ; UNROLL-NO-IC-NEXT: ret void ; @@ -6211,12 +6053,10 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; UNROLL: vector.ph: ; UNROLL-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -4 ; UNROLL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0 +; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; UNROLL-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 ; UNROLL-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]] -; UNROLL-NEXT: [[TMP15:%.*]] = shl <2 x i32> [[BROADCAST_SPLATINSERT]], -; UNROLL-NEXT: [[TMP16:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> poison, <2 x i32> zeroinitializer -; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0 -; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer +; UNROLL-NEXT: [[TMP16:%.*]] = shl <2 x i32> [[DOTSPLAT]], splat (i32 1) ; UNROLL-NEXT: [[TMP17:%.*]] = mul nuw <2 x i32> [[DOTSPLAT]], ; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]] ; UNROLL: vector.body: @@ -6293,9 +6133,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; UNROLL-NO-IC-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 ; UNROLL-NO-IC-NEXT: [[IND_END:%.*]] = mul i32 [[DOTCAST]], [[STEP]] ; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = mul <2 x i32> splat (i32 2), [[BROADCAST_SPLAT]] -; UNROLL-NO-IC-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0 -; UNROLL-NO-IC-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer -; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = mul <2 
x i32> , [[DOTSPLAT]] +; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = mul <2 x i32> , [[BROADCAST_SPLAT]] ; UNROLL-NO-IC-NEXT: [[INDUCTION:%.*]] = add <2 x i32> zeroinitializer, [[TMP18]] ; UNROLL-NO-IC-NEXT: br label [[VECTOR_BODY:%.*]] ; UNROLL-NO-IC: vector.body: @@ -6365,12 +6203,10 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; INTERLEAVE: vector.ph: ; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -8 ; INTERLEAVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STEP]], i64 0 +; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; INTERLEAVE-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 ; INTERLEAVE-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]] -; INTERLEAVE-NEXT: [[TMP15:%.*]] = shl <4 x i32> [[BROADCAST_SPLATINSERT]], -; INTERLEAVE-NEXT: [[TMP16:%.*]] = shufflevector <4 x i32> [[TMP15]], <4 x i32> poison, <4 x i32> zeroinitializer -; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STEP]], i64 0 -; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; INTERLEAVE-NEXT: [[TMP16:%.*]] = shl <4 x i32> [[DOTSPLAT]], splat (i32 2) ; INTERLEAVE-NEXT: [[TMP17:%.*]] = mul <4 x i32> [[DOTSPLAT]], ; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] ; INTERLEAVE: vector.body: diff --git a/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll b/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll index 9222af933475b..8975c058c6b79 100644 --- a/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll @@ -18,23 +18,9 @@ define i32 @one_direct_branch(ptr %src) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP3]], i32 3 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC_GEP]], align 4 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 25500, [[LV]] -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[PHI_XOR:%.*]] = phi i32 [ [[XOR]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[PHI_XOR]], [[LOOP_LATCH]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[XOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; entry: br label %loop @@ -73,26 +59,9 @@ define i32 @two_direct_branch(ptr %src) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP3]], i32 3 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: 
[[SRC_GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC_GEP]], align 4 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 25500, [[LV]] -; CHECK-NEXT: br label [[BB:%.*]] -; CHECK: bb: -; CHECK-NEXT: [[PHI_XOR_1:%.*]] = phi i32 [ [[XOR]], [[LOOP]] ] -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[PHI_XOR:%.*]] = phi i32 [ [[PHI_XOR_1]], [[BB]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[PHI_XOR]], [[LOOP_LATCH]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[XOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; entry: br label %loop @@ -141,26 +110,9 @@ define i32 @cond_branch(i32 %a, ptr %src) { ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[PREDPHI]], i32 3 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[LV:%.*]] = load i32, ptr [[SRC_GEP]], align 4 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 25500, [[LV]] -; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_LATCH]], label [[THEN:%.*]] -; CHECK: then: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[PHI_XOR:%.*]] = phi i32 [ [[XOR]], [[LOOP]] ], [ 10, [[THEN]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[PHI_XOR]], [[LOOP_LATCH]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[XOR_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP7]] ; entry: br label %loop @@ -205,18 +157,9 @@ define i32 @optimizable_trunc_used_outside() { ; CHECK-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[VEC_IND]], i32 3 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT_I_I:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT_I_I]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[IV_TRUNC_LCSSA:%.*]] = phi i32 [ [[IV_TRUNC]], [[LOOP]] ], [ [[TMP1]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[IV_TRUNC_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP1]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll b/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll index 1128dd354f659..2c97bb7622740 100644 --- a/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll +++ 
b/llvm/test/Transforms/LoopVectorize/interleave-with-i65-induction.ll @@ -33,19 +33,6 @@ define void @i65_induction_with_negative_step(ptr %dst) { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_I65:%.*]] = phi i65 [ 0, %[[SCALAR_PH]] ], [ [[IV_I65_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TRUNC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[TRUNC]] = trunc i65 [[IV_I65]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TRUNC]] -; CHECK-NEXT: store i64 [[FOR]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: [[IV_I65_NEXT]] = add i65 [[IV_I65]], -1 -; CHECK-NEXT: br i1 [[ICMP]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll index 85e7477837cde..eca9c1fe74c21 100644 --- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll +++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll @@ -27,23 +27,6 @@ define void @gep_for_first_member_does_not_dominate_insert_point(ptr %str, ptr n ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV2_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[OR_1:%.*]] = or disjoint i64 [[IV2]], 1 -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[STR]], i64 [[OR_1]] -; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[GEP1]], align 1 -; CHECK-NEXT: [[GEP0:%.*]] = getelementptr i8, ptr [[STR]], i64 [[IV2]] -; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[GEP0]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP9]], [[TMP10]] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[GEP_DST]], align 1 -; CHECK-NEXT: [[IV2_NEXT]] = add i64 [[IV2]], 2 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll index 4dc9cfd5264bc..bd0fd77e7c391 100644 --- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll +++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-metadata.ll @@ -45,23 +45,6 @@ define void @merge_tbaa_interleave_group(ptr nocapture readonly %p, ptr noalias ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label 
%[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_VEC4R]], ptr [[P]], i64 [[IV]], i32 0 -; CHECK-NEXT: [[TMP19:%.*]] = load double, ptr [[X]], align 8, !tbaa [[TBAA0]] -; CHECK-NEXT: [[MUL:%.*]] = fmul double [[TMP19]], 2.000000e+00 -; CHECK-NEXT: [[X4:%.*]] = getelementptr inbounds [20 x %struct.Vec2r], ptr [[CP]], i64 0, i64 [[IV]], i32 0 -; CHECK-NEXT: store double [[MUL]], ptr [[X4]], align 8, !tbaa [[TBAA10:![0-9]+]] -; CHECK-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_VEC4R]], ptr [[P]], i64 [[IV]], i32 1 -; CHECK-NEXT: [[TMP20:%.*]] = load double, ptr [[Y]], align 8, !tbaa [[TBAA5]] -; CHECK-NEXT: [[MUL7:%.*]] = fmul double [[TMP20]], 3.000000e+00 -; CHECK-NEXT: [[Y10:%.*]] = getelementptr inbounds [20 x %struct.Vec2r], ptr [[CP]], i64 0, i64 [[IV]], i32 1 -; CHECK-NEXT: store double [[MUL7]], ptr [[Y10]], align 8, !tbaa [[TBAA12:![0-9]+]] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 4 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -126,20 +109,20 @@ define void @ir_tbaa_different(ptr %base, ptr %end, ptr %src) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[BASE]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[SRC]], align 4, !alias.scope [[META13:![0-9]+]] +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[SRC]], align 4, !alias.scope [[META10:![0-9]+]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP11]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4, !alias.scope [[META16:![0-9]+]], !noalias [[META13]] +; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4, !alias.scope [[META13:![0-9]+]], !noalias [[META10]] ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x float> [[WIDE_VEC]], <4 x float> poison, <2 x i32> ; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <4 x float> [[WIDE_VEC]], <4 x float> poison, <2 x i32> ; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x float> [[STRIDED_VEC]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP7:%.*]] = fmul <2 x float> [[STRIDED_VEC3]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> [[TMP7]], <4 x i32> ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> poison, <4 x i32> -; CHECK-NEXT: store <4 x float> [[INTERLEAVED_VEC]], ptr [[NEXT_GEP]], align 4, !alias.scope [[META16]], !noalias [[META13]] +; CHECK-NEXT: store <4 x float> [[INTERLEAVED_VEC]], ptr [[NEXT_GEP]], align 4, !alias.scope [[META13]], !noalias [[META10]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label 
%[[EXIT:.*]], label %[[SCALAR_PH]] @@ -152,9 +135,9 @@ define void @ir_tbaa_different(ptr %base, ptr %end, ptr %src) { ; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr inbounds nuw i8, ptr [[PTR_IV]], i64 8 ; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[PTR_IV]], align 4 ; CHECK-NEXT: [[MUL_1:%.*]] = fmul float [[L_1]], [[L_INVAR]] -; CHECK-NEXT: store float [[MUL_1]], ptr [[PTR_IV]], align 4, !tbaa [[TBAA10]] +; CHECK-NEXT: store float [[MUL_1]], ptr [[PTR_IV]], align 4, !tbaa [[TBAA16:![0-9]+]] ; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR_IV]], i64 4 -; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[GEP_1]], align 4, !tbaa [[TBAA12]] +; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[GEP_1]], align 4, !tbaa [[TBAA18:![0-9]+]] ; CHECK-NEXT: [[MUL_2:%.*]] = fmul float [[L_2]], [[L_INVAR]] ; CHECK-NEXT: store float [[MUL_2]], ptr [[GEP_1]], align 4 ; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]] @@ -278,15 +261,15 @@ exit: ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META8:![0-9]+]], [[META9:![0-9]+]]} ; CHECK: [[META8]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META9]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[TBAA10]] = !{[[META11:![0-9]+]], [[META2]], i64 0} -; CHECK: [[META11]] = !{!"Vec2r", [[META2]], i64 0, [[META2]], i64 8} -; CHECK: [[TBAA12]] = !{[[META11]], [[META2]], i64 8} +; CHECK: [[META10]] = !{[[META11:![0-9]+]]} +; CHECK: [[META11]] = distinct !{[[META11]], [[META12:![0-9]+]]} +; CHECK: [[META12]] = distinct !{[[META12]], !"LVerDomain"} ; CHECK: [[META13]] = !{[[META14:![0-9]+]]} -; CHECK: [[META14]] = distinct !{[[META14]], [[META15:![0-9]+]]} -; CHECK: [[META15]] = distinct !{[[META15]], !"LVerDomain"} -; CHECK: [[META16]] = !{[[META17:![0-9]+]]} -; CHECK: [[META17]] = distinct !{[[META17]], [[META15]]} -; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META8]], [[META9]]} +; CHECK: [[META14]] = distinct !{[[META14]], [[META12]]} +; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META8]], [[META9]]} +; CHECK: [[TBAA16]] = !{[[META17:![0-9]+]], [[META2]], i64 0} +; CHECK: [[META17]] = !{!"Vec2r", [[META2]], i64 0, [[META2]], i64 8} +; CHECK: [[TBAA18]] = !{[[META17]], [[META2]], i64 8} ; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META8]]} ; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META8]], [[META9]]} ; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META9]], [[META8]]} diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll index 4885dd2e33815..b4cad1142134c 100644 --- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll @@ -47,11 +47,7 @@ define void @test_array_load2_store2(i32 %C, i32 %D) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -124,11 +120,7 @@ define void @test_struct_array_load3_store3() { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: 
for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -206,11 +198,7 @@ define i32 @test_struct_load4(ptr nocapture readonly %S) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[SUB8_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: ret i32 [[SUB8_LCSSA]] @@ -279,13 +267,9 @@ define void @test_struct_store4(ptr noalias nocapture readonly %A, ptr noalias n ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: br label %for.body @@ -365,13 +349,9 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_COND_CLEANUP]] ; entry: br label %for.body @@ -619,11 +599,7 @@ define void @load_gap_reverse(ptr noalias nocapture %P1, ptr noalias nocapture % ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_EXIT]] ; CHECK: for.exit: ; CHECK-NEXT: ret void ; @@ -681,13 +657,9 @@ define void @mixed_load2_store2(ptr noalias nocapture readonly %A, ptr noalias n ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_COND_CLEANUP]] ; entry: br label %for.body @@ -753,13 +725,9 @@ define void @mixed_load3_store3(ptr nocapture %A) { ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: br label 
%for.body @@ -836,17 +804,13 @@ define void @int_float_struct(ptr nocapture readonly %A) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]]) ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]]) +; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]]) ; CHECK-NEXT: store i32 [[ADD_LCSSA]], ptr @SA, align 4 ; CHECK-NEXT: store float [[ADD3_LCSSA]], ptr @SB, align 4 ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/is_fpclass.ll b/llvm/test/Transforms/LoopVectorize/is_fpclass.ll index ab70c14a0be61..6c4ee5b7359dc 100644 --- a/llvm/test/Transforms/LoopVectorize/is_fpclass.ll +++ b/llvm/test/Transforms/LoopVectorize/is_fpclass.ll @@ -20,19 +20,7 @@ define void @d() { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[I7:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[I3:%.*]] = load float, ptr null, align 4 -; CHECK-NEXT: [[I4:%.*]] = getelementptr float, ptr @d, i64 [[I]] -; CHECK-NEXT: [[I5:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[I3]], i32 0) -; CHECK-NEXT: [[I6:%.*]] = select i1 [[I5]], float 0.000000e+00, float 1.000000e+00 -; CHECK-NEXT: store float [[I6]], ptr [[I4]], align 4 -; CHECK-NEXT: [[I7]] = add i64 [[I]], 1 -; CHECK-NEXT: [[I8:%.*]] = icmp eq i64 [[I7]], 128 -; CHECK-NEXT: br i1 [[I8]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll index e662039ee6eff..70b1ea13677b8 100644 --- a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll +++ b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll @@ -31,21 +31,8 @@ define i64 @select_decreasing_induction_icmp_const_start(ptr %a) { ; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP6]], 9223372036854775807 ; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP6]], i64 331 ; IC1VF4-NEXT: br label %[[EXIT:.*]] -; IC1VF4: [[SCALAR_PH:.*]]: -; IC1VF4-NEXT: br label %[[LOOP:.*]] -; IC1VF4: [[LOOP]]: -; IC1VF4-NEXT: [[IV:%.*]] = phi i64 [ 19999, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC1VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC1VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 
[[IV]], 0 -; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC1VF4: [[EXIT]]: -; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC1VF4-NEXT: ret i64 [[SPEC_SELECT_LCSSA]] +; IC1VF4-NEXT: ret i64 [[RDX_SELECT]] ; ; IC4VF4-LABEL: define i64 @select_decreasing_induction_icmp_const_start( ; IC4VF4-SAME: ptr [[A:%.*]]) { @@ -101,21 +88,8 @@ define i64 @select_decreasing_induction_icmp_const_start(ptr %a) { ; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP18]], 9223372036854775807 ; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP18]], i64 331 ; IC4VF4-NEXT: br label %[[EXIT:.*]] -; IC4VF4: [[SCALAR_PH:.*]]: -; IC4VF4-NEXT: br label %[[LOOP:.*]] -; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i64 [ 19999, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC4VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC4VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF4: [[EXIT]]: -; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF4-NEXT: ret i64 [[SPEC_SELECT_LCSSA]] +; IC4VF4-NEXT: ret i64 [[RDX_SELECT]] ; ; IC4VF1-LABEL: define i64 @select_decreasing_induction_icmp_const_start( ; IC4VF1-SAME: ptr [[A:%.*]]) { @@ -159,21 +133,8 @@ define i64 @select_decreasing_induction_icmp_const_start(ptr %a) { ; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[RDX_MINMAX5]], 9223372036854775807 ; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[RDX_MINMAX5]], i64 331 ; IC4VF1-NEXT: br label %[[EXIT:.*]] -; IC4VF1: [[SCALAR_PH:.*]]: -; IC4VF1-NEXT: br label %[[LOOP:.*]] -; IC4VF1: [[LOOP]]: -; IC4VF1-NEXT: [[IV:%.*]] = phi i64 [ 19999, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC4VF1-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC4VF1-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF1: [[EXIT]]: -; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF1-NEXT: ret i64 [[SPEC_SELECT_LCSSA]] +; IC4VF1-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop @@ -227,21 +188,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) { ; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP7]], 32767 ; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP7]], i16 0 ; IC1VF4-NEXT: br label %[[EXIT:.*]] -; IC1VF4: [[SCALAR_PH:.*]]: -; IC1VF4-NEXT: br label %[[LOOP:.*]] -; IC1VF4: [[LOOP]]: -; IC1VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ 
[[IV_NEXT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC1VF4-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1 -; IC1VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]] -; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC1VF4: [[EXIT]]: -; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC1VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC1VF4-NEXT: ret i16 [[RDX_SELECT]] ; ; IC4VF4-LABEL: define i16 @select_decreasing_induction_icmp_table_i16( ; IC4VF4-SAME: i16 noundef [[VAL:%.*]]) { @@ -460,21 +408,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) { ; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP116]], 32767 ; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP116]], i16 0 ; IC4VF4-NEXT: br label %[[EXIT:.*]] -; IC4VF4: [[SCALAR_PH:.*]]: -; IC4VF4-NEXT: br label %[[LOOP:.*]] -; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1 -; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]] -; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF4: [[EXIT]]: -; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC4VF4-NEXT: ret i16 [[RDX_SELECT]] ; ; IC4VF1-LABEL: define i16 @select_decreasing_induction_icmp_table_i16( ; IC4VF1-SAME: i16 noundef [[VAL:%.*]]) { @@ -523,21 +458,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) { ; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[RDX_MINMAX5]], 32767 ; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[RDX_MINMAX5]], i16 0 ; IC4VF1-NEXT: br label %[[EXIT:.*]] -; IC4VF1: [[SCALAR_PH:.*]]: -; IC4VF1-NEXT: br label %[[LOOP:.*]] -; IC4VF1: [[LOOP]]: -; IC4VF1-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC4VF1-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1 -; IC4VF1-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]] -; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; 
IC4VF1: [[EXIT]]: -; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF1-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC4VF1-NEXT: ret i16 [[RDX_SELECT]] ; entry: br label %loop @@ -592,21 +514,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) { ; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP7]], 32767 ; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP7]], i16 0 ; IC1VF4-NEXT: br label %[[EXIT:.*]] -; IC1VF4: [[SCALAR_PH:.*]]: -; IC1VF4-NEXT: br label %[[LOOP:.*]] -; IC1VF4: [[LOOP]]: -; IC1VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC1VF4-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1 -; IC1VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]] -; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC1VF4: [[EXIT]]: -; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC1VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC1VF4-NEXT: ret i16 [[RDX_SELECT]] ; ; IC4VF4-LABEL: define i16 @select_decreasing_induction_icmp_table_half( ; IC4VF4-SAME: half noundef [[VAL:%.*]]) { @@ -825,21 +734,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) { ; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[TMP116]], 32767 ; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[TMP116]], i16 0 ; IC4VF4-NEXT: br label %[[EXIT:.*]] -; IC4VF4: [[SCALAR_PH:.*]]: -; IC4VF4-NEXT: br label %[[LOOP:.*]] -; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1 -; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]] -; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF4: [[EXIT]]: -; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF4-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC4VF4-NEXT: ret i16 [[RDX_SELECT]] ; ; IC4VF1-LABEL: define i16 @select_decreasing_induction_icmp_table_half( ; IC4VF1-SAME: half noundef [[VAL:%.*]]) { @@ -888,21 +784,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) { ; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i16 [[RDX_MINMAX5]], 32767 ; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i16 [[RDX_MINMAX5]], i16 0 ; IC4VF1-NEXT: br label %[[EXIT:.*]] -; IC4VF1: [[SCALAR_PH:.*]]: -; IC4VF1-NEXT: br label %[[LOOP:.*]] -; IC4VF1: [[LOOP]]: -; IC4VF1-NEXT: 
[[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] -; IC4VF1-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1 -; IC4VF1-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]] -; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i16 [[IV]], -1 -; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_TABLE_VAL]], i16 [[IV_NEXT]], i16 [[RDX]] -; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i16 [[IV_NEXT]], 0 -; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF1: [[EXIT]]: -; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i16 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF1-NEXT: ret i16 [[SPEC_SELECT_LCSSA]] +; IC4VF1-NEXT: ret i16 [[RDX_SELECT]] ; entry: br label %loop @@ -954,21 +837,8 @@ define i64 @select_decreasing_induction_icmp_iv_unsigned(ptr %a) { ; IC1VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP6]], -1 ; IC1VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP6]], i64 331 ; IC1VF4-NEXT: br label %[[EXIT:.*]] -; IC1VF4: [[SCALAR_PH:.*]]: -; IC1VF4-NEXT: br label %[[LOOP:.*]] -; IC1VF4: [[LOOP]]: -; IC1VF4-NEXT: [[IV:%.*]] = phi i64 [ 9223372036854775807, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC1VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC1VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC1VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC1VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC1VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC1VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC1VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC1VF4: [[EXIT]]: -; IC1VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC1VF4-NEXT: ret i64 [[SPEC_SELECT_LCSSA]] +; IC1VF4-NEXT: ret i64 [[RDX_SELECT]] ; ; IC4VF4-LABEL: define i64 @select_decreasing_induction_icmp_iv_unsigned( ; IC4VF4-SAME: ptr [[A:%.*]]) { @@ -1024,21 +894,8 @@ define i64 @select_decreasing_induction_icmp_iv_unsigned(ptr %a) { ; IC4VF4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[TMP18]], -1 ; IC4VF4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[TMP18]], i64 331 ; IC4VF4-NEXT: br label %[[EXIT:.*]] -; IC4VF4: [[SCALAR_PH:.*]]: -; IC4VF4-NEXT: br label %[[LOOP:.*]] -; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i64 [ 9223372036854775807, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC4VF4-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC4VF4-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC4VF4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC4VF4-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC4VF4-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC4VF4-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF4: [[EXIT]]: -; IC4VF4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF4-NEXT: ret i64 
[[SPEC_SELECT_LCSSA]] +; IC4VF4-NEXT: ret i64 [[RDX_SELECT]] ; ; IC4VF1-LABEL: define i64 @select_decreasing_induction_icmp_iv_unsigned( ; IC4VF1-SAME: ptr [[A:%.*]]) { @@ -1082,21 +939,8 @@ define i64 @select_decreasing_induction_icmp_iv_unsigned(ptr %a) { ; IC4VF1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i64 [[RDX_MINMAX5]], -1 ; IC4VF1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i64 [[RDX_MINMAX5]], i64 331 ; IC4VF1-NEXT: br label %[[EXIT:.*]] -; IC4VF1: [[SCALAR_PH:.*]]: -; IC4VF1-NEXT: br label %[[LOOP:.*]] -; IC4VF1: [[LOOP]]: -; IC4VF1-NEXT: [[IV:%.*]] = phi i64 [ 9223372036854775807, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[RDX:%.*]] = phi i64 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] -; IC4VF1-NEXT: [[GEP_A_IV:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; IC4VF1-NEXT: [[LD_A:%.*]] = load i64, ptr [[GEP_A_IV]], align 8 -; IC4VF1-NEXT: [[CMP_A_3:%.*]] = icmp sgt i64 [[LD_A]], 3 -; IC4VF1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP_A_3]], i64 [[IV]], i64 [[RDX]] -; IC4VF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; IC4VF1-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV]], 0 -; IC4VF1-NEXT: br i1 [[EXIT_COND]], label %[[EXIT]], label %[[LOOP]] ; IC4VF1: [[EXIT]]: -; IC4VF1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[LOOP]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; IC4VF1-NEXT: ret i64 [[SPEC_SELECT_LCSSA]] +; IC4VF1-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll index 0ace54731dc2d..b991d58eb2b8d 100644 --- a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll +++ b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-trunc.ll @@ -261,22 +261,8 @@ define i32 @select_icmp_const_truncated_iv_const_exit(ptr %a) { ; CHECK-VF4IC1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP6]], -2147483648 ; CHECK-VF4IC1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP6]], i32 331 ; CHECK-VF4IC1-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC1: [[SCALAR_PH:.*]]: -; CHECK-VF4IC1-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC1: [[FOR_BODY]]: -; CHECK-VF4IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC1-NEXT: [[TMP7:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-VF4IC1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP7]], 3 -; CHECK-VF4IC1-NEXT: [[TMP8:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP8]], i32 [[RDX]] -; CHECK-VF4IC1-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 20000 -; CHECK-VF4IC1-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC1: [[EXIT]]: -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC1-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC1-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF4IC4-LABEL: define i32 @select_icmp_const_truncated_iv_const_exit( ; CHECK-VF4IC4-SAME: ptr [[A:%.*]]) { @@ -322,22 +308,8 @@ define i32 @select_icmp_const_truncated_iv_const_exit(ptr %a) { ; CHECK-VF4IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP15]], -2147483648 ; CHECK-VF4IC4-NEXT: 
[[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP15]], i32 331 ; CHECK-VF4IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC4: [[SCALAR_PH:.*]]: -; CHECK-VF4IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC4: [[FOR_BODY]]: -; CHECK-VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC4-NEXT: [[TMP16:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-VF4IC4-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP16]], 3 -; CHECK-VF4IC4-NEXT: [[TMP17:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP17]], i32 [[RDX]] -; CHECK-VF4IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 20000 -; CHECK-VF4IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC4: [[EXIT]]: -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC4-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF1IC4-LABEL: define i32 @select_icmp_const_truncated_iv_const_exit( ; CHECK-VF1IC4-SAME: ptr [[A:%.*]]) { @@ -384,22 +356,8 @@ define i32 @select_icmp_const_truncated_iv_const_exit(ptr %a) { ; CHECK-VF1IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[RDX_MINMAX5]], -2147483648 ; CHECK-VF1IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[RDX_MINMAX5]], i32 331 ; CHECK-VF1IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF1IC4: [[SCALAR_PH:.*]]: -; CHECK-VF1IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF1IC4: [[FOR_BODY]]: -; CHECK-VF1IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-VF1IC4-NEXT: [[TMP26:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-VF1IC4-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP26]], 3 -; CHECK-VF1IC4-NEXT: [[TMP27:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP27]], i32 [[RDX]] -; CHECK-VF1IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF1IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 20000 -; CHECK-VF1IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF1IC4: [[EXIT]]: -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF1IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF1IC4-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -446,22 +404,8 @@ define i32 @select_fcmp_max_valid_const_ub(ptr %a) { ; CHECK-VF4IC1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP6]], -2147483648 ; CHECK-VF4IC1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP6]], i32 -1 ; CHECK-VF4IC1-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC1: [[SCALAR_PH:.*]]: -; CHECK-VF4IC1-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC1: [[FOR_BODY]]: -; CHECK-VF4IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[RDX:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC1-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-VF4IC1-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP7]], 0.000000e+00 -; CHECK-VF4IC1-NEXT: [[TMP8:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP8]], i32 [[RDX]] -; CHECK-VF4IC1-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 2147483648 -; CHECK-VF4IC1-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC1: [[EXIT]]: -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC1-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC1-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF4IC4-LABEL: define i32 @select_fcmp_max_valid_const_ub( ; CHECK-VF4IC4-SAME: ptr [[A:%.*]]) { @@ -507,22 +451,8 @@ define i32 @select_fcmp_max_valid_const_ub(ptr %a) { ; CHECK-VF4IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP15]], -2147483648 ; CHECK-VF4IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP15]], i32 -1 ; CHECK-VF4IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC4: [[SCALAR_PH:.*]]: -; CHECK-VF4IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC4: [[FOR_BODY]]: -; CHECK-VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[RDX:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC4-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-VF4IC4-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP16]], 0.000000e+00 -; CHECK-VF4IC4-NEXT: [[TMP17:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP17]], i32 [[RDX]] -; CHECK-VF4IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 2147483648 -; CHECK-VF4IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC4: [[EXIT]]: -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC4-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF1IC4-LABEL: define i32 @select_fcmp_max_valid_const_ub( ; CHECK-VF1IC4-SAME: ptr [[A:%.*]]) { @@ -569,22 +499,8 @@ define i32 @select_fcmp_max_valid_const_ub(ptr %a) { ; CHECK-VF1IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[RDX_MINMAX5]], -2147483648 ; CHECK-VF1IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[RDX_MINMAX5]], i32 -1 ; CHECK-VF1IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF1IC4: [[SCALAR_PH:.*]]: -; CHECK-VF1IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF1IC4: [[FOR_BODY]]: -; CHECK-VF1IC4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[RDX:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] -; CHECK-VF1IC4-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-VF1IC4-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP26]], 0.000000e+00 -; CHECK-VF1IC4-NEXT: [[TMP27:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[TMP27]], i32 [[RDX]] -; 
CHECK-VF1IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF1IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 2147483648 -; CHECK-VF1IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF1IC4: [[EXIT]]: -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF1IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF1IC4-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body @@ -636,22 +552,8 @@ define i32 @select_icmp_truncated_unsigned_iv_range(ptr %a) { ; CHECK-VF4IC1-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP5]], 0 ; CHECK-VF4IC1-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP5]], i32 331 ; CHECK-VF4IC1-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC1: [[SCALAR_PH:.*]]: -; CHECK-VF4IC1-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC1: [[FOR_BODY]]: -; CHECK-VF4IC1-NEXT: [[IV1:%.*]] = phi i64 [ 2147483646, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] -; CHECK-VF4IC1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 -; CHECK-VF4IC1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], 3 -; CHECK-VF4IC1-NEXT: [[CONV:%.*]] = trunc i64 [[IV1]] to i32 -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[CONV]], i32 [[RDX]] -; CHECK-VF4IC1-NEXT: [[INC]] = add nuw nsw i64 [[IV1]], 1 -; CHECK-VF4IC1-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 4294967294 -; CHECK-VF4IC1-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC1: [[EXIT]]: -; CHECK-VF4IC1-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC1-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC1-NEXT: ret i32 [[RDX_SELECT]] ; ; CHECK-VF4IC4-LABEL: define i32 @select_icmp_truncated_unsigned_iv_range( ; CHECK-VF4IC4-SAME: ptr [[A:%.*]]) { @@ -698,22 +600,8 @@ define i32 @select_icmp_truncated_unsigned_iv_range(ptr %a) { ; CHECK-VF4IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[TMP14]], 0 ; CHECK-VF4IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[TMP14]], i32 331 ; CHECK-VF4IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF4IC4: [[SCALAR_PH:.*]]: -; CHECK-VF4IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF4IC4: [[FOR_BODY]]: -; CHECK-VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 2147483646, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF4IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-VF4IC4-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-VF4IC4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP15]], 3 -; CHECK-VF4IC4-NEXT: [[CONV:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[CONV]], i32 [[RDX]] -; CHECK-VF4IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF4IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 4294967294 -; CHECK-VF4IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF4IC4: [[EXIT]]: -; CHECK-VF4IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF4IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF4IC4-NEXT: ret i32 [[RDX_SELECT]] ; ; 
CHECK-VF1IC4-LABEL: define i32 @select_icmp_truncated_unsigned_iv_range( ; CHECK-VF1IC4-SAME: ptr [[A:%.*]]) { @@ -762,22 +650,8 @@ define i32 @select_icmp_truncated_unsigned_iv_range(ptr %a) { ; CHECK-VF1IC4-NEXT: [[RDX_SELECT_CMP:%.*]] = icmp ne i32 [[RDX_MINMAX6]], 0 ; CHECK-VF1IC4-NEXT: [[RDX_SELECT:%.*]] = select i1 [[RDX_SELECT_CMP]], i32 [[RDX_MINMAX6]], i32 331 ; CHECK-VF1IC4-NEXT: br label %[[EXIT:.*]] -; CHECK-VF1IC4: [[SCALAR_PH:.*]]: -; CHECK-VF1IC4-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-VF1IC4: [[FOR_BODY]]: -; CHECK-VF1IC4-NEXT: [[IV:%.*]] = phi i64 [ 2147483646, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[RDX:%.*]] = phi i32 [ 331, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[FOR_BODY]] ] -; CHECK-VF1IC4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-VF1IC4-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-VF1IC4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP24]], 3 -; CHECK-VF1IC4-NEXT: [[CONV:%.*]] = trunc i64 [[IV]] to i32 -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT]] = select i1 [[CMP]], i32 [[CONV]], i32 [[RDX]] -; CHECK-VF1IC4-NEXT: [[INC]] = add nuw nsw i64 [[IV]], 1 -; CHECK-VF1IC4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 4294967294 -; CHECK-VF1IC4-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK-VF1IC4: [[EXIT]]: -; CHECK-VF1IC4-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ] -; CHECK-VF1IC4-NEXT: ret i32 [[SPEC_SELECT_LCSSA]] +; CHECK-VF1IC4-NEXT: ret i32 [[RDX_SELECT]] ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll b/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll index 3f91baa117b7f..86515ebe25637 100644 --- a/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll +++ b/llvm/test/Transforms/LoopVectorize/iv_outside_user.ll @@ -102,16 +102,8 @@ define i32 @constpre() { ; CHECK-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INC_PHI:%.*]] = phi i32 [ 32, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[INC]] = sub nsw i32 [[INC_PHI]], 2 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC]], 0 -; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[INC_PHI_LCSSA:%.*]] = phi i32 [ [[INC_PHI]], %[[FOR_BODY]] ], [ 2, %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[INC_PHI_LCSSA]] +; CHECK-NEXT: ret i32 2 ; entry: br label %for.body @@ -142,18 +134,8 @@ define ptr @geppre(ptr %ptr) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[TMP0]], i64 -16 ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INC_PHI:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[PTR_PHI:%.*]] = phi ptr [ [[PTR]], %[[SCALAR_PH]] ], [ [[INC_PTR:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[INC]] = add nsw i32 [[INC_PHI]], 1 -; CHECK-NEXT: [[INC_PTR]] = getelementptr i32, ptr [[PTR_PHI]], i32 4 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC]], 32 -; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[PTR_PHI_LCSSA:%.*]] = phi ptr [ [[PTR_PHI]], %[[FOR_BODY]] ], [ [[IND_ESCAPE]], 
%[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret ptr [[PTR_PHI_LCSSA]] +; CHECK-NEXT: ret ptr [[IND_ESCAPE]] ; entry: br label %for.body @@ -411,18 +393,8 @@ define i64 @iv_scalar_steps_and_outside_users(ptr %ptr) { ; VEC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: br label %[[EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1 -; VEC-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]] -; VEC-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4 -; VEC-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000 -; VEC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; VEC: [[EXIT]]: -; VEC-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ], [ 1001, %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i64 [[IV_LCSSA]] +; VEC-NEXT: ret i64 1001 ; ; INTERLEAVE-LABEL: define i64 @iv_scalar_steps_and_outside_users( ; INTERLEAVE-SAME: ptr [[PTR:%.*]]) { @@ -442,18 +414,8 @@ define i64 @iv_scalar_steps_and_outside_users(ptr %ptr) { ; INTERLEAVE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]] -; INTERLEAVE-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: -; INTERLEAVE-NEXT: [[IV_LCSSA:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ], [ 1001, %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i64 [[IV_LCSSA]] +; INTERLEAVE-NEXT: ret i64 1001 ; entry: br label %loop @@ -491,20 +453,8 @@ define i32 @iv_2_dead_in_loop_only_used_outside(ptr %ptr) { ; VEC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: br label %[[EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1 -; VEC-NEXT: [[IV_2_NEXT]] = add nuw i32 [[IV_2]], 2 -; VEC-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]] -; VEC-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4 -; VEC-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000 -; VEC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; VEC: [[EXIT]]: -; VEC-NEXT: [[IV_2_LCSSA:%.*]] = phi i32 [ [[IV_2]], %[[LOOP]] ], [ 2002, %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i32 [[IV_2_LCSSA]] +; VEC-NEXT: ret i32 2002 ; ; INTERLEAVE-LABEL: define i32 @iv_2_dead_in_loop_only_used_outside( ; INTERLEAVE-SAME: ptr [[PTR:%.*]]) { @@ -524,20 +474,8 @@ define i32 @iv_2_dead_in_loop_only_used_outside(ptr %ptr) { ; INTERLEAVE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label 
%[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[IV_2_NEXT]] = add nuw i32 [[IV_2]], 2 -; INTERLEAVE-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[IV]] -; INTERLEAVE-NEXT: store i64 [[IV]], ptr [[GEP_PTR]], align 4 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ugt i64 [[IV]], 1000 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: -; INTERLEAVE-NEXT: [[IV_2_LCSSA:%.*]] = phi i32 [ [[IV_2]], %[[LOOP]] ], [ 2002, %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i32 [[IV_2_LCSSA]] +; INTERLEAVE-NEXT: ret i32 2002 ; entry: br label %loop @@ -1092,18 +1030,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification(ptr %dst) { ; VEC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: br label %[[E_EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]] -; VEC-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; VEC-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[IV]] -; VEC-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8 -; VEC-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]] ; VEC: [[E_EXIT]]: -; VEC-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i32 [[RES]] +; VEC-NEXT: ret i32 [[TMP5]] ; ; INTERLEAVE-LABEL: define i32 @test_iv_uniform_with_outside_use_scev_simplification( ; INTERLEAVE-SAME: ptr [[DST:%.*]]) { @@ -1126,18 +1054,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification(ptr %dst) { ; INTERLEAVE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[E_EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]] -; INTERLEAVE-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[IV]] -; INTERLEAVE-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8 -; INTERLEAVE-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]] ; INTERLEAVE: [[E_EXIT]]: -; INTERLEAVE-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i32 [[RES]] +; INTERLEAVE-NEXT: ret i32 [[TMP5]] ; entry: %step.1 = sext i8 0 to i32 @@ -1187,19 +1105,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification_2(ptr %dst) { ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[TMP5]], i32 1 ; VEC-NEXT: br label %[[E_EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]] -; VEC-NEXT: store i16 0, ptr 
[[GEP_DST]], align 2 -; VEC-NEXT: [[INC:%.*]] = add i32 [[IV]], 1 -; VEC-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[INC]] -; VEC-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8 -; VEC-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]] ; VEC: [[E_EXIT]]: -; VEC-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i32 [[RES]] +; VEC-NEXT: ret i32 [[TMP7]] ; ; INTERLEAVE-LABEL: define i32 @test_iv_uniform_with_outside_use_scev_simplification_2( ; INTERLEAVE-SAME: ptr [[DST:%.*]]) { @@ -1224,19 +1131,8 @@ define i32 @test_iv_uniform_with_outside_use_scev_simplification_2(ptr %dst) { ; INTERLEAVE-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], {{!llvm.loop ![0-9]+}} ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[E_EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i32 [[IV]] -; INTERLEAVE-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; INTERLEAVE-NEXT: [[INC:%.*]] = add i32 [[IV]], 1 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add i32 [[STEP_2]], [[INC]] -; INTERLEAVE-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[IV_NEXT]], 8 -; INTERLEAVE-NEXT: br i1 [[CMP_I]], label %[[LOOP]], label %[[E_EXIT]] ; INTERLEAVE: [[E_EXIT]]: -; INTERLEAVE-NEXT: [[RES:%.*]] = phi i32 [ [[IV_NEXT]], %[[LOOP]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i32 [[RES]] +; INTERLEAVE-NEXT: ret i32 [[TMP5]] ; entry: %step.1 = sext i8 0 to i32 @@ -1356,24 +1252,12 @@ define i64 @test_iv_increment_incremented(ptr %dst) { ; VEC-NEXT: [[TMP2:%.*]] = getelementptr i16, ptr [[TMP1]], i32 -1 ; VEC-NEXT: store <2 x i16> splat (i16 1), ptr [[TMP2]], align 2 ; VEC-NEXT: [[TMP5:%.*]] = add i64 1, -1 -; VEC-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 1 +; VEC-NEXT: [[IV_1_NEXT_LCSSA1:%.*]] = add i64 [[TMP5]], 1 ; VEC-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VEC: [[MIDDLE_BLOCK]]: ; VEC-NEXT: br label %[[EXIT:.*]] -; VEC: [[SCALAR_PH:.*]]: -; VEC-NEXT: br label %[[LOOP:.*]] -; VEC: [[LOOP]]: -; VEC-NEXT: [[IV_1:%.*]] = phi i64 [ 3, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[IV_2:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] -; VEC-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV_1]] -; VEC-NEXT: store i16 1, ptr [[GEP]], align 2 -; VEC-NEXT: [[IV_2_NEXT]] = add i64 [[IV_2]], -1 -; VEC-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_2_NEXT]], 0 -; VEC-NEXT: [[IV_1_NEXT]] = add i64 [[IV_2_NEXT]], 1 -; VEC-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VEC: [[EXIT]]: -; VEC-NEXT: [[IV_1_NEXT_LCSSA:%.*]] = phi i64 [ [[IV_1_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; VEC-NEXT: ret i64 [[IV_1_NEXT_LCSSA]] +; VEC-NEXT: ret i64 [[IV_1_NEXT_LCSSA1]] ; ; INTERLEAVE-LABEL: define i64 @test_iv_increment_incremented( ; INTERLEAVE-SAME: ptr [[DST:%.*]]) { @@ -1387,24 +1271,12 @@ define i64 @test_iv_increment_incremented(ptr %dst) { ; INTERLEAVE-NEXT: store i16 1, ptr [[TMP0]], align 2 ; INTERLEAVE-NEXT: store i16 1, ptr [[TMP1]], align 2 ; INTERLEAVE-NEXT: [[TMP2:%.*]] = add i64 1, -1 -; INTERLEAVE-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1 +; INTERLEAVE-NEXT: [[IV_1_NEXT_LCSSA1:%.*]] = add i64 [[TMP2]], 1 ; INTERLEAVE-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; 
INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV_1:%.*]] = phi i64 [ 3, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[IV_2:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV_1]] -; INTERLEAVE-NEXT: store i16 1, ptr [[GEP]], align 2 -; INTERLEAVE-NEXT: [[IV_2_NEXT]] = add i64 [[IV_2]], -1 -; INTERLEAVE-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_2_NEXT]], 0 -; INTERLEAVE-NEXT: [[IV_1_NEXT]] = add i64 [[IV_2_NEXT]], 1 -; INTERLEAVE-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: -; INTERLEAVE-NEXT: [[IV_1_NEXT_LCSSA:%.*]] = phi i64 [ [[IV_1_NEXT]], %[[LOOP]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; INTERLEAVE-NEXT: ret i64 [[IV_1_NEXT_LCSSA]] +; INTERLEAVE-NEXT: ret i64 [[IV_1_NEXT_LCSSA1]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll b/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll index 11d48df8b8aaa..9358fd9cc8440 100644 --- a/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll +++ b/llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll @@ -48,29 +48,9 @@ define i16 @test_access_size_not_multiple_of_align(i64 %len, ptr %test_base) { ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP17:%.*]] = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> [[TMP15]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[L_T:%.*]] = load i8, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp sge i8 [[L_T]], 0 -; CHECK-NEXT: br i1 [[CMP]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i16, ptr [[ALLOCA]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i16, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i16 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i16 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp eq i64 [[IV]], 4095 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i16 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i16 [[TMP17]] ; entry: %alloca = alloca [163840 x i16], align 4 @@ -142,29 +122,9 @@ define i32 @test_access_size_multiple_of_align_but_offset_by_1(i64 %len, ptr %te ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[TMP15]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ 
[[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[L_T:%.*]] = load i8, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp sge i8 [[L_T]], 0 -; CHECK-NEXT: br i1 [[CMP]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i32, ptr [[START]], i64 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ADDR]], align 4 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i32 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp eq i64 [[IV]], 4095 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP17]] ; entry: %alloca = alloca [163840 x i32], align 4 @@ -370,26 +330,7 @@ define void @test_rev_loops_deref_loads(ptr nocapture noundef writeonly %dest) { ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1023, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[IV]] -; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP19]], 3 -; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[IV]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP20]], 2 -; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[IV]] -; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4 -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[EXIT]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: exit: ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DEST:%.*]], ptr [[LOCAL_DEST]], i64 1024, i1 false) ; CHECK-NEXT: ret void @@ -481,27 +422,7 @@ define void @test_rev_loops_non_deref_loads(ptr nocapture noundef writeonly %des ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1023, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[OFF:%.*]] = add i64 [[IV]], -1 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[OFF]] -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: 
[[CMP3_NOT:%.*]] = icmp eq i32 [[TMP22]], 3 -; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[OFF]] -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP23]], 2 -; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[OFF]] -; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4 -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[EXIT]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: exit: ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DEST:%.*]], ptr [[LOCAL_DEST]], i64 1024, i1 false) ; CHECK-NEXT: ret void @@ -574,30 +495,9 @@ define i16 @test_strided_access(i64 %len, ptr %test_base) { ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP15:%.*]] = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> [[TMP13]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, ptr [[TEST_BASE]], i64 [[IV]] -; CHECK-NEXT: [[L_T:%.*]] = load i8, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp sge i8 [[L_T]], 0 -; CHECK-NEXT: br i1 [[CMP]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[IV_STRIDE:%.*]] = mul i64 [[IV]], 2 -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr inbounds i16, ptr [[ALLOCA]], i64 [[IV_STRIDE]] -; CHECK-NEXT: [[VAL:%.*]] = load i16, ptr [[ADDR]], align 2 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i16 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i16 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp eq i64 [[IV]], 4095 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i16 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP15]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i16 [[TMP15]] ; entry: %alloca = alloca [163840 x i16], align 4 @@ -681,27 +581,7 @@ define void @test_rev_loops_strided_deref_loads(ptr nocapture noundef writeonly ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 511, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[IV]] -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP21]], 3 -; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label 
[[IF_THEN:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[IV_STRIDED:%.*]] = mul i64 [[IV]], 2 -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[IV_STRIDED]] -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP22]], 2 -; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[IV]] -; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4 -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[CMP2_NOT:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: br i1 [[CMP2_NOT]], label [[EXIT]], label [[FOR_BODY]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: exit: ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DEST:%.*]], ptr [[LOCAL_DEST]], i64 1024, i1 false) ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll b/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll index b224534720a2d..b14a1cdff92c2 100644 --- a/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll +++ b/llvm/test/Transforms/LoopVectorize/load-deref-pred-neg-off.ll @@ -52,28 +52,9 @@ define i8 @test_negative_off(i16 %len, ptr %test_base) { ; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP20:%.*]] = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> [[TMP18]]) -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ -1000, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] -; CHECK-NEXT: [[ACCUM:%.*]] = phi i8 [ 0, [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LATCH]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i16 [[IV]] -; CHECK-NEXT: [[EARLYCND:%.*]] = load i1, ptr [[TEST_ADDR]], align 1 -; CHECK-NEXT: br i1 [[EARLYCND]], label [[PRED:%.*]], label [[LATCH]] -; CHECK: pred: -; CHECK-NEXT: [[ADDR:%.*]] = getelementptr i8, ptr [[ALLOCA]], i16 [[IV]] -; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[ADDR]], align 1 -; CHECK-NEXT: br label [[LATCH]] -; CHECK: latch: -; CHECK-NEXT: [[VAL_PHI:%.*]] = phi i8 [ 0, [[LOOP]] ], [ [[VAL]], [[PRED]] ] -; CHECK-NEXT: [[ACCUM_NEXT]] = add i8 [[ACCUM]], [[VAL_PHI]] -; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i16 [[IV]], -990 -; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]] +; CHECK-NEXT: br label [[LATCH:%.*]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i8 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP20]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i8 [[ACCUM_NEXT_LCSSA]] +; CHECK-NEXT: ret i8 [[TMP20]] ; entry: %alloca = alloca [64638 x i8] diff --git a/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll index f44fc4e5568b1..096a0a87cbb8a 100644 --- a/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/load-of-struct-deref-pred.ll @@ -30,28 +30,6 @@ define void @accesses_to_struct_dereferenceable(ptr noalias %dst) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: 
[[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i32 [[D]], 0 -; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 0, i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: if.else: -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 1, i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[TMP_0:%.*]] = phi i32 [ [[L_A]], [[IF_THEN]] ], [ [[L_B]], [[IF_ELSE]] ] -; CHECK-NEXT: store i32 [[TMP_0]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 32000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP_HEADER]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -265,29 +243,6 @@ define void @accesses_to_struct_may_not_be_dereferenceable_access_size(ptr noali ; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: [[D:%.*]] = load i32, ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i32 [[D]], 0 -; CHECK-NEXT: br i1 [[CMP3]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 0, i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: if.else: -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr @foo, i64 0, i32 1, i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i64, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[T:%.*]] = trunc i64 [[L_B]] to i32 -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[TMP_0:%.*]] = phi i32 [ [[L_A]], [[IF_THEN]] ], [ [[T]], [[IF_ELSE]] ] -; CHECK-NEXT: store i32 [[TMP_0]], ptr [[GEP_DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 32000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP_HEADER]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/loop-form.ll b/llvm/test/Transforms/LoopVectorize/loop-form.ll index c589c77895353..aed1e2920bbdc 100644 --- a/llvm/test/Transforms/LoopVectorize/loop-form.ll +++ b/llvm/test/Transforms/LoopVectorize/loop-form.ll @@ -79,17 +79,7 @@ define void @bottom_tested(ptr %p, i32 %n) { ; TAILFOLD-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; TAILFOLD-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TAILFOLD: middle.block: -; TAILFOLD-NEXT: br label [[IF_END:%.*]] -; TAILFOLD: scalar.ph: ; TAILFOLD-NEXT: br label [[FOR_COND:%.*]] -; TAILFOLD: for.cond: -; 
TAILFOLD-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[FOR_COND]] ] -; TAILFOLD-NEXT: [[IPROM:%.*]] = sext i32 [[I]] to i64 -; TAILFOLD-NEXT: [[B:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IPROM]] -; TAILFOLD-NEXT: store i16 0, ptr [[B]], align 4 -; TAILFOLD-NEXT: [[INC]] = add nsw i32 [[I]], 1 -; TAILFOLD-NEXT: [[CMP:%.*]] = icmp slt i32 [[I]], [[N]] -; TAILFOLD-NEXT: br i1 [[CMP]], label [[FOR_COND]], label [[IF_END]] ; TAILFOLD: if.end: ; TAILFOLD-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll b/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll index 781980dce87b3..1fe802f9e1093 100644 --- a/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll +++ b/llvm/test/Transforms/LoopVectorize/make-followup-loop-id.ll @@ -32,17 +32,6 @@ define void @scalar_loop_dead(ptr noundef captures(none) %a, float noundef %x) { ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = fmul float [[X]], [[LOAD]] -; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COMP:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -83,7 +72,7 @@ define void @scalar_loop_live(ptr noundef captures(none) %a, float noundef %x, i ; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[TMP0]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -98,7 +87,7 @@ define void @scalar_loop_live(ptr noundef captures(none) %a, float noundef %x, i ; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[COMP:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -128,9 +117,6 @@ exit: ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized"} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.count", i32 8} ; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META5:![0-9]+]], [[META6:![0-9]+]]} -; CHECK: [[META5]] = !{!"llvm.loop.vectorize.enable", i1 true} -; CHECK: [[META6]] = !{!"llvm.loop.vectorize.followup_all", [[META1]], [[META2]]} -; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} +; CHECK: [[LOOP4]] = 
distinct !{[[LOOP4]], [[META1]], [[META2]], [[META3]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll index bb5199208ba15..30ee4803de607 100644 --- a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll +++ b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll @@ -69,19 +69,7 @@ define void @maxvf3() { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[J_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[AJ:%.*]] = getelementptr inbounds [18 x i8], ptr @a, i32 0, i32 [[J]] -; CHECK-NEXT: store i8 69, ptr [[AJ]], align 8 -; CHECK-NEXT: [[JP3:%.*]] = add nuw nsw i32 3, [[J]] -; CHECK-NEXT: [[AJP3:%.*]] = getelementptr inbounds [18 x i8], ptr @a, i32 0, i32 [[JP3]] -; CHECK-NEXT: store i8 7, ptr [[AJP3]], align 8 -; CHECK-NEXT: [[J_NEXT]] = add nuw nsw i32 [[J]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[J_NEXT]], 15 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/metadata.ll b/llvm/test/Transforms/LoopVectorize/metadata.ll index e2dadff3e985b..3c59a279e077d 100644 --- a/llvm/test/Transforms/LoopVectorize/metadata.ll +++ b/llvm/test/Transforms/LoopVectorize/metadata.ll @@ -142,18 +142,6 @@ define void @widen_call_range(ptr noalias %a, ptr readonly %b) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4, !tbaa [[CHAR_TBAA0]], !range [[RNG9:![0-9]+]] -; CHECK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]], !range [[RNG9]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -180,18 +168,6 @@ define void @widen_call_range(ptr noalias %a, ptr readonly %b) { ; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4, !tbaa [[CHAR_TBAA0]], !range [[RNG9:![0-9]+]] -; INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]], !range [[RNG9]] -; 
INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -229,21 +205,9 @@ define void @widen_call_fpmath(ptr noalias %a, ptr readonly %b) { ; CHECK-NEXT: store <2 x double> [[TMP1]], ptr [[TMP3]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]] -; CHECK-NEXT: [[CALL:%.*]] = call double @bar(double [[LOAD]]) #[[ATTR2:[0-9]+]], !fpmath [[META3]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -267,21 +231,9 @@ define void @widen_call_fpmath(ptr noalias %a, ptr readonly %b) { ; INTERLEAVE-NEXT: store <2 x double> [[TMP4]], ptr [[TMP7]], align 8 ; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]] -; INTERLEAVE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]] -; INTERLEAVE-NEXT: [[CALL:%.*]] = call double @bar(double [[LOAD]]) #[[ATTR2:[0-9]+]], !fpmath [[META3]] -; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]] -; INTERLEAVE-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -319,21 +271,9 @@ define void @widen_intrinsic(ptr noalias %a, ptr readonly %b) { ; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 
[[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; CHECK-NEXT: [[CALL:%.*]] = call i64 @llvm.abs.i64(i64 [[LOAD]], i1 true), !range [[RNG9]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -357,21 +297,9 @@ define void @widen_intrinsic(ptr noalias %a, ptr readonly %b) { ; INTERLEAVE-NEXT: store <2 x i64> [[TMP4]], ptr [[TMP7]], align 4 ; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] -; INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4 -; INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @llvm.abs.i64(i64 [[LOAD]], i1 true), !range [[RNG9]] -; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -409,21 +337,9 @@ define void @widen_intrinsic_fpmath(ptr noalias %a, ptr readonly %b) { ; CHECK-NEXT: store <2 x double> [[TMP1]], ptr [[TMP3]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]] -; CHECK-NEXT: [[CALL:%.*]] = call double @llvm.sin.f64(double [[LOAD]]) #[[ATTR2]], !fpmath [[META3]] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], 
i64 [[IV]] -; CHECK-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -447,21 +363,9 @@ define void @widen_intrinsic_fpmath(ptr noalias %a, ptr readonly %b) { ; INTERLEAVE-NEXT: store <2 x double> [[TMP4]], ptr [[TMP7]], align 8 ; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: br label %[[EXIT:.*]] -; INTERLEAVE: [[SCALAR_PH:.*]]: -; INTERLEAVE-NEXT: br label %[[LOOP:.*]] -; INTERLEAVE: [[LOOP]]: -; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]] -; INTERLEAVE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[CHAR_TBAA0]] -; INTERLEAVE-NEXT: [[CALL:%.*]] = call double @llvm.sin.f64(double [[LOAD]]) #[[ATTR2]], !fpmath [[META3]] -; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]] -; INTERLEAVE-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8 -; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -506,7 +410,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) { ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <2 x i32> [[TMP3]], splat (i32 2) ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SIZE]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -522,7 +426,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) { ; CHECK-NEXT: store ptr [[ARRAYIDX_2]], ptr [[ARRAYIDX_1]], align 8, !custom_md [[META2]] ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !custom_md [[META2]] ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[SIZE]], !custom_md [[META2]] -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP14:![0-9]+]], !custom_md [[META2]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP13:![0-9]+]], !custom_md [[META2]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -555,7 +459,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) { ; INTERLEAVE-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2) ; INTERLEAVE-NEXT: [[VEC_IND_NEXT2]] = add <2 x i32> [[STEP_ADD3]], splat (i32 2) ; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP13:![0-9]+]] +; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; INTERLEAVE: [[MIDDLE_BLOCK]]: ; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SIZE]], [[N_VEC]] ; INTERLEAVE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -571,7 +475,7 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) { ; INTERLEAVE-NEXT: store ptr [[ARRAYIDX_2]], ptr [[ARRAYIDX_1]], align 8, !custom_md [[META2]] ; INTERLEAVE-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !custom_md [[META2]] ; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[SIZE]], !custom_md [[META2]] -; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP14:![0-9]+]], !custom_md [[META2]] +; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP13:![0-9]+]], !custom_md [[META2]] ; INTERLEAVE: [[EXIT]]: ; INTERLEAVE-NEXT: ret void ; @@ -617,12 +521,11 @@ attributes #1 = { nounwind "vector-function-abi-variant"="_ZGV_LLVM_N2v_bar(bar_ ; CHECK: [[META6]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META6]], [[META5]]} ; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META5]], [[META6]]} -; CHECK: [[RNG9]] = !{i64 0, i64 2} +; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META5]], [[META6]]} ; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META5]], [[META6]]} ; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META5]], [[META6]]} ; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META5]], [[META6]]} -; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META5]], [[META6]]} -; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META5]]} +; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META6]], [[META5]]} ;. ; INTERLEAVE: [[CHAR_TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0, i64 0} ; INTERLEAVE: [[META1]] = !{!"omnipotent char", [[META2]]} @@ -633,10 +536,9 @@ attributes #1 = { nounwind "vector-function-abi-variant"="_ZGV_LLVM_N2v_bar(bar_ ; INTERLEAVE: [[META6]] = !{!"llvm.loop.unroll.runtime.disable"} ; INTERLEAVE: [[LOOP7]] = distinct !{[[LOOP7]], [[META6]], [[META5]]} ; INTERLEAVE: [[LOOP8]] = distinct !{[[LOOP8]], [[META5]], [[META6]]} -; INTERLEAVE: [[RNG9]] = !{i64 0, i64 2} +; INTERLEAVE: [[LOOP9]] = distinct !{[[LOOP9]], [[META5]], [[META6]]} ; INTERLEAVE: [[LOOP10]] = distinct !{[[LOOP10]], [[META5]], [[META6]]} ; INTERLEAVE: [[LOOP11]] = distinct !{[[LOOP11]], [[META5]], [[META6]]} ; INTERLEAVE: [[LOOP12]] = distinct !{[[LOOP12]], [[META5]], [[META6]]} -; INTERLEAVE: [[LOOP13]] = distinct !{[[LOOP13]], [[META5]], [[META6]]} -; INTERLEAVE: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META5]]} +; INTERLEAVE: [[LOOP13]] = distinct !{[[LOOP13]], [[META6]], [[META5]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll b/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll index 7866728168888..47a2a84b44601 100644 --- a/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/minimumnum-maximumnum-reductions.ll @@ -26,20 +26,8 @@ define float @maximumnum_intrinsic(ptr readonly %x) { ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]]) ; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> [[RDX_MINMAX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]] -; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = tail call float @llvm.maximumnum.f32(float [[RED]], float [[L]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop @@ -82,20 +70,8 @@ define float @maximumnum_intrinsic_fast(ptr readonly %x) { ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call fast <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]]) ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmax.v2f32(<2 x float> [[RDX_MINMAX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]] -; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = tail call fast float @llvm.maximumnum.f32(float [[RED]], float [[L]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop @@ -138,20 +114,8 @@ define float @minimumnum_intrinsic(ptr readonly %x) { ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]]) ; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> [[RDX_MINMAX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]] -; 
CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = tail call float @llvm.minimumnum.f32(float [[RED]], float [[L]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop @@ -194,20 +158,8 @@ define float @minimumnum_intrinsic_fast(ptr readonly %x) { ; CHECK-NEXT: [[RDX_MINMAX:%.*]] = call fast <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[TMP3]], <2 x float> [[TMP4]]) ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fmin.v2f32(<2 x float> [[RDX_MINMAX]]) ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[IV1]] -; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP1]], align 4 -; CHECK-NEXT: [[RED_NEXT]] = tail call fast float @llvm.minimumnum.f32(float [[RED]], float [[L]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[IV1]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RED_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[TMP6]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll index 2e88ff6e99fdf..a1fc1b8f34ff3 100644 --- a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll +++ b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll @@ -34,10 +34,6 @@ define i32 @main() #0 { ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 0 ; diff --git a/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll index d928a4b7ebe4b..b19f9c5a3b60d 100644 --- a/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll @@ -12,14 +12,7 @@ define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr 
[[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; entry: @@ -55,14 +48,7 @@ define void @sincos_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noali ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincos.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; entry: @@ -91,9 +77,9 @@ define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly ; CHECK-LABEL: define void @predicated_sincos( ; CHECK-SAME: float [[X:%.*]], ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { ; CHECK: [[ENTRY:.*:]] -; CHECK: [[VECTOR_BODY1:.*]]: -; CHECK: [[VECTOR_BODY:.*:]] -; CHECK: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_BODY1]] ], [ [[INDEX_NEXT:%.*]], %[[IF_THEN2:.*]] ] +; CHECK: [[VECTOR_BODY:.*]]: +; CHECK: [[VECTOR_BODY1:.*:]] +; CHECK: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_BODY]] ], [ [[INDEX_NEXT:%.*]], %[[IF_THEN1:.*]] ] ; CHECK: [[TMP4:%.*]] = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> [[WIDE_LOAD:%.*]]) ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 0 ; CHECK: [[TMP6:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 1 @@ -107,23 +93,14 @@ define void @predicated_sincos(float %x, ptr noalias %in, ptr noalias writeonly ; CHECK: br label %[[PRED_STORE_CONTINUE]] ; CHECK: [[PRED_STORE_CONTINUE]]: ; CHECK: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1 -; CHECK: br i1 [[TMP12]], label %[[PRED_STORE_IF1:.*]], label %[[IF_THEN2]] +; CHECK: br i1 [[TMP12]], label %[[PRED_STORE_IF1:.*]], label %[[IF_THEN1]] ; CHECK: [[PRED_STORE_IF1]]: ; CHECK: [[TMP15:%.*]] = extractelement <2 x float> [[TMP5]], i32 1 ; CHECK: store float [[TMP15]], ptr [[TMP14:%.*]], align 4 ; CHECK: [[TMP17:%.*]] = extractelement <2 x float> [[TMP6]], i32 1 ; CHECK: store float [[TMP17]], ptr [[TMP16:%.*]], align 4 -; CHECK: br label %[[IF_THEN2]] -; CHECK: [[IF_THEN2]]: -; CHECK: [[IF_THEN:.*:]] -; CHECK: [[IF_THEN3:.*:]] -; CHECK: [[IF_THEN4:.*:]] -; CHECK: [[IF_THEN1:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincos.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 +; CHECK: br label %[[IF_THEN1]] +; CHECK: [[IF_THEN1]]: ; CHECK: [[IF_MERGE:.*:]] ; CHECK: [[FOR_END:.*:]] ; @@ -167,14 +144,7 @@ define void @modf_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] 
= tail call { float, float } @llvm.modf.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; entry: @@ -210,14 +180,7 @@ define void @modf_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.modf.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; entry: @@ -253,14 +216,7 @@ define void @sincospi_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP3]], 1 ; CHECK: store <2 x float> [[TMP4]], ptr [[TMP7:%.*]], align 4 ; CHECK: store <2 x float> [[TMP5]], ptr [[TMP9:%.*]], align 4 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { float, float } @llvm.sincospi.f32(float [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 -; CHECK: store float [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 4 -; CHECK: store float [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 4 ; CHECK: [[EXIT:.*:]] ; entry: @@ -296,14 +252,7 @@ define void @sincospi_f64(ptr noalias %in, ptr noalias writeonly %out_a, ptr noa ; CHECK: [[TMP5:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP3]], 1 ; CHECK: store <2 x double> [[TMP4]], ptr [[TMP7:%.*]], align 8 ; CHECK: store <2 x double> [[TMP5]], ptr [[TMP9:%.*]], align 8 -; CHECK: [[MIDDLE_BLOCK:.*:]] -; CHECK: [[SCALAR_PH:.*:]] ; CHECK: [[FOR_BODY:.*:]] -; CHECK: [[CALL:%.*]] = tail call { double, double } @llvm.sincospi.f64(double [[IN_VAL:%.*]]) -; CHECK: [[EXTRACT_A:%.*]] = extractvalue { double, double } [[CALL]], 0 -; CHECK: [[EXTRACT_B:%.*]] = extractvalue { double, double } [[CALL]], 1 -; CHECK: store double [[EXTRACT_A]], ptr [[ARRAYIDX2:%.*]], align 8 -; CHECK: store double [[EXTRACT_B]], ptr [[ARRAYIDX4:%.*]], align 8 ; CHECK: [[EXIT:.*:]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll index 6cdd154f0e00e..8525b3aa5d349 100644 --- a/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll +++ b/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll @@ -240,11 +240,9 @@ define void @pr52024(ptr %dst, i16 %N) { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT]], <2 x i16> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = mul i16 24, [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = mul <2 x i16> splat (i16 2), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <2 x i16> poison, i16 [[REM_TRUNC]], i64 0 -; 
CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT3]], <2 x i16> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <2 x i16> poison, i16 [[TMP4]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <2 x i16> poison, i16 [[REM_TRUNC]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT5]], <2 x i16> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP7:%.*]] = mul <2 x i16> , [[BROADCAST_SPLAT6]] +; CHECK-NEXT: [[TMP7:%.*]] = mul <2 x i16> , [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <2 x i16> zeroinitializer, [[TMP7]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: @@ -252,8 +250,8 @@ define void @pr52024(ptr %dst, i16 %N) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i16> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i16> [[VEC_IND]], [[TMP6]] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 8, [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = sub <2 x i16> [[VEC_IND]], [[BROADCAST_SPLAT4]] -; CHECK-NEXT: [[TMP9:%.*]] = sub <2 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT4]] +; CHECK-NEXT: [[TMP8:%.*]] = sub <2 x i16> [[VEC_IND]], [[BROADCAST_SPLAT6]] +; CHECK-NEXT: [[TMP9:%.*]] = sub <2 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT6]] ; CHECK-NEXT: [[TMP10:%.*]] = zext <2 x i16> [[TMP8]] to <2 x i32> ; CHECK-NEXT: [[TMP11:%.*]] = zext <2 x i16> [[TMP9]] to <2 x i32> ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[DST]], i32 [[OFFSET_IDX]] diff --git a/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll b/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll index 9b6774e3d63fe..481fa04cf7164 100644 --- a/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll +++ b/llvm/test/Transforms/LoopVectorize/noalias-scope-decl.ll @@ -26,20 +26,6 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = fcmp ogt float [[TMP7]], 1.000000e+02 -; CHECK-NEXT: tail call void @llvm.experimental.noalias.scope.decl(metadata [[META0]]) -; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP7]], 1.000000e+00 -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX5]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 1599 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization-liveout.ll b/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization-liveout.ll index ee74f2225a425..18803e71f1041 100644 --- a/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization-liveout.ll +++ b/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization-liveout.ll @@ -41,9 +41,8 @@ 
define signext i32 @f1(ptr noalias %A, ptr noalias %B, i32 signext %n) { ; VF-TWO-CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; VF-TWO-CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; VF-TWO-CHECK: vec.epilog.iter.check: -; VF-TWO-CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; VF-TWO-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 -; VF-TWO-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; VF-TWO-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 +; VF-TWO-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; VF-TWO-CHECK: vec.epilog.ph: ; VF-TWO-CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; VF-TWO-CHECK-NEXT: [[N_MOD_VF3:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 2 @@ -58,7 +57,7 @@ define signext i32 @f1(ptr noalias %A, ptr noalias %B, i32 signext %n) { ; VF-TWO-CHECK-NEXT: [[TMP13:%.*]] = add nsw <2 x i32> [[WIDE_LOAD7]], [[WIDE_LOAD8]] ; VF-TWO-CHECK-NEXT: [[INDEX_NEXT9]] = add nuw i64 [[INDEX6]], 2 ; VF-TWO-CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC4]] -; VF-TWO-CHECK-NEXT: br i1 [[TMP14]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; VF-TWO-CHECK-NEXT: br i1 [[TMP14]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VF-TWO-CHECK: vec.epilog.middle.block: ; VF-TWO-CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i32> [[TMP13]], i32 1 ; VF-TWO-CHECK-NEXT: [[CMP_N5:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC4]] @@ -75,7 +74,7 @@ define signext i32 @f1(ptr noalias %A, ptr noalias %B, i32 signext %n) { ; VF-TWO-CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] ; VF-TWO-CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VF-TWO-CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]] -; VF-TWO-CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT]], !llvm.loop [[LOOP4:![0-9]+]] +; VF-TWO-CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; VF-TWO-CHECK: for.end.loopexit: ; VF-TWO-CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[TMP15]], [[VEC_EPILOG_MIDDLE_BLOCK]] ] ; VF-TWO-CHECK-NEXT: br label [[FOR_END]] diff --git a/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization.ll b/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization.ll index ce77811e81562..1319d068145a8 100644 --- a/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/optimal-epilog-vectorization.ll @@ -45,8 +45,7 @@ define dso_local void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br 
i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -189,8 +188,7 @@ define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: ; CHECK-NEXT: [[IND_END4:%.*]] = trunc i64 [[N_VEC]] to i32 -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -220,11 +218,11 @@ define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) ; CHECK-NEXT: br i1 [[CMP_N6]], label %[[FOR_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]] ; CHECK: [[VEC_EPILOG_SCALAR_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC3]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ITER_CHECK]] ] -; CHECK-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END4]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL9:%.*]] = phi i32 [ [[IND_END]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END4]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ITER_CHECK]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[BC_RESUME_VAL5]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[BC_RESUME_VAL9]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INC:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[TMP32:%.*]] = xor i32 [[I_014]], -1 ; CHECK-NEXT: [[SUB2:%.*]] = add i32 [[TMP32]], [[N]] ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[SUB2]] to i64 @@ -327,8 +325,7 @@ define void @f3(ptr noalias %A, i64 %n) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -383,8 +380,7 @@ define void @f3(ptr noalias %A, i64 %n) { ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; 
CHECK-PROFITABLE-BY-DEFAULT: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; CHECK-PROFITABLE-BY-DEFAULT: [[VEC_EPILOG_PH]]: ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -505,12 +501,12 @@ define void @induction_resume_value_requires_non_trivial_scev_expansion(ptr %dst ; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-NEXT: br i1 true, label %[[OUTER_LATCH]], label %[[VEC_EPILOG_SCALAR_PH]] ; CHECK: [[VEC_EPILOG_SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i64 [ 85, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 85, %[[VEC_EPILOG_ITER_CHECK]] ], [ 1, %[[ITER_CHECK]] ] -; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi i8 [ [[IND_END4]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END5]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL14:%.*]] = phi i64 [ 85, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 85, %[[VEC_EPILOG_ITER_CHECK]] ], [ 1, %[[ITER_CHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL15:%.*]] = phi i8 [ [[IND_END4]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END5]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; CHECK-NEXT: br label %[[INNER:.*]] ; CHECK: [[INNER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL3]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[INNER]] ] -; CHECK-NEXT: [[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL6]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[INNER]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL14]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[INNER]] ] +; CHECK-NEXT: [[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL15]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[INNER]] ] ; CHECK-NEXT: [[IV_2_NEXT]] = sub i8 [[IV_2]], [[TRUNC_ADD]] ; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] ; CHECK-NEXT: store i8 [[IV_2]], ptr [[GEP_DST]], align 1 @@ -592,12 +588,12 @@ define void @induction_resume_value_requires_non_trivial_scev_expansion(ptr %dst ; CHECK-PROFITABLE-BY-DEFAULT: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: br i1 true, label %[[OUTER_LATCH]], label %[[VEC_EPILOG_SCALAR_PH]] ; CHECK-PROFITABLE-BY-DEFAULT: [[VEC_EPILOG_SCALAR_PH]]: -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i64 [ 85, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 85, %[[VEC_EPILOG_ITER_CHECK]] ], [ 1, %[[ITER_CHECK]] ] -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[BC_RESUME_VAL6:%.*]] = phi i8 [ [[IND_END4]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END5]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[BC_RESUME_VAL14:%.*]] = phi i64 [ 85, %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ 85, %[[VEC_EPILOG_ITER_CHECK]] ], [ 1, %[[ITER_CHECK]] ] +; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[BC_RESUME_VAL15:%.*]] = phi i8 [ [[IND_END4]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END5]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: br label %[[INNER:.*]] ; CHECK-PROFITABLE-BY-DEFAULT: [[INNER]]: -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL3]], 
%[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[INNER]] ] -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL6]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[INNER]] ] +; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL14]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[INNER]] ] +; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL15]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[INNER]] ] ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[IV_2_NEXT]] = sub i8 [[IV_2]], [[TRUNC_ADD]] ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: store i8 [[IV_2]], ptr [[GEP_DST]], align 1 @@ -665,8 +661,7 @@ define void @f4(ptr noalias %A, i32 signext %n) { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK: [[VEC_EPILOG_PH]]: ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -691,10 +686,10 @@ define void @f4(ptr noalias %A, i32 signext %n) { ; CHECK-NEXT: [[CMP_N5:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC3]] ; CHECK-NEXT: br i1 [[CMP_N5]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]] ; CHECK: [[VEC_EPILOG_SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i64 [ [[N_VEC3]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL9:%.*]] = phi i64 [ [[N_VEC3]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL4]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL9]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[TMP11:%.*]] = trunc i64 [[IV]] to i32 ; CHECK-NEXT: [[CONV:%.*]] = trunc i32 [[TMP11]] to i8 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] @@ -732,8 +727,7 @@ define void @f4(ptr noalias %A, i32 signext %n) { ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK-PROFITABLE-BY-DEFAULT: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 +; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 2 ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; CHECK-PROFITABLE-BY-DEFAULT: [[VEC_EPILOG_PH]]: ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], 
%[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -758,10 +752,10 @@ define void @f4(ptr noalias %A, i32 signext %n) { ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[CMP_N5:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC3]] ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: br i1 [[CMP_N5]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]] ; CHECK-PROFITABLE-BY-DEFAULT: [[VEC_EPILOG_SCALAR_PH]]: -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i64 [ [[N_VEC3]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] +; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[BC_RESUME_VAL9:%.*]] = phi i64 [ [[N_VEC3]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: br label %[[LOOP:.*]] ; CHECK-PROFITABLE-BY-DEFAULT: [[LOOP]]: -; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL4]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL9]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[TMP11:%.*]] = trunc i64 [[IV]] to i32 ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[CONV:%.*]] = trunc i32 [[TMP11]] to i8 ; CHECK-PROFITABLE-BY-DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] diff --git a/llvm/test/Transforms/LoopVectorize/optsize.ll b/llvm/test/Transforms/LoopVectorize/optsize.ll index 819cfaadeecbf..9f82795e1f71c 100644 --- a/llvm/test/Transforms/LoopVectorize/optsize.ll +++ b/llvm/test/Transforms/LoopVectorize/optsize.ll @@ -273,19 +273,8 @@ define void @pr43371() optsize { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY29:.*]] ; CHECK: [[FOR_COND_CLEANUP28]]: ; CHECK-NEXT: unreachable -; CHECK: [[FOR_BODY29]]: -; CHECK-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; CHECK-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; CHECK-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; CHECK-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1 -; CHECK-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; CHECK-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756 -; CHECK-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; PGSO-LABEL: define void @pr43371( ; PGSO-SAME: ) #[[ATTR0]] { @@ -310,19 +299,8 @@ define void @pr43371() optsize { ; PGSO-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; PGSO: [[SCALAR_PH:.*]]: -; PGSO-NEXT: br label %[[FOR_BODY29:.*]] ; PGSO: [[FOR_COND_CLEANUP28]]: ; PGSO-NEXT: unreachable -; PGSO: [[FOR_BODY29]]: -; PGSO-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; PGSO-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; PGSO-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; PGSO-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; PGSO-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1 -; PGSO-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; PGSO-NEXT: [[CMP26:%.*]] = icmp ult i16 
[[INC37]], 756 -; PGSO-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; NPGSO-LABEL: define void @pr43371( ; NPGSO-SAME: ) #[[ATTR0]] { @@ -347,19 +325,8 @@ define void @pr43371() optsize { ; NPGSO-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; NPGSO: [[SCALAR_PH:.*]]: -; NPGSO-NEXT: br label %[[FOR_BODY29:.*]] ; NPGSO: [[FOR_COND_CLEANUP28]]: ; NPGSO-NEXT: unreachable -; NPGSO: [[FOR_BODY29]]: -; NPGSO-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; NPGSO-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; NPGSO-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; NPGSO-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; NPGSO-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1 -; NPGSO-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; NPGSO-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756 -; NPGSO-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; We do not want to generate SCEV predicates when optimising for size, because ; that will lead to extra code generation such as the SCEV overflow runtime @@ -407,19 +374,8 @@ define void @pr43371_pgso() !prof !14 { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY29:.*]] ; CHECK: [[FOR_COND_CLEANUP28]]: ; CHECK-NEXT: unreachable -; CHECK: [[FOR_BODY29]]: -; CHECK-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; CHECK-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; CHECK-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; CHECK-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1 -; CHECK-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; CHECK-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756 -; CHECK-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; PGSO-LABEL: define void @pr43371_pgso( ; PGSO-SAME: ) !prof [[PROF14]] { @@ -444,19 +400,8 @@ define void @pr43371_pgso() !prof !14 { ; PGSO-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[FOR_COND_CLEANUP28:.*]] -; PGSO: [[SCALAR_PH:.*]]: -; PGSO-NEXT: br label %[[FOR_BODY29:.*]] ; PGSO: [[FOR_COND_CLEANUP28]]: ; PGSO-NEXT: unreachable -; PGSO: [[FOR_BODY29]]: -; PGSO-NEXT: [[I24_0170:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[INC37:%.*]], %[[FOR_BODY29]] ] -; PGSO-NEXT: [[ADD33:%.*]] = add i16 undef, [[I24_0170]] -; PGSO-NEXT: [[IDXPROM34:%.*]] = zext i16 [[ADD33]] to i32 -; PGSO-NEXT: [[ARRAYIDX35:%.*]] = getelementptr [2592 x i16], ptr @cm_array, i32 0, i32 [[IDXPROM34]] -; PGSO-NEXT: store i16 0, ptr [[ARRAYIDX35]], align 1 -; PGSO-NEXT: [[INC37]] = add i16 [[I24_0170]], 1 -; PGSO-NEXT: [[CMP26:%.*]] = icmp ult i16 [[INC37]], 756 -; PGSO-NEXT: br i1 [[CMP26]], label %[[FOR_BODY29]], label %[[FOR_COND_CLEANUP28]] ; ; NPGSO-LABEL: define void @pr43371_pgso( ; NPGSO-SAME: ) !prof [[PROF14]] { @@ -686,16 +631,6 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label 
%[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]] -; CHECK-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]] -; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; @@ -734,16 +669,6 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; PGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[FOR_END:.*]] -; PGSO: [[SCALAR_PH:.*]]: -; PGSO-NEXT: br label %[[FOR_BODY:.*]] -; PGSO: [[FOR_BODY]]: -; PGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; PGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]] -; PGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]] -; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 -; PGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; PGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; PGSO: [[FOR_END]]: ; PGSO-NEXT: ret void ; @@ -782,16 +707,6 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; NPGSO-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: br label %[[FOR_END:.*]] -; NPGSO: [[SCALAR_PH:.*]]: -; NPGSO-NEXT: br label %[[FOR_BODY:.*]] -; NPGSO: [[FOR_BODY]]: -; NPGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; NPGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]] -; NPGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]] -; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 -; NPGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; NPGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; NPGSO: [[FOR_END]]: ; NPGSO-NEXT: ret void ; @@ -830,7 +745,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; CHECK-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: @@ -843,7 +758,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; 
CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; @@ -862,7 +777,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; PGSO-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4 ; PGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2 ; PGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024 -; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; PGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; PGSO: [[MIDDLE_BLOCK]]: ; PGSO-NEXT: br label %[[SCALAR_PH]] ; PGSO: [[SCALAR_PH]]: @@ -875,7 +790,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 ; PGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; PGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; PGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; PGSO: [[FOR_END]]: ; PGSO-NEXT: ret void ; @@ -894,7 +809,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; NPGSO-NEXT: store <2 x i16> splat (i16 42), ptr [[TMP1]], align 4 ; NPGSO-NEXT: [[INDEX_NEXT]] = add nuw i32 [[TMP0]], 2 ; NPGSO-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024 -; NPGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; NPGSO-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: br label %[[SCALAR_PH]] ; NPGSO: [[SCALAR_PH]]: @@ -907,7 +822,7 @@ define void @stride1_pgso(ptr noalias %B, i32 %BStride) !prof !14 { ; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 ; NPGSO-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; NPGSO-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1025 -; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; NPGSO-NEXT: br i1 [[EXITCOND]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; NPGSO: [[FOR_END]]: ; NPGSO-NEXT: ret void ; @@ -1092,10 +1007,8 @@ exit: ; CHECK: [[META17]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META16]], [[META17]]} ; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META16]], [[META17]]} -; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META21:![0-9]+]]} -; CHECK: [[META21]] = !{!"llvm.loop.vectorize.enable", i1 true} -; CHECK: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]} -; CHECK: [[LOOP23]] = distinct !{[[LOOP23]], [[META16]]} +; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META16]], [[META17]]} +; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]]} ;. 
; PGSO: [[PROF14]] = !{!"function_entry_count", i64 0} ; PGSO: [[LOOP15]] = distinct !{[[LOOP15]], [[META16:![0-9]+]], [[META17:![0-9]+]]} @@ -1103,10 +1016,8 @@ exit: ; PGSO: [[META17]] = !{!"llvm.loop.unroll.runtime.disable"} ; PGSO: [[LOOP18]] = distinct !{[[LOOP18]], [[META16]], [[META17]]} ; PGSO: [[LOOP19]] = distinct !{[[LOOP19]], [[META16]], [[META17]]} -; PGSO: [[LOOP20]] = distinct !{[[LOOP20]], [[META21:![0-9]+]]} -; PGSO: [[META21]] = !{!"llvm.loop.vectorize.enable", i1 true} -; PGSO: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]} -; PGSO: [[LOOP23]] = distinct !{[[LOOP23]], [[META16]]} +; PGSO: [[LOOP20]] = distinct !{[[LOOP20]], [[META16]], [[META17]]} +; PGSO: [[LOOP21]] = distinct !{[[LOOP21]], [[META16]]} ;. ; NPGSO: [[PROF14]] = !{!"function_entry_count", i64 0} ; NPGSO: [[LOOP15]] = distinct !{[[LOOP15]], [[META16:![0-9]+]], [[META17:![0-9]+]]} @@ -1119,8 +1030,6 @@ exit: ; NPGSO: [[LOOP22]] = distinct !{[[LOOP22]], [[META16]], [[META17]]} ; NPGSO: [[LOOP23]] = distinct !{[[LOOP23]], [[META17]], [[META16]]} ; NPGSO: [[LOOP24]] = distinct !{[[LOOP24]], [[META16]], [[META17]]} -; NPGSO: [[LOOP25]] = distinct !{[[LOOP25]], [[META26:![0-9]+]]} -; NPGSO: [[META26]] = !{!"llvm.loop.vectorize.enable", i1 true} -; NPGSO: [[LOOP27]] = distinct !{[[LOOP27]], [[META16]], [[META17]]} -; NPGSO: [[LOOP28]] = distinct !{[[LOOP28]], [[META16]]} +; NPGSO: [[LOOP25]] = distinct !{[[LOOP25]], [[META16]], [[META17]]} +; NPGSO: [[LOOP26]] = distinct !{[[LOOP26]], [[META16]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/phi-cost.ll b/llvm/test/Transforms/LoopVectorize/phi-cost.ll index bf5631c783fe9..7b5d0b69639fa 100644 --- a/llvm/test/Transforms/LoopVectorize/phi-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/phi-cost.ll @@ -185,13 +185,9 @@ define i32 @red_phi_0(i32 %start, ptr %src) { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 -; CHECK-NEXT: br i1 [[TMP1]], label %[[SCALAR_PH:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] -; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: br i1 [[TMP1]], label %[[SCALAR_PH1:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: [[SCALAR_PH1]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH1:.*:]] -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: br i1 poison, label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> , i32 [[START]], i64 0 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[TMP0]]) diff --git a/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll b/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll index a25632562009c..f2d6834c91d53 100644 --- a/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll +++ b/llvm/test/Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll @@ -29,22 +29,6 @@ define void @pr154045(ptr %p, i1 %c, i64 %x) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: [[REM:%.*]] = srem i64 
0, [[X]] -; CHECK-NEXT: br label %[[LATCH]] -; CHECK: [[LATCH]]: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[REM]], %[[ELSE]] ], [ 0, %[[LOOP]] ] -; CHECK-NEXT: [[PHI_TRUNC:%.*]] = trunc i64 [[PHI]] to i32 -; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[PHI_TRUNC]], 0 -; CHECK-NEXT: store i32 [[SHL]], ptr [[P]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/pr32859.ll b/llvm/test/Transforms/LoopVectorize/pr32859.ll index a29a6bd735feb..2d30e0c9ad10f 100644 --- a/llvm/test/Transforms/LoopVectorize/pr32859.ll +++ b/llvm/test/Transforms/LoopVectorize/pr32859.ll @@ -10,7 +10,7 @@ ; CHECK: %e.0.ph = phi i32 [ 0, %if.end.2.i ], [ 0, %middle.block ] ; Function Attrs: nounwind uwtable -define void @main() #0 { +define void @main(i32 %n) #0 { entry: br label %for.cond1.preheader.i @@ -21,7 +21,7 @@ for.cond1.preheader.i: ; preds = %if.end.2.i, %entry if.end.2.i: ; preds = %for.cond1.preheader.i %inc5.i = add nsw i32 %c.06.i, 1 - %cmp.i = icmp slt i32 %inc5.i, 16 + %cmp.i = icmp slt i32 %inc5.i, %n br i1 %cmp.i, label %for.cond1.preheader.i, label %for.cond.preheader for.cond.preheader: ; preds = %if.end.2.i diff --git a/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll b/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll index b0e2ae6524491..98963a72c5ad0 100644 --- a/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll +++ b/llvm/test/Transforms/LoopVectorize/pr36983-multiple-lcssa.ll @@ -20,18 +20,8 @@ define i16 @duplicate_lcssa(i16 %val) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI1:%.*]] = extractelement <4 x i16> [[TMP0]], i32 2 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[RES:%.*]] = phi i16 [ [[VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = sub nsw i16 [[IV]], 1 -; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i16 [[IV_NEXT]], 0 -; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[EXIT]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[LCSSA_1:%.*]] = phi i16 [ [[RES]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI1]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[LCSSA_2:%.*]] = phi i16 [ [[RES]], %[[LOOP]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI1]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[LCSSA_2]] +; CHECK-NEXT: ret i16 [[VECTOR_RECUR_EXTRACT_FOR_PHI1]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll b/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll index d1b912d47a0ce..a1cb361d20bee 100644 --- a/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll +++ b/llvm/test/Transforms/LoopVectorize/pr44488-predication.ll @@ -43,26 +43,7 @@ define i16 @test_true_and_false_branch_equal() { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 12 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_07:%.*]] = phi i16 [ 99, [[SCALAR_PH:%.*]] ], [ [[INC7:%.*]], [[FOR_LATCH:%.*]] ] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr @v_38, align 1 -; CHECK-NEXT: 
[[CMP1:%.*]] = icmp eq i16 [[LV]], 32767 -; CHECK-NEXT: br i1 [[CMP1]], label [[COND_END:%.*]], label [[COND_END]] -; CHECK: cond.end: -; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i16 [[LV]], 0 -; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_LATCH]], label [[COND_FALSE4:%.*]] -; CHECK: cond.false4: -; CHECK-NEXT: [[REM:%.*]] = srem i16 5786, [[LV]] -; CHECK-NEXT: br label [[FOR_LATCH]] -; CHECK: for.latch: -; CHECK-NEXT: [[COND6:%.*]] = phi i16 [ [[REM]], [[COND_FALSE4]] ], [ 5786, [[COND_END]] ] -; CHECK-NEXT: store i16 [[COND6]], ptr @v_39, align 1 -; CHECK-NEXT: [[INC7]] = add nsw i16 [[I_07]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[INC7]], 111 -; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[EXIT]] +; CHECK-NEXT: br label [[FOR_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: [[RV:%.*]] = load i16, ptr @v_39, align 1 ; CHECK-NEXT: ret i16 [[RV]] diff --git a/llvm/test/Transforms/LoopVectorize/pr45259.ll b/llvm/test/Transforms/LoopVectorize/pr45259.ll index fade7264f6494..f33437fd8ebde 100644 --- a/llvm/test/Transforms/LoopVectorize/pr45259.ll +++ b/llvm/test/Transforms/LoopVectorize/pr45259.ll @@ -10,16 +10,15 @@ define i8 @widget(ptr %arr, i8 %t9) { ; CHECK-NEXT: br label [[BB6:%.*]] ; CHECK: bb6: ; CHECK-NEXT: [[T1_0:%.*]] = phi ptr [ [[ARR]], [[BB:%.*]] ], [ null, [[BB6]] ] +; CHECK-NEXT: [[T1_0_LCSSA2:%.*]] = ptrtoint ptr [[T1_0]] to i64 ; CHECK-NEXT: [[C:%.*]] = call i1 @cond() ; CHECK-NEXT: br i1 [[C]], label [[FOR_PREHEADER:%.*]], label [[BB6]] ; CHECK: for.preheader: -; CHECK-NEXT: [[T1_0_LCSSA:%.*]] = phi ptr [ [[T1_0]], [[BB6]] ] ; CHECK-NEXT: [[T1_0_LCSSA4:%.*]] = phi ptr [ [[T1_0]], [[BB6]] ] ; CHECK-NEXT: [[T1_0_LCSSA1:%.*]] = phi ptr [ [[T1_0]], [[BB6]] ] -; CHECK-NEXT: [[T1_0_LCSSA3:%.*]] = ptrtoint ptr [[T1_0_LCSSA]] to i64 -; CHECK-NEXT: [[T1_0_LCSSA2:%.*]] = ptrtoint ptr [[T1_0_LCSSA4]] to i64 ; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[ARR1]] to i32 ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[TMP0]] +; CHECK-NEXT: [[T1_0_LCSSA3:%.*]] = ptrtoint ptr [[T1_0_LCSSA4]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[T1_0_LCSSA3]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP3]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll index 8450db69ecb68..9ed35fb0a79e8 100644 --- a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll +++ b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll @@ -57,16 +57,7 @@ define void @pr45679(ptr %A) { ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14 -; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -118,16 +109,7 @@ define void @pr45679(ptr %A) { ; VF2UF2-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; VF2UF2-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP0:![0-9]+]] ; VF2UF2: middle.block: -; VF2UF2-NEXT: br label [[EXIT:%.*]] -; VF2UF2: scalar.ph: ; VF2UF2-NEXT: br label [[LOOP:%.*]] -; VF2UF2: loop: -; VF2UF2-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] -; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; VF2UF2-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; VF2UF2-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; VF2UF2-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14 -; VF2UF2-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]] ; VF2UF2: exit: ; VF2UF2-NEXT: ret void ; @@ -174,16 +156,7 @@ define void @pr45679(ptr %A) { ; VF1UF4-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; VF1UF4-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF1UF4: middle.block: -; VF1UF4-NEXT: br label [[EXIT:%.*]] -; VF1UF4: scalar.ph: ; VF1UF4-NEXT: br label [[LOOP:%.*]] -; VF1UF4: loop: -; VF1UF4-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] -; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] -; VF1UF4-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 -; VF1UF4-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 -; VF1UF4-NEXT: [[COND:%.*]] = icmp eq i32 [[RIVPLUS1]], 14 -; VF1UF4-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP]] ; VF1UF4: exit: ; VF1UF4-NEXT: ret void ; @@ -253,17 +226,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) { ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 14 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; @@ -319,17 +282,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) { ; VF2UF2-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; VF2UF2-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF2UF2: middle.block: -; VF2UF2-NEXT: br label [[FOR_END:%.*]] -; VF2UF2: scalar.ph: ; VF2UF2-NEXT: br label [[FOR_BODY:%.*]] -; VF2UF2: for.body: -; VF2UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; VF2UF2-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; VF2UF2-NEXT: store i64 [[V]], ptr [[B]], align 8 -; VF2UF2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF2UF2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 14 -; VF2UF2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; VF2UF2: for.end: ; VF2UF2-NEXT: ret void ; @@ -380,17 +333,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) { ; VF1UF4-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; VF1UF4-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP3:![0-9]+]] ; VF1UF4: middle.block: -; VF1UF4-NEXT: br label [[FOR_END:%.*]] -; VF1UF4: scalar.ph: ; VF1UF4-NEXT: br label [[FOR_BODY:%.*]] -; VF1UF4: for.body: -; VF1UF4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; VF1UF4-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; VF1UF4-NEXT: store i64 [[V]], ptr [[B]], align 8 -; VF1UF4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; VF1UF4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 14 -; VF1UF4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; VF1UF4: for.end: ; VF1UF4-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll index 673d582b2b177..01c6c3f23b5a4 100644 --- a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll +++ b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll @@ -31,23 +31,13 @@ define void @test(i16 %x, i64 %y, ptr %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[LOOP_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4 -; CHECK-NEXT: [[V2:%.*]] = trunc i64 [[IV]] to i8 -; CHECK-NEXT: [[V3:%.*]] = add i8 [[V2]], 1 -; CHECK-NEXT: [[CMP15:%.*]] = icmp slt i8 [[V3]], 5 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[INC]] -; CHECK-NEXT: br i1 [[CMP15]], label [[LOOP]], label [[LOOP_EXIT]] ; CHECK: loop.exit: ; CHECK-NEXT: [[DIV_1:%.*]] = udiv i64 [[Y]], [[ADD]] ; CHECK-NEXT: [[V1:%.*]] = add i64 [[DIV_1]], 1 ; CHECK-NEXT: br label [[LOOP_2:%.*]] ; CHECK: loop.2: -; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[IV_NEXT_1:%.*]], [[LOOP_2]] ], [ 0, [[LOOP_EXIT]] ] +; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[IV_NEXT_1:%.*]], [[LOOP_2]] ], [ 0, [[LOOP]] ] ; CHECK-NEXT: [[IV_NEXT_1]] = add i64 [[IV_1]], [[V1]] ; CHECK-NEXT: call void @use(i64 [[IV_NEXT_1]]) ; CHECK-NEXT: [[EC:%.*]] = icmp ult i64 [[IV_NEXT_1]], 200 diff --git a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll index 047d36bafbf88..b9cb1cb5abae8 100644 --- a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll +++ b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll @@ -28,18 +28,15 @@ define void @f() { ; CHECK: outer.latch: ; CHECK-NEXT: br label [[OUTER_HEADER]] ; CHECK: outer.exit.0: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi ptr [ [[TMP0]], [[OUTER_HEADER]] ] ; CHECK-NEXT: br label [[LOOP_PREHEADER:%.*]] ; CHECK: outer.exit.1: -; CHECK-NEXT: [[DOTLCSSA1:%.*]] = phi ptr [ [[TMP0]], [[INNER_1_LATCH]] ] ; CHECK-NEXT: br label [[LOOP_PREHEADER]] ; CHECK: loop.preheader: -; CHECK-NEXT: [[TMP1:%.*]] = phi ptr [ [[DOTLCSSA]], [[OUTER_EXIT_0]] ], [ [[DOTLCSSA1]], [[OUTER_EXIT_1]] ] ; CHECK-NEXT: br label [[VECTOR_MEMCHECK:%.*]] ; CHECK: vector.memcheck: -; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TMP1]], i64 1 +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 1 ; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr @f.e, [[SCEVGEP]] -; 
CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[TMP1]], getelementptr inbounds nuw (i8, ptr @f.e, i64 4) +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[TMP0]], getelementptr inbounds nuw (i8, ptr @f.e, i64 4) ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: @@ -59,7 +56,7 @@ define void @f() { ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[CONV6_US_US_US:%.*]] = zext i1 false to i32 ; CHECK-NEXT: store i32 [[CONV6_US_US_US]], ptr @f.e, align 1 -; CHECK-NEXT: store i8 10, ptr [[TMP1]], align 1 +; CHECK-NEXT: store i8 10, ptr [[TMP0]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 500 ; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll index 75437fe01589b..615ea062afd53 100644 --- a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll +++ b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll @@ -61,24 +61,9 @@ define dso_local i16 @reverse_interleave_load_fold_mask() optsize { ; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP28:%.*]] = call i16 @llvm.vector.reduce.add.v2i16(<2 x i16> [[TMP26]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 41, [[SCALAR_PH:%.*]] ], [ [[IVMINUS1:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[PREVSUM:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IVMINUS1]] = add nsw i16 [[IV]], -1 -; CHECK-NEXT: [[GEPA0:%.*]] = getelementptr inbounds [40 x [4 x i16]], ptr @A, i16 0, i16 [[IVMINUS1]], i16 0 -; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[GEPA0]], align 1 -; CHECK-NEXT: [[GEPA3:%.*]] = getelementptr inbounds [40 x [4 x i16]], ptr @A, i16 0, i16 [[IVMINUS1]], i16 3 -; CHECK-NEXT: [[TMP30:%.*]] = load i16, ptr [[GEPA3]], align 1 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i16 [[TMP29]], [[TMP30]] -; CHECK-NEXT: [[PREVSUM]] = add nsw i16 [[SUM]], [[ADD]] -; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i16 [[IV]], 1 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[PREVSUM_LCSSA:%.*]] = phi i16 [ [[PREVSUM]], [[LOOP]] ], [ [[TMP28]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i16 [[PREVSUM_LCSSA]] +; CHECK-NEXT: ret i16 [[TMP28]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll b/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll index 637b4abf7b14f..7b3500933314a 100644 --- a/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll +++ b/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll @@ -33,31 +33,9 @@ define i32 @test(i32 %a, i1 %c.1, i1 %c.2 ) #0 { ; CHECK: middle.block: ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[PREDPHI7]]) ; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i32> [[PREDPHI5]], i32 1 -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 6, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: 
[[V_2:%.*]] = phi i32 [ 35902, [[SCALAR_PH]] ], [ [[P_2:%.*]], [[LOOP_LATCH]] ] -; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_LATCH]], label [[BODY_1:%.*]] -; CHECK: body.1: -; CHECK-NEXT: [[V_2_ADD:%.*]] = add i32 [[V_2]], 10 -; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_LATCH]], label [[BODY_2:%.*]] -; CHECK: body.2: -; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[V_2_ADD]], 20 -; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A]], 1 -; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[ADD_1]], [[XOR]] -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[P_1:%.*]] = phi i32 [ [[IV]], [[LOOP_HEADER]] ], [ 9, [[BODY_1]] ], [ 9, [[BODY_2]] ] -; CHECK-NEXT: [[P_2]] = phi i32 [ [[V_2]], [[LOOP_HEADER]] ], [ [[V_2_ADD]], [[BODY_1]] ], [ [[ADD_2]], [[BODY_2]] ] -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp ult i32 [[IV]], 181 -; CHECK-NEXT: br i1 [[EC]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: -; CHECK-NEXT: [[E_1:%.*]] = phi i32 [ [[P_1]], [[LOOP_LATCH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[E_2:%.*]] = phi i32 [ [[P_2]], [[LOOP_LATCH]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[RES:%.*]] = add i32 [[E_1]], [[E_2]] +; CHECK-NEXT: [[RES:%.*]] = add i32 [[TMP9]], [[TMP10]] ; CHECK-NEXT: ret i32 [[RES]] ; bb: diff --git a/llvm/test/Transforms/LoopVectorize/pr66616.ll b/llvm/test/Transforms/LoopVectorize/pr66616.ll index d5b2519109385..1ef614ab32472 100644 --- a/llvm/test/Transforms/LoopVectorize/pr66616.ll +++ b/llvm/test/Transforms/LoopVectorize/pr66616.ll @@ -18,41 +18,32 @@ define void @pr66616(ptr %ptr) { ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[PREHEADER:%.*]] -; CHECK: scalar.ph: +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP0]], [[VECTOR_BODY]] ] ; CHECK-NEXT: br label [[LOOP_1:%.*]] -; CHECK: loop.1: -; CHECK-NEXT: [[IV_1:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[INC:%.*]], [[LOOP_1]] ] -; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[PTR]], align 4 -; CHECK-NEXT: [[ADD3:%.*]] = add i32 [[LOAD]], 1 -; CHECK-NEXT: [[INC]] = add i8 [[IV_1]], 1 -; CHECK-NEXT: [[COND1:%.*]] = icmp eq i8 [[INC]], 0 -; CHECK-NEXT: br i1 [[COND1]], label [[PREHEADER]], label [[LOOP_1]] ; CHECK: preheader: -; CHECK-NEXT: [[ADD3_LCSSA:%.*]] = phi i32 [ [[ADD3]], [[LOOP_1]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[TMP4:%.*]] = sub i32 0, [[ADD3_LCSSA]] +; CHECK-NEXT: [[TMP4:%.*]] = sub i32 -1, [[DOTLCSSA]] ; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 ; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP5]], 1 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP6]], 4 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH1:%.*]], label [[VECTOR_PH2:%.*]] -; CHECK: vector.ph2: +; CHECK: vector.ph1: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP6]], 4 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP6]], [[N_MOD_VF]] ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; CHECK-NEXT: [[IND_END:%.*]] = add i32 [[ADD3_LCSSA]], [[DOTCAST]] +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP3]], [[DOTCAST]] ; CHECK-NEXT: [[IND_END5:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[N_VEC]] ; CHECK-NEXT: br label [[VECTOR_BODY3:%.*]] -; CHECK: vector.body3: +; CHECK: vector.body2: ; CHECK-NEXT: [[INDEX8:%.*]] = phi i64 [ 0, [[VECTOR_PH2]] ], [ [[INDEX_NEXT9:%.*]], [[VECTOR_BODY3]] ] ; CHECK-NEXT: [[INDEX_NEXT9]] = add nuw 
i64 [[INDEX8]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT9]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK6:%.*]], label [[VECTOR_BODY3]], !llvm.loop [[LOOP3:![0-9]+]] -; CHECK: middle.block6: +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK5:%.*]], label [[VECTOR_BODY3]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: middle.block5: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP6]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH1]] -; CHECK: scalar.ph1: -; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK6]] ], [ [[ADD3_LCSSA]], [[PREHEADER]] ] -; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END5]], [[MIDDLE_BLOCK6]] ], [ [[PTR]], [[PREHEADER]] ] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[TMP8]], [[MIDDLE_BLOCK5]] ], [ [[TMP3]], [[LOOP_1]] ] +; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END5]], [[MIDDLE_BLOCK5]] ], [ [[PTR]], [[LOOP_1]] ] ; CHECK-NEXT: br label [[LOOP_2:%.*]] ; CHECK: loop.2: ; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ [[IV_2_I:%.*]], [[LOOP_2]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH1]] ] diff --git a/llvm/test/Transforms/LoopVectorize/predicate-switch.ll b/llvm/test/Transforms/LoopVectorize/predicate-switch.ll index 70428f0c07cac..565e203e68f72 100644 --- a/llvm/test/Transforms/LoopVectorize/predicate-switch.ll +++ b/llvm/test/Transforms/LoopVectorize/predicate-switch.ll @@ -425,20 +425,6 @@ define void @switch_all_to_default(ptr %start) { ; IC1-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IC1: [[MIDDLE_BLOCK]]: ; IC1-NEXT: br label %[[EXIT:.*]] -; IC1: [[SCALAR_PH:.*]]: -; IC1-NEXT: br label %[[LOOP_HEADER:.*]] -; IC1: [[LOOP_HEADER]]: -; IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; IC1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IC1-NEXT: switch i64 [[IV]], label %[[LOOP_LATCH]] [ -; IC1-NEXT: i64 120, label %[[LOOP_LATCH]] -; IC1-NEXT: i64 100, label %[[LOOP_LATCH]] -; IC1-NEXT: ] -; IC1: [[LOOP_LATCH]]: -; IC1-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[START]], i64 [[IV]] -; IC1-NEXT: store i64 42, ptr [[GEP]], align 1 -; IC1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; IC1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; IC1: [[EXIT]]: ; IC1-NEXT: ret void ; @@ -459,20 +445,6 @@ define void @switch_all_to_default(ptr %start) { ; IC2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; IC2: [[MIDDLE_BLOCK]]: ; IC2-NEXT: br label %[[EXIT:.*]] -; IC2: [[SCALAR_PH:.*]]: -; IC2-NEXT: br label %[[LOOP_HEADER:.*]] -; IC2: [[LOOP_HEADER]]: -; IC2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; IC2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IC2-NEXT: switch i64 [[IV]], label %[[LOOP_LATCH]] [ -; IC2-NEXT: i64 120, label %[[LOOP_LATCH]] -; IC2-NEXT: i64 100, label %[[LOOP_LATCH]] -; IC2-NEXT: ] -; IC2: [[LOOP_LATCH]]: -; IC2-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[START]], i64 [[IV]] -; IC2-NEXT: store i64 42, ptr [[GEP]], align 1 -; IC2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; IC2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; IC2: [[EXIT]]: ; IC2-NEXT: ret void ; @@ -513,21 +485,6 @@ define void @switch_unconditional(ptr %start) { ; IC1-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IC1: [[MIDDLE_BLOCK]]: ; IC1-NEXT: br 
label %[[EXIT:.*]] -; IC1: [[SCALAR_PH:.*]]: -; IC1-NEXT: br label %[[LOOP:.*]] -; IC1: [[LOOP]]: -; IC1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; IC1-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[START]], i64 [[IV]] -; IC1-NEXT: [[X:%.*]] = load i32, ptr [[GEP]], align 4 -; IC1-NEXT: switch i32 [[X]], label %[[FOO:.*]] [ -; IC1-NEXT: ] -; IC1: [[FOO]]: -; IC1-NEXT: br label %[[LATCH]] -; IC1: [[LATCH]]: -; IC1-NEXT: store i32 0, ptr [[GEP]], align 4 -; IC1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IC1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; IC1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP]] ; IC1: [[EXIT]]: ; IC1-NEXT: ret void ; @@ -548,21 +505,6 @@ define void @switch_unconditional(ptr %start) { ; IC2-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IC2: [[MIDDLE_BLOCK]]: ; IC2-NEXT: br label %[[EXIT:.*]] -; IC2: [[SCALAR_PH:.*]]: -; IC2-NEXT: br label %[[LOOP:.*]] -; IC2: [[LOOP]]: -; IC2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; IC2-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[START]], i64 [[IV]] -; IC2-NEXT: [[X:%.*]] = load i32, ptr [[GEP]], align 4 -; IC2-NEXT: switch i32 [[X]], label %[[FOO:.*]] [ -; IC2-NEXT: ] -; IC2: [[FOO]]: -; IC2-NEXT: br label %[[LATCH]] -; IC2: [[LATCH]]: -; IC2-NEXT: store i32 0, ptr [[GEP]], align 4 -; IC2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; IC2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; IC2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP]] ; IC2: [[EXIT]]: ; IC2-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll index dfdaaf14114cc..52555d550f3d9 100644 --- a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll +++ b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll @@ -58,26 +58,6 @@ define void @loop_invariant_store(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2 -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48 -; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52 -; CHECK-NEXT: [[TRUNC_I32:%.*]] = trunc i64 [[ASHR]] to i32 -; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]] -; CHECK: [[COND_FALSE]]: -; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], %[[LOOP_HEADER]] ], [ [[ZEXT]], %[[COND_FALSE]] ] -; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 8 -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8 -; CHECK-NEXT: store i8 [[TRUNC]], ptr [[P]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV]], 8 -; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -174,28 +154,6 @@ define void @loop_invariant_srem(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; 
CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i8 [[IV]], 2 -; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48 -; CHECK-NEXT: [[ASHR:%.*]] = ashr i64 [[SHL]], 52 -; CHECK-NEXT: [[TRUNC_I32:%.*]] = trunc i64 [[ASHR]] to i32 -; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]] -; CHECK: [[COND_FALSE]]: -; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[B]] to i32 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TRUNC_I32]], %[[LOOP_HEADER]] ], [ [[ZEXT]], %[[COND_FALSE]] ] -; CHECK-NEXT: [[SHL_I32:%.*]] = shl i32 [[COND]], 8 -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SHL_I32]] to i8 -; CHECK-NEXT: [[REM:%.*]] = srem i8 [[IV]], [[TRUNC]] -; CHECK-NEXT: [[GEP_P_REM:%.*]] = getelementptr i32, ptr [[P]], i8 [[REM]] -; CHECK-NEXT: store i32 4, ptr [[GEP_P_REM]], align 4 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV]], 8 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -245,19 +203,6 @@ define void @loop_invariant_float_store(ptr %p, i32 %a) { ; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2 -; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]] -; CHECK: [[COND_FALSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: store float [[TMP10]], ptr [[P]], align 4 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp slt i32 [[IV]], 8 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -315,19 +260,6 @@ define void @test_store_to_invariant_address_needs_mask_due_to_low_trip_count(pt ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] -; CHECK: [[ELSE]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ 1, %[[LOOP_HEADER]] ], [ 0, %[[ELSE]] ] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[DST]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 3 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll index 14526afc46088..6542c42678cc5 100644 --- a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll +++ b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll @@ -27,17 +27,6 @@ define void 
@_Z3fooPf(ptr %a) { ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[P:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = fmul float [[P]], 2.000000e+00 -; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; @@ -58,25 +47,8 @@ define void @_Z3fooPf(ptr %a) { ; DEBUGLOC-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG24]], !llvm.loop [[LOOP25:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: ; DEBUGLOC-NEXT: br label %[[FOR_END:.*]], !dbg [[DBG24]] -; DEBUGLOC: [[SCALAR_PH:.*]]: -; DEBUGLOC-NEXT: br label %[[FOR_BODY:.*]], !dbg [[DBG18]] -; DEBUGLOC: [[FOR_BODY]]: -; DEBUGLOC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], !dbg [[DBG19]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[INDVARS_IV]], [[META9:![0-9]+]], !DIExpression(), [[DBG19]]) -; DEBUGLOC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]], !dbg [[DBG20]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[ARRAYIDX]], [[META11:![0-9]+]], !DIExpression(), [[DBG20]]) -; DEBUGLOC-NEXT: [[P:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !dbg [[DBG21]] -; DEBUGLOC-NEXT: #dbg_value(float [[P]], [[META12:![0-9]+]], !DIExpression(), [[DBG21]]) -; DEBUGLOC-NEXT: [[MUL:%.*]] = fmul float [[P]], 2.000000e+00, !dbg [[DBG22]] -; DEBUGLOC-NEXT: #dbg_value(float [[MUL]], [[META14:![0-9]+]], !DIExpression(), [[DBG22]]) -; DEBUGLOC-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4, !dbg [[DBG23]] -; DEBUGLOC-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1, !dbg [[DBG28:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[INDVARS_IV_NEXT]], [[META15:![0-9]+]], !DIExpression(), [[DBG28]]) -; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024, !dbg [[DBG29:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META16:![0-9]+]], !DIExpression(), [[DBG29]]) -; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]], !dbg [[DBG24]], !llvm.loop [[LOOP30:![0-9]+]] ; DEBUGLOC: [[FOR_END]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG32:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG28:![0-9]+]] ; entry: br label %for.body @@ -122,7 +94,7 @@ define void @widen_ptr_induction_dbg(ptr %start, ptr %end) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 32 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label 
%[[SCALAR_PH]] @@ -134,54 +106,54 @@ define void @widen_ptr_induction_dbg(ptr %start, ptr %end) { ; CHECK-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i64 1 ; CHECK-NEXT: store ptr [[IV]], ptr [[IV]], align 1 ; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]] -; CHECK-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; ; DEBUGLOC-LABEL: define void @widen_ptr_induction_dbg( -; DEBUGLOC-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) !dbg [[DBG33:![0-9]+]] { +; DEBUGLOC-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) !dbg [[DBG29:![0-9]+]] { ; DEBUGLOC-NEXT: [[ENTRY:.*]]: -; DEBUGLOC-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64, !dbg [[DBG38:![0-9]+]] -; DEBUGLOC-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64, !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[END1]], -8, !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]], !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3, !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1, !dbg [[DBG38]] -; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 4, !dbg [[DBG38]] -; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG38]] +; DEBUGLOC-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64, !dbg [[DBG34:![0-9]+]] +; DEBUGLOC-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64, !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[END1]], -8, !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]], !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 3, !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1, !dbg [[DBG34]] +; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 4, !dbg [[DBG34]] +; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG34]] ; DEBUGLOC: [[VECTOR_PH]]: ; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 4 ; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] ; DEBUGLOC-NEXT: [[TMP4:%.*]] = mul i64 [[N_VEC]], 8 ; DEBUGLOC-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP4]] -; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG38]] +; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG34]] ; DEBUGLOC: [[VECTOR_BODY]]: ; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; DEBUGLOC-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG39:![0-9]+]] -; DEBUGLOC-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> , !dbg [[DBG39]] -; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x ptr> [[VECTOR_GEP]], i32 0, !dbg [[DBG40:![0-9]+]] -; DEBUGLOC-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 1, !dbg [[DBG40]] +; DEBUGLOC-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], %[[VECTOR_PH]] ], [ [[PTR_IND:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG35:![0-9]+]] +; DEBUGLOC-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> , !dbg [[DBG35]] +; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x ptr> [[VECTOR_GEP]], i32 0, !dbg [[DBG36:![0-9]+]] +; DEBUGLOC-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 1, !dbg [[DBG36]] ; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 
[[INDEX]], 4 -; DEBUGLOC-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 32, !dbg [[DBG39]] -; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG41:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG41]], !llvm.loop [[LOOP42:![0-9]+]] +; DEBUGLOC-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 32, !dbg [[DBG35]] +; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG37:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG37]], !llvm.loop [[LOOP38:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: -; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]], !dbg [[DBG41]] -; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG41]] +; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]], !dbg [[DBG37]] +; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG37]] ; DEBUGLOC: [[SCALAR_PH]]: -; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ], !dbg [[DBG39]] -; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG38]] +; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ], !dbg [[DBG35]] +; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG34]] ; DEBUGLOC: [[LOOP]]: -; DEBUGLOC-NEXT: [[IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG39]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[IV]], [[META35:![0-9]+]], !DIExpression(), [[DBG39]]) -; DEBUGLOC-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i64 1, !dbg [[DBG43:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[IV_NEXT]], [[META36:![0-9]+]], !DIExpression(), [[DBG43]]) -; DEBUGLOC-NEXT: store ptr [[IV]], ptr [[IV]], align 1, !dbg [[DBG40]] -; DEBUGLOC-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]], !dbg [[DBG44:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP_NOT]], [[META37:![0-9]+]], !DIExpression(), [[DBG44]]) -; DEBUGLOC-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG41]], !llvm.loop [[LOOP45:![0-9]+]] +; DEBUGLOC-NEXT: [[IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG35]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[IV]], [[META31:![0-9]+]], !DIExpression(), [[DBG35]]) +; DEBUGLOC-NEXT: [[IV_NEXT]] = getelementptr inbounds ptr, ptr [[IV]], i64 1, !dbg [[DBG39:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[IV_NEXT]], [[META32:![0-9]+]], !DIExpression(), [[DBG39]]) +; DEBUGLOC-NEXT: store ptr [[IV]], ptr [[IV]], align 1, !dbg [[DBG36]] +; DEBUGLOC-NEXT: [[CMP_NOT:%.*]] = icmp eq ptr [[IV_NEXT]], [[END]], !dbg [[DBG40:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP_NOT]], [[META33:![0-9]+]], !DIExpression(), [[DBG40]]) +; DEBUGLOC-NEXT: br i1 [[CMP_NOT]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG37]], !llvm.loop [[LOOP41:![0-9]+]] ; DEBUGLOC: [[EXIT]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG46:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG42:![0-9]+]] ; entry: br label %loop @@ -254,7 +226,7 @@ define void @predicated_phi_dbg(i64 %n, ptr %x) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; 
CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] @@ -274,96 +246,96 @@ define void @predicated_phi_dbg(i64 %n, ptr %x) { ; CHECK-NEXT: store i64 [[D]], ptr [[IDX]], align 8 ; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: ret void ; ; DEBUGLOC-LABEL: define void @predicated_phi_dbg( -; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[X:%.*]]) !dbg [[DBG47:![0-9]+]] { +; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[X:%.*]]) !dbg [[DBG43:![0-9]+]] { ; DEBUGLOC-NEXT: [[ENTRY:.*]]: -; DEBUGLOC-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1), !dbg [[DBG56:![0-9]+]] -; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4, !dbg [[DBG56]] -; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG56]] +; DEBUGLOC-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1), !dbg [[DBG52:![0-9]+]] +; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4, !dbg [[DBG52]] +; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]], !dbg [[DBG52]] ; DEBUGLOC: [[VECTOR_PH]]: ; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 ; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] -; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG56]] +; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG52]] ; DEBUGLOC: [[VECTOR_BODY]]: -; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6:.*]] ], !dbg [[DBG57:![0-9]+]] -; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6]] ], !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP0:%.*]] = icmp ult <4 x i64> [[VEC_IND]], splat (i64 5), !dbg [[DBG58:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0, !dbg [[DBG58]] -; DEBUGLOC-NEXT: br i1 [[TMP1]], label %[[PRED_UDIV_IF:.*]], label %[[PRED_UDIV_CONTINUE:.*]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6:.*]] ], !dbg [[DBG53:![0-9]+]] +; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_UDIV_CONTINUE6]] ], !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP0:%.*]] = icmp ult <4 x i64> [[VEC_IND]], splat (i64 5), !dbg [[DBG54:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0, !dbg [[DBG54]] +; DEBUGLOC-NEXT: br i1 [[TMP1]], label %[[PRED_UDIV_IF:.*]], label %[[PRED_UDIV_CONTINUE:.*]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_IF]]: -; DEBUGLOC-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP3:%.*]] = udiv i64 [[N]], [[TMP2]], !dbg [[DBG59:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> poison, i64 [[TMP3]], i32 0, !dbg [[DBG59]] -; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP3:%.*]] = udiv i64 [[N]], [[TMP2]], !dbg [[DBG55:![0-9]+]] +; 
DEBUGLOC-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> poison, i64 [[TMP3]], i32 0, !dbg [[DBG55]] +; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_CONTINUE]]: -; DEBUGLOC-NEXT: [[TMP5:%.*]] = phi <4 x i64> [ poison, %[[VECTOR_BODY]] ], [ [[TMP4]], %[[PRED_UDIV_IF]] ], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1, !dbg [[DBG58]] -; DEBUGLOC-NEXT: br i1 [[TMP6]], label %[[PRED_UDIV_IF1:.*]], label %[[PRED_UDIV_CONTINUE2:.*]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP5:%.*]] = phi <4 x i64> [ poison, %[[VECTOR_BODY]] ], [ [[TMP4]], %[[PRED_UDIV_IF]] ], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1, !dbg [[DBG54]] +; DEBUGLOC-NEXT: br i1 [[TMP6]], label %[[PRED_UDIV_IF1:.*]], label %[[PRED_UDIV_CONTINUE2:.*]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_IF1]]: -; DEBUGLOC-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP8:%.*]] = udiv i64 [[N]], [[TMP7]], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP9:%.*]] = insertelement <4 x i64> [[TMP5]], i64 [[TMP8]], i32 1, !dbg [[DBG59]] -; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE2]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP8:%.*]] = udiv i64 [[N]], [[TMP7]], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP9:%.*]] = insertelement <4 x i64> [[TMP5]], i64 [[TMP8]], i32 1, !dbg [[DBG55]] +; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE2]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_CONTINUE2]]: -; DEBUGLOC-NEXT: [[TMP10:%.*]] = phi <4 x i64> [ [[TMP5]], %[[PRED_UDIV_CONTINUE]] ], [ [[TMP9]], %[[PRED_UDIV_IF1]] ], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2, !dbg [[DBG58]] -; DEBUGLOC-NEXT: br i1 [[TMP11]], label %[[PRED_UDIV_IF3:.*]], label %[[PRED_UDIV_CONTINUE4:.*]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP10:%.*]] = phi <4 x i64> [ [[TMP5]], %[[PRED_UDIV_CONTINUE]] ], [ [[TMP9]], %[[PRED_UDIV_IF1]] ], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2, !dbg [[DBG54]] +; DEBUGLOC-NEXT: br i1 [[TMP11]], label %[[PRED_UDIV_IF3:.*]], label %[[PRED_UDIV_CONTINUE4:.*]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_IF3]]: -; DEBUGLOC-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP13:%.*]] = udiv i64 [[N]], [[TMP12]], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP10]], i64 [[TMP13]], i32 2, !dbg [[DBG59]] -; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE4]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP13:%.*]] = udiv i64 [[N]], [[TMP12]], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP10]], i64 [[TMP13]], i32 2, !dbg [[DBG55]] +; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE4]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_CONTINUE4]]: -; DEBUGLOC-NEXT: [[TMP15:%.*]] = phi <4 x i64> [ [[TMP10]], %[[PRED_UDIV_CONTINUE2]] ], [ [[TMP14]], %[[PRED_UDIV_IF3]] ], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3, !dbg [[DBG58]] -; DEBUGLOC-NEXT: br i1 [[TMP16]], label %[[PRED_UDIV_IF5:.*]], label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP15:%.*]] = phi <4 x i64> [ [[TMP10]], %[[PRED_UDIV_CONTINUE2]] ], [ [[TMP14]], %[[PRED_UDIV_IF3]] ], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3, !dbg [[DBG54]] +; DEBUGLOC-NEXT: br i1 [[TMP16]], label 
%[[PRED_UDIV_IF5:.*]], label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_IF5]]: -; DEBUGLOC-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 3, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP18:%.*]] = udiv i64 [[N]], [[TMP17]], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP18]], i32 3, !dbg [[DBG59]] -; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 3, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP18:%.*]] = udiv i64 [[N]], [[TMP17]], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP18]], i32 3, !dbg [[DBG55]] +; DEBUGLOC-NEXT: br label %[[PRED_UDIV_CONTINUE6]], !dbg [[DBG54]] ; DEBUGLOC: [[PRED_UDIV_CONTINUE6]]: -; DEBUGLOC-NEXT: [[TMP20:%.*]] = phi <4 x i64> [ [[TMP15]], %[[PRED_UDIV_CONTINUE4]] ], [ [[TMP19]], %[[PRED_UDIV_IF5]] ], !dbg [[DBG59]] -; DEBUGLOC-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP20]], <4 x i64> zeroinitializer, !dbg [[DBG60:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[X]], i64 [[INDEX]], !dbg [[DBG61:![0-9]+]] -; DEBUGLOC-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP21]], align 8, !dbg [[DBG62:![0-9]+]] -; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4), !dbg [[DBG57]] -; DEBUGLOC-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG63:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG63]], !llvm.loop [[LOOP64:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP20:%.*]] = phi <4 x i64> [ [[TMP15]], %[[PRED_UDIV_CONTINUE4]] ], [ [[TMP19]], %[[PRED_UDIV_IF5]] ], !dbg [[DBG55]] +; DEBUGLOC-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP20]], <4 x i64> zeroinitializer, !dbg [[DBG56:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[X]], i64 [[INDEX]], !dbg [[DBG57:![0-9]+]] +; DEBUGLOC-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP21]], align 8, !dbg [[DBG58:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4), !dbg [[DBG53]] +; DEBUGLOC-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG59:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG59]], !llvm.loop [[LOOP60:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: -; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]], !dbg [[DBG63]] -; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]], !dbg [[DBG63]] +; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]], !dbg [[DBG59]] +; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]], !dbg [[DBG59]] ; DEBUGLOC: [[SCALAR_PH]]: -; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], !dbg [[DBG57]] -; DEBUGLOC-NEXT: br label %[[FOR_BODY:.*]], !dbg [[DBG56]] +; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], !dbg [[DBG53]] +; DEBUGLOC-NEXT: br label %[[FOR_BODY:.*]], !dbg [[DBG52]] ; DEBUGLOC: [[FOR_BODY]]: -; DEBUGLOC-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[FOR_INC:.*]] ], !dbg [[DBG57]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[I]], [[META49:![0-9]+]], !DIExpression(), [[DBG57]]) -; DEBUGLOC-NEXT: [[CMP:%.*]] 
= icmp ult i64 [[I]], 5, !dbg [[DBG58]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP]], [[META50:![0-9]+]], !DIExpression(), [[DBG58]]) -; DEBUGLOC-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[FOR_INC]], !dbg [[DBG65:![0-9]+]] +; DEBUGLOC-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[FOR_INC:.*]] ], !dbg [[DBG53]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[I]], [[META45:![0-9]+]], !DIExpression(), [[DBG53]]) +; DEBUGLOC-NEXT: [[CMP:%.*]] = icmp ult i64 [[I]], 5, !dbg [[DBG54]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[CMP]], [[META46:![0-9]+]], !DIExpression(), [[DBG54]]) +; DEBUGLOC-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[FOR_INC]], !dbg [[DBG61:![0-9]+]] ; DEBUGLOC: [[IF_THEN]]: -; DEBUGLOC-NEXT: [[TMP4:%.*]] = udiv i64 [[N]], [[I]], !dbg [[DBG59]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[TMP4]], [[META51:![0-9]+]], !DIExpression(), [[DBG59]]) -; DEBUGLOC-NEXT: br label %[[FOR_INC]], !dbg [[DBG66:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP4:%.*]] = udiv i64 [[N]], [[I]], !dbg [[DBG55]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[TMP4]], [[META47:![0-9]+]], !DIExpression(), [[DBG55]]) +; DEBUGLOC-NEXT: br label %[[FOR_INC]], !dbg [[DBG62:![0-9]+]] ; DEBUGLOC: [[FOR_INC]]: -; DEBUGLOC-NEXT: [[D:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[TMP4]], %[[IF_THEN]] ], !dbg [[DBG60]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[D]], [[META52:![0-9]+]], !DIExpression(), [[DBG60]]) -; DEBUGLOC-NEXT: [[IDX:%.*]] = getelementptr i64, ptr [[X]], i64 [[I]], !dbg [[DBG61]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[IDX]], [[META53:![0-9]+]], !DIExpression(), [[DBG61]]) -; DEBUGLOC-NEXT: store i64 [[D]], ptr [[IDX]], align 8, !dbg [[DBG62]] -; DEBUGLOC-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1, !dbg [[DBG67:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[I_NEXT]], [[META54:![0-9]+]], !DIExpression(), [[DBG67]]) -; DEBUGLOC-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]], !dbg [[DBG68:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[COND]], [[META55:![0-9]+]], !DIExpression(), [[DBG68]]) -; DEBUGLOC-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !dbg [[DBG63]], !llvm.loop [[LOOP69:![0-9]+]] +; DEBUGLOC-NEXT: [[D:%.*]] = phi i64 [ 0, %[[FOR_BODY]] ], [ [[TMP4]], %[[IF_THEN]] ], !dbg [[DBG56]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[D]], [[META48:![0-9]+]], !DIExpression(), [[DBG56]]) +; DEBUGLOC-NEXT: [[IDX:%.*]] = getelementptr i64, ptr [[X]], i64 [[I]], !dbg [[DBG57]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[IDX]], [[META49:![0-9]+]], !DIExpression(), [[DBG57]]) +; DEBUGLOC-NEXT: store i64 [[D]], ptr [[IDX]], align 8, !dbg [[DBG58]] +; DEBUGLOC-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1, !dbg [[DBG63:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[I_NEXT]], [[META50:![0-9]+]], !DIExpression(), [[DBG63]]) +; DEBUGLOC-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]], !dbg [[DBG64:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[COND]], [[META51:![0-9]+]], !DIExpression(), [[DBG64]]) +; DEBUGLOC-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END]], !dbg [[DBG59]], !llvm.loop [[LOOP65:![0-9]+]] ; DEBUGLOC: [[FOR_END]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG70:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG66:![0-9]+]] ; entry: br label %for.body @@ -415,7 +387,7 @@ define void @scalar_cast_dbg(ptr nocapture %a, i32 %start, i64 %k) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4) ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], 
!llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -429,57 +401,57 @@ define void @scalar_cast_dbg(ptr nocapture %a, i32 %start, i64 %k) { ; CHECK-NEXT: store i32 [[TRUNC_IV]], ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[K]] -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; ; DEBUGLOC-LABEL: define void @scalar_cast_dbg( -; DEBUGLOC-SAME: ptr captures(none) [[A:%.*]], i32 [[START:%.*]], i64 [[K:%.*]]) !dbg [[DBG71:![0-9]+]] { +; DEBUGLOC-SAME: ptr captures(none) [[A:%.*]], i32 [[START:%.*]], i64 [[K:%.*]]) !dbg [[DBG67:![0-9]+]] { ; DEBUGLOC-NEXT: [[ENTRY:.*]]: -; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[K]], 4, !dbg [[DBG78:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]], !dbg [[DBG78]] +; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[K]], 4, !dbg [[DBG74:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]], !dbg [[DBG74]] ; DEBUGLOC: [[VECTOR_SCEVCHECK]]: -; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[K]], -1, !dbg [[DBG78]] -; DEBUGLOC-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32, !dbg [[DBG78]] -; DEBUGLOC-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0, !dbg [[DBG78]] -; DEBUGLOC-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[TMP0]], 4294967295, !dbg [[DBG78]] -; DEBUGLOC-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]], !dbg [[DBG78]] -; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG79:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP0:%.*]] = add i64 [[K]], -1, !dbg [[DBG74]] +; DEBUGLOC-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32, !dbg [[DBG74]] +; DEBUGLOC-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0, !dbg [[DBG74]] +; DEBUGLOC-NEXT: [[TMP3:%.*]] = icmp ugt i64 [[TMP0]], 4294967295, !dbg [[DBG74]] +; DEBUGLOC-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]], !dbg [[DBG74]] +; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG75:![0-9]+]] ; DEBUGLOC: [[VECTOR_PH]]: ; DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[K]], 4 ; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[K]], [[N_MOD_VF]] -; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG79]] +; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG75]] ; DEBUGLOC: [[VECTOR_BODY]]: -; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG79]] -; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG80:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP5:%.*]] = trunc i64 [[INDEX]] to i32, !dbg [[DBG80]] -; DEBUGLOC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP5]], !dbg [[DBG81:![0-9]+]] -; DEBUGLOC-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP6]], align 4, !dbg [[DBG82:![0-9]+]] -; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG79]] -; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4), !dbg [[DBG80]] -; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq 
i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG83:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG83]], !llvm.loop [[LOOP84:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG75]] +; DEBUGLOC-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG76:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP5:%.*]] = trunc i64 [[INDEX]] to i32, !dbg [[DBG76]] +; DEBUGLOC-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TMP5]], !dbg [[DBG77:![0-9]+]] +; DEBUGLOC-NEXT: store <4 x i32> [[VEC_IND]], ptr [[TMP6]], align 4, !dbg [[DBG78:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG75]] +; DEBUGLOC-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4), !dbg [[DBG76]] +; DEBUGLOC-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG79:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG79]], !llvm.loop [[LOOP80:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: -; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]], !dbg [[DBG83]] -; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG83]] +; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]], !dbg [[DBG79]] +; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG79]] ; DEBUGLOC: [[SCALAR_PH]]: -; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], !dbg [[DBG79]] -; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG78]] +; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], !dbg [[DBG75]] +; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG74]] ; DEBUGLOC: [[LOOP]]: -; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG79]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META73:![0-9]+]], !DIExpression(), [[DBG79]]) -; DEBUGLOC-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32, !dbg [[DBG80]] -; DEBUGLOC-NEXT: #dbg_value(i32 [[TRUNC_IV]], [[META74:![0-9]+]], !DIExpression(), [[DBG80]]) -; DEBUGLOC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[TRUNC_IV]], !dbg [[DBG81]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[ARRAYIDX]], [[META75:![0-9]+]], !DIExpression(), [[DBG81]]) -; DEBUGLOC-NEXT: store i32 [[TRUNC_IV]], ptr [[ARRAYIDX]], align 4, !dbg [[DBG82]] -; DEBUGLOC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1, !dbg [[DBG85:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META76:![0-9]+]], !DIExpression(), [[DBG85]]) -; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[K]], !dbg [[DBG86:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META77:![0-9]+]], !DIExpression(), [[DBG86]]) -; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG83]], !llvm.loop [[LOOP87:![0-9]+]] +; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG75]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META69:![0-9]+]], !DIExpression(), [[DBG75]]) +; DEBUGLOC-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32, !dbg [[DBG76]] +; DEBUGLOC-NEXT: #dbg_value(i32 [[TRUNC_IV]], [[META70:![0-9]+]], !DIExpression(), [[DBG76]]) +; DEBUGLOC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds 
i32, ptr [[A]], i32 [[TRUNC_IV]], !dbg [[DBG77]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[ARRAYIDX]], [[META71:![0-9]+]], !DIExpression(), [[DBG77]]) +; DEBUGLOC-NEXT: store i32 [[TRUNC_IV]], ptr [[ARRAYIDX]], align 4, !dbg [[DBG78]] +; DEBUGLOC-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1, !dbg [[DBG81:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META72:![0-9]+]], !DIExpression(), [[DBG81]]) +; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[K]], !dbg [[DBG82:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META73:![0-9]+]], !DIExpression(), [[DBG82]]) +; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG79]], !llvm.loop [[LOOP83:![0-9]+]] ; DEBUGLOC: [[EXIT]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG88:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG84:![0-9]+]] ; entry: br label %loop @@ -522,7 +494,7 @@ define void @widen_intrinsic_dbg(i64 %n, ptr %y, ptr %x) { ; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -538,60 +510,60 @@ define void @widen_intrinsic_dbg(i64 %n, ptr %y, ptr %x) { ; CHECK-NEXT: store float [[CALL]], ptr [[GEP_X]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; ; DEBUGLOC-LABEL: define void @widen_intrinsic_dbg( -; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[Y:%.*]], ptr [[X:%.*]]) !dbg [[DBG89:![0-9]+]] { +; DEBUGLOC-SAME: i64 [[N:%.*]], ptr [[Y:%.*]], ptr [[X:%.*]]) !dbg [[DBG85:![0-9]+]] { ; DEBUGLOC-NEXT: [[ENTRY:.*]]: -; DEBUGLOC-NEXT: [[Y2:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG98:![0-9]+]] -; DEBUGLOC-NEXT: [[X1:%.*]] = ptrtoint ptr [[X]] to i64, !dbg [[DBG98]] -; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4, !dbg [[DBG98]] -; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]], !dbg [[DBG98]] +; DEBUGLOC-NEXT: [[Y2:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG94:![0-9]+]] +; DEBUGLOC-NEXT: [[X1:%.*]] = ptrtoint ptr [[X]] to i64, !dbg [[DBG94]] +; DEBUGLOC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4, !dbg [[DBG94]] +; DEBUGLOC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]], !dbg [[DBG94]] ; DEBUGLOC: [[VECTOR_MEMCHECK]]: -; DEBUGLOC-NEXT: [[TMP0:%.*]] = sub i64 [[X1]], [[Y2]], !dbg [[DBG98]] -; DEBUGLOC-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16, !dbg [[DBG98]] -; DEBUGLOC-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG99:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP0:%.*]] = sub i64 [[X1]], [[Y2]], !dbg [[DBG94]] +; DEBUGLOC-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16, !dbg [[DBG94]] +; DEBUGLOC-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]], !dbg [[DBG95:![0-9]+]] ; DEBUGLOC: [[VECTOR_PH]]: ; 
DEBUGLOC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 ; DEBUGLOC-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] -; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG99]] +; DEBUGLOC-NEXT: br label %[[VECTOR_BODY:.*]], !dbg [[DBG95]] ; DEBUGLOC: [[VECTOR_BODY]]: -; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG99]] -; DEBUGLOC-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDEX]], !dbg [[DBG100:![0-9]+]] -; DEBUGLOC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !dbg [[DBG101:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[WIDE_LOAD]]), !dbg [[DBG102:![0-9]+]] -; DEBUGLOC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDEX]], !dbg [[DBG103:![0-9]+]] -; DEBUGLOC-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4, !dbg [[DBG104:![0-9]+]] -; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG99]] -; DEBUGLOC-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG105:![0-9]+]] -; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG105]], !llvm.loop [[LOOP106:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ], !dbg [[DBG95]] +; DEBUGLOC-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDEX]], !dbg [[DBG96:![0-9]+]] +; DEBUGLOC-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !dbg [[DBG97:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[WIDE_LOAD]]), !dbg [[DBG98:![0-9]+]] +; DEBUGLOC-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDEX]], !dbg [[DBG99:![0-9]+]] +; DEBUGLOC-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4, !dbg [[DBG100:![0-9]+]] +; DEBUGLOC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4, !dbg [[DBG95]] +; DEBUGLOC-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]], !dbg [[DBG101:![0-9]+]] +; DEBUGLOC-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !dbg [[DBG101]], !llvm.loop [[LOOP102:![0-9]+]] ; DEBUGLOC: [[MIDDLE_BLOCK]]: -; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]], !dbg [[DBG105]] -; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG105]] +; DEBUGLOC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]], !dbg [[DBG101]] +; DEBUGLOC-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]], !dbg [[DBG101]] ; DEBUGLOC: [[SCALAR_PH]]: -; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ], !dbg [[DBG99]] -; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG98]] +; DEBUGLOC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ], !dbg [[DBG95]] +; DEBUGLOC-NEXT: br label %[[LOOP:.*]], !dbg [[DBG94]] ; DEBUGLOC: [[LOOP]]: -; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG99]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META91:![0-9]+]], !DIExpression(), [[DBG99]]) -; DEBUGLOC-NEXT: [[GEP_Y:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[IV]], !dbg [[DBG100]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_Y]], [[META92:![0-9]+]], !DIExpression(), [[DBG100]]) -; DEBUGLOC-NEXT: [[LOAD:%.*]] = load float, ptr [[GEP_Y]], align 4, !dbg [[DBG101]] -; DEBUGLOC-NEXT: 
#dbg_value(float [[LOAD]], [[META93:![0-9]+]], !DIExpression(), [[DBG101]]) -; DEBUGLOC-NEXT: [[CALL:%.*]] = call float @llvm.sqrt.f32(float [[LOAD]]), !dbg [[DBG102]] -; DEBUGLOC-NEXT: #dbg_value(float [[CALL]], [[META94:![0-9]+]], !DIExpression(), [[DBG102]]) -; DEBUGLOC-NEXT: [[GEP_X:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[IV]], !dbg [[DBG103]] -; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_X]], [[META95:![0-9]+]], !DIExpression(), [[DBG103]]) -; DEBUGLOC-NEXT: store float [[CALL]], ptr [[GEP_X]], align 4, !dbg [[DBG104]] -; DEBUGLOC-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !dbg [[DBG107:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META96:![0-9]+]], !DIExpression(), [[DBG107]]) -; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]], !dbg [[DBG108:![0-9]+]] -; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META97:![0-9]+]], !DIExpression(), [[DBG108]]) -; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG105]], !llvm.loop [[LOOP109:![0-9]+]] +; DEBUGLOC-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ], !dbg [[DBG95]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[IV]], [[META87:![0-9]+]], !DIExpression(), [[DBG95]]) +; DEBUGLOC-NEXT: [[GEP_Y:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[IV]], !dbg [[DBG96]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_Y]], [[META88:![0-9]+]], !DIExpression(), [[DBG96]]) +; DEBUGLOC-NEXT: [[LOAD:%.*]] = load float, ptr [[GEP_Y]], align 4, !dbg [[DBG97]] +; DEBUGLOC-NEXT: #dbg_value(float [[LOAD]], [[META89:![0-9]+]], !DIExpression(), [[DBG97]]) +; DEBUGLOC-NEXT: [[CALL:%.*]] = call float @llvm.sqrt.f32(float [[LOAD]]), !dbg [[DBG98]] +; DEBUGLOC-NEXT: #dbg_value(float [[CALL]], [[META90:![0-9]+]], !DIExpression(), [[DBG98]]) +; DEBUGLOC-NEXT: [[GEP_X:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[IV]], !dbg [[DBG99]] +; DEBUGLOC-NEXT: #dbg_value(ptr [[GEP_X]], [[META91:![0-9]+]], !DIExpression(), [[DBG99]]) +; DEBUGLOC-NEXT: store float [[CALL]], ptr [[GEP_X]], align 4, !dbg [[DBG100]] +; DEBUGLOC-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !dbg [[DBG103:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i64 [[IV_NEXT]], [[META92:![0-9]+]], !DIExpression(), [[DBG103]]) +; DEBUGLOC-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]], !dbg [[DBG104:![0-9]+]] +; DEBUGLOC-NEXT: #dbg_value(i1 [[EXITCOND]], [[META93:![0-9]+]], !DIExpression(), [[DBG104]]) +; DEBUGLOC-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !dbg [[DBG101]], !llvm.loop [[LOOP105:![0-9]+]] ; DEBUGLOC: [[EXIT]]: -; DEBUGLOC-NEXT: ret void, !dbg [[DBG110:![0-9]+]] +; DEBUGLOC-NEXT: ret void, !dbg [[DBG106:![0-9]+]] ; entry: br label %loop @@ -618,23 +590,21 @@ exit: ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; CHECK: [[META4]] = !{!"llvm.loop.vectorize.width", i32 4} +; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]} ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} ; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]} +; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]]} ; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} ; 
CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]]} -; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]} -; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]} ;. ; DEBUGLOC: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C, file: [[META1:![0-9]+]], producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug) ; DEBUGLOC: [[META1]] = !DIFile(filename: "{{.*}}", directory: {{.*}}) ; DEBUGLOC: [[DBG5]] = distinct !DISubprogram(name: "_Z3fooPf", linkageName: "_Z3fooPf", scope: null, file: [[META1]], line: 1, type: [[META6:![0-9]+]], scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META8:![0-9]+]]) ; DEBUGLOC: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]]) ; DEBUGLOC: [[META7]] = !{} -; DEBUGLOC: [[META8]] = !{[[META9]], [[META11]], [[META12]], [[META14]], [[META15]], [[META16]]} +; DEBUGLOC: [[META8]] = !{[[META9:![0-9]+]], [[META11:![0-9]+]], [[META12:![0-9]+]], [[META14:![0-9]+]], [[META15:![0-9]+]], [[META16:![0-9]+]]} ; DEBUGLOC: [[META9]] = !DILocalVariable(name: "1", scope: [[DBG5]], file: [[META1]], line: 2, type: [[META10:![0-9]+]]) ; DEBUGLOC: [[META10]] = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned) ; DEBUGLOC: [[META11]] = !DILocalVariable(name: "2", scope: [[DBG5]], file: [[META1]], line: 3, type: [[META10]]) @@ -654,87 +624,83 @@ exit: ; DEBUGLOC: [[LOOP25]] = distinct !{[[LOOP25]], [[META26:![0-9]+]], [[META27:![0-9]+]]} ; DEBUGLOC: [[META26]] = !{!"llvm.loop.isvectorized", i32 1} ; DEBUGLOC: [[META27]] = !{!"llvm.loop.unroll.runtime.disable"} -; DEBUGLOC: [[DBG28]] = !DILocation(line: 7, column: 1, scope: [[DBG5]]) -; DEBUGLOC: [[DBG29]] = !DILocation(line: 8, column: 1, scope: [[DBG5]]) -; DEBUGLOC: [[LOOP30]] = distinct !{[[LOOP30]], [[META31:![0-9]+]]} -; DEBUGLOC: [[META31]] = !{!"llvm.loop.vectorize.width", i32 4} -; DEBUGLOC: [[DBG32]] = !DILocation(line: 10, column: 1, scope: [[DBG5]]) -; DEBUGLOC: [[DBG33]] = distinct !DISubprogram(name: "widen_ptr_induction_dbg", linkageName: "widen_ptr_induction_dbg", scope: null, file: [[META1]], line: 11, type: [[META6]], scopeLine: 11, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META34:![0-9]+]]) -; DEBUGLOC: [[META34]] = !{[[META35]], [[META36]], [[META37]]} -; DEBUGLOC: [[META35]] = !DILocalVariable(name: "7", scope: [[DBG33]], file: [[META1]], line: 12, type: [[META10]]) -; DEBUGLOC: [[META36]] = !DILocalVariable(name: "8", scope: [[DBG33]], file: [[META1]], line: 13, type: [[META10]]) -; DEBUGLOC: [[META37]] = !DILocalVariable(name: "9", scope: [[DBG33]], file: [[META1]], line: 15, type: [[META17]]) -; DEBUGLOC: [[DBG38]] = !DILocation(line: 11, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG39]] = !DILocation(line: 12, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG40]] = !DILocation(line: 14, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG41]] = !DILocation(line: 16, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[LOOP42]] = distinct !{[[LOOP42]], [[META26]], [[META27]]} -; DEBUGLOC: [[DBG43]] = !DILocation(line: 13, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG44]] = !DILocation(line: 15, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[LOOP45]] = distinct !{[[LOOP45]], [[META27]], [[META26]]} -; DEBUGLOC: [[DBG46]] = !DILocation(line: 17, column: 1, scope: [[DBG33]]) -; DEBUGLOC: [[DBG47]] = distinct !DISubprogram(name: "predicated_phi_dbg", linkageName: "predicated_phi_dbg", scope: null, file: [[META1]], line: 18, type: [[META6]], scopeLine: 18, spFlags: 
DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META48:![0-9]+]]) -; DEBUGLOC: [[META48]] = !{[[META49]], [[META50]], [[META51]], [[META52]], [[META53]], [[META54]], [[META55]]} -; DEBUGLOC: [[META49]] = !DILocalVariable(name: "10", scope: [[DBG47]], file: [[META1]], line: 19, type: [[META10]]) -; DEBUGLOC: [[META50]] = !DILocalVariable(name: "11", scope: [[DBG47]], file: [[META1]], line: 20, type: [[META17]]) -; DEBUGLOC: [[META51]] = !DILocalVariable(name: "12", scope: [[DBG47]], file: [[META1]], line: 22, type: [[META10]]) -; DEBUGLOC: [[META52]] = !DILocalVariable(name: "13", scope: [[DBG47]], file: [[META1]], line: 24, type: [[META10]]) -; DEBUGLOC: [[META53]] = !DILocalVariable(name: "14", scope: [[DBG47]], file: [[META1]], line: 25, type: [[META10]]) -; DEBUGLOC: [[META54]] = !DILocalVariable(name: "15", scope: [[DBG47]], file: [[META1]], line: 27, type: [[META10]]) -; DEBUGLOC: [[META55]] = !DILocalVariable(name: "16", scope: [[DBG47]], file: [[META1]], line: 28, type: [[META17]]) -; DEBUGLOC: [[DBG56]] = !DILocation(line: 18, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG57]] = !DILocation(line: 19, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG58]] = !DILocation(line: 20, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG59]] = !DILocation(line: 22, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG60]] = !DILocation(line: 24, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG61]] = !DILocation(line: 25, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG62]] = !DILocation(line: 26, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG63]] = !DILocation(line: 29, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[LOOP64]] = distinct !{[[LOOP64]], [[META26]], [[META27]]} -; DEBUGLOC: [[DBG65]] = !DILocation(line: 21, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG66]] = !DILocation(line: 23, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG67]] = !DILocation(line: 27, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG68]] = !DILocation(line: 28, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[LOOP69]] = distinct !{[[LOOP69]], [[META27]], [[META26]]} -; DEBUGLOC: [[DBG70]] = !DILocation(line: 30, column: 1, scope: [[DBG47]]) -; DEBUGLOC: [[DBG71]] = distinct !DISubprogram(name: "scalar_cast_dbg", linkageName: "scalar_cast_dbg", scope: null, file: [[META1]], line: 31, type: [[META6]], scopeLine: 31, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META72:![0-9]+]]) -; DEBUGLOC: [[META72]] = !{[[META73]], [[META74]], [[META75]], [[META76]], [[META77]]} -; DEBUGLOC: [[META73]] = !DILocalVariable(name: "17", scope: [[DBG71]], file: [[META1]], line: 32, type: [[META10]]) -; DEBUGLOC: [[META74]] = !DILocalVariable(name: "18", scope: [[DBG71]], file: [[META1]], line: 33, type: [[META13]]) -; DEBUGLOC: [[META75]] = !DILocalVariable(name: "19", scope: [[DBG71]], file: [[META1]], line: 34, type: [[META10]]) -; DEBUGLOC: [[META76]] = !DILocalVariable(name: "20", scope: [[DBG71]], file: [[META1]], line: 36, type: [[META10]]) -; DEBUGLOC: [[META77]] = !DILocalVariable(name: "21", scope: [[DBG71]], file: [[META1]], line: 37, type: [[META17]]) -; DEBUGLOC: [[DBG78]] = !DILocation(line: 31, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG79]] = !DILocation(line: 32, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG80]] = !DILocation(line: 33, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG81]] = !DILocation(line: 34, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG82]] = !DILocation(line: 35, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG83]] = !DILocation(line: 38, column: 
1, scope: [[DBG71]]) -; DEBUGLOC: [[LOOP84]] = distinct !{[[LOOP84]], [[META26]], [[META27]]} -; DEBUGLOC: [[DBG85]] = !DILocation(line: 36, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG86]] = !DILocation(line: 37, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[LOOP87]] = distinct !{[[LOOP87]], [[META26]]} -; DEBUGLOC: [[DBG88]] = !DILocation(line: 39, column: 1, scope: [[DBG71]]) -; DEBUGLOC: [[DBG89]] = distinct !DISubprogram(name: "widen_intrinsic_dbg", linkageName: "widen_intrinsic_dbg", scope: null, file: [[META1]], line: 40, type: [[META6]], scopeLine: 40, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META90:![0-9]+]]) -; DEBUGLOC: [[META90]] = !{[[META91]], [[META92]], [[META93]], [[META94]], [[META95]], [[META96]], [[META97]]} -; DEBUGLOC: [[META91]] = !DILocalVariable(name: "22", scope: [[DBG89]], file: [[META1]], line: 41, type: [[META10]]) -; DEBUGLOC: [[META92]] = !DILocalVariable(name: "23", scope: [[DBG89]], file: [[META1]], line: 42, type: [[META10]]) -; DEBUGLOC: [[META93]] = !DILocalVariable(name: "24", scope: [[DBG89]], file: [[META1]], line: 43, type: [[META13]]) -; DEBUGLOC: [[META94]] = !DILocalVariable(name: "25", scope: [[DBG89]], file: [[META1]], line: 44, type: [[META13]]) -; DEBUGLOC: [[META95]] = !DILocalVariable(name: "26", scope: [[DBG89]], file: [[META1]], line: 45, type: [[META10]]) -; DEBUGLOC: [[META96]] = !DILocalVariable(name: "27", scope: [[DBG89]], file: [[META1]], line: 47, type: [[META10]]) -; DEBUGLOC: [[META97]] = !DILocalVariable(name: "28", scope: [[DBG89]], file: [[META1]], line: 48, type: [[META17]]) -; DEBUGLOC: [[DBG98]] = !DILocation(line: 40, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG99]] = !DILocation(line: 41, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG100]] = !DILocation(line: 42, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG101]] = !DILocation(line: 43, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG102]] = !DILocation(line: 44, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG103]] = !DILocation(line: 45, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG104]] = !DILocation(line: 46, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG105]] = !DILocation(line: 49, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[LOOP106]] = distinct !{[[LOOP106]], [[META26]], [[META27]]} -; DEBUGLOC: [[DBG107]] = !DILocation(line: 47, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[DBG108]] = !DILocation(line: 48, column: 1, scope: [[DBG89]]) -; DEBUGLOC: [[LOOP109]] = distinct !{[[LOOP109]], [[META26]]} -; DEBUGLOC: [[DBG110]] = !DILocation(line: 50, column: 1, scope: [[DBG89]]) +; DEBUGLOC: [[DBG28]] = !DILocation(line: 10, column: 1, scope: [[DBG5]]) +; DEBUGLOC: [[DBG29]] = distinct !DISubprogram(name: "widen_ptr_induction_dbg", linkageName: "widen_ptr_induction_dbg", scope: null, file: [[META1]], line: 11, type: [[META6]], scopeLine: 11, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META30:![0-9]+]]) +; DEBUGLOC: [[META30]] = !{[[META31]], [[META32]], [[META33]]} +; DEBUGLOC: [[META31]] = !DILocalVariable(name: "7", scope: [[DBG29]], file: [[META1]], line: 12, type: [[META10]]) +; DEBUGLOC: [[META32]] = !DILocalVariable(name: "8", scope: [[DBG29]], file: [[META1]], line: 13, type: [[META10]]) +; DEBUGLOC: [[META33]] = !DILocalVariable(name: "9", scope: [[DBG29]], file: [[META1]], line: 15, type: [[META17]]) +; DEBUGLOC: [[DBG34]] = !DILocation(line: 11, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG35]] = !DILocation(line: 12, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG36]] = 
!DILocation(line: 14, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG37]] = !DILocation(line: 16, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[LOOP38]] = distinct !{[[LOOP38]], [[META26]], [[META27]]} +; DEBUGLOC: [[DBG39]] = !DILocation(line: 13, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG40]] = !DILocation(line: 15, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[LOOP41]] = distinct !{[[LOOP41]], [[META27]], [[META26]]} +; DEBUGLOC: [[DBG42]] = !DILocation(line: 17, column: 1, scope: [[DBG29]]) +; DEBUGLOC: [[DBG43]] = distinct !DISubprogram(name: "predicated_phi_dbg", linkageName: "predicated_phi_dbg", scope: null, file: [[META1]], line: 18, type: [[META6]], scopeLine: 18, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META44:![0-9]+]]) +; DEBUGLOC: [[META44]] = !{[[META45]], [[META46]], [[META47]], [[META48]], [[META49]], [[META50]], [[META51]]} +; DEBUGLOC: [[META45]] = !DILocalVariable(name: "10", scope: [[DBG43]], file: [[META1]], line: 19, type: [[META10]]) +; DEBUGLOC: [[META46]] = !DILocalVariable(name: "11", scope: [[DBG43]], file: [[META1]], line: 20, type: [[META17]]) +; DEBUGLOC: [[META47]] = !DILocalVariable(name: "12", scope: [[DBG43]], file: [[META1]], line: 22, type: [[META10]]) +; DEBUGLOC: [[META48]] = !DILocalVariable(name: "13", scope: [[DBG43]], file: [[META1]], line: 24, type: [[META10]]) +; DEBUGLOC: [[META49]] = !DILocalVariable(name: "14", scope: [[DBG43]], file: [[META1]], line: 25, type: [[META10]]) +; DEBUGLOC: [[META50]] = !DILocalVariable(name: "15", scope: [[DBG43]], file: [[META1]], line: 27, type: [[META10]]) +; DEBUGLOC: [[META51]] = !DILocalVariable(name: "16", scope: [[DBG43]], file: [[META1]], line: 28, type: [[META17]]) +; DEBUGLOC: [[DBG52]] = !DILocation(line: 18, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG53]] = !DILocation(line: 19, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG54]] = !DILocation(line: 20, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG55]] = !DILocation(line: 22, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG56]] = !DILocation(line: 24, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG57]] = !DILocation(line: 25, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG58]] = !DILocation(line: 26, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG59]] = !DILocation(line: 29, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[LOOP60]] = distinct !{[[LOOP60]], [[META26]], [[META27]]} +; DEBUGLOC: [[DBG61]] = !DILocation(line: 21, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG62]] = !DILocation(line: 23, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG63]] = !DILocation(line: 27, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG64]] = !DILocation(line: 28, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[LOOP65]] = distinct !{[[LOOP65]], [[META27]], [[META26]]} +; DEBUGLOC: [[DBG66]] = !DILocation(line: 30, column: 1, scope: [[DBG43]]) +; DEBUGLOC: [[DBG67]] = distinct !DISubprogram(name: "scalar_cast_dbg", linkageName: "scalar_cast_dbg", scope: null, file: [[META1]], line: 31, type: [[META6]], scopeLine: 31, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META68:![0-9]+]]) +; DEBUGLOC: [[META68]] = !{[[META69]], [[META70]], [[META71]], [[META72]], [[META73]]} +; DEBUGLOC: [[META69]] = !DILocalVariable(name: "17", scope: [[DBG67]], file: [[META1]], line: 32, type: [[META10]]) +; DEBUGLOC: [[META70]] = !DILocalVariable(name: "18", scope: [[DBG67]], file: [[META1]], line: 33, type: [[META13]]) +; DEBUGLOC: [[META71]] = !DILocalVariable(name: "19", scope: [[DBG67]], file: [[META1]], line: 
34, type: [[META10]]) +; DEBUGLOC: [[META72]] = !DILocalVariable(name: "20", scope: [[DBG67]], file: [[META1]], line: 36, type: [[META10]]) +; DEBUGLOC: [[META73]] = !DILocalVariable(name: "21", scope: [[DBG67]], file: [[META1]], line: 37, type: [[META17]]) +; DEBUGLOC: [[DBG74]] = !DILocation(line: 31, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG75]] = !DILocation(line: 32, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG76]] = !DILocation(line: 33, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG77]] = !DILocation(line: 34, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG78]] = !DILocation(line: 35, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG79]] = !DILocation(line: 38, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[LOOP80]] = distinct !{[[LOOP80]], [[META26]], [[META27]]} +; DEBUGLOC: [[DBG81]] = !DILocation(line: 36, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG82]] = !DILocation(line: 37, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[LOOP83]] = distinct !{[[LOOP83]], [[META26]]} +; DEBUGLOC: [[DBG84]] = !DILocation(line: 39, column: 1, scope: [[DBG67]]) +; DEBUGLOC: [[DBG85]] = distinct !DISubprogram(name: "widen_intrinsic_dbg", linkageName: "widen_intrinsic_dbg", scope: null, file: [[META1]], line: 40, type: [[META6]], scopeLine: 40, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META86:![0-9]+]]) +; DEBUGLOC: [[META86]] = !{[[META87]], [[META88]], [[META89]], [[META90]], [[META91]], [[META92]], [[META93]]} +; DEBUGLOC: [[META87]] = !DILocalVariable(name: "22", scope: [[DBG85]], file: [[META1]], line: 41, type: [[META10]]) +; DEBUGLOC: [[META88]] = !DILocalVariable(name: "23", scope: [[DBG85]], file: [[META1]], line: 42, type: [[META10]]) +; DEBUGLOC: [[META89]] = !DILocalVariable(name: "24", scope: [[DBG85]], file: [[META1]], line: 43, type: [[META13]]) +; DEBUGLOC: [[META90]] = !DILocalVariable(name: "25", scope: [[DBG85]], file: [[META1]], line: 44, type: [[META13]]) +; DEBUGLOC: [[META91]] = !DILocalVariable(name: "26", scope: [[DBG85]], file: [[META1]], line: 45, type: [[META10]]) +; DEBUGLOC: [[META92]] = !DILocalVariable(name: "27", scope: [[DBG85]], file: [[META1]], line: 47, type: [[META10]]) +; DEBUGLOC: [[META93]] = !DILocalVariable(name: "28", scope: [[DBG85]], file: [[META1]], line: 48, type: [[META17]]) +; DEBUGLOC: [[DBG94]] = !DILocation(line: 40, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG95]] = !DILocation(line: 41, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG96]] = !DILocation(line: 42, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG97]] = !DILocation(line: 43, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG98]] = !DILocation(line: 44, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG99]] = !DILocation(line: 45, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG100]] = !DILocation(line: 46, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG101]] = !DILocation(line: 49, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[LOOP102]] = distinct !{[[LOOP102]], [[META26]], [[META27]]} +; DEBUGLOC: [[DBG103]] = !DILocation(line: 47, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[DBG104]] = !DILocation(line: 48, column: 1, scope: [[DBG85]]) +; DEBUGLOC: [[LOOP105]] = distinct !{[[LOOP105]], [[META26]]} +; DEBUGLOC: [[DBG106]] = !DILocation(line: 50, column: 1, scope: [[DBG85]]) ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll index 57f0dc205dba1..787fa31751b6a 100644 --- a/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll +++ b/llvm/test/Transforms/LoopVectorize/preserve-dbg-loc-reduction-inloop.ll @@ -22,7 +22,7 @@ loop: %load = load i32, ptr %gep, align 4 %red.next = add i32 %red, %load %iv.next = add i64 %iv, 1 - %exitcond = icmp eq i64 %iv.next, 256 + %exitcond = icmp eq i64 %iv.next, 257 br i1 %exitcond, label %exit, label %loop exit: diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll index f20d4922b475e..73ddddc69a7c7 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-min-max.ll @@ -20,10 +20,6 @@ define i32 @reduction_smin(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -66,10 +62,6 @@ define i32 @reduction_smin_select_ops_flipped(ptr nocapture %A, ptr nocapture %B ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -111,10 +103,6 @@ define i32 @reduction_smin_intrinsic(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP1]]) ; CHECK-NEXT: ret i32 [[TMP3]] @@ -159,10 +147,6 @@ define i32 @reduction_umax(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -205,10 +189,6 @@ define i32 @reduction_umax_select_ops_flipped(ptr nocapture %A, ptr nocapture %B ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -250,10 +230,6 @@ define i32 @reduction_umax_intrinsic(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP1]]) ; CHECK-NEXT: ret i32 [[TMP3]] diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll index 925290b10b35e..1b9dcadbbfc39 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll @@ -61,11 +61,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP26]] ; @@ -170,11 +166,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP48]] ; @@ -263,11 +255,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP29]] ; @@ -373,11 +361,7 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP48]] ; @@ -485,11 +469,7 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP47]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP46]] ; @@ -594,11 +574,7 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 
; CHECK-NEXT: br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: ret i32 [[TMP45]] ; @@ -701,11 +677,7 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[TMP45]] ; @@ -806,11 +778,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[TMP43]] ; @@ -911,11 +879,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[TMP43]] ; @@ -1016,11 +980,7 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret float [[TMP43]] ; @@ -1123,11 +1083,7 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret float [[TMP45]] ; @@ -1211,11 +1167,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -1297,11 
+1249,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: ret i32 [[RDX_MINMAX]] ; @@ -1356,21 +1304,7 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]] -; CHECK: if.then: -; CHECK-NEXT: br i1 poison, label [[IF_THEN8:%.*]], label [[IF_ELSE:%.*]] -; CHECK: if.then8: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: if.else: -; CHECK-NEXT: br i1 poison, label [[IF_THEN16:%.*]], label [[FOR_INC]] -; CHECK: if.then16: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: ; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[PREDPHI3]]) ; CHECK-NEXT: ret float [[SUM_1_LCSSA]] @@ -1478,11 +1412,7 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP31:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[TMP32:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP30]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[TMP33:%.*]] = trunc <4 x i32> [[TMP32]] to <4 x i8> @@ -1572,11 +1502,7 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[TMP31:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP29]], <4 x i32> [[VEC_PHI]] ; CHECK-NEXT: [[TMP32:%.*]] = trunc <4 x i32> [[TMP31]] to <4 x i8> diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll index cad3ca1394bb9..183462f71d480 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll @@ -35,11 +35,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label 
[[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP7]], [[TMP5]] ; CHECK-NEXT: [[BIN_RDX7:%.*]] = add i32 [[TMP9]], [[BIN_RDX]] @@ -114,11 +110,7 @@ define i64 @reduction_sum_chain(ptr noalias %p, ptr noalias %q) { ; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: br i1 poison, label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: [[BIN_RDX:%.*]] = add i64 [[TMP19]], [[TMP17]] ; CHECK-NEXT: [[BIN_RDX11:%.*]] = add i64 [[TMP21]], [[BIN_RDX]] @@ -345,11 +337,7 @@ define i32 @predicated(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP111:%.*]] = icmp eq i64 [[INDEX_NEXT]], 272 ; CHECK-NEXT: br i1 [[TMP111]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP104]], [[TMP101]] ; CHECK-NEXT: [[BIN_RDX34:%.*]] = add i32 [[TMP107]], [[BIN_RDX]] @@ -581,17 +569,9 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) { ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 16) ; CHECK-NEXT: [[TMP119:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP119]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP119]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]] -; CHECK: if.then: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: ; CHECK-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP112]], [[TMP109]] ; CHECK-NEXT: [[BIN_RDX36:%.*]] = mul i32 [[TMP115]], [[BIN_RDX]] diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll index f6e8de608645a..ec7fde81b205b 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll @@ -23,21 +23,8 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], 
i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L7]] = add i32 [[SUM_02]], [[L3]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[TMP2]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP2]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum_single( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]]) { @@ -61,22 +48,9 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP5]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP5]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L7]] = add i32 [[SUM_02]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -125,26 +99,8 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]] -; CHECK-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]] -; CHECK-NEXT: [[L9]] = add i32 [[L8]], [[L5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: 
[[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP7]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -183,27 +139,9 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP15]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP15]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]] -; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L8]], [[L5]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -251,22 +189,8 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L3]] -; CHECK-NEXT: [[L9]] = add i32 [[L7]], 3 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; 
CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP3]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum_const( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]]) { @@ -294,23 +218,9 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP7]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L7]], 3 -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -360,26 +270,8 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[PROD_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 1, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[L7:%.*]] = mul i32 [[PROD_02]], [[L6]] -; CHECK-NEXT: [[L8:%.*]] = mul i32 [[L7]], [[L3]] -; CHECK-NEXT: [[L9]] = mul i32 [[L8]], [[L5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: 
[[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP7]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_prod( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -418,27 +310,9 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP15]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[PROD_0_LCSSA:%.*]] = mul i32 [[TMP15]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[PROD_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 1, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = mul i32 [[PROD_02]], [[L6]] -; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = mul i32 [[L7]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = mul i32 [[L8]], [[L5]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[PROD_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[PROD_0_LCSSA]] ; entry: @@ -491,26 +365,8 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = mul nsw i32 [[L5]], [[L3]] -; CHECK-NEXT: [[L7:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[L8:%.*]] = add i32 [[SUM_02]], [[L7]] -; CHECK-NEXT: [[L9]] = add i32 [[L8]], [[L6]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 
[[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_mix( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -547,27 +403,9 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP10]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP13]], [[TMP10]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = mul nsw i32 [[L5]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = add i32 [[SUM_02]], [[L7]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L8]], [[L6]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -617,24 +455,8 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 19, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = mul i32 
[[SUM_02]], [[L3]] -; CHECK-NEXT: [[L7]] = mul i32 [[L6]], [[L5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_mul( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -666,25 +488,9 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP11]], [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = mul i32 [[TMP11]], [[TMP9]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L7:%.*]], %[[DOTLR_PH]] ], [ 19, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = mul i32 [[SUM_02]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L7]] = mul i32 [[L6]], [[L5]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L7]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -731,24 +537,8 @@ define i32 @start_at_non_zero(ptr nocapture %in, ptr nocapture %coeff, ptr nocap ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_09:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 120, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[COEFF]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw 
i32 [[L1]], [[L0]] -; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[SUM_09]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @start_at_non_zero( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[IN:%.*]], ptr captures(none) [[COEFF:%.*]], ptr captures(none) [[OUT:%.*]]) { @@ -780,24 +570,8 @@ define i32 @start_at_non_zero(ptr nocapture %in, ptr nocapture %coeff, ptr nocap ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP9]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_09:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 120, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[COEFF]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = mul nsw i32 [[L1]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[SUM_09]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -844,24 +618,8 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[AND:%.*]], %[[FOR_BODY]] ], [ -1, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = and i32 [[RESULT_08]], [[L0]] -; CHECK-NEXT: [[AND]] = and i32 [[ADD]], [[L1]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] 
= trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_and( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -893,25 +651,9 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = and i32 [[TMP11]], [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = and i32 [[TMP11]], [[TMP9]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[AND:%.*]], %[[FOR_BODY]] ], [ -1, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = and i32 [[RESULT_08]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[AND]] = and i32 [[ADD]], [[L1]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT_0_LCSSA]] ; entry: @@ -958,24 +700,8 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[OR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]] -; CHECK-NEXT: [[OR]] = or i32 [[ADD]], [[RESULT_08]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 
[[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_or( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -1005,25 +731,9 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = or i32 [[TMP9]], [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = or i32 [[TMP9]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[OR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[OR]] = or i32 [[ADD]], [[RESULT_08]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT_0_LCSSA]] ; entry: @@ -1070,24 +780,8 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[XOR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]] -; CHECK-NEXT: [[XOR]] = xor i32 [[ADD]], [[RESULT_08]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 
[[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_xor( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -1117,25 +811,9 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = xor i32 [[TMP9]], [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = xor i32 [[TMP9]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi i32 [ [[XOR:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = add nsw i32 [[L1]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[XOR]] = xor i32 [[ADD]], [[RESULT_08]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[RESULT_0_LCSSA]] ; entry: @@ -1183,24 +861,8 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi float [ [[FADD:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[RESULT_08]], [[L0]] -; CHECK-NEXT: [[FADD]] = fadd fast float [[ADD]], [[L1]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: 
[[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FADD]], %[[FOR_BODY]] ], [ [[TMP3]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret float [[TMP3]] ; ; CHECK-INTERLEAVED-LABEL: define float @reduction_fadd( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -1232,25 +894,9 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd fast float [[TMP7]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = fadd fast float [[TMP7]], [[TMP6]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi float [ [[FADD:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = fadd fast float [[RESULT_08]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[FADD]] = fadd fast float [[ADD]], [[L1]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret float [[RESULT_0_LCSSA]] ; entry: @@ -1298,24 +944,8 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[RESULT_08:%.*]] = phi float [ [[FMUL:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = fmul fast float [[RESULT_08]], [[L0]] -; CHECK-NEXT: [[FMUL]] = fmul fast 
float [[ADD]], [[L1]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FMUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] +; CHECK-NEXT: ret float [[TMP5]] ; ; CHECK-INTERLEAVED-LABEL: define float @reduction_fmul( ; CHECK-INTERLEAVED-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) { @@ -1347,25 +977,9 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) { ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fmul fast float [[TMP11]], [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = fmul fast float [[TMP11]], [[TMP9]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[RESULT_08:%.*]] = phi float [ [[FMUL:%.*]], %[[FOR_BODY]] ], [ 0.000000e+00, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = fmul fast float [[RESULT_08]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[FMUL]] = fmul fast float [[ADD]], [[L1]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[FMUL]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret float [[RESULT_0_LCSSA]] ; entry: @@ -1410,21 +1024,8 @@ define i32 @reduction_sub_lhs(ptr noalias nocapture %A) { ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 3, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[SUB]] = sub nsw i32 [[X_05]], [[L0]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; 
CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP5]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sub_lhs( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]]) { @@ -1450,21 +1051,8 @@ define i32 @reduction_sub_lhs(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP5]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 3, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[SUB]] = sub nsw i32 [[X_05]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -1519,38 +1107,8 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[PREDPHI3]]) ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_INC:.*]] ] -; CHECK-NEXT: [[SUM_033:%.*]] = phi float [ [[S]], %[[SCALAR_PH]] ], [ [[SUM_1:%.*]], %[[FOR_INC]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[CMP3:%.*]] = fcmp ogt float [[L0]], [[L1]] -; CHECK-NEXT: br i1 [[CMP3]], label %[[IF_THEN:.*]], label %[[FOR_INC]] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[CMP6:%.*]] = fcmp ogt float [[L1]], 1.000000e+00 -; CHECK-NEXT: br i1 [[CMP6]], label %[[IF_THEN8:.*]], label %[[IF_ELSE:.*]] -; CHECK: [[IF_THEN8]]: -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[SUM_033]], [[L0]] -; CHECK-NEXT: br label %[[FOR_INC]] -; CHECK: [[IF_ELSE]]: -; CHECK-NEXT: [[CMP14:%.*]] = fcmp ogt float [[L0]], 2.000000e+00 -; CHECK-NEXT: br i1 [[CMP14]], label %[[IF_THEN16:.*]], label %[[FOR_INC]] -; CHECK: [[IF_THEN16]]: -; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float [[SUM_033]], [[L1]] -; CHECK-NEXT: br label %[[FOR_INC]] -; CHECK: 
[[FOR_INC]]: -; CHECK-NEXT: [[SUM_1]] = phi float [ [[ADD]], %[[IF_THEN8]] ], [ [[ADD19]], %[[IF_THEN16]] ], [ [[SUM_033]], %[[IF_ELSE]] ], [ [[SUM_033]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_END]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi float [ [[SUM_1]], %[[FOR_INC]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[SUM_1_LCSSA]] +; CHECK-NEXT: ret float [[TMP13]] ; ; CHECK-INTERLEAVED-LABEL: define float @reduction_conditional( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], float [[S:%.*]]) { @@ -1602,38 +1160,8 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) { ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x float> [[PREDPHI9]], [[PREDPHI6]] ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_INC:.*]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_033:%.*]] = phi float [ [[S]], %[[SCALAR_PH]] ], [ [[SUM_1:%.*]], %[[FOR_INC]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[CMP3:%.*]] = fcmp ogt float [[L0]], [[L1]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP3]], label %[[IF_THEN:.*]], label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[IF_THEN]]: -; CHECK-INTERLEAVED-NEXT: [[CMP6:%.*]] = fcmp ogt float [[L1]], 1.000000e+00 -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP6]], label %[[IF_THEN8:.*]], label %[[IF_ELSE:.*]] -; CHECK-INTERLEAVED: [[IF_THEN8]]: -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = fadd fast float [[SUM_033]], [[L0]] -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[IF_ELSE]]: -; CHECK-INTERLEAVED-NEXT: [[CMP14:%.*]] = fcmp ogt float [[L0]], 2.000000e+00 -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP14]], label %[[IF_THEN16:.*]], label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[IF_THEN16]]: -; CHECK-INTERLEAVED-NEXT: [[ADD19:%.*]] = fadd fast float [[SUM_033]], [[L1]] -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[FOR_INC]]: -; CHECK-INTERLEAVED-NEXT: [[SUM_1]] = phi float [ [[ADD]], %[[IF_THEN8]] ], [ [[ADD19]], %[[IF_THEN16]] ], [ [[SUM_033]], %[[IF_ELSE]] ], [ [[SUM_033]], %[[FOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_END]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[SUM_1_LCSSA:%.*]] = phi float [ [[SUM_1]], %[[FOR_INC]] ], [ [[TMP24]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret float [[SUM_1_LCSSA]] +; 
CHECK-INTERLEAVED-NEXT: ret float [[TMP24]] ; entry: br label %for.body @@ -1679,11 +1207,11 @@ for.end: define i32 @reduction_sum_multiuse(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-LABEL: define i32 @reduction_sum_multiuse( ; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[_LR_PH1:.*]]: ; CHECK-NEXT: br label %[[DOTLR_PH:.*]] ; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ] +; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ] ; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 ; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] @@ -1703,11 +1231,11 @@ define i32 @reduction_sum_multiuse(ptr noalias nocapture %A, ptr noalias nocaptu ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sum_multiuse( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { -; CHECK-INTERLEAVED-NEXT: [[ENTRY:.*]]: +; CHECK-INTERLEAVED-NEXT: [[_LR_PH1:.*]]: ; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] ; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[ENTRY]] ] +; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ] +; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L10:%.*]], %[[DOTLR_PH]] ], [ 0, %[[_LR_PH1]] ] ; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] ; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 ; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] @@ -1778,26 +1306,8 @@ define i32 @reduction_predicated(ptr noalias nocapture %A, ptr noalias nocapture ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]] -; CHECK-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]] -; CHECK-NEXT: [[L9]] = add i32 [[L8]], [[L5]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 
[[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP7]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_predicated( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -1836,27 +1346,9 @@ define i32 @reduction_predicated(ptr noalias nocapture %A, ptr noalias nocapture ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP15]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = add i32 [[TMP15]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i32, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L5:%.*]] = load i32, ptr [[L4]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L6:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L7:%.*]] = add i32 [[SUM_02]], [[L6]] -; CHECK-INTERLEAVED-NEXT: [[L8:%.*]] = add i32 [[L7]], [[L3]] -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[L8]], [[L5]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[SUM_0_LCSSA]] ; entry: @@ -1902,27 +1394,13 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP5]] = zext <4 x i8> [[TMP4]] to <4 x i32> ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 -; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP7:%.*]] = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> [[TMP4]]) ; CHECK-NEXT: [[TMP8:%.*]] = zext i8 [[TMP7]] to i32 ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ] -; CHECK-NEXT: 
[[SUM_02:%.*]] = and i32 [[SUM_02P]], 255 -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4 -; CHECK-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32 -; CHECK-NEXT: [[L9]] = add i32 [[SUM_02]], [[L3E]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8 +; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP8]] to i8 ; CHECK-NEXT: ret i8 [[SUM_0_LCSSA]] ; ; CHECK-INTERLEAVED-LABEL: define i8 @reduction_add_trunc( @@ -1951,28 +1429,14 @@ define i8 @reduction_add_trunc(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED-NEXT: [[TMP11]] = zext <4 x i8> [[TMP9]] to <4 x i32> ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i8> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext i8 [[TMP13]] to i32 ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255 -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L9]] = add i32 [[SUM_02]], [[L3E]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP14]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8 +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP14]] to i8 ; CHECK-INTERLEAVED-NEXT: ret i8 [[SUM_0_LCSSA]] ; entry: @@ -2016,27 +1480,13 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP5]] = zext <4 x i8> [[TMP4]] to <4 x i32> ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 -; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop 
[[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[TMP7:%.*]] = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> [[TMP4]]) ; CHECK-NEXT: [[TMP8:%.*]] = zext i8 [[TMP7]] to i32 ; CHECK-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK: [[_LR_PH:.*:]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255 -; CHECK-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]] -; CHECK-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4 -; CHECK-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32 -; CHECK-NEXT: [[L9]] = and i32 [[SUM_02]], [[L3E]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK: [[__CRIT_EDGE:.*:]] -; CHECK-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8 +; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[TMP8]] to i8 ; CHECK-NEXT: ret i8 [[SUM_0_LCSSA]] ; ; CHECK-INTERLEAVED-LABEL: define i8 @reduction_and_trunc( @@ -2065,28 +1515,14 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %A) { ; CHECK-INTERLEAVED-NEXT: [[TMP11]] = zext <4 x i8> [[TMP9]] to <4 x i32> ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = and <4 x i8> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext i8 [[TMP13]] to i32 ; CHECK-INTERLEAVED-NEXT: br [[DOT_CRIT_EDGE:label %.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[DOTLR_PH:.*]] -; CHECK-INTERLEAVED: [[_LR_PH:.*:]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], %[[DOTLR_PH]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02P:%.*]] = phi i32 [ [[L9:%.*]], %[[DOTLR_PH]] ], [ 255, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_02:%.*]] = and i32 [[SUM_02P]], 255 -; CHECK-INTERLEAVED-NEXT: [[L2:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L3:%.*]] = load i8, ptr [[L2]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L3E:%.*]] = zext i8 [[L3]] to i32 -; CHECK-INTERLEAVED-NEXT: [[L9]] = and i32 [[SUM_02]], [[L3E]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INDVARS_IV_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], [[DOT_CRIT_EDGE]], label %[[DOTLR_PH]] ; CHECK-INTERLEAVED: [[__CRIT_EDGE:.*:]] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA1:%.*]] = phi i32 [ [[L9]], %[[DOTLR_PH]] ], [ [[TMP14]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc i32 [[SUM_0_LCSSA1]] to i8 +; CHECK-INTERLEAVED-NEXT: [[SUM_0_LCSSA:%.*]] = trunc 
i32 [[TMP14]] to i8 ; CHECK-INTERLEAVED-NEXT: ret i8 [[SUM_0_LCSSA]] ; entry: @@ -2133,7 +1569,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: [[TMP4]] = fadd float [[VEC_PHI]], [[TMP3]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] @@ -2151,7 +1587,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP6]], float [[TMP7]], float [[SUM_07]]) ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: [[FOR_END]]: ; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret float [[MULADD_LCSSA]] @@ -2185,7 +1621,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = fadd float [[VEC_PHI1]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd float [[TMP9]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2204,7 +1640,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) { ; CHECK-INTERLEAVED-NEXT: [[MULADD]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]]) ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVED: [[FOR_END]]: ; CHECK-INTERLEAVED-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret float [[MULADD_LCSSA]] @@ -2373,7 +1809,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) { ; CHECK-NEXT: [[TMP7]] = fadd float [[VEC_PHI]], [[TMP6]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], 
[[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -2388,17 +1824,17 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) { ; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] ; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: br i1 [[C]], label %[[FOO:.*]], label %[[BAR:.*]] -; CHECK: [[FOO]]: +; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]] +; CHECK: [[IF]]: ; CHECK-NEXT: br label %[[LATCH]] -; CHECK: [[BAR]]: +; CHECK: [[ELSE]]: ; CHECK-NEXT: [[MULADD:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP9]], float [[TMP10]], float [[SUM]]) ; CHECK-NEXT: br label %[[LATCH]] ; CHECK: [[LATCH]]: -; CHECK-NEXT: [[SUM_NEXT]] = phi float [ [[SUM]], %[[FOO]] ], [ [[MULADD]], %[[BAR]] ] +; CHECK-NEXT: [[SUM_NEXT]] = phi float [ [[SUM]], %[[IF]] ], [ [[MULADD]], %[[ELSE]] ] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi float [ [[SUM_NEXT]], %[[LATCH]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret float [[SUM_NEXT_LCSSA]] @@ -2437,7 +1873,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) { ; CHECK-INTERLEAVED-NEXT: [[TMP13]] = fadd float [[VEC_PHI1]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = fadd float [[TMP13]], [[TMP10]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2463,7 +1899,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) { ; CHECK-INTERLEAVED-NEXT: [[SUM_NEXT]] = phi float [ [[SUM]], %[[IF]] ], [ [[MULADD]], %[[ELSE]] ] ; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVED: [[EXIT]]: ; CHECK-INTERLEAVED-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi float [ [[SUM_NEXT]], %[[LATCH]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret float [[SUM_NEXT_LCSSA]] @@ -2524,7 +1960,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h ; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; 
CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END7:.*]], label %[[SCALAR_PH]] @@ -2550,7 +1986,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h ; CHECK-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ] ; CHECK-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK: [[FOR_END7]]: ; CHECK-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[G_1_LCSSA]] @@ -2590,7 +2026,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h ; CHECK-INTERLEAVED-NEXT: [[TMP14]] = add i32 [[VEC_PHI1]], [[TMP13]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP14]], [[TMP11]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]] @@ -2617,7 +2053,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h ; CHECK-INTERLEAVED-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ] ; CHECK-INTERLEAVED-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP27:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVED: [[FOR_END7]]: ; CHECK-INTERLEAVED-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; CHECK-INTERLEAVED-NEXT: ret i32 [[G_1_LCSSA]] @@ -2675,13 +2111,12 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP13]], <4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]]) ; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[VEC_PHI]], [[TMP7]] -; CHECK-NEXT: [[TMP18:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32> -; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP18]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP13]], <4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP9]]) ; CHECK-NEXT: [[TMP11]] = add i32 [[TMP8]], [[TMP10]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; CHECK: 
[[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END7:.*]], label %[[SCALAR_PH]] @@ -2708,7 +2143,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ] ; CHECK-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: [[FOR_END7]]: ; CHECK-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[G_1_LCSSA]] @@ -2746,17 +2181,15 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP28]], <4 x i32> [[TMP30]], <4 x i32> zeroinitializer ; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP12]]) ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = add i32 [[VEC_PHI1]], [[TMP13]] -; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = zext <4 x i8> [[TMP7]] to <4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = select <4 x i1> [[TMP27]], <4 x i32> [[TMP31]], <4 x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = select <4 x i1> [[TMP27]], <4 x i32> [[TMP29]], <4 x i32> zeroinitializer ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP15]]) ; CHECK-INTERLEAVED-NEXT: [[TMP17]] = add i32 [[TMP11]], [[TMP16]] -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = zext <4 x i8> [[TMP8]] to <4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = select <4 x i1> [[TMP28]], <4 x i32> [[TMP22]], <4 x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = select <4 x i1> [[TMP28]], <4 x i32> [[TMP30]], <4 x i32> zeroinitializer ; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP18]]) ; CHECK-INTERLEAVED-NEXT: [[TMP20]] = add i32 [[TMP14]], [[TMP19]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP20]], [[TMP17]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]] @@ -2784,7 +2217,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-INTERLEAVED-NEXT: [[G_1]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[G_016]], %[[FOR_BODY2]] ] ; CHECK-INTERLEAVED-NEXT: [[INC6]] = add nuw nsw i32 [[A_117]], 1 ; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC6]], [[I]] -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP29:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END7]], label %[[FOR_BODY2]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-INTERLEAVED: [[FOR_END7]]: ; CHECK-INTERLEAVED-NEXT: [[G_1_LCSSA:%.*]] = phi i32 [ [[G_1]], %[[FOR_INC5]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] ; 
CHECK-INTERLEAVED-NEXT: ret i32 [[G_1_LCSSA]] @@ -2893,34 +2326,11 @@ define i32 @predicated_or_dominates_reduction(ptr %b) { ; CHECK-NEXT: [[TMP48]] = add i32 [[VEC_PHI]], [[TMP47]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 ; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_COND_CLEANUP]]: -; CHECK-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], %[[FOR_INC:.*]] ], [ [[TMP48]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[A_1_LCSSA]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[G_09:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC3:%.*]], %[[FOR_INC]] ] -; CHECK-NEXT: [[A_08:%.*]] = phi i32 [ undef, %[[SCALAR_PH]] ], [ [[A_1]], %[[FOR_INC]] ] -; CHECK-NEXT: [[D:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]], i32 1 -; CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[D]], align 4 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP45]], 0 -; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[LOR_LHS_FALSE:.*]], label %[[IF_THEN:.*]] -; CHECK: [[LOR_LHS_FALSE]]: -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]] -; CHECK-NEXT: [[TMP46:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[TOBOOL2_NOT:%.*]] = icmp eq i32 [[TMP46]], 0 -; CHECK-NEXT: br i1 [[TOBOOL2_NOT]], label %[[FOR_INC]], label %[[IF_THEN]] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[A_08]], 1 -; CHECK-NEXT: br label %[[FOR_INC]] -; CHECK: [[FOR_INC]]: -; CHECK-NEXT: [[A_1]] = phi i32 [ [[INC]], %[[IF_THEN]] ], [ [[A_08]], %[[LOR_LHS_FALSE]] ] -; CHECK-NEXT: [[INC3]] = add nuw nsw i32 [[G_09]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC3]], 1000 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] +; CHECK-NEXT: ret i32 [[TMP48]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @predicated_or_dominates_reduction( ; CHECK-INTERLEAVED-SAME: ptr [[B:%.*]]) { @@ -3054,35 +2464,12 @@ define i32 @predicated_or_dominates_reduction(ptr %b) { ; CHECK-INTERLEAVED-NEXT: [[TMP98]] = add i32 [[VEC_PHI1]], [[TMP97]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP99:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP99]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP99]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP98]], [[TMP94]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_COND_CLEANUP:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] ; CHECK-INTERLEAVED: [[FOR_COND_CLEANUP]]: -; CHECK-INTERLEAVED-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ [[A_1:%.*]], %[[FOR_INC:.*]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[A_1_LCSSA]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[G_09:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC3:%.*]], %[[FOR_INC]] ] -; CHECK-INTERLEAVED-NEXT: [[A_08:%.*]] = phi i32 [ undef, %[[SCALAR_PH]] ], [ [[A_1]], 
%[[FOR_INC]] ] -; CHECK-INTERLEAVED-NEXT: [[D:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]], i32 1 -; CHECK-INTERLEAVED-NEXT: [[TMP100:%.*]] = load i32, ptr [[D]], align 4 -; CHECK-INTERLEAVED-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP100]], 0 -; CHECK-INTERLEAVED-NEXT: br i1 [[TOBOOL_NOT]], label %[[LOR_LHS_FALSE:.*]], label %[[IF_THEN:.*]] -; CHECK-INTERLEAVED: [[LOR_LHS_FALSE]]: -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x %struct.e], ptr [[B]], i32 0, i32 [[G_09]] -; CHECK-INTERLEAVED-NEXT: [[TMP101:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[TOBOOL2_NOT:%.*]] = icmp eq i32 [[TMP101]], 0 -; CHECK-INTERLEAVED-NEXT: br i1 [[TOBOOL2_NOT]], label %[[FOR_INC]], label %[[IF_THEN]] -; CHECK-INTERLEAVED: [[IF_THEN]]: -; CHECK-INTERLEAVED-NEXT: [[INC:%.*]] = add nsw i32 [[A_08]], 1 -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_INC]] -; CHECK-INTERLEAVED: [[FOR_INC]]: -; CHECK-INTERLEAVED-NEXT: [[A_1]] = phi i32 [ [[INC]], %[[IF_THEN]] ], [ [[A_08]], %[[LOR_LHS_FALSE]] ] -; CHECK-INTERLEAVED-NEXT: [[INC3]] = add nuw nsw i32 [[G_09]], 1 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC3]], 1000 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -3138,27 +2525,11 @@ define i32 @reduction_add_sub(ptr noalias nocapture %A, ptr noalias nocapture %B ; CHECK-NEXT: [[TMP6]] = add i32 [[TMP4]], [[TMP5]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[X_05]], [[L0]] -; CHECK-NEXT: [[SUB]] = sub nsw i32 [[ADD]], [[L0_B]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_add_sub( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -3190,28 +2561,12 @@ define i32 @reduction_add_sub(ptr noalias nocapture %A, ptr noalias nocapture %B ; CHECK-INTERLEAVED-NEXT: [[TMP13]] = add i32 [[TMP9]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 
8 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP11]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[X_05:%.*]] = phi i32 [ [[SUB:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4 -; CHECK-INTERLEAVED-NEXT: [[ADD:%.*]] = add nsw i32 [[X_05]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[SUB]] = sub nsw i32 [[ADD]], [[L0_B]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[SUB]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -3257,27 +2612,11 @@ define i32 @reduction_sub_add(ptr noalias nocapture %A, ptr noalias nocapture %B ; CHECK-NEXT: [[TMP6]] = add i32 [[TMP4]], [[TMP5]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[FOR_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[X_05:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4 -; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[X_05]], [[L0]] -; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUB]], [[L0_B]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK: [[FOR_END]]: -; 
CHECK-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP6]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @reduction_sub_add( ; CHECK-INTERLEAVED-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]]) { @@ -3309,28 +2648,12 @@ define i32 @reduction_sub_add(ptr noalias nocapture %A, ptr noalias nocapture %B ; CHECK-INTERLEAVED-NEXT: [[TMP13]] = add i32 [[TMP9]], [[TMP12]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP13]], [[TMP11]] ; CHECK-INTERLEAVED-NEXT: br label %[[FOR_END:.*]] -; CHECK-INTERLEAVED: [[SCALAR_PH:.*]]: -; CHECK-INTERLEAVED-NEXT: br label %[[FOR_BODY:.*]] -; CHECK-INTERLEAVED: [[FOR_BODY]]: -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[X_05:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-INTERLEAVED-NEXT: [[L0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-INTERLEAVED-NEXT: [[L0_B:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4 -; CHECK-INTERLEAVED-NEXT: [[SUB:%.*]] = sub nsw i32 [[X_05]], [[L0]] -; CHECK-INTERLEAVED-NEXT: [[ADD]] = add nsw i32 [[SUB]], [[L0_B]] -; CHECK-INTERLEAVED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-INTERLEAVED-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 256 -; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[FOR_END]], label %[[FOR_BODY]] ; CHECK-INTERLEAVED: [[FOR_END]]: -; CHECK-INTERLEAVED-NEXT: [[X_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ] -; CHECK-INTERLEAVED-NEXT: ret i32 [[X_0_LCSSA]] +; CHECK-INTERLEAVED-NEXT: ret i32 [[BIN_RDX]] ; entry: br label %for.body @@ -3354,6 +2677,129 @@ for.end: ; preds = %for.body, %entry ret i32 %x.0.lcssa } +; Test that bundling recipes that share an operand into an expression works. +; In this case the two extends are the recipes that share an operand. 
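+; A scalar sketch of the loop below, for illustration only (not consumed by the
+; FileCheck lines): each iteration sign-extends the same i16 load twice and
+; multiplies the two extends before widening and accumulating, i.e.
+;   sum += (i64)((i32)x[i] * (i32)x[i])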
+define i64 @reduction_expression_same_operands(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { +; CHECK-LABEL: define i64 @reduction_expression_same_operands( +; CHECK-SAME: ptr readonly captures(none) [[X:%.*]], ptr readonly captures(none) [[Y:%.*]], i32 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP1]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64> +; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i64> [[TMP3]], [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP4]]) +; CHECK-NEXT: [[TMP6]] = add i64 [[VEC_PHI]], [[TMP5]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i64 [ [[RDX_NEXT:%.*]], %[[LOOP]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[IV]] +; CHECK-NEXT: [[LOAD0:%.*]] = load i16, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CONV0:%.*]] = sext i16 [[LOAD0]] to i32 +; CHECK-NEXT: [[CONV1:%.*]] = sext i16 [[LOAD0]] to i32 +; CHECK-NEXT: [[MUL1:%.*]] = mul nsw i32 [[CONV0]], [[CONV1]] +; CHECK-NEXT: [[MUL:%.*]] = sext i32 [[MUL1]] to i64 +; CHECK-NEXT: [[RDX_NEXT]] = add nsw i64 [[RDX]], [[MUL]] +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ [[RDX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i64 [[R_0_LCSSA]] +; +; CHECK-INTERLEAVED-LABEL: define i64 @reduction_expression_same_operands( +; CHECK-INTERLEAVED-SAME: ptr readonly captures(none) [[X:%.*]], ptr readonly captures(none) [[Y:%.*]], i32 [[N:%.*]]) { +; CHECK-INTERLEAVED-NEXT: [[ENTRY:.*]]: +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8 +; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK-INTERLEAVED: [[VECTOR_PH]]: +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 8 +; 
CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]] +; CHECK-INTERLEAVED-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK-INTERLEAVED: [[VECTOR_BODY]]: +; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[INDEX]] +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i32 4 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP1]], align 4 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i16>, ptr [[TMP2]], align 4 +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = mul nsw <4 x i64> [[TMP4]], [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP5]]) +; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = sext <4 x i16> [[WIDE_LOAD2]] to <4 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nsw <4 x i64> [[TMP9]], [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP10]]) +; CHECK-INTERLEAVED-NEXT: [[TMP12]] = add i64 [[VEC_PHI1]], [[TMP11]] +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-INTERLEAVED: [[MIDDLE_BLOCK]]: +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i64 [[TMP12]], [[TMP7]] +; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] +; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK-INTERLEAVED: [[SCALAR_PH]]: +; CHECK-INTERLEAVED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-INTERLEAVED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[BIN_RDX]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-INTERLEAVED-NEXT: br label %[[LOOP:.*]] +; CHECK-INTERLEAVED: [[LOOP]]: +; CHECK-INTERLEAVED-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-INTERLEAVED-NEXT: [[RDX:%.*]] = phi i64 [ [[RDX_NEXT:%.*]], %[[LOOP]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ] +; CHECK-INTERLEAVED-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[IV]] +; CHECK-INTERLEAVED-NEXT: [[LOAD0:%.*]] = load i16, ptr [[ARRAYIDX]], align 4 +; CHECK-INTERLEAVED-NEXT: [[CONV0:%.*]] = sext i16 [[LOAD0]] to i32 +; CHECK-INTERLEAVED-NEXT: [[CONV1:%.*]] = sext i16 [[LOAD0]] to i32 +; CHECK-INTERLEAVED-NEXT: [[MUL1:%.*]] = mul nsw i32 [[CONV0]], [[CONV1]] +; CHECK-INTERLEAVED-NEXT: [[MUL:%.*]] = sext i32 [[MUL1]] to i64 +; CHECK-INTERLEAVED-NEXT: [[RDX_NEXT]] = add nsw i64 [[RDX]], [[MUL]] +; CHECK-INTERLEAVED-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 +; CHECK-INTERLEAVED-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] +; CHECK-INTERLEAVED-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP31:![0-9]+]] +; CHECK-INTERLEAVED: [[EXIT]]: +; CHECK-INTERLEAVED-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ [[RDX_NEXT]], %[[LOOP]] ], [ 
[[BIN_RDX]], %[[MIDDLE_BLOCK]] ] +; CHECK-INTERLEAVED-NEXT: ret i64 [[R_0_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv + %load0 = load i16, ptr %arrayidx, align 4 + %conv0 = sext i16 %load0 to i32 + %conv1 = sext i16 %load0 to i32 + %mul = mul nsw i32 %conv0, %conv1 + %conv = sext i32 %mul to i64 + %rdx.next = add nsw i64 %rdx, %conv + %iv.next = add nuw nsw i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %exit, label %loop + +exit: + %r.0.lcssa = phi i64 [ %rdx.next, %loop ] + ret i64 %r.0.lcssa +} + declare float @llvm.fmuladd.f32(float, float, float) !6 = distinct !{!6, !7, !8} diff --git a/llvm/test/Transforms/LoopVectorize/reduction-order.ll b/llvm/test/Transforms/LoopVectorize/reduction-order.ll index b07c3833ca235..b51db48c1c6ed 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-order.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-order.ll @@ -1,63 +1,93 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 ; RUN: opt -passes='loop-vectorize' -force-vector-width=4 -force-vector-interleave=1 -S < %s 2>&1 | FileCheck %s target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" ; Make sure the selects generated from reduction are always emitted ; in deterministic order. -; CHECK-LABEL: @foo( -; CHECK: vector.body: -; CHECK: [[VEC_PHI_1:%.+]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[ADD_5:%.+]], %vector.body ] -; CHECK: [[VEC_PHI_2:%.+]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[ADD_3:%.+]], %vector.body ] -; CHECK: icmp ule <4 x i64> -; CHECK-NEXT: [[ADD_3]] = add <4 x i32> splat (i32 3), [[VEC_PHI_2]] -; CHECK-NEXT: [[ADD_5]] = add <4 x i32> [[VEC_PHI_1]], splat (i32 5) -; CHECK: select <4 x i1> {{.*}}, <4 x i32> [[ADD_5]], <4 x i32> -; CHECK-NEXT: select <4 x i1> {{.*}}, <4 x i32> [[ADD_3]], <4 x i32> -; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body ; -define internal i64 @foo(ptr %t0) !prof !1 { -t16: - br label %t20 - -t17: ; preds = %t20 - %t18 = phi i32 [ %t24, %t20 ] - %t19 = phi i32 [ %t28, %t20 ] - br label %t31 +define i32 @foo() !prof !1 { +; CHECK-LABEL: define i32 @foo() {{.*}}{ +; CHECK-NEXT: [[T16:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI_1:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[ADD_5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI_2:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[ADD_3:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], +; CHECK-NEXT: [[TMP0:%.*]] = icmp ule <4 x i64> [[VEC_IV]], splat (i64 9) +; CHECK-NEXT: [[ADD_3]] = add <4 x i32> splat (i32 3), [[VEC_PHI_2]] +; CHECK-NEXT: [[ADD_5]] = add <4 x i32> [[VEC_PHI_1]], splat (i32 5) +; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[ADD_5]], <4 x i32> [[VEC_PHI_1]] +; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> 
[[TMP0]], <4 x i32> [[ADD_3]], <4 x i32> [[VEC_PHI_2]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12 +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]]) +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP7]], [[TMP6]] +; CHECK-NEXT: ret i32 [[ADD]] +; +entry: + br label %loop -t20: ; preds = %t20, %t16 - %t21 = phi i64 [ 0, %t16 ], [ %t29, %t20 ] - %t22 = phi i32 [ 0, %t16 ], [ %t28, %t20 ] - %t23 = phi i32 [ 0, %t16 ], [ %t24, %t20 ] - %t24 = add i32 3, %t23 - %t28 = add i32 %t22, 5 - %t29 = add nuw nsw i64 %t21, 1 - %t30 = icmp eq i64 %t29, 10 - br i1 %t30, label %t17, label %t20, !prof !2 +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %red.1 = phi i32 [ 0, %entry ], [ %red.1.next, %loop ] + %red.2 = phi i32 [ 0, %entry ], [ %red.2.next, %loop ] + %red.2.next = add i32 3, %red.2 + %red.1.next = add i32 %red.1, 5 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 10 + br i1 %ec, label %exit, label %loop, !prof !2 -t31: - ret i64 undef +exit: + %r.2 = phi i32 [ %red.2.next, %loop ] + %r.1 = phi i32 [ %red.1.next, %loop ] + %add = add i32 %r.2, %r.1 + ret i32 %add } ; Make sure we do not fail when checking for ordered reduction. This test just ; exercises the path and bails out without performing vectorization. -; CHECK-LABEL: quux -; CHECK-NOT: fadd <4 x -define void @quux(i1 %arg) { -bb: +define double @quux(i1 %arg) { +; CHECK-LABEL: define double @quux( +; CHECK-SAME: i1 [[ARG:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[HEADER:.*]] +; CHECK: [[HEADER]]: +; CHECK-NEXT: [[TMP5:%.*]] = phi double [ 1.300000e+01, %[[ENTRY]] ], [ [[TMP:%.*]], %[[LATCH:.*]] ] +; CHECK-NEXT: [[TMP6:%.*]] = fadd double [[TMP5]], 1.000000e+00 +; CHECK-NEXT: br label %[[LATCH]] +; CHECK: [[LATCH]]: +; CHECK-NEXT: [[TMP]] = phi double [ [[TMP6]], %[[HEADER]] ] +; CHECK-NEXT: br i1 [[ARG]], label %[[HEADER]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[R:%.*]] = phi double [ [[TMP]], %[[LATCH]] ] +; CHECK-NEXT: ret double [[R]] +; +entry: br label %header -latch: ; preds = %header - %tmp = phi double [ %tmp6, %header ] - br i1 %arg, label %header, label %bb2 - -bb2: ; preds = %latch - %tmp3 = phi double [ %tmp, %latch ] - ret void - -header: ; preds = %latch, %bb - %tmp5 = phi double [ 1.300000e+01, %bb ], [ %tmp, %latch ] +header: + %tmp5 = phi double [ 1.300000e+01, %entry ], [ %tmp, %latch ] %tmp6 = fadd double %tmp5, 1.000000e+00 br label %latch + +latch: + %tmp = phi double [ %tmp6, %header ] + br i1 %arg, label %header, label %exit + +exit: + %r = phi double [ %tmp, %latch ] + ret double %r } !1 = !{!"function_entry_count", i64 801} diff --git a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll index 7d35ad0095c8f..855a0ce56f2c7 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-predselect.ll @@ -60,11 +60,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP25]]) ; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] @@ -162,11 +158,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP43]]) ; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] @@ -267,11 +259,7 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[DOT_CRIT_EDGE:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[DOTLR_PH:%.*]] -; CHECK: .lr.ph: -; CHECK-NEXT: br i1 poison, label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]] ; CHECK: ._crit_edge: ; CHECK-NEXT: [[PROD_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP42]]) ; CHECK-NEXT: ret i32 [[PROD_0_LCSSA]] @@ -371,11 +359,7 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP42]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -475,11 +459,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP42]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -579,11 +559,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> 
[[TMP42]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -683,11 +659,7 @@ define float @reduction_fadd(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP42]]) ; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] @@ -787,11 +759,7 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP43:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP43]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP42]]) ; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] @@ -874,11 +842,7 @@ define i32 @reduction_min(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP25]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] @@ -959,11 +923,7 @@ define i32 @reduction_max(ptr nocapture %A, ptr nocapture %B) { ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[TMP25]]) ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/reduction.ll b/llvm/test/Transforms/LoopVectorize/reduction.ll index 916a83a727f89..65d57015b0140 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction.ll @@ -775,21 +775,7 @@ define float @reduction_conditional(ptr %A, ptr %B, ptr %C, float %S) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[IF_THEN:%.*]], label [[FOR_INC:%.*]] -; CHECK: if.then: -; CHECK-NEXT: br i1 poison, label [[IF_THEN8:%.*]], label [[IF_ELSE:%.*]] -; CHECK: if.then8: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: if.else: -; 
CHECK-NEXT: br i1 poison, label [[IF_THEN16:%.*]], label [[FOR_INC]] -; CHECK: if.then16: -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: br i1 poison, label [[FOR_BODY]], label [[FOR_END]] +; CHECK-NEXT: br label [[FOR_INC:%.*]] ; CHECK: for.end: ; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[PREDPHI3]]) ; CHECK-NEXT: ret float [[SUM_1_LCSSA]] diff --git a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll index e6ad5937dc5e2..e621b804d5633 100644 --- a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll +++ b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll @@ -24,20 +24,8 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[BODY:.*]] -; CHECK: [[BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_TMP:%.*]] = phi i32 [ [[SUM:%.*]], %[[BODY]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[GEP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[LOAD0:%.*]] = load i32, ptr [[GEP0]], align 4 -; CHECK-NEXT: [[SUM]] = add i32 [[SUM_TMP]], [[LOAD0]] -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 256 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[BODY]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM]], %[[BODY]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP4]] ; entry: br label %body diff --git a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll index cb0c778b95026..5894c3af1d637 100644 --- a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll +++ b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll @@ -109,14 +109,13 @@ define void @runtime_checks_ptr_inductions(ptr %dst.1, ptr %dst.2, i1 %c) { ; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi ptr [ [[DST_1]], %[[ENTRY]] ], [ [[PTR_IV_1_NEXT:%.*]], %[[LOOP_1]] ] ; CHECK-NEXT: [[CALL:%.*]] = call i32 @val() ; CHECK-NEXT: [[SEL_DST:%.*]] = select i1 [[C]], ptr [[DST_1]], ptr [[DST_2]] +; CHECK-NEXT: [[SEL_DST_LCSSA12:%.*]] = ptrtoint ptr [[SEL_DST]] to i64 ; CHECK-NEXT: [[PTR_IV_1_NEXT]] = getelementptr i8, ptr [[PTR_IV_1]], i64 1 ; CHECK-NEXT: [[EC_1:%.*]] = icmp eq i32 [[CALL]], 0 ; CHECK-NEXT: br i1 [[EC_1]], label %[[LOOP_2_HEADER_PREHEADER:.*]], label %[[LOOP_1]] ; CHECK: [[LOOP_2_HEADER_PREHEADER]]: -; CHECK-NEXT: [[SEL_DST_LCSSA1:%.*]] = phi ptr [ [[SEL_DST]], %[[LOOP_1]] ] ; CHECK-NEXT: [[PTR_IV_1_LCSSA:%.*]] = phi ptr [ [[PTR_IV_1]], %[[LOOP_1]] ] ; CHECK-NEXT: [[SEL_DST_LCSSA:%.*]] = phi ptr [ [[SEL_DST]], %[[LOOP_1]] ] -; CHECK-NEXT: [[SEL_DST_LCSSA12:%.*]] = ptrtoint ptr [[SEL_DST_LCSSA1]] to i64 ; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[PTR_IV_1_LCSSA]] to i64 @@ -140,13 +139,13 @@ define void @runtime_checks_ptr_inductions(ptr %dst.1, ptr %dst.2, i1 %c) { ; CHECK-NEXT: br label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: ; 
CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 1023, %[[MIDDLE_BLOCK]] ], [ 1, %[[VECTOR_MEMCHECK]] ] -; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_LCSSA]], %[[VECTOR_MEMCHECK]] ] -; CHECK-NEXT: [[BC_RESUME_VAL5:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[SEL_DST_LCSSA]], %[[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_LCSSA]], %[[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[SEL_DST_LCSSA]], %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[LOOP_2_HEADER:.*]] ; CHECK: [[LOOP_2_HEADER]]: ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[DEC7:%.*]], %[[LOOP_2_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] -; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2_LATCH]] ], [ [[BC_RESUME_VAL4]], %[[SCALAR_PH]] ] -; CHECK-NEXT: [[PTR_IV_3:%.*]] = phi ptr [ [[PTR_IV_3_NEXT:%.*]], %[[LOOP_2_LATCH]] ], [ [[BC_RESUME_VAL5]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2_LATCH]] ], [ [[BC_RESUME_VAL3]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[PTR_IV_3:%.*]] = phi ptr [ [[PTR_IV_3_NEXT:%.*]], %[[LOOP_2_LATCH]] ], [ [[BC_RESUME_VAL4]], %[[SCALAR_PH]] ] ; CHECK-NEXT: [[EC_2:%.*]] = icmp eq i32 [[IV]], 1024 ; CHECK-NEXT: br i1 [[EC_2]], label %[[EXIT:.*]], label %[[LOOP_2_LATCH]] ; CHECK: [[LOOP_2_LATCH]]: @@ -220,14 +219,18 @@ define void @expand_diff_scev_unknown(ptr %dst, i1 %invar.c, i32 %step) mustprog ; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[STEP]], i32 1) ; CHECK-NEXT: [[TMP8:%.*]] = udiv i32 [[TMP7]], [[UMAX]] ; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP6]], [[TMP8]] -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP9]], 2 +; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[INDVAR_LCSSA1]], 2 +; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP12]], i32 0) +; CHECK-NEXT: [[TMP14:%.*]] = add i32 [[TMP3]], -1 +; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[SMAX1]], [[TMP14]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP15]], 2 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] ; CHECK: [[VECTOR_SCEVCHECK]]: ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[STEP]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP9]], 2 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP9]], [[N_MOD_VF]] +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP15]], 2 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP15]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[IV_1_LCSSA]], [[N_VEC]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: @@ -239,7 +242,7 @@ define void @expand_diff_scev_unknown(ptr %dst, i1 %invar.c, i32 %step) mustprog ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP9]], [[N_VEC]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP15]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ [[IV_1_LCSSA]], %[[LOOP_2_PREHEADER]] ], [ [[IV_1_LCSSA]], %[[VECTOR_SCEVCHECK]] ] diff --git 
a/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll b/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll new file mode 100644 index 0000000000000..0896848905c6c --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll @@ -0,0 +1,140 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 +; RUN: opt -passes=loop-vectorize -force-vector-width=4 -S %s | FileCheck %s + +define i32 @preserve_inbounds(i64 %start, ptr %ptr) { +; CHECK-LABEL: define i32 @preserve_inbounds( +; CHECK-SAME: i64 [[START:%.*]], ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[START]], [[INDEX]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 -3 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> +; CHECK-NEXT: [[TMP4]] = add <4 x i32> [[REVERSE]], [[VEC_PHI]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) +; CHECK-NEXT: br label %[[END:.*]] +; CHECK: [[END]]: +; CHECK-NEXT: ret i32 [[TMP6]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %rev.ind = phi i64 [ %start, %entry ], [ %rev.ind.next, %loop ] + %redux = phi i32 [ 0, %entry ], [ %redux.next, %loop ] + %rev.ind.next = add i64 %rev.ind, -1 + %gep.ptr.ind = getelementptr inbounds i32, ptr %ptr, i64 %rev.ind.next + %ld.ptr = load i32, ptr %gep.ptr.ind, align 4 + %redux.next = add i32 %ld.ptr, %redux + %iv.next = add i32 %iv, 1 + %exit.cond = icmp ne i32 %iv.next, 1024 + br i1 %exit.cond, label %loop, label %end + +end: + ret i32 %redux.next +} + +define i32 @preserve_nusw(i64 %start, ptr %ptr) { +; CHECK-LABEL: define i32 @preserve_nusw( +; CHECK-SAME: i64 [[START:%.*]], ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[START]], [[INDEX]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr nusw i32, ptr [[PTR]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr nusw i32, ptr [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr nusw i32, ptr [[TMP2]], i32 -3 +;
CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> +; CHECK-NEXT: [[TMP4]] = add <4 x i32> [[REVERSE]], [[VEC_PHI]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) +; CHECK-NEXT: br label %[[END:.*]] +; CHECK: [[END]]: +; CHECK-NEXT: ret i32 [[TMP6]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %rev.ind = phi i64 [ %start, %entry ], [ %rev.ind.next, %loop ] + %redux = phi i32 [ 0, %entry ], [ %redux.next, %loop ] + %rev.ind.next = add i64 %rev.ind, -1 + %gep.ptr.ind = getelementptr nusw i32, ptr %ptr, i64 %rev.ind.next + %ld.ptr = load i32, ptr %gep.ptr.ind, align 4 + %redux.next = add i32 %ld.ptr, %redux + %iv.next = add i32 %iv, 1 + %exit.cond = icmp ne i32 %iv.next, 1024 + br i1 %exit.cond, label %loop, label %end + +end: + ret i32 %redux.next +} + +define i32 @drop_nuw(i64 %start, ptr %ptr) { +; CHECK-LABEL: define i32 @drop_nuw( +; CHECK-SAME: i64 [[START:%.*]], ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[START]], [[INDEX]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr nuw i32, ptr [[PTR]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[TMP2]], i32 -3 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> +; CHECK-NEXT: [[TMP4]] = add <4 x i32> [[REVERSE]], [[VEC_PHI]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) +; CHECK-NEXT: br label %[[END:.*]] +; CHECK: [[END]]: +; CHECK-NEXT: ret i32 [[TMP6]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %rev.ind = phi i64 [ %start, %entry ], [ %rev.ind.next, %loop ] + %redux = phi i32 [ 0, %entry ], [ %redux.next, %loop ] + %rev.ind.next = add i64 %rev.ind, -1 + %gep.ptr.ind = getelementptr nuw i32, ptr %ptr, i64 %rev.ind.next + %ld.ptr = load i32, ptr %gep.ptr.ind, align 4 + %redux.next = add i32 %ld.ptr, %redux + %iv.next = add i32 %iv, 1 + %exit.cond = icmp ne i32 %iv.next, 1024 + br i1 %exit.cond, label %loop, label %end + +end: + ret i32 %redux.next +} diff --git a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll index 579092136d651..31129d3bcc2f4 100644 --- a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll +++
b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll @@ -37,22 +37,8 @@ define i32 @reverse_induction_i64(i64 %startval, ptr %ptr) { ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label %[[LOOPEND:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[ADD_I7:%.*]] = phi i64 [ [[STARTVAL]], %[[SCALAR_PH]] ], [ [[ADD_I:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC4:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[REDUX5:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC_REDUX:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ADD_I]] = add i64 [[ADD_I7]], -1 -; CHECK-NEXT: [[KIND__I:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD_I]] -; CHECK-NEXT: [[TMP_I1:%.*]] = load i32, ptr [[KIND__I]], align 4 -; CHECK-NEXT: [[INC_REDUX]] = add i32 [[TMP_I1]], [[REDUX5]] -; CHECK-NEXT: [[INC4]] = add i32 [[I_06]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC4]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[LOOPEND]] ; CHECK: [[LOOPEND]]: -; CHECK-NEXT: [[INC_REDUX_LCSSA:%.*]] = phi i32 [ [[INC_REDUX]], %[[FOR_BODY]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[INC_REDUX_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -105,22 +91,8 @@ define i32 @reverse_induction_i128(i128 %startval, ptr %ptr) { ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[TMP10]] ; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label %[[LOOPEND:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[ADD_I7:%.*]] = phi i128 [ [[STARTVAL]], %[[SCALAR_PH]] ], [ [[ADD_I:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC4:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[REDUX5:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[INC_REDUX:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[ADD_I]] = add i128 [[ADD_I7]], -1 -; CHECK-NEXT: [[KIND__I:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i128 [[ADD_I]] -; CHECK-NEXT: [[TMP_I1:%.*]] = load i32, ptr [[KIND__I]], align 4 -; CHECK-NEXT: [[INC_REDUX]] = add i32 [[TMP_I1]], [[REDUX5]] -; CHECK-NEXT: [[INC4]] = add i32 [[I_06]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC4]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[LOOPEND]] ; CHECK: [[LOOPEND]]: -; CHECK-NEXT: [[INC_REDUX_LCSSA:%.*]] = phi i32 [ [[INC_REDUX]], %[[FOR_BODY]] ], [ [[TMP13]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[INC_REDUX_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP13]] ; entry: br label %for.body @@ -263,19 +235,6 @@ define void @reverse_forward_induction_i64_i8() { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[WHILE_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[WHILE_BODY:.*]] -; CHECK: [[WHILE_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1023, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ] -; CHECK-NEXT: [[FORWARD_INDUCTION_05:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[WHILE_BODY]] ] -; CHECK-NEXT: [[INC]] = add i8 [[FORWARD_INDUCTION_05]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[INC]] to i32 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr @a, 
i64 0, i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], -1 -; CHECK-NEXT: [[TMP13:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 0 -; CHECK-NEXT: br i1 [[CMP]], label %[[WHILE_BODY]], label %[[WHILE_END]] ; CHECK: [[WHILE_END]]: ; CHECK-NEXT: ret void ; @@ -329,19 +288,6 @@ define void @reverse_forward_induction_i64_i8_signed() { ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[WHILE_END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[WHILE_BODY:.*]] -; CHECK: [[WHILE_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 1023, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[WHILE_BODY]] ] -; CHECK-NEXT: [[FORWARD_INDUCTION_05:%.*]] = phi i8 [ -127, %[[SCALAR_PH]] ], [ [[INC:%.*]], %[[WHILE_BODY]] ] -; CHECK-NEXT: [[INC]] = add i8 [[FORWARD_INDUCTION_05]], 1 -; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[INC]] to i32 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr @a, i64 0, i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], -1 -; CHECK-NEXT: [[TMP13:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP13]], 0 -; CHECK-NEXT: br i1 [[CMP]], label %[[WHILE_BODY]], label %[[WHILE_END]] ; CHECK: [[WHILE_END]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll index 79fdc07042525..f87be5a115044 100644 --- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll +++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll @@ -429,13 +429,9 @@ define dso_local void @forced_optsize(ptr noalias nocapture readonly %x_p, ptr n ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; ; FORCED_OPTSIZE-LABEL: @forced_optsize( ; FORCED_OPTSIZE-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll index a43ea07d0c7af..c7b27040d6484 100644 --- a/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll +++ b/llvm/test/Transforms/LoopVectorize/scev-exit-phi-invalidation.ll @@ -19,60 +19,49 @@ define void @test_pr63368(i1 %c, ptr %A) { ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP1]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: +; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP0]], [[VECTOR_BODY]] ] ; CHECK-NEXT: br label [[EXIT_1:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_1_HEADER:%.*]] -; CHECK: loop.1.header: -; CHECK-NEXT: [[IV_1:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_1_NEXT:%.*]], [[LOOP_1_LATCH:%.*]] ] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br i1 [[C]], label [[LOOP_1_LATCH]], label [[LOOP_1_LATCH]] -; CHECK: loop.1.latch: -; 
CHECK-NEXT: [[L_LCSSA:%.*]] = phi i32 [ [[L]], [[LOOP_1_HEADER]] ], [ [[L]], [[LOOP_1_HEADER]] ] -; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i32 [[IV_1]], 1 -; CHECK-NEXT: [[EC_1:%.*]] = icmp eq i32 [[IV_1_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC_1]], label [[EXIT_1]], label [[LOOP_1_HEADER]] ; CHECK: exit.1: -; CHECK-NEXT: [[L_LCSSA_LCSSA:%.*]] = phi i32 [ [[L_LCSSA]], [[LOOP_1_LATCH]] ], [ [[TMP0]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[L_LCSSA_LCSSA]], i32 -1) +; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[DOTLCSSA]], i32 -1) ; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[SMAX1]], 2 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP2]], 4 -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH2:%.*]], label [[VECTOR_SCEVCHECK:%.*]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]] ; CHECK: vector.scevcheck: -; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[L_LCSSA_LCSSA]], i32 -1) +; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 poison, i32 -1) ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[SMAX]], 1 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8 ; CHECK-NEXT: [[TMP5:%.*]] = add i8 1, [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], 1 ; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP3]], 255 ; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] -; CHECK-NEXT: br i1 [[TMP8]], label [[SCALAR_PH2]], label [[VECTOR_PH3:%.*]] -; CHECK: vector.ph3: +; CHECK-NEXT: br i1 [[TMP8]], label [[SCALAR_PH]], label [[VECTOR_PH2:%.*]] +; CHECK: vector.ph2: ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 4 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP9:%.*]] = trunc i32 [[N_VEC]] to i8 -; CHECK-NEXT: br label [[VECTOR_BODY4:%.*]] -; CHECK: vector.body4: -; CHECK-NEXT: [[INDEX5:%.*]] = phi i32 [ 0, [[VECTOR_PH3]] ], [ [[INDEX_NEXT6:%.*]], [[VECTOR_BODY4]] ] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX5]] to i8 +; CHECK-NEXT: br label [[VECTOR_BODY3:%.*]] +; CHECK: vector.body3: +; CHECK-NEXT: [[INDEX4:%.*]] = phi i32 [ 0, [[VECTOR_PH2]] ], [ [[INDEX_NEXT5:%.*]], [[VECTOR_BODY3]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX4]] to i8 ; CHECK-NEXT: [[TMP10:%.*]] = add i8 [[OFFSET_IDX]], 1 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[A]], i8 [[TMP10]] ; CHECK-NEXT: store <4 x i8> zeroinitializer, ptr [[TMP11]], align 1 -; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i32 [[INDEX5]], 4 -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT6]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK7:%.*]], label [[VECTOR_BODY4]], !llvm.loop [[LOOP3:![0-9]+]] -; CHECK: middle.block7: +; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i32 [[INDEX4]], 4 +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT5]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK6:%.*]], label [[VECTOR_BODY3]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: middle.block6: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT_2:%.*]], label [[SCALAR_PH2]] -; CHECK: scalar.ph2: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ [[TMP9]], [[MIDDLE_BLOCK7]] ], [ 0, [[EXIT_1]] ], [ 0, [[VECTOR_SCEVCHECK]] ] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT_2:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ [[TMP9]], [[MIDDLE_BLOCK6]] ], [ 0, [[EXIT_1]] ], [ 0, [[VECTOR_SCEVCHECK]] ] ; CHECK-NEXT: br label [[LOOP_2:%.*]] ; CHECK: loop.2: -; CHECK-NEXT: 
[[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH2]] ], [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ] +; CHECK-NEXT: [[IV_2:%.*]] = phi i8 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ] ; CHECK-NEXT: [[IV_2_NEXT]] = add i8 [[IV_2]], 1 ; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i8 [[IV_2_NEXT]] ; CHECK-NEXT: store i8 0, ptr [[GEP_A]], align 1 ; CHECK-NEXT: [[IV_2_SEXT:%.*]] = sext i8 [[IV_2]] to i32 -; CHECK-NEXT: [[EC_2:%.*]] = icmp sge i32 [[L_LCSSA_LCSSA]], [[IV_2_SEXT]] +; CHECK-NEXT: [[EC_2:%.*]] = icmp sge i32 [[DOTLCSSA]], [[IV_2_SEXT]] ; CHECK-NEXT: br i1 [[EC_2]], label [[LOOP_2]], label [[EXIT_2]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: exit.2: ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll b/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll index d620b92115a60..92af82868ad1e 100644 --- a/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll +++ b/llvm/test/Transforms/LoopVectorize/select-neg-cond.ll @@ -20,21 +20,6 @@ define void @neg_cond(ptr noalias %p, ptr noalias %q) { ; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_GEP:%.*]] = getelementptr i32, ptr [[P]], i32 [[IV]] -; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[P_GEP]], align 4 -; CHECK-NEXT: [[Q_GEP:%.*]] = getelementptr i32, ptr [[Q]], i32 [[IV]] -; CHECK-NEXT: [[Y:%.*]] = load i32, ptr [[Q_GEP]], align 4 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 42 -; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[CMP]], true -; CHECK-NEXT: [[SEL:%.*]] = select i1 [[NOT]], i32 42, i32 43 -; CHECK-NEXT: store i32 [[SEL]], ptr [[P_GEP]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i32 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll b/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll index b87cf904c897c..f4d5a84fe67c8 100644 --- a/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll +++ b/llvm/test/Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll @@ -25,21 +25,8 @@ define i64 @pr62565_incoming_value_known_undef(i64 %a, ptr %src) { ; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[TMP4]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP5]], i64 [[A]], i64 undef ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ undef, [[SCALAR_PH]] ], [ [[SELECT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 1 -; CHECK-NEXT: [[SELECT]] = select i1 [[C]], i64 [[RED]], i64 [[A]] -; CHECK-NEXT: [[ADD]] = add nuw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 32 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[LOOP]] ], [ 
[[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[PHI]] +; CHECK-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop @@ -83,21 +70,8 @@ define i64 @pr62565_incoming_value_known_poison(i64 %a, ptr %src) { ; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[TMP4]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP5]], i64 [[A]], i64 poison ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ poison, [[SCALAR_PH]] ], [ [[SELECT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 1 -; CHECK-NEXT: [[SELECT]] = select i1 [[C]], i64 [[RED]], i64 [[A]] -; CHECK-NEXT: [[ADD]] = add nuw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 32 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[LOOP]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[PHI]] +; CHECK-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop @@ -141,21 +115,8 @@ define i64 @pr62565_incoming_value_may_be_poison(i64 %a, ptr %src, i64 %start) { ; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[TMP4]] ; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP5]], i64 [[A]], i64 [[START]] ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 1, [[SCALAR_PH:%.*]] ], [ [[ADD:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[START]], [[SCALAR_PH]] ], [ [[SELECT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 1 -; CHECK-NEXT: [[SELECT]] = select i1 [[C]], i64 [[RED]], i64 [[A]] -; CHECK-NEXT: [[ADD]] = add nuw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 32 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[LOOP]] ], [ [[RDX_SELECT]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[PHI]] +; CHECK-NEXT: ret i64 [[RDX_SELECT]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/select-reduction.ll b/llvm/test/Transforms/LoopVectorize/select-reduction.ll index 0fd780e7b44bc..1f5646d2a3090 100644 --- a/llvm/test/Transforms/LoopVectorize/select-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/select-reduction.ll @@ -36,22 +36,11 @@ define i32 @test(i64 %N, i32 %x) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP3]]) -; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[NEXT:%.*]] = phi i32 [ [[SEL:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[EXTRA_ITER]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[SEL_COND:%.*]] = icmp sgt i32 [[NEXT]], 10 -; CHECK-NEXT: [[SEL]] = select i1 [[SEL_COND]], i32 [[NEXT]], i32 10 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 0 -; CHECK-NEXT: br i1 [[EC]], label 
[[EXIT_LOOPEXIT]], label [[LOOP]] ; CHECK: exit.loopexit: -; CHECK-NEXT: [[SEL_LCSSA:%.*]] = phi i32 [ [[SEL]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[CHECK]] ], [ [[SEL_LCSSA]], [[EXIT_LOOPEXIT]] ] +; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[CHECK]] ], [ [[TMP5]], [[LOOP]] ] ; CHECK-NEXT: ret i32 [[RESULT]] ; entry: @@ -90,19 +79,9 @@ define i32 @pr66895_tail_fold_reduction_exit_inst_gets_simplified(i32 %n) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[VEC_PHI]]) -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 12, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], -1 -; CHECK-NEXT: [[RED_NEXT]] = mul i32 [[RED]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 0 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i32 [[RED_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP3]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll index edad0b59cf9ae..794e274a2628c 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-cond-poison.ll @@ -40,20 +40,8 @@ define noundef i32 @f(i32 noundef %g) { ; VF4IC2-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32 ; VF4IC2-NEXT: [[TMP16:%.*]] = add i32 0, [[TMP15]] ; VF4IC2-NEXT: br label %[[RETURN]] -; VF4IC2: [[SCALAR_PH:.*]]: -; VF4IC2-NEXT: br label %[[LOOP_HEADER:.*]] -; VF4IC2: [[LOOP_HEADER]]: -; VF4IC2-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF4IC2-NEXT: [[MUL:%.*]] = shl nuw nsw i32 [[IV]], 3 -; VF4IC2-NEXT: [[SHR:%.*]] = ashr i32 [[G]], [[MUL]] -; VF4IC2-NEXT: [[EARLY_COND:%.*]] = icmp eq i32 [[SHR]], 0 -; VF4IC2-NEXT: br i1 [[EARLY_COND]], label %[[LOOP_LATCH]], label %[[RETURN]] -; VF4IC2: [[LOOP_LATCH]]: -; VF4IC2-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; VF4IC2-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 8 -; VF4IC2-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]] ; VF4IC2: [[RETURN]]: -; VF4IC2-NEXT: [[RES:%.*]] = phi i32 [ [[SHR]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ [[TMP16]], %[[VECTOR_EARLY_EXIT]] ] +; VF4IC2-NEXT: [[RES:%.*]] = phi i32 [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ [[TMP16]], %[[VECTOR_EARLY_EXIT]] ] ; VF4IC2-NEXT: ret i32 [[RES]] ; ; VF8IC1-LABEL: define noundef i32 @f( @@ -80,20 +68,8 @@ define noundef i32 @f(i32 noundef %g) { ; VF8IC1-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32 ; VF8IC1-NEXT: [[TMP7:%.*]] = add i32 0, [[TMP6]] ; VF8IC1-NEXT: br label %[[RETURN]] -; VF8IC1: [[SCALAR_PH:.*]]: -; VF8IC1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF8IC1: [[LOOP_HEADER]]: -; VF8IC1-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8IC1-NEXT: [[MUL:%.*]] = shl nuw nsw i32 [[IV]], 3 -; VF8IC1-NEXT: [[SHR:%.*]] = ashr i32 [[G]], [[MUL]] -; 
VF8IC1-NEXT: [[EARLY_COND:%.*]] = icmp eq i32 [[SHR]], 0 -; VF8IC1-NEXT: br i1 [[EARLY_COND]], label %[[LOOP_LATCH]], label %[[RETURN]] -; VF8IC1: [[LOOP_LATCH]]: -; VF8IC1-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; VF8IC1-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 8 -; VF8IC1-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]] ; VF8IC1: [[RETURN]]: -; VF8IC1-NEXT: [[RES:%.*]] = phi i32 [ [[SHR]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VECTOR_EARLY_EXIT]] ] +; VF8IC1-NEXT: [[RES:%.*]] = phi i32 [ [[TMP4]], %[[MIDDLE_BLOCK]] ], [ [[TMP7]], %[[VECTOR_EARLY_EXIT]] ] ; VF8IC1-NEXT: ret i32 [[RES]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll index b1b3a3feb007a..96206977864e2 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll @@ -9,9 +9,9 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 4), "dereferenceable"(ptr [[P2]], i64 1024) ] ; CHECK-NEXT: br label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX1]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX1]] @@ -22,7 +22,7 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si ; CHECK-NEXT: [[TMP5:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP3]]) ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024 ; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]] -; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_SPLIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_SPLIT]]: ; CHECK-NEXT: br i1 [[TMP5]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -31,22 +31,8 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_si ; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP4]], i1 true) ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[TMP8]] ; CHECK-NEXT: br label %[[LOOP_END]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP1:.*]] -; CHECK: [[LOOP1]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[LOOP_INC:.*]] ], [ 0, %[[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label %[[LOOP_INC]], label %[[LOOP_END]] -; CHECK: [[LOOP_INC]]: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 
[[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP1]], label %[[LOOP_END]] ; CHECK: [[LOOP_END]]: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], %[[LOOP1]] ], [ -1, %[[LOOP_INC]] ], [ -1, %[[MIDDLE_BLOCK]] ], [ [[TMP9]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ -1, %[[MIDDLE_BLOCK]] ], [ [[TMP9]], %[[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -331,9 +317,9 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 2 ; CHECK-NEXT: [[IV_NEXT1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] -; CHECK-NEXT: br label %[[LOOP_HEADER1:.*]] -; CHECK: [[LOOP_HEADER1]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[LOOP_HEADER1]] ] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[NEXT_GEP]], align 2 @@ -343,10 +329,10 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]]) ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]] -; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_SPLIT:.*]], label %[[LOOP_HEADER1]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_SPLIT]]: -; CHECK-NEXT: br i1 [[TMP7]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[LOOP_LATCH1:.*]] -; CHECK: [[LOOP_LATCH1]]: +; CHECK-NEXT: br i1 [[TMP7]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_PH]] ; CHECK: [[VECTOR_EARLY_EXIT]]: @@ -356,10 +342,10 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP12]] ; CHECK-NEXT: br label %[[EXIT_LOOPEXIT]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[IV1:%.*]] = phi ptr [ [[IV_NEXT1]], %[[LOOP_LATCH1]] ], [ [[A]], %[[LOOP_HEADER_PREHEADER]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IV_NEXT1]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[LOOP_HEADER_PREHEADER]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[IV1]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[IV]], align 2 ; CHECK-NEXT: [[C_0:%.*]] = icmp eq i16 [[L]], 0 ; CHECK-NEXT: br i1 [[C_0]], label %[[EXIT_LOOPEXIT]], label %[[LOOP_LATCH]] @@ -368,7 +354,7 @@ define i64 @early_exit_alignment_and_deref_known_via_assumption_n_not_zero_i16_p ; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV_NEXT]], [[A_END]] ; CHECK-NEXT: br i1 [[EC]], label %[[EXIT_LOOPEXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[EXIT_LOOPEXIT]]: -; 
CHECK-NEXT: [[P_PH:%.*]] = phi ptr [ [[A_END]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[A_END]], %[[LOOP_LATCH1]] ], [ [[TMP13]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[P_PH:%.*]] = phi ptr [ [[A_END]], %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ [[A_END]], %[[MIDDLE_BLOCK]] ], [ [[TMP13]], %[[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: br label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: [[P:%.*]] = phi ptr [ [[A]], %[[ENTRY]] ], [ [[P_PH]], %[[EXIT_LOOPEXIT]] ] @@ -514,3 +500,50 @@ exit: %first.addr.0.lcssa.i = phi ptr [ %first, %entry ], [ %iv, %loop.header ], [ %iv.next, %loop.latch ] ret ptr %first.addr.0.lcssa.i } + +define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size_nofree_via_context(ptr noalias %p1, ptr noalias %p2) nosync { +; CHECK-LABEL: define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size_nofree_via_context( +; CHECK-SAME: ptr noalias [[P1:%.*]], ptr noalias [[P2:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P1]], i64 4), "dereferenceable"(ptr [[P1]], i64 1024) ] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 4), "dereferenceable"(ptr [[P2]], i64 1024) ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[LOOP_INC:.*]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX1]] +; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX1]] +; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 +; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] +; CHECK-NEXT: br i1 [[CMP3]], label %[[LOOP_INC]], label %[[LOOP_END:.*]] +; CHECK: [[LOOP_INC]]: +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX1]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[LOOP_END]] +; CHECK: [[LOOP_END]]: +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX1]], %[[LOOP]] ], [ -1, %[[LOOP_INC]] ] +; CHECK-NEXT: ret i64 [[RETVAL]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %p1, i64 4), "dereferenceable"(ptr %p1, i64 1024) ] + call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 4), "dereferenceable"(ptr %p2, i64 1024) ] + br label %loop + +loop: + %index = phi i64 [ %index.next, %loop.inc ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index + %ld1 = load i8, ptr %arrayidx, align 1 + %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index + %ld2 = load i8, ptr %arrayidx1, align 1 + %cmp3 = icmp eq i8 %ld1, %ld2 + br i1 %cmp3, label %loop.inc, label %loop.end + +loop.inc: + %index.next = add i64 %index, 1 + %exitcond = icmp ne i64 %index.next, 1024 + br i1 %exitcond, label %loop, label %loop.end + +loop.end: + %retval = phi i64 [ %index, %loop ], [ -1, %loop.inc ] + ret i64 %retval +} diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll index b630557eb2cfe..d8e62c7b3b8d4 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-hint.ll @@ -43,24 +43,10 @@ define i64 @multi_exiting_to_different_exits_live_in_exit_values() { ; VF4IC4-NEXT: br label %[[E2:.*]] ; VF4IC4: [[VECTOR_EARLY_EXIT]]: ; 
VF4IC4-NEXT: br label %[[E1:.*]] -; VF4IC4: [[SCALAR_PH:.*]]: -; VF4IC4-NEXT: br label %[[LOOP_HEADER:.*]] -; VF4IC4: [[LOOP_HEADER]]: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[INC:%.*]], %[[LOOP_LATCH:.*]] ], [ 0, %[[SCALAR_PH]] ] -; VF4IC4-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]] -; VF4IC4-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 -; VF4IC4-NEXT: [[C_1:%.*]] = icmp eq i32 [[L]], 10 -; VF4IC4-NEXT: br i1 [[C_1]], label %[[E1]], label %[[LOOP_LATCH]] -; VF4IC4: [[LOOP_LATCH]]: -; VF4IC4-NEXT: [[INC]] = add nuw i64 [[IV]], 1 -; VF4IC4-NEXT: [[C_2:%.*]] = icmp eq i64 [[INC]], 128 -; VF4IC4-NEXT: br i1 [[C_2]], label %[[E2]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]] ; VF4IC4: [[E1]]: -; VF4IC4-NEXT: [[P1:%.*]] = phi i64 [ 0, %[[LOOP_HEADER]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] -; VF4IC4-NEXT: ret i64 [[P1]] +; VF4IC4-NEXT: ret i64 0 ; VF4IC4: [[E2]]: -; VF4IC4-NEXT: [[P2:%.*]] = phi i64 [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ] -; VF4IC4-NEXT: ret i64 [[P2]] +; VF4IC4-NEXT: ret i64 1 ; entry: %src = alloca [128 x i32] @@ -94,6 +80,4 @@ e2: ; VF4IC4: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; VF4IC4: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; VF4IC4: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; VF4IC4: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} -; VF4IC4: [[META4]] = !{!"llvm.loop.interleave.count", i32 4} ;. diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll index 6836f7b90ad19..a50ce969da7f4 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave-only.ll @@ -46,21 +46,9 @@ define i8 @iv_used_in_exit_with_math(i8 noundef %g) { ; CHECK-NEXT: [[TMP20:%.*]] = trunc i32 [[TMP19]] to i8 ; CHECK-NEXT: [[TMP23:%.*]] = trunc i32 [[TMP19]] to i8 ; CHECK-NEXT: br label %[[RETURN]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[S:%.*]] = shl nuw i8 1, [[IV]] -; CHECK-NEXT: [[A:%.*]] = and i8 [[S]], [[G]] -; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[RETURN]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i8 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV_NEXT]], 4 -; CHECK-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RES_IV1:%.*]] = phi i8 [ 32, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP20]], %[[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: [[RES_IV2:%.*]] = phi i8 [ 0, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP23]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RES_IV1:%.*]] = phi i8 [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP20]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RES_IV2:%.*]] = phi i8 [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP23]], %[[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: [[RES:%.*]] = add i8 [[RES_IV1]], [[RES_IV2]] ; CHECK-NEXT: ret i8 [[RES]] ; @@ -125,21 +113,9 @@ define i32 @iv_used_in_exit_with_loads(ptr align 4 dereferenceable(128) %src) { ; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[TMP27]] to i32 ; CHECK-NEXT: [[TMP29:%.*]] = add i32 [[INDEX]], [[TMP28]] ; CHECK-NEXT: br label %[[RETURN]] -; CHECK: [[SCALAR_PH:.*]]: 
-; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP]], align 4 -; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[RETURN]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 32 -; CHECK-NEXT: br i1 [[EC]], label %[[RETURN]], label %[[LOOP_HEADER]] ; CHECK: [[RETURN]]: -; CHECK-NEXT: [[RES_IV1:%.*]] = phi i32 [ 32, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: [[RES_IV2:%.*]] = phi i32 [ 0, %[[LOOP_LATCH]] ], [ [[IV]], %[[LOOP_HEADER]] ], [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RES_IV1:%.*]] = phi i32 [ 32, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RES_IV2:%.*]] = phi i32 [ 0, %[[MIDDLE_BLOCK]] ], [ [[TMP29]], %[[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_IV1]], [[RES_IV2]] ; CHECK-NEXT: ret i32 [[RES]] ; diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll index a4ce68f0453ae..ed5dcc78eeb78 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-interleave.ll @@ -42,25 +42,11 @@ define i64 @multi_exiting_to_different_exits_live_in_exit_values() { ; VF4IC4: middle.block: ; VF4IC4-NEXT: br label [[E2:%.*]] ; VF4IC4: vector.early.exit: -; VF4IC4-NEXT: br label [[E1:%.*]] -; VF4IC4: scalar.ph: ; VF4IC4-NEXT: br label [[LOOP_HEADER:%.*]] -; VF4IC4: loop.header: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[INC:%.*]], [[LOOP_LATCH:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]] -; VF4IC4-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 -; VF4IC4-NEXT: [[C_1:%.*]] = icmp eq i32 [[L]], 10 -; VF4IC4-NEXT: br i1 [[C_1]], label [[E1]], label [[LOOP_LATCH]] -; VF4IC4: loop.latch: -; VF4IC4-NEXT: [[INC]] = add nuw i64 [[IV]], 1 -; VF4IC4-NEXT: [[C_2:%.*]] = icmp eq i64 [[INC]], 128 -; VF4IC4-NEXT: br i1 [[C_2]], label [[E2]], label [[LOOP_HEADER]] ; VF4IC4: e1: -; VF4IC4-NEXT: [[P1:%.*]] = phi i64 [ 0, [[LOOP_HEADER]] ], [ 0, [[VECTOR_EARLY_EXIT]] ] -; VF4IC4-NEXT: ret i64 [[P1]] +; VF4IC4-NEXT: ret i64 0 ; VF4IC4: e2: -; VF4IC4-NEXT: [[P2:%.*]] = phi i64 [ 1, [[LOOP_LATCH]] ], [ 1, [[MIDDLE_BLOCK]] ] -; VF4IC4-NEXT: ret i64 [[P2]] +; VF4IC4-NEXT: ret i64 1 ; entry: %src = alloca [128 x i32] @@ -155,22 +141,8 @@ define i64 @same_exit_block_pre_inc_use1() { ; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]] ; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]] ; VF4IC4-NEXT: br label [[LOOP_END]] -; VF4IC4: scalar.ph: -; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 
[[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] +; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] ; VF4IC4-NEXT: ret i64 [[RETVAL]] ; entry: @@ -256,19 +228,8 @@ define ptr @same_exit_block_pre_inc_use1_ivptr() { ; VF4IC4-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], [[TMP6]] ; VF4IC4-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP7]] ; VF4IC4-NEXT: br label [[LOOP_END]] -; VF4IC4: scalar.ph: -; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[P1]], [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 72 -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[PTR_NEXT]], [[PTREND]] -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTR]], [[LOOP]] ], [ [[PTREND]], [[LOOP_INC]] ], [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VECTOR_EARLY_EXIT]] ] +; VF4IC4-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VECTOR_EARLY_EXIT]] ] ; VF4IC4-NEXT: ret ptr [[RETVAL]] ; entry: @@ -360,22 +321,8 @@ define i64 @same_exit_block_post_inc_use() { ; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]] ; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]] ; VF4IC4-NEXT: br label [[LOOP_END]] -; VF4IC4: scalar.ph: -; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ [[IV_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] +; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] ; VF4IC4-NEXT: ret i64 [[RETVAL]] ; entry: @@ -470,27 +417,11 @@ define i64 @diff_exit_block_pre_inc_use1() { ; VF4IC4-NEXT: [[TMP8:%.*]] = select i1 [[TMP32]], i64 [[TMP31]], i64 [[TMP29]] ; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]] ; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]] -; VF4IC4-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; VF4IC4: scalar.ph: ; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ 
[[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.early.exit: -; VF4IC4-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] -; VF4IC4-NEXT: ret i64 [[RETVAL1]] +; VF4IC4-NEXT: ret i64 [[TMP10]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL2:%.*]] = phi i64 [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ] -; VF4IC4-NEXT: ret i64 [[RETVAL2]] +; VF4IC4-NEXT: ret i64 67 ; entry: %p1 = alloca [1024 x i8] @@ -588,27 +519,11 @@ define i64 @diff_exit_block_post_inc_use1() { ; VF4IC4-NEXT: [[TMP8:%.*]] = select i1 [[TMP32]], i64 [[TMP31]], i64 [[TMP29]] ; VF4IC4-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], [[TMP8]] ; VF4IC4-NEXT: [[TMP10:%.*]] = add i64 3, [[TMP9]] -; VF4IC4-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; VF4IC4: scalar.ph: ; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], 67 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; VF4IC4: loop.early.exit: -; VF4IC4-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[IV]], [[LOOP]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ] -; VF4IC4-NEXT: ret i64 [[RETVAL1]] +; VF4IC4-NEXT: ret i64 [[TMP10]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[IV_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ] -; VF4IC4-NEXT: ret i64 [[RETVAL2]] +; VF4IC4-NEXT: ret i64 67 ; entry: %p1 = alloca [1024 x i8] @@ -847,22 +762,8 @@ define i8 @same_exit_block_use_loaded_value() { ; VF4IC4-NEXT: [[TMP41:%.*]] = icmp uge i64 [[TMP8]], 12 ; VF4IC4-NEXT: [[TMP42:%.*]] = select i1 [[TMP41]], i8 [[TMP40]], i8 [[TMP38]] ; VF4IC4-NEXT: br label [[LOOP_END]] -; VF4IC4: scalar.ph: -; VF4IC4-NEXT: br label [[LOOP:%.*]] -; VF4IC4: loop: -; VF4IC4-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 0, [[SCALAR_PH:%.*]] ] -; VF4IC4-NEXT: [[GEP_P1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[IV]] -; VF4IC4-NEXT: [[LD1:%.*]] = load i8, ptr [[GEP_P1]], align 1 -; VF4IC4-NEXT: [[GEP_P2:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[IV]] -; VF4IC4-NEXT: [[LD2:%.*]] = load i8, ptr [[GEP_P2]], align 1 -; VF4IC4-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; VF4IC4-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; VF4IC4: loop.inc: -; VF4IC4-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF4IC4-NEXT: 
[[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; VF4IC4-NEXT: br i1 [[EXITCOND]], label [[LOOP_END]], label [[LOOP]] ; VF4IC4: loop.end: -; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i8 [ [[LD1]], [[LOOP]] ], [ -1, [[LOOP_INC]] ], [ -1, [[MIDDLE_BLOCK]] ], [ [[TMP42]], [[VECTOR_EARLY_EXIT]] ] +; VF4IC4-NEXT: [[RETVAL:%.*]] = phi i8 [ -1, [[MIDDLE_BLOCK]] ], [ [[TMP42]], [[VECTOR_EARLY_EXIT]] ] ; VF4IC4-NEXT: ret i8 [[RETVAL]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll b/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll index 219c66f7a68a4..3bb39b95235ed 100644 --- a/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll +++ b/llvm/test/Transforms/LoopVectorize/single-value-blend-phis.ll @@ -29,28 +29,7 @@ define void @single_incoming_phi_no_blend_mask(i64 %a, i64 %b) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: br label [[LOOP_COND:%.*]] -; CHECK: loop.cond: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ] -; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1 -; CHECK-NEXT: [[CMP_B:%.*]] = icmp sgt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_B]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.next: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[RES:%.*]] = phi i16 [ [[LV]], [[LOOP_COND]] ], [ 1, [[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[IV]] -; CHECK-NEXT: store i16 [[RES]], ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -112,29 +91,7 @@ define void @single_incoming_phi_with_blend_mask(i64 %a, i64 %b) { ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[CMP_A:%.*]] = icmp ugt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_A]], label [[LOOP_COND:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.cond: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ] -; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1 -; CHECK-NEXT: [[CMP_B:%.*]] = icmp sgt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_B]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.next: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[RES:%.*]] = phi i16 [ 0, 
[[LOOP_HEADER]] ], [ [[LV]], [[LOOP_COND]] ], [ 1, [[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[IV]] -; CHECK-NEXT: store i16 [[RES]], ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -201,26 +158,7 @@ define void @multiple_incoming_phi_with_blend_mask(i64 %a, ptr noalias %dst) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[IV_TRUNC_2:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[CMP_A:%.*]] = icmp ugt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_A]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.next: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ], [ [[IV_TRUNC_2]], [[LOOP_NEXT]] ] -; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1 -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i16 [[LV]], ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -297,29 +235,7 @@ define void @single_incoming_needs_predication(i64 %a, i64 %b) { ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: [[CMP_A:%.*]] = icmp ugt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_A]], label [[LOOP_COND:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.cond: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ [[IV_TRUNC]], [[LOOP_HEADER]] ] -; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @src, i16 0, i16 [[BLEND]] -; CHECK-NEXT: [[LV:%.*]] = load i16, ptr [[SRC_PTR]], align 1 -; CHECK-NEXT: [[CMP_B:%.*]] = icmp sgt i64 [[IV]], [[A]] -; CHECK-NEXT: br i1 [[CMP_B]], label [[LOOP_NEXT:%.*]], label [[LOOP_LATCH]] -; CHECK: loop.next: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[RES:%.*]] = phi i16 [ 0, [[LOOP_HEADER]] ], [ [[LV]], [[LOOP_COND]] ], [ 1, [[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[IV]] -; CHECK-NEXT: store i16 [[RES]], ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp 
ult i64 [[IV]], 63 -; CHECK-NEXT: br i1 [[CMP439]], label [[LOOP_HEADER]], label [[EXIT]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -371,20 +287,7 @@ define void @duplicated_incoming_blocks_blend(i32 %x, ptr %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[ADD_I:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[C_0:%.*]] = icmp ugt i32 [[IV]], [[X:%.*]] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[IV]], [[LOOP_HEADER]] ], [ [[IV]], [[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_PTR:%.*]] = getelementptr i32, ptr [[PTR]], i32 [[P]] -; CHECK-NEXT: store i32 [[P]], ptr [[GEP_PTR]], align 4 -; CHECK-NEXT: [[ADD_I]] = add nsw i32 [[P]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[ADD_I]], 1000 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_HEADER]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll index 04f04a8a08fc2..3500c5c9d81cd 100644 --- a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll +++ b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll @@ -34,22 +34,8 @@ define i64 @same_exit_block_phi_of_consts() { ; CHECK-NEXT: br label [[LOOP_END:%.*]] ; CHECK: vector.early.exit: ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 0, [[LOOP]] ], [ 1, [[LOOP_INC]] ], [ 1, [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 1, [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -108,21 +94,7 @@ define i64 @diff_exit_block_phi_of_consts() { ; CHECK: middle.block: ; CHECK-NEXT: br label [[LOOP_END:%.*]] ; CHECK: vector.early.exit: -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: 
[[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: ; CHECK-NEXT: ret i64 0 ; CHECK: loop.end: @@ -292,16 +264,7 @@ define i32 @diff_blocks_invariant_early_exit_cond(ptr %s) { ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: vector.early.exit: -; CHECK-NEXT: br label [[EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IND:%.*]] = phi i32 [ -10, [[SCALAR_PH:%.*]] ], [ [[IND_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: br i1 [[COND]], label [[FOR_INC]], label [[EARLY_EXIT]] -; CHECK: for.inc: -; CHECK-NEXT: [[IND_NEXT]] = add nsw i32 [[IND]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IND_NEXT]], 266 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]] ; CHECK: early.exit: ; CHECK-NEXT: tail call void @abort() ; CHECK-NEXT: unreachable diff --git a/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll b/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll index 54408b24db114..79821b8be1734 100644 --- a/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll +++ b/llvm/test/Transforms/LoopVectorize/single_early_exit_live_outs.ll @@ -36,22 +36,8 @@ define i64 @same_exit_block_pre_inc_use1() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -116,24 +102,8 @@ define i32 @same_exit_block_pre_inc_use1_iv64_endi32_step2() { ; CHECK-NEXT: [[TMP11:%.*]] = mul i32 [[DOTCAST]], 2 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i32 9, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi i32 [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ 9, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 
1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = add i32 [[INDEX2]], 2 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ [[INDEX2]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i32 [[RETVAL]] ; entry: @@ -197,23 +167,8 @@ define i32 @same_exit_block_pre_inc_use1_iv128_endi32_step2() { ; CHECK-NEXT: [[TMP10:%.*]] = mul i32 [[DOTCAST]], 2 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i32 9, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i128 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi i32 [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ 9, [[SCALAR_PH]] ] -; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC]] ], [ [[P1]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 3 -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i128 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = add i32 [[INDEX2]], 2 -; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i128 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ [[INDEX2]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i32 [[RETVAL]] ; entry: @@ -277,24 +232,8 @@ define float @same_exit_block_pre_inc_use1_iv64_endf32() { ; CHECK-NEXT: [[TMP11:%.*]] = fmul fast float 1.000000e+00, [[DOTCAST]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = fadd fast float 9.000000e+00, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi float [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ 9.000000e+00, [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = fadd fast float [[INDEX2]], 1.000000e+00 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 
[[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi float [ [[INDEX2]], [[LOOP]] ], [ 1.230000e+02, [[LOOP_INC]] ], [ 1.230000e+02, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi float [ 1.230000e+02, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret float [[RETVAL]] ; entry: @@ -360,24 +299,8 @@ define ptr @same_exit_block_pre_inc_use1_iv64_endptr() { ; CHECK-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 5 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP20]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi ptr [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ [[P2]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = getelementptr i8, ptr [[INDEX2]], i64 5 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[INDEX2]], [[LOOP]] ], [ [[P1]], [[LOOP_INC]] ], [ [[P1]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[P1]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret ptr [[RETVAL]] ; entry: @@ -438,19 +361,8 @@ define ptr @same_exit_block_pre_inc_use1_ivptr() { ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP8]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[P1]], [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 72 -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[PTR_NEXT]], [[PTREND]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTR]], [[LOOP]] ], [ [[PTREND]], [[LOOP_INC]] ], [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret ptr [[RETVAL]] ; entry: @@ -512,23 +424,8 @@ define i64 @same_exit_block_pre_inc1_use_inv_cond(i1 %cond) { ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: 
-; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: [[CMP4:%.*]] = select i1 [[COND]], i1 [[CMP3]], i1 false -; CHECK-NEXT: br i1 [[CMP4]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -592,22 +489,8 @@ define i64 @same_exit_block_pre_inc_use1_gep_two_indices() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P1]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P2]], i64 0, i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -670,22 +553,8 @@ define i64 @same_exit_block_pre_inc_use1_alloca_diff_type() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: 
[[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -745,22 +614,8 @@ define i64 @same_exit_block_pre_inc_use2() { ; CHECK-NEXT: br label [[LOOP_END:%.*]] ; CHECK: vector.early.exit: ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[LOOP]] ], [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ], [ 67, [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 66, [[MIDDLE_BLOCK]] ], [ 67, [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -823,22 +678,8 @@ define i64 @same_exit_block_pre_inc_use3() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ [[INDEX]], [[LOOP]] ], [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[INDEX_LCSSA]] ; entry: @@ -902,20 +743,8 @@ define i64 @same_exit_block_pre_inc_use4() { ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP8]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = 
phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[INDEX]], [[LD1]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -976,22 +805,8 @@ define i64 @same_exit_block_post_inc_use() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[INDEX_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -1051,19 +866,8 @@ define ptr @same_exit_block_post_inc_use1_ivptr() { ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 1 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP9]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[PTR_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[P1]], [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[PTR]], align 1 -; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], 72 -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne ptr [[PTR_NEXT]], [[PTREND]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTR_NEXT]], [[LOOP]] ], [ [[PTREND]], [[LOOP_INC]] ], [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi ptr [ [[PTREND]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret ptr [[RETVAL]] ; entry: @@ -1123,22 +927,8 @@ define i64 
@same_exit_block_post_inc_use2() { ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 1 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP]] ], [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 66, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -1200,27 +990,11 @@ define i64 @diff_exit_block_pre_inc_use1() { ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true) ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: ret i64 67 ; entry: %p1 = alloca [1024 x i8] @@ -1282,27 +1056,11 @@ define i64 @diff_exit_block_pre_inc_use2() { ; CHECK: middle.block: ; CHECK-NEXT: br label [[LOOP_END:%.*]] ; CHECK: vector.early.exit: -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr 
inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ 67, [[LOOP]] ], [ 67, [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 67 ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: ret i64 66 ; entry: %p1 = alloca [1024 x i8] @@ -1367,27 +1125,11 @@ define i64 @diff_exit_block_pre_inc_use3() { ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true) ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX2]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[INDEX_LCSSA]] +; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]] ; CHECK: loop.end: -; CHECK-NEXT: [[INDEX_LCSSA1:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[INDEX_LCSSA1]] +; CHECK-NEXT: ret i64 66 ; entry: %p1 = alloca [1024 x i8] @@ -1450,27 +1192,11 @@ define i64 @diff_exit_block_post_inc_use1() { ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP13]], i1 true) ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: 
[[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: ret i64 67 ; entry: %p1 = alloca [1024 x i8] @@ -1536,27 +1262,11 @@ define i64 @diff_exit_block_post_inc_use2() { ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 1 ; CHECK-NEXT: [[TMP21:%.*]] = add i64 3, [[TMP11]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP]] ], [ [[TMP21]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 [[TMP21]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ 66, [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: ret i64 66 ; entry: %p1 = alloca [1024 x i8] @@ -1624,29 +1334,11 @@ define i64 @diff_exit_block_post_inc_use3(i64 %start) { ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 1 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 [[START]], [[TMP12]] -; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[INDEX2_NEXT:%.*]], [[LOOP_INC]] ], [ [[START]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[INDEX2_NEXT]] = add i64 [[INDEX2]], 1 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]] -; CHECK: loop.inc: -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.early.exit: -; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX2_NEXT]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] -; CHECK-NEXT: 
ret i64 [[RETVAL1]] +; CHECK-NEXT: ret i64 [[EARLY_EXIT_VALUE]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX2]], [[LOOP_INC]] ], [ [[IND_ESCAPE]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RETVAL2]] +; CHECK-NEXT: ret i64 [[IND_ESCAPE]] ; entry: %p1 = alloca [1024 x i8] @@ -1713,21 +1405,8 @@ define i64 @loop_contains_safe_call() { ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP9]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load float, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[SQRT:%.*]] = tail call fast float @llvm.sqrt.f32(float [[LD1]]) -; CHECK-NEXT: [[CMP:%.*]] = fcmp fast ult float [[SQRT]], 3.000000e+00 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -1788,21 +1467,8 @@ define i64 @loop_contains_safe_div() { ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP9]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[LD1]], 20000 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[DIV]], 1 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -1864,22 +1530,8 @@ define i64 @loop_contains_load_after_early_exit(ptr dereferenceable(1024) align( ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP11]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp 
eq i32 [[LD1]], 1 -; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[LD2]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: @@ -2071,22 +1723,8 @@ define i64 @same_exit_block_pre_inc_use1_deref_ptrs(ptr dereferenceable(1024) %p ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], [[FIRST_ACTIVE_LANE]] ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = add i64 3, [[TMP10]] ; CHECK-NEXT: br label [[LOOP_END]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ 3, [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]] -; CHECK: loop.inc: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]] ; CHECK: loop.end: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/skeleton-lcssa-crash.ll b/llvm/test/Transforms/LoopVectorize/skeleton-lcssa-crash.ll index 9c14a8c08618f..1e4598e756645 100644 --- a/llvm/test/Transforms/LoopVectorize/skeleton-lcssa-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/skeleton-lcssa-crash.ll @@ -23,18 +23,16 @@ define i16 @test(ptr %arg, i64 %N) { ; CHECK-NEXT: [[C_3:%.*]] = call i1 @cond() ; CHECK-NEXT: br i1 [[C_3]], label [[LOOP_3_PREHEADER:%.*]], label [[INNER_LATCH:%.*]] ; CHECK: loop.3.preheader: -; CHECK-NEXT: [[L_1_LCSSA:%.*]] = phi ptr [ [[L_1]], [[INNER_BB]] ] -; CHECK-NEXT: [[L_2_LCSSA:%.*]] = phi ptr [ [[L_2]], [[INNER_BB]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], 1 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 2 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] ; CHECK: vector.memcheck: -; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[L_2_LCSSA]], i64 2 -; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[L_1_LCSSA]], i64 2 +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[L_2]], i64 2 +; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[L_1]], i64 2 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[N]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 4 -; CHECK-NEXT: [[SCEVGEP6:%.*]] = 
getelementptr i8, ptr [[L_1_LCSSA]], i64 [[TMP2]] -; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[L_2_LCSSA]], [[SCEVGEP6]] +; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[L_1]], i64 [[TMP2]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[L_2]], [[SCEVGEP3]] ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP5]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] @@ -67,19 +65,17 @@ define i16 @test(ptr %arg, i64 %N) { ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_3]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[C_5:%.*]] = icmp ult i64 [[IV]], [[N]] -; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i16, ptr [[L_1_LCSSA]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i16, ptr [[L_1]], i64 [[IV_NEXT]] ; CHECK-NEXT: [[LOOP_L_1:%.*]] = load i16, ptr [[GEP_1]], align 2 -; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i16, ptr [[L_2_LCSSA]], i64 0 +; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i16, ptr [[L_2]], i64 0 ; CHECK-NEXT: store i16 [[LOOP_L_1]], ptr [[GEP_2]], align 2 ; CHECK-NEXT: br i1 [[C_5]], label [[LOOP_3]], label [[EXIT_LOOPEXIT]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: exit.loopexit: ; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: exit.loopexit1: -; CHECK-NEXT: [[L_1_LCSSA3:%.*]] = phi ptr [ [[L_1]], [[INNER_LATCH]] ] ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[L_14:%.*]] = phi ptr [ [[L_1_LCSSA3]], [[EXIT_LOOPEXIT1]] ], [ [[L_1_LCSSA]], [[EXIT_LOOPEXIT]] ] -; CHECK-NEXT: [[L_3:%.*]] = load i16, ptr [[L_14]], align 2 +; CHECK-NEXT: [[L_3:%.*]] = load i16, ptr [[L_1]], align 2 ; CHECK-NEXT: ret i16 [[L_3]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll index 66300ed6024c6..19ab96dd822b6 100644 --- a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll @@ -41,18 +41,7 @@ define void @pr75298_store_reduction_value_in_folded_loop(i64 %iv.start) optsize ; CHECK: middle.block: ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP4]]) ; CHECK-NEXT: store i32 [[TMP6]], ptr @a, align 4 -; CHECK-NEXT: br label [[EXIT_LOOPEXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_START]], [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr @c, align 4 -; CHECK-NEXT: [[RED_NEXT]] = xor i32 [[RED]], [[L]] -; CHECK-NEXT: store i32 [[RED_NEXT]], ptr @a, align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 7 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT_LOOPEXIT]], label [[LOOP]] ; CHECK: exit.loopexit: ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: diff --git a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll index 7027d857fd040..ca32808bc482a 100644 --- a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll +++ b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll @@ -23,19 
+23,9 @@ define float @pr70988() { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT3]], 1022 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021 -; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[DOTLCSSA]] +; CHECK-NEXT: ret float [[TMP5]] ; ; CHECK-ALM-LABEL: define float @pr70988() { ; CHECK-ALM-NEXT: entry: @@ -56,19 +46,9 @@ define float @pr70988() { ; CHECK-ALM-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT3]], 1022 ; CHECK-ALM-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-ALM: middle.block: -; CHECK-ALM-NEXT: br label [[EXIT:%.*]] -; CHECK-ALM: scalar.ph: ; CHECK-ALM-NEXT: br label [[LOOP:%.*]] -; CHECK-ALM: loop: -; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00 -; CHECK-ALM-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1 -; CHECK-ALM-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021 -; CHECK-ALM-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT]] ; CHECK-ALM: exit: -; CHECK-ALM-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-ALM-NEXT: ret float [[DOTLCSSA]] +; CHECK-ALM-NEXT: ret float [[TMP5]] ; entry: br label %loop @@ -123,21 +103,9 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[NARROW:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4 -; CHECK-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], [[L]] -; CHECK-NEXT: [[EC:%.*]] = icmp ult i32 [[NARROW]], 15 -; CHECK-NEXT: br i1 [[EC]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[DOTLCSSA]] +; CHECK-NEXT: ret float [[TMP13]] ; ; CHECK-ALM-LABEL: define float @pr72720reduction_using_active_lane_mask( ; CHECK-ALM-SAME: ptr [[SRC:%.*]]) { @@ -173,21 +141,9 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) { ; CHECK-ALM-NEXT: [[TMP12:%.*]] = icmp eq i32 
[[INDEX_NEXT]], 16 ; CHECK-ALM-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK-ALM: middle.block: -; CHECK-ALM-NEXT: br label [[EXIT:%.*]] -; CHECK-ALM: scalar.ph: ; CHECK-ALM-NEXT: br label [[LOOP:%.*]] -; CHECK-ALM: loop: -; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[NARROW:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1 -; CHECK-ALM-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]] -; CHECK-ALM-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4 -; CHECK-ALM-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], [[L]] -; CHECK-ALM-NEXT: [[EC:%.*]] = icmp ult i32 [[NARROW]], 15 -; CHECK-ALM-NEXT: br i1 [[EC]], label [[LOOP]], label [[EXIT]] ; CHECK-ALM: exit: -; CHECK-ALM-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[RDX_NEXT]], [[LOOP]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] -; CHECK-ALM-NEXT: ret float [[DOTLCSSA]] +; CHECK-ALM-NEXT: ret float [[TMP11]] ; entry: br label %loop @@ -229,19 +185,9 @@ define float @fadd_reduction_with_live_in(float %inc) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1002 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]] -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: -; CHECK-NEXT: [[LCSSA:%.*]] = phi float [ [[SUM_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret float [[LCSSA]] +; CHECK-NEXT: ret float [[TMP5]] ; ; CHECK-ALM-LABEL: define float @fadd_reduction_with_live_in( ; CHECK-ALM-SAME: float [[INC:%.*]]) { @@ -263,19 +209,9 @@ define float @fadd_reduction_with_live_in(float %inc) { ; CHECK-ALM-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1002 ; CHECK-ALM-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK-ALM: middle.block: -; CHECK-ALM-NEXT: br label [[EXIT:%.*]] -; CHECK-ALM: scalar.ph: ; CHECK-ALM-NEXT: br label [[LOOP:%.*]] -; CHECK-ALM: loop: -; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]] -; CHECK-ALM-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-ALM-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 -; CHECK-ALM-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK-ALM: exit: -; CHECK-ALM-NEXT: [[LCSSA:%.*]] = phi float [ [[SUM_NEXT]], [[LOOP]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ] -; CHECK-ALM-NEXT: ret float [[LCSSA]] +; CHECK-ALM-NEXT: ret float [[TMP5]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll index 97f686c9c025a..dcab18fd93ed2 100644 --- a/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll +++ 
b/llvm/test/Transforms/LoopVectorize/strided-accesses-interleave-only.ll @@ -22,16 +22,6 @@ define void @test_variable_stride(ptr %dst, i32 %scale) { ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IDX:%.*]] = mul i32 [[IV]], [[SCALE]] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[DST]], i32 [[IDX]] -; CHECK-NEXT: store i32 [[IV]], ptr [[GEP]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/struct-return-replicate.ll b/llvm/test/Transforms/LoopVectorize/struct-return-replicate.ll index 5c622f825beaf..99916a503750a 100644 --- a/llvm/test/Transforms/LoopVectorize/struct-return-replicate.ll +++ b/llvm/test/Transforms/LoopVectorize/struct-return-replicate.ll @@ -453,6 +453,221 @@ exit: ret void } +define void @struct_return_2xf32_replicate_predicated(ptr %a) { +; VF4-LABEL: define void @struct_return_2xf32_replicate_predicated( +; VF4-SAME: ptr [[A:%.*]]) { +; VF4-NEXT: [[ENTRY:.*:]] +; VF4-NEXT: br label %[[VECTOR_PH:.*]] +; VF4: [[VECTOR_PH]]: +; VF4-NEXT: br label %[[VECTOR_BODY:.*]] +; VF4: [[VECTOR_BODY]]: +; VF4-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE12:.*]] ] +; VF4-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; VF4-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 8 +; VF4-NEXT: [[TMP1:%.*]] = fcmp ogt <4 x float> [[WIDE_LOAD]], zeroinitializer +; VF4-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0 +; VF4-NEXT: br i1 [[TMP2]], label %[[PRED_CALL_IF:.*]], label %[[PRED_CALL_CONTINUE:.*]] +; VF4: [[PRED_CALL_IF]]: +; VF4-NEXT: [[TMP3:%.*]] = extractelement <4 x float> [[WIDE_LOAD]], i32 0 +; VF4-NEXT: [[TMP4:%.*]] = tail call { float, float } @fn2(float [[TMP3]]) #[[ATTR3:[0-9]+]] +; VF4-NEXT: [[TMP5:%.*]] = extractvalue { float, float } [[TMP4]], 0 +; VF4-NEXT: [[TMP6:%.*]] = insertelement <4 x float> poison, float [[TMP5]], i32 0 +; VF4-NEXT: [[TMP7:%.*]] = insertvalue { <4 x float>, <4 x float> } poison, <4 x float> [[TMP6]], 0 +; VF4-NEXT: [[TMP8:%.*]] = extractvalue { float, float } [[TMP4]], 1 +; VF4-NEXT: [[TMP9:%.*]] = extractvalue { <4 x float>, <4 x float> } [[TMP7]], 1 +; VF4-NEXT: [[TMP10:%.*]] = insertelement <4 x float> [[TMP9]], float [[TMP8]], i32 0 +; VF4-NEXT: [[TMP11:%.*]] = insertvalue { <4 x float>, <4 x float> } [[TMP7]], <4 x float> [[TMP10]], 1 +; VF4-NEXT: br label %[[PRED_CALL_CONTINUE]] +; VF4: [[PRED_CALL_CONTINUE]]: +; VF4-NEXT: [[TMP12:%.*]] = phi { <4 x float>, <4 x float> } [ poison, %[[VECTOR_BODY]] ], [ [[TMP7]], %[[PRED_CALL_IF]] ] +; VF4-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP1]], i32 1 +; VF4-NEXT: br i1 [[TMP13]], label %[[PRED_CALL_IF1:.*]], label %[[PRED_CALL_CONTINUE2:.*]] +; VF4: [[PRED_CALL_IF1]]: +; VF4-NEXT: [[TMP14:%.*]] = extractelement <4 x float> [[WIDE_LOAD]], i32 1 +; VF4-NEXT: [[TMP15:%.*]] = tail call { float, float } @fn2(float [[TMP14]]) #[[ATTR3]] +; VF4-NEXT: [[TMP16:%.*]] = extractvalue { float, float } [[TMP15]], 0 +; VF4-NEXT: [[TMP17:%.*]] = extractvalue { <4 x float>, <4 x float> } 
[[TMP12]], 0 +; VF4-NEXT: [[TMP18:%.*]] = insertelement <4 x float> [[TMP17]], float [[TMP16]], i32 1 +; VF4-NEXT: [[TMP19:%.*]] = insertvalue { <4 x float>, <4 x float> } [[TMP12]], <4 x float> [[TMP18]], 0 +; VF4-NEXT: [[TMP20:%.*]] = extractvalue { float, float } [[TMP15]], 1 +; VF4-NEXT: [[TMP21:%.*]] = extractvalue { <4 x float>, <4 x float> } [[TMP19]], 1 +; VF4-NEXT: [[TMP22:%.*]] = insertelement <4 x float> [[TMP21]], float [[TMP20]], i32 1 +; VF4-NEXT: [[TMP23:%.*]] = insertvalue { <4 x float>, <4 x float> } [[TMP19]], <4 x float> [[TMP22]], 1 +; VF4-NEXT: br label %[[PRED_CALL_CONTINUE2]] +; VF4: [[PRED_CALL_CONTINUE2]]: +; VF4-NEXT: [[TMP24:%.*]] = phi { <4 x float>, <4 x float> } [ [[TMP12]], %[[PRED_CALL_CONTINUE]] ], [ [[TMP19]], %[[PRED_CALL_IF1]] ] +; VF4-NEXT: [[TMP25:%.*]] = extractelement <4 x i1> [[TMP1]], i32 2 +; VF4-NEXT: br i1 [[TMP25]], label %[[PRED_CALL_IF3:.*]], label %[[PRED_CALL_CONTINUE4:.*]] +; VF4: [[PRED_CALL_IF3]]: +; VF4-NEXT: [[TMP26:%.*]] = extractelement <4 x float> [[WIDE_LOAD]], i32 2 +; VF4-NEXT: [[TMP27:%.*]] = tail call { float, float } @fn2(float [[TMP26]]) #[[ATTR3]] +; VF4-NEXT: [[TMP28:%.*]] = extractvalue { float, float } [[TMP27]], 0 +; VF4-NEXT: [[TMP29:%.*]] = extractvalue { <4 x float>, <4 x float> } [[TMP24]], 0 +; VF4-NEXT: [[TMP30:%.*]] = insertelement <4 x float> [[TMP29]], float [[TMP28]], i32 2 +; VF4-NEXT: [[TMP31:%.*]] = insertvalue { <4 x float>, <4 x float> } [[TMP24]], <4 x float> [[TMP30]], 0 +; VF4-NEXT: [[TMP32:%.*]] = extractvalue { float, float } [[TMP27]], 1 +; VF4-NEXT: [[TMP33:%.*]] = extractvalue { <4 x float>, <4 x float> } [[TMP31]], 1 +; VF4-NEXT: [[TMP34:%.*]] = insertelement <4 x float> [[TMP33]], float [[TMP32]], i32 2 +; VF4-NEXT: [[TMP35:%.*]] = insertvalue { <4 x float>, <4 x float> } [[TMP31]], <4 x float> [[TMP34]], 1 +; VF4-NEXT: br label %[[PRED_CALL_CONTINUE4]] +; VF4: [[PRED_CALL_CONTINUE4]]: +; VF4-NEXT: [[TMP36:%.*]] = phi { <4 x float>, <4 x float> } [ [[TMP24]], %[[PRED_CALL_CONTINUE2]] ], [ [[TMP31]], %[[PRED_CALL_IF3]] ] +; VF4-NEXT: [[TMP37:%.*]] = extractelement <4 x i1> [[TMP1]], i32 3 +; VF4-NEXT: br i1 [[TMP37]], label %[[PRED_CALL_IF5:.*]], label %[[PRED_CALL_CONTINUE6:.*]] +; VF4: [[PRED_CALL_IF5]]: +; VF4-NEXT: [[TMP38:%.*]] = extractelement <4 x float> [[WIDE_LOAD]], i32 3 +; VF4-NEXT: [[TMP39:%.*]] = tail call { float, float } @fn2(float [[TMP38]]) #[[ATTR3]] +; VF4-NEXT: [[TMP40:%.*]] = extractvalue { float, float } [[TMP39]], 0 +; VF4-NEXT: [[TMP41:%.*]] = extractvalue { <4 x float>, <4 x float> } [[TMP36]], 0 +; VF4-NEXT: [[TMP42:%.*]] = insertelement <4 x float> [[TMP41]], float [[TMP40]], i32 3 +; VF4-NEXT: [[TMP43:%.*]] = insertvalue { <4 x float>, <4 x float> } [[TMP36]], <4 x float> [[TMP42]], 0 +; VF4-NEXT: [[TMP44:%.*]] = extractvalue { float, float } [[TMP39]], 1 +; VF4-NEXT: [[TMP45:%.*]] = extractvalue { <4 x float>, <4 x float> } [[TMP43]], 1 +; VF4-NEXT: [[TMP46:%.*]] = insertelement <4 x float> [[TMP45]], float [[TMP44]], i32 3 +; VF4-NEXT: [[TMP47:%.*]] = insertvalue { <4 x float>, <4 x float> } [[TMP43]], <4 x float> [[TMP46]], 1 +; VF4-NEXT: br label %[[PRED_CALL_CONTINUE6]] +; VF4: [[PRED_CALL_CONTINUE6]]: +; VF4-NEXT: [[TMP48:%.*]] = phi { <4 x float>, <4 x float> } [ [[TMP36]], %[[PRED_CALL_CONTINUE4]] ], [ [[TMP43]], %[[PRED_CALL_IF5]] ] +; VF4-NEXT: [[TMP49:%.*]] = extractvalue { <4 x float>, <4 x float> } [[TMP48]], 0 +; VF4-NEXT: [[TMP50:%.*]] = fdiv <4 x float> [[TMP49]], [[WIDE_LOAD]] +; VF4-NEXT: [[TMP51:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0 +; 
VF4-NEXT: br i1 [[TMP51]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF4: [[PRED_STORE_IF]]: +; VF4-NEXT: [[TMP52:%.*]] = add i64 [[INDEX]], 0 +; VF4-NEXT: [[TMP53:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP52]] +; VF4-NEXT: [[TMP54:%.*]] = extractelement <4 x float> [[TMP50]], i32 0 +; VF4-NEXT: store float [[TMP54]], ptr [[TMP53]], align 8 +; VF4-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF4: [[PRED_STORE_CONTINUE]]: +; VF4-NEXT: [[TMP55:%.*]] = extractelement <4 x i1> [[TMP1]], i32 1 +; VF4-NEXT: br i1 [[TMP55]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]] +; VF4: [[PRED_STORE_IF7]]: +; VF4-NEXT: [[TMP56:%.*]] = add i64 [[INDEX]], 1 +; VF4-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP56]] +; VF4-NEXT: [[TMP58:%.*]] = extractelement <4 x float> [[TMP50]], i32 1 +; VF4-NEXT: store float [[TMP58]], ptr [[TMP57]], align 8 +; VF4-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; VF4: [[PRED_STORE_CONTINUE8]]: +; VF4-NEXT: [[TMP59:%.*]] = extractelement <4 x i1> [[TMP1]], i32 2 +; VF4-NEXT: br i1 [[TMP59]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]] +; VF4: [[PRED_STORE_IF9]]: +; VF4-NEXT: [[TMP60:%.*]] = add i64 [[INDEX]], 2 +; VF4-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP60]] +; VF4-NEXT: [[TMP62:%.*]] = extractelement <4 x float> [[TMP50]], i32 2 +; VF4-NEXT: store float [[TMP62]], ptr [[TMP61]], align 8 +; VF4-NEXT: br label %[[PRED_STORE_CONTINUE10]] +; VF4: [[PRED_STORE_CONTINUE10]]: +; VF4-NEXT: [[TMP63:%.*]] = extractelement <4 x i1> [[TMP1]], i32 3 +; VF4-NEXT: br i1 [[TMP63]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12]] +; VF4: [[PRED_STORE_IF11]]: +; VF4-NEXT: [[TMP64:%.*]] = add i64 [[INDEX]], 3 +; VF4-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP64]] +; VF4-NEXT: [[TMP66:%.*]] = extractelement <4 x float> [[TMP50]], i32 3 +; VF4-NEXT: store float [[TMP66]], ptr [[TMP65]], align 8 +; VF4-NEXT: br label %[[PRED_STORE_CONTINUE12]] +; VF4: [[PRED_STORE_CONTINUE12]]: +; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; VF4-NEXT: [[TMP67:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; VF4-NEXT: br i1 [[TMP67]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; VF4: [[MIDDLE_BLOCK]]: +; +; VF2IC2-LABEL: define void @struct_return_2xf32_replicate_predicated( +; VF2IC2-SAME: ptr [[A:%.*]]) { +; VF2IC2-NEXT: [[ENTRY:.*:]] +; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]] +; VF2IC2: [[VECTOR_PH]]: +; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF2IC2: [[VECTOR_BODY]]: +; VF2IC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE7:.*]] ] +; VF2IC2-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; VF2IC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 2 +; VF2IC2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP0]], align 8 +; VF2IC2-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x float>, ptr [[TMP1]], align 8 +; VF2IC2-NEXT: [[TMP2:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD]], zeroinitializer +; VF2IC2-NEXT: [[TMP3:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD1]], zeroinitializer +; VF2IC2-NEXT: [[TMP4:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP4]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; VF2IC2: [[PRED_STORE_IF]]: +; VF2IC2-NEXT: [[TMP5:%.*]] = extractelement <2 x float> [[WIDE_LOAD]], i32 0 +; VF2IC2-NEXT: [[TMP6:%.*]] = tail call { float, 
float } @fn2(float [[TMP5]]) #[[ATTR3:[0-9]+]] +; VF2IC2-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0 +; VF2IC2-NEXT: [[TMP8:%.*]] = extractvalue { float, float } [[TMP6]], 0 +; VF2IC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]] +; VF2IC2-NEXT: [[TMP10:%.*]] = extractelement <2 x float> [[WIDE_LOAD]], i32 0 +; VF2IC2-NEXT: [[TMP11:%.*]] = fdiv float [[TMP8]], [[TMP10]] +; VF2IC2-NEXT: store float [[TMP11]], ptr [[TMP9]], align 8 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE]] +; VF2IC2: [[PRED_STORE_CONTINUE]]: +; VF2IC2-NEXT: [[TMP12:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP12]], label %[[PRED_STORE_IF2:.*]], label %[[PRED_STORE_CONTINUE3:.*]] +; VF2IC2: [[PRED_STORE_IF2]]: +; VF2IC2-NEXT: [[TMP13:%.*]] = extractelement <2 x float> [[WIDE_LOAD]], i32 1 +; VF2IC2-NEXT: [[TMP14:%.*]] = tail call { float, float } @fn2(float [[TMP13]]) #[[ATTR3]] +; VF2IC2-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 1 +; VF2IC2-NEXT: [[TMP16:%.*]] = extractvalue { float, float } [[TMP14]], 0 +; VF2IC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP15]] +; VF2IC2-NEXT: [[TMP18:%.*]] = extractelement <2 x float> [[WIDE_LOAD]], i32 1 +; VF2IC2-NEXT: [[TMP19:%.*]] = fdiv float [[TMP16]], [[TMP18]] +; VF2IC2-NEXT: store float [[TMP19]], ptr [[TMP17]], align 8 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE3]] +; VF2IC2: [[PRED_STORE_CONTINUE3]]: +; VF2IC2-NEXT: [[TMP20:%.*]] = extractelement <2 x i1> [[TMP3]], i32 0 +; VF2IC2-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5:.*]] +; VF2IC2: [[PRED_STORE_IF4]]: +; VF2IC2-NEXT: [[TMP21:%.*]] = extractelement <2 x float> [[WIDE_LOAD1]], i32 0 +; VF2IC2-NEXT: [[TMP22:%.*]] = tail call { float, float } @fn2(float [[TMP21]]) #[[ATTR3]] +; VF2IC2-NEXT: [[TMP23:%.*]] = add i64 [[INDEX]], 2 +; VF2IC2-NEXT: [[TMP24:%.*]] = extractvalue { float, float } [[TMP22]], 0 +; VF2IC2-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP23]] +; VF2IC2-NEXT: [[TMP26:%.*]] = extractelement <2 x float> [[WIDE_LOAD1]], i32 0 +; VF2IC2-NEXT: [[TMP27:%.*]] = fdiv float [[TMP24]], [[TMP26]] +; VF2IC2-NEXT: store float [[TMP27]], ptr [[TMP25]], align 8 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE5]] +; VF2IC2: [[PRED_STORE_CONTINUE5]]: +; VF2IC2-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1 +; VF2IC2-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF6:.*]], label %[[PRED_STORE_CONTINUE7]] +; VF2IC2: [[PRED_STORE_IF6]]: +; VF2IC2-NEXT: [[TMP29:%.*]] = extractelement <2 x float> [[WIDE_LOAD1]], i32 1 +; VF2IC2-NEXT: [[TMP30:%.*]] = tail call { float, float } @fn2(float [[TMP29]]) #[[ATTR3]] +; VF2IC2-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 3 +; VF2IC2-NEXT: [[TMP32:%.*]] = extractvalue { float, float } [[TMP30]], 0 +; VF2IC2-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP31]] +; VF2IC2-NEXT: [[TMP34:%.*]] = extractelement <2 x float> [[WIDE_LOAD1]], i32 1 +; VF2IC2-NEXT: [[TMP35:%.*]] = fdiv float [[TMP32]], [[TMP34]] +; VF2IC2-NEXT: store float [[TMP35]], ptr [[TMP33]], align 8 +; VF2IC2-NEXT: br label %[[PRED_STORE_CONTINUE7]] +; VF2IC2: [[PRED_STORE_CONTINUE7]]: +; VF2IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; VF2IC2-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; VF2IC2-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; VF2IC2: [[MIDDLE_BLOCK]]: +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, 
%for.inc ] + %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv + %in_val = load float, ptr %arrayidx, align 8 + %sgt_zero = fcmp ogt float %in_val, 0.0 + br i1 %sgt_zero, label %if.then, label %for.inc + +if.then: + %call = tail call { float, float } @fn2(float %in_val) #3 + %extract_a = extractvalue { float, float } %call, 0 + %div = fdiv float %extract_a, %in_val + store float %div, ptr %arrayidx, align 8 + br label %for.inc + +for.inc: + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, 1024 + br i1 %exitcond.not, label %exit, label %for.body + +exit: + ret void +} + declare { i64 } @fn1(float) declare { float, float } @fn2(float) declare { i32, i32, i32 } @fn3(i32) @@ -464,3 +679,4 @@ declare { <8 x i32>, <8 x i32>, <8 x i32> } @fixed_vec_fn3(<8 x i32>) attributes #0 = { nounwind "vector-function-abi-variant"="_ZGVnN8v_fn1(fixed_vec_fn1)" } attributes #1 = { nounwind "vector-function-abi-variant"="_ZGVnN8v_fn2(fixed_vec_fn2)" } attributes #2 = { nounwind "vector-function-abi-variant"="_ZGVnN8v_fn3(fixed_vec_fn3)" } +attributes #3 = { nounwind "vector-function-abi-variant"="_ZGVnM8v_fn2(fixed_vec_fn2)" } diff --git a/llvm/test/Transforms/LoopVectorize/struct-return.ll b/llvm/test/Transforms/LoopVectorize/struct-return.ll index b721e9e489804..f2e2e2846614b 100644 --- a/llvm/test/Transforms/LoopVectorize/struct-return.ll +++ b/llvm/test/Transforms/LoopVectorize/struct-return.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph:" --version 6 ; RUN: opt < %s -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=1 -S -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize 2>%t | FileCheck %s ; RUN: cat %t | FileCheck --check-prefix=CHECK-REMARKS %s @@ -7,14 +8,30 @@ target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" ; CHECK-REMARKS: remark: {{.*}} vectorized loop define void @struct_return_f32_widen(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @struct_return_f32_widen -; CHECK-SAME: (ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) -; CHECK: vector.body: -; CHECK: [[WIDE_CALL:%.*]] = call { <2 x float>, <2 x float> } @fixed_vec_foo(<2 x float> [[WIDE_LOAD:%.*]]) -; CHECK: [[WIDE_A:%.*]] = extractvalue { <2 x float>, <2 x float> } [[WIDE_CALL]], 0 -; CHECK: [[WIDE_B:%.*]] = extractvalue { <2 x float>, <2 x float> } [[WIDE_CALL]], 1 -; CHECK: store <2 x float> [[WIDE_A]], ptr {{%.*}}, align 4 -; CHECK: store <2 x float> [[WIDE_B]], ptr {{%.*}}, align 4 +; CHECK-LABEL: define void @struct_return_f32_widen( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = call { <2 x float>, <2 x float> } @fixed_vec_foo(<2 x float> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <2 x float>, <2 x float> } 
[[TMP1]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x float> [[TMP2]], ptr [[TMP4]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x float> [[TMP3]], ptr [[TMP5]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; entry: br label %for.body @@ -39,14 +56,30 @@ exit: ; CHECK-REMARKS: remark: {{.*}} vectorized loop define void @struct_return_f64_widen(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @struct_return_f64_widen -; CHECK-SAME: (ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) -; CHECK: vector.body: -; CHECK: [[WIDE_CALL:%.*]] = call { <2 x double>, <2 x double> } @fixed_vec_bar(<2 x double> [[WIDE_LOAD:%.*]]) -; CHECK: [[WIDE_A:%.*]] = extractvalue { <2 x double>, <2 x double> } [[WIDE_CALL]], 0 -; CHECK: [[WIDE_B:%.*]] = extractvalue { <2 x double>, <2 x double> } [[WIDE_CALL]], 1 -; CHECK: store <2 x double> [[WIDE_A]], ptr {{%.*}}, align 8 -; CHECK: store <2 x double> [[WIDE_B]], ptr {{%.*}}, align 8 +; CHECK-LABEL: define void @struct_return_f64_widen( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = call { <2 x double>, <2 x double> } @fixed_vec_bar(<2 x double> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <2 x double>, <2 x double> } [[TMP1]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[OUT_A]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x double> [[TMP2]], ptr [[TMP4]], align 8 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[OUT_B]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x double> [[TMP3]], ptr [[TMP5]], align 8 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; entry: br label %for.body @@ -71,14 +104,43 @@ exit: ; CHECK-REMARKS: remark: {{.*}} vectorized loop define void @struct_return_f32_widen_rt_checks(ptr %in, ptr writeonly %out_a, ptr writeonly %out_b) { -; CHECK-LABEL: define void @struct_return_f32_widen_rt_checks -; CHECK-SAME: (ptr [[IN:%.*]], ptr writeonly [[OUT_A:%.*]], ptr writeonly [[OUT_B:%.*]]) -; CHECK: entry: -; CHECK: br label %vector.memcheck -; CHECK: vector.memcheck: -; CHECK: vector.body: -; CHECK: call { <2 x float>, <2 x float> } @fixed_vec_foo(<2 x float> [[WIDE_LOAD:%.*]]) -; CHECK: 
for.body: +; CHECK-LABEL: define void @struct_return_f32_widen_rt_checks( +; CHECK-SAME: ptr [[IN:%.*]], ptr writeonly [[OUT_A:%.*]], ptr writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[IN3:%.*]] = ptrtoint ptr [[IN]] to i32 +; CHECK-NEXT: [[OUT_A2:%.*]] = ptrtoint ptr [[OUT_A]] to i32 +; CHECK-NEXT: [[OUT_B1:%.*]] = ptrtoint ptr [[OUT_B]] to i32 +; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]] +; CHECK: [[VECTOR_MEMCHECK]]: +; CHECK-NEXT: [[TMP0:%.*]] = sub i32 [[OUT_B1]], [[OUT_A2]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP0]], 8 +; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[OUT_A2]], [[IN3]] +; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i32 [[TMP1]], 8 +; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[OUT_B1]], [[IN3]] +; CHECK-NEXT: [[DIFF_CHECK5:%.*]] = icmp ult i32 [[TMP2]], 8 +; CHECK-NEXT: [[CONFLICT_RDX6:%.*]] = or i1 [[CONFLICT_RDX]], [[DIFF_CHECK5]] +; CHECK-NEXT: br i1 [[CONFLICT_RDX6]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = call { <2 x float>, <2 x float> } @fixed_vec_foo(<2 x float> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 0 +; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <2 x float>, <2 x float> } [[TMP4]], 1 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[TMP7]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x float> [[TMP6]], ptr [[TMP8]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH]]: +; ; CHECK call { float, float } @foo(float [[LOAD:%.*]]) entry: br label %for.body @@ -105,9 +167,28 @@ exit: ; TODO: Allow mixed-struct type vectorization and mark overflow intrinsics as trivially vectorizable. 
; CHECK-REMARKS: remark: {{.*}} loop not vectorized: call instruction cannot be vectorized define void @test_overflow_intrinsic(ptr noalias readonly %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @test_overflow_intrinsic -; CHECK-NOT: vector.body: -; CHECK-NOT: @llvm.sadd.with.overflow.v{{.+}}i32 +; CHECK-LABEL: define void @test_overflow_intrinsic( +; CHECK-SAME: ptr noalias readonly [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]] +; CHECK-NEXT: [[IN_VAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CALL:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[IN_VAL]], i32 [[IN_VAL]]) +; CHECK-NEXT: [[EXTRACT_RET:%.*]] = extractvalue { i32, i1 } [[CALL]], 0 +; CHECK-NEXT: [[EXTRACT_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[CALL]], 1 +; CHECK-NEXT: [[ZEXT_OVERFLOW:%.*]] = zext i1 [[EXTRACT_OVERFLOW]] to i8 +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[OUT_A]], i64 [[IV]] +; CHECK-NEXT: store i32 [[EXTRACT_RET]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[OUT_B]], i64 [[IV]] +; CHECK-NEXT: store i8 [[ZEXT_OVERFLOW]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; entry: br label %for.body @@ -133,9 +214,27 @@ exit: ; CHECK-REMARKS: remark: {{.*}} vectorized loop define void @struct_return_i32_three_results_widen(ptr noalias %in, ptr noalias writeonly %out_a) { -; CHECK-LABEL: define void @struct_return_i32_three_results_widen -; CHECK: vector.body: -; CHECK: call { <2 x i32>, <2 x i32>, <2 x i32> } @fixed_vec_qux(<2 x i32> [[WIDE_LOAD:%.*]]) +; CHECK-LABEL: define void @struct_return_i32_three_results_widen( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @fixed_vec_qux(<2 x i32> [[WIDE_LOAD]]) +; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[OUT_A]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[TMP2]], ptr [[TMP3]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; entry: br label %for.body @@ -159,10 +258,50 @@ exit: ; (mainly it does not crash). 
; CHECK-REMARKS: remark: {{.*}} vectorized loop define void @scalarized_predicated_struct_return(ptr %a) { -; CHECK-LABEL: define void @scalarized_predicated_struct_return -; CHECK: vector.body: -; CHECK: pred.store.if: -; CHECK: tail call { i64, i64 } @bar_i64(i64 {{.+}}) +; CHECK-LABEL: define void @scalarized_predicated_struct_return( +; CHECK-SAME: ptr [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP0]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt <2 x i64> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 +; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; CHECK: [[PRED_STORE_IF]]: +; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0 +; CHECK-NEXT: [[TMP4:%.*]] = tail call { i64, i64 } @bar_i64(i64 [[TMP3]]) #[[ATTR4:[0-9]+]] +; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { i64, i64 } [[TMP4]], 0 +; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0 +; CHECK-NEXT: [[TMP7:%.*]] = udiv i64 [[TMP5]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP8]] +; CHECK-NEXT: store i64 [[TMP7]], ptr [[TMP9]], align 8 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]] +; CHECK: [[PRED_STORE_CONTINUE]]: +; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1 +; CHECK-NEXT: br i1 [[TMP10]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2]] +; CHECK: [[PRED_STORE_IF1]]: +; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1 +; CHECK-NEXT: [[TMP12:%.*]] = tail call { i64, i64 } @bar_i64(i64 [[TMP11]]) #[[ATTR4]] +; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { i64, i64 } [[TMP12]], 0 +; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1 +; CHECK-NEXT: [[TMP15:%.*]] = udiv i64 [[TMP13]], [[TMP14]] +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP16]] +; CHECK-NEXT: store i64 [[TMP15]], ptr [[TMP17]], align 8 +; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE2]] +; CHECK: [[PRED_STORE_CONTINUE2]]: +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; entry: br label %for.body @@ -192,8 +331,27 @@ exit: ; Negative test. Widening structs of vectors is not supported. 
; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized define void @negative_struct_of_vectors(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @negative_struct_of_vectors -; CHECK-NOT: vector.body: +; CHECK-LABEL: define void @negative_struct_of_vectors( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]] +; CHECK-NEXT: [[IN_VAL:%.*]] = load <1 x float>, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CALL:%.*]] = tail call { <1 x float>, <1 x float> } @foo(<1 x float> [[IN_VAL]]) #[[ATTR1:[0-9]+]] +; CHECK-NEXT: [[EXTRACT_A:%.*]] = extractvalue { <1 x float>, <1 x float> } [[CALL]], 0 +; CHECK-NEXT: [[EXTRACT_B:%.*]] = extractvalue { <1 x float>, <1 x float> } [[CALL]], 1 +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]] +; CHECK-NEXT: store <1 x float> [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[IV]] +; CHECK-NEXT: store <1 x float> [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; entry: br label %for.body @@ -219,9 +377,27 @@ exit: ; Negative test. Widening structs with mixed element types is not supported. 
; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized define void @negative_mixed_element_type_struct_return(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @negative_mixed_element_type_struct_return -; CHECK-NOT: vector.body: -; CHECK-NOT: call {{.*}} @fixed_vec_baz +; CHECK-LABEL: define void @negative_mixed_element_type_struct_return( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]] +; CHECK-NEXT: [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CALL:%.*]] = tail call { float, i32 } @baz(float [[IN_VAL]]) #[[ATTR5:[0-9]+]] +; CHECK-NEXT: [[EXTRACT_A:%.*]] = extractvalue { float, i32 } [[CALL]], 0 +; CHECK-NEXT: [[EXTRACT_B:%.*]] = extractvalue { float, i32 } [[CALL]], 1 +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]] +; CHECK-NEXT: store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[OUT_B]], i64 [[IV]] +; CHECK-NEXT: store i32 [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; entry: br label %for.body @@ -249,9 +425,27 @@ exit: ; Negative test. Widening non-literal structs is not supported. 
; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized define void @negative_named_struct_return(ptr noalias readonly %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @negative_named_struct_return -; CHECK-NOT: vector.body: -; CHECK-NOT: call {{.*}} @fixed_vec_bar +; CHECK-LABEL: define void @negative_named_struct_return( +; CHECK-SAME: ptr noalias readonly [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[IV]] +; CHECK-NEXT: [[IN_VAL:%.*]] = load double, ptr [[ARRAYIDX]], align 8 +; CHECK-NEXT: [[CALL:%.*]] = tail call [[NAMED_STRUCT:%.*]] @[[BAR_NAMED:[a-zA-Z0-9_$\"\\.-]*[a-zA-Z_$\"\\.-][a-zA-Z0-9_$\"\\.-]*]](double [[IN_VAL]]) #[[ATTR6:[0-9]+]] +; CHECK-NEXT: [[EXTRACT_A:%.*]] = extractvalue [[NAMED_STRUCT]] [[CALL]], 0 +; CHECK-NEXT: [[EXTRACT_B:%.*]] = extractvalue [[NAMED_STRUCT]] [[CALL]], 1 +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, ptr [[OUT_A]], i64 [[IV]] +; CHECK-NEXT: store double [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 8 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds double, ptr [[OUT_B]], i64 [[IV]] +; CHECK-NEXT: store double [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 8 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; entry: br label %for.body @@ -277,8 +471,28 @@ exit: ; Negative test. Nested homogeneous structs are not supported. 
; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized define void @negative_nested_struct(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @negative_nested_struct -; CHECK-NOT: vector.body: +; CHECK-LABEL: define void @negative_nested_struct( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]] +; CHECK-NEXT: [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CALL:%.*]] = tail call { { float, float } } @foo_nested_struct(float [[IN_VAL]]) #[[ATTR1]] +; CHECK-NEXT: [[EXTRACT_INNER:%.*]] = extractvalue { { float, float } } [[CALL]], 0 +; CHECK-NEXT: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[EXTRACT_INNER]], 0 +; CHECK-NEXT: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[EXTRACT_INNER]], 1 +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]] +; CHECK-NEXT: store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[IV]] +; CHECK-NEXT: store float [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; entry: br label %for.body @@ -305,8 +519,24 @@ exit: ; Negative test. The second element of the struct cannot be widened. ; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized define void @negative_non_widenable_element(ptr noalias %in, ptr noalias writeonly %out_a) { -; CHECK-LABEL: define void @negative_non_widenable_element -; CHECK-NOT: vector.body: +; CHECK-LABEL: define void @negative_non_widenable_element( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]] +; CHECK-NEXT: [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CALL:%.*]] = tail call { float, [1 x float] } @foo_one_non_widenable_element(float [[IN_VAL]]) #[[ATTR1]] +; CHECK-NEXT: [[EXTRACT_A:%.*]] = extractvalue { float, [1 x float] } [[CALL]], 0 +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]] +; CHECK-NEXT: store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; entry: br label %for.body @@ -329,8 +559,28 @@ exit: ; Negative test. Homogeneous structs of arrays are not supported. 
; CHECK-REMARKS-COUNT: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized define void @negative_struct_array_elements(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @negative_struct_array_elements -; CHECK-NOT: vector.body: +; CHECK-LABEL: define void @negative_struct_array_elements( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]] +; CHECK-NEXT: [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CALL:%.*]] = tail call { [2 x float] } @foo_arrays(float [[IN_VAL]]) #[[ATTR1]] +; CHECK-NEXT: [[EXTRACT_INNER:%.*]] = extractvalue { [2 x float] } [[CALL]], 0 +; CHECK-NEXT: [[EXTRACT_A:%.*]] = extractvalue [2 x float] [[EXTRACT_INNER]], 0 +; CHECK-NEXT: [[EXTRACT_B:%.*]] = extractvalue [2 x float] [[EXTRACT_INNER]], 1 +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]] +; CHECK-NEXT: store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[IV]] +; CHECK-NEXT: store float [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; entry: br label %for.body @@ -357,8 +607,26 @@ exit: ; Negative test. Widening struct loads is not supported. ; CHECK-REMARKS: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized define void @negative_struct_load(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) { -; CHECK-LABEL: define void @negative_struct_load -; CHECK-NOT: vector.body: +; CHECK-LABEL: define void @negative_struct_load( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { float, float }, ptr [[IN]], i64 [[IV]] +; CHECK-NEXT: [[CALL:%.*]] = load { float, float }, ptr [[ARRAYIDX]], align 8 +; CHECK-NEXT: [[EXTRACT_A:%.*]] = extractvalue { float, float } [[CALL]], 0 +; CHECK-NEXT: [[EXTRACT_B:%.*]] = extractvalue { float, float } [[CALL]], 1 +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUT_A]], i64 [[IV]] +; CHECK-NEXT: store float [[EXTRACT_A]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[OUT_B]], i64 [[IV]] +; CHECK-NEXT: store float [[EXTRACT_B]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; entry: br label %for.body @@ -383,8 +651,23 @@ exit: ; Negative test. Widening struct stores is not supported. 
; CHECK-REMARKS: remark: {{.*}} loop not vectorized: instruction return type cannot be vectorized define void @negative_struct_return_store_struct(ptr noalias %in, ptr noalias writeonly %out) { -; CHECK-LABEL: define void @negative_struct_return_store_struct -; CHECK-NOT: vector.body: +; CHECK-LABEL: define void @negative_struct_return_store_struct( +; CHECK-SAME: ptr noalias [[IN:%.*]], ptr noalias writeonly [[OUT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { float, float }, ptr [[IN]], i64 [[IV]] +; CHECK-NEXT: [[IN_VAL:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CALL:%.*]] = tail call { float, float } @foo(float [[IN_VAL]]) #[[ATTR1]] +; CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr inbounds { float, float }, ptr [[OUT]], i64 [[IV]] +; CHECK-NEXT: store { float, float } [[CALL]], ptr [[OUT_PTR]], align 8 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll index 87eebb7baf880..a852b731ea13b 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll @@ -54,16 +54,6 @@ define i32 @test(ptr %vf1, i64 %n) { ; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[FOR_BODY:.*]] -; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] -; CHECK-NEXT: [[TMP18:%.*]] = alloca i8, i64 [[N]], align 16 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[VF1]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store ptr [[TMP18]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV]], 200 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret i32 0 ; diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll index 4bc4e54ae60fa..00e04c7daee51 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll @@ -34,15 +34,6 @@ define void @canonical_small_tc_i8(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] 
= add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 15 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -94,15 +85,6 @@ define void @canonical_upper_limit_i8(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 255 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -154,15 +136,6 @@ define void @canonical_lower_limit_i16(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 257 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -214,15 +187,6 @@ define void @canonical_upper_limit_i16(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 65535 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -274,15 +238,6 @@ define void @canonical_lower_limit_i32(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 65537 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -334,15 +289,6 @@ define void @canonical_upper_limit_i32(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label 
%[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 4294967295 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -394,15 +340,6 @@ define void @canonical_lower_limit_i64(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], 4294967297 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -454,15 +391,6 @@ define void @canonical_upper_limit_i64(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[IV_NEXT]], -1 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; @@ -514,15 +442,6 @@ define void @canonical_lower_limit_i128(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[END:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i256 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i256 [[IV]] -; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i256 [[IV]], 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq i256 [[IV_NEXT]], 18446744073709551617 -; CHECK-NEXT: br i1 [[COND]], label %[[END]], label %[[LOOP]] ; CHECK: [[END]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll index 6fd7c709a0442..b6f43aaa86e33 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll @@ -55,22 +55,6 @@ define void @tail_fold_switch(ptr %dst, i32 %0) { ; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: 
[[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: switch i32 [[TMP0]], label %[[LOOP_LATCH]] [ -; CHECK-NEXT: i32 0, label %[[LOOP_LATCH]] -; CHECK-NEXT: i32 1, label %[[IF_THEN:.*]] -; CHECK-NEXT: ] -; CHECK: [[IF_THEN]]: -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 4 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll index 45c56a0d7b79d..3bc5da155b351 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll @@ -53,18 +53,9 @@ define void @VF1-VPlanExe(ptr %dst) { ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store i32 0, ptr [[DST_PTR]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: br label %for.body @@ -132,17 +123,9 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void -; CHECK: for.body: -; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[PTR1]], [[SCALAR_PH:%.*]] ] -; CHECK-NEXT: store double 0.000000e+00, ptr [[ADDR]], align 8 -; CHECK-NEXT: [[PTR]] = getelementptr inbounds double, ptr [[ADDR]], i64 1 -; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[PTR]], [[PTR2]] -; CHECK-NEXT: br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] ; entry: %ptr2 = getelementptr inbounds double, ptr %ptr1, i64 15 diff --git a/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll b/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll index 387a02e63fe59..8a162930ffd99 100644 --- a/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll +++ b/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll @@ -133,26 +133,7 @@ define void @ext_cmp(ptr %src.1, ptr %src.2, ptr noalias %dst) { ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP11]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds i16, ptr [[SRC_1]], i64 [[IV]] -; CHECK-NEXT: [[I2:%.*]] = load i16, ptr [[GEP_SRC_1]], align 2 -; CHECK-NEXT: [[I3:%.*]] = sext i16 [[I2]] to i32 -; CHECK-NEXT: [[C_1:%.*]] = icmp sgt i32 0, [[I3]] -; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds i8, ptr [[SRC_2]], i64 [[IV]] -; CHECK-NEXT: [[I4:%.*]] = load i8, ptr [[GEP_SRC_2]], align 2 -; CHECK-NEXT: [[I5:%.*]] = zext i8 [[I4]] to i32 -; CHECK-NEXT: [[I6:%.*]] = select i1 [[C_1]], i32 0, i32 [[I5]] -; CHECK-NEXT: [[I7:%.*]] = and i32 [[I6]], 0 -; CHECK-NEXT: [[I8:%.*]] = trunc nuw nsw i32 [[I7]] to i16 -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store i16 [[I8]], ptr [[GEP_DST]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll b/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll index 83ecf1adc80b5..6e7cdba1cd3ce 100644 --- a/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll +++ b/llvm/test/Transforms/LoopVectorize/trunc-loads-p16.ll @@ -26,21 +26,7 @@ define void @pr77468(ptr noalias %src, ptr noalias %dst, i1 %x) { ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i16 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1 -; CHECK-NEXT: [[X_EXT:%.*]] = zext i1 [[X]] to i32 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[X_EXT]], [[L]] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i16 [[IV]] -; CHECK-NEXT: [[T:%.*]] = trunc i32 [[AND]] to i16 -; CHECK-NEXT: store i16 [[T]], ptr [[GEP_DST]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i16 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll b/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll index 2f5f157e55f63..2aebb73081364 100644 --- a/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/trunc-reductions.ll @@ -18,11 +18,7 @@ define i8 @reduction_and_trunc(ptr noalias nocapture %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[AND_LCSSA_OFF0:%.*]] = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> 
[[TMP2]]) ; CHECK-NEXT: ret i8 [[AND_LCSSA_OFF0]] @@ -64,11 +60,7 @@ define i16 @reduction_or_trunc(ptr noalias nocapture %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[XOR_LCSSA_OFF0:%.*]] = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> [[TMP2]]) ; CHECK-NEXT: ret i16 [[XOR_LCSSA_OFF0]] @@ -110,11 +102,7 @@ define i16 @reduction_xor_trunc(ptr noalias nocapture %ptr) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_END:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: br i1 poison, label [[FOR_END]], label [[FOR_BODY]] ; CHECK: for.end: ; CHECK-NEXT: [[XOR_LCSSA_OFF0:%.*]] = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> [[TMP2]]) ; CHECK-NEXT: ret i16 [[XOR_LCSSA_OFF0]] diff --git a/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll b/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll index 4a372b5f786e6..498c58d1bfd82 100644 --- a/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll +++ b/llvm/test/Transforms/LoopVectorize/trunc-shifts.ll @@ -24,20 +24,7 @@ define void @test_pr47927_lshr_const_shift_ops(ptr %dst, i32 %f) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = lshr i32 [[F]], 18 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]] -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -81,20 +68,7 @@ define void @test_shl_const_shift_ops(ptr %dst, i32 %f) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = shl i32 [[F]], 18 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]] -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label 
[[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -138,20 +112,7 @@ define void @test_ashr_const_shift_ops(ptr %dst, i32 %f) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = ashr i32 [[F]], 18 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]] -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -195,22 +156,7 @@ define void @test_shl_const_shifted_op(ptr %dst, i32 %f) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV_EXT]] -; CHECK-NEXT: [[LV:%.*]] = load i8, ptr [[GEP]], align 1 -; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[LV]] to i32 -; CHECK-NEXT: [[L:%.*]] = shl i32 19, [[ZEXT]] -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -257,22 +203,7 @@ define void @test_lshr_by_18(ptr %A) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV_EXT]] -; CHECK-NEXT: [[LV:%.*]] = load i8, ptr [[GEP]], align 1 -; CHECK-NEXT: [[LV_EXT:%.*]] = zext i8 [[LV]] to i32 -; CHECK-NEXT: [[L:%.*]] = lshr i32 [[LV_EXT]], 18 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -318,22 +249,7 @@ define void @test_lshr_by_4(ptr %A) { ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: 
middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i64 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV_EXT]] -; CHECK-NEXT: [[LV:%.*]] = load i8, ptr [[GEP]], align 1 -; CHECK-NEXT: [[LV_EXT:%.*]] = zext i8 [[LV]] to i32 -; CHECK-NEXT: [[L:%.*]] = lshr i32 [[LV_EXT]], 4 -; CHECK-NEXT: [[L_T:%.*]] = trunc i32 [[L]] to i8 -; CHECK-NEXT: store i8 [[L_T]], ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[IV_NEXT]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[CONV]], 100 -; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll b/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll index d6273e015f24c..b85f2746a0b14 100644 --- a/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll +++ b/llvm/test/Transforms/LoopVectorize/uitofp-preserve-nneg.ll @@ -22,19 +22,7 @@ define void @uitofp_preserve_nneg(ptr %result, i32 %size, float %y) { ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[FOR_EXIT:%.*]] -; CHECK: scalar.ph: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER4:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[CONV:%.*]] = uitofp nneg i32 [[TMP4]] to float -; CHECK-NEXT: [[TMP5:%.*]] = fmul float [[CONV]], [[Y]] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = zext nneg i32 [[TMP4]] to i64 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[RESULT]], i64 [[INDVARS_IV]] -; CHECK-NEXT: store float [[TMP5]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[TMP4]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], 256 -; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_EXIT]] ; CHECK: for.exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll index ccb301f4a3f79..985a9a2c2d155 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll @@ -21,21 +21,6 @@ define void @blend_uniform_iv_trunc(i1 %c) { ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[IV_TRUNC_2:%.*]] = trunc i64 [[IV]] to i16 -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT:.*]], label %[[LOOP_LATCH]] -; CHECK: [[LOOP_NEXT]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[BLEND:%.*]] = phi i16 [ poison, %[[LOOP_HEADER]] ], [ [[IV_TRUNC_2]], %[[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i16 [[BLEND]] -; CHECK-NEXT: store i16 0, ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 
[[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -79,20 +64,6 @@ define void @blend_uniform_iv(i1 %c) { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT:.*]], label %[[LOOP_LATCH]] -; CHECK: [[LOOP_NEXT]]: -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[BLEND:%.*]] = phi i64 [ poison, %[[LOOP_HEADER]] ], [ [[IV]], %[[LOOP_NEXT]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[BLEND]] -; CHECK-NEXT: store i16 0, ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -150,25 +121,6 @@ define void @blend_chain_iv(i1 %c) { ; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT:.*]], label %[[LOOP_LATCH]] -; CHECK: [[LOOP_NEXT]]: -; CHECK-NEXT: br i1 [[C]], label %[[LOOP_NEXT_2:.*]], label %[[LOOP_NEXT_3:.*]] -; CHECK: [[LOOP_NEXT_2]]: -; CHECK-NEXT: br label %[[LOOP_NEXT_3]] -; CHECK: [[LOOP_NEXT_3]]: -; CHECK-NEXT: [[BLEND_1:%.*]] = phi i64 [ undef, %[[LOOP_NEXT]] ], [ [[IV]], %[[LOOP_NEXT_2]] ] -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[BLEND:%.*]] = phi i64 [ undef, %[[LOOP_HEADER]] ], [ [[BLEND_1]], %[[LOOP_NEXT_3]] ] -; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds [32 x i16], ptr @dst, i16 0, i64 [[BLEND]] -; CHECK-NEXT: store i16 0, ptr [[DST_PTR]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[CMP439:%.*]] = icmp ult i64 [[IV]], 31 -; CHECK-NEXT: br i1 [[CMP439]], label %[[LOOP_HEADER]], label %[[EXIT]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -275,22 +227,6 @@ define void @redundant_branch_and_blends_without_mask(ptr %A) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_IV:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_IV]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[L]], 10 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[P_1:%.*]] = phi i32 [ [[L]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[P_2:%.*]] = phi i32 [ [[ADD]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[RES:%.*]] = add i32 [[P_1]], [[P_2]] -; CHECK-NEXT: store i32 [[RES]], ptr [[GEP_IV]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 1 -; CHECK-NEXT: 
br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll index 2c49fda1ad520..571c55c276dd5 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1.ll @@ -24,7 +24,8 @@ define void @ld_div1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -64,10 +65,11 @@ define void @ld_div2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -112,10 +114,11 @@ define void @ld_div3_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -167,10 +170,11 @@ define void @ld_div1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -214,10 +218,11 @@ define void @ld_div2_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP8]], ptr [[TMP6]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -269,10 +274,11 @@ define void @ld_div3_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: 
[[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -324,7 +330,7 @@ define void @ld_div1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -379,7 +385,7 @@ define void @ld_div2_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -426,7 +432,7 @@ define void @ld_div3_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP8]], ptr [[TMP6]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -467,7 +473,7 @@ define void @ld_div1_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -516,7 +522,7 @@ define void @ld_div2_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -565,7 +571,7 @@ define void @ld_div3_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = 
add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -621,7 +627,7 @@ define void @ld_div1_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -669,7 +675,7 @@ define void @ld_div2_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP9]], ptr [[TMP7]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -725,7 +731,7 @@ define void @ld_div3_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -781,7 +787,7 @@ define void @ld_div1_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -837,7 +843,7 @@ define void @ld_div2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -885,7 +891,7 @@ define void @ld_div3_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP9]], 
ptr [[TMP7]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -933,10 +939,11 @@ define void @test_step_is_not_invariant(ptr %A) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], 56 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll index c7525fb684d83..6cf82fc2c9d48 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll @@ -24,7 +24,8 @@ define void @ld_and_neg1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -64,10 +65,11 @@ define void @ld_and_neg2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -112,10 +114,11 @@ define void @ld_and_neg3_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -167,10 +170,11 @@ define void @ld_and_neg1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -212,10 +216,11 @@ define void @ld_and_neg2_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store i64 [[TMP5]], ptr [[TMP7]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -267,7 +272,7 @@ define void @ld_and_neg1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -322,7 +327,7 @@ define void @ld_and_neg2_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -371,7 +376,7 @@ define void @ld_and_neg2_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -427,7 +432,7 @@ define void @ld_and_neg2_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -483,7 +488,7 @@ define void @ld_and_neg2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: 
br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: @@ -539,7 +544,7 @@ define void @ld_and_neg3_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll index 27cefa2d41927..9ed22400b7055 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_div_urem.ll @@ -58,7 +58,8 @@ define void @ld_div2_urem3_1(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: br i1 [[TMP36]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -130,10 +131,11 @@ define void @ld_div2_urem3_2(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 8) ; CHECK-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -203,10 +205,11 @@ define void @ld_div4(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 8) ; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop @@ -247,10 +250,11 @@ define void @ld_div8_urem3(ptr noalias %A, ptr noalias %B) { ; CHECK-NEXT: store <8 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: +; CHECK: exit: +; CHECK-NEXT: ret void ; entry: br label %loop diff --git 
a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll index cee53b5b1d2f9..2b5d0f3cb0125 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_lshr.ll @@ -25,7 +25,8 @@ define void @ld_lshr0_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr0_step1_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -46,7 +47,8 @@ define void @ld_lshr0_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -86,10 +88,11 @@ define void @ld_lshr1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr1_step1_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -123,10 +126,11 @@ define void @ld_lshr1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -166,10 +170,11 @@ define void @ld_lshr2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF2-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr2_step1_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -189,10 +194,11 @@ define void @ld_lshr2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: store <4 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8 ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF4-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP6:![0-9]+]] +; VF4-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -244,10 +250,11 @@ define void @ld_lshr0_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr0_step2_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -296,10 +303,11 @@ define void @ld_lshr0_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -343,10 +351,11 @@ define void @ld_lshr1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store i64 [[TMP8]], ptr [[TMP6]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; VF2-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_lshr1_step2_start0_ind1 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -379,10 +388,11 @@ define void @ld_lshr1_step2_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: store i64 [[TMP14]], ptr [[TMP10]], align 8 ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; VF4-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -434,7 +444,7 @@ define void @ld_lshr0_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -486,7 
+496,7 @@ define void @ld_lshr0_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -541,7 +551,7 @@ define void @ld_lshr1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; VF2-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -593,7 +603,7 @@ define void @ld_lshr1_step3_start0_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; VF4-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -643,7 +653,7 @@ define void @ld_lshr1_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; VF2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; VF2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -681,7 +691,7 @@ define void @ld_lshr1_step1_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996 -; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; VF4-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -729,7 +739,7 @@ define void @ld_lshr1_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store i64 [[TMP9]], ptr [[TMP7]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; VF2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; VF2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -766,7 +776,7 @@ define void @ld_lshr1_step2_start1_ind1(ptr noalias %A, ptr noalias %B) { ; 
VF4-NEXT: store i64 [[TMP15]], ptr [[TMP11]], align 8 ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496 -; VF4-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; VF4-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -822,7 +832,7 @@ define void @ld_lshr1_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -875,7 +885,7 @@ define void @ld_lshr1_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -931,7 +941,7 @@ define void @ld_lshr2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; VF2-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -984,7 +994,7 @@ define void @ld_lshr2_step3_start1_ind1(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; VF4-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll index 0f8289d06d761..12851d7d91cc7 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction2.ll @@ -35,7 +35,8 @@ define void @ld_div1_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div1_step1_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ 
-76,7 +77,8 @@ define void @ld_div1_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -121,10 +123,11 @@ define void @ld_div2_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP5]], align 8 ; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; VF2-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div2_step1_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -162,10 +165,11 @@ define void @ld_div2_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -218,10 +222,11 @@ define void @ld_div3_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div3_step1_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -259,10 +264,11 @@ define void @ld_div3_step1_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -322,10 +328,11 @@ define void @ld_div1_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], 
label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div1_step2_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -378,10 +385,11 @@ define void @ld_div1_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -441,10 +449,11 @@ define void @ld_div2_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div2_step2_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -497,10 +506,11 @@ define void @ld_div2_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -560,10 +570,11 @@ define void @ld_div3_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[EXIT:%.*]] -; VF2: scalar.ph: +; VF2: exit: +; VF2-NEXT: ret void ; ; VF4-LABEL: define void @ld_div3_step2_start0_ind2 ; VF4-SAME: (ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) { @@ -616,10 +627,11 @@ define void @ld_div3_step2_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 500 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], 
label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[EXIT:%.*]] -; VF4: scalar.ph: +; VF4: exit: +; VF4-NEXT: ret void ; entry: br label %loop @@ -679,7 +691,7 @@ define void @ld_div1_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -735,7 +747,7 @@ define void @ld_div1_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -798,7 +810,7 @@ define void @ld_div2_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -854,7 +866,7 @@ define void @ld_div2_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -917,7 +929,7 @@ define void @ld_div3_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; VF2-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -973,7 +985,7 @@ define void @ld_div3_step3_start0_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP32]], label 
[[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; VF4-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1030,7 +1042,7 @@ define void @ld_div1_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1072,7 +1084,7 @@ define void @ld_div1_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1129,7 +1141,7 @@ define void @ld_div2_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1171,7 +1183,7 @@ define void @ld_div2_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 996 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1228,7 +1240,7 @@ define void @ld_div3_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998 -; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; VF2-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1270,7 +1282,7 @@ define void @ld_div3_step1_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP21:%.*]] = icmp eq i64 
[[INDEX_NEXT]], 996 -; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; VF4-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1334,7 +1346,7 @@ define void @ld_div1_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1391,7 +1403,7 @@ define void @ld_div1_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1455,7 +1467,7 @@ define void @ld_div2_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1512,7 +1524,7 @@ define void @ld_div2_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1576,7 +1588,7 @@ define void @ld_div3_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 498 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1633,7 +1645,7 @@ define void @ld_div3_step2_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 8) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], 
splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 496 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1697,7 +1709,7 @@ define void @ld_div1_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1754,7 +1766,7 @@ define void @ld_div1_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1818,7 +1830,7 @@ define void @ld_div2_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1875,7 +1887,7 @@ define void @ld_div2_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: @@ -1939,7 +1951,7 @@ define void @ld_div3_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF2-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 6) ; VF2-NEXT: [[VEC_IND_NEXT2]] = add <2 x i64> [[VEC_IND1]], splat (i64 2) ; VF2-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; VF2-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; VF2: middle.block: ; VF2-NEXT: br label [[SCALAR_PH:%.*]] ; VF2: scalar.ph: @@ -1996,7 +2008,7 @@ define void @ld_div3_step3_start1_ind2(ptr noalias %A, ptr noalias %B) { ; VF4-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 12) ; 
VF4-NEXT: [[VEC_IND_NEXT2]] = add <4 x i64> [[VEC_IND1]], splat (i64 4) ; VF4-NEXT: [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], 332 -; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; VF4-NEXT: br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; VF4: middle.block: ; VF4-NEXT: br label [[SCALAR_PH:%.*]] ; VF4: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll b/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll index 5f83e39200644..5d07341263bc2 100644 --- a/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll +++ b/llvm/test/Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll @@ -23,26 +23,7 @@ define void @test_not_first_lane_only_constant(ptr %A, ptr noalias %B) { ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i16 [[IV]] -; CHECK-NEXT: br i1 false, label [[LOOP_LATCH]], label [[ELSE_1:%.*]] -; CHECK: else.1: -; CHECK-NEXT: br i1 false, label [[THEN_2:%.*]], label [[ELSE_2:%.*]] -; CHECK: then.2: -; CHECK-NEXT: br label [[ELSE_2]] -; CHECK: else.2: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[MERGE:%.*]] = phi ptr [ [[B]], [[ELSE_2]] ], [ poison, [[LOOP_HEADER]] ] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[MERGE]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: store i16 [[L]], ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_2:%.*]] = icmp eq i16 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[C_2]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -98,29 +79,7 @@ define void @test_not_first_lane_only_wide_compare(ptr %A, ptr noalias %B, i16 % ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i16 [[IV]] -; CHECK-NEXT: [[L_0:%.*]] = load i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_0:%.*]] = icmp ult i16 [[L_0]], [[X]] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[ELSE_1:%.*]] -; CHECK: else.1: -; CHECK-NEXT: [[C_1:%.*]] = icmp ult i16 [[L_0]], [[Y]] -; CHECK-NEXT: br i1 [[C_1]], label [[THEN_2:%.*]], label [[ELSE_2:%.*]] -; CHECK: then.2: -; CHECK-NEXT: br label [[ELSE_2]] -; CHECK: else.2: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[MERGE:%.*]] = phi ptr [ [[B]], [[ELSE_2]] ], [ poison, [[LOOP_HEADER]] ] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[MERGE]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: store i16 [[L]], ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_2:%.*]] = icmp eq i16 [[IV_NEXT]], 1000 -; CHECK-NEXT: br 
i1 [[C_2]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -179,29 +138,7 @@ define void @test_not_first_lane_only_wide_compare_incoming_order_swapped(ptr %A ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i16, ptr [[A]], i16 [[IV]] -; CHECK-NEXT: [[L_0:%.*]] = load i16, ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_0:%.*]] = icmp ult i16 [[L_0]], [[X]] -; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[ELSE_1:%.*]] -; CHECK: else.1: -; CHECK-NEXT: [[C_1:%.*]] = icmp ult i16 [[L_0]], [[Y]] -; CHECK-NEXT: br i1 [[C_1]], label [[THEN_2:%.*]], label [[ELSE_2:%.*]] -; CHECK: then.2: -; CHECK-NEXT: br label [[ELSE_2]] -; CHECK: else.2: -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[MERGE:%.*]] = phi ptr [ poison, [[LOOP_HEADER]] ], [ [[B]], [[ELSE_2]] ] -; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[MERGE]], align 2 -; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1 -; CHECK-NEXT: store i16 [[L]], ptr [[GEP_A]], align 2 -; CHECK-NEXT: [[C_2:%.*]] = icmp eq i16 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[C_2]], label [[EXIT]], label [[LOOP_HEADER]] +; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll index 462865d11507a..8da1dca52e87b 100644 --- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll +++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll @@ -31,20 +31,8 @@ define i8 @test_early_exit_max_tc_less_than_16(ptr dereferenceable(16) %A) nosyn ; VF8UF1-NEXT: br label %[[EXIT:.*]] ; VF8UF1: [[VECTOR_EARLY_EXIT]]: ; VF8UF1-NEXT: br label %[[EXIT]] -; VF8UF1: [[SCALAR_PH:.*]]: -; VF8UF1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF8UF1: [[LOOP_HEADER]]: -; VF8UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]] -; VF8UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1 -; VF8UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF8UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF8UF1: [[LOOP_LATCH]]: -; VF8UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF8UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF8UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF8UF1: [[EXIT]]: -; VF8UF1-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] +; VF8UF1-NEXT: [[RES:%.*]] = phi i8 [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] ; VF8UF1-NEXT: ret i8 [[RES]] ; ; VF8UF2-LABEL: define i8 @test_early_exit_max_tc_less_than_16( @@ -70,20 +58,8 @@ define i8 @test_early_exit_max_tc_less_than_16(ptr dereferenceable(16) %A) nosyn ; VF8UF2-NEXT: br label %[[EXIT:.*]] ; VF8UF2: [[VECTOR_EARLY_EXIT]]: ; VF8UF2-NEXT: br label %[[EXIT]] -; VF8UF2: [[SCALAR_PH:.*]]: -; VF8UF2-NEXT: br 
label %[[LOOP_HEADER:.*]] -; VF8UF2: [[LOOP_HEADER]]: -; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8UF2-NEXT: [[P_SRC:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] -; VF8UF2-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC]], align 1 -; VF8UF2-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF8UF2-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF8UF2: [[LOOP_LATCH]]: -; VF8UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; VF8UF2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF8UF2: [[EXIT]]: -; VF8UF2-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] +; VF8UF2-NEXT: [[RES:%.*]] = phi i8 [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] ; VF8UF2-NEXT: ret i8 [[RES]] ; ; VF16UF1-LABEL: define i8 @test_early_exit_max_tc_less_than_16( @@ -104,20 +80,8 @@ define i8 @test_early_exit_max_tc_less_than_16(ptr dereferenceable(16) %A) nosyn ; VF16UF1-NEXT: br label %[[EXIT:.*]] ; VF16UF1: [[VECTOR_EARLY_EXIT]]: ; VF16UF1-NEXT: br label %[[EXIT]] -; VF16UF1: [[SCALAR_PH:.*]]: -; VF16UF1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF16UF1: [[LOOP_HEADER]]: -; VF16UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF16UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]] -; VF16UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1 -; VF16UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF16UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF16UF1: [[LOOP_LATCH]]: -; VF16UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF16UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF16UF1: [[EXIT]]: -; VF16UF1-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] +; VF16UF1-NEXT: [[RES:%.*]] = phi i8 [ 1, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] ; VF16UF1-NEXT: ret i8 [[RES]] ; entry: @@ -166,20 +130,8 @@ define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(ptr derefer ; VF8UF1-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v8i1(<8 x i1> [[TMP3]], i1 true) ; VF8UF1-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], [[FIRST_ACTIVE_LANE]] ; VF8UF1-NEXT: br label %[[EXIT]] -; VF8UF1: [[SCALAR_PH:.*]]: -; VF8UF1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF8UF1: [[LOOP_HEADER]]: -; VF8UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]] -; VF8UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1 -; VF8UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF8UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF8UF1: [[LOOP_LATCH]]: -; VF8UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF8UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF8UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF8UF1: [[EXIT]]: -; VF8UF1-NEXT: [[RES:%.*]] = phi i64 [ [[IV1]], %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP8]], %[[VECTOR_EARLY_EXIT]] ] +; VF8UF1-NEXT: [[RES:%.*]] = phi i64 [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP8]], %[[VECTOR_EARLY_EXIT]] ] ; VF8UF1-NEXT: ret i64 [[RES]] ; ; VF8UF2-LABEL: define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside( 
@@ -212,20 +164,8 @@ define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(ptr derefer ; VF8UF2-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 [[TMP7]] ; VF8UF2-NEXT: [[TMP12:%.*]] = add i64 0, [[TMP11]] ; VF8UF2-NEXT: br label %[[EXIT]] -; VF8UF2: [[SCALAR_PH:.*]]: -; VF8UF2-NEXT: br label %[[LOOP_HEADER:.*]] -; VF8UF2: [[LOOP_HEADER]]: -; VF8UF2-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF8UF2-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]] -; VF8UF2-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1 -; VF8UF2-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF8UF2-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF8UF2: [[LOOP_LATCH]]: -; VF8UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF8UF2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF8UF2: [[EXIT]]: -; VF8UF2-NEXT: [[RES:%.*]] = phi i64 [ [[IV1]], %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP12]], %[[VECTOR_EARLY_EXIT]] ] +; VF8UF2-NEXT: [[RES:%.*]] = phi i64 [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP12]], %[[VECTOR_EARLY_EXIT]] ] ; VF8UF2-NEXT: ret i64 [[RES]] ; ; VF16UF1-LABEL: define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside( @@ -248,20 +188,8 @@ define i64 @test_early_exit_max_tc_less_than_16_with_iv_used_outside(ptr derefer ; VF16UF1-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v16i1(<16 x i1> [[TMP3]], i1 true) ; VF16UF1-NEXT: [[TMP5:%.*]] = add i64 0, [[FIRST_ACTIVE_LANE]] ; VF16UF1-NEXT: br label %[[EXIT]] -; VF16UF1: [[SCALAR_PH:.*]]: -; VF16UF1-NEXT: br label %[[LOOP_HEADER:.*]] -; VF16UF1: [[LOOP_HEADER]]: -; VF16UF1-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; VF16UF1-NEXT: [[P_SRC1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV1]] -; VF16UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC1]], align 1 -; VF16UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 -; VF16UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] -; VF16UF1: [[LOOP_LATCH]]: -; VF16UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV1]], 1 -; VF16UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]] ; VF16UF1: [[EXIT]]: -; VF16UF1-NEXT: [[RES:%.*]] = phi i64 [ [[IV1]], %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP5]], %[[VECTOR_EARLY_EXIT]] ] +; VF16UF1-NEXT: [[RES:%.*]] = phi i64 [ 1, %[[MIDDLE_BLOCK]] ], [ [[TMP5]], %[[VECTOR_EARLY_EXIT]] ] ; VF16UF1-NEXT: ret i64 [[RES]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll index d01358407f02f..2317af5619749 100644 --- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll +++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-outside-iv-users.ll @@ -17,18 +17,8 @@ define i64 @remove_loop_region_int_iv_used_outside(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store ptr null, 
ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[IV]], %[[LOOP]] ], [ 15, %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 15 ; entry: br label %loop @@ -60,18 +50,8 @@ define i64 @remove_loop_region_int_iv_inc_used_outside(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store ptr null, ptr [[GEP]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i64 [ [[IV_NEXT]], %[[LOOP]] ], [ 16, %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret i64 [[RES]] +; CHECK-NEXT: ret i64 16 ; entry: br label %loop @@ -105,19 +85,8 @@ define ptr @remove_loop_region_ptr_iv_used_outside(ptr %dst) { ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[TMP0]], i64 -8 ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[DST]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[INT_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INT_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8 -; CHECK-NEXT: [[INT_IV_NEXT]] = add i64 [[INT_IV]], 1 -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 8 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[INT_IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi ptr [ [[PTR_IV]], %[[LOOP]] ], [ [[IND_ESCAPE]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret ptr [[RES]] +; CHECK-NEXT: ret ptr [[IND_ESCAPE]] ; entry: br label %loop @@ -151,19 +120,8 @@ define ptr @remove_loop_region_ptr_iv_inc_used_outside(ptr %dst) { ; CHECK-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[DST]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[INT_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INT_IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8 -; CHECK-NEXT: [[INT_IV_NEXT]] = add i64 [[INT_IV]], 1 -; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 8 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[INT_IV_NEXT]], 16 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi ptr [ [[PTR_IV_NEXT]], %[[LOOP]] ], [ [[TMP0]], %[[MIDDLE_BLOCK]] ] -; CHECK-NEXT: ret ptr [[RES]] +; CHECK-NEXT: ret ptr [[TMP0]] ; entry: br label %loop diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll index 5f8646925bf6d..e160a15ece47d 100644 --- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll +++ 
b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll @@ -176,15 +176,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5, ; VF8UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF8UF1: [[MIDDLE_BLOCK]]: ; VF8UF1-NEXT: br label %[[EXIT:.*]] -; VF8UF1: [[SCALAR_PH:.*]]: -; VF8UF1-NEXT: br label %[[LOOP:.*]] -; VF8UF1: [[LOOP]]: -; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]] -; VF8UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF8UF1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF8UF1-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF8UF1: [[EXIT]]: ; VF8UF1-NEXT: ret void ; @@ -316,15 +307,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5, ; VF8UF2-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF8UF2: [[MIDDLE_BLOCK]]: ; VF8UF2-NEXT: br label %[[EXIT:.*]] -; VF8UF2: [[SCALAR_PH:.*]]: -; VF8UF2-NEXT: br label %[[LOOP:.*]] -; VF8UF2: [[LOOP]]: -; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]] -; VF8UF2-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF8UF2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF8UF2-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF8UF2: [[EXIT]]: ; VF8UF2-NEXT: ret void ; @@ -455,15 +437,6 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5, ; VF16UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF16UF1: [[MIDDLE_BLOCK]]: ; VF16UF1-NEXT: br label %[[EXIT:.*]] -; VF16UF1: [[SCALAR_PH:.*]]: -; VF16UF1-NEXT: br label %[[LOOP:.*]] -; VF16UF1: [[LOOP]]: -; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]] -; VF16UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2 -; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; VF16UF1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; VF16UF1-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] ; VF16UF1: [[EXIT]]: ; VF16UF1-NEXT: ret void ; @@ -728,23 +701,14 @@ define void @scev_expand_step(i64 %x, ptr %dst) { ; VF8UF1: [[PRED_STORE_IF13]]: ; VF8UF1-NEXT: [[TMP40:%.*]] = mul i64 7, [[STEP]] ; VF8UF1-NEXT: [[TMP41:%.*]] = add i64 0, [[TMP40]] -; VF8UF1-NEXT: [[TMP42:%.*]] = add i64 [[TMP41]], [[STEP]] -; VF8UF1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP42]] -; VF8UF1-NEXT: store i8 0, ptr [[TMP43]], align 1 +; VF8UF1-NEXT: [[IV_NEXT:%.*]] = add i64 [[TMP41]], [[STEP]] +; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] +; VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 ; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE14]] ; VF8UF1: [[PRED_STORE_CONTINUE14]]: ; VF8UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF8UF1: [[MIDDLE_BLOCK]]: ; VF8UF1-NEXT: br label %[[EXIT:.*]] -; VF8UF1: [[SCALAR_PH:.*]]: -; VF8UF1-NEXT: br label %[[LOOP:.*]] -; VF8UF1: [[LOOP]]: -; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]] -; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] -; VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 -; VF8UF1-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16 -; VF8UF1-NEXT: br i1 [[EC]], label 
%[[LOOP]], label %[[EXIT]] ; VF8UF1: [[EXIT]]: ; VF8UF1-NEXT: ret void ; @@ -922,22 +886,13 @@ define void @scev_expand_step(i64 %x, ptr %dst) { ; VF8UF2-NEXT: [[TMP81:%.*]] = mul i64 15, [[STEP]] ; VF8UF2-NEXT: [[TMP82:%.*]] = add i64 0, [[TMP81]] ; VF8UF2-NEXT: [[TMP83:%.*]] = add i64 [[TMP82]], [[STEP]] -; VF8UF2-NEXT: [[TMP84:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP83]] -; VF8UF2-NEXT: store i8 0, ptr [[TMP84]], align 1 +; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP83]] +; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1 ; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE30]] ; VF8UF2: [[PRED_STORE_CONTINUE30]]: ; VF8UF2-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF8UF2: [[MIDDLE_BLOCK]]: ; VF8UF2-NEXT: br label %[[EXIT:.*]] -; VF8UF2: [[SCALAR_PH:.*]]: -; VF8UF2-NEXT: br label %[[LOOP:.*]] -; VF8UF2: [[LOOP]]: -; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]] -; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] -; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1 -; VF8UF2-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16 -; VF8UF2-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]] ; VF8UF2: [[EXIT]]: ; VF8UF2-NEXT: ret void ; @@ -1114,22 +1069,13 @@ define void @scev_expand_step(i64 %x, ptr %dst) { ; VF16UF1-NEXT: [[TMP80:%.*]] = mul i64 15, [[STEP]] ; VF16UF1-NEXT: [[TMP81:%.*]] = add i64 0, [[TMP80]] ; VF16UF1-NEXT: [[TMP82:%.*]] = add i64 [[TMP81]], [[STEP]] -; VF16UF1-NEXT: [[TMP83:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP82]] -; VF16UF1-NEXT: store i8 0, ptr [[TMP83]], align 1 +; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP82]] +; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 ; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE30]] ; VF16UF1: [[PRED_STORE_CONTINUE30]]: ; VF16UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; VF16UF1: [[MIDDLE_BLOCK]]: ; VF16UF1-NEXT: br label %[[EXIT:.*]] -; VF16UF1: [[SCALAR_PH:.*]]: -; VF16UF1-NEXT: br label %[[LOOP:.*]] -; VF16UF1: [[LOOP]]: -; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]] -; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] -; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 -; VF16UF1-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16 -; VF16UF1-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]] ; VF16UF1: [[EXIT]]: ; VF16UF1-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll index 0b86a2280b529..027dcaf771072 100644 --- a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll +++ b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll @@ -22,13 +22,11 @@ define void @test_versioned_with_sext_use(i32 %offset, ptr %dst) { ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], 200 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], 
[[OFFSET_EXT]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[INDEX]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP3]] ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -94,13 +92,11 @@ define void @test_versioned_with_zext_use(i32 %offset, ptr %dst) { ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], 200 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[INDEX]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP3]] ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -233,13 +229,11 @@ define void @test_versioned_with_different_uses(i32 %offset, ptr noalias %dst.1, ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], 200 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[INDEX]] ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = trunc i64 [[INDEX]] to i32 ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[OFFSET_IDX2]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[OFFSET_IDX2]], 1 @@ -414,26 +408,20 @@ define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress { ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i1 [[G]], true ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 4 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], [[G_64]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], [[G_64]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[INDEX]] ; CHECK-NEXT: store <4 x i16> splat (i16 1), ptr [[TMP4]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]] ; CHECK-NEXT: store i16 [[G_16]], ptr [[GEP]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], [[G_64]] diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll index 4e6ef0de6a9ed..06b044872c217 100644 --- a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll @@ -580,6 +580,127 @@ exit: ret i32 %add } +define i32 @print_mulacc_negated(ptr %a, ptr %b) { +; CHECK-LABEL: 'print_mulacc_negated' +; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { +; CHECK-NEXT: Live-in vp<%0> = VF +; CHECK-NEXT: Live-in vp<%1> = VF * UF +; CHECK-NEXT: Live-in vp<%2> = vector-trip-count +; CHECK-NEXT: Live-in ir<1024> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: EMIT vp<%3> = reduction-start-vector ir<0>, ir<0>, ir<1> +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<%4> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi vp<%3>, vp<%8> +; CHECK-NEXT: vp<%5> = SCALAR-STEPS vp<%4>, ir<1>, vp<%0> +; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<%5> +; CHECK-NEXT: vp<%6> = vector-pointer ir<%gep.a> +; CHECK-NEXT: WIDEN ir<%load.a> = load vp<%6> +; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<%5> +; CHECK-NEXT: vp<%7> = vector-pointer ir<%gep.b> +; CHECK-NEXT: WIDEN ir<%load.b> = load vp<%7> +; CHECK-NEXT: EXPRESSION vp<%8> = ir<%accum> + reduce.add (sub (0, mul (ir<%load.b> zext to i32), (ir<%load.a> zext to i32))) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%4>, vp<%1> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%10> = compute-reduction-result ir<%accum>, vp<%8> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<1024>, vp<%2> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb: +; CHECK-NEXT: IR %add.lcssa = phi i32 [ %add, %loop ] (extra operand: vp<%10> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%2>, middle.block ], [ ir<0>, ir-bb ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%10>, middle.block ], [ ir<0>, ir-bb ] +; CHECK-NEXT: Successor(s): ir-bb +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %accum = phi i32 [ 0, %entry ], [ 
%add, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) +; CHECK-NEXT: IR %gep.a = getelementptr i8, ptr %a, i64 %iv +; CHECK-NEXT: IR %load.a = load i8, ptr %gep.a, align 1 +; CHECK-NEXT: IR %ext.a = zext i8 %load.a to i32 +; CHECK-NEXT: IR %gep.b = getelementptr i8, ptr %b, i64 %iv +; CHECK-NEXT: IR %load.b = load i8, ptr %gep.b, align 1 +; CHECK-NEXT: IR %ext.b = zext i8 %load.b to i32 +; CHECK-NEXT: IR %mul = mul i32 %ext.b, %ext.a +; CHECK-NEXT: IR %sub = sub i32 0, %mul +; CHECK-NEXT: IR %add = add i32 %accum, %sub +; CHECK-NEXT: IR %iv.next = add i64 %iv, 1 +; CHECK-NEXT: IR %exitcond.not = icmp eq i64 %iv.next, 1024 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' { +; CHECK-NEXT: Live-in ir<1024> = vector-trip-count +; CHECK-NEXT: Live-in ir<1024> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb: +; CHECK-NEXT: Successor(s): vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector.body +; CHECK-EMPTY: +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi ir<0>, ir<%add> +; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<%index> +; CHECK-NEXT: WIDEN ir<%load.a> = load ir<%gep.a> +; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<%index> +; CHECK-NEXT: WIDEN ir<%load.b> = load ir<%gep.b> +; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32 +; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32 +; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a> +; CHECK-NEXT: WIDEN ir<%sub> = sub ir<0>, ir<%mul> +; CHECK-NEXT: REDUCE ir<%add> = ir<%accum> + reduce.add (ir<%sub>) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, ir<1024> +; CHECK-NEXT: Successor(s): middle.block, vector.body +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<%accum>, ir<%add> +; CHECK-NEXT: Successor(s): ir-bb +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb: +; CHECK-NEXT: IR %add.lcssa = phi i32 [ %add, %loop ] (extra operand: vp<[[RED_RESULT]]> from middle.block) +; CHECK-NEXT: No successors +; CHECK-NEXT: } +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %accum = phi i32 [ 0, %entry ], [ %add, %loop ] + %gep.a = getelementptr i8, ptr %a, i64 %iv + %load.a = load i8, ptr %gep.a, align 1 + %ext.a = zext i8 %load.a to i32 + %gep.b = getelementptr i8, ptr %b, i64 %iv + %load.b = load i8, ptr %gep.b, align 1 + %ext.b = zext i8 %load.b to i32 + %mul = mul i32 %ext.b, %ext.a + %sub = sub i32 0, %mul + %add = add i32 %accum, %sub + %iv.next = add i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, 1024 + br i1 %exitcond.not, label %exit, label %loop + +exit: + ret i32 %add +} + define i64 @print_mulacc_sub_extended(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { ; CHECK-LABEL: 'print_mulacc_sub_extended' ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { @@ -632,3 +753,50 @@ exit: %r.0.lcssa = phi i64 [ %rdx.next, %loop ] ret i64 %r.0.lcssa } + +define i64 @print_mulacc_duplicate_extends(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { +; CHECK-LABEL: 'print_mulacc_duplicate_extends' +; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { +; CHECK-NEXT: Live-in vp<[[VF:%.+]]> = VF +; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VTC:%.+]]> = 
vector-trip-count +; CHECK-NEXT: Live-in ir<%n> = original trip-count +; CHECK-EMPTY: +; CHECK: vector.ph: +; CHECK-NEXT: EMIT vp<[[RDX_START:%.+]]> = reduction-start-vector ir<0>, ir<0>, ir<1> +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[IV_NEXT:%.+]]> +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX:%.+]]> = phi vp<[[RDX_START]]>, vp<[[RDX_NEXT:%.+]]> +; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[IV]]>, ir<1> +; CHECK-NEXT: CLONE ir<[[ARRAYIDX0:%.+]]> = getelementptr inbounds ir<%x>, vp<[[STEPS]]> +; CHECK-NEXT: vp<[[ADDR0:%.+]]> = vector-pointer ir<[[ARRAYIDX0]]> +; CHECK-NEXT: WIDEN ir<[[LOAD0:%.+]]> = load vp<[[ADDR0]]> +; CHECK-NEXT: EXPRESSION vp<[[RDX_NEXT:%.+]]> = ir<[[RDX]]> + reduce.sub (mul nsw (ir<[[LOAD0]]> sext to i64), (ir<[[LOAD0]]> sext to i64)) +; CHECK-NEXT: EMIT vp<[[IV_NEXT]]> = add nuw vp<[[IV]]>, vp<[[VFxUF]]> +; CHECK-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i16, ptr %x, i32 %iv + %load0 = load i16, ptr %arrayidx, align 4 + %conv0 = sext i16 %load0 to i32 + %mul = mul nsw i32 %conv0, %conv0 + %conv = sext i32 %mul to i64 + %rdx.next = sub nsw i64 %rdx, %conv + %iv.next = add nuw nsw i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %exit, label %loop + +exit: + %r.0.lcssa = phi i64 [ %rdx.next, %loop ] + ret i64 %r.0.lcssa +} diff --git a/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll b/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll index 06b7bd8c9f84d..d08ca8c99e8ba 100644 --- a/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll +++ b/llvm/test/Transforms/LoopVectorize/widen-gep-all-indices-invariant.ll @@ -21,19 +21,6 @@ define void @pr63340(ptr %A, ptr %B) { ; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] -; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] -; CHECK-NEXT: br label [[LOOP_LATCH]] -; CHECK: loop.latch: -; CHECK-NEXT: [[F_0_I:%.*]] = phi ptr [ [[A]], [[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[F_0_I]], i64 1 -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds ptr, ptr [[B]], i8 [[IV]] -; CHECK-NEXT: store ptr [[GEP]], ptr [[GEP_B]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i8 [[IV_NEXT]], -128 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP_HEADER]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -78,17 +65,6 @@ define void @wide_gep_index_invariant(ptr noalias %dst, ptr noalias %src, i64 %n ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = load ptr, ptr [[SRC]], align 8 -; CHECK-NEXT: [[GEP_L:%.*]] = getelementptr float, ptr [[L]], i64 [[N]] -; 
CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store ptr [[GEP_L]], ptr [[GEP_DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -131,17 +107,6 @@ define void @wide_gep_multiple_indices_some_invariant(ptr noalias %dst, ptr noal ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[EXIT:%.*]] -; CHECK: scalar.ph: -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[L:%.*]] = load ptr, ptr [[SRC]], align 8 -; CHECK-NEXT: [[GEP_L:%.*]] = getelementptr [10 x float], ptr [[L]], i32 [[X]], i64 [[IV]] -; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr ptr, ptr [[DST]], i64 [[IV]] -; CHECK-NEXT: store ptr [[GEP_L]], ptr [[GEP_DST]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]] ; CHECK: exit: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll b/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll index 055f2fdb84834..922ebe7211b6e 100644 --- a/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll +++ b/llvm/test/Transforms/LoopVectorize/widen-intrinsic.ll @@ -20,17 +20,6 @@ define void @powi_only_first_lane_used_of_second_arg(ptr %p, i32 %pow) { ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[P_GEP:%.*]] = getelementptr float, ptr [[P]], i32 [[IV]] -; CHECK-NEXT: [[X:%.*]] = load float, ptr [[P_GEP]], align 4 -; CHECK-NEXT: [[Y:%.*]] = call float @llvm.powi.f32.i32(float [[X]], i32 [[POW]]) -; CHECK-NEXT: store float [[Y]], ptr [[P_GEP]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT: [[DONE:%.*]] = icmp eq i32 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/MemProfContextDisambiguation/funcassigncloning2.ll b/llvm/test/Transforms/MemProfContextDisambiguation/funcassigncloning2.ll new file mode 100644 index 0000000000000..18def1d41c30c --- /dev/null +++ b/llvm/test/Transforms/MemProfContextDisambiguation/funcassigncloning2.ll @@ -0,0 +1,122 @@ +;; Similar to funcassigncloning.ll but hand modified to add another allocation +;; whose pruned cold context only includes an immediate caller node that itself +;; doesn't need cloning, but calls a cloned allocating function, and is in a +;; function that gets cloned multiple times for a different callsite. This test +;; makes sure the non-cloned callsite is correctly updated in all function +;; clones. This case was missed because, due to context pruning, we don't have +;; any caller edges for the first callsite, so the handling that kicks in to +;; "reclone" other callsites in cloned functions was being missed. 
+ +; RUN: opt -passes=memprof-context-disambiguation -supports-hot-cold-new \ +; RUN: -memprof-verify-ccg -memprof-verify-nodes \ +; RUN: -pass-remarks=memprof-context-disambiguation \ +; RUN: %s -S 2>&1 | FileCheck %s --check-prefix=IR --check-prefix=REMARKS + + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +;; Eventually this function will be cloned several times (for the calls to new +;; for the various callers). However, function blah() includes an allocation +;; whose cold context was trimmed above here. We therefore should assume that +;; every caller of this function should call the same version of blah (which +;; will be the cloned ".memprof.1" version. +define internal void @_Z1EPPcS0_(ptr %buf1, ptr %buf2) #0 { +entry: + call void @blah(), !callsite !19 + %call = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #6, !memprof !0, !callsite !7 + %call1 = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #6, !memprof !8, !callsite !15 + ret void +} + +; REMARKS: created clone blah.memprof.1 +; REMARKS: call in clone _Z1EPPcS0_ assigned to call function clone blah.memprof.1 +; REMARKS: call in clone _Z1EPPcS0_.memprof.1 assigned to call function clone blah.memprof.1 +; REMARKS: call in clone _Z1EPPcS0_.memprof.2 assigned to call function clone blah.memprof.1 +; REMARKS: call in clone _Z1EPPcS0_.memprof.3 assigned to call function clone blah.memprof.1 + +; IR: define {{.*}} @_Z1EPPcS0_ +; IR: call {{.*}} @blah.memprof.1() +; IR: define {{.*}} @_Z1EPPcS0_.memprof.1 +; IR: call {{.*}} @blah.memprof.1() +; IR: define {{.*}} @_Z1EPPcS0_.memprof.2 +; IR: call {{.*}} @blah.memprof.1() +; IR: define {{.*}} @_Z1EPPcS0_.memprof.3 +; IR: call {{.*}} @blah.memprof.1() + +declare ptr @_Znam(i64) #1 + +define internal void @_Z1BPPcS0_(ptr %0, ptr %1) { +entry: + call void @_Z1EPPcS0_(ptr noundef %0, ptr noundef %1), !callsite !16 + ret void +} + +; Function Attrs: noinline +define internal void @_Z1CPPcS0_(ptr %0, ptr %1) #2 { +entry: + call void @_Z1EPPcS0_(ptr noundef %0, ptr noundef %1), !callsite !17 + ret void +} + +define internal void @_Z1DPPcS0_(ptr %0, ptr %1) #3 { +entry: + call void @_Z1EPPcS0_(ptr noundef %0, ptr noundef %1), !callsite !18 + ret void +} + +define internal void @blah() #0 { +entry: + %call = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #6, !memprof !22, !callsite !21 + ret void +} + +define internal void @foo() #0 { +entry: + call void @blah(), !callsite !20 + ret void +} + +; Function Attrs: nocallback nofree nounwind willreturn memory(argmem: write) +declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #4 + +declare i32 @sleep() #5 + +; uselistorder directives +uselistorder ptr @_Znam, { 1, 0, 2 } + +attributes #0 = { "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" } +attributes #1 = { "no-trapping-math"="true" } +attributes #2 = { noinline } +attributes #3 = { "frame-pointer"="all" } +attributes #4 = { nocallback nofree nounwind willreturn memory(argmem: write) } +attributes #5 = { "disable-tail-calls"="true" } +attributes #6 = { builtin } + +!0 = !{!1, !3, !5} +!1 = !{!2, !"cold"} +!2 = !{i64 -3461278137325233666, i64 -7799663586031895603} +!3 = !{!4, !"notcold"} +!4 = !{i64 -3461278137325233666, i64 -3483158674395044949} +!5 = !{!6, !"notcold"} +!6 = !{i64 -3461278137325233666, i64 -2441057035866683071} +!7 = !{i64 -3461278137325233666} +!8 = !{!9, !11, !13} +!9 = !{!10, !"notcold"} +!10 = !{i64 -1415475215210681400, i64 
-2441057035866683071} +!11 = !{!12, !"cold"} +!12 = !{i64 -1415475215210681400, i64 -3483158674395044949} +!13 = !{!14, !"notcold"} +!14 = !{i64 -1415475215210681400, i64 -7799663586031895603} +!15 = !{i64 -1415475215210681400} +!16 = !{i64 -2441057035866683071} +!17 = !{i64 -3483158674395044949} +!18 = !{i64 -7799663586031895603} +!19 = !{i64 123} +!20 = !{i64 234} +!21 = !{i64 345} +!22 = !{!23, !25} +!23 = !{!24, !"cold"} +!24 = !{i64 345, i64 123} +!25 = !{!26, !"notcold"} +!26 = !{i64 345, i64 234} diff --git a/llvm/test/Transforms/MemProfContextDisambiguation/recursive.ll b/llvm/test/Transforms/MemProfContextDisambiguation/recursive.ll index e301fa03ea099..0bf622276b328 100644 --- a/llvm/test/Transforms/MemProfContextDisambiguation/recursive.ll +++ b/llvm/test/Transforms/MemProfContextDisambiguation/recursive.ll @@ -47,7 +47,7 @@ ; RUN: -memprof-allow-recursive-callsites=true \ ; RUN: -memprof-clone-recursive-contexts=false \ ; RUN: %s -S 2>&1 | FileCheck %s \ -; RUN: --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned" \ +; RUN: --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned to call function clone _Z1Bi.memprof" \ ; RUN: --check-prefix=ALL --check-prefix=ALLOW-RECUR-CALLSITES --check-prefix=ALLOW-RECUR-CONTEXTS ;; Skipping recursive callsites should result in no cloning. @@ -56,7 +56,7 @@ ; RUN: -pass-remarks=memprof-context-disambiguation \ ; RUN: -memprof-allow-recursive-callsites=false \ ; RUN: %s -S 2>&1 | FileCheck %s \ -; RUN: --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned" \ +; RUN: --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned to call function clone _Z1Bi.memprof" \ ; RUN: --implicit-check-not="created clone" \ ; RUN: --implicit-check-not="marked with memprof allocation attribute cold" \ ; RUN: --check-prefix=ALL @@ -87,7 +87,7 @@ ; RUN: -memprof-allow-recursive-contexts=false \ ; RUN: -memprof-clone-recursive-contexts=false \ ; RUN: %s -S 2>&1 | FileCheck %s \ -; RUN: --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned" \ +; RUN: --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned to call function clone _Z1Bi.memprof" \ ; RUN: --check-prefix=ALL --check-prefix=ALLOW-RECUR-CALLSITES --check-prefix=SKIP-RECUR-CONTEXTS ; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:4:0: created clone _Z1Dv.memprof.1 diff --git a/llvm/test/Transforms/PGOProfile/chr-lifetimes.ll b/llvm/test/Transforms/PGOProfile/chr-lifetimes.ll new file mode 100644 index 0000000000000..b29834f9fe960 --- /dev/null +++ b/llvm/test/Transforms/PGOProfile/chr-lifetimes.ll @@ -0,0 +1,245 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -passes='require,chr' -S | FileCheck %s + +declare void @foo() +declare void @bar() +declare void @baz(i64) + +; Test that when we have a static alloca in an entry block that will get split, +; the alloca remains static and we preserve its lifetime annotations. 
+define void @test_chr_with_lifetimes(ptr %i) !prof !14 { +; CHECK-LABEL: @test_chr_with_lifetimes( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TEST:%.*]] = alloca i32, align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I:%.*]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 +; CHECK-NEXT: [[TMP9:%.*]] = freeze i1 [[TMP1]] +; CHECK-NEXT: [[TMP10:%.*]] = select i1 true, i1 [[TMP9]], i1 false +; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP1]] +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP10]], i1 [[TMP11]], i1 false +; CHECK-NEXT: br i1 [[TMP5]], label [[ENTRY_SPLIT:%.*]], label [[ENTRY_SPLIT_NONCHR:%.*]], !prof [[PROF15:![0-9]+]] +; CHECK: entry.split: +; CHECK-NEXT: [[TMP6:%.*]] = select i1 true, i64 0, i64 4, !prof [[PROF16:![0-9]+]] +; CHECK-NEXT: call void @baz(i64 [[TMP6]]) +; CHECK-NEXT: br i1 false, label [[BB1:%.*]], label [[BB0:%.*]], !prof [[PROF17:![0-9]+]] +; CHECK: bb0: +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: br label [[BB1]] +; CHECK: entry.split.nonchr: +; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP1]], i64 0, i64 4, !prof [[PROF16]] +; CHECK-NEXT: call void @baz(i64 [[TMP7]]) +; CHECK-NEXT: br i1 [[TMP1]], label [[BB0_NONCHR:%.*]], label [[BB1]], !prof [[PROF16]] +; CHECK: bb0.nonchr: +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: br label [[BB1]] +; CHECK: bb1: +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TEST]]) +; CHECK-NEXT: store ptr [[TEST]], ptr [[I]], align 8 +; CHECK-NEXT: br label [[BB2:%.*]] +; CHECK: bb2: +; CHECK-NEXT: [[TMP2:%.*]] = phi ptr [ [[TMP3:%.*]], [[BB2]] ], [ null, [[BB1]] ] +; CHECK-NEXT: [[TMP3]] = getelementptr i8, ptr [[TMP2]], i64 24 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq ptr [[TMP2]], [[I]] +; CHECK-NEXT: br i1 [[TMP4]], label [[BB3:%.*]], label [[BB2]] +; CHECK: bb3: +; CHECK-NEXT: ret void +; +entry: + %1 = load i32, ptr %i + %2 = icmp eq i32 %1, 0 + %3 = select i1 %2, i64 4, i64 0, !prof !15 + %test = alloca i32, align 8 + call void @baz(i64 %3) + br i1 %2, label %bb1, label %bb0, !prof !15 + +bb0: + call void @foo() + br label %bb1 + +bb1: + call void @llvm.lifetime.start.p0(ptr %test) + store ptr %test, ptr %i, align 8 + br label %bb2 + +bb2: + %4 = phi ptr [ %5, %bb2 ], [ null, %bb1 ] + %5 = getelementptr i8, ptr %4, i64 24 + %6 = icmp eq ptr %4, %i + br i1 %6, label %bb3, label %bb2 + +bb3: + ret void +} + +; Test that we remove lifetime markers that would otherwise refer to phi +; nodes given the dynamic allocas they referred to have been duplicated. 
+define void @test_chr_dynamic_alloca(ptr %i) !prof !14 { +; CHECK-LABEL: @test_chr_dynamic_alloca( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TEST1:%.*]] = load i32, ptr [[I:%.*]], align 4 +; CHECK-NEXT: [[TEST2:%.*]] = icmp eq i32 [[TEST1]], 5 +; CHECK-NEXT: br i1 [[TEST2]], label [[BB4:%.*]], label [[BB3:%.*]] +; CHECK: bb4: +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = freeze i1 [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = select i1 true, i1 [[TMP2]], i1 false +; CHECK-NEXT: [[TMP4:%.*]] = freeze i1 [[TMP1]] +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP3]], i1 [[TMP4]], i1 false +; CHECK-NEXT: br i1 [[TMP5]], label [[BB4_SPLIT:%.*]], label [[BB4_SPLIT_NONCHR:%.*]], !prof [[PROF15]] +; CHECK: bb4.split: +; CHECK-NEXT: [[TMP6:%.*]] = select i1 true, i64 0, i64 4, !prof [[PROF16]] +; CHECK-NEXT: [[TEST:%.*]] = alloca i32, align 8 +; CHECK-NEXT: call void @baz(i64 [[TMP6]]) +; CHECK-NEXT: br i1 false, label [[BB1:%.*]], label [[BB0:%.*]], !prof [[PROF17]] +; CHECK: bb0: +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: store ptr [[TEST]], ptr [[I]], align 8 +; CHECK-NEXT: br label [[BB1]] +; CHECK: bb4.split.nonchr: +; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP1]], i64 0, i64 4, !prof [[PROF16]] +; CHECK-NEXT: [[TEST_NONCHR:%.*]] = alloca i32, align 8 +; CHECK-NEXT: call void @baz(i64 [[TMP7]]) +; CHECK-NEXT: br i1 [[TMP1]], label [[BB0_NONCHR:%.*]], label [[BB1]], !prof [[PROF16]] +; CHECK: bb0.nonchr: +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: store ptr [[TEST_NONCHR]], ptr [[I]], align 8 +; CHECK-NEXT: br label [[BB1]] +; CHECK: bb1: +; CHECK-NEXT: [[TMP8:%.*]] = phi ptr [ [[TEST]], [[BB0]] ], [ [[TEST]], [[BB4_SPLIT]] ], [ [[TEST_NONCHR]], [[BB0_NONCHR]] ], [ [[TEST_NONCHR]], [[BB4_SPLIT_NONCHR]] ] +; CHECK-NEXT: call void @bar() +; CHECK-NEXT: store ptr [[TMP8]], ptr [[I]], align 8 +; CHECK-NEXT: br label [[BB2:%.*]] +; CHECK: bb2: +; CHECK-NEXT: [[TMP9:%.*]] = phi ptr [ [[TMP10:%.*]], [[BB2]] ], [ null, [[BB1]] ] +; CHECK-NEXT: [[TMP10]] = getelementptr i8, ptr [[TMP9]], i64 24 +; CHECK-NEXT: [[TEST5:%.*]] = load ptr, ptr [[TMP8]], align 8 +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq ptr [[TMP9]], [[TEST5]] +; CHECK-NEXT: br i1 [[TMP11]], label [[BB3]], label [[BB2]] +; CHECK: bb3: +; CHECK-NEXT: ret void +; +entry: + %test1 = load i32, ptr %i + %test2 = icmp eq i32 %test1, 5 + br i1 %test2, label %bb4, label %bb3 + +bb4: + %1 = load i32, ptr %i + %2 = icmp eq i32 %1, 0 + %3 = select i1 %2, i64 4, i64 0, !prof !15 + %test = alloca i32, align 8 + call void @baz(i64 %3) + br i1 %2, label %bb1, label %bb0, !prof !15 + +bb0: + call void @foo() + call void @llvm.lifetime.start.p0(ptr %test) + store ptr %test, ptr %i, align 8 + br label %bb1 + +bb1: + call void @bar() + call void @llvm.lifetime.start.p0(ptr %test) + store ptr %test, ptr %i, align 8 + br label %bb2 + +bb2: + %4 = phi ptr [ %5, %bb2 ], [ null, %bb1 ] + %5 = getelementptr i8, ptr %4, i64 24 + %test5 = load ptr, ptr %test + call void @llvm.lifetime.end.p0(ptr %test) + %6 = icmp eq ptr %4, %test5 + br i1 %6, label %bb3, label %bb2 + +bb3: + ret void +} + +; Test that we do not move around allocas that occur in the entry block +; before splitting. If we accidentally sink them, we can move them after +; their users. 
+define void @test_no_move_allocas(ptr %i) !prof !14 { +; CHECK-LABEL: @test_no_move_allocas( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TEST:%.*]] = alloca i32, align 8 +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TEST]]) +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I:%.*]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = freeze i1 [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = select i1 true, i1 [[TMP2]], i1 false +; CHECK-NEXT: [[TMP4:%.*]] = freeze i1 [[TMP1]] +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP3]], i1 [[TMP4]], i1 false +; CHECK-NEXT: br i1 [[TMP5]], label [[ENTRY_SPLIT:%.*]], label [[ENTRY_SPLIT_NONCHR:%.*]], !prof [[PROF15]] +; CHECK: entry.split: +; CHECK-NEXT: [[TMP6:%.*]] = select i1 true, i64 0, i64 4, !prof [[PROF16]] +; CHECK-NEXT: call void @baz(i64 [[TMP6]]) +; CHECK-NEXT: br i1 false, label [[BB1:%.*]], label [[BB0:%.*]], !prof [[PROF17]] +; CHECK: bb0: +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: br label [[BB1]] +; CHECK: entry.split.nonchr: +; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP1]], i64 0, i64 4, !prof [[PROF16]] +; CHECK-NEXT: call void @baz(i64 [[TMP7]]) +; CHECK-NEXT: br i1 [[TMP1]], label [[BB0_NONCHR:%.*]], label [[BB1]], !prof [[PROF16]] +; CHECK: bb0.nonchr: +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: br label [[BB1]] +; CHECK: bb1: +; CHECK-NEXT: call void @bar() +; CHECK-NEXT: br label [[BB2:%.*]] +; CHECK: bb2: +; CHECK-NEXT: [[TMP8:%.*]] = phi ptr [ [[TMP9:%.*]], [[BB2]] ], [ null, [[BB1]] ] +; CHECK-NEXT: [[TMP9]] = getelementptr i8, ptr [[TMP8]], i64 24 +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq ptr [[TMP8]], [[I]] +; CHECK-NEXT: br i1 [[TMP10]], label [[BB3:%.*]], label [[BB2]] +; CHECK: bb3: +; CHECK-NEXT: ret void +; +entry: + %test = alloca i32, align 8 + call void @llvm.lifetime.start.p0(ptr %test) + %1 = load i32, ptr %i + %2 = icmp eq i32 %1, 0 + %3 = select i1 %2, i64 4, i64 0, !prof !15 + call void @baz(i64 %3) + br i1 %2, label %bb1, label %bb0, !prof !15 + +bb0: + call void @foo() + br label %bb1 + +bb1: + call void @bar() + br label %bb2 + +bb2: + %4 = phi ptr [ %5, %bb2 ], [ null, %bb1 ] + %5 = getelementptr i8, ptr %4, i64 24 + %6 = icmp eq ptr %4, %i + br i1 %6, label %bb3, label %bb2 + +bb3: + ret void +} + + +!llvm.module.flags = !{!0} +!0 = !{i32 1, !"ProfileSummary", !1} +!1 = !{!2, !3, !4, !5, !6, !7, !8, !9} +!2 = !{!"ProfileFormat", !"InstrProf"} +!3 = !{!"TotalCount", i64 10000} +!4 = !{!"MaxCount", i64 10} +!5 = !{!"MaxInternalCount", i64 1} +!6 = !{!"MaxFunctionCount", i64 1000} +!7 = !{!"NumCounts", i64 3} +!8 = !{!"NumFunctions", i64 3} +!9 = !{!"DetailedSummary", !10} +!10 = !{!11, !12, !13} +!11 = !{i32 10000, i64 100, i32 1} +!12 = !{i32 999000, i64 100, i32 1} +!13 = !{i32 999999, i64 1, i32 2} + +!14 = !{!"function_entry_count", i64 100} +!15 = !{!"branch_weights", i32 0, i32 1} +; CHECK: !15 = !{!"branch_weights", i32 1000, i32 0} diff --git a/llvm/test/Transforms/PGOProfile/profcheck-synthetic.ll b/llvm/test/Transforms/PGOProfile/profcheck-synthetic.ll new file mode 100644 index 0000000000000..a3fd6b1f512a9 --- /dev/null +++ b/llvm/test/Transforms/PGOProfile/profcheck-synthetic.ll @@ -0,0 +1,73 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 6 +; RUN: opt -passes=prof-inject -profcheck-weights-for-test %s -S -o - | FileCheck %s --check-prefixes=TEST,CHECK +; RUN: opt -passes=prof-inject %s -S -o - | FileCheck %s --check-prefixes=NORMAL,CHECK + +define void @foo(i32 %cond) { +; TEST-LABEL: define 
void @foo( +; TEST-SAME: i32 [[COND:%.*]]) !prof [[PROF0:![0-9]+]] { +; TEST-NEXT: [[I:%.*]] = icmp eq i32 [[COND]], 0 +; TEST-NEXT: br i1 [[I]], label %[[A:.*]], label %[[B:.*]], !prof [[PROF1:![0-9]+]] +; TEST: [[A]]: +; TEST-NEXT: switch i32 [[COND]], label %[[DEFAULT:.*]] [ +; TEST-NEXT: i32 10, label %[[C:.*]] +; TEST-NEXT: i32 20, label %[[D:.*]] +; TEST-NEXT: ], !prof [[PROF2:![0-9]+]] +; TEST: [[BB1:.*:]] +; TEST-NEXT: br label %[[B]] +; TEST: [[B]]: +; TEST-NEXT: ret void +; TEST: [[DEFAULT]]: +; TEST-NEXT: ret void +; TEST: [[C]]: +; TEST-NEXT: ret void +; TEST: [[D]]: +; TEST-NEXT: ret void +; +; NORMAL-LABEL: define void @foo( +; NORMAL-SAME: i32 [[COND:%.*]]) !prof [[PROF0:![0-9]+]] { +; NORMAL-NEXT: [[I:%.*]] = icmp eq i32 [[COND]], 0 +; NORMAL-NEXT: br i1 [[I]], label %[[A:.*]], label %[[B:.*]], !prof [[PROF1:![0-9]+]] +; NORMAL: [[A]]: +; NORMAL-NEXT: switch i32 [[COND]], label %[[DEFAULT:.*]] [ +; NORMAL-NEXT: i32 10, label %[[C:.*]] +; NORMAL-NEXT: i32 20, label %[[D:.*]] +; NORMAL-NEXT: ], !prof [[PROF2:![0-9]+]] +; NORMAL: [[BB1:.*:]] +; NORMAL-NEXT: br label %[[B]] +; NORMAL: [[B]]: +; NORMAL-NEXT: ret void +; NORMAL: [[DEFAULT]]: +; NORMAL-NEXT: ret void +; NORMAL: [[C]]: +; NORMAL-NEXT: ret void +; NORMAL: [[D]]: +; NORMAL-NEXT: ret void +; + %i = icmp eq i32 %cond, 0 + br i1 %i, label %a, label %b +a: + switch i32 %cond, label %default [ + i32 10, label %c + i32 20, label %d + ] + br label %b +b: + ret void +default: + ret void +c: + ret void +d: + ret void +} +;. +; TEST: [[PROF0]] = !{!"function_entry_count", i64 1000} +; TEST: [[PROF1]] = !{!"branch_weights", i32 3, i32 5} +; TEST: [[PROF2]] = !{!"branch_weights", i32 5, i32 7, i32 11} +;. +; NORMAL: [[PROF0]] = !{!"function_entry_count", i64 1000} +; NORMAL: [[PROF1]] = !{!"branch_weights", i32 3, i32 5} +; NORMAL: [[PROF2]] = !{!"branch_weights", i32 1, i32 1, i32 1} +;. +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll index b056f44a6c469..8d20a3ba8ed08 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll @@ -14,16 +14,9 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK-NEXT: [[SUB:%.*]] = add i32 [[XA]], -1 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[SUB]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[XB]] to i64 -; CHECK-NEXT: [[TMP2:%.*]] = add nsw i64 [[TMP1]], [[TMP0]] -; CHECK-NEXT: [[SMAX7:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP2]], i64 32000) -; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 32000 -; CHECK-NEXT: [[UMIN8:%.*]] = zext i1 [[TMP3]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP2]], [[UMIN8]] -; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[SMAX7]], [[TMP4]] -; CHECK-NEXT: [[UMAX9:%.*]] = tail call i64 @llvm.umax.i64(i64 [[TMP1]], i64 1) -; CHECK-NEXT: [[TMP6:%.*]] = udiv i64 [[TMP5]], [[UMAX9]] -; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], [[UMIN8]] -; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 1 +; CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP0]], i64 31999) +; CHECK-NEXT: [[SMAX10:%.*]] = add nuw nsw i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[SMAX10]], [[TMP0]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP8]], 23 ; CHECK-NEXT: [[IDENT_CHECK_NOT:%.*]] = icmp eq i32 [[XB]], 1 ; CHECK-NEXT: [[OR_COND:%.*]] = and i1 [[MIN_ITERS_CHECK]], [[IDENT_CHECK_NOT]] @@ -50,13 +43,11 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY_PREHEADER13]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP8]], -8 -; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[N_VEC]], [[TMP1]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP18]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[N_VEC]], [[TMP0]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[INDEX]], [[TMP1]] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP19]], [[TMP0]] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[INDEX]], [[TMP0]] ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP20]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP20]], align 4, !alias.scope [[META0:![0-9]+]] @@ -75,7 +66,7 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END]], label [[FOR_BODY_PREHEADER13]] -; CHECK: for.body.preheader13: +; CHECK: for.body.preheader14: ; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ [[TMP0]], [[VECTOR_MEMCHECK]] ], [ [[TMP0]], [[FOR_BODY_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll index 435e6fccd620c..5e9fe8c4135ac 100644 --- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll +++ 
b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll @@ -34,8 +34,8 @@ define void @arm_mean_q7(ptr noundef %pSrc, i32 noundef %blockSize, ptr noundef ; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP2]], [[WHILE_END_LOOPEXIT]] ] ; CHECK-NEXT: [[AND:%.*]] = and i32 [[BLOCKSIZE]], 15 ; CHECK-NEXT: [[CMP2_NOT15:%.*]] = icmp eq i32 [[AND]], 0 -; CHECK-NEXT: br i1 [[CMP2_NOT15]], label [[WHILE_END5:%.*]], label [[MIDDLE_BLOCK:%.*]] -; CHECK: middle.block: +; CHECK-NEXT: br i1 [[CMP2_NOT15]], label [[WHILE_END5:%.*]], label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = tail call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 0, i32 [[AND]]) ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[PSRC_ADDR_0_LCSSA]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison) ; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32> @@ -44,7 +44,7 @@ define void @arm_mean_q7(ptr noundef %pSrc, i32 noundef %blockSize, ptr noundef ; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[SUM_0_LCSSA]], [[TMP6]] ; CHECK-NEXT: br label [[WHILE_END5]] ; CHECK: while.end5: -; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA]], [[WHILE_END]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA]], [[WHILE_END]] ], [ [[TMP7]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUM_1_LCSSA]], [[BLOCKSIZE]] ; CHECK-NEXT: [[CONV6:%.*]] = trunc i32 [[DIV]] to i8 ; CHECK-NEXT: store i8 [[CONV6]], ptr [[PRESULT:%.*]], align 1 diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll index 2fe420183c683..92891286d11d1 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll @@ -46,6 +46,7 @@ define dso_local void @test(ptr %start, ptr %end) #0 { ; AVX2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 124 ; AVX2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[BB12_PREHEADER11:%.*]], label [[VECTOR_PH:%.*]] ; AVX2: vector.ph: +; AVX2-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[TMP3]], 24 ; AVX2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP3]], 9223372036854775776 ; AVX2-NEXT: br label [[VECTOR_BODY:%.*]] ; AVX2: vector.body: @@ -84,7 +85,6 @@ define dso_local void @test(ptr %start, ptr %end) #0 { ; AVX2: vec.epilog.iter.check: ; AVX2-NEXT: [[TMP26:%.*]] = shl i64 [[N_VEC]], 2 ; AVX2-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP26]] -; AVX2-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[TMP3]], 24 ; AVX2-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0 ; AVX2-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[BB12_PREHEADER1]], label [[BB12_PREHEADER11]] ; AVX2: vec.epilog.ph: diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll b/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll index f7bc01e0e8af1..bcdf90c6c5c89 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll @@ -30,6 +30,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-NEXT: [[MIN_ITERS_CHECK6:%.*]] = icmp ult i32 [[N]], 16 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK6]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]] ; CHECK: [[VECTOR_PH1]]: +; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 12 ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 
[[WIDE_TRIP_COUNT]], 2147483632 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x double> poison, double [[A]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x double> [[BROADCAST_SPLATINSERT]], <4 x double> poison, <4 x i32> zeroinitializer @@ -67,7 +68,6 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 { ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[WIDE_TRIP_COUNT]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END]], label %[[VEC_EPILOG_ITER_CHECK:.*]] ; CHECK: [[VEC_EPILOG_ITER_CHECK]]: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 12 ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY_PREHEADER9]], label %[[VEC_EPILOG_PH]] ; CHECK: [[VEC_EPILOG_PH]]: diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll index 645dbc49269f0..02e05b2e4138a 100644 --- a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll @@ -7,8 +7,8 @@ define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) { ; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16 -; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 1 +; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -28,22 +28,22 @@ define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -62,22 +62,22 @@ define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) { 
%gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load0, ptr %gep_s0, align 16 - store i8 %load1, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load0, ptr %gep_s0, align 1 + store i8 %load1, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } @@ -87,9 +87,9 @@ define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) { ; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 1 ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> poison, <16 x i32> -; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -109,22 +109,22 @@ define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr 
%gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -144,22 +144,22 @@ define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) { %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 ; NOTE: value from %load1 in stored in %gep_s0 - store i8 %load1, ptr %gep_s0, align 16 - store i8 %load0, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load1, ptr %gep_s0, align 1 + store i8 %load0, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } @@ -170,9 +170,9 @@ define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) { ; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP2:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> , <31 x i8> poison) +; CHECK-NEXT: [[TMP2:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 1, <31 x i1> , <31 x i8> poison) ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <31 x i8> [[TMP2]], <31 x i8> poison, <16 x i32> -; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -192,22 +192,22 @@ define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = 
load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -226,22 +226,22 @@ define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) { %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load0, ptr %gep_s0, align 16 - store i8 %load1, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load0, ptr %gep_s0, align 1 + store i8 %load1, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } @@ -251,10 +251,10 @@ define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) { ; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP1:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> , <31 x i8> poison) +; CHECK-NEXT: [[TMP1:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 1, <31 x i1> , <31 x i8> poison) ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> -; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -274,22 +274,22 @@ define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 
16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -308,22 +308,22 @@ define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) { %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load1, ptr %gep_s0, align 16 - store i8 %load0, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load1, ptr %gep_s0, align 1 + store i8 %load0, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } @@ -335,8 +335,8 @@ define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]] ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 ; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16) -; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 1 [[GEP_L0]], i64 [[TMP1]], <16 x i1> 
splat (i1 true), i32 16) +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %stride0 = mul nsw i64 %stride, 0 @@ -373,22 +373,22 @@ define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -407,22 +407,22 @@ define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) { %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load0, ptr %gep_s0, align 16 - store i8 %load1, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load0, ptr %gep_s0, align 1 + store i8 %load1, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } @@ -434,9 +434,9 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]] 
; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 ; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16) +; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 1 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16) ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> poison, <16 x i32> -; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %stride0 = mul nsw i64 %stride, 0 @@ -473,22 +473,22 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -507,43 +507,34 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) { %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load1, ptr %gep_s0, align 16 - store i8 %load0, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load1, ptr %gep_s0, align 1 + store i8 %load0, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + 
store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } -; TODO: We want to generate this code: -; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { -; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0 -; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 -; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 8, <4 x i1> splat (i1 true), i32 4) -; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8> -; store <16 x i8> %bitcast_, ptr %gep_s0, align 16 -; ret void -; } -define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { -; CHECK-LABEL: define void @constant_stride_widen_no_reordering( +define void @constant_stride_masked_no_reordering(ptr %pl, i64 %stride, ptr %ps) { +; CHECK-LABEL: define void @constant_stride_masked_no_reordering( ; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 16, <28 x i1> , <28 x i8> poison) -; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> -; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 1, <28 x i1> , <28 x i8> poison) +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -563,22 +554,22 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 26 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 27 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr 
%gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -597,33 +588,134 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load0, ptr %gep_s0, align 16 - store i8 %load1, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load0, ptr %gep_s0, align 1 + store i8 %load1, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } +; TODO: We want to generate this code: +; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) #0 { +; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 +; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 +; %1 = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 1 %gep_l0, i64 100, <4 x i1> splat (i1 true), i32 4) +; %2 = bitcast <4 x i32> %1 to <16 x i8> +; store <16 x i8> %2, ptr %gep_s0, align 1 +; ret void +; } +define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { +; CHECK-LABEL: define void @constant_stride_widen_no_reordering( +; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 +; CHECK-NEXT: [[GEP_L4:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 100 +; CHECK-NEXT: [[GEP_L8:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 200 +; CHECK-NEXT: [[GEP_L12:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 300 +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 1 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 1 +; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> +; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> +; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP2]], <16 x i32> +; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> +; CHECK-NEXT: 
[[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP11]], <16 x i32> +; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> +; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i32> +; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 1 +; CHECK-NEXT: ret void +; + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 100 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 101 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 102 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 103 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 200 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 201 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 202 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 203 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 300 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 301 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 302 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 303 + + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load0, ptr %gep_s0, align 1 + store i8 %load1, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 + + ret void +} ; TODO: We want to generate this code: ; define void 
@rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0 ; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 -; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 %stride, <4 x i1> splat (i1 true), i32 4) +; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 1 %gep_l0, i64 %stride, <4 x i1> splat (i1 true), i32 4) ; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8> -; store <16 x i8> %bitcast_, ptr %gep_s0, align 16 +; store <16 x i8> %bitcast_, ptr %gep_s0, align 1 ; ret void ; } define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { @@ -638,10 +730,10 @@ define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { ; CHECK-NEXT: [[GEP_L8:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET8]] ; CHECK-NEXT: [[GEP_L12:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET12]] ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 16 -; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 16 -; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 16 -; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 1 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 1 ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> ; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP2]], <16 x i32> @@ -649,7 +741,7 @@ define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { ; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP11]], <16 x i32> ; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> ; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i32> -; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %offset0 = mul nsw i64 %stride, 0 @@ -686,22 +778,22 @@ define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %offset14 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %offset15 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , 
align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -720,22 +812,22 @@ define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load0, ptr %gep_s0, align 16 - store i8 %load1, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load0, ptr %gep_s0, align 1 + store i8 %load1, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/test-delete-tree.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/test-delete-tree.ll new file mode 100644 index 0000000000000..c4e6c4e5d5db5 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/test-delete-tree.ll @@ -0,0 +1,83 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -mtriple=riscv64 -mattr=+m,+v -passes=slp-vectorizer -S < %s | FileCheck %s + +; CHECK-NOT: TreeEntryToStridedPtrInfoMap is not cleared +define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) { +; CHECK-LABEL: define void @const_stride_1_no_reordering( +; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 1 +; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 1 +; CHECK-NEXT: ret void +; + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5 + %gep_l6 = getelementptr 
inbounds i8, ptr %pl, i64 6 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 13 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15 + + %load0 = load i8, ptr %gep_l0 + %load1 = load i8, ptr %gep_l1 + %load2 = load i8, ptr %gep_l2 + %load3 = load i8, ptr %gep_l3 + %load4 = load i8, ptr %gep_l4 + %load5 = load i8, ptr %gep_l5 + %load6 = load i8, ptr %gep_l6 + %load7 = load i8, ptr %gep_l7 + %load8 = load i8, ptr %gep_l8 + %load9 = load i8, ptr %gep_l9 + %load10 = load i8, ptr %gep_l10 + %load11 = load i8, ptr %gep_l11 + %load12 = load i8, ptr %gep_l12 + %load13 = load i8, ptr %gep_l13 + %load14 = load i8, ptr %gep_l14 + %load15 = load i8, ptr %gep_l15 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load0, ptr %gep_s0 + store i8 %load1, ptr %gep_s1 + store i8 %load2, ptr %gep_s2 + store i8 %load3, ptr %gep_s3 + store i8 %load4, ptr %gep_s4 + store i8 %load5, ptr %gep_s5 + store i8 %load6, ptr %gep_s6 + store i8 %load7, ptr %gep_s7 + store i8 %load8, ptr %gep_s8 + store i8 %load9, ptr %gep_s9 + store i8 %load10, ptr %gep_s10 + store i8 %load11, ptr %gep_s11 + store i8 %load12, ptr %gep_s12 + store i8 %load13, ptr %gep_s13 + store i8 %load14, ptr %gep_s14 + store i8 %load15, ptr %gep_s15 + + ret void +} diff --git a/llvm/test/Transforms/SLPVectorizer/X86/insertelement-with-copyable-args.ll b/llvm/test/Transforms/SLPVectorizer/X86/insertelement-with-copyable-args.ll new file mode 100644 index 0000000000000..67fb9ddf983c9 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/X86/insertelement-with-copyable-args.ll @@ -0,0 +1,66 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-99999 < %s | FileCheck %s + +define i64 @test(i32 %arg) { +; CHECK-LABEL: define i64 @test( +; CHECK-SAME: i32 [[ARG:%.*]]) { +; CHECK-NEXT: [[BB:.*:]] +; CHECK-NEXT: [[FREEZE:%.*]] = freeze i32 0 +; CHECK-NEXT: br i1 false, label %[[BB1:.*]], label %[[BB1]] +; CHECK: [[BB1]]: +; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr addrspace(1) null, align 4 +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> , i32 [[ARG]], i32 3 +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[FREEZE]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> poison, i32 [[FREEZE]], i32 0 +; CHECK-NEXT: 
[[TMP2:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = add <2 x i32> [[TMP2]], zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> poison, i32 [[ARG]], i32 0 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> [[TMP5]], i32 [[LOAD]], i32 1 +; CHECK-NEXT: [[TMP7:%.*]] = icmp ult <2 x i32> [[TMP4]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP7]], i32 0 +; CHECK-NEXT: [[AND:%.*]] = and i1 [[TMP8]], false +; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> poison, <4 x i32> +; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i32> , <4 x i32> [[TMP9]], <4 x i32> +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq <4 x i32> [[TMP10]], [[TMP0]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <4 x i32> [[TMP10]], [[TMP0]] +; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <4 x i1> [[TMP11]], <4 x i1> [[TMP12]], <4 x i32> +; CHECK-NEXT: br i1 false, label %[[BB11:.*]], label %[[BB12:.*]] +; CHECK: [[BB11]]: +; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[ADD]] to i64 +; CHECK-NEXT: ret i64 0 +; CHECK: [[BB12]]: +; CHECK-NEXT: [[ZEXT13:%.*]] = zext i32 [[ADD]] to i64 +; CHECK-NEXT: ret i64 0 +; +bb: + %freeze = freeze i32 0 + br i1 false, label %bb1, label %bb1 + +bb1: + %load = load i32, ptr addrspace(1) null, align 4 + %0 = insertelement <4 x i32> , i32 %freeze, i32 2 + %1 = insertelement <4 x i32> , i32 %arg, i32 3 + %add = add i32 %freeze, 0 + %2 = insertelement <2 x i32> poison, i32 %freeze, i32 0 + %3 = shufflevector <2 x i32> %2, <2 x i32> poison, <2 x i32> zeroinitializer + %4 = add <2 x i32> %3, zeroinitializer + %5 = insertelement <2 x i32> poison, i32 %arg, i32 0 + %6 = insertelement <2 x i32> %5, i32 %load, i32 1 + %7 = icmp ult <2 x i32> %4, %6 + %8 = extractelement <2 x i1> %7, i32 0 + %and = and i1 %8, false + %9 = insertelement <4 x i32> %0, i32 %add, i32 1 + %10 = icmp eq <4 x i32> %9, %1 + %11 = icmp ult <4 x i32> %9, %1 + %12 = shufflevector <4 x i1> %10, <4 x i1> %11, <4 x i32> + br i1 false, label %bb11, label %bb12 + +bb11: + %zext = zext i32 %add to i64 + ret i64 0 + +bb12: + %zext13 = zext i32 %add to i64 + ret i64 0 +} diff --git a/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll b/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll new file mode 100644 index 0000000000000..9cdcdf1b5d5ca --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll @@ -0,0 +1,21 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s -slp-threshold=-100 | FileCheck %s +define i1 @foo(i1 %v) { ; assume %v is 1 +; CHECK-LABEL: define i1 @foo( +; CHECK-SAME: i1 [[V:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i1> poison, i1 [[V]], i32 0 +; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x i1> [[TMP0]], <2 x i1> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i1> zeroinitializer, [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 +; CHECK-NEXT: [[SUB:%.*]] = sub i1 [[TMP3]], [[TMP4]] +; CHECK-NEXT: ret i1 [[SUB]] +; +entry: + %not = xor i1 %v, 1 ; 0 + %not1 = xor i1 %not, 1 ; 1 + %mul = mul i1 %v, 1 ; 1 + %sub = sub i1 %not1, %mul ; 0 + ret i1 %sub ; 0 +} diff --git 
a/llvm/test/Transforms/SimplifyCFG/hoist-with-metadata.ll b/llvm/test/Transforms/SimplifyCFG/hoist-with-metadata.ll index d34ac2bb30040..85c8ed20210b8 100644 --- a/llvm/test/Transforms/SimplifyCFG/hoist-with-metadata.ll +++ b/llvm/test/Transforms/SimplifyCFG/hoist-with-metadata.ll @@ -424,6 +424,174 @@ join: ret ptr %phi } +define void @hoist_captures_same(i1 %c, ptr %x, ptr %y) { +; CHECK-LABEL: @hoist_captures_same( +; CHECK-NEXT: if: +; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META9:![0-9]+]] +; CHECK-NEXT: ret void +; +if: + br i1 %c, label %then, label %else + +then: + store ptr %x, ptr %y, !captures !{!"address"} + br label %out + +else: + store ptr %x, ptr %y, !captures !{!"address"} + br label %out + +out: + ret void +} + +define void @hoist_captures_different(i1 %c, ptr %x, ptr %y) { +; CHECK-LABEL: @hoist_captures_different( +; CHECK-NEXT: if: +; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META10:![0-9]+]] +; CHECK-NEXT: ret void +; +if: + br i1 %c, label %then, label %else + +then: + store ptr %x, ptr %y, !captures !{!"address"} + br label %out + +else: + store ptr %x, ptr %y, !captures !{!"read_provenance"} + br label %out + +out: + ret void +} + +define void @hoist_captures_overlap(i1 %c, ptr %x, ptr %y) { +; CHECK-LABEL: @hoist_captures_overlap( +; CHECK-NEXT: if: +; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META10]] +; CHECK-NEXT: ret void +; +if: + br i1 %c, label %then, label %else + +then: + store ptr %x, ptr %y, !captures !{!"address"} + br label %out + +else: + store ptr %x, ptr %y, !captures !{!"address", !"read_provenance"} + br label %out + +out: + ret void +} + +define void @hoist_captures_subsume1(i1 %c, ptr %x, ptr %y) { +; CHECK-LABEL: @hoist_captures_subsume1( +; CHECK-NEXT: if: +; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META9]] +; CHECK-NEXT: ret void +; +if: + br i1 %c, label %then, label %else + +then: + store ptr %x, ptr %y, !captures !{!"address_is_null"} + br label %out + +else: + store ptr %x, ptr %y, !captures !{!"address"} + br label %out + +out: + ret void +} + +define void @hoist_captures_subsume2(i1 %c, ptr %x, ptr %y) { +; CHECK-LABEL: @hoist_captures_subsume2( +; CHECK-NEXT: if: +; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8, !captures [[META11:![0-9]+]] +; CHECK-NEXT: ret void +; +if: + br i1 %c, label %then, label %else + +then: + store ptr %x, ptr %y, !captures !{!"provenance"} + br label %out + +else: + store ptr %x, ptr %y, !captures !{!"read_provenance"} + br label %out + +out: + ret void +} + +define void @hoist_captures_full_set(i1 %c, ptr %x, ptr %y) { +; CHECK-LABEL: @hoist_captures_full_set( +; CHECK-NEXT: if: +; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8 +; CHECK-NEXT: ret void +; +if: + br i1 %c, label %then, label %else + +then: + store ptr %x, ptr %y, !captures !{!"address"} + br label %out + +else: + store ptr %x, ptr %y, !captures !{!"provenance"} + br label %out + +out: + ret void +} + +define void @hoist_captures_only_one1(i1 %c, ptr %x, ptr %y) { +; CHECK-LABEL: @hoist_captures_only_one1( +; CHECK-NEXT: if: +; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8 +; CHECK-NEXT: ret void +; +if: + br i1 %c, label %then, label %else + +then: + store ptr %x, ptr %y, !captures !{!"address"} + br label %out + +else: + store ptr %x, ptr %y + br label %out + +out: + ret void +} + +define void @hoist_captures_only_one2(i1 %c, ptr %x, ptr %y) { +; CHECK-LABEL: @hoist_captures_only_one2( +; CHECK-NEXT: 
if: +; CHECK-NEXT: store ptr [[X:%.*]], ptr [[Y:%.*]], align 8 +; CHECK-NEXT: ret void +; +if: + br i1 %c, label %then, label %else + +then: + store ptr %x, ptr %y + br label %out + +else: + store ptr %x, ptr %y, !captures !{!"address"} + br label %out + +out: + ret void +} + !0 = !{ i8 0, i8 1 } !1 = !{ i8 3, i8 5 } !2 = !{} @@ -445,4 +613,7 @@ join: ; CHECK: [[META6]] = !{float 2.500000e+00} ; CHECK: [[META7]] = !{i32 5, i32 6} ; CHECK: [[META8]] = !{i32 4, i32 5} +; CHECK: [[META9]] = !{!"address"} +; CHECK: [[META10]] = !{!"address", !"read_provenance"} +; CHECK: [[META11]] = !{!"provenance"} ;. diff --git a/llvm/test/Transforms/SimplifyCFG/nonintegral.ll b/llvm/test/Transforms/SimplifyCFG/nonintegral.ll index 423ac4d1e69c1..1bdd436f01d02 100644 --- a/llvm/test/Transforms/SimplifyCFG/nonintegral.ll +++ b/llvm/test/Transforms/SimplifyCFG/nonintegral.ll @@ -1,12 +1,143 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=simplifycfg -S < %s | FileCheck %s -target datalayout = "ni:1" +target datalayout = "pu1:64:64-pe2:64:64:64:32" -define void @test_01(ptr addrspace(1) align 8 %ptr) { -; CHECK-LABEL: @test_01( -; CHECK-NOT: ptrtoint -; CHECK-NEXT: icmp eq ptr addrspace(1) %ptr, null -; CHECK-NOT: ptrtoint +;; TODO: it would probably be better to just emit a pointer compare against null. +define void @test_default_null_base(ptr addrspace(0) align 8 %ptr) { +; CHECK-LABEL: define void @test_default_null_base( +; CHECK-SAME: ptr align 8 [[PTR:%.*]]) { +; CHECK-NEXT: [[MAGICPTR:%.*]] = ptrtoint ptr [[PTR]] to i64 +; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[MAGICPTR]], 0 +; CHECK-NEXT: br i1 [[COND]], label %[[TRUE2:.*]], label %[[FALSE1:.*]] +; CHECK: [[FALSE1]]: +; CHECK-NEXT: store i64 1, ptr [[PTR]], align 8 +; CHECK-NEXT: store i64 3, ptr [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET:.*]] +; CHECK: [[COMMON_RET]]: +; CHECK-NEXT: ret void +; CHECK: [[TRUE2]]: +; CHECK-NEXT: store i64 2, ptr [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; + %cond1 = icmp eq ptr addrspace(0) %ptr, null + %cond2 = icmp eq ptr addrspace(0) %ptr, null + br i1 %cond1, label %true1, label %false1 + +true1: + br i1 %cond2, label %true2, label %false2 + +false1: + store i64 1, ptr addrspace(0) %ptr, align 8 + br label %true1 + +true2: + store i64 2, ptr addrspace(0) %ptr, align 8 + ret void + +false2: + store i64 3, ptr addrspace(0) %ptr, align 8 + ret void +} + +;; We should not introduce ptrtoint instructions with unstable pointers +define void @test_default_inttoptr_base(ptr addrspace(0) align 8 %ptr) { +; CHECK-LABEL: define void @test_default_inttoptr_base( +; CHECK-SAME: ptr align 8 [[PTR:%.*]]) { +; CHECK-NEXT: [[MAGICPTR:%.*]] = ptrtoint ptr [[PTR]] to i64 +; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[MAGICPTR]], 4 +; CHECK-NEXT: br i1 [[COND]], label %[[TRUE2:.*]], label %[[FALSE1:.*]] +; CHECK: [[FALSE1]]: +; CHECK-NEXT: store i64 1, ptr [[PTR]], align 8 +; CHECK-NEXT: store i64 3, ptr [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET:.*]] +; CHECK: [[COMMON_RET]]: +; CHECK-NEXT: ret void +; CHECK: [[TRUE2]]: +; CHECK-NEXT: store i64 2, ptr [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; + %cond1 = icmp eq ptr addrspace(0) %ptr, inttoptr (i32 4 to ptr addrspace(0)) + %cond2 = icmp eq ptr addrspace(0) %ptr, inttoptr (i32 4 to ptr addrspace(0)) + br i1 %cond1, label %true1, label %false1 + +true1: + br i1 %cond2, label %true2, label %false2 + +false1: + store i64 1, ptr addrspace(0) %ptr, align 8 + br 
label %true1 + +true2: + store i64 2, ptr addrspace(0) %ptr, align 8 + ret void + +false2: + store i64 3, ptr addrspace(0) %ptr, align 8 + ret void +} + +;; We should not introduce ptrtoint instructions with unstable pointers +define void @test_default_mixed_base(ptr addrspace(0) align 8 %ptr) { +; CHECK-LABEL: define void @test_default_mixed_base( +; CHECK-SAME: ptr align 8 [[PTR:%.*]]) { +; CHECK-NEXT: [[COND2:%.*]] = icmp eq ptr [[PTR]], null +; CHECK-NEXT: [[MAGICPTR:%.*]] = ptrtoint ptr [[PTR]] to i64 +; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[MAGICPTR]], 4 +; CHECK-NEXT: br i1 [[COND]], label %[[FALSE2:.*]], label %[[FALSE1:.*]] +; CHECK: [[FALSE1]]: +; CHECK-NEXT: store i64 1, ptr [[PTR]], align 8 +; CHECK-NEXT: br i1 [[COND2]], label %[[TRUE2:.*]], label %[[FALSE2]] +; CHECK: [[COMMON_RET:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[TRUE2]]: +; CHECK-NEXT: store i64 2, ptr [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; CHECK: [[FALSE2]]: +; CHECK-NEXT: store i64 3, ptr [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; + %cond1 = icmp eq ptr addrspace(0) %ptr, inttoptr (i32 4 to ptr addrspace(0)) + %cond2 = icmp eq ptr addrspace(0) %ptr, null + br i1 %cond1, label %true1, label %false1 + +true1: + br i1 %cond2, label %true2, label %false2 + +false1: + store i64 1, ptr addrspace(0) %ptr, align 8 + br label %true1 + +true2: + store i64 2, ptr addrspace(0) %ptr, align 8 + ret void + +false2: + store i64 3, ptr addrspace(0) %ptr, align 8 + ret void +} + +;; We should not introduce ptrtoint instructions with unstable pointers +define void @test_unstable_null_base(ptr addrspace(1) align 8 %ptr) { +; CHECK-LABEL: define void @test_unstable_null_base( +; CHECK-SAME: ptr addrspace(1) align 8 [[PTR:%.*]]) { +; CHECK-NEXT: [[COND1:%.*]] = icmp eq ptr addrspace(1) [[PTR]], null +; CHECK-NEXT: [[COND2:%.*]] = icmp eq ptr addrspace(1) [[PTR]], null +; CHECK-NEXT: br i1 [[COND1]], label %[[TRUE1:.*]], label %[[FALSE1:.*]] +; CHECK: [[TRUE1]]: +; CHECK-NEXT: br i1 [[COND2]], label %[[TRUE2:.*]], label %[[FALSE2:.*]] +; CHECK: [[FALSE1]]: +; CHECK-NEXT: store i64 1, ptr addrspace(1) [[PTR]], align 8 +; CHECK-NEXT: br label %[[TRUE1]] +; CHECK: [[COMMON_RET:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[TRUE2]]: +; CHECK-NEXT: store i64 2, ptr addrspace(1) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; CHECK: [[FALSE2]]: +; CHECK-NEXT: store i64 3, ptr addrspace(1) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; %cond1 = icmp eq ptr addrspace(1) %ptr, null %cond2 = icmp eq ptr addrspace(1) %ptr, null br i1 %cond1, label %true1, label %false1 @@ -26,3 +157,200 @@ false2: store i64 3, ptr addrspace(1) %ptr, align 8 ret void } + +;; We should not introduce ptrtoint instructions with unstable pointers +define void @test_unstable_inttoptr_base(ptr addrspace(1) align 8 %ptr) { +; CHECK-LABEL: define void @test_unstable_inttoptr_base( +; CHECK-SAME: ptr addrspace(1) align 8 [[PTR:%.*]]) { +; CHECK-NEXT: [[COND1:%.*]] = icmp eq ptr addrspace(1) [[PTR]], inttoptr (i32 4 to ptr addrspace(1)) +; CHECK-NEXT: [[COND2:%.*]] = icmp eq ptr addrspace(1) [[PTR]], inttoptr (i32 4 to ptr addrspace(1)) +; CHECK-NEXT: br i1 [[COND1]], label %[[TRUE1:.*]], label %[[FALSE1:.*]] +; CHECK: [[TRUE1]]: +; CHECK-NEXT: br i1 [[COND2]], label %[[TRUE2:.*]], label %[[FALSE2:.*]] +; CHECK: [[FALSE1]]: +; CHECK-NEXT: store i64 1, ptr addrspace(1) [[PTR]], align 8 +; CHECK-NEXT: br label %[[TRUE1]] +; CHECK: [[COMMON_RET:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[TRUE2]]: +; CHECK-NEXT: store 
i64 2, ptr addrspace(1) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; CHECK: [[FALSE2]]: +; CHECK-NEXT: store i64 3, ptr addrspace(1) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; + %cond1 = icmp eq ptr addrspace(1) %ptr, inttoptr (i32 4 to ptr addrspace(1)) + %cond2 = icmp eq ptr addrspace(1) %ptr, inttoptr (i32 4 to ptr addrspace(1)) + br i1 %cond1, label %true1, label %false1 + +true1: + br i1 %cond2, label %true2, label %false2 + +false1: + store i64 1, ptr addrspace(1) %ptr, align 8 + br label %true1 + +true2: + store i64 2, ptr addrspace(1) %ptr, align 8 + ret void + +false2: + store i64 3, ptr addrspace(1) %ptr, align 8 + ret void +} + +;; We should not introduce ptrtoint instructions with unstable pointers +define void @test_unstable_mixed_base(ptr addrspace(1) align 8 %ptr) { +; CHECK-LABEL: define void @test_unstable_mixed_base( +; CHECK-SAME: ptr addrspace(1) align 8 [[PTR:%.*]]) { +; CHECK-NEXT: [[COND1:%.*]] = icmp eq ptr addrspace(1) [[PTR]], inttoptr (i32 4 to ptr addrspace(1)) +; CHECK-NEXT: [[COND2:%.*]] = icmp eq ptr addrspace(1) [[PTR]], null +; CHECK-NEXT: br i1 [[COND1]], label %[[TRUE1:.*]], label %[[FALSE1:.*]] +; CHECK: [[TRUE1]]: +; CHECK-NEXT: br i1 [[COND2]], label %[[TRUE2:.*]], label %[[FALSE2:.*]] +; CHECK: [[FALSE1]]: +; CHECK-NEXT: store i64 1, ptr addrspace(1) [[PTR]], align 8 +; CHECK-NEXT: br label %[[TRUE1]] +; CHECK: [[COMMON_RET:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[TRUE2]]: +; CHECK-NEXT: store i64 2, ptr addrspace(1) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; CHECK: [[FALSE2]]: +; CHECK-NEXT: store i64 3, ptr addrspace(1) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; + %cond1 = icmp eq ptr addrspace(1) %ptr, inttoptr (i32 4 to ptr addrspace(1)) + %cond2 = icmp eq ptr addrspace(1) %ptr, null + br i1 %cond1, label %true1, label %false1 + +true1: + br i1 %cond2, label %true2, label %false2 + +false1: + store i64 1, ptr addrspace(1) %ptr, align 8 + br label %true1 + +true2: + store i64 2, ptr addrspace(1) %ptr, align 8 + ret void + +false2: + store i64 3, ptr addrspace(1) %ptr, align 8 + ret void +} + +;; This transformation is fine for pointers with external state. +;; TODO: it would probably be better to just emit a pointer compare against null. +define void @test_external_null_base(ptr addrspace(2) align 8 %ptr) { +; CHECK-LABEL: define void @test_external_null_base( +; CHECK-SAME: ptr addrspace(2) align 8 [[PTR:%.*]]) { +; CHECK-NEXT: [[MAGICPTR:%.*]] = ptrtoint ptr addrspace(2) [[PTR]] to i64 +; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[MAGICPTR]], 0 +; CHECK-NEXT: br i1 [[COND]], label %[[TRUE2:.*]], label %[[FALSE1:.*]] +; CHECK: [[FALSE1]]: +; CHECK-NEXT: store i64 1, ptr addrspace(2) [[PTR]], align 8 +; CHECK-NEXT: store i64 3, ptr addrspace(2) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET:.*]] +; CHECK: [[COMMON_RET]]: +; CHECK-NEXT: ret void +; CHECK: [[TRUE2]]: +; CHECK-NEXT: store i64 2, ptr addrspace(2) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; + %cond1 = icmp eq ptr addrspace(2) %ptr, null + %cond2 = icmp eq ptr addrspace(2) %ptr, null + br i1 %cond1, label %true1, label %false1 + +true1: + br i1 %cond2, label %true2, label %false2 + +false1: + store i64 1, ptr addrspace(2) %ptr, align 8 + br label %true1 + +true2: + store i64 2, ptr addrspace(2) %ptr, align 8 + ret void + +false2: + store i64 3, ptr addrspace(2) %ptr, align 8 + ret void +} + +;; This transformation is fine for pointers with external state (even with inttoptr). 
+define void @test_external_inttoptr_base(ptr addrspace(2) align 8 %ptr) { +; CHECK-LABEL: define void @test_external_inttoptr_base( +; CHECK-SAME: ptr addrspace(2) align 8 [[PTR:%.*]]) { +; CHECK-NEXT: [[MAGICPTR:%.*]] = ptrtoint ptr addrspace(2) [[PTR]] to i64 +; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[MAGICPTR]], 4 +; CHECK-NEXT: br i1 [[COND]], label %[[TRUE2:.*]], label %[[FALSE1:.*]] +; CHECK: [[FALSE1]]: +; CHECK-NEXT: store i64 1, ptr addrspace(2) [[PTR]], align 8 +; CHECK-NEXT: store i64 3, ptr addrspace(2) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET:.*]] +; CHECK: [[COMMON_RET]]: +; CHECK-NEXT: ret void +; CHECK: [[TRUE2]]: +; CHECK-NEXT: store i64 2, ptr addrspace(2) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; + %cond1 = icmp eq ptr addrspace(2) %ptr, inttoptr (i32 4 to ptr addrspace(2)) + %cond2 = icmp eq ptr addrspace(2) %ptr, inttoptr (i32 4 to ptr addrspace(2)) + br i1 %cond1, label %true1, label %false1 + +true1: + br i1 %cond2, label %true2, label %false2 + +false1: + store i64 1, ptr addrspace(2) %ptr, align 8 + br label %true1 + +true2: + store i64 2, ptr addrspace(2) %ptr, align 8 + ret void + +false2: + store i64 3, ptr addrspace(2) %ptr, align 8 + ret void +} + +;; This transformation is fine for pointers with external state (even with inttoptr). +define void @test_external_mixed_base(ptr addrspace(2) align 8 %ptr) { +; CHECK-LABEL: define void @test_external_mixed_base( +; CHECK-SAME: ptr addrspace(2) align 8 [[PTR:%.*]]) { +; CHECK-NEXT: [[COND2:%.*]] = icmp eq ptr addrspace(2) [[PTR]], null +; CHECK-NEXT: [[MAGICPTR:%.*]] = ptrtoint ptr addrspace(2) [[PTR]] to i64 +; CHECK-NEXT: [[COND:%.*]] = icmp eq i64 [[MAGICPTR]], 4 +; CHECK-NEXT: br i1 [[COND]], label %[[FALSE2:.*]], label %[[FALSE1:.*]] +; CHECK: [[FALSE1]]: +; CHECK-NEXT: store i64 1, ptr addrspace(2) [[PTR]], align 8 +; CHECK-NEXT: br i1 [[COND2]], label %[[TRUE2:.*]], label %[[FALSE2]] +; CHECK: [[COMMON_RET:.*]]: +; CHECK-NEXT: ret void +; CHECK: [[TRUE2]]: +; CHECK-NEXT: store i64 2, ptr addrspace(2) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; CHECK: [[FALSE2]]: +; CHECK-NEXT: store i64 3, ptr addrspace(2) [[PTR]], align 8 +; CHECK-NEXT: br label %[[COMMON_RET]] +; + %cond1 = icmp eq ptr addrspace(2) %ptr, inttoptr (i32 4 to ptr addrspace(2)) + %cond2 = icmp eq ptr addrspace(2) %ptr, null + br i1 %cond1, label %true1, label %false1 + +true1: + br i1 %cond2, label %true2, label %false2 + +false1: + store i64 1, ptr addrspace(2) %ptr, align 8 + br label %true1 + +true2: + store i64 2, ptr addrspace(2) %ptr, align 8 + ret void + +false2: + store i64 3, ptr addrspace(2) %ptr, align 8 + ret void +} diff --git a/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll b/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll index fe2e897125eb8..9d78b97c204a8 100644 --- a/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals ; RUN: opt < %s -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -S | FileCheck %s ; int foo1_with_default(int a) { @@ -11,20 +11,20 @@ ; return 4; ; } -define i32 @foo1_with_default(i32 %a) { +define i32 @foo1_with_default(i32 %a) !prof !0 { ; CHECK-LABEL: @foo1_with_default( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 
[[A:%.*]], 20 -; CHECK-NEXT: [[SWITCH_SELECT:%.*]] = select i1 [[SWITCH_SELECTCMP]], i32 2, i32 4 +; CHECK-NEXT: [[SWITCH_SELECT:%.*]] = select i1 [[SWITCH_SELECTCMP]], i32 2, i32 4, !prof [[PROF1:![0-9]+]] ; CHECK-NEXT: [[SWITCH_SELECTCMP1:%.*]] = icmp eq i32 [[A]], 10 -; CHECK-NEXT: [[SWITCH_SELECT2:%.*]] = select i1 [[SWITCH_SELECTCMP1]], i32 10, i32 [[SWITCH_SELECT]] +; CHECK-NEXT: [[SWITCH_SELECT2:%.*]] = select i1 [[SWITCH_SELECTCMP1]], i32 10, i32 [[SWITCH_SELECT]], !prof [[PROF2:![0-9]+]] ; CHECK-NEXT: ret i32 [[SWITCH_SELECT2]] ; entry: switch i32 %a, label %sw.epilog [ i32 10, label %sw.bb i32 20, label %sw.bb1 - ] + ], !prof !1 sw.bb: br label %return @@ -41,20 +41,20 @@ return: } ; Same as above, but both cases have the same value. -define i32 @same_value(i32 %a) { +define i32 @same_value(i32 %a) !prof !0 { ; CHECK-LABEL: @same_value( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SWITCH_SELECTCMP_CASE1:%.*]] = icmp eq i32 [[A:%.*]], 10 ; CHECK-NEXT: [[SWITCH_SELECTCMP_CASE2:%.*]] = icmp eq i32 [[A]], 20 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = or i1 [[SWITCH_SELECTCMP_CASE1]], [[SWITCH_SELECTCMP_CASE2]] -; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i32 10, i32 4 +; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i32 10, i32 4, !prof [[PROF3:![0-9]+]] ; CHECK-NEXT: ret i32 [[TMP0]] ; entry: switch i32 %a, label %sw.epilog [ i32 10, label %sw.bb i32 20, label %sw.bb - ] + ], !prof !1 sw.bb: br label %return @@ -67,17 +67,17 @@ return: ret i32 %retval.0 } -define i1 @switch_to_select_same2_case_results_different_default(i8 %0) { +define i1 @switch_to_select_same2_case_results_different_default(i8 %0) !prof !0 { ; CHECK-LABEL: @switch_to_select_same2_case_results_different_default( ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i8 [[TMP0:%.*]], -5 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i8 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %0, label %2 [ i8 4, label %3 i8 0, label %3 - ] + ], !prof !1 2: br label %3 @@ -87,18 +87,18 @@ define i1 @switch_to_select_same2_case_results_different_default(i8 %0) { ret i1 %4 } -define i1 @switch_to_select_same2_case_results_different_default_and_positive_offset_for_case(i8 %0) { +define i1 @switch_to_select_same2_case_results_different_default_and_positive_offset_for_case(i8 %0) !prof !0 { ; CHECK-LABEL: @switch_to_select_same2_case_results_different_default_and_positive_offset_for_case( ; CHECK-NEXT: [[TMP2:%.*]] = sub i8 [[TMP0:%.*]], 43 ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i8 [[TMP2]], -3 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i8 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP3]] ; switch i8 %0, label %2 [ i8 43, label %3 i8 45, label %3 - ] + ], !prof !1 2: br label %3 @@ -108,20 +108,20 @@ define i1 @switch_to_select_same2_case_results_different_default_and_positive_of ret i1 %4 } -define i8 @switch_to_select_same2_case_results_different_default_and_negative_offset_for_case(i32 %i) { +define i8 @switch_to_select_same2_case_results_different_default_and_negative_offset_for_case(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_same2_case_results_different_default_and_negative_offset_for_case( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = sub i32 
[[I:%.*]], -5 ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i32 [[TMP0]], -3 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i8 3, i8 42 +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i8 3, i8 42, !prof [[PROF3]] ; CHECK-NEXT: ret i8 [[TMP1]] ; entry: switch i32 %i, label %default [ i32 -3, label %end i32 -5, label %end - ] + ], !prof !1 default: br label %end @@ -131,12 +131,12 @@ end: ret i8 %t0 } -define i1 @switch_to_select_same4_case_results_different_default(i32 %i) { +define i1 @switch_to_select_same4_case_results_different_default(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_same4_case_results_different_default( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i32 [[I:%.*]], -7 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF4:![0-9]+]] ; CHECK-NEXT: ret i1 [[TMP0]] ; entry: @@ -145,7 +145,7 @@ entry: i32 2, label %lor.end i32 4, label %lor.end i32 6, label %lor.end - ] + ], !prof !2 lor.rhs: br label %lor.end @@ -155,12 +155,12 @@ lor.end: ret i1 %0 } -define i1 @switch_to_select_same4_case_results_different_default_alt_bitmask(i32 %i) { +define i1 @switch_to_select_same4_case_results_different_default_alt_bitmask(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_same4_case_results_different_default_alt_bitmask( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i32 [[I:%.*]], -11 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP0]] ; entry: @@ -169,7 +169,7 @@ entry: i32 2, label %lor.end i32 8, label %lor.end i32 10, label %lor.end - ] + ], !prof !2 lor.rhs: br label %lor.end @@ -179,13 +179,13 @@ lor.end: ret i1 %0 } -define i1 @switch_to_select_same4_case_results_different_default_positive_offset(i32 %i) { +define i1 @switch_to_select_same4_case_results_different_default_positive_offset(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_same4_case_results_different_default_positive_offset( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = sub i32 [[I:%.*]], 2 ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i32 [[TMP0]], -11 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP1]] ; entry: @@ -194,7 +194,7 @@ entry: i32 4, label %lor.end i32 10, label %lor.end i32 12, label %lor.end - ] + ], !prof !2 lor.rhs: br label %lor.end @@ -204,7 +204,7 @@ lor.end: ret i1 %0 } -define i1 @switch_to_select_invalid_mask(i32 %i) { +define i1 @switch_to_select_invalid_mask(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_invalid_mask( ; CHECK-NEXT: entry: ; CHECK-NEXT: switch i32 [[I:%.*]], label [[LOR_RHS:%.*]] [ @@ -212,7 +212,7 @@ define i1 @switch_to_select_invalid_mask(i32 %i) { ; CHECK-NEXT: i32 4, label [[LOR_END]] ; CHECK-NEXT: i32 10, label [[LOR_END]] ; CHECK-NEXT: i32 12, label [[LOR_END]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF5:![0-9]+]] ; CHECK: lor.rhs: ; CHECK-NEXT: br label [[LOR_END]] ; CHECK: lor.end: @@ -225,7 +225,7 @@ 
entry: i32 4, label %lor.end i32 10, label %lor.end i32 12, label %lor.end - ] + ], !prof !2 lor.rhs: br label %lor.end @@ -235,14 +235,14 @@ lor.end: ret i1 %0 } -define i1 @switch_to_select_nonpow2_cases(i32 %i) { +define i1 @switch_to_select_nonpow2_cases(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_nonpow2_cases( ; CHECK-NEXT: entry: ; CHECK-NEXT: switch i32 [[I:%.*]], label [[LOR_RHS:%.*]] [ ; CHECK-NEXT: i32 0, label [[LOR_END:%.*]] ; CHECK-NEXT: i32 2, label [[LOR_END]] ; CHECK-NEXT: i32 4, label [[LOR_END]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF6:![0-9]+]] ; CHECK: lor.rhs: ; CHECK-NEXT: br label [[LOR_END]] ; CHECK: lor.end: @@ -254,7 +254,7 @@ entry: i32 0, label %lor.end i32 2, label %lor.end i32 4, label %lor.end - ] + ], !prof !3 lor.rhs: br label %lor.end @@ -265,7 +265,7 @@ lor.end: } ; TODO: we can produce the optimal code when there is no default also -define i8 @switch_to_select_two_case_results_no_default(i32 %i) { +define i8 @switch_to_select_two_case_results_no_default(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_two_case_results_no_default( ; CHECK-NEXT: entry: ; CHECK-NEXT: switch i32 [[I:%.*]], label [[DEFAULT:%.*]] [ @@ -273,7 +273,7 @@ define i8 @switch_to_select_two_case_results_no_default(i32 %i) { ; CHECK-NEXT: i32 2, label [[END]] ; CHECK-NEXT: i32 4, label [[CASE3:%.*]] ; CHECK-NEXT: i32 6, label [[CASE3]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF5]] ; CHECK: case3: ; CHECK-NEXT: br label [[END]] ; CHECK: default: @@ -288,7 +288,7 @@ entry: i32 2, label %case2 i32 4, label %case3 i32 6, label %case4 - ] + ], !prof !2 case1: br label %end @@ -310,12 +310,12 @@ end: ret i8 %t0 } -define i1 @no_range(i8 %f) { +define i1 @no_range(i8 %f) !prof !0 { ; CHECK-LABEL: @no_range( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 60 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 60 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF7:![0-9]+]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -335,7 +335,7 @@ define i1 @no_range(i8 %f) { i8 253, label %bb2 i8 254, label %bb2 i8 255, label %bb2 - ] + ], !prof !4 bb1: br label %bb3 bb2: @@ -345,7 +345,7 @@ bb3: ret i1 %phi } -define i1 @negative_no_range(i8 %f) { +define i1 @negative_no_range(i8 %f) !prof !0 { ; CHECK-LABEL: @negative_no_range( ; CHECK-NEXT: switch i8 [[F:%.*]], label [[BB3:%.*]] [ ; CHECK-NEXT: i8 52, label [[BB2:%.*]] @@ -364,12 +364,12 @@ define i1 @negative_no_range(i8 %f) { ; CHECK-NEXT: i8 -3, label [[BB2]] ; CHECK-NEXT: i8 -2, label [[BB2]] ; CHECK-NEXT: i8 -1, label [[BB2]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF8:![0-9]+]] ; CHECK: bb2: ; CHECK-NEXT: br label [[BB3]] ; CHECK: bb3: -; CHECK-NEXT: [[_0_SROA_0_0:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] -; CHECK-NEXT: ret i1 [[_0_SROA_0_0]] +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] ; switch i8 %f, label %bb1 [ i8 52, label %bb2 @@ -388,7 +388,7 @@ define i1 @negative_no_range(i8 %f) { i8 253, label %bb2 i8 254, label %bb2 i8 255, label %bb2 - ] + ], !prof !4 bb1: br label %bb3 bb2: @@ -400,18 +400,19 @@ bb3: ; Using ranges. 
-define i1 @range0to4odd(i8 range(i8 0, 4) %f) { +define i1 @range0to4odd(i8 range(i8 0, 4) %f) !prof !0 { ; CHECK-LABEL: @range0to4odd( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ i8 1, label %bb2 i8 3, label %bb2 - ] + ], !prof !1 + bb1: br label %bb3 bb2: @@ -421,18 +422,18 @@ bb3: ret i1 %phi } -define i1 @range1to4odd(i8 range(i8 1, 4) %f) { +define i1 @range1to4odd(i8 range(i8 1, 4) %f) !prof !0 { ; CHECK-LABEL: @range1to4odd( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ i8 1, label %bb2 i8 3, label %bb2 - ] + ], !prof !1 bb1: br label %bb3 bb2: @@ -442,12 +443,12 @@ bb3: ret i1 %phi } -define i1 @range0to8odd(i8 range(i8 0, 8) %f) { +define i1 @range0to8odd(i8 range(i8 0, 8) %f) !prof !0 { ; CHECK-LABEL: @range0to8odd( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -455,7 +456,7 @@ define i1 @range0to8odd(i8 range(i8 0, 8) %f) { i8 3, label %bb2 i8 5, label %bb2 i8 7, label %bb2 - ] + ], !prof !2 bb1: br label %bb3 bb2: @@ -465,12 +466,12 @@ bb3: ret i1 %phi } -define i1 @range0to8most_significant_bit(i8 range(i8 0, 8) %f) { +define i1 @range0to8most_significant_bit(i8 range(i8 0, 8) %f) !prof !0 { ; CHECK-LABEL: @range0to8most_significant_bit( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 4 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -478,7 +479,7 @@ define i1 @range0to8most_significant_bit(i8 range(i8 0, 8) %f) { i8 5, label %bb2 i8 6, label %bb2 i8 7, label %bb2 - ] + ], !prof !2 bb1: br label %bb3 bb2: @@ -488,12 +489,12 @@ bb3: ret i1 %phi } -define i1 @range0to15_middle_two_bits(i8 range(i8 0, 16) %f) { +define i1 @range0to15_middle_two_bits(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @range0to15_middle_two_bits( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 6 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 6 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -501,7 +502,8 @@ define i1 @range0to15_middle_two_bits(i8 range(i8 0, 16) %f) { i8 7, label %bb2 i8 14, label %bb2 i8 15, label %bb2 - ] + ], !prof !2 + bb1: br label %bb3 bb2: @@ -511,24 +513,25 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15( ; CHECK-NEXT: switch i8 [[F:%.*]], label [[BB3:%.*]] [ ; CHECK-NEXT: i8 6, 
label [[BB2:%.*]] ; CHECK-NEXT: i8 7, label [[BB2]] ; CHECK-NEXT: i8 14, label [[BB2]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF6]] ; CHECK: bb2: ; CHECK-NEXT: br label [[BB3]] ; CHECK: bb3: -; CHECK-NEXT: [[_0_SROA_0_0:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] -; CHECK-NEXT: ret i1 [[_0_SROA_0_0]] +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] ; switch i8 %f, label %bb1 [ i8 6, label %bb2 i8 7, label %bb2 i8 14, label %bb2 - ] + ], !prof !3 + bb1: br label %bb3 bb2: @@ -538,19 +541,19 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_pow_2(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_pow_2(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15_pow_2( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = sub i8 [[F:%.*]], 6 ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i8 [[TMP0]], -2 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i8 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP1]] ; switch i8 %f, label %bb1 [ i8 6, label %bb2 i8 7, label %bb2 - ] + ], !prof !1 bb1: br label %bb3 bb2: @@ -560,19 +563,19 @@ bb3: ret i1 %phi } -define i1 @negative_range0to5even(i8 range(i8 0, 5) %f) { +define i1 @negative_range0to5even(i8 range(i8 0, 5) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to5even( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = sub i8 [[F:%.*]], 2 ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i8 [[TMP0]], -3 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i8 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP1]] ; switch i8 %f, label %bb1 [ i8 2, label %bb2 i8 4, label %bb2 - ] + ], !prof !1 bb1: br label %bb3 bb2: @@ -582,16 +585,17 @@ bb3: ret i1 %phi } -define i1 @range0to15_corner_case(i8 range(i8 0, 16) %f) { +define i1 @range0to15_corner_case(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @range0to15_corner_case( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[COND:%.*]] = icmp eq i8 [[F:%.*]], 15 -; CHECK-NEXT: [[DOT:%.*]] = select i1 [[COND]], i1 true, i1 false +; CHECK-NEXT: [[DOT:%.*]] = select i1 [[COND]], i1 true, i1 false, !prof [[PROF9:![0-9]+]] ; CHECK-NEXT: ret i1 [[DOT]] ; switch i8 %f, label %bb1 [ i8 15, label %bb2 - ] + ], !prof !5 + bb1: br label %bb3 bb2: @@ -601,19 +605,19 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_corner_case(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_corner_case(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15_corner_case( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[SWITCH_SELECTCMP_CASE1:%.*]] = icmp eq i8 [[F:%.*]], 15 ; CHECK-NEXT: [[SWITCH_SELECTCMP_CASE2:%.*]] = icmp eq i8 [[F]], 8 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = or i1 [[SWITCH_SELECTCMP_CASE1]], [[SWITCH_SELECTCMP_CASE2]] -; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP0]] ; switch i8 %f, label %bb1 [ i8 15, label %bb2 - i8 8, label %bb2 - ] + i8 8, label %bb2 + ], !prof !1 bb1: br label %bb3 bb2: @@ -626,12 +630,12 @@ bb3: ; Out of range scenarios. Check if the cases, that have a value out of range ; are eliminated and the optimization is performed. 
-define i1 @range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) { +define i1 @range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @range0to15_out_of_range_non_prime( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 6 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 6 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -640,7 +644,7 @@ define i1 @range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) { i8 14, label %bb2 i8 15, label %bb2 i8 22, label %bb2 - ] + ], !prof !6 bb1: br label %bb3 bb2: @@ -650,12 +654,12 @@ bb3: ret i1 %phi } -define i1 @range0to15_out_of_range_non_prime_more(i8 range(i8 0, 16) %f) { +define i1 @range0to15_out_of_range_non_prime_more(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @range0to15_out_of_range_non_prime_more( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 6 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 6 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -665,7 +669,7 @@ define i1 @range0to15_out_of_range_non_prime_more(i8 range(i8 0, 16) %f) { i8 15, label %bb2 i8 22, label %bb2 i8 23, label %bb2 - ] + ], !prof !7 bb1: br label %bb3 bb2: @@ -675,25 +679,25 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15_out_of_range_non_prime( ; CHECK-NEXT: switch i8 [[F:%.*]], label [[BB3:%.*]] [ ; CHECK-NEXT: i8 6, label [[BB2:%.*]] ; CHECK-NEXT: i8 14, label [[BB2]] ; CHECK-NEXT: i8 15, label [[BB2]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF6]] ; CHECK: bb2: ; CHECK-NEXT: br label [[BB3]] ; CHECK: bb3: -; CHECK-NEXT: [[TMP2:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] -; CHECK-NEXT: ret i1 [[TMP2]] +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] ; switch i8 %f, label %bb1 [ i8 6, label %bb2 i8 14, label %bb2 i8 15, label %bb2 i8 23, label %bb2 - ] + ], !prof !2 bb1: br label %bb3 bb2: @@ -703,25 +707,25 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_out_of_range(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_out_of_range(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15_out_of_range( ; CHECK-NEXT: switch i8 [[F:%.*]], label [[BB3:%.*]] [ ; CHECK-NEXT: i8 6, label [[BB2:%.*]] ; CHECK-NEXT: i8 7, label [[BB2]] ; CHECK-NEXT: i8 14, label [[BB2]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF6]] ; CHECK: bb2: ; CHECK-NEXT: br label [[BB3]] ; CHECK: bb3: -; CHECK-NEXT: [[_0_SROA_0_0:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] -; CHECK-NEXT: ret i1 [[_0_SROA_0_0]] +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] ; switch i8 %f, label %bb1 [ i8 6, label %bb2 i8 7, label %bb2 i8 14, label %bb2 - i8 150, label %bb2 - ] + i8 -106, label %bb2 + ], !prof !2 bb1: br label %bb3 bb2: @@ -731,7 +735,7 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_all_out_of_range(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_all_out_of_range(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: 
@negative_range0to15_all_out_of_range( ; CHECK-NEXT: bb1: ; CHECK-NEXT: ret i1 false @@ -741,7 +745,7 @@ define i1 @negative_range0to15_all_out_of_range(i8 range(i8 0, 16) %f) { i8 23, label %bb2 i8 30, label %bb2 i8 31, label %bb2 - ] + ], !prof !2 bb1: br label %bb3 bb2: @@ -750,3 +754,43 @@ bb3: %phi = phi i1 [ false, %bb1 ], [ true, %bb2 ] ret i1 %phi } + +define i32 @negative_constfold_select() { +; CHECK-LABEL: @negative_constfold_select( +; CHECK-NEXT: entry: +; CHECK-NEXT: ret i32 poison +; +entry: + switch i32 poison, label %default [ + i32 0, label %bb + i32 2, label %bb + ] + +bb: + br label %default + +default: + %ret = phi i32 [ poison, %entry ], [ poison, %bb ] + ret i32 %ret +} + +!0 = !{!"function_entry_count", i64 1000} +!1 = !{!"branch_weights", i32 3, i32 5, i32 7} +!2 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13} +!3 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11} +!4 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13, i32 17, i32 19, i32 23, i32 29, i32 31, i32 37, i32 41, i32 43, i32 47, i32 53, i32 59, i32 61} +!5 = !{!"branch_weights", i32 3, i32 5} +!6 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13, i32 17} +!7 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13, i32 17, i32 19} +;. +; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF1]] = !{!"branch_weights", i32 7, i32 8} +; CHECK: [[PROF2]] = !{!"branch_weights", i32 5, i32 10} +; CHECK: [[PROF3]] = !{!"branch_weights", i32 12, i32 3} +; CHECK: [[PROF4]] = !{!"branch_weights", i32 36, i32 3} +; CHECK: [[PROF5]] = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13} +; CHECK: [[PROF6]] = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11} +; CHECK: [[PROF7]] = !{!"branch_weights", i32 496, i32 3} +; CHECK: [[PROF8]] = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13, i32 17, i32 19, i32 23, i32 29, i32 31, i32 37, i32 41, i32 43, i32 47, i32 53, i32 59, i32 61} +; CHECK: [[PROF9]] = !{!"branch_weights", i32 5, i32 3} +;. 
diff --git a/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll b/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll index 336fc5e14d758..8103124e3e5a6 100644 --- a/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -S -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -switch-range-to-icmp < %s | FileCheck %s -target datalayout="p:40:64:64:32" +target datalayout="p:40:64:64:32-pe200:64:64:64:32-pu201:64:64:64:32" declare void @foo1() @@ -89,6 +89,63 @@ F: ; preds = %0 ret void } +; We also allow the transformation for pointers with external state +define void @test1_ptr_external_state(ptr addrspace(200) %V) { +; CHECK-LABEL: @test1_ptr_external_state( +; CHECK-NEXT: [[MAGICPTR:%.*]] = ptrtoint ptr addrspace(200) [[V:%.*]] to i64 +; CHECK-NEXT: switch i64 [[MAGICPTR]], label [[F:%.*]] [ +; CHECK-NEXT: i64 17, label [[T:%.*]] +; CHECK-NEXT: i64 4, label [[T]] +; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void +; CHECK: T: +; CHECK-NEXT: call void @foo1() +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: F: +; CHECK-NEXT: call void @foo2() +; CHECK-NEXT: br label [[COMMON_RET]] +; + %C1 = icmp eq ptr addrspace(200) %V, inttoptr (i32 4 to ptr addrspace(200)) + %C2 = icmp eq ptr addrspace(200) %V, inttoptr (i32 17 to ptr addrspace(200)) + %CN = or i1 %C1, %C2 ; [#uses=1] + br i1 %CN, label %T, label %F +T: ; preds = %0 + call void @foo1( ) + ret void +F: ; preds = %0 + call void @foo2( ) + ret void +} + +; But it is not permitted for unstable pointer representations +define void @test1_ptr_unstable(ptr addrspace(201) %V) { +; CHECK-LABEL: @test1_ptr_unstable( +; CHECK-NEXT: [[C1:%.*]] = icmp eq ptr addrspace(201) [[V:%.*]], inttoptr (i32 4 to ptr addrspace(201)) +; CHECK-NEXT: [[C2:%.*]] = icmp eq ptr addrspace(201) [[V]], inttoptr (i32 17 to ptr addrspace(201)) +; CHECK-NEXT: [[CN:%.*]] = or i1 [[C1]], [[C2]] +; CHECK-NEXT: br i1 [[CN]], label [[T:%.*]], label [[F:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void +; CHECK: T: +; CHECK-NEXT: call void @foo1() +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: F: +; CHECK-NEXT: call void @foo2() +; CHECK-NEXT: br label [[COMMON_RET]] +; + %C1 = icmp eq ptr addrspace(201) %V, inttoptr (i32 4 to ptr addrspace(201)) + %C2 = icmp eq ptr addrspace(201) %V, inttoptr (i32 17 to ptr addrspace(201)) + %CN = or i1 %C1, %C2 ; [#uses=1] + br i1 %CN, label %T, label %F +T: ; preds = %0 + call void @foo1( ) + ret void +F: ; preds = %0 + call void @foo2( ) + ret void +} + define void @test2(i32 %V) { ; CHECK-LABEL: @test2( ; CHECK-NEXT: switch i32 [[V:%.*]], label [[T:%.*]] [ diff --git a/llvm/test/Transforms/VectorCombine/AArch64/combine-shuffle-ext.ll b/llvm/test/Transforms/VectorCombine/AArch64/combine-shuffle-ext.ll index 6341c8945247d..1503a1b51d256 100644 --- a/llvm/test/Transforms/VectorCombine/AArch64/combine-shuffle-ext.ll +++ b/llvm/test/Transforms/VectorCombine/AArch64/combine-shuffle-ext.ll @@ -14,9 +14,9 @@ define <4 x i32> @load_i32_zext_to_v4i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; 
CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -36,9 +36,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_both_nneg(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext nneg <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext nneg <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -58,9 +58,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_inner_nneg(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext nneg <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext nneg <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -80,9 +80,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_outer_nneg(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -102,9 +102,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_inner_nneg_outer_sext(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext nneg <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext nneg <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> 
+; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -125,9 +125,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_clobber_after_load(ptr %di) { ; CHECK-NEXT: call void @use.i32(i32 0) ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -148,9 +148,9 @@ define <4 x i32> @load_i32_sext_zext_to_v4i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -170,9 +170,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_load_other_users(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: call void @use.i32(i32 [[L]]) ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; @@ -194,9 +194,9 @@ define <4 x i32> @load_i32_zext_to_v4i32_ins_other_users(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: call void @use.v2i32(<2 x i32> [[VEC_INS]]) ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; @@ -218,9 +218,9 @@ define <4 x i32> 
@load_i32_zext_to_v4i32_bc_other_users(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: call void @use.v8i8(<8 x i8> [[VEC_BC]]) ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; @@ -266,10 +266,10 @@ define <4 x i32> @load_i32_zext_to_v4i32_shuffle_other_users(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> -; CHECK-NEXT: call void @use.v8i16(<4 x i16> [[VEC_SHUFFLE]]) +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[E_1]] to <4 x i32> +; CHECK-NEXT: call void @use.v8i16(<4 x i16> [[E_1]]) ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -290,9 +290,9 @@ define <8 x i32> @load_i64_zext_to_v8i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[DI]], align 8 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i64> , i64 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i64> [[VEC_INS]] to <16 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <16 x i8> [[VEC_BC]] to <16 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <16 x i16> [[EXT_1]], <16 x i16> poison, <8 x i32> -; CHECK-NEXT: [[OUTER_EXT:%.*]] = zext nneg <8 x i16> [[VEC_SHUFFLE]] to <8 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <16 x i8> [[VEC_BC]], <16 x i8> poison, <8 x i32> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <8 x i8> [[VEC_SHUFFLE]] to <8 x i16> +; CHECK-NEXT: [[OUTER_EXT:%.*]] = zext nneg <8 x i16> [[EXT_1]] to <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[OUTER_EXT]] ; entry: @@ -312,9 +312,9 @@ define <3 x i32> @load_i24_zext_to_v3i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i24, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i24> , i24 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i24> [[VEC_INS]] to <6 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <6 x i8> [[VEC_BC]] to <6 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <6 x i16> [[EXT_1]], <6 x i16> poison, <3 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <3 x i16> [[VEC_SHUFFLE]] to <3 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <6 x i8> [[VEC_BC]], <6 x i8> poison, <3 x i32> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <3 x i8> [[VEC_SHUFFLE]] to <3 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <3 x i16> [[EXT_1]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[EXT_2]] ; entry: @@ -334,9 +334,9 @@ define <4 x i32> @load_i32_insert_idx_1_sext(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load 
i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 1 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -356,9 +356,9 @@ define <4 x i32> @mask_extracts_not_all_elements_1_sext(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -378,9 +378,9 @@ define <4 x i32> @mask_extracts_not_all_elements_2_sext(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = zext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[EXT_1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = zext nneg <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -422,9 +422,9 @@ define <4 x i32> @load_i32_sext_to_v4i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[E_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[E_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[E_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[E_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -444,9 +444,9 @@ define <8 x i32> @load_i64_sext_to_v8i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[DI]], align 8 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i64> , i64 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i64> [[VEC_INS]] to <16 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <16 x i8> [[VEC_BC]] to <16 x i16> -; CHECK-NEXT: 
[[VEC_SHUFFLE:%.*]] = shufflevector <16 x i16> [[EXT_1]], <16 x i16> poison, <8 x i32> -; CHECK-NEXT: [[OUTER_EXT:%.*]] = sext <8 x i16> [[VEC_SHUFFLE]] to <8 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <16 x i8> [[VEC_BC]], <16 x i8> poison, <8 x i32> +; CHECK-NEXT: [[EXT_1:%.*]] = sext <8 x i8> [[VEC_SHUFFLE]] to <8 x i16> +; CHECK-NEXT: [[OUTER_EXT:%.*]] = sext <8 x i16> [[EXT_1]] to <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[OUTER_EXT]] ; entry: @@ -466,9 +466,9 @@ define <3 x i32> @load_i24_sext_to_v3i32(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i24, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i24> , i24 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i24> [[VEC_INS]] to <6 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <6 x i8> [[VEC_BC]] to <6 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <6 x i16> [[EXT_1]], <6 x i16> poison, <3 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <3 x i16> [[VEC_SHUFFLE]] to <3 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <6 x i8> [[VEC_BC]], <6 x i8> poison, <3 x i32> +; CHECK-NEXT: [[EXT_1:%.*]] = sext <3 x i8> [[VEC_SHUFFLE]] to <3 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <3 x i16> [[EXT_1]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[EXT_2]] ; entry: @@ -488,9 +488,9 @@ define <4 x i32> @load_i32_insert_idx_1(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 1 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[EXT_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -510,9 +510,9 @@ define <4 x i32> @mask_extracts_not_all_elements_1(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[EXT_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: @@ -532,9 +532,9 @@ define <4 x i32> @mask_extracts_not_all_elements_2(ptr %di) { ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[DI]], align 4 ; CHECK-NEXT: [[VEC_INS:%.*]] = insertelement <2 x i32> , i32 [[L]], i64 0 ; CHECK-NEXT: [[VEC_BC:%.*]] = bitcast <2 x i32> [[VEC_INS]] to <8 x i8> -; CHECK-NEXT: [[EXT_1:%.*]] = sext <8 x i8> [[VEC_BC]] to <8 x i16> -; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[EXT_1]], <8 x i16> poison, <4 x i32> -; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[VEC_SHUFFLE]] to <4 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[VEC_BC]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: 
[[EXT_1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: [[EXT_2:%.*]] = sext <4 x i16> [[EXT_1]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[EXT_2]] ; entry: diff --git a/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll b/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll index acbc836ffcab0..ed29719d49493 100644 --- a/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll +++ b/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll @@ -205,8 +205,8 @@ define <8 x i8> @abs_different(<8 x i8> %a) { define <4 x i32> @poison_intrinsic(<2 x i16> %l256) { ; CHECK-LABEL: @poison_intrinsic( ; CHECK-NEXT: [[L266:%.*]] = call <2 x i16> @llvm.abs.v2i16(<2 x i16> [[L256:%.*]], i1 false) -; CHECK-NEXT: [[L267:%.*]] = zext <2 x i16> [[L266]] to <2 x i32> -; CHECK-NEXT: [[L271:%.*]] = shufflevector <2 x i32> [[L267]], <2 x i32> poison, <4 x i32> +; CHECK-NEXT: [[L267:%.*]] = shufflevector <2 x i16> [[L266]], <2 x i16> poison, <4 x i32> +; CHECK-NEXT: [[L271:%.*]] = zext <4 x i16> [[L267]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[L271]] ; %l266 = call <2 x i16> @llvm.abs.v2i16(<2 x i16> %l256, i1 false) @@ -534,9 +534,9 @@ define <4 x i64> @single_zext(<4 x i32> %x) { define <4 x i64> @not_zext(<4 x i32> %x) { ; CHECK-LABEL: @not_zext( -; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[X:%.*]] to <4 x i64> -; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ZEXT]], <4 x i64> poison, <4 x i32> -; CHECK-NEXT: ret <4 x i64> [[REVSHUF]] +; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[X]], <4 x i32> poison, <4 x i32> +; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[REVSHUF:%.*]] to <4 x i64> +; CHECK-NEXT: ret <4 x i64> [[ZEXT]] ; %zext = zext <4 x i32> %x to <4 x i64> %revshuf = shufflevector <4 x i64> %zext, <4 x i64> poison, <4 x i32> @@ -922,10 +922,9 @@ define <4 x i8> @singleop(<4 x i8> %a, <4 x i8> %b) { define <4 x i64> @cast_mismatched_types(<4 x i32> %x) { ; CHECK-LABEL: @cast_mismatched_types( -; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <2 x i32> -; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i32> [[SHUF]] to <2 x i64> -; CHECK-NEXT: [[EXTSHUF:%.*]] = shufflevector <2 x i64> [[ZEXT]], <2 x i64> poison, <4 x i32> -; CHECK-NEXT: ret <4 x i64> [[EXTSHUF]] +; CHECK-SAME: <4 x i32> [[X:%.*]]) { +; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[X]] to <4 x i64> +; CHECK-NEXT: ret <4 x i64> [[ZEXT]] ; %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <2 x i32> %zext = zext <2 x i32> %shuf to <2 x i64> diff --git a/llvm/test/Transforms/VectorCombine/AMDGPU/narrow-phi-of-shuffles.ll b/llvm/test/Transforms/VectorCombine/AMDGPU/narrow-phi-of-shuffles.ll index 8c504843d87d8..b293976974bf5 100644 --- a/llvm/test/Transforms/VectorCombine/AMDGPU/narrow-phi-of-shuffles.ll +++ b/llvm/test/Transforms/VectorCombine/AMDGPU/narrow-phi-of-shuffles.ll @@ -392,7 +392,7 @@ define <4 x i32> @shuffle_v4i32(<3 x i32> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -427,7 +427,7 @@ define <8 x i32> @shuffle_v8i32(<3 x i32> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: 
-; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -462,7 +462,7 @@ define <16 x i32> @shuffle_v16i32(<3 x i32> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -497,7 +497,7 @@ define <32 x i32> @shuffle_v32i32(<3 x i32> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x i32> [[ARG0]], <3 x i32> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1092,7 +1092,7 @@ define <4 x float> @shuffle_v4f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1127,7 +1127,7 @@ define <6 x float> @shuffle_v6f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1162,7 +1162,7 @@ define <8 x float> @shuffle_v8f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1197,7 +1197,7 @@ define <16 x float> @shuffle_v16f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -1232,7 +1232,7 @@ define <32 x float> @shuffle_v32f32(<3 x float> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x float> [[ARG0]], <3 x float> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x 
float> [[ARG0]], <3 x float> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: diff --git a/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll b/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll index 79e72aaed6082..38c624e942343 100644 --- a/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll +++ b/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll @@ -357,7 +357,7 @@ define <4 x i32> @or_sext_v4i8_to_v4i32_constant_with_loss(<4 x i8> %a) { define <4 x i16> @and_trunc_nuw_nsw_constant(<4 x i32> %a) { ; CHECK-LABEL: @and_trunc_nuw_nsw_constant( ; CHECK-NEXT: [[AND_INNER:%.*]] = and <4 x i32> [[A:%.*]], -; CHECK-NEXT: [[AND:%.*]] = trunc <4 x i32> [[AND_INNER]] to <4 x i16> +; CHECK-NEXT: [[AND:%.*]] = trunc nuw nsw <4 x i32> [[AND_INNER]] to <4 x i16> ; CHECK-NEXT: ret <4 x i16> [[AND]] ; %t1 = trunc nuw nsw <4 x i32> %a to <4 x i16> @@ -368,7 +368,7 @@ define <4 x i16> @and_trunc_nuw_nsw_constant(<4 x i32> %a) { define <4 x i8> @and_trunc_nuw_nsw_minus_constant(<4 x i32> %a) { ; CHECK-LABEL: @and_trunc_nuw_nsw_minus_constant( ; CHECK-NEXT: [[AND_INNER:%.*]] = and <4 x i32> [[A:%.*]], -; CHECK-NEXT: [[AND:%.*]] = trunc <4 x i32> [[AND_INNER]] to <4 x i8> +; CHECK-NEXT: [[AND:%.*]] = trunc nuw <4 x i32> [[AND_INNER]] to <4 x i8> ; CHECK-NEXT: ret <4 x i8> [[AND]] ; %t1 = trunc nuw nsw <4 x i32> %a to <4 x i8> @@ -379,7 +379,7 @@ define <4 x i8> @and_trunc_nuw_nsw_minus_constant(<4 x i32> %a) { define <4 x i8> @and_trunc_nuw_nsw_multiconstant(<4 x i32> %a) { ; CHECK-LABEL: @and_trunc_nuw_nsw_multiconstant( ; CHECK-NEXT: [[AND_INNER:%.*]] = and <4 x i32> [[A:%.*]], -; CHECK-NEXT: [[AND:%.*]] = trunc <4 x i32> [[AND_INNER]] to <4 x i8> +; CHECK-NEXT: [[AND:%.*]] = trunc nuw <4 x i32> [[AND_INNER]] to <4 x i8> ; CHECK-NEXT: ret <4 x i8> [[AND]] ; %t1 = trunc nuw nsw <4 x i32> %a to <4 x i8> @@ -391,7 +391,7 @@ define <4 x i8> @and_trunc_nuw_nsw_multiconstant(<4 x i32> %a) { define <4 x i32> @or_zext_nneg_constant(<4 x i16> %a) { ; CHECK-LABEL: @or_zext_nneg_constant( ; CHECK-NEXT: [[OR_INNER:%.*]] = or <4 x i16> [[A:%.*]], -; CHECK-NEXT: [[OR:%.*]] = zext <4 x i16> [[OR_INNER]] to <4 x i32> +; CHECK-NEXT: [[OR:%.*]] = zext nneg <4 x i16> [[OR_INNER]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[OR]] ; %z1 = zext nneg <4 x i16> %a to <4 x i32> diff --git a/llvm/test/Transforms/VectorCombine/X86/narrow-phi-of-shuffles.ll b/llvm/test/Transforms/VectorCombine/X86/narrow-phi-of-shuffles.ll index 59422e98cbcc6..594017ecf84c3 100644 --- a/llvm/test/Transforms/VectorCombine/X86/narrow-phi-of-shuffles.ll +++ b/llvm/test/Transforms/VectorCombine/X86/narrow-phi-of-shuffles.ll @@ -605,7 +605,7 @@ define <4 x bfloat> @shuffle_v4bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -640,7 +640,7 @@ define <6 x bfloat> @shuffle_v6bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x 
bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -675,7 +675,7 @@ define <8 x bfloat> @shuffle_v8bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -710,7 +710,7 @@ define <16 x bfloat> @shuffle_v16bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -745,7 +745,7 @@ define <32 x bfloat> @shuffle_v32bf16(<3 x bfloat> %arg0, i1 %cond) { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK: [[THEN]]: -; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> +; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <3 x bfloat> [[ARG0]], <3 x bfloat> poison, <3 x i32> ; CHECK-NEXT: tail call void @func0() ; CHECK-NEXT: br label %[[FINALLY:.*]] ; CHECK: [[ELSE]]: @@ -850,7 +850,7 @@ define <4 x half> @shuffle_v4f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> ; CHECK-V1-NEXT: tail call void @func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -866,7 +866,7 @@ define <4 x half> @shuffle_v4f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: @@ -933,7 +933,7 @@ define <6 x half> @shuffle_v6f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> ; CHECK-V1-NEXT: tail call void @func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -949,7 +949,7 @@ define <6 x half> @shuffle_v6f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x 
i32> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: @@ -1016,7 +1016,7 @@ define <8 x half> @shuffle_v8f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> ; CHECK-V1-NEXT: tail call void @func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -1032,7 +1032,7 @@ define <8 x half> @shuffle_v8f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: @@ -1099,7 +1099,7 @@ define <16 x half> @shuffle_v16f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> ; CHECK-V1-NEXT: tail call void @func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -1115,7 +1115,7 @@ define <16 x half> @shuffle_v16f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: @@ -1182,7 +1182,7 @@ define <32 x half> @shuffle_v32f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V1-NEXT: [[ENTRY:.*:]] ; CHECK-V1-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V1: [[THEN]]: -; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V1-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> ; CHECK-V1-NEXT: tail call void @func0() ; CHECK-V1-NEXT: br label %[[FINALLY:.*]] ; CHECK-V1: [[ELSE]]: @@ -1198,7 +1198,7 @@ define <32 x half> @shuffle_v32f16(<3 x half> %arg0, i1 %cond) { ; CHECK-V2-NEXT: [[ENTRY:.*:]] ; CHECK-V2-NEXT: br i1 [[COND]], label %[[THEN:.*]], label %[[ELSE:.*]] ; CHECK-V2: [[THEN]]: -; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> +; CHECK-V2-NEXT: [[TMP0:%.*]] = shufflevector <3 x half> [[ARG0]], <3 x half> poison, <3 x i32> ; CHECK-V2-NEXT: tail call void @func0() ; CHECK-V2-NEXT: br label %[[FINALLY:.*]] ; CHECK-V2: [[ELSE]]: diff --git a/llvm/test/Transforms/VectorCombine/X86/shuffle-of-casts.ll b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-casts.ll index fba4b60ef417b..82a739964c9d0 100644 --- a/llvm/test/Transforms/VectorCombine/X86/shuffle-of-casts.ll +++ b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-casts.ll @@ -342,3 +342,59 @@ define <16 x i32> @concat_sext_zext_v8i16_v16i32(<8 x 
i16> %a0, <8 x i16> %a1) { %r = shufflevector <8 x i32> %x0, <8 x i32> %x1, <16 x i32> ret <16 x i32> %r } + +; Unary shuffles + +define <4 x i16> @unary_shuffle_zext_v8i8_v4i16(<8 x i8> %a0) { +; CHECK-LABEL: define <4 x i16> @unary_shuffle_zext_v8i8_v4i16( +; CHECK-SAME: <8 x i8> [[A0:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[A0]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[X1:%.*]] = zext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: ret <4 x i16> [[X1]] +; + %x1 = zext <8 x i8> %a0 to <8 x i16> + %vec.shuffle = shufflevector <8 x i16> %x1, <8 x i16> poison, <4 x i32> + ret <4 x i16> %vec.shuffle +} + +define <4 x i16> @unary_shuffle_sext_v8i8_v4i16(<8 x i8> %a0) { +; CHECK-LABEL: define <4 x i16> @unary_shuffle_sext_v8i8_v4i16( +; CHECK-SAME: <8 x i8> [[A0:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i8> [[A0]], <8 x i8> poison, <4 x i32> +; CHECK-NEXT: [[X1:%.*]] = sext <4 x i8> [[VEC_SHUFFLE]] to <4 x i16> +; CHECK-NEXT: ret <4 x i16> [[X1]] +; + %x1 = sext <8 x i8> %a0 to <8 x i16> + %vec.shuffle = shufflevector <8 x i16> %x1, <8 x i16> poison, <4 x i32> + ret <4 x i16> %vec.shuffle +} + +; negative - avoid loop with foldBitcastOfShuffle + +define <2 x i32> @unary_shuffle_bitcast_v8i8_v2i32(<8 x i8> %a0) { +; CHECK-LABEL: define <2 x i32> @unary_shuffle_bitcast_v8i8_v2i32( +; CHECK-SAME: <8 x i8> [[A0:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[X1:%.*]] = bitcast <8 x i8> [[A0]] to <2 x i32> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <2 x i32> [[X1]], <2 x i32> poison, <2 x i32> +; CHECK-NEXT: ret <2 x i32> [[VEC_SHUFFLE]] +; + %x1 = bitcast <8 x i8> %a0 to <2 x i32> + %vec.shuffle = shufflevector <2 x i32> %x1, <2 x i32> poison, <2 x i32> + ret <2 x i32> %vec.shuffle +} + +; negative - multiuse + +define <4 x i16> @unary_shuffle_sext_v8i8_v4i16_multiuse(<8 x i8> %a0, ptr %a1) { +; CHECK-LABEL: define <4 x i16> @unary_shuffle_sext_v8i8_v4i16_multiuse( +; CHECK-SAME: <8 x i8> [[A0:%.*]], ptr [[A1:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[X1:%.*]] = sext <8 x i8> [[A0]] to <8 x i16> +; CHECK-NEXT: [[VEC_SHUFFLE:%.*]] = shufflevector <8 x i16> [[X1]], <8 x i16> poison, <4 x i32> +; CHECK-NEXT: store <8 x i16> [[X1]], ptr [[A1]], align 16 +; CHECK-NEXT: ret <4 x i16> [[VEC_SHUFFLE]] +; + %x1 = sext <8 x i8> %a0 to <8 x i16> + %vec.shuffle = shufflevector <8 x i16> %x1, <8 x i16> poison, <4 x i32> + store <8 x i16> %x1, ptr %a1, align 16 + ret <4 x i16> %vec.shuffle +} diff --git a/llvm/test/Unit/CMakeLists.txt b/llvm/test/Unit/CMakeLists.txt new file mode 100644 index 0000000000000..6b0abe199673f --- /dev/null +++ b/llvm/test/Unit/CMakeLists.txt @@ -0,0 +1,5 @@ +add_lit_testsuite(check-llvm-unit "Running lit suite for LLVM unit tests" + ${CMAKE_CURRENT_BINARY_DIR} + EXCLUDE_FROM_CHECK_ALL + DEPENDS UnitTests + ) diff --git a/llvm/test/Verifier/assume-bundles.ll b/llvm/test/Verifier/assume-bundles.ll index d8037b965edb5..728b118c99fb6 100644 --- a/llvm/test/Verifier/assume-bundles.ll +++ b/llvm/test/Verifier/assume-bundles.ll @@ -3,7 +3,7 @@ declare void @llvm.assume(i1) -define void @func(ptr %P, i32 %P1, ptr %P2, ptr %P3) { +define void @func(ptr %P, i32 %P1, ptr %P2, ptr %P3, i1 %cond) { ; CHECK: tags must be valid attribute names ; CHECK: "adazdazd" call void @llvm.assume(i1 true) ["adazdazd"()] @@ -32,5 +32,7 @@ define void @func(ptr %P, i32 %P1, ptr %P2, ptr %P3) { call void @llvm.assume(i1 true) ["separate_storage"(ptr %P, i32 123)] ; CHECK: dereferenceable assumptions should have 2 arguments call void 
@llvm.assume(i1 true) ["align"(ptr %P, i32 4), "dereferenceable"(ptr %P)] +; CHECK: assume with operand bundles must have i1 true condition + call void @llvm.assume(i1 %cond) ["nonnull"(ptr %P)] ret void } diff --git a/llvm/test/Verifier/captures-metadata.ll b/llvm/test/Verifier/captures-metadata.ll new file mode 100644 index 0000000000000..ae08ddd036f16 --- /dev/null +++ b/llvm/test/Verifier/captures-metadata.ll @@ -0,0 +1,37 @@ +; RUN: not opt -passes=verify < %s 2>&1 | FileCheck %s + +; CHECK: !captures metadata can only be applied to store instructions +define void @wrong_instr_type(ptr %x) { + load ptr, ptr %x, !captures !{!"address"} + ret void +} + +; CHECK: captures metadata can only be applied to store with value operand of pointer type +define void @wrong_op_type(i32 %x, ptr %y) { + store i32 %x, ptr %y, !captures !{!"address"} + ret void +} + +; CHECK: !captures metadata cannot be empty +define void @empty(ptr %x, ptr %y) { + store ptr %x, ptr %y, !captures !{} + ret void +} + +; CHECK: !captures metadata must be a list of strings +define void @not_string(ptr %x, ptr %y) { + store ptr %x, ptr %y, !captures !{!{}} + ret void +} + +; CHECK: invalid entry in !captures metadata +define void @invalid_str(ptr %x, ptr %y) { + store ptr %x, ptr %y, !captures !{!"foo"} + ret void +} + +; CHECK: invalid entry in !captures metadata +define void @invalid_none(ptr %x, ptr %y) { + store ptr %x, ptr %y, !captures !{!"none"} + ret void +} diff --git a/llvm/test/Verifier/errno-tbaa-metadata-1.ll b/llvm/test/Verifier/errno-tbaa-metadata-1.ll new file mode 100644 index 0000000000000..0530653309966 --- /dev/null +++ b/llvm/test/Verifier/errno-tbaa-metadata-1.ll @@ -0,0 +1,5 @@ +; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s + +; CHECK: assembly parsed, but does not verify as correct! +; CHECK-NEXT: llvm.errno.tbaa must have at least one operand +!llvm.errno.tbaa = !{} diff --git a/llvm/test/Verifier/errno-tbaa-metadata-2.ll b/llvm/test/Verifier/errno-tbaa-metadata-2.ll new file mode 100644 index 0000000000000..6b2a4c6e8bda7 --- /dev/null +++ b/llvm/test/Verifier/errno-tbaa-metadata-2.ll @@ -0,0 +1,9 @@ +; RUN: not llvm-as < %s -o /dev/null 2>&1 | FileCheck %s + +; CHECK: assembly parsed, but does not verify as correct! 
+; CHECK-NEXT: Malformed struct tag metadata: base and access-type should be non-null and point to Metadata nodes +!llvm.errno.tbaa = !{!0} +!0 = !{!1, i64 0, !1} +!1 = !{!"int", !2, i64 0} +!2 = !{!"omnipotent char", !3, i64 0} +!3 = !{!"Simple C/C++ TBAA"} diff --git a/llvm/test/Verifier/preallocated-invalid.ll b/llvm/test/Verifier/preallocated-invalid.ll index 38ed1067c497d..2c5aff231e1bd 100644 --- a/llvm/test/Verifier/preallocated-invalid.ll +++ b/llvm/test/Verifier/preallocated-invalid.ll @@ -65,13 +65,21 @@ define void @preallocated_one_call() { ret void } -; CHECK: must be a constant +; CHECK: immarg operand has non-immediate parameter define void @preallocated_setup_constant() { %ac = call i32 @blackbox() %cs = call token @llvm.call.preallocated.setup(i32 %ac) ret void } +; CHECK: llvm.call.preallocated.alloc arg index must be a constant +define void @preallocated_arg_constant() { + %ac = call i32 @blackbox() + %cs = call token @llvm.call.preallocated.setup(i32 3) + call token @llvm.call.preallocated.arg(token %cs, i32 %ac) + ret void +} + ; CHECK: must be between 0 and corresponding define void @preallocated_setup_arg_index_in_bounds() { %cs = call token @llvm.call.preallocated.setup(i32 2) diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py index dd3f947b186b3..781240aac94b6 100644 --- a/llvm/test/lit.cfg.py +++ b/llvm/test/lit.cfg.py @@ -48,15 +48,17 @@ # directories. config.excludes = ["Inputs", "CMakeLists.txt", "README.txt", "LICENSE.txt"] -# Exclude llvm-reduce tests for profcheck because we substitute the FileCheck -# binary with a no-op command for profcheck, but llvm-reduce tests have RUN -# commands of the form llvm-reduce --test FileCheck, which explode if we -# substitute FileCheck because llvm-reduce expects FileCheck in these tests. -# It's not really possible to exclude these tests from the command substitution, -# so we just exclude llvm-reduce tests from this config altogether. This should -# be fine though as profcheck config tests are mostly concerned with opt. if config.enable_profcheck: - config.excludes = config.excludes + ["llvm-reduce"] + # Exclude llvm-reduce tests for profcheck because we substitute the FileCheck + # binary with a no-op command for profcheck, but llvm-reduce tests have RUN + # commands of the form llvm-reduce --test FileCheck, which explode if we + # substitute FileCheck because llvm-reduce expects FileCheck in these tests. + # It's not really possible to exclude these tests from the command substitution, + # so we just exclude llvm-reduce tests from this config altogether. This should + # be fine though as profcheck config tests are mostly concerned with opt. + config.excludes.append("llvm-reduce") + # (Issue #161235) Temporarily exclude LoopVectorize. + config.excludes.append("LoopVectorize") # test_source_root: The root path where tests are located. 
config.test_source_root = os.path.dirname(__file__) diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/conflicting-prefixes.ll b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/conflicting-prefixes.ll new file mode 100644 index 0000000000000..fdc53951d6bb0 --- /dev/null +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/conflicting-prefixes.ll @@ -0,0 +1,16 @@ +; RUN: sed 's/RETVAL/1/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKA %s +; RUN: sed 's/RETVAL/2/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKA %s +; RUN: sed 's/RETVAL/3/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKB %s +; RUN: sed 's/RETVAL/4/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKB %s + +define i32 @foo() { + ret i32 RETVAL +} + +define i32 @bar() { + ret i32 100 +} diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/conflicting-prefixes.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/conflicting-prefixes.ll.expected new file mode 100644 index 0000000000000..b3cad11e2ec1d --- /dev/null +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/conflicting-prefixes.ll.expected @@ -0,0 +1,21 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no-generate-body-for-unused-prefixes +; RUN: sed 's/RETVAL/1/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKA %s +; RUN: sed 's/RETVAL/2/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKA %s +; RUN: sed 's/RETVAL/3/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKB %s +; RUN: sed 's/RETVAL/4/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKB %s + +define i32 @foo() { + ret i32 RETVAL +} + +define i32 @bar() { +; CHECK-LABEL: bar: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 100 +; CHECK-NEXT: ret + ret i32 100 +} diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/differing-set-of-functions.ll b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/differing-set-of-functions.ll new file mode 100644 index 0000000000000..6c3c66e1a7229 --- /dev/null +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/differing-set-of-functions.ll @@ -0,0 +1,14 @@ +; RUN: sed 's/FN/foo/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKA %s +; RUN: sed 's/FN/foo/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKB %s +; RUN: sed 's/FN/bar/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKC %s + +define i32 @FN() { + ret i32 1 +} + +define i32 @common() { + ret i32 100 +} diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/differing-set-of-functions.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/differing-set-of-functions.ll.expected new file mode 100644 index 0000000000000..b851f3a3ae249 --- /dev/null +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/differing-set-of-functions.ll.expected @@ -0,0 +1,54 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --include-generated-funcs +; RUN: sed 's/FN/foo/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKA %s +; RUN: sed 's/FN/foo/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck 
-check-prefixes=CHECK,CHECKB %s +; RUN: sed 's/FN/bar/g' %s | llc -mtriple=riscv32 \ +; RUN: | FileCheck -check-prefixes=CHECK,CHECKC %s + +define i32 @FN() { + ret i32 1 +} + +define i32 @common() { + ret i32 100 +} +; CHECK-LABEL: foo: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: ret +; +; CHECK-LABEL: common: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 100 +; CHECK-NEXT: ret +; +; CHECKA-LABEL: foo: +; CHECKA: # %bb.0: +; CHECKA-NEXT: li a0, 1 +; CHECKA-NEXT: ret +; +; CHECKA-LABEL: common: +; CHECKA: # %bb.0: +; CHECKA-NEXT: li a0, 100 +; CHECKA-NEXT: ret +; +; CHECKB-LABEL: foo: +; CHECKB: # %bb.0: +; CHECKB-NEXT: li a0, 1 +; CHECKB-NEXT: ret +; +; CHECKB-LABEL: common: +; CHECKB: # %bb.0: +; CHECKB-NEXT: li a0, 100 +; CHECKB-NEXT: ret +; +; CHECKC-LABEL: bar: +; CHECKC: # %bb.0: +; CHECKC-NEXT: li a0, 1 +; CHECKC-NEXT: ret +; +; CHECKC-LABEL: common: +; CHECKC: # %bb.0: +; CHECKC-NEXT: li a0, 100 +; CHECKC-NEXT: ret diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/conflicting-prefixes.test b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/conflicting-prefixes.test new file mode 100644 index 0000000000000..e835b5f83f9a0 --- /dev/null +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/conflicting-prefixes.test @@ -0,0 +1,7 @@ +# REQUIRES: riscv-registered-target + +# RUN: cp -f %S/Inputs/conflicting-prefixes.ll %t.ll +# RUN: %update_llc_test_checks --no-generate-body-for-unused-prefixes %t.ll 2>&1 | FileCheck %s +# RUN: diff -u %S/Inputs/conflicting-prefixes.ll.expected %t.ll + +# CHECK: WARNING: For function 'foo', the following RUN lines will not generate checks due to conflicting output: RUN #1 (prefixes: CHECK, CHECKA), RUN #2 (prefixes: CHECK, CHECKA), RUN #3 (prefixes: CHECK, CHECKB), RUN #4 (prefixes: CHECK, CHECKB): diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/differing-set-of-functions.test b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/differing-set-of-functions.test new file mode 100644 index 0000000000000..749f3f2a528c1 --- /dev/null +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/differing-set-of-functions.test @@ -0,0 +1,11 @@ +# REQUIRES: riscv-registered-target + +# RUN: cp -f %S/Inputs/differing-set-of-functions.ll %t.ll +# RUN: %update_llc_test_checks --include-generated-funcs %t.ll 2>&1 | FileCheck --allow-empty %s +# RUN: diff -u %S/Inputs/differing-set-of-functions.ll.expected %t.ll + +# We shouldn't print the warning for clashing CHECK prefixes in the case that +# we're trying to handle a function that is only present for some RUN lines. +# Better warning behaviour than this might be possible. 
+ +# CHECK-NOT: WARNING diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/prefix-never-matches.test b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/prefix-never-matches.test index 2e75148addd84..90ae70bda64d9 100644 --- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/prefix-never-matches.test +++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/prefix-never-matches.test @@ -4,5 +4,5 @@ # RUN: %update_llc_test_checks --no-generate-body-for-unused-prefixes %t.ll 2>&1 | FileCheck %s # RUN: FileCheck --input-file=%t.ll %s --check-prefix=OUTPUT -# CHECK: WARNING: Prefix A had conflicting output +# CHECK: WARNING: For function 'fold_v2i64', the following RUN lines will not generate checks due to conflicting output # OUTPUT-NOT: A: diff --git a/llvm/test/tools/llvm-ar/option-X.test b/llvm/test/tools/llvm-ar/option-X.test index 03681d9cd003f..875f8339e9d18 100644 --- a/llvm/test/tools/llvm-ar/option-X.test +++ b/llvm/test/tools/llvm-ar/option-X.test @@ -3,7 +3,6 @@ ## The option specifies the type of object file llvm-ar will operate on. # RUN: rm -rf %t && mkdir %t && cd %t -# RUN: unset OBJECT_MODE # RUN: yaml2obj --docnum=1 -DCLASS=ELFCLASS32 %s -o elf32.o # RUN: yaml2obj --docnum=1 -DCLASS=ELFCLASS64 %s -o elf64.o @@ -11,7 +10,7 @@ # RUN: yaml2obj --docnum=2 -DFLAG=0x1F7 %s -o xcoff64.o ## Test default -X option when creating a new archive. -# RUN: llvm-ar -q -c archive-default.a xcoff32.o elf32.o xcoff64.o elf64.o 2>&1 | \ +# RUN: env -u OBJECT_MODE llvm-ar -q -c archive-default.a xcoff32.o elf32.o xcoff64.o elf64.o 2>&1 | \ # RUN: FileCheck %s --check-prefixes=WARN-XCOFF64,WARN-ELF64 # RUN: llvm-ar -t -Xany archive-default.a | \ # RUN: FileCheck %s --check-prefixes=OBJ32 @@ -74,7 +73,7 @@ # RUN: FileCheck %s --check-prefixes=OBJ32_64 ## Test -X option for print operation. -# RUN: llvm-ar -t archive-any.a | \ +# RUN: env -u OBJECT_MODE llvm-ar -t archive-any.a | \ # RUN: FileCheck %s --check-prefixes=OBJ32 # RUN: llvm-ar -t -X32 archive-any.a | \ @@ -115,7 +114,7 @@ # RUN: cmp elf64.o any/elf64.o ## Extract a 64-bit object file with option -X32 (or default object mode). -# RUN: not llvm-ar --output=err64 -x archive-any.a xcoff64.o 2>&1 | \ +# RUN: env -u OBJECT_MODE not llvm-ar --output=err64 -x archive-any.a xcoff64.o 2>&1 | \ # RUN: FileCheck %s -DFILE=xcoff64.o --check-prefixes=ERR64 # RUN: not llvm-ar --output=err64 -x -X32 archive-any.a xcoff64.o 2>&1 | \ # RUN: FileCheck %s -DFILE=xcoff64.o --check-prefixes=ERR64 @@ -156,7 +155,7 @@ ## Without -X64, -X32_64 or -Xany, nothing changed here, ## since xcoff.o is a 64-bit object file in command line, but ## the xcoff.o member in archive-rep.a is a 32-bit object file. -# RUN: llvm-ar -r archive-rep.a xcoff.o +# RUN: env -u OBJECT_MODE llvm-ar -r archive-rep.a xcoff.o # RUN: llvm-ar -t -Xany archive-rep.a | \ # RUN: FileCheck %s --check-prefixes=REP # RUN: llvm-nm -Xany --print-armap archive-rep.a | \ @@ -178,7 +177,7 @@ ## Test move member. # RUN: cp archive-any.a archive.a ## Do not move 64-bit object without options -X64, -X32_64, Xany. -# RUN: llvm-ar -ma elf32.o archive.a xcoff64.o 2>&1 | \ +# RUN: env -u OBJECT_MODE llvm-ar -ma elf32.o archive.a xcoff64.o 2>&1 | \ # RUN: FileCheck %s --check-prefix=WARN-XCOFF64 # RUN: llvm-ar -t -Xany archive.a | \ @@ -240,7 +239,7 @@ # MOVE32-EMPTY: ## Move after a file with a bitness that doesn't match the object mode. 
-# RUN: not llvm-ar -ma xcoff64.o archive-any.a xcoff32.o 2>&1 | \ +# RUN: env -u OBJECT_MODE not llvm-ar -ma xcoff64.o archive-any.a xcoff32.o 2>&1 | \ # RUN: FileCheck %s --check-prefixes=ERR-INSERT-POINT # RUN: not llvm-ar -X32 -ma xcoff64.o archive-any.a xcoff32.o 2>&1 | \ @@ -308,7 +307,7 @@ # RUN: yaml2obj --docnum=5 %s -o wasm.o # RUN: yaml2obj --docnum=6 %s -o coff.o -# RUN: llvm-ar -q -c archive-other32.a coff.o 32.bc 64.bc wasm.o macho32.o macho64.o 2>&1 | \ +# RUN: env -u OBJECT_MODE llvm-ar -q -c archive-other32.a coff.o 32.bc 64.bc wasm.o macho32.o macho64.o 2>&1 | \ # RUN: FileCheck %s --check-prefixes=WARN-64 # RUN: llvm-ar -t -Xany archive-other32.a | \ # RUN: FileCheck %s --check-prefixes=OTHER32 diff --git a/llvm/test/tools/llvm-cov/Inputs/binary-formats.canonical.json b/llvm/test/tools/llvm-cov/Inputs/binary-formats.canonical.json index 5f9122d01da9a..f219ca6c7e179 100644 --- a/llvm/test/tools/llvm-cov/Inputs/binary-formats.canonical.json +++ b/llvm/test/tools/llvm-cov/Inputs/binary-formats.canonical.json @@ -33,4 +33,4 @@ CHECK-SAME: "mcdc":{"count":0,"covered":0,"notcovered":0,"percent":0}, CHECK-SAME: "regions":{"count":1,"covered":1,"notcovered":0,"percent":100}}} CHECK-SAME: ], CHECK-SAME: "type":"llvm.coverage.json.export" -CHECK-SAME: "version":"3.0.1" +CHECK-SAME: "version":"3.1.0" diff --git a/llvm/test/tools/llvm-cov/mcdc-export-json.test b/llvm/test/tools/llvm-cov/mcdc-export-json.test index e6dbd17bee5b2..4b6f3b011451a 100644 --- a/llvm/test/tools/llvm-cov/mcdc-export-json.test +++ b/llvm/test/tools/llvm-cov/mcdc-export-json.test @@ -1,10 +1,10 @@ // RUN: llvm-profdata merge %S/Inputs/mcdc-general.proftext -o %t.profdata // RUN: llvm-cov export --format=text %S/Inputs/mcdc-general.o -instr-profile %t.profdata | FileCheck %s -// CHECK: 12,7,12,27,2,4,0,0,5,[true,true,true,true] -// CHECK: 15,7,15,13,1,2,0,0,5,[true,true] -// CHECK: 15,19,15,25,1,1,0,0,5,[true,false] -// CHECK: 18,7,19,15,1,3,0,0,5,[true,true,false,true] +// CHECK: 12,7,12,27,2,4,0,0,5,[true,true,true,true],[{"conditions":[false,null,false,null],"executed":true,"result":false},{"conditions":[false,null,true,false],"executed":true,"result":false},{"conditions":[true,false,false,null],"executed":true,"result":false},{"conditions":[true,false,true,false],"executed":true,"result":false},{"conditions":[true,false,true,true],"executed":true,"result":true},{"conditions":[true,true,null,null],"executed":true,"result":true}] +// CHECK: 15,7,15,13,1,2,0,0,5,[true,true],[{"conditions":[false,null],"executed":true,"result":false},{"conditions":[true,false],"executed":true,"result":false},{"conditions":[true,true],"executed":true,"result":true}] +// CHECK: 15,19,15,25,1,1,0,0,5,[true,false],[{"conditions":[false,null],"executed":true,"result":false},{"conditions":[true,true],"executed":true,"result":true}] +// CHECK: 18,7,19,15,1,3,0,0,5,[true,true,false,true],[{"conditions":[false,null,null,null],"executed":true,"result":false},{"conditions":[true,false,null,null],"executed":true,"result":false},{"conditions":[true,true,true,false],"executed":true,"result":false},{"conditions":[true,true,true,true],"executed":true,"result":true}] // CHECK: "mcdc":{"count":12,"covered":10,"notcovered":2,"percent":83.333333333333343} Instructions for regenerating the test: diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/COFF/01-coff-print-basic-details.test b/llvm/test/tools/llvm-debuginfo-analyzer/COFF/01-coff-print-basic-details.test index 035382897d17e..0696c57d33b1c 100644 --- 
a/llvm/test/tools/llvm-debuginfo-analyzer/COFF/01-coff-print-basic-details.test +++ b/llvm/test/tools/llvm-debuginfo-analyzer/COFF/01-coff-print-basic-details.test @@ -18,31 +18,14 @@ ; sorted by the debug information internal offset; it includes its lexical ; level and debug info format. -; RUN: llvm-debuginfo-analyzer --attribute=level,format \ -; RUN: --output-sort=offset \ -; RUN: --print=scopes,symbols,types,lines,instructions \ -; RUN: %p/Inputs/test-codeview-clang.o 2>&1 | \ -; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s - -; If `--output-sort=id`, elements are iterated in the order in which they were -; added (which matches the increasing offset of the reference output). ; RUN: llvm-debuginfo-analyzer --attribute=level,format \ ; RUN: --output-sort=id \ ; RUN: --print=scopes,symbols,types,lines,instructions \ ; RUN: %p/Inputs/test-codeview-clang.o 2>&1 | \ ; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s -; If `--output-sort=none`, `LVScope::Children` is not sorted; it, however, -; reflects the order in which elements were added (same as `--output-sort=id`). -; This is expected to change once #69160 is resolved though. -; RUN: llvm-debuginfo-analyzer --attribute=level,format \ -; RUN: --output-sort=none \ -; RUN: --print=scopes,symbols,types,lines,instructions \ -; RUN: %p/Inputs/test-codeview-clang.o 2>&1 | \ -; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s - ; RUN: llvm-debuginfo-analyzer --attribute=level,format \ -; RUN: --output-sort=offset \ +; RUN: --output-sort=id \ ; RUN: --print=elements \ ; RUN: %p/Inputs/test-codeview-clang.o 2>&1 | \ ; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s @@ -80,3 +63,43 @@ ; ONE-NEXT: [003] {Code} 'addq $0x20, %rsp' ; ONE-NEXT: [003] {Code} 'retq' ; ONE-NEXT: [002] {TypeAlias} 'INTPTR' -> '* const int' + +; RUN: llvm-debuginfo-analyzer --attribute=level,format \ +; RUN: --output-sort=none \ +; RUN: --print=scopes,symbols,types,lines,instructions \ +; RUN: %p/Inputs/test-codeview-clang.o 2>&1 | \ +; RUN: FileCheck --strict-whitespace -check-prefix=ONE-NOSORT %s + +; ONE-NOSORT: Logical View: +; ONE-NOSORT-NEXT: [000] {File} 'test-codeview-clang.o' -> COFF-x86-64 +; ONE-NOSORT-EMPTY: +; ONE-NOSORT-NEXT: [001] {CompileUnit} 'test.cpp' +; ONE-NOSORT-NEXT: [002] {Function} extern not_inlined 'foo' -> 'int' +; ONE-NOSORT-NEXT: [003] {Block} +; ONE-NOSORT-NEXT: [004] {Variable} 'CONSTANT' -> 'const int' +; ONE-NOSORT-NEXT: [004] 5 {Line} +; ONE-NOSORT-NEXT: [004] {Code} 'movl $0x7, 0x4(%rsp)' +; ONE-NOSORT-NEXT: [004] 6 {Line} +; ONE-NOSORT-NEXT: [004] {Code} 'movl $0x7, 0x1c(%rsp)' +; ONE-NOSORT-NEXT: [004] {Code} 'jmp 0x8' +; ONE-NOSORT-NEXT: [003] {TypeAlias} 'INTEGER' -> 'int' +; ONE-NOSORT-NEXT: [003] {Parameter} 'ParamPtr' -> '* const int' +; ONE-NOSORT-NEXT: [003] {Parameter} 'ParamUnsigned' -> 'unsigned' +; ONE-NOSORT-NEXT: [003] {Parameter} 'ParamBool' -> 'bool' +; ONE-NOSORT-NEXT: [003] 2 {Line} +; ONE-NOSORT-NEXT: [003] {Code} 'subq $0x20, %rsp' +; ONE-NOSORT-NEXT: [003] {Code} 'andb $0x1, %r8b' +; ONE-NOSORT-NEXT: [003] {Code} 'movb %r8b, 0x1b(%rsp)' +; ONE-NOSORT-NEXT: [003] {Code} 'movl %edx, 0x14(%rsp)' +; ONE-NOSORT-NEXT: [003] {Code} 'movq %rcx, 0x8(%rsp)' +; ONE-NOSORT-NEXT: [003] 3 {Line} +; ONE-NOSORT-NEXT: [003] {Code} 'testb $0x1, 0x1b(%rsp)' +; ONE-NOSORT-NEXT: [003] {Code} 'je 0x15' +; ONE-NOSORT-NEXT: [003] 8 {Line} +; ONE-NOSORT-NEXT: [003] {Code} 'movl 0x14(%rsp), %eax' +; ONE-NOSORT-NEXT: [003] {Code} 'movl %eax, 0x1c(%rsp)' +; ONE-NOSORT-NEXT: [003] 9 {Line} +; ONE-NOSORT-NEXT: 
[003] {Code} 'movl 0x1c(%rsp), %eax' +; ONE-NOSORT-NEXT: [003] {Code} 'addq $0x20, %rsp' +; ONE-NOSORT-NEXT: [003] {Code} 'retq' +; ONE-NOSORT-NEXT: [002] {TypeAlias} 'INTPTR' -> '* const int' diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/01-dwarf-compare-logical-elements.test b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/01-dwarf-compare-logical-elements.test index a076887140c28..1b790eeb3b691 100644 --- a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/01-dwarf-compare-logical-elements.test +++ b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/01-dwarf-compare-logical-elements.test @@ -35,8 +35,8 @@ ; ONE-NEXT: [002] 1 {TypeAlias} 'INTPTR' -> '* const int' ; ONE-NEXT: [002] 2 {Function} extern not_inlined 'foo' -> 'int' ; ONE-NEXT: [003] {Block} -; ONE-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER' ; ONE-NEXT: +[004] 4 {TypeAlias} 'INTEGER' -> 'int' +; ONE-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER' ; ONE-NEXT: [003] 2 {Parameter} 'ParamBool' -> 'bool' ; ONE-NEXT: [003] 2 {Parameter} 'ParamPtr' -> 'INTPTR' ; ONE-NEXT: [003] 2 {Parameter} 'ParamUnsigned' -> 'unsigned int' diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/01-dwarf-print-basic-details.test b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/01-dwarf-print-basic-details.test index 35662554d5593..1ce9c1ef682a2 100644 --- a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/01-dwarf-print-basic-details.test +++ b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/01-dwarf-print-basic-details.test @@ -18,32 +18,23 @@ ; sorted by the debug information internal offset; it includes its lexical ; level and debug info format. -; RUN: llvm-debuginfo-analyzer --attribute=level,format \ -; RUN: --output-sort=offset \ -; RUN: --print=scopes,symbols,types,lines,instructions \ -; RUN: %p/Inputs/test-dwarf-clang.o 2>&1 | \ -; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s - -; If `--output-sort=id`, elements are iterated in the order in which they -; were added (which matches the increasing offset of the reference output). ; RUN: llvm-debuginfo-analyzer --attribute=level,format \ ; RUN: --output-sort=id \ ; RUN: --print=scopes,symbols,types,lines,instructions \ ; RUN: %p/Inputs/test-dwarf-clang.o 2>&1 | \ ; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s -; If `--output-sort=none`, `LVScope::Children` is not sorted; it, however, -; reflects the order in which elements were added (same as `--output-sort=id`). -; This is expected to change once #69160 is resolved though. ; RUN: llvm-debuginfo-analyzer --attribute=level,format \ -; RUN: --output-sort=none \ -; RUN: --print=scopes,symbols,types,lines,instructions \ +; RUN: --output-sort=id \ +; RUN: --print=elements \ ; RUN: %p/Inputs/test-dwarf-clang.o 2>&1 | \ ; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s +; For DWARF, `--output-sort=offset` matches `--output-sort=id`, i.e., +; `LVElement`s are always iterated in the order in which they were added. 
; RUN: llvm-debuginfo-analyzer --attribute=level,format \ ; RUN: --output-sort=offset \ -; RUN: --print=elements \ +; RUN: --print=scopes,symbols,types,lines,instructions \ ; RUN: %p/Inputs/test-dwarf-clang.o 2>&1 | \ ; RUN: FileCheck --strict-whitespace -check-prefix=ONE %s @@ -84,3 +75,47 @@ ; ONE-NEXT: [003] {Code} 'retq' ; ONE-NEXT: [002] 1 {TypeAlias} 'INTPTR' -> '* const int' ; ONE-NEXT: [002] 9 {Line} + +; RUN: llvm-debuginfo-analyzer --attribute=level,format \ +; RUN: --output-sort=none \ +; RUN: --print=scopes,symbols,types,lines,instructions \ +; RUN: %p/Inputs/test-dwarf-clang.o 2>&1 | \ +; RUN: FileCheck --strict-whitespace -check-prefix=ONE-NOSORT %s + +; ONE-NOSORT: Logical View: +; ONE-NOSORT-NEXT: [000] {File} 'test-dwarf-clang.o' -> elf64-x86-64 +; ONE-NOSORT-EMPTY: +; ONE-NOSORT-NEXT: [001] {CompileUnit} 'test.cpp' +; ONE-NOSORT-NEXT: [002] 2 {Function} extern not_inlined 'foo' -> 'int' +; ONE-NOSORT-NEXT: [003] {Block} +; ONE-NOSORT-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER' +; ONE-NOSORT-NEXT: [004] 5 {Line} +; ONE-NOSORT-NEXT: [004] {Code} 'movl $0x7, -0x1c(%rbp)' +; ONE-NOSORT-NEXT: [004] 6 {Line} +; ONE-NOSORT-NEXT: [004] {Code} 'movl $0x7, -0x4(%rbp)' +; ONE-NOSORT-NEXT: [004] {Code} 'jmp 0x6' +; ONE-NOSORT-NEXT: [003] 4 {TypeAlias} 'INTEGER' -> 'int' +; ONE-NOSORT-NEXT: [003] 2 {Parameter} 'ParamPtr' -> 'INTPTR' +; ONE-NOSORT-NEXT: [003] 2 {Parameter} 'ParamUnsigned' -> 'unsigned int' +; ONE-NOSORT-NEXT: [003] 2 {Parameter} 'ParamBool' -> 'bool' +; ONE-NOSORT-NEXT: [003] 2 {Line} +; ONE-NOSORT-NEXT: [003] {Code} 'pushq %rbp' +; ONE-NOSORT-NEXT: [003] {Code} 'movq %rsp, %rbp' +; ONE-NOSORT-NEXT: [003] {Code} 'movb %dl, %al' +; ONE-NOSORT-NEXT: [003] {Code} 'movq %rdi, -0x10(%rbp)' +; ONE-NOSORT-NEXT: [003] {Code} 'movl %esi, -0x14(%rbp)' +; ONE-NOSORT-NEXT: [003] {Code} 'andb $0x1, %al' +; ONE-NOSORT-NEXT: [003] {Code} 'movb %al, -0x15(%rbp)' +; ONE-NOSORT-NEXT: [003] 3 {Line} +; ONE-NOSORT-NEXT: [003] {Code} 'testb $0x1, -0x15(%rbp)' +; ONE-NOSORT-NEXT: [003] {Code} 'je 0x13' +; ONE-NOSORT-NEXT: [003] 8 {Line} +; ONE-NOSORT-NEXT: [003] {Code} 'movl -0x14(%rbp), %eax' +; ONE-NOSORT-NEXT: [003] 8 {Line} +; ONE-NOSORT-NEXT: [003] {Code} 'movl %eax, -0x4(%rbp)' +; ONE-NOSORT-NEXT: [003] 9 {Line} +; ONE-NOSORT-NEXT: [003] {Code} 'movl -0x4(%rbp), %eax' +; ONE-NOSORT-NEXT: [003] {Code} 'popq %rbp' +; ONE-NOSORT-NEXT: [003] {Code} 'retq' +; ONE-NOSORT-NEXT: [002] 1 {TypeAlias} 'INTPTR' -> '* const int' +; ONE-NOSORT-NEXT: [002] 9 {Line} diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/pr-57040-incorrect-function-compare.test b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/pr-57040-incorrect-function-compare.test index 278d4f4850f5f..78604d9164c0f 100644 --- a/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/pr-57040-incorrect-function-compare.test +++ b/llvm/test/tools/llvm-debuginfo-analyzer/DWARF/pr-57040-incorrect-function-compare.test @@ -55,8 +55,8 @@ ; ONE-NEXT: [002] 1 {TypeAlias} 'INTPTR' -> '* const int' ; ONE-NEXT: [002] 2 {Function} extern not_inlined 'foo' -> 'int' ; ONE-NEXT: [003] {Block} -; ONE-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER' ; ONE-NEXT: +[004] 4 {TypeAlias} 'INTEGER' -> 'int' +; ONE-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER' ; ONE-NEXT: [003] 2 {Parameter} 'ParamBool' -> 'bool' ; ONE-NEXT: [003] 2 {Parameter} 'ParamPtr' -> 'INTPTR' ; ONE-NEXT: [003] 2 {Parameter} 'ParamUnsigned' -> 'unsigned int' diff --git a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-compare-logical-elements.test 
b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-compare-logical-elements.test index f52c9c7cc7164..98fc47e3d3c80 100644 --- a/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-compare-logical-elements.test +++ b/llvm/test/tools/llvm-debuginfo-analyzer/WebAssembly/01-wasm-compare-logical-elements.test @@ -38,8 +38,8 @@ ; ONE-NEXT: [002] 1 {TypeAlias} 'INTPTR' -> '* const int' ; ONE-NEXT: [002] 2 {Function} extern not_inlined 'foo' -> 'int' ; ONE-NEXT: [003] {Block} -; ONE-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER' ; ONE-NEXT: +[004] 4 {TypeAlias} 'INTEGER' -> 'int' +; ONE-NEXT: [004] 5 {Variable} 'CONSTANT' -> 'const INTEGER' ; ONE-NEXT: [003] 2 {Parameter} 'ParamBool' -> 'bool' ; ONE-NEXT: [003] 2 {Parameter} 'ParamPtr' -> 'INTPTR' ; ONE-NEXT: [003] 2 {Parameter} 'ParamUnsigned' -> 'unsigned int' diff --git a/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml b/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml new file mode 100644 index 0000000000000..17e91f1cc1393 --- /dev/null +++ b/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml @@ -0,0 +1,1656 @@ +# Object file copied from llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test +# Then manually tampered with some of the attribute values. +# I hope there are easier ways to construct tests like this. + +# RUN: yaml2obj %s -o %t.o +# RUN: not llvm-dwarfdump -verify -debug-info %t.o | FileCheck %s --check-prefix=CHECK_INVALID --implicit-check-not=error: +# RUN: llvm-dwarfdump -debug-line -verbose -debug-info %t.o | FileCheck %s --check-prefix=CHECK_DEBUG_LINE + +# CHECK_INVALID: error: DW_AT_LLVM_stmt_sequence offset 0x00000000 is not within the line table bounds [0x00000034, 0x000000fd) +# CHECK_INVALID: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] (0x00000000) + +# CHECK_DEBUG_LINE: Address Line Column File ISA Discriminator OpIndex Flags +# CHECK_DEBUG_LINE-NEXT: ------------------ ------ ------ ------ --- ------------- ------- ------------- +# CHECK_DEBUG_LINE-NEXT: 0x00000034: 05 DW_LNS_set_column (10) +# CHECK_DEBUG_LINE-NEXT: 0x00000036: 0a DW_LNS_set_prologue_end +# CHECK_DEBUG_LINE-NEXT: 0x00000037: 00 DW_LNE_set_address (0x0000000000000000) +# CHECK_DEBUG_LINE-NEXT: 0x00000042: 14 address += 0, line += 2, op-index += 0 +# CHECK_DEBUG_LINE-NEXT: 0x0000000000000000 3 10 1 0 0 0 is_stmt prologue_end +# CHECK_DEBUG_LINE-NEXT: 0x00000043: 05 DW_LNS_set_column (3) +# CHECK_DEBUG_LINE-NEXT: 0x00000045: 06 DW_LNS_negate_stmt +# CHECK_DEBUG_LINE-NEXT: 0x00000046: 4a address += 4, line += 0, op-index += 0 +# CHECK_DEBUG_LINE-NEXT: 0x0000000000000004 3 3 1 0 0 0 +# CHECK_DEBUG_LINE-NEXT: 0x00000047: 00 DW_LNE_end_sequence +# CHECK_DEBUG_LINE-NEXT: 0x0000000000000004 3 3 1 0 0 0 end_sequence + +# 0xd3 would be a valid offset if the line table weren't ill-formed with two rows having the same PC (0x8c).
+# CHECK_INVALID: error: DW_AT_LLVM_stmt_sequence offset 0x000000d3 does not point to a valid sequence offset in the line table +# CHECK_INVALID: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] (0x000000d3) + +# CHECK_DEBUG_LINE: 0x000000d3: 05 DW_LNS_set_column (85) +# CHECK_DEBUG_LINE-NEXT: 0x000000d5: 0a DW_LNS_set_prologue_end +# CHECK_DEBUG_LINE-NEXT: 0x000000d6: 00 DW_LNE_set_address (0x000000000000008c) +# CHECK_DEBUG_LINE-NEXT: 0x000000e1: 03 DW_LNS_advance_line (30) +# CHECK_DEBUG_LINE-NEXT: 0x000000e3: 01 DW_LNS_copy +# CHECK_DEBUG_LINE-NEXT: 0x000000000000008c 30 85 1 0 0 0 is_stmt prologue_end +# CHECK_DEBUG_LINE-NEXT: 0x000000e4: 00 DW_LNE_end_sequence +# CHECK_DEBUG_LINE-NEXT: 0x000000000000008c 30 85 1 0 0 0 is_stmt end_sequence + +# CHECK_INVALID: error: DIE has invalid DW_AT_LLVM_stmt_sequence encoding +# CHECK_INVALID: DW_AT_LLVM_stmt_sequence [DW_FORM_data4] (0x000000a7) +# CHECK_INVALID: error: DW_AT_LLVM_stmt_sequence offset 0x000000ab does not point to a valid sequence offset in the line table +# CHECK_INVALID: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] (0x000000ab) + +# CHECK_INVALID: error: DW_AT_LLVM_stmt_sequence offset is beyond .debug_line bounds: 0x00eeeee7 +# CHECK_INVALID: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] (0x00eeeee7) + +# CHECK_DEBUG_LINE: 0x000000f8: 02 DW_LNS_advance_pc (addr += 4, op-index += 0) +# CHECK_DEBUG_LINE-NEXT: 0x000000fa: 00 DW_LNE_end_sequence +# CHECK_DEBUG_LINE-NEXT: 0x0000000000000094 30 86 1 0 0 0 is_stmt end_sequence + +# CHECK_INVALID: error: Aggregated error counts: +# CHECK_INVALID-NEXT: error: DW_AT_LLVM_stmt_sequence offset out of bounds occurred 1 time(s). +# CHECK_INVALID-NEXT: error: DW_AT_LLVM_stmt_sequence offset out of line table bounds occurred 1 time(s). +# CHECK_INVALID-NEXT: error: Invalid DW_AT_LLVM_stmt_sequence encoding occurred 1 time(s). +# CHECK_INVALID-NEXT: error: Invalid DW_AT_LLVM_stmt_sequence offset occurred 2 time(s). 
+ +--- !mach-o +IsLittleEndian: true +FileHeader: + magic: 0xFEEDFACF + cputype: 0x100000C + cpusubtype: 0x0 + filetype: 0x1 + ncmds: 5 + sizeofcmds: 1176 + flags: 0x2000 + reserved: 0x0 +LoadCommands: + - cmd: LC_SEGMENT_64 + cmdsize: 1032 + segname: '' + vmaddr: 0 + vmsize: 3125 + fileoff: 1208 + filesize: 3125 + maxprot: 7 + initprot: 7 + nsects: 12 + flags: 0 + Sections: + - sectname: __text + segname: __TEXT + addr: 0x0 + size: 148 + offset: 0x4B8 + align: 2 + reloff: 0x10F0 + nreloc: 8 + flags: 0x80000400 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + content: 00040011C0035FD600100011C0035FD600580051C0035FD600100011C0035FD600580051C0035FD6FFC300D1F44F01A9FD7B02A9FD8300916000805200000094F30300AA20058052000000941400130B6001805200000094F30300AA40058052000000947302000B0100009021000091E03F0091000000948002130BFD7B42A9F44F41A9FFC30091C0035FD600000014C0035FD6 + relocations: + - address: 0x8C + symbolnum: 4 + pcrel: true + length: 2 + extern: true + type: 2 + scattered: false + value: 0 + - address: 0x74 + symbolnum: 3 + pcrel: true + length: 2 + extern: true + type: 2 + scattered: false + value: 0 + - address: 0x6C + symbolnum: 1 + pcrel: false + length: 2 + extern: true + type: 4 + scattered: false + value: 0 + - address: 0x68 + symbolnum: 1 + pcrel: true + length: 2 + extern: true + type: 3 + scattered: false + value: 0 + - address: 0x60 + symbolnum: 5 + pcrel: true + length: 2 + extern: true + type: 2 + scattered: false + value: 0 + - address: 0x54 + symbolnum: 6 + pcrel: true + length: 2 + extern: true + type: 2 + scattered: false + value: 0 + - address: 0x48 + symbolnum: 9 + pcrel: true + length: 2 + extern: true + type: 2 + scattered: false + value: 0 + - address: 0x3C + symbolnum: 7 + pcrel: true + length: 2 + extern: true + type: 2 + scattered: false + value: 0 + - sectname: __cstring + segname: __TEXT + addr: 0x94 + size: 5 + offset: 0x54C + align: 0 + reloff: 0x0 + nreloc: 0 + flags: 0x2 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + content: '7465737400' + - sectname: __debug_loc + segname: __DWARF + addr: 0x99 + size: 412 + offset: 0x551 + align: 0 + reloff: 0x0 + nreloc: 0 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + content: 08000000000000000C000000000000000100500C0000000000000010000000000000000400A301509F0000000000000000000000000000000008000000000000000C00000000000000030070039F0000000000000000000000000000000010000000000000001400000000000000010050140000000000000018000000000000000400A301509F0000000000000000000000000000000018000000000000001C000000000000000100501C0000000000000020000000000000000400A301509F0000000000000000000000000000000018000000000000001C00000000000000030070039F0000000000000000000000000000000020000000000000002400000000000000010050240000000000000028000000000000000400A301509F00000000000000000000000000000000240000000000000028000000000000000100500000000000000000000000000000000038000000000000004400000000000000030011009F4400000000000000500000000000000001006350000000000000005C0000000000000001006400000000000000000000000000000000 + - sectname: __debug_abbrev + segname: __DWARF + addr: 0x235 + size: 372 + offset: 0x6ED + align: 0 + reloff: 0x0 + nreloc: 0 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + - sectname: __debug_info + segname: __DWARF + addr: 0x3A9 + size: 747 + offset: 0x861 + align: 0 + reloff: 0x1130 + nreloc: 16 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + relocations: + - address: 0x2A7 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: 
false + value: 0 + - address: 0x28E + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x253 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x1F5 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x1E1 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x1CE + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x1BA + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x1A7 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x169 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x12D + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0xF1 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0xC4 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x88 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x5F + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x37 + symbolnum: 2 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x22 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - sectname: __debug_str + segname: __DWARF + addr: 0x694 + size: 400 + offset: 0xB4C + align: 0 + reloff: 0x0 + nreloc: 0 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + - sectname: __apple_names + segname: __DWARF + addr: 0x824 + size: 288 + offset: 0xCDC + align: 0 + reloff: 0x0 + nreloc: 0 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + content: 485341480100000009000000090000000C00000000000000010000000100060000000000FFFFFFFFFFFFFFFF0100000003000000040000000600000007000000080000004A08311CC78E3C8288CB36CF89CB36CFD1125E53522B705390D9F86F6A7F9A7C4908311C8C0000009C000000AC000000BC000000CC000000DC000000EC00000000010000100100000601000001000000F000000000000000D6000000010000005E00000000000000F600000001000000C30000000000000016010000010000002C01000000000000440100000100000052020000000000005C01000001000000A6020000000000002B0100000200000052020000A60200000000000026010000010000006801000000000000E6000000010000008700000000000000 + - sectname: __apple_objc + segname: __DWARF + addr: 0x944 + size: 36 + offset: 0xDFC + align: 0 + reloff: 0x0 + nreloc: 0 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + content: 485341480100000001000000000000000C000000000000000100000001000600FFFFFFFF + - sectname: __apple_namespac + segname: __DWARF + addr: 0x968 + size: 36 + offset: 0xE20 + align: 0 + reloff: 0x0 + nreloc: 0 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + content: 485341480100000001000000000000000C000000000000000100000001000600FFFFFFFF + - sectname: __apple_types + segname: __DWARF + addr: 0x98C + size: 195 + offset: 0xE44 + align: 0 + reloff: 0x0 + nreloc: 0 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + content: 
48534148010000000500000005000000140000000000000003000000010006000300050004000B000000000002000000FFFFFFFF03000000040000007CA8F05D90D9F86F5B738CDC3080880B6320957C64000000770000008A0000009D000000B0000000380100000100000027020000130000000000002B010000010000000502000013000000000000C20000000100000057000000240000000000007401000001000000DE02000024000000000000BD000000010000005000000024000000000000 + - sectname: __debug_frame + segname: __DWARF + addr: 0xA50 + size: 232 + offset: 0xF08 + align: 3 + reloff: 0x11B0 + nreloc: 8 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + content: 14000000FFFFFFFF0400080001781E0C1F00000000000000140000000000000000000000000000000800000000000000140000000000000008000000000000000800000000000000140000000000000010000000000000000800000000000000140000000000000018000000000000000800000000000000140000000000000020000000000000000800000000000000240000000000000028000000000000006400000000000000500C1D109E019D02930394040000000014000000000000008C000000000000000400000000000000140000000000000090000000000000000400000000000000 + relocations: + - address: 0xD8 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0xC0 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x98 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x80 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x68 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x50 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x38 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x20 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - sectname: __debug_line + segname: __DWARF + addr: 0xB38 + size: 253 + offset: 0xFF0 + align: 0 + reloff: 0x11F0 + nreloc: 8 + flags: 0x2000000 + reserved1: 0x0 + reserved2: 0x0 + reserved3: 0x0 + relocations: + - address: 0xED + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0xD9 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0xAA + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x96 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x7E + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x66 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x50 + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - address: 0x3A + symbolnum: 1 + pcrel: false + length: 3 + extern: false + type: 0 + scattered: false + value: 0 + - cmd: LC_BUILD_VERSION + cmdsize: 24 + platform: 1 + minos: 720896 + sdk: 0 + ntools: 0 + - cmd: LC_LINKER_OPTIMIZATION_HINT + cmdsize: 16 + dataoff: 4656 + datasize: 8 + - cmd: LC_SYMTAB + cmdsize: 24 + symoff: 4664 + nsyms: 11 + stroff: 4840 + strsize: 168 + - cmd: LC_DYSYMTAB + cmdsize: 80 + ilocalsym: 0 + nlocalsym: 3 + iextdefsym: 3 + nextdefsym: 8 + iundefsym: 11 + nundefsym: 0 + tocoff: 0 + 
ntoc: 0 + modtaboff: 0 + nmodtab: 0 + extrefsymoff: 0 + nextrefsyms: 0 + indirectsymoff: 0 + nindirectsyms: 0 + extreloff: 0 + nextrel: 0 + locreloff: 0 + nlocrel: 0 +LinkEditData: + NameList: + - n_strx: 155 + n_type: 0xE + n_sect: 1 + n_desc: 0 + n_value: 0 + - n_strx: 1 + n_type: 0xE + n_sect: 2 + n_desc: 0 + n_value: 148 + - n_strx: 149 + n_type: 0xE + n_sect: 2 + n_desc: 0 + n_value: 148 + - n_strx: 39 + n_type: 0xF + n_sect: 1 + n_desc: 192 + n_value: 140 + - n_strx: 14 + n_type: 0xF + n_sect: 1 + n_desc: 192 + n_value: 144 + - n_strx: 132 + n_type: 0xF + n_sect: 1 + n_desc: 0 + n_value: 0 + - n_strx: 115 + n_type: 0xF + n_sect: 1 + n_desc: 0 + n_value: 16 + - n_strx: 81 + n_type: 0xF + n_sect: 1 + n_desc: 0 + n_value: 32 + - n_strx: 98 + n_type: 0xF + n_sect: 1 + n_desc: 0 + n_value: 8 + - n_strx: 64 + n_type: 0xF + n_sect: 1 + n_desc: 0 + n_value: 24 + - n_strx: 8 + n_type: 0xF + n_sect: 1 + n_desc: 0 + n_value: 40 + StringTable: + - '' + - l_.str + - _main + - __ZN12length_errorC2EPKc + - __ZN12length_errorC1EPKc + - _function3_copy2 + - _function2_copy2 + - _function3_copy1 + - _function2_copy1 + - _function1_copy1 + - ltmp1 + - ltmp0 + - '' + - '' + - '' + - '' + - '' + - '' + - '' +DWARF: + debug_str: + - 'Facebook clang version 19.1.5 (https://git.internal.tfbnw.net/repos/git/rw/osmeta/external/llvm-project b36c9ae1f8f2b39e4aafb9ca4700c608c3036365)' + - stmt_seq_macho.cpp + - '/' + - '/private/tmp/stmt_seq' + - char + - __ARRAY_SIZE_TYPE__ + - function1_copy1 + - function3_copy1 + - function2_copy1 + - function3_copy2 + - function2_copy2 + - main + - length_error + - logic_error + - _ZN12length_errorC1EPKc + - _ZN12length_errorC2EPKc + - int + - a + - b + - result + - e + - sum + - this + - s + debug_abbrev: + - ID: 0 + Table: + - Code: 0x1 + Tag: DW_TAG_compile_unit + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_producer + Form: DW_FORM_strp + - Attribute: DW_AT_language + Form: DW_FORM_data2 + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_LLVM_sysroot + Form: DW_FORM_strp + - Attribute: DW_AT_stmt_list + Form: DW_FORM_sec_offset + - Attribute: DW_AT_comp_dir + Form: DW_FORM_strp + - Attribute: DW_AT_APPLE_optimized + Form: DW_FORM_flag_present + - Attribute: DW_AT_low_pc + Form: DW_FORM_addr + - Attribute: DW_AT_high_pc + Form: DW_FORM_data4 + - Code: 0x2 + Tag: DW_TAG_variable + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_location + Form: DW_FORM_exprloc + - Code: 0x3 + Tag: DW_TAG_array_type + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0x4 + Tag: DW_TAG_subrange_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_count + Form: DW_FORM_data1 + - Code: 0x5 + Tag: DW_TAG_const_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0x6 + Tag: DW_TAG_base_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_encoding + Form: DW_FORM_data1 + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Code: 0x7 + Tag: DW_TAG_base_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Attribute: DW_AT_encoding + Form: DW_FORM_data1 + - Code: 0x8 + Tag: DW_TAG_subprogram + 
Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_low_pc + Form: DW_FORM_addr + - Attribute: DW_AT_high_pc + Form: DW_FORM_data4 + - Attribute: DW_AT_APPLE_omit_frame_ptr + Form: DW_FORM_flag_present + - Attribute: DW_AT_LLVM_stmt_sequence + Form: DW_FORM_sec_offset + - Attribute: DW_AT_frame_base + Form: DW_FORM_exprloc + - Attribute: DW_AT_call_all_calls + Form: DW_FORM_flag_present + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_external + Form: DW_FORM_flag_present + - Attribute: DW_AT_APPLE_optimized + Form: DW_FORM_flag_present + - Code: 0x9 + Tag: DW_TAG_formal_parameter + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0xA + Tag: DW_TAG_formal_parameter + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_location + Form: DW_FORM_sec_offset + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0xB + Tag: DW_TAG_variable + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_location + Form: DW_FORM_sec_offset + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0xC + Tag: DW_TAG_subprogram + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_low_pc + Form: DW_FORM_addr + - Attribute: DW_AT_high_pc + Form: DW_FORM_data4 + - Attribute: DW_AT_LLVM_stmt_sequence + Form: DW_FORM_data4 + - Attribute: DW_AT_frame_base + Form: DW_FORM_exprloc + - Attribute: DW_AT_call_all_calls + Form: DW_FORM_flag_present + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_external + Form: DW_FORM_flag_present + - Attribute: DW_AT_APPLE_optimized + Form: DW_FORM_flag_present + - Code: 0xD + Tag: DW_TAG_variable + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_location + Form: DW_FORM_exprloc + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0xE + Tag: DW_TAG_call_site + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_call_origin + Form: DW_FORM_ref4 + - Attribute: DW_AT_call_return_pc + Form: DW_FORM_addr + - Code: 0xF + Tag: DW_TAG_call_site_parameter + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_location + Form: DW_FORM_exprloc + - Attribute: DW_AT_call_value + Form: DW_FORM_exprloc + - Code: 0x10 + Tag: DW_TAG_structure_type + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_calling_convention + Form: DW_FORM_data1 + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_byte_size + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Code: 0x11 + Tag: DW_TAG_inheritance + Children: DW_CHILDREN_no + Attributes: + - Attribute: 
DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_data_member_location + Form: DW_FORM_data1 + - Code: 0x12 + Tag: DW_TAG_subprogram + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_declaration + Form: DW_FORM_flag_present + - Attribute: DW_AT_external + Form: DW_FORM_flag_present + - Attribute: DW_AT_APPLE_optimized + Form: DW_FORM_flag_present + - Attribute: DW_AT_explicit + Form: DW_FORM_flag_present + - Code: 0x13 + Tag: DW_TAG_formal_parameter + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_artificial + Form: DW_FORM_flag_present + - Code: 0x14 + Tag: DW_TAG_formal_parameter + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0x15 + Tag: DW_TAG_subprogram + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_declaration + Form: DW_FORM_flag_present + - Attribute: DW_AT_external + Form: DW_FORM_flag_present + - Attribute: DW_AT_APPLE_optimized + Form: DW_FORM_flag_present + - Code: 0x16 + Tag: DW_TAG_pointer_type + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0x17 + Tag: DW_TAG_subprogram + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_low_pc + Form: DW_FORM_addr + - Attribute: DW_AT_high_pc + Form: DW_FORM_data4 + - Attribute: DW_AT_APPLE_omit_frame_ptr + Form: DW_FORM_flag_present + - Attribute: DW_AT_LLVM_stmt_sequence + Form: DW_FORM_sec_offset + - Attribute: DW_AT_frame_base + Form: DW_FORM_exprloc + - Attribute: DW_AT_object_pointer + Form: DW_FORM_ref4 + - Attribute: DW_AT_call_all_calls + Form: DW_FORM_flag_present + - Attribute: DW_AT_linkage_name + Form: DW_FORM_strp + - Attribute: DW_AT_specification + Form: DW_FORM_ref4 + - Code: 0x18 + Tag: DW_TAG_formal_parameter + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_location + Form: DW_FORM_exprloc + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Attribute: DW_AT_artificial + Form: DW_FORM_flag_present + - Code: 0x19 + Tag: DW_TAG_formal_parameter + Children: DW_CHILDREN_no + Attributes: + - Attribute: DW_AT_location + Form: DW_FORM_exprloc + - Attribute: DW_AT_name + Form: DW_FORM_strp + - Attribute: DW_AT_decl_file + Form: DW_FORM_data1 + - Attribute: DW_AT_decl_line + Form: DW_FORM_data1 + - Attribute: DW_AT_type + Form: DW_FORM_ref4 + - Code: 0x1A + Tag: DW_TAG_call_site + Children: DW_CHILDREN_yes + Attributes: + - Attribute: DW_AT_call_origin + Form: DW_FORM_ref4 + - Attribute: DW_AT_call_tail_call + Form: DW_FORM_flag_present + - Attribute: DW_AT_call_pc + Form: DW_FORM_addr + debug_info: + - Length: 0x2E7 + Version: 4 + AbbrevTableID: 0 + AbbrOffset: 0x0 + AddrSize: 8 + Entries: + - AbbrCode: 0x1 + Values: + - Value: 0x0 + - Value: 0x21 + - Value: 0x92 + - Value: 0xA5 + - Value: 0x0 + - Value: 0xA7 + - Value: 0x1 + - Value: 0x0 + - Value: 0x94 + - AbbrCode: 0x2 + Values: + - Value: 0x3F + - Value: 0x1 + - Value: 0x27 + - Value: 0x9 + BlockData: [ 0x3, 0x94, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0 ] + - AbbrCode: 0x3 + Values: + - Value: 0x4B + - AbbrCode: 0x4 + Values: + - Value: 0x57 + - Value: 0x5 + - AbbrCode: 0x0 + - AbbrCode: 0x5 + Values: + - Value: 0x50 + - 
AbbrCode: 0x6 + Values: + - Value: 0xBD + - Value: 0x6 + - Value: 0x1 + - AbbrCode: 0x7 + Values: + - Value: 0xC2 + - Value: 0x8 + - Value: 0x7 + - AbbrCode: 0x8 + Values: + - Value: 0x0 + - Value: 0x8 + - Value: 0x1 + - BlockData: [ 0x6F ] + - Value: 0x1 + BlockData: [ 0x6F ] + - Value: 0x1 + - Value: 0xD6 + - Value: 0x1 + - Value: 0x2 + - Value: 0x2DE + - Value: 0x1 + - Value: 0x1 + - AbbrCode: 0x9 + Values: + - Value: 0x178 + - Value: 0x1 + - Value: 0x2 + - Value: 0x2DE + - AbbrCode: 0x0 + - AbbrCode: 0x8 + Values: + - Value: 0x8 + - Value: 0x8 + - Value: 0x1 + - Value: 0x4A + - Value: 0x1 + BlockData: [ 0x6F ] + - Value: 0x1 + - Value: 0xE6 + - Value: 0x1 + - Value: 0x6 + - Value: 0x2DE + - Value: 0x1 + - Value: 0x1 + - AbbrCode: 0xA + Values: + - Value: 0x0 + - Value: 0x178 + - Value: 0x1 + - Value: 0x6 + - Value: 0x2DE + - AbbrCode: 0xB + Values: + - Value: 0x39 + - Value: 0x17A + - Value: 0x1 + - Value: 0x7 + - Value: 0x2DE + - AbbrCode: 0x0 + - AbbrCode: 0x8 + Values: + - Value: 0x10 + - Value: 0x8 + - Value: 0x1 + - Value: 0x60 + - Value: 0x1 + BlockData: [ 0x6F ] + - Value: 0x1 + - Value: 0xF6 + - Value: 0x1 + - Value: 0xB + - Value: 0x2DE + - Value: 0x1 + - Value: 0x1 + - AbbrCode: 0xA + Values: + - Value: 0x5E + - Value: 0x178 + - Value: 0x1 + - Value: 0xB + - Value: 0x2DE + - AbbrCode: 0x0 + - AbbrCode: 0x8 + Values: + - Value: 0x18 + - Value: 0x8 + - Value: 0x1 + - Value: 0xD3 + - Value: 0x1 + BlockData: [ 0x6F ] + - Value: 0x1 + - Value: 0x106 + - Value: 0x1 + - Value: 0xF + - Value: 0x2DE + - Value: 0x1 + - Value: 0x1 + - AbbrCode: 0xA + Values: + - Value: 0x97 + - Value: 0x178 + - Value: 0x1 + - Value: 0xF + - Value: 0x2DE + - AbbrCode: 0xB + Values: + - Value: 0xD0 + - Value: 0x17A + - Value: 0x1 + - Value: 0x10 + - Value: 0x2DE + - AbbrCode: 0x0 + - AbbrCode: 0x8 + Values: + - Value: 0x20 + - Value: 0x8 + - Value: 0x1 + - Value: 0xE7 + - Value: 0x1 + BlockData: [ 0x6F ] + - Value: 0x1 + - Value: 0x116 + - Value: 0x1 + - Value: 0x14 + - Value: 0x2DE + - Value: 0x1 + - Value: 0x1 + - AbbrCode: 0xA + Values: + - Value: 0xF5 + - Value: 0x178 + - Value: 0x1 + - Value: 0x14 + - Value: 0x2DE + - AbbrCode: 0xB + Values: + - Value: 0x12E + - Value: 0x17C + - Value: 0x1 + - Value: 0x15 + - Value: 0x2DE + - AbbrCode: 0x0 + - AbbrCode: 0xC + Values: + - Value: 0x28 + - Value: 0x64 + - Value: 0xA7 + - Value: 0x1 + BlockData: [ 0x6D ] + - Value: 0x1 + - Value: 0x126 + - Value: 0x1 + - Value: 0x21 + - Value: 0x2DE + - Value: 0x1 + - Value: 0x1 + - AbbrCode: 0xD + Values: + - Value: 0x2 + BlockData: [ 0x8F, 0xF ] + - Value: 0x183 + - Value: 0x1 + - Value: 0x27 + - Value: 0x205 + - AbbrCode: 0xB + Values: + - Value: 0x151 + - Value: 0x185 + - Value: 0x1 + - Value: 0x22 + - Value: 0x2DE + - AbbrCode: 0xE + Values: + - Value: 0x12C + - Value: 0x40 + - AbbrCode: 0xF + Values: + - Value: 0x1 + BlockData: [ 0x50 ] + - Value: 0x1 + BlockData: [ 0x33 ] + - AbbrCode: 0x0 + - AbbrCode: 0xE + Values: + - Value: 0xF0 + - Value: 0x4C + - AbbrCode: 0xF + Values: + - Value: 0x1 + BlockData: [ 0x50 ] + - Value: 0x2 + BlockData: [ 0x10, 0x29 ] + - AbbrCode: 0x0 + - AbbrCode: 0xE + Values: + - Value: 0xC3 + - Value: 0x58 + - AbbrCode: 0xF + Values: + - Value: 0x1 + BlockData: [ 0x50 ] + - Value: 0x1 + BlockData: [ 0x3B ] + - AbbrCode: 0x0 + - AbbrCode: 0xE + Values: + - Value: 0x5E + - Value: 0x64 + - AbbrCode: 0xF + Values: + - Value: 0x1 + BlockData: [ 0x50 ] + - Value: 0x2 + BlockData: [ 0x10, 0x2A ] + - AbbrCode: 0x0 + - AbbrCode: 0xE + Values: + - Value: 0x252 + - Value: 0x78 + - AbbrCode: 0xF + 
Values: + - Value: 0x1 + BlockData: [ 0x50 ] + - Value: 0x2 + BlockData: [ 0x8F, 0xF ] + - AbbrCode: 0x0 + - AbbrCode: 0x0 + - AbbrCode: 0x10 + Values: + - Value: 0x5 + - Value: 0x12B + - Value: 0x1 + - Value: 0x1 + - Value: 0x1D + - AbbrCode: 0x11 + Values: + - Value: 0x227 + - Value: 0x0 + - AbbrCode: 0x12 + Values: + - Value: 0x12B + - Value: 0x1 + - Value: 0x1E + - Value: 0x1 + - Value: 0x1 + - Value: 0x1 + - Value: 0x1 + - AbbrCode: 0x13 + Values: + - Value: 0x24D + - Value: 0x1 + - AbbrCode: 0x14 + Values: + - Value: 0x248 + - AbbrCode: 0x0 + - AbbrCode: 0x0 + - AbbrCode: 0x10 + Values: + - Value: 0x5 + - Value: 0x138 + - Value: 0x1 + - Value: 0x1 + - Value: 0x19 + - AbbrCode: 0x15 + Values: + - Value: 0x138 + - Value: 0x1 + - Value: 0x1A + - Value: 0x1 + - Value: 0x1 + - Value: 0x1 + - AbbrCode: 0x13 + Values: + - Value: 0x243 + - Value: 0x1 + - AbbrCode: 0x14 + Values: + - Value: 0x248 + - AbbrCode: 0x0 + - AbbrCode: 0x0 + - AbbrCode: 0x16 + Values: + - Value: 0x227 + - AbbrCode: 0x16 + Values: + - Value: 0x4B + - AbbrCode: 0x16 + Values: + - Value: 0x205 + - AbbrCode: 0x17 + Values: + - Value: 0x8C + - Value: 0x4 + - Value: 0x1 + - Value: 0xAB + - Value: 0x1 + BlockData: [ 0x6F ] + - Value: 0x271 + - Value: 0x1 + - Value: 0x144 + - Value: 0x214 + - AbbrCode: 0x18 + Values: + - Value: 0x1 + BlockData: [ 0x50 ] + - Value: 0x189 + - Value: 0x2E5 + - Value: 0x1 + - AbbrCode: 0x19 + Values: + - Value: 0x1 + BlockData: [ 0x51 ] + - Value: 0x18E + - Value: 0x1 + - Value: 0x1E + - Value: 0x248 + - AbbrCode: 0x1A + Values: + - Value: 0x2A6 + - Value: 0x1 + - Value: 0x8C + - AbbrCode: 0xF + Values: + - Value: 0x1 + BlockData: [ 0x50 ] + - Value: 0x3 + BlockData: [ 0xA3, 0x1, 0x50 ] + - AbbrCode: 0xF + Values: + - Value: 0x1 + BlockData: [ 0x51 ] + - Value: 0x3 + BlockData: [ 0xA3, 0x1, 0x51 ] + - AbbrCode: 0x0 + - AbbrCode: 0x0 + - AbbrCode: 0x17 + Values: + - Value: 0x90 + - Value: 0x4 + - Value: 0x1 + - Value: 0xEEEEE7 + - Value: 0x1 + BlockData: [ 0x6F ] + - Value: 0x2C5 + - Value: 0x1 + - Value: 0x15C + - Value: 0x214 + - AbbrCode: 0x18 + Values: + - Value: 0x1 + BlockData: [ 0x50 ] + - Value: 0x189 + - Value: 0x2E5 + - Value: 0x1 + - AbbrCode: 0x19 + Values: + - Value: 0x1 + BlockData: [ 0x51 ] + - Value: 0x18E + - Value: 0x1 + - Value: 0x1E + - Value: 0x248 + - AbbrCode: 0x0 + - AbbrCode: 0x6 + Values: + - Value: 0x174 + - Value: 0x5 + - Value: 0x4 + - AbbrCode: 0x16 + Values: + - Value: 0x205 + - AbbrCode: 0x0 + debug_line: + - Length: 249 + Version: 4 + PrologueLength: 42 + MinInstLength: 1 + MaxOpsPerInst: 1 + DefaultIsStmt: 1 + LineBase: 251 + LineRange: 14 + OpcodeBase: 13 + StandardOpcodeLengths: [ 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1 ] + Files: + - Name: stmt_seq_macho.cpp + DirIdx: 0 + ModTime: 0 + Length: 0 + Opcodes: + - Opcode: DW_LNS_set_column + Data: 10 + - Opcode: DW_LNS_set_prologue_end + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 9 + SubOpcode: DW_LNE_set_address + Data: 0 + - Opcode: 0x14 + Data: 0 + - Opcode: DW_LNS_set_column + Data: 3 + - Opcode: DW_LNS_negate_stmt + Data: 0 + - Opcode: 0x4A + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 1 + SubOpcode: DW_LNE_end_sequence + Data: 0 + - Opcode: DW_LNS_set_column + Data: 14 + - Opcode: DW_LNS_set_prologue_end + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 9 + SubOpcode: DW_LNE_set_address + Data: 8 + - Opcode: 0x19 + Data: 0 + - Opcode: DW_LNS_set_column + Data: 5 + - Opcode: DW_LNS_negate_stmt + Data: 0 + - Opcode: 0x4A + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 1 + SubOpcode: 
DW_LNE_end_sequence + Data: 0 + - Opcode: DW_LNS_set_column + Data: 14 + - Opcode: DW_LNS_set_prologue_end + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 9 + SubOpcode: DW_LNE_set_address + Data: 16 + - Opcode: DW_LNS_advance_line + SData: 11 + Data: 0 + - Opcode: DW_LNS_copy + Data: 0 + - Opcode: DW_LNS_set_column + Data: 5 + - Opcode: DW_LNS_negate_stmt + Data: 0 + - Opcode: 0x4A + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 1 + SubOpcode: DW_LNE_end_sequence + Data: 0 + - Opcode: DW_LNS_set_column + Data: 14 + - Opcode: DW_LNS_set_prologue_end + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 9 + SubOpcode: DW_LNE_set_address + Data: 24 + - Opcode: DW_LNS_advance_line + SData: 16 + Data: 0 + - Opcode: DW_LNS_copy + Data: 0 + - Opcode: DW_LNS_set_column + Data: 5 + - Opcode: DW_LNS_negate_stmt + Data: 0 + - Opcode: 0x4A + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 1 + SubOpcode: DW_LNE_end_sequence + Data: 0 + - Opcode: DW_LNS_set_column + Data: 20 + - Opcode: DW_LNS_set_prologue_end + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 9 + SubOpcode: DW_LNE_set_address + Data: 32 + - Opcode: DW_LNS_advance_line + SData: 20 + Data: 0 + - Opcode: DW_LNS_copy + Data: 0 + - Opcode: DW_LNS_set_column + Data: 5 + - Opcode: 0x4B + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 1 + SubOpcode: DW_LNE_end_sequence + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 9 + SubOpcode: DW_LNE_set_address + Data: 40 + - Opcode: DW_LNS_advance_line + SData: 32 + Data: 0 + - Opcode: DW_LNS_copy + Data: 0 + - Opcode: DW_LNS_set_column + Data: 12 + - Opcode: DW_LNS_set_prologue_end + Data: 0 + - Opcode: 0xF4 + Data: 0 + - Opcode: 0xBB + Data: 0 + - Opcode: DW_LNS_set_column + Data: 9 + - Opcode: DW_LNS_negate_stmt + Data: 0 + - Opcode: 0x82 + Data: 0 + - Opcode: DW_LNS_set_column + Data: 12 + - Opcode: DW_LNS_negate_stmt + Data: 0 + - Opcode: 0x4B + Data: 0 + - Opcode: 0xBB + Data: 0 + - Opcode: DW_LNS_set_column + Data: 9 + - Opcode: 0x81 + Data: 0 + - Opcode: DW_LNS_set_column + Data: 18 + - Opcode: 0x4C + Data: 0 + - Opcode: DW_LNS_set_column + Data: 9 + - Opcode: 0xF1 + Data: 0 + - Opcode: DW_LNS_set_column + Data: 5 + - Opcode: DW_LNS_set_epilogue_begin + Data: 0 + - Opcode: 0x4C + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 1 + SubOpcode: DW_LNE_end_sequence + Data: 0 + - Opcode: DW_LNS_set_column + Data: 85 + - Opcode: DW_LNS_set_prologue_end + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 9 + SubOpcode: DW_LNE_set_address + Data: 140 + - Opcode: DW_LNS_advance_line + SData: 29 + Data: 0 + - Opcode: DW_LNS_copy + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 1 + SubOpcode: DW_LNE_end_sequence + Data: 0 + - Opcode: DW_LNS_set_column + Data: 86 + - Opcode: DW_LNS_set_prologue_end + Data: 0 + - Opcode: DW_LNS_extended_op + ExtLen: 9 + SubOpcode: DW_LNE_set_address + Data: 144 + - Opcode: DW_LNS_advance_line + SData: 29 + Data: 0 + - Opcode: DW_LNS_copy + Data: 0 + - Opcode: DW_LNS_advance_pc + Data: 4 + - Opcode: DW_LNS_extended_op + ExtLen: 1 + SubOpcode: DW_LNE_end_sequence + Data: 0 +... 
diff --git a/llvm/test/tools/llvm-ir2vec/entities.ll b/llvm/test/tools/llvm-ir2vec/entities.ll index 4b51adf30bf74..8dbce57302f6f 100644 --- a/llvm/test/tools/llvm-ir2vec/entities.ll +++ b/llvm/test/tools/llvm-ir2vec/entities.ll @@ -1,6 +1,6 @@ ; RUN: llvm-ir2vec entities | FileCheck %s -CHECK: 84 +CHECK: 110 CHECK-NEXT: Ret 0 CHECK-NEXT: Br 1 CHECK-NEXT: Switch 2 @@ -85,3 +85,29 @@ CHECK-NEXT: Function 80 CHECK-NEXT: Pointer 81 CHECK-NEXT: Constant 82 CHECK-NEXT: Variable 83 +CHECK-NEXT: FCMP_false 84 +CHECK-NEXT: FCMP_oeq 85 +CHECK-NEXT: FCMP_ogt 86 +CHECK-NEXT: FCMP_oge 87 +CHECK-NEXT: FCMP_olt 88 +CHECK-NEXT: FCMP_ole 89 +CHECK-NEXT: FCMP_one 90 +CHECK-NEXT: FCMP_ord 91 +CHECK-NEXT: FCMP_uno 92 +CHECK-NEXT: FCMP_ueq 93 +CHECK-NEXT: FCMP_ugt 94 +CHECK-NEXT: FCMP_uge 95 +CHECK-NEXT: FCMP_ult 96 +CHECK-NEXT: FCMP_ule 97 +CHECK-NEXT: FCMP_une 98 +CHECK-NEXT: FCMP_true 99 +CHECK-NEXT: ICMP_eq 100 +CHECK-NEXT: ICMP_ne 101 +CHECK-NEXT: ICMP_ugt 102 +CHECK-NEXT: ICMP_uge 103 +CHECK-NEXT: ICMP_ult 104 +CHECK-NEXT: ICMP_ule 105 +CHECK-NEXT: ICMP_sgt 106 +CHECK-NEXT: ICMP_sge 107 +CHECK-NEXT: ICMP_slt 108 +CHECK-NEXT: ICMP_sle 109 diff --git a/llvm/test/tools/llvm-lib/sym64-threshold.test b/llvm/test/tools/llvm-lib/sym64-threshold.test new file mode 100644 index 0000000000000..76f0a030274ef --- /dev/null +++ b/llvm/test/tools/llvm-lib/sym64-threshold.test @@ -0,0 +1,71 @@ +# RUN: yaml2obj --docnum=1 %s -o %t01234567890234567789.obj +# RUN: yaml2obj --docnum=2 %s -o %t-ec.obj +# RUN: env SYM64_THRESHOLD=100 llvm-lib -machine:amd64 -out:%t.lib %t01234567890234567789.obj +# RUN: llvm-nm --print-armap %t.lib | FileCheck --check-prefix=ARMAP %s +# ARMAP: Archive map +# ARMAP-NEXT: sym + +# RUN: env SYM64_THRESHOLD=100 not llvm-lib -machine:arm64x -out:%t-ec.lib %t-ec.obj %t01234567890234567789.obj 2>&1 | FileCheck %s +# CHECK: Archive is too large: ARM64X does not support archives larger than 4GB + +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_AMD64 + Characteristics: [ ] +sections: + - Name: .text + Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ] + Alignment: 4 + SectionData: '' +symbols: + - Name: .text + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 1 + - !Symbol + Name: sym + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL # (0) + ComplexType: IMAGE_SYM_DTYPE_FUNCTION # (2) + StorageClass: IMAGE_SYM_CLASS_EXTERNAL # (2) +... + +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_ARM64 + Characteristics: [ ] +sections: + - Name: .text + Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ] + Alignment: 4 + SectionData: '' +symbols: + - Name: .text + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 1 + - !Symbol + Name: sym + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL # (0) + ComplexType: IMAGE_SYM_DTYPE_FUNCTION # (2) + StorageClass: IMAGE_SYM_CLASS_EXTERNAL # (2) +... 
diff --git a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s index 911ad1900195c..fe3742c9e4d3b 100644 --- a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s +++ b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s @@ -2649,7 +2649,7 @@ zip2 z31.s, z31.s, z31.s # CHECK-NEXT: 1 5 0.50 2 V1UnitV,V1UnitV01 BFMLALT_ZZZI bfmlalt z0.s, z1.h, z2.h[7] # CHECK-NEXT: 1 5 0.50 2 V1UnitV,V1UnitV01 BFMLALT_ZZZI bfmlalt z0.s, z1.h, z7.h[7] # CHECK-NEXT: 1 5 0.50 2 V1UnitV,V1UnitV01 BFMLALT_ZZZ bfmlalt z14.s, z10.h, z21.h -# CHECK-NEXT: 1 5 0.50 3 V1UnitV,V1UnitV01 BFMMLA_ZZZ bfmmla z0.s, z1.h, z2.h +# CHECK-NEXT: 1 5 0.50 3 V1UnitV,V1UnitV01 BFMMLA_ZZZ_HtoS bfmmla z0.s, z1.h, z2.h # CHECK-NEXT: 1 1 1.00 1 V1UnitI,V1UnitM,V1UnitM0 BIC_PPzPP bic p0.b, p0/z, p0.b, p0.b # CHECK-NEXT: 1 1 1.00 1 V1UnitI,V1UnitM,V1UnitM0 BIC_PPzPP bic p15.b, p15/z, p15.b, p15.b # CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 BIC_ZZZ bic z0.d, z0.d, z0.d @@ -4228,10 +4228,10 @@ zip2 z31.s, z31.s, z31.s # CHECK-NEXT: 1 12 7.00 12 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] SDIV_ZPmZ_S sdiv z0.s, p7/m, z0.s, z31.s # CHECK-NEXT: 1 20 7.00 20 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] SDIVR_ZPmZ_D sdivr z0.d, p7/m, z0.d, z31.d # CHECK-NEXT: 1 12 7.00 12 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] SDIVR_ZPmZ_S sdivr z0.s, p7/m, z0.s, z31.s -# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 SDOT_ZZZI_D sdot z0.d, z1.h, z15.h[1] -# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 SDOT_ZZZ_D sdot z0.d, z1.h, z31.h -# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 SDOT_ZZZ_S sdot z0.s, z1.b, z31.b -# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 SDOT_ZZZI_S sdot z0.s, z1.b, z7.b[3] +# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 SDOT_ZZZI_HtoD sdot z0.d, z1.h, z15.h[1] +# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 SDOT_ZZZ_HtoD sdot z0.d, z1.h, z31.h +# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 SDOT_ZZZ_BtoS sdot z0.s, z1.b, z31.b +# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 SDOT_ZZZI_BtoS sdot z0.s, z1.b, z7.b[3] # CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 SEL_ZPZZ_B sel z23.b, p11, z13.b, z8.b # CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 SEL_ZPZZ_D sel z23.d, p11, z13.d, z8.d # CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 SEL_ZPZZ_H sel z23.h, p11, z13.h, z8.h @@ -4708,11 +4708,11 @@ zip2 z31.s, z31.s, z31.s # CHECK-NEXT: 1 12 7.00 12 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] UDIV_ZPmZ_S udiv z0.s, p7/m, z0.s, z31.s # CHECK-NEXT: 1 20 7.00 20 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] UDIVR_ZPmZ_D udivr z0.d, p7/m, z0.d, z31.d # CHECK-NEXT: 1 12 7.00 12 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] UDIVR_ZPmZ_S udivr z0.s, p7/m, z0.s, z31.s -# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UDOT_ZZZI_D udot z0.d, z1.h, z15.h[1] -# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UDOT_ZZZ_D udot z0.d, z1.h, z31.h +# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UDOT_ZZZI_HtoD udot z0.d, z1.h, z15.h[1] +# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UDOT_ZZZ_HtoD udot z0.d, z1.h, z31.h # CHECK-NEXT: 1 3 1.00 3 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UCVTF_ZPmZ_StoD ucvtf z24.d, p5/m, z9.s -# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 UDOT_ZZZ_S udot z0.s, z1.b, z31.b -# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 UDOT_ZZZI_S udot z0.s, z1.b, z7.b[3] +# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 UDOT_ZZZ_BtoS udot z0.s, z1.b, 
z31.b +# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 UDOT_ZZZI_BtoS udot z0.s, z1.b, z7.b[3] # CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 UMAX_ZI_B umax z0.b, z0.b, #0 # CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 UMAX_ZPmZ_B umax z31.b, p7/m, z31.b, z31.b # CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 UMAX_ZI_B umax z31.b, z31.b, #255 diff --git a/llvm/test/tools/llvm-mca/RISCV/SiFive7/mask.s b/llvm/test/tools/llvm-mca/RISCV/SiFive7/mask.s new file mode 100644 index 0000000000000..486b535382f87 --- /dev/null +++ b/llvm/test/tools/llvm-mca/RISCV/SiFive7/mask.s @@ -0,0 +1,125 @@ +# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py +# RUN: llvm-mca -mtriple=riscv64 -mcpu=sifive-x280 -iterations=1 -instruction-tables=full < %s | FileCheck %s + +vsetvli zero, zero, e32, m1, ta, ma + +vmslt.vv v0, v4, v20 +vmsle.vv v8, v4, v20 +vmsgt.vv v8, v20, v4 +vmsge.vv v8, v20, v4 +vmseq.vv v8, v4, v20 +vmsne.vv v8, v4, v20 +vmsltu.vv v8, v4, v20 +vmsleu.vv v8, v4, v20 +vmsgtu.vv v8, v20, v4 +vmsgeu.vv v8, v20, v4 + +vmflt.vv v0, v4, v20 +vmfle.vv v8, v4, v20 +vmfgt.vv v8, v20, v4 +vmfge.vv v8, v20, v4 +vmfeq.vv v8, v4, v20 +vmfne.vv v8, v4, v20 + +vmadc.vv v8, v4, v20 +vmsbc.vv v8, v4, v20 + +vfirst.m a2, v4 +vpopc.m a2, v4 + +viota.m v8, v4 + +vmsbf.m v8, v4 +vmsif.m v8, v4 +vmsof.m v8, v4 + +# CHECK: Resources: +# CHECK-NEXT: [0] - VLEN512SiFive7FDiv:1 +# CHECK-NEXT: [1] - VLEN512SiFive7IDiv:1 +# CHECK-NEXT: [2] - VLEN512SiFive7PipeA:1 +# CHECK-NEXT: [3] - VLEN512SiFive7PipeAB:2 VLEN512SiFive7PipeA, VLEN512SiFive7PipeB +# CHECK-NEXT: [4] - VLEN512SiFive7PipeB:1 +# CHECK-NEXT: [5] - VLEN512SiFive7VA:1 +# CHECK-NEXT: [6] - VLEN512SiFive7VCQ:1 +# CHECK-NEXT: [7] - VLEN512SiFive7VL:1 +# CHECK-NEXT: [8] - VLEN512SiFive7VS:1 + +# CHECK: Instruction Info: +# CHECK-NEXT: [1]: #uOps +# CHECK-NEXT: [2]: Latency +# CHECK-NEXT: [3]: RThroughput +# CHECK-NEXT: [4]: MayLoad +# CHECK-NEXT: [5]: MayStore +# CHECK-NEXT: [6]: HasSideEffects (U) +# CHECK-NEXT: [7]: Bypass Latency +# CHECK-NEXT: [8]: Resources ( | [] | [, | [] | [, | [] | [,&1 | FileCheck --check-prefixes=GLOB32 --implicit-check-not="in t64" %s # RUN: cp t_all.a t_X32.a # RUN: env OBJECT_MODE=32 llvm-ranlib t_X32.a diff --git a/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading-fail.test b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading-fail.test new file mode 100644 index 0000000000000..391b7ee3facce --- /dev/null +++ b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading-fail.test @@ -0,0 +1,26 @@ +## Test that --offloading with a fatbin works correctly. +# REQUIRES: amdgpu-registered-target + +# RUN: yaml2obj %s -o %t.elf +# RUN: llvm-readobj --offloading %t.elf 2>&1 | \ +# RUN: FileCheck %s --check-prefix=WARN -DFILE_NAME=%t.elf + +# WARN: warning: '{{.*}}': Stream Error: The stream is too short to perform the requested operation. 
+ +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC +Sections: + - Name: .hip_fatbin + Type: SHT_PROGBITS + AddressAlign: 0x1000 + Content: 5F5F434C414E475F4F46464C4F41445F42554E444C455F5F0200000000000000001000000000000000000000000000001B0000000000000075782D2D0010000000000000D00F0000000000001F0000000000000068697076342D616D6467636E2D616D642D616D646873612D2D676678393038000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007F454C460201014003000000000000000300E0000100000000000000000000004000000000000000100C0000000000003005000040003800090040000F000D000600000004000000400000000000000040000000000000004000000000000000F801000000000000F80100000000000008000000000000000100000004000000000000000000000000000000000000000000000000000000C008000000000000C008000000000000001000000000000001000000050000000009000000000000001900000000000000190000000000006C000000000000006C00000000000000001000000000000001000000060000007009000000000000702900000000000070290000000000007000000000000000900600000000000000100000000000000100000006000000E009000000000000E039000000000000E039000000000000000000000000000001000000000000000010000000000000020000000600000070090000000000007029000000000000702900000000000070000000000000007000000000000000080000000000000052E574640400000070090000000000007029000000000000702900000000000070000000000000009006000000000000010000000000000051E57464060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000004000000380200000000000038020000000000003802000000000000340500000000000034050000000000000400000000000000070000001D05000020000000414D44475055000083AE616D646873612E6B65726E656C7391DE0012AB2E616770725F636F756E7400A52E61726773DC001085AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA415F642E636F65726365A72E6F666673657400A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657285AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA425F642E636F65726365A72E6F666673657408A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657284A52E6E616D65A14EA72E6F666673657410A52E73697A6508AB2E76616C75655F6B696E64A862795F76616C756583A72E6F666673657418A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7883A72E6F66667365741CA52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7983A72E6F666673657420A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7A83A72E6F666673657424A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7883A72E6F666673657426A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7983A72E6F666673657428A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7A83A72E6F66667365742AA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72
656D61696E6465725F7883A72E6F66667365742CA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7983A72E6F66667365742EA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7A83A72E6F666673657440A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7883A72E6F666673657448A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7983A72E6F666673657450A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7A83A72E6F666673657458A52E73697A6502AB2E76616C75655F6B696E64B068696464656E5F677269645F64696D73B92E67726F75705F7365676D656E745F66697865645F73697A6500B62E6B65726E6172675F7365676D656E745F616C69676E08B52E6B65726E6172675F7365676D656E745F73697A65CD0118A92E6C616E6775616765A84F70656E434C2043B12E6C616E67756167655F76657273696F6E920200B82E6D61785F666C61745F776F726B67726F75705F73697A65CD0400A52E6E616D65B25F5A3973696D706C65416464506A504B6A6DBB2E707269766174655F7365676D656E745F66697865645F73697A6500AB2E736770725F636F756E740CB12E736770725F7370696C6C5F636F756E7400A72E73796D626F6CB55F5A3973696D706C65416464506A504B6A6D2E6B64B82E756E69666F726D5F776F726B5F67726F75705F73697A6501B32E757365735F64796E616D69635F737461636BC2AB2E766770725F636F756E7404B12E766770725F7370696C6C5F636F756E7400AF2E7761766566726F6E745F73697A6540AD616D646873612E746172676574B9616D6467636E2D616D642D616D646873612D2D676678393038AE616D646873612E76657273696F6E92010200000000000000000000000000000000000000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E03900000000000001000000000000000100000001000000010000001A000000000008400000D20001000000360A4A7A5238A4D3F113F4DD04000000040000000200000001000000000000000300000000000000000000000000000000000000005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F623730363264386333326134613933330000000000000000000000000000000000000000000000000000000000000000000000180100000000000080100000000000000000000000000000000000000000000000000000000000004000AF008C000000090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000C20102C02400000002000AC0000000008002027E7FC08CBF07FF0486FFFF0000060406920600006800008FD2820002000302067E0200043203030638008050DC02007F020102067E0000003203030238008050DC00007F03700F8CBF03050468008070DC00027F00000081BF00000000060000000000000070070000000000000B000000000000001800000000000000050000000000000020080000000000000A000000000000004600000000000000F5FEFF6F00000000D0070000000000000400000000000000F807000000000000000000000000000000000000000000004C696E6B65723A20414D44204C4C442031392E302E3000414D4420636C616E672076657273696F6E2031392E302E306769742028202032343231322063393630313665636534313337356462646438663037356266333762643666633333323230376233290000414D4420636C616E672076657273696F6E2031382E302E3067697420287373683A2F2F6765727269746769742F6C696768746E696E672F65632F6C6C766D2D70726F6A65637420616D642D6D61696E6C696E652D6F70656E20323431373620663935303039613166393032313232343865313036333964653837653635636163616338643961372900000000000000000000000000000000000000000000000000460000000002080070290000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E0390000000000000100000000000000002E6E6F7465002E64796E73796D002E676E752E68617368002E68617368002E64796E737472002E726F64617461002E74657874002E64796E616D6963002E726
56C726F5F70616464696E67002E627373002E636F6D6D656E74002E73796D746162002E7368737472746162002E73747274616200005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F62373036326438633332613461393333005F44594E414D494300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000070000000200000000000000380200000000000038020000000000003405000000000000000000000000000004000000000000000000000000000000070000000B00000002000000000000007007000000000000700700000000000060000000000000000500000001000000080000000000000018000000000000000F000000F6FFFF6F0200000000000000D007000000000000D007000000000000280000000000000002000000000000000800000000000000000000000000000019000000050000000200000000000000F807000000000000F80700000000000028000000000000000200000000000000040000000000000004000000000000001F000000030000000200000000000000200800000000000020080000000000004600000000000000000000000000000001000000000000000000000000000000270000000100000002000000000000008008000000000000800800000000000040000000000000000000000000000000400000000000000000000000000000002F000000010000000600000000000000001900000000000000090000000000006C00000000000000000000000000000000010000000000000000000000000000350000000600000003000000000000007029000000000000700900000000000070000000000000000500000000000000080000000000000010000000000000003E000000080000000300000000000000E029000000000000E00900000000000020060000000000000000000000000000010000000000000000000000000000004D000000080000000300000000000000E039000000000000E0090000000000000100000000000000000000000000000001000000000000000000000000000000520000000100000030000000000000000000000000000000E009000000000000F0000000000000000000000000000000010000000000000001000000000000005B0000000200000000000000000000000000000000000000D00A00000000000078000000000000000E0000000200000008000000000000001800000000000000630000000300000000000000000000000000000000000000480B00000000000075000000000000000000000000000000010000000000000000000000000000006D0000000300000000000000000000000000000000000000BD0B0000000000004F00000000000000000000000000000001000000000000000000000000000000 + - Name: .hipFatBinSegment + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC ] + Address: 0x202FD0 + AddressAlign: 0x8 + Content: '465049480100000000102000000000000000000000000000' +... diff --git a/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading.test b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading.test new file mode 100644 index 0000000000000..21ee60d2ea829 --- /dev/null +++ b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading.test @@ -0,0 +1,27 @@ +## Test that --offloading with a fatbin works correctly. 
+# REQUIRES: amdgpu-registered-target + +# RUN: yaml2obj %s -o %t.elf +# RUN: llvm-readobj --offloading %t.elf | \ +# RUN: FileCheck %s -DFILE_NAME=%t.elf + +# CHECK: host-x86_64-unknown-linux-- file://[[FILE_NAME]]#offset=8192&size=0 +# CHECK-NEXT: hipv4-amdgcn-amd-amdhsa--gfx908 file://[[FILE_NAME]]#offset=8192&size=4048 + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC +Sections: + - Name: .hip_fatbin + Type: SHT_PROGBITS + AddressAlign: 0x1000 + Content: 5F5F434C414E475F4F46464C4F41445F42554E444C455F5F0200000000000000001000000000000000000000000000001B00000000000000686F73742D7838365F36342D756E6B6E6F776E2D6C696E75782D2D0010000000000000D00F0000000000001F0000000000000068697076342D616D6467636E2D616D642D616D646873612D2D6766783930380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007F454C460201014003000000000000000300E0000100000000000000000000004000000000000000100C0000000000003005000040003800090040000F000D000600000004000000400000000000000040000000000000004000000000000000F801000000000000F80100000000000008000000000000000100000004000000000000000000000000000000000000000000000000000000C008000000000000C008000000000000001000000000000001000000050000000009000000000000001900000000000000190000000000006C000000000000006C00000000000000001000000000000001000000060000007009000000000000702900000000000070290000000000007000000000000000900600000000000000100000000000000100000006000000E009000000000000E039000000000000E039000000000000000000000000000001000000000000000010000000000000020000000600000070090000000000007029000000000000702900000000000070000000000000007000000000000000080000000000000052E574640400000070090000000000007029000000000000702900000000000070000000000000009006000000000000010000000000000051E57464060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000004000000380200000000000038020000000000003802000000000000340500000000000034050000000000000400000000000000070000001D05000020000000414D44475055000083AE616D646873612E6B65726E656C7391DE0012AB2E616770725F636F756E7400A52E61726773DC001085AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA415F642E636F65726365A72E6F666673657400A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657285AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA425F642E636F65726365A72E6F666673657408A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657284A52E6E616D65A14EA72E6F666673657410A52E73697A6508AB2E76616C75655F6B696E64A862795F76616C756583A72E6F666673657418A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7883A72E6F66667365741CA52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7983A72E6F666673657420A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7
A83A72E6F666673657424A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7883A72E6F666673657426A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7983A72E6F666673657428A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7A83A72E6F66667365742AA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7883A72E6F66667365742CA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7983A72E6F66667365742EA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7A83A72E6F666673657440A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7883A72E6F666673657448A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7983A72E6F666673657450A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7A83A72E6F666673657458A52E73697A6502AB2E76616C75655F6B696E64B068696464656E5F677269645F64696D73B92E67726F75705F7365676D656E745F66697865645F73697A6500B62E6B65726E6172675F7365676D656E745F616C69676E08B52E6B65726E6172675F7365676D656E745F73697A65CD0118A92E6C616E6775616765A84F70656E434C2043B12E6C616E67756167655F76657273696F6E920200B82E6D61785F666C61745F776F726B67726F75705F73697A65CD0400A52E6E616D65B25F5A3973696D706C65416464506A504B6A6DBB2E707269766174655F7365676D656E745F66697865645F73697A6500AB2E736770725F636F756E740CB12E736770725F7370696C6C5F636F756E7400A72E73796D626F6CB55F5A3973696D706C65416464506A504B6A6D2E6B64B82E756E69666F726D5F776F726B5F67726F75705F73697A6501B32E757365735F64796E616D69635F737461636BC2AB2E766770725F636F756E7404B12E766770725F7370696C6C5F636F756E7400AF2E7761766566726F6E745F73697A6540AD616D646873612E746172676574B9616D6467636E2D616D642D616D646873612D2D676678393038AE616D646873612E76657273696F6E92010200000000000000000000000000000000000000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E03900000000000001000000000000000100000001000000010000001A000000000008400000D20001000000360A4A7A5238A4D3F113F4DD04000000040000000200000001000000000000000300000000000000000000000000000000000000005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F623730363264386333326134613933330000000000000000000000000000000000000000000000000000000000000000000000180100000000000080100000000000000000000000000000000000000000000000000000000000004000AF008C000000090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000C20102C02400000002000AC0000000008002027E7FC08CBF07FF0486FFFF0000060406920600006800008FD2820002000302067E0200043203030638008050DC02007F020102067E0000003203030238008050DC00007F03700F8CBF03050468008070DC00027F00000081BF00000000060000000000000070070000000000000B000000000000001800000000000000050000000000000020080000000000000A000000000000004600000000000000F5FEFF6F00000000D0070000000000000400000000000000F807000000000000000000000000000000000000000000004C696E6B65723A20414D44204C4C442031392E302E3000414D4420636C616E672076657273696F6E2031392E302E306769742028202032343231322063393630313665636534313337356462646438663037356266333762643666633333323230376233290000414D4420636C616E672076657273696F6E2031382E302E3067697420287373683A2F2F6765727269746769742F6C696768746E696E672F65632F6C6C766D2D70726F6A65637420616D642D6D61696E6C696E652D6F70656E203234313736206639353030396131663930323132323438653130363339646538376536356361636163386439613729
00000000000000000000000000000000000000000000000000460000000002080070290000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E0390000000000000100000000000000002E6E6F7465002E64796E73796D002E676E752E68617368002E68617368002E64796E737472002E726F64617461002E74657874002E64796E616D6963002E72656C726F5F70616464696E67002E627373002E636F6D6D656E74002E73796D746162002E7368737472746162002E73747274616200005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F62373036326438633332613461393333005F44594E414D494300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000070000000200000000000000380200000000000038020000000000003405000000000000000000000000000004000000000000000000000000000000070000000B00000002000000000000007007000000000000700700000000000060000000000000000500000001000000080000000000000018000000000000000F000000F6FFFF6F0200000000000000D007000000000000D007000000000000280000000000000002000000000000000800000000000000000000000000000019000000050000000200000000000000F807000000000000F80700000000000028000000000000000200000000000000040000000000000004000000000000001F000000030000000200000000000000200800000000000020080000000000004600000000000000000000000000000001000000000000000000000000000000270000000100000002000000000000008008000000000000800800000000000040000000000000000000000000000000400000000000000000000000000000002F000000010000000600000000000000001900000000000000090000000000006C00000000000000000000000000000000010000000000000000000000000000350000000600000003000000000000007029000000000000700900000000000070000000000000000500000000000000080000000000000010000000000000003E000000080000000300000000000000E029000000000000E00900000000000020060000000000000000000000000000010000000000000000000000000000004D000000080000000300000000000000E039000000000000E0090000000000000100000000000000000000000000000001000000000000000000000000000000520000000100000030000000000000000000000000000000E009000000000000F0000000000000000000000000000000010000000000000001000000000000005B0000000200000000000000000000000000000000000000D00A00000000000078000000000000000E0000000200000008000000000000001800000000000000630000000300000000000000000000000000000000000000480B00000000000075000000000000000000000000000000010000000000000000000000000000006D0000000300000000000000000000000000000000000000BD0B0000000000004F00000000000000000000000000000001000000000000000000000000000000 + - Name: .hipFatBinSegment + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC ] + Address: 0x202FD0 + AddressAlign: 0x8 + Content: '465049480100000000102000000000000000000000000000' +... diff --git a/llvm/test/tools/llvm-remarkutil/Inputs/filter.yaml b/llvm/test/tools/llvm-remarkutil/Inputs/filter.yaml new file mode 100644 index 0000000000000..89def7fc4c0e5 --- /dev/null +++ b/llvm/test/tools/llvm-remarkutil/Inputs/filter.yaml @@ -0,0 +1,28 @@ +--- !Passed +Pass: pass1 +Name: Remark1 +DebugLoc: { File: 'path/to/func1.c', Line: 1, Column: 2 } +Function: func1 +Args: + - String: ' text' + - arg1: argval1 +... +--- !Missed +Pass: pass2 +Name: Remark2 +DebugLoc: { File: 'path/to/func2.c', Line: 1, Column: 2 } +Function: func2 +Args: + - String: ' text' + - arg2: argval2 +... 
+--- !Analysis +Pass: pass3 +Name: Remark3 +DebugLoc: { File: 'path/to/func3.c', Line: 1, Column: 2 } +Function: func3 +Args: + - String: ' text' + - arg3: argval3 + DebugLoc: { File: 'path/to/func3.c', Line: 2, Column: 2 } +... diff --git a/llvm/test/tools/llvm-remarkutil/broken-bitstream-remark-magic.test b/llvm/test/tools/llvm-remarkutil/broken-bitstream-remark-magic.test index f469eadc07f99..c21dbd72a2a18 100644 --- a/llvm/test/tools/llvm-remarkutil/broken-bitstream-remark-magic.test +++ b/llvm/test/tools/llvm-remarkutil/broken-bitstream-remark-magic.test @@ -2,5 +2,6 @@ RUN: not llvm-remarkutil instruction-count %p/Inputs/broken-remark-magic.bitstre RUN: not llvm-remarkutil instruction-mix %p/Inputs/broken-remark-magic.bitstream -o - 2>&1 | FileCheck %s RUN: not llvm-remarkutil annotation-count --annotation-type=remark %p/Inputs/broken-remark-magic.bitstream -o - 2>&1 | FileCheck %s RUN: not llvm-remarkutil count %p/Inputs/broken-remark-magic.bitstream -o - 2>&1 | FileCheck %s +RUN: not llvm-remarkutil filter %p/Inputs/broken-remark-magic.bitstream -o - 2>&1 | FileCheck %s CHECK: error: Automatic detection of remark format failed. Unknown magic number: '1234' diff --git a/llvm/test/tools/llvm-remarkutil/broken-bitstream-remark.test b/llvm/test/tools/llvm-remarkutil/broken-bitstream-remark.test index 78011aece08f7..339f082d4825b 100644 --- a/llvm/test/tools/llvm-remarkutil/broken-bitstream-remark.test +++ b/llvm/test/tools/llvm-remarkutil/broken-bitstream-remark.test @@ -2,5 +2,6 @@ RUN: not llvm-remarkutil bitstream2yaml %p/Inputs/broken-remark -o - 2>&1 | File RUN: not llvm-remarkutil instruction-count --parser=bitstream %p/Inputs/broken-remark -o - 2>&1 | FileCheck %s RUN: not llvm-remarkutil annotation-count --parser=bitstream --annotation-type=remark %p/Inputs/broken-remark -o - 2>&1 | FileCheck %s RUN: not llvm-remarkutil count --parser=bitstream %p/Inputs/broken-remark -o - 2>&1 | FileCheck %s +RUN: not llvm-remarkutil filter --parser=bitstream %p/Inputs/broken-remark -o - 2>&1 | FileCheck %s CHECK: error: Unknown magic number: expecting RMRK, got --- . 
diff --git a/llvm/test/tools/llvm-remarkutil/broken-yaml-remark.test b/llvm/test/tools/llvm-remarkutil/broken-yaml-remark.test index 464d0b80c4ad0..9da3de4034b0f 100644 --- a/llvm/test/tools/llvm-remarkutil/broken-yaml-remark.test +++ b/llvm/test/tools/llvm-remarkutil/broken-yaml-remark.test @@ -3,5 +3,6 @@ RUN: not llvm-remarkutil instruction-count --parser=yaml %p/Inputs/broken-remark RUN: not llvm-remarkutil instruction-mix --parser=yaml %p/Inputs/broken-remark -o - 2>&1 | FileCheck %s RUN: not llvm-remarkutil annotation-count --parser=yaml --annotation-type=remark %p/Inputs/broken-remark -o - 2>&1 | FileCheck %s RUN: not llvm-remarkutil count --parser=yaml %p/Inputs/broken-remark -o - 2>&1 | FileCheck %s +RUN: not llvm-remarkutil filter --parser=yaml %p/Inputs/broken-remark -o - 2>&1 | FileCheck %s CHECK: error: Type, Pass, Name or Function missing diff --git a/llvm/test/tools/llvm-remarkutil/empty-file.test b/llvm/test/tools/llvm-remarkutil/empty-file.test index d9820a088ea8f..9b2b000e9c24b 100644 --- a/llvm/test/tools/llvm-remarkutil/empty-file.test +++ b/llvm/test/tools/llvm-remarkutil/empty-file.test @@ -3,16 +3,19 @@ RUN: not llvm-remarkutil instruction-count --parser=yaml %p/Inputs/empty-file -o RUN: not llvm-remarkutil instruction-mix --parser=yaml %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --check-prefix=YAMLPARSER RUN: not llvm-remarkutil annotation-count --parser=yaml --annotation-type=remark %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --check-prefix=YAMLPARSER RUN: not llvm-remarkutil count --parser=yaml %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --check-prefix=YAMLPARSER +RUN: not llvm-remarkutil filter --parser=yaml %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --check-prefix=YAMLPARSER RUN: llvm-remarkutil bitstream2yaml %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=BITSTREAM2YAML RUN: llvm-remarkutil instruction-count --parser=bitstream %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=SIZEBITSTREAM RUN: llvm-remarkutil instruction-mix --parser=bitstream %p/Inputs/empty-file --report_style=csv -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=MIXBITSTREAM RUN: llvm-remarkutil annotation-count --parser=bitstream --annotation-type=remark %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=ANNOTATIONBITSTREAM RUN: llvm-remarkutil count --parser=bitstream %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=COUNTBITSTREAM +RUN: llvm-remarkutil filter --parser=bitstream %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=FILTERBITSTREAM ; Parser format auto-detection should treat empty files as bitstream files RUN: llvm-remarkutil instruction-count %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=SIZEBITSTREAM RUN: llvm-remarkutil instruction-mix %p/Inputs/empty-file --report_style=csv -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=MIXBITSTREAM RUN: llvm-remarkutil annotation-count --annotation-type=remark %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=ANNOTATIONBITSTREAM RUN: llvm-remarkutil count %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=COUNTBITSTREAM +RUN: llvm-remarkutil filter %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow-empty --check-prefix=FILTERBITSTREAM ; YAMLPARSER: error: document root is not of mapping type. 
@@ -30,3 +33,5 @@ RUN: llvm-remarkutil count %p/Inputs/empty-file -o - 2>&1 | FileCheck %s --allow ; MIXBITSTREAM-LABEL: Instruction,Count ; MIXBITSTREAM-EMPTY: + +; FILTERBITSTREAM-NOT: {{.}} diff --git a/llvm/test/tools/llvm-remarkutil/filter.test b/llvm/test/tools/llvm-remarkutil/filter.test new file mode 100644 index 0000000000000..8304b9f0129a8 --- /dev/null +++ b/llvm/test/tools/llvm-remarkutil/filter.test @@ -0,0 +1,59 @@ +RUN: llvm-remarkutil filter %p/Inputs/filter.yaml | diff %p/Inputs/filter.yaml - +RUN: llvm-remarkutil filter --rfunction=func %p/Inputs/filter.yaml | diff %p/Inputs/filter.yaml - +RUN: llvm-remarkutil filter --rremark-name=Remark %p/Inputs/filter.yaml | diff %p/Inputs/filter.yaml - +RUN: llvm-remarkutil filter --rpass-name=pass %p/Inputs/filter.yaml | diff %p/Inputs/filter.yaml - +RUN: llvm-remarkutil filter --rfilter-arg-by=argval %p/Inputs/filter.yaml | diff %p/Inputs/filter.yaml - + +RUN: llvm-remarkutil filter --rfunction=unc1 %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK1 +RUN: llvm-remarkutil filter --rremark-name=ark3 %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK3 +RUN: llvm-remarkutil filter --rpass-name=s1 %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK1 +RUN: llvm-remarkutil filter --filter-arg-by=argval2 %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK2 +RUN: llvm-remarkutil filter --function=func1 %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK1 +RUN: llvm-remarkutil filter --pass-name=pass2 %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK2 +RUN: llvm-remarkutil filter --remark-name=Remark3 %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK3 +RUN: llvm-remarkutil filter --function=func1 --pass-name=pass1 --remark-name=Remark1 %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK1 +RUN: llvm-remarkutil filter --remark-type=passed %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK1 +RUN: llvm-remarkutil filter --remark-type=missed %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK2 +RUN: llvm-remarkutil filter --remark-type=analysis %p/Inputs/filter.yaml | FileCheck %s --strict-whitespace --check-prefix=REMARK3 + +RUN: llvm-remarkutil yaml2bitstream -o %t.opt.bitstream %p/Inputs/filter.yaml +RUN: llvm-remarkutil filter --function=func1 %t.opt.bitstream | FileCheck %s --strict-whitespace --check-prefix=REMARK1 + +RUN: llvm-remarkutil filter --function=func1 %t.opt.bitstream -o %t.r1.opt.bitstream +RUN: llvm-remarkutil bitstream2yaml %t.r1.opt.bitstream | FileCheck %s --strict-whitespace --check-prefix=REMARK1 + +RUN: llvm-remarkutil filter --function=func %p/Inputs/filter.yaml | FileCheck %s --allow-empty --strict-whitespace --check-prefix=EMPTY + +; REMARK1: --- !Passed +; REMARK1-NEXT: Pass: pass1 +; REMARK1-NEXT: Name: Remark1 +; REMARK1-NEXT: DebugLoc: { File: 'path/to/func1.c', Line: 1, Column: 2 } +; REMARK1-NEXT: Function: func1 +; REMARK1-NEXT: Args: +; REMARK1-NEXT: - String: ' text' +; REMARK1-NEXT: - arg1: argval1 +; REMARK1-NEXT: ... 
+; REMARK1-NOT: {{.}} +; REMARK2: --- !Missed +; REMARK2-NEXT: Pass: pass2 +; REMARK2-NEXT: Name: Remark2 +; REMARK2-NEXT: DebugLoc: { File: 'path/to/func2.c', Line: 1, Column: 2 } +; REMARK2-NEXT: Function: func2 +; REMARK2-NEXT: Args: +; REMARK2-NEXT: - String: ' text' +; REMARK2-NEXT: - arg2: argval2 +; REMARK2-NEXT: ... +; REMARK2-NOT: {{.}} +; REMARK3: --- !Analysis +; REMARK3-NEXT: Pass: pass3 +; REMARK3-NEXT: Name: Remark3 +; REMARK3-NEXT: DebugLoc: { File: 'path/to/func3.c', Line: 1, Column: 2 } +; REMARK3-NEXT: Function: func3 +; REMARK3-NEXT: Args: +; REMARK3-NEXT: - String: ' text' +; REMARK3-NEXT: - arg3: argval3 +; REMARK3-NEXT: DebugLoc: { File: 'path/to/func3.c', Line: 2, Column: 2 } +; REMARK3-NEXT: ... +; REMARK3-NOT: {{.}} + +; EMPTY-NOT: {{.}} diff --git a/llvm/test/tools/llvm-size/macho-pagezero.test b/llvm/test/tools/llvm-size/macho-pagezero.test new file mode 100644 index 0000000000000..db69fd0c9daeb --- /dev/null +++ b/llvm/test/tools/llvm-size/macho-pagezero.test @@ -0,0 +1,108 @@ +## Test the --exclude-pagezero option to skip __PAGEZERO segment in Mach-O files. + +# RUN: yaml2obj %s --docnum=1 -o %t-pagezero.o +# RUN: llvm-size %t-pagezero.o | \ +# RUN: FileCheck %s --check-prefix=NORMAL --match-full-lines +# RUN: llvm-size --exclude-pagezero %t-pagezero.o | \ +# RUN: FileCheck %s --check-prefix=SKIP --match-full-lines + +# RUN: yaml2obj %s --docnum=2 -o %t-pagezero32.o +# RUN: llvm-size %t-pagezero32.o | \ +# RUN: FileCheck %s --check-prefix=NORMAL --match-full-lines +# RUN: llvm-size --exclude-pagezero %t-pagezero32.o | \ +# RUN: FileCheck %s --check-prefix=SKIP --match-full-lines + +# NORMAL:__TEXT __DATA __OBJC others dec hex +# NORMAL-NEXT:20 100 0 4096 4216 1078 + +# SKIP:__TEXT __DATA __OBJC others dec hex +# SKIP-NEXT:20 100 0 0 120 78 + +--- !mach-o +FileHeader: + magic: 0xFEEDFACF + cputype: 0x100000C + cpusubtype: 0x0 + filetype: 0x2 + ncmds: 3 + sizeofcmds: 216 + flags: 0x2000 + reserved: 0x0 +LoadCommands: + - cmd: LC_SEGMENT_64 + cmdsize: 72 + segname: __PAGEZERO + vmaddr: 0x0 + vmsize: 4096 + fileoff: 0 + filesize: 0 + maxprot: 0 + initprot: 0 + nsects: 0 + flags: 0 + - cmd: LC_SEGMENT_64 + cmdsize: 72 + segname: __TEXT + vmaddr: 0x100000000 + vmsize: 20 + fileoff: 248 + filesize: 20 + maxprot: 7 + initprot: 5 + nsects: 0 + flags: 0 + - cmd: LC_SEGMENT_64 + cmdsize: 72 + segname: __DATA + vmaddr: 0x100001000 + vmsize: 100 + fileoff: 268 + filesize: 100 + maxprot: 7 + initprot: 3 + nsects: 0 + flags: 0 + +--- !mach-o +FileHeader: + magic: 0xFEEDFACE + cputype: 0x7 + cpusubtype: 0x3 + filetype: 0x2 + ncmds: 3 + sizeofcmds: 168 + flags: 0x2000 +LoadCommands: + - cmd: LC_SEGMENT + cmdsize: 56 + segname: __PAGEZERO + vmaddr: 0x0 + vmsize: 4096 + fileoff: 0 + filesize: 0 + maxprot: 0 + initprot: 0 + nsects: 0 + flags: 0 + - cmd: LC_SEGMENT + cmdsize: 56 + segname: __TEXT + vmaddr: 0x1000 + vmsize: 20 + fileoff: 196 + filesize: 20 + maxprot: 7 + initprot: 5 + nsects: 0 + flags: 0 + - cmd: LC_SEGMENT + cmdsize: 56 + segname: __DATA + vmaddr: 0x2000 + vmsize: 100 + fileoff: 216 + filesize: 100 + maxprot: 7 + initprot: 3 + nsects: 0 + flags: 0 diff --git a/llvm/test/tools/llvm-strings/eof.test b/llvm/test/tools/llvm-strings/eof.test index 19b5adc85ef0e..a2a3fc77db9a1 100644 --- a/llvm/test/tools/llvm-strings/eof.test +++ b/llvm/test/tools/llvm-strings/eof.test @@ -1,11 +1,13 @@ ## Show that llvm-strings prints the last string in the input even if no ## unprintable character follows it. 
-RUN: echo -n abcdefg | llvm-strings - | FileCheck %s --check-prefix=PRINT +RUN: echo -n abcdefg > %t +RUN: llvm-strings %t - | FileCheck %s --check-prefix=PRINT PRINT: abcdefg ## Show that llvm-strings does not print the last string in the input if it is ## too short and no unprintable character follows it. -RUN: echo -n abc | llvm-strings - | FileCheck --allow-empty %s --check-prefix=NOPRINT +RUN: echo -n abc > %t +RUN: llvm-strings %t - | FileCheck --allow-empty %s --check-prefix=NOPRINT NOPRINT-NOT: {{.}} diff --git a/llvm/test/tools/llvm-strings/stdin.test b/llvm/test/tools/llvm-strings/stdin.test index 06dcd194a3016..63f7194ab973d 100644 --- a/llvm/test/tools/llvm-strings/stdin.test +++ b/llvm/test/tools/llvm-strings/stdin.test @@ -1,3 +1,5 @@ +# XFAIL: system-aix + ## Show that llvm-strings can handle stdin input properly. ## Case 1: output with single string. diff --git a/llvm/test/tools/llvm-strings/whitespace.test b/llvm/test/tools/llvm-strings/whitespace.test index 7963ff73fb837..c51e5e62724cc 100644 --- a/llvm/test/tools/llvm-strings/whitespace.test +++ b/llvm/test/tools/llvm-strings/whitespace.test @@ -1,3 +1,4 @@ ## Show that the default output format matches GNU strings. -RUN: echo -n abcd | llvm-strings - | FileCheck %s --strict-whitespace --implicit-check-not={{.}} +RUN: echo -n abcd > %t +RUN: llvm-strings %t - | FileCheck %s --strict-whitespace --implicit-check-not={{.}} CHECK: {{^}}abcd{{$}} diff --git a/llvm/test/tools/llvm-tli-checker/ifuncs.yaml b/llvm/test/tools/llvm-tli-checker/ifuncs.yaml new file mode 100644 index 0000000000000..4eae66c3051a7 --- /dev/null +++ b/llvm/test/tools/llvm-tli-checker/ifuncs.yaml @@ -0,0 +1,39 @@ +# REQUIRES: x86-registered-target +# +# stpncpy is declared as available in TargetLibraryInfo for FreeBSD, but +# llvm-tli-checker won't be able to find it unless it knows how to check ifuncs. +# This test makes sure that llvm-tli-checker supports processing ifuncs. 
+# +# RUN: yaml2obj %s -o=%t1 +# RUN: llvm-tli-checker --triple=x86_64-unknown-freebsd %t1 | FileCheck %s +# +# CHECK: == Total TLI yes SDK yes: 1 +# + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + OSABI: ELFOSABI_FREEBSD + Type: ET_DYN + Machine: EM_X86_64 +Sections: + - Name: .text + Type: SHT_PROGBITS + - Name: .rela.plt + Type: SHT_RELA + Flags: [ SHF_ALLOC, SHF_INFO_LINK ] + Address: 0x3CA20 + Link: .dynsym + AddressAlign: 0x8 + Relocations: + - Offset: 0x1E2C68 + Symbol: stpncpy + Type: R_X86_64_JUMP_SLOT +DynamicSymbols: + - Name: stpncpy + Type: STT_GNU_IFUNC + Section: .text + Binding: STB_WEAK + Value: 0x15D5E0 + Size: 0xC diff --git a/llvm/test/tools/yaml2obj/empty-or-invalid-doc.yaml b/llvm/test/tools/yaml2obj/empty-or-invalid-doc.yaml index 31a0973209f36..6da53297696ad 100644 --- a/llvm/test/tools/yaml2obj/empty-or-invalid-doc.yaml +++ b/llvm/test/tools/yaml2obj/empty-or-invalid-doc.yaml @@ -1,5 +1,6 @@ # RUN: echo "" | not yaml2obj 2>&1 | FileCheck %s -# RUN: echo -n "" | not yaml2obj 2>&1 | FileCheck %s +# RUN: echo -n "" > %t +# RUN: not yaml2obj %t 2>&1 | FileCheck %s # RUN: echo " " | not yaml2obj 2>&1 | FileCheck %s # RUN: echo " " | not yaml2obj 2>&1 | FileCheck %s # CHECK: yaml2obj: error: unknown document type diff --git a/llvm/tools/dsymutil/Reproducer.cpp b/llvm/tools/dsymutil/Reproducer.cpp index 31e49cdd0518c..0c1d3f90af299 100644 --- a/llvm/tools/dsymutil/Reproducer.cpp +++ b/llvm/tools/dsymutil/Reproducer.cpp @@ -37,9 +37,10 @@ ReproducerGenerate::ReproducerGenerate(std::error_code &EC, int Argc, char **Argv, bool GenerateOnExit) : Root(createReproducerDir(EC)), GenerateOnExit(GenerateOnExit) { llvm::append_range(Args, ArrayRef(Argv, Argc)); + auto RealFS = vfs::getRealFileSystem(); if (!Root.empty()) - FC = std::make_shared(Root, Root); - VFS = FileCollector::createCollectorVFS(vfs::getRealFileSystem(), FC); + FC = std::make_shared(Root, Root, RealFS); + VFS = FileCollector::createCollectorVFS(std::move(RealFS), FC); } ReproducerGenerate::~ReproducerGenerate() { diff --git a/llvm/tools/llvm-cgdata/llvm-cgdata.cpp b/llvm/tools/llvm-cgdata/llvm-cgdata.cpp index 047557e5a7fae..ea89c4d1dd25f 100644 --- a/llvm/tools/llvm-cgdata/llvm-cgdata.cpp +++ b/llvm/tools/llvm-cgdata/llvm-cgdata.cpp @@ -83,7 +83,9 @@ static CGDataAction Action; static std::optional OutputFormat; static std::vector InputFilenames; +namespace llvm { extern cl::opt IndexedCodeGenDataLazyLoading; +} // end namespace llvm static void exitWithError(Twine Message, StringRef Whence = "", StringRef Hint = "") { diff --git a/llvm/tools/llvm-config/llvm-config.cpp b/llvm/tools/llvm-config/llvm-config.cpp index 49df8fdcb7f79..7f8c55ab00989 100644 --- a/llvm/tools/llvm-config/llvm-config.cpp +++ b/llvm/tools/llvm-config/llvm-config.cpp @@ -357,18 +357,18 @@ int main(int argc, char **argv) { ActivePrefix = CurrentExecPrefix; { SmallString<256> Path(LLVM_INSTALL_INCLUDEDIR); - sys::fs::make_absolute(ActivePrefix, Path); + sys::path::make_absolute(ActivePrefix, Path); ActiveIncludeDir = std::string(Path); } { SmallString<256> Path(LLVM_TOOLS_INSTALL_DIR); - sys::fs::make_absolute(ActivePrefix, Path); + sys::path::make_absolute(ActivePrefix, Path); ActiveBinDir = std::string(Path); } ActiveLibDir = ActivePrefix + "/lib" + LLVM_LIBDIR_SUFFIX; { SmallString<256> Path(LLVM_INSTALL_PACKAGE_DIR); - sys::fs::make_absolute(ActivePrefix, Path); + sys::path::make_absolute(ActivePrefix, Path); ActiveCMakeDir = std::string(Path); } ActiveIncludeOption = "-I" + ActiveIncludeDir; diff --git 
a/llvm/tools/llvm-cov/CoverageExporterJson.cpp b/llvm/tools/llvm-cov/CoverageExporterJson.cpp index 06de33dc070e0..4c07c05396732 100644 --- a/llvm/tools/llvm-cov/CoverageExporterJson.cpp +++ b/llvm/tools/llvm-cov/CoverageExporterJson.cpp @@ -21,7 +21,8 @@ // -- Branches: array => List of Branches in the file // -- Branch: dict => Describes a branch of the file with counters // -- MCDC Records: array => List of MCDC records in the file -// -- MCDC Values: array => List of T/F covered condition values +// -- MCDC Values: array => List of T/F covered condition values and +// list of executed test vectors // -- Segments: array => List of Segments contained in the file // -- Segment: dict => Describes a segment of the file with a counter // -- Expansions: array => List of expansion records @@ -62,7 +63,7 @@ #include /// The semantic version combined as a string. -#define LLVM_COVERAGE_EXPORT_JSON_STR "3.0.1" +#define LLVM_COVERAGE_EXPORT_JSON_STR "3.1.0" /// Unique type identifier for JSON coverage export. #define LLVM_COVERAGE_EXPORT_JSON_TYPE_STR "llvm.coverage.json.export" @@ -108,13 +109,43 @@ json::Array gatherConditions(const coverage::MCDCRecord &Record) { return Conditions; } +json::Value renderCondState(const coverage::MCDCRecord::CondState CondState) { + switch (CondState) { + case coverage::MCDCRecord::MCDC_DontCare: + return json::Value(nullptr); + case coverage::MCDCRecord::MCDC_True: + return json::Value(true); + case coverage::MCDCRecord::MCDC_False: + return json::Value(false); + } + llvm_unreachable("Unknown llvm::coverage::MCDCRecord::CondState enum"); +} + +json::Array gatherTestVectors(coverage::MCDCRecord &Record) { + json::Array TestVectors; + unsigned NumConditions = Record.getNumConditions(); + for (unsigned tv = 0; tv < Record.getNumTestVectors(); tv++) { + + json::Array TVConditions; + for (unsigned c = 0; c < NumConditions; c++) + TVConditions.push_back(renderCondState(Record.getTVCondition(tv, c))); + + TestVectors.push_back( + json::Object({{"executed", json::Value(true)}, + {"result", renderCondState(Record.getTVResult(tv))}, + {"conditions", std::move(TVConditions)}})); + } + return TestVectors; +} + json::Array renderMCDCRecord(const coverage::MCDCRecord &Record) { const llvm::coverage::CounterMappingRegion &CMR = Record.getDecisionRegion(); const auto [TrueDecisions, FalseDecisions] = Record.getDecisions(); - return json::Array({CMR.LineStart, CMR.ColumnStart, CMR.LineEnd, - CMR.ColumnEnd, TrueDecisions, FalseDecisions, - CMR.FileID, CMR.ExpandedFileID, int64_t(CMR.Kind), - gatherConditions(Record)}); + return json::Array( + {CMR.LineStart, CMR.ColumnStart, CMR.LineEnd, CMR.ColumnEnd, + TrueDecisions, FalseDecisions, CMR.FileID, CMR.ExpandedFileID, + int64_t(CMR.Kind), gatherConditions(Record), + gatherTestVectors(const_cast(Record))}); } json::Array renderRegions(ArrayRef Regions) { @@ -216,32 +247,28 @@ json::Object renderSummary(const FileCoverageSummary &Summary) { } json::Array renderFileExpansions(const coverage::CoverageMapping &Coverage, - const coverage::CoverageData &FileCoverage, - const FileCoverageSummary &FileReport) { + const coverage::CoverageData &FileCoverage) { json::Array ExpansionArray; for (const auto &Expansion : FileCoverage.getExpansions()) ExpansionArray.push_back(renderExpansion(Coverage, Expansion)); return ExpansionArray; } -json::Array renderFileSegments(const coverage::CoverageData &FileCoverage, - const FileCoverageSummary &FileReport) { +json::Array renderFileSegments(const coverage::CoverageData &FileCoverage) { json::Array 
SegmentArray; for (const auto &Segment : FileCoverage) SegmentArray.push_back(renderSegment(Segment)); return SegmentArray; } -json::Array renderFileBranches(const coverage::CoverageData &FileCoverage, - const FileCoverageSummary &FileReport) { +json::Array renderFileBranches(const coverage::CoverageData &FileCoverage) { json::Array BranchArray; for (const auto &Branch : FileCoverage.getBranches()) BranchArray.push_back(renderBranch(Branch)); return BranchArray; } -json::Array renderFileMCDC(const coverage::CoverageData &FileCoverage, - const FileCoverageSummary &FileReport) { +json::Array renderFileMCDC(const coverage::CoverageData &FileCoverage) { json::Array MCDCRecordArray; for (const auto &Record : FileCoverage.getMCDCRecords()) MCDCRecordArray.push_back(renderMCDCRecord(Record)); @@ -256,12 +283,11 @@ json::Object renderFile(const coverage::CoverageMapping &Coverage, if (!Options.ExportSummaryOnly) { // Calculate and render detailed coverage information for given file. auto FileCoverage = Coverage.getCoverageForFile(Filename); - File["segments"] = renderFileSegments(FileCoverage, FileReport); - File["branches"] = renderFileBranches(FileCoverage, FileReport); - File["mcdc_records"] = renderFileMCDC(FileCoverage, FileReport); + File["segments"] = renderFileSegments(FileCoverage); + File["branches"] = renderFileBranches(FileCoverage); + File["mcdc_records"] = renderFileMCDC(FileCoverage); if (!Options.SkipExpansions) { - File["expansions"] = - renderFileExpansions(Coverage, FileCoverage, FileReport); + File["expansions"] = renderFileExpansions(Coverage, FileCoverage); } } File["summary"] = renderSummary(FileReport); diff --git a/llvm/tools/llvm-dwp/llvm-dwp.cpp b/llvm/tools/llvm-dwp/llvm-dwp.cpp index 61ba82d0634ac..31bad2d68982b 100644 --- a/llvm/tools/llvm-dwp/llvm-dwp.cpp +++ b/llvm/tools/llvm-dwp/llvm-dwp.cpp @@ -94,7 +94,7 @@ getDWOFilenames(StringRef ExecFilename) { dwarf::toString(Die.find(dwarf::DW_AT_comp_dir), ""); if (!DWOCompDir.empty()) { SmallString<16> DWOPath(DWOName); - sys::fs::make_absolute(DWOCompDir, DWOPath); + sys::path::make_absolute(DWOCompDir, DWOPath); if (!sys::fs::exists(DWOPath) && sys::fs::exists(DWOName)) DWOPaths.push_back(std::move(DWOName)); else diff --git a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp index aabebf0cc90a9..434449c7c5117 100644 --- a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp +++ b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp @@ -162,8 +162,8 @@ class IR2VecTool { for (const BasicBlock &BB : F) { for (const auto &I : BB.instructionsWithoutDebug()) { - unsigned Opcode = Vocabulary::getSlotIndex(I.getOpcode()); - unsigned TypeID = Vocabulary::getSlotIndex(I.getType()->getTypeID()); + unsigned Opcode = Vocabulary::getIndex(I.getOpcode()); + unsigned TypeID = Vocabulary::getIndex(I.getType()->getTypeID()); // Add "Next" relationship with previous instruction if (HasPrevOpcode) { @@ -184,7 +184,7 @@ class IR2VecTool { // Add "Arg" relationships unsigned ArgIndex = 0; for (const Use &U : I.operands()) { - unsigned OperandID = Vocabulary::getSlotIndex(*U); + unsigned OperandID = Vocabulary::getIndex(*U.get()); unsigned RelationID = ArgRelation + ArgIndex; OS << Opcode << '\t' << OperandID << '\t' << RelationID << '\n'; diff --git a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp index 31bf6a9d2d9c8..e09ddb45da6e9 100644 --- a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp +++ b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp @@ -1519,10 +1519,10 @@ class MemoryMatcher { static StringRef 
detectStubKind(const Session::MemoryRegionInfo &Stub) { using namespace support::endian; - auto Armv7MovWTle = byte_swap(0xe300c000); - auto Armv7BxR12le = byte_swap(0xe12fff1c); - auto Thumbv7MovWTle = byte_swap(0x0c00f240); - auto Thumbv7BxR12le = byte_swap(0x4760); + auto Armv7MovWTle = byte_swap(0xe300c000, endianness::little); + auto Armv7BxR12le = byte_swap(0xe12fff1c, endianness::little); + auto Thumbv7MovWTle = byte_swap(0x0c00f240, endianness::little); + auto Thumbv7BxR12le = byte_swap(0x4760, endianness::little); MemoryMatcher M(Stub.getContent()); if (M.matchMask(Thumbv7MovWTle)) { diff --git a/llvm/tools/llvm-mca/llvm-mca.cpp b/llvm/tools/llvm-mca/llvm-mca.cpp index a4194da4a7b63..a64539c09b81e 100644 --- a/llvm/tools/llvm-mca/llvm-mca.cpp +++ b/llvm/tools/llvm-mca/llvm-mca.cpp @@ -668,7 +668,7 @@ int main(int argc, char **argv) { return 1; } - IPP->postProcessInstruction(Inst.get(), MCI); + IPP->postProcessInstruction(*Inst.get(), MCI); InstToInstruments.insert({&MCI, Instruments}); LoweredSequence.emplace_back(std::move(Inst.get())); } diff --git a/llvm/tools/llvm-offload-wrapper/llvm-offload-wrapper.cpp b/llvm/tools/llvm-offload-wrapper/llvm-offload-wrapper.cpp index 9dac1646b1e26..d65b402571ae8 100644 --- a/llvm/tools/llvm-offload-wrapper/llvm-offload-wrapper.cpp +++ b/llvm/tools/llvm-offload-wrapper/llvm-offload-wrapper.cpp @@ -84,6 +84,10 @@ static Error wrapImages(ArrayRef> BuffersToWrap) { M, BuffersToWrap.front(), offloading::getOffloadEntryArray(M))) return Err; break; + case llvm::object::OFK_SYCL: + if (Error Err = offloading::wrapSYCLBinaries(M, BuffersToWrap.front())) + return Err; + break; default: return createStringError(getOffloadKindName(Kind) + " wrapping is not supported"); diff --git a/llvm/tools/llvm-opt-report/OptReport.cpp b/llvm/tools/llvm-opt-report/OptReport.cpp index 68ed92c8bacea..e4b4fc287b8c1 100644 --- a/llvm/tools/llvm-opt-report/OptReport.cpp +++ b/llvm/tools/llvm-opt-report/OptReport.cpp @@ -274,7 +274,7 @@ static bool writeReport(LocationInfoTy &LocationInfo) { for (auto &FI : LocationInfo) { SmallString<128> FileName(FI.first); if (!InputRelDir.empty()) - sys::fs::make_absolute(InputRelDir, FileName); + sys::path::make_absolute(InputRelDir, FileName); const auto &FileInfo = FI.second; diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp index 3092bfd42e25e..ab93316907cc6 100644 --- a/llvm/tools/llvm-readobj/ELFDumper.cpp +++ b/llvm/tools/llvm-readobj/ELFDumper.cpp @@ -1133,180 +1133,175 @@ const EnumEntry C6000ElfOSABI[] = { {"C6000_LINUX", "Linux C6000", ELF::ELFOSABI_C6000_LINUX} }; +// clang-format off const EnumEntry ElfMachineType[] = { - ENUM_ENT(EM_NONE, "None"), - ENUM_ENT(EM_M32, "WE32100"), - ENUM_ENT(EM_SPARC, "Sparc"), - ENUM_ENT(EM_386, "Intel 80386"), - ENUM_ENT(EM_68K, "MC68000"), - ENUM_ENT(EM_88K, "MC88000"), - ENUM_ENT(EM_IAMCU, "EM_IAMCU"), - ENUM_ENT(EM_860, "Intel 80860"), - ENUM_ENT(EM_MIPS, "MIPS R3000"), - ENUM_ENT(EM_S370, "IBM System/370"), - ENUM_ENT(EM_MIPS_RS3_LE, "MIPS R3000 little-endian"), - ENUM_ENT(EM_PARISC, "HPPA"), - ENUM_ENT(EM_VPP500, "Fujitsu VPP500"), - ENUM_ENT(EM_SPARC32PLUS, "Sparc v8+"), - ENUM_ENT(EM_960, "Intel 80960"), - ENUM_ENT(EM_PPC, "PowerPC"), - ENUM_ENT(EM_PPC64, "PowerPC64"), - ENUM_ENT(EM_S390, "IBM S/390"), - ENUM_ENT(EM_SPU, "SPU"), - ENUM_ENT(EM_V800, "NEC V800 series"), - ENUM_ENT(EM_FR20, "Fujistsu FR20"), - ENUM_ENT(EM_RH32, "TRW RH-32"), - ENUM_ENT(EM_RCE, "Motorola RCE"), - ENUM_ENT(EM_ARM, "ARM"), - ENUM_ENT(EM_ALPHA, "EM_ALPHA"), - 
ENUM_ENT(EM_SH, "Hitachi SH"), - ENUM_ENT(EM_SPARCV9, "Sparc v9"), - ENUM_ENT(EM_TRICORE, "Siemens Tricore"), - ENUM_ENT(EM_ARC, "ARC"), - ENUM_ENT(EM_H8_300, "Hitachi H8/300"), - ENUM_ENT(EM_H8_300H, "Hitachi H8/300H"), - ENUM_ENT(EM_H8S, "Hitachi H8S"), - ENUM_ENT(EM_H8_500, "Hitachi H8/500"), - ENUM_ENT(EM_IA_64, "Intel IA-64"), - ENUM_ENT(EM_MIPS_X, "Stanford MIPS-X"), - ENUM_ENT(EM_COLDFIRE, "Motorola Coldfire"), - ENUM_ENT(EM_68HC12, "Motorola MC68HC12 Microcontroller"), - ENUM_ENT(EM_MMA, "Fujitsu Multimedia Accelerator"), - ENUM_ENT(EM_PCP, "Siemens PCP"), - ENUM_ENT(EM_NCPU, "Sony nCPU embedded RISC processor"), - ENUM_ENT(EM_NDR1, "Denso NDR1 microprocesspr"), - ENUM_ENT(EM_STARCORE, "Motorola Star*Core processor"), - ENUM_ENT(EM_ME16, "Toyota ME16 processor"), - ENUM_ENT(EM_ST100, "STMicroelectronics ST100 processor"), - ENUM_ENT(EM_TINYJ, "Advanced Logic Corp. TinyJ embedded processor"), - ENUM_ENT(EM_X86_64, "Advanced Micro Devices X86-64"), - ENUM_ENT(EM_PDSP, "Sony DSP processor"), - ENUM_ENT(EM_PDP10, "Digital Equipment Corp. PDP-10"), - ENUM_ENT(EM_PDP11, "Digital Equipment Corp. PDP-11"), - ENUM_ENT(EM_FX66, "Siemens FX66 microcontroller"), - ENUM_ENT(EM_ST9PLUS, "STMicroelectronics ST9+ 8/16 bit microcontroller"), - ENUM_ENT(EM_ST7, "STMicroelectronics ST7 8-bit microcontroller"), - ENUM_ENT(EM_68HC16, "Motorola MC68HC16 Microcontroller"), - ENUM_ENT(EM_68HC11, "Motorola MC68HC11 Microcontroller"), - ENUM_ENT(EM_68HC08, "Motorola MC68HC08 Microcontroller"), - ENUM_ENT(EM_68HC05, "Motorola MC68HC05 Microcontroller"), - ENUM_ENT(EM_SVX, "Silicon Graphics SVx"), - ENUM_ENT(EM_ST19, "STMicroelectronics ST19 8-bit microcontroller"), - ENUM_ENT(EM_VAX, "Digital VAX"), - ENUM_ENT(EM_CRIS, "Axis Communications 32-bit embedded processor"), - ENUM_ENT(EM_JAVELIN, "Infineon Technologies 32-bit embedded cpu"), - ENUM_ENT(EM_FIREPATH, "Element 14 64-bit DSP processor"), - ENUM_ENT(EM_ZSP, "LSI Logic's 16-bit DSP processor"), - ENUM_ENT(EM_MMIX, "Donald Knuth's educational 64-bit processor"), - ENUM_ENT(EM_HUANY, - "Harvard Universitys's machine-independent object format"), - ENUM_ENT(EM_PRISM, "Vitesse Prism"), - ENUM_ENT(EM_AVR, "Atmel AVR 8-bit microcontroller"), - ENUM_ENT(EM_FR30, "Fujitsu FR30"), - ENUM_ENT(EM_D10V, "Mitsubishi D10V"), - ENUM_ENT(EM_D30V, "Mitsubishi D30V"), - ENUM_ENT(EM_V850, "NEC v850"), - ENUM_ENT(EM_M32R, "Renesas M32R (formerly Mitsubishi M32r)"), - ENUM_ENT(EM_MN10300, "Matsushita MN10300"), - ENUM_ENT(EM_MN10200, "Matsushita MN10200"), - ENUM_ENT(EM_PJ, "picoJava"), - ENUM_ENT(EM_OPENRISC, "OpenRISC 32-bit embedded processor"), - ENUM_ENT(EM_ARC_COMPACT, "EM_ARC_COMPACT"), - ENUM_ENT(EM_XTENSA, "Tensilica Xtensa Processor"), - ENUM_ENT(EM_VIDEOCORE, "Alphamosaic VideoCore processor"), - ENUM_ENT(EM_TMM_GPP, "Thompson Multimedia General Purpose Processor"), - ENUM_ENT(EM_NS32K, "National Semiconductor 32000 series"), - ENUM_ENT(EM_TPC, "Tenor Network TPC processor"), - ENUM_ENT(EM_SNP1K, "EM_SNP1K"), - ENUM_ENT(EM_ST200, "STMicroelectronics ST200 microcontroller"), - ENUM_ENT(EM_IP2K, "Ubicom IP2xxx 8-bit microcontrollers"), - ENUM_ENT(EM_MAX, "MAX Processor"), - ENUM_ENT(EM_CR, "National Semiconductor CompactRISC"), - ENUM_ENT(EM_F2MC16, "Fujitsu F2MC16"), - ENUM_ENT(EM_MSP430, "Texas Instruments msp430 microcontroller"), - ENUM_ENT(EM_BLACKFIN, "Analog Devices Blackfin"), - ENUM_ENT(EM_SE_C33, "S1C33 Family of Seiko Epson processors"), - ENUM_ENT(EM_SEP, "Sharp embedded microprocessor"), - ENUM_ENT(EM_ARCA, "Arca RISC microprocessor"), - 
ENUM_ENT(EM_UNICORE, "Unicore"), - ENUM_ENT(EM_EXCESS, "eXcess 16/32/64-bit configurable embedded CPU"), - ENUM_ENT(EM_DXP, "Icera Semiconductor Inc. Deep Execution Processor"), - ENUM_ENT(EM_ALTERA_NIOS2, "Altera Nios"), - ENUM_ENT(EM_CRX, "National Semiconductor CRX microprocessor"), - ENUM_ENT(EM_XGATE, "Motorola XGATE embedded processor"), - ENUM_ENT(EM_C166, "Infineon Technologies xc16x"), - ENUM_ENT(EM_M16C, "Renesas M16C"), - ENUM_ENT(EM_DSPIC30F, - "Microchip Technology dsPIC30F Digital Signal Controller"), - ENUM_ENT(EM_CE, "Freescale Communication Engine RISC core"), - ENUM_ENT(EM_M32C, "Renesas M32C"), - ENUM_ENT(EM_TSK3000, "Altium TSK3000 core"), - ENUM_ENT(EM_RS08, "Freescale RS08 embedded processor"), - ENUM_ENT(EM_SHARC, "EM_SHARC"), - ENUM_ENT(EM_ECOG2, "Cyan Technology eCOG2 microprocessor"), - ENUM_ENT(EM_SCORE7, "SUNPLUS S+Core"), - ENUM_ENT(EM_DSP24, "New Japan Radio (NJR) 24-bit DSP Processor"), - ENUM_ENT(EM_VIDEOCORE3, "Broadcom VideoCore III processor"), - ENUM_ENT(EM_LATTICEMICO32, "Lattice Mico32"), - ENUM_ENT(EM_SE_C17, "Seiko Epson C17 family"), - ENUM_ENT(EM_TI_C6000, "Texas Instruments TMS320C6000 DSP family"), - ENUM_ENT(EM_TI_C2000, "Texas Instruments TMS320C2000 DSP family"), - ENUM_ENT(EM_TI_C5500, "Texas Instruments TMS320C55x DSP family"), - ENUM_ENT(EM_MMDSP_PLUS, - "STMicroelectronics 64bit VLIW Data Signal Processor"), - ENUM_ENT(EM_CYPRESS_M8C, "Cypress M8C microprocessor"), - ENUM_ENT(EM_R32C, "Renesas R32C series microprocessors"), - ENUM_ENT(EM_TRIMEDIA, "NXP Semiconductors TriMedia architecture family"), - ENUM_ENT(EM_HEXAGON, "Qualcomm Hexagon"), - ENUM_ENT(EM_8051, "Intel 8051 and variants"), - ENUM_ENT(EM_STXP7X, "STMicroelectronics STxP7x family"), - ENUM_ENT( - EM_NDS32, - "Andes Technology compact code size embedded RISC processor family"), - ENUM_ENT(EM_ECOG1, "Cyan Technology eCOG1 microprocessor"), - // FIXME: Following EM_ECOG1X definitions is dead code since EM_ECOG1X has - // an identical number to EM_ECOG1. - ENUM_ENT(EM_ECOG1X, "Cyan Technology eCOG1X family"), - ENUM_ENT(EM_MAXQ30, "Dallas Semiconductor MAXQ30 Core microcontrollers"), - ENUM_ENT(EM_XIMO16, "New Japan Radio (NJR) 16-bit DSP Processor"), - ENUM_ENT(EM_MANIK, "M2000 Reconfigurable RISC Microprocessor"), - ENUM_ENT(EM_CRAYNV2, "Cray Inc. 
NV2 vector architecture"), - ENUM_ENT(EM_RX, "Renesas RX"), - ENUM_ENT(EM_METAG, "Imagination Technologies Meta processor architecture"), - ENUM_ENT(EM_MCST_ELBRUS, - "MCST Elbrus general purpose hardware architecture"), - ENUM_ENT(EM_ECOG16, "Cyan Technology eCOG16 family"), - ENUM_ENT(EM_CR16, "National Semiconductor CompactRISC 16-bit processor"), - ENUM_ENT(EM_ETPU, "Freescale Extended Time Processing Unit"), - ENUM_ENT(EM_SLE9X, "Infineon Technologies SLE9X core"), - ENUM_ENT(EM_L10M, "EM_L10M"), - ENUM_ENT(EM_K10M, "EM_K10M"), - ENUM_ENT(EM_AARCH64, "AArch64"), - ENUM_ENT(EM_AVR32, "Atmel Corporation 32-bit microprocessor family"), - ENUM_ENT(EM_STM8, "STMicroeletronics STM8 8-bit microcontroller"), - ENUM_ENT(EM_TILE64, "Tilera TILE64 multicore architecture family"), - ENUM_ENT(EM_TILEPRO, "Tilera TILEPro multicore architecture family"), - ENUM_ENT(EM_MICROBLAZE, - "Xilinx MicroBlaze 32-bit RISC soft processor core"), - ENUM_ENT(EM_CUDA, "NVIDIA CUDA architecture"), - ENUM_ENT(EM_TILEGX, "Tilera TILE-Gx multicore architecture family"), - ENUM_ENT(EM_CLOUDSHIELD, "EM_CLOUDSHIELD"), - ENUM_ENT(EM_COREA_1ST, "EM_COREA_1ST"), - ENUM_ENT(EM_COREA_2ND, "EM_COREA_2ND"), - ENUM_ENT(EM_ARC_COMPACT2, "EM_ARC_COMPACT2"), - ENUM_ENT(EM_OPEN8, "EM_OPEN8"), - ENUM_ENT(EM_RL78, "Renesas RL78"), - ENUM_ENT(EM_VIDEOCORE5, "Broadcom VideoCore V processor"), - ENUM_ENT(EM_78KOR, "EM_78KOR"), - ENUM_ENT(EM_56800EX, "EM_56800EX"), - ENUM_ENT(EM_AMDGPU, "EM_AMDGPU"), - ENUM_ENT(EM_RISCV, "RISC-V"), - ENUM_ENT(EM_LANAI, "EM_LANAI"), - ENUM_ENT(EM_BPF, "EM_BPF"), - ENUM_ENT(EM_VE, "NEC SX-Aurora Vector Engine"), - ENUM_ENT(EM_LOONGARCH, "LoongArch"), - ENUM_ENT(EM_INTELGT, "Intel Graphics Technology"), + ENUM_ENT(EM_NONE, "None"), + ENUM_ENT(EM_M32, "WE32100"), + ENUM_ENT(EM_SPARC, "Sparc"), + ENUM_ENT(EM_386, "Intel 80386"), + ENUM_ENT(EM_68K, "MC68000"), + ENUM_ENT(EM_88K, "MC88000"), + ENUM_ENT(EM_IAMCU, "EM_IAMCU"), + ENUM_ENT(EM_860, "Intel 80860"), + ENUM_ENT(EM_MIPS, "MIPS R3000"), + ENUM_ENT(EM_S370, "IBM System/370"), + ENUM_ENT(EM_MIPS_RS3_LE, "MIPS R3000 little-endian"), + ENUM_ENT(EM_PARISC, "HPPA"), + ENUM_ENT(EM_VPP500, "Fujitsu VPP500"), + ENUM_ENT(EM_SPARC32PLUS, "Sparc v8+"), + ENUM_ENT(EM_960, "Intel 80960"), + ENUM_ENT(EM_PPC, "PowerPC"), + ENUM_ENT(EM_PPC64, "PowerPC64"), + ENUM_ENT(EM_S390, "IBM S/390"), + ENUM_ENT(EM_SPU, "SPU"), + ENUM_ENT(EM_V800, "NEC V800 series"), + ENUM_ENT(EM_FR20, "Fujistsu FR20"), + ENUM_ENT(EM_RH32, "TRW RH-32"), + ENUM_ENT(EM_RCE, "Motorola RCE"), + ENUM_ENT(EM_ARM, "ARM"), + ENUM_ENT(EM_ALPHA, "EM_ALPHA"), + ENUM_ENT(EM_SH, "Hitachi SH"), + ENUM_ENT(EM_SPARCV9, "Sparc v9"), + ENUM_ENT(EM_TRICORE, "Siemens Tricore"), + ENUM_ENT(EM_ARC, "ARC"), + ENUM_ENT(EM_H8_300, "Hitachi H8/300"), + ENUM_ENT(EM_H8_300H, "Hitachi H8/300H"), + ENUM_ENT(EM_H8S, "Hitachi H8S"), + ENUM_ENT(EM_H8_500, "Hitachi H8/500"), + ENUM_ENT(EM_IA_64, "Intel IA-64"), + ENUM_ENT(EM_MIPS_X, "Stanford MIPS-X"), + ENUM_ENT(EM_COLDFIRE, "Motorola Coldfire"), + ENUM_ENT(EM_68HC12, "Motorola MC68HC12 Microcontroller"), + ENUM_ENT(EM_MMA, "Fujitsu Multimedia Accelerator"), + ENUM_ENT(EM_PCP, "Siemens PCP"), + ENUM_ENT(EM_NCPU, "Sony nCPU embedded RISC processor"), + ENUM_ENT(EM_NDR1, "Denso NDR1 microprocesspr"), + ENUM_ENT(EM_STARCORE, "Motorola Star*Core processor"), + ENUM_ENT(EM_ME16, "Toyota ME16 processor"), + ENUM_ENT(EM_ST100, "STMicroelectronics ST100 processor"), + ENUM_ENT(EM_TINYJ, "Advanced Logic Corp. 
TinyJ embedded processor"), + ENUM_ENT(EM_X86_64, "Advanced Micro Devices X86-64"), + ENUM_ENT(EM_PDSP, "Sony DSP processor"), + ENUM_ENT(EM_PDP10, "Digital Equipment Corp. PDP-10"), + ENUM_ENT(EM_PDP11, "Digital Equipment Corp. PDP-11"), + ENUM_ENT(EM_FX66, "Siemens FX66 microcontroller"), + ENUM_ENT(EM_ST9PLUS, "STMicroelectronics ST9+ 8/16 bit microcontroller"), + ENUM_ENT(EM_ST7, "STMicroelectronics ST7 8-bit microcontroller"), + ENUM_ENT(EM_68HC16, "Motorola MC68HC16 Microcontroller"), + ENUM_ENT(EM_68HC11, "Motorola MC68HC11 Microcontroller"), + ENUM_ENT(EM_68HC08, "Motorola MC68HC08 Microcontroller"), + ENUM_ENT(EM_68HC05, "Motorola MC68HC05 Microcontroller"), + ENUM_ENT(EM_SVX, "Silicon Graphics SVx"), + ENUM_ENT(EM_ST19, "STMicroelectronics ST19 8-bit microcontroller"), + ENUM_ENT(EM_VAX, "Digital VAX"), + ENUM_ENT(EM_CRIS, "Axis Communications 32-bit embedded processor"), + ENUM_ENT(EM_JAVELIN, "Infineon Technologies 32-bit embedded cpu"), + ENUM_ENT(EM_FIREPATH, "Element 14 64-bit DSP processor"), + ENUM_ENT(EM_ZSP, "LSI Logic's 16-bit DSP processor"), + ENUM_ENT(EM_MMIX, "Donald Knuth's educational 64-bit processor"), + ENUM_ENT(EM_HUANY, "Harvard Universitys's machine-independent object format"), + ENUM_ENT(EM_PRISM, "Vitesse Prism"), + ENUM_ENT(EM_AVR, "Atmel AVR 8-bit microcontroller"), + ENUM_ENT(EM_FR30, "Fujitsu FR30"), + ENUM_ENT(EM_D10V, "Mitsubishi D10V"), + ENUM_ENT(EM_D30V, "Mitsubishi D30V"), + ENUM_ENT(EM_V850, "NEC v850"), + ENUM_ENT(EM_M32R, "Renesas M32R (formerly Mitsubishi M32r)"), + ENUM_ENT(EM_MN10300, "Matsushita MN10300"), + ENUM_ENT(EM_MN10200, "Matsushita MN10200"), + ENUM_ENT(EM_PJ, "picoJava"), + ENUM_ENT(EM_OPENRISC, "OpenRISC 32-bit embedded processor"), + ENUM_ENT(EM_ARC_COMPACT, "EM_ARC_COMPACT"), + ENUM_ENT(EM_XTENSA, "Tensilica Xtensa Processor"), + ENUM_ENT(EM_VIDEOCORE, "Alphamosaic VideoCore processor"), + ENUM_ENT(EM_TMM_GPP, "Thompson Multimedia General Purpose Processor"), + ENUM_ENT(EM_NS32K, "National Semiconductor 32000 series"), + ENUM_ENT(EM_TPC, "Tenor Network TPC processor"), + ENUM_ENT(EM_SNP1K, "EM_SNP1K"), + ENUM_ENT(EM_ST200, "STMicroelectronics ST200 microcontroller"), + ENUM_ENT(EM_IP2K, "Ubicom IP2xxx 8-bit microcontrollers"), + ENUM_ENT(EM_MAX, "MAX Processor"), + ENUM_ENT(EM_CR, "National Semiconductor CompactRISC"), + ENUM_ENT(EM_F2MC16, "Fujitsu F2MC16"), + ENUM_ENT(EM_MSP430, "Texas Instruments msp430 microcontroller"), + ENUM_ENT(EM_BLACKFIN, "Analog Devices Blackfin"), + ENUM_ENT(EM_SE_C33, "S1C33 Family of Seiko Epson processors"), + ENUM_ENT(EM_SEP, "Sharp embedded microprocessor"), + ENUM_ENT(EM_ARCA, "Arca RISC microprocessor"), + ENUM_ENT(EM_UNICORE, "Unicore"), + ENUM_ENT(EM_EXCESS, "eXcess 16/32/64-bit configurable embedded CPU"), + ENUM_ENT(EM_DXP, "Icera Semiconductor Inc. 
Deep Execution Processor"), + ENUM_ENT(EM_ALTERA_NIOS2, "Altera Nios"), + ENUM_ENT(EM_CRX, "National Semiconductor CRX microprocessor"), + ENUM_ENT(EM_XGATE, "Motorola XGATE embedded processor"), + ENUM_ENT(EM_C166, "Infineon Technologies xc16x"), + ENUM_ENT(EM_M16C, "Renesas M16C"), + ENUM_ENT(EM_DSPIC30F, "Microchip Technology dsPIC30F Digital Signal Controller"), + ENUM_ENT(EM_CE, "Freescale Communication Engine RISC core"), + ENUM_ENT(EM_M32C, "Renesas M32C"), + ENUM_ENT(EM_TSK3000, "Altium TSK3000 core"), + ENUM_ENT(EM_RS08, "Freescale RS08 embedded processor"), + ENUM_ENT(EM_SHARC, "EM_SHARC"), + ENUM_ENT(EM_ECOG2, "Cyan Technology eCOG2 microprocessor"), + ENUM_ENT(EM_SCORE7, "SUNPLUS S+Core"), + ENUM_ENT(EM_DSP24, "New Japan Radio (NJR) 24-bit DSP Processor"), + ENUM_ENT(EM_VIDEOCORE3, "Broadcom VideoCore III processor"), + ENUM_ENT(EM_LATTICEMICO32, "Lattice Mico32"), + ENUM_ENT(EM_SE_C17, "Seiko Epson C17 family"), + ENUM_ENT(EM_TI_C6000, "Texas Instruments TMS320C6000 DSP family"), + ENUM_ENT(EM_TI_C2000, "Texas Instruments TMS320C2000 DSP family"), + ENUM_ENT(EM_TI_C5500, "Texas Instruments TMS320C55x DSP family"), + ENUM_ENT(EM_MMDSP_PLUS, "STMicroelectronics 64bit VLIW Data Signal Processor"), + ENUM_ENT(EM_CYPRESS_M8C, "Cypress M8C microprocessor"), + ENUM_ENT(EM_R32C, "Renesas R32C series microprocessors"), + ENUM_ENT(EM_TRIMEDIA, "NXP Semiconductors TriMedia architecture family"), + ENUM_ENT(EM_HEXAGON, "Qualcomm Hexagon"), + ENUM_ENT(EM_8051, "Intel 8051 and variants"), + ENUM_ENT(EM_STXP7X, "STMicroelectronics STxP7x family"), + ENUM_ENT(EM_NDS32, "Andes Technology compact code size embedded RISC processor family"), + ENUM_ENT(EM_ECOG1, "Cyan Technology eCOG1 microprocessor"), + // FIXME: Following EM_ECOG1X definitions is dead code since EM_ECOG1X has + // an identical number to EM_ECOG1. + ENUM_ENT(EM_ECOG1X, "Cyan Technology eCOG1X family"), + ENUM_ENT(EM_MAXQ30, "Dallas Semiconductor MAXQ30 Core microcontrollers"), + ENUM_ENT(EM_XIMO16, "New Japan Radio (NJR) 16-bit DSP Processor"), + ENUM_ENT(EM_MANIK, "M2000 Reconfigurable RISC Microprocessor"), + ENUM_ENT(EM_CRAYNV2, "Cray Inc. 
NV2 vector architecture"), + ENUM_ENT(EM_RX, "Renesas RX"), + ENUM_ENT(EM_METAG, "Imagination Technologies Meta processor architecture"), + ENUM_ENT(EM_MCST_ELBRUS, "MCST Elbrus general purpose hardware architecture"), + ENUM_ENT(EM_ECOG16, "Cyan Technology eCOG16 family"), + ENUM_ENT(EM_CR16, "National Semiconductor CompactRISC 16-bit processor"), + ENUM_ENT(EM_ETPU, "Freescale Extended Time Processing Unit"), + ENUM_ENT(EM_SLE9X, "Infineon Technologies SLE9X core"), + ENUM_ENT(EM_L10M, "EM_L10M"), + ENUM_ENT(EM_K10M, "EM_K10M"), + ENUM_ENT(EM_AARCH64, "AArch64"), + ENUM_ENT(EM_AVR32, "Atmel Corporation 32-bit microprocessor family"), + ENUM_ENT(EM_STM8, "STMicroeletronics STM8 8-bit microcontroller"), + ENUM_ENT(EM_TILE64, "Tilera TILE64 multicore architecture family"), + ENUM_ENT(EM_TILEPRO, "Tilera TILEPro multicore architecture family"), + ENUM_ENT(EM_MICROBLAZE, "Xilinx MicroBlaze 32-bit RISC soft processor core"), + ENUM_ENT(EM_CUDA, "NVIDIA CUDA architecture"), + ENUM_ENT(EM_TILEGX, "Tilera TILE-Gx multicore architecture family"), + ENUM_ENT(EM_CLOUDSHIELD, "EM_CLOUDSHIELD"), + ENUM_ENT(EM_COREA_1ST, "EM_COREA_1ST"), + ENUM_ENT(EM_COREA_2ND, "EM_COREA_2ND"), + ENUM_ENT(EM_ARC_COMPACT2, "EM_ARC_COMPACT2"), + ENUM_ENT(EM_OPEN8, "EM_OPEN8"), + ENUM_ENT(EM_RL78, "Renesas RL78"), + ENUM_ENT(EM_VIDEOCORE5, "Broadcom VideoCore V processor"), + ENUM_ENT(EM_78KOR, "EM_78KOR"), + ENUM_ENT(EM_56800EX, "EM_56800EX"), + ENUM_ENT(EM_AMDGPU, "EM_AMDGPU"), + ENUM_ENT(EM_RISCV, "RISC-V"), + ENUM_ENT(EM_LANAI, "EM_LANAI"), + ENUM_ENT(EM_BPF, "EM_BPF"), + ENUM_ENT(EM_VE, "NEC SX-Aurora Vector Engine"), + ENUM_ENT(EM_LOONGARCH, "LoongArch"), + ENUM_ENT(EM_INTELGT, "Intel Graphics Technology"), }; +// clang-format on const EnumEntry ElfSymbolBindings[] = { {"Local", "LOCAL", ELF::STB_LOCAL}, diff --git a/llvm/tools/llvm-readobj/ObjDumper.cpp b/llvm/tools/llvm-readobj/ObjDumper.cpp index bd670aeab9ed8..0b59dd48d4203 100644 --- a/llvm/tools/llvm-readobj/ObjDumper.cpp +++ b/llvm/tools/llvm-readobj/ObjDumper.cpp @@ -16,6 +16,8 @@ #include "llvm/Object/Archive.h" #include "llvm/Object/Decompressor.h" #include "llvm/Object/ObjectFile.h" +#include "llvm/Object/OffloadBinary.h" +#include "llvm/Object/OffloadBundle.h" #include "llvm/Support/Error.h" #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/ScopedPrinter.h" @@ -230,4 +232,14 @@ void ObjDumper::printSectionsAsHex(const object::ObjectFile &Obj, } } +void ObjDumper::printOffloading(const object::ObjectFile &Obj) { + SmallVector Bundles; + if (Error Err = object::extractOffloadBundleFatBinary(Obj, Bundles)) + reportWarning(std::move(Err), Obj.getFileName()); + + // Print out all the FatBin Bundles that are contained in this buffer. 
+ for (const auto &[Index, Bundle] : llvm::enumerate(Bundles)) + Bundle.printEntriesAsURI(); +} + } // namespace llvm diff --git a/llvm/tools/llvm-readobj/ObjDumper.h b/llvm/tools/llvm-readobj/ObjDumper.h index a654078a770ff..d26439435a82b 100644 --- a/llvm/tools/llvm-readobj/ObjDumper.h +++ b/llvm/tools/llvm-readobj/ObjDumper.h @@ -16,6 +16,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Object/ObjectFile.h" +#include "llvm/Object/OffloadBinary.h" #include "llvm/Support/CommandLine.h" #include @@ -188,6 +189,7 @@ class ObjDumper { std::function WarningHandler; void reportUniqueWarning(Error Err) const; void reportUniqueWarning(const Twine &Msg) const; + void printOffloading(const object::ObjectFile &Obj); protected: ScopedPrinter &W; diff --git a/llvm/tools/llvm-readobj/Opts.td b/llvm/tools/llvm-readobj/Opts.td index 711522c4acb14..97d5d7f96dc32 100644 --- a/llvm/tools/llvm-readobj/Opts.td +++ b/llvm/tools/llvm-readobj/Opts.td @@ -32,6 +32,7 @@ def file_header : FF<"file-header", "Display file header">; def headers : FF<"headers", "Equivalent to setting: --file-header, --program-headers, --section-headers">; defm hex_dump : Eq<"hex-dump", "Display the specified section(s) as hexadecimal bytes">, MetaVarName<"">; def pretty_print : FF<"pretty-print", "Pretty print JSON output">; +def offloading : FF<"offloading", "Display the content of the offloading section">; def relocs : FF<"relocs", "Display the relocation entries in the file">; def section_data : FF<"section-data", "Display section data for each section shown. This option has no effect for GNU style output">; def section_details : FF<"section-details", "Display the section details">; diff --git a/llvm/tools/llvm-readobj/llvm-readobj.cpp b/llvm/tools/llvm-readobj/llvm-readobj.cpp index 2b34761b2cc6c..5327731805010 100644 --- a/llvm/tools/llvm-readobj/llvm-readobj.cpp +++ b/llvm/tools/llvm-readobj/llvm-readobj.cpp @@ -135,6 +135,7 @@ static bool HashHistogram; static bool Memtag; static bool NeededLibraries; static bool Notes; +static bool Offloading; static bool ProgramHeaders; static bool SectionGroups; static std::vector SFrame; @@ -274,6 +275,7 @@ static void parseOptions(const opt::InputArgList &Args) { opts::Memtag = Args.hasArg(OPT_memtag); opts::NeededLibraries = Args.hasArg(OPT_needed_libs); opts::Notes = Args.hasArg(OPT_notes); + opts::Offloading = Args.hasArg(OPT_offloading); opts::PrettyPrint = Args.hasArg(OPT_pretty_print); opts::ProgramHeaders = Args.hasArg(OPT_program_headers); opts::SectionGroups = Args.hasArg(OPT_section_groups); @@ -459,6 +461,8 @@ static void dumpObject(ObjectFile &Obj, ScopedPrinter &Writer, Dumper->printGnuHashTable(); if (opts::VersionInfo) Dumper->printVersionInfo(); + if (opts::Offloading) + Dumper->printOffloading(Obj); if (opts::StringTable) Dumper->printStringTable(); if (Obj.isELF()) { @@ -707,6 +711,7 @@ int llvm_readobj_main(int argc, char **argv, const llvm::ToolContext &) { opts::DynamicTable = true; opts::Notes = true; opts::VersionInfo = true; + opts::Offloading = true; opts::UnwindInfo = true; opts::SectionGroups = true; opts::HashHistogram = true; diff --git a/llvm/tools/llvm-remarkutil/CMakeLists.txt b/llvm/tools/llvm-remarkutil/CMakeLists.txt index ed398ad272024..c6e9334d87c04 100644 --- a/llvm/tools/llvm-remarkutil/CMakeLists.txt +++ b/llvm/tools/llvm-remarkutil/CMakeLists.txt @@ -8,6 +8,7 @@ add_llvm_tool(llvm-remarkutil RemarkConvert.cpp RemarkCount.cpp RemarkCounter.cpp + RemarkFilter.cpp RemarkInstructionMix.cpp RemarkSizeDiff.cpp 
RemarkUtil.cpp diff --git a/llvm/tools/llvm-remarkutil/RemarkCounter.cpp b/llvm/tools/llvm-remarkutil/RemarkCounter.cpp index 7d5c84815b3bb..2e842c8c2d72e 100644 --- a/llvm/tools/llvm-remarkutil/RemarkCounter.cpp +++ b/llvm/tools/llvm-remarkutil/RemarkCounter.cpp @@ -25,6 +25,9 @@ static cl::SubCommand CountSub("count", INPUT_FORMAT_COMMAND_LINE_OPTIONS(CountSub) INPUT_OUTPUT_COMMAND_LINE_OPTIONS(CountSub) +REMARK_FILTER_COMMAND_LINE_OPTIONS(CountSub) + +REMARK_FILTER_SETUP_FUNC() static cl::list Keys("args", cl::desc("Specify remark argument/s to count by."), @@ -34,45 +37,7 @@ static cl::list RKeys( cl::desc( "Specify remark argument/s to count (accepts regular expressions)."), cl::value_desc("arguments"), cl::sub(CountSub), cl::ValueOptional); -static cl::opt - RemarkNameOpt("remark-name", - cl::desc("Optional remark name to filter collection by."), - cl::ValueOptional, cl::sub(CountSub)); -static cl::opt - PassNameOpt("pass-name", cl::ValueOptional, - cl::desc("Optional remark pass name to filter collection by."), - cl::sub(CountSub)); -static cl::opt RemarkFilterArgByOpt( - "filter-arg-by", cl::desc("Optional remark arg to filter collection by."), - cl::ValueOptional, cl::sub(CountSub)); -static cl::opt - RemarkNameOptRE("rremark-name", - cl::desc("Optional remark name to filter collection by " - "(accepts regular expressions)."), - cl::ValueOptional, cl::sub(CountSub)); -static cl::opt - RemarkArgFilterOptRE("rfilter-arg-by", - cl::desc("Optional remark arg to filter collection by " - "(accepts regular expressions)."), - cl::sub(CountSub), cl::ValueOptional); -static cl::opt - PassNameOptRE("rpass-name", cl::ValueOptional, - cl::desc("Optional remark pass name to filter collection " - "by (accepts regular expressions)."), - cl::sub(CountSub)); -static cl::opt RemarkTypeOpt( - "remark-type", cl::desc("Optional remark type to filter collection by."), - cl::values(clEnumValN(Type::Unknown, "unknown", "UNKOWN"), - clEnumValN(Type::Passed, "passed", "PASSED"), - clEnumValN(Type::Missed, "missed", "MISSED"), - clEnumValN(Type::Analysis, "analysis", "ANALYSIS"), - clEnumValN(Type::AnalysisFPCommute, "analysis-fp-commute", - "ANALYSIS_FP_COMMUTE"), - clEnumValN(Type::AnalysisAliasing, "analysis-aliasing", - "ANALYSIS_ALIASING"), - clEnumValN(Type::Failure, "failure", "FAILURE")), - cl::init(Type::Failure), cl::sub(CountSub)); static cl::opt CountByOpt( "count-by", cl::desc("Specify the property to collect remarks by."), cl::values( @@ -112,21 +77,6 @@ static unsigned getValForKey(StringRef Key, const Remark &Remark) { return *RemarkArg->getValAsInt(); } -bool Filters::filterRemark(const Remark &Remark) { - if (RemarkNameFilter && !RemarkNameFilter->match(Remark.RemarkName)) - return false; - if (PassNameFilter && !PassNameFilter->match(Remark.PassName)) - return false; - if (RemarkTypeFilter) - return *RemarkTypeFilter == Remark.RemarkType; - if (ArgFilter) { - if (!any_of(Remark.Args, - [this](Argument Arg) { return ArgFilter->match(Arg.Val); })) - return false; - } - return true; -} - Error ArgumentCounter::getAllMatchingArgumentsInRemark( StringRef Buffer, ArrayRef Arguments, Filters &Filter) { auto MaybeParser = createRemarkParser(InputFormat, Buffer); @@ -223,33 +173,6 @@ Error RemarkCounter::print(StringRef OutputFileName) { return Error::success(); } -Expected getRemarkFilter() { - // Create Filter properties. 
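The per-subcommand filter options removed above are being centralized: instead of each tool declaring its own set of cl::opt flags, a macro stamps out the same definitions for whichever cl::SubCommand needs them. A minimal sketch of that pattern follows; the option and subcommand names are illustrative, not the ones this patch defines.

    #include "llvm/Support/CommandLine.h"

    // Expands to a static option registered under the given subcommand, so the
    // same flag can be declared once per subcommand without copy-pasting it.
    #define EXAMPLE_FILTER_OPTIONS(SUBOPT)                                     \
      static llvm::cl::opt<std::string> ExampleNameOpt(                        \
          "example-name", llvm::cl::desc("Optional name to filter by."),       \
          llvm::cl::ValueOptional, llvm::cl::sub(SUBOPT));

    static llvm::cl::SubCommand ExampleSub("example", "Illustrative subcommand.");
    EXAMPLE_FILTER_OPTIONS(ExampleSub)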
- auto MaybeRemarkNameFilter = - FilterMatcher::createExactOrRE(RemarkNameOpt, RemarkNameOptRE); - if (!MaybeRemarkNameFilter) - return MaybeRemarkNameFilter.takeError(); - - auto MaybePassNameFilter = - FilterMatcher::createExactOrRE(PassNameOpt, PassNameOptRE); - if (!MaybePassNameFilter) - return MaybePassNameFilter.takeError(); - - auto MaybeRemarkArgFilter = FilterMatcher::createExactOrRE( - RemarkFilterArgByOpt, RemarkArgFilterOptRE); - if (!MaybeRemarkArgFilter) - return MaybeRemarkArgFilter.takeError(); - - std::optional RemarkType; - if (RemarkTypeOpt != Type::Failure) - RemarkType = RemarkTypeOpt; - - // Create RemarkFilter. - return Filters{std::move(*MaybeRemarkNameFilter), - std::move(*MaybePassNameFilter), - std::move(*MaybeRemarkArgFilter), RemarkType}; -} - Error useCollectRemark(StringRef Buffer, Counter &Counter, Filters &Filter) { // Create Parser. auto MaybeParser = createRemarkParser(InputFormat, Buffer); @@ -278,7 +201,7 @@ static Error collectRemarks() { if (!MaybeBuf) return MaybeBuf.takeError(); StringRef Buffer = (*MaybeBuf)->getBuffer(); - auto MaybeFilter = getRemarkFilter(); + auto MaybeFilter = getRemarkFilters(); if (!MaybeFilter) return MaybeFilter.takeError(); auto &Filter = *MaybeFilter; diff --git a/llvm/tools/llvm-remarkutil/RemarkCounter.h b/llvm/tools/llvm-remarkutil/RemarkCounter.h index 3b977791d87c2..69e552e3742ec 100644 --- a/llvm/tools/llvm-remarkutil/RemarkCounter.h +++ b/llvm/tools/llvm-remarkutil/RemarkCounter.h @@ -14,6 +14,7 @@ #include "RemarkUtilHelpers.h" #include "llvm/ADT/MapVector.h" #include "llvm/Support/Regex.h" +#include namespace llvm { namespace remarks { @@ -45,18 +46,6 @@ inline std::string groupByToStr(GroupBy GroupBy) { } } -/// Filter out remarks based on remark properties based on name, pass name, -/// argument and type. -struct Filters { - std::optional RemarkNameFilter; - std::optional PassNameFilter; - std::optional ArgFilter; - std::optional RemarkTypeFilter; - - /// Returns true if \p Remark satisfies all the provided filters. - bool filterRemark(const Remark &Remark); -}; - /// Abstract counter class used to define the general required methods for /// counting a remark. struct Counter { diff --git a/llvm/tools/llvm-remarkutil/RemarkFilter.cpp b/llvm/tools/llvm-remarkutil/RemarkFilter.cpp new file mode 100644 index 0000000000000..acfef6608677c --- /dev/null +++ b/llvm/tools/llvm-remarkutil/RemarkFilter.cpp @@ -0,0 +1,84 @@ +//===- RemarkFilter.cpp ---------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Generic tool to filter remarks +// +//===----------------------------------------------------------------------===// + +#include "RemarkUtilHelpers.h" +#include "RemarkUtilRegistry.h" + +#include "llvm/Support/Error.h" +#include "llvm/Support/Regex.h" + +using namespace llvm; +using namespace remarks; +using namespace llvm::remarkutil; + +namespace filter { + +static cl::SubCommand FilterSub("filter", + "Filter remarks based on specified criteria."); + +INPUT_FORMAT_COMMAND_LINE_OPTIONS(FilterSub) +OUTPUT_FORMAT_COMMAND_LINE_OPTIONS(FilterSub) +INPUT_OUTPUT_COMMAND_LINE_OPTIONS(FilterSub) +REMARK_FILTER_COMMAND_LINE_OPTIONS(FilterSub) + +REMARK_FILTER_SETUP_FUNC() + +static Error tryFilter() { + auto MaybeFilter = getRemarkFilters(); + if (!MaybeFilter) + return MaybeFilter.takeError(); + Filters &Filter = *MaybeFilter; + + auto MaybeBuf = getInputMemoryBuffer(InputFileName); + if (!MaybeBuf) + return MaybeBuf.takeError(); + auto MaybeParser = createRemarkParser(InputFormat, (*MaybeBuf)->getBuffer()); + if (!MaybeParser) + return MaybeParser.takeError(); + auto &Parser = **MaybeParser; + + Format SerializerFormat = OutputFormat; + if (SerializerFormat == Format::Auto) { + SerializerFormat = Parser.ParserFormat; + if (OutputFileName.empty() || OutputFileName == "-") + SerializerFormat = Format::YAML; + } + + auto MaybeOF = getOutputFileForRemarks(OutputFileName, SerializerFormat); + if (!MaybeOF) + return MaybeOF.takeError(); + auto OF = std::move(*MaybeOF); + + auto MaybeSerializer = createRemarkSerializer(SerializerFormat, OF->os()); + if (!MaybeSerializer) + return MaybeSerializer.takeError(); + auto &Serializer = **MaybeSerializer; + + auto MaybeRemark = Parser.next(); + for (; MaybeRemark; MaybeRemark = Parser.next()) { + Remark &Remark = **MaybeRemark; + if (!Filter.filterRemark(Remark)) + continue; + Serializer.emit(Remark); + } + + auto E = MaybeRemark.takeError(); + if (!E.isA()) + return E; + consumeError(std::move(E)); + OF->keep(); + return Error::success(); +} + +static CommandRegistration FilterReg(&FilterSub, tryFilter); + +} // namespace filter diff --git a/llvm/tools/llvm-remarkutil/RemarkUtilHelpers.cpp b/llvm/tools/llvm-remarkutil/RemarkUtilHelpers.cpp index ad6c46eceb8f2..be529480e7d24 100644 --- a/llvm/tools/llvm-remarkutil/RemarkUtilHelpers.cpp +++ b/llvm/tools/llvm-remarkutil/RemarkUtilHelpers.cpp @@ -92,5 +92,22 @@ FilterMatcher::createExactOrRE(const llvm::cl::opt &ExactArg, return std::nullopt; } +bool Filters::filterRemark(const Remark &Remark) { + if (FunctionFilter && !FunctionFilter->match(Remark.FunctionName)) + return false; + if (RemarkNameFilter && !RemarkNameFilter->match(Remark.RemarkName)) + return false; + if (PassNameFilter && !PassNameFilter->match(Remark.PassName)) + return false; + if (RemarkTypeFilter) + return *RemarkTypeFilter == Remark.RemarkType; + if (ArgFilter) { + if (!any_of(Remark.Args, + [this](Argument Arg) { return ArgFilter->match(Arg.Val); })) + return false; + } + return true; +} + } // namespace remarks } // namespace llvm diff --git a/llvm/tools/llvm-remarkutil/RemarkUtilHelpers.h b/llvm/tools/llvm-remarkutil/RemarkUtilHelpers.h index 894ac8354e18b..0dd550765c1c6 100644 --- a/llvm/tools/llvm-remarkutil/RemarkUtilHelpers.h +++ b/llvm/tools/llvm-remarkutil/RemarkUtilHelpers.h @@ -9,12 +9,11 @@ // Helpers for remark utilites // 
//===----------------------------------------------------------------------===// -#include "llvm-c/Remarks.h" #include "llvm/ADT/StringRef.h" #include "llvm/Remarks/Remark.h" #include "llvm/Remarks/RemarkFormat.h" #include "llvm/Remarks/RemarkParser.h" -#include "llvm/Remarks/YAMLRemarkSerializer.h" +#include "llvm/Remarks/RemarkSerializer.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Error.h" #include "llvm/Support/FileSystem.h" @@ -43,6 +42,16 @@ clEnumValN(Format::Bitstream, "bitstream", "Bitstream")), \ cl::sub(SUBOPT)); +#define OUTPUT_FORMAT_COMMAND_LINE_OPTIONS(SUBOPT) \ + static cl::opt OutputFormat( \ + "serializer", cl::init(Format::Auto), \ + cl::desc("Output remark format to serialize"), \ + cl::values(clEnumValN(Format::Auto, "auto", \ + "Follow the parser format (default)"), \ + clEnumValN(Format::YAML, "yaml", "YAML"), \ + clEnumValN(Format::Bitstream, "bitstream", "Bitstream")), \ + cl::sub(SUBOPT)); + #define DEBUG_LOC_INFO_COMMAND_LINE_OPTIONS(SUBOPT) \ static cl::opt UseDebugLoc( \ "use-debug-loc", \ @@ -52,6 +61,87 @@ "number)"), \ cl::init(false), cl::sub(SUBOPT)); +#define REMARK_FILTER_COMMAND_LINE_OPTIONS(SUBOPT) \ + static cl::opt FunctionOpt( \ + "function", cl::sub(SUBOPT), cl::ValueOptional, \ + cl::desc("Optional function name to filter collection by.")); \ + static cl::opt FunctionOptRE( \ + "rfunction", cl::sub(SUBOPT), cl::ValueOptional, \ + cl::desc("Optional function name to filter collection by " \ + "(accepts regular expressions).")); \ + static cl::opt RemarkNameOpt( \ + "remark-name", \ + cl::desc("Optional remark name to filter collection by."), \ + cl::ValueOptional, cl::sub(SUBOPT)); \ + static cl::opt RemarkNameOptRE( \ + "rremark-name", \ + cl::desc("Optional remark name to filter collection by " \ + "(accepts regular expressions)."), \ + cl::ValueOptional, cl::sub(SUBOPT)); \ + static cl::opt PassNameOpt( \ + "pass-name", cl::ValueOptional, \ + cl::desc("Optional remark pass name to filter collection by."), \ + cl::sub(SUBOPT)); \ + static cl::opt PassNameOptRE( \ + "rpass-name", cl::ValueOptional, \ + cl::desc("Optional remark pass name to filter collection " \ + "by (accepts regular expressions)."), \ + cl::sub(SUBOPT)); \ + static cl::opt RemarkTypeOpt( \ + "remark-type", \ + cl::desc("Optional remark type to filter collection by."), \ + cl::values(clEnumValN(Type::Unknown, "unknown", "UNKOWN"), \ + clEnumValN(Type::Passed, "passed", "PASSED"), \ + clEnumValN(Type::Missed, "missed", "MISSED"), \ + clEnumValN(Type::Analysis, "analysis", "ANALYSIS"), \ + clEnumValN(Type::AnalysisFPCommute, "analysis-fp-commute", \ + "ANALYSIS_FP_COMMUTE"), \ + clEnumValN(Type::AnalysisAliasing, "analysis-aliasing", \ + "ANALYSIS_ALIASING"), \ + clEnumValN(Type::Failure, "failure", "FAILURE")), \ + cl::sub(SUBOPT)); \ + static cl::opt RemarkFilterArgByOpt( \ + "filter-arg-by", \ + cl::desc("Optional remark arg to filter collection by."), \ + cl::ValueOptional, cl::sub(SUBOPT)); \ + static cl::opt RemarkArgFilterOptRE( \ + "rfilter-arg-by", \ + cl::desc("Optional remark arg to filter collection by " \ + "(accepts regular expressions)."), \ + cl::sub(SUBOPT), cl::ValueOptional); + +#define REMARK_FILTER_SETUP_FUNC() \ + static Expected getRemarkFilters() { \ + auto MaybeFunctionFilter = \ + FilterMatcher::createExactOrRE(FunctionOpt, FunctionOptRE); \ + if (!MaybeFunctionFilter) \ + return MaybeFunctionFilter.takeError(); \ + \ + auto MaybeRemarkNameFilter = \ + FilterMatcher::createExactOrRE(RemarkNameOpt, RemarkNameOptRE); \ + if 
(!MaybeRemarkNameFilter) \ + return MaybeRemarkNameFilter.takeError(); \ + \ + auto MaybePassNameFilter = \ + FilterMatcher::createExactOrRE(PassNameOpt, PassNameOptRE); \ + if (!MaybePassNameFilter) \ + return MaybePassNameFilter.takeError(); \ + \ + auto MaybeRemarkArgFilter = FilterMatcher::createExactOrRE( \ + RemarkFilterArgByOpt, RemarkArgFilterOptRE); \ + if (!MaybeRemarkArgFilter) \ + return MaybeRemarkArgFilter.takeError(); \ + \ + std::optional TypeFilter; \ + if (RemarkTypeOpt.getNumOccurrences()) \ + TypeFilter = RemarkTypeOpt.getValue(); \ + \ + return Filters{std::move(*MaybeFunctionFilter), \ + std::move(*MaybeRemarkNameFilter), \ + std::move(*MaybePassNameFilter), \ + std::move(*MaybeRemarkArgFilter), TypeFilter}; \ + } + namespace llvm { namespace remarks { Expected> @@ -95,5 +185,18 @@ class FilterMatcher { } }; +/// Filter out remarks based on remark properties (function, remark name, pass +/// name, argument values and type). +struct Filters { + std::optional FunctionFilter; + std::optional RemarkNameFilter; + std::optional PassNameFilter; + std::optional ArgFilter; + std::optional RemarkTypeFilter; + + /// Returns true if \p Remark satisfies all the provided filters. + bool filterRemark(const Remark &Remark); +}; + } // namespace remarks } // namespace llvm diff --git a/llvm/tools/llvm-size/Opts.td b/llvm/tools/llvm-size/Opts.td index edae43f1abd24..88e39f293a505 100644 --- a/llvm/tools/llvm-size/Opts.td +++ b/llvm/tools/llvm-size/Opts.td @@ -21,6 +21,9 @@ def grp_mach_o : OptionGroup<"kind">, HelpText<"OPTIONS (Mach-O specific)">; def arch_EQ : Joined<["--"], "arch=">, HelpText<"architecture(s) from a Mach-O file to dump">, Group; def : Separate<["--", "-"], "arch">, Alias; def l : F<"l", "When format is darwin, use long format to include addresses and offsets">, Group; +def exclude_pagezero + : FF<"exclude-pagezero", "Do not include __PAGEZERO segment in totals">, + Group; def : F<"A", "Alias for --format">, Alias, AliasArgs<["sysv"]>; def : F<"B", "Alias for --format">, Alias, AliasArgs<["berkeley"]>; diff --git a/llvm/tools/llvm-size/llvm-size.cpp b/llvm/tools/llvm-size/llvm-size.cpp index acc7843ffac8b..ec94db4ff7382 100644 --- a/llvm/tools/llvm-size/llvm-size.cpp +++ b/llvm/tools/llvm-size/llvm-size.cpp @@ -79,6 +79,7 @@ static bool DarwinLongFormat; static RadixTy Radix = RadixTy::decimal; static bool TotalSizes; static bool HasMachOFiles = false; +static bool ExcludePageZero = false; static std::vector InputFilenames; @@ -313,7 +314,7 @@ static void printDarwinSegmentSizes(MachOObjectFile *MachO) { total_data += Seg.vmsize; else if (SegmentName == "__OBJC") total_objc += Seg.vmsize; - else + else if (!ExcludePageZero || SegmentName != "__PAGEZERO") total_others += Seg.vmsize; } } else if (Load.C.cmd == MachO::LC_SEGMENT) { @@ -339,7 +340,7 @@ static void printDarwinSegmentSizes(MachOObjectFile *MachO) { total_data += Seg.vmsize; else if (SegmentName == "__OBJC") total_objc += Seg.vmsize; - else + else if (!ExcludePageZero || SegmentName != "__PAGEZERO") total_others += Seg.vmsize; } } @@ -914,6 +915,7 @@ int llvm_size_main(int argc, char **argv, const llvm::ToolContext &) { ELFCommons = Args.hasArg(OPT_common); DarwinLongFormat = Args.hasArg(OPT_l); + ExcludePageZero = Args.hasArg(OPT_exclude_pagezero); TotalSizes = Args.hasArg(OPT_totals); StringRef V = Args.getLastArgValue(OPT_format_EQ, "berkeley"); if (V == "berkeley") diff --git a/llvm/tools/llvm-tli-checker/llvm-tli-checker.cpp b/llvm/tools/llvm-tli-checker/llvm-tli-checker.cpp index 
3cd5d597ee133..0cf8c5c63bef2 100644 --- a/llvm/tools/llvm-tli-checker/llvm-tli-checker.cpp +++ b/llvm/tools/llvm-tli-checker/llvm-tli-checker.cpp @@ -153,8 +153,12 @@ void SDKNameMap::maybeInsertSymbol(const SymbolRef &S, const ObjectFile &O) { uint32_t Flags = unwrapIgnoreError(S.getFlags()); section_iterator Section = unwrapIgnoreError(S.getSection(), /*Default=*/O.section_end()); - if (Type == SymbolRef::ST_Function && (Flags & SymbolRef::SF_Global) && - Section != O.section_end()) { + bool IsRegularFunction = Type == SymbolRef::ST_Function && + (Flags & SymbolRef::SF_Global) && + Section != O.section_end(); + bool IsIFunc = + Type == SymbolRef::ST_Other && (Flags & SymbolRef::SF_Indirect); + if (IsRegularFunction || IsIFunc) { StringRef Name = unwrapIgnoreError(S.getName()); insert({ Name, true }); } diff --git a/llvm/tools/opt/NewPMDriver.cpp b/llvm/tools/opt/NewPMDriver.cpp index 0c991b71a6b26..c19fc19f90afe 100644 --- a/llvm/tools/opt/NewPMDriver.cpp +++ b/llvm/tools/opt/NewPMDriver.cpp @@ -361,27 +361,25 @@ bool llvm::runPassPipeline( bool ShouldPreserveBitcodeUseListOrder, bool EmitSummaryIndex, bool EmitModuleHash, bool EnableDebugify, bool VerifyDIPreserve, bool EnableProfcheck, bool UnifiedLTO) { - auto FS = vfs::getRealFileSystem(); std::optional P; switch (PGOKindFlag) { case InstrGen: - P = PGOOptions(ProfileFile, "", "", MemoryProfileFile, FS, - PGOOptions::IRInstr, PGOOptions::NoCSAction, - PGOColdFuncAttr); + P = PGOOptions(ProfileFile, "", "", MemoryProfileFile, PGOOptions::IRInstr, + PGOOptions::NoCSAction, PGOColdFuncAttr); break; case InstrUse: - P = PGOOptions(ProfileFile, "", ProfileRemappingFile, MemoryProfileFile, FS, + P = PGOOptions(ProfileFile, "", ProfileRemappingFile, MemoryProfileFile, PGOOptions::IRUse, PGOOptions::NoCSAction, PGOColdFuncAttr); break; case SampleUse: - P = PGOOptions(ProfileFile, "", ProfileRemappingFile, MemoryProfileFile, FS, + P = PGOOptions(ProfileFile, "", ProfileRemappingFile, MemoryProfileFile, PGOOptions::SampleUse, PGOOptions::NoCSAction, PGOColdFuncAttr); break; case NoPGO: if (DebugInfoForProfiling || PseudoProbeForProfiling || !MemoryProfileFile.empty()) - P = PGOOptions("", "", "", MemoryProfileFile, FS, PGOOptions::NoAction, + P = PGOOptions("", "", "", MemoryProfileFile, PGOOptions::NoAction, PGOOptions::NoCSAction, PGOColdFuncAttr, DebugInfoForProfiling, PseudoProbeForProfiling); else @@ -403,7 +401,7 @@ bool llvm::runPassPipeline( P->CSProfileGenFile = CSProfileGenFile; } else P = PGOOptions("", CSProfileGenFile, ProfileRemappingFile, - /*MemoryProfile=*/"", FS, PGOOptions::NoAction, + /*MemoryProfile=*/"", PGOOptions::NoAction, PGOOptions::CSIRInstr); } else /* CSPGOKindFlag == CSInstrUse */ { if (!P) { diff --git a/llvm/unittests/ADT/APFloatTest.cpp b/llvm/unittests/ADT/APFloatTest.cpp index 141282ea254b4..30f0a8e5089ef 100644 --- a/llvm/unittests/ADT/APFloatTest.cpp +++ b/llvm/unittests/ADT/APFloatTest.cpp @@ -10176,4 +10176,11 @@ TEST(APFloatTest, hasSignBitInMSB) { EXPECT_FALSE(APFloat::hasSignBitInMSB(APFloat::Float8E8M0FNU())); } +TEST(APFloatTest, FrexpQuietSNaN) { + APFloat SNaN = APFloat::getSNaN(APFloat::PPCDoubleDouble()); + int Exp; + APFloat Result = frexp(SNaN, Exp, APFloat::rmNearestTiesToEven); + EXPECT_FALSE(Result.isSignaling()); +} + } // namespace diff --git a/llvm/unittests/ADT/APIntTest.cpp b/llvm/unittests/ADT/APIntTest.cpp index 116693c873f30..ca9f9f17ee112 100644 --- a/llvm/unittests/ADT/APIntTest.cpp +++ b/llvm/unittests/ADT/APIntTest.cpp @@ -3718,8 +3718,9 @@ TEST(APIntTest, ScaleBitMask) { 
TEST(APIntTest, DenseMap) { DenseMap Map; APInt ZeroWidthInt(0, 0, false); - Map.insert({ZeroWidthInt, 0}); - Map.find(ZeroWidthInt); + Map.insert({ZeroWidthInt, 123}); + auto It = Map.find(ZeroWidthInt); + EXPECT_EQ(It->second, 123); } TEST(APIntTest, TryExt) { diff --git a/llvm/unittests/ADT/BitVectorTest.cpp b/llvm/unittests/ADT/BitVectorTest.cpp index 6a4780c143e54..12ba0041af551 100644 --- a/llvm/unittests/ADT/BitVectorTest.cpp +++ b/llvm/unittests/ADT/BitVectorTest.cpp @@ -8,6 +8,7 @@ #include "llvm/ADT/BitVector.h" #include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallBitVector.h" #include "gtest/gtest.h" @@ -1177,6 +1178,98 @@ TYPED_TEST(BitVectorTest, Iterators) { EXPECT_EQ(List[i++], Bit); } +TYPED_TEST(BitVectorTest, BidirectionalIterator) { + // Test decrement operators. + TypeParam Vec(100, false); + Vec.set(10); + Vec.set(20); + Vec.set(30); + Vec.set(40); + + // Test that we can decrement from end(). + auto EndIt = Vec.set_bits_end(); + auto LastIt = EndIt; + --LastIt; + EXPECT_EQ(*LastIt, 40U); + + // Test post-decrement. + auto It = Vec.set_bits_end(); + auto PrevIt = It--; + EXPECT_EQ(PrevIt, Vec.set_bits_end()); + EXPECT_EQ(*It, 40U); + + // Test pre-decrement. + --It; + EXPECT_EQ(*It, 30U); + + // Test full backward iteration. + std::vector BackwardBits; + for (auto RIt = Vec.set_bits_end(); RIt != Vec.set_bits_begin();) { + --RIt; + BackwardBits.push_back(*RIt); + } + EXPECT_EQ(BackwardBits.size(), 4U); + EXPECT_EQ(BackwardBits[0], 40U); + EXPECT_EQ(BackwardBits[1], 30U); + EXPECT_EQ(BackwardBits[2], 20U); + EXPECT_EQ(BackwardBits[3], 10U); +} + +TYPED_TEST(BitVectorTest, ReverseIteration) { + // Test using llvm::reverse. + TypeParam Vec(100, false); + Vec.set(5); + Vec.set(15); + Vec.set(25); + Vec.set(35); + Vec.set(45); + + std::vector ReversedBits; + for (unsigned Bit : llvm::reverse(Vec.set_bits())) { + ReversedBits.push_back(Bit); + } + + EXPECT_EQ(ReversedBits.size(), 5U); + EXPECT_EQ(ReversedBits[0], 45U); + EXPECT_EQ(ReversedBits[1], 35U); + EXPECT_EQ(ReversedBits[2], 25U); + EXPECT_EQ(ReversedBits[3], 15U); + EXPECT_EQ(ReversedBits[4], 5U); +} + +TYPED_TEST(BitVectorTest, BidirectionalIteratorEdgeCases) { + // Test empty BitVector. + TypeParam Empty; + EXPECT_EQ(Empty.set_bits_begin(), Empty.set_bits_end()); + + // Decrementing end() on empty should give -1 (no bits set). + auto EmptyEndIt = Empty.set_bits_end(); + --EmptyEndIt; + // After decrement on empty, iterator should still be at "no bit" position. + EXPECT_EQ(*EmptyEndIt, static_cast(-1)); + + // Test single bit. + TypeParam Single(10, false); + Single.set(5); + + auto SingleIt = Single.set_bits_end(); + --SingleIt; + EXPECT_EQ(*SingleIt, 5U); + // After decrementing past the first element, the iterator is in an + // undefined state (before begin), so we don't test this case. + + // Test all bits set. 
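The new bidirectional set-bit iterators also allow walking set bits from the back, for example to locate the highest set bit by stepping back from set_bits_end(). A small sketch under that assumption; the helper name is illustrative.

    #include "llvm/ADT/BitVector.h"

    // Returns the index of the highest set bit, or -1 when no bit is set.
    static int lastSetBit(const llvm::BitVector &Vec) {
      auto It = Vec.set_bits_end();
      if (It == Vec.set_bits_begin())
        return -1;
      --It; // stepping back from end() lands on the last set bit
      return static_cast<int>(*It);
    }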
+ TypeParam AllSet(10, true); + std::vector AllBitsReverse; + for (unsigned Bit : llvm::reverse(AllSet.set_bits())) { + AllBitsReverse.push_back(Bit); + } + EXPECT_EQ(AllBitsReverse.size(), 10U); + for (unsigned i = 0; i < 10; ++i) { + EXPECT_EQ(AllBitsReverse[i], 9 - i); + } +} + TYPED_TEST(BitVectorTest, PushBack) { TypeParam Vec(10, false); EXPECT_EQ(-1, Vec.find_first()); diff --git a/llvm/unittests/ADT/EquivalenceClassesTest.cpp b/llvm/unittests/ADT/EquivalenceClassesTest.cpp index 3d5c48eb8e1b6..8172ff97e5169 100644 --- a/llvm/unittests/ADT/EquivalenceClassesTest.cpp +++ b/llvm/unittests/ADT/EquivalenceClassesTest.cpp @@ -108,6 +108,29 @@ TEST(EquivalenceClassesTest, SimpleErase4) { EXPECT_FALSE(EqClasses.erase(1)); } +TEST(EquivalenceClassesTest, EraseKeepsLeaderBit) { + EquivalenceClasses EC; + + // Create a set {1, 2} where 1 is the leader. + EC.unionSets(1, 2); + + // Verify initial state. + EXPECT_EQ(EC.getLeaderValue(2), 1); + + // Erase 2, the non-leader member. + EXPECT_TRUE(EC.erase(2)); + + // Verify that we have exactly one equivalence class. + ASSERT_NE(EC.begin(), EC.end()); + ASSERT_EQ(std::next(EC.begin()), EC.end()); + + // Verify that 1 is still a leader after erasing 2. + const auto *Elem = *EC.begin(); + ASSERT_NE(Elem, nullptr); + EXPECT_EQ(Elem->getData(), 1); + EXPECT_TRUE(Elem->isLeader()) << "The leader bit was lost!"; +} + TEST(EquivalenceClassesTest, TwoSets) { EquivalenceClasses EqClasses; // Form sets of odd and even numbers, check that we split them into these diff --git a/llvm/unittests/ADT/ImmutableSetTest.cpp b/llvm/unittests/ADT/ImmutableSetTest.cpp index c0bde4c4d680b..87bc2a8da4bad 100644 --- a/llvm/unittests/ADT/ImmutableSetTest.cpp +++ b/llvm/unittests/ADT/ImmutableSetTest.cpp @@ -164,4 +164,35 @@ TEST_F(ImmutableSetTest, IterLongSetTest) { ASSERT_EQ(6, i); } +TEST_F(ImmutableSetTest, AddIfNotFoundTest) { + ImmutableSet::Factory f(/*canonicalize=*/false); + ImmutableSet S = f.getEmptySet(); + S = f.add(S, 1); + S = f.add(S, 2); + S = f.add(S, 3); + + ImmutableSet T1 = f.add(S, 1); + ImmutableSet T2 = f.add(S, 2); + ImmutableSet T3 = f.add(S, 3); + EXPECT_EQ(S.getRoot(), T1.getRoot()); + EXPECT_EQ(S.getRoot(), T2.getRoot()); + EXPECT_EQ(S.getRoot(), T3.getRoot()); + + ImmutableSet U = f.add(S, 4); + EXPECT_NE(S.getRoot(), U.getRoot()); +} + +TEST_F(ImmutableSetTest, RemoveIfNotFoundTest) { + ImmutableSet::Factory f(/*canonicalize=*/false); + ImmutableSet S = f.getEmptySet(); + S = f.add(S, 1); + S = f.add(S, 2); + S = f.add(S, 3); + + ImmutableSet T = f.remove(S, 4); + EXPECT_EQ(S.getRoot(), T.getRoot()); + + ImmutableSet U = f.remove(S, 3); + EXPECT_NE(S.getRoot(), U.getRoot()); } +} // namespace diff --git a/llvm/unittests/ADT/PackedVectorTest.cpp b/llvm/unittests/ADT/PackedVectorTest.cpp index 30fc7c0b6d07f..df2cbf0e7f0f8 100644 --- a/llvm/unittests/ADT/PackedVectorTest.cpp +++ b/llvm/unittests/ADT/PackedVectorTest.cpp @@ -71,6 +71,14 @@ TEST(PackedVectorTest, RawBitsSize) { EXPECT_EQ(12u, Vec.raw_bits().size()); } +TEST(PackedVectorTest, SignedValueOverwrite) { + PackedVector Vec(1); + Vec[0] = -1; + EXPECT_EQ(-1, Vec[0]); + Vec[0] = 1; + EXPECT_EQ(1, Vec[0]); +} + #ifdef EXPECT_DEBUG_DEATH TEST(PackedVectorTest, UnsignedValues) { diff --git a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp index dc6059dcf6827..b6e8567ee514d 100644 --- a/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp +++ b/llvm/unittests/Analysis/FunctionPropertiesAnalysisTest.cpp @@ -43,8 
+43,11 @@ class FunctionPropertiesAnalysisTest : public testing::Test { public: FunctionPropertiesAnalysisTest() { auto VocabVector = ir2vec::Vocabulary::createDummyVocabForTest(1); - MAM.registerPass([&] { return IR2VecVocabAnalysis(VocabVector); }); - IR2VecVocab = ir2vec::Vocabulary(std::move(VocabVector)); + MAM.registerPass([VocabVector = std::move(VocabVector)]() mutable { + return IR2VecVocabAnalysis(std::move(VocabVector)); + }); + IR2VecVocab = + new ir2vec::Vocabulary(ir2vec::Vocabulary::createDummyVocabForTest(1)); MAM.registerPass([&] { return PassInstrumentationAnalysis(); }); FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); }); FAM.registerPass([&] { return DominatorTreeAnalysis(); }); @@ -66,7 +69,7 @@ class FunctionPropertiesAnalysisTest : public testing::Test { std::unique_ptr LI; FunctionAnalysisManager FAM; ModuleAnalysisManager MAM; - ir2vec::Vocabulary IR2VecVocab; + ir2vec::Vocabulary *IR2VecVocab; void TearDown() override { // Restore original IR2Vec weights @@ -78,7 +81,7 @@ class FunctionPropertiesAnalysisTest : public testing::Test { FunctionPropertiesInfo buildFPI(Function &F) { // FunctionPropertiesInfo assumes IR2VecVocabAnalysis has been run to // use IR2Vec. - auto VocabResult = MAM.getResult(*F.getParent()); + auto &VocabResult = MAM.getResult(*F.getParent()); (void)VocabResult; return FunctionPropertiesInfo::getFunctionPropertiesInfo(F, FAM); } @@ -106,7 +109,7 @@ class FunctionPropertiesAnalysisTest : public testing::Test { } std::unique_ptr createEmbedder(const Function &F) { - auto Emb = ir2vec::Embedder::create(IR2VecKind::Symbolic, F, IR2VecVocab); + auto Emb = ir2vec::Embedder::create(IR2VecKind::Symbolic, F, *IR2VecVocab); EXPECT_TRUE(static_cast(Emb)); return Emb; } diff --git a/llvm/unittests/Analysis/IR2VecTest.cpp b/llvm/unittests/Analysis/IR2VecTest.cpp index 9f2f6a3496ce0..743628fffac76 100644 --- a/llvm/unittests/Analysis/IR2VecTest.cpp +++ b/llvm/unittests/Analysis/IR2VecTest.cpp @@ -295,7 +295,7 @@ TEST(IR2VecTest, ZeroDimensionEmbedding) { // Fixture for IR2Vec tests requiring IR setup. 
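The vocabulary is now moved into the analysis registration callback rather than copied, using an init-capture on a mutable lambda. A standalone sketch of that move-capture pattern, with simplified stand-in types:

    #include <utility>
    #include <vector>

    struct ExamplePayload {
      std::vector<double> Data;
    };

    template <typename Callback> ExamplePayload registerOnce(Callback &&CB) {
      return CB(); // the callback is invoked exactly once here
    }

    void demo() {
      std::vector<double> Vocab(1024, 0.5);
      // The init-capture moves Vocab into the closure; 'mutable' lets the
      // closure's copy itself be moved from when the callback runs.
      ExamplePayload P = registerOnce([Vocab = std::move(Vocab)]() mutable {
        return ExamplePayload{std::move(Vocab)};
      });
      (void)P;
    }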
class IR2VecTestFixture : public ::testing::Test { protected: - Vocabulary V; + Vocabulary *V; LLVMContext Ctx; std::unique_ptr M; Function *F = nullptr; @@ -304,7 +304,7 @@ class IR2VecTestFixture : public ::testing::Test { Instruction *RetInst = nullptr; void SetUp() override { - V = Vocabulary(Vocabulary::createDummyVocabForTest(2)); + V = new Vocabulary(Vocabulary::createDummyVocabForTest(2)); // Setup IR M = std::make_unique("TestM", Ctx); @@ -322,7 +322,7 @@ class IR2VecTestFixture : public ::testing::Test { }; TEST_F(IR2VecTestFixture, GetInstVecMap_Symbolic) { - auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V); + auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V); ASSERT_TRUE(static_cast(Emb)); const auto &InstMap = Emb->getInstVecMap(); @@ -341,7 +341,7 @@ TEST_F(IR2VecTestFixture, GetInstVecMap_Symbolic) { } TEST_F(IR2VecTestFixture, GetInstVecMap_FlowAware) { - auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V); + auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V); ASSERT_TRUE(static_cast(Emb)); const auto &InstMap = Emb->getInstVecMap(); @@ -358,7 +358,7 @@ TEST_F(IR2VecTestFixture, GetInstVecMap_FlowAware) { } TEST_F(IR2VecTestFixture, GetBBVecMap_Symbolic) { - auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V); + auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V); ASSERT_TRUE(static_cast(Emb)); const auto &BBMap = Emb->getBBVecMap(); @@ -373,7 +373,7 @@ TEST_F(IR2VecTestFixture, GetBBVecMap_Symbolic) { } TEST_F(IR2VecTestFixture, GetBBVecMap_FlowAware) { - auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V); + auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V); ASSERT_TRUE(static_cast(Emb)); const auto &BBMap = Emb->getBBVecMap(); @@ -388,7 +388,7 @@ TEST_F(IR2VecTestFixture, GetBBVecMap_FlowAware) { } TEST_F(IR2VecTestFixture, GetBBVector_Symbolic) { - auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V); + auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V); ASSERT_TRUE(static_cast(Emb)); const auto &BBVec = Emb->getBBVector(*BB); @@ -398,7 +398,7 @@ TEST_F(IR2VecTestFixture, GetBBVector_Symbolic) { } TEST_F(IR2VecTestFixture, GetBBVector_FlowAware) { - auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V); + auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V); ASSERT_TRUE(static_cast(Emb)); const auto &BBVec = Emb->getBBVector(*BB); @@ -408,7 +408,7 @@ TEST_F(IR2VecTestFixture, GetBBVector_FlowAware) { } TEST_F(IR2VecTestFixture, GetFunctionVector_Symbolic) { - auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, V); + auto Emb = Embedder::create(IR2VecKind::Symbolic, *F, *V); ASSERT_TRUE(static_cast(Emb)); const auto &FuncVec = Emb->getFunctionVector(); @@ -420,7 +420,7 @@ TEST_F(IR2VecTestFixture, GetFunctionVector_Symbolic) { } TEST_F(IR2VecTestFixture, GetFunctionVector_FlowAware) { - auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, V); + auto Emb = Embedder::create(IR2VecKind::FlowAware, *F, *V); ASSERT_TRUE(static_cast(Emb)); const auto &FuncVec = Emb->getFunctionVector(); @@ -435,6 +435,7 @@ static constexpr unsigned MaxOpcodes = Vocabulary::MaxOpcodes; static constexpr unsigned MaxTypeIDs = Vocabulary::MaxTypeIDs; static constexpr unsigned MaxCanonicalTypeIDs = Vocabulary::MaxCanonicalTypeIDs; static constexpr unsigned MaxOperands = Vocabulary::MaxOperandKinds; +static constexpr unsigned MaxPredicateKinds = Vocabulary::MaxPredicateKinds; // Mapping between LLVM Type::TypeID tokens and Vocabulary::CanonicalTypeID // names and their canonical string keys. 
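As the updated constants indicate, the vocabulary's flat index space is split into consecutive sections: opcodes, canonical type IDs, operand kinds, and now comparison predicates, so an index is a section base plus a position within the section. A rough sketch of that layout; the section sizes below are illustrative placeholders, not the real Vocabulary constants.

    // Illustrative sizes; the real values are Vocabulary's MaxOpcodes,
    // MaxCanonicalTypeIDs, MaxOperandKinds and MaxPredicateKinds.
    constexpr unsigned NumOpcodes = 68;
    constexpr unsigned NumCanonicalTypes = 13;
    constexpr unsigned NumOperandKinds = 4;

    // Opcodes occupy [0, NumOpcodes); LLVM opcodes are 1-based, hence the -1.
    constexpr unsigned opcodeIndex(unsigned Opcode) { return Opcode - 1; }
    constexpr unsigned typeIndex(unsigned CanonicalTypeID) {
      return NumOpcodes + CanonicalTypeID;
    }
    constexpr unsigned operandIndex(unsigned OperandKind) {
      return NumOpcodes + NumCanonicalTypes + OperandKind;
    }
    // Predicates form the final section, FCMP predicates first, then ICMP.
    constexpr unsigned predicateIndex(unsigned PredicatePosition) {
      return NumOpcodes + NumCanonicalTypes + NumOperandKinds + PredicatePosition;
    }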
@@ -460,9 +461,13 @@ TEST(IR2VecVocabularyTest, DummyVocabTest) { EXPECT_EQ(Emb.size(), Dim); // Should have the correct total number of embeddings - EXPECT_EQ(VocabVecSize, MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands); + EXPECT_EQ(VocabVecSize, MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands + + MaxPredicateKinds); - auto ExpectedVocab = VocabVec; + // Collect embeddings for later comparison before moving VocabVec + std::vector ExpectedVocab; + for (const auto &Emb : VocabVec) + ExpectedVocab.push_back(Emb); IR2VecVocabAnalysis VocabAnalysis(std::move(VocabVec)); LLVMContext TestCtx; @@ -480,17 +485,17 @@ TEST(IR2VecVocabularyTest, DummyVocabTest) { } TEST(IR2VecVocabularyTest, SlotIdxMapping) { - // Test getSlotIndex for Opcodes + // Test getIndex for Opcodes #define EXPECT_OPCODE_SLOT(NUM, OPCODE, CLASS) \ - EXPECT_EQ(Vocabulary::getSlotIndex(NUM), static_cast(NUM - 1)); + EXPECT_EQ(Vocabulary::getIndex(NUM), static_cast(NUM - 1)); #define HANDLE_INST(NUM, OPCODE, CLASS) EXPECT_OPCODE_SLOT(NUM, OPCODE, CLASS) #include "llvm/IR/Instruction.def" #undef HANDLE_INST #undef EXPECT_OPCODE_SLOT - // Test getSlotIndex for Types + // Test getIndex for Types #define EXPECT_TYPE_SLOT(TypeIDTok, CanonEnum, CanonStr) \ - EXPECT_EQ(Vocabulary::getSlotIndex(Type::TypeIDTok), \ + EXPECT_EQ(Vocabulary::getIndex(Type::TypeIDTok), \ MaxOpcodes + static_cast( \ Vocabulary::CanonicalTypeID::CanonEnum)); @@ -498,7 +503,7 @@ TEST(IR2VecVocabularyTest, SlotIdxMapping) { #undef EXPECT_TYPE_SLOT - // Test getSlotIndex for Value operands + // Test getIndex for Value operands LLVMContext Ctx; Module M("TestM", Ctx); FunctionType *FTy = @@ -508,40 +513,59 @@ TEST(IR2VecVocabularyTest, SlotIdxMapping) { #define EXPECTED_VOCAB_OPERAND_SLOT(X) \ MaxOpcodes + MaxCanonicalTypeIDs + static_cast(X) // Test Function operand - EXPECT_EQ(Vocabulary::getSlotIndex(*F), + EXPECT_EQ(Vocabulary::getIndex(*F), EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::FunctionID)); // Test Constant operand Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), 42); - EXPECT_EQ(Vocabulary::getSlotIndex(*C), + EXPECT_EQ(Vocabulary::getIndex(*C), EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::ConstantID)); // Test Pointer operand BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F); AllocaInst *PtrVal = new AllocaInst(Type::getInt32Ty(Ctx), 0, "ptr", BB); - EXPECT_EQ(Vocabulary::getSlotIndex(*PtrVal), + EXPECT_EQ(Vocabulary::getIndex(*PtrVal), EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::PointerID)); // Test Variable operand (function argument) Argument *Arg = F->getArg(0); - EXPECT_EQ(Vocabulary::getSlotIndex(*Arg), + EXPECT_EQ(Vocabulary::getIndex(*Arg), EXPECTED_VOCAB_OPERAND_SLOT(Vocabulary::OperandKind::VariableID)); #undef EXPECTED_VOCAB_OPERAND_SLOT + + // Test getIndex for predicates +#define EXPECTED_VOCAB_PREDICATE_SLOT(X) \ + MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands + static_cast(X) + for (unsigned P = CmpInst::FIRST_FCMP_PREDICATE; + P <= CmpInst::LAST_FCMP_PREDICATE; ++P) { + CmpInst::Predicate Pred = static_cast(P); + unsigned ExpectedIdx = + EXPECTED_VOCAB_PREDICATE_SLOT((P - CmpInst::FIRST_FCMP_PREDICATE)); + EXPECT_EQ(Vocabulary::getIndex(Pred), ExpectedIdx); + } + auto ICMP_Start = CmpInst::LAST_FCMP_PREDICATE + 1; + for (unsigned P = CmpInst::FIRST_ICMP_PREDICATE; + P <= CmpInst::LAST_ICMP_PREDICATE; ++P) { + CmpInst::Predicate Pred = static_cast(P); + unsigned ExpectedIdx = EXPECTED_VOCAB_PREDICATE_SLOT( + ICMP_Start + P - CmpInst::FIRST_ICMP_PREDICATE); + EXPECT_EQ(Vocabulary::getIndex(Pred), 
ExpectedIdx); + } +#undef EXPECTED_VOCAB_PREDICATE_SLOT } #if GTEST_HAS_DEATH_TEST #ifndef NDEBUG TEST(IR2VecVocabularyTest, NumericIDMapInvalidInputs) { // Test invalid opcode IDs - EXPECT_DEATH(Vocabulary::getSlotIndex(0u), "Invalid opcode"); - EXPECT_DEATH(Vocabulary::getSlotIndex(MaxOpcodes + 1), "Invalid opcode"); + EXPECT_DEATH(Vocabulary::getIndex(0u), "Invalid opcode"); + EXPECT_DEATH(Vocabulary::getIndex(MaxOpcodes + 1), "Invalid opcode"); // Test invalid type IDs - EXPECT_DEATH(Vocabulary::getSlotIndex(static_cast(MaxTypeIDs)), + EXPECT_DEATH(Vocabulary::getIndex(static_cast(MaxTypeIDs)), + "Invalid type ID"); + EXPECT_DEATH(Vocabulary::getIndex(static_cast(MaxTypeIDs + 10)), "Invalid type ID"); - EXPECT_DEATH( - Vocabulary::getSlotIndex(static_cast(MaxTypeIDs + 10)), - "Invalid type ID"); } #endif // NDEBUG #endif // GTEST_HAS_DEATH_TEST @@ -551,7 +575,7 @@ TEST(IR2VecVocabularyTest, StringKeyGeneration) { EXPECT_EQ(Vocabulary::getStringKey(12), "Add"); #define EXPECT_OPCODE(NUM, OPCODE, CLASS) \ - EXPECT_EQ(Vocabulary::getStringKey(Vocabulary::getSlotIndex(NUM)), \ + EXPECT_EQ(Vocabulary::getStringKey(Vocabulary::getIndex(NUM)), \ Vocabulary::getVocabKeyForOpcode(NUM)); #define HANDLE_INST(NUM, OPCODE, CLASS) EXPECT_OPCODE(NUM, OPCODE, CLASS) #include "llvm/IR/Instruction.def" @@ -569,6 +593,7 @@ TEST(IR2VecVocabularyTest, StringKeyGeneration) { #undef EXPECT_CANONICAL_TYPE_NAME + // Verify OperandKind -> string mapping #define HANDLE_OPERAND_KINDS(X) \ X(FunctionID, "Function") \ X(PointerID, "Pointer") \ @@ -592,6 +617,28 @@ TEST(IR2VecVocabularyTest, StringKeyGeneration) { Vocabulary::getStringKey(MaxOpcodes + MaxCanonicalTypeIDs + 1); EXPECT_EQ(FuncArgKey, "Function"); EXPECT_EQ(PtrArgKey, "Pointer"); + +// Verify PredicateKind -> string mapping +#define EXPECT_PREDICATE_KIND(PredNum, PredPos, PredKind) \ + do { \ + std::string PredStr = \ + std::string(PredKind) + "_" + \ + CmpInst::getPredicateName(static_cast(PredNum)) \ + .str(); \ + unsigned Pos = MaxOpcodes + MaxCanonicalTypeIDs + MaxOperands + PredPos; \ + EXPECT_EQ(Vocabulary::getStringKey(Pos), PredStr); \ + } while (0) + + for (unsigned P = CmpInst::FIRST_FCMP_PREDICATE; + P <= CmpInst::LAST_FCMP_PREDICATE; ++P) + EXPECT_PREDICATE_KIND(P, P - CmpInst::FIRST_FCMP_PREDICATE, "FCMP"); + + auto ICMP_Pos = CmpInst::LAST_FCMP_PREDICATE + 1; + for (unsigned P = CmpInst::FIRST_ICMP_PREDICATE; + P <= CmpInst::LAST_ICMP_PREDICATE; ++P) + EXPECT_PREDICATE_KIND(P, ICMP_Pos++, "ICMP"); + +#undef EXPECT_PREDICATE_KIND } TEST(IR2VecVocabularyTest, VocabularyDimensions) { @@ -627,10 +674,12 @@ TEST(IR2VecVocabularyTest, InvalidAccess) { #endif // GTEST_HAS_DEATH_TEST TEST(IR2VecVocabularyTest, TypeIDStringKeyMapping) { + Vocabulary V = Vocabulary(Vocabulary::createDummyVocabForTest()); #define EXPECT_TYPE_TO_CANONICAL(TypeIDTok, CanonEnum, CanonStr) \ - EXPECT_EQ( \ - Vocabulary::getStringKey(Vocabulary::getSlotIndex(Type::TypeIDTok)), \ - CanonStr); + do { \ + unsigned FlatIdx = V.getIndex(Type::TypeIDTok); \ + EXPECT_EQ(Vocabulary::getStringKey(FlatIdx), CanonStr); \ + } while (0); IR2VEC_HANDLE_TYPE_BIMAP(EXPECT_TYPE_TO_CANONICAL) @@ -638,14 +687,20 @@ TEST(IR2VecVocabularyTest, TypeIDStringKeyMapping) { } TEST(IR2VecVocabularyTest, InvalidVocabularyConstruction) { - std::vector InvalidVocab; - InvalidVocab.push_back(Embedding(2, 1.0)); - InvalidVocab.push_back(Embedding(2, 2.0)); - - Vocabulary V(std::move(InvalidVocab)); + // Test 1: Create invalid VocabStorage with insufficient sections + std::vector> InvalidSectionData; + 
// Only add one section with 2 embeddings, but the vocabulary needs 4 sections + std::vector Section1; + Section1.push_back(Embedding(2, 1.0)); + Section1.push_back(Embedding(2, 2.0)); + InvalidSectionData.push_back(std::move(Section1)); + + VocabStorage InvalidStorage(std::move(InvalidSectionData)); + Vocabulary V(std::move(InvalidStorage)); EXPECT_FALSE(V.isValid()); { + // Test 2: Default-constructed vocabulary should be invalid Vocabulary InvalidResult; EXPECT_FALSE(InvalidResult.isValid()); #if GTEST_HAS_DEATH_TEST @@ -656,4 +711,265 @@ TEST(IR2VecVocabularyTest, InvalidVocabularyConstruction) { } } +TEST(VocabStorageTest, DefaultConstructor) { + VocabStorage storage; + + EXPECT_EQ(storage.size(), 0u); + EXPECT_EQ(storage.getNumSections(), 0u); + EXPECT_EQ(storage.getDimension(), 0u); + EXPECT_FALSE(storage.isValid()); + + // Test iterators on empty storage + EXPECT_EQ(storage.begin(), storage.end()); +} + +TEST(VocabStorageTest, BasicConstruction) { + // Create test data with 3 sections + std::vector> sectionData; + + // Section 0: 2 embeddings of dimension 3 + std::vector section0; + section0.emplace_back(std::vector{1.0, 2.0, 3.0}); + section0.emplace_back(std::vector{4.0, 5.0, 6.0}); + sectionData.push_back(std::move(section0)); + + // Section 1: 1 embedding of dimension 3 + std::vector section1; + section1.emplace_back(std::vector{7.0, 8.0, 9.0}); + sectionData.push_back(std::move(section1)); + + // Section 2: 3 embeddings of dimension 3 + std::vector section2; + section2.emplace_back(std::vector{10.0, 11.0, 12.0}); + section2.emplace_back(std::vector{13.0, 14.0, 15.0}); + section2.emplace_back(std::vector{16.0, 17.0, 18.0}); + sectionData.push_back(std::move(section2)); + + VocabStorage storage(std::move(sectionData)); + + EXPECT_EQ(storage.size(), 6u); // Total: 2 + 1 + 3 = 6 + EXPECT_EQ(storage.getNumSections(), 3u); + EXPECT_EQ(storage.getDimension(), 3u); + EXPECT_TRUE(storage.isValid()); +} + +TEST(VocabStorageTest, SectionAccess) { + // Create test data + std::vector> sectionData; + + std::vector section0; + section0.emplace_back(std::vector{1.0, 2.0}); + section0.emplace_back(std::vector{3.0, 4.0}); + sectionData.push_back(std::move(section0)); + + std::vector section1; + section1.emplace_back(std::vector{5.0, 6.0}); + sectionData.push_back(std::move(section1)); + + VocabStorage storage(std::move(sectionData)); + + // Test section access + EXPECT_EQ(storage[0].size(), 2u); + EXPECT_EQ(storage[1].size(), 1u); + + // Test embedding values + EXPECT_THAT(storage[0][0].getData(), ElementsAre(1.0, 2.0)); + EXPECT_THAT(storage[0][1].getData(), ElementsAre(3.0, 4.0)); + EXPECT_THAT(storage[1][0].getData(), ElementsAre(5.0, 6.0)); +} + +#if GTEST_HAS_DEATH_TEST +#ifndef NDEBUG +TEST(VocabStorageTest, InvalidSectionAccess) { + std::vector> sectionData; + std::vector section0; + section0.emplace_back(std::vector{1.0, 2.0}); + sectionData.push_back(std::move(section0)); + + VocabStorage storage(std::move(sectionData)); + + EXPECT_DEATH(storage[1], "Invalid section ID"); + EXPECT_DEATH(storage[10], "Invalid section ID"); +} + +TEST(VocabStorageTest, EmptySection) { + std::vector> sectionData; + std::vector emptySection; // Empty section + sectionData.push_back(std::move(emptySection)); + + std::vector validSection; + validSection.emplace_back(std::vector{1.0}); + sectionData.push_back(std::move(validSection)); + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "Vocabulary section is empty"); +} + +TEST(VocabStorageTest, EmptyMiddleSection) { + std::vector> sectionData; + + // 
Valid first section + std::vector validSection1; + validSection1.emplace_back(std::vector{1.0}); + sectionData.push_back(std::move(validSection1)); + + // Empty middle section + std::vector emptySection; + sectionData.push_back(std::move(emptySection)); + + // Valid last section + std::vector validSection2; + validSection2.emplace_back(std::vector{2.0}); + sectionData.push_back(std::move(validSection2)); + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "Vocabulary section is empty"); +} + +TEST(VocabStorageTest, NoSections) { + std::vector> sectionData; // No sections + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "Vocabulary has no sections"); +} + +TEST(VocabStorageTest, MismatchedDimensionsAcrossSections) { + std::vector> sectionData; + + // Section 0: embeddings with dimension 2 + std::vector section0; + section0.emplace_back(std::vector{1.0, 2.0}); + section0.emplace_back(std::vector{3.0, 4.0}); + sectionData.push_back(std::move(section0)); + + // Section 1: embedding with dimension 3 (mismatch!) + std::vector section1; + section1.emplace_back(std::vector{5.0, 6.0, 7.0}); + sectionData.push_back(std::move(section1)); + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "All embeddings must have the same dimension"); +} + +TEST(VocabStorageTest, MismatchedDimensionsWithinSection) { + std::vector> sectionData; + + // Section 0: first embedding with dimension 2, second with dimension 3 + std::vector section0; + section0.emplace_back(std::vector{1.0, 2.0}); + section0.emplace_back(std::vector{3.0, 4.0, 5.0}); // Mismatch! + sectionData.push_back(std::move(section0)); + + EXPECT_DEATH(VocabStorage(std::move(sectionData)), + "All embeddings must have the same dimension"); +} +#endif // NDEBUG +#endif // GTEST_HAS_DEATH_TEST + +TEST(VocabStorageTest, IteratorBasics) { + std::vector> sectionData; + + std::vector section0; + section0.emplace_back(std::vector{1.0, 2.0}); + section0.emplace_back(std::vector{3.0, 4.0}); + sectionData.push_back(std::move(section0)); + + std::vector section1; + section1.emplace_back(std::vector{5.0, 6.0}); + sectionData.push_back(std::move(section1)); + + VocabStorage storage(std::move(sectionData)); + + // Test iterator basics + auto it = storage.begin(); + auto end = storage.end(); + + EXPECT_NE(it, end); + + // Check first embedding + EXPECT_THAT((*it).getData(), ElementsAre(1.0, 2.0)); + + // Advance to second embedding + ++it; + EXPECT_NE(it, end); + EXPECT_THAT((*it).getData(), ElementsAre(3.0, 4.0)); + + // Advance to third embedding (in section 1) + ++it; + EXPECT_NE(it, end); + EXPECT_THAT((*it).getData(), ElementsAre(5.0, 6.0)); + + // Advance past the end + ++it; + EXPECT_EQ(it, end); +} + +TEST(VocabStorageTest, IteratorTraversal) { + std::vector> sectionData; + + // Section 0: 2 embeddings + std::vector section0; + section0.emplace_back(std::vector{10.0}); + section0.emplace_back(std::vector{20.0}); + sectionData.push_back(std::move(section0)); + + // Section 1: 1 embedding + std::vector section1; + section1.emplace_back(std::vector{25.0}); + sectionData.push_back(std::move(section1)); + + // Section 2: 3 embeddings + std::vector section2; + section2.emplace_back(std::vector{30.0}); + section2.emplace_back(std::vector{40.0}); + section2.emplace_back(std::vector{50.0}); + sectionData.push_back(std::move(section2)); + + VocabStorage storage(std::move(sectionData)); + + // Collect all values using iterator + std::vector values; + for (const auto &emb : storage) { + EXPECT_EQ(emb.size(), 1u); + values.push_back(emb[0]); + } + + // 
Should get all embeddings from all sections + EXPECT_THAT(values, ElementsAre(10.0, 20.0, 25.0, 30.0, 40.0, 50.0)); +} + +TEST(VocabStorageTest, IteratorComparison) { + std::vector> sectionData; + std::vector section0; + section0.emplace_back(std::vector{1.0}); + section0.emplace_back(std::vector{2.0}); + sectionData.push_back(std::move(section0)); + + VocabStorage storage(std::move(sectionData)); + + auto it1 = storage.begin(); + auto it2 = storage.begin(); + auto end = storage.end(); + + // Test equality + EXPECT_EQ(it1, it2); + EXPECT_NE(it1, end); + + // Advance one iterator + ++it1; + EXPECT_NE(it1, it2); + EXPECT_NE(it1, end); + + // Advance second iterator to match + ++it2; + EXPECT_EQ(it1, it2); + + // Advance both to end + ++it1; + ++it2; + EXPECT_EQ(it1, end); + EXPECT_EQ(it2, end); + EXPECT_EQ(it1, it2); +} + } // end anonymous namespace diff --git a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp index 8c4fd8bb449d5..d8457a30fd2f7 100644 --- a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp +++ b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp @@ -24,7 +24,9 @@ using namespace llvm; using namespace llvm::memprof; +namespace llvm { LLVM_ABI extern cl::opt MemProfKeepAllNotColdContexts; +} // end namespace llvm namespace { @@ -228,8 +230,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) CallBase *Call = findCall(*Func, "call"); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); @@ -278,8 +279,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) CallBase *Call = findCall(*Func, "call"); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); @@ -333,8 +333,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) CallBase *Call = findCall(*Func, "call"); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); @@ -393,8 +392,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) CallBase *Call = findCall(*Func, "call"); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); @@ -465,8 +463,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) ASSERT_NE(Call, nullptr); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); 
EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); EXPECT_THAT(MemProfMD, MemprofMetadataEquals(ExpectedVals)); @@ -539,8 +536,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) // Restore original option value. MemProfKeepAllNotColdContexts = OrigMemProfKeepAllNotColdContexts; - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); EXPECT_THAT(MemProfMD, MemprofMetadataEquals(ExpectedVals)); @@ -668,8 +664,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) // The hot allocations will be converted to NotCold and pruned as they // are unnecessary to determine how to clone the cold allocation. - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); diff --git a/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp b/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp index 45dc50ec0839b..c8752c78d1c35 100644 --- a/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp +++ b/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp @@ -25,9 +25,10 @@ #include "llvm/Support/raw_ostream.h" #include "gtest/gtest.h" -LLVM_ABI extern llvm::cl::opt ScalePartialSampleProfileWorkingSetSize; - namespace llvm { + +LLVM_ABI extern cl::opt ScalePartialSampleProfileWorkingSetSize; + namespace { class ProfileSummaryInfoTest : public testing::Test { diff --git a/llvm/unittests/CAS/CMakeLists.txt b/llvm/unittests/CAS/CMakeLists.txt index ab709e30369bf..0f8fcb9e98954 100644 --- a/llvm/unittests/CAS/CMakeLists.txt +++ b/llvm/unittests/CAS/CMakeLists.txt @@ -1,7 +1,3 @@ -if (LLVM_ENABLE_ONDISK_CAS) - add_definitions(-DLLVM_ENABLE_ONDISK_CAS=1) -endif() - set(LLVM_LINK_COMPONENTS Support CAS @@ -12,6 +8,7 @@ add_llvm_unittest(CASTests ActionCacheTest.cpp CASTestConfig.cpp ObjectStoreTest.cpp + OnDiskTrieRawHashMapTest.cpp ProgramTest.cpp ) diff --git a/llvm/unittests/CAS/OnDiskTrieRawHashMapTest.cpp b/llvm/unittests/CAS/OnDiskTrieRawHashMapTest.cpp new file mode 100644 index 0000000000000..7bedfe4b29e30 --- /dev/null +++ b/llvm/unittests/CAS/OnDiskTrieRawHashMapTest.cpp @@ -0,0 +1,220 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/CAS/OnDiskTrieRawHashMap.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/Support/Alignment.h" +#include "llvm/Testing/Support/Error.h" +#include "llvm/Testing/Support/SupportHelpers.h" +#include "gtest/gtest.h" + +#if LLVM_ENABLE_ONDISK_CAS +using namespace llvm; +using namespace llvm::cas; + +namespace { + +struct OnDiskTrieRawHashMapTestFixture + : public ::testing::TestWithParam { + static constexpr size_t MB = 1024u * 1024u; + static constexpr size_t DataSize = 8; // Multiple of 8B. 
+ + std::optional Temp; + size_t NumHashBytes; + + void SetUp() override { + Temp.emplace("trie-raw-hash-map", /*Unique=*/true); + NumHashBytes = GetParam(); + } + void TearDown() override { Temp.reset(); } + + Expected createTrie() { + size_t NumHashBits = NumHashBytes * 8; + return OnDiskTrieRawHashMap::create( + Temp->path((Twine(NumHashBytes) + "B").str()), "index", + /*NumHashBits=*/NumHashBits, DataSize, /*MaxFileSize=*/MB, + /*NewInitialFileSize=*/std::nullopt); + } +}; + +// Create tries with various sizes of hash and with data. +TEST_P(OnDiskTrieRawHashMapTestFixture, General) { + std::optional Trie1; + ASSERT_THAT_ERROR(createTrie().moveInto(Trie1), Succeeded()); + std::optional Trie2; + ASSERT_THAT_ERROR(createTrie().moveInto(Trie2), Succeeded()); + + uint8_t Hash0Bytes[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + uint8_t Hash1Bytes[8] = {1, 0, 0, 0, 0, 0, 0, 0}; + auto Hash0 = ArrayRef(Hash0Bytes).take_front(NumHashBytes); + auto Hash1 = ArrayRef(Hash1Bytes).take_front(NumHashBytes); + constexpr StringLiteral Data0v1Bytes = "data0.v1"; + constexpr StringLiteral Data0v2Bytes = "data0.v2"; + constexpr StringLiteral Data1Bytes = "data1..."; + static_assert(Data0v1Bytes.size() == DataSize, "math error"); + static_assert(Data0v2Bytes.size() == DataSize, "math error"); + static_assert(Data1Bytes.size() == DataSize, "math error"); + ArrayRef Data0v1 = ArrayRef(Data0v1Bytes.data(), Data0v1Bytes.size()); + ArrayRef Data0v2 = ArrayRef(Data0v2Bytes.data(), Data0v2Bytes.size()); + ArrayRef Data1 = ArrayRef(Data1Bytes.data(), Data1Bytes.size()); + + // Lookup when trie is empty. + EXPECT_FALSE(Trie1->find(Hash0)); + + // Insert. + std::optional Offset; + std::optional> Data; + { + std::optional Insertion; + ASSERT_THAT_ERROR(Trie1->insert({Hash0, Data0v1}).moveInto(Insertion), + Succeeded()); + EXPECT_EQ(Hash0, (*Insertion)->Hash); + EXPECT_EQ(Data0v1, (*Insertion)->Data); + EXPECT_TRUE(isAddrAligned(Align(8), (*Insertion)->Data.data())); + + Offset = Insertion->getOffset(); + Data = (*Insertion)->Data; + } + + // Find. + { + auto Lookup = Trie1->find(Hash0); + ASSERT_TRUE(Lookup); + EXPECT_EQ(Hash0, Lookup->Hash); + EXPECT_EQ(Data0v1, Lookup->Data); + EXPECT_EQ(Offset->get(), Lookup.getOffset().get()); + } + + // Find in a different instance of the same on-disk trie that existed + // before the insertion. + { + auto Lookup = Trie2->find(Hash0); + ASSERT_TRUE(Lookup); + EXPECT_EQ(Hash0, Lookup->Hash); + EXPECT_EQ(Data0v1, Lookup->Data); + EXPECT_EQ(Offset->get(), Lookup.getOffset().get()); + } + + // Create a new instance and check that too. + Trie2.reset(); + ASSERT_THAT_ERROR(createTrie().moveInto(Trie2), Succeeded()); + { + auto Lookup = Trie2->find(Hash0); + ASSERT_TRUE(Lookup); + EXPECT_EQ(Hash0, Lookup->Hash); + EXPECT_EQ(Data0v1, Lookup->Data); + EXPECT_EQ(Offset->get(), Lookup.getOffset().get()); + } + + // Change the data. + llvm::copy(Data0v2, Data->data()); + { + auto Lookup = Trie2->find(Hash0); + ASSERT_TRUE(Lookup); + EXPECT_EQ(Hash0, Lookup->Hash); + EXPECT_EQ(Data0v2, Lookup->Data); + EXPECT_EQ(Offset->get(), Lookup.getOffset().get()); + } + + // Find different hash. + EXPECT_FALSE(Trie1->find(Hash1)); + EXPECT_FALSE(Trie2->find(Hash1)); + + // Recover from an offset. 
+ { + OnDiskTrieRawHashMap::const_pointer Recovered; + ASSERT_THAT_ERROR(Trie1->recoverFromFileOffset(*Offset).moveInto(Recovered), + Succeeded()); + ASSERT_TRUE(Recovered); + EXPECT_EQ(Offset->get(), Recovered.getOffset().get()); + EXPECT_EQ(Hash0, Recovered->Hash); + EXPECT_EQ(Data0v2, Recovered->Data); + } + + // Recover from a bad offset. + { + FileOffset BadOffset(1); + OnDiskTrieRawHashMap::const_pointer Recovered; + ASSERT_THAT_ERROR( + Trie1->recoverFromFileOffset(BadOffset).moveInto(Recovered), Failed()); + } + + // Insert another thing. + { + std::optional Insertion; + ASSERT_THAT_ERROR(Trie1->insert({Hash1, Data1}).moveInto(Insertion), + Succeeded()); + EXPECT_EQ(Hash1, (*Insertion)->Hash); + EXPECT_EQ(Data1, (*Insertion)->Data); + EXPECT_TRUE(isAddrAligned(Align(8), (*Insertion)->Data.data())); + + EXPECT_NE(Offset->get(), Insertion->getOffset().get()); + } + + // Validate. + { + auto RecordVerify = + [&](FileOffset Offset, + OnDiskTrieRawHashMap::ConstValueProxy Proxy) -> Error { + if (Proxy.Hash.size() != NumHashBytes) + return createStringError("wrong hash size"); + if (Proxy.Data.size() != DataSize) + return createStringError("wrong data size"); + + return Error::success(); + }; + ASSERT_THAT_ERROR(Trie1->validate(RecordVerify), Succeeded()); + ASSERT_THAT_ERROR(Trie2->validate(RecordVerify), Succeeded()); + } + + // Size and capacity. + { + EXPECT_EQ(Trie1->capacity(), MB); + EXPECT_EQ(Trie2->capacity(), MB); + EXPECT_LE(Trie1->size(), MB); + EXPECT_LE(Trie2->size(), MB); + } +} + +INSTANTIATE_TEST_SUITE_P(OnDiskTrieRawHashMapTest, + OnDiskTrieRawHashMapTestFixture, + ::testing::Values(1, 2, 4, 8)); + +TEST(OnDiskTrieRawHashMapTest, OutOfSpace) { + unittest::TempDir Temp("trie-raw-hash-map", /*Unique=*/true); + std::optional Trie; + + // Too small to create header. + ASSERT_THAT_ERROR(OnDiskTrieRawHashMap::create( + Temp.path("NoSpace1").str(), "index", + /*NumHashBits=*/8, /*DataSize=*/8, /*MaxFileSize=*/8, + /*NewInitialFileSize=*/std::nullopt) + .moveInto(Trie), + Failed()); + + // Just enough for root node but not enough for any insertion. + ASSERT_THAT_ERROR(OnDiskTrieRawHashMap::create( + Temp.path("NoSpace2").str(), "index", + /*NumHashBits=*/8, /*DataSize=*/8, /*MaxFileSize=*/118, + /*NewInitialFileSize=*/std::nullopt, + /*NewTableNumRootBits=*/1, /*NewTableNumSubtrieBits=*/1) + .moveInto(Trie), + Succeeded()); + uint8_t Hash0Bytes[1] = {0}; + auto Hash0 = ArrayRef(Hash0Bytes); + constexpr StringLiteral Data0v1Bytes = "data0.v1"; + ArrayRef Data0v1 = ArrayRef(Data0v1Bytes.data(), Data0v1Bytes.size()); + std::optional Insertion; + ASSERT_THAT_ERROR(Trie->insert({Hash0, Data0v1}).moveInto(Insertion), + Failed()); +} + +} // namespace + +#endif // LLVM_ENABLE_ONDISK_CAS diff --git a/llvm/unittests/CodeGen/RegAllocScoreTest.cpp b/llvm/unittests/CodeGen/RegAllocScoreTest.cpp index 86bfc7a81d1be..432dc9348fbd3 100644 --- a/llvm/unittests/CodeGen/RegAllocScoreTest.cpp +++ b/llvm/unittests/CodeGen/RegAllocScoreTest.cpp @@ -31,11 +31,14 @@ #include "gtest/gtest.h" using namespace llvm; + +namespace llvm { LLVM_ABI extern cl::opt CopyWeight; LLVM_ABI extern cl::opt LoadWeight; LLVM_ABI extern cl::opt StoreWeight; LLVM_ABI extern cl::opt CheapRematWeight; LLVM_ABI extern cl::opt ExpensiveRematWeight; +} // namespace llvm namespace { // Include helper functions to ease the manipulation of MachineFunctions. 
diff --git a/llvm/unittests/CodeGen/TypeTraitsTest.cpp b/llvm/unittests/CodeGen/TypeTraitsTest.cpp index dde86280cff6a..f0ed0e870cbb3 100644 --- a/llvm/unittests/CodeGen/TypeTraitsTest.cpp +++ b/llvm/unittests/CodeGen/TypeTraitsTest.cpp @@ -6,13 +6,16 @@ // //===----------------------------------------------------------------------===// +#include "llvm/CodeGen/RDFRegisters.h" #include "llvm/CodeGen/RegisterPressure.h" #include "llvm/CodeGen/ScheduleDAG.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/SlotIndexes.h" #include "llvm/CodeGen/TargetPassConfig.h" #include "gtest/gtest.h" +#include #include +#include using namespace llvm; @@ -23,3 +26,35 @@ static_assert(std::is_trivially_copyable_v, "trivially copyable"); static_assert(std::is_trivially_copyable_v, "trivially copyable"); static_assert(std::is_trivially_copyable_v, "trivially copyable"); + +// https://llvm.org/PR105169 +// Verify that we won't accidently specialize std::less and std::equal_to in a +// wrong way. +// C++17 [namespace.std]/2, C++20/23 [namespace.std]/5: +// A program may explicitly instantiate a template defined in the standard +// library only if the declaration +// - depends on the name of a user-defined type and +// - the instantiation meets the standard library requirements for the +// original template. +template constexpr bool CheckStdCmpRequirements() { + // std::less and std::equal_to are literal, default constructible, and + // copyable classes. + Fn f1{}; + auto f2 = f1; + auto f3 = std::move(f2); + f2 = f3; + f2 = std::move(f3); + + // Properties held on all known implementations, although not guaranteed by + // the standard. + static_assert(std::is_empty_v); + static_assert(std::is_trivially_default_constructible_v); + static_assert(std::is_trivially_copyable_v); + + return true; +} + +static_assert(CheckStdCmpRequirements>(), + "same as the original template"); +static_assert(CheckStdCmpRequirements>(), + "same as the original template"); diff --git a/llvm/unittests/DebugInfo/LogicalView/DWARFReaderTest.cpp b/llvm/unittests/DebugInfo/LogicalView/DWARFReaderTest.cpp index 78dc8502e9676..fb728c8c22e77 100644 --- a/llvm/unittests/DebugInfo/LogicalView/DWARFReaderTest.cpp +++ b/llvm/unittests/DebugInfo/LogicalView/DWARFReaderTest.cpp @@ -163,13 +163,12 @@ void checkUnspecifiedParameters(LVReader *Reader) { LVPublicNames::const_iterator IterNames = PublicNames.cbegin(); LVScope *Function = (*IterNames).first; EXPECT_EQ(Function->getName(), "foo_printf"); - const LVElements *Elements = Function->getChildren(); - ASSERT_NE(Elements, nullptr); + const LVElementsView Elements = Function->getChildren(); // foo_printf is a variadic function whose prototype is // `int foo_printf(const char *, ...)`, where the '...' is represented by a // DW_TAG_unspecified_parameters, i.e. we expect to find at least one child // for which getIsUnspecified() returns true. 
- EXPECT_TRUE(llvm::any_of(*Elements, [](const LVElement *elt) { + EXPECT_TRUE(llvm::any_of(Elements, [](const LVElement *elt) { return elt->getIsSymbol() && static_cast(elt)->getIsUnspecified(); })); @@ -183,8 +182,8 @@ void checkScopeModule(LVReader *Reader) { EXPECT_EQ(Root->getFileFormatName(), "Mach-O 64-bit x86-64"); EXPECT_EQ(Root->getName(), DwarfClangModule); - ASSERT_NE(CompileUnit->getChildren(), nullptr); - LVElement *FirstChild = *(CompileUnit->getChildren()->begin()); + LVElement *FirstChild = *(CompileUnit->getChildren().begin()); + ASSERT_NE(FirstChild, nullptr); EXPECT_EQ(FirstChild->getIsScope(), 1); LVScopeModule *Module = static_cast(FirstChild); EXPECT_EQ(Module->getIsModule(), 1); diff --git a/llvm/unittests/Frontend/CMakeLists.txt b/llvm/unittests/Frontend/CMakeLists.txt index 836a844b710db..1ce34e77cb348 100644 --- a/llvm/unittests/Frontend/CMakeLists.txt +++ b/llvm/unittests/Frontend/CMakeLists.txt @@ -1,5 +1,6 @@ set(LLVM_LINK_COMPONENTS Analysis + BinaryFormat Core FrontendHLSL FrontendOffloading diff --git a/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp b/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp index 1eb03f16527ec..451c376219c38 100644 --- a/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp +++ b/llvm/unittests/Frontend/HLSLRootSignatureDumpTest.cpp @@ -266,7 +266,8 @@ TEST(HLSLRootSignatureTest, DefaultStaticSamplerDump) { "minLOD = 0.000000e+00, " "maxLOD = 3.402823e+38, " "space = 0, " - "visibility = All" + "visibility = All, " + "flags = None" ")"; EXPECT_EQ(Out, Expected); } @@ -287,6 +288,7 @@ TEST(HLSLRootSignatureTest, DefinedStaticSamplerDump) { Sampler.MaxLOD = 32.0f; Sampler.Space = 7; Sampler.Visibility = llvm::dxbc::ShaderVisibility::Domain; + Sampler.Flags = llvm::dxbc::StaticSamplerFlags::NonNormalizedCoordinates; std::string Out; llvm::raw_string_ostream OS(Out); @@ -305,7 +307,8 @@ TEST(HLSLRootSignatureTest, DefinedStaticSamplerDump) { "minLOD = 1.000000e+00, " "maxLOD = 3.200000e+01, " "space = 7, " - "visibility = Domain" + "visibility = Domain, " + "flags = NonNormalizedCoordinates" ")"; EXPECT_EQ(Out, Expected); } diff --git a/llvm/unittests/IR/DataLayoutTest.cpp b/llvm/unittests/IR/DataLayoutTest.cpp index e0c0f35847f07..9ca88141ca0eb 100644 --- a/llvm/unittests/IR/DataLayoutTest.cpp +++ b/llvm/unittests/IR/DataLayoutTest.cpp @@ -320,7 +320,8 @@ TEST(DataLayout, ParsePointerSpec) { "\"p[]::[:[:]]\"")); // address space - for (StringRef Str : {"p0x0:32:32", "px:32:32:32", "p16777216:32:32:32:32"}) + for (StringRef Str : + {"p0x0:32:32", "p10_000:32:32:32", "p16777216:32:32:32:32"}) EXPECT_THAT_EXPECTED( DataLayout::parse(Str), FailedWithMessage("address space must be a 24-bit integer")); @@ -401,6 +402,26 @@ TEST(DataLayout, ParsePointerSpec) { EXPECT_THAT_EXPECTED( DataLayout::parse(Str), FailedWithMessage("index size cannot be larger than the pointer size")); + + // Only 'e', 'u', and 'n' flags are valid. + EXPECT_THAT_EXPECTED( + DataLayout::parse("pa:32:32"), + FailedWithMessage("'a' is not a valid pointer specification flag")); + EXPECT_THAT_EXPECTED( + DataLayout::parse("puX:32:32"), + FailedWithMessage("'X' is not a valid pointer specification flag")); + // Flags must be before the address space number. + EXPECT_THAT_EXPECTED( + DataLayout::parse("p2n:32:32"), + FailedWithMessage("address space must be a 24-bit integer")); + + // AS0 cannot be non-integral. 
+ for (StringRef Str : {"pe:64:64", "pu:64:64", "pue:64:64", "pe0:64:64", + "pu0:64:64", "peu0:64:64"}) + EXPECT_THAT_EXPECTED( + DataLayout::parse(Str), + FailedWithMessage( + "address space 0 cannot be unstable or have external state")); } TEST(DataLayoutTest, ParseNativeIntegersSpec) { @@ -556,18 +577,127 @@ TEST(DataLayout, GetPointerPrefAlignment) { } TEST(DataLayout, IsNonIntegralAddressSpace) { - DataLayout Default; - EXPECT_THAT(Default.getNonIntegralAddressSpaces(), ::testing::SizeIs(0)); + const DataLayout Default; + EXPECT_THAT(Default.getNonStandardAddressSpaces(), ::testing::SizeIs(0)); EXPECT_FALSE(Default.isNonIntegralAddressSpace(0)); EXPECT_FALSE(Default.isNonIntegralAddressSpace(1)); - DataLayout Custom = cantFail(DataLayout::parse("ni:2:16777215")); - EXPECT_THAT(Custom.getNonIntegralAddressSpaces(), + const DataLayout Custom = cantFail(DataLayout::parse("ni:2:16777215")); + EXPECT_THAT(Custom.getNonStandardAddressSpaces(), ::testing::ElementsAreArray({2U, 16777215U})); EXPECT_FALSE(Custom.isNonIntegralAddressSpace(0)); EXPECT_FALSE(Custom.isNonIntegralAddressSpace(1)); EXPECT_TRUE(Custom.isNonIntegralAddressSpace(2)); + EXPECT_TRUE(Custom.mustNotIntroduceIntToPtr(2)); + EXPECT_TRUE(Custom.mustNotIntroducePtrToInt(2)); EXPECT_TRUE(Custom.isNonIntegralAddressSpace(16777215)); + EXPECT_TRUE(Custom.mustNotIntroduceIntToPtr(16777215)); + EXPECT_TRUE(Custom.mustNotIntroducePtrToInt(16777215)); + + // Pointers are marked as non-integral if the address size != total size + for (const auto *Layout : {"p2:64:64:64:32", "p2:128:64:64:64"}) { + const DataLayout DL = cantFail(DataLayout::parse(Layout)); + EXPECT_TRUE(DL.isNonIntegralAddressSpace(2)); + EXPECT_FALSE(DL.hasUnstableRepresentation(2)); + EXPECT_FALSE(DL.hasExternalState(2)); + EXPECT_FALSE(DL.mustNotIntroduceIntToPtr(2)); + EXPECT_FALSE(DL.mustNotIntroducePtrToInt(2)); + EXPECT_THAT(DL.getNonStandardAddressSpaces(), + ::testing::ElementsAreArray({2U})); + } + // Pointers can be marked as unstable using 'pu' + for (const auto *Layout : {"pu2:64:64:64:64", "pu2:64:64:64:32"}) { + const DataLayout DL = cantFail(DataLayout::parse(Layout)); + // Note: isNonIntegralAddressSpace returns true for even with index == + EXPECT_TRUE(DL.isNonIntegralAddressSpace(2)); + EXPECT_TRUE(DL.hasUnstableRepresentation(2)); + EXPECT_FALSE(DL.hasExternalState(2)); + EXPECT_TRUE(DL.mustNotIntroducePtrToInt(2)); + EXPECT_TRUE(DL.mustNotIntroduceIntToPtr(2)); + EXPECT_THAT(DL.getNonStandardAddressSpaces(), + ::testing::ElementsAreArray({2U})); + } + + // Non-integral pointers with external state ('e' flag). 
+ for (const auto *Layout : {"pe2:64:64:64:32", "pe2:64:64:64:64"}) { + const DataLayout DL = cantFail(DataLayout::parse(Layout)); + EXPECT_TRUE(DL.isNonIntegralAddressSpace(2)); + EXPECT_TRUE(DL.hasExternalState(2)); + EXPECT_TRUE(DL.mustNotIntroduceIntToPtr(2)); + EXPECT_FALSE(DL.mustNotIntroducePtrToInt(2)); + EXPECT_FALSE(DL.hasUnstableRepresentation(2)); + EXPECT_THAT(DL.getNonStandardAddressSpaces(), + ::testing::ElementsAreArray({2U})); + } + + // It is also possible to have both unstable representation and external state + for (const auto *Layout : {"peu2:64:64:64:32", "pue2:128:64:64:64"}) { + const DataLayout DL = cantFail(DataLayout::parse(Layout)); + EXPECT_TRUE(DL.isNonIntegralAddressSpace(2)); + EXPECT_TRUE(DL.hasExternalState(2)); + EXPECT_TRUE(Custom.mustNotIntroduceIntToPtr(2)); + EXPECT_TRUE(Custom.mustNotIntroducePtrToInt(2)); + EXPECT_TRUE(DL.hasUnstableRepresentation(2)); + EXPECT_THAT(DL.getNonStandardAddressSpaces(), + ::testing::ElementsAreArray({2U})); + } + + // For backwards compatibility, the ni DataLayout part overrides any + // p[e][u]. + for (const auto *Layout : + {"ni:2-p2:64:64:64:32", "ni:2-pu2:64:64:64:32", "ni:2-pu2:64:64:64:32", + "p2:64:64:64:32-ni:2", "pu2:64:64:64:32-ni:2", "pe2:64:64:64:32-ni:2", + "peeee2:64:64:64:32-pu2:64:64:64:32-ni:2"}) { + DataLayout DL = cantFail(DataLayout::parse(Layout)); + EXPECT_TRUE(DL.isNonIntegralAddressSpace(2)); + EXPECT_TRUE(DL.hasUnstableRepresentation(2)); + // The external state property is new and not expected for existing uses of + // non-integral pointers, so existing :ni data layouts should not set it. + EXPECT_FALSE(DL.hasExternalState(2)); + EXPECT_THAT(DL.getNonStandardAddressSpaces(), + ::testing::ElementsAreArray({2U})); + } +} + +TEST(DataLayout, NonIntegralHelpers) { + DataLayout DL = cantFail(DataLayout::parse( + "p1:128:128:128:64-pu2:32:32:32:32-pu3:64:64:64:32-pe4:64:64:64:32")); + EXPECT_THAT(DL.getNonStandardAddressSpaces(), + ::testing::ElementsAreArray({1u, 2u, 3u, 4u})); + struct Result { + unsigned Addrspace; + bool NonIntegral; + bool Unstable; + bool ExternalState; + unsigned Size; + } ExpectedResults[] = { + {0, false, false, false, 64}, {1, true, false, false, 128}, + {2, true, true, false, 32}, {3, true, true, false, 64}, + {4, true, false, true, 64}, + }; + LLVMContext Ctx; + for (const auto &Exp : ExpectedResults) { + EXPECT_EQ(Exp.NonIntegral, DL.isNonIntegralAddressSpace(Exp.Addrspace)); + EXPECT_EQ(Exp.Unstable, DL.hasUnstableRepresentation(Exp.Addrspace)); + EXPECT_EQ(Exp.ExternalState, DL.hasExternalState(Exp.Addrspace)); + bool AvoidIntToPtr = Exp.Unstable || Exp.ExternalState; + EXPECT_EQ(AvoidIntToPtr, DL.mustNotIntroduceIntToPtr(Exp.Addrspace)); + bool AvoidPtrToInt = Exp.Unstable; + EXPECT_EQ(AvoidPtrToInt, DL.mustNotIntroducePtrToInt(Exp.Addrspace)); + Type *PtrTy = PointerType::get(Ctx, Exp.Addrspace); + Type *PtrVecTy = VectorType::get(PtrTy, 2, /*Scalable=*/false); + Type *ScalablePtrVecTy = VectorType::get(PtrTy, 1, /*Scalable=*/true); + for (Type *Ty : {PtrTy, PtrVecTy, ScalablePtrVecTy}) { + EXPECT_EQ(AvoidPtrToInt, DL.mustNotIntroducePtrToInt(Ty)); + EXPECT_EQ(AvoidIntToPtr, DL.mustNotIntroduceIntToPtr(Ty)); + // The old API should return true for both unstable and non-integral. 
+ EXPECT_EQ(Exp.Unstable || Exp.NonIntegral, + DL.isNonIntegralPointerType(Ty)); + } + // Both helpers gracefully handle non-pointer, non-vector-of-pointers: + EXPECT_FALSE(DL.mustNotIntroducePtrToInt(IntegerType::getInt1Ty(Ctx))); + EXPECT_FALSE(DL.mustNotIntroduceIntToPtr(IntegerType::getInt1Ty(Ctx))); + } } TEST(DataLayoutTest, CopyAssignmentInvalidatesStructLayout) { diff --git a/llvm/unittests/IR/IntrinsicsTest.cpp b/llvm/unittests/IR/IntrinsicsTest.cpp index 49af83609d98c..cfd99ed542162 100644 --- a/llvm/unittests/IR/IntrinsicsTest.cpp +++ b/llvm/unittests/IR/IntrinsicsTest.cpp @@ -189,4 +189,12 @@ TEST_F(IntrinsicsTest, InstrProfInheritance) { } } +// Check that getFnAttributes for intrinsics that do not have any function +// attributes correcty returns an empty set. +TEST(IntrinsicAttributes, TestGetFnAttributesBug) { + using namespace Intrinsic; + LLVMContext Context; + AttributeSet AS = getFnAttributes(Context, experimental_guard); + EXPECT_FALSE(AS.hasAttributes()); +} } // end namespace diff --git a/llvm/unittests/MC/StringTableBuilderTest.cpp b/llvm/unittests/MC/StringTableBuilderTest.cpp index 05f469a229bf9..44a985be6cfcb 100644 --- a/llvm/unittests/MC/StringTableBuilderTest.cpp +++ b/llvm/unittests/MC/StringTableBuilderTest.cpp @@ -58,8 +58,8 @@ TEST(StringTableBuilderTest, BasicWinCOFF) { std::string Expected; - ExpectedSize = support::endian::byte_swap( - ExpectedSize); + ExpectedSize = support::endian::byte_swap(ExpectedSize, + llvm::endianness::little); Expected.append((const char*)&ExpectedSize, 4); Expected += "pygmy hippopotamus"; Expected += '\x00'; diff --git a/llvm/unittests/Object/DXContainerTest.cpp b/llvm/unittests/Object/DXContainerTest.cpp index 396d060a75bfd..d6f7b26b99cd7 100644 --- a/llvm/unittests/Object/DXContainerTest.cpp +++ b/llvm/unittests/Object/DXContainerTest.cpp @@ -1200,4 +1200,52 @@ TEST(RootSignature, ParseStaticSamplers) { ASSERT_EQ(Sampler.RegisterSpace, 32u); ASSERT_EQ(Sampler.ShaderVisibility, 7u); } + { + // this is testing static sampler parsing for root signature version 1.2, + // it changes: the version number, the size of root signature being emitted + // and the values for flag fields. 
+ uint8_t Buffer[] = { + 0x44, 0x58, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x90, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x54, 0x53, 0x30, 0x4c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, + 0xa4, 0x70, 0x9d, 0x3f, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x85, 0xeb, 0x91, 0x40, 0x66, 0x66, 0x0e, 0x41, + 0x1f, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00}; + DXContainer C = + llvm::cantFail(DXContainer::create(getMemoryBuffer<148>(Buffer))); + + auto MaybeRS = C.getRootSignature(); + ASSERT_TRUE(MaybeRS.has_value()); + const auto &RS = MaybeRS.value(); + ASSERT_EQ(RS.getVersion(), 3U); + ASSERT_EQ(RS.getNumParameters(), 0U); + ASSERT_EQ(RS.getRootParametersOffset(), 0U); + ASSERT_EQ(RS.getNumStaticSamplers(), 1U); + ASSERT_EQ(RS.getStaticSamplersOffset(), 24U); + ASSERT_EQ(RS.getFlags(), 17U); + + auto Sampler = *RS.samplers().begin(); + + ASSERT_EQ(Sampler.Filter, 10U); + ASSERT_EQ(Sampler.AddressU, 1U); + ASSERT_EQ(Sampler.AddressV, 2U); + ASSERT_EQ(Sampler.AddressW, 5U); + ASSERT_FLOAT_EQ(Sampler.MipLODBias, 1.23F); + ASSERT_EQ(Sampler.MaxAnisotropy, 20U); + ASSERT_EQ(Sampler.ComparisonFunc, 4U); + ASSERT_EQ(Sampler.BorderColor, 0U); + ASSERT_FLOAT_EQ(Sampler.MinLOD, 4.56F); + ASSERT_FLOAT_EQ(Sampler.MaxLOD, 8.9F); + ASSERT_EQ(Sampler.ShaderRegister, 31U); + ASSERT_EQ(Sampler.RegisterSpace, 32U); + ASSERT_EQ(Sampler.ShaderVisibility, 7U); + ASSERT_EQ(Sampler.Flags, 1U); + } } diff --git a/llvm/unittests/Object/ELFTest.cpp b/llvm/unittests/Object/ELFTest.cpp index faf855c09cfe8..7c68ab5c8985f 100644 --- a/llvm/unittests/Object/ELFTest.cpp +++ b/llvm/unittests/Object/ELFTest.cpp @@ -7,6 +7,10 @@ //===----------------------------------------------------------------------===// #include "llvm/Object/ELF.h" +#include "llvm/Object/ELFObjectFile.h" +#include "llvm/ObjectYAML/yaml2obj.h" +#include "llvm/Support/Error.h" +#include "llvm/Support/YAMLTraits.h" #include "llvm/Testing/Support/Error.h" #include "gtest/gtest.h" @@ -310,3 +314,71 @@ TEST(ELFTest, Hash) { // presuming 32-bit long. Thus make sure that extra bit doesn't appear. 
EXPECT_EQ(hashSysV("ZZZZZW9p"), 0U); } + +template +static Expected> toBinary(SmallVectorImpl &Storage, + StringRef Yaml) { + raw_svector_ostream OS(Storage); + yaml::Input YIn(Yaml); + if (!yaml::convertYAML(YIn, OS, [](const Twine &Msg) {})) + return createStringError(std::errc::invalid_argument, + "unable to convert YAML"); + return ELFObjectFile::create(MemoryBufferRef(OS.str(), "dummyELF")); +} + +TEST(ELFObjectFileTest, ELFNoteIteratorOverflow) { + using Elf_Shdr_Range = ELFFile::Elf_Shdr_Range; + using Elf_Phdr_Range = ELFFile::Elf_Phdr_Range; + + SmallString<0> Storage; + Expected> ElfOrErr = toBinary(Storage, R"( +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +ProgramHeaders: + - Type: PT_NOTE + FileSize: 0xffffffffffffff88 + FirstSec: .note.gnu.build-id + LastSec: .note.gnu.build-id +Sections: + - Name: .note.gnu.build-id + Type: SHT_NOTE + AddressAlign: 0x04 + ShOffset: 0xffffffffffffff88 + Notes: + - Name: "GNU" + Desc: "abb50d82b6bdc861" + Type: 3 +)"); + ASSERT_THAT_EXPECTED(ElfOrErr, Succeeded()); + ELFFile Obj = ElfOrErr.get().getELFFile(); + + auto CheckOverflow = [&](auto &&PhdrOrShdr, uint64_t Offset, uint64_t Size) { + Error Err = Error::success(); + Obj.notes(PhdrOrShdr, Err); + + std::string ErrMessage; + handleAllErrors(std::move(Err), [&](const ErrorInfoBase &EI) { + ErrMessage = EI.message(); + }); + + EXPECT_EQ(ErrMessage, ("invalid offset (0x" + Twine::utohexstr(Offset) + + ") or size (0x" + Twine::utohexstr(Size) + ")") + .str()); + }; + + Expected PhdrsOrErr = Obj.program_headers(); + EXPECT_FALSE(!PhdrsOrErr); + for (Elf_Phdr_Impl P : *PhdrsOrErr) + if (P.p_type == ELF::PT_NOTE) + CheckOverflow(P, P.p_offset, P.p_filesz); + + Expected ShdrsOrErr = Obj.sections(); + EXPECT_FALSE(!ShdrsOrErr); + for (Elf_Shdr_Impl S : *ShdrsOrErr) + if (S.sh_type == ELF::SHT_NOTE) + CheckOverflow(S, S.sh_offset, S.sh_size); +} diff --git a/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp b/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp index b0ad208625436..1b21fe01dfca9 100644 --- a/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp +++ b/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp @@ -526,3 +526,54 @@ TEST(RootSignature, ParseStaticSamplers) { EXPECT_EQ(Storage.size(), 144u); EXPECT_TRUE(memcmp(Buffer, Storage.data(), 144u) == 0); } + +TEST(RootSignature, ParseStaticSamplersV13) { + SmallString<128> Storage; + + // First read a fully explicit yaml with all sizes and offsets provided + ASSERT_TRUE(convert(Storage, R"(--- !dxcontainer +Header: + Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ] + Version: + Major: 1 + Minor: 0 + PartCount: 1 + PartOffsets: [ 60 ] +Parts: + - Name: RTS0 + Size: 76 + RootSignature: + Version: 3 + NumRootParameters: 0 + RootParametersOffset: 24 + NumStaticSamplers: 1 + StaticSamplersOffset: 24 + Parameters: [] + Samplers: + - ShaderRegister: 31 + RegisterSpace: 32 + ShaderVisibility: All + SAMPLER_FLAG_UINT_BORDER_COLOR: true + AllowInputAssemblerInputLayout: true + DenyGeometryShaderRootAccess: true + )")); + + uint8_t Buffer[] = { + 0x44, 0x58, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x90, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x54, 0x53, 0x30, 0x4c, 0x00, 0x00, 0x00, 
0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x7f, + 0x1f, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00}; + + EXPECT_EQ(Storage.size(), 148U); + EXPECT_TRUE(memcmp(Buffer, Storage.data(), 148U) == 0); +} diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp index abe36bc759658..6ea951eee920b 100644 --- a/llvm/unittests/ProfileData/MemProfTest.cpp +++ b/llvm/unittests/ProfileData/MemProfTest.cpp @@ -26,13 +26,14 @@ #include -LLVM_ABI extern llvm::cl::opt MemProfLifetimeAccessDensityColdThreshold; -LLVM_ABI extern llvm::cl::opt MemProfAveLifetimeColdThreshold; -LLVM_ABI extern llvm::cl::opt +namespace llvm { + +LLVM_ABI extern cl::opt MemProfLifetimeAccessDensityColdThreshold; +LLVM_ABI extern cl::opt MemProfAveLifetimeColdThreshold; +LLVM_ABI extern cl::opt MemProfMinAveLifetimeAccessDensityHotThreshold; -LLVM_ABI extern llvm::cl::opt MemProfUseHotHints; +LLVM_ABI extern cl::opt MemProfUseHotHints; -namespace llvm { namespace memprof { namespace { diff --git a/llvm/unittests/Remarks/YAMLRemarksSerializerTest.cpp b/llvm/unittests/Remarks/YAMLRemarksSerializerTest.cpp index 112cd92285685..974356d9cf30a 100644 --- a/llvm/unittests/Remarks/YAMLRemarksSerializerTest.cpp +++ b/llvm/unittests/Remarks/YAMLRemarksSerializerTest.cpp @@ -163,3 +163,33 @@ TEST(YAMLRemarks, SerializerRemarkParsedStrTabStandaloneNoStrTab) { "...\n"), std::move(PreFilledStrTab)); } + +TEST(YAMLRemarks, SerializerRemarkStringRefOOBRead) { + remarks::Remark R; + R.RemarkType = remarks::Type::Missed; + R.PassName = StringRef("passAAAA", 4); + R.RemarkName = StringRef("nameAAAA", 4); + R.FunctionName = StringRef("funcAAAA", 4); + R.Loc = remarks::RemarkLocation{StringRef("pathAAAA", 4), 3, 4}; + R.Hotness = 5; + R.Args.emplace_back(); + R.Args.back().Key = StringRef("keyAAAA", 3); + R.Args.back().Val = StringRef("valueAAAA", 5); + R.Args.emplace_back(); + R.Args.back().Key = StringRef("keydebugAAAA", 8); + R.Args.back().Val = StringRef("valuedebugAAAA", 10); + R.Args.back().Loc = + remarks::RemarkLocation{StringRef("argpathAAAA", 7), 6, 7}; + checkStandalone(remarks::Format::YAML, R, + "--- !Missed\n" + "Pass: pass\n" + "Name: name\n" + "DebugLoc: { File: path, Line: 3, Column: 4 }\n" + "Function: func\n" + "Hotness: 5\n" + "Args:\n" + " - key: value\n" + " - keydebug: valuedebug\n" + " DebugLoc: { File: argpath, Line: 6, Column: 7 }\n" + "...\n"); +} diff --git a/llvm/unittests/Support/AlignmentTest.cpp b/llvm/unittests/Support/AlignmentTest.cpp index 7b771977027b4..3a4416128d0e5 100644 --- a/llvm/unittests/Support/AlignmentTest.cpp +++ b/llvm/unittests/Support/AlignmentTest.cpp @@ -44,6 +44,16 @@ TEST(AlignmentTest, AlignConstexprConstant) { EXPECT_EQ(Align(alignof(uint64_t)), kConstantAlign); } +TEST(AlignmentTest, ConstexprAssign) { + constexpr auto assignAndGet = []() constexpr { + Align A = Align::Constant<8>(); + Align B = Align::Constant<16>(); + A = B; + return A.value(); + }; + static_assert(assignAndGet() == 16); +} + std::vector getValidAlignments() { std::vector Out; for (size_t Shift = 0; Shift < 64; ++Shift) diff --git a/llvm/unittests/Support/CommandLineTest.cpp 
b/llvm/unittests/Support/CommandLineTest.cpp index 88e6445190b59..7f538f155be15 100644 --- a/llvm/unittests/Support/CommandLineTest.cpp +++ b/llvm/unittests/Support/CommandLineTest.cpp @@ -2117,6 +2117,22 @@ TEST(CommandLineTest, ConsumeAfterTwoPositionals) { EXPECT_TRUE(Errs.empty()); } +TEST(CommandLineTest, ConsumeOptionalString) { + cl::ResetCommandLineParser(); + + StackOption, cl::opt>> + Input("input"); + + const char *Args[] = {"prog", "--input=\"value\""}; + + std::string Errs; + raw_string_ostream OS(Errs); + ASSERT_TRUE(cl::ParseCommandLineOptions(2, Args, StringRef(), &OS)); + ASSERT_TRUE(Input.has_value()); + EXPECT_EQ("\"value\"", *Input); + EXPECT_TRUE(Errs.empty()); +} + TEST(CommandLineTest, ResetAllOptionOccurrences) { cl::ResetCommandLineParser(); diff --git a/llvm/unittests/Support/EndianTest.cpp b/llvm/unittests/Support/EndianTest.cpp index c48b7707b7751..0ee631db74ac1 100644 --- a/llvm/unittests/Support/EndianTest.cpp +++ b/llvm/unittests/Support/EndianTest.cpp @@ -24,16 +24,15 @@ TEST(Endian, Read) { unsigned char littleval[] = {0x00, 0x04, 0x03, 0x02, 0x01}; int32_t BigAsHost = 0x00010203; EXPECT_EQ(BigAsHost, - (endian::read(bigval))); + (endian::read(bigval, llvm::endianness::big))); int32_t LittleAsHost = 0x02030400; - EXPECT_EQ( - LittleAsHost, - (endian::read(littleval))); + EXPECT_EQ(LittleAsHost, (endian::read( + littleval, llvm::endianness::little))); EXPECT_EQ( - (endian::read(bigval + 1)), - (endian::read(littleval + - 1))); + (endian::read(bigval + 1, llvm::endianness::big)), + (endian::read(littleval + 1, + llvm::endianness::little))); } TEST(Endian, WriteNext) { diff --git a/llvm/unittests/Support/FileCollectorTest.cpp b/llvm/unittests/Support/FileCollectorTest.cpp index 184d0e3fdfd17..0ece86947b4f2 100644 --- a/llvm/unittests/Support/FileCollectorTest.cpp +++ b/llvm/unittests/Support/FileCollectorTest.cpp @@ -43,7 +43,8 @@ class TestingFileCollector : public FileCollector { TEST(FileCollectorTest, addFile) { TempDir root("add_file_root", /*Unique*/ true); std::string root_fs(root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); FileCollector.addFile("/path/to/a"); FileCollector.addFile("/path/to/b"); @@ -77,7 +78,8 @@ TEST(FileCollectorTest, addDirectory) { TempFile c(ccc.str()); std::string root_fs(file_root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); FileCollector.addDirectory(file_root.path()); @@ -105,7 +107,8 @@ TEST(FileCollectorTest, copyFiles) { // Create file collector and add files. TempDir root("copy_files_root", /*Unique*/ true); std::string root_fs(root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); FileCollector.addFile(a.path()); FileCollector.addFile(b.path()); FileCollector.addFile(c.path()); @@ -133,7 +136,8 @@ TEST(FileCollectorTest, recordAndConstructDirectory) { // Create file collector and add files. TempDir root("copy_files_root", /*Unique*/ true); std::string root_fs(root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); FileCollector.addFile(a.path()); // The empty directory isn't seen until we add it. @@ -169,7 +173,8 @@ TEST(FileCollectorTest, recordVFSAccesses) { // Create file collector and add files. 
TempDir root("copy_files_root", /*Unique*/ true); std::string root_fs(root.path()); - auto Collector = std::make_shared(root_fs, root_fs); + auto Collector = std::make_shared( + root_fs, root_fs, vfs::getRealFileSystem()); auto VFS = FileCollector::createCollectorVFS(vfs::getRealFileSystem(), Collector); VFS->status(a.path()); @@ -216,7 +221,8 @@ TEST(FileCollectorTest, Symlinks) { // Root where files are copied to. TempDir reproducer_root("reproducer_root", /*Unique*/ true); std::string root_fs(reproducer_root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); // Add all the files to the collector. FileCollector.addFile(a.path()); @@ -264,7 +270,8 @@ TEST(FileCollectorTest, recordVFSSymlinkAccesses) { // Create file collector and add files. TempDir root("copy_files_root", true); std::string root_fs(root.path()); - auto Collector = std::make_shared(root_fs, root_fs); + auto Collector = std::make_shared( + root_fs, root_fs, vfs::getRealFileSystem()); auto VFS = FileCollector::createCollectorVFS(vfs::getRealFileSystem(), Collector); SmallString<256> Output; diff --git a/llvm/unittests/Support/MustacheTest.cpp b/llvm/unittests/Support/MustacheTest.cpp index ddc9efc035e17..e2c4422f32fd1 100644 --- a/llvm/unittests/Support/MustacheTest.cpp +++ b/llvm/unittests/Support/MustacheTest.cpp @@ -22,7 +22,7 @@ using namespace llvm::json; TEST(MustacheInterpolation, NoInterpolation) { // Mustache-free templates should render as-is. Value D = {}; - auto T = Template("Hello from {Mustache}!\n"); + Template T("Hello from {Mustache}!\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -32,7 +32,7 @@ TEST(MustacheInterpolation, NoInterpolation) { TEST(MustacheInterpolation, BasicInterpolation) { // Unadorned tags should interpolate content into the template. Value D = Object{{"subject", "World"}}; - auto T = Template("Hello, {{subject}}!"); + Template T("Hello, {{subject}}!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -42,7 +42,7 @@ TEST(MustacheInterpolation, BasicInterpolation) { TEST(MustacheInterpolation, NoReinterpolation) { // Interpolated tag output should not be re-interpolated. Value D = Object{{"template", "{{planet}}"}, {"planet", "Earth"}}; - auto T = Template("{{template}}: {{planet}}"); + Template T("{{template}}: {{planet}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -54,7 +54,7 @@ TEST(MustacheInterpolation, HTMLEscaping) { Value D = Object{ {"forbidden", "& \" < >"}, }; - auto T = Template("These characters should be HTML escaped: {{forbidden}}\n"); + Template T("These characters should be HTML escaped: {{forbidden}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -67,8 +67,7 @@ TEST(MustacheInterpolation, Ampersand) { Value D = Object{ {"forbidden", "& \" < >"}, }; - auto T = - Template("These characters should not be HTML escaped: {{&forbidden}}\n"); + Template T("These characters should not be HTML escaped: {{&forbidden}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -78,7 +77,7 @@ TEST(MustacheInterpolation, Ampersand) { TEST(MustacheInterpolation, BasicIntegerInterpolation) { // Integers should interpolate seamlessly. 
Value D = Object{{"mph", 85}}; - auto T = Template("{{mph}} miles an hour!"); + Template T("{{mph}} miles an hour!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -88,7 +87,7 @@ TEST(MustacheInterpolation, BasicIntegerInterpolation) { TEST(MustacheInterpolation, AmpersandIntegerInterpolation) { // Integers should interpolate seamlessly. Value D = Object{{"mph", 85}}; - auto T = Template("{{&mph}} miles an hour!"); + Template T("{{&mph}} miles an hour!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -98,7 +97,7 @@ TEST(MustacheInterpolation, AmpersandIntegerInterpolation) { TEST(MustacheInterpolation, BasicDecimalInterpolation) { // Decimals should interpolate seamlessly with proper significance. Value D = Object{{"power", 1.21}}; - auto T = Template("{{power}} jiggawatts!"); + Template T("{{power}} jiggawatts!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -108,7 +107,7 @@ TEST(MustacheInterpolation, BasicDecimalInterpolation) { TEST(MustacheInterpolation, BasicNullInterpolation) { // Nulls should interpolate as the empty string. Value D = Object{{"cannot", nullptr}}; - auto T = Template("I ({{cannot}}) be seen!"); + Template T("I ({{cannot}}) be seen!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -118,7 +117,7 @@ TEST(MustacheInterpolation, BasicNullInterpolation) { TEST(MustacheInterpolation, AmpersandNullInterpolation) { // Nulls should interpolate as the empty string. Value D = Object{{"cannot", nullptr}}; - auto T = Template("I ({{&cannot}}) be seen!"); + Template T("I ({{&cannot}}) be seen!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -128,7 +127,7 @@ TEST(MustacheInterpolation, AmpersandNullInterpolation) { TEST(MustacheInterpolation, BasicContextMissInterpolation) { // Failed context lookups should default to empty strings. Value D = Object{}; - auto T = Template("I ({{cannot}}) be seen!"); + Template T("I ({{cannot}}) be seen!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -138,7 +137,7 @@ TEST(MustacheInterpolation, BasicContextMissInterpolation) { TEST(MustacheInterpolation, DottedNamesBasicInterpolation) { // Dotted names should be considered a form of shorthand for sections. Value D = Object{{"person", Object{{"name", "Joe"}}}}; - auto T = Template("{{person.name}} == {{#person}}{{name}}{{/person}}"); + Template T("{{person.name}} == {{#person}}{{name}}{{/person}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -148,7 +147,7 @@ TEST(MustacheInterpolation, DottedNamesBasicInterpolation) { TEST(MustacheInterpolation, DottedNamesAmpersandInterpolation) { // Dotted names should be considered a form of shorthand for sections. Value D = Object{{"person", Object{{"name", "Joe"}}}}; - auto T = Template("{{&person.name}} == {{#person}}{{&name}}{{/person}}"); + Template T("{{&person.name}} == {{#person}}{{&name}}{{/person}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -163,7 +162,7 @@ TEST(MustacheInterpolation, DottedNamesArbitraryDepth) { Object{{"c", Object{{"d", Object{{"e", Object{{"name", "Phil"}}}}}}}}}}}}; - auto T = Template("{{a.b.c.d.e.name}}"); + Template T("{{a.b.c.d.e.name}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -173,7 +172,7 @@ TEST(MustacheInterpolation, DottedNamesArbitraryDepth) { TEST(MustacheInterpolation, DottedNamesBrokenChains) { // Any falsey value prior to the last part of the name should yield ''. 
Value D = Object{{"a", Object{}}}; - auto T = Template("{{a.b.c}} == "); + Template T("{{a.b.c}} == "); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -184,7 +183,7 @@ TEST(MustacheInterpolation, DottedNamesBrokenChainResolution) { // Each part of a dotted name should resolve only against its parent. Value D = Object{{"a", Object{{"b", Object{}}}}, {"c", Object{{"name", "Jim"}}}}; - auto T = Template("{{a.b.c.name}} == "); + Template T("{{a.b.c.name}} == "); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -201,7 +200,7 @@ TEST(MustacheInterpolation, DottedNamesInitialResolution) { Object{{"d", Object{{"e", Object{{"name", "Phil"}}}}}}}}}}}, {"b", Object{{"c", Object{{"d", Object{{"e", Object{{"name", "Wrong"}}}}}}}}}}; - auto T = Template("{{#a}}{{b.c.d.e.name}}{{/a}}"); + Template T("{{#a}}{{b.c.d.e.name}}{{/a}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -212,7 +211,7 @@ TEST(MustacheInterpolation, DottedNamesContextPrecedence) { // Dotted names should be resolved against former resolutions. Value D = Object{{"a", Object{{"b", Object{}}}}, {"b", Object{{"c", "ERROR"}}}}; - auto T = Template("{{#a}}{{b.c}}{{/a}}"); + Template T("{{#a}}{{b.c}}{{/a}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -222,7 +221,7 @@ TEST(MustacheInterpolation, DottedNamesContextPrecedence) { TEST(MustacheInterpolation, DottedNamesAreNotSingleKeys) { // Dotted names shall not be parsed as single, atomic keys Value D = Object{{"a.b", "c"}}; - auto T = Template("{{a.b}}"); + Template T("{{a.b}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -232,7 +231,7 @@ TEST(MustacheInterpolation, DottedNamesAreNotSingleKeys) { TEST(MustacheInterpolation, DottedNamesNoMasking) { // Dotted Names in a given context are unavailable due to dot splitting Value D = Object{{"a.b", "c"}, {"a", Object{{"b", "d"}}}}; - auto T = Template("{{a.b}}"); + Template T("{{a.b}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -242,7 +241,7 @@ TEST(MustacheInterpolation, DottedNamesNoMasking) { TEST(MustacheInterpolation, ImplicitIteratorsBasicInterpolation) { // Unadorned tags should interpolate content into the template. Value D = "world"; - auto T = Template("Hello, {{.}}!\n"); + Template T("Hello, {{.}}!\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -252,7 +251,7 @@ TEST(MustacheInterpolation, ImplicitIteratorsBasicInterpolation) { TEST(MustacheInterpolation, ImplicitIteratorsAmersand) { // Basic interpolation should be HTML escaped. Value D = "& \" < >"; - auto T = Template("These characters should not be HTML escaped: {{&.}}\n"); + Template T("These characters should not be HTML escaped: {{&.}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -262,7 +261,7 @@ TEST(MustacheInterpolation, ImplicitIteratorsAmersand) { TEST(MustacheInterpolation, ImplicitIteratorsInteger) { // Integers should interpolate seamlessly. Value D = 85; - auto T = Template("{{.}} miles an hour!\n"); + Template T("{{.}} miles an hour!\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -272,7 +271,7 @@ TEST(MustacheInterpolation, ImplicitIteratorsInteger) { TEST(MustacheInterpolation, InterpolationSurroundingWhitespace) { // Interpolation should not alter surrounding whitespace. 
Value D = Object{{"string", "---"}}; - auto T = Template("| {{string}} |"); + Template T("| {{string}} |"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -282,7 +281,7 @@ TEST(MustacheInterpolation, InterpolationSurroundingWhitespace) { TEST(MustacheInterpolation, AmersandSurroundingWhitespace) { // Interpolation should not alter surrounding whitespace. Value D = Object{{"string", "---"}}; - auto T = Template("| {{&string}} |"); + Template T("| {{&string}} |"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -292,7 +291,7 @@ TEST(MustacheInterpolation, AmersandSurroundingWhitespace) { TEST(MustacheInterpolation, StandaloneInterpolationWithWhitespace) { // Standalone interpolation should not alter surrounding whitespace. Value D = Object{{"string", "---"}}; - auto T = Template(" {{string}}\n"); + Template T(" {{string}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -302,7 +301,7 @@ TEST(MustacheInterpolation, StandaloneInterpolationWithWhitespace) { TEST(MustacheInterpolation, StandaloneAmpersandWithWhitespace) { // Standalone interpolation should not alter surrounding whitespace. Value D = Object{{"string", "---"}}; - auto T = Template(" {{&string}}\n"); + Template T(" {{&string}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -312,7 +311,7 @@ TEST(MustacheInterpolation, StandaloneAmpersandWithWhitespace) { TEST(MustacheInterpolation, InterpolationWithPadding) { // Superfluous in-tag whitespace should be ignored. Value D = Object{{"string", "---"}}; - auto T = Template("|{{ string }}|"); + Template T("|{{ string }}|"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -322,7 +321,7 @@ TEST(MustacheInterpolation, InterpolationWithPadding) { TEST(MustacheInterpolation, AmpersandWithPadding) { // Superfluous in-tag whitespace should be ignored. Value D = Object{{"string", "---"}}; - auto T = Template("|{{& string }}|"); + Template T("|{{& string }}|"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -332,7 +331,7 @@ TEST(MustacheInterpolation, AmpersandWithPadding) { TEST(MustacheInterpolation, InterpolationWithPaddingAndNewlines) { // Superfluous in-tag whitespace should be ignored. Value D = Object{{"string", "---"}}; - auto T = Template("|{{ string \n\n\n }}|"); + Template T("|{{ string \n\n\n }}|"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -341,7 +340,7 @@ TEST(MustacheInterpolation, InterpolationWithPaddingAndNewlines) { TEST(MustacheSections, Truthy) { Value D = Object{{"boolean", true}}; - auto T = Template("{{#boolean}}This should be rendered.{{/boolean}}"); + Template T("{{#boolean}}This should be rendered.{{/boolean}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -350,7 +349,7 @@ TEST(MustacheSections, Truthy) { TEST(MustacheSections, Falsey) { Value D = Object{{"boolean", false}}; - auto T = Template("{{#boolean}}This should not be rendered.{{/boolean}}"); + Template T("{{#boolean}}This should not be rendered.{{/boolean}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -360,7 +359,7 @@ TEST(MustacheSections, Falsey) { TEST(MustacheInterpolation, IsFalseyNull) { // Mustache-free templates should render as-is. 
Value D = Object{{"boolean", nullptr}}; - auto T = Template("Hello, {{#boolean}}World{{/boolean}}"); + Template T("Hello, {{#boolean}}World{{/boolean}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -370,7 +369,7 @@ TEST(MustacheInterpolation, IsFalseyNull) { TEST(MustacheInterpolation, IsFalseyArray) { // Mustache-free templates should render as-is. Value D = Object{{"boolean", Array()}}; - auto T = Template("Hello, {{#boolean}}World{{/boolean}}"); + Template T("Hello, {{#boolean}}World{{/boolean}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -380,7 +379,7 @@ TEST(MustacheInterpolation, IsFalseyArray) { TEST(MustacheInterpolation, IsFalseyObject) { // Mustache-free templates should render as-is. Value D = Object{{"boolean", Object{}}}; - auto T = Template("Hello, {{#boolean}}World{{/boolean}}"); + Template T("Hello, {{#boolean}}World{{/boolean}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -390,7 +389,7 @@ TEST(MustacheInterpolation, IsFalseyObject) { TEST(MustacheInterpolation, DoubleRendering) { // Mustache-free templates should render as-is. Value D1 = Object{{"subject", "World"}}; - auto T = Template("Hello, {{subject}}!"); + Template T("Hello, {{subject}}!"); std::string Out1; raw_string_ostream OS1(Out1); T.render(D1, OS1); @@ -404,7 +403,7 @@ TEST(MustacheInterpolation, DoubleRendering) { TEST(MustacheSections, NullIsFalsey) { Value D = Object{{"null", nullptr}}; - auto T = Template("{{#null}}This should not be rendered.{{/null}}"); + Template T("{{#null}}This should not be rendered.{{/null}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -413,7 +412,7 @@ TEST(MustacheSections, NullIsFalsey) { TEST(MustacheSections, Context) { Value D = Object{{"context", Object{{"name", "Joe"}}}}; - auto T = Template("{{#context}}Hi {{name}}.{{/context}}"); + Template T("{{#context}}Hi {{name}}.{{/context}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -425,7 +424,7 @@ TEST(MustacheSections, ParentContexts) { {"b", "wrong"}, {"sec", Object{{"b", "bar"}}}, {"c", Object{{"d", "baz"}}}}; - auto T = Template("{{#sec}}{{a}}, {{b}}, {{c.d}}{{/sec}}"); + Template T("{{#sec}}{{a}}, {{b}}, {{c.d}}{{/sec}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -434,7 +433,7 @@ TEST(MustacheSections, ParentContexts) { TEST(MustacheSections, VariableTest) { Value D = Object{{"foo", "bar"}}; - auto T = Template("{{#foo}}{{.}} is {{foo}}{{/foo}}"); + Template T("{{#foo}}{{.}} is {{foo}}{{/foo}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -450,14 +449,14 @@ TEST(MustacheSections, ListContexts) { Array{Object{{"mname", "1"}, {"bottoms", Array{Object{{"bname", "x"}}, Object{{"bname", "y"}}}}}}}}}}}; - auto T = Template("{{#tops}}" - "{{#middles}}" - "{{tname.lower}}{{mname}}." - "{{#bottoms}}" - "{{tname.upper}}{{mname}}{{bname}}." - "{{/bottoms}}" - "{{/middles}}" - "{{/tops}}"); + Template T("{{#tops}}" + "{{#middles}}" + "{{tname.lower}}{{mname}}." + "{{#bottoms}}" + "{{tname.upper}}{{mname}}{{bname}}." 
+ "{{/bottoms}}" + "{{/middles}}" + "{{/tops}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -469,7 +468,7 @@ TEST(MustacheSections, DeeplyNestedContexts) { {"a", Object{{"one", 1}}}, {"b", Object{{"two", 2}}}, {"c", Object{{"three", 3}, {"d", Object{{"four", 4}, {"five", 5}}}}}}; - auto T = Template( + Template T( "{{#a}}\n{{one}}\n{{#b}}\n{{one}}{{two}}{{one}}\n{{#c}}\n{{one}}{{two}}{{" "three}}{{two}}{{one}}\n{{#d}}\n{{one}}{{two}}{{three}}{{four}}{{three}}{" "{two}}{{one}}\n{{#five}}\n{{one}}{{two}}{{three}}{{four}}{{five}}{{four}" @@ -490,7 +489,7 @@ TEST(MustacheSections, DeeplyNestedContexts) { TEST(MustacheSections, List) { Value D = Object{{"list", Array{Object{{"item", 1}}, Object{{"item", 2}}, Object{{"item", 3}}}}}; - auto T = Template("{{#list}}{{item}}{{/list}}"); + Template T("{{#list}}{{item}}{{/list}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -499,7 +498,7 @@ TEST(MustacheSections, List) { TEST(MustacheSections, EmptyList) { Value D = Object{{"list", Array{}}}; - auto T = Template("{{#list}}Yay lists!{{/list}}"); + Template T("{{#list}}Yay lists!{{/list}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -508,8 +507,8 @@ TEST(MustacheSections, EmptyList) { TEST(MustacheSections, Doubled) { Value D = Object{{"bool", true}, {"two", "second"}}; - auto T = Template("{{#bool}}\n* first\n{{/bool}}\n* " - "{{two}}\n{{#bool}}\n* third\n{{/bool}}\n"); + Template T("{{#bool}}\n* first\n{{/bool}}\n* " + "{{two}}\n{{#bool}}\n* third\n{{/bool}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -518,7 +517,7 @@ TEST(MustacheSections, Doubled) { TEST(MustacheSections, NestedTruthy) { Value D = Object{{"bool", true}}; - auto T = Template("| A {{#bool}}B {{#bool}}C{{/bool}} D{{/bool}} E |"); + Template T("| A {{#bool}}B {{#bool}}C{{/bool}} D{{/bool}} E |"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -527,7 +526,7 @@ TEST(MustacheSections, NestedTruthy) { TEST(MustacheSections, NestedFalsey) { Value D = Object{{"bool", false}}; - auto T = Template("| A {{#bool}}B {{#bool}}C{{/bool}} D{{/bool}} E |"); + Template T("| A {{#bool}}B {{#bool}}C{{/bool}} D{{/bool}} E |"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -536,7 +535,7 @@ TEST(MustacheSections, NestedFalsey) { TEST(MustacheSections, ContextMisses) { Value D = Object{}; - auto T = Template("[{{#missing}}Found key 'missing'!{{/missing}}]"); + Template T("[{{#missing}}Found key 'missing'!{{/missing}}]"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -545,7 +544,7 @@ TEST(MustacheSections, ContextMisses) { TEST(MustacheSections, ImplicitIteratorString) { Value D = Object{{"list", Array{"a", "b", "c", "d", "e"}}}; - auto T = Template("{{#list}}({{.}}){{/list}}"); + Template T("{{#list}}({{.}}){{/list}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -554,7 +553,7 @@ TEST(MustacheSections, ImplicitIteratorString) { TEST(MustacheSections, ImplicitIteratorInteger) { Value D = Object{{"list", Array{1, 2, 3, 4, 5}}}; - auto T = Template("{{#list}}({{.}}){{/list}}"); + Template T("{{#list}}({{.}}){{/list}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -563,7 +562,7 @@ TEST(MustacheSections, ImplicitIteratorInteger) { TEST(MustacheSections, ImplicitIteratorArray) { Value D = Object{{"list", Array{Array{1, 2, 3}, Array{"a", "b", "c"}}}}; - auto T = Template("{{#list}}({{#.}}{{.}}{{/.}}){{/list}}"); + Template T("{{#list}}({{#.}}{{.}}{{/.}}){{/list}}"); 
std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -572,7 +571,7 @@ TEST(MustacheSections, ImplicitIteratorArray) { TEST(MustacheSections, ImplicitIteratorHTMLEscaping) { Value D = Object{{"list", Array{"&", "\"", "<", ">"}}}; - auto T = Template("{{#list}}({{.}}){{/list}}"); + Template T("{{#list}}({{.}}){{/list}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -581,7 +580,7 @@ TEST(MustacheSections, ImplicitIteratorHTMLEscaping) { TEST(MustacheSections, ImplicitIteratorAmpersand) { Value D = Object{{"list", Array{"&", "\"", "<", ">"}}}; - auto T = Template("{{#list}}({{&.}}){{/list}}"); + Template T("{{#list}}({{&.}}){{/list}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -590,7 +589,7 @@ TEST(MustacheSections, ImplicitIteratorAmpersand) { TEST(MustacheSections, ImplicitIteratorRootLevel) { Value D = Array{Object{{"value", "a"}}, Object{{"value", "b"}}}; - auto T = Template("{{#.}}({{value}}){{/.}}"); + Template T("{{#.}}({{value}}){{/.}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -599,7 +598,7 @@ TEST(MustacheSections, ImplicitIteratorRootLevel) { TEST(MustacheSections, DottedNamesTruthy) { Value D = Object{{"a", Object{{"b", Object{{"c", true}}}}}}; - auto T = Template("{{#a.b.c}}Here{{/a.b.c}} == Here"); + Template T("{{#a.b.c}}Here{{/a.b.c}} == Here"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -608,7 +607,7 @@ TEST(MustacheSections, DottedNamesTruthy) { TEST(MustacheSections, DottedNamesFalsey) { Value D = Object{{"a", Object{{"b", Object{{"c", false}}}}}}; - auto T = Template("{{#a.b.c}}Here{{/a.b.c}} == "); + Template T("{{#a.b.c}}Here{{/a.b.c}} == "); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -617,7 +616,7 @@ TEST(MustacheSections, DottedNamesFalsey) { TEST(MustacheSections, DottedNamesBrokenChains) { Value D = Object{{"a", Object{}}}; - auto T = Template("{{#a.b.c}}Here{{/a.b.c}} == "); + Template T("{{#a.b.c}}Here{{/a.b.c}} == "); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -626,7 +625,7 @@ TEST(MustacheSections, DottedNamesBrokenChains) { TEST(MustacheSections, SurroundingWhitespace) { Value D = Object{{"boolean", true}}; - auto T = Template(" | {{#boolean}}\t|\t{{/boolean}} | \n"); + Template T(" | {{#boolean}}\t|\t{{/boolean}} | \n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -635,8 +634,7 @@ TEST(MustacheSections, SurroundingWhitespace) { TEST(MustacheSections, InternalWhitespace) { Value D = Object{{"boolean", true}}; - auto T = Template( - " | {{#boolean}} {{! Important Whitespace }}\n {{/boolean}} | \n"); + Template T(" | {{#boolean}} {{! 
Important Whitespace }}\n {{/boolean}} | \n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -645,8 +643,7 @@ TEST(MustacheSections, InternalWhitespace) { TEST(MustacheSections, IndentedInlineSections) { Value D = Object{{"boolean", true}}; - auto T = - Template(" {{#boolean}}YES{{/boolean}}\n {{#boolean}}GOOD{{/boolean}}\n"); + Template T(" {{#boolean}}YES{{/boolean}}\n {{#boolean}}GOOD{{/boolean}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -655,7 +652,7 @@ TEST(MustacheSections, IndentedInlineSections) { TEST(MustacheSections, StandaloneLines) { Value D = Object{{"boolean", true}}; - auto T = Template("| This Is\n{{#boolean}}\n|\n{{/boolean}}\n| A Line\n"); + Template T("| This Is\n{{#boolean}}\n|\n{{/boolean}}\n| A Line\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -664,7 +661,7 @@ TEST(MustacheSections, StandaloneLines) { TEST(MustacheSections, IndentedStandaloneLines) { Value D = Object{{"boolean", true}}; - auto T = Template("| This Is\n {{#boolean}}\n|\n {{/boolean}}\n| A Line\n"); + Template T("| This Is\n {{#boolean}}\n|\n {{/boolean}}\n| A Line\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -673,7 +670,7 @@ TEST(MustacheSections, IndentedStandaloneLines) { TEST(MustacheSections, StandaloneLineEndings) { Value D = Object{{"boolean", true}}; - auto T = Template("|\r\n{{#boolean}}\r\n{{/boolean}}\r\n|"); + Template T("|\r\n{{#boolean}}\r\n{{/boolean}}\r\n|"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -682,7 +679,7 @@ TEST(MustacheSections, StandaloneLineEndings) { TEST(MustacheSections, StandaloneWithoutPreviousLine) { Value D = Object{{"boolean", true}}; - auto T = Template(" {{#boolean}}\n#{{/boolean}}\n/"); + Template T(" {{#boolean}}\n#{{/boolean}}\n/"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -691,7 +688,7 @@ TEST(MustacheSections, StandaloneWithoutPreviousLine) { TEST(MustacheSections, StandaloneWithoutNewline) { Value D = Object{{"boolean", true}}; - auto T = Template("#{{#boolean}}\n/\n {{/boolean}}"); + Template T("#{{#boolean}}\n/\n {{/boolean}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -700,7 +697,7 @@ TEST(MustacheSections, StandaloneWithoutNewline) { TEST(MustacheSections, Padding) { Value D = Object{{"boolean", true}}; - auto T = Template("|{{# boolean }}={{/ boolean }}|"); + Template T("|{{# boolean }}={{/ boolean }}|"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -709,7 +706,7 @@ TEST(MustacheSections, Padding) { TEST(MustacheInvertedSections, Falsey) { Value D = Object{{"boolean", false}}; - auto T = Template("{{^boolean}}This should be rendered.{{/boolean}}"); + Template T("{{^boolean}}This should be rendered.{{/boolean}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -718,7 +715,7 @@ TEST(MustacheInvertedSections, Falsey) { TEST(MustacheInvertedSections, Truthy) { Value D = Object{{"boolean", true}}; - auto T = Template("{{^boolean}}This should not be rendered.{{/boolean}}"); + Template T("{{^boolean}}This should not be rendered.{{/boolean}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -727,7 +724,7 @@ TEST(MustacheInvertedSections, Truthy) { TEST(MustacheInvertedSections, NullIsFalsey) { Value D = Object{{"null", nullptr}}; - auto T = Template("{{^null}}This should be rendered.{{/null}}"); + Template T("{{^null}}This should be rendered.{{/null}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -736,7 +733,7 @@ 
TEST(MustacheInvertedSections, NullIsFalsey) { TEST(MustacheInvertedSections, Context) { Value D = Object{{"context", Object{{"name", "Joe"}}}}; - auto T = Template("{{^context}}Hi {{name}}.{{/context}}"); + Template T("{{^context}}Hi {{name}}.{{/context}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -746,7 +743,7 @@ TEST(MustacheInvertedSections, Context) { TEST(MustacheInvertedSections, List) { Value D = Object{ {"list", Array{Object{{"n", 1}}, Object{{"n", 2}}, Object{{"n", 3}}}}}; - auto T = Template("{{^list}}{{n}}{{/list}}"); + Template T("{{^list}}{{n}}{{/list}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -755,7 +752,7 @@ TEST(MustacheInvertedSections, List) { TEST(MustacheInvertedSections, EmptyList) { Value D = Object{{"list", Array{}}}; - auto T = Template("{{^list}}Yay lists!{{/list}}"); + Template T("{{^list}}Yay lists!{{/list}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -764,8 +761,8 @@ TEST(MustacheInvertedSections, EmptyList) { TEST(MustacheInvertedSections, Doubled) { Value D = Object{{"bool", false}, {"two", "second"}}; - auto T = Template("{{^bool}}\n* first\n{{/bool}}\n* " - "{{two}}\n{{^bool}}\n* third\n{{/bool}}\n"); + Template T("{{^bool}}\n* first\n{{/bool}}\n* " + "{{two}}\n{{^bool}}\n* third\n{{/bool}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -774,7 +771,7 @@ TEST(MustacheInvertedSections, Doubled) { TEST(MustacheInvertedSections, NestedFalsey) { Value D = Object{{"bool", false}}; - auto T = Template("| A {{^bool}}B {{^bool}}C{{/bool}} D{{/bool}} E |"); + Template T("| A {{^bool}}B {{^bool}}C{{/bool}} D{{/bool}} E |"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -783,7 +780,7 @@ TEST(MustacheInvertedSections, NestedFalsey) { TEST(MustacheInvertedSections, NestedTruthy) { Value D = Object{{"bool", true}}; - auto T = Template("| A {{^bool}}B {{^bool}}C{{/bool}} D{{/bool}} E |"); + Template T("| A {{^bool}}B {{^bool}}C{{/bool}} D{{/bool}} E |"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -792,7 +789,7 @@ TEST(MustacheInvertedSections, NestedTruthy) { TEST(MustacheInvertedSections, ContextMisses) { Value D = Object{}; - auto T = Template("[{{^missing}}Cannot find key 'missing'!{{/missing}}]"); + Template T("[{{^missing}}Cannot find key 'missing'!{{/missing}}]"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -801,7 +798,7 @@ TEST(MustacheInvertedSections, ContextMisses) { TEST(MustacheInvertedSections, DottedNamesTruthy) { Value D = Object{{"a", Object{{"b", Object{{"c", true}}}}}}; - auto T = Template("{{^a.b.c}}Not Here{{/a.b.c}} == "); + Template T("{{^a.b.c}}Not Here{{/a.b.c}} == "); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -810,7 +807,7 @@ TEST(MustacheInvertedSections, DottedNamesTruthy) { TEST(MustacheInvertedSections, DottedNamesFalsey) { Value D = Object{{"a", Object{{"b", Object{{"c", false}}}}}}; - auto T = Template("{{^a.b.c}}Not Here{{/a.b.c}} == Not Here"); + Template T("{{^a.b.c}}Not Here{{/a.b.c}} == Not Here"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -819,7 +816,7 @@ TEST(MustacheInvertedSections, DottedNamesFalsey) { TEST(MustacheInvertedSections, DottedNamesBrokenChains) { Value D = Object{{"a", Object{}}}; - auto T = Template("{{^a.b.c}}Not Here{{/a.b.c}} == Not Here"); + Template T("{{^a.b.c}}Not Here{{/a.b.c}} == Not Here"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -828,7 +825,7 @@ TEST(MustacheInvertedSections, 
DottedNamesBrokenChains) { TEST(MustacheInvertedSections, SurroundingWhitespace) { Value D = Object{{"boolean", false}}; - auto T = Template(" | {{^boolean}}\t|\t{{/boolean}} | \n"); + Template T(" | {{^boolean}}\t|\t{{/boolean}} | \n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -837,8 +834,7 @@ TEST(MustacheInvertedSections, SurroundingWhitespace) { TEST(MustacheInvertedSections, InternalWhitespace) { Value D = Object{{"boolean", false}}; - auto T = Template( - " | {{^boolean}} {{! Important Whitespace }}\n {{/boolean}} | \n"); + Template T(" | {{^boolean}} {{! Important Whitespace }}\n {{/boolean}} | \n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -847,8 +843,7 @@ TEST(MustacheInvertedSections, InternalWhitespace) { TEST(MustacheInvertedSections, IndentedInlineSections) { Value D = Object{{"boolean", false}}; - auto T = - Template(" {{^boolean}}NO{{/boolean}}\n {{^boolean}}WAY{{/boolean}}\n"); + Template T(" {{^boolean}}NO{{/boolean}}\n {{^boolean}}WAY{{/boolean}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -857,7 +852,7 @@ TEST(MustacheInvertedSections, IndentedInlineSections) { TEST(MustacheInvertedSections, StandaloneLines) { Value D = Object{{"boolean", false}}; - auto T = Template("| This Is\n{{^boolean}}\n|\n{{/boolean}}\n| A Line\n"); + Template T("| This Is\n{{^boolean}}\n|\n{{/boolean}}\n| A Line\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -866,7 +861,7 @@ TEST(MustacheInvertedSections, StandaloneLines) { TEST(MustacheInvertedSections, StandaloneIndentedLines) { Value D = Object{{"boolean", false}}; - auto T = Template("| This Is\n {{^boolean}}\n|\n {{/boolean}}\n| A Line\n"); + Template T("| This Is\n {{^boolean}}\n|\n {{/boolean}}\n| A Line\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -875,7 +870,7 @@ TEST(MustacheInvertedSections, StandaloneIndentedLines) { TEST(MustacheInvertedSections, StandaloneLineEndings) { Value D = Object{{"boolean", false}}; - auto T = Template("|\r\n{{^boolean}}\r\n{{/boolean}}\r\n|"); + Template T("|\r\n{{^boolean}}\r\n{{/boolean}}\r\n|"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -884,7 +879,7 @@ TEST(MustacheInvertedSections, StandaloneLineEndings) { TEST(MustacheInvertedSections, StandaloneWithoutPreviousLine) { Value D = Object{{"boolean", false}}; - auto T = Template(" {{^boolean}}\n^{{/boolean}}\n/"); + Template T(" {{^boolean}}\n^{{/boolean}}\n/"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -893,7 +888,7 @@ TEST(MustacheInvertedSections, StandaloneWithoutPreviousLine) { TEST(MustacheInvertedSections, StandaloneWithoutNewline) { Value D = Object{{"boolean", false}}; - auto T = Template("^{{^boolean}}\n/\n {{/boolean}}"); + Template T("^{{^boolean}}\n/\n {{/boolean}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -902,7 +897,7 @@ TEST(MustacheInvertedSections, StandaloneWithoutNewline) { TEST(MustacheInvertedSections, Padding) { Value D = Object{{"boolean", false}}; - auto T = Template("|{{^ boolean }}={{/ boolean }}|"); + Template T("|{{^ boolean }}={{/ boolean }}|"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -911,7 +906,7 @@ TEST(MustacheInvertedSections, Padding) { TEST(MustachePartials, BasicBehavior) { Value D = Object{}; - auto T = Template("{{>text}}"); + Template T("{{>text}}"); T.registerPartial("text", "from partial"); std::string Out; raw_string_ostream OS(Out); @@ -921,7 +916,7 @@ TEST(MustachePartials, BasicBehavior) { 
TEST(MustachePartials, FailedLookup) { Value D = Object{}; - auto T = Template("{{>text}}"); + Template T("{{>text}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -930,7 +925,7 @@ TEST(MustachePartials, FailedLookup) { TEST(MustachePartials, Context) { Value D = Object{{"text", "content"}}; - auto T = Template("{{>partial}}"); + Template T("{{>partial}}"); T.registerPartial("partial", "*{{text}}*"); std::string Out; raw_string_ostream OS(Out); @@ -942,7 +937,7 @@ TEST(MustachePartials, Recursion) { Value D = Object{{"content", "X"}, {"nodes", Array{Object{{"content", "Y"}, {"nodes", Array{}}}}}}; - auto T = Template("{{>node}}"); + Template T("{{>node}}"); T.registerPartial("node", "{{content}}({{#nodes}}{{>node}}{{/nodes}})"); std::string Out; raw_string_ostream OS(Out); @@ -952,7 +947,7 @@ TEST(MustachePartials, Recursion) { TEST(MustachePartials, Nested) { Value D = Object{{"a", "hello"}, {"b", "world"}}; - auto T = Template("{{>outer}}"); + Template T("{{>outer}}"); T.registerPartial("outer", "*{{a}} {{>inner}}*"); T.registerPartial("inner", "{{b}}!"); std::string Out; @@ -963,7 +958,7 @@ TEST(MustachePartials, Nested) { TEST(MustachePartials, SurroundingWhitespace) { Value D = Object{}; - auto T = Template("| {{>partial}} |"); + Template T("| {{>partial}} |"); T.registerPartial("partial", "\t|\t"); std::string Out; raw_string_ostream OS(Out); @@ -973,7 +968,7 @@ TEST(MustachePartials, SurroundingWhitespace) { TEST(MustachePartials, InlineIndentation) { Value D = Object{{"data", "|"}}; - auto T = Template(" {{data}} {{> partial}}\n"); + Template T(" {{data}} {{> partial}}\n"); T.registerPartial("partial", "<\n<"); std::string Out; raw_string_ostream OS(Out); @@ -983,7 +978,7 @@ TEST(MustachePartials, InlineIndentation) { TEST(MustachePartials, PaddingWhitespace) { Value D = Object{{"boolean", true}}; - auto T = Template("|{{> partial }}|"); + Template T("|{{> partial }}|"); T.registerPartial("partial", "[]"); std::string Out; raw_string_ostream OS(Out); @@ -991,9 +986,19 @@ TEST(MustachePartials, PaddingWhitespace) { EXPECT_EQ("|[]|", Out); } +TEST(MustachePartials, StandaloneIndentation) { + mustache::Template T("\\\n {{>partial}}\n/\n"); + T.registerPartial("partial", "|\n{{{content}}}\n|\n"); + std::string O; + raw_string_ostream OS(O); + Value DataContext = Object{{"content", "<\n->"}}; + T.render(DataContext, OS); + EXPECT_EQ("\\\n |\n <\n->\n |\n/\n", OS.str()); +} + TEST(MustacheLambdas, BasicInterpolation) { Value D = Object{}; - auto T = Template("Hello, {{lambda}}!"); + Template T("Hello, {{lambda}}!"); Lambda L = []() -> llvm::json::Value { return "World"; }; T.registerLambda("lambda", L); std::string Out; @@ -1004,7 +1009,7 @@ TEST(MustacheLambdas, BasicInterpolation) { TEST(MustacheLambdas, InterpolationExpansion) { Value D = Object{{"planet", "World"}}; - auto T = Template("Hello, {{lambda}}!"); + Template T("Hello, {{lambda}}!"); Lambda L = []() -> llvm::json::Value { return "{{planet}}"; }; T.registerLambda("lambda", L); std::string Out; @@ -1015,7 +1020,7 @@ TEST(MustacheLambdas, InterpolationExpansion) { TEST(MustacheLambdas, BasicMultipleCalls) { Value D = Object{}; - auto T = Template("{{lambda}} == {{lambda}} == {{lambda}}"); + Template T("{{lambda}} == {{lambda}} == {{lambda}}"); int I = 0; Lambda L = [&I]() -> llvm::json::Value { I += 1; @@ -1030,7 +1035,7 @@ TEST(MustacheLambdas, BasicMultipleCalls) { TEST(MustacheLambdas, Escaping) { Value D = Object{}; - auto T = Template("<{{lambda}}{{&lambda}}"); + Template T("<{{lambda}}{{&lambda}}"); 
Lambda L = []() -> llvm::json::Value { return ">"; }; T.registerLambda("lambda", L); std::string Out; @@ -1041,7 +1046,7 @@ TEST(MustacheLambdas, Escaping) { TEST(MustacheLambdas, Sections) { Value D = Object{}; - auto T = Template("<{{#lambda}}{{x}}{{/lambda}}>"); + Template T("<{{#lambda}}{{x}}{{/lambda}}>"); SectionLambda L = [](StringRef Text) -> llvm::json::Value { if (Text == "{{x}}") { return "yes"; @@ -1059,7 +1064,7 @@ TEST(MustacheLambdas, SectionExpansion) { Value D = Object{ {"planet", "Earth"}, }; - auto T = Template("<{{#lambda}}-{{/lambda}}>"); + Template T("<{{#lambda}}-{{/lambda}}>"); SectionLambda L = [](StringRef Text) -> llvm::json::Value { SmallString<128> Result; Result += Text; @@ -1076,7 +1081,7 @@ TEST(MustacheLambdas, SectionExpansion) { TEST(MustacheLambdas, SectionsMultipleCalls) { Value D = Object{}; - auto T = Template("{{#lambda}}FILE{{/lambda}} != {{#lambda}}LINE{{/lambda}}"); + Template T("{{#lambda}}FILE{{/lambda}} != {{#lambda}}LINE{{/lambda}}"); SectionLambda L = [](StringRef Text) -> llvm::json::Value { SmallString<128> Result; Result += "__"; @@ -1093,7 +1098,7 @@ TEST(MustacheLambdas, SectionsMultipleCalls) { TEST(MustacheLambdas, InvertedSections) { Value D = Object{{"static", "static"}}; - auto T = Template("<{{^lambda}}{{static}}{{/lambda}}>"); + Template T("<{{^lambda}}{{static}}{{/lambda}}>"); SectionLambda L = [](StringRef Text) -> llvm::json::Value { return false; }; T.registerLambda("lambda", L); std::string Out; @@ -1105,7 +1110,7 @@ TEST(MustacheLambdas, InvertedSections) { TEST(MustacheComments, Inline) { // Comment blocks should be removed from the template. Value D = {}; - auto T = Template("12345{{! Comment Block! }}67890"); + Template T("12345{{! Comment Block! }}67890"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1115,8 +1120,7 @@ TEST(MustacheComments, Inline) { TEST(MustacheComments, Multiline) { // Multiline comments should be permitted. Value D = {}; - auto T = - Template("12345{{!\n This is a\n multi-line comment...\n}}67890\n"); + Template T("12345{{!\n This is a\n multi-line comment...\n}}67890\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1126,7 +1130,7 @@ TEST(MustacheComments, Multiline) { TEST(MustacheComments, Standalone) { // All standalone comment lines should be removed. Value D = {}; - auto T = Template("Begin.\n{{! Comment Block! }}\nEnd.\n"); + Template T("Begin.\n{{! Comment Block! }}\nEnd.\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1136,7 +1140,7 @@ TEST(MustacheComments, Standalone) { TEST(MustacheComments, IndentedStandalone) { // All standalone comment lines should be removed. Value D = {}; - auto T = Template("Begin.\n {{! Indented Comment Block! }}\nEnd.\n"); + Template T("Begin.\n {{! Indented Comment Block! }}\nEnd.\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1146,7 +1150,7 @@ TEST(MustacheComments, IndentedStandalone) { TEST(MustacheComments, StandaloneLineEndings) { // "\r\n" should be considered a newline for standalone tags. Value D = {}; - auto T = Template("|\r\n{{! Standalone Comment }}\r\n|"); + Template T("|\r\n{{! Standalone Comment }}\r\n|"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1156,7 +1160,7 @@ TEST(MustacheComments, StandaloneLineEndings) { TEST(MustacheComments, StandaloneWithoutPreviousLine) { // Standalone tags should not require a newline to precede them. Value D = {}; - auto T = Template(" {{! I'm Still Standalone }}\n!"); + Template T(" {{! 
I'm Still Standalone }}\n!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1166,7 +1170,7 @@ TEST(MustacheComments, StandaloneWithoutPreviousLine) { TEST(MustacheComments, StandaloneWithoutNewline) { // Standalone tags should not require a newline to follow them. Value D = {}; - auto T = Template("!\n {{! I'm Still Standalone }}"); + Template T("!\n {{! I'm Still Standalone }}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1176,7 +1180,7 @@ TEST(MustacheComments, StandaloneWithoutNewline) { TEST(MustacheComments, MultilineStandalone) { // All standalone comment lines should be removed. Value D = {}; - auto T = Template("Begin.\n{{!\nSomething's going on here...\n}}\nEnd.\n"); + Template T("Begin.\n{{!\nSomething's going on here...\n}}\nEnd.\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1186,8 +1190,7 @@ TEST(MustacheComments, MultilineStandalone) { TEST(MustacheComments, IndentedMultilineStandalone) { // All standalone comment lines should be removed. Value D = {}; - auto T = - Template("Begin.\n {{!\n Something's going on here...\n }}\nEnd.\n"); + Template T("Begin.\n {{!\n Something's going on here...\n }}\nEnd.\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1197,7 +1200,7 @@ TEST(MustacheComments, IndentedMultilineStandalone) { TEST(MustacheComments, IndentedInline) { // Inline comments should not strip whitespace. Value D = {}; - auto T = Template(" 12 {{! 34 }}\n"); + Template T(" 12 {{! 34 }}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1207,7 +1210,7 @@ TEST(MustacheComments, IndentedInline) { TEST(MustacheComments, SurroundingWhitespace) { // Comment removal should preserve surrounding whitespace. Value D = {}; - auto T = Template("12345 {{! Comment Block! }} 67890"); + Template T("12345 {{! Comment Block! }} 67890"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1218,7 +1221,7 @@ TEST(MustacheComments, VariableNameCollision) { // Comments must never render, even if a variable with the same name exists. Value D = Object{ {"! comment", 1}, {"! comment ", 2}, {"!comment", 3}, {"comment", 4}}; - auto T = Template("comments never show: >{{! comment }}<"); + Template T("comments never show: >{{! comment }}<"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); @@ -1231,90 +1234,226 @@ TEST(MustacheComments, VariableNameCollision) { // implemented, these assertions should be changed back to EXPECT_EQ. 
TEST(MustacheTripleMustache, Basic) { Value D = Object{{"subject", "World"}}; - auto T = Template("Hello, {{{subject}}}!"); + Template T("Hello, {{{subject}}}!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE("Hello, World!", Out); + EXPECT_EQ("Hello, World!", Out); } TEST(MustacheTripleMustache, IntegerInterpolation) { Value D = Object{{"mph", 85}}; - auto T = Template("{{{mph}}} miles an hour!"); + Template T("{{{mph}}} miles an hour!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE("85 miles an hour!", Out); + EXPECT_EQ("85 miles an hour!", Out); } TEST(MustacheTripleMustache, DecimalInterpolation) { Value D = Object{{"power", 1.21}}; - auto T = Template("{{{power}}} jiggawatts!"); + Template T("{{{power}}} jiggawatts!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE("1.21 jiggawatts!", Out); + EXPECT_EQ("1.21 jiggawatts!", Out); } TEST(MustacheTripleMustache, NullInterpolation) { Value D = Object{{"cannot", nullptr}}; - auto T = Template("I ({{{cannot}}}) be seen!"); + Template T("I ({{{cannot}}}) be seen!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE("I () be seen!", Out); + EXPECT_EQ("I () be seen!", Out); } TEST(MustacheTripleMustache, ContextMissInterpolation) { Value D = Object{}; - auto T = Template("I ({{{cannot}}}) be seen!"); + Template T("I ({{{cannot}}}) be seen!"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE("I () be seen!", Out); + EXPECT_EQ("I () be seen!", Out); } TEST(MustacheTripleMustache, DottedNames) { Value D = Object{{"person", Object{{"name", "Joe"}}}}; - auto T = Template("{{{person.name}}}"); + Template T("{{{person.name}}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE("Joe", Out); + EXPECT_EQ("Joe", Out); } TEST(MustacheTripleMustache, ImplicitIterator) { Value D = Object{{"list", Array{"", ""}}}; - auto T = Template("{{#list}}({{{.}}}){{/list}}"); + Template T("{{#list}}({{{.}}}){{/list}}"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE("()()", Out); + EXPECT_EQ("()()", Out); } TEST(MustacheTripleMustache, SurroundingWhitespace) { Value D = Object{{"string", "---"}}; - auto T = Template("| {{{string}}} |"); + Template T("| {{{string}}} |"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE("| --- |", Out); + EXPECT_EQ("| --- |", Out); } TEST(MustacheTripleMustache, Standalone) { Value D = Object{{"string", "---"}}; - auto T = Template(" {{{string}}}\n"); + Template T(" {{{string}}}\n"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE(" ---\n", Out); + EXPECT_EQ(" ---\n", Out); } TEST(MustacheTripleMustache, WithPadding) { Value D = Object{{"string", "---"}}; - auto T = Template("|{{{ string }}}|"); + Template T("|{{{ string }}}|"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("|---|", Out); +} + +TEST(MustacheDelimiters, PairBehavior) { + Value D = Object{{"text", "Hey!"}}; + Template T("{{=<% %>=}}(<%text%>)"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("(Hey!)", Out); +} + +TEST(MustacheDelimiters, SpecialCharacters) { + Value D = Object{{"text", "It worked!"}}; + Template T("({{=[ ]=}}[text])"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("(It worked!)", Out); +} + +TEST(MustacheDelimiters, Sections) { + Value D = Object{{"section", true}, {"data", "I got interpolated."}}; + auto T = + 
Template("[\n{{#section}}\n {{data}}\n |data|\n{{/section}}\n\n{{= " + "| | =}}\n|#section|\n {{data}}\n |data|\n|/section|\n]\n"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("[\n I got interpolated.\n |data|\n\n {{data}}\n I got " + "interpolated.\n]\n", + Out); +} + +TEST(MustacheDelimiters, InvertedSections) { + Value D = Object{{"section", false}, {"data", "I got interpolated."}}; + auto T = + Template("[\n{{^section}}\n {{data}}\n |data|\n{{/section}}\n\n{{= " + "| | =}}\n|^section|\n {{data}}\n |data|\n|/section|\n]\n"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("[\n I got interpolated.\n |data|\n\n {{data}}\n I got " + "interpolated.\n]\n", + Out); +} + +TEST(MustacheDelimiters, PartialInheritence) { + Value D = Object{{"value", "yes"}}; + Template T("[ {{>include}} ]\n{{= | | =}}\n[ |>include| ]\n"); + T.registerPartial("include", ".{{value}}."); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("[ .yes. ]\n[ .yes. ]\n", Out); +} + +TEST(MustacheDelimiters, PostPartialBehavior) { + Value D = Object{{"value", "yes"}}; + Template T("[ {{>include}} ]\n[ .{{value}}. .|value|. ]\n"); + T.registerPartial("include", ".{{value}}. {{= | | =}} .|value|."); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("[ .yes. .yes. ]\n[ .yes. .|value|. ]\n", Out); +} + +TEST(MustacheDelimiters, SurroundingWhitespace) { + Value D = Object{}; + Template T("| {{=@ @=}} |"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("| |", Out); +} + +TEST(MustacheDelimiters, OutlyingWhitespaceInline) { + Value D = Object{}; + Template T(" | {{=@ @=}}\n"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ(" | \n", Out); +} + +TEST(MustacheDelimiters, StandaloneTag) { + Value D = Object{}; + Template T("Begin.\n{{=@ @=}}\nEnd.\n"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("Begin.\nEnd.\n", Out); +} + +TEST(MustacheDelimiters, IndentedStandaloneTag) { + Value D = Object{}; + Template T("Begin.\n {{=@ @=}}\nEnd.\n"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("Begin.\nEnd.\n", Out); +} + +TEST(MustacheDelimiters, StandaloneLineEndings) { + Value D = Object{}; + Template T("|\r\n{{= @ @ =}}\r\n|"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("|\r\n|", Out); +} + +TEST(MustacheDelimiters, StandaloneWithoutPreviousLine) { + Value D = Object{}; + Template T(" {{=@ @=}}\n="); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("=", Out); +} + +TEST(MustacheDelimiters, StandaloneWithoutNewline) { + Value D = Object{}; + Template T("=\n {{=@ @=}}"); + std::string Out; + raw_string_ostream OS(Out); + T.render(D, OS); + EXPECT_EQ("=\n", Out); +} + +TEST(MustacheDelimiters, PairwithPadding) { + Value D = Object{}; + Template T("|{{= @ @ =}}|"); std::string Out; raw_string_ostream OS(Out); T.render(D, OS); - EXPECT_NE("|---|", Out); + EXPECT_EQ("||", Out); } diff --git a/llvm/unittests/Support/Path.cpp b/llvm/unittests/Support/Path.cpp index 888729b9dd249..eb649defc0021 100644 --- a/llvm/unittests/Support/Path.cpp +++ b/llvm/unittests/Support/Path.cpp @@ -255,14 +255,14 @@ TEST(Support, Path) { { SmallString<32> Relative("foo.cpp"); - sys::fs::make_absolute("/root", Relative); + path::make_absolute("/root", Relative); Relative[5] = '/'; // Fix up windows paths. 
ASSERT_EQ("/root/foo.cpp", Relative); } { SmallString<32> Relative("foo.cpp"); - sys::fs::make_absolute("//root", Relative); + path::make_absolute("//root", Relative); Relative[6] = '/'; // Fix up windows paths. ASSERT_EQ("//root/foo.cpp", Relative); } } diff --git a/llvm/unittests/Support/ProgramTest.cpp b/llvm/unittests/Support/ProgramTest.cpp index eac0246d8c59e..13a142fcb0624 100644 --- a/llvm/unittests/Support/ProgramTest.cpp +++ b/llvm/unittests/Support/ProgramTest.cpp @@ -695,7 +695,14 @@ TEST_F(ProgramEnvTest, TestExecuteEmptyEnvironment) { int RetCode = ExecuteAndWait(Executable, argv, ArrayRef{}, {}, 0, 0, &Error, &ExecutionFailed); EXPECT_FALSE(ExecutionFailed) << Error; +#ifndef __MINGW32__ + // When running with an empty environment, the child process doesn't inherit + // the PATH variable. On MinGW, it is common for executables to require a + // shared libstdc++ or libc++ DLL, which may be in PATH but not in the + // directory of SupportTests.exe - leading to STATUS_DLL_NOT_FOUND errors. + // Therefore, waive this failure in MinGW environments. ASSERT_EQ(0, RetCode); +#endif } } // end anonymous namespace diff --git a/llvm/unittests/Support/SipHashTest.cpp b/llvm/unittests/Support/SipHashTest.cpp index 7c557eb488acc..3037e6436e18d 100644 --- a/llvm/unittests/Support/SipHashTest.cpp +++ b/llvm/unittests/Support/SipHashTest.cpp @@ -50,6 +50,13 @@ TEST(SipHashTest, SipHash_2_4_128) { } } +// Tests for the 64-bit stable SipHash wrapper. +TEST(SipHashTest, StableSipHash) { + EXPECT_EQ(0xB2BB69BB0A2AC0F1UL, getStableSipHash("")); + EXPECT_EQ(0x9304ABFF427B72E8UL, getStableSipHash("strlen")); + EXPECT_EQ(0x55F45179A08AE51BUL, getStableSipHash("_ZN1 ind; f")); +} + // Tests for the ptrauth-specific SipHash wrapper. TEST(SipHashTest, PointerAuthSipHash) { // Test some basic cases.
diff --git a/llvm/unittests/Support/TypeSizeTest.cpp b/llvm/unittests/Support/TypeSizeTest.cpp index b02b7e6009535..018b2405d4005 100644 --- a/llvm/unittests/Support/TypeSizeTest.cpp +++ b/llvm/unittests/Support/TypeSizeTest.cpp @@ -58,6 +58,7 @@ static_assert(ElementCount::getFixed(8).divideCoefficientBy(2) == static_assert(ElementCount::getFixed(8).multiplyCoefficientBy(3) == ElementCount::getFixed(24)); static_assert(ElementCount::getFixed(8).isKnownMultipleOf(2)); +static_assert(!ElementCount::getFixed(8).isKnownMultipleOf(0)); constexpr TypeSize TSFixed0 = TypeSize::getFixed(0); constexpr TypeSize TSFixed1 = TypeSize::getFixed(1); diff --git a/llvm/unittests/Support/VirtualFileSystemTest.cpp b/llvm/unittests/Support/VirtualFileSystemTest.cpp index 6228de8aa897a..f52f25f93744d 100644 --- a/llvm/unittests/Support/VirtualFileSystemTest.cpp +++ b/llvm/unittests/Support/VirtualFileSystemTest.cpp @@ -1941,7 +1941,30 @@ TEST_F(VFSFromYAMLTest, ReturnsExternalPathVFSHit) { EXPECT_EQ(0, NumDiagnostics); } -TEST_F(VFSFromYAMLTest, RootRelativeTest) { +TEST_F(VFSFromYAMLTest, RelativeFileDirWithOverlayRelativeSetting) { + auto Lower = makeIntrusiveRefCnt(); + Lower->addDirectory("//root/foo/bar"); + Lower->addRegularFile("//root/foo/bar/a"); + Lower->setCurrentWorkingDirectory("//root/foo"); + IntrusiveRefCntPtr FS = + getFromYAMLString("{\n" + " 'case-sensitive': false,\n" + " 'overlay-relative': true,\n" + " 'roots': [\n" + " { 'name': '//root/foo/bar/b', 'type': 'file',\n" + " 'external-contents': 'a'\n" + " }\n" + " ]\n" + "}", + Lower, "bar/overlay"); + + ASSERT_NE(FS.get(), nullptr); + ErrorOr S = FS->status("//root/foo/bar/b"); + ASSERT_FALSE(S.getError()); + EXPECT_EQ("//root/foo/bar/a", S->getName()); +} + +TEST_F(VFSFromYAMLTest, RootRelativeToOverlayDirTest) { auto Lower = makeIntrusiveRefCnt(); Lower->addDirectory("//root/foo/bar"); Lower->addRegularFile("//root/foo/bar/a"); @@ -2004,6 +2027,35 @@ TEST_F(VFSFromYAMLTest, RootRelativeTest) { #endif } +TEST_F(VFSFromYAMLTest, RootRelativeToCWDTest) { + auto Lower = makeIntrusiveRefCnt(); + Lower->addDirectory("//root/foo/bar"); + Lower->addRegularFile("//root/foo/bar/a"); + Lower->addDirectory("//root/foo/bar/cwd"); + Lower->addRegularFile("//root/foo/bar/cwd/a"); + Lower->setCurrentWorkingDirectory("//root/foo/bar/cwd"); + IntrusiveRefCntPtr FS = + getFromYAMLString("{\n" + " 'case-sensitive': false,\n" + " 'root-relative': 'cwd',\n" + " 'roots': [\n" + " { 'name': 'b', 'type': 'file',\n" + " 'external-contents': '//root/foo/bar/a'\n" + " }\n" + " ]\n" + "}", + Lower, "//root/foo/bar/overlay"); + + ASSERT_NE(FS.get(), nullptr); + + ErrorOr S1 = FS->status("//root/foo/bar/b"); + ASSERT_TRUE(S1.getError()); + + ErrorOr S2 = FS->status("//root/foo/bar/cwd/b"); + ASSERT_FALSE(S2.getError()); + EXPECT_EQ("//root/foo/bar/a", S2->getName()); +} + TEST_F(VFSFromYAMLTest, ReturnsInternalPathVFSHit) { auto BaseFS = makeIntrusiveRefCnt(); BaseFS->addFile("//root/foo/realname", 0, @@ -2489,6 +2541,7 @@ TEST_F(VFSFromYAMLTest, RelativePaths) { SmallString<128> CWD; EC = llvm::sys::fs::current_path(CWD); ASSERT_FALSE(EC); + Lower->setCurrentWorkingDirectory(CWD); // Filename at root level without a parent directory. 
IntrusiveRefCntPtr FS = getFromYAMLString( diff --git a/llvm/unittests/Transforms/Coroutines/ExtraRematTest.cpp b/llvm/unittests/Transforms/Coroutines/ExtraRematTest.cpp index 68bf640334b5f..f477a118b4c8b 100644 --- a/llvm/unittests/Transforms/Coroutines/ExtraRematTest.cpp +++ b/llvm/unittests/Transforms/Coroutines/ExtraRematTest.cpp @@ -97,7 +97,7 @@ StringRef Text = R"( call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0) + call void @llvm.coro.end(ptr %hdl, i1 0) ret ptr %hdl } @@ -110,7 +110,7 @@ StringRef Text = R"( declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin(token, ptr) - declare i1 @llvm.coro.end(ptr, i1) + declare void @llvm.coro.end(ptr, i1) declare i32 @should.remat(i32) @@ -212,7 +212,7 @@ StringRef TextCoroBeginCustomABI = R"( call void @free(ptr %mem) br label %suspend suspend: - call i1 @llvm.coro.end(ptr %hdl, i1 0) + call void @llvm.coro.end(ptr %hdl, i1 0) ret ptr %hdl } @@ -225,7 +225,7 @@ StringRef TextCoroBeginCustomABI = R"( declare token @llvm.coro.id(i32, ptr, ptr, ptr) declare i1 @llvm.coro.alloc(token) declare ptr @llvm.coro.begin.custom.abi(token, ptr, i32) - declare i1 @llvm.coro.end(ptr, i1) + declare void @llvm.coro.end(ptr, i1) declare i32 @should.remat(i32) diff --git a/llvm/unittests/Transforms/Utils/BasicBlockUtilsTest.cpp b/llvm/unittests/Transforms/Utils/BasicBlockUtilsTest.cpp index 40a8c1d8d3da1..3c9374b526b09 100644 --- a/llvm/unittests/Transforms/Utils/BasicBlockUtilsTest.cpp +++ b/llvm/unittests/Transforms/Utils/BasicBlockUtilsTest.cpp @@ -672,7 +672,7 @@ define void @positive_case(i32 %0) #0 { destroy: ret void exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } @@ -689,13 +689,13 @@ define void @notpresplit(i32 %0) { destroy: ret void exit: - call i1 @llvm.coro.end(ptr null, i1 false, token none) + call void @llvm.coro.end(ptr null, i1 false, token none) ret void } declare token @llvm.coro.save(ptr) declare i8 @llvm.coro.suspend(token, i1) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) attributes #0 = { presplitcoroutine } )IR"); diff --git a/llvm/unittests/tools/llvm-mca/X86/TestIncrementalMCA.cpp b/llvm/unittests/tools/llvm-mca/X86/TestIncrementalMCA.cpp index 17809e7beda95..1b0073e026bae 100644 --- a/llvm/unittests/tools/llvm-mca/X86/TestIncrementalMCA.cpp +++ b/llvm/unittests/tools/llvm-mca/X86/TestIncrementalMCA.cpp @@ -130,6 +130,10 @@ TEST_F(X86TestBase, TestInstructionRecycling) { mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM, /*CallLatency=*/100); IB.setInstRecycleCallback(GetRecycledInst); + // Setup a generic IPP that does not do anything (as it is not target + // specific) for testing purposes. 
+ auto IPP = std::make_unique(*STI, *MCII); + const SmallVector Instruments; // Tile size = 7 for (unsigned i = 0U, E = MCIs.size(); i < E;) { @@ -147,8 +151,10 @@ TEST_F(X86TestBase, TestInstructionRecycling) { }); ASSERT_FALSE(bool(RemainingE)); ASSERT_TRUE(RecycledInst); + IPP->postProcessInstruction(*RecycledInst, MCIs[i]); ISM.addRecycledInst(RecycledInst); } else { + IPP->postProcessInstruction(*InstOrErr.get(), MCIs[i]); ISM.addInst(std::move(InstOrErr.get())); } } diff --git a/llvm/utils/FileCheck/FileCheck.cpp b/llvm/utils/FileCheck/FileCheck.cpp index 185b6b30994fc..305c28b4c7257 100644 --- a/llvm/utils/FileCheck/FileCheck.cpp +++ b/llvm/utils/FileCheck/FileCheck.cpp @@ -384,7 +384,7 @@ BuildInputAnnotations(const SourceMgr &SM, unsigned CheckFileBufferID, std::vector &Annotations, unsigned &LabelWidth) { struct CompareSMLoc { - bool operator()(const SMLoc &LHS, const SMLoc &RHS) const { + bool operator()(SMLoc LHS, SMLoc RHS) const { return LHS.getPointer() < RHS.getPointer(); } }; diff --git a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp index bc42efa3b2e9c..be7537c83da3a 100644 --- a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp +++ b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp @@ -21,6 +21,13 @@ #include using namespace llvm; +// The type of more than one return value is represented as an anonymous +// struct, encoded with `IIT_STRUCT` followed by a byte specifying the number +// of return values, ranging from 2 (encoded as 0) to 257 (encoded as 255). +// So the maximum number of values that an intrinsic can return is 257. +static constexpr unsigned MaxNumReturn = 257; + //===----------------------------------------------------------------------===// // CodeGenIntrinsic Implementation //===----------------------------------------------------------------------===// @@ -29,15 +36,6 @@ CodeGenIntrinsicContext::CodeGenIntrinsicContext(const RecordKeeper &RC) { for (const Record *Rec : RC.getAllDerivedDefinitions("IntrinsicProperty")) if (Rec->getValueAsBit("IsDefault")) DefaultProperties.push_back(Rec); - - // The maximum number of values that an intrinsic can return is the size of - // of `IIT_RetNumbers` list - 1 (since we index into this list using the - // number of return values as the index).
- const auto *IIT_RetNumbers = - dyn_cast_or_null(RC.getGlobal("IIT_RetNumbers")); - if (!IIT_RetNumbers) - PrintFatalError("unable to find 'IIT_RetNumbers' list"); - MaxNumReturn = IIT_RetNumbers->size() - 1; } CodeGenIntrinsicTable::CodeGenIntrinsicTable(const RecordKeeper &RC) { @@ -302,11 +300,10 @@ CodeGenIntrinsic::CodeGenIntrinsic(const Record *R, } unsigned NumRet = R->getValueAsListInit("RetTypes")->size(); - if (NumRet > Ctx.MaxNumReturn) + if (NumRet > MaxNumReturn) PrintFatalError(DefLoc, "intrinsics can only return upto " + - Twine(Ctx.MaxNumReturn) + " values, '" + - DefName + "' returns " + Twine(NumRet) + - " values"); + Twine(MaxNumReturn) + " values, '" + DefName + + "' returns " + Twine(NumRet) + " values"); const Record *TypeInfo = R->getValueAsDef("TypeInfo"); if (!TypeInfo->isSubClassOf("TypeInfoGen")) diff --git a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h index 676f575b2749d..2e86149514f46 100644 --- a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h +++ b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h @@ -30,9 +30,6 @@ class RecordKeeper; struct CodeGenIntrinsicContext { explicit CodeGenIntrinsicContext(const RecordKeeper &RC); std::vector DefaultProperties; - - // Maximum number of values an intrinsic can return. - unsigned MaxNumReturn; }; struct CodeGenIntrinsic { diff --git a/llvm/utils/TableGen/Basic/IntrinsicEmitter.cpp b/llvm/utils/TableGen/Basic/IntrinsicEmitter.cpp index 559868dd54efe..75dffb18fca5a 100644 --- a/llvm/utils/TableGen/Basic/IntrinsicEmitter.cpp +++ b/llvm/utils/TableGen/Basic/IntrinsicEmitter.cpp @@ -794,12 +794,15 @@ AttributeSet Intrinsic::getFnAttributes(LLVMContext &C, ID id) {{ if (id == 0) return AttributeSet(); auto [FnAttrID, _] = unpackID(IntrinsicsToAttributesMap[id - 1]); + if (FnAttrID == {}) + return AttributeSet(); return getIntrinsicFnAttributeSet(C, FnAttrID); } #endif // GET_INTRINSIC_ATTRIBUTES )", - UniqAttributesBitSize, MaxNumAttrs, NoFunctionAttrsID); + UniqAttributesBitSize, MaxNumAttrs, NoFunctionAttrsID, + NoFunctionAttrsID); } void IntrinsicEmitter::EmitIntrinsicToBuiltinMap( diff --git a/llvm/utils/TableGen/CodeEmitterGen.cpp b/llvm/utils/TableGen/CodeEmitterGen.cpp index a61ba54d3ffd2..f2fd889746bac 100644 --- a/llvm/utils/TableGen/CodeEmitterGen.cpp +++ b/llvm/utils/TableGen/CodeEmitterGen.cpp @@ -471,12 +471,8 @@ void CodeEmitterGen::run(raw_ostream &O) { << ";\n"; O << R"( const unsigned opcode = MI.getOpcode(); - if (opcode < FirstSupportedOpcode) { - std::string msg; - raw_string_ostream Msg(msg); - Msg << "Unsupported instruction: " << MI; - report_fatal_error(Msg.str().c_str()); - } + if (opcode < FirstSupportedOpcode) + reportUnsupportedInst(MI); unsigned TableIndex = opcode - FirstSupportedOpcode; )"; @@ -502,10 +498,7 @@ void CodeEmitterGen::run(raw_ostream &O) { // Default case: unhandled opcode. 
O << " default:\n" - << " std::string msg;\n" - << " raw_string_ostream Msg(msg);\n" - << " Msg << \"Not supported instr: \" << MI;\n" - << " report_fatal_error(Msg.str().c_str());\n" + << " reportUnsupportedInst(MI);\n" << " }\n"; if (UseAPInt) O << " Inst = Value;\n"; @@ -521,12 +514,10 @@ void CodeEmitterGen::run(raw_ostream &O) { << " const MCSubtargetInfo &STI) const {\n" << " switch (MI.getOpcode()) {\n"; emitCaseMap(O, BitOffsetCaseMap); - O << " }\n" - << " std::string msg;\n" - << " raw_string_ostream Msg(msg);\n" - << " Msg << \"Not supported instr[opcode]: \" << MI << \"[\" << OpNum " - "<< \"]\";\n" - << " report_fatal_error(Msg.str().c_str());\n" + O << " default:\n" + << " reportUnsupportedInst(MI);\n" + << " }\n" + << " reportUnsupportedOperand(MI, OpNum);\n" << "}\n\n" << "#endif // GET_OPERAND_BIT_OFFSET\n\n"; } diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp index af75e44f63e48..75bea77faba42 100644 --- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp +++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp @@ -335,6 +335,8 @@ bool TypeSetByHwMode::intersect(SetType &Out, const SetType &In) { using WildPartT = std::pair>; static const WildPartT WildParts[] = { {MVT::iPTR, [](MVT T) { return T.isScalarInteger() || T == MVT::iPTR; }}, + {MVT::cPTR, + [](MVT T) { return T.isCheriCapability() || T == MVT::cPTR; }}, }; bool Changed = false; @@ -816,6 +818,10 @@ void TypeInfer::expandOverloads(TypeSetByHwMode::SetType &Out, if (Out.count(MVT::pAny)) { Out.erase(MVT::pAny); Out.insert(MVT::iPTR); + for (MVT T : MVT::cheri_capability_valuetypes()) { + if (Legal.count(T)) + Out.insert(MVT::cPTR); + } } else if (Out.count(MVT::iAny)) { Out.erase(MVT::iAny); for (MVT T : MVT::integer_valuetypes()) @@ -1647,9 +1653,11 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode &N, case SDTCisVT: // Operand must be a particular type. return NodeToApply.UpdateNodeType(ResNo, VVT, TP); - case SDTCisPtrTy: - // Operand must be same as target pointer type. - return NodeToApply.UpdateNodeType(ResNo, MVT::iPTR, TP); + case SDTCisPtrTy: { + // Operand must be a legal pointer (iPTR, or possibly cPTR) type. + const TypeSetByHwMode &PtrTys = TP.getDAGPatterns().getLegalPtrTypes(); + return NodeToApply.UpdateNodeType(ResNo, PtrTys, TP); + } case SDTCisInt: // Require it to be one of the legal integer VTs. return TI.EnforceInteger(NodeToApply.getExtType(ResNo)); @@ -3293,6 +3301,7 @@ CodeGenDAGPatterns::CodeGenDAGPatterns(const RecordKeeper &R, PatternRewriterFn PatternRewriter) : Records(R), Target(R), Intrinsics(R), LegalVTS(Target.getLegalValueTypes()), + LegalPtrVTS(ComputeLegalPtrTypes()), PatternRewriter(std::move(PatternRewriter)) { ParseNodeInfo(); ParseNodeTransforms(); @@ -3328,6 +3337,36 @@ const Record *CodeGenDAGPatterns::getSDNodeNamed(StringRef Name) const { return N; } +// Compute the subset of iPTR and cPTR legal for each mode, coalescing into the +// default mode where possible to avoid predicate explosion. 
+TypeSetByHwMode CodeGenDAGPatterns::ComputeLegalPtrTypes() const { + auto LegalPtrsForSet = [](const MachineValueTypeSet &In) { + MachineValueTypeSet Out; + Out.insert(MVT::iPTR); + for (MVT T : MVT::cheri_capability_valuetypes()) { + if (In.count(T)) { + Out.insert(MVT::cPTR); + break; + } + } + return Out; + }; + + const TypeSetByHwMode &LegalTypes = getLegalTypes(); + MachineValueTypeSet LegalPtrsDefault = + LegalPtrsForSet(LegalTypes.get(DefaultMode)); + + TypeSetByHwMode LegalPtrTypes; + for (const auto &I : LegalTypes) { + MachineValueTypeSet S = LegalPtrsForSet(I.second); + if (I.first != DefaultMode && S == LegalPtrsDefault) + continue; + LegalPtrTypes.getOrCreate(I.first).insert(S); + } + + return LegalPtrTypes; +} + // Parse all of the SDNode definitions for the target, populating SDNodes. void CodeGenDAGPatterns::ParseNodeInfo() { const CodeGenHwModes &CGH = getTargetInfo().getHwModes(); diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h index 64fec275faa68..2ed8d1376b045 100644 --- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h +++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h @@ -1135,6 +1135,7 @@ class CodeGenDAGPatterns { std::vector<PatternToMatch> PatternsToMatch; TypeSetByHwMode LegalVTS; + TypeSetByHwMode LegalPtrVTS; using PatternRewriterFn = std::function<void(TreePattern *)>; PatternRewriterFn PatternRewriter; @@ -1148,6 +1149,7 @@ class CodeGenDAGPatterns { CodeGenTarget &getTargetInfo() { return Target; } const CodeGenTarget &getTargetInfo() const { return Target; } const TypeSetByHwMode &getLegalTypes() const { return LegalVTS; } + const TypeSetByHwMode &getLegalPtrTypes() const { return LegalPtrVTS; } const Record *getSDNodeNamed(StringRef Name) const; @@ -1249,6 +1251,7 @@ class CodeGenDAGPatterns { } private: + TypeSetByHwMode ComputeLegalPtrTypes() const; void ParseNodeInfo(); void ParseNodeTransforms(); void ParseComplexPatterns(); diff --git a/llvm/utils/TableGen/Common/DAGISelMatcher.cpp b/llvm/utils/TableGen/Common/DAGISelMatcher.cpp index 255974624e8f0..4fdb386bf45e7 100644 --- a/llvm/utils/TableGen/Common/DAGISelMatcher.cpp +++ b/llvm/utils/TableGen/Common/DAGISelMatcher.cpp @@ -328,6 +328,14 @@ static bool TypesAreContradictory(MVT::SimpleValueType T1, if (T1 == T2) return false; + if (T1 == MVT::pAny) + return TypesAreContradictory(MVT::iPTR, T2) && + TypesAreContradictory(MVT::cPTR, T2); + + if (T2 == MVT::pAny) + return TypesAreContradictory(T1, MVT::iPTR) && + TypesAreContradictory(T1, MVT::cPTR); + // If either type is about iPtr, then they don't conflict unless the other // one is not a scalar integer type. if (T1 == MVT::iPTR) @@ -336,7 +344,13 @@ static bool TypesAreContradictory(MVT::SimpleValueType T1, if (T2 == MVT::iPTR) return !MVT(T1).isInteger() || MVT(T1).isVector(); - // Otherwise, they are two different non-iPTR types, they conflict. + if (T1 == MVT::cPTR) + return !MVT(T2).isCheriCapability() || MVT(T2).isVector(); + + if (T2 == MVT::cPTR) + return !MVT(T1).isCheriCapability() || MVT(T1).isVector(); + + // Otherwise, they are two different non-iPTR/cPTR types, they conflict.
return true; } diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp index 3f615160f683e..5d49715879280 100644 --- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp +++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.cpp @@ -1467,7 +1467,9 @@ Error OperandMatcher::addTypeCheckPredicate(const TypeSetByHwMode &VTy, if (!VTy.isMachineValueType()) return failUnsupported("unsupported typeset"); - if (VTy.getMachineValueType() == MVT::iPTR && OperandIsAPointer) { + if ((VTy.getMachineValueType() == MVT::iPTR || + VTy.getMachineValueType() == MVT::cPTR) && + OperandIsAPointer) { addPredicate<PointerToAnyOperandMatcher>(0); return Error::success(); } diff --git a/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp b/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp index b617a3dbca586..3a2ef55656067 100644 --- a/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp +++ b/llvm/utils/TableGen/Common/VarLenCodeEmitterGen.cpp @@ -320,10 +320,7 @@ void VarLenCodeEmitterGen::run(raw_ostream &OS) { } // Default case: unhandled opcode OS << " default:\n" - << " std::string msg;\n" - << " raw_string_ostream Msg(msg);\n" - << " Msg << \"Not supported instr: \" << MI;\n" - << " report_fatal_error(Msg.str().c_str());\n" + << " reportUnsupportedInst(MI);\n" << " }\n"; OS << "}\n\n"; } diff --git a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp index 8d8189983270e..268e6bbc4eee3 100644 --- a/llvm/utils/TableGen/DAGISelMatcherOpt.cpp +++ b/llvm/utils/TableGen/DAGISelMatcherOpt.cpp @@ -519,9 +519,9 @@ static void FactorScope(std::unique_ptr<Matcher> &MatcherPtr) { CheckTypeMatcher *CTM = cast_or_null<CheckTypeMatcher>( FindNodeWithKind(Optn, Matcher::CheckType)); if (!CTM || - // iPTR checks could alias any other case without us knowing, don't - // bother with them. - CTM->getType() == MVT::iPTR || + // iPTR/cPTR checks could alias any other case without us knowing, + // don't bother with them. + CTM->getType() == MVT::iPTR || CTM->getType() == MVT::cPTR || // SwitchType only works for result #0. CTM->getResNo() != 0 || // If the CheckType isn't at the start of the list, see if we can move diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py index 1c795afa9e700..a5e3c39bfdecd 100644 --- a/llvm/utils/UpdateTestChecks/common.py +++ b/llvm/utils/UpdateTestChecks/common.py @@ -882,6 +882,7 @@ def __str__(self): class FunctionTestBuilder: def __init__(self, run_list, flags, scrubber_args, path, ginfo): + self._run_list = run_list self._verbose = flags.verbose self._record_args = flags.function_signature self._check_attributes = flags.check_attributes @@ -917,15 +918,53 @@ def __init__(self, run_list, flags, scrubber_args, path, ginfo): self._func_order.update({prefix: []}) self._global_var_dict.update({prefix: dict()}) + # Return true if there is conflicting output for different runs for the + # given prefix and function name. + def has_conflicting_output(self, prefix, func): + # There was conflicting output if the func_dict is None for this + # prefix and function.
+ return self._func_dict[prefix].get(func) is None + def finish_and_get_func_dict(self): - for prefix in self.get_failed_prefixes(): - warn( - "Prefix %s had conflicting output from different RUN lines for all functions in test %s" - % ( - prefix, - self._path, + all_funcs = set() + for prefix in self._func_dict: + all_funcs.update(self._func_dict[prefix].keys()) + + warnings_to_print = collections.defaultdict(list) + for func in sorted(list(all_funcs)): + for i, run_info in enumerate(self._run_list): + prefixes = run_info[0] + if not prefixes: + continue + + # Check if this RUN line produces this function at all. If + # not, we can skip analysing this function for this RUN. + run_contains_func = all( + func in self._func_dict.get(p, {}) for p in prefixes ) + if not run_contains_func: + continue + + # Check if this RUN line can print any checks for this + # function. It can't if all of its prefixes have conflicting + # (None) output. + cannot_print_for_this_run = all( + self.has_conflicting_output(p, func) for p in prefixes + ) + if cannot_print_for_this_run: + warnings_to_print[func].append((i, prefixes)) + + for func, warning_info in warnings_to_print.items(): + conflict_strs = [] + for run_index, prefixes in warning_info: + conflict_strs.append( + f"RUN #{run_index + 1} (prefixes: {', '.join(prefixes)})" + ) + warn( + f"For function '{func}', the following RUN lines will not generate checks due to conflicting output: {', '.join(conflict_strs)}", + test_file=self._path, ) + return self._func_dict def func_order(self): @@ -1078,20 +1117,6 @@ def processed_prefixes(self, prefixes): """ self._processed_prefixes.update(prefixes) - def get_failed_prefixes(self): - # This returns the list of those prefixes that failed to match any function, - # because there were conflicting bodies produced by different RUN lines, in - # all instances of the prefix. - for prefix in self._func_dict: - if self._func_dict[prefix] and ( - not [ - fct - for fct in self._func_dict[prefix] - if self._func_dict[prefix][fct] is not None - ] - ): - yield prefix - ##### Generator of LLVM IR CHECK lines diff --git a/llvm/utils/git/code-format-helper.py b/llvm/utils/git/code-format-helper.py index 7a5311d668f79..6f809c5977c75 100755 --- a/llvm/utils/git/code-format-helper.py +++ b/llvm/utils/git/code-format-helper.py @@ -173,7 +173,8 @@ def run(self, changed_files: List[str], args: FormatArgs) -> bool: f":warning: The {self.friendly_name} failed without printing " "a diff. Check the logs for stderr output. 
:warning:" ) - self.update_pr(comment_text, args, create_new=False) + if should_update_gh: + self.update_pr(comment_text, args, create_new=False) return False diff --git a/llvm/utils/git/github-automation.py b/llvm/utils/git/github-automation.py index eb1abb2f37f20..948788d501d1d 100755 --- a/llvm/utils/git/github-automation.py +++ b/llvm/utils/git/github-automation.py @@ -297,9 +297,12 @@ def run(self) -> bool: print(e) continue + total_prs_url = f"https://github.com/llvm/llvm-project/pulls?q=author%3A{self.issue.user.login}+is%3Apr" + merged_prs_url = total_prs_url + "+is%3Amerged" comment = f""" ### Activity Summary: - * [{total_prs} Pull Requests](https://github.com/llvm/llvm-project/pulls/{self.issue.user.login}) ({merged_prs} merged) + * [{total_prs} Pull Requests]({total_prs_url}) + * [{merged_prs} Merged Pull Requests]({merged_prs_url}) * Top 3 Committers: {get_user_values_str(get_top_values(merged_by))} * Top 3 Reviewers: {get_user_values_str(get_top_values(reviewed_by))} """ diff --git a/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn b/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn index d4ec80b3d5be8..c143acfc915bc 100644 --- a/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn +++ b/llvm/utils/gn/secondary/bolt/lib/Core/BUILD.gn @@ -36,6 +36,7 @@ static_library("Core") { "GDBIndex.cpp", "HashUtilities.cpp", "JumpTable.cpp", + "MCInstUtils.cpp", "MCPlusBuilder.cpp", "ParallelUtilities.cpp", "Relocation.cpp", diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn index d18cefff335c5..036123371d24c 100644 --- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn +++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn @@ -27,6 +27,7 @@ static_library("bugprone") { "CapturingThisInMemberVariableCheck.cpp", "CastingThroughVoidCheck.cpp", "ChainedComparisonCheck.cpp", + "CommandProcessorCheck.cpp", "ComparePointerToMemberVirtualFunctionCheck.cpp", "CopyConstructorInitCheck.cpp", "CrtpConstructorAccessibilityCheck.cpp", diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/cert/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/cert/BUILD.gn index ac2cc2fd8236f..b097e139b9c7f 100644 --- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/cert/BUILD.gn +++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/cert/BUILD.gn @@ -16,7 +16,6 @@ static_library("cert") { ] sources = [ "CERTTidyModule.cpp", - "CommandProcessorCheck.cpp", "DefaultOperatorNewAlignmentCheck.cpp", "DontModifyStdNamespaceCheck.cpp", "FloatLoopCounter.cpp", diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/readability/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/readability/BUILD.gn index 4de101d600040..327b80b449e78 100644 --- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/readability/BUILD.gn +++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/readability/BUILD.gn @@ -52,6 +52,7 @@ static_library("readability") { "RedundantFunctionPtrDereferenceCheck.cpp", "RedundantInlineSpecifierCheck.cpp", "RedundantMemberInitCheck.cpp", + "RedundantParenthesesCheck.cpp", "RedundantPreprocessorCheck.cpp", "RedundantSmartptrGetCheck.cpp", "RedundantStringCStrCheck.cpp", diff --git a/llvm/utils/gn/secondary/clang/lib/Analysis/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Analysis/BUILD.gn index b8c8585a33a9b..5f9eb9adce04a 100644 --- a/llvm/utils/gn/secondary/clang/lib/Analysis/BUILD.gn +++ 
b/llvm/utils/gn/secondary/clang/lib/Analysis/BUILD.gn @@ -27,6 +27,7 @@ static_library("Analysis") { "FixitUtil.cpp", "IntervalPartition.cpp", "IssueHash.cpp", + "LifetimeAnnotations.cpp", "LifetimeSafety.cpp", "LiveVariables.cpp", "MacroExpansionContext.cpp", diff --git a/llvm/utils/gn/secondary/clang/lib/Tooling/DependencyScanning/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Tooling/DependencyScanning/BUILD.gn index 739e2fbb35982..6733cf4ef3772 100644 --- a/llvm/utils/gn/secondary/clang/lib/Tooling/DependencyScanning/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/lib/Tooling/DependencyScanning/BUILD.gn @@ -18,6 +18,7 @@ static_library("DependencyScanning") { "//llvm/lib/TargetParser", ] sources = [ + "DependencyScannerImpl.cpp", "DependencyScanningFilesystem.cpp", "DependencyScanningService.cpp", "DependencyScanningTool.cpp", diff --git a/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni b/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni index ac48b940bce20..2ab2a0eb2783a 100644 --- a/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni +++ b/llvm/utils/gn/secondary/compiler-rt/lib/builtins/sources.gni @@ -526,6 +526,13 @@ if (current_cpu == "ve") { ] } +if (current_cpu == "wasm") { + builtins_sources += [ + "wasm/__c_longjmp.S", + "wasm/__cpp_exceptions.S", + ] +} + if (!compiler_rt_exclude_atomic_builtin) { builtins_sources += [ "atomic.c" ] } diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn index aa29b80fe8747..f771099cb4c4a 100644 --- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn +++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn @@ -290,6 +290,7 @@ if (current_toolchain == default_toolchain) { "__atomic/check_memory_order.h", "__atomic/contention_t.h", "__atomic/fence.h", + "__atomic/floating_point_helper.h", "__atomic/is_always_lock_free.h", "__atomic/kill_dependency.h", "__atomic/memory_order.h", @@ -1495,6 +1496,7 @@ if (current_toolchain == default_toolchain) { "__type_traits/is_floating_point.h", "__type_traits/is_function.h", "__type_traits/is_fundamental.h", + "__type_traits/is_generic_transparent_comparator.h", "__type_traits/is_implicit_lifetime.h", "__type_traits/is_implicitly_default_constructible.h", "__type_traits/is_integral.h", @@ -1537,6 +1539,7 @@ if (current_toolchain == default_toolchain) { "__type_traits/make_32_64_or_128_bit.h", "__type_traits/make_const_lvalue_ref.h", "__type_traits/make_signed.h", + "__type_traits/make_transparent.h", "__type_traits/make_unsigned.h", "__type_traits/maybe_const.h", "__type_traits/nat.h", diff --git a/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn index 2f692d752ee18..c37f43c637767 100644 --- a/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/CAS/BUILD.gn @@ -4,9 +4,11 @@ static_library("CAS") { "ActionCache.cpp", "ActionCaches.cpp", "BuiltinCAS.cpp", + "DatabaseFile.cpp", "InMemoryCAS.cpp", "MappedFileRegionArena.cpp", "ObjectStore.cpp", "OnDiskCommon.cpp", + "OnDiskTrieRawHashMap.cpp", ] } diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn index 822e3cec4823d..646f61d15f4a3 100644 --- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn @@ -8,7 +8,6 @@ static_library("Vectorize") { "//llvm/lib/Transforms/Utils", ] sources = [ - "EVLIndVarSimplify.cpp", "LoadStoreVectorizer.cpp", 
"LoopIdiomVectorize.cpp", "LoopVectorizationLegality.cpp", diff --git a/llvm/utils/gn/secondary/llvm/tools/llvm-remarkutil/BUILD.gn b/llvm/utils/gn/secondary/llvm/tools/llvm-remarkutil/BUILD.gn index 4e4ffb54dbe3e..2d02c151058e9 100644 --- a/llvm/utils/gn/secondary/llvm/tools/llvm-remarkutil/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/tools/llvm-remarkutil/BUILD.gn @@ -10,6 +10,7 @@ executable("llvm-remarkutil") { "RemarkConvert.cpp", "RemarkCount.cpp", "RemarkCounter.cpp", + "RemarkFilter.cpp", "RemarkInstructionMix.cpp", "RemarkSizeDiff.cpp", "RemarkUtil.cpp", diff --git a/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn index de6de0b119e9e..ccb447f1b7254 100644 --- a/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/unittests/CAS/BUILD.gn @@ -10,6 +10,7 @@ unittest("CASTests") { "ActionCacheTest.cpp", "CASTestConfig.cpp", "ObjectStoreTest.cpp", + "OnDiskTrieRawHashMapTest.cpp", "ProgramTest.cpp", ] } diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py index 9ae8ac75bee08..a7e2705f609af 100644 --- a/llvm/utils/lit/lit/TestRunner.py +++ b/llvm/utils/lit/lit/TestRunner.py @@ -1541,8 +1541,10 @@ def regex_escape(s): return s path_substitutions = [ - ("s", sourcepath), ("S", sourcedir), ("p", sourcedir), - ("t", tmpName), ("T", tmpDir) + ("s", sourcepath), + ("S", sourcedir), + ("p", sourcedir), + ("t", tmpName), ] for path_substitution in path_substitutions: letter = path_substitution[0] @@ -1919,6 +1921,14 @@ def processLine(ln): # seems reasonable. ln = _caching_re_compile(a).sub(str(b), escapePercents(ln)) + # TODO(boomanaiden154): Remove when we branch LLVM 22 so people on the + # release branch will have sufficient time to migrate. + if bool(_caching_re_compile("%T").search(ln)): + raise ValueError( + "%T is no longer supported. Please create directories with names " + "based on %t." + ) + # Strip the trailing newline and any extra whitespace. return ln.strip() diff --git a/llvm/utils/lit/tests/Inputs/shtest-shell/capital-t-error-message.txt b/llvm/utils/lit/tests/Inputs/shtest-shell/capital-t-error-message.txt new file mode 100644 index 0000000000000..e69dfee8fced8 --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/shtest-shell/capital-t-error-message.txt @@ -0,0 +1,2 @@ +# Check that we return a decent error message when someone uses %T +# RUN: echo %T > %t diff --git a/llvm/utils/lit/tests/Inputs/shtest-shell/valid-shell.txt b/llvm/utils/lit/tests/Inputs/shtest-shell/valid-shell.txt index 75ce8b7733ad7..cfa829f0bf2f7 100644 --- a/llvm/utils/lit/tests/Inputs/shtest-shell/valid-shell.txt +++ b/llvm/utils/lit/tests/Inputs/shtest-shell/valid-shell.txt @@ -18,15 +18,15 @@ # Check force remove commands success whether the directory does or doesn't exist. # # Check the mkdir command with -p option. 
-# RUN: rm -f -r %T/test -# RUN: %{python} %S/../check_path.py dir %T/test > %t.out +# RUN: rm -f -r %t.test +# RUN: %{python} %S/../check_path.py dir %t.test > %t.out # RUN: FileCheck --check-prefix=REMOVE-PARENT-DIR < %t.out %s -# RUN: mkdir -p %T/test -# RUN: %{python} %S/../check_path.py dir %T/test > %t.out +# RUN: mkdir -p %t.test +# RUN: %{python} %S/../check_path.py dir %t.test > %t.out # RUN: FileCheck --check-prefix=MAKE-PARENT-DIR < %t.out %s -# RUN: rm -f %T/test || true -# RUN: rm -f -r %T/test -# RUN: %{python} %S/../check_path.py dir %T/test > %t.out +# RUN: rm -f %t.test || true +# RUN: rm -f -r %t.test +# RUN: %{python} %S/../check_path.py dir %t.test > %t.out # RUN: FileCheck --check-prefix=REMOVE-PARENT-DIR < %t.out %s # # MAKE-PARENT-DIR: True @@ -34,15 +34,15 @@ # # Check the mkdir command without -p option. # -# RUN: rm -rf %T/test1 -# RUN: mkdir %T/test1 -# RUN: %{python} %S/../check_path.py dir %T/test1 > %t.out +# RUN: rm -rf %t.test1 +# RUN: mkdir %t.test1 +# RUN: %{python} %S/../check_path.py dir %t.test1 > %t.out # RUN: FileCheck --check-prefix=MAKE-DIR < %t.out %s -# RUN: cd %T/test1 && mkdir foo -# RUN: %{python} %S/../check_path.py dir %T/test1 > %t.out +# RUN: cd %t.test1 && mkdir foo +# RUN: %{python} %S/../check_path.py dir %t.test1 > %t.out # RUN: FileCheck --check-prefix=MAKE-DIR < %t.out %s -# RUN: cd %T && rm -rf %T/test1 -# RUN: %{python} %S/../check_path.py dir %T/test1 > %t.out +# RUN: cd .. && rm -rf %t.test1 +# RUN: %{python} %S/../check_path.py dir %t.test1 > %t.out # RUN: FileCheck --check-prefix=REMOVE-DIR < %t.out %s # # MAKE-DIR: True @@ -50,18 +50,18 @@ # # Check creating and removing multiple folders and rm * operation. # -# RUN: rm -rf %T/test -# RUN: mkdir -p %T/test/test1 %T/test/test2 -# RUN: %{python} %S/../check_path.py dir %T/test %T/test/test1 %T/test/test2 > %t.out +# RUN: rm -rf %t.test +# RUN: mkdir -p %t.test/test1 %t.test/test2 +# RUN: %{python} %S/../check_path.py dir %t.test %t.test/test1 %t.test/test2 > %t.out # RUN: FileCheck --check-prefix=DIRS-EXIST < %t.out %s -# RUN: mkdir %T/test || true -# RUN: echo "create a temp file" > %T/test/temp.write -# RUN: echo "create a temp1 file" > %T/test/test1/temp1.write -# RUN: echo "create a temp2 file" > %T/test/test2/temp2.write -# RUN: %{python} %S/../check_path.py file %T/test/temp.write %T/test/test1/temp1.write %T/test/test2/temp2.write> %t.out +# RUN: mkdir %t.test || true +# RUN: echo "create a temp file" > %t.test/temp.write +# RUN: echo "create a temp1 file" > %t.test/test1/temp1.write +# RUN: echo "create a temp2 file" > %t.test/test2/temp2.write +# RUN: %{python} %S/../check_path.py file %t.test/temp.write %t.test/test1/temp1.write %t.test/test2/temp2.write> %t.out # RUN: FileCheck --check-prefix=FILES-EXIST < %t.out %s -# RUN: rm -r -f %T/* -# RUN: %{python} %S/../check_path.py dir %T/test > %t.out +# RUN: rm -r -f %t* +# RUN: %{python} %S/../check_path.py dir %t.test > %t.out # RUN: FileCheck --check-prefix=REMOVE-ALL < %t.out %s # # DIRS-EXIST: True @@ -81,7 +81,7 @@ # RUN: echo "hello-2" > %t1.stdout # RUN: diff %t.stdout %t1.stdout || true # -# RUN: mkdir -p %T/dir1 %T/dir2 -# RUN: cd %T/dir1 && echo "hello" > temp1.txt -# RUN: cd %T/dir2 && echo "hello" > temp2.txt -# RUN: diff temp2.txt ../dir1/temp1.txt +# RUN: mkdir -p %t.dir1 %t.dir2 +# RUN: cd %t.dir1 && echo "hello" > temp1.txt +# RUN: cd %t.dir2 && echo "hello" > temp2.txt +# RUN: diff temp2.txt ../%{t:stem}.tmp.dir1/temp1.txt diff --git a/llvm/utils/lit/tests/shtest-readfile-external.py 
b/llvm/utils/lit/tests/shtest-readfile-external.py index 99b0160d933fe..c00bff45c8703 100644 --- a/llvm/utils/lit/tests/shtest-readfile-external.py +++ b/llvm/utils/lit/tests/shtest-readfile-external.py @@ -1,5 +1,8 @@ ## Tests the readfile substitution. +# TODO(boomanaiden154): This sometimes fails, possibly due to buffers not being flushed. +# ALLOW_RETRIES: 2 + # UNSUPPORTED: system-windows # RUN: env LIT_USE_INTERNAL_SHELL=0 not %{lit} -a -v %{inputs}/shtest-readfile | FileCheck -match-full-lines -DTEMP_PATH=%S/Inputs/shtest-readfile/Output %s @@ -11,7 +14,7 @@ # CHECK-LABEL: FAIL: shtest-readfile :: file-does-not-exist.txt ({{[^)]*}}) # CHECK: echo $(cat /file/does/not/exist) && test -e /file/does/not/exist {{.*}} -# CHECK: cat: /file/does/not/exist: No such file or directory +# CHECK: {{.*}}cat{{.*}}/file/does/not/exist{{.*}} # CHECK-LABEL: FAIL: shtest-readfile :: relative-paths.txt ({{[^)]*}}) # CHECK: echo $(cat rel_path_test_folder/test_file) && test -e rel_path_test_folder/test_file {{.*}} diff --git a/llvm/utils/lit/tests/shtest-readfile.py b/llvm/utils/lit/tests/shtest-readfile.py index a122dd7664272..66e3a042bf787 100644 --- a/llvm/utils/lit/tests/shtest-readfile.py +++ b/llvm/utils/lit/tests/shtest-readfile.py @@ -1,5 +1,8 @@ ## Tests the readfile substitution. +# TODO(boomanaiden154): This sometimes fails, possibly due to buffers not being flushed. +# ALLOW_RETRIES: 2 + # RUN: env LIT_USE_INTERNAL_SHELL=1 not %{lit} -a -v %{inputs}/shtest-readfile | FileCheck -match-full-lines -DTEMP_PATH=%S%{fs-sep}Inputs%{fs-sep}shtest-readfile%{fs-sep}Output %s # CHECK: -- Testing: 4 tests{{.*}} @@ -10,7 +13,7 @@ # CHECK-LABEL: FAIL: shtest-readfile :: file-does-not-exist.txt ({{[^)]*}}) # CHECK: # executed command: @echo 'echo %{readfile:/file/does/not/exist}' -# CHECK: # | File specified in readfile substitution does not exist: /file/does/not/exist +# CHECK: # | File specified in readfile substitution does not exist: {{.*}}/file/does/not/exist # CHECK-LABEL: FAIL: shtest-readfile :: relative-paths.txt ({{[^)]*}}) # CHECK: echo hello diff --git a/llvm/utils/lit/tests/shtest-shell.py b/llvm/utils/lit/tests/shtest-shell.py index 498f6bb0adc11..38db1b75486cf 100644 --- a/llvm/utils/lit/tests/shtest-shell.py +++ b/llvm/utils/lit/tests/shtest-shell.py @@ -12,6 +12,10 @@ # CHECK: -- Testing: +# CHECK: UNRESOLVED: shtest-shell :: capital-t-error-message.txt +# CHECK: *** TEST 'shtest-shell :: capital-t-error-message.txt' FAILED *** +# CHECK: ValueError: %T is no longer supported. Please create directories with names based on %t. + # CHECK: FAIL: shtest-shell :: colon-error.txt # CHECK: *** TEST 'shtest-shell :: colon-error.txt' FAILED *** # CHECK: : @@ -633,5 +637,5 @@ # CHECK: *** # CHECK: PASS: shtest-shell :: valid-shell.txt -# CHECK: Unresolved Tests (1) +# CHECK: Unresolved Tests (2) # CHECK: Failed Tests (37) diff --git a/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py b/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py index 2661a2c8d6448..2d96feae5b58e 100644 --- a/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py +++ b/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py @@ -2,7 +2,7 @@ # ulimit does not work on non-POSIX platforms. # These tests are specific to options that Darwin does not support. 
-# UNSUPPORTED: system-windows, system-darwin +# UNSUPPORTED: system-windows, system-darwin, system-aix # RUN: not %{lit} -a -v %{inputs}/shtest-ulimit-nondarwin | FileCheck %s diff --git a/llvm/utils/llvm-test-mustache-spec/llvm-test-mustache-spec.cpp b/llvm/utils/llvm-test-mustache-spec/llvm-test-mustache-spec.cpp index 1f566e13f070a..9007eb365a15f 100644 --- a/llvm/utils/llvm-test-mustache-spec/llvm-test-mustache-spec.cpp +++ b/llvm/utils/llvm-test-mustache-spec/llvm-test-mustache-spec.cpp @@ -54,20 +54,6 @@ static int NumXFail = 0; static int NumSuccess = 0; static const StringMap> XFailTestNames = {{ - {"delimiters.json", - { - "Pair Behavior", - "Special Characters", - "Sections", - "Inverted Sections", - "Partial Inheritence", - "Post-Partial Behavior", - "Standalone Tag", - "Indented Standalone Tag", - "Standalone Line Endings", - "Standalone Without Previous Line", - "Standalone Without Newline", - }}, {"~dynamic-names.json", { "Basic Behavior - Partial", @@ -113,7 +99,6 @@ static const StringMap> XFailTestNames = {{ "Block reindentation", "Intrinsic indentation", "Nested block reindentation", - }}, {"~lambdas.json", { @@ -126,23 +111,7 @@ static const StringMap> XFailTestNames = {{ "Section - Expansion", "Section - Alternate Delimiters", "Section - Multiple Calls", - - }}, - {"interpolation.json", - { - "Triple Mustache", - "Triple Mustache Integer Interpolation", - "Triple Mustache Decimal Interpolation", - "Triple Mustache Null Interpolation", - "Triple Mustache Context Miss Interpolation", - "Dotted Names - Triple Mustache Interpolation", - "Implicit Iterators - Triple Mustache", - "Triple Mustache - Surrounding Whitespace", - "Triple Mustache - Standalone", - "Triple Mustache With Padding", }}, - {"partials.json", {"Standalone Indentation"}}, - {"sections.json", {"Implicit Iterator - Triple mustache"}}, }}; struct TestData { diff --git a/llvm/utils/profcheck-xfail.txt b/llvm/utils/profcheck-xfail.txt index 98c6d84950ff7..77e6ab7c5a6ea 100644 --- a/llvm/utils/profcheck-xfail.txt +++ b/llvm/utils/profcheck-xfail.txt @@ -107,6 +107,7 @@ Instrumentation/AddressSanitizer/AMDGPU/global_metadata_addrspacecasts.ll Instrumentation/AddressSanitizer/AMDGPU/instrument-stack.ll Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_lds_globals.ll Instrumentation/AddressSanitizer/AMDGPU/no_redzones_in_scratch_globals.ll +Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll Instrumentation/AddressSanitizer/asan_address_space_attr.ll Instrumentation/AddressSanitizer/asan-detect-invalid-pointer-pair.ll Instrumentation/AddressSanitizer/asan-disable-sanitizer-instrumentation.ll @@ -837,8 +838,6 @@ Transforms/InstCombine/2011-02-14-InfLoop.ll Transforms/InstCombine/AArch64/sve-intrinsic-sel.ll Transforms/InstCombine/AArch64/sve-intrinsic-simplify-binop.ll Transforms/InstCombine/AArch64/sve-intrinsic-simplify-shift.ll -Transforms/InstCombine/add2.ll -Transforms/InstCombine/add.ll Transforms/InstCombine/add-mask.ll Transforms/InstCombine/add-shl-mul-umax.ll Transforms/InstCombine/add-shl-sdiv-to-srem.ll @@ -1097,6 +1096,7 @@ Transforms/IROutliner/outlining-remapped-outputs.ll Transforms/IROutliner/outlining-same-constants.ll Transforms/IROutliner/outlining-same-globals.ll Transforms/IROutliner/outlining-same-output-blocks.ll +Transforms/IROutliner/outlining-special-state.ll Transforms/IROutliner/outlining-strip-loop-info.ll Transforms/IROutliner/outlining-swift-error.ll Transforms/IROutliner/phi-node-exit-path-order.ll @@ -1159,449 +1159,6 @@ 
Transforms/LoopUnroll/AArch64/unrolling-multi-exit.ll Transforms/LoopUnroll/peel-last-iteration-expansion-cost.ll Transforms/LoopUnroll/peel-last-iteration-with-guards.ll Transforms/LoopUnroll/peel-last-iteration-with-variable-trip-count.ll -Transforms/LoopVectorize/12-12-11-if-conv.ll -Transforms/LoopVectorize/AArch64/aarch64-predication.ll -Transforms/LoopVectorize/AArch64/arith-fp-frem-costs.ll -Transforms/LoopVectorize/AArch64/blend-costs.ll -Transforms/LoopVectorize/AArch64/check-prof-info.ll -Transforms/LoopVectorize/AArch64/clamped-trip-count.ll -Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll -Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll -Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll -Transforms/LoopVectorize/AArch64/drop-poison-generating-flags.ll -Transforms/LoopVectorize/AArch64/early_exit_costs.ll -Transforms/LoopVectorize/AArch64/epilog-iv-select-cmp.ll -Transforms/LoopVectorize/AArch64/epilog-vectorization-factors.ll -Transforms/LoopVectorize/AArch64/epilog-vectorization-widen-inductions.ll -Transforms/LoopVectorize/AArch64/extend-vectorization-factor-for-unprofitable-memops.ll -Transforms/LoopVectorize/AArch64/f128-fmuladd-reduction.ll -Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll -Transforms/LoopVectorize/AArch64/first-order-recurrence.ll -Transforms/LoopVectorize/AArch64/fixed-wide-lane-mask.ll -Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll -Transforms/LoopVectorize/AArch64/fmin-without-fast-math-flags.ll -Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll -Transforms/LoopVectorize/AArch64/induction-costs.ll -Transforms/LoopVectorize/AArch64/induction-costs-sve.ll -Transforms/LoopVectorize/AArch64/interleave_count_for_estimated_tc.ll -Transforms/LoopVectorize/AArch64/interleaved_cost.ll -Transforms/LoopVectorize/AArch64/interleave-with-gaps.ll -Transforms/LoopVectorize/AArch64/interleave-with-runtime-checks.ll -Transforms/LoopVectorize/AArch64/interleaving-load-store.ll -Transforms/LoopVectorize/AArch64/interleaving-reduction.ll -Transforms/LoopVectorize/AArch64/intrinsiccost.ll -Transforms/LoopVectorize/AArch64/invariant-replicate-region.ll -Transforms/LoopVectorize/AArch64/loop-vectorization-factors.ll -Transforms/LoopVectorize/AArch64/loopvectorize_pr33804_double.ll -Transforms/LoopVectorize/AArch64/low_trip_count_predicates.ll -Transforms/LoopVectorize/AArch64/low_trip_memcheck_cost.ll -Transforms/LoopVectorize/AArch64/masked-call.ll -Transforms/LoopVectorize/AArch64/masked-call-scalarize.ll -Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll -Transforms/LoopVectorize/AArch64/neoverse-epilogue-vect.ll -Transforms/LoopVectorize/AArch64/optsize_minsize.ll -Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll -Transforms/LoopVectorize/AArch64/outer_loop_test1_no_explicit_vect_width.ll -Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll -Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll -Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll -Transforms/LoopVectorize/AArch64/partial-reduce.ll -Transforms/LoopVectorize/AArch64/pr31900.ll -Transforms/LoopVectorize/AArch64/pr33053.ll -Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll -Transforms/LoopVectorize/AArch64/predicated-costs.ll -Transforms/LoopVectorize/AArch64/predication_costs.ll -Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll -Transforms/LoopVectorize/AArch64/reduction-small-size.ll 
-Transforms/LoopVectorize/AArch64/reg-usage.ll -Transforms/LoopVectorize/AArch64/replicating-load-store-costs.ll -Transforms/LoopVectorize/AArch64/runtime-check-trip-count-decisions.ll -Transforms/LoopVectorize/AArch64/scalable-call.ll -Transforms/LoopVectorize/AArch64/scalable-predicate-instruction.ll -Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll -Transforms/LoopVectorize/AArch64/scalable-reductions.ll -Transforms/LoopVectorize/AArch64/scalable-reductions-tf.ll -Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll -Transforms/LoopVectorize/AArch64/scalable-struct-return.ll -Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll -Transforms/LoopVectorize/AArch64/scalable-vectorization.ll -Transforms/LoopVectorize/AArch64/scalarize-store-with-predication.ll -Transforms/LoopVectorize/AArch64/sdiv-pow2.ll -Transforms/LoopVectorize/AArch64/select-costs.ll -Transforms/LoopVectorize/AArch64/simple_early_exit.ll -Transforms/LoopVectorize/AArch64/single-early-exit-interleave.ll -Transforms/LoopVectorize/AArch64/smallest-and-widest-types.ll -Transforms/LoopVectorize/AArch64/store-costs-sve.ll -Transforms/LoopVectorize/AArch64/strict-fadd.ll -Transforms/LoopVectorize/AArch64/struct-return-cost.ll -Transforms/LoopVectorize/AArch64/sve2-histcnt-epilogue.ll -Transforms/LoopVectorize/AArch64/sve-basic-vec.ll -Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll -Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll -Transforms/LoopVectorize/AArch64/sve-epilog-vect-reductions.ll -Transforms/LoopVectorize/AArch64/sve-epilog-vect-strict-reductions.ll -Transforms/LoopVectorize/AArch64/sve-epilog-vect-vscale-tune.ll -Transforms/LoopVectorize/AArch64/sve-epilog-vscale-fixed.ll -Transforms/LoopVectorize/AArch64/sve-gather-scatter-cost.ll -Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll -Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll -Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll -Transforms/LoopVectorize/AArch64/sve-large-strides.ll -Transforms/LoopVectorize/AArch64/sve-multiexit.ll -Transforms/LoopVectorize/AArch64/sve-select-cmp.ll -Transforms/LoopVectorize/AArch64/sve-tail-folding-cost.ll -Transforms/LoopVectorize/AArch64/sve-tail-folding-forced.ll -Transforms/LoopVectorize/AArch64/sve-tail-folding.ll -Transforms/LoopVectorize/AArch64/sve-tail-folding-option.ll -Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll -Transforms/LoopVectorize/AArch64/sve-tail-folding-unroll.ll -Transforms/LoopVectorize/AArch64/sve-wide-lane-mask.ll -Transforms/LoopVectorize/AArch64/tail-folding-styles.ll -Transforms/LoopVectorize/AArch64/tail-fold-uniform-memops.ll -Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll -Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll -Transforms/LoopVectorize/AArch64/uniform-args-call-variants.ll -Transforms/LoopVectorize/AArch64/unsafe-vf-hint-remark.ll -Transforms/LoopVectorize/AArch64/veclib-function-calls.ll -Transforms/LoopVectorize/AArch64/veclib-intrinsic-calls.ll -Transforms/LoopVectorize/AArch64/vplan-printing.ll -Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll -Transforms/LoopVectorize/ARM/mve-icmpcost.ll -Transforms/LoopVectorize/ARM/mve-multiexit.ll -Transforms/LoopVectorize/ARM/mve-qabs.ll -Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll -Transforms/LoopVectorize/ARM/mve-reductions.ll -Transforms/LoopVectorize/ARM/mve-reduction-types.ll 
-Transforms/LoopVectorize/ARM/mve-selectandorcost.ll -Transforms/LoopVectorize/ARM/optsize_minsize.ll -Transforms/LoopVectorize/ARM/prefer-tail-loop-folding.ll -Transforms/LoopVectorize/ARM/scalar-block-cost.ll -Transforms/LoopVectorize/ARM/tail-folding-allowed.ll -Transforms/LoopVectorize/ARM/tail-folding-counting-down.ll -Transforms/LoopVectorize/ARM/tail-folding-loop-hint.ll -Transforms/LoopVectorize/ARM/tail-folding-not-allowed.ll -Transforms/LoopVectorize/ARM/tail-folding-prefer-flag.ll -Transforms/LoopVectorize/ARM/tail-folding-reductions-allowed.ll -Transforms/LoopVectorize/as_cast.ll -Transforms/LoopVectorize/assume.ll -Transforms/LoopVectorize/bzip_reverse_loops.ll -Transforms/LoopVectorize/calloc.ll -Transforms/LoopVectorize/cast-induction.ll -Transforms/LoopVectorize/consecutive-ptr-uniforms.ll -Transforms/LoopVectorize/dbg-outer-loop-vect.ll -Transforms/LoopVectorize/debugloc.ll -Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll -Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size-needs-loop-guards.ll -Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll -Transforms/LoopVectorize/diag-with-hotness-info.ll -Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll -Transforms/LoopVectorize/early_exit_legality.ll -Transforms/LoopVectorize/epilog-iv-select-cmp.ll -Transforms/LoopVectorize/epilog-vectorization-any-of-reductions.ll -Transforms/LoopVectorize/epilog-vectorization-reductions.ll -Transforms/LoopVectorize/epilog-vectorization-trunc-induction-steps.ll -Transforms/LoopVectorize/explicit_outer_detection.ll -Transforms/LoopVectorize/explicit_outer_uniform_diverg_branch.ll -Transforms/LoopVectorize/first-order-recurrence-complex.ll -Transforms/LoopVectorize/first-order-recurrence.ll -Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll -Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll -Transforms/LoopVectorize/float-induction.ll -Transforms/LoopVectorize/float-minmax-instruction-flag.ll -Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll -Transforms/LoopVectorize/fmax-without-fast-math-flags.ll -Transforms/LoopVectorize/fmin-without-fast-math-flags.ll -Transforms/LoopVectorize/forked-pointers.ll -Transforms/LoopVectorize/gcc-examples.ll -Transforms/LoopVectorize/Hexagon/invalidate-cm-after-invalidating-interleavegroups.ll -Transforms/LoopVectorize/Hexagon/maximum-vf-crash.ll -Transforms/LoopVectorize/hoist-loads.ll -Transforms/LoopVectorize/i8-induction.ll -Transforms/LoopVectorize/icmp-uniforms.ll -Transforms/LoopVectorize/if-conversion.ll -Transforms/LoopVectorize/if-conversion-nest.ll -Transforms/LoopVectorize/if-pred-non-void.ll -Transforms/LoopVectorize/if-pred-not-when-safe.ll -Transforms/LoopVectorize/if-pred-stores.ll -Transforms/LoopVectorize/if-reduction.ll -Transforms/LoopVectorize/induction.ll -Transforms/LoopVectorize/instruction-only-used-outside-of-loop.ll -Transforms/LoopVectorize/interleave-and-scalarize-only.ll -Transforms/LoopVectorize/interleaved-accesses-2.ll -Transforms/LoopVectorize/interleaved-accesses-3.ll -Transforms/LoopVectorize/interleaved-accesses-different-insert-position.ll -Transforms/LoopVectorize/interleaved-accesses.ll -Transforms/LoopVectorize/interleaved-accesses-masked-group.ll -Transforms/LoopVectorize/interleaved-accesses-pred-stores.ll -Transforms/LoopVectorize/interleaved-accesses-requiring-scev-predicates.ll -Transforms/LoopVectorize/interleaved-accesses-uniform-load.ll 
-Transforms/LoopVectorize/invariant-store-vectorization-2.ll -Transforms/LoopVectorize/invariant-store-vectorization.ll -Transforms/LoopVectorize/is_fpclass.ll -Transforms/LoopVectorize/iv-select-cmp-decreasing.ll -Transforms/LoopVectorize/iv-select-cmp.ll -Transforms/LoopVectorize/iv-select-cmp-nested-loop.ll -Transforms/LoopVectorize/iv-select-cmp-no-wrap.ll -Transforms/LoopVectorize/iv-select-cmp-trunc.ll -Transforms/LoopVectorize/lcssa-crashes.ll -Transforms/LoopVectorize/load-deref-pred-align.ll -Transforms/LoopVectorize/load-deref-pred-neg-off.ll -Transforms/LoopVectorize/load-deref-pred-poison-ub-ops-feeding-pointer.ll -Transforms/LoopVectorize/load-of-struct-deref-pred.ll -Transforms/LoopVectorize/loop-form.ll -Transforms/LoopVectorize/loop-with-constant-exit-condition.ll -Transforms/LoopVectorize/memdep-fold-tail.ll -Transforms/LoopVectorize/metadata.ll -Transforms/LoopVectorize/minmax_reduction.ll -Transforms/LoopVectorize/multiple-exits-versioning.ll -Transforms/LoopVectorize/multiple-result-intrinsics.ll -Transforms/LoopVectorize/noalias-scope-decl.ll -Transforms/LoopVectorize/no_outside_user.ll -Transforms/LoopVectorize/no_switch.ll -Transforms/LoopVectorize/optimal-epilog-vectorization-liveout.ll -Transforms/LoopVectorize/optimal-epilog-vectorization.ll -Transforms/LoopVectorize/optimal-epilog-vectorization-scalable.ll -Transforms/LoopVectorize/optsize.ll -Transforms/LoopVectorize/outer_loop_hcfg_construction.ll -Transforms/LoopVectorize/outer-loop-inner-latch-successors.ll -Transforms/LoopVectorize/outer_loop_scalable.ll -Transforms/LoopVectorize/outer_loop_test1.ll -Transforms/LoopVectorize/outer_loop_test2.ll -Transforms/LoopVectorize/outer-loop-vec-phi-predecessor-order.ll -Transforms/LoopVectorize/outer-loop-wide-phis.ll -Transforms/LoopVectorize/phi-cost.ll -Transforms/LoopVectorize/pointer-induction.ll -Transforms/LoopVectorize/PowerPC/exit-branch-cost.ll -Transforms/LoopVectorize/PowerPC/large-loop-rdx.ll -Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll -Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization-profitability.ll -Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll -Transforms/LoopVectorize/PowerPC/vplan-scalarivsext-crash.ll -Transforms/LoopVectorize/pr154045-dont-fold-extractelement-livein.ll -Transforms/LoopVectorize/pr30654-phiscev-sext-trunc.ll -Transforms/LoopVectorize/pr32859.ll -Transforms/LoopVectorize/pr34681.ll -Transforms/LoopVectorize/pr37248.ll -Transforms/LoopVectorize/pr39099.ll -Transforms/LoopVectorize/pr44488-predication.ll -Transforms/LoopVectorize/pr45525.ll -Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll -Transforms/LoopVectorize/pr48832.ll -Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll -Transforms/LoopVectorize/pr55100-expand-scev-predicate-used.ll -Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll -Transforms/LoopVectorize/predicatedinst-loop-invariant.ll -Transforms/LoopVectorize/predicate-switch.ll -Transforms/LoopVectorize/preserve-dbg-loc-and-loop-metadata.ll -Transforms/LoopVectorize/reduction-inloop-cond.ll -Transforms/LoopVectorize/reduction-inloop.ll -Transforms/LoopVectorize/reduction-inloop-pred.ll -Transforms/LoopVectorize/reduction-inloop-uf4.ll -Transforms/LoopVectorize/reduction.ll -Transforms/LoopVectorize/reduction-order.ll -Transforms/LoopVectorize/reduction-predselect.ll -Transforms/LoopVectorize/reduction-small-size.ll -Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll -Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll 
-Transforms/LoopVectorize/RISCV/dead-ops-cost.ll -Transforms/LoopVectorize/RISCV/divrem.ll -Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll -Transforms/LoopVectorize/RISCV/inloop-reduction.ll -Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll -Transforms/LoopVectorize/RISCV/mask-index-type.ll -Transforms/LoopVectorize/RISCV/pr154103.ll -Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll -Transforms/LoopVectorize/RISCV/pr88802.ll -Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll -Transforms/LoopVectorize/RISCV/reductions.ll -Transforms/LoopVectorize/RISCV/safe-dep-distance.ll -Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll -Transforms/LoopVectorize/RISCV/strided-accesses.ll -Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll -Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll -Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll -Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll -Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll -Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll -Transforms/LoopVectorize/RISCV/uniform-load-store.ll -Transforms/LoopVectorize/runtime-checks-difference.ll -Transforms/LoopVectorize/same-base-access.ll -Transforms/LoopVectorize/scalable-assume.ll -Transforms/LoopVectorize/scalable-first-order-recurrence.ll -Transforms/LoopVectorize/scalable-noalias-scope-decl.ll -Transforms/LoopVectorize/scalarized-bitcast.ll -Transforms/LoopVectorize/scalarize-masked-call.ll -Transforms/LoopVectorize/scalar-select.ll -Transforms/LoopVectorize/scev-predicate-reasoning.ll -Transforms/LoopVectorize/select-cmp.ll -Transforms/LoopVectorize/select-cmp-multiuse.ll -Transforms/LoopVectorize/select-cmp-predicated.ll -Transforms/LoopVectorize/select-neg-cond.ll -Transforms/LoopVectorize/select-reduction.ll -Transforms/LoopVectorize/select-reduction-start-value-may-be-undef-or-poison.ll -Transforms/LoopVectorize/select-with-fastflags.ll -Transforms/LoopVectorize/single-early-exit-cond-poison.ll -Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll -Transforms/LoopVectorize/single-early-exit-interleave-hint.ll -Transforms/LoopVectorize/single-early-exit-interleave.ll -Transforms/LoopVectorize/single-early-exit-interleave-only.ll -Transforms/LoopVectorize/single_early_exit_live_outs.ll -Transforms/LoopVectorize/single_early_exit.ll -Transforms/LoopVectorize/single_early_exit_with_outer_loop.ll -Transforms/LoopVectorize/single-value-blend-phis.ll -Transforms/LoopVectorize/skip-iterations.ll -Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll -Transforms/LoopVectorize/strict-fadd-interleave-only.ll -Transforms/LoopVectorize/struct-return.ll -Transforms/LoopVectorize/SystemZ/branch-for-predicated-block.ll -Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll -Transforms/LoopVectorize/SystemZ/load-scalarization-cost-0.ll -Transforms/LoopVectorize/SystemZ/mem-interleaving-costs.ll -Transforms/LoopVectorize/SystemZ/pr38110.ll -Transforms/LoopVectorize/SystemZ/pr47665.ll -Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll -Transforms/LoopVectorize/SystemZ/scalar-steps-with-users-demanding-all-lanes-and-first-lane-only.ll -Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll -Transforms/LoopVectorize/tail-folding-counting-down.ll -Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll -Transforms/LoopVectorize/tail-folding-switch.ll 
-Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll -Transforms/LoopVectorize/trip-count-expansion-may-introduce-ub.ll -Transforms/LoopVectorize/tripcount.ll -Transforms/LoopVectorize/trunc-extended-icmps.ll -Transforms/LoopVectorize/uncountable-early-exit-vplan.ll -Transforms/LoopVectorize/uniform-blend.ll -Transforms/LoopVectorize/unroll_nonlatch.ll -Transforms/LoopVectorize/unused-blend-mask-for-first-operand.ll -Transforms/LoopVectorize/vectorize-pointer-phis.ll -Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll -Transforms/LoopVectorize/vect-phiscev-sext-trunc.ll -Transforms/LoopVectorize/vect.stats.ll -Transforms/LoopVectorize/VE/disable_lv.ll -Transforms/LoopVectorize/version-stride-with-integer-casts.ll -Transforms/LoopVectorize/vplan-predicate-switch.ll -Transforms/LoopVectorize/vplan-printing.ll -Transforms/LoopVectorize/vplan-printing-outer-loop.ll -Transforms/LoopVectorize/vplan-printing-reductions.ll -Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll -Transforms/LoopVectorize/vplan-sink-scalars-and-merge-vf1.ll -Transforms/LoopVectorize/vplan-vectorize-inner-loop-reduction.ll -Transforms/LoopVectorize/vplan-widen-call-instruction.ll -Transforms/LoopVectorize/vplan-widen-select-instruction.ll -Transforms/LoopVectorize/WebAssembly/memory-interleave.ll -Transforms/LoopVectorize/X86/avx1.ll -Transforms/LoopVectorize/X86/avx512.ll -Transforms/LoopVectorize/X86/consecutive-ptr-uniforms.ll -Transforms/LoopVectorize/X86/constant-fold.ll -Transforms/LoopVectorize/X86/conversion-cost.ll -Transforms/LoopVectorize/X86/cost-conditional-branches.ll -Transforms/LoopVectorize/X86/cost-constant-known-via-scev.ll -Transforms/LoopVectorize/X86/CostModel/handle-iptr-with-data-layout-to-not-assert.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-f32-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-f32-stride-5.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-f32-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-f64-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-f64-stride-5.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-f64-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i16-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i16-stride-5.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i16-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i32-stride-3-indices-01u.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i32-stride-3-indices-0uu.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i32-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i32-stride-5.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i32-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i64-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i8-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i8-stride-5.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-load-i8-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-f32-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-f32-stride-5.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-f32-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-f64-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-f64-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i16-stride-3.ll 
-Transforms/LoopVectorize/X86/CostModel/interleaved-store-i16-stride-5.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i16-stride-6.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i16-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i32-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i32-stride-5.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i32-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i64-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i64-stride-7.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i8-stride-3.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i8-stride-5.ll -Transforms/LoopVectorize/X86/CostModel/interleaved-store-i8-stride-6.ll -Transforms/LoopVectorize/X86/cost-model.ll -Transforms/LoopVectorize/X86/CostModel/masked-gather-i32-with-i8-index.ll -Transforms/LoopVectorize/X86/CostModel/masked-gather-i64-with-i8-index.ll -Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll -Transforms/LoopVectorize/X86/CostModel/masked-load-i16.ll -Transforms/LoopVectorize/X86/CostModel/masked-load-i32.ll -Transforms/LoopVectorize/X86/CostModel/masked-load-i64.ll -Transforms/LoopVectorize/X86/CostModel/masked-load-i8.ll -Transforms/LoopVectorize/X86/CostModel/masked-scatter-i32-with-i8-index.ll -Transforms/LoopVectorize/X86/CostModel/masked-scatter-i64-with-i8-index.ll -Transforms/LoopVectorize/X86/CostModel/masked-store-i16.ll -Transforms/LoopVectorize/X86/CostModel/masked-store-i32.ll -Transforms/LoopVectorize/X86/CostModel/masked-store-i64.ll -Transforms/LoopVectorize/X86/CostModel/masked-store-i8.ll -Transforms/LoopVectorize/X86/divs-with-tail-folding.ll -Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll -Transforms/LoopVectorize/X86/drop-poison-generating-flags.ll -Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll -Transforms/LoopVectorize/X86/fixed-order-recurrence.ll -Transforms/LoopVectorize/X86/float-induction-x86.ll -Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll -Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll -Transforms/LoopVectorize/X86/gather_scatter.ll -Transforms/LoopVectorize/X86/imprecise-through-phis.ll -Transforms/LoopVectorize/X86/induction-costs.ll -Transforms/LoopVectorize/X86/interleaved-accesses-use-after-free.ll -Transforms/LoopVectorize/X86/interleaved-accesses-waw-dependency.ll -Transforms/LoopVectorize/X86/intrinsiccost.ll -Transforms/LoopVectorize/X86/invariant-load-gather.ll -Transforms/LoopVectorize/X86/invariant-store-vectorization.ll -Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll -Transforms/LoopVectorize/X86/load-deref-pred.ll -Transforms/LoopVectorize/X86/masked_load_store.ll -Transforms/LoopVectorize/X86/masked-store-cost.ll -Transforms/LoopVectorize/X86/multi-exit-cost.ll -Transforms/LoopVectorize/X86/no_fpmath.ll -Transforms/LoopVectorize/X86/no_fpmath_with_hotness.ll -Transforms/LoopVectorize/X86/optsize.ll -Transforms/LoopVectorize/X86/outer_loop_test1_no_explicit_vect_width.ll -Transforms/LoopVectorize/X86/pr109581-unused-blend.ll -Transforms/LoopVectorize/X86/pr141968-instsimplifyfolder.ll -Transforms/LoopVectorize/X86/pr23997.ll -Transforms/LoopVectorize/X86/pr47437.ll -Transforms/LoopVectorize/X86/pr51366-sunk-instruction-used-outside-of-loop.ll -Transforms/LoopVectorize/X86/pr54634.ll -Transforms/LoopVectorize/X86/pr55096-scalarize-add.ll 
-Transforms/LoopVectorize/X86/pr56319-vector-exit-cond-optimization-epilogue-vectorization.ll -Transforms/LoopVectorize/X86/pr81872.ll -Transforms/LoopVectorize/X86/predicate-switch.ll -Transforms/LoopVectorize/X86/propagate-metadata.ll -Transforms/LoopVectorize/X86/reduction-fastmath.ll -Transforms/LoopVectorize/X86/reg-usage.ll -Transforms/LoopVectorize/X86/replicate-recipe-with-only-first-lane-used.ll -Transforms/LoopVectorize/X86/replicate-uniform-call.ll -Transforms/LoopVectorize/X86/scatter_crash.ll -Transforms/LoopVectorize/X86/small-size.ll -Transforms/LoopVectorize/X86/strided_load_cost.ll -Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll -Transforms/LoopVectorize/X86/tail_loop_folding.ll -Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll -Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll -Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll -Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll -Transforms/LoopVectorize/X86/vector_max_bandwidth.ll -Transforms/LoopVectorize/X86/vector_ptr_load_store.ll -Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll -Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll -Transforms/LoopVectorize/X86/x86-interleaved-store-accesses-with-gaps.ll -Transforms/LoopVectorize/X86/x86-pr39099.ll -Transforms/LoopVectorize/X86/x86-predication.ll Transforms/LoopVersioning/add-phi-update-users.ll Transforms/LoopVersioning/basic.ll Transforms/LoopVersioning/bound-check-partially-known.ll @@ -1739,6 +1296,7 @@ Transforms/PGOProfile/chr-dead-pred.ll Transforms/PGOProfile/chr-dup-threshold.ll Transforms/PGOProfile/chr.ll Transforms/PGOProfile/chr-poison.ll +Transforms/PGOProfile/chr-lifetimes.ll Transforms/PGOProfile/comdat.ll Transforms/PGOProfile/cspgo_profile_summary.ll Transforms/PGOProfile/memop_profile_funclet_wasm.ll @@ -1856,7 +1414,6 @@ Transforms/SimplifyCFG/merge-cond-stores.ll Transforms/SimplifyCFG/multiple-phis.ll Transforms/SimplifyCFG/PhiBlockMerge.ll Transforms/SimplifyCFG/pr48641.ll -Transforms/SimplifyCFG/preserve-branchweights.ll Transforms/SimplifyCFG/preserve-store-alignment.ll Transforms/SimplifyCFG/rangereduce.ll Transforms/SimplifyCFG/RISCV/select-trunc-i64.ll @@ -1866,7 +1423,6 @@ Transforms/SimplifyCFG/safe-abs.ll Transforms/SimplifyCFG/SimplifyEqualityComparisonWithOnlyPredecessor-domtree-preservation-edgecase.ll Transforms/SimplifyCFG/speculate-blocks.ll Transforms/SimplifyCFG/speculate-derefable-load.ll -Transforms/SimplifyCFG/suppress-zero-branch-weights.ll Transforms/SimplifyCFG/switch_create-custom-dl.ll Transforms/SimplifyCFG/switch_create.ll Transforms/SimplifyCFG/switch-dup-bbs.ll diff --git a/mlir/.clang-format b/mlir/.clang-format index a74fda4b67345..76cc928e64588 100644 --- a/mlir/.clang-format +++ b/mlir/.clang-format @@ -1,2 +1,3 @@ BasedOnStyle: LLVM AlwaysBreakTemplateDeclarations: Yes +LineEnding: LF diff --git a/mlir/cmake/modules/AddMLIRPython.cmake b/mlir/cmake/modules/AddMLIRPython.cmake index f25595116edca..fa6aec8a603a9 100644 --- a/mlir/cmake/modules/AddMLIRPython.cmake +++ b/mlir/cmake/modules/AddMLIRPython.cmake @@ -113,11 +113,12 @@ endfunction() # DEPENDS_TARGET_SRC_DEPS: List of cpp sources for extension library (for generating a DEPFILE). # IMPORT_PATHS: List of paths to add to PYTHONPATH for stubgen. # PATTERN_FILE: (Optional) Pattern file (see https://nanobind.readthedocs.io/en/latest/typing.html#pattern-files). +# VERBOSE: Emit logging/status messages during stub generation (default: OFF). 
# Outputs: # NB_STUBGEN_CUSTOM_TARGET: The target corresponding to generation which other targets can depend on. function(mlir_generate_type_stubs) cmake_parse_arguments(ARG - "" + "VERBOSE" "MODULE_NAME;OUTPUT_DIR;PATTERN_FILE" "IMPORT_PATHS;DEPENDS_TARGETS;OUTPUTS;DEPENDS_TARGET_SRC_DEPS" ${ARGN}) @@ -152,6 +153,9 @@ function(mlir_generate_type_stubs) --include-private --output-dir "${ARG_OUTPUT_DIR}") + if(NOT ARG_VERBOSE) + list(APPEND _nb_stubgen_cmd "--quiet") + endif() if(ARG_PATTERN_FILE) list(APPEND _nb_stubgen_cmd "-p;${ARG_PATTERN_FILE}") list(APPEND ARG_DEPENDS_TARGETS "${ARG_PATTERN_FILE}") @@ -166,10 +170,35 @@ function(mlir_generate_type_stubs) file(GENERATE OUTPUT "${_depfile}" CONTENT "${_depfiles}") endif() - message(DEBUG "Generating type-stubs outputs ${_generated_type_stubs}") + if(ARG_VERBOSE) + message(STATUS "Generating type-stubs outputs ${_generated_type_stubs}") + endif() + + # If PYTHONPATH is set and points to the build location of the python package then when stubgen runs, _mlir will get + # imported twice and bad things will happen (e.g., Assertion `!instance && “PyGlobals already constructed”’). + # This happens because mlir is a namespace package and the importer/loader can't distinguish between + # mlir._mlir_libs._mlir and _mlir_libs._mlir imported from CWD. + # So try to filter out any entries in PYTHONPATH that end in "MLIR_BINDINGS_PYTHON_INSTALL_PREFIX/.." + # (e.g., python_packages/mlir_core/). + set(_pythonpath "$ENV{PYTHONPATH}") + cmake_path(CONVERT "${MLIR_BINDINGS_PYTHON_INSTALL_PREFIX}/.." TO_NATIVE_PATH_LIST _install_prefix NORMALIZE) + if(WIN32) + set(_path_sep ";") + set(_trailing_sep "\\") + else() + set(_path_sep ":") + set(_trailing_sep "/") + # `;` is the CMake list delimiter so Windows paths are automatically lists + # and Unix paths can be made into lists by replacing `:` with `;` + string(REPLACE "${_path_sep}" ";" _pythonpath "${_pythonpath}") + endif() + string(REGEX REPLACE "${_trailing_sep}$" "" _install_prefix "${_install_prefix}") + list(FILTER _pythonpath EXCLUDE REGEX "(${_install_prefix}|${_install_prefix}${_trailing_sep})$") + # Note, ${_pythonpath} is a list but "${_pythonpath}" is not a list - it's a string with ";" chars in it. 
+ string(JOIN "${_path_sep}" _pythonpath ${_pythonpath}) add_custom_command( OUTPUT ${_generated_type_stubs} - COMMAND ${_nb_stubgen_cmd} + COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH="${_pythonpath}" ${_nb_stubgen_cmd} WORKING_DIRECTORY "${CMAKE_CURRENT_FUNCTION_LIST_DIR}" DEPENDS "${ARG_DEPENDS_TARGETS}" DEPFILE "${_depfile}" diff --git a/mlir/docs/DefiningDialects/Operations.md b/mlir/docs/DefiningDialects/Operations.md index f988bebea1223..7c1be84727476 100644 --- a/mlir/docs/DefiningDialects/Operations.md +++ b/mlir/docs/DefiningDialects/Operations.md @@ -1649,6 +1649,15 @@ inline constexpr MyBitEnum operator&(MyBitEnum a, MyBitEnum b) { inline constexpr MyBitEnum operator^(MyBitEnum a, MyBitEnum b) { return static_cast(static_cast(a) ^ static_cast(b)); } +inline constexpr MyBitEnum &operator|=(MyBitEnum &a, MyBitEnum b) { + return a = a | b; +} +inline constexpr MyBitEnum &operator&=(MyBitEnum &a, MyBitEnum b) { + return a = a & b; +} +inline constexpr MyBitEnum &operator^=(MyBitEnum &a, MyBitEnum b) { + return a = a ^ b; +} inline constexpr MyBitEnum operator~(MyBitEnum bits) { // Ensure only bits that can be present in the enum are set return static_cast(~static_cast(bits) & static_cast(15u)); diff --git a/mlir/docs/Tutorials/Toy/Ch-6.md b/mlir/docs/Tutorials/Toy/Ch-6.md index 529de55304206..178c07338ac45 100644 --- a/mlir/docs/Tutorials/Toy/Ch-6.md +++ b/mlir/docs/Tutorials/Toy/Ch-6.md @@ -245,7 +245,7 @@ define void @main() ``` The full code listing for dumping LLVM IR can be found in -`examples/toy/Ch6/toy.cpp` in the `dumpLLVMIR()` function: +`examples/toy/Ch6/toyc.cpp` in the `dumpLLVMIR()` function: ```c++ diff --git a/mlir/examples/standalone/CMakeLists.txt b/mlir/examples/standalone/CMakeLists.txt index e2bcda7fa6f0b..c6c49fde12d2e 100644 --- a/mlir/examples/standalone/CMakeLists.txt +++ b/mlir/examples/standalone/CMakeLists.txt @@ -63,8 +63,12 @@ if(MLIR_ENABLE_BINDINGS_PYTHON) include(MLIRDetectPythonEnv) mlir_configure_python_dev_packages() # Note: for EXTERNAL_PROJECT_BUILD this must be set from the command line. - set(MLIR_PYTHON_PACKAGE_PREFIX "mlir_standalone" CACHE STRING "" FORCE) - set(MLIR_BINDINGS_PYTHON_INSTALL_PREFIX "python_packages/standalone/${MLIR_PYTHON_PACKAGE_PREFIX}" CACHE STRING "" FORCE) + if(NOT MLIR_PYTHON_PACKAGE_PREFIX) + set(MLIR_PYTHON_PACKAGE_PREFIX "mlir_standalone" CACHE STRING "" FORCE) + endif() + if(NOT MLIR_BINDINGS_PYTHON_INSTALL_PREFIX) + set(MLIR_BINDINGS_PYTHON_INSTALL_PREFIX "python_packages/standalone/${MLIR_PYTHON_PACKAGE_PREFIX}" CACHE STRING "" FORCE) + endif() add_subdirectory(python) endif() add_subdirectory(test) diff --git a/mlir/examples/standalone/include/Standalone/StandalonePasses.td b/mlir/examples/standalone/include/Standalone/StandalonePasses.td index 4cb2be02e4a20..d5aad34f2f457 100644 --- a/mlir/examples/standalone/include/Standalone/StandalonePasses.td +++ b/mlir/examples/standalone/include/Standalone/StandalonePasses.td @@ -1,4 +1,4 @@ -//===- StandalonePsss.td - Standalone dialect passes -------*- tablegen -*-===// +//===- StandalonePasses.td - Standalone dialect passes -------*- tablegen -*-===// // // This file is licensed under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
diff --git a/mlir/examples/standalone/pyproject.toml b/mlir/examples/standalone/pyproject.toml new file mode 100644 index 0000000000000..5a1e6e86513c3 --- /dev/null +++ b/mlir/examples/standalone/pyproject.toml @@ -0,0 +1,65 @@ +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# Copyright (c) 2025. + +[project] +name = "standalone-python-bindings" +dynamic = ["version"] +requires-python = ">=3.8,<=3.14" +dependencies = [ + "numpy>=1.19.5, <=2.1.2", + "PyYAML>=5.4.0, <=6.0.1", + "ml_dtypes>=0.1.0, <=0.6.0; python_version<'3.13'", + "ml_dtypes>=0.5.0, <=0.6.0; python_version>='3.13'", +] + +[project.urls] +Homepage = "https://github.com/llvm/llvm-project" +Discussions = "https://discourse.llvm.org/" +"Issue Tracker" = "https://github.com/llvm/llvm-project/issues?q=is%3Aissue%20state%3Aopen%20label%3Amlir%3Apython%20" +"Source Code" = "https://github.com/llvm/llvm-project/tree/main/mlir/python" + +[build-system] +requires = [ + "scikit-build-core>=0.10.7", + "typing_extensions>=4.12.2", + "nanobind>=2.9, <3.0", + "pybind11>=2.10.0, <=2.13.6", +] +build-backend = "scikit_build_core.build" + +[tool.scikit-build] +# This is the minimum version of scikit-build-core. +minimum-version = "0.10.7" +# This pyproject.toml must be adjacent to the root CMakeLists.txt (wherever project(...) is specified). +cmake.source-dir = "." +# This is for installing/distributing the python bindings target and only the python bindings target. +build.targets = ["StandalonePythonModules"] +install.components = ["StandalonePythonModules"] + +[tool.scikit-build.cmake.define] +# Optional +CMAKE_C_COMPILER = { env = "CMAKE_C_COMPILER", default = "" } +CMAKE_CXX_COMPILER = { env = "CMAKE_CXX_COMPILER", default = "" } +CMAKE_C_COMPILER_LAUNCHER = { env = "CMAKE_C_COMPILER_LAUNCHER", default = "" } +CMAKE_CXX_COMPILER_LAUNCHER = { env = "CMAKE_CXX_COMPILER_LAUNCHER", default = "" } +CMAKE_GENERATOR = { env = "CMAKE_GENERATOR", default = "Ninja" } +LLVM_USE_LINKER = { env = "LLVM_USE_LINKER", default = "" } +# Optional but highly recommended (this makes the bindings compatible with other bindings packages +# by preventing symbol collisions). +CMAKE_VISIBILITY_INLINES_HIDDEN = "ON" +CMAKE_C_VISIBILITY_PRESET = "hidden" +CMAKE_CXX_VISIBILITY_PRESET = "hidden" + +# Non-optional (alternatively you could use CMAKE_PREFIX_PATH here). +MLIR_DIR = { env = "MLIR_DIR", default = "" } +# Non-optional +CMAKE_BUILD_TYPE = { env = "CMAKE_BUILD_TYPE", default = "Release" } +MLIR_ENABLE_BINDINGS_PYTHON = "ON" +# Effectively non-optional (any downstream project should specify this). +MLIR_PYTHON_PACKAGE_PREFIX = "mlir_standalone" +# This specifies the directory in the install directory (i.e., /tmp/pip-wheel/platlib) where _mlir_libs, dialects, etc. +# are installed. Thus, this will be the package location (and the name of the package) that pip assumes is +# the root package. 
+MLIR_BINDINGS_PYTHON_INSTALL_PREFIX = "mlir_standalone" diff --git a/mlir/examples/standalone/python/CMakeLists.txt b/mlir/examples/standalone/python/CMakeLists.txt index d48c5bcdde137..905c944939756 100644 --- a/mlir/examples/standalone/python/CMakeLists.txt +++ b/mlir/examples/standalone/python/CMakeLists.txt @@ -30,6 +30,9 @@ declare_mlir_python_extension(StandalonePythonSources.Pybind11Extension PRIVATE_LINK_LIBS LLVMSupport EMBED_CAPI_LINK_LIBS + MLIRCAPIIR + MLIRCAPIArith + MLIRCAPITransforms StandaloneCAPI PYTHON_BINDINGS_LIBRARY pybind11 ) @@ -42,6 +45,9 @@ declare_mlir_python_extension(StandalonePythonSources.NanobindExtension PRIVATE_LINK_LIBS LLVMSupport EMBED_CAPI_LINK_LIBS + MLIRCAPIIR + MLIRCAPIArith + MLIRCAPITransforms StandaloneCAPI PYTHON_BINDINGS_LIBRARY nanobind ) @@ -58,9 +64,6 @@ add_mlir_python_common_capi_library(StandalonePythonCAPI RELATIVE_INSTALL_ROOT "../../../.." DECLARED_SOURCES StandalonePythonSources - # TODO: Remove this in favor of showing fine grained registration once - # available. - MLIRPythonExtension.RegisterEverything MLIRPythonSources.Core MLIRPythonSources.Dialects.builtin ) @@ -71,75 +74,77 @@ add_mlir_python_common_capi_library(StandalonePythonCAPI set(StandalonePythonModules_ROOT_PREFIX "${MLIR_BINARY_DIR}/${MLIR_BINDINGS_PYTHON_INSTALL_PREFIX}") -# Everything here is very tightly coupled. See the ample descriptions at the bottom of -# mlir/python/CMakeLists.txt. - -# For a non-external projects build (e.g., installed distro) the type gen targets for the core _mlir module -# need to be re-declared. On the contrary, for an external projects build, the MLIRPythonExtension.Core.type_stub_gen -# target already exists and can just be added to DECLARED_SOURCES (see below). -if(NOT EXTERNAL_PROJECT_BUILD) - set(_core_type_stub_sources - _mlir/__init__.pyi - _mlir/ir.pyi - _mlir/passmanager.pyi - _mlir/rewrite.pyi - ) - get_target_property(_core_extension_srcs MLIRPythonExtension.Core INTERFACE_SOURCES) +if(NOT CMAKE_CROSSCOMPILING) + # Everything here is very tightly coupled. See the ample descriptions at the bottom of + # mlir/python/CMakeLists.txt. + + # For a non-external projects build (e.g., installed distro) the type gen targets for the core _mlir module + # need to be re-declared. On the contrary, for an external projects build, the MLIRPythonExtension.Core.type_stub_gen + # target already exists and can just be added to DECLARED_SOURCES (see below). 
+ if(NOT EXTERNAL_PROJECT_BUILD) + set(_core_type_stub_sources + _mlir/__init__.pyi + _mlir/ir.pyi + _mlir/passmanager.pyi + _mlir/rewrite.pyi + ) + get_target_property(_core_extension_srcs MLIRPythonExtension.Core INTERFACE_SOURCES) + mlir_generate_type_stubs( + MODULE_NAME _mlir + DEPENDS_TARGETS StandalonePythonModules.extension._mlir.dso + OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs/_mlir_libs" + OUTPUTS "${_core_type_stub_sources}" + DEPENDS_TARGET_SRC_DEPS "${_core_extension_srcs}" + IMPORT_PATHS "${StandalonePythonModules_ROOT_PREFIX}/_mlir_libs" + VERBOSE + ) + set(_mlir_typestub_gen_target "${NB_STUBGEN_CUSTOM_TARGET}") + + list(TRANSFORM _core_type_stub_sources PREPEND "_mlir_libs/") + declare_mlir_python_sources( + StandalonePythonExtension.Core.type_stub_gen + ROOT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs" + ADD_TO_PARENT StandalonePythonSources + SOURCES "${_core_type_stub_sources}" + ) + endif() + + get_target_property(_standalone_extension_srcs StandalonePythonSources.NanobindExtension INTERFACE_SOURCES) mlir_generate_type_stubs( - MODULE_NAME _mlir - DEPENDS_TARGETS StandalonePythonModules.extension._mlir.dso + MODULE_NAME mlir_standalone._mlir_libs._standaloneDialectsNanobind + DEPENDS_TARGETS + StandalonePythonModules.extension._mlir.dso + StandalonePythonModules.extension._standaloneDialectsNanobind.dso OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs/_mlir_libs" - OUTPUTS "${_core_type_stub_sources}" - DEPENDS_TARGET_SRC_DEPS "${_core_extension_srcs}" - IMPORT_PATHS "${StandalonePythonModules_ROOT_PREFIX}/_mlir_libs" + OUTPUTS + _standaloneDialectsNanobind/__init__.pyi + _standaloneDialectsNanobind/standalone.pyi + DEPENDS_TARGET_SRC_DEPS "${_standalone_extension_srcs}" + IMPORT_PATHS "${StandalonePythonModules_ROOT_PREFIX}/.." ) - set(_mlir_typestub_gen_target "${NB_STUBGEN_CUSTOM_TARGET}") + set(_standaloneDialectsNanobind_typestub_gen_target "${NB_STUBGEN_CUSTOM_TARGET}") - list(TRANSFORM _core_type_stub_sources PREPEND "_mlir_libs/") declare_mlir_python_sources( - StandalonePythonExtension.Core.type_stub_gen + StandalonePythonSources.type_stub_gen ROOT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs" ADD_TO_PARENT StandalonePythonSources - SOURCES "${_core_type_stub_sources}" + SOURCES + _mlir_libs/_standaloneDialectsNanobind/__init__.pyi + _mlir_libs/_standaloneDialectsNanobind/standalone.pyi ) endif() -get_target_property(_standalone_extension_srcs StandalonePythonSources.NanobindExtension INTERFACE_SOURCES) -mlir_generate_type_stubs( - MODULE_NAME mlir_standalone._mlir_libs._standaloneDialectsNanobind - DEPENDS_TARGETS - StandalonePythonModules.extension._mlir.dso - StandalonePythonModules.extension._standaloneDialectsNanobind.dso - OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs/_mlir_libs" - OUTPUTS - _standaloneDialectsNanobind/__init__.pyi - _standaloneDialectsNanobind/standalone.pyi - DEPENDS_TARGET_SRC_DEPS "${_standalone_extension_srcs}" - IMPORT_PATHS "${StandalonePythonModules_ROOT_PREFIX}/.." -) -set(_standaloneDialectsNanobind_typestub_gen_target "${NB_STUBGEN_CUSTOM_TARGET}") - -declare_mlir_python_sources( - StandalonePythonSources.type_stub_gen - ROOT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs" - ADD_TO_PARENT StandalonePythonSources - SOURCES - _mlir_libs/_standaloneDialectsNanobind/__init__.pyi - _mlir_libs/_standaloneDialectsNanobind/standalone.pyi -) set(_declared_sources StandalonePythonSources - # TODO: Remove this in favor of showing fine grained registration once - # available. 
- MLIRPythonExtension.RegisterEverything MLIRPythonSources.Core MLIRPythonSources.Dialects.builtin ) # For an external projects build, the MLIRPythonExtension.Core.type_stub_gen # target already exists and can just be added to DECLARED_SOURCES. -if(EXTERNAL_PROJECT_BUILD) +if(EXTERNAL_PROJECT_BUILD AND (NOT CMAKE_CROSSCOMPILING)) list(APPEND _declared_sources MLIRPythonExtension.Core.type_stub_gen) endif() + add_mlir_python_modules(StandalonePythonModules ROOT_PREFIX "${StandalonePythonModules_ROOT_PREFIX}" INSTALL_PREFIX "${MLIR_BINDINGS_PYTHON_INSTALL_PREFIX}" @@ -147,7 +152,10 @@ add_mlir_python_modules(StandalonePythonModules COMMON_CAPI_LINK_LIBS StandalonePythonCAPI ) -if(NOT EXTERNAL_PROJECT_BUILD) - add_dependencies(StandalonePythonModules "${_mlir_typestub_gen_target}") + +if(NOT CMAKE_CROSSCOMPILING) + if(NOT EXTERNAL_PROJECT_BUILD) + add_dependencies(StandalonePythonModules "${_mlir_typestub_gen_target}") + endif() + add_dependencies(StandalonePythonModules "${_standaloneDialectsNanobind_typestub_gen_target}") endif() -add_dependencies(StandalonePythonModules "${_standaloneDialectsNanobind_typestub_gen_target}") diff --git a/mlir/examples/standalone/python/StandaloneExtensionNanobind.cpp b/mlir/examples/standalone/python/StandaloneExtensionNanobind.cpp index e06ec3b6472b8..0ec6cdfa7994b 100644 --- a/mlir/examples/standalone/python/StandaloneExtensionNanobind.cpp +++ b/mlir/examples/standalone/python/StandaloneExtensionNanobind.cpp @@ -10,6 +10,7 @@ //===----------------------------------------------------------------------===// #include "Standalone-c/Dialects.h" +#include "mlir-c/Dialect/Arith.h" #include "mlir/Bindings/Python/Nanobind.h" #include "mlir/Bindings/Python/NanobindAdaptors.h" @@ -22,17 +23,21 @@ NB_MODULE(_standaloneDialectsNanobind, m) { auto standaloneM = m.def_submodule("standalone"); standaloneM.def( - "register_dialect", + "register_dialects", [](MlirContext context, bool load) { - MlirDialectHandle handle = mlirGetDialectHandle__standalone__(); - mlirDialectHandleRegisterDialect(handle, context); + MlirDialectHandle arithHandle = mlirGetDialectHandle__arith__(); + MlirDialectHandle standaloneHandle = + mlirGetDialectHandle__standalone__(); + mlirDialectHandleRegisterDialect(arithHandle, context); + mlirDialectHandleRegisterDialect(standaloneHandle, context); if (load) { - mlirDialectHandleLoadDialect(handle, context); + mlirDialectHandleLoadDialect(arithHandle, context); + mlirDialectHandleRegisterDialect(standaloneHandle, context); } }, nb::arg("context").none() = nb::none(), nb::arg("load") = true, // clang-format off - nb::sig("def register_dialect(context: " MAKE_MLIR_PYTHON_QUALNAME("ir.Context") ", load: bool = True) -> None") + nb::sig("def register_dialects(context: " MAKE_MLIR_PYTHON_QUALNAME("ir.Context") ", load: bool = True) -> None") // clang-format on ); } diff --git a/mlir/examples/standalone/python/StandaloneExtensionPybind11.cpp b/mlir/examples/standalone/python/StandaloneExtensionPybind11.cpp index 397db4c20e743..da8c2167dc36b 100644 --- a/mlir/examples/standalone/python/StandaloneExtensionPybind11.cpp +++ b/mlir/examples/standalone/python/StandaloneExtensionPybind11.cpp @@ -10,6 +10,7 @@ //===----------------------------------------------------------------------===// #include "Standalone-c/Dialects.h" +#include "mlir-c/Dialect/Arith.h" #include "mlir/Bindings/Python/PybindAdaptors.h" using namespace mlir::python::adaptors; @@ -21,12 +22,16 @@ PYBIND11_MODULE(_standaloneDialectsPybind11, m) { auto standaloneM = m.def_submodule("standalone"); 
standaloneM.def( - "register_dialect", + "register_dialects", [](MlirContext context, bool load) { - MlirDialectHandle handle = mlirGetDialectHandle__standalone__(); - mlirDialectHandleRegisterDialect(handle, context); + MlirDialectHandle arithHandle = mlirGetDialectHandle__arith__(); + MlirDialectHandle standaloneHandle = + mlirGetDialectHandle__standalone__(); + mlirDialectHandleRegisterDialect(arithHandle, context); + mlirDialectHandleRegisterDialect(standaloneHandle, context); if (load) { - mlirDialectHandleLoadDialect(handle, context); + mlirDialectHandleLoadDialect(arithHandle, context); + mlirDialectHandleRegisterDialect(standaloneHandle, context); } }, py::arg("context") = py::none(), py::arg("load") = true); diff --git a/mlir/examples/standalone/test/CAPI/CMakeLists.txt b/mlir/examples/standalone/test/CAPI/CMakeLists.txt index eaa6cfc102c73..9d5cda5eca5fc 100644 --- a/mlir/examples/standalone/test/CAPI/CMakeLists.txt +++ b/mlir/examples/standalone/test/CAPI/CMakeLists.txt @@ -6,9 +6,7 @@ add_mlir_aggregate(StandaloneCAPITestLib SHARED EMBED_LIBS MLIRCAPIIR - # TODO: Remove this in favor of showing fine grained dialect registration - # (once available). - MLIRCAPIRegisterEverything + MLIRCAPIArith StandaloneCAPI ) diff --git a/mlir/examples/standalone/test/CAPI/standalone-capi-test.c b/mlir/examples/standalone/test/CAPI/standalone-capi-test.c index 54f3ca7f7ff14..62add133fd4c5 100644 --- a/mlir/examples/standalone/test/CAPI/standalone-capi-test.c +++ b/mlir/examples/standalone/test/CAPI/standalone-capi-test.c @@ -12,21 +12,12 @@ #include #include "Standalone-c/Dialects.h" +#include "mlir-c/Dialect/Arith.h" #include "mlir-c/IR.h" -#include "mlir-c/RegisterEverything.h" - -static void registerAllUpstreamDialects(MlirContext ctx) { - MlirDialectRegistry registry = mlirDialectRegistryCreate(); - mlirRegisterAllDialects(registry); - mlirContextAppendDialectRegistry(ctx, registry); - mlirDialectRegistryDestroy(registry); -} int main(int argc, char **argv) { MlirContext ctx = mlirContextCreate(); - // TODO: Create the dialect handles for the builtin dialects and avoid this. - // This adds dozens of MB of binary size over just the standalone dialect. 
- registerAllUpstreamDialects(ctx); + mlirDialectHandleRegisterDialect(mlirGetDialectHandle__arith__(), ctx); mlirDialectHandleRegisterDialect(mlirGetDialectHandle__standalone__(), ctx); MlirModule module = mlirModuleCreateParse( diff --git a/mlir/examples/standalone/test/python/smoketest.py b/mlir/examples/standalone/test/python/smoketest.py index bd40c65d16164..26d84fd63e947 100644 --- a/mlir/examples/standalone/test/python/smoketest.py +++ b/mlir/examples/standalone/test/python/smoketest.py @@ -3,7 +3,6 @@ import sys from mlir_standalone.ir import * -from mlir_standalone.dialects import builtin as builtin_d if sys.argv[1] == "pybind11": from mlir_standalone.dialects import standalone_pybind11 as standalone_d @@ -14,7 +13,7 @@ with Context(): - standalone_d.register_dialect() + standalone_d.register_dialects() module = Module.parse( """ %0 = arith.constant 2 : i32 diff --git a/mlir/include/mlir-c/Dialect/LLVM.h b/mlir/include/mlir-c/Dialect/LLVM.h index 65b14254e4492..c1ade9ed8617c 100644 --- a/mlir/include/mlir-c/Dialect/LLVM.h +++ b/mlir/include/mlir-c/Dialect/LLVM.h @@ -306,7 +306,8 @@ typedef enum MlirLLVMDINameTableKind MlirLLVMDINameTableKind; MLIR_CAPI_EXPORTED MlirAttribute mlirLLVMDICompileUnitAttrGet( MlirContext ctx, MlirAttribute id, unsigned int sourceLanguage, MlirAttribute file, MlirAttribute producer, bool isOptimized, - MlirLLVMDIEmissionKind emissionKind, MlirLLVMDINameTableKind nameTableKind); + MlirLLVMDIEmissionKind emissionKind, MlirLLVMDINameTableKind nameTableKind, + MlirAttribute splitDebugFilename); /// Creates a LLVM DIFlags attribute. MLIR_CAPI_EXPORTED MlirAttribute mlirLLVMDIFlagsAttrGet(MlirContext ctx, diff --git a/mlir/include/mlir-c/IR.h b/mlir/include/mlir-c/IR.h index 061d7620ba077..c464e4da66f17 100644 --- a/mlir/include/mlir-c/IR.h +++ b/mlir/include/mlir-c/IR.h @@ -634,6 +634,10 @@ MLIR_CAPI_EXPORTED MlirContext mlirOperationGetContext(MlirOperation op); /// Gets the location of the operation. MLIR_CAPI_EXPORTED MlirLocation mlirOperationGetLocation(MlirOperation op); +/// Sets the location of the operation. +MLIR_CAPI_EXPORTED void mlirOperationSetLocation(MlirOperation op, + MlirLocation loc); + /// Gets the type id of the operation. /// Returns null if the operation does not have a registered operation /// description. diff --git a/mlir/include/mlir-c/Rewrite.h b/mlir/include/mlir-c/Rewrite.h index 374d2fb78de88..77be1f480eacf 100644 --- a/mlir/include/mlir-c/Rewrite.h +++ b/mlir/include/mlir-c/Rewrite.h @@ -37,6 +37,7 @@ DEFINE_C_API_STRUCT(MlirRewriterBase, void); DEFINE_C_API_STRUCT(MlirFrozenRewritePatternSet, void); DEFINE_C_API_STRUCT(MlirGreedyRewriteDriverConfig, void); DEFINE_C_API_STRUCT(MlirRewritePatternSet, void); +DEFINE_C_API_STRUCT(MlirPatternRewriter, void); //===----------------------------------------------------------------------===// /// RewriterBase API inherited from OpBuilder @@ -315,6 +316,8 @@ MLIR_CAPI_EXPORTED MlirLogicalResult mlirApplyPatternsAndFoldGreedily( #if MLIR_ENABLE_PDL_IN_PATTERNMATCH DEFINE_C_API_STRUCT(MlirPDLPatternModule, void); +DEFINE_C_API_STRUCT(MlirPDLValue, const void); +DEFINE_C_API_STRUCT(MlirPDLResultList, void); MLIR_CAPI_EXPORTED MlirPDLPatternModule mlirPDLPatternModuleFromModule(MlirModule op); @@ -323,6 +326,69 @@ MLIR_CAPI_EXPORTED void mlirPDLPatternModuleDestroy(MlirPDLPatternModule op); MLIR_CAPI_EXPORTED MlirRewritePatternSet mlirRewritePatternSetFromPDLPatternModule(MlirPDLPatternModule op); + +/// Cast the MlirPDLValue to an MlirValue. 
+/// Return a null value if the cast fails, just like llvm::dyn_cast. +MLIR_CAPI_EXPORTED MlirValue mlirPDLValueAsValue(MlirPDLValue value); + +/// Cast the MlirPDLValue to an MlirType. +/// Return a null value if the cast fails, just like llvm::dyn_cast. +MLIR_CAPI_EXPORTED MlirType mlirPDLValueAsType(MlirPDLValue value); + +/// Cast the MlirPDLValue to an MlirOperation. +/// Return a null value if the cast fails, just like llvm::dyn_cast. +MLIR_CAPI_EXPORTED MlirOperation mlirPDLValueAsOperation(MlirPDLValue value); + +/// Cast the MlirPDLValue to an MlirAttribute. +/// Return a null value if the cast fails, just like llvm::dyn_cast. +MLIR_CAPI_EXPORTED MlirAttribute mlirPDLValueAsAttribute(MlirPDLValue value); + +/// Push the MlirValue into the given MlirPDLResultList. +MLIR_CAPI_EXPORTED void +mlirPDLResultListPushBackValue(MlirPDLResultList results, MlirValue value); + +/// Push the MlirType into the given MlirPDLResultList. +MLIR_CAPI_EXPORTED void mlirPDLResultListPushBackType(MlirPDLResultList results, + MlirType value); + +/// Push the MlirOperation into the given MlirPDLResultList. +MLIR_CAPI_EXPORTED void +mlirPDLResultListPushBackOperation(MlirPDLResultList results, + MlirOperation value); + +/// Push the MlirAttribute into the given MlirPDLResultList. +MLIR_CAPI_EXPORTED void +mlirPDLResultListPushBackAttribute(MlirPDLResultList results, + MlirAttribute value); + +/// This function type is used as callbacks for PDL native rewrite functions. +/// Input values can be accessed by `values` with its size `nValues`; +/// output values can be added into `results` by `mlirPDLResultListPushBack*` +/// APIs. And the return value indicates whether the rewrite succeeds. +typedef MlirLogicalResult (*MlirPDLRewriteFunction)( + MlirPatternRewriter rewriter, MlirPDLResultList results, size_t nValues, + MlirPDLValue *values, void *userData); + +/// Register a rewrite function into the given PDL pattern module. +/// `userData` will be provided as an argument to the rewrite function. +MLIR_CAPI_EXPORTED void mlirPDLPatternModuleRegisterRewriteFunction( + MlirPDLPatternModule pdlModule, MlirStringRef name, + MlirPDLRewriteFunction rewriteFn, void *userData); + +/// This function type is used as callbacks for PDL native constraint functions. +/// Input values can be accessed by `values` with its size `nValues`; +/// output values can be added into `results` by `mlirPDLResultListPushBack*` +/// APIs. And the return value indicates whether the constraint holds. +typedef MlirLogicalResult (*MlirPDLConstraintFunction)( + MlirPatternRewriter rewriter, MlirPDLResultList results, size_t nValues, + MlirPDLValue *values, void *userData); + +/// Register a constraint function into the given PDL pattern module. +/// `userData` will be provided as an argument to the constraint function. 
+MLIR_CAPI_EXPORTED void mlirPDLPatternModuleRegisterConstraintFunction( + MlirPDLPatternModule pdlModule, MlirStringRef name, + MlirPDLConstraintFunction constraintFn, void *userData); + #endif // MLIR_ENABLE_PDL_IN_PATTERNMATCH #undef DEFINE_C_API_STRUCT diff --git a/mlir/include/mlir/Bindings/Python/NanobindAdaptors.h b/mlir/include/mlir/Bindings/Python/NanobindAdaptors.h index 8744d8d0e4bca..847951ab5fd46 100644 --- a/mlir/include/mlir/Bindings/Python/NanobindAdaptors.h +++ b/mlir/include/mlir/Bindings/Python/NanobindAdaptors.h @@ -19,7 +19,9 @@ #ifndef MLIR_BINDINGS_PYTHON_NANOBINDADAPTORS_H #define MLIR_BINDINGS_PYTHON_NANOBINDADAPTORS_H +#include #include +#include #include #include "mlir-c/Diagnostics.h" @@ -30,6 +32,57 @@ // clang-format on #include "llvm/ADT/Twine.h" +namespace mlir { +namespace python { +namespace { + +// Safely calls Python initialization code on first use, avoiding deadlocks. +template +class SafeInit { +public: + typedef std::unique_ptr (*F)(); + + explicit SafeInit(F init_fn) : initFn(init_fn) {} + + T &get() { + if (T *result = output.load()) { + return *result; + } + + // Note: init_fn() may be called multiple times if, for example, the GIL is + // released during its execution. The intended use case is for module + // imports which are safe to perform multiple times. We are careful not to + // hold a lock across init_fn() to avoid lock ordering problems. + std::unique_ptr m = initFn(); + { + nanobind::ft_lock_guard lock(mu); + if (T *result = output.load()) { + return *result; + } + T *p = m.release(); + output.store(p); + return *p; + } + } + +private: + nanobind::ft_mutex mu; + std::atomic output{nullptr}; + F initFn; +}; + +nanobind::module_ &irModule() { + static SafeInit init([]() { + return std::make_unique( + nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir"))); + }); + return init.get(); +} + +} // namespace +} // namespace python +} // namespace mlir + // Raw CAPI type casters need to be declared before use, so always include them // first. namespace nanobind { @@ -63,7 +116,8 @@ mlirApiObjectToCapsule(nanobind::handle apiObject) { /// Casts object <-> MlirAffineMap. template <> struct type_caster { - NB_TYPE_CASTER(MlirAffineMap, const_name("MlirAffineMap")) + NB_TYPE_CASTER(MlirAffineMap, + const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.AffineMap"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToAffineMap(capsule->ptr()); @@ -75,7 +129,7 @@ struct type_caster { cleanup_list *cleanup) noexcept { nanobind::object capsule = nanobind::steal(mlirPythonAffineMapToCapsule(v)); - return nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) + return mlir::python::irModule() .attr("AffineMap") .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) .release(); @@ -85,7 +139,8 @@ struct type_caster { /// Casts object <-> MlirAttribute. 
template <> struct type_caster { - NB_TYPE_CASTER(MlirAttribute, const_name("MlirAttribute")) + NB_TYPE_CASTER(MlirAttribute, + const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Attribute"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToAttribute(capsule->ptr()); @@ -97,7 +152,7 @@ struct type_caster { cleanup_list *cleanup) noexcept { nanobind::object capsule = nanobind::steal(mlirPythonAttributeToCapsule(v)); - return nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) + return mlir::python::irModule() .attr("Attribute") .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) .attr(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR)() @@ -108,7 +163,7 @@ struct type_caster { /// Casts object -> MlirBlock. template <> struct type_caster { - NB_TYPE_CASTER(MlirBlock, const_name("MlirBlock")) + NB_TYPE_CASTER(MlirBlock, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Block"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToBlock(capsule->ptr()); @@ -121,16 +176,15 @@ struct type_caster { /// Casts object -> MlirContext. template <> struct type_caster { - NB_TYPE_CASTER(MlirContext, const_name("MlirContext")) + NB_TYPE_CASTER(MlirContext, + const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Context"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (src.is_none()) { // Gets the current thread-bound context. // TODO: This raises an error of "No current context" currently. // Update the implementation to pretty-print the helpful error that the // core implementations print in this case. - src = nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr("Context") - .attr("current"); + src = mlir::python::irModule().attr("Context").attr("current"); } std::optional capsule = mlirApiObjectToCapsule(src); value = mlirPythonCapsuleToContext(capsule->ptr()); @@ -141,7 +195,8 @@ struct type_caster { /// Casts object <-> MlirDialectRegistry. template <> struct type_caster { - NB_TYPE_CASTER(MlirDialectRegistry, const_name("MlirDialectRegistry")) + NB_TYPE_CASTER(MlirDialectRegistry, + const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.DialectRegistry"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToDialectRegistry(capsule->ptr()); @@ -153,7 +208,7 @@ struct type_caster { cleanup_list *cleanup) noexcept { nanobind::object capsule = nanobind::steal( mlirPythonDialectRegistryToCapsule(v)); - return nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) + return mlir::python::irModule() .attr("DialectRegistry") .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) .release(); @@ -163,13 +218,12 @@ struct type_caster { /// Casts object <-> MlirLocation. template <> struct type_caster { - NB_TYPE_CASTER(MlirLocation, const_name("MlirLocation")) + NB_TYPE_CASTER(MlirLocation, + const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Location"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (src.is_none()) { // Gets the current thread-bound context. 
- src = nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr("Location") - .attr("current"); + src = mlir::python::irModule().attr("Location").attr("current"); } if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToLocation(capsule->ptr()); @@ -181,7 +235,7 @@ struct type_caster { cleanup_list *cleanup) noexcept { nanobind::object capsule = nanobind::steal(mlirPythonLocationToCapsule(v)); - return nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) + return mlir::python::irModule() .attr("Location") .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) .release(); @@ -191,7 +245,7 @@ struct type_caster { /// Casts object <-> MlirModule. template <> struct type_caster { - NB_TYPE_CASTER(MlirModule, const_name("MlirModule")) + NB_TYPE_CASTER(MlirModule, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Module"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToModule(capsule->ptr()); @@ -203,7 +257,7 @@ struct type_caster { cleanup_list *cleanup) noexcept { nanobind::object capsule = nanobind::steal(mlirPythonModuleToCapsule(v)); - return nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) + return mlir::python::irModule() .attr("Module") .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) .release(); @@ -213,8 +267,9 @@ struct type_caster { /// Casts object <-> MlirFrozenRewritePatternSet. template <> struct type_caster { - NB_TYPE_CASTER(MlirFrozenRewritePatternSet, - const_name("MlirFrozenRewritePatternSet")) + NB_TYPE_CASTER( + MlirFrozenRewritePatternSet, + const_name(MAKE_MLIR_PYTHON_QUALNAME("rewrite.FrozenRewritePatternSet"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToFrozenRewritePatternSet(capsule->ptr()); @@ -236,7 +291,8 @@ struct type_caster { /// Casts object <-> MlirOperation. template <> struct type_caster { - NB_TYPE_CASTER(MlirOperation, const_name("MlirOperation")) + NB_TYPE_CASTER(MlirOperation, + const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Operation"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToOperation(capsule->ptr()); @@ -250,7 +306,7 @@ struct type_caster { return nanobind::none(); nanobind::object capsule = nanobind::steal(mlirPythonOperationToCapsule(v)); - return nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) + return mlir::python::irModule() .attr("Operation") .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) .release(); @@ -260,7 +316,7 @@ struct type_caster { /// Casts object <-> MlirValue. template <> struct type_caster { - NB_TYPE_CASTER(MlirValue, const_name("MlirValue")) + NB_TYPE_CASTER(MlirValue, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Value"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToValue(capsule->ptr()); @@ -274,7 +330,7 @@ struct type_caster { return nanobind::none(); nanobind::object capsule = nanobind::steal(mlirPythonValueToCapsule(v)); - return nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) + return mlir::python::irModule() .attr("Value") .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) .attr(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR)() @@ -285,7 +341,8 @@ struct type_caster { /// Casts object -> MlirPassManager. 
template <> struct type_caster { - NB_TYPE_CASTER(MlirPassManager, const_name("MlirPassManager")) + NB_TYPE_CASTER(MlirPassManager, const_name(MAKE_MLIR_PYTHON_QUALNAME( + "passmanager.PassManager"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToPassManager(capsule->ptr()); @@ -298,7 +355,7 @@ struct type_caster { /// Casts object <-> MlirTypeID. template <> struct type_caster { - NB_TYPE_CASTER(MlirTypeID, const_name("MlirTypeID")) + NB_TYPE_CASTER(MlirTypeID, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.TypeID"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToTypeID(capsule->ptr()); @@ -312,7 +369,7 @@ struct type_caster { return nanobind::none(); nanobind::object capsule = nanobind::steal(mlirPythonTypeIDToCapsule(v)); - return nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) + return mlir::python::irModule() .attr("TypeID") .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) .release(); @@ -322,7 +379,7 @@ struct type_caster { /// Casts object <-> MlirType. template <> struct type_caster { - NB_TYPE_CASTER(MlirType, const_name("MlirType")) + NB_TYPE_CASTER(MlirType, const_name(MAKE_MLIR_PYTHON_QUALNAME("ir.Type"))) bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { if (auto capsule = mlirApiObjectToCapsule(src)) { value = mlirPythonCapsuleToType(capsule->ptr()); @@ -334,7 +391,7 @@ struct type_caster { cleanup_list *cleanup) noexcept { nanobind::object capsule = nanobind::steal(mlirPythonTypeToCapsule(t)); - return nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) + return mlir::python::irModule() .attr("Type") .attr(MLIR_PYTHON_CAPI_FACTORY_ATTR)(capsule) .attr(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR)() @@ -345,7 +402,7 @@ struct type_caster { /// Casts MlirStringRef -> object. template <> struct type_caster { - NB_TYPE_CASTER(MlirStringRef, const_name("MlirStringRef")) + NB_TYPE_CASTER(MlirStringRef, const_name("str")) static handle from_cpp(MlirStringRef s, rv_policy, cleanup_list *cleanup) noexcept { return nanobind::str(s.data, s.length).release(); @@ -453,11 +510,9 @@ class mlir_attribute_subclass : public pure_subclass { mlir_attribute_subclass(nanobind::handle scope, const char *attrClassName, IsAFunctionTy isaFunction, GetTypeIDFunctionTy getTypeIDFunction = nullptr) - : mlir_attribute_subclass( - scope, attrClassName, isaFunction, - nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr("Attribute"), - getTypeIDFunction) {} + : mlir_attribute_subclass(scope, attrClassName, isaFunction, + irModule().attr("Attribute"), + getTypeIDFunction) {} /// Subclasses with a provided mlir.ir.Attribute super-class. This must /// be used if the subclass is being defined in the same extension module @@ -540,11 +595,8 @@ class mlir_type_subclass : public pure_subclass { mlir_type_subclass(nanobind::handle scope, const char *typeClassName, IsAFunctionTy isaFunction, GetTypeIDFunctionTy getTypeIDFunction = nullptr) - : mlir_type_subclass( - scope, typeClassName, isaFunction, - nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr("Type"), - getTypeIDFunction) {} + : mlir_type_subclass(scope, typeClassName, isaFunction, + irModule().attr("Type"), getTypeIDFunction) {} /// Subclasses with a provided mlir.ir.Type super-class. 
This must /// be used if the subclass is being defined in the same extension module @@ -631,10 +683,8 @@ class mlir_value_subclass : public pure_subclass { /// Subclasses by looking up the super-class dynamically. mlir_value_subclass(nanobind::handle scope, const char *valueClassName, IsAFunctionTy isaFunction) - : mlir_value_subclass( - scope, valueClassName, isaFunction, - nanobind::module_::import_(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr("Value")) {} + : mlir_value_subclass(scope, valueClassName, isaFunction, + irModule().attr("Value")) {} /// Subclasses with a provided mlir.ir.Value super-class. This must /// be used if the subclass is being defined in the same extension module diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td index 1a37d057776e2..3c18ecc753d0f 100644 --- a/mlir/include/mlir/Conversion/Passes.td +++ b/mlir/include/mlir/Conversion/Passes.td @@ -1489,8 +1489,8 @@ def ConvertVectorToLLVMPass : Pass<"convert-vector-to-llvm"> { VectorContractLoweringAttr.summary, [{::llvm::cl::values( clEnumValN(::mlir::vector::VectorContractLowering::Dot, "dot", "Progressively lower to finer grained `vector.contract` and dot-products. (default)"), - clEnumValN(::mlir::vector::VectorContractLowering::Matmul, "matmul", - "Lower to `vector.matrix_multiply`, maps 1-1 to LLVM matrix intrinsics."), + clEnumValN(::mlir::vector::VectorContractLowering::LLVMIntr, "llvmintr", + "Lower directly to `llvm.intr.matrix.multiply`."), clEnumValN(::mlir::vector::VectorContractLowering::OuterProduct, "outerproduct", "Lower to `vector.outerproduct`."), clEnumValN(::mlir::vector::VectorContractLowering::ParallelArith, "parallelarith", @@ -1502,8 +1502,8 @@ def ConvertVectorToLLVMPass : Pass<"convert-vector-to-llvm"> { VectorTransposeLoweringAttr.summary, [{::llvm::cl::values( clEnumValN(::mlir::vector::VectorTransposeLowering::EltWise, "eltwise", "Lower transpose into element-wise extract and inserts (default)"), - clEnumValN(::mlir::vector::VectorTransposeLowering::Flat, "flat", - "Lower 2-D transpose to `vector.flat_transpose`, maps 1-1 to LLVM matrix intrinsics"), + clEnumValN(::mlir::vector::VectorTransposeLowering::LLVMIntr, "llvmintr", + "Lower 2-D transpose directly to `llvm.intr.matrix.transpose`"), clEnumValN(::mlir::vector::VectorTransposeLowering::Shuffle1D, "shuffle1d", "Lower 2-D transpose to `vector.shuffle` on 1-D vector."), clEnumValN(::mlir::vector::VectorTransposeLowering::Shuffle16x16, "shuffle16x16", diff --git a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td index a24a918357f2d..8370d350afd1e 100644 --- a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td +++ b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td @@ -235,7 +235,7 @@ def AMDGPU_FatRawBufferCastOp : DeclareOpInterfaceMethods, ViewLikeOpInterface, AttrSizedOperandSegments]>, Arguments<(ins AnyMemRef:$source, - Optional:$validBytes, + Optional:$validBytes, Optional>:$cacheSwizzleStride, DefaultValuedAttr:$boundsCheck, UnitAttr:$resetOffset)>, @@ -680,8 +680,8 @@ def AMDGPU_PermlaneSwapOp : AMDGPU_Op<"permlane_swap", [Pure, AllTypesMatch<["re * `$fetch_inactive`: Optional. Used to dertermine behavior of a fetch from a disabled lane. `fetch_inactive = false`: If the source lane is disabled, use `bound_ctrl` to determine the source value. `fetch_inactive = true`: If the source lane is disabled, fetch the source value anyway (ignoring `bound_ctrl`). - * `$bound_ctrl`: Optional. 
Used to determine what a thread should do if its source operand is from - a disabled lane: use the value zero, or disable the write. + * `$bound_ctrl`: Optional. Used to determine what a thread should do if its source operand is from + a disabled lane: use the value zero, or disable the write. `bound_ctrl = false`: Do not write when source is from a disabled lane `bound_ctrl = true`: Use zero as input if source is from a disabled lane diff --git a/mlir/include/mlir/Dialect/Arith/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Arith/Transforms/Transforms.h index 8d6c68cef680d..ffd367ef11abc 100644 --- a/mlir/include/mlir/Dialect/Arith/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Arith/Transforms/Transforms.h @@ -53,7 +53,7 @@ reifyValueBound(OpBuilder &b, Location loc, presburger::BoundType type, /// ValueBoundsOpInterface, no bound can be computed. FailureOr reifyIndexValueBound( OpBuilder &b, Location loc, presburger::BoundType type, Value value, - ValueBoundsConstraintSet::StopConditionFn stopCondition = nullptr, + const ValueBoundsConstraintSet::StopConditionFn &stopCondition = nullptr, bool closedUB = false); /// Reify a bound for the specified dimension of the given shaped value in terms @@ -65,7 +65,7 @@ FailureOr reifyIndexValueBound( FailureOr reifyShapedValueDimBound( OpBuilder &b, Location loc, presburger::BoundType type, Value value, int64_t dim, - ValueBoundsConstraintSet::StopConditionFn stopCondition = nullptr, + const ValueBoundsConstraintSet::StopConditionFn &stopCondition = nullptr, bool closedUB = false); } // namespace arith diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h index f3b34f9fded7f..dd693a25fd54f 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h @@ -260,12 +260,12 @@ struct BufferizationOptions { std::function; /// Initializer function for analysis state. using AnalysisStateInitFn = std::function; - /// Tensor -> MemRef type converter. - /// Parameters: tensor type, memory space, func op, bufferization options + /// Tensor-like -> Buffer-like type conversion. + /// Parameters: tensor-like type, memory space, func op, bufferization options using FunctionArgTypeConverterFn = - std::function; - /// Tensor -> MemRef type converter. + /// Tensor -> MemRef type conversion. /// Parameters: tensor type, memory space, bufferization options using UnknownTypeConverterFn = std::function; @@ -335,10 +335,12 @@ struct BufferizationOptions { /// predictable. void setFunctionBoundaryTypeConversion(LayoutMapOption layoutMapOption); - /// Type converter from tensors to memrefs. This type converter is used to - /// determine bufferized function argument and result types. By default, a - /// type converter that returns a memref type with a fully dynamic layout map - /// is used. + /// Type conversion from tensors to buffers. This type conversion is used to + /// determine bufferized function argument and result types. + /// + /// By default, if tensor is a (builtin) tensor type, it is converted to a + /// memref type with a fully dynamic layout map; if tensor is a (generic) + /// tensor-like type, it is converted using TensorLikeType::getBufferType(). /// /// If `bufferizeFunctionBoundaries` is not set, this function isn't used. 
FunctionArgTypeConverterFn functionArgTypeConverterFn = nullptr; @@ -350,10 +352,9 @@ struct BufferizationOptions { /// If `bufferizeFunctionBoundaries` is not set, this flag has no effect. bool inferFunctionResultLayout = true; - /// Type converter from tensors to memrefs. This type converter is used if no - /// memref type could be inferred during bufferization. By default, a type - /// converter that returns a memref type with a fully dynamic layout map is - /// used. + /// Type conversion from tensors to memrefs. This type conversion is used if + /// no memref type could be inferred during bufferization. By default, returns + /// a memref type with a fully dynamic layout map. UnknownTypeConverterFn unknownTypeConverterFn = nullptr; // Use during type conversion to determine the memory space for memref based diff --git a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td index 721f9f6b320ad..f52eb7b91dc4c 100644 --- a/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td +++ b/mlir/include/mlir/Dialect/EmitC/IR/EmitC.td @@ -1645,7 +1645,7 @@ def EmitC_ClassOp return } } - // Class with a final speciferAdd commentMore actions + // Class with a final specifer emitc.class final @modelClass { emitc.field @fieldName0 : !emitc.array<1xf32> = {emitc.opaque = "input_tensor"} emitc.func @execute() { @@ -1667,8 +1667,6 @@ def EmitC_ClassOp Block &getBlock(); }]; - let hasCustomAssemblyFormat = 1; - let assemblyFormat = [{ (`final` $final_specifier^)? $sym_name attr-dict-with-keyword $body }]; } diff --git a/mlir/include/mlir/Dialect/GPU/Transforms/Passes.td b/mlir/include/mlir/Dialect/GPU/Transforms/Passes.td index 187ac9aa18aac..0c8a0c7a677ab 100644 --- a/mlir/include/mlir/Dialect/GPU/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/GPU/Transforms/Passes.td @@ -39,8 +39,19 @@ def GpuMapParallelLoopsPass encountered to the local workgroup. Within each mapping, the first three dimensions are mapped to x/y/z hardware ids and all following dimensions are mapped to sequential loops. + + Ordering of the loop mapping against the different dimensions is controlled + by the `mapping-policy` option. + Two policies are supported: + 1. `outermost-first` (default): the outermost loop maps to X, then Y + and finally Z. + 2. `innermost-first`: the innermost loop maps to X, then Y and finally Z. }]; let dependentDialects = ["mlir::gpu::GPUDialect"]; + let options = [Option<"mappingPolicyStr", "mapping-policy", "std::string", + /*default=*/"\"outermost-first\"", + "Policy outlining how to assign loops to GPU dimensions." 
+ "Supported values are `outermost-first` and `innermost-first`.">]; } def GpuEliminateBarriers diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td index 75bce6b0a0e54..147f8c2040049 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td @@ -425,17 +425,19 @@ def LLVM_DICompileUnitAttr : LLVM_Attr<"DICompileUnit", "di_compile_unit", OptionalParameter<"StringAttr">:$producer, "bool":$isOptimized, "DIEmissionKind":$emissionKind, - OptionalParameter<"DINameTableKind">:$nameTableKind + OptionalParameter<"DINameTableKind">:$nameTableKind, + OptionalParameter<"StringAttr">:$splitDebugFilename ); let builders = [ AttrBuilderWithInferredContext<(ins "DistinctAttr":$id, "unsigned":$sourceLanguage, "DIFileAttr":$file, "StringAttr":$producer, "bool":$isOptimized, "DIEmissionKind":$emissionKind, - CArg<"DINameTableKind", "DINameTableKind::Default">:$nameTableKind + CArg<"DINameTableKind", "DINameTableKind::Default">:$nameTableKind, + CArg<"StringAttr", "{}">:$splitDebugFilename ), [{ return $_get(id.getContext(), id, sourceLanguage, file, producer, - isOptimized, emissionKind, nameTableKind); + isOptimized, emissionKind, nameTableKind, splitDebugFilename); }]> ]; let assemblyFormat = "`<` struct(params) `>`"; diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td index e12b8ac84ba23..398388bd720be 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td @@ -184,6 +184,15 @@ def LLVM_UMinOp : LLVM_BinarySameArgsIntrOpI<"umin">; def LLVM_SinOp : LLVM_UnaryIntrOpF<"sin">; def LLVM_CosOp : LLVM_UnaryIntrOpF<"cos">; def LLVM_TanOp : LLVM_UnaryIntrOpF<"tan">; +def LLVM_SincosOp : LLVM_TwoResultIntrOp<"sincos", [], [0], + [Pure], /*requiresFastmath=*/1> { + let arguments = + (ins LLVM_ScalarOrVectorOf:$val, + DefaultValuedAttr:$fastmathFlags); + let assemblyFormat = "`(` operands `)` attr-dict `:` " + "functional-type(operands, results)"; + let hasVerifier = 1; +} def LLVM_ASinOp : LLVM_UnaryIntrOpF<"asin">; def LLVM_ACosOp : LLVM_UnaryIntrOpF<"acos">; diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td index 1f3974846a5ef..29001e26eaaaf 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td @@ -569,7 +569,7 @@ def ROCDL_MakeBufferRsrcOp : ROCDL_IntrOp<"make.buffer.rsrc", [0], [0], [Pure], 1>, Arguments<(ins LLVM_AnyPointer:$base, I16:$stride, - I32:$numRecords, + I64:$numRecords, I32:$flags)> { let results = (outs LLVM_AnyPointer:$res); let assemblyFormat = "operands attr-dict `:` type($base) `to` type($res)"; @@ -985,7 +985,6 @@ class ScaleArgInfo { //===---------------------------------------------------------------------===// // Scaled {fp4,bf8,fp8} to {bf16,f16,f32} conversion intrinsics //===---------------------------------------------------------------------===// - foreach smallT = [ ScaleArgInfo, ScaleArgInfo, @@ -996,6 +995,8 @@ foreach smallT = [ ScaleArgInfo, ScaleArgInfo, ] in { + + // Up-scaling def ROCDL_CvtPkScalePk8 # largeT.nameForOp # smallT.nameForOp # Op : ROCDL_ConcreteNonMemIntrOp<"cvt.scale.pk8." # largeT.name # "." 
# smallT.name, [Pure], 1, [2], ["scaleSel"]>, @@ -1010,13 +1011,30 @@ foreach smallT = [ attr-dict $src `,` $scale `[` $scaleSel `]` `:` type($res) }]; } + + // Down-scaling + def ROCDL_CvtScaleF32Pk8 # smallT.nameForOp # largeT.nameForOp # Op : + ROCDL_ConcreteNonMemIntrOp<"cvt.scalef32.pk8." # smallT.name # "." # largeT.name, + [Pure], 1>, + Arguments<(ins largeT.type:$src, F32:$scale)> { + let results = (outs smallT.type:$res); + let summary = "Scale and convert packed " + # largeT.name # " to packed " # smallT.name ; + let description = [{ + Convert 8 packed }] # largeT.name # [{ values to packed }] + # smallT.name # [{, multiplying by the exponent part of `scale` + before doing so. This op is for gfx1250+ arch. + }]; + let assemblyFormat = [{ + attr-dict $src `,` $scale `:` type($res) + }]; + } } // foreach largeT } // foreach smallTOp //===---------------------------------------------------------------------===// // Scaled {bf6,fp6} to {bf16,f16,f32} conversion intrinsics //===---------------------------------------------------------------------===// - foreach smallT = [ ScaleArgInfo, ScaleArgInfo @@ -1360,6 +1378,37 @@ def ROCDL_CvtScaleF32PkFp4F32Op : }]; } +//===----------------------------------------------------------------------===// +// FMED3 operations +//===----------------------------------------------------------------------===// + +def ROCDL_FMed3Op : ROCDL_IntrOp<"fmed3", [0], [], [Pure, AllTypesMatch<["res", "src0", "src1", "src2"]>], 1>, + Arguments<(ins LLVM_ScalarOrVectorOf:$src0, + LLVM_ScalarOrVectorOf:$src1, + LLVM_ScalarOrVectorOf:$src2)> { + let results = (outs LLVM_ScalarOrVectorOf:$res); + let summary = "Median of three float/half values"; + let description = [{ + Computes the median of three floating-point values using the AMDGPU fmed3 intrinsic. + This operation is equivalent to `max(min(a, b), min(max(a, b), c))` but uses the + hardware-accelerated V_MED3_F16/V_MED3_F32 instruction for better performance. + + The operation supports both scalar and vector floating-point types (f16, f32). + + Example: + ```mlir + // Scalar f32 median + %result = rocdl.fmed3 %a, %b, %c : f32 + + // Vector f16 median + %result = rocdl.fmed3 %va, %vb, %vc : vector<4xf16> + ``` + }]; + let assemblyFormat = [{ + $src0 `,` $src1 `,` $src2 attr-dict `:` type($res) + }]; +} + //===----------------------------------------------------------------------===// // ROCDL target attribute. //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/LLVMIR/XeVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/XeVMOps.td index 514b01a69fb9b..4f7a8421c07b9 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/XeVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/XeVMOps.td @@ -634,4 +634,37 @@ def XeVM_TargetAttr : XeVM_Attr<"XeVMTarget", "target"> { let genVerifyDecl = 1; } +//===----------------------------------------------------------------------===// +// XeVM special register op definitions +//===----------------------------------------------------------------------===// + +class XeVM_SpecialIdRegisterOp traits = []> + : XeVM_Op, + Results<(outs AnyTypeOf<[I32, I64]>:$res)>, + Arguments<(ins OptionalAttr:$range)> { + let assemblyFormat = "(`range` $range^)? 
attr-dict `:` type($res)"; +} + +multiclass XeVM_SpecialRegisterXYZ traits = []> { + def XOp : XeVM_SpecialIdRegisterOp; + def YOp : XeVM_SpecialIdRegisterOp; + def ZOp : XeVM_SpecialIdRegisterOp; +} + +//===----------------------------------------------------------------------===// +// Workitem index and range +defm XeVM_WorkitemId : XeVM_SpecialRegisterXYZ<"local_id">; +defm XeVM_WorkgroupDim : XeVM_SpecialRegisterXYZ<"local_size">; + +//===----------------------------------------------------------------------===// +// Workgroup index and range +defm XeVM_WorkgroupId : XeVM_SpecialRegisterXYZ<"group_id">; +defm XeVM_GridDim : XeVM_SpecialRegisterXYZ<"group_count">; + +//===----------------------------------------------------------------------===// +// Lane, Subgroup index and range +def XeVM_LaneIdOp : XeVM_SpecialIdRegisterOp<"lane_id">; +def XeVM_SubgroupIdOp : XeVM_SpecialIdRegisterOp<"subgroup_id">; +def XeVM_SubgroupSizeOp : XeVM_SpecialIdRegisterOp<"subgroup_size">; + #endif // XEVMIR_OPS diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.td index f36b41ccf6745..3390f380c7eb8 100644 --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgRelayoutOps.td @@ -239,6 +239,14 @@ def Linalg_PackOp : Linalg_RelayoutOp<"pack", [ ArrayRef outerDimsPerm, ArrayRef innerTiles); + // Same as above function but here dynamic dimensions are assumed + // to require padding. + static bool requirePaddingValueStrict(ArrayRef inputShape, + ArrayRef innerDimsPos, + ArrayRef outputShape, + ArrayRef outerDimsPerm, + ArrayRef innerTiles); + static Value createDestinationTensor(OpBuilder &b, Location loc, Value source, ArrayRef innerTileSizes, ArrayRef innerDimsPos, ArrayRef outerDimsPerm); diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td index 8f3232f01544f..0d6ebc087e2f3 100644 --- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td +++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td @@ -17,6 +17,7 @@ include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.td" include "mlir/Dialect/Transform/IR/TransformTypes.td" include "mlir/Dialect/SCF/IR/DeviceMappingInterface.td" include "mlir/Interfaces/SideEffectInterfaces.td" +include "mlir/Interfaces/InferTypeOpInterface.td" include "mlir/IR/OpBase.td" include "mlir/IR/RegionKindInterface.td" @@ -236,11 +237,51 @@ def BufferizeToAllocationOp : Op, - OpBuilder<(ins "Value":$target, "int64_t":$memorySpace)> - ]; +//===----------------------------------------------------------------------===// +// PromoteTensorOp +//===----------------------------------------------------------------------===// + +def PromoteTensorOp : Op, + DeclareOpInterfaceMethods, + SameOperandsAndResultType]> { + let summary = "Request a tensor value to live in a specific memory space " + "after bufferization"; + let description = [{ + Requests that a tensor value lives in a specific memory space for its + lifetime. This is achieved by allocating a new tensor in the desired + memory space with `bufferization.alloc_tensor` and optionally materializing + the source value into that allocation with + `bufferization.materialize_in_destination`. All uses of the original value + are then redirected to the promoted value. 
+ + The generated code for promoting tensor value %0 resembles the following: + + %1 = bufferization.alloc_tensor() + { memory_space = memory_space } + // Note: the materialization is omitted if %0 is never read and is only + // written into (i.e., it behaves as a result tensor). + %2 = bufferization.materialize_in_destination %0 in %1 + // ... + + + Deallocation is not handled by this transform. + + Return modes: + - Produces a silenceable failure if the given handle does not point to + tensor-typed values. + - Succeeds otherwise and returns a handle to the promoted value(s), i.e., + the result of materialization if present and the allocation otherwise. + }]; + + let arguments = (ins TransformValueHandleTypeInterface:$tensor, + OptionalAttr:$memory_space); + let results = (outs TransformValueHandleTypeInterface:$promoted); + + let assemblyFormat = + "(`to` $memory_space^)? $tensor attr-dict `:` type($tensor)"; } //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h index 64d3a2448b409..7266687584b38 100644 --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -1858,6 +1858,7 @@ void populateDecomposePadPatterns(RewritePatternSet &patterns); /// Populates patterns to transform linalg.conv_2d_xxx operations into /// linalg.generic (for img2col packing) and linalg.matmul. +/// Note: currently limited to Tensor semantics only. /// \see rewriteInIm2Col for more details. void populateConvertConv2DToImg2ColPatterns(RewritePatternSet &patterns); @@ -1914,9 +1915,12 @@ void populateElementwiseOpsFusionPatterns( using ControlPropagationFn = std::function; /// Patterns to bubble up or down data layout ops across other operations. +/// The function also has an option to allow the patterns to propagate with +/// poison padding if requested by the caller. void populateDataLayoutPropagationPatterns( RewritePatternSet &patterns, - const ControlPropagationFn &controlPackUnPackPropagation); + const ControlPropagationFn &controlPackUnPackPropagation, + bool PoisonPaddingOk = false); /// Patterns to sink extract slice across other operations. void populateExtractSliceSinkingPatterns( diff --git a/mlir/include/mlir/Dialect/Math/IR/MathOps.td b/mlir/include/mlir/Dialect/Math/IR/MathOps.td index cfd8c4b8f11f7..af65af6fedec6 100644 --- a/mlir/include/mlir/Dialect/Math/IR/MathOps.td +++ b/mlir/include/mlir/Dialect/Math/IR/MathOps.td @@ -510,6 +510,43 @@ def Math_SinhOp : Math_FloatUnaryOp<"sinh"> { let hasFolder = 1; } +//===----------------------------------------------------------------------===// +// SinCosOp +//===----------------------------------------------------------------------===// + +def Math_SincosOp : Math_Op<"sincos", + [SameOperandsAndResultShape, + DeclareOpInterfaceMethods, + AllTypesMatch<["operand", "sin", "cos"]>]> { + let summary = "sine and cosine of the specified value"; + let description = [{ + The `sincos` operation computes both the sine and cosine of a given value + simultaneously. It takes one operand of floating point type (i.e., scalar, + tensor or vector) and returns two results of the same type. This operation + can be more efficient than computing sine and cosine separately when both + values are needed. + + Example: + + ```mlir + // Scalar sine and cosine values. 
+ %sin, %cos = math.sincos %input : f64 + ``` + }]; + + let arguments = (ins FloatLike:$operand, + DefaultValuedAttr:$fastmath); + let results = (outs FloatLike:$sin, FloatLike:$cos); + + let assemblyFormat = [{ $operand (`fastmath` `` $fastmath^)? + attr-dict `:` type($operand) }]; + + let extraClassDeclaration = [{ + std::optional> getShapeForUnroll(); + }]; +} + //===----------------------------------------------------------------------===// // CountLeadingZerosOp //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/Math/Transforms/Passes.td b/mlir/include/mlir/Dialect/Math/Transforms/Passes.td index 4d415aeac8f58..48346abd84285 100644 --- a/mlir/include/mlir/Dialect/Math/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Math/Transforms/Passes.td @@ -64,4 +64,12 @@ def MathExpandOpsPass : Pass<"math-expand-ops"> { ]; } +def MathSincosFusionPass : Pass<"math-sincos-fusion"> { + let summary = "Fuse sin and cos operations."; + let description = [{ + Fuse sin and cos operations into a sincos operation. + }]; + let dependentDialects = ["math::MathDialect"]; +} + #endif // MLIR_DIALECT_MATH_TRANSFORMS_PASSES diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRef.h b/mlir/include/mlir/Dialect/MemRef/IR/MemRef.h index bdec699eb4ce4..30f33ed2fd1d6 100644 --- a/mlir/include/mlir/Dialect/MemRef/IR/MemRef.h +++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRef.h @@ -18,6 +18,7 @@ #include "mlir/Interfaces/ControlFlowInterfaces.h" #include "mlir/Interfaces/InferIntRangeInterface.h" #include "mlir/Interfaces/InferTypeOpInterface.h" +#include "mlir/Interfaces/MemOpInterfaces.h" #include "mlir/Interfaces/MemorySlotInterfaces.h" #include "mlir/Interfaces/ShapedOpInterfaces.h" #include "mlir/Interfaces/SideEffectInterfaces.h" diff --git a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td index 671cc05e963b4..d4d67bfb278d5 100644 --- a/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td +++ b/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.td @@ -15,6 +15,7 @@ include "mlir/Interfaces/CastInterfaces.td" include "mlir/Interfaces/ControlFlowInterfaces.td" include "mlir/Interfaces/InferIntRangeInterface.td" include "mlir/Interfaces/InferTypeOpInterface.td" +include "mlir/Interfaces/MemOpInterfaces.td" include "mlir/Interfaces/MemorySlotInterfaces.td" include "mlir/Interfaces/ShapedOpInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" @@ -145,7 +146,8 @@ def AssumeAlignmentOp : MemRef_Op<"assume_alignment", [ DeclareOpInterfaceMethods, Pure, ViewLikeOpInterface, - SameOperandsAndResultType + SameOperandsAndResultType, + DeclareOpInterfaceMethods ]> { let summary = "assumption that gives alignment information to the input memref"; @@ -153,7 +155,7 @@ def AssumeAlignmentOp : MemRef_Op<"assume_alignment", [ The `assume_alignment` operation takes a memref and an integer alignment value. It returns a new SSA value of the same memref type, but associated with the assumption that the underlying buffer is aligned to the given - alignment. + alignment. If the buffer isn't aligned to the given alignment, its result is poison. 
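As a concrete illustration of the assembly format defined below, a minimal use could look as follows (the `%buf` value and the 64-byte alignment are assumed):

```mlir
// Returns %buf unchanged but carrying the assumption that the underlying
// buffer is 64-byte aligned; the result is poison if that does not hold.
%aligned = memref.assume_alignment %buf, 64 : memref<4x4xf32>
```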
This operation doesn't affect the semantics of a program where the @@ -168,7 +170,7 @@ def AssumeAlignmentOp : MemRef_Op<"assume_alignment", [ let assemblyFormat = "$memref `,` $alignment attr-dict `:` type($memref)"; let extraClassDeclaration = [{ MemRefType getType() { return ::llvm::cast(getResult().getType()); } - + Value getViewSource() { return getMemref(); } }]; @@ -176,6 +178,41 @@ def AssumeAlignmentOp : MemRef_Op<"assume_alignment", [ let hasFolder = 1; } +//===----------------------------------------------------------------------===// +// DistinctObjectsOp +//===----------------------------------------------------------------------===// + +def DistinctObjectsOp : MemRef_Op<"distinct_objects", [ + Pure, + DeclareOpInterfaceMethods + // ViewLikeOpInterface TODO: ViewLikeOpInterface only supports a single argument + ]> { + let summary = "assumption that accesses to specific memrefs will never alias"; + let description = [{ + The `distinct_objects` operation takes a list of memrefs and returns the same + memrefs, with the additional assumption that accesses to them will never + alias with each other. This means that loads and stores to different + memrefs in the list can be safely reordered. + + If the memrefs do alias, the load/store behavior is undefined. This + operation doesn't affect the semantics of a valid program. It is + intended for optimization purposes, allowing the compiler to generate more + efficient code based on the non-aliasing assumption. The optimization is + best-effort. + + Example: + + ```mlir + %1, %2 = memref.distinct_objects %a, %b : memref, memref + ``` + }]; + let arguments = (ins Variadic:$operands); + let results = (outs Variadic:$results); + + let assemblyFormat = "$operands attr-dict `:` type($operands)"; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // AllocOp //===----------------------------------------------------------------------===// @@ -456,6 +493,7 @@ def MemRef_AllocaScopeReturnOp : MemRef_Op<"alloca_scope.return", def MemRef_CastOp : MemRef_Op<"cast", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, MemRefsNormalizable, Pure, SameOperandsAndResultShape, @@ -1194,6 +1232,7 @@ def LoadOp : MemRef_Op<"load", "memref", "result", "::llvm::cast($_self).getElementType()">, MemRefsNormalizable, + DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "load operation"; @@ -1237,28 +1276,28 @@ def LoadOp : MemRef_Op<"load", OpBuilder<(ins "Value":$memref, "ValueRange":$indices, CArg<"bool", "false">:$nontemporal, - CArg<"uint64_t", "0">:$alignment), [{ + CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{ return build($_builder, $_state, memref, indices, nontemporal, - alignment != 0 ? $_builder.getI64IntegerAttr(alignment) : - nullptr); + alignment ? $_builder.getI64IntegerAttr(alignment->value()) : + nullptr); }]>, OpBuilder<(ins "Type":$resultType, "Value":$memref, "ValueRange":$indices, CArg<"bool", "false">:$nontemporal, - CArg<"uint64_t", "0">:$alignment), [{ + CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{ return build($_builder, $_state, resultType, memref, indices, nontemporal, - alignment != 0 ? $_builder.getI64IntegerAttr(alignment) : - nullptr); + alignment ?
$_builder.getI64IntegerAttr(alignment->value()) : + nullptr); }]>, OpBuilder<(ins "TypeRange":$resultTypes, "Value":$memref, "ValueRange":$indices, CArg<"bool", "false">:$nontemporal, - CArg<"uint64_t", "0">:$alignment), [{ + CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{ return build($_builder, $_state, resultTypes, memref, indices, nontemporal, - alignment != 0 ? $_builder.getI64IntegerAttr(alignment) : - nullptr); + alignment ? $_builder.getI64IntegerAttr(alignment->value()) : + nullptr); }]> ]; @@ -1284,6 +1323,7 @@ def LoadOp : MemRef_Op<"load", def MemRef_MemorySpaceCastOp : MemRef_Op<"memory_space_cast", [ DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, + MemorySpaceCastOpInterface, MemRefsNormalizable, Pure, SameOperandsAndResultElementType, @@ -1302,6 +1342,10 @@ def MemRef_MemorySpaceCastOp : MemRef_Op<"memory_space_cast", [ If the source and target address spaces are the same, this operation is a noop. + Finally, if the target memory-space is the generic/default memory-space, + then it is assumed this cast can be bubbled down safely. See the docs of + `MemorySpaceCastOpInterface` interface for more details. + Example: ```mlir @@ -1321,6 +1365,27 @@ def MemRef_MemorySpaceCastOp : MemRef_Op<"memory_space_cast", [ let extraClassDeclaration = [{ Value getViewSource() { return getSource(); } + + //===------------------------------------------------------------------===// + // MemorySpaceCastConsumerOpInterface + //===------------------------------------------------------------------===// + /// Returns the `source` memref. + TypedValue getSourcePtr(); + /// Returns the `dest` memref. + TypedValue getTargetPtr(); + /// Returns whether the memory-space cast is valid. Only casts between + /// memrefs are considered valid. Further, the `tgt` and `src` should only + /// differ on the memory-space parameter of the memref type. + bool isValidMemorySpaceCast(PtrLikeTypeInterface tgt, + PtrLikeTypeInterface src); + /// Clones the operation using a new target type and source value. + MemorySpaceCastOpInterface cloneMemorySpaceCastOp( + OpBuilder &b, PtrLikeTypeInterface tgt, + TypedValue src); + /// Returns whether the `source` value can be promoted by the + /// `MemorySpaceCastConsumerOpInterface::bubbleDownCasts` method. The only + /// casts the op recognizes as promotable are to the generic memory-space. 
+ bool isSourcePromotable(); }]; let hasFolder = 1; @@ -1376,6 +1441,7 @@ def MemRef_PrefetchOp : MemRef_Op<"prefetch"> { def MemRef_ReinterpretCastOp : MemRef_OpWithOffsetSizesAndStrides<"reinterpret_cast", [ DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, AttrSizedOperandSegments, MemRefsNormalizable, Pure, @@ -1603,6 +1669,7 @@ def MemRef_RankOp : MemRef_Op<"rank", [Pure]> { def MemRef_ReshapeOp: MemRef_Op<"reshape", [ DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, Pure, ViewLikeOpInterface]> { let summary = "memref reshape operation"; @@ -1701,6 +1768,7 @@ class MemRef_ReassociativeReshapeOp traits = []> : def MemRef_ExpandShapeOp : MemRef_ReassociativeReshapeOp<"expand_shape", [ DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "operation to produce a memref with a higher rank."; let description = [{ @@ -1822,7 +1890,9 @@ def MemRef_ExpandShapeOp : MemRef_ReassociativeReshapeOp<"expand_shape", [ } def MemRef_CollapseShapeOp : MemRef_ReassociativeReshapeOp<"collapse_shape", [ - DeclareOpInterfaceMethods]> { + DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods + ]> { let summary = "operation to produce a memref with a smaller rank."; let description = [{ The `memref.collapse_shape` op produces a new view with a smaller rank @@ -1929,6 +1999,7 @@ def MemRef_StoreOp : MemRef_Op<"store", "memref", "value", "::llvm::cast($_self).getElementType()">, MemRefsNormalizable, + DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "store operation"; @@ -1971,10 +2042,10 @@ def MemRef_StoreOp : MemRef_Op<"store", "Value":$memref, "ValueRange":$indices, CArg<"bool", "false">:$nontemporal, - CArg<"uint64_t", "0">:$alignment), [{ + CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{ return build($_builder, $_state, valueToStore, memref, indices, nontemporal, - alignment != 0 ? $_builder.getI64IntegerAttr(alignment) : - nullptr); + alignment ? 
$_builder.getI64IntegerAttr(alignment->value()) : + nullptr); }]>, OpBuilder<(ins "Value":$valueToStore, "Value":$memref), [{ $_state.addOperands(valueToStore); @@ -2006,6 +2077,7 @@ def MemRef_StoreOp : MemRef_Op<"store", def SubViewOp : MemRef_OpWithOffsetSizesAndStrides<"subview", [ DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, AttrSizedOperandSegments, OffsetSizeAndStrideOpInterface, @@ -2281,6 +2353,7 @@ def SubViewOp : MemRef_OpWithOffsetSizesAndStrides<"subview", [ def MemRef_TransposeOp : MemRef_Op<"transpose", [ DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, Pure]>, Arguments<(ins AnyStridedMemRef:$in, AffineMapAttr:$permutation)>, Results<(outs AnyStridedMemRef)> { @@ -2316,6 +2389,7 @@ def MemRef_TransposeOp : MemRef_Op<"transpose", [ def MemRef_ViewOp : MemRef_Op<"view", [ DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, Pure]> { let summary = "memref view operation"; @@ -2392,6 +2466,7 @@ def MemRef_ViewOp : MemRef_Op<"view", [ //===----------------------------------------------------------------------===// def AtomicRMWOp : MemRef_Op<"atomic_rmw", [ + DeclareOpInterfaceMethods, AllTypesMatch<["value", "result"]>, TypesMatchWith<"value type matches element type of memref", "memref", "value", diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td index 1eda5e4bc1618..8e43c4284d078 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauses.td @@ -995,6 +995,35 @@ class OpenMP_NumTeamsClauseSkip< def OpenMP_NumTeamsClause : OpenMP_NumTeamsClauseSkip<>; +//===----------------------------------------------------------------------===// +// V5.1: [10.1.2] `sizes` clause +//===----------------------------------------------------------------------===// + +class OpenMP_SizesClauseSkip< + bit traits = false, bit arguments = false, bit assemblyFormat = false, + bit description = false, bit extraClassDeclaration = false + > : OpenMP_Clause { + let arguments = (ins + Variadic:$sizes + ); + + let optAssemblyFormat = [{ + `sizes` `(` $sizes `:` type($sizes) `)` + }]; + + let description = [{ + The `sizes` clause defines the size of a grid over a multi-dimensional + logical iteration space. This grid is used for loop transformations such as + `tile` and `strip`. The size per dimension can be a variable, but only + values that are at least 2 make sense. It is not specified what happens + when smaller values are used, but it should still result in a loop nest that + executes each logical iteration once.
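As a sketch of how the clause composes with the loop-transformation syntax introduced later in this patch (the CLI values and the tile size `%ts` are assumed; the `omp.new_cli`/`omp.canonical_loop` setup that associates `%canonloop` with a loop is elided):

```mlir
// Assumed setup: %canonloop is a CLI already associated with a canonical
// loop; %grid and %intra are fresh CLIs naming the generated grid and
// intratile loops of the tiling transformation.
omp.tile (%grid, %intra) <- (%canonloop) sizes(%ts : i32)
```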
+ }]; +} + +def OpenMP_SizesClause : OpenMP_SizesClauseSkip<>; + //===----------------------------------------------------------------------===// // V5.2: [10.1.2] `num_threads` clause //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td index 9dbe6897a3304..f693a0737e0fc 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td @@ -230,14 +230,24 @@ def TargetRegionFlagsNone : I32BitEnumAttrCaseNone<"none">; def TargetRegionFlagsGeneric : I32BitEnumAttrCaseBit<"generic", 0>; def TargetRegionFlagsSpmd : I32BitEnumAttrCaseBit<"spmd", 1>; def TargetRegionFlagsTripCount : I32BitEnumAttrCaseBit<"trip_count", 2>; +def TargetRegionFlagsNoLoop : I32BitEnumAttrCaseBit<"no_loop", 3>; def TargetRegionFlags : OpenMP_BitEnumAttr< "TargetRegionFlags", - "target region property flags", [ + "These flags describe properties of the target kernel. " + "TargetRegionFlagsGeneric - denotes generic kernel. " + "TargetRegionFlagsSpmd - denotes SPMD kernel. " + "TargetRegionFlagsNoLoop - denotes kernel where " + "num_teams * num_threads >= loop_trip_count. It allows the conversion " + "of loops into sequential code by ensuring that each team/thread " + "executes at most one iteration. " + "TargetRegionFlagsTripCount - checks if the loop trip count should be " + "calculated.", [ TargetRegionFlagsNone, TargetRegionFlagsGeneric, TargetRegionFlagsSpmd, - TargetRegionFlagsTripCount + TargetRegionFlagsTripCount, + TargetRegionFlagsNoLoop ]>; //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td index bbcfb87fa03c6..5ad4e4b5b61d1 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td @@ -38,6 +38,44 @@ def OpenMP_MapBoundsType : OpenMP_Type<"MapBounds", "map_bounds_ty"> { let summary = "Type for representing omp map clause bounds information"; } +//===---------------------------------------------------------------------===// +// OpenMP Canonical Loop Info Type +//===---------------------------------------------------------------------===// + +def CanonicalLoopInfoType : OpenMP_Type<"CanonicalLoopInfo", "cli"> { + let summary = "Type for representing a reference to a canonical loop"; + let description = [{ + A variable of type CanonicalLoopInfo refers to an OpenMP-compatible + canonical loop in the same function. Values of this type are not + available at runtime and therefore cannot be used by the program itself, + i.e. an opaque type. It is similar to the transform dialect's + `!transform.interface` type, but instead of implementing an interface + for each transformation, the OpenMP dialect itself defines possible + operations on this type. + + A value of type CanonicalLoopInfoType (in the following: CLI) value can be + + 1. created by omp.new_cli. + 2. passed to omp.canonical_loop to associate the loop to that CLI. A CLI + can only be associated once. + 3. passed to an omp loop transformation operation that modifies the loop + associated with the CLI. The CLI is the "applyee" and the operation is + the consumer. A CLI can only be consumed once. + 4. passed to an omp loop transformation operation to associate the cli with + a result of that transformation. The CLI is the "generatee" and the + operation is the generator. 
+ + A CLI cannot + + 1. be returned from a function. + 2. be passed to operations that are not specifically designed to take a + CanonicalLoopInfoType, including AnyType. + + A CLI directly corresponds to an object of + OpenMPIRBuilder's CanonicalLoopInfo struct when lowering to LLVM-IR. + }]; +} + //===----------------------------------------------------------------------===// // Base classes for OpenMP dialect operations. //===----------------------------------------------------------------------===// @@ -211,8 +249,35 @@ class OpenMP_Op traits = [], // Doesn't actually create a C++ base class (only defines default values for // tablegen classes that derive from this). Use LoopTransformationInterface // instead for common operations. -class OpenMPTransform_Op traits = []> : - OpenMP_Op], traits) > { +class OpenMPTransform_Op traits = [], + list clauses = []> : + OpenMP_Op], traits), + clauses = clauses> { +} + +// Base clause for loop transformations using the standard syntax. +// +// omp.opname ($generatees) <- ($applyees) clause(...) clause(...) ... +// omp.opname ($applyees) clause(...) clause(...) ... +// +// $generatees is optional and is assumed to be empty if omitted +class OpenMPTransformBase_Op traits = [], + list clauses = []> : + OpenMPTransform_Op { + + let arguments = !con( + (ins Variadic:$generatees, + Variadic:$applyees + ), clausesArgs); + + let assemblyFormat = [{ custom($generatees, $applyees) }] + # clausesAssemblyFormat + # [{ attr-dict }]; } #endif // OPENMP_OP_BASE diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td index 5c77e215467e4..b73091ea0ca53 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td @@ -357,44 +357,6 @@ def SingleOp : OpenMP_Op<"single", traits = [ let hasVerifier = 1; } -//===---------------------------------------------------------------------===// -// OpenMP Canonical Loop Info Type -//===---------------------------------------------------------------------===// - -def CanonicalLoopInfoType : OpenMP_Type<"CanonicalLoopInfo", "cli"> { - let summary = "Type for representing a reference to a canonical loop"; - let description = [{ - A variable of type CanonicalLoopInfo refers to an OpenMP-compatible - canonical loop in the same function. Values of this type are not - available at runtime and therefore cannot be used by the program itself, - i.e. an opaque type. It is similar to the transform dialect's - `!transform.interface` type, but instead of implementing an interface - for each transformation, the OpenMP dialect itself defines possible - operations on this type. - - A value of type CanonicalLoopInfoType (in the following: CLI) value can be - - 1. created by omp.new_cli. - 2. passed to omp.canonical_loop to associate the loop to that CLI. A CLI - can only be associated once. - 3. passed to an omp loop transformation operation that modifies the loop - associated with the CLI. The CLI is the "applyee" and the operation is - the consumer. A CLI can only be consumed once. - 4. passed to an omp loop transformation operation to associate the cli with - a result of that transformation. The CLI is the "generatee" and the - operation is the generator. - - A CLI cannot - - 1. be returned from a function. - 2. be passed to operations that are not specifically designed to take a - CanonicalLoopInfoType, including AnyType. - - A CLI directly corresponds to an object of - OpenMPIRBuilder's CanonicalLoopInfo struct when lowering to LLVM-IR. 
- }]; -} - //===---------------------------------------------------------------------===// // OpenMP Canonical Loop Info Creation //===---------------------------------------------------------------------===// @@ -563,6 +525,31 @@ def UnrollHeuristicOp : OpenMPTransform_Op<"unroll_heuristic", []> { let hasCustomAssemblyFormat = 1; } +//===----------------------------------------------------------------------===// +// OpenMP tile operation +//===----------------------------------------------------------------------===// + +def TileOp : OpenMPTransformBase_Op<"tile", + clauses = [OpenMP_SizesClause]> { + let summary = "OpenMP tile operation"; + let description = [{ + Represents the OpenMP tile directive introduced in OpenMP 5.1. + + The construct partitions the logical iteration space of the affected loops + into equally-sized tiles, then creates two sets of nested loops. The outer + loops, called the grid loops, iterate over all tiles. The inner loops, + called the intratile loops, iterate over the logical iterations of a tile. + The sizes clause determines the size of a tile. + + Currently, the affected loops must be rectangular (the tripcount of the + inner loop must not depend on any iv of a surrounding affected loop) and + perfectly nested (except for the innermost affected loop, no operations + other than the nested loop and the terminator in the loop body). + }] # clausesDescription; + + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // 2.8.3 Workshare Construct //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td b/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td index c169f48e573d0..c97bd04d32896 100644 --- a/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td +++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td @@ -79,4 +79,14 @@ def Ptr_PtrAddFlags : I32Enum<"PtrAddFlags", "Pointer add flags", [ let cppNamespace = "::mlir::ptr"; } +//===----------------------------------------------------------------------===// +// Ptr diff flags enum properties. +//===----------------------------------------------------------------------===// + +def Ptr_PtrDiffFlags : I8BitEnum<"PtrDiffFlags", "Pointer difference flags", [ + I8BitEnumCase<"none", 0>, I8BitEnumCase<"nuw", 1>, I8BitEnumCase<"nsw", 2> + ]> { + let cppNamespace = "::mlir::ptr"; +} + #endif // PTR_ENUMS diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td b/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td index 468a3004d5c62..e14f64330c294 100644 --- a/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td +++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td @@ -415,6 +415,63 @@ def Ptr_PtrAddOp : Pointer_Op<"ptr_add", [ }]; } +//===----------------------------------------------------------------------===// +// PtrDiffOp +//===----------------------------------------------------------------------===// + +def Ptr_PtrDiffOp : Pointer_Op<"ptr_diff", [ + Pure, AllTypesMatch<["lhs", "rhs"]>, SameOperandsAndResultShape + ]> { + let summary = "Pointer difference operation"; + let description = [{ + The `ptr_diff` operation computes the difference between two pointers, + returning an integer or index value representing the number of bytes + between them.
+ + The operation supports both scalar and shaped types with value semantics: + - When both operands are scalar: produces a single difference value + - When both are shaped: performs element-wise subtraction, + shapes must be the same + + The operation also supports the following flags: + - `none`: No flags are set. + - `nuw`: No Unsigned Wrap, if the subtraction causes an unsigned overflow + (that is: the result would be negative), the result is a poison value. + - `nsw`: No Signed Wrap, if the subtraction causes a signed overflow, the + result is a poison value. + + NOTE: The pointer difference is calculated using an integer type specified + by the data layout. The final result will be sign-extended or truncated to + fit the result type as necessary. + + Example: + + ```mlir + // Scalar pointers + %diff = ptr.ptr_diff %p1, %p2 : !ptr.ptr<#ptr.generic_space> -> i64 + + // Shaped pointers + %diffs = ptr.ptr_diff nsw %ptrs1, %ptrs2 : + vector<4x!ptr.ptr<#ptr.generic_space>> -> vector<4xi64> + ``` + }]; + let arguments = (ins + Ptr_PtrLikeType:$lhs, Ptr_PtrLikeType:$rhs, + DefaultValuedProp, "PtrDiffFlags::none">:$flags + ); + let results = (outs Ptr_IntLikeType:$result); + let assemblyFormat = [{ + ($flags^)? $lhs `,` $rhs attr-dict `:` type($lhs) `->` type($result) + }]; + let extraClassDeclaration = [{ + /// Returns the operand's ptr type. + ptr::PtrType getPtrType(); + /// Returns the result's underlying int type. + Type getIntType(); + }]; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // ScatterOp //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h b/mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h index 668ee6386f71f..7c735d825b445 100644 --- a/mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h +++ b/mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h @@ -183,6 +183,7 @@ struct SCFTilingOptions { ArrayRef givenTileSizes, ValueRange destinationTensors)>; // Type of the callback function that generates the loop terminator. + // - `loops` : generated loops from the GenerateLoopHeaderFn callback // - `tiledResults` : Tiles of the result computed for the iteration space // tile. // - `resultOffsets` : For each of the `tiledResults`, the offset at which @@ -193,7 +194,8 @@ struct SCFTilingOptions { // tensor. 
// Returns the `CustomLoopHeaderInfo` object (described above) using GenerateLoopTerminatorFn = std::function loops, + ValueRange tiledResults, ArrayRef> resultOffsets, ArrayRef> resultSizes, ValueRange destinationTensors)>; diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td index 953e7c304da85..48759f2a3c9e8 100644 --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td @@ -1331,8 +1331,6 @@ def Tosa_ExpOp : Tosa_ElementwiseUnaryOp<"exp"> { Extension<[Tosa_EXT_BF16]>, ]; - let hasFolder = 1; - let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; } @@ -1385,8 +1383,6 @@ def Tosa_LogOp : Tosa_ElementwiseUnaryOp<"log"> { Extension<[Tosa_EXT_BF16]>, ]; - let hasFolder = 1; - let assemblyFormat = "operands attr-dict `:` functional-type(operands, results)"; } diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td index 553d69cc21d17..93ab120339d55 100644 --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td @@ -282,8 +282,7 @@ def Tosa_Shape : Tosa_Type<"shape", "shape"> { !tosa.shape<0> ``` }]; - let parameters = (ins "int" : $rank); - let builders = [TypeBuilder<(ins "int" : $rank)>]; + let parameters = (ins "int":$rank); let assemblyFormat = "`<` $rank `>`"; let genVerifyDecl = 1; diff --git a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h index 74e1d28ffac82..ba11259790676 100644 --- a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h +++ b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h @@ -9,6 +9,7 @@ #ifndef MLIR_DIALECT_TRANSFORM_TUNEEXTENSION_TUNEEXTENSIONOPS_H #define MLIR_DIALECT_TRANSFORM_TUNEEXTENSION_TUNEEXTENSIONOPS_H +#include "mlir/Dialect/Transform/IR/TransformOps.h" #include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/OpDefinition.h" diff --git a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td index d68d451afac40..d095659fc4838 100644 --- a/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td +++ b/mlir/include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td @@ -11,10 +11,15 @@ include "mlir/Dialect/Transform/IR/TransformDialect.td" include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.td" +include "mlir/Interfaces/ControlFlowInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/IR/BuiltinAttributes.td" include "mlir/IR/CommonAttrConstraints.td" +//===----------------------------------------------------------------------===// +// KnobOp +//===----------------------------------------------------------------------===// + def KnobOp : Op, DeclareOpInterfaceMethods, @@ -52,4 +57,53 @@ def KnobOp : Op` (`=` $selected^ `from`)? 
`options` `=` $options attr-dict `->` type(results)"; } +//===----------------------------------------------------------------------===// +// AlternativesOp +//===----------------------------------------------------------------------===// + +def AlternativesOp : Op, + DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, + SingleBlockImplicitTerminator<"::mlir::transform::YieldOp">, + NoRegionArguments +]> { + let summary = "Represents a choice among its regions, i.e. sub-schedules"; + + let description = [{ + This op represents a choice over which of its regions is to be used. + + When `selected_region` is provided, the semantics are that this op is to be + substituted for by the selected region, meaning the region's results become + the results of this op. Without a provided `selected_region`, the semantics + are that this non-deterministic choice is yet to be resolved -- which in + terms of the op's interpreted semantics is a failure. + + The `selected_region` argument is either an `IntegerAttr` or a param holding + an `IntegerAttr`, which should provide a valid zero-based index with respect + to the number of alternatives, i.e. regions. + }]; + let cppNamespace = [{ mlir::transform::tune }]; + + let arguments = (ins Builtin_StringAttr:$name, + OptionalAttr:$selected_region_attr, + Optional:$selected_region_param); + let results = (outs Variadic:$results); + let regions = (region VariadicRegion>:$alternatives); + + let assemblyFormat = [{ + `<` $name `>` + (`selected_region` `=` custom( + $selected_region_attr, $selected_region_param)^)? + attr-dict-with-keyword + (`:` type($selected_region_param)^)? + (`->` type($results)^)? + regions + }]; + + let hasVerifier = 1; +} + #endif // MLIR_DIALECT_TRANSFORM_TUNEEXTENSION_TUNEEXTENSIONOPS diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h index 63410b8bea747..bbf55f5d507e3 100644 --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h @@ -27,6 +27,7 @@ #include "mlir/Interfaces/DestinationStyleOpInterface.h" #include "mlir/Interfaces/IndexingMapOpInterface.h" #include "mlir/Interfaces/InferTypeOpInterface.h" +#include "mlir/Interfaces/MemOpInterfaces.h" #include "mlir/Interfaces/SideEffectInterfaces.h" #include "mlir/Interfaces/VectorInterfaces.h" #include "mlir/Interfaces/ViewLikeInterface.h" diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td index 26d06624cb976..252c0b72456df 100644 --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td @@ -24,6 +24,7 @@ include "mlir/Interfaces/DestinationStyleOpInterface.td" include "mlir/Interfaces/IndexingMapOpInterface.td" include "mlir/Interfaces/InferIntRangeInterface.td" include "mlir/Interfaces/InferTypeOpInterface.td" +include "mlir/Interfaces/MemOpInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/VectorInterfaces.td" include "mlir/Interfaces/ViewLikeInterface.td" @@ -1246,6 +1247,7 @@ def Vector_TransferReadOp : DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, AttrSizedOperandSegments, DestinationStyleOpInterface ]>, @@ -1493,6 +1495,7 @@ def Vector_TransferWriteOp : DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, AttrSizedOperandSegments, DestinationStyleOpInterface ]>, @@ -1649,6 +1652,7 @@ def 
Vector_TransferWriteOp : def Vector_LoadOp : Vector_Op<"load", [ DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods ]> { let summary = "reads an n-D slice of memory into an n-D vector"; let description = [{ @@ -1765,6 +1769,7 @@ def Vector_LoadOp : Vector_Op<"load", [ def Vector_StoreOp : Vector_Op<"store", [ DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods ]> { let summary = "writes an n-D vector to an n-D slice of memory"; let description = [{ @@ -1869,7 +1874,7 @@ def Vector_StoreOp : Vector_Op<"store", [ } def Vector_MaskedLoadOp : - Vector_Op<"maskedload">, + Vector_Op<"maskedload", [DeclareOpInterfaceMethods]>, Arguments<(ins Arg:$base, Variadic:$indices, VectorOfNonZeroRankOf<[I1]>:$mask, @@ -1961,7 +1966,7 @@ def Vector_MaskedLoadOp : } def Vector_MaskedStoreOp : - Vector_Op<"maskedstore">, + Vector_Op<"maskedstore", [DeclareOpInterfaceMethods]>, Arguments<(ins Arg:$base, Variadic:$indices, VectorOfNonZeroRankOf<[I1]>:$mask, @@ -2041,6 +2046,7 @@ def Vector_MaskedStoreOp : def Vector_GatherOp : Vector_Op<"gather", [ DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, DeclareOpInterfaceMethods ]>, Arguments<(ins Arg, "", [MemRead]>:$base, @@ -2144,7 +2150,7 @@ def Vector_GatherOp : } def Vector_ScatterOp : - Vector_Op<"scatter">, + Vector_Op<"scatter", [DeclareOpInterfaceMethods]>, Arguments<(ins Arg:$base, Variadic:$offsets, VectorOfNonZeroRankOf<[AnyInteger, Index]>:$indices, @@ -2229,7 +2235,7 @@ def Vector_ScatterOp : } def Vector_ExpandLoadOp : - Vector_Op<"expandload">, + Vector_Op<"expandload", [DeclareOpInterfaceMethods]>, Arguments<(ins Arg:$base, Variadic:$indices, FixedVectorOfNonZeroRankOf<[I1]>:$mask, @@ -2317,7 +2323,7 @@ def Vector_ExpandLoadOp : } def Vector_CompressStoreOp : - Vector_Op<"compressstore">, + Vector_Op<"compressstore", [DeclareOpInterfaceMethods]>, Arguments<(ins Arg:$base, Variadic:$indices, FixedVectorOfNonZeroRankOf<[I1]>:$mask, diff --git a/mlir/include/mlir/Dialect/Vector/Transforms/VectorTransformsBase.td b/mlir/include/mlir/Dialect/Vector/Transforms/VectorTransformsBase.td index ef0951ab1d166..34febf2c4ff4b 100644 --- a/mlir/include/mlir/Dialect/Vector/Transforms/VectorTransformsBase.td +++ b/mlir/include/mlir/Dialect/Vector/Transforms/VectorTransformsBase.td @@ -14,10 +14,9 @@ include "mlir/IR/EnumAttr.td" // Lower transpose into element-wise extract and inserts. def VectorTransposeLowering_Elementwise: I32EnumAttrCase<"EltWise", 0, "eltwise">; -// Lower 2-D transpose to `vector.flat_transpose`, maps 1-1 to LLVM matrix -// intrinsics. -def VectorTransposeLowering_FlatTranspose: - I32EnumAttrCase<"Flat", 1, "flat_transpose">; +// Lower directly to LLVM matrix intrinsics. +def VectorTransposeLowering_LLVMIntr: + I32EnumAttrCase<"LLVMIntr", 1, "llvmintr">; // Lower 2-D transpose to `vector.shuffle` on 1-D vector. def VectorTransposeLowering_Shuffle1D: I32EnumAttrCase<"Shuffle1D", 2, "shuffle_1d">; @@ -27,7 +26,7 @@ def VectorTransposeLowering_Shuffle16x16: def VectorTransposeLoweringAttr : I32EnumAttr< "VectorTransposeLowering", "control the lowering of `vector.transpose` operations.", - [VectorTransposeLowering_Elementwise, VectorTransposeLowering_FlatTranspose, + [VectorTransposeLowering_Elementwise, VectorTransposeLowering_LLVMIntr, VectorTransposeLowering_Shuffle1D, VectorTransposeLowering_Shuffle16x16]> { let cppNamespace = "::mlir::vector"; } @@ -48,9 +47,9 @@ def VectorMultiReductionLoweringAttr: I32EnumAttr< // Progressively lower to finer grained `vector.contract` and dot-products. 
def VectorContractLowering_Dot: I32EnumAttrCase<"Dot", 0, "dot">; -// Lower to `vector.matrix_multiply`, maps 1-1 to LLVM matrix intrinsics. -def VectorContractLowering_Matmul: - I32EnumAttrCase<"Matmul", 1, "matmulintrinsics">; +// Lower directly to LLVM intrinsics. +def VectorContractLowering_LLVMIntr: + I32EnumAttrCase<"LLVMIntr", 1, "llvmintr">; // Lower to `vector.outerproduct`. def VectorContractLowering_OuterProduct: I32EnumAttrCase<"OuterProduct", 2, "outerproduct">; @@ -61,7 +60,7 @@ def VectorContractLowering_ParallelArith: def VectorContractLoweringAttr: I32EnumAttr< "VectorContractLowering", "control the lowering of `vector.contract` operations.", - [VectorContractLowering_Dot, VectorContractLowering_Matmul, + [VectorContractLowering_Dot, VectorContractLowering_LLVMIntr, VectorContractLowering_OuterProduct, VectorContractLowering_ParallelArith]> { let cppNamespace = "::mlir::vector"; } diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h index 97163c4532378..a57aadcdcc5b0 100644 --- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h +++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h @@ -227,7 +227,8 @@ bool isLinearizableVector(VectorType type); /// /// Note: all read offsets are set to 0. Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, - ArrayRef inputVectorSizes, Value padValue, + ArrayRef inputVectorSizes, + std::optional padValue = std::nullopt, bool useInBoundsInsteadOfMasking = false, ArrayRef inputScalableVecDims = {}); diff --git a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td index 38c217fc68507..468242d1c2780 100644 --- a/mlir/include/mlir/Dialect/X86Vector/X86Vector.td +++ b/mlir/include/mlir/Dialect/X86Vector/X86Vector.td @@ -468,11 +468,6 @@ def DotInt8Op : AVX_Op<"dot.i8", [Pure, intr += "." + std::to_string(opBitWidth); return intr; } - - SmallVector getIntrinsicOperands( - ::mlir::ArrayRef operands, - const ::mlir::LLVMTypeConverter &typeConverter, - ::mlir::RewriterBase &rewriter); }]; } diff --git a/mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h b/mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h index 44b81796b1313..b74c15e5b7ac1 100644 --- a/mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h @@ -9,9 +9,9 @@ #ifndef MLIR_DIALECT_XEGPU_TRANSFORMS_TRANSFORMS_H #define MLIR_DIALECT_XEGPU_TRANSFORMS_TRANSFORMS_H +#include "mlir/IR/Operation.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/LogicalResult.h" -#include "mlir/IR/Operation.h" #include #include @@ -47,9 +47,11 @@ struct UnrollOptions { /// Function that converts a ShapedType (TensorDescType or VectorType) /// into the unrolled type based on the tileShape. It returns a vector of - /// types representing the unrolled types for simplicity. + /// types representing the unrolled types for simplicity. When + /// `returnSingleType` is true, it returns a vector containing only one single + /// unrolled type. 
using UnrolledTypeFnType = std::function( - ShapedType type, ArrayRef tileShape)>; + ShapedType type, ArrayRef tileShape, bool returnSingleType)>; UnrolledTypeFnType getUnrolledTypes = nullptr; UnrollOptions &setUnrolledTypesFn(UnrolledTypeFnType fn) { getUnrolledTypes = std::move(fn); diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h index 9d8d81a839fcb..9205f16f97bbb 100644 --- a/mlir/include/mlir/IR/Builders.h +++ b/mlir/include/mlir/IR/Builders.h @@ -515,6 +515,12 @@ class OpBuilder : public Builder { /// Create an operation of specific op type at the current insertion point, /// and immediately try to fold it. This functions populates 'results' with /// the results of the operation. + /// + /// Note: This performs opportunistic eager folding during IR construction. + /// The folders are designed to operate efficiently on canonical IR, which + /// this API does not enforce. Complete folding is only expected in the + /// context of canonicalization, which intertwines folders with pattern + /// rewrites until a fixed point is reached. template void createOrFold(SmallVectorImpl &results, Location location, Args &&...args) { diff --git a/mlir/include/mlir/IR/PDLPatternMatch.h.inc b/mlir/include/mlir/IR/PDLPatternMatch.h.inc index 96ba98a850de0..d5fb57d7c360d 100644 --- a/mlir/include/mlir/IR/PDLPatternMatch.h.inc +++ b/mlir/include/mlir/IR/PDLPatternMatch.h.inc @@ -53,7 +53,7 @@ public: /// value is not an instance of `T`. template ::value, T, std::optional>> + std::is_constructible_v, T, std::optional>> ResultT dyn_cast() const { return isa() ? castImpl() : ResultT(); } diff --git a/mlir/include/mlir/Interfaces/CMakeLists.txt b/mlir/include/mlir/Interfaces/CMakeLists.txt index 2add220fdfb7c..a5feb592045c0 100644 --- a/mlir/include/mlir/Interfaces/CMakeLists.txt +++ b/mlir/include/mlir/Interfaces/CMakeLists.txt @@ -8,6 +8,7 @@ add_mlir_interface(IndexingMapOpInterface) add_mlir_interface(InferIntRangeInterface) add_mlir_interface(InferTypeOpInterface) add_mlir_interface(LoopLikeInterface) +add_mlir_interface(MemOpInterfaces) add_mlir_interface(ParallelCombiningOpInterface) add_mlir_interface(RuntimeVerifiableOpInterface) add_mlir_interface(ShapedOpInterfaces) diff --git a/mlir/include/mlir/Interfaces/MemOpInterfaces.h b/mlir/include/mlir/Interfaces/MemOpInterfaces.h new file mode 100644 index 0000000000000..cdc423f5da1a5 --- /dev/null +++ b/mlir/include/mlir/Interfaces/MemOpInterfaces.h @@ -0,0 +1,36 @@ +//===- MemOpInterfaces.h - Memory operation interfaces ----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains declarations of interfaces for operations that interact +// with memory. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_INTERFACES_MEMOPINTERFACES_H +#define MLIR_INTERFACES_MEMOPINTERFACES_H + +#include "mlir/IR/OpDefinition.h" + +namespace mlir { +namespace detail { +/// Attempt to verify the given memory space cast operation. +LogicalResult verifyMemorySpaceCastOpInterface(Operation *op); + +/// Tries to bubble-down inplace a `MemorySpaceCastOpInterface` operation +/// referenced by `operand`. On success, it returns `std::nullopt`.
It +/// returns failure if `operand` doesn't reference a +/// `MemorySpaceCastOpInterface` op. +FailureOr>> +bubbleDownInPlaceMemorySpaceCastImpl(OpOperand &operand, ValueRange results); +} // namespace detail +} // namespace mlir + +/// Include the generated interface declarations. +#include "mlir/Interfaces/MemOpInterfaces.h.inc" + +#endif // MLIR_INTERFACES_MEMOPINTERFACES_H diff --git a/mlir/include/mlir/Interfaces/MemOpInterfaces.td b/mlir/include/mlir/Interfaces/MemOpInterfaces.td new file mode 100644 index 0000000000000..1a64e97c3412d --- /dev/null +++ b/mlir/include/mlir/Interfaces/MemOpInterfaces.td @@ -0,0 +1,125 @@ +//===- MemOpInterfaces.td - Memory operation interfaces -----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains interfaces for operations that interact with memory. +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_INTERFACES_MEMOPINTERFACES_TD +#define MLIR_INTERFACES_MEMOPINTERFACES_TD + +include "mlir/IR/OpBase.td" + +def MemorySpaceCastConsumerOpInterface : + OpInterface<"MemorySpaceCastConsumerOpInterface"> { + let description = [{ + An interface for operations that can consume memory-space cast-like + operations. + + This interface can be used to bubble-down memory-space cast operations, + see the `bubble-down-memory-space-casts` pass for an example. + }]; + let cppNamespace = "::mlir"; + let methods = [ + InterfaceMethod<[{ + Attempt to bubble-down the incoming cast-like operands. On success + returns a `std::optional>`, otherwise it returns + failure. If the optional is `std::nullopt` then the cast was performed + in place, otherwise the method returns a list of replacement values. + If new results are produced, these must be compatible with the original + operation results. + + If the operation was not modified in place, then the interface + guarantees it is valid to erase the original operation. + If the operation was modified in place, then the interface must + guarantee no operations were created by the method, and that no further + IR modification is necessary. + + Any implementations of this method must not erase/replace the original + operation, instead it is the caller responsibility to erase or replace + the op with the results provided by the method. + + Finally, any implementations of this method have to guarantee that the + IR remains valid at all times. + }], + "::llvm::FailureOr>>", + "bubbleDownCasts", + (ins "::mlir::OpBuilder &":$builder) + >, + ]; +} + +def MemorySpaceCastOpInterface : OpInterface<"MemorySpaceCastOpInterface"> { + let description = [{ + An interface for operations that perform memory-space casts. This + interface assumes that the cast operation is `pure`. + + These operations expect to have a well-defined ptr-like operand, and + a well-defined target ptr-like result. + + This interface also allows to determine whether a cast can be bubbled-down + by the `MemorySpaceCastConsumerOpInterface`, allowing control over which + casts can be bubbled-down or not. + }]; + let cppNamespace = "::mlir"; + let methods = [ + InterfaceMethod<[{ + Returns the source ptr-like value. 
+ }], + "::mlir::TypedValue<::mlir::PtrLikeTypeInterface>", "getSourcePtr" + >, + InterfaceMethod<[{ + Returns the target ptr-like value. + }], + "::mlir::TypedValue<::mlir::PtrLikeTypeInterface>", "getTargetPtr" + >, + InterfaceMethod<[{ + Returns whether the memory space cast specified by `tgt` and `src` + is supported. + }], + "bool", "isValidMemorySpaceCast", + (ins "::mlir::PtrLikeTypeInterface":$tgt, + "::mlir::PtrLikeTypeInterface":$src) + >, + InterfaceMethod<[{ + Clones the memory space cast op with the given source and target type. + }], + "::mlir::MemorySpaceCastOpInterface", "cloneMemorySpaceCastOp", + (ins "::mlir::OpBuilder &":$builder, "::mlir::PtrLikeTypeInterface":$tgt, + "::mlir::TypedValue<::mlir::PtrLikeTypeInterface>":$src) + >, + InterfaceMethod<[{ + Returns whether the source pointer of the memory-space cast can be used + by the `MemorySpaceCastConsumerOpInterface::bubbleDownCasts` method to + promote the source pointer and bubble down the cast. + + For example, a cast operation might decide that all casts to the generic + memory-space can be promoted. + }], + "bool", "isSourcePromotable" + > + ]; + let verify = [{ + return ::mlir::detail::verifyMemorySpaceCastOpInterface($_op); + }]; + let extraClassDeclaration = [{ + /// Returns the underlying `MemorySpaceCastOpInterface` op if `value` + /// is produced by a `MemorySpaceCastOpInterface` op, and + /// `isSourcePromotable` returns true, otherwise it returns null. + static ::mlir::MemorySpaceCastOpInterface + getIfPromotableCast(::mlir::Value value) { + auto op = ::llvm::dyn_cast_or_null<::mlir::MemorySpaceCastOpInterface>( + value.getDefiningOp()); + if (!op || !op.isSourcePromotable()) + return nullptr; + return op; + } + }]; +} + +#endif // MLIR_INTERFACES_MEMOPINTERFACES_TD diff --git a/mlir/include/mlir/TableGen/Class.h b/mlir/include/mlir/TableGen/Class.h index 10349676625d1..e6bedc7cc896d 100644 --- a/mlir/include/mlir/TableGen/Class.h +++ b/mlir/include/mlir/TableGen/Class.h @@ -789,6 +789,10 @@ class Class { std::forward(args)...); } + const std::vector> &getMethods() const { + return methods; + } + /// Add a new field to the class. Class fields added this way are always /// private. template diff --git a/mlir/include/mlir/Transforms/BubbleDownMemorySpaceCasts.h b/mlir/include/mlir/Transforms/BubbleDownMemorySpaceCasts.h new file mode 100644 index 0000000000000..99db092879a90 --- /dev/null +++ b/mlir/include/mlir/Transforms/BubbleDownMemorySpaceCasts.h @@ -0,0 +1,20 @@ +//===-- BubbleDownMemorySpaceCasts.h - Bubble down cast patterns ---C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_TRANSFORMS_BUBBLEDOWNMEMORYSPACECASTS_H +#define MLIR_TRANSFORMS_BUBBLEDOWNMEMORYSPACECASTS_H + +namespace mlir { +class PatternBenefit; +class RewritePatternSet; +/// Collect a set of patterns to bubble-down memory-space cast operations. 
+void populateBubbleDownMemorySpaceCastPatterns(RewritePatternSet &patterns, + PatternBenefit benefit); +} // namespace mlir + +#endif // MLIR_TRANSFORMS_BUBBLEDOWNMEMORYSPACECASTS_H diff --git a/mlir/include/mlir/Transforms/FoldUtils.h b/mlir/include/mlir/Transforms/FoldUtils.h index 2e7a6fe3e362c..ee89e8d0e7c3f 100644 --- a/mlir/include/mlir/Transforms/FoldUtils.h +++ b/mlir/include/mlir/Transforms/FoldUtils.h @@ -40,7 +40,10 @@ class OperationFolder { /// deduplicated constants. If successful, replaces `op`'s uses with /// folded results, and returns success. If the op was completely folded it is /// erased. If it is just updated in place, `inPlaceUpdate` is set to true. - LogicalResult tryToFold(Operation *op, bool *inPlaceUpdate = nullptr); + /// On success() and when in-place, the folder is invoked until + /// `maxIterations` is reached (default INT_MAX). + LogicalResult tryToFold(Operation *op, bool *inPlaceUpdate = nullptr, + int maxIterations = INT_MAX); /// Tries to fold a pre-existing constant operation. `constValue` represents /// the value of the constant, and can be optionally passed if the value is @@ -82,7 +85,10 @@ class OperationFolder { /// Tries to perform folding on the given `op`. If successful, populates /// `results` with the results of the folding. - LogicalResult tryToFold(Operation *op, SmallVectorImpl &results); + /// On success() and when in-place, the folder is invoked until + /// `maxIterations` is reached (default INT_MAX). + LogicalResult tryToFold(Operation *op, SmallVectorImpl &results, + int maxIterations = INT_MAX); /// Try to process a set of fold results. Populates `results` on success, /// otherwise leaves it unchanged. diff --git a/mlir/include/mlir/Transforms/Passes.h b/mlir/include/mlir/Transforms/Passes.h index 9cd2ef34e15ea..1c035f2a843ff 100644 --- a/mlir/include/mlir/Transforms/Passes.h +++ b/mlir/include/mlir/Transforms/Passes.h @@ -46,6 +46,7 @@ class GreedyRewriteConfig; #define GEN_PASS_DECL_SYMBOLPRIVATIZE #define GEN_PASS_DECL_TOPOLOGICALSORT #define GEN_PASS_DECL_COMPOSITEFIXEDPOINTPASS +#define GEN_PASS_DECL_BUBBLEDOWNMEMORYSPACECASTS #include "mlir/Transforms/Passes.h.inc" /// Creates an instance of the Canonicalizer pass, configured with default diff --git a/mlir/include/mlir/Transforms/Passes.td b/mlir/include/mlir/Transforms/Passes.td index beb59784947c5..b2b7f20a497e3 100644 --- a/mlir/include/mlir/Transforms/Passes.td +++ b/mlir/include/mlir/Transforms/Passes.td @@ -585,4 +585,48 @@ def CompositeFixedPointPass : Pass<"composite-fixed-point-pass"> { ]; } +def BubbleDownMemorySpaceCasts : + Pass<"bubble-down-memory-space-casts"> { + let summary = "Bubbles down memory-space cast operations."; + let description = [{ + This pass tries to iteratively bubble down all possible memory-space cast + operations. It is important to note that the determination of which casts + are bubbled down is based on the interfaces + `MemorySpaceCastConsumerOpInterface`, and `MemorySpaceCastOpInterface`, and + not the pass. The pass only looks for operations implementing the + `MemorySpaceCastConsumerOpInterface` interface, and invoking the interface + methods to perform the bubbling down. 
+ + Example: + + ```mlir + func.func @op_with_cast_sequence(%arg0: memref<4x4xf32, 1>, %arg1: index, %arg2: f32) -> memref<16xf32> { + %memspacecast = memref.memory_space_cast %arg0 : memref<4x4xf32, 1> to memref<4x4xf32> + %c0 = arith.constant 0 : index + %c4 = arith.constant 4 : index + %expanded = memref.expand_shape %memspacecast [[0], [1, 2]] output_shape [4, 2, 2] : memref<4x4xf32> into memref<4x2x2xf32> + %collapsed = memref.collapse_shape %expanded [[0, 1, 2]] : memref<4x2x2xf32> into memref<16xf32> + %loaded = memref.load %collapsed[%c0] : memref<16xf32> + %added = arith.addf %loaded, %arg2 : f32 + memref.store %added, %collapsed[%c0] : memref<16xf32> + %atomic_result = memref.atomic_rmw addf %arg2, %collapsed[%c4] : (f32, memref<16xf32>) -> f32 + return %collapsed : memref<16xf32> + } + // mlir-opt --bubble-down-memory-space-casts + func.func @op_with_cast_sequence(%arg0: memref<4x4xf32, 1>, %arg1: index, %arg2: f32) -> memref<16xf32> { + %c4 = arith.constant 4 : index + %c0 = arith.constant 0 : index + %expand_shape = memref.expand_shape %arg0 [[0], [1, 2]] output_shape [4, 2, 2] : memref<4x4xf32, 1> into memref<4x2x2xf32, 1> + %collapse_shape = memref.collapse_shape %expand_shape [[0, 1, 2]] : memref<4x2x2xf32, 1> into memref<16xf32, 1> + %memspacecast = memref.memory_space_cast %collapse_shape : memref<16xf32, 1> to memref<16xf32> + %0 = memref.load %collapse_shape[%c0] : memref<16xf32, 1> + %1 = arith.addf %0, %arg2 : f32 + memref.store %1, %collapse_shape[%c0] : memref<16xf32, 1> + %2 = memref.atomic_rmw addf %arg2, %collapse_shape[%c4] : (f32, memref<16xf32, 1>) -> f32 + return %memspacecast : memref<16xf32> + } + ``` + }]; +} + #endif // MLIR_TRANSFORMS_PASSES diff --git a/mlir/lib/Analysis/DataFlow/LivenessAnalysis.cpp b/mlir/lib/Analysis/DataFlow/LivenessAnalysis.cpp index 65df355216f74..d705d8d4c7819 100644 --- a/mlir/lib/Analysis/DataFlow/LivenessAnalysis.cpp +++ b/mlir/lib/Analysis/DataFlow/LivenessAnalysis.cpp @@ -109,19 +109,19 @@ LivenessAnalysis::visitOperation(Operation *op, ArrayRef operands, foundLiveResult = true; } LDBG() << "[visitOperation] Adding dependency for result: " << r - << " after op: " << *op; + << " after op: " << OpWithFlags(op, OpPrintingFlags().skipRegions()); addDependency(const_cast(r), getProgramPointAfter(op)); } return success(); } void LivenessAnalysis::visitBranchOperand(OpOperand &operand) { + Operation *op = operand.getOwner(); LDBG() << "Visiting branch operand: " << operand.get() - << " in op: " << *operand.getOwner(); + << " in op: " << OpWithFlags(op, OpPrintingFlags().skipRegions()); // We know (at the moment) and assume (for the future) that `operand` is a // non-forwarded branch operand of a `RegionBranchOpInterface`, // `BranchOpInterface`, `RegionBranchTerminatorOpInterface` or return-like op. - Operation *op = operand.getOwner(); assert((isa(op) || isa(op) || isa(op)) && "expected the op to be `RegionBranchOpInterface`, " @@ -146,12 +146,13 @@ void LivenessAnalysis::visitBranchOperand(OpOperand &operand) { // Therefore, if the result value is live, we conservatively consider the // non-forwarded operand of the region branch operation with result may // live and record all result. 
- for (Value result : op->getResults()) { + for (auto [resultIndex, result] : llvm::enumerate(op->getResults())) { if (getLatticeElement(result)->isLive) { mayLive = true; - LDBG() << "[visitBranchOperand] Non-forwarded branch " - "operand may be live due to live result: " - << result; + LDBG() << "[visitBranchOperand] Non-forwarded branch operand may be " + "live due to live result #" + << resultIndex << ": " + << OpWithFlags(op, OpPrintingFlags().skipRegions()); break; } } @@ -233,7 +234,8 @@ void LivenessAnalysis::visitBranchOperand(OpOperand &operand) { SmallVector resultsLiveness; for (const Value result : op->getResults()) resultsLiveness.push_back(getLatticeElement(result)); - LDBG() << "Visiting operation for non-forwarded branch operand: " << *op; + LDBG() << "Visiting operation for non-forwarded branch operand: " + << OpWithFlags(op, OpPrintingFlags().skipRegions()); (void)visitOperation(op, operandLiveness, resultsLiveness); // We also visit the parent op with the parent's results and this operand if @@ -299,8 +301,6 @@ RunLivenessAnalysis::RunLivenessAnalysis(Operation *op) { // The framework doesn't visit operations in dead blocks, so we need to // explicitly mark them as dead. op->walk([&](Operation *op) { - if (op->getNumResults() == 0) - return; for (auto result : llvm::enumerate(op->getResults())) { if (getLiveness(result.value())) continue; diff --git a/mlir/lib/Bindings/Python/DialectLLVM.cpp b/mlir/lib/Bindings/Python/DialectLLVM.cpp index 55b9331270cdc..38de4a0e329a0 100644 --- a/mlir/lib/Bindings/Python/DialectLLVM.cpp +++ b/mlir/lib/Bindings/Python/DialectLLVM.cpp @@ -33,21 +33,37 @@ static void populateDialectLLVMSubmodule(const nanobind::module_ &m) { auto llvmStructType = mlir_type_subclass(m, "StructType", mlirTypeIsALLVMStructType); - llvmStructType.def_classmethod( - "get_literal", - [](const nb::object &cls, const std::vector &elements, - bool packed, MlirLocation loc) { - CollectDiagnosticsToStringScope scope(mlirLocationGetContext(loc)); - - MlirType type = mlirLLVMStructTypeLiteralGetChecked( - loc, elements.size(), elements.data(), packed); - if (mlirTypeIsNull(type)) { - throw nb::value_error(scope.takeMessage().c_str()); - } - return cls(type); - }, - "cls"_a, "elements"_a, nb::kw_only(), "packed"_a = false, - "loc"_a = nb::none()); + llvmStructType + .def_classmethod( + "get_literal", + [](const nb::object &cls, const std::vector &elements, + bool packed, MlirLocation loc) { + CollectDiagnosticsToStringScope scope(mlirLocationGetContext(loc)); + + MlirType type = mlirLLVMStructTypeLiteralGetChecked( + loc, elements.size(), elements.data(), packed); + if (mlirTypeIsNull(type)) { + throw nb::value_error(scope.takeMessage().c_str()); + } + return cls(type); + }, + "cls"_a, "elements"_a, nb::kw_only(), "packed"_a = false, + "loc"_a = nb::none()) + .def_classmethod( + "get_literal_unchecked", + [](const nb::object &cls, const std::vector &elements, + bool packed, MlirContext context) { + CollectDiagnosticsToStringScope scope(context); + + MlirType type = mlirLLVMStructTypeLiteralGet( + context, elements.size(), elements.data(), packed); + if (mlirTypeIsNull(type)) { + throw nb::value_error(scope.takeMessage().c_str()); + } + return cls(type); + }, + "cls"_a, "elements"_a, nb::kw_only(), "packed"_a = false, + "context"_a = nb::none()); llvmStructType.def_classmethod( "get_identified", diff --git a/mlir/lib/Bindings/Python/IRAffine.cpp b/mlir/lib/Bindings/Python/IRAffine.cpp index bc6aa0dac6221..7147f2cbad149 100644 --- a/mlir/lib/Bindings/Python/IRAffine.cpp 
+++ b/mlir/lib/Bindings/Python/IRAffine.cpp @@ -574,7 +574,9 @@ void mlir::python::populateIRAffine(nb::module_ &m) { }) .def_prop_ro( "context", - [](PyAffineExpr &self) { return self.getContext().getObject(); }) + [](PyAffineExpr &self) -> nb::typed { + return self.getContext().getObject(); + }) .def("compose", [](PyAffineExpr &self, PyAffineMap &other) { return PyAffineExpr(self.getContext(), @@ -706,28 +708,29 @@ void mlir::python::populateIRAffine(nb::module_ &m) { [](PyAffineMap &self) { return static_cast(llvm::hash_value(self.get().ptr)); }) - .def_static("compress_unused_symbols", - [](const nb::list &affineMaps, - DefaultingPyMlirContext context) { - SmallVector maps; - pyListToVector( - affineMaps, maps, "attempting to create an AffineMap"); - std::vector compressed(affineMaps.size()); - auto populate = [](void *result, intptr_t idx, - MlirAffineMap m) { - static_cast(result)[idx] = (m); - }; - mlirAffineMapCompressUnusedSymbols( - maps.data(), maps.size(), compressed.data(), populate); - std::vector res; - res.reserve(compressed.size()); - for (auto m : compressed) - res.emplace_back(context->getRef(), m); - return res; - }) + .def_static( + "compress_unused_symbols", + [](const nb::list &affineMaps, DefaultingPyMlirContext context) { + SmallVector maps; + pyListToVector( + affineMaps, maps, "attempting to create an AffineMap"); + std::vector compressed(affineMaps.size()); + auto populate = [](void *result, intptr_t idx, MlirAffineMap m) { + static_cast(result)[idx] = (m); + }; + mlirAffineMapCompressUnusedSymbols(maps.data(), maps.size(), + compressed.data(), populate); + std::vector res; + res.reserve(compressed.size()); + for (auto m : compressed) + res.emplace_back(context->getRef(), m); + return res; + }) .def_prop_ro( "context", - [](PyAffineMap &self) { return self.getContext().getObject(); }, + [](PyAffineMap &self) -> nb::typed { + return self.getContext().getObject(); + }, "Context that owns the Affine Map") .def( "dump", [](PyAffineMap &self) { mlirAffineMapDump(self); }, @@ -893,7 +896,9 @@ void mlir::python::populateIRAffine(nb::module_ &m) { }) .def_prop_ro( "context", - [](PyIntegerSet &self) { return self.getContext().getObject(); }) + [](PyIntegerSet &self) -> nb::typed { + return self.getContext().getObject(); + }) .def( "dump", [](PyIntegerSet &self) { mlirIntegerSetDump(self); }, kDumpDocstring) diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp index 212228fbac91e..045c0fbf4630f 100644 --- a/mlir/lib/Bindings/Python/IRAttributes.cpp +++ b/mlir/lib/Bindings/Python/IRAttributes.cpp @@ -485,7 +485,7 @@ class PyArrayAttribute : public PyConcreteAttribute { PyArrayAttributeIterator &dunderIter() { return *this; } - nb::object dunderNext() { + nb::typed dunderNext() { // TODO: Throw is an inefficient way to stop iteration. 
if (nextIndex >= mlirArrayAttrGetNumElements(attr.get())) throw nb::stop_iteration(); @@ -526,7 +526,8 @@ class PyArrayAttribute : public PyConcreteAttribute { "Gets a uniqued Array attribute"); c.def( "__getitem__", - [](PyArrayAttribute &arr, intptr_t i) { + [](PyArrayAttribute &arr, + intptr_t i) -> nb::typed { if (i >= mlirArrayAttrGetNumElements(arr)) throw nb::index_error("ArrayAttribute index out of range"); return PyAttribute(arr.getContext(), arr.getItem(i)).maybeDownCast(); @@ -574,6 +575,18 @@ class PyFloatAttribute : public PyConcreteAttribute { }, nb::arg("type"), nb::arg("value"), nb::arg("loc") = nb::none(), "Gets an uniqued float point attribute associated to a type"); + c.def_static( + "get_unchecked", + [](PyType &type, double value, DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirAttribute attr = + mlirFloatAttrDoubleGet(context.get()->get(), type, value); + if (mlirAttributeIsNull(attr)) + throw MLIRError("Invalid attribute", errors.take()); + return PyFloatAttribute(type.getContext(), attr); + }, + nb::arg("type"), nb::arg("value"), nb::arg("context") = nb::none(), + "Gets an uniqued float point attribute associated to a type"); c.def_static( "get_f32", [](double value, DefaultingPyMlirContext context) { @@ -1010,14 +1023,16 @@ class PyDenseElementsAttribute [](PyDenseElementsAttribute &self) -> bool { return mlirDenseElementsAttrIsSplat(self); }) - .def("get_splat_value", [](PyDenseElementsAttribute &self) { - if (!mlirDenseElementsAttrIsSplat(self)) - throw nb::value_error( - "get_splat_value called on a non-splat attribute"); - return PyAttribute(self.getContext(), - mlirDenseElementsAttrGetSplatValue(self)) - .maybeDownCast(); - }); + .def("get_splat_value", + [](PyDenseElementsAttribute &self) + -> nb::typed { + if (!mlirDenseElementsAttrIsSplat(self)) + throw nb::value_error( + "get_splat_value called on a non-splat attribute"); + return PyAttribute(self.getContext(), + mlirDenseElementsAttrGetSplatValue(self)) + .maybeDownCast(); + }); } static PyType_Slot slots[]; @@ -1332,7 +1347,7 @@ class PyDenseIntElementsAttribute /// Returns the element at the given linear position. Asserts if the index /// is out of range. 
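A short usage sketch of the `get_unchecked` classmethod added to `FloatAttr` above; illustrative only, assuming the standard upstream `mlir` Python package:

```python
from mlir.ir import Context, F32Type, FloatAttr

with Context():
    f32 = F32Type.get()
    # Existing form: errors are reported against a Location.
    a = FloatAttr.get(f32, 1.5)
    # New form added in this patch: built from a Context instead of a
    # Location, using the non-checked C API constructor.
    b = FloatAttr.get_unchecked(f32, 1.5)
    assert a == b  # attributes are uniqued, so both handles compare equal
```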
- nb::object dunderGetItem(intptr_t pos) { + nb::int_ dunderGetItem(intptr_t pos) { if (pos < 0 || pos >= dunderLen()) { throw nb::index_error("attempt to access out of bounds element"); } @@ -1522,13 +1537,15 @@ class PyDictAttribute : public PyConcreteAttribute { }, nb::arg("value") = nb::dict(), nb::arg("context") = nb::none(), "Gets an uniqued dict attribute"); - c.def("__getitem__", [](PyDictAttribute &self, const std::string &name) { - MlirAttribute attr = - mlirDictionaryAttrGetElementByName(self, toMlirStringRef(name)); - if (mlirAttributeIsNull(attr)) - throw nb::key_error("attempt to access a non-existent attribute"); - return PyAttribute(self.getContext(), attr).maybeDownCast(); - }); + c.def("__getitem__", + [](PyDictAttribute &self, + const std::string &name) -> nb::typed { + MlirAttribute attr = + mlirDictionaryAttrGetElementByName(self, toMlirStringRef(name)); + if (mlirAttributeIsNull(attr)) + throw nb::key_error("attempt to access a non-existent attribute"); + return PyAttribute(self.getContext(), attr).maybeDownCast(); + }); c.def("__getitem__", [](PyDictAttribute &self, intptr_t index) { if (index < 0 || index >= self.dunderLen()) { throw nb::index_error("attempt to access out of bounds attribute"); @@ -1594,10 +1611,11 @@ class PyTypeAttribute : public PyConcreteAttribute { }, nb::arg("value"), nb::arg("context") = nb::none(), "Gets a uniqued Type attribute"); - c.def_prop_ro("value", [](PyTypeAttribute &self) { - return PyType(self.getContext(), mlirTypeAttrGetValue(self.get())) - .maybeDownCast(); - }); + c.def_prop_ro( + "value", [](PyTypeAttribute &self) -> nb::typed { + return PyType(self.getContext(), mlirTypeAttrGetValue(self.get())) + .maybeDownCast(); + }); } }; diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp index 4b238e11c7fff..32b2b0c648cff 100644 --- a/mlir/lib/Bindings/Python/IRCore.cpp +++ b/mlir/lib/Bindings/Python/IRCore.cpp @@ -513,7 +513,7 @@ class PyOperationIterator { PyOperationIterator &dunderIter() { return *this; } - nb::object dunderNext() { + nb::typed dunderNext() { parentOperation->checkValid(); if (mlirOperationIsNull(next)) { throw nb::stop_iteration(); @@ -562,7 +562,7 @@ class PyOperationList { return count; } - nb::object dunderGetItem(intptr_t index) { + nb::typed dunderGetItem(intptr_t index) { parentOperation->checkValid(); if (index < 0) { index += dunderLen(); @@ -725,7 +725,7 @@ nb::object PyMlirContext::attachDiagnosticHandler(nb::object callback) { new PyDiagnosticHandler(get(), std::move(callback)); nb::object pyHandlerObject = nb::cast(pyHandler, nb::rv_policy::take_ownership); - pyHandlerObject.inc_ref(); + (void)pyHandlerObject.inc_ref(); // In these C callbacks, the userData is a PyDiagnosticHandler* that is // guaranteed to be known to pybind. 
@@ -1395,7 +1395,7 @@ nb::object PyOperation::getCapsule() { return nb::steal(mlirPythonOperationToCapsule(get())); } -nb::object PyOperation::createFromCapsule(nb::object capsule) { +nb::object PyOperation::createFromCapsule(const nb::object &capsule) { MlirOperation rawOperation = mlirPythonCapsuleToOperation(capsule.ptr()); if (mlirOperationIsNull(rawOperation)) throw nb::python_error(); @@ -1605,7 +1605,9 @@ class PyConcreteValue : public PyValue { }, nb::arg("other_value")); cls.def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, - [](DerivedTy &self) { return self.maybeDownCast(); }); + [](DerivedTy &self) -> nb::typed { + return self.maybeDownCast(); + }); DerivedTy::bindDerived(cls); } @@ -1623,13 +1625,14 @@ class PyOpResult : public PyConcreteValue { using PyConcreteValue::PyConcreteValue; static void bindDerived(ClassTy &c) { - c.def_prop_ro("owner", [](PyOpResult &self) { - assert( - mlirOperationEqual(self.getParentOperation()->get(), - mlirOpResultGetOwner(self.get())) && - "expected the owner of the value in Python to match that in the IR"); - return self.getParentOperation().getObject(); - }); + c.def_prop_ro( + "owner", [](PyOpResult &self) -> nb::typed { + assert(mlirOperationEqual(self.getParentOperation()->get(), + mlirOpResultGetOwner(self.get())) && + "expected the owner of the value in Python to match that in " + "the IR"); + return self.getParentOperation().getObject(); + }); c.def_prop_ro("result_number", [](PyOpResult &self) { return mlirOpResultGetResultNumber(self.get()); }); @@ -1638,9 +1641,9 @@ class PyOpResult : public PyConcreteValue { /// Returns the list of types of the values held by container. template -static std::vector getValueTypes(Container &container, - PyMlirContextRef &context) { - std::vector result; +static std::vector> +getValueTypes(Container &container, PyMlirContextRef &context) { + std::vector> result; result.reserve(container.size()); for (int i = 0, e = container.size(); i < e; ++i) { result.push_back(PyType(context->getRef(), @@ -1671,9 +1674,10 @@ class PyOpResultList : public Sliceable { c.def_prop_ro("types", [](PyOpResultList &self) { return getValueTypes(self, self.operation->getContext()); }); - c.def_prop_ro("owner", [](PyOpResultList &self) { - return self.operation->createOpView(); - }); + c.def_prop_ro("owner", + [](PyOpResultList &self) -> nb::typed { + return self.operation->createOpView(); + }); } PyOperationRef &getOperation() { return operation; } @@ -2104,7 +2108,7 @@ PyInsertionPoint PyInsertionPoint::after(PyOperationBase &op) { size_t PyMlirContext::getLiveModuleCount() { return liveModules.size(); } nb::object PyInsertionPoint::contextEnter(nb::object insertPoint) { - return PyThreadContextEntry::pushInsertionPoint(insertPoint); + return PyThreadContextEntry::pushInsertionPoint(std::move(insertPoint)); } void PyInsertionPoint::contextExit(const nb::object &excType, @@ -2125,7 +2129,7 @@ nb::object PyAttribute::getCapsule() { return nb::steal(mlirPythonAttributeToCapsule(*this)); } -PyAttribute PyAttribute::createFromCapsule(nb::object capsule) { +PyAttribute PyAttribute::createFromCapsule(const nb::object &capsule) { MlirAttribute rawAttr = mlirPythonCapsuleToAttribute(capsule.ptr()); if (mlirAttributeIsNull(rawAttr)) throw nb::python_error(); @@ -2677,7 +2681,8 @@ class PyOpAttributeMap { PyOpAttributeMap(PyOperationRef operation) : operation(std::move(operation)) {} - nb::object dunderGetItemNamed(const std::string &name) { + nb::typed + dunderGetItemNamed(const std::string &name) { MlirAttribute attr = 
mlirOperationGetAttributeByName(operation->get(), toMlirStringRef(name)); if (mlirAttributeIsNull(attr)) { @@ -2962,24 +2967,27 @@ void mlir::python::populateIRCore(nb::module_ &m) { }) .def_static("_get_live_count", &PyMlirContext::getLiveCount) .def("_get_context_again", - [](PyMlirContext &self) { + [](PyMlirContext &self) -> nb::typed { PyMlirContextRef ref = PyMlirContext::forContext(self.get()); return ref.releaseObject(); }) .def("_get_live_module_count", &PyMlirContext::getLiveModuleCount) .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyMlirContext::getCapsule) - .def(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyMlirContext::createFromCapsule) + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, + &PyMlirContext::createFromCapsule) .def("__enter__", &PyMlirContext::contextEnter) .def("__exit__", &PyMlirContext::contextExit, nb::arg("exc_type").none(), nb::arg("exc_value").none(), nb::arg("traceback").none()) .def_prop_ro_static( "current", - [](nb::object & /*class*/) { + [](nb::object & /*class*/) + -> std::optional> { auto *context = PyThreadContextEntry::getDefaultContext(); if (!context) - return nb::none(); + return {}; return nb::cast(context); }, + nb::sig("def current(/) -> Context | None"), "Gets the Context bound to the current thread or raises ValueError") .def_prop_ro( "dialects", @@ -3123,7 +3131,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { //---------------------------------------------------------------------------- nb::class_(m, "DialectRegistry") .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyDialectRegistry::getCapsule) - .def(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyDialectRegistry::createFromCapsule) + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, + &PyDialectRegistry::createFromCapsule) .def(nb::init<>()); //---------------------------------------------------------------------------- @@ -3131,7 +3140,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { //---------------------------------------------------------------------------- nb::class_(m, "Location") .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyLocation::getCapsule) - .def(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyLocation::createFromCapsule) + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyLocation::createFromCapsule) .def("__enter__", &PyLocation::contextEnter) .def("__exit__", &PyLocation::contextExit, nb::arg("exc_type").none(), nb::arg("exc_value").none(), nb::arg("traceback").none()) @@ -3210,13 +3219,11 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::arg("end_line"), nb::arg("end_col"), nb::arg("context") = nb::none(), kContextGetFileRangeDocstring) .def("is_a_file", mlirLocationIsAFileLineColRange) - .def_prop_ro( - "filename", - [](MlirLocation loc) { - return mlirIdentifierStr( - mlirLocationFileLineColRangeGetFilename(loc)); - }, - nb::sig("def filename(self) -> str")) + .def_prop_ro("filename", + [](MlirLocation loc) { + return mlirIdentifierStr( + mlirLocationFileLineColRangeGetFilename(loc)); + }) .def_prop_ro("start_line", mlirLocationFileLineColRangeGetStartLine) .def_prop_ro("start_col", mlirLocationFileLineColRangeGetStartColumn) .def_prop_ro("end_line", mlirLocationFileLineColRangeGetEndLine) @@ -3265,12 +3272,10 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::arg("name"), nb::arg("childLoc") = nb::none(), nb::arg("context") = nb::none(), kContextGetNameLocationDocString) .def("is_a_name", mlirLocationIsAName) - .def_prop_ro( - "name_str", - [](MlirLocation loc) { - return mlirIdentifierStr(mlirLocationNameGetName(loc)); - }, - nb::sig("def name_str(self) -> str")) + .def_prop_ro("name_str", + 
[](MlirLocation loc) { + return mlirIdentifierStr(mlirLocationNameGetName(loc)); + }) .def_prop_ro("child_loc", [](PyLocation &self) { return PyLocation(self.getContext(), @@ -3286,7 +3291,9 @@ void mlir::python::populateIRCore(nb::module_ &m) { "Gets a Location from a LocationAttr") .def_prop_ro( "context", - [](PyLocation &self) { return self.getContext().getObject(); }, + [](PyLocation &self) -> nb::typed { + return self.getContext().getObject(); + }, "Context that owns the Location") .def_prop_ro( "attr", @@ -3313,12 +3320,13 @@ void mlir::python::populateIRCore(nb::module_ &m) { //---------------------------------------------------------------------------- nb::class_(m, "Module", nb::is_weak_referenceable()) .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyModule::getCapsule) - .def(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyModule::createFromCapsule, - kModuleCAPICreate) + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyModule::createFromCapsule, + kModuleCAPICreate) .def("_clear_mlir_module", &PyModule::clearMlirModule) .def_static( "parse", - [](const std::string &moduleAsm, DefaultingPyMlirContext context) { + [](const std::string &moduleAsm, DefaultingPyMlirContext context) + -> nb::typed { PyMlirContext::ErrorCapture errors(context->getRef()); MlirModule module = mlirModuleCreateParse( context->get(), toMlirStringRef(moduleAsm)); @@ -3330,7 +3338,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { kModuleParseDocstring) .def_static( "parse", - [](nb::bytes moduleAsm, DefaultingPyMlirContext context) { + [](nb::bytes moduleAsm, DefaultingPyMlirContext context) + -> nb::typed { PyMlirContext::ErrorCapture errors(context->getRef()); MlirModule module = mlirModuleCreateParse( context->get(), toMlirStringRef(moduleAsm)); @@ -3342,7 +3351,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { kModuleParseDocstring) .def_static( "parseFile", - [](const std::string &path, DefaultingPyMlirContext context) { + [](const std::string &path, DefaultingPyMlirContext context) + -> nb::typed { PyMlirContext::ErrorCapture errors(context->getRef()); MlirModule module = mlirModuleCreateParseFromFile( context->get(), toMlirStringRef(path)); @@ -3354,7 +3364,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { kModuleParseDocstring) .def_static( "create", - [](const std::optional &loc) { + [](const std::optional &loc) + -> nb::typed { PyLocation pyLoc = maybeGetTracebackLocation(loc); MlirModule module = mlirModuleCreateEmpty(pyLoc.get()); return PyModule::forModule(module).releaseObject(); @@ -3362,11 +3373,13 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::arg("loc") = nb::none(), "Creates an empty module") .def_prop_ro( "context", - [](PyModule &self) { return self.getContext().getObject(); }, + [](PyModule &self) -> nb::typed { + return self.getContext().getObject(); + }, "Context that created the Module") .def_prop_ro( "operation", - [](PyModule &self) { + [](PyModule &self) -> nb::typed { return PyOperation::forOperation(self.getContext(), mlirModuleGetOperation(self.get()), self.getRef().releaseObject()) @@ -3430,21 +3443,19 @@ void mlir::python::populateIRCore(nb::module_ &m) { }) .def_prop_ro( "context", - [](PyOperationBase &self) { + [](PyOperationBase &self) -> nb::typed { PyOperation &concreteOperation = self.getOperation(); concreteOperation.checkValid(); return concreteOperation.getContext().getObject(); }, "Context that owns the Operation") - .def_prop_ro( - "name", - [](PyOperationBase &self) { - auto &concreteOperation = self.getOperation(); - concreteOperation.checkValid(); - 
MlirOperation operation = concreteOperation.get(); - return mlirIdentifierStr(mlirOperationGetName(operation)); - }, - nb::sig("def name(self) -> str")) + .def_prop_ro("name", + [](PyOperationBase &self) { + auto &concreteOperation = self.getOperation(); + concreteOperation.checkValid(); + MlirOperation operation = concreteOperation.get(); + return mlirIdentifierStr(mlirOperationGetName(operation)); + }) .def_prop_ro("operands", [](PyOperationBase &self) { return PyOpOperandList(self.getOperation().getRef()); @@ -3461,28 +3472,35 @@ void mlir::python::populateIRCore(nb::module_ &m) { "Returns the list of Operation results.") .def_prop_ro( "result", - [](PyOperationBase &self) { + [](PyOperationBase &self) -> nb::typed { auto &operation = self.getOperation(); return PyOpResult(operation.getRef(), getUniqueResult(operation)) .maybeDownCast(); }, "Shortcut to get an op result if it has only one (throws an error " "otherwise).") - .def_prop_ro( + .def_prop_rw( "location", [](PyOperationBase &self) { PyOperation &operation = self.getOperation(); return PyLocation(operation.getContext(), mlirOperationGetLocation(operation.get())); }, - "Returns the source location the operation was defined or derived " - "from.") + [](PyOperationBase &self, const PyLocation &location) { + PyOperation &operation = self.getOperation(); + mlirOperationSetLocation(operation.get(), location.get()); + }, + nb::for_getter("Returns the source location the operation was " + "defined or derived from."), + nb::for_setter("Sets the source location the operation was defined " + "or derived from.")) .def_prop_ro("parent", - [](PyOperationBase &self) -> nb::object { + [](PyOperationBase &self) + -> std::optional> { auto parent = self.getOperation().getParentOperation(); if (parent) return parent->getObject(); - return nb::none(); + return {}; }) .def( "__str__", @@ -3553,13 +3571,14 @@ void mlir::python::populateIRCore(nb::module_ &m) { "of the parent block.") .def( "clone", - [](PyOperationBase &self, nb::object ip) { + [](PyOperationBase &self, + const nb::object &ip) -> nb::typed { return self.getOperation().clone(ip); }, nb::arg("ip") = nb::none()) .def( "detach_from_parent", - [](PyOperationBase &self) { + [](PyOperationBase &self) -> nb::typed { PyOperation &operation = self.getOperation(); operation.checkValid(); if (!operation.isAttached()) @@ -3578,12 +3597,11 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, "Reports if the operation is attached to its parent block.") .def("erase", [](PyOperationBase &self) { self.getOperation().erase(); }) - .def( - "walk", &PyOperationBase::walk, nb::arg("callback"), - nb::arg("walk_order") = MlirWalkPostOrder, - // clang-format off - nb::sig("def walk(self, callback: Callable[[Operation], WalkResult], walk_order: WalkOrder = " MAKE_MLIR_PYTHON_QUALNAME("ir.WalkOrder.POST_ORDER") ") -> None") - // clang-format on + .def("walk", &PyOperationBase::walk, nb::arg("callback"), + nb::arg("walk_order") = MlirWalkPostOrder, + // clang-format off + nb::sig("def walk(self, callback: Callable[[Operation], WalkResult], walk_order: WalkOrder) -> None") + // clang-format on ); nb::class_(m, "Operation") @@ -3595,7 +3613,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { std::optional attributes, std::optional> successors, int regions, const std::optional &location, - const nb::object &maybeIp, bool inferType) { + const nb::object &maybeIp, + bool inferType) -> nb::typed { // Unpack/validate operands. 
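The `location` property switches from read-only to read/write above, backed by the new `mlirOperationSetLocation` C API later in this diff. A minimal sketch of what that enables from Python; illustrative only:

```python
from mlir.ir import Context, Location, Module

with Context():
    module = Module.parse("module {}")
    op = module.operation
    # Previously only readable; with this patch the location can be replaced.
    op.location = Location.file("rewritten.mlir", 1, 1)
    print(op.location)  # loc("rewritten.mlir":1:1)
```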
llvm::SmallVector mlirOperands; if (operands) { @@ -3620,7 +3639,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { .def_static( "parse", [](const std::string &sourceStr, const std::string &sourceName, - DefaultingPyMlirContext context) { + DefaultingPyMlirContext context) + -> nb::typed { return PyOperation::parse(context->getRef(), sourceStr, sourceName) ->createOpView(); }, @@ -3629,9 +3649,16 @@ void mlir::python::populateIRCore(nb::module_ &m) { "Parses an operation. Supports both text assembly format and binary " "bytecode format.") .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyOperation::getCapsule) - .def(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyOperation::createFromCapsule) - .def_prop_ro("operation", [](nb::object self) { return self; }) - .def_prop_ro("opview", &PyOperation::createOpView) + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, + &PyOperation::createFromCapsule) + .def_prop_ro("operation", + [](nb::object self) -> nb::typed { + return self; + }) + .def_prop_ro("opview", + [](PyOperation &self) -> nb::typed { + return self.createOpView(); + }) .def_prop_ro("block", &PyOperation::getBlock) .def_prop_ro( "successors", @@ -3644,7 +3671,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { auto opViewClass = nb::class_(m, "OpView") - .def(nb::init(), nb::arg("operation")) + .def(nb::init>(), + nb::arg("operation")) .def( "__init__", [](PyOpView *self, std::string_view name, @@ -3671,9 +3699,15 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::arg("successors") = nb::none(), nb::arg("regions") = nb::none(), nb::arg("loc") = nb::none(), nb::arg("ip") = nb::none()) - - .def_prop_ro("operation", &PyOpView::getOperationObject) - .def_prop_ro("opview", [](nb::object self) { return self; }) + .def_prop_ro( + "operation", + [](PyOpView &self) -> nb::typed { + return self.getOperationObject(); + }) + .def_prop_ro("opview", + [](nb::object self) -> nb::typed { + return self; + }) .def( "__str__", [](PyOpView &self) { return nb::str(self.getOperationObject()); }) @@ -3717,7 +3751,8 @@ void mlir::python::populateIRCore(nb::module_ &m) { "Builds a specific, generated OpView based on class level attributes."); opViewClass.attr("parse") = classmethod( [](const nb::object &cls, const std::string &sourceStr, - const std::string &sourceName, DefaultingPyMlirContext context) { + const std::string &sourceName, + DefaultingPyMlirContext context) -> nb::typed { PyOperationRef parsed = PyOperation::parse(context->getRef(), sourceStr, sourceName); @@ -3752,7 +3787,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { "Returns a forward-optimized sequence of blocks.") .def_prop_ro( "owner", - [](PyRegion &self) { + [](PyRegion &self) -> nb::typed { return self.getParentOperation()->createOpView(); }, "Returns the operation owning this region.") @@ -3777,7 +3812,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyBlock::getCapsule) .def_prop_ro( "owner", - [](PyBlock &self) { + [](PyBlock &self) -> nb::typed { return self.getParentOperation()->createOpView(); }, "Returns the owning operation of this block.") @@ -3960,11 +3995,12 @@ void mlir::python::populateIRCore(nb::module_ &m) { "Returns the block that this InsertionPoint points to.") .def_prop_ro( "ref_operation", - [](PyInsertionPoint &self) -> nb::object { + [](PyInsertionPoint &self) + -> std::optional> { auto refOperation = self.getRefOperation(); if (refOperation) return refOperation->getObject(); - return nb::none(); + return {}; }, "The reference operation before which new operations 
are " "inserted, or None if the insertion point is at the end of " @@ -3979,10 +4015,12 @@ void mlir::python::populateIRCore(nb::module_ &m) { .def(nb::init(), nb::arg("cast_from_type"), "Casts the passed attribute to the generic Attribute") .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyAttribute::getCapsule) - .def(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyAttribute::createFromCapsule) + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, + &PyAttribute::createFromCapsule) .def_static( "parse", - [](const std::string &attrSpec, DefaultingPyMlirContext context) { + [](const std::string &attrSpec, DefaultingPyMlirContext context) + -> nb::typed { PyMlirContext::ErrorCapture errors(context->getRef()); MlirAttribute attr = mlirAttributeParseGet( context->get(), toMlirStringRef(attrSpec)); @@ -3995,10 +4033,12 @@ void mlir::python::populateIRCore(nb::module_ &m) { "failure.") .def_prop_ro( "context", - [](PyAttribute &self) { return self.getContext().getObject(); }, + [](PyAttribute &self) -> nb::typed { + return self.getContext().getObject(); + }, "Context that owns the Attribute") .def_prop_ro("type", - [](PyAttribute &self) { + [](PyAttribute &self) -> nb::typed { return PyType(self.getContext(), mlirAttributeGetType(self)) .maybeDownCast(); @@ -4049,7 +4089,10 @@ void mlir::python::populateIRCore(nb::module_ &m) { "mlirTypeID was expected to be non-null."); return PyTypeID(mlirTypeID); }) - .def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, &PyAttribute::maybeDownCast); + .def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, + [](PyAttribute &self) -> nb::typed { + return self.maybeDownCast(); + }); //---------------------------------------------------------------------------- // Mapping of PyNamedAttribute @@ -4074,7 +4117,6 @@ void mlir::python::populateIRCore(nb::module_ &m) { [](PyNamedAttribute &self) { return mlirIdentifierStr(self.namedAttr.name); }, - nb::sig("def name(self) -> str"), "The name of the NamedAttribute binding") .def_prop_ro( "attr", @@ -4091,10 +4133,11 @@ void mlir::python::populateIRCore(nb::module_ &m) { .def(nb::init(), nb::arg("cast_from_type"), "Casts the passed type to the generic Type") .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyType::getCapsule) - .def(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyType::createFromCapsule) + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyType::createFromCapsule) .def_static( "parse", - [](std::string typeSpec, DefaultingPyMlirContext context) { + [](std::string typeSpec, + DefaultingPyMlirContext context) -> nb::typed { PyMlirContext::ErrorCapture errors(context->getRef()); MlirType type = mlirTypeParseGet(context->get(), toMlirStringRef(typeSpec)); @@ -4105,7 +4148,10 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::arg("asm"), nb::arg("context") = nb::none(), kContextParseTypeDocstring) .def_prop_ro( - "context", [](PyType &self) { return self.getContext().getObject(); }, + "context", + [](PyType &self) -> nb::typed { + return self.getContext().getObject(); + }, "Context that owns the Type") .def("__eq__", [](PyType &self, PyType &other) { return self == other; }) .def( @@ -4139,7 +4185,10 @@ void mlir::python::populateIRCore(nb::module_ &m) { printAccum.parts.append(")"); return printAccum.join(); }) - .def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, &PyType::maybeDownCast) + .def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, + [](PyType &self) -> nb::typed { + return self.maybeDownCast(); + }) .def_prop_ro("typeid", [](PyType &self) { MlirTypeID mlirTypeID = mlirTypeGetTypeID(self); if (!mlirTypeIDIsNull(mlirTypeID)) @@ -4154,7 +4203,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { 
//---------------------------------------------------------------------------- nb::class_(m, "TypeID") .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyTypeID::getCapsule) - .def(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyTypeID::createFromCapsule) + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyTypeID::createFromCapsule) // Note, this tests whether the underlying TypeIDs are the same, // not whether the wrapper MlirTypeIDs are the same, nor whether // the Python objects are the same (i.e., PyTypeID is a value type). @@ -4175,10 +4224,10 @@ void mlir::python::populateIRCore(nb::module_ &m) { nb::class_(m, "Value") .def(nb::init(), nb::keep_alive<0, 1>(), nb::arg("value")) .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, &PyValue::getCapsule) - .def(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyValue::createFromCapsule) + .def_static(MLIR_PYTHON_CAPI_FACTORY_ATTR, &PyValue::createFromCapsule) .def_prop_ro( "context", - [](PyValue &self) { + [](PyValue &self) -> nb::typed { return self.getParentOperation()->getContext().getObject(); }, "Context in which the value lives.") @@ -4266,7 +4315,7 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, nb::arg("state"), kGetNameAsOperand) .def_prop_ro("type", - [](PyValue &self) { + [](PyValue &self) -> nb::typed { return PyType(self.getParentOperation()->getContext(), mlirValueGetType(self.get())) .maybeDownCast(); @@ -4285,17 +4334,15 @@ void mlir::python::populateIRCore(nb::module_ &m) { kValueReplaceAllUsesWithDocstring) .def( "replace_all_uses_except", - [](MlirValue self, MlirValue with, PyOperation &exception) { + [](PyValue &self, PyValue &with, PyOperation &exception) { MlirOperation exceptedUser = exception.get(); mlirValueReplaceAllUsesExcept(self, with, 1, &exceptedUser); }, nb::arg("with_"), nb::arg("exceptions"), - nb::sig("def replace_all_uses_except(self, with_: Value, exceptions: " - "Operation) -> None"), kValueReplaceAllUsesExceptDocstring) .def( "replace_all_uses_except", - [](MlirValue self, MlirValue with, nb::list exceptions) { + [](PyValue &self, PyValue &with, const nb::list &exceptions) { // Convert Python list to a SmallVector of MlirOperations llvm::SmallVector exceptionOps; for (nb::handle exception : exceptions) { @@ -4307,8 +4354,6 @@ void mlir::python::populateIRCore(nb::module_ &m) { exceptionOps.data()); }, nb::arg("with_"), nb::arg("exceptions"), - nb::sig("def replace_all_uses_except(self, with_: Value, exceptions: " - "Sequence[Operation]) -> None"), kValueReplaceAllUsesExceptDocstring) .def( "replace_all_uses_except", @@ -4332,7 +4377,10 @@ void mlir::python::populateIRCore(nb::module_ &m) { }, nb::arg("with_"), nb::arg("exceptions"), kValueReplaceAllUsesExceptDocstring) - .def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, &PyValue::maybeDownCast) + .def(MLIR_PYTHON_MAYBE_DOWNCAST_ATTR, + [](PyValue &self) -> nb::typed { + return self.maybeDownCast(); + }) .def_prop_ro( "location", [](MlirValue self) { @@ -4357,7 +4405,11 @@ void mlir::python::populateIRCore(nb::module_ &m) { //---------------------------------------------------------------------------- nb::class_(m, "SymbolTable") .def(nb::init()) - .def("__getitem__", &PySymbolTable::dunderGetItem) + .def("__getitem__", + [](PySymbolTable &self, + const std::string &name) -> nb::typed { + return self.dunderGetItem(name); + }) .def("insert", &PySymbolTable::insert, nb::arg("operation")) .def("erase", &PySymbolTable::erase, nb::arg("operation")) .def("__delitem__", &PySymbolTable::dunderDel) diff --git a/mlir/lib/Bindings/Python/IRInterfaces.cpp b/mlir/lib/Bindings/Python/IRInterfaces.cpp index 
44aad10ded082..31d4798ffb906 100644 --- a/mlir/lib/Bindings/Python/IRInterfaces.cpp +++ b/mlir/lib/Bindings/Python/IRInterfaces.cpp @@ -212,22 +212,18 @@ class PyConcreteOpInterface { /// Returns the operation instance from which this object was constructed. /// Throws a type error if this object was constructed from a subclass of /// OpView. - nb::object getOperationObject() { - if (operation == nullptr) { + nb::typed getOperationObject() { + if (operation == nullptr) throw nb::type_error("Cannot get an operation from a static interface"); - } - return operation->getRef().releaseObject(); } /// Returns the opview of the operation instance from which this object was /// constructed. Throws a type error if this object was constructed form a /// subclass of OpView. - nb::object getOpView() { - if (operation == nullptr) { + nb::typed getOpView() { + if (operation == nullptr) throw nb::type_error("Cannot get an opview from a static interface"); - } - return operation->createOpView(); } @@ -362,10 +358,9 @@ class PyShapedTypeComponents { "Returns whether the given shaped type component is ranked.") .def_prop_ro( "rank", - [](PyShapedTypeComponents &self) -> nb::object { - if (!self.ranked) { - return nb::none(); - } + [](PyShapedTypeComponents &self) -> std::optional { + if (!self.ranked) + return {}; return nb::int_(self.shape.size()); }, "Returns the rank of the given ranked shaped type components. If " @@ -373,10 +368,9 @@ class PyShapedTypeComponents { "returned.") .def_prop_ro( "shape", - [](PyShapedTypeComponents &self) -> nb::object { - if (!self.ranked) { - return nb::none(); - } + [](PyShapedTypeComponents &self) -> std::optional { + if (!self.ranked) + return {}; return nb::list(self.shape); }, "Returns the shape of the ranked shaped type components as a list " diff --git a/mlir/lib/Bindings/Python/IRModule.h b/mlir/lib/Bindings/Python/IRModule.h index 6e97c00d478f1..edbd73eade906 100644 --- a/mlir/lib/Bindings/Python/IRModule.h +++ b/mlir/lib/Bindings/Python/IRModule.h @@ -273,8 +273,7 @@ class DefaultingPyMlirContext : public Defaulting { public: using Defaulting::Defaulting; - static constexpr const char kTypeDescription[] = - MAKE_MLIR_PYTHON_QUALNAME("ir.Context"); + static constexpr const char kTypeDescription[] = "Context"; static PyMlirContext &resolve(); }; @@ -500,8 +499,7 @@ class DefaultingPyLocation : public Defaulting { public: using Defaulting::Defaulting; - static constexpr const char kTypeDescription[] = - MAKE_MLIR_PYTHON_QUALNAME("ir.Location"); + static constexpr const char kTypeDescription[] = "Location"; static PyLocation &resolve(); operator MlirLocation() const { return *get(); } @@ -671,7 +669,7 @@ class PyOperation : public PyOperationBase, public BaseContextObject { /// Creates a PyOperation from the MlirOperation wrapped by a capsule. /// Ownership of the underlying MlirOperation is taken by calling this /// function. - static nanobind::object createFromCapsule(nanobind::object capsule); + static nanobind::object createFromCapsule(const nanobind::object &capsule); /// Creates an operation. See corresponding python docstring. static nanobind::object @@ -1020,7 +1018,7 @@ class PyAttribute : public BaseContextObject { /// Note that PyAttribute instances are uniqued, so the returned object /// may be a pre-existing object. Ownership of the underlying MlirAttribute /// is taken by calling this function. 
- static PyAttribute createFromCapsule(nanobind::object capsule); + static PyAttribute createFromCapsule(const nanobind::object &capsule); nanobind::object maybeDownCast(); @@ -1101,10 +1099,12 @@ class PyConcreteAttribute : public BaseTy { return DerivedTy::isaFunction(otherAttr); }, nanobind::arg("other")); - cls.def_prop_ro("type", [](PyAttribute &attr) { - return PyType(attr.getContext(), mlirAttributeGetType(attr)) - .maybeDownCast(); - }); + cls.def_prop_ro( + "type", + [](PyAttribute &attr) -> nanobind::typed { + return PyType(attr.getContext(), mlirAttributeGetType(attr)) + .maybeDownCast(); + }); cls.def_prop_ro_static( "static_typeid", [](nanobind::object & /*class*/) -> PyTypeID { diff --git a/mlir/lib/Bindings/Python/IRTypes.cpp b/mlir/lib/Bindings/Python/IRTypes.cpp index cab3bf549295b..34c5b8dd86a66 100644 --- a/mlir/lib/Bindings/Python/IRTypes.cpp +++ b/mlir/lib/Bindings/Python/IRTypes.cpp @@ -501,7 +501,7 @@ class PyComplexType : public PyConcreteType { "Create a complex type"); c.def_prop_ro( "element_type", - [](PyComplexType &self) { + [](PyComplexType &self) -> nb::typed { return PyType(self.getContext(), mlirComplexTypeGetElementType(self)) .maybeDownCast(); }, @@ -515,7 +515,7 @@ class PyComplexType : public PyConcreteType { void mlir::PyShapedType::bindDerived(ClassTy &c) { c.def_prop_ro( "element_type", - [](PyShapedType &self) { + [](PyShapedType &self) -> nb::typed { return PyType(self.getContext(), mlirShapedTypeGetElementType(self)) .maybeDownCast(); }, @@ -639,11 +639,16 @@ class PyVectorType : public PyConcreteType { using PyConcreteType::PyConcreteType; static void bindDerived(ClassTy &c) { - c.def_static("get", &PyVectorType::get, nb::arg("shape"), + c.def_static("get", &PyVectorType::getChecked, nb::arg("shape"), nb::arg("element_type"), nb::kw_only(), nb::arg("scalable") = nb::none(), nb::arg("scalable_dims") = nb::none(), nb::arg("loc") = nb::none(), "Create a vector type") + .def_static("get_unchecked", &PyVectorType::get, nb::arg("shape"), + nb::arg("element_type"), nb::kw_only(), + nb::arg("scalable") = nb::none(), + nb::arg("scalable_dims") = nb::none(), + nb::arg("context") = nb::none(), "Create a vector type") .def_prop_ro( "scalable", [](MlirType self) { return mlirVectorTypeIsScalable(self); }) @@ -658,10 +663,11 @@ class PyVectorType : public PyConcreteType { } private: - static PyVectorType get(std::vector shape, PyType &elementType, - std::optional scalable, - std::optional> scalableDims, - DefaultingPyLocation loc) { + static PyVectorType + getChecked(std::vector shape, PyType &elementType, + std::optional scalable, + std::optional> scalableDims, + DefaultingPyLocation loc) { if (scalable && scalableDims) { throw nb::value_error("'scalable' and 'scalable_dims' kwargs " "are mutually exclusive."); @@ -696,6 +702,42 @@ class PyVectorType : public PyConcreteType { throw MLIRError("Invalid type", errors.take()); return PyVectorType(elementType.getContext(), type); } + + static PyVectorType get(std::vector shape, PyType &elementType, + std::optional scalable, + std::optional> scalableDims, + DefaultingPyMlirContext context) { + if (scalable && scalableDims) { + throw nb::value_error("'scalable' and 'scalable_dims' kwargs " + "are mutually exclusive."); + } + + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirType type; + if (scalable) { + if (scalable->size() != shape.size()) + throw nb::value_error("Expected len(scalable) == len(shape)."); + + SmallVector scalableDimFlags = llvm::to_vector(llvm::map_range( + *scalable, [](const 
nb::handle &h) { return nb::cast(h); })); + type = mlirVectorTypeGetScalable(shape.size(), shape.data(), + scalableDimFlags.data(), elementType); + } else if (scalableDims) { + SmallVector scalableDimFlags(shape.size(), false); + for (int64_t dim : *scalableDims) { + if (static_cast(dim) >= scalableDimFlags.size() || dim < 0) + throw nb::value_error("Scalable dimension index out of bounds."); + scalableDimFlags[dim] = true; + } + type = mlirVectorTypeGetScalable(shape.size(), shape.data(), + scalableDimFlags.data(), elementType); + } else { + type = mlirVectorTypeGet(shape.size(), shape.data(), elementType); + } + if (mlirTypeIsNull(type)) + throw MLIRError("Invalid type", errors.take()); + return PyVectorType(elementType.getContext(), type); + } }; /// Ranked Tensor Type subclass - RankedTensorType. @@ -724,6 +766,22 @@ class PyRankedTensorType nb::arg("shape"), nb::arg("element_type"), nb::arg("encoding") = nb::none(), nb::arg("loc") = nb::none(), "Create a ranked tensor type"); + c.def_static( + "get_unchecked", + [](std::vector shape, PyType &elementType, + std::optional &encodingAttr, + DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirType t = mlirRankedTensorTypeGet( + shape.size(), shape.data(), elementType, + encodingAttr ? encodingAttr->get() : mlirAttributeGetNull()); + if (mlirTypeIsNull(t)) + throw MLIRError("Invalid type", errors.take()); + return PyRankedTensorType(elementType.getContext(), t); + }, + nb::arg("shape"), nb::arg("element_type"), + nb::arg("encoding") = nb::none(), nb::arg("context") = nb::none(), + "Create a ranked tensor type"); c.def_prop_ro( "encoding", [](PyRankedTensorType &self) @@ -731,8 +789,7 @@ class PyRankedTensorType MlirAttribute encoding = mlirRankedTensorTypeGetEncoding(self.get()); if (mlirAttributeIsNull(encoding)) return std::nullopt; - return nb::cast>( - PyAttribute(self.getContext(), encoding).maybeDownCast()); + return PyAttribute(self.getContext(), encoding).maybeDownCast(); }); } }; @@ -759,6 +816,17 @@ class PyUnrankedTensorType }, nb::arg("element_type"), nb::arg("loc") = nb::none(), "Create a unranked tensor type"); + c.def_static( + "get_unchecked", + [](PyType &elementType, DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirType t = mlirUnrankedTensorTypeGet(elementType); + if (mlirTypeIsNull(t)) + throw MLIRError("Invalid type", errors.take()); + return PyUnrankedTensorType(elementType.getContext(), t); + }, + nb::arg("element_type"), nb::arg("context") = nb::none(), + "Create a unranked tensor type"); } }; @@ -791,12 +859,33 @@ class PyMemRefType : public PyConcreteType { nb::arg("shape"), nb::arg("element_type"), nb::arg("layout") = nb::none(), nb::arg("memory_space") = nb::none(), nb::arg("loc") = nb::none(), "Create a memref type") + .def_static( + "get_unchecked", + [](std::vector shape, PyType &elementType, + PyAttribute *layout, PyAttribute *memorySpace, + DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirAttribute layoutAttr = + layout ? *layout : mlirAttributeGetNull(); + MlirAttribute memSpaceAttr = + memorySpace ? 
*memorySpace : mlirAttributeGetNull(); + MlirType t = + mlirMemRefTypeGet(elementType, shape.size(), shape.data(), + layoutAttr, memSpaceAttr); + if (mlirTypeIsNull(t)) + throw MLIRError("Invalid type", errors.take()); + return PyMemRefType(elementType.getContext(), t); + }, + nb::arg("shape"), nb::arg("element_type"), + nb::arg("layout") = nb::none(), + nb::arg("memory_space") = nb::none(), + nb::arg("context") = nb::none(), "Create a memref type") .def_prop_ro( "layout", [](PyMemRefType &self) -> nb::typed { - return nb::cast>( - PyAttribute(self.getContext(), mlirMemRefTypeGetLayout(self)) - .maybeDownCast()); + return PyAttribute(self.getContext(), + mlirMemRefTypeGetLayout(self)) + .maybeDownCast(); }, "The layout of the MemRef type.") .def( @@ -825,8 +914,7 @@ class PyMemRefType : public PyConcreteType { MlirAttribute a = mlirMemRefTypeGetMemorySpace(self); if (mlirAttributeIsNull(a)) return std::nullopt; - return nb::cast>( - PyAttribute(self.getContext(), a).maybeDownCast()); + return PyAttribute(self.getContext(), a).maybeDownCast(); }, "Returns the memory space of the given MemRef type."); } @@ -860,6 +948,22 @@ class PyUnrankedMemRefType }, nb::arg("element_type"), nb::arg("memory_space").none(), nb::arg("loc") = nb::none(), "Create a unranked memref type") + .def_static( + "get_unchecked", + [](PyType &elementType, PyAttribute *memorySpace, + DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirAttribute memSpaceAttr = {}; + if (memorySpace) + memSpaceAttr = *memorySpace; + + MlirType t = mlirUnrankedMemRefTypeGet(elementType, memSpaceAttr); + if (mlirTypeIsNull(t)) + throw MLIRError("Invalid type", errors.take()); + return PyUnrankedMemRefType(elementType.getContext(), t); + }, + nb::arg("element_type"), nb::arg("memory_space").none(), + nb::arg("context") = nb::none(), "Create a unranked memref type") .def_prop_ro( "memory_space", [](PyUnrankedMemRefType &self) @@ -867,8 +971,7 @@ class PyUnrankedMemRefType MlirAttribute a = mlirUnrankedMemrefGetMemorySpace(self); if (mlirAttributeIsNull(a)) return std::nullopt; - return nb::cast>( - PyAttribute(self.getContext(), a).maybeDownCast()); + return PyAttribute(self.getContext(), a).maybeDownCast(); }, "Returns the memory space of the given Unranked MemRef type."); } @@ -907,12 +1010,12 @@ class PyTupleType : public PyConcreteType { }, nb::arg("elements"), nb::arg("context") = nb::none(), // clang-format off - nb::sig("def get_tuple(elements: Sequence[Type], context: mlir.ir.Context | None = None) -> TupleType"), + nb::sig("def get_tuple(elements: Sequence[Type], context: Context | None = None) -> TupleType"), // clang-format on "Create a tuple type"); c.def( "get_type", - [](PyTupleType &self, intptr_t pos) { + [](PyTupleType &self, intptr_t pos) -> nb::typed { return PyType(self.getContext(), mlirTupleTypeGetType(self, pos)) .maybeDownCast(); }, @@ -967,7 +1070,7 @@ class PyFunctionType : public PyConcreteType { }, nb::arg("inputs"), nb::arg("results"), nb::arg("context") = nb::none(), // clang-format off - nb::sig("def get(inputs: Sequence[Type], results: Sequence[Type], context: mlir.ir.Context | None = None) -> FunctionType"), + nb::sig("def get(inputs: Sequence[Type], results: Sequence[Type], context: Context | None = None) -> FunctionType"), // clang-format on "Gets a FunctionType from a list of input and result types"); c.def_prop_ro( diff --git a/mlir/lib/Bindings/Python/MainModule.cpp b/mlir/lib/Bindings/Python/MainModule.cpp index 52656138843b9..a14f09f77d2c3 100644 --- 
a/mlir/lib/Bindings/Python/MainModule.cpp +++ b/mlir/lib/Bindings/Python/MainModule.cpp @@ -115,9 +115,6 @@ NB_MODULE(_mlir, m) { }); }, "typeid"_a, nb::kw_only(), "replace"_a = false, - // clang-format off - nb::sig("def register_type_caster(typeid: " MAKE_MLIR_PYTHON_QUALNAME("ir.TypeID") ", *, replace: bool = False) -> object"), - // clang-format on "Register a type caster for casting MLIR types to custom user types."); m.def( MLIR_PYTHON_CAPI_VALUE_CASTER_REGISTER_ATTR, @@ -130,9 +127,6 @@ NB_MODULE(_mlir, m) { }); }, "typeid"_a, nb::kw_only(), "replace"_a = false, - // clang-format off - nb::sig("def register_value_caster(typeid: " MAKE_MLIR_PYTHON_QUALNAME("ir.TypeID") ", *, replace: bool = False) -> object"), - // clang-format on "Register a value caster for casting MLIR values to custom user values."); // Define and populate IR submodule. diff --git a/mlir/lib/Bindings/Python/Rewrite.cpp b/mlir/lib/Bindings/Python/Rewrite.cpp index 3476793369907..836f44fd7d4be 100644 --- a/mlir/lib/Bindings/Python/Rewrite.cpp +++ b/mlir/lib/Bindings/Python/Rewrite.cpp @@ -9,12 +9,15 @@ #include "Rewrite.h" #include "IRModule.h" +#include "mlir-c/IR.h" #include "mlir-c/Rewrite.h" +#include "mlir-c/Support.h" // clang-format off #include "mlir/Bindings/Python/Nanobind.h" #include "mlir-c/Bindings/Python/Interop.h" // This is expected after nanobind. // clang-format on #include "mlir/Config/mlir-config.h" +#include "nanobind/nanobind.h" namespace nb = nanobind; using namespace mlir; @@ -24,6 +27,40 @@ using namespace mlir::python; namespace { #if MLIR_ENABLE_PDL_IN_PATTERNMATCH +static nb::object objectFromPDLValue(MlirPDLValue value) { + if (MlirValue v = mlirPDLValueAsValue(value); !mlirValueIsNull(v)) + return nb::cast(v); + if (MlirOperation v = mlirPDLValueAsOperation(value); !mlirOperationIsNull(v)) + return nb::cast(v); + if (MlirAttribute v = mlirPDLValueAsAttribute(value); !mlirAttributeIsNull(v)) + return nb::cast(v); + if (MlirType v = mlirPDLValueAsType(value); !mlirTypeIsNull(v)) + return nb::cast(v); + + throw std::runtime_error("unsupported PDL value type"); +} + +static std::vector objectsFromPDLValues(size_t nValues, + MlirPDLValue *values) { + std::vector args; + args.reserve(nValues); + for (size_t i = 0; i < nValues; ++i) + args.push_back(objectFromPDLValue(values[i])); + return args; +} + +// Convert the Python object to a boolean. +// If it evaluates to False, treat it as success; +// otherwise, treat it as failure. +// Note that None is considered success. +static MlirLogicalResult logicalResultFromObject(const nb::object &obj) { + if (obj.is_none()) + return mlirLogicalResultSuccess(); + + return nb::cast(obj) ? mlirLogicalResultFailure() + : mlirLogicalResultSuccess(); +} + /// Owning Wrapper around a PDLPatternModule. 
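The helper above fixes the return convention for Python PDL callbacks: a falsy value (including None) means success, a truthy value means failure. A sketch of a native constraint using the registration API added further down in this file; it assumes the upstream `mlir.rewrite` module, a build with PDL enabled, and a hypothetical `PDL_PATTERN_ASM` pattern module that applies the constraint to a `pdl.value`:

```python
from mlir.ir import Context, F32Type, Module
from mlir.rewrite import PDLModule

def is_f32(rewriter, results, args):
    # args[0] is assumed to be bound to an mlir.ir.Value by the pdl pattern.
    value = args[0]
    # Falsy/None -> constraint holds; truthy -> constraint fails.
    return not F32Type.isinstance(value.type)

with Context():
    pdl_patterns = Module.parse(PDL_PATTERN_ASM)  # hypothetical pdl patterns
    pdl_module = PDLModule(pdl_patterns)
    pdl_module.register_constraint_function("is_f32", is_f32)
    frozen = pdl_module.freeze()
```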
class PyPDLPatternModule { public: @@ -38,6 +75,34 @@ class PyPDLPatternModule { } MlirPDLPatternModule get() { return module; } + void registerRewriteFunction(const std::string &name, + const nb::callable &fn) { + mlirPDLPatternModuleRegisterRewriteFunction( + get(), mlirStringRefCreate(name.data(), name.size()), + [](MlirPatternRewriter rewriter, MlirPDLResultList results, + size_t nValues, MlirPDLValue *values, + void *userData) -> MlirLogicalResult { + nb::handle f = nb::handle(static_cast(userData)); + return logicalResultFromObject( + f(rewriter, results, objectsFromPDLValues(nValues, values))); + }, + fn.ptr()); + } + + void registerConstraintFunction(const std::string &name, + const nb::callable &fn) { + mlirPDLPatternModuleRegisterConstraintFunction( + get(), mlirStringRefCreate(name.data(), name.size()), + [](MlirPatternRewriter rewriter, MlirPDLResultList results, + size_t nValues, MlirPDLValue *values, + void *userData) -> MlirLogicalResult { + nb::handle f = nb::handle(static_cast(userData)); + return logicalResultFromObject( + f(rewriter, results, objectsFromPDLValues(nValues, values))); + }, + fn.ptr()); + } + private: MlirPDLPatternModule module; }; @@ -62,7 +127,7 @@ class PyFrozenRewritePatternSet { mlirPythonFrozenRewritePatternSetToCapsule(get())); } - static nb::object createFromCapsule(nb::object capsule) { + static nb::object createFromCapsule(const nb::object &capsule) { MlirFrozenRewritePatternSet rawPm = mlirPythonCapsuleToFrozenRewritePatternSet(capsule.ptr()); if (rawPm.ptr == nullptr) @@ -78,10 +143,48 @@ class PyFrozenRewritePatternSet { /// Create the `mlir.rewrite` here. void mlir::python::populateRewriteSubmodule(nb::module_ &m) { + nb::class_(m, "PatternRewriter"); //---------------------------------------------------------------------------- - // Mapping of the top-level PassManager + // Mapping of the PDLResultList and PDLModule //---------------------------------------------------------------------------- #if MLIR_ENABLE_PDL_IN_PATTERNMATCH + nb::class_(m, "PDLResultList") + .def( + "append", + [](MlirPDLResultList results, const PyValue &value) { + mlirPDLResultListPushBackValue(results, value); + }, + // clang-format off + nb::sig("def append(self, value: " MAKE_MLIR_PYTHON_QUALNAME("ir.Value") ")") + // clang-format on + ) + .def( + "append", + [](MlirPDLResultList results, const PyOperation &op) { + mlirPDLResultListPushBackOperation(results, op); + }, + // clang-format off + nb::sig("def append(self, op: " MAKE_MLIR_PYTHON_QUALNAME("ir.Operation") ")") + // clang-format on + ) + .def( + "append", + [](MlirPDLResultList results, const PyType &type) { + mlirPDLResultListPushBackType(results, type); + }, + // clang-format off + nb::sig("def append(self, type: " MAKE_MLIR_PYTHON_QUALNAME("ir.Type") ")") + // clang-format on + ) + .def( + "append", + [](MlirPDLResultList results, const PyAttribute &attr) { + mlirPDLResultListPushBackAttribute(results, attr); + }, + // clang-format off + nb::sig("def append(self, attr: " MAKE_MLIR_PYTHON_QUALNAME("ir.Attribute") ")") + // clang-format on + ); nb::class_(m, "PDLModule") .def( "__init__", @@ -103,10 +206,27 @@ void mlir::python::populateRewriteSubmodule(nb::module_ &m) { nb::sig("def __init__(self, module: " MAKE_MLIR_PYTHON_QUALNAME("ir.Module") ") -> None"), // clang-format on "module"_a, "Create a PDL module from the given module.") - .def("freeze", [](PyPDLPatternModule &self) { - return new PyFrozenRewritePatternSet(mlirFreezeRewritePattern( - mlirRewritePatternSetFromPDLPatternModule(self.get()))); - 
}); + .def( + "freeze", + [](PyPDLPatternModule &self) { + return new PyFrozenRewritePatternSet(mlirFreezeRewritePattern( + mlirRewritePatternSetFromPDLPatternModule(self.get()))); + }, + nb::keep_alive<0, 1>()) + .def( + "register_rewrite_function", + [](PyPDLPatternModule &self, const std::string &name, + const nb::callable &fn) { + self.registerRewriteFunction(name, fn); + }, + nb::keep_alive<1, 3>()) + .def( + "register_constraint_function", + [](PyPDLPatternModule &self, const std::string &name, + const nb::callable &fn) { + self.registerConstraintFunction(name, fn); + }, + nb::keep_alive<1, 3>()); #endif // MLIR_ENABLE_PDL_IN_PATTERNMATCH nb::class_(m, "FrozenRewritePatternSet") .def_prop_ro(MLIR_PYTHON_CAPI_PTR_ATTR, diff --git a/mlir/lib/CAPI/Dialect/LLVM.cpp b/mlir/lib/CAPI/Dialect/LLVM.cpp index 7a33046c6c872..eaad8a87aab9b 100644 --- a/mlir/lib/CAPI/Dialect/LLVM.cpp +++ b/mlir/lib/CAPI/Dialect/LLVM.cpp @@ -253,17 +253,16 @@ MlirAttribute mlirLLVMDIFileAttrGet(MlirContext ctx, MlirAttribute name, cast(unwrap(directory)))); } -MlirAttribute -mlirLLVMDICompileUnitAttrGet(MlirContext ctx, MlirAttribute id, - unsigned int sourceLanguage, MlirAttribute file, - MlirAttribute producer, bool isOptimized, - MlirLLVMDIEmissionKind emissionKind, - MlirLLVMDINameTableKind nameTableKind) { +MlirAttribute mlirLLVMDICompileUnitAttrGet( + MlirContext ctx, MlirAttribute id, unsigned int sourceLanguage, + MlirAttribute file, MlirAttribute producer, bool isOptimized, + MlirLLVMDIEmissionKind emissionKind, MlirLLVMDINameTableKind nameTableKind, + MlirAttribute splitDebugFilename) { return wrap(DICompileUnitAttr::get( unwrap(ctx), cast(unwrap(id)), sourceLanguage, cast(unwrap(file)), cast(unwrap(producer)), - isOptimized, DIEmissionKind(emissionKind), - DINameTableKind(nameTableKind))); + isOptimized, DIEmissionKind(emissionKind), DINameTableKind(nameTableKind), + cast(unwrap(splitDebugFilename)))); } MlirAttribute mlirLLVMDIFlagsAttrGet(MlirContext ctx, uint64_t value) { diff --git a/mlir/lib/CAPI/IR/IR.cpp b/mlir/lib/CAPI/IR/IR.cpp index e9844a7cc1909..188186598c5c5 100644 --- a/mlir/lib/CAPI/IR/IR.cpp +++ b/mlir/lib/CAPI/IR/IR.cpp @@ -656,6 +656,10 @@ MlirLocation mlirOperationGetLocation(MlirOperation op) { return wrap(unwrap(op)->getLoc()); } +void mlirOperationSetLocation(MlirOperation op, MlirLocation loc) { + unwrap(op)->setLoc(unwrap(loc)); +} + MlirTypeID mlirOperationGetTypeID(MlirOperation op) { if (auto info = unwrap(op)->getRegisteredInfo()) return wrap(info->getTypeID()); diff --git a/mlir/lib/CAPI/Transforms/Rewrite.cpp b/mlir/lib/CAPI/Transforms/Rewrite.cpp index 6f85357a14a18..8ee6308cadf83 100644 --- a/mlir/lib/CAPI/Transforms/Rewrite.cpp +++ b/mlir/lib/CAPI/Transforms/Rewrite.cpp @@ -13,6 +13,8 @@ #include "mlir/CAPI/Rewrite.h" #include "mlir/CAPI/Support.h" #include "mlir/CAPI/Wrap.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/PDLPatternMatch.h.inc" #include "mlir/IR/PatternMatch.h" #include "mlir/Rewrite/FrozenRewritePatternSet.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" @@ -301,6 +303,19 @@ mlirApplyPatternsAndFoldGreedilyWithOp(MlirOperation op, return wrap(mlir::applyPatternsGreedily(unwrap(op), *unwrap(patterns))); } +//===----------------------------------------------------------------------===// +/// PatternRewriter API +//===----------------------------------------------------------------------===// + +inline mlir::PatternRewriter *unwrap(MlirPatternRewriter rewriter) { + assert(rewriter.ptr && "unexpected null rewriter"); + return 
static_cast(rewriter.ptr); +} + +inline MlirPatternRewriter wrap(mlir::PatternRewriter *rewriter) { + return {rewriter}; +} + //===----------------------------------------------------------------------===// /// PDLPatternModule API //===----------------------------------------------------------------------===// @@ -331,4 +346,93 @@ mlirRewritePatternSetFromPDLPatternModule(MlirPDLPatternModule op) { op.ptr = nullptr; return wrap(m); } + +inline const mlir::PDLValue *unwrap(MlirPDLValue value) { + assert(value.ptr && "unexpected null PDL value"); + return static_cast(value.ptr); +} + +inline MlirPDLValue wrap(const mlir::PDLValue *value) { return {value}; } + +inline mlir::PDLResultList *unwrap(MlirPDLResultList results) { + assert(results.ptr && "unexpected null PDL results"); + return static_cast(results.ptr); +} + +inline MlirPDLResultList wrap(mlir::PDLResultList *results) { + return {results}; +} + +MlirValue mlirPDLValueAsValue(MlirPDLValue value) { + return wrap(unwrap(value)->dyn_cast()); +} + +MlirType mlirPDLValueAsType(MlirPDLValue value) { + return wrap(unwrap(value)->dyn_cast()); +} + +MlirOperation mlirPDLValueAsOperation(MlirPDLValue value) { + return wrap(unwrap(value)->dyn_cast()); +} + +MlirAttribute mlirPDLValueAsAttribute(MlirPDLValue value) { + return wrap(unwrap(value)->dyn_cast()); +} + +void mlirPDLResultListPushBackValue(MlirPDLResultList results, + MlirValue value) { + unwrap(results)->push_back(unwrap(value)); +} + +void mlirPDLResultListPushBackType(MlirPDLResultList results, MlirType value) { + unwrap(results)->push_back(unwrap(value)); +} + +void mlirPDLResultListPushBackOperation(MlirPDLResultList results, + MlirOperation value) { + unwrap(results)->push_back(unwrap(value)); +} + +void mlirPDLResultListPushBackAttribute(MlirPDLResultList results, + MlirAttribute value) { + unwrap(results)->push_back(unwrap(value)); +} + +inline std::vector wrap(ArrayRef values) { + std::vector mlirValues; + mlirValues.reserve(values.size()); + for (auto &value : values) { + mlirValues.push_back(wrap(&value)); + } + return mlirValues; +} + +void mlirPDLPatternModuleRegisterRewriteFunction( + MlirPDLPatternModule pdlModule, MlirStringRef name, + MlirPDLRewriteFunction rewriteFn, void *userData) { + unwrap(pdlModule)->registerRewriteFunction( + unwrap(name), + [userData, rewriteFn](PatternRewriter &rewriter, PDLResultList &results, + ArrayRef values) -> LogicalResult { + std::vector mlirValues = wrap(values); + return unwrap(rewriteFn(wrap(&rewriter), wrap(&results), + mlirValues.size(), mlirValues.data(), + userData)); + }); +} + +void mlirPDLPatternModuleRegisterConstraintFunction( + MlirPDLPatternModule pdlModule, MlirStringRef name, + MlirPDLConstraintFunction constraintFn, void *userData) { + unwrap(pdlModule)->registerConstraintFunction( + unwrap(name), + [userData, constraintFn](PatternRewriter &rewriter, + PDLResultList &results, + ArrayRef values) -> LogicalResult { + std::vector mlirValues = wrap(values); + return unwrap(constraintFn(wrap(&rewriter), wrap(&results), + mlirValues.size(), mlirValues.data(), + userData)); + }); +} #endif // MLIR_ENABLE_PDL_IN_PATTERNMATCH diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp index 0078eed8b7a67..85f0fd1dd1048 100644 --- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp +++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp @@ -57,8 +57,25 @@ static Value convertUnsignedToI32(ConversionPatternRewriter &rewriter, static Value 
createI32Constant(ConversionPatternRewriter &rewriter, Location loc, int32_t value) { - Type i32 = rewriter.getI32Type(); - return LLVM::ConstantOp::create(rewriter, loc, i32, value); + return LLVM::ConstantOp::create(rewriter, loc, rewriter.getI32Type(), value); +} + +/// Convert an unsigned number `val` to i64. +static Value convertUnsignedToI64(ConversionPatternRewriter &rewriter, + Location loc, Value val) { + IntegerType i64 = rewriter.getI64Type(); + // Force check that `val` is of int type. + auto valTy = cast(val.getType()); + if (i64 == valTy) + return val; + return valTy.getWidth() > 64 + ? Value(LLVM::TruncOp::create(rewriter, loc, i64, val)) + : Value(LLVM::ZExtOp::create(rewriter, loc, i64, val)); +} + +static Value createI64Constant(ConversionPatternRewriter &rewriter, + Location loc, int64_t value) { + return LLVM::ConstantOp::create(rewriter, loc, rewriter.getI64Type(), value); } static Value createI1Constant(ConversionPatternRewriter &rewriter, Location loc, @@ -95,7 +112,7 @@ static Value getNumRecords(ConversionPatternRewriter &rewriter, Location loc, MemRefType memrefType, MemRefDescriptor &memrefDescriptor, ArrayRef strides, - uint32_t elementByteWidth) { + int64_t elementByteWidth) { if (memrefType.hasStaticShape() && !llvm::any_of(strides, ShapedType::isDynamic)) { int64_t size = memrefType.getRank() == 0 ? 1 : 0; @@ -103,9 +120,7 @@ static Value getNumRecords(ConversionPatternRewriter &rewriter, Location loc, for (uint32_t i = 0, e = memrefType.getRank(); i < e; ++i) size = std::max(shape[i] * strides[i], size); size = size * elementByteWidth; - assert(size < std::numeric_limits::max() && - "the memref buffer is too large"); - return createI32Constant(rewriter, loc, static_cast(size)); + return createI64Constant(rewriter, loc, size); } Value maxIndex; for (uint32_t i = 0, e = memrefType.getRank(); i < e; ++i) { @@ -116,9 +131,9 @@ static Value getNumRecords(ConversionPatternRewriter &rewriter, Location loc, ? LLVM::UMaxOp::create(rewriter, loc, maxIndex, maxThisDim) : maxThisDim; } - Value maxIndexI32 = convertUnsignedToI32(rewriter, loc, maxIndex); - Value byteWidthConst = createI32Constant(rewriter, loc, elementByteWidth); - return LLVM::MulOp::create(rewriter, loc, maxIndexI32, byteWidthConst); + Value maxIndexI64 = convertUnsignedToI64(rewriter, loc, maxIndex); + Value byteWidthConst = createI64Constant(rewriter, loc, elementByteWidth); + return LLVM::MulOp::create(rewriter, loc, maxIndexI64, byteWidthConst); } static Value makeBufferRsrc(ConversionPatternRewriter &rewriter, Location loc, @@ -536,52 +551,49 @@ struct LDSBarrierOpLowering : public ConvertOpToLLVMPattern { LogicalResult matchAndRewrite(LDSBarrierOp op, LDSBarrierOp::Adaptor adaptor, ConversionPatternRewriter &rewriter) const override { - bool requiresInlineAsm = chipset < kGfx90a || chipset.majorVersion == 11; - + Location loc = op.getLoc(); + // This ensures that waits on global memory aren't introduced on + // chips that don't have the BackOffBarrier feature enabled in LLVM. + bool requiresInlineAsm = chipset < kGfx90a; + + Attribute mmra = + rewriter.getAttr("amdgpu-synchronize-as", "local"); + // Note: while there *is* a workgroup-one-as scope, this, when combined with + // the MMRA, will lead to the fence having no effect. 
This is because the + // codepaths for an atomic load or store will observe that a + // one-address-space atomic to LDS requires no synchronization because + // operations on LDS are totally ordered with respect to each other, and so + // will not emit the correct waitcnt operations that these fences are + // intended to produce. Therefore, we use a broader type of fence and rely + // on the MMRA to relax it to the semantics we want. + StringRef scope = "workgroup"; + + auto relFence = LLVM::FenceOp::create(rewriter, loc, + LLVM::AtomicOrdering::release, scope); + relFence->setDiscardableAttr(LLVM::LLVMDialect::getMmraAttrName(), mmra); if (requiresInlineAsm) { auto asmDialectAttr = LLVM::AsmDialectAttr::get(rewriter.getContext(), LLVM::AsmDialect::AD_ATT); - const char *asmStr = - ";;;WARNING: BREAKS DEBUG WATCHES\ns_waitcnt lgkmcnt(0)\ns_barrier"; + const char *asmStr = ";;;WARNING: BREAKS DEBUG WATCHES\ns_barrier"; const char *constraints = ""; - rewriter.replaceOpWithNewOp( - op, + LLVM::InlineAsmOp::create( + rewriter, loc, /*resultTypes=*/TypeRange(), /*operands=*/ValueRange(), /*asm_string=*/asmStr, constraints, /*has_side_effects=*/true, /*is_align_stack=*/false, LLVM::TailCallKind::None, /*asm_dialect=*/asmDialectAttr, /*operand_attrs=*/ArrayAttr()); - return success(); - } - if (chipset.majorVersion < 12) { - constexpr int32_t ldsOnlyBitsGfx6789 = ~(0x1f << 8); - constexpr int32_t ldsOnlyBitsGfx10 = ~(0x3f << 8); - // Left in place in case someone disables the inline ASM path or future - // chipsets use the same bit pattern. - constexpr int32_t ldsOnlyBitsGfx11 = ~(0x3f << 4); - - int32_t ldsOnlyBits; - if (chipset.majorVersion == 11) - ldsOnlyBits = ldsOnlyBitsGfx11; - else if (chipset.majorVersion == 10) - ldsOnlyBits = ldsOnlyBitsGfx10; - else if (chipset.majorVersion <= 9) - ldsOnlyBits = ldsOnlyBitsGfx6789; - else - return op.emitOpError( - "don't know how to lower this for chipset major version") - << chipset.majorVersion; - - Location loc = op->getLoc(); - ROCDL::SWaitcntOp::create(rewriter, loc, ldsOnlyBits); - rewriter.replaceOpWithNewOp(op); + } else if (chipset.majorVersion < 12) { + ROCDL::SBarrierOp::create(rewriter, loc); } else { - Location loc = op->getLoc(); - ROCDL::WaitDscntOp::create(rewriter, loc, 0); ROCDL::BarrierSignalOp::create(rewriter, loc, -1); - rewriter.replaceOpWithNewOp(op, -1); + ROCDL::BarrierWaitOp::create(rewriter, loc, -1); } + auto acqFence = LLVM::FenceOp::create(rewriter, loc, + LLVM::AtomicOrdering::acquire, scope); + acqFence->setDiscardableAttr(LLVM::LLVMDialect::getMmraAttrName(), mmra); + rewriter.replaceOp(op, acqFence); return success(); } }; diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp index 1037e296c8128..2285d2695db4e 100644 --- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp @@ -20,20 +20,20 @@ using namespace mlir; -LLVM::LLVMFuncOp mlir::getOrDefineFunction(gpu::GPUModuleOp moduleOp, - Location loc, OpBuilder &b, - StringRef name, +LLVM::LLVMFuncOp mlir::getOrDefineFunction(Operation *moduleOp, Location loc, + OpBuilder &b, StringRef name, LLVM::LLVMFunctionType type) { - LLVM::LLVMFuncOp ret; - if (!(ret = moduleOp.template lookupSymbol(name))) { - OpBuilder::InsertionGuard guard(b); - b.setInsertionPointToStart(moduleOp.getBody()); - ret = LLVM::LLVMFuncOp::create(b, loc, name, type, LLVM::Linkage::External); - } - return ret; + auto existing = dyn_cast_or_null( + SymbolTable::lookupSymbolIn(moduleOp, 
name)); + if (existing) + return existing; + + OpBuilder::InsertionGuard guard(b); + b.setInsertionPointToStart(&moduleOp->getRegion(0).front()); + return LLVM::LLVMFuncOp::create(b, loc, name, type, LLVM::Linkage::External); } -static SmallString<16> getUniqueSymbolName(gpu::GPUModuleOp moduleOp, +static SmallString<16> getUniqueSymbolName(Operation *moduleOp, StringRef prefix) { // Get a unique global name. unsigned stringNumber = 0; @@ -41,15 +41,16 @@ static SmallString<16> getUniqueSymbolName(gpu::GPUModuleOp moduleOp, do { stringConstName.clear(); (prefix + Twine(stringNumber++)).toStringRef(stringConstName); - } while (moduleOp.lookupSymbol(stringConstName)); + } while (SymbolTable::lookupSymbolIn(moduleOp, stringConstName)); return stringConstName; } -LLVM::GlobalOp -mlir::getOrCreateStringConstant(OpBuilder &b, Location loc, - gpu::GPUModuleOp moduleOp, Type llvmI8, - StringRef namePrefix, StringRef str, - uint64_t alignment, unsigned addrSpace) { +LLVM::GlobalOp mlir::getOrCreateStringConstant(OpBuilder &b, Location loc, + Operation *moduleOp, Type llvmI8, + StringRef namePrefix, + StringRef str, + uint64_t alignment, + unsigned addrSpace) { llvm::SmallString<20> nullTermStr(str); nullTermStr.push_back('\0'); // Null terminate for C auto globalType = @@ -57,7 +58,7 @@ mlir::getOrCreateStringConstant(OpBuilder &b, Location loc, StringAttr attr = b.getStringAttr(nullTermStr); // Try to find existing global. - for (auto globalOp : moduleOp.getOps()) + for (auto globalOp : moduleOp->getRegion(0).getOps()) if (globalOp.getGlobalType() == globalType && globalOp.getConstant() && globalOp.getValueAttr() == attr && globalOp.getAlignment().value_or(0) == alignment && @@ -66,7 +67,7 @@ mlir::getOrCreateStringConstant(OpBuilder &b, Location loc, // Not found: create new global. 
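+  // The global is created at the start of the module-like op's single region,
+  // under a name made unique by getUniqueSymbolName, and with internal
+  // linkage so it stays private to the surrounding (GPU) module.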
OpBuilder::InsertionGuard guard(b); - b.setInsertionPointToStart(moduleOp.getBody()); + b.setInsertionPointToStart(&moduleOp->getRegion(0).front()); SmallString<16> name = getUniqueSymbolName(moduleOp, namePrefix); return LLVM::GlobalOp::create(b, loc, globalType, /*isConstant=*/true, LLVM::Linkage::Internal, @@ -396,10 +397,11 @@ LogicalResult GPUPrintfOpToHIPLowering::matchAndRewrite( auto ptrType = LLVM::LLVMPointerType::get(rewriter.getContext()); mlir::Type llvmI32 = typeConverter->convertType(rewriter.getI32Type()); mlir::Type llvmI64 = typeConverter->convertType(rewriter.getI64Type()); - // Note: this is the GPUModule op, not the ModuleOp that surrounds it - // This ensures that global constants and declarations are placed within - // the device code, not the host code - auto moduleOp = gpuPrintfOp->getParentOfType(); + + Operation *moduleOp = gpuPrintfOp->getParentWithTrait(); + if (!moduleOp) + return rewriter.notifyMatchFailure(gpuPrintfOp, + "Couldn't find a parent module"); auto ocklBegin = getOrDefineFunction(moduleOp, loc, rewriter, "__ockl_printf_begin", @@ -496,10 +498,10 @@ LogicalResult GPUPrintfOpToLLVMCallLowering::matchAndRewrite( mlir::Type ptrType = LLVM::LLVMPointerType::get(rewriter.getContext(), addressSpace); - // Note: this is the GPUModule op, not the ModuleOp that surrounds it - // This ensures that global constants and declarations are placed within - // the device code, not the host code - auto moduleOp = gpuPrintfOp->getParentOfType(); + Operation *moduleOp = gpuPrintfOp->getParentWithTrait(); + if (!moduleOp) + return rewriter.notifyMatchFailure(gpuPrintfOp, + "Couldn't find a parent module"); auto printfType = LLVM::LLVMFunctionType::get(rewriter.getI32Type(), {ptrType}, @@ -541,10 +543,10 @@ LogicalResult GPUPrintfOpToVPrintfLowering::matchAndRewrite( mlir::Type llvmI8 = typeConverter->convertType(rewriter.getIntegerType(8)); mlir::Type ptrType = LLVM::LLVMPointerType::get(rewriter.getContext()); - // Note: this is the GPUModule op, not the ModuleOp that surrounds it - // This ensures that global constants and declarations are placed within - // the device code, not the host code - auto moduleOp = gpuPrintfOp->getParentOfType(); + Operation *moduleOp = gpuPrintfOp->getParentWithTrait(); + if (!moduleOp) + return rewriter.notifyMatchFailure(gpuPrintfOp, + "Couldn't find a parent module"); // Create a valid global location removing any metadata attached to the // location as debug info metadata inside of a function cannot be used outside @@ -663,7 +665,7 @@ static IntegerAttr wrapNumericMemorySpace(MLIRContext *ctx, unsigned space) { /// Generates a symbol with 0-sized array type for dynamic shared memory usage, /// or uses existing symbol. -LLVM::GlobalOp getDynamicSharedMemorySymbol( +static LLVM::GlobalOp getDynamicSharedMemorySymbol( ConversionPatternRewriter &rewriter, gpu::GPUModuleOp moduleOp, gpu::DynamicSharedMemoryOp op, const LLVMTypeConverter *typeConverter, MemRefType memrefType, unsigned alignmentBit) { diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h index e17b06379988c..66d3bb40a8f5a 100644 --- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h +++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h @@ -18,15 +18,18 @@ namespace mlir { // Helper Functions //===----------------------------------------------------------------------===// +/// Note that these functions don't take a `SymbolTable` because GPU module +/// lowerings can have name collisions as an intermediate state. 
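+/// A typical call site locates the nearest symbol-table ancestor and then
+/// requests the declaration, e.g. (illustrative only; `op`, `rewriter` and
+/// `funcType` stand for whatever the lowering has at hand):
+///   Operation *symTableOp = op->getParentWithTrait<OpTrait::SymbolTable>();
+///   LLVM::LLVMFuncOp printfBegin = getOrDefineFunction(
+///       symTableOp, op->getLoc(), rewriter, "__ockl_printf_begin", funcType);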
+ /// Find or create an external function declaration in the given module. -LLVM::LLVMFuncOp getOrDefineFunction(gpu::GPUModuleOp moduleOp, Location loc, +LLVM::LLVMFuncOp getOrDefineFunction(Operation *moduleOp, Location loc, OpBuilder &b, StringRef name, LLVM::LLVMFunctionType type); /// Create a global that contains the given string. If a global with the same /// string already exists in the module, return that global. LLVM::GlobalOp getOrCreateStringConstant(OpBuilder &b, Location loc, - gpu::GPUModuleOp moduleOp, Type llvmI8, + Operation *moduleOp, Type llvmI8, StringRef namePrefix, StringRef str, uint64_t alignment = 0, unsigned addrSpace = 0); diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp index a95263bb55f69..852c50c965f11 100644 --- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp +++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp @@ -436,7 +436,7 @@ void mlir::configureGpuToNVVMConversionLegality(ConversionTarget &target) { LLVM::FAbsOp, LLVM::FCeilOp, LLVM::FFloorOp, LLVM::FRemOp, LLVM::LogOp, LLVM::Log10Op, LLVM::Log2Op, LLVM::PowOp, LLVM::RoundEvenOp, LLVM::RoundOp, LLVM::SinOp, - LLVM::SqrtOp>(); + LLVM::SincosOp, LLVM::SqrtOp>(); // TODO: Remove once we support replacing non-root ops. target.addLegalOp(); @@ -466,6 +466,100 @@ void mlir::configureGpuToNVVMTypeConverter(LLVMTypeConverter &converter) { }); } +struct SincosOpLowering : public ConvertOpToLLVMPattern { + using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; + + LogicalResult + matchAndRewrite(math::SincosOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Location loc = op.getLoc(); + Value input = adaptor.getOperand(); + Type inputType = input.getType(); + auto convertedInput = maybeExt(input, rewriter); + auto computeType = convertedInput.getType(); + + StringRef sincosFunc; + if (isa(computeType)) { + const arith::FastMathFlags flag = op.getFastmath(); + const bool useApprox = + mlir::arith::bitEnumContainsAny(flag, arith::FastMathFlags::afn); + sincosFunc = useApprox ? 
"__nv_fast_sincosf" : "__nv_sincosf"; + } else if (isa(computeType)) { + sincosFunc = "__nv_sincos"; + } else { + return rewriter.notifyMatchFailure(op, + "unsupported operand type for sincos"); + } + + auto ptrType = LLVM::LLVMPointerType::get(rewriter.getContext()); + + Value sinPtr, cosPtr; + { + OpBuilder::InsertionGuard guard(rewriter); + auto *scope = + op->getParentWithTrait(); + assert(scope && "Expected op to be inside automatic allocation scope"); + rewriter.setInsertionPointToStart(&scope->getRegion(0).front()); + auto one = rewriter.create( + loc, rewriter.getI32Type(), rewriter.getI32IntegerAttr(1)); + sinPtr = + rewriter.create(loc, ptrType, computeType, one, 0); + cosPtr = + rewriter.create(loc, ptrType, computeType, one, 0); + } + + createSincosCall(rewriter, loc, sincosFunc, convertedInput, sinPtr, cosPtr, + op); + + auto sinResult = rewriter.create(loc, computeType, sinPtr); + auto cosResult = rewriter.create(loc, computeType, cosPtr); + + rewriter.replaceOp(op, {maybeTrunc(sinResult, inputType, rewriter), + maybeTrunc(cosResult, inputType, rewriter)}); + return success(); + } + +private: + Value maybeExt(Value operand, PatternRewriter &rewriter) const { + if (isa(operand.getType())) + return rewriter.create( + operand.getLoc(), Float32Type::get(rewriter.getContext()), operand); + return operand; + } + + Value maybeTrunc(Value operand, Type type, PatternRewriter &rewriter) const { + if (operand.getType() != type) + return rewriter.create(operand.getLoc(), type, operand); + return operand; + } + + void createSincosCall(ConversionPatternRewriter &rewriter, Location loc, + StringRef funcName, Value input, Value sinPtr, + Value cosPtr, Operation *op) const { + auto voidType = LLVM::LLVMVoidType::get(rewriter.getContext()); + auto ptrType = sinPtr.getType(); + + SmallVector operandTypes = {input.getType(), ptrType, ptrType}; + auto funcType = LLVM::LLVMFunctionType::get(voidType, operandTypes); + + auto funcAttr = StringAttr::get(op->getContext(), funcName); + auto funcOp = + SymbolTable::lookupNearestSymbolFrom(op, funcAttr); + + if (!funcOp) { + auto parentFunc = op->getParentOfType(); + assert(parentFunc && "expected there to be a parent function"); + OpBuilder b(parentFunc); + + auto globalloc = loc->findInstanceOfOrUnknown(); + funcOp = LLVM::LLVMFuncOp::create(b, globalloc, funcName, funcType); + } + + SmallVector callOperands = {input, sinPtr, cosPtr}; + rewriter.create(loc, funcOp, callOperands); + } +}; + template static void populateOpPatterns(const LLVMTypeConverter &converter, RewritePatternSet &patterns, @@ -589,6 +683,9 @@ void mlir::populateLibDeviceConversionPatterns( "__nv_tan", "__nv_fast_tanf"); populateOpPatterns(converter, patterns, benefit, "__nv_tanhf", "__nv_tanh"); + + // Custom pattern for sincos since it returns two values + patterns.add(converter, benefit); } void mlir::populateGpuToNVVMConversionPatterns( diff --git a/mlir/lib/Conversion/LLVMCommon/PrintCallHelper.cpp b/mlir/lib/Conversion/LLVMCommon/PrintCallHelper.cpp index d95aeba8a4488..da4443dc86053 100644 --- a/mlir/lib/Conversion/LLVMCommon/PrintCallHelper.cpp +++ b/mlir/lib/Conversion/LLVMCommon/PrintCallHelper.cpp @@ -67,7 +67,7 @@ LogicalResult mlir::LLVM::createPrintStrCall( auto arrayTy = LLVM::LLVMArrayType::get(IntegerType::get(ctx, 8), elementVals.size()); auto globalOp = LLVM::GlobalOp::create( - builder, loc, arrayTy, /*constant=*/true, LLVM::Linkage::Private, + builder, loc, arrayTy, /*isConstant=*/true, LLVM::Linkage::Private, ensureSymbolNameIsUnique(moduleOp, symbolName, 
symbolTables), dataAttr); auto ptrTy = LLVM::LLVMPointerType::get(builder.getContext()); diff --git a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp index 853f45498ac52..229e40e2061cb 100644 --- a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp +++ b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp @@ -121,6 +121,38 @@ using CountTrailingZerosOpLowering = LLVM::CountTrailingZerosOp>; using AbsIOpLowering = IntOpWithFlagLowering; +// A `sincos` is converted into `llvm.intr.sincos` followed by extractvalue ops. +struct SincosOpLowering : public ConvertOpToLLVMPattern { + using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; + + LogicalResult + matchAndRewrite(math::SincosOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + const LLVMTypeConverter &typeConverter = *this->getTypeConverter(); + mlir::Location loc = op.getLoc(); + mlir::Type operandType = adaptor.getOperand().getType(); + mlir::Type llvmOperandType = typeConverter.convertType(operandType); + mlir::Type sinType = typeConverter.convertType(op.getSin().getType()); + mlir::Type cosType = typeConverter.convertType(op.getCos().getType()); + if (!llvmOperandType || !sinType || !cosType) + return failure(); + + ConvertFastMath attrs(op); + + auto structType = LLVM::LLVMStructType::getLiteral( + rewriter.getContext(), {llvmOperandType, llvmOperandType}); + + auto sincosOp = rewriter.create( + loc, structType, adaptor.getOperand(), attrs.getAttrs()); + + auto sinValue = LLVM::ExtractValueOp::create(rewriter, loc, sincosOp, 0); + auto cosValue = LLVM::ExtractValueOp::create(rewriter, loc, sincosOp, 1); + + rewriter.replaceOp(op, {sinValue, cosValue}); + return success(); + } +}; + // A `expm1` is converted into `exp - 1`. struct ExpM1OpLowering : public ConvertOpToLLVMPattern { using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; @@ -393,6 +425,7 @@ void mlir::populateMathToLLVMConversionPatterns( RoundEvenOpLowering, RoundOpLowering, RsqrtOpLowering, + SincosOpLowering, SinOpLowering, SinhOpLowering, ASinOpLowering, diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp index 262e0e7a30c63..a6f816aa07377 100644 --- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp +++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp @@ -48,8 +48,8 @@ static bool isStaticStrideOrOffset(int64_t strideOrOffset) { } static FailureOr -getFreeFn(OpBuilder &b, const LLVMTypeConverter *typeConverter, ModuleOp module, - SymbolTableCollection *symbolTables) { +getFreeFn(OpBuilder &b, const LLVMTypeConverter *typeConverter, + Operation *module, SymbolTableCollection *symbolTables) { bool useGenericFn = typeConverter->getOptions().useGenericFunctions; if (useGenericFn) @@ -465,6 +465,51 @@ struct AssumeAlignmentOpLowering } }; +struct DistinctObjectsOpLowering + : public ConvertOpToLLVMPattern { + using ConvertOpToLLVMPattern< + memref::DistinctObjectsOp>::ConvertOpToLLVMPattern; + explicit DistinctObjectsOpLowering(const LLVMTypeConverter &converter) + : ConvertOpToLLVMPattern(converter) {} + + LogicalResult + matchAndRewrite(memref::DistinctObjectsOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + ValueRange operands = adaptor.getOperands(); + if (operands.size() <= 1) { + // Fast path. 
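+      // With zero or one pointer operand there is no pair to annotate as
+      // distinct, so the op is simply replaced by its converted operands.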
+ rewriter.replaceOp(op, operands); + return success(); + } + + Location loc = op.getLoc(); + SmallVector ptrs; + for (auto [origOperand, newOperand] : + llvm::zip_equal(op.getOperands(), operands)) { + auto memrefType = cast(origOperand.getType()); + MemRefDescriptor memRefDescriptor(newOperand); + Value ptr = memRefDescriptor.bufferPtr(rewriter, loc, *getTypeConverter(), + memrefType); + ptrs.push_back(ptr); + } + + auto cond = + LLVM::ConstantOp::create(rewriter, loc, rewriter.getI1Type(), 1); + // Generate separate_storage assumptions for each pair of pointers. + for (auto i : llvm::seq(ptrs.size() - 1)) { + for (auto j : llvm::seq(i + 1, ptrs.size())) { + Value ptr1 = ptrs[i]; + Value ptr2 = ptrs[j]; + LLVM::AssumeOp::create(rewriter, loc, cond, + LLVM::AssumeSeparateStorageTag{}, ptr1, ptr2); + } + } + + rewriter.replaceOp(op, operands); + return success(); + } +}; + // A `dealloc` is converted into a call to `free` on the underlying data buffer. // The memref descriptor being an SSA value, there is no need to clean it up // in any way. @@ -483,8 +528,8 @@ class DeallocOpLowering : public ConvertOpToLLVMPattern { ConversionPatternRewriter &rewriter) const override { // Insert the `free` declaration if it is not already present. FailureOr freeFunc = - getFreeFn(rewriter, getTypeConverter(), op->getParentOfType(), - symbolTables); + getFreeFn(rewriter, getTypeConverter(), + op->getParentWithTrait(), symbolTables); if (failed(freeFunc)) return failure(); Value allocatedPtr; @@ -1997,22 +2042,23 @@ void mlir::populateFinalizeMemRefToLLVMConversionPatterns( patterns.add< AllocaOpLowering, AllocaScopeOpLowering, - AtomicRMWOpLowering, AssumeAlignmentOpLowering, + AtomicRMWOpLowering, ConvertExtractAlignedPointerAsIndex, DimOpLowering, + DistinctObjectsOpLowering, ExtractStridedMetadataOpLowering, GenericAtomicRMWOpLowering, GetGlobalMemrefOpLowering, LoadOpLowering, MemRefCastOpLowering, - MemorySpaceCastOpLowering, MemRefReinterpretCastOpLowering, MemRefReshapeOpLowering, + MemorySpaceCastOpLowering, PrefetchOpLowering, RankOpLowering, - ReassociatingReshapeOpConversion, ReassociatingReshapeOpConversion, + ReassociatingReshapeOpConversion, StoreOpLowering, SubViewOpLowering, TransposeOpLowering, diff --git a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp index 49d06497dbeea..a90dcc8cc3ef1 100644 --- a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp +++ b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp @@ -512,7 +512,7 @@ calculateMemoryRequirements(Value accessedPtr, bool isNontemporal, if (!sizeInBytes.has_value()) return failure(); - memoryAccess = memoryAccess | spirv::MemoryAccess::Aligned; + memoryAccess |= spirv::MemoryAccess::Aligned; auto memAccessAttr = spirv::MemoryAccessAttr::get(ctx, memoryAccess); auto alignmentValue = preferredAlignment ? preferredAlignment : *sizeInBytes; auto alignment = IntegerAttr::get(IntegerType::get(ctx, 32), alignmentValue); @@ -699,6 +699,35 @@ LoadOpPattern::matchAndRewrite(memref::LoadOp loadOp, OpAdaptor adaptor, return success(); } +template +static FailureOr> +extractLoadCoordsForComposite(memref::LoadOp loadOp, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) { + // At present we only support linear "tiling" as specified in Vulkan, this + // means that texels are assumed to be laid out in memory in a row-major + // order. This allows us to support any memref layout that is a permutation of + // the dimensions. 
Future work will pass an optional image layout to the + // rewrite pattern so that we can support optimized target specific tilings. + SmallVector indices = adaptor.getIndices(); + AffineMap map = loadOp.getMemRefType().getLayout().getAffineMap(); + if (!map.isPermutation()) + return rewriter.notifyMatchFailure( + loadOp, + "Cannot lower memrefs with memory layout which is not a permutation"); + + // The memrefs layout determines the dimension ordering so we need to follow + // the map to get the ordering of the dimensions/indices. + const unsigned dimCount = map.getNumDims(); + SmallVector coords(dimCount); + for (unsigned dim = 0; dim < dimCount; ++dim) + coords[map.getDimPosition(dim)] = indices[dim]; + + // We need to reverse the coordinates because the memref layout is slowest to + // fastest moving and the vector coordinates for the image op is fastest to + // slowest moving. + return llvm::to_vector(llvm::reverse(coords)); +} + LogicalResult ImageLoadOpPattern::matchAndRewrite(memref::LoadOp loadOp, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const { @@ -755,13 +784,17 @@ ImageLoadOpPattern::matchAndRewrite(memref::LoadOp loadOp, OpAdaptor adaptor, // Build a vector of coordinates or just a scalar index if we have a 1D image. Value coords; - if (memrefType.getRank() != 1) { + if (memrefType.getRank() == 1) { + coords = adaptor.getIndices()[0]; + } else { + FailureOr> maybeCoords = + extractLoadCoordsForComposite(loadOp, adaptor, rewriter); + if (failed(maybeCoords)) + return failure(); auto coordVectorType = VectorType::get({loadOp.getMemRefType().getRank()}, adaptor.getIndices().getType()[0]); coords = spirv::CompositeConstructOp::create(rewriter, loc, coordVectorType, - adaptor.getIndices()); - } else { - coords = adaptor.getIndices()[0]; + maybeCoords.value()); } // Fetch the value out of the image. diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp index 79cb49a4f7dbc..d6a262275be3d 100644 --- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp +++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp @@ -741,7 +741,7 @@ creatLdMatrixCompatibleLoads(RewriterBase &rewriter, vector::TransferReadOp op, } // Adjust the load offset. - auto laneId = gpu::LaneIdOp::create(rewriter, loc, /*upperBound=*/nullptr); + auto laneId = gpu::LaneIdOp::create(rewriter, loc, /*upper_bound=*/nullptr); FailureOr offsets = nvgpu::getLaneIdToLdMatrixMatrixCoord(rewriter, loc, *params); if (failed(offsets)) { @@ -781,7 +781,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op, "conversion to distributed non-ldmatrix compatible load"); } - Value laneId = gpu::LaneIdOp::create(rewriter, loc, /*upperBound=*/nullptr); + Value laneId = gpu::LaneIdOp::create(rewriter, loc, /*upper_bound=*/nullptr); // This is the individual element type. 
Type loadedElType = regInfo->registerLLVMType; @@ -915,7 +915,7 @@ convertTransferWriteToStores(RewriterBase &rewriter, vector::TransferWriteOp op, return rewriter.notifyMatchFailure(op, "not mma sync reg info"); VectorType vectorType = getMmaSyncVectorOperandType(*regInfo); - Value laneId = gpu::LaneIdOp::create(rewriter, loc, /*upperBound=*/nullptr); + Value laneId = gpu::LaneIdOp::create(rewriter, loc, /*upper_bound=*/nullptr); for (unsigned i = 0; i < vectorType.getShape()[0]; i++) { Value logicalValueId = arith::ConstantOp::create( diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp index e7266740894b1..e0b1a88d01cdc 100644 --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -1987,17 +1987,13 @@ struct VectorScalableStepOpLowering /// %e = add %c, %d /// ``` /// `vector.matrix_multiply` later lowers to `llvm.matrix.multiply`. -// -/// This only kicks in when vectorContractLowering is set to Matmul and -/// the vector.contract op is a row-major matrix multiply. class ContractionOpToMatmulOpLowering : public vector::MaskableOpRewritePattern { public: using MaskableOpRewritePattern::MaskableOpRewritePattern; - ContractionOpToMatmulOpLowering( - vector::VectorContractLowering vectorContractLowering, - MLIRContext *context, PatternBenefit benefit = 100) + ContractionOpToMatmulOpLowering(MLIRContext *context, + PatternBenefit benefit = 100) : MaskableOpRewritePattern(context, benefit) {} FailureOr @@ -2005,23 +2001,22 @@ class ContractionOpToMatmulOpLowering PatternRewriter &rewriter) const override; }; -/// Progressively lower a `vector.contract %a, %b, %c` with row-major matmul -/// semantics to: +/// Lower a qualifying `vector.contract %a, %b, %c` (with row-major matmul +/// semantics directly into `llvm.intr.matrix.multiply`: +/// BEFORE: +/// ```mlir +/// %res = vector.contract #matmat_trait %lhs, %rhs, %acc +/// : vector<2x4xf32>, vector<4x3xf32> into vector<2x3xf32> /// ``` -/// %mta = maybe_transpose -/// %mtb = maybe_transpose -/// %flattened_a = vector.shape_cast %mta -/// %flattened_b = vector.shape_cast %mtb -/// %flattened_d = llvm.intr.matrix.multiply %flattened_a, %flattened_b -/// %mtd = vector.shape_cast %flattened_d -/// %d = maybe_untranspose %mtd -/// %e = add %c, %d +/// +/// AFTER: +/// ```mlir +/// %lhs = vector.shape_cast %arg0 : vector<2x4xf32> to vector<8xf32> +/// %rhs = vector.shape_cast %arg1 : vector<4x3xf32> to vector<12xf32> +/// %matmul = llvm.intr.matrix.multiply %lhs, %rhs +/// %res = arith.addf %acc, %matmul : vector<2x3xf32> /// ``` // -/// This only kicks in when vectorContractLowering is set to `Matmul`. -/// vector.transpose operations are inserted if the vector.contract op is not a -/// row-major matrix multiply. -/// /// Scalable vectors are not supported. 
FailureOr ContractionOpToMatmulOpLowering::matchAndRewriteMaskableOp( vector::ContractionOp op, MaskingOpInterface maskOp, @@ -2116,7 +2111,19 @@ FailureOr ContractionOpToMatmulOpLowering::matchAndRewriteMaskableOp( return res; } -/// Lowers vector.transpose to llvm.intr.matrix.transpose +/// Lowers vector.transpose directly to llvm.intr.matrix.transpose +/// +/// BEFORE: +/// ```mlir +/// %tr = vector.transpose %vec, [1, 0] : vector<2x4xf32> to vector<4x2xf32> +/// ``` +/// AFTER: +/// ```mlir +/// %vec_cs = vector.shape_cast %vec : vector<2x4xf32> to vector<8xf32> +/// %tr = llvm.intr.matrix.transpose %vec_sc +/// {columns = 2 : i32, rows = 4 : i32} : vector<8xf32> into vector<8xf32> +/// %res = vector.shape_cast %tr : vector<8xf32> to vector<4x2xf32> +/// ``` class TransposeOpToMatrixTransposeOpLowering : public OpRewritePattern { public: diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp index cae490e5f03e7..f958edf2746e9 100644 --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp @@ -70,7 +70,7 @@ void ConvertVectorToLLVMPass::runOnOperation() { populateVectorBitCastLoweringPatterns(patterns); populateVectorBroadcastLoweringPatterns(patterns); populateVectorContractLoweringPatterns(patterns, vectorContractLowering); - if (vectorContractLowering == vector::VectorContractLowering::Matmul) { + if (vectorContractLowering == vector::VectorContractLowering::LLVMIntr) { // This pattern creates a dependency on the LLVM dialect, hence we don't // include it in `populateVectorContractLoweringPatterns` that is part of // the Vector dialect (and should not depend on LLVM). @@ -80,7 +80,7 @@ void ConvertVectorToLLVMPass::runOnOperation() { populateVectorShapeCastLoweringPatterns(patterns); populateVectorInterleaveLoweringPatterns(patterns); populateVectorTransposeLoweringPatterns(patterns, vectorTransposeLowering); - if (vectorTransposeLowering == vector::VectorTransposeLowering::Flat) { + if (vectorTransposeLowering == vector::VectorTransposeLowering::LLVMIntr) { // This pattern creates a dependency on the LLVM dialect, hence we don't // include it in `populateVectorTransposeLoweringPatterns` that is part of // the Vector dialect (and should not depend on LLVM). 
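The two LLVMIntr-gated patterns above are only registered when the pass options select the LLVM-intrinsic lowering. Below is a minimal C++ sketch of opting into them from a pipeline; the `ConvertVectorToLLVMPassOptions` struct, its field names, and the include paths are assumptions inferred from the pass options used above, not verified API — only the `LLVMIntr` enum values come from the patch itself.

```c++
#include "mlir/Conversion/Passes.h"
#include "mlir/Pass/PassManager.h"

// Sketch: build a pipeline that lowers vector.contract / vector.transpose via
// llvm.intr.matrix.multiply / llvm.intr.matrix.transpose. Option struct and
// field names are assumptions; only the LLVMIntr enum values are from the diff.
static void addVectorToLLVMWithMatrixIntrinsics(mlir::PassManager &pm) {
  mlir::ConvertVectorToLLVMPassOptions options;
  options.vectorContractLowering =
      mlir::vector::VectorContractLowering::LLVMIntr;
  options.vectorTransposeLowering =
      mlir::vector::VectorTransposeLowering::LLVMIntr;
  pm.addPass(mlir::createConvertVectorToLLVMPass(options));
}
```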
diff --git a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp index 04f56b9691fd1..5061a4454a7fd 100644 --- a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp +++ b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp @@ -753,7 +753,7 @@ struct VectorLoadOpConverter final spirv::MemoryAccessAttr memoryAccessAttr; IntegerAttr alignmentAttr; if (alignment.has_value()) { - memoryAccess = memoryAccess | spirv::MemoryAccess::Aligned; + memoryAccess |= spirv::MemoryAccess::Aligned; memoryAccessAttr = spirv::MemoryAccessAttr::get(rewriter.getContext(), memoryAccess); alignmentAttr = rewriter.getI32IntegerAttr(alignment.value()); @@ -822,7 +822,7 @@ struct VectorStoreOpConverter final spirv::MemoryAccessAttr memoryAccessAttr; IntegerAttr alignmentAttr; if (alignment.has_value()) { - memoryAccess = memoryAccess | spirv::MemoryAccess::Aligned; + memoryAccess |= spirv::MemoryAccess::Aligned; memoryAccessAttr = spirv::MemoryAccessAttr::get(rewriter.getContext(), memoryAccess); alignmentAttr = rewriter.getI32IntegerAttr(alignment.value()); diff --git a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineMinMax.cpp b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineMinMax.cpp index f3e065a12ded0..9821a75a55f49 100644 --- a/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineMinMax.cpp +++ b/mlir/lib/Dialect/Affine/Transforms/SimplifyAffineMinMax.cpp @@ -246,6 +246,6 @@ void SimplifyAffineMinMaxPass::runOnOperation() { patterns.add( func.getContext()); FrozenRewritePatternSet frozenPatterns(std::move(patterns)); - if (failed(applyPatternsGreedily(func, std::move(frozenPatterns)))) + if (failed(applyPatternsGreedily(func, frozenPatterns))) return signalPassFailure(); } diff --git a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp index 7cfd6d3a98df8..898d76ce8d9b5 100644 --- a/mlir/lib/Dialect/Arith/IR/ArithOps.cpp +++ b/mlir/lib/Dialect/Arith/IR/ArithOps.cpp @@ -1282,6 +1282,13 @@ OpFoldResult arith::MulFOp::fold(FoldAdaptor adaptor) { if (matchPattern(adaptor.getRhs(), m_OneFloat())) return getLhs(); + if (arith::bitEnumContainsAll(getFastmath(), arith::FastMathFlags::nnan | + arith::FastMathFlags::nsz)) { + // mulf(x, 0) -> 0 + if (matchPattern(adaptor.getRhs(), m_AnyZeroFloat())) + return getRhs(); + } + return constFoldBinaryOp( adaptor.getOperands(), [](const APFloat &a, const APFloat &b) { return a * b; }); diff --git a/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp b/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp index 7626d356a37f2..c64e10f534f8e 100644 --- a/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp +++ b/mlir/lib/Dialect/Arith/Transforms/EmulateUnsupportedFloats.cpp @@ -123,7 +123,8 @@ void mlir::arith::populateEmulateUnsupportedFloatsLegality( vector::OuterProductOp, vector::ScanOp>( [&](Operation *op) { return converter.isLegal(op); }); target.addLegalOp(); + arith::ConstantOp, arith::SelectOp, vector::SplatOp, + vector::BroadcastOp>(); } void EmulateUnsupportedFloatsPass::runOnOperation() { diff --git a/mlir/lib/Dialect/Arith/Transforms/ReifyValueBounds.cpp b/mlir/lib/Dialect/Arith/Transforms/ReifyValueBounds.cpp index 4bdd1e6a54d69..127563c8f4967 100644 --- a/mlir/lib/Dialect/Arith/Transforms/ReifyValueBounds.cpp +++ b/mlir/lib/Dialect/Arith/Transforms/ReifyValueBounds.cpp @@ -6,6 +6,8 @@ // //===----------------------------------------------------------------------===// +#include + #include "mlir/Dialect/Arith/Transforms/Transforms.h" #include 
"mlir/Dialect/Arith/IR/Arith.h" @@ -69,7 +71,8 @@ FailureOr mlir::arith::reifyValueBound( AffineMap boundMap; ValueDimList mapOperands; if (failed(ValueBoundsConstraintSet::computeBound( - boundMap, mapOperands, type, var, stopCondition, closedUB))) + boundMap, mapOperands, type, var, std::move(stopCondition), + closedUB))) return failure(); // Materialize tensor.dim/memref.dim ops. @@ -116,7 +119,7 @@ FailureOr mlir::arith::reifyValueBound( FailureOr mlir::arith::reifyShapedValueDimBound( OpBuilder &b, Location loc, presburger::BoundType type, Value value, - int64_t dim, ValueBoundsConstraintSet::StopConditionFn stopCondition, + int64_t dim, const ValueBoundsConstraintSet::StopConditionFn &stopCondition, bool closedUB) { auto reifyToOperands = [&](Value v, std::optional d, ValueBoundsConstraintSet &cstr) { @@ -134,7 +137,8 @@ FailureOr mlir::arith::reifyShapedValueDimBound( FailureOr mlir::arith::reifyIndexValueBound( OpBuilder &b, Location loc, presburger::BoundType type, Value value, - ValueBoundsConstraintSet::StopConditionFn stopCondition, bool closedUB) { + const ValueBoundsConstraintSet::StopConditionFn &stopCondition, + bool closedUB) { auto reifyToOperands = [&](Value v, std::optional d, ValueBoundsConstraintSet &cstr) { return v != value; diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp index f7b0b87085f3d..e0cf353da207f 100644 --- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp @@ -338,11 +338,21 @@ bool OpFilter::isOpAllowed(Operation *op) const { namespace { /// Default function arg type converter: Use a fully dynamic layout map. -BaseMemRefType -defaultFunctionArgTypeConverter(TensorType type, Attribute memorySpace, +BufferLikeType +defaultFunctionArgTypeConverter(TensorLikeType type, Attribute memorySpace, func::FuncOp funcOp, const BufferizationOptions &options) { - return getMemRefTypeWithFullyDynamicLayout(type, memorySpace); + if (auto tensorType = mlir::dyn_cast(type)) { + return cast( + getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace)); + } + + // If not builtin, fallback to TensorLikeType::getBufferType() + auto bufferType = + type.getBufferType(options, [&]() { return funcOp->emitError(); }); + assert(succeeded(bufferType) && + "a valid buffer is always expected at function boundary"); + return *bufferType; } /// Default unknown type converter: Use a fully dynamic layout map. 
BaseMemRefType @@ -385,14 +395,25 @@ BufferizationOptions::dynCastBufferizableOp(Value value) const { void BufferizationOptions::setFunctionBoundaryTypeConversion( LayoutMapOption layoutMapOption) { - functionArgTypeConverterFn = [=](TensorType tensorType, Attribute memorySpace, + functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace, func::FuncOp funcOp, const BufferizationOptions &options) { - if (layoutMapOption == LayoutMapOption::IdentityLayoutMap) - return bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType, - memorySpace); - return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType, - memorySpace); + if (auto tensorType = mlir::dyn_cast(type)) { + if (layoutMapOption == LayoutMapOption::IdentityLayoutMap) + return cast( + bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType, + memorySpace)); + return cast( + bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType, + memorySpace)); + } + + // If not builtin, fallback to TensorLikeType::getBufferType() + auto bufferType = + type.getBufferType(options, [&]() { return funcOp->emitError(); }); + assert(succeeded(bufferType) && + "a valid buffer is always expected at function boundary"); + return *bufferType; }; inferFunctionResultLayout = layoutMapOption == LayoutMapOption::InferLayoutMap; diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp index 68ef51992efee..701ab52a491a8 100644 --- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp @@ -401,7 +401,7 @@ bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter, // Compute the new signature. SmallVector newTypes; for (BlockArgument &bbArg : block->getArguments()) { - auto tensorType = dyn_cast(bbArg.getType()); + auto tensorType = dyn_cast(bbArg.getType()); if (!tensorType) { newTypes.push_back(bbArg.getType()); continue; diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp index f69efd1b3fa8c..d9d69342e42a8 100644 --- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp @@ -49,29 +49,47 @@ void FuncAnalysisState::startFunctionAnalysis(FuncOp funcOp) { #endif // NDEBUG } +// Note: this is a local adaptor to unify TensorType and TensorLikeType code +// paths that both work with BufferizationOptions. +static mlir::Attribute +getDefaultMemorySpace(const BufferizationOptions &options, + TensorLikeType type) { + if (auto tensorType = dyn_cast(type)) { + return *options.defaultMemorySpaceFn(tensorType); + } + return nullptr; +} + /// Return the index-th bufferized function argument type. This assumes that the /// specified argument is a tensor. If the tensor is ranked, a layout map may be /// specified by the user (as per `options.functionArgTypeConverterFn`). 
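+/// With the default fully-dynamic-layout converter, a ranked argument such as
+/// tensor<4xf32> becomes memref<4xf32, strided<[?], offset: ?>>; an explicit
+/// layout can still be imposed through the kBufferLayoutAttrName argument
+/// attribute handled below.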
-static BaseMemRefType +static BufferLikeType getBufferizedFunctionArgType(FuncOp funcOp, int64_t index, const BufferizationOptions &options) { - auto tensorType = - dyn_cast(funcOp.getFunctionType().getInput(index)); - assert(tensorType && "expected TensorType"); - - BaseMemRefType memrefType = options.functionArgTypeConverterFn( - tensorType, *options.defaultMemorySpaceFn(tensorType), funcOp, options); - - auto layoutAttr = funcOp.getArgAttrOfType( - index, BufferizationDialect::kBufferLayoutAttrName); - if (!layoutAttr) - return memrefType; - - auto rankedMemrefType = dyn_cast(memrefType); - assert(rankedMemrefType && "buffer layout not supported on unranked tensors"); - return MemRefType::get(rankedMemrefType.getShape(), - rankedMemrefType.getElementType(), layoutAttr, - rankedMemrefType.getMemorySpace()); + auto type = + dyn_cast(funcOp.getFunctionType().getInput(index)); + assert(type && "expected TensorLikeType"); + + // Note: For builtin tensors there is additional logic related to layout. + if (auto tensorType = dyn_cast(type)) { + BufferLikeType memrefType = options.functionArgTypeConverterFn( + type, *options.defaultMemorySpaceFn(tensorType), funcOp, options); + + auto layoutAttr = funcOp.getArgAttrOfType( + index, BufferizationDialect::kBufferLayoutAttrName); + if (!layoutAttr) + return memrefType; + + auto rankedMemrefType = dyn_cast(memrefType); + assert(rankedMemrefType && + "buffer layout not supported on unranked tensors"); + return cast(MemRefType::get( + rankedMemrefType.getShape(), rankedMemrefType.getElementType(), + layoutAttr, rankedMemrefType.getMemorySpace())); + } + + return options.functionArgTypeConverterFn(type, /*memSpace=*/nullptr, funcOp, + options); } /// Return the FuncOp called by `callOp`. @@ -227,13 +245,13 @@ struct CallOpInterface FunctionType funcType = funcOp.getFunctionType(); Type resultType = funcType.getResult(cast(value).getResultNumber()); - if (auto bufferizedType = dyn_cast(resultType)) - return cast(bufferizedType); + if (auto bufferizedType = dyn_cast(resultType)) + return bufferizedType; // Otherwise, call the type converter to compute the bufferized type. - auto tensorType = cast(resultType); + auto tensorType = cast(resultType); return cast(options.functionArgTypeConverterFn( - tensorType, *options.defaultMemorySpaceFn(tensorType), funcOp, + tensorType, getDefaultMemorySpace(options, tensorType), funcOp, options)); } @@ -248,7 +266,7 @@ struct CallOpInterface SmallVector resultTypes; for (Value result : callOp.getResults()) { Type returnType = result.getType(); - if (!isa(returnType)) { + if (!isa(returnType)) { // Non-tensor values are returned. resultTypes.push_back(returnType); continue; @@ -272,7 +290,7 @@ struct CallOpInterface for (OpOperand &opOperand : callOp->getOpOperands()) { // Non-tensor operands are just copied. - if (!isa(opOperand.get().getType())) { + if (!isa(opOperand.get().getType())) { newOperands.push_back(opOperand.get()); continue; } @@ -285,8 +303,8 @@ struct CallOpInterface Value buffer = *maybeBuffer; // Caller / callee type mismatch is handled with castOrReallocMemRefValue. - auto memRefType = funcType.getInput(opOperand.getOperandNumber()); - if (!isa(memRefType)) { + auto bufferType = funcType.getInput(opOperand.getOperandNumber()); + if (!isa(bufferType)) { // The called function was not bufferized yet. This can happen when // there cycles in the function call graph. Compute the bufferized // result type. 
@@ -296,7 +314,7 @@ struct CallOpInterface state); if (failed(maybeBufferType)) return failure(); - memRefType = *maybeBufferType; + bufferType = *maybeBufferType; } // Since we don't yet have a clear layout story, to_buffer may @@ -305,8 +323,8 @@ struct CallOpInterface // that will either canonicalize away or fail compilation until we can do // something better. Insert a reallocation + copy if it cannot be // statically guaranteed that a direct cast would be valid. - if (buffer.getType() != memRefType) { - auto memrefDstType = dyn_cast(memRefType); + if (buffer.getType() != bufferType) { + auto memrefDstType = dyn_cast(bufferType); assert(memrefDstType && "buffer layout not supported on unranked tensors"); FailureOr replacement = bufferization::castOrReallocMemRefValue( @@ -370,7 +388,7 @@ struct FuncOpInterface static bool supportsUnstructuredControlFlow() { return true; } bool hasTensorSemantics(Operation *op) const { - auto isaTensor = llvm::IsaPred; + auto isaTensor = llvm::IsaPred; // A function has tensor semantics if it has tensor arguments/results. auto funcOp = cast(op); @@ -406,8 +424,8 @@ struct FuncOpInterface // Function arguments are special. if (bbArg.getOwner() == &funcOp.getBody().front()) - return cast( - getBufferizedFunctionArgType(funcOp, bbArg.getArgNumber(), options)); + return getBufferizedFunctionArgType(funcOp, bbArg.getArgNumber(), + options); return OpWithUnstructuredControlFlowBufferizableOpInterfaceExternalModel:: getBufferType(op, value, options, state, invocationStack); @@ -430,7 +448,7 @@ struct FuncOpInterface SmallVector argTypes; for (const auto &it : llvm::enumerate(funcType.getInputs())) { Type argType = it.value(); - if (isa(argType)) { + if (isa(argType)) { argTypes.push_back( getBufferizedFunctionArgType(funcOp, it.index(), options)); continue; @@ -441,9 +459,9 @@ struct FuncOpInterface // Compute the result types. SmallVector retTypes; for (Type resultType : funcType.getResults()) { - if (auto tensorType = dyn_cast(resultType)) { - BaseMemRefType resultType = options.functionArgTypeConverterFn( - tensorType, *options.defaultMemorySpaceFn(tensorType), funcOp, + if (auto tensorType = dyn_cast(resultType)) { + BufferLikeType resultType = options.functionArgTypeConverterFn( + tensorType, getDefaultMemorySpace(options, tensorType), funcOp, options); retTypes.push_back(resultType); continue; @@ -473,7 +491,7 @@ struct FuncOpInterface SmallVector returnValues; for (auto [returnVal, bufferizedType] : llvm::zip_equal(returnOp->getOperands(), retTypes)) { - auto tensorType = dyn_cast(returnVal.getType()); + auto tensorType = dyn_cast(returnVal.getType()); rewriter.setInsertionPoint(returnOp); // If not a tensor type just forward it. diff --git a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp index 582593adfa5c0..f1da1a125e9ef 100644 --- a/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp +++ b/mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp @@ -122,6 +122,16 @@ static LogicalResult collapseBranch(Block *&successor, Block *successorDest = successorBranch.getDest(); if (successorDest == successor) return failure(); + // Don't try to collapse branches which participate in a cycle. 
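+  // For example, with
+  //   ^bb1: cf.br ^bb2
+  //   ^bb2: cf.br ^bb1
+  // following the chain of unconditional branches would never terminate, so
+  // walk it with a visited set and give up as soon as a block repeats.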
+ BranchOp nextBranch = dyn_cast(successorDest->getTerminator()); + llvm::DenseSet visited{successor, successorDest}; + while (nextBranch) { + Block *nextBranchDest = nextBranch.getDest(); + if (visited.contains(nextBranchDest)) + return failure(); + visited.insert(nextBranchDest); + nextBranch = dyn_cast(nextBranchDest->getTerminator()); + } // Update the operands to the successor. If the branch parent has no // arguments, we can use the branch operands directly. diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp index 43b02f16aa829..c0f9132de3db4 100644 --- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -535,17 +535,26 @@ parseAttributions(OpAsmParser &parser, StringRef keyword, /*allowType=*/true); } -/// Prints a GPU function memory attribution. static void printAttributions(OpAsmPrinter &p, StringRef keyword, - ArrayRef values) { + ArrayRef values, + ArrayAttr attributes = {}) { if (values.empty()) return; - auto printBlockArg = [](BlockArgument v) { - return llvm::formatv("{} : {}", v, v.getType()); - }; - p << ' ' << keyword << '(' - << llvm::interleaved(llvm::map_range(values, printBlockArg)) << ')'; + p << ' ' << keyword << '('; + llvm::interleaveComma( + llvm::enumerate(values), p, [&p, attributes](auto pair) { + BlockArgument v = pair.value(); + p << v << " : " << v.getType(); + + size_t attributionIndex = pair.index(); + DictionaryAttr attrs; + if (attributes && attributionIndex < attributes.size()) + attrs = llvm::cast(attributes[attributionIndex]); + if (attrs) + p.printOptionalAttrDict(attrs.getValue()); + }); + p << ')'; } /// Verifies a GPU function memory attribution. @@ -1649,28 +1658,6 @@ ParseResult GPUFuncOp::parse(OpAsmParser &parser, OperationState &result) { return parser.parseRegion(*body, entryArgs); } -static void printAttributions(OpAsmPrinter &p, StringRef keyword, - ArrayRef values, - ArrayAttr attributes) { - if (values.empty()) - return; - - p << ' ' << keyword << '('; - llvm::interleaveComma( - llvm::enumerate(values), p, [&p, attributes](auto pair) { - BlockArgument v = pair.value(); - p << v << " : " << v.getType(); - - size_t attributionIndex = pair.index(); - DictionaryAttr attrs; - if (attributes && attributionIndex < attributes.size()) - attrs = llvm::cast(attributes[attributionIndex]); - if (attrs) - p.printOptionalAttrDict(attrs.getValue()); - }); - p << ')'; -} - void GPUFuncOp::print(OpAsmPrinter &p) { p << ' '; p.printSymbolName(getName()); diff --git a/mlir/lib/Dialect/GPU/Transforms/ParallelLoopMapper.cpp b/mlir/lib/Dialect/GPU/Transforms/ParallelLoopMapper.cpp index a098e721303a8..594c7a265667e 100644 --- a/mlir/lib/Dialect/GPU/Transforms/ParallelLoopMapper.cpp +++ b/mlir/lib/Dialect/GPU/Transforms/ParallelLoopMapper.cpp @@ -52,6 +52,7 @@ gpu::setMappingAttr(ParallelOp ploopOp, namespace gpu { namespace { enum MappingLevel { MapGrid = 0, MapBlock = 1, Sequential = 2 }; +enum class MappingPolicy { OutermostFirst, InnermostFirst }; } // namespace static constexpr int kNumHardwareIds = 3; @@ -65,16 +66,30 @@ static MappingLevel &operator++(MappingLevel &mappingLevel) { return mappingLevel; } +// Map the policy string to a typed mapping policy. +// TODO: Revisit this and possibly use a loop interchange pass instead. 
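+// Accepted values (trimmed, case-insensitive) are "outermost-first" and
+// "innermost-first"; any other string is reported back to the caller as
+// failure.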
+static FailureOr getMappingPolicyFromStr(StringRef policy) { + std::string policyCanonical = policy.trim().lower(); + + std::optional option = + llvm::StringSwitch>(policyCanonical) + .Case("innermost-first", MappingPolicy::InnermostFirst) + .Case("outermost-first", MappingPolicy::OutermostFirst) + .Default(std::nullopt); + + if (!option) + return failure(); + return *option; +} + /// Computed the hardware id to use for a given mapping level. Will /// assign x,y and z hardware ids for the first 3 dimensions and use /// sequential after. -/// TODO: Make this use x for the inner-most loop that is -/// distributed to map to x, the next innermost to y and the next innermost to -/// z. static Processor getHardwareIdForMapping(MappingLevel level, int dimension) { if (dimension >= kNumHardwareIds || level == Sequential) return Processor::Sequential; + switch (level) { case MapGrid: switch (dimension) { @@ -107,20 +122,35 @@ static Processor getHardwareIdForMapping(MappingLevel level, int dimension) { /// Add mapping information to the given parallel loop. Do not add /// mapping information if the loop already has it. Also, don't /// start a mapping at a nested loop. -static void mapParallelOp(ParallelOp parallelOp, - MappingLevel mappingLevel = MapGrid) { +static void +mapParallelOp(ParallelOp parallelOp, MappingLevel mappingLevel = MapGrid, + MappingPolicy mappingPolicy = MappingPolicy::OutermostFirst) { // Do not try to add a mapping to already mapped loops or nested loops. if (parallelOp->getAttr(getMappingAttrName()) || ((mappingLevel == MapGrid) && parallelOp->getParentOfType())) return; + const int numLoops = static_cast(parallelOp.getNumLoops()); + const int loopsToMap = std::min(numLoops, kNumHardwareIds); + MLIRContext *ctx = parallelOp.getContext(); Builder b(ctx); SmallVector attrs; - attrs.reserve(parallelOp.getNumLoops()); - for (int i = 0, e = parallelOp.getNumLoops(); i < e; ++i) { + attrs.reserve(numLoops); + + for (int i = 0; i < numLoops; ++i) { + + // Determine the mapping to use for this loop. + // If the are more loops to map than HW IDs map to sequential. + int hwMapping = kNumHardwareIds; + if (i < loopsToMap) { + hwMapping = (mappingPolicy == MappingPolicy::OutermostFirst) + ? i + : (loopsToMap - 1 - i); + } + attrs.push_back(b.getAttr( - getHardwareIdForMapping(mappingLevel, i), b.getDimIdentityMap(), + getHardwareIdForMapping(mappingLevel, hwMapping), b.getDimIdentityMap(), b.getDimIdentityMap())); } (void)setMappingAttr(parallelOp, attrs); @@ -129,16 +159,31 @@ static void mapParallelOp(ParallelOp parallelOp, // walk but just iterate over the operations. for (Operation &op : *parallelOp.getBody()) { if (ParallelOp nested = dyn_cast(op)) - mapParallelOp(nested, mappingLevel); + mapParallelOp(nested, mappingLevel, mappingPolicy); } } namespace { struct GpuMapParallelLoopsPass : public impl::GpuMapParallelLoopsPassBase { + using Base::Base; + void runOnOperation() override { + // Parse the mapping policy. 
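+    // An unrecognized policy string emits an error on the root operation and
+    // aborts the pass via signalPassFailure.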
+ FailureOr policyOrFailure = + getMappingPolicyFromStr(mappingPolicyStr); + if (failed(policyOrFailure)) { + getOperation()->emitError() << "Invalid mapping policy specified."; + return signalPassFailure(); + } + + MappingPolicy policy = *policyOrFailure; + MappingLevel topLevel = MappingLevel::MapGrid; + for (Region ®ion : getOperation()->getRegions()) { - region.walk([](ParallelOp parallelOp) { mapParallelOp(parallelOp); }); + region.walk([&](ParallelOp parallelOp) { + mapParallelOp(parallelOp, topLevel, policy); + }); } } }; diff --git a/mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp b/mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp index b45fdf34e78e1..81c3069cec16e 100644 --- a/mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp +++ b/mlir/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp @@ -430,7 +430,7 @@ createSubgroupDPPReduction(PatternRewriter &rewriter, gpu::SubgroupReduceOp op, dpp = ROCDL::PermlaneX16Op::create(rewriter, loc, res.getType(), res, res, uint32Max, uint32Max, /*fi=*/true, - /*bound_ctrl=*/false); + /*boundControl=*/false); res = vector::makeArithReduction( rewriter, loc, gpu::convertReductionKind(mode), res, dpp); } else { diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp index a3d5d25b96ec2..5d08cccb4faab 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -4085,6 +4085,25 @@ printIndirectBrOpSucessors(OpAsmPrinter &p, IndirectBrOp op, Type flagType, p << "]"; } +//===----------------------------------------------------------------------===// +// SincosOp (intrinsic) +//===----------------------------------------------------------------------===// + +LogicalResult LLVM::SincosOp::verify() { + auto operandType = getOperand().getType(); + auto resultType = getResult().getType(); + auto resultStructType = + mlir::dyn_cast(resultType); + if (!resultStructType || resultStructType.getBody().size() != 2 || + resultStructType.getBody()[0] != operandType || + resultStructType.getBody()[1] != operandType) { + return emitOpError("expected result type to be an homogeneous struct with " + "two elements matching the operand type, but got ") + << resultType; + } + return success(); +} + //===----------------------------------------------------------------------===// // AssumeOp (intrinsic) //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp b/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp index 5ceae9b16af20..67573c4ee6061 100644 --- a/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp +++ b/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp @@ -77,7 +77,7 @@ static void addScopeToFunction(LLVM::LLVMFuncOp llvmFunc, auto subprogramFlags = LLVM::DISubprogramFlags::Optimized; if (!llvmFunc.isExternal()) { id = DistinctAttr::create(UnitAttr::get(context)); - subprogramFlags = subprogramFlags | LLVM::DISubprogramFlags::Definition; + subprogramFlags |= LLVM::DISubprogramFlags::Definition; } else { compileUnitAttr = {}; } diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp index 578931e1351c6..59013a23b3e3b 100644 --- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -5310,6 +5310,32 @@ bool PackOp::requirePaddingValue(ArrayRef inputShape, return false; } +bool PackOp::requirePaddingValueStrict(ArrayRef inputShape, + ArrayRef 
innerDimsPos, + ArrayRef outputShape, + ArrayRef outerDimsPerm, + ArrayRef innerTiles) { + SmallVector outputTileSizes( + outputShape.take_front(inputShape.size())); + if (!outerDimsPerm.empty()) { + assert(outerDimsPerm.size() == outputTileSizes.size() && + "expected output and outer_dims_perm to have same size"); + applyPermutationToVector(outputTileSizes, + invertPermutationVector(outerDimsPerm)); + } + for (auto [pos, tileSize] : llvm::zip_equal(innerDimsPos, innerTiles)) { + if (ShapedType::isDynamic(inputShape[pos]) || + ShapedType::isDynamic(outputTileSizes[pos])) + return true; + std::optional constantTile = getConstantIntValue(tileSize); + if (!constantTile) + return true; + if (inputShape[pos] % (*constantTile) != 0) + return true; + } + return false; +} + LogicalResult PackOp::verify() { if (failed(commonVerifierPackAndUnPackOp(*this))) return failure(); @@ -5583,14 +5609,13 @@ static bool inferStaticShape(PackOp packOp, SmallVectorImpl &srcShape, LogicalResult PackOp::canonicalize(PackOp packOp, PatternRewriter &rewriter) { // Fold an pack(unpack(x)) to x. if (auto unPackOp = packOp.getSource().getDefiningOp()) { - if (unPackOp.getSourceType() != packOp.getDestType()) - return failure(); - if (packOp.getPaddingValue() || - !hasSameInnerOuterAttribute(packOp, unPackOp) || - !haveSameTiles(packOp, unPackOp)) - return failure(); - rewriter.replaceOp(packOp, unPackOp.getSource()); - return success(); + if (unPackOp.getSourceType() == packOp.getDestType() && + !packOp.getPaddingValue() && + hasSameInnerOuterAttribute(packOp, unPackOp) && + haveSameTiles(packOp, unPackOp)) { + rewriter.replaceOp(packOp, unPackOp.getSource()); + return success(); + } } // Fold optional PaddingValue operand away if padding is not needed. diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp index 6ee2d8653d2dc..dd9b4c2490ef4 100644 --- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp +++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp @@ -42,6 +42,7 @@ #include "mlir/Transforms/GreedyPatternRewriteDriver.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/ScopeExit.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/TypeSwitch.h" #include "llvm/Support/DebugLog.h" #include "llvm/Support/LogicalResult.h" @@ -273,32 +274,6 @@ void transform::ApplyFoldPackUnpackIntoEmptyPatternsOp::populatePatterns( // BufferizeToAllocationOp //===----------------------------------------------------------------------===// -void transform::BufferizeToAllocationOp::build(OpBuilder &b, - OperationState &result, - Value target, - Attribute memorySpace) { - SmallVector resultTypes; - resultTypes.push_back(b.getType()); - resultTypes.push_back(b.getType()); - return build(b, result, - /*resultTypes=*/resultTypes, - /*target=*/target, - /*memory_space=*/memorySpace); -} - -void transform::BufferizeToAllocationOp::build(OpBuilder &b, - OperationState &result, - Value target, - int64_t memorySpace) { - SmallVector resultTypes; - resultTypes.push_back(b.getType()); - resultTypes.push_back(b.getType()); - return build(b, result, - /*resultTypes=*/resultTypes, - /*target=*/target, - /*memory_space=*/b.getI64IntegerAttr(memorySpace)); -} - namespace { class NewOpsListener : public RewriterBase::ForwardingListener { public: @@ -408,6 +383,95 @@ LogicalResult transform::BufferizeToAllocationOp::verify() { return success(); } +//===----------------------------------------------------------------------===// +// 
PromoteTensorOp +//===----------------------------------------------------------------------===// + +/// Return true if the operand may be read from by its owner. This is currently +/// very conservative and only looks inside linalg operations to prevent +/// unintentional data loss. +static bool mayBeRead(OpOperand &operand) { + auto linalgOp = dyn_cast(operand.getOwner()); + + // Be conservative about ops we cannot analyze deeper. + if (!linalgOp) + return true; + + // Look inside linalg ops. + Value blockArgument = linalgOp.getMatchingBlockArgument(&operand); + return !blockArgument.use_empty(); +} + +/// Return true if the value may be read through any of its uses. +static bool mayBeRead(Value value) { + // If the value has a reference semantics, it + // may be read through any alias... + if (!isa(value.getType())) + return true; + return llvm::any_of(value.getUses(), + static_cast(mayBeRead)); +} + +DiagnosedSilenceableFailure +transform::PromoteTensorOp::apply(transform::TransformRewriter &rewriter, + transform::TransformResults &results, + transform::TransformState &state) { + SmallVector promoted; + for (Value tensor : state.getPayloadValues(getTensor())) { + auto type = dyn_cast(tensor.getType()); + if (!type) { + return emitSilenceableError() << "non-tensor type: " << tensor; + } + + Operation *definingOp = tensor.getDefiningOp(); + if (definingOp) + rewriter.setInsertionPointAfter(definingOp); + else + rewriter.setInsertionPointToStart(cast(tensor).getOwner()); + + // Check this before we emit operations using this value. + bool needsMaterialization = mayBeRead(tensor); + + SmallVector dynamicDims; + llvm::SmallPtrSet preservedOps; + for (auto [pos, dim] : llvm::enumerate(type.getShape())) { + if (!ShapedType::isDynamic(dim)) + continue; + Value cst = rewriter.create(tensor.getLoc(), pos); + auto dimOp = rewriter.create(tensor.getLoc(), tensor, cst); + preservedOps.insert(dimOp); + dynamicDims.push_back(dimOp); + } + auto allocation = rewriter.create( + tensor.getLoc(), type, dynamicDims); + // Set memory space if provided. + if (getMemorySpaceAttr()) + allocation.setMemorySpaceAttr(getMemorySpaceAttr()); + Value allocated = allocation; + + // Only insert a materialization (typically bufferizes to a copy) when the + // value may be read from. 
+ if (needsMaterialization) { + auto copy = rewriter.create( + tensor.getLoc(), tensor, allocated); + preservedOps.insert(copy); + promoted.push_back(copy.getResult()); + } else { + promoted.push_back(allocated); + } + rewriter.replaceAllUsesExcept(tensor, promoted.back(), preservedOps); + } + results.setValues(cast(getPromoted()), promoted); + return DiagnosedSilenceableFailure::success(); +} + +void transform::PromoteTensorOp::getEffects( + SmallVectorImpl &effects) { + transform::onlyReadsHandle(getTensorMutable(), effects); + transform::producesHandle(getOperation()->getOpResults(), effects); + transform::modifiesPayload(effects); +} + //===----------------------------------------------------------------------===// // DecomposeOp //===----------------------------------------------------------------------===// @@ -2799,7 +2863,7 @@ SplitOp::apply(transform::TransformRewriter &rewriter, } opList.append(first); - if (second.size()) + if (!second.empty()) opList.append(second); } results.set(cast(getSplitList()), opList); diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp index 108abe800b13e..ebc4dcf6bbcb5 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp @@ -20,6 +20,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" +#include #include namespace mlir { @@ -124,6 +125,10 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp) { auto filterType = cast(convOp.getInputs()[1].getType()); auto outputType = cast(convOp.getOutputs()[0].getType()); + if (!convOp.hasPureTensorSemantics()) + return rewriter.notifyMatchFailure( + convOp, "expected op to have pure tensor semantics"); + if (!filterType.hasStaticShape()) return rewriter.notifyMatchFailure( convOp, "expected a static shape for the filter"); @@ -155,10 +160,15 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp) { Location loc = convOp.getLoc(); + assert(isa(filterType) && + "expected filter type to be a ranked tensor"); + auto tensorFilterType = cast(filterType); + // Reshape output and filter to the LHS and result of a (B)MNK matmul. 
SmallVector filterReassocIndices = {{0, 1, 2}, {3}}; auto reshapedFilterType = - RankedTensorType::get({fh * fw * ic, oc}, filterType.getElementType()); + RankedTensorType::get({fh * fw * ic, oc}, filterType.getElementType(), + tensorFilterType.getEncoding()); Value reshapedFilter = tensor::CollapseShapeOp::create( rewriter, loc, reshapedFilterType, filter, filterReassocIndices); @@ -253,6 +263,10 @@ rewriteInIm2Col(RewriterBase &rewriter, auto filterType = cast(convOp.getInputs()[1].getType()); auto outputType = cast(convOp.getOutputs()[0].getType()); + if (!convOp.hasPureTensorSemantics()) + return rewriter.notifyMatchFailure( + convOp, "expected op to have pure tensor semantics"); + if (!filterType.hasStaticShape()) return rewriter.notifyMatchFailure( convOp, "expected a static shape for the filter"); @@ -404,6 +418,10 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp) { auto filterType = cast(convOp.getInputs()[1].getType()); auto outputType = cast(convOp.getOutputs()[0].getType()); + if (!convOp.hasPureTensorSemantics()) + return rewriter.notifyMatchFailure( + convOp, "expected op to have pure tensor semantics"); + if (!filterType.hasStaticShape()) return rewriter.notifyMatchFailure( convOp, "expected a static shape for the filter"); @@ -435,9 +453,14 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp) { auto loc = convOp.getLoc(); MLIRContext *context = rewriter.getContext(); + assert(isa(filterType) && + "expected filter type to be a ranked tensor"); + auto tensorFilterType = cast(filterType); + SmallVector filterReassocIndices = {{0}, {1, 2, 3}}; auto reshapedFilterType = - RankedTensorType::get({oc, ic * fh * fw}, inputType.getElementType()); + RankedTensorType::get({oc, ic * fh * fw}, inputType.getElementType(), + tensorFilterType.getEncoding()); Value reshapedFilter = tensor::CollapseShapeOp::create( rewriter, loc, reshapedFilterType, filter, filterReassocIndices); @@ -529,6 +552,10 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp) { auto filterType = cast(convOp.getInputs()[1].getType()); auto outputType = cast(convOp.getOutputs()[0].getType()); + if (!convOp.hasPureTensorSemantics()) + return rewriter.notifyMatchFailure( + convOp, "expected op to have pure tensor semantics"); + if (!filterType.hasStaticShape()) return rewriter.notifyMatchFailure( convOp, "expected a static shape for the filter"); @@ -560,11 +587,16 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp) { Location loc = convOp.getLoc(); + assert(isa(filterType) && + "expected filter type to be a ranked tensor"); + auto tensorFilterType = cast(filterType); + // Reshape output and filter to the LHS and result of a "row-wise" matrix // multiplication. 
SmallVector filterReassocIndices = {{0}, {1, 2, 3}}; auto reshapedFilterType = - RankedTensorType::get({oc, fh * fw * ic}, filterType.getElementType()); + RankedTensorType::get({oc, fh * fw * ic}, filterType.getElementType(), + tensorFilterType.getEncoding()); Value reshapedFilter = tensor::CollapseShapeOp::create( rewriter, loc, reshapedFilterType, filter, filterReassocIndices); diff --git a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp index 6c17c3c2d0cab..3bb5f8af821c0 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/DataLayoutPropagation.cpp @@ -14,6 +14,7 @@ #include "mlir/Dialect/UB/IR/UBOps.h" #include "mlir/Dialect/Utils/IndexingUtils.h" #include "mlir/IR/Dominance.h" +#include "mlir/IR/TypeUtilities.h" #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/TypeSwitch.h" @@ -189,40 +190,20 @@ static SmallVector computeOuterDims(ArrayRef perm, return outerDimsPerm; } -/// Returns a tuple for packed operand and indexing_map with the assumptions: -/// 1) The generic op is the producer of the pack op. -/// 2) The generic op has only one result. -/// If the operand is a scalar or packing dimensions are all irrelevant to the -/// operand, the operand and the updated indexing map will be returned. -/// Otherwise, it returns the packed operand and the updated indexing map. E.g., -/// -/// #map0 = affine_map<(d0, d1) -> (d0, d1)> -/// #map1 = affine_map<(d0, d1) -> (d0)> -/// #map2 = affine_map<(d0, d1) -> (d1)> -/// %0 = linalg.generic {indexing_maps = [#map1, #map2, #map0], -/// iterator_types = ["parallel", "parallel"]} -/// ins(%arg0, %arg1 : tensor, tensor) -/// outs(%init : tensor) { -/// ^bb0(%arg3: f32, %arg4: f32, %arg5: f32): -/// %4 = arith.addf %arg3, %arg4 : f32 -/// linalg.yield %4 : f32 -/// } -> tensor -/// %1 = linalg.pack %0 -/// inner_dims_pos = [0, 1] -/// inner_tiles = [8, 2] -/// into %dest : tensor -> tensor -/// -/// Taking the first input operand as an example, the inner tile size of d1 is -/// 8. Thus, the below operation and `affine_map<(d0, d1, d2, d3)> -> -/// affine_map<(d1, d3)>` will be returned. -/// -/// %pack = linalg.pack %arg0 -/// inner_dims_pos = [0] -/// inner_tiles = [8] -/// into %init : tensor -> tensor -static std::tuple -getOrCreatePackedViewOfOperand(OpBuilder &b, Location loc, PackInfo packInfo, - GenericOp genericOp, OpOperand *opOperand) { +struct PackedOperandDetails { + SmallVector innerTileSizes; + SmallVector innerDimsPos; + SmallVector outerDimsPerm; + AffineMap indexingMap; +}; + +/// Helper function for getOrCreatePackedViewOfOperand that populates +/// the details of the packedOperand that needs to be formed and also +/// returns if the packing would require padding. +static bool getPackedOperandDetails( + OpBuilder &b, PackInfo packInfo, GenericOp genericOp, OpOperand *opOperand, + DenseMap &packedOperandMap) { + PackedOperandDetails currOperandDetails; int64_t numOrigLoops = genericOp.getNumLoops(); int64_t numInnerLoops = packInfo.getNumTiledLoops(); int64_t numLoops = numOrigLoops + numInnerLoops; @@ -231,9 +212,12 @@ getOrCreatePackedViewOfOperand(OpBuilder &b, Location loc, PackInfo packInfo, SmallVector exprs(origIndexingMap.getResults()); // If the OpOperand is a scalar or a zero-rank tensor, no need to pack. 
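The new `getPackedOperandDetails` helper introduced here decides, per operand, whether packing would require a padding value, using `PackOp::requirePaddingValueStrict` added earlier in LinalgOps.cpp: padding is needed whenever a packed dimension is dynamic or not a multiple of the tile size. A simplified standalone sketch of that test (single dimension only; `kDynamic` stands in for `ShapedType::kDynamic`, and the numbers in `main` are illustrative):

```cpp
#include <cstdint>
#include <cstdio>
#include <limits>
#include <optional>

// Sentinel standing in for ShapedType::kDynamic.
constexpr int64_t kDynamic = std::numeric_limits<int64_t>::min();

// Padding is required if the dimension is dynamic, the tile size is not a
// known constant, or the dimension is not evenly divisible by the tile.
bool requiresPadding(int64_t dimSize, std::optional<int64_t> tileSize) {
  if (dimSize == kDynamic)
    return true;
  if (!tileSize || *tileSize == kDynamic)
    return true;
  return dimSize % *tileSize != 0;
}

int main() {
  std::printf("%d\n", requiresPadding(16, 8));       // 0: 16 divides evenly
  std::printf("%d\n", requiresPadding(10, 8));       // 1: 10 is not a multiple of 8
  std::printf("%d\n", requiresPadding(kDynamic, 8)); // 1: dynamic extent
}
```

The scalar / zero-rank check that follows is updated to record its result in `packedOperandMap` instead of returning a tuple directly.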
- if (genericOp.isScalar(opOperand) || exprs.empty()) - return std::make_tuple(opOperand->get(), - AffineMap::get(numLoops, 0, exprs, b.getContext())); + if (genericOp.isScalar(opOperand) || exprs.empty()) { + currOperandDetails.indexingMap = + AffineMap::get(numLoops, 0, exprs, b.getContext()); + packedOperandMap[opOperand] = currOperandDetails; + return false; + } // Step 1. Construct the information of packing data dimensions; append inner // dimensions to the indexing maps for the operand. @@ -281,18 +265,86 @@ getOrCreatePackedViewOfOperand(OpBuilder &b, Location loc, PackInfo packInfo, exprs = auxVec; } } - auto indexingMap = AffineMap::get(numLoops, 0, exprs, b.getContext()); + currOperandDetails.indexingMap = + AffineMap::get(numLoops, 0, exprs, b.getContext()); // The operand does not have dimensions that relates to pack op. + if (innerDimsPos.empty() && outerDimsPerm.empty()) { + packedOperandMap[opOperand] = currOperandDetails; + return false; + } + auto inputType = cast(opOperand->get().getType()); + + auto maybeIntInnerTileSizes = + llvm::map_to_vector(innerTileSizes, [](OpFoldResult ofr) -> int64_t { + std::optional maybeCst = getConstantIntValue(ofr); + return maybeCst.value_or(ShapedType::kDynamic); + }); + bool requirePadding = linalg::PackOp::requirePaddingValueStrict( + inputType.getShape(), innerDimsPos, + linalg::PackOp::inferPackedType(inputType, maybeIntInnerTileSizes, + innerDimsPos, outerDimsPerm) + .getShape(), + outerDimsPerm, innerTileSizes); + currOperandDetails.innerDimsPos = innerDimsPos; + currOperandDetails.innerTileSizes = innerTileSizes; + currOperandDetails.outerDimsPerm = outerDimsPerm; + packedOperandMap[opOperand] = currOperandDetails; + + return requirePadding; +} + +/// Returns a tuple for packed operand and indexing_map with the assumptions: +/// 1) The generic op is the producer of the pack op. +/// 2) The generic op has only one result. +/// If the operand is a scalar or packing dimensions are all irrelevant to the +/// operand, the operand and the updated indexing map will be returned. +/// Otherwise, it returns the packed operand and the updated indexing map. E.g., +/// +/// #map0 = affine_map<(d0, d1) -> (d0, d1)> +/// #map1 = affine_map<(d0, d1) -> (d0)> +/// #map2 = affine_map<(d0, d1) -> (d1)> +/// %0 = linalg.generic {indexing_maps = [#map1, #map2, #map0], +/// iterator_types = ["parallel", "parallel"]} +/// ins(%arg0, %arg1 : tensor, tensor) +/// outs(%init : tensor) { +/// ^bb0(%arg3: f32, %arg4: f32, %arg5: f32): +/// %4 = arith.addf %arg3, %arg4 : f32 +/// linalg.yield %4 : f32 +/// } -> tensor +/// %1 = linalg.pack %0 +/// inner_dims_pos = [0, 1] +/// inner_tiles = [8, 2] +/// into %dest : tensor -> tensor +/// +/// Taking the first input operand as an example, the inner tile size of d1 is +/// 8. Thus, the below operation and `affine_map<(d0, d1, d2, d3)> -> +/// affine_map<(d1, d3)>` will be returned. 
+/// +/// %pack = linalg.pack %arg0 +/// inner_dims_pos = [0] +/// inner_tiles = [8] +/// into %init : tensor -> tensor +static std::tuple getOrCreatePackedViewOfOperand( + OpBuilder &b, Location loc, OpOperand *opOperand, + const DenseMap &packedOperandMap) { + assert(packedOperandMap.contains(opOperand) && + "packed operand details expected to be populated"); + auto currOperandDetails = packedOperandMap.at(opOperand); + auto innerDimsPos = currOperandDetails.innerDimsPos; + auto outerDimsPerm = currOperandDetails.outerDimsPerm; + auto innerTileSizes = currOperandDetails.innerTileSizes; if (innerDimsPos.empty() && outerDimsPerm.empty()) - return std::make_tuple(opOperand->get(), indexingMap); + return std::make_tuple(opOperand->get(), currOperandDetails.indexingMap); auto empty = linalg::PackOp::createDestinationTensor( b, loc, opOperand->get(), innerTileSizes, innerDimsPos, outerDimsPerm); - auto packedOperand = linalg::PackOp::create( - b, loc, opOperand->get(), empty, innerDimsPos, innerTileSizes, - /*padding=*/std::nullopt, outerDimsPerm); - return std::make_tuple(packedOperand, indexingMap); + auto poison = ub::PoisonOp::create( + b, loc, getElementTypeOrSelf(opOperand->get().getType())); + Value packedOperand = + linalg::PackOp::create(b, loc, opOperand->get(), empty, innerDimsPos, + innerTileSizes, poison, outerDimsPerm); + return std::make_tuple(packedOperand, currOperandDetails.indexingMap); } /// This function is a helper subroutine to pack a genericOp and return it. It @@ -301,10 +353,10 @@ getOrCreatePackedViewOfOperand(OpBuilder &b, Location loc, PackInfo packInfo, /// around it. Implicitly this will only work when a packInfo can be obtained. /// This make sure that we are only using this function on parallel permuted /// dimensions. -static GenericOp packGenericOp(RewriterBase &rewriter, GenericOp genericOp, - Value dest, AffineMap packedOutIndexingMap, - const PackInfo &packInfo, - bool isFoldableUnpackPack) { +static FailureOr +packGenericOp(RewriterBase &rewriter, GenericOp genericOp, Value dest, + AffineMap packedOutIndexingMap, const PackInfo &packInfo, + bool isFoldableUnpackPack, bool poisonPaddingOk) { Location loc = genericOp.getLoc(); SmallVector inputOperands; SmallVector inputOperandsFromUnpackedSource; @@ -314,9 +366,18 @@ static GenericOp packGenericOp(RewriterBase &rewriter, GenericOp genericOp, packOp.getInnerDimsPos() == unPackOp.getInnerDimsPos() && llvm::equal(packOp.getMixedTiles(), unPackOp.getMixedTiles()); }; + DenseMap packedOperandMap; + bool requiresPadding = false; + for (OpOperand *inputOperand : genericOp.getDpsInputOperands()) { + requiresPadding |= getPackedOperandDetails(rewriter, packInfo, genericOp, + inputOperand, packedOperandMap); + } + if (requiresPadding && !poisonPaddingOk) + return failure(); + for (OpOperand *inputOperand : genericOp.getDpsInputOperands()) { auto [packedOperand, packedIndexingMap] = getOrCreatePackedViewOfOperand( - rewriter, loc, packInfo, genericOp, inputOperand); + rewriter, loc, inputOperand, packedOperandMap); auto unpackOp = inputOperand->get().getDefiningOp(); auto packOp = packedOperand.getDefiningOp(); if (packOp && unpackOp && hasEquivalentTiles(packOp, unpackOp)) { @@ -407,7 +468,8 @@ static bool isGenericOutsNotUsed(linalg::GenericOp genericOp) { /// } -> tensor static FailureOr bubbleUpPackOpThroughGenericOp(RewriterBase &rewriter, linalg::PackOp packOp, - const ControlPropagationFn &controlFn) { + const ControlPropagationFn &controlFn, + bool poisonPaddingOk) { auto genericOp = 
packOp.getSource().getDefiningOp(); if (!genericOp) return failure(); @@ -470,10 +532,15 @@ bubbleUpPackOpThroughGenericOp(RewriterBase &rewriter, linalg::PackOp packOp, } // Rebuild the indexing map for the corresponding init operand. - auto [packedOutOperand, packedOutIndexingMap] = - getOrCreatePackedViewOfOperand(rewriter, genericOp.getLoc(), *packInfo, - genericOp, opOperand); + DenseMap packedOperandMap; + bool requiresPadding = getPackedOperandDetails(rewriter, *packInfo, genericOp, + opOperand, packedOperandMap); + if (requiresPadding && !poisonPaddingOk) + return failure(); + auto [packedOutOperand, packedOutIndexingMap] = + getOrCreatePackedViewOfOperand(rewriter, genericOp.getLoc(), opOperand, + packedOperandMap); // Forward the new tensor.empty as a destination if it is one of the following // situations: // 1) The dps init operand is a tensor.empty. @@ -488,7 +555,8 @@ bubbleUpPackOpThroughGenericOp(RewriterBase &rewriter, linalg::PackOp packOp, // pack(unpack) isn't naively foldable because the unpack op can be from // an arbitrary domain so we need to keep both. return packGenericOp(rewriter, genericOp, dest, packedOutIndexingMap, - *packInfo, /*isFoldableUnpackPack=*/false); + *packInfo, /*isFoldableUnpackPack=*/false, + poisonPaddingOk); } /// Wrapper pattern that applies bubbleUpPackOpThroughGenericOp method. @@ -496,13 +564,15 @@ struct BubbleUpPackOpThroughGenericOpPattern : public OpRewritePattern { public: BubbleUpPackOpThroughGenericOpPattern(MLIRContext *context, - ControlPropagationFn fun) - : OpRewritePattern(context), controlFn(std::move(fun)) {} + ControlPropagationFn fun, + bool poisonPaddingOk) + : OpRewritePattern(context), controlFn(std::move(fun)), + poisonPaddingOk(std::move(poisonPaddingOk)) {} LogicalResult matchAndRewrite(linalg::PackOp packOp, PatternRewriter &rewriter) const override { - auto genericOp = - bubbleUpPackOpThroughGenericOp(rewriter, packOp, controlFn); + auto genericOp = bubbleUpPackOpThroughGenericOp(rewriter, packOp, controlFn, + poisonPaddingOk); if (failed(genericOp)) return failure(); rewriter.replaceOp(packOp, genericOp->getResults()); @@ -511,6 +581,7 @@ struct BubbleUpPackOpThroughGenericOpPattern private: ControlPropagationFn controlFn; + bool poisonPaddingOk; }; /// Propagate a linalg.pack operation up through a tensor.pad. The idea is to @@ -1080,7 +1151,8 @@ static FailureOr getUnPackedOperand(GenericOp genericOp) { /// static FailureOr> pushDownUnPackOpThroughGenericOp(RewriterBase &rewriter, GenericOp genericOp, - ControlPropagationFn controlFn) { + ControlPropagationFn controlFn, + bool poisonPaddingOk) { if (genericOp.getNumResults() != 1) return failure(); @@ -1107,9 +1179,17 @@ pushDownUnPackOpThroughGenericOp(RewriterBase &rewriter, GenericOp genericOp, return failure(); // Rebuild the indexing map for the corresponding init operand. 
+ DenseMap packedOperandMap; + bool requiresPadding = + getPackedOperandDetails(rewriter, *packInfo, genericOp, + genericOp.getDpsInitOperand(0), packedOperandMap); + if (requiresPadding && !poisonPaddingOk) + return failure(); + auto [packedOutOperand, packedOutIndexingMap] = - getOrCreatePackedViewOfOperand(rewriter, genericOp.getLoc(), *packInfo, - genericOp, genericOp.getDpsInitOperand(0)); + getOrCreatePackedViewOfOperand(rewriter, genericOp.getLoc(), + genericOp.getDpsInitOperand(0), + packedOperandMap); auto destPack = packedOutOperand.getDefiningOp(); // Forward the new tensor.empty as a destination if it is one of the following @@ -1129,9 +1209,12 @@ pushDownUnPackOpThroughGenericOp(RewriterBase &rewriter, GenericOp genericOp, // pack(unpack) is foldable in this case. This is because in pushing down the // unpack, by default we will populate an additional pack op after the unpack. // This guarantees them to be foldable. - GenericOp newGenericOp = + auto maybeGenericOp = packGenericOp(rewriter, genericOp, dest, packedOutIndexingMap, *packInfo, - /*isFoldableUnpackPack=*/true); + /*isFoldableUnpackPack=*/true, poisonPaddingOk); + if (failed(maybeGenericOp)) + return failure(); + GenericOp newGenericOp = *maybeGenericOp; Value newResult = newGenericOp.getTiedOpResult(newGenericOp.getDpsInitOperand(0)); @@ -1157,13 +1240,15 @@ pushDownUnPackOpThroughGenericOp(RewriterBase &rewriter, GenericOp genericOp, struct PushDownUnPackOpThroughGenericOp : public OpRewritePattern { public: PushDownUnPackOpThroughGenericOp(MLIRContext *context, - ControlPropagationFn fun) - : OpRewritePattern(context), controlFn(std::move(fun)) {} + ControlPropagationFn fun, + bool poisonPaddingOk) + : OpRewritePattern(context), controlFn(std::move(fun)), + poisonPaddingOk(std::move(poisonPaddingOk)) {} LogicalResult matchAndRewrite(GenericOp genericOp, PatternRewriter &rewriter) const override { - auto genericAndRepl = - pushDownUnPackOpThroughGenericOp(rewriter, genericOp, controlFn); + auto genericAndRepl = pushDownUnPackOpThroughGenericOp( + rewriter, genericOp, controlFn, poisonPaddingOk); if (failed(genericAndRepl)) return failure(); rewriter.replaceOp(genericOp, std::get<1>(*genericAndRepl)); @@ -1172,6 +1257,7 @@ struct PushDownUnPackOpThroughGenericOp : public OpRewritePattern { private: ControlPropagationFn controlFn; + bool poisonPaddingOk; }; /// Propagate a linalg.unpack operation through a tensor.pad. 
The idea is to @@ -1522,12 +1608,14 @@ class PushDownExtractSliceOpThroughGenericOp final void mlir::linalg::populateDataLayoutPropagationPatterns( RewritePatternSet &patterns, - const ControlPropagationFn &controlPackUnPackPropagation) { - patterns - .insert( - patterns.getContext(), controlPackUnPackPropagation); + const ControlPropagationFn &controlPackUnPackPropagation, + bool PoisonPaddingOk) { + patterns.insert( + patterns.getContext(), controlPackUnPackPropagation); + patterns.insert( + patterns.getContext(), controlPackUnPackPropagation, PoisonPaddingOk); } void mlir::linalg::populateExtractSliceSinkingPatterns( diff --git a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp index 8942670767231..0956c5d771394 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp @@ -141,7 +141,7 @@ SmallVector linalg::computePaddedShape( projectedDims.flip(paddingDim); AffineMap projectedMap = mlir::projectDims(partialIndexingMap, projectedDims, - /*compressDims=*/true); + /*compressDimsFlag=*/true); // If we are padding to the next multiple of, compose with ceil(sz) * sz. OpFoldResult paddingDimOfr; diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index 3ee6ae1029f72..4919d9a26b8cb 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -287,7 +287,7 @@ struct VectorizationState { /// moment we only make sure that there are no broadcast dimensions, but this /// might change if indexing maps evolve. bool isValidMaskingMap(AffineMap maskingMap) { - return maskingMap.getBroadcastDims().size() == 0; + return maskingMap.getBroadcastDims().empty(); } /// Turn the input indexing map into a valid masking map. @@ -923,7 +923,7 @@ static uint64_t getTrailingNonUnitLoopDimIdx(LinalgOp linalgOp) { llvm::count_if(loopRanges, [](int64_t dim) { return dim != 1; }) == 1) && "For statically shaped Linalg Ops, only one " "non-unit loop dim is expected"); - assert(loopRanges.size() != 0 && "Empty loops, nothing to analyse."); + assert(!loopRanges.empty() && "Empty loops, nothing to analyse."); size_t idx = loopRanges.size() - 1; for (; idx != 0; idx--) @@ -1770,12 +1770,9 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, linalg::PackOp packOp, rewriter.setInsertionPoint(packOp); Location loc = packOp.getLoc(); - auto padValue = packOp.getPaddingValue(); - if (!padValue) { - padValue = arith::ConstantOp::create( - rewriter, loc, - rewriter.getZeroAttr(packOp.getSourceType().getElementType())); - } + std::optional padValue = packOp.getPaddingValue() + ? std::optional(packOp.getPaddingValue()) + : std::nullopt; // If the input vector sizes are not provided, then the vector sizes are // determined by the result tensor shape. 
In case the vector sizes aren't @@ -1936,11 +1933,8 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, linalg::UnPackOp unpackOp, } // -- Generate the read operation -- - auto padValue = arith::ConstantOp::create( - rewriter, loc, - rewriter.getZeroAttr(unpackOp.getSourceType().getElementType())); Value readResult = vector::createReadOrMaskedRead( - rewriter, loc, unpackOp.getSource(), readVectorSizes, padValue, + rewriter, loc, unpackOp.getSource(), readVectorSizes, std::nullopt, useInBoundsInsteadOfMasking, readScalableVectorFlags); // -- Generate the transpose operation -- diff --git a/mlir/lib/Dialect/Math/IR/MathOps.cpp b/mlir/lib/Dialect/Math/IR/MathOps.cpp index a21631cbf8510..bbeef0f6ee9e5 100644 --- a/mlir/lib/Dialect/Math/IR/MathOps.cpp +++ b/mlir/lib/Dialect/Math/IR/MathOps.cpp @@ -284,6 +284,16 @@ OpFoldResult math::SinhOp::fold(FoldAdaptor adaptor) { }); } +//===----------------------------------------------------------------------===// +// SinCosOp getShapeForUnroll +//===----------------------------------------------------------------------===// + +std::optional> math::SincosOp::getShapeForUnroll() { + if (auto vt = mlir::dyn_cast(getOperand().getType())) + return llvm::to_vector<4>(vt.getShape()); + return std::nullopt; +} + //===----------------------------------------------------------------------===// // CountLeadingZerosOp folder //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt index ff62b515533c3..8899c3a1d1a42 100644 --- a/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/Math/Transforms/CMakeLists.txt @@ -3,6 +3,7 @@ add_mlir_dialect_library(MLIRMathTransforms ExpandOps.cpp ExtendToSupportedTypes.cpp PolynomialApproximation.cpp + SincosFusion.cpp UpliftToFMA.cpp ADDITIONAL_HEADER_DIRS diff --git a/mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp b/mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp new file mode 100644 index 0000000000000..69407df201cfa --- /dev/null +++ b/mlir/lib/Dialect/Math/Transforms/SincosFusion.cpp @@ -0,0 +1,80 @@ +//===- SincosFusion.cpp - Fuse sin/cos into sincos -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/Math/IR/Math.h" +#include "mlir/Dialect/Math/Transforms/Passes.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" + +using namespace mlir; +using namespace mlir::math; + +namespace { + +/// Fuse a math.sin and math.cos in the same block that use the same operand and +/// have identical fastmath flags into a single math.sincos. +struct SincosFusionPattern : OpRewritePattern { + using Base::Base; + + LogicalResult matchAndRewrite(math::SinOp sinOp, + PatternRewriter &rewriter) const override { + Value operand = sinOp.getOperand(); + mlir::arith::FastMathFlags sinFastMathFlags = sinOp.getFastmath(); + + math::CosOp cosOp = nullptr; + sinOp->getBlock()->walk([&](math::CosOp op) { + if (op.getOperand() == operand && op.getFastmath() == sinFastMathFlags) { + cosOp = op; + return WalkResult::interrupt(); + } + return WalkResult::advance(); + }); + + if (!cosOp) + return failure(); + + Operation *firstOp = sinOp->isBeforeInBlock(cosOp) ? 
sinOp.getOperation() + : cosOp.getOperation(); + rewriter.setInsertionPoint(firstOp); + + Type elemType = sinOp.getType(); + auto sincos = math::SincosOp::create(rewriter, firstOp->getLoc(), + TypeRange{elemType, elemType}, operand, + sinOp.getFastmathAttr()); + + rewriter.replaceOp(sinOp, sincos.getSin()); + rewriter.replaceOp(cosOp, sincos.getCos()); + return success(); + } +}; + +} // namespace + +namespace mlir::math { +#define GEN_PASS_DEF_MATHSINCOSFUSIONPASS +#include "mlir/Dialect/Math/Transforms/Passes.h.inc" +} // namespace mlir::math + +namespace { + +struct MathSincosFusionPass final + : math::impl::MathSincosFusionPassBase { + using MathSincosFusionPassBase::MathSincosFusionPassBase; + + void runOnOperation() override { + RewritePatternSet patterns(&getContext()); + patterns.add(&getContext()); + + GreedyRewriteConfig config; + if (failed( + applyPatternsGreedily(getOperation(), std::move(patterns), config))) + return signalPassFailure(); + } +}; + +} // namespace diff --git a/mlir/lib/Dialect/MemRef/IR/CMakeLists.txt b/mlir/lib/Dialect/MemRef/IR/CMakeLists.txt index 734294bd014c6..e25a0121a3359 100644 --- a/mlir/lib/Dialect/MemRef/IR/CMakeLists.txt +++ b/mlir/lib/Dialect/MemRef/IR/CMakeLists.txt @@ -20,6 +20,7 @@ add_mlir_dialect_library(MLIRMemRefDialect MLIRInferIntRangeInterface MLIRInferTypeOpInterface MLIRIR + MLIRMemOpInterfaces MLIRMemorySlotInterfaces MLIRShapedOpInterfaces MLIRSideEffectInterfaces diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp index 5d15d5f6e3de4..e9bdcda296da5 100644 --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -111,6 +111,65 @@ static void constifyIndexValues(SmallVectorImpl &values, } } +/// Helper function to retrieve a lossless memory-space cast, and the +/// corresponding new result memref type. +static std::tuple +getMemorySpaceCastInfo(BaseMemRefType resultTy, Value src) { + MemorySpaceCastOpInterface castOp = + MemorySpaceCastOpInterface::getIfPromotableCast(src); + + // Bail if the cast is not lossless. + if (!castOp) + return {}; + + // Transform the source and target type of `castOp` to have the same metadata + // as `resultTy`. Bail if not possible. + FailureOr srcTy = resultTy.clonePtrWith( + castOp.getSourcePtr().getType().getMemorySpace(), std::nullopt); + if (failed(srcTy)) + return {}; + + FailureOr tgtTy = resultTy.clonePtrWith( + castOp.getTargetPtr().getType().getMemorySpace(), std::nullopt); + if (failed(tgtTy)) + return {}; + + // Check if this is a valid memory-space cast. + if (!castOp.isValidMemorySpaceCast(*tgtTy, *srcTy)) + return {}; + + return std::make_tuple(castOp, *tgtTy, *srcTy); +} + +/// Implementation of `bubbleDownCasts` method for memref operations that +/// return a single memref result. +template +static FailureOr>> +bubbleDownCastsPassthroughOpImpl(ConcreteOpTy op, OpBuilder &builder, + OpOperand &src) { + auto [castOp, tgtTy, resTy] = getMemorySpaceCastInfo(op.getType(), src.get()); + // Bail if we cannot cast. + if (!castOp) + return failure(); + + // Create the new operands. + SmallVector operands; + llvm::append_range(operands, op->getOperands()); + operands[src.getOperandNumber()] = castOp.getSourcePtr(); + + // Create the new op and results. + auto newOp = ConcreteOpTy::create( + builder, op.getLoc(), TypeRange(resTy), operands, op.getProperties(), + llvm::to_vector_of(op->getDiscardableAttrs())); + + // Insert a memory-space cast to the original memory space of the op. 
+ MemorySpaceCastOpInterface result = castOp.cloneMemorySpaceCastOp( + builder, tgtTy, + cast>(newOp.getResult())); + return std::optional>( + SmallVector({result.getTargetPtr()})); +} + //===----------------------------------------------------------------------===// // AllocOp / AllocaOp //===----------------------------------------------------------------------===// @@ -542,6 +601,34 @@ OpFoldResult AssumeAlignmentOp::fold(FoldAdaptor adaptor) { return getMemref(); } +FailureOr>> +AssumeAlignmentOp::bubbleDownCasts(OpBuilder &builder) { + return bubbleDownCastsPassthroughOpImpl(*this, builder, getMemrefMutable()); +} + +//===----------------------------------------------------------------------===// +// DistinctObjectsOp +//===----------------------------------------------------------------------===// + +LogicalResult DistinctObjectsOp::verify() { + if (getOperandTypes() != getResultTypes()) + return emitOpError("operand types and result types must match"); + + if (getOperandTypes().empty()) + return emitOpError("expected at least one operand"); + + return success(); +} + +LogicalResult DistinctObjectsOp::inferReturnTypes( + MLIRContext * /*context*/, std::optional /*location*/, + ValueRange operands, DictionaryAttr /*attributes*/, + OpaqueProperties /*properties*/, RegionRange /*regions*/, + SmallVectorImpl &inferredReturnTypes) { + llvm::copy(operands.getTypes(), std::back_inserter(inferredReturnTypes)); + return success(); +} + //===----------------------------------------------------------------------===// // CastOp //===----------------------------------------------------------------------===// @@ -710,6 +797,11 @@ OpFoldResult CastOp::fold(FoldAdaptor adaptor) { return succeeded(foldMemRefCast(*this)) ? getResult() : Value(); } +FailureOr>> +CastOp::bubbleDownCasts(OpBuilder &builder) { + return bubbleDownCastsPassthroughOpImpl(*this, builder, getSourceMutable()); +} + //===----------------------------------------------------------------------===// // CopyOp //===----------------------------------------------------------------------===// @@ -1601,6 +1693,12 @@ OpFoldResult LoadOp::fold(FoldAdaptor adaptor) { return OpFoldResult(); } +FailureOr>> +LoadOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getMemrefMutable(), + getResult()); +} + //===----------------------------------------------------------------------===// // MemorySpaceCastOp //===----------------------------------------------------------------------===// @@ -1645,6 +1743,32 @@ OpFoldResult MemorySpaceCastOp::fold(FoldAdaptor adaptor) { return Value{}; } +TypedValue MemorySpaceCastOp::getSourcePtr() { + return cast>(getSource()); +} + +TypedValue MemorySpaceCastOp::getTargetPtr() { + return cast>(getDest()); +} + +bool MemorySpaceCastOp::isValidMemorySpaceCast(PtrLikeTypeInterface tgt, + PtrLikeTypeInterface src) { + return isa(tgt) && + tgt.clonePtrWith(src.getMemorySpace(), std::nullopt) == src; +} + +MemorySpaceCastOpInterface MemorySpaceCastOp::cloneMemorySpaceCastOp( + OpBuilder &b, PtrLikeTypeInterface tgt, + TypedValue src) { + assert(isValidMemorySpaceCast(tgt, src.getType()) && "invalid arguments"); + return MemorySpaceCastOp::create(b, getLoc(), tgt, src); +} + +/// The only cast we recognize as promotable is to the generic space. 
+bool MemorySpaceCastOp::isSourcePromotable() { + return getDest().getType().getMemorySpace() == nullptr; +} + //===----------------------------------------------------------------------===// // PrefetchOp //===----------------------------------------------------------------------===// @@ -2041,6 +2165,11 @@ void ReinterpretCastOp::getCanonicalizationPatterns(RewritePatternSet &results, results.add(context); } +FailureOr>> +ReinterpretCastOp::bubbleDownCasts(OpBuilder &builder) { + return bubbleDownCastsPassthroughOpImpl(*this, builder, getSourceMutable()); +} + //===----------------------------------------------------------------------===// // Reassociative reshape ops //===----------------------------------------------------------------------===// @@ -2348,6 +2477,11 @@ void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results, ComposeExpandOfCollapseOp>(context); } +FailureOr>> +ExpandShapeOp::bubbleDownCasts(OpBuilder &builder) { + return bubbleDownCastsPassthroughOpImpl(*this, builder, getSrcMutable()); +} + /// Compute the layout map after collapsing a given source MemRef type with the /// specified reassociation indices. /// @@ -2569,6 +2703,11 @@ OpFoldResult CollapseShapeOp::fold(FoldAdaptor adaptor) { adaptor.getOperands()); } +FailureOr>> +CollapseShapeOp::bubbleDownCasts(OpBuilder &builder) { + return bubbleDownCastsPassthroughOpImpl(*this, builder, getSrcMutable()); +} + //===----------------------------------------------------------------------===// // ReshapeOp //===----------------------------------------------------------------------===// @@ -2609,6 +2748,11 @@ LogicalResult ReshapeOp::verify() { return success(); } +FailureOr>> +ReshapeOp::bubbleDownCasts(OpBuilder &builder) { + return bubbleDownCastsPassthroughOpImpl(*this, builder, getSourceMutable()); +} + //===----------------------------------------------------------------------===// // StoreOp //===----------------------------------------------------------------------===// @@ -2626,6 +2770,12 @@ LogicalResult StoreOp::fold(FoldAdaptor adaptor, return foldMemRefCast(*this, getValueToStore()); } +FailureOr>> +StoreOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getMemrefMutable(), + ValueRange()); +} + //===----------------------------------------------------------------------===// // SubViewOp //===----------------------------------------------------------------------===// @@ -3282,6 +3432,11 @@ OpFoldResult SubViewOp::fold(FoldAdaptor adaptor) { return {}; } +FailureOr>> +SubViewOp::bubbleDownCasts(OpBuilder &builder) { + return bubbleDownCastsPassthroughOpImpl(*this, builder, getSourceMutable()); +} + //===----------------------------------------------------------------------===// // TransposeOp //===----------------------------------------------------------------------===// @@ -3382,6 +3537,11 @@ OpFoldResult TransposeOp::fold(FoldAdaptor) { return {}; } +FailureOr>> +TransposeOp::bubbleDownCasts(OpBuilder &builder) { + return bubbleDownCastsPassthroughOpImpl(*this, builder, getInMutable()); +} + //===----------------------------------------------------------------------===// // ViewOp //===----------------------------------------------------------------------===// @@ -3525,6 +3685,11 @@ void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results, results.add(context); } +FailureOr>> +ViewOp::bubbleDownCasts(OpBuilder &builder) { + return bubbleDownCastsPassthroughOpImpl(*this, builder, getSourceMutable()); +} + 
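Taken together, these `bubbleDownCasts` hooks let a lossless memory-space cast sink below view-like memref ops: the op is re-created on the pre-cast value and a fresh cast is emitted on its result. A self-contained sketch of the rewrite shape, using plain structs rather than MLIR ops (all names here are illustrative):

```cpp
#include <cstdio>
#include <string>

// Illustrative stand-in for an SSA value carrying a memory space.
struct Value {
  std::string name;
  int memorySpace;
};

// Before: %v = memory_space_cast %src : space 1 -> space 0
//         %r = view_like %v           : space 0
// After:  %n = view_like %src         : space 1
//         %r = memory_space_cast %n   : space 1 -> space 0
struct RewriteResult {
  Value newViewLike;
  Value recast;
};

RewriteResult bubbleDownCast(const Value &castSource, int originalSpace,
                             const std::string &viewLikeOp) {
  Value newViewLike{viewLikeOp + "(" + castSource.name + ")",
                    castSource.memorySpace};
  Value recast{"memory_space_cast(" + newViewLike.name + ")", originalSpace};
  return {newViewLike, recast};
}

int main() {
  // A cast from space 1 into the generic space (0) feeding a subview-like op.
  RewriteResult r = bubbleDownCast({"%src", 1}, /*originalSpace=*/0, "subview");
  std::printf("%s : space %d\n", r.newViewLike.name.c_str(),
              r.newViewLike.memorySpace);
  std::printf("%s : space %d\n", r.recast.name.c_str(), r.recast.memorySpace);
}
```

Loads, stores, and the atomic op that follows instead use the in-place `bubbleDownInPlaceMemorySpaceCastImpl` helper, since their results are not memrefs that could carry the cast.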
//===----------------------------------------------------------------------===// // AtomicRMWOp //===----------------------------------------------------------------------===// @@ -3570,6 +3735,12 @@ OpFoldResult AtomicRMWOp::fold(FoldAdaptor adaptor) { return OpFoldResult(); } +FailureOr>> +AtomicRMWOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getMemrefMutable(), + getResult()); +} + //===----------------------------------------------------------------------===// // TableGen'd op method definitions //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp b/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp index 3de9c3898c713..6200366cded29 100644 --- a/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp +++ b/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp @@ -191,7 +191,7 @@ computeSuffixProductIRBlock(Location loc, OpBuilder &builder, } MemrefValue skipFullyAliasingOperations(MemrefValue source) { - while (auto op = source.getDefiningOp()) { + while (auto *op = source.getDefiningOp()) { if (auto subViewOp = dyn_cast(op); subViewOp && subViewOp.hasZeroOffset() && subViewOp.hasUnitStride()) { // A `memref.subview` with an all zero offset, and all unit strides, still @@ -208,7 +208,7 @@ MemrefValue skipFullyAliasingOperations(MemrefValue source) { } MemrefValue skipViewLikeOps(MemrefValue source) { - while (auto op = source.getDefiningOp()) { + while (auto *op = source.getDefiningOp()) { if (auto viewLike = dyn_cast(op)) { if (source == viewLike.getViewDest()) { source = cast(viewLike.getViewSource()); diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp index 3d70e28ed23ab..5672942a18231 100644 --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -33,6 +33,7 @@ #include "llvm/ADT/TypeSwitch.h" #include "llvm/ADT/bit.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" +#include "llvm/Support/InterleavedRange.h" #include #include #include @@ -77,6 +78,232 @@ struct LLVMPointerPointerLikeModel }; } // namespace +/// Generate a name of a canonical loop nest of the format +/// `(_r_s)*`. Hereby, `_r` identifies the region +/// argument index of an operation that has multiple regions, if the operation +/// has multiple regions. +/// `_s` identifies the position of an operation within a region, where +/// only operations that may potentially contain loops ("container operations" +/// i.e. have region arguments) are counted. Again, it is omitted if there is +/// only one such operation in a region. If there are canonical loops nested +/// inside each other, also may also use the format `_d` where is the +/// nesting depth of the loop. +/// +/// The generated name is a best-effort to make canonical loop unique within an +/// SSA namespace. This also means that regions with IsolatedFromAbove property +/// do not consider any parents or siblings. 
+static std::string generateLoopNestingName(StringRef prefix, + CanonicalLoopOp op) { + struct Component { + /// If true, this component describes a region operand of an operation (the + /// operand's owner) If false, this component describes an operation located + /// in a parent region + bool isRegionArgOfOp; + bool skip = false; + bool isUnique = false; + + size_t idx; + Operation *op; + Region *parentRegion; + size_t loopDepth; + + Operation *&getOwnerOp() { + assert(isRegionArgOfOp && "Must describe a region operand"); + return op; + } + size_t &getArgIdx() { + assert(isRegionArgOfOp && "Must describe a region operand"); + return idx; + } + + Operation *&getContainerOp() { + assert(!isRegionArgOfOp && "Must describe a operation of a region"); + return op; + } + size_t &getOpPos() { + assert(!isRegionArgOfOp && "Must describe a operation of a region"); + return idx; + } + bool isLoopOp() const { + assert(!isRegionArgOfOp && "Must describe a operation of a region"); + return isa(op); + } + Region *&getParentRegion() { + assert(!isRegionArgOfOp && "Must describe a operation of a region"); + return parentRegion; + } + size_t &getLoopDepth() { + assert(!isRegionArgOfOp && "Must describe a operation of a region"); + return loopDepth; + } + + void skipIf(bool v = true) { skip = skip || v; } + }; + + // List of ancestors, from inner to outer. + // Alternates between + // * region argument of an operation + // * operation within a region + SmallVector components; + + // Gather a list of parent regions and operations, and the position within + // their parent + Operation *o = op.getOperation(); + while (o) { + // Operation within a region + Region *r = o->getParentRegion(); + if (!r) + break; + + llvm::ReversePostOrderTraversal traversal(&r->getBlocks().front()); + size_t idx = 0; + bool found = false; + size_t sequentialIdx = -1; + bool isOnlyContainerOp = true; + for (Block *b : traversal) { + for (Operation &op : *b) { + if (&op == o && !found) { + sequentialIdx = idx; + found = true; + } + if (op.getNumRegions()) { + idx += 1; + if (idx > 1) + isOnlyContainerOp = false; + } + if (found && !isOnlyContainerOp) + break; + } + } + + Component &containerOpInRegion = components.emplace_back(); + containerOpInRegion.isRegionArgOfOp = false; + containerOpInRegion.isUnique = isOnlyContainerOp; + containerOpInRegion.getContainerOp() = o; + containerOpInRegion.getOpPos() = sequentialIdx; + containerOpInRegion.getParentRegion() = r; + + Operation *parent = r->getParentOp(); + + // Region argument of an operation + Component ®ionArgOfOperation = components.emplace_back(); + regionArgOfOperation.isRegionArgOfOp = true; + regionArgOfOperation.isUnique = true; + regionArgOfOperation.getArgIdx() = 0; + regionArgOfOperation.getOwnerOp() = parent; + + // The IsolatedFromAbove trait of the parent operation implies that each + // individual region argument has its own separate namespace, so no + // ambiguity. + if (!parent || parent->hasTrait()) + break; + + // Component only needed if operation has multiple region operands. Region + // arguments may be optional, but we currently do not consider this. 
+ if (parent->getRegions().size() > 1) { + auto getRegionIndex = [](Operation *o, Region *r) { + for (auto [idx, region] : llvm::enumerate(o->getRegions())) { + if (®ion == r) + return idx; + } + llvm_unreachable("Region not child of its parent operation"); + }; + regionArgOfOperation.isUnique = false; + regionArgOfOperation.getArgIdx() = getRegionIndex(parent, r); + } + + // next parent + o = parent; + } + + // Determine whether a region-argument component is not needed + for (Component &c : components) + c.skipIf(c.isRegionArgOfOp && c.isUnique); + + // Find runs of nested loops and determine each loop's depth in the loop nest + size_t numSurroundingLoops = 0; + for (Component &c : llvm::reverse(components)) { + if (c.skip) + continue; + + // non-skipped multi-argument operands interrupt the loop nest + if (c.isRegionArgOfOp) { + numSurroundingLoops = 0; + continue; + } + + // Multiple loops in a region means each of them is the outermost loop of a + // new loop nest + if (!c.isUnique) + numSurroundingLoops = 0; + + c.getLoopDepth() = numSurroundingLoops; + + // Next loop is surrounded by one more loop + if (isa(c.getContainerOp())) + numSurroundingLoops += 1; + } + + // In loop nests, skip all but the innermost loop that contains the depth + // number + bool isLoopNest = false; + for (Component &c : components) { + if (c.skip || c.isRegionArgOfOp) + continue; + + if (!isLoopNest && c.getLoopDepth() >= 1) { + // Innermost loop of a loop nest of at least two loops + isLoopNest = true; + } else if (isLoopNest) { + // Non-innermost loop of a loop nest + c.skipIf(c.isUnique); + + // If there is no surrounding loop left, this must have been the outermost + // loop; leave loop-nest mode for the next iteration + if (c.getLoopDepth() == 0) + isLoopNest = false; + } + } + + // Skip non-loop unambiguous regions (but they should interrupt loop nests, so + // we mark them as skipped only after computing loop nests) + for (Component &c : components) + c.skipIf(!c.isRegionArgOfOp && c.isUnique && + !isa(c.getContainerOp())); + + // Components can be skipped if they are already disambiguated by their parent + // (or does not have a parent) + bool newRegion = true; + for (Component &c : llvm::reverse(components)) { + c.skipIf(newRegion && c.isUnique); + + // non-skipped components disambiguate unique children + if (!c.skip) + newRegion = true; + + // ...except canonical loops that need a suffix for each nest + if (!c.isRegionArgOfOp && c.getContainerOp()) + newRegion = false; + } + + // Compile the nesting name string + SmallString<64> Name{prefix}; + llvm::raw_svector_ostream NameOS(Name); + for (auto &c : llvm::reverse(components)) { + if (c.skip) + continue; + + if (c.isRegionArgOfOp) + NameOS << "_r" << c.getArgIdx(); + else if (c.getLoopDepth() >= 1) + NameOS << "_d" << c.getLoopDepth(); + else + NameOS << "_s" << c.getOpPos(); + } + + return NameOS.str().str(); +} + void OpenMPDialect::initialize() { addOperations< #define GET_OP_LIST @@ -182,7 +409,7 @@ static ParseResult parseClauseAttr(AsmParser &parser, ClauseAttr &attr) { } template -void printClauseAttr(OpAsmPrinter &p, Operation *op, ClauseAttr attr) { +static void printClauseAttr(OpAsmPrinter &p, Operation *op, ClauseAttr attr) { p << stringifyEnum(attr.getValue()); } @@ -1511,8 +1738,8 @@ static LogicalResult verifySynchronizationHint(Operation *op, uint64_t hint) { //===----------------------------------------------------------------------===// // Helper function to get bitwise AND of `value` and 'flag' -uint64_t 
mapTypeToBitFlag(uint64_t value, - llvm::omp::OpenMPOffloadMappingFlags flag) { +static uint64_t mapTypeToBitFlag(uint64_t value, + llvm::omp::OpenMPOffloadMappingFlags flag) { return value & llvm::to_underlying(flag); } @@ -2111,6 +2338,31 @@ Operation *TargetOp::getInnermostCapturedOmpOp() { }); } +/// Check if we can promote SPMD kernel to No-Loop kernel. +static bool canPromoteToNoLoop(Operation *capturedOp, TeamsOp teamsOp, + WsloopOp *wsLoopOp) { + // num_teams clause can break no-loop teams/threads assumption. + if (teamsOp.getNumTeamsUpper()) + return false; + + // Reduction kernels are slower in no-loop mode. + if (teamsOp.getNumReductionVars()) + return false; + if (wsLoopOp->getNumReductionVars()) + return false; + + // Check if the user allows the promotion of kernels to no-loop mode. + OffloadModuleInterface offloadMod = + capturedOp->getParentOfType(); + if (!offloadMod) + return false; + auto ompFlags = offloadMod.getFlags(); + if (!ompFlags) + return false; + return ompFlags.getAssumeTeamsOversubscription() && + ompFlags.getAssumeThreadsOversubscription(); +} + TargetRegionFlags TargetOp::getKernelExecFlags(Operation *capturedOp) { // A non-null captured op is only valid if it resides inside of a TargetOp // and is the result of calling getInnermostCapturedOmpOp() on it. @@ -2139,7 +2391,8 @@ TargetRegionFlags TargetOp::getKernelExecFlags(Operation *capturedOp) { // Detect target-teams-distribute-parallel-wsloop[-simd]. if (numWrappers == 2) { - if (!isa(innermostWrapper)) + WsloopOp *wsloopOp = dyn_cast(innermostWrapper); + if (!wsloopOp) return TargetRegionFlags::generic; innermostWrapper = std::next(innermostWrapper); @@ -2150,12 +2403,17 @@ TargetRegionFlags TargetOp::getKernelExecFlags(Operation *capturedOp) { if (!isa_and_present(parallelOp)) return TargetRegionFlags::generic; - Operation *teamsOp = parallelOp->getParentOp(); - if (!isa_and_present(teamsOp)) + TeamsOp teamsOp = dyn_cast(parallelOp->getParentOp()); + if (!teamsOp) return TargetRegionFlags::generic; - if (teamsOp->getParentOp() == targetOp.getOperation()) - return TargetRegionFlags::spmd | TargetRegionFlags::trip_count; + if (teamsOp->getParentOp() == targetOp.getOperation()) { + TargetRegionFlags result = + TargetRegionFlags::spmd | TargetRegionFlags::trip_count; + if (canPromoteToNoLoop(capturedOp, teamsOp, wsloopOp)) + result = result | TargetRegionFlags::no_loop; + return result; + } } // Detect target-teams-distribute[-simd] and target-teams-loop. 
else if (isa(innermostWrapper)) { @@ -3128,6 +3386,9 @@ void NewCliOp::getAsmResultNames(OpAsmSetValueNameFn setNameFn) { Value result = getResult(); auto [newCli, gen, cons] = decodeCli(result); + // Structured binding `gen` cannot be captured in lambdas before C++20 + OpOperand *generator = gen; + // Derive the CLI variable name from its generator: // * "canonloop" for omp.canonical_loop // * custom name for loop transformation generatees @@ -3141,71 +3402,29 @@ void NewCliOp::getAsmResultNames(OpAsmSetValueNameFn setNameFn) { cliName = TypeSwitch(gen->getOwner()) .Case([&](CanonicalLoopOp op) { - // Find the canonical loop nesting: For each ancestor add a - // "+_r" suffix (in reverse order) - SmallVector components; - Operation *o = op.getOperation(); - while (o) { - if (o->hasTrait()) - break; - - Region *r = o->getParentRegion(); - if (!r) - break; - - auto getSequentialIndex = [](Region *r, Operation *o) { - llvm::ReversePostOrderTraversal traversal( - &r->getBlocks().front()); - size_t idx = 0; - for (Block *b : traversal) { - for (Operation &op : *b) { - if (&op == o) - return idx; - // Only consider operations that are containers as - // possible children - if (!op.getRegions().empty()) - idx += 1; - } - } - llvm_unreachable("Operation not part of the region"); - }; - size_t sequentialIdx = getSequentialIndex(r, o); - components.push_back(("s" + Twine(sequentialIdx)).str()); - - Operation *parent = r->getParentOp(); - if (!parent) - break; - - // If the operation has more than one region, also count in - // which of the regions - if (parent->getRegions().size() > 1) { - auto getRegionIndex = [](Operation *o, Region *r) { - for (auto [idx, region] : - llvm::enumerate(o->getRegions())) { - if (®ion == r) - return idx; - } - llvm_unreachable("Region not child its parent operation"); - }; - size_t regionIdx = getRegionIndex(parent, r); - components.push_back(("r" + Twine(regionIdx)).str()); - } - - // next parent - o = parent; - } - - SmallString<64> Name("canonloop"); - for (const std::string &s : reverse(components)) { - Name += '_'; - Name += s; - } - - return Name; + return generateLoopNestingName("canonloop", op); }) .Case([&](UnrollHeuristicOp op) -> std::string { llvm_unreachable("heuristic unrolling does not generate a loop"); }) + .Case([&](TileOp op) -> std::string { + auto [generateesFirst, generateesCount] = + op.getGenerateesODSOperandIndexAndLength(); + unsigned firstGrid = generateesFirst; + unsigned firstIntratile = generateesFirst + generateesCount / 2; + unsigned end = generateesFirst + generateesCount; + unsigned opnum = generator->getOperandNumber(); + // In the OpenMP apply and looprange clauses, indices are 1-based + if (firstGrid <= opnum && opnum < firstIntratile) { + unsigned gridnum = opnum - firstGrid + 1; + return ("grid" + Twine(gridnum)).str(); + } + if (firstIntratile <= opnum && opnum < end) { + unsigned intratilenum = opnum - firstIntratile + 1; + return ("intratile" + Twine(intratilenum)).str(); + } + llvm_unreachable("Unexpected generatee argument"); + }) .Default([&](Operation *op) { assert(false && "TODO: Custom name for this operation"); return "transformed"; @@ -3292,7 +3511,8 @@ void CanonicalLoopOp::getAsmBlockNames(OpAsmSetBlockNameFn setNameFn) { void CanonicalLoopOp::getAsmBlockArgumentNames(Region ®ion, OpAsmSetValueNameFn setNameFn) { - setNameFn(region.getArgument(0), "iv"); + std::string ivName = generateLoopNestingName("iv", *this); + setNameFn(region.getArgument(0), ivName); } void CanonicalLoopOp::print(OpAsmPrinter &p) { @@ -3433,6 
+3653,138 @@ UnrollHeuristicOp::getGenerateesODSOperandIndexAndLength() { return {0, 0}; } +//===----------------------------------------------------------------------===// +// TileOp +//===----------------------------------------------------------------------===// + +static void printLoopTransformClis(OpAsmPrinter &p, TileOp op, + OperandRange generatees, + OperandRange applyees) { + if (!generatees.empty()) + p << '(' << llvm::interleaved(generatees) << ')'; + + if (!applyees.empty()) + p << " <- (" << llvm::interleaved(applyees) << ')'; +} + +static ParseResult parseLoopTransformClis( + OpAsmParser &parser, + SmallVectorImpl &generateesOperands, + SmallVectorImpl &applyeesOperands) { + if (parser.parseOptionalLess()) { + // Syntax 1: generatees present + + if (parser.parseOperandList(generateesOperands, + mlir::OpAsmParser::Delimiter::Paren)) + return failure(); + + if (parser.parseLess()) + return failure(); + } else { + // Syntax 2: generatees omitted + } + + // Parse `<-` (`<` has already been parsed) + if (parser.parseMinus()) + return failure(); + + if (parser.parseOperandList(applyeesOperands, + mlir::OpAsmParser::Delimiter::Paren)) + return failure(); + + return success(); +} + +LogicalResult TileOp::verify() { + if (getApplyees().empty()) + return emitOpError() << "must apply to at least one loop"; + + if (getSizes().size() != getApplyees().size()) + return emitOpError() << "there must be one tile size for each applyee"; + + if (!getGeneratees().empty() && + 2 * getSizes().size() != getGeneratees().size()) + return emitOpError() + << "expecting two times the number of generatees than applyees"; + + DenseSet parentIVs; + + Value parent = getApplyees().front(); + for (auto &&applyee : llvm::drop_begin(getApplyees())) { + auto [parentCreate, parentGen, parentCons] = decodeCli(parent); + auto [create, gen, cons] = decodeCli(applyee); + + if (!parentGen) + return emitOpError() << "applyee CLI has no generator"; + + auto parentLoop = dyn_cast_or_null(parentGen->getOwner()); + if (!parentGen) + return emitOpError() + << "currently only supports omp.canonical_loop as applyee"; + + parentIVs.insert(parentLoop.getInductionVar()); + + if (!gen) + return emitOpError() << "applyee CLI has no generator"; + auto loop = dyn_cast_or_null(gen->getOwner()); + if (!loop) + return emitOpError() + << "currently only supports omp.canonical_loop as applyee"; + + // Canonical loop must be perfectly nested, i.e. the body of the parent must + // only contain the omp.canonical_loop of the nested loops, and + // omp.terminator + bool isPerfectlyNested = [&]() { + auto &parentBody = parentLoop.getRegion(); + if (!parentBody.hasOneBlock()) + return false; + auto &parentBlock = parentBody.getBlocks().front(); + + auto nestedLoopIt = parentBlock.begin(); + if (nestedLoopIt == parentBlock.end() || + (&*nestedLoopIt != loop.getOperation())) + return false; + + auto termIt = std::next(nestedLoopIt); + if (termIt == parentBlock.end() || !isa(termIt)) + return false; + + if (std::next(termIt) != parentBlock.end()) + return false; + + return true; + }(); + if (!isPerfectlyNested) + return emitOpError() << "tiled loop nest must be perfectly nested"; + + if (parentIVs.contains(loop.getTripCount())) + return emitOpError() << "tiled loop nest must be rectangular"; + + parent = applyee; + } + + // TODO: The tile sizes must be computed before the loop, but checking this + // requires dominance analysis. 
For instance: + // + // %canonloop = omp.new_cli + // omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) { + // // write to %x + // omp.terminator + // } + // %ts = llvm.load %x + // omp.tile <- (%canonloop) sizes(%ts : i32) + + return success(); +} + +std::pair TileOp ::getApplyeesODSOperandIndexAndLength() { + return getODSOperandIndexAndLength(odsIndex_applyees); +} + +std::pair TileOp::getGenerateesODSOperandIndexAndLength() { + return getODSOperandIndexAndLength(odsIndex_generatees); +} + //===----------------------------------------------------------------------===// // Critical construct (2.17.1) //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp b/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp index f0209af8a1ca3..51f25f755a8a6 100644 --- a/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp +++ b/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp @@ -15,6 +15,7 @@ #include "mlir/IR/Matchers.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Transforms/InliningUtils.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/ADT/TypeSwitch.h" using namespace mlir; @@ -391,6 +392,39 @@ LogicalResult PtrAddOp::inferReturnTypes( return success(); } +//===----------------------------------------------------------------------===// +// PtrDiffOp +//===----------------------------------------------------------------------===// + +LogicalResult PtrDiffOp::verify() { + // If the operands are not shaped early exit. + if (!isa(getLhs().getType())) + return success(); + + // Just check the container type matches, `SameOperandsAndResultShape` handles + // the actual shape. + if (getResult().getType().getTypeID() != getLhs().getType().getTypeID()) { + return emitError() << "expected the result to have the same container " + "type as the operands when operands are shaped"; + } + + return success(); +} + +ptr::PtrType PtrDiffOp::getPtrType() { + Type lhsType = getLhs().getType(); + if (auto shapedType = dyn_cast(lhsType)) + return cast(shapedType.getElementType()); + return cast(lhsType); +} + +Type PtrDiffOp::getIntType() { + Type resultType = getResult().getType(); + if (auto shapedType = dyn_cast(resultType)) + return shapedType.getElementType(); + return resultType; +} + //===----------------------------------------------------------------------===// // ToPtrOp //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp index fb179e64d8e7b..47c99642b9c37 100644 --- a/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp @@ -188,8 +188,8 @@ struct ExecuteRegionOpInterface TypeRange newResultTypes(yieldOp.getResults()); // Create new op and move over region. - auto newOp = - scf::ExecuteRegionOp::create(rewriter, op->getLoc(), newResultTypes); + auto newOp = scf::ExecuteRegionOp::create( + rewriter, op->getLoc(), newResultTypes, executeRegionOp.getNoInline()); newOp.getRegion().takeBody(executeRegionOp.getRegion()); // Bufferize every block. 
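The new PtrDiffOp accessors above both follow the same unwrap-if-shaped pattern: for vector or tensor operands the relevant type is the element type, while scalar operands are used directly. A minimal sketch of that pattern, assuming only MLIR's builtin ShapedType interface (the helper name is illustrative and not part of the patch):

    #include "mlir/IR/BuiltinTypes.h"

    // Illustrative helper (assumption): mirrors how PtrDiffOp::getPtrType()
    // and PtrDiffOp::getIntType() pick the element type of shaped operands
    // and fall back to the type itself for scalar operands.
    static mlir::Type scalarOrElementType(mlir::Type type) {
      if (auto shaped = llvm::dyn_cast<mlir::ShapedType>(type))
        return shaped.getElementType();
      return type;
    }

In the patch itself the same unwrapping is done inline in each accessor, casting the element type to ptr::PtrType in getPtrType() and returning it as-is in getIntType().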
diff --git a/mlir/lib/Dialect/SCF/Transforms/StructuralTypeConversions.cpp b/mlir/lib/Dialect/SCF/Transforms/StructuralTypeConversions.cpp index b0c781c7aff11..9468927021495 100644 --- a/mlir/lib/Dialect/SCF/Transforms/StructuralTypeConversions.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/StructuralTypeConversions.cpp @@ -185,6 +185,30 @@ class ConvertWhileOpTypes }; } // namespace +namespace { +class ConvertIndexSwitchOpTypes + : public Structural1ToNConversionPattern { +public: + using Structural1ToNConversionPattern::Structural1ToNConversionPattern; + + std::optional + convertSourceOp(IndexSwitchOp op, OneToNOpAdaptor adaptor, + ConversionPatternRewriter &rewriter, + TypeRange dstTypes) const { + auto newOp = + IndexSwitchOp::create(rewriter, op.getLoc(), dstTypes, op.getArg(), + op.getCases(), op.getNumCases()); + + for (unsigned i = 0u; i < op.getNumRegions(); i++) { + auto &dstRegion = newOp.getRegion(i); + rewriter.inlineRegionBefore(op.getRegion(i), dstRegion, dstRegion.end()); + } + return newOp; + } +}; +} // namespace + namespace { // When the result types of a ForOp/IfOp get changed, the operand types of the // corresponding yield op need to be changed. In order to trigger the @@ -220,18 +244,19 @@ void mlir::scf::populateSCFStructuralTypeConversions( const TypeConverter &typeConverter, RewritePatternSet &patterns, PatternBenefit benefit) { patterns.add( - typeConverter, patterns.getContext(), benefit); + ConvertWhileOpTypes, ConvertConditionOpTypes, + ConvertIndexSwitchOpTypes>(typeConverter, patterns.getContext(), + benefit); } void mlir::scf::populateSCFStructuralTypeConversionTarget( const TypeConverter &typeConverter, ConversionTarget &target) { - target.addDynamicallyLegalOp( + target.addDynamicallyLegalOp( [&](Operation *op) { return typeConverter.isLegal(op->getResults()); }); target.addDynamicallyLegalOp([&](scf::YieldOp op) { // We only have conversions for a subset of ops that use scf.yield // terminators. 
- if (!isa(op->getParentOp())) + if (!isa(op->getParentOp())) return true; return typeConverter.isLegal(op.getOperands()); }); diff --git a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp index 89e2c57d709dd..36685d3affe03 100644 --- a/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp @@ -665,8 +665,8 @@ generateLoopNestUsingCustomOp( return failure(); } - if (failed(generateLoopTerminatorFn(rewriter, loc, tiledResults, - resultOffsets, resultSizes, + if (failed(generateLoopTerminatorFn(rewriter, loc, loopHeaderInfo->loops, + tiledResults, resultOffsets, resultSizes, loopHeaderInfo->destinationTensors))) { return failure(); } diff --git a/mlir/lib/Dialect/SCF/Utils/Utils.cpp b/mlir/lib/Dialect/SCF/Utils/Utils.cpp index e7bce98c607df..10eae8906ce31 100644 --- a/mlir/lib/Dialect/SCF/Utils/Utils.cpp +++ b/mlir/lib/Dialect/SCF/Utils/Utils.cpp @@ -671,9 +671,10 @@ LogicalResult mlir::loopUnrollJamByFactor(scf::ForOp forOp, return success(); } -Range emitNormalizedLoopBoundsForIndexType(RewriterBase &rewriter, Location loc, - OpFoldResult lb, OpFoldResult ub, - OpFoldResult step) { +static Range emitNormalizedLoopBoundsForIndexType(RewriterBase &rewriter, + Location loc, OpFoldResult lb, + OpFoldResult ub, + OpFoldResult step) { Range normalizedLoopBounds; normalizedLoopBounds.offset = rewriter.getIndexAttr(0); normalizedLoopBounds.stride = rewriter.getIndexAttr(1); diff --git a/mlir/lib/Dialect/Shard/Transforms/ShardingPropagation.cpp b/mlir/lib/Dialect/Shard/Transforms/ShardingPropagation.cpp index a647128cf0500..3bfbf373209e3 100644 --- a/mlir/lib/Dialect/Shard/Transforms/ShardingPropagation.cpp +++ b/mlir/lib/Dialect/Shard/Transforms/ShardingPropagation.cpp @@ -128,13 +128,13 @@ getOrderedPossibleShardingAttrs(ArrayRef mustShardings, curShardingAttrs.push_back(optionalShardings[i]); dfsCreateShardingAttrs(i + 1); curShardingAttrs.pop_back(); - curShardingAttrs.push_back({}); + curShardingAttrs.emplace_back(); dfsCreateShardingAttrs(i + 1); curShardingAttrs.pop_back(); return; } - curShardingAttrs.push_back({}); + curShardingAttrs.emplace_back(); dfsCreateShardingAttrs(i + 1); curShardingAttrs.pop_back(); }; diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp index 659282a995123..f53950242e10c 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp @@ -344,7 +344,7 @@ void LoopEmitter::initSubSectIterator(OpBuilder &builder, Location loc) { // Reverse queue into a stack. 
std::reverse(remDepStack[t][lvl].begin(), remDepStack[t][lvl].end()); for (auto [loop, coeff] : dependentLvlMap[t][lvl]) - depRedOrder.emplace_back(std::make_tuple(loop, t, lvl)); + depRedOrder.emplace_back(loop, t, lvl); } if (depRedOrder.empty()) diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp index 8d636460c667e..caf80165fc640 100644 --- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp @@ -1562,26 +1562,6 @@ OpFoldResult TransposeOp::fold(FoldAdaptor adaptor) { return getInput1(); } -OpFoldResult tosa::LogOp::fold(FoldAdaptor adaptor) { - auto input = getInput1(); - // Element-wise log(exp(x)) = x - if (auto op = input.getDefiningOp()) { - return op.getInput1(); - } - - return {}; -} - -OpFoldResult tosa::ExpOp::fold(FoldAdaptor adaptor) { - auto input = getInput1(); - // Element-wise exp(log(x)) = x - if (auto op = input.getDefiningOp()) { - return op.getInput1(); - } - - return {}; -} - OpFoldResult tosa::NegateOp::fold(FoldAdaptor adaptor) { // Element-wise negate(negate(x)) = x // iff all zero points are constant 0 diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp index 26ad641128b3d..332f1a0e5506f 100644 --- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp @@ -1843,12 +1843,6 @@ LogicalResult MatMulOp::verify() { return emitOpError("expect quantized operands to have same widths, got ") << aQuantWidth << " and " << bQuantWidth; } - } else { - // non-quantized element types - if (aElementType != bElementType) { - return emitOpError("expect same element type for inputs a and b, got ") - << aElementType << " and " << bElementType; - } } // check a_zp and b_zp diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp index 91fea676ac44a..4fc7ce81d9821 100644 --- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp @@ -205,148 +205,142 @@ struct TosaValidation : public tosa::impl::TosaValidationBase { constCheckers.emplace_back(checkConstantOperandNegate); } - bool levelCheckKernel(Operation *op, int32_t v, const StringRef checkDesc) { - if (v > tosaLevel.MAX_KERNEL) { - op->emitOpError() << "failed level check: " << checkDesc; - return false; - } - return true; + LogicalResult levelCheckKernel(Operation *op, int32_t v, + const StringRef checkDesc) { + if (v > tosaLevel.MAX_KERNEL) + return op->emitOpError() << "failed level check: " << checkDesc; + return success(); } - bool levelCheckStride(Operation *op, int32_t v, const StringRef checkDesc) { - if (v > tosaLevel.MAX_STRIDE) { - op->emitOpError() << "failed level check: " << checkDesc; - return false; - } - return true; + LogicalResult levelCheckStride(Operation *op, int32_t v, + const StringRef checkDesc) { + if (v > tosaLevel.MAX_STRIDE) + return op->emitOpError() << "failed level check: " << checkDesc; + return success(); } - bool levelCheckScale(Operation *op, int32_t v, const StringRef checkDesc) { - if (v > tosaLevel.MAX_SCALE) { - op->emitOpError() << "failed level check: " << checkDesc; - return false; - } - return true; + LogicalResult levelCheckScale(Operation *op, int32_t v, + const StringRef checkDesc) { + if (v > tosaLevel.MAX_SCALE) + return op->emitOpError() << "failed level check: " << checkDesc; + return success(); } - bool levelCheckListSize(Operation *op, int32_t v, const StringRef checkDesc) { - if 
(v > tosaLevel.MAX_TENSOR_LIST_SIZE) { - op->emitOpError() << "failed level check for MAX_TENSOR_LIST_SIZE: " - << checkDesc; - return false; - } - return true; + LogicalResult levelCheckListSize(Operation *op, int32_t v, + const StringRef checkDesc) { + if (v > tosaLevel.MAX_TENSOR_LIST_SIZE) + return op->emitOpError() + << "failed level check for MAX_TENSOR_LIST_SIZE: " << checkDesc; + return success(); } // Perform the Level Rank check on the tensor type. - bool levelCheckRank(Operation *op, const Type typeToCheck, - const StringRef operandOrResult, int32_t highest_rank) { + LogicalResult levelCheckRank(Operation *op, const Type typeToCheck, + const StringRef operandOrResult, + int32_t highest_rank) { if (ShapedType type = dyn_cast(typeToCheck)) { - if (!type.hasRank()) { - op->emitOpError() << "failed level check: unranked tensor"; - return false; - } - if (type.getRank() > highest_rank) { - op->emitOpError() << "failed level check: " << operandOrResult - << " rank(shape) <= MAX_RANK"; - return false; - } + if (!type.hasRank()) + return op->emitOpError() << "failed level check: unranked tensor"; + if (type.getRank() > highest_rank) + return op->emitOpError() << "failed level check: " << operandOrResult + << " rank(shape) <= MAX_RANK"; } - return true; + return success(); } // Perform the Level Rank check on the tensor value. - bool levelCheckRank(Operation *op, const Value &v, - const StringRef operandOrResult, int32_t highest_rank) { + LogicalResult levelCheckRank(Operation *op, const Value &v, + const StringRef operandOrResult, + int32_t highest_rank) { return levelCheckRank(op, v.getType(), operandOrResult, highest_rank); } // Perform the Level tensor size check on the tensor type. - bool levelCheckSize(Operation *op, const Type &typeToCheck, - const StringRef operandOrResult); + LogicalResult levelCheckSize(Operation *op, const Type &typeToCheck, + const StringRef operandOrResult); // Perform the Level tensor size check on the tensor value. - bool levelCheckSize(Operation *op, const Value &v, - const StringRef operandOrResult) { + LogicalResult levelCheckSize(Operation *op, const Value &v, + const StringRef operandOrResult) { return levelCheckSize(op, v.getType(), operandOrResult); } // Level check sizes of all operands and results of the operation. template - bool levelCheckSizes(T tosaOp) { + LogicalResult levelCheckSizes(T tosaOp) { auto op = tosaOp.getOperation(); for (auto v : op->getOperands()) { - if (!levelCheckSize(op, v, "operand")) - return false; + if (failed(levelCheckSize(op, v, "operand"))) + return failure(); } for (auto v : op->getResults()) { - if (!levelCheckSize(op, v, "result")) - return false; + if (failed(levelCheckSize(op, v, "result"))) + return failure(); } - return true; + return success(); } // Level check ranks of all operands, attribute and results of the operation. template - bool levelCheckRanks(T tosaOp) { + LogicalResult levelCheckRanks(T tosaOp) { auto op = tosaOp.getOperation(); for (auto v : op->getOperands()) { - if (!levelCheckRank(op, v, "operand", tosaLevel.MAX_RANK)) - return false; + if (failed(levelCheckRank(op, v, "operand", tosaLevel.MAX_RANK))) + return failure(); } for (auto v : op->getResults()) { - if (!levelCheckRank(op, v, "result", tosaLevel.MAX_RANK)) - return false; + if (failed(levelCheckRank(op, v, "result", tosaLevel.MAX_RANK))) + return failure(); } - return true; + return success(); } // Level check ranks and sizes. 
- bool levelCheckRanksAndSizes(Operation *op); + LogicalResult levelCheckRanksAndSizes(Operation *op); // Pool Op: level check kernel/stride/pad values template - bool levelCheckPool(Operation *op) { + LogicalResult levelCheckPool(Operation *op) { if (auto poolOp = dyn_cast(op)) { for (auto k : poolOp.getKernel()) { - if (!levelCheckKernel(op, k, "kernel <= MAX_KERNEL")) { - return false; + if (failed(levelCheckKernel(op, k, "kernel <= MAX_KERNEL"))) { + return failure(); } } for (auto s : poolOp.getStride()) { - if (!levelCheckStride(op, s, "stride <= MAX_STRIDE")) { - return false; + if (failed(levelCheckStride(op, s, "stride <= MAX_STRIDE"))) { + return failure(); } } for (auto p : poolOp.getPad()) { - if (!levelCheckKernel(op, p, "pad <= MAX_KERNEL")) { - return false; + if (failed(levelCheckKernel(op, p, "pad <= MAX_KERNEL"))) { + return failure(); } } } - return true; + return success(); } // Conv Op: level check dilation/stride/pad values template - bool levelCheckConv(Operation *op) { + LogicalResult levelCheckConv(Operation *op) { if (auto convOp = dyn_cast(op)) { for (auto k : convOp.getDilation()) { - if (!levelCheckKernel(op, k, "dilation <= MAX_KERNEL")) { - return false; + if (failed(levelCheckKernel(op, k, "dilation <= MAX_KERNEL"))) { + return failure(); } } for (auto p : convOp.getPad()) { - if (!levelCheckKernel(op, p, "pad <= MAX_KERNEL")) { - return false; + if (failed(levelCheckKernel(op, p, "pad <= MAX_KERNEL"))) { + return failure(); } } for (auto s : convOp.getStride()) { - if (!levelCheckStride(op, s, "stride <= MAX_STRIDE")) { - return false; + if (failed(levelCheckStride(op, s, "stride <= MAX_STRIDE"))) { + return failure(); } } auto dilation = convOp.getDilation(); @@ -356,100 +350,100 @@ struct TosaValidation : public tosa::impl::TosaValidationBase { if (isa(op)) { assert(shape.size() == 4); assert(dilation.size() == 2); - if (!levelCheckKernel(op, dilation[0] * shape[1], - "dilation_y * KH <= MAX_KERNEL)") || - !levelCheckKernel(op, dilation[1] * shape[2], - "dilation_x * KW <= MAX_KERNEL)")) - return false; + if (failed(levelCheckKernel(op, dilation[0] * shape[1], + "dilation_y * KH <= MAX_KERNEL)")) || + failed(levelCheckKernel(op, dilation[1] * shape[2], + "dilation_x * KW <= MAX_KERNEL)"))) + return failure(); } else if (isa(op)) { assert(shape.size() == 5); assert(dilation.size() == 3); - if (!levelCheckKernel(op, dilation[0] * shape[1], - "dilation_d * KD <= MAX_KERNEL)") || - !levelCheckKernel(op, dilation[1] * shape[2], - "dilation_y * KH <= MAX_KERNEL)") || - !levelCheckKernel(op, dilation[2] * shape[3], - "dilation_x * KW <= MAX_KERNEL)")) - return false; + if (failed(levelCheckKernel(op, dilation[0] * shape[1], + "dilation_d * KD <= MAX_KERNEL)")) || + failed(levelCheckKernel(op, dilation[1] * shape[2], + "dilation_y * KH <= MAX_KERNEL)")) || + failed(levelCheckKernel(op, dilation[2] * shape[3], + "dilation_x * KW <= MAX_KERNEL)"))) + return failure(); } else if (isa(op)) { assert(shape.size() == 4); assert(dilation.size() == 2); - if (!levelCheckKernel(op, dilation[0] * shape[0], - "dilation_y * KH <= MAX_KERNEL)") || - !levelCheckKernel(op, dilation[1] * shape[1], - "dilation_x * KW <= MAX_KERNEL)")) - return false; + if (failed(levelCheckKernel(op, dilation[0] * shape[0], + "dilation_y * KH <= MAX_KERNEL)")) || + failed(levelCheckKernel(op, dilation[1] * shape[1], + "dilation_x * KW <= MAX_KERNEL)"))) + return failure(); } } } - return true; + return success(); } // FFT op: level check H, W in input shape [N,H,W] template - bool 
levelCheckFFT(Operation *op) { + LogicalResult levelCheckFFT(Operation *op) { if (isa(op)) { for (auto v : op->getOperands()) { if (ShapedType type = dyn_cast(v.getType())) { auto shape = type.getShape(); assert(shape.size() == 3); - if (!levelCheckKernel(op, shape[1], "H <= MAX_KERNEL") || - !levelCheckKernel(op, shape[2], "W <= MAX_KERNEL")) { - return false; + if (failed(levelCheckKernel(op, shape[1], "H <= MAX_KERNEL")) || + failed(levelCheckKernel(op, shape[2], "W <= MAX_KERNEL"))) { + return failure(); } } } } - return true; + return success(); } // TransposeConv2d op: level check kH/kW, outpad, and stride - bool levelCheckTransposeConv2d(Operation *op) { + LogicalResult levelCheckTransposeConv2d(Operation *op) { if (auto transpose = dyn_cast(op)) { if (ShapedType filterType = dyn_cast(transpose.getWeight().getType())) { auto shape = filterType.getShape(); assert(shape.size() == 4); // level check kernel sizes for kH and KW - if (!levelCheckKernel(op, shape[1], "KH <= MAX_KERNEL") || - !levelCheckKernel(op, shape[2], "KW <= MAX_KERNEL")) { - return false; + if (failed(levelCheckKernel(op, shape[1], "KH <= MAX_KERNEL")) || + failed(levelCheckKernel(op, shape[2], "KW <= MAX_KERNEL"))) { + return failure(); } } for (auto p : transpose.getOutPad()) { - if (!levelCheckKernel(op, p, "pad <= MAX_KERNEL")) { - return false; + if (failed(levelCheckKernel(op, p, "pad <= MAX_KERNEL"))) { + return failure(); } } for (auto s : transpose.getStride()) { - if (!levelCheckStride(op, s, "stride <= MAX_STRIDE")) { - return false; + if (failed(levelCheckStride(op, s, "stride <= MAX_STRIDE"))) { + return failure(); } } } - return true; + return success(); } // Resize op: level check max scales - bool levelCheckResize(Operation *op) { + LogicalResult levelCheckResize(Operation *op) { if (auto resize = dyn_cast(op)) { SmallVector scale; if (!tosa::getConstShapeValues(resize.getScale().getDefiningOp(), scale)) { - return false; + return failure(); } const int64_t scaleYN = scale[0]; const int64_t scaleYD = scale[1]; const int64_t scaleXN = scale[2]; const int64_t scaleXD = scale[3]; - if (!levelCheckScale(op, scaleYN / scaleYD, - "scale_y_n/scale_y_d <= MAX_SCALE") || - !levelCheckScale(op, scaleXN / scaleXD, - "scale_x_n/scale_x_d <= MAX_SCALE")) { - return false; + if (failed(levelCheckScale(op, scaleYN / scaleYD, + "scale_y_n/scale_y_d <= MAX_SCALE")) || + failed(levelCheckScale(op, scaleXN / scaleXD, + "scale_x_n/scale_x_d <= MAX_SCALE"))) { + return failure(); } } - return true; + return success(); } // Recursively perform a bottom-up search to determine the maximum nesting @@ -468,62 +462,65 @@ struct TosaValidation : public tosa::impl::TosaValidationBase { getMaxNestedDepth(op, depth); } - bool levelCheckMaxNesting(Operation *op) { + LogicalResult levelCheckMaxNesting(Operation *op) { int32_t maxNestedDepth = 0; getMaxNestedDepth(op, maxNestedDepth); if (maxNestedDepth >= tosaLevel.MAX_NESTING) { op->emitOpError() << "failed level check: " << maxNestedDepth << " >= MAX_NESTING"; - return false; + return failure(); } - return true; + return success(); } - bool levelCheckListSize(Operation *op) { + LogicalResult levelCheckListSize(Operation *op) { if (auto concat = dyn_cast(op)) { return levelCheckListSize(op, concat.getInput1().size(), "input1"); } if (auto custom = dyn_cast(op)) { - if (!levelCheckListSize(op, custom.getInputList().size(), "input_list") || - !levelCheckListSize(op, custom.getOutputList().size(), - "output_list")) { - return false; + if (failed(levelCheckListSize(op, 
custom.getInputList().size(), + "input_list")) || + failed(levelCheckListSize(op, custom.getOutputList().size(), + "output_list"))) { + return failure(); } } if (auto condIf = dyn_cast(op)) { - if (!levelCheckListSize(op, condIf.getInputList().size(), "inputs") || - !levelCheckListSize(op, condIf.getOutputList().size(), "outputs")) { - return false; + if (failed( + levelCheckListSize(op, condIf.getInputList().size(), "inputs")) || + failed(levelCheckListSize(op, condIf.getOutputList().size(), + "outputs"))) { + return failure(); } } if (auto w = dyn_cast(op)) { - if (!levelCheckListSize(op, w.getInputList().size(), "inputs") || - !levelCheckListSize(op, w.getOutputList().size(), "outputs")) { - return false; + if (failed(levelCheckListSize(op, w.getInputList().size(), "inputs")) || + failed(levelCheckListSize(op, w.getOutputList().size(), "outputs"))) { + return failure(); } } - return true; + return success(); } - bool attributeCheckRescale(Operation *op) { + LogicalResult attributeCheckRescale(Operation *op) { if (auto rescale = dyn_cast(op)) { if (rescale.getRoundingMode() == RoundingMode::DOUBLE_ROUND && !targetEnv.allows(Extension::doubleround)) { op->emitOpError() << "failed attribute check: rounding_mode = DOUBLE_ROUND " << "requires extension [doubleround]"; - return false; + return failure(); } if (rescale.getRoundingMode() == RoundingMode::INEXACT_ROUND && !targetEnv.allows(Extension::inexactround)) { op->emitOpError() << "failed attribute check: rounding_mode = INEXACT_ROUND " << "requires extension [inexactround]"; - return false; + return failure(); } } - return true; + return success(); } // configure profile and level values from pass options profileName and @@ -563,8 +560,8 @@ struct TosaValidation : public tosa::impl::TosaValidationBase { } } - bool CheckVariable(Operation *op); - bool CheckVariableReadOrWrite(Operation *op); + LogicalResult CheckVariable(Operation *op); + LogicalResult CheckVariableReadOrWrite(Operation *op); bool isValidElementType(Type type, const bool allowUnsigned = false); SmallVector< @@ -577,62 +574,66 @@ struct TosaValidation : public tosa::impl::TosaValidationBase { }; template <> -bool TosaValidation::levelCheckRanks(tosa::ArgMaxOp tosaOp) { +LogicalResult TosaValidation::levelCheckRanks(tosa::ArgMaxOp tosaOp) { auto *op = tosaOp.getOperation(); - if (!levelCheckRank(op, tosaOp.getInput(), "operand", tosaLevel.MAX_RANK)) - return false; + if (failed( + levelCheckRank(op, tosaOp.getInput(), "operand", tosaLevel.MAX_RANK))) + return failure(); // rank(output) = rank(input) - 1 - if (!levelCheckRank(op, tosaOp.getOutput(), "result", tosaLevel.MAX_RANK - 1)) - return false; + if (failed(levelCheckRank(op, tosaOp.getOutput(), "result", + tosaLevel.MAX_RANK - 1))) + return failure(); - return true; + return success(); } template <> -bool TosaValidation::levelCheckRanks(tosa::IfOp tosaOp) { +LogicalResult TosaValidation::levelCheckRanks(tosa::IfOp tosaOp) { auto *op = tosaOp.getOperation(); // Only the condition input has rank limitation. 
- if (!levelCheckRank(op, tosaOp.getCondition(), "operand", tosaLevel.MAX_RANK)) - return false; + if (failed(levelCheckRank(op, tosaOp.getCondition(), "operand", + tosaLevel.MAX_RANK))) + return failure(); - return true; + return success(); } template <> -bool TosaValidation::levelCheckRanks(tosa::VariableOp tosaOp) { +LogicalResult TosaValidation::levelCheckRanks(tosa::VariableOp tosaOp) { auto *op = tosaOp.getOperation(); auto variableType = getVariableType(tosaOp); - if (!levelCheckRank(op, variableType, "variable type", tosaLevel.MAX_RANK)) - return false; + if (failed(levelCheckRank(op, variableType, "variable type", + tosaLevel.MAX_RANK))) + return failure(); - return true; + return success(); } template <> -bool TosaValidation::levelCheckSizes(tosa::VariableOp tosaOp) { +LogicalResult TosaValidation::levelCheckSizes(tosa::VariableOp tosaOp) { auto *op = tosaOp.getOperation(); auto variableType = getVariableType(tosaOp); - if (!levelCheckSize(op, variableType, "variable type")) - return false; + if (failed(levelCheckSize(op, variableType, "variable type"))) + return failure(); - return true; + return success(); } -bool TosaValidation::levelCheckRanksAndSizes(Operation *op) { +LogicalResult TosaValidation::levelCheckRanksAndSizes(Operation *op) { #define CHECK_RANKS_AND_SIZES(tosaOp) \ if (isa(op)) { \ - if (!levelCheckRanks(cast(op))) \ - return false; \ - if (!levelCheckSizes(cast(op))) \ - return false; \ + if (failed(levelCheckRanks(cast(op)))) \ + return failure(); \ + if (failed(levelCheckSizes(cast(op)))) \ + return failure(); \ } #define CHECK_SIZES(tosaOp) \ if (isa(op)) { \ - if (!levelCheckSizes(cast(op))) \ - return false; \ + if (failed(levelCheckSizes(cast(op)))) \ + return failure(); \ } // Tensor Operators @@ -735,24 +736,21 @@ bool TosaValidation::levelCheckRanksAndSizes(Operation *op) { #undef CHECK_RANKS_AND_SIZES #undef CHECK_SIZES - return true; + return success(); } // Perform the Level tensor size check on the tensor type. -bool TosaValidation::levelCheckSize(Operation *op, const Type &typeToCheck, - const StringRef operandOrResult) { +LogicalResult TosaValidation::levelCheckSize(Operation *op, + const Type &typeToCheck, + const StringRef operandOrResult) { if (ShapedType type = dyn_cast(typeToCheck)) { - if (!type.hasRank()) { - op->emitOpError() << "failed level check: unranked tensor"; - return false; - } + if (!type.hasRank()) + return op->emitOpError() << "failed level check: unranked tensor"; auto shape = type.getShape(); for (auto dim : shape) { - if (mlir::ShapedType::isDynamic(dim)) { - op->emitOpError() << "failed level check: " << operandOrResult - << " shape dimension cannot be dynamic"; - return false; - } + if (mlir::ShapedType::isDynamic(dim)) + return op->emitOpError() << "failed level check: " << operandOrResult + << " shape dimension cannot be dynamic"; } int64_t element_bits = type.getElementTypeBitWidth(); @@ -765,14 +763,12 @@ bool TosaValidation::levelCheckSize(Operation *op, const Type &typeToCheck, // For each tensor, the number of tensor elements multiplied by the // element size in bytes must be representable as a tensor_size_t. 
const int64_t max_size = (INT64_C(1) << tosaLevel.MAX_LOG2_SIZE) - 1; - if (size > max_size) { - op->emitOpError() - << "failed level check: " << operandOrResult - << " tensor size (in bytes) <= (1 << MAX_LOG2_SIZE - 1)"; - return false; - } + if (size > max_size) + return op->emitOpError() + << "failed level check: " << operandOrResult + << " tensor size (in bytes) <= (1 << MAX_LOG2_SIZE - 1)"; } - return true; + return success(); } LogicalResult TosaValidation::applyLevelCheck(Operation *op) { @@ -782,28 +778,28 @@ LogicalResult TosaValidation::applyLevelCheck(Operation *op) { } // check rank and sizes early so later checks can assume shaped operands - if (!levelCheckRanksAndSizes(op)) + if (failed(levelCheckRanksAndSizes(op))) return failure(); // additional level checks from spec 0.70 - if (!levelCheckPool(op) || - !levelCheckConv(op) || - !levelCheckConv(op) || - !levelCheckConv(op) || - !levelCheckFFT(op) || - !levelCheckPool(op) || - !levelCheckFFT(op) || !levelCheckTransposeConv2d(op) || - !levelCheckResize(op)) { + if (failed(levelCheckPool(op)) || + failed(levelCheckConv(op)) || + failed(levelCheckConv(op)) || + failed(levelCheckConv(op)) || + failed(levelCheckFFT(op)) || + failed(levelCheckPool(op)) || + failed(levelCheckFFT(op)) || + failed(levelCheckTransposeConv2d(op)) || failed(levelCheckResize(op))) { return failure(); } // level check MAX_TENSOR_LIST_SIZE - if (!levelCheckListSize(op)) { + if (failed(levelCheckListSize(op))) { return failure(); } if (isa(op) || isa(op)) { - if (!levelCheckMaxNesting(op)) { + if (failed(levelCheckMaxNesting(op))) { return failure(); } } @@ -812,7 +808,7 @@ LogicalResult TosaValidation::applyLevelCheck(Operation *op) { } LogicalResult TosaValidation::applyAttributeCheck(Operation *op) { - if (!attributeCheckRescale(op)) + if (failed(attributeCheckRescale(op))) return failure(); return success(); } @@ -823,14 +819,12 @@ inline bool CompatibleTypes(const mlir::Type &type, return type == declaredType; } -bool TosaValidation::CheckVariable(Operation *op) { +LogicalResult TosaValidation::CheckVariable(Operation *op) { if (auto variableOp = dyn_cast(op)) { mlir::StringAttr nameAttr = variableOp.getNameAttr(); - if (variablesMap.count(nameAttr)) { - op->emitOpError() << "name has already been declared"; - return false; - } + if (variablesMap.count(nameAttr)) + return op->emitOpError() << "name has already been declared"; auto elementType = variableOp.getType(); DenseIntElementsAttr varShapeAttr = variableOp.getVarShape(); @@ -841,51 +835,44 @@ bool TosaValidation::CheckVariable(Operation *op) { variablesMap[nameAttr] = variableType; } - return true; + return success(); } -bool TosaValidation::CheckVariableReadOrWrite(Operation *op) { +LogicalResult TosaValidation::CheckVariableReadOrWrite(Operation *op) { if (isa(op) || isa(op)) { mlir::StringAttr nameAttr = cast(op->getAttr("name")); - if (!variablesMap.count(nameAttr)) { - op->emitOpError() << "name has not been declared"; - return false; - } + if (!variablesMap.count(nameAttr)) + return op->emitOpError() << "name has not been declared"; auto varType = variablesMap[nameAttr]; for (auto v : op->getOperands()) { auto type = v.getType(); - if (!CompatibleTypes(type, varType)) { - op->emitOpError() << "operand type does not equal variable type"; - return false; - } + if (!CompatibleTypes(type, varType)) + return op->emitOpError() << "operand type does not equal variable type"; } for (auto v : op->getResults()) { auto type = v.getType(); - if (!CompatibleTypes(type, varType)) { - op->emitOpError() << 
"result type does not equal variable type"; - return false; - } + if (!CompatibleTypes(type, varType)) + return op->emitOpError() << "result type does not equal variable type"; } } - return true; + return success(); } LogicalResult TosaValidation::applyVariableCheck(Operation *op) { - if (!CheckVariable(op) || !CheckVariableReadOrWrite(op)) { + if (failed(CheckVariable(op)) || failed(CheckVariableReadOrWrite(op))) return failure(); - } return success(); } -bool checkErrorIfResize(Operation *op) { +LogicalResult checkErrorIfResize(Operation *op) { auto resize = dyn_cast(op); if (!resize) - return true; + return success(); const Value input = resize.getInput(); const Value output = resize.getOutput(); @@ -894,10 +881,8 @@ bool checkErrorIfResize(Operation *op) { const RankedTensorType outputType = llvm::dyn_cast(output.getType()); - if (!inputType || !outputType) { - op->emitOpError("expect ranked input/output tensor"); - return false; - } + if (!inputType || !outputType) + return op->emitOpError("expect ranked input/output tensor"); // Ensure the image size is supported by GPU APIs and that for integer // implementations, position * stride does not overflow int32_t. @@ -906,17 +891,15 @@ bool checkErrorIfResize(Operation *op) { outputType.getDimSize(1), outputType.getDimSize(2), inputType.getDimSize(1), inputType.getDimSize(2)}; const int64_t *maxDim = llvm::max_element(sizes); - if (maxDim != sizes.end() && *maxDim >= 16384) { - op->emitOpError("expect input/output height/width dims to be < 16384, ") - << "got [OH, OW, IH, IW] = " << sizes; - return false; - } + if (maxDim != sizes.end() && *maxDim >= 16384) + return op->emitOpError( + "expect input/output height/width dims to be < 16384, ") + << "got [OH, OW, IH, IW] = " << sizes; } SmallVector scale; - if (!tosa::getConstShapeValues(resize.getScale().getDefiningOp(), scale)) { - return false; - } + if (!tosa::getConstShapeValues(resize.getScale().getDefiningOp(), scale)) + return failure(); const int64_t scaleYN = scale[0]; const int64_t scaleYD = scale[1]; @@ -924,57 +907,45 @@ bool checkErrorIfResize(Operation *op) { const int64_t scaleXD = scale[3]; // Ensure scale values don't overflow int32 accumulator - if (scaleYN > (1 << 11) || scaleXN > (1 << 11)) { - op->emitOpError("expect all scale numerator values to be <= (1 << 11), " - "got scale_y_n=") - << scaleYN << ", scale_x_n=" << scaleXN; - return false; - } + if (scaleYN > (1 << 11) || scaleXN > (1 << 11)) + return op->emitOpError( + "expect all scale numerator values to be <= (1 << 11), " + "got scale_y_n=") + << scaleYN << ", scale_x_n=" << scaleXN; - if (scaleYD >= 16 * scaleYN || scaleXD >= 16 * scaleXN) { - op->emitOpError("expect a downscale ratio larger than 1/16, got y=") - << scaleYN << "/" << scaleYD << ", x=" << scaleXN << "/" << scaleXD; - return false; - } + if (scaleYD >= 16 * scaleYN || scaleXD >= 16 * scaleXN) + return op->emitOpError("expect a downscale ratio larger than 1/16, got y=") + << scaleYN << "/" << scaleYD << ", x=" << scaleXN << "/" << scaleXD; SmallVector offset; SmallVector border; if (!tosa::getConstShapeValues(resize.getOffset().getDefiningOp(), offset) || - !tosa::getConstShapeValues(resize.getBorder().getDefiningOp(), border)) { - return false; - } + !tosa::getConstShapeValues(resize.getBorder().getDefiningOp(), border)) + return failure(); const int64_t offsetY = offset[0]; const int64_t offsetX = offset[1]; // Set a consistent lower limit of 1/16 downscale to simplify // implementations - if (offsetY < -scaleYN || offsetY >= 16 * scaleYN) { - 
op->emitOpError( - "expect offsetY / scaleYNumerator to be in range [-1, 16), got ") - << offsetY << "/" << scaleYN; - return false; - } - if (offsetX < -scaleXN || offsetX >= 16 * scaleXN) { - op->emitOpError( - "expect offsetX / scaleXNumerator to be in range [-1, 16), got ") - << offsetX << "/" << scaleXN; - return false; - } + if (offsetY < -scaleYN || offsetY >= 16 * scaleYN) + return op->emitOpError( + "expect offsetY / scaleYNumerator to be in range [-1, 16), got ") + << offsetY << "/" << scaleYN; + if (offsetX < -scaleXN || offsetX >= 16 * scaleXN) + return op->emitOpError( + "expect offsetX / scaleXNumerator to be in range [-1, 16), got ") + << offsetX << "/" << scaleXN; const int64_t borderY = border[0]; const int64_t borderX = border[1]; - if (borderY < -16 * scaleYN || borderY >= scaleYN) { - op->emitOpError( - "expect borderY / scaleYNumerator to be in range [-16, 1), got ") - << borderY << "/" << scaleYN; - return false; - } - if (borderX < -16 * scaleXN || borderX >= scaleXN) { - op->emitOpError( - "expect borderX / scaleXNumerator to be in range [-16, 1), got ") - << borderX << "/" << scaleXN; - return false; - } + if (borderY < -16 * scaleYN || borderY >= scaleYN) + return op->emitOpError( + "expect borderY / scaleYNumerator to be in range [-16, 1), got ") + << borderY << "/" << scaleYN; + if (borderX < -16 * scaleXN || borderX >= scaleXN) + return op->emitOpError( + "expect borderX / scaleXNumerator to be in range [-16, 1), got ") + << borderX << "/" << scaleXN; // The following section of code is mostly duplicated with ResizeOp::verify(). // @@ -1001,81 +972,72 @@ bool checkErrorIfResize(Operation *op) { if (ih != ShapedType::kDynamic) { const std::optional calculatedOutHeightMinusOne = idivCheck((ih - 1) * scaleYN - offsetY + borderY, scaleYD); - if (!calculatedOutHeightMinusOne.has_value()) { - op->emitOpError("expected (input_height - 1) * scale_y_n - offset_y + " - "border_y ") - << "to be wholly divisible by scale_y_d, got ((" << ih << " - 1) * " - << scaleYN << " - " << offsetY << " + " << borderY << ") / " - << scaleYD; - return false; - } + if (!calculatedOutHeightMinusOne.has_value()) + return op->emitOpError( + "expected (input_height - 1) * scale_y_n - offset_y + " + "border_y ") + << "to be wholly divisible by scale_y_d, got ((" << ih + << " - 1) * " << scaleYN << " - " << offsetY << " + " << borderY + << ") / " << scaleYD; const int64_t calculatedOutHeight = calculatedOutHeightMinusOne.value() + 1; - if (oh != ShapedType::kDynamic && calculatedOutHeight != oh) { - op->emitOpError("calculated output height did not match expected: ") - << "calculated=" << calculatedOutHeight << ", expected=" << oh; - return false; - } + if (oh != ShapedType::kDynamic && calculatedOutHeight != oh) + return op->emitOpError( + "calculated output height did not match expected: ") + << "calculated=" << calculatedOutHeight << ", expected=" << oh; } if (iw != ShapedType::kDynamic) { const std::optional calculatedOutWidthMinusOne = idivCheck((iw - 1) * scaleXN - offsetX + borderX, scaleXD); - if (!calculatedOutWidthMinusOne.has_value()) { - op->emitOpError("expected (input_width - 1) * scale_x_n - offset_x + " - "border_x ") - << "to be wholly divisible by scale_x_d, got ((" << iw << " - 1) * " - << scaleXN << " - " << offsetX << " + " << borderX << ") / " - << scaleXD; - return false; - } + if (!calculatedOutWidthMinusOne.has_value()) + return op->emitOpError( + "expected (input_width - 1) * scale_x_n - offset_x + " + "border_x ") + << "to be wholly divisible by scale_x_d, got ((" << 
iw + << " - 1) * " << scaleXN << " - " << offsetX << " + " << borderX + << ") / " << scaleXD; const int64_t calculatedOutWidth = calculatedOutWidthMinusOne.value() + 1; - if (ow != ShapedType::kDynamic && calculatedOutWidth != ow) { - op->emitOpError("calculated output width did not match expected: ") - << "calculated=" << calculatedOutWidth << ", expected=" << ow; - return false; - } + if (ow != ShapedType::kDynamic && calculatedOutWidth != ow) + return op->emitOpError("calculated output width did not match expected: ") + << "calculated=" << calculatedOutWidth << ", expected=" << ow; } - return true; + return success(); } -bool checkErrorIfMul(Operation *op) { +LogicalResult checkErrorIfMul(Operation *op) { auto mul = dyn_cast(op); if (!mul) - return true; + return success(); // REQUIRE(0 <= shift && shift <= 63); // REQUIRE(is_same() || shift == 0); ElementsAttr shift_elem; - if (!matchPattern(mul.getShift(), m_Constant(&shift_elem))) { - return true; - } + if (!matchPattern(mul.getShift(), m_Constant(&shift_elem))) + return success(); int32_t shift = shift_elem.getValues()[0].getInt(); auto inputElemType = getElementTypeOrSelf(mul.getInput1()); if (inputElemType.isInteger(32)) { // 0 <= shift <= 63 for int32_t type - if (shift < 0 || shift > 63) { - op->emitOpError() << "requires 0 <= shift && shift <= 63, but got: " - << shift; - return false; - } + if (shift < 0 || shift > 63) + return op->emitOpError() + << "requires 0 <= shift && shift <= 63, but got: " << shift; } else { // shift must be 0 for all other types - if (shift != 0) { - op->emitOpError() << "requires shift = 0 for all input data types that " - "are not int32_t, but got: " - << shift; - return false; - } + if (shift != 0) + return op->emitOpError() + << "requires shift = 0 for all input data types that " + "are not int32_t, but got: " + << shift; } - return true; + return success(); } -bool checkErrorIfTable(Operation *op) { +LogicalResult checkErrorIfTable(Operation *op) { auto table = dyn_cast(op); if (!table) - return true; + return success(); // REQUIRE(length(table) == TABLE_SIZE) where TABLE_SIZE is 256 or 513 const auto inputElemType = getElementTypeOrSelf(table.getInput1().getType()); @@ -1084,26 +1046,24 @@ bool checkErrorIfTable(Operation *op) { const ShapeAdaptor tableShape(table.getTable().getType()); if (tableShape.hasStaticShape()) { const auto numElements = tableShape.getNumElements(); - if (numElements != tableSize) { - op->emitOpError() << "requires table size of " << tableSize << ", got " - << numElements; - return false; - } + if (numElements != tableSize) + return op->emitOpError() << "requires table size of " << tableSize + << ", got " << numElements; } - return true; + return success(); } -bool checkErrorIfRescale(Operation *op) { +LogicalResult checkErrorIfRescale(Operation *op) { auto rescale = dyn_cast(op); if (!rescale) - return true; + return success(); auto inputType = llvm::dyn_cast(rescale.getInput().getType()); auto outputType = llvm::dyn_cast(rescale.getOutput().getType()); if (!inputType || !outputType || !inputType.getElementType().isInteger() || !outputType.getElementType().isInteger()) - return true; + return success(); auto inElemType = inputType.getElementType(); auto outElemType = outputType.getElementType(); @@ -1117,81 +1077,65 @@ bool checkErrorIfRescale(Operation *op) { auto roundingMode = rescale.getRoundingMode(); // ERROR_IF(scale32 && is_same()) - if (scale32 && inWidth == 48) { - op->emitOpError() << "scale32 is not allowed with 48-bit input."; - return false; - } + if 
(scale32 && inWidth == 48) + return op->emitOpError() << "scale32 is not allowed with 48-bit input."; // ERROR_IF(!scale32 && (rounding_mode == DOUBLE_ROUND)) - if (!scale32 && roundingMode == RoundingMode::DOUBLE_ROUND) { - op->emitOpError() << "DOUBLE_ROUND is only allowed with scale32=true."; - return false; - } + if (!scale32 && roundingMode == RoundingMode::DOUBLE_ROUND) + return op->emitOpError() + << "DOUBLE_ROUND is only allowed with scale32=true."; // ERROR_IF(input_unsigned && output_unsigned) - if (inputUnsigned && outputUnsigned) { - op->emitOpError() << "input and output cannot be both unsigned."; - return false; - } + if (inputUnsigned && outputUnsigned) + return op->emitOpError() << "input and output cannot be both unsigned."; // ERROR_IF(is_same() && input_unsigned) - if (outWidth == 32 && inputUnsigned) { - op->emitOpError() << "i32 output type is not allowed with unsigned input."; - return false; - } + if (outWidth == 32 && inputUnsigned) + return op->emitOpError() + << "i32 output type is not allowed with unsigned input."; // ERROR_IF(is_same() && output_unsigned) - if (inWidth == 32 && outputUnsigned) { - op->emitOpError() << "i32 input type is not allowed with unsigned output."; - return false; - } + if (inWidth == 32 && outputUnsigned) + return op->emitOpError() + << "i32 input type is not allowed with unsigned output."; // ERROR_IF(is_same() && output_unsigned) - if (inWidth == 48 && outputUnsigned) { - op->emitOpError() << "i48 input type is not allowed with unsigned output."; - return false; - } + if (inWidth == 48 && outputUnsigned) + return op->emitOpError() + << "i48 input type is not allowed with unsigned output."; // ERROR_IF(is_same && input_unsigned) - if (inWidth == 48 && inputUnsigned) { - op->emitOpError() << "i48 input type cannot be unsigned."; - return false; - } + if (inWidth == 48 && inputUnsigned) + return op->emitOpError() << "i48 input type cannot be unsigned."; // ERROR_IF(is_same && input_unsigned) - if (inWidth == 32 && inputUnsigned) { - op->emitOpError() << "i32 input type cannot be unsigned."; - return false; - } + if (inWidth == 32 && inputUnsigned) + return op->emitOpError() << "i32 input type cannot be unsigned."; // ERROR_IF(is_same && output_unsigned) - if (outWidth == 32 && outputUnsigned) { - op->emitOpError() << "i32 output type cannot be unsigned."; - return false; - } + if (outWidth == 32 && outputUnsigned) + return op->emitOpError() << "i32 output type cannot be unsigned."; - return true; + return success(); } -bool checkErrorIfPad(Operation *op) { +LogicalResult checkErrorIfPad(Operation *op) { auto pad = dyn_cast(op); if (!pad) - return true; + return success(); DenseIntElementsAttr paddingAttr; if (!matchPattern(pad.getPadding(), m_Constant(&paddingAttr))) // Pad verifier will catch this - return true; + return success(); for (const APInt &val : paddingAttr.getValues()) { - if (val.getSExtValue() < 0) { - op->emitOpError() << "padding value must all be non-negative, got " - << val.getSExtValue(); - return false; - } + if (val.getSExtValue() < 0) + return op->emitOpError() << "padding value must all be non-negative, got " + << val.getSExtValue(); } - return true; + return success(); } static bool isOpIsolatedWithinRegion(Operation *op, Region *region) { @@ -1201,7 +1145,7 @@ static bool isOpIsolatedWithinRegion(Operation *op, Region *region) { }); } -static bool isRegionIsolatedFromAbove(Region ®ionToCheck) { +static LogicalResult isRegionIsolatedFromAbove(Region ®ionToCheck) { bool noLiveInValue = true; 
regionToCheck.walk([&noLiveInValue, ®ionToCheck](Operation *op) { if (!isOpIsolatedWithinRegion(op, ®ionToCheck)) { @@ -1210,23 +1154,22 @@ static bool isRegionIsolatedFromAbove(Region ®ionToCheck) { } return WalkResult::advance(); }); - return noLiveInValue; + return noLiveInValue ? success() : failure(); } LogicalResult checkIsolatedRegion(Operation *op, Region ®ionToCheck, StringRef regionName) { - if (isRegionIsolatedFromAbove(regionToCheck)) + if (succeeded(isRegionIsolatedFromAbove(regionToCheck))) return success(); - op->emitOpError() - << "is not conformant to the TOSA specification. It requires the '" - << regionName << "' region is isolated from above.\n"; - return failure(); + return op->emitOpError() + << "is not conformant to the TOSA specification. It requires the '" + << regionName << "' region is isolated from above.\n"; } -bool checkErrorIfCondIf(Operation *op) { +LogicalResult checkErrorIfCondIf(Operation *op) { auto ifOp = dyn_cast(op); if (!ifOp) - return true; + return success(); // Currently the dialect supports declaring cond_if operations that // have then/else regions that reference values from outside these @@ -1257,49 +1200,53 @@ bool checkErrorIfCondIf(Operation *op) { // tosa.yield %arg4 // } - return failed(checkIsolatedRegion(op, ifOp.getThenGraph(), "then")) || - failed(checkIsolatedRegion(op, ifOp.getElseGraph(), "else")); + if (failed(checkIsolatedRegion(op, ifOp.getThenGraph(), "then")) || + failed(checkIsolatedRegion(op, ifOp.getElseGraph(), "else"))) + return failure(); + return success(); } -bool checkErrorIfWhileLoop(Operation *op) { +LogicalResult checkErrorIfWhileLoop(Operation *op) { auto whileOp = dyn_cast(op); if (!whileOp) - return true; + return success(); - return failed(checkIsolatedRegion(op, whileOp.getCondGraph(), "cond")) || - failed(checkIsolatedRegion(op, whileOp.getBodyGraph(), "body")); + if (failed(checkIsolatedRegion(op, whileOp.getCondGraph(), "cond")) || + failed(checkIsolatedRegion(op, whileOp.getBodyGraph(), "body"))) + return failure(); + return success(); } -bool checkErrorIfScatter(Operation *op) { +LogicalResult checkErrorIfScatter(Operation *op) { auto scatterOp = dyn_cast(op); if (!scatterOp) - return true; + return success(); // for constant indices, check that there are no duplicate values DenseIntElementsAttr indicesAttr; if (!matchPattern(scatterOp.getIndices(), m_Constant(&indicesAttr))) - return true; + return success(); auto const indicesType = dyn_cast(scatterOp.getIndices().getType()); if (!indicesType || !indicesType.hasRank()) { op->emitOpError("expect ranked indices tensor"); - return false; + return failure(); } if (!hasUniqueConstantScatterIndices(indicesType, indicesAttr)) { op->emitOpError("indices values contain duplicates"); - return false; + return failure(); } - return true; + return success(); } LogicalResult TosaValidation::applyErrorIfCheck(Operation *op) { - if (!checkErrorIfResize(op) || !checkErrorIfMul(op) || - !checkErrorIfTable(op) || !checkErrorIfRescale(op) || - !checkErrorIfPad(op) || !checkErrorIfCondIf(op) || - !checkErrorIfWhileLoop(op) || !checkErrorIfScatter(op)) + if (failed(checkErrorIfResize(op)) || failed(checkErrorIfMul(op)) || + failed(checkErrorIfTable(op)) || failed(checkErrorIfRescale(op)) || + failed(checkErrorIfPad(op)) || failed(checkErrorIfCondIf(op)) || + failed(checkErrorIfWhileLoop(op)) || failed(checkErrorIfScatter(op))) return failure(); return success(); } diff --git a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp 
index 132ed815c354e..3385b2a38afc1 100644 --- a/mlir/lib/Dialect/Transform/IR/TransformOps.cpp +++ b/mlir/lib/Dialect/Transform/IR/TransformOps.cpp @@ -616,11 +616,10 @@ DiagnosedSilenceableFailure transform::ApplyConversionPatternsOp::apply( if (diag.succeeded()) { // Tracking failure is the only failure. return trackingFailure; - } else { - diag.attachNote() << "tracking listener also failed: " - << trackingFailure.getMessage(); - (void)trackingFailure.silence(); } + diag.attachNote() << "tracking listener also failed: " + << trackingFailure.getMessage(); + (void)trackingFailure.silence(); } if (!diag.succeeded()) diff --git a/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp b/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp index 842e880ca9150..c627158e999ed 100644 --- a/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp +++ b/mlir/lib/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp @@ -6,13 +6,24 @@ // //===----------------------------------------------------------------------===// +#include "mlir/Dialect/Transform/IR/TransformOps.h" #include "mlir/Dialect/Transform/Interfaces/TransformInterfaces.h" +#include "mlir/IR/OpImplementation.h" #include "llvm/Support/Debug.h" #include "mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h" using namespace mlir; +static ParseResult parseAlternativesOpSelectedRegion( + OpAsmParser &parser, IntegerAttr &selectedRegionAttr, + std::optional &selectedRegionParam); + +static void printAlternativesOpSelectedRegion(OpAsmPrinter &printer, + Operation *op, + IntegerAttr selectedRegionAttr, + Value selectedRegionParam); + #define GET_OP_CLASSES #include "mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp.inc" @@ -57,3 +68,176 @@ LogicalResult transform::tune::KnobOp::verify() { return success(); } + +//===----------------------------------------------------------------------===// +// AlternativesOp +//===----------------------------------------------------------------------===// + +static ParseResult parseAlternativesOpSelectedRegion( + OpAsmParser &parser, IntegerAttr &selectedRegionAttr, + std::optional &selectedRegionParam) { + size_t selectedRegionIdx; + OptionalParseResult attrParseRes = + parser.parseOptionalInteger(selectedRegionIdx); + if (attrParseRes.has_value()) { + if (failed(*attrParseRes)) + return failure(); + + selectedRegionAttr = parser.getBuilder().getIndexAttr(selectedRegionIdx); + return success(); + } + + OpAsmParser::UnresolvedOperand param; + auto paramParseRes = parser.parseOptionalOperand(param); + if (paramParseRes.has_value()) { + if (failed(*paramParseRes)) + return failure(); + + selectedRegionParam = param; + return success(); + } + + return parser.emitError(parser.getCurrentLocation()) + << "expected either an integer attribute or a transform.param operand"; +} + +static void printAlternativesOpSelectedRegion(OpAsmPrinter &printer, + Operation *op, + IntegerAttr selectedRegionAttr, + Value selectedRegionParam) { + if (selectedRegionAttr) + printer << selectedRegionAttr.getValue(); + if (selectedRegionParam) + printer << selectedRegionParam; +} + +OperandRange transform::tune::AlternativesOp::getEntrySuccessorOperands( + RegionBranchPoint point) { + // No operands will be forwarded to the region(s). 
+ return getOperands().slice(0, 0); +} + +void transform::tune::AlternativesOp::getSuccessorRegions( + RegionBranchPoint point, SmallVectorImpl &regions) { + if (point.isParent()) + if (auto selectedRegionIdx = getSelectedRegionAttr()) + regions.emplace_back( + &getAlternatives()[selectedRegionIdx->getSExtValue()], + Block::BlockArgListType()); + else + for (Region &alternative : getAlternatives()) + regions.emplace_back(&alternative, Block::BlockArgListType()); + else + regions.emplace_back(getOperation()->getResults()); +} + +void transform::tune::AlternativesOp::getRegionInvocationBounds( + ArrayRef operands, SmallVectorImpl &bounds) { + (void)operands; + bounds.reserve(getNumRegions()); + + if (auto selectedRegionIdx = getSelectedRegionAttr()) { + bounds.resize(getNumRegions(), InvocationBounds(0, 0)); + bounds[selectedRegionIdx->getSExtValue()] = InvocationBounds(1, 1); + } else { + bounds.resize(getNumRegions(), InvocationBounds(0, 1)); + } +} + +void transform::tune::AlternativesOp::getEffects( + SmallVectorImpl &effects) { + onlyReadsHandle(getSelectedRegionParamMutable(), effects); + producesHandle(getOperation()->getOpResults(), effects); + // TODO: should effects from regions be forwarded? +} + +DiagnosedSilenceableFailure +transform::tune::AlternativesOp::apply(transform::TransformRewriter &rewriter, + transform::TransformResults &results, + transform::TransformState &state) { + std::optional selectedRegionIdx; + + if (auto selectedRegionAttr = getSelectedRegionAttr()) + selectedRegionIdx = selectedRegionAttr->getSExtValue(); + + if (Value selectedRegionParam = getSelectedRegionParam()) { + ArrayRef associatedAttrs = state.getParams(selectedRegionParam); + IntegerAttr selectedRegionAttr; + if (associatedAttrs.size() != 1 || + !(selectedRegionAttr = dyn_cast(associatedAttrs[0]))) + return emitDefiniteFailure() + << "param should hold exactly one integer attribute, got: " + << associatedAttrs[0]; + selectedRegionIdx = selectedRegionAttr.getValue().getSExtValue(); + } + + if (!selectedRegionIdx) + return emitDefiniteFailure() << "non-deterministic choice " << getName() + << " is only resolved through providing a " + "`selected_region` attr/param"; + + if (*selectedRegionIdx < 0 || *selectedRegionIdx >= getNumRegions()) + return emitDefiniteFailure() + << "'selected_region' attribute/param specifies region at index " + << *selectedRegionIdx << " while op has only " << getNumRegions() + << " regions"; + + Region &selectedRegion = getRegion(*selectedRegionIdx); + auto scope = state.make_region_scope(selectedRegion); + Block &block = selectedRegion.front(); + // Apply the region's ops one by one. + for (Operation &transform : block.without_terminator()) { + DiagnosedSilenceableFailure result = + state.applyTransform(cast(transform)); + if (result.isDefiniteFailure()) + return result; + + if (result.isSilenceableFailure()) { + for (const auto &res : getResults()) + results.set(res, {}); + return result; + } + } + // Forward the operation mapping for values yielded from the region to the + // values produced by the alternatives op.
+ transform::detail::forwardTerminatorOperands(&block, state, results); + return DiagnosedSilenceableFailure::success(); +} + +LogicalResult transform::tune::AlternativesOp::verify() { + for (auto *region : getRegions()) { + auto yieldTerminator = + llvm::dyn_cast_if_present(region->front().back()); + if (!yieldTerminator) + return emitOpError() << "expected '" + << transform::YieldOp::getOperationName() + << "' as terminator"; + + if (yieldTerminator->getNumOperands() != getNumResults()) + return yieldTerminator.emitOpError() + << "expected terminator to have as many operands as the parent op " + "has results"; + + for (auto [i, operandType, resultType] : llvm::zip_equal( + llvm::seq(0, yieldTerminator->getNumOperands()), + yieldTerminator->getOperands().getType(), getResultTypes())) { + if (operandType == resultType) + continue; + return yieldTerminator.emitOpError() + << "the type of the terminator operand #" << i + << " must match the type of the corresponding parent op result (" + << operandType << " vs " << resultType << ")"; + } + } + + if (auto selectedRegionAttr = getSelectedRegionAttr()) { + size_t regionIdx = selectedRegionAttr->getSExtValue(); + if (regionIdx < 0 || regionIdx >= getNumRegions()) + return emitOpError() + << "'selected_region' attribute specifies region at index " + << regionIdx << " while op has only " << getNumRegions() + << " regions"; + } + + return success(); +} diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp index 347141e2773b8..b0132e889302f 100644 --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -396,14 +396,31 @@ std::optional vector::getConstantVscaleMultiplier(Value value) { return {}; } -/// Converts an IntegerAttr to have the specified type if needed. -/// This handles cases where integer constant attributes have a different type -/// than the target element type. -static IntegerAttr convertIntegerAttr(IntegerAttr intAttr, Type expectedType) { - if (intAttr.getType() == expectedType) - return intAttr; // Already correct type +/// Converts numeric attributes to the expected type. Supports +/// integer-to-integer and float-to-integer conversions. Returns the original +/// attribute if no conversion is needed or supported. +static Attribute convertNumericAttr(Attribute attr, Type expectedType) { + // Integer-to-integer conversion + if (auto intAttr = dyn_cast(attr)) { + if (auto intType = dyn_cast(expectedType)) { + if (intAttr.getType() != expectedType) + return IntegerAttr::get(expectedType, intAttr.getInt()); + } + return attr; + } + + // Float-to-integer bitcast (preserves bit representation) + if (auto floatAttr = dyn_cast(attr)) { + auto intType = dyn_cast(expectedType); + if (!intType) + return attr; + + APFloat floatVal = floatAttr.getValue(); + APInt intVal = floatVal.bitcastToAPInt(); + return IntegerAttr::get(expectedType, intVal); + } - return IntegerAttr::get(expectedType, intAttr.getInt()); + return attr; } //===----------------------------------------------------------------------===// @@ -563,7 +580,7 @@ namespace { // ElideSingleElementReduction for ReduceOp. 
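Illustrative sketch, not part of the patch: the float-to-integer case of convertNumericAttr above reinterprets a FloatAttr's bits as an integer of the same width via APFloat::bitcastToAPInt(). A standalone analogue of that bit-preserving step, using only LLVM ADT types; the helper name is hypothetical.

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"

// Reinterpret an f32 constant as its 32-bit integer bit pattern, the same
// bit-preserving conversion convertNumericAttr applies to float attributes.
static llvm::APInt reinterpretF32Bits(float value) {
  llvm::APFloat fp(value);
  return fp.bitcastToAPInt(); // identical 32 bits, now typed as an integer
}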
struct ElideUnitDimsInMultiDimReduction : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(MultiDimReductionOp reductionOp, PatternRewriter &rewriter) const override { @@ -713,7 +730,7 @@ std::optional> ReductionOp::getShapeForUnroll() { namespace { struct ElideSingleElementReduction : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ReductionOp reductionOp, PatternRewriter &rewriter) const override { @@ -2180,7 +2197,7 @@ namespace { // Pattern to rewrite a ExtractOp(Broadcast) -> Broadcast. class ExtractOpFromBroadcast final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ExtractOp extractOp, PatternRewriter &rewriter) const override { @@ -2203,7 +2220,7 @@ class ExtractOpFromBroadcast final : public OpRewritePattern { // Pattern to rewrite a ExtractOp(CreateMask) -> CreateMask. class ExtractOpFromCreateMask final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ExtractOp extractOp, PatternRewriter &rewriter) const override { @@ -2473,16 +2490,11 @@ static OpFoldResult foldFromElementsToConstant(FromElementsOp fromElementsOp, if (!destEltType.isIntOrIndexOrFloat() && !isa(destEltType)) return {}; - // Convert integer attributes to the target type if needed, leave others - // unchanged. - auto convertedElements = - llvm::map_to_vector(elements, [&](Attribute attr) -> Attribute { - if (auto intAttr = dyn_cast(attr)) { - return convertIntegerAttr(intAttr, destEltType); - } - return attr; // Non-integer attributes (FloatAttr, etc.) returned - // unchanged - }); + // Constant attributes might have a different type than the return type. + // Convert them before creating the dense elements attribute. + auto convertedElements = llvm::map_to_vector(elements, [&](Attribute attr) { + return convertNumericAttr(attr, destEltType); + }); return DenseElementsAttr::get(destVecType, convertedElements); } @@ -2534,7 +2546,7 @@ rewriteFromElementsAsBroadcast(FromElementsOp fromElementsOp, class FromElementsToShapeCast : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(FromElementsOp fromElements, PatternRewriter &rewriter) const override { @@ -2926,7 +2938,7 @@ namespace { // Fold broadcast1(broadcast2(x)) into broadcast1(x). struct BroadcastFolder : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(BroadcastOp broadcastOp, PatternRewriter &rewriter) const override { @@ -3097,7 +3109,7 @@ namespace { // Pattern to rewrite a 0-D shuffle with [0] or [1] mask returning a 1-D vector // to a broadcast. struct Canonicalize0DShuffleOp : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ShuffleOp shuffleOp, PatternRewriter &rewriter) const override { @@ -3153,7 +3165,7 @@ static Value getScalarSplatSource(Value value) { /// Pattern to rewrite shuffle(splat-like(v), splat-like(v)) as broadcast(v). class ShuffleSplat final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ShuffleOp op, PatternRewriter &rewriter) const override { @@ -3170,7 +3182,7 @@ class ShuffleSplat final : public OpRewritePattern { /// vector.interleave. 
class ShuffleInterleave : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ShuffleOp op, PatternRewriter &rewriter) const override { @@ -3314,7 +3326,7 @@ namespace { // broadcast. class InsertToBroadcast final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(InsertOp insertOp, PatternRewriter &rewriter) const override { @@ -3332,7 +3344,7 @@ class InsertToBroadcast final : public OpRewritePattern { /// Pattern to rewrite a insert(splat-like(v), splat-like(v)) as broadcast(v). class InsertSplatToSplat final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(InsertOp op, PatternRewriter &rewriter) const override { @@ -3368,7 +3380,7 @@ class InsertSplatToSplat final : public OpRewritePattern { /// %result = vector.from_elements %c1, %c2 : vector<2xi32> class InsertChainFullyInitialized final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(InsertOp op, PatternRewriter &rewriter) const override { @@ -3503,19 +3515,13 @@ foldDenseElementsAttrDestInsertOp(InsertOp insertOp, Attribute srcAttr, SmallVector insertedValues; Type destEltType = destTy.getElementType(); - /// Converts integer attributes to the expected type if there's a mismatch. - /// Non-integer attributes are left unchanged. + /// Converts attribute to the expected type if there's + /// a mismatch. if (auto denseSource = llvm::dyn_cast(srcAttr)) { for (auto value : denseSource.getValues()) - if (auto intAttr = dyn_cast(value)) - insertedValues.push_back(convertIntegerAttr(intAttr, destEltType)); - else - insertedValues.push_back(value); // Non-integer attributes unchanged + insertedValues.push_back(convertNumericAttr(value, destEltType)); } else { - if (auto intAttr = dyn_cast(srcAttr)) - insertedValues.push_back(convertIntegerAttr(intAttr, destEltType)); - else - insertedValues.push_back(srcAttr); // Non-integer attributes unchanged + insertedValues.push_back(convertNumericAttr(srcAttr, destEltType)); } auto allValues = llvm::to_vector(denseDst.getValues()); @@ -3742,7 +3748,7 @@ namespace { class FoldInsertStridedSliceSplat final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(InsertStridedSliceOp insertStridedSliceOp, PatternRewriter &rewriter) const override { @@ -3762,7 +3768,7 @@ class FoldInsertStridedSliceSplat final class FoldInsertStridedSliceOfExtract final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(InsertStridedSliceOp insertStridedSliceOp, PatternRewriter &rewriter) const override { @@ -3792,7 +3798,7 @@ class FoldInsertStridedSliceOfExtract final class InsertStridedSliceConstantFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; // Do not create constants with more than `vectorSizeFoldThreashold` elements, // unless the source vector constant has a single use. 
@@ -4244,7 +4250,7 @@ namespace { // %mask = vector.create_mask %new_ub : vector<8xi1> class StridedSliceCreateMaskFolder final : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; public: LogicalResult matchAndRewrite(ExtractStridedSliceOp extractStridedSliceOp, @@ -4304,7 +4310,7 @@ class StridedSliceCreateMaskFolder final class StridedSliceConstantMaskFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ExtractStridedSliceOp extractStridedSliceOp, PatternRewriter &rewriter) const override { @@ -4359,7 +4365,7 @@ class StridedSliceConstantMaskFolder final class StridedSliceBroadcast final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ExtractStridedSliceOp op, PatternRewriter &rewriter) const override { @@ -4410,7 +4416,7 @@ class StridedSliceBroadcast final /// Rewrite extract_strided_slice(splat-like(v)) with broadcast(v). class StridedSliceSplat final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ExtractStridedSliceOp op, PatternRewriter &rewriter) const override { @@ -4442,7 +4448,7 @@ class StridedSliceSplat final : public OpRewritePattern { class ContiguousExtractStridedSliceToExtract final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ExtractStridedSliceOp op, PatternRewriter &rewriter) const override { @@ -5017,7 +5023,7 @@ namespace { /// ``` struct TransferReadAfterWriteToBroadcast : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(TransferReadOp readOp, PatternRewriter &rewriter) const override { @@ -5099,6 +5105,14 @@ void TransferReadOp::getCanonicalizationPatterns(RewritePatternSet &results, results.add(context); } +FailureOr>> +TransferReadOp::bubbleDownCasts(OpBuilder &builder) { + if (!hasPureBufferSemantics()) + return failure(); + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + getResult()); +} + //===----------------------------------------------------------------------===// // TransferWriteOp //===----------------------------------------------------------------------===// @@ -5444,7 +5458,7 @@ namespace { /// any other uses. 
class FoldWaw final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(TransferWriteOp writeOp, PatternRewriter &rewriter) const override { if (!llvm::isa(writeOp.getShapedType())) @@ -5500,7 +5514,7 @@ class FoldWaw final : public OpRewritePattern { struct SwapExtractSliceOfTransferWrite : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(tensor::InsertSliceOp insertOp, PatternRewriter &rewriter) const override { @@ -5586,6 +5600,14 @@ void TransferWriteOp::getCanonicalizationPatterns(RewritePatternSet &results, results.add(context); } +FailureOr>> +TransferWriteOp::bubbleDownCasts(OpBuilder &builder) { + if (!hasPureBufferSemantics()) + return failure(); + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + ValueRange()); +} + //===----------------------------------------------------------------------===// // LoadOp //===----------------------------------------------------------------------===// @@ -5640,6 +5662,12 @@ std::optional> LoadOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } +FailureOr>> +LoadOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + getResult()); +} + //===----------------------------------------------------------------------===// // StoreOp //===----------------------------------------------------------------------===// @@ -5679,6 +5707,12 @@ std::optional> StoreOp::getShapeForUnroll() { return llvm::to_vector<4>(getVectorType().getShape()); } +FailureOr>> +StoreOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + ValueRange()); +} + //===----------------------------------------------------------------------===// // MaskedLoadOp //===----------------------------------------------------------------------===// @@ -5703,7 +5737,7 @@ LogicalResult MaskedLoadOp::verify() { namespace { class MaskedLoadFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(MaskedLoadOp load, PatternRewriter &rewriter) const override { switch (getMaskFormat(load.getMask())) { @@ -5733,6 +5767,12 @@ OpFoldResult MaskedLoadOp::fold(FoldAdaptor) { return OpFoldResult(); } +FailureOr>> +MaskedLoadOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + getResult()); +} + //===----------------------------------------------------------------------===// // MaskedStoreOp //===----------------------------------------------------------------------===// @@ -5754,7 +5794,7 @@ LogicalResult MaskedStoreOp::verify() { namespace { class MaskedStoreFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(MaskedStoreOp store, PatternRewriter &rewriter) const override { switch (getMaskFormat(store.getMask())) { @@ -5783,6 +5823,12 @@ LogicalResult MaskedStoreOp::fold(FoldAdaptor adaptor, return memref::foldMemRefCast(*this); } +FailureOr>> +MaskedStoreOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + ValueRange()); +} + //===----------------------------------------------------------------------===// // GatherOp 
//===----------------------------------------------------------------------===// @@ -5844,7 +5890,7 @@ static LogicalResult isZeroBasedContiguousSeq(Value indexVec) { namespace { class GatherFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(GatherOp gather, PatternRewriter &rewriter) const override { switch (getMaskFormat(gather.getMask())) { @@ -5864,7 +5910,7 @@ class GatherFolder final : public OpRewritePattern { /// maskedload. Only 1D fixed vectors are supported for now. class FoldContiguousGather final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(GatherOp op, PatternRewriter &rewriter) const override { if (!isa(op.getBase().getType())) @@ -5886,6 +5932,12 @@ void GatherOp::getCanonicalizationPatterns(RewritePatternSet &results, results.add(context); } +FailureOr>> +GatherOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + getResult()); +} + //===----------------------------------------------------------------------===// // ScatterOp //===----------------------------------------------------------------------===// @@ -5910,7 +5962,7 @@ LogicalResult ScatterOp::verify() { namespace { class ScatterFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ScatterOp scatter, PatternRewriter &rewriter) const override { switch (getMaskFormat(scatter.getMask())) { @@ -5930,7 +5982,7 @@ class ScatterFolder final : public OpRewritePattern { /// maskedstore. Only 1D fixed vectors are supported for now. class FoldContiguousScatter final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ScatterOp op, PatternRewriter &rewriter) const override { if (failed(isZeroBasedContiguousSeq(op.getIndices()))) @@ -5948,6 +6000,12 @@ void ScatterOp::getCanonicalizationPatterns(RewritePatternSet &results, results.add(context); } +FailureOr>> +ScatterOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + ValueRange()); +} + //===----------------------------------------------------------------------===// // ExpandLoadOp //===----------------------------------------------------------------------===// @@ -5972,7 +6030,7 @@ LogicalResult ExpandLoadOp::verify() { namespace { class ExpandLoadFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ExpandLoadOp expand, PatternRewriter &rewriter) const override { switch (getMaskFormat(expand.getMask())) { @@ -5996,6 +6054,12 @@ void ExpandLoadOp::getCanonicalizationPatterns(RewritePatternSet &results, results.add(context); } +FailureOr>> +ExpandLoadOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + getResult()); +} + //===----------------------------------------------------------------------===// // CompressStoreOp //===----------------------------------------------------------------------===// @@ -6017,7 +6081,7 @@ LogicalResult CompressStoreOp::verify() { namespace { class CompressStoreFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(CompressStoreOp 
compress, PatternRewriter &rewriter) const override { switch (getMaskFormat(compress.getMask())) { @@ -6042,6 +6106,12 @@ void CompressStoreOp::getCanonicalizationPatterns(RewritePatternSet &results, results.add(context); } +FailureOr>> +CompressStoreOp::bubbleDownCasts(OpBuilder &builder) { + return mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(getBaseMutable(), + ValueRange()); +} + //===----------------------------------------------------------------------===// // ShapeCastOp //===----------------------------------------------------------------------===// @@ -6190,7 +6260,7 @@ static VectorType trimTrailingOneDims(VectorType oldType) { class ShapeCastCreateMaskFolderTrailingOneDim final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ShapeCastOp shapeOp, PatternRewriter &rewriter) const override { @@ -6260,7 +6330,7 @@ class ShapeCastCreateMaskFolderTrailingOneDim final /// If both (i) and (ii) are possible, (i) is chosen. class ShapeCastBroadcastFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ShapeCastOp shapeCastOp, PatternRewriter &rewriter) const override { @@ -6544,7 +6614,7 @@ namespace { // Rewrites two back-to-back TransposeOp operations into a single TransposeOp. class TransposeFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::TransposeOp transposeOp, PatternRewriter &rewriter) const override { @@ -6576,7 +6646,7 @@ class TransposeFolder final : public OpRewritePattern { /// Replace transpose(splat-like(v)) with broadcast(v) class FoldTransposeSplat final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(TransposeOp transposeOp, PatternRewriter &rewriter) const override { @@ -6593,7 +6663,7 @@ class FoldTransposeSplat final : public OpRewritePattern { /// Folds transpose(create_mask) into a new transposed create_mask. class FoldTransposeCreateMask final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(TransposeOp transpOp, PatternRewriter &rewriter) const override { @@ -6630,7 +6700,7 @@ class FoldTransposeCreateMask final : public OpRewritePattern { /// Folds transpose(shape_cast) into a new shape_cast. class FoldTransposeShapeCast final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(TransposeOp transposeOp, PatternRewriter &rewriter) const override { @@ -6680,7 +6750,7 @@ class FoldTransposeShapeCast final : public OpRewritePattern { /// within the groups [0,1] and [3,4], like (1 0 2 4 3 5 6). 
class FoldTransposeBroadcast : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; FoldTransposeBroadcast(MLIRContext *context, PatternBenefit benefit = 1) : OpRewritePattern(context, benefit) {} @@ -6901,7 +6971,7 @@ namespace { /// %0 = vector.constant_mask [8, 16] : vector<8x[16]xi1> class CreateMaskFolder final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(CreateMaskOp createMaskOp, PatternRewriter &rewriter) const override { @@ -7230,7 +7300,7 @@ LogicalResult MaskOp::fold(FoldAdaptor adaptor, /// %0 = arith.select %mask, %a, %passthru : vector<8xf32> /// class CanonializeEmptyMaskOp : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(MaskOp maskOp, PatternRewriter &rewriter) const override { @@ -7340,7 +7410,7 @@ OpFoldResult SplatOp::fold(FoldAdaptor adaptor) { // vector.broadcast. class SplatToBroadcastPattern final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(SplatOp splatOp, PatternRewriter &rewriter) const override { rewriter.replaceOpWithNewOp(splatOp, splatOp.getType(), diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp index dedc3b3f30201..61d9357e19bb4 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp @@ -34,7 +34,7 @@ namespace { /// convertible to the lower level target dialect (LLVM, SPIR-V, etc.) directly. class BroadcastOpLowering : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::BroadcastOp op, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp index 65702ffa152d9..efe8d14b3532a 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp @@ -1151,7 +1151,7 @@ FailureOr ContractionOpLowering::lowerReduction( /// class OuterProductOpLowering : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::OuterProductOp op, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp index 1f96a3a108006..6bc8347bc6f76 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorGather.cpp @@ -50,7 +50,7 @@ namespace { /// /// Supports vector types with a fixed leading dimension. struct UnrollGather : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::GatherOp op, PatternRewriter &rewriter) const override { @@ -98,7 +98,7 @@ struct UnrollGather : OpRewritePattern { /// ATM this is effectively limited to reading a 1D Vector from a 2D MemRef, /// but should be fairly straightforward to extend beyond that. 
struct RemoveStrideFromGatherSource : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::GatherOp op, PatternRewriter &rewriter) const override { @@ -164,7 +164,7 @@ struct RemoveStrideFromGatherSource : OpRewritePattern { /// `tensor.extract`s. To avoid out-of-bounds memory accesses, these /// loads/extracts are made conditional using `scf.if` ops. struct Gather1DToConditionalLoads : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::GatherOp op, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp index 9d6a865a9301f..479fc0c6a9d8c 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorInterleave.cpp @@ -163,7 +163,7 @@ class UnrollDeinterleaveOp final /// : vector<7xi16>, vector<7xi16> /// ``` struct InterleaveToShuffle final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::InterleaveOp op, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp index 5617b067d249e..7730c4e7c950a 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMask.cpp @@ -48,7 +48,7 @@ namespace { /// until a one-dimensional vector is reached. class CreateMaskOpLowering : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::CreateMaskOp op, PatternRewriter &rewriter) const override { @@ -100,7 +100,7 @@ class CreateMaskOpLowering : public OpRewritePattern { /// will be folded at LLVM IR level. class ConstantMaskOpLowering : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ConstantMaskOp op, PatternRewriter &rewriter) const override { @@ -184,7 +184,7 @@ namespace { /// and actually match the traits of its the nested `MaskableOpInterface`. 
template struct MaskOpRewritePattern : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; private: LogicalResult matchAndRewrite(MaskOp maskOp, diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp index 4773732d8d9a6..e86e2a97038db 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp @@ -39,7 +39,7 @@ namespace { class InnerOuterDimReductionConversion : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; explicit InnerOuterDimReductionConversion( MLIRContext *context, vector::VectorMultiReductionLowering options, @@ -136,7 +136,7 @@ class InnerOuterDimReductionConversion class ReduceMultiDimReductionRank : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; explicit ReduceMultiDimReductionRank( MLIRContext *context, vector::VectorMultiReductionLowering options, @@ -304,7 +304,7 @@ class ReduceMultiDimReductionRank /// and combines results struct TwoDimMultiReductionToElementWise : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::MultiDimReductionOp multiReductionOp, PatternRewriter &rewriter) const override { @@ -359,7 +359,7 @@ struct TwoDimMultiReductionToElementWise /// a sequence of vector.reduction ops. struct TwoDimMultiReductionToReduction : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::MultiDimReductionOp multiReductionOp, PatternRewriter &rewriter) const override { @@ -420,7 +420,7 @@ struct TwoDimMultiReductionToReduction /// separately. 
struct OneDimMultiReductionToTwoDim : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::MultiDimReductionOp multiReductionOp, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp index af4851eb5f158..258f2cbc77736 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp @@ -99,7 +99,7 @@ namespace { /// return %7, %8 : vector<2x3xi32>, vector<2xi32> /// ``` struct ScanToArithOps : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ScanOp scanOp, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp index 603ea41d43360..c5f22b2eafeb7 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShapeCast.cpp @@ -189,7 +189,7 @@ class ShapeCastOpRewritePattern : public OpRewritePattern { } public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ShapeCastOp op, PatternRewriter &rewriter) const override { @@ -356,7 +356,7 @@ class ShapeCastOpRewritePattern : public OpRewritePattern { class ScalableShapeCastOpRewritePattern : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ShapeCastOp op, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp index 78102f7325b9f..8f46ad6ea892b 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorShuffle.cpp @@ -44,7 +44,7 @@ namespace { /// struct MixedSizeInputShuffleOpRewrite final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ShuffleOp shuffleOp, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorStep.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorStep.cpp index ee5568aefda27..08e7c895831ce 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorStep.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorStep.cpp @@ -24,7 +24,7 @@ using namespace mlir::vector; namespace { struct StepToArithConstantOpRewrite final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::StepOp stepOp, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorToFromElementsToShuffleTree.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorToFromElementsToShuffleTree.cpp index 6407a868abd85..7521e2491335b 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorToFromElementsToShuffleTree.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorToFromElementsToShuffleTree.cpp @@ -667,7 +667,7 @@ getToElementsDefiningOps(FromElementsOp fromElemsOp, struct ToFromElementsToShuffleTreeRewrite final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::FromElementsOp fromElemsOp, PatternRewriter &rewriter) const override { 
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp index 9e7d0ced3e6d1..c3f7de0ac3c4e 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTranspose.cpp @@ -300,7 +300,7 @@ namespace { /// %x = vector.insert .., .. [.., ..] class TransposeOpLowering : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; TransposeOpLowering(vector::VectorTransposeLowering vectorTransposeLowering, MLIRContext *context, PatternBenefit benefit = 1) @@ -395,7 +395,7 @@ class TransposeOpLowering : public OpRewritePattern { class Transpose2DWithUnitDimToShapeCast : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; Transpose2DWithUnitDimToShapeCast(MLIRContext *context, PatternBenefit benefit = 1) @@ -433,7 +433,7 @@ class Transpose2DWithUnitDimToShapeCast class TransposeOp2DToShuffleLowering : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; TransposeOp2DToShuffleLowering( vector::VectorTransposeLowering vectorTransposeLowering, diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp index cab12894487e2..963b2c803bc5a 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp @@ -54,7 +54,7 @@ namespace { // input by inserting vector.broadcast. struct CastAwayExtractStridedSliceLeadingOneDim : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp, PatternRewriter &rewriter) const override { @@ -104,7 +104,7 @@ struct CastAwayExtractStridedSliceLeadingOneDim // inputs by inserting vector.broadcast. struct CastAwayInsertStridedSliceLeadingOneDim : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::InsertStridedSliceOp insertOp, PatternRewriter &rewriter) const override { @@ -145,7 +145,7 @@ struct CastAwayInsertStridedSliceLeadingOneDim // Casts away leading one dimensions in vector.insert's vector inputs by // inserting vector.broadcast. struct CastAwayInsertLeadingOneDim : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::InsertOp insertOp, PatternRewriter &rewriter) const override { @@ -221,7 +221,7 @@ static Value dropUnitDimsFromMask(OpBuilder &b, Location loc, Value mask, // 1 dimensions. struct CastAwayTransferReadLeadingOneDim : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::TransferReadOp read, PatternRewriter &rewriter) const override { @@ -275,7 +275,7 @@ struct CastAwayTransferReadLeadingOneDim // 1 dimensions. struct CastAwayTransferWriteLeadingOneDim : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::TransferWriteOp write, PatternRewriter &rewriter) const override { @@ -541,7 +541,7 @@ class CastAwayElementwiseLeadingOneDim : public RewritePattern { // vector.broadcast back to the original shape. 
struct CastAwayConstantMaskLeadingOneDim : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ConstantMaskOp mask, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateMaskedLoadStore.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateMaskedLoadStore.cpp index 78f74eef7bee3..7acc120508a44 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateMaskedLoadStore.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateMaskedLoadStore.cpp @@ -48,7 +48,7 @@ namespace { /// struct VectorMaskedLoadOpConverter final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::MaskedLoadOp maskedLoadOp, PatternRewriter &rewriter) const override { @@ -64,7 +64,6 @@ struct VectorMaskedLoadOpConverter final Value mask = maskedLoadOp.getMask(); Value base = maskedLoadOp.getBase(); Value iValue = maskedLoadOp.getPassThru(); - std::optional alignment = maskedLoadOp.getAlignment(); auto indices = llvm::to_vector_of(maskedLoadOp.getIndices()); Value one = arith::ConstantOp::create(rewriter, loc, indexType, IntegerAttr::get(indexType, 1)); @@ -76,7 +75,7 @@ struct VectorMaskedLoadOpConverter final [&](OpBuilder &builder, Location loc) { auto loadedValue = memref::LoadOp::create( builder, loc, base, indices, /*nontemporal=*/false, - alignment.value_or(0)); + llvm::MaybeAlign(maskedLoadOp.getAlignment().value_or(0))); auto combinedValue = vector::InsertOp::create(builder, loc, loadedValue, iValue, i); scf::YieldOp::create(builder, loc, combinedValue.getResult()); @@ -118,7 +117,7 @@ struct VectorMaskedLoadOpConverter final /// struct VectorMaskedStoreOpConverter final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::MaskedStoreOp maskedStoreOp, PatternRewriter &rewriter) const override { @@ -135,7 +134,6 @@ struct VectorMaskedStoreOpConverter final Value base = maskedStoreOp.getBase(); Value value = maskedStoreOp.getValueToStore(); bool nontemporal = false; - std::optional alignment = maskedStoreOp.getAlignment(); auto indices = llvm::to_vector_of(maskedStoreOp.getIndices()); Value one = arith::ConstantOp::create(rewriter, loc, indexType, IntegerAttr::get(indexType, 1)); @@ -145,8 +143,9 @@ struct VectorMaskedStoreOpConverter final auto ifOp = scf::IfOp::create(rewriter, loc, maskBit, /*else=*/false); rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front()); auto extractedValue = vector::ExtractOp::create(rewriter, loc, value, i); - memref::StoreOp::create(rewriter, loc, extractedValue, base, indices, - nontemporal, alignment.value_or(0)); + memref::StoreOp::create( + rewriter, loc, extractedValue, base, indices, nontemporal, + llvm::MaybeAlign(maskedStoreOp.getAlignment().value_or(0))); rewriter.setInsertionPointAfter(ifOp); indices.back() = diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp index 264cbc1869b9a..3a6684f4edfb7 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp @@ -548,7 +548,7 @@ namespace { // NOTE: By default, all RMW sequences are atomic. Set `disableAtomicRMW` to // `false` to generate non-atomic RMW sequences. 
struct ConvertVectorStore final : OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; ConvertVectorStore(MLIRContext *context, bool disableAtomicRMW) : OpConversionPattern(context), @@ -827,7 +827,7 @@ struct ConvertVectorStore final : OpConversionPattern { /// adjusted mask . struct ConvertVectorMaskedStore final : OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LogicalResult matchAndRewrite(vector::MaskedStoreOp op, OpAdaptor adaptor, @@ -950,7 +950,7 @@ struct ConvertVectorMaskedStore final /// those cases, loads are converted to byte-aligned, byte-sized loads and the /// target vector is extracted from the loaded vector. struct ConvertVectorLoad final : OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LogicalResult matchAndRewrite(vector::LoadOp op, OpAdaptor adaptor, @@ -1059,7 +1059,7 @@ struct ConvertVectorLoad final : OpConversionPattern { /// bitcasting, since each `i8` container element holds two `i4` values. struct ConvertVectorMaskedLoad final : OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LogicalResult matchAndRewrite(vector::MaskedLoadOp op, OpAdaptor adaptor, @@ -1257,7 +1257,7 @@ static bool fitsInMultiByteContainerTy(VectorType subByteVecTy, // TODO: Document-me struct ConvertVectorTransferRead final : OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LogicalResult matchAndRewrite(vector::TransferReadOp op, OpAdaptor adaptor, @@ -1942,7 +1942,7 @@ namespace { /// advantage of high-level information to avoid leaving LLVM to scramble with /// peephole optimizations. struct RewriteBitCastOfTruncI : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::BitCastOp bitCastOp, PatternRewriter &rewriter) const override { @@ -2147,7 +2147,7 @@ struct RewriteAlignedSubByteIntExt : OpRewritePattern { /// %5 = vector.bitcast %4 : vector<4xi8> to vector<8xi4> /// struct RewriteAlignedSubByteIntTrunc : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(arith::TruncIOp truncOp, PatternRewriter &rewriter) const override { @@ -2200,7 +2200,7 @@ struct RewriteAlignedSubByteIntTrunc : OpRewritePattern { /// %2 = arith.trunci %1 : vector<16x8xi8> to vector<16x8xi4> /// struct RewriteVectorTranspose : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; RewriteVectorTranspose(MLIRContext *context, PatternBenefit benefit) : OpRewritePattern(context, benefit) {} diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp index f6d6555f4c6e2..9e49873a4b4b0 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp @@ -34,7 +34,7 @@ using namespace mlir::vector; class DecomposeDifferentRankInsertStridedSlice : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(InsertStridedSliceOp op, PatternRewriter &rewriter) const override { @@ -84,7 +84,7 @@ class DecomposeDifferentRankInsertStridedSlice class ConvertSameRankInsertStridedSliceIntoShuffle : public OpRewritePattern { public: - using 
OpRewritePattern::OpRewritePattern; + using Base::Base; void initialize() { // This pattern creates recursive InsertStridedSliceOp, but the recursion is @@ -183,7 +183,7 @@ class ConvertSameRankInsertStridedSliceIntoShuffle class Convert1DExtractStridedSliceIntoShuffle : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(ExtractStridedSliceOp op, PatternRewriter &rewriter) const override { @@ -271,7 +271,7 @@ class Convert1DExtractStridedSliceIntoExtractInsertChain final class DecomposeNDExtractStridedSlice : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; void initialize() { // This pattern creates recursive ExtractStridedSliceOp, but the recursion diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp index 82bac8c499028..71fba71c9f15f 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp @@ -214,7 +214,7 @@ SmallVector static getStridedSliceInsertionIndices( /// vector.extract_strided_slice operation. struct LinearizeVectorExtractStridedSlice final : public mlir::OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorExtractStridedSlice(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) @@ -285,7 +285,7 @@ struct LinearizeVectorExtractStridedSlice final /// struct LinearizeVectorInsertStridedSlice final : public mlir::OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorInsertStridedSlice(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) @@ -348,7 +348,7 @@ struct LinearizeVectorInsertStridedSlice final /// of the original shuffle operation. 
struct LinearizeVectorShuffle final : public OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorShuffle(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) : OpConversionPattern(typeConverter, context, benefit) {} @@ -423,7 +423,7 @@ struct LinearizeVectorShuffle final /// struct LinearizeVectorExtract final : public OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorExtract(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) : OpConversionPattern(typeConverter, context, benefit) {} @@ -501,7 +501,7 @@ struct LinearizeVectorExtract final /// struct LinearizeVectorInsert final : public OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorInsert(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) : OpConversionPattern(typeConverter, context, benefit) {} @@ -575,7 +575,7 @@ struct LinearizeVectorInsert final /// %out_nd = vector.shape_cast %out_1d: vector<16xf16> to vector<4x4xf16> struct LinearizeVectorBitCast final : public OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorBitCast(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) : OpConversionPattern(typeConverter, context, benefit) {} @@ -598,7 +598,7 @@ struct LinearizeVectorBitCast final /// %out_nd = vector.shape_cast %out_1d : vector<16xf32> to vector<4x4xf32> struct LinearizeVectorSplat final : public OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorSplat(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) @@ -629,7 +629,7 @@ struct LinearizeVectorSplat final /// %shape_cast = vector.shape_cast %mask : vector<4xi1> to vector<1x4xi1> struct LinearizeVectorCreateMask final : OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorCreateMask(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) @@ -684,7 +684,7 @@ struct LinearizeVectorCreateMask final /// For generic cases, the vector unroll pass should be used to unroll the load /// to vector<1x1x...xN> form and then linearized struct LinearizeVectorLoad final : public OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorLoad(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) : OpConversionPattern(typeConverter, context, benefit) {} @@ -731,7 +731,7 @@ struct LinearizeVectorLoad final : public OpConversionPattern { /// to vector<1x1x...xN> form and then linearized struct LinearizeVectorStore final : public OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorStore(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) : OpConversionPattern(typeConverter, context, benefit) {} @@ -778,7 +778,7 @@ struct LinearizeVectorStore final /// struct LinearizeVectorFromElements final : public OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorFromElements(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) : OpConversionPattern(typeConverter, context, benefit) {} @@ -814,7 +814,7 @@ struct 
LinearizeVectorFromElements final /// struct LinearizeVectorToElements final : public OpConversionPattern { - using OpConversionPattern::OpConversionPattern; + using Base::Base; LinearizeVectorToElements(const TypeConverter &typeConverter, MLIRContext *context, PatternBenefit benefit = 1) diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp index c364a8b54167c..1121d9550f265 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp @@ -1081,7 +1081,7 @@ class RewriteScalarExtractOfTransferRead /// Rewrite transfer_writes of vectors of size 1 (e.g., vector<1x1xf32>) /// to memref.store. class RewriteScalarWrite : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::TransferWriteOp xferOp, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp index 866f789ec6a39..d6a6d7cdba673 100644 --- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp @@ -78,7 +78,7 @@ namespace { /// ``` struct MultiReduceToContract : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::MultiDimReductionOp reduceOp, PatternRewriter &rewriter) const override { @@ -138,7 +138,7 @@ struct MultiReduceToContract /// ``` struct CombineContractABTranspose final : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ContractionOp contractOp, PatternRewriter &rewriter) const override { @@ -202,7 +202,7 @@ struct CombineContractABTranspose final /// ``` struct CombineContractResultTranspose final : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::TransposeOp resTOp, PatternRewriter &rewriter) const override { @@ -568,7 +568,7 @@ static SmallVector getIntValueVector(ArrayAttr arrayAttr) { // %2 = vector.extract %1[1] : f16 from vector<2xf16> struct BubbleDownVectorBitCastForExtract : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ExtractOp extractOp, PatternRewriter &rewriter) const override { @@ -643,7 +643,7 @@ struct BubbleDownVectorBitCastForExtract // %1 = vector.bitcast %0 : vector<2xf32> to vector<4xf16> struct BubbleDownBitCastForStridedSliceExtract : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp, PatternRewriter &rewriter) const override { @@ -721,7 +721,7 @@ struct BubbleDownBitCastForStridedSliceExtract // %2 = vector.insert %0, %1 [4] : vector<16xi8> into vector<8x16xi8> // struct BubbleUpBitCastForInsert : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp, PatternRewriter &rewriter) const override { @@ -794,7 +794,7 @@ struct BubbleUpBitCastForInsert : public OpRewritePattern { // offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32> struct BubbleUpBitCastForStridedSliceInsert : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using 
Base::Base; LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp, PatternRewriter &rewriter) const override { @@ -892,7 +892,7 @@ struct BubbleUpBitCastForStridedSliceInsert // %7 = vector.insert_strided_slice %6, %cst { // offsets = [2], strides = [1]} : vector<2xf32> into vector<4xf32> struct BreakDownVectorBitCast : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; public: BreakDownVectorBitCast(MLIRContext *context, @@ -1131,7 +1131,7 @@ struct ReorderElementwiseOpsOnBroadcast final class ExtractOpFromElementwise final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ExtractOp op, PatternRewriter &rewriter) const override { @@ -1206,7 +1206,7 @@ static bool isSupportedMemSinkElementType(Type type) { /// ``` class ExtractOpFromLoad final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ExtractOp op, PatternRewriter &rewriter) const override { @@ -1285,7 +1285,7 @@ class ExtractOpFromLoad final : public OpRewritePattern { class StoreOpFromSplatOrBroadcast final : public OpRewritePattern { public: - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::StoreOp op, PatternRewriter &rewriter) const override { @@ -1476,7 +1476,7 @@ static bool allI1ConstantValuesSetTo(arith::ConstantOp constantOp, bool value) { /// InstCombine seems to handle vectors with multiple elements but not the /// single element ones. struct FoldI1Select : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(arith::SelectOp selectOp, PatternRewriter &rewriter) const override { @@ -1560,7 +1560,7 @@ getTransferFoldableInnerUnitDims(MemRefType srcType, VectorType vectorType) { /// Drop inner most contiguous unit dimensions from transfer_read operand. class DropInnerMostUnitDimsTransferRead : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::TransferReadOp readOp, PatternRewriter &rewriter) const override { @@ -1651,7 +1651,7 @@ class DropInnerMostUnitDimsTransferRead /// Note, this pattern will not collapse "scalable unit" dims (i.e. `[1]`). class DropInnerMostUnitDimsTransferWrite : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp, PatternRewriter &rewriter) const override { @@ -1728,7 +1728,7 @@ class DropInnerMostUnitDimsTransferWrite /// with the RHS transposed) lowering. 
struct CanonicalizeContractMatmulToMMT final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; using FilterConstraintType = std::function; @@ -1845,7 +1845,7 @@ struct CanonicalizeContractMatmulToMMT final template struct FoldArithExtIntoContractionOp : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ContractionOp contractOp, PatternRewriter &rewriter) const override { @@ -1878,7 +1878,7 @@ struct FoldArithExtIntoContractionOp /// %b = vector.reduction %a, %acc /// ``` struct ChainedReduction final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ReductionOp op, PatternRewriter &rewriter) const override { @@ -2033,7 +2033,7 @@ struct DropUnitDimFromElementwiseOps final /// ``` struct DropUnitDimsFromTransposeOp final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::TransposeOp op, PatternRewriter &rewriter) const override { @@ -2110,7 +2110,7 @@ struct DropUnitDimsFromTransposeOp final /// : vector<[4]x4xf32> to vector<[4]x1x1x4xf32> /// ``` struct DropUnitDimsFromScfForOp final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(scf::ForOp forOp, PatternRewriter &rewriter) const override { @@ -2155,7 +2155,7 @@ struct DropUnitDimsFromScfForOp final : OpRewritePattern { /// %c = vector.reduction %b, %acc /// ``` struct ReduceRedundantZero final : OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + using Base::Base; LogicalResult matchAndRewrite(vector::ReductionOp op, PatternRewriter &rewriter) const override { diff --git a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp index 6551a60b5812e..025ee9a04a1de 100644 --- a/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp +++ b/mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp @@ -319,7 +319,7 @@ bool vector::isLinearizableVector(VectorType type) { Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, ArrayRef inputVectorSizes, - Value padValue, + std::optional padValue, bool useInBoundsInsteadOfMasking, ArrayRef inputScalableVecDims) { assert(!llvm::is_contained(inputVectorSizes, ShapedType::kDynamic) && @@ -328,9 +328,11 @@ Value vector::createReadOrMaskedRead(OpBuilder &builder, Location loc, auto sourceShape = sourceShapedType.getShape(); assert(sourceShape.size() == inputVectorSizes.size() && "expected same ranks."); - auto vectorType = VectorType::get(inputVectorSizes, padValue.getType(), - inputScalableVecDims); - assert(padValue.getType() == sourceShapedType.getElementType() && + auto vectorType = + VectorType::get(inputVectorSizes, sourceShapedType.getElementType(), + inputScalableVecDims); + assert((!padValue.has_value() || + padValue.value().getType() == sourceShapedType.getElementType()) && "expected same pad element type to match source element type"); int64_t readRank = inputVectorSizes.size(); auto zero = arith::ConstantIndexOp::create(builder, loc, 0); diff --git a/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp b/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp index 0fa353abc4972..ef35c39316555 100644 --- a/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp +++ b/mlir/lib/Dialect/X86Vector/IR/X86VectorDialect.cpp @@ -83,29 +83,6 @@ x86vector::DotOp::getIntrinsicOperands(ArrayRef operands, return intrinsicOperands; } 
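The repeated `using OpRewritePattern::OpRewritePattern;` -> `using Base::Base;` substitutions above rely on the `Base` alias that OpRewritePattern / OpConversionPattern expose for themselves. A minimal sketch of the resulting boilerplate (illustrative only; the pattern name and body are placeholders, not part of this patch):

    struct ExampleTransposeLowering final
        : OpRewritePattern<vector::TransposeOp> {
      // `Base` names OpRewritePattern<vector::TransposeOp>; inheriting its
      // constructors avoids restating the full base type.
      using Base::Base;

      LogicalResult matchAndRewrite(vector::TransposeOp op,
                                    PatternRewriter &rewriter) const override {
        // Placeholder body: a real pattern would rewrite `op` here.
        return failure();
      }
    };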
-SmallVector x86vector::DotInt8Op::getIntrinsicOperands( - ArrayRef operands, const LLVMTypeConverter &typeConverter, - RewriterBase &rewriter) { - SmallVector intrinsicOprnds; - Adaptor adaptor(operands, *this); - intrinsicOprnds.push_back(adaptor.getW()); - // Bitcast `a` and `b` to i32 - Value bitcast_a = LLVM::BitcastOp::create( - rewriter, getLoc(), - VectorType::get((getA().getType().getShape()[0] / 4), - rewriter.getIntegerType(32)), - adaptor.getA()); - intrinsicOprnds.push_back(bitcast_a); - Value bitcast_b = LLVM::BitcastOp::create( - rewriter, getLoc(), - VectorType::get((getB().getType().getShape()[0] / 4), - rewriter.getIntegerType(32)), - adaptor.getB()); - intrinsicOprnds.push_back(bitcast_b); - - return intrinsicOprnds; -} - SmallVector x86vector::BcstToPackedF32Op::getIntrinsicOperands( ArrayRef operands, const LLVMTypeConverter &typeConverter, RewriterBase &rewriter) { diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp index 20608f97611bb..81b5788d0b9b4 100644 --- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp +++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp @@ -23,7 +23,7 @@ namespace mlir { namespace xegpu { -bool isSharedMemory(const MemRefType &memrefTy) { +static bool isSharedMemory(const MemRefType &memrefTy) { Attribute attr = memrefTy.getMemorySpace(); if (auto intAttr = llvm::dyn_cast(attr)) return intAttr.getInt() == 3; @@ -340,7 +340,7 @@ LogicalResult CreateNdDescOp::verify() { return success(); } -ParseResult parseOptionalDynamicIndexList( +static ParseResult parseOptionalDynamicIndexList( OpAsmParser &parser, SmallVectorImpl &values, DenseI64ArrayAttr &integers, SmallVectorImpl *valueTypes = nullptr, @@ -378,9 +378,9 @@ ParseResult parseOptionalDynamicIndexList( return success(); } -void printOptionalDynamicIndexList(OpAsmPrinter &printer, Operation *op, - OperandRange values, - DenseI64ArrayAttr integers) { +static void printOptionalDynamicIndexList(OpAsmPrinter &printer, Operation *op, + OperandRange values, + DenseI64ArrayAttr integers) { if (!integers || integers.empty()) return; printDynamicIndexList(printer, op, values, integers, diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUBlocking.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUBlocking.cpp index 7efa4b9fbd934..36c498e8b849d 100644 --- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUBlocking.cpp +++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUBlocking.cpp @@ -319,7 +319,8 @@ void XeGPUBlockingPass::runOnOperation() { options.setNativeShapeFn([&](Operation *op) { return getTileShape(op); }); - options.setUnrolledTypesFn([&](ShapedType type, ArrayRef tileShape) { + options.setUnrolledTypesFn([&](ShapedType type, ArrayRef tileShape, + bool returnSingleType = false) { Type elemTy = type.getElementType(); Type newTy; @@ -352,6 +353,8 @@ void XeGPUBlockingPass::runOnOperation() { newTy = type.clone(tileShape, elemTy); } + if (returnSingleType) + return SmallVector{newTy}; std::optional> ratio = computeShapeRatio(type.getShape(), tileShape); assert(ratio && "The shape of the type must be a multiple of tileShape."); diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp index 449b8eb030b07..882691fd19f58 100644 --- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp +++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp @@ -268,7 +268,7 @@ struct MoveFuncBodyToWarpExecuteOnLane0 /// %r = gpu.warp_execute_on_lane_0(%laneid) -> /// (!xegpu.tensor_desc<4x8xf32, #layout0>) { /// 
... -/// %td = xegpu.create_nd_tdesc %arg0[0, 0] +/// %td = xegpu.create_nd_tdesc %arg0 /// : memref<4x8xf32> -> !xegpu.tensor_desc<4x8xf32, #layout0> /// vector.yield %td /// } @@ -277,11 +277,11 @@ struct MoveFuncBodyToWarpExecuteOnLane0 /// ``` /// %r:2 = gpu.warp_execute_on_lane_0(%laneid) -> (...) { /// ... -/// %dead = xegpu.create_nd_tdesc %arg0[0, 0] +/// %dead = xegpu.create_nd_tdesc %arg0 /// : memref<4x8xf32> -> !xegpu.tensor_desc<4x8xf32, #layout0> /// vector.yield %arg0, %dead /// } -/// %td = xegpu.create_nd_tdesc %r#0[0, 0]: memref<4x8xf32> +/// %td = xegpu.create_nd_tdesc %r#0: memref<4x8xf32> /// -> !xegpu.tensor_desc<4x8xf32> /// /// ``` @@ -301,6 +301,10 @@ struct CreateNdDescDistribution final : public gpu::WarpDistributionPattern { if (!layout) return rewriter.notifyMatchFailure( descOp, "the tensor descriptor lacks layout attribute"); + // CreateNdOp must not have offsets. + if (descOp.getMixedOffsets().size()) + return rewriter.notifyMatchFailure( + descOp, "xegpu::CreateNdDescOp must not have offsets"); SmallVector newRetIndices; rewriter.setInsertionPoint(warpOp); @@ -339,22 +343,23 @@ struct CreateNdDescDistribution final : public gpu::WarpDistributionPattern { /// #layout0 = #xegpu.layout /// gpu.warp_execute_on_lane_0(%laneid) -> () { /// ... -/// xegpu.store_nd %arg0, %arg1: vector<4x8xf32>, +/// xegpu.store_nd %arg0, %arg1 [%x, %y]: vector<4x8xf32>, /// !xegpu.tensor_desc<4x8xf32, #layout0> /// } /// ``` /// To /// ``` /// %r:2 = gpu.warp_execute_on_lane_0(%laneid) -> (vector<4x1xf32>, -/// !xegpu.tensor_desc<4x8xf32, #layout0>) { -/// gpu.yield %arg0, %arg1: vector<4x8xf32>, !xegpu.tensor_desc<4x8xf32, -/// #layout0> +/// !xegpu.tensor_desc<4x8xf32, #layout0>, index, index) { +/// ... +/// gpu.yield %arg0, %arg1, %x, %y: vector<4x8xf32>, +/// !xegpu.tensor_desc<4x8xf32, #layout0>, index, index /// } /// %0 = vector.shape_cast %r#0: vector<4x1xf32> to vector<4xf32> /// %1 = unrealized_conversion_cast %r#1: !xegpu.tensor_desc<4x8xf32, /// #layout0> /// -> !xegpu.tensor_desc<4x8xf32> -/// xegpu.store_nd %0, %1: vector<4xf32>, +/// xegpu.store_nd %0, %1 [%r#2, %r#3]: vector<4xf32>, /// !xegpu.tensor_desc<4x8xf32> /// /// ``` @@ -368,10 +373,15 @@ struct StoreNdDistribution final : public gpu::WarpDistributionPattern { if (!storeOp) return failure(); - int64_t offsetSize = static_cast(storeOp.getOffsets().size()); - if ((offsetSize != 0) || storeOp.getConstOffsetsAttr()) - return failure(); - + SmallVector offsets = storeOp.getMixedOffsets(); + // Expecting offsets to be present. 
+ if (offsets.empty()) + return rewriter.notifyMatchFailure(storeOp, + "the store op must have offsets"); + SmallVector offsetsAsValues = + vector::getAsValues(rewriter, storeOp.getLoc(), offsets); + SmallVector offsetTypes = llvm::to_vector( + llvm::map_range(offsetsAsValues, [](Value v) { return v.getType(); })); xegpu::TensorDescType tensorDescTy = storeOp.getTensorDescType(); xegpu::LayoutAttr layout = tensorDescTy.getLayoutAttr(); if (!layout) @@ -387,13 +397,13 @@ struct StoreNdDistribution final : public gpu::WarpDistributionPattern { distributedTypeByWarpOpOrFailure.value(); SmallVector newRetIndices; + SmallVector newYieldedValues = {storeOp.getValue(), + storeOp.getTensorDesc()}; + SmallVector newYieldedTypes = {distributedTypeByWarpOp, tensorDescTy}; + newYieldedValues.append(offsetsAsValues.begin(), offsetsAsValues.end()); + newYieldedTypes.append(offsetTypes.begin(), offsetTypes.end()); gpu::WarpExecuteOnLane0Op newWarpOp = moveRegionToNewWarpOpAndAppendReturns( - rewriter, warpOp, - /* new yielded values = */ - ValueRange{storeOp.getValue(), storeOp.getTensorDesc()}, - /* new yielded types = */ - TypeRange{distributedTypeByWarpOp, storeOp.getTensorDescType()}, - newRetIndices); + rewriter, warpOp, newYieldedValues, newYieldedTypes, newRetIndices); // Create a new store op outside the warp op with the distributed vector // type. Tensor descriptor is not distributed. rewriter.setInsertionPointAfter(newWarpOp); @@ -418,6 +428,9 @@ struct StoreNdDistribution final : public gpu::WarpDistributionPattern { newStoreOperands.push_back( resolveDistributedTy(newWarpOp.getResult(newRetIndices[1]), distributedTensorDescTy, rewriter)); + // Collect offsets. + for (size_t i = 2; i < newRetIndices.size(); ++i) + newStoreOperands.push_back(newWarpOp.getResult(newRetIndices[i])); auto newStoreOp = xegpu::StoreNdOp::create(rewriter, newWarpOp.getLoc(), TypeRange{}, @@ -491,9 +504,15 @@ struct LoadNdDistribution final : public gpu::WarpDistributionPattern { loadOp, "xegpu::LoadNdOp require chip information to determine transpose " "requirement"); - int64_t offsetSize = static_cast(loadOp.getOffsets().size()); - if ((offsetSize != 0) || loadOp.getConstOffsetsAttr()) - return failure(); + // Expecting offsets to be present. + SmallVector offsets = loadOp.getMixedOffsets(); + if (offsets.empty()) + return rewriter.notifyMatchFailure(loadOp, + "the load op must have offsets"); + SmallVector offsetsAsValues = + vector::getAsValues(rewriter, loadOp.getLoc(), offsets); + SmallVector offsetTypes = llvm::to_vector( + llvm::map_range(offsetsAsValues, [](Value v) { return v.getType(); })); xegpu::TensorDescType tensorDescTy = loadOp.getTensorDescType(); xegpu::LayoutAttr layout = tensorDescTy.getLayoutAttr(); @@ -506,10 +525,12 @@ struct LoadNdDistribution final : public gpu::WarpDistributionPattern { cast(warpOp.getResult(operandIdx).getType()); SmallVector newRetIndices; + SmallVector newYieldedValues = {loadOp.getTensorDesc()}; + SmallVector newYieldedTypes = {tensorDescTy}; + newYieldedValues.append(offsetsAsValues.begin(), offsetsAsValues.end()); + newYieldedTypes.append(offsetTypes.begin(), offsetTypes.end()); gpu::WarpExecuteOnLane0Op newWarpOp = moveRegionToNewWarpOpAndAppendReturns( - rewriter, warpOp, - /* new yielded values = */ loadOp.getTensorDesc(), - /* new yielded types = */ tensorDescTy, newRetIndices); + rewriter, warpOp, newYieldedValues, newYieldedTypes, newRetIndices); // Create a new load op outside the warp op with the distributed vector // type. 
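As in the store case above, the load and prefetch distribution patterns forward the op's offsets out of the warp region and recover them when the op is recreated outside. A condensed sketch of that idiom (helper names as used in this file; `newYieldedValues`, `newYieldedTypes`, `newOperands`, and `firstOffsetIdx` are illustrative locals):

    SmallVector<OpFoldResult> offsets = op.getMixedOffsets();
    SmallVector<Value> offsetValues =
        vector::getAsValues(rewriter, op.getLoc(), offsets);
    // Yield the offsets alongside the other escaping values.
    newYieldedValues.append(offsetValues.begin(), offsetValues.end());
    for (Value v : offsetValues)
      newYieldedTypes.push_back(v.getType());
    gpu::WarpExecuteOnLane0Op newWarpOp = moveRegionToNewWarpOpAndAppendReturns(
        rewriter, warpOp, newYieldedValues, newYieldedTypes, newRetIndices);
    // Outside the warp op, the forwarded offsets become trailing operands of
    // the recreated xegpu op.
    for (size_t i = firstOffsetIdx; i < newRetIndices.size(); ++i)
      newOperands.push_back(newWarpOp.getResult(newRetIndices[i]));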
@@ -523,11 +544,15 @@ struct LoadNdDistribution final : public gpu::WarpDistributionPattern { loadOp.getTensorDescType().dropLayouts(); // Distributed tensor // descriptor type does not // contain layout info. + SmallVector newLoadOperands{ + resolveDistributedTy(newWarpOp.getResult(newRetIndices[0]), + distributedTensorDescTy, rewriter)}; + // Collect offsets. + for (size_t i = 1; i < newRetIndices.size(); ++i) + newLoadOperands.push_back(newWarpOp.getResult(newRetIndices[i])); auto newLoadOp = xegpu::LoadNdOp::create( rewriter, newWarpOp.getLoc(), loadNdDistValueTyOrFailure.value(), - resolveDistributedTy(newWarpOp->getResult(newRetIndices[0]), - distributedTensorDescTy, rewriter), - loadOp->getAttrs()); + newLoadOperands, loadOp->getAttrs()); xegpu::removeLayoutAttrs(newLoadOp); // Set the packed attribute if the layout requires it. newLoadOp.setPacked(requirePacked(layout)); @@ -677,85 +702,6 @@ struct DpasDistribution final : public gpu::WarpDistributionPattern { } }; -/// Sink an update_nd_offset op feeding into yield op of an enclosing -/// `gpu.warp_execute_on_lane_0` region. The warp op will still contain the -/// original op that will not be used by the yield op (and should be cleaned -/// up later). The yield op will bypass the updateOp's arguments. The tensor -/// descriptor type is not distributed. Appropriate cast ops are inserted if -/// the distributed types does not match expected xegpu SIMT types. -/// Example: -/// ``` -/// #layout0 = #xegpu.layout -/// %r = gpu.warp_execute_on_lane_0(%laneid) -> -/// (!xegpu.tensor_desc<4x8xf32, #layout0>) { -/// ... -/// %update = xegpu.update_nd_offset %arg0, [%c32, %c16]: -/// !xegpu.tensor_desc<4x8xf32, #layout0> -/// gpu.yield %update -/// } -/// ... -/// ``` -/// To -/// ``` -/// %r:2 = gpu.warp_execute_on_lane_0(%laneid) -> ( -/// !xegpu.tensor_desc<4x8xf32, #layout0>, -/// !xegpu.tensor_desc<4x8xf32, #layout0>, index, index) { -/// ... -/// %dead = xegpu.update_nd_offset %arg0, [%c32, %c16]: -/// !xegpu.tensor_desc<4x8xf32, #layout0> gpu.yield %dead, %arg0 -/// gpu.yield %dead, %arg0, %c32, %c16 -/// } -/// %0 = xegpu.unrealized_conversion_cast %r#1: !xegpu.tensor_desc<4x8xf32, -/// #layout0> -> !xegpu.tensor_desc<4x8xf32> -/// %1 = xegpu.update_nd_offset %0, [%r#2, %r#3]: -/// !xegpu.tensor_desc<4x8xf32> -/// ... -/// ``` -struct UpdateNdOffsetDistribution final : public gpu::WarpDistributionPattern { - using gpu::WarpDistributionPattern::WarpDistributionPattern; - LogicalResult matchAndRewrite(gpu::WarpExecuteOnLane0Op warpOp, - PatternRewriter &rewriter) const override { - OpOperand *operand = - getWarpResult(warpOp, llvm::IsaPred); - if (!operand) - return rewriter.notifyMatchFailure( - warpOp, "warp result is not a xegpu::UpdateNdOffset op"); - auto updateOp = operand->get().getDefiningOp(); - unsigned operandIdx = operand->getOperandNumber(); - - SmallVector newRetIndices; - gpu::WarpExecuteOnLane0Op newWarpOp = moveRegionToNewWarpOpAndAppendReturns( - rewriter, warpOp, updateOp->getOperands(), updateOp.getOperandTypes(), - newRetIndices); - rewriter.setInsertionPointAfter(newWarpOp); - // new update op does not have layout attribute. - xegpu::TensorDescType distributedTensorDescTy = - updateOp.getTensorDescType().dropLayouts(); - SmallVector newUpdateOperands = - llvm::map_to_vector(newRetIndices, [&](size_t i) { - // For the tensor descriptor operand, the layout attribute is - // dropped after distribution. Types needs to be resolved in this - // case. 
- if (isa(newWarpOp.getResult(i).getType())) { - return resolveDistributedTy(newWarpOp.getResult(i), - distributedTensorDescTy, rewriter); - } - return newWarpOp.getResult(i); - }); - // Create a new update op outside the warp op. - auto newUpdateOp = xegpu::UpdateNdOffsetOp::create( - rewriter, newWarpOp.getLoc(), distributedTensorDescTy, - newUpdateOperands, updateOp->getAttrs()); - xegpu::removeLayoutAttrs(newUpdateOp); - Value distributedVal = newWarpOp.getResult(operandIdx); - // Resolve the distributed type with the original type. - Value typeResolved = resolveDistributedTy( - newUpdateOp.getResult(), distributedVal.getType(), rewriter); - rewriter.replaceAllUsesWith(distributedVal, typeResolved); - return success(); - } -}; - /// Distribute a prefetch_nd op at the end of enclosing /// `gpu.warp_execute_on_lane_0`. In case arguments for the prefetch are passed /// through the warp op interface they would be propagated as returned values. @@ -769,18 +715,19 @@ struct UpdateNdOffsetDistribution final : public gpu::WarpDistributionPattern { /// #layout0 = #xegpu.layout /// gpu.warp_execute_on_lane_0(%laneid) -> () { /// ... -/// xegpu.prefetch_nd %arg0 : !xegpu.tensor_desc<4x8xf32, #layout0> +/// xegpu.prefetch_nd %arg0 [%x, %y] : !xegpu.tensor_desc<4x8xf32, #layout0> /// } /// ``` /// To /// ``` /// %r:1 = gpu.warp_execute_on_lane_0(%laneid) -> ( -/// !xegpu.tensor_desc<4x8xf32, #layout0>) { -/// gpu.yield %arg0: !xegpu.tensor_desc<4x8xf32, #layout0> +/// !xegpu.tensor_desc<4x8xf32, #layout0>, index, index) { +/// gpu.yield %arg0, %x, %y: !xegpu.tensor_desc<4x8xf32, #layout0>, index, +/// index /// } /// %1 = unrealized_conversion_cast %r#0: !xegpu.tensor_desc<4x8xf32, /// #layout0> -> !xegpu.tensor_desc<4x8xf32> -/// xegpu.prefetch_nd %1 : !xegpu.tensor_desc<4x8xf32> +/// xegpu.prefetch_nd %1 [%r#1, %r#2] : !xegpu.tensor_desc<4x8xf32> /// /// ``` struct PrefetchNdDistribution final : public gpu::WarpDistributionPattern { @@ -793,17 +740,25 @@ struct PrefetchNdDistribution final : public gpu::WarpDistributionPattern { if (!prefetchOp) return failure(); - int64_t offsetSize = static_cast(prefetchOp.getOffsets().size()); - if ((offsetSize != 0) || prefetchOp.getConstOffsetsAttr()) - return failure(); + SmallVector offsets = prefetchOp.getMixedOffsets(); + // PrefetchNdOp must have offsets. 
+ if (offsets.empty()) + return rewriter.notifyMatchFailure(prefetchOp, + "the prefetch op must have offsets"); + SmallVector offsetsAsValues = + vector::getAsValues(rewriter, prefetchOp.getLoc(), offsets); + SmallVector offsetTypes = llvm::to_vector( + llvm::map_range(offsetsAsValues, [](Value v) { return v.getType(); })); xegpu::LayoutAttr layout = prefetchOp.getTensorDescType().getLayoutAttr(); if (!layout) return rewriter.notifyMatchFailure( prefetchOp, "the source tensor descriptor lacks layout attribute"); - SmallVector newYieldValues = {prefetchOp.getTensorDesc()}; - SmallVector newYieldTypes = {prefetchOp.getTensorDescType()}; + SmallVector newYieldValues = {prefetchOp.getTensorDesc()}; + SmallVector newYieldTypes = {prefetchOp.getTensorDescType()}; + newYieldValues.append(offsetsAsValues.begin(), offsetsAsValues.end()); + newYieldTypes.append(offsetTypes.begin(), offsetTypes.end()); SmallVector newRetIndices; gpu::WarpExecuteOnLane0Op newWarpOp = moveRegionToNewWarpOpAndAppendReturns( rewriter, warpOp, newYieldValues, newYieldTypes, newRetIndices); @@ -814,6 +769,9 @@ struct PrefetchNdDistribution final : public gpu::WarpDistributionPattern { rewriter.setInsertionPointAfter(newWarpOp); SmallVector newPrefetchOperands = {resolveDistributedTy( newWarpOp.getResult(newRetIndices[0]), newTensorDescTy, rewriter)}; + // Collect offsets. + for (size_t i = 1; i < newRetIndices.size(); ++i) + newPrefetchOperands.push_back(newWarpOp.getResult(newRetIndices[i])); xegpu::PrefetchNdOp::create(rewriter, newWarpOp.getLoc(), TypeRange{}, newPrefetchOperands, prefetchOp->getAttrs()); xegpu::removeLayoutAttrs(prefetchOp); @@ -1456,15 +1414,14 @@ struct XeGPUSubgroupDistributePass final void xegpu::populateXeGPUSubgroupDistributePatterns( RewritePatternSet &patterns) { - patterns - .add( - patterns.getContext(), - /*pattern benefit=*/regularPatternBenefit); + patterns.add( + patterns.getContext(), + /*pattern benefit=*/regularPatternBenefit); patterns.add( patterns.getContext(), /*pattern benefit=*/highPatternBenefit); diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp index 29c9fcdfebcdb..a178d0fe4b0b0 100644 --- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp +++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp @@ -56,8 +56,9 @@ struct UnrollPattern : public OpRewritePattern { } SmallVector getUnrolledTypes(ShapedType type, - ArrayRef tileShape) const { - return options.getUnrolledTypes(type, tileShape); + ArrayRef tileShape, + bool returnSingleType = false) const { + return options.getUnrolledTypes(type, tileShape, returnSingleType); } /// Emulate the the unpack behavior using insert_strided_slice for VectorType @@ -121,53 +122,79 @@ struct UnrollPattern : public OpRewritePattern { xegpu::UnrollOptions options; }; +// Generic helper function for unrolling operations with offsets. +// +// Iterates over tile offsets within the tensor descriptor shape and calls +// the provided createOp function for each computed offset. This is used by +// operations like LoadNd, StoreNd, CreateNdDesc, and PrefetchNd when they +// have explicit offsets that need to be adjusted for each unrolled tile. 
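As a concrete (hypothetical) illustration of the helper defined next: with a tensor descriptor of shape 8x8, a targetShape of {4, 4}, and incoming offsets [%x, %y], the relative tile offsets visited are (0,0), (0,4), (4,0) and (4,4), so createOp is invoked four times with adjusted offsets [%x+0, %y+0], [%x+0, %y+4], [%x+4, %y+0] and [%x+4, %y+4], producing one unrolled op per tile.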
+SmallVector computeUnrolledOffsets( + SmallVector mixedOffsets, xegpu::TensorDescType tdescTy, + ArrayRef targetShape, + const std::function)> &createOp, + Location loc, PatternRewriter &rewriter) { + int64_t rank = tdescTy.getRank(); + ArrayRef shape = tdescTy.getShape(); + + auto addi = [&](OpFoldResult a, int64_t b) -> Value { + std::optional maybeInt = getConstantIntValue(a); + if (maybeInt) { + return arith::ConstantIndexOp::create(rewriter, loc, *maybeInt + b); + } else { + auto aV = llvm::cast(a); + auto bV = arith::ConstantIndexOp::create(rewriter, loc, b); + return rewriter.createOrFold(loc, aV, bV); + } + }; + + SmallVector oldOffsets = llvm::to_vector( + llvm::drop_begin(mixedOffsets, mixedOffsets.size() - rank)); + auto validIdxes = + llvm::seq(mixedOffsets.size() - rank, mixedOffsets.size()); + + SmallVector newOps; + for (SmallVector offsets : + StaticTileOffsetRange(shape, targetShape)) { + + for (auto [idx, oldOff, offset] : + llvm::zip(validIdxes, oldOffsets, offsets)) + mixedOffsets[idx] = addi(oldOff, offset); + + auto newOp = createOp(mixedOffsets); + newOps.push_back(newOp); + } + return newOps; +} + struct UnrollCreateNdOp : public UnrollPattern { using UnrollPattern::UnrollPattern; LogicalResult matchAndRewrite(xegpu::CreateNdDescOp op, PatternRewriter &rewriter) const override { Location loc = op.getLoc(); xegpu::TensorDescType tdescTy = op.getType(); - int64_t rank = tdescTy.getRank(); - ArrayRef shape = tdescTy.getShape(); std::optional> targetShape = getTargetShape(op); if (!targetShape) return failure(); - auto newTdescTy = getUnrolledTypes(tdescTy, *targetShape)[0]; - - auto addi = [&](OpFoldResult a, int64_t b) -> Value { - std::optional maybeInt = getConstantIntValue(a); - if (maybeInt) { - return arith::ConstantIndexOp::create(rewriter, loc, *maybeInt + b); - } else { - auto aV = llvm::cast(a); - auto bV = arith::ConstantIndexOp::create(rewriter, loc, b); - return rewriter.createOrFold(loc, aV, bV); - } - }; - - SmallVector mixedOffsets = op.getMixedOffsets(); - - // For n-D memrefs where n > rank, we need to handle the last `rank` - // dimensions only, and keep the first `n-rank` dimensions as is. 
- SmallVector oldOffsets = llvm::to_vector( - llvm::drop_begin(mixedOffsets, mixedOffsets.size() - rank)); - auto validIdxes = - llvm::seq(mixedOffsets.size() - rank, mixedOffsets.size()); - SmallVector newOps; - for (SmallVector offsets : - StaticTileOffsetRange(shape, *targetShape)) { - - for (auto [idx, oldOff, offset] : - llvm::zip(validIdxes, oldOffsets, offsets)) - mixedOffsets[idx] = addi(oldOff, offset); + auto newTdescTy = getUnrolledTypes(tdescTy, *targetShape)[0]; + bool hasOffsets = op.getMixedOffsets().size() != 0; + if (!hasOffsets) { auto newOp = xegpu::CreateNdDescOp::create( - rewriter, loc, newTdescTy, op.getSource(), mixedOffsets, - op.getMixedSizes(), op.getMixedStrides()); + rewriter, loc, newTdescTy, op.getSource(), op.getMixedSizes(), + op.getMixedStrides()); newOps.push_back(newOp); + } else { + auto createOp = [&](SmallVector offsets) -> Value { + return xegpu::CreateNdDescOp::create( + rewriter, loc, newTdescTy, op.getSource(), offsets, + op.getMixedSizes(), op.getMixedStrides()); + }; + + newOps = computeUnrolledOffsets(op.getMixedOffsets(), tdescTy, + *targetShape, createOp, loc, rewriter); } Value castOp = unpack(newOps, tdescTy, *targetShape, loc, rewriter); rewriter.replaceOp(op, castOp); @@ -216,17 +243,30 @@ struct UnrollPrefetchNdOp : public UnrollPattern { return failure(); int64_t offsetSize = static_cast(op.getOffsets().size()); - if ((offsetSize != 0) || op.getConstOffsetsAttr()) - return failure(); + bool hasOffsets = (offsetSize != 0) || op.getConstOffsetsAttr(); + + SmallVector convertedTdescTypes = getUnrolledTypes( + tdescTy, *targetShape, /*returnSingleType*/ hasOffsets); - SmallVector convertedTdescTypes = - getUnrolledTypes(tdescTy, *targetShape); SmallVector convertedTdesc = pack( op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter); - for (auto t : convertedTdesc) - xegpu::PrefetchNdOp::create(rewriter, loc, TypeRange(), t, - op->getAttrs()); + if (!hasOffsets) { + for (auto t : convertedTdesc) + xegpu::PrefetchNdOp::create(rewriter, loc, TypeRange(), t, + op->getAttrs()); + } else { + auto createPrefetch = [&](SmallVector offsets) -> Value { + xegpu::PrefetchNdOp::create(rewriter, loc, convertedTdesc[0], offsets, + op.getL1HintAttr(), op.getL2HintAttr(), + op.getL3HintAttr()); + // return dummy Value to satisfy function's signature + return nullptr; + }; + + computeUnrolledOffsets(op.getMixedOffsets(), tdescTy, *targetShape, + createPrefetch, loc, rewriter); + } rewriter.eraseOp(op); return success(); @@ -247,22 +287,33 @@ struct UnrollLoadNdOp : public UnrollPattern { return failure(); int64_t offsetSize = static_cast(op.getOffsets().size()); - if ((offsetSize != 0) || op.getConstOffsetsAttr()) - return failure(); + bool hasOffsets = (offsetSize != 0) || op.getConstOffsetsAttr(); Type elemTy = tdescTy.getElementType(); VectorType newValueTy = valueTy.cloneWith(*targetShape, elemTy); - SmallVector convertedTdescTypes = - getUnrolledTypes(tdescTy, *targetShape); + SmallVector convertedTdescTypes = getUnrolledTypes( + tdescTy, *targetShape, /*returnSingleType*/ hasOffsets); + SmallVector convertedTdescs = pack( op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter); - SmallVector newOps; - for (auto t : convertedTdescs) { - auto newOp = - xegpu::LoadNdOp::create(rewriter, loc, newValueTy, t, op->getAttrs()); - newOps.push_back(newOp); + + if (!hasOffsets) { + for (auto t : convertedTdescs) { + auto newOp = xegpu::LoadNdOp::create(rewriter, loc, newValueTy, t, + op->getAttrs()); + newOps.push_back(newOp); + } + } 
else { + auto createLoad = [&](SmallVector offsets) { + return xegpu::LoadNdOp::create( + rewriter, loc, newValueTy, convertedTdescs[0], offsets, + op.getPackedAttr(), op.getTransposeAttr(), op.getL1HintAttr(), + op.getL2HintAttr(), op.getL3HintAttr()); + }; + newOps = computeUnrolledOffsets(op.getMixedOffsets(), tdescTy, + *targetShape, createLoad, loc, rewriter); } Value castOp = unpack(newOps, op.getType(), *targetShape, loc, rewriter); @@ -285,22 +336,36 @@ struct UnrollStoreNdOp : public UnrollPattern { return failure(); int64_t offsetSize = static_cast(op.getOffsets().size()); - if ((offsetSize != 0) || op.getConstOffsetsAttr()) - return failure(); + bool hasOffsets = (offsetSize != 0) || op.getConstOffsetsAttr(); SmallVector convertedValTypes = getUnrolledTypes(valueTy, *targetShape); - SmallVector convertedTdescTypes = - getUnrolledTypes(tdescTy, *targetShape); + SmallVector convertedTdescTypes = getUnrolledTypes( + tdescTy, *targetShape, /*returnSingleType*/ hasOffsets); - SmallVector convertedValues = - pack(op.getValue(), convertedValTypes, *targetShape, loc, rewriter); SmallVector convertedTdescs = pack( op.getTensorDesc(), convertedTdescTypes, *targetShape, loc, rewriter); - for (auto [v, t] : llvm::zip(convertedValues, convertedTdescs)) - xegpu::StoreNdOp::create(rewriter, loc, v, t, op.getL1HintAttr(), - op.getL2HintAttr(), op.getL3HintAttr()); + SmallVector convertedValues = + pack(op.getValue(), convertedValTypes, *targetShape, loc, rewriter); + if (!hasOffsets) { + for (auto [v, t] : llvm::zip(convertedValues, convertedTdescs)) + xegpu::StoreNdOp::create(rewriter, loc, v, t, op.getL1HintAttr(), + op.getL2HintAttr(), op.getL3HintAttr()); + } else { + size_t valueIndex = 0; + auto createStore = [&](SmallVector offsets) { + xegpu::StoreNdOp::create(rewriter, loc, convertedValues[valueIndex++], + convertedTdescs[0], offsets, + op.getL1HintAttr(), op.getL2HintAttr(), + op.getL3HintAttr()); + // return dummy Value to satisfy function's signature + return nullptr; + }; + + computeUnrolledOffsets(op.getMixedOffsets(), tdescTy, *targetShape, + createStore, loc, rewriter); + } rewriter.eraseOp(op); return success(); @@ -537,6 +602,195 @@ struct UnrollLoadGatherOp : public UnrollPattern { } }; +/// This pattern handles the unrolling of LoadGatherOp with offsets (gathered +/// load). +/// It unrolls the offsets and mask operands accordingly, and creates multiple +/// LoadGatherOp with the unrolled operands. 
+struct UnrollLoadGatherOpWithOffset + : public UnrollPattern { + using UnrollPattern::UnrollPattern; + LogicalResult matchAndRewrite(xegpu::LoadGatherOp op, + PatternRewriter &rewriter) const override { + Location loc = op.getLoc(); + VectorType valueTy = llvm::dyn_cast(op.getType()); + Value offsets = op.getOffsets(); + Value mask = op.getMask(); + + // Only handle the case where offsets are present (scattered load) + if (!offsets) + return failure(); + + std::optional> targetShape = getTargetShape(op); + if (!targetShape) + return failure(); + + SmallVector targetMaskShape(*targetShape); + int64_t chunkSize = 1; + if (auto chunkSizeAttr = op->getAttr("chunk_size")) { + if (auto intAttr = llvm::dyn_cast(chunkSizeAttr)) + chunkSize = intAttr.getInt(); + } + + // Unroll mask and offsets with correct shape + VectorType maskTy = llvm::dyn_cast(mask.getType()); + VectorType offsetsTy = llvm::dyn_cast(offsets.getType()); + Type elemTy = valueTy.getElementType(); + VectorType newValueTy = valueTy.cloneWith(*targetShape, elemTy); + + SmallVector convertedMaskTypes; + SmallVector convertedMasks; + SmallVector convertedOffsetTypes; + SmallVector convertedOffsets; + + if (chunkSize > 1) { + // For chunked loads, mask and offsets have one less dimension + targetMaskShape.pop_back(); + int64_t blockedChunkSize = targetShape->back(); + int64_t numNewChunks = chunkSize / blockedChunkSize; + chunkSize = blockedChunkSize; + + convertedMaskTypes = getUnrolledTypes(maskTy, targetMaskShape); + convertedOffsetTypes = getUnrolledTypes(offsetsTy, targetMaskShape); + + SmallVector convertedMasksBase = + pack(mask, convertedMaskTypes, targetMaskShape, loc, rewriter); + SmallVector convertedOffsetsBase = + pack(offsets, convertedOffsetTypes, targetMaskShape, loc, rewriter); + + for (auto maskVal : convertedMasksBase) + convertedMasks.append(numNewChunks, maskVal); + + for (auto [baseOffset, offsetType] : + llvm::zip(convertedOffsetsBase, convertedOffsetTypes)) { + for (int64_t i = 0; i < numNewChunks; ++i) { + Value inc = arith::ConstantIndexOp::create(rewriter, loc, + i * blockedChunkSize); + Value incVec = + vector::BroadcastOp::create(rewriter, loc, offsetType, inc); + Value offsetVal = + arith::AddIOp::create(rewriter, loc, baseOffset, incVec); + convertedOffsets.push_back(offsetVal); + } + } + } else { + convertedMaskTypes = getUnrolledTypes(maskTy, targetMaskShape); + convertedMasks = + pack(mask, convertedMaskTypes, targetMaskShape, loc, rewriter); + + convertedOffsetTypes = getUnrolledTypes(offsetsTy, *targetShape); + convertedOffsets = + pack(offsets, convertedOffsetTypes, *targetShape, loc, rewriter); + } + + SmallVector newOps; + for (auto [o, m] : llvm::zip(convertedOffsets, convertedMasks)) { + auto newOp = xegpu::LoadGatherOp::create( + rewriter, loc, newValueTy, op.getSource(), o, m, + rewriter.getI64IntegerAttr(chunkSize), op.getL1HintAttr(), + op.getL2HintAttr(), op.getL3HintAttr()); + newOps.push_back(newOp); + } + + Value castOp = unpack(newOps, op.getType(), *targetShape, loc, rewriter); + rewriter.replaceOp(op, castOp); + return success(); + } +}; + +/// This pattern handles the unrolling of StoreScatterOp with offsets (scattered +/// store). +/// It unrolls the offsets and mask operands accordingly, and creates multiple +/// StoreScatterOp with the unrolled operands. 
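Both the gathered-load pattern above and the scattered-store pattern that follows treat chunk_size > 1 the same way: the innermost element of targetShape becomes the blocked chunk size, each unrolled mask is replicated numNewChunks = chunkSize / blockedChunkSize times, and each base offset vector is reused with a broadcast increment of i * blockedChunkSize. For a hypothetical chunk_size of 8 and an innermost target dimension of 2, numNewChunks is 4, so every offset tile is emitted with increments 0, 2, 4 and 6, and its mask is duplicated four times.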
+struct UnrollStoreScatterOpWithOffsets + : public UnrollPattern { + using UnrollPattern::UnrollPattern; + LogicalResult matchAndRewrite(xegpu::StoreScatterOp op, + PatternRewriter &rewriter) const override { + Location loc = op.getLoc(); + VectorType valueTy = llvm::dyn_cast(op.getValue().getType()); + Value offsets = op.getOffsets(); + Value mask = op.getMask(); + + // Only handle the case where offsets are present (scattered store) + if (!offsets) + return failure(); + + std::optional> targetShape = getTargetShape(op); + if (!targetShape) + return failure(); + + int64_t chunkSize = 1; + if (auto chunkSizeAttr = op->getAttr("chunk_size")) { + if (auto intAttr = llvm::dyn_cast(chunkSizeAttr)) + chunkSize = intAttr.getInt(); + } + + SmallVector targetMaskShape(*targetShape); + VectorType maskTy = llvm::dyn_cast(mask.getType()); + VectorType offsetsTy = llvm::dyn_cast(offsets.getType()); + + SmallVector convertedMaskTypes; + SmallVector convertedMasks; + SmallVector convertedOffsetTypes; + SmallVector convertedOffsets; + + if (chunkSize > 1) { + targetMaskShape.pop_back(); + int64_t blockedChunkSize = targetShape->back(); + int64_t numNewChunks = chunkSize / blockedChunkSize; + chunkSize = blockedChunkSize; + + convertedMaskTypes = getUnrolledTypes(maskTy, targetMaskShape); + convertedOffsetTypes = getUnrolledTypes(offsetsTy, targetMaskShape); + + SmallVector convertedMasksBase = + pack(mask, convertedMaskTypes, targetMaskShape, loc, rewriter); + SmallVector convertedOffsetsBase = + pack(offsets, convertedOffsetTypes, targetMaskShape, loc, rewriter); + + for (auto maskVal : convertedMasksBase) + convertedMasks.append(numNewChunks, maskVal); + + for (auto [baseOffset, offsetType] : + llvm::zip(convertedOffsetsBase, convertedOffsetTypes)) { + for (int64_t i = 0; i < numNewChunks; ++i) { + Value inc = arith::ConstantIndexOp::create(rewriter, loc, + i * blockedChunkSize); + Value incVec = + vector::BroadcastOp::create(rewriter, loc, offsetType, inc); + Value offsetVal = + arith::AddIOp::create(rewriter, loc, baseOffset, incVec); + convertedOffsets.push_back(offsetVal); + } + } + } else { + convertedMaskTypes = getUnrolledTypes(maskTy, targetMaskShape); + convertedMasks = + pack(mask, convertedMaskTypes, targetMaskShape, loc, rewriter); + + convertedOffsetTypes = getUnrolledTypes(offsetsTy, *targetShape); + convertedOffsets = + pack(offsets, convertedOffsetTypes, *targetShape, loc, rewriter); + } + + SmallVector convertedValTypes = + getUnrolledTypes(valueTy, *targetShape); + SmallVector convertedValues = + pack(op.getValue(), convertedValTypes, *targetShape, loc, rewriter); + + for (auto [v, o, m] : + llvm::zip(convertedValues, convertedOffsets, convertedMasks)) { + xegpu::StoreScatterOp::create(rewriter, loc, v, op.getDest(), o, m, + rewriter.getI64IntegerAttr(chunkSize), + op.getL1HintAttr(), op.getL2HintAttr(), + op.getL3HintAttr()); + } + + rewriter.eraseOp(op); + return success(); + } +}; + struct UnrollPrefetchOp : public UnrollPattern { using UnrollPattern::UnrollPattern; LogicalResult matchAndRewrite(xegpu::PrefetchOp op, @@ -766,6 +1020,7 @@ void mlir::xegpu::populateXeGPUUnrollPatterns( .add( + UnrollUpdateOffsetOp, UnrollLoadMatrixOp, UnrollStoreMatrixOp, + UnrollLoadGatherOpWithOffset, UnrollStoreScatterOpWithOffsets>( patterns.getContext(), options); } diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp index d7592fed6d186..784e5d68ce885 100644 --- 
a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp +++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp @@ -824,7 +824,7 @@ struct WgToSgStoreScatterOpWithOffset return failure(); xegpu::DistributeLayoutAttr layout = - xegpu::getDistributeLayoutAttr(op.getValue()); + xegpu::getDistributeLayoutAttr(op.getOperand(0)); if (!layout || !layout.isForWorkgroup()) return failure(); @@ -844,12 +844,19 @@ struct WgToSgStoreScatterOpWithOffset auto chunkSizeAttr = rewriter.getI64IntegerAttr(chunkSize); for (auto [val, offs, mask] : llvm::zip( adaptor.getValue(), adaptor.getOffsets(), adaptor.getMask())) { - xegpu::StoreScatterOp::create(rewriter, loc, val, op.getDest(), offs, - mask, chunkSizeAttr, op.getL1HintAttr(), - op.getL2HintAttr(), op.getL3HintAttr()); + auto store = xegpu::StoreScatterOp::create( + rewriter, loc, val, op.getDest(), offs, mask, chunkSizeAttr, + op.getL1HintAttr(), op.getL2HintAttr(), op.getL3HintAttr()); // Update the layout attribute to drop sg_layout and sg_data. - if (auto newLayout = layout.dropSgLayoutAndData()) - op->setAttr("layout", newLayout); + if (!layout.getEffectiveLaneLayoutAsInt().empty() || + !layout.getEffectiveInstDataAsInt().empty()) { + for (OpOperand &operand : store->getOpOperands()) { + // Skip for operand one (memref) + if (operand.getOperandNumber() == 1) + continue; + xegpu::setDistributeLayoutAttr(operand, layout.dropSgLayoutAndData()); + } + } } rewriter.eraseOp(op); return success(); @@ -1027,6 +1034,70 @@ struct WgToSgVectorShapeCastOp } }; +/// Pattern for lowering vector.multi_reduction op to subgroup level. +/// Current limitation: the sg_layout in the reduced dimension being 1 +/// so that reduction is local to subgroup & no cross-subgroup communication is +/// needed. +/// TODO: Add cases to handle more general situations which require SLM access. +struct WgToSgMultiDimReductionOp + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(vector::MultiDimReductionOp op, OneToNOpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + VectorType srcType = op.getSourceVectorType(); + VectorType dstType = dyn_cast(op.getResult().getType()); + if (!dstType) + return failure(); + + auto srcShape = srcType.getShape(); + xegpu::DistributeLayoutAttr layout = + xegpu::getDistributeLayoutAttr(op.getResult()); + if (!layout || !layout.isForWorkgroup()) + return failure(); + + auto reductionDims = llvm::to_vector(op.getReductionDims()); + + SmallVector sgLayout = llvm::cast(layout) + .getParent() + .getEffectiveSgLayoutAsInt(); + SmallVector sgData = llvm::cast(layout) + .getParent() + .getEffectiveSgDataAsInt(); + + // Check that the sgLayout in the reduced dimension is 1 and + // each sg gets the entire slice to reduce. 
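As a hypothetical example of the constraint checked just below: reducing dimension 1 of a 32x64 source with sg_layout = [4, 1] and sg_data = [8, 64] is accepted, since sgLayout[1] == 1 and sgData[1] == 64 == srcShape[1]; each subgroup then reduces its own 8x64 slice locally and no cross-subgroup communication is needed. A layout such as sg_layout = [4, 2] over the same source would be rejected by this check.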
+ for (int64_t dim : reductionDims) { + if (sgLayout[dim] != 1 || sgData[dim] != srcShape[dim]) + return rewriter.notifyMatchFailure( + op, + "sgLayout in each reduced dimension must be 1 and sgData in the " + "reduced dim must match srcShape in that dim"); + } + + SmallVector sgShape = getSgShapeAndCount(srcShape, layout).first; + + VectorType newDstType = + VectorType::get({sgShape}, dstType.getElementType()); + + SmallVector newReductions; + for (auto sgSrc : adaptor.getSource()) { + auto newOp = rewriter.create( + op.getLoc(), newDstType, op.getKind(), sgSrc, adaptor.getAcc()[0], + op.getReductionDims()); + if (!layout.getEffectiveLaneLayoutAsInt().empty() || + !layout.getEffectiveInstDataAsInt().empty()) + xegpu::setDistributeLayoutAttr(newOp->getResult(0), + layout.dropSgLayoutAndData()); + newReductions.push_back(newOp.getResult()); + } + + rewriter.replaceOpWithMultiple(op, {newReductions}); + return success(); + } +}; + } // namespace namespace mlir { @@ -1040,8 +1111,8 @@ void populateXeGPUWgToSgDistributePatterns(RewritePatternSet &patterns) { WgToSgElementwiseOp, WgToSgVectorBroadcastOp, WgToSgConvertLayoutOp, WgToSgArithConstantOp, WgToSgLoadGatherOpWithOffset, WgToSgStoreScatterOpWithOffset, WgToSgLoadMatrixOp, - WgToSgStoreMatrixOp, WgToSgVectorStepOp, WgToSgVectorShapeCastOp>( - patterns.getContext()); + WgToSgStoreMatrixOp, WgToSgVectorStepOp, WgToSgVectorShapeCastOp, + WgToSgMultiDimReductionOp>(patterns.getContext()); } } // namespace xegpu } // namespace mlir @@ -1183,10 +1254,7 @@ void XeGPUWgToSgDistributePass::runOnOperation() { target.addDynamicallyLegalOp( [=](xegpu::StoreScatterOp op) -> bool { - // Check if the layout attribute is present on the result. - auto layout = op->getAttrOfType("layout"); - if (!layout) - return true; + auto layout = xegpu::getDistributeLayoutAttr(op.getOperand(0)); return isLegal(layout); }); @@ -1195,6 +1263,11 @@ void XeGPUWgToSgDistributePass::runOnOperation() { return isLegal(xegpu::getDistributeLayoutAttr(op.getResult())); }); + target.addDynamicallyLegalOp( + [=](vector::MultiDimReductionOp op) -> bool { + return isLegal(xegpu::getDistributeLayoutAttr(op.getResult())); + }); + target.addDynamicallyLegalOp( [=](xegpu::ConvertLayoutOp op) -> bool { return isLegal(op.getInputLayout()) && isLegal(op.getTargetLayout()); diff --git a/mlir/lib/ExecutionEngine/JitRunner.cpp b/mlir/lib/ExecutionEngine/JitRunner.cpp index 0ada4cc96570a..db0516533afcb 100644 --- a/mlir/lib/ExecutionEngine/JitRunner.cpp +++ b/mlir/lib/ExecutionEngine/JitRunner.cpp @@ -271,7 +271,7 @@ Error checkCompatibleReturnType(LLVM::LLVMFuncOp mainFunction) { return Error::success(); } template -Error compileAndExecuteSingleReturnFunction( +static Error compileAndExecuteSingleReturnFunction( Options &options, Operation *module, StringRef entryPoint, CompileAndExecuteConfig config, std::unique_ptr tm) { auto mainFunction = dyn_cast_or_null( diff --git a/mlir/lib/ExecutionEngine/LevelZeroRuntimeWrappers.cpp b/mlir/lib/ExecutionEngine/LevelZeroRuntimeWrappers.cpp index 21eaf28c9f214..d0728274b94c8 100644 --- a/mlir/lib/ExecutionEngine/LevelZeroRuntimeWrappers.cpp +++ b/mlir/lib/ExecutionEngine/LevelZeroRuntimeWrappers.cpp @@ -328,12 +328,12 @@ struct DynamicEventPool { } }; -L0RTContextWrapper &getRtContext() { +static L0RTContextWrapper &getRtContext() { thread_local static L0RTContextWrapper rtContext(0); return rtContext; } -DynamicEventPool &getDynamicEventPool() { +static DynamicEventPool &getDynamicEventPool() { thread_local static DynamicEventPool 
dynEventPool{&getRtContext()}; return dynEventPool; } @@ -492,8 +492,8 @@ extern "C" void mgpuMemcpy(void *dst, void *src, size_t sizeBytes, } template -void mgpuMemset(void *dst, PATTERN_TYPE value, size_t count, - StreamWrapper *stream) { +static void mgpuMemset(void *dst, PATTERN_TYPE value, size_t count, + StreamWrapper *stream) { L0RTContextWrapper &rtContext = getRtContext(); auto listType = rtContext.copyEngineMaxMemoryFillPatternSize >= sizeof(PATTERN_TYPE) diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp index 3d366276b4375..8f199b60fccdc 100644 --- a/mlir/lib/IR/Builders.cpp +++ b/mlir/lib/IR/Builders.cpp @@ -14,6 +14,7 @@ #include "mlir/IR/IRMapping.h" #include "mlir/IR/Matchers.h" #include "llvm/ADT/SmallVectorExtras.h" +#include "llvm/Support/DebugLog.h" using namespace mlir; @@ -486,9 +487,18 @@ OpBuilder::tryFold(Operation *op, SmallVectorImpl &results, // Try to fold the operation. SmallVector foldResults; + LDBG() << "Trying to fold: " + << OpWithFlags(op, OpPrintingFlags().skipRegions()); if (failed(op->fold(foldResults))) return cleanupFailure(); + int count = 0; + do { + LDBG() << "Folded in place #" << count + << " times: " << OpWithFlags(op, OpPrintingFlags().skipRegions()); + count++; + } while (foldResults.empty() && succeeded(op->fold(foldResults))); + // An in-place fold does not require generation of any constants. if (foldResults.empty()) return success(); diff --git a/mlir/lib/Interfaces/CMakeLists.txt b/mlir/lib/Interfaces/CMakeLists.txt index fdc19844702bc..388de1c3e5abf 100644 --- a/mlir/lib/Interfaces/CMakeLists.txt +++ b/mlir/lib/Interfaces/CMakeLists.txt @@ -11,6 +11,7 @@ set(LLVM_OPTIONAL_SOURCES InferIntRangeInterface.cpp InferTypeOpInterface.cpp LoopLikeInterface.cpp + MemOpInterfaces.cpp MemorySlotInterfaces.cpp ParallelCombiningOpInterface.cpp RuntimeVerifiableOpInterface.cpp @@ -79,6 +80,7 @@ add_mlir_library(MLIRLoopLikeInterface MLIRFunctionInterfaces ) +add_mlir_interface_library(MemOpInterfaces) add_mlir_interface_library(MemorySlotInterfaces) add_mlir_interface_library(ParallelCombiningOpInterface) add_mlir_interface_library(RuntimeVerifiableOpInterface) diff --git a/mlir/lib/Interfaces/MemOpInterfaces.cpp b/mlir/lib/Interfaces/MemOpInterfaces.cpp new file mode 100644 index 0000000000000..fe5c717f67bc4 --- /dev/null +++ b/mlir/lib/Interfaces/MemOpInterfaces.cpp @@ -0,0 +1,73 @@ +//===- MemOpInterfaces.cpp - Memory operation interfaces ---------*- C++-*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Interfaces/MemOpInterfaces.h" +#include "mlir/IR/Attributes.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Value.h" + +using namespace mlir; + +LogicalResult mlir::detail::verifyMemorySpaceCastOpInterface(Operation *op) { + auto memCastOp = cast(op); + + // Verify that the source and target pointers are valid + Value sourcePtr = memCastOp.getSourcePtr(); + Value targetPtr = memCastOp.getTargetPtr(); + + if (!sourcePtr || !targetPtr) { + return op->emitError() + << "memory space cast op must have valid source and target pointers"; + } + + if (sourcePtr.getType().getTypeID() != targetPtr.getType().getTypeID()) { + return op->emitError() + << "expected source and target types of the same kind"; + } + + // Verify the Types are of `PtrLikeTypeInterface` type. + auto sourceType = dyn_cast(sourcePtr.getType()); + if (!sourceType) { + return op->emitError() + << "source type must implement `PtrLikeTypeInterface`, but got: " + << sourcePtr.getType(); + } + + auto targetType = dyn_cast(targetPtr.getType()); + if (!targetType) { + return op->emitError() + << "target type must implement `PtrLikeTypeInterface`, but got: " + << targetPtr.getType(); + } + + // Verify that the operation has exactly one result + if (op->getNumResults() != 1) { + return op->emitError() + << "memory space cast op must have exactly one result"; + } + + return success(); +} + +FailureOr>> +mlir::detail::bubbleDownInPlaceMemorySpaceCastImpl(OpOperand &operand, + ValueRange results) { + MemorySpaceCastOpInterface castOp = + MemorySpaceCastOpInterface::getIfPromotableCast(operand.get()); + + // Bail if the src is not valid. + if (!castOp) + return failure(); + + // Modify the op. + operand.set(castOp.getSourcePtr()); + return std::optional>(); +} + +#include "mlir/Interfaces/MemOpInterfaces.cpp.inc" diff --git a/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp b/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp index af4ea5ac1cec8..0f28cbc751c1c 100644 --- a/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp +++ b/mlir/lib/Interfaces/Utils/InferIntRangeCommon.cpp @@ -304,7 +304,7 @@ static ConstantIntRanges inferDivURange(const ConstantIntRanges &lhs, umin = lhsMin.udiv(rhsMax); // X u/ Y u<= X. 
- APInt umax = lhsMax; + const APInt &umax = lhsMax; return ConstantIntRanges::fromUnsigned(umin, umax); } diff --git a/mlir/lib/Target/IRDLToCpp/IRDLToCpp.cpp b/mlir/lib/Target/IRDLToCpp/IRDLToCpp.cpp index d6b8a8a1df426..e3f075fcc1294 100644 --- a/mlir/lib/Target/IRDLToCpp/IRDLToCpp.cpp +++ b/mlir/lib/Target/IRDLToCpp/IRDLToCpp.cpp @@ -54,6 +54,7 @@ struct OpStrings { std::string opCppName; SmallVector opResultNames; SmallVector opOperandNames; + SmallVector opRegionNames; }; static std::string joinNameList(llvm::ArrayRef names) { @@ -87,8 +88,8 @@ static TypeStrings getStrings(irdl::TypeOp type) { /// Generates OpStrings from an OperatioOp static OpStrings getStrings(irdl::OperationOp op) { auto operandOp = op.getOp(); - auto resultOp = op.getOp(); + auto regionsOp = op.getOp(); OpStrings strings; strings.opName = op.getSymName(); @@ -108,6 +109,13 @@ static OpStrings getStrings(irdl::OperationOp op) { })); } + if (regionsOp) { + strings.opRegionNames = SmallVector( + llvm::map_range(regionsOp->getNames(), [](Attribute attr) { + return llvm::formatv("{0}", cast(attr)); + })); + } + return strings; } @@ -122,6 +130,7 @@ static void fillDict(irdl::detail::dictionary &dict, static void fillDict(irdl::detail::dictionary &dict, const OpStrings &strings) { const auto operandCount = strings.opOperandNames.size(); const auto resultCount = strings.opResultNames.size(); + const auto regionCount = strings.opRegionNames.size(); dict["OP_NAME"] = strings.opName; dict["OP_CPP_NAME"] = strings.opCppName; @@ -131,6 +140,7 @@ static void fillDict(irdl::detail::dictionary &dict, const OpStrings &strings) { operandCount ? joinNameList(strings.opOperandNames) : "{\"\"}"; dict["OP_RESULT_INITIALIZER_LIST"] = resultCount ? joinNameList(strings.opResultNames) : "{\"\"}"; + dict["OP_REGION_COUNT"] = std::to_string(regionCount); } /// Fills a dictionary with values from DialectStrings @@ -179,6 +189,8 @@ static void generateOpGetterDeclarations(irdl::detail::dictionary &dict, const OpStrings &opStrings) { auto opGetters = std::string{}; auto resGetters = std::string{}; + auto regionGetters = std::string{}; + auto regionAdaptorGetters = std::string{}; for (size_t i = 0, end = opStrings.opOperandNames.size(); i < end; ++i) { const auto op = @@ -196,8 +208,23 @@ static void generateOpGetterDeclarations(irdl::detail::dictionary &dict, op, i); } + for (size_t i = 0, end = opStrings.opRegionNames.size(); i < end; ++i) { + const auto op = + llvm::convertToCamelFromSnakeCase(opStrings.opRegionNames[i], true); + regionAdaptorGetters += llvm::formatv( + R"(::mlir::Region &get{0}() { return *getRegions()[{1}]; } + )", + op, i); + regionGetters += llvm::formatv( + R"(::mlir::Region &get{0}() { return (*this)->getRegion({1}); } + )", + op, i); + } + dict["OP_OPERAND_GETTER_DECLS"] = opGetters; dict["OP_RESULT_GETTER_DECLS"] = resGetters; + dict["OP_REGION_ADAPTER_GETTER_DECLS"] = regionAdaptorGetters; + dict["OP_REGION_GETTER_DECLS"] = regionGetters; } static void generateOpBuilderDeclarations(irdl::detail::dictionary &dict, @@ -238,6 +265,22 @@ static void generateOpBuilderDeclarations(irdl::detail::dictionary &dict, dict["OP_BUILD_DECLS"] = buildDecls; } +// add traits to the dictionary, return true if any were added +static SmallVector generateTraits(irdl::OperationOp op, + const OpStrings &strings) { + SmallVector cppTraitNames; + if (!strings.opRegionNames.empty()) { + cppTraitNames.push_back( + llvm::formatv("::mlir::OpTrait::NRegions<{0}>::Impl", + strings.opRegionNames.size()) + .str()); + + // Requires 
verifyInvariantsImpl is implemented on the op + cppTraitNames.emplace_back("::mlir::OpTrait::OpInvariants"); + } + return cppTraitNames; +} + static LogicalResult generateOperationInclude(irdl::OperationOp op, raw_ostream &output, irdl::detail::dictionary &dict) { @@ -247,6 +290,13 @@ static LogicalResult generateOperationInclude(irdl::OperationOp op, const auto opStrings = getStrings(op); fillDict(dict, opStrings); + SmallVector traitNames = generateTraits(op, opStrings); + if (traitNames.empty()) + dict["OP_TEMPLATE_ARGS"] = opStrings.opCppName; + else + dict["OP_TEMPLATE_ARGS"] = llvm::formatv("{0}, {1}", opStrings.opCppName, + llvm::join(traitNames, ", ")); + generateOpGetterDeclarations(dict, opStrings); generateOpBuilderDeclarations(dict, opStrings); @@ -301,6 +351,110 @@ static LogicalResult generateInclude(irdl::DialectOp dialect, return success(); } +static void generateRegionConstraintVerifiers( + irdl::detail::dictionary &dict, irdl::OperationOp op, + const OpStrings &strings, SmallVectorImpl &verifierHelpers, + SmallVectorImpl &verifierCalls) { + auto regionsOp = op.getOp(); + if (strings.opRegionNames.empty() || !regionsOp) + return; + + for (size_t i = 0; i < strings.opRegionNames.size(); ++i) { + std::string regionName = strings.opRegionNames[i]; + std::string helperFnName = + llvm::formatv("__mlir_irdl_local_region_constraint_{0}_{1}", + strings.opCppName, regionName) + .str(); + + // Extract the actual region constraint from the IRDL RegionOp + std::string condition = "true"; + std::string textualConditionName = "any region"; + + if (auto regionDefOp = + dyn_cast(regionsOp->getArgs()[i].getDefiningOp())) { + // Generate constraint condition based on RegionOp attributes + SmallVector conditionParts; + SmallVector descriptionParts; + + // Check number of blocks constraint + if (auto blockCount = regionDefOp.getNumberOfBlocks()) { + conditionParts.push_back( + llvm::formatv("region.getBlocks().size() == {0}", + blockCount.value()) + .str()); + descriptionParts.push_back( + llvm::formatv("exactly {0} block(s)", blockCount.value()).str()); + } + + // Check entry block arguments constraint + if (regionDefOp.getConstrainedArguments()) { + size_t expectedArgCount = regionDefOp.getEntryBlockArgs().size(); + conditionParts.push_back( + llvm::formatv("region.getNumArguments() == {0}", expectedArgCount) + .str()); + descriptionParts.push_back( + llvm::formatv("{0} entry block argument(s)", expectedArgCount) + .str()); + } + + // Combine conditions + if (!conditionParts.empty()) { + condition = llvm::join(conditionParts, " && "); + } + + // Generate descriptive error message + if (!descriptionParts.empty()) { + textualConditionName = + llvm::formatv("region with {0}", + llvm::join(descriptionParts, " and ")) + .str(); + } + } + + verifierHelpers.push_back(llvm::formatv( + R"(static ::llvm::LogicalResult {0}(::mlir::Operation *op, ::mlir::Region ®ion, ::llvm::StringRef regionName, unsigned regionIndex) {{ + if (!({1})) {{ + return op->emitOpError("region #") << regionIndex + << (regionName.empty() ? 
" " : " ('" + regionName + "') ") + << "failed to verify constraint: {2}"; + } + return ::mlir::success(); +})", + helperFnName, condition, textualConditionName)); + + verifierCalls.push_back(llvm::formatv(R"( + if (::mlir::failed({0}(*this, (*this)->getRegion({1}), "{2}", {1}))) + return ::mlir::failure();)", + helperFnName, i, regionName) + .str()); + } +} + +static void generateVerifiers(irdl::detail::dictionary &dict, + irdl::OperationOp op, const OpStrings &strings) { + SmallVector verifierHelpers; + SmallVector verifierCalls; + + generateRegionConstraintVerifiers(dict, op, strings, verifierHelpers, + verifierCalls); + + // Add an overall verifier that sequences the helper calls + std::string verifierDef = + llvm::formatv(R"( +::llvm::LogicalResult {0}::verifyInvariantsImpl() {{ + if(::mlir::failed(verify())) + return ::mlir::failure(); + + {1} + + return ::mlir::success(); +})", + strings.opCppName, llvm::join(verifierCalls, "\n")); + + dict["OP_VERIFIER_HELPERS"] = llvm::join(verifierHelpers, "\n"); + dict["OP_VERIFIER"] = verifierDef; +} + static std::string generateOpDefinition(irdl::detail::dictionary &dict, irdl::OperationOp op) { static const auto perOpDefTemplate = mlir::irdl::detail::Template{ @@ -370,6 +524,8 @@ void {0}::build(::mlir::OpBuilder &opBuilder, ::mlir::OperationState &opState, { dict["OP_BUILD_DEFS"] = buildDefinition; + generateVerifiers(dict, op, opStrings); + std::string str; llvm::raw_string_ostream stream{str}; perOpDefTemplate.render(stream, dict); @@ -427,7 +583,7 @@ static LogicalResult generateLib(irdl::DialectOp dialect, raw_ostream &output, dict["TYPE_PARSER"] = llvm::formatv( R"(static ::mlir::OptionalParseResult generatedTypeParser(::mlir::AsmParser &parser, ::llvm::StringRef *mnemonic, ::mlir::Type &value) { return ::mlir::AsmParser::KeywordSwitch<::mlir::OptionalParseResult>(parser) - {0} + {0} .Default([&](llvm::StringRef keyword, llvm::SMLoc) {{ *mnemonic = keyword; return std::nullopt; @@ -520,6 +676,8 @@ static LogicalResult verifySupported(irdl::DialectOp dialect) { "IRDL C++ translation does not yet support variadic results"); })) .Case(([](irdl::AnyOp) { return success(); })) + .Case(([](irdl::RegionOp) { return success(); })) + .Case(([](irdl::RegionsOp) { return success(); })) .Default([](mlir::Operation *op) -> LogicalResult { return op->emitError("IRDL C++ translation does not yet support " "translation of ") diff --git a/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDecl.txt b/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDecl.txt index e9068e9488f99..93ce0bef1f269 100644 --- a/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDecl.txt +++ b/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDecl.txt @@ -12,15 +12,15 @@ public: struct Properties { }; public: - __OP_CPP_NAME__GenericAdaptorBase(::mlir::Operation *op) - : odsAttrs(op->getRawDictionaryAttrs()), odsOpName(op->getName()), - odsRegions(op->getRegions()) + __OP_CPP_NAME__GenericAdaptorBase(::mlir::Operation *op) + : odsAttrs(op->getRawDictionaryAttrs()), odsOpName(op->getName()), + odsRegions(op->getRegions()) {} /// Return the unstructured operand index of a structured operand along with // the amount of unstructured operands it contains. 
std::pair - getStructuredOperandIndexAndLength (unsigned index, + getStructuredOperandIndexAndLength (unsigned index, unsigned odsOperandsSize) { return {index, 1}; } @@ -32,6 +32,12 @@ public: ::mlir::DictionaryAttr getAttributes() { return odsAttrs; } + + __OP_REGION_ADAPTER_GETTER_DECLS__ + + ::mlir::RegionRange getRegions() { + return odsRegions; + } protected: ::mlir::DictionaryAttr odsAttrs; ::std::optional<::mlir::OperationName> odsOpName; @@ -42,28 +48,28 @@ protected: } // namespace detail template -class __OP_CPP_NAME__GenericAdaptor +class __OP_CPP_NAME__GenericAdaptor : public detail::__OP_CPP_NAME__GenericAdaptorBase { using ValueT = ::llvm::detail::ValueOfRange; using Base = detail::__OP_CPP_NAME__GenericAdaptorBase; public: __OP_CPP_NAME__GenericAdaptor(RangeT values, ::mlir::DictionaryAttr attrs, - ::mlir::OpaqueProperties properties, - ::mlir::RegionRange regions = {}) - : __OP_CPP_NAME__GenericAdaptor(values, attrs, - (properties ? *properties.as<::mlir::EmptyProperties *>() + ::mlir::OpaqueProperties properties, + ::mlir::RegionRange regions = {}) + : __OP_CPP_NAME__GenericAdaptor(values, attrs, + (properties ? *properties.as<::mlir::EmptyProperties *>() : ::mlir::EmptyProperties{}), regions) {} - __OP_CPP_NAME__GenericAdaptor(RangeT values, + __OP_CPP_NAME__GenericAdaptor(RangeT values, const __OP_CPP_NAME__GenericAdaptorBase &base) : Base(base), odsOperands(values) {} - // This template parameter allows using __OP_CPP_NAME__ which is declared + // This template parameter allows using __OP_CPP_NAME__ which is declared // later. template >> - __OP_CPP_NAME__GenericAdaptor(RangeT values, LateInst op) + __OP_CPP_NAME__GenericAdaptor(RangeT values, LateInst op) : Base(op), odsOperands(values) {} /// Return the unstructured operand index of a structured operand along with @@ -77,7 +83,7 @@ public: RangeT getStructuredOperands(unsigned index) { auto valueRange = getStructuredOperandIndexAndLength(index); return {std::next(odsOperands.begin(), valueRange.first), - std::next(odsOperands.begin(), + std::next(odsOperands.begin(), valueRange.first + valueRange.second)}; } @@ -91,7 +97,7 @@ private: RangeT odsOperands; }; -class __OP_CPP_NAME__Adaptor +class __OP_CPP_NAME__Adaptor : public __OP_CPP_NAME__GenericAdaptor<::mlir::ValueRange> { public: using __OP_CPP_NAME__GenericAdaptor::__OP_CPP_NAME__GenericAdaptor; @@ -100,7 +106,7 @@ public: ::llvm::LogicalResult verify(::mlir::Location loc); }; -class __OP_CPP_NAME__ : public ::mlir::Op<__OP_CPP_NAME__> { +class __OP_CPP_NAME__ : public ::mlir::Op<__OP_TEMPLATE_ARGS__> { public: using Op::Op; using Op::print; @@ -112,6 +118,8 @@ public: return {}; } + ::llvm::LogicalResult verifyInvariantsImpl(); + static constexpr ::llvm::StringLiteral getOperationName() { return ::llvm::StringLiteral("__DIALECT_NAME__.__OP_NAME__"); } @@ -147,7 +155,7 @@ public: ::mlir::Operation::operand_range getStructuredOperands(unsigned index) { auto valueRange = getStructuredOperandIndexAndLength(index); return {std::next(getOperation()->operand_begin(), valueRange.first), - std::next(getOperation()->operand_begin(), + std::next(getOperation()->operand_begin(), valueRange.first + valueRange.second)}; } @@ -162,18 +170,19 @@ public: ::mlir::Operation::result_range getStructuredResults(unsigned index) { auto valueRange = getStructuredResultIndexAndLength(index); return {std::next(getOperation()->result_begin(), valueRange.first), - std::next(getOperation()->result_begin(), + std::next(getOperation()->result_begin(), valueRange.first + valueRange.second)}; } 
__OP_OPERAND_GETTER_DECLS__ __OP_RESULT_GETTER_DECLS__ - + __OP_REGION_GETTER_DECLS__ + __OP_BUILD_DECLS__ - static void build(::mlir::OpBuilder &odsBuilder, - ::mlir::OperationState &odsState, - ::mlir::TypeRange resultTypes, - ::mlir::ValueRange operands, + static void build(::mlir::OpBuilder &odsBuilder, + ::mlir::OperationState &odsState, + ::mlir::TypeRange resultTypes, + ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {}); static __OP_CPP_NAME__ create(::mlir::OpBuilder &odsBuilder, diff --git a/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDef.txt b/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDef.txt index 30ca420d77448..f4a1b7a996263 100644 --- a/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDef.txt +++ b/mlir/lib/Target/IRDLToCpp/Templates/PerOperationDef.txt @@ -6,12 +6,14 @@ R"( __NAMESPACE_OPEN__ +__OP_VERIFIER_HELPERS__ + __OP_BUILD_DEFS__ -void __OP_CPP_NAME__::build(::mlir::OpBuilder &odsBuilder, - ::mlir::OperationState &odsState, - ::mlir::TypeRange resultTypes, - ::mlir::ValueRange operands, +void __OP_CPP_NAME__::build(::mlir::OpBuilder &odsBuilder, + ::mlir::OperationState &odsState, + ::mlir::TypeRange resultTypes, + ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes) { assert(operands.size() == __OP_OPERAND_COUNT__); @@ -19,6 +21,9 @@ void __OP_CPP_NAME__::build(::mlir::OpBuilder &odsBuilder, odsState.addOperands(operands); odsState.addAttributes(attributes); odsState.addTypes(resultTypes); + for (unsigned i = 0; i != __OP_REGION_COUNT__; ++i) { + (void)odsState.addRegion(); + } } __OP_CPP_NAME__ @@ -44,6 +49,7 @@ __OP_CPP_NAME__::create(::mlir::ImplicitLocOpBuilder &odsBuilder, return create(odsBuilder, odsBuilder.getLoc(), resultTypes, operands, attributes); } +__OP_VERIFIER__ __NAMESPACE_CLOSE__ diff --git a/mlir/lib/Target/LLVMIR/DebugImporter.cpp b/mlir/lib/Target/LLVMIR/DebugImporter.cpp index 510ec6fe6456f..8b0326518770d 100644 --- a/mlir/lib/Target/LLVMIR/DebugImporter.cpp +++ b/mlir/lib/Target/LLVMIR/DebugImporter.cpp @@ -61,7 +61,8 @@ DICompileUnitAttr DebugImporter::translateImpl(llvm::DICompileUnit *node) { return DICompileUnitAttr::get( context, getOrCreateDistinctID(node), node->getSourceLanguage(), translate(node->getFile()), getStringAttrOrNull(node->getRawProducer()), - node->isOptimized(), emissionKind.value(), nameTableKind.value()); + node->isOptimized(), emissionKind.value(), nameTableKind.value(), + getStringAttrOrNull(node->getRawSplitDebugFilename())); } DICompositeTypeAttr DebugImporter::translateImpl(llvm::DICompositeType *node) { diff --git a/mlir/lib/Target/LLVMIR/DebugTranslation.cpp b/mlir/lib/Target/LLVMIR/DebugTranslation.cpp index a55445deddc2d..eeb87253e5eb8 100644 --- a/mlir/lib/Target/LLVMIR/DebugTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/DebugTranslation.cpp @@ -124,7 +124,9 @@ llvm::DICompileUnit *DebugTranslation::translateImpl(DICompileUnitAttr attr) { attr.getSourceLanguage(), translate(attr.getFile()), attr.getProducer() ? attr.getProducer().getValue() : "", attr.getIsOptimized(), - /*Flags=*/"", /*RV=*/0, /*SplitName=*/{}, + /*Flags=*/"", /*RV=*/0, + attr.getSplitDebugFilename() ? 
attr.getSplitDebugFilename().getValue() + : "", static_cast( attr.getEmissionKind()), 0, true, false, diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp index 4921a1990b6e8..9fcb02eb4be3d 100644 --- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp @@ -2591,13 +2591,34 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder, } builder.SetInsertPoint(*regionBlock, (*regionBlock)->begin()); + + // Check if we can generate no-loop kernel + bool noLoopMode = false; + omp::TargetOp targetOp = wsloopOp->getParentOfType(); + if (targetOp) { + Operation *targetCapturedOp = targetOp.getInnermostCapturedOmpOp(); + // We need this check because, without it, noLoopMode would be set to true + // for every omp.wsloop nested inside a no-loop SPMD target region, even if + // that loop is not the top-level SPMD one. + if (loopOp == targetCapturedOp) { + omp::TargetRegionFlags kernelFlags = + targetOp.getKernelExecFlags(targetCapturedOp); + if (omp::bitEnumContainsAll(kernelFlags, + omp::TargetRegionFlags::spmd | + omp::TargetRegionFlags::no_loop) && + !omp::bitEnumContainsAny(kernelFlags, + omp::TargetRegionFlags::generic)) + noLoopMode = true; + } + } + llvm::OpenMPIRBuilder::InsertPointOrErrorTy wsloopIP = ompBuilder->applyWorkshareLoop( ompLoc.DL, loopInfo, allocaIP, loopNeedsBarrier, convertToScheduleKind(schedule), chunk, isSimd, scheduleMod == omp::ScheduleModifier::monotonic, scheduleMod == omp::ScheduleModifier::nonmonotonic, isOrdered, - workshareLoopType); + workshareLoopType, noLoopMode); if (failed(handleError(wsloopIP, opInst))) return failure(); @@ -3154,6 +3175,45 @@ applyUnrollHeuristic(omp::UnrollHeuristicOp op, llvm::IRBuilderBase &builder, return success(); } +/// Apply a `#pragma omp tile` / `!$omp tile` transformation using the +/// OpenMPIRBuilder. +static LogicalResult applyTile(omp::TileOp op, llvm::IRBuilderBase &builder, + LLVM::ModuleTranslation &moduleTranslation) { + llvm::OpenMPIRBuilder *ompBuilder = moduleTranslation.getOpenMPBuilder(); + llvm::OpenMPIRBuilder::LocationDescription loc(builder); + + SmallVector translatedLoops; + SmallVector translatedSizes; + + for (Value size : op.getSizes()) { + llvm::Value *translatedSize = moduleTranslation.lookupValue(size); + assert(translatedSize && + "sizes clause arguments must already be translated"); + translatedSizes.push_back(translatedSize); + } + + for (Value applyee : op.getApplyees()) { + llvm::CanonicalLoopInfo *consBuilderCLI = + moduleTranslation.lookupOMPLoop(applyee); + assert(applyee && "Canonical loop must already been translated"); + translatedLoops.push_back(consBuilderCLI); + } + + auto generatedLoops = + ompBuilder->tileLoops(loc.DL, translatedLoops, translatedSizes); + if (!op.getGeneratees().empty()) { + for (auto [mlirLoop, genLoop] : + zip_equal(op.getGeneratees(), generatedLoops)) + moduleTranslation.mapOmpLoop(mlirLoop, genLoop); + } + + // CLIs can only be consumed once + for (Value applyee : op.getApplyees()) + moduleTranslation.invalidateOmpLoop(applyee); + + return success(); +} + /// Convert an Atomic Ordering attribute to llvm::AtomicOrdering. 
static llvm::AtomicOrdering convertAtomicOrdering(std::optional ao) { @@ -3595,8 +3655,10 @@ getDeclareTargetRefPtrSuffix(LLVM::GlobalOp globalOp, llvm::StringRef(loc.getFilename()), loc.getLine()); }; + auto vfs = llvm::vfs::getRealFileSystem(); os << llvm::format( - "_%x", ompBuilder.getTargetEntryUniqueInfo(fileInfoCallBack).FileID); + "_%x", + ompBuilder.getTargetEntryUniqueInfo(fileInfoCallBack, *vfs).FileID); } os << "_decl_tgt_ref_ptr"; @@ -5425,6 +5487,12 @@ initTargetDefaultAttrs(omp::TargetOp targetOp, Operation *capturedOp, ? llvm::omp::OMP_TGT_EXEC_MODE_GENERIC_SPMD : llvm::omp::OMP_TGT_EXEC_MODE_GENERIC : llvm::omp::OMP_TGT_EXEC_MODE_SPMD; + if (omp::bitEnumContainsAll(kernelFlags, + omp::TargetRegionFlags::spmd | + omp::TargetRegionFlags::no_loop) && + !omp::bitEnumContainsAny(kernelFlags, omp::TargetRegionFlags::generic)) + attrs.ExecFlags = llvm::omp::OMP_TGT_EXEC_MODE_SPMD_NO_LOOP; + attrs.MinTeams = minTeamsVal; attrs.MaxTeams.front() = maxTeamsVal; attrs.MinThreads = 1; @@ -5888,10 +5956,12 @@ convertDeclareTargetAttr(Operation *op, mlir::omp::DeclareTargetAttr attribute, lineNo); }; + auto vfs = llvm::vfs::getRealFileSystem(); + ompBuilder->registerTargetGlobalVariable( captureClause, deviceClause, isDeclaration, isExternallyVisible, - ompBuilder->getTargetEntryUniqueInfo(fileInfoCallBack), mangledName, - generatedRefs, /*OpenMPSimd*/ false, targetTriple, + ompBuilder->getTargetEntryUniqueInfo(fileInfoCallBack, *vfs), + mangledName, generatedRefs, /*OpenMPSimd*/ false, targetTriple, /*GlobalInitializer*/ nullptr, /*VariableLinkage*/ nullptr, gVal->getType(), gVal); @@ -5901,9 +5971,9 @@ convertDeclareTargetAttr(Operation *op, mlir::omp::DeclareTargetAttr attribute, ompBuilder->Config.hasRequiresUnifiedSharedMemory())) { ompBuilder->getAddrOfDeclareTargetVar( captureClause, deviceClause, isDeclaration, isExternallyVisible, - ompBuilder->getTargetEntryUniqueInfo(fileInfoCallBack), mangledName, - generatedRefs, /*OpenMPSimd*/ false, targetTriple, gVal->getType(), - /*GlobalInitializer*/ nullptr, + ompBuilder->getTargetEntryUniqueInfo(fileInfoCallBack, *vfs), + mangledName, generatedRefs, /*OpenMPSimd*/ false, targetTriple, + gVal->getType(), /*GlobalInitializer*/ nullptr, /*VariableLinkage*/ nullptr); } } @@ -6196,6 +6266,9 @@ convertHostOrTargetOperation(Operation *op, llvm::IRBuilderBase &builder, // the omp.canonical_loop. return applyUnrollHeuristic(op, builder, moduleTranslation); }) + .Case([&](omp::TileOp op) { + return applyTile(op, builder, moduleTranslation); + }) .Case([&](omp::TargetAllocMemOp) { return convertTargetAllocMemOp(*op, builder, moduleTranslation); }) diff --git a/mlir/lib/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.cpp index 7e610cd42e931..8d6fffcca45f2 100644 --- a/mlir/lib/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.cpp @@ -351,6 +351,42 @@ translateConstantOp(ConstantOp constantOp, llvm::IRBuilderBase &builder, return success(); } +/// Translate ptr.ptr_diff operation operation to LLVM IR. 
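+/// As a rough sketch (illustrative only, not the exact emitted IR), a
+/// `ptr.ptr_diff %lhs, %rhs` yielding an i64 lowers along the lines of:
+///   %a = ptrtoaddr ptr %lhs to i64
+///   %b = ptrtoaddr ptr %rhs to i64
+///   %d = sub i64 %a, %b   ; nuw/nsw carried over from the op's flags
+/// followed by a signed integer cast (trunc/sext) when the result type does
+/// not match the address-sized integer type.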
+static LogicalResult +translatePtrDiffOp(PtrDiffOp ptrDiffOp, llvm::IRBuilderBase &builder, + LLVM::ModuleTranslation &moduleTranslation) { + llvm::Value *lhs = moduleTranslation.lookupValue(ptrDiffOp.getLhs()); + llvm::Value *rhs = moduleTranslation.lookupValue(ptrDiffOp.getRhs()); + + if (!lhs || !rhs) + return ptrDiffOp.emitError("Failed to lookup operands"); + + // Translate result type to LLVM type + llvm::Type *resultType = + moduleTranslation.convertType(ptrDiffOp.getResult().getType()); + if (!resultType) + return ptrDiffOp.emitError("Failed to translate result type"); + + PtrDiffFlags flags = ptrDiffOp.getFlags(); + + // Convert both pointers to integers using ptrtoaddr, and compute the + // difference: lhs - rhs + llvm::Value *llLhs = builder.CreatePtrToAddr(lhs); + llvm::Value *llRhs = builder.CreatePtrToAddr(rhs); + llvm::Value *result = builder.CreateSub( + llLhs, llRhs, /*Name=*/"", + /*HasNUW=*/(flags & PtrDiffFlags::nuw) == PtrDiffFlags::nuw, + /*HasNSW=*/(flags & PtrDiffFlags::nsw) == PtrDiffFlags::nsw); + + // Convert the difference to the expected result type by truncating or + // extending. + if (result->getType() != resultType) + result = builder.CreateIntCast(result, resultType, /*isSigned=*/true); + + moduleTranslation.mapValue(ptrDiffOp.getResult(), result); + return success(); +} + /// Implementation of the dialect interface that translates operations belonging /// to the `ptr` dialect to LLVM IR. class PtrDialectLLVMIRTranslationInterface @@ -371,6 +407,9 @@ class PtrDialectLLVMIRTranslationInterface .Case([&](PtrAddOp ptrAddOp) { return translatePtrAddOp(ptrAddOp, builder, moduleTranslation); }) + .Case([&](PtrDiffOp ptrDiffOp) { + return translatePtrDiffOp(ptrDiffOp, builder, moduleTranslation); + }) .Case([&](LoadOp loadOp) { return translateLoadOp(loadOp, builder, moduleTranslation); }) diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp index 7a888bb3778a9..9603813e059d3 100644 --- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp @@ -447,7 +447,7 @@ ModuleImport::processAliasScopeMetadata(const llvm::MDNode *node) { if (verifySelfRef(node)) return DistinctAttr::create(builder.getUnitAttr()); - auto name = cast(node->getOperand(0)); + auto *name = cast(node->getOperand(0)); return builder.getStringAttr(name->getString()); }; @@ -1123,7 +1123,7 @@ void ModuleImport::setExactFlag(llvm::Instruction *inst, Operation *op) const { void ModuleImport::setDisjointFlag(llvm::Instruction *inst, Operation *op) const { auto iface = cast(op); - auto instDisjoint = cast(inst); + auto *instDisjoint = cast(inst); iface.setIsDisjoint(instDisjoint->isDisjoint()); } @@ -1374,7 +1374,7 @@ LogicalResult ModuleImport::convertAlias(llvm::GlobalAlias *alias) { AliasOp aliasOp = AliasOp::create(builder, mlirModule.getLoc(), type, convertLinkageFromLLVM(alias->getLinkage()), alias->getName(), - /*dso_local=*/alias->isDSOLocal(), + /*dsoLocal=*/alias->isDSOLocal(), /*thread_local=*/alias->isThreadLocal(), /*attrs=*/ArrayRef()); globalInsertionOp = aliasOp; @@ -1507,8 +1507,8 @@ LogicalResult ModuleImport::convertGlobal(llvm::GlobalVariable *globalVar) { GlobalOp globalOp = GlobalOp::create( builder, mlirModule.getLoc(), type, globalVar->isConstant(), convertLinkageFromLLVM(globalVar->getLinkage()), StringRef(globalName), - valueAttr, alignment, /*addr_space=*/globalVar->getAddressSpace(), - /*dso_local=*/globalVar->isDSOLocal(), + valueAttr, alignment, /*addrSpace=*/globalVar->getAddressSpace(), + 
/*dsoLocal=*/globalVar->isDSOLocal(), /*thread_local=*/globalVar->isThreadLocal(), /*comdat=*/SymbolRefAttr(), /*attrs=*/ArrayRef(), /*dbgExprs=*/globalExpressionAttrs); globalInsertionOp = globalOp; diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp index adc5a74e2031f..5a3eb209f0a92 100644 --- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp @@ -647,7 +647,7 @@ llvm::Constant *mlir::LLVM::detail::getLLVMConstant( llvm::ElementCount::get(numElements, /*Scalable=*/isScalable), child); if (llvmType->isArrayTy()) { auto *arrayType = llvm::ArrayType::get(elementType, numElements); - if (child->isZeroValue()) { + if (child->isZeroValue() && !elementType->isFPOrFPVectorTy()) { return llvm::ConstantAggregateZero::get(arrayType); } else { if (llvm::ConstantDataSequential::isElementTypeCompatible( diff --git a/mlir/lib/Transforms/BubbleDownMemorySpaceCasts.cpp b/mlir/lib/Transforms/BubbleDownMemorySpaceCasts.cpp new file mode 100644 index 0000000000000..00dac19e37171 --- /dev/null +++ b/mlir/lib/Transforms/BubbleDownMemorySpaceCasts.cpp @@ -0,0 +1,69 @@ +//===- BubbleDownMemorySpaceCasts.cpp - Bubble down casts transform -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Transforms/BubbleDownMemorySpaceCasts.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Interfaces/MemOpInterfaces.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "mlir/Transforms/Passes.h" +#include "llvm/Support/Debug.h" + +using namespace mlir; + +namespace mlir { +#define GEN_PASS_DEF_BUBBLEDOWNMEMORYSPACECASTS +#include "mlir/Transforms/Passes.h.inc" +} // namespace mlir + +namespace { +//===----------------------------------------------------------------------===// +// BubbleDownCastsPattern pattern +//===----------------------------------------------------------------------===// +/// Pattern to bubble down casts into consumer operations. 
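+/// Rough illustration (op names chosen only for exposition): given
+///   %cast = memref.memory_space_cast %src : memref<4xf32, 1> to memref<4xf32>
+///   %v    = memref.load %cast[%i]
+/// an op implementing MemorySpaceCastConsumerOpInterface may rewrite itself to
+/// consume %src directly, letting the cast sink further down its use chain.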
+struct BubbleDownCastsPattern + : public OpInterfaceRewritePattern { + using OpInterfaceRewritePattern::OpInterfaceRewritePattern; + + LogicalResult matchAndRewrite(MemorySpaceCastConsumerOpInterface op, + PatternRewriter &rewriter) const override { + FailureOr>> results = + op.bubbleDownCasts(rewriter); + if (failed(results)) + return failure(); + if (!results->has_value()) { + rewriter.modifyOpInPlace(op, []() {}); + return success(); + } + rewriter.replaceOp(op, **results); + return success(); + } +}; + +//===----------------------------------------------------------------------===// +// BubbleDownMemorySpaceCasts pass +//===----------------------------------------------------------------------===// + +struct BubbleDownMemorySpaceCasts + : public impl::BubbleDownMemorySpaceCastsBase { + using impl::BubbleDownMemorySpaceCastsBase< + BubbleDownMemorySpaceCasts>::BubbleDownMemorySpaceCastsBase; + + void runOnOperation() override { + RewritePatternSet patterns(&getContext()); + populateBubbleDownMemorySpaceCastPatterns(patterns, PatternBenefit(1)); + if (failed(applyPatternsGreedily(getOperation(), std::move(patterns)))) + signalPassFailure(); + } +}; +} // namespace + +void mlir::populateBubbleDownMemorySpaceCastPatterns( + RewritePatternSet &patterns, PatternBenefit benefit) { + patterns.add(patterns.getContext(), benefit); +} diff --git a/mlir/lib/Transforms/CMakeLists.txt b/mlir/lib/Transforms/CMakeLists.txt index 058039e47313e..54b67f5c7a91e 100644 --- a/mlir/lib/Transforms/CMakeLists.txt +++ b/mlir/lib/Transforms/CMakeLists.txt @@ -6,6 +6,7 @@ add_mlir_library(MLIRTransforms ControlFlowSink.cpp CSE.cpp GenerateRuntimeVerification.cpp + BubbleDownMemorySpaceCasts.cpp InlinerPass.cpp LocationSnapshot.cpp LoopInvariantCodeMotion.cpp @@ -31,6 +32,7 @@ add_mlir_library(MLIRTransforms MLIRAnalysis MLIRFunctionInterfaces MLIRLoopLikeInterface + MLIRMemOpInterfaces MLIRMemorySlotInterfaces MLIRPass MLIRRuntimeVerifiableOpInterface diff --git a/mlir/lib/Transforms/Mem2Reg.cpp b/mlir/lib/Transforms/Mem2Reg.cpp index d36a3c1362c19..b3057129fb9fd 100644 --- a/mlir/lib/Transforms/Mem2Reg.cpp +++ b/mlir/lib/Transforms/Mem2Reg.cpp @@ -286,7 +286,7 @@ LogicalResult MemorySlotPromotionAnalyzer::computeBlockingUses( mlir::getForwardSlice(slot.ptr, &forwardSlice); for (Operation *user : forwardSlice) { // If the next operation has no blocking uses, everything is fine. 
- auto it = userToBlockingUses.find(user); + auto *it = userToBlockingUses.find(user); if (it == userToBlockingUses.end()) continue; diff --git a/mlir/lib/Transforms/RemoveDeadValues.cpp b/mlir/lib/Transforms/RemoveDeadValues.cpp index 0e84b6dd17f29..e0c65b0e09774 100644 --- a/mlir/lib/Transforms/RemoveDeadValues.cpp +++ b/mlir/lib/Transforms/RemoveDeadValues.cpp @@ -88,6 +88,8 @@ struct FunctionToCleanUp { struct OperationToCleanup { Operation *op; BitVector nonLive; + Operation *callee = + nullptr; // Optional: For CallOpInterface ops, stores the callee function }; struct BlockArgsToCleanup { @@ -287,7 +289,8 @@ static void processSimpleOp(Operation *op, RunLivenessAnalysis &la, static void processFuncOp(FunctionOpInterface funcOp, Operation *module, RunLivenessAnalysis &la, DenseSet &nonLiveSet, RDVFinalCleanupList &cl) { - LDBG() << "Processing function op: " << funcOp.getOperation()->getName(); + LDBG() << "Processing function op: " + << OpWithFlags(funcOp, OpPrintingFlags().skipRegions()); if (funcOp.isPublic() || funcOp.isExternal()) { LDBG() << "Function is public or external, skipping: " << funcOp.getOperation()->getName(); @@ -306,19 +309,19 @@ static void processFuncOp(FunctionOpInterface funcOp, Operation *module, nonLiveSet.insert(arg); } - // Do (2). + // Do (2). (Skip creating generic operand cleanup entries for call ops. + // Call arguments will be removed in the call-site specific segment-aware + // cleanup, avoiding generic eraseOperands bitvector mechanics.) SymbolTable::UseRange uses = *funcOp.getSymbolUses(module); for (SymbolTable::SymbolUse use : uses) { Operation *callOp = use.getUser(); assert(isa(callOp) && "expected a call-like user"); - // The number of operands in the call op may not match the number of - // arguments in the func op. - BitVector nonLiveCallOperands(callOp->getNumOperands(), false); - SmallVector callOpOperands = - operandsToOpOperands(cast(callOp).getArgOperands()); - for (int index : nonLiveArgs.set_bits()) - nonLiveCallOperands.set(callOpOperands[index]->getOperandNumber()); - cl.operands.push_back({callOp, nonLiveCallOperands}); + // Push an empty operand cleanup entry so that call-site specific logic in + // cleanUpDeadVals runs (it keys off CallOpInterface). The BitVector is + // intentionally all false to avoid generic erasure. + // Store the funcOp as the callee to avoid expensive symbol lookup later. + cl.operands.push_back({callOp, BitVector(callOp->getNumOperands(), false), + funcOp.getOperation()}); } // Do (3). @@ -746,6 +749,10 @@ static void cleanUpDeadVals(RDVFinalCleanupList &list) { // 3. Functions LDBG() << "Cleaning up " << list.functions.size() << " functions"; + // Record which function arguments were erased so we can shrink call-site + // argument segments for CallOpInterface operations (e.g. ops using + // AttrSizedOperandSegments) in the next phase. + DenseMap erasedFuncArgs; for (auto &f : list.functions) { LDBG() << "Cleaning up function: " << f.funcOp.getOperation()->getName(); LDBG() << " Erasing " << f.nonLiveArgs.count() << " non-live arguments"; @@ -754,17 +761,52 @@ static void cleanUpDeadVals(RDVFinalCleanupList &list) { // Some functions may not allow erasing arguments or results. These calls // return failure in such cases without modifying the function, so it's okay // to proceed. - (void)f.funcOp.eraseArguments(f.nonLiveArgs); + if (succeeded(f.funcOp.eraseArguments(f.nonLiveArgs))) { + // Record only if we actually erased something. 
+ if (f.nonLiveArgs.any()) + erasedFuncArgs.try_emplace(f.funcOp.getOperation(), f.nonLiveArgs); + } (void)f.funcOp.eraseResults(f.nonLiveRets); } // 4. Operands LDBG() << "Cleaning up " << list.operands.size() << " operand lists"; for (OperationToCleanup &o : list.operands) { - if (o.op->getNumOperands() > 0) { - LDBG() << "Erasing " << o.nonLive.count() - << " non-live operands from operation: " - << OpWithFlags(o.op, OpPrintingFlags().skipRegions()); + // Handle call-specific cleanup only when we have a cached callee reference. + // This avoids expensive symbol lookup and is defensive against future + // changes. + bool handledAsCall = false; + if (o.callee && isa(o.op)) { + auto call = cast(o.op); + auto it = erasedFuncArgs.find(o.callee); + if (it != erasedFuncArgs.end()) { + const BitVector &deadArgIdxs = it->second; + MutableOperandRange args = call.getArgOperandsMutable(); + // First, erase the call arguments corresponding to erased callee + // args. We iterate backwards to preserve indices. + for (unsigned argIdx : llvm::reverse(deadArgIdxs.set_bits())) + args.erase(argIdx); + // If this operand cleanup entry also has a generic nonLive bitvector, + // clear bits for call arguments we already erased above to avoid + // double-erasing (which could impact other segments of ops with + // AttrSizedOperandSegments). + if (o.nonLive.any()) { + // Map the argument logical index to the operand number(s) recorded. + int operandOffset = call.getArgOperands().getBeginOperandIndex(); + for (int argIdx : deadArgIdxs.set_bits()) { + int operandNumber = operandOffset + argIdx; + if (operandNumber < static_cast(o.nonLive.size())) + o.nonLive.reset(operandNumber); + } + } + handledAsCall = true; + } + } + // Perform generic operand erasure for: + // - Non-call operations + // - Call operations without cached callee (where handledAsCall is false) + // But skip call operations that were already handled via segment-aware path + if (!handledAsCall && o.nonLive.any()) { o.op->eraseOperands(o.nonLive); } } diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp index 5e07509871ea2..68ad3acf295c8 100644 --- a/mlir/lib/Transforms/Utils/FoldUtils.cpp +++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp @@ -16,6 +16,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/Matchers.h" #include "mlir/IR/Operation.h" +#include "llvm/Support/DebugLog.h" using namespace mlir; @@ -67,7 +68,8 @@ static Operation *materializeConstant(Dialect *dialect, OpBuilder &builder, // OperationFolder //===----------------------------------------------------------------------===// -LogicalResult OperationFolder::tryToFold(Operation *op, bool *inPlaceUpdate) { +LogicalResult OperationFolder::tryToFold(Operation *op, bool *inPlaceUpdate, + int maxIterations) { if (inPlaceUpdate) *inPlaceUpdate = false; @@ -86,7 +88,7 @@ LogicalResult OperationFolder::tryToFold(Operation *op, bool *inPlaceUpdate) { // Try to fold the operation. SmallVector results; - if (failed(tryToFold(op, results))) + if (failed(tryToFold(op, results, maxIterations))) return failure(); // Check to see if the operation was just updated in place. @@ -224,10 +226,19 @@ bool OperationFolder::isFolderOwnedConstant(Operation *op) const { /// Tries to perform folding on the given `op`. If successful, populates /// `results` with the results of the folding. 
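+/// `maxIterations` bounds repeated in-place folding: as long as fold() keeps
+/// succeeding without producing fold results (i.e. it only updated the op in
+/// place), it is re-invoked, up to `maxIterations` attempts in total.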
LogicalResult OperationFolder::tryToFold(Operation *op, - SmallVectorImpl &results) { + SmallVectorImpl &results, + int maxIterations) { SmallVector foldResults; - if (failed(op->fold(foldResults)) || - failed(processFoldResults(op, results, foldResults))) + if (failed(op->fold(foldResults))) + return failure(); + int count = 1; + do { + LDBG() << "Folded in place #" << count + << " times: " << OpWithFlags(op, OpPrintingFlags().skipRegions()); + } while (count++ < maxIterations && foldResults.empty() && + succeeded(op->fold(foldResults))); + + if (failed(processFoldResults(op, results, foldResults))) return failure(); return success(); } diff --git a/mlir/python/CMakeLists.txt b/mlir/python/CMakeLists.txt index d6686bb89ce4e..9f5246de6bda0 100644 --- a/mlir/python/CMakeLists.txt +++ b/mlir/python/CMakeLists.txt @@ -873,85 +873,89 @@ if(NOT LLVM_ENABLE_IDE) ) endif() -# _mlir stubgen -# Note: All this needs to come before add_mlir_python_modules(MLIRPythonModules so that the install targets for the -# generated type stubs get created. - -set(_core_type_stub_sources - _mlir/__init__.pyi - _mlir/ir.pyi - _mlir/passmanager.pyi - _mlir/rewrite.pyi -) - -# Note 1: INTERFACE_SOURCES is a genex ($ $) -# which will be evaluated by file(GENERATE ...) inside mlir_generate_type_stubs. This will evaluate to the correct -# thing in the build dir (i.e., actual source dir paths) and in the install dir -# (where it's a conventional path; see install/lib/cmake/mlir/MLIRTargets.cmake). -# -# Note 2: MLIRPythonExtension.Core is the target that is defined using target_sources(INTERFACE) -# **NOT** MLIRPythonModules.extension._mlir.dso. So be sure to use the correct target! -get_target_property(_core_extension_srcs MLIRPythonExtension.Core INTERFACE_SOURCES) - -# Why is MODULE_NAME _mlir here but mlir._mlir_libs._mlirPythonTestNanobind below??? -# The _mlir extension can be imported independently of any other python code and/or extension modules. -# I.e., you could do `cd $MLIRPythonModules_ROOT_PREFIX/_mlir_libs && python -c "import _mlir"` (try it!). -# _mlir is also (currently) the only extension for which this is possible because dialect extensions modules, -# which generally make use of `mlir_value_subclass/mlir_type_subclass/mlir_attribute_subclass`, perform an -# `import mlir` right when they're loaded (see the mlir_*_subclass ctors in NanobindAdaptors.h). -# Note, this also why IMPORT_PATHS "${MLIRPythonModules_ROOT_PREFIX}/_mlir_libs" here while below -# "${MLIRPythonModules_ROOT_PREFIX}/.." (because MLIR_BINDINGS_PYTHON_INSTALL_PREFIX, by default, ends at mlir). -# -# Further note: this function creates file targets like -# "${CMAKE_CURRENT_BINARY_DIR}/type_stubs/_mlir_libs/_mlir/__init__.pyi". These must match the file targets -# that declare_mlir_python_sources expects, which are like "${ROOT_DIR}/${WHATEVER_SOURCE}". -# This is why _mlir_libs is prepended below. -mlir_generate_type_stubs( - MODULE_NAME _mlir - DEPENDS_TARGETS MLIRPythonModules.extension._mlir.dso - OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs/_mlir_libs" - OUTPUTS "${_core_type_stub_sources}" - DEPENDS_TARGET_SRC_DEPS "${_core_extension_srcs}" - IMPORT_PATHS "${MLIRPythonModules_ROOT_PREFIX}/_mlir_libs" -) -set(_mlir_typestub_gen_target "${NB_STUBGEN_CUSTOM_TARGET}") - -list(TRANSFORM _core_type_stub_sources PREPEND "_mlir_libs/") -# Note, we do not do ADD_TO_PARENT here so that the type stubs are not associated (as mlir_DEPENDS) with -# MLIRPythonSources.Core (or something) when a distro is installed/created. 
Otherwise they would not be regenerated -# by users of the distro (the stubs are still installed in the distro - they are just not added to mlir_DEPENDS). -declare_mlir_python_sources( - MLIRPythonExtension.Core.type_stub_gen - ROOT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs" - SOURCES "${_core_type_stub_sources}" -) - -# _mlirPythonTestNanobind stubgen +# Stubgen doesn't work when cross-compiling (stubgen will run in the host interpreter and then fail +# to find the extension module for the host arch). +if(NOT CMAKE_CROSSCOMPILING) + # _mlir stubgen + # Note: All this needs to come before add_mlir_python_modules(MLIRPythonModules so that the install targets for the + # generated type stubs get created. + + set(_core_type_stub_sources + _mlir/__init__.pyi + _mlir/ir.pyi + _mlir/passmanager.pyi + _mlir/rewrite.pyi + ) -if(MLIR_INCLUDE_TESTS) - get_target_property(_test_extension_srcs MLIRPythonTestSources.PythonTestExtensionNanobind INTERFACE_SOURCES) + # Note 1: INTERFACE_SOURCES is a genex ($ $) + # which will be evaluated by file(GENERATE ...) inside mlir_generate_type_stubs. This will evaluate to the correct + # thing in the build dir (i.e., actual source dir paths) and in the install dir + # (where it's a conventional path; see install/lib/cmake/mlir/MLIRTargets.cmake). + # + # Note 2: MLIRPythonExtension.Core is the target that is defined using target_sources(INTERFACE) + # **NOT** MLIRPythonModules.extension._mlir.dso. So be sure to use the correct target! + get_target_property(_core_extension_srcs MLIRPythonExtension.Core INTERFACE_SOURCES) + + # Why is MODULE_NAME _mlir here but mlir._mlir_libs._mlirPythonTestNanobind below??? + # The _mlir extension can be imported independently of any other python code and/or extension modules. + # I.e., you could do `cd $MLIRPythonModules_ROOT_PREFIX/_mlir_libs && python -c "import _mlir"` (try it!). + # _mlir is also (currently) the only extension for which this is possible because dialect extensions modules, + # which generally make use of `mlir_value_subclass/mlir_type_subclass/mlir_attribute_subclass`, perform an + # `import mlir` right when they're loaded (see the mlir_*_subclass ctors in NanobindAdaptors.h). + # Note, this also why IMPORT_PATHS "${MLIRPythonModules_ROOT_PREFIX}/_mlir_libs" here while below + # "${MLIRPythonModules_ROOT_PREFIX}/.." (because MLIR_BINDINGS_PYTHON_INSTALL_PREFIX, by default, ends at mlir). + # + # Further note: this function creates file targets like + # "${CMAKE_CURRENT_BINARY_DIR}/type_stubs/_mlir_libs/_mlir/__init__.pyi". These must match the file targets + # that declare_mlir_python_sources expects, which are like "${ROOT_DIR}/${WHATEVER_SOURCE}". + # This is why _mlir_libs is prepended below. mlir_generate_type_stubs( - # This is the FQN path because dialect modules import _mlir when loaded. See above. - MODULE_NAME mlir._mlir_libs._mlirPythonTestNanobind - DEPENDS_TARGETS - # You need both _mlir and _mlirPythonTestNanobind because dialect modules import _mlir when loaded - # (so _mlir needs to be built before calling stubgen). - MLIRPythonModules.extension._mlir.dso - MLIRPythonModules.extension._mlirPythonTestNanobind.dso - # You need this one so that ir.py "built" because mlir._mlir_libs.__init__.py import mlir.ir in _site_initialize. 
- MLIRPythonModules.sources.MLIRPythonSources.Core.Python + MODULE_NAME _mlir + DEPENDS_TARGETS MLIRPythonModules.extension._mlir.dso OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs/_mlir_libs" - OUTPUTS _mlirPythonTestNanobind.pyi - DEPENDS_TARGET_SRC_DEPS "${_test_extension_srcs}" - IMPORT_PATHS "${MLIRPythonModules_ROOT_PREFIX}/.." + OUTPUTS "${_core_type_stub_sources}" + DEPENDS_TARGET_SRC_DEPS "${_core_extension_srcs}" + IMPORT_PATHS "${MLIRPythonModules_ROOT_PREFIX}/_mlir_libs" ) - set(_mlirPythonTestNanobind_typestub_gen_target "${NB_STUBGEN_CUSTOM_TARGET}") + set(_mlir_typestub_gen_target "${NB_STUBGEN_CUSTOM_TARGET}") + + list(TRANSFORM _core_type_stub_sources PREPEND "_mlir_libs/") + # Note, we do not do ADD_TO_PARENT here so that the type stubs are not associated (as mlir_DEPENDS) with + # MLIRPythonSources.Core (or something) when a distro is installed/created. Otherwise they would not be regenerated + # by users of the distro (the stubs are still installed in the distro - they are just not added to mlir_DEPENDS). declare_mlir_python_sources( - MLIRPythonTestSources.PythonTestExtensionNanobind.type_stub_gen + MLIRPythonExtension.Core.type_stub_gen ROOT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs" - ADD_TO_PARENT MLIRPythonTestSources.Dialects - SOURCES _mlir_libs/_mlirPythonTestNanobind.pyi + SOURCES "${_core_type_stub_sources}" ) + + # _mlirPythonTestNanobind stubgen + + if(MLIR_INCLUDE_TESTS) + get_target_property(_test_extension_srcs MLIRPythonTestSources.PythonTestExtensionNanobind INTERFACE_SOURCES) + mlir_generate_type_stubs( + # This is the FQN path because dialect modules import _mlir when loaded. See above. + MODULE_NAME mlir._mlir_libs._mlirPythonTestNanobind + DEPENDS_TARGETS + # You need both _mlir and _mlirPythonTestNanobind because dialect modules import _mlir when loaded + # (so _mlir needs to be built before calling stubgen). + MLIRPythonModules.extension._mlir.dso + MLIRPythonModules.extension._mlirPythonTestNanobind.dso + # You need this one so that ir.py "built" because mlir._mlir_libs.__init__.py import mlir.ir in _site_initialize. + MLIRPythonModules.sources.MLIRPythonSources.Core.Python + OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs/_mlir_libs" + OUTPUTS _mlirPythonTestNanobind.pyi + DEPENDS_TARGET_SRC_DEPS "${_test_extension_srcs}" + IMPORT_PATHS "${MLIRPythonModules_ROOT_PREFIX}/.." + ) + set(_mlirPythonTestNanobind_typestub_gen_target "${NB_STUBGEN_CUSTOM_TARGET}") + declare_mlir_python_sources( + MLIRPythonTestSources.PythonTestExtensionNanobind.type_stub_gen + ROOT_DIR "${CMAKE_CURRENT_BINARY_DIR}/type_stubs" + ADD_TO_PARENT MLIRPythonTestSources.Dialects + SOURCES _mlir_libs/_mlirPythonTestNanobind.pyi + ) + endif() endif() ################################################################################ @@ -959,18 +963,23 @@ endif() # This must come last. 
################################################################################ +set(_declared_sources MLIRPythonSources MLIRPythonExtension.RegisterEverything) +if(NOT CMAKE_CROSSCOMPILING) + list(APPEND _declared_sources MLIRPythonExtension.Core.type_stub_gen) +endif() + add_mlir_python_modules(MLIRPythonModules ROOT_PREFIX ${MLIRPythonModules_ROOT_PREFIX} INSTALL_PREFIX "${MLIR_BINDINGS_PYTHON_INSTALL_PREFIX}" DECLARED_SOURCES - MLIRPythonSources - MLIRPythonExtension.RegisterEverything - MLIRPythonExtension.Core.type_stub_gen + ${_declared_sources} ${_ADDL_TEST_SOURCES} COMMON_CAPI_LINK_LIBS MLIRPythonCAPI ) -add_dependencies(MLIRPythonModules "${_mlir_typestub_gen_target}") -if(MLIR_INCLUDE_TESTS) - add_dependencies(MLIRPythonModules "${_mlirPythonTestNanobind_typestub_gen_target}") +if(NOT CMAKE_CROSSCOMPILING) + add_dependencies(MLIRPythonModules "${_mlir_typestub_gen_target}") + if(MLIR_INCLUDE_TESTS) + add_dependencies(MLIRPythonModules "${_mlirPythonTestNanobind_typestub_gen_target}") + endif() endif() diff --git a/mlir/python/mlir/dialects/transform/structured.py b/mlir/python/mlir/dialects/transform/structured.py index bf40cc532065d..e3bacb5777d9f 100644 --- a/mlir/python/mlir/dialects/transform/structured.py +++ b/mlir/python/mlir/dialects/transform/structured.py @@ -44,18 +44,12 @@ def __init__( loc=None, ip=None, ): - # No other types are allowed, so hard-code those here. - allocated_buffer_type = transform.AnyValueType.get() - new_ops_type = transform.AnyOpType.get() - if isinstance(memory_space, int): memory_space = str(memory_space) if isinstance(memory_space, str): memory_space = Attribute.parse(memory_space) super().__init__( - allocated_buffer_type, - new_ops_type, target, memory_space=memory_space, memcpy_op=memcpy_op, diff --git a/mlir/python/mlir/dialects/transform/tune.py b/mlir/python/mlir/dialects/transform/tune.py index f63f88a382422..b3bfa8015c4d8 100644 --- a/mlir/python/mlir/dialects/transform/tune.py +++ b/mlir/python/mlir/dialects/transform/tune.py @@ -6,6 +6,9 @@ from ...ir import ( Type, + Value, + Operation, + OpView, Attribute, ArrayAttr, StringAttr, @@ -19,7 +22,10 @@ from .._transform_tune_extension_ops_gen import _Dialect try: - from .._ods_common import _cext as _ods_cext + from .._ods_common import ( + get_op_result_or_value as _get_op_result_or_value, + _cext as _ods_cext, + ) except ImportError as e: raise RuntimeError("Error loading imports from extension module") from e @@ -36,7 +42,7 @@ def __init__( ArrayAttr, Sequence[Union[Attribute, bool, int, float, str]], Attribute ], *, - selected: Optional[Attribute] = None, + selected: Optional[Union[Attribute, bool, int, float, str]] = None, loc=None, ip=None, ): @@ -75,8 +81,62 @@ def knob( ArrayAttr, Sequence[Union[Attribute, bool, int, float, str]], Attribute ], *, - selected: Optional[Attribute] = None, + selected: Optional[Union[Attribute, bool, int, float, str]] = None, loc=None, ip=None, ): return KnobOp(result, name, options, selected=selected, loc=loc, ip=ip) + + +@_ods_cext.register_operation(_Dialect, replace=True) +class AlternativesOp(AlternativesOp): + def __init__( + self, + results: Sequence[Type], + name: Union[StringAttr, str], + num_alternatives: int, + *, + selected_region: Optional[ + Union[int, IntegerAttr, Value, Operation, OpView] + ] = None, + loc=None, + ip=None, + ): + if isinstance(name, str): + name = StringAttr.get(name) + + selected_region_attr = selected_region_param = None + if isinstance(selected_region, IntegerAttr): + selected_region_attr = selected_region + elif 
isinstance(selected_region, int): + selected_region_attr = IntegerAttr.get( + IntegerType.get_signless(32), selected_region + ) + elif isinstance(selected_region, (Value, Operation, OpView)): + selected_region_param = _get_op_result_or_value(selected_region) + + super().__init__( + results, + name, + num_alternatives, + selected_region_attr=selected_region_attr, + selected_region_param=selected_region_param, + loc=loc, + ip=ip, + ) + for region in self.regions: + region.blocks.append() + + +def alternatives( + results: Sequence[Type], + name: Union[StringAttr, str], + num_alternatives: int, + *, + selected_region: Optional[Union[int, IntegerAttr, Value, Operation, OpView]] = None, + loc=None, + ip=None, +): + return AlternativesOp( + results, name, num_alternatives, selected_region=selected_region, loc=loc, ip=ip + ) diff --git a/mlir/test/CAPI/llvm.c b/mlir/test/CAPI/llvm.c index 12a436ad12fc4..f5fbb4645cd5d 100644 --- a/mlir/test/CAPI/llvm.c +++ b/mlir/test/CAPI/llvm.c @@ -270,7 +270,7 @@ static void testDebugInfoAttributes(MlirContext ctx) { MlirAttribute compile_unit = mlirLLVMDICompileUnitAttrGet( ctx, id, LLVMDWARFSourceLanguageC99, file, foo, false, - MlirLLVMDIEmissionKindFull, MlirLLVMDINameTableKindDefault); + MlirLLVMDIEmissionKindFull, MlirLLVMDINameTableKindDefault, bar); // CHECK: #llvm.di_compile_unit<{{.*}}> mlirAttributeDump(compile_unit); diff --git a/mlir/test/CMakeLists.txt b/mlir/test/CMakeLists.txt index 628adcfb6e285..e64935364997c 100644 --- a/mlir/test/CMakeLists.txt +++ b/mlir/test/CMakeLists.txt @@ -84,6 +84,7 @@ llvm_canonicalize_cmake_booleans( MLIR_RUN_CUDA_SM80_TESTS MLIR_RUN_CUDA_SM80_LT_TESTS MLIR_RUN_CUDA_SM90_TESTS + BUILD_SHARED_LIBS ) configure_lit_site_cfg( @@ -125,6 +126,10 @@ set(MLIR_TEST_DEPENDS if(NOT MLIR_STANDALONE_BUILD) list(APPEND MLIR_TEST_DEPENDS FileCheck count not split-file yaml2obj) endif() +# Examples/standalone/test.toy (vis-a-vis the standalone example) depends on these. +if(LLVM_INCLUDE_EXAMPLES) + list(APPEND MLIR_TEST_DEPENDS MLIRCAPIArith) +endif() set(MLIR_TEST_DEPENDS ${MLIR_TEST_DEPENDS} mlir-capi-pdl-test diff --git a/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir b/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir index cc1162d8b0de8..2fd3df6dcfa71 100644 --- a/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir +++ b/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir @@ -8,6 +8,8 @@ // Note: #gpu.address_space is hardcoded to `1` here because the // test pass doesn't set up the GPU address space conversions. 
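+// The MMRA tag captured below is attached to the workgroup-scope release and
+// acquire fences that amdgpu.lds_barrier now lowers to (see the @lds_barrier
+// test further down in this file).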
+// CHECK: #[[$MMRA_TAG:.+]] = #llvm.mmra_tag<"amdgpu-synchronize-as":"local"> + #gpu_global_addrspace = 1 // CHECK-LABEL: func @fat_raw_buffer_cast @@ -17,7 +19,7 @@ func.func @fat_raw_buffer_cast(%buf: memref<8xi32, #gpu_global_addrspace>) -> me // CHECK-DAG: %[[offset:.*]] = llvm.extractvalue %[[desc]][2] // CHECK-DAG: %[[sizes:.*]] = llvm.extractvalue %[[desc]][3] // CHECK-DAG: %[[strides:.*]] = llvm.extractvalue %[[desc]][4] - // CHECK-DAG: %[[numRecords:.*]] = llvm.mlir.constant(32 : i32) : i32 + // CHECK-DAG: %[[numRecords:.*]] = llvm.mlir.constant(32 : i64) : i64 // CHECK-DAG: %[[strideArg:.*]] = llvm.mlir.constant(0 : i16) : i16 // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) @@ -38,7 +40,7 @@ func.func @fat_raw_buffer_cast_0d(%buf: memref) -> m // CHECK: %[[desc:.*]] = builtin.unrealized_conversion_cast %{{.*}} : memref to !llvm.struct<(ptr<1>, ptr<1>, i64)> // CHECK-DAG: %[[base:.*]] = llvm.extractvalue %[[desc]][1] // CHECK-DAG: %[[offset:.*]] = llvm.extractvalue %[[desc]][2] - // CHECK-DAG: %[[numRecords:.*]] = llvm.mlir.constant(4 : i32) : i32 + // CHECK-DAG: %[[numRecords:.*]] = llvm.mlir.constant(4 : i64) : i64 // CHECK-DAG: %[[strideArg:.*]] = llvm.mlir.constant(0 : i16) : i16 // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) @@ -57,9 +59,8 @@ func.func @fat_raw_buffer_cast_dyn_size_offset(%buf: memref) -> memref<8xi32, #amdgpu.address_space> { - // CHECK: %[[numRecords:.*]] = arith.constant -1 : i32 + // CHECK: %[[numRecords:.*]] = arith.constant -1 : i64 // CHECK: rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %{{.*}} - %cu32_max = arith.constant 0xffffffff : i32 - %ret = amdgpu.fat_raw_buffer_cast %buf validBytes(%cu32_max) : memref<8xi32, #gpu_global_addrspace> to memref<8xi32, #amdgpu.address_space> + %cu64_max = arith.constant -1 : i64 + %ret = amdgpu.fat_raw_buffer_cast %buf validBytes(%cu64_max) : memref<8xi32, #gpu_global_addrspace> to memref<8xi32, #amdgpu.address_space> return %ret : memref<8xi32, #amdgpu.address_space> } @@ -115,9 +116,7 @@ func.func @fat_raw_buffer_cast_cache_swizzle(%buf: memref<64x64xi32, #gpu_global // CHECK-LABEL: func @gpu_gcn_raw_buffer_load_scalar_i32 func.func @gpu_gcn_raw_buffer_load_scalar_i32(%buf: memref) -> i32 { - // Extra constant for byte width - // CHECK: llvm.mlir.constant(4 : i32) - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(4 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(4 : i64) // CHECK: %[[stride:.*]] = llvm.mlir.constant(0 : i16) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) @@ -130,7 +129,7 @@ func.func @gpu_gcn_raw_buffer_load_scalar_i32(%buf: memref) -> i32 { // CHECK-LABEL: func @gpu_gcn_raw_buffer_load_i32 func.func @gpu_gcn_raw_buffer_load_i32(%buf: memref<64xi32>, %idx: i32) -> i32 { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i64) // CHECK: %[[stride:.*]] = llvm.mlir.constant(0 : i16) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) @@ -155,11 +154,10 @@ func.func @gpu_gcn_raw_buffer_load_i32_strided(%buf: memref<16x16xi32, strided<[ // CHECK: %[[stride_j:.*]] = llvm.extractvalue %[[descriptor]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[ext_j:.*]] = llvm.mul %[[sz_j]], %[[stride_j]] : i64 
// CHECK: %[[num_records:.*]] = llvm.intr.umax(%[[ext_i]], %[[ext_j]]) : (i64, i64) -> i64 - // CHECK: %[[num_rec_i32:.*]] = llvm.trunc %[[num_records]] : i64 to i32 - // CHECK: %[[elem_size_2:.*]] = llvm.mlir.constant(4 : i32) : i32 - // CHECK: %[[num_rec_bytes_i32:.*]] = llvm.mul %[[num_rec_i32]], %[[elem_size_2]] : i32 + // CHECK: %[[elem_size_2:.*]] = llvm.mlir.constant(4 : i64) : i64 + // CHECK: %[[num_rec_bytes:.*]] = llvm.mul %[[num_records]], %[[elem_size_2]] : i64 // CHECK: %[[stride:.*]] = llvm.mlir.constant(0 : i16) : i16 - // CHECK: %[[rsrc:.*]] = rocdl.make.buffer.rsrc %[[ptr]], %[[stride]], %[[num_rec_bytes_i32]], %{{.*}} : !llvm.ptr to <8> + // CHECK: %[[rsrc:.*]] = rocdl.make.buffer.rsrc %[[ptr]], %[[stride]], %[[num_rec_bytes]], %{{.*}} : !llvm.ptr to <8> // CHECK: %[[stride_i_1:.*]] = llvm.extractvalue %[[descriptor]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: %[[stride_i_i32:.*]] = llvm.trunc %[[stride_i_1]] : i64 to i32 // CHECK: %[[t_0:.*]] = llvm.mul %{{.*}}, %[[stride_i_i32]] : i32 @@ -207,7 +205,7 @@ func.func @gpu_gcn_raw_buffer_load_2xi32(%buf: memref<64xi32>, %idx: i32) -> vec // CHECK-LABEL: func @gpu_gcn_raw_buffer_load_i8 func.func @gpu_gcn_raw_buffer_load_i8(%buf: memref<64xi8>, %idx: i32) -> i8 { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(64 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(64 : i64) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %{{.*}} // CHECK: %[[ret:.*]] = rocdl.raw.ptr.buffer.load %[[resource]], %{{.*}}, %{{.*}}, %{{.*}} : i8 // CHECK: return %[[ret]] @@ -217,7 +215,7 @@ func.func @gpu_gcn_raw_buffer_load_i8(%buf: memref<64xi8>, %idx: i32) -> i8 { // CHECK-LABEL: func @gpu_gcn_raw_buffer_load_2xi8 func.func @gpu_gcn_raw_buffer_load_2xi8(%buf: memref<64xi8>, %idx: i32) -> vector<2xi8> { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(64 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(64 : i64) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %{{.*}} // CHECK: %[[loaded:.*]] = rocdl.raw.ptr.buffer.load %[[resource]], %{{.*}}, %{{.*}}, %{{.*}} : i16 // CHECK: %[[ret:.*]] = llvm.bitcast %[[loaded]] : i16 to vector<2xi8> @@ -237,7 +235,7 @@ func.func @gpu_gcn_raw_buffer_load_16xi8(%buf: memref<64xi8>, %idx: i32) -> vect // CHECK-LABEL: func @gpu_gcn_raw_buffer_load_f8E5M2FNUZ func.func @gpu_gcn_raw_buffer_load_f8E5M2FNUZ(%buf: memref<64xf8E5M2FNUZ>, %idx: i32) -> f8E5M2FNUZ { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(64 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(64 : i64) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %{{.*}} // CHECK: %[[loaded:.*]] = rocdl.raw.ptr.buffer.load %[[resource]], %{{.*}}, %{{.*}}, %{{.*}} : i8 // CHECK: %[[ret:.*]] = builtin.unrealized_conversion_cast %[[loaded]] : i8 to f8E5M2FNUZ @@ -248,7 +246,7 @@ func.func @gpu_gcn_raw_buffer_load_f8E5M2FNUZ(%buf: memref<64xf8E5M2FNUZ>, %idx: // CHECK-LABEL: func @gpu_gcn_raw_buffer_load_4xf8E4M3FNUZ func.func @gpu_gcn_raw_buffer_load_4xf8E4M3FNUZ(%buf: memref<64xf8E4M3FNUZ>, %idx: i32) -> vector<4xf8E4M3FNUZ> { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(64 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(64 : i64) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %{{.*}} // CHECK: %[[loaded:.*]] = rocdl.raw.ptr.buffer.load %[[resource]], %{{.*}}, %{{.*}}, %{{.*}} : i32 // CHECK: %[[cast:.*]] = llvm.bitcast 
%[[loaded]] : i32 to vector<4xi8> @@ -271,7 +269,7 @@ func.func @gpu_gcn_raw_buffer_store_scalar_i32(%value: i32, %buf: memref) { // CHECK-LABEL: func @gpu_gcn_raw_buffer_store_i32 func.func @gpu_gcn_raw_buffer_store_i32(%value: i32, %buf: memref<64xi32>, %idx: i32) { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i64) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %[[flags]] @@ -307,7 +305,7 @@ func.func @gpu_gcn_raw_buffer_store_16xi8(%value: vector<16xi8>, %buf: memref<64 // And more so for atomic add // CHECK-LABEL: func @gpu_gcn_raw_buffer_atomic_fadd_f32 func.func @gpu_gcn_raw_buffer_atomic_fadd_f32(%value: f32, %buf: memref<64xf32>, %idx: i32) { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i64) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %[[flags]] @@ -318,7 +316,7 @@ func.func @gpu_gcn_raw_buffer_atomic_fadd_f32(%value: f32, %buf: memref<64xf32>, // CHECK-LABEL: func @gpu_gcn_raw_buffer_atomic_fadd_v2f16 func.func @gpu_gcn_raw_buffer_atomic_fadd_v2f16(%value: vector<2xf16>, %buf: memref<64xf16>, %idx: i32) { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(128 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(128 : i64) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %[[flags]] @@ -329,7 +327,7 @@ func.func @gpu_gcn_raw_buffer_atomic_fadd_v2f16(%value: vector<2xf16>, %buf: mem // CHECK-LABEL: func @gpu_gcn_raw_buffer_atomic_fadd_v2bf16 func.func @gpu_gcn_raw_buffer_atomic_fadd_v2bf16(%value: vector<2xbf16>, %buf: memref<64xbf16>, %idx: i32) { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(128 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(128 : i64) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %[[flags]] @@ -340,7 +338,7 @@ func.func @gpu_gcn_raw_buffer_atomic_fadd_v2bf16(%value: vector<2xbf16>, %buf: m // CHECK-LABEL: func @gpu_gcn_raw_buffer_atomic_fmax_f32 func.func @gpu_gcn_raw_buffer_atomic_fmax_f32(%value: f32, %buf: memref<64xf32>, %idx: i32) { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i64) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %[[flags]] @@ -351,7 +349,7 @@ func.func @gpu_gcn_raw_buffer_atomic_fmax_f32(%value: f32, %buf: memref<64xf32>, // CHECK-LABEL: func @gpu_gcn_raw_buffer_atomic_smax_i32 func.func @gpu_gcn_raw_buffer_atomic_smax_i32(%value: i32, %buf: memref<64xi32>, %idx: i32) { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i64) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) // 
CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %[[flags]] @@ -362,7 +360,7 @@ func.func @gpu_gcn_raw_buffer_atomic_smax_i32(%value: i32, %buf: memref<64xi32>, // CHECK-LABEL: func @gpu_gcn_raw_buffer_atomic_umin_i32 func.func @gpu_gcn_raw_buffer_atomic_umin_i32(%value: i32, %buf: memref<64xi32>, %idx: i32) { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i64) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %[[flags]] @@ -376,7 +374,7 @@ func.func @gpu_gcn_raw_buffer_atomic_umin_i32(%value: i32, %buf: memref<64xi32>, func.func @amdgpu_raw_buffer_atomic_cmpswap_f32(%src : f32, %cmp : f32, %buf : memref<64xf32>, %idx: i32) -> f32 { // CHECK: %[[srcCast:.*]] = llvm.bitcast %[[src]] : f32 to i32 // CHECK: %[[cmpCast:.*]] = llvm.bitcast %[[cmp]] : f32 to i32 - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(256 : i64) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %[[flags]] @@ -390,7 +388,7 @@ func.func @amdgpu_raw_buffer_atomic_cmpswap_f32(%src : f32, %cmp : f32, %buf : m // CHECK-LABEL: func @amdgpu_raw_buffer_atomic_cmpswap_i64 // CHECK-SAME: (%[[src:.*]]: i64, %[[cmp:.*]]: i64, {{.*}}) func.func @amdgpu_raw_buffer_atomic_cmpswap_i64(%src : i64, %cmp : i64, %buf : memref<64xi64>, %idx: i32) -> i64 { - // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(512 : i32) + // CHECK: %[[numRecords:.*]] = llvm.mlir.constant(512 : i64) // GFX9: %[[flags:.*]] = llvm.mlir.constant(159744 : i32) // RDNA: %[[flags:.*]] = llvm.mlir.constant(822243328 : i32) // CHECK: %[[resource:.*]] = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %[[numRecords]], %[[flags]] @@ -414,19 +412,16 @@ func.func @amdgpu_raw_buffer_atomic_cmpswap_v2f16(%src : vector<2xf16>, %cmp : v // CHECK-LABEL: func @lds_barrier func.func @lds_barrier() { + // CHECK: llvm.fence syncscope("workgroup") release {llvm.mmra = #[[$MMRA_TAG]]} // GFX908: llvm.inline_asm has_side_effects asm_dialect = att - // GFX908-SAME: ";;;WARNING: BREAKS DEBUG WATCHES\0As_waitcnt lgkmcnt(0)\0As_barrier" - // GFX90A: rocdl.s.waitcnt -7937 + // GFX908-SAME: ";;;WARNING: BREAKS DEBUG WATCHES\0As_barrier" // GFX90A-NEXT: rocdl.s.barrier - // GFX942: rocdl.s.waitcnt -7937 // GFX942-NEXT: rocdl.s.barrier - // GFX10: rocdl.s.waitcnt -16129 // GFX10-NEXT: rocdl.s.barrier - // GFX11: llvm.inline_asm has_side_effects asm_dialect = att - // GFX11-SAME: ";;;WARNING: BREAKS DEBUG WATCHES\0As_waitcnt lgkmcnt(0)\0As_barrier" - // GFX12: rocdl.s.wait.dscnt 0 + // GFX11-NEXT: rocdl.s.barrier // GFX12-NEXT: rocdl.s.barrier.signal -1 // GFX12-NEXT: rocdl.s.barrier.wait -1 + // CHECK-NEXT: llvm.fence syncscope("workgroup") acquire {llvm.mmra = #[[$MMRA_TAG]]} amdgpu.lds_barrier func.return } diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir index ef06af3ad3163..a4b5dde8a2187 100644 --- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir +++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir @@ -1109,3 +1109,42 @@ gpu.module @test_module_55 { func.return %result32, %result64 : f32, f64 } } + +gpu.module @test_module_56 { + // CHECK: gpu.module 
@test_module_56 + + // CHECK-DAG: llvm.func @__nv_sincosf(f32, !llvm.ptr, !llvm.ptr) + // CHECK-DAG: llvm.func @__nv_sincos(f64, !llvm.ptr, !llvm.ptr) + + // CHECK-LABEL: func @gpu_sincos + // CHECK-SAME: %[[ARG_f16:.*]]: f16, %[[ARG_f32:.*]]: f32, %[[ARG_f64:.*]]: f64 + func.func @gpu_sincos(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f16, f32, f32, f64, f64) { + // CHECK-COUNT-6: llvm.alloca + // CHECK: %[[ARG_f16_ext:.*]] = llvm.fpext %[[ARG_f16]] : f16 to f32 + // CHECK: llvm.call @__nv_sincosf(%[[ARG_f16_ext]], %{{.+}}, %{{.+}}) : (f32, !llvm.ptr, !llvm.ptr) -> () + // CHECK-COUNT-2: llvm.fptrunc + // CHECK: llvm.call @__nv_sincosf(%[[ARG_f32]], %{{.+}}, %{{.+}}) : (f32, !llvm.ptr, !llvm.ptr) -> () + // CHECK: llvm.call @__nv_sincos(%[[ARG_f64]], %{{.+}}, %{{.+}}) : (f64, !llvm.ptr, !llvm.ptr) -> () + %sin16, %cos16 = math.sincos %arg_f16 : f16 + %sin32, %cos32 = math.sincos %arg_f32 : f32 + %sin64, %cos64 = math.sincos %arg_f64 : f64 + func.return %sin16, %cos16, %sin32, %cos32, %sin64, %cos64 : f16, f16, f32, f32, f64, f64 + } + + // CHECK: llvm.func @__nv_fast_sincosf(f32, !llvm.ptr, !llvm.ptr) + + // CHECK-LABEL: func @gpu_sincos_fastmath + // CHECK-SAME: %[[ARG_f16:.*]]: f16, %[[ARG_f32:.*]]: f32, %[[ARG_f64:.*]]: f64 + func.func @gpu_sincos_fastmath(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f16, f32, f32, f64, f64) { + // CHECK-COUNT-6: llvm.alloca + // CHECK: %[[ARG_f16_ext:.*]] = llvm.fpext %[[ARG_f16]] : f16 to f32 + // CHECK: llvm.call @__nv_fast_sincosf(%[[ARG_f16_ext]], %{{.+}}, %{{.+}}) : (f32, !llvm.ptr, !llvm.ptr) -> () + // CHECK-COUNT-2: llvm.fptrunc + // CHECK: llvm.call @__nv_fast_sincosf(%[[ARG_f32]], %{{.+}}, %{{.+}}) : (f32, !llvm.ptr, !llvm.ptr) -> () + // CHECK: llvm.call @__nv_sincos(%[[ARG_f64]], %{{.+}}, %{{.+}}) : (f64, !llvm.ptr, !llvm.ptr) -> () + %sin16, %cos16 = math.sincos %arg_f16 fastmath : f16 + %sin32, %cos32 = math.sincos %arg_f32 fastmath : f32 + %sin64, %cos64 = math.sincos %arg_f64 fastmath : f64 + func.return %sin16, %cos16, %sin32, %cos32, %sin64, %cos64 : f16, f16, f32, f32, f64, f64 + } +} diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir index 2dc6a5ab2a86c..32da31202b688 100644 --- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir +++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir @@ -1,5 +1,6 @@ // RUN: mlir-opt %s -convert-gpu-to-rocdl='runtime=HIP' -split-input-file | FileCheck %s +// CHECK-LABEL: gpu.module @test_module gpu.module @test_module { // CHECK-DAG: llvm.mlir.global internal constant @[[$PRINT_GLOBAL0:[A-Za-z0-9_]+]]("Hello, world\0A\00") // CHECK-DAG: llvm.mlir.global internal constant @[[$PRINT_GLOBAL1:[A-Za-z0-9_]+]]("Hello: %d\0A\00") @@ -40,3 +41,38 @@ gpu.module @test_module { gpu.return } } + +// ----- + +// The builtin.module we're targeting is wrapped in a fake gpu.module +// because the convert-gpu-to-rocdl pass only runs on `gpu.module` ops, +// even though the printf patterns could run in other contexts.
+ +// CHECK-LABEL: gpu.module @fake_gpu_module_for_test +// CHECK-LABEL: builtin.module @test_module +gpu.module @fake_gpu_module_for_test { +builtin.module @test_module { + // CHECK-DAG: llvm.mlir.global internal constant @[[$PRINT_GLOBAL1:[A-Za-z0-9_]+]]("Hello: %d\0A\00") + // CHECK-DAG: llvm.func @__ockl_printf_append_args(i64, i32, i64, i64, i64, i64, i64, i64, i64, i32) -> i64 + // CHECK-DAG: llvm.func @__ockl_printf_append_string_n(i64, !llvm.ptr, i64, i32) -> i64 + // CHECK-DAG: llvm.func @__ockl_printf_begin(i64) -> i64 + + // CHECK-LABEL: llvm.func @test_printf + // CHECK: (%[[ARG0:.*]]: i32) + llvm.func @test_printf(%arg0: i32) { + // CHECK: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK-NEXT: %[[DESC0:.*]] = llvm.call @__ockl_printf_begin(%0) : (i64) -> i64 + // CHECK-NEXT: %[[FORMATSTR:.*]] = llvm.mlir.addressof @[[$PRINT_GLOBAL1]] : !llvm.ptr + // CHECK-NEXT: %[[FORMATSTART:.*]] = llvm.getelementptr %[[FORMATSTR]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<11 x i8> + // CHECK-NEXT: %[[FORMATLEN:.*]] = llvm.mlir.constant(11 : i64) : i64 + // CHECK-NEXT: %[[ISLAST:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK-NEXT: %[[ISNTLAST:.*]] = llvm.mlir.constant(0 : i32) : i32 + // CHECK-NEXT: %[[DESC1:.*]] = llvm.call @__ockl_printf_append_string_n(%[[DESC0]], %[[FORMATSTART]], %[[FORMATLEN]], %[[ISNTLAST]]) : (i64, !llvm.ptr, i64, i32) -> i64 + // CHECK-NEXT: %[[NARGS1:.*]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK-NEXT: %[[ARG0_64:.*]] = llvm.zext %[[ARG0]] : i32 to i64 + // CHECK-NEXT: %{{.*}} = llvm.call @__ockl_printf_append_args(%[[DESC1]], %[[NARGS1]], %[[ARG0_64]], %[[CST0]], %[[CST0]], %[[CST0]], %[[CST0]], %[[CST0]], %[[CST0]], %[[ISLAST]]) : (i64, i32, i64, i64, i64, i64, i64, i64, i64, i32) -> i64 + gpu.printf "Hello: %d\n", %arg0 : i32 + llvm.return + } +} +} diff --git a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir index f4541220fe4d2..f7d27120d4207 100644 --- a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir +++ b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir @@ -230,6 +230,16 @@ func.func @trigonometrics(%arg0: f32) { // ----- +// CHECK-LABEL: func @sincos +// CHECK-SAME: [[ARG0:%.+]]: f32 +func.func @sincos(%arg0: f32) { + // CHECK: llvm.intr.sincos([[ARG0]]) : (f32) -> !llvm.struct<(f32, f32)> + %0:2 = math.sincos %arg0 : f32 + func.return +} + +// ----- + // CHECK-LABEL: func @inverse_trigonometrics // CHECK-SAME: [[ARG0:%.+]]: f32 func.func @inverse_trigonometrics(%arg0: f32) { diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir index 45b1a1f1ca40c..0cbe064572911 100644 --- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir +++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir @@ -195,6 +195,36 @@ func.func @assume_alignment(%0 : memref<4x4xf16>) { // ----- +// ALL-LABEL: func @distinct_objects +// ALL-SAME: (%[[ARG0:.*]]: memref, %[[ARG1:.*]]: memref, %[[ARG2:.*]]: memref) +func.func @distinct_objects(%arg0: memref, %arg1: memref, %arg2: memref) -> (memref, memref, memref) { +// ALL-DAG: %[[CAST_0:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref to !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> +// ALL-DAG: %[[CAST_1:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref to !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> +// ALL-DAG: %[[CAST_2:.*]] = builtin.unrealized_conversion_cast %[[ARG2]] : memref to !llvm.struct<(ptr, ptr, i64, array<1 x i64>, 
array<1 x i64>)> +// ALL: %[[PTR_0:.*]] = llvm.extractvalue %[[CAST_0]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> +// ALL: %[[PTR_1:.*]] = llvm.extractvalue %[[CAST_1]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> +// ALL: %[[PTR_2:.*]] = llvm.extractvalue %[[CAST_2]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> +// ALL: %[[TRUE:.*]] = llvm.mlir.constant(true) : i1 +// ALL: llvm.intr.assume %[[TRUE]] ["separate_storage"(%[[PTR_0]], %[[PTR_1]] : !llvm.ptr, !llvm.ptr)] : i1 +// ALL: llvm.intr.assume %[[TRUE]] ["separate_storage"(%[[PTR_0]], %[[PTR_2]] : !llvm.ptr, !llvm.ptr)] : i1 +// ALL: llvm.intr.assume %[[TRUE]] ["separate_storage"(%[[PTR_1]], %[[PTR_2]] : !llvm.ptr, !llvm.ptr)] : i1 + %1, %2, %3 = memref.distinct_objects %arg0, %arg1, %arg2 : memref, memref, memref + return %1, %2, %3 : memref, memref, memref +} + +// ----- + +// ALL-LABEL: func @distinct_objects_noop +// ALL-SAME: (%[[ARG0:.*]]: memref) +func.func @distinct_objects_noop(%arg0: memref) -> memref { +// 1-operand version is noop +// ALL-NEXT: return %[[ARG0]] + %1 = memref.distinct_objects %arg0 : memref + return %1 : memref +} + +// ----- + // CHECK-LABEL: func @assume_alignment_w_offset // CHECK-INTERFACE-LABEL: func @assume_alignment_w_offset func.func @assume_alignment_w_offset(%0 : memref<4x4xf16, strided<[?, ?], offset: ?>>) { diff --git a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir index e6321e99693ac..ab3c8b7397e1a 100644 --- a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir +++ b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir @@ -515,6 +515,12 @@ module attributes { // Check Image Support. +// CHECK: #[[$COLMAJMAP:.*]] = affine_map<(d0, d1) -> (d1, d0)> +#col_major = affine_map<(d0, d1) -> (d1, d0)> +// CHECK: #[[$CUSTOMLAYOUTMAP:.*]] = affine_map<(d0, d1, d2) -> (d2, d1, d0)> +#custom = affine_map<(d0, d1, d2) -> (d2, d1, d0)> +// CHECK: #[[$NONPERMMAP:.*]] = affine_map<(d0, d1) -> (d0, d1 mod 2)> +#non_permutation = affine_map<(d0, d1) -> (d0, d1 mod 2)> module attributes { spirv.target_env = #spirv.target_env<#spirv.vce>, %[[ARG1:.*]]: memref<1xf32, #spirv.storage_class> func.func @load_from_image_1D(%arg0: memref<1xf32, #spirv.storage_class>, %arg1: memref<1xf32, #spirv.storage_class>) { -// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %arg1 : memref<1xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> -// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %arg0 : memref<1xf32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<1xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<1xf32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> %cst = arith.constant 0 : index // CHECK: %[[COORDS:.*]] = builtin.unrealized_conversion_cast %{{.*}} : index to i32 // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> @@ -550,121 +556,206 @@ module attributes { } // CHECK-LABEL: @load_from_image_2D( - // CHECK-SAME: %[[ARG0:.*]]: memref<1x1xf32, #spirv.storage_class>, %[[ARG1:.*]]: memref<1x1xf32, #spirv.storage_class> - func.func @load_from_image_2D(%arg0: memref<1x1xf32, #spirv.storage_class>, %arg1: memref<1x1xf32, #spirv.storage_class>) { -// CHECK-DAG: %[[SB:.*]] = 
builtin.unrealized_conversion_cast %arg1 : memref<1x1xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> -// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %arg0 : memref<1x1xf32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> - %cst = arith.constant 0 : index + // CHECK-SAME: %[[ARG0:.*]]: memref<2x4xf32, #spirv.storage_class>, %[[ARG1:.*]]: memref<2x4xf32, #spirv.storage_class> + func.func @load_from_image_2D(%arg0: memref<2x4xf32, #spirv.storage_class>, %arg1: memref<2x4xf32, #spirv.storage_class>) { +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<2x4xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<2x4xf32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> + // CHECK: %[[X:.*]] = arith.constant 3 : index + // CHECK: %[[X32:.*]] = builtin.unrealized_conversion_cast %[[X]] : index to i32 + %x = arith.constant 3 : index + // CHECK: %[[Y:.*]] = arith.constant 1 : index + // CHECK: %[[Y32:.*]] = builtin.unrealized_conversion_cast %[[Y]] : index to i32 + %y = arith.constant 1 : index + // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> + // CHECK: %[[IMAGE:.*]] = spirv.Image %[[SIMAGE]] : !spirv.sampled_image> + // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %[[X32]], %[[Y32]] : (i32, i32) -> vector<2xi32> + // CHECK: %[[RES_VEC:.*]] = spirv.ImageFetch %[[IMAGE]], %[[COORDS]] : !spirv.image, vector<2xi32> -> vector<4xf32> + // CHECK: %[[RESULT:.*]] = spirv.CompositeExtract %[[RES_VEC]][0 : i32] : vector<4xf32> + %0 = memref.load %arg0[%y, %x] : memref<2x4xf32, #spirv.storage_class> + // CHECK: spirv.Store "StorageBuffer" %{{.*}}, %[[RESULT]] : f32 + memref.store %0, %arg1[%y, %x] : memref<2x4xf32, #spirv.storage_class> + return + } + + // CHECK-LABEL: @load_from_col_major_image_2D( + // CHECK-SAME: %[[ARG0:.*]]: memref<2x4xf32, #[[$COLMAJMAP]], #spirv.storage_class>, %[[ARG1:.*]]: memref<2x4xf32, #spirv.storage_class> + func.func @load_from_col_major_image_2D(%arg0: memref<2x4xf32, #col_major, #spirv.storage_class>, %arg1: memref<2x4xf32, #spirv.storage_class>) { +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<2x4xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<2x4xf32, #[[$COLMAJMAP]], #spirv.storage_class> to !spirv.ptr>, UniformConstant> + // CHECK: %[[X:.*]] = arith.constant 3 : index + // CHECK: %[[X32:.*]] = builtin.unrealized_conversion_cast %[[X]] : index to i32 + %x = arith.constant 3 : index + // CHECK: %[[Y:.*]] = arith.constant 1 : index + // CHECK: %[[Y32:.*]] = builtin.unrealized_conversion_cast %[[Y]] : index to i32 + %y = arith.constant 1 : index // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> // CHECK: %[[IMAGE:.*]] = spirv.Image %[[SIMAGE]] : !spirv.sampled_image> - // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %{{.*}}, %{{.*}} : (i32, i32) -> vector<2xi32> + // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %[[X32]], %[[Y32]] : (i32, i32) -> vector<2xi32> // CHECK: %[[RES_VEC:.*]] = spirv.ImageFetch %[[IMAGE]], %[[COORDS]] : !spirv.image, vector<2xi32> -> vector<4xf32> // CHECK: %[[RESULT:.*]] = spirv.CompositeExtract %[[RES_VEC]][0 : i32] : vector<4xf32> - %0 = memref.load %arg0[%cst, %cst] : memref<1x1xf32, #spirv.storage_class> + %0 = memref.load 
%arg0[%x, %y] : memref<2x4xf32, #col_major, #spirv.storage_class> // CHECK: spirv.Store "StorageBuffer" %{{.*}}, %[[RESULT]] : f32 - memref.store %0, %arg1[%cst, %cst] : memref<1x1xf32, #spirv.storage_class> + memref.store %0, %arg1[%y, %x] : memref<2x4xf32, #spirv.storage_class> return } // CHECK-LABEL: @load_from_image_3D( - // CHECK-SAME: %[[ARG0:.*]]: memref<1x1x1xf32, #spirv.storage_class>, %[[ARG1:.*]]: memref<1x1x1xf32, #spirv.storage_class> - func.func @load_from_image_3D(%arg0: memref<1x1x1xf32, #spirv.storage_class>, %arg1: memref<1x1x1xf32, #spirv.storage_class>) { -// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %arg1 : memref<1x1x1xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> -// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %arg0 : memref<1x1x1xf32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> - %cst = arith.constant 0 : index + // CHECK-SAME: %[[ARG0:.*]]: memref<2x3x4xf32, #spirv.storage_class>, %[[ARG1:.*]]: memref<2x3x4xf32, #spirv.storage_class> + func.func @load_from_image_3D(%arg0: memref<2x3x4xf32, #spirv.storage_class>, %arg1: memref<2x3x4xf32, #spirv.storage_class>) { +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<2x3x4xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<2x3x4xf32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> + // CHECK: %[[X:.*]] = arith.constant 3 : index + // CHECK: %[[X32:.*]] = builtin.unrealized_conversion_cast %[[X]] : index to i32 + %x = arith.constant 3 : index + // CHECK: %[[Y:.*]] = arith.constant 2 : index + // CHECK: %[[Y32:.*]] = builtin.unrealized_conversion_cast %[[Y]] : index to i32 + %y = arith.constant 2 : index + // CHECK: %[[Z:.*]] = arith.constant 1 : index + // CHECK: %[[Z32:.*]] = builtin.unrealized_conversion_cast %[[Z]] : index to i32 + %z = arith.constant 1 : index + // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> + // CHECK: %[[IMAGE:.*]] = spirv.Image %[[SIMAGE]] : !spirv.sampled_image> + // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %[[X32]], %[[Y32]], %[[Z32]] : (i32, i32, i32) -> vector<3xi32> + // CHECK: %[[RES_VEC:.*]] = spirv.ImageFetch %[[IMAGE]], %[[COORDS]] : !spirv.image, vector<3xi32> -> vector<4xf32> + // CHECK: %[[RESULT:.*]] = spirv.CompositeExtract %[[RES_VEC]][0 : i32] : vector<4xf32> + %0 = memref.load %arg0[%z, %y, %x] : memref<2x3x4xf32, #spirv.storage_class> + // CHECK: spirv.Store "StorageBuffer" %{{.*}}, %[[RESULT]] : f32 + memref.store %0, %arg1[%z, %y, %x] : memref<2x3x4xf32, #spirv.storage_class> + return + } + + // CHECK-LABEL: @load_from_custom_layout_image_3D( + // CHECK-SAME: %[[ARG0:.*]]: memref<2x3x4xf32, #[[$CUSTOMLAYOUTMAP]], #spirv.storage_class>, %[[ARG1:.*]]: memref<2x3x4xf32, #spirv.storage_class> + func.func @load_from_custom_layout_image_3D(%arg0: memref<2x3x4xf32, #custom, #spirv.storage_class>, %arg1: memref<2x3x4xf32, #spirv.storage_class>) { +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<2x3x4xf32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<2x3x4xf32, #[[$CUSTOMLAYOUTMAP]], #spirv.storage_class> to !spirv.ptr>, UniformConstant> + // CHECK: %[[X:.*]] = arith.constant 3 : index + // CHECK: %[[X32:.*]] = builtin.unrealized_conversion_cast %[[X]] : index to i32 + %x = arith.constant 3 : 
index + // CHECK: %[[Y:.*]] = arith.constant 2 : index + // CHECK: %[[Y32:.*]] = builtin.unrealized_conversion_cast %[[Y]] : index to i32 + %y = arith.constant 2 : index + // CHECK: %[[Z:.*]] = arith.constant 1 : index + // CHECK: %[[Z32:.*]] = builtin.unrealized_conversion_cast %[[Z]] : index to i32 + %z = arith.constant 1 : index // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> // CHECK: %[[IMAGE:.*]] = spirv.Image %[[SIMAGE]] : !spirv.sampled_image> - // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %{{.*}}, %{{.*}}, %{{.*}} : (i32, i32, i32) -> vector<3xi32> + // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %[[X32]], %[[Y32]], %[[Z32]] : (i32, i32, i32) -> vector<3xi32> // CHECK: %[[RES_VEC:.*]] = spirv.ImageFetch %[[IMAGE]], %[[COORDS]] : !spirv.image, vector<3xi32> -> vector<4xf32> // CHECK: %[[RESULT:.*]] = spirv.CompositeExtract %[[RES_VEC]][0 : i32] : vector<4xf32> - %0 = memref.load %arg0[%cst, %cst, %cst] : memref<1x1x1xf32, #spirv.storage_class> + %0 = memref.load %arg0[%x, %y, %z] : memref<2x3x4xf32, #custom, #spirv.storage_class> // CHECK: spirv.Store "StorageBuffer" %{{.*}}, %[[RESULT]] : f32 - memref.store %0, %arg1[%cst, %cst, %cst] : memref<1x1x1xf32, #spirv.storage_class> + memref.store %0, %arg1[%z, %y, %x] : memref<2x3x4xf32, #spirv.storage_class> return } // CHECK-LABEL: @load_from_image_2D_f16( - // CHECK-SAME: %[[ARG0:.*]]: memref<1x1xf16, #spirv.storage_class>, %[[ARG1:.*]]: memref<1x1xf16, #spirv.storage_class> - func.func @load_from_image_2D_f16(%arg0: memref<1x1xf16, #spirv.storage_class>, %arg1: memref<1x1xf16, #spirv.storage_class>) { -// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %arg1 : memref<1x1xf16, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> -// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %arg0 : memref<1x1xf16, #spirv.storage_class> to !spirv.ptr>, UniformConstant> - %cst = arith.constant 0 : index + // CHECK-SAME: %[[ARG0:.*]]: memref<2x3xf16, #spirv.storage_class>, %[[ARG1:.*]]: memref<2x3xf16, #spirv.storage_class> + func.func @load_from_image_2D_f16(%arg0: memref<2x3xf16, #spirv.storage_class>, %arg1: memref<2x3xf16, #spirv.storage_class>) { +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<2x3xf16, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<2x3xf16, #spirv.storage_class> to !spirv.ptr>, UniformConstant> + // CHECK: %[[X:.*]] = arith.constant 2 : index + // CHECK: %[[X32:.*]] = builtin.unrealized_conversion_cast %[[X]] : index to i32 + %x = arith.constant 2 : index + // CHECK: %[[Y:.*]] = arith.constant 1 : index + // CHECK: %[[Y32:.*]] = builtin.unrealized_conversion_cast %[[Y]] : index to i32 + %y = arith.constant 1 : index // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> // CHECK: %[[IMAGE:.*]] = spirv.Image %[[SIMAGE]] : !spirv.sampled_image> - // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %{{.*}}, %{{.*}} : (i32, i32) -> vector<2xi32> + // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %[[X32]], %[[Y32]] : (i32, i32) -> vector<2xi32> // CHECK: %[[RES_VEC:.*]] = spirv.ImageFetch %[[IMAGE]], %[[COORDS]] : !spirv.image, vector<2xi32> -> vector<4xf16> // CHECK: %[[RESULT:.*]] = spirv.CompositeExtract %[[RES_VEC]][0 : i32] : vector<4xf16> - %0 = memref.load %arg0[%cst, %cst] : memref<1x1xf16, #spirv.storage_class> + %0 = memref.load %arg0[%y, %x] : 
memref<2x3xf16, #spirv.storage_class> // CHECK: spirv.Store "StorageBuffer" %{{.*}}, %[[RESULT]] : f16 - memref.store %0, %arg1[%cst, %cst] : memref<1x1xf16, #spirv.storage_class> + memref.store %0, %arg1[%y, %x] : memref<2x3xf16, #spirv.storage_class> return } // CHECK-LABEL: @load_from_image_2D_i32( - // CHECK-SAME: %[[ARG0:.*]]: memref<1x1xi32, #spirv.storage_class>, %[[ARG1:.*]]: memref<1x1xi32, #spirv.storage_class> - func.func @load_from_image_2D_i32(%arg0: memref<1x1xi32, #spirv.storage_class>, %arg1: memref<1x1xi32, #spirv.storage_class>) { -// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %arg1 : memref<1x1xi32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> -// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %arg0 : memref<1x1xi32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> - %cst = arith.constant 0 : index + // CHECK-SAME: %[[ARG0:.*]]: memref<2x3xi32, #spirv.storage_class>, %[[ARG1:.*]]: memref<2x3xi32, #spirv.storage_class> + func.func @load_from_image_2D_i32(%arg0: memref<2x3xi32, #spirv.storage_class>, %arg1: memref<2x3xi32, #spirv.storage_class>) { +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<2x3xi32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<2x3xi32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> + // CHECK: %[[X:.*]] = arith.constant 2 : index + // CHECK: %[[X32:.*]] = builtin.unrealized_conversion_cast %[[X]] : index to i32 + %x = arith.constant 2 : index + // CHECK: %[[Y:.*]] = arith.constant 1 : index + // CHECK: %[[Y32:.*]] = builtin.unrealized_conversion_cast %[[Y]] : index to i32 + %y = arith.constant 1 : index // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> // CHECK: %[[IMAGE:.*]] = spirv.Image %[[SIMAGE]] : !spirv.sampled_image> - // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %{{.*}}, %{{.*}} : (i32, i32) -> vector<2xi32> + // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %[[X32]], %[[Y32]] : (i32, i32) -> vector<2xi32> // CHECK: %[[RES_VEC:.*]] = spirv.ImageFetch %[[IMAGE]], %[[COORDS]] : !spirv.image, vector<2xi32> -> vector<4xi32> // CHECK: %[[RESULT:.*]] = spirv.CompositeExtract %[[RES_VEC]][0 : i32] : vector<4xi32> - %0 = memref.load %arg0[%cst, %cst] : memref<1x1xi32, #spirv.storage_class> + %0 = memref.load %arg0[%y, %x] : memref<2x3xi32, #spirv.storage_class> // CHECK: spirv.Store "StorageBuffer" %{{.*}}, %[[RESULT]] : i32 - memref.store %0, %arg1[%cst, %cst] : memref<1x1xi32, #spirv.storage_class> + memref.store %0, %arg1[%y, %x] : memref<2x3xi32, #spirv.storage_class> return } // CHECK-LABEL: @load_from_image_2D_ui32( - // CHECK-SAME: %[[ARG0:.*]]: memref<1x1xui32, #spirv.storage_class>, %[[ARG1:.*]]: memref<1x1xui32, #spirv.storage_class> - func.func @load_from_image_2D_ui32(%arg0: memref<1x1xui32, #spirv.storage_class>, %arg1: memref<1x1xui32, #spirv.storage_class>) { -// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %arg1 : memref<1x1xui32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> -// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %arg0 : memref<1x1xui32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> - %cst = arith.constant 0 : index + // CHECK-SAME: %[[ARG0:.*]]: memref<2x3xui32, #spirv.storage_class>, %[[ARG1:.*]]: memref<2x3xui32, #spirv.storage_class> + func.func @load_from_image_2D_ui32(%arg0: memref<2x3xui32, #spirv.storage_class>, 
%arg1: memref<2x3xui32, #spirv.storage_class>) { +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<2x3xui32, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<2x3xui32, #spirv.storage_class> to !spirv.ptr>, UniformConstant> + // CHECK: %[[X:.*]] = arith.constant 2 : index + // CHECK: %[[X32:.*]] = builtin.unrealized_conversion_cast %[[X]] : index to i32 + %x = arith.constant 2 : index + // CHECK: %[[Y:.*]] = arith.constant 1 : index + // CHECK: %[[Y32:.*]] = builtin.unrealized_conversion_cast %[[Y]] : index to i32 + %y = arith.constant 1 : index // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> // CHECK: %[[IMAGE:.*]] = spirv.Image %[[SIMAGE]] : !spirv.sampled_image> - // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %{{.*}}, %{{.*}} : (i32, i32) -> vector<2xi32> + // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %[[X32]], %[[Y32]] : (i32, i32) -> vector<2xi32> // CHECK: %[[RES_VEC:.*]] = spirv.ImageFetch %[[IMAGE]], %[[COORDS]] : !spirv.image, vector<2xi32> -> vector<4xui32> // CHECK: %[[RESULT:.*]] = spirv.CompositeExtract %[[RES_VEC]][0 : i32] : vector<4xui32> - %0 = memref.load %arg0[%cst, %cst] : memref<1x1xui32, #spirv.storage_class> + %0 = memref.load %arg0[%y, %x] : memref<2x3xui32, #spirv.storage_class> // CHECK: spirv.Store "StorageBuffer" %{{.*}}, %[[RESULT]] : ui32 - memref.store %0, %arg1[%cst, %cst] : memref<1x1xui32, #spirv.storage_class> + memref.store %0, %arg1[%y, %x] : memref<2x3xui32, #spirv.storage_class> return } // CHECK-LABEL: @load_from_image_2D_i16( - // CHECK-SAME: %[[ARG0:.*]]: memref<1x1xi16, #spirv.storage_class>, %[[ARG1:.*]]: memref<1x1xi16, #spirv.storage_class> - func.func @load_from_image_2D_i16(%arg0: memref<1x1xi16, #spirv.storage_class>, %arg1: memref<1x1xi16, #spirv.storage_class>) { -// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %arg1 : memref<1x1xi16, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> -// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %arg0 : memref<1x1xi16, #spirv.storage_class> to !spirv.ptr>, UniformConstant> - %cst = arith.constant 0 : index + // CHECK-SAME: %[[ARG0:.*]]: memref<2x3xi16, #spirv.storage_class>, %[[ARG1:.*]]: memref<2x3xi16, #spirv.storage_class> + func.func @load_from_image_2D_i16(%arg0: memref<2x3xi16, #spirv.storage_class>, %arg1: memref<2x3xi16, #spirv.storage_class>) { +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<2x3xi16, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<2x3xi16, #spirv.storage_class> to !spirv.ptr>, UniformConstant> + // CHECK: %[[X:.*]] = arith.constant 2 : index + // CHECK: %[[X32:.*]] = builtin.unrealized_conversion_cast %[[X]] : index to i32 + %x = arith.constant 2 : index + // CHECK: %[[Y:.*]] = arith.constant 1 : index + // CHECK: %[[Y32:.*]] = builtin.unrealized_conversion_cast %[[Y]] : index to i32 + %y = arith.constant 1 : index // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> // CHECK: %[[IMAGE:.*]] = spirv.Image %[[SIMAGE]] : !spirv.sampled_image> - // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %{{.*}}, %{{.*}} : (i32, i32) -> vector<2xi32> + // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %[[X32]], %[[Y32]] : (i32, i32) -> vector<2xi32> // CHECK: %[[RES_VEC:.*]] = 
spirv.ImageFetch %[[IMAGE]], %[[COORDS]] : !spirv.image, vector<2xi32> -> vector<4xi16> // CHECK: %[[RESULT:.*]] = spirv.CompositeExtract %[[RES_VEC]][0 : i32] : vector<4xi16> - %0 = memref.load %arg0[%cst, %cst] : memref<1x1xi16, #spirv.storage_class> + %0 = memref.load %arg0[%y, %x] : memref<2x3xi16, #spirv.storage_class> // CHECK: spirv.Store "StorageBuffer" %{{.*}}, %[[RESULT]] : i16 - memref.store %0, %arg1[%cst, %cst] : memref<1x1xi16, #spirv.storage_class> + memref.store %0, %arg1[%y, %x] : memref<2x3xi16, #spirv.storage_class> return } // CHECK-LABEL: @load_from_image_2D_ui16( - // CHECK-SAME: %[[ARG0:.*]]: memref<1x1xui16, #spirv.storage_class>, %[[ARG1:.*]]: memref<1x1xui16, #spirv.storage_class> - func.func @load_from_image_2D_ui16(%arg0: memref<1x1xui16, #spirv.storage_class>, %arg1: memref<1x1xui16, #spirv.storage_class>) { -// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %arg1 : memref<1x1xui16, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> -// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %arg0 : memref<1x1xui16, #spirv.storage_class> to !spirv.ptr>, UniformConstant> - %cst = arith.constant 0 : index + // CHECK-SAME: %[[ARG0:.*]]: memref<2x3xui16, #spirv.storage_class>, %[[ARG1:.*]]: memref<2x3xui16, #spirv.storage_class> + func.func @load_from_image_2D_ui16(%arg0: memref<2x3xui16, #spirv.storage_class>, %arg1: memref<2x3xui16, #spirv.storage_class>) { +// CHECK-DAG: %[[SB:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : memref<2x3xui16, #spirv.storage_class> to !spirv.ptr [0])>, StorageBuffer> +// CHECK-DAG: %[[IMAGE_PTR:.*]] = builtin.unrealized_conversion_cast %[[ARG0]] : memref<2x3xui16, #spirv.storage_class> to !spirv.ptr>, UniformConstant> + // CHECK: %[[X:.*]] = arith.constant 2 : index + // CHECK: %[[X32:.*]] = builtin.unrealized_conversion_cast %[[X]] : index to i32 + %x = arith.constant 2 : index + // CHECK: %[[Y:.*]] = arith.constant 1 : index + // CHECK: %[[Y32:.*]] = builtin.unrealized_conversion_cast %[[Y]] : index to i32 + %y = arith.constant 1 : index // CHECK: %[[SIMAGE:.*]] = spirv.Load "UniformConstant" %[[IMAGE_PTR]] : !spirv.sampled_image> // CHECK: %[[IMAGE:.*]] = spirv.Image %[[SIMAGE]] : !spirv.sampled_image> - // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %{{.*}}, %{{.*}} : (i32, i32) -> vector<2xi32> + // CHECK: %[[COORDS:.*]] = spirv.CompositeConstruct %[[X32]], %[[Y32]] : (i32, i32) -> vector<2xi32> // CHECK: %[[RES_VEC:.*]] = spirv.ImageFetch %[[IMAGE]], %[[COORDS]] : !spirv.image, vector<2xi32> -> vector<4xui16> // CHECK: %[[RESULT:.*]] = spirv.CompositeExtract %[[RES_VEC]][0 : i32] : vector<4xui16> - %0 = memref.load %arg0[%cst, %cst] : memref<1x1xui16, #spirv.storage_class> + %0 = memref.load %arg0[%y, %x] : memref<2x3xui16, #spirv.storage_class> // CHECK: spirv.Store "StorageBuffer" %{{.*}}, %[[RESULT]] : ui16 - memref.store %0, %arg1[%cst, %cst] : memref<1x1xui16, #spirv.storage_class> + memref.store %0, %arg1[%y, %x] : memref<2x3xui16, #spirv.storage_class> return } @@ -697,4 +788,15 @@ module attributes { memref.store %0, %arg1[%cst] : memref<1xvector<1xf32>, #spirv.storage_class> return } + + // CHECK-LABEL: @load_non_perm_layout( + func.func @load_non_perm_layout(%arg0: memref<2x4xf32, #non_permutation, #spirv.storage_class>, %arg1: memref<2x4xf32, #spirv.storage_class>) { + %x = arith.constant 3 : index + %y = arith.constant 1 : index + // CHECK-NOT: spirv.Image + // CHECK-NOT: spirv.ImageFetch + %0 = memref.load %arg0[%y, %x] : memref<2x4xf32, #non_permutation, 
#spirv.storage_class> + memref.store %0, %arg1[%y, %x] : memref<2x4xf32, #spirv.storage_class> + return + } } diff --git a/mlir/test/Conversion/VectorToLLVM/pass-option-serialization.mlir b/mlir/test/Conversion/VectorToLLVM/pass-option-serialization.mlir index 323d86ac40988..7e7925a473d53 100644 --- a/mlir/test/Conversion/VectorToLLVM/pass-option-serialization.mlir +++ b/mlir/test/Conversion/VectorToLLVM/pass-option-serialization.mlir @@ -13,7 +13,7 @@ // RUN: mlir-opt --convert-vector-to-llvm --dump-pass-pipeline %s 2>&1 | FileCheck %s --check-prefix=DEFAULT -// RUN: mlir-opt --convert-vector-to-llvm='vector-contract-lowering=matmul vector-transpose-lowering=flat' \ +// RUN: mlir-opt --convert-vector-to-llvm='vector-contract-lowering=llvmintr vector-transpose-lowering=llvmintr' \ // RUN: --dump-pass-pipeline %s 2>&1 | FileCheck %s --check-prefix=NON-DEFAULT // CHECK: builtin.module( @@ -26,5 +26,5 @@ // CHECK-SAME: reassociate-fp-reductions={{[aA-zZ0-9]+}} // DEFAULT: vector-contract-lowering=dot // DEFAULT: vector-transpose-lowering=eltwise -// NON-DEFAULT: vector-contract-lowering=matmul -// NON-DEFAULT: vector-transpose-lowering=flat +// NON-DEFAULT: vector-contract-lowering=llvm +// NON-DEFAULT: vector-transpose-lowering=llvm diff --git a/mlir/test/Dialect/AMDGPU/ops.mlir b/mlir/test/Dialect/AMDGPU/ops.mlir index 369e0fff538e1..8f427e9d56f45 100644 --- a/mlir/test/Dialect/AMDGPU/ops.mlir +++ b/mlir/test/Dialect/AMDGPU/ops.mlir @@ -360,7 +360,7 @@ func.func @fat_raw_buffer_cast_easy(%m: memref<8xi32>) -> memref<8xi32, #amdgpu. // CHECK-SAME: cacheSwizzleStride(%{{[^)]*}}) // CHECK-SAME: boundsCheck(false) // CHECK-SAME: resetOffset -func.func @fat_raw_buffer_cast(%m: memref<8xi32, strided<[1], offset: ?>>, %validBytes: i32, %cacheSwizzle: i14) -> memref<8xi32, #amdgpu.address_space> { +func.func @fat_raw_buffer_cast(%m: memref<8xi32, strided<[1], offset: ?>>, %validBytes: i64, %cacheSwizzle: i14) -> memref<8xi32, #amdgpu.address_space> { %ret = amdgpu.fat_raw_buffer_cast %m validBytes(%validBytes) cacheSwizzleStride(%cacheSwizzle) boundsCheck(false) resetOffset : memref<8xi32, strided<[1], offset: ?>> to memref<8xi32, #amdgpu.address_space> func.return %ret : memref<8xi32, #amdgpu.address_space> diff --git a/mlir/test/Dialect/Arith/canonicalize.mlir b/mlir/test/Dialect/Arith/canonicalize.mlir index ca3de3a2d7703..2fe0995c9d4df 100644 --- a/mlir/test/Dialect/Arith/canonicalize.mlir +++ b/mlir/test/Dialect/Arith/canonicalize.mlir @@ -2216,6 +2216,18 @@ func.func @test_mulf1(%arg0 : f32, %arg1 : f32) -> (f32) { return %2 : f32 } +// CHECK-LABEL: @test_mulf2( +func.func @test_mulf2(%arg0 : f32) -> (f32, f32) { + // CHECK-DAG: %[[C0:.+]] = arith.constant 0.000000e+00 : f32 + // CHECK-DAG: %[[C0n:.+]] = arith.constant -0.000000e+00 : f32 + // CHECK-NEXT: return %[[C0]], %[[C0n]] + %c0 = arith.constant 0.0 : f32 + %c0n = arith.constant -0.0 : f32 + %0 = arith.mulf %c0, %arg0 fastmath : f32 + %1 = arith.mulf %c0n, %arg0 fastmath : f32 + return %0, %1 : f32, f32 +} + // ----- // CHECK-LABEL: @test_divf( diff --git a/mlir/test/Dialect/Arith/constant-fold.mlir b/mlir/test/Dialect/Arith/constant-fold.mlir new file mode 100644 index 0000000000000..172945fafdaf3 --- /dev/null +++ b/mlir/test/Dialect/Arith/constant-fold.mlir @@ -0,0 +1,18 @@ +// Test with the default (one application of the folder) and then with 2 iterations. 
+// RUN: mlir-opt %s --pass-pipeline="builtin.module(func.func(test-single-fold))" | FileCheck %s --check-prefixes=CHECK,CHECK-ONE +// RUN: mlir-opt %s --pass-pipeline="builtin.module(func.func(test-single-fold{max-iterations=2}))" | FileCheck %s --check-prefixes=CHECK,CHECK-TWO + + +// Folding entirely this requires to move the constant to the right +// before invoking the op-specific folder. +// With one iteration, we just push the constant to the right. +// With a second iteration, we actually fold the "add" (x+0->x) +// CHECK: func @recurse_fold_traits(%[[ARG0:.*]]: i32) +func.func @recurse_fold_traits(%arg0 : i32) -> i32 { + %cst0 = arith.constant 0 : i32 +// CHECK-ONE: %[[ADD:.*]] = arith.addi %[[ARG0]], + %res = arith.addi %cst0, %arg0 : i32 +// CHECK-ONE: return %[[ADD]] : i32 +// CHECK-TWO: return %[[ARG0]] : i32 + return %res : i32 +} diff --git a/mlir/test/Dialect/Arith/emulate-unsupported-floats.mlir b/mlir/test/Dialect/Arith/emulate-unsupported-floats.mlir index 99790cc45d490..fcd004ac554aa 100644 --- a/mlir/test/Dialect/Arith/emulate-unsupported-floats.mlir +++ b/mlir/test/Dialect/Arith/emulate-unsupported-floats.mlir @@ -85,3 +85,14 @@ func.func @no_expansion(%x: f32) -> f32 { %y = arith.addf %x, %c : f32 func.return %y : f32 } + +// ----- + +func.func @no_promote_select(%c: i1, %x: bf16, %y: bf16) -> bf16 { +// CHECK-LABEL: @no_promote_select +// CHECK-SAME: (%[[C:.+]]: i1, %[[X:.+]]: bf16, %[[Y:.+]]: bf16) +// CHECK: %[[Z:.+]] = arith.select %[[C]], %[[X]], %[[Y]] : bf16 +// CHECK: return %[[Z]] + %z = arith.select %c, %x, %y : bf16 + func.return %z : bf16 +} diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir index 2efb5893c8511..6054a61912532 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir @@ -380,6 +380,20 @@ func.func @execute_region_test(%t1 : tensor) // ----- +// CHECK-LABEL: func @no_inline_execute_region_not_canonicalized +func.func @no_inline_execute_region_not_canonicalized() { + %c = arith.constant 42 : i32 + // CHECK: scf.execute_region + // CHECK-SAME: no_inline + %v = scf.execute_region -> i32 no_inline { + scf.yield %c : i32 + } + // CHECK: return + return +} + +// ----- + // CHECK: func private @some_external_func(memref>) func.func private @some_external_func(tensor) @@ -810,3 +824,59 @@ module @inner_module { return %t : tensor<5xf32> } } + +// ----- + +// CHECK: func.func @custom_types( +// CHECK-SAME: %[[arg:.*]]: !test.test_memref<[4, 4], f64> +// CHECK-SAME: ) -> (!test.test_memref<[4, 8], f64>, +// CHECK-SAME: !test.test_memref<[4, 8], f64>) +func.func @custom_types(%arg: !test.test_tensor<[4, 4], f64>) + -> (!test.test_tensor<[4, 8], f64>, !test.test_tensor<[4, 8], f64>) { + // CHECK: %[[out1:.*]] = "test.dummy_memref_op"(%[[arg]]) : + // CHECK-SAME: (!test.test_memref<[4, 4], f64>) -> !test.test_memref<[4, 8], f64> + %out1 = "test.dummy_tensor_op"(%arg) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 8], f64> + + // CHECK: %[[alloc:.*]] = "test.create_memref_op" + // CHECK: %[[out2:.*]] = "test.dummy_memref_op"(%[[alloc]]) + // CHECK-SAME: (!test.test_memref<[4, 4], f64>) -> !test.test_memref<[4, 8], f64> + %alloc = "test.create_tensor_op"() : () -> !test.test_tensor<[4, 4], f64> + %out2 = "test.dummy_tensor_op"(%alloc) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 8], f64> + + // CHECK: 
return %[[out1]], %[[out2]] + return %out1, %out2 : + !test.test_tensor<[4, 8], f64>, !test.test_tensor<[4, 8], f64> +} + +// ----- + +// CHECK: func.func @custom_types_foo( +// CHECK-SAME: %[[arg:.*]]: !test.test_memref<[4, 4], f64> +// CHECK-SAME: ) -> !test.test_memref<[4, 4], f64> +func.func @custom_types_foo(%arg: !test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 4], f64> { + // CHECK: %[[out:.*]] = "test.dummy_memref_op"(%[[arg]]) + %out = "test.dummy_tensor_op"(%arg) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 4], f64> + // CHECK: return %[[out]] + return %out : !test.test_tensor<[4, 4], f64> +} + +// CHECK: func.func @custom_types_bar( +// CHECK-SAME: %[[arg:.*]]: !test.test_memref<[4, 4], f64> +// CHECK-SAME: ) -> !test.test_memref<[4, 8], f64> +func.func @custom_types_bar(%arg: !test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 8], f64> { + // CHECK: %[[call:.*]] = call @custom_types_foo(%[[arg]]) + %call = func.call @custom_types_foo(%arg) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 4], f64> + + // CHECK: %[[out:.*]] = "test.dummy_memref_op"(%[[call]]) + %out = "test.dummy_tensor_op"(%call) : (!test.test_tensor<[4, 4], f64>) + -> !test.test_tensor<[4, 8], f64> + + // CHECK: return %[[out]] + return %out : !test.test_tensor<[4, 8], f64> +} diff --git a/mlir/test/Dialect/ControlFlow/canonicalize.mlir b/mlir/test/Dialect/ControlFlow/canonicalize.mlir index bf69935a00bf0..17f7d28ba59fb 100644 --- a/mlir/test/Dialect/ControlFlow/canonicalize.mlir +++ b/mlir/test/Dialect/ControlFlow/canonicalize.mlir @@ -490,3 +490,147 @@ func.func @branchCondProp(%arg0: i1) { ^exit: return } + +// ----- + +/// Test that control-flow cycles are not simplified infinitely. + +// CHECK-LABEL: @cycle_2_blocks +// CHECK: cf.br ^bb1 +// CHECK: ^bb1: +// CHECK: cf.br ^bb1 +func.func @cycle_2_blocks() { + cf.br ^bb1 +^bb1: + cf.br ^bb2 +^bb2: + cf.br ^bb1 +} + +// CHECK-LABEL: @no_cycle_2_blocks +// CHECK: %[[VAL_0:.*]] = arith.constant 1 : i32 +// CHECK: return %[[VAL_0]] : i32 +func.func @no_cycle_2_blocks() -> i32 { + cf.br ^bb1 +^bb1: + cf.br ^bb2 +^bb2: + cf.br ^bb3 +^bb3: + %ret = arith.constant 1 : i32 + return %ret : i32 +} + +// CHECK-LABEL: @cycle_4_blocks +// CHECK: cf.br ^bb1 +// CHECK: ^bb1: +// CHECK: cf.br ^bb1 +func.func @cycle_4_blocks() { + cf.br ^bb1 +^bb1: + cf.br ^bb2 +^bb2: + cf.br ^bb3 +^bb3: + cf.br ^bb4 +^bb4: + cf.br ^bb1 +} + +// CHECK-LABEL: @no_cycle_4_blocks +// CHECK: %[[VAL_0:.*]] = arith.constant 1 : i32 +// CHECK: return %[[VAL_0]] : i32 +func.func @no_cycle_4_blocks() -> i32 { + cf.br ^bb1 +^bb1: + cf.br ^bb2 +^bb2: + cf.br ^bb3 +^bb3: + cf.br ^bb4 +^bb4: + cf.br ^bb5 +^bb5: + %ret = arith.constant 1 : i32 + return %ret : i32 +} + +// CHECK-LABEL: @delayed_3_cycle +// CHECK: cf.br ^bb1 +// CHECK: ^bb1: +// CHECK: cf.br ^bb1 +func.func @delayed_3_cycle() { + cf.br ^bb1 +^bb1: + cf.br ^bb2 +^bb2: + cf.br ^bb3 +^bb3: + cf.br ^bb4 +^bb4: + cf.br ^bb5 +^bb5: + cf.br ^bb3 +} + +// CHECK-LABEL: @cycle_1_block +// CHECK: cf.br ^bb1 +// CHECK: ^bb1: +// CHECK: cf.br ^bb1 +func.func @cycle_1_block() { + cf.br ^bb1 +^bb1: + cf.br ^bb2 +^bb2: + cf.br ^bb2 +} + +// CHECK-LABEL: @unsimplified_cycle_1 +// CHECK-SAME: %[[ARG0:.*]]: i1) { +// CHECK: cf.cond_br %[[ARG0]], ^bb1, ^bb2 +// CHECK: ^bb1: +// CHECK: cf.br ^bb2 +// CHECK: ^bb2: +// CHECK: cf.br ^bb3 +// CHECK: ^bb3: +// CHECK: cf.br ^bb3 +func.func @unsimplified_cycle_1(%c : i1) { + cf.cond_br %c, ^bb1, ^bb2 +^bb1: + cf.br ^bb2 +^bb2: + cf.br ^bb3 +^bb3: + cf.br ^bb4 +^bb4: + 
cf.br ^bb3 +} + +// Make sure we terminate when other cf passes can't help us. + +// CHECK-LABEL: @unsimplified_cycle_2 +// CHECK-SAME: %[[ARG0:.*]]: i1) { +// CHECK: cf.cond_br %[[ARG0]], ^bb1, ^bb3 +// CHECK: ^bb1: +// CHECK: cf.br ^bb2 {A} +// CHECK: ^bb2: +// CHECK: cf.br ^bb2 {E} +// CHECK: ^bb3: +// CHECK: cf.br ^bb1 +func.func @unsimplified_cycle_2(%c : i1) { + cf.cond_br %c, ^bb6, ^bb7 +^bb6: + cf.br ^bb5 {F} +^bb5: + cf.br ^bb1 {A} +^bb1: + cf.br ^bb2 {B} +^bb2: + cf.br ^bb3 {C} +^bb3: + cf.br ^bb4 {D} +^bb4: + cf.br ^bb1 {E} +^bb7: + cf.br ^bb6 +} diff --git a/mlir/test/Dialect/GPU/mapping.mlir b/mlir/test/Dialect/GPU/mapping.mlir index 395987317a1e6..b313ab69cc001 100644 --- a/mlir/test/Dialect/GPU/mapping.mlir +++ b/mlir/test/Dialect/GPU/mapping.mlir @@ -1,4 +1,5 @@ -// RUN: mlir-opt -gpu-map-parallel-loops -split-input-file %s | FileCheck %s +// RUN: mlir-opt -gpu-map-parallel-loops -split-input-file %s | FileCheck %s --check-prefix=OUTER +// RUN: mlir-opt -gpu-map-parallel-loops="mapping-policy=innermost-first" -split-input-file %s | FileCheck %s --check-prefix=INNER func.func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index, %arg3 : index) { @@ -14,14 +15,23 @@ func.func @parallel_loop(%arg0 : index, %arg1 : index, %arg2 : index, return } -// CHECK-LABEL: func @parallel_loop( -// CHECK: scf.parallel -// CHECK: scf.parallel -// CHECK: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} -// CHECK: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} -// CHECK-NOT: mapping +// OUTER-LABEL: func @parallel_loop( +// OUTER: scf.parallel +// OUTER: scf.parallel +// OUTER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// OUTER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// OUTER-NOT: mapping + +// INNER-LABEL: func @parallel_loop( +// INNER: scf.parallel +// INNER: scf.parallel +// INNER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// INNER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// INNER-NOT: mapping // ----- @@ -42,20 +52,38 @@ func.func @parallel_loop_4d(%arg0 : index, %arg1 : index, %arg2 : index, return } -// CHECK-LABEL: func @parallel_loop_4d( -// CHECK: scf.parallel -// CHECK: scf.parallel -// CHECK: scf.parallel -// CHECK: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} -// CHECK: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} -// CHECK: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, -// CHECK-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} -// CHECK-NOT: mapping +// OUTER-LABEL: func @parallel_loop_4d( +// OUTER: scf.parallel +// OUTER: scf.parallel +// OUTER: 
scf.parallel +// OUTER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// OUTER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// OUTER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// OUTER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// OUTER-NOT: mapping + +// INNER-LABEL: func @parallel_loop_4d( +// INNER: scf.parallel +// INNER: scf.parallel +// INNER: scf.parallel +// INNER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// INNER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// INNER: {mapping = [#gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>, +// INNER-SAME: #gpu.loop_dim_map (d0), bound = (d0) -> (d0)>]} +// INNER-NOT: mapping diff --git a/mlir/test/Dialect/GPU/memref-to-llvm.mlir b/mlir/test/Dialect/GPU/memref-to-llvm.mlir new file mode 100644 index 0000000000000..81a96bf29e84f --- /dev/null +++ b/mlir/test/Dialect/GPU/memref-to-llvm.mlir @@ -0,0 +1,33 @@ +// RUN: mlir-opt --convert-to-llvm %s | FileCheck %s + +// Checking that malloc and free are declared in the proper module. 
+ +// CHECK: module attributes {gpu.container_module} { +// CHECK: llvm.func @free(!llvm.ptr) +// CHECK: llvm.func @malloc(i64) -> !llvm.ptr +// CHECK: gpu.module @kernels { +// CHECK: llvm.func @free(!llvm.ptr) +// CHECK: llvm.func @malloc(i64) -> !llvm.ptr +// CHECK: gpu.func @kernel_1 +// CHECK: llvm.call @malloc({{.*}}) : (i64) -> !llvm.ptr +// CHECK: llvm.call @free({{.*}}) : (!llvm.ptr) -> () +// CHECK: gpu.return +// CHECK: } +// CHECK: } +// CHECK: } +module attributes {gpu.container_module} { + + gpu.module @kernels { + gpu.func @kernel_1() kernel { + %memref_a = memref.alloc() : memref<8x16xf32> + memref.dealloc %memref_a : memref<8x16xf32> + gpu.return + } + } + + func.func @main() { + %memref_a = memref.alloc() : memref<8x16xf32> + memref.dealloc %memref_a : memref<8x16xf32> + return + } +} diff --git a/mlir/test/Dialect/GPU/ops.mlir b/mlir/test/Dialect/GPU/ops.mlir index e3e2474d917c8..7772e7a1681c4 100644 --- a/mlir/test/Dialect/GPU/ops.mlir +++ b/mlir/test/Dialect/GPU/ops.mlir @@ -68,6 +68,31 @@ module attributes {gpu.container_module} { return } + // CHECK-LABEL: func @launch_with_attributions( + func.func @launch_with_attributions(%blk : index, %thrd : index, %float : f32, %data : memref) { + // CHECK: gpu.launch + gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %blk, %grid_y = %blk, %grid_z = %blk) + threads(%tx, %ty, %tz) in (%block_x = %thrd, %block_y = %thrd, %block_z = %thrd) + // CHECK-SAME: workgroup(%[[WGROUP1:.*]] : memref<42xf32, 3>, %[[WGROUP2:.*]] : memref<2xf32, 3>) + workgroup(%arg1: memref<42xf32, 3>, %arg2: memref<2xf32, 3>) + // CHECK-SAME: private(%[[PRIVATE1:.*]] : memref<2xf32, 5>, %[[PRIVATE2:.*]] : memref<1xf32, 5>) + private(%arg3: memref<2xf32, 5>, %arg4: memref<1xf32, 5>) + { + "use"(%float) : (f32) -> () + "use"(%data) : (memref) -> () + // CHECK: "use"(%[[WGROUP1]], %[[WGROUP2]]) + "use"(%arg1, %arg2) : (memref<42xf32, 3>, memref<2xf32, 3>) -> () + // CHECK: "use"(%[[PRIVATE1]]) + "use"(%arg3) : (memref<2xf32, 5>) -> () + // CHECK: "use"(%[[PRIVATE2]]) + "use"(%arg4) : (memref<1xf32, 5>) -> () + // CHECK: gpu.terminator + gpu.terminator + } + return + } + + gpu.module @kernels { gpu.func @kernel_1(%arg0 : f32, %arg1 : memref) kernel { %tIdX = gpu.thread_id x @@ -228,17 +253,20 @@ module attributes {gpu.container_module} { gpu.module @gpu_funcs { // CHECK-LABEL: gpu.func @kernel_1({{.*}}: f32) - // CHECK: workgroup - // CHECK: private - // CHECK: attributes gpu.func @kernel_1(%arg0: f32) - workgroup(%arg1: memref<42xf32, 3>) - private(%arg2: memref<2xf32, 5>, %arg3: memref<1xf32, 5>) + // CHECK: workgroup(%[[WGROUP1:.*]] : memref<42xf32, 3>, %[[WGROUP2:.*]] : memref<2xf32, 3>) + workgroup(%arg1: memref<42xf32, 3>, %arg2: memref<2xf32, 3>) + // CHECK: private(%[[PRIVATE1:.*]] : memref<2xf32, 5>, %[[PRIVATE2:.*]] : memref<1xf32, 5>) + private(%arg3: memref<2xf32, 5>, %arg4: memref<1xf32, 5>) kernel - attributes {foo="bar"} { - "use"(%arg1) : (memref<42xf32, 3>) -> () - "use"(%arg2) : (memref<2xf32, 5>) -> () - "use"(%arg3) : (memref<1xf32, 5>) -> () + // CHECK: attributes {foo = "bar"} + attributes {foo = "bar"} { + // CHECK: "use"(%[[WGROUP1]], %[[WGROUP2]]) + "use"(%arg1, %arg2) : (memref<42xf32, 3>, memref<2xf32, 3>) -> () + // CHECK: "use"(%[[PRIVATE1]]) + "use"(%arg3) : (memref<2xf32, 5>) -> () + // CHECK: "use"(%[[PRIVATE2]]) + "use"(%arg4) : (memref<1xf32, 5>) -> () gpu.return } diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir index 1adecf264e8f6..627abd0665d8c 100644 --- 
a/mlir/test/Dialect/LLVMIR/invalid.mlir +++ b/mlir/test/Dialect/LLVMIR/invalid.mlir @@ -2014,3 +2014,24 @@ llvm.mlir.alias external @alias_resolver : !llvm.ptr { } // expected-error@+1 {{'llvm.mlir.ifunc' op must have a function resolver}} llvm.mlir.ifunc external @foo : !llvm.func, !llvm.ptr @alias_resolver {dso_local} + +// ----- + +llvm.func @invalid_sincos_nonhomogeneous_return_type(%f: f32) -> () { + // expected-error@+1 {{op expected result type to be an homogeneous struct with two elements matching the operand type}} + llvm.intr.sincos(%f) : (f32) -> !llvm.struct<(f32, f64)> +} + +// ----- + +llvm.func @invalid_sincos_non_struct_return_type(%f: f32) -> () { + // expected-error@+1 {{op expected result type to be an homogeneous struct with two elements matching the operand type}} + llvm.intr.sincos(%f) : (f32) -> f32 +} + +// ----- + +llvm.func @invalid_sincos_gt_2_element_struct_return_type(%f: f32) -> () { + // expected-error@+1 {{op expected result type to be an homogeneous struct with two elements matching the operand type}} + llvm.intr.sincos(%f) : (f32) -> !llvm.struct<(f32, f32, f32)> +} diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir index 959bb35302b20..6134695e9ced6 100644 --- a/mlir/test/Dialect/LLVMIR/rocdl.mlir +++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir @@ -29,6 +29,20 @@ func.func @rocdl_special_regs() -> i32 { llvm.return %0 : i32 } +func.func @rocdl.fmed3.scalar(%a: f32, %b: f32, %c: f32) -> f32 { + // CHECK-LABEL: rocdl.fmed3.scalar + // CHECK: %0 = rocdl.fmed3 %arg0, %arg1, %arg2 : f32 + %0 = rocdl.fmed3 %a, %b, %c : f32 + llvm.return %0 : f32 +} + +func.func @rocdl.fmed3.vector(%a: vector<4xf16>, %b: vector<4xf16>, %c: vector<4xf16>) -> vector<4xf16> { + // CHECK-LABEL: rocdl.fmed3.vector + // CHECK: %0 = rocdl.fmed3 %arg0, %arg1, %arg2 : vector<4xf16> + %0 = rocdl.fmed3 %a, %b, %c : vector<4xf16> + llvm.return %0 : vector<4xf16> +} + func.func @rocdl.barrier() { // CHECK: rocdl.barrier rocdl.barrier @@ -652,7 +666,7 @@ llvm.func @rocdl.global.load.lds(%src : !llvm.ptr<1>, %dst: !llvm.ptr<3>) { llvm.func @rocdl.make.buffer.rsrc(%ptr : !llvm.ptr, %stride : i16, - %numRecords : i32, + %numRecords : i64, %flags : i32) -> !llvm.ptr<8> { // CHECK-LABEL: rocdl.make.buffer.rsrc // CHECK: %{{.*}} = rocdl.make.buffer.rsrc %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.ptr to <8> @@ -1054,6 +1068,38 @@ llvm.func @rocdl.cvt.scale.pk8(%i32: i32, %v2xi32: vector<2xi32>, %scale: i32) { // ----- +// CHECK-LABEL: rocdl.cvt.scalef32.pk8 +llvm.func @rocdl.cvt.scalef32.pk8(%v8xf32: vector<8xf32>, + %v8xf16: vector<8xf16>, + %v8xbf16: vector<8xbf16>, + %scale: f32) { + + // CHECK: rocdl.cvt.scalef32.pk8.fp8.f32 + %0 = rocdl.cvt.scalef32.pk8.fp8.f32 %v8xf32, %scale : vector<2xi32> + // CHECK: rocdl.cvt.scalef32.pk8.bf8.f32 + %1 = rocdl.cvt.scalef32.pk8.bf8.f32 %v8xf32, %scale : vector<2xi32> + // CHECK: rocdl.cvt.scalef32.pk8.fp4.f32 + %2 = rocdl.cvt.scalef32.pk8.fp4.f32 %v8xf32, %scale : i32 + + // CHECK: rocdl.cvt.scalef32.pk8.fp8.f16 + %3 = rocdl.cvt.scalef32.pk8.fp8.f16 %v8xf16, %scale : vector<2xi32> + // CHECK: rocdl.cvt.scalef32.pk8.bf8.f16 + %4 = rocdl.cvt.scalef32.pk8.bf8.f16 %v8xf16, %scale : vector<2xi32> + // CHECK: rocdl.cvt.scalef32.pk8.fp4.f16 + %5 = rocdl.cvt.scalef32.pk8.fp4.f16 %v8xf16, %scale : i32 + + // CHECK: rocdl.cvt.scalef32.pk8.fp8.bf16 + %6 = rocdl.cvt.scalef32.pk8.fp8.bf16 %v8xbf16, %scale : vector<2xi32> + // CHECK: rocdl.cvt.scalef32.pk8.bf8.bf16 + %7 = rocdl.cvt.scalef32.pk8.bf8.bf16 %v8xbf16, %scale : vector<2xi32> + // 
CHECK: rocdl.cvt.scalef32.pk8.fp4.bf16 + %8 = rocdl.cvt.scalef32.pk8.fp4.bf16 %v8xbf16, %scale : i32 + + llvm.return +} + +// ----- + // CHECK-LABEL: rocdl.cvt.scale.pk16 llvm.func @rocdl.cvt.scale.pk16(%v3xi32: vector<3xi32>, %scale:i32) { diff --git a/mlir/test/Dialect/LLVMIR/xevm.mlir b/mlir/test/Dialect/LLVMIR/xevm.mlir index bb1f650a1cd12..66fb2949a270f 100644 --- a/mlir/test/Dialect/LLVMIR/xevm.mlir +++ b/mlir/test/Dialect/LLVMIR/xevm.mlir @@ -116,3 +116,39 @@ func.func @prefetch(%ptr: !llvm.ptr<1>) { // CHECK-LABEL: @xevm_module [#xevm.target] { gpu.module @xevm_module [#xevm.target]{ } + +// ----- +// CHECK-LABEL: @xevm_special_ids +llvm.func @xevm_special_ids() -> i32 { + // CHECK: xevm.local_id.x : i32 + %1 = xevm.local_id.x : i32 + // CHECK: xevm.local_id.y : i32 + %2 = xevm.local_id.y : i32 + // CHECK: xevm.local_id.z : i32 + %3 = xevm.local_id.z : i32 + // CHECK: xevm.local_size.x : i32 + %4 = xevm.local_size.x : i32 + // CHECK: xevm.local_size.y : i32 + %5 = xevm.local_size.y : i32 + // CHECK: xevm.local_size.z : i32 + %6 = xevm.local_size.z : i32 + // CHECK: xevm.group_id.x : i32 + %7 = xevm.group_id.x : i32 + // CHECK: xevm.group_id.y : i32 + %8 = xevm.group_id.y : i32 + // CHECK: xevm.group_id.z : i32 + %9 = xevm.group_id.z : i32 + // CHECK: xevm.group_count.x : i32 + %10 = xevm.group_count.x : i32 + // CHECK: xevm.group_count.y : i32 + %11 = xevm.group_count.y : i32 + // CHECK: xevm.group_count.z : i32 + %12 = xevm.group_count.z : i32 + // CHECK: xevm.lane_id : i32 + %14 = xevm.lane_id : i32 + // CHECK: xevm.subgroup_size : i32 + %39 = xevm.subgroup_size : i32 + // CHECK: xevm.subgroup_id : i32 + %40 = xevm.subgroup_id : i32 + llvm.return %1 : i32 +} diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir index 5c5f7e861d37d..26d2d98572f47 100644 --- a/mlir/test/Dialect/Linalg/canonicalize.mlir +++ b/mlir/test/Dialect/Linalg/canonicalize.mlir @@ -1756,10 +1756,11 @@ func.func @pack_unpack(%t: tensor<16x16x?x?xf32>, %tile1: index, %tile2: index) // CHECK-SAME: %[[T:.+]]: tensor<16x16x8x8xf32> // CHECK: return %[[T]] : tensor<16x16x8x8xf32> func.func @pack_unpack(%t: tensor<16x16x8x8xf32>) -> tensor<16x16x8x8xf32> { + %cst = arith.constant 0.000000e+00 : f32 %tensor_empty = tensor.empty() : tensor<128x128xf32> %unpacked = linalg.unpack %t inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %tensor_empty : tensor<16x16x8x8xf32> -> tensor<128x128xf32> %tensor_empty1 = tensor.empty() : tensor<16x16x8x8xf32> - %packed = linalg.pack %unpacked inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %tensor_empty1 : tensor<128x128xf32> -> tensor<16x16x8x8xf32> + %packed = linalg.pack %unpacked padding_value(%cst : f32) inner_dims_pos = [0, 1] inner_tiles = [8, 8] into %tensor_empty1 : tensor<128x128xf32> -> tensor<16x16x8x8xf32> return %packed : tensor<16x16x8x8xf32> } diff --git a/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir b/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir index 8627fcd2576b9..152a392afe247 100644 --- a/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir +++ b/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir @@ -26,6 +26,26 @@ module attributes {transform.with_named_sequence} { // ----- +// Memref semantics is not supported. +// Check that we emit an error. 
+func.func @negative_conv_memref(%arg0: memref<1x16x16x4xf32>, %arg1: memref<16x3x3x4xf32>, %arg2: memref<1x14x14x16xf32>) { + // expected-note@below {{when applied to this op}} + linalg.conv_2d_nhwc_fhwc {dilations = dense<1> : memref<2xi64>, strides = dense<1> : memref<2xi64> } + ins(%arg0, %arg1: memref<1x16x16x4xf32>, memref<16x3x3x4xf32>) outs(%arg2: memref<1x14x14x16xf32>) + return +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_fhwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op + // expected-error@below {{failed to apply}} + %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield + } +} + +// ----- + // Check that we get the proper handles for the img2col tensor producer // and the final instruction. @@ -267,6 +287,31 @@ module attributes {transform.with_named_sequence} { // ----- +// Check that the encoding on the filter (weights) tensor is propagated when applying the transform. + +// CHECK: func.func @batch_nchw_conv_with_filter_encoding(%[[INPUT:.+]]: tensor<8x4x16x16xf32>, %[[FILTER:.*]]: tensor<16x4x3x3xf32, 42 : i32>, %[[OUTPUT:.*]]: tensor<8x16x14x14xf32>) +// CHECK-DAG: %[[COLLAPSED_FILTER:.+]] = tensor.collapse_shape %[[FILTER]] + // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]] : tensor<16x4x3x3xf32, 42 : i32> into tensor<16x36xf32, 42 : i32> +// CHECK: %[[COL_TENSOR:.+]] = linalg.generic {{.*}} ins(%[[INPUT]] : tensor<8x4x16x16xf32>) +// CHECK: %[[MATMUL_RESULT:.+]] = linalg.generic {{.*}} ins(%[[COLLAPSED_FILTER]], %[[COL_TENSOR]] : tensor<16x36xf32, 42 : i32>, tensor<8x36x196xf32>) +func.func @batch_nchw_conv_with_filter_encoding(%arg0: tensor<8x4x16x16xf32>, %arg1: tensor<16x4x3x3xf32, 42 : i32>, %arg2: tensor<8x16x14x14xf32>) -> tensor<8x16x14x14xf32> { + %0 = linalg.conv_2d_nchw_fchw + {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> } + ins(%arg0, %arg1: tensor<8x4x16x16xf32>, tensor<16x4x3x3xf32, 42 : i32>) + outs(%arg2: tensor<8x16x14x14xf32>) -> tensor<8x16x14x14xf32> + return %0 : tensor<8x16x14x14xf32> +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["linalg.conv_2d_nchw_fchw"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield + } +} + +// ----- + // CHECK: IR printer: tensor_producer // CHECK-NEXT: %[[COL_TENSOR:.+]] = linalg.generic // CHECK-SAME: affine_map<(d0, d1, d2) -> (d0, d1 floordiv 14 + d2 floordiv 12, d1 mod 14 + (d2 mod 12) floordiv 4, d2 mod 4)> @@ -290,7 +335,7 @@ module attributes {transform.with_named_sequence} { // CHECK-DAG: %[[COLLAPSED_OUT:.+]] = tensor.collapse_shape %[[OUTPUT]] {{\[}}[0], [1, 2], [3]] : tensor<1x14x14x16xf32> into tensor<1x196x16xf32> // CHECK: %[[INIT_COL_TENSOR:.+]] = tensor.empty() : tensor<1x196x36xf32> // CHECK: %[[COL_TENSOR:.+]] = linalg.generic -// CHECK-SAME: [#[[MAP0]], #[[MAP1]]] +// CHECK-SAME: [#[[MAP0]], #[[MAP1]]], {{.*}} ins(%[[INPUT]] : tensor<1x16x16x4xf32>) outs(%[[INIT_COL_TENSOR]] : tensor<1x196x36xf32>) // CHECK: ^bb0(%[[OUT_DATA:.+]]: f32) // CHECK: linalg.yield %{{.+}} : f32 // CHECK: %[[MATMUL_RESULT:.+]] = 
linalg.generic @@ -327,6 +372,31 @@ module attributes {transform.with_named_sequence} { // ----- +// Check that the encoding on the filter (weights) tensor is propagated when applying the transform. + +// CHECK: func.func @conv_2d_nhwc_fhwc_with_filter_encoding(%[[INPUT:.+]]: tensor<1x16x16x4xf32>, %[[FILTER:.*]]: tensor<16x3x3x4xf32, 42 : i32>, %[[OUTPUT:.*]]: tensor<1x14x14x16xf32>) +// CHECK-DAG: %[[COLLAPSED_FILTER:.+]] = tensor.collapse_shape %[[FILTER]] + // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]] : tensor<16x3x3x4xf32, 42 : i32> into tensor<16x36xf32, 42 : i32> +// CHECK: %[[COL_TENSOR:.+]] = linalg.generic {{.*}} ins(%[[INPUT]] : tensor<1x16x16x4xf32>) +// CHECK: %[[MATMUL_RESULT:.+]] = linalg.generic {{.*}} ins(%[[COL_TENSOR]], %[[COLLAPSED_FILTER]] : tensor<1x196x36xf32>, tensor<16x36xf32, 42 : i32>) +func.func @conv_2d_nhwc_fhwc_with_filter_encoding(%input: tensor<1x16x16x4xf32>, %filter: tensor<16x3x3x4xf32, 42 : i32>, %out: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> { + %0 = linalg.conv_2d_nhwc_fhwc + { dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> } + ins(%input, %filter: tensor<1x16x16x4xf32>, tensor<16x3x3x4xf32, 42 : i32>) + outs(%out: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> + return %0 : tensor<1x14x14x16xf32> +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_fhwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield + } +} + +// ----- + // Check for signed extend when the input type is smaller than the accumulator type. 
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> diff --git a/mlir/test/Dialect/Linalg/data-layout-propagation.mlir b/mlir/test/Dialect/Linalg/data-layout-propagation.mlir index a5f8d63a3e912..7a16bc0a4faee 100644 --- a/mlir/test/Dialect/Linalg/data-layout-propagation.mlir +++ b/mlir/test/Dialect/Linalg/data-layout-propagation.mlir @@ -1450,6 +1450,33 @@ func.func @push_unpack_in_padded_domain_out_used(%arg0: tensor<8x8x4x8xf32>, %ar // ----- +#map = affine_map<(d0, d1) -> (d0, d1)> +func.func @push_unpack_in_padded_domain_multiple_inputs(%arg0: tensor<1x4x16x16xf32>, %arg1: tensor<8x64xf32>, %arg2: tensor<8x64xf32>) -> tensor<8x64xf32> { + %0 = tensor.empty() : tensor<8x64xf32> + %unpack = linalg.unpack %arg0 inner_dims_pos = [0, 1] inner_tiles = [16, 16] into %0 : tensor<1x4x16x16xf32> -> tensor<8x64xf32> + %1 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg1, %unpack : tensor<8x64xf32>, tensor<8x64xf32>) outs(%arg2 : tensor<8x64xf32>) { + ^bb0(%in: f32, %in_0: f32, %out: f32): + %2 = arith.addf %in, %in_0 : f32 + linalg.yield %2 : f32 + } -> tensor<8x64xf32> + return %1 : tensor<8x64xf32> +} +// CHECK-LABEL: func.func @push_unpack_in_padded_domain_multiple_inputs +// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]] +// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]] +// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]] +// CHECK-DAG: %[[POISON:.+]] = ub.poison : f32 +// CHECK: %[[PACK:.+]] = linalg.pack %[[ARG1]] padding_value(%[[POISON]] : f32) +// CHECK-SAME: inner_dims_pos = [0, 1] inner_tiles = [16, 16] +// CHECK: %[[ELEM:.+]] = linalg.generic +// CHECK: ins(%[[PACK]], %[[ARG0]] +// CHECK: %[[UNPACK:.+]] = linalg.unpack %[[ELEM]] +// CHECK-SAME: inner_dims_pos = [0, 1] inner_tiles = [16, 16] +// CHECK-SAME: into %[[ARG2]] +// CHECK: return %[[UNPACK]] + +// ----- + module { func.func @push_extract_through_generic(%arg0: tensor<128x7x128xf32>, %arg1: tensor, %arg2: tensor, %arg3: index) -> tensor { %extracted_slice = tensor.extract_slice %arg0[0, 0, %arg3] [128, 7, %arg3] [1, 1, 1] : tensor<128x7x128xf32> to tensor<128x7x?xf32> @@ -1473,7 +1500,7 @@ module { // CHECK: } : tensor to tensor // CHECK: %[[EMPTY:.+]] = tensor.empty() : tensor<128x5x128xbf16> // CHECK: %[[GENERIC:.+]] = linalg.generic -// CHECK-SAME: ins(%[[ARG0]], %[[PADDED]] +// CHECK-SAME: ins(%[[ARG0]], %[[PADDED]] // CHECK-SAME: outs(%[[EMPTY]] // CHECK: %[[EXTRACT:.+]] = tensor.extract_slice %3[%[[ARG3]], 0, 0] [%[[ARG3]], 5, 128] [1, 1, 1] : tensor<128x5x128xbf16> to tensor // CHECK: return %[[EXTRACT]] @@ -1492,7 +1519,7 @@ func.func @nopush_extract_through_generic_nodimexpr1(%arg0: tensor<128x7x128xf32 // CHECK-LABEL: func.func @nopush_extract_through_generic_nodimexpr1 // CHECK: %[[GENERIC:.+]] = linalg.generic -// CHECK: return %[[GENERIC]] +// CHECK: return %[[GENERIC]] // ----- @@ -1508,7 +1535,7 @@ func.func @nopush_extract_through_generic_nodimexpr2(%arg0: tensor<128x?x128xf32 // CHECK-LABEL: func.func @nopush_extract_through_generic_nodimexpr2 // CHECK: %[[GENERIC:.+]] = linalg.generic -// CHECK: return %[[GENERIC]] +// CHECK: return %[[GENERIC]] // ----- @@ -1575,7 +1602,7 @@ func.func @push_extract_through_generic_rank0_operand(%arg0: tensor<128x128xf32> // CHECK-LABEL: func.func @push_extract_through_generic_rank0_operand // CHECK: %[[GENERIC:.+]] = linalg.generic -// CHECK: %[[EXTRACT:.+]] = tensor.extract_slice %[[GENERIC]] +// CHECK: %[[EXTRACT:.+]] = tensor.extract_slice %[[GENERIC]] // CHECK: return %[[EXTRACT]] // ----- diff --git 
a/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir b/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir index c09046b08e898..35f520a9f22a8 100644 --- a/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir +++ b/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir @@ -339,8 +339,8 @@ module attributes {transform.with_named_sequence} { // CHECK-LABEL: func.func @test_vectorize_pack( // CHECK-SAME: %[[VAL_0:.*]]: tensor<32x8x16xf32>, // CHECK-SAME: %[[VAL_1:.*]]: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> { -// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32 -// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[VAL_2:.*]] = ub.poison : f32 +// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, true, true]} : tensor<32x8x16xf32>, vector<32x8x16xf32> // CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32> // CHECK: %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [1, 3, 0, 4, 2] : vector<32x4x2x1x16xf32> to vector<4x1x32x16x2xf32> diff --git a/mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir b/mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir index aa86678ba405f..62bf1f55c9af2 100644 --- a/mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir +++ b/mlir/test/Dialect/Linalg/vectorization/linalg-ops.mlir @@ -1068,16 +1068,16 @@ module attributes {transform.with_named_sequence} { // CHECK-SAME: %[[DEST:.*]]: tensor, // CHECK-SAME: %[[SRC:.*]]: tensor func.func @test_vectorize_dynamic_shapes_unpack_scalable_vec(%dest: tensor, %src: tensor) -> tensor { - // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 - // CHECK: %[[C01:.*]] = arith.constant 0 - // CHECK: %[[C02:.*]] = arith.constant 0 + // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32 + // CHECK-DAG: %[[C01:.*]] = arith.constant 0 + // CHECK-DAG: %[[C02:.*]] = arith.constant 0 // CHECK: %[[DIM4:.*]] = tensor.dim %[[SRC]], %[[C02]] : tensor // CHECK: %[[CNST14:.*]] = arith.constant 1 // CHECK: %[[DIM6:.*]] = tensor.dim %[[SRC]], %[[CNST14]] : tensor // CHECK: %[[CNST16:.*]] = arith.constant 16 : index // CHECK: %[[CNST2:.*]] = arith.constant 2 : index // CHECK: %[[MASK_READ:.*]] = vector.create_mask %[[DIM4]], %[[DIM6]], %[[CNST16]], %[[CNST2]] : vector<2x1x[16]x2xi1> - // CHECK: %[[READ:.*]] = vector.mask %[[MASK_READ]] {{.*}} vector.transfer_read %{{.*}} : tensor, vector<2x1x[16]x2xf32> } : vector<2x1x[16]x2xi1> -> vector<2x1x[16]x2xf32> + // CHECK: %[[READ:.*]] = vector.mask %[[MASK_READ]] {{.*}} vector.transfer_read %{{.*}} %[[PAD]] {{.*}}: tensor, vector<2x1x[16]x2xf32> } : vector<2x1x[16]x2xi1> -> vector<2x1x[16]x2xf32> // CHECK: %[[TR:.*]] = vector.transpose %[[READ]], [0, 3, 1, 2] : vector<2x1x[16]x2xf32> to vector<2x2x1x[16]xf32> // CHECK: %[[SC:.*]] = vector.shape_cast %[[TR]] : vector<2x2x1x[16]xf32> to vector<4x[16]xf32> // CHECK: %[[MASK_WRITE:.*]] = vector.create_mask {{.*}} : vector<4x[16]xi1> @@ -1100,9 +1100,9 @@ module attributes {transform.with_named_sequence} { // CHECK-SAME: %[[DEST:.*]]: tensor, // CHECK-SAME: %[[SRC:.*]]: tensor func.func @test_vectorize_dynamic_shapes_unpack_scalable_vec_and_tile_size(%dest: tensor, %src: tensor) -> tensor { - // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 - // CHECK: %[[C01:.*]] = arith.constant 0 - // CHECK: %[[C02:.*]] = arith.constant 0 + // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32 + // CHECK-DAG: 
%[[C01:.*]] = arith.constant 0 + // CHECK-DAG: %[[C02:.*]] = arith.constant 0 // CHECK: %[[DIM4:.*]] = tensor.dim %[[SRC]], %[[C02]] : tensor // CHECK: %[[C1_2:.*]] = arith.constant 1 // CHECK: %[[DIM6:.*]] = tensor.dim %[[SRC]], %[[C1_2]] : tensor @@ -1110,7 +1110,7 @@ func.func @test_vectorize_dynamic_shapes_unpack_scalable_vec_and_tile_size(%dest // CHECK: %[[DIM_2:.*]] = tensor.dim %[[SRC]], %[[C2]] : tensor // CHECK: %[[C2_1:.*]] = arith.constant 2 : index // CHECK: %[[MASK_READ:.*]] = vector.create_mask %[[DIM4]], %[[DIM6]], %[[DIM_2]], %[[C2_1]] : vector<2x1x[16]x2xi1> - // CHECK: %[[READ:.*]] = vector.mask %[[MASK_READ]] {{.*}} vector.transfer_read %{{.*}} : tensor, vector<2x1x[16]x2xf32> } : vector<2x1x[16]x2xi1> -> vector<2x1x[16]x2xf32> + // CHECK: %[[READ:.*]] = vector.mask %[[MASK_READ]] {{.*}} vector.transfer_read %{{.*}} %[[PAD]] {{.*}}: tensor, vector<2x1x[16]x2xf32> } : vector<2x1x[16]x2xi1> -> vector<2x1x[16]x2xf32> // CHECK: %[[TR:.*]] = vector.transpose %[[READ]], [0, 3, 1, 2] : vector<2x1x[16]x2xf32> to vector<2x2x1x[16]xf32> // CHECK: %[[SC:.*]] = vector.shape_cast %[[TR]] : vector<2x2x1x[16]xf32> to vector<4x[16]xf32> // CHECK: %[[MASK_WRITE:.*]] = vector.create_mask {{.*}} : vector<4x[16]xi1> @@ -1138,14 +1138,14 @@ module attributes {transform.with_named_sequence} { // CHECK-SAME: %[[SRC:.*]]: tensor<8x8x32x16xf32> // CHECK-SAME: %[[DEST:.*]]: tensor<256x128xf32> func.func @test_vectorize_unpack(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>) -> tensor<256x128xf32> { - // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32 - // CHECK: %[[C0:.*]]= arith.constant 0 : index - // CHECK: %[[C8:.*]] = arith.constant 8 : index - // CHECK: %[[C80:.*]] = arith.constant 8 : index - // CHECK: %[[C32:.*]] = arith.constant 32 : index - // CHECK: %[[C16:.*]] = arith.constant 16 : index + // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32 + // CHECK-DAG: %[[C0:.*]]= arith.constant 0 : index + // CHECK-DAG: %[[C8:.*]] = arith.constant 8 : index + // CHECK-DAG: %[[C80:.*]] = arith.constant 8 : index + // CHECK-DAG: %[[C32:.*]] = arith.constant 32 : index + // CHECK-DAG: %[[C16:.*]] = arith.constant 16 : index // CHECK: %[[MSK0:.*]] = vector.create_mask %[[C8]], %[[C80]], %[[C32]], %[[C16]] : vector<16x8x32x16xi1> - // CHECK: %[[READ0:.*]] = vector.mask %[[MSK0]] { vector.transfer_read %[[SRC]]{{.*}}} : vector<16x8x32x16xi1> -> vector<16x8x32x16xf32> + // CHECK: %[[READ0:.*]] = vector.mask %[[MSK0]] { vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : vector<16x8x32x16xi1> -> vector<16x8x32x16xf32> // CHECK: %[[TRANSP0:.*]] = vector.transpose %[[READ0]], [0, 2, 1, 3] : vector<16x8x32x16xf32> to vector<16x32x8x16xf32> // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP0]] : vector<16x32x8x16xf32> to vector<512x128xf32> // CHECK: %[[C01:.*]] = arith.constant 0 : index @@ -1171,9 +1171,9 @@ func.func @test_vectorize_unpack(%source: tensor<8x8x32x16xf32>, %dest: tensor<2 // CHECK-SAME: %[[SRC:.*]]: tensor<8x8x32x16xf32> // CHECK-SAME: %[[DEST:.*]]: tensor<256x128xf32> func.func @test_vectorize_unpack_no_masks(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>) -> tensor<256x128xf32> { - // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32 - // CHECK: %[[C0:.*]] = arith.constant 0 : index - // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> + // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32 + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}
%[[PAD]] {{.*}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> // CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [0, 2, 1, 3] : vector<8x8x32x16xf32> to vector<8x32x8x16xf32> // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<8x32x8x16xf32> to vector<256x128xf32> // CHECK: %[[C00:.*]] = arith.constant 0 : index @@ -1196,9 +1196,9 @@ func.func @test_vectorize_unpack_no_masks(%source: tensor<8x8x32x16xf32>, %dest: // CHECK-SAME: %[[SRC:.*]]: tensor<8x8x32x16xf32> // CHECK-SAME: %[[DEST:.*]]: tensor<256x128xf32> func.func @test_vectorize_unpack_with_outer_perm(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>) -> tensor<256x128xf32> { - // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32 - // CHECK: %[[C0:.*]] = arith.constant 0 : index - // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> + // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32 + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> // CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [1, 2, 0, 3] : vector<8x8x32x16xf32> to vector<8x32x8x16xf32> // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<8x32x8x16xf32> to vector<256x128xf32> // CHECK: %[[C00:.*]] = arith.constant 0 : index @@ -1221,9 +1221,9 @@ func.func @test_vectorize_unpack_no_masks(%source: tensor<8x8x32x16xf32>, %dest: // CHECK-SAME: %[[SRC:.*]]: tensor<8x8x32x16xf32> // CHECK-SAME: %[[DEST:.*]]: tensor<256x128xf32> func.func @test_vectorize_unpack_no_vector_sizes(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>) -> tensor<256x128xf32> { - // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32 - // CHECK: %[[C0:.*]] = arith.constant 0 : index - // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> + // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32 + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : tensor<8x8x32x16xf32>, vector<8x8x32x16xf32> // CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [0, 2, 1, 3] : vector<8x8x32x16xf32> to vector<8x32x8x16xf32> // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<8x32x8x16xf32> to vector<256x128xf32> // CHECK: %[[C00:.*]] = arith.constant 0 : index @@ -1246,9 +1246,9 @@ func.func @test_vectorize_unpack_no_vector_sizes(%source: tensor<8x8x32x16xf32>, // CHECK-SAME: %[[SRC:.*]]: tensor<8x4x16x16xf32> // CHECK-SAME: %[[DEST:.*]]: tensor<64x127xf32> func.func @test_vectorize_unpack_no_vector_sizes_slice_output(%source: tensor<8x4x16x16xf32>, %dest: tensor<64x127xf32>) -> tensor<64x127xf32> { - // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32 - // CHECK: %[[C0:.*]] = arith.constant 0 : index - // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<8x4x16x16xf32>, vector<8x4x16x16xf32> + // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32 + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : tensor<8x4x16x16xf32>, vector<8x4x16x16xf32> // CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [1, 2, 0, 3] : vector<8x4x16x16xf32> to vector<4x16x8x16xf32> // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<4x16x8x16xf32> to vector<64x128xf32> // CHECK: %[[C00:.*]] = arith.constant 0 : index @@ -1275,9 +1275,9 @@ func.func 
@test_vectorize_unpack_no_vector_sizes_permute(%source: tensor<4x7x4xf %0 = linalg.unpack %source outer_dims_perm=[1, 0] inner_dims_pos = [1] inner_tiles = [4] into %dest : tensor<4x7x4xf32> -> tensor<7x16xf32> return %0 : tensor<7x16xf32> } - // CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32 - // CHECK: %[[C0:.*]] = arith.constant 0 : index - // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}}} : tensor<4x7x4xf32>, vector<4x7x4xf32> + // CHECK-DAG: %[[PAD:.*]] = ub.poison : f32 + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + // CHECK: %[[READ:.*]] = vector.transfer_read %[[SRC]]{{.*}} %[[PAD]] {{.*}} : tensor<4x7x4xf32>, vector<4x7x4xf32> // CHECK: %[[TRANSP:.*]] = vector.transpose %[[READ]], [1, 0, 2] : vector<4x7x4xf32> to vector<7x4x4xf32> // CHECK: %[[SHAPC:.*]] = vector.shape_cast %[[TRANSP]] : vector<7x4x4xf32> to vector<7x16xf32> // CHECK: %[[C00:.*]] = arith.constant 0 : index @@ -1308,7 +1308,7 @@ func.func @test_vectorize_pack(%src: tensor<32x8x16xf32>, %dest: tensor<4x1x32x1 %pack = linalg.pack %src outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %dest : tensor<32x8x16xf32> -> tensor<4x1x32x16x2xf32> return %pack : tensor<4x1x32x16x2xf32> } -// CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32 +// CHECK-DAG: %[[CST:.*]] = ub.poison : f32 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index // CHECK: %[[READ:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]], %[[C0]]], %[[CST]] // CHECK-SAME: {in_bounds = [true, true, true]} : tensor<32x8x16xf32>, vector<32x8x16xf32> @@ -1376,7 +1376,7 @@ func.func @test_vectorize_dynamic_pack(%src: tensor, %dest: tensor } -// CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32 +// CHECK-DAG: %[[CST:.*]] = ub.poison : f32 // CHECK-DAG: %[[C0_1:.*]] = arith.constant 0 : index // CHECK-DAG: %[[C0_0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[C1_0:.*]] = arith.constant 1 : index @@ -1417,7 +1417,7 @@ func.func @test_vectorize_pack_no_vector_sizes(%src: tensor<64x4xf32>, %dest: te %pack = linalg.pack %src outer_dims_perm = [1, 0] inner_dims_pos = [0, 1] inner_tiles = [16, 2] into %dest : tensor<64x4xf32> -> tensor<2x4x16x2xf32> return %pack : tensor<2x4x16x2xf32> } -// CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32 +// CHECK-DAG: %[[CST:.*]] = ub.poison : f32 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index // CHECK: %[[READ:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CST]] // CHECK-SAME: {in_bounds = [true, true]} : tensor<64x4xf32>, vector<64x4xf32> diff --git a/mlir/test/Dialect/Math/ops.mlir b/mlir/test/Dialect/Math/ops.mlir index cb10fc4397ffc..f085d1c62ea86 100644 --- a/mlir/test/Dialect/Math/ops.mlir +++ b/mlir/test/Dialect/Math/ops.mlir @@ -62,6 +62,18 @@ func.func @sin(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) { return } +// CHECK-LABEL: func @sincos( +// CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>) +func.func @sincos(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) { + // CHECK: %{{.*}} = math.sincos %[[F]] : f32 + %0:2 = math.sincos %f : f32 + // CHECK: %{{.*}} = math.sincos %[[V]] : vector<4xf32> + %1:2 = math.sincos %v : vector<4xf32> + // CHECK: %{{.*}} = math.sincos %[[T]] : tensor<4x4x?xf32> + %2:2 = math.sincos %t : tensor<4x4x?xf32> + return +} + // CHECK-LABEL: func @erf( // CHECK-SAME: %[[F:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[T:.*]]: tensor<4x4x?xf32>) func.func @erf(%f: f32, %v: vector<4xf32>, %t: tensor<4x4x?xf32>) { diff --git 
a/mlir/test/Dialect/Math/sincos-fusion.mlir b/mlir/test/Dialect/Math/sincos-fusion.mlir new file mode 100644 index 0000000000000..29fb9f12475b8 --- /dev/null +++ b/mlir/test/Dialect/Math/sincos-fusion.mlir @@ -0,0 +1,86 @@ +// RUN: mlir-opt -math-sincos-fusion %s | FileCheck %s + +// CHECK-LABEL: func.func @sincos_fusion( +// CHECK-SAME: %[[ARG0:.*]]: f32, +// CHECK-SAME: %[[ARG1:.*]]: f32) -> (f32, f32, f32, f32) { +// CHECK: %[[VAL_0:.*]], %[[VAL_1:.*]] = math.sincos %[[ARG0]] : f32 +// CHECK: %[[VAL_2:.*]], %[[VAL_3:.*]] = math.sincos %[[ARG1]] : f32 +// CHECK: return %[[VAL_0]], %[[VAL_1]], %[[VAL_3]], %[[VAL_2]] : f32, f32, f32, f32 +// CHECK: } +func.func @sincos_fusion(%arg0 : f32, %arg1 : f32) -> (f32, f32, f32, f32) { + %0 = math.sin %arg0 : f32 + %1 = math.cos %arg0 : f32 + + %2 = math.cos %arg1 : f32 + %3 = math.sin %arg1 : f32 + + func.return %0, %1, %2, %3 : f32, f32, f32, f32 +} + +func.func private @sink(%arg0 : f32) + +// CHECK: func.func private @sink(f32) +// CHECK-LABEL: func.func @sincos_ensure_ssa_dominance( +// CHECK-SAME: %[[ARG0:.*]]: f32, +// CHECK-SAME: %[[ARG1:.*]]: f32) -> (f32, f32, f32, f32) { +// CHECK: %[[VAL_0:.*]], %[[VAL_1:.*]] = math.sincos %[[ARG0]] : f32 +// CHECK: call @sink(%[[VAL_0]]) : (f32) -> () +// CHECK: %[[VAL_2:.*]], %[[VAL_3:.*]] = math.sincos %[[ARG1]] : f32 +// CHECK: call @sink(%[[VAL_3]]) : (f32) -> () +// CHECK: return %[[VAL_0]], %[[VAL_1]], %[[VAL_3]], %[[VAL_2]] : f32, f32, f32, f32 +// CHECK: } +func.func @sincos_ensure_ssa_dominance(%arg0 : f32, %arg1 : f32) -> (f32, f32, f32, f32) { + %0 = math.sin %arg0 : f32 + func.call @sink(%0) : (f32) -> () + %1 = math.cos %arg0 : f32 + %2 = math.cos %arg1 : f32 + func.call @sink(%2) : (f32) -> () + %3 = math.sin %arg1 : f32 + func.return %0, %1, %2, %3 : f32, f32, f32, f32 +} + +// CHECK-LABEL: func.func @sincos_fusion_no_match_fmf( +// CHECK-SAME: %[[ARG0:.*]]: f32) -> (f32, f32) { +// CHECK: %[[VAL_0:.*]] = math.sin %[[ARG0]] fastmath : f32 +// CHECK: %[[VAL_1:.*]] = math.cos %[[ARG0]] : f32 +// CHECK: return %[[VAL_0]], %[[VAL_1]] : f32, f32 +// CHECK: } +func.func @sincos_fusion_no_match_fmf(%arg0 : f32) -> (f32, f32) { + %0 = math.sin %arg0 fastmath : f32 + %1 = math.cos %arg0 : f32 + func.return %0, %1 : f32, f32 +} + +// CHECK-LABEL: func.func @sincos_no_fusion_different_block( +// CHECK-SAME: %[[ARG0:.*]]: f32, +// CHECK-SAME: %[[ARG1:.*]]: i1) -> f32 { +// CHECK: %[[VAL_0:.*]] = scf.if %[[ARG1]] -> (f32) { +// CHECK: %[[VAL_1:.*]] = math.sin %[[ARG0]] : f32 +// CHECK: scf.yield %[[VAL_1]] : f32 +// CHECK: } else { +// CHECK: %[[VAL_2:.*]] = math.cos %[[ARG0]] : f32 +// CHECK: scf.yield %[[VAL_2]] : f32 +// CHECK: } +// CHECK: return %[[VAL_0]] : f32 +// CHECK: } +func.func @sincos_no_fusion_different_block(%arg0 : f32, %flag : i1) -> f32 { + %0 = scf.if %flag -> f32 { + %s = math.sin %arg0 : f32 + scf.yield %s : f32 + } else { + %c = math.cos %arg0 : f32 + scf.yield %c : f32 + } + func.return %0 : f32 +} + +// CHECK-LABEL: func.func @sincos_fusion_preserve_fastmath( +// CHECK-SAME: %[[ARG0:.*]]: f32) -> (f32, f32) { +// CHECK: %[[VAL_0:.*]], %[[VAL_1:.*]] = math.sincos %[[ARG0]] fastmath : f32 +// CHECK: return %[[VAL_0]], %[[VAL_1]] : f32, f32 +// CHECK: } +func.func @sincos_fusion_preserve_fastmath(%arg0 : f32) -> (f32, f32) { + %0 = math.sin %arg0 fastmath : f32 + %1 = math.cos %arg0 fastmath : f32 + func.return %0, %1 : f32, f32 +} diff --git a/mlir/test/Dialect/MemRef/invalid.mlir b/mlir/test/Dialect/MemRef/invalid.mlir index 3f96d907632b7..5ff292058ccc1 100644 --- 
a/mlir/test/Dialect/MemRef/invalid.mlir +++ b/mlir/test/Dialect/MemRef/invalid.mlir @@ -1169,3 +1169,19 @@ func.func @expand_shape_invalid_output_shape( into memref<2x15x20xf32, strided<[60000, 4000, 2], offset: 100>> return } + +// ----- + +func.func @distinct_objects_types_mismatch(%arg0: memref, %arg1: memref) -> (memref, memref) { + // expected-error @+1 {{operand types and result types must match}} + %0, %1 = "memref.distinct_objects"(%arg0, %arg1) : (memref, memref) -> (memref, memref) + return %0, %1 : memref, memref +} + +// ----- + +func.func @distinct_objects_0_operands() { + // expected-error @+1 {{expected at least one operand}} + "memref.distinct_objects"() : () -> () + return +} diff --git a/mlir/test/Dialect/MemRef/ops.mlir b/mlir/test/Dialect/MemRef/ops.mlir index 6c2298a3f8acb..a90c9505a8405 100644 --- a/mlir/test/Dialect/MemRef/ops.mlir +++ b/mlir/test/Dialect/MemRef/ops.mlir @@ -302,6 +302,15 @@ func.func @assume_alignment(%0: memref<4x4xf16>) { return } +// CHECK-LABEL: func @distinct_objects +// CHECK-SAME: (%[[ARG0:.*]]: memref, %[[ARG1:.*]]: memref, %[[ARG2:.*]]: memref) +func.func @distinct_objects(%arg0: memref, %arg1: memref, %arg2: memref) -> (memref, memref, memref) { + // CHECK: %[[RES:.*]]:3 = memref.distinct_objects %[[ARG0]], %[[ARG1]], %[[ARG2]] : memref, memref, memref + %1, %2, %3 = memref.distinct_objects %arg0, %arg1, %arg2 : memref, memref, memref + // CHECK: return %[[RES]]#0, %[[RES]]#1, %[[RES]]#2 : memref, memref, memref + return %1, %2, %3 : memref, memref, memref +} + // CHECK-LABEL: func @expand_collapse_shape_static func.func @expand_collapse_shape_static( %arg0: memref<3x4x5xf32>, diff --git a/mlir/test/Dialect/OpenMP/cli-canonical_loop.mlir b/mlir/test/Dialect/OpenMP/cli-canonical_loop.mlir index adadb8bbac49d..0e9385ee75c47 100644 --- a/mlir/test/Dialect/OpenMP/cli-canonical_loop.mlir +++ b/mlir/test/Dialect/OpenMP/cli-canonical_loop.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s | FileCheck %s -// RUN: mlir-opt %s | mlir-opt | FileCheck %s +// RUN: mlir-opt %s | FileCheck %s --enable-var-scope +// RUN: mlir-opt %s | mlir-opt | FileCheck %s --enable-var-scope // CHECK-LABEL: @omp_canonloop_raw( @@ -24,10 +24,10 @@ func.func @omp_canonloop_raw(%tc : i32) -> () { func.func @omp_canonloop_sequential_raw(%tc : i32) -> () { // CHECK-NEXT: %canonloop_s0 = omp.new_cli %canonloop_s0 = "omp.new_cli" () : () -> (!omp.cli) - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%[[tc]]) { "omp.canonical_loop" (%tc, %canonloop_s0) ({ ^bb_first(%iv_first: i32): - // CHECK-NEXT: = llvm.add %iv, %iv : i32 + // CHECK-NEXT: = llvm.add %iv_s0, %iv_s0 : i32 %newval = llvm.add %iv_first, %iv_first : i32 // CHECK-NEXT: omp.terminator omp.terminator @@ -36,7 +36,7 @@ func.func @omp_canonloop_sequential_raw(%tc : i32) -> () { // CHECK-NEXT: %canonloop_s1 = omp.new_cli %canonloop_s1 = "omp.new_cli" () : () -> (!omp.cli) - // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv : i32 in range(%[[tc]]) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%[[tc]]) { "omp.canonical_loop" (%tc, %canonloop_s1) ({ ^bb_second(%iv_second: i32): // CHECK: omp.terminator @@ -52,17 +52,17 @@ func.func @omp_canonloop_sequential_raw(%tc : i32) -> () { // CHECK-LABEL: @omp_nested_canonloop_raw( // CHECK-SAME: %[[tc_outer:.+]]: i32, %[[tc_inner:.+]]: i32) func.func @omp_nested_canonloop_raw(%tc_outer : i32, %tc_inner : i32) -> () { - // CHECK-NEXT: %canonloop_s0 = 
omp.new_cli + // CHECK-NEXT: %canonloop = omp.new_cli %outer = "omp.new_cli" () : () -> (!omp.cli) - // CHECK-NEXT: %canonloop_s0_s0 = omp.new_cli + // CHECK-NEXT: %canonloop_d1 = omp.new_cli %inner = "omp.new_cli" () : () -> (!omp.cli) - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc_outer]]) { + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc_outer]]) { "omp.canonical_loop" (%tc_outer, %outer) ({ ^bb_outer(%iv_outer: i32): - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0_s0) %iv_0 : i32 in range(%[[tc_inner]]) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc_inner]]) { "omp.canonical_loop" (%tc_inner, %inner) ({ ^bb_inner(%iv_inner: i32): - // CHECK-NEXT: = llvm.add %iv, %iv_0 : i32 + // CHECK-NEXT: = llvm.add %iv, %iv_d1 : i32 %newval = llvm.add %iv_outer, %iv_inner: i32 // CHECK-NEXT: omp.terminator omp.terminator @@ -108,16 +108,24 @@ func.func @omp_canonloop_constant_pretty() -> () { func.func @omp_canonloop_sequential_pretty(%tc : i32) -> () { // CHECK-NEXT: %canonloop_s0 = omp.new_cli %canonloop_s0 = omp.new_cli - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) { - omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%tc) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%tc) { // CHECK-NEXT: omp.terminator omp.terminator } // CHECK: %canonloop_s1 = omp.new_cli %canonloop_s1 = omp.new_cli - // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv : i32 in range(%[[tc]]) { - omp.canonical_loop(%canonloop_s1) %iv_0 : i32 in range(%tc) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%tc) { + // CHECK-NEXT: omp.terminator + omp.terminator + } + + // CHECK: %canonloop_s2 = omp.new_cli + %canonloop_s2 = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop_s2) %iv_s2 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_s2) %iv_s2 : i32 in range(%tc) { // CHECK-NEXT: omp.terminator omp.terminator } @@ -126,17 +134,17 @@ func.func @omp_canonloop_sequential_pretty(%tc : i32) -> () { } -// CHECK-LABEL: @omp_canonloop_nested_pretty( +// CHECK-LABEL: @omp_canonloop_2d_nested_pretty( // CHECK-SAME: %[[tc:.+]]: i32) -func.func @omp_canonloop_nested_pretty(%tc : i32) -> () { - // CHECK-NEXT: %canonloop_s0 = omp.new_cli - %canonloop_s0 = omp.new_cli - // CHECK-NEXT: %canonloop_s0_s0 = omp.new_cli - %canonloop_s0_s0 = omp.new_cli - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) { - omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%tc) { - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0_s0) %iv_0 : i32 in range(%[[tc]]) { - omp.canonical_loop(%canonloop_s0_s0) %iv_0 : i32 in range(%tc) { +func.func @omp_canonloop_2d_nested_pretty(%tc : i32) -> () { + // CHECK-NEXT: %canonloop = omp.new_cli + %canonloop = omp.new_cli + // CHECK-NEXT: %canonloop_d1 = omp.new_cli + %canonloop_d1 = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%tc) { // CHECK: omp.terminator omp.terminator } @@ -147,6 +155,77 @@ func.func @omp_canonloop_nested_pretty(%tc : i32) -> () { } +// CHECK-LABEL: @omp_canonloop_3d_nested_pretty( +// CHECK-SAME: 
%[[tc:.+]]: i32) +func.func @omp_canonloop_3d_nested_pretty(%tc : i32) -> () { + // CHECK: %canonloop = omp.new_cli + %canonloop = omp.new_cli + // CHECK: %canonloop_d1 = omp.new_cli + %canonloop_d1 = omp.new_cli + // CHECK: %canonloop_d2 = omp.new_cli + %canonloop_d2 = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_d1) %iv_1d : i32 in range(%tc) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_d2) %iv_d2 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_d2) %iv_d2 : i32 in range(%tc) { + // CHECK-NEXT: omp.terminator + omp.terminator + // CHECK-NEXT: } + } + // CHECK-NEXT: omp.terminator + omp.terminator + // CHECK-NEXT: } + } + // CHECK-NEXT: omp.terminator + omp.terminator + } + + return +} + + +// CHECK-LABEL: @omp_canonloop_sequential_nested_pretty( +// CHECK-SAME: %[[tc:.+]]: i32) +func.func @omp_canonloop_sequential_nested_pretty(%tc : i32) -> () { + // CHECK-NEXT: %canonloop_s0 = omp.new_cli + %canonloop_s0 = omp.new_cli + // CHECK-NEXT: %canonloop_s0_d1 = omp.new_cli + %canonloop_s0_d1 = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_s0) %iv_s0 : i32 in range(%tc) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_s0_d1) %iv_s0_d1 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_s0_d1) %iv_s0_d1 : i32 in range(%tc) { + // CHECK-NEXT: omp.terminator + omp.terminator + // CHECK-NEXT: } + } + // CHECK-NEXT: omp.terminator + omp.terminator + // CHECK-NEXT: } + } + + // CHECK-NEXT: %canonloop_s1 = omp.new_cli + %canonloop_s1 = omp.new_cli + // CHECK-NEXT: %canonloop_s1_d1 = omp.new_cli + %canonloop_s1_d1 = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_s1) %iv_s1 : i32 in range(%tc) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_s1_d1) %iv_s1_d1 : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop_s1_d1) %iv_s1d1 : i32 in range(%tc) { + // CHECK-NEXT: omp.terminator + omp.terminator + // CHECK-NEXT: } + } + // CHECK-NEXT: omp.terminator + omp.terminator + } + + return +} + + // CHECK-LABEL: @omp_newcli_unused( // CHECK-SAME: ) func.func @omp_newcli_unused() -> () { @@ -155,3 +234,74 @@ func.func @omp_newcli_unused() -> () { // CHECK-NEXT: return return } + + +// CHECK-LABEL: @omp_canonloop_multiregion_isolatedfromabove( +func.func @omp_canonloop_multiregion_isolatedfromabove() -> () { + omp.private {type = firstprivate} @x.privatizer : !llvm.ptr init { + ^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr): + %c42_i32 = arith.constant 42: i32 + // CHECK: omp.canonical_loop %iv : i32 in range(%c42_i32) { + omp.canonical_loop %iv1 : i32 in range(%c42_i32) { + omp.terminator + } + // CHECK: omp.yield + omp.yield(%arg0 : !llvm.ptr) + } copy { + ^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr): + %c42_i32 = arith.constant 42: i32 + // CHECK: omp.canonical_loop %iv : i32 in range(%c42_i32) { + omp.canonical_loop %iv : i32 in range(%c42_i32) { + // CHECK: omp.canonical_loop %iv_d1 : i32 in range(%c42_i32) { + omp.canonical_loop %iv_d1 : i32 in range(%c42_i32) { + omp.terminator + } + omp.terminator + } + // CHECK: omp.yield + omp.yield(%arg0 : !llvm.ptr) + } dealloc { + ^bb0(%arg0: !llvm.ptr): + %c42_i32 = arith.constant 42: i32 + // CHECK: omp.canonical_loop %iv_s0 : i32 in range(%c42_i32) { + omp.canonical_loop 
%iv_s0 : i32 in range(%c42_i32) { + omp.terminator + } + // CHECK: omp.canonical_loop %iv_s1 : i32 in range(%c42_i32) { + omp.canonical_loop %iv_s1 : i32 in range(%c42_i32) { + omp.terminator + } + // CHECK: omp.yield + omp.yield + } + + // CHECK: return + return +} + + +// CHECK-LABEL: @omp_canonloop_multiregion( +func.func @omp_canonloop_multiregion(%c : i1) -> () { + %c42_i32 = arith.constant 42: i32 + %canonloop1 = omp.new_cli + %canonloop2 = omp.new_cli + %canonloop3 = omp.new_cli + scf.if %c { + // CHECK: omp.canonical_loop(%canonloop_r0) %iv_r0 : i32 in range(%c42_i32) { + omp.canonical_loop(%canonloop1) %iv1 : i32 in range(%c42_i32) { + omp.terminator + } + } else { + // CHECK: omp.canonical_loop(%canonloop_r1_s0) %iv_r1_s0 : i32 in range(%c42_i32) { + omp.canonical_loop(%canonloop2) %iv2 : i32 in range(%c42_i32) { + omp.terminator + } + // CHECK: omp.canonical_loop(%canonloop_r1_s1) %iv_r1_s1 : i32 in range(%c42_i32) { + omp.canonical_loop(%canonloop3) %iv3 : i32 in range(%c42_i32) { + omp.terminator + } + } + + // CHECK: return + return +} diff --git a/mlir/test/Dialect/OpenMP/cli-tile.mlir b/mlir/test/Dialect/OpenMP/cli-tile.mlir new file mode 100644 index 0000000000000..73d54784c52b7 --- /dev/null +++ b/mlir/test/Dialect/OpenMP/cli-tile.mlir @@ -0,0 +1,138 @@ +// RUN: mlir-opt %s | FileCheck %s --enable-var-scope +// RUN: mlir-opt %s | mlir-opt | FileCheck %s --enable-var-scope + + +// Raw syntax check (MLIR output is always pretty-printed) +// CHECK-LABEL: @omp_tile_raw( +// CHECK-SAME: %[[tc:.+]]: i32, %[[ts:.+]]: i32) { +func.func @omp_tile_raw(%tc : i32, %ts : i32) -> () { + // CHECK-NEXT: %canonloop = omp.new_cli + %canonloop = "omp.new_cli" () : () -> (!omp.cli) + // CHECK-NEXT: %grid1 = omp.new_cli + %grid = "omp.new_cli" () : () -> (!omp.cli) + // CHECK-NEXT: %intratile1 = omp.new_cli + %intratile = "omp.new_cli" () : () -> (!omp.cli) + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) { + "omp.canonical_loop" (%tc, %canonloop) ({ + ^bb0(%iv: i32): + // CHECK: omp.terminator + omp.terminator + }) : (i32, !omp.cli) -> () + // CHECK: omp.tile (%grid1, %intratile1) <- (%canonloop) sizes(%[[ts]] : i32) + "omp.tile"(%grid, %intratile, %canonloop, %ts) <{operandSegmentSizes = array}> : (!omp.cli, !omp.cli, !omp.cli, i32) -> () + //"omp.tile" (%canonloop) : (!omp.cli) -> () + return +} + + +// Pretty syntax check +// CHECK-LABEL: @omp_tile_pretty( +// CHECK-SAME: %[[tc:.+]]: i32, %[[ts:.+]]: i32) { +func.func @omp_tile_pretty(%tc : i32, %ts : i32) -> () { + // CHECK-NEXT: %[[CANONLOOP:.+]] = omp.new_cli + %canonloop = omp.new_cli + // CHECK-NEXT: %[[CANONLOOP:.+]] = omp.new_cli + %grid = omp.new_cli + // CHECK-NEXT: %[[CANONLOOP:.+]] = omp.new_cli + %intratile = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) { + // CHECK: omp.terminator + omp.terminator + } + // CHECK: omp.tile (%grid1, %intratile1) <- (%canonloop) sizes(%[[ts]] : i32) + omp.tile(%grid, %intratile) <- (%canonloop) sizes(%ts : i32) + return +} + + +// Specifying the generatees for omp.tile is optional +// CHECK-LABEL: @omp_tile_optionalgen_pretty( +// CHECK-SAME: %[[tc:.+]]: i32, %[[ts:.+]]: i32) { +func.func @omp_tile_optionalgen_pretty(%tc : i32, %ts : i32) -> () { + // CHECK-NEXT: %canonloop = omp.new_cli + %canonloop = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) { + omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) { + // CHECK: 
omp.terminator + omp.terminator + } + // CHECK: omp.tile <- (%canonloop) sizes(%[[ts]] : i32) + omp.tile <- (%canonloop) sizes(%ts : i32) + return +} + + +// Two-dimensional tiling +// CHECK-LABEL: @omp_tile_2d_pretty( +// CHECK-SAME: %[[tc1:.+]]: i32, %[[tc2:.+]]: i32, %[[ts1:.+]]: i32, %[[ts2:.+]]: i32) { +func.func @omp_tile_2d_pretty(%tc1 : i32, %tc2 : i32, %ts1 : i32, %ts2 : i32) -> () { + // CHECK-NEXT: %canonloop = omp.new_cli + %cli_outer = omp.new_cli + // CHECK-NEXT: %canonloop_d1 = omp.new_cli + %cli_inner = omp.new_cli + // CHECK-NEXT: %grid1 = omp.new_cli + %grid1 = omp.new_cli + // CHECK-NEXT: %grid2 = omp.new_cli + %grid2 = omp.new_cli + // CHECK-NEXT: %intratile1 = omp.new_cli + %intratile1 = omp.new_cli + // CHECK-NEXT: %intratile2 = omp.new_cli + %intratile2 = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc1]]) { + omp.canonical_loop(%cli_outer) %iv_outer : i32 in range(%tc1) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc2]]) { + omp.canonical_loop(%cli_inner) %iv_inner : i32 in range(%tc2) { + // CHECK: omp.terminator + omp.terminator + } + // CHECK: omp.terminator + omp.terminator + } + // CHECK: omp.tile (%grid1, %grid2, %intratile1, %intratile2) <- (%canonloop, %canonloop_d1) sizes(%[[ts1]], %[[ts2]] : i32, i32) + omp.tile (%grid1, %grid2, %intratile1, %intratile2) <- (%cli_outer, %cli_inner) sizes(%ts1, %ts2 : i32, i32) + return +} + + +// Three-dimensional tiling +// CHECK-LABEL: @omp_tile_3d_pretty( +// CHECK-SAME: %[[tc:.+]]: i32, %[[ts:.+]]: i32) { +func.func @omp_tile_3d_pretty(%tc : i32, %ts : i32) -> () { + // CHECK-NEXT: %canonloop = omp.new_cli + %cli_outer = omp.new_cli + // CHECK-NEXT: %canonloop_d1 = omp.new_cli + %cli_middle = omp.new_cli + // CHECK-NEXT: %canonloop_d2 = omp.new_cli + %cli_inner = omp.new_cli + // CHECK-NEXT: %grid1 = omp.new_cli + %grid1 = omp.new_cli + // CHECK-NEXT: %grid2 = omp.new_cli + %grid2 = omp.new_cli + // CHECK-NEXT: %grid3 = omp.new_cli + %grid3 = omp.new_cli + // CHECK-NEXT: %intratile1 = omp.new_cli + %intratile1 = omp.new_cli + // CHECK-NEXT: %intratile2 = omp.new_cli + %intratile2 = omp.new_cli + // CHECK-NEXT: %intratile3 = omp.new_cli + %intratile3 = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) { + omp.canonical_loop(%cli_outer) %iv_outer : i32 in range(%tc) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc]]) { + omp.canonical_loop(%cli_middle) %iv_middle : i32 in range(%tc) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_d2) %iv_d2 : i32 in range(%[[tc]]) { + omp.canonical_loop(%cli_inner) %iv_inner : i32 in range(%tc) { + // CHECK: omp.terminator + omp.terminator + } + // CHECK: omp.terminator + omp.terminator + } + // CHECK: omp.terminator + omp.terminator + } + // CHECK: omp.tile (%grid1, %grid2, %grid3, %intratile1, %intratile2, %intratile3) <- (%canonloop, %canonloop_d1, %canonloop_d2) sizes(%[[ts]], %[[ts]], %[[ts]] : i32, i32, i32) + omp.tile (%grid1, %grid2, %grid3, %intratile1, %intratile2, %intratile3) <- (%cli_outer, %cli_middle, %cli_inner) sizes(%ts, %ts, %ts: i32, i32, i32) + return +} diff --git a/mlir/test/Dialect/OpenMP/cli-unroll-heuristic.mlir b/mlir/test/Dialect/OpenMP/cli-unroll-heuristic.mlir index cda7d0b500166..16884f4245e76 100644 --- a/mlir/test/Dialect/OpenMP/cli-unroll-heuristic.mlir +++ b/mlir/test/Dialect/OpenMP/cli-unroll-heuristic.mlir @@ -1,18 +1,18 @@ -// RUN: mlir-opt %s | FileCheck %s -// RUN: mlir-opt %s | mlir-opt | FileCheck %s +// 
RUN: mlir-opt %s | FileCheck %s --enable-var-scope +// RUN: mlir-opt %s | mlir-opt | FileCheck %s --enable-var-scope // CHECK-LABEL: @omp_unroll_heuristic_raw( // CHECK-SAME: %[[tc:.+]]: i32) { func.func @omp_unroll_heuristic_raw(%tc : i32) -> () { - // CHECK-NEXT: %canonloop_s0 = omp.new_cli + // CHECK-NEXT: %canonloop = omp.new_cli %canonloop = "omp.new_cli" () : () -> (!omp.cli) - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) { + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) { "omp.canonical_loop" (%tc, %canonloop) ({ ^bb0(%iv: i32): omp.terminator }) : (i32, !omp.cli) -> () - // CHECK: omp.unroll_heuristic(%canonloop_s0) + // CHECK: omp.unroll_heuristic(%canonloop) "omp.unroll_heuristic" (%canonloop) : (!omp.cli) -> () return } @@ -22,12 +22,12 @@ func.func @omp_unroll_heuristic_raw(%tc : i32) -> () { // CHECK-SAME: %[[tc:.+]]: i32) { func.func @omp_unroll_heuristic_pretty(%tc : i32) -> () { // CHECK-NEXT: %[[CANONLOOP:.+]] = omp.new_cli - %canonloop = "omp.new_cli" () : () -> (!omp.cli) - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) { + %canonloop = omp.new_cli + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) { omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) { omp.terminator } - // CHECK: omp.unroll_heuristic(%canonloop_s0) + // CHECK: omp.unroll_heuristic(%canonloop) omp.unroll_heuristic(%canonloop) return } @@ -36,13 +36,13 @@ func.func @omp_unroll_heuristic_pretty(%tc : i32) -> () { // CHECK-LABEL: @omp_unroll_heuristic_nested_pretty( // CHECK-SAME: %[[tc:.+]]: i32) { func.func @omp_unroll_heuristic_nested_pretty(%tc : i32) -> () { - // CHECK-NEXT: %canonloop_s0 = omp.new_cli + // CHECK-NEXT: %canonloop = omp.new_cli %cli_outer = omp.new_cli - // CHECK-NEXT: %canonloop_s0_s0 = omp.new_cli + // CHECK-NEXT: %canonloop_d1 = omp.new_cli %cli_inner = omp.new_cli - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0) %iv : i32 in range(%[[tc]]) { + // CHECK-NEXT: omp.canonical_loop(%canonloop) %iv : i32 in range(%[[tc]]) { omp.canonical_loop(%cli_outer) %iv_outer : i32 in range(%tc) { - // CHECK-NEXT: omp.canonical_loop(%canonloop_s0_s0) %iv_0 : i32 in range(%[[tc]]) { + // CHECK-NEXT: omp.canonical_loop(%canonloop_d1) %iv_d1 : i32 in range(%[[tc]]) { omp.canonical_loop(%cli_inner) %iv_inner : i32 in range(%tc) { // CHECK: omp.terminator omp.terminator @@ -51,9 +51,9 @@ func.func @omp_unroll_heuristic_nested_pretty(%tc : i32) -> () { omp.terminator } - // CHECK: omp.unroll_heuristic(%canonloop_s0) + // CHECK: omp.unroll_heuristic(%canonloop) omp.unroll_heuristic(%cli_outer) - // CHECK-NEXT: omp.unroll_heuristic(%canonloop_s0_s0) + // CHECK-NEXT: omp.unroll_heuristic(%canonloop_d1) omp.unroll_heuristic(%cli_inner) return } diff --git a/mlir/test/Dialect/OpenMP/invalid-tile.mlir b/mlir/test/Dialect/OpenMP/invalid-tile.mlir new file mode 100644 index 0000000000000..e63a062d810ed --- /dev/null +++ b/mlir/test/Dialect/OpenMP/invalid-tile.mlir @@ -0,0 +1,119 @@ +// RUN: mlir-opt -split-input-file -verify-diagnostics %s + + +func.func @missing_sizes(%tc : i32, %ts : i32) { + %canonloop = omp.new_cli + omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) { + omp.terminator + } + + // expected-error@+1 {{'omp.tile' op there must be one tile size for each applyee}} + omp.tile <-(%canonloop) + + llvm.return +} + +// ----- + +func.func @no_loop(%tc : i32, %ts : i32) { + // expected-error@+1 {{'omp.tile' op must apply to at least one loop}} + omp.tile <-() + + return +} + +// 
----- + +func.func @missing_generator(%tc : i32, %ts : i32) { + // expected-error@+1 {{'omp.new_cli' op CLI has no generator}} + %canonloop = omp.new_cli + + // expected-note@+1 {{see consumer here: "omp.tile"(%0, %arg1) <{operandSegmentSizes = array}> : (!omp.cli, i32) -> ()}} + omp.tile <-(%canonloop) sizes(%ts : i32) + + return +} + +// ----- + +func.func @insufficient_sizes(%tc : i32, %ts : i32) { + %canonloop1 = omp.new_cli + %canonloop2 = omp.new_cli + omp.canonical_loop(%canonloop1) %iv : i32 in range(%tc) { + omp.terminator + } + omp.canonical_loop(%canonloop2) %iv : i32 in range(%tc) { + omp.terminator + } + + // expected-error@+1 {{'omp.tile' op there must be one tile size for each applyee}} + omp.tile <-(%canonloop1, %canonloop2) sizes(%ts : i32) + + llvm.return +} + +// ----- + +func.func @insufficient_applyees(%tc : i32, %ts : i32) { + %canonloop = omp.new_cli + omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) { + omp.terminator + } + + // expected-error@+1 {{'omp.tile' op there must be one tile size for each applyee}} + omp.tile <- (%canonloop) sizes(%ts, %ts : i32, i32) + + return +} + +// ----- + +func.func @insufficient_generatees(%tc : i32, %ts : i32) { + %canonloop = omp.new_cli + %grid = omp.new_cli + omp.canonical_loop(%canonloop) %iv : i32 in range(%tc) { + omp.terminator + } + + // expected-error@+1 {{'omp.tile' op expecting two times the number of generatees than applyees}} + omp.tile (%grid) <- (%canonloop) sizes(%ts : i32) + + return +} + +// ----- + +func.func @not_perfectly_nested(%tc : i32, %ts : i32) { + %canonloop1 = omp.new_cli + %canonloop2 = omp.new_cli + omp.canonical_loop(%canonloop1) %iv1 : i32 in range(%tc) { + %v = arith.constant 42 : i32 + omp.canonical_loop(%canonloop2) %iv2 : i32 in range(%tc) { + omp.terminator + } + omp.terminator + } + + // expected-error@+1 {{'omp.tile' op tiled loop nest must be perfectly nested}} + omp.tile <-(%canonloop1, %canonloop2) sizes(%ts, %ts : i32, i32) + + llvm.return +} + +// ----- + +func.func @non_rectangular(%tc : i32, %ts : i32) { + %canonloop1 = omp.new_cli + %canonloop2 = omp.new_cli + omp.canonical_loop(%canonloop1) %iv1 : i32 in range(%tc) { + omp.canonical_loop(%canonloop2) %iv2 : i32 in range(%iv1) { + omp.terminator + } + omp.terminator + } + + // expected-error@+1 {{'omp.tile' op tiled loop nest must be rectangular}} + omp.tile <-(%canonloop1, %canonloop2) sizes(%ts, %ts : i32, i32) + + llvm.return +} diff --git a/mlir/test/Dialect/Ptr/invalid.mlir b/mlir/test/Dialect/Ptr/invalid.mlir index cc1eeb3cb5744..83e1c880650c5 100644 --- a/mlir/test/Dialect/Ptr/invalid.mlir +++ b/mlir/test/Dialect/Ptr/invalid.mlir @@ -70,3 +70,11 @@ func.func @ptr_add_shape_mismatch(%ptrs: tensor<8x!ptr.ptr<#ptr.generic_space>>, %res = ptr.ptr_add %ptrs, %offsets : tensor<8x!ptr.ptr<#ptr.generic_space>>, tensor<4xi64> return %res : tensor<8x!ptr.ptr<#ptr.generic_space>> } + +// ----- + +func.func @ptr_diff_mismatch(%lhs: tensor<8x!ptr.ptr<#ptr.generic_space>>, %rhs: tensor<8x!ptr.ptr<#ptr.generic_space>>) -> vector<8xi64> { + // expected-error@+1 {{the result to have the same container type as the operands when operands are shaped}} + %res = ptr.ptr_diff %lhs, %rhs : tensor<8x!ptr.ptr<#ptr.generic_space>> -> vector<8xi64> + return %res : vector<8xi64> +} diff --git a/mlir/test/Dialect/Ptr/ops.mlir b/mlir/test/Dialect/Ptr/ops.mlir index 7b2254185f57c..0a906ad559e21 100644 --- a/mlir/test/Dialect/Ptr/ops.mlir +++ b/mlir/test/Dialect/Ptr/ops.mlir @@ -211,3 +211,31 @@ func.func @constant_large_address_ops() ->
(!ptr.ptr<#ptr.generic_space>, !ptr.p %addr_large = ptr.constant #ptr.address<0x123456789ABCDEF0> : !ptr.ptr<#llvm.address_space<0>> return %addr_max32, %addr_large : !ptr.ptr<#ptr.generic_space>, !ptr.ptr<#llvm.address_space<0>> } + +/// Test ptr_diff operations with scalar pointers +func.func @ptr_diff_scalar_ops(%ptr1: !ptr.ptr<#ptr.generic_space>, %ptr2: !ptr.ptr<#ptr.generic_space>) -> (i64, index, i32) { + %diff_i64 = ptr.ptr_diff %ptr1, %ptr2 : !ptr.ptr<#ptr.generic_space> -> i64 + %diff_index = ptr.ptr_diff %ptr1, %ptr2 : !ptr.ptr<#ptr.generic_space> -> index + %diff_i32 = ptr.ptr_diff nuw %ptr1, %ptr2 : !ptr.ptr<#ptr.generic_space> -> i32 + return %diff_i64, %diff_index, %diff_i32 : i64, index, i32 +} + +/// Test ptr_diff operations with vector pointers +func.func @ptr_diff_vector_ops(%ptrs1: vector<4x!ptr.ptr<#ptr.generic_space>>, %ptrs2: vector<4x!ptr.ptr<#ptr.generic_space>>) -> (vector<4xi64>, vector<4xindex>) { + %diff_i64 = ptr.ptr_diff none %ptrs1, %ptrs2 : vector<4x!ptr.ptr<#ptr.generic_space>> -> vector<4xi64> + %diff_index = ptr.ptr_diff %ptrs1, %ptrs2 : vector<4x!ptr.ptr<#ptr.generic_space>> -> vector<4xindex> + return %diff_i64, %diff_index : vector<4xi64>, vector<4xindex> +} + +/// Test ptr_diff operations with tensor pointers +func.func @ptr_diff_tensor_ops(%ptrs1: tensor<8x!ptr.ptr<#ptr.generic_space>>, %ptrs2: tensor<8x!ptr.ptr<#ptr.generic_space>>) -> (tensor<8xi64>, tensor<8xi32>) { + %diff_i64 = ptr.ptr_diff nsw %ptrs1, %ptrs2 : tensor<8x!ptr.ptr<#ptr.generic_space>> -> tensor<8xi64> + %diff_i32 = ptr.ptr_diff nsw | nuw %ptrs1, %ptrs2 : tensor<8x!ptr.ptr<#ptr.generic_space>> -> tensor<8xi32> + return %diff_i64, %diff_i32 : tensor<8xi64>, tensor<8xi32> +} + +/// Test ptr_diff operations with 2D tensor pointers +func.func @ptr_diff_tensor_2d_ops(%ptrs1: tensor<4x8x!ptr.ptr<#ptr.generic_space>>, %ptrs2: tensor<4x8x!ptr.ptr<#ptr.generic_space>>) -> tensor<4x8xi64> { + %diff = ptr.ptr_diff %ptrs1, %ptrs2 : tensor<4x8x!ptr.ptr<#ptr.generic_space>> -> tensor<4x8xi64> + return %diff : tensor<4x8xi64> +} diff --git a/mlir/test/Dialect/SparseTensor/scf_1_N_conversion.mlir b/mlir/test/Dialect/SparseTensor/scf_1_N_conversion.mlir index f5d6a08b7de31..515de5502f322 100644 --- a/mlir/test/Dialect/SparseTensor/scf_1_N_conversion.mlir +++ b/mlir/test/Dialect/SparseTensor/scf_1_N_conversion.mlir @@ -86,3 +86,47 @@ func.func @while(%arg0: tensor<1024xf32, #SparseVector>, %c: i1) -> tensor<1024x } return %0: tensor<1024xf32, #SparseVector> } + +// CHECK-LABEL: func.func @index_switch( +// CHECK-SAME: %[[PRED:.*0]]: index, +// CHECK-SAME: %[[VAL_A_1:.*1]]: memref, +// CHECK-SAME: %[[VAL_A_2:.*2]]: memref, +// CHECK-SAME: %[[VAL_A_3:.*3]]: memref, +// CHECK-SAME: %[[VAL_A_4:.*4]]: !sparse_tensor.storage_specifier +// CHECK-SAME: %[[VAL_B_1:.*5]]: memref, +// CHECK-SAME: %[[VAL_B_2:.*6]]: memref, +// CHECK-SAME: %[[VAL_B_3:.*7]]: memref, +// CHECK-SAME: %[[VAL_B_4:.*8]]: !sparse_tensor.storage_specifier +// CHECK-SAME: %[[VAL_C_1:.*9]]: memref, +// CHECK-SAME: %[[VAL_C_2:.*10]]: memref, +// CHECK-SAME: %[[VAL_C_3:.*11]]: memref, +// CHECK-SAME: %[[VAL_C_4:.*12]]: !sparse_tensor.storage_specifier + +// CHECK: %[[RES:.*]]:4 = scf.index_switch %[[PRED]] +// CHECK-SAME: -> memref, memref, memref, !sparse_tensor.storage_specifier +// CHECK: case 1 { +// CHECK: scf.yield %[[VAL_A_1]], %[[VAL_A_2]], %[[VAL_A_3]], %[[VAL_A_4]] +// CHECK: case 2 { +// CHECK: scf.yield %[[VAL_B_1]], %[[VAL_B_2]], %[[VAL_B_3]], %[[VAL_B_4]] +// CHECK: default { +// CHECK: scf.yield %[[VAL_C_1]], %[[VAL_C_2]], 
%[[VAL_C_3]], %[[VAL_C_4]] + +// CHECK: return %[[RES]]#0, %[[RES]]#1, %[[RES]]#2, %[[RES]]#3 : +// CHECK-SAME: memref, memref, memref, !sparse_tensor.storage_specifier + +func.func @index_switch(%pred: index, %a: tensor<5xf32, #SparseVector>, + %b: tensor<5xf32, #SparseVector>, + %c: tensor<5xf32, #SparseVector>) -> tensor<5xf32, #SparseVector> { + %0 = scf.index_switch %pred -> tensor<5xf32, #SparseVector> + case 1 { + scf.yield %a : tensor<5xf32, #SparseVector> + } + case 2 { + scf.yield %b : tensor<5xf32, #SparseVector> + } + default { + scf.yield %c : tensor<5xf32, #SparseVector> + } + + return %0 : tensor<5xf32, #SparseVector> +} diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir index fd2a3f1d361eb..e8525a5d2ed62 100644 --- a/mlir/test/Dialect/Tosa/canonicalize.mlir +++ b/mlir/test/Dialect/Tosa/canonicalize.mlir @@ -1104,26 +1104,6 @@ func.func @canonicalize_pad_slice_dynamic_noupdate(%arg0: tensor<1x16x?x3xf32>) // ----- -// CHECK-LABEL: @fold_log_exp -func.func @fold_log_exp(%arg0: tensor) -> tensor { - // CHECK: return %arg{{.*}} : tensor - %0 = tosa.exp %arg0 : (tensor) -> tensor - %1 = tosa.log %0 : (tensor) -> tensor - return %1 : tensor -} - -// ----- - -// CHECK-LABEL: @fold_exp_log -func.func @fold_exp_log(%arg0: tensor) -> tensor { - // CHECK: return %arg{{.*}} : tensor - %0 = tosa.log %arg0 : (tensor) -> tensor - %1 = tosa.exp %0 : (tensor) -> tensor - return %1 : tensor -} - -// ----- - // CHECK-LABEL: @fold_negate_negate func.func @fold_negate_negate(%arg0: tensor) -> tensor { // CHECK: return %arg{{.*}} : tensor diff --git a/mlir/test/Dialect/Tosa/error_if_check.mlir b/mlir/test/Dialect/Tosa/error_if_check.mlir index 290773b23193f..2f9421c43d2fb 100644 --- a/mlir/test/Dialect/Tosa/error_if_check.mlir +++ b/mlir/test/Dialect/Tosa/error_if_check.mlir @@ -269,20 +269,6 @@ func.func @test_cond_if_simplified_form_not_isolated_from_above(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { - %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({ - ^bb0(%arg3: tensor, %arg4: tensor): - tosa.yield %arg3 : tensor - }, { - ^bb0(%arg3: tensor, %arg4: tensor): - tosa.yield %arg4 : tensor - }) : (tensor, tensor, tensor) -> tensor - return %0 : tensor -} - -// ----- - func.func @test_while_loop_cond_not_isolated_from_above(%arg0: tensor, %arg1: tensor, %arg2: tensor) { %0 = "tosa.const"() {values = dense<0> : tensor} : () -> tensor // expected-error@+1 {{'tosa.while_loop' op is not conformant to the TOSA specification. 
It requires the 'cond' region is isolated from above.}} @@ -318,22 +304,3 @@ func.func @test_while_loop_body_not_isolated_from_above(%arg0: tensor, %arg }) : (tensor) -> (tensor) return } - -// ----- - -// Check isolated while_loops are valid -func.func @test_while_loop_isolated_from_above(%arg0: tensor, %arg1: tensor) { - %0 = "tosa.const"() {values = dense<0> : tensor} : () -> tensor - %1:3 = "tosa.while_loop"(%0, %arg0, %arg1) ({ - ^bb0(%arg3: tensor, %arg4: tensor, %arg5: tensor): - %2 = "tosa.greater_equal"(%arg3, %arg5) : (tensor, tensor) -> tensor - %3 = "tosa.logical_not"(%2) : (tensor) -> tensor - "tosa.yield"(%3) : (tensor) -> () - }, { - ^bb0(%arg3: tensor, %arg4: tensor, %arg5: tensor): - %2 = "tosa.const"() {values = dense<1> : tensor} : () -> tensor - %3 = "tosa.add"(%arg3, %2) : (tensor, tensor) -> tensor - "tosa.yield"(%3, %arg4, %arg5) : (tensor, tensor, tensor) -> () - }) : (tensor, tensor, tensor) -> (tensor, tensor, tensor) - return -} diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir index bee0eb1309572..868b7b7a93335 100644 --- a/mlir/test/Dialect/Tosa/ops.mlir +++ b/mlir/test/Dialect/Tosa/ops.mlir @@ -976,6 +976,15 @@ func.func @test_matmul_f8E5M2(%arg0: tensor<1x14x19xf8E5M2>, %arg1: tensor<1x19x return %0 : tensor<1x14x28xf16> } +// ----- +// CHECK-LABEL: test_matmul_f8E5M2_f8E4M3 +func.func @test_matmul_f8E5M2_f8E4M3(%arg0: tensor<1x14x19xf8E5M2>, %arg1: tensor<1x19x28xf8E4M3FN>) -> tensor<1x14x28xf16> { + %azp0 = "tosa.const"() <{values = dense<0.0> : tensor<1xf8E5M2>}> : () -> tensor<1xf8E5M2> + %bzp0 = "tosa.const"() <{values = dense<0.0> : tensor<1xf8E4M3FN>}> : () -> tensor<1xf8E4M3FN> + %0 = tosa.matmul %arg0, %arg1, %azp0, %bzp0 : (tensor<1x14x19xf8E5M2>, tensor<1x19x28xf8E4M3FN>, tensor<1xf8E5M2>, tensor<1xf8E4M3FN>) -> tensor<1x14x28xf16> + return %0 : tensor<1x14x28xf16> +} + // ----- // CHECK-LABEL: max_pool2d_f8E5M2 func.func @test_max_pool2d_f8E5M2(%arg0: tensor<1x32x32x8xf8E5M2>) -> tensor<1x32x32x8xf8E5M2> { diff --git a/mlir/test/Dialect/Tosa/tosa-validation-valid-strict.mlir b/mlir/test/Dialect/Tosa/tosa-validation-valid-strict.mlir new file mode 100644 index 0000000000000..f05ae7f58261d --- /dev/null +++ b/mlir/test/Dialect/Tosa/tosa-validation-valid-strict.mlir @@ -0,0 +1,34 @@ +// RUN: mlir-opt %s -split-input-file -verify-diagnostics --tosa-validate="profile=pro_int,pro_fp extension=int16,int4,bf16,fp8e4m3,fp8e5m2,fft,variable,controlflow,doubleround,inexactround strict-op-spec-alignment" | FileCheck %s + +// ----- + +// CHECK-LABEL: test_cond_if_isolated_from_above +func.func @test_cond_if_isolated_from_above(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { + %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({ + ^bb0(%arg3: tensor, %arg4: tensor): + tosa.yield %arg3 : tensor + }, { + ^bb0(%arg3: tensor, %arg4: tensor): + tosa.yield %arg4 : tensor + }) : (tensor, tensor, tensor) -> tensor + return %0 : tensor +} + +// ----- + +// CHECK-LABEL: test_while_loop_isolated_from_above +func.func @test_while_loop_isolated_from_above(%arg0: tensor, %arg1: tensor) { + %0 = "tosa.const"() {values = dense<0> : tensor} : () -> tensor + %1:3 = "tosa.while_loop"(%0, %arg0, %arg1) ({ + ^bb0(%arg3: tensor, %arg4: tensor, %arg5: tensor): + %2 = "tosa.greater_equal"(%arg3, %arg5) : (tensor, tensor) -> tensor + %3 = "tosa.logical_not"(%2) : (tensor) -> tensor + "tosa.yield"(%3) : (tensor) -> () + }, { + ^bb0(%arg3: tensor, %arg4: tensor, %arg5: tensor): + %2 = "tosa.const"() {values = dense<1> : tensor} : () -> tensor + %3 = 
"tosa.add"(%arg3, %2) : (tensor, tensor) -> tensor + "tosa.yield"(%3, %arg4, %arg5) : (tensor, tensor, tensor) -> () + }) : (tensor, tensor, tensor) -> (tensor, tensor, tensor) + return +} diff --git a/mlir/test/Dialect/Transform/test-promote-tensors.mlir b/mlir/test/Dialect/Transform/test-promote-tensors.mlir new file mode 100644 index 0000000000000..bc9a05af64156 --- /dev/null +++ b/mlir/test/Dialect/Transform/test-promote-tensors.mlir @@ -0,0 +1,104 @@ +// RUN: mlir-opt %s --transform-interpreter --split-input-file | FileCheck %s + +// CHECK-LABEL: @promote_in0 +// CHECK-SAME: (%[[ARG0:.+]]: tensor, %{{.*}}, %{{.*}}) +// CHECK: %[[C0:.+]] = arith.constant 0 +// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] +// CHECK: %[[ALLOC:.+]] = bufferization.alloc_tensor(%[[DIM]]) {memory_space = 1 : i64} +// CHECK: %[[MAT:.+]] = bufferization.materialize_in_destination %[[ARG0]] in %[[ALLOC]] +// CHECK: linalg.matmul ins(%[[MAT]], %{{.*}} +func.func @promote_in0(%arg0: tensor, %arg1: tensor<42x?xf32>, %arg2: tensor) -> tensor { + %0 = linalg.matmul ins(%arg0, %arg1: tensor, tensor<42x?xf32>) + outs(%arg2: tensor) -> tensor + return %0 : tensor +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%root: !transform.any_op) { + %mm = transform.structured.match ops{["linalg.matmul"]} in %root + : (!transform.any_op) -> !transform.any_op + %op0 = transform.get_operand %mm[0] + : (!transform.any_op) -> !transform.any_value + transform.structured.promote_tensor to 1 %op0 : !transform.any_value + transform.yield + } +} + +// ----- + +// CHECK-LABEL: @promote_out +// CHECK-SAME: (%{{.*}}: tensor, %{{.*}}: tensor, %[[ARG2:.+]]: tensor) +func.func @promote_out(%arg0: tensor, %arg1: tensor, %arg2: tensor) -> tensor { + // CHECK: %[[C0:.+]] = arith.constant 0 + // CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG2]], %[[C0]] + // CHECK: %[[C1:.+]] = arith.constant 1 + // CHECK: %[[DIM1:.+]] = tensor.dim %[[ARG2]], %[[C1]] + // CHECK: %[[ALLOC:.+]] = bufferization.alloc_tensor(%[[DIM0]], %[[DIM1]]) {memory_space = 1 : i64} + // CHECK-NOT: materialize_in_destination + // CHECK: linalg.add {{.*}} outs(%[[ALLOC]] + %0 = linalg.add ins(%arg0, %arg1 : tensor, tensor) + outs(%arg2 : tensor) -> tensor + return %0 : tensor +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%root: !transform.any_op) { + %la = transform.structured.match ops{["linalg.add"]} in %root + : (!transform.any_op) -> !transform.any_op + %init = transform.get_operand %la[2] + : (!transform.any_op) -> !transform.any_value + transform.structured.promote_tensor to 1 %init : !transform.any_value + + transform.yield + } +} + +// ----- + +// CHECK-LABEL: @promote_in0_out_bufferize +// CHECK-SAME: (%[[ARG0:.+]]: tensor, %{{.*}}: tensor<42x?xf32>, %[[ARG2:.+]]: tensor) +func.func @promote_in0_out_bufferize(%arg0: tensor, %arg1: tensor<42x?xf32>, %arg2: tensor) -> tensor { + // CHECK: %[[IN1:.+]] = bufferization.to_buffer %arg1 : tensor<42x?xf32> to memref<42x?xf32, strided<[?, ?], offset: ?>> + // CHECK: %[[IN0:.+]] = bufferization.to_buffer %arg0 : tensor to memref> + // CHECK: %{{.+}} = bufferization.to_buffer %arg0 : tensor to memref> + // CHECK: %{{.+}} = bufferization.to_buffer %arg2 : tensor to memref> + // CHECK: %{{.+}} = bufferization.to_buffer %arg2 : tensor to memref> + // CHECK: %[[C0:.+]] = arith.constant 0 : index + // CHECK: %{{.+}} = memref.dim %{{.+}}, %[[C0]] : memref> + // CHECK: %[[C1:.+]] = arith.constant 1 : index + // CHECK: %{{.+}} = 
memref.dim %{{.+}}, %[[C1]] : memref> + // CHECK: %[[ALLOC_OUT:.+]] = memref.alloc(%{{.+}}, %{{.+}}) {alignment = 64 : i64} : memref + // CHECK: %{{.+}} = arith.constant 0 : index + // CHECK: %{{.+}} = memref.dim %{{.+}}, %{{.+}} : memref> + // CHECK: %[[ALLOC_IN:.+]] = memref.alloc(%{{.+}}) {alignment = 64 : i64} : memref + // CHECK: memref.copy %[[IN0]], %[[ALLOC_IN]] : memref> to memref + // CHECK: linalg.add ins(%[[ALLOC_IN]], %[[IN1]] : memref, memref<42x?xf32, strided<[?, ?], offset: ?>>) outs(%[[ALLOC_OUT]] : memref) + %0 = linalg.add ins(%arg0, %arg1: tensor, tensor<42x?xf32>) + outs(%arg2: tensor) -> tensor + return %0 : tensor +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%root: !transform.any_op) { + %la = transform.structured.match ops{["linalg.add"]} in %root + : (!transform.any_op) -> !transform.any_op + %op0 = transform.get_operand %la[0] + : (!transform.any_op) -> !transform.any_value + transform.structured.promote_tensor to 1 %op0 : !transform.any_value + + %init = transform.get_operand %la[2] + : (!transform.any_op) -> !transform.any_value + transform.structured.promote_tensor to 1 %init : !transform.any_value + + %func = transform.structured.match ops{["func.func"]} in %root + : (!transform.any_op) -> !transform.any_op + + %bufferized = transform.bufferization.one_shot_bufferize %func + : (!transform.any_op) -> !transform.any_op + + transform.yield + } +} + + + diff --git a/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir b/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir index 2e5f433abeb71..efc3890288456 100644 --- a/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir +++ b/mlir/test/Dialect/Transform/test-tune-extension-invalid.mlir @@ -19,3 +19,88 @@ module attributes {transform.with_named_sequence} { transform.yield } } + +// ----- + +func.func private @f() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + // expected-error@below {{'selected_region' attribute specifies region at index 2 while op has only 2 regions}} + transform.tune.alternatives<"bifurcation"> selected_region = 2 { + transform.yield + }, { + transform.yield + } + transform.yield + } +} + +// ----- + +func.func private @f() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %singleton_of_c0 = transform.param.constant [0] -> !transform.any_param + // expected-error@below {{param should hold exactly one integer attribute, got: [0]}} + transform.tune.alternatives<"bifurcation"> selected_region = %singleton_of_c0 : !transform.any_param { + transform.yield + }, { + transform.yield + } + transform.yield + } +} + +// ----- + +func.func private @f() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %c0 = transform.param.constant 0 -> !transform.any_param + %c1 = transform.param.constant 1 -> !transform.any_param + %c0_and_c1 = transform.merge_handles %c0, %c1 : !transform.any_param + // expected-error@below {{param should hold exactly one integer attribute}} + transform.tune.alternatives<"bifurcation"> selected_region = %c0_and_c1 : !transform.any_param { + transform.yield + }, { + transform.yield + } + transform.yield + } +} + +// ----- + +func.func private @f() + +module attributes {transform.with_named_sequence} { + 
transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %c2 = transform.param.constant 2 -> !transform.any_param + // expected-error@below {{'selected_region' attribute/param specifies region at index 2 while op has only 2 regions}} + transform.tune.alternatives<"bifurcation"> selected_region = %c2 : !transform.any_param { + transform.yield + }, { + transform.yield + } + transform.yield + } +} + +// ----- + +func.func private @f() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + // expected-error@below {{non-deterministic choice "bifurcation" is only resolved through providing a `selected_region` attr/param}} + transform.tune.alternatives<"bifurcation"> { + transform.yield + }, { + transform.yield + } + transform.yield + } +} diff --git a/mlir/test/Dialect/Transform/test-tune-extension.mlir b/mlir/test/Dialect/Transform/test-tune-extension.mlir index 0a253c6d5f837..5da48a2218ec6 100644 --- a/mlir/test/Dialect/Transform/test-tune-extension.mlir +++ b/mlir/test/Dialect/Transform/test-tune-extension.mlir @@ -59,3 +59,129 @@ module attributes {transform.with_named_sequence} { transform.yield } } + + +// ----- + +// CHECK-LABEL: schedule_with_two_independent_choices_already_made +func.func @schedule_with_two_independent_choices_already_made( + %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>) + -> tensor<128x128xf32> { +// CHECK-NOT: scf.forall +// CHECK: scf.for +// CHECK-NOT: scf.for +// CHECK: scf.forall +// CHECK-NOT: scf.for +// CHECK: tensor.extract_slice +// CHECK: tensor.extract_slice +// CHECK: tensor.extract_slice +// CHECK: linalg.matmul +// CHECK: scf.forall.in_parallel +// CHECK: tensor.parallel_insert_slice +// CHECK: tensor.insert_slice +// CHECK: scf.yield + %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>) + outs(%arg2: tensor<128x128xf32>) -> tensor<128x128xf32> + return %0 : tensor<128x128xf32> +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op + + %tiled_matmul = transform.tune.alternatives<"outer_par_or_seq_tiling"> selected_region = 0 -> !transform.any_op + { // First alternative/region, with index = 0 + %contained_matmul, %loop = transform.structured.tile_using_for %matmul tile_sizes [8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + }, { // Second alternative/region, with index = 1 + %contained_matmul, %loop = transform.structured.tile_using_forall %matmul tile_sizes [8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + } + + transform.tune.alternatives<"inner_par_or_seq_tiling"> selected_region = 1 -> !transform.any_op { + %contained_matmul, %loop = transform.structured.tile_using_for %tiled_matmul tile_sizes [0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + }, { + %contained_matmul, %loop = transform.structured.tile_using_forall %tiled_matmul tile_sizes [0, 16] : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + } + + transform.yield + } +} + +// ----- + +// 
CHECK-LABEL: subschedule_with_choice_resolved_in_main_schedule +func.func @subschedule_with_choice_resolved_in_main_schedule( + %arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>) + -> tensor<128x128xf32> { +// CHECK-NOT: scf.for +// CHECK: scf.forall +// CHECK-NOT: scf.forall +// CHECK: scf.for +// CHECK-NOT: scf.forall +// CHECK: tensor.extract_slice +// CHECK: tensor.extract_slice +// CHECK: tensor.extract_slice +// CHECK: linalg.matmul +// CHECK: tensor.insert_slice +// CHECK: scf.yield +// CHECK: scf.forall.in_parallel +// CHECK: tensor.parallel_insert_slice + %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x128xf32>, tensor<128x128xf32>) + outs(%arg2: tensor<128x128xf32>) -> tensor<128x128xf32> + return %0 : tensor<128x128xf32> +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @subschedule_with_embedded_choice(%matmul: !transform.any_op {transform.readonly}, + %par_or_seq: !transform.param {transform.readonly}, + %tile_size: !transform.param {transform.readonly}) -> !transform.any_op { + %tiled_matmul = transform.tune.alternatives<"par_or_seq_tiling"> selected_region = %par_or_seq : !transform.param -> !transform.any_op { + %contained_matmul, %loop = transform.structured.tile_using_for %matmul tile_sizes [%tile_size] : (!transform.any_op, !transform.param) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + }, { + %contained_matmul, %loop = transform.structured.tile_using_forall %matmul tile_sizes [%tile_size] : (!transform.any_op, !transform.param) -> (!transform.any_op, !transform.any_op) + transform.yield %contained_matmul : !transform.any_op + } + transform.yield %tiled_matmul : !transform.any_op + } + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op + %outer_par = transform.param.constant 1 -> !transform.param + %outer_tile_size = transform.param.constant 32 -> !transform.param + %inner_seq = transform.tune.knob<"inner_par_or_seq"> = 0 from options = [0, 1] -> !transform.param + %inner_tile_size = transform.param.constant 8 -> !transform.param + %tiled_matmul = transform.include @subschedule_with_embedded_choice failures(propagate) (%matmul, %outer_par, %outer_tile_size) : (!transform.any_op, !transform.param, !transform.param) -> !transform.any_op + %tiled_tiled_matmul = transform.include @subschedule_with_embedded_choice failures(propagate) (%tiled_matmul, %inner_seq, %inner_tile_size) : (!transform.any_op, !transform.param, !transform.param) -> !transform.any_op + transform.yield + } +} + +// ----- + +// CHECK-LABEL: eeny_meeny_miny_moe +func.func private @eeny_meeny_miny_moe() + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) { + %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0 : (!transform.any_op) -> !transform.any_op + + %tiled_matmul = transform.tune.alternatives<"4way"> selected_region = 3 -> !transform.any_param + { // First alternative/region, with index = 0 + %out = transform.param.constant "eeny" -> !transform.any_param + transform.yield %out : !transform.any_param + }, { // Second alternative/region, with index = 1 + %out = transform.param.constant "meeny" -> !transform.any_param + transform.yield %out : !transform.any_param + }, { // Third alternative/region, with index = 2 + %out = 
transform.param.constant "miny" -> !transform.any_param + transform.yield %out : !transform.any_param + }, { // Fourth alternative/region, with index = 3 + %out = transform.param.constant "moe" -> !transform.any_param + transform.yield %out : !transform.any_param + } + transform.yield + } +} \ No newline at end of file diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir index 08d28be3f8f73..5448976f84760 100644 --- a/mlir/test/Dialect/Vector/canonicalize.mlir +++ b/mlir/test/Dialect/Vector/canonicalize.mlir @@ -3411,16 +3411,86 @@ func.func @negative_from_elements_poison_constant_mix() -> vector<2xf32> { return %1 : vector<2xf32> } +// ----- + +// CHECK-LABEL: func @from_elements_float8_to_i8_conversion( +// CHECK-NEXT: %[[CST:.*]] = arith.constant dense<[0, 56, -72, 69, 127, -1]> : vector<6xi8> +// CHECK-NEXT: return %[[CST]] : vector<6xi8> +func.func @from_elements_float8_to_i8_conversion() -> vector<6xi8> { + %cst0 = llvm.mlir.constant(0.0 : f8E4M3FN) : i8 + %cst1 = llvm.mlir.constant(1.0 : f8E4M3FN) : i8 + %cst_neg1 = llvm.mlir.constant(-1.0 : f8E4M3FN) : i8 + %cst_pi = llvm.mlir.constant(3.14 : f8E4M3FN) : i8 + %cst_inf = llvm.mlir.constant(0x7F : f8E4M3FN) : i8 + %cst_neg_inf = llvm.mlir.constant(0xFF : f8E4M3FN) : i8 + %v = vector.from_elements %cst0, %cst1, %cst_neg1, %cst_pi, %cst_inf, %cst_neg_inf : vector<6xi8> + return %v : vector<6xi8> +} + +// CHECK-LABEL: func @from_elements_float16_to_i16_conversion( +// CHECK-NEXT: %[[CST:.*]] = arith.constant dense<[0, 15360, -17408, 16968, 31743, -1025]> : vector<6xi16> +// CHECK-NEXT: return %[[CST]] : vector<6xi16> +func.func @from_elements_float16_to_i16_conversion() -> vector<6xi16> { + %cst0 = llvm.mlir.constant(0.0 : f16) : i16 + %cst1 = llvm.mlir.constant(1.0 : f16) : i16 + %cst_neg1 = llvm.mlir.constant(-1.0 : f16) : i16 + %cst_pi = llvm.mlir.constant(3.14 : f16) : i16 + %cst_max = llvm.mlir.constant(65504.0 : f16) : i16 + %cst_min = llvm.mlir.constant(-65504.0 : f16) : i16 + %v = vector.from_elements %cst0, %cst1, %cst_neg1, %cst_pi, %cst_max, %cst_min : vector<6xi16> + return %v : vector<6xi16> +} + +// CHECK-LABEL: func @from_elements_f64_to_i64_conversion( +// CHECK-NEXT: %[[CST:.*]] = arith.constant dense<[0, 4607182418800017408, -4616189618054758400, 4614253070214989087, 9218868437227405311, -4503599627370497]> : vector<6xi64> +// CHECK-NEXT: return %[[CST]] : vector<6xi64> +func.func @from_elements_f64_to_i64_conversion() -> vector<6xi64> { + %cst0 = llvm.mlir.constant(0.0 : f64) : i64 + %cst1 = llvm.mlir.constant(1.0 : f64) : i64 + %cst_neg1 = llvm.mlir.constant(-1.0 : f64) : i64 + %cst_pi = llvm.mlir.constant(3.14 : f64) : i64 + %cst_max = llvm.mlir.constant(1.7976931348623157e+308 : f64) : i64 + %cst_min = llvm.mlir.constant(-1.7976931348623157e+308 : f64) : i64 + %v = vector.from_elements %cst0, %cst1, %cst_neg1, %cst_pi, %cst_max, %cst_min : vector<6xi64> + return %v : vector<6xi64> +} + +// ----- + +// CHECK-LABEL: func @from_elements_i1_to_i8_conversion( +// CHECK-NEXT: %[[CST:.*]] = arith.constant dense<0> : vector<1xi8> +// CHECK-NEXT: return %[[CST]] : vector<1xi8> +func.func @from_elements_i1_to_i8_conversion() -> vector<1xi8> { + %cst = llvm.mlir.constant(0: i1) : i8 + %v = vector.from_elements %cst : vector<1xi8> + return %v : vector<1xi8> +} + +// ----- + +// CHECK-LABEL: func @from_elements_index_to_i64_conversion( +// CHECK-NEXT: %[[CST:.*]] = arith.constant dense<[0, 1, 42]> : vector<3xi64> +// CHECK-NEXT: return %[[CST]] : vector<3xi64> +func.func 
@from_elements_index_to_i64_conversion() -> vector<3xi64> { + %cst0 = llvm.mlir.constant(0 : index) : i64 + %cst1 = llvm.mlir.constant(1 : index) : i64 + %cst42 = llvm.mlir.constant(42 : index) : i64 + %v = vector.from_elements %cst0, %cst1, %cst42 : vector<3xi64> + return %v : vector<3xi64> +} + // +--------------------------------------------------------------------------- // End of Tests for foldFromElementsToConstant // +--------------------------------------------------------------------------- // ----- -// CHECK-LABEL: func @vector_insert_const_regression( +// Not a DenseElementsAttr, don't fold. + +// CHECK-LABEL: func @negative_insert_llvm_undef( // CHECK: llvm.mlir.undef // CHECK: vector.insert -func.func @vector_insert_const_regression(%arg0: i8) -> vector<4xi8> { +func.func @negative_insert_llvm_undef(%arg0: i8) -> vector<4xi8> { %0 = llvm.mlir.undef : vector<4xi8> %1 = vector.insert %arg0, %0 [0] : i8 into vector<4xi8> return %1 : vector<4xi8> diff --git a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir index 3950e54006eec..bf4f094263545 100644 --- a/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-contract-to-matrix-intrinsics-transforms.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s --convert-vector-to-llvm='vector-contract-lowering=matmul' | FileCheck %s +// RUN: mlir-opt %s --convert-vector-to-llvm='vector-contract-lowering=llvmintr' | FileCheck %s #matmat_accesses = [ affine_map<(i, j, k) -> (i, k)>, diff --git a/mlir/test/Dialect/Vector/vector-transpose-to-matrix-intrinsics-transform.mlir b/mlir/test/Dialect/Vector/vector-transpose-to-matrix-intrinsics-transform.mlir index 94689fa0dfb88..f68badaa122cd 100644 --- a/mlir/test/Dialect/Vector/vector-transpose-to-matrix-intrinsics-transform.mlir +++ b/mlir/test/Dialect/Vector/vector-transpose-to-matrix-intrinsics-transform.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s --convert-vector-to-llvm='vector-transpose-lowering=flat' --split-input-file | FileCheck %s +// RUN: mlir-opt %s --convert-vector-to-llvm='vector-transpose-lowering=llvmintr' --split-input-file | FileCheck %s // CHECK-LABEL: func @transpose( func.func @transpose(%arg0: vector<2x4xf32>) -> vector<4x2xf32> { diff --git a/mlir/test/Dialect/XeGPU/subgroup-distribute.mlir b/mlir/test/Dialect/XeGPU/subgroup-distribute.mlir index 13b0ed176eb0c..59fac26d18cf4 100644 --- a/mlir/test/Dialect/XeGPU/subgroup-distribute.mlir +++ b/mlir/test/Dialect/XeGPU/subgroup-distribute.mlir @@ -8,15 +8,15 @@ // CHECK-LABEL: gpu.func @store_nd_1d // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: memref<16xf32>) { // CHECK-DAG: %[[CST:.*]] = arith.constant dense<1.000000e+00> : vector<1xf32> -// CHECK-DAG: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<16xf32> -> !xegpu.tensor_desc<16xf32> -// CHECK: xegpu.store_nd %[[CST]], %[[T0]] : vector<1xf32>, !xegpu.tensor_desc<16xf32> +// CHECK-DAG: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<16xf32> -> !xegpu.tensor_desc<16xf32> +// CHECK: xegpu.store_nd %[[CST]], %[[T0]][%{{.*}}] : vector<1xf32>, !xegpu.tensor_desc<16xf32> // CHECK: gpu.return gpu.module @xevm_module{ gpu.func @store_nd_1d(%arg0: memref<16xf32>) { %c0 = arith.constant 0 : index %cst = arith.constant {layout_result_0 = #xegpu.layout} dense<1.000000e+00> : vector<16xf32> - %0 = xegpu.create_nd_tdesc %arg0[%c0] : memref<16xf32> -> !xegpu.tensor_desc<16xf32, #xegpu.layout> - xegpu.store_nd %cst, %0 : 
vector<16xf32>, !xegpu.tensor_desc<16xf32, #xegpu.layout> + %0 = xegpu.create_nd_tdesc %arg0 : memref<16xf32> -> !xegpu.tensor_desc<16xf32, #xegpu.layout> + xegpu.store_nd %cst, %0 [%c0] : vector<16xf32>, !xegpu.tensor_desc<16xf32, #xegpu.layout> gpu.return } } @@ -25,14 +25,14 @@ gpu.module @xevm_module{ // CHECK-LABEL: gpu.func @store_nd_2d // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: memref<16x16xf16>) { // CHECK-DAG: %[[CST:.*]] = arith.constant dense<1.000000e+00> : vector<16xf16> -// CHECK-DAG: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> -// CHECK: xegpu.store_nd %[[CST]], %[[T0]] : vector<16xf16>, !xegpu.tensor_desc<16x16xf16> +// CHECK-DAG: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> +// CHECK: xegpu.store_nd %[[CST]], %[[T0]][%{{.*}}] : vector<16xf16>, !xegpu.tensor_desc<16x16xf16> gpu.module @xevm_module{ gpu.func @store_nd_2d(%arg0: memref<16x16xf16>) { %c0 = arith.constant 0 : index %cst = arith.constant {layout_result_0 = #xegpu.layout} dense<1.000000e+00> : vector<16x16xf16> - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - xegpu.store_nd %cst, %0 : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + %0 = xegpu.create_nd_tdesc %arg0 : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + xegpu.store_nd %cst, %0 [%c0, %c0] : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout> gpu.return } } @@ -42,17 +42,17 @@ gpu.module @xevm_module{ // ----- // CHECK-LABEL: gpu.func @load_nd_1d // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: memref<16xf32>, %[[ARG1:[0-9a-zA-Z]+]]: memref<16xf32>) { -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<16xf32> -> !xegpu.tensor_desc<16xf32> -// CHECK-DAG: %[[T1:.*]] = xegpu.load_nd %[[T0]] : !xegpu.tensor_desc<16xf32> -> vector<1xf32> -// CHECK-DAG: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG1]][%{{.*}}] : memref<16xf32> -> !xegpu.tensor_desc<16xf32> -// CHECK: xegpu.store_nd %[[T1]], %[[T2]] : vector<1xf32>, !xegpu.tensor_desc<16xf32> +// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<16xf32> -> !xegpu.tensor_desc<16xf32> +// CHECK-DAG: %[[T1:.*]] = xegpu.load_nd %[[T0]][%{{.*}}] : !xegpu.tensor_desc<16xf32> -> vector<1xf32> +// CHECK-DAG: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG1]] : memref<16xf32> -> !xegpu.tensor_desc<16xf32> +// CHECK: xegpu.store_nd %[[T1]], %[[T2]][%{{.*}}] : vector<1xf32>, !xegpu.tensor_desc<16xf32> gpu.module @xevm_module{ gpu.func @load_nd_1d(%arg0: memref<16xf32>, %arg1: memref<16xf32>) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0] : memref<16xf32> -> !xegpu.tensor_desc<16xf32, #xegpu.layout> - %1 = xegpu.load_nd %0 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16xf32, #xegpu.layout> -> vector<16xf32> - %2 = xegpu.create_nd_tdesc %arg1[%c0] : memref<16xf32> -> !xegpu.tensor_desc<16xf32, #xegpu.layout> - xegpu.store_nd %1, %2 : vector<16xf32>, !xegpu.tensor_desc<16xf32, #xegpu.layout> + %0 = xegpu.create_nd_tdesc %arg0 : memref<16xf32> -> !xegpu.tensor_desc<16xf32, #xegpu.layout> + %1 = xegpu.load_nd %0 [%c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16xf32, #xegpu.layout> -> vector<16xf32> + %2 = xegpu.create_nd_tdesc %arg1 : memref<16xf32> -> !xegpu.tensor_desc<16xf32, #xegpu.layout> + xegpu.store_nd %1, %2 [%c0] : vector<16xf32>, !xegpu.tensor_desc<16xf32, #xegpu.layout> gpu.return } } @@ -60,17 +60,17 @@ gpu.module @xevm_module{ // ----- 
// CHECK-LABEL: gpu.func @load_nd_2d // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: memref<16x16xf16>, %[[ARG1:[0-9a-zA-Z]+]]: memref<16x16xf16>) { -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> -// CHECK-DAG: %[[T1:.*]] = xegpu.load_nd %[[T0]] : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16> -// CHECK-DAG: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG1]][%{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> -// CHECK: xegpu.store_nd %[[T1]], %[[T2]] : vector<16xf16>, !xegpu.tensor_desc<16x16xf16> +// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> +// CHECK-DAG: %[[T1:.*]] = xegpu.load_nd %[[T0]][%{{.*}}] : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16> +// CHECK-DAG: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG1]] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> +// CHECK: xegpu.store_nd %[[T1]], %[[T2]][%{{.*}}] : vector<16xf16>, !xegpu.tensor_desc<16x16xf16> gpu.module @xevm_module{ gpu.func @load_nd_2d(%arg0: memref<16x16xf16>, %arg1: memref<16x16xf16>) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - %1 = xegpu.load_nd %0 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> - %2 = xegpu.create_nd_tdesc %arg1[%c0, %c0] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - xegpu.store_nd %1, %2 : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + %0 = xegpu.create_nd_tdesc %arg0 : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + %1 = xegpu.load_nd %0[%c0, %c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> + %2 = xegpu.create_nd_tdesc %arg1: memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + xegpu.store_nd %1, %2[%c0, %c0] : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout> gpu.return } } @@ -78,21 +78,21 @@ gpu.module @xevm_module{ // ----- // CHECK-LABEL: gpu.func @load_nd_array_length // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: memref<16x16xf16>, %[[ARG1:[0-9a-zA-Z]+]]: memref<16x16xf16>) { -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.block_tdesc_attr> -// CHECK: %[[T1:.*]] = xegpu.load_nd %[[T0]] : !xegpu.tensor_desc<16x16xf16, #xegpu.block_tdesc_attr> -> vector<32xf16> +// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.block_tdesc_attr> +// CHECK: %[[T1:.*]] = xegpu.load_nd %[[T0]][%{{.*}}] : !xegpu.tensor_desc<16x16xf16, #xegpu.block_tdesc_attr> -> vector<32xf16> // CHECK: %[[T2:.*]] = vector.shape_cast %[[T1]] : vector<32xf16> to vector<2x16x1xf16> // CHECK: %[[T3:.*]] = vector.extract %[[T2]][0] : vector<16x1xf16> from vector<2x16x1xf16> -// CHECK-DAG: %[[T4:.*]] = xegpu.create_nd_tdesc %[[ARG1]][%{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> +// CHECK-DAG: %[[T4:.*]] = xegpu.create_nd_tdesc %[[ARG1]] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> // CHECK-DAG: %[[T5:.*]] = vector.shape_cast %[[T3]] : vector<16x1xf16> to vector<16xf16> -// CHECK: xegpu.store_nd %[[T5]], %[[T4]] : vector<16xf16>, !xegpu.tensor_desc<16x16xf16> +// CHECK: xegpu.store_nd %[[T5]], %[[T4]][%{{.*}}] : vector<16xf16>, !xegpu.tensor_desc<16x16xf16> gpu.module @xevm_module{ gpu.func @load_nd_array_length(%arg0: memref<16x16xf16>, %arg1: 
memref<16x16xf16>) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.block_tdesc_attr, #xegpu.layout> - %1 = xegpu.load_nd %0 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.block_tdesc_attr, #xegpu.layout> -> vector<2x16x16xf16> + %0 = xegpu.create_nd_tdesc %arg0 : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.block_tdesc_attr, #xegpu.layout> + %1 = xegpu.load_nd %0[%c0, %c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.block_tdesc_attr, #xegpu.layout> -> vector<2x16x16xf16> %2 = vector.extract %1[%c0] {layout_result_0 = #xegpu.layout} : vector<16x16xf16> from vector<2x16x16xf16> - %3 = xegpu.create_nd_tdesc %arg1[%c0, %c0] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - xegpu.store_nd %2, %3 : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + %3 = xegpu.create_nd_tdesc %arg1 : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + xegpu.store_nd %2, %3[%c0, %c0] : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout> gpu.return } } @@ -100,23 +100,23 @@ gpu.module @xevm_module{ // ----- // CHECK-LABEL: gpu.func @load_dpas_store // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: memref<8x16xf16>, %[[ARG1:[0-9a-zA-Z]+]]: memref<16x16xf16>, %[[ARG2:[0-9a-zA-Z]+]]: memref<8x16xf32>) { -// CHECK: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16> -// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T2]] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16> -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG1]][%{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> -// CHECK: %[[T1:.*]] = xegpu.load_nd %[[T0]] <{packed}> : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16> +// CHECK: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16> +// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T2]][%{{.*}}] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16> +// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG1]] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> +// CHECK: %[[T1:.*]] = xegpu.load_nd %[[T0]][%{{.*}}] <{packed}> : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16> // CHECK-DAG: %[[T4:.*]] = xegpu.dpas %[[T3]], %[[T1]] : vector<8xf16>, vector<16xf16> -> vector<8xf32> -// CHECK-DAG: %[[T5:.*]] = xegpu.create_nd_tdesc %[[ARG2]][%{{.*}}] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32> -// CHECK: xegpu.store_nd %[[T4]], %[[T5]] : vector<8xf32>, !xegpu.tensor_desc<8x16xf32> +// CHECK-DAG: %[[T5:.*]] = xegpu.create_nd_tdesc %[[ARG2]] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32> +// CHECK: xegpu.store_nd %[[T4]], %[[T5]][%{{.*}}] : vector<8xf32>, !xegpu.tensor_desc<8x16xf32> gpu.module @xevm_module{ gpu.func @load_dpas_store(%arg0: memref<8x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<8x16xf32>) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16, #xegpu.layout> - %1 = xegpu.load_nd %0 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16> - %2 = xegpu.create_nd_tdesc %arg1[%c0, %c0] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - %3 = xegpu.load_nd %2 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> + %0 = xegpu.create_nd_tdesc %arg0 : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16, #xegpu.layout> + %1 = xegpu.load_nd 
%0[%c0, %c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16> + %2 = xegpu.create_nd_tdesc %arg1: memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + %3 = xegpu.load_nd %2[%c0, %c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> %4 = xegpu.dpas %1, %3 {layout_result_0 = #xegpu.layout} : vector<8x16xf16>, vector<16x16xf16> -> vector<8x16xf32> - %5 = xegpu.create_nd_tdesc %arg2[%c0, %c0] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout> - xegpu.store_nd %4, %5 : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout> + %5 = xegpu.create_nd_tdesc %arg2 : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout> + xegpu.store_nd %4, %5[%c0, %c0] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout> gpu.return } } @@ -125,27 +125,27 @@ gpu.module @xevm_module{ // ----- // CHECK-LABEL: gpu.func @load_dpas_postop_store // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: memref<8x16xf16>, %[[ARG1:[0-9a-zA-Z]+]]: memref<16x16xf16>, %[[ARG2:[0-9a-zA-Z]+]]: memref<8x16xf32>) { -// CHECK: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16> -// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T2]] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16> -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG1]][%{{.*}}] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> -// CHECK: %[[T1:.*]] = xegpu.load_nd %[[T0]] <{packed}> : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16> +// CHECK: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16> +// CHECK: %[[T3:.*]] = xegpu.load_nd %[[T2]][%{{.*}}] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16> +// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG1]] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16> +// CHECK: %[[T1:.*]] = xegpu.load_nd %[[T0]][%{{.*}}] <{packed}> : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16> // CHECK-DAG: %[[T4:.*]] = xegpu.dpas %[[T3]], %[[T1]] : vector<8xf16>, vector<16xf16> -> vector<8xf32> // CHECK: %[[T5:.*]] = vector.shape_cast %[[T4]] : vector<8xf32> to vector<8x1xf32> // CHECK: %[[T6:.*]] = math.exp %[[T5]] {{{.*}}} : vector<8x1xf32> // CHECK-DAG: %[[T8:.*]] = vector.shape_cast %[[T6]] : vector<8x1xf32> to vector<8xf32> -// CHECK-DAG: %[[T7:.*]] = xegpu.create_nd_tdesc %[[ARG2]][{{.*}}] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32> -// CHECK: xegpu.store_nd %[[T8]], %[[T7]] : vector<8xf32>, !xegpu.tensor_desc<8x16xf32> +// CHECK-DAG: %[[T7:.*]] = xegpu.create_nd_tdesc %[[ARG2]] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32> +// CHECK: xegpu.store_nd %[[T8]], %[[T7]][{{.*}}] : vector<8xf32>, !xegpu.tensor_desc<8x16xf32> gpu.module @xevm_module{ gpu.func @load_dpas_postop_store(%arg0: memref<8x16xf16>, %arg1: memref<16x16xf16>, %arg2: memref<8x16xf32>) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16, #xegpu.layout> - %1 = xegpu.load_nd %0 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16> - %2 = xegpu.create_nd_tdesc %arg1[%c0, %c0] : memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - %3 = xegpu.load_nd %2 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> + %0 = xegpu.create_nd_tdesc %arg0 : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16, #xegpu.layout> + %1 = xegpu.load_nd %0[%c0, %c0] {layout_result_0 = 
#xegpu.layout} : !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16> + %2 = xegpu.create_nd_tdesc %arg1: memref<16x16xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + %3 = xegpu.load_nd %2[%c0, %c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> %4 = xegpu.dpas %1, %3 {layout_result_0 = #xegpu.layout} : vector<8x16xf16>, vector<16x16xf16> -> vector<8x16xf32> %5 = math.exp %4 {layout_result_0 = #xegpu.layout} : vector<8x16xf32> - %6 = xegpu.create_nd_tdesc %arg2[%c0, %c0] : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout> - xegpu.store_nd %5, %6 : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout> + %6 = xegpu.create_nd_tdesc %arg2 : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout> + xegpu.store_nd %5, %6[%c0, %c0] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout> gpu.return } } @@ -155,17 +155,17 @@ gpu.module @xevm_module{ // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: ui64, %[[ARG1:[0-9a-zA-Z]+]]: ui64, %[[ARG2:[0-9a-zA-Z]+]]: index, // CHECK-SAME: %[[ARG3:[0-9a-zA-Z]+]]: index, %[[ARG4:[0-9a-zA-Z]+]]: index, // CHECK-SAME: %[[ARG5:[0-9a-zA-Z]+]]: index, %[[ARG6:[0-9a-zA-Z]+]]: index, %[[ARG7:[0-9a-zA-Z]+]]: index) { -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][{{.*}}], shape : [%[[ARG2]], %[[ARG3]]], strides : [%[[ARG4]], %[[ARG5]]] : ui64 -> !xegpu.tensor_desc<16x16xf16> -// CHECK: %[[T1:.*]] = xegpu.load_nd %[[T0]] : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16> -// CHECK: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG1]][{{.*}}], shape : [%[[ARG2]], %[[ARG3]]], strides : [%[[ARG4]], %[[ARG5]]] : ui64 -> !xegpu.tensor_desc<16x16xf16> -// CHECK: xegpu.store_nd %[[T1]], %[[T2]] : vector<16xf16>, !xegpu.tensor_desc<16x16xf16> +// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]], shape : [%[[ARG2]], %[[ARG3]]], strides : [%[[ARG4]], %[[ARG5]]] : ui64 -> !xegpu.tensor_desc<16x16xf16> +// CHECK: %[[T1:.*]] = xegpu.load_nd %[[T0]][{{.*}}] : !xegpu.tensor_desc<16x16xf16> -> vector<16xf16> +// CHECK: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG1]], shape : [%[[ARG2]], %[[ARG3]]], strides : [%[[ARG4]], %[[ARG5]]] : ui64 -> !xegpu.tensor_desc<16x16xf16> +// CHECK: xegpu.store_nd %[[T1]], %[[T2]][{{.*}}] : vector<16xf16>, !xegpu.tensor_desc<16x16xf16> gpu.module @xevm_module{ gpu.func @create_nd_tdesc_non_memref(%arg0: ui64, %arg1: ui64, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: index, %arg7: index) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0], shape:[%arg2, %arg3], strides:[%arg4, %arg5] : ui64 -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - %1 = xegpu.load_nd %0 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> - %2 = xegpu.create_nd_tdesc %arg1[%c0, %c0], shape:[%arg2, %arg3], strides:[%arg4, %arg5] : ui64 -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - xegpu.store_nd %1, %2 : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + %0 = xegpu.create_nd_tdesc %arg0, shape:[%arg2, %arg3], strides:[%arg4, %arg5] : ui64 -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + %1 = xegpu.load_nd %0[%c0, %c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> -> vector<16x16xf16> + %2 = xegpu.create_nd_tdesc %arg1, shape:[%arg2, %arg3], strides:[%arg4, %arg5] : ui64 -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + xegpu.store_nd %1, %2[%c0, %c0] : vector<16x16xf16>, !xegpu.tensor_desc<16x16xf16, #xegpu.layout> gpu.return } } @@ -178,21 
+178,21 @@ gpu.module @xevm_module{ // CHECK-DAG: %[[BLOCK_ID_Y:.*]] = gpu.block_id y // CHECK-DAG: %[[Y_COORD:.*]] = arith.muli %[[BLOCK_ID_Y]], %c16 : index // CHECK-DAG: %[[X_COORD:.*]] = arith.muli %[[BLOCK_ID_X]], %c8 : index -// CHECK: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG2]][%[[X_COORD]], %[[Y_COORD]]] : memref<1024x1024xf32> -> !xegpu.tensor_desc<8x16xf32> -// CHECK-NEXT: %[[T3:.*]] = xegpu.load_nd %[[T2]] : !xegpu.tensor_desc<8x16xf32> -> vector<8xf32> +// CHECK: %[[T2:.*]] = xegpu.create_nd_tdesc %[[ARG2]] : memref<1024x1024xf32> -> !xegpu.tensor_desc<8x16xf32> +// CHECK-NEXT: %[[T3:.*]] = xegpu.load_nd %[[T2]][%[[X_COORD]], %[[Y_COORD]]] : !xegpu.tensor_desc<8x16xf32> -> vector<8xf32> // CHECK-NEXT: %[[T4:.*]] = vector.shape_cast %[[T3]] : vector<8xf32> to vector<8x1xf32> // CHECK: %[[T5:.*]] = scf.for %[[K:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG4:.*]] = %[[T4]]) -> (vector<8x1xf32>) { -// CHECK-DAG: %[[T10:.*]] = xegpu.create_nd_tdesc %[[ARG1]][%[[K]], %[[Y_COORD]]] : memref<1024x1024xbf16> -> !xegpu.tensor_desc<16x16xbf16> -// CHECK-DAG: %[[T11:.*]] = xegpu.load_nd %[[T10]] <{packed}> : !xegpu.tensor_desc<16x16xbf16> -> vector<16xbf16> -// CHECK-DAG: %[[T12:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%[[X_COORD]], %[[K]]] : memref<1024x1024xbf16> -> !xegpu.tensor_desc<8x16xbf16> -// CHECK-DAG: %[[T13:.*]] = xegpu.load_nd %[[T12]] : !xegpu.tensor_desc<8x16xbf16> -> vector<8xbf16> +// CHECK-DAG: %[[T10:.*]] = xegpu.create_nd_tdesc %[[ARG1]] : memref<1024x1024xbf16> -> !xegpu.tensor_desc<16x16xbf16> +// CHECK-DAG: %[[T11:.*]] = xegpu.load_nd %[[T10]][%[[K]], %[[Y_COORD]]] <{packed}> : !xegpu.tensor_desc<16x16xbf16> -> vector<16xbf16> +// CHECK-DAG: %[[T12:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<1024x1024xbf16> -> !xegpu.tensor_desc<8x16xbf16> +// CHECK-DAG: %[[T13:.*]] = xegpu.load_nd %[[T12]][%[[X_COORD]], %[[K]]] : !xegpu.tensor_desc<8x16xbf16> -> vector<8xbf16> // CHECK-DAG: %[[T14:.*]] = vector.shape_cast %[[ARG4]] : vector<8x1xf32> to vector<8xf32> // CHECK-NEXT: %[[T15:.*]] = xegpu.dpas %[[T13]], %[[T11]], %[[T14]] : vector<8xbf16>, vector<16xbf16>, vector<8xf32> -> vector<8xf32> // CHECK-NEXT: %[[T16:.*]] = vector.shape_cast %[[T15]] : vector<8xf32> to vector<8x1xf32> // CHECK-NEXT: scf.yield %[[T16]] : vector<8x1xf32> // CHECK-NEXT: } // CHECK-NEXT: %[[T9:.*]] = vector.shape_cast %[[T5]] : vector<8x1xf32> to vector<8xf32> -// CHECK-NEXT: xegpu.store_nd %[[T9]], %[[T2]] : vector<8xf32>, !xegpu.tensor_desc<8x16xf32> +// CHECK-NEXT: xegpu.store_nd %[[T9]], %[[T2]][%[[X_COORD]], %[[Y_COORD]]] : vector<8xf32>, !xegpu.tensor_desc<8x16xf32> gpu.module @xevm_module{ gpu.func @gemm(%arg0: memref<1024x1024xbf16>, %arg1: memref<1024x1024xbf16>, %arg2: memref<1024x1024xf32>){ %c0 = arith.constant 0 : index @@ -203,91 +203,31 @@ gpu.func @gemm(%arg0: memref<1024x1024xbf16>, %arg1: memref<1024x1024xbf16>, %ar %block_id_y = gpu.block_id y %0 = arith.muli %block_id_x, %c8 : index %1 = arith.muli %block_id_y, %c16 : index - %2 = xegpu.create_nd_tdesc %arg2[%0, %1] : memref<1024x1024xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout> - %3 = xegpu.load_nd %2 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<8x16xf32, #xegpu.layout> -> vector<8x16xf32> + %2 = xegpu.create_nd_tdesc %arg2 : memref<1024x1024xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout> + %3 = xegpu.load_nd %2[%0, %1] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<8x16xf32, #xegpu.layout> -> vector<8x16xf32> %4 = scf.for %arg3 = %c0 to %c1024 step %c16 iter_args(%arg4 = %3) -> 
(vector<8x16xf32>) { - %5 = xegpu.create_nd_tdesc %arg0[%0, %arg3] : memref<1024x1024xbf16> -> !xegpu.tensor_desc<8x16xbf16, #xegpu.layout> - %6 = xegpu.create_nd_tdesc %arg1[%arg3, %1] : memref<1024x1024xbf16> -> !xegpu.tensor_desc<16x16xbf16, #xegpu.layout> - %7 = xegpu.load_nd %5 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<8x16xbf16, #xegpu.layout> -> vector<8x16xbf16> - %8 = xegpu.load_nd %6 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xbf16, #xegpu.layout> -> vector<16x16xbf16> + %5 = xegpu.create_nd_tdesc %arg0: memref<1024x1024xbf16> -> !xegpu.tensor_desc<8x16xbf16, #xegpu.layout> + %6 = xegpu.create_nd_tdesc %arg1 : memref<1024x1024xbf16> -> !xegpu.tensor_desc<16x16xbf16, #xegpu.layout> + %7 = xegpu.load_nd %5[%0, %arg3] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<8x16xbf16, #xegpu.layout> -> vector<8x16xbf16> + %8 = xegpu.load_nd %6[%arg3, %1] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x16xbf16, #xegpu.layout> -> vector<16x16xbf16> %9 = xegpu.dpas %7, %8, %arg4 {layout_result_0 = #xegpu.layout} : vector<8x16xbf16>, vector<16x16xbf16>, vector<8x16xf32> -> vector<8x16xf32> scf.yield %9 : vector<8x16xf32> } {layout_result_0 = #xegpu.layout} - xegpu.store_nd %4, %2 : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout> + xegpu.store_nd %4, %2[%0, %1] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout> gpu.return } } -// ----- -// CHECK-LABEL: gpu.func @update_nd_offset_1d( -// CHECK: %[[ARG0:[0-9a-zA-Z]+]]: memref<256xf32>) { -// CHECK: %[[CST:.*]] = arith.constant dense<1.000000e+00> : vector<1xf32> -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<256xf32> -> !xegpu.tensor_desc<16xf32> -// CHECK: %[[T1:.*]] = xegpu.update_nd_offset %[[T0]], [%c32] : !xegpu.tensor_desc<16xf32> -// CHECK: xegpu.store_nd %[[CST]], %[[T1]] : vector<1xf32>, !xegpu.tensor_desc<16xf32> -gpu.module @xevm_module{ - gpu.func @update_nd_offset_1d(%arg0: memref<256xf32>) { - %c0 = arith.constant 0 : index - %c32 = arith.constant 32 : index - %cst = arith.constant {layout_result_0 = #xegpu.layout} dense<1.000000e+00> : vector<16xf32> - %0 = xegpu.create_nd_tdesc %arg0[%c0] : memref<256xf32> -> !xegpu.tensor_desc<16xf32, #xegpu.layout> - %1 = xegpu.update_nd_offset %0, [%c32] : !xegpu.tensor_desc<16xf32, #xegpu.layout> - xegpu.store_nd %cst, %1 : vector<16xf32>, !xegpu.tensor_desc<16xf32, #xegpu.layout> - gpu.return - } -} - -// ----- -// CHECK-LABEL: gpu.func @update_nd_offset_2d -// CHECK: %[[ARG0:[0-9a-zA-Z]+]]: memref<256x256xf32>) { -// CHECK: %[[CST:.*]] = arith.constant dense<1.000000e+00> : vector<16xf32> -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<256x256xf32> -> !xegpu.tensor_desc<16x16xf32> -// CHECK: %[[T1:.*]] = xegpu.update_nd_offset %[[T0]], [%c32, %c32] : !xegpu.tensor_desc<16x16xf32> -// CHECK: xegpu.store_nd %[[CST]], %[[T1]] : vector<16xf32>, !xegpu.tensor_desc<16x16xf32> -gpu.module @xevm_module{ - gpu.func @update_nd_offset_2d(%arg0: memref<256x256xf32>) { - %c0 = arith.constant 0 : index - %c32 = arith.constant 32 : index - %cst = arith.constant {layout_result_0 = #xegpu.layout} dense<1.000000e+00> : vector<16x16xf32> - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<256x256xf32> -> !xegpu.tensor_desc<16x16xf32, #xegpu.layout> - %1 = xegpu.update_nd_offset %0, [%c32, %c32] : !xegpu.tensor_desc<16x16xf32, #xegpu.layout> - xegpu.store_nd %cst, %1 : vector<16x16xf32>, !xegpu.tensor_desc<16x16xf32, #xegpu.layout> - gpu.return - } -} - // ----- // CHECK-LABEL: gpu.func 
@prefetch_2d // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: memref<256x256xf16>) { -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16> -// CHECK: xegpu.prefetch_nd %[[T0]] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16x16xf16> +// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16> +// CHECK: xegpu.prefetch_nd %[[T0]][%{{.*}}] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16x16xf16> gpu.module @xevm_module{ gpu.func @prefetch_2d(%arg0: memref<256x256xf16>) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - xegpu.prefetch_nd %0 <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> - gpu.return - } -} - -// ----- -// Explicitly check that update_nd_offset op's source retain layout when yielded from the warp op (PR150545) -// CHECK-LABEL: gpu.func @check_update_nd_offset_distributed_tensor_desc -// CHECK: %[[W:.*]] = gpu.warp_execute_on_lane_0(%{{.*}})[16] -> -// CHECK-SAME: (!xegpu.tensor_desc<16x16xf32, #xegpu.layout>) { -// CHECK: %[[T0:.*]] = "some_op"() : () -> !xegpu.tensor_desc<16x16xf32, #xegpu.layout> -// CHECK: gpu.yield %[[T0]] : !xegpu.tensor_desc<16x16xf32, #xegpu.layout> -// CHECK: } -// CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[W]] : -// CHECK-SAME: !xegpu.tensor_desc<16x16xf32, #xegpu.layout> to !xegpu.tensor_desc<16x16xf32> {resolve_simt_type_mismatch} -// CHECK: xegpu.update_nd_offset %[[T1]], [%{{.*}}] : !xegpu.tensor_desc<16x16xf32> -gpu.module @xevm_module{ - gpu.func @check_update_nd_offset_distributed_tensor_desc() { - %c32 = arith.constant 32 : index - %cst = arith.constant {layout_result_0 = #xegpu.layout} dense<1.000000e+00> : vector<16x16xf32> - %0 = "some_op"() : () -> !xegpu.tensor_desc<16x16xf32, #xegpu.layout> - %1 = xegpu.update_nd_offset %0, [%c32, %c32] : !xegpu.tensor_desc<16x16xf32, #xegpu.layout> - xegpu.store_nd %cst, %1 : vector<16x16xf32>, !xegpu.tensor_desc<16x16xf32, #xegpu.layout> + %0 = xegpu.create_nd_tdesc %arg0 : memref<256x256xf16> -> !xegpu.tensor_desc<16x16xf16, #xegpu.layout> + xegpu.prefetch_nd %0[%c0, %c0] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16x16xf16, #xegpu.layout> gpu.return } } @@ -295,13 +235,13 @@ gpu.module @xevm_module{ // ----- // CHECK-LABEL: gpu.func @prefetch_1d // CHECK: (%[[ARG0:[0-9a-zA-Z]+]]: memref<256xf16>) { -// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<256xf16> -> !xegpu.tensor_desc<16xf16> -// CHECK: xegpu.prefetch_nd %[[T0]] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16xf16> +// CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<256xf16> -> !xegpu.tensor_desc<16xf16> +// CHECK: xegpu.prefetch_nd %[[T0]][%{{.*}}] <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16xf16> gpu.module @xevm_module{ gpu.func @prefetch_1d(%arg0: memref<256xf16>) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0] : memref<256xf16> -> !xegpu.tensor_desc<16xf16, #xegpu.layout> - xegpu.prefetch_nd %0 <{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16xf16, #xegpu.layout> + %0 = xegpu.create_nd_tdesc %arg0: memref<256xf16> -> !xegpu.tensor_desc<16xf16, #xegpu.layout> + xegpu.prefetch_nd %0[%c0] 
<{l1_hint = #xegpu.cache_hint, l2_hint = #xegpu.cache_hint}> : !xegpu.tensor_desc<16xf16, #xegpu.layout> gpu.return } } @@ -309,18 +249,18 @@ gpu.module @xevm_module{ // ----- // CHECK-LABEL: gpu.func @gpu_barrier({{.*}}) { // CHECK: %[[T0:.*]] = xegpu.create_nd_tdesc %{{.*}} : memref<256xf16> -> !xegpu.tensor_desc<16xf16> -// CHECK-NEXT: %[[T1:.*]] = xegpu.load_nd %[[T0]] : !xegpu.tensor_desc<16xf16> -> vector<1xf16> +// CHECK-NEXT: %[[T1:.*]] = xegpu.load_nd %[[T0]][{{.*}}] : !xegpu.tensor_desc<16xf16> -> vector<1xf16> // CHECK-NEXT: gpu.barrier // CHECK-NEXT: %[[T2:.*]] = xegpu.create_nd_tdesc %{{.*}} : memref<256xf16> -> !xegpu.tensor_desc<16xf16> -// CHECK-NEXT: xegpu.store_nd %[[T1]], %[[T2]] : vector<1xf16>, !xegpu.tensor_desc<16xf16> +// CHECK-NEXT: xegpu.store_nd %[[T1]], %[[T2]][{{.*}}] : vector<1xf16>, !xegpu.tensor_desc<16xf16> gpu.module @xevm_module{ gpu.func @gpu_barrier(%arg0: memref<256xf16>, %arg1: memref<256xf16>) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0] : memref<256xf16> -> !xegpu.tensor_desc<16xf16, #xegpu.layout> - %1 = xegpu.load_nd %0 {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16xf16, #xegpu.layout> -> vector<16xf16> + %0 = xegpu.create_nd_tdesc %arg0 : memref<256xf16> -> !xegpu.tensor_desc<16xf16, #xegpu.layout> + %1 = xegpu.load_nd %0[%c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16xf16, #xegpu.layout> -> vector<16xf16> gpu.barrier - %2 = xegpu.create_nd_tdesc %arg1[%c0] : memref<256xf16> -> !xegpu.tensor_desc<16xf16, #xegpu.layout> - xegpu.store_nd %1, %2 : vector<16xf16>, !xegpu.tensor_desc<16xf16, #xegpu.layout> + %2 = xegpu.create_nd_tdesc %arg1 : memref<256xf16> -> !xegpu.tensor_desc<16xf16, #xegpu.layout> + xegpu.store_nd %1, %2[%c0] : vector<16xf16>, !xegpu.tensor_desc<16xf16, #xegpu.layout> gpu.return } } @@ -341,6 +281,7 @@ gpu.module @xevm_module{ // CHECK-NEXT: vector.from_elements %[[RED0]], %[[RED1]] : vector<2xf32> gpu.module @xevm_module{ gpu.func @vector_multi_reduction_dim1_distributed_dim0_reduction() { + %c0 = arith.constant 0 : index %0 = "some_def"() : () -> !xegpu.tensor_desc<1x32xf32, #xegpu.layout> %src = "some_def"() {layout_result_0 = #xegpu.layout} : () -> (vector<16x32xf32>) %acc = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [0]>} dense<0.0> : vector<32xf32> @@ -348,7 +289,7 @@ gpu.func @vector_multi_reduction_dim1_distributed_dim0_reduction() { : vector<16x32xf32> to vector<32xf32> %3 = vector.shape_cast %1 {layout_result_0 = #xegpu.layout} : vector<32xf32> to vector<1x32xf32> - xegpu.store_nd %3, %0 : vector<1x32xf32>, !xegpu.tensor_desc<1x32xf32, #xegpu.layout> + xegpu.store_nd %3, %0[%c0, %c0] : vector<1x32xf32>, !xegpu.tensor_desc<1x32xf32, #xegpu.layout> gpu.return } } @@ -367,6 +308,7 @@ gpu.func @vector_multi_reduction_dim1_distributed_dim0_reduction() { // CHECK-REDUCTION-NEXT: vector.from_elements %[[W]]#2, %[[W]]#1 : vector<2xf32> gpu.module @xevm_module{ gpu.func @vector_multi_reduction_dim1_distributed_dim1_reduction() { + %c0 = arith.constant 0 : index %0 = "some_def"() : () -> !xegpu.tensor_desc<2x16xf32, #xegpu.layout> %src = "some_def"() {layout_result_0 = #xegpu.layout} : () -> (vector<2x16xf32>) %acc = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [1]>} dense<0.0> : vector<2xf32> @@ -375,7 +317,7 @@ gpu.func @vector_multi_reduction_dim1_distributed_dim1_reduction() { %3 = vector.shape_cast %1 {layout_result_0 = #xegpu.layout} : vector<2xf32> to vector<2x1xf32> %4 = vector.broadcast %3 {layout_result_0 = 
#xegpu.layout} : vector<2x1xf32> to vector<2x16xf32> - xegpu.store_nd %4, %0 : vector<2x16xf32>, !xegpu.tensor_desc<2x16xf32, #xegpu.layout> + xegpu.store_nd %4, %0[%c0, %c0] : vector<2x16xf32>, !xegpu.tensor_desc<2x16xf32, #xegpu.layout> gpu.return } } @@ -394,6 +336,7 @@ gpu.func @vector_multi_reduction_dim1_distributed_dim1_reduction() { // CHECK-NEXT: vector.from_elements %[[R0]], %[[R1]] : vector<2xf32> gpu.module @xevm_module{ gpu.func @vector_multi_reduction_dim0_distributed_dim1_reduction() { + %c0 = arith.constant 0 : index %0 = "some_def"() : () -> !xegpu.tensor_desc<32x1xf32, #xegpu.layout> %src = "some_def"() {layout_result_0 = #xegpu.layout} : () -> (vector<32x16xf32>) %acc = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [1]>} dense<0.0> : vector<32xf32> @@ -401,7 +344,7 @@ gpu.func @vector_multi_reduction_dim0_distributed_dim1_reduction() { : vector<32x16xf32> to vector<32xf32> %3 = vector.shape_cast %1 {layout_result_0 = #xegpu.layout} : vector<32xf32> to vector<32x1xf32> - xegpu.store_nd %3, %0 : vector<32x1xf32>, !xegpu.tensor_desc<32x1xf32, #xegpu.layout> + xegpu.store_nd %3, %0[%c0, %c0] : vector<32x1xf32>, !xegpu.tensor_desc<32x1xf32, #xegpu.layout> gpu.return } } @@ -422,6 +365,7 @@ gpu.func @vector_multi_reduction_dim0_distributed_dim1_reduction() { // CHECK-REDUCTION-NEXT: vector.from_elements %[[W]]#2, %[[W]]#1 : vector<2xf32> gpu.module @xevm_module{ gpu.func @vector_multi_reduction_dim0_distributed_dim0_reduction() { + %c0 = arith.constant 0 : index %0 = "some_def"() : () -> !xegpu.tensor_desc<16x2xf32, #xegpu.layout> %src = "some_def"() {layout_result_0 = #xegpu.layout} : () -> (vector<16x2xf32>) %acc = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [0]>} dense<0.0> : vector<2xf32> @@ -430,7 +374,7 @@ gpu.func @vector_multi_reduction_dim0_distributed_dim0_reduction() { %3 = vector.shape_cast %1 {layout_result_0 = #xegpu.layout} : vector<2xf32> to vector<1x2xf32> %4 = vector.broadcast %3 {layout_result_0 = #xegpu.layout} : vector<1x2xf32> to vector<16x2xf32> - xegpu.store_nd %4, %0 : vector<16x2xf32>, !xegpu.tensor_desc<16x2xf32, #xegpu.layout> + xegpu.store_nd %4, %0[%c0, %c0] : vector<16x2xf32>, !xegpu.tensor_desc<16x2xf32, #xegpu.layout> gpu.return } } @@ -537,9 +481,9 @@ gpu.module @xevm_module{ %cst = arith.constant {layout_result_0 = #xegpu.layout} dense<1.000000e+00> : vector<16xf16> %ptr = memref.extract_aligned_pointer_as_index %arg0 : memref<256x256xf16> -> index %ptr_i64 = arith.index_cast %ptr : index to i64 - %tdesc = xegpu.create_nd_tdesc %ptr_i64[%c0], shape: [16], strides: [16] : i64 + %tdesc = xegpu.create_nd_tdesc %ptr_i64, shape: [16], strides: [16] : i64 -> !xegpu.tensor_desc<16xf16, #xegpu.layout> - xegpu.store_nd %cst, %tdesc : vector<16xf16>, !xegpu.tensor_desc<16xf16, #xegpu.layout> + xegpu.store_nd %cst, %tdesc[%c0] : vector<16xf16>, !xegpu.tensor_desc<16xf16, #xegpu.layout> gpu.return } } @@ -549,7 +493,7 @@ gpu.module @xevm_module{ // CHECK-LABEL: gpu.func @vector_transpose( // CHECK: %[[CST:.*]] = arith.constant dense<1.000000e+00> : vector<2xf32> // CHECK: %[[DEST:.*]] = xegpu.create_nd_tdesc %{{.*}} : memref<2x16xf32> -> !xegpu.tensor_desc<2x16xf32> -// CHECK: xegpu.store_nd %[[CST]], %[[DEST]] : vector<2xf32>, !xegpu.tensor_desc<2x16xf32> +// CHECK: xegpu.store_nd %[[CST]], %[[DEST]][{{.*}}] : vector<2xf32>, !xegpu.tensor_desc<2x16xf32> gpu.module @xevm_module{ gpu.func @vector_transpose(%arg0: memref<2x16xf32>) { %cst = arith.constant {layout_result_0 = #xegpu.layout} dense<1.000000e+00> 
@@ -557,9 +501,9 @@ gpu.module @xevm_module{ %c0 = arith.constant 0 : index %transpose = vector.transpose %cst, [1, 0] {layout_result_0 = #xegpu.layout} : vector<16x2xf32> to vector<2x16xf32> - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<2x16xf32> + %0 = xegpu.create_nd_tdesc %arg0 : memref<2x16xf32> -> !xegpu.tensor_desc<2x16xf32, #xegpu.layout> - xegpu.store_nd %transpose, %0 : vector<2x16xf32>, + xegpu.store_nd %transpose, %0[%c0, %c0] : vector<2x16xf32>, !xegpu.tensor_desc<2x16xf32, #xegpu.layout> gpu.return } @@ -570,7 +514,7 @@ gpu.module @xevm_module{ // CHECK: %[[CAST:.*]] = vector.bitcast %{{.*}} : vector<4x2xi8> to vector<4x1xi16> // CHECK-NEXT: %[[DEST:.*]] = xegpu.create_nd_tdesc %{{.*}} : memref<4x16xi16> -> !xegpu.tensor_desc<4x16xi16> // CHECK-NEXT: %[[T0:.*]] = vector.shape_cast %[[CAST]] : vector<4x1xi16> to vector<4xi16> -// CHECK-NEXT: xegpu.store_nd %[[T0]], %[[DEST]] : vector<4xi16>, !xegpu.tensor_desc<4x16xi16> +// CHECK-NEXT: xegpu.store_nd %[[T0]], %[[DEST]][{{.*}}] : vector<4xi16>, !xegpu.tensor_desc<4x16xi16> gpu.module @xevm_module{ gpu.func @vector_bitcast(%arg0: memref<4x16xi16>) { %cst = "some_op"() {layout_result_0 = #xegpu.layout} @@ -578,9 +522,9 @@ gpu.module @xevm_module{ %bitcast = vector.bitcast %cst {layout_result_0 = #xegpu.layout} : vector<4x32xi8> to vector<4x16xi16> %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<4x16xi16> + %0 = xegpu.create_nd_tdesc %arg0 : memref<4x16xi16> -> !xegpu.tensor_desc<4x16xi16, #xegpu.layout> - xegpu.store_nd %bitcast, %0 : vector<4x16xi16>, + xegpu.store_nd %bitcast, %0[%c0, %c0] : vector<4x16xi16>, !xegpu.tensor_desc<4x16xi16, #xegpu.layout> gpu.return } @@ -589,10 +533,10 @@ gpu.module @xevm_module{ // ----- // CHECK-LABEL: gpu.func @mma_transpose_b( // CHECK: %[[ARG0:[0-9a-zA-Z]+]]: memref<8x16xf16>, %[[ARG1:[0-9a-zA-Z]+]]: memref<16x8xi32>, %[[ARG2:[0-9a-zA-Z]+]]: memref<8x16xf32>) { -// CHECK-DAG: %[[ADESC:.*]] = xegpu.create_nd_tdesc %[[ARG0]][%{{.*}}] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16> -// CHECK-DAG: %[[BDESC:.*]] = xegpu.create_nd_tdesc %[[ARG1]][%{{.*}}] : memref<16x8xi32> -> !xegpu.tensor_desc<16x8xi32> -// CHECK-DAG: %[[A:.*]] = xegpu.load_nd %[[ADESC]] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16> -// CHECK-DAG: %[[B:.*]] = xegpu.load_nd %[[BDESC]] <{transpose = array}> : !xegpu.tensor_desc<16x8xi32> -> vector<8xi32> +// CHECK-DAG: %[[ADESC:.*]] = xegpu.create_nd_tdesc %[[ARG0]] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16> +// CHECK-DAG: %[[BDESC:.*]] = xegpu.create_nd_tdesc %[[ARG1]] : memref<16x8xi32> -> !xegpu.tensor_desc<16x8xi32> +// CHECK-DAG: %[[A:.*]] = xegpu.load_nd %[[ADESC]][%{{.*}}] : !xegpu.tensor_desc<8x16xf16> -> vector<8xf16> +// CHECK-DAG: %[[B:.*]] = xegpu.load_nd %[[BDESC]][%{{.*}}] <{transpose = array}> : !xegpu.tensor_desc<16x8xi32> -> vector<8xi32> // CHECK-NEXT: %[[BCAST0:.*]] = vector.shape_cast %[[B]] : vector<8xi32> to vector<1x8xi32> // CHECK-NEXT: %[[BCAST1:.*]] = vector.bitcast %[[BCAST0]] : vector<1x8xi32> to vector<1x16xf16> // CHECK-NEXT: %[[BCAST2:.*]] = vector.shape_cast %[[BCAST1]] : vector<1x16xf16> to vector<16xf16> @@ -600,13 +544,13 @@ gpu.module @xevm_module{ gpu.module @xevm_module{ gpu.func @mma_transpose_b(%arg0: memref<8x16xf16>, %arg1: memref<16x8xi32>, %arg2: memref<8x16xf32>) { %c0 = arith.constant 0 : index - %0 = xegpu.create_nd_tdesc %arg0[%c0, %c0] : memref<8x16xf16> + %0 = xegpu.create_nd_tdesc %arg0 : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16, #xegpu.layout> - %1 = xegpu.load_nd 
%0 {layout_result_0 = #xegpu.layout} + %1 = xegpu.load_nd %0[%c0, %c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<8x16xf16, #xegpu.layout> -> vector<8x16xf16> - %2 = xegpu.create_nd_tdesc %arg1[%c0, %c0] : memref<16x8xi32> + %2 = xegpu.create_nd_tdesc %arg1 : memref<16x8xi32> -> !xegpu.tensor_desc<16x8xi32, #xegpu.layout> - %3 = xegpu.load_nd %2 {layout_result_0 = #xegpu.layout} + %3 = xegpu.load_nd %2[%c0, %c0] {layout_result_0 = #xegpu.layout} : !xegpu.tensor_desc<16x8xi32, #xegpu.layout> -> vector<16x8xi32> %4 = vector.bitcast %3 {layout_result_0 = #xegpu.layout} : vector<16x8xi32> to vector<16x16xf16> @@ -614,9 +558,9 @@ gpu.module @xevm_module{ : vector<16x16xf16> to vector<16x16xf16> %6 = xegpu.dpas %1, %5 {layout_result_0 = #xegpu.layout} : vector<8x16xf16>, vector<16x16xf16> -> vector<8x16xf32> - %7 = xegpu.create_nd_tdesc %arg2[%c0, %c0] : memref<8x16xf32> + %7 = xegpu.create_nd_tdesc %arg2 : memref<8x16xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.layout> - xegpu.store_nd %6, %7 : vector<8x16xf32>, + xegpu.store_nd %6, %7[%c0, %c0] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32, #xegpu.layout> gpu.return diff --git a/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir b/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir index 547c7355e00c6..b73bc69393dab 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir @@ -7,10 +7,8 @@ gpu.module @test { //CHECK: [[IDY:%.+]] = affine.apply #map()[[[sgId]]] //CHECK: [[c32:%.+]] = arith.constant 32 : index //CHECK: [[LOCALY:%.+]] = index.mul [[IDY]], [[c32]] - //CHECK: [[c0:%.+]] = arith.constant 0 : index - //CHECK: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index //CHECK: [[c128:%.+]] = arith.constant 128 : index - //CHECK: [[MODY:%.+]] = index.remu [[Y]], [[c128]] + //CHECK: [[MODY:%.+]] = index.remu [[LOCALY]], [[c128]] //CHECK: [[BASE:%.+]] = vector.step : vector<32xindex> //CHECK: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<32xindex> //CHECK: [[ADD:%.+]] = arith.addi [[BASE]], [[CAST]] : vector<32xindex> @@ -23,10 +21,8 @@ gpu.module @test { //CHECK: [[IDY:%.+]] = affine.apply #map()[[[sgId]]] //CHECK: [[c32:%.+]] = arith.constant 32 : index //CHECK: [[LOCALY:%.+]] = index.mul [[IDY]], [[c32]] - //CHECK: [[c0:%.+]] = arith.constant 0 : index - //CHECK: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index //CHECK: [[c128:%.+]] = arith.constant 128 : index - //CHECK: [[MODY:%.+]] = index.remu [[Y]], [[c128]] + //CHECK: [[MODY:%.+]] = index.remu [[LOCALY]], [[c128]] //CHECK: [[BASE:%.+]] = vector.step : vector<32xindex> //CHECK: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<32xindex> //CHECK: [[ADD:%.+]] = arith.addi [[BASE]], [[CAST]] : vector<32xindex> diff --git a/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns-no-desc-offsets.mlir b/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns-no-desc-offsets.mlir new file mode 100644 index 0000000000000..6eee5a544e3f8 --- /dev/null +++ b/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns-no-desc-offsets.mlir @@ -0,0 +1,61 @@ +// RUN: mlir-opt --test-xegpu-unrolling-patterns -split-input-file %s | FileCheck %s + +gpu.module @xevm_test { + + // CHECK-LABEL: create_nd_tdesc + // CHECK-SAME: [[arg0:%.+]]: memref<24x32xf32> + // CHECK: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32> + // CHECK: [[cast:%.+]] = builtin.unrealized_conversion_cast + // CHECK-SAME: !xegpu.tensor_desc<8x16xf32> + // CHECK-SAME: to !xegpu.tensor_desc<24x32xf32, #xegpu.layout> 
{__xegpu_blocking_tile_shape__ = array, __xegpu_blocking_unpack__} + gpu.func @create_nd_tdesc(%src: memref<24x32xf32>) -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout> { + %tdesc = xegpu.create_nd_tdesc %src : memref<24x32xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout> + gpu.return %tdesc : !xegpu.tensor_desc<24x32xf32, #xegpu.layout> + } + +//----- + // CHECK-LABEL: load_nd + // CHECK-SAME: [[arg0:%.+]]: memref<256x318xf32> + // CHECK: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]] : memref<256x318xf32> -> !xegpu.tensor_desc<8x16xf32> + // CHECK-COUNT-6: [[ld:%.+]] = xegpu.load_nd {{.*}}[{{.*}}] : !xegpu.tensor_desc<8x16xf32> -> vector<8x16xf32> + // CHECK-COUNT-6: [[insert:%.+]] = vector.insert_strided_slice {{.*}} : vector<8x16xf32> into vector<24x32xf32> + gpu.func @load_nd(%src: memref<256x318xf32>) -> vector<24x32xf32> { + %tdesc = xegpu.create_nd_tdesc %src : memref<256x318xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout> + %ld = xegpu.load_nd %tdesc[8, 16]: !xegpu.tensor_desc<24x32xf32, #xegpu.layout> -> vector<24x32xf32> + gpu.return %ld : vector<24x32xf32> + } + +//----- + // CHECK-LABEL: load_nd_store_nd + // CHECK-SAME: [[arg0:%.+]]: memref<256x318xf32> + // CHECK: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]] : memref<256x318xf32> -> !xegpu.tensor_desc<8x16xf32> + // CHECK-COUNT-6: [[data:%.+]] = xegpu.load_nd {{.*}}[{{.*}}] : !xegpu.tensor_desc<8x16xf32> -> vector<8x16xf32> + // CHECK-COUNT-6: xegpu.store_nd {{.*}}[{{.*}}] : vector<8x16xf32>, !xegpu.tensor_desc<8x16xf32> + gpu.func @load_nd_store_nd(%src: memref<256x318xf32>) { + %tdesc = xegpu.create_nd_tdesc %src : memref<256x318xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout> + %ld = xegpu.load_nd %tdesc[8, 16]: !xegpu.tensor_desc<24x32xf32, #xegpu.layout> -> vector<24x32xf32> + xegpu.store_nd %ld, %tdesc[0, 0] : vector<24x32xf32>, !xegpu.tensor_desc<24x32xf32, #xegpu.layout> + gpu.return + } + +//----- + // CHECK-LABEL: prefetch_nd_tdesc + // CHECK-SAME: [[arg0:%.+]]: memref<24x32xf32> + // CHECK: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32> + // CHECK-COUNT-6: xegpu.prefetch_nd {{.*}}[{{.*}}] : !xegpu.tensor_desc<8x16xf32> + gpu.func @prefetch_nd_tdesc(%src: memref<24x32xf32>) { + %tdesc = xegpu.create_nd_tdesc %src : memref<24x32xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout> + xegpu.prefetch_nd %tdesc[8, 16] : !xegpu.tensor_desc<24x32xf32, #xegpu.layout> + gpu.return + } + +//----- + + // CHECK-LABEL: load_nd_offsets_at_both_places + // CHECK-COUNT-2: builtin.unrealized_conversion_cast + gpu.func @load_nd_offsets_at_both_places(%src: memref<256x318xf32>) -> vector<24x32xf32> { + %tdesc = xegpu.create_nd_tdesc %src[16, 8] : memref<256x318xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout> + %ld = xegpu.load_nd %tdesc[8, 16]: !xegpu.tensor_desc<24x32xf32, #xegpu.layout> -> vector<24x32xf32> + gpu.return %ld : vector<24x32xf32> + } +} diff --git a/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns.mlir b/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns.mlir index 6999da5d222fe..dbc52b8a98894 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-unroll-patterns.mlir @@ -210,6 +210,27 @@ gpu.module @test { gpu.return %ld : vector<32xf32> } +//----- + + + // CHECK-LABEL: load_with_offsets + // CHECK-SAME: [[arg0:%.+]]: ui64 + // CHECK-COUNT-2: xegpu.load {{.*}}[{{.*}}], {{.*}} <{chunk_size = 1 : i64, l1_hint = #xegpu.cache_hint}> : ui64, vector<16xindex>, vector<16xi1> -> vector<16xf32> + gpu.func 
@load_with_offsets(%src: ui64) -> vector<32xf32> { + %cst = arith.constant dense<[ + 0, 8, 16, 24, 32, 40, 48, 56, + 64, 72, 80, 88, 96, 104, 112, 120, + 128, 136, 144, 152, 160, 168, 176, 184, + 192, 200, 208, 216, 224, 232, 240, 248 + ]> : vector<32xindex> + + %c17 = arith.constant 17: index + %mask = vector.create_mask %c17: vector<32xi1> + %ld = xegpu.load %src[%cst], %mask {chunk_size = 1, layout_result_0 = #xegpu.layout, l1_hint = #xegpu.cache_hint} : ui64, vector<32xindex>, vector<32xi1> -> vector<32xf32> + + gpu.return %ld : vector<32xf32> + } + //----- // CHECK-LABEL: prefetch @@ -254,6 +275,28 @@ gpu.module @test { gpu.return } + + //----- + + // CHECK-LABEL: store_with_offsets + // CHECK-SAME: [[arg0:%.+]]: ui64 + // CHECK-COUNT-2: xegpu.store {{.*}}[{{.*}}], {{.*}} <{chunk_size = 1 : i64, l1_hint = #xegpu.cache_hint}> : vector<16xf32>, ui64, vector<16xindex>, vector<16xi1> + gpu.func @store_with_offsets(%src: ui64) { + %cst = arith.constant dense<[ + 0, 8, 16, 24, 32, 40, 48, 56, + 64, 72, 80, 88, 96, 104, 112, 120, + 128, 136, 144, 152, 160, 168, 176, 184, + 192, 200, 208, 216, 224, 232, 240, 248 + ]> : vector<32xindex> + + %c17 = arith.constant 17: index + %mask = vector.create_mask %c17: vector<32xi1> + + %st_vec = arith.constant dense<1023.0>: vector<32xf32> + xegpu.store %st_vec, %src[%cst], %mask {chunk_size = 1, layout = #xegpu.layout, l1_hint = #xegpu.cache_hint} : vector<32xf32>, ui64, vector<32xindex>, vector<32xi1> + + gpu.return + } //----- // CHECK-LABEL: create_tdesc_step_chunk @@ -319,6 +362,29 @@ gpu.module @test { gpu.return %ld : vector<32x4xf32> } +//----- + // CHECK-LABEL: load_with_offsets_chunk + // CHECK-SAME: [[arg0:%.+]]: ui64 + // CHECK: [[cst:%.+]] = arith.constant dense<0.000000e+00> : vector<32x4xf32> + // CHECK: [[cst0:%.+]] = arith.constant dense<[130, 138, 146, 154, 162, 170, 178, 186, 194, 202, 210, 218, 226, 234, 242, 250]> : vector<16xindex> + // CHECK: [[cst1:%.+]] = arith.constant dense<[2, 10, 18, 26, 34, 42, 50, 58, 66, 74, 82, 90, 98, 106, 114, 122]> : vector<16xindex> + // CHECK: [[cst2:%.+]] = arith.constant dense<[128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224, 232, 240, 248]> : vector<16xindex> + // CHECK: [[cst3:%.+]] = arith.constant dense<[0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120]> : vector<16xindex> + // CHECK-COUNT-4: xegpu.load {{.*}}[{{.*}}], {{.*}} <{chunk_size = 2 : i64, l1_hint = #xegpu.cache_hint}> : ui64, vector<16xindex>, vector<16xi1> -> vector<16x2xf32> + gpu.func @load_with_offsets_chunk(%src: ui64) -> vector<32x4xf32> { + %cst = arith.constant dense<[ + 0, 8, 16, 24, 32, 40, 48, 56, + 64, 72, 80, 88, 96, 104, 112, 120, + 128, 136, 144, 152, 160, 168, 176, 184, + 192, 200, 208, 216, 224, 232, 240, 248 + ]> : vector<32xindex> + + %c17 = arith.constant 17: index + %mask = vector.create_mask %c17: vector<32xi1> + %ld = xegpu.load %src[%cst], %mask {chunk_size = 4, layout_result_0 = #xegpu.layout, l1_hint = #xegpu.cache_hint} : ui64, vector<32xindex>, vector<32xi1> -> vector<32x4xf32> + gpu.return %ld : vector<32x4xf32> + } + //----- // CHECK-LABEL: store_chunk // CHECK-SAME: [[arg0:%.+]]: ui64 @@ -342,6 +408,31 @@ gpu.module @test { gpu.return } +//----- + // CHECK-LABEL: store_with_offsets_chunk + // CHECK-SAME: [[arg0:%.+]]: ui64 + // CHECK: [[cst:%.+]] = arith.constant dense<1.023000e+03> : vector<16x2xf32 + // CHECK: [[cst0:%.+]] = arith.constant dense<[130, 138, 146, 154, 162, 170, 178, 186, 194, 202, 210, 218, 226, 234, 242, 250]> : vector<16xindex> + // CHECK: [[cst1:%.+]] = 
arith.constant dense<[2, 10, 18, 26, 34, 42, 50, 58, 66, 74, 82, 90, 98, 106, 114, 122]> : vector<16xindex> + // CHECK: [[cst2:%.+]] = arith.constant dense<[128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224, 232, 240, 248]> : vector<16xindex> + // CHECK: [[cst3:%.+]] = arith.constant dense<[0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120]> : vector<16xindex> + // CHECK-COUNT-4: xegpu.store {{.*}}[{{.*}}], {{.*}} <{chunk_size = 2 : i64, l1_hint = #xegpu.cache_hint}> : vector<16x2xf32>, ui64, vector<16xindex>, vector<16xi1> + gpu.func @store_with_offsets_chunk(%src: ui64) { + %cst = arith.constant dense<[ + 0, 8, 16, 24, 32, 40, 48, 56, + 64, 72, 80, 88, 96, 104, 112, 120, + 128, 136, 144, 152, 160, 168, 176, 184, + 192, 200, 208, 216, 224, 232, 240, 248 + ]> : vector<32xindex> + + %c17 = arith.constant 17: index + %mask = vector.create_mask %c17: vector<32xi1> + + %st_vec = arith.constant dense<1023.>: vector<32x4xf32> + xegpu.store %st_vec, %src[%cst], %mask {chunk_size = 4, layout = #xegpu.layout, l1_hint = #xegpu.cache_hint} : vector<32x4xf32>, ui64, vector<32xindex>, vector<32xi1> + gpu.return + } + //----- // CHECK-LABEL: prefetch_chunk // CHECK-SAME: [[arg0:%.+]]: ui64 diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir index e5cc65e6bd3d7..d2d250cbe0f66 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir @@ -27,12 +27,10 @@ gpu.module @test_round_robin_assignment { //CHECK: [[LX:%.+]] = index.mul [[IdX]], [[C64]] //CHECK: [[C0:%.+]] = arith.constant 0 : index //CHECK: [[C0_1:%.+]] = arith.constant 0 : index - //CHECK: [[ADDY:%.+]] = arith.addi [[LY]], [[C0]] : index - //CHECK: [[ADDX:%.+]] = arith.addi [[LX]], [[C0_1]] : index //CHECK: [[C128:%.+]] = arith.constant 128 : index - //CHECK: [[offY:%.+]] = index.remu [[ADDY]], [[C128]] + //CHECK: [[offY:%.+]] = index.remu [[LY]], [[C128]] //CHECK: [[C64_2:%.+]] = arith.constant 64 : index - //CHECK: [[offX:%.+]] = index.remu [[ADDX]], [[C64_2]] + //CHECK: [[offX:%.+]] = index.remu [[LX]], [[C64_2]] //CHECK: xegpu.create_nd_tdesc [[ARG_0]][[[offY]], [[offX]]] : memref<256x128xf32> -> !xegpu.tensor_desc<16x64xf32> %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<256x128xf32> -> !xegpu.tensor_desc<128x64xf32, #xegpu.layout> diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops-rr.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops-rr.mlir index 6ff7a94d678a3..dce73dee507e1 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops-rr.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops-rr.mlir @@ -82,4 +82,20 @@ gpu.module @test_distribution { : vector<256x128xf16>, vector<128x256xf16> -> vector<256x256xf32> gpu.return } + + // CHECK-LABEL: vector_reduce_dim_1 + gpu.func @vector_reduce_dim_1(%src: memref<256x64xf32>) { + // CHECK: %[[CST:.*]] = arith.constant dense<1.000000e+00> : vector<16xf32> + %cst = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [1]>} dense<1.0> : vector<256xf32> + %tdesc = xegpu.create_nd_tdesc %src : memref<256x64xf32> + -> !xegpu.tensor_desc<256x64xf32, #xegpu.layout> + %load = xegpu.load_nd %tdesc[0, 0] + : !xegpu.tensor_desc<256x64xf32, #xegpu.layout> + -> vector<256x64xf32> + // CHECK-COUNT-2: vector.multi_reduction , {{.*}}, %[[CST]] [1] : vector<16x64xf32> to vector<16xf32> + // CHECK-NOT: vector.multi_reduction + %reduce = vector.multi_reduction , %load, %cst {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [1]>} 
[1] + : vector<256x64xf32> to vector<256xf32> + gpu.return + } } diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir index 3478a9b91da5f..38392fd10b742 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir @@ -282,15 +282,20 @@ gpu.module @test_distribution { // CHECK-LABEL: @store_scatter // CHECK-SAME: %[[ARG0:.*]]: memref<256xf16> gpu.func @store_scatter(%dest : memref<256xf16>) { - // CHECK: %[[VAL:.*]] = arith.constant dense<2.550000e+01> : vector<8xf16> - // CHECK: %[[CST:.*]] = arith.constant dense<0> : vector<8xindex> - // CHECK: %[[MASK:.*]] = arith.constant dense : vector<8xi1> + // CHECK: %[[VAL:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<2.550000e+01> : vector<8xf16> + // CHECK: %[[CST:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense<0> : vector<8xindex> + // CHECK: %[[MASK:.*]] = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<8xi1> // CHECK: xegpu.store %[[VAL]], %[[ARG0]][%[[CST]]], %[[MASK]] <{chunk_size = 1 : i64, l1_hint = #xegpu.cache_hint}> + // CHECK-SAME: {layout_operand_0 = #xegpu.layout, layout_operand_2 = #xegpu.layout, + // CHECK-SAME: layout_operand_3 = #xegpu.layout} // CHECK-SAME: : vector<8xf16>, memref<256xf16>, vector<8xindex>, vector<8xi1> - %val = arith.constant {layout_result_0 = #xegpu.layout} dense<25.5> : vector<256xf16> - %offset = arith.constant {layout_result_0 = #xegpu.layout} dense<0> : vector<256xindex> - %mask = arith.constant {layout_result_0 = #xegpu.layout} dense<1> : vector<256xi1> - xegpu.store %val, %dest[%offset], %mask {chunk_size = 1, layout = #xegpu.layout, l1_hint = #xegpu.cache_hint} + %val = arith.constant {layout_result_0 = #xegpu.layout} dense<25.5> : vector<256xf16> + %offset = arith.constant {layout_result_0 = #xegpu.layout} dense<0> : vector<256xindex> + %mask = arith.constant {layout_result_0 = #xegpu.layout} dense<1> : vector<256xi1> + xegpu.store %val, %dest[%offset], %mask {chunk_size = 1, layout_operand_0 = #xegpu.layout, + layout_operand_2 = #xegpu.layout, + layout_operand_3 = #xegpu.layout, + l1_hint = #xegpu.cache_hint} : vector<256xf16>, memref<256xf16>, vector<256xindex>, vector<256xi1> gpu.return } @@ -325,12 +330,10 @@ gpu.module @test_distribution { //CHECK: [[l_off_x:%.+]] = index.mul [[id_x]], [[c32_1]] //CHECK: [[c0:%.+]] = arith.constant 0 : index //CHECK: [[c0_1:%.+]] = arith.constant 0 : index - //CHECK: [[l_off_y_0:%.+]] = arith.addi [[l_off_y]], [[c0]] : index - //CHECK: [[l_off_x_0:%.+]] = arith.addi [[l_off_x]], [[c0_1]] : index //CHECK: [[c64:%.+]] = arith.constant 64 : index - //CHECK: [[off_y:%.+]] = index.remu [[l_off_y_0]], [[c64]] + //CHECK: [[off_y:%.+]] = index.remu [[l_off_y]], [[c64]] //CHECK: [[c128:%.+]] = arith.constant 128 : index - //CHECK: [[off_x:%.+]] = index.remu [[l_off_x_0]], [[c128]] + //CHECK: [[off_x:%.+]] = index.remu [[l_off_x]], [[c128]] //CHECK: xegpu.load_matrix [[mdesc]][[[off_y]], [[off_x]]] <{layout = #xegpu.layout}>: !xegpu.mem_desc<64x128xf32>, index, index -> vector<32x32xf32> %0 = xegpu.create_mem_desc %arg0 : memref<32768xi8, 3> -> !xegpu.mem_desc<64x128xf32> %1 = xegpu.load_matrix %0[0, 0] <{layout = #xegpu.layout}>: !xegpu.mem_desc<64x128xf32> -> vector<64x128xf32> @@ -349,13 +352,11 @@ gpu.module @test_distribution { //CHECK: [[id_y:%.+]] = affine.apply #map()[[[sgid]]] //CHECK: [[id_x:%.+]] = affine.apply #map1()[[[sgid]]] //CHECK: [[c32:%.+]] = arith.constant 32 : index - 
//CHECK: [[l_off_y_0:%.+]] = index.mul [[id_y]], [[c32]] + //CHECK: [[l_off_y:%.+]] = index.mul [[id_y]], [[c32]] //CHECK: [[c32_1:%.+]] = arith.constant 32 : index - //CHECK: [[l_off_x_0:%.+]] = index.mul [[id_x]], [[c32_1]] + //CHECK: [[l_off_x:%.+]] = index.mul [[id_x]], [[c32_1]] //CHECK: [[c0:%.+]] = arith.constant 0 : index //CHECK: [[c0_2:%.+]] = arith.constant 0 : index - //CHECK: [[l_off_y:%.+]] = arith.addi [[l_off_y_0]], [[c0]] : index - //CHECK: [[l_off_x:%.+]] = arith.addi [[l_off_x_0]], [[c0_2]] : index //CHECK: [[c64:%.+]] = arith.constant 64 : index //CHECK: [[off_y:%.+]] = index.remu [[l_off_y]], [[c64]] //CHECK: [[c128:%.+]] = arith.constant 128 : index @@ -367,16 +368,55 @@ gpu.module @test_distribution { gpu.return } + // CHECK-LABEL: @vector_reduce_dim_0 + gpu.func @vector_reduce_dim_0(%src: memref<4x128xf32>) { + %cst = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [0]>} dense<1.0> : vector<128xf32> + %tdesc = xegpu.create_nd_tdesc %src : memref<4x128xf32> + -> !xegpu.tensor_desc<4x128xf32, #xegpu.layout> + %load = xegpu.load_nd %tdesc[0, 0] + : !xegpu.tensor_desc<4x128xf32, #xegpu.layout> + -> vector<4x128xf32> + // CHECK: vector.multi_reduction , {{.*}}, {{.*}} [0] : vector<4x4xf32> to vector<4xf32> + %reduce = vector.multi_reduction , %load, %cst {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [0]>} [0] + : vector<4x128xf32> to vector<128xf32> + gpu.return + } + + // CHECK-LABEL: @vector_reduce_dim_1 + gpu.func @vector_reduce_dim_1(%src: memref<256x64xf32>) { + %cst = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [1]>} dense<1.0> : vector<256xf32> + %tdesc = xegpu.create_nd_tdesc %src : memref<256x64xf32> + -> !xegpu.tensor_desc<256x64xf32, #xegpu.layout> + %load = xegpu.load_nd %tdesc[0, 0] + : !xegpu.tensor_desc<256x64xf32, #xegpu.layout> + -> vector<256x64xf32> + // CHECK: vector.multi_reduction , {{.*}}, {{.*}} [1] : vector<16x64xf32> to vector<16xf32> + %reduce = vector.multi_reduction , %load, %cst {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [1]>} [1] + : vector<256x64xf32> to vector<256xf32> + gpu.return + } + + // CHECK-LABEL: @vector_reduce_4D + gpu.func @vector_reduce_4D(%src: ui64) { + %cst_acc = arith.constant {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [3]>} dense<0.0> : vector<4x2x6xf16> + %offset = arith.constant {layout_result_0 = #xegpu.layout} dense<0> : vector<4x2x6x32xindex> + %mask = arith.constant {layout_result_0 = #xegpu.layout} dense : vector<4x2x6x32xi1> + %load = xegpu.load %src[%offset], %mask {layout_result_0 = #xegpu.layout} : ui64, vector<4x2x6x32xindex>, vector<4x2x6x32xi1> -> vector<4x2x6x32xf16> + // CHECK: vector.multi_reduction , {{.*}}, {{.*}} [3] : vector<1x1x1x32xf16> to vector<1x1x1xf16> + %reduce = vector.multi_reduction , %load, %cst_acc {layout_result_0 = #xegpu.slice<#xegpu.layout, dims = [3]>} [3] + : vector<4x2x6x32xf16> to vector<4x2x6xf16> + gpu.return + } + // CHECK-LABEL: vector_step_op gpu.func @vector_step_op_slice_attr() { //CHECK: [[sgId:%.+]] = gpu.subgroup_id : index //CHECK-DAG: [[IDY:%.+]] = affine.apply #map2()[[[sgId]]] //CHECK-DAG: [[c32:%.+]] = arith.constant 32 : index - //CHECK-DAG: [[LOCALY:%.+]] = index.mul [[IDY]], [[c32]] + //CHECK-DAG: [[LY:%.+]] = index.mul [[IDY]], [[c32]] //CHECK-DAG: [[c0:%.+]] = arith.constant 0 : index - //CHECK-DAG: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index //CHECK-DAG: [[c128:%.+]] = arith.constant 128 : index - //CHECK-DAG: [[MODY:%.+]] = index.remu [[Y]], [[c128]] + //CHECK-DAG: [[MODY:%.+]] 
= index.remu [[LY]], [[c128]] //CHECK-DAG: [[BASE:%.+]] = vector.step : vector<32xindex> //CHECK-DAG: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<32xindex> //CHECK: [[ADD:%.+]] = arith.addi [[BASE]], [[CAST]] : vector<32xindex> @@ -390,9 +430,8 @@ gpu.module @test_distribution { //CHECK-DAG: [[c8:%.+]] = arith.constant 8 : index //CHECK-DAG: [[LOCALY:%.+]] = index.mul [[sgId]], [[c8]] //CHECK-DAG: [[c0:%.+]] = arith.constant 0 : index - //CHECK-DAG: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index //CHECK-DAG: [[c128:%.+]] = arith.constant 128 : index - //CHECK-DAG: [[MODY:%.+]] = index.remu [[Y]], [[c128]] + //CHECK-DAG: [[MODY:%.+]] = index.remu [[LOCALY]], [[c128]] //CHECK-DAG: [[BASE:%.+]] = vector.step : vector<8xindex> //CHECK-DAG: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<8xindex> //CHECK: [[ADD:%.+]] = arith.addi [[BASE]], [[CAST]] : vector<8xindex> diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir index c0fb373835e3d..e83229e3a3995 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir @@ -14,12 +14,10 @@ gpu.module @test_1_1_assignment { //CHECK: [[LX:%.+]] = index.mul [[SGIDX]], [[C32]] //CHECK: [[C0:%.+]] = arith.constant 0 : index //CHECK: [[C0_1:%.+]] = arith.constant 0 : index - //CHECK: [[UY:%.+]] = arith.addi [[LY]], [[C0]] : index - //CHECK: [[UX:%.+]] = arith.addi [[LX]], [[C0_1]] : index //CHECK: [[C256:%.+]] = arith.constant 256 : index - //CHECK: [[Y:%.+]] = index.remu [[UY]], [[C256]] + //CHECK: [[Y:%.+]] = index.remu [[LY]], [[C256]] //CHECK: [[C128:%.+]] = arith.constant 128 : index - //CHECK: [[X:%.+]] = index.remu [[UX]], [[C128]] + //CHECK: [[X:%.+]] = index.remu [[LX]], [[C128]] //CHECK: [[TDESC:%.+]] = xegpu.create_nd_tdesc [[ARG_0]][[[Y]], [[X]]] : memref<256x128xf32> -> !xegpu.tensor_desc<32x32xf32, #xegpu.layout> %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<256x128xf32> -> !xegpu.tensor_desc<256x128xf32, #xegpu.layout> @@ -37,17 +35,13 @@ gpu.module @test_1_1_assignment { //CHECK: [[LX:%.+]] = index.mul [[SGIDX]], [[C32]] //CHECK: [[C0:%.+]] = arith.constant 0 : index //CHECK: [[C0_2:%.+]] = arith.constant 0 : index - //CHECK: [[UY:%.+]] = arith.addi [[LY]], [[C0]] : index - //CHECK: [[UX:%.+]] = arith.addi [[LX]], [[C0_2]] : index //CHECK: [[C256:%.+]] = arith.constant 256 : index - //CHECK: [[MODY:%.+]] = index.remu [[UY]], [[C256]] + //CHECK: [[MODY:%.+]] = index.remu [[LY]], [[C256]] //CHECK: [[C128:%.+]] = arith.constant 128 : index - //CHECK: [[MODX:%.+]] = index.remu [[UX]], [[C128]] + //CHECK: [[MODX:%.+]] = index.remu [[LX]], [[C128]] //CHECK: [[C0_3:%.+]] = arith.constant 0 : index - //CHECK: [[Y:%.+]] = index.add [[MODY]], [[C0_3]] //CHECK: [[C0_4:%.+]] = arith.constant 0 : index - //CHECK: [[X:%.+]] = index.add [[MODX]], [[C0_4]] - //CHECK: [[TDESC:%.+]] = xegpu.create_nd_tdesc [[ARG_0]][1, [[Y]], [[X]]] : memref<3x256x128xf32> -> !xegpu.tensor_desc<32x32xf32, #xegpu.layout> + //CHECK: [[TDESC:%.+]] = xegpu.create_nd_tdesc [[ARG_0]][1, [[MODY]], [[MODX]]] : memref<3x256x128xf32> -> !xegpu.tensor_desc<32x32xf32, #xegpu.layout> %tdesc = xegpu.create_nd_tdesc %src[1, 0, 0] : memref<3x256x128xf32> -> !xegpu.tensor_desc<256x128xf32, #xegpu.layout> gpu.return diff --git a/mlir/test/Examples/standalone/lit.local.cfg b/mlir/test/Examples/standalone/lit.local.cfg index 3b12dcbd99e83..6cf89358f8992 100644 --- a/mlir/test/Examples/standalone/lit.local.cfg +++ b/mlir/test/Examples/standalone/lit.local.cfg @@ -7,7 
+7,15 @@ config.substitutions.append(("%cmake_exe", config.host_cmake)) config.substitutions.append(("%cmake_generator", config.host_cmake_generator)) config.substitutions.append(("%host_cxx", config.host_cxx)) config.substitutions.append(("%host_cc", config.host_cc)) +config.substitutions.append(("%hostc_compiler_launcher", config.host_c_compiler_launcher)) +config.substitutions.append(("%hostcxx_compiler_launcher", config.host_cxx_compiler_launcher)) config.substitutions.append(("%enable_libcxx", config.enable_libcxx)) config.substitutions.append(("%mlir_cmake_dir", config.mlir_cmake_dir)) +config.substitutions.append(("%mlir_obj_root", config.mlir_obj_root)) config.substitutions.append(("%llvm_use_linker", config.llvm_use_linker)) config.substitutions.append(("%cmake_build_type", config.cmake_build_type)) + +if not config.llvm_shared_libs_build: + config.available_features.add("non-shared-libs-build") +if config.enable_bindings_python: + config.available_features.add("bindings-python") diff --git a/mlir/test/Examples/standalone/test.wheel.toy b/mlir/test/Examples/standalone/test.wheel.toy new file mode 100644 index 0000000000000..5ff927129793b --- /dev/null +++ b/mlir/test/Examples/standalone/test.wheel.toy @@ -0,0 +1,33 @@ +# There's no real issue with windows here, it's just that some CMake generated paths for targets end up being longer +# than 255 chars when combined with the fact that pip wants to install into a tmp directory buried under +# C/Users/ContainerAdministrator/AppData/Local/Temp. +# UNSUPPORTED: target={{.*(windows).*}} +# REQUIRES: non-shared-libs-build +# REQUIRES: bindings-python + +# RUN: export CMAKE_BUILD_TYPE=%cmake_build_type +# RUN: export CMAKE_CXX_COMPILER=%host_cxx +# RUN: export CMAKE_CXX_COMPILER_LAUNCHER=%hostcxx_compiler_launcher +# RUN: export CMAKE_C_COMPILER=%host_cc +# RUN: export CMAKE_C_COMPILER_LAUNCHER=%hostc_compiler_launcher +# RUN: export CMAKE_GENERATOR=%cmake_generator +# RUN: export LLVM_USE_LINKER=%llvm_use_linker +# RUN: export MLIR_DIR="%mlir_cmake_dir" + +# RUN: %python -m pip wheel "%mlir_src_root/examples/standalone" -w "%mlir_obj_root/wheelhouse" -v | tee %t + +# RUN: rm -rf "%mlir_obj_root/standalone-python-bindings-install" +# RUN: %python -m pip install standalone_python_bindings -f "%mlir_obj_root/wheelhouse" --target "%mlir_obj_root/standalone-python-bindings-install" -v | tee -a %t + +# RUN: export PYTHONPATH="%mlir_obj_root/standalone-python-bindings-install" +# RUN: %python "%mlir_src_root/examples/standalone/test/python/smoketest.py" nanobind | tee -a %t + +# RUN: FileCheck --input-file=%t %s + +# CHECK: Successfully built standalone-python-bindings + +# CHECK: module { +# CHECK: %[[C2:.*]] = arith.constant 2 : i32 +# CHECK: %[[V0:.*]] = standalone.foo %[[C2]] : i32 +# CHECK: } + diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/pack-unpack-mmt4d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/pack-unpack-mmt4d.mlir index f7d79a304acb0..6192ed345debf 100644 --- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/pack-unpack-mmt4d.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/pack-unpack-mmt4d.mlir @@ -2,7 +2,7 @@ // DEFINE: -transform-interpreter -test-transform-dialect-erase-schedule \ // DEFINE: -cse -canonicalize -test-lower-to-llvm // DEFINE: %{entry_point} = main -// DEFINE: %{run} = mlir-runner -e %{entry_point} -entry-point-result=void \ +// DEFINE: %{run} = %mcr_aarch64_cmd -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve"\ // DEFINE: 
-shared-libs=%mlir_runner_utils,%mlir_c_runner_utils // RUN: %{compile} | %{run} | FileCheck %s diff --git a/mlir/test/Target/LLVMIR/Import/debug-info.ll b/mlir/test/Target/LLVMIR/Import/debug-info.ll index 9e2a17fb436af..e056e43a0982c 100644 --- a/mlir/test/Target/LLVMIR/Import/debug-info.ll +++ b/mlir/test/Target/LLVMIR/Import/debug-info.ll @@ -215,7 +215,7 @@ define void @composite_type() !dbg !3 { ; // ----- ; CHECK-DAG: #[[FILE:.+]] = #llvm.di_file<"debug-info.ll" in "/"> -; CHECK-DAG: #[[CU:.+]] = #llvm.di_compile_unit, sourceLanguage = DW_LANG_C, file = #[[FILE]], isOptimized = false, emissionKind = None, nameTableKind = None> +; CHECK-DAG: #[[CU:.+]] = #llvm.di_compile_unit, sourceLanguage = DW_LANG_C, file = #[[FILE]], isOptimized = false, emissionKind = None, nameTableKind = None, splitDebugFilename = "test.dwo"> ; Verify an empty subroutine types list is supported. ; CHECK-DAG: #[[SP_TYPE:.+]] = #llvm.di_subroutine_type ; CHECK-DAG: #[[SP:.+]] = #llvm.di_subprogram, compileUnit = #[[CU]], scope = #[[FILE]], name = "subprogram", linkageName = "subprogram", file = #[[FILE]], line = 42, scopeLine = 42, subprogramFlags = Definition, type = #[[SP_TYPE]]> @@ -227,7 +227,7 @@ define void @subprogram() !dbg !3 { !llvm.dbg.cu = !{!1} !llvm.module.flags = !{!0} !0 = !{i32 2, !"Debug Info Version", i32 3} -!1 = distinct !DICompileUnit(language: DW_LANG_C, file: !2, nameTableKind: None) +!1 = distinct !DICompileUnit(language: DW_LANG_C, file: !2, nameTableKind: None, splitDebugFilename: "test.dwo") !2 = !DIFile(filename: "debug-info.ll", directory: "/") !3 = distinct !DISubprogram(name: "subprogram", linkageName: "subprogram", scope: !2, file: !2, line: 42, scopeLine: 42, spFlags: DISPFlagDefinition, unit: !1, type: !4) !4 = !DISubroutineType(cc: DW_CC_normal, types: !5) diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll index 5e913691a59b0..d2bb80982bb3d 100644 --- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll +++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll @@ -733,12 +733,12 @@ define void @assume(i1 %true) { } ; CHECK-LABEL: @assume_with_opbundles -; CHECK-SAME: %[[TRUE:[a-zA-Z0-9]+]] ; CHECK-SAME: %[[PTR:[a-zA-Z0-9]+]] -define void @assume_with_opbundles(i1 %true, ptr %p) { +define void @assume_with_opbundles(ptr %p) { + ; CHECK: %[[TRUE:.+]] = llvm.mlir.constant(true) : i1 ; CHECK: %[[ALIGN:.+]] = llvm.mlir.constant(8 : i32) : i32 ; CHECK: llvm.intr.assume %[[TRUE]] ["align"(%[[PTR]], %[[ALIGN]] : !llvm.ptr, i32)] : i1 - call void @llvm.assume(i1 %true) ["align"(ptr %p, i32 8)] + call void @llvm.assume(i1 true) ["align"(ptr %p, i32 8)] ret void } @@ -829,7 +829,7 @@ define void @coro_suspend(i32 %0, i1 %1, ptr %2) { ; CHECK-LABEL: llvm.func @coro_end define void @coro_end(ptr %0, i1 %1) { ; CHECK: llvm.intr.coro.end - call i1 @llvm.coro.end(ptr %0, i1 %1, token none) + call void @llvm.coro.end(ptr %0, i1 %1, token none) ret void } @@ -1296,7 +1296,7 @@ declare i64 @llvm.coro.align.i64() declare i32 @llvm.coro.align.i32() declare token @llvm.coro.save(ptr) declare i8 @llvm.coro.suspend(token, i1) -declare i1 @llvm.coro.end(ptr, i1, token) +declare void @llvm.coro.end(ptr, i1, token) declare ptr @llvm.coro.free(token, ptr nocapture readonly) declare void @llvm.coro.resume(ptr) declare ptr @llvm.coro.promise(ptr nocapture, i32, i1) diff --git a/mlir/test/Target/LLVMIR/global_float_array.mlir b/mlir/test/Target/LLVMIR/global_float_array.mlir new file mode 100644 index 0000000000000..eba7948d2c55e --- /dev/null +++ 
b/mlir/test/Target/LLVMIR/global_float_array.mlir @@ -0,0 +1,4 @@ +// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s + +// CHECK: @test = internal global [1 x float] [float -0.000000e+00] +llvm.mlir.global internal @test(dense<-0.000000e+00> : tensor<1xf32>) {addr_space = 0 : i32} : !llvm.array<1 x f32> diff --git a/mlir/test/Target/LLVMIR/llvmir-debug.mlir b/mlir/test/Target/LLVMIR/llvmir-debug.mlir index 274d64af78283..38ae63d1908e9 100644 --- a/mlir/test/Target/LLVMIR/llvmir-debug.mlir +++ b/mlir/test/Target/LLVMIR/llvmir-debug.mlir @@ -43,7 +43,7 @@ llvm.func @func_no_debug() { #cu = #llvm.di_compile_unit< id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #file, producer = "MLIR", isOptimized = true, emissionKind = Full, - nameTableKind = None + nameTableKind = None, splitDebugFilename = "test.dwo" > #composite = #llvm.di_composite_type< tag = DW_TAG_structure_type, name = "composite", file = #file, @@ -140,7 +140,7 @@ llvm.func @empty_types() { llvm.return } loc(fused<#sp1>["foo.mlir":2:1]) -// CHECK: ![[CU_LOC:.*]] = distinct !DICompileUnit(language: DW_LANG_C, file: ![[CU_FILE_LOC:.*]], producer: "MLIR", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, nameTableKind: None) +// CHECK: ![[CU_LOC:.*]] = distinct !DICompileUnit(language: DW_LANG_C, file: ![[CU_FILE_LOC:.*]], producer: "MLIR", isOptimized: true, runtimeVersion: 0, splitDebugFilename: "test.dwo", emissionKind: FullDebug, nameTableKind: None) // CHECK: ![[CU_FILE_LOC]] = !DIFile(filename: "foo.mlir", directory: "/test/") // CHECK: ![[FUNC_LOC]] = distinct !DISubprogram(name: "func_with_debug", linkageName: "func_with_debug", scope: ![[NESTED_NAMESPACE:.*]], file: ![[CU_FILE_LOC]], line: 3, type: ![[FUNC_TYPE:.*]], scopeLine: 3, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: ![[CU_LOC]]) diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir index e4f18f3e524e7..d63584e5e03ab 100644 --- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir +++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir @@ -146,6 +146,11 @@ llvm.func @trig_test(%arg0: f32, %arg1: vector<8xf32>) { llvm.intr.tan(%arg0) : (f32) -> f32 // CHECK: call <8 x float> @llvm.tan.v8f32 llvm.intr.tan(%arg1) : (vector<8xf32>) -> vector<8xf32> + + // CHECK: call { float, float } @llvm.sincos.f32 + llvm.intr.sincos(%arg0) : (f32) -> !llvm.struct<(f32, f32)> + // CHECK: call { <8 x float>, <8 x float> } @llvm.sincos.v8f32 + llvm.intr.sincos(%arg1) : (vector<8xf32>) -> !llvm.struct<(vector<8xf32>, vector<8xf32>)> llvm.return } @@ -460,10 +465,11 @@ llvm.func @assume_without_opbundles(%cond: i1) { } // CHECK-LABEL: @assume_with_opbundles -llvm.func @assume_with_opbundles(%cond: i1, %p: !llvm.ptr) { +llvm.func @assume_with_opbundles(%p: !llvm.ptr) { + %true = llvm.mlir.constant(true) : i1 %0 = llvm.mlir.constant(8 : i32) : i32 - // CHECK: call void @llvm.assume(i1 %{{.+}}) [ "align"(ptr %{{.+}}, i32 8) ] - llvm.intr.assume %cond ["align"(%p, %0 : !llvm.ptr, i32)] : i1 + // CHECK: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i32 8) ] + llvm.intr.assume %true ["align"(%p, %0 : !llvm.ptr, i32)] : i1 llvm.return } @@ -846,8 +852,8 @@ llvm.func @coro_suspend(%arg0: i32, %arg1 : i1, %arg2 : !llvm.ptr) { // CHECK-LABEL: @coro_end llvm.func @coro_end(%arg0: !llvm.ptr, %arg1 : i1) { %none = llvm.mlir.none : !llvm.token - // CHECK: call i1 @llvm.coro.end - %0 = llvm.intr.coro.end %arg0, %arg1, %none : (!llvm.ptr, i1, !llvm.token) -> i1 + // CHECK: call void @llvm.coro.end + 
llvm.intr.coro.end %arg0, %arg1, %none : (!llvm.ptr, i1, !llvm.token) -> !llvm.void llvm.return } @@ -1301,6 +1307,8 @@ llvm.func @experimental_constrained_fpext(%s: f32, %v: vector<4xf32>) { // CHECK-DAG: declare <8 x float> @llvm.ceil.v8f32(<8 x float>) #0 // CHECK-DAG: declare float @llvm.cos.f32(float) // CHECK-DAG: declare <8 x float> @llvm.cos.v8f32(<8 x float>) #0 +// CHECK-DAG: declare { float, float } @llvm.sincos.f32(float) +// CHECK-DAG: declare { <8 x float>, <8 x float> } @llvm.sincos.v8f32(<8 x float>) #0 // CHECK-DAG: declare float @llvm.copysign.f32(float, float) // CHECK-DAG: declare float @llvm.rint.f32(float) // CHECK-DAG: declare double @llvm.rint.f64(double) @@ -1374,7 +1382,7 @@ llvm.func @experimental_constrained_fpext(%s: f32, %v: vector<4xf32>) { // CHECK-DAG: declare i32 @llvm.coro.size.i32() // CHECK-DAG: declare token @llvm.coro.save(ptr) // CHECK-DAG: declare i8 @llvm.coro.suspend(token, i1) -// CHECK-DAG: declare i1 @llvm.coro.end(ptr, i1, token) +// CHECK-DAG: declare void @llvm.coro.end(ptr, i1, token) // CHECK-DAG: declare ptr @llvm.coro.free(token, ptr readonly captures(none)) // CHECK-DAG: declare void @llvm.coro.resume(ptr) // CHECK-DAG: declare ptr @llvm.coro.promise(ptr captures(none), i32, i1) diff --git a/mlir/test/Target/LLVMIR/openmp-cli-tile01.mlir b/mlir/test/Target/LLVMIR/openmp-cli-tile01.mlir new file mode 100644 index 0000000000000..4ac4f02103e8c --- /dev/null +++ b/mlir/test/Target/LLVMIR/openmp-cli-tile01.mlir @@ -0,0 +1,101 @@ +// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s + + + +llvm.func @tile_trivial_loop(%baseptr: !llvm.ptr, %tc: i32, %ts: i32) -> () { + %literal_cli = omp.new_cli + omp.canonical_loop(%literal_cli) %iv : i32 in range(%tc) { + %ptr = llvm.getelementptr inbounds %baseptr[%iv] : (!llvm.ptr, i32) -> !llvm.ptr, f32 + %val = llvm.mlir.constant(42.0 : f32) : f32 + llvm.store %val, %ptr : f32, !llvm.ptr + omp.terminator + } + omp.tile <- (%literal_cli) sizes(%ts : i32) + llvm.return +} + + +// CHECK: ; ModuleID = 'LLVMDialectModule' +// CHECK-NEXT: source_filename = "LLVMDialectModule" +// CHECK-EMPTY: +// CHECK-NEXT: define void @tile_trivial_loop(ptr %0, i32 %1, i32 %2) { +// CHECK-NEXT: br label %omp_omp.loop.preheader +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.preheader: ; preds = %3 +// CHECK-NEXT: %4 = udiv i32 %1, %2 +// CHECK-NEXT: %5 = urem i32 %1, %2 +// CHECK-NEXT: %6 = icmp ne i32 %5, 0 +// CHECK-NEXT: %7 = zext i1 %6 to i32 +// CHECK-NEXT: %omp_floor0.tripcount = add nuw i32 %4, %7 +// CHECK-NEXT: br label %omp_floor0.preheader +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.preheader: ; preds = %omp_omp.loop.preheader +// CHECK-NEXT: br label %omp_floor0.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.header: ; preds = %omp_floor0.inc, %omp_floor0.preheader +// CHECK-NEXT: %omp_floor0.iv = phi i32 [ 0, %omp_floor0.preheader ], [ %omp_floor0.next, %omp_floor0.inc ] +// CHECK-NEXT: br label %omp_floor0.cond +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.cond: ; preds = %omp_floor0.header +// CHECK-NEXT: %omp_floor0.cmp = icmp ult i32 %omp_floor0.iv, %omp_floor0.tripcount +// CHECK-NEXT: br i1 %omp_floor0.cmp, label %omp_floor0.body, label %omp_floor0.exit +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.body: ; preds = %omp_floor0.cond +// CHECK-NEXT: %8 = icmp eq i32 %omp_floor0.iv, %4 +// CHECK-NEXT: %9 = select i1 %8, i32 %5, i32 %2 +// CHECK-NEXT: br label %omp_tile0.preheader +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.preheader: ; preds = %omp_floor0.body +// CHECK-NEXT: br label %omp_tile0.header +// 
CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.header: ; preds = %omp_tile0.inc, %omp_tile0.preheader +// CHECK-NEXT: %omp_tile0.iv = phi i32 [ 0, %omp_tile0.preheader ], [ %omp_tile0.next, %omp_tile0.inc ] +// CHECK-NEXT: br label %omp_tile0.cond +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.cond: ; preds = %omp_tile0.header +// CHECK-NEXT: %omp_tile0.cmp = icmp ult i32 %omp_tile0.iv, %9 +// CHECK-NEXT: br i1 %omp_tile0.cmp, label %omp_tile0.body, label %omp_tile0.exit +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.body: ; preds = %omp_tile0.cond +// CHECK-NEXT: %10 = mul nuw i32 %2, %omp_floor0.iv +// CHECK-NEXT: %11 = add nuw i32 %10, %omp_tile0.iv +// CHECK-NEXT: br label %omp_omp.loop.body +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.body: ; preds = %omp_tile0.body +// CHECK-NEXT: br label %omp.loop.region +// CHECK-EMPTY: +// CHECK-NEXT: omp.loop.region: ; preds = %omp_omp.loop.body +// CHECK-NEXT: %12 = getelementptr inbounds float, ptr %0, i32 %11 +// CHECK-NEXT: store float 4.200000e+01, ptr %12, align 4 +// CHECK-NEXT: br label %omp.region.cont +// CHECK-EMPTY: +// CHECK-NEXT: omp.region.cont: ; preds = %omp.loop.region +// CHECK-NEXT: br label %omp_tile0.inc +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.inc: ; preds = %omp.region.cont +// CHECK-NEXT: %omp_tile0.next = add nuw i32 %omp_tile0.iv, 1 +// CHECK-NEXT: br label %omp_tile0.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.exit: ; preds = %omp_tile0.cond +// CHECK-NEXT: br label %omp_tile0.after +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.after: ; preds = %omp_tile0.exit +// CHECK-NEXT: br label %omp_floor0.inc +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.inc: ; preds = %omp_tile0.after +// CHECK-NEXT: %omp_floor0.next = add nuw i32 %omp_floor0.iv, 1 +// CHECK-NEXT: br label %omp_floor0.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.exit: ; preds = %omp_floor0.cond +// CHECK-NEXT: br label %omp_floor0.after +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.after: ; preds = %omp_floor0.exit +// CHECK-NEXT: br label %omp_omp.loop.after +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.after: ; preds = %omp_floor0.after +// CHECK-NEXT: ret void +// CHECK-NEXT: } +// CHECK-EMPTY: +// CHECK-NEXT: !llvm.module.flags = !{!0} +// CHECK-EMPTY: +// CHECK-NEXT: !0 = !{i32 2, !"Debug Info Version", i32 3} diff --git a/mlir/test/Target/LLVMIR/openmp-cli-tile02.mlir b/mlir/test/Target/LLVMIR/openmp-cli-tile02.mlir new file mode 100644 index 0000000000000..6fad81cd0c299 --- /dev/null +++ b/mlir/test/Target/LLVMIR/openmp-cli-tile02.mlir @@ -0,0 +1,190 @@ +// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s + + +llvm.func @tile_2d_loop(%baseptr: !llvm.ptr, %tc1: i32, %tc2: i32, %ts1: i32, %ts2: i32) -> () { + %literal_outer = omp.new_cli + %literal_inner = omp.new_cli + omp.canonical_loop(%literal_outer) %iv1 : i32 in range(%tc1) { + omp.canonical_loop(%literal_inner) %iv2 : i32 in range(%tc2) { + %idx = llvm.add %iv1, %iv2 : i32 + %ptr = llvm.getelementptr inbounds %baseptr[%idx] : (!llvm.ptr, i32) -> !llvm.ptr, f32 + %val = llvm.mlir.constant(42.0 : f32) : f32 + llvm.store %val, %ptr : f32, !llvm.ptr + omp.terminator + } + omp.terminator + } + omp.tile <- (%literal_outer, %literal_inner) sizes(%ts1, %ts2 : i32,i32) + llvm.return +} + + +// CHECK: ; ModuleID = 'LLVMDialectModule' +// CHECK-NEXT: source_filename = "LLVMDialectModule" +// CHECK-EMPTY: +// CHECK-NEXT: define void @tile_2d_loop(ptr %0, i32 %1, i32 %2, i32 %3, i32 %4) { +// CHECK-NEXT: br label %omp_omp.loop.preheader +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.preheader: ; preds = %5 +// 
CHECK-NEXT: %6 = udiv i32 %1, %3 +// CHECK-NEXT: %7 = urem i32 %1, %3 +// CHECK-NEXT: %8 = icmp ne i32 %7, 0 +// CHECK-NEXT: %9 = zext i1 %8 to i32 +// CHECK-NEXT: %omp_floor0.tripcount = add nuw i32 %6, %9 +// CHECK-NEXT: %10 = udiv i32 %2, %4 +// CHECK-NEXT: %11 = urem i32 %2, %4 +// CHECK-NEXT: %12 = icmp ne i32 %11, 0 +// CHECK-NEXT: %13 = zext i1 %12 to i32 +// CHECK-NEXT: %omp_floor1.tripcount = add nuw i32 %10, %13 +// CHECK-NEXT: br label %omp_floor0.preheader +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.header: ; preds = %omp_omp.loop.inc +// CHECK-NEXT: %omp_omp.loop.iv = phi i32 [ %omp_omp.loop.next, %omp_omp.loop.inc ] +// CHECK-NEXT: br label %omp_omp.loop.cond +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.cond: ; preds = %omp_omp.loop.header +// CHECK-NEXT: %omp_omp.loop.cmp = icmp ult i32 %19, %1 +// CHECK-NEXT: br i1 %omp_omp.loop.cmp, label %omp_omp.loop.body, label %omp_omp.loop.exit +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.body: ; preds = %omp_tile1.body, %omp_omp.loop.cond +// CHECK-NEXT: br label %omp.loop.region +// CHECK-EMPTY: +// CHECK-NEXT: omp.loop.region: ; preds = %omp_omp.loop.body +// CHECK-NEXT: br label %omp_omp.loop.preheader1 +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.preheader1: ; preds = %omp.loop.region +// CHECK-NEXT: br label %omp_omp.loop.body4 +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.preheader: ; preds = %omp_omp.loop.preheader +// CHECK-NEXT: br label %omp_floor0.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.header: ; preds = %omp_floor0.inc, %omp_floor0.preheader +// CHECK-NEXT: %omp_floor0.iv = phi i32 [ 0, %omp_floor0.preheader ], [ %omp_floor0.next, %omp_floor0.inc ] +// CHECK-NEXT: br label %omp_floor0.cond +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.cond: ; preds = %omp_floor0.header +// CHECK-NEXT: %omp_floor0.cmp = icmp ult i32 %omp_floor0.iv, %omp_floor0.tripcount +// CHECK-NEXT: br i1 %omp_floor0.cmp, label %omp_floor0.body, label %omp_floor0.exit +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.body: ; preds = %omp_floor0.cond +// CHECK-NEXT: br label %omp_floor1.preheader +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor1.preheader: ; preds = %omp_floor0.body +// CHECK-NEXT: br label %omp_floor1.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor1.header: ; preds = %omp_floor1.inc, %omp_floor1.preheader +// CHECK-NEXT: %omp_floor1.iv = phi i32 [ 0, %omp_floor1.preheader ], [ %omp_floor1.next, %omp_floor1.inc ] +// CHECK-NEXT: br label %omp_floor1.cond +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor1.cond: ; preds = %omp_floor1.header +// CHECK-NEXT: %omp_floor1.cmp = icmp ult i32 %omp_floor1.iv, %omp_floor1.tripcount +// CHECK-NEXT: br i1 %omp_floor1.cmp, label %omp_floor1.body, label %omp_floor1.exit +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor1.body: ; preds = %omp_floor1.cond +// CHECK-NEXT: %14 = icmp eq i32 %omp_floor0.iv, %6 +// CHECK-NEXT: %15 = select i1 %14, i32 %7, i32 %3 +// CHECK-NEXT: %16 = icmp eq i32 %omp_floor1.iv, %10 +// CHECK-NEXT: %17 = select i1 %16, i32 %11, i32 %4 +// CHECK-NEXT: br label %omp_tile0.preheader +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.preheader: ; preds = %omp_floor1.body +// CHECK-NEXT: br label %omp_tile0.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.header: ; preds = %omp_tile0.inc, %omp_tile0.preheader +// CHECK-NEXT: %omp_tile0.iv = phi i32 [ 0, %omp_tile0.preheader ], [ %omp_tile0.next, %omp_tile0.inc ] +// CHECK-NEXT: br label %omp_tile0.cond +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.cond: ; preds = %omp_tile0.header +// CHECK-NEXT: %omp_tile0.cmp = icmp ult i32 %omp_tile0.iv, %15 +// 
CHECK-NEXT: br i1 %omp_tile0.cmp, label %omp_tile0.body, label %omp_tile0.exit +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.body: ; preds = %omp_tile0.cond +// CHECK-NEXT: br label %omp_tile1.preheader +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile1.preheader: ; preds = %omp_tile0.body +// CHECK-NEXT: br label %omp_tile1.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile1.header: ; preds = %omp_tile1.inc, %omp_tile1.preheader +// CHECK-NEXT: %omp_tile1.iv = phi i32 [ 0, %omp_tile1.preheader ], [ %omp_tile1.next, %omp_tile1.inc ] +// CHECK-NEXT: br label %omp_tile1.cond +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile1.cond: ; preds = %omp_tile1.header +// CHECK-NEXT: %omp_tile1.cmp = icmp ult i32 %omp_tile1.iv, %17 +// CHECK-NEXT: br i1 %omp_tile1.cmp, label %omp_tile1.body, label %omp_tile1.exit +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile1.body: ; preds = %omp_tile1.cond +// CHECK-NEXT: %18 = mul nuw i32 %3, %omp_floor0.iv +// CHECK-NEXT: %19 = add nuw i32 %18, %omp_tile0.iv +// CHECK-NEXT: %20 = mul nuw i32 %4, %omp_floor1.iv +// CHECK-NEXT: %21 = add nuw i32 %20, %omp_tile1.iv +// CHECK-NEXT: br label %omp_omp.loop.body +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.body4: ; preds = %omp_omp.loop.preheader1 +// CHECK-NEXT: br label %omp.loop.region12 +// CHECK-EMPTY: +// CHECK-NEXT: omp.loop.region12: ; preds = %omp_omp.loop.body4 +// CHECK-NEXT: %22 = add i32 %19, %21 +// CHECK-NEXT: %23 = getelementptr inbounds float, ptr %0, i32 %22 +// CHECK-NEXT: store float 4.200000e+01, ptr %23, align 4 +// CHECK-NEXT: br label %omp.region.cont11 +// CHECK-EMPTY: +// CHECK-NEXT: omp.region.cont11: ; preds = %omp.loop.region12 +// CHECK-NEXT: br label %omp_tile1.inc +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile1.inc: ; preds = %omp.region.cont11 +// CHECK-NEXT: %omp_tile1.next = add nuw i32 %omp_tile1.iv, 1 +// CHECK-NEXT: br label %omp_tile1.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile1.exit: ; preds = %omp_tile1.cond +// CHECK-NEXT: br label %omp_tile1.after +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile1.after: ; preds = %omp_tile1.exit +// CHECK-NEXT: br label %omp_tile0.inc +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.inc: ; preds = %omp_tile1.after +// CHECK-NEXT: %omp_tile0.next = add nuw i32 %omp_tile0.iv, 1 +// CHECK-NEXT: br label %omp_tile0.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.exit: ; preds = %omp_tile0.cond +// CHECK-NEXT: br label %omp_tile0.after +// CHECK-EMPTY: +// CHECK-NEXT: omp_tile0.after: ; preds = %omp_tile0.exit +// CHECK-NEXT: br label %omp_floor1.inc +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor1.inc: ; preds = %omp_tile0.after +// CHECK-NEXT: %omp_floor1.next = add nuw i32 %omp_floor1.iv, 1 +// CHECK-NEXT: br label %omp_floor1.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor1.exit: ; preds = %omp_floor1.cond +// CHECK-NEXT: br label %omp_floor1.after +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor1.after: ; preds = %omp_floor1.exit +// CHECK-NEXT: br label %omp_floor0.inc +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.inc: ; preds = %omp_floor1.after +// CHECK-NEXT: %omp_floor0.next = add nuw i32 %omp_floor0.iv, 1 +// CHECK-NEXT: br label %omp_floor0.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.exit: ; preds = %omp_floor0.cond +// CHECK-NEXT: br label %omp_floor0.after +// CHECK-EMPTY: +// CHECK-NEXT: omp_floor0.after: ; preds = %omp_floor0.exit +// CHECK-NEXT: br label %omp_omp.loop.after +// CHECK-EMPTY: +// CHECK-NEXT: omp.region.cont: ; No predecessors! 
+// CHECK-NEXT: br label %omp_omp.loop.inc +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.inc: ; preds = %omp.region.cont +// CHECK-NEXT: %omp_omp.loop.next = add nuw i32 %19, 1 +// CHECK-NEXT: br label %omp_omp.loop.header +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.exit: ; preds = %omp_omp.loop.cond +// CHECK-NEXT: br label %omp_omp.loop.after +// CHECK-EMPTY: +// CHECK-NEXT: omp_omp.loop.after: ; preds = %omp_floor0.after, %omp_omp.loop.exit +// CHECK-NEXT: ret void +// CHECK-NEXT: } +// CHECK-EMPTY: +// CHECK-NEXT: !llvm.module.flags = !{!0} +// CHECK-EMPTY: +// CHECK-NEXT: !0 = !{i32 2, !"Debug Info Version", i32 3} diff --git a/mlir/test/Target/LLVMIR/ptr.mlir b/mlir/test/Target/LLVMIR/ptr.mlir index 2fa794130ec52..e2687e52ece57 100644 --- a/mlir/test/Target/LLVMIR/ptr.mlir +++ b/mlir/test/Target/LLVMIR/ptr.mlir @@ -281,3 +281,99 @@ llvm.func @ptr_add_cst() -> !ptr.ptr<#llvm.address_space<0>> { %res = ptr.ptr_add %ptr, %off : !ptr.ptr<#llvm.address_space<0>>, i32 llvm.return %res : !ptr.ptr<#llvm.address_space<0>> } + +// CHECK-LABEL: define i64 @ptr_diff_scalar +// CHECK-SAME: (ptr %[[PTR1:.*]], ptr %[[PTR2:.*]]) { +// CHECK-NEXT: %[[P1INT:.*]] = ptrtoint ptr %[[PTR1]] to i64 +// CHECK-NEXT: %[[P2INT:.*]] = ptrtoint ptr %[[PTR2]] to i64 +// CHECK-NEXT: %[[DIFF:.*]] = sub i64 %[[P1INT]], %[[P2INT]] +// CHECK-NEXT: ret i64 %[[DIFF]] +// CHECK-NEXT: } +llvm.func @ptr_diff_scalar(%ptr1: !ptr.ptr<#llvm.address_space<0>>, %ptr2: !ptr.ptr<#llvm.address_space<0>>) -> i64 { + %diff = ptr.ptr_diff %ptr1, %ptr2 : !ptr.ptr<#llvm.address_space<0>> -> i64 + llvm.return %diff : i64 +} + +// CHECK-LABEL: define i32 @ptr_diff_scalar_i32 +// CHECK-SAME: (ptr %[[PTR1:.*]], ptr %[[PTR2:.*]]) { +// CHECK-NEXT: %[[P1INT:.*]] = ptrtoint ptr %[[PTR1]] to i64 +// CHECK-NEXT: %[[P2INT:.*]] = ptrtoint ptr %[[PTR2]] to i64 +// CHECK-NEXT: %[[DIFF:.*]] = sub i64 %[[P1INT]], %[[P2INT]] +// CHECK-NEXT: %[[TRUNC:.*]] = trunc i64 %[[DIFF]] to i32 +// CHECK-NEXT: ret i32 %[[TRUNC]] +// CHECK-NEXT: } +llvm.func @ptr_diff_scalar_i32(%ptr1: !ptr.ptr<#llvm.address_space<0>>, %ptr2: !ptr.ptr<#llvm.address_space<0>>) -> i32 { + %diff = ptr.ptr_diff %ptr1, %ptr2 : !ptr.ptr<#llvm.address_space<0>> -> i32 + llvm.return %diff : i32 +} + +// CHECK-LABEL: define <4 x i64> @ptr_diff_vector +// CHECK-SAME: (<4 x ptr> %[[PTRS1:.*]], <4 x ptr> %[[PTRS2:.*]]) { +// CHECK-NEXT: %[[P1INT:.*]] = ptrtoint <4 x ptr> %[[PTRS1]] to <4 x i64> +// CHECK-NEXT: %[[P2INT:.*]] = ptrtoint <4 x ptr> %[[PTRS2]] to <4 x i64> +// CHECK-NEXT: %[[DIFF:.*]] = sub <4 x i64> %[[P1INT]], %[[P2INT]] +// CHECK-NEXT: ret <4 x i64> %[[DIFF]] +// CHECK-NEXT: } +llvm.func @ptr_diff_vector(%ptrs1: vector<4x!ptr.ptr<#llvm.address_space<0>>>, %ptrs2: vector<4x!ptr.ptr<#llvm.address_space<0>>>) -> vector<4xi64> { + %diffs = ptr.ptr_diff %ptrs1, %ptrs2 : vector<4x!ptr.ptr<#llvm.address_space<0>>> -> vector<4xi64> + llvm.return %diffs : vector<4xi64> +} + +// CHECK-LABEL: define <8 x i32> @ptr_diff_vector_i32 +// CHECK-SAME: (<8 x ptr> %[[PTRS1:.*]], <8 x ptr> %[[PTRS2:.*]]) { +// CHECK-NEXT: %[[P1INT:.*]] = ptrtoint <8 x ptr> %[[PTRS1]] to <8 x i64> +// CHECK-NEXT: %[[P2INT:.*]] = ptrtoint <8 x ptr> %[[PTRS2]] to <8 x i64> +// CHECK-NEXT: %[[DIFF:.*]] = sub <8 x i64> %[[P1INT]], %[[P2INT]] +// CHECK-NEXT: %[[TRUNC:.*]] = trunc <8 x i64> %[[DIFF]] to <8 x i32> +// CHECK-NEXT: ret <8 x i32> %[[TRUNC]] +// CHECK-NEXT: } +llvm.func @ptr_diff_vector_i32(%ptrs1: vector<8x!ptr.ptr<#llvm.address_space<0>>>, %ptrs2: vector<8x!ptr.ptr<#llvm.address_space<0>>>) -> 
vector<8xi32> { + %diffs = ptr.ptr_diff %ptrs1, %ptrs2 : vector<8x!ptr.ptr<#llvm.address_space<0>>> -> vector<8xi32> + llvm.return %diffs : vector<8xi32> +} + +// CHECK-LABEL: define i64 @ptr_diff_with_constants() { +// CHECK-NEXT: ret i64 4096 +// CHECK-NEXT: } +llvm.func @ptr_diff_with_constants() -> i64 { + %ptr1 = ptr.constant #ptr.address<0x2000> : !ptr.ptr<#llvm.address_space<0>> + %ptr2 = ptr.constant #ptr.address<0x1000> : !ptr.ptr<#llvm.address_space<0>> + %diff = ptr.ptr_diff %ptr1, %ptr2 : !ptr.ptr<#llvm.address_space<0>> -> i64 + llvm.return %diff : i64 +} + +// CHECK-LABEL: define i64 @ptr_diff_with_flags_nsw +// CHECK-SAME: (ptr %[[PTR1:.*]], ptr %[[PTR2:.*]]) { +// CHECK-NEXT: %[[P1INT:.*]] = ptrtoint ptr %[[PTR1]] to i64 +// CHECK-NEXT: %[[P2INT:.*]] = ptrtoint ptr %[[PTR2]] to i64 +// CHECK-NEXT: %[[DIFF:.*]] = sub nsw i64 %[[P1INT]], %[[P2INT]] +// CHECK-NEXT: ret i64 %[[DIFF]] +// CHECK-NEXT: } +llvm.func @ptr_diff_with_flags_nsw(%ptr1: !ptr.ptr<#llvm.address_space<0>>, %ptr2: !ptr.ptr<#llvm.address_space<0>>) -> i64 { + %diff = ptr.ptr_diff nsw %ptr1, %ptr2 : !ptr.ptr<#llvm.address_space<0>> -> i64 + llvm.return %diff : i64 +} + +// CHECK-LABEL: define i64 @ptr_diff_with_flags_nuw +// CHECK-SAME: (ptr %[[PTR1:.*]], ptr %[[PTR2:.*]]) { +// CHECK-NEXT: %[[P1INT:.*]] = ptrtoint ptr %[[PTR1]] to i64 +// CHECK-NEXT: %[[P2INT:.*]] = ptrtoint ptr %[[PTR2]] to i64 +// CHECK-NEXT: %[[DIFF:.*]] = sub nuw i64 %[[P1INT]], %[[P2INT]] +// CHECK-NEXT: ret i64 %[[DIFF]] +// CHECK-NEXT: } +llvm.func @ptr_diff_with_flags_nuw(%ptr1: !ptr.ptr<#llvm.address_space<0>>, %ptr2: !ptr.ptr<#llvm.address_space<0>>) -> i64 { + %diff = ptr.ptr_diff nuw %ptr1, %ptr2 : !ptr.ptr<#llvm.address_space<0>> -> i64 + llvm.return %diff : i64 +} + +// CHECK-LABEL: define i64 @ptr_diff_with_flags_nsw_nuw +// CHECK-SAME: (ptr %[[PTR1:.*]], ptr %[[PTR2:.*]]) { +// CHECK-NEXT: %[[P1INT:.*]] = ptrtoint ptr %[[PTR1]] to i64 +// CHECK-NEXT: %[[P2INT:.*]] = ptrtoint ptr %[[PTR2]] to i64 +// CHECK-NEXT: %[[DIFF:.*]] = sub nuw nsw i64 %[[P1INT]], %[[P2INT]] +// CHECK-NEXT: ret i64 %[[DIFF]] +// CHECK-NEXT: } +llvm.func @ptr_diff_with_flags_nsw_nuw(%ptr1: !ptr.ptr<#llvm.address_space<0>>, %ptr2: !ptr.ptr<#llvm.address_space<0>>) -> i64 { + %diff = ptr.ptr_diff nsw | nuw %ptr1, %ptr2 : !ptr.ptr<#llvm.address_space<0>> -> i64 + llvm.return %diff : i64 +} diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir index bebd1b4317b2f..00ee6b795c43a 100644 --- a/mlir/test/Target/LLVMIR/rocdl.mlir +++ b/mlir/test/Target/LLVMIR/rocdl.mlir @@ -907,10 +907,10 @@ llvm.func @rocdl.global.load.lds(%src : !llvm.ptr<1>, %dst: !llvm.ptr<3>) { llvm.func @rocdl.make.buffer.rsrc(%ptr : !llvm.ptr, %stride : i16, - %numRecords : i32, + %numRecords : i64, %flags : i32) -> !llvm.ptr<8> { // CHECK-LABEL: rocdl.make.buffer.rsrc - // CHECK: %[[rsrc:.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %{{.*}}, i16 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) + // CHECK: %[[rsrc:.*]] = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %{{.*}}, i16 %{{.*}}, i64 %{{.*}}, i32 %{{.*}}) // CHECK: ret ptr addrspace(8) %[[rsrc]] %rsrc = rocdl.make.buffer.rsrc %ptr, %stride, %numRecords, %flags : !llvm.ptr to !llvm.ptr<8> llvm.return %rsrc : !llvm.ptr<8> @@ -918,10 +918,10 @@ llvm.func @rocdl.make.buffer.rsrc(%ptr : !llvm.ptr, llvm.func @rocdl.make.buffer.rsrc.p7.p1(%ptr : !llvm.ptr<1>, %stride : i16, - %numRecords : i32, + %numRecords : i64, %flags : i32) -> !llvm.ptr<7> { // CHECK-LABEL: 
rocdl.make.buffer.rsrc.p7.p1 - // CHECK: %[[rsrc:.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %{{.*}}, i16 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) + // CHECK: %[[rsrc:.*]] = call ptr addrspace(7) @llvm.amdgcn.make.buffer.rsrc.p7.p1(ptr addrspace(1) %{{.*}}, i16 %{{.*}}, i64 %{{.*}}, i32 %{{.*}}) // CHECK: ret ptr addrspace(7) %[[rsrc]] %rsrc = rocdl.make.buffer.rsrc %ptr, %stride, %numRecords, %flags : <1> to <7> llvm.return %rsrc : !llvm.ptr<7> @@ -1298,6 +1298,20 @@ llvm.func @rocdl_last_use(%ptr: !llvm.ptr<1>) -> i32 { llvm.return %ret : i32 } +llvm.func @test_fmed3_f16(%arg0: f16, %arg1: f16, %arg2: f16) -> f16 { + // CHECK-LABEL: define half @test_fmed3_f16(half %0, half %1, half %2) + %0 = rocdl.fmed3 %arg0, %arg1, %arg2 : f16 + llvm.return %0 : f16 + // CHECK: call half @llvm.amdgcn.fmed3.f16(half %0, half %1, half %2) +} + +llvm.func @test_fmed3_f32(%arg0: f32, %arg1: f32, %arg2: f32) -> f32 { + // CHECK-LABEL: define float @test_fmed3_f32(float %0, float %1, float %2) + %0 = rocdl.fmed3 %arg0, %arg1, %arg2 : f32 + llvm.return %0 : f32 + // CHECK: call float @llvm.amdgcn.fmed3.f32(float %0, float %1, float %2) +} + // CHECK-LABEL: rocdl.cvt.scale.pk8 // CHECK-SAME:(i32 %[[I32:.+]], <2 x i32> %[[V2I32:.+]], i32 %[[SCALE:.+]]) llvm.func @rocdl.cvt.scale.pk8(%i32: i32, %v2xi32: vector<2xi32>, %scale: i32) { @@ -1326,6 +1340,34 @@ llvm.func @rocdl.cvt.scale.pk8(%i32: i32, %v2xi32: vector<2xi32>, %scale: i32) { llvm.return } +// CHECK-LABEL: rocdl.cvt.scalef32.pk8 +// CHECK-SAME:(<8 x float> %[[V8F32:.+]], <8 x half> %[[V8F16:.+]], <8 x bfloat> %[[V8BF16:.+]], float %[[SCALE:.+]]) +llvm.func @rocdl.cvt.scalef32.pk8(%v8xf32: vector<8xf32>, %v8xf16: vector<8xf16>, %v8xbf16: vector<8xbf16>, %scale: f32) { + + // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f32(<8 x float> %[[V8F32]], float %[[SCALE]]) + %0 = rocdl.cvt.scalef32.pk8.fp8.f32 %v8xf32, %scale : vector<2xi32> + // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f32(<8 x float> %[[V8F32]], float %[[SCALE]]) + %1 = rocdl.cvt.scalef32.pk8.bf8.f32 %v8xf32, %scale : vector<2xi32> + // CHECK: call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f32(<8 x float> %[[V8F32]], float %[[SCALE]]) + %2 = rocdl.cvt.scalef32.pk8.fp4.f32 %v8xf32, %scale : i32 + + // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.f16(<8 x half> %[[V8F16]], float %[[SCALE]]) + %3 = rocdl.cvt.scalef32.pk8.fp8.f16 %v8xf16, %scale : vector<2xi32> + // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.f16(<8 x half> %[[V8F16]], float %[[SCALE]]) + %4 = rocdl.cvt.scalef32.pk8.bf8.f16 %v8xf16, %scale : vector<2xi32> + // CHECK: call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.f16(<8 x half> %[[V8F16]], float %[[SCALE]]) + %5 = rocdl.cvt.scalef32.pk8.fp4.f16 %v8xf16, %scale : i32 + + // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.fp8.bf16(<8 x bfloat> %[[V8BF16]], float %[[SCALE]]) + %6 = rocdl.cvt.scalef32.pk8.fp8.bf16 %v8xbf16, %scale : vector<2xi32> + // CHECK: call <2 x i32> @llvm.amdgcn.cvt.scalef32.pk8.bf8.bf16(<8 x bfloat> %[[V8BF16]], float %[[SCALE]]) + %7 = rocdl.cvt.scalef32.pk8.bf8.bf16 %v8xbf16, %scale : vector<2xi32> + // CHECK: call i32 @llvm.amdgcn.cvt.scalef32.pk8.fp4.bf16(<8 x bfloat> %[[V8BF16]], float %[[SCALE]]) + %8 = rocdl.cvt.scalef32.pk8.fp4.bf16 %v8xbf16, %scale : i32 + + llvm.return +} + // CHECK-LABEL: @rocdl.cvt.scale.pk16 // CHECK-SAME:(<3 x i32> %[[SRC0:.+]], i32 %[[SCALE:.+]]) llvm.func @rocdl.cvt.scale.pk16(%v3xi32: vector<3xi32>, %scale:i32) { diff --git 
a/mlir/test/Target/SPIRV/execution-mode.mlir b/mlir/test/Target/SPIRV/execution-mode.mlir index e51ba7c0269a4..2178a8a77a225 100644 --- a/mlir/test/Target/SPIRV/execution-mode.mlir +++ b/mlir/test/Target/SPIRV/execution-mode.mlir @@ -1,10 +1,15 @@ // RUN: mlir-translate -no-implicit-module -test-spirv-roundtrip %s | FileCheck %s -spirv.module Logical GLSL450 requires #spirv.vce { +// RUN: %if spirv-tools %{ rm -rf %t %} +// RUN: %if spirv-tools %{ mkdir %t %} +// RUN: %if spirv-tools %{ mlir-translate --no-implicit-module --serialize-spirv --split-input-file --spirv-save-validation-files-with-prefix=%t/module %s %} +// RUN: %if spirv-tools %{ spirv-val %t %} + +spirv.module Logical OpenCL requires #spirv.vce { spirv.func @foo() -> () "None" { spirv.Return } - spirv.EntryPoint "GLCompute" @foo + spirv.EntryPoint "Kernel" @foo // CHECK: spirv.ExecutionMode @foo "LocalSizeHint", 3, 4, 5 spirv.ExecutionMode @foo "LocalSizeHint", 3, 4, 5 } diff --git a/mlir/test/Target/SPIRV/function-call.mlir b/mlir/test/Target/SPIRV/function-call.mlir index a7473a8ccd7ba..2e94ded3401ce 100644 --- a/mlir/test/Target/SPIRV/function-call.mlir +++ b/mlir/test/Target/SPIRV/function-call.mlir @@ -1,26 +1,31 @@ // RUN: mlir-translate -no-implicit-module -test-spirv-roundtrip %s | FileCheck %s -spirv.module Logical GLSL450 requires #spirv.vce { - spirv.GlobalVariable @var1 : !spirv.ptr, Input> +// RUN: %if spirv-tools %{ rm -rf %t %} +// RUN: %if spirv-tools %{ mkdir %t %} +// RUN: %if spirv-tools %{ mlir-translate --no-implicit-module --serialize-spirv --split-input-file --spirv-save-validation-files-with-prefix=%t/module %s %} +// RUN: %if spirv-tools %{ spirv-val %t %} + +spirv.module Logical GLSL450 requires #spirv.vce { + spirv.GlobalVariable @var1 : !spirv.ptr, StorageBuffer> spirv.func @fmain() -> i32 "None" { %0 = spirv.Constant 16 : i32 - %1 = spirv.mlir.addressof @var1 : !spirv.ptr, Input> + %1 = spirv.mlir.addressof @var1 : !spirv.ptr, StorageBuffer> // CHECK: {{%.*}} = spirv.FunctionCall @f_0({{%.*}}) : (i32) -> i32 %3 = spirv.FunctionCall @f_0(%0) : (i32) -> i32 - // CHECK: spirv.FunctionCall @f_1({{%.*}}, {{%.*}}) : (i32, !spirv.ptr, Input>) -> () - spirv.FunctionCall @f_1(%3, %1) : (i32, !spirv.ptr, Input>) -> () - // CHECK: {{%.*}} = spirv.FunctionCall @f_2({{%.*}}) : (!spirv.ptr, Input>) -> !spirv.ptr, Input> - %4 = spirv.FunctionCall @f_2(%1) : (!spirv.ptr, Input>) -> !spirv.ptr, Input> + // CHECK: spirv.FunctionCall @f_1({{%.*}}, {{%.*}}) : (i32, !spirv.ptr, StorageBuffer>) -> () + spirv.FunctionCall @f_1(%3, %1) : (i32, !spirv.ptr, StorageBuffer>) -> () + // CHECK: {{%.*}} = spirv.FunctionCall @f_2({{%.*}}) : (!spirv.ptr, StorageBuffer>) -> !spirv.ptr, StorageBuffer> + %4 = spirv.FunctionCall @f_2(%1) : (!spirv.ptr, StorageBuffer>) -> !spirv.ptr, StorageBuffer> spirv.ReturnValue %3 : i32 } spirv.func @f_0(%arg0 : i32) -> i32 "None" { spirv.ReturnValue %arg0 : i32 } - spirv.func @f_1(%arg0 : i32, %arg1 : !spirv.ptr, Input>) -> () "None" { + spirv.func @f_1(%arg0 : i32, %arg1 : !spirv.ptr, StorageBuffer>) -> () "None" { spirv.Return } - spirv.func @f_2(%arg0 : !spirv.ptr, Input>) -> !spirv.ptr, Input> "None" { - spirv.ReturnValue %arg0 : !spirv.ptr, Input> + spirv.func @f_2(%arg0 : !spirv.ptr, StorageBuffer>) -> !spirv.ptr, StorageBuffer> "None" { + spirv.ReturnValue %arg0 : !spirv.ptr, StorageBuffer> } spirv.func @f_loop_with_function_call(%count : i32) -> () "None" { diff --git a/mlir/test/Transforms/remove-dead-values-call-segments.mlir b/mlir/test/Transforms/remove-dead-values-call-segments.mlir 
new file mode 100644 index 0000000000000..fed9cabbd2ee8 --- /dev/null +++ b/mlir/test/Transforms/remove-dead-values-call-segments.mlir @@ -0,0 +1,23 @@ +// RUN: mlir-opt --split-input-file --remove-dead-values --mlir-print-op-generic %s | FileCheck %s --check-prefix=GEN + +// ----- +// Private callee: both args become dead after internal DCE; RDV drops callee +// args and shrinks the *args* segment on the call-site to zero; sizes kept in +// sync. + +module { + func.func private @callee(%x: i32, %y: i32) { + %u = arith.addi %x, %x : i32 // %y is dead + return + } + + func.func @caller(%a: i32, %b: i32) { + // args segment initially has 2 operands. + "test.call_with_segments"(%a, %b) { callee = @callee, + operandSegmentSizes = array } : (i32, i32) -> () + return + } +} + +// GEN: "test.call_with_segments"() <{callee = @callee, operandSegmentSizes = array}> : () -> () +// ^ args shrank from 2 -> 0 diff --git a/mlir/test/Transforms/remove-dead-values.mlir b/mlir/test/Transforms/remove-dead-values.mlir index fa2c145bd3701..56449469dc29f 100644 --- a/mlir/test/Transforms/remove-dead-values.mlir +++ b/mlir/test/Transforms/remove-dead-values.mlir @@ -615,3 +615,37 @@ module @last_block_not_exit { // CHECK-LABEL: @call_private_but_not_use // CHECK: call @terminated_with_condbr(%false, %true) : (i1, i1) } + +// ----- + +// Test the elimination of function arguments. + +// CHECK-LABEL: func private @single_parameter +// CHECK-SAME: () { +func.func private @single_parameter(%arg0: index) { + return +} + +// CHECK-LABEL: func.func private @mutl_parameter( +// CHECK-SAME: %[[ARG0:.*]]: index) +// CHECK: return %[[ARG0]] +func.func private @mutl_parameter(%arg0: index, %arg1: index, %arg2: index) -> index { + return %arg1 : index +} + +// CHECK-LABEL: func private @eliminate_parameter +// CHECK-SAME: () { +func.func private @eliminate_parameter(%arg0: index, %arg1: index) { + call @single_parameter(%arg0) : (index) -> () + return +} + +// CHECK-LABEL: func @callee +// CHECK-SAME: (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index) +func.func @callee(%arg0: index, %arg1: index, %arg2: index) -> index { +// CHECK: call @eliminate_parameter() : () -> () + call @eliminate_parameter(%arg0, %arg1) : (index, index) -> () +// CHECK: call @mutl_parameter(%[[ARG1]]) : (index) -> index + %res = call @mutl_parameter(%arg0, %arg1, %arg2) : (index, index, index) -> (index) + return %res : index +} diff --git a/mlir/test/Transforms/test-bubble-down-memory-space-casts.mlir b/mlir/test/Transforms/test-bubble-down-memory-space-casts.mlir new file mode 100644 index 0000000000000..e4fce89cffb45 --- /dev/null +++ b/mlir/test/Transforms/test-bubble-down-memory-space-casts.mlir @@ -0,0 +1,298 @@ +// RUN: mlir-opt %s --bubble-down-memory-space-casts | FileCheck %s + +#map = affine_map<(d0, d1)[s0] -> (d1 * s0 + d0)> + +// CHECK: #[[$ATTR_0:.+]] = affine_map<(d0, d1)[s0] -> (d1 * s0 + d0)> +// CHECK-LABEL: func.func @load_store( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index) { +// CHECK: %[[VAL_0:.*]] = memref.load %[[ARG0]]{{\[}}%[[ARG1]]] : memref +// CHECK: memref.store %[[VAL_0]], %[[ARG0]]{{\[}}%[[ARG1]]] : memref +// CHECK: return +// CHECK: } +func.func @load_store(%arg0: memref, %arg1: index) { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %0 = memref.load %memspacecast[%arg1] : memref + memref.store %0, %memspacecast[%arg1] : memref + return +} + +// CHECK-LABEL: func.func @load_store_unfoldable( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: 
%[[ARG1:.*]]: index) { +// CHECK: %[[VAL_0:.*]] = memref.memory_space_cast %[[ARG0]] : memref to memref +// CHECK: %[[VAL_1:.*]] = memref.load %[[VAL_0]]{{\[}}%[[ARG1]]] : memref +// CHECK: memref.store %[[VAL_1]], %[[VAL_0]]{{\[}}%[[ARG1]]] : memref +// CHECK: return +// CHECK: } +func.func @load_store_unfoldable(%arg0: memref, %arg1: index) { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %0 = memref.load %memspacecast[%arg1] : memref + memref.store %0, %memspacecast[%arg1] : memref + return +} + +// CHECK-LABEL: func.func @cast( +// CHECK-SAME: %[[ARG0:.*]]: memref<2xf32, 1>, +// CHECK-SAME: %[[ARG1:.*]]: memref<*xf32, 1>) -> (memref<*xf32>, memref<3x2xf32>) { +// CHECK: %[[VAL_0:.*]] = memref.cast %[[ARG0]] : memref<2xf32, 1> to memref<*xf32, 1> +// CHECK: %[[VAL_1:.*]] = memref.memory_space_cast %[[VAL_0]] : memref<*xf32, 1> to memref<*xf32> +// CHECK: %[[VAL_2:.*]] = memref.cast %[[ARG1]] : memref<*xf32, 1> to memref<3x2xf32, 1> +// CHECK: %[[VAL_3:.*]] = memref.memory_space_cast %[[VAL_2]] : memref<3x2xf32, 1> to memref<3x2xf32> +// CHECK: return %[[VAL_1]], %[[VAL_3]] : memref<*xf32>, memref<3x2xf32> +// CHECK: } +func.func @cast(%arg0: memref<2xf32, 1>, %arg1: memref<*xf32, 1>) -> (memref<*xf32>, memref<3x2xf32>) { + %memspacecast = memref.memory_space_cast %arg0 : memref<2xf32, 1> to memref<2xf32> + %1 = memref.cast %memspacecast : memref<2xf32> to memref<*xf32> + %memspacecast_1 = memref.memory_space_cast %arg1 : memref<*xf32, 1> to memref<*xf32> + %2 = memref.cast %memspacecast_1 : memref<*xf32> to memref<3x2xf32> + return %1, %2 : memref<*xf32>, memref<3x2xf32> +} + +// CHECK-LABEL: func.func @view( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index, %[[ARG2:.*]]: index) -> memref { +// CHECK: %[[VAL_0:.*]] = arith.constant 100 : index +// CHECK: %[[VAL_1:.*]] = memref.view %[[ARG0]]{{\[}}%[[ARG1]]]{{\[}}%[[ARG2]], %[[VAL_0]]] : memref to memref +// CHECK: %[[VAL_2:.*]] = memref.memory_space_cast %[[VAL_1]] : memref to memref +// CHECK: return %[[VAL_2]] : memref +// CHECK: } +func.func @view(%arg0: memref, %arg1: index, %arg2: index) -> memref { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %c100 = arith.constant 100 : index + %view = memref.view %memspacecast[%arg1][%arg2, %c100] : memref to memref + return %view : memref +} + +// CHECK-LABEL: func.func @subview( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index) -> memref<8x2xf32, strided<[?, 2], offset: ?>> { +// CHECK: %[[VAL_0:.*]] = memref.subview %[[ARG0]][4, 2] [8, 2] [3, 2] : memref to memref<8x2xf32, strided<[?, 2], offset: ?>, 1> +// CHECK: %[[VAL_1:.*]] = memref.memory_space_cast %[[VAL_0]] : memref<8x2xf32, strided<[?, 2], offset: ?>, 1> to memref<8x2xf32, strided<[?, 2], offset: ?>> +// CHECK: return %[[VAL_1]] : memref<8x2xf32, strided<[?, 2], offset: ?>> +// CHECK: } +func.func @subview(%arg0: memref, %arg1: index) -> memref<8x2xf32, strided<[?, 2], offset: ?>> { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %subview = memref.subview %memspacecast[4, 2] [8, 2] [3, 2] : memref to memref<8x2xf32, strided<[?, 2], offset: ?>> + return %subview : memref<8x2xf32, strided<[?, 2], offset: ?>> +} + +// CHECK-LABEL: func.func @reinterpret_cast( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index) -> memref<10x?xf32, strided<[?, 1], offset: ?>> { +// CHECK-DAG: %[[VAL_0:.*]] = arith.constant 10 : index +// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index +// CHECK: %[[VAL_2:.*]] = 
memref.reinterpret_cast %[[ARG0]] to offset: {{\[}}%[[VAL_1]]], sizes: [10, %[[VAL_0]]], strides: {{\[}}%[[VAL_0]], 1] : memref to memref<10x?xf32, strided<[?, 1], offset: ?>, 1> +// CHECK: %[[VAL_3:.*]] = memref.memory_space_cast %[[VAL_2]] : memref<10x?xf32, strided<[?, 1], offset: ?>, 1> to memref<10x?xf32, strided<[?, 1], offset: ?>> +// CHECK: return %[[VAL_3]] : memref<10x?xf32, strided<[?, 1], offset: ?>> +// CHECK: } +func.func @reinterpret_cast(%arg0: memref, %arg1: index) -> memref<10x?xf32, strided<[?, 1], offset: ?>> { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %c0 = arith.constant 0 : index + %c10 = arith.constant 10 : index + %reinterpret_cast = memref.reinterpret_cast %memspacecast to offset: [%c0], sizes: [10, %c10], strides: [%c10, 1] : memref to memref<10x?xf32, strided<[?, 1], offset: ?>> + return %reinterpret_cast : memref<10x?xf32, strided<[?, 1], offset: ?>> +} + +// CHECK-LABEL: func.func @reshape( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: memref<1xindex>) -> memref { +// CHECK: %[[VAL_0:.*]] = memref.reshape %[[ARG0]](%[[ARG1]]) : (memref, memref<1xindex>) -> memref +// CHECK: %[[VAL_1:.*]] = memref.memory_space_cast %[[VAL_0]] : memref to memref +// CHECK: return %[[VAL_1]] : memref +// CHECK: } +func.func @reshape(%arg0: memref, %arg1: memref<1xindex>) -> memref { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %reshape = memref.reshape %memspacecast(%arg1) : (memref, memref<1xindex>) -> memref + return %reshape : memref +} + +// CHECK-LABEL: func.func @expand_shape( +// CHECK-SAME: %[[ARG0:.*]]: memref<12xf32, 1>) -> memref<3x4xf32> { +// CHECK: %[[VAL_0:.*]] = memref.expand_shape %[[ARG0]] {{\[\[}}0, 1]] output_shape [3, 4] : memref<12xf32, 1> into memref<3x4xf32, 1> +// CHECK: %[[VAL_1:.*]] = memref.memory_space_cast %[[VAL_0]] : memref<3x4xf32, 1> to memref<3x4xf32> +// CHECK: return %[[VAL_1]] : memref<3x4xf32> +// CHECK: } +func.func @expand_shape(%arg0: memref<12xf32, 1>) -> memref<3x4xf32> { + %memspacecast = memref.memory_space_cast %arg0 : memref<12xf32, 1> to memref<12xf32> + %expand_shape = memref.expand_shape %memspacecast [[0, 1]] output_shape [3, 4] : memref<12xf32> into memref<3x4xf32> + return %expand_shape : memref<3x4xf32> +} + +// CHECK-LABEL: func.func @collapse_shape( +// CHECK-SAME: %[[ARG0:.*]]: memref<3x4xf32, 1>) -> memref<12xf32> { +// CHECK: %[[VAL_0:.*]] = memref.collapse_shape %[[ARG0]] {{\[\[}}0, 1]] : memref<3x4xf32, 1> into memref<12xf32, 1> +// CHECK: %[[VAL_1:.*]] = memref.memory_space_cast %[[VAL_0]] : memref<12xf32, 1> to memref<12xf32> +// CHECK: return %[[VAL_1]] : memref<12xf32> +// CHECK: } +func.func @collapse_shape(%arg0: memref<3x4xf32, 1>) -> memref<12xf32> { + %memspacecast = memref.memory_space_cast %arg0 : memref<3x4xf32, 1> to memref<3x4xf32> + %collapse_shape = memref.collapse_shape %memspacecast [[0, 1]] : memref<3x4xf32> into memref<12xf32> + return %collapse_shape : memref<12xf32> +} + +// CHECK-LABEL: func.func @transpose( +// CHECK-SAME: %[[ARG0:.*]]: memref) -> memref { +// CHECK: %[[VAL_0:.*]] = memref.transpose %[[ARG0]] (d0, d1) -> (d1, d0) : memref to memref +// CHECK: %[[VAL_1:.*]] = memref.memory_space_cast %[[VAL_0]] : memref to memref +// CHECK: return %[[VAL_1]] : memref +// CHECK: } +func.func @transpose(%arg0: memref) -> memref { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %transpose = memref.transpose %memspacecast (d0, d1) -> (d1, d0) : memref to memref + return %transpose : memref +} + +// 
CHECK-LABEL: func.func @atomic_rmw( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index, +// CHECK-SAME: %[[ARG2:.*]]: f32) -> f32 { +// CHECK: %[[VAL_0:.*]] = memref.atomic_rmw addf %[[ARG2]], %[[ARG0]]{{\[}}%[[ARG1]]] : (f32, memref) -> f32 +// CHECK: return %[[VAL_0]] : f32 +// CHECK: } +func.func @atomic_rmw(%arg0: memref, %arg1: index, %arg2: f32) -> f32 { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %0 = memref.atomic_rmw addf %arg2, %memspacecast[%arg1] : (f32, memref) -> f32 + return %0 : f32 +} + +// CHECK-LABEL: func.func @assume_alignment( +// CHECK-SAME: %[[ARG0:.*]]: memref) -> memref { +// CHECK: %[[VAL_0:.*]] = memref.assume_alignment %[[ARG0]], 16 : memref +// CHECK: %[[VAL_1:.*]] = memref.memory_space_cast %[[VAL_0]] : memref to memref +// CHECK: return %[[VAL_1]] : memref +// CHECK: } +func.func @assume_alignment(%arg0: memref) -> memref { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %1 = memref.assume_alignment %memspacecast, 16 : memref + return %1 : memref +} + +// CHECK-LABEL: func.func @op_with_cast_sequence( +// CHECK-SAME: %[[ARG0:.*]]: memref<4x4xf32, 1>, +// CHECK-SAME: %[[ARG1:.*]]: index, +// CHECK-SAME: %[[ARG2:.*]]: f32) -> memref<16xf32> { +// CHECK-DAG: %[[VAL_0:.*]] = arith.constant 4 : index +// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index +// CHECK: %[[VAL_2:.*]] = memref.expand_shape %[[ARG0]] {{\[\[}}0], [1, 2]] output_shape [4, 2, 2] : memref<4x4xf32, 1> into memref<4x2x2xf32, 1> +// CHECK: %[[VAL_3:.*]] = memref.collapse_shape %[[VAL_2]] {{\[\[}}0, 1, 2]] : memref<4x2x2xf32, 1> into memref<16xf32, 1> +// CHECK: %[[VAL_4:.*]] = memref.memory_space_cast %[[VAL_3]] : memref<16xf32, 1> to memref<16xf32> +// CHECK: %[[VAL_5:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref<16xf32, 1> +// CHECK: %[[VAL_6:.*]] = arith.addf %[[VAL_5]], %[[ARG2]] : f32 +// CHECK: memref.store %[[VAL_6]], %[[VAL_3]]{{\[}}%[[VAL_1]]] : memref<16xf32, 1> +// CHECK: %[[VAL_7:.*]] = memref.atomic_rmw addf %[[ARG2]], %[[VAL_3]]{{\[}}%[[VAL_0]]] : (f32, memref<16xf32, 1>) -> f32 +// CHECK: return %[[VAL_4]] : memref<16xf32> +// CHECK: } +func.func @op_with_cast_sequence(%arg0: memref<4x4xf32, 1>, %arg1: index, %arg2: f32) -> memref<16xf32> { + %memspacecast = memref.memory_space_cast %arg0 : memref<4x4xf32, 1> to memref<4x4xf32> + %c0 = arith.constant 0 : index + %c4 = arith.constant 4 : index + %expanded = memref.expand_shape %memspacecast [[0], [1, 2]] output_shape [4, 2, 2] : memref<4x4xf32> into memref<4x2x2xf32> + %collapsed = memref.collapse_shape %expanded [[0, 1, 2]] : memref<4x2x2xf32> into memref<16xf32> + %loaded = memref.load %collapsed[%c0] : memref<16xf32> + %added = arith.addf %loaded, %arg2 : f32 + memref.store %added, %collapsed[%c0] : memref<16xf32> + %atomic_result = memref.atomic_rmw addf %arg2, %collapsed[%c4] : (f32, memref<16xf32>) -> f32 + return %collapsed : memref<16xf32> +} + +// CHECK-LABEL: func.func @transfer_read_write( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index) { +// CHECK: %[[VAL_0:.*]] = arith.constant 0.000000e+00 : f32 +// CHECK: %[[VAL_1:.*]] = vector.transfer_read %[[ARG0]]{{\[}}%[[ARG1]]], %[[VAL_0]] : memref, vector<4xf32> +// CHECK: vector.transfer_write %[[VAL_1]], %[[ARG0]]{{\[}}%[[ARG1]]] : vector<4xf32>, memref +// CHECK: return +// CHECK: } +func.func @transfer_read_write(%arg0: memref, %arg1: index) { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %c0 = arith.constant 0.0 : f32 + %0 = 
vector.transfer_read %memspacecast[%arg1], %c0 : memref, vector<4xf32> + vector.transfer_write %0, %memspacecast[%arg1] : vector<4xf32>, memref + return +} + +// NOTE: The operations disappear because they can get folded. +// CHECK-LABEL: func.func @transfer_read_write_tensor( +// CHECK-SAME: %[[ARG0:.*]]: tensor, +// CHECK-SAME: %[[ARG1:.*]]: index) -> tensor { +// CHECK: return %[[ARG0]] : tensor +// CHECK: } +func.func @transfer_read_write_tensor(%arg0: tensor, %arg1: index) -> tensor { + %c0 = arith.constant 0.0 : f32 + %0 = vector.transfer_read %arg0[%arg1], %c0 : tensor, vector<4xf32> + %1 = vector.transfer_write %0, %arg0[%arg1] : vector<4xf32>, tensor + return %1 : tensor +} + +// CHECK-LABEL: func.func @vector_load_store( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index) { +// CHECK: %[[VAL_0:.*]] = vector.load %[[ARG0]]{{\[}}%[[ARG1]]] : memref, vector<4xf32> +// CHECK: vector.store %[[VAL_0]], %[[ARG0]]{{\[}}%[[ARG1]]] : memref, vector<4xf32> +// CHECK: return +// CHECK: } +func.func @vector_load_store(%arg0: memref, %arg1: index) { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %0 = vector.load %memspacecast[%arg1] : memref, vector<4xf32> + vector.store %0, %memspacecast[%arg1] : memref, vector<4xf32> + return +} + +// CHECK-LABEL: func.func @masked_load_store( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index) { +// CHECK-DAG: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : vector<4xf32> +// CHECK-DAG: %[[VAL_1:.*]] = arith.constant dense<[true, true, false, false]> : vector<4xi1> +// CHECK: %[[VAL_2:.*]] = vector.maskedload %[[ARG0]]{{\[}}%[[ARG1]]], %[[VAL_1]], %[[VAL_0]] : memref, vector<4xi1>, vector<4xf32> into vector<4xf32> +// CHECK: vector.maskedstore %[[ARG0]]{{\[}}%[[ARG1]]], %[[VAL_1]], %[[VAL_2]] : memref, vector<4xi1>, vector<4xf32> +// CHECK: return +// CHECK: } +func.func @masked_load_store(%arg0: memref, %arg1: index) { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %mask = arith.constant dense<[true, true, false, false]> : vector<4xi1> + %passthrough = arith.constant dense<0.0> : vector<4xf32> + %0 = vector.maskedload %memspacecast[%arg1], %mask, %passthrough : memref, vector<4xi1>, vector<4xf32> into vector<4xf32> + vector.maskedstore %memspacecast[%arg1], %mask, %0 : memref, vector<4xi1>, vector<4xf32> + return +} + +// CHECK-LABEL: func.func @gather_scatter( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index) { +// CHECK-DAG: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : vector<4xf32> +// CHECK-DAG: %[[VAL_1:.*]] = arith.constant dense : vector<4xi1> +// CHECK-DAG: %[[VAL_2:.*]] = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex> +// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index +// CHECK: %[[VAL_4:.*]] = vector.gather %[[ARG0]]{{\[}}%[[VAL_3]]] {{\[}}%[[VAL_2]]], %[[VAL_1]], %[[VAL_0]] : memref, vector<4xindex>, vector<4xi1>, vector<4xf32> into vector<4xf32> +// CHECK: vector.scatter %[[ARG0]]{{\[}}%[[VAL_3]]] {{\[}}%[[VAL_2]]], %[[VAL_1]], %[[VAL_4]] : memref, vector<4xindex>, vector<4xi1>, vector<4xf32> +// CHECK: return +// CHECK: } +func.func @gather_scatter(%arg0: memref, %arg1: index) { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %c0 = arith.constant 0 : index + %indices = arith.constant dense<[0, 1, 2, 3]> : vector<4xindex> + %mask = arith.constant dense : vector<4xi1> + %passthrough = arith.constant dense<0.0> : vector<4xf32> + %0 = vector.gather %memspacecast[%c0] [%indices], %mask, 
%passthrough : memref, vector<4xindex>, vector<4xi1>, vector<4xf32> into vector<4xf32> + vector.scatter %memspacecast[%c0] [%indices], %mask, %0 : memref, vector<4xindex>, vector<4xi1>, vector<4xf32> + return +} + +// CHECK-LABEL: func.func @expandload_compressstore( +// CHECK-SAME: %[[ARG0:.*]]: memref, +// CHECK-SAME: %[[ARG1:.*]]: index) { +// CHECK-DAG: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : vector<4xf32> +// CHECK-DAG: %[[VAL_1:.*]] = arith.constant dense<[true, true, false, false]> : vector<4xi1> +// CHECK: %[[VAL_2:.*]] = vector.expandload %[[ARG0]]{{\[}}%[[ARG1]]], %[[VAL_1]], %[[VAL_0]] : memref, vector<4xi1>, vector<4xf32> into vector<4xf32> +// CHECK: vector.compressstore %[[ARG0]]{{\[}}%[[ARG1]]], %[[VAL_1]], %[[VAL_2]] : memref, vector<4xi1>, vector<4xf32> +// CHECK: return +// CHECK: } +func.func @expandload_compressstore(%arg0: memref, %arg1: index) { + %memspacecast = memref.memory_space_cast %arg0 : memref to memref + %mask = arith.constant dense<[true, true, false, false]> : vector<4xi1> + %passthrough = arith.constant dense<0.0> : vector<4xf32> + %0 = vector.expandload %memspacecast[%arg1], %mask, %passthrough : memref, vector<4xi1>, vector<4xf32> into vector<4xf32> + vector.compressstore %memspacecast[%arg1], %mask, %0 : memref, vector<4xi1>, vector<4xf32> + return +} diff --git a/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp b/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp index d332270468ea8..d45aaf788f9c2 100644 --- a/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp +++ b/mlir/test/lib/Dialect/Linalg/TestDataLayoutPropagation.cpp @@ -33,7 +33,8 @@ struct TestDataLayoutPropagationPass MLIRContext *context = &getContext(); RewritePatternSet patterns(context); linalg::populateDataLayoutPropagationPatterns( - patterns, [](OpOperand *opOperand) { return true; }); + patterns, [](OpOperand *opOperand) { return true; }, + /*poisonPaddingOk=*/true); linalg::ControlPropagationFn controlExtract = [](OpOperand *opOperand) -> bool { Operation *producer = opOperand->get().getDefiningOp(); diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp index 987e8f3654ce8..21d75f58b0a3a 100644 --- a/mlir/test/lib/Dialect/Test/TestDialect.cpp +++ b/mlir/test/lib/Dialect/Test/TestDialect.cpp @@ -431,3 +431,47 @@ void TestDialect::getCanonicalizationPatterns( RewritePatternSet &results) const { results.add(&dialectCanonicalizationPattern); } + +//===----------------------------------------------------------------------===// +// TestCallWithSegmentsOp +//===----------------------------------------------------------------------===// +// The op `test.call_with_segments` models a call-like operation whose operands +// are divided into 3 variadic segments: `prefix`, `args`, and `suffix`. +// Only the middle segment represents the actual call arguments. The op uses +// the AttrSizedOperandSegments trait, so we can derive segment boundaries from +// the generated `operandSegmentSizes` attribute. We provide custom helpers to +// expose the logical call arguments as both a read-only range and a mutable +// range bound to the proper segment so that insertion/erasure updates the +// attribute automatically. + +// Segment layout indices in the DenseI32ArrayAttr: [prefix, args, suffix]. 
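// Illustrative sketch of the intended behaviour (not part of this change and
// not exercised directly by this file): because the mutable range defined
// below is bound to the `args` segment, erasing or appending operands through
// it also rewrites `operandSegmentSizes`. For an op whose segments are
// [1, 2, 1]:
//   MutableOperandRange args = callOp.getArgOperandsMutable();
//   args.erase(0);   // drops one call argument
//   // segments are now [1, 1, 1]; prefix and suffix stay untouched.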
+static constexpr unsigned kTestCallWithSegmentsArgsSegIndex = 1; + +Operation::operand_range CallWithSegmentsOp::getArgOperands() { + // Leverage generated getters for segment sizes: slice between prefix and + // suffix using current operand list. + return getOperation()->getOperands().slice(getPrefix().size(), + getArgs().size()); +} + +MutableOperandRange CallWithSegmentsOp::getArgOperandsMutable() { + Operation *op = getOperation(); + + // Obtain the canonical segment size attribute name for this op. + auto segName = + CallWithSegmentsOp::getOperandSegmentSizesAttrName(op->getName()); + auto sizesAttr = op->getAttrOfType(segName); + assert(sizesAttr && "missing operandSegmentSizes attribute on op"); + + // Compute the start and length of the args segment from the prefix size and + // args size stored in the attribute. + auto sizes = sizesAttr.asArrayRef(); + unsigned start = static_cast(sizes[0]); // prefix size + unsigned len = static_cast(sizes[1]); // args size + + NamedAttribute segNamed(segName, sizesAttr); + MutableOperandRange::OperandSegment binding{kTestCallWithSegmentsArgsSegIndex, + segNamed}; + + return MutableOperandRange(op, start, len, {binding}); +} diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td index d9bbb3261febc..6ea27187655ee 100644 --- a/mlir/test/lib/Dialect/Test/TestOps.td +++ b/mlir/test/lib/Dialect/Test/TestOps.td @@ -3746,4 +3746,47 @@ def TestOpWithSuccessorRef : TEST_Op<"dummy_op_with_successor_ref"> { }]; } +def CallWithSegmentsOp : TEST_Op<"call_with_segments", + [AttrSizedOperandSegments, + DeclareOpInterfaceMethods]> { + let summary = "test call op with segmented args"; + let arguments = (ins + FlatSymbolRefAttr:$callee, + Variadic:$prefix, // non-arg segment (e.g., 'in') + Variadic:$args, // <-- the call *arguments* segment + Variadic:$suffix // non-arg segment (e.g., 'out') + ); + let results = (outs); + let assemblyFormat = [{ + $callee `(` $prefix `:` type($prefix) `)` + `(` $args `:` type($args) `)` + `(` $suffix `:` type($suffix) `)` attr-dict + }]; + + // Provide stub implementations for the ArgAndResultAttrsOpInterface. 
+ let extraClassDeclaration = [{ + ::mlir::ArrayAttr getArgAttrsAttr() { return {}; } + ::mlir::ArrayAttr getResAttrsAttr() { return {}; } + void setArgAttrsAttr(::mlir::ArrayAttr) {} + void setResAttrsAttr(::mlir::ArrayAttr) {} + ::mlir::Attribute removeArgAttrsAttr() { return {}; } + ::mlir::Attribute removeResAttrsAttr() { return {}; } + }]; + + let extraClassDefinition = [{ + ::mlir::CallInterfaceCallable $cppClass::getCallableForCallee() { + if (auto sym = (*this)->getAttrOfType<::mlir::SymbolRefAttr>("callee")) + return ::mlir::CallInterfaceCallable(sym); + return ::mlir::CallInterfaceCallable(); + } + void $cppClass::setCalleeFromCallable(::mlir::CallInterfaceCallable callee) { + if (auto sym = callee.dyn_cast<::mlir::SymbolRefAttr>()) + (*this)->setAttr("callee", sym); + else + (*this)->removeAttr("callee"); + } + }]; +} + + #endif // TEST_OPS diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/CMakeLists.txt b/mlir/test/lib/Dialect/TestIRDLToCpp/CMakeLists.txt index 103bc94d86920..7d325778f09cb 100644 --- a/mlir/test/lib/Dialect/TestIRDLToCpp/CMakeLists.txt +++ b/mlir/test/lib/Dialect/TestIRDLToCpp/CMakeLists.txt @@ -12,5 +12,7 @@ add_mlir_library(MLIRTestIRDLToCppDialect mlir_target_link_libraries(MLIRTestIRDLToCppDialect PUBLIC MLIRIR MLIRPass + MLIRSCFDialect MLIRTransforms + MLIRTestDialect ) diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp b/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp index 9550e4c96e547..421db7e4c0094 100644 --- a/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp +++ b/mlir/test/lib/Dialect/TestIRDLToCpp/TestIRDLToCppDialect.cpp @@ -13,6 +13,7 @@ // #include "mlir/IR/Dialect.h" #include "mlir/IR/Region.h" +#include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" #include "mlir/Interfaces/InferTypeOpInterface.h" @@ -54,16 +55,34 @@ struct TestOpConversion : public OpConversionPattern { } }; +struct TestRegionConversion + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(mlir::test_irdl_to_cpp::ConditionalOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // Just exercising the C++ API even though these are not enforced in the + // dialect definition + assert(op.getThen().getBlocks().size() == 1); + assert(adaptor.getElse().getBlocks().size() == 1); + auto ifOp = scf::IfOp::create(rewriter, op.getLoc(), op.getInput()); + rewriter.replaceOp(op, ifOp); + return success(); + } +}; + struct ConvertTestDialectToSomethingPass : PassWrapper> { void runOnOperation() override { MLIRContext *ctx = &getContext(); RewritePatternSet patterns(ctx); - patterns.add(ctx); + patterns.add(ctx); ConversionTarget target(getContext()); - target.addIllegalOp(); - target.addLegalOp(); - target.addLegalOp(); + target.addIllegalOp(); + target.addLegalOp(); if (failed(applyPartialConversion(getOperation(), target, std::move(patterns)))) signalPassFailure(); @@ -73,6 +92,10 @@ struct ConvertTestDialectToSomethingPass StringRef getDescription() const final { return "Checks the convertability of an irdl dialect"; } + + void getDependentDialects(DialectRegistry ®istry) const override { + registry.insert(); + } }; void registerIrdlTestDialect(mlir::DialectRegistry ®istry) { diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/test_conversion.testd.mlir b/mlir/test/lib/Dialect/TestIRDLToCpp/test_conversion.testd.mlir index f6233ee18190a..1915324ccb459 100644 --- 
a/mlir/test/lib/Dialect/TestIRDLToCpp/test_conversion.testd.mlir +++ b/mlir/test/lib/Dialect/TestIRDLToCpp/test_conversion.testd.mlir @@ -1,15 +1,29 @@ // RUN: mlir-opt %s --pass-pipeline="builtin.module(test-irdl-conversion-check)" | FileCheck %s // CHECK-LABEL: module { module { - // CHECK: func.func @test() { + // CHECK: func.func @test(%[[test_arg:[^ ]*]]: i1) { // CHECK: %[[v0:[^ ]*]] = "test_irdl_to_cpp.bar"() : () -> i32 // CHECK: %[[v1:[^ ]*]] = "test_irdl_to_cpp.bar"() : () -> i32 // CHECK: %[[v2:[^ ]*]] = "test_irdl_to_cpp.hash"(%[[v0]], %[[v0]]) : (i32, i32) -> i32 + // CHECK: scf.if %[[test_arg]] // CHECK: return // CHECK: } - func.func @test() { + func.func @test(%test_arg: i1) { %0 = "test_irdl_to_cpp.bar"() : () -> i32 %1 = "test_irdl_to_cpp.beef"(%0, %0) : (i32, i32) -> i32 + "test_irdl_to_cpp.conditional"(%test_arg) ({ + ^cond(%test: i1): + %3 = "test_irdl_to_cpp.bar"() : () -> i32 + "test.terminator"() : ()->() + }, { + ^then(%what: i1, %ever: i32): + %4 = "test_irdl_to_cpp.bar"() : () -> i32 + "test.terminator"() : ()->() + }, { + ^else(): + %5 = "test_irdl_to_cpp.bar"() : () -> i32 + "test.terminator"() : ()->() + }) : (i1) -> () return } diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp.irdl.mlir b/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp.irdl.mlir index 42e713e0adecd..85fb8cb15acef 100644 --- a/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp.irdl.mlir +++ b/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp.irdl.mlir @@ -2,7 +2,7 @@ // CHECK: class TestIrdlToCpp irdl.dialect @test_irdl_to_cpp { - + // CHECK: class FooType irdl.type @foo @@ -32,4 +32,53 @@ irdl.dialect @test_irdl_to_cpp { irdl.operands(lhs: %0, rhs: %0) irdl.results(res: %0) } + + // CHECK: ConditionalOp declarations + // CHECK: ConditionalOpGenericAdaptorBase + // CHECK: ::mlir::Region &getCond() { return *getRegions()[0]; } + // CHECK: ::mlir::Region &getThen() { return *getRegions()[1]; } + // CHECK: ::mlir::Region &getElse() { return *getRegions()[2]; } + // + // CHECK: class ConditionalOp : public ::mlir::Op::Impl, ::mlir::OpTrait::OpInvariants> + // CHECK: ::mlir::Region &getCond() { return (*this)->getRegion(0); } + // CHECK: ::mlir::Region &getThen() { return (*this)->getRegion(1); } + // CHECK: ::mlir::Region &getElse() { return (*this)->getRegion(2); } + + // CHECK: ConditionalOp definitions + // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_cond + // CHECK: if (!(region.getNumArguments() == 1)) { + // CHECK: failed to verify constraint: region with 1 entry block argument(s) + + // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_then + // CHECK: if (!(true)) { + + // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_else + // CHECK: if (!(region.getNumArguments() == 0)) { + // CHECK: failed to verify constraint: region with 0 entry block argument(s) + + // CHECK: ConditionalOp::build + // CHECK: for (unsigned i = 0; i != 3; ++i) + // CHECK-NEXT: (void)odsState.addRegion(); + + // CHECK: ConditionalOp::verifyInvariantsImpl + // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_cond + // CHECK: failure + // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_then + // CHECK: failure + // CHECK: __mlir_irdl_local_region_constraint_ConditionalOp_else + // CHECK: failure + // CHECK: success + irdl.operation @conditional { + %r0 = irdl.region // Unconstrained region + %r1 = irdl.region() // Region with no entry block arguments + + // TODO(#161018): support irdl.is in irdl-to-cpp + // %v0 = irdl.is i1 // Type constraint: i1 
(boolean) + %v0 = irdl.any + %r2 = irdl.region(%v0) // Region with one i1 entry block argument + irdl.regions(cond: %r2, then: %r0, else: %r1) + + %0 = irdl.any + irdl.operands(input: %0) + } } diff --git a/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp_invalid_unsupported_types.irdl.mlir b/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp_invalid_unsupported_types.irdl.mlir index 403b49235467c..cc2745643db7e 100644 --- a/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp_invalid_unsupported_types.irdl.mlir +++ b/mlir/test/lib/Dialect/TestIRDLToCpp/test_irdl_to_cpp_invalid_unsupported_types.irdl.mlir @@ -7,7 +7,7 @@ irdl.dialect @test_irdl_to_cpp { irdl.results(res: %1) } } -// ----- +// ----- irdl.dialect @test_irdl_to_cpp { irdl.operation @operands_no_any_of { @@ -42,7 +42,7 @@ irdl.dialect @test_irdl_to_cpp { irdl.dialect @test_irdl_to_cpp { irdl.type @ty { - %0 = irdl.any + %0 = irdl.any // expected-error@+1 {{IRDL C++ translation does not yet support translation of irdl.parameters operation}} irdl.parameters(ty: %0) } @@ -50,30 +50,9 @@ irdl.dialect @test_irdl_to_cpp { // ----- -irdl.dialect @test_irdl_to_cpp { - irdl.operation @test_op { - // expected-error@+1 {{IRDL C++ translation does not yet support translation of irdl.region operation}} - %0 = irdl.region() - irdl.regions(reg: %0) - } - -} - -// ----- - -irdl.dialect @test_irdl_to_cpp { - irdl.operation @test_op { - // expected-error@+1 {{IRDL C++ translation does not yet support translation of irdl.regions operation}} - irdl.regions() - } - -} - -// ----- - irdl.dialect @test_irdl_to_cpp { irdl.type @test_derived { // expected-error@+1 {{IRDL C++ translation does not yet support translation of irdl.base operation}} %0 = irdl.base "!builtin.integer" - } + } } diff --git a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp index e1ba45c60ac36..e51cac4286f0c 100644 --- a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp +++ b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp @@ -50,52 +50,71 @@ struct TestXeGPUUnrollingPatterns void runOnOperation() override { MLIRContext *ctx = &getContext(); xegpu::UnrollOptions options; - options.setNativeShapeFn( - [&](Operation *op) -> std::optional> { - if (isa(op)) { - xegpu::TensorDescType tdescTy; - if (auto createNdOp = dyn_cast(op)) { - tdescTy = createNdOp.getType(); - } else if (auto updateNdOp = - dyn_cast(op)) { - tdescTy = updateNdOp.getTensorDescType(); - } else if (auto prefetchNdOp = dyn_cast(op)) { - tdescTy = prefetchNdOp.getTensorDescType(); - } else if (auto loadNdOp = dyn_cast(op)) { - tdescTy = loadNdOp.getTensorDescType(); - } else if (auto storeNdOp = dyn_cast(op)) { - tdescTy = storeNdOp.getTensorDescType(); - } else if (auto createOp = dyn_cast(op)) { - tdescTy = createOp.getType(); - } else if (auto updateOp = dyn_cast(op)) { - tdescTy = updateOp.getTensorDescType(); - } else if (auto prefetchOp = dyn_cast(op)) { - tdescTy = prefetchOp.getTensorDescType(); - } else if (auto loadOp = dyn_cast(op)) { - tdescTy = loadOp.getTensorDescType(); - } else if (auto storeOp = dyn_cast(op)) { - tdescTy = storeOp.getTensorDescType(); + options.setNativeShapeFn([&](Operation *op) + -> std::optional> { + if (isa(op)) { + xegpu::TensorDescType tdescTy; + if (auto createNdOp = dyn_cast(op)) { + tdescTy = createNdOp.getType(); + } else if (auto updateNdOp = dyn_cast(op)) { + tdescTy = updateNdOp.getTensorDescType(); + } else if (auto prefetchNdOp = dyn_cast(op)) { + tdescTy = prefetchNdOp.getTensorDescType(); + } 
else if (auto loadNdOp = dyn_cast(op)) { + tdescTy = loadNdOp.getTensorDescType(); + } else if (auto storeNdOp = dyn_cast(op)) { + tdescTy = storeNdOp.getTensorDescType(); + } else if (auto createOp = dyn_cast(op)) { + tdescTy = createOp.getType(); + } else if (auto updateOp = dyn_cast(op)) { + tdescTy = updateOp.getTensorDescType(); + } else if (auto prefetchOp = dyn_cast(op)) { + tdescTy = prefetchOp.getTensorDescType(); + } else if (auto loadOp = dyn_cast(op)) { + if (loadOp.getOffsets()) { + auto layout = xegpu::getDistributeLayoutAttr(loadOp.getResult()); + if (layout && layout.isForSubgroup()) { + auto inst_data = layout.getEffectiveInstDataAsInt(); + if (!inst_data.empty()) + return SmallVector(inst_data.begin(), inst_data.end()); } - - if (auto layout = tdescTy.getLayoutAttr()) { - auto inst_data = layout.getInstData(); - if (inst_data && layout.isForSubgroup()) - return SmallVector(inst_data.asArrayRef().begin(), - inst_data.asArrayRef().end()); + return std::nullopt; + } + tdescTy = loadOp.getTensorDescType(); + } else if (auto storeOp = dyn_cast(op)) { + if (storeOp.getOffsets()) { + auto layout = llvm::dyn_cast_or_null( + op->getAttr("layout")); + if (layout && layout.isForSubgroup()) { + auto inst_data = layout.getEffectiveInstDataAsInt(); + if (!inst_data.empty()) + return SmallVector(inst_data.begin(), inst_data.end()); } + return std::nullopt; } + tdescTy = storeOp.getTensorDescType(); + } - if (isa(op)) - return SmallVector{8, 16, 16}; + if (auto layout = tdescTy.getLayoutAttr()) { + auto inst_data = layout.getInstData(); + if (inst_data && layout.isForSubgroup()) + return SmallVector(inst_data.asArrayRef().begin(), + inst_data.asArrayRef().end()); + } + } - return std::nullopt; - }); + if (isa(op)) + return SmallVector{8, 16, 16}; + + return std::nullopt; + }); options.setUnrolledTypesFn( - [&](ShapedType type, ArrayRef tileShape) -> SmallVector { + [&](ShapedType type, ArrayRef tileShape, + bool returnSingleType = false) -> SmallVector { Type elemTy = type.getElementType(); Type newTy; @@ -137,6 +156,8 @@ struct TestXeGPUUnrollingPatterns newTy = type.clone(tileShape, elemTy); } + if (returnSingleType) + return SmallVector{newTy}; std::optional> ratio = computeShapeRatio(type.getShape(), tileShape); assert(ratio && "Expecting the ratio to be valid."); @@ -152,8 +173,6 @@ struct TestXeGPUUnrollingPatterns #undef DEBUG_TYPE #define DEBUG_TYPE "test-xegpu-layout-interface" -#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") -#define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") // Test pattern for distributing vector::StepOp from workgroup to subgroup. 
// Validates DistributeLayoutAttr interfaces for offset computation diff --git a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterfaceTransformOps.cpp b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterfaceTransformOps.cpp index 7981c72c2f2c8..326fec3ee5cf0 100644 --- a/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterfaceTransformOps.cpp +++ b/mlir/test/lib/Interfaces/TilingInterface/TestTilingInterfaceTransformOps.cpp @@ -581,7 +581,8 @@ DiagnosedSilenceableFailure transform::TestTileUsingCustomLoopOp::apply( }; scf::SCFTilingOptions::GenerateLoopTerminatorFn terminatorFn = - [&](RewriterBase &rewriter, Location loc, ValueRange tiledResults, + [&](RewriterBase &rewriter, Location loc, + ArrayRef loops, ValueRange tiledResults, ArrayRef> resultOffsets, ArrayRef> resultSizes, ValueRange destinationTensors) -> LogicalResult { diff --git a/mlir/test/lib/Transforms/TestSingleFold.cpp b/mlir/test/lib/Transforms/TestSingleFold.cpp index 5bd9dd2a1f075..e55f36aea0a7c 100644 --- a/mlir/test/lib/Transforms/TestSingleFold.cpp +++ b/mlir/test/lib/Transforms/TestSingleFold.cpp @@ -26,6 +26,9 @@ struct TestSingleFold : public PassWrapper>, public RewriterBase::Listener { MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestSingleFold) + TestSingleFold() = default; + TestSingleFold(const TestSingleFold &pass) : PassWrapper(pass) {} + StringRef getArgument() const final { return "test-single-fold"; } StringRef getDescription() const final { return "Test single-pass operation folding and dead constant elimination"; @@ -45,13 +48,18 @@ struct TestSingleFold : public PassWrapper>, if (it != existingConstants.end()) existingConstants.erase(it); } + + Option maxIterations{*this, "max-iterations", + llvm::cl::desc("Max iterations in the tryToFold"), + llvm::cl::init(1)}; }; } // namespace void TestSingleFold::foldOperation(Operation *op, OperationFolder &helper) { // Attempt to fold the specified operation, including handling unused or // duplicated constants. - (void)helper.tryToFold(op); + bool inPlaceUpdate = false; + (void)helper.tryToFold(op, &inPlaceUpdate, maxIterations); } void TestSingleFold::runOnOperation() { diff --git a/mlir/test/lit.site.cfg.py.in b/mlir/test/lit.site.cfg.py.in index 2fc595dfabbf5..1aaf7989e3ce5 100644 --- a/mlir/test/lit.site.cfg.py.in +++ b/mlir/test/lit.site.cfg.py.in @@ -15,6 +15,8 @@ config.native_target = "@LLVM_NATIVE_ARCH@" config.host_os = "@HOST_OS@" config.host_cc = "@HOST_CC@" config.host_cxx = "@HOST_CXX@" +config.host_c_compiler_launcher = "@CMAKE_C_COMPILER_LAUNCHER@" +config.host_cxx_compiler_launcher = "@CMAKE_CXX_COMPILER_LAUNCHER@" config.enable_libcxx = "@LLVM_ENABLE_LIBCXX@" config.host_cmake = "@CMAKE_COMMAND@" config.host_cmake_generator = "@CMAKE_GENERATOR@" @@ -58,6 +60,7 @@ config.mlir_run_cuda_sm80_tests = @MLIR_RUN_CUDA_SM80_TESTS@ config.mlir_run_cuda_sm80_lt_tests = @MLIR_RUN_CUDA_SM80_LT_TESTS@ config.mlir_run_cuda_sm90_tests = @MLIR_RUN_CUDA_SM90_TESTS@ config.mlir_include_integration_tests = @MLIR_INCLUDE_INTEGRATION_TESTS@ +config.llvm_shared_libs_build = @BUILD_SHARED_LIBS@ config.arm_emulator_executable = "@ARM_EMULATOR_EXECUTABLE@" # Some tests marked with 'UNSUPPORTED: target=aarch64{{.*}}' are still run when # configured with ARM_EMULATOR_EXECUTABLE and the default target is not aarch64. 
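A minimal sketch, not taken from this patch, of how a lit configuration fragment might consume the two launcher variables added to lit.site.cfg.py.in above; it assumes the common pattern of prepending a CMake compiler launcher such as ccache to the host compiler commands, and the actual in-tree consumer may differ:

    # Hypothetical lit.cfg.py fragment (illustrative only).
    host_cc = config.host_cc
    host_cxx = config.host_cxx
    if config.host_c_compiler_launcher:
        host_cc = config.host_c_compiler_launcher + " " + host_cc
    if config.host_cxx_compiler_launcher:
        host_cxx = config.host_cxx_compiler_launcher + " " + host_cxx
    config.substitutions.append(("%host_cc", host_cc))
    config.substitutions.append(("%host_cxx", host_cxx))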
diff --git a/mlir/test/mlir-tblgen/attr-duplicated-builder-error.td b/mlir/test/mlir-tblgen/attr-duplicated-builder-error.td new file mode 100644 index 0000000000000..5f1c61a3a505d --- /dev/null +++ b/mlir/test/mlir-tblgen/attr-duplicated-builder-error.td @@ -0,0 +1,48 @@ +// RUN: not mlir-tblgen -gen-attrdef-decls -I %S/../../include %s 2>&1 | FileCheck %s + +include "mlir/IR/OpBase.td" + +def Test_Dialect : Dialect { + let name = "test"; + let cppNamespace = "::test"; +} + +class TestAttr traits = []> + : AttrDef { + let mnemonic = attrMnemonic; +} + +def TestAttr : TestAttr<"Test", "test"> { + let summary = "Test attribute"; + let description = "Test attribute"; + + let parameters = (ins AttrParameter<"std::int64_t", "arg">:$arg); + let builders = [AttrBuilder<(ins "std::int64_t":$arg), [{ + return $_get($_ctxt, arg); + }]>]; + + let assemblyFormat = "`<` $arg `>`"; + + let skipDefaultBuilders = 0; + let genVerifyDecl = 1; + let genMnemonicAlias = 1; +} + +def Test_TestAttrOp : Op { + let summary = "test operation with attribute"; + let description = "test operation with attribute"; + + let arguments = (ins TestAttr:$testAttr); + let assemblyFormat = "$testAttr attr-dict"; +} + +// CHECK: attr-duplicated-builder-error.td:20:7: error: builder `get` conflicts with an existing builder. +// CHECK-NEXT: let builders = [AttrBuilder<(ins "std::int64_t":$arg), [{ +// CHECK-NEXT: ^ +// CHECK-NEXT: note: A new builder with signature: +// CHECK-NEXT: static TestAttr get(::mlir::MLIRContext *context, std::int64_t arg); +// CHECK-EMPTY: +// CHECK-NEXT: is shadowed by an existing builder with signature: +// CHECK-NEXT: static TestAttr get(::mlir::MLIRContext *context, std::int64_t arg); +// CHECK-EMPTY: +// CHECK-NEXT: Please remove one of the conflicting definitions. diff --git a/mlir/test/mlir-tblgen/attr-duplicated-custom-builders-error.td b/mlir/test/mlir-tblgen/attr-duplicated-custom-builders-error.td new file mode 100644 index 0000000000000..0e09f667c1ccd --- /dev/null +++ b/mlir/test/mlir-tblgen/attr-duplicated-custom-builders-error.td @@ -0,0 +1,52 @@ +// RUN: not mlir-tblgen -gen-attrdef-decls -I %S/../../include %s 2>&1 | FileCheck %s + +include "mlir/IR/OpBase.td" + +def Test_Dialect : Dialect { + let name = "test"; + let cppNamespace = "::test"; +} + +class TestAttr traits = []> + : AttrDef { + let mnemonic = attrMnemonic; +} + +def TestAttr : TestAttr<"Test", "test"> { + let summary = "Test attribute"; + let description = "Test attribute"; + + let parameters = (ins AttrParameter<"std::int64_t", "arg">:$arg); + let builders = [AttrBuilder<(ins "std::int64_t":$arg), [{ + return $_get($_ctxt, arg); + }]>, + AttrBuilder<(ins "std::int64_t":$arg), [{ + // Duplicated builder + return $_get($_ctxt, arg); + }]>]; + + let assemblyFormat = "`<` $arg `>`"; + + let skipDefaultBuilders = 1; + let genVerifyDecl = 1; + let genMnemonicAlias = 1; +} + +def Test_TestAttrOp : Op { + let summary = "test operation with attribute"; + let description = "test operation with attribute"; + + let arguments = (ins TestAttr:$testAttr); + let assemblyFormat = "$testAttr attr-dict"; +} + +// CHECK: attr-duplicated-custom-builders-error.td:20:7: error: builder `get` conflicts with an existing builder.
+// CHECK-NEXT: let builders = [AttrBuilder<(ins "std::int64_t":$arg), [{ +// CHECK-NEXT: ^ +// CHECK-NEXT: note: A new builder with signature: +// CHECK-NEXT: static TestAttr get(::mlir::MLIRContext *context, std::int64_t arg); +// CHECK-EMPTY: +// CHECK-NEXT: is shadowed by an existing builder with signature: +// CHECK-NEXT: static TestAttr get(::mlir::MLIRContext *context, std::int64_t arg); +// CHECK-EMPTY: +// CHECK-NEXT: Please remove one of the conflicting definitions. diff --git a/mlir/test/mlir-tblgen/op-format-invalid.td b/mlir/test/mlir-tblgen/op-format-invalid.td index 2f29543f67381..0a022ad43a749 100644 --- a/mlir/test/mlir-tblgen/op-format-invalid.td +++ b/mlir/test/mlir-tblgen/op-format-invalid.td @@ -307,7 +307,7 @@ def DirectiveTypeZOperandInvalidI : TestFormat_Op<[{ def LiteralInvalidA : TestFormat_Op<[{ `a:` }]>; -// CHECK: error: expected valid literal but got '1': single character literal must be a letter or one of '_:,=<>()[]{}?+*' +// CHECK: error: expected valid literal but got '1': single character literal must be a letter or one of '_:,=<>()[]{}?+-*' def LiteralInvalidB : TestFormat_Op<[{ `1` }]>; diff --git a/mlir/test/mlir-tblgen/op-format-spec.td b/mlir/test/mlir-tblgen/op-format-spec.td index 1541cd09f53e0..1ac231116454b 100644 --- a/mlir/test/mlir-tblgen/op-format-spec.td +++ b/mlir/test/mlir-tblgen/op-format-spec.td @@ -123,7 +123,7 @@ def DirectiveTypeValid : TestFormat_Op<[{ // CHECK-NOT: error def LiteralValid : TestFormat_Op<[{ - `_` `:` `,` `=` `<` `>` `(` `)` `[` `]` `?` `+` `*` ` ` `` `->` `\n` `abc$._` + `_` `:` `,` `=` `<` `>` `(` `)` `[` `]` `?` `+` `-` `*` ` ` `` `->` `\n` `abc$._` attr-dict }]>; diff --git a/mlir/test/python/dialects/python_test.py b/mlir/test/python/dialects/python_test.py index 6ac25e129dacc..1194e32c960c8 100644 --- a/mlir/test/python/dialects/python_test.py +++ b/mlir/test/python/dialects/python_test.py @@ -904,7 +904,7 @@ def types(lst): assert ( typing.get_type_hints(test.same_variadic_result_vfv)["return"] - is Union[OpResult, OpResultList, test.SameVariadicResultSizeOpVFV] + == Union[OpResult, OpResultList, test.SameVariadicResultSizeOpVFV] ) assert ( type(test.same_variadic_result_vfv([i[0], i[1]], i[2], [i[3], i[4]])) @@ -992,7 +992,7 @@ def types(lst): assert ( typing.get_type_hints(test.results_variadic)["return"] - is Union[OpResult, OpResultList, test.ResultsVariadicOp] + == Union[OpResult, OpResultList, test.ResultsVariadicOp] ) assert type(test.results_variadic([i[0]])) is OpResult op_res_variadic = test.ResultsVariadicOp([i[0]]) @@ -1003,7 +1003,7 @@ def types(lst): assert type(op_res_variadic.res) is OpResultList -# CHECK-LABEL: TEST: testVariadicAndNormalRegion +# CHECK-LABEL: TEST: testVariadicAndNormalRegionOp @run def testVariadicAndNormalRegionOp(): with Context() as ctx, Location.unknown(ctx): @@ -1024,3 +1024,6 @@ def testVariadicAndNormalRegionOp(): is RegionSequence ) assert type(region_op.variadic) is RegionSequence + + assert isinstance(region_op.opview, OpView) + assert isinstance(region_op.operation.opview, OpView) diff --git a/mlir/test/python/dialects/transform_tune_ext.py b/mlir/test/python/dialects/transform_tune_ext.py index dfb93594bca52..eb2a083211ef7 100644 --- a/mlir/test/python/dialects/transform_tune_ext.py +++ b/mlir/test/python/dialects/transform_tune_ext.py @@ -1,21 +1,21 @@ # RUN: %PYTHON %s | FileCheck %s -from mlir.ir import * +from mlir import ir from mlir.dialects import transform from mlir.dialects.transform import tune, debug def run(f): - print("\nTEST:", f.__name__) - with 
Context(), Location.unknown(): - module = Module.create() - with InsertionPoint(module.body): + print("\n// TEST:", f.__name__) + with ir.Context(), ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): sequence = transform.SequenceOp( transform.FailurePropagationMode.Propagate, [], transform.AnyOpType.get(), ) - with InsertionPoint(sequence.body): + with ir.InsertionPoint(sequence.body): f(sequence.bodyTarget) transform.YieldOp() print(module) @@ -29,10 +29,10 @@ def testKnobOp(target): # CHECK: %[[HEADS_OR_TAILS:.*]] = transform.tune.knob<"coin"> options = [true, false] -> !transform.any_param heads_or_tails = tune.KnobOp( - result=any_param, name=StringAttr.get("coin"), options=[True, False] + result=any_param, name=ir.StringAttr.get("coin"), options=[True, False] ) # CHECK: transform.tune.knob<"animal"> options = ["cat", "dog", unit] -> !transform.any_param - tune.KnobOp(any_param, name="animal", options=["cat", "dog", UnitAttr.get()]) + tune.KnobOp(any_param, name="animal", options=["cat", "dog", ir.UnitAttr.get()]) # CHECK: transform.tune.knob<"tile_size"> options = [2, 4, 8, 16, 24, 32] -> !transform.any_param tune.KnobOp(any_param, "tile_size", [2, 4, 8, 16, 24, 32]) # CHECK: transform.tune.knob<"magic_value"> options = [2.000000e+00, 2.250000e+00, 2.500000e+00, 2.750000e+00, 3.000000e+00] -> !transform.any_param @@ -45,7 +45,10 @@ def testKnobOp(target): heads = tune.KnobOp(any_param, "coin", options=[True, False], selected=True) # CHECK: transform.tune.knob<"animal"> = "dog" from options = ["cat", "dog", unit] -> !transform.any_param tune.KnobOp( - any_param, name="animal", options=["cat", "dog", UnitAttr.get()], selected="dog" + any_param, + name="animal", + options=["cat", "dog", ir.UnitAttr.get()], + selected="dog", ) # CHECK: transform.tune.knob<"tile_size"> = 8 : i64 from options = [2, 4, 8, 16, 24, 32] -> !transform.any_param tune.KnobOp(any_param, "tile_size", [2, 4, 8, 16, 24, 32], selected=8) @@ -57,16 +60,90 @@ def testKnobOp(target): # CHECK: transform.tune.knob<"range_as_a_dict"> = 4 : i64 from options = {start = 2 : i64, step = 2 : i64, stop = 16 : i64} -> !transform.any_param # NB: Membership of `selected` in non-ArrayAttr `options` is _not_ verified. 
- i64 = IntegerType.get_signless(64) + i64 = ir.IntegerType.get_signless(64) tune.knob( any_param, "range_as_a_dict", - DictAttr.get( + ir.DictAttr.get( { - "start": IntegerAttr.get(i64, 2), - "stop": IntegerAttr.get(i64, 16), - "step": IntegerAttr.get(i64, 2), + "start": ir.IntegerAttr.get(i64, 2), + "stop": ir.IntegerAttr.get(i64, 16), + "step": ir.IntegerAttr.get(i64, 2), } ), selected=4, ) + + +# CHECK-LABEL: TEST: testAlternativesOp +@run +def testAlternativesOp(target): + any_param = transform.AnyParamType.get() + + # CHECK: %[[LEFT_OR_RIGHT_OUTCOME:.*]] = transform.tune.alternatives<"left_or_right"> -> !transform.any_param { + left_or_right = tune.AlternativesOp( + [transform.AnyParamType.get()], "left_or_right", 2 + ) + idx_for_left, idx_for_right = 0, 1 + with ir.InsertionPoint(left_or_right.alternatives[idx_for_left].blocks[0]): + # CHECK: %[[C0:.*]] = transform.param.constant 0 + i32_0 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 0) + c0 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_0) + # CHECK: transform.yield %[[C0]] + transform.yield_(c0) + # CHECK-NEXT: }, { + with ir.InsertionPoint(left_or_right.alternatives[idx_for_right].blocks[0]): + # CHECK: %[[C1:.*]] = transform.param.constant 1 + i32_1 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 1) + c1 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_1) + # CHECK: transform.yield %[[C1]] + transform.yield_(c1) + # CHECK-NEXT: } + outcome_of_left_or_right_decision = left_or_right.results[0] + + # CHECK: transform.tune.alternatives<"fork_in_the_road"> selected_region = 0 -> !transform.any_param { + fork_in_the_road = tune.AlternativesOp( + [transform.AnyParamType.get()], "fork_in_the_road", 2, selected_region=0 + ) + with ir.InsertionPoint(fork_in_the_road.alternatives[idx_for_left].blocks[0]): + # CHECK: %[[C0:.*]] = transform.param.constant 0 + i32_0 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 0) + c0 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_0) + # CHECK: transform.yield %[[C0]] + transform.yield_(c0) + # CHECK-NEXT: }, { + with ir.InsertionPoint(fork_in_the_road.alternatives[idx_for_right].blocks[0]): + # CHECK: %[[C1:.*]] = transform.param.constant 1 + i32_1 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 1) + c1 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_1) + # CHECK: transform.yield %[[C1]] + transform.yield_(c1) + # CHECK-NEXT: } + + # CHECK: transform.tune.alternatives<"left_or_right_as_before"> selected_region = %[[LEFT_OR_RIGHT_OUTCOME]] : !transform.any_param { + left_or_right_as_before = tune.AlternativesOp( + [], + "left_or_right_as_before", + 2, + selected_region=outcome_of_left_or_right_decision, + ) + with ir.InsertionPoint( + left_or_right_as_before.alternatives[idx_for_left].blocks[0] + ): + # CHECK: transform.param.constant 1337 + i32_1337 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 1337) + c1337 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_1337) + # CHECK: transform.debug.emit_param_as_remark + debug.emit_param_as_remark(c1337) + transform.yield_([]) + # CHECK-NEXT: }, { + with ir.InsertionPoint( + left_or_right_as_before.alternatives[idx_for_right].blocks[0] + ): + # CHECK: transform.param.constant 42 + i32_42 = ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 42) + c42 = transform.ParamConstantOp(transform.AnyParamType.get(), i32_42) + # CHECK: transform.debug.emit_param_as_remark + debug.emit_param_as_remark(c42) + transform.yield_([]) + # CHECK-NEXT: } diff --git 
a/mlir/test/python/dialects/transform_vector_ext.py b/mlir/test/python/dialects/transform_vector_ext.py index 28902b012f7cb..0cd9333dc1218 100644 --- a/mlir/test/python/dialects/transform_vector_ext.py +++ b/mlir/test/python/dialects/transform_vector_ext.py @@ -74,9 +74,9 @@ def enum_configurable_patterns(): # CHECK: transform.apply_patterns.vector.lower_contraction vector.ApplyLowerContractionPatternsOp() # CHECK: transform.apply_patterns.vector.lower_contraction - # CHECK-SAME: lowering_strategy = matmulintrinsics + # CHECK-SAME: lowering_strategy = llvmintr vector.ApplyLowerContractionPatternsOp( - lowering_strategy=vector.VectorContractLowering.Matmul + lowering_strategy=vector.VectorContractLowering.LLVMIntr ) # CHECK: transform.apply_patterns.vector.lower_contraction # CHECK-SAME: lowering_strategy = parallelarith @@ -105,9 +105,9 @@ def enum_configurable_patterns(): lowering_strategy=vector.VectorTransposeLowering.EltWise ) # CHECK: transform.apply_patterns.vector.lower_transpose - # CHECK-SAME: lowering_strategy = flat_transpose + # CHECK-SAME: lowering_strategy = llvmintr vector.ApplyLowerTransposePatternsOp( - lowering_strategy=vector.VectorTransposeLowering.Flat + lowering_strategy=vector.VectorTransposeLowering.LLVMIntr ) # CHECK: transform.apply_patterns.vector.lower_transpose # CHECK-SAME: lowering_strategy = shuffle_1d @@ -120,10 +120,10 @@ def enum_configurable_patterns(): lowering_strategy=vector.VectorTransposeLowering.Shuffle16x16 ) # CHECK: transform.apply_patterns.vector.lower_transpose - # CHECK-SAME: lowering_strategy = flat_transpose + # CHECK-SAME: lowering_strategy = llvmintr # CHECK-SAME: avx2_lowering_strategy = true vector.ApplyLowerTransposePatternsOp( - lowering_strategy=vector.VectorTransposeLowering.Flat, + lowering_strategy=vector.VectorTransposeLowering.LLVMIntr, avx2_lowering_strategy=True, ) diff --git a/mlir/test/python/integration/dialects/pdl.py b/mlir/test/python/integration/dialects/pdl.py index dd6c74ce622c8..c8e6197e03842 100644 --- a/mlir/test/python/integration/dialects/pdl.py +++ b/mlir/test/python/integration/dialects/pdl.py @@ -86,3 +86,154 @@ def add_func(a, b): frozen = get_pdl_patterns() apply_patterns_and_fold_greedily(module_.operation, frozen) return module_ + + +# If we use arith.constant and arith.addi here, +# these C++-defined folding/canonicalization will be applied +# implicitly in the greedy pattern rewrite driver to +# make our Python-defined folding useless, +# so here we define a new dialect to workaround this. +def load_myint_dialect(): + from mlir.dialects import irdl + + m = Module.create() + with InsertionPoint(m.body): + myint = irdl.dialect("myint") + with InsertionPoint(myint.body): + constant = irdl.operation_("constant") + with InsertionPoint(constant.body): + iattr = irdl.base(base_name="#builtin.integer") + i32 = irdl.is_(TypeAttr.get(IntegerType.get_signless(32))) + irdl.attributes_([iattr], ["value"]) + irdl.results_([i32], ["cst"], [irdl.Variadicity.single]) + add = irdl.operation_("add") + with InsertionPoint(add.body): + i32 = irdl.is_(TypeAttr.get(IntegerType.get_signless(32))) + irdl.operands_( + [i32, i32], + ["lhs", "rhs"], + [irdl.Variadicity.single, irdl.Variadicity.single], + ) + irdl.results_([i32], ["res"], [irdl.Variadicity.single]) + + m.operation.verify() + irdl.load_dialects(m) + + +# This PDL pattern is to fold constant additions, +# i.e. add(constant0, constant1) -> constant2 +# where constant2 = constant0 + constant1. 
+def get_pdl_pattern_fold(): + m = Module.create() + i32 = IntegerType.get_signless(32) + with InsertionPoint(m.body): + + @pdl.pattern(benefit=1, sym_name="myint_add_fold") + def pat(): + t = pdl.TypeOp(i32) + a0 = pdl.AttributeOp() + a1 = pdl.AttributeOp() + c0 = pdl.OperationOp( + name="myint.constant", attributes={"value": a0}, types=[t] + ) + c1 = pdl.OperationOp( + name="myint.constant", attributes={"value": a1}, types=[t] + ) + v0 = pdl.ResultOp(c0, 0) + v1 = pdl.ResultOp(c1, 0) + op0 = pdl.OperationOp(name="myint.add", args=[v0, v1], types=[t]) + + @pdl.rewrite() + def rew(): + sum = pdl.apply_native_rewrite( + [pdl.AttributeType.get()], "add_fold", [a0, a1] + ) + newOp = pdl.OperationOp( + name="myint.constant", attributes={"value": sum}, types=[t] + ) + pdl.ReplaceOp(op0, with_op=newOp) + + @pdl.pattern(benefit=1, sym_name="myint_add_zero_fold") + def pat(): + t = pdl.TypeOp(i32) + v0 = pdl.OperandOp() + v1 = pdl.OperandOp() + v = pdl.apply_native_constraint([pdl.ValueType.get()], "has_zero", [v0, v1]) + op0 = pdl.OperationOp(name="myint.add", args=[v0, v1], types=[t]) + + @pdl.rewrite() + def rew(): + pdl.ReplaceOp(op0, with_values=[v]) + + def add_fold(rewriter, results, values): + a0, a1 = values + results.append(IntegerAttr.get(i32, a0.value + a1.value)) + + def is_zero(value): + op = value.owner + if isinstance(op, Operation): + return op.name == "myint.constant" and op.attributes["value"].value == 0 + return False + + # Check if either operand is a constant zero, + # and append the other operand to the results if so. + def has_zero(rewriter, results, values): + v0, v1 = values + if is_zero(v0): + results.append(v1) + return False + if is_zero(v1): + results.append(v0) + return False + return True + + pdl_module = PDLModule(m) + pdl_module.register_rewrite_function("add_fold", add_fold) + pdl_module.register_constraint_function("has_zero", has_zero) + return pdl_module.freeze() + + +# CHECK-LABEL: TEST: test_pdl_register_function +# CHECK: "myint.constant"() {value = 8 : i32} : () -> i32 +@construct_and_print_in_module +def test_pdl_register_function(module_): + load_myint_dialect() + + module_ = Module.parse( + """ + %c0 = "myint.constant"() { value = 2 }: () -> (i32) + %c1 = "myint.constant"() { value = 3 }: () -> (i32) + %x = "myint.add"(%c0, %c1): (i32, i32) -> (i32) + "myint.add"(%x, %c1): (i32, i32) -> (i32) + """ + ) + + frozen = get_pdl_pattern_fold() + apply_patterns_and_fold_greedily(module_, frozen) + + return module_ + + +# CHECK-LABEL: TEST: test_pdl_register_function_constraint +# CHECK: return %arg0 : i32 +@construct_and_print_in_module +def test_pdl_register_function_constraint(module_): + load_myint_dialect() + + module_ = Module.parse( + """ + func.func @f(%x : i32) -> i32 { + %c0 = "myint.constant"() { value = 1 }: () -> (i32) + %c1 = "myint.constant"() { value = -1 }: () -> (i32) + %a = "myint.add"(%c0, %c1): (i32, i32) -> (i32) + %b = "myint.add"(%a, %x): (i32, i32) -> (i32) + %c = "myint.add"(%b, %a): (i32, i32) -> (i32) + func.return %c : i32 + } + """ + ) + + frozen = get_pdl_pattern_fold() + apply_patterns_and_fold_greedily(module_, frozen) + + return module_ diff --git a/mlir/test/python/ir/builtin_types.py b/mlir/test/python/ir/builtin_types.py index b42bfd9bc6587..54863253fc770 100644 --- a/mlir/test/python/ir/builtin_types.py +++ b/mlir/test/python/ir/builtin_types.py @@ -371,11 +371,16 @@ def testAbstractShapedType(): # CHECK-LABEL: TEST: testVectorType @run def testVectorType(): + shape = [2, 3] + with Context(): + f32 = F32Type.get() + # 
CHECK: unchecked vector type: vector<2x3xf32> + print("unchecked vector type:", VectorType.get_unchecked(shape, f32)) + with Context(), Location.unknown(): f32 = F32Type.get() - shape = [2, 3] - # CHECK: vector type: vector<2x3xf32> - print("vector type:", VectorType.get(shape, f32)) + # CHECK: checked vector type: vector<2x3xf32> + print("checked vector type:", VectorType.get(shape, f32)) none = NoneType.get() try: diff --git a/mlir/test/python/ir/operation.py b/mlir/test/python/ir/operation.py index 4a3625c953d52..cb4cfc8c8a6ec 100644 --- a/mlir/test/python/ir/operation.py +++ b/mlir/test/python/ir/operation.py @@ -696,6 +696,7 @@ def testOperationPrint(): # CHECK: resource1: "0x08 module.operation.print(large_elements_limit=2) + # CHECK-LABEL: TEST: testKnownOpView @run def testKnownOpView(): @@ -969,6 +970,13 @@ def testOperationLoc(): assert op.location == loc assert op.operation.location == loc + another_loc = Location.name("another_loc") + op.location = another_loc + assert op.location == another_loc + assert op.operation.location == another_loc + # CHECK: loc("another_loc") + print(op.location) + # CHECK-LABEL: TEST: testModuleMerge @run diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp index 3140f12c0b7e8..b9115657d6bf3 100644 --- a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp @@ -513,14 +513,57 @@ getCustomBuilderParams(std::initializer_list prefix, return builderParams; } +static std::string getSignature(const Method &m) { + std::string signature; + llvm::raw_string_ostream os(signature); + raw_indented_ostream indentedOs(os); + m.writeDeclTo(indentedOs); + return signature; +} + +static void emitDuplicatedBuilderError(const Method ¤tMethod, + StringRef methodName, + const Class &defCls, + const AttrOrTypeDef &def) { + + // Try to search for method that makes `get` redundant. + auto loc = def.getDef()->getFieldLoc("builders"); + for (auto &method : defCls.getMethods()) { + if (method->getName() == methodName && + method->makesRedundant(currentMethod)) { + PrintError(loc, llvm::Twine("builder `") + methodName + + "` conflicts with an existing builder. "); + PrintFatalNote(llvm::Twine("A new builder with signature:\n") + + getSignature(currentMethod) + + "\nis shadowed by an existing builder with signature:\n" + + getSignature(*method) + + "\nPlease remove one of the conflicting " + "definitions."); + } + } + + // This code shouldn't be reached, but leaving this here for potential future + // use. + PrintFatalError(loc, "Failed to generate builder " + methodName); +} + void DefGen::emitCustomBuilder(const AttrOrTypeBuilder &builder) { // Don't emit a body if there isn't one. auto props = builder.getBody() ? Method::Static : Method::StaticDeclaration; StringRef returnType = def.getCppClassName(); if (std::optional builderReturnType = builder.getReturnType()) returnType = *builderReturnType; - Method *m = defCls.addMethod(returnType, "get", props, - getCustomBuilderParams({}, builder)); + + llvm::StringRef methodName = "get"; + const auto parameters = getCustomBuilderParams({}, builder); + Method *m = defCls.addMethod(returnType, methodName, props, parameters); + + // If method is pruned, report error and terminate. 
+ if (!m) { + auto curMethod = Method(returnType, methodName, props, parameters); + emitDuplicatedBuilderError(curMethod, methodName, defCls, def); + } + if (!builder.getBody()) return; @@ -547,11 +590,19 @@ void DefGen::emitCheckedCustomBuilder(const AttrOrTypeBuilder &builder) { StringRef returnType = def.getCppClassName(); if (std::optional builderReturnType = builder.getReturnType()) returnType = *builderReturnType; - Method *m = defCls.addMethod( - returnType, "getChecked", props, - getCustomBuilderParams( - {{"::llvm::function_ref<::mlir::InFlightDiagnostic()>", "emitError"}}, - builder)); + + llvm::StringRef methodName = "getChecked"; + auto parameters = getCustomBuilderParams( + {{"::llvm::function_ref<::mlir::InFlightDiagnostic()>", "emitError"}}, + builder); + Method *m = defCls.addMethod(returnType, methodName, props, parameters); + + // If method is pruned, report error and terminate. + if (!m) { + auto curMethod = Method(returnType, methodName, props, parameters); + emitDuplicatedBuilderError(curMethod, methodName, defCls, def); + } + if (!builder.getBody()) return; diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp index a1899a81afcce..8dd971374fa21 100644 --- a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp @@ -403,6 +403,7 @@ void DefFormat::genLiteralParser(StringRef value, FmtContext &ctx, .Case("]", "RSquare") .Case("?", "Question") .Case("+", "Plus") + .Case("-", "Minus") .Case("*", "Star") .Case("...", "Ellipsis") << "()"; diff --git a/mlir/tools/mlir-tblgen/EnumsGen.cpp b/mlir/tools/mlir-tblgen/EnumsGen.cpp index d152763f7382e..d4d32f5885971 100644 --- a/mlir/tools/mlir-tblgen/EnumsGen.cpp +++ b/mlir/tools/mlir-tblgen/EnumsGen.cpp @@ -364,6 +364,9 @@ getAllBitsUnsetCase(llvm::ArrayRef cases) { // inline constexpr operator|( a, b); // inline constexpr operator&( a, b); // inline constexpr operator^( a, b); +// inline constexpr &operator|=( &a, b); +// inline constexpr &operator&=( &a, b); +// inline constexpr &operator^=( &a, b); // inline constexpr operator~( bits); // inline constexpr bool bitEnumContainsAll( bits, bit); // inline constexpr bool bitEnumContainsAny( bits, bit); @@ -385,6 +388,15 @@ inline constexpr {0} operator&({0} a, {0} b) {{ inline constexpr {0} operator^({0} a, {0} b) {{ return static_cast<{0}>(static_cast<{1}>(a) ^ static_cast<{1}>(b)); } +inline constexpr {0} &operator|=({0} &a, {0} b) {{ + return a = a | b; +} +inline constexpr {0} &operator&=({0} &a, {0} b) {{ + return a = a & b; +} +inline constexpr {0} &operator^=({0} &a, {0} b) {{ + return a = a ^ b; +} inline constexpr {0} operator~({0} bits) {{ // Ensure only bits that can be present in the enum are set return static_cast<{0}>(~static_cast<{1}>(bits) & static_cast<{1}>({2}u)); diff --git a/mlir/tools/mlir-tblgen/FormatGen.cpp b/mlir/tools/mlir-tblgen/FormatGen.cpp index 4dfdde2146679..04d3ed1f3b70d 100644 --- a/mlir/tools/mlir-tblgen/FormatGen.cpp +++ b/mlir/tools/mlir-tblgen/FormatGen.cpp @@ -518,7 +518,7 @@ bool mlir::tblgen::isValidLiteral(StringRef value, // If there is only one character, this must either be punctuation or a // single character bare identifier. 
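The EnumsGen.cpp change above also emits compound-assignment operators for bit enums, defined in terms of the existing binary operators. A minimal standalone sketch of the emitted pattern, using a hypothetical `MyFlags` enum rather than any real generated MLIR enum:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical bit enum, standing in for a tablegen-generated one.
enum class MyFlags : uint32_t { None = 0, Read = 1, Write = 2, Exec = 4 };

// Shape of the operators EnumsGen emits (binary form plus the new compound form).
inline constexpr MyFlags operator|(MyFlags a, MyFlags b) {
  return static_cast<MyFlags>(static_cast<uint32_t>(a) |
                              static_cast<uint32_t>(b));
}
inline constexpr MyFlags &operator|=(MyFlags &a, MyFlags b) { return a = a | b; }

int main() {
  MyFlags f = MyFlags::Read;
  f |= MyFlags::Write; // accumulate flags without the `f = f | ...` spelling
  assert(static_cast<uint32_t>(f) == 3u);
  return 0;
}
```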
if (value.size() == 1) { - StringRef bare = "_:,=<>()[]{}?+*"; + StringRef bare = "_:,=<>()[]{}?+-*"; if (isalpha(front) || bare.contains(front)) return true; if (emitError) diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp index 4fdde76a613bb..7e8e559baf878 100644 --- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp +++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp @@ -3104,8 +3104,8 @@ void OpEmitter::genBuilder() { std::optional body = builder.getBody(); auto properties = body ? Method::Static : Method::StaticDeclaration; auto *method = opClass.addMethod("void", "build", properties, arguments); - if (body) - ERROR_IF_PRUNED(method, "build", op); + + ERROR_IF_PRUNED(method, "build", op); if (method) method->setDeprecated(builder.getDeprecatedMessage()); diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp index 0d113b3748354..ccf21d16005af 100644 --- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp @@ -852,6 +852,7 @@ static void genLiteralParser(StringRef value, MethodBody &body) { .Case("]", "RSquare()") .Case("?", "Question()") .Case("+", "Plus()") + .Case("-", "Minus()") .Case("*", "Star()") .Case("...", "Ellipsis()"); } diff --git a/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp b/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp index 5e279b542fdf9..eaf04379cb529 100644 --- a/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp +++ b/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp @@ -231,10 +231,10 @@ TEST(BarvinokTest, computeNumTermsCone) { // We expect the answer to be // (⌊M⌋ + 1)(⌊N⌋ + 1)(⌊P⌋ + 1) = // ⌊M⌋⌊N⌋⌊P⌋ + ⌊M⌋⌊N⌋ + ⌊N⌋⌊P⌋ + ⌊M⌋⌊P⌋ + ⌊M⌋ + ⌊N⌋ + ⌊P⌋ + 1. - for (unsigned i = 0; i < 2; i++) + for (auto &i : count) for (unsigned j = 0; j < 2; j++) for (unsigned k = 0; k < 2; k++) - EXPECT_EQ(count[i][j][k], 1); + EXPECT_EQ(i[j][k], 1); } /// We define some simple polyhedra with unimodular tangent cones and verify diff --git a/mlir/unittests/Analysis/Presburger/MatrixTest.cpp b/mlir/unittests/Analysis/Presburger/MatrixTest.cpp index cb8df8b346011..e2c2a9bcb7d26 100644 --- a/mlir/unittests/Analysis/Presburger/MatrixTest.cpp +++ b/mlir/unittests/Analysis/Presburger/MatrixTest.cpp @@ -390,7 +390,7 @@ TEST(MatrixTest, gramSchmidt) { EXPECT_EQ_FRAC_MATRIX(gs, FracMatrix::identity(10)); } -void checkReducedBasis(FracMatrix mat, Fraction delta) { +static void checkReducedBasis(FracMatrix mat, Fraction delta) { FracMatrix gsOrth = mat.gramSchmidt(); // Size-reduced check. diff --git a/offload/DeviceRTL/CMakeLists.txt b/offload/DeviceRTL/CMakeLists.txt deleted file mode 100644 index e4916f4d49755..0000000000000 --- a/offload/DeviceRTL/CMakeLists.txt +++ /dev/null @@ -1,188 +0,0 @@ -set(LIBOMPTARGET_BUILD_DEVICERTL_BCLIB TRUE CACHE BOOL - "Can be set to false to disable building this library.") - -if (NOT LIBOMPTARGET_BUILD_DEVICERTL_BCLIB) - message(STATUS "Not building DeviceRTL: Disabled by LIBOMPTARGET_BUILD_DEVICERTL_BCLIB") - return() -endif() - -# Check to ensure the host system is a supported host architecture. -if(NOT ${CMAKE_SIZEOF_VOID_P} EQUAL "8") - message(STATUS "Not building DeviceRTL: Runtime does not support 32-bit hosts") - return() -endif() - -if (LLVM_DIR) - # Builds that use pre-installed LLVM have LLVM_DIR set. 
- # A standalone or LLVM_ENABLE_RUNTIMES=openmp build takes this route - find_program(CLANG_TOOL clang PATHS ${LLVM_TOOLS_BINARY_DIR} NO_DEFAULT_PATH) -elseif (LLVM_TOOL_CLANG_BUILD AND NOT CMAKE_CROSSCOMPILING AND NOT OPENMP_STANDALONE_BUILD) - # LLVM in-tree builds may use CMake target names to discover the tools. - # A LLVM_ENABLE_PROJECTS=openmp build takes this route - set(CLANG_TOOL $) -else() - message(STATUS "Not building DeviceRTL. No appropriate clang found") - return() -endif() - -set(devicertl_base_directory ${CMAKE_CURRENT_SOURCE_DIR}) -set(include_directory ${devicertl_base_directory}/include) -set(source_directory ${devicertl_base_directory}/src) - -set(include_files - ${include_directory}/Allocator.h - ${include_directory}/Configuration.h - ${include_directory}/Debug.h - ${include_directory}/Interface.h - ${include_directory}/LibC.h - ${include_directory}/Mapping.h - ${include_directory}/Profiling.h - ${include_directory}/State.h - ${include_directory}/Synchronization.h - ${include_directory}/DeviceTypes.h - ${include_directory}/DeviceUtils.h - ${include_directory}/Workshare.h -) - -set(src_files - ${source_directory}/Allocator.cpp - ${source_directory}/Configuration.cpp - ${source_directory}/Debug.cpp - ${source_directory}/Kernel.cpp - ${source_directory}/LibC.cpp - ${source_directory}/Mapping.cpp - ${source_directory}/Misc.cpp - ${source_directory}/Parallelism.cpp - ${source_directory}/Profiling.cpp - ${source_directory}/Reduction.cpp - ${source_directory}/State.cpp - ${source_directory}/Synchronization.cpp - ${source_directory}/Tasking.cpp - ${source_directory}/DeviceUtils.cpp - ${source_directory}/Workshare.cpp -) - -# We disable the slp vectorizer during the runtime optimization to avoid -# vectorized accesses to the shared state. Generally, those are "good" but -# the optimizer pipeline (esp. Attributor) does not fully support vectorized -# instructions yet and we end up missing out on way more important constant -# propagation. That said, we will run the vectorizer again after the runtime -# has been linked into the user program. -set(clang_opt_flags -O3 -mllvm -openmp-opt-disable -DSHARED_SCRATCHPAD_SIZE=512 -mllvm -vectorize-slp=false ) - -# If the user built with the GPU C library enabled we will use that instead. -if(${LIBOMPTARGET_GPU_LIBC_SUPPORT}) - list(APPEND clang_opt_flags -DOMPTARGET_HAS_LIBC) -endif() - -# Set flags for LLVM Bitcode compilation. -set(bc_flags -c -flto -std=c++17 -fvisibility=hidden - ${clang_opt_flags} -nogpulib -nostdlibinc - -fno-rtti -fno-exceptions -fconvergent-functions - -Wno-unknown-cuda-version - -DOMPTARGET_DEVICE_RUNTIME - -I${include_directory} - -I${devicertl_base_directory}/../include - -I${devicertl_base_directory}/../../libc -) - -# first create an object target -function(compileDeviceRTLLibrary target_name target_triple) - set(target_bc_flags ${ARGN}) - - foreach(src ${src_files}) - get_filename_component(infile ${src} ABSOLUTE) - get_filename_component(outfile ${src} NAME) - set(outfile "${outfile}-${target_name}.o") - set(depfile "${outfile}.d") - - # Passing an empty CPU to -march= suppressed target specific metadata. - add_custom_command(OUTPUT ${outfile} - COMMAND ${CLANG_TOOL} - ${bc_flags} - --target=${target_triple} - ${target_bc_flags} - -MD -MF ${depfile} - ${infile} -o ${outfile} - DEPENDS ${infile} - DEPFILE ${depfile} - COMMENT "Building LLVM bitcode ${outfile}" - VERBATIM - ) - if(TARGET clang) - # Add a file-level dependency to ensure that clang is up-to-date. 
- # By default, add_custom_command only builds clang if the - # executable is missing. - add_custom_command(OUTPUT ${outfile} - DEPENDS clang - APPEND - ) - endif() - set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ${outfile}) - - list(APPEND obj_files ${CMAKE_CURRENT_BINARY_DIR}/${outfile}) - endforeach() - # Trick to combine these into a bitcode file via the linker's LTO pass. This - # is used to provide the legacy `libomptarget-.bc` files. Hack this - # through as an executable to get it to use the relocatable link. - add_executable(libomptarget-${target_name} ${obj_files}) - set_target_properties(libomptarget-${target_name} PROPERTIES - RUNTIME_OUTPUT_DIRECTORY ${LIBOMPTARGET_LLVM_LIBRARY_INTDIR} - LINKER_LANGUAGE CXX - BUILD_RPATH "" - INSTALL_RPATH "" - RUNTIME_OUTPUT_NAME libomptarget-${target_name}.bc) - target_compile_options(libomptarget-${target_name} PRIVATE - "--target=${target_triple}" "-fuse-ld=lld" "-march=" "-mcpu=" - "-Wno-unused-command-line-argument") - target_link_options(libomptarget-${target_name} PRIVATE "--target=${target_triple}" - "-r" "-nostdlib" "-flto" "-Wl,--lto-emit-llvm" - "-fuse-ld=lld" "-march=" "-mcpu=") - install(TARGETS libomptarget-${target_name} - PERMISSIONS OWNER_WRITE OWNER_READ GROUP_READ WORLD_READ - DESTINATION "lib${LLVM_LIBDIR_SUFFIX}/${target_triple}") - - add_library(omptarget.${target_name}.all_objs OBJECT IMPORTED) - set_property(TARGET omptarget.${target_name}.all_objs APPEND PROPERTY IMPORTED_OBJECTS - ${LIBOMPTARGET_LLVM_LIBRARY_INTDIR}/libomptarget-${target_name}.bc) - add_dependencies(omptarget.${target_name}.all_objs libomptarget-${target_name}) - - # Archive all the object files generated above into a static library - add_library(omptarget.${target_name} STATIC) - set_target_properties(omptarget.${target_name} PROPERTIES - ARCHIVE_OUTPUT_DIRECTORY "${LIBOMPTARGET_LLVM_LIBRARY_INTDIR}/${target_triple}" - ARCHIVE_OUTPUT_NAME ompdevice - LINKER_LANGUAGE CXX - ) - target_link_libraries(omptarget.${target_name} PRIVATE omptarget.${target_name}.all_objs) - target_link_options(omptarget.${target_name} PRIVATE "--target=${target_triple}" - "-Wno-unused-command-line-argument" "-r" "-nostdlib" "-flto" - "-Wl,--lto-emit-llvm" "-fuse-ld=lld" "-march=" "-mcpu=") - - install(TARGETS omptarget.${target_name} - ARCHIVE DESTINATION "lib${LLVM_LIBDIR_SUFFIX}/${target_triple}") - - if (CMAKE_EXPORT_COMPILE_COMMANDS) - set(ide_target_name omptarget-ide-${target_name}) - add_library(${ide_target_name} STATIC EXCLUDE_FROM_ALL ${src_files}) - target_compile_options(${ide_target_name} PRIVATE - -fvisibility=hidden --target=${target_triple} - -nogpulib -nostdlibinc -Wno-unknown-cuda-version - ) - target_compile_definitions(${ide_target_name} PRIVATE SHARED_SCRATCHPAD_SIZE=512) - target_include_directories(${ide_target_name} PRIVATE - ${include_directory} - ${devicertl_base_directory}/../../libc - ${devicertl_base_directory}/../include - ) - install(TARGETS ${ide_target_name} EXCLUDE_FROM_ALL) - endif() -endfunction() - -if(NOT LLVM_TARGETS_TO_BUILD OR "AMDGPU" IN_LIST LLVM_TARGETS_TO_BUILD) - compileDeviceRTLLibrary(amdgpu amdgcn-amd-amdhsa -Xclang -mcode-object-version=none) -endif() - -if(NOT LLVM_TARGETS_TO_BUILD OR "NVPTX" IN_LIST LLVM_TARGETS_TO_BUILD) - compileDeviceRTLLibrary(nvptx nvptx64-nvidia-cuda --cuda-feature=+ptx63) -endif() diff --git a/offload/liboffload/API/Memory.td b/offload/liboffload/API/Memory.td index cc98b672a26a9..79e8038330048 100644 --- a/offload/liboffload/API/Memory.td +++ b/offload/liboffload/API/Memory.td 
@@ -21,6 +21,9 @@ def ol_alloc_type_t : Enum { def olMemAlloc : Function { let desc = "Creates a memory allocation on the specified device."; + let details = [ + "All allocations through olMemAlloc regardless of source share a single virtual address range. There is no risk of multiple devices returning equal pointers to different memory." + ]; let params = [ Param<"ol_device_handle_t", "Device", "handle of the device to allocate on", PARAM_IN>, Param<"ol_alloc_type_t", "Type", "type of the allocation", PARAM_IN>, @@ -42,6 +45,56 @@ def olMemFree : Function { let returns = []; } +def ol_mem_info_t : Enum { + let desc = "Supported memory info."; + let is_typed = 1; + let etors = [ + TaggedEtor<"DEVICE", "ol_device_handle_t", "The handle of the device associated with the allocation.">, + TaggedEtor<"BASE", "void *", "Base address of this allocation.">, + TaggedEtor<"SIZE", "size_t", "Size of this allocation in bytes.">, + TaggedEtor<"TYPE", "ol_alloc_type_t", "Type of this allocation.">, + ]; +} + +def olGetMemInfo : Function { + let desc = "Queries the given property of a memory allocation allocated with olMemAlloc."; + let details = [ + "`olGetMemInfoSize` can be used to query the storage size required for the given query.", + "The provided pointer can point to any location inside the allocation.", + ]; + let params = [ + Param<"const void *", "Ptr", "pointer to the allocated memory", PARAM_IN>, + Param<"ol_mem_info_t", "PropName", "type of the info to retrieve", PARAM_IN>, + Param<"size_t", "PropSize", "the number of bytes pointed to by PropValue.", PARAM_IN>, + TypeTaggedParam<"void*", "PropValue", "array of bytes holding the info. " + "If Size is not equal to or greater to the real number of bytes needed to return the info " + "then the OL_ERRC_INVALID_SIZE error is returned and pPlatformInfo is not used.", PARAM_OUT, + TypeInfo<"PropName" , "PropSize">> + ]; + let returns = [ + Return<"OL_ERRC_INVALID_SIZE", [ + "`PropSize == 0`", + "If `PropSize` is less than the real number of bytes needed to return the info." + ]>, + Return<"OL_ERRC_NOT_FOUND", ["memory was not allocated by liboffload"]> + ]; +} + +def olGetMemInfoSize : Function { + let desc = "Returns the storage size of the given queue query."; + let details = [ + "The provided pointer can point to any location inside the allocation.", + ]; + let params = [ + Param<"const void *", "Ptr", "pointer to the allocated memory", PARAM_IN>, + Param<"ol_mem_info_t", "PropName", "type of the info to query", PARAM_IN>, + Param<"size_t*", "PropSizeRet", "pointer to the number of bytes required to store the query", PARAM_OUT> + ]; + let returns = [ + Return<"OL_ERRC_NOT_FOUND", ["memory was not allocated by liboffload"]> + ]; +} + def olMemcpy : Function { let desc = "Enqueue a memcpy operation."; let details = [ diff --git a/offload/liboffload/src/OffloadImpl.cpp b/offload/liboffload/src/OffloadImpl.cpp index c5d083db7522e..051882da7c6c7 100644 --- a/offload/liboffload/src/OffloadImpl.cpp +++ b/offload/liboffload/src/OffloadImpl.cpp @@ -39,12 +39,28 @@ using namespace llvm::omp::target; using namespace llvm::omp::target::plugin; using namespace error; +struct ol_platform_impl_t { + ol_platform_impl_t(std::unique_ptr Plugin, + ol_platform_backend_t BackendType) + : Plugin(std::move(Plugin)), BackendType(BackendType) {} + std::unique_ptr Plugin; + llvm::SmallVector> Devices; + ol_platform_backend_t BackendType; + + /// Complete all pending work for this platform and perform any needed + /// cleanup. 
+ /// + /// After calling this function, no liboffload functions should be called with + /// this platform handle. + llvm::Error destroy(); +}; + // Handle type definitions. Ideally these would be 1:1 with the plugins, but // we add some additional data here for now to avoid churn in the plugin // interface. struct ol_device_impl_t { ol_device_impl_t(int DeviceNum, GenericDeviceTy *Device, - ol_platform_handle_t Platform, InfoTreeNode &&DevInfo) + ol_platform_impl_t &Platform, InfoTreeNode &&DevInfo) : DeviceNum(DeviceNum), Device(Device), Platform(Platform), Info(std::forward(DevInfo)) {} @@ -55,7 +71,7 @@ struct ol_device_impl_t { int DeviceNum; GenericDeviceTy *Device; - ol_platform_handle_t Platform; + ol_platform_impl_t &Platform; InfoTreeNode Info; llvm::SmallVector<__tgt_async_info *> OutstandingQueues; @@ -102,31 +118,17 @@ struct ol_device_impl_t { } }; -struct ol_platform_impl_t { - ol_platform_impl_t(std::unique_ptr Plugin, - ol_platform_backend_t BackendType) - : Plugin(std::move(Plugin)), BackendType(BackendType) {} - std::unique_ptr Plugin; - llvm::SmallVector> Devices; - ol_platform_backend_t BackendType; +llvm::Error ol_platform_impl_t::destroy() { + llvm::Error Result = Plugin::success(); + for (auto &D : Devices) + if (auto Err = D->destroy()) + Result = llvm::joinErrors(std::move(Result), std::move(Err)); - /// Complete all pending work for this platform and perform any needed - /// cleanup. - /// - /// After calling this function, no liboffload functions should be called with - /// this platform handle. - llvm::Error destroy() { - llvm::Error Result = Plugin::success(); - for (auto &D : Devices) - if (auto Err = D->destroy()) - Result = llvm::joinErrors(std::move(Result), std::move(Err)); + if (auto Res = Plugin->deinit()) + Result = llvm::joinErrors(std::move(Result), std::move(Res)); - if (auto Res = Plugin->deinit()) - Result = llvm::joinErrors(std::move(Result), std::move(Res)); - - return Result; - } -}; + return Result; +} struct ol_queue_impl_t { ol_queue_impl_t(__tgt_async_info *AsyncInfo, ol_device_handle_t Device) @@ -182,6 +184,9 @@ namespace offload { struct AllocInfo { ol_device_handle_t Device; ol_alloc_type_t Type; + void *Start; + // One byte past the end + void *End; }; // Global shared state for liboffload @@ -200,12 +205,15 @@ struct OffloadContext { bool ValidationEnabled = true; DenseMap AllocInfoMap{}; std::mutex AllocInfoMapMutex{}; - SmallVector Platforms{}; + // Partitioned list of memory base addresses. Each element in this list is a + // key in AllocInfoMap + llvm::SmallVector AllocBases{}; + SmallVector, 4> Platforms{}; size_t RefCount; ol_device_handle_t HostDevice() { // The host platform is always inserted last - return Platforms.back().Devices[0].get(); + return Platforms.back()->Devices[0].get(); } static OffloadContext &get() { @@ -244,38 +252,35 @@ Error initPlugins(OffloadContext &Context) { // Attempt to create an instance of each supported plugin. #define PLUGIN_TARGET(Name) \ do { \ - Context.Platforms.emplace_back(ol_platform_impl_t{ \ - std::unique_ptr(createPlugin_##Name()), \ - pluginNameToBackend(#Name)}); \ + if (StringRef(#Name) != "host") \ + Context.Platforms.emplace_back(std::make_unique( \ + std::unique_ptr(createPlugin_##Name()), \ + pluginNameToBackend(#Name))); \ } while (false); #include "Shared/Targets.def" // Preemptively initialize all devices in the plugin for (auto &Platform : Context.Platforms) { - // Do not use the host plugin - it isn't supported. 
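The olGetMemInfo/olGetMemInfoSize entry points declared in Memory.td above follow the usual two-call query pattern: first query the storage size, then fetch the value. A hedged usage sketch, assuming the generated header is named OffloadAPI.h and that the functions return OL_SUCCESS on success (both are assumptions for illustration; the function names, parameters, and OL_MEM_INFO_SIZE come from the definitions above):

```cpp
#include <OffloadAPI.h> // assumed name of the generated liboffload header
#include <cstdio>

// Query the size of an allocation given any pointer into it.
static bool printAllocSize(const void *Ptr) {
  size_t PropSize = 0;
  if (olGetMemInfoSize(Ptr, OL_MEM_INFO_SIZE, &PropSize) != OL_SUCCESS)
    return false; // e.g. OL_ERRC_NOT_FOUND: Ptr was not allocated via olMemAlloc

  size_t AllocSize = 0;
  if (olGetMemInfo(Ptr, OL_MEM_INFO_SIZE, PropSize, &AllocSize) != OL_SUCCESS)
    return false; // e.g. OL_ERRC_INVALID_SIZE if PropSize is too small

  std::printf("allocation size: %zu bytes\n", AllocSize);
  return true;
}
```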
- if (Platform.BackendType == OL_PLATFORM_BACKEND_UNKNOWN) - continue; - auto Err = Platform.Plugin->init(); + auto Err = Platform->Plugin->init(); [[maybe_unused]] std::string InfoMsg = toString(std::move(Err)); - for (auto DevNum = 0; DevNum < Platform.Plugin->number_of_devices(); + for (auto DevNum = 0; DevNum < Platform->Plugin->number_of_devices(); DevNum++) { - if (Platform.Plugin->init_device(DevNum) == OFFLOAD_SUCCESS) { - auto Device = &Platform.Plugin->getDevice(DevNum); + if (Platform->Plugin->init_device(DevNum) == OFFLOAD_SUCCESS) { + auto Device = &Platform->Plugin->getDevice(DevNum); auto Info = Device->obtainInfoImpl(); if (auto Err = Info.takeError()) return Err; - Platform.Devices.emplace_back(std::make_unique( - DevNum, Device, &Platform, std::move(*Info))); + Platform->Devices.emplace_back(std::make_unique( + DevNum, Device, *Platform, std::move(*Info))); } } } // Add the special host device auto &HostPlatform = Context.Platforms.emplace_back( - ol_platform_impl_t{nullptr, OL_PLATFORM_BACKEND_HOST}); - HostPlatform.Devices.emplace_back( - std::make_unique(-1, nullptr, nullptr, InfoTreeNode{})); - Context.HostDevice()->Platform = &HostPlatform; + std::make_unique(nullptr, OL_PLATFORM_BACKEND_HOST)); + HostPlatform->Devices.emplace_back(std::make_unique( + -1, nullptr, *HostPlatform, InfoTreeNode{})); Context.TracingEnabled = std::getenv("OFFLOAD_TRACE"); Context.ValidationEnabled = !std::getenv("OFFLOAD_DISABLE_VALIDATION"); @@ -312,10 +317,10 @@ Error olShutDown_impl() { for (auto &P : OldContext->Platforms) { // Host plugin is nullptr and has no deinit - if (!P.Plugin || !P.Plugin->is_initialized()) + if (!P->Plugin || !P->Plugin->is_initialized()) continue; - if (auto Res = P.destroy()) + if (auto Res = P->destroy()) Result = llvm::joinErrors(std::move(Result), std::move(Res)); } @@ -380,7 +385,7 @@ Error olGetDeviceInfoImplDetail(ol_device_handle_t Device, // These are not implemented by the plugin interface switch (PropName) { case OL_DEVICE_INFO_PLATFORM: - return Info.write(Device->Platform); + return Info.write(&Device->Platform); case OL_DEVICE_INFO_TYPE: return Info.write(OL_DEVICE_TYPE_GPU); @@ -513,7 +518,7 @@ Error olGetDeviceInfoImplDetailHost(ol_device_handle_t Device, switch (PropName) { case OL_DEVICE_INFO_PLATFORM: - return Info.write(Device->Platform); + return Info.write(&Device->Platform); case OL_DEVICE_INFO_TYPE: return Info.write(OL_DEVICE_TYPE_HOST); case OL_DEVICE_INFO_NAME: @@ -591,7 +596,7 @@ Error olGetDeviceInfoSize_impl(ol_device_handle_t Device, Error olIterateDevices_impl(ol_device_iterate_cb_t Callback, void *UserData) { for (auto &Platform : OffloadContext::get().Platforms) { - for (auto &Device : Platform.Devices) { + for (auto &Device : Platform->Devices) { if (!Callback(Device.get(), UserData)) { break; } @@ -613,20 +618,61 @@ TargetAllocTy convertOlToPluginAllocTy(ol_alloc_type_t Type) { } } +constexpr size_t MAX_ALLOC_TRIES = 50; Error olMemAlloc_impl(ol_device_handle_t Device, ol_alloc_type_t Type, size_t Size, void **AllocationOut) { - auto Alloc = - Device->Device->dataAlloc(Size, nullptr, convertOlToPluginAllocTy(Type)); - if (!Alloc) - return Alloc.takeError(); + SmallVector Rejects; + + // Repeat the allocation up to a certain amount of times. If it happens to + // already be allocated (e.g. by a device from another vendor) throw it away + // and try again. 
+ for (size_t Count = 0; Count < MAX_ALLOC_TRIES; Count++) { + auto NewAlloc = Device->Device->dataAlloc(Size, nullptr, + convertOlToPluginAllocTy(Type)); + if (!NewAlloc) + return NewAlloc.takeError(); + + void *NewEnd = &static_cast(*NewAlloc)[Size]; + auto &AllocBases = OffloadContext::get().AllocBases; + auto &AllocInfoMap = OffloadContext::get().AllocInfoMap; + { + std::lock_guard Lock(OffloadContext::get().AllocInfoMapMutex); + + // Check that this memory region doesn't overlap another one + // That is, the start of this allocation needs to be after another + // allocation's end point, and the end of this allocation needs to be + // before the next one's start. + // `Gap` is the first alloc who ends after the new alloc's start point. + auto Gap = + std::lower_bound(AllocBases.begin(), AllocBases.end(), *NewAlloc, + [&](const void *Iter, const void *Val) { + return AllocInfoMap.at(Iter).End <= Val; + }); + if (Gap == AllocBases.end() || NewEnd <= AllocInfoMap.at(*Gap).Start) { + // Success, no conflict + AllocInfoMap.insert_or_assign( + *NewAlloc, AllocInfo{Device, Type, *NewAlloc, NewEnd}); + AllocBases.insert( + std::lower_bound(AllocBases.begin(), AllocBases.end(), *NewAlloc), + *NewAlloc); + *AllocationOut = *NewAlloc; + + for (void *R : Rejects) + if (auto Err = + Device->Device->dataDelete(R, convertOlToPluginAllocTy(Type))) + return Err; + return Error::success(); + } - *AllocationOut = *Alloc; - { - std::lock_guard Lock(OffloadContext::get().AllocInfoMapMutex); - OffloadContext::get().AllocInfoMap.insert_or_assign( - *Alloc, AllocInfo{Device, Type}); + // To avoid the next attempt allocating the same memory we just freed, we + // hold onto it until we complete the allocation + Rejects.push_back(*NewAlloc); + } } - return Error::success(); + + // We've tried multiple times, and can't allocate a non-overlapping region. 
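A standalone sketch of the overlap test described in the comments above, not the liboffload code itself: base addresses are kept sorted, a `lower_bound` with an `End <= Val` predicate finds the first existing allocation that ends after the candidate's start, and the candidate is accepted only if it also ends before that allocation begins. All names below are illustrative:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Region {
  uintptr_t Start;
  uintptr_t End; // one past the end
};

// Regions sorted by Start; returns true if [Start, End) overlaps none of them.
static bool fitsWithoutOverlap(const std::vector<Region> &Sorted,
                               uintptr_t Start, uintptr_t End) {
  // First region whose End is after the candidate's Start.
  auto Gap = std::lower_bound(
      Sorted.begin(), Sorted.end(), Start,
      [](const Region &R, uintptr_t Val) { return R.End <= Val; });
  return Gap == Sorted.end() || End <= Gap->Start;
}

int main() {
  std::vector<Region> Regions = {{0x1000, 0x2000}, {0x3000, 0x4000}};
  assert(fitsWithoutOverlap(Regions, 0x2000, 0x3000));  // exactly fills the gap
  assert(!fitsWithoutOverlap(Regions, 0x1800, 0x2800)); // clips the first region
  return 0;
}
```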
+ return createOffloadError(ErrorCode::BACKEND_FAILURE, + "failed to allocate non-overlapping memory"); } Error olMemFree_impl(void *Address) { @@ -642,6 +688,9 @@ Error olMemFree_impl(void *Address) { Device = AllocInfo.Device; Type = AllocInfo.Type; OffloadContext::get().AllocInfoMap.erase(Address); + + auto &Bases = OffloadContext::get().AllocBases; + Bases.erase(std::lower_bound(Bases.begin(), Bases.end(), Address)); } if (auto Res = @@ -651,6 +700,60 @@ Error olMemFree_impl(void *Address) { return Error::success(); } +Error olGetMemInfoImplDetail(const void *Ptr, ol_mem_info_t PropName, + size_t PropSize, void *PropValue, + size_t *PropSizeRet) { + InfoWriter Info(PropSize, PropValue, PropSizeRet); + std::lock_guard Lock(OffloadContext::get().AllocInfoMapMutex); + + auto &AllocBases = OffloadContext::get().AllocBases; + auto &AllocInfoMap = OffloadContext::get().AllocInfoMap; + const AllocInfo *Alloc = nullptr; + if (AllocInfoMap.contains(Ptr)) { + // Fast case, we have been given the base pointer directly + Alloc = &AllocInfoMap.at(Ptr); + } else { + // Slower case, we need to look up the base pointer first + // Find the first memory allocation whose end is after the target pointer, + // and then check to see if it is in range + auto Loc = std::lower_bound(AllocBases.begin(), AllocBases.end(), Ptr, + [&](const void *Iter, const void *Val) { + return AllocInfoMap.at(Iter).End <= Val; + }); + if (Loc == AllocBases.end() || Ptr < AllocInfoMap.at(*Loc).Start) + return Plugin::error(ErrorCode::NOT_FOUND, + "allocated memory information not found"); + Alloc = &AllocInfoMap.at(*Loc); + } + + switch (PropName) { + case OL_MEM_INFO_DEVICE: + return Info.write(Alloc->Device); + case OL_MEM_INFO_BASE: + return Info.write(Alloc->Start); + case OL_MEM_INFO_SIZE: + return Info.write(static_cast(Alloc->End) - + static_cast(Alloc->Start)); + case OL_MEM_INFO_TYPE: + return Info.write(Alloc->Type); + default: + return createOffloadError(ErrorCode::INVALID_ENUMERATION, + "olGetMemInfo enum '%i' is invalid", PropName); + } + + return Error::success(); +} + +Error olGetMemInfo_impl(const void *Ptr, ol_mem_info_t PropName, + size_t PropSize, void *PropValue) { + return olGetMemInfoImplDetail(Ptr, PropName, PropSize, PropValue, nullptr); +} + +Error olGetMemInfoSize_impl(const void *Ptr, ol_mem_info_t PropName, + size_t *PropSizeRet) { + return olGetMemInfoImplDetail(Ptr, PropName, 0, nullptr, PropSizeRet); +} + Error olCreateQueue_impl(ol_device_handle_t Device, ol_queue_handle_t *Queue) { auto CreatedQueue = std::make_unique(nullptr, Device); diff --git a/offload/libomptarget/OpenMP/InteropAPI.cpp b/offload/libomptarget/OpenMP/InteropAPI.cpp index eb5425ecbf062..c55ef2c2e672c 100644 --- a/offload/libomptarget/OpenMP/InteropAPI.cpp +++ b/offload/libomptarget/OpenMP/InteropAPI.cpp @@ -124,7 +124,7 @@ void *getProperty(omp_interop_val_t &InteropVal, case omp_ipr_device_context: return InteropVal.device_info.Context; case omp_ipr_targetsync: - return InteropVal.async_info->Queue; + return InteropVal.async_info ? 
InteropVal.async_info->Queue : nullptr; default:; } getTypeMismatch(Property, Err); @@ -167,7 +167,6 @@ bool getPropertyCheck(omp_interop_val_t **InteropPtr, omp_interop_property_t property_id, \ int *err) { \ omp_interop_val_t *interop_val = (omp_interop_val_t *)interop; \ - assert((interop_val)->interop_type == kmp_interop_type_targetsync); \ if (!getPropertyCheck(&interop_val, property_id, err)) { \ return (RETURN_TYPE)(0); \ } \ @@ -275,8 +274,8 @@ omp_interop_val_t *__tgt_interop_get(ident_t *LocRef, int32_t InteropType, return Interop; } -int __tgt_interop_use(ident_t *LocRef, omp_interop_val_t *Interop, - interop_ctx_t *Ctx, dep_pack_t *Deps) { +int __tgt_interop_use60(ident_t *LocRef, omp_interop_val_t *Interop, + interop_ctx_t *Ctx, dep_pack_t *Deps) { bool Nowait = Ctx->flags.nowait; DP("Call to %s with interop " DPxMOD ", nowait %" PRId32 "\n", __func__, DPxPTR(Interop), Nowait); @@ -359,6 +358,40 @@ EXTERN int ompx_interop_add_completion_callback(omp_interop_val_t *Interop, return omp_irc_success; } +// Backwards compatibility wrappers +void __tgt_interop_init(ident_t *LocRef, int32_t Gtid, + omp_interop_val_t *&InteropPtr, int32_t InteropType, + int32_t DeviceId, int32_t Ndeps, + kmp_depend_info_t *DepList, int32_t HaveNowait) { + constexpr int32_t old_kmp_interop_type_targetsync = 2; + interop_ctx_t Ctx = {0, {false, (bool)HaveNowait, 0}, Gtid}; + dep_pack_t Deps = {Ndeps, 0, DepList, nullptr}; + InteropPtr = + __tgt_interop_get(LocRef, + InteropType == old_kmp_interop_type_targetsync + ? kmp_interop_type_targetsync + : kmp_interop_type_target, + DeviceId, 0, nullptr, &Ctx, Ndeps ? &Deps : nullptr); +} + +void __tgt_interop_use(ident_t *LocRef, int32_t Gtid, + omp_interop_val_t *&InteropPtr, int32_t DeviceId, + int32_t Ndeps, kmp_depend_info_t *DepList, + int32_t HaveNowait) { + interop_ctx_t Ctx = {0, {false, (bool)HaveNowait, 0}, Gtid}; + dep_pack_t Deps = {Ndeps, 0, DepList, nullptr}; + __tgt_interop_use60(LocRef, InteropPtr, &Ctx, Ndeps ? &Deps : nullptr); +} + +void __tgt_interop_destroy(ident_t *LocRef, int32_t Gtid, + omp_interop_val_t *&InteropPtr, int32_t DeviceId, + int32_t Ndeps, kmp_depend_info_t *DepList, + int32_t HaveNowait) { + interop_ctx_t Ctx = {0, {false, (bool)HaveNowait, 0}, Gtid}; + dep_pack_t Deps = {Ndeps, 0, DepList, nullptr}; + __tgt_interop_release(LocRef, InteropPtr, &Ctx, Ndeps ? &Deps : nullptr); +} + } // extern "C" llvm::Expected omp_interop_val_t::getDevice() const { diff --git a/offload/libomptarget/exports b/offload/libomptarget/exports index 8e2db6ba8bba4..1374bfea81511 100644 --- a/offload/libomptarget/exports +++ b/offload/libomptarget/exports @@ -68,8 +68,11 @@ VERS1.0 { omp_get_interop_int; omp_get_interop_name; omp_get_interop_type_desc; - __tgt_interop_get; + __tgt_interop_init; __tgt_interop_use; + __tgt_interop_destroy; + __tgt_interop_get; + __tgt_interop_use60; __tgt_interop_release; __tgt_target_sync; __llvmPushCallConfiguration; diff --git a/offload/libomptarget/omptarget.cpp b/offload/libomptarget/omptarget.cpp index 39286d41ec865..a1950cbb62908 100644 --- a/offload/libomptarget/omptarget.cpp +++ b/offload/libomptarget/omptarget.cpp @@ -330,6 +330,54 @@ int targetDataMapper(ident_t *Loc, DeviceTy &Device, void *ArgBase, void *Arg, return Rc; } +/// Returns a buffer of the requested \p Size, to be used as the source for +/// `submitData`. +/// +/// For small buffers (`Size <= sizeof(void*)`), uses \p AsyncInfo's +/// getVoidPtrLocation(). 
+/// For larger buffers, creates a dynamic buffer which will be eventually +/// deleted by \p AsyncInfo's post-processing callback. +static char *getOrCreateSourceBufferForSubmitData(AsyncInfoTy &AsyncInfo, + int64_t Size) { + constexpr int64_t VoidPtrSize = sizeof(void *); + + if (Size <= VoidPtrSize) { + void *&BufferElement = AsyncInfo.getVoidPtrLocation(); + return reinterpret_cast(&BufferElement); + } + + // Create a dynamic buffer for larger data and schedule its deletion. + char *DataBuffer = new char[Size]; + AsyncInfo.addPostProcessingFunction([DataBuffer]() { + delete[] DataBuffer; + return OFFLOAD_SUCCESS; + }); + return DataBuffer; +} + +/// Calculates the target pointee base by applying the host +/// pointee begin/base delta to the target pointee begin. +/// +/// ``` +/// TgtPteeBase = TgtPteeBegin - (HstPteeBegin - HstPteeBase) +/// ``` +static void *calculateTargetPointeeBase(void *HstPteeBase, void *HstPteeBegin, + void *TgtPteeBegin) { + uint64_t Delta = reinterpret_cast(HstPteeBegin) - + reinterpret_cast(HstPteeBase); + void *TgtPteeBase = reinterpret_cast( + reinterpret_cast(TgtPteeBegin) - Delta); + + DP("HstPteeBase: " DPxMOD ", HstPteeBegin: " DPxMOD + ", Delta (HstPteeBegin - HstPteeBase): %" PRIu64 ".\n", + DPxPTR(HstPteeBase), DPxPTR(HstPteeBegin), Delta); + DP("TgtPteeBase (TgtPteeBegin - Delta): " DPxMOD ", TgtPteeBegin : " DPxMOD + "\n", + DPxPTR(TgtPteeBase), DPxPTR(TgtPteeBegin)); + + return TgtPteeBase; +} + /// Utility function to perform a pointer attachment operation. /// /// For something like: @@ -399,16 +447,8 @@ static int performPointerAttachment(DeviceTy &Device, AsyncInfoTy &AsyncInfo, constexpr int64_t VoidPtrSize = sizeof(void *); assert(HstPtrSize >= VoidPtrSize && "PointerSize is too small"); - uint64_t Delta = reinterpret_cast(HstPteeBegin) - - reinterpret_cast(HstPteeBase); - void *TgtPteeBase = reinterpret_cast( - reinterpret_cast(TgtPteeBegin) - Delta); - DP("HstPteeBase: " DPxMOD ", HstPteeBegin: " DPxMOD - ", Delta (HstPteeBegin - HstPteeBase): %" PRIu64 ".\n", - DPxPTR(HstPteeBase), DPxPTR(HstPteeBegin), Delta); - DP("TgtPteeBase (TgtPteeBegin - Delta): " DPxMOD ", TgtPteeBegin : " DPxMOD - "\n", - DPxPTR(TgtPteeBase), DPxPTR(TgtPteeBegin)); + void *TgtPteeBase = + calculateTargetPointeeBase(HstPteeBase, HstPteeBegin, TgtPteeBegin); // Add shadow pointer tracking if (!PtrTPR.getEntry()->addShadowPointer( @@ -435,48 +475,32 @@ static int performPointerAttachment(DeviceTy &Device, AsyncInfoTy &AsyncInfo, return OFFLOAD_SUCCESS; }; - bool IsPtrAFortranDescriptor = HstPtrSize > VoidPtrSize; - if (!IsPtrAFortranDescriptor) { - // For "regular" pointers, we can use the VoidPtrLocation from AsyncInfo as - // the buffer space for the submission. - void *&BufferElement = AsyncInfo.getVoidPtrLocation(); - BufferElement = TgtPteeBase; - - // Submit the updated pointer value to device - return HandleSubmitResult(Device.submitData( - TgtPtrAddr, &BufferElement, VoidPtrSize, AsyncInfo, PtrTPR.getEntry())); + // Get a buffer to be used as the source for data submission. + char *SrcBuffer = getOrCreateSourceBufferForSubmitData(AsyncInfo, HstPtrSize); + + // The pointee's address should occupy the first VoidPtrSize bytes + // irrespective of HstPtrSize. + std::memcpy(SrcBuffer, &TgtPteeBase, VoidPtrSize); + + // For larger "pointers" (e.g., Fortran descriptors), copy remaining + // descriptor fields from the host descriptor into the buffer. 
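A small worked example of the delta arithmetic performed by calculateTargetPointeeBase above, using made-up addresses and plain integers for clarity:

```cpp
#include <cassert>
#include <cstdint>

// Same arithmetic as calculateTargetPointeeBase, on integers instead of pointers.
static uint64_t targetPointeeBase(uint64_t HstPteeBase, uint64_t HstPteeBegin,
                                  uint64_t TgtPteeBegin) {
  uint64_t Delta = HstPteeBegin - HstPteeBase; // offset of begin within the pointee
  return TgtPteeBegin - Delta;
}

int main() {
  // Host: pointee base 0x1000, mapped section begins at 0x1010 (Delta = 0x10).
  // Device: the mapped section begins at 0x8010, so the base must be 0x8000.
  assert(targetPointeeBase(0x1000, 0x1010, 0x8010) == 0x8000);
  return 0;
}
```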
+ if (HstPtrSize > VoidPtrSize) { + uint64_t HstDescriptorFieldsSize = HstPtrSize - VoidPtrSize; + void *HstDescriptorFieldsAddr = + reinterpret_cast(HstPtrAddr) + VoidPtrSize; + std::memcpy(SrcBuffer + VoidPtrSize, HstDescriptorFieldsAddr, + HstDescriptorFieldsSize); + + DP("Updating %" PRId64 " bytes of descriptor (" DPxMOD + ") (pointer + %" PRId64 " additional bytes from host descriptor " DPxMOD + ")\n", + HstPtrSize, DPxPTR(TgtPtrAddr), HstDescriptorFieldsSize, + DPxPTR(HstDescriptorFieldsAddr)); } - // For larger "pointers" (like Fortran's descriptors), we create a dynamic - // buffer, which will be eventually destroyed by AsyncInfo's post-processing - // callback. - char *DataBuffer = new char[HstPtrSize]; - - // For such descriptors, to the first VoidPtrSize bytes, we store the - // pointee's device address. - std::memcpy(DataBuffer, &TgtPteeBase, sizeof(void *)); - - // And to the remaining bytes, we copy the remaining contents of the host - // descriptor after the initial VoidPtrSize bytes. - uint64_t HstDescriptorFieldsSize = HstPtrSize - VoidPtrSize; - void *HstDescriptorFieldsAddr = - reinterpret_cast(HstPtrAddr) + VoidPtrSize; - std::memcpy(DataBuffer + VoidPtrSize, HstDescriptorFieldsAddr, - HstDescriptorFieldsSize); - - DP("Updating %" PRId64 " bytes of descriptor (" DPxMOD ") (pointer + %" PRId64 - " additional bytes from host descriptor " DPxMOD ")\n", - HstPtrSize, DPxPTR(TgtPtrAddr), HstDescriptorFieldsSize, - DPxPTR(HstDescriptorFieldsAddr)); - - // Submit the entire buffer to device - int SubmitResult = Device.submitData(TgtPtrAddr, DataBuffer, HstPtrSize, + // Submit the populated source buffer to device. + int SubmitResult = Device.submitData(TgtPtrAddr, SrcBuffer, HstPtrSize, AsyncInfo, PtrTPR.getEntry()); - - AsyncInfo.addPostProcessingFunction([DataBuffer]() -> int { - delete[] DataBuffer; - return OFFLOAD_SUCCESS; - }); return HandleSubmitResult(SubmitResult); } @@ -525,10 +549,17 @@ int targetDataBegin(ident_t *Loc, DeviceTy &Device, int32_t ArgNum, // ATTACH map-types are supposed to be handled after all mapping for the // construct is done. Defer their processing. if (ArgTypes[I] & OMP_TGT_MAPTYPE_ATTACH) { - AttachInfo->AttachEntries.emplace_back( - /*PointerBase=*/HstPtrBase, /*PointeeBegin=*/HstPtrBegin, - /*PointerSize=*/DataSize, /*MapType=*/ArgTypes[I], - /*PointeeName=*/HstPtrName); + const bool IsCorrespondingPointerInit = + (ArgTypes[I] & OMP_TGT_MAPTYPE_PRIVATE); + // We don't need to keep track of PRIVATE | ATTACH entries. They + // represent corresponding-pointer-initialization, and are handled + // similar to firstprivate (PRIVATE | TO) entries by + // PrivateArgumentManager. + if (!IsCorrespondingPointerInit) + AttachInfo->AttachEntries.emplace_back( + /*PointerBase=*/HstPtrBase, /*PointeeBegin=*/HstPtrBegin, + /*PointerSize=*/DataSize, /*MapType=*/ArgTypes[I], + /*PointeeName=*/HstPtrName); DP("Deferring ATTACH map-type processing for argument %d\n", I); continue; @@ -1397,13 +1428,24 @@ class PrivateArgumentManagerTy { uint32_t Padding; /// Host pointer name map_var_info_t HstPtrName = nullptr; + /// For corresponding-pointer-initialization: host pointee base address. + void *HstPteeBase = nullptr; + /// For corresponding-pointer-initialization: host pointee begin address. + void *HstPteeBegin = nullptr; + /// Whether this argument needs corresponding-pointer-initialization. 
+ bool IsCorrespondingPointerInit = false; FirstPrivateArgInfoTy(int Index, void *HstPtr, uint32_t Size, uint32_t Alignment, uint32_t Padding, - map_var_info_t HstPtrName = nullptr) + map_var_info_t HstPtrName = nullptr, + void *HstPteeBase = nullptr, + void *HstPteeBegin = nullptr, + bool IsCorrespondingPointerInit = false) : HstPtrBegin(reinterpret_cast(HstPtr)), HstPtrEnd(HstPtrBegin + Size), Index(Index), Alignment(Alignment), - Size(Size), Padding(Padding), HstPtrName(HstPtrName) {} + Size(Size), Padding(Padding), HstPtrName(HstPtrName), + HstPteeBase(HstPteeBase), HstPteeBegin(HstPteeBegin), + IsCorrespondingPointerInit(IsCorrespondingPointerInit) {} }; /// A vector of target pointers for all private arguments @@ -1421,6 +1463,153 @@ class PrivateArgumentManagerTy { /// A pointer to a \p AsyncInfoTy object AsyncInfoTy &AsyncInfo; + /// \returns the value of the target pointee's base to be used for + /// corresponding-pointer-initialization. + void *getTargetPointeeBaseForCorrespondingPointerInitialization( + void *HstPteeBase, void *HstPteeBegin) { + // See if the pointee's begin address has corresponding storage on device. + void *TgtPteeBegin = [&]() -> void * { + if (!HstPteeBegin) { + DP("Corresponding-pointer-initialization: pointee begin address is " + "null\n"); + return nullptr; + } + + return Device.getMappingInfo() + .getTgtPtrBegin(HstPteeBegin, /*Size=*/0, /*UpdateRefCount=*/false, + /*UseHoldRefCount=*/false) + .TargetPointer; + }(); + + // If it does, we calculate target pointee base using it, and return it. + // Otherwise, we retain the host pointee's base as the target pointee base + // of the initialized pointer. It's the user's responsibility to ensure + // that if a lookup fails, the host pointee is accessible on the device. + return TgtPteeBegin ? calculateTargetPointeeBase(HstPteeBase, HstPteeBegin, + TgtPteeBegin) + : HstPteeBase; + } + + /// Initialize the source buffer for corresponding-pointer-initialization. + /// + /// It computes and stores the target pointee base address (or the host + /// pointee's base address, if lookup of target pointee fails) to the first + /// `sizeof(void*)` bytes of \p Buffer, and for larger pointers + /// (Fortran descriptors), the remaining fields of the host descriptor + /// \p HstPtr after those `sizeof(void*)` bytes. + /// + /// Corresponding-pointer-initialization represents the initialization of the + /// private version of a base-pointer/referring-pointer on a target construct. + /// + /// For example, for the following test: + /// ```cpp + /// int x[10]; + /// int *px = &x[0]; + /// ... + /// #pragma omp target data map(tofrom:px) + /// { + /// int **ppx = omp_get_mapped_ptr(&px, omp_get_default_device()); + /// #pragma omp target map(tofrom:px[1]) is_device_ptr(ppx) + /// { + /// foo(px, ppx); + /// } + /// } + /// ``` + /// The following shows a possible way to implement the mapping of `px`, + /// which is pre-determined firstprivate and should get initialized + /// via corresponding-pointer-initialization: + /// + /// (A) Possible way to implement the above with PRIVATE | ATTACH: + /// ```llvm + /// ; maps for px: + /// ; &px[0], &px[1], sizeof(px[1]), TO | FROM // (1) + /// ; &px, &px[1], sizeof(px), ATTACH // (2) + /// ; &px, &px[1], sizeof(px), PRIVATE | ATTACH | PARAM // (3) + /// call... @__omp_outlined...(ptr %px, ptr %ppx) + /// define ... @__omp_outlined(ptr %px, ptr %ppx) {... + /// foo(%px, %ppx) + /// ...} + /// ``` + /// `(1)` maps the pointee `px[1]. + /// `(2)` attaches it to the mapped version of `px`. 
It can be controlled by + /// the user based on the `attach(auto/always/never)` map-type modifier. + /// `(3)` privatizes and initializes the private pointer `px`, and passes it + /// into the kernel as the argument `%px`. Can be skipped if `px` is not + /// referenced in the target construct. + /// + /// While this method is not too beneficial compared to just doing the + /// initialization in the body of the kernel, like: + /// (B) Possible way to implement the above without PRIVATE | ATTACH: + /// ```llvm + /// ; maps for px: + /// ; &px[0], &px[1], sizeof(px[1]), TO | FROM | PARAM // (4) + /// ; &px, &px[1], sizeof(px), ATTACH // (5) + /// call... @__omp_outlined...(ptr %px0, ptr %ppx) + /// define ... __omp_outlined...(ptr %px0, ptr %ppx) { + /// %px = alloca ptr; + /// store ptr %px0, ptr %px + /// foo(%px, %ppx) + /// } + /// ``` + /// + /// (B) is not so convenient for Fortran descriptors, because in + /// addition to the lookup, the remaining fields of the descriptor have + /// to be passed into the kernel to initialize the private copy, which + /// makes (A) a cleaner option for them. e.g. + /// ```f90 + /// integer, pointer :: p(:) + /// !$omp target map(p(1)) + /// ``` + /// + /// (C) Possible mapping for the above Fortran test using PRIVATE | ATTACH: + /// ```llvm + /// ; maps for p: + /// ; &p(1), &p(1), sizeof(p(1)), TO | FROM + /// ; &ref_ptr(p), &p(1), sizeof(ref_ptr(p)), ATTACH + /// ; &ref_ptr(p), &p(1), sizeof(ref_ptr(p)), PRIVATE | ATTACH | PARAM + /// call... @__omp_outlined...(ptr %ref_ptr_of_p) + void initBufferForCorrespondingPointerInitialization(char *Buffer, + void *HstPtr, + int64_t HstPtrSize, + void *HstPteeBase, + void *HstPteeBegin) { + constexpr int64_t VoidPtrSize = sizeof(void *); + assert(HstPtrSize >= VoidPtrSize && + "corresponding-pointer-initialization: pointer size is too small"); + + void *TgtPteeBase = + getTargetPointeeBaseForCorrespondingPointerInitialization(HstPteeBase, + HstPteeBegin); + + // Store the target pointee base address to the first VoidPtrSize bytes + DP("Initializing corresponding-pointer-initialization source buffer " + "for " DPxMOD ", with pointee base " DPxMOD "\n", + DPxPTR(HstPtr), DPxPTR(TgtPteeBase)); + std::memcpy(Buffer, &TgtPteeBase, VoidPtrSize); + if (HstPtrSize <= VoidPtrSize) + return; + + // For Fortran descriptors, copy the remaining descriptor fields from host + uint64_t HstDescriptorFieldsSize = HstPtrSize - VoidPtrSize; + void *HstDescriptorFieldsAddr = static_cast(HstPtr) + VoidPtrSize; + DP("Copying %" PRId64 + " bytes of descriptor fields into corresponding-pointer-initialization " + "buffer at offset %" PRId64 ", from " DPxMOD "\n", + HstDescriptorFieldsSize, VoidPtrSize, DPxPTR(HstDescriptorFieldsAddr)); + std::memcpy(Buffer + VoidPtrSize, HstDescriptorFieldsAddr, + HstDescriptorFieldsSize); + } + + /// Helper function to create and initialize a buffer to be used as the source + /// for corresponding-pointer-initialization. + void *createAndInitSourceBufferForCorrespondingPointerInitialization( + void *HstPtr, int64_t HstPtrSize, void *HstPteeBase, void *HstPteeBegin) { + char *Buffer = getOrCreateSourceBufferForSubmitData(AsyncInfo, HstPtrSize); + initBufferForCorrespondingPointerInitialization(Buffer, HstPtr, HstPtrSize, + HstPteeBase, HstPteeBegin); + return Buffer; + } + // TODO: What would be the best value here? Should we make it configurable? // If the size is larger than this threshold, we will allocate and transfer it // immediately instead of packing it. 
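The submit buffers built by performPointerAttachment and initBufferForCorrespondingPointerInitialization above share one layout: the first sizeof(void*) bytes carry the pointee base address to install on the device, and any remaining bytes copy the rest of the host descriptor verbatim. A standalone sketch of that layout with illustrative names, not the libomptarget code itself:

```cpp
#include <cstddef>
#include <cstring>
#include <vector>

// Illustration of the submit-buffer layout described above:
//   bytes [0, sizeof(void*))    -> pointee base address to install on the device
//   bytes [sizeof(void*), Size) -> remaining host descriptor fields, verbatim
static std::vector<char> makeSubmitBuffer(const void *HstPtr,
                                           std::size_t HstPtrSize,
                                           void *TgtPteeBase) {
  constexpr std::size_t VoidPtrSize = sizeof(void *);
  std::vector<char> Buffer(HstPtrSize);
  std::memcpy(Buffer.data(), &TgtPteeBase, VoidPtrSize);
  if (HstPtrSize > VoidPtrSize)
    std::memcpy(Buffer.data() + VoidPtrSize,
                static_cast<const char *>(HstPtr) + VoidPtrSize,
                HstPtrSize - VoidPtrSize);
  return Buffer;
}
```

For a plain C pointer, HstPtrSize equals sizeof(void*) and only the first copy runs; for a Fortran descriptor it is larger and the trailing fields ride along in the same submission.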
@@ -1435,7 +1624,9 @@ class PrivateArgumentManagerTy { int addArg(void *HstPtr, int64_t ArgSize, int64_t ArgOffset, bool IsFirstPrivate, void *&TgtPtr, int TgtArgsIndex, map_var_info_t HstPtrName = nullptr, - const bool AllocImmediately = false) { + const bool AllocImmediately = false, void *HstPteeBase = nullptr, + void *HstPteeBegin = nullptr, + bool IsCorrespondingPointerInit = false) { // If the argument is not first-private, or its size is greater than a // predefined threshold, we will allocate memory and issue the transfer // immediately. @@ -1458,9 +1649,19 @@ class PrivateArgumentManagerTy { // If first-private, copy data from host if (IsFirstPrivate) { DP("Submitting firstprivate data to the device.\n"); - int Ret = Device.submitData(TgtPtr, HstPtr, ArgSize, AsyncInfo); + + // The source value used for corresponding-pointer-initialization + // is different vs regular firstprivates. + void *DataSource = + IsCorrespondingPointerInit + ? createAndInitSourceBufferForCorrespondingPointerInitialization( + HstPtr, ArgSize, HstPteeBase, HstPteeBegin) + : HstPtr; + int Ret = Device.submitData(TgtPtr, DataSource, ArgSize, AsyncInfo); if (Ret != OFFLOAD_SUCCESS) { - DP("Copying data to device failed, failed.\n"); + DP("Copying %s data to device failed.\n", + IsCorrespondingPointerInit ? "corresponding-pointer-initialization" + : "firstprivate"); return OFFLOAD_FAIL; } } @@ -1506,8 +1707,10 @@ class PrivateArgumentManagerTy { } } - FirstPrivateArgInfo.emplace_back(TgtArgsIndex, HstPtr, ArgSize, - StartAlignment, Padding, HstPtrName); + FirstPrivateArgInfo.emplace_back( + TgtArgsIndex, HstPtr, ArgSize, StartAlignment, Padding, HstPtrName, + HstPteeBase, HstPteeBegin, IsCorrespondingPointerInit); + FirstPrivateArgSize += Padding + ArgSize; } @@ -1526,7 +1729,13 @@ class PrivateArgumentManagerTy { for (FirstPrivateArgInfoTy &Info : FirstPrivateArgInfo) { // First pad the pointer as we (have to) pad it on the device too. Itr = std::next(Itr, Info.Padding); - std::copy(Info.HstPtrBegin, Info.HstPtrEnd, Itr); + + if (Info.IsCorrespondingPointerInit) + initBufferForCorrespondingPointerInitialization( + &*Itr, Info.HstPtrBegin, Info.Size, Info.HstPteeBase, + Info.HstPteeBegin); + else + std::copy(Info.HstPtrBegin, Info.HstPtrEnd, Itr); Itr = std::next(Itr, Info.Size); } // Allocate target memory @@ -1682,8 +1891,40 @@ static int processDataBefore(ident_t *Loc, int64_t DeviceId, void *HostPtr, TgtPtrBegin = HstPtrBase; TgtBaseOffset = 0; } else if (ArgTypes[I] & OMP_TGT_MAPTYPE_PRIVATE) { + // For cases like: + // ``` + // int *p = ...; + // #pragma omp target map(p[0:10]) + // ``` + // `p` is predetermined firstprivate on the target construct, and the + // method to determine the initial value of the private copy on the + // device is called "corresponding-pointer-initialization". + // + // Such firstprivate pointers that need + // corresponding-pointer-initialization are represented using the + // `PRIVATE | ATTACH` map-types, in contrast to regular firstprivate + // entries, which use `PRIVATE | TO`. The structure of these + // `PRIVATE | ATTACH` entries is the same as the non-private + // `ATTACH` entries used to represent pointer-attachments, i.e.: + // ``` + // &hst_ptr_base/begin, &hst_ptee_begin, sizeof(hst_ptr) + // ``` + const bool IsAttach = (ArgTypes[I] & OMP_TGT_MAPTYPE_ATTACH); + void *HstPteeBase = nullptr; + void *HstPteeBegin = nullptr; + if (IsAttach) { + // For corresponding-pointer-initialization, Args[I] is HstPteeBegin, + // and ArgBases[I] is both HstPtrBase/HstPtrBegin. 
+ HstPteeBase = *reinterpret_cast(HstPtrBase); + HstPteeBegin = Args[I]; + HstPtrBegin = ArgBases[I]; + } TgtBaseOffset = (intptr_t)HstPtrBase - (intptr_t)HstPtrBegin; - const bool IsFirstPrivate = (ArgTypes[I] & OMP_TGT_MAPTYPE_TO); + // Corresponding-pointer-initialization is a special case of firstprivate, + // since it also involves initializing the private pointer. + const bool IsFirstPrivate = + (ArgTypes[I] & OMP_TGT_MAPTYPE_TO) || IsAttach; + // If there is a next argument and it depends on the current one, we need // to allocate the private memory immediately. If this is not the case, // then the argument can be marked for optimization and packed with the @@ -1692,9 +1933,11 @@ static int processDataBefore(ident_t *Loc, int64_t DeviceId, void *HostPtr, (I < ArgNum - 1 && (ArgTypes[I + 1] & OMP_TGT_MAPTYPE_MEMBER_OF)); Ret = PrivateArgumentManager.addArg( HstPtrBegin, ArgSizes[I], TgtBaseOffset, IsFirstPrivate, TgtPtrBegin, - TgtArgs.size(), HstPtrName, AllocImmediately); + /*TgtArgsIndex=*/TgtArgs.size(), HstPtrName, AllocImmediately, + HstPteeBase, HstPteeBegin, /*IsCorrespondingPointerInit=*/IsAttach); if (Ret != OFFLOAD_SUCCESS) { - REPORT("Failed to process %sprivate argument " DPxMOD "\n", + REPORT("Failed to process %s%sprivate argument " DPxMOD "\n", + IsAttach ? "corresponding-pointer-initialization " : "", (IsFirstPrivate ? "first-" : ""), DPxPTR(HstPtrBegin)); return OFFLOAD_FAIL; } diff --git a/offload/plugins-nextgen/amdgpu/src/rtl.cpp b/offload/plugins-nextgen/amdgpu/src/rtl.cpp index 64470e9fabf46..f73fa0475a3a7 100644 --- a/offload/plugins-nextgen/amdgpu/src/rtl.cpp +++ b/offload/plugins-nextgen/amdgpu/src/rtl.cpp @@ -423,7 +423,11 @@ struct AMDGPUMemoryManagerTy : public DeviceAllocatorTy { assert(MemoryManager && "Invalid memory manager"); assert(PtrStorage && "Invalid pointer storage"); - *PtrStorage = MemoryManager->allocate(Size, nullptr); + auto PtrStorageOrErr = MemoryManager->allocate(Size, nullptr); + if (!PtrStorageOrErr) + return PtrStorageOrErr.takeError(); + + *PtrStorage = *PtrStorageOrErr; if (Size && *PtrStorage == nullptr) return Plugin::error(ErrorCode::OUT_OF_RESOURCES, "failure to allocate from AMDGPU memory manager"); @@ -443,15 +447,12 @@ struct AMDGPUMemoryManagerTy : public DeviceAllocatorTy { private: /// Allocation callback that will be called once the memory manager does not /// have more previously allocated buffers. - void *allocate(size_t Size, void *HstPtr, TargetAllocTy Kind) override; + Expected allocate(size_t Size, void *HstPtr, + TargetAllocTy Kind) override; /// Deallocation callback that will be called by the memory manager. - int free(void *TgtPtr, TargetAllocTy Kind) override { - if (auto Err = MemoryPool->deallocate(TgtPtr)) { - consumeError(std::move(Err)); - return OFFLOAD_FAIL; - } - return OFFLOAD_SUCCESS; + Error free(void *TgtPtr, TargetAllocTy Kind) override { + return MemoryPool->deallocate(TgtPtr); } /// The underlying plugin that owns this memory manager. @@ -2339,12 +2340,12 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy { } /// Allocate memory on the device or related to the device. - void *allocate(size_t Size, void *, TargetAllocTy Kind) override; + Expected allocate(size_t Size, void *, TargetAllocTy Kind) override; /// Deallocate memory on the device or related to the device. 
- int free(void *TgtPtr, TargetAllocTy Kind) override { + Error free(void *TgtPtr, TargetAllocTy Kind) override { if (TgtPtr == nullptr) - return OFFLOAD_SUCCESS; + return Plugin::success(); AMDGPUMemoryPoolTy *MemoryPool = nullptr; switch (Kind) { @@ -2360,17 +2361,14 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy { break; } - if (!MemoryPool) { - REPORT("No memory pool for the specified allocation kind\n"); - return OFFLOAD_FAIL; - } + if (!MemoryPool) + return Plugin::error(ErrorCode::OUT_OF_RESOURCES, + "no memory pool for the specified allocation kind"); - if (Error Err = MemoryPool->deallocate(TgtPtr)) { - REPORT("%s\n", toString(std::move(Err)).data()); - return OFFLOAD_FAIL; - } + if (auto Err = MemoryPool->deallocate(TgtPtr)) + return Err; - return OFFLOAD_SUCCESS; + return Plugin::success(); } /// Synchronize current thread with the pending operations on the async info. @@ -2714,6 +2712,37 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy { return Plugin::success(); } + interop_spec_t selectInteropPreference(int32_t InteropType, + int32_t NumPrefers, + interop_spec_t *Prefers) override { + // TODO: update once targetsync is supported + if (InteropType == kmp_interop_type_target) + return interop_spec_t{tgt_fr_hsa, {false, 0}, 0}; + return interop_spec_t{tgt_fr_none, {false, 0}, 0}; + } + + Expected + createInterop(int32_t InteropType, interop_spec_t &InteropSpec) override { + auto *Ret = new omp_interop_val_t( + DeviceId, static_cast(InteropType)); + Ret->fr_id = tgt_fr_hsa; + Ret->vendor_id = omp_vendor_amd; + + // TODO: implement targetsync support + + Ret->device_info.Platform = nullptr; + Ret->device_info.Device = reinterpret_cast(Agent.handle); + Ret->device_info.Context = nullptr; + + return Ret; + } + + Error releaseInterop(omp_interop_val_t *Interop) override { + if (Interop) + delete Interop; + return Plugin::success(); + } + Error enqueueHostCallImpl(void (*Callback)(void *), void *UserData, AsyncInfoWrapperTy &AsyncInfo) override { AMDGPUStreamTy *Stream = nullptr; @@ -3813,14 +3842,13 @@ static Error Plugin::check(int32_t Code, const char *ErrFmt, ArgsTy... Args) { return Plugin::error(OffloadErrCode, ErrFmt, Args..., Desc); } -void *AMDGPUMemoryManagerTy::allocate(size_t Size, void *HstPtr, - TargetAllocTy Kind) { +Expected AMDGPUMemoryManagerTy::allocate(size_t Size, void *HstPtr, + TargetAllocTy Kind) { // Allocate memory from the pool. void *Ptr = nullptr; - if (auto Err = MemoryPool->allocate(Size, &Ptr)) { - consumeError(std::move(Err)); - return nullptr; - } + if (auto Err = MemoryPool->allocate(Size, &Ptr)) + return std::move(Err); + assert(Ptr && "Invalid pointer"); // Get a list of agents that can access this memory pool. @@ -3830,14 +3858,13 @@ void *AMDGPUMemoryManagerTy::allocate(size_t Size, void *HstPtr, [&](hsa_agent_t Agent) { return MemoryPool->canAccess(Agent); }); // Allow all valid kernel agents to access the allocation. 
- if (auto Err = MemoryPool->enableAccess(Ptr, Size, Agents)) { - REPORT("%s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = MemoryPool->enableAccess(Ptr, Size, Agents)) + return std::move(Err); return Ptr; } -void *AMDGPUDeviceTy::allocate(size_t Size, void *, TargetAllocTy Kind) { +Expected AMDGPUDeviceTy::allocate(size_t Size, void *, + TargetAllocTy Kind) { if (Size == 0) return nullptr; @@ -3856,17 +3883,14 @@ void *AMDGPUDeviceTy::allocate(size_t Size, void *, TargetAllocTy Kind) { break; } - if (!MemoryPool) { - REPORT("No memory pool for the specified allocation kind\n"); - return nullptr; - } + if (!MemoryPool) + return Plugin::error(ErrorCode::UNSUPPORTED, + "no memory pool for the specified allocation kind"); // Allocate from the corresponding memory pool. void *Alloc = nullptr; - if (Error Err = MemoryPool->allocate(Size, &Alloc)) { - REPORT("%s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = MemoryPool->allocate(Size, &Alloc)) + return std::move(Err); if (Alloc) { // Get a list of agents that can access this memory pool. Inherently @@ -3879,10 +3903,8 @@ void *AMDGPUDeviceTy::allocate(size_t Size, void *, TargetAllocTy Kind) { }); // Enable all valid kernel agents to access the buffer. - if (auto Err = MemoryPool->enableAccess(Alloc, Size, Agents)) { - REPORT("%s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = MemoryPool->enableAccess(Alloc, Size, Agents)) + return std::move(Err); } return Alloc; diff --git a/offload/plugins-nextgen/common/include/MemoryManager.h b/offload/plugins-nextgen/common/include/MemoryManager.h index a4f6e628c403a..8f6c1adcdaa58 100644 --- a/offload/plugins-nextgen/common/include/MemoryManager.h +++ b/offload/plugins-nextgen/common/include/MemoryManager.h @@ -25,6 +25,10 @@ #include "Shared/Utils.h" #include "omptarget.h" +#include "llvm/Support/Error.h" + +namespace llvm { + /// Base class of per-device allocator. class DeviceAllocatorTy { public: @@ -32,11 +36,13 @@ class DeviceAllocatorTy { /// Allocate a memory of size \p Size . \p HstPtr is used to assist the /// allocation. - virtual void *allocate(size_t Size, void *HstPtr, - TargetAllocTy Kind = TARGET_ALLOC_DEFAULT) = 0; + virtual Expected + allocate(size_t Size, void *HstPtr, + TargetAllocTy Kind = TARGET_ALLOC_DEFAULT) = 0; /// Delete the pointer \p TgtPtr on the device - virtual int free(void *TgtPtr, TargetAllocTy Kind = TARGET_ALLOC_DEFAULT) = 0; + virtual Error free(void *TgtPtr, + TargetAllocTy Kind = TARGET_ALLOC_DEFAULT) = 0; }; /// Class of memory manager. The memory manager is per-device by using @@ -134,17 +140,17 @@ class MemoryManagerTy { size_t SizeThreshold = 1U << 13; /// Request memory from target device - void *allocateOnDevice(size_t Size, void *HstPtr) const { + Expected allocateOnDevice(size_t Size, void *HstPtr) const { return DeviceAllocator.allocate(Size, HstPtr, TARGET_ALLOC_DEVICE); } /// Deallocate data on device - int deleteOnDevice(void *Ptr) const { return DeviceAllocator.free(Ptr); } + Error deleteOnDevice(void *Ptr) const { return DeviceAllocator.free(Ptr); } /// This function is called when it tries to allocate memory on device but the /// device returns out of memory. It will first free all memory in the /// FreeList and try to allocate again. 
- void *freeAndAllocate(size_t Size, void *HstPtr) { + Expected freeAndAllocate(size_t Size, void *HstPtr) { std::vector RemoveList; // Deallocate all memory in FreeList @@ -154,7 +160,8 @@ class MemoryManagerTy { if (List.empty()) continue; for (const NodeTy &N : List) { - deleteOnDevice(N.Ptr); + if (auto Err = deleteOnDevice(N.Ptr)) + return Err; RemoveList.push_back(N.Ptr); } FreeLists[I].clear(); @@ -175,14 +182,22 @@ class MemoryManagerTy { /// allocate directly on the device. If a \p nullptr is returned, it might /// be because the device is OOM. In that case, it will free all unused /// memory and then try again. - void *allocateOrFreeAndAllocateOnDevice(size_t Size, void *HstPtr) { - void *TgtPtr = allocateOnDevice(Size, HstPtr); + Expected allocateOrFreeAndAllocateOnDevice(size_t Size, + void *HstPtr) { + auto TgtPtrOrErr = allocateOnDevice(Size, HstPtr); + if (!TgtPtrOrErr) + return TgtPtrOrErr.takeError(); + + void *TgtPtr = *TgtPtrOrErr; // We cannot get memory from the device. It might be due to OOM. Let's // free all memory in FreeLists and try again. if (TgtPtr == nullptr) { DP("Failed to get memory on device. Free all memory in FreeLists and " "try again.\n"); - TgtPtr = freeAndAllocate(Size, HstPtr); + TgtPtrOrErr = freeAndAllocate(Size, HstPtr); + if (!TgtPtrOrErr) + return TgtPtrOrErr.takeError(); + TgtPtr = *TgtPtrOrErr; } if (TgtPtr == nullptr) @@ -204,16 +219,17 @@ class MemoryManagerTy { /// Destructor ~MemoryManagerTy() { - for (auto Itr = PtrToNodeTable.begin(); Itr != PtrToNodeTable.end(); - ++Itr) { - assert(Itr->second.Ptr && "nullptr in map table"); - deleteOnDevice(Itr->second.Ptr); + for (auto &PtrToNode : PtrToNodeTable) { + assert(PtrToNode.second.Ptr && "nullptr in map table"); + if (auto Err = deleteOnDevice(PtrToNode.second.Ptr)) + REPORT("Failure to delete memory: %s\n", + toString(std::move(Err)).data()); } } /// Allocate memory of size \p Size from target device. \p HstPtr is used to /// assist the allocation. - void *allocate(size_t Size, void *HstPtr) { + Expected allocate(size_t Size, void *HstPtr) { // If the size is zero, we will not bother the target device. Just return // nullptr directly. if (Size == 0) @@ -228,11 +244,14 @@ class MemoryManagerTy { DP("%zu is greater than the threshold %zu. Allocate it directly from " "device\n", Size, SizeThreshold); - void *TgtPtr = allocateOrFreeAndAllocateOnDevice(Size, HstPtr); + auto TgtPtrOrErr = allocateOrFreeAndAllocateOnDevice(Size, HstPtr); + if (!TgtPtrOrErr) + return TgtPtrOrErr.takeError(); - DP("Got target pointer " DPxMOD ". Return directly.\n", DPxPTR(TgtPtr)); + DP("Got target pointer " DPxMOD ". Return directly.\n", + DPxPTR(*TgtPtrOrErr)); - return TgtPtr; + return *TgtPtrOrErr; } NodeTy *NodePtr = nullptr; @@ -260,8 +279,11 @@ class MemoryManagerTy { if (NodePtr == nullptr) { DP("Cannot find a node in the FreeLists. 
Allocate on device.\n"); // Allocate one on device - void *TgtPtr = allocateOrFreeAndAllocateOnDevice(Size, HstPtr); + auto TgtPtrOrErr = allocateOrFreeAndAllocateOnDevice(Size, HstPtr); + if (!TgtPtrOrErr) + return TgtPtrOrErr.takeError(); + void *TgtPtr = *TgtPtrOrErr; if (TgtPtr == nullptr) return nullptr; @@ -282,7 +304,7 @@ class MemoryManagerTy { } /// Deallocate memory pointed by \p TgtPtr - int free(void *TgtPtr) { + Error free(void *TgtPtr) { DP("MemoryManagerTy::free: target memory " DPxMOD ".\n", DPxPTR(TgtPtr)); NodeTy *P = nullptr; @@ -314,7 +336,7 @@ class MemoryManagerTy { FreeLists[B].insert(*P); } - return OFFLOAD_SUCCESS; + return Error::success(); } /// Get the size threshold from the environment variable @@ -344,4 +366,6 @@ class MemoryManagerTy { constexpr const size_t MemoryManagerTy::BucketSize[]; constexpr const int MemoryManagerTy::NumBuckets; +} // namespace llvm + #endif // LLVM_OPENMP_LIBOMPTARGET_PLUGINS_COMMON_MEMORYMANAGER_H diff --git a/offload/plugins-nextgen/common/include/PluginInterface.h b/offload/plugins-nextgen/common/include/PluginInterface.h index 9d5651a3d7b4e..5620437716b31 100644 --- a/offload/plugins-nextgen/common/include/PluginInterface.h +++ b/offload/plugins-nextgen/common/include/PluginInterface.h @@ -193,7 +193,7 @@ struct InfoTreeNode { InfoTreeNode() : InfoTreeNode("", std::monostate{}, "") {} InfoTreeNode(std::string Key, VariantType Value, std::string Units) - : Key(Key), Value(Value), Units(Units) {} + : Key(std::move(Key)), Value(Value), Units(std::move(Units)) {} /// Add a new info entry as a child of this node. The entry requires at least /// a key string in \p Key. The value in \p Value is optional and can be any @@ -202,7 +202,7 @@ struct InfoTreeNode { /// use that value for an appropriate olGetDeviceInfo query template InfoTreeNode *add(std::string Key, T Value = T(), - const std::string &Units = std::string(), + std::string Units = std::string(), std::optional DeviceInfoKey = std::nullopt) { assert(!Key.empty() && "Invalid info key"); @@ -217,7 +217,8 @@ struct InfoTreeNode { else ValueVariant = std::string{Value}; - auto Ptr = &Children->emplace_back(Key, ValueVariant, Units); + auto Ptr = + &Children->emplace_back(std::move(Key), ValueVariant, std::move(Units)); if (DeviceInfoKey) DeviceInfoMap[*DeviceInfoKey] = Children->size() - 1; diff --git a/offload/plugins-nextgen/common/src/PluginInterface.cpp b/offload/plugins-nextgen/common/src/PluginInterface.cpp index 30b5db782370d..15b6b9866e5a2 100644 --- a/offload/plugins-nextgen/common/src/PluginInterface.cpp +++ b/offload/plugins-nextgen/common/src/PluginInterface.cpp @@ -73,11 +73,17 @@ struct RecordReplayTy { }; llvm::SmallVector GlobalEntries{}; - void *suggestAddress(uint64_t MaxMemoryAllocation) { + Expected suggestAddress(uint64_t MaxMemoryAllocation) { // Get a valid pointer address for this system - void *Addr = + auto AddrOrErr = Device->allocate(1024, /*HstPtr=*/nullptr, TARGET_ALLOC_DEFAULT); - Device->free(Addr); + if (!AddrOrErr) + return AddrOrErr.takeError(); + + void *Addr = *AddrOrErr; + if (auto Err = Device->free(Addr)) + return std::move(Err); + // Align Address to MaxMemoryAllocation Addr = (void *)utils::alignPtr((Addr), MaxMemoryAllocation); return Addr; @@ -86,8 +92,12 @@ struct RecordReplayTy { Error preAllocateVAMemory(uint64_t MaxMemoryAllocation, void *VAddr) { size_t ASize = MaxMemoryAllocation; - if (!VAddr && isRecording()) - VAddr = suggestAddress(MaxMemoryAllocation); + if (!VAddr && isRecording()) { + auto VAddrOrErr = 
suggestAddress(MaxMemoryAllocation); + if (!VAddrOrErr) + return VAddrOrErr.takeError(); + VAddr = *VAddrOrErr; + } DP("Request %ld bytes allocated at %p\n", MaxMemoryAllocation, VAddr); @@ -117,8 +127,11 @@ struct RecordReplayTy { constexpr size_t STEP = 1024 * 1024 * 1024ULL; MemoryStart = nullptr; for (TotalSize = MAX_MEMORY_ALLOCATION; TotalSize > 0; TotalSize -= STEP) { - MemoryStart = + auto MemoryStartOrErr = Device->allocate(TotalSize, /*HstPtr=*/nullptr, TARGET_ALLOC_DEFAULT); + if (!MemoryStartOrErr) + return MemoryStartOrErr.takeError(); + MemoryStart = *MemoryStartOrErr; if (MemoryStart) break; } @@ -352,13 +365,15 @@ struct RecordReplayTy { return Plugin::success(); } - void deinit() { + Error deinit() { if (UsedVAMap) { if (auto Err = Device->memoryVAUnMap(MemoryStart, TotalSize)) - report_fatal_error("Error on releasing virtual memory space"); + return Err; } else { - Device->free(MemoryStart); + if (auto Err = Device->free(MemoryStart)) + return Err; } + return Plugin::success(); } }; } // namespace llvm::omp::target::plugin @@ -838,7 +853,8 @@ Error GenericDeviceTy::deinit(GenericPluginTy &Plugin) { RecordReplayTy &RecordReplay = Plugin.getRecordReplay(); if (RecordReplay.isRecordingOrReplaying()) - RecordReplay.deinit(); + if (auto Err = RecordReplay.deinit()) + return Err; if (RPCServer) if (auto Err = RPCServer->deinitDevice(*this)) @@ -1297,7 +1313,10 @@ Expected GenericDeviceTy::dataAlloc(int64_t Size, void *HostPtr, case TARGET_ALLOC_DEFAULT: case TARGET_ALLOC_DEVICE: if (MemoryManager) { - Alloc = MemoryManager->allocate(Size, HostPtr); + auto AllocOrErr = MemoryManager->allocate(Size, HostPtr); + if (!AllocOrErr) + return AllocOrErr.takeError(); + Alloc = *AllocOrErr; if (!Alloc) return Plugin::error(ErrorCode::OUT_OF_RESOURCES, "failed to allocate from memory manager"); @@ -1305,12 +1324,16 @@ Expected GenericDeviceTy::dataAlloc(int64_t Size, void *HostPtr, } [[fallthrough]]; case TARGET_ALLOC_HOST: - case TARGET_ALLOC_SHARED: - Alloc = allocate(Size, HostPtr, Kind); + case TARGET_ALLOC_SHARED: { + auto AllocOrErr = allocate(Size, HostPtr, Kind); + if (!AllocOrErr) + return AllocOrErr.takeError(); + Alloc = *AllocOrErr; if (!Alloc) return Plugin::error(ErrorCode::OUT_OF_RESOURCES, "failed to allocate from device allocator"); } + } // Report error if the memory manager or the device allocator did not return // any memory buffer. @@ -1382,28 +1405,19 @@ Error GenericDeviceTy::dataDelete(void *TgtPtr, TargetAllocTy Kind) { #undef DEALLOCATION_ERROR } - int Res; switch (Kind) { case TARGET_ALLOC_DEFAULT: case TARGET_ALLOC_DEVICE: if (MemoryManager) { - Res = MemoryManager->free(TgtPtr); - if (Res) - return Plugin::error( - ErrorCode::OUT_OF_RESOURCES, - "failure to deallocate device pointer %p via memory manager", - TgtPtr); + if (auto Err = MemoryManager->free(TgtPtr)) + return Err; break; } [[fallthrough]]; case TARGET_ALLOC_HOST: case TARGET_ALLOC_SHARED: - Res = free(TgtPtr, Kind); - if (Res) - return Plugin::error( - ErrorCode::UNKNOWN, - "failure to deallocate device pointer %p via device deallocator", - TgtPtr); + if (auto Err = free(TgtPtr, Kind)) + return Err; } // Unregister deallocated pinned memory buffer if the type is host memory. 
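The hunks above and below repeat one mechanical conversion: allocators that used to signal failure with a null pointer or `OFFLOAD_FAIL` now return `llvm::Expected<void *>` or `llvm::Error`, and every caller either propagates the failure with `takeError()` or unwraps the value with `*`. A minimal sketch of that caller-side idiom, assuming only LLVM's `Error.h`; the `deviceAlloc`/`deviceFree` names are illustrative and not part of the patch.

```cpp
#include "llvm/Support/Error.h"
#include <cstdlib>

using namespace llvm;

// Hypothetical allocator following the new plugin convention.
Expected<void *> deviceAlloc(size_t Size) {
  if (void *Ptr = std::malloc(Size))
    return Ptr;
  return createStringError(inconvertibleErrorCode(),
                           "failed to allocate %zu bytes", Size);
}

Error deviceFree(void *Ptr) {
  std::free(Ptr);
  return Error::success();
}

// Caller-side idiom used throughout the patch:
// propagate on failure, unwrap on success.
Error useAllocation(size_t Size) {
  auto PtrOrErr = deviceAlloc(Size);
  if (!PtrOrErr)
    return PtrOrErr.takeError();

  void *Ptr = *PtrOrErr;
  // ... use Ptr ...
  return deviceFree(Ptr);
}
```

The practical gain over the old integer convention is that the concrete failure reason survives all the way up to `GenericDeviceTy::dataAlloc`/`dataDelete`, rather than being flattened into a generic "out of resources" report at the plugin boundary.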
@@ -1714,7 +1728,8 @@ int32_t GenericPluginTy::is_initialized() const { return Initialized; } int32_t GenericPluginTy::isPluginCompatible(StringRef Image) { auto HandleError = [&](Error Err) -> bool { [[maybe_unused]] std::string ErrStr = toString(std::move(Err)); - DP("Failure to check validity of image %p: %s", Image, ErrStr.c_str()); + DP("Failure to check validity of image %p: %s", Image.data(), + ErrStr.c_str()); return false; }; switch (identify_magic(Image)) { @@ -1742,7 +1757,8 @@ int32_t GenericPluginTy::isPluginCompatible(StringRef Image) { int32_t GenericPluginTy::isDeviceCompatible(int32_t DeviceId, StringRef Image) { auto HandleError = [&](Error Err) -> bool { [[maybe_unused]] std::string ErrStr = toString(std::move(Err)); - DP("Failure to check validity of image %p: %s", Image, ErrStr.c_str()); + DP("Failure to check validity of image %p: %s", Image.data(), + ErrStr.c_str()); return false; }; switch (identify_magic(Image)) { diff --git a/offload/plugins-nextgen/common/src/RPC.cpp b/offload/plugins-nextgen/common/src/RPC.cpp index 17d69b49b3b7e..e19f2ef94de6e 100644 --- a/offload/plugins-nextgen/common/src/RPC.cpp +++ b/offload/plugins-nextgen/common/src/RPC.cpp @@ -28,15 +28,22 @@ rpc::Status handleOffloadOpcodes(plugin::GenericDeviceTy &Device, switch (Port.get_opcode()) { case LIBC_MALLOC: { Port.recv_and_send([&](rpc::Buffer *Buffer, uint32_t) { - Buffer->data[0] = reinterpret_cast( - Device.allocate(Buffer->data[0], nullptr, TARGET_ALLOC_DEVICE)); + auto PtrOrErr = + Device.allocate(Buffer->data[0], nullptr, TARGET_ALLOC_DEVICE); + void *Ptr = nullptr; + if (!PtrOrErr) + llvm::consumeError(PtrOrErr.takeError()); + else + Ptr = *PtrOrErr; + Buffer->data[0] = reinterpret_cast(Ptr); }); break; } case LIBC_FREE: { Port.recv([&](rpc::Buffer *Buffer, uint32_t) { - Device.free(reinterpret_cast(Buffer->data[0]), - TARGET_ALLOC_DEVICE); + if (auto Err = Device.free(reinterpret_cast(Buffer->data[0]), + TARGET_ALLOC_DEVICE)) + llvm::consumeError(std::move(Err)); }); break; } @@ -171,9 +178,13 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device, plugin::DeviceImageTy &Image) { uint64_t NumPorts = std::min(Device.requestedRPCPortCount(), rpc::MAX_PORT_COUNT); - void *RPCBuffer = Device.allocate( + auto RPCBufferOrErr = Device.allocate( rpc::Server::allocation_size(Device.getWarpSize(), NumPorts), nullptr, TARGET_ALLOC_HOST); + if (!RPCBufferOrErr) + return RPCBufferOrErr.takeError(); + + void *RPCBuffer = *RPCBufferOrErr; if (!RPCBuffer) return plugin::Plugin::error( error::ErrorCode::UNKNOWN, @@ -198,7 +209,8 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device, Error RPCServerTy::deinitDevice(plugin::GenericDeviceTy &Device) { std::lock_guard Lock(BufferMutex); - Device.free(Buffers[Device.getDeviceId()], TARGET_ALLOC_HOST); + if (auto Err = Device.free(Buffers[Device.getDeviceId()], TARGET_ALLOC_HOST)) + return Err; Buffers[Device.getDeviceId()] = nullptr; Devices[Device.getDeviceId()] = nullptr; return Error::success(); diff --git a/offload/plugins-nextgen/cuda/src/rtl.cpp b/offload/plugins-nextgen/cuda/src/rtl.cpp index b2f840113cff3..e5c4a1bfa9853 100644 --- a/offload/plugins-nextgen/cuda/src/rtl.cpp +++ b/offload/plugins-nextgen/cuda/src/rtl.cpp @@ -561,14 +561,12 @@ struct CUDADeviceTy : public GenericDeviceTy { } /// Allocate memory on the device or related to the device. 
- void *allocate(size_t Size, void *, TargetAllocTy Kind) override { + Expected allocate(size_t Size, void *, TargetAllocTy Kind) override { if (Size == 0) return nullptr; - if (auto Err = setContext()) { - REPORT("Failure to alloc memory: %s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = setContext()) + return std::move(Err); void *MemAlloc = nullptr; CUdeviceptr DevicePtr; @@ -589,23 +587,18 @@ struct CUDADeviceTy : public GenericDeviceTy { break; } - if (auto Err = - Plugin::check(Res, "error in cuMemAlloc[Host|Managed]: %s")) { - REPORT("Failure to alloc memory: %s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = Plugin::check(Res, "error in cuMemAlloc[Host|Managed]: %s")) + return std::move(Err); return MemAlloc; } /// Deallocate memory on the device or related to the device. - int free(void *TgtPtr, TargetAllocTy Kind) override { + Error free(void *TgtPtr, TargetAllocTy Kind) override { if (TgtPtr == nullptr) - return OFFLOAD_SUCCESS; + return Plugin::success(); - if (auto Err = setContext()) { - REPORT("Failure to free memory: %s\n", toString(std::move(Err)).data()); - return OFFLOAD_FAIL; - } + if (auto Err = setContext()) + return Err; CUresult Res; switch (Kind) { @@ -619,11 +612,7 @@ struct CUDADeviceTy : public GenericDeviceTy { break; } - if (auto Err = Plugin::check(Res, "error in cuMemFree[Host]: %s")) { - REPORT("Failure to free memory: %s\n", toString(std::move(Err)).data()); - return OFFLOAD_FAIL; - } - return OFFLOAD_SUCCESS; + return Plugin::check(Res, "error in cuMemFree[Host]: %s"); } /// Synchronize current thread with the pending operations on the async info. @@ -928,6 +917,50 @@ struct CUDADeviceTy : public GenericDeviceTy { return Plugin::success(); } + interop_spec_t selectInteropPreference(int32_t InteropType, + int32_t NumPrefers, + interop_spec_t *Prefers) override { + return interop_spec_t{tgt_fr_cuda, {true, 0}, 0}; + } + + Expected + createInterop(int32_t InteropType, interop_spec_t &InteropSpec) override { + auto *Ret = new omp_interop_val_t( + DeviceId, static_cast(InteropType)); + Ret->fr_id = tgt_fr_cuda; + Ret->vendor_id = omp_vendor_nvidia; + + if (InteropType == kmp_interop_type_target || + InteropType == kmp_interop_type_targetsync) { + Ret->device_info.Platform = nullptr; + Ret->device_info.Device = reinterpret_cast(Device); + Ret->device_info.Context = Context; + } + + if (InteropType == kmp_interop_type_targetsync) { + Ret->async_info = new __tgt_async_info(); + if (auto Err = setContext()) + return Err; + CUstream Stream; + if (auto Err = CUDAStreamManager.getResource(Stream)) + return Err; + + Ret->async_info->Queue = Stream; + } + return Ret; + } + + Error releaseInterop(omp_interop_val_t *Interop) override { + if (!Interop) + return Plugin::success(); + + if (Interop->async_info) + delete Interop->async_info; + + delete Interop; + return Plugin::success(); + } + Error enqueueHostCallImpl(void (*Callback)(void *), void *UserData, AsyncInfoWrapperTy &AsyncInfo) override { if (auto Err = setContext()) @@ -1310,8 +1343,12 @@ struct CUDADeviceTy : public GenericDeviceTy { // Allocate a buffer to store all of the known constructor / destructor // functions in so we can iterate them on the device. 
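The `createInterop`/`releaseInterop` hooks added to this plugin are what back the OpenMP `interop` construct: for `targetsync`, the plugin hands a freshly created `CUstream` back through the async-info queue. A hedged host-side sketch using only standard OpenMP 5.1 API (not part of this patch, and assuming a device whose plugin implements these hooks) might look like:

```cpp
#include <cstdio>
#include <omp.h>

int main() {
  int Dev = omp_get_default_device();
  omp_interop_t Obj = omp_interop_none;

  // Request a synchronization object from the runtime; with this plugin the
  // request reaches createInterop() and yields the underlying stream.
#pragma omp interop init(targetsync : Obj) device(Dev)

  int Rc;
  void *Stream = omp_get_interop_ptr(Obj, omp_ipr_targetsync, &Rc);
  std::printf("targetsync handle: %p (rc=%d)\n", Stream, Rc);

  // Tear the object down again, which reaches releaseInterop().
#pragma omp interop destroy(Obj)
  return 0;
}
```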
- void *Buffer = + auto BufferOrErr = allocate(Funcs.size() * sizeof(void *), nullptr, TARGET_ALLOC_DEVICE); + if (!BufferOrErr) + return BufferOrErr.takeError(); + + void *Buffer = *BufferOrErr; if (!Buffer) return Plugin::error(ErrorCode::OUT_OF_RESOURCES, "failed to allocate memory for global buffer"); @@ -1360,12 +1397,10 @@ struct CUDADeviceTy : public GenericDeviceTy { Error Err = Plugin::success(); AsyncInfoWrapper.finalize(Err); + if (Err) + return Err; - if (free(Buffer, TARGET_ALLOC_DEVICE) != OFFLOAD_SUCCESS) - return Plugin::error(ErrorCode::UNKNOWN, - "failed to free memory for global buffer"); - - return Err; + return free(Buffer, TARGET_ALLOC_DEVICE); } /// Stream manager for CUDA streams. diff --git a/offload/plugins-nextgen/host/src/rtl.cpp b/offload/plugins-nextgen/host/src/rtl.cpp index 44e2584fe53cc..0845032d0aae2 100644 --- a/offload/plugins-nextgen/host/src/rtl.cpp +++ b/offload/plugins-nextgen/host/src/rtl.cpp @@ -240,7 +240,7 @@ struct GenELF64DeviceTy : public GenericDeviceTy { } /// Allocate memory. Use std::malloc in all cases. - void *allocate(size_t Size, void *, TargetAllocTy Kind) override { + Expected allocate(size_t Size, void *, TargetAllocTy Kind) override { if (Size == 0) return nullptr; @@ -257,9 +257,9 @@ struct GenELF64DeviceTy : public GenericDeviceTy { } /// Free the memory. Use std::free in all cases. - int free(void *TgtPtr, TargetAllocTy Kind) override { + Error free(void *TgtPtr, TargetAllocTy Kind) override { std::free(TgtPtr); - return OFFLOAD_SUCCESS; + return Plugin::success(); } /// This plugin does nothing to lock buffers. Do not return an error, just diff --git a/offload/test/mapping/lambda_by_value.cpp b/offload/test/mapping/lambda_by_value.cpp index 5516dedd72a98..4c0278d405925 100644 --- a/offload/test/mapping/lambda_by_value.cpp +++ b/offload/test/mapping/lambda_by_value.cpp @@ -1,4 +1,5 @@ -// RUN: %libomptarget-compilexx-run-and-check-generic +// RUN: %libomptarget-compileopt-generic -fno-exceptions +// RUN: %libomptarget-run-generic 2>&1 | %fcheck-generic #include #include diff --git a/offload/test/mapping/map_back_race.cpp b/offload/test/mapping/map_back_race.cpp index 8a988d3be3b4f..49bbe87e2449d 100644 --- a/offload/test/mapping/map_back_race.cpp +++ b/offload/test/mapping/map_back_race.cpp @@ -2,6 +2,9 @@ // Taken from https://github.com/llvm/llvm-project/issues/54216 +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// UNSUPPORTED: gpu + #include #include #include diff --git a/offload/test/mapping/map_both_pointer_pointee.c b/offload/test/mapping/map_both_pointer_pointee.c index 7be1ba465e7db..1934b702dbbac 100644 --- a/offload/test/mapping/map_both_pointer_pointee.c +++ b/offload/test/mapping/map_both_pointer_pointee.c @@ -1,11 +1,10 @@ -// RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu -// RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu -// RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu -// RUN: %libomptarget-compile-run-and-check-x86_64-unknown-linux-gnu -// RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda +// RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: unified_shared_memory // UNSUPPORTED: amdgcn-amd-amdhsa +// +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// XFAIL: nvidiagpu #pragma omp declare target int *ptr1; diff --git a/offload/test/mapping/map_ptr_and_star_local.c b/offload/test/mapping/map_ptr_and_star_local.c index cc826b3c0290b..97fa7cd53715f 100644 --- 
a/offload/test/mapping/map_ptr_and_star_local.c +++ b/offload/test/mapping/map_ptr_and_star_local.c @@ -1,6 +1,9 @@ -// RUN: %libomptarget-compilexx-run-and-check-generic +// RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: libc +// +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// XFAIL: gpu #include #include diff --git a/offload/test/mapping/map_structptr_and_member_global.c b/offload/test/mapping/map_structptr_and_member_global.c index 960eea419964f..f855e87d7218a 100644 --- a/offload/test/mapping/map_structptr_and_member_global.c +++ b/offload/test/mapping/map_structptr_and_member_global.c @@ -1,6 +1,9 @@ -// RUN: %libomptarget-compilexx-run-and-check-generic +// RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: libc +// +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// XFAIL: gpu #include #include diff --git a/offload/test/mapping/map_structptr_and_member_local.c b/offload/test/mapping/map_structptr_and_member_local.c index bd759407ef09c..bd9e2a89eb6f1 100644 --- a/offload/test/mapping/map_structptr_and_member_local.c +++ b/offload/test/mapping/map_structptr_and_member_local.c @@ -1,6 +1,9 @@ -// RUN: %libomptarget-compilexx-run-and-check-generic +// RUN: %libomptarget-compile-run-and-check-generic // REQUIRES: libc +// +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// XFAIL: gpu #include #include diff --git a/offload/test/offloading/CUDA/basic_launch_multi_arg.cu b/offload/test/offloading/CUDA/basic_launch_multi_arg.cu index 1f84a0e1288d4..b2e1edf51e171 100644 --- a/offload/test/offloading/CUDA/basic_launch_multi_arg.cu +++ b/offload/test/offloading/CUDA/basic_launch_multi_arg.cu @@ -5,10 +5,10 @@ // RUN: %t | %fcheck-generic // clang-format on -// UNSUPPORTED: aarch64-unknown-linux-gnu -// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO -// UNSUPPORTED: x86_64-unknown-linux-gnu -// UNSUPPORTED: x86_64-unknown-linux-gnu-LTO +// REQUIRES: gpu +// +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// XFAIL: gpu #include diff --git a/offload/test/offloading/bug51781.c b/offload/test/offloading/bug51781.c index 2f30b035afbbe..ff7fa51aafc2a 100644 --- a/offload/test/offloading/bug51781.c +++ b/offload/test/offloading/bug51781.c @@ -16,6 +16,7 @@ // the generic state machine. // // RUN: %libomptarget-compile-generic -O2 -foffload-lto -Rpass=openmp-opt \ +// RUN: -Xoffload-linker -mllvm=-openmp-opt-disable-spmdization \ // RUN: -mllvm -openmp-opt-disable-spmdization > %t.custom 2>&1 // RUN: %fcheck-nvptx64-nvidia-cuda -check-prefix=CUSTOM -input-file=%t.custom // RUN: %fcheck-amdgcn-amd-amdhsa -check-prefix=CUSTOM -input-file=%t.custom @@ -24,7 +25,9 @@ // Repeat with reduction clause, which has managed to break the custom state // machine in the past. 
// -// RUN: %libomptarget-compile-generic -O2 -foffload-lto -Rpass=openmp-opt -DADD_REDUCTION \ +// RUN: %libomptarget-compile-generic -O2 -foffload-lto -Rpass=openmp-opt \ +// RUN: -DADD_REDUCTION \ +// RUN: -Xoffload-linker -mllvm=-openmp-opt-disable-spmdization \ // RUN: -mllvm -openmp-opt-disable-spmdization > %t.custom 2>&1 // RUN: %fcheck-nvptx64-nvidia-cuda -check-prefix=CUSTOM -input-file=%t.custom // RUN: %fcheck-amdgcn-amd-amdhsa -check-prefix=CUSTOM -input-file=%t.custom diff --git a/offload/test/offloading/fortran/declare-target-automap.f90 b/offload/test/offloading/fortran/declare-target-automap.f90 index b9c2d34c834fa..b44c0b2815274 100644 --- a/offload/test/offloading/fortran/declare-target-automap.f90 +++ b/offload/test/offloading/fortran/declare-target-automap.f90 @@ -1,6 +1,9 @@ !Offloading test for AUTOMAP modifier in declare target enter ! REQUIRES: flang, amdgpu +! FIXME: https://github.com/llvm/llvm-project/issues/161265 +! XFAIL: amdgpu + ! RUN: %libomptarget-compile-fortran-run-and-check-generic program automap_program use iso_c_binding, only: c_loc diff --git a/offload/test/offloading/fortran/target-declare-mapper-parent-allocatable.f90 b/offload/test/offloading/fortran/target-declare-mapper-parent-allocatable.f90 new file mode 100644 index 0000000000000..65e04af66e022 --- /dev/null +++ b/offload/test/offloading/fortran/target-declare-mapper-parent-allocatable.f90 @@ -0,0 +1,43 @@ +! This test validates that declare mapper for a derived type that extends +! a parent type with an allocatable component correctly maps the nested +! allocatable payload via the mapper when the whole object is mapped on +! target. + +! REQUIRES: flang, amdgpu + +! RUN: %libomptarget-compile-fortran-run-and-check-generic + +program target_declare_mapper_parent_allocatable + implicit none + + type, abstract :: base_t + real, allocatable :: base_arr(:) + end type base_t + + type, extends(base_t) :: real_t + real, allocatable :: real_arr(:) + end type real_t + !$omp declare mapper(custommapper: real_t :: t) map(t%base_arr, t%real_arr) + + type(real_t) :: r + integer :: i + allocate(r%base_arr(10), source=1.0) + allocate(r%real_arr(10), source=1.0) + + !$omp target map(mapper(custommapper), tofrom: r) + do i = 1, size(r%base_arr) + r%base_arr(i) = 2.0 + r%real_arr(i) = 3.0 + r%real_arr(i) = r%base_arr(1) + end do + !$omp end target + + + !CHECK: base_arr: 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. + print*, "base_arr: ", r%base_arr + !CHECK: real_arr: 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. + print*, "real_arr: ", r%real_arr + + deallocate(r%real_arr) + deallocate(r%base_arr) +end program target_declare_mapper_parent_allocatable diff --git a/offload/test/offloading/fortran/target-no-loop.f90 b/offload/test/offloading/fortran/target-no-loop.f90 new file mode 100644 index 0000000000000..3c88b00a53541 --- /dev/null +++ b/offload/test/offloading/fortran/target-no-loop.f90 @@ -0,0 +1,97 @@ +! REQUIRES: flang +! REQUIRES: gpu + +! RUN: %libomptarget-compile-fortran-generic -O3 -fopenmp-assume-threads-oversubscription -fopenmp-assume-teams-oversubscription +! RUN: env LIBOMPTARGET_INFO=16 OMP_NUM_TEAMS=16 OMP_TEAMS_THREAD_LIMIT=16 %libomptarget-run-generic 2>&1 | %fcheck-generic +function check_errors(array) result (errors) + integer, intent(in) :: array(1024) + integer :: errors + integer :: i + errors = 0 + do i = 1, 1024 + if ( array( i) .ne. 
(i) ) then + errors = errors + 1 + end if + end do +end function + +program main + use omp_lib + implicit none + integer :: i,j,red + integer :: array(1024), errors = 0 + array = 1 + + ! No-loop kernel + !$omp target teams distribute parallel do + do i = 1, 1024 + array(i) = i + end do + errors = errors + check_errors(array) + + ! SPMD kernel (num_teams clause blocks promotion to no-loop) + array = 1 + !$omp target teams distribute parallel do num_teams(3) + do i = 1, 1024 + array(i) = i + end do + + errors = errors + check_errors(array) + + ! No-loop kernel + array = 1 + !$omp target teams distribute parallel do num_threads(64) + do i = 1, 1024 + array(i) = i + end do + + errors = errors + check_errors(array) + + ! SPMD kernel + array = 1 + !$omp target parallel do + do i = 1, 1024 + array(i) = i + end do + + errors = errors + check_errors(array) + + ! Generic kernel + array = 1 + !$omp target teams distribute + do i = 1, 1024 + array(i) = i + end do + + errors = errors + check_errors(array) + + ! SPMD kernel (reduction clause blocks promotion to no-loop) + array = 1 + red =0 + !$omp target teams distribute parallel do reduction(+:red) + do i = 1, 1024 + red = red + array(i) + end do + + if (red .ne. 1024) then + errors = errors + 1 + end if + + print *,"number of errors: ", errors + +end program main + +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD-No-Loop mode +! CHECK: info: #Args: 3 Teams x Thrds: 64x 16 +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD mode +! CHECK: info: #Args: 3 Teams x Thrds: 3x 16 {{.*}} +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD-No-Loop mode +! CHECK: info: #Args: 3 Teams x Thrds: 64x 16 {{.*}} +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD mode +! CHECK: info: #Args: 3 Teams x Thrds: 1x 16 +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} Generic mode +! CHECK: info: #Args: 3 Teams x Thrds: 16x 16 {{.*}} +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD mode +! CHECK: info: #Args: 4 Teams x Thrds: 16x 16 {{.*}} +! 
CHECK: number of errors: 0 + diff --git a/offload/test/offloading/interop.c b/offload/test/offloading/interop.c index 26287e3ec5333..d9fa2ef883b9c 100644 --- a/offload/test/offloading/interop.c +++ b/offload/test/offloading/interop.c @@ -1,5 +1,6 @@ // RUN: %libomptarget-compile-run-and-check-generic -// REQUIRES: nvptx64-nvidia-cuda + +// XFAIL: * #include #include diff --git a/offload/test/offloading/single_threaded_for_barrier_hang_1.c b/offload/test/offloading/single_threaded_for_barrier_hang_1.c index 8ee6b51fb6818..a007521a5c742 100644 --- a/offload/test/offloading/single_threaded_for_barrier_hang_1.c +++ b/offload/test/offloading/single_threaded_for_barrier_hang_1.c @@ -1,6 +1,9 @@ // RUN: %libomptarget-compile-run-and-check-generic // RUN: %libomptarget-compileopt-run-and-check-generic +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// UNSUPPORTED: gpu + #include #include diff --git a/offload/test/offloading/single_threaded_for_barrier_hang_2.c b/offload/test/offloading/single_threaded_for_barrier_hang_2.c index a98abd6922da7..cabd2ed3dde71 100644 --- a/offload/test/offloading/single_threaded_for_barrier_hang_2.c +++ b/offload/test/offloading/single_threaded_for_barrier_hang_2.c @@ -1,6 +1,7 @@ // RUN: %libomptarget-compile-run-and-check-generic -// FIXME: This fails with optimization enabled and prints b: 0 -// FIXME: RUN: %libomptarget-compileopt-run-and-check-generic + +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// UNSUPPORTED: gpu #include #include diff --git a/offload/test/offloading/spmdization.c b/offload/test/offloading/spmdization.c index 7f3f47d9ef32e..48627cd7dae1a 100644 --- a/offload/test/offloading/spmdization.c +++ b/offload/test/offloading/spmdization.c @@ -2,7 +2,8 @@ // RUN: %libomptarget-compileopt-generic // RUN: env LIBOMPTARGET_INFO=16 \ // RUN: %libomptarget-run-generic 2>&1 | %fcheck-generic --check-prefixes=CHECK,SPMD -// RUN: %libomptarget-compileopt-generic -mllvm --openmp-opt-disable-spmdization +// RUN: %libomptarget-compileopt-generic -mllvm --openmp-opt-disable-spmdization \ +// RUN: -Xoffload-linker -mllvm=--openmp-opt-disable-spmdization // RUN: env LIBOMPTARGET_INFO=16 \ // RUN: %libomptarget-run-generic 2>&1 | %fcheck-generic --check-prefixes=CHECK,GENERIC // clang-format on diff --git a/offload/test/sanitizer/ptr_outside_alloc_1.c b/offload/test/sanitizer/ptr_outside_alloc_1.c index bdd028352e403..b30ce12ef1ea2 100644 --- a/offload/test/sanitizer/ptr_outside_alloc_1.c +++ b/offload/test/sanitizer/ptr_outside_alloc_1.c @@ -5,12 +5,10 @@ // RUN: %not --crash env -u LLVM_DISABLE_SYMBOLIZATION OFFLOAD_TRACK_ALLOCATION_TRACES=1 %libomptarget-run-generic 2>&1 | %fcheck-generic --check-prefixes=CHECK,TRACE // clang-format on -// UNSUPPORTED: aarch64-unknown-linux-gnu -// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO -// UNSUPPORTED: x86_64-unknown-linux-gnu -// UNSUPPORTED: x86_64-unknown-linux-gnu-LTO -// UNSUPPORTED: s390x-ibm-linux-gnu -// UNSUPPORTED: s390x-ibm-linux-gnu-LTO +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// UNSUPPORTED: nvidiagpu +// +// REQUIRES: gpu #include diff --git a/offload/test/sanitizer/ptr_outside_alloc_2.c b/offload/test/sanitizer/ptr_outside_alloc_2.c index 6a67962f9eb32..3bb8bdaca8b48 100644 --- a/offload/test/sanitizer/ptr_outside_alloc_2.c +++ b/offload/test/sanitizer/ptr_outside_alloc_2.c @@ -3,12 +3,10 @@ // RUN: %not --crash env -u LLVM_DISABLE_SYMBOLIZATION OFFLOAD_TRACK_ALLOCATION_TRACES=1 %libomptarget-run-generic 2>&1 | %fcheck-generic --check-prefixes=CHECK // 
clang-format on -// UNSUPPORTED: aarch64-unknown-linux-gnu -// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO -// UNSUPPORTED: x86_64-unknown-linux-gnu -// UNSUPPORTED: x86_64-unknown-linux-gnu-LTO -// UNSUPPORTED: s390x-ibm-linux-gnu -// UNSUPPORTED: s390x-ibm-linux-gnu-LTO +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// UNSUPPORTED: nvidiagpu +// +// REQUIRES: gpu #include diff --git a/offload/test/sanitizer/use_after_free_1.c b/offload/test/sanitizer/use_after_free_1.c index c4783c5c36df9..acc1de373f9e3 100644 --- a/offload/test/sanitizer/use_after_free_1.c +++ b/offload/test/sanitizer/use_after_free_1.c @@ -5,12 +5,10 @@ // RUN: %not --crash env -u LLVM_DISABLE_SYMBOLIZATION OFFLOAD_TRACK_ALLOCATION_TRACES=1 %libomptarget-run-generic 2>&1 | %fcheck-generic --check-prefixes=CHECK,TRACE // clang-format on -// UNSUPPORTED: aarch64-unknown-linux-gnu -// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO -// UNSUPPORTED: x86_64-unknown-linux-gnu -// UNSUPPORTED: x86_64-unknown-linux-gnu-LTO -// UNSUPPORTED: s390x-ibm-linux-gnu -// UNSUPPORTED: s390x-ibm-linux-gnu-LTO +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// UNSUPPORTED: nvidiagpu +// +// REQUIRES: gpu #include diff --git a/offload/test/sanitizer/use_after_free_2.c b/offload/test/sanitizer/use_after_free_2.c index 1c1e09744a750..3d70fb7b3a3fc 100644 --- a/offload/test/sanitizer/use_after_free_2.c +++ b/offload/test/sanitizer/use_after_free_2.c @@ -3,12 +3,10 @@ // RUN: %not --crash env -u LLVM_DISABLE_SYMBOLIZATION OFFLOAD_TRACK_ALLOCATION_TRACES=1 %libomptarget-run-generic 2>&1 | %fcheck-generic --check-prefixes=CHECK // clang-format on -// UNSUPPORTED: aarch64-unknown-linux-gnu -// UNSUPPORTED: aarch64-unknown-linux-gnu-LTO -// UNSUPPORTED: x86_64-unknown-linux-gnu -// UNSUPPORTED: x86_64-unknown-linux-gnu-LTO -// UNSUPPORTED: s390x-ibm-linux-gnu -// UNSUPPORTED: s390x-ibm-linux-gnu-LTO +// FIXME: https://github.com/llvm/llvm-project/issues/161265 +// UNSUPPORTED: nvidiagpu +// +// REQUIRES: gpu // If offload memory pooling is enabled for a large allocation, reuse error is // not detected. 
UNSUPPORTED: large_allocation_memory_pool diff --git a/offload/tools/deviceinfo/llvm-offload-device-info.cpp b/offload/tools/deviceinfo/llvm-offload-device-info.cpp index 67a6e07fc6b05..9b58d67f017ca 100644 --- a/offload/tools/deviceinfo/llvm-offload-device-info.cpp +++ b/offload/tools/deviceinfo/llvm-offload-device-info.cpp @@ -137,7 +137,7 @@ ol_result_t printDeviceValue(std::ostream &S, ol_device_handle_t Dev, size_t Size; OFFLOAD_ERR(olGetDeviceInfoSize(Dev, Info, &Size)); Val.resize(Size); - OFFLOAD_ERR(olGetDeviceInfo(Dev, Info, sizeof(Val), Val.data())); + OFFLOAD_ERR(olGetDeviceInfo(Dev, Info, Size, Val.data())); doWrite(S, reinterpret_cast(Val.data())); } else { T Val; diff --git a/offload/unittests/OffloadAPI/CMakeLists.txt b/offload/unittests/OffloadAPI/CMakeLists.txt index ba35c1ee87aac..50c99a5d5b639 100644 --- a/offload/unittests/OffloadAPI/CMakeLists.txt +++ b/offload/unittests/OffloadAPI/CMakeLists.txt @@ -27,7 +27,9 @@ add_offload_unittest("memory" memory/olMemAlloc.cpp memory/olMemFill.cpp memory/olMemFree.cpp - memory/olMemcpy.cpp) + memory/olMemcpy.cpp + memory/olGetMemInfo.cpp + memory/olGetMemInfoSize.cpp) add_offload_unittest("platform" platform/olGetPlatformInfo.cpp diff --git a/offload/unittests/OffloadAPI/memory/olGetMemInfo.cpp b/offload/unittests/OffloadAPI/memory/olGetMemInfo.cpp new file mode 100644 index 0000000000000..a4b382ff298ad --- /dev/null +++ b/offload/unittests/OffloadAPI/memory/olGetMemInfo.cpp @@ -0,0 +1,130 @@ +//===------- Offload API tests - olGetMemInfo -----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "../common/Fixtures.hpp" +#include +#include + +constexpr size_t SIZE = 1024; + +struct olGetMemInfoBaseTest : OffloadDeviceTest { + void *OffsetPtr() { return &reinterpret_cast(Ptr)[123]; } + + void *Ptr; +}; + +template +struct olGetMemInfoTest : olGetMemInfoBaseTest { + void SetUp() override { + RETURN_ON_FATAL_FAILURE(OffloadDeviceTest::SetUp()); + ASSERT_SUCCESS(olMemAlloc(Device, AllocType, SIZE, &Ptr)); + } + + void TearDown() override { + ASSERT_SUCCESS(olMemFree(Ptr)); + RETURN_ON_FATAL_FAILURE(OffloadDeviceTest::TearDown()); + } +}; +using olGetMemInfoDeviceTest = olGetMemInfoTest; +OFFLOAD_TESTS_INSTANTIATE_DEVICE_FIXTURE(olGetMemInfoDeviceTest); +using olGetMemInfoManagedTest = olGetMemInfoTest; +OFFLOAD_TESTS_INSTANTIATE_DEVICE_FIXTURE(olGetMemInfoManagedTest); +using olGetMemInfoHostTest = olGetMemInfoTest; +OFFLOAD_TESTS_INSTANTIATE_DEVICE_FIXTURE(olGetMemInfoHostTest); + +#define PER_ALLOC_TEST(FUNCTION) \ + TEST_P(olGetMemInfoDeviceTest, FUNCTION) { \ + FUNCTION(this, Ptr, OL_ALLOC_TYPE_DEVICE); \ + } \ + TEST_P(olGetMemInfoManagedTest, FUNCTION) { \ + FUNCTION(this, Ptr, OL_ALLOC_TYPE_MANAGED); \ + } \ + TEST_P(olGetMemInfoHostTest, FUNCTION) { \ + FUNCTION(this, OffsetPtr(), OL_ALLOC_TYPE_HOST); \ + } \ + TEST_P(olGetMemInfoDeviceTest, FUNCTION##Offset) { \ + FUNCTION(this, Ptr, OL_ALLOC_TYPE_DEVICE); \ + } \ + TEST_P(olGetMemInfoManagedTest, FUNCTION##Offset) { \ + FUNCTION(this, OffsetPtr(), OL_ALLOC_TYPE_MANAGED); \ + } \ + TEST_P(olGetMemInfoHostTest, FUNCTION##Offset) { \ + FUNCTION(this, OffsetPtr(), OL_ALLOC_TYPE_HOST); \ + } + +void SuccessDevice(olGetMemInfoBaseTest *Fixture, void *Ptr, + ol_alloc_type_t Type) { + ol_device_handle_t 
RetrievedDevice; + ASSERT_SUCCESS(olGetMemInfo(Fixture->Ptr, OL_MEM_INFO_DEVICE, + sizeof(RetrievedDevice), &RetrievedDevice)); + ASSERT_EQ(RetrievedDevice, Fixture->Device); +} +PER_ALLOC_TEST(SuccessDevice); + +void SuccessBase(olGetMemInfoBaseTest *Fixture, void *Ptr, + ol_alloc_type_t Type) { + void *RetrievedBase; + ASSERT_SUCCESS(olGetMemInfo(Fixture->Ptr, OL_MEM_INFO_BASE, + sizeof(RetrievedBase), &RetrievedBase)); + ASSERT_EQ(RetrievedBase, Fixture->Ptr); +} +PER_ALLOC_TEST(SuccessBase); + +void SuccessSize(olGetMemInfoBaseTest *Fixture, void *Ptr, + ol_alloc_type_t Type) { + size_t RetrievedSize; + ASSERT_SUCCESS(olGetMemInfo(Fixture->Ptr, OL_MEM_INFO_SIZE, + sizeof(RetrievedSize), &RetrievedSize)); + ASSERT_EQ(RetrievedSize, SIZE); +} +PER_ALLOC_TEST(SuccessSize); + +void SuccessType(olGetMemInfoBaseTest *Fixture, void *Ptr, + ol_alloc_type_t Type) { + ol_alloc_type_t RetrievedType; + ASSERT_SUCCESS(olGetMemInfo(Fixture->Ptr, OL_MEM_INFO_TYPE, + sizeof(RetrievedType), &RetrievedType)); + ASSERT_EQ(RetrievedType, Type); +} +PER_ALLOC_TEST(SuccessType); + +TEST_P(olGetMemInfoDeviceTest, InvalidNotFound) { + // Assuming that we aren't unlucky and happen to get 0x1234 as a random + // pointer + void *RetrievedBase; + ASSERT_ERROR(OL_ERRC_NOT_FOUND, + olGetMemInfo(reinterpret_cast(0x1234), OL_MEM_INFO_BASE, + sizeof(RetrievedBase), &RetrievedBase)); +} + +TEST_P(olGetMemInfoDeviceTest, InvalidNullPtr) { + ol_device_handle_t RetrievedDevice; + ASSERT_ERROR(OL_ERRC_INVALID_NULL_POINTER, + olGetMemInfo(nullptr, OL_MEM_INFO_DEVICE, + sizeof(RetrievedDevice), &RetrievedDevice)); +} + +TEST_P(olGetMemInfoDeviceTest, InvalidSizeZero) { + ol_device_handle_t RetrievedDevice; + ASSERT_ERROR(OL_ERRC_INVALID_SIZE, + olGetMemInfo(Ptr, OL_MEM_INFO_DEVICE, 0, &RetrievedDevice)); +} + +TEST_P(olGetMemInfoDeviceTest, InvalidSizeSmall) { + ol_device_handle_t RetrievedDevice; + ASSERT_ERROR(OL_ERRC_INVALID_SIZE, + olGetMemInfo(Ptr, OL_MEM_INFO_DEVICE, + sizeof(RetrievedDevice) - 1, &RetrievedDevice)); +} + +TEST_P(olGetMemInfoDeviceTest, InvalidNullPointerPropValue) { + ol_device_handle_t RetrievedDevice; + ASSERT_ERROR( + OL_ERRC_INVALID_NULL_POINTER, + olGetMemInfo(Ptr, OL_MEM_INFO_DEVICE, sizeof(RetrievedDevice), nullptr)); +} diff --git a/offload/unittests/OffloadAPI/memory/olGetMemInfoSize.cpp b/offload/unittests/OffloadAPI/memory/olGetMemInfoSize.cpp new file mode 100644 index 0000000000000..f1a1e790fb22f --- /dev/null +++ b/offload/unittests/OffloadAPI/memory/olGetMemInfoSize.cpp @@ -0,0 +1,63 @@ +//===------- Offload API tests - olGetMemInfoSize -------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include + +#include "../common/Fixtures.hpp" + +struct olGetMemInfoSizeTest : OffloadDeviceTest { + void *OffsetPtr() { return &reinterpret_cast(Ptr)[123]; } + + void SetUp() override { + RETURN_ON_FATAL_FAILURE(OffloadDeviceTest::SetUp()); + ASSERT_SUCCESS(olMemAlloc(Device, OL_ALLOC_TYPE_DEVICE, 0x1024, &Ptr)); + } + + void TearDown() override { + ASSERT_SUCCESS(olMemFree(Ptr)); + RETURN_ON_FATAL_FAILURE(OffloadDeviceTest::TearDown()); + } + + void *Ptr; +}; +OFFLOAD_TESTS_INSTANTIATE_DEVICE_FIXTURE(olGetMemInfoSizeTest); + +TEST_P(olGetMemInfoSizeTest, SuccessDevice) { + size_t Size = 0; + ASSERT_SUCCESS(olGetMemInfoSize(Ptr, OL_MEM_INFO_DEVICE, &Size)); + ASSERT_EQ(Size, sizeof(ol_device_handle_t)); +} + +TEST_P(olGetMemInfoSizeTest, SuccessBase) { + size_t Size = 0; + ASSERT_SUCCESS(olGetMemInfoSize(Ptr, OL_MEM_INFO_BASE, &Size)); + ASSERT_EQ(Size, sizeof(void *)); +} + +TEST_P(olGetMemInfoSizeTest, SuccessSize) { + size_t Size = 0; + ASSERT_SUCCESS(olGetMemInfoSize(Ptr, OL_MEM_INFO_SIZE, &Size)); + ASSERT_EQ(Size, sizeof(size_t)); +} + +TEST_P(olGetMemInfoSizeTest, SuccessType) { + size_t Size = 0; + ASSERT_SUCCESS(olGetMemInfoSize(Ptr, OL_MEM_INFO_TYPE, &Size)); + ASSERT_EQ(Size, sizeof(ol_alloc_type_t)); +} + +TEST_P(olGetMemInfoSizeTest, InvalidSymbolInfoEnumeration) { + size_t Size = 0; + ASSERT_ERROR(OL_ERRC_INVALID_ENUMERATION, + olGetMemInfoSize(Ptr, OL_MEM_INFO_FORCE_UINT32, &Size)); +} + +TEST_P(olGetMemInfoSizeTest, InvalidNullPointer) { + ASSERT_ERROR(OL_ERRC_INVALID_NULL_POINTER, + olGetMemInfoSize(Ptr, OL_MEM_INFO_DEVICE, nullptr)); +} diff --git a/offload/unittests/OffloadAPI/memory/olMemAlloc.cpp b/offload/unittests/OffloadAPI/memory/olMemAlloc.cpp index 00e428ec2abc7..445262aa0c583 100644 --- a/offload/unittests/OffloadAPI/memory/olMemAlloc.cpp +++ b/offload/unittests/OffloadAPI/memory/olMemAlloc.cpp @@ -34,6 +34,26 @@ TEST_P(olMemAllocTest, SuccessAllocDevice) { olMemFree(Alloc); } +TEST_P(olMemAllocTest, SuccessAllocMany) { + std::vector Allocs; + Allocs.reserve(1000); + + constexpr ol_alloc_type_t TYPES[3] = { + OL_ALLOC_TYPE_DEVICE, OL_ALLOC_TYPE_MANAGED, OL_ALLOC_TYPE_HOST}; + + for (size_t I = 1; I < 1000; I++) { + void *Alloc = nullptr; + ASSERT_SUCCESS(olMemAlloc(Device, TYPES[I % 3], 1024 * I, &Alloc)); + ASSERT_NE(Alloc, nullptr); + + Allocs.push_back(Alloc); + } + + for (auto *A : Allocs) { + olMemFree(A); + } +} + TEST_P(olMemAllocTest, InvalidNullDevice) { void *Alloc = nullptr; ASSERT_ERROR(OL_ERRC_INVALID_NULL_HANDLE, diff --git a/openmp/device/CMakeLists.txt b/openmp/device/CMakeLists.txt index ded961adcce1f..54cfdfef440a5 100644 --- a/openmp/device/CMakeLists.txt +++ b/openmp/device/CMakeLists.txt @@ -64,7 +64,7 @@ set_target_properties(libompdevice PROPERTIES RUNTIME_OUTPUT_NAME libomptarget-${target_name}.bc) # If the user built with the GPU C library enabled we will use that instead. 
-if(LIBOMPTARGET_GPU_LIBC_SUPPORT) +if(TARGET libc) target_compile_definitions(libompdevice PRIVATE OMPTARGET_HAS_LIBC) endif() target_compile_definitions(libompdevice PRIVATE SHARED_SCRATCHPAD_SIZE=512) diff --git a/openmp/device/src/Workshare.cpp b/openmp/device/src/Workshare.cpp index 59a2cc3f27aca..653104ce883d1 100644 --- a/openmp/device/src/Workshare.cpp +++ b/openmp/device/src/Workshare.cpp @@ -800,10 +800,6 @@ template class StaticLoopChunker { // If we know we have more threads than iterations we can indicate that to // avoid an outer loop. - if (config::getAssumeThreadsOversubscription()) { - OneIterationPerThread = true; - } - if (OneIterationPerThread) ASSERT(NumThreads >= NumIters, "Broken assumption"); @@ -851,10 +847,6 @@ template class StaticLoopChunker { // If we know we have more blocks than iterations we can indicate that to // avoid an outer loop. - if (config::getAssumeTeamsOversubscription()) { - OneIterationPerThread = true; - } - if (OneIterationPerThread) ASSERT(NumBlocks >= NumIters, "Broken assumption"); @@ -914,11 +906,6 @@ template class StaticLoopChunker { // If we know we have more threads (across all blocks) than iterations we // can indicate that to avoid an outer loop. - if (config::getAssumeTeamsOversubscription() & - config::getAssumeThreadsOversubscription()) { - OneIterationPerThread = true; - } - if (OneIterationPerThread) ASSERT(NumBlocks * NumThreads >= NumIters, "Broken assumption"); diff --git a/openmp/runtime/test/transform/fuse/foreach.cpp b/openmp/runtime/test/transform/fuse/foreach.cpp new file mode 100644 index 0000000000000..176465b201faa --- /dev/null +++ b/openmp/runtime/test/transform/fuse/foreach.cpp @@ -0,0 +1,191 @@ +// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include +#include +#include + +struct Reporter { + const char *name; + + Reporter(const char *name) : name(name) { print("ctor"); } + + Reporter() : name("") { print("ctor"); } + + Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); } + + Reporter(Reporter &&that) : name(that.name) { print("move ctor"); } + + ~Reporter() { print("dtor"); } + + const Reporter &operator=(const Reporter &that) { + print("copy assign"); + this->name = that.name; + return *this; + } + + const Reporter &operator=(Reporter &&that) { + print("move assign"); + this->name = that.name; + return *this; + } + + struct Iterator { + const Reporter *owner; + int pos; + + Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {} + + Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) { + owner->print("iterator copy ctor"); + } + + Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) { + owner->print("iterator move ctor"); + } + + ~Iterator() { owner->print("iterator dtor"); } + + const Iterator &operator=(const Iterator &that) { + owner->print("iterator copy assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + const Iterator &operator=(Iterator &&that) { + owner->print("iterator move assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + bool operator==(const Iterator &that) const { + owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos); + return this->pos == that.pos; + } + + Iterator &operator++() { + owner->print("iterator prefix ++"); + pos -= 1; + return *this; + } + + Iterator operator++(int) { + owner->print("iterator postfix ++"); + auto result = *this; + pos -= 1; + return result; + } + + int operator*() 
const { + int result = 2 - pos; + owner->print("iterator deref: %i", result); + return result; + } + + size_t operator-(const Iterator &that) const { + int result = (2 - this->pos) - (2 - that.pos); + owner->print("iterator distance: %d", result); + return result; + } + + Iterator operator+(int steps) const { + owner->print("iterator advance: %i += %i", 2 - this->pos, steps); + return Iterator(owner, pos - steps); + } + + void print(const char *msg) const { owner->print(msg); } + }; + + Iterator begin() const { + print("begin()"); + return Iterator(this, 2); + } + + Iterator end() const { + print("end()"); + return Iterator(this, -1); + } + + void print(const char *msg, ...) const { + va_list args; + va_start(args, msg); + printf("[%s] ", name); + vprintf(msg, args); + printf("\n"); + va_end(args); + } +}; + +int main() { + printf("do\n"); +#pragma omp fuse + { + for (Reporter a{"C"}; auto &&v : Reporter("A")) + printf("v=%d\n", v); + for (Reporter aa{"D"}; auto &&vv : Reporter("B")) + printf("vv=%d\n", vv); + } + printf("done\n"); + return EXIT_SUCCESS; +} + +// CHECK: [C] ctor +// CHECK-NEXT: [A] ctor +// CHECK-NEXT: [A] end() +// CHECK-NEXT: [A] begin() +// CHECK-NEXT: [A] begin() +// CHECK-NEXT: [A] iterator distance: 3 +// CHECK-NEXT: [D] ctor +// CHECK-NEXT: [B] ctor +// CHECK-NEXT: [B] end() +// CHECK-NEXT: [B] begin() +// CHECK-NEXT: [B] begin() +// CHECK-NEXT: [B] iterator distance: 3 +// CHECK-NEXT: [A] iterator advance: 0 += 0 +// CHECK-NEXT: [A] iterator move assign +// CHECK-NEXT: [A] iterator deref: 0 +// CHECK-NEXT: v=0 +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [B] iterator advance: 0 += 0 +// CHECK-NEXT: [B] iterator move assign +// CHECK-NEXT: [B] iterator deref: 0 +// CHECK-NEXT: vv=0 +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [A] iterator advance: 0 += 1 +// CHECK-NEXT: [A] iterator move assign +// CHECK-NEXT: [A] iterator deref: 1 +// CHECK-NEXT: v=1 +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [B] iterator advance: 0 += 1 +// CHECK-NEXT: [B] iterator move assign +// CHECK-NEXT: [B] iterator deref: 1 +// CHECK-NEXT: vv=1 +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [A] iterator advance: 0 += 2 +// CHECK-NEXT: [A] iterator move assign +// CHECK-NEXT: [A] iterator deref: 2 +// CHECK-NEXT: v=2 +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [B] iterator advance: 0 += 2 +// CHECK-NEXT: [B] iterator move assign +// CHECK-NEXT: [B] iterator deref: 2 +// CHECK-NEXT: vv=2 +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [B] dtor +// CHECK-NEXT: [D] dtor +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [A] dtor +// CHECK-NEXT: [C] dtor +// CHECK-NEXT: done + +#endif diff --git a/openmp/runtime/test/transform/fuse/intfor.c b/openmp/runtime/test/transform/fuse/intfor.c new file mode 100644 index 0000000000000..b8171b4df7042 --- /dev/null +++ b/openmp/runtime/test/transform/fuse/intfor.c @@ -0,0 +1,50 @@ +// RUN: %libomp-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include + +int main() { + printf("do\n"); +#pragma omp fuse + { + for (int i = 5; i <= 25; i += 5) + printf("i=%d\n", i); + for (int j = 10; j < 100; j += 10) + printf("j=%d\n", j); + for (int k = 10; k > 0; --k) + printf("k=%d\n", k); + } + printf("done\n"); + return EXIT_SUCCESS; +} +#endif /* HEADER */ + +// CHECK: do +// CHECK-NEXT: i=5 +// CHECK-NEXT: j=10 +// 
CHECK-NEXT: k=10 +// CHECK-NEXT: i=10 +// CHECK-NEXT: j=20 +// CHECK-NEXT: k=9 +// CHECK-NEXT: i=15 +// CHECK-NEXT: j=30 +// CHECK-NEXT: k=8 +// CHECK-NEXT: i=20 +// CHECK-NEXT: j=40 +// CHECK-NEXT: k=7 +// CHECK-NEXT: i=25 +// CHECK-NEXT: j=50 +// CHECK-NEXT: k=6 +// CHECK-NEXT: j=60 +// CHECK-NEXT: k=5 +// CHECK-NEXT: j=70 +// CHECK-NEXT: k=4 +// CHECK-NEXT: j=80 +// CHECK-NEXT: k=3 +// CHECK-NEXT: j=90 +// CHECK-NEXT: k=2 +// CHECK-NEXT: k=1 +// CHECK-NEXT: done diff --git a/openmp/runtime/test/transform/fuse/iterfor.cpp b/openmp/runtime/test/transform/fuse/iterfor.cpp new file mode 100644 index 0000000000000..552484b2981c4 --- /dev/null +++ b/openmp/runtime/test/transform/fuse/iterfor.cpp @@ -0,0 +1,194 @@ +// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include +#include +#include + +struct Reporter { + const char *name; + + Reporter(const char *name) : name(name) { print("ctor"); } + + Reporter() : name("") { print("ctor"); } + + Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); } + + Reporter(Reporter &&that) : name(that.name) { print("move ctor"); } + + ~Reporter() { print("dtor"); } + + const Reporter &operator=(const Reporter &that) { + print("copy assign"); + this->name = that.name; + return *this; + } + + const Reporter &operator=(Reporter &&that) { + print("move assign"); + this->name = that.name; + return *this; + } + + struct Iterator { + const Reporter *owner; + int pos; + + Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {} + + Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) { + owner->print("iterator copy ctor"); + } + + Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) { + owner->print("iterator move ctor"); + } + + ~Iterator() { owner->print("iterator dtor"); } + + const Iterator &operator=(const Iterator &that) { + owner->print("iterator copy assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + const Iterator &operator=(Iterator &&that) { + owner->print("iterator move assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + bool operator==(const Iterator &that) const { + owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos); + return this->pos == that.pos; + } + + bool operator!=(const Iterator &that) const { + owner->print("iterator %d != %d", 2 - this->pos, 2 - that.pos); + return this->pos != that.pos; + } + + Iterator &operator++() { + owner->print("iterator prefix ++"); + pos -= 1; + return *this; + } + + Iterator operator++(int) { + owner->print("iterator postfix ++"); + auto result = *this; + pos -= 1; + return result; + } + + int operator*() const { + int result = 2 - pos; + owner->print("iterator deref: %i", result); + return result; + } + + size_t operator-(const Iterator &that) const { + int result = (2 - this->pos) - (2 - that.pos); + owner->print("iterator distance: %d", result); + return result; + } + + Iterator operator+(int steps) const { + owner->print("iterator advance: %i += %i", 2 - this->pos, steps); + return Iterator(owner, pos - steps); + } + }; + + Iterator begin() const { + print("begin()"); + return Iterator(this, 2); + } + + Iterator end() const { + print("end()"); + return Iterator(this, -1); + } + + void print(const char *msg, ...) 
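+  // printf-style logging helper: each message is prefixed with this
+  // Reporter's name ("[C] ", "[D] "), which lets the FileCheck expectations
+  // attribute every iterator operation to the loop it belongs to.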
const { + va_list args; + va_start(args, msg); + printf("[%s] ", name); + vprintf(msg, args); + printf("\n"); + va_end(args); + } +}; + +int main() { + printf("do\n"); + Reporter C("C"); + Reporter D("D"); +#pragma omp fuse + { + for (auto it = C.begin(); it != C.end(); ++it) + printf("v=%d\n", *it); + + for (auto it = D.begin(); it != D.end(); ++it) + printf("vv=%d\n", *it); + } + printf("done\n"); + return EXIT_SUCCESS; +} + +#endif /* HEADER */ + +// CHECK: do +// CHECK: [C] ctor +// CHECK-NEXT: [D] ctor +// CHECK-NEXT: [C] begin() +// CHECK-NEXT: [C] begin() +// CHECK-NEXT: [C] end() +// CHECK-NEXT: [C] iterator distance: 3 +// CHECK-NEXT: [D] begin() +// CHECK-NEXT: [D] begin() +// CHECK-NEXT: [D] end() +// CHECK-NEXT: [D] iterator distance: 3 +// CHECK-NEXT: [C] iterator advance: 0 += 0 +// CHECK-NEXT: [C] iterator move assign +// CHECK-NEXT: [C] iterator deref: 0 +// CHECK-NEXT: v=0 +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [D] iterator advance: 0 += 0 +// CHECK-NEXT: [D] iterator move assign +// CHECK-NEXT: [D] iterator deref: 0 +// CHECK-NEXT: vv=0 +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [C] iterator advance: 0 += 1 +// CHECK-NEXT: [C] iterator move assign +// CHECK-NEXT: [C] iterator deref: 1 +// CHECK-NEXT: v=1 +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [D] iterator advance: 0 += 1 +// CHECK-NEXT: [D] iterator move assign +// CHECK-NEXT: [D] iterator deref: 1 +// CHECK-NEXT: vv=1 +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [C] iterator advance: 0 += 2 +// CHECK-NEXT: [C] iterator move assign +// CHECK-NEXT: [C] iterator deref: 2 +// CHECK-NEXT: v=2 +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [D] iterator advance: 0 += 2 +// CHECK-NEXT: [D] iterator move assign +// CHECK-NEXT: [D] iterator deref: 2 +// CHECK-NEXT: vv=2 +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: done +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [D] dtor +// CHECK-NEXT: [C] dtor diff --git a/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-foreach.cpp b/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-foreach.cpp new file mode 100644 index 0000000000000..dcbbdf1b6734e --- /dev/null +++ b/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-foreach.cpp @@ -0,0 +1,207 @@ +// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include +#include +#include + +struct Reporter { + const char *name; + + Reporter(const char *name) : name(name) { print("ctor"); } + + Reporter() : name("") { print("ctor"); } + + Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); } + + Reporter(Reporter &&that) : name(that.name) { print("move ctor"); } + + ~Reporter() { print("dtor"); } + + const Reporter &operator=(const Reporter &that) { + print("copy assign"); + this->name = that.name; + return *this; + } + + const Reporter &operator=(Reporter &&that) { + print("move assign"); + this->name = that.name; + return *this; + } + + struct Iterator { + const Reporter *owner; + int pos; + + Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {} + + Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) { + owner->print("iterator copy ctor"); + } + + Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) { + owner->print("iterator move ctor"); + } + + ~Iterator() { owner->print("iterator 
dtor"); } + + const Iterator &operator=(const Iterator &that) { + owner->print("iterator copy assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + const Iterator &operator=(Iterator &&that) { + owner->print("iterator move assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + bool operator==(const Iterator &that) const { + owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos); + return this->pos == that.pos; + } + + Iterator &operator++() { + owner->print("iterator prefix ++"); + pos -= 1; + return *this; + } + + Iterator operator++(int) { + owner->print("iterator postfix ++"); + auto result = *this; + pos -= 1; + return result; + } + + int operator*() const { + int result = 2 - pos; + owner->print("iterator deref: %i", result); + return result; + } + + size_t operator-(const Iterator &that) const { + int result = (2 - this->pos) - (2 - that.pos); + owner->print("iterator distance: %d", result); + return result; + } + + Iterator operator+(int steps) const { + owner->print("iterator advance: %i += %i", 2 - this->pos, steps); + return Iterator(owner, pos - steps); + } + + void print(const char *msg) const { owner->print(msg); } + }; + + Iterator begin() const { + print("begin()"); + return Iterator(this, 2); + } + + Iterator end() const { + print("end()"); + return Iterator(this, -1); + } + + void print(const char *msg, ...) const { + va_list args; + va_start(args, msg); + printf("[%s] ", name); + vprintf(msg, args); + printf("\n"); + va_end(args); + } +}; + +int main() { + printf("do\n"); +#pragma omp parallel for collapse(2) num_threads(1) + for (int i = 0; i < 3; ++i) +#pragma omp fuse + { + for (Reporter c{"init-stmt"}; auto &&v : Reporter("range")) + printf("i=%d v=%d\n", i, v); + for (int vv = 0; vv < 3; ++vv) + printf("i=%d vv=%d\n", i, vv); + } + printf("done\n"); + return EXIT_SUCCESS; +} + +#endif /* HEADER */ + +// CHECK: do +// CHECK-NEXT: [init-stmt] ctor +// CHECK-NEXT: [range] ctor +// CHECK-NEXT: [range] end() +// CHECK-NEXT: [range] begin() +// CHECK-NEXT: [range] begin() +// CHECK-NEXT: [range] iterator distance: 3 +// CHECK-NEXT: [range] iterator advance: 0 += 0 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 0 +// CHECK-NEXT: i=0 v=0 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=0 vv=0 +// CHECK-NEXT: [range] iterator advance: 0 += 1 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 1 +// CHECK-NEXT: i=0 v=1 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=0 vv=1 +// CHECK-NEXT: [range] iterator advance: 0 += 2 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 2 +// CHECK-NEXT: i=0 v=2 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=0 vv=2 +// CHECK-NEXT: [range] iterator advance: 0 += 0 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 0 +// CHECK-NEXT: i=1 v=0 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=1 vv=0 +// CHECK-NEXT: [range] iterator advance: 0 += 1 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 1 +// CHECK-NEXT: i=1 v=1 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=1 vv=1 +// CHECK-NEXT: [range] iterator advance: 0 += 2 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 2 +// CHECK-NEXT: i=1 v=2 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=1 vv=2 +// CHECK-NEXT: [range] iterator advance: 0 += 0 +// CHECK-NEXT: [range] 
iterator move assign +// CHECK-NEXT: [range] iterator deref: 0 +// CHECK-NEXT: i=2 v=0 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=2 vv=0 +// CHECK-NEXT: [range] iterator advance: 0 += 1 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 1 +// CHECK-NEXT: i=2 v=1 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=2 vv=1 +// CHECK-NEXT: [range] iterator advance: 0 += 2 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 2 +// CHECK-NEXT: i=2 v=2 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=2 vv=2 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: [range] dtor +// CHECK-NEXT: [init-stmt] dtor +// CHECK-NEXT: done diff --git a/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-intfor.c b/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-intfor.c new file mode 100644 index 0000000000000..9630fec50bc20 --- /dev/null +++ b/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-intfor.c @@ -0,0 +1,45 @@ +// RUN: %libomp-cxx-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include + +int main() { + printf("do\n"); +#pragma omp parallel for collapse(2) num_threads(1) + for (int i = 0; i < 3; ++i) +#pragma omp fuse + { + for (int j = 0; j < 3; ++j) + printf("i=%d j=%d\n", i, j); + for (int k = 0; k < 3; ++k) + printf("i=%d k=%d\n", i, k); + } + printf("done\n"); + return EXIT_SUCCESS; +} + +#endif /* HEADER */ + +// CHECK: do +// CHECK-NEXT: i=0 j=0 +// CHECK-NEXT: i=0 k=0 +// CHECK-NEXT: i=0 j=1 +// CHECK-NEXT: i=0 k=1 +// CHECK-NEXT: i=0 j=2 +// CHECK-NEXT: i=0 k=2 +// CHECK-NEXT: i=1 j=0 +// CHECK-NEXT: i=1 k=0 +// CHECK-NEXT: i=1 j=1 +// CHECK-NEXT: i=1 k=1 +// CHECK-NEXT: i=1 j=2 +// CHECK-NEXT: i=1 k=2 +// CHECK-NEXT: i=2 j=0 +// CHECK-NEXT: i=2 k=0 +// CHECK-NEXT: i=2 j=1 +// CHECK-NEXT: i=2 k=1 +// CHECK-NEXT: i=2 j=2 +// CHECK-NEXT: i=2 k=2 +// CHECK-NEXT: done diff --git a/orc-rt/include/orc-rt/CallableTraitsHelper.h b/orc-rt/include/orc-rt/CallableTraitsHelper.h new file mode 100644 index 0000000000000..12d7d5672c73a --- /dev/null +++ b/orc-rt/include/orc-rt/CallableTraitsHelper.h @@ -0,0 +1,74 @@ +//===- CallableTraitsHelper.h - Callable arg/ret type extractor -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// CallableTraitsHelper API. +// +//===----------------------------------------------------------------------===// + +#ifndef ORC_RT_CALLABLETRAITSHELPER_H +#define ORC_RT_CALLABLETRAITSHELPER_H + +#include +#include + +namespace orc_rt { + +/// CallableTraitsHelper takes an implementation class template Impl and some +/// callable type C and passes the return and argument types of C to the Impl +/// class template. +/// +/// This can be used to simplify the implementation of classes that need to +/// operate on callable types. +template
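+// For illustration only (hypothetical use; the exact member names are not
+// shown in this excerpt): given an implementation class template such as
+//
+//   template <typename RetT, typename... ArgTs> struct SignatureOf;
+//
+// instantiating CallableTraitsHelper with SignatureOf and a callable type
+// like int(float, double) is expected to forward the callable's return and
+// argument types to SignatureOf, i.e. produce SignatureOf<int, float, double>.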